├── .gitignore ├── CONTRIBUTING ├── LICENSE ├── README.md ├── groups ├── microscopeimagequality ├── __init__.py ├── application.py ├── constants.py ├── data │ └── imagej │ │ ├── README.md │ │ ├── pom.xml │ │ └── src │ │ └── main │ │ └── java │ │ └── MicroscopeImageFocusQualityClassifier.java ├── data_provider.py ├── dataset_creation.py ├── degrade.py ├── evaluation.py ├── miq.py ├── prediction.py ├── summarize.py └── validation.py ├── project.config ├── setup.py ├── tests ├── __init__.py ├── data │ ├── 00_mcf-z-stacks-03212011_k06_s2_w12667264a-6432-4f7e-bf58-625a1319a1c9.tif │ ├── BBBC006_z_aligned__a01__s1__w1_10.png │ ├── BBBC006_z_aligned__a01__s1__w1_10_cropped.png │ ├── BBBC006_z_aligned__a02__s1__w1_10.png │ ├── BBBC006_z_aligned__a03__s1__w1_10.png │ ├── BBBC006_z_aligned__a04__s1__w1_10.png │ ├── BBBC006_z_aligned__a05__s1__w1_10.png │ ├── BBBC006_z_aligned__a06__s1__w1_10.png │ ├── BBBC006_z_aligned__a07__s1__w1_10.png │ ├── BBBC006_z_aligned__a08__s1__w1_10.png │ ├── BBBC006_z_aligned__a09__s1__w1_10.png │ ├── README │ ├── annotated_image_predicted_0_label_0.png │ ├── annotated_image_predicted_0_label_1.png │ ├── annotated_image_predicted_1_label_0.png │ ├── annotated_image_predicted_1_label_1.png │ ├── cell_image.png │ ├── cell_image.tiff │ ├── cell_image2.png │ ├── cell_image_airy_blurred.png │ ├── cell_image_blurred.png │ ├── cell_image_degraded.png │ ├── cell_image_poisson_noise.png │ ├── cell_image_poisson_noise_py.png │ ├── cell_image_saturated.png │ ├── cell_image_saturated_with_offset.png │ ├── cell_image_vignetted.png │ ├── data_train.num_records │ ├── data_train.tfrecord │ ├── images_for_glob_test │ │ ├── 00_mcf-z-stacks-03212011_a01_s1_w1a571d43d-5554-47fd-bb54-5db68290e5a7.tif │ │ ├── 00_mcf-z-stacks-03212011_a19_s2_w12703a47b-7574-461a-85e2-afc38bc0adae.tif │ │ ├── 00_mcf-z-stacks-03212011_b01_s2_w12e721516-a202-4647-9f19-fd73ff016ce6.tif │ │ ├── 00_mcf-z-stacks-03212011_b14_s2_w126875e94-8541-482c-8aea-09f2e0b1e2ce.tif │ │ ├── 00_mcf-z-stacks-03212011_c06_s2_w123356693-a029-4886-9735-9e3ca50eb8d8.tif │ │ ├── 00_mcf-z-stacks-03212011_d02_s2_w122501c04-ccbe-4fc4-bb08-924ccccaad58.tif │ │ ├── 00_mcf-z-stacks-03212011_d10_s2_w129ae2539-fdfc-4e29-9215-03a79115d678.tif │ │ ├── 00_mcf-z-stacks-03212011_e10_s2_w12490a9b6-6991-4eac-a93a-123978224209.tif │ │ ├── 00_mcf-z-stacks-03212011_e24_s2_w1241c3e73-1e5a-4121-b7b5-02af37510046.tif │ │ ├── 00_mcf-z-stacks-03212011_h01_s2_w12ab2c20a-2393-4718-8f3d-a4b9bb17a47f.tif │ │ ├── 00_mcf-z-stacks-03212011_h09_s2_w126c275bc-9c3e-40af-b3ca-54f91091d353.tif │ │ ├── 00_mcf-z-stacks-03212011_i05_s2_w12e9fa745-2bf9-463c-9e74-e9ff6386b255.tif │ │ ├── 00_mcf-z-stacks-03212011_i07_s2_w125417efe-c7d4-40b1-b26a-c1ecfea30de6.tif │ │ ├── 00_mcf-z-stacks-03212011_i16_s2_w1234f5368-8bfd-4995-bbb3-0e0c88676dc5.tif │ │ ├── 00_mcf-z-stacks-03212011_j10_s2_w12f140c18-a1df-47e2-89d1-864a42adb67c.tif │ │ ├── 00_mcf-z-stacks-03212011_l01_s2_w1272566d5-fc1b-411f-8280-035a9c69283d.tif │ │ ├── 00_mcf-z-stacks-03212011_l22_s2_w12b8de869-e1c8-438b-86b6-7576116596c8.tif │ │ ├── 00_mcf-z-stacks-03212011_l23_s2_w129334806-f24b-469c-98fe-7d1ff2f8f968.tif │ │ ├── 00_mcf-z-stacks-03212011_m14_s2_w12e7632d4-7a01-4cc0-a680-e686a4869613.tif │ │ ├── 00_mcf-z-stacks-03212011_n04_s2_w12d7547cb-bf90-434b-b2e1-79730bfcd3f7.tif │ │ ├── 00_mcf-z-stacks-03212011_n06_s2_w121660a1f-dbf1-4f9b-b1fb-6b1222cada0c.tif │ │ ├── 00_mcf-z-stacks-03212011_n16_s2_w12a4361b7-56a0-48a7-acc9-f05288591da3.tif │ │ ├── 00_mcf-z-stacks-03212011_p09_s2_w12c20e8b2-d191-40a8-b8a3-d52d13e65aab.tif 
│ │ ├── 00_mcf-z-stacks-03212011_p24_s2_w1283f1213-dc59-40b5-ae21-9d66400a5f20.tif │ │ └── README │ ├── psf.png │ ├── test │ │ ├── 1.tif │ │ └── 2.tif │ └── training │ │ ├── 0 │ │ └── 00_mcf-z-stacks-03212011_a01_s1_w1a571d43d-5554-47fd-bb54-5db68290e5a7.tif │ │ ├── 1 │ │ └── 00_mcf-z-stacks-03212011_b01_s2_w12e721516-a202-4647-9f19-fd73ff016ce6.tif │ │ ├── 2 │ │ └── 00_mcf-z-stacks-03212011_a19_s2_w12703a47b-7574-461a-85e2-afc38bc0adae.tif │ │ ├── 3 │ │ └── 00_mcf-z-stacks-03212011_b14_s2_w126875e94-8541-482c-8aea-09f2e0b1e2ce.tif │ │ ├── 4 │ │ └── 00_mcf-z-stacks-03212011_c06_s2_w123356693-a029-4886-9735-9e3ca50eb8d8.tif │ │ ├── 5 │ │ └── 00_mcf-z-stacks-03212011_d02_s2_w122501c04-ccbe-4fc4-bb08-924ccccaad58.tif │ │ ├── 6 │ │ └── 00_mcf-z-stacks-03212011_d10_s2_w129ae2539-fdfc-4e29-9215-03a79115d678.tif │ │ ├── 7 │ │ └── 00_mcf-z-stacks-03212011_e10_s2_w12490a9b6-6991-4eac-a93a-123978224209.tif │ │ ├── 8 │ │ └── 00_mcf-z-stacks-03212011_e24_s2_w1241c3e73-1e5a-4121-b7b5-02af37510046.tif │ │ ├── 9 │ │ └── 00_mcf-z-stacks-03212011_h01_s2_w12ab2c20a-2393-4718-8f3d-a4b9bb17a47f.tif │ │ └── 10 │ │ └── 00_mcf-z-stacks-03212011_h09_s2_w126c275bc-9c3e-40af-b3ca-54f91091d353.tif ├── test_data_provider.py ├── test_dataset_creation.py ├── test_degrade.py ├── test_evaluation.py ├── test_inference.py ├── test_miq.py └── test_validation.py └── wellmontagefijimacro ├── README.md ├── example_result.jpg └── wellmontagefijimacro.ijm /.gitignore: -------------------------------------------------------------------------------- 1 | *$py.class 2 | *,cover 3 | *.egg 4 | *.egg-info/ 5 | *.log 6 | *.manifest 7 | *.mo 8 | *.pot 9 | *.py[cod] 10 | *.sage.py 11 | *.so 12 | *.spec 13 | .cache 14 | .coverage 15 | .coverage.* 16 | .DS_Store 17 | .eggs/ 18 | .env 19 | .hypothesis/ 20 | .idea 21 | .installed.cfg 22 | .ipynb_checkpoints 23 | .Python 24 | .python-version 25 | .ropeproject 26 | .scrapy 27 | .spyderproject 28 | .tox/ 29 | .venv 30 | .webassets-cache 31 | __pycache__/ 32 | build/ 33 | celerybeat-schedule 34 | coverage.xml 35 | develop-eggs/ 36 | dist/ 37 | docs/_build/ 38 | downloads/ 39 | eggs/ 40 | env/ 41 | ENV/ 42 | htmlcov/ 43 | instance/ 44 | lib/ 45 | lib64/ 46 | local_settings.py 47 | nosetests.xml 48 | parts/ 49 | pip-delete-this-directory.txt 50 | pip-log.txt 51 | sdist/ 52 | target/ 53 | var/ 54 | venv/ 55 | wheels/ 56 | -------------------------------------------------------------------------------- /CONTRIBUTING: -------------------------------------------------------------------------------- 1 | # How to contribute 2 | 3 | We'd love to accept your patches and contributions to this project. There are 4 | just a few small guidelines you need to follow. 5 | 6 | ## Contributor License Agreement 7 | 8 | Contributions to this project must be accompanied by a Contributor License 9 | Agreement. You (or your employer) retain the copyright to your contribution, 10 | this simply gives us permission to use and redistribute your contributions as 11 | part of the project. Head over to to see 12 | your current agreements on file or to sign a new one. 13 | 14 | You generally only need to submit a CLA once, so if you've already submitted one 15 | (even if it was for a different project), you probably don't need to do it 16 | again. 17 | 18 | ## Code reviews 19 | 20 | All submissions, including submissions by project members, require review. We 21 | use GitHub pull requests for this purpose. Consult [GitHub Help] for more 22 | information on using pull requests. 
23 | 24 | [GitHub Help]: https://help.github.com/articles/about-pull-requests/ -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are wunder common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 
62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 
180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Microscope Image Focus Quality Classifier 2 | ============================ 3 | This repo contains Python code for using a pre-trained TensorFlow model to classify the 4 | focus quality of microscope images (i.e., to run inference). 5 | 6 | Code for training a new model from a dataset of in-focus-only images is included 7 | as well. 8 | 9 | This is not an official Google product. 10 | 11 | See our paper [PDF](http://rdcu.be/I5cE) for reference: 12 | 13 | Yang, S. J., Berndl, M., Ando, D. M., Barch, M., Narayanaswamy, A., 14 | Christiansen, E., Hoyer, S., Roat, C., Hung, J., Rueden, C. T., 15 | Shankar, A., Finkbeiner, S., & Nelson, P. (2018). **Assessing 16 | microscope image focus quality with deep learning**. *BMC Bioinformatics*, 17 | 19(1). 18 | 19 | Also see the 20 | [Fiji (ImageJ) Microscope Focus Quality plugin](https://imagej.net/Microscope_Focus_Quality), 21 | which allows use of the same pre-trained model on user-supplied images 22 | in a user-friendly graphical user interface, without the need to write 23 | any code. Fiji also has macro scripting capabilities for running 24 | batches of images. This plugin is being actively maintained. I 25 | recommend testing your images with the Fiji plugin before 26 | investing further effort in this Python library. 27 | 28 | Finally, please note that this Python library was developed on the 29 | older and now (as of 2020) deprecated Python 2.7 and TensorFlow 1.x, and is not 30 | being actively maintained and updated. The `setup.py` 31 | currently restricts installation to these older versions. If you just want the 32 | pre-trained model for integration with your own inference code, you 33 | may need a `saved_model.pb` file that's currently only distributed 34 | with the Fiji plugin and downloadable 35 | [here](https://downloads.imagej.net/fiji/models/microscope-image-quality-model.zip). Updating 36 | this library to work with Python 3.x may be fairly straightforward. 37 | However, updating it to work on TensorFlow 2.x may require 38 | quite a bit of refactoring (at the very least, it appears the data 39 | provider implementation and interface may need to change).
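If you do take the `saved_model.pb` route, the snippet below is a minimal sketch (not part of this package) of how that bundle might be loaded with the TensorFlow 1.x Python API. The tag `inference`, the signature key `serving_default`, the `input`/`probabilities` signature names, and the divide-by-65535 preprocessing are taken from the bundled ImageJ plugin source (`MicroscopeImageFocusQualityClassifier.java`); the file paths are placeholders and the archive layout is not documented here, so treat this as a starting point rather than a supported API.

```python
# Minimal sketch: run the Fiji-distributed SavedModel on one 16-bit grayscale
# image with TensorFlow 1.x. Paths are placeholders.
import numpy as np
import skimage.io
import tensorflow as tf

export_dir = "/path/to/microscope-image-quality-model"  # unzipped SavedModel directory

# Scale a 16-bit grayscale image to floats in [0.0, 1.0], as the ImageJ plugin does.
image = skimage.io.imread("example.tif").astype(np.float32) / 65535.0

with tf.Session(graph=tf.Graph()) as sess:
    # "inference" is the tag used when the model was exported (per the plugin code).
    meta_graph = tf.saved_model.loader.load(sess, ["inference"], export_dir)
    signature = meta_graph.signature_def["serving_default"]
    input_name = signature.inputs["input"].name
    probabilities_name = signature.outputs["probabilities"].name
    # One row of per-class probabilities is returned for each 84x84 patch.
    probabilities = sess.run(probabilities_name, feed_dict={input_name: image})
    print(probabilities.shape)
```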
40 | 41 | 42 | Getting started 43 | ------------- 44 | 45 | Clone the `main` branch of this repository: 46 | 47 | ``` 48 | git clone -b main https://github.com/google/microscopeimagequality.git 49 | ``` 50 | 51 | Install the package: 52 | 53 | ``` 54 | cd microscopeimagequality 55 | ``` 56 | 57 | **Note**: This requires that pip be installed. 58 | 59 | **Note**: This library has not been migrated beyond TensorFlow 1.x. 60 | 61 | **Note**: As of now TensorFlow 1.x requires Python 3.7 or earlier. 62 | 63 | **Note**: This library has been tested with Python 3.7.9 (using `pyenv`). 64 | 65 | ``` 66 | python --version 67 | python -m pip install --editable . 68 | ``` 69 | 70 | If using `pyenv`, run `pyenv rehash`. 71 | 72 | Download the model: 73 | This downloads the `model.ckpt-1000042` checkpoint (a model trained 74 | for 1000042 steps) specified in `constants.py`. 75 | ``` 76 | microscopeimagequality download 77 | ``` 78 | or alternatively: 79 | ```python 80 | import microscopeimagequality.miq 81 | microscopeimagequality.miq.download_model() 82 | ``` 83 | 84 | Add the path to the local repository (e.g. `/Users/user/my_repo/microscopeimagequality`) 85 | to the `PYTHONPATH` environment variable: 86 | ``` 87 | export PYTHONPATH="${PYTHONPATH}:/Users/user/my_repo/microscopeimagequality" 88 | ``` 89 | 90 | Run all tests to make sure everything works. Install any missing 91 | packages (e.g. `python -m pip install pytest`, then if using `pyenv`, 92 | run `pyenv rehash`). 93 | 94 | ``` 95 | pytest --disable-pytest-warnings 96 | ``` 97 | 98 | You should now be able to run: 99 | ``` 100 | microscopeimagequality --help 101 | ``` 102 | 103 | or directly access the 104 | module functions in a Jupyter notebook or from your own Python module: 105 | ``` 106 | python 107 | from microscopeimagequality import degrade 108 | degrade.degrade(...) 109 | ``` 110 | 111 | Running inference 112 | ------------- 113 | ### Requirements for running inference 114 | * A pre-trained TensorFlow model (`.ckpt` files), downloadable using the 115 | download instructions above. 116 | * TensorFlow 1.0.0 or higher, numpy, scipy, pypng, PIL, skimage, matplotlib 117 | * Input grayscale 16-bit images, `.png` or `.tif` format, all with the same 118 | width and height. 119 | 120 | ### How to 121 | 122 | (Optional) Confirm that all images are of the same dimension: 123 | ```sh 124 | microscopeimagequality validate tests/data/images_for_glob_test/*.tif --width 100 --height 100 125 | ``` 126 | 127 | Run inference on each image independently. 128 | 129 | ``` 130 | microscopeimagequality predict \ 131 | --output tests/output/ \ 132 | tests/data/BBBC006*10.png 133 | ``` 134 | 135 | Summarize the prediction results across the entire dataset. Output will be in 136 | the "summary" subdirectory. 137 | ``` 138 | microscopeimagequality summarize tests/output/miq_result_images/ 139 | ``` 140 | 141 | Training a new model 142 | ---------------- 143 | 144 | ### Requirements 145 | * TensorFlow 1.0.0 or higher, and several other Python modules. 146 | * A dataset of high-quality, in-focus images (at least 400), as grayscale 16-bit 147 | images, `.png` or `.tif` format, all with the same width and height. 148 | 149 | ### How to 150 | 151 | 1. Generate additional labeled training examples of defocused images using `degrade.py`. 152 | 1. Launch `microscopeimagequality fit` to train a model. 153 | 1. Launch `microscopeimagequality evaluate` with a held-out test dataset. 154 | 1. Use TensorBoard to view training and eval progress (see `evaluation.py`). 155 | 1. 
When satisfied with model accuracy, save the `model.ckpt` files for later use. 156 | 157 | 158 | Example fit: 159 | ``` 160 | microscopeimagequality fit \ 161 | --output tests/train_output \ 162 | tests/data/training/0/*.tif \ 163 | tests/data/training/1/*.tif \ 164 | tests/data/training/2/*.tif \ 165 | tests/data/training/3/*.tif \ 166 | tests/data/training/4/*.tif \ 167 | tests/data/training/5/*.tif \ 168 | tests/data/training/6/*.tif \ 169 | tests/data/training/7/*.tif \ 170 | tests/data/training/8/*.tif \ 171 | tests/data/training/9/*.tif \ 172 | tests/data/training/10/*.tif 173 | ``` 174 | Example evaluation: 175 | ``` 176 | microscopeimagequality evaluate \ 177 | --checkpoint /model.ckpt-XXXXXXX \ 178 | --output tests/data/output \ 179 | tests/data/training/0/*.tif \ 180 | tests/data/training/1/*.tif \ 181 | tests/data/training/2/*.tif \ 182 | tests/data/training/3/*.tif \ 183 | tests/data/training/4/*.tif \ 184 | tests/data/training/5/*.tif \ 185 | tests/data/training/6/*.tif \ 186 | tests/data/training/7/*.tif \ 187 | tests/data/training/8/*.tif \ 188 | tests/data/training/9/*.tif \ 189 | tests/data/training/10/*.tif 190 | ``` 191 | 192 | 193 | -------------------------------------------------------------------------------- /groups: -------------------------------------------------------------------------------- 1 | # UUID Group Name 2 | # 3 | global:Anonymous-Users Anonymous Users 4 | global:Project-Owners Project Owners 5 | global:Registered-Users Registered Users 6 | mdb:gas mdb/gas 7 | -------------------------------------------------------------------------------- /microscopeimagequality/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/microscopeimagequality/__init__.py -------------------------------------------------------------------------------- /microscopeimagequality/application.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | 4 | import click 5 | import numpy 6 | import six 7 | import tensorflow 8 | 9 | # Use this backend for producing PNGs without interactive display. 
10 | import matplotlib 11 | matplotlib.use('Agg') 12 | 13 | import microscopeimagequality.constants as constants 14 | import microscopeimagequality.data_provider 15 | import microscopeimagequality.dataset_creation 16 | import microscopeimagequality.evaluation 17 | import microscopeimagequality.prediction 18 | import microscopeimagequality.miq 19 | import microscopeimagequality.summarize 20 | import microscopeimagequality.validation 21 | 22 | _MAX_IMAGES_TO_VALIDATE = 1e6 23 | 24 | 25 | @click.group() 26 | def command(): 27 | pass 28 | 29 | 30 | @command.command() 31 | @click.argument("images", nargs=-1, type=click.Path(exists=True)) 32 | @click.option("--checkpoint", type=click.Path()) 33 | @click.option("--output", type=click.Path()) 34 | @click.option("--patch-width", default=84) 35 | def evaluate(images, checkpoint, output, patch_width): 36 | """ 37 | 38 | """ 39 | num_classes = len(images) 40 | 41 | output_tfrecord_file_pattern = 'data_%s.sstable' 42 | 43 | image_size = microscopeimagequality.dataset_creation.image_size_from_glob(images[0], patch_width) 44 | 45 | microscopeimagequality.dataset_creation.dataset_to_examples_in_tfrecord( 46 | list_of_image_globs=images, 47 | output_directory=output, 48 | output_tfrecord_filename=output_tfrecord_file_pattern % 'test', 49 | num_classes=num_classes, 50 | image_width=image_size.width, 51 | image_height=image_size.height, 52 | image_background_value=0.0, 53 | normalize=False 54 | ) 55 | 56 | tfexamples_tfrecord_file_pattern = os.path.join(output, output_tfrecord_file_pattern) 57 | 58 | graph = tensorflow.Graph() 59 | 60 | with graph.as_default(): 61 | batch_size = int(image_size.height * image_size.width / patch_width ** 2) 62 | 63 | images, one_hot_labels, _, num_samples = microscopeimagequality.data_provider.provide_data( 64 | tfrecord_file_pattern=tfexamples_tfrecord_file_pattern, 65 | split_name='test', 66 | batch_size=batch_size, 67 | num_classes=num_classes, 68 | image_width=image_size.width, 69 | image_height=image_size.height, 70 | patch_width=patch_width, 71 | randomize=False 72 | ) 73 | 74 | logits, labels, probabilities, predictions = microscopeimagequality.evaluation.get_model_and_metrics( 75 | images=images, 76 | num_classes=num_classes, 77 | one_hot_labels=one_hot_labels, 78 | is_training=False, 79 | model_id=0 80 | ) 81 | 82 | # Define the loss 83 | microscopeimagequality.miq.add_loss(logits, one_hot_labels, use_rank_loss=True) 84 | 85 | loss = tensorflow.losses.get_total_loss() 86 | 87 | # Additional aggregate metrics 88 | aggregated_prediction, aggregated_label = microscopeimagequality.evaluation.get_aggregated_prediction(probabilities, labels, batch_size) 89 | 90 | metrics = { 91 | 'Accuracy': tensorflow.contrib.metrics.streaming_accuracy(predictions, labels), 92 | 'Mean_Loss': tensorflow.contrib.metrics.streaming_mean(loss), 93 | 'Aggregated_Accuracy': tensorflow.contrib.metrics.streaming_accuracy(aggregated_prediction, aggregated_label), 94 | } 95 | 96 | names_to_values, names_to_updates = tensorflow.contrib.slim.metrics.aggregate_metric_map(metrics) 97 | 98 | for name, value in six.iteritems(names_to_values): 99 | tensorflow.summary.scalar(name, value) 100 | 101 | tensorflow.summary.histogram("eval_images", images) 102 | tensorflow.summary.histogram("eval_labels", labels) 103 | tensorflow.summary.histogram("eval_predictions", predictions) 104 | tensorflow.summary.histogram("eval_probabilities", probabilities) 105 | 106 | microscopeimagequality.evaluation.annotate_classification_errors( 107 | images, 108 | predictions, 109 
| labels, 110 | probabilities, 111 | image_height=image_size[0], 112 | image_width=image_size[1] 113 | ) 114 | 115 | # This ensures that we evaluate over exactly all samples. 116 | num_batches = num_samples 117 | 118 | tensorflow.contrib.slim.evaluation.evaluation_loop( 119 | master='', 120 | checkpoint_dir=checkpoint, 121 | logdir=output, 122 | num_evals=num_batches, 123 | eval_op=names_to_updates.values(), 124 | eval_interval_secs=60 125 | ) 126 | 127 | 128 | @command.command() 129 | @click.argument("images", nargs=-1, type=click.Path(exists=True)) 130 | @click.option("--output", nargs=1, type=click.Path()) 131 | def fit(images, output): 132 | if not os.path.exists(output): 133 | os.makedirs(output) 134 | 135 | num_classes = len(images) 136 | 137 | output_tfrecord_file_pattern = ('worker%g_' % 0) + 'data_%s.tfrecord' 138 | 139 | image_size = microscopeimagequality.dataset_creation.image_size_from_glob(images[0], 84) 140 | 141 | # Read images and convert to TFExamples in an TFRecord. 142 | microscopeimagequality.dataset_creation.dataset_to_examples_in_tfrecord( 143 | images, 144 | output, 145 | output_tfrecord_file_pattern % 'train', 146 | num_classes, 147 | image_width=image_size.width, 148 | image_height=image_size.height, 149 | image_background_value=0.0 150 | ) 151 | 152 | tfexamples_tfrecord_file_pattern = os.path.join(output, output_tfrecord_file_pattern) 153 | 154 | graph = tensorflow.Graph() 155 | 156 | # builder = tensorflow.saved_model.builder.SavedModelBuilder("/tmp/quality-fit/") 157 | # 158 | # with tf.Session(graph=tf.Graph()) as sess: 159 | # ... 160 | # builder.add_meta_graph_and_variables(sess, 161 | # ["foo-tag"], 162 | # signature_def_map=foo_signatures, 163 | # assets_collection=foo_assets) 164 | 165 | with graph.as_default(): 166 | # If ps_tasks is zero, the local device is used. When using multiple 167 | # (non-local) replicas, the ReplicaDeviceSetter distributes the variables 168 | # across the different devices. 169 | with tensorflow.device(tensorflow.train.replica_device_setter(0)): 170 | images, one_hot_labels, _, _ = microscopeimagequality.data_provider.provide_data( 171 | tfexamples_tfrecord_file_pattern, 172 | split_name='train', 173 | batch_size=64, 174 | num_classes=num_classes, 175 | image_width=image_size.width, 176 | image_height=image_size.height, 177 | patch_width=84 178 | ) 179 | 180 | # Visualize the input 181 | tensorflow.summary.image('train_input', images) 182 | # slim.summaries.add_histogram_summaries([images, labels]) 183 | 184 | # Define the model: 185 | logits = microscopeimagequality.miq.miq_model( 186 | images=images, 187 | num_classes=num_classes, 188 | is_training=True, 189 | model_id=0 190 | ) 191 | 192 | # Specify the loss function: 193 | microscopeimagequality.miq.add_loss(logits, one_hot_labels, use_rank_loss=True) 194 | total_loss = tensorflow.losses.get_total_loss() 195 | tensorflow.summary.scalar('Total_Loss', total_loss) 196 | 197 | # Specify the optimization scheme: 198 | optimizer = tensorflow.train.AdamOptimizer(0.00003) 199 | 200 | # Set up training. 201 | train_op = tensorflow.contrib.slim.learning.create_train_op(total_loss, optimizer) 202 | 203 | # Monitor model variables for debugging. 204 | # slim.summaries.add_histogram_summaries(slim.get_model_variables()) 205 | 206 | # Run training. 
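        # Note: number_of_steps=10 below gives only a brief demonstration run; for
        # reference, the released checkpoint (model.ckpt-1000042, see constants.py and
        # the README) corresponds to roughly a million training steps.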
207 | tensorflow.contrib.slim.learning.train( 208 | train_op=train_op, 209 | logdir=output, 210 | is_chief=0 == 0, 211 | number_of_steps=10, 212 | save_summaries_secs=15, 213 | save_interval_secs=60 214 | ) 215 | 216 | @command.command() 217 | @click.argument("output_path", nargs=-1, type=click.Path(), default=None) 218 | def download(output_path): 219 | if output_path: 220 | microscopeimagequality.miq.download_model(output_path=output_path[0]) 221 | else: 222 | microscopeimagequality.miq.download_model() 223 | 224 | @command.command() 225 | @click.argument("images", nargs=-1, type=click.Path(exists=True)) 226 | @click.option("--checkpoint", type=click.Path(), default=None) 227 | @click.option("--height", type=int) 228 | @click.option("--output", type=click.Path()) 229 | @click.option("--patch-width", default=84) 230 | @click.option("--visualize", is_flag=True) 231 | @click.option("--width", type=int) 232 | def predict(images, checkpoint, output, width, height, patch_width, visualize): 233 | if output is None: 234 | logging.fatal('Eval directory required.') 235 | 236 | if checkpoint is None: 237 | checkpoint = microscopeimagequality.miq.DEFAULT_MODEL_PATH 238 | 239 | if images is None: 240 | logging.fatal('Must provide image globs list.') 241 | 242 | if not os.path.isdir(output): 243 | os.makedirs(output) 244 | 245 | use_unlabeled_data = True 246 | 247 | # Input images will be cropped to image_height x image_width. 248 | image_size = microscopeimagequality.dataset_creation.image_size_from_glob(images[0], patch_width) 249 | 250 | if width is not None and height is not None: 251 | image_width = int(patch_width * numpy.floor(width / patch_width)) 252 | 253 | image_height = int(patch_width * numpy.floor(height / patch_width)) 254 | 255 | if image_width > image_size.width or image_height > image_size.height: 256 | raise ValueError('Specified (image_width, image_height) = (%d, %d) exceeds valid dimensions (%d, %d).' % (image_width, image_height, image_size.width, image_size.height)) 257 | else: 258 | image_width = image_size.width 259 | 260 | image_height = image_size.height 261 | 262 | # All patches evaluated in a batch correspond to one single input image. 
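    # For example (hypothetical sizes): an image cropped to 2016 x 1512 with the
    # default patch_width of 84 yields batch_size = 2016 * 1512 / 84**2 = 432 patches.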
263 | batch_size = int(image_width * image_height / (patch_width ** 2)) 264 | 265 | logging.info('Using batch_size=%d for image_width=%d, image_height=%d, model_patch_width=%d', batch_size, image_width, image_height, patch_width) 266 | 267 | tfexamples_tfrecord = microscopeimagequality.prediction.build_tfrecord_from_pngs(images, use_unlabeled_data, 11, output, 0.0, 1.0, 1, 1, image_width, image_height) 268 | 269 | num_samples = microscopeimagequality.data_provider.get_num_records(tfexamples_tfrecord % microscopeimagequality.prediction._SPLIT_NAME) 270 | 271 | logging.info('TFRecord has %g samples.', num_samples) 272 | 273 | graph = tensorflow.Graph() 274 | 275 | with graph.as_default(): 276 | images, one_hot_labels, image_paths, _ = microscopeimagequality.data_provider.provide_data( 277 | batch_size=batch_size, 278 | image_height=image_height, 279 | image_width=image_width, 280 | num_classes=11, 281 | num_threads=1, 282 | patch_width=patch_width, 283 | randomize=False, 284 | split_name=microscopeimagequality.prediction._SPLIT_NAME, 285 | tfrecord_file_pattern=tfexamples_tfrecord 286 | ) 287 | 288 | model_metrics = microscopeimagequality.evaluation.get_model_and_metrics( 289 | images=images, 290 | is_training=False, 291 | model_id=0, 292 | num_classes=11, 293 | one_hot_labels=one_hot_labels 294 | ) 295 | 296 | microscopeimagequality.prediction.run_model_inference( 297 | aggregation_method=microscopeimagequality.evaluation.METHOD_AVERAGE, 298 | image_height=image_height, 299 | image_paths=image_paths, 300 | image_width=image_width, 301 | images=images, 302 | labels=model_metrics.labels, 303 | model_ckpt_file=checkpoint, 304 | num_samples=num_samples, 305 | num_shards=1, 306 | output_directory=os.path.join(output, 'miq_result_images'), 307 | patch_width=patch_width, 308 | probabilities=model_metrics.probabilities, 309 | shard_num=1, 310 | show_plots=visualize 311 | ) 312 | 313 | # Delete TFRecord to save disk space. 314 | tfrecord_path = tfexamples_tfrecord % microscopeimagequality.prediction._SPLIT_NAME 315 | 316 | os.remove(tfrecord_path) 317 | 318 | logging.info('Deleted %s', tfrecord_path) 319 | 320 | 321 | @command.command() 322 | @click.argument("experiments", type=click.Path(exists=True)) 323 | def summarize(experiments): 324 | if experiments is None: 325 | logging.fatal('Experiment directory required.') 326 | 327 | probabilities, labels, certainties, orig_names, predictions = microscopeimagequality.evaluation.load_inference_results(experiments) 328 | 329 | if not predictions: 330 | logging.fatal('No inference output found at %s.', experiments) 331 | 332 | microscopeimagequality.summarize.check_image_count_matches(experiments, len(predictions)) 333 | 334 | output_path = os.path.join(experiments, 'summary') 335 | 336 | if not os.path.isdir(output_path): 337 | os.makedirs(output_path) 338 | 339 | # Less useful plots go here. 
340 | output_path_all_plots = os.path.join(output_path, 'additional_plots') 341 | 342 | if not os.path.isdir(output_path_all_plots): 343 | os.makedirs(output_path_all_plots) 344 | 345 | microscopeimagequality.summarize.save_histograms_scatter_plots_and_csv(probabilities, labels, certainties, orig_names, predictions, output_path, output_path_all_plots) 346 | 347 | microscopeimagequality.summarize.save_summary_montages(probabilities, certainties, orig_names, predictions, experiments, output_path, output_path_all_plots) 348 | 349 | logging.info('Done summarizing results at %s', output_path) 350 | 351 | 352 | # $ quality validate tests/data/images_for_glob_test/*.tif --width 100 --height 100 353 | @command.command() 354 | @click.argument("images", nargs=-1, type=click.Path(exists=True)) 355 | @click.option("--width", type=int) 356 | @click.option("--height", type=int) 357 | @click.option("--patch-width", default=84) 358 | def validate(images, width, height, patch_width): 359 | image_paths = [] 360 | 361 | for image in images: 362 | image_paths += microscopeimagequality.dataset_creation.get_images_from_glob(image, _MAX_IMAGES_TO_VALIDATE) 363 | 364 | click.echo('Found {} paths'.format(len(image_paths))) 365 | 366 | if len(image_paths) == 0: 367 | raise ValueError('No images found.') 368 | 369 | microscopeimagequality.validation.check_duplicate_image_name(image_paths) 370 | 371 | if width is None or height is None: 372 | height, width = microscopeimagequality.dataset_creation.image_size_from_glob(images, patch_width) 373 | 374 | microscopeimagequality.validation.check_image_dimensions(image_paths, height, width) 375 | -------------------------------------------------------------------------------- /microscopeimagequality/constants.py: -------------------------------------------------------------------------------- 1 | """Common constants used across image quality modules.""" 2 | 3 | VALID_MASK_FORMAT = 'valid_mask_%s' 4 | CERTAINTY_MASK_FORMAT = 'certainty_mask_%s' 5 | PREDICTIONS_MASK_FORMAT = 'predictions_mask_%s' 6 | ORIG_IMAGE_FORMAT = 'orig_name=%s' 7 | PATCH_SIDE_LENGTH = 84 8 | 9 | REMOTE_MODEL_CHECKPOINT_PATH = "https://storage.googleapis.com/microscope-image-quality/static/model/model.ckpt-1000042" 10 | -------------------------------------------------------------------------------- /microscopeimagequality/data/imagej/README.md: -------------------------------------------------------------------------------- 1 | # [ImageJ](https://imagej.net) plugin for the microscope image focus quality classifier. 2 | 3 | ## Quickstart 4 | 5 | Assuming you already have [Apache Maven](https://maven.apache.org) installed: 6 | 7 | ```sh 8 | mvn compile exec:java 9 | ``` 10 | 11 | ## Installation in Fiji 12 | 13 | If you have [Fiji](http://fiji.sc) installed and want to incorporate this plugin 14 | into your installation: 15 | 16 | ```sh 17 | # Set this to the path there Fiji.app is installed 18 | FIJI_APP_PATH="/Users/me/Desktop/Fiji.app" 19 | mvn -Dimagej.app.directory="${FIJI_APP_PATH}" 20 | ``` 21 | 22 | Then restart Fiji and click on the `Microscopy` menu. 
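The plugin asks for an input image and a directory containing the pre-trained model. One way to obtain the model is the archive linked from the top-level README; the commands below are only a sketch (the unpacked layout is not documented here), so point the "Focus Quality Model" directory chooser at whichever unpacked folder contains `saved_model.pb`:

```sh
# Download and unpack the pre-trained model (URL from the top-level README).
curl -L -o microscope-image-quality-model.zip \
  https://downloads.imagej.net/fiji/models/microscope-image-quality-model.zip
unzip microscope-image-quality-model.zip -d microscope-image-quality-model
```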
23 | 24 | ## Notes 25 | 26 | - Instructions for installing [Apache Maven](https://maven.apache.org) might 27 | be as simple as `apt-get install maven` on Ubuntu and `brew install maven` 28 | on OS X with [homebrew](https://brew.sh) 29 | -------------------------------------------------------------------------------- /microscopeimagequality/data/imagej/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 6 | 4.0.0 7 | 8 | 9 | org.scijava 10 | pom-scijava 11 | 14.0.0 12 | 13 | 14 | 15 | 16 | com.google.research.biology.microscopy 17 | quality 18 | 1.0.0-SNAPSHOT 19 | 20 | 21 | Microscopy Image Focus Quality Classifier Plugin for ImageJ. 22 | ImageJ plugin to get a microsope image annotated with focus quality. 23 | https://microscopeimagequality.googlesource.com/All-Projects/+/main 24 | 2017 25 | 26 | 27 | 28 | imagej.public 29 | http://maven.imagej.net/content/groups/public 30 | 31 | 32 | 33 | 34 | 38 | true 39 | MicroscopeImageFocusQualityClassifier 40 | 41 | 42 | 43 | 44 | net.imagej 45 | imagej 46 | 47 | 48 | 49 | org.tensorflow 50 | tensorflow 51 | 1.1.0-rc2 52 | 53 | 54 | 55 | org.tensorflow 56 | proto 57 | 1.1.0-rc2 58 | 59 | 60 | 61 | 62 | 63 | -------------------------------------------------------------------------------- /microscopeimagequality/data/imagej/src/main/java/MicroscopeImageFocusQualityClassifier.java: -------------------------------------------------------------------------------- 1 | import io.scif.img.ImgOpener; 2 | import java.io.File; 3 | import java.io.IOException; 4 | import java.util.Arrays; 5 | import java.util.List; 6 | import net.imagej.Dataset; 7 | import net.imagej.ImageJ; 8 | import net.imglib2.RandomAccess; 9 | import net.imglib2.img.Img; 10 | import net.imglib2.img.array.ArrayImgFactory; 11 | import net.imglib2.type.numeric.integer.UnsignedShortType; 12 | import org.scijava.ItemIO; 13 | import org.scijava.command.Command; 14 | import org.scijava.log.LogService; 15 | import org.scijava.plugin.Parameter; 16 | import org.scijava.plugin.Plugin; 17 | import org.tensorflow.SavedModelBundle; 18 | import org.tensorflow.Tensor; 19 | import org.tensorflow.framework.MetaGraphDef; 20 | import org.tensorflow.framework.SignatureDef; 21 | import org.tensorflow.framework.TensorInfo; 22 | 23 | /** 24 | * Command to apply the Microscopy image focus quality classifier model on an input (16-bit, 25 | * greyscale image). 26 | * 27 | *

This command will show both the input image and an annotated image marking regions of the 28 | * image with their focus quality. 29 | * 30 | *

This is a first draft, some TODOs: 31 | * 32 | *

41 | */ 42 | @Plugin(type = Command.class, menuPath = "Microscopy>Focus Quality") 43 | public class MicroscopeImageFocusQualityClassifier implements Command { 44 | 45 | @Parameter private LogService logService; 46 | 47 | @Parameter(label = "Microscope Image") 48 | private File imageFile; 49 | 50 | @Parameter(label = "Focus Quality Model", style = "directory") 51 | private File modelDir; 52 | 53 | @Parameter(type = ItemIO.OUTPUT) 54 | private Img originalImage; 55 | 56 | @Parameter(type = ItemIO.OUTPUT) 57 | private Dataset annotatedImage; 58 | 59 | // Same as the tag used in export_saved_model in the Python code. 60 | private static final String MODEL_TAG = "inference"; 61 | // Same as tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY 62 | // in Python. Perhaps this should be an exported constant in TensorFlow's Java API. 63 | private static final String DEFAULT_SERVING_SIGNATURE_DEF_KEY = "serving_default"; 64 | 65 | /* 66 | * The run() method is where we do the actual 'work' of the command. 67 | * 68 | * TODO(ashankar): The current implementation is extremely sub-optimal as the model 69 | * is being loaded on every call to run(). The model is pretty big (~100MB) and the 70 | * cost of loading should be amortized. Perhaps the model should be loaded once statically, 71 | * or implemented as a service plugin? 72 | */ 73 | @Override 74 | public void run() { 75 | final long loadModelStart = System.nanoTime(); 76 | try (SavedModelBundle model = SavedModelBundle.load(modelDir.getAbsolutePath(), MODEL_TAG)) { 77 | final long loadModelEnd = System.nanoTime(); 78 | logService.info( 79 | String.format( 80 | "Loaded microscope focus image quality model in %dms", 81 | (loadModelEnd - loadModelStart) / 1000000)); 82 | 83 | // Extract names from the model signature. 84 | // The strings "input", "probabilities" and "patches" are meant to be in sync with 85 | // the model exporter (export_saved_model()) in Python.
86 | final SignatureDef sig = 87 | MetaGraphDef.parseFrom(model.metaGraphDef()) 88 | .getSignatureDefOrThrow(DEFAULT_SERVING_SIGNATURE_DEF_KEY); 89 | originalImage = 90 | new ImgOpener() 91 | .openImg( 92 | imageFile.getAbsolutePath(), 93 | new ArrayImgFactory(), 94 | new UnsignedShortType()); 95 | validateFormat(originalImage); 96 | try (Tensor inputTensor = inputImageTensor(originalImage)) { 97 | final long runModelStart = System.nanoTime(); 98 | final List fetches = 99 | model 100 | .session() 101 | .runner() 102 | .feed(opName(sig.getInputsOrThrow("input")), inputTensor) 103 | .fetch(opName(sig.getOutputsOrThrow("probabilities"))) 104 | .fetch(opName(sig.getOutputsOrThrow("patches"))) 105 | .run(); 106 | final long runModelEnd = System.nanoTime(); 107 | try (Tensor probabilities = fetches.get(0); 108 | Tensor patches = fetches.get(1)) { 109 | logService.info( 110 | String.format( 111 | "Ran image through model in %dms", (runModelEnd - runModelStart) / 1000000)); 112 | logService.info("Probabilities shape: " + Arrays.toString(probabilities.shape())); 113 | logService.info("Patches shape: " + Arrays.toString(patches.shape())); 114 | 115 | float[][] floatProbs = 116 | new float[(int) probabilities.shape()[0]][(int) probabilities.shape()[1]]; 117 | probabilities.copyTo(floatProbs); 118 | for (int i = 0; i < probabilities.shape()[0]; ++i) { 119 | logService.info( 120 | String.format("Patch %02d probabilities: %s", i, Arrays.toString(floatProbs[i]))); 121 | } 122 | 123 | final int npatches = (int) patches.shape()[0]; 124 | final int patchSide = (int) patches.shape()[1]; 125 | assert patchSide == (int) patches.shape()[2]; // Square patches 126 | assert patches.shape()[3] == 1; 127 | 128 | // Log an error to force the console log to display 129 | // (otherwise the user will have to know to display the console window). 130 | // Of course, this will go away once the annotate image is generated. 131 | logService.error( 132 | "TODO: Display annotated image. Till then, see the beautiful log messages above"); 133 | } 134 | } 135 | 136 | } catch (final Exception exc) { 137 | // Use the LogService to report the error. 138 | logService.error(exc); 139 | } 140 | } 141 | 142 | private void validateFormat(Img image) throws IOException { 143 | int ndims = image.numDimensions(); 144 | if (ndims != 2) { 145 | long[] dims = new long[ndims]; 146 | image.dimensions(dims); 147 | throw new IOException( 148 | "Can only process greyscale images, not an image with " 149 | + ndims 150 | + " dimensions (" 151 | + Arrays.toString(dims) 152 | + ")"); 153 | } 154 | } 155 | 156 | /** 157 | * Convert an Img object into a Tensor suitable for input to the focus quality classification 158 | * model. 
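   * Pixel values are divided by 65535, so a 16-bit greyscale image maps to floats in [0.0, 1.0] (see the loop below).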
159 | */ 160 | private Tensor inputImageTensor(Img image) throws IOException { 161 | final int width = (int) image.dimension(0); 162 | final int height = (int) image.dimension(1); 163 | logService.info("Width = " + width + ", height = " + height); 164 | 165 | final RandomAccess r = image.randomAccess(); 166 | float[][] pixels = new float[height][width]; 167 | final int pos[] = new int[2]; 168 | for (int x = 0; x < width; ++x) { 169 | for (int y = 0; y < height; ++y) { 170 | pos[0] = x; 171 | pos[1] = y; 172 | r.setPosition(pos); 173 | pixels[y][x] = (float) r.get().get() / 65535; 174 | } 175 | } 176 | // An opportunity for optimization here: Instead of filling in a 2D pixels array, 177 | // create a flattened array and use: 178 | // Tensor.create(new long[]{height, width}, FloatBuffer.wrap(pixels)); 179 | // That will save some reflection cost if the Tensor.create() call here is too expensive. 180 | final long start = System.nanoTime(); 181 | Tensor t = Tensor.create(pixels); 182 | final long end = System.nanoTime(); 183 | logService.info( 184 | String.format("Created Tensor from %dx%d image in %dns", height, width, (end - start))); 185 | return t; 186 | } 187 | 188 | // The SignatureDef inputs and outputs contain names of the form :, 189 | // where for this model, is always 0. This function trims the ":0" suffix to 190 | // get the operation name. 191 | private static String opName(TensorInfo t) { 192 | final String n = t.getName(); 193 | if (n.endsWith(":0")) { 194 | return n.substring(0, n.lastIndexOf(":0")); 195 | } 196 | return n; 197 | } 198 | 199 | public static void main(String[] args) { 200 | final ImageJ ij = new ImageJ(); 201 | ij.launch(args); 202 | ij.command().run(MicroscopeImageFocusQualityClassifier.class, true); 203 | } 204 | } 205 | -------------------------------------------------------------------------------- /microscopeimagequality/data_provider.py: -------------------------------------------------------------------------------- 1 | """This data provider reads TFRecords of image and label data. 2 | 3 | The TFRecords contain TF Example protos with full-size images and labels. Either 4 | a random cropped patch of the image, or all of the tiles within an image are 5 | extracted and converted to batched tensors, ready for training or inference. 6 | """ 7 | 8 | import logging 9 | import os 10 | 11 | import numpy 12 | import tensorflow 13 | import tensorflow.contrib.slim 14 | 15 | IMAGE_WIDTH = 520 16 | IMAGE_HEIGHT = 520 17 | FEATURE_IMAGE = 'image' 18 | FEATURE_IMAGE_CLASS = 'image/class' 19 | FEATURE_IMAGE_PATH = 'image/path' 20 | 21 | _ITEMS_TO_DESCRIPTIONS = { 22 | FEATURE_IMAGE: 'A [width x width x 1] grayscale image.', 23 | FEATURE_IMAGE_CLASS: 'A single integer between 0 and [num_classes-1]', 24 | FEATURE_IMAGE_PATH: 'A string indicating path to image.', 25 | } 26 | 27 | # Range of random brightness factors to scale training data. 28 | _BRIGHTNESS_MIN_FACTOR = 0.2 29 | _BRIGHTNESS_MAX_FACTOR = 5.0 30 | 31 | # Range of random image brightness offsets for training data. 32 | _BRIGHTNESS_MIN_OFFSET = 1.0 / 65535 33 | _BRIGHTNESS_MAX_OFFSET = 1000.0 / 65535 34 | 35 | 36 | def get_filename_num_records(tf_record_path): 37 | """Get path to text file containing number of records. 38 | 39 | Args: 40 | tf_record_path: String, path to TFRecord file. 41 | 42 | Returns: 43 | String, path to text file containing number of records in TFRecord file. 
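    For example, 'data_train.tfrecord' maps to 'data_train.num_records'.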
44 | """ 45 | return os.path.splitext(tf_record_path)[0] + '.num_records' 46 | 47 | 48 | def get_num_records(tf_record_path): 49 | """Get the number of records in a TFRecord by reading it from the text file. 50 | 51 | Args: 52 | tf_record_path: String, path to TFRecord file. 53 | 54 | Returns: 55 | Integer, number of records in TFRecord file, as read from the text file. 56 | """ 57 | num_records_path = get_filename_num_records(tf_record_path) 58 | with open(num_records_path, 'r') as f: 59 | num_records = int(f.read()) 60 | logging.info('%d records in %s.', num_records, num_records_path) 61 | return num_records 62 | 63 | 64 | def get_split(split_name, tfrecord_file_pattern, num_classes, image_width, image_height): 65 | """Gets a dataset tuple from tfrecord, to be used with DatasetDataProvider. 66 | 67 | Args: 68 | split_name: String, a train/test split name. 69 | tfrecord_file_pattern: String, with formatting for split name. E.g. 70 | 'file_%s.tfrecord'. 71 | num_classes: Integer representing number of classes. Must match data in 72 | the TFRecord. 73 | image_width: Integer, width of image size to be cropped. 74 | image_height: Integer, height of image size to be cropped. 75 | 76 | Returns: 77 | A `Dataset` namedtuple. 78 | 79 | Raises: 80 | ValueError: if `split_name` is not a valid train/test split. 81 | """ 82 | # Input images, each of which will be modified in one of 'num_classes' ways. 83 | valid_splits = {'train', 'test'} 84 | if split_name not in valid_splits: 85 | raise ValueError('split name %s was not recognized.' % split_name) 86 | 87 | if image_height <= 0 or image_width <= 0: 88 | raise ValueError('Invalid image_height and/or image_width: %d, %d.' % 89 | (image_height, image_width)) 90 | image_shape = (image_height, image_width, 1) 91 | keys_to_features = { 92 | FEATURE_IMAGE: 93 | tensorflow.FixedLenFeature( 94 | image_shape, tensorflow.float32, default_value=tensorflow.zeros(image_shape)), 95 | FEATURE_IMAGE_CLASS: 96 | tensorflow.FixedLenFeature( 97 | [num_classes], tensorflow.float32, default_value=tensorflow.zeros([num_classes])), 98 | FEATURE_IMAGE_PATH: 99 | tensorflow.FixedLenFeature( 100 | [1], tensorflow.string, default_value=''), 101 | } 102 | 103 | items_to_handlers = { 104 | FEATURE_IMAGE: tensorflow.contrib.slim.tfexample_decoder.Tensor(FEATURE_IMAGE), 105 | FEATURE_IMAGE_CLASS: tensorflow.contrib.slim.tfexample_decoder.Tensor(FEATURE_IMAGE_CLASS), 106 | FEATURE_IMAGE_PATH: tensorflow.contrib.slim.tfexample_decoder.Tensor(FEATURE_IMAGE_PATH), 107 | } 108 | 109 | decoder = tensorflow.contrib.slim.tfexample_decoder.TFExampleDecoder(keys_to_features, 110 | items_to_handlers) 111 | 112 | file_pattern = tfrecord_file_pattern % split_name 113 | 114 | num_samples = get_num_records(file_pattern) 115 | return tensorflow.contrib.slim.dataset.Dataset( 116 | data_sources=file_pattern, 117 | reader=tensorflow.TFRecordReader, 118 | decoder=decoder, 119 | num_samples=num_samples, 120 | num_classes=num_classes, 121 | items_to_descriptions=_ITEMS_TO_DESCRIPTIONS) 122 | 123 | 124 | def get_batches(image, label, image_path, num_threads=800, batch_size=32): 125 | """Converts image and label into batches. 126 | 127 | Args: 128 | image: Input image tensor, size [num_images x width x width x 1]. 129 | label: Input label tensor, size [num_images x num_classes]. 130 | image_path: Input image path tensor, size [num_images x 1]. 131 | num_threads: Integer, number of threads for preprocessing and loading data. 132 | batch_size: Integer, batch size for the output.
133 | 134 | Returns: 135 | Batched version of the inputs: images (shape [batch_size x width x width x 136 | 1]), labels (shape [batch_size x num_classes]) and image_paths (shape 137 | [batch_size x 1]) tensors. 138 | """ 139 | assert len(image.get_shape().as_list()) == 4 140 | batch_images, batch_one_hot_labels, batch_image_paths = tensorflow.train.batch( 141 | [image, label, image_path], 142 | batch_size=batch_size, 143 | num_threads=num_threads, 144 | capacity=5 * batch_size, 145 | enqueue_many=True) 146 | return batch_images, batch_one_hot_labels, batch_image_paths 147 | 148 | 149 | def get_image_patch_tensor(image, label, image_path, patch_width): 150 | """Crops a random patch from image. 151 | 152 | Args: 153 | image: Input image tensor, size [width x width x 1]. 154 | label: Input label tensor, size [num_classes]. 155 | image_path: Input image path tensor, size [1]. 156 | patch_width: Integer representing width of image patch. 157 | Returns: 158 | Tensors image patch, size [1 x patch_width x patch_width x 1], 159 | expanded_label, size [1 x num_classes], and expanded_image_path, size 160 | [1 x 1]. 161 | """ 162 | assert len(image.get_shape().as_list()) == 3, image.get_shape().as_list() 163 | size = tensorflow.constant([patch_width, patch_width, 1], dtype=tensorflow.int32) 164 | 165 | patch = tensorflow.expand_dims(tensorflow.random_crop(image, size), 0) 166 | 167 | expanded_label = tensorflow.expand_dims(label, dim=0) 168 | expanded_image_path = tensorflow.expand_dims(image_path, dim=0) 169 | return patch, expanded_label, expanded_image_path 170 | 171 | 172 | def apply_random_offset(patch, min_offset, max_offset): 173 | """Adds a random offset to input image (tensor).""" 174 | # Choose offset uniformly in log space. 175 | offset = tensorflow.pow( 176 | tensorflow.constant([10.0]), 177 | tensorflow.random_uniform([1], numpy.log10(min_offset), numpy.log10(max_offset))) 178 | return tensorflow.add(patch, offset) 179 | 180 | 181 | def apply_random_brightness_adjust(patch, min_factor, max_factor): 182 | """Scales the input image (tensor) brightness by a random factor.""" 183 | # Choose brightness scale uniformly in log space. 184 | brightness = tensorflow.pow( 185 | tensorflow.constant([10.0]), 186 | tensorflow.random_uniform([1], numpy.log10(min_factor), numpy.log10(max_factor))) 187 | return tensorflow.multiply(patch, brightness) 188 | 189 | 190 | def get_image_tiles_tensor(image, label, image_path, patch_width): 191 | """Gets patches that tile the input image, starting at upper left. 192 | 193 | Args: 194 | image: Input image tensor, size [height x width x 1]. 195 | label: Input label tensor, size [num_classes]. 196 | image_path: Input image path tensor, size [1]. 197 | patch_width: Integer representing width of image patch. 198 | 199 | Returns: 200 | Tensors tiles, size [num_tiles x patch_width x patch_width x 1], labels, 201 | size [num_tiles x num_classes], and image_paths, size [num_tiles x 1]. 
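    For example, a 504 x 504 image with patch_width=84 tiles into 6 x 6 = 36 patches.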
202 | """ 203 | tiles_before_reshape = tensorflow.extract_image_patches( 204 | tensorflow.expand_dims(image, dim=0), [1, patch_width, patch_width, 1], 205 | [1, patch_width, patch_width, 1], [1, 1, 1, 1], 'VALID') 206 | tiles = tensorflow.reshape(tiles_before_reshape, [-1, patch_width, patch_width, 1]) 207 | 208 | labels = tensorflow.tile(tensorflow.expand_dims(label, dim=0), [tensorflow.shape(tiles)[0], 1]) 209 | image_paths = tensorflow.tile( 210 | tensorflow.expand_dims(image_path, dim=0), [tensorflow.shape(tiles)[0], 1]) 211 | 212 | return tiles, labels, image_paths 213 | 214 | 215 | def provide_data(tfrecord_file_pattern, 216 | split_name, 217 | batch_size, 218 | num_classes, 219 | image_width, 220 | image_height, 221 | patch_width=28, 222 | randomize=True, 223 | num_threads=64): 224 | """Provides batches of data. 225 | 226 | Args: 227 | tfrecord_file_pattern: String, with formatting for split name. E.g. 228 | 'file_%s.tfrecord'. 229 | split_name: String indicating split name, typically 'train' or 'test'. 230 | batch_size: The number of images in each batch. If 'randomize' is False, 231 | the batch size must be the number of tiles per image. 232 | num_classes: Integer representing number of classes. 233 | image_width: Integer, width of image size to be cropped. 234 | image_height: Integer, height of image size to be cropped. 235 | patch_width: Integer width (and height) of image patch. 236 | randomize: Boolean indicating whether to use image patches that are randomly 237 | cropped, with a random offset and brightness adjustment applied. Use only 238 | for training. 239 | num_threads: Number of threads for data reading queue. Use only 1 thread for 240 | deterministic ordering of inputs. 241 | 242 | 243 | Returns: 244 | batch_images: A `Tensor` of size [batch_size, patch_width, patch_width, 1] 245 | batch_one_hot_labels: A `Tensor` of size [batch_size, num_classes], where 246 | each row has a single element set to one and the rest set to zeros. 247 | num_samples: The number of images (not tiles) in the dataset. 248 | 249 | Raises: 250 | ValueError: If the batch size is invalid. 251 | """ 252 | if batch_size <= 0: 253 | raise ValueError('Invalid batch size: %d' % batch_size) 254 | dataset_info = get_split( 255 | split_name, 256 | tfrecord_file_pattern, 257 | num_classes, 258 | image_width=image_width, 259 | image_height=image_height) 260 | provider = tensorflow.contrib.slim.dataset_data_provider.DatasetDataProvider( 261 | dataset_info, 262 | common_queue_capacity=2 * batch_size, 263 | common_queue_min=batch_size, 264 | shuffle=False, 265 | num_readers=num_threads) 266 | 267 | # image, label, image_path have shape [width x width x 1], [num_classes], [1]. 268 | [image, label, image_path] = provider.get( 269 | [FEATURE_IMAGE, FEATURE_IMAGE_CLASS, FEATURE_IMAGE_PATH]) 270 | 271 | logging.info('Data provider image shape: %s', str(image.get_shape().as_list())) 272 | if randomize: 273 | # For training, get a single randomly cropped image patch. 274 | patch_original, label, image_path = get_image_patch_tensor( 275 | image, label, image_path, patch_width=patch_width) 276 | 277 | # Apply a random offset and brightness adjustment. 
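    # Editorial note: both adjustments below are sampled uniformly in log space
    # (see apply_random_brightness_adjust and apply_random_offset), using the
    # module-level _BRIGHTNESS_* constants, which are presumably defined near
    # the top of this file.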
278 |     patch_scaled = apply_random_brightness_adjust(
279 |         patch_original,
280 |         min_factor=_BRIGHTNESS_MIN_FACTOR,
281 |         max_factor=_BRIGHTNESS_MAX_FACTOR)
282 | 
283 |     patch = apply_random_offset(
284 |         patch_scaled,
285 |         min_offset=_BRIGHTNESS_MIN_OFFSET,
286 |         max_offset=_BRIGHTNESS_MAX_OFFSET)
287 | 
288 |     batch_images, batch_one_hot_labels, batch_image_paths = get_batches(
289 |         patch,
290 |         label,
291 |         image_path,
292 |         batch_size=batch_size,
293 |         num_threads=num_threads)
294 |   else:
295 |     # For testing, extract tiles that perfectly tile the image (without overlap).
296 |     tiles, labels, image_paths = get_image_tiles_tensor(
297 |         image, label, image_path, patch_width=patch_width)
298 | 
299 |     num_tiles = tiles.get_shape().as_list()[0]
300 |     assert num_tiles == batch_size, 'num_tiles: %d, batch_size: %d' % (
301 |         num_tiles, batch_size)
302 | 
303 |     batch_images, batch_one_hot_labels, batch_image_paths = get_batches(
304 |         tiles,
305 |         labels,
306 |         image_paths,
307 |         batch_size=num_tiles,
308 |         num_threads=num_threads)
309 |   num_samples = provider.num_samples()
310 |   return batch_images, batch_one_hot_labels, batch_image_paths, num_samples
311 | 
--------------------------------------------------------------------------------
/microscopeimagequality/degrade.py:
--------------------------------------------------------------------------------
1 | """
2 | Tool for simulating microscope image degradations.
3 | 
4 | Example usage:
5 |   To simulate defocus at a depth of 2 microns (for the default imaging
6 |   parameters):
7 | 
8 |   from microscopeimagequality import degrade
9 |   degrade.degrade_images('/path_clean_images/*',
10 |                          '/degraded_image_output/',
11 |                          z_depth_meters=2e-6,
12 |                          exposure_factor=1.0,
13 |                          random_seed=0,
14 |                          photoelectron_factor=65535,
15 |                          sensor_offset_in_photoelectrons=100)
16 | """
17 | 
18 | import os
19 | 
20 | import numpy
21 | import scipy.integrate
22 | import scipy.signal
23 | import scipy.special
24 | import skimage.io
25 | 
26 | import microscopeimagequality.dataset_creation
27 | 
28 | 
29 | class ImageDegrader(object):
30 |     """
31 |     Holds image sensor parameters for degrading images.
32 | 
33 |     Attributes:
34 |         _random_generator: np.random.RandomState for generating noise.
35 |         _photoelectron_factor: Float, factor to convert pixel values in range [0.0, 1.0] to units of photoelectrons.
36 |         _sensor_offset_in_photoelectrons: Float, image sensor offset (black level), in units of photoelectrons.
37 |     """
38 |     def __init__(self, random_seed=0, photoelectron_factor=65535.0, sensor_offset_in_photoelectrons=100.0):
39 |         """
40 |         Initialize with image sensor parameters.
41 | 
42 |         Args:
43 |             random_seed: Integer, the random seed.
44 |             photoelectron_factor: Float, factor to convert to photoelectrons.
45 |             sensor_offset_in_photoelectrons: Float, image sensor offset (black level), in terms of photoelectrons.
46 |         """
47 |         self._photoelectron_factor = photoelectron_factor
48 |         self._sensor_offset_in_photoelectrons = sensor_offset_in_photoelectrons
49 |         self._random_generator = numpy.random.RandomState(random_seed)
50 | 
51 |     def random_noise(self, image):
52 |         """
53 |         Applies per-pixel Poisson noise to an image.
54 | 
55 |         Pixel values are converted to units of photoelectrons before noise is applied.
56 | 
57 |         Args:
58 |             image: A 2D numpy float array in [0.0, 1.0], the image to apply noise to.
59 | 
60 |         Returns:
61 |             A 2D numpy float array of same shape as 'image', in [0.0, 1.0].
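
        Rough sketch of the conversion, using this class's default parameters
        (illustrative numbers only): a pixel value of 0.5 maps to
        0.5 * 65535 - 100 = 32667.5 photoelectrons, a Poisson sample is drawn
        with that mean, and the sample is mapped back via
        (photoelectrons + 100) / 65535 before clipping to 1.0.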
62 | """ 63 | image_photoelectrons = numpy.maximum(0.0, image * self._photoelectron_factor - self._sensor_offset_in_photoelectrons) 64 | 65 | noisy_image_photoelectrons = self._random_generator.poisson(image_photoelectrons).astype(numpy.float64) 66 | 67 | noisy_image = (noisy_image_photoelectrons + self._sensor_offset_in_photoelectrons) / self._photoelectron_factor 68 | 69 | clipped_image = numpy.minimum(1.0, noisy_image) 70 | 71 | return clipped_image 72 | 73 | @staticmethod 74 | def apply_blur_kernel(image, psf): 75 | """ 76 | Applies a blur kernel to the image after normalizing the kernel. 77 | 78 | A symmetric boundary is used to handle the image borders. 79 | 80 | Args: 81 | image: A 2D numpy float array in [0.0, 1.0], the image to blur. 82 | psf: A 2D numpy float array, the kernel to blur the image with. 83 | 84 | Returns: 85 | A 2D numpy float array of same shape as 'image', in [0.0, 1.0]. 86 | """ 87 | psf_normalized = psf / numpy.sum(psf) 88 | 89 | return scipy.signal.convolve2d(image, psf_normalized, 'same', boundary='symm') 90 | 91 | def set_exposure(self, image, exposure_factor): 92 | """ 93 | Adjusts the image exposure. 94 | 95 | Args: 96 | image: A 2D numpy float array in [0.0, 1.0], the image to adjust exposure in. 97 | exposure_factor: A non-negative float, the factor to adjust exposure by. 98 | 99 | Returns: 100 | A 2D numpy float array of same shape as 'image', in [0.0, 1.0]. 101 | """ 102 | 103 | image_without_offset = numpy.maximum(0.0, (image * self._photoelectron_factor - self._sensor_offset_in_photoelectrons)) 104 | 105 | adjusted_without_offset = image_without_offset * exposure_factor 106 | 107 | adjusted = ((adjusted_without_offset + self._sensor_offset_in_photoelectrons) / self._photoelectron_factor) 108 | 109 | clipped_image = numpy.minimum(1.0, adjusted) 110 | 111 | return clipped_image 112 | 113 | 114 | def get_airy_psf(psf_width_pixels, psf_width_meters, z, wavelength, numerical_aperture, refractive_index, normalize=True): 115 | """ 116 | Generate Airy point spread function (psf) kernel from optical parameters. 117 | 118 | Args: 119 | psf_width_pixels: Integer, the width of the psf, in pixels. Must be odd. If this is even, testGetAiryPsfGoldenZeroDepth() will fail. 120 | psf_width_meters: Float, the width of the psf, in meters. 121 | z: Float, z-coordinate relative to the focal plane, in meters. 122 | wavelength: Float, wavelength of light in meters. 123 | numerical_aperture: Float, numerical aperture of the imaging lens. 124 | refractive_index: Float, refractive index of the imaging medium. 125 | normalize: Boolean, whether to normalize psf to max value. 126 | 127 | Returns: 128 | The psf kernel, a numpy float 2D array. 129 | 130 | Raises: 131 | ValueError: If psf_width_pixels is not an odd number. 132 | """ 133 | if psf_width_pixels % 2 == 0: 134 | raise ValueError( 135 | 'psf_width_pixels must be an odd number, but is %d.' % psf_width_pixels) 136 | 137 | meters_per_pixel = psf_width_meters / psf_width_pixels 138 | psf = numpy.zeros((psf_width_pixels, psf_width_pixels), dtype=numpy.float64) 139 | for i in range(psf_width_pixels): 140 | for j in range(psf_width_pixels): 141 | x = (i - (psf_width_pixels - 1.0) / 2.0) * meters_per_pixel 142 | y = (j - (psf_width_pixels - 1.0) / 2.0) * meters_per_pixel 143 | psf[i, j] = _evaluate_airy_function_at_point( 144 | x, y, z, wavelength, numerical_aperture, refractive_index) 145 | 146 | # Normalize PSF to max value. 
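    # Editorial note: this max-normalization only rescales the kernel; when the
    # psf is used for blurring, ImageDegrader.apply_blur_kernel re-normalizes it
    # to unit sum, so the blur result does not depend on the scaling chosen here.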
147 | if normalize: 148 | return psf / numpy.max(psf) 149 | return psf 150 | 151 | 152 | def _evaluate_airy_function_at_point(x, y, z, wavelength, numerical_aperture, refractive_index): 153 | """ 154 | Evaluates the Airy point spread function at a point. 155 | 156 | Args: 157 | x: Float, x coordinate, in meters. 158 | y: Float, y coordinate, in meters. 159 | z: Float, z coordinate, in meters. 160 | wavelength: Float, wavelength of light in meters. 161 | numerical_aperture: Float, numerical aperture of the imaging lens. 162 | refractive_index: Float, refractive index of the imaging medium. 163 | 164 | Returns: 165 | A real float, the value of the Airy point spread function at the coordinate. 166 | """ 167 | k = 2 * numpy.pi / wavelength 168 | na = numerical_aperture # pylint: disable=invalid-name 169 | n = refractive_index 170 | 171 | def function_to_integrate(rho): 172 | bessel_arg = k * na / n * numpy.sqrt(numpy.power(x, 2) + numpy.power(y, 2)) * rho 173 | return scipy.special.j0(bessel_arg) * numpy.exp(-1.0 / 2.0 * 1j * k * numpy.power( 174 | rho, 2) * z * numpy.power(na / n, 2)) * rho 175 | 176 | integral_result = _integrate_numerical(function_to_integrate, 0.0, 1.0) 177 | return float(numpy.real(integral_result * numpy.conj(integral_result))) 178 | 179 | 180 | def _integrate_numerical(function_to_integrate, start, end): 181 | """ 182 | Numerically integrate a complex function with real end points. 183 | 184 | Args: 185 | function_to_integrate: Function to integrate. 186 | start: Float, real starting point. 187 | end: Float, real ending point. 188 | 189 | Returns: 190 | Complex float, the value of the numerical integration. 191 | """ 192 | 193 | def real_function(x): 194 | return numpy.real(function_to_integrate(x)) 195 | 196 | def imag_function(x): 197 | return numpy.imag(function_to_integrate(x)) 198 | 199 | real_result = scipy.integrate.quad(real_function, start, end)[0] 200 | 201 | imag_result = scipy.integrate.quad(imag_function, start, end)[0] 202 | 203 | return real_result + 1j * imag_result 204 | 205 | 206 | def degrade_images(images, output_path, z_depth_meters, exposure_factor, random_seed, photoelectron_factor, sensor_offset_in_photoelectrons, wavelength=500e-9, numerical_aperture=0.5, refractive_index=1.0, psf_width_pixels=51, pixel_size_meters=0.65e-6, skip_apply_poisson_noise=False): 207 | """ 208 | Create a PSF and degrade all specified images. 209 | 210 | Args: 211 | images: String, glob for input images, either .png, .tif or .tiff. 212 | output_path: String, path to save degraded images. 213 | z_depth_meters: Z-coordinate, in meters, distance relative to focal plane. 214 | exposure_factor: A non-negative float, the factor to adjust exposure by. 215 | random_seed: Integer, the random seed. 216 | photoelectron_factor: Float, factor to convert to photoelectrons. 217 | sensor_offset_in_photoelectrons: Float, image sensor offset (black level), in terms of photoelectrons. 218 | wavelength: Float, wavelength of light in meters. 219 | numerical_aperture: Float, numerical aperture of the imaging lens. 220 | refractive_index: Float, refractive index of the imaging medium. 221 | psf_width_pixels: Integer, the width of the psf, in pixels. Must be odd. 222 | pixel_size_meters: Float, width of each image pixel in meters. This is the magnified camera pixel size. 223 | skip_apply_poisson_noise: Boolean, skip application of Poisson noise. 224 | 225 | Raises: 226 | ValueError: If no images are found by the specified glob. 
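
    For a sense of scale with the default arguments (illustrative arithmetic
    only): the generated psf spans psf_width_pixels * pixel_size_meters =
    51 * 0.65e-6 m, i.e. roughly 33 microns on a side.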
227 | """ 228 | psf_width_meters = psf_width_pixels * pixel_size_meters 229 | 230 | psf = get_airy_psf(psf_width_pixels, psf_width_meters, z_depth_meters, wavelength, numerical_aperture, refractive_index) 231 | 232 | degrader = ImageDegrader(random_seed, photoelectron_factor, sensor_offset_in_photoelectrons) 233 | 234 | image_paths = microscopeimagequality.dataset_creation.get_images_from_glob(images, max_images=1e7) 235 | 236 | if not image_paths: 237 | raise ValueError('No images found with glob %s.' % images) 238 | 239 | for path in image_paths: 240 | image = microscopeimagequality.dataset_creation.read_16_bit_greyscale(path) 241 | blurred_image = degrader.apply_blur_kernel(image, psf) 242 | exposure_adjusted_image = degrader.set_exposure(blurred_image, exposure_factor) 243 | 244 | if skip_apply_poisson_noise: 245 | noisy_image = exposure_adjusted_image 246 | else: 247 | noisy_image = degrader.random_noise(exposure_adjusted_image) 248 | 249 | output_filename = os.path.join(output_path, '%s.png' % os.path.splitext(os.path.basename(path))[0]) 250 | 251 | output_dir = os.path.dirname(output_filename) 252 | 253 | if not os.path.isdir(output_dir): 254 | os.makedirs(output_dir) 255 | 256 | skimage.io.imsave(output_filename, noisy_image, "pil") 257 | -------------------------------------------------------------------------------- /microscopeimagequality/miq.py: -------------------------------------------------------------------------------- 1 | """ 2 | Contains the Miq model definition. Based on MNIST. 3 | 4 | The model in this file is a simple convolutional network with two 5 | convolutional layers, two pooling layers, followed by two fully connected 6 | layers. A single dropout layer is used between the two fully connected layers. 7 | """ 8 | 9 | import logging 10 | import os 11 | import pkg_resources 12 | 13 | import tensorflow 14 | import tensorflow.contrib.slim 15 | import urllib 16 | 17 | import microscopeimagequality.constants as constants 18 | 19 | DEFAULT_MODEL_DIRECTORY = pkg_resources.resource_filename(__name__, "data") 20 | DEFAULT_MODEL_PATH = DEFAULT_MODEL_DIRECTORY + "/" + os.path.basename(constants.REMOTE_MODEL_CHECKPOINT_PATH) 21 | 22 | def download_model(source_path=constants.REMOTE_MODEL_CHECKPOINT_PATH, output_path=DEFAULT_MODEL_DIRECTORY): 23 | print("Downloading model from %s to %s." % (source_path, output_path)) 24 | if not os.path.isdir(output_path): 25 | os.mkdir(output_path) 26 | file_extensions = [".index", ".meta", ".data-00000-of-00001"] 27 | for extension in file_extensions: 28 | remote_path = constants.REMOTE_MODEL_CHECKPOINT_PATH + extension 29 | local_path = os.path.join(output_path, os.path.basename(remote_path)) 30 | urllib.request.urlretrieve(remote_path, local_path) 31 | 32 | print("Downloaded %d files to %s." % (len(file_extensions), output_path)) 33 | print("Default model path is %s." % DEFAULT_MODEL_PATH) 34 | 35 | def add_loss(logits, one_hot_labels, use_rank_loss=False): 36 | """Add loss function to tf.losses. 37 | 38 | Args: 39 | logits: Tensor of logits of shape [batch_size, num_classes] 40 | one_hot_labels: A `Tensor` of size [batch_size, num_classes], where 41 | each row has a single element set to one and the rest set to zeros. 42 | use_rank_loss: Boolean, whether to use rank probability score loss instead 43 | of cross entropy. 
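
  Nothing is returned; the loss is only registered in TensorFlow's loss
  collection. A typical way to consume it afterwards (a sketch, not code from
  this repository; `optimizer` stands for any tf.train optimizer):
    total_loss = tensorflow.losses.get_total_loss()
    train_op = tensorflow.contrib.slim.learning.create_train_op(total_loss, optimizer)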
44 | """ 45 | if not use_rank_loss: 46 | tensorflow.contrib.slim.losses.softmax_cross_entropy(logits, one_hot_labels) 47 | else: 48 | rank_loss = ranked_probability_score( 49 | tensorflow.nn.softmax(logits), one_hot_labels, dim=1) 50 | tensorflow.losses.add_loss(tensorflow.reduce_mean(rank_loss)) 51 | 52 | 53 | def miq_model(images, num_classes=2, is_training=False, model_id=0): 54 | """Creates the convolutional model. 55 | 56 | Note that since the output is a set of 'logits', the values fall in the 57 | interval of (-infinity, infinity). Consequently, to convert the outputs to a 58 | probability distribution over the characters, one will need to convert them 59 | using the softmax function: 60 | logits = miq.Miq(images, is_training=False) 61 | probabilities = tf.nn.softmax(logits) 62 | predictions = tf.argmax(logits, 1) 63 | 64 | Args: 65 | images: the input patches, a tensor of size [batch_size, patch_width, 66 | patch_width, 1]. 67 | num_classes: the number of classes in the dataset. 68 | is_training: specifies whether or not we're currently training the model. 69 | This variable will determine the behaviour of the dropout layer. 70 | model_id: Integer, model ID. 71 | 72 | Returns: 73 | the output logits, a tensor of size [batch_size, 11]. 74 | 75 | Raises: 76 | ValueError: If an invalid model ID is specified. 77 | """ 78 | logging.info('Using model_id = %d.', model_id) 79 | if model_id == 0: 80 | return model_v0(images, num_classes, is_training) 81 | elif model_id == 1: 82 | return model_v1(images, num_classes, is_training) 83 | else: 84 | raise ValueError('Unsupported model %d' % model_id) 85 | 86 | 87 | def model_v1(images, num_classes, is_training): 88 | """Dilated convolution.""" 89 | return model(images, num_classes, is_training, rate=2) 90 | 91 | 92 | def model_v0(images, num_classes, is_training): 93 | """Original model.""" 94 | return model(images, num_classes, is_training, rate=1) 95 | 96 | 97 | def model(images, num_classes, is_training, rate): 98 | """Generic model. 99 | 100 | Args: 101 | images: the input patches, a tensor of size [batch_size, patch_width, 102 | patch_width, 1]. 103 | num_classes: the number of classes in the dataset. 104 | is_training: specifies whether or not we're currently training the model. 105 | This variable will determine the behaviour of the dropout layer. 106 | rate: Integer, convolution rate. 1 for standard convolution, > 1 for dilated 107 | convolutions. 108 | 109 | Returns: 110 | the output logits, a tensor of size [batch_size, 11]. 111 | 112 | """ 113 | # Adds a convolutional layer with 32 filters of size [5x5], followed by 114 | # the default (implicit) Relu activation. 115 | net = tensorflow.contrib.slim.conv2d(images, 32, [5, 5], padding='SAME', scope='conv1') 116 | 117 | # Adds a [2x2] pooling layer with a stride of 2. 118 | net = tensorflow.contrib.slim.max_pool2d(net, [2, 2], 2, scope='pool1') 119 | 120 | # Adds a convolutional layer with 64 filters of size [5x5], followed by 121 | # the default (implicit) Relu activation. 122 | net = tensorflow.contrib.slim.conv2d(net, 64, [5, 5], padding='SAME', scope='conv2', rate=rate) 123 | 124 | # Adds a [2x2] pooling layer with a stride of 2. 125 | net = tensorflow.contrib.slim.max_pool2d(net, [2, 2], 2, scope='pool2') 126 | 127 | # Reshapes the hidden units such that instead of 2D maps, they are 1D vectors: 128 | net = tensorflow.contrib.slim.flatten(net) 129 | 130 | # Adds a fully-connected layer with 1024 hidden units, followed by the default 131 | # Relu activation. 
132 |   net = tensorflow.contrib.slim.fully_connected(net, 1024, scope='fc3')
133 | 
134 |   # Adds a dropout layer during training.
135 |   net = tensorflow.contrib.slim.dropout(net, 0.5, is_training=is_training, scope='dropout3')
136 | 
137 |   # Adds a fully connected layer with 'num_classes' outputs. Note
138 |   # that the default Relu activation has been overridden to use no activation.
139 |   net = tensorflow.contrib.slim.fully_connected(net, num_classes, activation_fn=None, scope='fc4')
140 | 
141 |   return net
142 | 
143 | 
144 | def ranked_probability_score(predictions, targets, dim, name=None):
145 |   r"""Calculate the Ranked Probability Score (RPS).
146 | 
147 |   RPS is given by the formula
148 | 
149 |     sum_{k=1}^K (CDF_{prediction,k} - CDF_{target,k}) ^ 2
150 | 
151 |   where CDF denotes the empirical CDF and each value of `k` denotes a different
152 |   class, in rank order. The range of possible RPS values is `[0, K - 1]`, where
153 |   `K` is the total number of classes. Perfect predictions have a score of zero.
154 | 
155 |   This is a better metric than cross-entropy for probabilistic classification of
156 |   ranked targets, because it penalizes wrong guesses more harshly if they
157 |   predict a target that is further away. For deterministic predictions (zero
158 |   or one), the ranked probability score is equal to the absolute error in the
159 |   number of classes.
160 | 
161 |   Importantly (like cross entropy), it is a strictly proper scoring rule: the
162 |   highest expected reward is obtained by predicting the true probability
163 |   distribution.
164 | 
165 |   For these reasons, it is widely used for evaluating weather forecasts, which
166 |   are a prototypical use case for probabilistic regression.
167 | 
168 |   References:
169 |     Murphy AH. A Note on the Ranked Probability Score. J. Appl. Meteorol. 1971,
170 |     10:155-156.
171 |     http://dx.doi.org/10.1175/1520-0450(1971)010<0155:ANOTRP>2.0.CO;2
172 | 
173 |   Args:
174 |     predictions: tf.Tensor with probabilities for each class.
175 |     targets: tf.Tensor with one-hot encoded targets.
176 |     dim: integer dimension which corresponds to different classes in both
177 |       ``predictions`` and ``targets``.
178 |     name: optional string name for the operation.
179 | 
180 |   Returns:
181 |     tf.Tensor with the ranked probability score.
182 | 
183 |   Raises:
184 |     ValueError: if predictions and targets do not have the same shape.
185 |   """
186 |   with tensorflow.name_scope(name, 'ranked_probability_score', [predictions,
187 |                                                                 targets]) as scope:
188 |     predictions = tensorflow.convert_to_tensor(predictions, name='predictions')
189 |     targets = tensorflow.convert_to_tensor(targets, name='targets')
190 | 
191 |     if not predictions.get_shape().is_compatible_with(targets.get_shape()):
192 |       raise ValueError('predictions and targets must have compatible shapes')
193 | 
194 |     if predictions.dtype.is_floating and targets.dtype.is_integer:
195 |       # it's safe to coerce integer targets to float dtype
196 |       targets = tensorflow.cast(targets, dtype=predictions.dtype)
197 | 
198 |     cdf_pred = tensorflow.cumsum(predictions, dim)
199 |     cdf_target = tensorflow.cumsum(targets, dim)
200 | 
201 |     values = (cdf_pred - cdf_target) ** 2
202 | 
203 |     # If desired, we could add arbitrary weighting in this sum along dim.
204 | # That would still be a proper scoring rule (it's equivalent to rescaling 205 | # the discretization): 206 | # https://www.stat.washington.edu/research/reports/2008/tr533.pdf 207 | rps = tensorflow.reduce_sum(values, dim, name=scope) 208 | 209 | return rps 210 | -------------------------------------------------------------------------------- /microscopeimagequality/prediction.py: -------------------------------------------------------------------------------- 1 | """ 2 | Run model inference to annotate input images with patch/image predictions. 3 | 4 | Example usage: 5 | quality predict \ 6 | --checkpoint /path/model.ckpt \ 7 | --output /tmp/ \ 8 | "/images/*" 9 | 10 | """ 11 | 12 | import logging 13 | import os 14 | import sys 15 | 16 | import numpy 17 | import skimage.io 18 | import tensorflow 19 | 20 | import microscopeimagequality.constants 21 | import microscopeimagequality.dataset_creation 22 | import microscopeimagequality.evaluation 23 | 24 | logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) 25 | 26 | _SPLIT_NAME = 'test' 27 | 28 | _TFRECORD_FILE_PATTERN = 'data_%s-%05d-of-%05d.tfrecord' 29 | 30 | class ImageQualityClassifier(object): 31 | """Object for running image quality model inference. 32 | 33 | Attributes: 34 | graph: TensorFlow graph. 35 | """ 36 | 37 | def __init__(self, 38 | model_ckpt, 39 | model_patch_side_length, 40 | num_classes, 41 | graph=None): 42 | """Initialize the model from a checkpoint. 43 | 44 | Args: 45 | model_ckpt: String, path to TensorFlow model checkpoint to load. 46 | model_patch_side_length: Integer, the side length in pixels of the square 47 | image passed to the model. 48 | num_classes: Integer, the number of classes the model predicts. 49 | graph: TensorFlow graph. If None, one will be created. 50 | """ 51 | self._model_patch_side_length = model_patch_side_length 52 | self._num_classes = num_classes 53 | 54 | if graph is None: 55 | graph = tensorflow.Graph() 56 | self.graph = graph 57 | 58 | with self.graph.as_default(): 59 | self._image_placeholder = tensorflow.placeholder( 60 | tensorflow.float32, shape=[None, None, 1]) 61 | 62 | self._probabilities = self._probabilities_from_image( 63 | self._image_placeholder, model_patch_side_length, num_classes) 64 | 65 | self._sess = tensorflow.Session() 66 | saver = tensorflow.train.Saver() 67 | 68 | saver.restore(self._sess, model_ckpt) 69 | logging.info('Model restored from %s.', model_ckpt) 70 | 71 | def __del__(self): 72 | self._sess.close() 73 | 74 | def _probabilities_from_image(self, image_placeholder, 75 | model_patch_side_length, num_classes): 76 | """Get probabilities tensor from input image tensor. 77 | 78 | Args: 79 | image_placeholder: Float32 tensor, placeholder for input image. 80 | model_patch_side_length: Integer, the side length in pixels of the square 81 | image passed to the model. 82 | num_classes: Integer, the number of classes the model predicts. 83 | 84 | Returns: 85 | Probabilities tensor, shape [num_classes] representing the predicted 86 | probabilities for each class. 
87 | """ 88 | labels_fake = tensorflow.zeros([self._num_classes]) 89 | 90 | image_path_fake = tensorflow.constant(['unused']) 91 | tiles, labels, _ = _get_image_tiles_tensor( 92 | image_placeholder, labels_fake, image_path_fake, 93 | model_patch_side_length) 94 | 95 | model_metrics = microscopeimagequality.evaluation.get_model_and_metrics( 96 | tiles, 97 | num_classes=num_classes, 98 | one_hot_labels=labels, 99 | is_training=False) 100 | 101 | return model_metrics.probabilities 102 | 103 | def predict(self, image): 104 | """Run inference on an image. 105 | 106 | Args: 107 | image: Numpy float array, two-dimensional. 108 | 109 | Returns: 110 | A evaluation.WholeImagePrediction object. 111 | """ 112 | feed_dict = {self._image_placeholder: numpy.expand_dims(image, 2)} 113 | [np_probabilities] = self._sess.run( 114 | [self._probabilities], feed_dict=feed_dict) 115 | 116 | return microscopeimagequality.evaluation.aggregate_prediction_from_probabilities( 117 | np_probabilities, microscopeimagequality.evaluation.METHOD_AVERAGE) 118 | 119 | def get_patch_predictions(self, image): 120 | """Run inference on each patch in an image, returning each patch score. 121 | 122 | Args: 123 | image: Numpy float array, of shape (height, width). 124 | 125 | Returns: 126 | List of tuples, with (upper_left_row, upper_left_col, height, width 127 | evaluation.WholeImagePrediction) which denote the patch location, 128 | dimensions and predition result. 129 | """ 130 | results = [] 131 | w = microscopeimagequality.constants.PATCH_SIDE_LENGTH 132 | for i in range(0, image.shape[0] - w, w): 133 | for j in range(0, image.shape[1] - w, w): 134 | results.append((i, j, w, w, self.predict(image[i:i+w, j:j+w]))) 135 | return results 136 | 137 | def get_annotated_prediction(self, image): 138 | """Run inference to annotate the input image with patch predictions. 139 | 140 | Args: 141 | image: Numpy float array, two-dimensional. 142 | 143 | Returns: 144 | RGB image as uint8 numpy array of shape (image_height, image_width, 3), 145 | representing the upper left crop of the input image, where: 146 | image_height = floor(image.shape[0] / model_patch_side_length) 147 | image_width = floor(image.shape[1] / model_patch_side_length) 148 | """ 149 | 150 | feed_dict = {self._image_placeholder: numpy.expand_dims(image, 2)} 151 | 152 | with self.graph.as_default(): 153 | patches = _get_image_tiles_tensor( 154 | self._image_placeholder, 155 | tensorflow.constant([0]), 156 | tensorflow.constant([0]), 157 | patch_width=self._model_patch_side_length)[0] 158 | [np_probabilities, np_patches] = self._sess.run( 159 | [self._probabilities, patches], feed_dict=feed_dict) 160 | 161 | # We use '-1' to denote no true label exists. 162 | np_labels = -1 * numpy.ones((np_patches.shape[0])) 163 | return numpy.squeeze( 164 | microscopeimagequality.evaluation.visualize_image_predictions( 165 | np_patches, 166 | np_probabilities, 167 | np_labels, 168 | image.shape[0], 169 | image.shape[1], 170 | show_plot=False, 171 | output_path=None)) 172 | 173 | def patch_values_to_mask(values, patch_width): 174 | """Construct a mask from an array of patch values. 175 | 176 | Args: 177 | values: A uint16 2D numpy array. 178 | patch_width: Width in pixels of each patch. 179 | 180 | Returns: 181 | The mask, a uint16 numpy array of width patch_width * 182 | values.shape[0]. 183 | 184 | Raises: 185 | ValueError: If the input values are invalid. 
186 | """ 187 | if values.dtype != numpy.uint16 or len(values.shape) != 2: 188 | logging.info('dtype: %s shape: %s', values.dtype, values.shape) 189 | raise ValueError('Input must be a 2D np.uint16 array.') 190 | 191 | patches_per_column = values.shape[0] 192 | patches_per_row = values.shape[1] 193 | 194 | mask = numpy.zeros( 195 | (patches_per_column * patch_width, patches_per_row * patch_width), 196 | dtype=numpy.uint16) 197 | 198 | for i in range(patches_per_column): 199 | for j in range(patches_per_row): 200 | ymin = i * patch_width 201 | xmin = j * patch_width 202 | mask[ymin:ymin + patch_width, xmin:xmin + patch_width] = values[i, j] 203 | 204 | return mask 205 | 206 | 207 | def save_masks_and_annotated_visualization(orig_name, 208 | output_directory, 209 | prediction, 210 | certainties, 211 | np_images, 212 | np_probabilities, 213 | np_labels, 214 | patch_width, 215 | image_height, 216 | image_width, 217 | show_plots=False): 218 | """For a prediction on a single image, save the output masks and images. 219 | 220 | Args: 221 | orig_name: String, full path to original input image. 222 | output_directory: String, path to directory for outputs. 223 | prediction: Integer, index of predicted class. 224 | certainties: Dictionary mapping certainty type (string) to float value. 225 | np_images: Numpy array of patches of shape (num_patches, width, width, 1). 226 | np_probabilities: Numpy array of shape (num_patches, num_classes), the 227 | probabilities predicted by the model for each class. 228 | np_labels: Integer numpy array of shape (num_patches) indicating true class. 229 | The true class must be the same for all patches. 230 | patch_width: Integer, width of image patches. 231 | image_height: Integer, the image height. 232 | image_width: Integer, the image width. 233 | show_plots: Whether to show plots (use with Colab). 234 | 235 | Raises: 236 | ValueError: If the image to annotate cannot be found or opened. 237 | """ 238 | 239 | if not os.path.isfile(orig_name): 240 | raise ValueError('File for annotating does not exist: %s.' % orig_name) 241 | 242 | if not isinstance(orig_name, str): 243 | orig_name = orig_name.decode('utf-8') 244 | output_height, output_width = skimage.io.imread(orig_name).shape 245 | 246 | logging.info('Original image size %d x %d', output_height, output_width) 247 | 248 | def pad_and_save_image(im, image_output_path): 249 | """Pad a 2D or 3D image (numpy array) to match the original and save.""" 250 | # The image is either a greyscale 16-bit mask, or 8-bit RGB color. 
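    # Editorial note: padding is applied only on the bottom and right edges
    # (with zeros), so the saved image stays aligned with the upper-left corner
    # of the original.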
251 | is_greyscale_mask = len(im.shape) == 2 252 | 253 | y_pad = output_height - im.shape[0] 254 | x_pad = output_width - im.shape[1] 255 | pad_size = ((0, y_pad), (0, x_pad)) if is_greyscale_mask else ( 256 | (0, y_pad), (0, x_pad), (0, 0)) 257 | im_padded = numpy.pad(im, pad_size, 'constant') 258 | 259 | skimage.io.imsave(image_output_path, im_padded, "pil") 260 | 261 | orig_name_png = os.path.splitext(os.path.basename(orig_name))[0] + '.png' 262 | visualized_image_name = ('actual%g_pred%g_mean_certainty=%0.3f' + 263 | (microscopeimagequality.constants.ORIG_IMAGE_FORMAT % orig_name_png)) 264 | output_path = (os.path.join(output_directory, visualized_image_name) % 265 | (np_labels[0], prediction, certainties['mean'])) 266 | 267 | annotated_visualization = numpy.squeeze( 268 | microscopeimagequality.evaluation.visualize_image_predictions( 269 | np_images, 270 | np_probabilities, 271 | np_labels, 272 | image_height, 273 | image_width, 274 | show_plot=show_plots, 275 | output_path=None)) 276 | 277 | # Pad and save visualization. 278 | pad_and_save_image(annotated_visualization, output_path) 279 | 280 | def save_mask_from_patch_values(values, mask_format): 281 | """Convert patch values to mask, pad and save.""" 282 | if numpy.min(values) < 0 or numpy.max(values) > numpy.iinfo(numpy.uint16).max: 283 | raise ValueError('Mask value out of bounds.') 284 | values = values.astype(numpy.uint16) 285 | reshaped_values = values.reshape((image_height // patch_width, image_width // patch_width)) 286 | mask = patch_values_to_mask(reshaped_values, patch_width) 287 | pad_and_save_image(mask, os.path.join(output_directory, mask_format % orig_name_png)) 288 | 289 | # Create, pad and save masks. 290 | certainties = microscopeimagequality.evaluation.certainties_from_probabilities(np_probabilities) 291 | certainties = numpy.round(certainties * 292 | numpy.iinfo(numpy.uint16).max).astype(numpy.uint16) 293 | save_mask_from_patch_values(certainties, microscopeimagequality.constants.CERTAINTY_MASK_FORMAT) 294 | 295 | predictions = numpy.argmax(np_probabilities, 1) 296 | save_mask_from_patch_values(predictions, microscopeimagequality.constants.PREDICTIONS_MASK_FORMAT) 297 | 298 | valid_pixel_regions = numpy.ones( 299 | predictions.shape, dtype=numpy.uint16) * numpy.iinfo(numpy.uint16).max 300 | save_mask_from_patch_values(valid_pixel_regions, microscopeimagequality.constants.VALID_MASK_FORMAT) 301 | 302 | def _get_image_tiles_tensor(image, label, image_path, patch_width): 303 | """Gets patches that tile the input image, starting at upper left. 304 | 305 | Args: 306 | image: Input image tensor, size [height x width x 1]. 307 | label: Input label tensor, size [num_classes]. 308 | image_path: Input image path tensor, size [1]. 309 | patch_width: Integer representing width of image patch. 310 | 311 | Returns: 312 | Tensors tiles, size [num_tiles x patch_width x patch_width x 1], labels, 313 | size [num_tiles x num_classes], and image_paths, size [num_tiles x 1]. 
314 | """ 315 | tiles_before_reshape = tensorflow.extract_image_patches( 316 | tensorflow.expand_dims(image, dim=0), [1, patch_width, patch_width, 1], 317 | [1, patch_width, patch_width, 1], [1, 1, 1, 1], 'VALID') 318 | tiles = tensorflow.reshape(tiles_before_reshape, [-1, patch_width, patch_width, 1]) 319 | 320 | labels = tensorflow.tile(tensorflow.expand_dims(label, dim=0), [tensorflow.shape(tiles)[0], 1]) 321 | image_paths = tensorflow.tile( 322 | tensorflow.expand_dims(image_path, dim=0), [tensorflow.shape(tiles)[0], 1]) 323 | return tiles, labels, image_paths 324 | 325 | def run_model_inference( model_ckpt_file, probabilities, labels, images, 326 | output_directory, image_paths, num_samples, 327 | image_height, image_width, show_plots, shard_num, 328 | num_shards, patch_width, aggregation_method): 329 | """Run a previously trained model on images.""" 330 | logging.info('Running inference and writing inference results to \n%s', 331 | os.path.dirname(output_directory)) 332 | 333 | if not os.path.isdir(output_directory): 334 | os.makedirs(output_directory) 335 | 336 | aggregate_labels = [] 337 | patch_labels = [] 338 | 339 | model_directory = os.path.dirname(model_ckpt_file) 340 | if not os.path.isdir(model_directory): 341 | logging.fatal('Model checkpoint directory does not exist.') 342 | 343 | saver = tensorflow.train.Saver() 344 | with tensorflow.Session() as sess: 345 | logging.info('Restoring checkpoint %s', model_ckpt_file) 346 | 347 | saver.restore(sess, model_ckpt_file) 348 | coord = tensorflow.train.Coordinator() 349 | threads = tensorflow.train.start_queue_runners(sess=sess, coord=coord) 350 | logging.info('Started queue_runners.') 351 | 352 | for i in range(num_samples): 353 | logging.info('Running inference on sample %d.', i) 354 | 355 | [np_probabilities, np_labels, np_images, np_image_paths] = sess.run([probabilities, labels, images, image_paths]) 356 | 357 | (prediction, certainties, probabilities_i) = microscopeimagequality.evaluation.aggregate_prediction_from_probabilities(np_probabilities, aggregation_method) 358 | 359 | # Each name must be unique since all workers write to same directory. 
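      # (When the TFRecord provides no image path, a synthetic name built from
      # the shard number and sample index is used below, which keeps filenames
      # unique across shards.)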
360 |       orig_name = np_image_paths[0][0] if np_image_paths[0][0] else ('not_available_%03d_%07d.png' % (shard_num, i))
361 | 
362 |       save_masks_and_annotated_visualization(orig_name, output_directory, prediction, certainties, np_images, np_probabilities, np_labels, patch_width, image_height, image_width, show_plots)
363 | 
364 |       if i == 0:
365 |         patch_probabilities = np_probabilities
366 |         aggregate_probabilities = numpy.expand_dims(probabilities_i, 0)
367 |         orig_names = []
368 |         all_certainties = {}
369 |         for k in microscopeimagequality.evaluation.CERTAINTY_TYPES.values():
370 |           all_certainties[k] = []
371 |       else:
372 |         patch_probabilities = numpy.concatenate((patch_probabilities,
373 |                                                  np_probabilities), 0)
374 |         aggregate_probabilities = numpy.concatenate(
375 |             (aggregate_probabilities, numpy.expand_dims(probabilities_i, 0)))
376 | 
377 |       orig_names.append(orig_name)
378 | 
379 |       for k, v in certainties.items():
380 |         all_certainties[k].append(v)
381 | 
382 |       aggregate_labels.append(np_labels[0])
383 | 
384 |       patch_labels += list(np_labels)
385 | 
386 |     aggregate_predictions = list(numpy.argmax(aggregate_probabilities, 1))
387 | 
388 |     logging.info('Inference output to %s.', output_directory)
389 | 
390 |     logging.info('Done evaluating model.')
391 | 
392 |     output_file = (os.path.join(output_directory, 'results-%05d-of-%05d.csv') % (shard_num, num_shards))
393 | 
394 |     microscopeimagequality.evaluation.save_inference_results(aggregate_probabilities, aggregate_labels, all_certainties, orig_names, aggregate_predictions, output_file)
395 | 
396 |     # If we're not sharding, save out accuracy statistics.
397 |     if num_shards == 1:
398 |       save_confusion = not numpy.any(numpy.asarray(aggregate_labels) < 0)
399 | 
400 |       microscopeimagequality.evaluation.save_result_plots(aggregate_probabilities, aggregate_labels, save_confusion, output_directory, patch_probabilities, patch_labels)
401 | 
402 |     logging.info('Stopping threads')
403 | 
404 |     coord.request_stop()
405 | 
406 |     coord.join(threads)
407 | 
408 |     logging.info('Threads stopped')
409 | 
410 | 
411 | def build_tfrecord_from_pngs(image_globs_list, use_unlabeled_data, num_classes,
412 |                              eval_directory, image_background_value,
413 |                              image_brightness_scale, shard_num, num_shards,
414 |                              image_width, image_height):
415 |   """Build a TFRecord from pngs either from synthetic images or a directory."""
416 | 
417 |   # Generate a local TFRecord
418 |   tfrecord_file_pattern = _TFRECORD_FILE_PATTERN % ('%s', shard_num, num_shards)
419 | 
420 |   num_samples_converted = microscopeimagequality.dataset_creation.dataset_to_examples_in_tfrecord(
421 |       list_of_image_globs=image_globs_list,
422 |       output_directory=eval_directory,
423 |       output_tfrecord_filename=tfrecord_file_pattern % _SPLIT_NAME,
424 |       num_classes=num_classes,
425 |       image_width=image_width,
426 |       image_height=image_height,
427 |       max_images=1e6,
428 |       randomize=False,
429 |       image_background_value=image_background_value,
430 |       image_brightness_scale=image_brightness_scale,
431 |       shard_num=shard_num,
432 |       num_shards=num_shards,
433 |       normalize=False,
434 |       use_unlabeled_data=use_unlabeled_data
435 |   )
436 | 
437 |   logging.info('Created TFRecord with %g examples.', num_samples_converted)
438 | 
439 |   return os.path.join(eval_directory, tfrecord_file_pattern)
440 | 
--------------------------------------------------------------------------------
/microscopeimagequality/summarize.py:
--------------------------------------------------------------------------------
1 | r"""Aggregate and summarize model inference results.
2 | 3 | Requires the prediction-annotated .png images and .csv files from 4 | `quality predict`. The output is in a 'summary' subdirectory, and includes an 5 | aggregated .csv file and various summary images. 6 | 7 | Example usage: 8 | microscopeimagequality summarize 9 | """ 10 | 11 | import logging 12 | import os 13 | import sys 14 | 15 | import matplotlib 16 | import matplotlib.pyplot 17 | import numpy 18 | import skimage.io 19 | 20 | import microscopeimagequality.constants 21 | import microscopeimagequality.evaluation 22 | 23 | logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) 24 | 25 | # matplotlib.use('Agg') 26 | 27 | # Thickness of prediction border annotation, as fraction of image height. 28 | _BORDER_FRACTION = 0.08 29 | 30 | _FIG_WIDTH = 60 31 | 32 | 33 | def check_image_count_matches(experiment_path, num_images_expected): 34 | """Check the number of inference .png files is as expected. 35 | 36 | Args: 37 | experiment_path: String, path to experiment folder (e.g. 38 | path/to/miq_result_images). 39 | num_images_expected: Integer, number of expected images. 40 | """ 41 | filenames = os.listdir(experiment_path) 42 | filenames_png = [f for f in filenames if '.png' in f and 'actual' in f] 43 | logging.info('num expected: %g, num png files: %g', num_images_expected, 44 | len(filenames_png)) 45 | assert num_images_expected == len(filenames_png) 46 | 47 | 48 | def _plot_histogram(values, xlabel, ylabel, save_path, bins=10): 49 | """Plot histogram for values in [0.0, 1.0]. 50 | 51 | Args: 52 | values: List of floats. 53 | xlabel: String, x-axis label. 54 | ylabel: String, y-axis label. 55 | save_path: String, path to save the figure. 56 | bins: Integer, number of histogram bins. 57 | 58 | Raises: 59 | ValueError: If input values are out of range. 60 | """ 61 | if numpy.min(values) < 0.0 or numpy.max(values) > 1.0: 62 | raise ValueError('Input values out of range.') 63 | matplotlib.pyplot.figure() 64 | _, _, patches = matplotlib.pyplot.hist(values, bins=bins, range=(0.0, 1.0), color='gray') 65 | 66 | alpha_index = numpy.array(range(1, bins)).astype(numpy.float32) / (bins - 1) 67 | for a, p in zip(alpha_index, patches): 68 | matplotlib.pyplot.setp(p, 'alpha', a) 69 | 70 | matplotlib.pyplot.xlim(0.0, 1.0) 71 | 72 | matplotlib.pyplot.tick_params(bottom=False, left=False, top=False, right=False) 73 | matplotlib.pyplot.xlabel(xlabel) 74 | matplotlib.pyplot.ylabel(ylabel) 75 | matplotlib.pyplot.grid('off') 76 | matplotlib.pyplot.savefig(save_path, bbox_inches='tight') 77 | 78 | 79 | def _make_scatter_subplot(num_classes, predictions, certainties1, certainties2, 80 | label1, label2): 81 | """Make a single scatter subplot. 82 | 83 | Args: 84 | num_classes: Integer, total number of possible predicted classes. 85 | predictions: List of integers in [0, num_classes). 86 | certainties1: List of floats in [0.0, 1.0]. 87 | certainties2: List of floats in [0.0, 1.0]. 88 | label1: String, text axes label for certainties1. 89 | label2: String, text axes label for certainties2. 
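
  Each class is drawn in its own hue from the HSV colormap, and marker
  transparency is reduced as the number of points in that class grows (see
  _get_alpha below).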
90 | """ 91 | for c in range(num_classes): 92 | mask = predictions == c 93 | if numpy.any(mask): 94 | color = matplotlib.pyplot.cm.hsv(float(c) / num_classes) 95 | alpha = _get_alpha(numpy.sum(mask)) 96 | logging.info('class %d, alpha %g counts %d', c, alpha, numpy.sum(mask)) 97 | plot_scatter( 98 | numpy.array(certainties1)[mask], 99 | numpy.array(certainties2)[mask], label1, label2, color, alpha) 100 | 101 | 102 | def plot_scatter(x, y, xlabel, ylabel, color, alpha): 103 | """Plot scatter plot.""" 104 | matplotlib.pyplot.scatter(x, y, alpha=alpha, s=2.5, c=color, linewidths=0) 105 | 106 | matplotlib.pyplot.grid('off') 107 | matplotlib.pyplot.tick_params( 108 | labelbottom=False, 109 | labelleft=False, 110 | bottom=False, 111 | left=False, 112 | top=False, 113 | right=False) 114 | matplotlib.pyplot.ylim([0.0, 1.0]) 115 | matplotlib.pyplot.xlim([0.0, 1.0]) 116 | matplotlib.pyplot.xlabel(xlabel) 117 | matplotlib.pyplot.ylabel(ylabel) 118 | 119 | 120 | def _get_alpha(count): 121 | """Optimal scatter plot alpha for a given number of points.""" 122 | # These were empirically determined. 123 | if count >= 1e4: 124 | alpha = 0.03 125 | if count < 1e4: 126 | alpha = 0.08 127 | if count < 1e3: 128 | alpha = 0.3 129 | if count < 5e2: 130 | alpha = 0.5 131 | if count < 1e2: 132 | alpha = 0.9 133 | if count < 1e1: 134 | alpha = 1.0 135 | return alpha 136 | 137 | 138 | def plot_certainties(certainties, predictions, num_classes, save_path): 139 | """Generate scatter plots of certainties. 140 | 141 | Args: 142 | certainties: Dictionary mapping string certainty type to list of float 143 | certainty values in [0.0, 1.0]. 144 | predictions: List of integer predictions in [0, num_classes). 145 | num_classes: Integer, total number of possible predicted classes. 146 | save_path: String, path to save the figure. 147 | """ 148 | keys = sorted(certainties.keys()) 149 | num_keys = len(keys) 150 | fig_width = int(2.5 * len(certainties.keys())) 151 | matplotlib.pyplot.figure(figsize=(fig_width, fig_width)) 152 | for i, k1 in enumerate(keys): 153 | for j, k2 in enumerate(keys): 154 | if i > j: 155 | matplotlib.pyplot.subplot(num_keys, num_keys, 1 + i * num_keys + j) 156 | _make_scatter_subplot(num_classes, predictions, certainties[k2], 157 | certainties[k1], k2 158 | if i == num_keys - 1 else '', k1 159 | if j == 0 else '') 160 | logging.info('Certainty %s has min %g, mean %g, max %g.', k1, 161 | numpy.min(certainties[k1]), 162 | numpy.mean(certainties[k1]), numpy.max(certainties[k1])) 163 | matplotlib.pyplot.subplots_adjust(hspace=0.05, wspace=0.05) 164 | matplotlib.pyplot.savefig(save_path, bbox_inches='tight', dpi=600) 165 | 166 | 167 | def _read_valid_part_of_annotated_image(experiment_path, orig_name): 168 | """Reads in an image and returns the valid region. 169 | 170 | The valid region defines the pixels over which the inference has been done. 171 | 172 | Args: 173 | experiment_path: String, path to inference annotated output images. 174 | orig_name: Original filename without path and extension of image to be 175 | found. 176 | 177 | Returns: 178 | An image as a numpy array, with the valid region only if a mask file 179 | exists. 180 | 181 | Raises: 182 | ValueError: If the image is not found. 183 | """ 184 | filename_index = None 185 | all_files = os.listdir(experiment_path) 186 | # Find the annotated image file. There is exactly one. 187 | for index, name in enumerate(all_files): 188 | # Exclude all masks from search. 
189 | if (microscopeimagequality.constants.ORIG_IMAGE_FORMAT + '.png') % orig_name in name: 190 | filename_index = index 191 | if filename_index is None: 192 | raise ValueError('File %s not found' % orig_name) 193 | annotated_filename = all_files[filename_index] 194 | 195 | image = skimage.io.imread(os.path.join(experiment_path, annotated_filename)) 196 | 197 | mask_path = os.path.join(experiment_path, microscopeimagequality.constants.VALID_MASK_FORMAT % orig_name + '.png') 198 | 199 | # if not os.path.isdir(mask_path): 200 | # logging.info('No mask found at %s', mask_path) 201 | # else: 202 | mask = skimage.io.imread(mask_path) 203 | # Get the upper-left crop that is valid (where mask > 0). 204 | max_valid_row = numpy.argwhere(numpy.sum(mask, 1))[-1][0] 205 | max_valid_column = numpy.argwhere(numpy.sum(mask, 0))[-1][0] 206 | image = image[:max_valid_row, :max_valid_column] 207 | 208 | return image 209 | 210 | 211 | def _save_color_legend(num_classes, path): 212 | """Save a legend for interpreting the predicted class colors. 213 | 214 | This produces an image with a color bar denoting the colors of each of the 215 | predicted classes. 216 | 217 | Args: 218 | num_classes: Integer, the number of classes in the prediction task. 219 | path: Path to png file to save the figure. 220 | """ 221 | 222 | probabilities = numpy.identity(num_classes, dtype=numpy.float32) 223 | probabilities = numpy.tile(probabilities, [num_classes, 1]) 224 | patch_width = microscopeimagequality.evaluation.BORDER_SIZE // 2 225 | patches = numpy.zeros((num_classes ** 2, patch_width, patch_width, 1), dtype=numpy.float32) 226 | # Make up some dummy labels. 227 | labels = [0] * num_classes ** 2 228 | image_shape = (num_classes * patch_width, num_classes * patch_width) 229 | 230 | image = microscopeimagequality.evaluation.get_rgb_image(1.0, patches, probabilities, labels, 231 | image_shape) 232 | image = image[microscopeimagequality.evaluation.BORDER_SIZE:microscopeimagequality.evaluation.BORDER_SIZE + patch_width, :] 233 | matplotlib.pyplot.figure() 234 | matplotlib.pyplot.imshow(image, interpolation='nearest') 235 | matplotlib.pyplot.grid('off') 236 | matplotlib.pyplot.axis('off') 237 | matplotlib.pyplot.savefig(path, bbox_inches='tight') 238 | matplotlib.pyplot.close() 239 | 240 | 241 | def save_histograms_scatter_plots_and_csv(probabilities, 242 | labels, 243 | certainties, 244 | orig_names, 245 | predictions, 246 | output_path, 247 | output_path_all_plots=None): 248 | """Visualize and save various summary plots and an aggregated .csv file. 249 | 250 | Args: 251 | probabilities: Numpy float array of shape [num_samples x num_classes]. 252 | labels: List of integers, the actual classes, length num_samples. 253 | certainties: Dict of lists of floats, the certainties, each length 254 | num_samples. 255 | orig_names: List of strings, the original names, length num_samples. 256 | predictions: List of integers, the predicted classes, length 257 | num_samples. 258 | output_path: String, path to folder to save summary results. 259 | output_path_all_plots: String, path to folder to save less useful results. 
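      If None (the default), these plots are written to output_path as well.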
260 | """ 261 | if output_path_all_plots is None: 262 | output_path_all_plots = output_path 263 | 264 | logging.info('Saving inference results in single .csv file.') 265 | microscopeimagequality.evaluation.save_inference_results(probabilities, labels, certainties, 266 | orig_names, predictions, 267 | os.path.join(output_path, 'results_all.csv')) 268 | 269 | logging.info('Generating simple result plot.') 270 | save_confusion = not numpy.any(numpy.array(labels) < 0) 271 | microscopeimagequality.evaluation.save_result_plots(probabilities, labels, save_confusion, 272 | output_path_all_plots) 273 | 274 | predictions = numpy.array(predictions) 275 | num_classes = probabilities.shape[1] 276 | 277 | _save_color_legend(num_classes, os.path.join(output_path, 'color_legend.png')) 278 | 279 | plot_certainties(certainties, predictions, num_classes, 280 | os.path.join(output_path_all_plots, 281 | 'certainty_scatter_plot_all_certainties.png')) 282 | 283 | certainties_subset = {k: certainties[k] for k in ['mean', 'aggregate']} 284 | plot_certainties(certainties_subset, predictions, num_classes, 285 | os.path.join(output_path, 'certainty_scatter_plot.png')) 286 | 287 | # Generate and save histograms for predictions and certainties. 288 | 289 | microscopeimagequality.evaluation.save_prediction_histogram( 290 | predictions, 291 | os.path.join(output_path, 'histogram_predictions.jpg'), num_classes) 292 | microscopeimagequality.evaluation.save_prediction_histogram( 293 | predictions, 294 | os.path.join(output_path, 'histogram_predictions_log.jpg'), 295 | num_classes, 296 | log=True) 297 | 298 | for kind in microscopeimagequality.evaluation.CERTAINTY_TYPES.values(): 299 | if kind == 'aggregate': 300 | path = output_path 301 | else: 302 | path = output_path_all_plots 303 | _plot_histogram(certainties[kind], '%s prediction certainty' % kind, 304 | 'image count', 305 | os.path.join(path, 'histogram_%s_certainty.jpg' % kind)) 306 | 307 | logging.info('Done summarizing results') 308 | 309 | 310 | def _adjust_image_annotation(image, label_intensity): 311 | """Adjusts the annotation at the bottom of the image.""" 312 | # Change the intensity of the bottom border. 313 | image[-1 * microscopeimagequality.evaluation.BORDER_SIZE:, :, :] = ( 314 | image[-1 * microscopeimagequality.evaluation.BORDER_SIZE:, :, :].astype(numpy.float32) * 315 | label_intensity).astype(image.dtype) 316 | 317 | # Make bottom border larger. 318 | border_size = max(microscopeimagequality.evaluation.BORDER_SIZE, 319 | int(_BORDER_FRACTION * image.shape[0])) 320 | image[-1 * border_size:, :, :] = numpy.tile(image[-1:, :, :], (border_size, 1, 321 | 1)) 322 | return image 323 | 324 | 325 | def _rank_examples(indices, rank_method, certainties, predictions, 326 | num_plots_in_row, predicted_class): 327 | """Rank the examples based on a ranking method. 328 | 329 | Args: 330 | indices: 1D numpy array of indices to rank. 331 | rank_method: String, the ranking method. 332 | certainties: List of floats, the certainties. 333 | predictions: 1D numpy array of the predicted class indices. 334 | num_plots_in_row: Int, number of plots in each row. 335 | predicted_class: Integer, the predicted class. 336 | 337 | Returns: 338 | The ranked indices as a 1D numpy array. 339 | 340 | Raises: 341 | ValueError: If the certainty rank method is invalid. 
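
  The rank_method values used elsewhere in this module are 'random' and the
  patterns '<kind>_certainty_least', '<kind>_certainty_most' and
  '<kind>_certainty_least_to_most' (see montage_by_certainty).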
342 | """ 343 | if rank_method == 'random': 344 | numpy.random.shuffle(indices) 345 | elif 'certainty' in rank_method: 346 | class_certainties = numpy.array(certainties)[predictions == predicted_class] 347 | indices = indices[numpy.argsort(class_certainties)] 348 | if 'certainty_most' in rank_method: 349 | indices = indices[::-1] 350 | elif 'certainty_least_to_most' in rank_method: 351 | stride = indices.shape[0] // num_plots_in_row 352 | indices = indices[:stride * num_plots_in_row:stride] 353 | elif 'certainty_least' in rank_method: 354 | pass 355 | else: 356 | raise ValueError('Invalid certainty rank method %s' % rank_method) 357 | else: 358 | raise ValueError('Invalid rank_method %s' % rank_method) 359 | return indices 360 | 361 | 362 | def save_summary_montages(probabilities, 363 | certainties, 364 | orig_names, 365 | predictions, 366 | experiment_path, 367 | output_path, 368 | output_path_all_plots=None): 369 | """Visualize and save summary montage images. 370 | 371 | Args: 372 | probabilities: Numpy float array of shape [num_samples x num_classes]. 373 | certainties: Dict of lists of floats, the certainties, each length 374 | num_samples. 375 | orig_names: List of strings, the original names, length num_samples. 376 | predictions: List of integers, the predicted classes, length 377 | num_samples. 378 | experiment_path: String, path to folder containing results. 379 | output_path: String, path to folder to save summary results. 380 | output_path_all_plots: String, path to folder to save less useful results. 381 | """ 382 | if output_path_all_plots is None: 383 | output_path_all_plots = output_path 384 | 385 | predictions = numpy.array(predictions) 386 | num_samples, num_classes = probabilities.shape 387 | 388 | with open( 389 | os.path.join(output_path_all_plots, 'montage_image_paths.txt'), 'w') as f: 390 | 391 | f.write(('# This text file maps subplots in each summary image with the \n' 392 | '# original image path. 
Subplots are denoted by 0-indexed row \n' 393 | '# and column from upper left.\n\n')) 394 | 395 | def plot_image(index, label_intensity=1.0): 396 | """Read and plot inference image.""" 397 | 398 | orig_name = os.path.splitext(os.path.basename(orig_names[index]))[0] 399 | f.write('%s\n' % orig_names[index]) 400 | image = _read_valid_part_of_annotated_image(experiment_path, orig_name) 401 | 402 | image = _adjust_image_annotation(image, label_intensity) 403 | 404 | matplotlib.pyplot.imshow(image) 405 | matplotlib.pyplot.tick_params(labelbottom=False, labelleft=False) 406 | matplotlib.pyplot.grid('off') 407 | matplotlib.pyplot.axis('off') 408 | 409 | def subplot(nrows, ncols, num): 410 | """Makes a subplot and logs the (row, column) with 0-indexing.""" 411 | matplotlib.pyplot.subplot(nrows, ncols, num) 412 | f.write('%d, %d ' % ((num - 1) / ncols, (num - 1) % ncols)) 413 | 414 | def savefig(path): 415 | """Saves figure and logs the path.""" 416 | matplotlib.pyplot.subplots_adjust(hspace=0.01, wspace=0.01) 417 | matplotlib.pyplot.savefig(path, bbox_inches='tight') 418 | matplotlib.pyplot.close() 419 | f.write('%s\n\n' % path) 420 | 421 | def setup_new_montage_figure(nrows, ncols): 422 | """New figure with blank subplot at corners to fix figure shape.""" 423 | matplotlib.pyplot.figure(figsize=(_FIG_WIDTH, _FIG_WIDTH)) 424 | matplotlib.pyplot.subplot(nrows, ncols, 1) 425 | matplotlib.pyplot.axis('off') 426 | matplotlib.pyplot.subplot(nrows, ncols, nrows * ncols) 427 | matplotlib.pyplot.axis('off') 428 | 429 | def montage_by_class_rank(rank_method, certainties, num_per_class=10): 430 | """Montage select images per class ranked by a particular method.""" 431 | setup_new_montage_figure(num_classes, num_per_class) 432 | for i in range(num_classes): 433 | class_indices = numpy.array(range(num_samples))[predictions == i] 434 | num_plots_in_row = min(class_indices.shape[0], num_per_class) 435 | if num_plots_in_row == 0: 436 | continue 437 | class_indices = _rank_examples(class_indices, rank_method, certainties, 438 | predictions, num_plots_in_row, i) 439 | for j in range(num_plots_in_row): 440 | subplot(num_classes, num_per_class, 1 + i * num_per_class + j) 441 | plot_image(class_indices[j], certainties[class_indices[j]]) 442 | savefig(os.path.join(output_path_all_plots, 'rank_%s.jpg' % rank_method)) 443 | 444 | def montage_by_class_bin(rank_method, certainties, bins_per_class=10): 445 | """Montage one image per certainty bin for each class.""" 446 | boundaries = numpy.linspace(0.0, 1.0, bins_per_class + 1) 447 | setup_new_montage_figure(num_classes, bins_per_class) 448 | for i in range(num_classes): 449 | for j in range(bins_per_class): 450 | mask = (predictions == i) & (certainties >= boundaries[j]) & ( 451 | certainties < boundaries[j + 1]) 452 | bin_indices = numpy.array(range(num_samples))[mask] 453 | bin_certainties = numpy.array(certainties)[mask] 454 | if bin_indices.shape[0] == 0: 455 | continue 456 | # Use the approximate median value in the bin. 
457 | bin_indices = bin_indices[numpy.argsort(bin_certainties)] 458 | index = bin_indices[len(bin_indices) // 2] 459 | subplot(num_classes, bins_per_class, 1 + i * bins_per_class + j) 460 | plot_image(index, certainties[index]) 461 | if rank_method == 'aggregate_certainty_least_to_most': 462 | path = output_path 463 | else: 464 | path = output_path_all_plots 465 | savefig(os.path.join(path, 'bin_%s.jpg' % rank_method)) 466 | 467 | def montage_by_certainty(certainties, kind): 468 | montage_by_class_bin('%s_certainty_least_to_most' % kind, certainties) 469 | montage_by_class_rank('%s_certainty_least' % kind, certainties) 470 | montage_by_class_rank('%s_certainty_most' % kind, certainties) 471 | montage_by_class_rank('%s_certainty_least_to_most' % kind, certainties) 472 | 473 | def montage_first_several(num_subplots, sorted_indices, name): 474 | """Montages the first num_subplots^2 images.""" 475 | setup_new_montage_figure(num_subplots, num_subplots) 476 | for i in range(num_subplots): 477 | for j in range(num_subplots): 478 | if i * num_subplots + j < len(sorted_indices): 479 | subplot(num_subplots, num_subplots, 1 + i * num_subplots + j) 480 | plot_image(sorted_indices[i * num_subplots + j]) 481 | savefig(os.path.join(output_path_all_plots, '%s.jpg' % name)) 482 | 483 | def plot_most_least_certain(certainties, kind): 484 | indices = numpy.argsort(certainties) 485 | width = min(len(certainties), 8) 486 | montage_first_several(width, indices, 'least_%s_certainty' % kind) 487 | montage_first_several(width, indices[::-1], 'most_%s_certainty' % kind) 488 | 489 | # Now actually generate the montages. 490 | montage_by_class_rank('random', certainties['mean']) 491 | for certainty in microscopeimagequality.evaluation.CERTAINTY_TYPES.values(): 492 | logging.info('Generating montages for certainty type: %s.', certainty) 493 | montage_by_certainty(certainties[certainty], certainty) 494 | plot_most_least_certain(certainties[certainty], certainty) 495 | 496 | logging.info('Done saving summary montages.') 497 | -------------------------------------------------------------------------------- /microscopeimagequality/validation.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | 3 | import logging 4 | import os 5 | import sys 6 | 7 | import microscopeimagequality.dataset_creation 8 | 9 | logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) 10 | 11 | 12 | def check_duplicate_image_name(image_paths): 13 | """ 14 | Check that there are no duplicate names (without path or extension). 15 | 16 | Args: 17 | image_paths: List of strings, paths to images. 18 | 19 | Raises: 20 | ValueError: If there is a duplicate image name. 21 | """ 22 | image_names = [os.path.basename(os.path.splitext(p)[0]) for p in image_paths] 23 | 24 | num_images = len(image_names) 25 | 26 | num_unique = len(set(image_names)) 27 | 28 | if num_images != num_unique: 29 | raise ValueError('Found %d duplicate images.' % (num_images - num_unique)) 30 | 31 | logging.info('Found no duplicates in %d images.', num_images) 32 | 33 | 34 | def check_image_dimensions(image_paths, image_height, image_width): 35 | """ 36 | Check that the image dimensions are valid. 37 | 38 | A valid image has height and width no smaller than the specified height, width. 39 | 40 | Args: 41 | image_paths: List of strings, paths to images. 42 | image_height: Integer, height of image. 43 | image_width: Integer, width of image. 
44 | 45 | Raises: 46 | ValueError: If there is an invalid image dimension 47 | """ 48 | logging.info('Using image height, width %s', str((image_height, image_width))) 49 | 50 | bad_images = [] 51 | 52 | for path in image_paths: 53 | logging.info('Trying to read image %s', path) 54 | image = microscopeimagequality.dataset_creation.read_16_bit_greyscale(path) 55 | 56 | if image.shape[0] < image_height or image.shape[1] < image_width: 57 | bad_images.append(path) 58 | logging.info('Image %s dimension %s is too small.', path, str(image.shape)) 59 | 60 | logging.info('Done checking images') 61 | 62 | logging.info('Found %d bad images.', len(bad_images)) 63 | 64 | if bad_images: 65 | raise ValueError('Found %d bad images! \n %s' % (len(bad_images), '\n'.join(bad_images))) 66 | -------------------------------------------------------------------------------- /project.config: -------------------------------------------------------------------------------- 1 | [project] 2 | description = Access inherited by all other projects. 3 | [receive] 4 | requireContributorAgreement = false 5 | requireSignedOffBy = false 6 | requireChangeId = true 7 | enableSignedPush = false 8 | [submit] 9 | mergeContent = true 10 | [capability] 11 | administrateServer = group mdb/gas 12 | [access "refs/*"] 13 | read = group Anonymous Users 14 | read = group mdb/gas 15 | read = group Project Owners 16 | [access "refs/for/*"] 17 | addPatchSet = group Registered Users 18 | [access "refs/for/refs/*"] 19 | push = group Registered Users 20 | pushMerge = group Registered Users 21 | [access "refs/heads/*"] 22 | create = group Project Owners 23 | create = group mdb/gas 24 | forgeAuthor = group Registered Users 25 | forgeCommitter = group Project Owners 26 | forgeCommitter = group mdb/gas 27 | push = group Project Owners 28 | push = group mdb/gas 29 | label-Code-Review = -2..+2 group Project Owners 30 | label-Code-Review = -2..+2 group mdb/gas 31 | label-Code-Review = -1..+1 group Registered Users 32 | submit = group Project Owners 33 | submit = group mdb/gas 34 | editTopicName = +force group Project Owners 35 | editTopicName = +force group mdb/gas 36 | [access "refs/meta/config"] 37 | exclusiveGroupPermissions = read 38 | read = group Project Owners 39 | read = group mdb/gas 40 | create = group Project Owners 41 | create = group mdb/gas 42 | push = group Project Owners 43 | push = group mdb/gas 44 | label-Code-Review = -2..+2 group Project Owners 45 | label-Code-Review = -2..+2 group mdb/gas 46 | submit = group Project Owners 47 | submit = group mdb/gas 48 | [access "refs/tags/*"] 49 | create = group Project Owners 50 | create = group mdb/gas 51 | createTag = group Project Owners 52 | createTag = group mdb/gas 53 | createSignedTag = group Project Owners 54 | createSignedTag = group mdb/gas 55 | [label "Code-Review"] 56 | function = MaxWithBlock 57 | defaultValue = 0 58 | copyMinScore = true 59 | copyAllScoresOnTrivialRebase = true 60 | value = -2 This shall not be merged 61 | value = -1 I would prefer this is not merged as is 62 | value = 0 No score 63 | value = +1 Looks good to me, but someone else must approve 64 | value = +2 Looks good to me, approved 65 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import setuptools 2 | 3 | setuptools.setup( 4 | python_requires='<3.8', 5 | entry_points={ 6 | "console_scripts": [ 7 | "microscopeimagequality=microscopeimagequality.application:command" 8 | ] 9 | }, 10 
| install_requires=[ 11 | "click", 12 | "matplotlib", 13 | "nose", 14 | "numpy<1.19.0,>=1.16.0", 15 | "Pillow", 16 | "scikit-image", 17 | "scipy", 18 | "six", 19 | "tensorflow==2.6.4", 20 | "imagecodecs", 21 | ], 22 | test_requires=["pytest"], 23 | name="microscopeimagequality", 24 | package_data={ 25 | "microscopeimagequality": [ 26 | "data/" 27 | ] 28 | }, 29 | classifiers=[ 30 | 'License :: OSI Approved :: Apache Software License', 31 | 'Intended Audience :: Science/Research', 32 | 'Programming Language :: Python :: 2.7', 33 | 'Topic :: Scientific/Engineering'], 34 | description="Microscope Image Quality Classification", 35 | url='https://github.com/google/microscopeimagequality', 36 | author='Samuel Yang', 37 | author_email='samuely@google.com', 38 | license='Apache 2.0', 39 | packages=setuptools.find_packages( 40 | exclude=[ 41 | "tests" 42 | ] 43 | ), 44 | version="0.1.0dev5" 45 | ) 46 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/__init__.py -------------------------------------------------------------------------------- /tests/data/00_mcf-z-stacks-03212011_k06_s2_w12667264a-6432-4f7e-bf58-625a1319a1c9.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/00_mcf-z-stacks-03212011_k06_s2_w12667264a-6432-4f7e-bf58-625a1319a1c9.tif -------------------------------------------------------------------------------- /tests/data/BBBC006_z_aligned__a01__s1__w1_10.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/BBBC006_z_aligned__a01__s1__w1_10.png -------------------------------------------------------------------------------- /tests/data/BBBC006_z_aligned__a01__s1__w1_10_cropped.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/BBBC006_z_aligned__a01__s1__w1_10_cropped.png -------------------------------------------------------------------------------- /tests/data/BBBC006_z_aligned__a02__s1__w1_10.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/BBBC006_z_aligned__a02__s1__w1_10.png -------------------------------------------------------------------------------- /tests/data/BBBC006_z_aligned__a03__s1__w1_10.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/BBBC006_z_aligned__a03__s1__w1_10.png -------------------------------------------------------------------------------- /tests/data/BBBC006_z_aligned__a04__s1__w1_10.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/BBBC006_z_aligned__a04__s1__w1_10.png 
-------------------------------------------------------------------------------- /tests/data/BBBC006_z_aligned__a05__s1__w1_10.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/BBBC006_z_aligned__a05__s1__w1_10.png -------------------------------------------------------------------------------- /tests/data/BBBC006_z_aligned__a06__s1__w1_10.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/BBBC006_z_aligned__a06__s1__w1_10.png -------------------------------------------------------------------------------- /tests/data/BBBC006_z_aligned__a07__s1__w1_10.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/BBBC006_z_aligned__a07__s1__w1_10.png -------------------------------------------------------------------------------- /tests/data/BBBC006_z_aligned__a08__s1__w1_10.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/BBBC006_z_aligned__a08__s1__w1_10.png -------------------------------------------------------------------------------- /tests/data/BBBC006_z_aligned__a09__s1__w1_10.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/BBBC006_z_aligned__a09__s1__w1_10.png -------------------------------------------------------------------------------- /tests/data/README: -------------------------------------------------------------------------------- 1 | Test images here are modified versions of images from image set BBBC006v1 from the Broad Bioimage Benchmark Collection [Ljosa et al., Nature Methods, 2012], used with permission. 
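The test suites later in this listing read these files with microscopeimagequality.dataset_creation.read_16_bit_greyscale, which returns a float32 array normalized to [0, 1]. As a minimal sketch only (the chosen file and the printed values are illustrative, and it assumes the package is importable with the repository root as the working directory):

import os

import microscopeimagequality.dataset_creation

# Any 16-bit greyscale PNG or TIFF under tests/data can be substituted here.
path = os.path.join("tests", "data", "BBBC006_z_aligned__a01__s1__w1_10.png")
image = microscopeimagequality.dataset_creation.read_16_bit_greyscale(path)

# Expect float32 values in [0, 1]; for this particular image the shape is roughly (520, 696).
print(image.dtype, image.shape, image.max())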
-------------------------------------------------------------------------------- /tests/data/annotated_image_predicted_0_label_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/annotated_image_predicted_0_label_0.png -------------------------------------------------------------------------------- /tests/data/annotated_image_predicted_0_label_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/annotated_image_predicted_0_label_1.png -------------------------------------------------------------------------------- /tests/data/annotated_image_predicted_1_label_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/annotated_image_predicted_1_label_0.png -------------------------------------------------------------------------------- /tests/data/annotated_image_predicted_1_label_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/annotated_image_predicted_1_label_1.png -------------------------------------------------------------------------------- /tests/data/cell_image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/cell_image.png -------------------------------------------------------------------------------- /tests/data/cell_image.tiff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/cell_image.tiff -------------------------------------------------------------------------------- /tests/data/cell_image2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/cell_image2.png -------------------------------------------------------------------------------- /tests/data/cell_image_airy_blurred.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/cell_image_airy_blurred.png -------------------------------------------------------------------------------- /tests/data/cell_image_blurred.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/cell_image_blurred.png -------------------------------------------------------------------------------- /tests/data/cell_image_degraded.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/cell_image_degraded.png -------------------------------------------------------------------------------- 
/tests/data/cell_image_poisson_noise.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/cell_image_poisson_noise.png -------------------------------------------------------------------------------- /tests/data/cell_image_poisson_noise_py.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/cell_image_poisson_noise_py.png -------------------------------------------------------------------------------- /tests/data/cell_image_saturated.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/cell_image_saturated.png -------------------------------------------------------------------------------- /tests/data/cell_image_saturated_with_offset.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/cell_image_saturated_with_offset.png -------------------------------------------------------------------------------- /tests/data/cell_image_vignetted.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/cell_image_vignetted.png -------------------------------------------------------------------------------- /tests/data/data_train.num_records: -------------------------------------------------------------------------------- 1 | 33 -------------------------------------------------------------------------------- /tests/data/data_train.tfrecord: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/data_train.tfrecord -------------------------------------------------------------------------------- /tests/data/images_for_glob_test/00_mcf-z-stacks-03212011_a01_s1_w1a571d43d-5554-47fd-bb54-5db68290e5a7.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/images_for_glob_test/00_mcf-z-stacks-03212011_a01_s1_w1a571d43d-5554-47fd-bb54-5db68290e5a7.tif -------------------------------------------------------------------------------- /tests/data/images_for_glob_test/00_mcf-z-stacks-03212011_a19_s2_w12703a47b-7574-461a-85e2-afc38bc0adae.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/images_for_glob_test/00_mcf-z-stacks-03212011_a19_s2_w12703a47b-7574-461a-85e2-afc38bc0adae.tif -------------------------------------------------------------------------------- /tests/data/images_for_glob_test/00_mcf-z-stacks-03212011_b01_s2_w12e721516-a202-4647-9f19-fd73ff016ce6.tif: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/images_for_glob_test/00_mcf-z-stacks-03212011_b01_s2_w12e721516-a202-4647-9f19-fd73ff016ce6.tif -------------------------------------------------------------------------------- /tests/data/images_for_glob_test/00_mcf-z-stacks-03212011_b14_s2_w126875e94-8541-482c-8aea-09f2e0b1e2ce.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/images_for_glob_test/00_mcf-z-stacks-03212011_b14_s2_w126875e94-8541-482c-8aea-09f2e0b1e2ce.tif -------------------------------------------------------------------------------- /tests/data/images_for_glob_test/00_mcf-z-stacks-03212011_c06_s2_w123356693-a029-4886-9735-9e3ca50eb8d8.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/images_for_glob_test/00_mcf-z-stacks-03212011_c06_s2_w123356693-a029-4886-9735-9e3ca50eb8d8.tif -------------------------------------------------------------------------------- /tests/data/images_for_glob_test/00_mcf-z-stacks-03212011_d02_s2_w122501c04-ccbe-4fc4-bb08-924ccccaad58.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/images_for_glob_test/00_mcf-z-stacks-03212011_d02_s2_w122501c04-ccbe-4fc4-bb08-924ccccaad58.tif -------------------------------------------------------------------------------- /tests/data/images_for_glob_test/00_mcf-z-stacks-03212011_d10_s2_w129ae2539-fdfc-4e29-9215-03a79115d678.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/images_for_glob_test/00_mcf-z-stacks-03212011_d10_s2_w129ae2539-fdfc-4e29-9215-03a79115d678.tif -------------------------------------------------------------------------------- /tests/data/images_for_glob_test/00_mcf-z-stacks-03212011_e10_s2_w12490a9b6-6991-4eac-a93a-123978224209.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/images_for_glob_test/00_mcf-z-stacks-03212011_e10_s2_w12490a9b6-6991-4eac-a93a-123978224209.tif -------------------------------------------------------------------------------- /tests/data/images_for_glob_test/00_mcf-z-stacks-03212011_e24_s2_w1241c3e73-1e5a-4121-b7b5-02af37510046.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/images_for_glob_test/00_mcf-z-stacks-03212011_e24_s2_w1241c3e73-1e5a-4121-b7b5-02af37510046.tif -------------------------------------------------------------------------------- /tests/data/images_for_glob_test/00_mcf-z-stacks-03212011_h01_s2_w12ab2c20a-2393-4718-8f3d-a4b9bb17a47f.tif: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/images_for_glob_test/00_mcf-z-stacks-03212011_h01_s2_w12ab2c20a-2393-4718-8f3d-a4b9bb17a47f.tif -------------------------------------------------------------------------------- /tests/data/images_for_glob_test/00_mcf-z-stacks-03212011_h09_s2_w126c275bc-9c3e-40af-b3ca-54f91091d353.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/images_for_glob_test/00_mcf-z-stacks-03212011_h09_s2_w126c275bc-9c3e-40af-b3ca-54f91091d353.tif -------------------------------------------------------------------------------- /tests/data/images_for_glob_test/00_mcf-z-stacks-03212011_i05_s2_w12e9fa745-2bf9-463c-9e74-e9ff6386b255.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/images_for_glob_test/00_mcf-z-stacks-03212011_i05_s2_w12e9fa745-2bf9-463c-9e74-e9ff6386b255.tif -------------------------------------------------------------------------------- /tests/data/images_for_glob_test/00_mcf-z-stacks-03212011_i07_s2_w125417efe-c7d4-40b1-b26a-c1ecfea30de6.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/images_for_glob_test/00_mcf-z-stacks-03212011_i07_s2_w125417efe-c7d4-40b1-b26a-c1ecfea30de6.tif -------------------------------------------------------------------------------- /tests/data/images_for_glob_test/00_mcf-z-stacks-03212011_i16_s2_w1234f5368-8bfd-4995-bbb3-0e0c88676dc5.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/images_for_glob_test/00_mcf-z-stacks-03212011_i16_s2_w1234f5368-8bfd-4995-bbb3-0e0c88676dc5.tif -------------------------------------------------------------------------------- /tests/data/images_for_glob_test/00_mcf-z-stacks-03212011_j10_s2_w12f140c18-a1df-47e2-89d1-864a42adb67c.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/images_for_glob_test/00_mcf-z-stacks-03212011_j10_s2_w12f140c18-a1df-47e2-89d1-864a42adb67c.tif -------------------------------------------------------------------------------- /tests/data/images_for_glob_test/00_mcf-z-stacks-03212011_l01_s2_w1272566d5-fc1b-411f-8280-035a9c69283d.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/images_for_glob_test/00_mcf-z-stacks-03212011_l01_s2_w1272566d5-fc1b-411f-8280-035a9c69283d.tif -------------------------------------------------------------------------------- /tests/data/images_for_glob_test/00_mcf-z-stacks-03212011_l22_s2_w12b8de869-e1c8-438b-86b6-7576116596c8.tif: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/images_for_glob_test/00_mcf-z-stacks-03212011_l22_s2_w12b8de869-e1c8-438b-86b6-7576116596c8.tif -------------------------------------------------------------------------------- /tests/data/images_for_glob_test/00_mcf-z-stacks-03212011_l23_s2_w129334806-f24b-469c-98fe-7d1ff2f8f968.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/images_for_glob_test/00_mcf-z-stacks-03212011_l23_s2_w129334806-f24b-469c-98fe-7d1ff2f8f968.tif -------------------------------------------------------------------------------- /tests/data/images_for_glob_test/00_mcf-z-stacks-03212011_m14_s2_w12e7632d4-7a01-4cc0-a680-e686a4869613.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/images_for_glob_test/00_mcf-z-stacks-03212011_m14_s2_w12e7632d4-7a01-4cc0-a680-e686a4869613.tif -------------------------------------------------------------------------------- /tests/data/images_for_glob_test/00_mcf-z-stacks-03212011_n04_s2_w12d7547cb-bf90-434b-b2e1-79730bfcd3f7.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/images_for_glob_test/00_mcf-z-stacks-03212011_n04_s2_w12d7547cb-bf90-434b-b2e1-79730bfcd3f7.tif -------------------------------------------------------------------------------- /tests/data/images_for_glob_test/00_mcf-z-stacks-03212011_n06_s2_w121660a1f-dbf1-4f9b-b1fb-6b1222cada0c.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/images_for_glob_test/00_mcf-z-stacks-03212011_n06_s2_w121660a1f-dbf1-4f9b-b1fb-6b1222cada0c.tif -------------------------------------------------------------------------------- /tests/data/images_for_glob_test/00_mcf-z-stacks-03212011_n16_s2_w12a4361b7-56a0-48a7-acc9-f05288591da3.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/images_for_glob_test/00_mcf-z-stacks-03212011_n16_s2_w12a4361b7-56a0-48a7-acc9-f05288591da3.tif -------------------------------------------------------------------------------- /tests/data/images_for_glob_test/00_mcf-z-stacks-03212011_p09_s2_w12c20e8b2-d191-40a8-b8a3-d52d13e65aab.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/images_for_glob_test/00_mcf-z-stacks-03212011_p09_s2_w12c20e8b2-d191-40a8-b8a3-d52d13e65aab.tif -------------------------------------------------------------------------------- /tests/data/images_for_glob_test/00_mcf-z-stacks-03212011_p24_s2_w1283f1213-dc59-40b5-ae21-9d66400a5f20.tif: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/images_for_glob_test/00_mcf-z-stacks-03212011_p24_s2_w1283f1213-dc59-40b5-ae21-9d66400a5f20.tif -------------------------------------------------------------------------------- /tests/data/images_for_glob_test/README: -------------------------------------------------------------------------------- 1 | Test images here are modified versions of images from image set BBBC006v1 from the Broad Bioimage Benchmark Collection [Ljosa et al., Nature Methods, 2012], used with permission. -------------------------------------------------------------------------------- /tests/data/psf.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/psf.png -------------------------------------------------------------------------------- /tests/data/test/1.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/test/1.tif -------------------------------------------------------------------------------- /tests/data/test/2.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/test/2.tif -------------------------------------------------------------------------------- /tests/data/training/0/00_mcf-z-stacks-03212011_a01_s1_w1a571d43d-5554-47fd-bb54-5db68290e5a7.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/training/0/00_mcf-z-stacks-03212011_a01_s1_w1a571d43d-5554-47fd-bb54-5db68290e5a7.tif -------------------------------------------------------------------------------- /tests/data/training/1/00_mcf-z-stacks-03212011_b01_s2_w12e721516-a202-4647-9f19-fd73ff016ce6.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/training/1/00_mcf-z-stacks-03212011_b01_s2_w12e721516-a202-4647-9f19-fd73ff016ce6.tif -------------------------------------------------------------------------------- /tests/data/training/10/00_mcf-z-stacks-03212011_h09_s2_w126c275bc-9c3e-40af-b3ca-54f91091d353.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/training/10/00_mcf-z-stacks-03212011_h09_s2_w126c275bc-9c3e-40af-b3ca-54f91091d353.tif -------------------------------------------------------------------------------- /tests/data/training/2/00_mcf-z-stacks-03212011_a19_s2_w12703a47b-7574-461a-85e2-afc38bc0adae.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/training/2/00_mcf-z-stacks-03212011_a19_s2_w12703a47b-7574-461a-85e2-afc38bc0adae.tif -------------------------------------------------------------------------------- 
/tests/data/training/3/00_mcf-z-stacks-03212011_b14_s2_w126875e94-8541-482c-8aea-09f2e0b1e2ce.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/training/3/00_mcf-z-stacks-03212011_b14_s2_w126875e94-8541-482c-8aea-09f2e0b1e2ce.tif -------------------------------------------------------------------------------- /tests/data/training/4/00_mcf-z-stacks-03212011_c06_s2_w123356693-a029-4886-9735-9e3ca50eb8d8.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/training/4/00_mcf-z-stacks-03212011_c06_s2_w123356693-a029-4886-9735-9e3ca50eb8d8.tif -------------------------------------------------------------------------------- /tests/data/training/5/00_mcf-z-stacks-03212011_d02_s2_w122501c04-ccbe-4fc4-bb08-924ccccaad58.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/training/5/00_mcf-z-stacks-03212011_d02_s2_w122501c04-ccbe-4fc4-bb08-924ccccaad58.tif -------------------------------------------------------------------------------- /tests/data/training/6/00_mcf-z-stacks-03212011_d10_s2_w129ae2539-fdfc-4e29-9215-03a79115d678.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/training/6/00_mcf-z-stacks-03212011_d10_s2_w129ae2539-fdfc-4e29-9215-03a79115d678.tif -------------------------------------------------------------------------------- /tests/data/training/7/00_mcf-z-stacks-03212011_e10_s2_w12490a9b6-6991-4eac-a93a-123978224209.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/training/7/00_mcf-z-stacks-03212011_e10_s2_w12490a9b6-6991-4eac-a93a-123978224209.tif -------------------------------------------------------------------------------- /tests/data/training/8/00_mcf-z-stacks-03212011_e24_s2_w1241c3e73-1e5a-4121-b7b5-02af37510046.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/training/8/00_mcf-z-stacks-03212011_e24_s2_w1241c3e73-1e5a-4121-b7b5-02af37510046.tif -------------------------------------------------------------------------------- /tests/data/training/9/00_mcf-z-stacks-03212011_h01_s2_w12ab2c20a-2393-4718-8f3d-a4b9bb17a47f.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/tests/data/training/9/00_mcf-z-stacks-03212011_h01_s2_w12ab2c20a-2393-4718-8f3d-a4b9bb17a47f.tif -------------------------------------------------------------------------------- /tests/test_data_provider.py: -------------------------------------------------------------------------------- 1 | import os 2 | import tempfile 3 | 4 | import numpy 5 | import numpy.testing 6 | import skimage.io 7 | import tensorflow 8 | import tensorflow.contrib.slim 9 | 10 | import 
microscopeimagequality.data_provider 11 | 12 | TFRECORD_NUM_ENTRIES = 33 13 | 14 | TFRECORD_NUM_CLASSES = 3 15 | 16 | TFRECORD_LABEL_ORDERING = [1, 1, 1, 1, 1, 2, 0, 0, 2, 2, 2, 2, 0, 0, 2, 1, 0, 2, 0, 1, 2, 0, 2, 2, 0, 1, 0, 1, 1, 2, 0, 0, 1] 17 | 18 | input_directory = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data") 19 | 20 | test_dir = tempfile.mkdtemp() 21 | 22 | batch_size = TFRECORD_NUM_ENTRIES 23 | 24 | # For a patch size of 28, we have 324 patches per image in this tfrecord. 25 | patches_per_image = 324 26 | 27 | num_classes = TFRECORD_NUM_CLASSES 28 | 29 | tfrecord_file_pattern = os.path.join(input_directory, "data_%s.tfrecord") 30 | 31 | image_width = 520 32 | 33 | image_height = 520 34 | 35 | 36 | def test_get_filename_num_records(): 37 | tf_record_path = "/folder/filename.tfrecord" 38 | path = microscopeimagequality.data_provider.get_filename_num_records(tf_record_path) 39 | expected_path = "/folder/filename.num_records" 40 | assert expected_path == path 41 | 42 | 43 | def test_get_num_records(): 44 | tf_record_path = os.path.join(input_directory, "data_train.tfrecord") 45 | num_records = microscopeimagequality.data_provider.get_num_records(tf_record_path) 46 | expected_num_records = TFRECORD_NUM_ENTRIES 47 | assert expected_num_records == num_records 48 | 49 | 50 | def save16_bit_png(filename, im): 51 | path = os.path.join(test_dir, filename) 52 | skimage.io.imsave(path, im, "pil") 53 | 54 | 55 | def get_tf_session(graph): 56 | sv = tensorflow.train.Supervisor(logdir=os.path.join(test_dir, "tmp_logs/")) 57 | sess = sv.PrepareSession("") 58 | sv.StartQueueRunners(sess, graph.get_collection(tensorflow.GraphKeys.QUEUE_RUNNERS)) 59 | return sess 60 | 61 | 62 | def get_data_from_tfrecord(): 63 | """Helper function that gets image, label tensors from tfrecord.""" 64 | split_name = "train" 65 | num_records = microscopeimagequality.data_provider.get_num_records(tfrecord_file_pattern % split_name) 66 | assert TFRECORD_NUM_ENTRIES == num_records 67 | dataset = microscopeimagequality.data_provider.get_split(split_name, tfrecord_file_pattern, num_classes=num_classes, image_width=image_width, image_height=image_height) 68 | provider = tensorflow.contrib.slim.dataset_data_provider.DatasetDataProvider(dataset, common_queue_capacity=2 * batch_size, common_queue_min=batch_size, shuffle=False) 69 | [image, label, image_path] = provider.get([microscopeimagequality.data_provider.FEATURE_IMAGE, microscopeimagequality.data_provider.FEATURE_IMAGE_CLASS, microscopeimagequality.data_provider.FEATURE_IMAGE_PATH ]) 70 | return image, label, image_path 71 | 72 | 73 | def test_get_split(): 74 | g = tensorflow.Graph() 75 | with g.as_default(): 76 | image, label, image_path = get_data_from_tfrecord() 77 | 78 | sess = get_tf_session(g) 79 | 80 | # Check that the tensor shapes are as expected. 81 | np_image, np_label, np_image_path = sess.run([image, label, image_path]) 82 | numpy.testing.assert_array_equal(list(np_image.shape), [microscopeimagequality.data_provider.IMAGE_WIDTH, microscopeimagequality.data_provider.IMAGE_WIDTH, 1]) 83 | numpy.testing.assert_array_equal(list(np_label.shape), [num_classes]) 84 | numpy.testing.assert_array_equal([1], list(np_image_path.shape)) 85 | assert 22 == len(np_image_path[0]) 86 | 87 | # Write the image for viewing. 
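# The provider yields float images normalized to [0, 1], so multiplying by 65535 restores the full 16-bit range before saving.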
88 | im = (numpy.squeeze(np_image) * 65535).astype(numpy.uint16) 89 | image_class = numpy.argmax(np_label) 90 | save16_bit_png("single_im_from_tfrecord_%g.png" % image_class, im) 91 | 92 | 93 | def test_batching(): 94 | g = tensorflow.Graph() 95 | with g.as_default(): 96 | image, label, image_path = get_data_from_tfrecord() 97 | 98 | # Expand since get_batches() requires a larger dimension tensor. 99 | expanded_label = tensorflow.expand_dims(label, dim=0) 100 | expanded_image = tensorflow.expand_dims(image, dim=0) 101 | expanded_image_path = tensorflow.expand_dims(image_path, dim=0) 102 | 103 | images, labels, image_paths = microscopeimagequality.data_provider.get_batches(expanded_image, expanded_label, expanded_image_path, batch_size=batch_size, num_threads=1) 104 | 105 | sess = get_tf_session(g) 106 | 107 | [np_images, np_labels, np_image_paths] = sess.run([images, labels, image_paths]) 108 | 109 | # Check the number of images and shape is as expected. 110 | numpy.testing.assert_array_equal(list(np_images.shape), [batch_size, microscopeimagequality.data_provider.IMAGE_WIDTH, microscopeimagequality.data_provider.IMAGE_WIDTH, 1 ]) 111 | numpy.testing.assert_array_equal([batch_size, 1], list(np_image_paths.shape)) 112 | assert 1 == len(np_image_paths[0]) 113 | assert b"image_000" == os.path.basename(np_image_paths[0][0]) 114 | 115 | # Check the ordering of labels in a single batch (which is preserved 116 | # since we used num_threads=1). 117 | image_classes = numpy.argmax(np_labels, axis=1).tolist() 118 | 119 | numpy.testing.assert_array_equal(image_classes, TFRECORD_LABEL_ORDERING) 120 | 121 | 122 | def test_get_image_patch_tensor(): 123 | patch_width = 280 124 | g = tensorflow.Graph() 125 | with g.as_default(): 126 | image, label, image_path = get_data_from_tfrecord() 127 | patch, label, image_path = microscopeimagequality.data_provider.get_image_patch_tensor(image, label, image_path, patch_width=patch_width) 128 | 129 | sess = get_tf_session(g) 130 | 131 | [np_patch, np_label, np_image_path] = sess.run([patch, label, image_path]) 132 | 133 | # Check that the tensor shapes are as expected. 134 | numpy.testing.assert_array_equal(list(np_patch.shape), [1, patch_width, patch_width, 1]) 135 | numpy.testing.assert_array_equal(list(np_label.shape), [1, num_classes]) 136 | numpy.testing.assert_array_equal([1, 1], list(np_image_path.shape)) 137 | 138 | # Write the image for viewing. 
139 | im = (numpy.squeeze(np_patch) * 65535).astype(numpy.uint16) 140 | save16_bit_png("single_random_patch_from_tfrecord.png", im) 141 | 142 | 143 | def test_apply_random_brightness_adjust(): 144 | g = tensorflow.Graph() 145 | with g.as_default(): 146 | image, _, _ = get_data_from_tfrecord() 147 | factor = 2.0 148 | patch = microscopeimagequality.data_provider.apply_random_brightness_adjust(image, factor, factor) 149 | 150 | sess = get_tf_session(g) 151 | 152 | [np_patch, np_image] = sess.run([patch, image]) 153 | 154 | numpy.testing.assert_array_equal(list(np_patch.shape), list(np_image.shape)) 155 | numpy.testing.assert_array_equal(np_image * factor, np_patch) 156 | 157 | 158 | def test_get_image_tiles_tensor(): 159 | patch_width = 100 160 | g = tensorflow.Graph() 161 | with g.as_default(): 162 | image, label, image_path = get_data_from_tfrecord() 163 | tiles, labels, image_paths = microscopeimagequality.data_provider.get_image_tiles_tensor(image, label, image_path, patch_width=patch_width) 164 | 165 | sess = get_tf_session(g) 166 | 167 | [np_tiles, np_labels, np_image_paths] = sess.run([tiles, labels, image_paths]) 168 | 169 | # Check that the tensor shapes are as expected. 170 | num_tiles_expected = 25 171 | numpy.testing.assert_array_equal(list(np_tiles.shape), [num_tiles_expected, patch_width, patch_width, 1]) 172 | numpy.testing.assert_array_equal(list(np_labels.shape), [num_tiles_expected, num_classes]) 173 | numpy.testing.assert_array_equal([num_tiles_expected, 1], list(np_image_paths.shape)) 174 | 175 | 176 | def test_get_image_tiles_tensor_non_square(): 177 | patch_width = 100 178 | g = tensorflow.Graph() 179 | with g.as_default(): 180 | image = tensorflow.zeros([patch_width * 4, patch_width * 3, 1]) 181 | label = tensorflow.constant([0, 0, 1]) 182 | image_path = tensorflow.constant(["path"]) 183 | tiles, labels, image_paths = microscopeimagequality.data_provider.get_image_tiles_tensor(image, label, image_path, patch_width=patch_width) 184 | 185 | sess = get_tf_session(g) 186 | 187 | [np_tiles, np_labels, np_image_paths] = sess.run([tiles, labels, image_paths]) 188 | 189 | # Check that the tensor shapes are as expected. 
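# A 400 x 300 input tiled with 100-pixel patches gives 4 * 3 = 12 non-overlapping tiles.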
190 | num_tiles_expected = 12 191 | numpy.testing.assert_array_equal(list(np_tiles.shape), [num_tiles_expected, patch_width, patch_width, 1]) 192 | numpy.testing.assert_array_equal(list(np_labels.shape), [num_tiles_expected, num_classes]) 193 | numpy.testing.assert_array_equal([num_tiles_expected, 1], list(np_image_paths.shape)) 194 | 195 | 196 | def test_provide_data_with_random_patches(): 197 | images, one_hot_labels, image_paths, _ = microscopeimagequality.data_provider.provide_data(tfrecord_file_pattern, split_name="train", batch_size=batch_size, num_classes=num_classes, image_width=image_width, image_height=image_height, patch_width=28, randomize=True) 198 | 199 | assert images.get_shape().as_list(), [batch_size, 28, 28 == 1] 200 | assert one_hot_labels.get_shape().as_list(), [batch_size == num_classes] 201 | assert [batch_size, 1] == image_paths.get_shape().as_list() 202 | 203 | 204 | def test_provide_data_image_path(): 205 | g = tensorflow.Graph() 206 | with g.as_default(): 207 | _, _, image_paths, _ = microscopeimagequality.data_provider.provide_data(tfrecord_file_pattern, split_name="train", batch_size=patches_per_image, num_classes=3, image_width=image_width, image_height=image_height, patch_width=28, randomize=False, num_threads=1) 208 | 209 | sess = get_tf_session(g) 210 | 211 | [np_image_paths] = sess.run([image_paths]) 212 | 213 | filename_expected = b"image_000" 214 | assert 1 == len(np_image_paths[0]) 215 | assert filename_expected == os.path.basename(np_image_paths[0][0]) 216 | 217 | 218 | def test_provide_data_uniform_tiles(): 219 | g = tensorflow.Graph() 220 | with g.as_default(): 221 | images, one_hot_labels, _, _ = microscopeimagequality.data_provider.provide_data(tfrecord_file_pattern, split_name="train", batch_size=patches_per_image, num_classes=num_classes, image_width=image_width, image_height=image_height, patch_width=28, randomize=False) 222 | 223 | num_tiles_expected = patches_per_image 224 | assert images.get_shape().as_list(), [num_tiles_expected, 28, 28 == 1] 225 | assert one_hot_labels.get_shape().as_list(), [num_tiles_expected == num_classes] 226 | 227 | sess = get_tf_session(g) 228 | 229 | [np_images, np_labels] = sess.run([images, one_hot_labels]) 230 | assert np_labels.shape, (num_tiles_expected == num_classes) 231 | 232 | im = (numpy.squeeze(np_images[0, :, :, :]) * 65535).astype(numpy.uint16) 233 | save16_bit_png("first_tile_single_batch.png", im) 234 | 235 | 236 | def test_provide_data_with_deterministic_ordering(): 237 | # Use patches larger to speed up test, otherwise it will timeout. 238 | patch_size_factor = 3 239 | 240 | batch_size = patches_per_image / patch_size_factor ** 2 241 | 242 | g = tensorflow.Graph() 243 | 244 | with g.as_default(): 245 | images, one_hot_labels, image_paths, _ = microscopeimagequality.data_provider.provide_data(tfrecord_file_pattern, split_name="train", batch_size=batch_size, num_classes=num_classes, image_width=image_width, image_height=image_height, patch_width=28 * patch_size_factor, randomize=False, num_threads=1 ) 246 | 247 | sess = get_tf_session(g) 248 | 249 | # Here, we are looking at the first label across many batches, rather than 250 | # the ordering of labels in one batch, as in testBatching(). We check to 251 | # ensure the ordering is deterministic for num_threads = 1. 
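# With 84-pixel patches (28 * 3), each 520 x 520 image tiles into (520 // 84)**2 = 36 patches, so a batch of 324 / 9 = 36 holds the tiles of exactly one image.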
252 | image_classes = [] 253 | num_batches_tested = min(20, TFRECORD_NUM_ENTRIES) 254 | for i in range(num_batches_tested): 255 | [np_images, np_labels, np_image_paths] = sess.run([images, one_hot_labels, image_paths]) 256 | 257 | assert np_labels.shape, (batch_size == num_classes) 258 | 259 | # All class labels should be identical within this batch. 260 | image_class = numpy.argmax(np_labels, axis=1) 261 | assert numpy.all(image_class[0] == image_class) 262 | assert numpy.all(np_image_paths[0] == np_image_paths) 263 | image_classes.append(image_class[0]) 264 | 265 | im = (numpy.squeeze(np_images[0, :, :, :]) * 65535).astype(numpy.uint16) 266 | save16_bit_png("first_tile_per_batch_%g.png" % i, im) 267 | 268 | numpy.testing.assert_array_equal(image_classes, TFRECORD_LABEL_ORDERING[0:num_batches_tested]) 269 | -------------------------------------------------------------------------------- /tests/test_dataset_creation.py: -------------------------------------------------------------------------------- 1 | import os 2 | import tempfile 3 | 4 | import numpy 5 | import pytest 6 | 7 | import microscopeimagequality.dataset_creation 8 | 9 | input_directory = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data") 10 | 11 | test_dir = tempfile.mkdtemp() 12 | 13 | input_image_path = os.path.join(input_directory, "BBBC006_z_aligned__a01__s1__w1_10.png") 14 | 15 | input_image_path_tif = os.path.join(input_directory, "00_mcf-z-stacks-03212011_k06_s2_w12667264a-6432-4f7e-bf58-625a1319a1c9.tif") 16 | 17 | glob_images = os.path.join(input_directory, "images_for_glob_test/*") 18 | 19 | list_of_class_globs = [] 20 | 21 | num_classes = 3 22 | 23 | empty_directory = os.path.join(test_dir, "empty") 24 | 25 | image_width = 520 26 | 27 | image_height = 520 28 | 29 | for _ in range(num_classes): 30 | list_of_class_globs.append(glob_images) 31 | 32 | 33 | def test_dataset_randomize_runs(): 34 | dataset = microscopeimagequality.dataset_creation.Dataset(numpy.zeros((2, 2)), ["a", "b"], image_width, image_height) 35 | 36 | dataset.randomize() 37 | 38 | 39 | def test_datset_subsample_for_shard(): 40 | labels = numpy.array([[0, 1], [2, 3], [4, 5], [6, 7]]) 41 | 42 | image_paths = ["path"] * labels.shape[0] 43 | 44 | dataset = microscopeimagequality.dataset_creation.Dataset(labels, image_paths, image_width, image_height) 45 | 46 | dataset.subsample_for_shard(0, 2) 47 | 48 | numpy.testing.assert_array_equal(numpy.array([[0, 1], [4, 5]]), dataset.labels) 49 | 50 | 51 | def test_dataset_get_sample(): 52 | dataset = microscopeimagequality.dataset_creation.Dataset(numpy.zeros((2, 2)), [input_image_path, input_image_path], image_width, image_height) 53 | 54 | _, _, image_path = dataset.get_sample(0, True) 55 | 56 | assert input_image_path == image_path 57 | 58 | 59 | def test_dataset_to_examples_in_tfrecord_runs(): 60 | microscopeimagequality.dataset_creation.dataset_to_examples_in_tfrecord( 61 | list_of_class_globs, 62 | test_dir, 63 | output_tfrecord_filename="data_train.tfrecord", 64 | num_classes=num_classes, 65 | image_width=image_width, 66 | image_height=image_height 67 | ) 68 | 69 | 70 | def test_convert_to_examples_runs(): 71 | labels = numpy.array([[0.0, 1.0], [1.0, 0.0], [0.5, 0.5]], dtype=numpy.float32) 72 | image_paths = [input_image_path] * 3 73 | microscopeimagequality.dataset_creation.convert_to_examples( 74 | microscopeimagequality.dataset_creation.Dataset(labels, image_paths, image_width, image_height), 75 | output_directory=test_dir, 76 | output_tfrecord_filename="data_train.tfrecord" 77 | ) 78 | 79 
| 80 | def test_get_preprocesssed_image_runs(): 81 | image = microscopeimagequality.dataset_creation.get_preprocessed_image( 82 | input_image_path, 83 | image_background_value=0.0, 84 | image_brightness_scale=1.0, 85 | image_width=image_width, 86 | image_height=image_height, 87 | normalize=True 88 | ) 89 | 90 | pytest.approx((520, 520), image.shape) 91 | 92 | 93 | def test_normalize_image(): 94 | image = microscopeimagequality.dataset_creation.read_16_bit_greyscale(input_image_path) 95 | 96 | image_normalized = microscopeimagequality.dataset_creation.normalize_image(image) 97 | 98 | expected_mean = numpy.mean(image) * 496.283426445 * microscopeimagequality.dataset_creation._FOREGROUND_MEAN 99 | 100 | assert numpy.abs(expected_mean - numpy.mean(image_normalized)) < 1e-6 101 | 102 | 103 | def test_normalize_image_no_foreground(): 104 | image = numpy.zeros((100, 100), dtype=numpy.float32) 105 | 106 | image_normalized = microscopeimagequality.dataset_creation.normalize_image(image) 107 | 108 | assert 0.0 == numpy.mean(image_normalized) 109 | 110 | 111 | def test_generate_tf_example_runs(): 112 | image = numpy.ones((100, 100), dtype=numpy.float32) 113 | 114 | label = numpy.array([0.0, 1.0], dtype=numpy.float32) 115 | 116 | image_path = "directory/filename.extension" 117 | 118 | _ = microscopeimagequality.dataset_creation.generate_tf_example(image, label, image_path) 119 | 120 | 121 | def test_read16_bit_greyscale_png(): 122 | image = microscopeimagequality.dataset_creation.read_16_bit_greyscale(input_image_path) 123 | 124 | assert image.shape, (520 == 696) 125 | 126 | pytest.approx(numpy.max(image), 3252.0 / 65535) 127 | 128 | assert image.dtype == numpy.float32 129 | 130 | 131 | def test_read16_bit_greyscale_tif(): 132 | image = microscopeimagequality.dataset_creation.read_16_bit_greyscale(input_image_path_tif) 133 | 134 | assert image.shape, (520 == 696) 135 | 136 | pytest.approx(numpy.max(image), 1135.0 / 65535) 137 | 138 | assert image.dtype == numpy.float32 139 | 140 | 141 | def test_get_image_paths(): 142 | paths = microscopeimagequality.dataset_creation.get_image_paths(os.path.join(input_directory, "images_for_glob_test"), 100) 143 | 144 | for path in paths: 145 | extension = os.path.splitext(path)[1] 146 | 147 | assert extension == ".png" or extension == ".tif", "path is %s" % path 148 | 149 | assert 24 == len(paths) 150 | 151 | 152 | def test_image_size_from_glob(): 153 | image_size = microscopeimagequality.dataset_creation.image_size_from_glob(input_image_path, 84) 154 | 155 | assert 504 == image_size.height 156 | 157 | assert 672 == image_size.width 158 | 159 | 160 | def test_get_images_from_glob(): 161 | paths = microscopeimagequality.dataset_creation.get_images_from_glob(glob_images, 100) 162 | 163 | for path in paths: 164 | assert os.path.splitext(path)[1] == ".png" or os.path.splitext(path)[1] == ".tif", "path is %s" % path 165 | 166 | assert 24 == len(paths) 167 | 168 | 169 | def test_read_labeled_dataset_without_patches(): 170 | max_images = 3 171 | 172 | dataset = microscopeimagequality.dataset_creation.read_labeled_dataset(list_of_class_globs, max_images, num_classes, image_width, image_height) 173 | 174 | num_images_expected = (max_images * num_classes) 175 | 176 | assert dataset.labels.shape, (num_images_expected == num_classes) 177 | 178 | assert num_images_expected == len(dataset.image_paths) 179 | 180 | 181 | def test_read_unlabeled_dataset(): 182 | max_images = 3 183 | 184 | num_classes = 5 185 | 186 | dataset = 
microscopeimagequality.dataset_creation.read_unlabeled_dataset([glob_images], max_images, num_classes, image_width, image_height) 187 | 188 | num_images_expected = max_images 189 | 190 | assert dataset.labels.shape, (num_images_expected == num_classes) 191 | 192 | assert num_images_expected == len(dataset.image_paths) 193 | -------------------------------------------------------------------------------- /tests/test_degrade.py: -------------------------------------------------------------------------------- 1 | import os 2 | import tempfile 3 | 4 | import numpy 5 | import skimage.io 6 | import tensorflow 7 | 8 | import microscopeimagequality.dataset_creation 9 | import microscopeimagequality.degrade 10 | 11 | test_data_directory = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data") 12 | 13 | test_dir = tempfile.mkdtemp() 14 | 15 | tensorflow.logging.info("Loaded test data") 16 | 17 | degrader = microscopeimagequality.degrade.ImageDegrader(random_seed=0, photoelectron_factor=65535.0, sensor_offset_in_photoelectrons=0.0) 18 | 19 | 20 | def get_test_image(name): 21 | path = os.path.join(test_data_directory, name) 22 | 23 | return microscopeimagequality.dataset_creation.read_16_bit_greyscale(path) 24 | 25 | 26 | def test_set_exposure_golden(): 27 | exposure_factor = 100.0 28 | 29 | image = get_test_image("cell_image.tiff") 30 | 31 | exposure_adjusted_image = degrader.set_exposure(image, exposure_factor) 32 | 33 | # Check image is saturated. 34 | assert 1.0 == numpy.max(exposure_adjusted_image) 35 | 36 | expected_image = get_test_image("cell_image_saturated.png") 37 | 38 | numpy.testing.assert_almost_equal(expected_image, exposure_adjusted_image, 4) 39 | 40 | 41 | def test_set_exposure_golden2(): 42 | exposure_factor = 0.0001 43 | 44 | image = get_test_image("cell_image.tiff") 45 | 46 | exposure_adjusted_image = degrader.set_exposure(image, exposure_factor) 47 | 48 | numpy.testing.assert_almost_equal(exposure_factor, numpy.max(exposure_adjusted_image) / numpy.max(image), 4) 49 | 50 | 51 | def test_set_exposure_with_offset_golden(): 52 | exposure_factor = 100.0 53 | 54 | image = get_test_image("cell_image.tiff") 55 | 56 | degrader = microscopeimagequality.degrade.ImageDegrader(random_seed=0, photoelectron_factor=65535.0, sensor_offset_in_photoelectrons=100.0) 57 | 58 | exposure_adjusted_image = degrader.set_exposure(image, exposure_factor) 59 | 60 | # Check image is saturated. 
61 | assert 1.0 == numpy.max(exposure_adjusted_image) 62 | 63 | expected_image = get_test_image("cell_image_saturated_with_offset.png") 64 | 65 | numpy.testing.assert_almost_equal(expected_image, exposure_adjusted_image, 4) 66 | 67 | 68 | def test_set_exposure_no_exposure_change(): 69 | exposure_factor = 1.0 70 | 71 | image = get_test_image("cell_image.tiff") 72 | 73 | exposure_adjusted_image = degrader.set_exposure(image, exposure_factor) 74 | 75 | numpy.testing.assert_almost_equal(image, exposure_adjusted_image, 4) 76 | 77 | 78 | def test_apply_poisson_noise(): 79 | image = get_test_image("cell_image.tiff") 80 | 81 | noisy_image = degrader.random_noise(image) 82 | 83 | expected_image = get_test_image("cell_image_poisson_noise_py.png") 84 | 85 | numpy.testing.assert_almost_equal(expected_image, noisy_image) 86 | 87 | 88 | def test_get_airy_psf(): 89 | image = get_test_image("cell_image.tiff") 90 | 91 | psf = get_test_image("psf.png") 92 | 93 | blurred_image = degrader.apply_blur_kernel(image, psf) 94 | 95 | expected_image = get_test_image("cell_image_airy_blurred.png") 96 | 97 | numpy.testing.assert_almost_equal(expected_image, blurred_image, 4) 98 | 99 | 100 | def test_evaluate_airy_psf_at_point(): 101 | psf_value = microscopeimagequality.degrade.get_airy_psf(1, 1e-6, 0.0, 500e-9, 0.5, 1.0, False)[0] 102 | 103 | numpy.testing.assert_almost_equal(psf_value, .25, 5) 104 | 105 | psf_value = microscopeimagequality.degrade.get_airy_psf(1, 1e-6, 1e-6, 500e-9, 0.5, 1.0, False)[0] 106 | 107 | numpy.testing.assert_almost_equal(psf_value, .20264, 5) 108 | 109 | psf_value = microscopeimagequality.degrade.get_airy_psf(3, 3e-6, 0.0, 500e-9, 0.5, 1.0, False)[0, 1] 110 | 111 | numpy.testing.assert_almost_equal(psf_value, .00114255, 7) 112 | 113 | 114 | def test_get_airy_psf_golden(): 115 | psf = microscopeimagequality.degrade.get_airy_psf(21, 5e-6, 4.0e-6, 500e-9, 0.5, 1.0) 116 | 117 | expected_psf = get_test_image("psf.png") 118 | 119 | numpy.testing.assert_almost_equal(expected_psf, psf, 4) 120 | 121 | 122 | def test_get_airy_psf_golden_zero_depth(): 123 | psf = microscopeimagequality.degrade.get_airy_psf(5, 5e-6, 0.0e-6, 500e-9, 0.5, 1.0) 124 | 125 | # This should be a delta function for large enough pixel sizes. 
126 | expected_psf = numpy.zeros((5, 5)) 127 | 128 | expected_psf[2, 2] = 1.0 129 | 130 | numpy.testing.assert_almost_equal(expected_psf, psf, 2) 131 | 132 | 133 | def test_read_write_png(): 134 | image = get_test_image("cell_image.tiff") 135 | output_path = os.path.join(test_dir, "cell_image2.png") 136 | 137 | skimage.io.imsave(output_path, image, "pil") 138 | 139 | image2 = microscopeimagequality.dataset_creation.read_16_bit_greyscale(output_path) 140 | 141 | numpy.testing.assert_almost_equal(image, image2, 4) 142 | 143 | 144 | def test_degrade_images(): 145 | glob = os.path.join(test_data_directory, "cell_image.tiff*") 146 | 147 | output_path = test_dir 148 | 149 | microscopeimagequality.degrade.degrade_images(glob, output_path, 20e-6, 1.0, 0, 65535, 0, psf_width_pixels=21, pixel_size_meters=5e-6 / 21) 150 | 151 | degraded_image = microscopeimagequality.dataset_creation.read_16_bit_greyscale(os.path.join(output_path, "cell_image.png")) 152 | 153 | expected_image = get_test_image("cell_image_degraded.png") 154 | 155 | numpy.testing.assert_almost_equal(expected_image, degraded_image, 4) 156 | 157 | 158 | def test_degrade_images_no_change(): 159 | glob = os.path.join(test_data_directory, "cell_image.tiff*") 160 | 161 | output_path = os.path.join(test_dir, 'no_change') 162 | 163 | microscopeimagequality.degrade.degrade_images(glob, output_path, 0e-6, 1.0, 0, 65535, 0, psf_width_pixels=21, pixel_size_meters=40e-6 / 21, skip_apply_poisson_noise=True) 164 | 165 | degraded_image = microscopeimagequality.dataset_creation.read_16_bit_greyscale(os.path.join(output_path, "cell_image.png")) 166 | 167 | expected_image = get_test_image("cell_image.tiff") 168 | 169 | numpy.testing.assert_almost_equal(expected_image, degraded_image, 4) 170 | -------------------------------------------------------------------------------- /tests/test_inference.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import tempfile 4 | 5 | import PIL.Image 6 | import numpy 7 | import tensorflow 8 | 9 | import microscopeimagequality.constants 10 | import microscopeimagequality.data_provider 11 | import microscopeimagequality.evaluation 12 | import microscopeimagequality.prediction 13 | 14 | 15 | class Inference(tensorflow.test.TestCase): 16 | def setUp(self): 17 | self.input_directory = os.path.join(os.path.dirname(os.path.abspath(__file__)) 18 | , "data") 19 | self.test_data_directory = os.path.join(os.path.dirname(os.path.abspath(__file__)) 20 | , "data") 21 | self.test_dir = tempfile.mkdtemp() 22 | self.glob_images = os.path.join(self.input_directory, 'images_for_glob_test/*') 23 | 24 | self.patch_width = 84 25 | self.num_classes = 11 26 | 27 | def testPatchValuesToMask(self): 28 | values = numpy.round( 29 | numpy.array([[0.2, 0.4, 0.5], [1.0, 0.0, 0.3]]) * 30 | numpy.iinfo(numpy.uint16).max).astype(numpy.uint16) 31 | mask = microscopeimagequality.prediction.patch_values_to_mask(values, self.patch_width) 32 | self.assertEquals((168, 252), mask.shape) 33 | self.assertEquals(numpy.iinfo(numpy.uint16).max, numpy.max(mask)) 34 | 35 | def testSaveMasksAndAnnotatedVisualization(self): 36 | test_filename = 'BBBC006_z_aligned__a01__s1__w1_10.png' 37 | orig_name = os.path.join(self.test_data_directory, test_filename) 38 | prediction = 1 39 | certainties = {name: 0.3 for name in microscopeimagequality.evaluation.CERTAINTY_NAMES} 40 | num_patches = 4 41 | np_images = numpy.ones((num_patches, self.patch_width, self.patch_width, 1)) 42 | np_probabilities = numpy.ones( 
43 | (num_patches, self.num_classes)) / self.num_classes 44 | np_probabilities[0, :] = 0 45 | np_probabilities[0, 1] = 1.0 46 | np_probabilities[1, :] = 0 47 | np_probabilities[1, 2] = 0.4 48 | np_probabilities[1, -1] = 0.6 49 | np_labels = 2 * numpy.ones(num_patches) 50 | image_height = int(numpy.sqrt(num_patches)) * self.patch_width 51 | image_width = image_height 52 | 53 | microscopeimagequality.prediction.save_masks_and_annotated_visualization( 54 | orig_name, self.test_dir, prediction, certainties, np_images, 55 | np_probabilities, np_labels, self.patch_width, image_height, 56 | image_width) 57 | 58 | # Check that output has been generated and is the correct shape. 59 | expected_size = PIL.Image.open(orig_name, 'r').size 60 | expected_visualization_path = os.path.join( 61 | self.test_dir, 62 | 'actual2_pred1_mean_certainty=0.300orig_name=%s' % test_filename) 63 | expected_predictions_path = os.path.join(self.test_dir, 64 | microscopeimagequality.constants.PREDICTIONS_MASK_FORMAT % 65 | test_filename) 66 | expected_certainties_path = os.path.join(self.test_dir, 67 | microscopeimagequality.constants.CERTAINTY_MASK_FORMAT % 68 | test_filename) 69 | expected_valid_path = os.path.join(self.test_dir, 70 | microscopeimagequality.constants.VALID_MASK_FORMAT % 71 | test_filename) 72 | 73 | img = PIL.Image.open(expected_visualization_path, 'r') 74 | self.assertEquals(expected_size, img.size) 75 | 76 | img = PIL.Image.open(expected_predictions_path, 'r') 77 | self.assertEquals(expected_size, img.size) 78 | 79 | img = PIL.Image.open(expected_certainties_path, 'r') 80 | self.assertEquals(expected_size, img.size) 81 | 82 | img = PIL.Image.open(expected_valid_path, 'r') 83 | self.assertEquals(expected_size, img.size) 84 | 85 | def testSaveMasksAndAnnotatedVisualizationTif(self): 86 | test_filename = ('00_mcf-z-stacks-03212011_k06_s2_w12667264a' 87 | '-6432-4f7e-bf58-625a1319a1c9.tif') 88 | orig_name = os.path.join(self.test_data_directory, test_filename) 89 | prediction = 1 90 | certainties = {name: 0.3 for name in microscopeimagequality.evaluation.CERTAINTY_NAMES} 91 | num_patches = 4 92 | np_images = numpy.ones((num_patches, self.patch_width, self.patch_width, 1)) 93 | np_probabilities = numpy.ones( 94 | (num_patches, self.num_classes)) / self.num_classes 95 | image_height = int(numpy.sqrt(num_patches)) * self.patch_width 96 | image_width = image_height 97 | 98 | np_labels = 2 * numpy.ones(num_patches) 99 | 100 | microscopeimagequality.prediction.save_masks_and_annotated_visualization( 101 | orig_name, self.test_dir, prediction, certainties, np_images, 102 | np_probabilities, np_labels, self.patch_width, image_height, 103 | image_width) 104 | 105 | mask_formats = [ 106 | microscopeimagequality.constants.CERTAINTY_MASK_FORMAT, microscopeimagequality.constants.PREDICTIONS_MASK_FORMAT, 107 | microscopeimagequality.constants.VALID_MASK_FORMAT 108 | ] 109 | for mask_format in mask_formats: 110 | orig_name_png = os.path.splitext(os.path.basename(orig_name))[0] + '.png' 111 | expected_file = os.path.join(self.test_dir, 112 | mask_format % orig_name_png) 113 | self.assertTrue(os.path.isfile(expected_file)) 114 | 115 | def testRunModelInferenceFirstHalfRuns(self): 116 | batch_size = 1 117 | num_classes = 11 118 | model_patch_width = 84 119 | image_width = 84 120 | image_height = 84 121 | 122 | tfexamples_tfrecord = microscopeimagequality.prediction.build_tfrecord_from_pngs( 123 | [self.glob_images], 124 | use_unlabeled_data=True, 125 | num_classes=num_classes, 126 | eval_directory=self.test_dir, 127 | 
image_background_value=0, 128 | image_brightness_scale=1, 129 | shard_num=0, 130 | num_shards=1, 131 | image_width=image_width, 132 | image_height=image_height) 133 | 134 | num_samples = microscopeimagequality.data_provider.get_num_records(tfexamples_tfrecord % 135 | microscopeimagequality.prediction._SPLIT_NAME) 136 | 137 | logging.info('TFRecord has %g samples.', num_samples) 138 | 139 | g = tensorflow.Graph() 140 | with g.as_default(): 141 | images, one_hot_labels, _, _ = microscopeimagequality.data_provider.provide_data( 142 | tfexamples_tfrecord, 143 | split_name=microscopeimagequality.prediction._SPLIT_NAME, 144 | batch_size=batch_size, 145 | num_classes=num_classes, 146 | image_width=84, 147 | image_height=84, 148 | patch_width=model_patch_width, 149 | randomize=False, 150 | num_threads=1) 151 | 152 | labels = microscopeimagequality.evaluation.get_model_and_metrics( 153 | images, 154 | num_classes=num_classes, 155 | one_hot_labels=one_hot_labels, 156 | is_training=False).labels 157 | 158 | self.assertEquals(batch_size, labels.get_shape()) 159 | -------------------------------------------------------------------------------- /tests/test_miq.py: -------------------------------------------------------------------------------- 1 | import tensorflow 2 | import tensorflow.contrib.slim 3 | 4 | import microscopeimagequality.miq 5 | 6 | 7 | class MiqTest(tensorflow.test.TestCase): 8 | def test_add_loss_training_runs(self): 9 | with self.test_session(): 10 | targets = tensorflow.constant([[1.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]) 11 | 12 | inputs = tensorflow.constant([[0.7, 0.3, 0.0], [0.9, 0.1, 0.0], [0.6, 0.4, 0.0], [0.0, 0.4, 0.6]]) 13 | 14 | predictions = tensorflow.contrib.layers.fully_connected(inputs, 3) 15 | 16 | microscopeimagequality.miq.add_loss(targets, predictions, use_rank_loss=True) 17 | 18 | total_loss = tensorflow.losses.get_total_loss() 19 | 20 | tensorflow.summary.scalar("Total Loss", total_loss) 21 | 22 | optimizer = tensorflow.train.AdamOptimizer(0.000001) 23 | 24 | # Set up training. 25 | train_op = tensorflow.contrib.slim.learning.create_train_op(total_loss, optimizer) 26 | 27 | # Run training. 
28 |             tensorflow.contrib.slim.learning.train(train_op, None, number_of_steps=5, log_every_n_steps=5)
29 | 
--------------------------------------------------------------------------------
/tests/test_validation.py:
--------------------------------------------------------------------------------
1 | import os
2 | 
3 | import pytest
4 | 
5 | import microscopeimagequality.validation
6 | 
7 | directory = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
8 | 
9 | pathname = os.path.join(directory, "BBBC006_z_aligned__a01__s1__w1_10.png")
10 | 
11 | 
12 | def test_check_duplicate_image_name_runs():
13 |     microscopeimagequality.validation.check_duplicate_image_name(["/a/b.c", "/d/e.f"])
14 | 
15 | 
16 | def test_check_duplicate_image_name_same_name():
17 |     with pytest.raises(ValueError):
18 |         microscopeimagequality.validation.check_duplicate_image_name(["/a/b.c", "/a/b.c"])
19 | 
20 | 
21 | def test_check_duplicate_image_name_different_path_and_extension():
22 |     with pytest.raises(ValueError):
23 |         microscopeimagequality.validation.check_duplicate_image_name(["/a/b.c", "/d/b.f"])
24 | 
25 | 
26 | def test_check_image_dimensions_runs():
27 |     microscopeimagequality.validation.check_image_dimensions([pathname], 10, 10)
28 | 
29 | 
30 | def test_check_image_dimensions_image_too_small():
31 |     with pytest.raises(ValueError):
32 |         microscopeimagequality.validation.check_image_dimensions([pathname], 1e4, 1e4)
33 | 
--------------------------------------------------------------------------------
/wellmontagefijimacro/README.md:
--------------------------------------------------------------------------------
1 | # Near Real-Time Image Analysis Macro
2 | Oftentimes in high-content imaging applications, large volumes of microscopy images (e.g. up to 1 terabyte) may be acquired across the many "wells" of a microtiter plate (e.g. a 96-well plate) using automated microscopy. However, it is challenging to manually inspect the quality of such image datasets, either exhaustively or by spot checking, in near real time, while the well plate could still practically be re-imaged.
3 | 
4 | In previous work, we developed a [Microscope Image Focus Quality Classifier](https://github.com/google/microscopeimagequality) and a corresponding [plugin for Fiji](https://imagej.net/Microscope_Focus_Quality), which uses a pre-trained deep neural network to rate the focus quality of each 84x84 pixel crop of a single microscope image. This analysis macro is an extension of that work, enabling the model to rate the focus quality of randomly sampled crops from a larger image dataset.
5 | 
6 | This macro can generate whole-plate montages per channel for inspecting image quality, where random image crops are sampled from each well on a well plate. A colored border added around each crop denotes its focus quality, as rated by the Microscope Image Focus Quality Classifier plugin that this macro utilizes. The macro can generate representative montages for 1 terabyte of images in just 30 minutes.
7 | 
8 | ### Example output
9 | 
10 | ![example result](example_result.jpg)
11 | 
12 | ### Example set of input images
13 | A sample dataset to test the macro can be found [here (300MB)](https://storage.googleapis.com/nyscfgas/nyscf3_data_sources_external/TestDataSet2Channels.zip).
14 | 
15 | ### Requirements
16 | This Fiji (ImageJ) macro requires both [Fiji](https://imagej.net/Fiji) and the [Microscope Image Focus Quality Classifier](https://imagej.net/Microscope_Focus_Quality) plugin.
17 | 
18 | Input images must live in a single directory.
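
The filename pattern, channel names, and plate layout that the macro expects are configured through variables at the top of `wellmontagefijimacro.ijm` (see also "Running macro" below). As a quick reference, the defaults checked into this repository look roughly like the sketch below; adjust them to match your own images and plate format before running:

```ijm
// Image specs (pixels); each sampled crop is three 84-pixel patch widths on a side.
image_width = 875;
image_height = 512;
crop_side_length = FOCUS_QUALITY_PLUGIN_PATCH_SIDE_LENGTH*3;

// Channel specs: the channel used for focus scoring, plus the full channel list.
focus_channel = "CY3-AGP";
channel_list = newArray("DAPI","CY3-AGP");

// Plate specs for a standard 96-well plate.
rows = newArray("A","B","C","D","E","F","G","H");
columns = newArray("01","02","03","04","05","06","07","08","09","10","11","12");
```

The macro also assumes filenames of the form `TestDataSet_WellA01_Site000_DAPI`, i.e. containing the well, site, and channel, as noted in the comments near the top of the macro.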
19 | ### Getting started
20 | #### Installing Microscope Image Focus Quality Classifier
21 | Please see the directions [here](https://imagej.net/Microscope_Focus_Quality.html#Installation).
22 | 
23 | #### Setting up and opening the macro
24 | 1. Download the macro and save it to the “macros” folder inside the Fiji.app folder.
25 | 1. Open Fiji.
26 | 1. Go to the “Plugins” tab.
27 | 1. Go to “Macros” > “Edit”.
28 | 1. Select the macro .ijm file.
29 | 
30 | ### Running macro
31 | At the top of the macro, there are variables that can be changed to suit your experiment.
32 | 
33 | Before running the macro, please make sure that these variables are correct.
34 | 
35 | Once you have verified the variables, hit “Run”. The first window to pop up states the requirements and assumptions of the macro.
36 | 
37 | The next window asks for the path to your data directory, the total number of sites to select from, whether you want to run only a focus analysis, and whether there are empty wells.
38 | 
39 | If you do not select the box to only analyze focus, a window will appear asking for which channels you would like an intensity montage. This list is based on the channel list defined in the variables at the top of the code. If you choose to only analyze focus, this window will not appear.
40 | 
41 | If you do select the box stating that there are empty wells, a window will appear that lets you choose which wells to analyze. This list is based on the row and column arrays defined in the variables at the top of the code.
42 | 
43 | 
--------------------------------------------------------------------------------
/wellmontagefijimacro/example_result.jpg:
--------------------------------------------------------------------------------
 https://raw.githubusercontent.com/google/microscopeimagequality/d77ee5f07537f6a753f909789e489adc1d2b6d9e/wellmontagefijimacro/example_result.jpg
--------------------------------------------------------------------------------
/wellmontagefijimacro/wellmontagefijimacro.ijm:
--------------------------------------------------------------------------------
1 | // This script generates whole-plate montages per channel for inspecting data quality.
2 | // Requires that the images already live in a single directory.
3 | 
4 | /*
5 |  * Random image patches are sampled (and resampled, if the patch contains no
6 |  * detectable objects). We use the final percentage score to calculate
7 |  * the actual focus score, excluding the blank patches.
8 | */ 9 | 10 | /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// 11 | 12 | /* 13 | * Settings 14 | */ 15 | 16 | FOCUS_QUALITY_PLUGIN_PATCH_SIDE_LENGTH = 84; //pixels, do not change this 17 | 18 | //image specs 19 | 20 | image_width = 875; // pixels 21 | image_height = 512; //pixels 22 | 23 | crop_side_length = FOCUS_QUALITY_PLUGIN_PATCH_SIDE_LENGTH*3; // pixels 24 | 25 | //channel specs 26 | focus_channel = "CY3-AGP"; 27 | channel_list = newArray("DAPI","CY3-AGP"); 28 | 29 | //Plate specs 30 | rows = newArray("A","B","C","D","E","F","G","H"); 31 | columns = newArray("01","02","03","04","05","06","07","08","09","10","11","12"); 32 | 33 | // filename format 34 | // This macro currently assumes the filenames are of the format: "TestDataSet_WellA01_Site000_DAPI" 35 | 36 | /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// 37 | 38 | /* FUNCTIONS */ 39 | 40 | function append(arr, value) { 41 | /* 42 | * Appends value to existing array 43 | * 44 | * Args: arr - array 45 | * value - element to add to array 46 | * 47 | */ 48 | arr2 = newArray(arr.length+1); 49 | for (i=0; i 0.1) { 133 | if (counter == random_iterations) { 134 | // max number of iterations 135 | break; 136 | } else { 137 | counter++; 138 | makeRectangle(randint(0, image_width - crop_side_length), randint(0,image_height - crop_side_length), crop_side_length, crop_side_length); //random crop 139 | getRawStatistics(nPixels, mean, min, max, std, histogram); 140 | } 141 | } 142 | run("Crop"); 143 | run("Enhance Contrast", "saturated=0.35"); 144 | } 145 | 146 | function random_tile_crop(well, channel, savepath, fileName, start, end, n) { 147 | /* 148 | * Creates random ROI per well and measures intensity — if intensity is less than 90%, reselect a different random ROI 149 | * 150 | * Args: well - well of interest 151 | * channel - channel/OC of interest 152 | * savepath - path of file to be saved 153 | * fileName - data directory (ex. 
E:/20190717_194041_709/) 154 | * 155 | * Returns: randomly chosen ROI that meets standards and creates montage of all wells 156 | */ 157 | 158 | //Randomly select tile from available tiles of a single well 159 | //Once tile is selected, randomly select ROI 160 | if (n == 1) { 161 | tile_str = "000"; 162 | run("Image Sequence...", "open="+fileName+"FOO.tif file=(.*"+well+"_Site"+tile_str+"_"+channel+") sort"); 163 | inFile = getImageID(); 164 | make_random_rectangle(inFile); 165 | } else { 166 | for (i=0; i < num_random_tiles; i++) { 167 | a = randint(start+i*(end-start)/n, start+(i+1)*(end-start)/n); 168 | while (lengthOf(""+a) < 3) { 169 | a = "0" + a; 170 | } 171 | run("Image Sequence...", "open="+fileName+"FOO.tif file=(.*"+well+"_Site"+a+"_"+channel+") sort"); 172 | inFile = getImageID(); 173 | make_random_rectangle(inFile); 174 | } 175 | } 176 | 177 | // Create montage of all selected ROIs from a single well and save as tiff 178 | if (n < 4){ 179 | image_ID = inFile; 180 | } else { 181 | run("Images to Stack"); 182 | inFile = getImageID(); 183 | run("Make Montage...", "columns="+tile_side_length+" rows="+tile_side_length+" scale=1"); 184 | image_ID = getImageID(); 185 | selectImage(inFile); 186 | close(); 187 | } 188 | selectImage(image_ID); 189 | saveAs("Tiff",savepath); 190 | close(); 191 | } 192 | function get_and_save_image_intensity(crop_side_length, num_random_tiles, total_rows, save_tiff, save_jpg) { 193 | // Add mean intensity to image and save as jpg. 194 | getRawStatistics(nPixels, mean, min, max, std, histogram); 195 | //saveAs("Tiff",save_tiff); 196 | text = "Mean Intensity: "+ mean + " (" + std + " SD)"; 197 | setFont("SansSerif", 75, " antialiased"); 198 | montage_height = crop_side_length*num_random_tiles*total_rows; 199 | if (num_random_tiles > 1) { 200 | montage_height = montage_height*0.5; // divide by 2 since each well is made of a 2x2 montage and only 2 ROIs contribute to height 201 | } 202 | makeText(text, 45, montage_height - (montage_height*0.15)); 203 | run("Add Selection...", "stroke=black fill=#FFFFFF new"); 204 | run("Enhance Contrast", "saturated=0.35"); 205 | saveAs("Jpeg", save_jpg); 206 | close(); 207 | } 208 | function quality_analysis(path_name, save_name, montageType, min_threshold, max_threshold) { 209 | /* 210 | * Conducts quality analysis 211 | * 212 | * Args: minimum and maximum values for thresholding 213 | * 214 | * Returns: percentage score of focus quality for plate 215 | */ 216 | run("Image Sequence...", "open="+path_name+"FOO.tif file=(.*Well_"+montageType+".*"+focus_channel+".tif*) sort"); 217 | inFile = getImageID(); 218 | 219 | run("Make Montage...", "columns="+total_cols+" rows="+total_rows+" scale=1"); 220 | montage = getImageID(); 221 | selectImage(inFile); 222 | close(); 223 | selectImage(montage); 224 | saveAs("Tiff",save_name+".tif"); 225 | if (intensity_montages[index(channel_list,focus_channel)] == true) { 226 | get_and_save_image_intensity(crop_side_length, num_random_tiles, total_rows, save_name+".tif", save_name+".jpg"); 227 | } 228 | open(save_name+".tif"); 229 | File.delete(save_name+".tif"); 230 | run("Enhance Contrast", "saturated=0.35"); 231 | print("Computing focus quality..."); 232 | setBatchMode(false); 233 | tilecounty = getHeight()/FOCUS_QUALITY_PLUGIN_PATCH_SIDE_LENGTH; 234 | tilecountx = getWidth()/FOCUS_QUALITY_PLUGIN_PATCH_SIDE_LENGTH; 235 | run("Microscope Image Focus Quality", "originalImage="+save_name+".tif tilecountx="+tilecountx+" tilecounty="+tilecounty+" createprobabilityimage=true overlaypatches=true 
solidpatches=false borderwidth=4"); 236 | selectWindow(montage_fileName + ".tif"); 237 | saveAs("Jpeg", save_name+"-focus.jpg"); 238 | close(); 239 | // The probabilities is a stack with 11 slices, corresponding to probability of 1, 4, ..., 31 pixel blur. 240 | // We sum the probabilities corresponding to 1, 4 and 7 pixel blurs here, as the acceptable focus threshold. 241 | selectWindow("Probabilities"); 242 | run("Make Substack...", "channels=1-3"); 243 | run("Z Project...", "projection=[Sum Slices]"); 244 | selectWindow("SUM_Probabilities-1"); 245 | setAutoThreshold("Default dark"); 246 | setThreshold(min_threshold, max_threshold); 247 | call("ij.plugin.frame.ThresholdAdjuster.setMode", "B&W"); 248 | setOption("BlackBackground", true); 249 | run("Convert to Mask"); 250 | getRawStatistics(nPixels, mean, min, max, std, histogram); 251 | 252 | focus_score = round(100*mean/255); 253 | print("Percentage patches in-focus: " + focus_score +"%"); 254 | close(); 255 | selectWindow("Probabilities"); 256 | close(); 257 | selectWindow("Probabilities-1"); 258 | close(); 259 | 260 | open(save_name+"-focus.jpg"); 261 | text = " Percentage Score = "+ focus_score + "% "; 262 | setFont("SansSerif", 75, " antialiased"); 263 | montage_height = crop_side_length*num_random_tiles*total_rows; 264 | if (num_random_tiles > 1) { 265 | montage_height = montage_height*0.5; // divide by 2 since each well is made of a 2x2 montage and only 2 ROIs contribute to height 266 | } 267 | makeText(text, 45, montage_height - (montage_height*0.11)); 268 | run("Add Selection...", "stroke=black fill=#FFFFFF new"); 269 | run("Select None"); 270 | saveAs("Jpeg", save_name+"-focus.jpg"); 271 | close(); 272 | setBatchMode(true); 273 | } 274 | 275 | 276 | /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// 277 | 278 | 279 | channel_string = ""; 280 | for (ch=0;ch