├── LICENSE ├── README.md ├── hms2 ├── __init__.py ├── core │ ├── __init__.py │ ├── builder.py │ ├── custom_modules.py │ ├── loader_modules.py │ └── model.py └── pipeline │ ├── __init__.cpython-37m-x86_64-linux-gnu.so │ ├── __init__.cpython-38-x86_64-linux-gnu.so │ ├── __init__.cpython-39-x86_64-linux-gnu.so │ ├── callbacks.cpython-37m-x86_64-linux-gnu.so │ ├── callbacks.cpython-38-x86_64-linux-gnu.so │ ├── callbacks.cpython-39-x86_64-linux-gnu.so │ ├── dataset.cpython-37m-x86_64-linux-gnu.so │ ├── dataset.cpython-38-x86_64-linux-gnu.so │ ├── dataset.cpython-39-x86_64-linux-gnu.so │ ├── losses.cpython-37m-x86_64-linux-gnu.so │ ├── losses.cpython-38-x86_64-linux-gnu.so │ ├── losses.cpython-39-x86_64-linux-gnu.so │ ├── main.cpython-37m-x86_64-linux-gnu.so │ ├── main.cpython-38-x86_64-linux-gnu.so │ ├── main.cpython-39-x86_64-linux-gnu.so │ ├── metrics.cpython-37m-x86_64-linux-gnu.so │ ├── metrics.cpython-38-x86_64-linux-gnu.so │ ├── metrics.cpython-39-x86_64-linux-gnu.so │ ├── official_openslide.cpython-37m-x86_64-linux-gnu.so │ ├── official_openslide.cpython-38-x86_64-linux-gnu.so │ ├── official_openslide.cpython-39-x86_64-linux-gnu.so │ ├── test.py │ ├── train.py │ ├── utils.cpython-37m-x86_64-linux-gnu.so │ ├── utils.cpython-38-x86_64-linux-gnu.so │ ├── utils.cpython-39-x86_64-linux-gnu.so │ └── visualize.py ├── misc ├── camelyon_10x_hms2.png └── demo.gif ├── poetry.lock ├── projects └── Camelyon16 │ ├── configs │ ├── config_10x.yaml │ ├── config_2.5x.yaml │ └── config_5x.yaml │ └── datalists │ ├── test.csv │ ├── train.csv │ └── val.csv ├── pyproject.toml └── tests └── core ├── test_builder.py ├── test_custom_modules.py └── test_model.py /LICENSE: -------------------------------------------------------------------------------- 1 | Attribution-NonCommercial-ShareAlike 4.0 International 2 | 3 | ======================================================================= 4 | 5 | Creative Commons Corporation ("Creative Commons") is not a law firm and 6 | does not provide legal services or legal advice. Distribution of 7 | Creative Commons public licenses does not create a lawyer-client or 8 | other relationship. Creative Commons makes its licenses and related 9 | information available on an "as-is" basis. Creative Commons gives no 10 | warranties regarding its licenses, any material licensed under their 11 | terms and conditions, or any related information. Creative Commons 12 | disclaims all liability for damages resulting from their use to the 13 | fullest extent possible. 14 | 15 | Using Creative Commons Public Licenses 16 | 17 | Creative Commons public licenses provide a standard set of terms and 18 | conditions that creators and other rights holders may use to share 19 | original works of authorship and other material subject to copyright 20 | and certain other rights specified in the public license below. The 21 | following considerations are for informational purposes only, are not 22 | exhaustive, and do not form part of our licenses. 23 | 24 | Considerations for licensors: Our public licenses are 25 | intended for use by those authorized to give the public 26 | permission to use material in ways otherwise restricted by 27 | copyright and certain other rights. Our licenses are 28 | irrevocable. Licensors should read and understand the terms 29 | and conditions of the license they choose before applying it. 30 | Licensors should also secure all rights necessary before 31 | applying our licenses so that the public can reuse the 32 | material as expected. 
Licensors should clearly mark any 33 | material not subject to the license. This includes other CC- 34 | licensed material, or material used under an exception or 35 | limitation to copyright. More considerations for licensors: 36 | wiki.creativecommons.org/Considerations_for_licensors 37 | 38 | Considerations for the public: By using one of our public 39 | licenses, a licensor grants the public permission to use the 40 | licensed material under specified terms and conditions. If 41 | the licensor's permission is not necessary for any reason--for 42 | example, because of any applicable exception or limitation to 43 | copyright--then that use is not regulated by the license. Our 44 | licenses grant only permissions under copyright and certain 45 | other rights that a licensor has authority to grant. Use of 46 | the licensed material may still be restricted for other 47 | reasons, including because others have copyright or other 48 | rights in the material. A licensor may make special requests, 49 | such as asking that all changes be marked or described. 50 | Although not required by our licenses, you are encouraged to 51 | respect those requests where reasonable. More_considerations 52 | for the public: 53 | wiki.creativecommons.org/Considerations_for_licensees 54 | 55 | ======================================================================= 56 | 57 | Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International 58 | Public License 59 | 60 | By exercising the Licensed Rights (defined below), You accept and agree 61 | to be bound by the terms and conditions of this Creative Commons 62 | Attribution-NonCommercial-ShareAlike 4.0 International Public License 63 | ("Public License"). To the extent this Public License may be 64 | interpreted as a contract, You are granted the Licensed Rights in 65 | consideration of Your acceptance of these terms and conditions, and the 66 | Licensor grants You such rights in consideration of benefits the 67 | Licensor receives from making the Licensed Material available under 68 | these terms and conditions. 69 | 70 | 71 | Section 1 -- Definitions. 72 | 73 | a. Adapted Material means material subject to Copyright and Similar 74 | Rights that is derived from or based upon the Licensed Material 75 | and in which the Licensed Material is translated, altered, 76 | arranged, transformed, or otherwise modified in a manner requiring 77 | permission under the Copyright and Similar Rights held by the 78 | Licensor. For purposes of this Public License, where the Licensed 79 | Material is a musical work, performance, or sound recording, 80 | Adapted Material is always produced where the Licensed Material is 81 | synched in timed relation with a moving image. 82 | 83 | b. Adapter's License means the license You apply to Your Copyright 84 | and Similar Rights in Your contributions to Adapted Material in 85 | accordance with the terms and conditions of this Public License. 86 | 87 | c. BY-NC-SA Compatible License means a license listed at 88 | creativecommons.org/compatiblelicenses, approved by Creative 89 | Commons as essentially the equivalent of this Public License. 90 | 91 | d. Copyright and Similar Rights means copyright and/or similar rights 92 | closely related to copyright including, without limitation, 93 | performance, broadcast, sound recording, and Sui Generis Database 94 | Rights, without regard to how the rights are labeled or 95 | categorized. 
For purposes of this Public License, the rights 96 | specified in Section 2(b)(1)-(2) are not Copyright and Similar 97 | Rights. 98 | 99 | e. Effective Technological Measures means those measures that, in the 100 | absence of proper authority, may not be circumvented under laws 101 | fulfilling obligations under Article 11 of the WIPO Copyright 102 | Treaty adopted on December 20, 1996, and/or similar international 103 | agreements. 104 | 105 | f. Exceptions and Limitations means fair use, fair dealing, and/or 106 | any other exception or limitation to Copyright and Similar Rights 107 | that applies to Your use of the Licensed Material. 108 | 109 | g. License Elements means the license attributes listed in the name 110 | of a Creative Commons Public License. The License Elements of this 111 | Public License are Attribution, NonCommercial, and ShareAlike. 112 | 113 | h. Licensed Material means the artistic or literary work, database, 114 | or other material to which the Licensor applied this Public 115 | License. 116 | 117 | i. Licensed Rights means the rights granted to You subject to the 118 | terms and conditions of this Public License, which are limited to 119 | all Copyright and Similar Rights that apply to Your use of the 120 | Licensed Material and that the Licensor has authority to license. 121 | 122 | j. Licensor means the individual(s) or entity(ies) granting rights 123 | under this Public License. 124 | 125 | k. NonCommercial means not primarily intended for or directed towards 126 | commercial advantage or monetary compensation. For purposes of 127 | this Public License, the exchange of the Licensed Material for 128 | other material subject to Copyright and Similar Rights by digital 129 | file-sharing or similar means is NonCommercial provided there is 130 | no payment of monetary compensation in connection with the 131 | exchange. 132 | 133 | l. Share means to provide material to the public by any means or 134 | process that requires permission under the Licensed Rights, such 135 | as reproduction, public display, public performance, distribution, 136 | dissemination, communication, or importation, and to make material 137 | available to the public including in ways that members of the 138 | public may access the material from a place and at a time 139 | individually chosen by them. 140 | 141 | m. Sui Generis Database Rights means rights other than copyright 142 | resulting from Directive 96/9/EC of the European Parliament and of 143 | the Council of 11 March 1996 on the legal protection of databases, 144 | as amended and/or succeeded, as well as other essentially 145 | equivalent rights anywhere in the world. 146 | 147 | n. You means the individual or entity exercising the Licensed Rights 148 | under this Public License. Your has a corresponding meaning. 149 | 150 | 151 | Section 2 -- Scope. 152 | 153 | a. License grant. 154 | 155 | 1. Subject to the terms and conditions of this Public License, 156 | the Licensor hereby grants You a worldwide, royalty-free, 157 | non-sublicensable, non-exclusive, irrevocable license to 158 | exercise the Licensed Rights in the Licensed Material to: 159 | 160 | a. reproduce and Share the Licensed Material, in whole or 161 | in part, for NonCommercial purposes only; and 162 | 163 | b. produce, reproduce, and Share Adapted Material for 164 | NonCommercial purposes only. 165 | 166 | 2. Exceptions and Limitations. 
For the avoidance of doubt, where 167 | Exceptions and Limitations apply to Your use, this Public 168 | License does not apply, and You do not need to comply with 169 | its terms and conditions. 170 | 171 | 3. Term. The term of this Public License is specified in Section 172 | 6(a). 173 | 174 | 4. Media and formats; technical modifications allowed. The 175 | Licensor authorizes You to exercise the Licensed Rights in 176 | all media and formats whether now known or hereafter created, 177 | and to make technical modifications necessary to do so. The 178 | Licensor waives and/or agrees not to assert any right or 179 | authority to forbid You from making technical modifications 180 | necessary to exercise the Licensed Rights, including 181 | technical modifications necessary to circumvent Effective 182 | Technological Measures. For purposes of this Public License, 183 | simply making modifications authorized by this Section 2(a) 184 | (4) never produces Adapted Material. 185 | 186 | 5. Downstream recipients. 187 | 188 | a. Offer from the Licensor -- Licensed Material. Every 189 | recipient of the Licensed Material automatically 190 | receives an offer from the Licensor to exercise the 191 | Licensed Rights under the terms and conditions of this 192 | Public License. 193 | 194 | b. Additional offer from the Licensor -- Adapted Material. 195 | Every recipient of Adapted Material from You 196 | automatically receives an offer from the Licensor to 197 | exercise the Licensed Rights in the Adapted Material 198 | under the conditions of the Adapter's License You apply. 199 | 200 | c. No downstream restrictions. You may not offer or impose 201 | any additional or different terms or conditions on, or 202 | apply any Effective Technological Measures to, the 203 | Licensed Material if doing so restricts exercise of the 204 | Licensed Rights by any recipient of the Licensed 205 | Material. 206 | 207 | 6. No endorsement. Nothing in this Public License constitutes or 208 | may be construed as permission to assert or imply that You 209 | are, or that Your use of the Licensed Material is, connected 210 | with, or sponsored, endorsed, or granted official status by, 211 | the Licensor or others designated to receive attribution as 212 | provided in Section 3(a)(1)(A)(i). 213 | 214 | b. Other rights. 215 | 216 | 1. Moral rights, such as the right of integrity, are not 217 | licensed under this Public License, nor are publicity, 218 | privacy, and/or other similar personality rights; however, to 219 | the extent possible, the Licensor waives and/or agrees not to 220 | assert any such rights held by the Licensor to the limited 221 | extent necessary to allow You to exercise the Licensed 222 | Rights, but not otherwise. 223 | 224 | 2. Patent and trademark rights are not licensed under this 225 | Public License. 226 | 227 | 3. To the extent possible, the Licensor waives any right to 228 | collect royalties from You for the exercise of the Licensed 229 | Rights, whether directly or through a collecting society 230 | under any voluntary or waivable statutory or compulsory 231 | licensing scheme. In all other cases the Licensor expressly 232 | reserves any right to collect such royalties, including when 233 | the Licensed Material is used other than for NonCommercial 234 | purposes. 235 | 236 | 237 | Section 3 -- License Conditions. 238 | 239 | Your exercise of the Licensed Rights is expressly made subject to the 240 | following conditions. 241 | 242 | a. Attribution. 243 | 244 | 1. 
If You Share the Licensed Material (including in modified 245 | form), You must: 246 | 247 | a. retain the following if it is supplied by the Licensor 248 | with the Licensed Material: 249 | 250 | i. identification of the creator(s) of the Licensed 251 | Material and any others designated to receive 252 | attribution, in any reasonable manner requested by 253 | the Licensor (including by pseudonym if 254 | designated); 255 | 256 | ii. a copyright notice; 257 | 258 | iii. a notice that refers to this Public License; 259 | 260 | iv. a notice that refers to the disclaimer of 261 | warranties; 262 | 263 | v. a URI or hyperlink to the Licensed Material to the 264 | extent reasonably practicable; 265 | 266 | b. indicate if You modified the Licensed Material and 267 | retain an indication of any previous modifications; and 268 | 269 | c. indicate the Licensed Material is licensed under this 270 | Public License, and include the text of, or the URI or 271 | hyperlink to, this Public License. 272 | 273 | 2. You may satisfy the conditions in Section 3(a)(1) in any 274 | reasonable manner based on the medium, means, and context in 275 | which You Share the Licensed Material. For example, it may be 276 | reasonable to satisfy the conditions by providing a URI or 277 | hyperlink to a resource that includes the required 278 | information. 279 | 3. If requested by the Licensor, You must remove any of the 280 | information required by Section 3(a)(1)(A) to the extent 281 | reasonably practicable. 282 | 283 | b. ShareAlike. 284 | 285 | In addition to the conditions in Section 3(a), if You Share 286 | Adapted Material You produce, the following conditions also apply. 287 | 288 | 1. The Adapter's License You apply must be a Creative Commons 289 | license with the same License Elements, this version or 290 | later, or a BY-NC-SA Compatible License. 291 | 292 | 2. You must include the text of, or the URI or hyperlink to, the 293 | Adapter's License You apply. You may satisfy this condition 294 | in any reasonable manner based on the medium, means, and 295 | context in which You Share Adapted Material. 296 | 297 | 3. You may not offer or impose any additional or different terms 298 | or conditions on, or apply any Effective Technological 299 | Measures to, Adapted Material that restrict exercise of the 300 | rights granted under the Adapter's License You apply. 301 | 302 | 303 | Section 4 -- Sui Generis Database Rights. 304 | 305 | Where the Licensed Rights include Sui Generis Database Rights that 306 | apply to Your use of the Licensed Material: 307 | 308 | a. for the avoidance of doubt, Section 2(a)(1) grants You the right 309 | to extract, reuse, reproduce, and Share all or a substantial 310 | portion of the contents of the database for NonCommercial purposes 311 | only; 312 | 313 | b. if You include all or a substantial portion of the database 314 | contents in a database in which You have Sui Generis Database 315 | Rights, then the database in which You have Sui Generis Database 316 | Rights (but not its individual contents) is Adapted Material, 317 | including for purposes of Section 3(b); and 318 | 319 | c. You must comply with the conditions in Section 3(a) if You Share 320 | all or a substantial portion of the contents of the database. 321 | 322 | For the avoidance of doubt, this Section 4 supplements and does not 323 | replace Your obligations under this Public License where the Licensed 324 | Rights include other Copyright and Similar Rights. 
325 | 326 | 327 | Section 5 -- Disclaimer of Warranties and Limitation of Liability. 328 | 329 | a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE 330 | EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS 331 | AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF 332 | ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, 333 | IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, 334 | WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR 335 | PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, 336 | ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT 337 | KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT 338 | ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. 339 | 340 | b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE 341 | TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, 342 | NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, 343 | INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, 344 | COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR 345 | USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN 346 | ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR 347 | DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR 348 | IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. 349 | 350 | c. The disclaimer of warranties and limitation of liability provided 351 | above shall be interpreted in a manner that, to the extent 352 | possible, most closely approximates an absolute disclaimer and 353 | waiver of all liability. 354 | 355 | 356 | Section 6 -- Term and Termination. 357 | 358 | a. This Public License applies for the term of the Copyright and 359 | Similar Rights licensed here. However, if You fail to comply with 360 | this Public License, then Your rights under this Public License 361 | terminate automatically. 362 | 363 | b. Where Your right to use the Licensed Material has terminated under 364 | Section 6(a), it reinstates: 365 | 366 | 1. automatically as of the date the violation is cured, provided 367 | it is cured within 30 days of Your discovery of the 368 | violation; or 369 | 370 | 2. upon express reinstatement by the Licensor. 371 | 372 | For the avoidance of doubt, this Section 6(b) does not affect any 373 | right the Licensor may have to seek remedies for Your violations 374 | of this Public License. 375 | 376 | c. For the avoidance of doubt, the Licensor may also offer the 377 | Licensed Material under separate terms or conditions or stop 378 | distributing the Licensed Material at any time; however, doing so 379 | will not terminate this Public License. 380 | 381 | d. Sections 1, 5, 6, 7, and 8 survive termination of this Public 382 | License. 383 | 384 | 385 | Section 7 -- Other Terms and Conditions. 386 | 387 | a. The Licensor shall not be bound by any additional or different 388 | terms or conditions communicated by You unless expressly agreed. 389 | 390 | b. Any arrangements, understandings, or agreements regarding the 391 | Licensed Material not stated herein are separate from and 392 | independent of the terms and conditions of this Public License. 393 | 394 | 395 | Section 8 -- Interpretation. 396 | 397 | a. 
For the avoidance of doubt, this Public License does not, and 398 | shall not be interpreted to, reduce, limit, restrict, or impose 399 | conditions on any use of the Licensed Material that could lawfully 400 | be made without permission under this Public License. 401 | 402 | b. To the extent possible, if any provision of this Public License is 403 | deemed unenforceable, it shall be automatically reformed to the 404 | minimum extent necessary to make it enforceable. If the provision 405 | cannot be reformed, it shall be severed from this Public License 406 | without affecting the enforceability of the remaining terms and 407 | conditions. 408 | 409 | c. No term or condition of this Public License will be waived and no 410 | failure to comply consented to unless expressly agreed to by the 411 | Licensor. 412 | 413 | d. Nothing in this Public License constitutes or may be interpreted 414 | as a limitation upon, or waiver of, any privileges and immunities 415 | that apply to the Licensor or You, including from the legal 416 | processes of any jurisdiction or authority. 417 | 418 | ======================================================================= 419 | 420 | Creative Commons is not a party to its public 421 | licenses. Notwithstanding, Creative Commons may elect to apply one of 422 | its public licenses to material it publishes and in those instances 423 | will be considered the “Licensor.” The text of the Creative Commons 424 | public licenses is dedicated to the public domain under the CC0 Public 425 | Domain Dedication. Except for the limited purpose of indicating that 426 | material is shared under a Creative Commons public license or as 427 | otherwise permitted by the Creative Commons policies published at 428 | creativecommons.org/policies, Creative Commons does not authorize the 429 | use of the trademark "Creative Commons" or any other trademark or logo 430 | of Creative Commons without its prior written consent including, 431 | without limitation, in connection with any unauthorized modifications 432 | to any of its public licenses or any other arrangements, 433 | understandings, or agreements concerning use of licensed material. For 434 | the avoidance of doubt, this paragraph does not form part of the 435 | public licenses. 436 | 437 | Creative Commons may be contacted at creativecommons.org. 438 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # HMS2 2 | 3 | Another annotation-free whole-slide training approach to pathological classification. 4 | This repository provides scripts to reproduce the results in the paper "Deep neural network trained on gigapixel images improves lymph node metastasis detection in clinical settings", including model training, inference, visualization, and statistics calculation, etc. 5 | 6 | [>> **Demo Video** <<](https://youtu.be/Kcx_d5nEUQ8) | [**Journal Link**](https://doi.org/10.1038/s41467-022-30746-1) | [**Our Website**](https://www.aetherai.com/) 7 | 8 | [](https://youtu.be/Kcx_d5nEUQ8) 9 | 10 | ## Publications 11 | 12 | Huang, SC., Chen, CC., Lan, J. et al. Deep neural network trained on gigapixel images improves lymph node metastasis detection in clinical settings. Nat Commun 13, 3347 (2022). https://doi.org/10.1038/s41467-022-30746-1 13 | 14 | ## License 15 | 16 | Copyright (C) 2021 aetherAI Co., Ltd. All rights reserved. 
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). 17 | 18 | ## Requirements 19 | 20 | ### Hardware Requirements 21 | 22 | Make sure the system contains an adequate amount of main memory (at least 32 GB) to prevent out-of-memory errors. 23 | 24 | ### Software Stacks 25 | 26 | Although Poetry can set up most Python packages automatically, you should install the following native libraries manually in advance. 27 | 28 | - CUDA 10.2+ (Recommended: 11.3) 29 | 30 | CUDA is essential for PyTorch to enable GPU-accelerated deep neural network training. See https://docs.nvidia.com/cuda/cuda-installation-guide-linux/ . 31 | 32 | - OpenMPI 3+ 33 | 34 | OpenMPI is required for multi-GPU distributed training. If `sudo` is available, you can simply install it by: 35 | ``` 36 | sudo apt install libopenmpi-dev 37 | ``` 38 | 39 | - Python 3.7+ 40 | 41 | The development kit should be installed. 42 | ``` 43 | sudo apt install python3-dev 44 | ``` 45 | 46 | - OpenSlide 47 | 48 | OpenSlide is a library to read slides. See the installation guide at https://github.com/openslide/openslide . 49 | 50 | ### Python Packages 51 | 52 | We use Poetry to manage Python packages. The environment can be automatically set up by: 53 | ``` 54 | cd [HMS2 folder] 55 | python3 -m pip install poetry 56 | python3 -m poetry install 57 | python3 -m poetry run poe install-cu113 # change this to "install-cu102" for CUDA 10.x. 58 | ``` 59 | 60 | ## Usage 61 | 62 | Before initiating a training task, you should prepare several configuration files by following the step-by-step instructions below. Refer to `projects/Camelyon16` as an example of training an HMS model on the CAMELYON16 (https://camelyon16.grand-challenge.org/) dataset. 63 | 64 | ### 0. (Optional) Try a pre-trained CAMELYON16 65 | 66 | If you would like to try training HMS models using CAMELYON16 or evaluating pre-trained ones, we provide contour description files and pre-trained weights trained at 2.5x, 5x, and 10x magnifications, which are available at https://drive.google.com/file/d/12Fv-OhAze_t2_bCX7l1S5iMCgQgOvHGF/view?usp=sharing . 67 | 68 | After the ZIP file is downloaded, unzip it to the project folder: 69 | ``` 70 | unzip -o hms2_camelyon16.zip -d /path/to/hms2 71 | ``` 72 | 73 | In addition, you should download the CAMELYON16 slides from https://camelyon16.grand-challenge.org/ and place them in `projects/Camelyon16/slides`. Then follow the instructions below. 74 | 75 | | Pre-trained model | AUC (95% CI) 76 | | ----------------- | ---------------------------------- 77 | | Camelyon16_2.5x | 0.6015 (0.5022-0.7008) 78 | | Camelyon16_5x | 0.6242 (0.5194-0.7291) 79 | | Camelyon16_10x | 0.9135 (0.8490-0.9781) 80 | 81 | ![Camelyon16_10x_hms2](misc/camelyon_10x_hms2.png) 82 | 83 | ### 1. Create a Project Folder 84 | 85 | As a convention, create a project folder in `projects` with four sub-folders: `datalists`, `slides`, `contours`, and `configs`. 86 | 87 | ### 2. Define Datasets 88 | 89 | Three CSV files, defining the training, validation, and testing datasets respectively, should be placed in `projects/YOUR_PROJECT/datalists`. See `projects/Camelyon16/datalists` for an example. 90 | 91 | If your datasets are annotated at the slide level, these CSV files should follow the format: 92 | ``` 93 | [slide_1_basename],[slide_1_class] 94 | [slide_2_basename],[slide_2_class] 95 | ... 96 | ``` 97 | , where [slide\_\*\_basename] specifies the filename **without extension** of a slide image and [slide\_\*\_class] is an integer indicating a slide-level label (e.g. 0 for normal, 1 for cancerous). 98 |
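If you have many slides, a short script can generate these datalists. Below is a minimal sketch; the folder layout, the `.tif` extension, and the `label_of` helper are illustrative assumptions, not part of this repository:
```
import csv
from pathlib import Path

slide_dir = Path("projects/YOUR_PROJECT/slides")

def label_of(basename):
    # Hypothetical helper: map a slide basename to its integer class.
    return 1 if basename.startswith("tumor") else 0

with open("projects/YOUR_PROJECT/datalists/train.csv", "w", newline="") as f:
    writer = csv.writer(f)
    for slide_path in sorted(slide_dir.glob("*.tif")):
        # One row per slide: basename without extension, then the class id.
        writer.writerow([slide_path.stem, label_of(slide_path.stem)])
```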
99 | Given contour-level (e.g. LN-level) labels, construct the CSV files as: 100 | ``` 101 | [slide_1_contour_1],[slide_1_contour_1_class] 102 | [slide_1_contour_2],[slide_1_contour_2_class] 103 | ... 104 | ``` 105 | You can name each contour whatever you want. 106 | 107 | #### (Optional) Contour Description Files 108 | 109 | For each contour, a contour description file in JSON should be composed with content like: 110 | ``` 111 | {"slide_name": "slide_1_basename", "contours": contours} 112 | ``` 113 | , where `contours` is a list of contours. Each contour is a list of coordinates in (x, y). See `projects/Camelyon16/contours` for an example. Save these files in `projects/YOUR_PROJECT/contours`. 114 |
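For instance, such a file could be written as follows (a minimal sketch with made-up coordinates; the output filename is an assumption and should match the corresponding contour name in your datalist):
```
import json

record = {
    "slide_name": "slide_1_basename",
    "contours": [
        # One contour: a polygon given as a list of (x, y) coordinates.
        [[1000, 1200], [1800, 1250], [1750, 2100], [980, 2050]],
    ],
}

with open("projects/YOUR_PROJECT/contours/slide_1_contour_1.json", "w") as f:
    json.dump(record, f)
```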
115 | ### 3. Prepare Slide Files 116 | 117 | Place the slide files in `projects/YOUR_PROJECT/slides`. Soft links (`ln -s`) work fine. 118 | 119 | ### 4. Set Up Training Configurations 120 | 121 | Model hyper-parameters are set up in a YAML file. You can copy `projects/Camelyon16/configs/config_2.5x.yaml` and modify it for your own preference. 122 | 123 | The following table describes each field in a config. 124 | 125 | | Field | Description 126 | | -------------------------- | --------------------------------------------------------------------------------------------- 127 | | RESULT_DIR | Directory to store outputs, including model weights, testing results, etc. 128 | | MODEL_PATH | Path to store the model weights. (default: `${RESULT_DIR}/model.h5`) 129 | | OPTIMIZER_STATE_PATH | Path to store the optimizer state. (default: `${RESULT_DIR}/opt_state.pt`) 130 | | STATES_PATH | Path to store the states for resuming. (default: `${RESULT_DIR}/states.pt`) 131 | | CONFIG_RECORD_PATH | Path to back up this config file. (default: `${RESULT_DIR}/config.yaml`) 132 | | USE_MIXED_PRECISION | Whether to enable mixed precision training. 133 | | USE_HMS2 | Whether to enable HMS2. 134 | | TRAIN_CSV_PATH | CSV file defining the training dataset. 135 | | VAL_CSV_PATH | CSV file defining the validation dataset. 136 | | TEST_CSV_PATH | CSV file defining the testing dataset. 137 | | CONTOUR_DIR | Directory containing contour description files. Set to NULL when using slide-level labels. 138 | | SLIDE_DIR | Directory containing all the slide image files (can be soft links). 139 | | SLIDE_FILE_EXTENSION | File extension. (e.g. ".ndpi", ".svs") 140 | | SLIDE_READER | Library to read slides. (default: `openslide`) 141 | | RESIZE_RATIO | Resize ratio for downsampling slide images. 142 | | INPUT_SIZE | Size of model inputs in [height, width, channels]. Resized images are padded or cropped to this size. Try decreasing this field when main memory is limited. 143 | | GPU_AUGMENTS | Augmentations to do on GPU with patch-based affine transformation. (default: ["flip", "rigid", "hed_perturb"]) 144 | | AUGMENTS | Augmentations to do on CPU. 145 | | MODEL | Model architecture to use. Currently only `fixup_resnet50` is supported. 146 | | POOL_USE | Global pooling method in ResNet. One of `gmp`, `gap`, and `lse`. 147 | | NUM_CLASSES | Number of classes. 148 | | BATCH_SIZE | Number of slides processed in each training iteration for each MPI worker. (default: 1) 149 | | EPOCHS | Maximum number of training epochs. 150 | | LOSS | Loss to use. Currently only `ce` is supported. 151 | | METRIC_LIST | A list of metrics. 152 | | OPTIMIZER | Optimizer for model updating. 153 | | INIT_LEARNING_RATE | Initial learning rate for the Adam optimizer. 154 | | REDUCE_LR_FACTOR | The learning rate will be decreased by this factor when the validation loss shows no improvement for consecutive epochs. 155 | | REDUCE_LR_PATIENCE | Number of consecutive epochs without validation loss improvement before reducing the learning rate. 156 | | TIME_RECORD_PATH | Path to store a CSV file recording per-iteration training time. 157 | | TEST_TIME_RECORD_PATH | Path to store a CSV file recording per-iteration inference time. 158 | | TEST_RESULT_PATH | Path to store the model predictions after testing in a JSON format. (default: `${RESULT_DIR}/test_result.json`) 159 | | VIZ_RESIZE_RATIO | Resize ratio of the output prediction maps. 160 | | VIZ_FOLDER | Folder to store prediction maps. (default: `${RESULT_DIR}/viz`) 161 | | VIZ_RAW_FOLDER | Folder to store raw prediction maps. (default: `${RESULT_DIR}/viz_raw`) 162 | 163 | ### 5. Train a Model 164 | 165 | To train a model, simply run 166 | ``` 167 | python3 -m poetry run python -m hms2.pipeline.train --config YOUR_TRAIN_CONFIG.YAML [--continue_mode] 168 | ``` 169 | , where the optional `--continue_mode` flag resumes training from previously saved model weights. 170 | 171 | To enable multi-node, multi-GPU distributed training, simply add `mpirun` in front of the above command, e.g. 172 | ``` 173 | mpirun -np 4 -x CUDA_VISIBLE_DEVICES="0,1,2,3" python3 -m poetry run python -m hms2.pipeline.train --config YOUR_TRAIN_CONFIG.YAML 174 | ``` 175 | 176 | Typically, this step takes days to complete, depending on the computing power; you can track the progress in real time from the program output. 177 | 178 | ### 6. Evaluate the Model 179 | 180 | To evaluate the model, call 181 | ``` 182 | [mpirun ...] python3 -m poetry run python -m hms2.pipeline.test --config YOUR_TRAIN_CONFIG.YAML 183 | ``` 184 | 185 | This command will generate a JSON file (named `test_result.json` by default) in the result directory. 186 | The file contains the model predictions for each testing slide. 187 |
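To inspect these predictions programmatically, you can load the JSON file in Python. The sketch below is illustrative only; the exact schema of `test_result.json` is not documented here, so check the file once before adapting it:
```
import json

with open("path/to/RESULT_DIR/test_result.json") as f:
    result = json.load(f)

# Illustrative only: print whatever structure the file holds,
# one entry per testing slide.
for entry in (result.items() if isinstance(result, dict) else result):
    print(entry)
```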
3 | """ 4 | from collections import namedtuple 5 | from typing import Callable, Optional, Sequence, Union 6 | 7 | import torch 8 | import torch.nn as nn 9 | import torchvision 10 | 11 | from .custom_modules import ( 12 | FrozenBatchNorm2d, 13 | HEDPerturbAugmentorModule, 14 | LogSumExpPool2d, 15 | PermuteLayer, 16 | ScaleAndShift, 17 | ToDevice, 18 | ) 19 | from .loader_modules import ( 20 | GPUAugmentationLoaderModule, 21 | NoLoaderModule, 22 | PlainLoaderModule, 23 | ) 24 | from .model import Hms2Model 25 | 26 | 27 | class Hms2ModelBuilder: 28 | def __init__(self): 29 | self.Augmentation = namedtuple( 30 | "Augmentation", 31 | ["build_func"], 32 | ) 33 | self.Backbone = namedtuple( 34 | "Backbone", 35 | ["build_func", "output_channels", "get_hms2_parameters"], 36 | ) 37 | self.Pooling = namedtuple( 38 | "Pooling", 39 | ["local_pooling_build_func", "pooling_build_func"], 40 | ) 41 | self.CustomDense = namedtuple( 42 | "CustomDense", 43 | ["custom_dense_build_func"], 44 | ) 45 | 46 | self.augmentation_registry = {} 47 | self.backbone_registry = {} 48 | self.pooling_registry = {} 49 | self.custom_dense_registry = {} 50 | 51 | self._register_builtins() 52 | 53 | def build( 54 | self, 55 | n_classes: int, 56 | augmentation_list: Optional[Sequence[str]] = None, 57 | backbone: str = "resnet50_frozenbn", 58 | pretrained: bool = True, 59 | pooling: str = "gmp", 60 | custom_dense: Optional[str] = None, 61 | use_hms2: bool = True, 62 | device: Optional[Union[torch.device, str, int]] = None, 63 | use_cpu_for_dense: bool = False, 64 | gpu_memory_budget: float = 32.0, 65 | ) -> nn.Module: 66 | """ 67 | Build a model given parameters. 68 | 69 | Args: 70 | n_classes (int): The number of classes. 71 | augmentation_list (list or NoneType): 72 | A list of str, each of which specify an augmentation process, including 73 | "flip", "rigid", and "hed_perturb". The default is None that disables 74 | GPU augmentations. 75 | backbone (str): 76 | Specify the backbone structure. One of "resnet50_frozenbn" (default). 77 | pretrained (bool): 78 | Whether to load pretrained weights for the backbone (default: True). 79 | pooling (str or NoneType): 80 | Specify the pooling function. One of "gmp", "gmp_scaled", "gap", "lse", 81 | "cam", and "no". 82 | custom_dense (Optional[str]): 83 | Specify the module after pooling if not using the standard single dense 84 | layer. One of "no". 85 | use_hms2 (bool): Whether to enable HMS2. The default is True. 86 | device (torch.device): 87 | The device to place modules. If None (default), it calls 88 | torch.cuda.current_device() to get the device. 89 | use_cpu_for_dense: Whether to compute dense layers using CPU. 90 | gpu_memory_budget (float): 91 | The GPU memory capacity to let the builder determine the parameters of 92 | HMS2. 
93 | """ 94 | # Default arguments 95 | if augmentation_list is None: 96 | augmentation_list = [] 97 | if device is None: 98 | device = torch.cuda.current_device() 99 | 100 | # Build components 101 | loader_module = self._build_loader_module( 102 | use_hms2=use_hms2, 103 | augmentation_list=augmentation_list, 104 | device=device, 105 | ) 106 | backbone_module = self._build_backbone_module( 107 | backbone=backbone, 108 | pretrained=pretrained, 109 | device=device, 110 | ) 111 | local_pooling_module = self._build_local_pooling_module( 112 | pooling=pooling, 113 | device=device, 114 | use_cpu_for_dense=use_cpu_for_dense, 115 | ) 116 | dense_module = self._build_dense_module( 117 | backbone=backbone, 118 | pooling=pooling, 119 | custom_dense=custom_dense, 120 | n_classes=n_classes, 121 | device=device, 122 | use_cpu_for_dense=use_cpu_for_dense, 123 | ) 124 | 125 | # Build the model 126 | model: nn.Module 127 | if use_hms2: 128 | hms2_parameters = self.backbone_registry[backbone].get_hms2_parameters( 129 | gpu_memory_budget=gpu_memory_budget, 130 | ) 131 | 132 | model = Hms2Model( 133 | loader_module=loader_module, 134 | conv_module=backbone_module, 135 | dense_module=dense_module, 136 | local_pooling_module=local_pooling_module, 137 | **hms2_parameters, 138 | ) 139 | else: 140 | model = _PlainModel( 141 | loader_module=loader_module, 142 | conv_module=backbone_module, 143 | dense_module=dense_module, 144 | local_pooling_module=local_pooling_module, 145 | ) 146 | 147 | return model 148 | 149 | def register_augmentation( 150 | self, 151 | signature: str, 152 | build_func: Callable, 153 | ) -> None: 154 | """Register an augmentation. 155 | 156 | Args: 157 | signature: The name of the augmentation. 158 | build_func: Calling build_func() will yield an nn.Module. 159 | """ 160 | self.augmentation_registry[signature] = self.Augmentation( 161 | build_func=build_func, 162 | ) 163 | 164 | def register_backbone( 165 | self, 166 | signature: str, 167 | build_func: Callable, 168 | output_channels: int, 169 | get_hms2_parameters: Callable, 170 | ) -> None: 171 | """Register a backbone. 172 | 173 | Args: 174 | signature: The name of the backbone. 175 | build_func: 176 | Calling build_func(pretrained=xxx) will yield an nn.Module. 177 | "pretrained: bool" must be included as an argument. 178 | output_channels: The number of the output channels. 179 | get_hms2_parameters: 180 | A callable with a parameter `gpu_memory_budget`. It returns a dict 181 | with the keys "tile_size", "emb_crop_size", and "emb_stride_size". 182 | """ 183 | self.backbone_registry[signature] = self.Backbone( 184 | build_func=build_func, 185 | output_channels=output_channels, 186 | get_hms2_parameters=get_hms2_parameters, 187 | ) 188 | 189 | def register_pooling( 190 | self, 191 | signature: str, 192 | local_pooling_build_func: Optional[Callable], 193 | pooling_build_func: Callable, 194 | ) -> None: 195 | """Register a pooling. 196 | 197 | Args: 198 | signature: The name of the pooling. 199 | local_pooling_build_func: 200 | Calling local_pooling_build_func() will yield an nn.Module. This 201 | pooling will be applied before HMS2 aggregation. 202 | pooling_build_func: 203 | Calling pooling_build_func() will yield an nn.Module. This pooling will 204 | be applied before linear layers. 
205 | """ 206 | self.pooling_registry[signature] = self.Pooling( 207 | local_pooling_build_func=local_pooling_build_func, 208 | pooling_build_func=pooling_build_func, 209 | ) 210 | 211 | def register_custom_dense( 212 | self, 213 | signature: str, 214 | custom_dense_build_func: Callable[[int], nn.Module], 215 | ) -> None: 216 | """Register a custom dense. 217 | 218 | Args: 219 | signature: The name of the custom dense. 220 | custom_dense_build_func: 221 | Calling custom_dense_build_func(num_classes) will yeild an nn.Module. 222 | """ 223 | self.custom_dense_registry[signature] = self.CustomDense( 224 | custom_dense_build_func=custom_dense_build_func, 225 | ) 226 | 227 | def _register_builtins(self): 228 | # Augmentations 229 | self.register_augmentation("hed_perturb", HEDPerturbAugmentorModule) 230 | 231 | # Backbones: ResNet50 with frozen BN layers. 232 | def resnet50_frozenbn_build_func(pretrained: bool) -> nn.Module: 233 | module = torchvision.models.resnet50(pretrained=pretrained) 234 | module = FrozenBatchNorm2d.convert_frozen_batchnorm(module) 235 | module = nn.Sequential(*list(module.children())[:-2]) 236 | return module 237 | 238 | def resnet50_frozenbn_get_hms2_parameters(gpu_memory_budget: float) -> dict: 239 | if gpu_memory_budget >= 32: 240 | parameters = { 241 | "tile_size": 3072, 242 | "emb_crop_size": 7, 243 | "emb_stride_size": 32, 244 | } 245 | else: 246 | parameters = { 247 | "tile_size": 2048, 248 | "emb_crop_size": 7, 249 | "emb_stride_size": 32, 250 | } 251 | return parameters 252 | 253 | self.register_backbone( 254 | signature="resnet50_frozenbn", 255 | build_func=resnet50_frozenbn_build_func, 256 | output_channels=2048, 257 | get_hms2_parameters=resnet50_frozenbn_get_hms2_parameters, 258 | ) 259 | 260 | # Poolings 261 | self.register_pooling( 262 | "gmp", 263 | local_pooling_build_func=(lambda: nn.AdaptiveMaxPool2d((1, 1))), 264 | pooling_build_func=( 265 | lambda: nn.Sequential( 266 | nn.AdaptiveMaxPool2d((1, 1)), 267 | nn.Flatten(), 268 | ) 269 | ), 270 | ) 271 | self.register_pooling( 272 | "gmp_scaled", 273 | local_pooling_build_func=( 274 | lambda: nn.Sequential( 275 | ScaleAndShift(scale=3.79, bias=(-17.7)), 276 | nn.AdaptiveMaxPool2d((1, 1)), 277 | ) 278 | ), 279 | pooling_build_func=( 280 | lambda: nn.Sequential( 281 | nn.AdaptiveMaxPool2d((1, 1)), 282 | nn.Flatten(), 283 | ) 284 | ), 285 | ) 286 | self.register_pooling( 287 | "gmp_scaled_1k", 288 | local_pooling_build_func=( 289 | lambda: nn.Sequential( 290 | ScaleAndShift(scale=3.933, bias=(-19.14)), 291 | nn.AdaptiveMaxPool2d((1, 1)), 292 | ) 293 | ), 294 | pooling_build_func=( 295 | lambda: nn.Sequential( 296 | nn.AdaptiveMaxPool2d((1, 1)), 297 | nn.Flatten(), 298 | ) 299 | ), 300 | ) 301 | self.register_pooling( 302 | "gmp_scaled_2k", 303 | local_pooling_build_func=( 304 | lambda: nn.Sequential( 305 | ScaleAndShift(scale=4.135, bias=(-21.23)), 306 | nn.AdaptiveMaxPool2d((1, 1)), 307 | ) 308 | ), 309 | pooling_build_func=( 310 | lambda: nn.Sequential( 311 | nn.AdaptiveMaxPool2d((1, 1)), 312 | nn.Flatten(), 313 | ) 314 | ), 315 | ) 316 | self.register_pooling( 317 | "gap", 318 | local_pooling_build_func=(lambda: nn.AdaptiveAvgPool2d((1, 1))), 319 | pooling_build_func=( 320 | lambda: nn.Sequential( 321 | nn.AdaptiveAvgPool2d((1, 1)), 322 | nn.Flatten(), 323 | ) 324 | ), 325 | ) 326 | self.register_pooling( 327 | "lse", 328 | local_pooling_build_func=(lambda: LogSumExpPool2d(factor=1.0)), 329 | pooling_build_func=( 330 | lambda: nn.Sequential( 331 | LogSumExpPool2d(factor=1.0), 332 | nn.Flatten(), 333 | ) 
334 | ), 335 | ) 336 | self.register_pooling( 337 | "cam", 338 | local_pooling_build_func=None, 339 | pooling_build_func=(lambda: PermuteLayer(dims=(0, 2, 3, 1))), 340 | ) 341 | self.register_pooling( 342 | "no", 343 | local_pooling_build_func=None, 344 | pooling_build_func=(lambda: nn.Identity()), 345 | ) 346 | 347 | # Custom Dense 348 | self.register_custom_dense( 349 | "no", 350 | custom_dense_build_func=(lambda: nn.Identity()), 351 | ) 352 | 353 | def _build_loader_module( 354 | self, 355 | use_hms2: bool, 356 | augmentation_list: Sequence[str], 357 | device: Union[torch.device, str, int], 358 | ) -> nn.Module: 359 | # Translate the augmentation_list 360 | augmentation_modules = [] 361 | for augmentation in augmentation_list: 362 | if use_hms2 and augmentation in ["flip", "rigid"]: 363 | # "flip" and "rigid" are built-ins of HMS2 to enable patch-based 364 | # affine transformation. Skip initiating a module. 365 | pass 366 | elif augmentation in self.augmentation_registry: 367 | module = self.augmentation_registry[augmentation].build_func() 368 | augmentation_modules.append(module) 369 | 370 | else: 371 | raise RuntimeError( 372 | f"{augmentation} has not yet been registered as an augmentation." 373 | ) 374 | 375 | # Build the loader module 376 | loader_module: nn.Module 377 | if use_hms2: 378 | if augmentation_list is None: 379 | loader_module = PlainLoaderModule() 380 | else: 381 | random_rotation = "rigid" in augmentation_list 382 | random_translation = ( 383 | (-32.0, 32.0) if "rigid" in augmentation_list else None 384 | ) 385 | random_flip = "flip" in augmentation_list 386 | 387 | loader_module = GPUAugmentationLoaderModule( 388 | random_rotation=random_rotation, 389 | random_translation=random_translation, 390 | random_flip=random_flip, 391 | other_augmentations=augmentation_modules, 392 | ) 393 | else: 394 | loader_module = NoLoaderModule(augmentations=augmentation_modules) 395 | 396 | loader_module = loader_module.to(device) 397 | return loader_module 398 | 399 | def _build_backbone_module( 400 | self, 401 | backbone: str, 402 | pretrained: bool, 403 | device: Union[torch.device, str, int], 404 | ) -> nn.Module: 405 | if backbone not in self.backbone_registry: 406 | raise RuntimeError(f"{backbone} has not yet been registered as a backbone.") 407 | 408 | backbone_module = self.backbone_registry[backbone].build_func( 409 | pretrained=pretrained 410 | ) 411 | backbone_module = backbone_module.to(device) 412 | return backbone_module 413 | 414 | def _build_local_pooling_module( 415 | self, 416 | pooling: str, 417 | device: Union[torch.device, str, int], 418 | use_cpu_for_dense: bool, 419 | ) -> Optional[nn.Module]: 420 | if pooling not in self.pooling_registry: 421 | raise RuntimeError(f"{pooling} has not yet been registered as a pooling.") 422 | 423 | local_pooling_build_func = self.pooling_registry[ 424 | pooling 425 | ].local_pooling_build_func 426 | if local_pooling_build_func is None: 427 | if use_cpu_for_dense: 428 | return ToDevice("cpu") 429 | else: 430 | return None 431 | 432 | local_pooling_module = local_pooling_build_func() 433 | if use_cpu_for_dense: 434 | local_pooling_module = nn.Sequential( 435 | ToDevice("cpu"), 436 | local_pooling_module, 437 | ) 438 | else: 439 | local_pooling_module = local_pooling_module.to(device) 440 | 441 | return local_pooling_module 442 | 443 | def _build_dense_module( 444 | self, 445 | backbone: str, 446 | pooling: str, 447 | custom_dense: Optional[str], 448 | n_classes: int, 449 | device: Union[torch.device, str, int], 450 | use_cpu_for_dense: bool,
451 | ) -> nn.Module: 452 | if backbone not in self.backbone_registry: 453 | raise RuntimeError(f"{backbone} has not yet been registered as a backbone.") 454 | 455 | output_channels = self.backbone_registry[backbone].output_channels 456 | 457 | if pooling not in self.pooling_registry: 458 | raise RuntimeError(f"{pooling} has not yet been registered as a pooling.") 459 | 460 | pooling_module = self.pooling_registry[pooling].pooling_build_func() 461 | 462 | if custom_dense is None: 463 | dense_layer = nn.Linear(output_channels, n_classes, bias=True) 464 | with torch.no_grad(): 465 | dense_layer.weight.div_(10.0)  # scale down for small initial logits 466 | dense_layer.bias.div_(10.0) 467 | else: 468 | dense_layer = self.custom_dense_registry[ 469 | custom_dense 470 | ].custom_dense_build_func() 471 | 472 | dense_module = nn.Sequential( 473 | pooling_module, 474 | dense_layer, 475 | ) 476 | if not use_cpu_for_dense: 477 | dense_module = dense_module.to(device) 478 | 479 | return dense_module 480 | 481 | 482 | class _PlainModel(nn.Module): 483 | """ 484 | Plain model with an interface similar to Hms2Model. 485 | 486 | Args: 487 | See the descriptions in `Hms2Model`. 488 | """ 489 | 490 | def __init__( 491 | self, 492 | loader_module: nn.Module, 493 | conv_module: nn.Module, 494 | dense_module: nn.Module, 495 | local_pooling_module: Optional[nn.Module] = None, 496 | ): 497 | super().__init__() 498 | 499 | self.loader_module = loader_module 500 | self.conv_module = conv_module 501 | self.dense_module = dense_module 502 | self.local_pooling_module = local_pooling_module 503 | 504 | def forward(self, img_batch: torch.Tensor) -> torch.Tensor: 505 | """ 506 | Implementation of a plain model. 507 | """ 508 | if isinstance(img_batch, torch.Tensor): 509 | if len(img_batch.size()) != 4: 510 | raise ValueError("img_batch should have 4 dimensions") 511 | else: 512 | raise ValueError("img_batch should be torch.Tensor") 513 | 514 | loaded = self.loader_module(img_batch) 515 | conved = self.conv_module(loaded) 516 | if self.local_pooling_module is not None: 517 | local_pooled = self.local_pooling_module(conved) 518 | else: 519 | local_pooled = conved 520 | output = self.dense_module(local_pooled) 521 | 522 | return output 523 | -------------------------------------------------------------------------------- /hms2/core/custom_modules.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module contains custom modules. 3 | """ 4 | import abc 5 | from typing import Sequence, Tuple, Type, Union 6 | 7 | import cv2 8 | import numpy as np 9 | import scipy.linalg 10 | import torch 11 | import torch.nn as nn 12 | 13 | 14 | class BaseAugmentorModule(nn.Module, metaclass=abc.ABCMeta): 15 | @abc.abstractmethod 16 | def randomize(self) -> None: 17 | pass 18 | 19 | 20 | class HEDPerturbAugmentorModule(BaseAugmentorModule): 21 | """ 22 | An image augmentor that implements HED perturbing. 23 | 24 | Args: 25 | stain_angle (float): The maximal angle applied on perturbing the stain matrix. 26 | concentration_multiplier (tuple-like): 27 | A two-element tuple defining the scaling range of concentration perturbing. 28 | skip_background (bool): 29 | Skip this augmentation on background since it's unnecessary.
30 | """ 31 | 32 | def __init__( 33 | self, 34 | stain_angle: float = 10.0, 35 | concentration_multiplier: Tuple[float, float] = (0.5, 1.5), 36 | skip_background: bool = True, 37 | ): 38 | super().__init__() 39 | self.stain_angle = stain_angle 40 | self.concentration_multiplier = concentration_multiplier 41 | self.skip_background = skip_background 42 | 43 | self.eps = 1e-6 44 | rgb_from_hed = np.array( 45 | [ 46 | [0.65, 0.70, 0.29], 47 | [0.07, 0.99, 0.11], 48 | [0.27, 0.57, 0.78], 49 | ] 50 | ) 51 | self.hed_from_rgb = scipy.linalg.inv(rgb_from_hed) 52 | self.postfix = None 53 | 54 | def randomize(self) -> None: 55 | stain_angle_rad = np.radians(self.stain_angle) 56 | hed_from_rgb_aug = [] 57 | for stain_idx in range(self.hed_from_rgb.shape[1]): 58 | stain = self.hed_from_rgb[:, stain_idx] 59 | stain_rotation_vector = np.random.uniform( 60 | -stain_angle_rad, stain_angle_rad, size=(3,) 61 | ) 62 | stain_rotation_matrix, _ = cv2.Rodrigues(np.array([stain_rotation_vector])) 63 | stain_aug = np.matmul(stain_rotation_matrix, stain[:, np.newaxis]) 64 | hed_from_rgb_aug.append(stain_aug) 65 | hed_from_rgb_aug = np.concatenate(hed_from_rgb_aug, axis=1) 66 | rgb_from_hed_aug = scipy.linalg.inv(hed_from_rgb_aug) 67 | 68 | concentration_aug_matrix = np.diag( 69 | np.random.uniform(*self.concentration_multiplier, size=(3,)), 70 | ) 71 | 72 | # image_od_aug = image_od . hed_from_rgb . concentration_aug_matrix . 73 | # rgb_from_hed_aug 74 | postfix = np.matmul(concentration_aug_matrix, rgb_from_hed_aug) 75 | postfix = np.matmul(self.hed_from_rgb, postfix) 76 | self.postfix = postfix 77 | 78 | @torch.no_grad() 79 | def forward(self, image_batch: torch.Tensor) -> torch.Tensor: 80 | if self.postfix is None: 81 | raise RuntimeError("randomize() should be called before forward().") 82 | 83 | # When the image is all white, this augmentation will not make any change, so 84 | # skip it. 85 | if self.skip_background and torch.all(image_batch == 1.0).item(): 86 | return image_batch 87 | 88 | image_batch = torch.clamp(image_batch, min=self.eps) 89 | image_batch_od = torch.log(image_batch) / np.log(self.eps) 90 | image_batch_od = image_batch_od.permute(0, 2, 3, 1).contiguous() 91 | postfix = torch.tensor(self.postfix, dtype=torch.float32).to( 92 | image_batch_od.device 93 | ) 94 | image_batch_od_aug = torch.matmul(image_batch_od, postfix) 95 | image_batch_od_aug = image_batch_od_aug.permute(0, 3, 1, 2).contiguous() 96 | image_batch_od_aug = torch.clamp(image_batch_od_aug, min=0.0) 97 | image_batch_aug = torch.exp(image_batch_od_aug * np.log(self.eps)) 98 | image_batch_aug = torch.ceil(image_batch_aug * 255.0) / 255.0 99 | 100 | return image_batch_aug 101 | 102 | 103 | class FrozenBatchNorm2d(nn.BatchNorm2d): 104 | """ 105 | Batch normalization for 2D tensors with a frozen running mean and variance. Use the 106 | classmethod `convert_frozen_batchnorm` to rapidly convert a module containing batch 107 | normalization layers. 108 | 109 | Args: 110 | Refer to the descriptions in `torch.nn.BatchNorm2d`. 111 | """ 112 | 113 | _version = 1 114 | 115 | def __init__( 116 | self, 117 | num_features: int, 118 | eps: float = 1e-5, 119 | affine: bool = True, 120 | track_running_stats: bool = True, 121 | ): 122 | super().__init__( 123 | num_features=num_features, 124 | eps=eps, 125 | affine=affine, 126 | track_running_stats=track_running_stats, 127 | ) 128 | 129 | def forward(self, input_tensor: torch.Tensor) -> torch.Tensor: 130 | """ 131 | Forward operations. Mean and variance calculations are removed. 
132 | """ 133 | self._check_input_dim(input_tensor) 134 | 135 | output = nn.functional.batch_norm( 136 | input=input_tensor, 137 | running_mean=self.running_mean, 138 | running_var=self.running_var, 139 | weight=self.weight, 140 | bias=self.bias, 141 | training=False, 142 | eps=self.eps, 143 | ) 144 | 145 | return output 146 | 147 | @classmethod 148 | def convert_frozen_batchnorm( 149 | cls: Type["FrozenBatchNorm2d"], module: nn.Module 150 | ) -> nn.Module: 151 | """ 152 | Convert a module with batch normalization layers to frozen one. 153 | """ 154 | bn_module = ( 155 | nn.modules.batchnorm.BatchNorm2d, 156 | nn.modules.batchnorm.SyncBatchNorm, 157 | ) 158 | if isinstance(module, bn_module): 159 | frozen_bn = cls( 160 | num_features=module.num_features, 161 | eps=module.eps, 162 | affine=module.affine, 163 | track_running_stats=module.track_running_stats, 164 | ).to(device=next(module.parameters()).device) 165 | if module.affine: 166 | with torch.no_grad(): 167 | frozen_bn.weight.copy_(module.weight) 168 | frozen_bn.bias.copy_(module.bias) 169 | if module.track_running_stats: 170 | with torch.no_grad(): 171 | frozen_bn.running_mean.copy_(module.running_mean) 172 | frozen_bn.running_var.copy_(module.running_var) 173 | frozen_bn.num_batches_tracked.copy_(module.num_batches_tracked) 174 | module = frozen_bn 175 | else: 176 | for name, child in module.named_children(): 177 | new_child = cls.convert_frozen_batchnorm(child) 178 | if new_child is not child: 179 | module.add_module(name, new_child) 180 | 181 | return module 182 | 183 | 184 | class LogSumExpPool2d(nn.Module): 185 | def __init__(self, factor: float = 1.0): 186 | super().__init__() 187 | self.factor = factor 188 | 189 | def forward(self, inputs: torch.Tensor) -> torch.Tensor: 190 | _, _, height, width = inputs.shape 191 | 192 | max_pool = nn.functional.adaptive_max_pool2d(inputs, output_size=(1, 1)) 193 | exp = torch.exp(self.factor * (inputs - max_pool)) 194 | sumexp = torch.sum(exp, dim=(2, 3), keepdim=True) / (height * width) 195 | logsumexp = max_pool + torch.log(sumexp) / self.factor 196 | 197 | return logsumexp 198 | 199 | 200 | class PermuteLayer(nn.Module): 201 | def __init__(self, dims: Sequence[int]): 202 | super().__init__() 203 | self.dims = dims 204 | 205 | def forward(self, inputs: torch.Tensor) -> torch.Tensor: 206 | output = inputs.permute(*self.dims) 207 | return output 208 | 209 | 210 | class ScaleAndShift(nn.Module): 211 | __constants__ = ["scale", "bias"] 212 | 213 | def __init__(self, scale=1.0, bias=0.0): 214 | super().__init__() 215 | self.scale = scale 216 | self.bias = bias 217 | 218 | def forward(self, inputs): 219 | return inputs * self.scale + self.bias 220 | 221 | def extra_repr(self): 222 | return "scale={}, bias={}".format(self.scale, self.bias) 223 | 224 | 225 | class ToDevice(nn.Module): 226 | def __init__(self, device: Union[torch.device, str]): 227 | super().__init__() 228 | self.device = device 229 | 230 | def forward(self, input: torch.Tensor) -> torch.Tensor: 231 | output = input.to(self.device) 232 | return output 233 | -------------------------------------------------------------------------------- /hms2/core/loader_modules.py: -------------------------------------------------------------------------------- 1 | import abc 2 | import concurrent.futures 3 | from typing import Optional, Sequence, Tuple, Union 4 | 5 | import numpy as np 6 | import torch 7 | import torch.nn as nn 8 | import torchvision.transforms as transforms 9 | from PIL import Image 10 | 11 | from .custom_modules import 
BaseAugmentorModule 12 | 13 | 14 | class BaseLoaderModule(nn.Module, metaclass=abc.ABCMeta): 15 | """ 16 | An abstract loader module, handling region reading, augmentation, and CPU-GPU 17 | transfer. It accepts a torch.Tensor with NHWC format and uint8 type as an input 18 | image batch. The module returns a torch.Tensor on CUDA with the FP32 type and NCHW 19 | shape. For image augmentation, all the random variables should be kept as data 20 | members and re-randomized whenever `randomize` is called. 21 | """ 22 | 23 | @abc.abstractmethod 24 | def forward( 25 | self, 26 | image_batch: torch.Tensor, 27 | coord: Tuple[int, int], 28 | size: Tuple[int, int], 29 | ) -> torch.Tensor: 30 | """ 31 | Define the forward function to read a region from an image. 32 | 33 | Args: 34 | image_batch (torch.Tensor): A tensor with NHWC format and uint8 type. 35 | coord (Tuple[int, int]): A 2-element tuple defining (x, y). 36 | size (Tuple[int, int]): A 2-element tuple defining (width, height). 37 | 38 | Returns: 39 | region (torch.Tensor): A tensor in NCHW and FP32. 40 | """ 41 | 42 | @abc.abstractmethod 43 | def randomize(self) -> None: 44 | """ 45 | Randomize all the variables for augmentations. 46 | """ 47 | 48 | def hint_future_accesses( 49 | self, 50 | image_batch: torch.Tensor, 51 | coords: Sequence[Tuple[int, int]], 52 | sizes: Sequence[Tuple[int, int]], 53 | ) -> None: 54 | """ 55 | Hint the loader module about the regions to be requested in future accesses 56 | and their order. The order will be (image_batch[0], coords[0], sizes[0]) -> 57 | (image_batch[1], coords[0], sizes[0]) -> ... -> (image_batch[N - 1], 58 | coords[0], sizes[0]) -> (image_batch[0], coords[1], sizes[1]) -> ... 59 | 60 | Args: 61 | image_batch (torch.Tensor): 62 | The format is defined in each derived class. 63 | coords (list): 64 | A list of tuples, each of which is a 2-element tuple defining (x, y). 65 | sizes (list): 66 | A list of tuples, each of which is a 2-element tuple defining (w, h). 67 | """ 68 | 69 | def prefetch_next(self) -> None: 70 | """ 71 | Available once `hint_future_accesses` has been called. Tell the loader module 72 | to prefetch the next region. This method should be called before the next 73 | region is requested by `forward`, or an error will be raised. 74 | """ 75 | 76 | @abc.abstractmethod 77 | def record_snapshot(self) -> None: 78 | """ 79 | Start recording the snapshot for debugging. 80 | """ 81 | 82 | @abc.abstractmethod 83 | def get_snapshot(self) -> np.ndarray: 84 | """ 85 | Stop recording the snapshot and return it. 86 | 87 | Returns: 88 | snapshots: 89 | A batch of snapshots with the shape [B, H, W, 3], RGB uint8 format. 90 | """ 91 | 92 | 93 | class PlainLoaderModule(BaseLoaderModule): 94 | """ 95 | A plain loader module that simply does region reading, CPU-GPU data transfer, and 96 | normalization. If needed, augmentation should be done before an image is fed into 97 | this module.
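A minimal call sketch (tensor names are illustrative): given images, an NHWC uint8 batch, patch = PlainLoaderModule()(images, coord=(0, 0), size=(512, 512)) returns an NCHW FP32 patch, normalized and placed on the module's device.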
98 | """ 99 | 100 | def __init__(self): 101 | super().__init__() 102 | 103 | self.register_buffer("device_indicator", torch.empty(0)) 104 | 105 | self.prefetch_idx = 0 106 | self.prefetch_image_batch = None 107 | self.prefetch_coords = None 108 | self.prefetch_sizes = None 109 | self.prefetch_thread_pool = concurrent.futures.ThreadPoolExecutor(max_workers=1) 110 | self.prefetch_task = None 111 | 112 | self.snapshot_enabled = False 113 | self.snapshots = [] 114 | 115 | def forward( 116 | self, 117 | image_batch: torch.Tensor, 118 | coord: Tuple[int, int], 119 | size: Tuple[int, int], 120 | ) -> torch.Tensor: 121 | """ 122 | See the description in `BaseLoaderModule.forward`. 123 | """ 124 | if self.prefetch_task is not None: 125 | # If hint_future_accesses was called. 126 | if ( 127 | coord != self.prefetch_coords[self.prefetch_idx] 128 | or size != self.prefetch_sizes[self.prefetch_idx] 129 | or image_batch is not self.prefetch_image_batch 130 | ): 131 | raise ValueError( 132 | "The arguments of hint_future_accesses does not consist with those" 133 | " of forward." 134 | ) 135 | patch = self.prefetch_task.result() 136 | 137 | self.prefetch_next() 138 | else: 139 | # If hint_future_accesses was not called. 140 | patch = self._read_region( 141 | image_batch, 142 | coord, 143 | size, 144 | self.device_indicator.device, 145 | ) 146 | 147 | return patch 148 | 149 | def randomize(self) -> None: 150 | """ 151 | Do nothing since no random variable exist in this loader module. 152 | """ 153 | 154 | def hint_future_accesses( 155 | self, 156 | image_batch: torch.Tensor, 157 | coords: Sequence[Tuple[int, int]], 158 | sizes: Sequence[Tuple[int, int]], 159 | ) -> None: 160 | """ 161 | See the description in `BaseLoaderModule.hint_future_accesses`. 162 | """ 163 | self.prefetch_idx = -1 164 | self.prefetch_image_batch = image_batch 165 | self.prefetch_coords = coords 166 | self.prefetch_sizes = sizes 167 | 168 | self.prefetch_next() 169 | 170 | def prefetch_next(self) -> None: 171 | """ 172 | See the description in `BaseLoaderModule.prefetch_next`. 173 | """ 174 | self.prefetch_idx += 1 175 | if self.prefetch_idx < len(self.prefetch_coords): 176 | next_coord = self.prefetch_coords[self.prefetch_idx] 177 | next_size = self.prefetch_sizes[self.prefetch_idx] 178 | 179 | if self.prefetch_task is not None: 180 | self.prefetch_task.cancel() 181 | self.prefetch_task = self.prefetch_thread_pool.submit( 182 | self._read_region, 183 | self.prefetch_image_batch, 184 | next_coord, 185 | next_size, 186 | self.device_indicator.device, 187 | ) 188 | else: 189 | self.prefetch_task = None 190 | 191 | def record_snapshot(self) -> None: 192 | """ 193 | See the description in `BaseLoaderModule.record_snapshot`. 194 | """ 195 | self.snapshot_enabled = True 196 | 197 | def get_snapshot(self) -> np.ndarray: 198 | """ 199 | See the description in `BaseLoaderModule.get_snapshot`. 
200 | """ 201 | width = max( 202 | [snapshot["coord"][0] + snapshot["size"][0] for snapshot in self.snapshots] 203 | ) 204 | height = max( 205 | [snapshot["coord"][1] + snapshot["size"][1] for snapshot in self.snapshots] 206 | ) 207 | 208 | batch_size = 0 209 | for snapshot in self.snapshots: 210 | batch_size_this = int(snapshot["patch_batch"].shape[0]) 211 | if batch_size == 0: 212 | batch_size = batch_size_this 213 | elif batch_size != batch_size_this: 214 | raise RuntimeError("Batch sizes are not consistent in the snapshots.") 215 | 216 | canvases = [] 217 | for idx in range(batch_size): 218 | canvas = Image.new( 219 | mode="RGB", 220 | size=(width, height), 221 | color=(0, 255, 0), 222 | ) 223 | for snapshot in self.snapshots: 224 | patch = Image.fromarray(snapshot["patch_batch"][idx]) 225 | box = ( 226 | snapshot["coord"][0], 227 | snapshot["coord"][1], 228 | snapshot["coord"][0] + snapshot["size"][0], 229 | snapshot["coord"][1] + snapshot["size"][1], 230 | ) 231 | canvas.paste(patch, box=box) 232 | canvas = np.array(canvas) 233 | canvases.append(canvas) 234 | canvases_array = np.array(canvases) 235 | 236 | self.snapshot_enabled = False 237 | self.snapshots = [] 238 | 239 | return canvases_array 240 | 241 | def __del__(self) -> None: 242 | self.prefetch_thread_pool.shutdown() 243 | 244 | @torch.no_grad() 245 | def _read_region( 246 | self, 247 | image_batch: torch.Tensor, 248 | coord: Tuple[int, int], 249 | size: Tuple[int, int], 250 | device: Union[torch.device, str, None], 251 | ) -> torch.Tensor: 252 | patch = image_batch[ 253 | :, 254 | coord[1] : coord[1] + size[1], 255 | coord[0] : coord[0] + size[0], 256 | :, 257 | ] 258 | patch = patch.to(device=device) # To GPU 259 | patch = patch.permute(0, 3, 1, 2).contiguous() # To NCHW 260 | patch = patch.float().div(255.0) # To FP32 261 | 262 | if self.snapshot_enabled: 263 | patch_snapshot = ( 264 | (patch * 255.0) 265 | .to(torch.uint8) 266 | .permute(0, 2, 3, 1) 267 | .contiguous() 268 | .cpu() 269 | .numpy() 270 | ) 271 | self.snapshots.append( 272 | { 273 | "coord": coord, 274 | "size": size, 275 | "patch_batch": patch_snapshot, 276 | } 277 | ) 278 | 279 | patch = transforms.functional.normalize( 280 | tensor=patch, 281 | mean=[0.485, 0.456, 0.406], 282 | std=[0.229, 0.224, 0.225], 283 | ) 284 | return patch 285 | 286 | 287 | class GPUAugmentationLoaderModule(PlainLoaderModule): 288 | """ 289 | A loader module that does region reading, CPU-GPU data transfer, on-GPU 290 | augmentation, and normalization. 291 | 292 | Args: 293 | random_rotation (bool): Enable random rotation. Default is True. 294 | random_translation (tuple or NoneType): 295 | A tuple with 2 elements (x, y). Set None to disalbe the augmentation. 296 | Default is (-32.0, 32.0). 297 | random_flip (bool): Enable random flipping. Default is True. 298 | other_augmentations (sequence or NoneType): 299 | A list of torch.nn.Module. Each consumes a torch.Tensor as an input image 300 | batch with NCHW, FP32, [0.0, 1.0] formats, and produces a torch.Tensor 301 | with the same shape and format. Both tensors are on GPU. These modeuls 302 | should implement a randomize() method. Set None (default) to indicate no 303 | further augmentations to apply. 304 | skip_background_tile_aug (bool): 305 | Skip unnecessary augmentations, including rotation, translation, and 306 | flipping, for background tiles. 
307 | """ 308 | 309 | def __init__( 310 | self, 311 | random_rotation: bool = True, 312 | random_translation: Optional[Tuple[float, float]] = (-32.0, 32.0), 313 | random_flip: bool = True, 314 | other_augmentations: Optional[Sequence[BaseAugmentorModule]] = None, 315 | skip_background_tile_aug: bool = True, 316 | ): 317 | super().__init__() 318 | self.random_rotation = random_rotation 319 | self.random_translation = random_translation 320 | self.random_flip = random_flip 321 | self.other_augmentations = ( 322 | other_augmentations if other_augmentations is not None else [] 323 | ) 324 | self.skip_background_tile_aug = skip_background_tile_aug 325 | 326 | self.rotation_angle = 0.0 327 | self.translation_pixels = np.zeros(shape=[2]) 328 | self.do_flip = False 329 | self.affine_matrix = np.identity(3) 330 | 331 | def randomize(self) -> None: 332 | if self.random_rotation: 333 | self.rotation_angle = np.random.uniform(-180.0, 180.0) 334 | if self.random_translation: 335 | self.translation_pixels = np.random.uniform( 336 | self.random_translation[0], 337 | self.random_translation[1], 338 | size=(2,), 339 | ) 340 | if self.random_flip: 341 | self.do_flip = np.random.rand() > 0.5 342 | for module in self.other_augmentations: 343 | module.randomize() 344 | 345 | self.affine_matrix = self._calculate_affine_matrix() 346 | 347 | def _calculate_affine_matrix(self) -> np.ndarray: 348 | """ 349 | The order of the augmentations is rotate -> translate -> flip. 350 | Calculating affine_matrix should be in the reverse way. 351 | """ 352 | affine_matrix = np.identity(3) 353 | 354 | if self.do_flip: 355 | transform = np.array( 356 | [ 357 | [-1.0, 0.0, 0.0], 358 | [0.0, 1.0, 0.0], 359 | [0.0, 0.0, 1.0], 360 | ], 361 | ) 362 | affine_matrix = np.matmul(transform, affine_matrix) 363 | 364 | if self.random_translation: 365 | transform = np.array( 366 | [ 367 | [1.0, 0.0, -self.translation_pixels[0]], 368 | [0.0, 1.0, -self.translation_pixels[1]], 369 | [0.0, 0.0, 1.0], 370 | ] 371 | ) 372 | affine_matrix = np.matmul(transform, affine_matrix) 373 | 374 | if self.random_rotation: 375 | angle_in_rad = np.radians(self.rotation_angle) 376 | transform = np.array( 377 | [ 378 | [np.cos(angle_in_rad), -np.sin(angle_in_rad), 0.0], 379 | [np.sin(angle_in_rad), np.cos(angle_in_rad), 0.0], 380 | [0.0, 0.0, 1.0], 381 | ], 382 | ) 383 | affine_matrix = np.matmul(transform, affine_matrix) 384 | 385 | return affine_matrix 386 | 387 | @torch.no_grad() 388 | def _read_region( 389 | self, 390 | image_batch: torch.Tensor, 391 | coord: Tuple[int, int], 392 | size: Tuple[int, int], 393 | device: Union[torch.device, str, None], 394 | ): 395 | # If in evaluation mode, disable the augmentations. 
396 | if not self.training: 397 | return super()._read_region(image_batch, coord, size, device) 398 | 399 | # Calculate the region center relative to the image center 400 | batch_size, height, width, channels = image_batch.shape 401 | coord = np.array(coord) 402 | size = np.array(size) 403 | image_center = np.array([width, height]) / 2.0 404 | region_center = coord + size / 2.0 405 | norm_region_center = region_center - image_center 406 | 407 | # Get the new region center after an affine transformation 408 | new_norm_region_center = np.matmul( 409 | self.affine_matrix, 410 | np.array([norm_region_center[0], norm_region_center[1], 1.0]), 411 | )[:2] 412 | new_region_center = new_norm_region_center + image_center 413 | 414 | # Calculate the coordinates of the region to read 415 | min_read_size = np.max(size * np.sqrt(2.0)) 416 | read_l = np.floor(new_region_center[0] - min_read_size / 2.0).astype(np.int32) 417 | read_r = np.ceil(new_region_center[0] + min_read_size / 2.0).astype(np.int32) 418 | read_t = np.floor(new_region_center[1] - min_read_size / 2.0).astype(np.int32) 419 | read_b = np.ceil(new_region_center[1] + min_read_size / 2.0).astype(np.int32) 420 | new_region_center_patch = new_region_center - np.array([read_l, read_t]) 421 | 422 | is_background_tile = None 423 | if read_l > width or read_r < 0 or read_t > height or read_b < 0: 424 | # When the reading region is totally out-of-range, just create a blank 425 | # tensor on GPU. 426 | is_background_tile = True 427 | patch = torch.full( 428 | size=( 429 | batch_size, 430 | channels, 431 | read_b - read_t, 432 | read_r - read_l, 433 | ), 434 | fill_value=1.0, 435 | device=device, 436 | ) 437 | else: 438 | # When the reading region has content or is only partially out-of-range, 439 | # crop the valid region, send it to GPU, and pad the rest with blank. 440 | 441 | # Deal with the partially out-of-range case 442 | patch_width = read_r - read_l 443 | patch_height = read_b - read_t 444 | pad_l = np.maximum(0, -read_l) 445 | read_l = np.maximum(0, read_l) 446 | pad_r = np.maximum(0, read_r - width) 447 | read_r = np.minimum(width, read_r) 448 | pad_t = np.maximum(0, -read_t) 449 | read_t = np.maximum(0, read_t) 450 | pad_b = np.maximum(0, read_b - height) 451 | read_b = np.minimum(height, read_b) 452 | 453 | # Read the region 454 | patch = image_batch[ 455 | :, 456 | read_t:read_b, 457 | read_l:read_r, 458 | :, 459 | ] 460 | 461 | # Determine if patch is blank. 462 | if patch.nelement() == 0 or torch.min(patch) == 255: 463 | # If so, just create a blank tensor on GPU. 464 | is_background_tile = True 465 | patch = torch.full( 466 | size=( 467 | batch_size, 468 | channels, 469 | patch_height, 470 | patch_width, 471 | ), 472 | fill_value=1.0, 473 | device=device, 474 | ) 475 | else: 476 | # Send the patch to GPU.
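# (The pad amounts computed above make partially out-of-range reads # well-defined: e.g., if read_l = -10, then pad_l = 10 and read_l is clamped # to 0, so ten white columns are padded back on the left below.)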
477 | is_background_tile = False 478 | patch = patch.to(device=device) # To GPU 479 | patch = patch.permute(0, 3, 1, 2).contiguous() # To NCHW 480 | patch = patch.float().div(255.0) # To FP32 481 | 482 | # Pad with white for out-of-range region reads 483 | patch = nn.functional.pad( 484 | patch, 485 | pad=(pad_l, pad_r, pad_t, pad_b), 486 | mode="constant", 487 | value=1.0, 488 | ) 489 | 490 | # Rotate the patch if needed 491 | if self.random_rotation and not is_background_tile: 492 | patch = transforms.functional.rotate( 493 | img=patch, 494 | angle=self.rotation_angle, 495 | interpolation=transforms.InterpolationMode.BILINEAR, 496 | center=list(new_region_center_patch), 497 | fill=1.0, 498 | ) 499 | 500 | # Translate the patch 501 | if not is_background_tile: 502 | translate = size / 2.0 - new_region_center_patch 503 | patch = transforms.functional.affine( 504 | patch, 505 | angle=0.0, 506 | translate=list(translate), 507 | scale=1.0, 508 | shear=0.0, 509 | interpolation=transforms.InterpolationMode.BILINEAR, 510 | fill=1.0, 511 | ) 512 | 513 | # Crop out the real region 514 | patch = patch[:, :, : size[1], : size[0]] 515 | 516 | # Flip the patch if needed 517 | if self.do_flip and not is_background_tile: 518 | patch = torch.flip(patch, dims=(3,)) 519 | 520 | # Apply other augmentations 521 | for module in self.other_augmentations: 522 | patch = module(patch) 523 | 524 | if self.snapshot_enabled: 525 | patch_snapshot = ( 526 | (patch * 255.0) 527 | .to(torch.uint8) 528 | .permute(0, 2, 3, 1) 529 | .contiguous() 530 | .cpu() 531 | .numpy() 532 | ) 533 | self.snapshots.append( 534 | { 535 | "coord": coord, 536 | "size": size, 537 | "patch_batch": patch_snapshot, 538 | } 539 | ) 540 | 541 | # Normalize the patch 542 | patch = transforms.functional.normalize( 543 | tensor=patch, 544 | mean=[0.485, 0.456, 0.406], 545 | std=[0.229, 0.224, 0.225], 546 | ) 547 | 548 | return patch 549 | 550 | 551 | class NoLoaderModule(BaseLoaderModule): 552 | """ 553 | A loader module that simply does normalization. 554 | """ 555 | 556 | def __init__(self, augmentations: Optional[Sequence[nn.Module]] = None): 557 | super().__init__() 558 | 559 | self.augmentations = nn.ModuleList( 560 | augmentations if augmentations is not None else [] 561 | ) 562 | 563 | self.register_buffer("device_indicator", torch.empty(0)) 564 | 565 | def randomize(self): 566 | """ 567 | Do nothing, since no random variables exist in this loader module. 568 | """ 569 | 570 | @torch.no_grad() 571 | def forward( 572 | self, 573 | image_batch: torch.Tensor, 574 | ) -> torch.Tensor: 575 | """ 576 | See the description in `BaseLoaderModule.forward`. 577 | """ 578 | # Do augmentations 579 | for augmentation in self.augmentations: 580 | image_batch = augmentation(image_batch) 581 | 582 | # Do normalization 583 | image_batch = image_batch.permute(0, 3, 1, 2).contiguous() # To NCHW 584 | image_batch = image_batch.float().div(255.0) # To FP32 585 | image_batch = image_batch - torch.tensor( 586 | [0.485, 0.456, 0.406], 587 | device=image_batch.device, 588 | ).view(-1, 1, 1) 589 | image_batch = image_batch / torch.tensor( 590 | [0.229, 0.224, 0.225], 591 | device=image_batch.device, 592 | ).view(-1, 1, 1) 593 | return image_batch 594 | 595 | def record_snapshot(self) -> None: 596 | """ 597 | See the description in `BaseLoaderModule.record_snapshot`. 598 | """ 599 | raise NotImplementedError() 600 | 601 | def get_snapshot(self) -> np.ndarray: 602 | """ 603 | See the description in `BaseLoaderModule.get_snapshot`.
604 | """ 605 | raise NotImplementedError() 606 | -------------------------------------------------------------------------------- /hms2/core/model.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from typing import Any, Optional, Sequence, Tuple 3 | 4 | import numpy as np 5 | import torch 6 | from torch import nn 7 | 8 | from .loader_modules import BaseLoaderModule 9 | 10 | 11 | class Hms2Model(nn.Module): 12 | """ 13 | A torch module implementing HMS2. 14 | 15 | Args: 16 | loader_module (BaseLoaderModule): 17 | A module handling region reading, augmentation, and CPU-GPU transfer. 18 | Please refer to the description in BaseLoaderModule. 19 | conv_module (torch.nn.Module): 20 | A module implements the convolutional part of the model. It should be on 21 | CUDA. 22 | dense_module (torch.nn.Module): 23 | A module implements the dense part of the model. It should be on CUDA. 24 | local_pooling_module (torch.nn.Module): 25 | A module applies a pooling operation right after each tile of the embedding 26 | feature map is produced. The default value None disables local pooling. It 27 | should be on CUDA. 28 | tile_size (int): 29 | The tile size for HMS2. Decrease this value if GPU OOM happens. 30 | emb_crop_size (int): 31 | The cropping size for embedding. The default value 7 is for ResNet 50. 32 | emb_stride_size (int): 33 | The striding size of the receptive fields of two neighboring embedding 34 | vectors. The default value 32 is for ResNet 50. 35 | skip_no_grad (bool): 36 | Skip backward computations of a tile when the gradients w.r.t. the tile 37 | are zero. The default is True. 38 | cache_background_forward (bool): 39 | Cache forward results of background tiles to skip re-computations. The 40 | default is True. 41 | cache_background_backward (bool): 42 | Cache backward results of background tiles to skip re-computations. 43 | The default is True. 44 | """ 45 | 46 | def __init__( 47 | self, 48 | loader_module: nn.Module, 49 | conv_module: nn.Module, 50 | dense_module: nn.Module, 51 | local_pooling_module: Optional[nn.Module] = None, 52 | tile_size: int = 4096, 53 | emb_crop_size: int = 7, 54 | emb_stride_size: int = 32, 55 | skip_no_grad: bool = True, 56 | cache_background_forward: bool = True, 57 | cache_background_backward: bool = True, 58 | ): 59 | super().__init__() 60 | 61 | if not isinstance(loader_module, BaseLoaderModule): 62 | raise ValueError("loader_module should be an instance of BaseLoaderModule.") 63 | 64 | self.loader_module = loader_module 65 | self.conv_module = conv_module 66 | self.dense_module = dense_module 67 | self.local_pooling_module = local_pooling_module 68 | self.tile_size = tile_size 69 | self.emb_crop_size = emb_crop_size 70 | self.emb_stride_size = emb_stride_size 71 | self.skip_no_grad = skip_no_grad 72 | self.cache_background_forward = cache_background_forward 73 | self.cache_background_backward = cache_background_backward 74 | 75 | def forward(self, image_batch: torch.Tensor) -> torch.Tensor: 76 | """ 77 | Implement how tensors flow in a HMS2 model. 78 | 79 | Args: 80 | image_batch (torch.Tensor): An image batch in NHWC and uint8 dtype. 81 | 82 | Returns: 83 | output (torch.Tensor): The output of dense_module. 
84 | """ 85 | self.loader_module.randomize() 86 | conv_output = _Hms2Convolutional.apply( 87 | image_batch, 88 | _Hms2ConvolutionalArguments( 89 | loader_module=self.loader_module, 90 | conv_module=self.conv_module, 91 | local_pooling_module=self.local_pooling_module, 92 | tile_size=self.tile_size, 93 | emb_crop_size=self.emb_crop_size, 94 | emb_stride_size=self.emb_stride_size, 95 | skip_no_grad=self.skip_no_grad, 96 | cache_background_forward=self.cache_background_forward, 97 | cache_background_backward=self.cache_background_backward, 98 | ), 99 | *self.conv_module.parameters(), 100 | ) 101 | output = self.dense_module(conv_output) 102 | 103 | return output 104 | 105 | 106 | @dataclass 107 | class _Hms2ConvolutionalArguments: 108 | """ 109 | Arguments for `_Hms2Convolutional`. 110 | 111 | Args: 112 | See descriptions in `Hms2Model`. 113 | """ 114 | 115 | loader_module: BaseLoaderModule 116 | conv_module: nn.Module 117 | local_pooling_module: Optional[nn.Module] 118 | tile_size: int 119 | emb_crop_size: int 120 | emb_stride_size: int 121 | skip_no_grad: bool 122 | cache_background_forward: bool 123 | cache_background_backward: bool 124 | 125 | 126 | class _Hms2Convolutional(torch.autograd.Function): 127 | """ 128 | The core part of HMS2 that implements tiling in the convolutional part and backward 129 | re-computations. Using torch.autograd.Function instead of torch.nn.Module is 130 | because only torch.autograd.Function can rewrite custom backward operations. 131 | """ 132 | 133 | @staticmethod 134 | def forward( 135 | ctx: Any, 136 | image_batch: torch.Tensor, 137 | arguments: _Hms2ConvolutionalArguments, 138 | *conv_parameters: torch.Tensor, 139 | ) -> torch.Tensor: 140 | """ 141 | HMS2 forward-convolutional. 142 | 143 | Args: 144 | ctx (Any): 145 | See PyTorch documentations. 146 | image_batch (torch.Tensor): An image batch in NHWC and uint8 dtype. 147 | arguments (_Hms2ConvolutionalArguments): 148 | See descriptions in `_Hms2ConvolutionalArguments`. 149 | conv_parameters (list of torch.Tensor): 150 | A list retrieved by calling `conv_module.parameters()`. 151 | 152 | Returns: 153 | emb (torch.Tensor): The resulting embedding feature map. 154 | """ 155 | # Save parameters 156 | ctx.image_batch = image_batch 157 | ctx.arguments = arguments 158 | ctx.conv_parameters = conv_parameters 159 | 160 | # Load arguments 161 | loader_module = arguments.loader_module 162 | 163 | # Create a background tile cache if required 164 | if arguments.cache_background_forward: 165 | background_tile_cache_forward = _BackgroundTileCache() 166 | 167 | # Calculate the tile number 168 | tile_dimensions = _Hms2Convolutional._compute_tile_dimensions( 169 | image_batch, 170 | arguments, 171 | ) 172 | 173 | # Hint loader module the future accesses. 
174 | _Hms2Convolutional._hint_loader_module( 175 | image_batch, 176 | tile_dimensions, 177 | arguments, 178 | ) 179 | 180 | # Forward convolutional 181 | with torch.no_grad(): # Do not store any feature maps 182 | # Iterate tiles 183 | emb_tiles = [] 184 | for tile_y in range(tile_dimensions[1]): 185 | emb_tiles_row = [] 186 | for tile_x in range(tile_dimensions[0]): 187 | # Load image tile 188 | ( 189 | tile_coord, 190 | tile_size, 191 | ) = _Hms2Convolutional._compute_image_tile_coord( 192 | image_batch, 193 | arguments, 194 | (tile_x, tile_y), 195 | ) 196 | image_tile_batch = loader_module( 197 | image_batch, 198 | tile_coord, 199 | tile_size, 200 | ) 201 | 202 | # Do forward 203 | emb_tile = _Hms2Convolutional._forward_tile( 204 | image_tile_batch, 205 | arguments, 206 | (tile_x, tile_y), 207 | tile_dimensions, 208 | background_tile_cache_forward=( 209 | background_tile_cache_forward 210 | if arguments.cache_background_forward 211 | else None 212 | ), 213 | ) 214 | emb_tiles_row.append(emb_tile) 215 | emb_tiles.append(emb_tiles_row) 216 | 217 | # Compute the look-up table for the coordinates of embedding tiles 218 | emb_tile_coord_lut = _Hms2Convolutional._compute_emb_tile_coord_lut( 219 | emb_tiles 220 | ) 221 | 222 | # Concatenate tiles to get the embedding feature map 223 | emb_rows = [torch.cat(emb_tiles_row, dim=3) for emb_tiles_row in emb_tiles] 224 | emb = torch.cat(emb_rows, dim=2) 225 | 226 | # Save the look-up table 227 | ctx.emb_tile_coord_lut = emb_tile_coord_lut 228 | 229 | return emb 230 | 231 | @staticmethod 232 | def backward( 233 | ctx: Any, 234 | grad_emb: torch.Tensor, 235 | ) -> Sequence[Optional[torch.Tensor]]: 236 | """ 237 | HMS2 backward-convolutional. 238 | 239 | Args: 240 | ctx (Any): 241 | See the PyTorch documentation. 242 | grad_emb (torch.Tensor): The gradients w.r.t. the embedding feature map. 243 | 244 | Returns: 245 | grad_image_batch (NoneType): Remains None. 246 | grad_arguments (NoneType): Remains None. 247 | grad_conv_parameters (tuple): 248 | A tuple of the gradients w.r.t. parameters in the convolutional module. 249 | """ 250 | # Load saved parameters 251 | image_batch = ctx.image_batch 252 | arguments = ctx.arguments 253 | conv_parameters = ctx.conv_parameters 254 | emb_tile_coord_lut = ctx.emb_tile_coord_lut 255 | 256 | # Load arguments 257 | loader_module = arguments.loader_module 258 | cache_background_backward = arguments.cache_background_backward 259 | 260 | # Create a background tile cache if required 261 | if arguments.cache_background_backward: 262 | background_tile_cache_backward = _BackgroundTileCache() 263 | 264 | # Calculate the tile number 265 | tile_dimensions = _Hms2Convolutional._compute_tile_dimensions( 266 | image_batch, 267 | arguments, 268 | ) 269 | 270 | # Hint the loader module about future accesses. 271 | _Hms2Convolutional._hint_loader_module( 272 | image_batch, 273 | tile_dimensions, 274 | arguments, 275 | ) 276 | 277 | # Iterate tiles 278 | grad_conv_parameters = [ 279 | torch.zeros_like(parameter, device=parameter.device) 280 | for parameter in ctx.conv_parameters 281 | ] 282 | for tile_y in range(tile_dimensions[1]): 283 | for tile_x in range(tile_dimensions[0]): 284 | with torch.enable_grad(): 285 | # Get the gradients w.r.t.
the embedding tile 286 | ( 287 | grad_emb_tile_coord, 288 | grad_emb_tile_size, 289 | ) = _Hms2Convolutional._use_emb_tile_coord_lut( 290 | emb_tile_coord_lut, 291 | (tile_x, tile_y), 292 | ) 293 | grad_emb_tile = grad_emb[ 294 | :, 295 | :, 296 | grad_emb_tile_coord[1] : grad_emb_tile_coord[1] 297 | + grad_emb_tile_size[1], 298 | grad_emb_tile_coord[0] : grad_emb_tile_coord[0] 299 | + grad_emb_tile_size[0], 300 | ] 301 | 302 | # Skip this tile if all the gradients are 0 303 | if ( 304 | arguments.skip_no_grad 305 | and torch.count_nonzero(grad_emb_tile).item() == 0 306 | ): 307 | _Hms2Convolutional._prefetch_next(arguments) 308 | continue 309 | 310 | # Load image tile 311 | ( 312 | tile_coord, 313 | tile_size, 314 | ) = _Hms2Convolutional._compute_image_tile_coord( 315 | image_batch, 316 | arguments, 317 | (tile_x, tile_y), 318 | ) 319 | image_tile_batch = loader_module( 320 | image_batch, 321 | tile_coord, 322 | tile_size, 323 | ) 324 | 325 | # If caching is enabled and the tile is not on the 326 | # edge, look up background_tile_cache_backward to get gradients. 327 | # If not found, return None. 328 | partial_grad_conv_parameters = None 329 | if ( 330 | cache_background_backward 331 | and tile_y not in [0, tile_dimensions[1] - 1] 332 | and tile_x not in [0, tile_dimensions[0] - 1] 333 | ): 334 | partial_grad_conv_parameters = background_tile_cache_backward[ 335 | image_tile_batch 336 | ] 337 | 338 | if partial_grad_conv_parameters is None: 339 | # Re-compute forward convolutional. Background tile cache 340 | # should be always disabled because we need gradients. 341 | emb_tile = _Hms2Convolutional._forward_tile( 342 | image_tile_batch, 343 | arguments, 344 | (tile_x, tile_y), 345 | tile_dimensions, 346 | background_tile_cache_forward=None, 347 | ) 348 | 349 | # Compute the partial gradients w.r.t. the parameters in the 350 | # convolutional module 351 | partial_grad_conv_parameters = torch.autograd.grad( 352 | [emb_tile], 353 | conv_parameters, 354 | [grad_emb_tile], 355 | ) 356 | 357 | # Update the cache 358 | if ( 359 | cache_background_backward 360 | and tile_y not in [0, tile_dimensions[1] - 1] 361 | and tile_x not in [0, tile_dimensions[0] - 1] 362 | ): 363 | background_tile_cache_backward[ 364 | image_tile_batch 365 | ] = partial_grad_conv_parameters 366 | 367 | with torch.no_grad(): 368 | # Accumulate partial gradients 369 | for idx, partial_grad_conv_parameter in enumerate( 370 | partial_grad_conv_parameters 371 | ): 372 | grad_conv_parameters[idx] += partial_grad_conv_parameter 373 | 374 | return (None, None) + tuple(grad_conv_parameters) 375 | 376 | @staticmethod 377 | def _forward_tile( 378 | image_tile_batch: torch.Tensor, 379 | arguments: _Hms2ConvolutionalArguments, 380 | tile_indices: Tuple[int, int], 381 | tile_dimensions: Tuple[int, int], 382 | background_tile_cache_forward: Optional["_BackgroundTileCache"] = None, 383 | ) -> torch.Tensor: 384 | # Get arguments 385 | conv_module = arguments.conv_module 386 | local_pooling_module = arguments.local_pooling_module 387 | emb_crop_size = arguments.emb_crop_size 388 | 389 | # Look up background_tile_cache_forward to get emb_tile. If not found, return 390 | # None. 
391 | emb_tile = None 392 | if background_tile_cache_forward is not None: 393 | emb_tile = background_tile_cache_forward[image_tile_batch] 394 | 395 | # Do convolutions when cache miss 396 | if emb_tile is None: 397 | emb_tile = conv_module(image_tile_batch) 398 | if background_tile_cache_forward is not None: 399 | background_tile_cache_forward[image_tile_batch] = emb_tile 400 | 401 | # Crop invalid borders 402 | tile_x, tile_y = tile_indices 403 | _, _, emb_tile_height, emb_tile_width = emb_tile.shape 404 | left = emb_crop_size if tile_x != 0 else 0 405 | right = -emb_crop_size if tile_x != tile_dimensions[0] - 1 else emb_tile_width 406 | top = emb_crop_size if tile_y != 0 else 0 407 | bottom = -emb_crop_size if tile_y != tile_dimensions[1] - 1 else emb_tile_height 408 | emb_tile = emb_tile[ 409 | :, 410 | :, 411 | top:bottom, 412 | left:right, 413 | ] 414 | 415 | # Local pooling 416 | if local_pooling_module is not None: 417 | emb_tile = local_pooling_module(emb_tile) 418 | 419 | return emb_tile 420 | 421 | @staticmethod 422 | def _compute_tile_dimensions( 423 | image_batch: torch.Tensor, 424 | arguments: _Hms2ConvolutionalArguments, 425 | ) -> Tuple[int, int]: 426 | # Get arguments 427 | tile_size = arguments.tile_size 428 | emb_crop_size = arguments.emb_crop_size 429 | emb_stride_size = arguments.emb_stride_size 430 | 431 | # Compute tile dimensions 432 | _, height, width, _ = image_batch.shape 433 | overlapping_size = emb_crop_size * emb_stride_size * 2 434 | tile_width = ( 435 | max(0, int(np.ceil((width - tile_size) / (tile_size - overlapping_size)))) 436 | + 1 437 | ) 438 | tile_height = ( 439 | max(0, int(np.ceil((height - tile_size) / (tile_size - overlapping_size)))) 440 | + 1 441 | ) 442 | 443 | return (tile_width, tile_height) 444 | 445 | @staticmethod 446 | def _hint_loader_module( 447 | image_batch: torch.Tensor, 448 | tile_dimensions: Tuple[int, int], 449 | arguments: _Hms2ConvolutionalArguments, 450 | ) -> None: 451 | # Get arguments 452 | loader_module = arguments.loader_module 453 | 454 | # Calculate tile coordinates and sizes that will be accessed, and hint the 455 | # loader. 
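# For example, an 80000-pixel-wide input with tile_size=4096 and a 448-pixel # overlap needs ceil((80000 - 4096) / 3648) + 1 = 22 tiles across.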
456 | tile_coords = [] 457 | tile_sizes = [] 458 | for tile_y in range(tile_dimensions[1]): 459 | for tile_x in range(tile_dimensions[0]): 460 | tile_coord, tile_size = _Hms2Convolutional._compute_image_tile_coord( 461 | image_batch, 462 | arguments, 463 | (tile_x, tile_y), 464 | ) 465 | tile_coords.append(tile_coord) 466 | tile_sizes.append(tile_size) 467 | 468 | loader_module.hint_future_accesses(image_batch, tile_coords, tile_sizes) 469 | 470 | @staticmethod 471 | def _prefetch_next(arguments: _Hms2ConvolutionalArguments) -> None: 472 | loader_module = arguments.loader_module 473 | loader_module.prefetch_next() 474 | 475 | @staticmethod 476 | def _compute_image_tile_coord( 477 | image_batch: torch.Tensor, 478 | arguments: _Hms2ConvolutionalArguments, 479 | tile_indices: Tuple[int, int], 480 | ) -> Tuple[Tuple[int, int], Tuple[int, int]]: 481 | # Get arguments 482 | tile_size = arguments.tile_size 483 | emb_crop_size = arguments.emb_crop_size 484 | emb_stride_size = arguments.emb_stride_size 485 | 486 | # Compute coord and size 487 | _, height, width, _ = image_batch.shape 488 | overlapping_size = emb_crop_size * emb_stride_size * 2 489 | tile_x, tile_y = tile_indices 490 | coord_x = tile_x * (tile_size - overlapping_size) 491 | coord_y = tile_y * (tile_size - overlapping_size) 492 | size_x = min(tile_size, width - coord_x) 493 | size_y = min(tile_size, height - coord_y) 494 | 495 | return (coord_x, coord_y), (size_x, size_y) 496 | 497 | @staticmethod 498 | def _compute_emb_tile_coord_lut( 499 | emb_tiles: Sequence[Sequence[torch.Tensor]], 500 | ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]: 501 | widths = np.array([emb_tile.shape[3] for emb_tile in emb_tiles[0]]) 502 | cum_widths = np.cumsum(widths) 503 | 504 | heights = np.array([row_emb_tiles[0].shape[2] for row_emb_tiles in emb_tiles]) 505 | cum_heights = np.cumsum(heights) 506 | 507 | return widths, cum_widths, heights, cum_heights 508 | 509 | @staticmethod 510 | def _use_emb_tile_coord_lut( 511 | lut: Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray], 512 | tile_indices: Tuple[int, int], 513 | ) -> Tuple[Tuple[int, int], Tuple[int, int]]: 514 | widths, cum_widths, heights, cum_heights = lut 515 | tile_x, tile_y = tile_indices 516 | 517 | coord_x = cum_widths[tile_x] - widths[tile_x] 518 | coord_y = cum_heights[tile_y] - heights[tile_y] 519 | size_x = widths[tile_x] 520 | size_y = heights[tile_y] 521 | 522 | return (coord_x, coord_y), (size_x, size_y) 523 | 524 | 525 | class _BackgroundTileCache: 526 | def __init__(self): 527 | self.cache = [] 528 | 529 | def __getitem__( 530 | self, 531 | tile: torch.Tensor, 532 | ) -> Any: 533 | # Get basic info of the tile. 534 | shape = tuple(tile.shape) 535 | pixel_values = tuple(tile[0, :, 0, 0].cpu().numpy()) 536 | 537 | # Look for the cache entry. 538 | result = None 539 | for item in self.cache: 540 | if item["shape"] == shape and item["pixel_values"] == pixel_values: 541 | result = item["result"] 542 | 543 | # If cache miss, just return. 544 | if result is None: 545 | return None 546 | 547 | # Check if the tile is background tile. If not, return. 548 | if ( 549 | torch.count_nonzero( 550 | tile - tile[0, :, 0, 0][np.newaxis, :, np.newaxis, np.newaxis] 551 | ) 552 | != 0 553 | ): 554 | return None 555 | 556 | return result 557 | 558 | def __setitem__( 559 | self, 560 | tile: torch.Tensor, 561 | result: Any, 562 | ) -> None: 563 | # Get basic info of the tile. 
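# (The (shape, pixel_values) pair acts as the cache key: a background tile is # constant over H and W, so its shape plus the channel values of its first # pixel identify it. The count_nonzero check below verifies that constancy.)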
564 | shape = tuple(tile.shape) 565 | pixel_values = tuple(tile[0, :, 0, 0].cpu().numpy()) 566 | 567 | # Check if the tile is background tile. If not, return. 568 | if ( 569 | torch.count_nonzero( 570 | tile - tile[0, :, 0, 0][np.newaxis, :, np.newaxis, np.newaxis] 571 | ) 572 | != 0 573 | ): 574 | return 575 | 576 | # Raise error if there is the same entry. 577 | for item in self.cache: 578 | if item["shape"] == shape and item["pixel_values"] == pixel_values: 579 | raise ValueError( 580 | "The _BackgroundTileCache already stores the same entry." 581 | ) 582 | 583 | # Append the cache entry 584 | self.cache.append( 585 | { 586 | "pixel_values": pixel_values, 587 | "shape": shape, 588 | "result": result, 589 | } 590 | ) 591 | -------------------------------------------------------------------------------- /hms2/pipeline/__init__.cpython-37m-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aetherAI/hms2/86ce2490263e43baf78261ab46c2828ffc150bf6/hms2/pipeline/__init__.cpython-37m-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /hms2/pipeline/__init__.cpython-38-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aetherAI/hms2/86ce2490263e43baf78261ab46c2828ffc150bf6/hms2/pipeline/__init__.cpython-38-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /hms2/pipeline/__init__.cpython-39-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aetherAI/hms2/86ce2490263e43baf78261ab46c2828ffc150bf6/hms2/pipeline/__init__.cpython-39-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /hms2/pipeline/callbacks.cpython-37m-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aetherAI/hms2/86ce2490263e43baf78261ab46c2828ffc150bf6/hms2/pipeline/callbacks.cpython-37m-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /hms2/pipeline/callbacks.cpython-38-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aetherAI/hms2/86ce2490263e43baf78261ab46c2828ffc150bf6/hms2/pipeline/callbacks.cpython-38-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /hms2/pipeline/callbacks.cpython-39-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aetherAI/hms2/86ce2490263e43baf78261ab46c2828ffc150bf6/hms2/pipeline/callbacks.cpython-39-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /hms2/pipeline/dataset.cpython-37m-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aetherAI/hms2/86ce2490263e43baf78261ab46c2828ffc150bf6/hms2/pipeline/dataset.cpython-37m-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /hms2/pipeline/dataset.cpython-38-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/aetherAI/hms2/86ce2490263e43baf78261ab46c2828ffc150bf6/hms2/pipeline/dataset.cpython-38-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /hms2/pipeline/dataset.cpython-39-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aetherAI/hms2/86ce2490263e43baf78261ab46c2828ffc150bf6/hms2/pipeline/dataset.cpython-39-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /hms2/pipeline/losses.cpython-37m-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aetherAI/hms2/86ce2490263e43baf78261ab46c2828ffc150bf6/hms2/pipeline/losses.cpython-37m-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /hms2/pipeline/losses.cpython-38-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aetherAI/hms2/86ce2490263e43baf78261ab46c2828ffc150bf6/hms2/pipeline/losses.cpython-38-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /hms2/pipeline/losses.cpython-39-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aetherAI/hms2/86ce2490263e43baf78261ab46c2828ffc150bf6/hms2/pipeline/losses.cpython-39-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /hms2/pipeline/main.cpython-37m-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aetherAI/hms2/86ce2490263e43baf78261ab46c2828ffc150bf6/hms2/pipeline/main.cpython-37m-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /hms2/pipeline/main.cpython-38-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aetherAI/hms2/86ce2490263e43baf78261ab46c2828ffc150bf6/hms2/pipeline/main.cpython-38-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /hms2/pipeline/main.cpython-39-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aetherAI/hms2/86ce2490263e43baf78261ab46c2828ffc150bf6/hms2/pipeline/main.cpython-39-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /hms2/pipeline/metrics.cpython-37m-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aetherAI/hms2/86ce2490263e43baf78261ab46c2828ffc150bf6/hms2/pipeline/metrics.cpython-37m-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /hms2/pipeline/metrics.cpython-38-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aetherAI/hms2/86ce2490263e43baf78261ab46c2828ffc150bf6/hms2/pipeline/metrics.cpython-38-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /hms2/pipeline/metrics.cpython-39-x86_64-linux-gnu.so: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/aetherAI/hms2/86ce2490263e43baf78261ab46c2828ffc150bf6/hms2/pipeline/metrics.cpython-39-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /hms2/pipeline/official_openslide.cpython-37m-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aetherAI/hms2/86ce2490263e43baf78261ab46c2828ffc150bf6/hms2/pipeline/official_openslide.cpython-37m-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /hms2/pipeline/official_openslide.cpython-38-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aetherAI/hms2/86ce2490263e43baf78261ab46c2828ffc150bf6/hms2/pipeline/official_openslide.cpython-38-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /hms2/pipeline/official_openslide.cpython-39-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aetherAI/hms2/86ce2490263e43baf78261ab46c2828ffc150bf6/hms2/pipeline/official_openslide.cpython-39-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /hms2/pipeline/test.py: -------------------------------------------------------------------------------- 1 | from .main import test_main 2 | 3 | if __name__ == "__main__": 4 | test_main() 5 | -------------------------------------------------------------------------------- /hms2/pipeline/train.py: -------------------------------------------------------------------------------- 1 | from .main import train_main 2 | 3 | if __name__ == "__main__": 4 | train_main() 5 | -------------------------------------------------------------------------------- /hms2/pipeline/utils.cpython-37m-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aetherAI/hms2/86ce2490263e43baf78261ab46c2828ffc150bf6/hms2/pipeline/utils.cpython-37m-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /hms2/pipeline/utils.cpython-38-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aetherAI/hms2/86ce2490263e43baf78261ab46c2828ffc150bf6/hms2/pipeline/utils.cpython-38-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /hms2/pipeline/utils.cpython-39-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aetherAI/hms2/86ce2490263e43baf78261ab46c2828ffc150bf6/hms2/pipeline/utils.cpython-39-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /hms2/pipeline/visualize.py: -------------------------------------------------------------------------------- 1 | from .main import visualize_main 2 | 3 | if __name__ == "__main__": 4 | visualize_main() 5 | -------------------------------------------------------------------------------- /misc/camelyon_10x_hms2.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/aetherAI/hms2/86ce2490263e43baf78261ab46c2828ffc150bf6/misc/camelyon_10x_hms2.png -------------------------------------------------------------------------------- /misc/demo.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aetherAI/hms2/86ce2490263e43baf78261ab46c2828ffc150bf6/misc/demo.gif -------------------------------------------------------------------------------- /projects/Camelyon16/configs/config_10x.yaml: -------------------------------------------------------------------------------- 1 | RESULT_DIR: "results/result_Camelyon16_10x" 2 | MODEL_PATH: "${RESULT_DIR}/model.pt" 3 | OPTIMIZER_STATE_PATH: "${RESULT_DIR}/opt_state.pt" 4 | STATES_PATH: "${RESULT_DIR}/states.pt" 5 | LOAD_MODEL_BEFORE_TRAIN: False 6 | CONFIG_RECORD_PATH: "${RESULT_DIR}/config.yaml" 7 | 8 | USE_MIXED_PRECISION: False 9 | USE_HMS2: True 10 | 11 | TRAIN_CSV_PATH: "projects/Camelyon16/datalists/train.csv" 12 | VAL_CSV_PATH: "projects/Camelyon16/datalists/val.csv" 13 | TEST_CSV_PATH: "projects/Camelyon16/datalists/test.csv" 14 | CONTOUR_DIR: "projects/Camelyon16/contours" 15 | SLIDE_DIR: "projects/Camelyon16/slides" 16 | SLIDE_FILE_EXTENSION: ".tif" 17 | SLIDE_READER: "openslide" 18 | RESIZE_RATIO: 0.25 19 | INPUT_SIZE: [80000, 80000, 3] 20 | GPU_AUGMENTS: ["flip", "rigid", "hed_perturb"] 21 | AUGMENTS: [] 22 | 23 | MODEL: "resnet50_frozenbn" 24 | POOL_USE: "gmp_scaled_2k" 25 | NUM_CLASSES: 2 26 | BATCH_SIZE: 1 27 | EPOCHS: 200 28 | LOSS: "ce" 29 | METRIC_LIST: ["accuracy"] 30 | OPTIMIZER: "adamw" 31 | INIT_LEARNING_RATE: 0.00001 32 | REDUCE_LR_FACTOR: 0.1 33 | REDUCE_LR_PATIENCE: 8 34 | TIME_RECORD_PATH: "${RESULT_DIR}/time_record.csv" 35 | TEST_TIME_RECORD_PATH: "${RESULT_DIR}/test_time_record.csv" 36 | 37 | TEST_RESULT_PATH: "${RESULT_DIR}/test_result.json" 38 | ENABLE_VIZ: True 39 | VIZ_RESIZE_RATIO: 0.01 40 | VIZ_FOLDER: "${RESULT_DIR}/viz" 41 | VIZ_RAW_FOLDER: "${RESULT_DIR}/viz_raw" 42 | 43 | DEBUG_PATH: NULL 44 | -------------------------------------------------------------------------------- /projects/Camelyon16/configs/config_2.5x.yaml: -------------------------------------------------------------------------------- 1 | RESULT_DIR: "results/result_Camelyon16_2.5x" 2 | MODEL_PATH: "${RESULT_DIR}/model.pt" 3 | OPTIMIZER_STATE_PATH: "${RESULT_DIR}/opt_state.pt" 4 | STATES_PATH: "${RESULT_DIR}/states.pt" 5 | LOAD_MODEL_BEFORE_TRAIN: False 6 | CONFIG_RECORD_PATH: "${RESULT_DIR}/config.yaml" 7 | 8 | USE_MIXED_PRECISION: False 9 | USE_HMS2: True 10 | 11 | TRAIN_CSV_PATH: "projects/Camelyon16/datalists/train.csv" 12 | VAL_CSV_PATH: "projects/Camelyon16/datalists/val.csv" 13 | TEST_CSV_PATH: "projects/Camelyon16/datalists/test.csv" 14 | CONTOUR_DIR: "projects/Camelyon16/contours" 15 | SLIDE_DIR: "projects/Camelyon16/slides" 16 | SLIDE_FILE_EXTENSION: ".tif" 17 | SLIDE_READER: "openslide" 18 | RESIZE_RATIO: 0.0625 19 | INPUT_SIZE: [20000, 20000, 3] 20 | GPU_AUGMENTS: ["flip", "rigid", "hed_perturb"] 21 | AUGMENTS: [] 22 | 23 | MODEL: "resnet50_frozenbn" 24 | POOL_USE: "gmp_scaled_2k" 25 | NUM_CLASSES: 2 26 | BATCH_SIZE: 1 27 | EPOCHS: 200 28 | LOSS: "ce" 29 | METRIC_LIST: ["accuracy"] 30 | OPTIMIZER: "adamw" 31 | INIT_LEARNING_RATE: 0.00001 32 | REDUCE_LR_FACTOR: 0.1 33 | REDUCE_LR_PATIENCE: 8 34 | TIME_RECORD_PATH: "${RESULT_DIR}/time_record.csv" 35 | TEST_TIME_RECORD_PATH: "${RESULT_DIR}/test_time_record.csv" 36 | 37 | TEST_RESULT_PATH: "${RESULT_DIR}/test_result.json" 38 | ENABLE_VIZ: True 39 | 
VIZ_RESIZE_RATIO: 0.01 40 | VIZ_FOLDER: "${RESULT_DIR}/viz" 41 | VIZ_RAW_FOLDER: "${RESULT_DIR}/viz_raw" 42 | 43 | DEBUG_PATH: NULL 44 | -------------------------------------------------------------------------------- /projects/Camelyon16/configs/config_5x.yaml: -------------------------------------------------------------------------------- 1 | RESULT_DIR: "results/result_Camelyon16_5x" 2 | MODEL_PATH: "${RESULT_DIR}/model.pt" 3 | OPTIMIZER_STATE_PATH: "${RESULT_DIR}/opt_state.pt" 4 | STATES_PATH: "${RESULT_DIR}/states.pt" 5 | LOAD_MODEL_BEFORE_TRAIN: False 6 | CONFIG_RECORD_PATH: "${RESULT_DIR}/config.yaml" 7 | 8 | USE_MIXED_PRECISION: False 9 | USE_HMS2: True 10 | 11 | TRAIN_CSV_PATH: "projects/Camelyon16/datalists/train.csv" 12 | VAL_CSV_PATH: "projects/Camelyon16/datalists/val.csv" 13 | TEST_CSV_PATH: "projects/Camelyon16/datalists/test.csv" 14 | CONTOUR_DIR: "projects/Camelyon16/contours" 15 | SLIDE_DIR: "projects/Camelyon16/slides" 16 | SLIDE_FILE_EXTENSION: ".tif" 17 | SLIDE_READER: "openslide" 18 | RESIZE_RATIO: 0.125 19 | INPUT_SIZE: [40000, 40000, 3] 20 | GPU_AUGMENTS: ["flip", "rigid", "hed_perturb"] 21 | AUGMENTS: [] 22 | 23 | MODEL: "resnet50_frozenbn" 24 | POOL_USE: "gmp_scaled_2k" 25 | NUM_CLASSES: 2 26 | BATCH_SIZE: 1 27 | EPOCHS: 200 28 | LOSS: "ce" 29 | METRIC_LIST: ["accuracy"] 30 | OPTIMIZER: "adamw" 31 | INIT_LEARNING_RATE: 0.00001 32 | REDUCE_LR_FACTOR: 0.1 33 | REDUCE_LR_PATIENCE: 8 34 | TIME_RECORD_PATH: "${RESULT_DIR}/time_record.csv" 35 | TEST_TIME_RECORD_PATH: "${RESULT_DIR}/test_time_record.csv" 36 | 37 | TEST_RESULT_PATH: "${RESULT_DIR}/test_result.json" 38 | ENABLE_VIZ: True 39 | VIZ_RESIZE_RATIO: 0.01 40 | VIZ_FOLDER: "${RESULT_DIR}/viz" 41 | VIZ_RAW_FOLDER: "${RESULT_DIR}/viz_raw" 42 | 43 | DEBUG_PATH: NULL 44 | -------------------------------------------------------------------------------- /projects/Camelyon16/datalists/test.csv: -------------------------------------------------------------------------------- 1 | test_001,1 2 | test_002,1 3 | test_003,0 4 | test_004,1 5 | test_005,0 6 | test_006,0 7 | test_007,0 8 | test_008,1 9 | test_009,0 10 | test_010,1 11 | test_011,1 12 | test_012,0 13 | test_013,1 14 | test_014,0 15 | test_015,0 16 | test_016,1 17 | test_017,0 18 | test_018,0 19 | test_019,0 20 | test_020,0 21 | test_021,1 22 | test_022,0 23 | test_023,0 24 | test_024,0 25 | test_025,0 26 | test_026,1 27 | test_027,1 28 | test_028,0 29 | test_029,1 30 | test_030,1 31 | test_031,0 32 | test_032,0 33 | test_033,1 34 | test_034,0 35 | test_035,0 36 | test_036,0 37 | test_037,0 38 | test_038,1 39 | test_039,0 40 | test_040,1 41 | test_041,0 42 | test_042,0 43 | test_043,0 44 | test_044,0 45 | test_045,0 46 | test_046,1 47 | test_047,0 48 | test_048,1 49 | test_050,0 50 | test_051,1 51 | test_052,1 52 | test_053,0 53 | test_054,0 54 | test_055,0 55 | test_056,0 56 | test_057,0 57 | test_058,0 58 | test_059,0 59 | test_060,0 60 | test_061,1 61 | test_062,0 62 | test_063,0 63 | test_064,1 64 | test_065,1 65 | test_066,1 66 | test_067,0 67 | test_068,1 68 | test_069,1 69 | test_070,0 70 | test_071,1 71 | test_072,0 72 | test_073,1 73 | test_074,1 74 | test_075,1 75 | test_076,0 76 | test_077,0 77 | test_078,0 78 | test_079,1 79 | test_080,0 80 | test_081,0 81 | test_082,1 82 | test_083,0 83 | test_084,1 84 | test_085,0 85 | test_086,0 86 | test_087,0 87 | test_088,0 88 | test_089,0 89 | test_090,1 90 | test_091,0 91 | test_092,1 92 | test_093,0 93 | test_094,1 94 | test_095,0 95 | test_096,0 96 | test_097,1 97 | test_098,0 98 | test_099,1 99 | test_100,0 
100 | test_101,0 101 | test_102,1 102 | test_103,0 103 | test_104,1 104 | test_105,1 105 | test_106,0 106 | test_107,0 107 | test_108,1 108 | test_109,0 109 | test_110,1 110 | test_111,0 111 | test_112,0 112 | test_113,1 113 | test_114,1 114 | test_115,0 115 | test_116,1 116 | test_117,1 117 | test_118,0 118 | test_119,0 119 | test_120,0 120 | test_121,1 121 | test_122,1 122 | test_123,0 123 | test_124,0 124 | test_125,0 125 | test_126,0 126 | test_127,0 127 | test_128,0 128 | test_129,0 129 | test_130,0 130 | -------------------------------------------------------------------------------- /projects/Camelyon16/datalists/train.csv: -------------------------------------------------------------------------------- 1 | normal_058,0 2 | normal_133,0 3 | tumor_020,1 4 | normal_016,0 5 | tumor_013,1 6 | tumor_002,1 7 | normal_136,0 8 | tumor_061,1 9 | normal_068,0 10 | normal_152,0 11 | normal_070,0 12 | normal_095,0 13 | normal_151,0 14 | normal_064,0 15 | tumor_111,1 16 | tumor_012,1 17 | tumor_078,1 18 | tumor_041,1 19 | normal_153,0 20 | normal_072,0 21 | tumor_037,1 22 | normal_114,0 23 | normal_073,0 24 | normal_071,0 25 | tumor_053,1 26 | tumor_051,1 27 | normal_126,0 28 | normal_018,0 29 | normal_012,0 30 | normal_132,0 31 | tumor_010,1 32 | normal_004,0 33 | normal_156,0 34 | normal_005,0 35 | normal_091,0 36 | normal_007,0 37 | tumor_029,1 38 | normal_059,0 39 | normal_148,0 40 | tumor_009,1 41 | normal_043,0 42 | normal_121,0 43 | normal_116,0 44 | tumor_073,1 45 | normal_036,0 46 | normal_067,0 47 | normal_027,0 48 | tumor_035,1 49 | tumor_058,1 50 | normal_124,0 51 | tumor_060,1 52 | normal_061,0 53 | tumor_082,1 54 | normal_149,0 55 | normal_077,0 56 | normal_020,0 57 | normal_117,0 58 | normal_063,0 59 | tumor_069,1 60 | tumor_026,1 61 | tumor_014,1 62 | normal_008,0 63 | normal_146,0 64 | tumor_023,1 65 | normal_075,0 66 | normal_131,0 67 | normal_128,0 68 | normal_102,0 69 | tumor_045,1 70 | tumor_080,1 71 | normal_087,0 72 | normal_118,0 73 | tumor_030,1 74 | normal_001,0 75 | tumor_001,1 76 | normal_026,0 77 | tumor_076,1 78 | normal_083,0 79 | tumor_021,1 80 | normal_103,0 81 | tumor_031,1 82 | normal_078,0 83 | normal_049,0 84 | tumor_034,1 85 | tumor_110,1 86 | normal_079,0 87 | tumor_027,1 88 | tumor_066,1 89 | tumor_107,1 90 | normal_142,0 91 | tumor_091,1 92 | normal_017,0 93 | tumor_070,1 94 | normal_062,0 95 | tumor_100,1 96 | normal_089,0 97 | normal_107,0 98 | normal_065,0 99 | normal_009,0 100 | tumor_077,1 101 | tumor_008,1 102 | normal_159,0 103 | tumor_071,1 104 | tumor_102,1 105 | normal_057,0 106 | normal_158,0 107 | normal_050,0 108 | tumor_006,1 109 | tumor_097,1 110 | normal_139,0 111 | tumor_056,1 112 | tumor_089,1 113 | tumor_028,1 114 | normal_028,0 115 | normal_041,0 116 | tumor_074,1 117 | normal_090,0 118 | normal_113,0 119 | tumor_098,1 120 | normal_127,0 121 | normal_150,0 122 | normal_125,0 123 | tumor_042,1 124 | normal_084,0 125 | normal_111,0 126 | normal_040,0 127 | normal_030,0 128 | tumor_019,1 129 | tumor_064,1 130 | tumor_093,1 131 | tumor_094,1 132 | normal_047,0 133 | normal_024,0 134 | normal_096,0 135 | normal_108,0 136 | normal_100,0 137 | normal_140,0 138 | normal_143,0 139 | tumor_099,1 140 | normal_098,0 141 | tumor_054,1 142 | tumor_081,1 143 | normal_013,0 144 | normal_003,0 145 | normal_074,0 146 | tumor_046,1 147 | normal_019,0 148 | normal_048,0 149 | tumor_065,1 150 | tumor_004,1 151 | tumor_018,1 152 | normal_060,0 153 | tumor_106,1 154 | normal_082,0 155 | normal_145,0 156 | normal_054,0 157 | normal_038,0 158 | tumor_007,1 159 
| tumor_050,1 160 | tumor_104,1 161 | normal_076,0 162 | tumor_032,1 163 | normal_031,0 164 | normal_025,0 165 | normal_032,0 166 | normal_106,0 167 | tumor_083,1 168 | tumor_087,1 169 | normal_155,0 170 | tumor_055,1 171 | tumor_022,1 172 | tumor_039,1 173 | normal_092,0 174 | normal_033,0 175 | tumor_024,1 176 | normal_141,0 177 | normal_066,0 178 | tumor_049,1 179 | tumor_088,1 180 | tumor_005,1 181 | normal_015,0 182 | normal_052,0 183 | tumor_095,1 184 | normal_135,0 185 | normal_035,0 186 | normal_085,0 187 | normal_099,0 188 | tumor_068,1 189 | normal_160,0 190 | normal_080,0 191 | normal_037,0 192 | tumor_075,1 193 | tumor_003,1 194 | tumor_067,1 195 | tumor_063,1 196 | normal_109,0 197 | tumor_017,1 198 | normal_123,0 199 | normal_044,0 200 | normal_006,0 201 | normal_029,0 202 | normal_010,0 203 | tumor_047,1 204 | tumor_085,1 205 | tumor_103,1 206 | tumor_084,1 207 | normal_154,0 208 | normal_115,0 209 | tumor_096,1 210 | tumor_033,1 211 | normal_042,0 212 | tumor_079,1 213 | normal_056,0 214 | normal_034,0 215 | tumor_108,1 216 | normal_014,0 217 | tumor_090,1 218 | normal_157,0 219 | normal_051,0 220 | normal_134,0 221 | -------------------------------------------------------------------------------- /projects/Camelyon16/datalists/val.csv: -------------------------------------------------------------------------------- 1 | tumor_086,1 2 | normal_021,0 3 | normal_138,0 4 | normal_094,0 5 | normal_053,0 6 | normal_110,0 7 | normal_002,0 8 | normal_144,0 9 | tumor_105,1 10 | normal_011,0 11 | normal_105,0 12 | normal_081,0 13 | normal_122,0 14 | normal_023,0 15 | tumor_040,1 16 | tumor_015,1 17 | normal_137,0 18 | tumor_025,1 19 | normal_022,0 20 | normal_088,0 21 | normal_147,0 22 | tumor_059,1 23 | tumor_101,1 24 | tumor_011,1 25 | normal_120,0 26 | normal_039,0 27 | normal_055,0 28 | tumor_016,1 29 | tumor_062,1 30 | normal_101,0 31 | tumor_052,1 32 | normal_097,0 33 | tumor_043,1 34 | tumor_072,1 35 | tumor_044,1 36 | normal_112,0 37 | tumor_057,1 38 | tumor_109,1 39 | tumor_092,1 40 | normal_046,0 41 | tumor_038,1 42 | normal_130,0 43 | normal_045,0 44 | normal_069,0 45 | normal_129,0 46 | normal_119,0 47 | normal_093,0 48 | normal_104,0 49 | tumor_036,1 50 | tumor_048,1 51 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = [ "poetry-core>=1.0.0",] 3 | build-backend = "poetry.core.masonry.api" 4 | 5 | [tool.poetry] 6 | name = "hms2" 7 | version = "1.3.1r" 8 | description = "Another annotation-free whole-slide training approach to pathological classification." 
9 | license = "CC BY-NC-SA 4.0" 10 | authors = [ "Chi-Chung Chen ",] 11 | 12 | [tool.poetry.dependencies] 13 | python = ">=3.7, <3.11" 14 | numpy = "^1.21.6" 15 | opencv-python = "^4.5.5.64" 16 | scipy = "^1.7" 17 | Pillow = "^9.1.0" 18 | scikit-image = "^0.19.2" 19 | requests = "^2.27.1" 20 | scikit-learn = "^1.0.2" 21 | tqdm = "^4.64.0" 22 | mpi4py = "^3.1.3" 23 | PyYAML = "^6.0" 24 | pandas = ">=1.3.5, <1.4" 25 | lifelines = "^0.27.0" 26 | pycryptodome = "^3.14.1" 27 | scikit-build = "^0.14.1" 28 | openslide-python = "^1.1.2" 29 | 30 | [tool.poetry.dev-dependencies] 31 | black = "^22.3.0" 32 | flake8 = "^4.0.1" 33 | pytest = "^7.1.1" 34 | isort = "^5.10.1" 35 | mypy = "^0.942" 36 | poethepoet = "^0.13.1" 37 | types-PyYAML = "^6.0.5" 38 | onnxruntime-gpu = "^1.10.0" 39 | cython = "^0.29.28" 40 | toml = "^0.10.2" 41 | 42 | [tool.poe.tasks] 43 | install-torch-cu113 = "python -m pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/cu113" 44 | install-torch-cu102 = "python -m pip install torch torchvision" 45 | install-cu113 = [ "install-torch-cu113", "install-horovod",] 46 | install-cu102 = [ "install-torch-cu102", "install-horovod",] 47 | 48 | [tool.poe.tasks.install-horovod] 49 | cmd = "python -m pip install horovod --no-cache-dir" 50 | 51 | [tool.poe.tasks.install-horovod.env] 52 | HOROVOD_WITH_PYTORCH = "1" 53 | -------------------------------------------------------------------------------- /tests/core/test_builder.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import numpy as np 4 | import pytest 5 | import torch 6 | 7 | from hms2.core.builder import Hms2ModelBuilder 8 | 9 | 10 | @pytest.fixture(autouse=True, scope="session") 11 | def set_up(): 12 | os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8" 13 | torch.backends.cudnn.deterministic = True 14 | 15 | 16 | @pytest.fixture(scope="session") 17 | def n_classes(): 18 | return 10 19 | 20 | 21 | @pytest.fixture(scope="session", params=["resnet50_frozenbn"]) 22 | def backbone(request): 23 | return request.param 24 | 25 | 26 | @pytest.fixture(scope="session", params=["gmp", "gap"]) 27 | def pooling(request): 28 | return request.param 29 | 30 | 31 | @pytest.fixture(scope="session", params=[False, True]) 32 | def use_hms2(request): 33 | return request.param 34 | 35 | 36 | @pytest.fixture(scope="session", params=[None, ["flip", "rigid", "hed_perturb"]]) 37 | def augmentation_list(request): 38 | return request.param 39 | 40 | 41 | def test_hms2_model_builder_with_dry_run( 42 | n_classes, 43 | backbone, 44 | pooling, 45 | use_hms2, 46 | augmentation_list, 47 | ): 48 | # Skip the situation that use_hms2 == False and augmentation_list is not None 49 | if not use_hms2 and augmentation_list is not None: 50 | return 51 | 52 | # Set larger image size for HMS2 53 | if use_hms2: 54 | image_size = (5000, 5000) 55 | else: 56 | image_size = (2000, 2000) 57 | 58 | # Build a model 59 | model = Hms2ModelBuilder().build( 60 | n_classes=n_classes, 61 | backbone=backbone, 62 | pooling=pooling, 63 | use_hms2=use_hms2, 64 | augmentation_list=augmentation_list, 65 | ) 66 | 67 | # Dry-run backpropagation 68 | optimizer = torch.optim.AdamW(model.parameters()) 69 | loss = torch.nn.CrossEntropyLoss() 70 | 71 | for _ in range(2): 72 | input_batch = np.random.randint( 73 | low=0, high=255, size=((1,) + image_size + (3,)), dtype=np.uint8 74 | ) 75 | y_true_batch = np.random.randint(0, n_classes - 1, size=(1,), dtype=np.int64) 76 | input_batch = torch.tensor(input_batch) 77 | if not 
use_hms2: 78 | # If HMS2 is disabled, the input should be manually moved to GPU. 79 | input_batch = input_batch.cuda() 80 | 81 | y_pred_batch = model(input_batch) 82 | assert y_pred_batch.size() == (1, n_classes) 83 | 84 | y_true_batch = torch.tensor(y_true_batch).cuda() 85 | loss_batch = loss(y_pred_batch, y_true_batch) 86 | loss_batch.backward() 87 | optimizer.step() 88 | optimizer.zero_grad() 89 | 90 | 91 | def test_hms2_model_builder_with_use_less_gpu_memory_budget( 92 | n_classes, 93 | backbone, 94 | ): 95 | image_size = (5000, 5000) 96 | 97 | # Build two models with different GPU memory budgets 98 | model_rich = Hms2ModelBuilder().build( 99 | n_classes=n_classes, 100 | backbone=backbone, 101 | pooling="gap", 102 | use_hms2=True, 103 | gpu_memory_budget=32.0, 104 | ) 105 | model_poor = Hms2ModelBuilder().build( 106 | n_classes=n_classes, 107 | backbone=backbone, 108 | pooling="gap", 109 | use_hms2=True, 110 | gpu_memory_budget=16.0, 111 | ) 112 | 113 | # Run forward 114 | input_batch = np.random.randint( 115 | low=0, high=255, size=((1,) + image_size + (3,)), dtype=np.uint8 116 | ) 117 | input_batch = torch.tensor(input_batch) 118 | 119 | y_pred_batch_rich = model_rich(input_batch) 120 | y_pred_batch_poor = model_poor(input_batch) 121 | 122 | assert y_pred_batch_poor.detach().cpu().numpy() == pytest.approx( 123 | y_pred_batch_rich.detach().cpu().numpy(), abs=1.0 124 | ) 125 | 126 | 127 | def test_hms2_model_builder_with_cam( 128 | n_classes, 129 | backbone, 130 | use_hms2, 131 | ): 132 | # Set larger image size for HMS2 133 | if use_hms2: 134 | image_size = (5000, 5000) 135 | else: 136 | image_size = (2000, 2000) 137 | 138 | # Build a model 139 | model = Hms2ModelBuilder().build( 140 | n_classes=n_classes, 141 | backbone=backbone, 142 | pooling="cam", 143 | use_hms2=use_hms2, 144 | ) 145 | 146 | # Dry-run 147 | for _ in range(2): 148 | input_batch = np.random.randint( 149 | low=0, high=255, size=((1,) + image_size + (3,)), dtype=np.uint8 150 | ) 151 | input_batch = torch.tensor(input_batch) 152 | if not use_hms2: 153 | # If HMS2 is disabled, the input should be manually moved to GPU. 154 | input_batch = input_batch.cuda() 155 | 156 | cam = model(input_batch) 157 | assert cam.size()[0] == 1 158 | assert cam.size()[1] > 1 159 | assert cam.size()[2] > 1 160 | assert cam.size()[3] == n_classes 161 | 162 | 163 | def test_hms2_model_builder_with_emb( 164 | n_classes, 165 | backbone, 166 | use_hms2, 167 | ): 168 | # Set larger image size for HMS2 169 | if use_hms2: 170 | image_size = (5000, 5000) 171 | else: 172 | image_size = (2000, 2000) 173 | 174 | # Build a model 175 | model = Hms2ModelBuilder().build( 176 | n_classes=n_classes, 177 | backbone=backbone, 178 | pooling="no", 179 | custom_dense="no", 180 | use_hms2=use_hms2, 181 | ) 182 | 183 | # Dry-run 184 | for _ in range(2): 185 | input_batch = np.random.randint( 186 | low=0, high=255, size=((1,) + image_size + (3,)), dtype=np.uint8 187 | ) 188 | input_batch = torch.tensor(input_batch) 189 | if not use_hms2: 190 | # If HMS2 is disabled, the input should be manually moved to GPU. 
191 | input_batch = input_batch.cuda() 192 | 193 | emb = model(input_batch) 194 | assert emb.size()[0] == 1 195 | assert emb.size()[1] == 2048 196 | assert emb.size()[2] > 1 197 | assert emb.size()[3] > 1 198 | -------------------------------------------------------------------------------- /tests/core/test_custom_modules.py: -------------------------------------------------------------------------------- 1 | from io import BytesIO 2 | 3 | import numpy as np 4 | import pytest 5 | import requests 6 | import torch 7 | import torchvision 8 | from PIL import Image 9 | 10 | from hms2.core.custom_modules import FrozenBatchNorm2d 11 | 12 | 13 | @pytest.fixture(scope="session") 14 | def image(): 15 | url = "https://upload.wikimedia.org/wikipedia/zh/3/34/Lenna.jpg" 16 | with requests.get(url) as req: 17 | buff = BytesIO(req.content) 18 | image = Image.open(buff) 19 | 20 | width = 224 21 | height = 224 22 | image = image.resize([width, height]) 23 | 24 | image = np.array(image) 25 | return image 26 | 27 | 28 | def test_frozen_batch_norm_2d(image): 29 | original_model = torchvision.models.resnet50(pretrained=True).cuda().eval() 30 | image = torchvision.transforms.ToTensor()(image) 31 | image = torchvision.transforms.Normalize( 32 | (0.485, 0.456, 0.406), (0.229, 0.224, 0.225) 33 | )(image) 34 | image_batch = image[np.newaxis, :, :, :].cuda() 35 | label_batch = torch.zeros([1], dtype=torch.int64).cuda() 36 | 37 | original_output = original_model(image_batch) 38 | loss = torch.nn.CrossEntropyLoss()(original_output, label_batch) 39 | loss.backward() 40 | original_grads = [ 41 | parameter.grad.cpu().numpy() for parameter in original_model.parameters() 42 | ] 43 | original_model.zero_grad() 44 | 45 | frozen_bn_model = FrozenBatchNorm2d.convert_frozen_batchnorm(original_model) 46 | frozen_bn_model.train() 47 | frozen_bn_output = frozen_bn_model(image_batch) 48 | loss = torch.nn.CrossEntropyLoss()(frozen_bn_output, label_batch) 49 | loss.backward() 50 | frozen_bn_grads = [ 51 | parameter.grad.cpu().numpy() for parameter in frozen_bn_model.parameters() 52 | ] 53 | frozen_bn_model.zero_grad() 54 | 55 | # Check the integrity of parameters 56 | original_parameters = [ 57 | parameter.detach().cpu().numpy() for parameter in original_model.parameters() 58 | ] 59 | frozen_bn_parameters = [ 60 | parameter.detach().cpu().numpy() for parameter in frozen_bn_model.parameters() 61 | ] 62 | assert len(original_parameters) == len(frozen_bn_parameters) 63 | for idx, _ in enumerate(original_parameters): 64 | assert original_parameters[idx] == pytest.approx(frozen_bn_parameters[idx]) 65 | 66 | # Check the integrities of outputs and gradients 67 | assert original_output.detach().cpu().numpy() == pytest.approx( 68 | frozen_bn_output.detach().cpu().numpy() 69 | ) 70 | for idx, _ in enumerate(original_grads): 71 | assert original_grads[idx] == pytest.approx(frozen_bn_grads[idx], abs=1e-4) 72 | -------------------------------------------------------------------------------- /tests/core/test_model.py: -------------------------------------------------------------------------------- 1 | import os 2 | from io import BytesIO 3 | from time import time 4 | 5 | import numpy as np 6 | import pytest 7 | import requests 8 | import torch 9 | import torch.nn as nn 10 | import torchvision 11 | import torchvision.transforms as transforms 12 | from PIL import Image 13 | from skimage.metrics import structural_similarity 14 | 15 | from hms2.core.loader_modules import ( 16 | GPUAugmentationLoaderModule, 17 | NoLoaderModule, 18 | PlainLoaderModule, 
19 | ) 20 | from hms2.core.model import Hms2Model 21 | 22 | 23 | @pytest.fixture(autouse=True, scope="session") 24 | def set_up(): 25 | os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8" 26 | torch.backends.cudnn.deterministic = True 27 | 28 | 29 | @pytest.fixture(scope="session") 30 | def image(): 31 | url = "https://upload.wikimedia.org/wikipedia/zh/3/34/Lenna.jpg" 32 | with requests.get(url) as req: 33 | buff = BytesIO(req.content) 34 | image = Image.open(buff) 35 | 36 | width = 2000 37 | height = 3000 38 | image = image.resize([width, height]) 39 | 40 | image = np.array(image) 41 | return image 42 | 43 | 44 | @pytest.fixture(scope="session") 45 | def image_batch(image): 46 | image_batch = torch.tensor(image, dtype=torch.uint8) 47 | image_batch = image_batch[np.newaxis, :, :, :] 48 | 49 | return image_batch 50 | 51 | 52 | @pytest.mark.parametrize("do_hint", [True, False]) 53 | @pytest.mark.parametrize("loader_module_use", ["plain", "gpu_aug_disable_aug", "no"]) 54 | def test_loader_module_forward_with_no_aug( 55 | image, image_batch, do_hint, loader_module_use 56 | ): 57 | if loader_module_use == "plain": 58 | loader_module = PlainLoaderModule() 59 | elif loader_module_use == "gpu_aug_disable_aug": 60 | loader_module = GPUAugmentationLoaderModule( 61 | random_flip=False, 62 | random_rotation=False, 63 | random_translation=None, 64 | ) 65 | elif loader_module_use == "no": 66 | loader_module = NoLoaderModule() 67 | else: 68 | assert False 69 | 70 | loader_module = loader_module.cuda() 71 | 72 | coord = (0, 1000) 73 | size = (1000, 2000) 74 | if loader_module_use in ["plain", "gpu_aug_disable_aug"]: 75 | if do_hint: 76 | loader_module.hint_future_accesses(image_batch, [coord], [size]) 77 | output = loader_module(image_batch, coord, size) 78 | assert isinstance(output, torch.Tensor) 79 | assert output.is_cuda 80 | elif loader_module_use == "no": 81 | partial_image_batch = image_batch[ 82 | :, 83 | coord[1] : coord[1] + size[1], 84 | coord[0] : coord[0] + size[0], 85 | :, 86 | ] 87 | output = loader_module(partial_image_batch) 88 | assert isinstance(output, torch.Tensor) 89 | else: 90 | assert False 91 | 92 | output = output.cpu().numpy() 93 | output = output[0, ...] 
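    # The loader module returns a normalized CHW tensor; the steps below undo the ImageNet mean/std normalization so the recovered tile can be compared against the raw image via SSIM.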
94 |     output = np.transpose(output, [1, 2, 0]) 95 |     output *= np.float32([0.229, 0.224, 0.225]) 96 |     output += np.float32([0.485, 0.456, 0.406]) 97 |     output = np.minimum(np.maximum(output * 255.0, 0.0), 255.0).astype(np.uint8) 98 | 99 |     ground_truth = image[ 100 |         coord[1] : coord[1] + size[1], 101 |         coord[0] : coord[0] + size[0], 102 |         :, 103 |     ] 104 |     ssim = structural_similarity(output, ground_truth, channel_axis=-1) 105 |     assert ssim > 0.99 106 | 107 | 108 | def test_gpu_augmentation_loader_module_forward_with_aug(image, image_batch): 109 |     # Augmentation arguments 110 |     rotation_angle = 8.7 111 |     translation_pixels = [9, -8] 112 |     do_flip = True 113 | 114 |     class AddBias(nn.Module): 115 |         def __init__(self, bias): 116 |             super().__init__() 117 |             self.bias = bias 118 | 119 |         def randomize(self): 120 |             pass 121 | 122 |         def forward(self, inputs): 123 |             return inputs + self.bias 124 | 125 |     other_augmentations = [AddBias(0.1)] 126 | 127 |     # Get the loader module 128 |     loader_module = GPUAugmentationLoaderModule(other_augmentations=other_augmentations) 129 |     loader_module = loader_module.cuda() 130 |     loader_module.do_flip = do_flip 131 |     loader_module.rotation_angle = rotation_angle 132 |     loader_module.translation_pixels = translation_pixels 133 |     loader_module.affine_matrix = loader_module._calculate_affine_matrix() 134 | 135 |     # Do forward 136 |     coord = (0, 1000) 137 |     size = (1000, 2000) 138 |     output = loader_module(image_batch, coord, size) 139 |     assert isinstance(output, torch.Tensor) 140 |     assert output.is_cuda 141 | 142 |     output = output.cpu().numpy() 143 |     output = output[0, ...] 144 |     output = np.transpose(output, [1, 2, 0]) 145 |     output *= np.float32([0.229, 0.224, 0.225]) 146 |     output += np.float32([0.485, 0.456, 0.406]) 147 |     output -= 0.1  # Inverse of AddBias(0.1) 148 |     output = np.minimum(np.maximum(output * 255.0, 0.0), 255.0).astype(np.uint8) 149 | 150 |     # Get ground truth 151 |     img_aug = Image.fromarray(image) 152 |     img_aug = img_aug.rotate( 153 |         angle=rotation_angle, 154 |         resample=Image.BILINEAR, 155 |         translate=translation_pixels, 156 |         fillcolor=(255, 255, 255), 157 |     ) 158 |     if do_flip: 159 |         img_aug = img_aug.transpose(method=Image.FLIP_LEFT_RIGHT) 160 |     ground_truth = np.array(img_aug)[ 161 |         coord[1] : coord[1] + size[1], 162 |         coord[0] : coord[0] + size[0], 163 |         :, 164 |     ] 165 |     ssim = structural_similarity(output, ground_truth, channel_axis=-1) 166 |     assert np.min(ground_truth) < 128  # The selected tile should be meaningful 167 |     assert ssim > 0.99 168 | 169 | 170 | def test_gpu_augmentation_loader_module_forward_with_randomness(image_batch): 171 |     # Augmentation arguments 172 |     class AddBias(nn.Module): 173 |         def __init__(self, bias): 174 |             super().__init__() 175 |             self.bias = bias 176 | 177 |         def randomize(self): 178 |             pass 179 | 180 |         def forward(self, inputs): 181 |             return inputs + self.bias 182 | 183 |     other_augmentations = [AddBias(0.1)] 184 | 185 |     # Get the loader module 186 |     loader_module = GPUAugmentationLoaderModule(other_augmentations=other_augmentations) 187 |     loader_module = loader_module.cuda() 188 | 189 |     # Do two forward operations in training mode 190 |     coord = (0, 1000) 191 |     size = (1000, 2000) 192 | 193 |     loader_module.train() 194 |     loader_module.randomize() 195 |     output_0 = loader_module(image_batch, coord, size) 196 |     loader_module.randomize() 197 |     output_1 = loader_module(image_batch, coord, size) 198 |     assert torch.any(output_0 != output_1).item() 199 | 200 |     # Do two forward operations in evaluation mode 201 |     loader_module.eval()
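    # In eval mode, randomize() should leave the augmentation state untouched, so two consecutive forwards must produce identical outputs.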
202 | loader_module.randomize() 203 | output_0 = loader_module(image_batch, coord, size) 204 | loader_module.randomize() 205 | output_1 = loader_module(image_batch, coord, size) 206 | assert torch.all(output_0 == output_1).item() 207 | 208 | 209 | @pytest.fixture(scope="session") 210 | def conv_module(): 211 | resnet50 = torchvision.models.resnet50(pretrained=True).eval() 212 | conv_module = nn.Sequential(*list(resnet50.children())[:-2]) 213 | conv_module = conv_module.cuda() 214 | return conv_module 215 | 216 | 217 | @pytest.fixture(scope="session") 218 | def dense_module(): 219 | resnet50 = torchvision.models.resnet50(pretrained=True).eval() 220 | dense_module = nn.Sequential( 221 | nn.AdaptiveMaxPool2d((1, 1)), 222 | nn.Flatten(), 223 | list(resnet50.children())[-1], 224 | ) 225 | dense_module = dense_module.cuda() 226 | return dense_module 227 | 228 | 229 | @pytest.fixture(scope="session", params=["max", "none"]) 230 | def local_pooling_module(request): 231 | if request.param == "max": 232 | local_pooling_module = nn.AdaptiveMaxPool2d((1, 1)) 233 | else: 234 | local_pooling_module = None 235 | return local_pooling_module 236 | 237 | 238 | @pytest.fixture(scope="session", params=[3072, 4096]) 239 | def hms2_model(conv_module, dense_module, local_pooling_module, request): 240 | tile_size = request.param 241 | 242 | hms2_model = Hms2Model( 243 | loader_module=PlainLoaderModule().cuda(), 244 | conv_module=conv_module, 245 | dense_module=dense_module, 246 | local_pooling_module=local_pooling_module, 247 | tile_size=tile_size, 248 | emb_crop_size=7, 249 | emb_stride_size=32, 250 | ) 251 | return hms2_model 252 | 253 | 254 | @pytest.fixture(scope="session") 255 | def plain_model(conv_module, dense_module): 256 | class PlainModel(nn.Module): 257 | def __init__(self, conv_module, dense_module): 258 | super().__init__() 259 | self.conv_module = conv_module 260 | self.dense_module = dense_module 261 | 262 | def forward(self, image_batch): 263 | image_batch = image_batch.cuda() 264 | image_batch = image_batch.permute(0, 3, 1, 2).contiguous() 265 | image_batch = image_batch.float().div(255.0) 266 | image_batch = transforms.functional.normalize( 267 | tensor=image_batch, 268 | mean=[0.485, 0.456, 0.406], 269 | std=[0.229, 0.224, 0.225], 270 | ) 271 | conv_output = self.conv_module(image_batch) 272 | output = self.dense_module(conv_output) 273 | return output 274 | 275 | plain_model = PlainModel(conv_module, dense_module) 276 | return plain_model 277 | 278 | 279 | def test_hms2_model_forward(hms2_model, plain_model, image_batch): 280 | hms2_output = hms2_model(image_batch) 281 | hms2_output = hms2_output.detach().cpu().numpy() 282 | 283 | plain_output = plain_model(image_batch) 284 | plain_output = plain_output.detach().cpu().numpy() 285 | 286 | assert hms2_output == pytest.approx(plain_output) 287 | 288 | 289 | def test_hms2_model_backward(hms2_model, plain_model, image_batch): 290 | target_batch = torch.tensor(np.array([100]), dtype=torch.long).cuda() 291 | 292 | hms2_output = hms2_model(image_batch) 293 | hms2_loss = nn.CrossEntropyLoss()(hms2_output, target_batch) 294 | hms2_model.zero_grad() 295 | hms2_loss.backward() 296 | hms2_grads = [parameter.grad.cpu().numpy() for parameter in hms2_model.parameters()] 297 | 298 | plain_output = plain_model(image_batch) 299 | plain_loss = nn.CrossEntropyLoss()(plain_output, target_batch) 300 | plain_model.zero_grad() 301 | plain_loss.backward() 302 | plain_grads = [ 303 | parameter.grad.cpu().numpy() for parameter in plain_model.parameters() 304 | ] 305 | 
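    # The tiled HMS2 backward pass should reproduce the plain full-image model's gradients within numerical tolerance.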
306 | assert len(hms2_grads) == len(plain_grads) 307 | for idx, _ in enumerate(hms2_grads): 308 | assert hms2_grads[idx] == pytest.approx(plain_grads[idx], abs=1e-4) 309 | 310 | 311 | def test_hms2_model_backward_with_no_grad(hms2_model, image_batch): 312 | target_batch = torch.tensor(np.array([100]), dtype=torch.long).cuda() 313 | 314 | optimizer = torch.optim.SGD(hms2_model.parameters(), lr=0.01) 315 | 316 | optimizer.zero_grad() 317 | hms2_output = hms2_model(image_batch) 318 | hms2_output = torch.min(hms2_output, torch.tensor(-999.9).cuda()) 319 | hms2_loss = nn.CrossEntropyLoss()(hms2_output, target_batch) 320 | hms2_loss.backward() 321 | hms2_grads = [parameter.grad for parameter in hms2_model.parameters()] 322 | optimizer.step() 323 | 324 | for grad in hms2_grads: 325 | assert grad is None or torch.count_nonzero(grad).item() == 0 326 | 327 | 328 | def test_hms2_model_with_cache_background_forward( 329 | conv_module, 330 | dense_module, 331 | local_pooling_module, 332 | ): 333 | # Create a huge white image 334 | height = 10000 335 | width = 10000 336 | image = np.full(shape=(height, width, 3), fill_value=255, dtype=np.uint8) 337 | image_batch = torch.tensor(image, dtype=torch.uint8) 338 | image_batch = image_batch[np.newaxis, :, :, :] 339 | 340 | # Create models 341 | tile_size = 3072 342 | hms2_model_use = Hms2Model( 343 | loader_module=PlainLoaderModule().cuda(), 344 | conv_module=conv_module, 345 | dense_module=dense_module, 346 | local_pooling_module=local_pooling_module, 347 | tile_size=tile_size, 348 | emb_crop_size=7, 349 | emb_stride_size=32, 350 | cache_background_forward=True, 351 | ) 352 | hms2_model_nouse = Hms2Model( 353 | loader_module=PlainLoaderModule().cuda(), 354 | conv_module=conv_module, 355 | dense_module=dense_module, 356 | local_pooling_module=local_pooling_module, 357 | tile_size=tile_size, 358 | emb_crop_size=7, 359 | emb_stride_size=32, 360 | cache_background_forward=False, 361 | ) 362 | 363 | # Test forward 364 | time_1 = time() 365 | use_output = hms2_model_use(image_batch) 366 | use_output = use_output.detach().cpu().numpy() 367 | time_2 = time() 368 | use_time = time_2 - time_1 369 | 370 | time_1 = time() 371 | nouse_output = hms2_model_nouse(image_batch) 372 | nouse_output = nouse_output.detach().cpu().numpy() 373 | time_2 = time() 374 | nouse_time = time_2 - time_1 375 | 376 | assert use_output == pytest.approx(nouse_output) 377 | assert use_time < nouse_time 378 | 379 | 380 | def test_hms2_model_with_cache_background_backward( 381 | conv_module, 382 | dense_module, 383 | local_pooling_module, 384 | ): 385 | # Create a huge white image 386 | height = 10000 387 | width = 10000 388 | image = np.full(shape=(height, width, 3), fill_value=255, dtype=np.uint8) 389 | image_batch = torch.tensor(image, dtype=torch.uint8) 390 | image_batch = image_batch[np.newaxis, :, :, :] 391 | target_batch = torch.tensor(np.array([100]), dtype=torch.long).cuda() 392 | 393 | # Create models 394 | tile_size = 3072 395 | hms2_model_use = Hms2Model( 396 | loader_module=PlainLoaderModule().cuda(), 397 | conv_module=conv_module, 398 | dense_module=dense_module, 399 | local_pooling_module=local_pooling_module, 400 | tile_size=tile_size, 401 | emb_crop_size=7, 402 | emb_stride_size=32, 403 | skip_no_grad=False, 404 | cache_background_backward=True, 405 | ) 406 | hms2_model_nouse = Hms2Model( 407 | loader_module=PlainLoaderModule().cuda(), 408 | conv_module=conv_module, 409 | dense_module=dense_module, 410 | local_pooling_module=local_pooling_module, 411 | tile_size=tile_size, 412 | 
emb_crop_size=7, 413 | emb_stride_size=32, 414 | skip_no_grad=False, 415 | cache_background_backward=False, 416 | ) 417 | 418 | # Test backward 419 | hms2_model_use.zero_grad() 420 | use_output = hms2_model_use(image_batch) 421 | loss = nn.CrossEntropyLoss()(use_output, target_batch) 422 | time_1 = time() 423 | loss.backward() 424 | time_2 = time() 425 | use_grads = [ 426 | parameter.grad.cpu().numpy() for parameter in hms2_model_use.parameters() 427 | ] 428 | use_time = time_2 - time_1 429 | 430 | hms2_model_nouse.zero_grad() 431 | nouse_output = hms2_model_nouse(image_batch) 432 | loss = nn.CrossEntropyLoss()(nouse_output, target_batch) 433 | time_1 = time() 434 | loss.backward() 435 | time_2 = time() 436 | nouse_grads = [ 437 | parameter.grad.cpu().numpy() for parameter in hms2_model_nouse.parameters() 438 | ] 439 | nouse_time = time_2 - time_1 440 | 441 | for use_grad, nouse_grad in zip(use_grads, nouse_grads): 442 | assert use_grad == pytest.approx(nouse_grad, abs=1e-4) 443 | assert use_time < nouse_time 444 | --------------------------------------------------------------------------------
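For quick reference, the dry-run tests above double as a usage recipe for the public API. Below is a minimal sketch of a single training step, distilled from tests/core/test_builder.py; it assumes hms2 is installed with its CUDA dependencies and that a GPU is available, and it uses only builder arguments that actually appear in the tests (n_classes, backbone, pooling, use_hms2, augmentation_list); the specific values shown are illustrative.

# Minimal single-step training sketch (assumption: a CUDA-capable GPU).
import numpy as np
import torch

from hms2.core.builder import Hms2ModelBuilder

# Build an HMS2-enabled binary classifier; with use_hms2=True the model
# streams tiles to the GPU itself, so the input batch stays on the CPU.
model = Hms2ModelBuilder().build(
    n_classes=2,
    backbone="resnet50_frozenbn",
    pooling="gmp",
    use_hms2=True,
    augmentation_list=["flip", "rigid", "hed_perturb"],
)
optimizer = torch.optim.AdamW(model.parameters())
criterion = torch.nn.CrossEntropyLoss()

# NHWC uint8 batch, as in the dry-run test.
image = np.random.randint(0, 255, size=(1, 5000, 5000, 3), dtype=np.uint8)
input_batch = torch.tensor(image)
y_true_batch = torch.tensor([1], dtype=torch.int64).cuda()

y_pred_batch = model(input_batch)  # shape: (1, n_classes)
loss = criterion(y_pred_batch, y_true_batch)
loss.backward()
optimizer.step()
optimizer.zero_grad()

With use_hms2=False the same builder yields a plain model; in that case, as the tests show, the input batch must be moved to the GPU manually before the forward pass.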