├── .gitignore ├── LICENSE.txt ├── config └── config.json ├── pictures ├── cell-type-hor.png ├── cell-type-verticle.png ├── demo.png ├── logo.png ├── risk_score_legend.png ├── screenshot_agg.png ├── screenshot_cc.png ├── screenshot_cell_fraction.png ├── screenshot_cell_type_vis.png ├── screenshot_degree_centrality.png ├── screenshot_file_upload.png ├── screenshot_interaction.png ├── screenshot_mode.png ├── screenshot_option.png ├── screenshot_run.png └── screenshot_thumbnail.png ├── preprocessing ├── gen_Seurat_objects_counts.R ├── genepos.py ├── inferCNV.py ├── quality_control.R └── tumor_frac.py ├── readme.md ├── requirements.txt └── src ├── __init__.py ├── about.py ├── dataset.py ├── ga.py ├── get_patch_img.py ├── main_app.py ├── pathology_models.py ├── resnet.py ├── run_predict.py ├── spa_mapping.py ├── spatial_stat.py ├── tutorial.md └── utils.py /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__/ 2 | model_weights/ 3 | example/ 4 | temp/ 5 | config/auth.yaml 6 | src/auth.py 7 | Dockerfile 8 | .DS_Store 9 | 10 | 11 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | GNU GENERAL PUBLIC LICENSE 2 | Version 3, 29 June 2007 3 | 4 | Copyright (C) 2007 Free Software Foundation, Inc. 5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 7 | 8 | Preamble 9 | 10 | The GNU General Public License is a free, copyleft license for 11 | software and other kinds of works. 12 | 13 | The licenses for most software and other practical works are designed 14 | to take away your freedom to share and change the works. By contrast, 15 | the GNU General Public License is intended to guarantee your freedom to 16 | share and change all versions of a program--to make sure it remains free 17 | software for all its users. We, the Free Software Foundation, use the 18 | GNU General Public License for most of our software; it applies also to 19 | any other work released this way by its authors. You can apply it to 20 | your programs, too. 21 | 22 | When we speak of free software, we are referring to freedom, not 23 | price. Our General Public Licenses are designed to make sure that you 24 | have the freedom to distribute copies of free software (and charge for 25 | them if you wish), that you receive source code or can get it if you 26 | want it, that you can change the software or use pieces of it in new 27 | free programs, and that you know you can do these things. 28 | 29 | To protect your rights, we need to prevent others from denying you 30 | these rights or asking you to surrender the rights. Therefore, you have 31 | certain responsibilities if you distribute copies of the software, or if 32 | you modify it: responsibilities to respect the freedom of others. 33 | 34 | For example, if you distribute copies of such a program, whether 35 | gratis or for a fee, you must pass on to the recipients the same 36 | freedoms that you received. You must make sure that they, too, receive 37 | or can get the source code. And you must show them these terms so they 38 | know their rights. 39 | 40 | Developers that use the GNU GPL protect your rights with two steps: 41 | (1) assert copyright on the software, and (2) offer you this License 42 | giving you legal permission to copy, distribute and/or modify it. 43 | 44 | For the developers' and authors' protection, the GPL clearly explains 45 | that there is no warranty for this free software. For both users' and 46 | authors' sake, the GPL requires that modified versions be marked as 47 | changed, so that their problems will not be attributed erroneously to 48 | authors of previous versions. 49 | 50 | Some devices are designed to deny users access to install or run 51 | modified versions of the software inside them, although the manufacturer 52 | can do so. This is fundamentally incompatible with the aim of 53 | protecting users' freedom to change the software. The systematic 54 | pattern of such abuse occurs in the area of products for individuals to 55 | use, which is precisely where it is most unacceptable. Therefore, we 56 | have designed this version of the GPL to prohibit the practice for those 57 | products. If such problems arise substantially in other domains, we 58 | stand ready to extend this provision to those domains in future versions 59 | of the GPL, as needed to protect the freedom of users. 60 | 61 | Finally, every program is threatened constantly by software patents. 62 | States should not allow patents to restrict development and use of 63 | software on general-purpose computers, but in those that do, we wish to 64 | avoid the special danger that patents applied to a free program could 65 | make it effectively proprietary. To prevent this, the GPL assures that 66 | patents cannot be used to render the program non-free. 67 | 68 | The precise terms and conditions for copying, distribution and 69 | modification follow. 70 | 71 | TERMS AND CONDITIONS 72 | 73 | 0. Definitions. 74 | 75 | "This License" refers to version 3 of the GNU General Public License. 76 | 77 | "Copyright" also means copyright-like laws that apply to other kinds of 78 | works, such as semiconductor masks. 79 | 80 | "The Program" refers to any copyrightable work licensed under this 81 | License. Each licensee is addressed as "you". "Licensees" and 82 | "recipients" may be individuals or organizations. 83 | 84 | To "modify" a work means to copy from or adapt all or part of the work 85 | in a fashion requiring copyright permission, other than the making of an 86 | exact copy. The resulting work is called a "modified version" of the 87 | earlier work or a work "based on" the earlier work. 88 | 89 | A "covered work" means either the unmodified Program or a work based 90 | on the Program. 91 | 92 | To "propagate" a work means to do anything with it that, without 93 | permission, would make you directly or secondarily liable for 94 | infringement under applicable copyright law, except executing it on a 95 | computer or modifying a private copy. Propagation includes copying, 96 | distribution (with or without modification), making available to the 97 | public, and in some countries other activities as well. 98 | 99 | To "convey" a work means any kind of propagation that enables other 100 | parties to make or receive copies. Mere interaction with a user through 101 | a computer network, with no transfer of a copy, is not conveying. 102 | 103 | An interactive user interface displays "Appropriate Legal Notices" 104 | to the extent that it includes a convenient and prominently visible 105 | feature that (1) displays an appropriate copyright notice, and (2) 106 | tells the user that there is no warranty for the work (except to the 107 | extent that warranties are provided), that licensees may convey the 108 | work under this License, and how to view a copy of this License. If 109 | the interface presents a list of user commands or options, such as a 110 | menu, a prominent item in the list meets this criterion. 111 | 112 | 1. Source Code. 113 | 114 | The "source code" for a work means the preferred form of the work 115 | for making modifications to it. "Object code" means any non-source 116 | form of a work. 117 | 118 | A "Standard Interface" means an interface that either is an official 119 | standard defined by a recognized standards body, or, in the case of 120 | interfaces specified for a particular programming language, one that 121 | is widely used among developers working in that language. 122 | 123 | The "System Libraries" of an executable work include anything, other 124 | than the work as a whole, that (a) is included in the normal form of 125 | packaging a Major Component, but which is not part of that Major 126 | Component, and (b) serves only to enable use of the work with that 127 | Major Component, or to implement a Standard Interface for which an 128 | implementation is available to the public in source code form. A 129 | "Major Component", in this context, means a major essential component 130 | (kernel, window system, and so on) of the specific operating system 131 | (if any) on which the executable work runs, or a compiler used to 132 | produce the work, or an object code interpreter used to run it. 133 | 134 | The "Corresponding Source" for a work in object code form means all 135 | the source code needed to generate, install, and (for an executable 136 | work) run the object code and to modify the work, including scripts to 137 | control those activities. However, it does not include the work's 138 | System Libraries, or general-purpose tools or generally available free 139 | programs which are used unmodified in performing those activities but 140 | which are not part of the work. For example, Corresponding Source 141 | includes interface definition files associated with source files for 142 | the work, and the source code for shared libraries and dynamically 143 | linked subprograms that the work is specifically designed to require, 144 | such as by intimate data communication or control flow between those 145 | subprograms and other parts of the work. 146 | 147 | The Corresponding Source need not include anything that users 148 | can regenerate automatically from other parts of the Corresponding 149 | Source. 150 | 151 | The Corresponding Source for a work in source code form is that 152 | same work. 153 | 154 | 2. Basic Permissions. 155 | 156 | All rights granted under this License are granted for the term of 157 | copyright on the Program, and are irrevocable provided the stated 158 | conditions are met. This License explicitly affirms your unlimited 159 | permission to run the unmodified Program. The output from running a 160 | covered work is covered by this License only if the output, given its 161 | content, constitutes a covered work. This License acknowledges your 162 | rights of fair use or other equivalent, as provided by copyright law. 163 | 164 | You may make, run and propagate covered works that you do not 165 | convey, without conditions so long as your license otherwise remains 166 | in force. You may convey covered works to others for the sole purpose 167 | of having them make modifications exclusively for you, or provide you 168 | with facilities for running those works, provided that you comply with 169 | the terms of this License in conveying all material for which you do 170 | not control copyright. Those thus making or running the covered works 171 | for you must do so exclusively on your behalf, under your direction 172 | and control, on terms that prohibit them from making any copies of 173 | your copyrighted material outside their relationship with you. 174 | 175 | Conveying under any other circumstances is permitted solely under 176 | the conditions stated below. Sublicensing is not allowed; section 10 177 | makes it unnecessary. 178 | 179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 180 | 181 | No covered work shall be deemed part of an effective technological 182 | measure under any applicable law fulfilling obligations under article 183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or 184 | similar laws prohibiting or restricting circumvention of such 185 | measures. 186 | 187 | When you convey a covered work, you waive any legal power to forbid 188 | circumvention of technological measures to the extent such circumvention 189 | is effected by exercising rights under this License with respect to 190 | the covered work, and you disclaim any intention to limit operation or 191 | modification of the work as a means of enforcing, against the work's 192 | users, your or third parties' legal rights to forbid circumvention of 193 | technological measures. 194 | 195 | 4. Conveying Verbatim Copies. 196 | 197 | You may convey verbatim copies of the Program's source code as you 198 | receive it, in any medium, provided that you conspicuously and 199 | appropriately publish on each copy an appropriate copyright notice; 200 | keep intact all notices stating that this License and any 201 | non-permissive terms added in accord with section 7 apply to the code; 202 | keep intact all notices of the absence of any warranty; and give all 203 | recipients a copy of this License along with the Program. 204 | 205 | You may charge any price or no price for each copy that you convey, 206 | and you may offer support or warranty protection for a fee. 207 | 208 | 5. Conveying Modified Source Versions. 209 | 210 | You may convey a work based on the Program, or the modifications to 211 | produce it from the Program, in the form of source code under the 212 | terms of section 4, provided that you also meet all of these conditions: 213 | 214 | a) The work must carry prominent notices stating that you modified 215 | it, and giving a relevant date. 216 | 217 | b) The work must carry prominent notices stating that it is 218 | released under this License and any conditions added under section 219 | 7. This requirement modifies the requirement in section 4 to 220 | "keep intact all notices". 221 | 222 | c) You must license the entire work, as a whole, under this 223 | License to anyone who comes into possession of a copy. This 224 | License will therefore apply, along with any applicable section 7 225 | additional terms, to the whole of the work, and all its parts, 226 | regardless of how they are packaged. This License gives no 227 | permission to license the work in any other way, but it does not 228 | invalidate such permission if you have separately received it. 229 | 230 | d) If the work has interactive user interfaces, each must display 231 | Appropriate Legal Notices; however, if the Program has interactive 232 | interfaces that do not display Appropriate Legal Notices, your 233 | work need not make them do so. 234 | 235 | A compilation of a covered work with other separate and independent 236 | works, which are not by their nature extensions of the covered work, 237 | and which are not combined with it such as to form a larger program, 238 | in or on a volume of a storage or distribution medium, is called an 239 | "aggregate" if the compilation and its resulting copyright are not 240 | used to limit the access or legal rights of the compilation's users 241 | beyond what the individual works permit. Inclusion of a covered work 242 | in an aggregate does not cause this License to apply to the other 243 | parts of the aggregate. 244 | 245 | 6. Conveying Non-Source Forms. 246 | 247 | You may convey a covered work in object code form under the terms 248 | of sections 4 and 5, provided that you also convey the 249 | machine-readable Corresponding Source under the terms of this License, 250 | in one of these ways: 251 | 252 | a) Convey the object code in, or embodied in, a physical product 253 | (including a physical distribution medium), accompanied by the 254 | Corresponding Source fixed on a durable physical medium 255 | customarily used for software interchange. 256 | 257 | b) Convey the object code in, or embodied in, a physical product 258 | (including a physical distribution medium), accompanied by a 259 | written offer, valid for at least three years and valid for as 260 | long as you offer spare parts or customer support for that product 261 | model, to give anyone who possesses the object code either (1) a 262 | copy of the Corresponding Source for all the software in the 263 | product that is covered by this License, on a durable physical 264 | medium customarily used for software interchange, for a price no 265 | more than your reasonable cost of physically performing this 266 | conveying of source, or (2) access to copy the 267 | Corresponding Source from a network server at no charge. 268 | 269 | c) Convey individual copies of the object code with a copy of the 270 | written offer to provide the Corresponding Source. This 271 | alternative is allowed only occasionally and noncommercially, and 272 | only if you received the object code with such an offer, in accord 273 | with subsection 6b. 274 | 275 | d) Convey the object code by offering access from a designated 276 | place (gratis or for a charge), and offer equivalent access to the 277 | Corresponding Source in the same way through the same place at no 278 | further charge. You need not require recipients to copy the 279 | Corresponding Source along with the object code. If the place to 280 | copy the object code is a network server, the Corresponding Source 281 | may be on a different server (operated by you or a third party) 282 | that supports equivalent copying facilities, provided you maintain 283 | clear directions next to the object code saying where to find the 284 | Corresponding Source. Regardless of what server hosts the 285 | Corresponding Source, you remain obligated to ensure that it is 286 | available for as long as needed to satisfy these requirements. 287 | 288 | e) Convey the object code using peer-to-peer transmission, provided 289 | you inform other peers where the object code and Corresponding 290 | Source of the work are being offered to the general public at no 291 | charge under subsection 6d. 292 | 293 | A separable portion of the object code, whose source code is excluded 294 | from the Corresponding Source as a System Library, need not be 295 | included in conveying the object code work. 296 | 297 | A "User Product" is either (1) a "consumer product", which means any 298 | tangible personal property which is normally used for personal, family, 299 | or household purposes, or (2) anything designed or sold for incorporation 300 | into a dwelling. In determining whether a product is a consumer product, 301 | doubtful cases shall be resolved in favor of coverage. For a particular 302 | product received by a particular user, "normally used" refers to a 303 | typical or common use of that class of product, regardless of the status 304 | of the particular user or of the way in which the particular user 305 | actually uses, or expects or is expected to use, the product. A product 306 | is a consumer product regardless of whether the product has substantial 307 | commercial, industrial or non-consumer uses, unless such uses represent 308 | the only significant mode of use of the product. 309 | 310 | "Installation Information" for a User Product means any methods, 311 | procedures, authorization keys, or other information required to install 312 | and execute modified versions of a covered work in that User Product from 313 | a modified version of its Corresponding Source. The information must 314 | suffice to ensure that the continued functioning of the modified object 315 | code is in no case prevented or interfered with solely because 316 | modification has been made. 317 | 318 | If you convey an object code work under this section in, or with, or 319 | specifically for use in, a User Product, and the conveying occurs as 320 | part of a transaction in which the right of possession and use of the 321 | User Product is transferred to the recipient in perpetuity or for a 322 | fixed term (regardless of how the transaction is characterized), the 323 | Corresponding Source conveyed under this section must be accompanied 324 | by the Installation Information. But this requirement does not apply 325 | if neither you nor any third party retains the ability to install 326 | modified object code on the User Product (for example, the work has 327 | been installed in ROM). 328 | 329 | The requirement to provide Installation Information does not include a 330 | requirement to continue to provide support service, warranty, or updates 331 | for a work that has been modified or installed by the recipient, or for 332 | the User Product in which it has been modified or installed. Access to a 333 | network may be denied when the modification itself materially and 334 | adversely affects the operation of the network or violates the rules and 335 | protocols for communication across the network. 336 | 337 | Corresponding Source conveyed, and Installation Information provided, 338 | in accord with this section must be in a format that is publicly 339 | documented (and with an implementation available to the public in 340 | source code form), and must require no special password or key for 341 | unpacking, reading or copying. 342 | 343 | 7. Additional Terms. 344 | 345 | "Additional permissions" are terms that supplement the terms of this 346 | License by making exceptions from one or more of its conditions. 347 | Additional permissions that are applicable to the entire Program shall 348 | be treated as though they were included in this License, to the extent 349 | that they are valid under applicable law. If additional permissions 350 | apply only to part of the Program, that part may be used separately 351 | under those permissions, but the entire Program remains governed by 352 | this License without regard to the additional permissions. 353 | 354 | When you convey a copy of a covered work, you may at your option 355 | remove any additional permissions from that copy, or from any part of 356 | it. (Additional permissions may be written to require their own 357 | removal in certain cases when you modify the work.) You may place 358 | additional permissions on material, added by you to a covered work, 359 | for which you have or can give appropriate copyright permission. 360 | 361 | Notwithstanding any other provision of this License, for material you 362 | add to a covered work, you may (if authorized by the copyright holders of 363 | that material) supplement the terms of this License with terms: 364 | 365 | a) Disclaiming warranty or limiting liability differently from the 366 | terms of sections 15 and 16 of this License; or 367 | 368 | b) Requiring preservation of specified reasonable legal notices or 369 | author attributions in that material or in the Appropriate Legal 370 | Notices displayed by works containing it; or 371 | 372 | c) Prohibiting misrepresentation of the origin of that material, or 373 | requiring that modified versions of such material be marked in 374 | reasonable ways as different from the original version; or 375 | 376 | d) Limiting the use for publicity purposes of names of licensors or 377 | authors of the material; or 378 | 379 | e) Declining to grant rights under trademark law for use of some 380 | trade names, trademarks, or service marks; or 381 | 382 | f) Requiring indemnification of licensors and authors of that 383 | material by anyone who conveys the material (or modified versions of 384 | it) with contractual assumptions of liability to the recipient, for 385 | any liability that these contractual assumptions directly impose on 386 | those licensors and authors. 387 | 388 | All other non-permissive additional terms are considered "further 389 | restrictions" within the meaning of section 10. If the Program as you 390 | received it, or any part of it, contains a notice stating that it is 391 | governed by this License along with a term that is a further 392 | restriction, you may remove that term. If a license document contains 393 | a further restriction but permits relicensing or conveying under this 394 | License, you may add to a covered work material governed by the terms 395 | of that license document, provided that the further restriction does 396 | not survive such relicensing or conveying. 397 | 398 | If you add terms to a covered work in accord with this section, you 399 | must place, in the relevant source files, a statement of the 400 | additional terms that apply to those files, or a notice indicating 401 | where to find the applicable terms. 402 | 403 | Additional terms, permissive or non-permissive, may be stated in the 404 | form of a separately written license, or stated as exceptions; 405 | the above requirements apply either way. 406 | 407 | 8. Termination. 408 | 409 | You may not propagate or modify a covered work except as expressly 410 | provided under this License. Any attempt otherwise to propagate or 411 | modify it is void, and will automatically terminate your rights under 412 | this License (including any patent licenses granted under the third 413 | paragraph of section 11). 414 | 415 | However, if you cease all violation of this License, then your 416 | license from a particular copyright holder is reinstated (a) 417 | provisionally, unless and until the copyright holder explicitly and 418 | finally terminates your license, and (b) permanently, if the copyright 419 | holder fails to notify you of the violation by some reasonable means 420 | prior to 60 days after the cessation. 421 | 422 | Moreover, your license from a particular copyright holder is 423 | reinstated permanently if the copyright holder notifies you of the 424 | violation by some reasonable means, this is the first time you have 425 | received notice of violation of this License (for any work) from that 426 | copyright holder, and you cure the violation prior to 30 days after 427 | your receipt of the notice. 428 | 429 | Termination of your rights under this section does not terminate the 430 | licenses of parties who have received copies or rights from you under 431 | this License. If your rights have been terminated and not permanently 432 | reinstated, you do not qualify to receive new licenses for the same 433 | material under section 10. 434 | 435 | 9. Acceptance Not Required for Having Copies. 436 | 437 | You are not required to accept this License in order to receive or 438 | run a copy of the Program. Ancillary propagation of a covered work 439 | occurring solely as a consequence of using peer-to-peer transmission 440 | to receive a copy likewise does not require acceptance. However, 441 | nothing other than this License grants you permission to propagate or 442 | modify any covered work. These actions infringe copyright if you do 443 | not accept this License. Therefore, by modifying or propagating a 444 | covered work, you indicate your acceptance of this License to do so. 445 | 446 | 10. Automatic Licensing of Downstream Recipients. 447 | 448 | Each time you convey a covered work, the recipient automatically 449 | receives a license from the original licensors, to run, modify and 450 | propagate that work, subject to this License. You are not responsible 451 | for enforcing compliance by third parties with this License. 452 | 453 | An "entity transaction" is a transaction transferring control of an 454 | organization, or substantially all assets of one, or subdividing an 455 | organization, or merging organizations. If propagation of a covered 456 | work results from an entity transaction, each party to that 457 | transaction who receives a copy of the work also receives whatever 458 | licenses to the work the party's predecessor in interest had or could 459 | give under the previous paragraph, plus a right to possession of the 460 | Corresponding Source of the work from the predecessor in interest, if 461 | the predecessor has it or can get it with reasonable efforts. 462 | 463 | You may not impose any further restrictions on the exercise of the 464 | rights granted or affirmed under this License. For example, you may 465 | not impose a license fee, royalty, or other charge for exercise of 466 | rights granted under this License, and you may not initiate litigation 467 | (including a cross-claim or counterclaim in a lawsuit) alleging that 468 | any patent claim is infringed by making, using, selling, offering for 469 | sale, or importing the Program or any portion of it. 470 | 471 | 11. Patents. 472 | 473 | A "contributor" is a copyright holder who authorizes use under this 474 | License of the Program or a work on which the Program is based. The 475 | work thus licensed is called the contributor's "contributor version". 476 | 477 | A contributor's "essential patent claims" are all patent claims 478 | owned or controlled by the contributor, whether already acquired or 479 | hereafter acquired, that would be infringed by some manner, permitted 480 | by this License, of making, using, or selling its contributor version, 481 | but do not include claims that would be infringed only as a 482 | consequence of further modification of the contributor version. For 483 | purposes of this definition, "control" includes the right to grant 484 | patent sublicenses in a manner consistent with the requirements of 485 | this License. 486 | 487 | Each contributor grants you a non-exclusive, worldwide, royalty-free 488 | patent license under the contributor's essential patent claims, to 489 | make, use, sell, offer for sale, import and otherwise run, modify and 490 | propagate the contents of its contributor version. 491 | 492 | In the following three paragraphs, a "patent license" is any express 493 | agreement or commitment, however denominated, not to enforce a patent 494 | (such as an express permission to practice a patent or covenant not to 495 | sue for patent infringement). To "grant" such a patent license to a 496 | party means to make such an agreement or commitment not to enforce a 497 | patent against the party. 498 | 499 | If you convey a covered work, knowingly relying on a patent license, 500 | and the Corresponding Source of the work is not available for anyone 501 | to copy, free of charge and under the terms of this License, through a 502 | publicly available network server or other readily accessible means, 503 | then you must either (1) cause the Corresponding Source to be so 504 | available, or (2) arrange to deprive yourself of the benefit of the 505 | patent license for this particular work, or (3) arrange, in a manner 506 | consistent with the requirements of this License, to extend the patent 507 | license to downstream recipients. "Knowingly relying" means you have 508 | actual knowledge that, but for the patent license, your conveying the 509 | covered work in a country, or your recipient's use of the covered work 510 | in a country, would infringe one or more identifiable patents in that 511 | country that you have reason to believe are valid. 512 | 513 | If, pursuant to or in connection with a single transaction or 514 | arrangement, you convey, or propagate by procuring conveyance of, a 515 | covered work, and grant a patent license to some of the parties 516 | receiving the covered work authorizing them to use, propagate, modify 517 | or convey a specific copy of the covered work, then the patent license 518 | you grant is automatically extended to all recipients of the covered 519 | work and works based on it. 520 | 521 | A patent license is "discriminatory" if it does not include within 522 | the scope of its coverage, prohibits the exercise of, or is 523 | conditioned on the non-exercise of one or more of the rights that are 524 | specifically granted under this License. You may not convey a covered 525 | work if you are a party to an arrangement with a third party that is 526 | in the business of distributing software, under which you make payment 527 | to the third party based on the extent of your activity of conveying 528 | the work, and under which the third party grants, to any of the 529 | parties who would receive the covered work from you, a discriminatory 530 | patent license (a) in connection with copies of the covered work 531 | conveyed by you (or copies made from those copies), or (b) primarily 532 | for and in connection with specific products or compilations that 533 | contain the covered work, unless you entered into that arrangement, 534 | or that patent license was granted, prior to 28 March 2007. 535 | 536 | Nothing in this License shall be construed as excluding or limiting 537 | any implied license or other defenses to infringement that may 538 | otherwise be available to you under applicable patent law. 539 | 540 | 12. No Surrender of Others' Freedom. 541 | 542 | If conditions are imposed on you (whether by court order, agreement or 543 | otherwise) that contradict the conditions of this License, they do not 544 | excuse you from the conditions of this License. If you cannot convey a 545 | covered work so as to satisfy simultaneously your obligations under this 546 | License and any other pertinent obligations, then as a consequence you may 547 | not convey it at all. For example, if you agree to terms that obligate you 548 | to collect a royalty for further conveying from those to whom you convey 549 | the Program, the only way you could satisfy both those terms and this 550 | License would be to refrain entirely from conveying the Program. 551 | 552 | 13. Use with the GNU Affero General Public License. 553 | 554 | Notwithstanding any other provision of this License, you have 555 | permission to link or combine any covered work with a work licensed 556 | under version 3 of the GNU Affero General Public License into a single 557 | combined work, and to convey the resulting work. The terms of this 558 | License will continue to apply to the part which is the covered work, 559 | but the special requirements of the GNU Affero General Public License, 560 | section 13, concerning interaction through a network will apply to the 561 | combination as such. 562 | 563 | 14. Revised Versions of this License. 564 | 565 | The Free Software Foundation may publish revised and/or new versions of 566 | the GNU General Public License from time to time. Such new versions will 567 | be similar in spirit to the present version, but may differ in detail to 568 | address new problems or concerns. 569 | 570 | Each version is given a distinguishing version number. If the 571 | Program specifies that a certain numbered version of the GNU General 572 | Public License "or any later version" applies to it, you have the 573 | option of following the terms and conditions either of that numbered 574 | version or of any later version published by the Free Software 575 | Foundation. If the Program does not specify a version number of the 576 | GNU General Public License, you may choose any version ever published 577 | by the Free Software Foundation. 578 | 579 | If the Program specifies that a proxy can decide which future 580 | versions of the GNU General Public License can be used, that proxy's 581 | public statement of acceptance of a version permanently authorizes you 582 | to choose that version for the Program. 583 | 584 | Later license versions may give you additional or different 585 | permissions. However, no additional obligations are imposed on any 586 | author or copyright holder as a result of your choosing to follow a 587 | later version. 588 | 589 | 15. Disclaimer of Warranty. 590 | 591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY 592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT 593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY 594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, 595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM 597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF 598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 599 | 600 | 16. Limitation of Liability. 601 | 602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS 604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY 605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE 606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF 607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD 608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), 609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF 610 | SUCH DAMAGES. 611 | 612 | 17. Interpretation of Sections 15 and 16. 613 | 614 | If the disclaimer of warranty and limitation of liability provided 615 | above cannot be given local legal effect according to their terms, 616 | reviewing courts shall apply local law that most closely approximates 617 | an absolute waiver of all civil liability in connection with the 618 | Program, unless a warranty or assumption of liability accompanies a 619 | copy of the Program in return for a fee. 620 | 621 | END OF TERMS AND CONDITIONS 622 | 623 | How to Apply These Terms to Your New Programs 624 | 625 | If you develop a new program, and you want it to be of the greatest 626 | possible use to the public, the best way to achieve this is to make it 627 | free software which everyone can redistribute and change under these terms. 628 | 629 | To do so, attach the following notices to the program. It is safest 630 | to attach them to the start of each source file to most effectively 631 | state the exclusion of warranty; and each file should have at least 632 | the "copyright" line and a pointer to where the full notice is found. 633 | 634 | 635 | Copyright (C) 636 | 637 | This program is free software: you can redistribute it and/or modify 638 | it under the terms of the GNU General Public License as published by 639 | the Free Software Foundation, either version 3 of the License, or 640 | (at your option) any later version. 641 | 642 | This program is distributed in the hope that it will be useful, 643 | but WITHOUT ANY WARRANTY; without even the implied warranty of 644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 645 | GNU General Public License for more details. 646 | 647 | You should have received a copy of the GNU General Public License 648 | along with this program. If not, see . 649 | 650 | Also add information on how to contact you by electronic and paper mail. 651 | 652 | If the program does terminal interaction, make it output a short 653 | notice like this when it starts in an interactive mode: 654 | 655 | Copyright (C) 656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 657 | This is free software, and you are welcome to redistribute it 658 | under certain conditions; type `show c' for details. 659 | 660 | The hypothetical commands `show w' and `show c' should show the appropriate 661 | parts of the General Public License. Of course, your program's commands 662 | might be different; for a GUI interface, you would use an "about box". 663 | 664 | You should also get your employer (if you work as a programmer) or school, 665 | if any, to sign a "copyright disclaimer" for the program, if necessary. 666 | For more information on this, and how to apply and follow the GNU GPL, see 667 | . 668 | 669 | The GNU General Public License does not permit incorporating your program 670 | into proprietary programs. If your program is a subroutine library, you 671 | may consider it more useful to permit linking proprietary applications with 672 | the library. If this is what you want to do, use the GNU Lesser General 673 | Public License instead of this License. But first, please read 674 | . 675 | -------------------------------------------------------------------------------- /config/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "model_name": "resnet50", 3 | "target_label": "max_class", 4 | "use_h5": 0, 5 | "num_classes": 8, 6 | "batch_size": 128, 7 | "use_cuda":1, 8 | "label_column" : "label", 9 | "pretrained": 1, 10 | "img_size": 46, 11 | "aggregator": "identity", 12 | "aggregator_hdim": 2048, 13 | "compress_factor": 32 14 | } -------------------------------------------------------------------------------- /pictures/cell-type-hor.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gevaertlab/GBM360/4428121d3930d2ca6646e185da688ca172bb07a5/pictures/cell-type-hor.png -------------------------------------------------------------------------------- /pictures/cell-type-verticle.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gevaertlab/GBM360/4428121d3930d2ca6646e185da688ca172bb07a5/pictures/cell-type-verticle.png -------------------------------------------------------------------------------- /pictures/demo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gevaertlab/GBM360/4428121d3930d2ca6646e185da688ca172bb07a5/pictures/demo.png -------------------------------------------------------------------------------- /pictures/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gevaertlab/GBM360/4428121d3930d2ca6646e185da688ca172bb07a5/pictures/logo.png -------------------------------------------------------------------------------- /pictures/risk_score_legend.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gevaertlab/GBM360/4428121d3930d2ca6646e185da688ca172bb07a5/pictures/risk_score_legend.png -------------------------------------------------------------------------------- /pictures/screenshot_agg.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gevaertlab/GBM360/4428121d3930d2ca6646e185da688ca172bb07a5/pictures/screenshot_agg.png -------------------------------------------------------------------------------- /pictures/screenshot_cc.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gevaertlab/GBM360/4428121d3930d2ca6646e185da688ca172bb07a5/pictures/screenshot_cc.png -------------------------------------------------------------------------------- /pictures/screenshot_cell_fraction.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gevaertlab/GBM360/4428121d3930d2ca6646e185da688ca172bb07a5/pictures/screenshot_cell_fraction.png -------------------------------------------------------------------------------- /pictures/screenshot_cell_type_vis.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gevaertlab/GBM360/4428121d3930d2ca6646e185da688ca172bb07a5/pictures/screenshot_cell_type_vis.png -------------------------------------------------------------------------------- /pictures/screenshot_degree_centrality.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gevaertlab/GBM360/4428121d3930d2ca6646e185da688ca172bb07a5/pictures/screenshot_degree_centrality.png -------------------------------------------------------------------------------- /pictures/screenshot_file_upload.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gevaertlab/GBM360/4428121d3930d2ca6646e185da688ca172bb07a5/pictures/screenshot_file_upload.png -------------------------------------------------------------------------------- /pictures/screenshot_interaction.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gevaertlab/GBM360/4428121d3930d2ca6646e185da688ca172bb07a5/pictures/screenshot_interaction.png -------------------------------------------------------------------------------- /pictures/screenshot_mode.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gevaertlab/GBM360/4428121d3930d2ca6646e185da688ca172bb07a5/pictures/screenshot_mode.png -------------------------------------------------------------------------------- /pictures/screenshot_option.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gevaertlab/GBM360/4428121d3930d2ca6646e185da688ca172bb07a5/pictures/screenshot_option.png -------------------------------------------------------------------------------- /pictures/screenshot_run.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gevaertlab/GBM360/4428121d3930d2ca6646e185da688ca172bb07a5/pictures/screenshot_run.png -------------------------------------------------------------------------------- /pictures/screenshot_thumbnail.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gevaertlab/GBM360/4428121d3930d2ca6646e185da688ca172bb07a5/pictures/screenshot_thumbnail.png -------------------------------------------------------------------------------- /preprocessing/gen_Seurat_objects_counts.R: -------------------------------------------------------------------------------- 1 | # Convert raw 10X output to Seurat objects 2 | library(Seurat) 3 | 4 | sample_list <- c('242_T', '243_T', '248_T', '251_T', '255_T', '256_T', '259_T', 5 | '260_T', '262_T', '265_T', '266_T', '268_T', '269_T', '270_T', 6 | '275_T', '296_T', '304_T', '313_T', '334_T') 7 | 8 | data_dir <- "spatial_brain/data/raw" # directory for raw 10X output 9 | save_dir <- "spatial_brain/data/Seurat_object_h5" # save path 10 | 11 | for(sample in sample_list){ 12 | print(sample) 13 | data.dir <- fileh5 <- NULL 14 | data.dir <- paste0(data_dir, "/", "#UKF", sample, "/", sample) 15 | fileh5 <- paste0(data.dir, "/filtered_feature_bc_matrix.h5") 16 | Seurat_obj <- CreateSeuratObject( 17 | counts = Read10X_h5(filename = fileh5), 18 | assay = 'Spatial', 19 | min.cells = 3, 20 | min.features = 200 21 | ) 22 | saveRDS(Seurat_obj, paste0(save_dir, "/", sample, ".rds")) 23 | } 24 | -------------------------------------------------------------------------------- /preprocessing/genepos.py: -------------------------------------------------------------------------------- 1 | # Re-implemtentation of the `genomic_position_from_gtf`` function from infercnvpy: 2 | # https://github.com/icbi-lab/infercnvpy/blob/main/src/infercnvpy/io/_genepos.py 3 | 4 | from pathlib import Path 5 | from typing import Literal, Union 6 | 7 | import gtfparse 8 | import numpy as np 9 | import pandas as pd 10 | from anndata import AnnData 11 | from scanpy import logging 12 | 13 | 14 | def genomic_position_from_gtf( 15 | gtf, 16 | adata: Union[AnnData, None] = None, 17 | *, 18 | gtf_gene_id: Literal["gene_id", "gene_name"] = "gene_name", 19 | adata_gene_id: Union[str, None] = None, 20 | inplace: bool = True, 21 | ) -> Union[pd.DataFrame, None]: 22 | """Get genomic gene positions from a GTF file. 23 | The GTF file needs to match the genome annotation used for your single cell dataset. 24 | .. warning:: 25 | Currently only tested with GENCODE GTFs. 26 | Parameters 27 | ---------- 28 | gtf 29 | Dataframe of the gtf file 30 | adata 31 | Adds the genomic positions to `adata.var`. If adata is None, returns 32 | a data frame with the genomic positions instead. 33 | gtf_gene_id 34 | Use this GTF column to match it to anndata 35 | adata_gene_id 36 | Match this column to the gene ids from the GTF file. Default: use 37 | adata.var_names. 38 | inplace 39 | If True, add the annotations directly to adata, otherwise return a dataframe. 40 | """ 41 | gene_ids_adata = (adata.var_names if adata_gene_id is None else adata.var[adata_gene_id]).values 42 | gtf = gtf.loc[gtf[gtf_gene_id].isin(gene_ids_adata), :] 43 | 44 | missing_from_gtf = len(set(gene_ids_adata) - set(gtf[gtf_gene_id].values)) 45 | if missing_from_gtf: 46 | logging.warning(f"GTF file misses annotation for {missing_from_gtf} genes in adata.") 47 | 48 | duplicated_symbols = np.sum(gtf["gene_name"].duplicated()) 49 | if duplicated_symbols: 50 | logging.warning(f"Skipped {duplicated_symbols} genes because of duplicate identifiers in GTF file.") 51 | gtf = gtf.loc[~gtf[gtf_gene_id].duplicated(keep=False), :] 52 | 53 | tmp_var = adata.var.copy() 54 | orig_index_name = tmp_var.index.name 55 | TMP_INDEX_NAME = "adata_var_index" 56 | tmp_var.index.name = TMP_INDEX_NAME 57 | tmp_var.reset_index(inplace=True) 58 | var_annotated = tmp_var.merge( 59 | gtf, 60 | how="left", 61 | left_on=TMP_INDEX_NAME if adata_gene_id is None else adata_gene_id, 62 | right_on=gtf_gene_id, 63 | validate="one_to_one", 64 | ) 65 | var_annotated.set_index(TMP_INDEX_NAME, inplace=True) 66 | var_annotated.index.name = orig_index_name 67 | 68 | if inplace: 69 | adata.var = var_annotated 70 | else: 71 | return var_annotated -------------------------------------------------------------------------------- /preprocessing/inferCNV.py: -------------------------------------------------------------------------------- 1 | import scanpy as sc 2 | import anndata as ad 3 | import pandas as pd 4 | import numpy as np 5 | import os 6 | import matplotlib.pyplot as plt 7 | import infercnvpy 8 | from genepos import genomic_position_from_gtf 9 | import pdb 10 | 11 | source_dir = "." 12 | data_dir = os.path.join(source_dir, "data/Spatial_Heiland/data/AnnDataObject") 13 | gtf_file = os.path.join(source_dir,"data/Ref_Genome/gencode.v43.annotation.txt") 14 | res_dir = os.path.join(source_dir, 'data/Spatial_Heiland/results/cnv') 15 | 16 | print("=======Reading in data============") 17 | adata = sc.read_h5ad(os.path.join(data_dir, 'concat_counts_three_data.h5ad')) 18 | sc.pp.normalize_total(adata, inplace=True) 19 | sc.pp.log1p(adata) 20 | 21 | print("=======Inferring CNV============") 22 | gtf = pd.read_csv(gtf_file, sep = "\t") 23 | genomic_position_from_gtf(gtf, adata=adata, gtf_gene_id='gene_name') 24 | infercnvpy.tl.infercnv(adata, reference_key = 'dataset', reference_cat = 'normal') 25 | sc.write(os.path.join(data_dir, 'cnv_with_normal.h5ad'), adata) -------------------------------------------------------------------------------- /preprocessing/quality_control.R: -------------------------------------------------------------------------------- 1 | library(Seurat) 2 | library(dplyr) 3 | 4 | data.dir <- "spatial_brain/data/Seurat_object_h5" 5 | save.dir <- "spatial_brain/result/cluster" 6 | cnv <- read.csv("spatial_brain/cnv/cnv_status_final.csv") 7 | colnames(cnv) <- c("barcode", "cnv_status") 8 | 9 | sample_list <- c('242_T', '243_T', '248_T', '251_T', '255_T', '256_T', '259_T', 10 | '260_T', '262_T', '265_T', '266_T', '268_T', '269_T', '270_T', 11 | '275_T', '296_T', '304_T', '313_T', '334_T') 12 | 13 | object_list <- c() 14 | num_spots_total <- c() 15 | num_spots_tumor <- c() 16 | num_spots_after <- c() 17 | processed_samples <- c() 18 | 19 | for(sample in sample_list){ 20 | if(sample %in% processed_samples) next 21 | data <- markers <- merged.data <- NULL 22 | print(paste0("Reading ", sample)) 23 | data <- readRDS(paste0(data.dir, "/", sample, ".rds")) 24 | data[["orig.ident"]] <- sample 25 | data[["barcode"]] <- paste0(rownames(data[[]]), "_", sample) 26 | print(paste0("Number of spots: ", nrow(data[[]]))) 27 | num_spots_total <- c(num_spots_total, nrow(data[[]])) 28 | 29 | # Add tumor or normal information 30 | merged.data <- merge(x=data[[]], y= cnv, by="barcode", all.x=TRUE) 31 | rownames(merged.data) = rownames(data[[]]) 32 | stopifnot(identical(merged.data$barcode, data[[]]$barcode)) # safety check 33 | data@meta.data <- merged.data 34 | print(paste0("Number of tumor spots: ", table(data@meta.data$cnv_status)['tumor'])) 35 | num_spots_tumor <- c(num_spots_tumor, table(data@meta.data$cnv_status)['tumor']) 36 | 37 | print("Fitering QC") 38 | data <- PercentageFeatureSet(data, pattern = "^MT-", col.name = "percent.mt") 39 | data <- subset(data, subset = nCount_Spatial >= 1000 & nFeature_Spatial >= 200 & percent.mt <= 5 & cnv_status != "normal") 40 | if(!sample %in% c("256_T", "262_T")){ 41 | data <- CellCycleScoring(data, s.features = cc.genes$s.genes, 42 | g2m.features = cc.genes$g2m.genes, 43 | set.ident = TRUE) 44 | data <- SCTransform(data, assay = "Spatial", 45 | variable.features.n = 3000, 46 | vars.to.regress = c("percent.mt", "S.Score", "G2M.Score"), 47 | verbose = FALSE) 48 | }else{ 49 | data <- SCTransform(data, assay = "Spatial", 50 | variable.features.n = 3000, 51 | vars.to.regress = c("percent.mt"), 52 | verbose = FALSE) 53 | } 54 | 55 | print(paste0("Number of spots after QC: ", nrow(data[[]]))) 56 | num_spots_after <- c(num_spots_after, nrow(data[[]])) 57 | 58 | object_list <- c(object_list, data) 59 | processed_samples <- c(processed_samples, sample) 60 | } 61 | 62 | sample_sum <- data.frame(sample_id = processed_samples, num_spots_total = num_spots_total, 63 | num_spots_tumor = num_spots_tumor, num_spots_after = num_spots_after) 64 | 65 | write.csv(sample_sum, paste0(save.dir, "/", "sample_spot_summary.csv")) 66 | 67 | object_list <- object_list[-6] #remove 256_T where most of the cells are normal 68 | 69 | # Data integration 70 | features <- SelectIntegrationFeatures(object.list = object_list, nfeatures = 3000) 71 | object_list <- PrepSCTIntegration(object.list = object_list, anchor.features = features) 72 | object_list <- lapply(X = object_list, FUN = RunPCA, features = features) 73 | anchors <- FindIntegrationAnchors(object.list = object_list, normalization.method = "SCT", 74 | anchor.features = features, dims = 1:30, reduction = "rpca", k.anchor = 20) 75 | integrated.data <- IntegrateData(anchorset = anchors, normalization.method = "SCT", dims = 1:30) 76 | saveRDS(integrated.data, paste0(data.dir, "integrated_tumors.rds")) -------------------------------------------------------------------------------- /preprocessing/tumor_frac.py: -------------------------------------------------------------------------------- 1 | # Visulize the heatmap of CNAs and infer tumor fraction 2 | import scanpy as sc 3 | import anndata as ad 4 | from anndata import AnnData 5 | import pandas as pd 6 | import numpy as np 7 | import scipy.sparse 8 | import os 9 | import matplotlib.pyplot as plt 10 | import infercnvpy 11 | import seaborn as sns 12 | import pdb 13 | 14 | source_dir = "." 15 | data_dir = os.path.join(source_dir, "data/Spatial_Heiland/data/AnnDataObject") 16 | save_dir = os.path.join(source_dir, "data/Spatial_Heiland/results/cnv") 17 | plot_dir = os.path.join(save_dir, "heatmap") 18 | 19 | if not os.path.exists(plot_dir): 20 | os.makedirs(plot_dir) 21 | 22 | adata = sc.read_h5ad(os.path.join(data_dir, "cnv_with_normal.h5ad")) 23 | 24 | # Filter out some low-quality samples 25 | adata = adata[~adata.obs['slide_id'].isin(["256_TC", "265_T", "256_TI"])] 26 | adata_normal = adata[adata.obs['dataset'] == "normal"] 27 | adata_tumor = adata[adata.obs['dataset'] != "normal"] 28 | 29 | # All data 30 | plt.figure() 31 | infercnvpy.pl.chromosome_heatmap(adata, groupby="slide_id", figsize = (12,12)) 32 | plt.savefig(os.path.join(plot_dir, "cnv_heatmap_all.png")) 33 | 34 | # Tumor data 35 | plt.figure() 36 | infercnvpy.pl.chromosome_heatmap(adata_tumor, groupby="slide_id", figsize = (12,12)) 37 | plt.savefig(os.path.join(plot_dir, "cnv_heatmap_tumor.png")) 38 | 39 | # Normal data 40 | plt.figure() 41 | infercnvpy.pl.chromosome_heatmap(adata_normal, groupby="slide_id", figsize = (12,12)) 42 | plt.savefig(os.path.join(plot_dir, "cnv_heatmap_normal.png")) 43 | 44 | use_rep = "cnv" 45 | df_cnv = pd.DataFrame.sparse.from_spmatrix(adata.obsm[f'X_{use_rep}']) 46 | df_cnv.index = adata.obs.index 47 | 48 | # Retrieve chromosomal region 49 | chr_pos_dict = dict(sorted(adata.uns[use_rep]["chr_pos"].items(), key=lambda x: x[1])) 50 | chr_pos = list(chr_pos_dict.values()) 51 | var_group_positions = list(zip(chr_pos, chr_pos[1:] + [adata.shape[1]])) 52 | 53 | # Add the chr id as prefix to each chromosomal region 54 | new_cols =[] 55 | for col in list(df_cnv.columns): 56 | for i, var in enumerate(var_group_positions): 57 | if col >= var[0] and col < var[1]: 58 | new_col = f"chr{i+1}:{col}" 59 | new_cols.append(new_col) 60 | continue 61 | df_cnv.columns = new_cols 62 | 63 | # Split normal and tumor matrix 64 | adata.obsm['cnv_chr'] = df_cnv 65 | adata_normal = adata[adata.obs['dataset'] == "normal"] 66 | adata_tumor = adata[adata.obs['dataset'] != "normal"] 67 | 68 | # Check the cnv scores of normal tissues for each region 69 | avg_normal = adata_normal.obsm['cnv_chr'].mean(axis = 0) 70 | print(np.min(avg_normal), np.max(avg_normal), np.mean(avg_normal)) 71 | 72 | all_res = [] 73 | 74 | # Calclulate tumor cell fraction for each tissue 75 | slides = list(adata_tumor.obs['slide_id'].unique()) 76 | slides = ['334_T'] 77 | 78 | for slide in slides: 79 | print(slide) 80 | cur_ann = adata[adata.obs['slide_id'] == slide] 81 | cur_cnv = cur_ann.obsm['cnv_chr'] 82 | 83 | # sort the chr regions based on their absolute mean values of cnvs 84 | abs_mean = cur_cnv.apply(lambda x: x.abs().mean()) 85 | sorted_mean = abs_mean.sort_values(ascending=False) 86 | cur_cnv = cur_cnv.loc[:, sorted_mean.index] 87 | 88 | # select only the chr regions with absolute mean values greater than 0.02 89 | cur_cnv = cur_cnv.loc[:, abs(sorted_mean) > 0.025] 90 | 91 | # select only the top 15 signiatures 92 | if cur_cnv.shape[1] > 15: 93 | cur_cnv = cur_cnv.iloc[:, 0:15] 94 | 95 | # group chr regions by chr 96 | cnv_grouped = cur_cnv.groupby(lambda x: x.split(':')[0], axis=1).agg('mean') 97 | cnv_grouped = cnv_grouped + 1 98 | 99 | # calculate cell frac based on cnvs of each chr 100 | def cal_frac(x): 101 | if x.mean() > 1: 102 | frac = (x-1)/(x.max()-1) 103 | else: 104 | frac = (1-x)/(1-x.min()) 105 | return frac 106 | 107 | cnv_frac = cnv_grouped.apply(lambda x: cal_frac(x)) 108 | cell_frac = cnv_frac.max(axis = 1) 109 | normal_spots = cell_frac[cell_frac<0.2].index 110 | tumor_spots = cell_frac[cell_frac>=0.2].index 111 | df_frac = pd.DataFrame(cell_frac, columns=['tumor_cell_frac']) 112 | df_frac['spot_type'] = np.where(df_frac['tumor_cell_frac'] < 0.2, "normal", "malignant") 113 | all_res.append(df_frac) 114 | 115 | # plot heatmap of tumor cell fractions 116 | cur_ann.obs = cur_ann.obs.merge(df_frac, left_index = True, right_index= True) 117 | 118 | plt.figure() 119 | sc.pl.spatial(cur_ann, 120 | color=["tumor_cell_frac", "spot_type"], 121 | library_id = slide, 122 | #vmin = [None, 0.1], vmax = [None, 0.8], 123 | cmap="RdYlBu_r", 124 | palette = {'normal': sns.color_palette()[1], 'malignant': sns.color_palette()[0]}, 125 | legend_fontsize = 'large', 126 | alpha_img = 0.3, 127 | size = 1.5, 128 | title = '', 129 | legend_loc='right margin', 130 | na_in_legend = False) 131 | plt.savefig(os.path.join(plot_dir, f'{slide}.eps')) 132 | 133 | df_final = pd.concat(all_res) 134 | df_final.to_csv(os.path.join(save_dir, "cnv_status.csv")) -------------------------------------------------------------------------------- /readme.md: -------------------------------------------------------------------------------- 1 | ## GBM360 software ## 2 | 3 | GBM360 is a software that harnesses the power of machine learning to investigate the cellular heterogeneity and spatial architecture of glioblastoma (GBM).
4 | The software takes H&E-stained histology image as input and predicts the distribution of transcriptional subtype and aggressiveness of GBM cells. 5 | 6 | A fully functional software is available at: https://gbm360.stanford.edu. 7 | 8 | 9 | 10 |
11 | 12 | ## System requirements ## 13 | 14 | The software is written with [Streamlit](https://streamlit.io/) (V 1.12). Software dependencies can be found in `requirements.txt` 15 | 16 |
17 | 18 | ## Reference / Citation ## 19 | Zheng, Y., Carrillo-Perez, F., Pizurica, M. et al. Spatial cellular architecture predicts prognosis in glioblastoma. Nat Commun 14, 4122 (2023). https://doi.org/10.1038/s41467-023-39933-0 20 | 21 | ## Installation ## 22 | 23 | This repository contains the source code of GBM360 for demonstration purpose only. 24 | 1. Clone this Git repository:
25 | `git clone https://github.com/gevaertlab/GBM360.git` to your local file system. 26 | 27 | 2. Create a new conda environment:
28 | `conda create --name GBM360 python=3.9` and activate: `conda activate GBM360` 29 | 30 | 3. Install the required packages:
31 | `pip install -r requirements.txt` 32 | 33 |
34 | 35 | ## Instructions for use ## 36 | 37 | 1. Visit [https://gbm360.stanford.edu](https://gbm360.stanford.edu) in a web browser. 38 | 2. Click the `Run` tab located at the top of the page. 39 | 3. To start the analysis, user can either upload a new histology image or simply click `Use an example slide`.
40 | **Note**: 41 | - We currently support images saved in *tif*, *tiff* or *svs* format.
42 | - Ideally, the image should be scanned at 20X magnification with a pixel resolution of 0.5um / pixel. 43 | 44 | 45 | 46 | A thumbnail of the image will display when the upload is complete 47 | 48 | 49 | 50 | 4. Select the mode for running the job.
51 | **Note**: 52 | 53 | - The default mode is set to the `Test mode`, which will only predicts a limited portion of the image (1,000 patches). This is meant to speed up the process by generating a quick preview of the results. 54 | - To predict the entire image, please switch to `Complete` mode. 55 | - We are currently working on obtaining GPU support for this software, which will significantly accelerate its performance. 56 | 57 | 58 | 59 |
60 | 61 | 5. Click the `Get cell type visualization` button to predict the spatial distribution of transcriptional subtype for tumor cells. 62 | 63 | 64 | 65 | The image will be colored by the predicted transcriptional subtype: 66 | 67 | 68 | 69 |
70 | 71 | 6. Based on the spatial subtype prediction, the software will automatically make several statistical analysis to quantify subtype compositions and spatial cellular organization: 72 | 73 | (1) Subtype fraction 74 | 75 | 76 | 77 |
78 | 79 | (2) Subtype interaction 80 | 81 | 82 | 83 |
84 | 85 | (3) Clustering coefficient 86 | 87 | 88 | 89 |
90 | 91 | 6. Finally, click the `Get prognosis visualization` button to predict the aggressive score of the cells. 92 | 93 | Blue indicates low aggressiveness, while Red indicates high aggressiveness 94 | 95 | 96 | 97 | 98 | ## Preprocessing codes ## 99 | 100 | Data from 10X Genomics were first converted into Seurat or AnnData object using the Seurat or Scanpy package. 101 | 102 | 1. Quality control and data integration were performed using the Seurat package: `quality_control.R`. 103 | 2. Run `inferCNV.py` to infer copy number variation using transcriptomics profiles. 104 | 3. Run `tumor_frac.py` to infer tumor cell fraction for each spot based on the CNV profiles. 105 | 106 | 107 | 108 | 109 | 110 | 111 | 112 | 113 | 114 | 115 | 116 | 117 | 118 | 119 | 120 | 121 | 122 | 123 | 124 | 125 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | cmake == 3.25.2 2 | numpy == 1.22.3 3 | pandas == 1.5.3 4 | Pillow == 9.0.1 5 | streamlit == 1.12.0 6 | streamlit-authenticator == 0.2.1 7 | streamlit-drawable-canvas == 0.9.1 8 | openslide-python == 1.2.0 9 | PyYAML == 6.0 10 | scikit-image == 0.19.3 11 | stqdm == 0.0.4 12 | matplotlib == 3.5.2 13 | scipy == 1.7.3 14 | torch == 1.12.0 15 | seaborn == 0.11.2 16 | glob2 == 0.7 17 | torchvision == 0.13.0 18 | scanpy == 1.9.1 19 | squidpy == 1.2.3 20 | pyvips == 2.2.1 21 | beautifulsoup4 == 4.11.1 22 | pathlib == 1.0.1 23 | altair == 4.2.2 24 | 25 | 26 | -------------------------------------------------------------------------------- /src/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gevaertlab/GBM360/4428121d3930d2ca6646e185da688ca172bb07a5/src/__init__.py -------------------------------------------------------------------------------- /src/about.py: -------------------------------------------------------------------------------- 1 | """ 2 | The main page of GBM360 3 | """ 4 | 5 | print("about") 6 | import streamlit as st 7 | import base64 8 | 9 | 10 | def app(): 11 | 12 | with st.container(): 13 | st.markdown('

GBM360 is a software that harnesses the power of machine learning to investigate the cellular heterogeneity and spatial architecture of glioblastoma

', unsafe_allow_html=True) 14 | st.image('pictures/demo.png', width=1000) 15 | 16 | with st.expander("Citation"): 17 | st.markdown("""Zheng, Y., Carrillo-Perez, F., Pizurica, M. et al. Spatial cellular architecture predicts prognosis in glioblastoma. Nat Commun 14, 4122 (2023). https://doi.org/10.1038/s41467-023-39933-0""") 18 | 19 | with st.expander("Disclaimer"): 20 | st.markdown("""GBM360 is an academic research project and should **not** be considered a medical device approved by any federal authorities.""") 21 | st.markdown("Please remove Personal Health Information (PHI) from all uploaded files, as we are not responsible for data compliance issues", unsafe_allow_html=True) 22 | 23 | with st.expander("Contact"): 24 | paragraph = "- Dr. Yuanning Zheng is a postdoctoral scholar at Stanford University. He obtained his PhD degree in Medical Sciences from Texas A&M University and a Master in Computer Science from Georgia Institute of Technology.\n" \ 25 | "Dr. Zheng's research focuses on developing innovative machine learning and bioinformatics methods to unravel the heterogeneity and improve personalized diagnosis of cancers and other complex diseases. Email: eric2021@stanford.edu\n\n" \ 26 | "- Dr. Olivier Gevaert is an associate professor at Stanford University focusing on developing machine-learning methods for biomedical decision support from multi-scale data. Email: ogevaert@stanford.edu\n\n" \ 27 | "- Other contributors: Francisco Carrillo-Perez \n\n" \ 28 | "- Visit us at: Dr. Gevaert lab\n\n" \ 29 | "- For bug reporting, please visit: " 30 | 31 | st.markdown(paragraph, unsafe_allow_html=True) 32 | 33 | st.write("(c) 2023 All rights reserved.") 34 | 35 | -------------------------------------------------------------------------------- /src/dataset.py: -------------------------------------------------------------------------------- 1 | 2 | """ 3 | Define dataset structure for torch data loaders 4 | """ 5 | 6 | from torch.utils.data import Dataset 7 | 8 | class PatchDataset(Dataset): 9 | """ 10 | csv_path must contain csv with header 11 | case, wsi_file_name, attr1,...,attrk 12 | """ 13 | 14 | def __init__(self, patches, coordinates, 15 | transforms=None): 16 | 17 | self.patches = patches 18 | self.coordinates = coordinates 19 | self.transforms = transforms 20 | 21 | def __len__(self): 22 | return len(self.patches) 23 | 24 | def __getitem__(self, idx): 25 | img = self.patches[idx] 26 | 27 | if self.transforms is not None: 28 | img = self.transforms(img) 29 | 30 | result = {} 31 | result['coordinates'] = self.coordinates[idx] 32 | result['image'] = img 33 | 34 | return result -------------------------------------------------------------------------------- /src/ga.py: -------------------------------------------------------------------------------- 1 | """ 2 | Google analytics 3 | """ 4 | 5 | import pathlib 6 | from bs4 import BeautifulSoup 7 | import logging 8 | import shutil 9 | import pdb 10 | 11 | import streamlit as st 12 | 13 | def inject_ga(): 14 | 15 | GA_ID = "google_analytics" 16 | 17 | # Note: Please replace the id from G-XXXXXXXXXX to whatever your 18 | # web application's id is. You will find this in your Google Analytics account 19 | 20 | GA_JS = """ 21 | 22 | 23 | 30 | """ 31 | 32 | # Insert the script in the head tag of the static template inside your virtual 33 | #index_path = pathlib.Path(st.__file__).parent / "static" / "index.html" 34 | 35 | index_path = pathlib.Path(st.__file__).parent / "static" / "index.html" 36 | logging.info(f'editing {index_path}') 37 | soup = BeautifulSoup(index_path.read_text(), features="html.parser") 38 | if not soup.find(id=GA_ID): # if cannot find tag 39 | bck_index = index_path.with_suffix('.bck') 40 | if bck_index.exists(): 41 | shutil.copy(bck_index, index_path) # recover from backup 42 | else: 43 | shutil.copy(index_path, bck_index) # keep a backup 44 | html = str(soup) 45 | new_html = html.replace('', '\n' + GA_JS) 46 | index_path.write_text(new_html) -------------------------------------------------------------------------------- /src/get_patch_img.py: -------------------------------------------------------------------------------- 1 | """ 2 | Utility functions to isolate foreground from background and to extract patches 3 | """ 4 | 5 | import numpy as np 6 | from skimage.color import rgb2hsv 7 | from skimage.filters import threshold_otsu 8 | from skimage.exposure.exposure import is_low_contrast 9 | from skimage.transform import resize 10 | from scipy.ndimage.morphology import binary_dilation, binary_erosion 11 | from stqdm import stqdm 12 | from torchvision import transforms 13 | from torch.utils.data import DataLoader, SequentialSampler, DataLoader 14 | from dataset import PatchDataset 15 | 16 | def read_patches(slide, max_patches_per_slide = np.inf, image_type = 'svs'): 17 | patches, coordinates = extract_patches(slide, patch_size=(112,112), \ 18 | max_patches_per_slide=max_patches_per_slide, \ 19 | image_type = image_type) 20 | 21 | data_transforms = transforms.Compose([ 22 | transforms.Resize(224), 23 | transforms.ToTensor(), 24 | transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) 25 | ]) 26 | dataset = PatchDataset(patches, coordinates, data_transforms) 27 | image_samplers = SequentialSampler(dataset) 28 | 29 | # Create training and validation dataloaders 30 | dataloader = DataLoader(dataset, batch_size=64, sampler=image_samplers) 31 | return dataloader 32 | 33 | def get_mask_image(img_RGB, RGB_min=50, image_type = 'svs'): 34 | 35 | img_HSV = rgb2hsv(img_RGB) 36 | 37 | background_R = img_RGB[:, :, 0] > threshold_otsu(img_RGB[:, :, 0]) 38 | background_G = img_RGB[:, :, 1] > threshold_otsu(img_RGB[:, :, 1]) 39 | background_B = img_RGB[:, :, 2] > threshold_otsu(img_RGB[:, :, 2]) 40 | tissue_RGB = np.logical_not(background_R & background_G & background_B) 41 | tissue_S = img_HSV[:, :, 1] > threshold_otsu(img_HSV[:, :, 1]) 42 | min_R = img_RGB[:, :, 0] > RGB_min 43 | min_G = img_RGB[:, :, 1] > RGB_min 44 | min_B = img_RGB[:, :, 2] > RGB_min 45 | 46 | if image_type == "svs": 47 | mask = tissue_S & tissue_RGB & min_R & min_G & min_B 48 | else: 49 | mask = min_R & min_G & min_B 50 | return mask 51 | 52 | def get_mask(slide, level='max', RGB_min=50, image_type = 'svs'): 53 | #read svs image at a certain level and compute the otsu mask 54 | if level == 'max': 55 | level = len(slide.level_dimensions) - 1 56 | # note the shape of img_RGB is the transpose of slide.level_dimensions 57 | img_RGB = np.transpose(np.array(slide.read_region((0, 0),level,slide.level_dimensions[level]).convert('RGB')), 58 | axes=[1, 0, 2]) 59 | 60 | tissue_mask = get_mask_image(img_RGB, RGB_min, image_type = image_type) 61 | return tissue_mask, level 62 | 63 | def extract_patches(slide, patch_size, max_patches_per_slide=2000, dezoom_factor=1.0, image_type = 'svs'): 64 | mask, mask_level = get_mask(slide, image_type = image_type) 65 | mask = binary_dilation(mask, iterations=3) 66 | mask = binary_erosion(mask, iterations=3) 67 | 68 | mask_level = len(slide.level_dimensions) - 1 69 | 70 | PATCH_LEVEL = 0 71 | BACKGROUND_THRESHOLD = .2 72 | 73 | ratio_x = slide.level_dimensions[PATCH_LEVEL][0] / slide.level_dimensions[mask_level][0] 74 | ratio_y = slide.level_dimensions[PATCH_LEVEL][1] / slide.level_dimensions[mask_level][1] 75 | 76 | xmax, ymax = slide.level_dimensions[PATCH_LEVEL] 77 | 78 | # handle slides with 40 magnification at base level 79 | resize_factor = float(slide.properties.get('aperio.AppMag', 20)) / 20.0 80 | resize_factor = resize_factor * dezoom_factor 81 | patch_size_resized = (int(resize_factor * patch_size[0]), int(resize_factor * patch_size[1])) 82 | i = 0 83 | 84 | indices = [(x, y) for x in range(0, xmax, patch_size_resized[0]) for y in 85 | range(0, ymax, patch_size_resized[1])] 86 | 87 | patches = [] 88 | coordinates = [] 89 | 90 | for x, y in stqdm(indices): 91 | # check if in background mask 92 | x_mask = int(x / ratio_x) 93 | y_mask = int(y / ratio_y) 94 | if mask[x_mask, y_mask] == 1: 95 | patch = slide.read_region((x, y), PATCH_LEVEL, patch_size_resized).convert('RGB') 96 | try: 97 | mask_patch = get_mask_image(np.array(patch)) 98 | mask_patch = binary_dilation(mask_patch, iterations=3) 99 | except Exception as e: 100 | print("error with slide patch {}".format(i)) 101 | print(e) 102 | if (mask_patch.sum() > BACKGROUND_THRESHOLD * mask_patch.size) and not (is_low_contrast(patch)): 103 | if resize_factor != 1.0: 104 | patch = patch.resize(patch_size) 105 | 106 | coordinates.append((x,y)) 107 | patches.append(patch) 108 | i += 1 109 | if i >= max_patches_per_slide: 110 | break 111 | 112 | return patches, coordinates -------------------------------------------------------------------------------- /src/main_app.py: -------------------------------------------------------------------------------- 1 | """ 2 | Landing point of GBM360 3 | """ 4 | 5 | import streamlit as st 6 | import os 7 | from utils import * 8 | import run_predict 9 | import auth 10 | import about 11 | import re 12 | import base64 13 | from ga import inject_ga 14 | 15 | st.set_page_config(layout="wide") 16 | st.markdown(""" 17 | 31 | """, unsafe_allow_html=True) 32 | 33 | st.image("pictures/logo.png", width = 150) 34 | 35 | inject_ga() 36 | 37 | tab1, tab2, tab3 = st.tabs(["About", "Tutorial", "Run"]) 38 | 39 | with tab1: 40 | about.app() 41 | 42 | with tab2: 43 | 44 | def markdown_images(markdown): 45 | # example image markdown: 46 | # ![Test image](images/test.png "Alternate text") 47 | images = re.findall(r'(!\[(?P[^\]]+)\]\((?P[^\)"\s]+)\s*([^\)]*)\))', markdown) 48 | #images = re.findall(r'', markdown) 49 | return images 50 | 51 | def img_to_bytes(img_path): 52 | img_bytes = Path(img_path).read_bytes() 53 | encoded = base64.b64encode(img_bytes).decode() 54 | return encoded 55 | 56 | def img_to_html(img_path, img_alt): 57 | img_format = img_path.split(".")[-1] 58 | img_html = f'{img_alt}' 59 | 60 | return img_html 61 | 62 | def markdown_insert_images(markdown): 63 | 64 | images = markdown_images(markdown) 65 | for image in images: 66 | image_markdown = image[0] 67 | image_alt = image[1] 68 | image_path = image[2] 69 | 70 | if os.path.exists(image_path): 71 | markdown = markdown.replace(image_markdown, img_to_html(image_path, image_alt)) 72 | return markdown 73 | 74 | with open("src/tutorial.md", "r") as readme_file: 75 | readme = readme_file.read() 76 | 77 | readme = markdown_insert_images(readme) 78 | 79 | with st.container(): 80 | st.markdown(readme, unsafe_allow_html=True) 81 | 82 | with tab3: 83 | if not os.path.exists('temp'): 84 | os.mkdir('temp') 85 | run_predict.app() 86 | 87 | 88 | # if not st.session_state["authentication_status"]: 89 | # st.markdown('

Please log in first.

', unsafe_allow_html=True) 90 | # else: -------------------------------------------------------------------------------- /src/pathology_models.py: -------------------------------------------------------------------------------- 1 | """ 2 | Utility functions of pathology models 3 | 4 | """ 5 | 6 | 7 | import torch 8 | import torch.nn as nn 9 | import torch.nn.functional as F 10 | 11 | #from onmt.encoders.encoder import EncoderBase 12 | #from onmt.modules import MultiHeadedAttention 13 | #from onmt.modules.position_ffn import PositionwiseFeedForward 14 | 15 | 16 | class Identity(nn.Module): 17 | def __init__(self): 18 | super(Identity, self).__init__() 19 | 20 | def forward(self, x): 21 | out = x 22 | attention_weights = torch.ones(x.shape[0], x.shape[1], device=x.device) 23 | return out, attention_weights 24 | 25 | 26 | class TanhAttention(nn.Module): 27 | def __init__(self, dim=2048): 28 | super(TanhAttention, self).__init__() 29 | self.dim = dim 30 | self.vector = torch.nn.Parameter(torch.zeros(dim)) 31 | self.linear = nn.Linear(dim, dim, bias=False) 32 | 33 | def forward(self, x): 34 | logits = torch.tanh(self.linear(x)).matmul(self.vector.unsqueeze(-1)) 35 | attention_weights = torch.nn.functional.softmax(logits, dim=1) 36 | out = x * attention_weights * x.shape[1] 37 | return out,attention_weights 38 | 39 | 40 | class AggregationModel(nn.Module): 41 | def __init__(self, resnet, aggregator, aggregator_dim, resnet_dim=2048, out_features=1, task = "classification"): 42 | super(AggregationModel, self).__init__() 43 | self.task = task 44 | self.resnet = resnet 45 | self.aggregator = aggregator 46 | self.fc = nn.Linear(aggregator_dim, out_features) 47 | self.aggregator_dim = aggregator_dim 48 | self.resnet_dim = resnet_dim 49 | self.softmax = nn.Softmax(dim=1) 50 | 51 | def forward(self, x): 52 | features,attention_weights = self.extract(x) 53 | out = self.fc(features) 54 | if self.task == 'prob': 55 | out = self.softmax(out) 56 | return out, attention_weights 57 | 58 | def extract(self,x): 59 | (batch_size, c, h, w) = x.shape 60 | x = x.reshape(-1, c, h, w) 61 | features = self.resnet.forward_extract(x) 62 | features = features.view(batch_size, self.resnet_dim) # bsize, resnet_dim 63 | features, attention_weights = self.aggregator(features) # bsize, aggregator_dim 64 | return features,attention_weights 65 | 66 | class AggregationProjectModel(nn.Module): 67 | def __init__(self, resnet, aggregator, aggregator_dim, resnet_dim=2048, out_features=1,hdim=200,dropout=.3): 68 | super(AggregationProjectModel, self).__init__() 69 | self.resnet = resnet 70 | self.aggregator = aggregator 71 | self.aggregator_dim = aggregator_dim 72 | self.resnet_dim = resnet_dim 73 | self.hdim = hdim 74 | self.dropout = nn.Dropout(p=dropout) 75 | self.project = nn.Linear(aggregator_dim, hdim) 76 | self.fc = nn.Linear(hdim, out_features) 77 | 78 | def forward(self, x): 79 | features,attention_weights = self.extract(x) 80 | out = self.fc(features) 81 | return out, attention_weights 82 | 83 | def extract(self,x): 84 | (batch_size, bag_size, c, h, w) = x.shape 85 | x = x.reshape(-1, c, h, w) 86 | features = self.resnet.forward_extract(x) 87 | features = features.view(batch_size, bag_size, self.resnet_dim) # bsize, bagsize, resnet_dim 88 | 89 | features, attention_weights = self.aggregator(features) # bsize, bagsize, aggregator_dim 90 | features = features.mean(dim=1) # batch_size,aggregator_dim 91 | features = self.project(features) 92 | features = F.tanh(features) 93 | features = self.dropout(features) 94 | 95 | return features,attention_weights 96 | 97 | def cox_loss(cox_scores, times, status): 98 | ''' 99 | :param cox_scores: cox scores, size (batch_size) 100 | :param times: event times (either death or censor), size batch_size 101 | :param status: event status (1 for death, 0 for censor), size batch_size 102 | :return: loss of size 1, the sum of cox losses for the batch 103 | ''' 104 | 105 | times, sorted_indices = torch.sort(-times) 106 | cox_scores = cox_scores[sorted_indices] 107 | status = status[sorted_indices] 108 | cox_scores = cox_scores -torch.max(cox_scores) 109 | exp_scores = torch.exp(cox_scores) 110 | loss = cox_scores - torch.log(torch.cumsum(exp_scores, dim=0)+1e-5) 111 | loss = - loss * status 112 | # TODO maybe divide by status.sum() 113 | 114 | if (loss != loss).any(): 115 | import pdb; 116 | pdb.set_trace() 117 | 118 | return loss.mean() 119 | 120 | class CoxLoss(nn.Module): 121 | def __init__(self): 122 | super(CoxLoss,self).__init__() 123 | 124 | def forward(self,cox_scores,times,status): 125 | return cox_loss(cox_scores,times,status) -------------------------------------------------------------------------------- /src/resnet.py: -------------------------------------------------------------------------------- 1 | """ 2 | ResNet models 3 | 4 | """ 5 | 6 | import torch.nn as nn 7 | import math 8 | import torch.utils.model_zoo as model_zoo 9 | import torch.nn.functional as F 10 | import torch 11 | 12 | __all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 13 | 'resnet152'] 14 | 15 | model_urls = { 16 | 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', 17 | 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', 18 | 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', 19 | 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', 20 | 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', 21 | } 22 | 23 | 24 | def conv3x3(in_planes, out_planes, stride=1): 25 | """3x3 convolution with padding""" 26 | return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, 27 | padding=1, bias=False) 28 | 29 | 30 | class BasicBlock(nn.Module): 31 | expansion = 1 32 | 33 | def __init__(self, inplanes, planes, stride=1, downsample=None): 34 | super(BasicBlock, self).__init__() 35 | self.conv1 = conv3x3(inplanes, planes, stride) 36 | self.bn1 = nn.BatchNorm2d(planes) 37 | self.relu = nn.ReLU(inplace=True) 38 | self.conv2 = conv3x3(planes, planes) 39 | self.bn2 = nn.BatchNorm2d(planes) 40 | self.downsample = downsample 41 | self.stride = stride 42 | 43 | def forward(self, x): 44 | residual = x 45 | 46 | out = self.conv1(x) 47 | out = self.bn1(out) 48 | out = self.relu(out) 49 | 50 | out = self.conv2(out) 51 | out = self.bn2(out) 52 | 53 | if self.downsample is not None: 54 | residual = self.downsample(x) 55 | 56 | out += residual 57 | out = self.relu(out) 58 | 59 | return out 60 | 61 | 62 | class Bottleneck(nn.Module): 63 | expansion = 4 64 | 65 | def __init__(self, inplanes, planes, stride=1, downsample=None): 66 | super(Bottleneck, self).__init__() 67 | self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) 68 | self.bn1 = nn.BatchNorm2d(planes) 69 | self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, 70 | padding=1, bias=False) 71 | self.bn2 = nn.BatchNorm2d(planes) 72 | self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) 73 | self.bn3 = nn.BatchNorm2d(planes * 4) 74 | self.relu = nn.ReLU(inplace=True) 75 | self.downsample = downsample 76 | self.stride = stride 77 | 78 | def forward(self, x): 79 | residual = x 80 | 81 | out = self.conv1(x) 82 | out = self.bn1(out) 83 | out = self.relu(out) 84 | 85 | out = self.conv2(out) 86 | out = self.bn2(out) 87 | out = self.relu(out) 88 | 89 | out = self.conv3(out) 90 | out = self.bn3(out) 91 | 92 | if self.downsample is not None: 93 | residual = self.downsample(x) 94 | 95 | out += residual 96 | out = self.relu(out) 97 | 98 | return out 99 | 100 | 101 | class ResNet(nn.Module): 102 | 103 | def __init__(self, block, layers, num_classes=1000): 104 | self.inplanes = 64 105 | super(ResNet, self).__init__() 106 | self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, 107 | bias=False) 108 | self.bn1 = nn.BatchNorm2d(64) 109 | self.relu = nn.ReLU(inplace=True) 110 | self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) 111 | self.layer1 = self._make_layer(block, 64, layers[0]) 112 | self.layer2 = self._make_layer(block, 128, layers[1], stride=2) 113 | self.layer3 = self._make_layer(block, 256, layers[2], stride=2) 114 | self.layer4 = self._make_layer(block, 512, layers[3], stride=2) 115 | self.avgpool = nn.AvgPool2d(7, stride=1) 116 | self.fc = nn.Linear(512 * block.expansion, num_classes) 117 | 118 | for m in self.modules(): 119 | if isinstance(m, nn.Conv2d): 120 | n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels 121 | m.weight.data.normal_(0, math.sqrt(2. / n)) 122 | elif isinstance(m, nn.BatchNorm2d): 123 | m.weight.data.fill_(1) 124 | m.bias.data.zero_() 125 | 126 | def _make_layer(self, block, planes, blocks, stride=1): 127 | downsample = None 128 | if stride != 1 or self.inplanes != planes * block.expansion: 129 | downsample = nn.Sequential( 130 | nn.Conv2d(self.inplanes, planes * block.expansion, 131 | kernel_size=1, stride=stride, bias=False), 132 | nn.BatchNorm2d(planes * block.expansion), 133 | ) 134 | 135 | layers = [] 136 | layers.append(block(self.inplanes, planes, stride, downsample)) 137 | self.inplanes = planes * block.expansion 138 | for i in range(1, blocks): 139 | layers.append(block(self.inplanes, planes)) 140 | 141 | return nn.Sequential(*layers) 142 | 143 | def forward(self, x): 144 | x = self.conv1(x) 145 | x = self.bn1(x) 146 | x = self.relu(x) 147 | x = self.maxpool(x) 148 | 149 | x = self.layer1(x) 150 | x = self.layer2(x) 151 | x = self.layer3(x) 152 | x = self.layer4(x) 153 | 154 | x = self.avgpool(x) 155 | x = x.view(x.size(0), -1) 156 | x = self.fc(x) 157 | 158 | return x 159 | 160 | def forward_extract(self, x): 161 | x = self.conv1(x) 162 | x = self.bn1(x) 163 | x = self.relu(x) 164 | x = self.maxpool(x) 165 | 166 | x = self.layer1(x) 167 | x = self.layer2(x) 168 | x = self.layer3(x) 169 | x = self.layer4(x) 170 | 171 | x = self.avgpool(x) 172 | x = x.view(x.size(0), -1) 173 | 174 | return x 175 | 176 | class RNfour(nn.Module): 177 | 178 | def __init__(self, block, layers, num_classes=1000): 179 | self.inplanes = 64 180 | super(RNfour, self).__init__() 181 | self.conv1 = nn.Conv2d(4, 64, kernel_size=7, stride=2, padding=3, 182 | bias=False) 183 | self.bn1 = nn.BatchNorm2d(64) 184 | self.relu = nn.ReLU(inplace=True) 185 | self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) 186 | self.layer1 = self._make_layer(block, 64, layers[0]) 187 | self.layer2 = self._make_layer(block, 128, layers[1], stride=2) 188 | self.layer3 = self._make_layer(block, 256, layers[2], stride=2) 189 | self.layer4 = self._make_layer(block, 512, layers[3], stride=2) 190 | self.avgpool = nn.AvgPool2d(7, stride=1) 191 | self.fc = nn.Linear(512 * block.expansion, num_classes) 192 | 193 | for m in self.modules(): 194 | if isinstance(m, nn.Conv2d): 195 | n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels 196 | m.weight.data.normal_(0, math.sqrt(2. / n)) 197 | elif isinstance(m, nn.BatchNorm2d): 198 | m.weight.data.fill_(1) 199 | m.bias.data.zero_() 200 | 201 | def _make_layer(self, block, planes, blocks, stride=1): 202 | downsample = None 203 | if stride != 1 or self.inplanes != planes * block.expansion: 204 | downsample = nn.Sequential( 205 | nn.Conv2d(self.inplanes, planes * block.expansion, 206 | kernel_size=1, stride=stride, bias=False), 207 | nn.BatchNorm2d(planes * block.expansion), 208 | ) 209 | 210 | layers = [] 211 | layers.append(block(self.inplanes, planes, stride, downsample)) 212 | self.inplanes = planes * block.expansion 213 | for i in range(1, blocks): 214 | layers.append(block(self.inplanes, planes)) 215 | 216 | return nn.Sequential(*layers) 217 | 218 | def forward(self, x): 219 | x = self.conv1(x) 220 | x = self.bn1(x) 221 | x = self.relu(x) 222 | x = self.maxpool(x) 223 | 224 | x = self.layer1(x) 225 | x = self.layer2(x) 226 | x = self.layer3(x) 227 | x = self.layer4(x) 228 | 229 | x = self.avgpool(x) 230 | x = x.view(x.size(0), -1) 231 | x = self.fc(x) 232 | 233 | return x 234 | 235 | def forward_extract(self, x): 236 | x = self.conv1(x) 237 | x = self.bn1(x) 238 | x = self.relu(x) 239 | x = self.maxpool(x) 240 | 241 | x = self.layer1(x) 242 | x = self.layer2(x) 243 | x = self.layer3(x) 244 | x = self.layer4(x) 245 | 246 | x = self.avgpool(x) 247 | x = x.view(x.size(0), -1) 248 | 249 | return x 250 | 251 | class RNone(nn.Module): 252 | 253 | def __init__(self, block, layers, num_classes=1000): 254 | self.inplanes = 64 255 | super(RNone, self).__init__() 256 | self.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, 257 | bias=False) 258 | self.bn1 = nn.BatchNorm2d(64) 259 | self.relu = nn.ReLU(inplace=True) 260 | self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) 261 | self.layer1 = self._make_layer(block, 64, layers[0]) 262 | self.layer2 = self._make_layer(block, 128, layers[1], stride=2) 263 | self.layer3 = self._make_layer(block, 256, layers[2], stride=2) 264 | self.layer4 = self._make_layer(block, 512, layers[3], stride=2) 265 | self.avgpool = nn.AvgPool2d(7, stride=1) 266 | self.fc = nn.Linear(512 * block.expansion, num_classes) 267 | 268 | for m in self.modules(): 269 | if isinstance(m, nn.Conv2d): 270 | n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels 271 | m.weight.data.normal_(0, math.sqrt(2. / n)) 272 | elif isinstance(m, nn.BatchNorm2d): 273 | m.weight.data.fill_(1) 274 | m.bias.data.zero_() 275 | 276 | def _make_layer(self, block, planes, blocks, stride=1): 277 | downsample = None 278 | if stride != 1 or self.inplanes != planes * block.expansion: 279 | downsample = nn.Sequential( 280 | nn.Conv2d(self.inplanes, planes * block.expansion, 281 | kernel_size=1, stride=stride, bias=False), 282 | nn.BatchNorm2d(planes * block.expansion), 283 | ) 284 | 285 | layers = [] 286 | layers.append(block(self.inplanes, planes, stride, downsample)) 287 | self.inplanes = planes * block.expansion 288 | for i in range(1, blocks): 289 | layers.append(block(self.inplanes, planes)) 290 | 291 | return nn.Sequential(*layers) 292 | 293 | def forward(self, x): 294 | x = self.conv1(x) 295 | x = self.bn1(x) 296 | x = self.relu(x) 297 | x = self.maxpool(x) 298 | 299 | x = self.layer1(x) 300 | x = self.layer2(x) 301 | x = self.layer3(x) 302 | x = self.layer4(x) 303 | 304 | x = self.avgpool(x) 305 | x = x.view(x.size(0), -1) 306 | x = self.fc(x) 307 | 308 | return x 309 | 310 | def forward_extract(self, x): 311 | x = self.conv1(x) 312 | x = self.bn1(x) 313 | x = self.relu(x) 314 | x = self.maxpool(x) 315 | 316 | x = self.layer1(x) 317 | x = self.layer2(x) 318 | x = self.layer3(x) 319 | x = self.layer4(x) 320 | 321 | x = self.avgpool(x) 322 | x = x.view(x.size(0), -1) 323 | 324 | return x 325 | 326 | 327 | class ResNetProject(nn.Module): 328 | 329 | def __init__(self, resnet, hdim=200, input_dim=2048, dropout=.3): 330 | super(ResNetProject, self).__init__() 331 | self.resnet = resnet 332 | self.hdim = hdim 333 | self.dropout = nn.Dropout(p=dropout) 334 | self.project = nn.Linear(input_dim, hdim) 335 | self.fc = nn.Linear(hdim, 1) 336 | 337 | def forward_extract(self, x): 338 | x = self.resnet.forward_extract(x) 339 | x = self.project(x) 340 | x = F.tanh(x) 341 | x = self.dropout(x) 342 | return x 343 | 344 | def forward(self, x): 345 | x = self.forward_extract(x) 346 | x = self.fc(x) 347 | return x 348 | 349 | 350 | def resnet18(pretrained=False, **kwargs): 351 | """Constructs a ResNet-18 model. 352 | Args: 353 | pretrained (bool): If True, returns a model pre-trained on ImageNet 354 | """ 355 | model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs) 356 | if pretrained: 357 | model.load_state_dict(model_zoo.load_url(model_urls['resnet18'])) 358 | return model 359 | 360 | 361 | def resnet34(pretrained=False, **kwargs): 362 | """Constructs a ResNet-34 model. 363 | Args: 364 | pretrained (bool): If True, returns a model pre-trained on ImageNet 365 | """ 366 | model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs) 367 | if pretrained: 368 | model.load_state_dict(model_zoo.load_url(model_urls['resnet34'])) 369 | return model 370 | 371 | 372 | def resnet50(pretrained=False, **kwargs): 373 | """Constructs a ResNet-50 model. 374 | Args: 375 | pretrained (bool): If True, returns a model pre-trained on ImageNet 376 | """ 377 | model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs) 378 | if pretrained: 379 | model.load_state_dict(model_zoo.load_url(model_urls['resnet50'])) 380 | return model 381 | 382 | def resnet50_4channel(pretrained=False, **kwargs): 383 | """Constructs a ResNet-50 model. 384 | Args: 385 | pretrained (bool): If True, returns a model pre-trained on ImageNet 386 | """ 387 | new_model = RNfour(Bottleneck, [3, 4, 6, 3], **kwargs) 388 | 389 | if pretrained: 390 | 391 | pretrained_dict = model_zoo.load_url(model_urls['resnet50']) 392 | new_model_dict = new_model.state_dict() 393 | 394 | # 1. filter out unnecessary keys 395 | filtered_pretrained_dict = {k: v for k, v in pretrained_dict.items() if k!='conv1.weight'} 396 | # 2. overwrite entries in the existing state dict 397 | new_model_dict.update(filtered_pretrained_dict) 398 | # 3. load the new state dict 399 | new_model.load_state_dict(new_model_dict) 400 | 401 | new_model.conv1.weight.data.normal_(0, 0.001) 402 | new_model.conv1.weight.data[:, :3, :, :] = pretrained_dict['conv1.weight'] 403 | 404 | 405 | return new_model 406 | 407 | def resnet50_1channel(pretrained=False, **kwargs): 408 | """Constructs a ResNet-50 model. 409 | Args: 410 | pretrained (bool): If True, returns a model pre-trained on ImageNet 411 | """ 412 | new_model = RNone(Bottleneck, [3, 4, 6, 3], **kwargs) 413 | 414 | if pretrained: 415 | 416 | pretrained_dict = model_zoo.load_url(model_urls['resnet50']) 417 | new_model_dict = new_model.state_dict() 418 | 419 | # 1. filter out unnecessary keys 420 | filtered_pretrained_dict = {k: v for k, v in pretrained_dict.items() if k!='conv1.weight'} 421 | # 2. overwrite entries in the existing state dict 422 | new_model_dict.update(filtered_pretrained_dict) 423 | # 3. load the new state dict 424 | new_model.load_state_dict(new_model_dict) 425 | 426 | con1w=pretrained_dict['conv1.weight'] 427 | con1w_mean=torch.mean(con1w, dim=1, keepdim=True) 428 | 429 | new_model.conv1.weight.data=con1w_mean 430 | 431 | 432 | 433 | 434 | return new_model 435 | 436 | def resnet101(pretrained=False, **kwargs): 437 | """Constructs a ResNet-101 model. 438 | Args: 439 | pretrained (bool): If True, returns a model pre-trained on ImageNet 440 | """ 441 | model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs) 442 | if pretrained: 443 | model.load_state_dict(model_zoo.load_url(model_urls['resnet101'])) 444 | return model 445 | 446 | 447 | def resnet152(pretrained=False, **kwargs): 448 | """Constructs a ResNet-152 model. 449 | Args: 450 | pretrained (bool): If True, returns a model pre-trained on ImageNet 451 | """ 452 | model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs) 453 | if pretrained: 454 | model.load_state_dict(model_zoo.load_url(model_urls['resnet152'])) 455 | return -------------------------------------------------------------------------------- /src/run_predict.py: -------------------------------------------------------------------------------- 1 | """ 2 | The run prediction page 3 | """ 4 | 5 | import numpy as np 6 | from PIL import Image 7 | import streamlit as st 8 | from openslide import OpenSlide 9 | from utils import * 10 | from spa_mapping import generate_heatmap_cell_type, generate_heatpmap_survival 11 | from spatial_stat import gen_graph, compute_percent 12 | from get_patch_img import read_patches 13 | import seaborn as sns 14 | import matplotlib.pyplot as plt 15 | from io import BytesIO 16 | import json 17 | import pyvips 18 | 19 | def app(): 20 | 21 | with open("config/config.json", 'r') as f: 22 | config = json.load(f) 23 | 24 | clear_dir('temp') 25 | 26 | # Specify canvas parameters in application 27 | bg_image = st.file_uploader("Image:", type=["tiff", 'tif', "svs"]) 28 | 29 | # Control panel 30 | example_button = st.button("Use an example slide") 31 | test_mode = st.selectbox("Run Mode:", ("Test mode (only 1,000 patches will be predicted)", "Complete")) 32 | st.markdown("**Note**: We are currently working on obtaining GPU support for this software. To expedite the process, the default mode " 33 | "is now set to `Test mode`, which will only predict 1,000 patches of the image. " 34 | "To predict the entire image, please switch to the `Complete` mode.") 35 | 36 | cell_type_button = st.button("Get cell type visualization") 37 | prognosis_button = st.button("Get prognosis visualization") 38 | clear_button = st.button("Clear the session") 39 | 40 | # Check available device 41 | device = check_device(config['use_cuda']) 42 | st.write('Device available:', device) 43 | 44 | # Initialization 45 | if 'slide' not in st.session_state: 46 | st.session_state.slide = None 47 | if 'image' not in st.session_state: 48 | st.session_state.image = None 49 | if 'image_type' not in st.session_state: 50 | st.session_state.image_type = None 51 | if 'dataloader' not in st.session_state: 52 | st.session_state.dataloader = None 53 | 54 | if bg_image: 55 | path = save_uploaded_file(bg_image) 56 | st.session_state.image_type = "svs" 57 | 58 | if path.endswith("tiff") or path.endswith("tif"): 59 | image = pyvips.Image.new_from_file(path) 60 | image.write_to_file("temp/test.tiff", pyramid=True, tile=True) 61 | path = "temp/test.tiff" 62 | st.session_state.image_type = "tif" 63 | 64 | st.session_state.slide = OpenSlide(path) 65 | st.session_state.image = st.session_state.slide.get_thumbnail(size=(512,512)) 66 | st.image(st.session_state.image) 67 | bg_image = None 68 | 69 | if example_button: 70 | path = os.path.join("example", "C3L-00365-21.svs") 71 | st.session_state.slide = OpenSlide(path) 72 | st.session_state.image = st.session_state.slide.get_thumbnail(size=(512,512)) 73 | st.image(st.session_state.image) 74 | 75 | max_patches_per_slide = np.inf 76 | if test_mode == "Test mode (only 1,000 patches will be predicted)": 77 | max_patches_per_slide = 1000 78 | 79 | if cell_type_button and st.session_state.slide: 80 | slide = st.session_state.slide 81 | with st.spinner('Reading patches...'): 82 | dataloader = read_patches(slide, max_patches_per_slide, image_type = st.session_state.image_type) 83 | 84 | with st.spinner('Loading model...'): 85 | model = load_model(checkpoint='model_weights/train_2023-04-28_prob_multi_label_weighted/model_cell.pt', config = config) 86 | 87 | with st.spinner('Predicting transcriptional subtypes...'): 88 | results = predict_cell(model, dataloader, device=device) 89 | 90 | with st.spinner('Generating visualization...'): 91 | heatmap = generate_heatmap_cell_type(slide, patch_size= (112,112), labels=results, config=config) 92 | im = Image.fromarray(heatmap) 93 | legend = Image.open('pictures/cell-type-hor.png') 94 | st.image(legend) 95 | st.image(im, caption='Subtype distribution across the tissue') 96 | 97 | with st.spinner('Calculating spatial statistics...'): 98 | df_percent = compute_percent(results) # cell type composition 99 | dgr_centr, im_mtx_slide, im_mtx_row, df_cluster = gen_graph(slide, results = results) # graph statistics 100 | 101 | # Display statistic tables for cell proportions 102 | color_ids, cluster_colors = get_color_ids() 103 | st.markdown('

Cell fraction (%)

', unsafe_allow_html=True) 104 | data_container = st.container() 105 | with data_container: 106 | table, plot, _ , _ = st.columns(4) 107 | with table: 108 | st.table(data=style_table(df_percent)) 109 | with plot: 110 | buf = BytesIO() 111 | fig, ax = plt.subplots() 112 | sns.barplot(data = df_percent, y = 'Subtype', x = "Percentage", palette = cluster_colors, ax = ax) 113 | ax.tick_params(labelsize=14) 114 | ax.set_ylabel('', fontdict= {'fontsize': 16, 'fontweight':'bold'}) 115 | ax.set_xlabel('Percentage (%)',fontdict= { 'fontsize': 16, 'fontweight':'bold'}) 116 | fig.savefig(buf, format="png", bbox_inches = "tight") 117 | st.image(buf) 118 | 119 | # Display row-normalized interaction matrix 120 | st.markdown('

Interaction matrix (row-wise normalized)

', unsafe_allow_html=True) 121 | data_container = st.container() 122 | with data_container: 123 | table, plot, _ , _ = st.columns(4) 124 | with table: 125 | st.table(data=style_table(im_mtx_row)) 126 | with plot: 127 | buf = BytesIO() 128 | fig, ax = plt.subplots() 129 | sns.heatmap(im_mtx_row, ax = ax) 130 | #ax.tick_params(labelsize=12) 131 | fig.savefig(buf, format="png", bbox_inches = "tight") 132 | st.image(buf) 133 | 134 | # Display slide-normalized interaction matrix 135 | st.markdown('

Interaction matrix (slide-wise normalized)

', unsafe_allow_html=True) 136 | data_container = st.container() 137 | with data_container: 138 | table, plot, _ , _ = st.columns(4) 139 | with table: 140 | st.table(data=style_table(im_mtx_slide)) 141 | with plot: 142 | buf = BytesIO() 143 | fig, ax = plt.subplots() 144 | sns.heatmap(im_mtx_slide, ax = ax) 145 | #ax.tick_params(labelsize=12) 146 | fig.savefig(buf, format="png", bbox_inches = "tight") 147 | st.image(buf) 148 | 149 | # Display statistic tables for clustering coefficient 150 | st.markdown('

Clustering coefficient

', unsafe_allow_html=True) 151 | data_container = st.container() 152 | with data_container: 153 | table, plot, _ , _ = st.columns(4) 154 | with table: 155 | st.table(data=style_table(df_cluster)) 156 | with plot: 157 | buf = BytesIO() 158 | fig, ax = plt.subplots() 159 | sns.barplot(data = df_cluster, y = 'Subtype', x = 'cluster_coeff' , palette = cluster_colors, ax = ax) 160 | ax.tick_params(labelsize=14) 161 | ax.set_ylabel('', fontdict= {'fontsize': 16, 'fontweight':'bold'}) 162 | ax.set_xlabel('Clustering coefficient',fontdict= { 'fontsize': 16, 'fontweight':'bold'}) 163 | fig.savefig(buf, format="png", bbox_inches = "tight") 164 | st.image(buf) 165 | 166 | if prognosis_button and st.session_state.slide: 167 | 168 | slide = st.session_state.slide 169 | 170 | with st.spinner('Reading patches...'): 171 | dataloader = read_patches(slide, max_patches_per_slide) 172 | 173 | config['num_classes'] = 1 174 | with st.spinner('Loading model...'): 175 | model = load_model(checkpoint='model_weights/model_survival.pt', config = config) 176 | 177 | with st.spinner('Predicting aggressive scores...'): 178 | results = predict_survival(model, dataloader, device=device) 179 | config['label_column'] = 'risk_score' 180 | 181 | 182 | with st.spinner('Generating visualization...'): 183 | heatmap = generate_heatpmap_survival(slide, patch_size= (112,112), 184 | results=results, 185 | config = config) 186 | 187 | legend = Image.open('pictures/risk_score_legend.png') 188 | st.image(legend) 189 | im = Image.fromarray(heatmap) 190 | st.image(im, caption='Aggressive score prediction') 191 | 192 | if clear_button: 193 | clear(path) 194 | 195 | 196 | 197 | # # Display statistic tables for degree centrality 198 | # st.markdown('

Degree centrality

', unsafe_allow_html=True) 199 | # data_container = st.container() 200 | # with data_container: 201 | # table, plot, _ , _ = st.columns(4) 202 | # with table: 203 | # st.table(data=style_table(dgr_centr)) 204 | # with plot: 205 | # buf = BytesIO() 206 | # fig, ax = plt.subplots() 207 | # sns.barplot(data = dgr_centr, y = 'Subtype', x = 'centrality' , palette = cluster_colors, ax = ax) 208 | # ax.tick_params(labelsize=14) 209 | # ax.set_ylabel('', fontdict= {'fontsize': 16, 'fontweight':'bold'}) 210 | # ax.set_xlabel('Centrality score',fontdict= { 'fontsize': 16, 'fontweight':'bold'}) 211 | # fig.savefig(buf, format="png", bbox_inches = "tight") 212 | # st.image(buf) -------------------------------------------------------------------------------- /src/spa_mapping.py: -------------------------------------------------------------------------------- 1 | 2 | """ 3 | Script to generate the visualization of cell types and prognostic scores in whole slide images 4 | 5 | """ 6 | 7 | from typing import Tuple 8 | import pandas as pd 9 | import numpy as np 10 | from openslide import OpenSlide 11 | import seaborn as sns 12 | from stqdm import stqdm 13 | import matplotlib.pyplot as plt 14 | from PIL import Image 15 | import math 16 | import seaborn as sns 17 | from utils import get_class, get_color_ids 18 | import pdb 19 | 20 | def assig_to_heatmap(heatmap, patch, x, y, ratio_patch_x, ratio_patch_y): 21 | 22 | new_x = int(x / ratio_patch_x) 23 | new_y = int(y / ratio_patch_y) 24 | 25 | try: 26 | if new_x+patch.shape[0] > heatmap.shape[0] and new_y+patch.shape[1] < heatmap.shape[1]: 27 | dif = heatmap.shape[0] - new_x 28 | heatmap[new_x:heatmap.shape[0], new_y:new_y+patch.shape[1], :] = patch[:dif, :, :] 29 | elif new_x+patch.shape[0] < heatmap.shape[0] and new_y+patch.shape[1] > heatmap.shape[1]: 30 | dif = heatmap.shape[1] - new_y 31 | heatmap[new_x:new_x+patch.shape[0], new_y:, :] = patch[:, :dif, :] 32 | elif new_x+patch.shape[0] > heatmap.shape[0] and new_y+patch.shape[1] > heatmap.shape[1]: 33 | return heatmap 34 | else: 35 | heatmap[new_x:new_x+patch.shape[0], new_y:new_y+patch.shape[1], :] = patch 36 | return heatmap 37 | except: 38 | return heatmap 39 | 40 | def get_indices(slide : OpenSlide, patch_size: Tuple, PATCH_LEVEL = 0, dezoom_factor = 1, use_h5 = False): 41 | 42 | xmax, ymax = slide.level_dimensions[PATCH_LEVEL] 43 | 44 | # handle slides with 40 magnification at base level 45 | if use_h5: 46 | resize_factor = 0.5 / float(slide.properties.get('openslide.mpp-x', 0.5)) 47 | else: 48 | resize_factor = float(slide.properties.get('aperio.AppMag', 20)) / 20.0 49 | 50 | resize_factor = resize_factor * dezoom_factor 51 | patch_size_resized = (int(resize_factor * patch_size[0]), int(resize_factor * patch_size[1])) 52 | 53 | indices = [(x, y) for x in range(0, xmax, patch_size_resized[0]) 54 | for y in range(0, ymax, patch_size_resized[0])] 55 | 56 | return(indices, xmax, ymax, patch_size_resized, resize_factor) 57 | 58 | def get_color_linear(minimum, maximum, value): 59 | # give the minimun and maxium value, generate a color mapped to blue-red heatmap 60 | minimum, maximum = float(minimum), float(maximum) 61 | ratio = 2 * (value-minimum) / (maximum - minimum) 62 | b = int(max(0, 255*(1 - ratio))) 63 | r = int(max(0, 255*(ratio - 1))) 64 | g = 255 - b - r 65 | return r, g, b 66 | 67 | def make_dict_cell_type(labels, config): 68 | keys = labels['coordinates'] 69 | labels = labels[config['label_column']] 70 | keys = np.concatenate((keys), axis=0) 71 | labels = np.concatenate((labels), axis=0) 72 | # convert predicted labels to actual cell types 73 | class2idx, id2class = get_class() 74 | cell_types = [id2class[k] for k in labels] 75 | # Match cell types to colors 76 | color_ids, cluster_colors = get_color_ids() 77 | colors = [color_ids[k] for k in cell_types] 78 | color_labels = dict() 79 | for key, value in zip(keys, colors): 80 | color_labels[tuple(key)] = value 81 | return color_labels 82 | 83 | def make_dict_survival(labels): 84 | keys = labels['coordinates'] 85 | values = labels['risk_score'] 86 | survival_labels = dict() 87 | for key, value in zip(keys, values): 88 | for k, v in zip(key, value): 89 | survival_labels[k] = v 90 | return survival_labels 91 | 92 | def generate_heatmap_cell_type(slide, patch_size: Tuple, labels, config): 93 | PATCH_LEVEL = 0 94 | indices, xmax, ymax, patch_size_resized, resize_factor = get_indices(slide, patch_size, PATCH_LEVEL, use_h5 = config['use_h5']) 95 | 96 | compress_factor = config['compress_factor'] * round(resize_factor) 97 | 98 | heatmap = np.zeros((xmax // compress_factor, ymax // compress_factor, 3)) 99 | labels_dict = make_dict_cell_type(labels, config) 100 | 101 | print(f'Overlap patches: {len(set(labels_dict.keys()) & set(indices))}') 102 | 103 | for x, y in stqdm(indices): 104 | try: 105 | patch = np.transpose(np.array(slide.read_region((x, y), PATCH_LEVEL, patch_size_resized).convert('RGB')), axes=[1, 0, 2]) 106 | patch = Image.fromarray(patch) 107 | patch = patch.resize((math.ceil(patch_size_resized[0] / compress_factor), math.ceil(patch_size_resized[1] / compress_factor))) 108 | patch = np.asarray(patch) 109 | 110 | if (x, y) in labels_dict: 111 | score = labels_dict[(x,y)] 112 | color = sns.color_palette()[score] 113 | visualization = np.empty((math.ceil(patch_size_resized[0] / compress_factor), math.ceil(patch_size_resized[1] / compress_factor), 3), np.uint8) 114 | visualization[:] = color[0] * 255, color[1] * 255, color[2] * 255 115 | heatmap = assig_to_heatmap(heatmap, visualization, x, y, compress_factor, compress_factor) 116 | else: 117 | heatmap = assig_to_heatmap(heatmap, patch, x, y, compress_factor, compress_factor) 118 | except Exception as e: 119 | print(e) 120 | 121 | # since the x and y coordiante is flipped after converting the patch to RGB, we flipped the image again to match the original image 122 | heatmap = np.transpose(heatmap, axes=[1, 0, 2]).astype(np.uint8) 123 | return heatmap 124 | 125 | def generate_heatpmap_survival(slide, patch_size: Tuple, results: dict, min_val=-2, max_val=2.34, config = None): 126 | 127 | PATCH_LEVEL = 0 128 | indices, xmax, ymax, patch_size_resized, resize_factor = get_indices(slide, patch_size, PATCH_LEVEL, use_h5 = config['use_h5']) 129 | 130 | compress_factor = config['compress_factor'] * round(resize_factor) 131 | heatmap = np.zeros((xmax // compress_factor, ymax // compress_factor, 3)) 132 | labels_dict = make_dict_survival(results) 133 | 134 | risk_score = [s for sublist in results['risk_score'] for s in sublist] 135 | min_val = np.min(risk_score) 136 | max_val = np.max(risk_score) 137 | 138 | for x, y in stqdm(indices): 139 | try: 140 | patch = np.transpose(np.array(slide.read_region((x, y), PATCH_LEVEL, patch_size_resized).convert('RGB')), axes=[1, 0, 2]) 141 | patch = Image.fromarray(patch) 142 | patch = patch.resize((math.ceil(patch_size_resized[0] / compress_factor), math.ceil(patch_size_resized[1] / compress_factor))) 143 | patch = np.asarray(patch) 144 | 145 | if (x, y) in labels_dict: 146 | score = labels_dict[(x,y)] 147 | color = get_color_linear(min_val, max_val, score) 148 | visualization = np.empty((math.ceil(patch_size_resized[0] / compress_factor), math.ceil(patch_size_resized[1] / compress_factor), 3), np.uint8) 149 | visualization[:] = color[0] * 255, color[1] * 255, color[2] * 255 150 | heatmap = assig_to_heatmap(heatmap, visualization, x, y, compress_factor, compress_factor) 151 | else: 152 | heatmap = assig_to_heatmap(heatmap, patch, x, y, compress_factor, compress_factor) 153 | except Exception as e: 154 | print(e) 155 | 156 | # since the x and y coordiante is flipped after converting the patch to RGB, we flipped the image again to match the original image 157 | heatmap = np.transpose(heatmap, axes=[1, 0, 2]).astype(np.uint8) 158 | 159 | return heatmap 160 | -------------------------------------------------------------------------------- /src/spatial_stat.py: -------------------------------------------------------------------------------- 1 | """ 2 | Utility functions to perform spatial statistical analysis 3 | """ 4 | 5 | from openslide import OpenSlide 6 | import math 7 | from anndata import AnnData 8 | import squidpy as sq 9 | import numpy as np 10 | import pandas as pd 11 | import scanpy as sc 12 | import os 13 | import matplotlib.pyplot as plt 14 | from numpy.random import default_rng 15 | import seaborn as sns 16 | import warnings 17 | from utils import get_class, get_color_ids 18 | import pdb 19 | 20 | def gen_output(results): 21 | coord = [c for sublist in results['coordinates'] for c in sublist] 22 | x = [c[0].detach().cpu().numpy() for c in coord] 23 | y = [c[1].detach().cpu().numpy() for c in coord] 24 | labels = [l for sublist in results['label'] for l in sublist] 25 | df_res = pd.DataFrame({'x': x, 'y': y, 'label': labels}) 26 | return df_res 27 | 28 | def get_matracies(slide, cluster_df, patch_size = (112, 112)): 29 | 30 | if not slide.properties.get('openslide.mpp-x'): print(f"resolution is not found, using default 0.5um/px") 31 | resize_factor = float(slide.properties.get('aperio.AppMag', 20)) / 20.0 32 | patch_size_resized = (int(resize_factor * patch_size[0]), int(resize_factor * patch_size[1])) 33 | 34 | with warnings.catch_warnings(): 35 | warnings.simplefilter("ignore") 36 | cluster_df['new_x'] = np.ceil(cluster_df['x'].values / patch_size_resized[0]) 37 | cluster_df['new_y'] = np.ceil(cluster_df['y'].values / patch_size_resized[1]) 38 | 39 | cluster_df['new_x'] = cluster_df['new_x'].astype(int) 40 | cluster_df['new_y'] = cluster_df['new_y'].astype(int) 41 | 42 | matrix_trait = pd.DataFrame({'label': cluster_df['label'], 'x': cluster_df['new_x'], 'y': cluster_df['new_y']}) 43 | 44 | return(matrix_trait) 45 | 46 | 47 | def get_interactions(cells): 48 | """ 49 | Generate a list of interactions between cell types, except for self-interactions. 50 | """ 51 | interactions = [] 52 | i, j = 0, 0 53 | while i < len(cells): 54 | j = i + 1 55 | while j < len(cells): 56 | concat = [cells[i], cells[j]] 57 | interactions.append(concat) 58 | j = j + 1 59 | i = i + 1 60 | return interactions 61 | 62 | def normalize_interactions(im_mtx, cell_types): 63 | """ 64 | Given a cell interaction matrix, normalize each type of interaction by dividing the total number of interactions 65 | """ 66 | sum_links = im_mtx.sum().sum() 67 | interactions = get_interactions(cell_types) 68 | duplicated_links = 0 69 | for inter in interactions: 70 | duplicated_links = duplicated_links + im_mtx.loc[inter[0], inter[1]] 71 | new_links = sum_links - duplicated_links 72 | im_mtx_norm = im_mtx.div(new_links) 73 | return im_mtx_norm 74 | 75 | def gen_graph(slide, results): 76 | 77 | cluster_df = gen_output(results) 78 | 79 | trait = get_matracies(slide, cluster_df = cluster_df, patch_size = (112, 112)) 80 | trait['label'] = trait['label'].astype('category') 81 | class2idx, id2class = get_class() 82 | labels = sorted(np.unique(trait['label'])) 83 | cell_types = [id2class[k] for k in labels] 84 | 85 | cell_number = trait.shape[0] 86 | rng = default_rng(0) 87 | counts = rng.integers(0, 15, size=(cell_number, 50)) # feature matrix 88 | 89 | with warnings.catch_warnings(): 90 | warnings.simplefilter("ignore") 91 | adata = AnnData(counts, obs = trait, obsm={"spatial": np.asarray(trait[['x', 'y']])}, dtype = counts.dtype) 92 | 93 | sq.gr.spatial_neighbors(adata, n_neighs=8, n_rings=2, coord_type="grid") 94 | sq.gr.centrality_scores(adata, cluster_key='label', show_progress_bar=False) 95 | sq.gr.interaction_matrix(adata, cluster_key='label') 96 | 97 | # Generate dataframes 98 | dgr_centr = pd.DataFrame({'Subtype': cell_types, 'centrality':adata.uns['label_centrality_scores']['degree_centrality']}) 99 | 100 | im_mtx = pd.DataFrame(adata.uns['label_interactions'], columns=cell_types, index=cell_types) 101 | im_mtx_slide = normalize_interactions(im_mtx, cell_types) 102 | im_mtx_row = im_mtx.div(im_mtx.sum(axis=1), axis=0) 103 | 104 | cluster_res = [] 105 | for cell in cell_types: 106 | cluster_res.append(im_mtx_row.loc[cell][cell]) 107 | df_cluster = pd.DataFrame({'Subtype': cell_types, 'cluster_coeff': cluster_res}) 108 | 109 | dgr_centr = dgr_centr.sort_values(["centrality"], ascending=False) 110 | df_cluster = df_cluster.sort_values(['cluster_coeff'], ascending=False) 111 | 112 | dgr_centr = dgr_centr.reset_index(drop = True) 113 | df_cluster = df_cluster.reset_index(drop = True) 114 | 115 | return dgr_centr, im_mtx_slide, im_mtx_row, df_cluster 116 | 117 | def compute_percent(labels): 118 | """ 119 | Compute cell type compositions 120 | """ 121 | 122 | labels = labels['label'] 123 | labels = np.concatenate((labels), axis=0) 124 | # convert predicted labels to actual cell types 125 | class2idx, id2class = get_class() 126 | pred_labels = [id2class[k] for k in labels] 127 | total = len(pred_labels) 128 | cell_types = class2idx.keys() 129 | frac = [] 130 | for cell in cell_types: 131 | count = pred_labels.count(cell) 132 | percent = float(count/total) * 100 133 | frac.append(percent) 134 | df = pd.DataFrame({'Subtype': cell_types, 'Percentage': frac}) 135 | df = df.sort_values(['Percentage'], ascending=False) 136 | df = df.reset_index(drop=True) 137 | return df 138 | 139 | 140 | 141 | 142 | 143 | -------------------------------------------------------------------------------- /src/tutorial.md: -------------------------------------------------------------------------------- 1 | 1. Click the `Run` tab located at the top of the page. 2 | 2. To start the analysis, user can either upload a new histology image or simply click `Use an example slide`.
3 | **Note**: 4 | - We currently support images saved in *tif*, *tiff* or *svs* format.
5 | - Ideally, the image should be scanned at 20X magnification with a pixel resolution of 0.5um / pixel. 6 | 7 | ![Example Image](pictures/screenshot_file_upload.png) 8 | 9 | A thumbnail of the image will display when the upload is complete 10 | 11 | ![Example Image](pictures/screenshot_thumbnail.png) 12 | 13 | 3. Select the mode for running the job.
14 | **Note**: 15 | 16 | - The default mode is set to the `Test mode`, which will only predicts a limited portion of the image (1,000 patches). This is meant to speed up the process by generating a quick preview of the results. 17 | - To predict the entire image, please switch to `Complete` mode. 18 | - We are currently working on obtaining GPU support for this software, which will significantly accelerate its performance. 19 | 20 | ![Example Image](pictures/screenshot_mode.png) 21 | 22 | 4. Click the `Get cell type visualization` button to predict the spatial distribution of transcriptional subtype for tumor cells. 23 | 24 | ![Example Image](pictures/screenshot_option.png) 25 | 26 | The image will be colored by the predicted transcriptional subtype: 27 | 28 | ![Example Image](pictures/screenshot_cell_type_vis.png) 29 | 30 |
31 | 32 | 5. Based on the spatial subtype prediction, the software will automatically make several statistical analysis to quantify subtype compositions and spatial cellular organization: 33 | 34 | (1) Subtype fraction 35 | 36 | ![Example Image](pictures/screenshot_cell_fraction.png) 37 | 38 | (2) Subtype interaction 39 | 40 | ![Example Image](pictures/screenshot_interaction.png) 41 | 42 | (3) Clustering coefficient 43 | 44 | ![Example Image](pictures/screenshot_cc.png) 45 | 46 | 6. Finally, click the `Get prognosis visualization` button to predict the aggressive scores. 47 | 48 | Blue indicates low aggressiveness, while Red indicates high aggressiveness 49 | 50 | ![Example Image](pictures/screenshot_agg.png) 51 | 52 | -------------------------------------------------------------------------------- /src/utils.py: -------------------------------------------------------------------------------- 1 | """ 2 | General utility functions 3 | """ 4 | 5 | print("utils") 6 | import os 7 | import torch 8 | import numpy as np 9 | from stqdm import stqdm 10 | import seaborn as sns 11 | 12 | from resnet import resnet50 13 | from pathology_models import AggregationModel, Identity, TanhAttention 14 | from pathlib import Path 15 | 16 | def get_class(): 17 | class2idx = { 18 | 'Normal': 0, 19 | 'NPC' : 1, 20 | 'OPC' : 2, 21 | 'AC' : 3, 22 | 'MESlike': 4, 23 | 'MEShypoxia':5 24 | } 25 | id2class = {v: k for k, v in class2idx.items()} 26 | return class2idx, id2class 27 | 28 | def get_color_ids(): 29 | color_ids = { 30 | 'Normal': 1, 31 | 'NPC' : 0, 32 | 'OPC' : 3, 33 | 'AC' : 2, 34 | 'MESlike': 5, 35 | 'MEShypoxia': 4 36 | } 37 | clusters_colors = {} 38 | for k, v in color_ids.items(): 39 | clusters_colors[k] = sns.color_palette()[v] 40 | return color_ids, clusters_colors 41 | 42 | def check_device(use_GPU): 43 | device = 'cpu' 44 | if use_GPU: 45 | if torch.backends.mps.is_available() and torch.backends.mps.is_built(): 46 | device = 'mps' 47 | elif torch.cuda.is_available(): 48 | device = 'cuda' 49 | return device 50 | 51 | def load_model(checkpoint: str, config=None): 52 | resnet = resnet50(pretrained=False) 53 | aggregator = None 54 | if config['aggregator']== 'identity': 55 | aggregator = Identity() 56 | elif config['aggregator'] == "attention": 57 | aggregator = TanhAttention(dim=2048) 58 | model = AggregationModel(resnet=resnet, aggregator=aggregator, 59 | aggregator_dim=config['aggregator_hdim'],resnet_dim=2048, 60 | out_features=config['num_classes']) 61 | 62 | model.load_state_dict(torch.load(checkpoint, map_location=torch.device('cpu'))) 63 | 64 | return model 65 | 66 | def predict_cell(model, val_dataloader, device='cpu'): 67 | 68 | model.to(torch.device(device)) 69 | ## Validation 70 | model.eval() 71 | results = { 72 | 'coordinates': [], 73 | 'label': [] 74 | } 75 | 76 | for batch_dict in stqdm(val_dataloader): 77 | inputs = batch_dict['image'].to(device) 78 | coordinates = batch_dict['coordinates'] 79 | # forward 80 | with torch.no_grad(): 81 | outputs, _ = model.forward(inputs) 82 | 83 | outputs = outputs.detach().cpu().numpy() 84 | 85 | tumor_arr = outputs[:, :6] 86 | class_weights = [1.0, 0.6, 1.4 , 0.5, 1.4, 1.8] 87 | tumor_arr = tumor_arr * class_weights 88 | 89 | pred_list = np.argmax(tumor_arr, axis=1) 90 | coordinates_list = [(x, y) for x, y in zip(coordinates[0], coordinates[1])] 91 | results['coordinates'].append(coordinates_list) 92 | results['label'].append(pred_list) 93 | 94 | return results 95 | 96 | def predict_survival(model, val_dataloader, device='cpu'): 97 | 98 | model.to(torch.device(device)) 99 | ## Validation 100 | 101 | model.eval() 102 | 103 | results = { 104 | 'coordinates': [], 105 | 'risk_score': [] 106 | } 107 | 108 | for batch_dict in stqdm(val_dataloader): 109 | inputs = batch_dict['image'].to(device) 110 | coordinates = batch_dict['coordinates'] 111 | # forward 112 | with torch.no_grad(): 113 | outputs, _ = model.forward(inputs) 114 | 115 | output_list = outputs.detach().cpu().numpy() 116 | output_list = np.concatenate(output_list, axis=0) 117 | coordinates_list = [(x.item(), y.item()) for x, y in zip(coordinates[0], coordinates[1])] 118 | results['coordinates'].append(coordinates_list) 119 | results['risk_score'].append(output_list) 120 | 121 | return results 122 | 123 | def save_uploaded_file(uploaded_file): 124 | with open(os.path.join("temp",uploaded_file.name),"wb") as f: 125 | f.write(uploaded_file.getbuffer()) 126 | return os.path.join("temp",uploaded_file.name) 127 | 128 | def clear(file): 129 | os.remove(file) 130 | 131 | def clear_dir(directory_path): 132 | # List all files in the directory 133 | file_list = os.listdir(directory_path) 134 | 135 | # Iterate through the files and remove them 136 | for filename in file_list: 137 | file_path = os.path.join(directory_path, filename) 138 | if os.path.isfile(file_path): 139 | os.remove(file_path) 140 | 141 | def style_table(df): 142 | # style 143 | th_props = [ 144 | ('font-size', '18pt'), 145 | ('text-align', 'center'), 146 | ('font-weight', 'bold'), 147 | ('color', '#6d6d6d'), 148 | ('background-color', '#f7ffff') 149 | ] 150 | 151 | td_props = [ 152 | ('font-size', '18pt') 153 | ] 154 | 155 | styles = [ 156 | dict(selector="th", props=th_props), 157 | dict(selector="td", props=td_props) 158 | ] 159 | 160 | df_style = df.style.set_properties(**{ 161 | 'font-size': '16pt', 162 | 'text-align': 'center', 163 | 'format': '{:.3f}' 164 | }).set_table_styles(styles) 165 | 166 | return df_style 167 | 168 | def read_markdown_file(markdown_file): 169 | return Path(markdown_file).read_text() 170 | 171 | 172 | 173 | 174 | 175 | --------------------------------------------------------------------------------