├── .gitignore ├── LICENSE ├── README.md ├── python ├── ddff │ ├── __init__.py │ ├── dataproviders │ │ ├── __init__.py │ │ └── datareaders │ │ │ ├── FocalStackDDFFH5Reader.py │ │ │ └── __init__.py │ ├── metricseval │ │ ├── BaseDDFFEval.py │ │ ├── DDFFEval.py │ │ ├── DDFFTFLearnEval.py │ │ └── __init__.py │ ├── models │ │ ├── DDFFNet.py │ │ └── __init__.py │ └── trainers │ │ ├── BaseTrainer.py │ │ ├── DDFFTrainer.py │ │ └── __init__.py ├── eval_ddff.py ├── eval_ddff_tflearn.py └── run_ddff.py └── requirements.txt /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .coverage 42 | .coverage.* 43 | .cache 44 | nosetests.xml 45 | coverage.xml 46 | *.cover 47 | .hypothesis/ 48 | 49 | # Translations 50 | *.mo 51 | *.pot 52 | 53 | # Django stuff: 54 | *.log 55 | local_settings.py 56 | 57 | # Flask stuff: 58 | instance/ 59 | .webassets-cache 60 | 61 | # Scrapy stuff: 62 | .scrapy 63 | 64 | # Sphinx documentation 65 | docs/_build/ 66 | 67 | # PyBuilder 68 | target/ 69 | 70 | # Jupyter Notebook 71 | .ipynb_checkpoints 72 | 73 | # pyenv 74 | .python-version 75 | 76 | # celery beat schedule file 77 | celerybeat-schedule 78 | 79 | # SageMath parsed files 80 | *.sage.py 81 | 82 | # dotenv 83 | .env 84 | 85 | # virtualenv 86 | .venv 87 | venv/ 88 | ENV/ 89 | 90 | # Spyder project settings 91 | .spyderproject 92 | .spyproject 93 | 94 | # Rope project settings 95 | .ropeproject 96 | 97 | # mkdocs documentation 98 | /site 99 | 100 | # mypy 101 | .mypy_cache/ 102 | 103 | #Pytorch checkpoints 104 | *.pt 105 | 106 | #Datasets 107 | *.h5 108 | 109 | #Weights 110 | *.npz 111 | *.npy 112 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | GNU GENERAL PUBLIC LICENSE 2 | Version 3, 29 June 2007 3 | 4 | Copyright (C) 2007 Free Software Foundation, Inc. 5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 7 | 8 | Preamble 9 | 10 | The GNU General Public License is a free, copyleft license for 11 | software and other kinds of works. 12 | 13 | The licenses for most software and other practical works are designed 14 | to take away your freedom to share and change the works. By contrast, 15 | the GNU General Public License is intended to guarantee your freedom to 16 | share and change all versions of a program--to make sure it remains free 17 | software for all its users. We, the Free Software Foundation, use the 18 | GNU General Public License for most of our software; it applies also to 19 | any other work released this way by its authors. You can apply it to 20 | your programs, too. 
21 | 22 | When we speak of free software, we are referring to freedom, not 23 | price. Our General Public Licenses are designed to make sure that you 24 | have the freedom to distribute copies of free software (and charge for 25 | them if you wish), that you receive source code or can get it if you 26 | want it, that you can change the software or use pieces of it in new 27 | free programs, and that you know you can do these things. 28 | 29 | To protect your rights, we need to prevent others from denying you 30 | these rights or asking you to surrender the rights. Therefore, you have 31 | certain responsibilities if you distribute copies of the software, or if 32 | you modify it: responsibilities to respect the freedom of others. 33 | 34 | For example, if you distribute copies of such a program, whether 35 | gratis or for a fee, you must pass on to the recipients the same 36 | freedoms that you received. You must make sure that they, too, receive 37 | or can get the source code. And you must show them these terms so they 38 | know their rights. 39 | 40 | Developers that use the GNU GPL protect your rights with two steps: 41 | (1) assert copyright on the software, and (2) offer you this License 42 | giving you legal permission to copy, distribute and/or modify it. 43 | 44 | For the developers' and authors' protection, the GPL clearly explains 45 | that there is no warranty for this free software. For both users' and 46 | authors' sake, the GPL requires that modified versions be marked as 47 | changed, so that their problems will not be attributed erroneously to 48 | authors of previous versions. 49 | 50 | Some devices are designed to deny users access to install or run 51 | modified versions of the software inside them, although the manufacturer 52 | can do so. This is fundamentally incompatible with the aim of 53 | protecting users' freedom to change the software. The systematic 54 | pattern of such abuse occurs in the area of products for individuals to 55 | use, which is precisely where it is most unacceptable. Therefore, we 56 | have designed this version of the GPL to prohibit the practice for those 57 | products. If such problems arise substantially in other domains, we 58 | stand ready to extend this provision to those domains in future versions 59 | of the GPL, as needed to protect the freedom of users. 60 | 61 | Finally, every program is threatened constantly by software patents. 62 | States should not allow patents to restrict development and use of 63 | software on general-purpose computers, but in those that do, we wish to 64 | avoid the special danger that patents applied to a free program could 65 | make it effectively proprietary. To prevent this, the GPL assures that 66 | patents cannot be used to render the program non-free. 67 | 68 | The precise terms and conditions for copying, distribution and 69 | modification follow. 70 | 71 | TERMS AND CONDITIONS 72 | 73 | 0. Definitions. 74 | 75 | "This License" refers to version 3 of the GNU General Public License. 76 | 77 | "Copyright" also means copyright-like laws that apply to other kinds of 78 | works, such as semiconductor masks. 79 | 80 | "The Program" refers to any copyrightable work licensed under this 81 | License. Each licensee is addressed as "you". "Licensees" and 82 | "recipients" may be individuals or organizations. 83 | 84 | To "modify" a work means to copy from or adapt all or part of the work 85 | in a fashion requiring copyright permission, other than the making of an 86 | exact copy. 
The resulting work is called a "modified version" of the 87 | earlier work or a work "based on" the earlier work. 88 | 89 | A "covered work" means either the unmodified Program or a work based 90 | on the Program. 91 | 92 | To "propagate" a work means to do anything with it that, without 93 | permission, would make you directly or secondarily liable for 94 | infringement under applicable copyright law, except executing it on a 95 | computer or modifying a private copy. Propagation includes copying, 96 | distribution (with or without modification), making available to the 97 | public, and in some countries other activities as well. 98 | 99 | To "convey" a work means any kind of propagation that enables other 100 | parties to make or receive copies. Mere interaction with a user through 101 | a computer network, with no transfer of a copy, is not conveying. 102 | 103 | An interactive user interface displays "Appropriate Legal Notices" 104 | to the extent that it includes a convenient and prominently visible 105 | feature that (1) displays an appropriate copyright notice, and (2) 106 | tells the user that there is no warranty for the work (except to the 107 | extent that warranties are provided), that licensees may convey the 108 | work under this License, and how to view a copy of this License. If 109 | the interface presents a list of user commands or options, such as a 110 | menu, a prominent item in the list meets this criterion. 111 | 112 | 1. Source Code. 113 | 114 | The "source code" for a work means the preferred form of the work 115 | for making modifications to it. "Object code" means any non-source 116 | form of a work. 117 | 118 | A "Standard Interface" means an interface that either is an official 119 | standard defined by a recognized standards body, or, in the case of 120 | interfaces specified for a particular programming language, one that 121 | is widely used among developers working in that language. 122 | 123 | The "System Libraries" of an executable work include anything, other 124 | than the work as a whole, that (a) is included in the normal form of 125 | packaging a Major Component, but which is not part of that Major 126 | Component, and (b) serves only to enable use of the work with that 127 | Major Component, or to implement a Standard Interface for which an 128 | implementation is available to the public in source code form. A 129 | "Major Component", in this context, means a major essential component 130 | (kernel, window system, and so on) of the specific operating system 131 | (if any) on which the executable work runs, or a compiler used to 132 | produce the work, or an object code interpreter used to run it. 133 | 134 | The "Corresponding Source" for a work in object code form means all 135 | the source code needed to generate, install, and (for an executable 136 | work) run the object code and to modify the work, including scripts to 137 | control those activities. However, it does not include the work's 138 | System Libraries, or general-purpose tools or generally available free 139 | programs which are used unmodified in performing those activities but 140 | which are not part of the work. 
For example, Corresponding Source 141 | includes interface definition files associated with source files for 142 | the work, and the source code for shared libraries and dynamically 143 | linked subprograms that the work is specifically designed to require, 144 | such as by intimate data communication or control flow between those 145 | subprograms and other parts of the work. 146 | 147 | The Corresponding Source need not include anything that users 148 | can regenerate automatically from other parts of the Corresponding 149 | Source. 150 | 151 | The Corresponding Source for a work in source code form is that 152 | same work. 153 | 154 | 2. Basic Permissions. 155 | 156 | All rights granted under this License are granted for the term of 157 | copyright on the Program, and are irrevocable provided the stated 158 | conditions are met. This License explicitly affirms your unlimited 159 | permission to run the unmodified Program. The output from running a 160 | covered work is covered by this License only if the output, given its 161 | content, constitutes a covered work. This License acknowledges your 162 | rights of fair use or other equivalent, as provided by copyright law. 163 | 164 | You may make, run and propagate covered works that you do not 165 | convey, without conditions so long as your license otherwise remains 166 | in force. You may convey covered works to others for the sole purpose 167 | of having them make modifications exclusively for you, or provide you 168 | with facilities for running those works, provided that you comply with 169 | the terms of this License in conveying all material for which you do 170 | not control copyright. Those thus making or running the covered works 171 | for you must do so exclusively on your behalf, under your direction 172 | and control, on terms that prohibit them from making any copies of 173 | your copyrighted material outside their relationship with you. 174 | 175 | Conveying under any other circumstances is permitted solely under 176 | the conditions stated below. Sublicensing is not allowed; section 10 177 | makes it unnecessary. 178 | 179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 180 | 181 | No covered work shall be deemed part of an effective technological 182 | measure under any applicable law fulfilling obligations under article 183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or 184 | similar laws prohibiting or restricting circumvention of such 185 | measures. 186 | 187 | When you convey a covered work, you waive any legal power to forbid 188 | circumvention of technological measures to the extent such circumvention 189 | is effected by exercising rights under this License with respect to 190 | the covered work, and you disclaim any intention to limit operation or 191 | modification of the work as a means of enforcing, against the work's 192 | users, your or third parties' legal rights to forbid circumvention of 193 | technological measures. 194 | 195 | 4. Conveying Verbatim Copies. 196 | 197 | You may convey verbatim copies of the Program's source code as you 198 | receive it, in any medium, provided that you conspicuously and 199 | appropriately publish on each copy an appropriate copyright notice; 200 | keep intact all notices stating that this License and any 201 | non-permissive terms added in accord with section 7 apply to the code; 202 | keep intact all notices of the absence of any warranty; and give all 203 | recipients a copy of this License along with the Program. 
204 | 205 | You may charge any price or no price for each copy that you convey, 206 | and you may offer support or warranty protection for a fee. 207 | 208 | 5. Conveying Modified Source Versions. 209 | 210 | You may convey a work based on the Program, or the modifications to 211 | produce it from the Program, in the form of source code under the 212 | terms of section 4, provided that you also meet all of these conditions: 213 | 214 | a) The work must carry prominent notices stating that you modified 215 | it, and giving a relevant date. 216 | 217 | b) The work must carry prominent notices stating that it is 218 | released under this License and any conditions added under section 219 | 7. This requirement modifies the requirement in section 4 to 220 | "keep intact all notices". 221 | 222 | c) You must license the entire work, as a whole, under this 223 | License to anyone who comes into possession of a copy. This 224 | License will therefore apply, along with any applicable section 7 225 | additional terms, to the whole of the work, and all its parts, 226 | regardless of how they are packaged. This License gives no 227 | permission to license the work in any other way, but it does not 228 | invalidate such permission if you have separately received it. 229 | 230 | d) If the work has interactive user interfaces, each must display 231 | Appropriate Legal Notices; however, if the Program has interactive 232 | interfaces that do not display Appropriate Legal Notices, your 233 | work need not make them do so. 234 | 235 | A compilation of a covered work with other separate and independent 236 | works, which are not by their nature extensions of the covered work, 237 | and which are not combined with it such as to form a larger program, 238 | in or on a volume of a storage or distribution medium, is called an 239 | "aggregate" if the compilation and its resulting copyright are not 240 | used to limit the access or legal rights of the compilation's users 241 | beyond what the individual works permit. Inclusion of a covered work 242 | in an aggregate does not cause this License to apply to the other 243 | parts of the aggregate. 244 | 245 | 6. Conveying Non-Source Forms. 246 | 247 | You may convey a covered work in object code form under the terms 248 | of sections 4 and 5, provided that you also convey the 249 | machine-readable Corresponding Source under the terms of this License, 250 | in one of these ways: 251 | 252 | a) Convey the object code in, or embodied in, a physical product 253 | (including a physical distribution medium), accompanied by the 254 | Corresponding Source fixed on a durable physical medium 255 | customarily used for software interchange. 256 | 257 | b) Convey the object code in, or embodied in, a physical product 258 | (including a physical distribution medium), accompanied by a 259 | written offer, valid for at least three years and valid for as 260 | long as you offer spare parts or customer support for that product 261 | model, to give anyone who possesses the object code either (1) a 262 | copy of the Corresponding Source for all the software in the 263 | product that is covered by this License, on a durable physical 264 | medium customarily used for software interchange, for a price no 265 | more than your reasonable cost of physically performing this 266 | conveying of source, or (2) access to copy the 267 | Corresponding Source from a network server at no charge. 
268 | 269 | c) Convey individual copies of the object code with a copy of the 270 | written offer to provide the Corresponding Source. This 271 | alternative is allowed only occasionally and noncommercially, and 272 | only if you received the object code with such an offer, in accord 273 | with subsection 6b. 274 | 275 | d) Convey the object code by offering access from a designated 276 | place (gratis or for a charge), and offer equivalent access to the 277 | Corresponding Source in the same way through the same place at no 278 | further charge. You need not require recipients to copy the 279 | Corresponding Source along with the object code. If the place to 280 | copy the object code is a network server, the Corresponding Source 281 | may be on a different server (operated by you or a third party) 282 | that supports equivalent copying facilities, provided you maintain 283 | clear directions next to the object code saying where to find the 284 | Corresponding Source. Regardless of what server hosts the 285 | Corresponding Source, you remain obligated to ensure that it is 286 | available for as long as needed to satisfy these requirements. 287 | 288 | e) Convey the object code using peer-to-peer transmission, provided 289 | you inform other peers where the object code and Corresponding 290 | Source of the work are being offered to the general public at no 291 | charge under subsection 6d. 292 | 293 | A separable portion of the object code, whose source code is excluded 294 | from the Corresponding Source as a System Library, need not be 295 | included in conveying the object code work. 296 | 297 | A "User Product" is either (1) a "consumer product", which means any 298 | tangible personal property which is normally used for personal, family, 299 | or household purposes, or (2) anything designed or sold for incorporation 300 | into a dwelling. In determining whether a product is a consumer product, 301 | doubtful cases shall be resolved in favor of coverage. For a particular 302 | product received by a particular user, "normally used" refers to a 303 | typical or common use of that class of product, regardless of the status 304 | of the particular user or of the way in which the particular user 305 | actually uses, or expects or is expected to use, the product. A product 306 | is a consumer product regardless of whether the product has substantial 307 | commercial, industrial or non-consumer uses, unless such uses represent 308 | the only significant mode of use of the product. 309 | 310 | "Installation Information" for a User Product means any methods, 311 | procedures, authorization keys, or other information required to install 312 | and execute modified versions of a covered work in that User Product from 313 | a modified version of its Corresponding Source. The information must 314 | suffice to ensure that the continued functioning of the modified object 315 | code is in no case prevented or interfered with solely because 316 | modification has been made. 317 | 318 | If you convey an object code work under this section in, or with, or 319 | specifically for use in, a User Product, and the conveying occurs as 320 | part of a transaction in which the right of possession and use of the 321 | User Product is transferred to the recipient in perpetuity or for a 322 | fixed term (regardless of how the transaction is characterized), the 323 | Corresponding Source conveyed under this section must be accompanied 324 | by the Installation Information. 
But this requirement does not apply 325 | if neither you nor any third party retains the ability to install 326 | modified object code on the User Product (for example, the work has 327 | been installed in ROM). 328 | 329 | The requirement to provide Installation Information does not include a 330 | requirement to continue to provide support service, warranty, or updates 331 | for a work that has been modified or installed by the recipient, or for 332 | the User Product in which it has been modified or installed. Access to a 333 | network may be denied when the modification itself materially and 334 | adversely affects the operation of the network or violates the rules and 335 | protocols for communication across the network. 336 | 337 | Corresponding Source conveyed, and Installation Information provided, 338 | in accord with this section must be in a format that is publicly 339 | documented (and with an implementation available to the public in 340 | source code form), and must require no special password or key for 341 | unpacking, reading or copying. 342 | 343 | 7. Additional Terms. 344 | 345 | "Additional permissions" are terms that supplement the terms of this 346 | License by making exceptions from one or more of its conditions. 347 | Additional permissions that are applicable to the entire Program shall 348 | be treated as though they were included in this License, to the extent 349 | that they are valid under applicable law. If additional permissions 350 | apply only to part of the Program, that part may be used separately 351 | under those permissions, but the entire Program remains governed by 352 | this License without regard to the additional permissions. 353 | 354 | When you convey a copy of a covered work, you may at your option 355 | remove any additional permissions from that copy, or from any part of 356 | it. (Additional permissions may be written to require their own 357 | removal in certain cases when you modify the work.) You may place 358 | additional permissions on material, added by you to a covered work, 359 | for which you have or can give appropriate copyright permission. 360 | 361 | Notwithstanding any other provision of this License, for material you 362 | add to a covered work, you may (if authorized by the copyright holders of 363 | that material) supplement the terms of this License with terms: 364 | 365 | a) Disclaiming warranty or limiting liability differently from the 366 | terms of sections 15 and 16 of this License; or 367 | 368 | b) Requiring preservation of specified reasonable legal notices or 369 | author attributions in that material or in the Appropriate Legal 370 | Notices displayed by works containing it; or 371 | 372 | c) Prohibiting misrepresentation of the origin of that material, or 373 | requiring that modified versions of such material be marked in 374 | reasonable ways as different from the original version; or 375 | 376 | d) Limiting the use for publicity purposes of names of licensors or 377 | authors of the material; or 378 | 379 | e) Declining to grant rights under trademark law for use of some 380 | trade names, trademarks, or service marks; or 381 | 382 | f) Requiring indemnification of licensors and authors of that 383 | material by anyone who conveys the material (or modified versions of 384 | it) with contractual assumptions of liability to the recipient, for 385 | any liability that these contractual assumptions directly impose on 386 | those licensors and authors. 
387 | 388 | All other non-permissive additional terms are considered "further 389 | restrictions" within the meaning of section 10. If the Program as you 390 | received it, or any part of it, contains a notice stating that it is 391 | governed by this License along with a term that is a further 392 | restriction, you may remove that term. If a license document contains 393 | a further restriction but permits relicensing or conveying under this 394 | License, you may add to a covered work material governed by the terms 395 | of that license document, provided that the further restriction does 396 | not survive such relicensing or conveying. 397 | 398 | If you add terms to a covered work in accord with this section, you 399 | must place, in the relevant source files, a statement of the 400 | additional terms that apply to those files, or a notice indicating 401 | where to find the applicable terms. 402 | 403 | Additional terms, permissive or non-permissive, may be stated in the 404 | form of a separately written license, or stated as exceptions; 405 | the above requirements apply either way. 406 | 407 | 8. Termination. 408 | 409 | You may not propagate or modify a covered work except as expressly 410 | provided under this License. Any attempt otherwise to propagate or 411 | modify it is void, and will automatically terminate your rights under 412 | this License (including any patent licenses granted under the third 413 | paragraph of section 11). 414 | 415 | However, if you cease all violation of this License, then your 416 | license from a particular copyright holder is reinstated (a) 417 | provisionally, unless and until the copyright holder explicitly and 418 | finally terminates your license, and (b) permanently, if the copyright 419 | holder fails to notify you of the violation by some reasonable means 420 | prior to 60 days after the cessation. 421 | 422 | Moreover, your license from a particular copyright holder is 423 | reinstated permanently if the copyright holder notifies you of the 424 | violation by some reasonable means, this is the first time you have 425 | received notice of violation of this License (for any work) from that 426 | copyright holder, and you cure the violation prior to 30 days after 427 | your receipt of the notice. 428 | 429 | Termination of your rights under this section does not terminate the 430 | licenses of parties who have received copies or rights from you under 431 | this License. If your rights have been terminated and not permanently 432 | reinstated, you do not qualify to receive new licenses for the same 433 | material under section 10. 434 | 435 | 9. Acceptance Not Required for Having Copies. 436 | 437 | You are not required to accept this License in order to receive or 438 | run a copy of the Program. Ancillary propagation of a covered work 439 | occurring solely as a consequence of using peer-to-peer transmission 440 | to receive a copy likewise does not require acceptance. However, 441 | nothing other than this License grants you permission to propagate or 442 | modify any covered work. These actions infringe copyright if you do 443 | not accept this License. Therefore, by modifying or propagating a 444 | covered work, you indicate your acceptance of this License to do so. 445 | 446 | 10. Automatic Licensing of Downstream Recipients. 447 | 448 | Each time you convey a covered work, the recipient automatically 449 | receives a license from the original licensors, to run, modify and 450 | propagate that work, subject to this License. 
You are not responsible 451 | for enforcing compliance by third parties with this License. 452 | 453 | An "entity transaction" is a transaction transferring control of an 454 | organization, or substantially all assets of one, or subdividing an 455 | organization, or merging organizations. If propagation of a covered 456 | work results from an entity transaction, each party to that 457 | transaction who receives a copy of the work also receives whatever 458 | licenses to the work the party's predecessor in interest had or could 459 | give under the previous paragraph, plus a right to possession of the 460 | Corresponding Source of the work from the predecessor in interest, if 461 | the predecessor has it or can get it with reasonable efforts. 462 | 463 | You may not impose any further restrictions on the exercise of the 464 | rights granted or affirmed under this License. For example, you may 465 | not impose a license fee, royalty, or other charge for exercise of 466 | rights granted under this License, and you may not initiate litigation 467 | (including a cross-claim or counterclaim in a lawsuit) alleging that 468 | any patent claim is infringed by making, using, selling, offering for 469 | sale, or importing the Program or any portion of it. 470 | 471 | 11. Patents. 472 | 473 | A "contributor" is a copyright holder who authorizes use under this 474 | License of the Program or a work on which the Program is based. The 475 | work thus licensed is called the contributor's "contributor version". 476 | 477 | A contributor's "essential patent claims" are all patent claims 478 | owned or controlled by the contributor, whether already acquired or 479 | hereafter acquired, that would be infringed by some manner, permitted 480 | by this License, of making, using, or selling its contributor version, 481 | but do not include claims that would be infringed only as a 482 | consequence of further modification of the contributor version. For 483 | purposes of this definition, "control" includes the right to grant 484 | patent sublicenses in a manner consistent with the requirements of 485 | this License. 486 | 487 | Each contributor grants you a non-exclusive, worldwide, royalty-free 488 | patent license under the contributor's essential patent claims, to 489 | make, use, sell, offer for sale, import and otherwise run, modify and 490 | propagate the contents of its contributor version. 491 | 492 | In the following three paragraphs, a "patent license" is any express 493 | agreement or commitment, however denominated, not to enforce a patent 494 | (such as an express permission to practice a patent or covenant not to 495 | sue for patent infringement). To "grant" such a patent license to a 496 | party means to make such an agreement or commitment not to enforce a 497 | patent against the party. 498 | 499 | If you convey a covered work, knowingly relying on a patent license, 500 | and the Corresponding Source of the work is not available for anyone 501 | to copy, free of charge and under the terms of this License, through a 502 | publicly available network server or other readily accessible means, 503 | then you must either (1) cause the Corresponding Source to be so 504 | available, or (2) arrange to deprive yourself of the benefit of the 505 | patent license for this particular work, or (3) arrange, in a manner 506 | consistent with the requirements of this License, to extend the patent 507 | license to downstream recipients. 
"Knowingly relying" means you have 508 | actual knowledge that, but for the patent license, your conveying the 509 | covered work in a country, or your recipient's use of the covered work 510 | in a country, would infringe one or more identifiable patents in that 511 | country that you have reason to believe are valid. 512 | 513 | If, pursuant to or in connection with a single transaction or 514 | arrangement, you convey, or propagate by procuring conveyance of, a 515 | covered work, and grant a patent license to some of the parties 516 | receiving the covered work authorizing them to use, propagate, modify 517 | or convey a specific copy of the covered work, then the patent license 518 | you grant is automatically extended to all recipients of the covered 519 | work and works based on it. 520 | 521 | A patent license is "discriminatory" if it does not include within 522 | the scope of its coverage, prohibits the exercise of, or is 523 | conditioned on the non-exercise of one or more of the rights that are 524 | specifically granted under this License. You may not convey a covered 525 | work if you are a party to an arrangement with a third party that is 526 | in the business of distributing software, under which you make payment 527 | to the third party based on the extent of your activity of conveying 528 | the work, and under which the third party grants, to any of the 529 | parties who would receive the covered work from you, a discriminatory 530 | patent license (a) in connection with copies of the covered work 531 | conveyed by you (or copies made from those copies), or (b) primarily 532 | for and in connection with specific products or compilations that 533 | contain the covered work, unless you entered into that arrangement, 534 | or that patent license was granted, prior to 28 March 2007. 535 | 536 | Nothing in this License shall be construed as excluding or limiting 537 | any implied license or other defenses to infringement that may 538 | otherwise be available to you under applicable patent law. 539 | 540 | 12. No Surrender of Others' Freedom. 541 | 542 | If conditions are imposed on you (whether by court order, agreement or 543 | otherwise) that contradict the conditions of this License, they do not 544 | excuse you from the conditions of this License. If you cannot convey a 545 | covered work so as to satisfy simultaneously your obligations under this 546 | License and any other pertinent obligations, then as a consequence you may 547 | not convey it at all. For example, if you agree to terms that obligate you 548 | to collect a royalty for further conveying from those to whom you convey 549 | the Program, the only way you could satisfy both those terms and this 550 | License would be to refrain entirely from conveying the Program. 551 | 552 | 13. Use with the GNU Affero General Public License. 553 | 554 | Notwithstanding any other provision of this License, you have 555 | permission to link or combine any covered work with a work licensed 556 | under version 3 of the GNU Affero General Public License into a single 557 | combined work, and to convey the resulting work. The terms of this 558 | License will continue to apply to the part which is the covered work, 559 | but the special requirements of the GNU Affero General Public License, 560 | section 13, concerning interaction through a network will apply to the 561 | combination as such. 562 | 563 | 14. Revised Versions of this License. 
564 | 565 | The Free Software Foundation may publish revised and/or new versions of 566 | the GNU General Public License from time to time. Such new versions will 567 | be similar in spirit to the present version, but may differ in detail to 568 | address new problems or concerns. 569 | 570 | Each version is given a distinguishing version number. If the 571 | Program specifies that a certain numbered version of the GNU General 572 | Public License "or any later version" applies to it, you have the 573 | option of following the terms and conditions either of that numbered 574 | version or of any later version published by the Free Software 575 | Foundation. If the Program does not specify a version number of the 576 | GNU General Public License, you may choose any version ever published 577 | by the Free Software Foundation. 578 | 579 | If the Program specifies that a proxy can decide which future 580 | versions of the GNU General Public License can be used, that proxy's 581 | public statement of acceptance of a version permanently authorizes you 582 | to choose that version for the Program. 583 | 584 | Later license versions may give you additional or different 585 | permissions. However, no additional obligations are imposed on any 586 | author or copyright holder as a result of your choosing to follow a 587 | later version. 588 | 589 | 15. Disclaimer of Warranty. 590 | 591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY 592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT 593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY 594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, 595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM 597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF 598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 599 | 600 | 16. Limitation of Liability. 601 | 602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS 604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY 605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE 606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF 607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD 608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), 609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF 610 | SUCH DAMAGES. 611 | 612 | 17. Interpretation of Sections 15 and 16. 613 | 614 | If the disclaimer of warranty and limitation of liability provided 615 | above cannot be given local legal effect according to their terms, 616 | reviewing courts shall apply local law that most closely approximates 617 | an absolute waiver of all civil liability in connection with the 618 | Program, unless a warranty or assumption of liability accompanies a 619 | copy of the Program in return for a fee. 620 | 621 | END OF TERMS AND CONDITIONS 622 | 623 | How to Apply These Terms to Your New Programs 624 | 625 | If you develop a new program, and you want it to be of the greatest 626 | possible use to the public, the best way to achieve this is to make it 627 | free software which everyone can redistribute and change under these terms. 
628 | 629 | To do so, attach the following notices to the program. It is safest 630 | to attach them to the start of each source file to most effectively 631 | state the exclusion of warranty; and each file should have at least 632 | the "copyright" line and a pointer to where the full notice is found. 633 | 634 | 635 | Copyright (C) 636 | 637 | This program is free software: you can redistribute it and/or modify 638 | it under the terms of the GNU General Public License as published by 639 | the Free Software Foundation, either version 3 of the License, or 640 | (at your option) any later version. 641 | 642 | This program is distributed in the hope that it will be useful, 643 | but WITHOUT ANY WARRANTY; without even the implied warranty of 644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 645 | GNU General Public License for more details. 646 | 647 | You should have received a copy of the GNU General Public License 648 | along with this program. If not, see . 649 | 650 | Also add information on how to contact you by electronic and paper mail. 651 | 652 | If the program does terminal interaction, make it output a short 653 | notice like this when it starts in an interactive mode: 654 | 655 | Copyright (C) 656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 657 | This is free software, and you are welcome to redistribute it 658 | under certain conditions; type `show c' for details. 659 | 660 | The hypothetical commands `show w' and `show c' should show the appropriate 661 | parts of the General Public License. Of course, your program's commands 662 | might be different; for a GUI interface, you would use an "about box". 663 | 664 | You should also get your employer (if you work as a programmer) or school, 665 | if any, to sign a "copyright disclaimer" for the program, if necessary. 666 | For more information on this, and how to apply and follow the GNU GPL, see 667 | . 668 | 669 | The GNU General Public License does not permit incorporating your program 670 | into proprietary programs. If your program is a subroutine library, you 671 | may consider it more useful to permit linking proprietary applications with 672 | the library. If this is what you want to do, use the GNU Lesser General 673 | Public License instead of this License. But first, please read 674 | . 675 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Deep Depth From Focus 2 | [Deep Depth From Focus](http://hazirbas.com/projects/ddff/) implementation in PyTorch. Please check the [ddff-toolbox](https://github.com/hazirbas/ddff-toolbox) for refocusing and camera parameters. 3 | 4 | ## Usage 5 | ### Installation 6 | To run the project a Python 3.7.0 environment and a number of packages are required. The easiest way to fetch all dependencies is to install via pip. 7 | ``` 8 | pip install -r requirements.txt 9 | ``` 10 | 11 | ### Training and Testing 12 | This implementation contains the [Deep Depth from Focus model](python/ddff/models/DDFFNet.py) and a class to run the [training and prediction](python/ddff/trainers/DDFFTrainer.py) on a provided dataset. Furthermore a [datareader](python/ddff/dataproviders/datareaders/FocalStackDDFFH5Reader.py) class is provided to read hdf5 files containing focal stacks and their corresponding disparity maps. 13 | 14 | In order to evaluate the model, an [evaluation class](python/ddff/metricseval/DDFFEval.py) is provided. 
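For example, a minimal evaluation run might look like the following sketch; it assumes the script is started from the [python](python) directory so that the ```ddff``` package is importable, and the checkpoint file name is only a placeholder (see [eval_ddff.py](python/eval_ddff.py) for the complete script):
~~~~
from ddff.metricseval.DDFFEval import DDFFEval

# Placeholder checkpoint file produced by a training run; adjust to your own path.
evaluator = DDFFEval("ddffnet-checkpoint.pt", focal_stack_size=10)

# Evaluate on an hdf5 file that contains focal stacks and disparity maps under the
# given keys; returns MSE, RMS, log RMS, relative errors, accuracy thresholds,
# badpix and bumpiness as described in the paper.
metrics = evaluator.evaluate("ddff-dataset-trainval.h5", stack_key="stack_val", disp_key="disp_val")
print(metrics)
~~~~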
The evaluation class takes a model checkpoint and a path to the test data (an h5 file) and provides a method to calculate the error metrics described in the Deep Depth From Focus paper. 15 | 16 | Since the original implementation of Deep Depth From Focus was created in TensorFlow and TFLearn, the class [DDFFTFLearnEval](python/ddff/metricseval/DDFFTFLearnEval.py) loads the checkpoint exported from the original model in order to perform the error evaluation. [eval_ddff_tflearn.py](python/eval_ddff_tflearn.py) shows an example of how to use the class. 17 | 18 | The pretrained weights, exported from the TensorFlow/TFLearn model and converted to a PyTorch-compatible dict, are available [here](https://vision.in.tum.de/webarchive/hazirbas/ddff12scene/ddffnet-cc3-snapshot-121256.npz) (159.3MB). 19 | 20 | The training process can be started by running [run_ddff.py](python/run_ddff.py), which can be provided with a training dataset by passing the parameter ```--dataset``` (e.g. ```--dataset ddff-dataset-trainval.h5```). To evaluate the results, the generated checkpoint file can be loaded as shown in [eval_ddff.py](python/eval_ddff.py), which calculates the error metrics on a test dataset. 21 | 22 | #### Initialization 23 | To train the network on the dataset introduced in the Deep Depth From Focus paper, [run_ddff.py](python/run_ddff.py) has to be run with arguments specifying where the dataset is located and other hyperparameters, which can be inspected by passing the argument ```-h```. 24 | The [datareader](python/ddff/dataproviders/datareaders/FocalStackDDFFH5Reader.py) class requires the provided h5 file to contain a key for the focal stacks (default: "stack_train") and a key for the corresponding disparity maps (default: "disp_train"); both keys can be passed during initialization of the reader. 25 | 26 | #### Data preparation 27 | The focal stacks in the hdf5 file have to be of shape [stacksize, height, width, channels] containing values in the range [0,255]. 28 | 29 | The disparity maps have to be of shape [1, height, width] containing the disparity in pixels. The dataset introduced in the Deep Depth From Focus paper contains disparities in the range [0.0202, 0.2825]. 30 | 31 | Please download the [trainval](https://vision.in.tum.de/webarchive/hazirbas/ddff12scene/ddff-dataset-trainval.h5) (12.6GB) and [test](https://vision.in.tum.de/webarchive/hazirbas/ddff12scene/ddff-dataset-test.h5) (761.1MB) hdf5 datasets. Focal stacks can be read as: 32 | ~~~~ 33 | import h5py 34 | 35 | dataset = h5py.File("ddff-dataset-trainval.h5", "r") 36 | focal_stacks = dataset["stacks_train"] 37 | disparities = dataset["disp_train"] 38 | ~~~~ 39 | 40 | Please submit your results to the [Competition](https://competitions.codalab.org/competitions/17807) to evaluate on the test set. 41 | 42 | **Note that** test scores are slightly worse, by a margin of 0.0001 (MSE), than the results presented in the paper due to the framework switch. 43 | 44 | ## Citation 45 | If you use this code or the publicly shared model, please cite the following paper. 46 | 47 | Caner Hazirbas, Sebastian Georg Soyer, Maximilian Christian Staab, Laura Leal-Taixé and Daniel Cremers, _"Deep Depth From Focus"_, ACCV, 2018. ([arXiv](https://arxiv.org/abs/1704.01085)) 48 | 49 | @InProceedings{hazirbas18ddff, 50 | author = {C. Hazirbas and S. G. Soyer and M. C. Staab and L. Leal-Taixé and D.
Cremers}, 51 | title = {Deep Depth From Focus}, 52 | booktitle = {Asian Conference on Computer Vision (ACCV)}, 53 | year = {2018}, 54 | month = {December}, 55 | eprint = {1704.01085}, 56 | url = {https://hazirbas.com/projects/ddff/}, 57 | } 58 | 59 | ## License 60 | The code is released under the [GNU General Public License Version 3 (GPLv3)](http://www.gnu.org/licenses/gpl.html). 61 | -------------------------------------------------------------------------------- /python/ddff/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/soyers/ddff-pytorch/78a6c5b5118dd0404f97072d51ca68db0eb79990/python/ddff/__init__.py -------------------------------------------------------------------------------- /python/ddff/dataproviders/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/soyers/ddff-pytorch/78a6c5b5118dd0404f97072d51ca68db0eb79990/python/ddff/dataproviders/__init__.py -------------------------------------------------------------------------------- /python/ddff/dataproviders/datareaders/FocalStackDDFFH5Reader.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/python3 2 | 3 | import os 4 | import numpy as np 5 | from torch.utils.data import Dataset 6 | import torchvision 7 | import torch 8 | import h5py 9 | 10 | class FocalStackDDFFH5Reader(Dataset): 11 | 12 | def __init__(self, hdf5_filename, transform=None, stack_key="stack_train", disp_key="disp_train"): 13 | """ 14 | Args: 15 | hdf5_filename (string): Path to the hdf5 file containing the focal stacks and their disparity maps. 16 | transform (callable, optional): Optional transform to be applied on a sample. 17 | stack_key (string): Key of the focal stack dataset inside the hdf5 file. 18 | disp_key (string): Key of the disparity map dataset inside the hdf5 file.
19 | """ 20 | #Disable opencv threading since it leads to deadlocks in PyTorch DataLoader 21 | self.hdf5 = h5py.File(hdf5_filename, 'r') 22 | self.stack_key = stack_key 23 | self.disp_key = disp_key 24 | self.transform = transform 25 | 26 | def __len__(self): 27 | return self.hdf5[self.stack_key].shape[0] 28 | 29 | def __getitem__(self, idx): 30 | #Create sample dict 31 | sample = {'input': self.hdf5[self.stack_key][idx].astype(float), 'output': self.hdf5[self.disp_key][idx]} 32 | 33 | #Transform sample with data augmentation transformers 34 | if self.transform: 35 | sample = self.transform(sample) 36 | 37 | return sample 38 | 39 | def get_stack_size(self): 40 | return self.__getitem__(0)['input'].shape[0] 41 | 42 | class ToTensor(object): 43 | """Convert ndarrays in sample to Tensors.""" 44 | def __call__(self, sample): 45 | #Add color dimension to depth map 46 | sample['output'] = np.expand_dims(sample['output'], axis=0) 47 | # swap color axis because 48 | # numpy image: H x W x C 49 | # torch image: C X H X W 50 | sample['input'] = torch.from_numpy(sample['input'].transpose((0,3,1,2))).float() 51 | sample['output'] = torch.from_numpy(sample['output']).float() 52 | return sample 53 | 54 | class Normalize(object): 55 | def __init__(self, mean_input, std_input, mean_output=None, std_output=None): 56 | self.mean_input = mean_input 57 | self.std_input = std_input 58 | self.mean_output = mean_output 59 | self.std_output = std_output 60 | 61 | def __call__(self, sample): 62 | input_images = torch.stack([torchvision.transforms.functional.normalize(sample_input, mean=self.mean_input, std=self.std_input) for sample_input in sample['input']]) 63 | if self.mean_output is None or self.std_output is None: 64 | output_image = sample['output'] 65 | else: 66 | output_image = torchvision.transforms.functional.normalize(sample['output'], mean=self.mean_output, std=self.std_output) 67 | return {'input': input_images, 'output': output_image} 68 | 69 | class ClipGroundTruth(object): 70 | def __init__(self, lower_bound, upper_bound): 71 | self.lower_bound = lower_bound 72 | self.upper_bound = upper_bound 73 | 74 | def __call__(self, sample): 75 | sample['output'][sample['output'] < self.lower_bound] = 0.0 76 | sample['output'][sample['output'] > self.upper_bound] = 0.0 77 | return sample 78 | 79 | class RandomCrop(object): 80 | def __init__(self, output_size, valid_crop_threshold=0.8): 81 | assert isinstance(output_size, (int, tuple)) 82 | if isinstance(output_size, int): 83 | self.output_size = (output_size, output_size) 84 | else: 85 | assert len(output_size) == 2 86 | self.output_size = output_size 87 | self.valid_crop_threshold = valid_crop_threshold 88 | 89 | def __is_valid_crop(self, output_image, valid_pixel_cond=lambda x : x >= 0.01): 90 | valid_occurrances = valid_pixel_cond(output_image).sum() 91 | all_occurances = np.prod(output_image.shape) 92 | return (float(valid_occurrances) / float(all_occurances)) >= self.valid_crop_threshold 93 | 94 | def __call__(self, sample): 95 | h, w = sample['input'].shape[2:4] 96 | new_h, new_w = self.output_size 97 | 98 | #Generate list of possible random crops 99 | candidates = np.asarray([(x,y) for y in range(h - new_h) for x in range(w - new_w)]) 100 | np.random.shuffle(candidates) 101 | 102 | #Iterate through candidates and choose forst valid crop 103 | for x,y in candidates: 104 | output_image = sample['output'][:,y:(y + new_h),x:(x + new_w)] 105 | if self.__is_valid_crop(output_image): 106 | input_images = torch.stack([sample_input[:,y:(y + new_h),x:(x + 
new_w)] for sample_input in sample['input']]) 107 | return {'input': input_images, 'output': output_image} 108 | 109 | #No valid crop found. Return any crop 110 | top = np.random.randint(0, h - new_h) 111 | left = np.random.randint(0, w - new_w) 112 | input_images = torch.stack([sample_input[:,top:(top + new_h),left:(left + new_w)] for sample_input in sample['input']]) 113 | output_image = sample['output'][:,top:(top + new_h),left:(left + new_w)] 114 | return {'input': input_images, 'output': output_image} 115 | 116 | class PadSamples(object): 117 | def __init__(self, output_size, ground_truth_pad_value=0.0): 118 | assert isinstance(output_size, (int, tuple)) 119 | if isinstance(output_size, int): 120 | self.output_size = (output_size, output_size) 121 | else: 122 | assert len(output_size) == 2 123 | self.output_size = output_size 124 | self.ground_truth_pad_value = ground_truth_pad_value 125 | 126 | def __call__(self, sample): 127 | h, w = sample['input'].shape[2:4] 128 | new_h, new_w = self.output_size 129 | padh = np.int32(new_h - h) 130 | padw = np.int32(new_w - w) 131 | sample['input'] = torch.stack([torch.from_numpy(np.pad(sample_input.numpy(), ((0,0),(0,padh),(0,padw)), mode="reflect")).float() for sample_input in sample['input']]) 132 | sample['output'] = torch.from_numpy(np.pad(sample['output'].numpy(), ((0,0),(0,padh),(0,padw)), mode="constant", constant_values=self.ground_truth_pad_value)).float() 133 | 134 | return sample 135 | 136 | class RandomSubStack(object): 137 | def __init__(self, output_size): 138 | self.output_size = output_size 139 | 140 | def __call__(self, sample): 141 | sample['input'] = torch.stack([sample['input'][i] for i in np.random.choice(sample['input'].shape[0], self.output_size, replace=False)]) 142 | return sample 143 | -------------------------------------------------------------------------------- /python/ddff/dataproviders/datareaders/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/soyers/ddff-pytorch/78a6c5b5118dd0404f97072d51ca68db0eb79990/python/ddff/dataproviders/datareaders/__init__.py -------------------------------------------------------------------------------- /python/ddff/metricseval/BaseDDFFEval.py: -------------------------------------------------------------------------------- 1 | #! 
/usr/bin/python3 2 | 3 | import numpy as np 4 | import torch 5 | import skimage.filters as skf 6 | 7 | class BaseDDFFEval: 8 | def __init__(self, trainer): 9 | self.trainer = trainer 10 | 11 | def evaluate(self, dataloader, accthrs = [1.25, 1.25**2, 1.25**3], image_size=(383,552)): 12 | avgmetrics = np.zeros((1, 7+len(accthrs)), dtype=float) 13 | for i, data in enumerate(dataloader): 14 | inputs, output = data["input"], data["output"] 15 | if torch.cuda.is_available(): 16 | inputs = inputs.cuda() 17 | output_approx = self.trainer.evaluate(inputs) 18 | metrics = self.__calmetrics(output_approx.permute(0,2,3,1).squeeze().data.cpu().numpy()[:image_size[0],:image_size[1]], output.permute(0,2,3,1).squeeze().numpy()[:image_size[0],:image_size[1]], 1.0, accthrs, bumpinessclip=0.05, ignore_zero=True) 19 | avgmetrics += metrics 20 | return avgmetrics/len(dataloader) 21 | 22 | # Metrics calculation provided by Caner Hazirbas 23 | def __calmetrics(self, pred, target, mse_factor, accthrs, bumpinessclip=0.05, ignore_zero=True): 24 | metrics = np.zeros((1, 7+len(accthrs)), dtype=float) 25 | 26 | if target.sum() == 0: 27 | return metrics 28 | 29 | pred_ = np.copy(pred) 30 | if ignore_zero: 31 | pred_[target==0.0] = 0.0 32 | numPixels = (target>0.0).sum() # number of valid pixels 33 | else: 34 | numPixels = target.size 35 | 36 | #euclidean norm 37 | metrics[0,0] = np.square(pred_-target).sum() / numPixels * mse_factor 38 | 39 | # RMS 40 | metrics[0,1] = np.sqrt(metrics[0,0]) 41 | 42 | # log RMS 43 | logrms = (np.ma.log(pred_)-np.ma.log(target)) 44 | metrics[0,2] = np.sqrt(np.square(logrms).sum() / numPixels) 45 | 46 | # absolute relative 47 | metrics[0,3] = np.ma.divide(np.abs(pred_-target), target).sum() / numPixels 48 | 49 | #square relative 50 | metrics[0,4] = np.ma.divide(np.square(pred_-target), target).sum() / numPixels 51 | 52 | # accuracies 53 | acc = np.ma.maximum(np.ma.divide(pred_,target), np.ma.divide(target, pred_)) 54 | for i, thr in enumerate(accthrs): 55 | metrics[0, 5+i] = (acc < thr).sum() / numPixels * 100. 56 | 57 | # badpix 58 | metrics[0, 8]= (np.abs(pred_-target) > 0.07).sum() / numPixels * 100. 59 | 60 | # bumpiness -- Frobenius norm of the Hessian matrix 61 | diff = np.asarray(pred-target, dtype='float64') # PRED or PRED_ 62 | chn = diff.shape[2] if len(diff.shape) > 2 else 1 63 | bumpiness = np.zeros_like(pred_).astype('float') 64 | for c in range(0,chn): 65 | if chn > 1: 66 | diff_ = diff[:, :, c] 67 | else: 68 | diff_ = diff 69 | dx = skf.scharr_v(diff_) 70 | dy = skf.scharr_h(diff_) 71 | dxx = skf.scharr_v(dx) 72 | dxy = skf.scharr_h(dx) 73 | dyy = skf.scharr_h(dy) 74 | dyx = skf.scharr_v(dy) 75 | hessiannorm = np.sqrt(np.square(dxx) + np.square(dxy) + np.square(dyy) + np.square(dyx)) 76 | bumpiness += np.clip(hessiannorm, 0, bumpinessclip) 77 | bumpiness = bumpiness[target>0].sum() if ignore_zero else bumpiness.sum() 78 | metrics[0, 9] = bumpiness / chn / numPixels * 100. 79 | 80 | return metrics 81 | -------------------------------------------------------------------------------- /python/ddff/metricseval/DDFFEval.py: -------------------------------------------------------------------------------- 1 | #! 
/usr/bin/python3 2 | 3 | import numpy as np 4 | import ddff.dataproviders.datareaders.FocalStackDDFFH5Reader as FocalStackDDFFH5Reader 5 | import ddff.trainers.DDFFTrainer as DDFFTrainer 6 | from ddff.metricseval.BaseDDFFEval import BaseDDFFEval 7 | import torchvision 8 | from torch.utils.data import DataLoader 9 | 10 | class DDFFEval(BaseDDFFEval): 11 | def __init__(self, checkpoint, focal_stack_size=10): 12 | self.trainer = DDFFTrainer.DDFFTrainer.from_checkpoint(checkpoint, focal_stack_size) 13 | super(DDFFEval, self).__init__(self.trainer) 14 | 15 | def evaluate(self, filename_testset, stack_key="stack_val", disp_key="disp_val", image_size=(383,552)): 16 | #Calculate pad size for images 17 | test_pad_size = (np.ceil((image_size[0] / 32)) * 32, np.ceil((image_size[1] / 32)) * 32) #32=2**numPoolings(=5) 18 | #Create test set transforms 19 | transform_test = [FocalStackDDFFH5Reader.FocalStackDDFFH5Reader.ToTensor(), 20 | FocalStackDDFFH5Reader.FocalStackDDFFH5Reader.ClipGroundTruth(0.0202, 0.2825), 21 | FocalStackDDFFH5Reader.FocalStackDDFFH5Reader.PadSamples(test_pad_size), 22 | FocalStackDDFFH5Reader.FocalStackDDFFH5Reader.Normalize(mean_input=[0.485, 0.456, 0.406], std_input=[0.229, 0.224, 0.225])] 23 | transform_test = torchvision.transforms.Compose(transform_test) 24 | #Create dataloader 25 | datareader = FocalStackDDFFH5Reader.FocalStackDDFFH5Reader(filename_testset, transform=transform_test, stack_key=stack_key, disp_key=disp_key) 26 | dataloader = DataLoader(datareader, batch_size=1, shuffle=False, num_workers=0) 27 | return super(DDFFEval, self).evaluate(dataloader) 28 | -------------------------------------------------------------------------------- /python/ddff/metricseval/DDFFTFLearnEval.py: -------------------------------------------------------------------------------- 1 | #! 
/usr/bin/python3
2 | 
3 | import numpy as np
4 | import ddff.dataproviders.datareaders.FocalStackDDFFH5Reader as FocalStackDDFFH5Reader
5 | import ddff.trainers.DDFFTrainer as DDFFTrainer
6 | from ddff.metricseval.BaseDDFFEval import BaseDDFFEval
7 | import torchvision
8 | from torch.utils.data import DataLoader
9 | 
10 | class DDFFTFLearnEval(BaseDDFFEval):
11 |     def __init__(self, checkpoint, focal_stack_size=10, norm_mean=None, norm_std=None):
12 |         trainer = DDFFTrainer.DDFFTrainer.from_tflearn(checkpoint, focal_stack_size)
13 |         self.norm_mean = norm_mean
14 |         self.norm_std = norm_std
15 |         super(DDFFTFLearnEval, self).__init__(trainer)
16 | 
17 |     def evaluate(self, filename_testset, stack_key="stack_val", disp_key="disp_val", image_size=(383,552)):
18 |         #Calculate pad size for images
19 |         test_pad_size = (np.ceil((image_size[0] / 32)) * 32, np.ceil((image_size[1] / 32)) * 32) #32=2**numPoolings(=5)
20 |         #Create test set transforms
21 |         transform_test = [FocalStackDDFFH5Reader.FocalStackDDFFH5Reader.ToTensor(),
22 |                           FocalStackDDFFH5Reader.FocalStackDDFFH5Reader.PadSamples(test_pad_size)]
23 |         if self.norm_mean is not None and self.norm_std is not None:
24 |             transform_test += [FocalStackDDFFH5Reader.FocalStackDDFFH5Reader.Normalize(mean_input=self.norm_mean, std_input=self.norm_std)]
25 |         transform_test = torchvision.transforms.Compose(transform_test)
26 |         #Create dataloader
27 |         datareader = FocalStackDDFFH5Reader.FocalStackDDFFH5Reader(filename_testset, transform=transform_test, stack_key=stack_key, disp_key=disp_key)
28 |         dataloader = DataLoader(datareader, batch_size=1, shuffle=False, num_workers=0)
29 |         return super(DDFFTFLearnEval, self).evaluate(dataloader)
30 | 
--------------------------------------------------------------------------------
/python/ddff/metricseval/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/soyers/ddff-pytorch/78a6c5b5118dd0404f97072d51ca68db0eb79990/python/ddff/metricseval/__init__.py
--------------------------------------------------------------------------------
/python/ddff/models/DDFFNet.py:
--------------------------------------------------------------------------------
1 | #!
/usr/bin/python3 2 | 3 | import torch.nn as nn 4 | import torchvision 5 | import torch 6 | import numpy as np 7 | 8 | class DDFFNet(nn.Module): 9 | def __init__(self, focal_stack_size, output_dims=1, cc1_enabled=False, cc2_enabled=False, cc3_enabled=True, cc4_enabled=False, cc5_enabled=False, bias=False, pretrained='no_bn'): 10 | super(DDFFNet, self).__init__() 11 | self.autoencoder = DDFFAutoEncoder(output_dims, cc1_enabled, cc2_enabled, cc3_enabled, cc4_enabled, cc5_enabled, bias=bias) 12 | self.scoring = nn.Conv2d(focal_stack_size*output_dims, output_dims, 1, bias=False) 13 | #Init weights 14 | self.apply(self.weights_init) 15 | #Update pretrained weights 16 | if pretrained == 'no_bn': 17 | autoencoder_state_dict = self.autoencoder.state_dict() 18 | #Load pretrained dict 19 | pretrained_dict = torchvision.models.vgg16(pretrained=True).features.state_dict() 20 | #Filter and map pretrained dict 21 | pretrained_dict = self.__map_state_dict(pretrained_dict, bias=bias) 22 | #Update model dict 23 | autoencoder_state_dict.update(pretrained_dict) 24 | #Load updated state dict 25 | self.autoencoder.load_state_dict(autoencoder_state_dict) 26 | elif pretrained == 'bn': 27 | autoencoder_state_dict = self.autoencoder.state_dict() 28 | #Load pretrained dict 29 | pretrained_dict = torchvision.models.vgg16_bn(pretrained=True).features.state_dict() 30 | #Filter and map pretrained dict 31 | pretrained_dict = self.__map_state_dict_bn(pretrained_dict, bias=bias) 32 | #Update model dict 33 | autoencoder_state_dict.update(pretrained_dict) 34 | #Load updated state dict 35 | self.autoencoder.load_state_dict(autoencoder_state_dict) 36 | elif pretrained is not None: 37 | autoencoder_state_dict = self.autoencoder.state_dict() 38 | #Load pretrained dict 39 | pretrained_weights = np.load(pretrained, encoding="latin1").item() 40 | #Filter and map pretrained dict 41 | pretrained_dict = self.__map_state_dict_tf(pretrained_weights, bias=bias) 42 | #Update model dict 43 | autoencoder_state_dict.update(pretrained_dict) 44 | #Load updated state dict 45 | self.autoencoder.load_state_dict(autoencoder_state_dict) 46 | 47 | def forward(self, images): 48 | #Encode stacks in batch dimension and calculate features 49 | image_features = self.autoencoder(images.view(-1, *images.shape[2:])) 50 | #Encode stacks in feature dimension again 51 | image_features = image_features.view(images.shape[0], -1, *image_features.shape[2:]) 52 | #Score extracted features 53 | result = self.scoring(image_features) 54 | 55 | return result 56 | 57 | def weights_init(self, m): 58 | classname = m.__class__.__name__ 59 | if classname.find('Conv') != -1: 60 | nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in') 61 | if m.bias is not None: 62 | m.bias.data.fill_(0) 63 | elif classname.find('BatchNorm') != -1: 64 | m.weight.data.normal_(0, 1.0) 65 | m.running_var.normal_(0, 1.0) 66 | m.running_mean.fill_(0) 67 | m.bias.data.fill_(0) 68 | 69 | def __map_state_dict(self, vgg16_features_dict, bias): 70 | layer_mappings = {'0.weight': 'conv1_1.weight', 71 | '2.weight': 'conv1_2.weight', 72 | '5.weight': 'conv2_1.weight', 73 | '7.weight': 'conv2_2.weight', 74 | '10.weight': 'conv3_1.weight', 75 | '12.weight': 'conv3_2.weight', 76 | '14.weight': 'conv3_3.weight', 77 | '17.weight': 'conv4_1.weight', 78 | '19.weight': 'conv4_2.weight', 79 | '21.weight': 'conv4_3.weight', 80 | '24.weight': 'conv5_1.weight', 81 | '26.weight': 'conv5_2.weight', 82 | '28.weight': 'conv5_3.weight'} 83 | if bias: 84 | layer_mappings.update({'0.bias': 'conv1_1.bias', 85 | '2.bias': 
'conv1_2.bias', 86 | '5.bias': 'conv2_1.bias', 87 | '7.bias': 'conv2_2.bias', 88 | '10.bias': 'conv3_1.bias', 89 | '12.bias': 'conv3_2.bias', 90 | '14.bias': 'conv3_3.bias', 91 | '17.bias': 'conv4_1.bias', 92 | '19.bias': 'conv4_2.bias', 93 | '21.bias': 'conv4_3.bias', 94 | '24.bias': 'conv5_1.bias', 95 | '26.bias': 'conv5_2.bias', 96 | '28.bias': 'conv5_3.bias'}) 97 | #Update according to generated mapping 98 | pretrained_dict = {layer_mappings[k]: v for k, v in vgg16_features_dict.items() if k in layer_mappings} 99 | return pretrained_dict 100 | 101 | def __map_state_dict_bn(self, vgg16_features_dict, bias): 102 | layer_mappings = {'0.weight': 'conv1_1.weight', 103 | '1.weight': 'conv1_1_bn.weight', '1.bias': 'conv1_1_bn.bias', '1.running_mean': 'conv1_1_bn.running_mean', '1.running_var': 'conv1_1_bn.running_var', 104 | '3.weight': 'conv1_2.weight', 105 | '4.weight': 'conv1_2_bn.weight', '4.bias': 'conv1_2_bn.bias', '4.running_mean': 'conv1_2_bn.running_mean', '4.running_var': 'conv1_2_bn.running_var', 106 | '7.weight': 'conv2_1.weight', 107 | '8.weight': 'conv2_1_bn.weight', '8.bias': 'conv2_1_bn.bias', '8.running_mean': 'conv2_1_bn.running_mean', '8.running_var': 'conv2_1_bn.running_var', 108 | '10.weight': 'conv2_2.weight', 109 | '11.weight': 'conv2_2_bn.weight', '11.bias': 'conv2_2_bn.bias', '11.running_mean': 'conv2_2_bn.running_mean', '11.running_var': 'conv2_2_bn.running_var', 110 | '14.weight': 'conv3_1.weight', 111 | '15.weight': 'conv3_1_bn.weight', '15.bias': 'conv3_1_bn.bias', '15.running_mean': 'conv3_1_bn.running_mean', '15.running_var': 'conv3_1_bn.running_var', 112 | '17.weight': 'conv3_2.weight', 113 | '18.weight': 'conv3_2_bn.weight', '18.bias': 'conv3_2_bn.bias', '18.running_mean': 'conv3_2_bn.running_mean', '18.running_var': 'conv3_2_bn.running_var', 114 | '20.weight': 'conv3_3.weight', 115 | '21.weight': 'conv3_3_bn.weight', '21.bias': 'conv3_3_bn.bias', '21.running_mean': 'conv3_3_bn.running_mean', '21.running_var': 'conv3_3_bn.running_var', 116 | '24.weight': 'conv4_1.weight', 117 | '25.weight': 'conv4_1_bn.weight', '25.bias': 'conv4_1_bn.bias', '25.running_mean': 'conv4_1_bn.running_mean', '25.running_var': 'conv4_1_bn.running_var', 118 | '27.weight': 'conv4_2.weight', 119 | '28.weight': 'conv4_2_bn.weight', '28.bias': 'conv4_2_bn.bias', '28.running_mean': 'conv4_2_bn.running_mean', '28.running_var': 'conv4_2_bn.running_var', 120 | '30.weight': 'conv4_3.weight', 121 | '31.weight': 'conv4_3_bn.weight', '31.bias': 'conv4_3_bn.bias', '31.running_mean': 'conv4_3_bn.running_mean', '31.running_var': 'conv4_3_bn.running_var', 122 | '34.weight': 'conv5_1.weight', 123 | '35.weight': 'conv5_1_bn.weight', '35.bias': 'conv5_1_bn.bias', '35.running_mean': 'conv5_1_bn.running_mean', '35.running_var': 'conv5_1_bn.running_var', 124 | '37.weight': 'conv5_2.weight', 125 | '38.weight': 'conv5_2_bn.weight', '38.bias': 'conv5_2_bn.bias', '38.running_mean': 'conv5_2_bn.running_mean', '38.running_var': 'conv5_2_bn.running_var', 126 | '40.weight': 'conv5_3.weight', 127 | '41.weight': 'conv5_3_bn.weight', '41.bias': 'conv5_3_bn.bias', '41.running_mean': 'conv5_3_bn.running_mean', '41.running_var': 'conv5_3_bn.running_var'} 128 | if bias: 129 | layer_mappings.update({'0.bias': 'conv1_1.bias', 130 | '3.bias': 'conv1_2.bias', 131 | '7.bias': 'conv2_1.bias', 132 | '10.bias': 'conv2_2.bias', 133 | '14.bias': 'conv3_1.bias', 134 | '17.bias': 'conv3_2.bias', 135 | '20.bias': 'conv3_3.bias', 136 | '24.bias': 'conv4_1.bias', 137 | '27.bias': 'conv4_2.bias', 138 | '30.bias': 'conv4_3.bias', 139 | 
'34.bias': 'conv5_1.bias', 140 | '37.bias': 'conv5_2.bias', 141 | '40.bias': 'conv5_3.bias' 142 | }) 143 | #Update according to generated mapping 144 | pretrained_dict = {layer_mappings[k]: v for k, v in vgg16_features_dict.items() if k in layer_mappings} 145 | return pretrained_dict 146 | 147 | def __map_state_dict_tf(self, vgg16_features, bias): 148 | pretrained_dict = { 149 | 'conv1_1.weight': torch.from_numpy(vgg16_features['conv1_1'][0].transpose((3, 2, 0, 1))).float(), 150 | 'conv1_2.weight': torch.from_numpy(vgg16_features['conv1_2'][0].transpose((3, 2, 0, 1))).float(), 151 | 'conv2_1.weight': torch.from_numpy(vgg16_features['conv2_1'][0].transpose((3, 2, 0, 1))).float(), 152 | 'conv2_2.weight': torch.from_numpy(vgg16_features['conv2_2'][0].transpose((3, 2, 0, 1))).float(), 153 | 'conv3_1.weight': torch.from_numpy(vgg16_features['conv3_1'][0].transpose((3, 2, 0, 1))).float(), 154 | 'conv3_2.weight': torch.from_numpy(vgg16_features['conv3_2'][0].transpose((3, 2, 0, 1))).float(), 155 | 'conv3_3.weight': torch.from_numpy(vgg16_features['conv3_3'][0].transpose((3, 2, 0, 1))).float(), 156 | 'conv4_1.weight': torch.from_numpy(vgg16_features['conv4_1'][0].transpose((3, 2, 0, 1))).float(), 157 | 'conv4_2.weight': torch.from_numpy(vgg16_features['conv4_2'][0].transpose((3, 2, 0, 1))).float(), 158 | 'conv4_3.weight': torch.from_numpy(vgg16_features['conv4_3'][0].transpose((3, 2, 0, 1))).float(), 159 | 'conv5_1.weight': torch.from_numpy(vgg16_features['conv5_1'][0].transpose((3, 2, 0, 1))).float(), 160 | 'conv5_2.weight': torch.from_numpy(vgg16_features['conv5_2'][0].transpose((3, 2, 0, 1))).float(), 161 | 'conv5_3.weight': torch.from_numpy(vgg16_features['conv5_3'][0].transpose((3, 2, 0, 1))).float(), 162 | } 163 | if bias: 164 | pretrained_dict.update({ 165 | 'conv1_1.bias': torch.from_numpy(vgg16_features['conv1_1'][1]).float(), 166 | 'conv1_2.bias': torch.from_numpy(vgg16_features['conv1_2'][1]).float(), 167 | 'conv2_1.bias': torch.from_numpy(vgg16_features['conv2_1'][1]).float(), 168 | 'conv2_2.bias': torch.from_numpy(vgg16_features['conv2_2'][1]).float(), 169 | 'conv3_1.bias': torch.from_numpy(vgg16_features['conv3_1'][1]).float(), 170 | 'conv3_2.bias': torch.from_numpy(vgg16_features['conv3_2'][1]).float(), 171 | 'conv3_3.bias': torch.from_numpy(vgg16_features['conv3_3'][1]).float(), 172 | 'conv4_1.bias': torch.from_numpy(vgg16_features['conv4_1'][1]).float(), 173 | 'conv4_2.bias': torch.from_numpy(vgg16_features['conv4_2'][1]).float(), 174 | 'conv4_3.bias': torch.from_numpy(vgg16_features['conv4_3'][1]).float(), 175 | 'conv5_1.bias': torch.from_numpy(vgg16_features['conv5_1'][1]).float(), 176 | 'conv5_2.bias': torch.from_numpy(vgg16_features['conv5_2'][1]).float(), 177 | 'conv5_3.bias': torch.from_numpy(vgg16_features['conv5_3'][1]).float() 178 | }) 179 | return pretrained_dict 180 | 181 | class DDFFAutoEncoder(nn.Module): 182 | """Create model from VGG_16 by deleting the classifier layer.""" 183 | def __init__(self, output_dims, cc1_enabled, cc2_enabled, cc3_enabled, cc4_enabled, cc5_enabled, bias=False): 184 | super(DDFFAutoEncoder, self).__init__() 185 | #Save parameters 186 | self.output_dims = output_dims 187 | self.cc1_enabled = cc1_enabled 188 | self.cc2_enabled = cc2_enabled 189 | self.cc3_enabled = cc3_enabled 190 | self.cc4_enabled = cc4_enabled 191 | self.cc5_enabled = cc5_enabled 192 | 193 | #Encoder 194 | self.conv1_1 = nn.Conv2d(3, 64, 3, padding=1, bias=bias) 195 | self.conv1_1_bn = nn.BatchNorm2d(64, eps=0.001) 196 | self.conv1_2 = nn.Conv2d(64, 64, 3, padding=1, 
bias=bias) 197 | self.conv1_2_bn = nn.BatchNorm2d(64, eps=0.001) 198 | self.pool1 = nn.MaxPool2d(2, stride=2) 199 | self.conv2_1 = nn.Conv2d(64, 128, 3, padding=1, bias=bias) 200 | self.conv2_1_bn = nn.BatchNorm2d(128, eps=0.001) 201 | self.conv2_2 = nn.Conv2d(128, 128 , 3, padding=1, bias=bias) 202 | self.conv2_2_bn = nn.BatchNorm2d(128, eps=0.001) 203 | self.pool2 = nn.MaxPool2d(2, stride=2) 204 | self.conv3_1 = nn.Conv2d(128, 256, 3, padding=1, bias=bias) 205 | self.conv3_1_bn = nn.BatchNorm2d(256, eps=0.001) 206 | self.conv3_2 = nn.Conv2d(256, 256, 3, padding=1, bias=bias) 207 | self.conv3_2_bn = nn.BatchNorm2d(256, eps=0.001) 208 | self.conv3_3 = nn.Conv2d(256, 256, 3, padding=1, bias=bias) 209 | self.conv3_3_bn = nn.BatchNorm2d(256, eps=0.001) 210 | self.pool3 = nn.MaxPool2d(2, stride=2) 211 | self.encdrop3 = nn.Dropout(p=0.5) 212 | self.conv4_1 = nn.Conv2d(256, 512, 3, padding=1, bias=bias) 213 | self.conv4_1_bn = nn.BatchNorm2d(512, eps=0.001) 214 | self.conv4_2 = nn.Conv2d(512, 512, 3, padding=1, bias=bias) 215 | self.conv4_2_bn = nn.BatchNorm2d(512, eps=0.001) 216 | self.conv4_3 = nn.Conv2d(512, 512, 3, padding=1, bias=bias) 217 | self.conv4_3_bn = nn.BatchNorm2d(512, eps=0.001) 218 | self.pool4 = nn.MaxPool2d(2, stride=2) 219 | self.encdrop4 = nn.Dropout(p=0.5) 220 | self.conv5_1 = nn.Conv2d(512, 512, 3, padding=1, bias=bias) 221 | self.conv5_1_bn = nn.BatchNorm2d(512, eps=0.001) 222 | self.conv5_2 = nn.Conv2d(512, 512, 3, padding=1, bias=bias) 223 | self.conv5_2_bn = nn.BatchNorm2d(512, eps=0.001) 224 | self.conv5_3 = nn.Conv2d(512, 512, 3, padding=1, bias=bias) 225 | self.conv5_3_bn = nn.BatchNorm2d(512, eps=0.001) 226 | self.pool5 = nn.MaxPool2d(2, stride=2) 227 | self.encdrop5 = nn.Dropout(p=0.5) 228 | 229 | #Decoder 230 | self.upconv5 = nn.ConvTranspose2d(512, 512, 4, padding=1, stride=2, bias=False) 231 | if self.cc5_enabled: 232 | self.conv5_3_D = nn.Conv2d(1024, 512, 3, padding=1, bias=bias) 233 | else: 234 | self.conv5_3_D = nn.Conv2d(512, 512, 3, padding=1, bias=bias) 235 | self.conv5_3_D_bn = nn.BatchNorm2d(512, eps=0.001) 236 | self.conv5_2_D = nn.Conv2d(512, 512, 3, padding=1, bias=bias) 237 | self.conv5_2_D_bn = nn.BatchNorm2d(512, eps=0.001) 238 | self.conv5_1_D = nn.Conv2d(512, 512, 3, padding=1, bias=bias) 239 | self.conv5_1_D_bn = nn.BatchNorm2d(512, eps=0.001) 240 | self.decdrop5 = nn.Dropout(p=0.5) 241 | 242 | self.upconv4 = nn.ConvTranspose2d(512, 512, 4, padding=1, stride=2, bias=False) 243 | if self.cc4_enabled: 244 | self.conv4_3_D = nn.Conv2d(1024, 512, 3, padding=1, bias=bias) 245 | else: 246 | self.conv4_3_D = nn.Conv2d(512, 512, 3, padding=1, bias=bias) 247 | self.conv4_3_D_bn = nn.BatchNorm2d(512, eps=0.001) 248 | self.conv4_2_D = nn.Conv2d(512, 512, 3, padding=1, bias=bias) 249 | self.conv4_2_D_bn = nn.BatchNorm2d(512, eps=0.001) 250 | self.conv4_1_D = nn.Conv2d(512, 256, 3, padding=1, bias=bias) 251 | self.conv4_1_D_bn = nn.BatchNorm2d(256, eps=0.001) 252 | self.decdrop4 = nn.Dropout(p=0.5) 253 | 254 | self.upconv3 = nn.ConvTranspose2d(256, 256, 4, padding=1, stride=2, bias=False) 255 | if self.cc3_enabled: 256 | self.conv3_3_D = nn.Conv2d(512, 256, 3, padding=1, bias=bias) 257 | else: 258 | self.conv3_3_D = nn.Conv2d(256, 256, 3, padding=1, bias=bias) 259 | self.conv3_3_D_bn = nn.BatchNorm2d(256, eps=0.001) 260 | self.conv3_2_D = nn.Conv2d(256, 256, 3, padding=1, bias=bias) 261 | self.conv3_2_D_bn = nn.BatchNorm2d(256, eps=0.001) 262 | self.conv3_1_D = nn.Conv2d(256, 128, 3, padding=1, bias=bias) 263 | self.conv3_1_D_bn = nn.BatchNorm2d(128, 
eps=0.001) 264 | self.decdrop3 = nn.Dropout(p=0.5) 265 | 266 | self.upconv2 = nn.ConvTranspose2d(128, 128, 4, padding=1, stride=2, bias=False) 267 | if self.cc2_enabled: 268 | self.conv2_2_D = nn.Conv2d(256, 128, 3, padding=1, bias=bias) 269 | else: 270 | self.conv2_2_D = nn.Conv2d(128, 128, 3, padding=1, bias=bias) 271 | self.conv2_2_D_bn = nn.BatchNorm2d(128, eps=0.001) 272 | self.conv2_1_D = nn.Conv2d(128, 64, 3, padding=1, bias=bias) 273 | self.conv2_1_D_bn = nn.BatchNorm2d(64, eps=0.001) 274 | 275 | self.upconv1 = nn.ConvTranspose2d(64, 64, 4, padding=1, stride=2, bias=False) 276 | if self.cc1_enabled: 277 | self.conv1_2_D = nn.Conv2d(128, 64, 3, padding=1, bias=bias) 278 | else: 279 | self.conv1_2_D = nn.Conv2d(64, 64, 3, padding=1, bias=bias) 280 | self.conv1_2_D_bn = nn.BatchNorm2d(64, eps=0.001) 281 | self.conv1_1_D = nn.Conv2d(64, self.output_dims, 3, padding=1, bias=bias) 282 | self.conv1_1_D_bn = nn.BatchNorm2d(self.output_dims, eps=0.001) 283 | 284 | def forward(self, x): 285 | #Encoder 286 | x = nn.functional.relu(self.conv1_1_bn(self.conv1_1(x))) 287 | cc1 = nn.functional.relu(self.conv1_2_bn(self.conv1_2(x))) 288 | x = self.pool1(cc1) 289 | x = nn.functional.relu(self.conv2_1_bn(self.conv2_1(x))) 290 | cc2 = nn.functional.relu(self.conv2_2_bn(self.conv2_2(x))) 291 | x = self.pool2(cc2) 292 | x = nn.functional.relu(self.conv3_1_bn(self.conv3_1(x))) 293 | x = nn.functional.relu(self.conv3_2_bn(self.conv3_2(x))) 294 | cc3 = nn.functional.relu(self.conv3_3_bn(self.conv3_3(x))) 295 | x = self.pool3(cc3) 296 | x = self.encdrop3(x) 297 | x = nn.functional.relu(self.conv4_1_bn(self.conv4_1(x))) 298 | x = nn.functional.relu(self.conv4_2_bn(self.conv4_2(x))) 299 | cc4 = nn.functional.relu(self.conv4_3_bn(self.conv4_3(x))) 300 | x = self.pool4(cc4) 301 | x = self.encdrop4(x) 302 | x = nn.functional.relu(self.conv5_1_bn(self.conv5_1(x))) 303 | x = nn.functional.relu(self.conv5_2_bn(self.conv5_2(x))) 304 | cc5 = nn.functional.relu(self.conv5_3_bn(self.conv5_3(x))) 305 | x = self.pool5(cc5) 306 | x = self.encdrop5(x) 307 | 308 | #Decoder 309 | x = self.upconv5(x) 310 | if self.cc5_enabled: 311 | x = torch.cat([x, cc5], 1) 312 | x = nn.functional.relu(self.conv5_3_D_bn(self.conv5_3_D(x))) 313 | x = nn.functional.relu(self.conv5_2_D_bn(self.conv5_2_D(x))) 314 | x = nn.functional.relu(self.conv5_1_D_bn(self.conv5_1_D(x))) 315 | x = self.decdrop5(x) 316 | x = self.upconv4(x) 317 | if self.cc4_enabled: 318 | x = torch.cat([x, cc4], 1) 319 | x = nn.functional.relu(self.conv4_3_D_bn(self.conv4_3_D(x))) 320 | x = nn.functional.relu(self.conv4_2_D_bn(self.conv4_2_D(x))) 321 | x = nn.functional.relu(self.conv4_1_D_bn(self.conv4_1_D(x))) 322 | x = self.decdrop4(x) 323 | x = self.upconv3(x) 324 | if self.cc3_enabled: 325 | x = torch.cat([x, cc3], 1) 326 | x = nn.functional.relu(self.conv3_3_D_bn(self.conv3_3_D(x))) 327 | x = nn.functional.relu(self.conv3_2_D_bn(self.conv3_2_D(x))) 328 | x = nn.functional.relu(self.conv3_1_D_bn(self.conv3_1_D(x))) 329 | x = self.decdrop3(x) 330 | x = self.upconv2(x) 331 | if self.cc2_enabled: 332 | x = torch.cat([x, cc2], 1) 333 | x = nn.functional.relu(self.conv2_2_D_bn(self.conv2_2_D(x))) 334 | x = nn.functional.relu(self.conv2_1_D_bn(self.conv2_1_D(x))) 335 | x = self.upconv1(x) 336 | if self.cc1_enabled: 337 | x = torch.cat([x, cc1], 1) 338 | x = nn.functional.relu(self.conv1_2_D_bn(self.conv1_2_D(x))) 339 | x = nn.functional.relu(self.conv1_1_D_bn(self.conv1_1_D(x))) 340 | return x 341 | 
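A minimal usage sketch (not part of the repository sources) of how a focal stack flows through DDFFNet: the 5-D input layout (batch, stack, channels, height, width) and the requirement that height and width be multiples of 32 follow from the view() calls in forward() and the five pooling/up-convolution stages above. pretrained=None is passed here only to skip downloading VGG16 weights for the illustration; the weights are random, so only the tensor shapes are meaningful.

import torch
from ddff.models.DDFFNet import DDFFNet

net = DDFFNet(focal_stack_size=10, pretrained=None)  # random init, no VGG16 download
stack = torch.randn(1, 10, 3, 256, 256)              # one focal stack of 10 RGB slices
with torch.no_grad():
    disparity = net(stack)                           # slices are fused by the 1x1 scoring conv
print(disparity.shape)                               # torch.Size([1, 1, 256, 256])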
-------------------------------------------------------------------------------- /python/ddff/models/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/soyers/ddff-pytorch/78a6c5b5118dd0404f97072d51ca68db0eb79990/python/ddff/models/__init__.py -------------------------------------------------------------------------------- /python/ddff/trainers/BaseTrainer.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/python3 2 | 3 | import os 4 | import torch 5 | from torch import optim 6 | 7 | class BaseTrainer: 8 | def __init__(self, model, optimizer, training_loss, deterministic, scheduler=None, supervised=True): 9 | self.deterministic = deterministic 10 | if deterministic: 11 | self.__set_deterministic() 12 | self.model = model 13 | if torch.cuda.is_available(): 14 | self.model.cuda() 15 | self.optimizer = optimizer 16 | self.training_loss = training_loss 17 | self.scheduler = scheduler 18 | self.supervised = supervised 19 | 20 | if not os.path.exists('checkpoints'): 21 | os.makedirs('checkpoints') 22 | 23 | def create_optimizer(self, net, optimizer_params): 24 | if optimizer_params["algorithm"] == 'sgd': 25 | return optim.SGD( 26 | filter(lambda p: p.requires_grad, net.parameters()), 27 | lr=optimizer_params["learning_rate"] if "learning_rate" in optimizer_params else 0.001, 28 | momentum=optimizer_params["momentum"] if "momentum" in optimizer_params else 0.9, 29 | weight_decay=optimizer_params["weight_decay"] if "weight_decay" in optimizer_params else 0.0005) 30 | elif optimizer_params["algorithm"] == 'adam': 31 | return optim.Adam( 32 | filter(lambda p: p.requires_grad, net.parameters()), 33 | lr=optimizer_params["learning_rate"] if "learning_rate" in optimizer_params else 0.001, 34 | weight_decay=optimizer_params["weight_decay"] if "weight_decay" in optimizer_params else 0.0005) 35 | else: 36 | return optim.SGD( 37 | filter(lambda p: p.requires_grad, net.parameters()), 38 | lr=0.001, 39 | momentum=0.9, 40 | weight_decay=0.0005) 41 | 42 | 43 | def __set_deterministic(self): 44 | import random 45 | import numpy as np 46 | #Set RNG seeds 47 | torch.manual_seed(42) 48 | torch.cuda.manual_seed_all(42) 49 | random.seed(42) 50 | np.random.seed(42) 51 | #Make results deterministic by disabling undeterministic functions in cuDNN 52 | torch.backends.cudnn.deterministic = True 53 | 54 | def set_supervised(self, supervised): 55 | self.supervised = supervised 56 | 57 | def set_training_loss(self, training_loss): 58 | self.training_loss = training_loss 59 | 60 | def train(self, dataloader, epochs, print_frequency=50, max_gradient=None, checkpoint_file=None, checkpoint_frequency=50): 61 | #Train model 62 | self.model.train() 63 | #Create list to keep track of losses foreach epoch 64 | epoch_losses = [] 65 | #Run trainign loop 66 | for epoch in range(epochs): 67 | epoch_loss = 0.0 68 | running_loss = 0.0 69 | for i, data in enumerate(dataloader): 70 | #Zero the parameter gradients 71 | self.optimizer.zero_grad() 72 | 73 | #Get the inputs 74 | inputs = data['input'] 75 | #Copy inputs to GPU 76 | if torch.cuda.is_available(): 77 | if isinstance(inputs, list): 78 | inputs = [element.cuda() for element in inputs] 79 | else: 80 | inputs = inputs.cuda() 81 | 82 | #Forward 83 | if isinstance(inputs, list): 84 | output_approx = self.model(*inputs) 85 | else: 86 | output_approx = self.model(inputs) 87 | 88 | if self.supervised: 89 | #Get the inputs 90 | outputs = data['output'] 91 | #Copy 
outputs to GPU 92 | if torch.cuda.is_available(): 93 | if isinstance(outputs, list): 94 | outputs = [element.cuda() for element in outputs] 95 | else: 96 | outputs = outputs.cuda() 97 | 98 | if isinstance(outputs, list): 99 | loss = self.training_loss(*output_approx, *outputs) 100 | else: 101 | loss = self.training_loss(output_approx, outputs) 102 | else: 103 | #Calculate loss 104 | if isinstance(inputs, list): 105 | loss = self.training_loss(*output_approx, *inputs) 106 | else: 107 | loss = self.training_loss(output_approx, inputs) 108 | 109 | #Backward 110 | loss.backward() 111 | 112 | #Clip gradients 113 | if max_gradient is not None: 114 | torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_gradient, norm_type=2) 115 | 116 | #Optimize 117 | self.optimizer.step() 118 | 119 | #Store epoch loss 120 | epoch_loss += loss.item() 121 | 122 | #Print statistics 123 | running_loss += loss.item() 124 | if i % print_frequency == print_frequency-1: # print every print_frequency mini-batches 125 | print('[%d, %5d] loss: ' % 126 | (epoch + 1, i + 1) + str(running_loss / print_frequency)) 127 | running_loss = 0.0 128 | 129 | #Save checkpoint 130 | if checkpoint_file is not None and epoch % checkpoint_frequency == checkpoint_frequency-1: 131 | self.save_checkpoint(checkpoint_file, epoch=(epoch+1), save_optimizer=True) 132 | 133 | #Save loss of epoch 134 | epoch_losses += [epoch_loss/len(dataloader)] 135 | 136 | #Update learning rate based on defined schedule 137 | if self.scheduler is not None: 138 | self.scheduler.step() 139 | #Save final checkpoint 140 | if checkpoint_file is not None: 141 | self.save_checkpoint(checkpoint_file, epoch=epochs, save_optimizer=True) 142 | print("Training finished") 143 | return epoch_losses 144 | 145 | def evaluate(self, inputs): 146 | #Set model to eval mode in order to disable dropout 147 | self.model.eval() 148 | inputs.requires_grad = False 149 | return self.model(inputs) 150 | 151 | 152 | def save_checkpoint(self, filename, epoch=None, save_optimizer=True): 153 | state = {'state_dict': self.model.state_dict()} 154 | if save_optimizer: 155 | state['optimizer'] = self.optimizer.state_dict() 156 | if epoch is not None: 157 | state['epoch'] = epoch 158 | torch.save(state, 'checkpoints/'+filename) 159 | 160 | def load_checkpoint(self, filename, load_optimizer=True, load_scheduler=True): 161 | #Load model to cpu 162 | checkpoint = torch.load(filename, map_location=lambda storage, location: storage) 163 | self.model.load_state_dict(checkpoint['state_dict']) 164 | #Upload model to GPU 165 | if torch.cuda.is_available(): 166 | self.model.cuda() 167 | if load_optimizer: 168 | self.optimizer.load_state_dict(checkpoint['optimizer']) 169 | if load_scheduler and self.scheduler is not None and 'epoch' in checkpoint: 170 | self.scheduler.last_epoch = checkpoint['epoch'] 171 | if 'epoch' in checkpoint: 172 | return checkpoint['epoch'] 173 | -------------------------------------------------------------------------------- /python/ddff/trainers/DDFFTrainer.py: -------------------------------------------------------------------------------- 1 | #! 
/usr/bin/python3 2 | 3 | import os 4 | import numpy as np 5 | import torch 6 | import torch.nn as nn 7 | import torchvision 8 | from torch import optim 9 | from torch.utils.data import DataLoader 10 | import ddff.models.DDFFNet as DDFFNet 11 | import ddff.dataproviders.datareaders.FocalStackDDFFH5Reader as FocalStackDDFFH5Reader 12 | from ddff.trainers.BaseTrainer import BaseTrainer 13 | 14 | class DDFFTrainer(BaseTrainer): 15 | def __init__(self, stack_size, learning_rate=0.001, cliprange=[0.0202, 0.2825], 16 | cc1_enabled=False, 17 | cc2_enabled=False, 18 | cc3_enabled=True, 19 | cc4_enabled=False, 20 | cc5_enabled=False, 21 | pretrained='no_bn', 22 | scheduler_step_size=4, 23 | scheduler_gama=0.9, 24 | deterministic=False, 25 | optimizer='sgd', 26 | normalize_loss=False): 27 | #Define model 28 | net = DDFFNet.DDFFNet(stack_size, cc1_enabled=cc1_enabled, cc2_enabled=cc2_enabled, cc3_enabled=cc3_enabled, cc4_enabled=cc4_enabled, cc5_enabled=cc5_enabled, pretrained=pretrained) 29 | #Define optimizer 30 | if optimizer == 'sgd': 31 | opt = self.create_optimizer(net, {"algorithm":'sgd', "learning_rate":learning_rate, "weight_decay": 0.0005, "momentum":0.9}) 32 | else: 33 | opt = self.create_optimizer(net, {"algorithm":'adam', "learning_rate":learning_rate, "weight_decay": 0.0005}) 34 | #Define scheduler 35 | scheduler = optim.lr_scheduler.StepLR(opt, step_size=scheduler_step_size, gamma=scheduler_gama) 36 | #Define training loss 37 | training_loss = self.MaskedLoss(nn.MSELoss(reduction="elementwise_mean" if normalize_loss else "sum"), valid_cond=lambda x : x >= cliprange[0]) 38 | 39 | #Call parent constructor 40 | super(DDFFTrainer, self).__init__(net, opt, training_loss, deterministic, scheduler=scheduler) 41 | 42 | @classmethod 43 | def from_h5_data(cls,root_dir, 44 | learning_rate=0.001, 45 | cc1_enabled=False, 46 | cc2_enabled=False, 47 | cc3_enabled=True, 48 | cc4_enabled=False, 49 | cc5_enabled=False, 50 | training_crop_size=None, 51 | validation_crop_size=None, 52 | pretrained='no_bn', 53 | normalize_mean=[0.485, 0.456, 0.406], 54 | normalize_std=[0.229, 0.224, 0.225], 55 | scheduler_step_size=4, 56 | scheduler_gama=0.9, 57 | max_gradient=5.0, 58 | deterministic=False, 59 | optimizer='sgd', 60 | normalize_loss=False, 61 | epochs=20, 62 | batch_size=2, 63 | num_workers=4, 64 | checkpoint_file=None, 65 | checkpoint_frequency=50): 66 | #Create data loaders 67 | transform_train = cls.__create_preprocessing(cls, crop_size=training_crop_size, mean=normalize_mean, std=normalize_std) 68 | transform_validation = cls.__create_preprocessing(cls, crop_size=validation_crop_size, mean=normalize_mean, std=normalize_std) 69 | #Create h5 reader 70 | dataset_train = FocalStackDDFFH5Reader.FocalStackDDFFH5Reader(root_dir, transform=transform_train, stack_key="stack_train", disp_key="disp_train") 71 | dataset_validation = FocalStackDDFFH5Reader.FocalStackDDFFH5Reader(root_dir, transform=transform_validation, stack_key="stack_val", disp_key="disp_val") 72 | #Create data loader 73 | dataloader_train = DataLoader(dataset_train, batch_size=batch_size, shuffle=True, num_workers=num_workers) 74 | dataloader_validation = DataLoader(dataset_validation, batch_size=1, shuffle=True, num_workers=0) 75 | #Call constructor 76 | instance = cls(dataset_train.get_stack_size(), learning_rate=learning_rate, 77 | cc1_enabled=cc1_enabled, 78 | cc2_enabled=cc2_enabled, 79 | cc3_enabled=cc3_enabled, 80 | cc4_enabled=cc4_enabled, 81 | cc5_enabled=cc5_enabled, 82 | pretrained=pretrained, 83 | 
scheduler_step_size=scheduler_step_size, 84 | scheduler_gama=scheduler_gama, 85 | deterministic=deterministic, 86 | optimizer=optimizer, 87 | normalize_loss=normalize_loss) 88 | 89 | #Save instances 90 | instance.dataloader_validation = dataloader_validation 91 | 92 | #Load checkpoint if ther already exists a file 93 | if os.path.isfile(checkpoint_file): 94 | start_epoch = instance.load_checkpoint(checkpoint_file) 95 | if start_epoch is None: 96 | start_epoch = 0 97 | else: 98 | start_epoch = 0 99 | 100 | #Fit instance 101 | epoch_losses = instance.train(dataloader_train, epochs, checkpoint_file=checkpoint_file, checkpoint_frequency=checkpoint_frequency, max_gradient=max_gradient) 102 | print("Losses per epoch: " + str(epoch_losses)) 103 | 104 | return instance 105 | 106 | @classmethod 107 | def from_checkpoint(cls, checkpoint_file, stack_size, 108 | cc1_enabled=False, 109 | cc2_enabled=False, 110 | cc3_enabled=True, 111 | cc4_enabled=False, 112 | cc5_enabled=False, 113 | deterministic=False, 114 | optimizer='sgd', 115 | normalize_loss=False): 116 | #Call constructor 117 | instance = cls(stack_size, 118 | cc1_enabled=cc1_enabled, 119 | cc2_enabled=cc2_enabled, 120 | cc3_enabled=cc3_enabled, 121 | cc4_enabled=cc4_enabled, 122 | cc5_enabled=cc5_enabled, 123 | deterministic=deterministic, 124 | optimizer=optimizer, 125 | normalize_loss=normalize_loss) 126 | 127 | #Load checkpoint 128 | instance.load_checkpoint(checkpoint_file) 129 | 130 | return instance 131 | 132 | @classmethod 133 | def from_tflearn(cls, checkpoint_file, stack_size, 134 | cc1_enabled=False, 135 | cc2_enabled=False, 136 | cc3_enabled=True, 137 | cc4_enabled=False, 138 | cc5_enabled=False, 139 | deterministic=False, 140 | optimizer='sgd'): 141 | #Call constructor 142 | instance = cls(stack_size, 143 | cc1_enabled=cc1_enabled, 144 | cc2_enabled=cc2_enabled, 145 | cc3_enabled=cc3_enabled, 146 | cc4_enabled=cc4_enabled, 147 | cc5_enabled=cc5_enabled, 148 | deterministic=deterministic, 149 | optimizer=optimizer, 150 | pretrained=None) 151 | 152 | #Load checkpoint 153 | instance.load_tflearn(checkpoint_file) 154 | 155 | return instance 156 | 157 | def load_tflearn(self, checkpoint_file): 158 | #Load dict 159 | pretrained_dict = np.load(checkpoint_file) 160 | #Update according to generated mapping 161 | pretrained_dict = {self.__translate_tflearn_key(k): v for k, v in pretrained_dict.items()} 162 | #Transpose all weight tensors since tflearn stores them transposed 163 | # Tensorflow 2D Conv layer: h * w * in_channels * out_channels 164 | # PyTorch 2D Conv layer: out_channels * in_channels * h * w 165 | #Same logic was also implemented in https://github.com/ruotianluo/pytorch-mobilenet-from-tf/blob/master/convert.py 166 | pretrained_dict = {k:(v.transpose((3, 2, 0, 1)) if (k.startswith("conv") or k.startswith("upconv")) and v.ndim == 4 else v) for k, v in pretrained_dict.items()} 167 | pretrained_dict = {("scoring" + k[len("conv_disp"):] if k.startswith("conv_disp") else "autoencoder." 
+ k):v for k, v in pretrained_dict.items()} 168 | #Convert weight arrays to torch tensors 169 | pretrained_dict = {k:torch.from_numpy(v).float() for k, v in pretrained_dict.items()} 170 | #Load weights 171 | model_state_dict = self.model.state_dict() 172 | model_state_dict.update(pretrained_dict) 173 | self.model.load_state_dict(model_state_dict) 174 | 175 | def __translate_tflearn_key(self, key): 176 | if key.endswith("/W:0"): 177 | return key[:-len("/W:0")] + ".weight" 178 | if key.endswith("/up_filter:0"): 179 | return key[:-len("/up_filter:0")] + ".weight" 180 | if key.endswith("/gamma:0"): 181 | return key[:-len("/gamma:0")] + ".weight" 182 | if key.endswith("/beta:0"): 183 | return key[:-len("/beta:0")] + ".bias" 184 | if key.endswith("/moving_mean:0"): 185 | return key[:-len("/moving_mean:0")] + ".running_mean" 186 | if key.endswith("/moving_variance:0"): 187 | return key[:-len("/moving_variance:0")] + ".running_var" 188 | 189 | def __create_preprocessing(self, crop_size=None, cliprange=[0.0202, 0.2825], mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]): 190 | transform = [FocalStackDDFFH5Reader.FocalStackDDFFH5Reader.ToTensor()] 191 | if cliprange is not None: 192 | transform += [FocalStackDDFFH5Reader.FocalStackDDFFH5Reader.ClipGroundTruth(cliprange[0], cliprange[1])] 193 | if crop_size is not None: 194 | transform += [FocalStackDDFFH5Reader.FocalStackDDFFH5Reader.RandomCrop(crop_size)] 195 | if mean is not None and std is not None: 196 | transform += [FocalStackDDFFH5Reader.FocalStackDDFFH5Reader.Normalize(mean_input=mean, std_input=std)] 197 | transform = torchvision.transforms.Compose(transform) 198 | return transform 199 | 200 | def create_validation_loader(self): 201 | try: 202 | return self.dataloader_validation 203 | except AttributeError: 204 | return None 205 | 206 | class MaskedLoss(nn.Module): 207 | def __init__(self, loss, valid_cond=lambda x : x > 0.0): 208 | super(DDFFTrainer.MaskedLoss, self).__init__() 209 | self.loss = loss 210 | self.valid_cond = valid_cond 211 | 212 | def forward(self, inputs, outputs): 213 | mask = self.valid_cond(outputs) 214 | return self.loss(inputs[mask], outputs[mask]) 215 | -------------------------------------------------------------------------------- /python/ddff/trainers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/soyers/ddff-pytorch/78a6c5b5118dd0404f97072d51ca68db0eb79990/python/ddff/trainers/__init__.py -------------------------------------------------------------------------------- /python/eval_ddff.py: -------------------------------------------------------------------------------- 1 | #! 
/usr/bin/python3
2 | 
3 | import ddff.dataproviders.datareaders.FocalStackDDFFH5Reader as FocalStackDDFFH5Reader
4 | import ddff.metricseval.DDFFEval as DDFFEval
5 | 
6 | if __name__ == "__main__":
7 |     #Set parameters
8 |     image_size = (383,552)
9 |     filename_testset = "ddff-dataset-trainval.h5"
10 |     checkpoint_file = "ddff_cc3_checkpoint.pt"
11 | 
12 |     #Create validation reader
13 |     tmp_datareader = FocalStackDDFFH5Reader.FocalStackDDFFH5Reader(filename_testset, transform=None, stack_key="stack_val", disp_key="disp_val")
14 | 
15 |     #Create DDFF evaluator
16 |     evaluator = DDFFEval.DDFFEval(checkpoint_file, focal_stack_size=tmp_datareader.get_stack_size())
17 |     #Evaluate
18 |     metrics = evaluator.evaluate(filename_testset, image_size=image_size)
19 |     print(metrics)
20 | 
--------------------------------------------------------------------------------
/python/eval_ddff_tflearn.py:
--------------------------------------------------------------------------------
1 | import ddff.dataproviders.datareaders.FocalStackDDFFH5Reader as FocalStackDDFFH5Reader
2 | import ddff.metricseval.DDFFTFLearnEval as DDFFTFLearnEval
3 | 
4 | if __name__ == "__main__":
5 |     #Set parameters
6 |     image_size = (383,552)
7 |     filename_testset = "ddff-dataset-trainval.h5"
8 |     checkpoint_file = "ddffnet-cc3-snapshot-121256.npz"
9 |     stack_key = "stack_val"
10 |     disp_key = "disp_val"
11 | 
12 |     #Create validation reader
13 |     tmp_datareader = FocalStackDDFFH5Reader.FocalStackDDFFH5Reader(filename_testset, transform=None, stack_key=stack_key, disp_key=disp_key)
14 | 
15 |     #Create DDFF TFLearn evaluator
16 |     evaluator = DDFFTFLearnEval.DDFFTFLearnEval(checkpoint_file, focal_stack_size=tmp_datareader.get_stack_size(), norm_mean=None, norm_std=None)
17 |     #Evaluate
18 |     metrics = evaluator.evaluate(filename_testset, stack_key=stack_key, disp_key=disp_key, image_size=image_size)
19 |     print(metrics)
20 | 
--------------------------------------------------------------------------------
/python/run_ddff.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python3
2 | 
3 | import argparse
4 | import torch
5 | import random
6 | import numpy as np
7 | import ddff.trainers.DDFFTrainer as DDFFTrainer
8 | 
9 | if __name__ == "__main__":
10 |     #Add command line parser arguments
11 |     parser = argparse.ArgumentParser(description='Train ddff net on specified h5 dataset.')
12 |     parser.add_argument('--dataset', default="ddff-dataset-trainval.h5", help='h5 file that contains the training and validation data (default: ddff-dataset-trainval.h5)')
13 |     parser.add_argument('--epochs', default=200, type=int, help='number of training epochs (default: 200)')
14 |     parser.add_argument('--checkpoint', default="ddff_cc3_checkpoint.pt", help='Checkpoint file to be created during training (default: ddff_cc3_checkpoint.pt)')
15 |     parser.add_argument('--checkpoint_frequency', default=5, type=int, help='Checkpoint frequency to save intermediate models. (default: 5)')
16 |     parser.add_argument('--workers', default=0, type=int, help='Number of threads reading the dataset. (default: 0)')
17 |     parser.add_argument('--batchsize', default=2, type=int, help='batch size during training (default: 2)')
18 |     parser.add_argument('--pretrained', default="bn", help='Either specify a npy file to load tensorflow weights or use "bn" or "no_bn" to use pretrained weights from torchvision package (default: bn)')
19 | 
20 |     #Parse arguments
21 |     args = parser.parse_args()
22 | 
23 |     #Train DDFF net, finetuning from pretrained VGG16 weights
24 |     ddff_trainer = DDFFTrainer.DDFFTrainer.from_h5_data(args.dataset,
25 |                         learning_rate=0.001,
26 |                         max_gradient=5.0,
27 |                         cc1_enabled=False,
28 |                         cc2_enabled=False,
29 |                         cc3_enabled=True,
30 |                         cc4_enabled=False,
31 |                         cc5_enabled=False,
32 |                         training_crop_size=None,
33 |                         validation_crop_size=None,
34 |                         pretrained=args.pretrained,
35 |                         normalize_mean=None, normalize_std=None,
36 |                         epochs=args.epochs,
37 |                         checkpoint_file=args.checkpoint,
38 |                         checkpoint_frequency=args.checkpoint_frequency,
39 |                         batch_size=args.batchsize,
40 |                         num_workers=args.workers,
41 |                         deterministic=True)
42 | 
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | backcall==0.1.0
2 | decorator==4.3.0
3 | h5py==2.8.0
4 | ipython==7.0.1
5 | ipython-genutils==0.2.0
6 | jedi==0.13.1
7 | numpy==1.15.2
8 | parso==0.3.1
9 | pexpect==4.6.0
10 | pickleshare==0.7.5
11 | Pillow==5.3.0
12 | prompt-toolkit==2.0.6
13 | ptyprocess==0.6.0
14 | Pygments==2.2.0
15 | simplegeneric==0.8.1
16 | six==1.11.0
17 | torch==0.4.1.post2
18 | torchvision==0.2.1
19 | traitlets==4.3.2
20 | wcwidth==0.1.7
21 | 
--------------------------------------------------------------------------------
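A minimal inference sketch, assuming a checkpoint produced by run_ddff.py above (default name "ddff_cc3_checkpoint.pt") and the (batch, stack, channels, height, width) layout that FocalStackDDFFH5Reader provides; padding 383x552 images to 384x576 mirrors the multiple-of-32 padding used in DDFFEval.evaluate:

import torch
import ddff.trainers.DDFFTrainer as DDFFTrainer

trainer = DDFFTrainer.DDFFTrainer.from_checkpoint("ddff_cc3_checkpoint.pt", 10)  # focal stack size 10
stack = torch.randn(1, 10, 3, 384, 576)  # placeholder; substitute a padded sample from the H5 reader
if torch.cuda.is_available():
    stack = stack.cuda()
with torch.no_grad():
    disparity = trainer.evaluate(stack)  # the model is switched to eval mode internally
print(disparity.squeeze().data.cpu().numpy()[:383, :552].shape)  # crop back to the original resolution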