├── .gitignore ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── data └── splits.json ├── img ├── 3d_craft_dataset.png └── voxelcnn_intro.png ├── main.py ├── test ├── test_criterions.py ├── test_datasets.py ├── test_evaluators.py ├── test_models.py └── test_predictor.py └── voxelcnn ├── __init__.py ├── checkpoint.py ├── criterions.py ├── datasets.py ├── evaluators.py ├── models.py ├── predictor.py ├── summary.py └── utils.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 
92 | #Pipfile.lock 93 | 94 | # celery beat schedule file 95 | celerybeat-schedule 96 | 97 | # SageMath parsed files 98 | *.sage.py 99 | 100 | # Environments 101 | .env 102 | .venv 103 | env/ 104 | venv/ 105 | ENV/ 106 | env.bak/ 107 | venv.bak/ 108 | 109 | # Spyder project settings 110 | .spyderproject 111 | .spyproject 112 | 113 | # Rope project settings 114 | .ropeproject 115 | 116 | # mkdocs documentation 117 | /site 118 | 119 | # mypy 120 | .mypy_cache/ 121 | .dmypy.json 122 | dmypy.json 123 | 124 | # Pyre type checker 125 | .pyre/ 126 | 127 | # General 128 | .DS_Store 129 | .AppleDouble 130 | .LSOverride 131 | 132 | # Icon must end with two \r 133 | Icon 134 | 135 | # Thumbnails 136 | ._* 137 | 138 | # Files that might appear in the root of a volume 139 | .DocumentRevisions-V100 140 | .fseventsd 141 | .Spotlight-V100 142 | .TemporaryItems 143 | .Trashes 144 | .VolumeIcon.icns 145 | .com.apple.timemachine.donotpresent 146 | 147 | # Directories potentially created on remote AFP share 148 | .AppleDB 149 | .AppleDesktop 150 | Network Trash Folder 151 | Temporary Items 152 | .apdisk 153 | 154 | .vscode/* 155 | !.vscode/settings.json 156 | !.vscode/tasks.json 157 | !.vscode/launch.json 158 | !.vscode/extensions.json 159 | *.code-workspace 160 | 161 | # Project specific 162 | data/* 163 | !data/splits.json 164 | *.pth 165 | *.txt 166 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as 6 | contributors and maintainers pledge to make participation in our project and 7 | our community a harassment-free experience for everyone, regardless of age, body 8 | size, disability, ethnicity, sex characteristics, gender identity and expression, 9 | level of experience, education, socio-economic status, nationality, personal 10 | appearance, race, religion, or sexual identity and orientation. 11 | 12 | ## Our Standards 13 | 14 | Examples of behavior that contributes to creating a positive environment 15 | include: 16 | 17 | * Using welcoming and inclusive language 18 | * Being respectful of differing viewpoints and experiences 19 | * Gracefully accepting constructive criticism 20 | * Focusing on what is best for the community 21 | * Showing empathy towards other community members 22 | 23 | Examples of unacceptable behavior by participants include: 24 | 25 | * The use of sexualized language or imagery and unwelcome sexual attention or 26 | advances 27 | * Trolling, insulting/derogatory comments, and personal or political attacks 28 | * Public or private harassment 29 | * Publishing others' private information, such as a physical or electronic 30 | address, without explicit permission 31 | * Other conduct which could reasonably be considered inappropriate in a 32 | professional setting 33 | 34 | ## Our Responsibilities 35 | 36 | Project maintainers are responsible for clarifying the standards of acceptable 37 | behavior and are expected to take appropriate and fair corrective action in 38 | response to any instances of unacceptable behavior. 
39 | 40 | Project maintainers have the right and responsibility to remove, edit, or 41 | reject comments, commits, code, wiki edits, issues, and other contributions 42 | that are not aligned to this Code of Conduct, or to ban temporarily or 43 | permanently any contributor for other behaviors that they deem inappropriate, 44 | threatening, offensive, or harmful. 45 | 46 | ## Scope 47 | 48 | This Code of Conduct applies within all project spaces, and it also applies when 49 | an individual is representing the project or its community in public spaces. 50 | Examples of representing a project or community include using an official 51 | project e-mail address, posting via an official social media account, or acting 52 | as an appointed representative at an online or offline event. Representation of 53 | a project may be further defined and clarified by project maintainers. 54 | 55 | ## Enforcement 56 | 57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 58 | reported by contacting the project team at . All 59 | complaints will be reviewed and investigated and will result in a response that 60 | is deemed necessary and appropriate to the circumstances. The project team is 61 | obligated to maintain confidentiality with regard to the reporter of an incident. 62 | Further details of specific enforcement policies may be posted separately. 63 | 64 | Project maintainers who do not follow or enforce the Code of Conduct in good 65 | faith may face temporary or permanent repercussions as determined by other 66 | members of the project's leadership. 67 | 68 | ## Attribution 69 | 70 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 71 | available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html 72 | 73 | [homepage]: https://www.contributor-covenant.org 74 | 75 | For answers to common questions about this code of conduct, see 76 | https://www.contributor-covenant.org/faq 77 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to VoxelCNN 2 | We want to make contributing to this project as easy and transparent as 3 | possible. 4 | 5 | ## Pull Requests 6 | We actively welcome your pull requests. 7 | 8 | 1. Fork the repo and create your branch from `main`. 9 | 2. If you've added code that should be tested, add tests. 10 | 3. If you've changed APIs, update the documentation. 11 | 4. Ensure the test suite passes. 12 | 5. Make sure your code lints. 13 | 6. If you haven't already, complete the Contributor License Agreement ("CLA"). 14 | 15 | ## Contributor License Agreement ("CLA") 16 | In order to accept your pull request, we need you to submit a CLA. You only need 17 | to do this once to work on any of Facebook's open source projects. 18 | 19 | Complete your CLA here: 20 | 21 | ## Issues 22 | We use GitHub issues to track public bugs. Please ensure your description is 23 | clear and has sufficient instructions to be able to reproduce the issue. 24 | 25 | Facebook has a [bounty program](https://www.facebook.com/whitehat/) for the safe 26 | disclosure of security bugs. In those cases, please go through the process 27 | outlined on that page and do not file a public issue. 28 | 29 | ## License 30 | By contributing to VoxelCNN, you agree that your contributions will be licensed 31 | under the LICENSE file in the root directory of this source tree. 
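## Testing

Before sending a pull request, you can run the unit test suite locally. This uses the same command as the README's installation check:

```
python -m unittest discover -s test -v
```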
32 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Attribution-NonCommercial 4.0 International 2 | 3 | ======================================================================= 4 | 5 | Creative Commons Corporation ("Creative Commons") is not a law firm and 6 | does not provide legal services or legal advice. Distribution of 7 | Creative Commons public licenses does not create a lawyer-client or 8 | other relationship. Creative Commons makes its licenses and related 9 | information available on an "as-is" basis. Creative Commons gives no 10 | warranties regarding its licenses, any material licensed under their 11 | terms and conditions, or any related information. Creative Commons 12 | disclaims all liability for damages resulting from their use to the 13 | fullest extent possible. 14 | 15 | Using Creative Commons Public Licenses 16 | 17 | Creative Commons public licenses provide a standard set of terms and 18 | conditions that creators and other rights holders may use to share 19 | original works of authorship and other material subject to copyright 20 | and certain other rights specified in the public license below. The 21 | following considerations are for informational purposes only, are not 22 | exhaustive, and do not form part of our licenses. 23 | 24 | Considerations for licensors: Our public licenses are 25 | intended for use by those authorized to give the public 26 | permission to use material in ways otherwise restricted by 27 | copyright and certain other rights. Our licenses are 28 | irrevocable. Licensors should read and understand the terms 29 | and conditions of the license they choose before applying it. 30 | Licensors should also secure all rights necessary before 31 | applying our licenses so that the public can reuse the 32 | material as expected. Licensors should clearly mark any 33 | material not subject to the license. This includes other CC- 34 | licensed material, or material used under an exception or 35 | limitation to copyright. More considerations for licensors: 36 | wiki.creativecommons.org/Considerations_for_licensors 37 | 38 | Considerations for the public: By using one of our public 39 | licenses, a licensor grants the public permission to use the 40 | licensed material under specified terms and conditions. If 41 | the licensor's permission is not necessary for any reason--for 42 | example, because of any applicable exception or limitation to 43 | copyright--then that use is not regulated by the license. Our 44 | licenses grant only permissions under copyright and certain 45 | other rights that a licensor has authority to grant. Use of 46 | the licensed material may still be restricted for other 47 | reasons, including because others have copyright or other 48 | rights in the material. A licensor may make special requests, 49 | such as asking that all changes be marked or described. 50 | Although not required by our licenses, you are encouraged to 51 | respect those requests where reasonable. 
More_considerations 52 | for the public: 53 | wiki.creativecommons.org/Considerations_for_licensees 54 | 55 | ======================================================================= 56 | 57 | Creative Commons Attribution-NonCommercial 4.0 International Public 58 | License 59 | 60 | By exercising the Licensed Rights (defined below), You accept and agree 61 | to be bound by the terms and conditions of this Creative Commons 62 | Attribution-NonCommercial 4.0 International Public License ("Public 63 | License"). To the extent this Public License may be interpreted as a 64 | contract, You are granted the Licensed Rights in consideration of Your 65 | acceptance of these terms and conditions, and the Licensor grants You 66 | such rights in consideration of benefits the Licensor receives from 67 | making the Licensed Material available under these terms and 68 | conditions. 69 | 70 | Section 1 -- Definitions. 71 | 72 | a. Adapted Material means material subject to Copyright and Similar 73 | Rights that is derived from or based upon the Licensed Material 74 | and in which the Licensed Material is translated, altered, 75 | arranged, transformed, or otherwise modified in a manner requiring 76 | permission under the Copyright and Similar Rights held by the 77 | Licensor. For purposes of this Public License, where the Licensed 78 | Material is a musical work, performance, or sound recording, 79 | Adapted Material is always produced where the Licensed Material is 80 | synched in timed relation with a moving image. 81 | 82 | b. Adapter's License means the license You apply to Your Copyright 83 | and Similar Rights in Your contributions to Adapted Material in 84 | accordance with the terms and conditions of this Public License. 85 | 86 | c. Copyright and Similar Rights means copyright and/or similar rights 87 | closely related to copyright including, without limitation, 88 | performance, broadcast, sound recording, and Sui Generis Database 89 | Rights, without regard to how the rights are labeled or 90 | categorized. For purposes of this Public License, the rights 91 | specified in Section 2(b)(1)-(2) are not Copyright and Similar 92 | Rights. 93 | d. Effective Technological Measures means those measures that, in the 94 | absence of proper authority, may not be circumvented under laws 95 | fulfilling obligations under Article 11 of the WIPO Copyright 96 | Treaty adopted on December 20, 1996, and/or similar international 97 | agreements. 98 | 99 | e. Exceptions and Limitations means fair use, fair dealing, and/or 100 | any other exception or limitation to Copyright and Similar Rights 101 | that applies to Your use of the Licensed Material. 102 | 103 | f. Licensed Material means the artistic or literary work, database, 104 | or other material to which the Licensor applied this Public 105 | License. 106 | 107 | g. Licensed Rights means the rights granted to You subject to the 108 | terms and conditions of this Public License, which are limited to 109 | all Copyright and Similar Rights that apply to Your use of the 110 | Licensed Material and that the Licensor has authority to license. 111 | 112 | h. Licensor means the individual(s) or entity(ies) granting rights 113 | under this Public License. 114 | 115 | i. NonCommercial means not primarily intended for or directed towards 116 | commercial advantage or monetary compensation. 
For purposes of 117 | this Public License, the exchange of the Licensed Material for 118 | other material subject to Copyright and Similar Rights by digital 119 | file-sharing or similar means is NonCommercial provided there is 120 | no payment of monetary compensation in connection with the 121 | exchange. 122 | 123 | j. Share means to provide material to the public by any means or 124 | process that requires permission under the Licensed Rights, such 125 | as reproduction, public display, public performance, distribution, 126 | dissemination, communication, or importation, and to make material 127 | available to the public including in ways that members of the 128 | public may access the material from a place and at a time 129 | individually chosen by them. 130 | 131 | k. Sui Generis Database Rights means rights other than copyright 132 | resulting from Directive 96/9/EC of the European Parliament and of 133 | the Council of 11 March 1996 on the legal protection of databases, 134 | as amended and/or succeeded, as well as other essentially 135 | equivalent rights anywhere in the world. 136 | 137 | l. You means the individual or entity exercising the Licensed Rights 138 | under this Public License. Your has a corresponding meaning. 139 | 140 | Section 2 -- Scope. 141 | 142 | a. License grant. 143 | 144 | 1. Subject to the terms and conditions of this Public License, 145 | the Licensor hereby grants You a worldwide, royalty-free, 146 | non-sublicensable, non-exclusive, irrevocable license to 147 | exercise the Licensed Rights in the Licensed Material to: 148 | 149 | a. reproduce and Share the Licensed Material, in whole or 150 | in part, for NonCommercial purposes only; and 151 | 152 | b. produce, reproduce, and Share Adapted Material for 153 | NonCommercial purposes only. 154 | 155 | 2. Exceptions and Limitations. For the avoidance of doubt, where 156 | Exceptions and Limitations apply to Your use, this Public 157 | License does not apply, and You do not need to comply with 158 | its terms and conditions. 159 | 160 | 3. Term. The term of this Public License is specified in Section 161 | 6(a). 162 | 163 | 4. Media and formats; technical modifications allowed. The 164 | Licensor authorizes You to exercise the Licensed Rights in 165 | all media and formats whether now known or hereafter created, 166 | and to make technical modifications necessary to do so. The 167 | Licensor waives and/or agrees not to assert any right or 168 | authority to forbid You from making technical modifications 169 | necessary to exercise the Licensed Rights, including 170 | technical modifications necessary to circumvent Effective 171 | Technological Measures. For purposes of this Public License, 172 | simply making modifications authorized by this Section 2(a) 173 | (4) never produces Adapted Material. 174 | 175 | 5. Downstream recipients. 176 | 177 | a. Offer from the Licensor -- Licensed Material. Every 178 | recipient of the Licensed Material automatically 179 | receives an offer from the Licensor to exercise the 180 | Licensed Rights under the terms and conditions of this 181 | Public License. 182 | 183 | b. No downstream restrictions. You may not offer or impose 184 | any additional or different terms or conditions on, or 185 | apply any Effective Technological Measures to, the 186 | Licensed Material if doing so restricts exercise of the 187 | Licensed Rights by any recipient of the Licensed 188 | Material. 189 | 190 | 6. No endorsement. 
Nothing in this Public License constitutes or 191 | may be construed as permission to assert or imply that You 192 | are, or that Your use of the Licensed Material is, connected 193 | with, or sponsored, endorsed, or granted official status by, 194 | the Licensor or others designated to receive attribution as 195 | provided in Section 3(a)(1)(A)(i). 196 | 197 | b. Other rights. 198 | 199 | 1. Moral rights, such as the right of integrity, are not 200 | licensed under this Public License, nor are publicity, 201 | privacy, and/or other similar personality rights; however, to 202 | the extent possible, the Licensor waives and/or agrees not to 203 | assert any such rights held by the Licensor to the limited 204 | extent necessary to allow You to exercise the Licensed 205 | Rights, but not otherwise. 206 | 207 | 2. Patent and trademark rights are not licensed under this 208 | Public License. 209 | 210 | 3. To the extent possible, the Licensor waives any right to 211 | collect royalties from You for the exercise of the Licensed 212 | Rights, whether directly or through a collecting society 213 | under any voluntary or waivable statutory or compulsory 214 | licensing scheme. In all other cases the Licensor expressly 215 | reserves any right to collect such royalties, including when 216 | the Licensed Material is used other than for NonCommercial 217 | purposes. 218 | 219 | Section 3 -- License Conditions. 220 | 221 | Your exercise of the Licensed Rights is expressly made subject to the 222 | following conditions. 223 | 224 | a. Attribution. 225 | 226 | 1. If You Share the Licensed Material (including in modified 227 | form), You must: 228 | 229 | a. retain the following if it is supplied by the Licensor 230 | with the Licensed Material: 231 | 232 | i. identification of the creator(s) of the Licensed 233 | Material and any others designated to receive 234 | attribution, in any reasonable manner requested by 235 | the Licensor (including by pseudonym if 236 | designated); 237 | 238 | ii. a copyright notice; 239 | 240 | iii. a notice that refers to this Public License; 241 | 242 | iv. a notice that refers to the disclaimer of 243 | warranties; 244 | 245 | v. a URI or hyperlink to the Licensed Material to the 246 | extent reasonably practicable; 247 | 248 | b. indicate if You modified the Licensed Material and 249 | retain an indication of any previous modifications; and 250 | 251 | c. indicate the Licensed Material is licensed under this 252 | Public License, and include the text of, or the URI or 253 | hyperlink to, this Public License. 254 | 255 | 2. You may satisfy the conditions in Section 3(a)(1) in any 256 | reasonable manner based on the medium, means, and context in 257 | which You Share the Licensed Material. For example, it may be 258 | reasonable to satisfy the conditions by providing a URI or 259 | hyperlink to a resource that includes the required 260 | information. 261 | 262 | 3. If requested by the Licensor, You must remove any of the 263 | information required by Section 3(a)(1)(A) to the extent 264 | reasonably practicable. 265 | 266 | 4. If You Share Adapted Material You produce, the Adapter's 267 | License You apply must not prevent recipients of the Adapted 268 | Material from complying with this Public License. 269 | 270 | Section 4 -- Sui Generis Database Rights. 271 | 272 | Where the Licensed Rights include Sui Generis Database Rights that 273 | apply to Your use of the Licensed Material: 274 | 275 | a. 
for the avoidance of doubt, Section 2(a)(1) grants You the right 276 | to extract, reuse, reproduce, and Share all or a substantial 277 | portion of the contents of the database for NonCommercial purposes 278 | only; 279 | 280 | b. if You include all or a substantial portion of the database 281 | contents in a database in which You have Sui Generis Database 282 | Rights, then the database in which You have Sui Generis Database 283 | Rights (but not its individual contents) is Adapted Material; and 284 | 285 | c. You must comply with the conditions in Section 3(a) if You Share 286 | all or a substantial portion of the contents of the database. 287 | 288 | For the avoidance of doubt, this Section 4 supplements and does not 289 | replace Your obligations under this Public License where the Licensed 290 | Rights include other Copyright and Similar Rights. 291 | 292 | Section 5 -- Disclaimer of Warranties and Limitation of Liability. 293 | 294 | a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE 295 | EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS 296 | AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF 297 | ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, 298 | IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, 299 | WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR 300 | PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, 301 | ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT 302 | KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT 303 | ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. 304 | 305 | b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE 306 | TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, 307 | NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, 308 | INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, 309 | COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR 310 | USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN 311 | ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR 312 | DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR 313 | IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. 314 | 315 | c. The disclaimer of warranties and limitation of liability provided 316 | above shall be interpreted in a manner that, to the extent 317 | possible, most closely approximates an absolute disclaimer and 318 | waiver of all liability. 319 | 320 | Section 6 -- Term and Termination. 321 | 322 | a. This Public License applies for the term of the Copyright and 323 | Similar Rights licensed here. However, if You fail to comply with 324 | this Public License, then Your rights under this Public License 325 | terminate automatically. 326 | 327 | b. Where Your right to use the Licensed Material has terminated under 328 | Section 6(a), it reinstates: 329 | 330 | 1. automatically as of the date the violation is cured, provided 331 | it is cured within 30 days of Your discovery of the 332 | violation; or 333 | 334 | 2. upon express reinstatement by the Licensor. 335 | 336 | For the avoidance of doubt, this Section 6(b) does not affect any 337 | right the Licensor may have to seek remedies for Your violations 338 | of this Public License. 339 | 340 | c. 
For the avoidance of doubt, the Licensor may also offer the 341 | Licensed Material under separate terms or conditions or stop 342 | distributing the Licensed Material at any time; however, doing so 343 | will not terminate this Public License. 344 | 345 | d. Sections 1, 5, 6, 7, and 8 survive termination of this Public 346 | License. 347 | 348 | Section 7 -- Other Terms and Conditions. 349 | 350 | a. The Licensor shall not be bound by any additional or different 351 | terms or conditions communicated by You unless expressly agreed. 352 | 353 | b. Any arrangements, understandings, or agreements regarding the 354 | Licensed Material not stated herein are separate from and 355 | independent of the terms and conditions of this Public License. 356 | 357 | Section 8 -- Interpretation. 358 | 359 | a. For the avoidance of doubt, this Public License does not, and 360 | shall not be interpreted to, reduce, limit, restrict, or impose 361 | conditions on any use of the Licensed Material that could lawfully 362 | be made without permission under this Public License. 363 | 364 | b. To the extent possible, if any provision of this Public License is 365 | deemed unenforceable, it shall be automatically reformed to the 366 | minimum extent necessary to make it enforceable. If the provision 367 | cannot be reformed, it shall be severed from this Public License 368 | without affecting the enforceability of the remaining terms and 369 | conditions. 370 | 371 | c. No term or condition of this Public License will be waived and no 372 | failure to comply consented to unless expressly agreed to by the 373 | Licensor. 374 | 375 | d. Nothing in this Public License constitutes or may be interpreted 376 | as a limitation upon, or waiver of, any privileges and immunities 377 | that apply to the Licensor or You, including from the legal 378 | processes of any jurisdiction or authority. 379 | 380 | ======================================================================= 381 | 382 | Creative Commons is not a party to its public 383 | licenses. Notwithstanding, Creative Commons may elect to apply one of 384 | its public licenses to material it publishes and in those instances 385 | will be considered the “Licensor.” The text of the Creative Commons 386 | public licenses is dedicated to the public domain under the CC0 Public 387 | Domain Dedication. Except for the limited purpose of indicating that 388 | material is shared under a Creative Commons public license or as 389 | otherwise permitted by the Creative Commons policies published at 390 | creativecommons.org/policies, Creative Commons does not authorize the 391 | use of the trademark "Creative Commons" or any other trademark or logo 392 | of Creative Commons without its prior written consent including, 393 | without limitation, in connection with any unauthorized modifications 394 | to any of its public licenses or any other arrangements, 395 | understandings, or agreements concerning use of licensed material. For 396 | the avoidance of doubt, this paragraph does not form part of the 397 | public licenses. 398 | 399 | Creative Commons may be contacted at creativecommons.org. 
400 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # VoxelCNN 2 | 3 | [VoxelCNN](http://openaccess.thecvf.com/content_ICCV_2019/papers/Chen_Order-Aware_Generative_Modeling_Using_the_3D-Craft_Dataset_ICCV_2019_paper.pdf) is an order-aware generative model for building houses in Minecraft. This codebase is a [PyTorch](https://pytorch.org/) implementation of the training and evaluation pipeline for VoxelCNN. 4 | ![VoxelCNN](./img/voxelcnn_intro.png) 5 | 6 | VoxelCNN is trained and evaluated with the [*3D-Craft*](https://craftassist.s3-us-west-2.amazonaws.com/pubr/house_data.tar.gz) dataset. 7 | ![3D-Craft Dataset](./img/3d_craft_dataset.png) 8 | 9 | For more details, please refer to the ICCV 2019 paper [*Order-Aware Generative Modeling Using the 3D-Craft Dataset*](http://openaccess.thecvf.com/content_ICCV_2019/papers/Chen_Order-Aware_Generative_Modeling_Using_the_3D-Craft_Dataset_ICCV_2019_paper.pdf). 10 | 11 | ## Installation 12 | 13 | Python version >= 3.7 is required. 14 | 15 | ### 0. Clone the repo 16 | 17 | ``` 18 | git clone https://github.com/facebookresearch/VoxelCNN 19 | cd VoxelCNN 20 | ``` 21 | 22 | ### 1. (Optional) Create a virtual environment 23 | 24 | ``` 25 | pip install virtualenv 26 | virtualenv voxelcnn_venv 27 | source voxelcnn_venv/bin/activate 28 | ``` 29 | 30 | ### 2. Install PyTorch 31 | 32 | Please follow the official [installation guide](https://pytorch.org/get-started/locally/) to install PyTorch version >= 1.3. 33 | 34 | ### 3. Install other dependencies 35 | 36 | ``` 37 | pip install numpy requests tqdm 38 | ``` 39 | 40 | ### 4. (Optional) Verify installation by running unit tests 41 | 42 | ``` 43 | python -m unittest discover -s test -v 44 | ``` 45 | 46 | ## Training and Evaluation 47 | 48 | The [*3D-Craft*](https://craftassist.s3-us-west-2.amazonaws.com/pubr/house_data.tar.gz) dataset will be downloaded automatically when launching the training for the first time. 49 | 50 | Run the fast training (fewer epochs, slightly worse results): 51 | ``` 52 | python main.py --num_epochs 3 --step_size 1 --save_dir /path/to/save/log/and/checkpoints 53 | ``` 54 | 55 | Example final test results for the fast training: 56 | ``` 57 | acc@1: 0.622 acc@5: 0.760 acc@10: 0.788 58 | cca_10%: 13.374 cca_25%: 11.115 cca_50%: 12.546 cca_75%: 12.564 cca_90%: 7.632 cca_avg: 11.446 59 | mtc: 131.411 mtc_normed: 0.241 60 | ``` 61 | 62 | Run the full training: 63 | ``` 64 | python main.py --save_dir /path/to/save/log/and/checkpoints 65 | ``` 66 | 67 | Example final test results for the full training: 68 | ``` 69 | acc@1: 0.640 acc@5: 0.778 acc@10: 0.806 70 | cca_10%: 13.630 cca_25%: 12.223 cca_50%: 13.168 cca_75%: 13.047 cca_90%: 7.571 cca_avg: 11.928 71 | mtc: 121.753 mtc_normed: 0.223 72 | ``` 73 | 74 | ## License 75 | 76 | VoxelCNN is released under the [CC-BY-NC 4.0 license](LICENSE). 77 | 78 | ## Citing 79 | 80 | ``` 81 | @inproceedings{zchen2019, 82 | title = {Order-Aware Generative Modeling Using the 3D-Craft Dataset}, 83 | author = {Chen, Zhuoyuan and Guo, Demi and Xiao, Tong and Xie, Saining and Chen, Xinlei and Yu, Haonan and Gray, Jonathan and Srinet, Kavya and Fan, Haoqi and Ma, Jerry and Qi, Charles R and Tulsiani, Shubham and Szlam, Arthur and Zitnick, C.
Lawrence}, 84 | booktitle = {ICCV}, 85 | year = {2019}, 86 | } 87 | ``` 88 | -------------------------------------------------------------------------------- /img/3d_craft_dataset.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/facebookresearch/voxelcnn/f2be8b4df1469daf2cd913d09ed04e2aab545f38/img/3d_craft_dataset.png -------------------------------------------------------------------------------- /img/voxelcnn_intro.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/facebookresearch/voxelcnn/f2be8b4df1469daf2cd913d09ed04e2aab545f38/img/voxelcnn_intro.png -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # Copyright (c) Facebook, Inc. and its affiliates. 4 | # All rights reserved. 5 | # This source code is licensed under the license found in the 6 | # LICENSE file in the root directory of this source tree. 7 | 8 | import argparse 9 | import random 10 | import warnings 11 | from datetime import datetime 12 | from os import path as osp 13 | from time import time as tic 14 | 15 | import numpy as np 16 | import torch 17 | from torch import optim 18 | from torch.utils.data import DataLoader 19 | from voxelcnn.checkpoint import Checkpointer 20 | from voxelcnn.criterions import CrossEntropyLoss 21 | from voxelcnn.datasets import Craft3DDataset 22 | from voxelcnn.evaluators import CCA, MTC, Accuracy 23 | from voxelcnn.models import VoxelCNN 24 | from voxelcnn.summary import Summary 25 | from voxelcnn.utils import Section, collate_batches, setup_logger, to_cuda 26 | 27 | 28 | def global_setup(args): 29 | if args.seed is not None: 30 | random.seed(args.seed) 31 | np.random.seed(args.seed) 32 | torch.manual_seed(args.seed) 33 | torch.backends.cudnn.deterministic = True 34 | if not args.cpu_only: 35 | if not torch.cuda.is_available(): 36 | warnings.warn("CUDA is not available. 
Falling back to using CPU only") 37 | args.cpu_only = True 38 | else: 39 | # Enable the cuDNN autotuner (the intended flag is under torch.backends.cudnn) 40 | torch.backends.cudnn.benchmark = True 41 | 42 | def build_data_loaders(args, logger): 43 | data_loaders = {} 44 | for subset in ("train", "val", "test"): 45 | dataset = Craft3DDataset( 46 | args.data_dir, 47 | subset, 48 | max_samples=args.max_samples, 49 | next_steps=10, 50 | logger=logger, 51 | ) 52 | data_loaders[subset] = DataLoader( 53 | dataset, 54 | batch_size=args.batch_size, 55 | shuffle=subset == "train", 56 | num_workers=args.num_workers, 57 | pin_memory=not args.cpu_only, 58 | ) 59 | return data_loaders 60 | 61 | 62 | def build_model(args, logger): 63 | model = VoxelCNN() 64 | if not args.cpu_only: 65 | model.cuda() 66 | logger.info("Model architecture:\n" + str(model)) 67 | return model 68 | 69 | 70 | def build_criterion(args): 71 | criterion = CrossEntropyLoss() 72 | if not args.cpu_only: 73 | criterion.cuda() 74 | return criterion 75 | 76 | 77 | def build_optimizer(args, model): 78 | no_decay = [] 79 | decay = [] 80 | for name, param in model.named_parameters(): 81 | if name.endswith(".bias"): 82 | no_decay.append(param) 83 | else: 84 | decay.append(param) 85 | params = [{"params": no_decay, "weight_decay": 0}, {"params": decay}] 86 | return optim.SGD( 87 | params, 88 | lr=args.lr, 89 | weight_decay=args.weight_decay, 90 | momentum=args.momentum, 91 | nesterov=True, 92 | ) 93 | 94 | 95 | def build_scheduler(args, optimizer): 96 | return optim.lr_scheduler.StepLR( 97 | optimizer, step_size=args.step_size, gamma=args.gamma 98 | ) 99 | 100 | 101 | def build_evaluators(args): 102 | return { 103 | "acc@1": Accuracy(next_steps=1), 104 | "acc@5": Accuracy(next_steps=5), 105 | "acc@10": Accuracy(next_steps=10), 106 | } 107 | 108 | 109 | def train( 110 | args, epoch, data_loader, model, criterion, optimizer, scheduler, evaluators, logger 111 | ): 112 | summary = Summary(logger=logger) 113 | model.train() 114 | timestamp = tic() 115 | for i, (inputs, targets) in enumerate(data_loader): 116 | times = {"data": tic() - timestamp} 117 | if not args.cpu_only: 118 | inputs = to_cuda(inputs) 119 | targets = to_cuda(targets) 120 | outputs = model(inputs) 121 | losses = criterion(outputs, targets) 122 | with torch.no_grad(): 123 | metrics = {k: float(v(outputs, targets)) for k, v in evaluators.items()} 124 | 125 | optimizer.zero_grad() 126 | losses["overall_loss"].backward() 127 | optimizer.step() 128 | try: 129 | lr = scheduler.get_last_lr()[0] 130 | except Exception: 131 | # For backward compatibility 132 | lr = scheduler.get_lr()[0] 133 | 134 | times["time"] = tic() - timestamp 135 | summary.add(times=times, lrs={"lr": lr}, losses=losses, metrics=metrics) 136 | summary.print_current( 137 | prefix=f"[{epoch}/{args.num_epochs}][{i + 1}/{len(data_loader)}]" 138 | ) 139 | timestamp = tic() 140 | scheduler.step() 141 | 142 | 143 | @torch.no_grad() 144 | def evaluate(args, epoch, data_loader, model, evaluators, logger): 145 | summary = Summary(logger=logger) 146 | model.eval() 147 | timestamp = tic() 148 | batch_results = [] 149 | for i, (inputs, targets) in enumerate(data_loader): 150 | times = {"data": tic() - timestamp} 151 | if not args.cpu_only: 152 | inputs = to_cuda(inputs) 153 | targets = to_cuda(targets) 154 | outputs = model(inputs) 155 | batch_results.append( 156 | {k: v.step(outputs, targets) for k, v in evaluators.items()} 157 | ) 158 | 159 | times["time"] = tic() - timestamp 160 | summary.add(times=times) 161 | summary.print_current( 162 | prefix=f"[{epoch}/{args.num_epochs}][{i + 1}/{len(data_loader)}]" 163 | ) 164 |
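# Reset the timer so the next iteration's "data" entry measures only data-loading time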
timestamp = tic() 165 | results = collate_batches(batch_results) 166 | metrics = {k: float(v.stop(results[k])) for k, v in evaluators.items()} 167 | return metrics 168 | 169 | 170 | def main(args): 171 | # Set log file name based on current date and time 172 | cur_datetime = datetime.now().strftime("%Y%m%d.%H%M%S") 173 | log_path = osp.join(args.save_dir, f"log.{cur_datetime}.txt") 174 | logger = setup_logger(save_file=log_path) 175 | logger.info(f"Save logs to: {log_path}") 176 | 177 | with Section("Global setup", logger=logger): 178 | global_setup(args) 179 | 180 | with Section("Building data loaders", logger=logger): 181 | data_loaders = build_data_loaders(args, logger) 182 | 183 | with Section("Building model", logger=logger): 184 | model = build_model(args, logger) 185 | 186 | with Section("Building criterions, optimizer, scheduler", logger=logger): 187 | criterion = build_criterion(args) 188 | optimizer = build_optimizer(args, model) 189 | scheduler = build_scheduler(args, optimizer) 190 | 191 | with Section("Building evaluators", logger=logger): 192 | evaluators = build_evaluators(args) 193 | 194 | checkpointer = Checkpointer(args.save_dir) 195 | last_epoch = 0 196 | if args.resume is not None: 197 | with Section(f"Resuming from model: {args.resume}", logger=logger): 198 | last_epoch = checkpointer.resume( 199 | args.resume, model=model, optimizer=optimizer, scheduler=scheduler 200 | ) 201 | 202 | for epoch in range(last_epoch + 1, args.num_epochs + 1): 203 | with Section(f"Training epoch {epoch}", logger=logger): 204 | train( 205 | args, 206 | epoch, 207 | data_loaders["train"], 208 | model, 209 | criterion, 210 | optimizer, 211 | scheduler, 212 | evaluators, 213 | logger, 214 | ) 215 | with Section(f"Validating epoch {epoch}", logger=logger): 216 | # Evaluate on the validation set by the lightweight accuracy metrics 217 | metrics = evaluate( 218 | args, epoch, data_loaders["val"], model, evaluators, logger 219 | ) 220 | # Use acc@10 as the key metric to select best model 221 | checkpointer.save(model, optimizer, scheduler, epoch, metrics["acc@10"]) 222 | metrics_str = " ".join(f"{k}: {v:.3f}" for k, v in metrics.items()) 223 | best_mark = "*" if epoch == checkpointer.best_epoch else "" 224 | logger.info(f"Finish epoch: {epoch} {metrics_str} {best_mark}") 225 | 226 | best_epoch = checkpointer.best_epoch 227 | with Section(f"Final test with best model from epoch: {best_epoch}", logger=logger): 228 | # Load the best model and evaluate all the metrics on the test set 229 | checkpointer.load("best", model=model) 230 | metrics = evaluate( 231 | args, best_epoch, data_loaders["test"], model, evaluators, logger 232 | ) 233 | 234 | # Additional evaluation metrics. 
Takes quite a long time to evaluate 235 | dataset = data_loaders["test"].dataset 236 | params = { 237 | "local_size": dataset.local_size, 238 | "global_size": dataset.global_size, 239 | "history": dataset.history, 240 | } 241 | metrics.update(CCA(**params).evaluate(dataset, model)) 242 | metrics.update(MTC(**params).evaluate(dataset, model)) 243 | 244 | metrics_str = " ".join(f"{k}: {v:.3f}" for k, v in metrics.items()) 245 | logger.info(f"Final test from best epoch: {best_epoch}\n{metrics_str}") 246 | 247 | 248 | if __name__ == "__main__": 249 | work_dir = osp.dirname(osp.abspath(__file__)) 250 | parser = argparse.ArgumentParser( 251 | description="Train and evaluate VoxelCNN model on 3D-Craft dataset" 252 | ) 253 | # Data 254 | parser.add_argument( 255 | "--data_dir", 256 | type=str, 257 | default=osp.join(work_dir, "data"), 258 | help="Path to the data directory", 259 | ) 260 | parser.add_argument("--batch_size", type=int, default=64, help="Batch size") 261 | parser.add_argument( 262 | "--num_workers", 263 | type=int, 264 | default=16, 265 | help="Number of workers for preprocessing", 266 | ) 267 | parser.add_argument( 268 | "--max_samples", 269 | type=int, 270 | default=None, 271 | help="When debugging, set this option to limit the number of training samples", 272 | ) 273 | # Optimizer 274 | parser.add_argument("--lr", type=float, default=0.1, help="Initial learning rate") 275 | parser.add_argument( 276 | "--weight_decay", type=float, default=0.0001, help="Weight decay" 277 | ) 278 | parser.add_argument("--momentum", type=float, default=0.9, help="Momentum") 279 | # Scheduler 280 | parser.add_argument("--step_size", type=int, default=5, help="StepLR step size") 281 | parser.add_argument("--gamma", type=float, default=0.1, help="StepLR gamma") 282 | parser.add_argument("--num_epochs", type=int, default=12, help="Total train epochs") 283 | # Misc 284 | parser.add_argument( 285 | "--save_dir", 286 | type=str, 287 | default=osp.join(work_dir, "logs"), 288 | help="Path to a directory to save log file and checkpoints", 289 | ) 290 | parser.add_argument( 291 | "--resume", 292 | type=str, 293 | default=None, 294 | help="'latest' | 'best' | '<epoch number>' | '<path to a checkpoint>'. " 295 | "Default: None, will not resume", 296 | ) 297 | parser.add_argument("--cpu_only", action="store_true", help="Only using CPU") 298 | parser.add_argument("--seed", type=int, default=None, help="Random seed") 299 | main(parser.parse_args()) 300 | -------------------------------------------------------------------------------- /test/test_criterions.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # Copyright (c) Facebook, Inc. and its affiliates. 4 | # All rights reserved. 5 | # This source code is licensed under the license found in the 6 | # LICENSE file in the root directory of this source tree.
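# The expected losses below are hand-computed softmax cross-entropies: with a single
# logit of 1.0 among the 27 cells of a 3 x 3 x 3 window, p_coords = exp(1) / (exp(1) + 26),
# and with one hot logit over 2 block types, p_types = exp(1) / (exp(1) + 1).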
7 | 8 | import math 9 | import unittest 10 | 11 | import torch 12 | from voxelcnn.criterions import CrossEntropyLoss 13 | 14 | 15 | class TestCrossEntropyLoss(unittest.TestCase): 16 | def test_forward(self): 17 | targets = { 18 | "coords": torch.tensor([[0], [13]]), 19 | "types": torch.tensor([[0], [1]]), 20 | } 21 | 22 | coords_outputs = torch.zeros((2, 1, 3, 3, 3)) 23 | coords_outputs[0, 0, 0, 0, 0] = 1.0 24 | coords_outputs[1, 0, 1, 1, 1] = 1.0 25 | 26 | types_outputs = torch.zeros((2, 2, 3, 3, 3)) 27 | types_outputs[0, 0, 0, 0, 0] = 1.0 28 | types_outputs[1, 1, 1, 1, 1] = 1.0 29 | outputs = {"coords": coords_outputs, "types": types_outputs} 30 | 31 | criterion = CrossEntropyLoss() 32 | losses = criterion(outputs, targets) 33 | 34 | p_coords = math.exp(1.0) / (math.exp(1.0) + 26) 35 | p_types = math.exp(1.0) / (math.exp(1.0) + 1) 36 | self.assertAlmostEqual( 37 | float(losses["coords_loss"]), -math.log(p_coords), places=3 38 | ) 39 | self.assertAlmostEqual( 40 | float(losses["types_loss"]), -math.log(p_types), places=3 41 | ) 42 | 43 | 44 | if __name__ == "__main__": 45 | unittest.main() 46 | -------------------------------------------------------------------------------- /test/test_datasets.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # Copyright (c) Facebook, Inc. and its affiliates. 4 | # All rights reserved. 5 | # This source code is licensed under the license found in the 6 | # LICENSE file in the root directory of this source tree. 7 | 8 | import unittest 9 | 10 | import torch 11 | from voxelcnn.datasets import Craft3DDataset 12 | 13 | 14 | class TestCraft3DDataset(unittest.TestCase): 15 | def setUp(self): 16 | self.annotation = torch.tensor( 17 | [[0, 0, 0, 0], [3, 3, 3, 3], [1, 1, 1, 1], [2, 2, 2, 2]] 18 | ) 19 | 20 | def test_convert_to_voxels(self): 21 | voxels = Craft3DDataset._convert_to_voxels( 22 | self.annotation, size=3, occupancy_only=False 23 | ) 24 | self.assertEqual(voxels.shape, (256, 3, 3, 3)) 25 | self.assertEqual(voxels[1, 0, 0, 0], 1) 26 | self.assertEqual(voxels[2, 1, 1, 1], 1) 27 | self.assertEqual(voxels[3, 2, 2, 2], 1) 28 | 29 | voxels = Craft3DDataset._convert_to_voxels( 30 | self.annotation, size=5, occupancy_only=True 31 | ) 32 | self.assertEqual(voxels.shape, (1, 5, 5, 5)) 33 | self.assertEqual(voxels[0, 0, 0, 0], 1) 34 | self.assertEqual(voxels[0, 1, 1, 1], 1) 35 | self.assertEqual(voxels[0, 2, 2, 2], 1) 36 | self.assertEqual(voxels[0, 3, 3, 3], 1) 37 | 38 | def test_prepare_inputs(self): 39 | inputs = Craft3DDataset.prepare_inputs( 40 | self.annotation, local_size=3, global_size=5, history=2 41 | ) 42 | self.assertEqual(set(inputs.keys()), {"local", "global", "center"}) 43 | self.assertTrue(torch.all(inputs["center"] == torch.tensor([2, 2, 2]))) 44 | self.assertEqual(inputs["local"].shape, (512, 3, 3, 3)) 45 | self.assertEqual(inputs["local"][1, 0, 0, 0], 1) 46 | self.assertEqual(inputs["local"][2, 1, 1, 1], 1) 47 | self.assertEqual(inputs["local"][3, 2, 2, 2], 1) 48 | self.assertEqual(inputs["local"][257, 0, 0, 0], 1) 49 | self.assertEqual(inputs["local"][258, 1, 1, 1], 0) 50 | self.assertEqual(inputs["local"][259, 2, 2, 2], 1) 51 | self.assertEqual(inputs["global"].shape, (1, 5, 5, 5)) 52 | self.assertEqual(inputs["global"][0, 0, 0, 0], 1) 53 | self.assertEqual(inputs["global"][0, 1, 1, 1], 1) 54 | self.assertEqual(inputs["global"][0, 2, 2, 2], 1) 55 | self.assertEqual(inputs["global"][0, 3, 3, 3], 1) 56 | 57 | def test_prepare_targets(self): 58 | targets = 
Craft3DDataset.prepare_targets( 59 | self.annotation, next_steps=2, local_size=3 60 | ) 61 | self.assertEqual(set(targets.keys()), {"coords", "types"}) 62 | self.assertTrue(torch.all(targets["coords"] == torch.tensor([-100, 26]))) 63 | self.assertTrue(torch.all(targets["types"] == torch.tensor([-100, 1]))) 64 | 65 | 66 | if __name__ == "__main__": 67 | unittest.main() 68 | -------------------------------------------------------------------------------- /test/test_evaluators.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # Copyright (c) Facebook, Inc. and its affiliates. 4 | # All rights reserved. 5 | # This source code is licensed under the license found in the 6 | # LICENSE file in the root directory of this source tree. 7 | 8 | import unittest 9 | 10 | import torch 11 | from voxelcnn.evaluators import Accuracy 12 | 13 | 14 | class TestAccuracy(unittest.TestCase): 15 | def test_forward(self): 16 | coords_outputs = torch.zeros((2, 1, 3, 3, 3)) 17 | coords_outputs[0, 0, 0, 0, 0] = 1.0 18 | coords_outputs[1, 0, 1, 1, 1] = 1.0 19 | 20 | types_outputs = torch.zeros((2, 2, 3, 3, 3)) 21 | types_outputs[0, 0, 0, 0, 0] = 1.0 22 | types_outputs[1, 1, 1, 1, 1] = 1.0 23 | 24 | outputs = {"coords": coords_outputs, "types": types_outputs} 25 | targets = { 26 | "coords": torch.tensor([[0, 1], [12, 13]]), 27 | "types": torch.tensor([[0, 0], [1, 1]]), 28 | } 29 | 30 | acc1 = Accuracy(next_steps=1)(outputs, targets).item() 31 | self.assertEqual(acc1, 0.5) 32 | 33 | acc2 = Accuracy(next_steps=2)(outputs, targets).item() 34 | self.assertEqual(acc2, 1.0) 35 | -------------------------------------------------------------------------------- /test/test_models.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # Copyright (c) Facebook, Inc. and its affiliates. 4 | # All rights reserved. 5 | # This source code is licensed under the license found in the 6 | # LICENSE file in the root directory of this source tree. 7 | 8 | import unittest 9 | 10 | import torch 11 | from voxelcnn.models import VoxelCNN 12 | 13 | 14 | class TestVoxelCNN(unittest.TestCase): 15 | def test_forward(self): 16 | model = VoxelCNN() 17 | inputs = { 18 | "local": torch.rand(5, 256 * 3, 7, 7, 7), 19 | "global": torch.rand(5, 1, 21, 21, 21), 20 | "center": torch.randint(128, size=(5, 3)), 21 | } 22 | outputs = model(inputs) 23 | self.assertEqual(set(outputs.keys()), {"coords", "types", "center"}) 24 | self.assertEqual(outputs["coords"].shape, (5, 1, 7, 7, 7)) 25 | self.assertEqual(outputs["types"].shape, (5, 256, 7, 7, 7)) 26 | self.assertEqual(outputs["center"].shape, (5, 3)) 27 | -------------------------------------------------------------------------------- /test/test_predictor.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # Copyright (c) Facebook, Inc. and its affiliates. 4 | # All rights reserved. 5 | # This source code is licensed under the license found in the 6 | # LICENSE file in the root directory of this source tree. 
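# Verifies that Predictor.decode turns argmax voxel outputs into (type, x, y, z)
# predictions: the predicted offset within the local window is shifted back to
# absolute coordinates around each example's center block.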
7 | 8 | import unittest 9 | 10 | import torch 11 | from voxelcnn.predictor import Predictor 12 | 13 | 14 | class TestPredictor(unittest.TestCase): 15 | def test_decode(self): 16 | coords_outputs = torch.zeros((2, 1, 3, 3, 3)) 17 | coords_outputs[0, 0, 0, 0, 0] = 1.0 18 | coords_outputs[1, 0, 1, 1, 1] = 1.0 19 | 20 | types_outputs = torch.zeros((2, 2, 3, 3, 3)) 21 | types_outputs[0, 0, 0, 0, 0] = 1.0 22 | types_outputs[1, 1, 1, 1, 1] = 1.0 23 | 24 | center = torch.tensor([[3, 3, 3], [10, 11, 12]]) 25 | outputs = {"coords": coords_outputs, "types": types_outputs, "center": center} 26 | 27 | predictions = Predictor.decode(outputs) 28 | self.assertTrue( 29 | torch.all(predictions == torch.tensor([[0, 2, 2, 2], [1, 10, 11, 12]])) 30 | ) 31 | -------------------------------------------------------------------------------- /voxelcnn/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # Copyright (c) Facebook, Inc. and its affiliates. 4 | # All rights reserved. 5 | # This source code is licensed under the license found in the 6 | # LICENSE file in the root directory of this source tree. 7 | -------------------------------------------------------------------------------- /voxelcnn/checkpoint.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # Copyright (c) Facebook, Inc. and its affiliates. 4 | # All rights reserved. 5 | # This source code is licensed under the license found in the 6 | # LICENSE file in the root directory of this source tree. 7 | 8 | import os 9 | import shutil 10 | from glob import glob 11 | from os import path as osp 12 | from typing import Any, Dict, Optional 13 | 14 | import torch 15 | from torch import nn, optim 16 | 17 | 18 | class Checkpointer(object): 19 | def __init__(self, root_dir: str): 20 | """ Save and load checkpoints. 
Maintain best metrics 21 | 22 | Args: 23 | root_dir (str): Directory to save the checkpoints 24 | """ 25 | super().__init__() 26 | self.root_dir = root_dir 27 | self.best_metric = -1 28 | self.best_epoch = None 29 | 30 | def save( 31 | self, 32 | model: nn.Module, 33 | optimizer: optim.Optimizer, 34 | scheduler: optim.lr_scheduler._LRScheduler, 35 | epoch: int, 36 | metric: float, 37 | ): 38 | if self.best_metric < metric: 39 | self.best_metric = metric 40 | self.best_epoch = epoch 41 | is_best = True 42 | else: 43 | is_best = False 44 | 45 | os.makedirs(self.root_dir, exist_ok=True) 46 | torch.save( 47 | { 48 | "model": model.state_dict(), 49 | "optimizer": optimizer.state_dict(), 50 | "scheduler": scheduler.state_dict(), 51 | "epoch": epoch, 52 | "best_epoch": self.best_epoch, 53 | "best_metric": self.best_metric, 54 | }, 55 | osp.join(self.root_dir, f"{epoch:02d}.pth"), 56 | ) 57 | 58 | if is_best: 59 | shutil.copy( 60 | osp.join(self.root_dir, f"{epoch:02d}.pth"), 61 | osp.join(self.root_dir, "best.pth"), 62 | ) 63 | 64 | def load( 65 | self, 66 | load_from: str, 67 | model: Optional[nn.Module] = None, 68 | optimizer: Optional[optim.Optimizer] = None, 69 | scheduler: Optional[optim.lr_scheduler._LRScheduler] = None, 70 | ) -> Dict[str, Any]: 71 | ckp = torch.load(self._get_path(load_from)) 72 | if model is not None: 73 | model.load_state_dict(ckp["model"]) 74 | if optimizer is not None: 75 | optimizer.load_state_dict(ckp["optimizer"]) 76 | if scheduler is not None: 77 | scheduler.load_state_dict(ckp["scheduler"]) 78 | return ckp 79 | 80 | def resume( 81 | self, 82 | resume_from: str, 83 | model: Optional[nn.Module] = None, 84 | optimizer: Optional[optim.Optimizer] = None, 85 | scheduler: Optional[optim.lr_scheduler._LRScheduler] = None, 86 | ) -> int: 87 | ckp = self.load( 88 | resume_from, model=model, optimizer=optimizer, scheduler=scheduler 89 | ) 90 | self.best_epoch = ckp["best_epoch"] 91 | self.best_metric = ckp["best_metric"] 92 | return ckp["epoch"] 93 | 94 | def _get_path(self, load_from: str) -> str: 95 | if load_from == "best": 96 | return osp.join(self.root_dir, "best.pth") 97 | if load_from == "latest": 98 | return sorted(glob(osp.join(self.root_dir, "[0-9]*.pth")))[-1] 99 | if load_from.isnumeric(): 100 | return osp.join(self.root_dir, f"{int(load_from):02d}.pth") 101 | return load_from 102 | -------------------------------------------------------------------------------- /voxelcnn/criterions.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # Copyright (c) Facebook, Inc. and its affiliates. 4 | # All rights reserved. 5 | # This source code is licensed under the license found in the 6 | # LICENSE file in the root directory of this source tree. 7 | 8 | from typing import Dict 9 | 10 | import torch 11 | from torch import nn 12 | 13 | 14 | class CrossEntropyLoss(nn.Module): 15 | def __init__(self, ignore_index: int = -100): 16 | super().__init__() 17 | self.loss = nn.CrossEntropyLoss(ignore_index=ignore_index) 18 | 19 | def forward( 20 | self, outputs: Dict[str, torch.Tensor], targets: Dict[str, torch.Tensor] 21 | ) -> Dict[str, torch.Tensor]: 22 | """ Compute CrossEntropyLoss for coordinates and block types predictions 23 | 24 | Args: 25 | outputs (dict): A dict of 26 | ``` 27 | { 28 | "coords": float tensor of shape (N, 1, D, D, D), 29 | "types": float tensor of shape (N, C, D, D, D), 30 | } 31 | ``` 32 | where N is the batch size, C is the number of block types, and D is the 33 | local size. 
34 | 35 | targets (dict): A dict of 36 | ``` 37 | { 38 | "coords": int tensor of shape (N, A), the encoded target coordinates 39 | "types": int tensor of shape (N, A), the target block types 40 | } 41 | ``` 42 | where N is the batch size and A is the number of next groundtruth 43 | actions. However, we only use the next one action to compute the loss. 44 | 45 | Returns: 46 | A dict of losses: coords_loss, types_loss, overall_loss, where each is a 47 | tensor of float scalar 48 | """ 49 | N, C, D, D, D = outputs["types"].shape 50 | assert outputs["coords"].shape == (N, 1, D, D, D) 51 | 52 | coords_targets = targets["coords"][:, 0].view(-1) 53 | types_targets = targets["types"][:, 0].view(-1) 54 | 55 | coords_outputs = outputs["coords"].view(N, -1) 56 | 57 | # Gather the type prediction on ground truth coordinate 58 | types_outputs = ( 59 | outputs["types"] 60 | .view(N, C, D * D * D) 61 | .gather(dim=2, index=coords_targets.view(N, 1, 1).expand(N, C, 1)) 62 | .view(N, -1) 63 | ) 64 | 65 | coords_loss = self.loss(coords_outputs, coords_targets) 66 | types_loss = self.loss(types_outputs, types_targets) 67 | 68 | return { 69 | "coords_loss": coords_loss, 70 | "types_loss": types_loss, 71 | "overall_loss": coords_loss + types_loss, 72 | } 73 | -------------------------------------------------------------------------------- /voxelcnn/datasets.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # Copyright (c) Facebook, Inc. and its affiliates. 4 | # All rights reserved. 5 | # This source code is licensed under the license found in the 6 | # LICENSE file in the root directory of this source tree. 7 | 8 | import json 9 | import logging 10 | import os 11 | import tarfile 12 | import warnings 13 | from os import path as osp 14 | from typing import Dict, Optional, Tuple 15 | 16 | import numpy as np 17 | import requests 18 | import torch 19 | from torch.utils.data import Dataset 20 | 21 | 22 | class Craft3DDataset(Dataset): 23 | NUM_BLOCK_TYPES = 256 24 | URL = "https://craftassist.s3-us-west-2.amazonaws.com/pubr/house_data.tar.gz" 25 | 26 | def __init__( 27 | self, 28 | data_dir: str, 29 | subset: str, 30 | local_size: int = 7, 31 | global_size: int = 21, 32 | history: int = 3, 33 | next_steps: int = -1, 34 | max_samples: Optional[int] = None, 35 | logger: Optional[logging.Logger] = None, 36 | ): 37 | """ Download and construct 3D-Craft dataset 38 | 39 | data_dir (str): Directory to save/load the dataset 40 | subset (str): 'train' | 'val' | 'test' 41 | local_size (int): Local context size. Default: 7 42 | global_size (int): Global context size. Default: 21 43 | history (int): Number of previous steps considered as inputs. Default: 3 44 | next_steps (int): Number of next steps considered as targets. Default: -1, 45 | meaning till the end 46 | max_samples (int, optional): Limit the maximum number of samples. Used for 47 | faster debugging. Default: None, meaning no limit 48 | logger (logging.Logger, optional): A logger. 
Default: None, meaning will print 49 | to stdout 50 | """ 51 | super().__init__() 52 | self.data_dir = data_dir 53 | self.subset = subset 54 | self.local_size = local_size 55 | self.global_size = global_size 56 | self.history = history 57 | self.max_local_distance = self.local_size // 2 58 | self.max_global_distance = self.global_size // 2 59 | self.next_steps = next_steps 60 | self.max_samples = max_samples 61 | self.logger = logger 62 | 63 | if self.subset not in ("train", "val", "test"): 64 | raise ValueError(f"Unknown subset: {self.subset}") 65 | 66 | if not self._has_raw_data(): 67 | self._download() 68 | 69 | self._load_dataset() 70 | self._find_valid_items() 71 | 72 | self.print_stats() 73 | 74 | def print_stats(self): 75 | num_blocks_per_house = [len(x) for x in self._valid_indices.values()] 76 | ret = "\n" 77 | ret += f"3D Craft Dataset\n" 78 | ret += f"================\n" 79 | ret += f" data_dir: {self.data_dir}\n" 80 | ret += f" subset: {self.subset}\n" 81 | ret += f" local_size: {self.local_size}\n" 82 | ret += f" global_size: {self.global_size}\n" 83 | ret += f" history: {self.history}\n" 84 | ret += f" next_steps: {self.next_steps}\n" 85 | ret += f" max_samples: {self.max_samples}\n" 86 | ret += f" --------------\n" 87 | ret += f" num_houses: {len(self._valid_indices)}\n" 88 | ret += f" avg_blocks_per_house: {np.mean(num_blocks_per_house):.3f}\n" 89 | ret += f" min_blocks_per_house: {min(num_blocks_per_house)}\n" 90 | ret += f" max_blocks_per_house: {max(num_blocks_per_house)}\n" 91 | ret += f" total_valid_blocks: {len(self._flattened_valid_indices)}\n" 92 | ret += "\n" 93 | self._log(ret) 94 | 95 | def __len__(self) -> int: 96 | """ Get number of valid blocks """ 97 | ret = len(self._flattened_valid_indices) 98 | if self.max_samples is not None: 99 | ret = min(ret, self.max_samples) 100 | return ret 101 | 102 | def __getitem__( 103 | self, index: int 104 | ) -> Tuple[Dict[str, torch.Tensor], Dict[str, torch.Tensor]]: 105 | """ Get the index-th valid block 106 | 107 | Returns: 108 | A tuple of inputs and targets, where inputs is a dict of 109 | ``` 110 | { 111 | "local": float tensor of shape (C * H, D, D, D), 112 | "global": float tensor of shape (1, G, G, G), 113 | "center": int tensor of shape (3,), the coordinate of the last block 114 | } 115 | ``` 116 | where C is the number of block types, H is the history length, D is the 117 | local size, and G is the global size. 118 | 119 | targets is a dict of 120 | ``` 121 | { 122 | "coords": int tensor of shape (A,) 123 | "types": int tensor of shape (A,) 124 | } 125 | ``` 126 | where A is the number of next steps to be considered as targets. 127 | """ 128 | house_id, block_id = self._flattened_valid_indices[index] 129 | annotation = self._all_houses[house_id] 130 | inputs = Craft3DDataset.prepare_inputs( 131 | annotation[: block_id + 1], 132 | local_size=self.local_size, 133 | global_size=self.global_size, 134 | history=self.history, 135 | ) 136 | targets = Craft3DDataset.prepare_targets( 137 | annotation[block_id:], 138 | next_steps=self.next_steps, 139 | local_size=self.local_size, 140 | ) 141 | return inputs, targets 142 | 143 | def get_house(self, index: int) -> torch.Tensor: 144 | """ Get the annotation for the index-th house. Use for thorough evaluation """ 145 | return self._all_houses[index] 146 | 147 | def get_num_houses(self) -> int: 148 | """ Get the total number of houses. 
Use for thorough evaluation """ 149 | return len(self._all_houses) 150 | 151 | @staticmethod 152 | @torch.no_grad() 153 | def prepare_inputs( 154 | annotation: torch.Tensor, 155 | local_size: int = 7, 156 | global_size: int = 21, 157 | history: int = 3, 158 | ) -> Dict[str, torch.Tensor]: 159 | """ Convert annotation to input tensors 160 | 161 | Args: 162 | annotation (torch.Tensor): M x 4 int tensor, where M is the number of 163 | prebuilt blocks. The first column is the block type, followed by the 164 | block coordinates. 165 | 166 | Returns: 167 | ``` 168 | { 169 | "local": float tensor of shape (C * H, D, D, D), 170 | "global": float tensor of shape (1, G, G, G), 171 | "center": int tensor of shape (3,), the coordinate of the last block 172 | } 173 | ``` 174 | where C is the number of block types, H is the history length, D is the 175 | local size, and G is the global size. 176 | """ 177 | global_inputs = Craft3DDataset._convert_to_voxels( 178 | annotation, size=global_size, occupancy_only=True 179 | ) 180 | local_inputs = Craft3DDataset._convert_to_voxels( 181 | annotation, size=local_size, occupancy_only=False 182 | ) 183 | if len(annotation) == 0: 184 | return { 185 | "local": local_inputs.repeat(history, 1, 1, 1), 186 | "global": global_inputs, 187 | "center": torch.zeros((3,), dtype=torch.int64), 188 | } 189 | 190 | last_coord = annotation[-1, 1:] 191 | center_coord = last_coord.new_full((3,), local_size // 2) 192 | local_history = [local_inputs] 193 | for i in range(len(annotation) - 1, len(annotation) - history, -1): 194 | if i < 0: 195 | local_history.append(torch.zeros_like(local_inputs)) 196 | else: 197 | prev_inputs = local_history[-1].clone() 198 | prev_coord = annotation[i, 1:] - last_coord + center_coord 199 | if all((prev_coord >= 0) & (prev_coord < local_size)): 200 | x, y, z = prev_coord 201 | prev_inputs[:, x, y, z] = 0 202 | local_history.append(prev_inputs) 203 | local_inputs = torch.cat(local_history, dim=0) 204 | return {"local": local_inputs, "global": global_inputs, "center": last_coord} 205 | 206 | @staticmethod 207 | @torch.no_grad() 208 | def prepare_targets( 209 | annotation: torch.Tensor, next_steps: int = 1, local_size: int = 7 210 | ) -> Dict[str, torch.Tensor]: 211 | """ Convert annotation to target tensors 212 | 213 | Args: 214 | annotation (torch.Tensor): (M + 1) x 4 int tensor, where M is the number of 215 | blocks to build, plus one for the last built block. The first column 216 | is the block type, followed by the block coordinates. 
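For intuition, each target coordinate is flattened into a single class index using the offsets ``[D * D, D, 1]`` that appear in the code below, i.e. ``index = x * D * D + y * D + z``; ``Predictor.decode`` in predictor.py inverts this mapping. A minimal round-trip sketch (not part of the library), assuming the default ``local_size = 7``:
```
import torch

D = 7  # local context size (local_size)
offsets = torch.tensor([D * D, D, 1])

# Encode a local (x, y, z) coordinate into one flat index, as prepare_targets does
coord = torch.tensor([3, 5, 1])
flat = int((coord * offsets).sum())  # 3*49 + 5*7 + 1 == 183

# Decode it back, mirroring Predictor.decode
z = flat % D
x = flat // (D * D)
y = (flat - z - x * D * D) // D
assert [x, y, z] == [3, 5, 1]
```
Coordinates that fall outside the local cube are masked to ``-100``, matching the default ``ignore_index`` of the cross-entropy loss in criterions.py.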
217 | 218 | Returns: 219 | ``` 220 | { 221 | "coords": int tensor of shape (A,) 222 | "types": int tensor of shape (A,) 223 | } 224 | ``` 225 | where A is the number of next steps to be considered as targets 226 | """ 227 | coords_targets = torch.full((next_steps,), -100, dtype=torch.int64) 228 | types_targets = coords_targets.clone() 229 | 230 | if len(annotation) <= 1: 231 | return {"coords": coords_targets, "types": types_targets} 232 | 233 | offsets = torch.tensor([local_size * local_size, local_size, 1]) 234 | last_coord = annotation[0, 1:] 235 | center_coord = last_coord.new_full((3,), local_size // 2) 236 | 237 | N = min(1 + next_steps, len(annotation)) 238 | next_types = annotation[1:N, 0].clone() 239 | next_coords = annotation[1:N, 1:] - last_coord + center_coord 240 | mask = (next_coords < 0) | (next_coords >= local_size) 241 | mask = mask.any(dim=1) 242 | next_coords = (next_coords * offsets).sum(dim=1) 243 | next_coords[mask] = -100 244 | next_types[mask] = -100 245 | 246 | coords_targets[: len(next_coords)] = next_coords 247 | types_targets[: len(next_types)] = next_types 248 | 249 | return {"coords": coords_targets, "types": types_targets} 250 | 251 | @staticmethod 252 | def _convert_to_voxels( 253 | annotation: torch.Tensor, size: int, occupancy_only: bool = False 254 | ) -> torch.Tensor: 255 | voxels_shape = ( 256 | (1, size, size, size) 257 | if occupancy_only 258 | else (Craft3DDataset.NUM_BLOCK_TYPES, size, size, size) 259 | ) 260 | if len(annotation) == 0: 261 | return torch.zeros(voxels_shape, dtype=torch.float32) 262 | 263 | annotation = annotation.clone() 264 | if occupancy_only: 265 | # No block types. Just coordinate occupancy 266 | annotation[:, 0] = 0 267 | # Shift the coordinates to make the last block centered 268 | last_coord = annotation[-1, 1:] 269 | center_coord = last_coord.new_tensor([size // 2, size // 2, size // 2]) 270 | annotation[:, 1:] += center_coord - last_coord 271 | # Find valid annotations that are inside the cube 272 | valid_mask = (annotation[:, 1:] >= 0) & (annotation[:, 1:] < size) 273 | valid_mask = valid_mask.all(dim=1) 274 | annotation = annotation[valid_mask] 275 | # Use sparse tensor to construct the voxels cube 276 | return torch.sparse.FloatTensor( 277 | annotation.t(), torch.ones(len(annotation)), voxels_shape 278 | ).to_dense() 279 | 280 | def _log(self, msg: str): 281 | if self.logger is None: 282 | print(msg) 283 | else: 284 | self.logger.info(msg) 285 | 286 | def _has_raw_data(self) -> bool: 287 | return osp.isdir(osp.join(self.data_dir, "houses")) 288 | 289 | def _download(self): 290 | os.makedirs(self.data_dir, exist_ok=True) 291 | 292 | tar_path = osp.join(self.data_dir, "houses.tar.gz") 293 | if not osp.isfile(tar_path): 294 | self._log(f"Downloading dataset from {Craft3DDataset.URL}") 295 | response = requests.get(Craft3DDataset.URL, allow_redirects=True) 296 | if response.status_code != 200: 297 | raise RuntimeError( 298 | f"Failed to retrieve dataset from url: {Craft3DDataset.URL}.
" 299 | f"Status: {response.status_code}" 300 | ) 301 | with open(tar_path, "wb") as f: 302 | f.write(response.content) 303 | 304 | extracted_dir = osp.join(self.data_dir, "houses") 305 | if not osp.isdir(extracted_dir): 306 | self._log(f"Extracting dataset to {extracted_dir}") 307 | tar = tarfile.open(tar_path, "r") 308 | tar.extractall(self.data_dir) 309 | 310 | def _load_dataset(self): 311 | splits_path = osp.join(self.data_dir, "splits.json") 312 | if not osp.isfile(splits_path): 313 | raise RuntimeError(f"Split file not found at: {splits_path}") 314 | 315 | with open(splits_path, "r") as f: 316 | splits = json.load(f) 317 | 318 | self._all_houses = [] 319 | max_len = 0 320 | for filename in splits[self.subset]: 321 | annotation = osp.join(self.data_dir, "houses", filename, "placed.json") 322 | if not osp.isfile(annotation): 323 | warnings.warn(f"No annotation file for: {annotation}") 324 | continue 325 | annotation = self._load_annotation(annotation) 326 | if len(annotation) >= 100: 327 | self._all_houses.append(annotation) 328 | max_len = max(max_len, len(annotation)) 329 | 330 | if self.next_steps <= 0: 331 | self.next_steps = max_len 332 | 333 | def _load_annotation(self, annotation_path: str) -> torch.Tensor: 334 | with open(annotation_path, "r") as f: 335 | annotation = json.load(f) 336 | final_house = {} 337 | types_and_coords = [] 338 | last_timestamp = -1 339 | for i, item in enumerate(annotation): 340 | timestamp, annotator_id, coordinate, block_info, action = item 341 | assert timestamp >= last_timestamp 342 | last_timestamp = timestamp 343 | coordinate = tuple(np.asarray(coordinate).astype(np.int64).tolist()) 344 | block_type = np.asarray(block_info, dtype=np.uint8).astype(np.int64)[0] 345 | if action == "B": 346 | final_house.pop(coordinate, None) 347 | else: 348 | final_house[coordinate] = i 349 | types_and_coords.append((block_type,) + coordinate) 350 | indices = sorted(final_house.values()) 351 | types_and_coords = [types_and_coords[i] for i in indices] 352 | return torch.tensor(types_and_coords, dtype=torch.int64) 353 | 354 | def _find_valid_items(self): 355 | self._valid_indices = {} 356 | for i, annotation in enumerate(self._all_houses): 357 | diff_coord = annotation[:-1, 1:] - annotation[1:, 1:] 358 | valids = abs(diff_coord) <= self.max_local_distance 359 | valids = valids.all(dim=1).nonzero(as_tuple=True)[0] 360 | self._valid_indices[i] = valids.tolist() 361 | 362 | self._flattened_valid_indices = [] 363 | for i, indices in self._valid_indices.items(): 364 | for j in indices: 365 | self._flattened_valid_indices.append((i, j)) 366 | 367 | 368 | if __name__ == "__main__": 369 | work_dir = osp.join(osp.dirname(osp.abspath(__file__)), "..") 370 | dataset = Craft3DDataset(osp.join(work_dir, "data"), "val") 371 | for i in range(5): 372 | inputs, targets = dataset[i] 373 | print(targets) 374 | -------------------------------------------------------------------------------- /voxelcnn/evaluators.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # Copyright (c) Facebook, Inc. and its affiliates. 4 | # All rights reserved. 5 | # This source code is licensed under the license found in the 6 | # LICENSE file in the root directory of this source tree. 
7 | 8 | from collections import defaultdict 9 | from typing import Dict, Tuple 10 | 11 | import torch 12 | from torch import nn 13 | from tqdm import tqdm 14 | 15 | from .datasets import Craft3DDataset 16 | from .predictor import Predictor 17 | 18 | 19 | class Accuracy(nn.Module): 20 | def __init__(self, next_steps: int = 1): 21 | """ Compute the accuracy of coordinates and types predictions 22 | 23 | Args: 24 | next_steps (int): The number of future ground truth steps to be considered. 25 | Default: 1 26 | """ 27 | super().__init__() 28 | self.next_steps = next_steps 29 | 30 | def step( 31 | self, outputs: Dict[str, torch.Tensor], targets: Dict[str, torch.Tensor] 32 | ) -> torch.Tensor: 33 | """ Compute sample-wise accuracy in a minibatch 34 | 35 | Args: 36 | outputs (dict): A dict of 37 | ``` 38 | { 39 | "coords": float tensor of shape (N, 1, D, D, D), 40 | "types": float tensor of shape (N, C, D, D, D), 41 | } 42 | ``` 43 | where N is the batch size, C is the number of block types, and D is the 44 | local size. 45 | 46 | targets (dict): A dict of 47 | ``` 48 | { 49 | "coords": int tensor of shape (N, A), the encoded target coordinates 50 | "types": int tensor of shape (N, A), the target block types 51 | } 52 | ``` 53 | where N is the batch size and A is the number of next groundtruth 54 | actions. 55 | """ 56 | N, C, D, D, D = outputs["types"].shape 57 | assert outputs["coords"].shape == (N, 1, D, D, D) 58 | assert targets["coords"].shape == targets["types"].shape 59 | 60 | K = self.next_steps if self.next_steps > 0 else targets["coords"].shape[1] 61 | if targets["coords"].shape[1] < K: 62 | raise RuntimeError(f"Targets do not contain next {K} steps") 63 | 64 | coords_targets = targets["coords"][:, :K].view(N, -1) 65 | types_targets = targets["types"][:, :K].view(N, -1) 66 | 67 | coords_predictions = outputs["coords"].view(N, -1).argmax(dim=1, keepdim=True) 68 | coords_correct = (coords_predictions == coords_targets).any(dim=1) 69 | 70 | types_predictions = ( 71 | outputs["types"] 72 | .view(N, C, D * D * D) 73 | .gather(dim=2, index=coords_predictions.view(N, 1, 1).expand(N, C, 1)) 74 | .argmax(dim=1) 75 | .view(N, -1) 76 | ) 77 | types_correct = (types_predictions == types_targets).any(dim=1) 78 | 79 | both_correct = coords_correct & types_correct 80 | return both_correct 81 | 82 | def stop(self, correct: torch.Tensor) -> torch.Tensor: 83 | """ Average over batched results 84 | 85 | Args: 86 | correct (torch.Tensor): (N,) bool vector, whether each sample is correct 87 | 88 | Returns: 89 | A float scalar tensor of averaged accuracy 90 | """ 91 | return correct.float().mean() 92 | 93 | def forward( 94 | self, outputs: Dict[str, torch.Tensor], targets: Dict[str, torch.Tensor] 95 | ) -> torch.Tensor: 96 | return self.stop(self.step(outputs, targets)) 97 | 98 | 99 | class CCA(object): 100 | def __init__( 101 | self, 102 | percentages: Tuple[float] = (0.1, 0.25, 0.5, 0.75, 0.9), 103 | local_size: int = 7, 104 | global_size: int = 21, 105 | history: int = 3, 106 | ): 107 | """ Consecutive Correct Actions 108 | 109 | Args: 110 | percentages (tuple): Evaluate based on these percentages of blocks 111 | prebuilt for each house. Average the CCA over these results 112 | local_size (int): Local context size. Default: 7 113 | global_size (int): Global context size. Default: 21 114 | history (int): Number of previous steps considered as inputs. 
Default: 3 115 | """ 116 | super().__init__() 117 | self.percentages = percentages 118 | self.local_size = local_size 119 | self.global_size = global_size 120 | self.history = history 121 | 122 | @torch.no_grad() 123 | def evaluate(self, dataset: Craft3DDataset, model: nn.Module) -> Dict[str, float]: 124 | """ Evaluate a model by CCA over a given dataset 125 | 126 | Args: 127 | dataset (Craft3DDataset): A dataset 128 | model (nn.Module): A VoxelCNN model 129 | 130 | Returns: 131 | A dict of string to float 132 | ``` 133 | { 134 | "cca_x%": where x is the percentage of prebuilt house, 135 | "cca_avg": averaged CCA across all the percentages, 136 | } 137 | ``` 138 | """ 139 | predictor = Predictor( 140 | model.eval(), 141 | local_size=self.local_size, 142 | global_size=self.global_size, 143 | history=self.history, 144 | ) 145 | all_results = defaultdict(list) 146 | for i in tqdm(range(dataset.get_num_houses())): 147 | house = dataset.get_house(i) 148 | for p in self.percentages: 149 | start = int(len(house) * p) 150 | predictions = predictor.predict_until_wrong(house, start=start) 151 | all_results[p].append(len(predictions)) 152 | 153 | results = { 154 | f"cca_{k:.0%}": float(torch.tensor(v).float().mean()) 155 | for k, v in all_results.items() 156 | } 157 | results["cca_avg"] = float(torch.tensor(list(results.values())).float().mean()) 158 | return results 159 | 160 | 161 | class MTC(object): 162 | def __init__( 163 | self, 164 | percentage: float = 0.1, 165 | local_size: int = 7, 166 | global_size: int = 21, 167 | history: int = 3, 168 | ): 169 | """ Mistakes to Complete 170 | 171 | Args: 172 | percentage (float): Evaluate based on this percentage of blocks 173 | prebuilt for each house 174 | local_size (int): Local context size. Default: 7 175 | global_size (int): Global context size. Default: 21 176 | history (int): Number of previous steps considered as inputs. 
Default: 3 177 | """ 178 | super().__init__() 179 | self.percentage = percentage 180 | self.local_size = local_size 181 | self.global_size = global_size 182 | self.history = history 183 | 184 | @torch.no_grad() 185 | def evaluate(self, dataset: Craft3DDataset, model: nn.Module) -> Dict[str, float]: 186 | """ Evaluate a model by MTC over a given dataset 187 | 188 | Args: 189 | dataset (Craft3DDataset): A dataset 190 | model (nn.Module): A VoxelCNN model 191 | 192 | Returns: 193 | A dict of string to float 194 | ``` 195 | { 196 | "mtc": Unnormalized MTC, 197 | "mtc_normed": Normalized MTC by the total blocks of each house, 198 | } 199 | ``` 200 | """ 201 | predictor = Predictor( 202 | model.eval(), 203 | local_size=self.local_size, 204 | global_size=self.global_size, 205 | history=self.history, 206 | ) 207 | unnormed = [] 208 | normed = [] 209 | for i in tqdm(range(dataset.get_num_houses())): 210 | house = dataset.get_house(i) 211 | start = int(len(house) * self.percentage) 212 | is_correct = predictor.predict_until_end(house, start=start)["is_correct"] 213 | num_mistakes = len(is_correct) - int(is_correct.sum()) 214 | total = len(is_correct) 215 | unnormed.append(num_mistakes) 216 | normed.append(num_mistakes / total) 217 | return { 218 | "mtc": float(torch.tensor(unnormed).float().mean()), 219 | "mtc_normed": float(torch.tensor(normed).float().mean()), 220 | } 221 | 222 | def _compute(self, predictor: Predictor, annotation: torch.Tensor) -> int: 223 | start = int(len(annotation) * self.percentage) 224 | is_correct = predictor.predict_until_end(annotation, start=start)["is_correct"] 225 | num_mistakes = len(is_correct) - is_correct.sum() 226 | return num_mistakes 227 | -------------------------------------------------------------------------------- /voxelcnn/models.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # Copyright (c) Facebook, Inc. and its affiliates. 4 | # All rights reserved. 5 | # This source code is licensed under the license found in the 6 | # LICENSE file in the root directory of this source tree. 7 | 8 | from typing import Dict, List 9 | 10 | import torch 11 | from torch import nn 12 | 13 | 14 | def conv3d( 15 | in_channels: int, 16 | out_channels: int, 17 | kernel_size: int = 3, 18 | stride: int = 1, 19 | padding: int = 1, 20 | ) -> List[nn.Module]: 21 | conv = nn.Conv3d( 22 | in_channels, 23 | out_channels, 24 | kernel_size, 25 | padding=padding, 26 | stride=stride, 27 | bias=False, 28 | ) 29 | bn = nn.BatchNorm3d(out_channels) 30 | relu = nn.ReLU() 31 | return [conv, bn, relu] 32 | 33 | 34 | class VoxelCNN(nn.Module): 35 | def __init__( 36 | self, 37 | local_size: int = 7, 38 | global_size: int = 21, 39 | history: int = 3, 40 | num_block_types: int = 256, 41 | num_features: int = 16, 42 | ): 43 | """ VoxelCNN model 44 | 45 | Args: 46 | local_size (int): Local context size. Default: 7 47 | global_size (int): Global context size. Default: 21 48 | history (int): Number of previous steps considered as inputs. Default: 3 49 | num_block_types (int): Total number of different block types. Default: 256 50 | num_features (int): Number of channels output by the encoders. 
Default: 16 51 | """ 52 | super().__init__() 53 | self.local_size = local_size 54 | self.global_size = global_size 55 | self.history = history 56 | self.num_block_types = num_block_types 57 | self.num_features = num_features 58 | 59 | self.local_encoder = self._build_local_encoder() 60 | self.global_encoder = self._build_global_encoder() 61 | 62 | self.feature_extractor = nn.Sequential( 63 | *conv3d(self.num_features * 2, self.num_features, kernel_size=1, padding=0) 64 | ) 65 | self.coords_predictor = nn.Conv3d( 66 | self.num_features, 1, kernel_size=1, padding=0 67 | ) 68 | self.types_predictor = nn.Conv3d( 69 | self.num_features, self.num_block_types, kernel_size=1, padding=0 70 | ) 71 | 72 | self._init_params() 73 | 74 | def forward(self, inputs: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]: 75 | """ 76 | Args: 77 | inputs (dict): A dict of inputs 78 | ``` 79 | { 80 | "local": float tensor of shape (N, C * H, D, D, D), 81 | "global": float tensor of shape (N, 1, G, G, G), 82 | "center": int tensor of shape (N, 3), the coordinate of the last 83 | blocks, optional 84 | } 85 | ``` 86 | where N is the batch size, C is the number of block types, H is the 87 | history length, D is the local size, and G is the global size. 88 | 89 | Returns: 90 | A dict of coordinates and types scores 91 | ``` 92 | { 93 | "coords": float tensor of shape (N, 1, D, D, D), 94 | "types": float tensor of shape (N, C, D, D, D), 95 | "center": int tensor of shape (N, 3), the coordinate of the last blocks. 96 | Output only when inputs have "center" 97 | } 98 | ``` 99 | """ 100 | outputs = torch.cat( 101 | [ 102 | self.local_encoder(inputs["local"]), 103 | self.global_encoder(inputs["global"]), 104 | ], 105 | dim=1, 106 | ) 107 | outputs = self.feature_extractor(outputs) 108 | ret = { 109 | "coords": self.coords_predictor(outputs), 110 | "types": self.types_predictor(outputs), 111 | } 112 | if "center" in inputs: 113 | ret["center"] = inputs["center"] 114 | return ret 115 | 116 | def _build_local_encoder(self) -> nn.Module: 117 | layers = conv3d(self.num_block_types * self.history, self.num_features) 118 | for _ in range(3): 119 | layers.extend(conv3d(self.num_features, self.num_features)) 120 | return nn.Sequential(*layers) 121 | 122 | def _build_global_encoder(self) -> nn.Module: 123 | layers = conv3d(1, self.num_features) 124 | layers.extend(conv3d(self.num_features, self.num_features)) 125 | layers.append( 126 | nn.AdaptiveMaxPool3d((self.local_size, self.local_size, self.local_size)) 127 | ) 128 | layers.extend(conv3d(self.num_features, self.num_features)) 129 | return nn.Sequential(*layers) 130 | 131 | def _init_params(self): 132 | for m in self.modules(): 133 | if isinstance(m, nn.Conv3d): 134 | if m.bias is None: 135 | # Normal Conv3d layers 136 | nn.init.kaiming_normal_(m.weight, mode="fan_out") 137 | else: 138 | # Last layers of coords and types predictions 139 | nn.init.normal_(m.weight, mean=0, std=0.001) 140 | nn.init.constant_(m.bias, 0) 141 | elif isinstance(m, nn.BatchNorm3d): 142 | nn.init.constant_(m.weight, 1) 143 | nn.init.constant_(m.bias, 0) 144 | -------------------------------------------------------------------------------- /voxelcnn/predictor.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # Copyright (c) Facebook, Inc. and its affiliates. 4 | # All rights reserved. 5 | # This source code is licensed under the license found in the 6 | # LICENSE file in the root directory of this source tree. 
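Before moving on to the predictor, here is a quick shape check of the two-tower VoxelCNN above: the local branch consumes a one-hot history stack, the global branch a binary occupancy grid pooled down to the local size, and two 1x1x1 convolutional heads score coordinates and block types. A usage sketch (not part of the library), assuming the repository root is on ``PYTHONPATH`` and the default hyperparameters:
```
import torch
from voxelcnn.models import VoxelCNN

model = VoxelCNN().eval()  # defaults: local_size=7, global_size=21, history=3
inputs = {
    "local": torch.zeros(2, 256 * 3, 7, 7, 7),  # (N, C * H, D, D, D)
    "global": torch.zeros(2, 1, 21, 21, 21),    # (N, 1, G, G, G)
    "center": torch.zeros(2, 3, dtype=torch.int64),
}
outputs = model(inputs)
assert outputs["coords"].shape == (2, 1, 7, 7, 7)
assert outputs["types"].shape == (2, 256, 7, 7, 7)
assert "center" in outputs  # passed through for later decoding
```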
7 | 8 | from typing import Dict 9 | 10 | import torch 11 | from torch import nn 12 | 13 | from .datasets import Craft3DDataset 14 | from .utils import OrderedSet 15 | 16 | 17 | class Predictor(object): 18 | def __init__( 19 | self, 20 | model: nn.Module, 21 | local_size: int = 7, 22 | global_size: int = 21, 23 | history: int = 3, 24 | ): 25 | """ Predictor for inference and evaluation 26 | 27 | Args: 28 | model (nn.Module): VoxelCNN model 29 | local_size (int): Local context size. Default: 7 30 | global_size (int): Global context size. Default: 21 31 | history (int): Number of previous steps considered as inputs. Default: 3 32 | """ 33 | super().__init__() 34 | self.model = model 35 | self.local_size = local_size 36 | self.global_size = global_size 37 | self.history = history 38 | 39 | @torch.no_grad() 40 | def predict(self, annotation: torch.Tensor, steps: int = 1) -> torch.Tensor: 41 | """ Continuous prediction for given steps starting from a prebuilt house 42 | 43 | Args: 44 | annotation (torch.Tensor): M x 4 int tensor, where M is the number of 45 | prebuilt blocks. The first column is the block type, followed by the 46 | absolute block coordinates. 47 | steps (int): How many steps to predict. Default: 1 48 | 49 | Returns: 50 | An int tensor of (steps, 4) if steps > 1, otherwise (4,), denoting the 51 | predicted blocks. The first column is the block type, followed by the 52 | absolute block coordinates. 53 | """ 54 | predictions = [] 55 | for _ in range(steps): 56 | inputs = Craft3DDataset.prepare_inputs( 57 | annotation, 58 | local_size=self.local_size, 59 | global_size=self.global_size, 60 | history=self.history, 61 | ) 62 | inputs = {k: v.unsqueeze(0) for k, v in inputs.items()} 63 | if next(self.model.parameters()).is_cuda: 64 | inputs = {k: v.cuda() for k, v in inputs.items()} 65 | outputs = self.model(inputs) 66 | prediction = self.decode(outputs).cpu() 67 | predictions.append(prediction) 68 | annotation = torch.cat([annotation, prediction], dim=0) 69 | predictions = torch.cat(predictions, dim=0) 70 | return predictions.squeeze() 71 | 72 | @torch.no_grad() 73 | def predict_until_wrong( 74 | self, annotation: torch.Tensor, start: int = 0 75 | ) -> torch.Tensor: 76 | """ Starting from a house, predict until a wrong prediction occurs 77 | 78 | Args: 79 | annotation (torch.Tensor): M x 4 int tensor, where M is the number of 80 | prebuilt blocks. The first column is the block type, followed by the 81 | absolute block coordinates. 82 | start (int): Starting from this number of blocks prebuilt 83 | 84 | Returns: 85 | An int tensor of (steps, 4), denoting the correctly predicted blocks. The 86 | first column is the block type, followed by the absolute block 87 | coordinates. 88 | """ 89 | built = annotation[:start].tolist() 90 | to_build = {tuple(x) for x in annotation[start:].tolist()} 91 | predictions = [] 92 | while len(to_build) > 0: 93 | block = self.predict(torch.tensor(built, dtype=torch.int64)).tolist() 94 | if tuple(block) not in to_build: 95 | break 96 | predictions.append(block) 97 | built.append(block) 98 | to_build.remove(tuple(block)) 99 | return torch.tensor(predictions) 100 | 101 | @torch.no_grad() 102 | def predict_until_end( 103 | self, annotation: torch.Tensor, start: int = 0 104 | ) -> Dict[str, torch.Tensor]: 105 | """ Starting from a house, predict until the house is completed 106 | 107 | Args: 108 | annotation (torch.Tensor): M x 4 int tensor, where M is the number of 109 | prebuilt blocks.
The first column is the block type, followed by the 110 | absolute block coordinates. 111 | start (int): Starting from this number of blocks prebuilt 112 | 113 | Returns: 114 | A dict of 115 | ``` 116 | { 117 | "predictions": int tensor of shape (steps, 4), 118 | "targets": int tensor of shape (steps, 4), 119 | "is_correct": bool tensor of shape (steps,) 120 | } 121 | ``` 122 | where ``steps`` is the number of blocks predicted, in order to complete the 123 | house. ``predictions`` is the model predictions, which could be wrong at 124 | some steps. ``targets`` contains the corrected blocks. ``is_correct`` 125 | denotes whether the prediction is correct at certain step. 126 | """ 127 | built = annotation[:start].tolist() 128 | to_build = OrderedSet(tuple(x) for x in annotation[start:].tolist()) 129 | 130 | predictions = [] 131 | targets = [] 132 | is_correct = [] 133 | while len(to_build) > 0: 134 | block = self.predict(torch.tensor(built, dtype=torch.int64)).tolist() 135 | predictions.append(block) 136 | if tuple(block) in to_build: 137 | # Prediction is correct. Add it to the house 138 | is_correct.append(True) 139 | targets.append(block) 140 | built.append(block) 141 | to_build.remove(tuple(block)) 142 | else: 143 | # Prediction is wrong. Correct it by the ground truth block 144 | is_correct.append(False) 145 | correction = to_build.pop(last=False) 146 | targets.append(correction) 147 | built.append(correction) 148 | 149 | return { 150 | "predictions": torch.tensor(predictions), 151 | "targets": torch.tensor(targets), 152 | "is_correct": torch.tensor(is_correct), 153 | } 154 | 155 | @staticmethod 156 | def decode(outputs: Dict[str, torch.Tensor]) -> torch.Tensor: 157 | """ Convert model output scores to absolute block coordinates and types 158 | 159 | Args: 160 | outputs (dict): A dict of coordinates and types scores 161 | ``` 162 | { 163 | "coords": float tensor of shape (N, 1, D, D, D), 164 | "types": float tensor of shape (N, C, D, D, D), 165 | "center": int tensor of shape (N, 3), the coordinate of the last 166 | blocks 167 | } 168 | ``` 169 | where N is the batch size, C is the number of block types, D is the 170 | local context size 171 | Returns: 172 | An int tensor of shape (N, 4), where the first column is the block type, 173 | followed by absolute block coordinates 174 | """ 175 | N, C, D, D, D = outputs["types"].shape 176 | assert outputs["coords"].shape == (N, 1, D, D, D) 177 | assert outputs["center"].shape == (N, 3) 178 | 179 | coords_predictions = outputs["coords"].view(N, -1).argmax(dim=1) 180 | 181 | types_predictions = ( 182 | outputs["types"] 183 | .view(N, C, D * D * D) 184 | .gather(dim=2, index=coords_predictions.view(N, 1, 1).expand(N, C, 1)) 185 | .argmax(dim=1) 186 | .view(-1) 187 | ) 188 | 189 | z = coords_predictions % D 190 | x = coords_predictions // (D * D) 191 | y = (coords_predictions - z - x * (D * D)) // D 192 | ret = torch.stack([types_predictions, x, y, z], dim=1) 193 | ret[:, 1:] += outputs["center"] - ret.new_tensor([D, D, D]) // 2 194 | 195 | return ret 196 | 197 | 198 | if __name__ == "__main__": 199 | from .models import VoxelCNN 200 | 201 | model = VoxelCNN() 202 | predictor = Predictor(model.eval()) 203 | annotation = torch.tensor([[0, 0, 0, 0], [1, 1, 1, 1], [2, 2, 2, 2]]) 204 | results = predictor.predict_until_end(annotation) 205 | print(results) 206 | -------------------------------------------------------------------------------- /voxelcnn/summary.py: -------------------------------------------------------------------------------- 1 | 
#!/usr/bin/env python3 2 | 3 | # Copyright (c) Facebook, Inc. and its affiliates. 4 | # All rights reserved. 5 | # This source code is licensed under the license found in the 6 | # LICENSE file in the root directory of this source tree. 7 | 8 | import logging 9 | from collections import defaultdict 10 | from typing import Dict, List, Optional, Union 11 | 12 | import numpy as np 13 | import torch 14 | 15 | 16 | class Summary(object): 17 | Datum = Dict[str, Union[float, torch.Tensor]] 18 | 19 | def __init__(self, window_size: int = 20, logger: Optional[logging.Logger] = None): 20 | """ Training summary helper 21 | 22 | Args: 23 | window_size (int): Compute the moving average of scalars within this number 24 | of history values 25 | logger (logging.Logger, optional): A logger. Default: None, meaning will 26 | print to stdout 27 | """ 28 | super().__init__() 29 | self.window_size = window_size 30 | self.logger = logger 31 | self._times = defaultdict(list) 32 | self._lrs = defaultdict(list) 33 | self._losses = defaultdict(list) 34 | self._metrics = defaultdict(list) 35 | 36 | def add_times(self, times: Datum): 37 | for k, v in times.items(): 38 | if isinstance(v, torch.Tensor): 39 | v = float(v) 40 | self._times[k].append(v) 41 | 42 | def add_lrs(self, lrs: Datum): 43 | for k, v in lrs.items(): 44 | if isinstance(v, torch.Tensor): 45 | v = float(v) 46 | self._lrs[k].append(v) 47 | 48 | def add_losses(self, losses: Datum): 49 | for k, v in losses.items(): 50 | if isinstance(v, torch.Tensor): 51 | v = float(v) 52 | self._losses[k].append(v) 53 | 54 | def add_metrics(self, metrics: Datum): 55 | for k, v in metrics.items(): 56 | if isinstance(v, torch.Tensor): 57 | v = float(v) 58 | self._metrics[k].append(v) 59 | 60 | def add( 61 | self, 62 | times: Optional[Datum] = None, 63 | lrs: Optional[Datum] = None, 64 | losses: Optional[Datum] = None, 65 | metrics: Optional[Datum] = None, 66 | ): 67 | if times is not None: 68 | self.add_times(times) 69 | if lrs is not None: 70 | self.add_lrs(lrs) 71 | if losses is not None: 72 | self.add_losses(losses) 73 | if metrics is not None: 74 | self.add_metrics(metrics) 75 | 76 | def print_current(self, prefix: Optional[str] = None): 77 | items = [] if prefix is None else [prefix] 78 | items += [f"{k}: {v[-1]:.6f}" for k, v in list(self._lrs.items())] 79 | items += [ 80 | f"{k}: {v[-1]:.3f} ({self._moving_average(v):.3f})" 81 | for k, v in list(self._times.items()) 82 | + list(self._losses.items()) 83 | + list(self._metrics.items()) 84 | ] 85 | self._log(" ".join(items)) 86 | 87 | def _moving_average(self, values: List[float]) -> float: 88 | return np.mean(values[max(0, len(values) - self.window_size) :]) 89 | 90 | def _log(self, msg: str): 91 | if self.logger is None: 92 | print(msg) 93 | else: 94 | self.logger.info(msg) 95 | -------------------------------------------------------------------------------- /voxelcnn/utils.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # Copyright (c) Facebook, Inc. and its affiliates. 4 | # All rights reserved. 5 | # This source code is licensed under the license found in the 6 | # LICENSE file in the root directory of this source tree. 
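To illustrate the Summary helper above: scalars are grouped into times / lrs / losses / metrics, and ``print_current`` reports the latest value together with a moving average over the last ``window_size`` entries. A usage sketch (the key names here are illustrative, not fixed by the API):
```
from voxelcnn.summary import Summary

summary = Summary(window_size=20)
for step in range(1, 101):
    # Values may be Python floats or scalar tensors; tensors are cast to float
    summary.add(
        times={"batch_time": 0.1},
        lrs={"lr": 0.1},
        losses={"overall_loss": 1.0 / step},
        metrics={"acc": step / 100.0},
    )
    if step % 20 == 0:
        # Prints the latest values plus their moving averages
        summary.print_current(prefix=f"[{step:03d}/100]")
```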
7 | 8 | import itertools 9 | import logging 10 | import operator 11 | import os 12 | import sys 13 | from collections import OrderedDict 14 | from os import path as osp 15 | from time import time as tic 16 | from typing import Dict, List, Tuple, Union 17 | 18 | import torch 19 | 20 | 21 | StructuredData = Union[ 22 | Dict[str, "StructuredData"], 23 | List["StructuredData"], 24 | Tuple["StructuredData"], 25 | torch.Tensor, 26 | ] 27 | 28 | 29 | def to_cuda(data: StructuredData) -> StructuredData: 30 | if isinstance(data, torch.Tensor): 31 | return data.cuda(non_blocking=True) 32 | if isinstance(data, dict): 33 | return {k: to_cuda(v) for k, v in data.items()} 34 | if isinstance(data, (list, tuple)): 35 | return type(data)(to_cuda(x) for x in data) 36 | raise ValueError(f"Unknown data type: {type(data)}") 37 | 38 | 39 | def collate_batches(batches): 40 | """ Collate a list of batches into a batch 41 | 42 | Args: 43 | batches (list): a list of batches. Each batch could be a tensor, dict, tuple, 44 | list, string, number, namedtuple 45 | 46 | Returns: 47 | batch: collated batches where tensors are concatenated along the outer dim. 48 | For example, given samples `[torch.empty(3, 5), torch.empty(3, 5)]`, the 49 | result will be a tensor of shape `(6, 5)`. 50 | """ 51 | batch = batches[0] 52 | if isinstance(batch, torch.Tensor): 53 | return batch if len(batches) == 1 else torch.cat(batches, 0) 54 | if isinstance(batch, (list, tuple)): 55 | transposed = zip(*batches) 56 | return type(batch)([collate_batches(b) for b in transposed]) 57 | if isinstance(batch, dict): 58 | return {k: collate_batches([d[k] for d in batches]) for k in batch} 59 | # Otherwise, just return the input as it is 60 | return batches 61 | 62 | 63 | def setup_logger(name=None, save_file=None, rank=0, level=logging.DEBUG): 64 | """ Setup logger once for each process 65 | 66 | Logging messages will be printed to stdout, with DEBUG level. If save_file is set, 67 | messages will be logged to disk files as well. 68 | 69 | When multiprocessing, this function must be called inside each process, but only 70 | the main process (rank == 0) will log to stdout and files. 71 | 72 | Args: 73 | name (str, optional): Name of the logger. Default: None, will use the root 74 | logger 75 | save_file (str, optional): Path to a file where log messages are saved into. 76 | Default: None, do not log to file 77 | rank (int): Rank of the process. Default: 0, the main process 78 | level (int): An integer of logging level. Recommended to be one of 79 | logging.DEBUG / INFO / WARNING / ERROR / CRITICAL. 
Default: logging.DEBUG 80 | """ 81 | logger = logging.getLogger(name) 82 | logger.setLevel(level) 83 | logger.propagate = False 84 | 85 | # Don't log results for the non-main process 86 | if rank > 0: 87 | logging.disable(logging.CRITICAL) 88 | return logger 89 | 90 | sh = logging.StreamHandler(stream=sys.stdout) 91 | sh.setLevel(level) 92 | formatter = logging.Formatter( 93 | fmt="%(asctime)s %(levelname)s %(filename)s:%(lineno)4d: %(message)s", 94 | datefmt="%Y-%m-%d %H:%M:%S", 95 | ) 96 | sh.setFormatter(formatter) 97 | logger.addHandler(sh) 98 | logger.propagate = False # prevent double logging 99 | 100 | if save_file is not None: 101 | os.makedirs(osp.dirname(osp.abspath(save_file)), exist_ok=True) 102 | fh = logging.FileHandler(save_file) 103 | fh.setLevel(level) 104 | fh.setFormatter(formatter) 105 | logger.addHandler(fh) 106 | 107 | return logger 108 | 109 | 110 | class Section(object): 111 | """ 112 | Examples 113 | -------- 114 | >>> with Section('Loading Data'): 115 | >>> num_samples = load_data() 116 | >>> print(f'=> {num_samples} samples loaded') 117 | 118 | will print out something like 119 | => Loading data ... 120 | => 42 samples loaded 121 | => Done! 122 | """ 123 | 124 | def __init__(self, description, newline=True, logger=None, timing="auto"): 125 | super(Section, self).__init__() 126 | self.description = description 127 | self.newline = newline 128 | self.logger = logger 129 | self.timing = timing 130 | self.t0 = None 131 | self.t1 = None 132 | 133 | def __enter__(self): 134 | self.t0 = tic() 135 | self.print_message("=> " + str(self.description) + " ...") 136 | 137 | def __exit__(self, type, value, traceback): 138 | self.t1 = tic() 139 | msg = "=> Done" 140 | if self.timing != "none": 141 | t, unit = self._get_time_and_unit(self.t1 - self.t0) 142 | msg += f" in {t:.3f} {unit}" 143 | self.print_message(msg) 144 | if self.newline: 145 | self.print_message() 146 | 147 | def print_message(self, message=""): 148 | if self.logger is None: 149 | try: 150 | print(message, flush=True) 151 | except TypeError: 152 | print(message) 153 | sys.stdout.flush() 154 | else: 155 | self.logger.info(message) 156 | 157 | def _get_time_and_unit(self, time): 158 | if self.timing == "auto": 159 | return self._auto_determine_time_and_unit(time) 160 | elif self.timing == "us": 161 | return time * 1000000, "us" 162 | elif self.timing == "ms": 163 | return time * 1000, "ms" 164 | elif self.timing == "s": 165 | return time, "s" 166 | elif self.timing == "m": 167 | return time / 60, "m" 168 | elif self.timing == "h": 169 | return time / 3600, "h" 170 | else: 171 | raise ValueError(f"Unknown timing mode: {self.timing}") 172 | 173 | def _auto_determine_time_and_unit(self, time): 174 | if time < 0.001: 175 | return time * 1000000, "us" 176 | elif time < 1.0: 177 | return time * 1000, "ms" 178 | elif time < 60: 179 | return time, "s" 180 | elif time < 3600: 181 | return time / 60, "m" 182 | else: 183 | return time / 3600, "h" 184 | 185 | 186 | class OrderedSet(object): 187 | """ A set that remembers insertion order """ 188 | 189 | def __init__(self, iterable=None): 190 | self.__dict = OrderedDict() 191 | for value in iterable or []: 192 | self.add(value) 193 | 194 | def add(self, value): 195 | if value not in self.__dict: 196 | self.__dict[value] = None 197 | 198 | def remove(self, value): 199 | if value in self.__dict: 200 | del self.__dict[value] 201 | else: 202 | raise KeyError(value) 203 | 204 | def pop(self, last=True): 205 | """ Pop the last or first element 206 | 207 | Args: 208 | last (bool): If 
True, pop the last element (most recently inserted). 209 | Otherwise pop the first element (oldest). 210 | """ 211 | return self.__dict.popitem(last=last)[0] 212 | 213 | def __iter__(self): 214 | return self.__dict.__iter__() 215 | 216 | def __len__(self): 217 | return self.__dict.__len__() 218 | 219 | def __contains__(self, value): 220 | return value in self.__dict 221 | 222 | def intersection(self, other): 223 | new = OrderedSet() 224 | for value in self: 225 | if value in other: 226 | new.add(value) 227 | return new 228 | 229 | def __and__(self, other): 230 | return self.intersection(other) 231 | 232 | def union(self, other): 233 | new = OrderedSet() 234 | for value in itertools.chain(self, other): 235 | new.add(value) 236 | return new 237 | 238 | def __or__(self, other): 239 | return self.union(other) 240 | 241 | def __ge__(self, other): 242 | return set(self.__dict).__ge__(set(other.__dict)) 243 | 244 | def __eq__(self, other): 245 | if isinstance(other, OrderedSet): 246 | return len(self) == len(other) and all(map(operator.eq, self, other)) 247 | return set.__eq__(set(self), other) 248 | 249 | def __ne__(self, other): 250 | return not self == other 251 | 252 | def __repr__(self): 253 | return f"{self.__class__.__name__}([{', '.join(repr(x) for x in self)}])" 254 | 255 | def __str__(self): 256 | return self.__repr__() 257 | --------------------------------------------------------------------------------
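A closing note on why ``predict_until_end`` uses OrderedSet rather than a plain ``set``: when the model mispredicts, the correction must be the oldest not-yet-built ground-truth block, which ``pop(last=False)`` provides while membership tests and removals stay O(1). A minimal sketch (the block values are illustrative):
```
from voxelcnn.utils import OrderedSet

to_build = OrderedSet([(35, 0, 0, 0), (35, 1, 0, 0), (35, 2, 0, 0)])
to_build.remove((35, 1, 0, 0))      # a correct prediction is removed directly
oldest = to_build.pop(last=False)   # a wrong prediction pops the oldest block
assert oldest == (35, 0, 0, 0)
assert (35, 2, 0, 0) in to_build and len(to_build) == 1
```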