├── .gitignore ├── LICENSE ├── README.md ├── configs ├── scannet_largenet_f10_scale_trainval_test.py └── scannet_largenet_f10_scale_val.py ├── data └── meta │ ├── scannetv2_test.txt │ ├── scannetv2_train.txt │ └── scannetv2_val.txt ├── dist_run.sh ├── run.sh ├── tools ├── prepare_scannet.py ├── run_net.py └── vis_erf.py ├── voxelnet ├── __init__.py ├── data │ ├── __init__.py │ └── scannetv2_cuda.py ├── models │ ├── __init__.py │ └── lrpnet.py ├── modules │ ├── csrc │ │ ├── pybind_cuda.cpp │ │ └── scatter │ │ │ ├── atomics.cuh │ │ │ ├── scatter.cu │ │ │ └── scatter.h │ ├── functional.py │ ├── load.py │ ├── pool.py │ └── scatter.py ├── optims │ ├── __init__.py │ ├── lr_scheduler.py │ └── optimizer.py ├── runner │ ├── __init__.py │ └── runner.py └── utils │ ├── __init__.py │ ├── config.py │ ├── general.py │ ├── logger.py │ ├── metrics.py │ └── registry.py └── work_dirs ├── scannet_largenet_f10_scale ├── checkpoints │ └── ckpt_epoch_500.pt ├── tensorboard │ └── events.out.tfevents.1667544485.container-045011a23c-9ecd0fb1 └── textlog │ └── log_2022_11_04_14_48_05.txt └── scannet_largenet_f10_scale_trainval ├── checkpoints └── ckpt_epoch_500.pt ├── tensorboard └── events.out.tfevents.1667623700.container-8fb311873c-3ca872b9 └── textlog └── log_2022_11_05_12_48_20.txt /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | *.pyc 3 | data/ 4 | work_dirs 5 | heatmaps -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Long Range Pooling for 3D Large-Scale Scene Understanding (CVPR 2023) 2 | 3 | The repository contains official Pytorch implementations for **LRPNet**. 4 | 5 | For Jittor user, https://github.com/li-xl/LRPNet is a jittor version. 6 | 7 | The paper is in [Here](https://arxiv.org/pdf/2301.06962). 8 | 9 | 10 | ## Installation 11 | ``` 12 | pip install numpy torch tensorboardX open3d 13 | pip install git+https://github.com/mit-han-lab/torchsparse.git@v1.4.0 14 | ``` 15 | 16 | ## Scannet 17 | Download the scannet and prepare it. 18 | 19 | ```bash 20 | PYTHONPATH=./:$PYTHONPATH python3 tools/prepare_scannet.py --meta_path=data/meta --in_path= 21 | --out_path=data/scannet 22 | ``` 23 | 24 | ## Models 25 | We release our trained models and training logs in "work_dirs". 26 | 27 | ## Evaluation 28 | Like VMNet, we repeat val/test for 8 times. 29 | 30 | To evaluate the model, run: 31 | 32 | ```bash 33 | bash run.sh 0 configs/scannet_largenet_f10_scale_val.py --task=val 34 | # for test 35 | bash run.sh 0 configs/scannet_largenet_f10_scale_trainval_test.py --task=test 36 | ``` 37 | 38 | ## Citation 39 | If you find our repo useful for your research, please consider citing our paper: 40 | 41 | ``` 42 | @article{li2023long, 43 | title={Long Range Pooling for 3D Large-Scale Scene Understanding}, 44 | author={Li, Xiang-Li and Guo, Meng-Hao and Mu, Tai-Jiang and Martin, Ralph R and Hu, Shi-Min}, 45 | journal={arXiv preprint arXiv:2301.06962}, 46 | year={2023} 47 | } 48 | ``` 49 | 50 | ## LICENSE 51 | 52 | This repo is under the Apache-2.0 license. For commercial use, please contact the authors. 53 | -------------------------------------------------------------------------------- /configs/scannet_largenet_f10_scale_trainval_test.py: -------------------------------------------------------------------------------- 1 | model=dict( 2 | type="LRPNet", 3 | in_channels=3, 4 | out_channels=20, 5 | encoder_channels=[32,64,96,128,128], 6 | decoder_channels=[128,128,128,128,128], 7 | ) 8 | batch_size = 4 9 | dataset=dict( 10 | val=dict( 11 | type="ScanNetCuda", 12 | data_path="data/scannet", 13 | mode="test", 14 | batch_size = batch_size, 15 | ) 16 | ) 17 | 18 | 19 | logger=dict( 20 | type="RunLogger" 21 | ) 22 | 23 | log_interval=10 24 | checkpoint_interval=100 25 | eval_interval=10 26 | 27 | max_epoch=500 28 | # clean=True 29 | val_reps = 8 30 | resume_path = "work_dirs/scannet_largenet_f10_scale_trainval/checkpoints/ckpt_epoch_500.pt" 31 | -------------------------------------------------------------------------------- /configs/scannet_largenet_f10_scale_val.py: -------------------------------------------------------------------------------- 1 | model=dict( 2 | type="LRPNet", 3 | in_channels=3, 4 | out_channels=20, 5 | encoder_channels=[32,64,96,128,128], 6 | decoder_channels=[128,128,128,128,128], 7 | ) 8 | batch_size = 4 9 | dataset=dict( 10 | val=dict( 11 | type="ScanNetCuda", 12 | data_path="data/scannet", 13 | mode="val", 14 | batch_size = batch_size, 15 | ) 16 | ) 17 | 18 | logger=dict( 19 | type="RunLogger" 20 | ) 21 | 22 | log_interval=10 23 | checkpoint_interval=100 24 | eval_interval=10 25 | 26 | max_epoch=500 27 | # clean=True 28 | resume_path = "work_dirs/scannet_largenet_f10_scale/checkpoints/ckpt_epoch_500.pt" 29 | val_reps = 8 -------------------------------------------------------------------------------- /data/meta/scannetv2_test.txt: -------------------------------------------------------------------------------- 1 | scene0707_00 2 | scene0708_00 3 | scene0709_00 4 | scene0710_00 5 | scene0711_00 6 | scene0712_00 7 | scene0713_00 8 | scene0714_00 9 | scene0715_00 10 | scene0716_00 11 | scene0717_00 12 | scene0718_00 13 | scene0719_00 14 | scene0720_00 15 | scene0721_00 16 | scene0722_00 17 | scene0723_00 18 | scene0724_00 19 | scene0725_00 20 | scene0726_00 21 | scene0727_00 22 | scene0728_00 23 | scene0729_00 24 | scene0730_00 25 | scene0731_00 26 | scene0732_00 27 | scene0733_00 28 | scene0734_00 29 | scene0735_00 30 | scene0736_00 31 | scene0737_00 32 | scene0738_00 33 | scene0739_00 34 | scene0740_00 35 | scene0741_00 36 | scene0742_00 37 | scene0743_00 38 | scene0744_00 39 | scene0745_00 40 | scene0746_00 41 | scene0747_00 42 | scene0748_00 43 | scene0749_00 44 | scene0750_00 45 | scene0751_00 46 | scene0752_00 47 | scene0753_00 48 | scene0754_00 49 | scene0755_00 50 | scene0756_00 51 | scene0757_00 52 | scene0758_00 53 | scene0759_00 54 | scene0760_00 55 | scene0761_00 56 | scene0762_00 57 | scene0763_00 58 | scene0764_00 59 | scene0765_00 60 | scene0766_00 61 | scene0767_00 62 | scene0768_00 63 | scene0769_00 64 | scene0770_00 65 | scene0771_00 66 | scene0772_00 67 | scene0773_00 68 | scene0774_00 69 | scene0775_00 70 | scene0776_00 71 | scene0777_00 72 | scene0778_00 73 | scene0779_00 74 | scene0780_00 75 | scene0781_00 76 | scene0782_00 77 | scene0783_00 78 | scene0784_00 79 | scene0785_00 80 | scene0786_00 81 | scene0787_00 82 | scene0788_00 83 | scene0789_00 84 | scene0790_00 85 | scene0791_00 86 | scene0792_00 87 | scene0793_00 88 | scene0794_00 89 | scene0795_00 90 | scene0796_00 91 | scene0797_00 92 | scene0798_00 93 | scene0799_00 94 | scene0800_00 95 | scene0801_00 96 | scene0802_00 97 | scene0803_00 98 | scene0804_00 99 | scene0805_00 100 | scene0806_00 101 | -------------------------------------------------------------------------------- /data/meta/scannetv2_train.txt: -------------------------------------------------------------------------------- 1 | scene0000_00 2 | scene0000_01 3 | scene0000_02 4 | scene0001_00 5 | scene0001_01 6 | scene0002_00 7 | scene0002_01 8 | scene0003_00 9 | scene0003_01 10 | scene0003_02 11 | scene0004_00 12 | scene0005_00 13 | scene0005_01 14 | scene0006_00 15 | scene0006_01 16 | scene0006_02 17 | scene0007_00 18 | scene0008_00 19 | scene0009_00 20 | scene0009_01 21 | scene0009_02 22 | scene0010_00 23 | scene0010_01 24 | scene0012_00 25 | scene0012_01 26 | scene0012_02 27 | scene0013_00 28 | scene0013_01 29 | scene0013_02 30 | scene0014_00 31 | scene0016_00 32 | scene0016_01 33 | scene0016_02 34 | scene0017_00 35 | scene0017_01 36 | scene0017_02 37 | scene0018_00 38 | scene0020_00 39 | scene0020_01 40 | scene0021_00 41 | scene0022_00 42 | scene0022_01 43 | scene0023_00 44 | scene0024_00 45 | scene0024_01 46 | scene0024_02 47 | scene0026_00 48 | scene0027_00 49 | scene0027_01 50 | scene0027_02 51 | scene0028_00 52 | scene0029_00 53 | scene0029_01 54 | scene0029_02 55 | scene0031_00 56 | scene0031_01 57 | scene0031_02 58 | scene0032_00 59 | scene0032_01 60 | scene0033_00 61 | scene0034_00 62 | scene0034_01 63 | scene0034_02 64 | scene0035_00 65 | scene0035_01 66 | scene0036_00 67 | scene0036_01 68 | scene0037_00 69 | scene0038_00 70 | scene0038_01 71 | scene0038_02 72 | scene0039_00 73 | scene0039_01 74 | scene0040_00 75 | scene0040_01 76 | scene0041_00 77 | scene0041_01 78 | scene0042_00 79 | scene0042_01 80 | scene0042_02 81 | scene0043_00 82 | scene0043_01 83 | scene0044_00 84 | scene0044_01 85 | scene0044_02 86 | scene0045_00 87 | scene0045_01 88 | scene0047_00 89 | scene0048_00 90 | scene0048_01 91 | scene0049_00 92 | scene0051_00 93 | scene0051_01 94 | scene0051_02 95 | scene0051_03 96 | scene0052_00 97 | scene0052_01 98 | scene0052_02 99 | scene0053_00 100 | scene0054_00 101 | scene0055_00 102 | scene0055_01 103 | scene0055_02 104 | scene0056_00 105 | scene0056_01 106 | scene0057_00 107 | scene0057_01 108 | scene0058_00 109 | scene0058_01 110 | scene0059_00 111 | scene0059_01 112 | scene0059_02 113 | scene0060_00 114 | scene0060_01 115 | scene0061_00 116 | scene0061_01 117 | scene0062_00 118 | scene0062_01 119 | scene0062_02 120 | scene0065_00 121 | scene0065_01 122 | scene0065_02 123 | scene0066_00 124 | scene0067_00 125 | scene0067_01 126 | scene0067_02 127 | scene0068_00 128 | scene0068_01 129 | scene0069_00 130 | scene0070_00 131 | scene0071_00 132 | scene0072_00 133 | scene0072_01 134 | scene0072_02 135 | scene0073_00 136 | scene0073_01 137 | scene0073_02 138 | scene0073_03 139 | scene0074_00 140 | scene0074_01 141 | scene0074_02 142 | scene0075_00 143 | scene0076_00 144 | scene0078_00 145 | scene0078_01 146 | scene0078_02 147 | scene0079_00 148 | scene0079_01 149 | scene0080_00 150 | scene0080_01 151 | scene0080_02 152 | scene0082_00 153 | scene0083_00 154 | scene0083_01 155 | scene0085_00 156 | scene0085_01 157 | scene0087_00 158 | scene0087_01 159 | scene0087_02 160 | scene0089_00 161 | scene0089_01 162 | scene0089_02 163 | scene0090_00 164 | scene0091_00 165 | scene0092_00 166 | scene0092_01 167 | scene0092_02 168 | scene0092_03 169 | scene0092_04 170 | scene0093_00 171 | scene0093_01 172 | scene0093_02 173 | scene0094_00 174 | scene0096_00 175 | scene0096_01 176 | scene0096_02 177 | scene0097_00 178 | scene0098_00 179 | scene0098_01 180 | scene0099_00 181 | scene0099_01 182 | scene0101_00 183 | scene0101_01 184 | scene0101_02 185 | scene0101_03 186 | scene0101_04 187 | scene0101_05 188 | scene0102_00 189 | scene0102_01 190 | scene0103_00 191 | scene0103_01 192 | scene0104_00 193 | scene0105_00 194 | scene0105_01 195 | scene0105_02 196 | scene0106_00 197 | scene0106_01 198 | scene0106_02 199 | scene0107_00 200 | scene0108_00 201 | scene0109_00 202 | scene0109_01 203 | scene0110_00 204 | scene0110_01 205 | scene0110_02 206 | scene0111_00 207 | scene0111_01 208 | scene0111_02 209 | scene0112_00 210 | scene0112_01 211 | scene0112_02 212 | scene0113_00 213 | scene0113_01 214 | scene0114_00 215 | scene0114_01 216 | scene0114_02 217 | scene0115_00 218 | scene0115_01 219 | scene0115_02 220 | scene0116_00 221 | scene0116_01 222 | scene0116_02 223 | scene0117_00 224 | scene0118_00 225 | scene0118_01 226 | scene0118_02 227 | scene0119_00 228 | scene0120_00 229 | scene0120_01 230 | scene0121_00 231 | scene0121_01 232 | scene0121_02 233 | scene0122_00 234 | scene0122_01 235 | scene0123_00 236 | scene0123_01 237 | scene0123_02 238 | scene0124_00 239 | scene0124_01 240 | scene0125_00 241 | scene0126_00 242 | scene0126_01 243 | scene0126_02 244 | scene0127_00 245 | scene0127_01 246 | scene0128_00 247 | scene0129_00 248 | scene0130_00 249 | scene0132_00 250 | scene0132_01 251 | scene0132_02 252 | scene0133_00 253 | scene0134_00 254 | scene0134_01 255 | scene0134_02 256 | scene0135_00 257 | scene0136_00 258 | scene0136_01 259 | scene0136_02 260 | scene0137_00 261 | scene0137_01 262 | scene0137_02 263 | scene0138_00 264 | scene0140_00 265 | scene0140_01 266 | scene0141_00 267 | scene0141_01 268 | scene0141_02 269 | scene0142_00 270 | scene0142_01 271 | scene0143_00 272 | scene0143_01 273 | scene0143_02 274 | scene0145_00 275 | scene0147_00 276 | scene0147_01 277 | scene0148_00 278 | scene0150_00 279 | scene0150_01 280 | scene0150_02 281 | scene0151_00 282 | scene0151_01 283 | scene0152_00 284 | scene0152_01 285 | scene0152_02 286 | scene0154_00 287 | scene0155_00 288 | scene0155_01 289 | scene0155_02 290 | scene0156_00 291 | scene0157_00 292 | scene0157_01 293 | scene0158_00 294 | scene0158_01 295 | scene0158_02 296 | scene0159_00 297 | scene0160_00 298 | scene0160_01 299 | scene0160_02 300 | scene0160_03 301 | scene0160_04 302 | scene0161_00 303 | scene0161_01 304 | scene0161_02 305 | scene0162_00 306 | scene0163_00 307 | scene0163_01 308 | scene0165_00 309 | scene0165_01 310 | scene0165_02 311 | scene0166_00 312 | scene0166_01 313 | scene0166_02 314 | scene0167_00 315 | scene0168_00 316 | scene0168_01 317 | scene0168_02 318 | scene0170_00 319 | scene0170_01 320 | scene0170_02 321 | scene0171_00 322 | scene0171_01 323 | scene0172_00 324 | scene0172_01 325 | scene0173_00 326 | scene0173_01 327 | scene0173_02 328 | scene0174_00 329 | scene0174_01 330 | scene0175_00 331 | scene0176_00 332 | scene0177_00 333 | scene0177_01 334 | scene0177_02 335 | scene0178_00 336 | scene0179_00 337 | scene0180_00 338 | scene0181_00 339 | scene0181_01 340 | scene0181_02 341 | scene0181_03 342 | scene0182_00 343 | scene0182_01 344 | scene0182_02 345 | scene0183_00 346 | scene0184_00 347 | scene0185_00 348 | scene0186_00 349 | scene0186_01 350 | scene0188_00 351 | scene0189_00 352 | scene0190_00 353 | scene0191_00 354 | scene0191_01 355 | scene0191_02 356 | scene0192_00 357 | scene0192_01 358 | scene0192_02 359 | scene0194_00 360 | scene0195_00 361 | scene0195_01 362 | scene0195_02 363 | scene0197_00 364 | scene0197_01 365 | scene0197_02 366 | scene0198_00 367 | scene0199_00 368 | scene0200_00 369 | scene0200_01 370 | scene0200_02 371 | scene0201_00 372 | scene0201_01 373 | scene0201_02 374 | scene0202_00 375 | scene0204_00 376 | scene0204_01 377 | scene0204_02 378 | scene0205_00 379 | scene0205_01 380 | scene0205_02 381 | scene0206_00 382 | scene0206_01 383 | scene0206_02 384 | scene0209_00 385 | scene0209_01 386 | scene0209_02 387 | scene0210_00 388 | scene0210_01 389 | scene0211_00 390 | scene0211_01 391 | scene0211_02 392 | scene0211_03 393 | scene0212_00 394 | scene0212_01 395 | scene0212_02 396 | scene0213_00 397 | scene0214_00 398 | scene0214_01 399 | scene0214_02 400 | scene0215_00 401 | scene0215_01 402 | scene0216_00 403 | scene0218_00 404 | scene0218_01 405 | scene0219_00 406 | scene0220_00 407 | scene0220_01 408 | scene0220_02 409 | scene0223_00 410 | scene0223_01 411 | scene0223_02 412 | scene0224_00 413 | scene0225_00 414 | scene0226_00 415 | scene0226_01 416 | scene0227_00 417 | scene0228_00 418 | scene0229_00 419 | scene0229_01 420 | scene0229_02 421 | scene0230_00 422 | scene0232_00 423 | scene0232_01 424 | scene0232_02 425 | scene0233_00 426 | scene0233_01 427 | scene0234_00 428 | scene0235_00 429 | scene0236_00 430 | scene0236_01 431 | scene0237_00 432 | scene0237_01 433 | scene0238_00 434 | scene0238_01 435 | scene0239_00 436 | scene0239_01 437 | scene0239_02 438 | scene0240_00 439 | scene0241_00 440 | scene0241_01 441 | scene0241_02 442 | scene0242_00 443 | scene0242_01 444 | scene0242_02 445 | scene0243_00 446 | scene0244_00 447 | scene0244_01 448 | scene0245_00 449 | scene0247_00 450 | scene0247_01 451 | scene0248_00 452 | scene0248_01 453 | scene0248_02 454 | scene0250_00 455 | scene0250_01 456 | scene0250_02 457 | scene0252_00 458 | scene0253_00 459 | scene0254_00 460 | scene0254_01 461 | scene0255_00 462 | scene0255_01 463 | scene0255_02 464 | scene0258_00 465 | scene0259_00 466 | scene0259_01 467 | scene0260_00 468 | scene0260_01 469 | scene0260_02 470 | scene0261_00 471 | scene0261_01 472 | scene0261_02 473 | scene0261_03 474 | scene0262_00 475 | scene0262_01 476 | scene0263_00 477 | scene0263_01 478 | scene0264_00 479 | scene0264_01 480 | scene0264_02 481 | scene0265_00 482 | scene0265_01 483 | scene0265_02 484 | scene0266_00 485 | scene0266_01 486 | scene0267_00 487 | scene0268_00 488 | scene0268_01 489 | scene0268_02 490 | scene0269_00 491 | scene0269_01 492 | scene0269_02 493 | scene0270_00 494 | scene0270_01 495 | scene0270_02 496 | scene0271_00 497 | scene0271_01 498 | scene0272_00 499 | scene0272_01 500 | scene0273_00 501 | scene0273_01 502 | scene0274_00 503 | scene0274_01 504 | scene0274_02 505 | scene0275_00 506 | scene0276_00 507 | scene0276_01 508 | scene0279_00 509 | scene0279_01 510 | scene0279_02 511 | scene0280_00 512 | scene0280_01 513 | scene0280_02 514 | scene0281_00 515 | scene0282_00 516 | scene0282_01 517 | scene0282_02 518 | scene0283_00 519 | scene0284_00 520 | scene0285_00 521 | scene0286_00 522 | scene0286_01 523 | scene0286_02 524 | scene0286_03 525 | scene0287_00 526 | scene0288_00 527 | scene0288_01 528 | scene0288_02 529 | scene0289_00 530 | scene0289_01 531 | scene0290_00 532 | scene0291_00 533 | scene0291_01 534 | scene0291_02 535 | scene0292_00 536 | scene0292_01 537 | scene0293_00 538 | scene0293_01 539 | scene0294_00 540 | scene0294_01 541 | scene0294_02 542 | scene0295_00 543 | scene0295_01 544 | scene0296_00 545 | scene0296_01 546 | scene0297_00 547 | scene0297_01 548 | scene0297_02 549 | scene0298_00 550 | scene0299_00 551 | scene0299_01 552 | scene0301_00 553 | scene0301_01 554 | scene0301_02 555 | scene0302_00 556 | scene0302_01 557 | scene0303_00 558 | scene0303_01 559 | scene0303_02 560 | scene0305_00 561 | scene0305_01 562 | scene0306_00 563 | scene0306_01 564 | scene0308_00 565 | scene0309_00 566 | scene0309_01 567 | scene0310_00 568 | scene0310_01 569 | scene0310_02 570 | scene0311_00 571 | scene0312_00 572 | scene0312_01 573 | scene0312_02 574 | scene0313_00 575 | scene0313_01 576 | scene0313_02 577 | scene0315_00 578 | scene0317_00 579 | scene0317_01 580 | scene0318_00 581 | scene0319_00 582 | scene0320_00 583 | scene0320_01 584 | scene0320_02 585 | scene0320_03 586 | scene0321_00 587 | scene0322_00 588 | scene0323_00 589 | scene0323_01 590 | scene0324_00 591 | scene0324_01 592 | scene0325_00 593 | scene0325_01 594 | scene0326_00 595 | scene0327_00 596 | scene0330_00 597 | scene0331_00 598 | scene0331_01 599 | scene0332_00 600 | scene0332_01 601 | scene0332_02 602 | scene0333_00 603 | scene0335_00 604 | scene0335_01 605 | scene0335_02 606 | scene0336_00 607 | scene0336_01 608 | scene0337_00 609 | scene0337_01 610 | scene0337_02 611 | scene0339_00 612 | scene0340_00 613 | scene0340_01 614 | scene0340_02 615 | scene0341_00 616 | scene0341_01 617 | scene0344_00 618 | scene0344_01 619 | scene0345_00 620 | scene0345_01 621 | scene0346_00 622 | scene0346_01 623 | scene0347_00 624 | scene0347_01 625 | scene0347_02 626 | scene0348_00 627 | scene0348_01 628 | scene0348_02 629 | scene0349_00 630 | scene0349_01 631 | scene0350_00 632 | scene0350_01 633 | scene0350_02 634 | scene0352_00 635 | scene0352_01 636 | scene0352_02 637 | scene0358_00 638 | scene0358_01 639 | scene0358_02 640 | scene0359_00 641 | scene0359_01 642 | scene0360_00 643 | scene0361_00 644 | scene0361_01 645 | scene0361_02 646 | scene0362_00 647 | scene0362_01 648 | scene0362_02 649 | scene0362_03 650 | scene0363_00 651 | scene0364_00 652 | scene0364_01 653 | scene0365_00 654 | scene0365_01 655 | scene0365_02 656 | scene0366_00 657 | scene0367_00 658 | scene0367_01 659 | scene0368_00 660 | scene0368_01 661 | scene0369_00 662 | scene0369_01 663 | scene0369_02 664 | scene0370_00 665 | scene0370_01 666 | scene0370_02 667 | scene0371_00 668 | scene0371_01 669 | scene0372_00 670 | scene0373_00 671 | scene0373_01 672 | scene0374_00 673 | scene0375_00 674 | scene0375_01 675 | scene0375_02 676 | scene0376_00 677 | scene0376_01 678 | scene0376_02 679 | scene0379_00 680 | scene0380_00 681 | scene0380_01 682 | scene0380_02 683 | scene0381_00 684 | scene0381_01 685 | scene0381_02 686 | scene0383_00 687 | scene0383_01 688 | scene0383_02 689 | scene0384_00 690 | scene0385_00 691 | scene0385_01 692 | scene0385_02 693 | scene0386_00 694 | scene0387_00 695 | scene0387_01 696 | scene0387_02 697 | scene0388_00 698 | scene0388_01 699 | scene0390_00 700 | scene0391_00 701 | scene0392_00 702 | scene0392_01 703 | scene0392_02 704 | scene0393_00 705 | scene0393_01 706 | scene0393_02 707 | scene0394_00 708 | scene0394_01 709 | scene0395_00 710 | scene0395_01 711 | scene0395_02 712 | scene0396_00 713 | scene0396_01 714 | scene0396_02 715 | scene0397_00 716 | scene0397_01 717 | scene0398_00 718 | scene0398_01 719 | scene0399_00 720 | scene0399_01 721 | scene0400_00 722 | scene0400_01 723 | scene0401_00 724 | scene0402_00 725 | scene0403_00 726 | scene0403_01 727 | scene0404_00 728 | scene0404_01 729 | scene0404_02 730 | scene0405_00 731 | scene0407_00 732 | scene0407_01 733 | scene0408_00 734 | scene0408_01 735 | scene0409_00 736 | scene0409_01 737 | scene0410_00 738 | scene0410_01 739 | scene0411_00 740 | scene0411_01 741 | scene0411_02 742 | scene0413_00 743 | scene0415_00 744 | scene0415_01 745 | scene0415_02 746 | scene0416_00 747 | scene0416_01 748 | scene0416_02 749 | scene0416_03 750 | scene0416_04 751 | scene0417_00 752 | scene0418_00 753 | scene0418_01 754 | scene0418_02 755 | scene0419_00 756 | scene0419_01 757 | scene0419_02 758 | scene0420_00 759 | scene0420_01 760 | scene0420_02 761 | scene0421_00 762 | scene0421_01 763 | scene0421_02 764 | scene0422_00 765 | scene0424_00 766 | scene0424_01 767 | scene0424_02 768 | scene0425_00 769 | scene0425_01 770 | scene0428_00 771 | scene0428_01 772 | scene0429_00 773 | scene0431_00 774 | scene0433_00 775 | scene0434_00 776 | scene0434_01 777 | scene0434_02 778 | scene0436_00 779 | scene0437_00 780 | scene0437_01 781 | scene0438_00 782 | scene0439_00 783 | scene0439_01 784 | scene0440_00 785 | scene0440_01 786 | scene0440_02 787 | scene0442_00 788 | scene0443_00 789 | scene0444_00 790 | scene0444_01 791 | scene0445_00 792 | scene0445_01 793 | scene0446_00 794 | scene0446_01 795 | scene0447_00 796 | scene0447_01 797 | scene0447_02 798 | scene0448_00 799 | scene0448_01 800 | scene0448_02 801 | scene0449_00 802 | scene0449_01 803 | scene0449_02 804 | scene0450_00 805 | scene0451_00 806 | scene0451_01 807 | scene0451_02 808 | scene0451_03 809 | scene0451_04 810 | scene0451_05 811 | scene0452_00 812 | scene0452_01 813 | scene0452_02 814 | scene0453_00 815 | scene0453_01 816 | scene0454_00 817 | scene0455_00 818 | scene0456_00 819 | scene0456_01 820 | scene0457_00 821 | scene0457_01 822 | scene0457_02 823 | scene0459_00 824 | scene0459_01 825 | scene0460_00 826 | scene0463_00 827 | scene0463_01 828 | scene0464_00 829 | scene0465_00 830 | scene0465_01 831 | scene0466_00 832 | scene0466_01 833 | scene0467_00 834 | scene0468_00 835 | scene0468_01 836 | scene0468_02 837 | scene0469_00 838 | scene0469_01 839 | scene0469_02 840 | scene0470_00 841 | scene0470_01 842 | scene0471_00 843 | scene0471_01 844 | scene0471_02 845 | scene0472_00 846 | scene0472_01 847 | scene0472_02 848 | scene0473_00 849 | scene0473_01 850 | scene0475_00 851 | scene0475_01 852 | scene0475_02 853 | scene0476_00 854 | scene0476_01 855 | scene0476_02 856 | scene0477_00 857 | scene0477_01 858 | scene0478_00 859 | scene0478_01 860 | scene0479_00 861 | scene0479_01 862 | scene0479_02 863 | scene0480_00 864 | scene0480_01 865 | scene0481_00 866 | scene0481_01 867 | scene0482_00 868 | scene0482_01 869 | scene0483_00 870 | scene0484_00 871 | scene0484_01 872 | scene0485_00 873 | scene0486_00 874 | scene0487_00 875 | scene0487_01 876 | scene0489_00 877 | scene0489_01 878 | scene0489_02 879 | scene0491_00 880 | scene0492_00 881 | scene0492_01 882 | scene0493_00 883 | scene0493_01 884 | scene0495_00 885 | scene0497_00 886 | scene0498_00 887 | scene0498_01 888 | scene0498_02 889 | scene0499_00 890 | scene0501_00 891 | scene0501_01 892 | scene0501_02 893 | scene0502_00 894 | scene0502_01 895 | scene0502_02 896 | scene0503_00 897 | scene0504_00 898 | scene0505_00 899 | scene0505_01 900 | scene0505_02 901 | scene0505_03 902 | scene0505_04 903 | scene0506_00 904 | scene0507_00 905 | scene0508_00 906 | scene0508_01 907 | scene0508_02 908 | scene0509_00 909 | scene0509_01 910 | scene0509_02 911 | scene0510_00 912 | scene0510_01 913 | scene0510_02 914 | scene0511_00 915 | scene0511_01 916 | scene0512_00 917 | scene0513_00 918 | scene0514_00 919 | scene0514_01 920 | scene0515_00 921 | scene0515_01 922 | scene0515_02 923 | scene0516_00 924 | scene0516_01 925 | scene0517_00 926 | scene0517_01 927 | scene0517_02 928 | scene0519_00 929 | scene0520_00 930 | scene0520_01 931 | scene0521_00 932 | scene0522_00 933 | scene0523_00 934 | scene0523_01 935 | scene0523_02 936 | scene0524_00 937 | scene0524_01 938 | scene0525_00 939 | scene0525_01 940 | scene0525_02 941 | scene0526_00 942 | scene0526_01 943 | scene0528_00 944 | scene0528_01 945 | scene0529_00 946 | scene0529_01 947 | scene0529_02 948 | scene0530_00 949 | scene0531_00 950 | scene0532_00 951 | scene0532_01 952 | scene0533_00 953 | scene0533_01 954 | scene0534_00 955 | scene0534_01 956 | scene0536_00 957 | scene0536_01 958 | scene0536_02 959 | scene0537_00 960 | scene0538_00 961 | scene0539_00 962 | scene0539_01 963 | scene0539_02 964 | scene0540_00 965 | scene0540_01 966 | scene0540_02 967 | scene0541_00 968 | scene0541_01 969 | scene0541_02 970 | scene0542_00 971 | scene0543_00 972 | scene0543_01 973 | scene0543_02 974 | scene0544_00 975 | scene0545_00 976 | scene0545_01 977 | scene0545_02 978 | scene0546_00 979 | scene0547_00 980 | scene0547_01 981 | scene0547_02 982 | scene0548_00 983 | scene0548_01 984 | scene0548_02 985 | scene0551_00 986 | scene0554_00 987 | scene0554_01 988 | scene0555_00 989 | scene0556_00 990 | scene0556_01 991 | scene0557_00 992 | scene0557_01 993 | scene0557_02 994 | scene0560_00 995 | scene0561_00 996 | scene0561_01 997 | scene0562_00 998 | scene0563_00 999 | scene0564_00 1000 | scene0566_00 1001 | scene0567_00 1002 | scene0567_01 1003 | scene0569_00 1004 | scene0569_01 1005 | scene0570_00 1006 | scene0570_01 1007 | scene0570_02 1008 | scene0571_00 1009 | scene0571_01 1010 | scene0572_00 1011 | scene0572_01 1012 | scene0572_02 1013 | scene0573_00 1014 | scene0573_01 1015 | scene0576_00 1016 | scene0576_01 1017 | scene0576_02 1018 | scene0577_00 1019 | scene0579_00 1020 | scene0579_01 1021 | scene0579_02 1022 | scene0581_00 1023 | scene0581_01 1024 | scene0581_02 1025 | scene0582_00 1026 | scene0582_01 1027 | scene0582_02 1028 | scene0584_00 1029 | scene0584_01 1030 | scene0584_02 1031 | scene0585_00 1032 | scene0585_01 1033 | scene0586_00 1034 | scene0586_01 1035 | scene0586_02 1036 | scene0587_00 1037 | scene0587_01 1038 | scene0587_02 1039 | scene0587_03 1040 | scene0588_00 1041 | scene0588_01 1042 | scene0588_02 1043 | scene0588_03 1044 | scene0589_00 1045 | scene0589_01 1046 | scene0589_02 1047 | scene0590_00 1048 | scene0590_01 1049 | scene0592_00 1050 | scene0592_01 1051 | scene0594_00 1052 | scene0596_00 1053 | scene0596_01 1054 | scene0596_02 1055 | scene0597_00 1056 | scene0597_01 1057 | scene0597_02 1058 | scene0600_00 1059 | scene0600_01 1060 | scene0600_02 1061 | scene0601_00 1062 | scene0601_01 1063 | scene0602_00 1064 | scene0603_00 1065 | scene0603_01 1066 | scene0604_00 1067 | scene0604_01 1068 | scene0604_02 1069 | scene0605_00 1070 | scene0605_01 1071 | scene0610_00 1072 | scene0610_01 1073 | scene0610_02 1074 | scene0611_00 1075 | scene0611_01 1076 | scene0612_00 1077 | scene0612_01 1078 | scene0613_00 1079 | scene0613_01 1080 | scene0613_02 1081 | scene0614_00 1082 | scene0614_01 1083 | scene0614_02 1084 | scene0615_00 1085 | scene0615_01 1086 | scene0617_00 1087 | scene0619_00 1088 | scene0620_00 1089 | scene0620_01 1090 | scene0622_00 1091 | scene0622_01 1092 | scene0623_00 1093 | scene0623_01 1094 | scene0624_00 1095 | scene0625_00 1096 | scene0625_01 1097 | scene0626_00 1098 | scene0626_01 1099 | scene0626_02 1100 | scene0627_00 1101 | scene0627_01 1102 | scene0628_00 1103 | scene0628_01 1104 | scene0628_02 1105 | scene0630_00 1106 | scene0630_01 1107 | scene0630_02 1108 | scene0630_03 1109 | scene0630_04 1110 | scene0630_05 1111 | scene0630_06 1112 | scene0631_00 1113 | scene0631_01 1114 | scene0631_02 1115 | scene0632_00 1116 | scene0634_00 1117 | scene0635_00 1118 | scene0635_01 1119 | scene0636_00 1120 | scene0637_00 1121 | scene0638_00 1122 | scene0639_00 1123 | scene0640_00 1124 | scene0640_01 1125 | scene0640_02 1126 | scene0641_00 1127 | scene0642_00 1128 | scene0642_01 1129 | scene0642_02 1130 | scene0642_03 1131 | scene0646_00 1132 | scene0646_01 1133 | scene0646_02 1134 | scene0649_00 1135 | scene0649_01 1136 | scene0650_00 1137 | scene0654_00 1138 | scene0654_01 1139 | scene0656_00 1140 | scene0656_01 1141 | scene0656_02 1142 | scene0656_03 1143 | scene0657_00 1144 | scene0659_00 1145 | scene0659_01 1146 | scene0661_00 1147 | scene0662_00 1148 | scene0662_01 1149 | scene0662_02 1150 | scene0666_00 1151 | scene0666_01 1152 | scene0666_02 1153 | scene0667_00 1154 | scene0667_01 1155 | scene0667_02 1156 | scene0668_00 1157 | scene0669_00 1158 | scene0669_01 1159 | scene0672_00 1160 | scene0672_01 1161 | scene0673_00 1162 | scene0673_01 1163 | scene0673_02 1164 | scene0673_03 1165 | scene0673_04 1166 | scene0673_05 1167 | scene0674_00 1168 | scene0674_01 1169 | scene0675_00 1170 | scene0675_01 1171 | scene0676_00 1172 | scene0676_01 1173 | scene0677_00 1174 | scene0677_01 1175 | scene0677_02 1176 | scene0679_00 1177 | scene0679_01 1178 | scene0680_00 1179 | scene0680_01 1180 | scene0681_00 1181 | scene0682_00 1182 | scene0683_00 1183 | scene0687_00 1184 | scene0688_00 1185 | scene0691_00 1186 | scene0691_01 1187 | scene0692_00 1188 | scene0692_01 1189 | scene0692_02 1190 | scene0692_03 1191 | scene0692_04 1192 | scene0694_00 1193 | scene0694_01 1194 | scene0698_00 1195 | scene0698_01 1196 | scene0703_00 1197 | scene0703_01 1198 | scene0705_00 1199 | scene0705_01 1200 | scene0705_02 1201 | scene0706_00 1202 | -------------------------------------------------------------------------------- /data/meta/scannetv2_val.txt: -------------------------------------------------------------------------------- 1 | scene0011_00 2 | scene0011_01 3 | scene0015_00 4 | scene0019_00 5 | scene0019_01 6 | scene0025_00 7 | scene0025_01 8 | scene0025_02 9 | scene0030_00 10 | scene0030_01 11 | scene0030_02 12 | scene0046_00 13 | scene0046_01 14 | scene0046_02 15 | scene0050_00 16 | scene0050_01 17 | scene0050_02 18 | scene0063_00 19 | scene0064_00 20 | scene0064_01 21 | scene0077_00 22 | scene0077_01 23 | scene0081_00 24 | scene0081_01 25 | scene0081_02 26 | scene0084_00 27 | scene0084_01 28 | scene0084_02 29 | scene0086_00 30 | scene0086_01 31 | scene0086_02 32 | scene0088_00 33 | scene0088_01 34 | scene0088_02 35 | scene0088_03 36 | scene0095_00 37 | scene0095_01 38 | scene0100_00 39 | scene0100_01 40 | scene0100_02 41 | scene0131_00 42 | scene0131_01 43 | scene0131_02 44 | scene0139_00 45 | scene0144_00 46 | scene0144_01 47 | scene0146_00 48 | scene0146_01 49 | scene0146_02 50 | scene0149_00 51 | scene0153_00 52 | scene0153_01 53 | scene0164_00 54 | scene0164_01 55 | scene0164_02 56 | scene0164_03 57 | scene0169_00 58 | scene0169_01 59 | scene0187_00 60 | scene0187_01 61 | scene0193_00 62 | scene0193_01 63 | scene0196_00 64 | scene0203_00 65 | scene0203_01 66 | scene0203_02 67 | scene0207_00 68 | scene0207_01 69 | scene0207_02 70 | scene0208_00 71 | scene0217_00 72 | scene0221_00 73 | scene0221_01 74 | scene0222_00 75 | scene0222_01 76 | scene0231_00 77 | scene0231_01 78 | scene0231_02 79 | scene0246_00 80 | scene0249_00 81 | scene0251_00 82 | scene0256_00 83 | scene0256_01 84 | scene0256_02 85 | scene0257_00 86 | scene0277_00 87 | scene0277_01 88 | scene0277_02 89 | scene0278_00 90 | scene0278_01 91 | scene0300_00 92 | scene0300_01 93 | scene0304_00 94 | scene0307_00 95 | scene0307_01 96 | scene0307_02 97 | scene0314_00 98 | scene0316_00 99 | scene0328_00 100 | scene0329_00 101 | scene0329_01 102 | scene0329_02 103 | scene0334_00 104 | scene0334_01 105 | scene0334_02 106 | scene0338_00 107 | scene0338_01 108 | scene0338_02 109 | scene0342_00 110 | scene0343_00 111 | scene0351_00 112 | scene0351_01 113 | scene0353_00 114 | scene0353_01 115 | scene0353_02 116 | scene0354_00 117 | scene0355_00 118 | scene0355_01 119 | scene0356_00 120 | scene0356_01 121 | scene0356_02 122 | scene0357_00 123 | scene0357_01 124 | scene0377_00 125 | scene0377_01 126 | scene0377_02 127 | scene0378_00 128 | scene0378_01 129 | scene0378_02 130 | scene0382_00 131 | scene0382_01 132 | scene0389_00 133 | scene0406_00 134 | scene0406_01 135 | scene0406_02 136 | scene0412_00 137 | scene0412_01 138 | scene0414_00 139 | scene0423_00 140 | scene0423_01 141 | scene0423_02 142 | scene0426_00 143 | scene0426_01 144 | scene0426_02 145 | scene0426_03 146 | scene0427_00 147 | scene0430_00 148 | scene0430_01 149 | scene0432_00 150 | scene0432_01 151 | scene0435_00 152 | scene0435_01 153 | scene0435_02 154 | scene0435_03 155 | scene0441_00 156 | scene0458_00 157 | scene0458_01 158 | scene0461_00 159 | scene0462_00 160 | scene0474_00 161 | scene0474_01 162 | scene0474_02 163 | scene0474_03 164 | scene0474_04 165 | scene0474_05 166 | scene0488_00 167 | scene0488_01 168 | scene0490_00 169 | scene0494_00 170 | scene0496_00 171 | scene0500_00 172 | scene0500_01 173 | scene0518_00 174 | scene0527_00 175 | scene0535_00 176 | scene0549_00 177 | scene0549_01 178 | scene0550_00 179 | scene0552_00 180 | scene0552_01 181 | scene0553_00 182 | scene0553_01 183 | scene0553_02 184 | scene0558_00 185 | scene0558_01 186 | scene0558_02 187 | scene0559_00 188 | scene0559_01 189 | scene0559_02 190 | scene0565_00 191 | scene0568_00 192 | scene0568_01 193 | scene0568_02 194 | scene0574_00 195 | scene0574_01 196 | scene0574_02 197 | scene0575_00 198 | scene0575_01 199 | scene0575_02 200 | scene0578_00 201 | scene0578_01 202 | scene0578_02 203 | scene0580_00 204 | scene0580_01 205 | scene0583_00 206 | scene0583_01 207 | scene0583_02 208 | scene0591_00 209 | scene0591_01 210 | scene0591_02 211 | scene0593_00 212 | scene0593_01 213 | scene0595_00 214 | scene0598_00 215 | scene0598_01 216 | scene0598_02 217 | scene0599_00 218 | scene0599_01 219 | scene0599_02 220 | scene0606_00 221 | scene0606_01 222 | scene0606_02 223 | scene0607_00 224 | scene0607_01 225 | scene0608_00 226 | scene0608_01 227 | scene0608_02 228 | scene0609_00 229 | scene0609_01 230 | scene0609_02 231 | scene0609_03 232 | scene0616_00 233 | scene0616_01 234 | scene0618_00 235 | scene0621_00 236 | scene0629_00 237 | scene0629_01 238 | scene0629_02 239 | scene0633_00 240 | scene0633_01 241 | scene0643_00 242 | scene0644_00 243 | scene0645_00 244 | scene0645_01 245 | scene0645_02 246 | scene0647_00 247 | scene0647_01 248 | scene0648_00 249 | scene0648_01 250 | scene0651_00 251 | scene0651_01 252 | scene0651_02 253 | scene0652_00 254 | scene0653_00 255 | scene0653_01 256 | scene0655_00 257 | scene0655_01 258 | scene0655_02 259 | scene0658_00 260 | scene0660_00 261 | scene0663_00 262 | scene0663_01 263 | scene0663_02 264 | scene0664_00 265 | scene0664_01 266 | scene0664_02 267 | scene0665_00 268 | scene0665_01 269 | scene0670_00 270 | scene0670_01 271 | scene0671_00 272 | scene0671_01 273 | scene0678_00 274 | scene0678_01 275 | scene0678_02 276 | scene0684_00 277 | scene0684_01 278 | scene0685_00 279 | scene0685_01 280 | scene0685_02 281 | scene0686_00 282 | scene0686_01 283 | scene0686_02 284 | scene0689_00 285 | scene0690_00 286 | scene0690_01 287 | scene0693_00 288 | scene0693_01 289 | scene0693_02 290 | scene0695_00 291 | scene0695_01 292 | scene0695_02 293 | scene0695_03 294 | scene0696_00 295 | scene0696_01 296 | scene0696_02 297 | scene0697_00 298 | scene0697_01 299 | scene0697_02 300 | scene0697_03 301 | scene0699_00 302 | scene0700_00 303 | scene0700_01 304 | scene0700_02 305 | scene0701_00 306 | scene0701_01 307 | scene0701_02 308 | scene0702_00 309 | scene0702_01 310 | scene0702_02 311 | scene0704_00 312 | scene0704_01 313 | -------------------------------------------------------------------------------- /dist_run.sh: -------------------------------------------------------------------------------- 1 | set -x 2 | export PYTHONPATH=$PYTHONPATH:./ 3 | 4 | CUDA_VISIBLE_DEVICES=$1 python3 -m torch.distributed.launch --nproc_per_node=$2 --master_port=$3 tools/run_net.py --config-file=$4 $5 5 | -------------------------------------------------------------------------------- /run.sh: -------------------------------------------------------------------------------- 1 | set -x 2 | export PYTHONPATH=$PYTHONPATH:./ 3 | 4 | CUDA_VISIBLE_DEVICES=$1 python3 tools/run_net.py --config-file=$2 $3 -------------------------------------------------------------------------------- /tools/prepare_scannet.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import glob 3 | import os 4 | import numpy as np 5 | import open3d 6 | import torch 7 | from functools import partial 8 | from plyfile import PlyData 9 | 10 | from voxelnet.utils.general import multi_process 11 | 12 | 13 | remapper = np.ones(150) * (-100) 14 | for i, x in enumerate([1,2,3,4,5,6,7,8,9,10,11,12,14,16,24,28,33,34,36,39]): 15 | remapper[x] = i 16 | 17 | def collect_point_label(room,data_dir,save_dir,task): 18 | ply_file = os.path.join(data_dir,"scans" if task != "test" else "scans_test",room,room+"_vh_clean_2.ply") 19 | save_file = os.path.join(save_dir,room+".pt") 20 | 21 | mesh = open3d.io.read_triangle_mesh(ply_file) 22 | vertices = np.asarray(mesh.vertices) 23 | vertices = vertices - vertices.mean(0) 24 | 25 | labels_file_path = ply_file.replace('.ply', '.labels.ply') 26 | if os.path.exists(labels_file_path): 27 | vertex_labels = np.asarray(PlyData.read(labels_file_path)['vertex']['label']) 28 | vertex_labels = remapper[vertex_labels] 29 | else: 30 | vertex_labels = None 31 | 32 | data = dict( 33 | vertices = torch.from_numpy(vertices).float(), 34 | colors =torch.from_numpy(np.asarray(mesh.vertex_colors) * 2 - 1).float(), 35 | labels = torch.from_numpy(vertex_labels).long() if vertex_labels is not None else vertex_labels 36 | ) 37 | torch.save(data, save_file) 38 | 39 | def process(data_dir,save_dir,meta_dir): 40 | for file in glob.glob(f"{meta_dir}/scannetv2_*.txt"): 41 | task = file.split("_")[-1].split(".")[0] 42 | with open(file, "r") as f: 43 | rooms = [line.strip() for line in f.readlines() if len(line.strip())>0] 44 | 45 | task_save_dir = os.path.join(save_dir,task) 46 | os.makedirs(task_save_dir,exist_ok=True) 47 | func = partial(collect_point_label,data_dir=data_dir,save_dir=task_save_dir,task=task) 48 | multi_process(func,rooms,processes=64) 49 | 50 | 51 | if __name__ == "__main__": 52 | parser = argparse.ArgumentParser(description='Scannet Data Preparation') 53 | parser.add_argument('--in_path', default=None, type=str, required=True, 54 | help='path to scene data (default: None)') 55 | parser.add_argument('--out_path', default=None, type=str, required=True, 56 | help='path for saving processed data (default: None)') 57 | parser.add_argument("--meta_path", default=None, type=str, required=True, 58 | help='meta path for split data (default: None)') 59 | args = parser.parse_args() 60 | 61 | process(args.in_path,args.out_path,args.meta_path) -------------------------------------------------------------------------------- /tools/run_net.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import torch 3 | import torch.distributed as dist 4 | from voxelnet.runner import Runner 5 | from voxelnet.utils.config import init_cfg 6 | from voxelnet.utils.general import init_seeds 7 | 8 | def main(): 9 | parser = argparse.ArgumentParser(description="Voxel Training") 10 | parser.add_argument( 11 | "--config-file", 12 | default="", 13 | metavar="FILE", 14 | help="path to config file", 15 | type=str, 16 | ) 17 | parser.add_argument( 18 | "--task", 19 | default="val", 20 | help="test,val", 21 | type=str, 22 | ) 23 | parser.add_argument( 24 | "--local-rank", 25 | default=-1, 26 | type=int, 27 | ) 28 | 29 | args = parser.parse_args() 30 | 31 | assert args.task in ["val","test"],f"{args.task} not support, please choose [test,val]" 32 | 33 | # Check cuda 34 | use_cuda = torch.cuda.is_available() 35 | if use_cuda is False: 36 | raise ValueError("CUDA is not available!") 37 | 38 | if args.local_rank >=0 : 39 | dist.init_process_group(backend="nccl",init_method="env://") 40 | torch.cuda.set_device(args.local_rank) 41 | print(f"Rank {args.local_rank} initialized!") 42 | init_seeds(args.local_rank+1) 43 | 44 | if args.config_file: 45 | init_cfg(args.config_file,rank=args.local_rank) 46 | 47 | runner = Runner() 48 | 49 | if args.task == "val": 50 | runner.val() 51 | elif args.task == "test": 52 | runner.test() 53 | 54 | if __name__ == "__main__": 55 | main() -------------------------------------------------------------------------------- /tools/vis_erf.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | import math 4 | import voxelnet.modules.functional as VF 5 | from voxelnet.models.lrpnet import LRPNet 6 | import torch.nn.functional as F 7 | import cv2 8 | import trimesh 9 | import os 10 | import torchsparse 11 | from torchsparse import SparseTensor 12 | 13 | def read_data(data_file,rotate=True,full_scale = [4096, 4096, 4096]): 14 | data = torch.load(data_file) 15 | labels = data['labels'].cuda() 16 | vertices_ori = data['vertices'].cuda() 17 | src_vertices = vertices_ori.clone() 18 | colors = data['colors'].cuda() 19 | src_colors = (colors.clone()+1)*255/2 20 | # Affine linear transformation 21 | trans_m = np.eye(3) 22 | 23 | trans_m *= 50 24 | theta = np.random.rand() * 2 * math.pi 25 | if rotate: 26 | trans_m = np.matmul(trans_m, [[math.cos(theta), math.sin(theta), 0], [-math.sin(theta), math.cos(theta), 0], [0, 0, 1]]) 27 | 28 | trans_m = trans_m.astype(np.float32) 29 | 30 | # vertices_ori = np.matmul(vertices_ori, trans_m) 31 | vertices_ori = torch.matmul(vertices_ori, torch.tensor(trans_m,device=vertices_ori.device)) 32 | 33 | # Random placement in the receptive field 34 | vertices_min = torch.min(vertices_ori, dim=0)[0].cpu().numpy() 35 | vertices_max = torch.max(vertices_ori, dim=0)[0].cpu().numpy() 36 | offset = -vertices_min + np.clip(full_scale - vertices_max + vertices_min - 0.001, 0, None) * np.random.rand(3) \ 37 | + np.clip(full_scale - vertices_max + vertices_min + 0.001, None, 0) * np.random.rand(3) 38 | 39 | vertices_ori += torch.tensor(offset,device=vertices_ori.device) 40 | 41 | # Voxelization 42 | coords_v = vertices_ori.int() 43 | 44 | # Remove duplicate items 45 | _, unique_idxs,unique_reidx = VF.unique(coords_v, dim=0, return_index=True,return_inverse=True) 46 | coords_v = coords_v[unique_idxs] 47 | colors_v = colors[unique_idxs] 48 | labels_v = labels[unique_idxs] 49 | 50 | # Put into containers 51 | coords_v_b = torch.cat([coords_v, torch.full(size=(coords_v.shape[0], 1),fill_value=0,device=coords_v.device,dtype=torch.int)], 1) 52 | 53 | return coords_v_b, colors_v, labels_v, unique_reidx, unique_idxs, src_vertices,src_colors 54 | 55 | def save_pointcloud(vertices,colors,faces,save_file): 56 | trimesh.Trimesh(vertices,faces=faces,vertex_colors=colors).export(save_file) 57 | 58 | def draw_heatmap(grad_map,vertices,position,name,src_colors,faces,ply_vertices): 59 | grad_map = np.log10(grad_map+1) 60 | grad_map /= 0.8 61 | 62 | grad_map = np.clip(grad_map,0.,1.) 63 | grad_map = np.sqrt(grad_map) 64 | 65 | heatmap = cv2.applyColorMap(np.uint8(255 * grad_map), cv2.COLORMAP_JET) 66 | heatmap = cv2.cvtColor(heatmap, cv2.COLOR_BGR2RGB) 67 | heatmap = heatmap[:,0] 68 | 69 | src_colors = src_colors.cpu().numpy() 70 | 71 | fuse_colors = grad_map[:,None]*heatmap+(1-grad_map[:,None])*src_colors 72 | fuse_colors = np.clip(fuse_colors,0,255).astype(np.uint8) 73 | 74 | vertices = ply_vertices 75 | colors = fuse_colors 76 | 77 | os.makedirs("heatmaps",exist_ok=True) 78 | 79 | save_pointcloud(vertices,colors,faces,f"heatmaps/heatmap_{name}.ply") 80 | 81 | def forward_model(model,coords_v_b,colors_v,unique_reidx, unique_idxs, src_vertices,position): 82 | colors_v = torch.autograd.Variable(colors_v, requires_grad=True) 83 | # input 84 | indices = coords_v_b.int() 85 | x = SparseTensor(colors_v, indices) 86 | 87 | en0 = model.input_conv(x) 88 | 89 | en_tensors = [en0] 90 | for i in range(model.depth): 91 | x = model.downsamples[i](en_tensors[-1]) 92 | x = model.encoders[i](x) 93 | if i0,as_tuple=True)[0] 112 | 113 | center_point = F.relu(feats[f_idx]).sum() 114 | grad = torch.autograd.grad(center_point, colors_v)[0] 115 | grad = F.relu(grad) 116 | grad = grad[unique_reidx] 117 | 118 | grad_map = grad.sum(dim=1).cpu().numpy() 119 | return grad_map 120 | 121 | 122 | def build_lrp_model(use_trained = True): 123 | model = LRPNet( 124 | in_channels=3, 125 | out_channels=20, 126 | encoder_channels=[32,64,96,128,128], 127 | decoder_channels=[128,128,128,128,128], 128 | ) 129 | state_dict = torch.load("work_dirs/scannet_largenet_f10_scale/checkpoints/ckpt_epoch_500.pt")['model'] 130 | for key in list(state_dict.keys()): 131 | if key.startswith("module."): 132 | new_key = key.replace("module.", "") 133 | state_dict[new_key] = state_dict[key] 134 | del state_dict[key] 135 | 136 | if use_trained: 137 | model.load_state_dict(state_dict) 138 | model = model.cuda() 139 | return model 140 | 141 | 142 | def vis_heatmap(model,name,data_file,position,faces,vertices,position2=None): 143 | coords_v, colors_v, labels, unique_reidx,unique_idxs,src_vertices,src_colors = read_data(data_file,rotate=True) 144 | grad_map = np.zeros((src_vertices.shape[0],)) 145 | for i in range(8): 146 | coords_v, colors_v, labels, unique_reidx,unique_idxs,src_vertices,src_colors = read_data(data_file,rotate=True) 147 | grad_map_once = forward_model(model,coords_v,colors_v,unique_reidx,unique_idxs,src_vertices,position) 148 | grad_map += grad_map_once 149 | print("once",i) 150 | if position2 is None: 151 | draw_heatmap(grad_map,src_vertices,position,name,src_colors,faces,vertices) 152 | return 153 | for i in range(8): 154 | coords_v, colors_v, labels, unique_reidx,unique_idxs,src_vertices,src_colors = read_data(data_file,rotate=True) 155 | grad_map_once = forward_model(model,coords_v,colors_v,unique_reidx,unique_idxs,src_vertices,position2) 156 | grad_map += grad_map_once 157 | print("once",i) 158 | draw_heatmap(grad_map,src_vertices,position,name,src_colors,faces,vertices) 159 | 160 | scene_name = "0011_00" 161 | mesh = trimesh.load(f"scannet/scans/scene{scene_name}/scene{scene_name}_vh_clean_2.ply",process=False) 162 | faces = mesh.faces 163 | vertices = np.asarray(mesh.vertices) 164 | data_file = f"data/scannet/val/scene{scene_name}.pt" 165 | coords_v, colors_v, labels, unique_reidx,unique_idxs,src_vertices,src_colors = read_data(data_file,rotate=True) 166 | position = [1.,-2.9,-0.2] 167 | position2 = [-2.5, 1.6,0.3] 168 | model = build_lrp_model(True) 169 | vis_heatmap(model,f"lrp_{scene_name}",data_file,position,faces,vertices,position2) -------------------------------------------------------------------------------- /voxelnet/__init__.py: -------------------------------------------------------------------------------- 1 | from . import models 2 | from . import optims 3 | from . import data 4 | from . import runner -------------------------------------------------------------------------------- /voxelnet/data/__init__.py: -------------------------------------------------------------------------------- 1 | from . import scannetv2_cuda -------------------------------------------------------------------------------- /voxelnet/data/scannetv2_cuda.py: -------------------------------------------------------------------------------- 1 | from re import L 2 | import numpy as np 3 | import torch 4 | import math 5 | import torch.utils.data 6 | import glob 7 | import voxelnet.modules.functional as VF 8 | from voxelnet.utils.registry import DATASETS 9 | 10 | class LoadDataset: 11 | def __init__(self,files): 12 | self.files = files 13 | def __getitem__(self, index): 14 | return (torch.load(self.files[index]),index) 15 | def __len__(self): 16 | return len(self.files) 17 | 18 | @DATASETS.register_module() 19 | class ScanNetCuda: 20 | def __init__(self, data_path, mode = "train", shuffle=True, crop_by_limit = False, 21 | scale = 50, 22 | batch_size = 8, 23 | full_scale = [4096, 4096, 4096], 24 | limit_numpoints = 1000000, 25 | num_workers = 4, 26 | rotate=True, 27 | flip_x = True, 28 | rank=-1): 29 | 30 | self.rotate = rotate 31 | self.flip_x = flip_x 32 | self.rank=rank 33 | self.crop_by_limit = crop_by_limit 34 | self.mode = "val" 35 | val_files_path = data_path + '/val' 36 | test_files_path = data_path + '/test' 37 | val_files = sorted(glob.glob(val_files_path + '/*.pt')) 38 | test_files = sorted(glob.glob(test_files_path + '/*.pt')) 39 | self.infer_files = val_files if mode == "val" else test_files 40 | # self.test_files = test_files 41 | self.val = mode == "val" or mode == "test" 42 | 43 | self.shuffle = shuffle 44 | self.scale = scale 45 | self.batch_size = batch_size 46 | self.full_scale = full_scale 47 | self.limit_numpoints = limit_numpoints 48 | self.num_workers = num_workers 49 | 50 | 51 | def infer_data_loader(self): 52 | 53 | infer_offsets=[0] 54 | if self.val: 55 | infer_labels=[] 56 | for _, infer_file in enumerate(self.infer_files): 57 | # print("load",infer_file) 58 | data = torch.load(infer_file) 59 | infer_offsets.append(infer_offsets[-1] + data['vertices'].shape[0]) 60 | if self.val and 'labels' in data: 61 | infer_labels.append(data['labels'].squeeze().numpy()) 62 | self.infer_offsets = infer_offsets 63 | if self.val and len(infer_labels)>0: 64 | self.infer_labels = np.hstack(infer_labels) 65 | self.infer_labels = torch.tensor(self.infer_labels).cuda() 66 | 67 | dataset = LoadDataset(self.infer_files) 68 | return torch.utils.data.DataLoader( 69 | dataset, 70 | batch_size = self.batch_size, 71 | collate_fn = self.inferCollate, 72 | num_workers = self.num_workers, 73 | shuffle = self.shuffle, 74 | pin_memory = True 75 | ) 76 | 77 | def inferCollate(self, tbl): 78 | # datas = [(torch.load(self.infer_files[i]),i) for i in tbl] 79 | # return datas 80 | return tbl 81 | 82 | 83 | def inferAfter(self, batch): 84 | 85 | coords_v_b = [] 86 | colors_v_b = [] 87 | vertices_v_b = [] 88 | reindex_v_b = [] 89 | 90 | point_ids = [] 91 | num=0 92 | 93 | # Process in batch 94 | for idx, (data,i) in enumerate(batch): 95 | 96 | # vertices 97 | vertices_ori = data['vertices'].cuda() 98 | colors = data['colors'].cuda() 99 | 100 | # Affine linear transformation 101 | trans_m = np.eye(3) 102 | if self.flip_x: 103 | trans_m[0][0] *= np.random.randint(0, 2) * 2 - 1 104 | 105 | trans_m *= self.scale 106 | theta = np.random.rand() * 2 * math.pi 107 | if self.rotate: 108 | trans_m = np.matmul(trans_m, [[math.cos(theta), math.sin(theta), 0], [-math.sin(theta), math.cos(theta), 0], [0, 0, 1]]) 109 | 110 | trans_m = trans_m.astype(np.float32) 111 | 112 | # vertices_ori = np.matmul(vertices_ori, trans_m) 113 | vertices_ori = torch.matmul(vertices_ori, torch.tensor(trans_m,device=vertices_ori.device)) 114 | 115 | # Random placement in the receptive field 116 | vertices_min = torch.min(vertices_ori, dim=0)[0].cpu().numpy() 117 | vertices_max = torch.max(vertices_ori, dim=0)[0].cpu().numpy() 118 | offset = -vertices_min + np.clip(self.full_scale - vertices_max + vertices_min - 0.001, 0, None) * np.random.rand(3) \ 119 | + np.clip(self.full_scale - vertices_max + vertices_min + 0.001, None, 0) * np.random.rand(3) 120 | 121 | vertices_ori += torch.tensor(offset,device=vertices_ori.device) 122 | 123 | pointidx = torch.arange(0,vertices_ori.size(0),device=vertices_ori.device) 124 | 125 | # Voxelization 126 | coords_v = vertices_ori.int() 127 | 128 | # Remove duplicate items 129 | _, unique_idxs,unique_reidx = VF.unique(coords_v, dim=0, return_index=True,return_inverse=True) 130 | coords_v = coords_v[unique_idxs] 131 | colors_v = colors[unique_idxs] 132 | vertices_v = vertices_ori[unique_idxs] 133 | 134 | # Put into containers 135 | coords_v_b += [torch.cat([coords_v, torch.full(size=(coords_v.shape[0], 1),fill_value=idx,device=coords_v.device,dtype=torch.int)], 1)] 136 | colors_v_b += [colors_v] 137 | vertices_v_b += [vertices_v] 138 | reindex_v_b += [unique_reidx+num] 139 | num+=len(coords_v) 140 | 141 | point_ids += [pointidx + self.infer_offsets[i]] 142 | 143 | 144 | # Construct batches 145 | coords_v_b = torch.cat(coords_v_b, 0) 146 | colors_v_b = torch.cat(colors_v_b, 0) 147 | vertices_v_b = torch.cat(vertices_v_b, 0) 148 | point_ids = torch.cat(point_ids, 0) 149 | reindex_v_b = torch.cat(reindex_v_b,0) 150 | return {'coords_v_b': coords_v_b, 'colors_v_b': colors_v_b, 'point_ids': point_ids,"reindex_v_b":reindex_v_b,"vertices_v_b":vertices_v_b} 151 | -------------------------------------------------------------------------------- /voxelnet/models/__init__.py: -------------------------------------------------------------------------------- 1 | from . import lrpnet -------------------------------------------------------------------------------- /voxelnet/models/lrpnet.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torchsparse 4 | import torchsparse.nn as spnn 5 | from torchsparse import SparseTensor 6 | 7 | from voxelnet.utils.registry import MODELS 8 | from voxelnet.modules.pool import Pool 9 | from voxelnet.modules.functional import fapply 10 | 11 | 12 | class BatchNorm(nn.BatchNorm1d): 13 | 14 | def forward(self, input: SparseTensor) -> SparseTensor: 15 | return fapply(input, super().forward) 16 | 17 | class Dropout(nn.Dropout): 18 | 19 | def forward(self, input: SparseTensor) -> SparseTensor: 20 | return fapply(input, super().forward) 21 | 22 | class ConvBlock(nn.Module): 23 | def __init__(self,in_channels,out_channels, 24 | kernel_size=1, 25 | stride=1, 26 | dilation=1, 27 | transposed=False, 28 | norm_layer=BatchNorm, 29 | conv_layer = spnn.Conv3d, 30 | activate_layer=spnn.ReLU) -> None: 31 | super().__init__() 32 | self.conv = conv_layer(in_channels=in_channels,out_channels=out_channels,kernel_size=kernel_size,stride=stride,dilation=dilation,transposed=transposed) 33 | if norm_layer is not None: 34 | # if get_cfg().rank>=0: 35 | # self.norm = SyncNorm(out_channels) 36 | # else: 37 | # self.norm = norm_layer(out_channels) 38 | self.norm = norm_layer(out_channels) 39 | else: 40 | self.norm = nn.Identity() 41 | if activate_layer is not None: 42 | self.act = activate_layer(True) 43 | else: 44 | self.act = nn.Identity() 45 | 46 | def forward(self,x): 47 | return self.act(self.norm(self.conv(x))) 48 | 49 | 50 | class SwitchModule(nn.Module): 51 | def __init__(self,channels): 52 | super().__init__() 53 | self.conv = spnn.Conv3d(channels,channels*3,kernel_size=1) 54 | 55 | def forward(self,input,x1,x2,x3): 56 | scale = self.conv(input).feats 57 | scale = scale.reshape(scale.shape[0],3,-1) 58 | f2 = torch.stack([x1.feats,x2.feats,x3.feats],dim=1) 59 | f2 = (f2*scale).sum(dim=1) 60 | output = SparseTensor(coords=input.coords, feats=f2, stride=input.stride) 61 | output.cmaps = input.cmaps 62 | output.kmaps = input.kmaps 63 | return output 64 | 65 | class LRPBlock(nn.Module): 66 | def __init__(self,in_channels,out_channels,kernel_size=3) -> None: 67 | super().__init__() 68 | self.in_channels = in_channels 69 | self.out_channels = out_channels 70 | if in_channels!=out_channels: 71 | self.lin = spnn.Conv3d(in_channels, out_channels, kernel_size=1, stride=1, bias=False) 72 | 73 | else: 74 | self.lin = nn.Identity() 75 | self.conv0 = nn.Sequential( 76 | ConvBlock(in_channels,out_channels,kernel_size=kernel_size), 77 | ConvBlock(out_channels,out_channels,kernel_size=kernel_size) 78 | ) 79 | 80 | self.pool1 = Pool(kernel_size=kernel_size) 81 | self.pool2 = Pool(kernel_size=kernel_size,dilation=3) 82 | self.pool3 = Pool(kernel_size=kernel_size,dilation=9) 83 | 84 | self.scale0 = Scale(out_channels,1.) 85 | self.scale1 = Scale(out_channels,1e-2) 86 | p = 0.15 87 | 88 | self.dropout = Dropout(p) 89 | self.switch = SwitchModule(out_channels) 90 | 91 | def forward(self,x): 92 | x0 = self.conv0(x) 93 | x0 = x0+self.lin(x) 94 | 95 | x1 = self.pool1(x0) 96 | x2 = self.pool2(x1) 97 | x3 = self.pool3(x2) 98 | y = self.scale0(x0) + self.dropout(self.scale1(self.switch(x0,x1,x2,x3))) 99 | return y 100 | 101 | 102 | 103 | class Scale(nn.Module): 104 | def __init__(self,channels,layer_scale_init_value=1e-2) -> None: 105 | super().__init__() 106 | self.scale = nn.Parameter(torch.ones(channels)*layer_scale_init_value) 107 | 108 | def forward(self,input): 109 | feats = input.feats 110 | feats = self.scale.unsqueeze(0)*feats 111 | output = SparseTensor(coords=input.coords, feats=feats, stride=input.stride) 112 | output.cmaps = input.cmaps 113 | output.kmaps = input.kmaps 114 | return output 115 | 116 | @MODELS.register_module() 117 | class LRPNet(nn.Module): 118 | def __init__(self, 119 | in_channels=3, 120 | out_channels=20, 121 | encoder_channels=[32,64,96,128,128,128,128], 122 | decoder_channels=[128,128,128,128,128,128,128]): 123 | 124 | super().__init__() 125 | 126 | 127 | assert len(encoder_channels) == len(decoder_channels) and encoder_channels[-1] == decoder_channels[0] 128 | self.depth = len(encoder_channels)-1 129 | 130 | block = LRPBlock 131 | 132 | 133 | self.in_channels = in_channels 134 | 135 | # Unet-like structure 136 | #-------------------------------- Input ---------------------------------------- 137 | self.input_conv = nn.Sequential( 138 | ConvBlock(in_channels,encoder_channels[0],3,1), 139 | block(encoder_channels[0],encoder_channels[0],3) 140 | ) 141 | #-------------------------------- Encoder ---------------------------------------- 142 | self.encoders = nn.ModuleList() 143 | self.downsamples = nn.ModuleList() 144 | for i in range(self.depth): 145 | self.downsamples.append(ConvBlock(encoder_channels[i],encoder_channels[i+1],2,2)) 146 | self.encoders.append(block(encoder_channels[i+1],encoder_channels[i+1],3)) 147 | 148 | self.decoders = nn.ModuleList() 149 | self.upsamples = nn.ModuleList() 150 | for i in range(self.depth): 151 | self.upsamples.append(ConvBlock(decoder_channels[i],decoder_channels[i+1],2,2,transposed=True)) 152 | self.decoders.append(block(decoder_channels[i+1]+encoder_channels[self.depth-i-1],decoder_channels[i+1],3)) 153 | 154 | # Linear head 155 | self.pred = spnn.Conv3d(decoder_channels[-1], out_channels, kernel_size=1, stride=1, bias=True) 156 | 157 | 158 | 159 | def forward(self, features, indices,vertices): 160 | if self.in_channels==6: 161 | features = torch.cat([features,vertices],dim=1) 162 | # input 163 | indices = indices.int() 164 | x = SparseTensor(features, indices) 165 | 166 | en0 = self.input_conv(x) 167 | 168 | en_tensors = [en0] 169 | for i in range(self.depth): 170 | x = self.downsamples[i](en_tensors[-1]) 171 | x = self.encoders[i](x) 172 | if i 2 | #include 3 | #include 4 | 5 | #include "scatter/scatter.h" 6 | 7 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 8 | m.def("scatter_max_cuda", &scatter_max_cuda, "scatter_max_cuda"); 9 | m.def("scatter_backward_cuda", &scatter_backward_cuda, "scatter_backward_cuda"); 10 | } 11 | -------------------------------------------------------------------------------- /voxelnet/modules/csrc/scatter/atomics.cuh: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #define ATOMIC(NAME) \ 4 | template struct Atomic##NAME##IntegerImpl; \ 5 | \ 6 | template struct Atomic##NAME##IntegerImpl { \ 7 | inline __device__ void operator()(scalar *address, scalar val) { \ 8 | uint32_t *address_as_ui = (uint32_t *)(address - ((size_t)address & 3)); \ 9 | uint32_t old = *address_as_ui; \ 10 | uint32_t shift = ((size_t)address & 3) * 8; \ 11 | uint32_t sum; \ 12 | uint32_t assumed; \ 13 | \ 14 | do { \ 15 | assumed = old; \ 16 | sum = OP(val, scalar((old >> shift) & 0xff)); \ 17 | old = (old & ~(0x000000ff << shift)) | (sum << shift); \ 18 | old = atomicCAS(address_as_ui, assumed, old); \ 19 | } while (assumed != old); \ 20 | } \ 21 | }; \ 22 | \ 23 | template struct Atomic##NAME##IntegerImpl { \ 24 | inline __device__ void operator()(scalar *address, scalar val) { \ 25 | uint32_t *address_as_ui = \ 26 | (uint32_t *)((char *)address - ((size_t)address & 2)); \ 27 | uint32_t old = *address_as_ui; \ 28 | uint32_t sum; \ 29 | uint32_t newval; \ 30 | uint32_t assumed; \ 31 | \ 32 | do { \ 33 | assumed = old; \ 34 | sum = OP(val, (size_t)address & 2 ? scalar(old >> 16) \ 35 | : scalar(old & 0xffff)); \ 36 | newval = (size_t)address & 2 ? (old & 0xffff) | (sum << 16) \ 37 | : (old & 0xffff0000) | sum; \ 38 | old = atomicCAS(address_as_ui, assumed, newval); \ 39 | } while (assumed != old); \ 40 | } \ 41 | }; \ 42 | \ 43 | template struct Atomic##NAME##IntegerImpl { \ 44 | inline __device__ void operator()(scalar *address, scalar val) { \ 45 | uint32_t *address_as_ui = (uint32_t *)address; \ 46 | uint32_t old = *address_as_ui; \ 47 | uint32_t assumed; \ 48 | \ 49 | do { \ 50 | assumed = old; \ 51 | old = atomicCAS(address_as_ui, assumed, OP(val, (scalar)old)); \ 52 | } while (assumed != old); \ 53 | } \ 54 | }; \ 55 | \ 56 | template struct Atomic##NAME##IntegerImpl { \ 57 | inline __device__ void operator()(scalar *address, scalar val) { \ 58 | unsigned long long *address_as_ull = (unsigned long long *)address; \ 59 | unsigned long long old = *address_as_ull; \ 60 | unsigned long long assumed; \ 61 | \ 62 | do { \ 63 | assumed = old; \ 64 | old = atomicCAS(address_as_ull, assumed, OP(val, (scalar)old)); \ 65 | } while (assumed != old); \ 66 | } \ 67 | }; \ 68 | \ 69 | template struct Atomic##NAME##DecimalImpl; \ 70 | \ 71 | template <> struct Atomic##NAME##DecimalImpl { \ 72 | inline __device__ void operator()(at::Half *address, at::Half val) { \ 73 | unsigned int *address_as_ui = \ 74 | (unsigned int *)((char *)address - ((size_t)address & 2)); \ 75 | unsigned int old = *address_as_ui; \ 76 | unsigned int assumed; \ 77 | \ 78 | do { \ 79 | assumed = old; \ 80 | at::Half hsum; \ 81 | hsum.x = (size_t)address & 2 ? (old >> 16) : (old & 0xffff); \ 82 | hsum = OP(hsum, val); \ 83 | old = (size_t)address & 2 ? (old & 0xffff) | (hsum.x << 16) \ 84 | : (old & 0xffff0000) | hsum.x; \ 85 | old = atomicCAS(address_as_ui, assumed, old); \ 86 | } while (assumed != old); \ 87 | } \ 88 | }; \ 89 | \ 90 | template <> struct Atomic##NAME##DecimalImpl { \ 91 | inline __device__ void operator()(at::BFloat16 *address, at::BFloat16 val){\ 92 | unsigned int *address_as_ui = \ 93 | (unsigned int *)((char *)address - ((size_t)address & 2)); \ 94 | unsigned int old = *address_as_ui; \ 95 | unsigned int assumed; \ 96 | \ 97 | do { \ 98 | assumed = old; \ 99 | at::BFloat16 hsum; \ 100 | hsum.x = (size_t)address & 2 ? (old >> 16) : (old & 0xffff); \ 101 | hsum = OP(hsum, val); \ 102 | old = (size_t)address & 2 ? (old & 0xffff) | (hsum.x << 16) \ 103 | : (old & 0xffff0000) | hsum.x; \ 104 | old = atomicCAS(address_as_ui, assumed, old); \ 105 | } while (assumed != old); \ 106 | } \ 107 | }; \ 108 | \ 109 | template struct Atomic##NAME##DecimalImpl { \ 110 | inline __device__ void operator()(scalar *address, scalar val) { \ 111 | int *address_as_i = (int *)address; \ 112 | int old = *address_as_i; \ 113 | int assumed; \ 114 | \ 115 | do { \ 116 | assumed = old; \ 117 | old = atomicCAS(address_as_i, assumed, \ 118 | __float_as_int(OP(val, __int_as_float(assumed)))); \ 119 | } while (assumed != old); \ 120 | } \ 121 | }; \ 122 | \ 123 | template struct Atomic##NAME##DecimalImpl { \ 124 | inline __device__ void operator()(scalar *address, scalar val) { \ 125 | unsigned long long int *address_as_ull = \ 126 | (unsigned long long int *)address; \ 127 | unsigned long long int old = *address_as_ull; \ 128 | unsigned long long int assumed; \ 129 | \ 130 | do { \ 131 | assumed = old; \ 132 | old = atomicCAS( \ 133 | address_as_ull, assumed, \ 134 | __double_as_longlong(OP(val, __longlong_as_double(assumed)))); \ 135 | } while (assumed != old); \ 136 | } \ 137 | }; 138 | 139 | #define OP(X, Y) Y + X 140 | ATOMIC(Add) 141 | #undef OP 142 | static inline __device__ void atomAdd(uint8_t *address, uint8_t val) { 143 | AtomicAddIntegerImpl()(address, val); 144 | } 145 | static inline __device__ void atomAdd(int8_t *address, int8_t val) { 146 | AtomicAddIntegerImpl()(address, val); 147 | } 148 | static inline __device__ void atomAdd(int16_t *address, int16_t val) { 149 | AtomicAddIntegerImpl()(address, val); 150 | } 151 | static inline __device__ void atomAdd(int32_t *address, int32_t val) { 152 | atomicAdd(address, val); 153 | } 154 | static inline __device__ void atomAdd(int64_t *address, int64_t val) { 155 | AtomicAddIntegerImpl()(address, val); 156 | } 157 | #if defined(USE_ROCM) || (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 700 || CUDA_VERSION < 10000)) 158 | static inline __device__ void atomAdd(at::Half *address, at::Half val) { 159 | AtomicAddDecimalImpl()(address, val); 160 | } 161 | #else 162 | static inline __device__ void atomAdd(at::Half *address, at::Half val) { 163 | atomicAdd(reinterpret_cast<__half *>(address), val); 164 | } 165 | #endif 166 | static inline __device__ void atomAdd(float *address, float val) { 167 | atomicAdd(address, val); 168 | } 169 | #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 600 || CUDA_VERSION < 8000) 170 | static inline __device__ void atomAdd(double *address, double val) { 171 | AtomicAddDecimalImpl()(address, val); 172 | } 173 | #else 174 | static inline __device__ void atomAdd(double *address, double val) { 175 | atomicAdd(address, val); 176 | } 177 | #endif 178 | static inline __device__ void atomAdd(at::BFloat16 *address, at::BFloat16 val) { 179 | AtomicAddDecimalImpl()(address, val); 180 | } 181 | 182 | #define OP(X, Y) Y *X 183 | ATOMIC(Mul) 184 | #undef OP 185 | static inline __device__ void atomMul(uint8_t *address, uint8_t val) { 186 | AtomicMulIntegerImpl()(address, val); 187 | } 188 | static inline __device__ void atomMul(int8_t *address, int8_t val) { 189 | AtomicMulIntegerImpl()(address, val); 190 | } 191 | static inline __device__ void atomMul(int16_t *address, int16_t val) { 192 | AtomicMulIntegerImpl()(address, val); 193 | } 194 | static inline __device__ void atomMul(int32_t *address, int32_t val) { 195 | AtomicMulIntegerImpl()(address, val); 196 | } 197 | static inline __device__ void atomMul(int64_t *address, int64_t val) { 198 | AtomicMulIntegerImpl()(address, val); 199 | } 200 | static inline __device__ void atomMul(float *address, float val) { 201 | AtomicMulDecimalImpl()(address, val); 202 | } 203 | static inline __device__ void atomMul(at::Half *address, at::Half val) { 204 | AtomicMulDecimalImpl()(address, val); 205 | } 206 | static inline __device__ void atomMul(double *address, double val) { 207 | AtomicMulDecimalImpl()(address, val); 208 | } 209 | static inline __device__ void atomMul(at::BFloat16 *address, at::BFloat16 val) { 210 | AtomicMulDecimalImpl()(address, val); 211 | } 212 | 213 | #define OP(X, Y) Y / X 214 | ATOMIC(Div) 215 | #undef OP 216 | static inline __device__ void atomDiv(uint8_t *address, uint8_t val) { 217 | AtomicDivIntegerImpl()(address, val); 218 | } 219 | static inline __device__ void atomDiv(int8_t *address, int8_t val) { 220 | AtomicDivIntegerImpl()(address, val); 221 | } 222 | static inline __device__ void atomDiv(int16_t *address, int16_t val) { 223 | AtomicDivIntegerImpl()(address, val); 224 | } 225 | static inline __device__ void atomDiv(int32_t *address, int32_t val) { 226 | AtomicDivIntegerImpl()(address, val); 227 | } 228 | static inline __device__ void atomDiv(int64_t *address, int64_t val) { 229 | AtomicDivIntegerImpl()(address, val); 230 | } 231 | static inline __device__ void atomDiv(at::Half *address, at::Half val) { 232 | AtomicDivDecimalImpl()(address, val); 233 | } 234 | static inline __device__ void atomDiv(float *address, float val) { 235 | AtomicDivDecimalImpl()(address, val); 236 | } 237 | static inline __device__ void atomDiv(double *address, double val) { 238 | AtomicDivDecimalImpl()(address, val); 239 | } 240 | static inline __device__ void atomDiv(at::BFloat16 *address, at::BFloat16 val) { 241 | AtomicDivDecimalImpl()(address, val); 242 | } 243 | 244 | #define OP(X, Y) max(Y, X) 245 | ATOMIC(Max) 246 | #undef OP 247 | static inline __device__ void atomMax(uint8_t *address, uint8_t val) { 248 | AtomicMaxIntegerImpl()(address, val); 249 | } 250 | static inline __device__ void atomMax(int8_t *address, int8_t val) { 251 | AtomicMaxIntegerImpl()(address, val); 252 | } 253 | static inline __device__ void atomMax(int16_t *address, int16_t val) { 254 | AtomicMaxIntegerImpl()(address, val); 255 | } 256 | static inline __device__ void atomMax(int32_t *address, int32_t val) { 257 | atomicMax(address, val); 258 | } 259 | static inline __device__ void atomMax(int64_t *address, int64_t val) { 260 | AtomicMaxIntegerImpl()(address, val); 261 | } 262 | static inline __device__ void atomMax(at::Half *address, at::Half val) { 263 | AtomicMaxDecimalImpl()(address, val); 264 | } 265 | static inline __device__ void atomMax(float *address, float val) { 266 | AtomicMaxDecimalImpl()(address, val); 267 | } 268 | static inline __device__ void atomMax(double *address, double val) { 269 | AtomicMaxDecimalImpl()(address, val); 270 | } 271 | static inline __device__ void atomMax(at::BFloat16 *address, at::BFloat16 val) { 272 | AtomicMaxDecimalImpl()(address, val); 273 | } 274 | 275 | #define OP(X, Y) min(Y, X) 276 | ATOMIC(Min) 277 | #undef OP 278 | static inline __device__ void atomMin(uint8_t *address, uint8_t val) { 279 | AtomicMinIntegerImpl()(address, val); 280 | } 281 | static inline __device__ void atomMin(int8_t *address, int8_t val) { 282 | AtomicMinIntegerImpl()(address, val); 283 | } 284 | static inline __device__ void atomMin(int16_t *address, int16_t val) { 285 | AtomicMinIntegerImpl()(address, val); 286 | } 287 | static inline __device__ void atomMin(int32_t *address, int32_t val) { 288 | atomicMin(address, val); 289 | } 290 | static inline __device__ void atomMin(int64_t *address, int64_t val) { 291 | AtomicMinIntegerImpl()(address, val); 292 | } 293 | static inline __device__ void atomMin(at::Half *address, at::Half val) { 294 | AtomicMinDecimalImpl()(address, val); 295 | } 296 | static inline __device__ void atomMin(float *address, float val) { 297 | AtomicMinDecimalImpl()(address, val); 298 | } 299 | static inline __device__ void atomMin(double *address, double val) { 300 | AtomicMinDecimalImpl()(address, val); 301 | } 302 | static inline __device__ void atomMin(at::BFloat16 *address, at::BFloat16 val) { 303 | AtomicMinDecimalImpl()(address, val); 304 | } 305 | -------------------------------------------------------------------------------- /voxelnet/modules/csrc/scatter/scatter.cu: -------------------------------------------------------------------------------- 1 | 2 | #include "scatter.h" 3 | #include "atomics.cuh" 4 | 5 | #define THREADS 1024 6 | #define BLOCKS(N) (N + THREADS - 1) / THREADS 7 | 8 | template 9 | __global__ void scatter_kernel(const scalar_t *src_data, 10 | const int64_t *indices_data, 11 | scalar_t *out_data, 12 | int64_t N, int64_t M, int64_t C) { 13 | 14 | int thread_idx = blockIdx.x * blockDim.x + threadIdx.x; 15 | 16 | int m = thread_idx / C; 17 | int c = thread_idx % C; 18 | if(m < M){ 19 | int n1 = indices_data[m*2]; 20 | int n2 = indices_data[m*2+1]; 21 | atomMax(out_data+n2*C+c, src_data[n1*C+c]); 22 | } 23 | } 24 | 25 | template 26 | __global__ void scatter_arg_kernel(const scalar_t *src_data, 27 | const int64_t *indices_data, 28 | const scalar_t *out_data, int64_t *arg_out_data, 29 | int N, int M, int C) { 30 | 31 | int thread_idx = blockIdx.x * blockDim.x + threadIdx.x; 32 | 33 | int m = thread_idx / C; 34 | int c = thread_idx % C; 35 | if(m < M){ 36 | int n1 = indices_data[m*2]; 37 | int n2 = indices_data[m*2+1]; 38 | if (src_data[n1*C+c] == out_data[n2*C+c]) { 39 | arg_out_data[n2*C+c] = n1; 40 | } 41 | } 42 | } 43 | 44 | template 45 | __global__ void scatter_back_kernel(const scalar_t *src_data, 46 | const int64_t *indices_data, 47 | scalar_t *out_data, 48 | int64_t N, int64_t C) { 49 | 50 | int thread_idx = blockIdx.x * blockDim.x + threadIdx.x; 51 | 52 | int n = thread_idx / C; 53 | int c = thread_idx % C; 54 | if(n < N){ 55 | int n1 = indices_data[n*C+c]; 56 | if (n1>=0){ 57 | atomAdd(out_data + n1*C+c, src_data[n*C+c]); 58 | } 59 | } 60 | } 61 | 62 | std::tuple 63 | scatter_max_cuda(torch::Tensor src, torch::Tensor index) { 64 | src = src.contiguous(); 65 | auto N = src.size(0); 66 | auto C = src.size(1); 67 | auto M = index.size(0); 68 | auto O = M*C; 69 | 70 | 71 | torch::Tensor out = torch::empty({N, C}, src.options()); 72 | torch::Tensor arg_out = torch::full({N, C},-1, index.options()); 73 | 74 | AT_DISPATCH_FLOATING_TYPES_AND_HALF( 75 | src.type(), "scatter_max_cuda", ([&] { 76 | auto src_data = src.data_ptr(); 77 | auto out_data = out.data_ptr(); 78 | auto indices_data = index.data_ptr(); 79 | auto arg_out_data = arg_out.data_ptr(); 80 | 81 | out.fill_(std::numeric_limits::lowest()); 82 | 83 | scatter_kernel<<>>(src_data,indices_data, out_data, N,M,C); 84 | 85 | out.masked_fill_(out == std::numeric_limits::lowest(),(scalar_t)0); 86 | 87 | scatter_arg_kernel<<>>( 88 | src_data, indices_data, out_data, arg_out_data,N,M,C); 89 | })); 90 | 91 | return std::make_tuple(out, arg_out); 92 | } 93 | 94 | torch::Tensor scatter_backward_cuda(torch::Tensor src, torch::Tensor index) { 95 | src = src.contiguous(); 96 | index = index.contiguous(); 97 | auto N = src.size(0); 98 | auto C = src.size(1); 99 | auto O = N*C; 100 | 101 | 102 | torch::Tensor out = torch::zeros({N, C}, src.options()); 103 | 104 | AT_DISPATCH_FLOATING_TYPES_AND_HALF( 105 | src.type(), "scatter_backward_cuda", ([&] { 106 | auto src_data = src.data_ptr(); 107 | auto out_data = out.data_ptr(); 108 | auto indices_data = index.data_ptr(); 109 | scatter_back_kernel<<>>(src_data,indices_data, out_data, N,C); 110 | })); 111 | 112 | return out; 113 | } -------------------------------------------------------------------------------- /voxelnet/modules/csrc/scatter/scatter.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | 4 | std::tuple 5 | scatter_max_cuda(torch::Tensor src, torch::Tensor index); 6 | torch::Tensor 7 | scatter_backward_cuda(torch::Tensor src, torch::Tensor index); -------------------------------------------------------------------------------- /voxelnet/modules/functional.py: -------------------------------------------------------------------------------- 1 | from typing import Callable 2 | 3 | import torch 4 | 5 | from torchsparse import SparseTensor 6 | 7 | def unique(x,sorted=True, return_inverse=False, return_counts=False,return_index=False, dim=None): 8 | if return_index: 9 | r_in = True 10 | else: 11 | r_in = return_inverse 12 | rets = torch.unique(x, sorted=sorted, return_inverse=r_in, return_counts=return_counts, dim=dim) 13 | rets = list(rets) 14 | if return_index: 15 | inverse = rets[1] 16 | perm = torch.arange(inverse.size(0), dtype=inverse.dtype, device=inverse.device) 17 | inverse, perm = inverse.flip([0]), perm.flip([0]) 18 | perm = inverse.new_empty(rets[0].size(0)).scatter_(0, inverse, perm) 19 | rets.insert(1,perm) 20 | if not return_inverse: 21 | del rets[2] 22 | return rets 23 | 24 | 25 | def fapply(input: SparseTensor, fn: Callable[..., torch.Tensor], *args, 26 | **kwargs) -> SparseTensor: 27 | feats = fn(input.feats, *args, **kwargs) 28 | output = SparseTensor(coords=input.coords, feats=feats, stride=input.stride) 29 | output.cmaps = input.cmaps 30 | output.kmaps = input.kmaps 31 | return output -------------------------------------------------------------------------------- /voxelnet/modules/load.py: -------------------------------------------------------------------------------- 1 | import os 2 | import glob 3 | from torch.utils.cpp_extension import load as load_cpp_ext 4 | 5 | abs_path = os.path.split(os.path.abspath(__file__))[0] 6 | def search_sources(dpath): 7 | path = os.path.join(dpath,"csrc") 8 | sources = list(glob.glob(path+"/*/*.cpp"))+list(glob.glob(path+"/*/*.cu")) 9 | sources += list(glob.glob(path+"/*.cpp")) 10 | return sources 11 | 12 | voxel_module = load_cpp_ext('voxel_module', sources=search_sources(abs_path),verbose=True) 13 | -------------------------------------------------------------------------------- /voxelnet/modules/pool.py: -------------------------------------------------------------------------------- 1 | from typing import Optional, Tuple, Union 2 | 3 | import torch 4 | from torch import nn 5 | import numpy as np 6 | 7 | from torchsparse import SparseTensor 8 | from torchsparse.nn import functional as F 9 | from torchsparse.utils import make_ntuple 10 | 11 | from voxelnet.modules.scatter import voxel_scatter_max 12 | 13 | 14 | def get_kernel_offsets(size: Union[int, Tuple[int, ...]], 15 | stride: Union[int, Tuple[int, ...]] = 1, 16 | dilation: Union[int, Tuple[int, ...]] = 1, 17 | device: str = 'cpu') -> torch.Tensor: 18 | size = make_ntuple(size, ndim=3) 19 | stride = make_ntuple(stride, ndim=3) 20 | dilation = make_ntuple(dilation, ndim=3) 21 | 22 | offsets = [(np.arange(-size[k] // 2 + 1, size[k] // 2 + 1) * stride[k] 23 | * dilation[k]) for k in range(3)] 24 | 25 | # This condition check is only to make sure that our weight layout is 26 | # compatible with `MinkowskiEngine`. 27 | if np.prod(size) % 2 == 1: 28 | offsets = [[x, y, z] for z in offsets[2] for y in offsets[1] 29 | for x in offsets[0]] 30 | else: 31 | offsets = [[x, y, z] for x in offsets[0] for y in offsets[1] 32 | for z in offsets[2]] 33 | 34 | offsets = torch.tensor(offsets, dtype=torch.int, device=device) 35 | return offsets 36 | 37 | 38 | def pool(input: SparseTensor, 39 | kernel_size: Union[int, Tuple[int, ...]], 40 | stride: Union[int, Tuple[int, ...]] = 1, 41 | dilation: Union[int, Tuple[int, ...]] = 1) -> SparseTensor: 42 | feats, coords = input.feats, input.coords 43 | 44 | kernel_size = make_ntuple(kernel_size, ndim=3) 45 | stride = make_ntuple(stride, ndim=3) 46 | dilation = make_ntuple(dilation, ndim=3) 47 | 48 | if (kernel_size == (1, 1, 1) and stride == (1, 1, 1) 49 | and dilation == (1, 1, 1)): 50 | output = SparseTensor(coords=coords, feats=feats, stride=input.stride) 51 | else: 52 | kmap = input.kmaps.get((input.stride, kernel_size, stride, dilation)) 53 | if kmap is None: 54 | offsets = get_kernel_offsets(kernel_size, 55 | dilation=dilation, 56 | stride=input.stride, 57 | device=feats.device) 58 | 59 | references = F.sphash(coords) 60 | if any(s > 1 for s in stride): 61 | coords = F.spdownsample(coords, stride, kernel_size, 62 | input.stride) 63 | queries = F.sphash(coords, offsets) 64 | results = F.sphashquery(queries, references) #[K,N] 65 | 66 | nbsizes = torch.sum(results != -1, dim=1) 67 | nbmaps = torch.nonzero(results != -1) 68 | nbmaps[:, 0] = results.view(-1)[nbmaps[:, 0] * results.size(1) 69 | + nbmaps[:, 1]] 70 | 71 | 72 | kmap = [nbmaps, nbsizes, (feats.shape[0], coords.shape[0])] 73 | input.kmaps[(input.stride, kernel_size, stride, dilation)] = kmap 74 | 75 | nbmaps = kmap[0] 76 | feats = voxel_scatter_max(feats,nbmaps) 77 | output = SparseTensor( 78 | coords=coords, 79 | feats=feats, 80 | stride=tuple(input.stride[k] * stride[k] for k in range(3))) 81 | output.cmaps = input.cmaps 82 | output.cmaps.setdefault(output.stride, output.coords) 83 | output.kmaps = input.kmaps 84 | return output 85 | 86 | class Pool(nn.Module): 87 | 88 | def __init__(self, 89 | kernel_size: Union[int, Tuple[int, ...]] = 3, 90 | stride: Union[int, Tuple[int, ...]] = 1, 91 | dilation: int = 1) -> None: 92 | super().__init__() 93 | self.kernel_size = make_ntuple(kernel_size, ndim=3) 94 | self.stride = make_ntuple(stride, ndim=3) 95 | self.dilation = dilation 96 | 97 | def forward(self, input: SparseTensor) -> SparseTensor: 98 | return pool(input, 99 | kernel_size=self.kernel_size, 100 | stride=self.stride, 101 | dilation=self.dilation) 102 | -------------------------------------------------------------------------------- /voxelnet/modules/scatter.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | from torch.autograd import Function 5 | from voxelnet.modules.load import voxel_module 6 | 7 | class ScatterFunction(Function): 8 | 9 | @staticmethod 10 | def forward(ctx, 11 | input: torch.Tensor, 12 | index: torch.Tensor) -> torch.Tensor: 13 | input = input.contiguous() 14 | index = index.contiguous() 15 | 16 | output,grad_indices = voxel_module.scatter_max_cuda(input,index) 17 | 18 | ctx.for_backwards = (grad_indices,) 19 | return output 20 | 21 | @staticmethod 22 | def backward(ctx, grad_output: torch.Tensor): 23 | indices, = ctx.for_backwards 24 | grad_input = voxel_module.scatter_backward_cuda(grad_output,indices) 25 | 26 | return grad_input, None 27 | 28 | voxel_scatter_max = ScatterFunction.apply 29 | 30 | 31 | -------------------------------------------------------------------------------- /voxelnet/optims/__init__.py: -------------------------------------------------------------------------------- 1 | from . import lr_scheduler 2 | from . import optimizer 3 | -------------------------------------------------------------------------------- /voxelnet/optims/lr_scheduler.py: -------------------------------------------------------------------------------- 1 | import torch.optim as optim 2 | 3 | from voxelnet.utils.registry import SCHEDULERS 4 | 5 | @SCHEDULERS.register_module() 6 | class LambdaStepLR(optim.lr_scheduler.LambdaLR): 7 | 8 | def __init__(self, optimizer, lr_lambda, last_step=-1): 9 | super(LambdaStepLR, self).__init__(optimizer, lr_lambda, last_step) 10 | 11 | @property 12 | def last_step(self): 13 | """Use last_epoch for the step counter""" 14 | return self.last_epoch 15 | 16 | @last_step.setter 17 | def last_step(self, v): 18 | self.last_epoch = v 19 | 20 | 21 | @SCHEDULERS.register_module() 22 | class PolyLR(LambdaStepLR): 23 | """DeepLab learning rate policy""" 24 | 25 | def __init__(self, optimizer, max_iter, power=0.9, last_step=-1): 26 | super(PolyLR, self).__init__(optimizer, lambda s: (1 - s / (max_iter + 1))**power, last_step) 27 | 28 | -------------------------------------------------------------------------------- /voxelnet/optims/optimizer.py: -------------------------------------------------------------------------------- 1 | 2 | from voxelnet import optims 3 | 4 | import torch.optim as optim 5 | 6 | from voxelnet.utils.registry import OPTIMS 7 | 8 | @OPTIMS.register_module() 9 | class SGD(optim.SGD): 10 | def __init__(self, params, lr=1e-1, momentum=0.9, dampening=0.1, 11 | weight_decay=1e-4, nesterov=False): 12 | super().__init__(params,lr,momentum,dampening,weight_decay,nesterov) -------------------------------------------------------------------------------- /voxelnet/runner/__init__.py: -------------------------------------------------------------------------------- 1 | from .runner import Runner -------------------------------------------------------------------------------- /voxelnet/runner/runner.py: -------------------------------------------------------------------------------- 1 | import os 2 | import torch 3 | import torch.nn.functional as F 4 | import time 5 | import numpy as np 6 | from torch.nn.parallel import DistributedDataParallel as DDP 7 | 8 | from voxelnet.utils.general import check_file,clean, current_time, init_seeds,search_ckpt,build_file 9 | from voxelnet.utils.config import get_cfg,save_cfg 10 | from voxelnet.utils.registry import MODELS,SCHEDULERS,OPTIMS,DATASETS,HOOKS,build_from_cfg 11 | from voxelnet.utils.metrics import confusion_matrix,get_iou,CLASS_LABELS 12 | 13 | 14 | class Runner: 15 | def __init__(self): 16 | cfg = get_cfg() 17 | self.cfg = cfg 18 | 19 | self.work_dir = cfg.work_dir 20 | if cfg.clean and cfg.rank<=0: 21 | clean(self.work_dir) 22 | 23 | self.checkpoint_interval = cfg.checkpoint_interval 24 | self.eval_interval = cfg.eval_interval 25 | self.log_interval = cfg.log_interval 26 | self.resume_path = cfg.resume_path 27 | self.pretrain_path = cfg.pretrain_path 28 | 29 | self.model = build_from_cfg(cfg.model,MODELS).cuda() 30 | if self.cfg.rank>=0: 31 | self.model = DDP(self.model) 32 | 33 | 34 | if self.cfg.rank <= 0: 35 | self.val_dataset = build_from_cfg(cfg.dataset.val,DATASETS) 36 | self.val_data_loader = self.val_dataset.infer_data_loader() 37 | 38 | self.logger = build_from_cfg(self.cfg.logger,HOOKS,work_dir=self.work_dir,rank=self.cfg.rank) 39 | self.logger.print('Model Parameters: '+str(sum([x.nelement() for x in self.model.parameters()]))) 40 | if self.cfg.rank <= 0: 41 | save_file = build_file(self.work_dir,prefix="config.yaml") 42 | save_cfg(save_file) 43 | self.logger.print(f"Save config to {save_file}") 44 | 45 | self.epoch = 0 46 | self.iter = 0 47 | 48 | assert cfg.max_epoch is not None or cfg.max_iter is not None,"Must set max epoch or max iter in config" 49 | self.max_epoch = cfg.max_epoch 50 | self.max_iter = cfg.max_iter 51 | 52 | self.start_time = -1 53 | 54 | if check_file(self.pretrain_path): 55 | self.load(self.pretrain_path,model_only=True) 56 | 57 | if self.resume_path is None and not cfg.clean: 58 | self.resume_path = search_ckpt(self.work_dir) 59 | if check_file(self.resume_path): 60 | self.resume() 61 | 62 | 63 | @torch.no_grad() 64 | def val(self,): 65 | self.logger.print("Validating") 66 | self.model.eval() 67 | val_reps = 8 if self.epoch == self.max_epoch else 1 68 | if self.cfg.val_reps is not None: 69 | val_reps = self.cfg.val_reps 70 | 71 | size = 20 72 | 73 | store = torch.zeros(self.val_dataset.infer_offsets[-1], size).cuda() 74 | start = time.time() 75 | for rep in range(val_reps): 76 | for i, batch in enumerate(self.val_data_loader): 77 | 78 | batch = self.val_dataset.inferAfter(batch) 79 | # Forward 80 | out_euc = self.model(batch['colors_v_b'], batch['coords_v_b'],batch['vertices_v_b']) 81 | predictions = out_euc[batch['reindex_v_b']] 82 | store.index_add_(0, batch['point_ids'], predictions) 83 | self.logger.print(f'Val Rep: {rep}, time: {time.time()-start} s') 84 | self.evaluate(store.max(1)[1], self.val_dataset.infer_labels) 85 | 86 | @torch.no_grad() 87 | def test(self,): 88 | self.logger.print("Testing") 89 | self.model.eval() 90 | val_reps = self.cfg.val_reps if self.cfg.val_reps is not None else 1 91 | 92 | store = torch.zeros(self.val_dataset.infer_offsets[-1], 20).cuda() 93 | start = time.time() 94 | for rep in range(val_reps): 95 | for i, batch in enumerate(self.val_data_loader): 96 | 97 | batch = self.val_dataset.inferAfter(batch) 98 | # Forward 99 | out_euc = self.model(batch['colors_v_b'], batch['coords_v_b'],batch['vertices_v_b']) 100 | predictions = out_euc[batch['reindex_v_b']] 101 | 102 | store.index_add_(0, batch['point_ids'], predictions) 103 | self.logger.print(f'Test Rep: {rep}, time: {time.time()-start} s') 104 | 105 | 106 | inverse_mapper = np.array([1,2,3,4,5,6,7,8,9,10,11,12,14,16,24,28,33,34,36,39]) 107 | 108 | predictions = store.max(1)[1].cpu().numpy() 109 | 110 | save_dir = os.path.join(self.work_dir, 'test_results') 111 | os.makedirs(save_dir, exist_ok=True) 112 | 113 | for idx, test_file in enumerate(self.val_dataset.infer_files): 114 | 115 | pred = predictions[self.val_dataset.infer_offsets[idx] : self.val_dataset.infer_offsets[idx + 1]] 116 | 117 | ori_pred = np.array([inverse_mapper[i] for i in pred]) 118 | ori_pred = ori_pred.astype(np.int32) 119 | 120 | test_name = os.path.join(save_dir, test_file[-15:-3] + '.txt') 121 | 122 | np.savetxt(test_name, ori_pred, fmt='%d', encoding='utf8') 123 | 124 | def evaluate(self,pred_ids, gt_ids): 125 | class_labels = CLASS_LABELS 126 | N_classes = len(class_labels) 127 | 128 | self.logger.print(f'Evaluating {gt_ids.shape[0]} points...') 129 | assert pred_ids.shape == gt_ids.shape, (pred_ids.shape, gt_ids.shape) 130 | idxs = (gt_ids >= 0) 131 | 132 | # confusion = np.bincount(pred_ids[idxs] * 20 + gt_ids[idxs], minlength=400).reshape((20, 20)) 133 | confusion = torch.bincount(pred_ids[idxs] * N_classes + gt_ids[idxs], minlength=N_classes*N_classes).reshape((N_classes, N_classes)).cpu().numpy() 134 | confusion = confusion.astype(np.ulonglong) 135 | 136 | # confusion = confusion_matrix(pred_ids, gt_ids) 137 | self.logger.print("calculate ious") 138 | class_ious = {} 139 | mean_iou = 0 140 | 141 | 142 | 143 | for i in range(N_classes): 144 | label_name = class_labels[i] 145 | class_ious[label_name] = get_iou(i, confusion) 146 | mean_iou += class_ious[label_name][0] / N_classes 147 | 148 | self.logger.print('classes IoU') 149 | self.logger.print('----------------------------') 150 | class_ious_data = {} 151 | accs = [] 152 | for i in range(N_classes): 153 | label_name = class_labels[i] 154 | self.logger.print('{0:<14s}: {1:>5.3f} ({2:>6d}/{3:<6d})'.format(label_name, class_ious[label_name][0], class_ious[label_name][1], class_ious[label_name][2])) 155 | accs.append(class_ious[label_name][3]) 156 | 157 | class_ious_data[f'val_class_ious/{label_name}']=class_ious[label_name][0] 158 | 159 | class_ious_data['val/miou']=mean_iou 160 | class_ious_data['val/macc'] = np.mean(accs) 161 | class_ious_data['val/oacc'] = np.sum(np.diag(confusion)) / np.sum(confusion) 162 | self.logger.log(class_ious_data,global_step=self.iter) 163 | 164 | def save(self,name): 165 | save_data = { 166 | "meta":{ 167 | "epoch": self.epoch, 168 | "iter": self.iter, 169 | "max_epoch": self.max_epoch, 170 | "save_time":current_time(), 171 | }, 172 | "model":self.model.state_dict(), 173 | } 174 | 175 | save_file = build_file(self.work_dir,prefix=f"checkpoints/ckpt_{name}.pt") 176 | torch.save(save_data,save_file) 177 | self.logger.print(f"Save checkpoint to {save_file}") 178 | 179 | def load(self, load_path, model_only=False): 180 | resume_data = torch.load(load_path) 181 | 182 | if (not model_only): 183 | meta = resume_data.get("meta",dict()) 184 | self.epoch = meta.get("epoch",self.epoch) 185 | self.iter = meta.get("iter",self.iter) 186 | self.max_epoch = meta.get("max_epoch",self.max_epoch) 187 | if ("model" in resume_data): 188 | state_dict = resume_data["model"] 189 | for key in list(state_dict.keys()): 190 | if key.startswith("module."): 191 | new_key = key.replace("module.", "") 192 | state_dict[new_key] = state_dict[key] 193 | del state_dict[key] 194 | self.model.load_state_dict(state_dict) 195 | elif ("state_dict" in resume_data): 196 | state_dict = resume_data["state_dict"] 197 | for key in list(state_dict.keys()): 198 | if key.startswith("module."): 199 | new_key = key.replace("module.", "") 200 | state_dict[new_key] = state_dict[key] 201 | del state_dict[key] 202 | self.model.load_state_dict(state_dict) 203 | else: 204 | self.model.load_state_dict(resume_data) 205 | self.logger.print(f"Loading model parameters from {load_path}") 206 | 207 | def resume(self): 208 | self.load(self.resume_path) -------------------------------------------------------------------------------- /voxelnet/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from . import logger -------------------------------------------------------------------------------- /voxelnet/utils/config.py: -------------------------------------------------------------------------------- 1 | from collections import OrderedDict 2 | from voxelnet.utils.general import check_file 3 | import os 4 | import yaml 5 | import copy 6 | from importlib import import_module 7 | import sys 8 | import inspect 9 | 10 | __all__ = ["get_cfg","init_cfg","save_cfg","print_cfg"] 11 | BASE_KEY = "_base_" 12 | COVER_KEY = "_cover_" 13 | IGNORE_KEY = "_ignore_" 14 | 15 | class Config(OrderedDict): 16 | def __init__(self, *args): 17 | super().__init__() 18 | if len(args)==1: 19 | self.load_from_file(args[0]) 20 | else: 21 | assert(len(args)==0) 22 | 23 | def __getattr__(self, name): 24 | if name in self: 25 | return self[name] 26 | return None 27 | 28 | def __setattr__(self, name, value): 29 | self[name] = value 30 | 31 | # Load config file to a Dict without considering BASE_KEY 32 | @staticmethod 33 | def _load_dict_from_file_no_base(filename): 34 | if check_file(filename,ext=[".yaml"]): 35 | with open(filename, "r") as f: 36 | cfg = yaml.safe_load(f.read()) 37 | elif check_file(filename,ext=[".py"]): 38 | f_dir = os.path.dirname(filename) 39 | f_name = os.path.basename(filename) 40 | module_name = f_name[:-3] 41 | 42 | # temp_module_name = osp.splitext(temp_config_name)[0] 43 | sys.path.insert(0, f_dir) 44 | # Config._validate_py_syntax(filename) 45 | mod = import_module(module_name) 46 | sys.path.pop(0) 47 | cfg = { 48 | name: value 49 | for name, value in mod.__dict__.items() 50 | if not name.startswith('__') 51 | } 52 | # delete imported module 53 | del sys.modules[module_name] 54 | else: 55 | assert(False), "unsupported config type." 56 | return cfg 57 | 58 | # Load config file to a Dict with considering BASE_KEY 59 | @staticmethod 60 | def _load_dict_from_file(filename): 61 | cfg = Config._load_dict_from_file_no_base(filename) 62 | cfg_dir = os.path.dirname(filename) 63 | if (BASE_KEY in cfg): 64 | if (isinstance(cfg[BASE_KEY], list)): 65 | base_filenames = cfg[BASE_KEY] 66 | else: 67 | assert(isinstance(cfg[BASE_KEY], str)) 68 | base_filenames = [cfg[BASE_KEY]] 69 | cfg_base = {} 70 | for bfn in base_filenames: 71 | Config.merge_dict_b2a(cfg_base, Config._load_dict_from_file(os.path.join(cfg_dir, bfn))) 72 | cfg.pop(BASE_KEY) 73 | Config.merge_dict_b2a(cfg_base, cfg) 74 | cfg = cfg_base 75 | return cfg 76 | 77 | @staticmethod 78 | def merge_dict_b2a(a, b): 79 | def clear_cover_key(a): 80 | if (not isinstance(a, dict)): 81 | return a 82 | out = copy.deepcopy(a) 83 | if (COVER_KEY in out): 84 | out.pop(COVER_KEY) 85 | for k, v in out.items(): 86 | out[k] = clear_cover_key(v) 87 | return out 88 | 89 | assert(isinstance(a, dict)) 90 | assert(isinstance(b, dict)) 91 | if (COVER_KEY in b): 92 | a.clear() 93 | temp = clear_cover_key(copy.deepcopy(b)) 94 | a.update(temp) 95 | return 96 | for k, v in b.items(): 97 | if ((not(k in a)) or (isinstance(v, dict) and v.get(COVER_KEY, False)) or (not isinstance(v, dict)) or (not isinstance(a[k], dict))): 98 | a[k] = clear_cover_key(copy.deepcopy(v)) 99 | else: 100 | Config.merge_dict_b2a(a[k], v) 101 | 102 | def load_from_file(self, filename): 103 | cfg = Config._load_dict_from_file(filename) 104 | self.clear() 105 | self.update(self.dfs(cfg)) 106 | if (self.name is None): 107 | self.name = os.path.splitext(os.path.basename(filename))[0] 108 | if self.work_dir is None: 109 | self.work_dir = f"work_dirs/{self.name}" 110 | 111 | def dfs(self, cfg_other): 112 | if isinstance(cfg_other,dict): 113 | now_cfg = Config() 114 | for k,d in cfg_other.items(): 115 | if (inspect.ismodule(d)): 116 | continue 117 | now_cfg[k]=self.dfs(d) 118 | elif isinstance(cfg_other,list): 119 | now_cfg = [self.dfs(d) for d in cfg_other if (not inspect.ismodule(d))] 120 | else: 121 | now_cfg = copy.deepcopy(cfg_other) 122 | return now_cfg 123 | 124 | def dump(self): 125 | """convert Config to dict""" 126 | now = dict() 127 | for k,d in self.items(): 128 | if isinstance(d,Config): 129 | d = d.dump() 130 | if isinstance(d,list): 131 | d = [dd.dump() if isinstance(dd,Config) else dd for dd in d] 132 | now[k]=d 133 | return now 134 | 135 | ''' 136 | TODO: _cover_ between sibling nodes 137 | Configs forms a tree like structure through '_base_', while '_cover_' will only work on child nodes, and will not work between sibling nodes. 138 | Maybe we need a cover works on sibling nodes, like: 139 | _base_: ['a.yaml', '@b.yaml'] 140 | means each attribute with a depth of 1 in b.yaml will have the attribute '_cover' before merge into a.yaml, and '@@b.yaml' means a depth of 2, etc. 141 | ''' 142 | 143 | _cfg = Config() 144 | 145 | def init_cfg(filename,**kwargs): 146 | print("Loading config from: ",filename) 147 | _cfg.load_from_file(filename) 148 | for k,d in kwargs.items(): 149 | _cfg[k]=d 150 | 151 | def get_cfg(): 152 | return _cfg 153 | 154 | def update_cfg(**kwargs): 155 | _cfg.update(kwargs) 156 | 157 | def save_cfg(save_file): 158 | with open(save_file,"w") as f: 159 | f.write(yaml.dump(_cfg.dump())) 160 | 161 | def print_cfg(): 162 | data = yaml.dump(_cfg.dump()) 163 | # TODO: data keys are not sorted 164 | print(data) -------------------------------------------------------------------------------- /voxelnet/utils/general.py: -------------------------------------------------------------------------------- 1 | import os 2 | import glob 3 | import warnings 4 | import time 5 | import shutil 6 | import torch 7 | import torch.distributed 8 | from multiprocessing import Pool 9 | from tqdm import tqdm 10 | import random 11 | import numpy as np 12 | 13 | def init_seeds(seed=0): 14 | random.seed(seed) 15 | np.random.seed(seed) 16 | torch.manual_seed(seed) 17 | torch.cuda.manual_seed_all(seed) 18 | torch.backends.cudnn.deterministic = True 19 | torch.backends.cudnn.benchmark = False 20 | 21 | def check_file(file,ext=None): 22 | if file is None: 23 | return False 24 | if not os.path.exists(file): 25 | warnings.warn(f"{file} is not exists") 26 | return False 27 | if not os.path.isfile(file): 28 | warnings.warn(f"{file} must be a file") 29 | return False 30 | if ext: 31 | if not os.path.splitext(file)[1] in ext: 32 | # warnings.warn(f"the type of {file} must be in {ext}") 33 | return False 34 | return True 35 | 36 | def search_ckpt(work_dir): 37 | files = glob.glob(os.path.join(work_dir,"checkpoints/ckpt_*.pkl")) 38 | if len(files)==0: 39 | return None 40 | files = sorted(files,key=lambda x:int(x.split("_")[-1].split(".pkl")[0])) 41 | return files[-1] 42 | 43 | def build_file(work_dir,prefix): 44 | """ build file and makedirs the file parent path """ 45 | work_dir = os.path.abspath(work_dir) 46 | prefixes = prefix.split("/") 47 | file_name = prefixes[-1] 48 | prefix = "/".join(prefixes[:-1]) 49 | if len(prefix)>0: 50 | work_dir = os.path.join(work_dir,prefix) 51 | os.makedirs(work_dir,exist_ok=True) 52 | file = os.path.join(work_dir,file_name) 53 | return file 54 | 55 | def clean(work_dir): 56 | if os.path.exists(work_dir): 57 | shutil.rmtree(work_dir) 58 | 59 | def multi_process(func,files,processes=1): 60 | if processes <=1: 61 | for mesh_file in tqdm(files): 62 | func(mesh_file) 63 | else: 64 | with Pool(processes=processes) as pool: 65 | r = list(tqdm(pool.imap(func, files), total=len(files))) 66 | 67 | 68 | def current_time(): 69 | return time.asctime( time.localtime(time.time())) 70 | 71 | def print_network(model): 72 | print('#parameters', sum([x.nelement() for x in model.parameters()])) -------------------------------------------------------------------------------- /voxelnet/utils/logger.py: -------------------------------------------------------------------------------- 1 | from .general import build_file, current_time 2 | from .registry import HOOKS,build_from_cfg 3 | import time 4 | import os 5 | from tensorboardX import SummaryWriter 6 | from .config import get_cfg 7 | 8 | 9 | @HOOKS.register_module() 10 | class RunLogger: 11 | def __init__(self,work_dir,rank=0): 12 | self.rank = rank 13 | if self.rank>0: 14 | return 15 | self.writer = SummaryWriter(os.path.join(work_dir,"tensorboard"),flush_secs=10) 16 | save_file = build_file(work_dir,prefix="textlog/log_"+time.strftime("%Y_%m_%d_%H_%M_%S", time.localtime())+".txt") 17 | self.log_file = open(save_file,"a") 18 | 19 | def log(self,data,global_step,**kwargs): 20 | if self.rank>0: 21 | return 22 | data.update(kwargs) 23 | data = {k:d.item() if hasattr(d,"item") else d for k,d in data.items()} 24 | msg = f"iter: {global_step}" 25 | for k,d in data.items(): 26 | msg += f", {k}: {d:4f}" if isinstance(d,float) else f", {k}: {d}" 27 | if isinstance(d,str): 28 | continue 29 | self.writer.add_scalar(k,d,global_step=global_step) 30 | self.print(msg) 31 | 32 | def print(self,msg): 33 | if self.rank>0: 34 | return 35 | now_time = current_time() 36 | msg = now_time+", "+msg 37 | self.log_file.write(msg+"\n") 38 | self.log_file.flush() 39 | print(msg) -------------------------------------------------------------------------------- /voxelnet/utils/metrics.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | CLASS_LABELS = ['wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture'] 4 | UNKNOWN_ID = -100 5 | N_CLASSES = len(CLASS_LABELS) 6 | 7 | def confusion_matrix(pred_ids, gt_ids): 8 | 9 | assert pred_ids.shape == gt_ids.shape, (pred_ids.shape, gt_ids.shape) 10 | idxs = (gt_ids >= 0) 11 | 12 | return np.bincount(pred_ids[idxs] * 20 + gt_ids[idxs], minlength=400).reshape((20, 20)).astype(np.ulonglong) 13 | 14 | def get_iou(label_id, confusion): 15 | 16 | # true positives 17 | tp = np.longlong(confusion[label_id, label_id]) 18 | # false positives 19 | fp = np.longlong(confusion[label_id, :].sum()) - tp 20 | # false negatives 21 | fn = np.longlong(confusion[:, label_id].sum()) - tp 22 | 23 | denom = (tp + fp + fn) 24 | if denom == 0: 25 | return float('nan') 26 | acc = float(tp)/(tp + fn) 27 | return (float(tp) / denom, tp, denom, acc) 28 | 29 | 30 | -------------------------------------------------------------------------------- /voxelnet/utils/registry.py: -------------------------------------------------------------------------------- 1 | class Registry: 2 | def __init__(self): 3 | self._modules = {} 4 | 5 | def register_module(self, name=None,module=None): 6 | def _register_module(module): 7 | key = name 8 | if key is None: 9 | key = module.__name__ 10 | assert key not in self._modules,f"{key} is already registered." 11 | self._modules[key]=module 12 | return module 13 | 14 | if module is not None: 15 | return _register_module(module) 16 | 17 | return _register_module 18 | 19 | def get(self,name): 20 | assert name in self._modules,f"{name} is not registered." 21 | return self._modules[name] 22 | 23 | 24 | def build_from_cfg(cfg,registry,**kwargs): 25 | if isinstance(cfg,str): 26 | return registry.get(cfg)(**kwargs) 27 | elif isinstance(cfg,dict): 28 | args = cfg.copy() 29 | args.update(kwargs) 30 | obj_type = args.pop('type') 31 | obj_cls = registry.get(obj_type) 32 | try: 33 | module = obj_cls(**args) 34 | except TypeError as e: 35 | if "