├── .gitignore ├── LICENSE ├── README.md ├── assets ├── demo.png ├── framework.png ├── grasp_rd_curves │ ├── lidar_ford │ │ ├── ford_02_q1mm_bppGeo_d1T_-27.png │ │ ├── ford_02_q1mm_bppGeo_d2T_-34.png │ │ ├── ford_03_q1mm_bppGeo_d1T_-23.png │ │ ├── ford_03_q1mm_bppGeo_d2T_-31.png │ │ ├── mean_bppGeo_d1T_-25_ford.png │ │ └── mean_bppGeo_d2T_-32_ford.png │ ├── surface_dense │ │ ├── boxer_viewdep_vox12_bppGeo_d1T_-57.png │ │ ├── boxer_viewdep_vox12_bppGeo_d2T_-85.png │ │ ├── facade_00009_vox12_bppGeo_d1T_-25.png │ │ ├── facade_00009_vox12_bppGeo_d2T_-36.png │ │ ├── house_without_roof_00057_vox12_bppGeo_d1T_-43.png │ │ ├── house_without_roof_00057_vox12_bppGeo_d2T_-42.png │ │ ├── mean_bppGeo_d1T_-45_dense.png │ │ ├── mean_bppGeo_d2T_-67_dense.png │ │ ├── soldier_viewdep_vox12_bppGeo_d1T_-55.png │ │ └── soldier_viewdep_vox12_bppGeo_d2T_-86.png │ ├── surface_solid │ │ ├── dancer_vox11_00000001_bppGeo_d1T_-91.png │ │ ├── dancer_vox11_00000001_bppGeo_d2T_-80.png │ │ ├── facade_00064_vox11_bppGeo_d1T_-88.png │ │ ├── facade_00064_vox11_bppGeo_d2T_-73.png │ │ ├── mean_bppGeo_d1T_-90.png │ │ ├── mean_bppGeo_d2T_-80.png │ │ ├── queen_0200_bppGeo_d1T_-90.png │ │ ├── queen_0200_bppGeo_d2T_-78.png │ │ ├── soldier_vox10_0690_bppGeo_d1T_-86.png │ │ ├── soldier_vox10_0690_bppGeo_d2T_-72.png │ │ ├── thaidancer_viewdep_vox12_bppGeo_d1T_-88.png │ │ └── thaidancer_viewdep_vox12_bppGeo_d2T_-77.png │ └── surface_sparse │ │ ├── arco_valentino_dense_vox12_bppGeo_d1T_-10.png │ │ ├── arco_valentino_dense_vox12_bppGeo_d2T_-5.png │ │ ├── egyptian_mask_vox12_bppGeo_d1T_-2.png │ │ ├── egyptian_mask_vox12_bppGeo_d2T_-7.png │ │ ├── mean_bppGeo_d1T_-9.png │ │ ├── mean_bppGeo_d2T_-9.png │ │ ├── shiva_00035_vox12_bppGeo_d1T_-18.png │ │ ├── shiva_00035_vox12_bppGeo_d2T_-15.png │ │ ├── staue_klimt_vox12_bppGeo_d1T_-15.png │ │ ├── staue_klimt_vox12_bppGeo_d2T_-9.png │ │ ├── ulb_unicorn_vox13_bppGeo_d1T_-6.png │ │ └── ulb_unicorn_vox13_bppGeo_d2T_-14.png ├── grasp_results.csv └── mpeg_test_seq.txt ├── 
config ├── codec_config │ ├── grasp_ford.yaml │ └── grasp_surface.yaml ├── data_config │ ├── ford_voxel.yaml │ ├── modelnet_voxel_dense.yaml │ ├── modelnet_voxel_solid.yaml │ └── modelnet_voxel_sparse.yaml ├── net_config │ ├── grasp_dus1.yaml │ ├── grasp_dus1_faiss.yaml │ └── grasp_dus2.yaml └── optim_config │ └── optim_cd_sparse.yaml ├── experiments ├── bench.py ├── test.py └── train.py ├── install_torch-1.7.0+cu-10.1.sh ├── install_torch-1.8.1+cu-11.2.sh ├── pccai ├── __init__.py ├── codecs │ ├── __init__.py │ ├── grasp_codec.py │ ├── pcc_codec.py │ └── utils.py ├── dataloaders │ ├── __init__.py │ ├── lidar_base_loader.py │ ├── lidar_loader.py │ ├── modelnet_loader.py │ ├── point_cloud_dataset.py │ └── shapenet_part_loader.py ├── models │ ├── __init__.py │ ├── architectures │ │ └── grasp.py │ ├── modules │ │ ├── get_modules.py │ │ ├── mlpdecoder.py │ │ ├── mlpdecoder_sparse.py │ │ ├── pointnet.py │ │ ├── pointnet_residual.py │ │ ├── spcnn_down.py │ │ └── spcnn_up.py │ ├── pcc_models.py │ ├── utils.py │ └── utils_sparse.py ├── optim │ ├── __init__.py │ ├── cd_sparse.py │ ├── pcc_loss.py │ └── utils.py ├── pipelines │ ├── __init__.py │ ├── bench.py │ ├── test.py │ └── train.py └── utils │ ├── __init__.py │ ├── convert_image.py │ ├── convert_octree.py │ ├── logger.py │ ├── misc.py │ ├── option_handler.py │ ├── pc_metric.py │ └── syntax.py ├── scripts ├── bench_grasp │ ├── bench_lidar_ford │ │ ├── bench_lidar_ford_all.sh │ │ ├── bench_lidar_ford_r01.sh │ │ ├── bench_lidar_ford_r02.sh │ │ ├── bench_lidar_ford_r03.sh │ │ ├── bench_lidar_ford_r04.sh │ │ └── bench_lidar_ford_r05.sh │ ├── bench_surface_dense │ │ ├── bench_surface_dense_all.sh │ │ ├── bench_surface_dense_r01.sh │ │ ├── bench_surface_dense_r02.sh │ │ ├── bench_surface_dense_r03.sh │ │ ├── bench_surface_dense_r04.sh │ │ └── bench_surface_dense_r05.sh │ ├── bench_surface_solid │ │ ├── bench_surface_solid_all.sh │ │ ├── bench_surface_solid_r01.sh │ │ ├── bench_surface_solid_r02.sh │ │ ├── 
bench_surface_solid_r03.sh │ │ ├── bench_surface_solid_r04.sh │ │ └── bench_surface_solid_r05.sh │ └── bench_surface_sparse │ │ ├── bench_surface_sparse_all.sh │ │ ├── bench_surface_sparse_r01.sh │ │ ├── bench_surface_sparse_r02.sh │ │ ├── bench_surface_sparse_r03.sh │ │ ├── bench_surface_sparse_r04.sh │ │ └── bench_surface_sparse_r05.sh ├── config_args.sh ├── run.sh ├── train_grasp │ ├── train_lidar_ford │ │ ├── train_lidar_ford_r01.sh │ │ ├── train_lidar_ford_r02.sh │ │ ├── train_lidar_ford_r03.sh │ │ ├── train_lidar_ford_r04.sh │ │ └── train_lidar_ford_r05.sh │ ├── train_surface_dense │ │ ├── train_surface_dense_r01.sh │ │ ├── train_surface_dense_r02.sh │ │ ├── train_surface_dense_r03.sh │ │ ├── train_surface_dense_r04.sh │ │ └── train_surface_dense_r05.sh │ ├── train_surface_solid │ │ ├── train_surface_solid_r01.sh │ │ ├── train_surface_solid_r02.sh │ │ ├── train_surface_solid_r03.sh │ │ ├── train_surface_solid_r04.sh │ │ └── train_surface_solid_r05.sh │ └── train_surface_sparse │ │ ├── train_surface_sparse_r01.sh │ │ ├── train_surface_sparse_r02.sh │ │ ├── train_surface_sparse_r03.sh │ │ ├── train_surface_sparse_r04.sh │ │ └── train_surface_sparse_r05.sh └── visualize.sh ├── third_party └── nndistance │ ├── LICENSE │ ├── README.md │ ├── build.py │ ├── build_cpu.py │ ├── functions │ ├── __init__.py │ └── nnd.py │ ├── modules │ ├── __init__.py │ └── nnd.py │ ├── src │ ├── cpu_ops.cpp │ ├── my_lib.cpp │ ├── my_lib_cuda.cpp │ └── nnd_cuda.cu │ └── test.py └── utils ├── __init__.py ├── gen_args.py ├── merge_csv.py └── visualize.py /.gitignore: -------------------------------------------------------------------------------- 1 | tags 2 | builds 3 | *.csv 4 | *.inc 5 | *.bin 6 | *.pth 7 | *.png 8 | *.ply 9 | *.pkl 10 | *.sh 11 | *.enc 12 | *.npy 13 | *.out 14 | ScreenCamera*.json 15 | venv/ 16 | venv*/ 17 | runs/ 18 | runs*/ 19 | third_party/ 20 | tmp_runs/ 21 | .vscode/ 22 | datasets/ 23 | results/ 24 | tmp/ 25 | scripts/ 26 | datasets 27 | 28 | # Created by 
gitignore.io 29 | ### Python ### 30 | # Byte-compiled / optimized / DLL files 31 | __pycache__/ 32 | *.py[cod] 33 | *$py.class 34 | 35 | # C extensions 36 | *.so 37 | 38 | # Distribution / packaging 39 | .Python 40 | build/ 41 | develop-eggs/ 42 | dist/ 43 | downloads/ 44 | eggs/ 45 | .eggs/ 46 | lib/ 47 | lib64/ 48 | parts/ 49 | sdist/ 50 | var/ 51 | wheels/ 52 | pip-wheel-metadata/ 53 | share/python-wheels/ 54 | *.egg-info/ 55 | .installed.cfg 56 | *.egg 57 | MANIFEST 58 | 59 | # PyInstaller 60 | # Usually these files are written by a python script from a template 61 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 62 | *.manifest 63 | *.spec 64 | 65 | # Installer logs 66 | pip-log.txt 67 | pip-delete-this-directory.txt 68 | 69 | # Unit test / coverage reports 70 | htmlcov/ 71 | .tox/ 72 | .nox/ 73 | .coverage 74 | .coverage.* 75 | .cache 76 | nosetests.xml 77 | coverage.xml 78 | *.cover 79 | .hypothesis/ 80 | .pytest_cache/ 81 | 82 | # Translations 83 | *.mo 84 | *.pot 85 | 86 | # Django stuff: 87 | *.log 88 | local_settings.py 89 | db.sqlite3 90 | 91 | # Flask stuff: 92 | instance/ 93 | .webassets-cache 94 | 95 | # Scrapy stuff: 96 | .scrapy 97 | 98 | # Sphinx documentation 99 | docs/_build/ 100 | 101 | # PyBuilder 102 | target/ 103 | 104 | # Jupyter Notebook 105 | .ipynb_checkpoints 106 | 107 | # IPython 108 | profile_default/ 109 | ipython_config.py 110 | 111 | # pyenv 112 | .python-version 113 | 114 | # pipenv 115 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in 116 | # version control. 117 | # However, in case of collaboration, if having platform-specific dependencies 118 | # or dependencies 119 | # having no cross-platform support, pipenv may install dependencies that don’t 120 | # work, or not 121 | # install all needed dependencies. 
122 | #Pipfile.lock 123 | 124 | # celery beat schedule file 125 | celerybeat-schedule 126 | 127 | # SageMath parsed files 128 | *.sage.py 129 | 130 | # Environments 131 | .env 132 | .venv 133 | env/ 134 | venv/ 135 | ENV/ 136 | env.bak/ 137 | venv.bak/ 138 | 139 | # Spyder project settings 140 | .spyderproject 141 | .spyproject 142 | 143 | # Rope project settings 144 | .ropeproject 145 | 146 | # mkdocs documentation 147 | /site 148 | 149 | # mypy 150 | .mypy_cache/ 151 | .dmypy.json 152 | dmypy.json 153 | 154 | # Pyre type checker 155 | .pyre/ 156 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The copyright in this software is being made available under the BSD License, 2 | included below. This software may be subject to InterDigital and other third 3 | party and contributor rights, including patent rights, and no such rights are 4 | granted under this license. 5 | 6 | Copyright (c) 2010-2022, InterDigital 7 | All rights reserved. 8 | 9 | Redistribution and use in source and binary forms, with or without 10 | modification, are permitted (subject to the limitations in the disclaimer 11 | below) provided that the following conditions are met: 12 | 13 | * Redistributions of source code must retain the above copyright notice, 14 | this list of conditions and the following disclaimer. 15 | 16 | * Redistributions in binary form must reproduce the above copyright notice, 17 | this list of conditions and the following disclaimer in the documentation 18 | and/or other materials provided with the distribution. 19 | 20 | * Neither the name of InterDigital nor the names of the Project where this 21 | contribution had been made may be used to endorse or promote products 22 | derived from this software without specific prior written permission. 23 | 24 | NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY 25 | THIS LICENSE. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND 26 | CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT 27 | NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A 28 | PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR 29 | CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 30 | EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 31 | PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 32 | OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 33 | WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 34 | OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 35 | ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- /assets/demo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/InterDigitalInc/GRASP-Net/f6f32c85062eee4b6fb744ad4bf7cd2d05af88f8/assets/demo.png -------------------------------------------------------------------------------- /assets/framework.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/InterDigitalInc/GRASP-Net/f6f32c85062eee4b6fb744ad4bf7cd2d05af88f8/assets/framework.png -------------------------------------------------------------------------------- /assets/grasp_rd_curves/lidar_ford/ford_02_q1mm_bppGeo_d1T_-27.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/InterDigitalInc/GRASP-Net/f6f32c85062eee4b6fb744ad4bf7cd2d05af88f8/assets/grasp_rd_curves/lidar_ford/ford_02_q1mm_bppGeo_d1T_-27.png -------------------------------------------------------------------------------- /assets/grasp_rd_curves/lidar_ford/ford_02_q1mm_bppGeo_d2T_-34.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/InterDigitalInc/GRASP-Net/f6f32c85062eee4b6fb744ad4bf7cd2d05af88f8/assets/grasp_rd_curves/lidar_ford/ford_02_q1mm_bppGeo_d2T_-34.png -------------------------------------------------------------------------------- /assets/grasp_rd_curves/lidar_ford/ford_03_q1mm_bppGeo_d1T_-23.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/InterDigitalInc/GRASP-Net/f6f32c85062eee4b6fb744ad4bf7cd2d05af88f8/assets/grasp_rd_curves/lidar_ford/ford_03_q1mm_bppGeo_d1T_-23.png -------------------------------------------------------------------------------- /assets/grasp_rd_curves/lidar_ford/ford_03_q1mm_bppGeo_d2T_-31.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/InterDigitalInc/GRASP-Net/f6f32c85062eee4b6fb744ad4bf7cd2d05af88f8/assets/grasp_rd_curves/lidar_ford/ford_03_q1mm_bppGeo_d2T_-31.png -------------------------------------------------------------------------------- /assets/grasp_rd_curves/lidar_ford/mean_bppGeo_d1T_-25_ford.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/InterDigitalInc/GRASP-Net/f6f32c85062eee4b6fb744ad4bf7cd2d05af88f8/assets/grasp_rd_curves/lidar_ford/mean_bppGeo_d1T_-25_ford.png -------------------------------------------------------------------------------- /assets/grasp_rd_curves/lidar_ford/mean_bppGeo_d2T_-32_ford.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/InterDigitalInc/GRASP-Net/f6f32c85062eee4b6fb744ad4bf7cd2d05af88f8/assets/grasp_rd_curves/lidar_ford/mean_bppGeo_d2T_-32_ford.png -------------------------------------------------------------------------------- /assets/grasp_rd_curves/surface_dense/boxer_viewdep_vox12_bppGeo_d1T_-57.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/InterDigitalInc/GRASP-Net/f6f32c85062eee4b6fb744ad4bf7cd2d05af88f8/assets/grasp_rd_curves/surface_dense/boxer_viewdep_vox12_bppGeo_d1T_-57.png -------------------------------------------------------------------------------- /assets/grasp_rd_curves/surface_dense/boxer_viewdep_vox12_bppGeo_d2T_-85.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/InterDigitalInc/GRASP-Net/f6f32c85062eee4b6fb744ad4bf7cd2d05af88f8/assets/grasp_rd_curves/surface_dense/boxer_viewdep_vox12_bppGeo_d2T_-85.png -------------------------------------------------------------------------------- /assets/grasp_rd_curves/surface_dense/facade_00009_vox12_bppGeo_d1T_-25.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/InterDigitalInc/GRASP-Net/f6f32c85062eee4b6fb744ad4bf7cd2d05af88f8/assets/grasp_rd_curves/surface_dense/facade_00009_vox12_bppGeo_d1T_-25.png -------------------------------------------------------------------------------- /assets/grasp_rd_curves/surface_dense/facade_00009_vox12_bppGeo_d2T_-36.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/InterDigitalInc/GRASP-Net/f6f32c85062eee4b6fb744ad4bf7cd2d05af88f8/assets/grasp_rd_curves/surface_dense/facade_00009_vox12_bppGeo_d2T_-36.png -------------------------------------------------------------------------------- /assets/grasp_rd_curves/surface_dense/house_without_roof_00057_vox12_bppGeo_d1T_-43.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/InterDigitalInc/GRASP-Net/f6f32c85062eee4b6fb744ad4bf7cd2d05af88f8/assets/grasp_rd_curves/surface_dense/house_without_roof_00057_vox12_bppGeo_d1T_-43.png 
-------------------------------------------------------------------------------- /assets/grasp_rd_curves/surface_dense/house_without_roof_00057_vox12_bppGeo_d2T_-42.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/InterDigitalInc/GRASP-Net/f6f32c85062eee4b6fb744ad4bf7cd2d05af88f8/assets/grasp_rd_curves/surface_dense/house_without_roof_00057_vox12_bppGeo_d2T_-42.png -------------------------------------------------------------------------------- /assets/grasp_rd_curves/surface_dense/mean_bppGeo_d1T_-45_dense.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/InterDigitalInc/GRASP-Net/f6f32c85062eee4b6fb744ad4bf7cd2d05af88f8/assets/grasp_rd_curves/surface_dense/mean_bppGeo_d1T_-45_dense.png -------------------------------------------------------------------------------- /assets/grasp_rd_curves/surface_dense/mean_bppGeo_d2T_-67_dense.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/InterDigitalInc/GRASP-Net/f6f32c85062eee4b6fb744ad4bf7cd2d05af88f8/assets/grasp_rd_curves/surface_dense/mean_bppGeo_d2T_-67_dense.png -------------------------------------------------------------------------------- /assets/grasp_rd_curves/surface_dense/soldier_viewdep_vox12_bppGeo_d1T_-55.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/InterDigitalInc/GRASP-Net/f6f32c85062eee4b6fb744ad4bf7cd2d05af88f8/assets/grasp_rd_curves/surface_dense/soldier_viewdep_vox12_bppGeo_d1T_-55.png -------------------------------------------------------------------------------- /assets/grasp_rd_curves/surface_dense/soldier_viewdep_vox12_bppGeo_d2T_-86.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/InterDigitalInc/GRASP-Net/f6f32c85062eee4b6fb744ad4bf7cd2d05af88f8/assets/grasp_rd_curves/surface_dense/soldier_viewdep_vox12_bppGeo_d2T_-86.png -------------------------------------------------------------------------------- /assets/grasp_rd_curves/surface_solid/dancer_vox11_00000001_bppGeo_d1T_-91.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/InterDigitalInc/GRASP-Net/f6f32c85062eee4b6fb744ad4bf7cd2d05af88f8/assets/grasp_rd_curves/surface_solid/dancer_vox11_00000001_bppGeo_d1T_-91.png -------------------------------------------------------------------------------- /assets/grasp_rd_curves/surface_solid/dancer_vox11_00000001_bppGeo_d2T_-80.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/InterDigitalInc/GRASP-Net/f6f32c85062eee4b6fb744ad4bf7cd2d05af88f8/assets/grasp_rd_curves/surface_solid/dancer_vox11_00000001_bppGeo_d2T_-80.png -------------------------------------------------------------------------------- /assets/grasp_rd_curves/surface_solid/facade_00064_vox11_bppGeo_d1T_-88.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/InterDigitalInc/GRASP-Net/f6f32c85062eee4b6fb744ad4bf7cd2d05af88f8/assets/grasp_rd_curves/surface_solid/facade_00064_vox11_bppGeo_d1T_-88.png -------------------------------------------------------------------------------- /assets/grasp_rd_curves/surface_solid/facade_00064_vox11_bppGeo_d2T_-73.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/InterDigitalInc/GRASP-Net/f6f32c85062eee4b6fb744ad4bf7cd2d05af88f8/assets/grasp_rd_curves/surface_solid/facade_00064_vox11_bppGeo_d2T_-73.png -------------------------------------------------------------------------------- /assets/grasp_rd_curves/surface_solid/mean_bppGeo_d1T_-90.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/InterDigitalInc/GRASP-Net/f6f32c85062eee4b6fb744ad4bf7cd2d05af88f8/assets/grasp_rd_curves/surface_solid/mean_bppGeo_d1T_-90.png -------------------------------------------------------------------------------- /assets/grasp_rd_curves/surface_solid/mean_bppGeo_d2T_-80.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/InterDigitalInc/GRASP-Net/f6f32c85062eee4b6fb744ad4bf7cd2d05af88f8/assets/grasp_rd_curves/surface_solid/mean_bppGeo_d2T_-80.png -------------------------------------------------------------------------------- /assets/grasp_rd_curves/surface_solid/queen_0200_bppGeo_d1T_-90.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/InterDigitalInc/GRASP-Net/f6f32c85062eee4b6fb744ad4bf7cd2d05af88f8/assets/grasp_rd_curves/surface_solid/queen_0200_bppGeo_d1T_-90.png -------------------------------------------------------------------------------- /assets/grasp_rd_curves/surface_solid/queen_0200_bppGeo_d2T_-78.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/InterDigitalInc/GRASP-Net/f6f32c85062eee4b6fb744ad4bf7cd2d05af88f8/assets/grasp_rd_curves/surface_solid/queen_0200_bppGeo_d2T_-78.png -------------------------------------------------------------------------------- /assets/grasp_rd_curves/surface_solid/soldier_vox10_0690_bppGeo_d1T_-86.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/InterDigitalInc/GRASP-Net/f6f32c85062eee4b6fb744ad4bf7cd2d05af88f8/assets/grasp_rd_curves/surface_solid/soldier_vox10_0690_bppGeo_d1T_-86.png -------------------------------------------------------------------------------- 
/assets/grasp_rd_curves/surface_solid/soldier_vox10_0690_bppGeo_d2T_-72.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/InterDigitalInc/GRASP-Net/f6f32c85062eee4b6fb744ad4bf7cd2d05af88f8/assets/grasp_rd_curves/surface_solid/soldier_vox10_0690_bppGeo_d2T_-72.png -------------------------------------------------------------------------------- /assets/grasp_rd_curves/surface_solid/thaidancer_viewdep_vox12_bppGeo_d1T_-88.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/InterDigitalInc/GRASP-Net/f6f32c85062eee4b6fb744ad4bf7cd2d05af88f8/assets/grasp_rd_curves/surface_solid/thaidancer_viewdep_vox12_bppGeo_d1T_-88.png -------------------------------------------------------------------------------- /assets/grasp_rd_curves/surface_solid/thaidancer_viewdep_vox12_bppGeo_d2T_-77.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/InterDigitalInc/GRASP-Net/f6f32c85062eee4b6fb744ad4bf7cd2d05af88f8/assets/grasp_rd_curves/surface_solid/thaidancer_viewdep_vox12_bppGeo_d2T_-77.png -------------------------------------------------------------------------------- /assets/grasp_rd_curves/surface_sparse/arco_valentino_dense_vox12_bppGeo_d1T_-10.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/InterDigitalInc/GRASP-Net/f6f32c85062eee4b6fb744ad4bf7cd2d05af88f8/assets/grasp_rd_curves/surface_sparse/arco_valentino_dense_vox12_bppGeo_d1T_-10.png -------------------------------------------------------------------------------- /assets/grasp_rd_curves/surface_sparse/arco_valentino_dense_vox12_bppGeo_d2T_-5.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/InterDigitalInc/GRASP-Net/f6f32c85062eee4b6fb744ad4bf7cd2d05af88f8/assets/grasp_rd_curves/surface_sparse/arco_valentino_dense_vox12_bppGeo_d2T_-5.png -------------------------------------------------------------------------------- /assets/grasp_rd_curves/surface_sparse/egyptian_mask_vox12_bppGeo_d1T_-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/InterDigitalInc/GRASP-Net/f6f32c85062eee4b6fb744ad4bf7cd2d05af88f8/assets/grasp_rd_curves/surface_sparse/egyptian_mask_vox12_bppGeo_d1T_-2.png -------------------------------------------------------------------------------- /assets/grasp_rd_curves/surface_sparse/egyptian_mask_vox12_bppGeo_d2T_-7.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/InterDigitalInc/GRASP-Net/f6f32c85062eee4b6fb744ad4bf7cd2d05af88f8/assets/grasp_rd_curves/surface_sparse/egyptian_mask_vox12_bppGeo_d2T_-7.png -------------------------------------------------------------------------------- /assets/grasp_rd_curves/surface_sparse/mean_bppGeo_d1T_-9.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/InterDigitalInc/GRASP-Net/f6f32c85062eee4b6fb744ad4bf7cd2d05af88f8/assets/grasp_rd_curves/surface_sparse/mean_bppGeo_d1T_-9.png -------------------------------------------------------------------------------- /assets/grasp_rd_curves/surface_sparse/mean_bppGeo_d2T_-9.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/InterDigitalInc/GRASP-Net/f6f32c85062eee4b6fb744ad4bf7cd2d05af88f8/assets/grasp_rd_curves/surface_sparse/mean_bppGeo_d2T_-9.png -------------------------------------------------------------------------------- /assets/grasp_rd_curves/surface_sparse/shiva_00035_vox12_bppGeo_d1T_-18.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/InterDigitalInc/GRASP-Net/f6f32c85062eee4b6fb744ad4bf7cd2d05af88f8/assets/grasp_rd_curves/surface_sparse/shiva_00035_vox12_bppGeo_d1T_-18.png -------------------------------------------------------------------------------- /assets/grasp_rd_curves/surface_sparse/shiva_00035_vox12_bppGeo_d2T_-15.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/InterDigitalInc/GRASP-Net/f6f32c85062eee4b6fb744ad4bf7cd2d05af88f8/assets/grasp_rd_curves/surface_sparse/shiva_00035_vox12_bppGeo_d2T_-15.png -------------------------------------------------------------------------------- /assets/grasp_rd_curves/surface_sparse/staue_klimt_vox12_bppGeo_d1T_-15.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/InterDigitalInc/GRASP-Net/f6f32c85062eee4b6fb744ad4bf7cd2d05af88f8/assets/grasp_rd_curves/surface_sparse/staue_klimt_vox12_bppGeo_d1T_-15.png -------------------------------------------------------------------------------- /assets/grasp_rd_curves/surface_sparse/staue_klimt_vox12_bppGeo_d2T_-9.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/InterDigitalInc/GRASP-Net/f6f32c85062eee4b6fb744ad4bf7cd2d05af88f8/assets/grasp_rd_curves/surface_sparse/staue_klimt_vox12_bppGeo_d2T_-9.png -------------------------------------------------------------------------------- /assets/grasp_rd_curves/surface_sparse/ulb_unicorn_vox13_bppGeo_d1T_-6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/InterDigitalInc/GRASP-Net/f6f32c85062eee4b6fb744ad4bf7cd2d05af88f8/assets/grasp_rd_curves/surface_sparse/ulb_unicorn_vox13_bppGeo_d1T_-6.png -------------------------------------------------------------------------------- 
/assets/grasp_rd_curves/surface_sparse/ulb_unicorn_vox13_bppGeo_d2T_-14.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/InterDigitalInc/GRASP-Net/f6f32c85062eee4b6fb744ad4bf7cd2d05af88f8/assets/grasp_rd_curves/surface_sparse/ulb_unicorn_vox13_bppGeo_d2T_-14.png -------------------------------------------------------------------------------- /assets/grasp_results.csv: -------------------------------------------------------------------------------- 1 | sequence,numOutputPointsT,numBitsGeoEncT,d1T,d2T,encTimeT,decTimeT 2 | arco_valentino_dense_vox12,3203501,615816,61.8619,66.5516,8.703,0.43 3 | arco_valentino_dense_vox12,2676518,1956784,65.6641,70.3469,20.38,1.269 4 | arco_valentino_dense_vox12,1949402,5057176,70.4068,75.2051,34.588,2.875 5 | arco_valentino_dense_vox12,1431450,7771784,74.2559,79.0275,21.431,3.664 6 | arco_valentino_dense_vox12,2271817,12241384,78.0465,82.9215,13.879,5.371 7 | boxer_viewdep_vox12,2701476,362536,74.1627,78.124,21.816,0.482 8 | boxer_viewdep_vox12,6356135,1329464,76.6224,83.8253,74.811,1.252 9 | boxer_viewdep_vox12,5114416,2490984,77.0663,84.958,74.668,2.548 10 | boxer_viewdep_vox12,4257564,4992664,78.3553,86.1304,42.863,3.93 11 | boxer_viewdep_vox12,4150827,10093224,80.9844,87.0635,34.615,6.007 12 | dancer_vox11_00000001,2569916,46944,68.2071,69.0121,1.663,0.117 13 | dancer_vox11_00000001,3361261,87408,72.4081,73.8804,4.037,0.195 14 | dancer_vox11_00000001,3148012,209368,75.0371,77.2411,12.993,0.429 15 | dancer_vox11_00000001,2566474,481728,78.5441,81.74,27.194,0.782 16 | dancer_vox11_00000001,2553397,783240,79.7885,83.6042,27.393,0.778 17 | egyptian_mask_vox12,2005027,337728,61.8655,67.5026,1.069,0.298 18 | egyptian_mask_vox12,1004355,998264,64.9473,70.7593,1.751,0.708 19 | egyptian_mask_vox12,404723,1677776,69.6423,75.1465,1.864,1.023 20 | egyptian_mask_vox12,274113,2168792,74.2055,78.9958,1.36,0.987 21 | 
egyptian_mask_vox12,402136,3147520,79.4635,84.6209,1.462,1.282 22 | facade_00009_vox12,2904262,637224,69.2012,72.6777,10.898,0.535 23 | facade_00009_vox12,3573025,2483760,72.885,77.8834,30.295,1.307 24 | facade_00009_vox12,2083334,4738032,74.8936,80.2205,23.059,2.438 25 | facade_00009_vox12,2014057,6644744,75.8124,81.0853,12.58,3.534 26 | facade_00009_vox12,1850178,10272416,79.5124,84.293,12.083,4.966 27 | facade_00064_vox11,3605335,66096,66.8029,67.7305,3.541,0.144 28 | facade_00064_vox11,5206068,135400,70.6084,71.9551,9.292,0.207 29 | facade_00064_vox11,5037879,348480,73.3893,75.3407,30.152,0.566 30 | facade_00064_vox11,4240334,829592,76.3777,79.1454,64.738,1.097 31 | facade_00064_vox11,4096921,1401776,78.1758,81.5428,65.03,1.167 32 | ford_02_q1mm,42928688,58483976,47.53551519999996,53.24607773333326,272.30200000000053,54.074999999999584 33 | ford_02_q1mm,100233198,107273152,52.34020860000007,58.02865713333326,301.2360000000001,83.69000000000017 34 | ford_02_q1mm,209266841,209132376,56.921027066666596,62.37135619999998,359.5200000000009,142.07599999999962 35 | ford_02_q1mm,365570537,454449960,62.735669466666586,68.15299826666657,461.32600000000025,263.71999999999974 36 | ford_02_q1mm,427474783,688272160,66.80835040000001,72.29331246666666,522.0440000000016,396.55699999999894 37 | ford_03_q1mm,52281909,70885736,47.21229213333338,53.06162840000005,276.373,59.87799999999974 38 | ford_03_q1mm,119779805,128541808,51.83996379999996,57.4474434,311.2610000000007,94.92599999999985 39 | ford_03_q1mm,241966355,250126464,56.44332613333328,62.171692333333446,374.3340000000006,163.51200000000028 40 | ford_03_q1mm,400847888,535099552,62.502382866666615,68.33657579999996,471.99599999999975,291.2149999999998 41 | ford_03_q1mm,443711733,788076552,66.50553453333339,72.76089780000005,524.6890000000012,409.3409999999985 42 | house_without_roof_00057_vox12,5063800,845320,70.2228,73.8038,54.333,0.842 43 | house_without_roof_00057_vox12,8807385,3793304,73.7155,78.292,177.772,2.202 44 | 
house_without_roof_00057_vox12,6182255,7927504,75.8318,80.7386,152.165,4.653 45 | house_without_roof_00057_vox12,6411097,11849632,76.8567,81.9336,86.272,6.69 46 | house_without_roof_00057_vox12,5921318,19194224,79.2659,84.6076,58.92,9.601 47 | queen_0200,770485,14864,61.6464,62.442,0.665,0.06 48 | queen_0200,1119476,29512,65.8202,67.2294,0.981,0.083 49 | queen_0200,1099187,74160,68.9305,71.0249,2.168,0.162 50 | queen_0200,928281,167264,71.8349,76.0335,4.125,0.274 51 | queen_0200,931519,281312,73.1119,77.9084,4.16,0.28 52 | shiva_00035_vox12,1775250,246320,64.5783,68.1736,2.936,0.219 53 | shiva_00035_vox12,2152699,927608,66.7825,70.7061,9.503,0.691 54 | shiva_00035_vox12,2011575,3262472,70.5878,75.3573,19.634,2.06 55 | shiva_00035_vox12,1097026,5378032,74.0672,78.8708,11.668,3.075 56 | shiva_00035_vox12,1714358,8815600,78.0545,83.0593,7.731,4.473 57 | soldier_viewdep_vox12,3249199,462576,73.7423,77.0873,30.11,0.559 58 | soldier_viewdep_vox12,7582603,1724960,76.4804,83.4,104.508,1.568 59 | soldier_viewdep_vox12,5985084,3309752,76.914,84.6363,102.454,3.207 60 | soldier_viewdep_vox12,4938390,6502368,78.1647,85.7114,60.893,4.954 61 | soldier_viewdep_vox12,4699369,12734784,81.4495,87.15,50.489,7.593 62 | soldier_vox10_0690,1006894,22352,60.3432,60.9598,0.516,0.069 63 | soldier_vox10_0690,1420871,45872,64.5353,65.6907,0.941,0.098 64 | soldier_vox10_0690,1384487,113808,67.6673,69.6216,2.576,0.215 65 | soldier_vox10_0690,1130360,248128,71.247,74.1512,5.209,0.419 66 | soldier_vox10_0690,1106675,388264,72.8961,76.2662,5.238,0.375 67 | staue_klimt_vox12,1049123,137768,64.4024,67.9998,1.038,0.148 68 | staue_klimt_vox12,1160791,543680,66.5972,70.6105,2.933,0.427 69 | staue_klimt_vox12,990772,1838032,70.4285,75.3919,5.43,1.169 70 | staue_klimt_vox12,530034,2897840,74.0477,78.8713,3.396,1.682 71 | staue_klimt_vox12,839611,4597296,78.3543,83.4665,2.83,2.453 72 | thaidancer_viewdep_vox12,3888408,79184,73.4982,74.3779,3.257,0.108 73 | 
thaidancer_viewdep_vox12,4989606,147424,77.3502,78.9723,7.763,0.215 74 | thaidancer_viewdep_vox12,4769919,343840,79.4927,81.8549,23.957,0.586 75 | thaidancer_viewdep_vox12,3893651,786472,81.9878,85.3332,24.99,1.149 76 | thaidancer_viewdep_vox12,3808951,1248824,82.7545,86.4804,25.109,1.24 77 | ulb_unicorn_vox13,11971347,1282752,68.2073,77.8248,36.171,1.403 78 | ulb_unicorn_vox13,6998441,5359896,71.5852,77.9575,69.355,4.147 79 | ulb_unicorn_vox13,3581595,10913432,76.3053,82.4823,81.958,6.768 80 | ulb_unicorn_vox13,1991457,14224096,80.6156,86.3555,42.633,7.169 81 | ulb_unicorn_vox13,3078379,21095808,85.0336,90.4249,24.883,8.949 82 | -------------------------------------------------------------------------------- /assets/mpeg_test_seq.txt: -------------------------------------------------------------------------------- 1 | queen_0200 2 | soldier_vox10_0690 3 | facade_00064_vox11 4 | dancer_vox11_00000001 5 | thaidancer_viewdep_vox12 6 | soldier_viewdep_vox12 7 | boxer_viewdep_vox12 8 | facade_00009_vox12 9 | house_without_roof_00057_vox12 10 | landscape_00014_vox14 11 | facade_00064_vox14 12 | arco_valentino_dense_vox12 13 | staue_klimt_vox12 14 | shiva_00035_vox12 15 | egyptian_mask_vox12 16 | ulb_unicorn_vox13 17 | ulb_unicorn_hires_vox15 18 | stanford_area_2_vox16 19 | stanford_area_4_vox16 20 | citytunnel_q1mm 21 | overpass_q1mm 22 | tollbooth_q1mm 23 | ford_02_q1mm 24 | ford_03_q1mm 25 | qnxadas-junction-approach 26 | qnxadas-junction-exit 27 | qnxadas-motorway-join 28 | qnxadas-navigating-bends 29 | -------------------------------------------------------------------------------- /config/codec_config/grasp_ford.yaml: -------------------------------------------------------------------------------- 1 | # Configure the encoder/decoder for actual compression/decompression 2 | 3 | # Codec class 4 | codec: 'grasp_codec' 5 | 6 | # Native translation and scaling of the data 7 | translate: [131072, 131072, 131072] 8 | scale: 1 
-------------------------------------------------------------------------------- /config/codec_config/grasp_surface.yaml: -------------------------------------------------------------------------------- 1 | # Configure the encoder/decoder for actual compression/decompression 2 | 3 | # Codec class 4 | codec: 'grasp_codec' 5 | 6 | # Native translation and scaling of the data 7 | translate: [0, 0, 0] 8 | scale: 1 -------------------------------------------------------------------------------- /config/data_config/ford_voxel.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2010-2022, InterDigital 2 | # All rights reserved. 3 | 4 | # See LICENSE under the root folder. 5 | 6 | 7 | # Configure the Ford dataset for loading as sparse voxels 8 | 9 | # Common options of the dataset 10 | dataset: ford_simple 11 | num_points: 150000 # for point clouds in folder 0 - 10, at least have 82602 points, at most have 129392 points 12 | translate: [131072, 131072, 131072] 13 | scale: 1 14 | voxelize: True 15 | sparse_collate: True 16 | return_intensity: False 17 | 18 | # Options on the splitting scheme 19 | splitting: 20 | train: 21 | - 1 22 | test: 23 | - 2 24 | - 3 25 | 26 | # Options under individual configurations 27 | train_cfg: 28 | batch_size: 2 29 | shuffle: True 30 | num_workers: 4 31 | augmentation: True 32 | split: train 33 | val_cfg: 34 | batch_size: 4 35 | shuffle: False 36 | num_workers: 4 37 | augmentation: True 38 | split: val 39 | test_cfg: 40 | batch_size: 4 41 | shuffle: False 42 | num_workers: 4 43 | augmentation: False 44 | split: test -------------------------------------------------------------------------------- /config/data_config/modelnet_voxel_dense.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2010-2022, InterDigital 2 | # All rights reserved. 3 | 4 | # See LICENSE under the root folder. 
5 | 6 | 7 | # Configure the ModelNet dataset for loading as sparse voxels 8 | 9 | # Common options of the dataset 10 | dataset: modelnet_simple 11 | num_points: 25000 12 | coord_min: 0 13 | coord_max: 255 14 | voxelize: True 15 | sparse_collate: True 16 | centralize: True 17 | 18 | # Options under individual configurations 19 | train_cfg: 20 | batch_size: 8 21 | shuffle: True 22 | num_workers: 4 23 | augmentation: True 24 | split: train 25 | val_cfg: 26 | batch_size: 8 27 | shuffle: False 28 | num_workers: 1 29 | augmentation: False 30 | split: val 31 | test_cfg: 32 | batch_size: 1 33 | num_points: null 34 | shuffle: False 35 | num_workers: 1 36 | augmentation: False 37 | split: test -------------------------------------------------------------------------------- /config/data_config/modelnet_voxel_solid.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2010-2022, InterDigital 2 | # All rights reserved. 3 | 4 | # See LICENSE under the root folder. 
5 | 6 | 7 | # Configure the ModelNet dataset for loading as sparse voxels 8 | 9 | # Common options of the dataset 10 | dataset: modelnet_simple 11 | num_points: 400000 12 | coord_min: 0 13 | coord_max: 127 14 | voxelize: True 15 | sparse_collate: True 16 | centralize: True 17 | use_cache: 'modelnet/modelnet_voxel_127.pkl' 18 | 19 | # Options under individual configurations 20 | train_cfg: 21 | batch_size: 8 22 | shuffle: True 23 | num_workers: 4 24 | augmentation: True 25 | split: train 26 | val_cfg: 27 | batch_size: 8 28 | shuffle: False 29 | num_workers: 1 30 | augmentation: False 31 | split: val 32 | test_cfg: 33 | batch_size: 1 34 | num_points: null 35 | shuffle: False 36 | num_workers: 1 37 | augmentation: False 38 | split: test -------------------------------------------------------------------------------- /config/data_config/modelnet_voxel_sparse.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2010-2022, InterDigital 2 | # All rights reserved. 3 | 4 | # See LICENSE under the root folder. 
5 | 6 | 7 | # Configure the ModelNet dataset for loading as sparse voxels 8 | 9 | # Common options of the dataset 10 | dataset: modelnet_simple 11 | num_points: 20000 12 | coord_min: 0 13 | coord_max: 511 14 | voxelize: True 15 | sparse_collate: True 16 | centralize: True 17 | 18 | # Options under individual configurations 19 | train_cfg: 20 | batch_size: 8 21 | shuffle: True 22 | num_workers: 4 23 | augmentation: True 24 | split: train 25 | val_cfg: 26 | batch_size: 8 27 | shuffle: False 28 | num_workers: 1 29 | augmentation: False 30 | split: val 31 | test_cfg: 32 | batch_size: 1 33 | num_points: null 34 | shuffle: False 35 | num_workers: 1 36 | augmentation: False 37 | split: test -------------------------------------------------------------------------------- /config/net_config/grasp_dus1.yaml: -------------------------------------------------------------------------------- 1 | # network configuration for GRASP-Net, down-up scale 1 2 | 3 | --- 4 | architecture: grasp 5 | modules: 6 | dus: 1 7 | entropy_bottleneck: 8 8 | scaling_ratio: -1 # key param 9 | point_mul: -1 # key param 10 | skip_mode: -1 # key param 11 | noise: 0.125 12 | res_enc: 13 | model: point_res_enc 14 | mlp_dims: [3, 16, 32, 64, 64, 128] 15 | fc_dims: [128, 64, 16] 16 | vox_enc: 17 | model: spcnn_down 18 | dims: [16, 32, 8] 19 | vox_dec: 20 | model: spcnn_up 21 | dims: [8, 64] 22 | res_dec: 23 | model: mlpdecoder_sparse 24 | dims: [-1, 128, 128, 64] 25 | -------------------------------------------------------------------------------- /config/net_config/grasp_dus1_faiss.yaml: -------------------------------------------------------------------------------- 1 | # network configuration for GRASP-Net, down-up scale 1 2 | 3 | --- 4 | architecture: grasp 5 | modules: 6 | dus: 1 7 | entropy_bottleneck: 8 8 | scaling_ratio: -1 # key param 9 | point_mul: -1 # key param 10 | skip_mode: -1 # key param 11 | noise: 0.125 12 | res_enc: 13 | faiss: True 14 | model: point_res_enc 15 | mlp_dims: [3, 16, 32, 64, 
64, 128] 16 | fc_dims: [128, 64, 16] 17 | vox_enc: 18 | model: spcnn_down 19 | dims: [16, 32, 8] 20 | vox_dec: 21 | model: spcnn_up 22 | dims: [8, 64] 23 | res_dec: 24 | model: mlpdecoder_sparse 25 | dims: [-1, 128, 128, 64] 26 | -------------------------------------------------------------------------------- /config/net_config/grasp_dus2.yaml: -------------------------------------------------------------------------------- 1 | # network configuration for GRASP-Net, down-up scale 2 2 | 3 | --- 4 | architecture: grasp 5 | modules: 6 | dus: 2 7 | entropy_bottleneck: 8 8 | scaling_ratio: -1 # key param 9 | point_mul: -1 # key param 10 | skip_mode: -1 # key param 11 | noise: 0.125 12 | res_enc: 13 | model: point_res_enc 14 | mlp_dims: [3, 16, 32, 64, 64, 128] 15 | fc_dims: [128, 64, 16] 16 | vox_enc: 17 | model: spcnn_down2 18 | dims: [16, 32, 8] 19 | vox_dec: 20 | model: spcnn_up2 21 | dims: [8, 64, 32] 22 | res_dec: 23 | model: mlpdecoder_sparse 24 | dims: [-1, 128, 128, 64] 25 | -------------------------------------------------------------------------------- /config/optim_config/optim_cd_sparse.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2010-2022, InterDigital 2 | # All rights reserved. 3 | 4 | # See LICENSE under the root folder. 5 | 6 | 7 | # Optimization configuration 8 | 9 | --- 10 | n_epoch: 50 11 | main_args: 12 | lr: 0.0008 13 | opt_args: [0.9, 0.999, 0] 14 | schedule_args: ['step', 10, 0.5] 15 | loss_args: 16 | loss: cd_sparse 17 | alpha: 5 18 | beta: 1 19 | clip_max_norm: -1 -------------------------------------------------------------------------------- /experiments/bench.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2010-2022, InterDigital 2 | # All rights reserved. 3 | 4 | # See LICENSE under the root folder. 
5 | 6 | 7 | # Benchmarking one or more models 8 | 9 | import multiprocessing 10 | multiprocessing.set_start_method('spawn', True) 11 | 12 | import random 13 | import os 14 | import torch 15 | import sys 16 | import csv 17 | sys.path.append(os.path.dirname(os.path.realpath(__file__)) + '/..') 18 | 19 | # Load different utilities from PccAI 20 | from pccai.utils.option_handler import BenchmarkOptionHandler 21 | import pccai.utils.logger as logger 22 | from pccai.pipelines.bench import * 23 | 24 | 25 | def aggregate_sequence_log(log_dict_all): 26 | ''' 27 | Aggregate the dictionaries belonging to the same point cloud seqence as one dictionary, will be used when 28 | benchmarking dynamic point cloud sequences 29 | ''' 30 | for ckpt in log_dict_all.keys(): 31 | log_dict_ckpt = log_dict_all[ckpt] 32 | log_dict_ckpt.sort(key=lambda x: x['seq_name']) 33 | cur_seq_name = '' 34 | log_dict_ckpt_aggregate=[] 35 | 36 | for idx, log_dict in enumerate(log_dict_ckpt): 37 | if log_dict['seq_name'].lower() != cur_seq_name: # encounter a new sequence 38 | cur_seq_name = log_dict['seq_name'].lower() 39 | log_dict_tmp = { # make a new dictionary, only include keys necessary for MPEG reporting 40 | 'pc_name': cur_seq_name, 41 | 'rec_num_points': log_dict['rec_num_points'], 42 | 'bit_total': log_dict['bit_total'], 43 | 'd1_psnr': log_dict['d1_psnr'], 44 | 'seq_cnt': 1 45 | } 46 | if 'd2_psnr' in log_dict: 47 | log_dict_tmp['d2_psnr'] = log_dict['d2_psnr'] 48 | if 'enc_time' in log_dict: 49 | log_dict_tmp['enc_time'] = float(log_dict['enc_time']) 50 | if 'dec_time' in log_dict: 51 | log_dict_tmp['dec_time'] = float(log_dict['dec_time']) 52 | log_dict_ckpt_aggregate.append(log_dict_tmp) 53 | else: # update the existing sequence 54 | log_dict_ckpt_aggregate[-1]['rec_num_points'] += log_dict['rec_num_points'] 55 | log_dict_ckpt_aggregate[-1]['bit_total'] += log_dict['bit_total'] 56 | log_dict_ckpt_aggregate[-1]['d1_psnr'] += log_dict['d1_psnr'] 57 | log_dict_ckpt_aggregate[-1]['seq_cnt'] += 
1 58 | if 'd2_psnr' in log_dict: 59 | log_dict_ckpt_aggregate[-1]['d2_psnr'] += log_dict['d2_psnr'] 60 | if 'enc_time' in log_dict: 61 | log_dict_ckpt_aggregate[-1]['enc_time'] += float(log_dict['enc_time']) 62 | if 'dec_time' in log_dict: 63 | log_dict_ckpt_aggregate[-1]['dec_time'] += float(log_dict['dec_time']) 64 | 65 | # Take average for each sequence 66 | for idx, log_dict in enumerate(log_dict_ckpt_aggregate): 67 | log_dict['d1_psnr'] /= log_dict['seq_cnt'] 68 | if 'd2_psnr' in log_dict: 69 | log_dict['d2_psnr'] /= log_dict['seq_cnt'] 70 | if 'enc_time' in log_dict: 71 | log_dict['enc_time'] = str(log_dict['enc_time']) 72 | if 'dec_time' in log_dict: 73 | log_dict['dec_time'] = str(log_dict['dec_time']) 74 | 75 | log_dict_all[ckpt] = log_dict_ckpt_aggregate 76 | return None 77 | 78 | 79 | def flatten_ckpt_log(log_dict_all): 80 | ''' 81 | The original log_dict_all is a dictionary indexed by the ckpts, then log_dict_all[ckpt] is a list of several 82 | dictionaries, each correspoing to the results of a inference test. 
This function flattens log_dict_all, so 83 | the output log_dict_all_flat is a list of dictionaries, sorted by the pc_name (1st key) and bit_total (2nd key)
mpeg_report_dict_list.append(mpeg_report_dict) 130 | 131 | # Write the CSV file according to the aggregated statistics 132 | mpeg_report_header = ['sequence', 'numOutputPointsT', 'numBitsGeoEncT', 'd1T', 'd2T', 'encTimeT', 'decTimeT'] 133 | with open(mpeg_report_path, 'w') as f: 134 | writer = csv.DictWriter(f, fieldnames=mpeg_report_header) 135 | writer.writeheader() 136 | writer.writerows(mpeg_report_dict_list) 137 | if len(mpeg_report_dict_list) > 0: 138 | logger.log.info('CSV file for MPEG reporting: %s' % mpeg_report_path) 139 | 140 | 141 | if __name__ == "__main__": 142 | 143 | # Parse the options and perform training 144 | option_handler = BenchmarkOptionHandler() 145 | opt = option_handler.parse_options() 146 | 147 | # Create a folder to save the models and the log 148 | if not os.path.exists(opt.exp_folder): 149 | os.makedirs(opt.exp_folder) 150 | 151 | # Initialize a global logger then print out all the options 152 | logger.create_logger(opt.exp_folder, opt.log_file, opt.log_file_only) 153 | option_handler.print_options(opt) 154 | opt = load_benchmark_config(opt) 155 | 156 | # Go with the actual training 157 | if opt.seed is not None: 158 | torch.manual_seed(opt.seed) 159 | random.seed(opt.seed) 160 | log_dict_all = benchmark_checkpoints(opt) 161 | 162 | # Create the MPEG reporting CSV file if needed 163 | if opt.mpeg_report is not None: 164 | gen_mpeg_report( 165 | log_dict_all=log_dict_all, 166 | mpeg_report_path=os.path.join(opt.exp_folder, opt.mpeg_report), 167 | compute_d2=opt.compute_d2, 168 | mpeg_report_sequence=opt.mpeg_report_sequence 169 | ) 170 | logger.log.info('Benchmarking session %s finished.\n' % opt.exp_name) 171 | logger.destroy_logger() 172 | 173 | -------------------------------------------------------------------------------- /experiments/test.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2010-2022, InterDigital 2 | # All rights reserved. 
3 | 4 | # See LICENSE under the root folder. 5 | 6 | 7 | # Test a trained point cloud compression model 8 | 9 | import multiprocessing 10 | multiprocessing.set_start_method('spawn', True) 11 | 12 | import random 13 | import os 14 | import torch 15 | import sys 16 | sys.path.append(os.path.dirname(os.path.realpath(__file__)) + '/..') 17 | 18 | # Load different utilities from PccAI 19 | from pccai.utils.option_handler import TestOptionHandler 20 | import pccai.utils.logger as logger 21 | from pccai.pipelines.test import * 22 | 23 | 24 | if __name__ == "__main__": 25 | 26 | # Parse the options and perform training 27 | option_handler = TestOptionHandler() 28 | opt = option_handler.parse_options() 29 | 30 | # Create a folder to save the models and the log 31 | if not os.path.exists(opt.exp_folder): 32 | os.makedirs(opt.exp_folder) 33 | 34 | # Initialize a global logger then print out all the options 35 | logger.create_logger(opt.exp_folder, opt.log_file, opt.log_file_only) 36 | option_handler.print_options(opt) 37 | opt = load_test_config(opt) 38 | 39 | # Go with the actual training 40 | if opt.seed is not None: 41 | torch.manual_seed(opt.seed) 42 | random.seed(opt.seed) 43 | avg_loss = test_pccnet(opt) 44 | logger.log.info('Testing session %s finished.\n' % opt.exp_name) 45 | logger.destroy_logger() 46 | -------------------------------------------------------------------------------- /experiments/train.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2010-2022, InterDigital 2 | # All rights reserved. 3 | 4 | # See LICENSE under the root folder. 
5 | 6 | 7 | # Train a point cloud compression model 8 | 9 | import random 10 | import os 11 | import torch 12 | import sys 13 | import socket 14 | sys.path.append(os.path.dirname(os.path.realpath(__file__)) + '/..') 15 | 16 | # multi-processing utilities 17 | import torch.multiprocessing as mp 18 | import torch.distributed as dist 19 | 20 | # Load different utilities from PccAI 21 | from pccai.utils.option_handler import TrainOptionHandler 22 | import pccai.utils.logger as logger 23 | from pccai.pipelines.train import * 24 | 25 | 26 | def setup(rank, world_size, master_address, master_port): 27 | """Setup the DDP processes if necessary, each process will be allocated to one GPU.""" 28 | 29 | # Look for an available port first 30 | tmp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 31 | while True: 32 | loc = (master_address, master_port) 33 | res = tmp_socket.connect_ex(loc) 34 | if res != 0: break # found a port 35 | else: master_port += 1 36 | 37 | # initialize the process group 38 | os.environ['MASTER_PORT'] = str(master_port) 39 | os.environ['MASTER_ADDR'] = master_address 40 | dist.init_process_group("gloo", rank=rank, world_size=world_size) 41 | 42 | 43 | def cleanup(): 44 | """Destropy all processes.""" 45 | 46 | dist.destroy_process_group() 47 | 48 | 49 | def train_main(device, opt): 50 | """Main training wrapper.""" 51 | 52 | # Initialize a global logger then print out all the options 53 | logger.create_logger(opt.exp_folder, opt.log_file, opt.log_file_only) 54 | option_handler = TrainOptionHandler() 55 | option_handler.print_options(opt) 56 | opt = load_train_config(opt) 57 | opt.device = device 58 | opt.device_count = torch.cuda.device_count() 59 | if opt.ddp: setup(device, opt.device_count, opt.master_address, opt.master_port) 60 | 61 | # Go with the actual training 62 | if opt.seed is not None: 63 | torch.manual_seed(opt.seed) 64 | random.seed(opt.seed) 65 | avg_loss = train_pccnet(opt) 66 | logger.log.info('Training session %s finished.\n' 
% opt.exp_name) 67 | logger.destroy_logger() 68 | if opt.ddp: cleanup() 69 | 70 | 71 | if __name__ == "__main__": 72 | 73 | # Parse the options and perform training 74 | option_handler = TrainOptionHandler() 75 | opt = option_handler.parse_options() 76 | 77 | # Create a folder to save the models and the log 78 | if not os.path.exists(opt.exp_folder): 79 | os.makedirs(opt.exp_folder) 80 | if opt.ddp: 81 | mp.spawn(train_main, args=(opt,), nprocs=torch.cuda.device_count(), join=True) 82 | else: 83 | train_main(0, opt) -------------------------------------------------------------------------------- /install_torch-1.7.0+cu-10.1.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # GRASP-Net installation example 3 | # Run "echo y | conda create -n grasp python=3.6 && conda activate grasp && ./install_torch-1.7.0+cu-10.1.sh" 4 | 5 | # 1. Basic installation for PccAI 6 | echo y | conda install pytorch==1.7.0 torchvision==0.8.0 torchaudio==0.7.0 cudatoolkit=10.1 -c pytorch 7 | pip install tensorboard==2.9.0 8 | pip install plyfile==0.7.4 9 | pip install --no-index torch-scatter==2.0.7 -f https://pytorch-geometric.com/whl/torch-1.7.0+cu101.html 10 | pip install --no-index torch-sparse==0.6.9 -f https://pytorch-geometric.com/whl/torch-1.7.0+cu101.html 11 | pip install torch-geometric==2.0.3 12 | 13 | # 2. Additional packages for GRASP-Net 14 | 15 | # PCGCv2 for some basic utilities 16 | cd third_party 17 | pip install h5py==3.1.0 18 | pip install torchac==0.9.3 19 | git clone https://github.com/NJUVISION/PCGCv2.git 20 | cd PCGCv2 21 | ln -s ../tmc3 ./tmc3 22 | 23 | # nndistance for computing Chamfer Distance 24 | cd ../nndistance 25 | export PATH="/usr/local/cuda-10.1/bin:$PATH" 26 | export LD_LIBRARY_PATH="/usr/local/cuda-10.1/lib64:$LD_LIBRARY_PATH" 27 | python build.py install 28 | cd ../.. 
29 | 30 | # FAISS for fast nearest-neighbor search 31 | echo y | conda install -c pytorch faiss-gpu 32 | 33 | # MinkowskiEngine for sparse convolution 34 | export CXX=g++-7 35 | echo y | conda install openblas-devel -c anaconda 36 | pip install -U git+https://github.com/NVIDIA/MinkowskiEngine -v --no-deps --install-option="--blas_include_dirs=${CONDA_PREFIX}/include" --install-option="--blas=openblas" 37 | -------------------------------------------------------------------------------- /install_torch-1.8.1+cu-11.2.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # GRASP-Net installation example 3 | # Run "echo y | conda create -n grasp python=3.8 && conda activate grasp && ./install_torch-1.8.1+cu-11.2.sh" 4 | 5 | # 1. Basic installation for PccAI 6 | pip install torch==1.8.1+cu111 torchvision==0.9.1+cu111 torchaudio==0.8.1 -f https://download.pytorch.org/whl/torch_stable.html 7 | pip install tensorboard==2.8.0 8 | pip install plyfile==0.7.4 9 | pip install --no-index torch-scatter -f https://pytorch-geometric.com/whl/torch-1.8.1+cu111.html 10 | pip install --no-index torch-sparse -f https://pytorch-geometric.com/whl/torch-1.8.1+cu111.html 11 | pip install torch-geometric==2.0.3 12 | 13 | # 2. Additional packages for GRASP-Net 14 | 15 | # PCGCv2 for some basic utilities 16 | cd third_party 17 | pip install h5py==3.6.0 18 | pip install torchac==0.9.3 19 | pip install ninja==1.10.2.3 20 | git clone https://github.com/NJUVISION/PCGCv2.git 21 | cd PCGCv2 22 | ln -s ../tmc3 ./tmc3 23 | 24 | # nndistance for computing Chamfer Distance 25 | cd ../nndistance 26 | export PATH="/usr/local/cuda-11.2/bin:$PATH" 27 | export LD_LIBRARY_PATH="/usr/local/cuda-11.2/lib64:$LD_LIBRARY_PATH" 28 | python build.py install 29 | cd .. 
30 | 31 | # FAISS for fast nearest-neighbor search 32 | echo y | conda install -c pytorch faiss-gpu 33 | 34 | # MinkowskiEngine for sparse convolution 35 | echo y | conda install openblas-devel==0.3.10 -c anaconda 36 | export CXX=g++-7 37 | export CUDA_HOME=/usr/local/cuda-11.2 38 | git clone https://github.com/NVIDIA/MinkowskiEngine.git MinkowskiEngine_Py38 39 | cd MinkowskiEngine_Py38 40 | python setup.py install --blas_include_dirs=${CONDA_PREFIX}/include --blas=openblas 41 | cd ../.. 42 | -------------------------------------------------------------------------------- /pccai/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2010-2022, InterDigital 2 | # All rights reserved. 3 | 4 | # See LICENSE under the root folder. 5 | -------------------------------------------------------------------------------- /pccai/codecs/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2010-2022, InterDigital 2 | # All rights reserved. 3 | 4 | # See LICENSE under the root folder. 5 | -------------------------------------------------------------------------------- /pccai/codecs/pcc_codec.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2010-2022, InterDigital 2 | # All rights reserved. 3 | 4 | # See LICENSE under the root folder. 5 | 6 | 7 | import numpy as np 8 | 9 | class PccCodecBase: 10 | """A base class of PCC codec. 
User needs to implement the compress() and decompress() methods.
5 | 6 | 7 | from .point_cloud_dataset import * 8 | -------------------------------------------------------------------------------- /pccai/dataloaders/lidar_base_loader.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2010-2022, InterDigital 2 | # All rights reserved. 3 | 4 | # See LICENSE under the root folder. 5 | 6 | 7 | # Base LiDAR data sets, includeing Ford, KITTI and Qnxadas 8 | 9 | import os 10 | import numpy as np 11 | from torch.utils import data 12 | from pccai.utils.misc import pc_read 13 | 14 | found_quantize = False 15 | 16 | 17 | def absoluteFilePaths(directory): 18 | for dirpath, _, file_names in os.walk(directory): 19 | for f in file_names: 20 | yield os.path.abspath(os.path.join(dirpath, f)) 21 | 22 | 23 | class FordBase(data.Dataset): 24 | """A base Ford dataset.""" 25 | 26 | def __init__(self, data_config, sele_config, **kwargs): 27 | 28 | base_dir = os.path.dirname(os.path.abspath(__file__)) 29 | 30 | # Common options of the dataset 31 | self.return_intensity = data_config.get('return_intensity', False) 32 | self.dataset_path = data_config.get('dataset_path', '../../datasets/ford/') # the default dataset path 33 | self.dataset_path = os.path.abspath(os.path.join(base_dir, self.dataset_path)) 34 | self.translate = data_config.get('translate', [0, 0, 0]) 35 | self.scale = data_config.get('scale', 1) 36 | self.point_max = data_config.get('point_max', -1) 37 | 38 | # Options under a specific configuration 39 | self.split = data_config[sele_config]['split'] 40 | splitting = data_config['splitting'][self.split] 41 | 42 | self.im_idx = [] 43 | for i_folder in splitting: 44 | folder_path = os.path.join(self.dataset_path, 'Ford_' + str(i_folder).zfill(2) + '_q_1mm') 45 | assert os.path.exists(folder_path), f'{folder_path} does not exist' 46 | self.im_idx += absoluteFilePaths(folder_path) 47 | self.im_idx.sort() 48 | 49 | 50 | def __len__(self): 51 | """Returns the total number of samples""" 52 | 
return len(self.im_idx) 53 | 54 | 55 | def __getitem__(self, index): 56 | 57 | pc = (pc_read(self.im_idx[index]) + np.array(self.translate)) * self.scale 58 | if self.point_max > 0 and pc.shape[0] > self.point_max: 59 | pc = pc[:self.point_max, :] 60 | return {'pc': pc, 'ref': None} 61 | 62 | 63 | def get_pc_idx(self, index): 64 | return self.im_idx[index] 65 | 66 | 67 | class QnxadasBase(data.Dataset): 68 | """A base Qnxadas dataset.""" 69 | 70 | def __init__(self, data_config, sele_config, **kwargs): 71 | 72 | base_dir = os.path.dirname(os.path.abspath(__file__)) 73 | dataset_path_default = os.path.abspath(os.path.join(base_dir, '../../datasets/qnxadas/')) # the default dataset path 74 | 75 | # Common options of the dataset 76 | self.return_intensity = data_config.get('return_intensity', False) 77 | dataset_path = data_config.get('dataset_path', dataset_path_default) 78 | self.translate = data_config.get('translate', [0, 0, 0]) 79 | self.scale = data_config.get('scale', 1) 80 | 81 | # Options under a specific configuration 82 | self.split = data_config[sele_config]['split'] 83 | splitting = data_config['splitting'][self.split] 84 | 85 | self.im_idx = [] 86 | for i_folder in splitting: 87 | self.im_idx += absoluteFilePaths(os.path.join(dataset_path, i_folder)) 88 | self.im_idx.sort() 89 | 90 | 91 | def __len__(self): 92 | """Returns the total number of samples""" 93 | return len(self.im_idx) // 2 94 | 95 | 96 | def __getitem__(self, index): 97 | pc = (pc_read(self.im_idx[2 * index + 1]) + np.array(self.translate)) * self.scale 98 | return {'pc': pc, 'ref': None} 99 | 100 | 101 | def get_pc_idx(self, index): 102 | return self.im_idx[2 * index + 1] 103 | 104 | 105 | class KITTIBase(data.Dataset): 106 | """A base SemanticKITTI dataset.""" 107 | 108 | def __init__(self, data_config, sele_config, **kwargs): 109 | 110 | base_dir = os.path.dirname(os.path.abspath(__file__)) 111 | dataset_path = os.path.abspath(os.path.join(base_dir, '../../datasets/kitti/')) # the 
default dataset path 112 | 113 | # Other specific options 114 | self.translate = data_config.get('translate', [0, 0, 0]) 115 | self.scale = data_config.get('scale', 1) 116 | self.quantize_resolution = data_config.get('quantize_resolution', None) if found_quantize else None 117 | self.split = data_config[sele_config]['split'] 118 | splitting = data_config['splitting'][self.split] 119 | 120 | self.im_idx = [] 121 | for i_folder in splitting: 122 | self.im_idx += absoluteFilePaths('/'.join([dataset_path, str(i_folder).zfill(2),'velodyne'])) 123 | self.im_idx.sort() 124 | 125 | 126 | def __len__(self): 127 | """Returns the total number of samples""" 128 | return len(self.im_idx) 129 | 130 | 131 | def __getitem__(self, index): 132 | raw_data = np.fromfile(self.im_idx[index], dtype=np.float32).reshape((-1, 4)) 133 | if self.quantize_resolution is not None: 134 | pc = quantize_resolution(raw_data[:, :3], self.quantize_resolution) 135 | else: 136 | pc = (raw_data[:, :3] + np.array(self.translate)) * self.scale 137 | return {'pc': pc} 138 | 139 | 140 | def get_pc_idx(self, index): 141 | return self.im_idx[index] 142 | -------------------------------------------------------------------------------- /pccai/dataloaders/lidar_loader.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2010-2022, InterDigital 2 | # All rights reserved. 3 | 4 | # See LICENSE under the root folder. 5 | 6 | 7 | # A multi-modal data loader for LiDAR datasets. 
8 | 9 | import os 10 | import numpy as np 11 | from torch.utils import data 12 | 13 | from pccai.utils.convert_image import pc2img 14 | from pccai.utils.convert_octree import OctreeOrganizer 15 | from pccai.dataloaders.lidar_base_loader import FordBase, KITTIBase, QnxadasBase 16 | 17 | 18 | def get_base_lidar_dataset(data_config, sele_config): 19 | if data_config['dataset'].lower().find('ford') >= 0: 20 | loader_class = FordBase 21 | elif data_config['dataset'].lower().find('kitti') >= 0: 22 | loader_class = KITTIBase 23 | elif data_config['dataset'].lower().find('qnxadas') >= 0: 24 | loader_class = QnxadasBase 25 | else: 26 | loader_class = None 27 | return loader_class(data_config, sele_config) 28 | 29 | 30 | class LidarSimple(data.Dataset): 31 | """A simple LiDAR dataset which returns a specified number of 3D points in each point cloud.""" 32 | 33 | def __init__(self, data_config, sele_config, **kwargs): 34 | 35 | self.point_cloud_dataset = get_base_lidar_dataset(data_config, sele_config) 36 | self.num_points = data_config.get('num_points', 150000) # about 150000 points per point cloud 37 | self.seed = data_config.get('seed', None) 38 | self.sparse_collate = data_config.get('sparse_collate', False) 39 | self.voxelize = data_config.get('voxelize', False) 40 | 41 | def __len__(self): 42 | return len(self.point_cloud_dataset) 43 | 44 | def __getitem__(self, index): 45 | pc = self.point_cloud_dataset[index]['pc'] # take out the point cloud coordinates only 46 | np.random.seed(self.seed) 47 | if self.voxelize: 48 | pc = np.round(pc[:self.num_points, :]).astype('int32') # always <= num_points 49 | # This is to facilitate the sparse tensor construction with Minkowski Engine 50 | if self.sparse_collate: 51 | pc = np.hstack((np.zeros((pc.shape[0], 1), dtype='int32'), pc)) 52 | # pc = np.vstack((pc, np.ones((self.num_points - pc.shape[0], 4), dtype='int32') * -1)) 53 | pc[0][0] = 1 54 | return pc 55 | else: 56 | choice = np.random.choice(pc.shape[0], self.num_points, 
replace=True) # always == num_points 57 | return pc[choice, :].astype(dtype=np.float32) 58 | 59 | class LidarSpherical(data.Dataset): 60 | """Converts the original Cartesian coordinate to spherical coordinate then represent as 2D images.""" 61 | 62 | def __init__(self, data_config, sele_config, **kwargs): 63 | 64 | self.point_cloud_dataset = get_base_lidar_dataset(data_config, sele_config) 65 | self.width = data_config['spherical_cfg'].get('width', 1024) # grab all the options about speherical projection 66 | self.height = data_config['spherical_cfg'].get('height', 128) 67 | self.v_fov = data_config['spherical_cfg'].get('v_fov', [-28, 3.0]) 68 | self.h_fov = data_config['spherical_cfg'].get('h_fov', [-180, 180]) 69 | self.origin_shift = data_config['spherical_cfg'].get('origin_shift', [0, 0, 0]) 70 | self.v_fov, self.h_fov = np.array(self.v_fov) / 180 * np.pi, np.array(self.h_fov) / 180 * np.pi 71 | self.num_points = self.width * self.height 72 | self.inf = 1e6 73 | 74 | def __len__(self): 75 | return len(self.point_cloud_dataset) 76 | 77 | def __getitem__(self, index): 78 | data = self.point_cloud_dataset[index]['pc'] # take out the point cloud coordinates only 79 | data[:, 0] += self.origin_shift[0] 80 | data[:, 1] += self.origin_shift[1] 81 | data[:, 2] += self.origin_shift[2] 82 | data_img = pc2img(self.h_fov, self.v_fov, self.width, self.height, self.inf, data) 83 | 84 | return data_img 85 | 86 | 87 | class LidarOctree(data.Dataset): 88 | """Converts an original point cloud into an octree.""" 89 | 90 | def __init__(self, data_config, sele_config, **kwargs): 91 | 92 | self.point_cloud_dataset = get_base_lidar_dataset(data_config, sele_config) 93 | self.rw_octree = data_config.get('rw_octree', False) 94 | if self.rw_octree: 95 | self.rw_partition_scheme = data_config.get('rw_partition_scheme', 'default') 96 | self.octree_cache_folder = 'octree_cache' 97 | 98 | # Create an octree formatter to organize octrees into arrays 99 | self.octree_organizer = 
OctreeOrganizer( 100 | data_config['octree_cfg'], 101 | data_config[sele_config].get('max_num_points', 150000), 102 | kwargs['syntax'].syntax_gt, 103 | self.rw_octree, 104 | data_config[sele_config].get('shuffle_blocks', False), 105 | ) 106 | 107 | def __len__(self): 108 | return len(self.point_cloud_dataset) 109 | 110 | def __getitem__(self, index): 111 | 112 | if self.rw_octree: 113 | file_name = os.path.relpath(self.point_cloud_dataset.get_pc_idx(index), self.point_cloud_dataset.dataset_path) 114 | file_name = os.path.join(self.point_cloud_dataset.dataset_path, self.octree_cache_folder, self.rw_partition_scheme, file_name) 115 | file_name = os.path.splitext(file_name)[0] + '.pkl' 116 | else: file_name = None 117 | 118 | pc = self.point_cloud_dataset[index]['pc'] 119 | # perform octree partitioning and organize the data 120 | pc_formatted, _, _, _, _ = self.octree_organizer.organize_data(pc, file_name=file_name) 121 | 122 | return pc_formatted 123 | -------------------------------------------------------------------------------- /pccai/dataloaders/modelnet_loader.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2010-2022, InterDigital 2 | # All rights reserved. 3 | 4 | # See LICENSE under the root folder. 
5 | 6 | 7 | # A ModelNet data loader 8 | 9 | import os 10 | import os.path 11 | import numpy as np 12 | import pickle 13 | 14 | import torch.utils.data as data 15 | from torch_geometric.transforms.sample_points import SamplePoints 16 | from torch_geometric.datasets.modelnet import ModelNet 17 | from pccai.utils.convert_octree import OctreeOrganizer 18 | import pccai.utils.logger as logger 19 | 20 | BASE_DIR = os.path.dirname(os.path.abspath(__file__)) 21 | dataset_path_default=os.path.abspath(os.path.join(BASE_DIR, '../../datasets/modelnet/')) # the default dataset path 22 | 23 | 24 | def gen_rotate(): 25 | rot = np.eye(3, dtype='float32') 26 | rot[0,0] *= np.random.randint(0,2) * 2 - 1 27 | rot = np.dot(rot, np.linalg.qr(np.random.randn(3, 3))[0]) 28 | return rot 29 | 30 | 31 | class ModelNetBase(data.Dataset): 32 | """A base ModelNet data loader.""" 33 | 34 | def __init__(self, data_config, sele_config, **kwargs): 35 | if 'coord_min' in data_config or 'coord_max' in data_config: 36 | self.coord_minmax = [data_config.get('coord_min', 0), data_config.get('coord_max', 1023)] 37 | else: 38 | self.coord_minmax = None 39 | self.centralize = data_config.get('centralize', True) 40 | self.voxelize = data_config.get('voxelize', False) 41 | self.sparse_collate = data_config.get('sparse_collate', False) 42 | self.augmentation = data_config[sele_config].get('augmentation', False) 43 | self.split = data_config[sele_config]['split'].lower() 44 | self.num_points = data_config['num_points'] 45 | sampler = SamplePoints(num=self.num_points, remove_faces=True, include_normals=False) 46 | self.point_cloud_dataset = ModelNet(root=dataset_path_default, name='40', 47 | train=True if self.split == 'train' else False, transform=sampler) 48 | 49 | 50 | def __len__(self): 51 | return len(self.point_cloud_dataset) 52 | 53 | 54 | def pc_preprocess(self, pc): 55 | """Perform different types of pre-processings to the ModelNet point clouds.""" 56 | 57 | if self.centralize: 58 | centroid = 
np.mean(pc, axis=0) 59 | pc = pc - centroid 60 | 61 | if self.augmentation: # random rotation 62 | pc = np.dot(pc, gen_rotate()) 63 | 64 | if self.coord_minmax is not None: 65 | pc_min, pc_max = np.min(pc), np.max(pc) 66 | pc = (pc - pc_min) / (pc_max - pc_min) * (self.coord_minmax[1] - self.coord_minmax[0]) + self.coord_minmax[0] 67 | 68 | if self.voxelize: 69 | pc = np.unique(np.round(pc).astype('int32'), axis=0) 70 | # This is to facilitate the sparse tensor construction with Minkowski Engine 71 | if self.sparse_collate: 72 | pc = np.hstack((np.zeros((pc.shape[0], 1), dtype='int32'), pc)) 73 | # pc = np.vstack((pc, np.ones((self.num_points - pc.shape[0], 4), dtype='int32') * -1)) 74 | pc[0][0] = 1 75 | return pc 76 | else: # if do not specify minmax, normalize the point cloud within a unit ball 77 | m = np.max(np.sqrt(np.sum(pc**2, axis=1))) 78 | pc = pc / m # scaling 79 | return pc.astype('float32') 80 | 81 | 82 | class ModelNetSimple(ModelNetBase): 83 | """A simple ModelNet data loader where point clouds are directly represented as 3D points.""" 84 | 85 | def __init__(self, data_config, sele_config, **kwargs): 86 | super().__init__(data_config, sele_config) 87 | 88 | # Use_cache specifies the pickle file to be read/written down, "" means no caching mechanism is used 89 | self.use_cache = data_config.get('use_cache', '') 90 | 91 | # By using the cache file, the data is no longer generated on the fly but the loading becomes much faster 92 | if self.use_cache != '': 93 | cache_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../datasets/', self.use_cache) 94 | if os.path.exists(cache_file): # the cache file already exist 95 | logger.log.info("Loading pre-processed ModelNet40 cache file...") 96 | with open(cache_file, 'rb') as f: 97 | self.cache = pickle.load(f) 98 | else: # the cache file is not there yet 99 | self.cache = [] 100 | logger.log.info("Sampling point clouds from raw ModelNet40 data...") 101 | for i in 
range(len(self.point_cloud_dataset)): 102 | # Be careful that here the data type is converted as uint8 to save space 103 | self.cache.append(self.pc_preprocess(self.point_cloud_dataset[i].pos.numpy()).astype(np.uint8)) 104 | with open(cache_file, 'wb') as f: 105 | pickle.dump(self.cache, f) 106 | logger.log.info("ModelNet40 data loaded...\n") 107 | 108 | def __getitem__(self, index): 109 | if self.use_cache: 110 | return self.cache[index].astype(np.int32) # data type convert back to int32 111 | else: 112 | return self.pc_preprocess(self.point_cloud_dataset[index].pos.numpy()) 113 | 114 | 115 | class ModelNetOctree(ModelNetBase): 116 | """ModelNet data loader with uniform sampling and octree partitioning.""" 117 | 118 | def __init__(self, data_config, sele_config, **kwargs): 119 | 120 | data_config['voxelize'] = True 121 | data_config['sparse_collate'] = False 122 | super().__init__(data_config, sele_config) 123 | 124 | self.rw_octree = data_config.get('rw_octree', False) 125 | if self.rw_octree: 126 | self.rw_partition_scheme = data_config.get('rw_partition_scheme', 'default') 127 | self.octree_cache_folder = 'octree_cache' 128 | 129 | # Create an octree formatter to organize octrees into arrays 130 | self.octree_organizer = OctreeOrganizer( 131 | data_config['octree_cfg'], 132 | data_config[sele_config].get('max_num_points', data_config['num_points']), 133 | kwargs['syntax'].syntax_gt, 134 | self.rw_octree, 135 | data_config[sele_config].get('shuffle_blocks', False), 136 | ) 137 | 138 | def __len__(self): 139 | return len(self.point_cloud_dataset) 140 | 141 | def __getitem__(self, index): 142 | 143 | while True: 144 | if self.rw_octree: 145 | file_name = os.path.join(dataset_path_default, self.octree_cache_folder, self.rw_partition_scheme, str(index)) + '.pkl' 146 | else: file_name = None 147 | 148 | # perform octree partitioning and organize the data 149 | pc = self.pc_preprocess(self.point_cloud_dataset[index].pos.numpy()) 150 | pc_formatted, _, _, _, all_skip = 
self.octree_organizer.organize_data(pc, file_name=file_name) 151 | if all_skip: 152 | index += 1 153 | if index >= len(self.point_cloud_dataset): index = 0 154 | else: break 155 | 156 | return pc_formatted 157 | -------------------------------------------------------------------------------- /pccai/dataloaders/point_cloud_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2010-2022, InterDigital 2 | # All rights reserved. 3 | 4 | # See LICENSE under the root folder. 5 | 6 | 7 | # A generic point cloud dataset wrapper 8 | 9 | from torch.utils.data import DataLoader 10 | from pccai.dataloaders.shapenet_part_loader import ShapeNetPart 11 | from pccai.dataloaders.modelnet_loader import ModelNetSimple, ModelNetOctree 12 | from pccai.dataloaders.lidar_loader import LidarSimple, LidarSpherical, LidarOctree 13 | import torch 14 | import numpy as np 15 | 16 | 17 | # https://github.com/pytorch/pytorch/issues/5059 18 | # Fix numpy random seed issue with multi worker DataLoader 19 | # Multi worker based on process forking duplicates the same numpy random seed across all workers 20 | # Note that this issue is absent with pytorch random operations 21 | def wif(id): 22 | process_seed = torch.initial_seed() 23 | # Back out the base_seed so we can use all the bits. 24 | base_seed = process_seed - id 25 | ss = np.random.SeedSequence([id, base_seed]) 26 | # More than 128 bits (4 32-bit words) would be overkill. 
27 | np.random.seed(ss.generate_state(4)) 28 | 29 | 30 | def get_point_cloud_dataset(dataset_name): 31 | """List all the data sets in this function for class retrival.""" 32 | 33 | if dataset_name.lower() == 'shapenet_part': 34 | dataset_class = ShapeNetPart 35 | elif dataset_name.lower() == 'modelnet_simple': 36 | dataset_class = ModelNetSimple 37 | elif dataset_name.lower() == 'modelnet_octree': 38 | dataset_class = ModelNetOctree 39 | elif dataset_name.lower().find('simple') >= 0: 40 | dataset_class = LidarSimple 41 | elif dataset_name.lower().find('spherical') >= 0: 42 | dataset_class = LidarSpherical 43 | elif dataset_name.lower().find('octree') >= 0: 44 | dataset_class = LidarOctree 45 | else: 46 | dataset_class = None 47 | return dataset_class 48 | 49 | 50 | def sparse_collate(list_data): 51 | """A collate function tailored for generating sparse voxels of MinkowskiEngine.""" 52 | 53 | list_data = np.vstack(list_data) 54 | list_data = torch.from_numpy(list_data) 55 | return list_data 56 | 57 | 58 | def point_cloud_dataloader(data_config, syntax=None, ddp=False): 59 | """A wrapper for point cloud datasets.""" 60 | 61 | point_cloud_dataset = get_point_cloud_dataset(data_config[0]['dataset'])(data_config[0], data_config[1], syntax=syntax) 62 | collate_fn = sparse_collate if data_config[0].get('sparse_collate', False) else None 63 | dl_conf = data_config[0][data_config[1]] 64 | 65 | if ddp: # for distributed data parallel 66 | sampler = torch.utils.data.distributed.DistributedSampler(point_cloud_dataset, shuffle=dl_conf['shuffle']) 67 | point_cloud_dataloader = DataLoader(point_cloud_dataset, batch_size=int(dl_conf['batch_size'] / torch.cuda.device_count()), 68 | num_workers=int(dl_conf['num_workers'] / torch.cuda.device_count()), persistent_workers=True if dl_conf['num_workers'] > 0 else False, 69 | worker_init_fn=wif, sampler=sampler, pin_memory=False, drop_last=False, collate_fn=collate_fn) 70 | else: 71 | point_cloud_dataloader = 
DataLoader(point_cloud_dataset, batch_size=dl_conf['batch_size'], shuffle=dl_conf['shuffle'], 72 | num_workers=dl_conf['num_workers'], persistent_workers=True if dl_conf['num_workers'] > 0 else False, 73 | worker_init_fn=wif, pin_memory=False, drop_last=False, collate_fn=collate_fn) 74 | return point_cloud_dataset, point_cloud_dataloader -------------------------------------------------------------------------------- /pccai/dataloaders/shapenet_part_loader.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2010-2022, InterDigital 2 | # All rights reserved. 3 | 4 | # See LICENSE under the root folder. 5 | 6 | 7 | # A ShapeNet-Part data loader 8 | 9 | import torch.utils.data as data 10 | import os 11 | import os.path 12 | import torch 13 | import json 14 | import numpy as np 15 | import pccai.utils.logger as logger 16 | import multiprocessing 17 | from tqdm import tqdm 18 | from functools import partial 19 | 20 | BASE_DIR = os.path.dirname(os.path.abspath(__file__)) 21 | dataset_path_default=os.path.abspath(os.path.join(BASE_DIR, '../../datasets/shapenet_part/')) # the default dataset path 22 | 23 | 24 | def pc_normalize(pc): 25 | """ pc: NxC, return NxC """ 26 | l = pc.shape[0] 27 | centroid = np.mean(pc, axis=0) 28 | pc = pc - centroid 29 | m = np.max(np.sqrt(np.sum(pc**2, axis=1))) 30 | pc = pc / m 31 | return pc 32 | 33 | 34 | def load_pc(index, datapath, classes, normalize): 35 | fn = datapath[index] 36 | cls = classes[datapath[index][0]] 37 | point_set = np.loadtxt(fn[1]).astype(np.float32) 38 | if normalize: 39 | point_set = pc_normalize(point_set) 40 | seg = np.loadtxt(fn[2]).astype(np.int64) - 1 41 | foldername = fn[3] 42 | filename = fn[4] 43 | return (point_set, seg, cls, foldername, filename) 44 | 45 | 46 | class ShapeNetPart(data.Dataset): 47 | """A ShapeNet part dataset class.""" 48 | 49 | def __init__(self, data_config, sele_config, **kwargs): 50 | # Common options of the dataset 51 | dataset_path = 
data_config.get('dataset_path', dataset_path_default) 52 | dataset_path = os.path.join(dataset_path, 'shapenetcore_partanno_segmentation_benchmark_v0') 53 | # Allow override of num_points in specific modes 54 | # null (YAML) / None (Python) means no sampling 55 | num_points = data_config[sele_config].get('num_points', data_config.get('num_points', 2500)) 56 | classification = data_config.get('classification', False) 57 | normalize = data_config.get('normalize', True) 58 | 59 | # Options under a specific configuration 60 | class_choice = data_config[sele_config].get('class_choice', None) 61 | split = data_config[sele_config].get('split', 'train') 62 | augmentation = data_config[sele_config].get('augmentation', False) 63 | # Should perform augmentation in __getitem__() if needed 64 | self.num_points = num_points 65 | self.catfile = os.path.join(dataset_path, 'synsetoffset2category.txt') 66 | self.cat = {} 67 | self.classification = classification 68 | self.normalize = normalize 69 | 70 | with open(self.catfile, 'r') as f: 71 | for line in f: 72 | ls = line.strip().split() 73 | self.cat[ls[0]] = ls[1] 74 | if not class_choice is None: 75 | self.cat = {k: v for k, v in self.cat.items() if k in class_choice} 76 | logger.log.info(self.cat) 77 | self.meta = {} 78 | with open(os.path.join(dataset_path, 'train_test_split', 'shuffled_train_file_list.json'), 'r') as f: 79 | train_ids = set([str(d.split('/')[2]) for d in json.load(f)]) 80 | with open(os.path.join(dataset_path, 'train_test_split', 'shuffled_val_file_list.json'), 'r') as f: 81 | val_ids = set([str(d.split('/')[2]) for d in json.load(f)]) 82 | with open(os.path.join(dataset_path, 'train_test_split', 'shuffled_test_file_list.json'), 'r') as f: 83 | test_ids = set([str(d.split('/')[2]) for d in json.load(f)]) 84 | 85 | for item in self.cat: 86 | self.meta[item] = [] 87 | dir_point = os.path.join(dataset_path, self.cat[item], 'points') 88 | dir_seg = os.path.join(dataset_path, self.cat[item], 'points_label') 89 | 
fns = sorted(os.listdir(dir_point)) 90 | if split == 'trainval': 91 | fns = [fn for fn in fns if ((fn[0:-4] in train_ids) or (fn[0:-4] in val_ids))] 92 | elif split == 'train': 93 | fns = [fn for fn in fns if fn[0:-4] in train_ids] 94 | elif split == 'val': 95 | fns = [fn for fn in fns if fn[0:-4] in val_ids] 96 | elif split == 'test': 97 | fns = [fn for fn in fns if fn[0:-4] in test_ids] 98 | else: 99 | logger.log.info('Unknown split: %s. Exiting..' % (split)) 100 | exit(0) 101 | 102 | for fn in fns: 103 | token = (os.path.splitext(os.path.basename(fn))[0]) 104 | self.meta[item].append((os.path.join(dir_point, token + '.pts'), os.path.join(dir_seg, token + '.seg'),self.cat[item], token)) 105 | self.datapath = [] 106 | for item in self.cat: 107 | for fn in self.meta[item]: 108 | self.datapath.append((item, fn[0], fn[1], fn[2], fn[3])) 109 | 110 | self.classes = dict(zip(sorted(self.cat), range(len(self.cat)))) 111 | logger.log.info(self.classes) 112 | self.num_seg_classes = 0 113 | if not self.classification: 114 | for i in range(len(self.datapath)//50): 115 | l = len(np.unique(np.loadtxt(self.datapath[i][2]).astype(np.uint8))) 116 | if l > self.num_seg_classes: 117 | self.num_seg_classes = l 118 | 119 | load_pc_part = partial(load_pc, datapath=self.datapath, classes=self.classes, normalize=self.normalize) 120 | 121 | self.cache = np.empty(len(self.datapath), dtype=object) 122 | if not data_config.get('lazy_loading', False): 123 | # Precaching 124 | with multiprocessing.Pool() as p: 125 | self.cache = np.array(list(tqdm(p.imap(load_pc_part, np.arange(len(self.datapath)), 32), total=len(self.datapath))), dtype=object) 126 | 127 | def __getitem__(self, index): 128 | value = self.cache[index] 129 | if value is None: 130 | value = self.cache[index] = load_pc(index, self.datapath, self.classes, self.normalize) 131 | point_set, seg, cls, foldername, filename = value 132 | 133 | if self.num_points is not None: 134 | choice = np.random.choice(len(seg), self.num_points, 
replace=True) 135 | # resample 136 | point_set = point_set[choice, :] 137 | 138 | # To Pytorch 139 | point_set = torch.from_numpy(point_set) 140 | return point_set 141 | 142 | 143 | def __len__(self): 144 | return len(self.datapath) 145 | -------------------------------------------------------------------------------- /pccai/models/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2010-2022, InterDigital 2 | # All rights reserved. 3 | 4 | # See LICENSE under the root folder. 5 | 6 | 7 | from .pcc_models import * -------------------------------------------------------------------------------- /pccai/models/modules/get_modules.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2010-2022, InterDigital 2 | # All rights reserved. 3 | 4 | # See LICENSE under the root folder. 5 | 6 | 7 | # Import all the modules to be used here 8 | from pccai.models.modules.pointnet import PointNet, PointNetHetero 9 | from pccai.models.modules.mlpdecoder import MlpDecoder, MlpDecoderHetero 10 | from pccai.models.modules.pointnet_residual import PointResidualEncoder 11 | from pccai.models.modules.mlpdecoder_sparse import MlpDecoderSparse 12 | from pccai.models.modules.spcnn_down import SparseCnnDown1, SparseCnnDown2 13 | from pccai.models.modules.spcnn_up import SparseCnnUp1, SparseCnnUp2 14 | 15 | 16 | def get_module_class(module_name, hetero=False): 17 | """Retrieve the module classes from the modulee name.""" 18 | 19 | # List all the modules and their string name in this dictionary 20 | module_dict = { 21 | 'pointnet': [PointNet, PointNetHetero], # pointnet 22 | 'mlpdecoder': [MlpDecoder, MlpDecoderHetero], # mlpdecoder 23 | # The following modules are for GRASP-Net 24 | 'point_res_enc': [PointResidualEncoder, None], 25 | 'mlpdecoder_sparse': [MlpDecoderSparse, None], 26 | 'spcnn_down': [SparseCnnDown1, None], 27 | 'spcnn_up': [SparseCnnUp1, None], 28 | 'spcnn_down2': 
[SparseCnnDown2, None], 29 | 'spcnn_up2': [SparseCnnUp2, None], 30 | } 31 | 32 | module = module_dict.get(module_name.lower(), None) 33 | assert module is not None, f'module {module_name} was not found, valid modules are: {list(module_dict.keys())}' 34 | try: 35 | module = module[hetero] 36 | except IndexError as e: 37 | raise Exception(f'module {module_name} is not implemented for hetero={hetero}') 38 | 39 | return module -------------------------------------------------------------------------------- /pccai/models/modules/mlpdecoder.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2010-2022, InterDigital 2 | # All rights reserved. 3 | 4 | # See LICENSE under the root folder. 5 | 6 | 7 | # MLP Decoder 8 | 9 | import torch 10 | import torch.nn as nn 11 | import numpy as np 12 | from pccai.models.utils import PointwiseMLP 13 | 14 | 15 | class MlpDecoder(nn.Module): 16 | """MLP decoder in homogeneous batching mode.""" 17 | 18 | def __init__(self, net_config, **kwargs): 19 | super(MlpDecoder, self).__init__() 20 | self.num_points = net_config['num_points'] 21 | dims = net_config['dims'] 22 | self.mlp = PointwiseMLP(dims + [3 * self.num_points], doLastRelu=False) # the MLP layers 23 | 24 | def forward(self, cw): 25 | 26 | out1 = self.mlp(cw) # BatchSize X PointNum X 3 27 | return out1.view(cw.shape[0], self.num_points, -1) 28 | 29 | 30 | class MlpDecoderHetero(nn.Module): 31 | """MLP decoder for heterogeneous batching.""" 32 | 33 | def __init__(self, net_config, **kwargs): 34 | super(MlpDecoderHetero, self).__init__() 35 | self.num_points = net_config['num_points'] 36 | dims = net_config['dims'] 37 | self.mlp = PointwiseMLP(dims + [3 * self.num_points], doLastRelu=False) # the MLP layers 38 | 39 | # Grab the syntax 40 | self.syntax_cw = kwargs['syntax'].syntax_cw 41 | self.syntax_rec = kwargs['syntax'].syntax_rec 42 | 43 | def forward(self, cw): 44 | device = cw.device 45 | pc_block = self.mlp(cw[:, 
self.syntax_cw['cw'][0] : self.syntax_cw['cw'][1] + 1]) # apply MLP layers directly 46 | pc_block = pc_block.view(cw.shape[0] * self.num_points, -1) 47 | 48 | block_npts = torch.ones(cw.shape[0], dtype=torch.long, device=device) * self.num_points 49 | # For each point, indice the index of its codeword/block 50 | cw_idx = torch.arange(block_npts.shape[0], device=device).repeat_interleave(block_npts) 51 | # Mark a point with 1 if it is the first point of a block 52 | block_start = torch.cat((torch.ones(1, device=device), cw_idx[1:] - cw_idx[:-1])).float() 53 | 54 | # Denormalize the point cloud 55 | center = cw[:, self.syntax_cw['block_center'][0]: self.syntax_cw['block_center'][1] + 1].repeat_interleave(block_npts, 0) 56 | scale = cw[:, self.syntax_cw['block_scale']: self.syntax_cw['block_scale'] + 1].repeat_interleave(block_npts, 0) 57 | 58 | # From pc_start in cw (blocks), build pc_start for points 59 | pc_start = torch.zeros(cw.shape[0], device=device).repeat_interleave(block_npts) 60 | # Starting point index for each block 61 | block_idx = torch.cat((torch.zeros(1, device=device, dtype=torch.long), torch.cumsum(block_npts, 0)[:-1]), 0) 62 | # Mark a point as one if it is the first of its point cloud 63 | # We have this binary marker for each block of the point cloud (1 if first block, 0 otherwise) 64 | # We mark the first point of all blocks with the marker of their block 65 | pc_start[block_idx] = cw[:, self.syntax_cw['pc_start']: self.syntax_cw['pc_start'] + 1].squeeze(-1) 66 | 67 | # Denormalization: scaling and translation 68 | pc_block = pc_block / scale # scaling 69 | pc_block = pc_block + center # translation 70 | 71 | # Assemble the output 72 | out = torch.zeros(pc_block.shape[0], self.syntax_rec['__len__']).cuda() 73 | out[:, self.syntax_rec['xyz'][0] : self.syntax_rec['xyz'][1] + 1] = pc_block 74 | out[:, self.syntax_rec['block_start']] = block_start 75 | out[:, self.syntax_rec['block_center'][0] : self.syntax_rec['block_center'][1] + 1] = center 76 | 
out[:, self.syntax_rec['block_scale']] = scale[:, 0] 77 | out[:, self.syntax_rec['pc_start']] = pc_start 78 | return out 79 | 80 | 81 | def prepare_meta_data(self, binstrs, block_pntcnt, octree_organizer): 82 | """Convert the binary strings of an octree to a set of scales and centers of the leaf nodes. 83 | Next, arranges them as the meta data array according to the syntax for decoding. 84 | """ 85 | 86 | leaf_blocks = octree_organizer.departition_octree(binstrs, block_pntcnt) # departition the octree strings to blocks 87 | meta_data = np.zeros((len(leaf_blocks), self.syntax_cw['__len__'] - self.syntax_cw['__meta_idx__']), dtype=np.float32) 88 | cur = 0 89 | 90 | # Assemble the meta data 91 | meta_data[0, self.syntax_cw['pc_start'] - self.syntax_cw['__meta_idx__']] = 1 92 | for idx, block in enumerate(leaf_blocks): 93 | if block['binstr'] >= 0: # only keep the blocks with transform mode 94 | center, scale = octree_organizer.get_normalizer(block['bbox_min'], block['bbox_max']) 95 | meta_data[cur, self.syntax_cw['block_pntcnt'] - self.syntax_cw['__meta_idx__']] = block_pntcnt[idx] 96 | meta_data[cur, self.syntax_cw['block_scale'] - self.syntax_cw['__meta_idx__']] = scale 97 | meta_data[cur, self.syntax_cw['block_center'][0] - self.syntax_cw['__meta_idx__'] : 98 | self.syntax_cw['block_center'][1] - self.syntax_cw['__meta_idx__'] + 1] = center 99 | cur += 1 100 | 101 | # Only returns the useful part 102 | return torch.as_tensor(meta_data[:cur, :], device=torch.device('cuda')).unsqueeze(-1).unsqueeze(-1) -------------------------------------------------------------------------------- /pccai/models/modules/mlpdecoder_sparse.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2010-2022, InterDigital 2 | # All rights reserved. 3 | 4 | # See LICENSE under the root folder. 
5 | 6 | 7 | # MLP Decoder implemented with MinkowskiEngine operated on sparse tensors 8 | 9 | import torch 10 | import torch.nn as nn 11 | import numpy as np 12 | import MinkowskiEngine as ME 13 | 14 | def make_pointwise_mlp_sparse(dims, doLastRelu=False): 15 | """ 16 | Make poinwise MLP layers based on MinkowskiEngine 17 | """ 18 | layers = [] 19 | for i in range(len(dims) - 1): 20 | layers.append( 21 | ME.MinkowskiLinear(dims[i], dims[i + 1], bias=True) 22 | ) 23 | if i != len(dims) - 2 or doLastRelu: 24 | layers.append(ME.MinkowskiReLU(inplace=True)) 25 | return torch.nn.Sequential(*layers) 26 | 27 | class MlpDecoderSparse(nn.Module): 28 | """ 29 | MLP decoder implemented with MinkowskiEngine 30 | """ 31 | 32 | def __init__(self, net_config, **kwargs): 33 | super(MlpDecoderSparse, self).__init__() 34 | self.num_points = net_config['num_points'] 35 | dims = net_config['dims'] 36 | self.mlp = make_pointwise_mlp_sparse(dims + [3 * self.num_points], doLastRelu=False) # the MLP layers 37 | 38 | def forward(self, x): 39 | out = self.mlp(x) # BatchSize X PointNum X 3 40 | return out 41 | -------------------------------------------------------------------------------- /pccai/models/modules/pointnet.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2010-2022, InterDigital 2 | # All rights reserved. 3 | 4 | # See LICENSE under the root folder. 5 | 6 | 7 | # PointNet encoder 8 | 9 | import torch 10 | import torch.nn as nn 11 | from pccai.models.utils import PointwiseMLP, GlobalPool 12 | from torch_scatter import scatter_max, scatter_min, scatter_mean 13 | 14 | 15 | class PointNet(nn.Module): 16 | """The vanilla PointNet model in homogeneous batching mode. 
class PointNet(nn.Module):
    """The vanilla PointNet model in homogeneous batching mode.

    Args:
        mlp_dims: Dimension of the point-wise MLP
        fc_dims: Dimension of the FC applied after max pooling
        mlp_dolastrelu: whether to apply the last ReLU of the MLP
    """

    def __init__(self, net_config, **kwargs):
        super(PointNet, self).__init__()
        mlp_dims = net_config['mlp_dims']
        # Learnable point-wise feature extractor and post-pooling FC head
        self.pointwise_mlp = PointwiseMLP(mlp_dims, net_config.get('mlp_dolastrelu', False))
        self.fc = PointwiseMLP(net_config['fc_dims'], net_config.get('fc_dolastrelu', False))
        # Max pooling over the point dimension yields one global feature
        self.global_pool = GlobalPool(nn.AdaptiveMaxPool2d((1, mlp_dims[-1])))

    def forward(self, data):
        point_feat = self.pointwise_mlp(data)
        return self.fc(self.global_pool(point_feat))


class PointNetHetero(nn.Module):
    """PointNet in heterogeneous batching mode."""

    def __init__(self, net_config, **kwargs):
        super(PointNetHetero, self).__init__()
        self.pointwise_mlp = PointwiseMLP(net_config['mlp_dims'], net_config.get('mlp_dolastrelu', False))  # learnable
        self.fc = PointwiseMLP(net_config['fc_dims'], False)  # learnable
        self.ext_cw = net_config.get('ext_cw', False)

        # Syntax objects describing the row layout of inputs and codewords
        self.syntax_gt = kwargs['syntax'].syntax_gt
        self.syntax_cw = kwargs['syntax'].syntax_cw

    def forward(self, data):
        device = data.device

        batch_size, pnt_cnt, dims = data.shape[0], data.shape[1], data.shape[2]
        data = data.view(-1, dims)

        # Block index of every point, derived from the block-start flags via cumsum()
        blk_idx = torch.cumsum(data[:, self.syntax_gt['block_start']] > 0, dim=0) - 1
        blk_idx = blk_idx[data[:, self.syntax_gt['block_pntcnt']] > 0]  # drop padding and skipped points

        # Mark the first block of each point cloud in the batch
        pc_start = torch.arange(0, batch_size, dtype=torch.long, device=device).repeat_interleave(pnt_cnt)
        pc_start = pc_start[data[:, self.syntax_gt['block_start']] > 0]  # keep the block-start rows only
        pc_start = torch.cat((torch.ones(1, device=device), pc_start[1:] - pc_start[0: -1]))
        data = data[data[:, self.syntax_gt['block_pntcnt']] > 0, :]  # drop padding and skipped points

        # Normalize each block: translate by its center, then scale
        xyz_slc = slice(self.syntax_gt['xyz'][0], self.syntax_gt['xyz'][1] + 1)
        data[:, xyz_slc] -= data[:, self.syntax_gt['block_center'][0]: self.syntax_gt['block_center'][1] + 1]
        data[:, xyz_slc] *= data[:, self.syntax_gt['block_scale']].unsqueeze(-1)

        # Point-wise features (xyz used as the input feature), aggregated per block
        point_feature = self.pointwise_mlp(data[:, xyz_slc])
        if self.ext_cw:
            # Extended codeword: concatenation of max-, min- and mean-pooled features
            cw_inp = torch.cat([
                scatter_max(point_feature, blk_idx.long(), dim=0)[0],
                scatter_min(point_feature, blk_idx.long(), dim=0)[0],
                scatter_mean(point_feature, blk_idx.long(), dim=0),
            ], dim=1)
        else:
            cw_inp = scatter_max(point_feature, blk_idx.long(), dim=0)[0]
        block_feature = self.fc(cw_inp)
        mask = data[:, self.syntax_gt['block_start']] > 0

        # Assemble the codewords together with their meta data
        out = torch.zeros(torch.sum(mask), self.syntax_cw['__len__'], device=device)
        out[:, self.syntax_cw['cw'][0]: self.syntax_cw['cw'][1] + 1] = block_feature
        out[:, self.syntax_cw['block_pntcnt']] = data[mask, self.syntax_gt['block_pntcnt']]
        out[:, self.syntax_cw['block_center'][0]: self.syntax_cw['block_center'][1] + 1] = \
            data[mask, self.syntax_gt['block_center'][0]: self.syntax_gt['block_center'][1] + 1]
        out[:, self.syntax_cw['block_scale']] = data[mask, self.syntax_gt['block_scale']]
        out[:, self.syntax_cw['pc_start']] = pc_start
        return out


# --- pccai/models/modules/pointnet_residual.py ---
# Copyright (c) 2010-2022, InterDigital
# All rights reserved.

# See LICENSE under the root folder.


# Geometric subtraction and point analysis in the GRASP-Net paper

import os, sys
import torch
import torch.nn as nn
import numpy as np
from pccai.models.modules.pointnet import PointNet

sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../../third_party/nndistance'))
from modules.nnd import NNDModule
nndistance = NNDModule()

try:
    import faiss
    import faiss.contrib.torch_utils
    found_FAISS = True
except ModuleNotFoundError:
    found_FAISS = False


class PointResidualEncoder(nn.Module):
    """Geometric subtraction followed by point analysis (PointNet).

    For each point of the coarse cloud, gathers its k nearest neighbors in the
    original cloud, forms residuals (neighbor - coarse point) with outlier
    clipping, and feeds them to a PointNet to produce a feature.
    """

    def __init__(self, net_config, **kwargs):
        super(PointResidualEncoder, self).__init__()

        syntax = kwargs['syntax']
        self.phase = syntax.phase.lower()
        self.k = net_config['k']                    # number of nearest neighbors per coarse point
        self.thres_dist = net_config['thres_dist']  # residual components beyond this are outliers
        self.feat_gen = PointNet(net_config, syntax=syntax)  # feature generation, the point analysis part
        # FAISS is used when requested, and always outside training, if available
        self.faiss = (net_config.get('faiss', False) or self.phase != 'train') and found_FAISS
        self.faiss_resource, self.faiss_gpu_index_flat = None, None
        self.faiss_exact_search = True

    def forward(self, x_orig, x_coarse):
        geo_subtraction = self.geo_subtraction_batch if self.phase == 'train' else self.geo_subtraction
        geo_res = geo_subtraction(x_orig, x_coarse)
        return self.feat_gen(geo_res)

    def geo_subtraction_batch(self, x_orig, x_coarse):
        """Perform geometric subtraction for point clouds in a batched manner.

        Args:
            x_orig: original points; column 0 is the batch index, columns 1:
                the coordinates (per the indexing below).
            x_coarse: coarse points in the same layout.

        Returns:
            (num_coarse, k, 3) tensor of residuals to each coarse point's k NNs.
        """
        geo_res = torch.zeros(size=(x_coarse.shape[0], self.k, 3), device=x_coarse.device)  # geometric residual
        batch_size = x_orig[-1][0].item() + 1
        tot = 0

        for pc_cnt in range(batch_size):

            if self.faiss:  # FAISS for kNN search in one shot
                x_coarse_cur = (x_coarse[x_coarse[:, 0] == pc_cnt][:, 1:]).float().contiguous()  # current coarse
                x_orig_cur = (x_orig[x_orig[:, 0] == pc_cnt][:, 1:]).float().contiguous()  # current full cloud
                if self.faiss_gpu_index_flat is None:
                    self.faiss_resource = faiss.StandardGpuResources()
                    self.faiss_gpu_index_flat = faiss.GpuIndexFlatL2(self.faiss_resource, 3)
                self.faiss_gpu_index_flat.add(x_orig_cur)
                _, I = self.faiss_gpu_index_flat.search(x_coarse_cur, self.k)  # actual search
                self.faiss_gpu_index_flat.reset()
                x_coarse_rep = x_coarse_cur.unsqueeze(1).repeat_interleave(self.k, dim=1)
                geo_res[tot: tot + x_coarse_cur.shape[0], :, :] = x_orig_cur[I] - x_coarse_rep

                # Outlier removal: neighbors farther than thres_dist on any axis
                # are replaced by the first (closest) NN
                mask = torch.logical_or(
                    torch.max(geo_res[tot: tot + x_coarse_cur.shape[0], :, :], dim=2)[0] > self.thres_dist,
                    torch.min(geo_res[tot: tot + x_coarse_cur.shape[0], :, :], dim=2)[0] < -self.thres_dist
                )  # True is outlier
                I[mask] = I[:, 0].unsqueeze(-1).repeat_interleave(self.k, dim=1)[mask]  # indices of the first NN
                geo_res[tot: tot + x_coarse_cur.shape[0], :, :][mask] = x_orig_cur[I[mask]] - x_coarse_rep[mask]
                tot += x_coarse_cur.shape[0]

            else:  # nndistance for sequential nearest-neighbor search
                x_coarse_cur = (x_coarse[x_coarse[:, 0] == pc_cnt][:, 1:]).float().unsqueeze(0).contiguous()  # current coarse
                x_orig_cur = (x_orig[x_orig[:, 0] == pc_cnt][:, 1:]).float().unsqueeze(0).contiguous()  # current full cloud
                for nn_cnt in range(self.k):
                    if x_orig_cur.shape[1] > 0:
                        _, _, idx_coarse, _ = nndistance(x_coarse_cur, x_orig_cur)  # compute nearest neighbor
                        geo_res[tot: tot + x_coarse_cur.shape[1], nn_cnt, :] = \
                            x_orig_cur.squeeze(0)[idx_coarse] - x_coarse_cur.squeeze(0)  # residual in delta xyz
                        mask = torch.logical_and(
                            torch.max(geo_res[tot: tot + x_coarse_cur.shape[1], nn_cnt, :], dim=1)[0] <= self.thres_dist,
                            torch.min(geo_res[tot: tot + x_coarse_cur.shape[1], nn_cnt, :], dim=1)[0] >= -self.thres_dist
                        )  # False is outlier

                        # Outliers fall back to the previous NN's residual
                        seq_outlier = torch.arange(tot, x_coarse_cur.shape[1] + tot)[torch.logical_not(mask)]
                        geo_res[seq_outlier, nn_cnt, :] = geo_res[seq_outlier, nn_cnt - 1, :]
                        idx_coarse = idx_coarse[mask.unsqueeze(0)]  # remove outliers from the NN set
                        mask = torch.ones(x_orig_cur.shape[1], dtype=torch.bool, device=x_orig.device)
                        mask[idx_coarse.squeeze(0)] = False
                        x_orig_cur = x_orig_cur[mask.unsqueeze(0)].unsqueeze(0)  # keep the remaining points
                    else:  # if there is no point left behind, replicate the last one
                        geo_res[tot: tot + x_coarse_cur.shape[1], nn_cnt:, :] = \
                            geo_res[tot: tot + x_coarse_cur.shape[1], nn_cnt - 1, :].unsqueeze(1)
                        break
                tot += x_coarse_cur.shape[1]
        return geo_res

    def geo_subtraction(self, x_orig, x_coarse):
        """Perform geometric subtraction for a single point cloud (inference).

        Requires FAISS; both inputs carry a leading batch dimension of 1.
        """
        geo_res = torch.zeros(size=(x_coarse.shape[1], self.k, 3), device=x_coarse.device)
        x_orig, x_coarse = x_orig.squeeze(0), x_coarse.squeeze(0)
        self.faiss_resource = faiss.StandardGpuResources()

        # Perform kNN search
        if self.faiss_exact_search:  # exact search
            self.faiss_gpu_index_flat = faiss.GpuIndexFlatL2(self.faiss_resource, 3)
            self.faiss_gpu_index_flat.add(x_orig)
            _, I = self.faiss_gpu_index_flat.search(x_coarse, self.k)  # search in one shot
        else:  # approximate search
            self.faiss_gpu_index_flat = faiss.GpuIndexIVFFlat(
                self.faiss_resource, 3, 4 * int(np.ceil(np.sqrt(x_orig.shape[0]))), faiss.METRIC_L2)
            self.faiss_gpu_index_flat.train(x_orig)
            self.faiss_gpu_index_flat.add(x_orig)
            I = torch.zeros(x_coarse.shape[0], self.k, device=x_coarse.device, dtype=torch.long)
            max_query = 2 ** 16
            n_times = int(np.ceil(x_coarse.shape[0] / max_query))
            for cnt in range(n_times):  # search by batch due to limitation of GpuIndexIVFFlat
                # BUGFIX: slice ends are exclusive; the previous "(cnt+1)*max_query - 1"
                # dropped the last row of every chunk, leaving its NN indices at 0.
                slc = slice(cnt * max_query, min((cnt + 1) * max_query, x_coarse.shape[0]))
                I[slc, :] = self.faiss_gpu_index_flat.search(x_coarse[slc, :], self.k)[1]

        self.faiss_gpu_index_flat.reset()
        x_coarse_rep = x_coarse.unsqueeze(1).repeat_interleave(self.k, dim=1)
        geo_res = x_orig[I] - x_coarse_rep

        # Outlier removal: same policy as the batched version
        mask = torch.logical_not(torch.logical_and(
            torch.max(geo_res, dim=2)[0] <= self.thres_dist,
            torch.min(geo_res, dim=2)[0] >= -self.thres_dist
        ))  # True is outlier
        I[mask] = I[:, 0].unsqueeze(-1).repeat_interleave(self.k, dim=1)[mask]  # indices of the first NN
        geo_res[mask] = x_orig[I[mask]] - x_coarse_rep[mask]  # recompute the outlier distance
        del I, x_coarse_rep, x_orig, x_coarse, mask
        return geo_res


# --- pccai/models/modules/spcnn_down.py ---
# Copyright (c) 2010-2022, InterDigital
# All rights reserved.

# See LICENSE under the root folder.
# Downsample with sparse CNN

import os
import sys
import torch
import torch.nn as nn
import MinkowskiEngine as ME

sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../../third_party/PCGCv2'))
from autoencoder import InceptionResNet, make_layer


def make_sparse_down_block(in_dim, hidden_dim, out_dim, doLastRelu=False):
    """Build one IRN-based down-sampling block.

    conv(k3,s1) -> ReLU -> conv(k2,s2) -> ReLU -> InceptionResNet stack,
    optionally followed by a final ReLU.
    """
    blocks = [
        ME.MinkowskiConvolution(in_channels=in_dim, out_channels=hidden_dim,
                                kernel_size=3, stride=1, bias=True, dimension=3),
        ME.MinkowskiReLU(inplace=True),
        ME.MinkowskiConvolution(in_channels=hidden_dim, out_channels=out_dim,
                                kernel_size=2, stride=2, bias=True, dimension=3),
        ME.MinkowskiReLU(inplace=True),
        make_layer(block=InceptionResNet, block_layers=3, channels=out_dim),
    ]
    if doLastRelu:
        blocks.append(ME.MinkowskiReLU(inplace=True))
    return torch.nn.Sequential(*blocks)


class SparseCnnDown1(nn.Module):
    """SparseCnnDown module: down-sample one time."""

    def __init__(self, net_config, **kwargs):
        super(SparseCnnDown1, self).__init__()
        self.dims = net_config['dims']
        self.down_block0 = make_sparse_down_block(self.dims[0], self.dims[0], self.dims[1], True)
        self.conv_last = ME.MinkowskiConvolution(in_channels=self.dims[1], out_channels=self.dims[2],
                                                 kernel_size=3, stride=1, bias=True, dimension=3)

    def forward(self, x):
        return self.conv_last(self.down_block0(x))


class SparseCnnDown2(nn.Module):
    """SparseCnnDown module: down-sample two times."""

    def __init__(self, net_config, **kwargs):
        super(SparseCnnDown2, self).__init__()
        self.dims = net_config['dims']
        self.down_block2 = make_sparse_down_block(self.dims[0], self.dims[0], self.dims[1], True)
        self.down_block1 = make_sparse_down_block(self.dims[1], self.dims[1], self.dims[2], False)

    def forward(self, x):
        return self.down_block1(self.down_block2(x))


# --- pccai/models/modules/spcnn_up.py ---
# Copyright (c) 2010-2022, InterDigital
# All rights reserved.

# See LICENSE under the root folder.


# Upsample with sparse CNN

import os
import sys
import torch
import torch.nn as nn
import MinkowskiEngine as ME

sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../../third_party/PCGCv2'))
from data_utils import isin
from autoencoder import InceptionResNet, make_layer


def make_sparse_up_block(in_dim, hidden_dim, out_dim, doLastRelu):
    """Build one IRN-based up-sampling block.

    generative transposed conv(k2,s2) -> ReLU -> conv(k3,s1) -> ReLU ->
    InceptionResNet stack, optionally followed by a final ReLU.
    """
    blocks = [
        ME.MinkowskiGenerativeConvolutionTranspose(in_channels=in_dim, out_channels=hidden_dim,
                                                   kernel_size=2, stride=2, bias=True, dimension=3),
        ME.MinkowskiReLU(inplace=True),
        ME.MinkowskiConvolution(in_channels=hidden_dim, out_channels=out_dim,
                                kernel_size=3, stride=1, bias=True, dimension=3),
        ME.MinkowskiReLU(inplace=True),
        make_layer(block=InceptionResNet, block_layers=3, channels=out_dim),
    ]
    if doLastRelu:
        blocks.append(ME.MinkowskiReLU(inplace=True))
    return torch.nn.Sequential(*blocks)


class SparseCnnUp1(nn.Module):
    """SparseCnnUp module: up-sample one time, then prune to the target voxels."""

    def __init__(self, net_config, **kwargs):
        super(SparseCnnUp1, self).__init__()
        self.dims = net_config['dims']
        self.up_block0 = make_sparse_up_block(self.dims[0], self.dims[1], self.dims[1], False)
        self.pruning = ME.MinkowskiPruning()

    def forward(self, y1, gt_pc):  # from coarse to fine
        out = self.up_block0(y1)
        return self.prune_voxel(out, gt_pc.C)

    def prune_voxel(self, coarse_voxels, refined_voxels):
        # Keep only the coordinates that are present in refined_voxels
        mask = isin(coarse_voxels.C, refined_voxels)
        return self.pruning(coarse_voxels, mask.to(coarse_voxels.device))


class SparseCnnUp2(nn.Module):
    """SparseCnnUp2 module: up-sample two times, pruning after each stage."""

    def __init__(self, net_config, **kwargs):
        super(SparseCnnUp2, self).__init__()
        self.dims = net_config['dims']
        self.up_block1 = make_sparse_up_block(self.dims[0], self.dims[1], self.dims[1], True)
        self.up_block2 = make_sparse_up_block(self.dims[1], self.dims[2], self.dims[2], False)

        self.pruning = ME.MinkowskiPruning()
        # Max-pooled ground truth provides the intermediate-resolution target
        self.pool = ME.MinkowskiMaxPooling(kernel_size=2, stride=2, dimension=3)

    def forward(self, y1, gt_pc):  # from coarse to fine
        # Upsample for the first time, prune against the pooled ground truth
        out = self.up_block1(y1)
        y2_C = self.pool(gt_pc)
        out = SparseCnnUp1.prune_voxel(self, out, y2_C.C)

        # Upsample for the second time, prune against the full ground truth
        out = self.up_block2(out)
        out = SparseCnnUp1.prune_voxel(self, out, gt_pc.C)
        return out


# --- pccai/models/pcc_models.py ---
# Copyright (c) 2010-2022, InterDigital
# All rights reserved.

# See LICENSE under the root folder.


import torch.nn as nn
from pccai.optim.utils import get_loss_class

# Import all the architectures to be used
from pccai.models.architectures.grasp import GeoResCompression

# List the all the architectures in the following dictionary
# For a custom architecture, it is recommended to implement a compress() and a decompress() functions that can be called by the codec.
architectures = {
    'grasp': GeoResCompression,
}


def get_architecture_class(architecture_name):
    """Return the architecture class registered under architecture_name (case-insensitive)."""
    architecture = architectures.get(architecture_name.lower(), None)
    assert architecture is not None, f'architecture "{architecture_name}" not found, valid architectures are: {list(architectures.keys())}'
    return architecture


class PccModelWithLoss(nn.Module):
    """A wrapper class for point cloud compression model and its associated loss function.

    Args:
        net_config: network configuration with 'architecture' and 'modules' entries.
        syntax: syntax object forwarded to the architecture and the loss.
        loss_args: loss configuration dict, or None to run without a loss.
    """

    def __init__(self, net_config, syntax, loss_args = None):
        super(PccModelWithLoss, self).__init__()

        # Get the architecture and initialize it
        architecture_class = get_architecture_class(net_config['architecture'])
        self.pcc_model = architecture_class(net_config['modules'], syntax)

        # Get the loss class and initialize it.
        # BUGFIX: always define self.loss — forward() checks "self.loss is not None",
        # which previously raised AttributeError whenever loss_args was None.
        self.loss = None
        if loss_args is not None:
            loss_class = get_loss_class(loss_args['loss'])
            self.loss = loss_class(loss_args, syntax)

    def forward(self, data):
        out = self.pcc_model(data)
        if self.loss is not None:
            out['loss'] = self.loss.loss(data, out)
        return out


# --- pccai/models/utils.py ---
# Copyright (c) 2010-2022, InterDigital
# All rights reserved.

# See LICENSE under the root folder.
# Elementary modules and utility functions to process point clouds

import numpy as np
import torch
import torch.nn as nn


def get_Conv2d_layer(dims, kernel_size, stride, doLastRelu):
    """Elementary 2D convolution layers, returned as a list of modules."""
    layers = []
    for i in range(1, len(dims)):
        padding = int((kernel_size - 1) / 2) if kernel_size != 1 else 0
        layers.append(nn.Conv2d(in_channels=dims[i-1], out_channels=dims[i],
                                kernel_size=kernel_size, stride=stride, padding=padding, bias=True))
        if i == len(dims) - 1 and not doLastRelu:
            continue
        layers.append(nn.ReLU(inplace=True))
    return layers  # nn.Sequential(*layers)


class Conv2dLayers(nn.Sequential):
    """2D convolutional layers.

    Args:
        dims: dimensions of the channels
        kernel_size: kernel size of the convolutional layers.
        doLastRelu: do the last ReLU (nonlinear activation) or not.
    """
    def __init__(self, dims, kernel_size, doLastRelu=False):
        layers = get_Conv2d_layer(dims, kernel_size, 1, doLastRelu)  # Note: may need to init the weights and biases here
        super(Conv2dLayers, self).__init__(*layers)


def get_and_init_FC_layer(din, dout, init_bias='zeros'):
    """Get a fully-connected layer with Xavier-initialized weights.

    Args:
        din: input dimension.
        dout: output dimension.
        init_bias: bias initialization scheme, 'uniform' or 'zeros'.

    Raises:
        ValueError: if init_bias is not a known scheme.
    """
    li = nn.Linear(din, dout)
    # init weights/bias
    nn.init.xavier_uniform_(li.weight.data, gain=nn.init.calculate_gain('relu'))
    if init_bias == 'uniform':
        nn.init.uniform_(li.bias)
    elif init_bias == 'zeros':
        li.bias.data.fill_(0.)
    else:
        # BUGFIX: raising a plain string is itself a TypeError in Python 3;
        # raise a proper exception carrying the same message instead.
        raise ValueError('Unknown init ' + init_bias)
    return li


def get_MLP_layers(dims, doLastRelu, init_bias='zeros'):
    """Get a series of MLP (Linear + ReLU) layers as a list."""
    layers = []
    for i in range(1, len(dims)):
        layers.append(get_and_init_FC_layer(dims[i-1], dims[i], init_bias=init_bias))
        if i == len(dims) - 1 and not doLastRelu:
            continue
        layers.append(nn.ReLU())
    return layers


class PointwiseMLP(nn.Sequential):
    """PointwiseMLP layers.

    Args:
        dims: dimensions of the channels
        doLastRelu: do the last ReLU (nonlinear activation) or not.
    Nxdin ->Nxd1->Nxd2->...-> Nxdout
    """
    def __init__(self, dims, doLastRelu=False, init_bias='zeros'):
        layers = get_MLP_layers(dims, doLastRelu, init_bias)
        super(PointwiseMLP, self).__init__(*layers)


class GlobalPool(nn.Module):
    """BxNxK -> BxK"""

    def __init__(self, pool_layer):
        super(GlobalPool, self).__init__()
        self.Pool = pool_layer

    def forward(self, X):
        X = X.unsqueeze(-3)  # Bx1xNxK
        X = self.Pool(X)     # Bx1x1xK
        X = X.squeeze(-2)
        X = X.squeeze(-2)    # BxK
        return X


class PointNetGlobalMax(nn.Sequential):
    """BxNxdims[0] -> Bxdims[-1]"""

    def __init__(self, dims, doLastRelu=False):
        layers = [
            PointwiseMLP(dims, doLastRelu=doLastRelu),        # BxNxK
            GlobalPool(nn.AdaptiveMaxPool2d((1, dims[-1]))),  # BxK
        ]
        super(PointNetGlobalMax, self).__init__(*layers)


class PointNetGlobalAvg(nn.Sequential):
    """BxNxdims[0] -> Bxdims[-1]"""

    def __init__(self, dims, doLastRelu=True):
        layers = [
            PointwiseMLP(dims, doLastRelu=doLastRelu),        # BxNxK
            GlobalPool(nn.AdaptiveAvgPool2d((1, dims[-1]))),  # BxK
        ]
        super(PointNetGlobalAvg, self).__init__(*layers)
class PointNet(nn.Sequential):
    """Vanilla PointNet Model.

    Args:
        MLP_dims: dimensions of the pointwise MLP
        FC_dims: dimensions of the FC to process the max pooled feature
        MLP_doLastRelu: do the last ReLU of the MLP or not.
    Nxdin ->Nxd1->Nxd2->...-> Nxdout
    """
    def __init__(self, MLP_dims, FC_dims, MLP_doLastRelu):
        assert(MLP_dims[-1] == FC_dims[0])
        layers = [
            PointNetGlobalMax(MLP_dims, doLastRelu=MLP_doLastRelu),  # BxK
        ]
        layers.extend(get_MLP_layers(FC_dims, False))
        super(PointNet, self).__init__(*layers)


# --- pccai/models/utils_sparse.py ---
# Copyright (c) 2010-2022, InterDigital
# All rights reserved.

# See LICENSE under the root folder.


# Utility functions for sparse tensors

import torch
import numpy as np
import MinkowskiEngine as ME


def scale_sparse_tensor_batch(x, factor):
    """Scale the (non-batch) coordinates of sparse tensor x by factor, re-voxelizing."""
    coords = torch.hstack((x.C[:, 0:1], (x.C[:, 1:] * factor).round().int()))
    feats = torch.ones((len(coords), 1)).float()
    x = ME.SparseTensor(features=feats, coordinates=coords, tensor_stride=1, device=x.device)
    return x


def sort_sparse_tensor_with_dir(sparse_tensor, dir=1):
    """
    Sort points in sparse tensor according to their coordinates.
    """
    # Fold the coordinate columns into a single sortable key
    vec = sum([sparse_tensor.C.long().cpu()[:, i] *
               (sparse_tensor.C.cpu().max().long() + 1) ** (i if dir == 0 else (3 - i))
               for i in range(sparse_tensor.C.shape[-1])])
    indices_sort = np.argsort(vec)
    sparse_tensor_sort = ME.SparseTensor(features=sparse_tensor.F[indices_sort],
                                         coordinates=sparse_tensor.C[indices_sort],
                                         tensor_stride=sparse_tensor.tensor_stride[0],
                                         device=sparse_tensor.device)
    return sparse_tensor_sort


def slice_sparse_tensor(x, slice):
    '''
    A simple function to slice a sparse tensor into 2**slice pieces (at most 8),
    splitting at the median along the highest-variance axes first.
    '''

    if slice == 0: return [x]
    vars = torch.var(x.C[:, 1:].cpu().float(), dim=0).numpy()
    thres = np.percentile(x.C[:, 1:].cpu().numpy(), 50, axis=0)
    # BUGFIX: the three axes were all labeled 'x' (copy-paste); label them x/y/z.
    # The label is only used by AxisSlice.__repr__ for debugging.
    axis_l = [AxisSlice('x', vars[0], thres[0], x.C[:, 1] < thres[0]),
              AxisSlice('y', vars[1], thres[1], x.C[:, 2] < thres[1]),
              AxisSlice('z', vars[2], thres[2], x.C[:, 3] < thres[2])]
    axis_l = sorted(axis_l, key=lambda axis: axis.var, reverse=True)

    x_list = []
    if slice == 1:
        masks = [
            axis_l[0].mask,
            axis_l[0].nm(),
        ]
    elif slice == 2:
        masks = [
            torch.logical_and(axis_l[0].mask, axis_l[1].mask),
            torch.logical_and(axis_l[0].nm(), axis_l[1].mask),
            torch.logical_and(axis_l[0].mask, axis_l[1].nm()),
            torch.logical_and(axis_l[0].nm(), axis_l[1].nm())
        ]
    elif slice == 3:
        masks = [
            torch.logical_and(torch.logical_and(axis_l[0].mask, axis_l[1].mask), axis_l[2].mask),
            torch.logical_and(torch.logical_and(axis_l[0].nm(), axis_l[1].mask), axis_l[2].mask),
            torch.logical_and(torch.logical_and(axis_l[0].mask, axis_l[1].nm()), axis_l[2].mask),
            torch.logical_and(torch.logical_and(axis_l[0].nm(), axis_l[1].nm()), axis_l[2].mask),
            torch.logical_and(torch.logical_and(axis_l[0].mask, axis_l[1].mask), axis_l[2].nm()),
            torch.logical_and(torch.logical_and(axis_l[0].nm(), axis_l[1].mask), axis_l[2].nm()),
            torch.logical_and(torch.logical_and(axis_l[0].mask, axis_l[1].nm()), axis_l[2].nm()),
            torch.logical_and(torch.logical_and(axis_l[0].nm(), axis_l[1].nm()), axis_l[2].nm())
        ]

    for mask in masks:
        x_list.append(ME.SparseTensor(
            features=torch.ones((torch.sum(mask), 1)).float(),
            coordinates=x.C[mask],
            tensor_stride=1, device=x.device))
    return x_list


class AxisSlice:
    """Per-axis slicing record: axis name, variance, median threshold, below-threshold mask."""

    def __init__(self, name, var, thres, mask):
        self.name = name
        self.var = var
        self.thres = thres
        self.mask = mask

    def __repr__(self):
        return repr((self.name, self.var, self.thres, self.mask))

    def nm(self):
        """Return the complement (logical not) of the mask."""
        return torch.logical_not(self.mask)


# --- pccai/optim/__init__.py ---
# Copyright (c) 2010-2022, InterDigital
# All rights reserved.

# See LICENSE under the root folder.

# --- pccai/optim/cd_sparse.py ---
# Copyright (c) 2010-2022, InterDigital
# All rights reserved.

# See LICENSE under the root folder.
# Compute Chamfer Distance loss for MinkowskiEngine sparse tensors

import torch
import sys
import os

from pccai.optim.pcc_loss import PccLossBase


def nndistance_simple(rec, data):
    """
    A simple (brute-force) nearest-neighbor search, not very efficient, just for reference.

    Args:
        rec: (B, M, D) reconstructed points.
        data: (B, N, D) reference points.

    Returns:
        (data_dist, rec_dist, data_idx, rec_idx): squared distance of every point
        to its nearest neighbor in the other set, and the corresponding indices.
    """
    rec_sq = torch.sum(rec * rec, dim=2, keepdim=True)
    data_sq = torch.sum(data * data, dim=2, keepdim=True)
    cross = torch.matmul(data, rec.permute(0, 2, 1))
    # Squared pairwise distances via ||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2
    dist = data_sq - 2 * cross + rec_sq.permute(0, 2, 1)
    data_dist, data_idx = torch.min(dist, dim=2)
    rec_dist, rec_idx = torch.min(dist, dim=1)
    return data_dist, rec_dist, data_idx, rec_idx


try:
    # Use the efficient compiled nndistance() when available — build it under the
    # third_party folder according to the instructions in Readme.md
    sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../third_party/nndistance'))
    from modules.nnd import NNDModule
    nndistance = NNDModule()
except ModuleNotFoundError:
    # Fall back to the plain PyTorch reference implementation above
    nndistance = nndistance_simple


class ChamferDistSparse(PccLossBase):
    """Chamfer distance loss for sparse voxels."""

    def __init__(self, loss_args, syntax):
        super().__init__(loss_args, syntax)

    def xyz_loss(self, loss_out, net_in, net_out):
        """Compute the xyz (distortion) loss and store it in loss_out['xyz_loss']."""
        x_hat = net_out['x_hat']
        gt = net_out['gt']
        batch_size = x_hat[-1][0].round().int().item() + 1
        dist = torch.zeros(batch_size, device=x_hat.device)
        for i in range(batch_size):
            rec_pts = x_hat[x_hat[:, 0].round().int() == i, 1:].unsqueeze(0).contiguous()
            gt_pts = gt[gt[:, 0] == i, 1:].unsqueeze(0).float().contiguous()
            dist_out, dist_x, _, _ = nndistance(rec_pts, gt_pts)
            # Symmetric Chamfer distance: take the worse of the two directions
            dist[i] = torch.max(torch.mean(dist_out), torch.mean(dist_x))
        loss_out['xyz_loss'] = torch.mean(dist).unsqueeze(0)

    def loss(self, net_in, net_out):
        """Overall R-D loss computation: alpha * D + beta * R."""
        loss_out = {}

        # Rate loss
        if 'likelihoods' in net_out and len(net_out['likelihoods']) > 0:
            self.bpp_loss(loss_out, net_out['likelihoods'], net_out['gt'].shape[0])
        else:
            loss_out['bpp_loss'] = torch.zeros((1,))
            if net_out['x_hat'].is_cuda:
                loss_out['bpp_loss'] = loss_out['bpp_loss'].cuda()

        # Distortion loss
        self.xyz_loss(loss_out, net_in, net_out)

        # R-D loss = alpha * D + beta * R
        loss_out["loss"] = self.alpha * loss_out['xyz_loss'] + self.beta * loss_out["bpp_loss"]
        return loss_out


# --- pccai/optim/pcc_loss.py ---
# Copyright (c) 2010-2022, InterDigital
# All rights reserved.

# See LICENSE under the root folder.
import math
import torch
import sys
import os


class PccLossBase:
    """A base class of rate-distortion loss computation for point cloud compression."""

    def __init__(self, loss_args, syntax):
        self.alpha = loss_args['alpha']  # distortion weight
        self.beta = loss_args['beta']    # rate weight
        self.hetero = syntax.hetero
        self.phase = syntax.phase

    @staticmethod
    def bpp_loss(loss_out, likelihoods, count):
        """Compute the rate loss with the likelihoods.

        Args:
            loss_out: dict updated in place with 'bpp_loss' and one
                'bpp_loss_<key>' entry per non-None likelihood.
            likelihoods: mapping from name to likelihood tensor (or None).
            count: normalization count (e.g. number of points).
        """
        bpp_loss = 0
        for k, v in likelihoods.items():
            if v is not None:
                loss = torch.log(v).sum() / (-math.log(2) * count)
                bpp_loss += loss
                loss_out[f'bpp_loss_{k}'] = loss.unsqueeze(0)
        # BUGFIX/robustness: when every entry is None, bpp_loss is still the
        # int 0 and has no .unsqueeze(); fall back to a zero tensor.
        loss_out['bpp_loss'] = bpp_loss.unsqueeze(0) if torch.is_tensor(bpp_loss) else torch.zeros(1)

    def xyz_loss(self, **kwargs):
        """Subclasses must implement the distortion (xyz) loss."""
        raise NotImplementedError()

    def loss(self, **kwargs):
        """Subclasses must implement the overall loss. Can be R-D loss for lossy
        compression, or rate-only loss for lossless compression."""
        raise NotImplementedError()


# --- pccai/optim/utils.py ---
# Copyright (c) 2010-2022, InterDigital
# All rights reserved.

# See LICENSE under the root folder.
# Utilities related to network optimization

import torch
import torch.optim as optim

# Import all the loss classes to be used
from pccai.optim.cd_sparse import ChamferDistSparse


# List the all the loss classes in the following dictionary
loss_classes = {
    'cd_sparse': ChamferDistSparse
}

def get_loss_class(loss_name):
    """Return the loss class registered under loss_name (case-insensitive)."""
    loss = loss_classes.get(loss_name.lower(), None)
    assert loss is not None, f'loss class "{loss_name}" not found, valid loss classes are: {list(loss_classes.keys())}'
    return loss


def _make_scheduler(optimizer, sche_args):
    """Build an LR scheduler from a schedule-argument list.

    sche_args is one of ['exp', gamma], ['step', step_size, gamma] or
    ['multistep', m1, ..., mk, gamma]; any other scheme (e.g. 'fix') yields None.
    """
    scheme = sche_args[0].lower()
    if scheme == 'exp':
        return optim.lr_scheduler.ExponentialLR(optimizer, gamma=sche_args[1])
    if scheme == 'step':
        return optim.lr_scheduler.StepLR(optimizer, step_size=sche_args[1], gamma=sche_args[2])
    if scheme == 'multistep':
        return optim.lr_scheduler.MultiStepLR(optimizer, milestones=sche_args[1:-1], gamma=sche_args[-1])
    return None  # 'fix' scheme


def configure_optimization(pccnet, optim_config):
    """Configure the optimizers and the schedulers for training.

    Returns:
        (optimizer, scheduler, aux_optimizer, aux_scheduler); the aux pair is
        None when the model has no '.quantiles' parameters.
    """
    # Separate parameters for the main optimizer and the auxiliary optimizer
    parameters = set(
        n
        for n, p in pccnet.named_parameters()
        if not n.endswith(".quantiles") and p.requires_grad
    )
    aux_parameters = set(
        n
        for n, p in pccnet.named_parameters()
        if n.endswith(".quantiles") and p.requires_grad
    )

    # Make sure we don't have an intersection of parameters
    params_dict = dict(pccnet.named_parameters())
    inter_params = parameters & aux_parameters
    union_params = parameters | aux_parameters
    assert len(inter_params) == 0
    assert len(union_params) - len(params_dict.keys()) == 0

    # We only support the Adam optimizer to make things less complicated
    optimizer = optim.Adam(
        (params_dict[n] for n in sorted(list(parameters))),
        lr=optim_config['main_args']['lr'],
        betas=(optim_config['main_args']['opt_args'][0], optim_config['main_args']['opt_args'][1]),
        weight_decay=optim_config['main_args']['opt_args'][2]
    )
    # REFACTOR: the scheduler construction was duplicated verbatim for the main
    # and the auxiliary optimizer; both now go through _make_scheduler().
    scheduler = _make_scheduler(optimizer, optim_config['main_args']['schedule_args'])

    # For the auxiliary parameters
    if len(aux_parameters) > 0:
        aux_optimizer = optim.Adam(
            (params_dict[n] for n in sorted(list(aux_parameters))),
            lr=optim_config['aux_args']['lr'],
            betas=(optim_config['aux_args']['opt_args'][0], optim_config['aux_args']['opt_args'][1]),
            weight_decay=optim_config['aux_args']['opt_args'][2]
        )
        aux_scheduler = _make_scheduler(aux_optimizer, optim_config['aux_args']['schedule_args'])
    else:
        aux_optimizer = aux_scheduler = None

    return optimizer, scheduler, aux_optimizer, aux_scheduler


# --- pccai/pipelines/__init__.py ---
# Copyright (c) 2010-2022, InterDigital
# All rights reserved.

# See LICENSE under the root folder.

# --- pccai/pipelines/bench.py ---
# Copyright (c) 2010-2022, InterDigital
# All rights reserved.

# See LICENSE under the root folder.
# Benchmarking PCC models

import time
import os
import numpy as np
import yaml
import torch
import glob

# Load different utilities from PccAI
from pccai.utils.syntax import SyntaxGenerator
from pccai.utils.pc_metric import compute_metrics
from pccai.utils.misc import pc_read, pc_write, load_state_dict_with_fallback
from pccai.codecs.utils import get_codec_class
from pccai.models.pcc_models import get_architecture_class
import pccai.utils.logger as logger


def create_pccnet(net_config, checkpoint, syntax, device):
    """Build the network model and load its weights from a checkpoint.

    Args:
        net_config: dict with 'architecture' (registry key) and 'modules'.
        checkpoint: loaded torch checkpoint containing 'net_state_dict'.
        syntax: SyntaxGenerator instance describing the internal data layout.
        device: torch device the model is moved to.

    Returns:
        The constructed model, in eval mode, on `device`.
    """

    # Construct the PCC model
    architecture_class = get_architecture_class(net_config['architecture'])
    pccnet = architecture_class(net_config['modules'], syntax)

    # Load the network weights.
    # Each key is re-inserted with its first len('.pcc_model') == 10 characters
    # stripped — i.e. it removes a "pcc_model." style prefix added at training
    # time (only the slice *length* matters here, not the literal's spelling).
    # popitem(False) pops FIFO, so after len(state_dict) iterations every
    # original key has been renamed exactly once.
    state_dict = checkpoint['net_state_dict'].copy()
    for _ in range(len(state_dict)):
        k, v = state_dict.popitem(False)
        state_dict[k[len('.pcc_model'):]] = v
    load_state_dict_with_fallback(pccnet, state_dict)
    pccnet.to(device)
    pccnet.eval()
    logger.log.info("Model weights loaded.")
    return pccnet


def benchmark_checkpoints(opt):
    """Benchmarking several networks with the same architecture.

    For every checkpoint in opt.checkpoints, compresses/decompresses every
    .ply file gathered from opt.input (files or folders, searched
    recursively), computes rate (bpp) and distortion (D1/D2 PSNR via the
    external pc_error tool), and logs per-file and per-checkpoint averages.

    Args:
        opt: parsed benchmark options (checkpoints, input, codec_config,
            bit_depth, peak_value, skip_decode, print/write frequencies, ...).

    Returns:
        dict mapping each checkpoint filename to the list of per-file
        log dicts collected for it.
    """

    logger.log.info("%d GPU(s) will be used for benchmarking." % torch.cuda.device_count())
    opt.phase = 'deploy'
    device = torch.device("cuda:0")
    log_dict_all = {}
    tmp_folder = './tmp'
    os.makedirs(tmp_folder, exist_ok=True)

    # Gather all the point cloud files to be tested; non-.ply entries are
    # treated as folders and searched recursively for .ply files
    pc_file_list=[]
    for item in opt.input:
        if item.lower()[-4:] == '.ply':
            pc_file_list.append(item)
        else:
            pc_file_list += list(glob.iglob(item + '/**/*.ply', recursive=True))
    pc_file_list.sort()

    for filename_ckpt in opt.checkpoints:

        log_dict_ckpt = []
        logger.log.info("Working on checkpoint %s." % filename_ckpt)
        checkpoint = torch.load(filename_ckpt)
        if opt.checkpoint_net_config == True:
            # Prefer the network config stored inside the checkpoint itself
            opt.net_config = checkpoint['net_config']
            logger.log.info("Model config loaded from check point.")
            logger.log.info(opt.net_config)
        syntax = SyntaxGenerator(opt=opt)
        pccnet = create_pccnet(opt.net_config, checkpoint, syntax, device)

        # Start the benchmarking
        t = time.monotonic()
        for idx, pc_file in enumerate(pc_file_list):

            bit_depth = opt.bit_depth[0 if len(opt.bit_depth) == 1 else idx] # support testing several point clouds with different bit-depths, individual bit_depths need to be provided in this case
            codec = get_codec_class(opt.codec_config['codec'])(opt.codec_config, pccnet, bit_depth, syntax) # initialize the codec

            # Load the point cloud and initialize the log_dict
            pc_raw = pc_read(pc_file)
            log_dict = {
                'pc_name': os.path.split(pc_file)[1],
                'num_points': pc_raw.shape[0],
            }
            if opt.mpeg_report_sequence:
                # The parent folder name identifies the sequence in MPEG reports
                log_dict['seq_name'] = os.path.basename(os.path.dirname(pc_file))

            with torch.no_grad():
                # Encode pc_raw with pccnet, obtain compressed_files
                compressed_files, stat_dict_enc = codec.compress(pc_raw, tag=os.path.join(tmp_folder, os.path.splitext(log_dict['pc_name'])[0] + '_' + opt.exp_name))

                # Decode compressed_files with pccnet, obtain pc_rec
                if opt.skip_decode == False:
                    pc_rec, stat_dict_dec = codec.decompress(compressed_files)

            # Update the log_dict and compute D1, D2; rate is measured from
            # the actual on-disk size of the compressed files
            log_dict['bit_total'] = np.sum([os.stat(f).st_size for f in compressed_files]) * 8
            log_dict['bpp'] = log_dict['bit_total'] / log_dict['num_points']

            peak_value = opt.peak_value[0 if len(opt.peak_value) == 1 else idx] # support point clouds with different bit-depths, individual peak values need to be provided in this case
            if opt.skip_decode:
                # -1 marks "not computed" when decoding is skipped
                log_dict['d1_psnr'] = -1
                log_dict['d2_psnr'] = -1
                log_dict['rec_num_points'] = -1
            else:
                log_dict.update(compute_metrics(pc_file, pc_rec, peak_value, opt.compute_d2))
                log_dict['rec_num_points'] = pc_rec.shape[0]
            log_dict.update(stat_dict_enc)
            if opt.skip_decode == False:
                log_dict.update(stat_dict_dec)
            log_dict_ckpt.append(log_dict)
            if opt.remove_compressed_files:
                for f in compressed_files: os.remove(f)

            # Log current metrics if needed
            if opt.print_freq > 0 and idx % opt.print_freq == 0:
                message = ' id: %d/%d, ' % (idx + 1, len(pc_file_list))
                for k, v in log_dict.items():
                    message += '%s: %s, ' % (k, str(v))
                logger.log.info(message[:-2])

            # Write down the point cloud if needed
            if opt.pc_write_freq > 0 and idx % opt.pc_write_freq == 0 and opt.skip_decode == False:
                filename_rec = os.path.join(opt.exp_folder, opt.write_prefix + os.path.splitext(log_dict['pc_name'])[0] + "_rec.ply")
                pc_write(pc_rec, filename_rec)

        elapse = time.monotonic() - t
        log_dict_all[filename_ckpt] = log_dict_ckpt

        # Compute the average metrics for this current checkpoint.
        # seq_bpp weights every file by its point count (total bits over
        # total points), unlike 'bpp' which averages the per-file ratios.
        basic_metrics = [(log_dict['bpp'], log_dict['bit_total'], log_dict['num_points'], log_dict['d1_psnr'],
            log_dict['d2_psnr'] if opt.compute_d2 else -1) for log_dict in log_dict_ckpt]
        avg_bpp, avg_size, avg_num_points, avg_d1_psnr, avg_d2_psnr = np.mean(np.array(basic_metrics), axis=0).tolist()
        avg_metrics = {'bpp': avg_bpp, 'seq_bpp': avg_size / avg_num_points, 'd1_psnr': avg_d1_psnr}
        if avg_d2_psnr > 0: avg_metrics['d2_psnr'] = avg_d2_psnr

        # Log current metrics for the check point
        message = 'Compression metrics --- time: %f, ' % elapse
        for k, v in avg_metrics.items(): message += 'avg_%s: %f, ' % (k, v)
        logger.log.info(message[:-2] + '\n')

    return log_dict_all


def load_benchmark_config(opt):
    """Load all the configuration files for benchmarking.

    Replaces the opt.codec_config / opt.net_config *paths* with the parsed
    YAML dicts, applying the opt.slice override to the codec config.

    Returns:
        The same opt object, mutated in place.
    """

    # Load the codec configuration
    with open(opt.codec_config, 'r') as file:
        codec_config = yaml.load(file, Loader=yaml.FullLoader)
    if opt.slice is not None:
        codec_config['slice'] = opt.slice
    opt.codec_config = codec_config

    # Load the network configuration (may be left empty when it is taken
    # from the checkpoint instead, cf. benchmark_checkpoints)
    if opt.net_config != '':
        with open(opt.net_config, 'r') as file:
            net_config = yaml.load(file, Loader=yaml.FullLoader)
        opt.net_config = net_config

    return opt


if __name__ == "__main__":

    # This module is a library for experiments/bench.py, not an entry point
    logger.log.error('Not implemented.')
# Convert a LiDAR point cloud to a range image based on spherical coordinate conversion

import numpy as np


def cart2spherical(input_xyz):
    """Conversion from Cartesian coordinates to spherical coordinates.

    Args:
        input_xyz: (N, 3) array of Cartesian (x, y, z) coordinates.

    Returns:
        (N, 3) array of (r, alpha, epsilon): radius, azimuth and elevation.

    NOTE(review): a point at the origin (r == 0) produces a division by zero
    and NaN angles — callers are expected not to pass such points.
    """
    r = np.sqrt(input_xyz[:, 0] ** 2 + input_xyz[:, 1] ** 2 + input_xyz[:, 2] ** 2)
    alpha = np.arctan2(input_xyz[:, 1], input_xyz[:, 0])  # azimuth, corresponding to width
    epsilon = np.arcsin(input_xyz[:, 2] / r)  # elevation, corresponding to height
    return np.stack((r, alpha, epsilon), axis=1)


def spherical2cart(input_spherical):
    """Conversion from spherical coordinates to Cartesian coordinates.

    Inverse of cart2spherical(); input columns are (r, alpha, epsilon).
    """
    r = input_spherical[:, 0]
    alpha = input_spherical[:, 1]
    epsilon = input_spherical[:, 2]
    x = r * np.cos(alpha) * np.cos(epsilon)
    y = r * np.sin(alpha) * np.cos(epsilon)
    z = r * np.sin(epsilon)
    return np.stack((x, y, z), axis=1)


def pc2img(h_fov, v_fov, width, height, inf, data):
    """Convert a point cloud to a 2D range image.

    Args:
        h_fov: (min, max) horizontal field of view in radians (azimuth).
        v_fov: (min, max) vertical field of view in radians (elevation).
        width, height: output image dimensions in pixels.
        inf: sentinel value written to empty pixels.
        data: (N, 3) array of Cartesian point coordinates.

    Returns:
        (height, width) float32 image whose occupied pixels hold the range r
        of the projected point; when several points fall into the same pixel
        the last write wins.
    """
    data_spherical = cart2spherical(data)

    # Normalize azimuth/elevation into [0, 1] relative to the FOV, then
    # quantize to pixel coordinates
    x = (data_spherical[:, 1] - h_fov[0]) / (h_fov[1] - h_fov[0])
    y = (data_spherical[:, 2] - v_fov[0]) / (v_fov[1] - v_fov[0])
    x = np.round(x * (width - 1)).astype(np.int32)
    y = np.round(y * (height - 1)).astype(np.int32)

    # Exclude the pixels that are out of the selected FOV
    mask = ~((x < 0) | (x >= width) | (y < 0) | (y >= height))
    x, y = x[mask], y[mask]
    ranges = data_spherical[:, 0][mask]  # renamed from `range`: don't shadow the builtin
    data_img = np.ones((height, width), dtype=np.float32) * inf
    data_img[y, x] = ranges

    return data_img


def img2pc(h_fov, v_fov, width, height, inf, data):
    """Convert a 2D range image back to a point cloud (inverse of pc2img)."""

    # Reconstruct the azimuth/elevation of every pixel center
    alpha = (np.arange(width) / (width - 1)) * (h_fov[1] - h_fov[0]) + h_fov[0]
    epsilon = (np.arange(height) / (height - 1)) * (v_fov[1] - v_fov[0]) + v_fov[0]
    alpha, epsilon = np.meshgrid(alpha, epsilon)
    data_pc = np.stack((data, alpha, epsilon), axis=2)
    data_pc = data_pc.reshape(-1, 3)
    # Keep only occupied pixels: range strictly below the `inf` sentinel
    # (with a guard band of 1 against float rounding)
    data_pc = data_pc[data_pc[:, 0] < inf - 1, :]
    data_pc = spherical2cart(data_pc)

    return data_pc
# A logger for outputting all the information to both the display and a specified file

import logging
import sys
import os

# Module-level logger handle, populated by create_logger()
log = None


def create_logger(exp_folder, file_name, log_file_only):
    """Set up the global logger.

    Logs to stdout unless log_file_only is set, and additionally to the file
    exp_folder/file_name when file_name is a non-empty string.
    """
    global log

    handler_list = [] if log_file_only else [logging.StreamHandler(sys.stdout)]
    if file_name != '':
        log_path = os.path.join(exp_folder, file_name)
        # Make sure the log file's parent folder exists before opening it
        os.makedirs(os.path.split(log_path)[0], exist_ok=True)
        handler_list.append(logging.FileHandler(log_path, mode='w'))

    # Detach every handler previously attached to the root logger so that
    # repeated calls do not duplicate output
    for stale_handler in logging.root.handlers[:]:
        logging.root.removeHandler(stale_handler)
    logging.basicConfig(level=logging.INFO, format='[%(asctime)s] %(message)s', handlers=handler_list)
    log = logging.getLogger()


def destroy_logger():
    """Close and detach every handler of the global logger."""
    for handler in log.handlers[:]:
        handler.close()
        log.removeHandler(handler)
import numpy as np
import pccai.utils.logger as logger
from plyfile import PlyData, PlyElement

# # Abandon Open3D for simplification
# import open3d as o3d

# def pc_read(file_name):
#     return np.asarray(o3d.io.read_point_cloud(file_name).points)

# def pc_write(pc, file_name, coloring=None, normals=None):
#     """Basic writing tool for point clouds."""

#     pc = pt_to_np(pc)
#     pc_o3d = o3d.geometry.PointCloud()
#     try:
#         pc_o3d.points = o3d.utility.Vector3dVector(pc)
#         if coloring is not None:
#             pc_o3d.colors = o3d.utility.Vector3dVector(coloring)
#         if normals is not None:
#             pc_o3d.normals = o3d.utility.Vector3dVector(normals)
#     except RuntimeError as e:
#         logger.log.info(pc, coloring, normals)
#         logger.log.info(type(pc), type(coloring), type(normals))
#         raise e
#     o3d.io.write_point_cloud(file_name, pc_o3d)


def pc_write(pc, file_name):
    """Write a point cloud to a binary .ply file with plyfile.

    `pc` is transposed and moved to CPU first, so it is expected to be a
    torch tensor shaped (N, 3) — TODO confirm against callers.
    Coordinates are stored as float32 ('f4').
    """
    pc_np = pc.T.cpu().numpy()
    vertex = list(zip(pc_np[0], pc_np[1], pc_np[2]))
    vertex = np.array(vertex, dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
    elements = PlyElement.describe(vertex, "vertex")
    PlyData([elements]).write(file_name)
    return


def pc_read(filename):
    """Read the x/y/z vertex columns of a .ply file into an (N, 3) array."""
    ply_raw = PlyData.read(filename)['vertex'].data
    pc = np.vstack((ply_raw['x'], ply_raw['y'], ply_raw['z'])).transpose()
    # ascontiguousarray: the transpose above yields a non-contiguous view
    return np.ascontiguousarray(pc)


def pt_to_np(tensor):
    """Convert PyTorch tensor to NumPy array."""

    return tensor.contiguous().cpu().detach().numpy()


def load_state_dict_with_fallback(obj, dict):
    """Load a checkpoint with fall back.

    Tries a strict load_state_dict first; on RuntimeError (e.g. missing or
    unexpected keys) logs the failure and retries with strict=False.
    NOTE(review): the parameter name `dict` shadows the builtin — renaming it
    would change the keyword-argument interface, so it is only flagged here.
    """

    try:
        obj.load_state_dict(dict)
    except RuntimeError as e:
        logger.log.exception(e)
        logger.log.info(f'Strict load_state_dict has failed. Attempting in non strict mode.')
        obj.load_state_dict(dict, strict=False)


import subprocess
import os
import random

from pccai.utils.misc import pc_write
# Repository root is two levels above this file; used to locate pc_error
base_path = os.path.split(__file__)[0]

def compute_metrics(gt_file, pc_rec, res, normal=False):
    """Compute D1 and/or D2 with pc_error tool from MPEG

    Writes `pc_rec` to a temporary .ply under ./tmp, runs the external
    third_party/pc_error binary against `gt_file` with peak value `res`,
    and parses the final-pass PSNR lines from its stdout.

    Args:
        gt_file: path to the ground-truth .ply file.
        pc_rec: reconstructed point cloud (tensor accepted by pc_write).
        res: peak value passed as --resolution for PSNR computation.
        normal: when True, also request the point-to-plane (D2) metric,
            using `gt_file` itself as the normals source.

    Returns:
        {'d1_psnr': float|None, 'd2_psnr': float|None}; a value stays None
        if the corresponding line was not found in the tool's output.
    """

    # NOTE(review): random()-based name can collide across processes and the
    # command is run through the shell with interpolated paths (shell=True);
    # paths containing spaces or shell metacharacters would break or be
    # interpreted — acceptable only for trusted, locally generated paths.
    tmp_file_name = os.path.join('./tmp/', 'metric_'+str(hex(int(random.random() * 1e15)))+'.ply')
    rec_file = os.path.join(base_path, '../..', tmp_file_name)
    pc_error_path = os.path.join(base_path, '../..', 'third_party/pc_error')
    pc_write(pc_rec, rec_file)
    cmd = pc_error_path + ' -a '+ gt_file + ' -b '+ rec_file + ' --hausdorff=1 '+ ' --resolution=' + str(res)
    if normal: cmd = cmd + ' -n ' + gt_file
    bg_proc=subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
    line_b = bg_proc.stdout.readline()

    d1_key = 'mseF,PSNR (p2point):'
    d2_key = 'mseF,PSNR (p2plane):'
    d1_psnr, d2_psnr = None, None
    while line_b:
        line = line_b.decode(encoding='utf-8')
        line_b = bg_proc.stdout.readline()
        # idx > 0 assumes the key never starts at column 0 in pc_error's
        # output — TODO confirm; a match at column 0 would be skipped
        idx = line.find(d1_key)
        if idx > 0: d1_psnr = float(line[idx + len(d1_key):])
        if normal:
            idx = line.find(d2_key)
            if idx > 0: d2_psnr = float(line[idx + len(d2_key):])
    os.remove(rec_file)
    return {"d1_psnr": d1_psnr, "d2_psnr": d2_psnr}
# Defines and generates the internal syntax and status, which serves the heterogeneous mode and marks the module phase

def gen_syntax_gt(hetero):
    """Return the ground-truth data layout for heterogeneous batching.

    Gives None when `hetero` is falsy (homogeneous mode needs no layout).
    """
    if not hetero:
        return None
    return {
        '__len__': 10,
        'xyz': [0, 2],
        'block_pntcnt': 3,
        'block_center': [4, 6],
        'block_scale': 7,
        'block_start': 9,
    }


class SyntaxGenerator():
    """Generate the syntax for internal data and module status communications."""

    def __init__(self, opt):
        self.hetero = opt.hetero
        self.phase = opt.phase
        self.generate_syntax_gt()
        self.generate_syntax_rec()
        self.generate_syntax_cw(opt.net_config)

    def generate_syntax_gt(self, **kwargs):
        """xyz have to be arranged at the beginning, the rest can be swapped.

        Data syntax: x, y, z, block_pntcnt, block_center, block_scale, block_start
        index:       0, 1, 2, 3,            4 ~ 6,        7,           8
        """
        self.syntax_gt = gen_syntax_gt(self.hetero)

    def generate_syntax_rec(self, **kwargs):
        """xyz have to be arranged at the beginning, the rest can be swapped.

        Rec. syntax: x, y, z, pc_start
        index:       0, 1, 2, 3
        """
        if not self.hetero:
            self.syntax_rec = None
            return
        self.syntax_rec = {
            '__len__': 10,
            'xyz': [0, 2],
            'block_start': 3,
            'block_center': [4, 6],
            'block_scale': 7,
            'pc_start': 8,
        }

    def generate_syntax_cw(self, net_config, **kwargs):
        """Codewords have to be arranged at the beginning, the rest can be swapped.

        Code syntax: codeword, block_pntcnt, block_center, block_scale, pc_start
        index:       0 ~ 511,  512,          513 ~ 515,    516,         517
        Everything after the codeword (index >= __meta_idx__) is meta data.
        """
        if not self.hetero:
            self.syntax_cw = None
            return
        # Codeword length comes from the entropy bottleneck configuration
        len_cw = net_config['modules']['entropy_bottleneck']
        self.syntax_cw = {
            '__len__': len_cw + 7,
            '__meta_idx__': len_cw,
            'cw': [0, len_cw - 1],
            'block_pntcnt': len_cw,
            'block_center': [len_cw + 1, len_cw + 3],
            'block_scale': len_cw + 4,
            'pc_start': len_cw + 5,
        }


def syn_slc(syntax, attr):
    """Create a slice from a syntax and a key"""

    span = syntax[attr]
    return slice(span[0], span[1] + 1)
${HOME_DIR}/datasets/ford/ford_02_q1mm/Ford_02_vox1mm-0101.ply ${HOME_DIR}/datasets/ford/ford_02_q1mm/Ford_02_vox1mm-1599.ply ${HOME_DIR}/datasets/ford/ford_03_q1mm/Ford_03_vox1mm-0200.ply ${HOME_DIR}/datasets/ford/ford_03_q1mm/Ford_03_vox1mm-0201.ply ${HOME_DIR}/datasets/ford/ford_03_q1mm/Ford_03_vox1mm-1699.ply" 9 | COMPUTE_D2="True" 10 | MPEG_REPORT="mpeg_report.csv" 11 | MPEG_REPORT_SEQUENCE="True" # view the input point clouds as sequences 12 | WRITE_PREFIX="grasp_" 13 | PRINT_FREQ="1" 14 | PC_WRITE_FREQ="-1" 15 | TF_SUMMARY="False" 16 | REMOVE_COMPRESSED_FILES="True" 17 | PEAK_VALUE="30000" 18 | BIT_DEPTH="18" 19 | SLICE="0" 20 | LOG_FILE=$(date); LOG_FILE=log_${LOG_FILE//' '/$'_'}.txt 21 | -------------------------------------------------------------------------------- /scripts/bench_grasp/bench_lidar_ford/bench_lidar_ford_r01.sh: -------------------------------------------------------------------------------- 1 | PY_NAME="${HOME_DIR}/experiments/bench.py" 2 | 3 | # Main configurations 4 | CHECKPOINTS="${HOME_DIR}/results/grasp_lidar_ford/r01.pth" 5 | CHECKPOINT_NET_CONFIG="True" 6 | CODEC_CONFIG="${HOME_DIR}/config/codec_config/grasp_ford.yaml" 7 | INPUT="${HOME_DIR}/datasets/ford/ford_02_q1mm ${HOME_DIR}/datasets/ford/ford_03_q1mm" 8 | # INPUT="${HOME_DIR}/datasets/ford/ford_02_q1mm/Ford_02_vox1mm-0100.ply ${HOME_DIR}/datasets/ford/ford_02_q1mm/Ford_02_vox1mm-0101.ply ${HOME_DIR}/datasets/ford/ford_02_q1mm/Ford_02_vox1mm-1599.ply ${HOME_DIR}/datasets/ford/ford_03_q1mm/Ford_03_vox1mm-0200.ply ${HOME_DIR}/datasets/ford/ford_03_q1mm/Ford_03_vox1mm-0201.ply ${HOME_DIR}/datasets/ford/ford_03_q1mm/Ford_03_vox1mm-1699.ply" 9 | COMPUTE_D2="True" 10 | MPEG_REPORT="mpeg_report.csv" 11 | MPEG_REPORT_SEQUENCE="True" # view the input point clouds as sequences 12 | WRITE_PREFIX="grasp_" 13 | PRINT_FREQ="1" 14 | PC_WRITE_FREQ="-1" 15 | TF_SUMMARY="False" 16 | REMOVE_COMPRESSED_FILES="True" 17 | PEAK_VALUE="30000" 18 | BIT_DEPTH="18" 19 | SLICE="0" 20 | LOG_FILE=$(date); 
LOG_FILE=log_${LOG_FILE//' '/$'_'}.txt 21 | -------------------------------------------------------------------------------- /scripts/bench_grasp/bench_lidar_ford/bench_lidar_ford_r02.sh: -------------------------------------------------------------------------------- 1 | PY_NAME="${HOME_DIR}/experiments/bench.py" 2 | 3 | # Main configurations 4 | CHECKPOINTS="${HOME_DIR}/results/grasp_lidar_ford/r02.pth" 5 | CHECKPOINT_NET_CONFIG="True" 6 | CODEC_CONFIG="${HOME_DIR}/config/codec_config/grasp_ford.yaml" 7 | INPUT="${HOME_DIR}/datasets/ford/ford_02_q1mm ${HOME_DIR}/datasets/ford/ford_03_q1mm" 8 | # INPUT="${HOME_DIR}/datasets/ford/ford_02_q1mm/Ford_02_vox1mm-0100.ply ${HOME_DIR}/datasets/ford/ford_02_q1mm/Ford_02_vox1mm-0101.ply ${HOME_DIR}/datasets/ford/ford_02_q1mm/Ford_02_vox1mm-1599.ply ${HOME_DIR}/datasets/ford/ford_03_q1mm/Ford_03_vox1mm-0200.ply ${HOME_DIR}/datasets/ford/ford_03_q1mm/Ford_03_vox1mm-0201.ply ${HOME_DIR}/datasets/ford/ford_03_q1mm/Ford_03_vox1mm-1699.ply" 9 | COMPUTE_D2="True" 10 | MPEG_REPORT="mpeg_report.csv" 11 | MPEG_REPORT_SEQUENCE="True" # view the input point clouds as sequences 12 | WRITE_PREFIX="grasp_" 13 | PRINT_FREQ="1" 14 | PC_WRITE_FREQ="-1" 15 | TF_SUMMARY="False" 16 | REMOVE_COMPRESSED_FILES="True" 17 | PEAK_VALUE="30000" 18 | BIT_DEPTH="18" 19 | SLICE="0" 20 | LOG_FILE=$(date); LOG_FILE=log_${LOG_FILE//' '/$'_'}.txt 21 | -------------------------------------------------------------------------------- /scripts/bench_grasp/bench_lidar_ford/bench_lidar_ford_r03.sh: -------------------------------------------------------------------------------- 1 | PY_NAME="${HOME_DIR}/experiments/bench.py" 2 | 3 | # Main configurations 4 | CHECKPOINTS="${HOME_DIR}/results/grasp_lidar_ford/r03.pth" 5 | CHECKPOINT_NET_CONFIG="True" 6 | CODEC_CONFIG="${HOME_DIR}/config/codec_config/grasp_ford.yaml" 7 | INPUT="${HOME_DIR}/datasets/ford/ford_02_q1mm ${HOME_DIR}/datasets/ford/ford_03_q1mm" 8 | # 
INPUT="${HOME_DIR}/datasets/ford/ford_02_q1mm/Ford_02_vox1mm-0100.ply ${HOME_DIR}/datasets/ford/ford_02_q1mm/Ford_02_vox1mm-0101.ply ${HOME_DIR}/datasets/ford/ford_02_q1mm/Ford_02_vox1mm-1599.ply ${HOME_DIR}/datasets/ford/ford_03_q1mm/Ford_03_vox1mm-0200.ply ${HOME_DIR}/datasets/ford/ford_03_q1mm/Ford_03_vox1mm-0201.ply ${HOME_DIR}/datasets/ford/ford_03_q1mm/Ford_03_vox1mm-1699.ply" 9 | COMPUTE_D2="True" 10 | MPEG_REPORT="mpeg_report.csv" 11 | MPEG_REPORT_SEQUENCE="True" # view the input point clouds as sequences 12 | WRITE_PREFIX="grasp_" 13 | PRINT_FREQ="1" 14 | PC_WRITE_FREQ="-1" 15 | TF_SUMMARY="False" 16 | REMOVE_COMPRESSED_FILES="True" 17 | PEAK_VALUE="30000" 18 | BIT_DEPTH="18" 19 | SLICE="0" 20 | LOG_FILE=$(date); LOG_FILE=log_${LOG_FILE//' '/$'_'}.txt 21 | -------------------------------------------------------------------------------- /scripts/bench_grasp/bench_lidar_ford/bench_lidar_ford_r04.sh: -------------------------------------------------------------------------------- 1 | PY_NAME="${HOME_DIR}/experiments/bench.py" 2 | 3 | # Main configurations 4 | CHECKPOINTS="${HOME_DIR}/results/grasp_lidar_ford/r04.pth" 5 | CHECKPOINT_NET_CONFIG="True" 6 | CODEC_CONFIG="${HOME_DIR}/config/codec_config/grasp_ford.yaml" 7 | INPUT="${HOME_DIR}/datasets/ford/ford_02_q1mm ${HOME_DIR}/datasets/ford/ford_03_q1mm" 8 | # INPUT="${HOME_DIR}/datasets/ford/ford_02_q1mm/Ford_02_vox1mm-0100.ply ${HOME_DIR}/datasets/ford/ford_02_q1mm/Ford_02_vox1mm-0101.ply ${HOME_DIR}/datasets/ford/ford_02_q1mm/Ford_02_vox1mm-1599.ply ${HOME_DIR}/datasets/ford/ford_03_q1mm/Ford_03_vox1mm-0200.ply ${HOME_DIR}/datasets/ford/ford_03_q1mm/Ford_03_vox1mm-0201.ply ${HOME_DIR}/datasets/ford/ford_03_q1mm/Ford_03_vox1mm-1699.ply" 9 | COMPUTE_D2="True" 10 | MPEG_REPORT="mpeg_report.csv" 11 | MPEG_REPORT_SEQUENCE="True" # view the input point clouds as sequences 12 | WRITE_PREFIX="grasp_" 13 | PRINT_FREQ="1" 14 | PC_WRITE_FREQ="-1" 15 | TF_SUMMARY="False" 16 | REMOVE_COMPRESSED_FILES="True" 17 | 
PEAK_VALUE="30000" 18 | BIT_DEPTH="18" 19 | SLICE="0" 20 | LOG_FILE=$(date); LOG_FILE=log_${LOG_FILE//' '/$'_'}.txt 21 | -------------------------------------------------------------------------------- /scripts/bench_grasp/bench_lidar_ford/bench_lidar_ford_r05.sh: -------------------------------------------------------------------------------- 1 | PY_NAME="${HOME_DIR}/experiments/bench.py" 2 | 3 | # Main configurations 4 | CHECKPOINTS="${HOME_DIR}/results/grasp_lidar_ford/r05.pth" 5 | CHECKPOINT_NET_CONFIG="True" 6 | CODEC_CONFIG="${HOME_DIR}/config/codec_config/grasp_ford.yaml" 7 | INPUT="${HOME_DIR}/datasets/ford/ford_02_q1mm ${HOME_DIR}/datasets/ford/ford_03_q1mm" 8 | # INPUT="${HOME_DIR}/datasets/ford/ford_02_q1mm/Ford_02_vox1mm-0100.ply ${HOME_DIR}/datasets/ford/ford_02_q1mm/Ford_02_vox1mm-0101.ply ${HOME_DIR}/datasets/ford/ford_02_q1mm/Ford_02_vox1mm-1599.ply ${HOME_DIR}/datasets/ford/ford_03_q1mm/Ford_03_vox1mm-0200.ply ${HOME_DIR}/datasets/ford/ford_03_q1mm/Ford_03_vox1mm-0201.ply ${HOME_DIR}/datasets/ford/ford_03_q1mm/Ford_03_vox1mm-1699.ply" 9 | COMPUTE_D2="True" 10 | MPEG_REPORT="mpeg_report.csv" 11 | MPEG_REPORT_SEQUENCE="True" # view the input point clouds as sequences 12 | WRITE_PREFIX="grasp_" 13 | PRINT_FREQ="1" 14 | PC_WRITE_FREQ="-1" 15 | TF_SUMMARY="False" 16 | REMOVE_COMPRESSED_FILES="True" 17 | PEAK_VALUE="30000" 18 | BIT_DEPTH="18" 19 | SLICE="0" 20 | LOG_FILE=$(date); LOG_FILE=log_${LOG_FILE//' '/$'_'}.txt 21 | -------------------------------------------------------------------------------- /scripts/bench_grasp/bench_surface_dense/bench_surface_dense_all.sh: -------------------------------------------------------------------------------- 1 | PY_NAME="${HOME_DIR}/experiments/bench.py" 2 | 3 | # Main configurations 4 | CHECKPOINTS="${HOME_DIR}/results/grasp_surface_dense/r01.pth ${HOME_DIR}/results/grasp_surface_dense/r02.pth ${HOME_DIR}/results/grasp_surface_dense/r03.pth ${HOME_DIR}/results/grasp_surface_dense/r04.pth 
${HOME_DIR}/results/grasp_surface_dense/r05.pth" 5 | CHECKPOINT_NET_CONFIG="True" 6 | CODEC_CONFIG="${HOME_DIR}/config/codec_config/grasp_surface.yaml" 7 | INPUT="${HOME_DIR}/datasets/cat1/A/soldier_viewdep_vox12.ply ${HOME_DIR}/datasets/cat1/A/boxer_viewdep_vox12.ply ${HOME_DIR}/datasets/cat1/A/Facade_00009_vox12.ply ${HOME_DIR}/datasets/cat1/A/House_without_roof_00057_vox12.ply" 8 | COMPUTE_D2="True" 9 | MPEG_REPORT="mpeg_report.csv" 10 | WRITE_PREFIX="grasp_" 11 | PRINT_FREQ="1" 12 | PC_WRITE_FREQ="-1" 13 | TF_SUMMARY="False" 14 | REMOVE_COMPRESSED_FILES="True" 15 | PEAK_VALUE="4095 4095 4095 4095" 16 | BIT_DEPTH="12 12 12 12" 17 | SLICE="0" 18 | LOG_FILE=$(date); LOG_FILE=log_${LOG_FILE//' '/$'_'}.txt 19 | -------------------------------------------------------------------------------- /scripts/bench_grasp/bench_surface_dense/bench_surface_dense_r01.sh: -------------------------------------------------------------------------------- 1 | PY_NAME="${HOME_DIR}/experiments/bench.py" 2 | 3 | # Main configurations 4 | CHECKPOINTS="${HOME_DIR}/results/grasp_surface_dense/r01.pth" 5 | CHECKPOINT_NET_CONFIG="True" 6 | CODEC_CONFIG="${HOME_DIR}/config/codec_config/grasp_surface.yaml" 7 | INPUT="${HOME_DIR}/datasets/cat1/A/soldier_viewdep_vox12.ply ${HOME_DIR}/datasets/cat1/A/boxer_viewdep_vox12.ply ${HOME_DIR}/datasets/cat1/A/Facade_00009_vox12.ply ${HOME_DIR}/datasets/cat1/A/House_without_roof_00057_vox12.ply" 8 | COMPUTE_D2="True" 9 | MPEG_REPORT="mpeg_report.csv" 10 | WRITE_PREFIX="grasp_" 11 | PRINT_FREQ="1" 12 | PC_WRITE_FREQ="-1" 13 | TF_SUMMARY="False" 14 | REMOVE_COMPRESSED_FILES="True" 15 | PEAK_VALUE="4095 4095 4095 4095" 16 | BIT_DEPTH="12 12 12 12" 17 | SLICE="0" 18 | LOG_FILE=$(date); LOG_FILE=log_${LOG_FILE//' '/$'_'}.txt 19 | -------------------------------------------------------------------------------- /scripts/bench_grasp/bench_surface_dense/bench_surface_dense_r02.sh: -------------------------------------------------------------------------------- 1 | 
PY_NAME="${HOME_DIR}/experiments/bench.py" 2 | 3 | # Main configurations 4 | CHECKPOINTS="${HOME_DIR}/results/grasp_surface_dense/r02.pth" 5 | CHECKPOINT_NET_CONFIG="True" 6 | CODEC_CONFIG="${HOME_DIR}/config/codec_config/grasp_surface.yaml" 7 | INPUT="${HOME_DIR}/datasets/cat1/A/soldier_viewdep_vox12.ply ${HOME_DIR}/datasets/cat1/A/boxer_viewdep_vox12.ply ${HOME_DIR}/datasets/cat1/A/Facade_00009_vox12.ply ${HOME_DIR}/datasets/cat1/A/House_without_roof_00057_vox12.ply" 8 | COMPUTE_D2="True" 9 | MPEG_REPORT="mpeg_report.csv" 10 | WRITE_PREFIX="grasp_" 11 | PRINT_FREQ="1" 12 | PC_WRITE_FREQ="-1" 13 | TF_SUMMARY="False" 14 | REMOVE_COMPRESSED_FILES="True" 15 | PEAK_VALUE="4095 4095 4095 4095" 16 | BIT_DEPTH="12 12 12 12" 17 | SLICE="0" 18 | LOG_FILE=$(date); LOG_FILE=log_${LOG_FILE//' '/$'_'}.txt 19 | -------------------------------------------------------------------------------- /scripts/bench_grasp/bench_surface_dense/bench_surface_dense_r03.sh: -------------------------------------------------------------------------------- 1 | PY_NAME="${HOME_DIR}/experiments/bench.py" 2 | 3 | # Main configurations 4 | CHECKPOINTS="${HOME_DIR}/results/grasp_surface_dense/r03.pth" 5 | CHECKPOINT_NET_CONFIG="True" 6 | CODEC_CONFIG="${HOME_DIR}/config/codec_config/grasp_surface.yaml" 7 | INPUT="${HOME_DIR}/datasets/cat1/A/soldier_viewdep_vox12.ply ${HOME_DIR}/datasets/cat1/A/boxer_viewdep_vox12.ply ${HOME_DIR}/datasets/cat1/A/Facade_00009_vox12.ply ${HOME_DIR}/datasets/cat1/A/House_without_roof_00057_vox12.ply" 8 | COMPUTE_D2="True" 9 | MPEG_REPORT="mpeg_report.csv" 10 | WRITE_PREFIX="grasp_" 11 | PRINT_FREQ="1" 12 | PC_WRITE_FREQ="-1" 13 | TF_SUMMARY="False" 14 | REMOVE_COMPRESSED_FILES="True" 15 | PEAK_VALUE="4095 4095 4095 4095" 16 | BIT_DEPTH="12 12 12 12" 17 | SLICE="0" 18 | LOG_FILE=$(date); LOG_FILE=log_${LOG_FILE//' '/$'_'}.txt 19 | -------------------------------------------------------------------------------- 
/scripts/bench_grasp/bench_surface_dense/bench_surface_dense_r04.sh: -------------------------------------------------------------------------------- 1 | PY_NAME="${HOME_DIR}/experiments/bench.py" 2 | 3 | # Main configurations 4 | CHECKPOINTS="${HOME_DIR}/results/grasp_surface_dense/r04.pth" 5 | CHECKPOINT_NET_CONFIG="True" 6 | CODEC_CONFIG="${HOME_DIR}/config/codec_config/grasp_surface.yaml" 7 | INPUT="${HOME_DIR}/datasets/cat1/A/soldier_viewdep_vox12.ply ${HOME_DIR}/datasets/cat1/A/boxer_viewdep_vox12.ply ${HOME_DIR}/datasets/cat1/A/Facade_00009_vox12.ply ${HOME_DIR}/datasets/cat1/A/House_without_roof_00057_vox12.ply" 8 | COMPUTE_D2="True" 9 | MPEG_REPORT="mpeg_report.csv" 10 | WRITE_PREFIX="grasp_" 11 | PRINT_FREQ="1" 12 | PC_WRITE_FREQ="-1" 13 | TF_SUMMARY="False" 14 | REMOVE_COMPRESSED_FILES="True" 15 | PEAK_VALUE="4095 4095 4095 4095" 16 | BIT_DEPTH="12 12 12 12" 17 | SLICE="0" 18 | LOG_FILE=$(date); LOG_FILE=log_${LOG_FILE//' '/$'_'}.txt 19 | -------------------------------------------------------------------------------- /scripts/bench_grasp/bench_surface_dense/bench_surface_dense_r05.sh: -------------------------------------------------------------------------------- 1 | PY_NAME="${HOME_DIR}/experiments/bench.py" 2 | 3 | # Main configurations 4 | CHECKPOINTS="${HOME_DIR}/results/grasp_surface_dense/r05.pth" 5 | CHECKPOINT_NET_CONFIG="True" 6 | CODEC_CONFIG="${HOME_DIR}/config/codec_config/grasp_surface.yaml" 7 | INPUT="${HOME_DIR}/datasets/cat1/A/soldier_viewdep_vox12.ply ${HOME_DIR}/datasets/cat1/A/boxer_viewdep_vox12.ply ${HOME_DIR}/datasets/cat1/A/Facade_00009_vox12.ply ${HOME_DIR}/datasets/cat1/A/House_without_roof_00057_vox12.ply" 8 | COMPUTE_D2="True" 9 | MPEG_REPORT="mpeg_report.csv" 10 | WRITE_PREFIX="grasp_" 11 | PRINT_FREQ="1" 12 | PC_WRITE_FREQ="-1" 13 | TF_SUMMARY="False" 14 | REMOVE_COMPRESSED_FILES="True" 15 | PEAK_VALUE="4095 4095 4095 4095" 16 | BIT_DEPTH="12 12 12 12" 17 | SLICE="0" 18 | LOG_FILE=$(date); LOG_FILE=log_${LOG_FILE//' 
'/$'_'}.txt 19 | -------------------------------------------------------------------------------- /scripts/bench_grasp/bench_surface_solid/bench_surface_solid_all.sh: -------------------------------------------------------------------------------- 1 | PY_NAME="${HOME_DIR}/experiments/bench.py" 2 | 3 | # Main configurations 4 | CHECKPOINTS="${HOME_DIR}/results/grasp_surface_solid/r01.pth ${HOME_DIR}/results/grasp_surface_solid/r02.pth ${HOME_DIR}/results/grasp_surface_solid/r03.pth ${HOME_DIR}/results/grasp_surface_solid/r04.pth ${HOME_DIR}/results/grasp_surface_solid/r05.pth" 5 | CHECKPOINT_NET_CONFIG="True" 6 | CODEC_CONFIG="${HOME_DIR}/config/codec_config/grasp_surface.yaml" 7 | INPUT="${HOME_DIR}/datasets/cat1/A/queen_0200.ply ${HOME_DIR}/datasets/cat1/A/soldier_vox10_0690.ply ${HOME_DIR}/datasets/cat1/A/Facade_00064_vox11.ply ${HOME_DIR}/datasets/cat1/A/dancer_vox11_00000001.ply ${HOME_DIR}/datasets/cat1/A/Thaidancer_viewdep_vox12.ply" 8 | COMPUTE_D2="True" 9 | MPEG_REPORT="mpeg_report.csv" 10 | WRITE_PREFIX="grasp_" 11 | PRINT_FREQ="1" 12 | PC_WRITE_FREQ="-1" 13 | TF_SUMMARY="False" 14 | REMOVE_COMPRESSED_FILES="True" 15 | PEAK_VALUE="1023 1023 2047 2047 4095" 16 | BIT_DEPTH="10 10 11 11 12" 17 | SLICE="0" 18 | LOG_FILE=$(date); LOG_FILE=log_${LOG_FILE//' '/$'_'}.txt 19 | -------------------------------------------------------------------------------- /scripts/bench_grasp/bench_surface_solid/bench_surface_solid_r01.sh: -------------------------------------------------------------------------------- 1 | PY_NAME="${HOME_DIR}/experiments/bench.py" 2 | 3 | # Main configurations 4 | CHECKPOINTS="${HOME_DIR}/results/grasp_surface_solid/r01.pth" 5 | CHECKPOINT_NET_CONFIG="True" 6 | CODEC_CONFIG="${HOME_DIR}/config/codec_config/grasp_surface.yaml" 7 | INPUT="${HOME_DIR}/datasets/cat1/A/queen_0200.ply ${HOME_DIR}/datasets/cat1/A/soldier_vox10_0690.ply ${HOME_DIR}/datasets/cat1/A/Facade_00064_vox11.ply ${HOME_DIR}/datasets/cat1/A/dancer_vox11_00000001.ply 
${HOME_DIR}/datasets/cat1/A/Thaidancer_viewdep_vox12.ply" 8 | COMPUTE_D2="True" 9 | MPEG_REPORT="mpeg_report.csv" 10 | WRITE_PREFIX="grasp_" 11 | PRINT_FREQ="1" 12 | PC_WRITE_FREQ="-1" 13 | TF_SUMMARY="False" 14 | REMOVE_COMPRESSED_FILES="True" 15 | PEAK_VALUE="1023 1023 2047 2047 4095" 16 | BIT_DEPTH="10 10 11 11 12" 17 | SLICE="0" 18 | LOG_FILE=$(date); LOG_FILE=log_${LOG_FILE//' '/$'_'}.txt 19 | -------------------------------------------------------------------------------- /scripts/bench_grasp/bench_surface_solid/bench_surface_solid_r02.sh: -------------------------------------------------------------------------------- 1 | PY_NAME="${HOME_DIR}/experiments/bench.py" 2 | 3 | # Main configurations 4 | CHECKPOINTS="${HOME_DIR}/results/grasp_surface_solid/r02.pth" 5 | CHECKPOINT_NET_CONFIG="True" 6 | CODEC_CONFIG="${HOME_DIR}/config/codec_config/grasp_surface.yaml" 7 | INPUT="${HOME_DIR}/datasets/cat1/A/queen_0200.ply ${HOME_DIR}/datasets/cat1/A/soldier_vox10_0690.ply ${HOME_DIR}/datasets/cat1/A/Facade_00064_vox11.ply ${HOME_DIR}/datasets/cat1/A/dancer_vox11_00000001.ply ${HOME_DIR}/datasets/cat1/A/Thaidancer_viewdep_vox12.ply" 8 | COMPUTE_D2="True" 9 | MPEG_REPORT="mpeg_report.csv" 10 | WRITE_PREFIX="grasp_" 11 | PRINT_FREQ="1" 12 | PC_WRITE_FREQ="-1" 13 | TF_SUMMARY="False" 14 | REMOVE_COMPRESSED_FILES="True" 15 | PEAK_VALUE="1023 1023 2047 2047 4095" 16 | BIT_DEPTH="10 10 11 11 12" 17 | SLICE="0" 18 | LOG_FILE=$(date); LOG_FILE=log_${LOG_FILE//' '/$'_'}.txt 19 | -------------------------------------------------------------------------------- /scripts/bench_grasp/bench_surface_solid/bench_surface_solid_r03.sh: -------------------------------------------------------------------------------- 1 | PY_NAME="${HOME_DIR}/experiments/bench.py" 2 | 3 | # Main configurations 4 | CHECKPOINTS="${HOME_DIR}/results/grasp_surface_solid/r03.pth" 5 | CHECKPOINT_NET_CONFIG="True" 6 | CODEC_CONFIG="${HOME_DIR}/config/codec_config/grasp_surface.yaml" 7 | 
INPUT="${HOME_DIR}/datasets/cat1/A/queen_0200.ply ${HOME_DIR}/datasets/cat1/A/soldier_vox10_0690.ply ${HOME_DIR}/datasets/cat1/A/Facade_00064_vox11.ply ${HOME_DIR}/datasets/cat1/A/dancer_vox11_00000001.ply ${HOME_DIR}/datasets/cat1/A/Thaidancer_viewdep_vox12.ply" 8 | COMPUTE_D2="True" 9 | MPEG_REPORT="mpeg_report.csv" 10 | WRITE_PREFIX="grasp_" 11 | PRINT_FREQ="1" 12 | PC_WRITE_FREQ="-1" 13 | TF_SUMMARY="False" 14 | REMOVE_COMPRESSED_FILES="True" 15 | PEAK_VALUE="1023 1023 2047 2047 4095" 16 | BIT_DEPTH="10 10 11 11 12" 17 | SLICE="0" 18 | LOG_FILE=$(date); LOG_FILE=log_${LOG_FILE//' '/$'_'}.txt 19 | -------------------------------------------------------------------------------- /scripts/bench_grasp/bench_surface_solid/bench_surface_solid_r04.sh: -------------------------------------------------------------------------------- 1 | PY_NAME="${HOME_DIR}/experiments/bench.py" 2 | 3 | # Main configurations 4 | CHECKPOINTS="${HOME_DIR}/results/grasp_surface_solid/r04.pth" 5 | CHECKPOINT_NET_CONFIG="True" 6 | CODEC_CONFIG="${HOME_DIR}/config/codec_config/grasp_surface.yaml" 7 | INPUT="${HOME_DIR}/datasets/cat1/A/queen_0200.ply ${HOME_DIR}/datasets/cat1/A/soldier_vox10_0690.ply ${HOME_DIR}/datasets/cat1/A/Facade_00064_vox11.ply ${HOME_DIR}/datasets/cat1/A/dancer_vox11_00000001.ply ${HOME_DIR}/datasets/cat1/A/Thaidancer_viewdep_vox12.ply" 8 | COMPUTE_D2="True" 9 | MPEG_REPORT="mpeg_report.csv" 10 | WRITE_PREFIX="grasp_" 11 | PRINT_FREQ="1" 12 | PC_WRITE_FREQ="-1" 13 | TF_SUMMARY="False" 14 | REMOVE_COMPRESSED_FILES="True" 15 | PEAK_VALUE="1023 1023 2047 2047 4095" 16 | BIT_DEPTH="10 10 11 11 12" 17 | SLICE="0" 18 | LOG_FILE=$(date); LOG_FILE=log_${LOG_FILE//' '/$'_'}.txt 19 | -------------------------------------------------------------------------------- /scripts/bench_grasp/bench_surface_solid/bench_surface_solid_r05.sh: -------------------------------------------------------------------------------- 1 | PY_NAME="${HOME_DIR}/experiments/bench.py" 2 | 3 | # Main 
configurations 4 | CHECKPOINTS="${HOME_DIR}/results/grasp_surface_solid/r05.pth" 5 | CHECKPOINT_NET_CONFIG="True" 6 | CODEC_CONFIG="${HOME_DIR}/config/codec_config/grasp_surface.yaml" 7 | INPUT="${HOME_DIR}/datasets/cat1/A/queen_0200.ply ${HOME_DIR}/datasets/cat1/A/soldier_vox10_0690.ply ${HOME_DIR}/datasets/cat1/A/Facade_00064_vox11.ply ${HOME_DIR}/datasets/cat1/A/dancer_vox11_00000001.ply ${HOME_DIR}/datasets/cat1/A/Thaidancer_viewdep_vox12.ply" 8 | COMPUTE_D2="True" 9 | MPEG_REPORT="mpeg_report.csv" 10 | WRITE_PREFIX="grasp_" 11 | PRINT_FREQ="1" 12 | PC_WRITE_FREQ="-1" 13 | TF_SUMMARY="False" 14 | REMOVE_COMPRESSED_FILES="True" 15 | PEAK_VALUE="1023 1023 2047 2047 4095" 16 | BIT_DEPTH="10 10 11 11 12" 17 | SLICE="0" 18 | LOG_FILE=$(date); LOG_FILE=log_${LOG_FILE//' '/$'_'}.txt 19 | -------------------------------------------------------------------------------- /scripts/bench_grasp/bench_surface_sparse/bench_surface_sparse_all.sh: -------------------------------------------------------------------------------- 1 | PY_NAME="${HOME_DIR}/experiments/bench.py" 2 | 3 | # Main configurations 4 | CHECKPOINTS="${HOME_DIR}/results/grasp_surface_sparse/r01.pth ${HOME_DIR}/results/grasp_surface_sparse/r02.pth ${HOME_DIR}/results/grasp_surface_sparse/r03.pth ${HOME_DIR}/results/grasp_surface_sparse/r04.pth ${HOME_DIR}/results/grasp_surface_sparse/r05.pth" 5 | CHECKPOINT_NET_CONFIG="True" 6 | CODEC_CONFIG="${HOME_DIR}/config/codec_config/grasp_surface.yaml" 7 | INPUT="${HOME_DIR}/datasets/cat1/B/Arco_Valentino_Dense_vox12.ply ${HOME_DIR}/datasets/cat1/B/Staue_Klimt_vox12.ply ${HOME_DIR}/datasets/cat1/A/Shiva_00035_vox12.ply ${HOME_DIR}/datasets/cat1/A/Egyptian_mask_vox12.ply ${HOME_DIR}/datasets/cat1/A/ULB_Unicorn_vox13.ply" 8 | COMPUTE_D2="True" 9 | MPEG_REPORT="mpeg_report.csv" 10 | WRITE_PREFIX="grasp_" 11 | PRINT_FREQ="1" 12 | PC_WRITE_FREQ="-1" 13 | TF_SUMMARY="False" 14 | REMOVE_COMPRESSED_FILES="True" 15 | PEAK_VALUE="4095 4095 4095 4095 8191" 16 | BIT_DEPTH="12 12 12 
12 13" 17 | SLICE="0" 18 | LOG_FILE=$(date); LOG_FILE=log_${LOG_FILE//' '/$'_'}.txt 19 | -------------------------------------------------------------------------------- /scripts/bench_grasp/bench_surface_sparse/bench_surface_sparse_r01.sh: -------------------------------------------------------------------------------- 1 | PY_NAME="${HOME_DIR}/experiments/bench.py" 2 | 3 | # Main configurations 4 | CHECKPOINTS="${HOME_DIR}/results/grasp_surface_sparse/r01.pth" 5 | CHECKPOINT_NET_CONFIG="True" 6 | CODEC_CONFIG="${HOME_DIR}/config/codec_config/grasp_surface.yaml" 7 | INPUT="${HOME_DIR}/datasets/cat1/B/Arco_Valentino_Dense_vox12.ply ${HOME_DIR}/datasets/cat1/B/Staue_Klimt_vox12.ply ${HOME_DIR}/datasets/cat1/A/Shiva_00035_vox12.ply ${HOME_DIR}/datasets/cat1/A/Egyptian_mask_vox12.ply ${HOME_DIR}/datasets/cat1/A/ULB_Unicorn_vox13.ply" 8 | COMPUTE_D2="True" 9 | MPEG_REPORT="mpeg_report.csv" 10 | WRITE_PREFIX="grasp_" 11 | PRINT_FREQ="1" 12 | PC_WRITE_FREQ="-1" 13 | TF_SUMMARY="False" 14 | REMOVE_COMPRESSED_FILES="True" 15 | PEAK_VALUE="4095 4095 4095 4095 8191" 16 | BIT_DEPTH="12 12 12 12 13" 17 | SLICE="0" 18 | LOG_FILE=$(date); LOG_FILE=log_${LOG_FILE//' '/$'_'}.txt 19 | -------------------------------------------------------------------------------- /scripts/bench_grasp/bench_surface_sparse/bench_surface_sparse_r02.sh: -------------------------------------------------------------------------------- 1 | PY_NAME="${HOME_DIR}/experiments/bench.py" 2 | 3 | # Main configurations 4 | CHECKPOINTS="${HOME_DIR}/results/grasp_surface_sparse/r02.pth" 5 | CHECKPOINT_NET_CONFIG="True" 6 | CODEC_CONFIG="${HOME_DIR}/config/codec_config/grasp_surface.yaml" 7 | INPUT="${HOME_DIR}/datasets/cat1/B/Arco_Valentino_Dense_vox12.ply ${HOME_DIR}/datasets/cat1/B/Staue_Klimt_vox12.ply ${HOME_DIR}/datasets/cat1/A/Shiva_00035_vox12.ply ${HOME_DIR}/datasets/cat1/A/Egyptian_mask_vox12.ply ${HOME_DIR}/datasets/cat1/A/ULB_Unicorn_vox13.ply" 8 | COMPUTE_D2="True" 9 | MPEG_REPORT="mpeg_report.csv" 10 | 
WRITE_PREFIX="grasp_" 11 | PRINT_FREQ="1" 12 | PC_WRITE_FREQ="-1" 13 | TF_SUMMARY="False" 14 | REMOVE_COMPRESSED_FILES="True" 15 | PEAK_VALUE="4095 4095 4095 4095 8191" 16 | BIT_DEPTH="12 12 12 12 13" 17 | SLICE="0" 18 | LOG_FILE=$(date); LOG_FILE=log_${LOG_FILE//' '/$'_'}.txt 19 | -------------------------------------------------------------------------------- /scripts/bench_grasp/bench_surface_sparse/bench_surface_sparse_r03.sh: -------------------------------------------------------------------------------- 1 | PY_NAME="${HOME_DIR}/experiments/bench.py" 2 | 3 | # Main configurations 4 | CHECKPOINTS="${HOME_DIR}/results/grasp_surface_sparse/r03.pth" 5 | CHECKPOINT_NET_CONFIG="True" 6 | CODEC_CONFIG="${HOME_DIR}/config/codec_config/grasp_surface.yaml" 7 | INPUT="${HOME_DIR}/datasets/cat1/B/Arco_Valentino_Dense_vox12.ply ${HOME_DIR}/datasets/cat1/B/Staue_Klimt_vox12.ply ${HOME_DIR}/datasets/cat1/A/Shiva_00035_vox12.ply ${HOME_DIR}/datasets/cat1/A/Egyptian_mask_vox12.ply ${HOME_DIR}/datasets/cat1/A/ULB_Unicorn_vox13.ply" 8 | COMPUTE_D2="True" 9 | MPEG_REPORT="mpeg_report.csv" 10 | WRITE_PREFIX="grasp_" 11 | PRINT_FREQ="1" 12 | PC_WRITE_FREQ="-1" 13 | TF_SUMMARY="False" 14 | REMOVE_COMPRESSED_FILES="True" 15 | PEAK_VALUE="4095 4095 4095 4095 8191" 16 | BIT_DEPTH="12 12 12 12 13" 17 | SLICE="0" 18 | LOG_FILE=$(date); LOG_FILE=log_${LOG_FILE//' '/$'_'}.txt 19 | -------------------------------------------------------------------------------- /scripts/bench_grasp/bench_surface_sparse/bench_surface_sparse_r04.sh: -------------------------------------------------------------------------------- 1 | PY_NAME="${HOME_DIR}/experiments/bench.py" 2 | 3 | # Main configurations 4 | CHECKPOINTS="${HOME_DIR}/results/grasp_surface_sparse/r04.pth" 5 | CHECKPOINT_NET_CONFIG="True" 6 | CODEC_CONFIG="${HOME_DIR}/config/codec_config/grasp_surface.yaml" 7 | INPUT="${HOME_DIR}/datasets/cat1/B/Arco_Valentino_Dense_vox12.ply ${HOME_DIR}/datasets/cat1/B/Staue_Klimt_vox12.ply 
${HOME_DIR}/datasets/cat1/A/Shiva_00035_vox12.ply ${HOME_DIR}/datasets/cat1/A/Egyptian_mask_vox12.ply ${HOME_DIR}/datasets/cat1/A/ULB_Unicorn_vox13.ply" 8 | COMPUTE_D2="True" 9 | MPEG_REPORT="mpeg_report.csv" 10 | WRITE_PREFIX="grasp_" 11 | PRINT_FREQ="1" 12 | PC_WRITE_FREQ="-1" 13 | TF_SUMMARY="False" 14 | REMOVE_COMPRESSED_FILES="True" 15 | PEAK_VALUE="4095 4095 4095 4095 8191" 16 | BIT_DEPTH="12 12 12 12 13" 17 | SLICE="0" 18 | LOG_FILE=$(date); LOG_FILE=log_${LOG_FILE//' '/$'_'}.txt 19 | -------------------------------------------------------------------------------- /scripts/bench_grasp/bench_surface_sparse/bench_surface_sparse_r05.sh: -------------------------------------------------------------------------------- 1 | PY_NAME="${HOME_DIR}/experiments/bench.py" 2 | 3 | # Main configurations 4 | CHECKPOINTS="${HOME_DIR}/results/grasp_surface_sparse/r05.pth" 5 | CHECKPOINT_NET_CONFIG="True" 6 | CODEC_CONFIG="${HOME_DIR}/config/codec_config/grasp_surface.yaml" 7 | INPUT="${HOME_DIR}/datasets/cat1/B/Arco_Valentino_Dense_vox12.ply ${HOME_DIR}/datasets/cat1/B/Staue_Klimt_vox12.ply ${HOME_DIR}/datasets/cat1/A/Shiva_00035_vox12.ply ${HOME_DIR}/datasets/cat1/A/Egyptian_mask_vox12.ply ${HOME_DIR}/datasets/cat1/A/ULB_Unicorn_vox13.ply" 8 | COMPUTE_D2="True" 9 | MPEG_REPORT="mpeg_report.csv" 10 | WRITE_PREFIX="grasp_" 11 | PRINT_FREQ="1" 12 | PC_WRITE_FREQ="-1" 13 | TF_SUMMARY="False" 14 | REMOVE_COMPRESSED_FILES="True" 15 | PEAK_VALUE="4095 4095 4095 4095 8191" 16 | BIT_DEPTH="12 12 12 12 13" 17 | SLICE="0" 18 | LOG_FILE=$(date); LOG_FILE=log_${LOG_FILE//' '/$'_'}.txt 19 | -------------------------------------------------------------------------------- /scripts/config_args.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Copyright (c) 2010-2022, InterDigital 4 | # All rights reserved. 5 | 6 | # See LICENSE under the root folder. 
7 | 8 | 9 | if [ $# -eq 2 ]; then 10 | export CUDA_VISIBLE_DEVICES=$2 11 | echo export CUDA_VISIBLE_DEVICES=$2 12 | fi 13 | # export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python 14 | source './scripts/tmp/'$1 15 | echo $1 16 | echo python ${RUN_ARGUMENTS} 17 | python ${RUN_ARGUMENTS} 18 | -------------------------------------------------------------------------------- /scripts/run.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Copyright (c) 2010-2022, InterDigital 4 | # All rights reserved. 5 | 6 | # See LICENSE under the root folder. 7 | 8 | 9 | SPEC=$1 10 | LAUNCHER=$2 11 | USE_GPU=$3 12 | 13 | TMP_ARGS=`python ./utils/gen_args.py ${SPEC}` 14 | if [[ ${LAUNCHER} == "d" ]]; then 15 | echo "Launch the job directly." 16 | ./scripts/config_args.sh ${TMP_ARGS} ${USE_GPU} 2>&1 & 17 | elif [[ ${LAUNCHER} == "f" ]]; then 18 | echo "Launch the job directly in foreground." 19 | ./scripts/config_args.sh ${TMP_ARGS} ${USE_GPU} 2>&1 20 | elif [[ ${LAUNCHER} == "s" ]]; then 21 | echo "Launch the job with slurm." 22 | source './scripts/tmp/'${TMP_ARGS} 23 | # Please modify according your needs 24 | sbatch --job-name=${EXP_NAME} -n 1 -D ${HOME_DIR} --gres=gpu:1 ./scripts/config_args.sh ${TMP_ARGS} 0 25 | else 26 | echo "No launcher is specified." 
27 | fi 28 | -------------------------------------------------------------------------------- /scripts/train_grasp/train_lidar_ford/train_lidar_ford_r01.sh: -------------------------------------------------------------------------------- 1 | PY_NAME="${HOME_DIR}/experiments/train.py" 2 | 3 | # Main configurations 4 | NET_CONFIG="${HOME_DIR}/config/net_config/grasp_dus1.yaml" 5 | OPTIM_CONFIG="${HOME_DIR}/config/optim_config/optim_cd_sparse.yaml" 6 | TRAIN_DATA_CONFIG="${HOME_DIR}/config/data_config/ford_voxel.yaml train_cfg" 7 | VAL_DATA_CONFIG="${HOME_DIR}/config/data_config/ford_voxel.yaml val_cfg" 8 | 9 | # Method-specific parameters 10 | ALPHA="1" # distortion trade-off 11 | BETA="50000" # rate trade-off 12 | SCALING_RATIO="0.0009765625" # quantization ratio 13 | POINT_MUL="5" # point multiplication, also the number of neighbors to search 14 | SKIP_MODE="False" # skip mode 15 | 16 | # Logging settings 17 | PRINT_FREQ="20" 18 | PC_WRITE_FREQ="-1" 19 | TF_SUMMARY="True" 20 | SAVE_CHECKPOINT_FREQ="1" 21 | SAVE_CHECKPOINT_MAX="10" 22 | VAL_FREQ="-1" 23 | VAL_PRINT_FREQ="20" 24 | LOG_FILE=$(date); LOG_FILE=log_${LOG_FILE//' '/$'_'}.txt 25 | -------------------------------------------------------------------------------- /scripts/train_grasp/train_lidar_ford/train_lidar_ford_r02.sh: -------------------------------------------------------------------------------- 1 | PY_NAME="${HOME_DIR}/experiments/train.py" 2 | 3 | # Main configurations 4 | NET_CONFIG="${HOME_DIR}/config/net_config/grasp_dus1.yaml" 5 | OPTIM_CONFIG="${HOME_DIR}/config/optim_config/optim_cd_sparse.yaml" 6 | TRAIN_DATA_CONFIG="${HOME_DIR}/config/data_config/ford_voxel.yaml train_cfg" 7 | VAL_DATA_CONFIG="${HOME_DIR}/config/data_config/ford_voxel.yaml val_cfg" 8 | 9 | # Method-specific parameters 10 | ALPHA="1" # distortion trade-off 11 | BETA="20000" # rate trade-off 12 | SCALING_RATIO="0.001953125" # quantization ratio 13 | POINT_MUL="5" # point multiplication, also the number of neighbors to search 
14 | SKIP_MODE="False" # skip mode 15 | 16 | # Logging settings 17 | PRINT_FREQ="20" 18 | PC_WRITE_FREQ="-1" 19 | TF_SUMMARY="True" 20 | SAVE_CHECKPOINT_FREQ="1" 21 | SAVE_CHECKPOINT_MAX="10" 22 | VAL_FREQ="-1" 23 | VAL_PRINT_FREQ="20" 24 | LOG_FILE=$(date); LOG_FILE=log_${LOG_FILE//' '/$'_'}.txt 25 | -------------------------------------------------------------------------------- /scripts/train_grasp/train_lidar_ford/train_lidar_ford_r03.sh: -------------------------------------------------------------------------------- 1 | PY_NAME="${HOME_DIR}/experiments/train.py" 2 | 3 | # Main configurations 4 | NET_CONFIG="${HOME_DIR}/config/net_config/grasp_dus1.yaml" 5 | OPTIM_CONFIG="${HOME_DIR}/config/optim_config/optim_cd_sparse.yaml" 6 | TRAIN_DATA_CONFIG="${HOME_DIR}/config/data_config/ford_voxel.yaml train_cfg" 7 | VAL_DATA_CONFIG="${HOME_DIR}/config/data_config/ford_voxel.yaml val_cfg" 8 | 9 | # Method-specific parameters 10 | ALPHA="1" # distortion trade-off 11 | BETA="5000" # rate trade-off 12 | SCALING_RATIO="0.00390625" # quantization ratio 13 | POINT_MUL="5" # point multiplication, also the number of neighbors to search 14 | SKIP_MODE="False" # skip mode 15 | 16 | # Logging settings 17 | PRINT_FREQ="20" 18 | PC_WRITE_FREQ="-1" 19 | TF_SUMMARY="True" 20 | SAVE_CHECKPOINT_FREQ="1" 21 | SAVE_CHECKPOINT_MAX="10" 22 | VAL_FREQ="-1" 23 | VAL_PRINT_FREQ="20" 24 | LOG_FILE=$(date); LOG_FILE=log_${LOG_FILE//' '/$'_'}.txt 25 | -------------------------------------------------------------------------------- /scripts/train_grasp/train_lidar_ford/train_lidar_ford_r04.sh: -------------------------------------------------------------------------------- 1 | PY_NAME="${HOME_DIR}/experiments/train.py" 2 | 3 | # Main configurations 4 | NET_CONFIG="${HOME_DIR}/config/net_config/grasp_dus1.yaml" 5 | OPTIM_CONFIG="${HOME_DIR}/config/optim_config/optim_cd_sparse.yaml" 6 | TRAIN_DATA_CONFIG="${HOME_DIR}/config/data_config/ford_voxel.yaml train_cfg" 7 | 
VAL_DATA_CONFIG="${HOME_DIR}/config/data_config/ford_voxel.yaml val_cfg" 8 | 9 | # Method-specific parameters 10 | ALPHA="1" # distortion trade-off 11 | BETA="800" # rate trade-off 12 | SCALING_RATIO="0.0078125" # quantization ratio 13 | POINT_MUL="5" # point multiplication, also the number of neighbors to search 14 | SKIP_MODE="False" # skip mode 15 | 16 | # Logging settings 17 | PRINT_FREQ="20" 18 | PC_WRITE_FREQ="-1" 19 | TF_SUMMARY="True" 20 | SAVE_CHECKPOINT_FREQ="1" 21 | SAVE_CHECKPOINT_MAX="10" 22 | VAL_FREQ="-1" 23 | VAL_PRINT_FREQ="20" 24 | LOG_FILE=$(date); LOG_FILE=log_${LOG_FILE//' '/$'_'}.txt 25 | -------------------------------------------------------------------------------- /scripts/train_grasp/train_lidar_ford/train_lidar_ford_r05.sh: -------------------------------------------------------------------------------- 1 | PY_NAME="${HOME_DIR}/experiments/train.py" 2 | 3 | # Main configurations 4 | NET_CONFIG="${HOME_DIR}/config/net_config/grasp_dus1.yaml" 5 | OPTIM_CONFIG="${HOME_DIR}/config/optim_config/optim_cd_sparse.yaml" 6 | TRAIN_DATA_CONFIG="${HOME_DIR}/config/data_config/ford_voxel.yaml train_cfg" 7 | VAL_DATA_CONFIG="${HOME_DIR}/config/data_config/ford_voxel.yaml val_cfg" 8 | 9 | # Method-specific parameters 10 | ALPHA="1" # distortion trade-off 11 | BETA="250" # rate trade-off 12 | SCALING_RATIO="0.015625" # quantization ratio 13 | POINT_MUL="5" # point multiplication, also the number of neighbors to search 14 | SKIP_MODE="False" # skip mode 15 | 16 | # Logging settings 17 | PRINT_FREQ="20" 18 | PC_WRITE_FREQ="-1" 19 | TF_SUMMARY="True" 20 | SAVE_CHECKPOINT_FREQ="1" 21 | SAVE_CHECKPOINT_MAX="10" 22 | VAL_FREQ="-1" 23 | VAL_PRINT_FREQ="20" 24 | LOG_FILE=$(date); LOG_FILE=log_${LOG_FILE//' '/$'_'}.txt 25 | -------------------------------------------------------------------------------- /scripts/train_grasp/train_surface_dense/train_surface_dense_r01.sh: -------------------------------------------------------------------------------- 1 | 
PY_NAME="${HOME_DIR}/experiments/train.py" 2 | 3 | # Main configurations 4 | NET_CONFIG="${HOME_DIR}/config/net_config/grasp_dus2.yaml" 5 | OPTIM_CONFIG="${HOME_DIR}/config/optim_config/optim_cd_sparse.yaml" 6 | TRAIN_DATA_CONFIG="${HOME_DIR}/config/data_config/modelnet_voxel_dense.yaml train_cfg" 7 | VAL_DATA_CONFIG="${HOME_DIR}/config/data_config/modelnet_voxel_dense.yaml val_cfg" 8 | 9 | # Method-specific parameters 10 | ALPHA="5" # distortion trade-off 11 | BETA="3" # rate trade-off 12 | SCALING_RATIO="0.125" # quantization ratio 13 | POINT_MUL="10" # point multiplication, also the number of neighbors to search 14 | SKIP_MODE="False" # skip mode 15 | 16 | # Logging settings 17 | PRINT_FREQ="20" 18 | PC_WRITE_FREQ="-1" 19 | TF_SUMMARY="True" 20 | SAVE_CHECKPOINT_FREQ="1" 21 | SAVE_CHECKPOINT_MAX="10" 22 | VAL_FREQ="-1" 23 | VAL_PRINT_FREQ="20" 24 | LOG_FILE=$(date); LOG_FILE=log_${LOG_FILE//' '/$'_'}.txt 25 | -------------------------------------------------------------------------------- /scripts/train_grasp/train_surface_dense/train_surface_dense_r02.sh: -------------------------------------------------------------------------------- 1 | PY_NAME="${HOME_DIR}/experiments/train.py" 2 | 3 | # Main configurations 4 | NET_CONFIG="${HOME_DIR}/config/net_config/grasp_dus2.yaml" 5 | OPTIM_CONFIG="${HOME_DIR}/config/optim_config/optim_cd_sparse.yaml" 6 | TRAIN_DATA_CONFIG="${HOME_DIR}/config/data_config/modelnet_voxel_dense.yaml train_cfg" 7 | VAL_DATA_CONFIG="${HOME_DIR}/config/data_config/modelnet_voxel_dense.yaml val_cfg" 8 | 9 | # Method-specific parameters 10 | ALPHA="5" # distortion trade-off 11 | BETA="2.5" # rate trade-off 12 | SCALING_RATIO="0.25" # quantization ratio 13 | POINT_MUL="10" # point multiplication, also the number of neighbors to search 14 | SKIP_MODE="False" # skip mode 15 | 16 | # Logging settings 17 | PRINT_FREQ="20" 18 | PC_WRITE_FREQ="-1" 19 | TF_SUMMARY="True" 20 | SAVE_CHECKPOINT_FREQ="1" 21 | SAVE_CHECKPOINT_MAX="10" 22 | VAL_FREQ="-1" 23 
| VAL_PRINT_FREQ="20" 24 | LOG_FILE=$(date); LOG_FILE=log_${LOG_FILE//' '/$'_'}.txt 25 | -------------------------------------------------------------------------------- /scripts/train_grasp/train_surface_dense/train_surface_dense_r03.sh: -------------------------------------------------------------------------------- 1 | PY_NAME="${HOME_DIR}/experiments/train.py" 2 | 3 | # Main configurations 4 | NET_CONFIG="${HOME_DIR}/config/net_config/grasp_dus2.yaml" 5 | OPTIM_CONFIG="${HOME_DIR}/config/optim_config/optim_cd_sparse.yaml" 6 | TRAIN_DATA_CONFIG="${HOME_DIR}/config/data_config/modelnet_voxel_dense.yaml train_cfg" 7 | VAL_DATA_CONFIG="${HOME_DIR}/config/data_config/modelnet_voxel_dense.yaml val_cfg" 8 | 9 | # Method-specific parameters 10 | ALPHA="5" # distortion trade-off 11 | BETA="1.0" # rate trade-off 12 | SCALING_RATIO="0.375" # quantization ratio 13 | POINT_MUL="5" # point multiplication, also the number of neighbors to search 14 | SKIP_MODE="False" # skip mode 15 | 16 | # Logging settings 17 | PRINT_FREQ="20" 18 | PC_WRITE_FREQ="-1" 19 | TF_SUMMARY="True" 20 | SAVE_CHECKPOINT_FREQ="1" 21 | SAVE_CHECKPOINT_MAX="10" 22 | VAL_FREQ="-1" 23 | VAL_PRINT_FREQ="20" 24 | LOG_FILE=$(date); LOG_FILE=log_${LOG_FILE//' '/$'_'}.txt 25 | -------------------------------------------------------------------------------- /scripts/train_grasp/train_surface_dense/train_surface_dense_r04.sh: -------------------------------------------------------------------------------- 1 | PY_NAME="${HOME_DIR}/experiments/train.py" 2 | 3 | # Main configurations 4 | NET_CONFIG="${HOME_DIR}/config/net_config/grasp_dus2.yaml" 5 | OPTIM_CONFIG="${HOME_DIR}/config/optim_config/optim_cd_sparse.yaml" 6 | TRAIN_DATA_CONFIG="${HOME_DIR}/config/data_config/modelnet_voxel_dense.yaml train_cfg" 7 | VAL_DATA_CONFIG="${HOME_DIR}/config/data_config/modelnet_voxel_dense.yaml val_cfg" 8 | 9 | # Method-specific parameters 10 | ALPHA="5" # distortion trade-off 11 | BETA="1.2" # rate trade-off 12 | 
SCALING_RATIO="0.5" # quantization ratio 13 | POINT_MUL="5" # point multiplication, also the number of neighbors to search 14 | SKIP_MODE="False" # skip mode 15 | 16 | # Logging settings 17 | PRINT_FREQ="20" 18 | PC_WRITE_FREQ="-1" 19 | TF_SUMMARY="True" 20 | SAVE_CHECKPOINT_FREQ="1" 21 | SAVE_CHECKPOINT_MAX="10" 22 | VAL_FREQ="-1" 23 | VAL_PRINT_FREQ="20" 24 | LOG_FILE=$(date); LOG_FILE=log_${LOG_FILE//' '/$'_'}.txt 25 | -------------------------------------------------------------------------------- /scripts/train_grasp/train_surface_dense/train_surface_dense_r05.sh: -------------------------------------------------------------------------------- 1 | PY_NAME="${HOME_DIR}/experiments/train.py" 2 | 3 | # Main configurations 4 | NET_CONFIG="${HOME_DIR}/config/net_config/grasp_dus2.yaml" 5 | OPTIM_CONFIG="${HOME_DIR}/config/optim_config/optim_cd_sparse.yaml" 6 | TRAIN_DATA_CONFIG="${HOME_DIR}/config/data_config/modelnet_voxel_dense.yaml train_cfg" 7 | VAL_DATA_CONFIG="${HOME_DIR}/config/data_config/modelnet_voxel_dense.yaml val_cfg" 8 | 9 | # Method-specific parameters 10 | ALPHA="5" # distortion trade-off 11 | BETA="0.6" # rate trade-off 12 | SCALING_RATIO="0.625" # quantization ratio 13 | POINT_MUL="5" # point multiplication, also the number of neighbors to search 14 | SKIP_MODE="False" # skip mode 15 | 16 | # Logging settings 17 | PRINT_FREQ="20" 18 | PC_WRITE_FREQ="-1" 19 | TF_SUMMARY="True" 20 | SAVE_CHECKPOINT_FREQ="1" 21 | SAVE_CHECKPOINT_MAX="10" 22 | VAL_FREQ="-1" 23 | VAL_PRINT_FREQ="20" 24 | LOG_FILE=$(date); LOG_FILE=log_${LOG_FILE//' '/$'_'}.txt 25 | -------------------------------------------------------------------------------- /scripts/train_grasp/train_surface_solid/train_surface_solid_r01.sh: -------------------------------------------------------------------------------- 1 | PY_NAME="${HOME_DIR}/experiments/train.py" 2 | 3 | # Main configurations 4 | NET_CONFIG="${HOME_DIR}/config/net_config/grasp_dus1_faiss.yaml" 5 | 
OPTIM_CONFIG="${HOME_DIR}/config/optim_config/optim_cd_sparse.yaml" 6 | TRAIN_DATA_CONFIG="${HOME_DIR}/config/data_config/modelnet_voxel_solid.yaml train_cfg" 7 | VAL_DATA_CONFIG="${HOME_DIR}/config/data_config/modelnet_voxel_solid.yaml val_cfg" 8 | 9 | # Method-specific parameters 10 | ALPHA="5" # Distortion trade-off 11 | BETA="512" # Rate trade-off 12 | SCALING_RATIO="0.0625" # quantization ratio 13 | POINT_MUL="512" # point multiplication, also the number of neighbors to search 14 | SKIP_MODE="False" # skip mode 15 | 16 | # Logging settings 17 | PRINT_FREQ="20" 18 | PC_WRITE_FREQ="-1" 19 | TF_SUMMARY="True" 20 | SAVE_CHECKPOINT_FREQ="1" 21 | SAVE_CHECKPOINT_MAX="10" 22 | VAL_FREQ="-1" 23 | VAL_PRINT_FREQ="20" 24 | LOG_FILE=$(date); LOG_FILE=log_${LOG_FILE//' '/$'_'}.txt 25 | LOG_FILE_ONLY="False" 26 | -------------------------------------------------------------------------------- /scripts/train_grasp/train_surface_solid/train_surface_solid_r02.sh: -------------------------------------------------------------------------------- 1 | PY_NAME="${HOME_DIR}/experiments/train.py" 2 | 3 | # Main configurations 4 | NET_CONFIG="${HOME_DIR}/config/net_config/grasp_dus1_faiss.yaml" 5 | OPTIM_CONFIG="${HOME_DIR}/config/optim_config/optim_cd_sparse.yaml" 6 | TRAIN_DATA_CONFIG="${HOME_DIR}/config/data_config/modelnet_voxel_solid.yaml train_cfg" 7 | VAL_DATA_CONFIG="${HOME_DIR}/config/data_config/modelnet_voxel_solid.yaml val_cfg" 8 | 9 | # Method-specific parameters 10 | ALPHA="5" # Distortion trade-off 11 | BETA="128" # Rate trade-off 12 | SCALING_RATIO="0.125" # quantization ratio 13 | POINT_MUL="150" # point multiplication, also the number of neighbors to search 14 | SKIP_MODE="False" # skip mode 15 | 16 | # Logging settings 17 | PRINT_FREQ="20" 18 | PC_WRITE_FREQ="-1" 19 | TF_SUMMARY="True" 20 | SAVE_CHECKPOINT_FREQ="1" 21 | SAVE_CHECKPOINT_MAX="10" 22 | VAL_FREQ="-1" 23 | VAL_PRINT_FREQ="20" 24 | LOG_FILE=$(date); LOG_FILE=log_${LOG_FILE//' '/$'_'}.txt 25 | 
LOG_FILE_ONLY="False" 26 | -------------------------------------------------------------------------------- /scripts/train_grasp/train_surface_solid/train_surface_solid_r03.sh: -------------------------------------------------------------------------------- 1 | PY_NAME="${HOME_DIR}/experiments/train.py" 2 | 3 | # Main configurations 4 | NET_CONFIG="${HOME_DIR}/config/net_config/grasp_dus1_faiss.yaml" 5 | OPTIM_CONFIG="${HOME_DIR}/config/optim_config/optim_cd_sparse.yaml" 6 | TRAIN_DATA_CONFIG="${HOME_DIR}/config/data_config/modelnet_voxel_solid.yaml train_cfg" 7 | VAL_DATA_CONFIG="${HOME_DIR}/config/data_config/modelnet_voxel_solid.yaml val_cfg" 8 | 9 | # Method-specific parameters 10 | ALPHA="5" # Distortion trade-off 11 | BETA="32" # Rate trade-off 12 | SCALING_RATIO="0.25" # quantization ratio 13 | POINT_MUL="50" # point multiplication, also the number of neighbors to search 14 | SKIP_MODE="False" # skip mode 15 | 16 | # Logging settings 17 | PRINT_FREQ="20" 18 | PC_WRITE_FREQ="-1" 19 | TF_SUMMARY="True" 20 | SAVE_CHECKPOINT_FREQ="1" 21 | SAVE_CHECKPOINT_MAX="10" 22 | VAL_FREQ="-1" 23 | VAL_PRINT_FREQ="20" 24 | LOG_FILE=$(date); LOG_FILE=log_${LOG_FILE//' '/$'_'}.txt 25 | -------------------------------------------------------------------------------- /scripts/train_grasp/train_surface_solid/train_surface_solid_r04.sh: -------------------------------------------------------------------------------- 1 | PY_NAME="${HOME_DIR}/experiments/train.py" 2 | 3 | # Main configurations 4 | NET_CONFIG="${HOME_DIR}/config/net_config/grasp_dus1_faiss.yaml" 5 | OPTIM_CONFIG="${HOME_DIR}/config/optim_config/optim_cd_sparse.yaml" 6 | TRAIN_DATA_CONFIG="${HOME_DIR}/config/data_config/modelnet_voxel_solid.yaml train_cfg" 7 | VAL_DATA_CONFIG="${HOME_DIR}/config/data_config/modelnet_voxel_solid.yaml val_cfg" 8 | 9 | # Method-specific parameters 10 | ALPHA="5" # Distortion trade-off 11 | BETA="5" # Rate trade-off 12 | SCALING_RATIO="0.375" # quantization ratio 13 | POINT_MUL="20" # 
point multiplication, also the number of neighbors to search 14 | SKIP_MODE="False" # skip mode 15 | 16 | # Logging settings 17 | PRINT_FREQ="20" 18 | PC_WRITE_FREQ="-1" 19 | TF_SUMMARY="True" 20 | SAVE_CHECKPOINT_FREQ="1" 21 | SAVE_CHECKPOINT_MAX="10" 22 | VAL_FREQ="-1" 23 | VAL_PRINT_FREQ="20" 24 | LOG_FILE=$(date); LOG_FILE=log_${LOG_FILE//' '/$'_'}.txt 25 | -------------------------------------------------------------------------------- /scripts/train_grasp/train_surface_solid/train_surface_solid_r05.sh: -------------------------------------------------------------------------------- 1 | PY_NAME="${HOME_DIR}/experiments/train.py" 2 | 3 | # Main configurations 4 | NET_CONFIG="${HOME_DIR}/config/net_config/grasp_dus1_faiss.yaml" 5 | OPTIM_CONFIG="${HOME_DIR}/config/optim_config/optim_cd_sparse.yaml" 6 | TRAIN_DATA_CONFIG="${HOME_DIR}/config/data_config/modelnet_voxel_solid.yaml train_cfg" 7 | VAL_DATA_CONFIG="${HOME_DIR}/config/data_config/modelnet_voxel_solid.yaml val_cfg" 8 | 9 | # Method-specific parameters 10 | ALPHA="5" # Distortion trade-off 11 | BETA="1" # Rate trade-off 12 | SCALING_RATIO="0.375" # quantization ratio 13 | POINT_MUL="20" # point multiplication, also the number of neighbors to search 14 | SKIP_MODE="False" # skip mode 15 | 16 | # Logging settings 17 | PRINT_FREQ="20" 18 | PC_WRITE_FREQ="-1" 19 | TF_SUMMARY="True" 20 | SAVE_CHECKPOINT_FREQ="1" 21 | SAVE_CHECKPOINT_MAX="10" 22 | VAL_FREQ="-1" 23 | VAL_PRINT_FREQ="20" 24 | LOG_FILE=$(date); LOG_FILE=log_${LOG_FILE//' '/$'_'}.txt 25 | -------------------------------------------------------------------------------- /scripts/train_grasp/train_surface_sparse/train_surface_sparse_r01.sh: -------------------------------------------------------------------------------- 1 | PY_NAME="${HOME_DIR}/experiments/train.py" 2 | 3 | # Main configurations 4 | NET_CONFIG="${HOME_DIR}/config/net_config/grasp_dus1.yaml" 5 | OPTIM_CONFIG="${HOME_DIR}/config/optim_config/optim_cd_sparse.yaml" 6 | 
TRAIN_DATA_CONFIG="${HOME_DIR}/config/data_config/modelnet_voxel_sparse.yaml train_cfg" 7 | VAL_DATA_CONFIG="${HOME_DIR}/config/data_config/modelnet_voxel_sparse.yaml val_cfg" 8 | 9 | # Method-specific parameters 10 | ALPHA="5" # Distortion trade-off 11 | BETA="256" # Rate trade-off 12 | SCALING_RATIO="0.0625" # quantization ratio 13 | POINT_MUL="15" # point multiplication, also the number of neighbors to search 14 | SKIP_MODE="False" # skip mode 15 | 16 | # Logging settings 17 | PRINT_FREQ="20" 18 | PC_WRITE_FREQ="-1" 19 | TF_SUMMARY="True" 20 | SAVE_CHECKPOINT_FREQ="1" 21 | SAVE_CHECKPOINT_MAX="10" 22 | VAL_FREQ="-1" 23 | VAL_PRINT_FREQ="20" 24 | LOG_FILE=$(date); LOG_FILE=log_${LOG_FILE//' '/$'_'}.txt 25 | LOG_FILE_ONLY="False" 26 | -------------------------------------------------------------------------------- /scripts/train_grasp/train_surface_sparse/train_surface_sparse_r02.sh: -------------------------------------------------------------------------------- 1 | PY_NAME="${HOME_DIR}/experiments/train.py" 2 | 3 | # Main configurations 4 | NET_CONFIG="${HOME_DIR}/config/net_config/grasp_dus1.yaml" 5 | OPTIM_CONFIG="${HOME_DIR}/config/optim_config/optim_cd_sparse.yaml" 6 | TRAIN_DATA_CONFIG="${HOME_DIR}/config/data_config/modelnet_voxel_sparse.yaml train_cfg" 7 | VAL_DATA_CONFIG="${HOME_DIR}/config/data_config/modelnet_voxel_sparse.yaml val_cfg" 8 | 9 | # Method-specific parameters 10 | ALPHA="5" # Distortion trade-off 11 | BETA="64" # Rate trade-off 12 | SCALING_RATIO="0.125" # quantization ratio 13 | POINT_MUL="10" # point multiplication, also the number of neighbors to search 14 | SKIP_MODE="False" # skip mode 15 | 16 | # Logging settings 17 | PRINT_FREQ="20" 18 | PC_WRITE_FREQ="-1" 19 | TF_SUMMARY="True" 20 | SAVE_CHECKPOINT_FREQ="1" 21 | SAVE_CHECKPOINT_MAX="10" 22 | VAL_FREQ="-1" 23 | VAL_PRINT_FREQ="20" 24 | LOG_FILE=$(date); LOG_FILE=log_${LOG_FILE//' '/$'_'}.txt 25 | LOG_FILE_ONLY="False" 26 | 
-------------------------------------------------------------------------------- /scripts/train_grasp/train_surface_sparse/train_surface_sparse_r03.sh: -------------------------------------------------------------------------------- 1 | PY_NAME="${HOME_DIR}/experiments/train.py" 2 | 3 | # Main configurations 4 | NET_CONFIG="${HOME_DIR}/config/net_config/grasp_dus1.yaml" 5 | OPTIM_CONFIG="${HOME_DIR}/config/optim_config/optim_cd_sparse.yaml" 6 | TRAIN_DATA_CONFIG="${HOME_DIR}/config/data_config/modelnet_voxel_sparse.yaml train_cfg" 7 | VAL_DATA_CONFIG="${HOME_DIR}/config/data_config/modelnet_voxel_sparse.yaml val_cfg" 8 | 9 | # Method-specific parameters 10 | ALPHA="5" # Distortion trade-off 11 | BETA="8" # Rate trade-off 12 | SCALING_RATIO="0.25" # quantization ratio 13 | POINT_MUL="10" # point multiplication, also the number of neighbors to search 14 | SKIP_MODE="False" # skip mode 15 | 16 | # Logging settings 17 | PRINT_FREQ="20" 18 | PC_WRITE_FREQ="-1" 19 | TF_SUMMARY="True" 20 | SAVE_CHECKPOINT_FREQ="1" 21 | SAVE_CHECKPOINT_MAX="10" 22 | VAL_FREQ="-1" 23 | VAL_PRINT_FREQ="20" 24 | LOG_FILE=$(date); LOG_FILE=log_${LOG_FILE//' '/$'_'}.txt 25 | LOG_FILE_ONLY="False" 26 | -------------------------------------------------------------------------------- /scripts/train_grasp/train_surface_sparse/train_surface_sparse_r04.sh: -------------------------------------------------------------------------------- 1 | PY_NAME="${HOME_DIR}/experiments/train.py" 2 | 3 | # Main configurations 4 | NET_CONFIG="${HOME_DIR}/config/net_config/grasp_dus1.yaml" 5 | OPTIM_CONFIG="${HOME_DIR}/config/optim_config/optim_cd_sparse.yaml" 6 | TRAIN_DATA_CONFIG="${HOME_DIR}/config/data_config/modelnet_voxel_sparse.yaml train_cfg" 7 | VAL_DATA_CONFIG="${HOME_DIR}/config/data_config/modelnet_voxel_sparse.yaml val_cfg" 8 | 9 | # Method-specific parameters 10 | ALPHA="5" # Distortion trade-off 11 | BETA="5" # Rate trade-off 12 | SCALING_RATIO="0.375" # quantization ratio 13 | POINT_MUL="3" # point 
multiplication, also the number of neighbors to search 14 | SKIP_MODE="False" # skip mode 15 | 16 | # Logging settings 17 | PRINT_FREQ="20" 18 | PC_WRITE_FREQ="-1" 19 | TF_SUMMARY="True" 20 | SAVE_CHECKPOINT_FREQ="1" 21 | SAVE_CHECKPOINT_MAX="10" 22 | VAL_FREQ="-1" 23 | VAL_PRINT_FREQ="20" 24 | LOG_FILE=$(date); LOG_FILE=log_${LOG_FILE//' '/$'_'}.txt 25 | LOG_FILE_ONLY="False" 26 | -------------------------------------------------------------------------------- /scripts/train_grasp/train_surface_sparse/train_surface_sparse_r05.sh: -------------------------------------------------------------------------------- 1 | PY_NAME="${HOME_DIR}/experiments/train.py" 2 | 3 | # Main configurations 4 | NET_CONFIG="${HOME_DIR}/config/net_config/grasp_dus1.yaml" 5 | OPTIM_CONFIG="${HOME_DIR}/config/optim_config/optim_cd_sparse.yaml" 6 | TRAIN_DATA_CONFIG="${HOME_DIR}/config/data_config/modelnet_voxel_sparse.yaml train_cfg" 7 | VAL_DATA_CONFIG="${HOME_DIR}/config/data_config/modelnet_voxel_sparse.yaml val_cfg" 8 | 9 | # Method-specific parameters 10 | ALPHA="5" # Distortion trade-off 11 | BETA="1" # Rate trade-off 12 | SCALING_RATIO="0.5" # quantization ratio 13 | POINT_MUL="3" # point multiplication, also the number of neighbors to search 14 | SKIP_MODE="False" # skip mode 15 | 16 | # Logging settings 17 | PRINT_FREQ="20" 18 | PC_WRITE_FREQ="-1" 19 | TF_SUMMARY="True" 20 | SAVE_CHECKPOINT_FREQ="1" 21 | SAVE_CHECKPOINT_MAX="10" 22 | VAL_FREQ="-1" 23 | VAL_PRINT_FREQ="20" 24 | LOG_FILE=$(date); LOG_FILE=log_${LOG_FILE//' '/$'_'}.txt 25 | LOG_FILE_ONLY="False" 26 | -------------------------------------------------------------------------------- /scripts/visualize.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Copyright (c) 2010-2022, InterDigital 4 | # All rights reserved. 5 | 6 | # See LICENSE under the root folder. 
7 | 8 | 9 | # Rendering settings 10 | FILE="./datasets/ford/Ford_01_q_1mm/Ford_01_vox1mm-0100.ply" 11 | RADIUS=-1 12 | RADIUS_ORIGIN=-1 13 | VIEW_FILE=. 14 | 15 | # Begin rendering 16 | python ./utils/visualize.py \ 17 | --file_name $FILE \ 18 | --output_file . \ 19 | --view_file $VIEW_FILE \ 20 | --radius $RADIUS \ 21 | --radius_origin $RADIUS_ORIGIN \ 22 | --window_name $FILE 23 | -------------------------------------------------------------------------------- /third_party/nndistance/LICENSE: -------------------------------------------------------------------------------- 1 | 3D Point Capsule Networks 2 | 3 | Copyright (c) 2019, Chair for Computer Aided Medical Procedures & Augmented Reality, Technical University of Munich 4 | 5 | The MIT License (MIT) 6 | 7 | Copyright (c) 2019 Yongheng Zhao 8 | 9 | Permission is hereby granted, free of charge, to any person obtaining a copy 10 | of this software and associated documentation files (the "Software"), to deal 11 | in the Software without restriction, including without limitation the rights 12 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 13 | copies of the Software, and to permit persons to whom the Software is 14 | furnished to do so, subject to the following conditions: 15 | 16 | The above copyright notice and this permission notice shall be included in all 17 | copies or substantial portions of the Software. 18 | 19 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 20 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 21 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 22 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 23 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 24 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 25 | SOFTWARE. 
# Build script for the nndistance extension (CUDA variant).
# Usage: python build.py install
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension,CppExtension,CUDAExtension


# CPU-only build kept for reference; see build_cpu.py for the maintained version.
# setup(name='my_lib',
#       ext_modules=[CppExtension('my_lib', ['src/my_lib.cpp'])],
#       cmdclass={'build_ext': BuildExtension})

# Compiles the C++ bindings plus the CUDA kernels into the `my_lib_cuda`
# module imported by functions/nnd.py.
setup(name='my_lib_cuda',
      ext_modules=[CUDAExtension('my_lib_cuda',['src/my_lib_cuda.cpp', 'src/nnd_cuda.cu']
                                 )],
      cmdclass={'build_ext': BuildExtension}
      )


#if __name__ == '__main__':
#    ffi.build()
# functions/nnd.py -- autograd wrapper around the compiled NN-distance extension
import torch
from torch.autograd import Function
#from _ext import my_lib
import my_lib_cuda as my_lib


class NNDFunction(Function):
    """Bidirectional nearest-neighbor distance (Chamfer-style) autograd Function.

    For point sets xyz1 (B, N, 3) and xyz2 (B, M, 3), forward() returns
    (dist1, dist2, idx1, idx2): per-point distances to, and indices of, the
    nearest neighbor in the other set, in both directions. The computation is
    delegated to the compiled `my_lib_cuda` extension (CPU or CUDA path).
    """

    @staticmethod
    def forward(ctx, xyz1, xyz2):
        # NOTE(review): the buffers below are float32/int32, so the extension
        # presumably expects float32 inputs -- confirm before feeding doubles.
        device = xyz1.device
        batchsize, n, _ = xyz1.size()
        _, m, _ = xyz2.size()

        # Output buffers, filled in-place by the extension
        dist1 = torch.zeros(batchsize, n)
        dist2 = torch.zeros(batchsize, m)

        idx1 = torch.zeros(batchsize, n).type(torch.IntTensor)
        idx2 = torch.zeros(batchsize, m).type(torch.IntTensor)

        if not xyz1.is_cuda:
            my_lib.nnd_forward(xyz1, xyz2, dist1, dist2, idx1, idx2)
        else:
            # Move the output buffers to the GPU before calling the CUDA kernels
            dist1 = dist1.cuda()
            dist2 = dist2.cuda()
            idx1 = idx1.cuda()
            idx2 = idx2.cuda()
            my_lib.nnd_forward_cuda(xyz1, xyz2, dist1, dist2, idx1, idx2)

        # Save the int32 index tensors for backward(); the *returned* indices
        # are cast to int64 on the input device so callers can use them with
        # torch.gather and friends.
        ctx.save_for_backward(xyz1,xyz2,dist1,dist2,idx1,idx2)
        idx1 = idx1.to(device=device, dtype=torch.long)
        idx2 = idx2.to(device=device, dtype=torch.long)
        return dist1, dist2, idx1, idx2

    @staticmethod
    def backward(ctx, graddist1, graddist2, gradidx1, gradidx2):
        # gradidx1/gradidx2 are intentionally unused: the index outputs are
        # not differentiable, but autograd still passes one grad per output.
        xyz1, xyz2, dist1, dist2, idx1, idx2 = ctx.saved_tensors
        graddist1 = graddist1.contiguous()
        graddist2 = graddist2.contiguous()

        # Gradient buffers, filled in-place by the extension
        gradxyz1 = torch.zeros(xyz1.size())
        gradxyz2 = torch.zeros(xyz2.size())

        if not graddist1.is_cuda:
            my_lib.nnd_backward(xyz1, xyz2, gradxyz1, gradxyz2, graddist1, graddist2, idx1, idx2)
        else:
            gradxyz1 = gradxyz1.cuda()
            gradxyz2 = gradxyz2.cuda()
            my_lib.nnd_backward_cuda(xyz1, xyz2, gradxyz1, gradxyz2, graddist1, graddist2, idx1, idx2)
        return gradxyz1, gradxyz2
from torch.nn.modules.module import Module
from functions.nnd import NNDFunction

class NNDModule(Module):
    """nn.Module wrapper around the compiled NN-distance autograd Function.

    Computes bidirectional nearest-neighbor (Chamfer-style) distances and
    indices between two point sets; see functions/nnd.py for details.
    """

    def forward(self, input1, input2):
        """Return (dist1, dist2, idx1, idx2) for point sets input1/input2.

        input1/input2 are (B, N, 3) / (B, M, 3) tensors on the same device;
        assumed float32 to match the extension's buffers -- TODO confirm.
        """
        # Use the static Function.apply entry point: custom autograd
        # Functions with @staticmethod forward/backward are invoked on the
        # class, not on an instance (legacy instantiated usage is deprecated).
        return NNDFunction.apply(input1, input2)
idx2.data(); 88 | 89 | for (int i=0;i 2 | 3 | #include "cpu_ops.cpp" 4 | 5 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 6 | m.def("nnd_forward", &nnd_forward, "nnd_forward"); 7 | m.def("nnd_backward", &nnd_backward, "nnd_backward"); 8 | } 9 | 10 | 11 | 12 | -------------------------------------------------------------------------------- /third_party/nndistance/src/my_lib_cuda.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include "cpu_ops.cpp" 4 | 5 | #define CHECK_CUDA(x) AT_ASSERTM(x.type().is_cuda(), #x " must be a CUDA tensor") 6 | #define CHECK_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " must be contiguous") 7 | #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x) 8 | 9 | int NmDistanceKernelLauncher( 10 | at::Tensor xyz1, 11 | at::Tensor xyz2, 12 | at::Tensor dist1, 13 | at::Tensor dist2, 14 | at::Tensor idx1, 15 | at::Tensor idx2); 16 | int NmDistanceGradKernelLauncher( 17 | at::Tensor xyz1, 18 | at::Tensor xyz2, 19 | at::Tensor gradxyz1, 20 | at::Tensor gradxyz2, 21 | at::Tensor graddist1, 22 | at::Tensor graddist2, 23 | at::Tensor idx1, 24 | at::Tensor idx2); 25 | 26 | int nnd_forward_cuda( 27 | at::Tensor xyz1, 28 | at::Tensor xyz2, 29 | at::Tensor dist1, 30 | at::Tensor dist2, 31 | at::Tensor idx1, 32 | at::Tensor idx2) { 33 | CHECK_INPUT(xyz1); 34 | CHECK_INPUT(xyz2); 35 | CHECK_INPUT(dist1); 36 | CHECK_INPUT(dist2); 37 | CHECK_INPUT(idx1); 38 | CHECK_INPUT(idx2); 39 | 40 | 41 | return NmDistanceKernelLauncher(xyz1, xyz2, dist1, dist2, idx1, idx2); 42 | } 43 | 44 | 45 | int nnd_backward_cuda( 46 | at::Tensor xyz1, 47 | at::Tensor xyz2, 48 | at::Tensor gradxyz1, 49 | at::Tensor gradxyz2, 50 | at::Tensor graddist1, 51 | at::Tensor graddist2, 52 | at::Tensor idx1, 53 | at::Tensor idx2) 54 | { 55 | CHECK_INPUT(xyz1); 56 | CHECK_INPUT(xyz2); 57 | CHECK_INPUT(gradxyz1); 58 | CHECK_INPUT(gradxyz2); 59 | CHECK_INPUT(graddist1); 60 | CHECK_INPUT(graddist2); 61 | CHECK_INPUT(idx1); 62 
| CHECK_INPUT(idx2); 63 | 64 | return NmDistanceGradKernelLauncher( 65 | xyz1, 66 | xyz2, 67 | gradxyz1, 68 | gradxyz2, 69 | graddist1, 70 | graddist2, 71 | idx1, 72 | idx2); 73 | } 74 | 75 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 76 | m.def("nnd_forward_cuda", &nnd_forward_cuda, "NND forward (CUDA)"); 77 | m.def("nnd_backward_cuda", &nnd_backward_cuda, "NND backward (CUDA)"); 78 | m.def("nnd_forward", &nnd_forward, "nnd_forward"); 79 | m.def("nnd_backward", &nnd_backward, "nnd_backward"); 80 | } -------------------------------------------------------------------------------- /third_party/nndistance/src/nnd_cuda.cu: -------------------------------------------------------------------------------- 1 | #include 2 | //#include "nnd_cuda.h" 3 | #include 4 | #include 5 | 6 | 7 | 8 | __global__ void NmDistanceKernel(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i){ 9 | const int batch=512; 10 | __shared__ float buf[batch*3]; 11 | for (int i=blockIdx.x;ibest){ 123 | result[(i*n+j)]=best; 124 | result_i[(i*n+j)]=best_i; 125 | } 126 | } 127 | __syncthreads(); 128 | } 129 | } 130 | } 131 | 132 | int NmDistanceKernelLauncher( 133 | at::Tensor xyz1, 134 | at::Tensor xyz2, 135 | at::Tensor dist1, 136 | at::Tensor dist2, 137 | at::Tensor idx1, 138 | at::Tensor idx2) 139 | { 140 | int b = xyz1.size(0); 141 | int n = xyz1.size(1); 142 | int m = xyz2.size(1); 143 | 144 | float *xyz1_data = xyz1.data(); 145 | float *xyz2_data = xyz2.data(); 146 | float *dist1_data = dist1.data(); 147 | float *dist2_data = dist2.data(); 148 | int *idx1_data = idx1.data(); 149 | int *idx2_data = idx2.data(); 150 | 151 | 152 | NmDistanceKernel<<>>(b,n,xyz1_data,m,xyz2_data,dist1_data,idx1_data); 153 | NmDistanceKernel<<>>(b,m,xyz2_data,n,xyz1_data,dist2_data,idx2_data); 154 | 155 | cudaError_t err = cudaGetLastError(); 156 | if (err != cudaSuccess) { 157 | printf("error in nnd updateOutput: %s\n", cudaGetErrorString(err)); 158 | //THError("aborting"); 159 | 
# Smoke test for the nndistance extension.
# Requires a CUDA device and the compiled `my_lib_cuda` module
# (python build.py install); run from third_party/nndistance.
import torch
import torch.nn as nn
from torch.autograd import Variable

from modules.nnd import NNDModule

dist = NNDModule()

# Two random point clouds of different sizes: (B=10, N=1000/1500, 3)
p1 = torch.rand(10,1000,3)
p2 = torch.rand(10,1500,3)

# Case 1: wrap on CPU first, then move to GPU
points1 = Variable(p1,requires_grad = True)
points2 = Variable(p2)
points1=points1.cuda()
points2=points2.cuda()
dist1, dist2, idx1, idx2 = dist(points1, points2)
print(dist1, dist2, idx1, idx2)
loss = torch.sum(dist1)
print(loss)
loss.backward()
print(points1.grad, points2.grad)

# Case 2: move to GPU first, then wrap (grads flow to the leaf here)
points1 = Variable(p1.cuda(), requires_grad = True)
points2 = Variable(p2.cuda())
dist1, dist2, idx1, idx2 = dist(points1, points2)
print(dist1, dist2, idx1, idx2)
loss = torch.sum(dist1)
print(loss)
loss.backward()
print(points1.grad, points2.grad)

# Test indexing: gather the points of points1 nearest to each point of points2
# using the int64 indices returned by the module
nn2 = torch.gather(points1, 1, idx2.unsqueeze(-1).expand([-1,-1,points1.shape[2]]).cuda())
print(nn2)
loss = torch.sum(nn2)
print(loss)
loss.backward()
print(points1.grad, points2.grad)
def main(opt):
    """Merge MPEG-report CSV files into a single sorted CSV.

    Reads every file in ``opt.input_files``, concatenates their rows, sorts
    them by (sequence, numeric numBitsGeoEncT), and writes the merged table
    to ``opt.output_file`` with the standard MPEG reporting header.
    """
    # Read the input CSV files and collect all entries.
    # newline='' is required by the csv module so platform line endings and
    # embedded newlines are handled correctly (without it, csv output gains
    # blank rows on Windows).
    log_dict_all = []
    for csv_file in opt.input_files:
        with open(csv_file, 'r', newline='') as f:
            reader = csv.DictReader(f)
            for item in reader:
                log_dict_all.append(dict(item))
    # Sort with two keys: sequence name first, then bitrate as an integer
    # (string comparison would mis-order e.g. '100' before '20')
    log_dict_all.sort(key=lambda x: (x['sequence'], int(x['numBitsGeoEncT'])))

    # Write the merged CSV file
    mpeg_report_header = ['sequence', 'numOutputPointsT', 'numBitsGeoEncT', 'd1T', 'd2T', 'encTimeT', 'decTimeT']
    with open(opt.output_file, 'w', newline='') as f:
        writer = csv.DictWriter(f, fieldnames=mpeg_report_header)
        writer.writeheader()
        writer.writerows(log_dict_all)
def str2bool(val):
    """Convert a command-line string (or an actual bool) to a Boolean.

    Accepts the usual true/false spellings case-insensitively; raises
    argparse.ArgumentTypeError for anything unrecognized so argparse can
    report a clean usage error.
    """
    if isinstance(val, bool):
        return val
    lowered = val.lower()
    if lowered in ('true', 'yes', 't', 'y', '1'):
        return True
    if lowered in ('false', 'no', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Expect a Boolean value.')
def main():
    """Render the point cloud described by the module-global `opt`.

    NOTE(review): `opt` is parsed under the __main__ guard below and read
    here as a global rather than passed as a parameter.
    """
    # Create the render window
    vis = o3d.visualization.Visualizer()
    vis.create_window(window_name=opt.window_name, height=opt.window_height, width=opt.window_width)

    if opt.radius > 0: # Render the point cloud with ball decorator
        pc_elem = sphere_pc_generator(opt)

        # Aggregate the generated spheres into a single mesh
        geo = pc_elem[0]
        for i in range(1, len(pc_elem)):
            geo += pc_elem[i]
        print('Aggregated %d shperes for the point cloud.' % len(pc_elem))
    else:
        # Plain point rendering, no per-point sphere meshes
        geo = read_point_cloud(opt.file_name)
        print('Loaded a point cloud with %d points.' % len(geo.points))

    # Draw the stuff finally
    vis.add_geometry(geo) # Add the point cloud

    # Mark the origin if needed
    if opt.radius_origin > 0:
        origin = o3d.geometry.TriangleMesh.create_sphere(opt.radius_origin, resolution=20) # create a ball
        origin.compute_vertex_normals()
        origin.paint_uniform_color(opt.color) # paint the ball
        origin.translate([0, 0, 0], False) # translate it
        vis.add_geometry(origin) # Add the origin

    # '.' is the sentinel default meaning "no view file given"
    ctr = vis.get_view_control()
    if opt.view_file != '.': # Set the camera view point
        param = o3d.io.read_pinhole_camera_parameters(opt.view_file)
        ctr.convert_from_pinhole_camera_parameters(param)

    # Render and save as an image if the ouput file path is given,
    # otherwise open the interactive viewer
    if opt.output_file != '.':
        vis.capture_screen_image(opt.output_file, True)
    else:
        vis.run()
    vis.destroy_window()


def add_options(parser):
    """Register the visualization command-line options on *parser*."""
    parser.add_argument('--file_name', type=str, required=True, help='File name of the point cloud.')
    parser.add_argument('--output_file', type=str, default='.', help='Output file name for the rendered image.')
    parser.add_argument('--view_file', type=str, default='.', help='View point file for rendering.')
    parser.add_argument('--radius', type=float, default=-1, help='Radius of the rendered points. If > 0, render each point as a ball.')
    parser.add_argument('--color', type=float, nargs='+', default=[0.2, 0.2, 0.2], help='Specify the color of the rendered point cloud if ball decorator is used.')
    parser.add_argument('--radius_origin', type=float, default=-1, help='Radius of the origin points. If < 0, do not add origin.')
    parser.add_argument('--window_name', type=str, default='Point Cloud', help='Window name.')
    parser.add_argument('--window_height', type=int, default=1200, help='Window height.')
    parser.add_argument('--window_width', type=int, default=1600, help='Window width.')

    return parser


if __name__ == "__main__":

    # Initialize parser with basic options; parse_known_args ignores any
    # extra options so the script tolerates shared launcher arguments
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser = add_options(parser)
    opt, _ = parser.parse_known_args()
    main()