├── .clang-format ├── .github ├── config.yml └── workflows │ ├── style.yml │ └── ubuntu.yml ├── .gitignore ├── .style.yapf ├── LICENSE ├── Makefile ├── README.md ├── __init__.py ├── ci ├── check_style.py └── run_ci.sh ├── data └── demo │ ├── fragment.pcd │ └── fragment.ply ├── docker ├── Dockerfile ├── Dockerfile.cpu ├── Dockerfile.fromsource ├── Dockerfile.fromsource10 └── Dockerfile.jetson ├── docs ├── howtos.md ├── images │ ├── getting_started_ml_visualizer.gif │ ├── visualizer_BoundingBoxes.png │ ├── visualizer_custom_lut.png │ ├── visualizer_int_attr.png │ ├── visualizer_predictions.gif │ └── visualizer_random_color_attr.png └── tutorial │ └── notebook │ ├── add_own_dataset.rst │ ├── index.rst │ ├── train_ss_model_using_pytorch.rst │ └── train_ss_model_using_tensorflow.rst ├── examples ├── demo_data │ ├── labels │ │ ├── 000700.npy │ │ └── 000750.npy │ └── points │ │ ├── 000700.npy │ │ └── 000750.npy ├── vis_pred.py └── visualize.py ├── ml3d ├── __init__.py ├── configs │ ├── __init__.py │ ├── default_cfgs │ │ ├── kpconv.yml │ │ ├── parislille3d.yml │ │ ├── randlanet.yml │ │ ├── s3dis.yml │ │ ├── semantic3d.yml │ │ ├── semantic_segmentation.yml │ │ ├── semantickitti.yml │ │ ├── shapenet.yml │ │ └── toronto3d.yml │ ├── kpconv_parislille3d.yml │ ├── kpconv_s3dis.yml │ ├── kpconv_semantic3d.yml │ ├── kpconv_semantickitti.yml │ ├── kpconv_toronto3d.yml │ ├── pointpillars_argoverse.yml │ ├── pointpillars_habitat_sampling.yml │ ├── pointpillars_habitat_sampling_dense_01.yml │ ├── pointpillars_habitat_sampling_dense_01_wd_4.yml │ ├── pointpillars_habitat_sampling_dense_01_wd_4_7500.yml │ ├── pointpillars_habitat_sampling_dense_01_wd_4_mean_box.yml │ ├── pointpillars_habitat_sampling_dense_5000_mean_box.yml │ ├── pointpillars_habitat_sampling_sparse.yml │ ├── pointpillars_habitat_sampling_sparse_001_adam_defaults_0.yml │ ├── pointpillars_habitat_sampling_sparse_001_wd_4.yml │ ├── pointpillars_habitat_sampling_sparse_01.yml │ ├── pointpillars_habitat_sampling_sparse_01_adam_defaults_0.yml │ ├── pointpillars_habitat_sampling_sparse_01_wd_0.yml │ ├── pointpillars_habitat_sampling_sparse_01_wd_005.yml │ ├── pointpillars_habitat_sampling_sparse_01_wd_01.yml │ ├── pointpillars_habitat_sampling_sparse_01_wd_16.yml │ ├── pointpillars_habitat_sampling_sparse_01_wd_2.yml │ ├── pointpillars_habitat_sampling_sparse_01_wd_32.yml │ ├── pointpillars_habitat_sampling_sparse_01_wd_4.yml │ ├── pointpillars_habitat_sampling_sparse_01_wd_4_5000_2x2.yml │ ├── pointpillars_habitat_sampling_sparse_01_wd_4_5000_2x2_mean_box.yml │ ├── pointpillars_habitat_sampling_sparse_01_wd_4_7500.yml │ ├── pointpillars_habitat_sampling_sparse_01_wd_4_7500_2x2.yml │ ├── pointpillars_habitat_sampling_sparse_01_wd_8.yml │ ├── pointpillars_habitat_sampling_sparse_05.yml │ ├── pointpillars_habitat_sampling_sparse_1.yml │ ├── pointpillars_habitat_sampling_sparse_10.yml │ ├── pointpillars_habitat_sampling_sparse_3.yml │ ├── pointpillars_habitat_sampling_sparse_5.yml │ ├── pointpillars_habitat_sampling_sparse_5000_2x2_mean_box.yml │ ├── pointpillars_habitat_sampling_sparse_5000_2x2_mean_box_wd_100.yml │ ├── pointpillars_kitti.yml │ ├── pointpillars_kitti_car_cyclist.yml │ ├── pointpillars_kitti_car_only.yml │ ├── pointpillars_lyft.yml │ ├── pointpillars_nuscenes.yml │ ├── pointpillars_nuscenes_sparse.yml │ ├── pointpillars_waymo.yml │ ├── pointrcnn_kitti.yml │ ├── randlanet_parislille3d.yml │ ├── randlanet_s3dis.yml │ ├── randlanet_semantic3d.yml │ ├── randlanet_semantickitti.yml │ ├── randlanet_toronto3d.yml │ ├── 
sparse_pointpillars_kitti_car_only.yml │ ├── sparse_pointpillars_kitti_car_only_lr_10.yml │ ├── sparse_pointpillars_kitti_car_only_sparse12_dense3.yml │ ├── sparse_pointpillars_kitti_car_only_sparse1_dense23.yml │ ├── sparse_pointpillars_kitti_car_only_wd_100.yml │ ├── sparse_pointpillars_kitti_car_only_wd_100_lr_10.yml │ ├── sparse_pointpillars_kitti_car_only_wide.yml │ └── sparseconvunet_scannet.yml ├── datasets │ ├── __init__.py │ ├── _resources │ │ ├── download_paris_lille3d.sh │ │ ├── download_semantic3d.sh │ │ ├── download_toronto3d.sh │ │ ├── lyft │ │ │ ├── test.txt │ │ │ ├── train.txt │ │ │ └── val.txt │ │ ├── s3dis_annotation_paths.txt │ │ ├── scannet │ │ │ ├── scannetv2-labels.combined.tsv │ │ │ ├── scannetv2_test.txt │ │ │ ├── scannetv2_train.txt │ │ │ └── scannetv2_val.txt │ │ └── semantic-kitti.yaml │ ├── argoverse.py │ ├── augment │ │ ├── __init__.py │ │ └── augmentation.py │ ├── base_dataset.py │ ├── customdataset.py │ ├── habitat_sampling.py │ ├── inference_dummy.py │ ├── kitti.py │ ├── lyft.py │ ├── nuscenes.py │ ├── parislille3d.py │ ├── s3dis.py │ ├── samplers │ │ ├── __init__.py │ │ ├── semseg_random.py │ │ └── semseg_spatially_regular.py │ ├── scannet.py │ ├── semantic3d.py │ ├── semantickitti.py │ ├── shapenet.py │ ├── sunrgbd.py │ ├── toronto3d.py │ ├── utils │ │ ├── __init__.py │ │ ├── bev_box.py │ │ ├── dataprocessing.py │ │ ├── operations.py │ │ ├── semantic-kitti.yaml │ │ └── transforms.py │ └── waymo.py ├── metrics │ ├── __init__.py │ └── mAP.py ├── tf │ ├── __init__.py │ ├── dataloaders │ │ ├── __init__.py │ │ └── tf_dataloader.py │ ├── models │ │ ├── __init__.py │ │ ├── base_model.py │ │ ├── base_model_objdet.py │ │ ├── kpconv.py │ │ ├── network_blocks.py │ │ ├── point_pillars.py │ │ ├── point_rcnn.py │ │ ├── randlanet.py │ │ ├── sparseconvnet.py │ │ └── utils │ │ │ ├── __init__.py │ │ │ └── kernels │ │ │ ├── __init__.py │ │ │ └── kernel_points.py │ ├── modules │ │ ├── __init__.py │ │ ├── losses │ │ │ ├── __init__.py │ │ │ ├── cross_entropy.py │ │ │ ├── focal_loss.py │ │ │ ├── semseg_loss.py │ │ │ └── smooth_L1.py │ │ ├── metrics │ │ │ ├── __init__.py │ │ │ └── semseg_metric.py │ │ ├── optimizers │ │ │ └── __init__.py │ │ ├── pointnet.py │ │ └── schedulers │ │ │ ├── __init__.py │ │ │ ├── bn_momentum_scheduler.py │ │ │ ├── cosine_warmup_scheduler.py │ │ │ └── lr_one_cycle_scheduler.py │ ├── pipelines │ │ ├── __init__.py │ │ ├── base_pipeline.py │ │ ├── object_detection.py │ │ └── semantic_segmentation.py │ └── utils │ │ ├── __init__.py │ │ ├── helper_tf.py │ │ ├── objdet_helper.py │ │ ├── pointnet │ │ ├── pointnet2_modules.py │ │ ├── pointnet2_utils.py │ │ └── tf_utils.py │ │ ├── roipool3d │ │ └── roipool3d_utils.py │ │ └── tf_utils.py ├── torch │ ├── __init__.py │ ├── dataloaders │ │ ├── __init__.py │ │ ├── concat_batcher.py │ │ ├── default_batcher.py │ │ ├── torch_dataloader.py │ │ └── torch_sampler.py │ ├── models │ │ ├── __init__.py │ │ ├── base_model.py │ │ ├── base_model_objdet.py │ │ ├── kpconv.py │ │ ├── point_pillars.py │ │ ├── point_rcnn.py │ │ ├── randlanet.py │ │ ├── sparse_point_pillars.py │ │ ├── sparse_point_pillars_sparse12_dense3.py │ │ ├── sparse_point_pillars_sparse1_dense23.py │ │ ├── sparse_point_pillars_wide.py │ │ └── sparseconvnet.py │ ├── modules │ │ ├── __init__.py │ │ ├── losses │ │ │ ├── __init__.py │ │ │ ├── cross_entropy.py │ │ │ ├── focal_loss.py │ │ │ ├── semseg_loss.py │ │ │ └── smooth_L1.py │ │ ├── metrics │ │ │ ├── __init__.py │ │ │ └── semseg_metric.py │ │ ├── optimizers │ │ │ ├── __init__.py │ │ │ └── optim_wrapper.py │ │ 
├── pointnet.py │ │ └── schedulers │ │ │ ├── __init__.py │ │ │ ├── bn_momentum_scheduler.py │ │ │ ├── cosine_warmup_scheduler.py │ │ │ └── lr_one_cycle_scheduler.py │ ├── pipelines │ │ ├── __init__.py │ │ ├── base_pipeline.py │ │ ├── object_detection.py │ │ └── semantic_segmentation.py │ └── utils │ │ ├── __init__.py │ │ ├── helper_torch.py │ │ ├── objdet_helper.py │ │ ├── pointnet │ │ ├── pointnet2_modules.py │ │ ├── pointnet2_utils.py │ │ └── pytorch_utils.py │ │ ├── roipool3d │ │ ├── __init__.py │ │ └── roipool3d_utils.py │ │ └── torch_utils.py ├── utils │ ├── __init__.py │ ├── builder.py │ ├── config.py │ ├── dataset_helper.py │ ├── log.py │ └── registry.py └── vis │ ├── __init__.py │ ├── boundingbox.py │ ├── colormap.py │ ├── labellut.py │ └── visualizer.py ├── model_zoo.md ├── requirements-tensorflow.txt ├── requirements-torch-cuda.txt ├── requirements-torch.txt ├── requirements.txt ├── scripts ├── README.md ├── collect_bboxes.py ├── demo_api_train.py ├── demo_datasets.py ├── demo_obj_det.py ├── download_datasets │ ├── download_kitti.sh │ ├── download_lyft.sh │ ├── download_parislille3d.sh │ ├── download_semantic3d.sh │ ├── download_semantickitti.sh │ ├── download_shapenet.sh │ ├── download_sunrgbd.sh │ └── download_toronto3d.sh ├── preprocess_argoverse.py ├── preprocess_lyft.py ├── preprocess_nuscenes.py ├── preprocess_scannet.py ├── preprocess_semantic3d.py ├── preprocess_sunrgbd.py ├── preprocess_waymo.py ├── run_pipeline.py └── train_scripts │ ├── kpconv_kitti.sh │ ├── kpconv_paris.sh │ ├── kpconv_s3dis.sh │ ├── kpconv_semantic3d.sh │ ├── kpconv_toronto.sh │ ├── pointpillars_kitti.sh │ ├── randlanet_kitti.sh │ ├── randlanet_paris.sh │ ├── randlanet_s3dis.sh │ ├── randlanet_semantic3d.sh │ └── randlanet_toronto.sh ├── set_open3d_ml_root.sh ├── setup.py ├── tb.sh ├── tensorboard.bash ├── test_cpu_dense.sh ├── test_cpu_sparse.sh ├── test_gpu_dense.sh ├── test_gpu_sparse.sh ├── test_open3d.py ├── test_robot_dense_habitat.sh ├── test_robot_sparse_habitat.sh ├── test_slurm_dense.sh ├── test_slurm_sparse.sh ├── test_xavier_dense_habitat.sh ├── test_xavier_sparse_habitat.sh ├── tests ├── test_integration.py └── test_models.py ├── train_slurm_dense.sh ├── train_slurm_dense_01.sh ├── train_slurm_dense_01_wd_4.sh ├── train_slurm_dense_01_wd_4_5000_mean_box.sh ├── train_slurm_dense_01_wd_4_7500.sh ├── train_slurm_dense_5000_mean_box.sh ├── train_slurm_kitti_dense.sh ├── train_slurm_kitti_sparse.sh ├── train_slurm_kitti_sparse_lr_10.sh ├── train_slurm_kitti_sparse_sparse12_dense3.sh ├── train_slurm_kitti_sparse_sparse1_dense23.sh ├── train_slurm_kitti_sparse_wd_100.sh ├── train_slurm_kitti_sparse_wd_100_lr_10.sh ├── train_slurm_kitti_sparse_wide.sh ├── train_slurm_nuscenes_dense.sh ├── train_slurm_nuscenes_sparse.sh ├── train_slurm_sparse.sh ├── train_slurm_sparse_001_adam_defaults_0.sh ├── train_slurm_sparse_001_wd_4.sh ├── train_slurm_sparse_01.sh ├── train_slurm_sparse_01_adam_defaults_0.sh ├── train_slurm_sparse_01_wd_0.sh ├── train_slurm_sparse_01_wd_005.sh ├── train_slurm_sparse_01_wd_01.sh ├── train_slurm_sparse_01_wd_16.sh ├── train_slurm_sparse_01_wd_2.sh ├── train_slurm_sparse_01_wd_32.sh ├── train_slurm_sparse_01_wd_4.sh ├── train_slurm_sparse_01_wd_4_5000_2x2.sh ├── train_slurm_sparse_01_wd_4_5000_2x2_mean_box.sh ├── train_slurm_sparse_01_wd_4_7500.sh ├── train_slurm_sparse_01_wd_4_7500_2x2.sh ├── train_slurm_sparse_01_wd_8.sh ├── train_slurm_sparse_05.sh ├── train_slurm_sparse_1.sh ├── train_slurm_sparse_10.sh ├── train_slurm_sparse_3.sh ├── train_slurm_sparse_5.sh ├── 
train_slurm_sparse_5000_2x2_mean_box.sh ├── train_slurm_sparse_5000_2x2_mean_box_wd_100.sh └── version.txt /.clang-format: -------------------------------------------------------------------------------- 1 | BasedOnStyle: Google 2 | IndentWidth: 4 3 | ColumnLimit: 80 4 | UseTab: Never 5 | Standard: c++14 6 | ContinuationIndentWidth: 8 7 | AccessModifierOffset: -4 8 | BinPackParameters: false 9 | SortIncludes: true 10 | -------------------------------------------------------------------------------- /.github/config.yml: -------------------------------------------------------------------------------- 1 | # Configuration for update-docs - https://github.com/behaviorbot/update-docs 2 | 3 | # Comment to be posted on PRs that don't update the changelog 4 | updateDocsComment: > 5 | Thanks for submitting this pull request! The maintainers of this repository would appreciate it if you could update the **CHANGELOG.md** based on your changes. 6 | 7 | updateDocsWhiteList: 8 | - documentation 9 | 10 | updateDocsTargetFiles: 11 | - CHANGELOG.md 12 | -------------------------------------------------------------------------------- /.github/workflows/style.yml: -------------------------------------------------------------------------------- 1 | name: Style check 2 | 3 | on: 4 | workflow_dispatch: 5 | push: 6 | branches: 7 | - master 8 | pull_request: 9 | types: [opened, reopened, synchronize] 10 | 11 | jobs: 12 | style-check: 13 | runs-on: ubuntu-18.04 14 | steps: 15 | - name: Checkout source code 16 | uses: actions/checkout@v2 17 | with: 18 | submodules: true 19 | - name: Set up Python version 20 | uses: actions/setup-python@v2 21 | with: 22 | python-version: 3.6 23 | - name: Install dependencies 24 | run: | 25 | sudo apt-get install --yes clang-format-10 26 | python -m pip install -U yapf==0.30.0 nbformat pydocstyle==6.0.0 27 | - name: Run style check 28 | run: | 29 | python ci/check_style.py 30 | - name: Run docstring style check 31 | run: | 32 | pydocstyle --convention=google --add-ignore=D1,D205,D415,D212 . 33 | -------------------------------------------------------------------------------- /.github/workflows/ubuntu.yml: -------------------------------------------------------------------------------- 1 | name: Ubuntu CI 2 | 3 | on: 4 | workflow_dispatch: 5 | push: 6 | branches: 7 | - master 8 | pull_request: 9 | types: [opened, reopened, synchronize] 10 | 11 | jobs: 12 | ubuntu: 13 | runs-on: ubuntu-18.04 14 | strategy: 15 | fail-fast: false 16 | env: 17 | NPROC: 2 18 | steps: 19 | - name: Checkout source code 20 | uses: actions/checkout@v2 21 | with: 22 | submodules: true 23 | - name: Setup cache 24 | uses: actions/cache@v2 25 | with: 26 | # Ref: https://github.com/apache/incubator-mxnet/pull/18459/files 27 | path: ~/.ccache 28 | # We include the commit sha in the cache key, as new cache entries are 29 | # only created if there is no existing entry for the key yet. 30 | key: ${{ runner.os }}-ccache-${{ github.sha }} 31 | # Restore any ccache cache entry, if none for 32 | # ${{ runner.os }}-ccache-${{ github.sha }} exists. 33 | # Common prefix will be used so that ccache can be used across commits. 34 | restore-keys: | 35 | ${{ runner.os }}-ccache 36 | - name: Set up Python version 37 | uses: actions/setup-python@v2 38 | with: 39 | python-version: 3.6 40 | # Pre-installed 18.04 packages: https://git.io/JfHmW 41 | - name: Install ccache 42 | run: | 43 | sudo apt-get --yes install ccache 44 | ccache -M 2G # GitHub's total cache limit is 5GB for all OSes.
45 | - name: Config and build 46 | run: | 47 | PATH=/usr/lib/ccache:$PATH 48 | ccache -s 49 | ./ci/run_ci.sh 50 | ccache -s 51 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .vscode/ 2 | .DS_Store 3 | __pycache__ 4 | # Distribution / packaging 5 | .Python 6 | env/ 7 | build/ 8 | develop-eggs/ 9 | dist/ 10 | downloads/ 11 | eggs/ 12 | .eggs/ 13 | **/lib/ 14 | **/build/ 15 | **/lib64/ 16 | *.egg-info 17 | .installed.cfg 18 | *.egg 19 | *linux-gnu.so 20 | *darwin.so 21 | *.so 22 | # 23 | logs/ 24 | test/ 25 | test_kpconv/ 26 | kernels/ 27 | **/.fuse* 28 | train_log/ 29 | -------------------------------------------------------------------------------- /.style.yapf: -------------------------------------------------------------------------------- 1 | [style] 2 | based_on_style = google 3 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | The MIT License (MIT) 3 | 4 | Open3D: www.open3d.org 5 | Copyright (c) 2020 www.open3d.org 6 | 7 | Permission is hereby granted, free of charge, to any person obtaining a copy 8 | of this software and associated documentation files (the "Software"), to deal 9 | in the Software without restriction, including without limitation the rights 10 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 11 | copies of the Software, and to permit persons to whom the Software is 12 | furnished to do so, subject to the following conditions: 13 | 14 | The above copyright notice and this permission notice shall be included in 15 | all copies or substantial portions of the Software. 16 | 17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 18 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 19 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 20 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 21 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 22 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 23 | THE SOFTWARE. 24 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | all: 2 | 3 | check-style: 4 | python ./ci/check_style.py 5 | 6 | apply-style: 7 | python ./ci/check_style.py --do_apply_style 8 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Sparse PointPillars 2 | 3 | This is the official repo for our implementation of _Sparse PointPillars: Maintaining and Exploiting Input Sparsity to Improve Runtime on Embedded Systems_, accepted to IROS 2022. 4 | 5 | It is based on the [Open3D-ML](https://github.com/isl-org/Open3D-ML) codebase. 6 | 7 | ## Datasets Used 8 | 9 | For our experiments we used [KITTI](http://www.cvlibs.net/datasets/kitti/), a standard 3D object detection dataset, and [Matterport-Chair](https://github.com/kylevedder/MatterportDataSampling), a 3D chair detection dataset generated from multiple houses in the [Matterport3D](https://niessner.github.io/Matterport/) dataset.
Model weights and data files are available on our [project page](https://vedder.io/sparse_point_pillars). 10 | 11 | ## Citation 12 | 13 | Please cite our work ([pdf](https://arxiv.org/abs/2106.06882)) if you use Sparse PointPillars. 14 | 15 | ```bib 16 | @article{Vedder2022, 17 | author = {Kyle Vedder and Eric Eaton}, 18 | title = {{Sparse PointPillars: Maintaining and Exploiting Input Sparsity to Improve Runtime on Embedded Systems}}, 19 | journal = {International Conference on Intelligent Robots and Systems (IROS)}, 20 | year = {2022}, 21 | } 22 | ``` 23 | -------------------------------------------------------------------------------- /__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kylevedder/SparsePointPillars/ebe2960b1d2591a031f50a5d5782595dd1b4e8e4/__init__.py -------------------------------------------------------------------------------- /ci/run_ci.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # The following environment variables are required: 4 | # - NPROC 5 | # 6 | TENSORFLOW_VER="2.4.1" 7 | TORCH_GLNX_VER="1.7.1+cpu" 8 | YAPF_VER="0.30.0" 9 | 10 | set -euo pipefail 11 | 12 | # 1. Prepare the Open3D-ML repo and install dependencies 13 | export PATH_TO_OPEN3D_ML=$(pwd) 14 | # the build system of the main repo expects a master branch. make sure master exists 15 | git checkout -b master || true 16 | pip install -r requirements.txt 17 | echo $PATH_TO_OPEN3D_ML 18 | cd .. 19 | python -m pip install -U Cython 20 | 21 | 22 | # 23 | # 2. clone Open3D and install dependencies 24 | # 25 | git clone --recursive --branch master https://github.com/isl-org/Open3D.git 26 | 27 | ./Open3D/util/install_deps_ubuntu.sh assume-yes 28 | python -m pip install -U tensorflow==$TENSORFLOW_VER 29 | python -m pip install -U torch==${TORCH_GLNX_VER} -f https://download.pytorch.org/whl/torch_stable.html 30 | python -m pip install -U pytest 31 | python -m pip install -U yapf=="$YAPF_VER" 32 | 33 | # 34 | # 3. Configure for bundling the Open3D-ML part 35 | # 36 | mkdir Open3D/build 37 | pushd Open3D/build 38 | cmake -DBUNDLE_OPEN3D_ML=ON \ 39 | -DOPEN3D_ML_ROOT=$PATH_TO_OPEN3D_ML \ 40 | -DBUILD_TENSORFLOW_OPS=ON \ 41 | -DBUILD_PYTORCH_OPS=ON \ 42 | -DBUILD_GUI=OFF \ 43 | -DBUILD_RPC_INTERFACE=OFF \ 44 | -DBUILD_UNIT_TESTS=OFF \ 45 | -DBUILD_BENCHMARKS=OFF \ 46 | -DBUILD_EXAMPLES=OFF \ 47 | .. 48 | 49 | # 4. Build and install wheel 50 | make -j"$NPROC" install-pip-package 51 | 52 | # 53 | # 5. run examples/tests in the Open3D-ML repo outside of the repo directory to 54 | # make sure that the installed package works. 55 | # 56 | popd 57 | mkdir test_workdir 58 | pushd test_workdir 59 | mv $PATH_TO_OPEN3D_ML/tests . 
60 | pytest tests/test_integration.py 61 | pytest tests/test_models.py 62 | 63 | # now do the same but in dev mode by setting OPEN3D_ML_ROOT 64 | export OPEN3D_ML_ROOT=$PATH_TO_OPEN3D_ML 65 | pytest tests/test_integration.py 66 | pytest tests/test_models.py 67 | unset OPEN3D_ML_ROOT 68 | 69 | popd 70 | -------------------------------------------------------------------------------- /data/demo/fragment.pcd: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kylevedder/SparsePointPillars/ebe2960b1d2591a031f50a5d5782595dd1b4e8e4/data/demo/fragment.pcd -------------------------------------------------------------------------------- /data/demo/fragment.ply: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kylevedder/SparsePointPillars/ebe2960b1d2591a031f50a5d5782595dd1b4e8e4/data/demo/fragment.ply -------------------------------------------------------------------------------- /docker/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nvidia/cuda:11.1.1-cudnn8-devel-ubuntu20.04 2 | SHELL ["/bin/bash", "-c"] 3 | RUN nvcc --version 4 | 5 | # Set the timezone info because otherwise tzinfo blocks install 6 | # flow and ignores the non-interactive frontend command 🤬🤬🤬 7 | RUN ln -snf /usr/share/zoneinfo/America/New_York /etc/localtime && echo "/usr/share/zoneinfo/America/New_York" > /etc/timezone 8 | 9 | # Core system packages 10 | RUN apt-get update --fix-missing 11 | RUN apt install -y software-properties-common wget curl gpg gcc git make 12 | 13 | # Install miniconda to /miniconda 14 | RUN curl -LO http://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh 15 | RUN bash Miniconda3-latest-Linux-x86_64.sh -p /miniconda -b 16 | RUN rm Miniconda3-latest-Linux-x86_64.sh 17 | ENV PATH=/miniconda/bin:${PATH} 18 | RUN conda update -y conda 19 | 20 | # Additional dev packages 21 | RUN apt install -y --no-install-recommends libssl-dev libmodule-install-perl libboost-all-dev libgl1-mesa-dev libopenblas-dev 22 | 23 | # Install torch prerequisites 24 | RUN conda install python=3.8 25 | RUN conda install pip 26 | RUN conda install cudatoolkit=11.1 -c conda-forge 27 | RUN pip install open3d 28 | RUN python -c "import open3d as o3d" 29 | RUN pip install https://github.com/isl-org/open3d_downloads/releases/download/torch1.7.1/torch-1.7.1-cp38-cp38-linux_x86_64.whl 30 | RUN python -c "import torch; print(torch.__version__)" 31 | RUN conda install tensorboard matplotlib 32 | ENV TORCH_CUDA_ARCH_LIST="Ampere" 33 | RUN pip install -U git+https://github.com/NVIDIA/MinkowskiEngine --install-option="--force_cuda" --install-option="--blas=openblas" -v --no-deps 34 | RUN pip install pyntcloud 35 | 36 | ENV OPEN3D_ML_ROOT /project 37 | WORKDIR /project 38 | -------------------------------------------------------------------------------- /docker/Dockerfile.cpu: -------------------------------------------------------------------------------- 1 | FROM ubuntu:20.04 2 | SHELL ["/bin/bash", "-c"] 3 | 4 | # Set the timezone info because otherwise tzinfo blocks install 5 | # flow and ignores the non-interactive frontend command 🤬🤬🤬 6 | RUN ln -snf /usr/share/zoneinfo/America/New_York /etc/localtime && echo "/usr/share/zoneinfo/America/New_York" > /etc/timezone 7 | 8 | # Core system packages 9 | RUN apt-get update --fix-missing 10 | RUN apt install -y software-properties-common wget curl gpg gcc git make 11 | 12 | # Install miniconda to 
/miniconda 13 | RUN curl -LO http://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh 14 | RUN bash Miniconda3-latest-Linux-x86_64.sh -p /miniconda -b 15 | RUN rm Miniconda3-latest-Linux-x86_64.sh 16 | ENV PATH=/miniconda/bin:${PATH} 17 | RUN conda update -y conda 18 | 19 | # Additional dev packages 20 | RUN apt install -y --no-install-recommends build-essential python3-dev libssl-dev libmodule-install-perl libboost-all-dev libgl1-mesa-dev libopenblas-dev 21 | 22 | RUN conda install python=3.8 pip tensorboard 23 | RUN pip install https://github.com/isl-org/open3d_downloads/releases/download/torch1.7.1/torch-1.7.1-cp38-cp38-linux_x86_64.whl 24 | RUN pip install open3d pyntcloud 25 | RUN pip install -U git+https://github.com/NVIDIA/MinkowskiEngine --install-option="--cpu_only" --install-option="--blas=openblas" -v --no-deps 26 | 27 | ENV OPEN3D_ML_ROOT /project 28 | WORKDIR /project 29 | -------------------------------------------------------------------------------- /docker/Dockerfile.jetson: -------------------------------------------------------------------------------- 1 | FROM nvcr.io/nvidia/l4t-pytorch:r32.6.1-pth1.9-py3 2 | SHELL ["/bin/bash", "-c"] 3 | RUN git clone --recursive https://github.com/intel-isl/Open3D 4 | WORKDIR Open3D 5 | RUN apt update 6 | RUN apt install -y apt-utils 7 | RUN SUDO=" " util/install_deps_ubuntu.sh assume-yes 8 | 9 | RUN apt update --fix-missing 10 | RUN apt install -y software-properties-common wget curl gpg gcc git make libssl-dev 11 | 12 | # Install miniconda to /miniconda 13 | #RUN curl -LO http://repo.continuum.io/miniconda/Miniconda3-latest-Linux-aarch64.sh 14 | #RUN bash Miniconda3-latest-Linux-aarch64.sh -p /miniconda -b 15 | #RUN rm Miniconda3-latest-Linux-aarch64.sh 16 | #ENV PATH=/miniconda/bin:${PATH} 17 | #RUN conda update -y conda 18 | 19 | #RUN conda install cmake 20 | 21 | # Install CMake 22 | RUN apt purge -y --auto-remove cmake 23 | RUN wget -O - https://apt.kitware.com/keys/kitware-archive-latest.asc 2>/dev/null | gpg --dearmor - | tee /etc/apt/trusted.gpg.d/kitware.gpg >/dev/null 24 | RUN apt-add-repository 'deb https://apt.kitware.com/ubuntu/ bionic main' 25 | RUN lsb_release -a 26 | RUN apt update 27 | RUN apt install -y cmake locate 28 | 29 | RUN mkdir build 30 | WORKDIR build 31 | RUN ls 32 | RUN ln -s /usr/local/cuda/targets/aarch64-linux/lib /usr/local/cuda/targets/aarch64-linux/lib64 33 | #RUN ls /usr/local/cuda/targets/aarch64-linux/lib64 34 | #RUN locate libcudart.so 35 | RUN which nvcc 36 | RUN nvcc --version 37 | RUN cmake\ 38 | -DBUILD_GUI=OFF\ 39 | -DBUILD_UNIT_TESTS=OFF\ 40 | -DCMAKE_BUILD_TYPE=Release\ 41 | ..
42 | # -DBUILD_CUDA_MODULE=OFF\ 43 | # -DBUILD_TENSORFLOW_OPS=OFF\ 44 | # -DBUILD_PYTORCH_OPS=ON\ 45 | 46 | RUN make -j`nproc` 47 | RUN make install 48 | RUN make install-pip-package 49 | ENV LD_LIBRARY_PATH /usr/local/cuda/lib64${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}} 50 | #RUN python3 -c "import torch; print(torch.__version__)" 51 | #RUN python3 -c "import open3d; print(open3d.__version__)" 52 | 53 | ENV TORCH_CUDA_ARCH_LIST="Turing" 54 | #RUN cat ~/.bashrc 55 | RUN pip3 install -U git+https://github.com/NVIDIA/MinkowskiEngine --install-option="--force_cuda" --install-option="--blas=openblas" -v --no-deps 56 | 57 | -------------------------------------------------------------------------------- /docs/images/getting_started_ml_visualizer.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kylevedder/SparsePointPillars/ebe2960b1d2591a031f50a5d5782595dd1b4e8e4/docs/images/getting_started_ml_visualizer.gif -------------------------------------------------------------------------------- /docs/images/visualizer_BoundingBoxes.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kylevedder/SparsePointPillars/ebe2960b1d2591a031f50a5d5782595dd1b4e8e4/docs/images/visualizer_BoundingBoxes.png -------------------------------------------------------------------------------- /docs/images/visualizer_custom_lut.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kylevedder/SparsePointPillars/ebe2960b1d2591a031f50a5d5782595dd1b4e8e4/docs/images/visualizer_custom_lut.png -------------------------------------------------------------------------------- /docs/images/visualizer_int_attr.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kylevedder/SparsePointPillars/ebe2960b1d2591a031f50a5d5782595dd1b4e8e4/docs/images/visualizer_int_attr.png -------------------------------------------------------------------------------- /docs/images/visualizer_predictions.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kylevedder/SparsePointPillars/ebe2960b1d2591a031f50a5d5782595dd1b4e8e4/docs/images/visualizer_predictions.gif -------------------------------------------------------------------------------- /docs/images/visualizer_random_color_attr.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kylevedder/SparsePointPillars/ebe2960b1d2591a031f50a5d5782595dd1b4e8e4/docs/images/visualizer_random_color_attr.png -------------------------------------------------------------------------------- /docs/tutorial/notebook/index.rst: -------------------------------------------------------------------------------- 1 | .. _jupyter_notebooks: 2 | 3 | Jupyter Notebooks 4 | =================================================================== 5 | 6 | The following tutorials demonstrate how you can create Jupyter Notebooks that train a semantic segmentation model using TensorFlow and PyTorch. 7 | 8 | ..
toctree:: 9 | 10 | train_ss_model_using_pytorch 11 | train_ss_model_using_tensorflow 12 | add_own_dataset.rst 13 | 14 | -------------------------------------------------------------------------------- /examples/demo_data/labels/000700.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kylevedder/SparsePointPillars/ebe2960b1d2591a031f50a5d5782595dd1b4e8e4/examples/demo_data/labels/000700.npy -------------------------------------------------------------------------------- /examples/demo_data/labels/000750.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kylevedder/SparsePointPillars/ebe2960b1d2591a031f50a5d5782595dd1b4e8e4/examples/demo_data/labels/000750.npy -------------------------------------------------------------------------------- /examples/demo_data/points/000700.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kylevedder/SparsePointPillars/ebe2960b1d2591a031f50a5d5782595dd1b4e8e4/examples/demo_data/points/000700.npy -------------------------------------------------------------------------------- /examples/demo_data/points/000750.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kylevedder/SparsePointPillars/ebe2960b1d2591a031f50a5d5782595dd1b4e8e4/examples/demo_data/points/000750.npy -------------------------------------------------------------------------------- /ml3d/__init__.py: -------------------------------------------------------------------------------- 1 | __all__ = [] 2 | -------------------------------------------------------------------------------- /ml3d/configs/__init__.py: -------------------------------------------------------------------------------- 1 | """Config files for ml3d.""" 2 | -------------------------------------------------------------------------------- /ml3d/configs/default_cfgs/kpconv.yml: -------------------------------------------------------------------------------- 1 | name: KPFCNN 2 | ckpt_path: # path/to/your/checkpoint 3 | KP_extent: 1.2 4 | KP_influence: linear 5 | aggregation_mode: sum 6 | architecture: 7 | - simple 8 | - resnetb 9 | - resnetb_strided 10 | - resnetb 11 | - resnetb 12 | - resnetb_strided 13 | - resnetb 14 | - resnetb 15 | - resnetb_strided 16 | - resnetb 17 | - resnetb 18 | - resnetb_strided 19 | - resnetb 20 | - nearest_upsample 21 | - unary 22 | - nearest_upsample 23 | - unary 24 | - nearest_upsample 25 | - unary 26 | - nearest_upsample 27 | - unary 28 | augment_color: 0.8 29 | augment_noise: 0.001 30 | augment_rotation: vertical 31 | augment_scale_anisotropic: true 32 | augment_scale_max: 1.2 33 | augment_scale_min: 0.8 34 | augment_symmetries: 35 | - true 36 | - false 37 | - false 38 | batch_limit: 50000 39 | batch_norm_momentum: 0.02 40 | batch_num: 30 41 | batcher: ConcatBatcher 42 | conv_radius: 2.5 43 | deform_fitting_mode: point2point 44 | deform_fitting_power: 1.0 45 | deform_radius: 6.0 46 | density_parameter: 5.0 47 | first_features_dim: 128 48 | first_subsampling_dl: 0.06 49 | fixed_kernel_points: center 50 | ignored_label_inds: [] 51 | in_features_dim: 2 # with colors : 5, without colors : 2 52 | in_points_dim: 3 53 | in_radius: 4.0 54 | lbl_values: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] 55 | max_in_points: 20000 56 | modulated: false 57 | num_classes: 19 # number of valid classes. 
58 | num_kernel_points: 15 59 | num_layers: 5 60 | repulse_extent: 1.2 61 | use_batch_norm: true 62 | val_batch_num: 30 63 | -------------------------------------------------------------------------------- /ml3d/configs/default_cfgs/parislille3d.yml: -------------------------------------------------------------------------------- 1 | name: ParisLille3D 2 | dataset_path: # path/to/your/dataset 3 | cache_dir: ./logs/cache 4 | class_weights: [65075320, 33014819, 656096, 61715, 296523, 4052947, 172132, 4212295, 10599237] 5 | ignored_label_inds: 6 | - 0 7 | num_points: 65536 8 | test_result_folder: ./test 9 | val_files: 10 | - Lille2.ply 11 | use_cache: False 12 | -------------------------------------------------------------------------------- /ml3d/configs/default_cfgs/randlanet.yml: -------------------------------------------------------------------------------- 1 | name: RandLANet 2 | batcher: DefaultBatcher 3 | ckpt_path: # path/to/your/checkpoint 4 | dim_feature: 8 5 | dim_input: 3 # 3 + feature dimension. 6 | dim_output: 7 | - 16 8 | - 64 9 | - 128 10 | - 256 11 | grid_size: 0.06 12 | ignored_label_inds: [] 13 | k_n: 16 14 | num_classes: 19 15 | num_layers: 4 16 | num_points: 45056 17 | sub_sampling_ratio: 18 | - 4 19 | - 4 20 | - 4 21 | - 4 -------------------------------------------------------------------------------- /ml3d/configs/default_cfgs/s3dis.yml: -------------------------------------------------------------------------------- 1 | name: S3DIS 2 | dataset_path: # path/to/your/dataset 3 | cache_dir: ./logs/cache 4 | class_weights: [3370714, 2856755, 4919229, 318158, 375640, 5 | 478001, 974733, 650464, 791496, 88727, 1284130, 229758, 2272837] 6 | ignored_label_inds: [] 7 | num_points: 40960 8 | test_area_idx: 3 9 | test_result_folder: ./test 10 | use_cache: False 11 | -------------------------------------------------------------------------------- /ml3d/configs/default_cfgs/semantic3d.yml: -------------------------------------------------------------------------------- 1 | name: Semantic3D 2 | dataset_path: # path/to/your/dataset 3 | cache_dir: .logs/cache/ 4 | class_weights: [5181602, 5012952, 6830086, 1311528, 10476365, 946982, 334860, 269353, 269353] 5 | ignored_label_inds: 6 | - 0 7 | num_points: 65536 8 | test_result_folder: ./test 9 | use_cache: true 10 | val_files: 11 | - bildstein_station3_xyz_intensity_rgb 12 | - sg27_station2_intensity_rgb 13 | -------------------------------------------------------------------------------- /ml3d/configs/default_cfgs/semantic_segmentation.yml: -------------------------------------------------------------------------------- 1 | adam_lr: 0.01 2 | batch_size: 2 3 | val_batch_size: 1 4 | test_batch_size: 1 5 | max_epoch: 100 6 | learning_rate: 0.01 7 | lr_decays: 0.95 8 | deform_lr_factor: 0.1 9 | save_ckpt_freq: 5 10 | scheduler_gamma: 0.95 11 | momentum: 0.98 12 | main_log_dir: ./logs 13 | train_sum_dir: train_log 14 | device: gpu 15 | -------------------------------------------------------------------------------- /ml3d/configs/default_cfgs/semantickitti.yml: -------------------------------------------------------------------------------- 1 | name: SemanticKITTI 2 | dataset_path: # path/to/your/dataset 3 | cache_dir: ./logs/cache 4 | use_cache: false 5 | class_weights: [55437630, 320797, 541736, 2578735, 3274484, 552662, 6 | 184064, 78858, 240942562, 17294618, 170599734, 6369672, 230413074, 101130274, 7 | 476491114, 9833174, 129609852, 4506626, 1168181] 8 | test_result_folder: ./test
9 | test_split: ['11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21'] 10 | training_split: ['00', '01', '02', '03', '04', '05', '06', '07', '09', '10'] 11 | validation_split: ['08'] 12 | all_split: ['00', '01', '02', '03', '04', '05', '06', '07', '09', 13 | '08', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21'] 14 | -------------------------------------------------------------------------------- /ml3d/configs/default_cfgs/shapenet.yml: -------------------------------------------------------------------------------- 1 | name: ShapeNet 2 | dataset_path: # path/to/your/dataset 3 | cache_dir: .logs/cache/ 4 | class_weights: [2690, 76, 55, 1824, 3746, 69, 787, 392, 1546, 445, 202, 184, 275, 66, 152, 5266] 5 | ignored_label_inds: [] 6 | num_points: 17775 7 | test_result_folder: ./test 8 | use_cache: false 9 | -------------------------------------------------------------------------------- /ml3d/configs/default_cfgs/toronto3d.yml: -------------------------------------------------------------------------------- 1 | name: Toronto3D 2 | dataset_path: # path/to/your/dataset 3 | cache_dir: ./logs/cache 4 | class_weights: [41697357, 1745448, 6572572, 19136493, 674897, 897825, 4634634, 374721] 5 | ignored_label_inds: 6 | - 0 7 | num_points: 65536 8 | test_files: 9 | - L002.ply 10 | test_result_folder: ./test 11 | train_files: 12 | - L001.ply 13 | - L003.ply 14 | - L004.ply 15 | val_files: 16 | - L002.ply 17 | use_cache: False 18 | -------------------------------------------------------------------------------- /ml3d/configs/kpconv_parislille3d.yml: -------------------------------------------------------------------------------- 1 | dataset: 2 | name: ParisLille3D 3 | dataset_path: # path/to/your/dataset 4 | cache_dir: ./logs/cache 5 | class_weights: [65075320, 33014819, 656096, 61715, 296523, 4052947, 172132,4212295, 10599237] 6 | ignored_label_inds: 7 | - 0 8 | test_result_folder: ./test 9 | val_files: 10 | - Lille2.ply 11 | use_cache: True 12 | steps_per_epoch_train: 40 13 | steps_per_epoch_valid: 10 14 | sampler: 15 | name: SemSegSpatiallyRegularSampler 16 | model: 17 | name: KPFCNN 18 | ckpt_path: # path/to/your/checkpoint 19 | KP_extent: 1.0 20 | KP_influence: linear 21 | aggregation_mode: sum 22 | architecture: [ 23 | 'simple', 24 | 'resnetb', 25 | 'resnetb_strided', 26 | 'resnetb', 27 | 'resnetb_strided', 28 | 'resnetb_deformable', 29 | 'resnetb_deformable_strided', 30 | 'resnetb_deformable', 31 | 'resnetb_deformable_strided', 32 | 'resnetb_deformable', 33 | 'nearest_upsample', 34 | 'unary', 35 | 'nearest_upsample', 36 | 'unary', 37 | 'nearest_upsample', 38 | 'unary', 39 | 'nearest_upsample', 40 | 'unary'] 41 | reduce_fc: true 42 | augment_color: 0.8 43 | augment_noise: 0.01 44 | augment_rotation: vertical 45 | augment_scale_anisotropic: true 46 | augment_scale_max: 1.1 47 | augment_scale_min: 0.9 48 | augment_symmetries: 49 | - true 50 | - false 51 | - false 52 | batch_limit: 20000 53 | grad_clip_norm: 100.0 54 | batch_norm_momentum: 0.98 55 | batcher: ConcatBatcher 56 | conv_radius: 2.5 57 | deform_fitting_mode: point2point 58 | deform_fitting_power: 1.0 59 | deform_radius: 6.0 60 | density_parameter: 5.0 61 | first_features_dim: 128 62 | first_subsampling_dl: 0.08 63 | fixed_kernel_points: center 64 | ignored_label_inds: [0] 65 | in_features_dim: 1 66 | in_points_dim: 3 67 | l_relu: 0.2 68 | in_radius: 4.0 69 | lbl_values: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] 70 | min_in_points: 10000 71 | max_in_points: 17000 72 | modulated: false 73 | num_classes: 9 74 | 
num_kernel_points: 15 75 | num_layers: 5 76 | repulse_extent: 1.2 77 | use_batch_norm: true 78 | pipeline: 79 | name: SemanticSegmentation 80 | adam_lr: 0.01 81 | momentum: 0.98 82 | batch_size: 1 83 | learning_rate: 0.01 84 | lr_decays: 0.98477 85 | deform_lr_factor: 0.1 86 | main_log_dir: ./logs 87 | max_epoch: 100 88 | save_ckpt_freq: 5 89 | scheduler_gamma: 0.95 90 | test_batch_size: 1 91 | train_sum_dir: train_log 92 | val_batch_size: 1 93 | weight_decay: 0.001 94 | -------------------------------------------------------------------------------- /ml3d/configs/kpconv_s3dis.yml: -------------------------------------------------------------------------------- 1 | dataset: 2 | name: S3DIS 3 | dataset_path: # path/to/your/dataset 4 | cache_dir: ./logs/cache 5 | class_weights: [3370714, 2856755, 4919229, 318158, 375640, 6 | 478001, 974733, 650464, 791496, 88727, 1284130, 229758, 2272837] 7 | ignored_label_inds: [] 8 | num_points: 40960 9 | test_area_idx: 3 10 | test_result_folder: ./test 11 | use_cache: false 12 | model: 13 | name: KPFCNN 14 | ckpt_path: # path/to/your/checkpoint 15 | KP_extent: 1.2 16 | KP_influence: linear 17 | aggregation_mode: sum 18 | architecture: 19 | - simple 20 | - resnetb 21 | - resnetb_strided 22 | - resnetb 23 | - resnetb 24 | - resnetb_strided 25 | - resnetb 26 | - resnetb 27 | - resnetb_strided 28 | - resnetb 29 | - resnetb 30 | - resnetb_strided 31 | - resnetb 32 | - nearest_upsample 33 | - unary 34 | - nearest_upsample 35 | - unary 36 | - nearest_upsample 37 | - unary 38 | - nearest_upsample 39 | - unary 40 | augment_color: 1.0 41 | augment_noise: 0.001 42 | augment_rotation: vertical 43 | augment_scale_anisotropic: true 44 | augment_scale_max: 1.1 45 | augment_scale_min: 0.9 46 | augment_symmetries: 47 | - true 48 | - false 49 | - false 50 | batch_limit: 20000 51 | batch_norm_momentum: 0.98 52 | batcher: ConcatBatcher 53 | conv_radius: 2.5 54 | deform_fitting_mode: point2point 55 | deform_fitting_power: 1.0 56 | deform_radius: 6.0 57 | density_parameter: 5.0 58 | first_features_dim: 128 59 | first_subsampling_dl: 0.04 60 | fixed_kernel_points: center 61 | ignored_label_inds: [] 62 | in_features_dim: 5 63 | in_points_dim: 3 64 | in_radius: 1.5 65 | lbl_values: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12] 66 | max_in_points: 20000 67 | modulated: false 68 | num_classes: 13 69 | num_kernel_points: 15 70 | num_layers: 5 71 | repulse_extent: 1.2 72 | use_batch_norm: true 73 | t_normalize: 74 | method: linear 75 | normalize_points: False 76 | feat_bias: 0 77 | feat_scale: 255 78 | pipeline: 79 | name: SemanticSegmentation 80 | adam_lr: 0.01 81 | batch_size: 4 82 | learning_rate: 0.01 83 | lr_decays: 0.98477 84 | deform_lr_factor: 0.1 85 | main_log_dir: ./logs 86 | max_epoch: 800 87 | momentum: 0.98 88 | save_ckpt_freq: 10 89 | scheduler_gamma: 0.98477 90 | test_batch_size: 4 91 | train_sum_dir: train_log 92 | val_batch_size: 4 93 | weight_decay: 0.001 94 | -------------------------------------------------------------------------------- /ml3d/configs/pointpillars_habitat_sampling.yml: -------------------------------------------------------------------------------- 1 | dataset: 2 | name: HabitatSampling 3 | dataset_path: # path/to/your/dataset 4 | cache_dir: ./logs/cache 5 | steps_per_epoch_train: 5000 6 | 7 | model: 8 | name: PointPillars 9 | ckpt_path: # path/to/your/checkpoint 10 | 11 | batcher: "ignore" 12 | 13 | point_cloud_range: [0, -10.0, -2, 10, 10, 2] 14 | classes: ['chair'] 15 | 16 | loss: 17 | focal: 18 | gamma: 2.0 19 | alpha: 0.25 20 | loss_weight: 1.0 
21 | smooth_l1: 22 | beta: 0.11 23 | loss_weight: 2.0 24 | cross_entropy: 25 | loss_weight: 0.2 26 | 27 | voxelize: 28 | max_num_points: 32 29 | voxel_size: &vsize 30 | [0.05, 0.05, 4] 31 | max_voxels: [16000, 40000] 32 | 33 | voxel_encoder: 34 | in_channels: 3 35 | feat_channels: [64] 36 | voxel_size: *vsize 37 | 38 | scatter: 39 | in_channels: 64 40 | output_shape: [496, 432] 41 | 42 | backbone: 43 | in_channels: 64 44 | out_channels: [64, 128, 256] 45 | layer_nums: [3, 5, 5] 46 | layer_strides: [2, 2, 2] 47 | 48 | neck: 49 | in_channels: [64, 128, 256] 50 | out_channels: [128, 128, 128] 51 | upsample_strides: [1, 2, 4] 52 | use_conv_for_no_stride: false 53 | 54 | head: 55 | in_channels: 384 56 | feat_channels: 384 57 | nms_pre: 100 58 | score_thr: 0.1 59 | ranges: [ 60 | [0, -10.0, 0, 10, 10, 0] 61 | ] 62 | sizes: [[0.8, 0.8, 0.95]] 63 | rotations: [0, 1.57] 64 | iou_thr: [[0.35, 0.5]] 65 | 66 | augment: 67 | PointShuffle: True 68 | ObjectRangeFilter: True 69 | ObjectSample: 70 | min_points_dict: 71 | chair: 10 72 | sample_dict: 73 | chair: 10 74 | 75 | 76 | pipeline: 77 | name: ObjectDetection 78 | test_compute_metric: true 79 | batch_size: 6 80 | val_batch_size: 1 81 | test_batch_size: 1 82 | save_ckpt_freq: 2 83 | validation_freq: 5 84 | max_epoch: 800 85 | main_log_dir: ./logs 86 | train_sum_dir: train_log 87 | grad_clip_norm: 2 88 | 89 | optimizer: 90 | lr: 0.001 91 | betas: [0.95, 0.99] 92 | weight_decay: 0.01 93 | 94 | # evaluation properties 95 | overlaps: [0.5] 96 | similar_classes: {} 97 | difficulties: [0, 1, 2] 98 | 99 | -------------------------------------------------------------------------------- /ml3d/configs/pointpillars_habitat_sampling_dense_01.yml: -------------------------------------------------------------------------------- 1 | dataset: 2 | name: HabitatSampling 3 | dataset_path: # path/to/your/dataset 4 | cache_dir: ./logs/cache 5 | steps_per_epoch_train: 5000 6 | 7 | model: 8 | name: PointPillars 9 | ckpt_path: # path/to/your/checkpoint 10 | 11 | batcher: "ignore" 12 | 13 | point_cloud_range: [0, -10.0, -2, 10, 10, 2] 14 | classes: ['chair'] 15 | 16 | loss: 17 | focal: 18 | gamma: 2.0 19 | alpha: 0.25 20 | loss_weight: 1.0 21 | smooth_l1: 22 | beta: 0.11 23 | loss_weight: 2.0 24 | cross_entropy: 25 | loss_weight: 0.2 26 | 27 | voxelize: 28 | max_num_points: 32 29 | voxel_size: &vsize 30 | [0.05, 0.05, 4] 31 | max_voxels: [16000, 40000] 32 | 33 | voxel_encoder: 34 | in_channels: 3 35 | feat_channels: [64] 36 | voxel_size: *vsize 37 | 38 | scatter: 39 | in_channels: 64 40 | output_shape: [496, 432] 41 | 42 | backbone: 43 | in_channels: 64 44 | out_channels: [64, 128, 256] 45 | layer_nums: [3, 5, 5] 46 | layer_strides: [2, 2, 2] 47 | 48 | neck: 49 | in_channels: [64, 128, 256] 50 | out_channels: [128, 128, 128] 51 | upsample_strides: [1, 2, 4] 52 | use_conv_for_no_stride: false 53 | 54 | head: 55 | in_channels: 384 56 | feat_channels: 384 57 | nms_pre: 100 58 | score_thr: 0.1 59 | ranges: [ 60 | [0, -10.0, 0, 10, 10, 0] 61 | ] 62 | sizes: [[0.8, 0.8, 0.95]] 63 | rotations: [0, 1.57] 64 | iou_thr: [[0.35, 0.5]] 65 | 66 | augment: 67 | PointShuffle: True 68 | ObjectRangeFilter: True 69 | ObjectSample: 70 | min_points_dict: 71 | chair: 10 72 | sample_dict: 73 | chair: 10 74 | 75 | 76 | pipeline: 77 | name: ObjectDetection 78 | test_compute_metric: true 79 | batch_size: 6 80 | val_batch_size: 1 81 | test_batch_size: 1 82 | save_ckpt_freq: 2 83 | validation_freq: 5 84 | max_epoch: 800 85 | main_log_dir: ./logs_dense_01 86 | train_sum_dir: train_log_dense_01 87 | 
grad_clip_norm: 2 88 | 89 | optimizer: 90 | lr: 0.0001 91 | betas: [0.95, 0.99] 92 | weight_decay: 0.01 93 | 94 | # evaluation properties 95 | overlaps: [0.5] 96 | similar_classes: {} 97 | difficulties: [0, 1, 2] 98 | 99 | -------------------------------------------------------------------------------- /ml3d/configs/pointpillars_habitat_sampling_dense_01_wd_4.yml: -------------------------------------------------------------------------------- 1 | dataset: 2 | name: HabitatSampling 3 | dataset_path: # path/to/your/dataset 4 | cache_dir: ./logs/cache 5 | steps_per_epoch_train: 5000 6 | 7 | model: 8 | name: PointPillars 9 | ckpt_path: # path/to/your/checkpoint 10 | 11 | batcher: "ignore" 12 | 13 | point_cloud_range: [0, -10.0, -2, 10, 10, 2] 14 | classes: ['chair'] 15 | 16 | loss: 17 | focal: 18 | gamma: 2.0 19 | alpha: 0.25 20 | loss_weight: 1.0 21 | smooth_l1: 22 | beta: 0.11 23 | loss_weight: 2.0 24 | cross_entropy: 25 | loss_weight: 0.2 26 | 27 | voxelize: 28 | max_num_points: 32 29 | voxel_size: &vsize 30 | [0.05, 0.05, 4] 31 | max_voxels: [16000, 40000] 32 | 33 | voxel_encoder: 34 | in_channels: 3 35 | feat_channels: [64] 36 | voxel_size: *vsize 37 | 38 | scatter: 39 | in_channels: 64 40 | output_shape: [496, 432] 41 | 42 | backbone: 43 | in_channels: 64 44 | out_channels: [64, 128, 256] 45 | layer_nums: [3, 5, 5] 46 | layer_strides: [2, 2, 2] 47 | 48 | neck: 49 | in_channels: [64, 128, 256] 50 | out_channels: [128, 128, 128] 51 | upsample_strides: [1, 2, 4] 52 | use_conv_for_no_stride: false 53 | 54 | head: 55 | in_channels: 384 56 | feat_channels: 384 57 | nms_pre: 100 58 | score_thr: 0.1 59 | ranges: [ 60 | [0, -10.0, 0, 10, 10, 0] 61 | ] 62 | sizes: [[0.8, 0.8, 0.95]] 63 | rotations: [0, 1.57] 64 | iou_thr: [[0.35, 0.5]] 65 | 66 | augment: 67 | PointShuffle: True 68 | ObjectRangeFilter: True 69 | ObjectSample: 70 | min_points_dict: 71 | chair: 10 72 | sample_dict: 73 | chair: 10 74 | 75 | 76 | pipeline: 77 | name: ObjectDetection 78 | test_compute_metric: true 79 | batch_size: 6 80 | val_batch_size: 1 81 | test_batch_size: 1 82 | save_ckpt_freq: 2 83 | validation_freq: 5 84 | max_epoch: 300 85 | main_log_dir: ./logs_dense_01_wd_4 86 | train_sum_dir: train_log_dense_01_wd_4 87 | grad_clip_norm: 2 88 | 89 | optimizer: 90 | lr: 0.0001 91 | betas: [0.95, 0.99] 92 | weight_decay: 0.0025 93 | 94 | # evaluation properties 95 | overlaps: [0.5] 96 | similar_classes: {} 97 | difficulties: [0, 1, 2] 98 | 99 | -------------------------------------------------------------------------------- /ml3d/configs/pointpillars_habitat_sampling_dense_01_wd_4_7500.yml: -------------------------------------------------------------------------------- 1 | dataset: 2 | name: HabitatSampling 3 | dataset_path: # path/to/your/dataset 4 | cache_dir: ./logs/cache 5 | steps_per_epoch_train: 7500 6 | 7 | model: 8 | name: PointPillars 9 | ckpt_path: # path/to/your/checkpoint 10 | 11 | batcher: "ignore" 12 | 13 | point_cloud_range: [0, -10.0, -2, 10, 10, 2] 14 | classes: ['chair'] 15 | 16 | loss: 17 | focal: 18 | gamma: 2.0 19 | alpha: 0.25 20 | loss_weight: 1.0 21 | smooth_l1: 22 | beta: 0.11 23 | loss_weight: 2.0 24 | cross_entropy: 25 | loss_weight: 0.2 26 | 27 | voxelize: 28 | max_num_points: 32 29 | voxel_size: &vsize 30 | [0.05, 0.05, 4] 31 | max_voxels: [16000, 40000] 32 | 33 | voxel_encoder: 34 | in_channels: 3 35 | feat_channels: [64] 36 | voxel_size: *vsize 37 | 38 | scatter: 39 | in_channels: 64 40 | output_shape: [496, 432] 41 | 42 | backbone: 43 | in_channels: 64 44 | out_channels: [64, 128, 256] 45 | 
layer_nums: [3, 5, 5] 46 | layer_strides: [2, 2, 2] 47 | 48 | neck: 49 | in_channels: [64, 128, 256] 50 | out_channels: [128, 128, 128] 51 | upsample_strides: [1, 2, 4] 52 | use_conv_for_no_stride: false 53 | 54 | head: 55 | in_channels: 384 56 | feat_channels: 384 57 | nms_pre: 100 58 | score_thr: 0.1 59 | ranges: [ 60 | [0, -10.0, 0, 10, 10, 0] 61 | ] 62 | sizes: [[0.8, 0.8, 0.95]] 63 | rotations: [0, 1.57] 64 | iou_thr: [[0.35, 0.5]] 65 | 66 | augment: 67 | PointShuffle: True 68 | ObjectRangeFilter: True 69 | ObjectSample: 70 | min_points_dict: 71 | chair: 10 72 | sample_dict: 73 | chair: 10 74 | 75 | 76 | pipeline: 77 | name: ObjectDetection 78 | test_compute_metric: true 79 | batch_size: 6 80 | val_batch_size: 1 81 | test_batch_size: 1 82 | save_ckpt_freq: 2 83 | validation_freq: 5 84 | max_epoch: 300 85 | main_log_dir: ./logs_dense_01_wd_4_7500 86 | train_sum_dir: train_log_dense_01_wd_4_7500 87 | grad_clip_norm: 2 88 | 89 | optimizer: 90 | lr: 0.0001 91 | betas: [0.95, 0.99] 92 | weight_decay: 0.0025 93 | 94 | # evaluation properties 95 | overlaps: [0.5] 96 | similar_classes: {} 97 | difficulties: [0, 1, 2] 98 | 99 | -------------------------------------------------------------------------------- /ml3d/configs/pointpillars_habitat_sampling_dense_01_wd_4_mean_box.yml: -------------------------------------------------------------------------------- 1 | dataset: 2 | name: HabitatSampling 3 | dataset_path: # path/to/your/dataset 4 | cache_dir: ./logs/cache 5 | steps_per_epoch_train: 5000 6 | 7 | model: 8 | name: PointPillars 9 | ckpt_path: # path/to/your/checkpoint 10 | 11 | batcher: "ignore" 12 | 13 | point_cloud_range: [0, -19.0, -2, 25, 19, 2] 14 | classes: ['chair'] 15 | 16 | loss: 17 | focal: 18 | gamma: 2.0 19 | alpha: 0.25 20 | loss_weight: 1.0 21 | smooth_l1: 22 | beta: 0.11 23 | loss_weight: 2.0 24 | cross_entropy: 25 | loss_weight: 0.2 26 | 27 | voxelize: 28 | max_num_points: 32 29 | voxel_size: &vsize 30 | [0.05, 0.05, 4] 31 | max_voxels: [16000, 40000] 32 | 33 | voxel_encoder: 34 | in_channels: 3 35 | feat_channels: [64] 36 | voxel_size: *vsize 37 | 38 | scatter: 39 | in_channels: 64 40 | output_shape: [768, 512] 41 | 42 | backbone: 43 | in_channels: 64 44 | out_channels: [64, 128, 256] 45 | layer_nums: [3, 5, 5] 46 | layer_strides: [2, 2, 2] 47 | 48 | neck: 49 | in_channels: [64, 128, 256] 50 | out_channels: [128, 128, 128] 51 | upsample_strides: [1, 2, 4] 52 | use_conv_for_no_stride: false 53 | 54 | head: 55 | in_channels: 384 56 | feat_channels: 384 57 | nms_pre: 100 58 | score_thr: 0.1 59 | ranges: [ 60 | [0, -19.0, 0, 25, 19, 0] 61 | ] 62 | sizes: [[0.960005934232471, 0.6668719666706728, 0.8838318530669755]] 63 | rotations: [0, 1.57] 64 | iou_thr: [[0.35, 0.5]] 65 | 66 | augment: 67 | PointShuffle: True 68 | ObjectRangeFilter: True 69 | ObjectSample: 70 | min_points_dict: 71 | chair: 20 72 | sample_dict: 73 | chair: 10 74 | 75 | 76 | pipeline: 77 | name: ObjectDetection 78 | test_compute_metric: true 79 | batch_size: 6 80 | val_batch_size: 1 81 | test_batch_size: 1 82 | save_ckpt_freq: 2 83 | validation_freq: 5 84 | max_epoch: 300 85 | main_log_dir: ./logs_dense_01_wd_4_mean_box 86 | train_sum_dir: train_log_dense_01_wd_4_mean_box 87 | grad_clip_norm: 2 88 | 89 | optimizer: 90 | lr: 0.0001 91 | betas: [0.95, 0.99] 92 | weight_decay: 0.0025 93 | 94 | # evaluation properties 95 | overlaps: [0.5] 96 | similar_classes: {} 97 | difficulties: [0, 1, 2] 98 | 99 | -------------------------------------------------------------------------------- 
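The habitat-sampling configs in this directory differ only in optimizer settings (learning rate, betas, weight decay), anchor box sizes, schedule length, and log directories; the dataset/model/pipeline skeleton is identical. For orientation, here is a minimal sketch of how one of these YAML files drives training, assuming the stock Open3D-ML entry points that this fork inherits (`Config.load_from_file`, the `ObjectDetection` pipeline, and dataset/model classes registered under the `name:` keys); treat the exact class names as illustrative rather than verified:

```python
# Minimal sketch, assuming the stock Open3D-ML API this fork extends.
# HabitatSampling / SparsePointPillars mirror the "name:" keys in the
# configs and ml3d/datasets/habitat_sampling.py, but are not verified here.
import open3d.ml as _ml3d
import open3d.ml.torch as ml3d  # resolves to this repo when OPEN3D_ML_ROOT points here

cfg = _ml3d.utils.Config.load_from_file(
    "ml3d/configs/pointpillars_habitat_sampling_sparse.yml")

dataset = ml3d.datasets.HabitatSampling(**cfg.dataset)   # dataset: section
model = ml3d.models.SparsePointPillars(**cfg.model)      # model: section
pipeline = ml3d.pipelines.ObjectDetection(model,
                                          dataset=dataset,
                                          **cfg.pipeline)  # pipeline: section

pipeline.run_train()  # checkpoints and logs land in main_log_dir / train_sum_dir
```

`scripts/run_pipeline.py` wraps this same construct-and-train pattern behind a command line.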
/ml3d/configs/pointpillars_habitat_sampling_dense_5000_mean_box.yml: -------------------------------------------------------------------------------- 1 | dataset: 2 | name: HabitatSampling 3 | dataset_path: # path/to/your/dataset 4 | cache_dir: ./logs/cache 5 | steps_per_epoch_train: 5000 6 | 7 | model: 8 | name: PointPillars 9 | ckpt_path: # path/to/your/checkpoint 10 | 11 | batcher: "ignore" 12 | 13 | point_cloud_range: [0, -19.0, -2, 25, 19, 2] 14 | classes: ['chair'] 15 | 16 | loss: 17 | focal: 18 | gamma: 2.0 19 | alpha: 0.25 20 | loss_weight: 1.0 21 | smooth_l1: 22 | beta: 0.11 23 | loss_weight: 2.0 24 | cross_entropy: 25 | loss_weight: 0.2 26 | 27 | voxelize: 28 | max_num_points: 32 29 | voxel_size: &vsize 30 | [0.05, 0.05, 4] 31 | max_voxels: [16000, 40000] 32 | 33 | voxel_encoder: 34 | in_channels: 3 35 | feat_channels: [64] 36 | voxel_size: *vsize 37 | 38 | scatter: 39 | in_channels: 64 40 | output_shape: [768, 512] 41 | 42 | backbone: 43 | in_channels: 64 44 | out_channels: [64, 128, 256] 45 | layer_nums: [3, 5, 5] 46 | layer_strides: [2, 2, 2] 47 | 48 | neck: 49 | in_channels: [64, 128, 256] 50 | out_channels: [128, 128, 128] 51 | upsample_strides: [1, 2, 4] 52 | use_conv_for_no_stride: false 53 | 54 | head: 55 | in_channels: 384 56 | feat_channels: 384 57 | nms_pre: 100 58 | score_thr: 0.1 59 | ranges: [ 60 | [0, -19.0, 0, 25, 19, 0] 61 | ] 62 | sizes: [[0.960005934232471, 0.6668719666706728, 0.8838318530669755]] 63 | rotations: [0, 1.57] 64 | iou_thr: [[0.35, 0.5]] 65 | 66 | augment: 67 | PointShuffle: True 68 | ObjectRangeFilter: True 69 | ObjectSample: 70 | min_points_dict: 71 | chair: 20 72 | sample_dict: 73 | chair: 10 74 | 75 | 76 | pipeline: 77 | name: ObjectDetection 78 | test_compute_metric: false 79 | batch_size: 6 80 | val_batch_size: 1 81 | test_batch_size: 1 82 | save_ckpt_freq: 2 83 | validation_freq: 5 84 | max_epoch: 200 85 | main_log_dir: ./logs_dense_5000_mean_box 86 | train_sum_dir: train_log_dense_5000_mean_box 87 | grad_clip_norm: 2 88 | 89 | optimizer: 90 | lr: 0.001 91 | betas: [0.95, 0.99] 92 | weight_decay: 0.01 93 | 94 | # evaluation properties 95 | overlaps: [0.5] 96 | similar_classes: {} 97 | difficulties: [0, 1, 2] 98 | 99 | -------------------------------------------------------------------------------- /ml3d/configs/pointpillars_habitat_sampling_sparse.yml: -------------------------------------------------------------------------------- 1 | dataset: 2 | name: HabitatSampling 3 | dataset_path: # path/to/your/dataset 4 | cache_dir: ./logs/cache 5 | steps_per_epoch_train: 5000 6 | 7 | model: 8 | name: SparsePointPillars 9 | ckpt_path: # path/to/your/checkpoint 10 | 11 | batcher: "ignore" 12 | 13 | point_cloud_range: [0, -10.0, -2, 10, 10, 2] 14 | classes: ['chair'] 15 | 16 | loss: 17 | focal: 18 | gamma: 2.0 19 | alpha: 0.25 20 | loss_weight: 1.0 21 | smooth_l1: 22 | beta: 0.11 23 | loss_weight: 2.0 24 | cross_entropy: 25 | loss_weight: 0.2 26 | 27 | voxelize: 28 | max_num_points: 32 29 | voxel_size: &vsize 30 | [0.05, 0.05, 4] 31 | max_voxels: [16000, 40000] 32 | 33 | voxel_encoder: 34 | in_channels: 3 35 | feat_channels: [64] 36 | voxel_size: *vsize 37 | 38 | scatter: 39 | in_channels: 64 40 | output_shape: [496, 432] 41 | 42 | backbone: 43 | in_channels: 64 44 | out_channels: [64, 128, 256] 45 | layer_nums: [3, 5, 5] 46 | layer_strides: [2, 2, 2] 47 | 48 | neck: 49 | in_channels: [64, 128, 256] 50 | out_channels: [128, 128, 128] 51 | upsample_strides: [1, 2, 4] 52 | use_conv_for_no_stride: false 53 | 54 | head: 55 | in_channels: 384 56 | 
feat_channels: 384 57 | nms_pre: 100 58 | score_thr: 0.1 59 | ranges: [ 60 | [0, -10.0, 0, 10, 10, 0] 61 | ] 62 | sizes: [[0.8, 0.8, 0.95]] 63 | rotations: [0, 1.57] 64 | iou_thr: [[0.35, 0.5]] 65 | 66 | augment: 67 | PointShuffle: True 68 | ObjectRangeFilter: True 69 | ObjectSample: 70 | min_points_dict: 71 | chair: 10 72 | sample_dict: 73 | chair: 10 74 | 75 | 76 | pipeline: 77 | name: ObjectDetection 78 | test_compute_metric: true 79 | batch_size: 6 80 | val_batch_size: 1 81 | test_batch_size: 1 82 | save_ckpt_freq: 2 83 | validation_freq: 5 84 | max_epoch: 800 85 | main_log_dir: ./logs 86 | train_sum_dir: train_log 87 | grad_clip_norm: 2 88 | 89 | optimizer: 90 | lr: 0.001 91 | betas: [0.95, 0.99] 92 | weight_decay: 0.01 93 | 94 | # evaluation properties 95 | overlaps: [0.5] 96 | similar_classes: {} 97 | difficulties: [0, 1, 2] 98 | 99 | -------------------------------------------------------------------------------- /ml3d/configs/pointpillars_habitat_sampling_sparse_001_adam_defaults_0.yml: -------------------------------------------------------------------------------- 1 | dataset: 2 | name: HabitatSampling 3 | dataset_path: # path/to/your/dataset 4 | cache_dir: ./logs/cache 5 | steps_per_epoch_train: 5000 6 | 7 | model: 8 | name: SparsePointPillars 9 | ckpt_path: # path/to/your/checkpoint 10 | 11 | batcher: "ignore" 12 | 13 | point_cloud_range: [0, -10.0, -2, 10, 10, 2] 14 | classes: ['chair'] 15 | 16 | loss: 17 | focal: 18 | gamma: 2.0 19 | alpha: 0.25 20 | loss_weight: 1.0 21 | smooth_l1: 22 | beta: 0.11 23 | loss_weight: 2.0 24 | cross_entropy: 25 | loss_weight: 0.2 26 | 27 | voxelize: 28 | max_num_points: 32 29 | voxel_size: &vsize 30 | [0.05, 0.05, 4] 31 | max_voxels: [16000, 40000] 32 | 33 | voxel_encoder: 34 | in_channels: 3 35 | feat_channels: [64] 36 | voxel_size: *vsize 37 | 38 | scatter: 39 | in_channels: 64 40 | output_shape: [496, 432] 41 | 42 | backbone: 43 | in_channels: 64 44 | out_channels: [64, 128, 256] 45 | layer_nums: [3, 5, 5] 46 | layer_strides: [2, 2, 2] 47 | 48 | neck: 49 | in_channels: [64, 128, 256] 50 | out_channels: [128, 128, 128] 51 | upsample_strides: [1, 2, 4] 52 | use_conv_for_no_stride: false 53 | 54 | head: 55 | in_channels: 384 56 | feat_channels: 384 57 | nms_pre: 100 58 | score_thr: 0.1 59 | ranges: [ 60 | [0, -10.0, 0, 10, 10, 0] 61 | ] 62 | sizes: [[0.8, 0.8, 0.95]] 63 | rotations: [0, 1.57] 64 | iou_thr: [[0.35, 0.5]] 65 | 66 | augment: 67 | PointShuffle: True 68 | ObjectRangeFilter: True 69 | ObjectSample: 70 | min_points_dict: 71 | chair: 10 72 | sample_dict: 73 | chair: 10 74 | 75 | 76 | pipeline: 77 | name: ObjectDetection 78 | test_compute_metric: true 79 | batch_size: 6 80 | val_batch_size: 1 81 | test_batch_size: 1 82 | save_ckpt_freq: 2 83 | validation_freq: 5 84 | max_epoch: 300 85 | main_log_dir: ./logs_sparse_001_adam_defaults_0 86 | train_sum_dir: train_log_sparse_001_adam_defaults_0 87 | grad_clip_norm: 2 88 | 89 | optimizer: 90 | lr: 0.00001 91 | betas: [0.99, 0.999] 92 | weight_decay: 0.0 93 | 94 | # evaluation properties 95 | overlaps: [0.5] 96 | similar_classes: {} 97 | difficulties: [0, 1, 2] 98 | 99 | -------------------------------------------------------------------------------- /ml3d/configs/pointpillars_habitat_sampling_sparse_001_wd_4.yml: -------------------------------------------------------------------------------- 1 | dataset: 2 | name: HabitatSampling 3 | dataset_path: # path/to/your/dataset 4 | cache_dir: ./logs/cache 5 | steps_per_epoch_train: 5000 6 | 7 | model: 8 | name: SparsePointPillars 9 | ckpt_path: # 
path/to/your/checkpoint 10 | 11 | batcher: "ignore" 12 | 13 | point_cloud_range: [0, -10.0, -2, 10, 10, 2] 14 | classes: ['chair'] 15 | 16 | loss: 17 | focal: 18 | gamma: 2.0 19 | alpha: 0.25 20 | loss_weight: 1.0 21 | smooth_l1: 22 | beta: 0.11 23 | loss_weight: 2.0 24 | cross_entropy: 25 | loss_weight: 0.2 26 | 27 | voxelize: 28 | max_num_points: 32 29 | voxel_size: &vsize 30 | [0.05, 0.05, 4] 31 | max_voxels: [16000, 40000] 32 | 33 | voxel_encoder: 34 | in_channels: 3 35 | feat_channels: [64] 36 | voxel_size: *vsize 37 | 38 | scatter: 39 | in_channels: 64 40 | output_shape: [496, 432] 41 | 42 | backbone: 43 | in_channels: 64 44 | out_channels: [64, 128, 256] 45 | layer_nums: [3, 5, 5] 46 | layer_strides: [2, 2, 2] 47 | 48 | neck: 49 | in_channels: [64, 128, 256] 50 | out_channels: [128, 128, 128] 51 | upsample_strides: [1, 2, 4] 52 | use_conv_for_no_stride: false 53 | 54 | head: 55 | in_channels: 384 56 | feat_channels: 384 57 | nms_pre: 100 58 | score_thr: 0.1 59 | ranges: [ 60 | [0, -10.0, 0, 10, 10, 0] 61 | ] 62 | sizes: [[0.8, 0.8, 0.95]] 63 | rotations: [0, 1.57] 64 | iou_thr: [[0.35, 0.5]] 65 | 66 | augment: 67 | PointShuffle: True 68 | ObjectRangeFilter: True 69 | ObjectSample: 70 | min_points_dict: 71 | chair: 10 72 | sample_dict: 73 | chair: 10 74 | 75 | 76 | pipeline: 77 | name: ObjectDetection 78 | test_compute_metric: true 79 | batch_size: 6 80 | val_batch_size: 1 81 | test_batch_size: 1 82 | save_ckpt_freq: 2 83 | validation_freq: 5 84 | max_epoch: 300 85 | main_log_dir: ./logs_sparse_001_wd_4 86 | train_sum_dir: train_log_sparse_001_wd_4 87 | grad_clip_norm: 2 88 | 89 | optimizer: 90 | lr: 0.00001 91 | betas: [0.95, 0.99] 92 | weight_decay: 0.0025 93 | 94 | # evaluation properties 95 | overlaps: [0.5] 96 | similar_classes: {} 97 | difficulties: [0, 1, 2] 98 | 99 | -------------------------------------------------------------------------------- /ml3d/configs/pointpillars_habitat_sampling_sparse_01.yml: -------------------------------------------------------------------------------- 1 | dataset: 2 | name: HabitatSampling 3 | dataset_path: # path/to/your/dataset 4 | cache_dir: ./logs/cache 5 | steps_per_epoch_train: 5000 6 | 7 | model: 8 | name: SparsePointPillars 9 | ckpt_path: # path/to/your/checkpoint 10 | 11 | batcher: "ignore" 12 | 13 | point_cloud_range: [0, -10.0, -2, 10, 10, 2] 14 | classes: ['chair'] 15 | 16 | loss: 17 | focal: 18 | gamma: 2.0 19 | alpha: 0.25 20 | loss_weight: 1.0 21 | smooth_l1: 22 | beta: 0.11 23 | loss_weight: 2.0 24 | cross_entropy: 25 | loss_weight: 0.2 26 | 27 | voxelize: 28 | max_num_points: 32 29 | voxel_size: &vsize 30 | [0.05, 0.05, 4] 31 | max_voxels: [16000, 40000] 32 | 33 | voxel_encoder: 34 | in_channels: 3 35 | feat_channels: [64] 36 | voxel_size: *vsize 37 | 38 | scatter: 39 | in_channels: 64 40 | output_shape: [496, 432] 41 | 42 | backbone: 43 | in_channels: 64 44 | out_channels: [64, 128, 256] 45 | layer_nums: [3, 5, 5] 46 | layer_strides: [2, 2, 2] 47 | 48 | neck: 49 | in_channels: [64, 128, 256] 50 | out_channels: [128, 128, 128] 51 | upsample_strides: [1, 2, 4] 52 | use_conv_for_no_stride: false 53 | 54 | head: 55 | in_channels: 384 56 | feat_channels: 384 57 | nms_pre: 100 58 | score_thr: 0.1 59 | ranges: [ 60 | [0, -10.0, 0, 10, 10, 0] 61 | ] 62 | sizes: [[0.8, 0.8, 0.95]] 63 | rotations: [0, 1.57] 64 | iou_thr: [[0.35, 0.5]] 65 | 66 | augment: 67 | PointShuffle: True 68 | ObjectRangeFilter: True 69 | ObjectSample: 70 | min_points_dict: 71 | chair: 10 72 | sample_dict: 73 | chair: 10 74 | 75 | 76 | pipeline: 77 | name: 
ObjectDetection 78 | test_compute_metric: true 79 | batch_size: 6 80 | val_batch_size: 1 81 | test_batch_size: 1 82 | save_ckpt_freq: 2 83 | validation_freq: 5 84 | max_epoch: 300 85 | main_log_dir: ./logs_sparse_01 86 | train_sum_dir: train_log_sparse_01 87 | grad_clip_norm: 2 88 | 89 | optimizer: 90 | lr: 0.0001 91 | betas: [0.95, 0.99] 92 | weight_decay: 0.01 93 | 94 | # evaluation properties 95 | overlaps: [0.5] 96 | similar_classes: {} 97 | difficulties: [0, 1, 2] 98 | 99 | -------------------------------------------------------------------------------- /ml3d/configs/pointpillars_habitat_sampling_sparse_01_adam_defaults_0.yml: -------------------------------------------------------------------------------- 1 | dataset: 2 | name: HabitatSampling 3 | dataset_path: # path/to/your/dataset 4 | cache_dir: ./logs/cache 5 | steps_per_epoch_train: 5000 6 | 7 | model: 8 | name: SparsePointPillars 9 | ckpt_path: # path/to/your/checkpoint 10 | 11 | batcher: "ignore" 12 | 13 | point_cloud_range: [0, -10.0, -2, 10, 10, 2] 14 | classes: ['chair'] 15 | 16 | loss: 17 | focal: 18 | gamma: 2.0 19 | alpha: 0.25 20 | loss_weight: 1.0 21 | smooth_l1: 22 | beta: 0.11 23 | loss_weight: 2.0 24 | cross_entropy: 25 | loss_weight: 0.2 26 | 27 | voxelize: 28 | max_num_points: 32 29 | voxel_size: &vsize 30 | [0.05, 0.05, 4] 31 | max_voxels: [16000, 40000] 32 | 33 | voxel_encoder: 34 | in_channels: 3 35 | feat_channels: [64] 36 | voxel_size: *vsize 37 | 38 | scatter: 39 | in_channels: 64 40 | output_shape: [496, 432] 41 | 42 | backbone: 43 | in_channels: 64 44 | out_channels: [64, 128, 256] 45 | layer_nums: [3, 5, 5] 46 | layer_strides: [2, 2, 2] 47 | 48 | neck: 49 | in_channels: [64, 128, 256] 50 | out_channels: [128, 128, 128] 51 | upsample_strides: [1, 2, 4] 52 | use_conv_for_no_stride: false 53 | 54 | head: 55 | in_channels: 384 56 | feat_channels: 384 57 | nms_pre: 100 58 | score_thr: 0.1 59 | ranges: [ 60 | [0, -10.0, 0, 10, 10, 0] 61 | ] 62 | sizes: [[0.8, 0.8, 0.95]] 63 | rotations: [0, 1.57] 64 | iou_thr: [[0.35, 0.5]] 65 | 66 | augment: 67 | PointShuffle: True 68 | ObjectRangeFilter: True 69 | ObjectSample: 70 | min_points_dict: 71 | chair: 10 72 | sample_dict: 73 | chair: 10 74 | 75 | 76 | pipeline: 77 | name: ObjectDetection 78 | test_compute_metric: true 79 | batch_size: 6 80 | val_batch_size: 1 81 | test_batch_size: 1 82 | save_ckpt_freq: 2 83 | validation_freq: 5 84 | max_epoch: 300 85 | main_log_dir: ./logs_sparse_01_adam_defaults_0 86 | train_sum_dir: train_log_sparse_01_adam_defaults_0 87 | grad_clip_norm: 2 88 | 89 | optimizer: 90 | lr: 0.0001 91 | betas: [0.99, 0.999] 92 | weight_decay: 0.0 93 | 94 | # evaluation properties 95 | overlaps: [0.5] 96 | similar_classes: {} 97 | difficulties: [0, 1, 2] 98 | 99 | -------------------------------------------------------------------------------- /ml3d/configs/pointpillars_habitat_sampling_sparse_01_wd_0.yml: -------------------------------------------------------------------------------- 1 | dataset: 2 | name: HabitatSampling 3 | dataset_path: # path/to/your/dataset 4 | cache_dir: ./logs/cache 5 | steps_per_epoch_train: 5000 6 | 7 | model: 8 | name: SparsePointPillars 9 | ckpt_path: # path/to/your/checkpoint 10 | 11 | batcher: "ignore" 12 | 13 | point_cloud_range: [0, -10.0, -2, 10, 10, 2] 14 | classes: ['chair'] 15 | 16 | loss: 17 | focal: 18 | gamma: 2.0 19 | alpha: 0.25 20 | loss_weight: 1.0 21 | smooth_l1: 22 | beta: 0.11 23 | loss_weight: 2.0 24 | cross_entropy: 25 | loss_weight: 0.2 26 | 27 | voxelize: 28 | max_num_points: 32 29 | voxel_size: 
&vsize 30 | [0.05, 0.05, 4] 31 | max_voxels: [16000, 40000] 32 | 33 | voxel_encoder: 34 | in_channels: 3 35 | feat_channels: [64] 36 | voxel_size: *vsize 37 | 38 | scatter: 39 | in_channels: 64 40 | output_shape: [496, 432] 41 | 42 | backbone: 43 | in_channels: 64 44 | out_channels: [64, 128, 256] 45 | layer_nums: [3, 5, 5] 46 | layer_strides: [2, 2, 2] 47 | 48 | neck: 49 | in_channels: [64, 128, 256] 50 | out_channels: [128, 128, 128] 51 | upsample_strides: [1, 2, 4] 52 | use_conv_for_no_stride: false 53 | 54 | head: 55 | in_channels: 384 56 | feat_channels: 384 57 | nms_pre: 100 58 | score_thr: 0.1 59 | ranges: [ 60 | [0, -10.0, 0, 10, 10, 0] 61 | ] 62 | sizes: [[0.8, 0.8, 0.95]] 63 | rotations: [0, 1.57] 64 | iou_thr: [[0.35, 0.5]] 65 | 66 | augment: 67 | PointShuffle: True 68 | ObjectRangeFilter: True 69 | ObjectSample: 70 | min_points_dict: 71 | chair: 10 72 | sample_dict: 73 | chair: 10 74 | 75 | 76 | pipeline: 77 | name: ObjectDetection 78 | test_compute_metric: true 79 | batch_size: 6 80 | val_batch_size: 1 81 | test_batch_size: 1 82 | save_ckpt_freq: 2 83 | validation_freq: 5 84 | max_epoch: 300 85 | main_log_dir: ./logs_sparse_01_wd_0 86 | train_sum_dir: train_log_sparse_01_wd_0 87 | grad_clip_norm: 2 88 | 89 | optimizer: 90 | lr: 0.0001 91 | betas: [0.95, 0.99] 92 | weight_decay: 0.0 93 | 94 | # evaluation properties 95 | overlaps: [0.5] 96 | similar_classes: {} 97 | difficulties: [0, 1, 2] 98 | 99 | -------------------------------------------------------------------------------- /ml3d/configs/pointpillars_habitat_sampling_sparse_01_wd_005.yml: -------------------------------------------------------------------------------- 1 | dataset: 2 | name: HabitatSampling 3 | dataset_path: # path/to/your/dataset 4 | cache_dir: ./logs/cache 5 | steps_per_epoch_train: 5000 6 | 7 | model: 8 | name: SparsePointPillars 9 | ckpt_path: # path/to/your/checkpoint 10 | 11 | batcher: "ignore" 12 | 13 | point_cloud_range: [0, -10.0, -2, 10, 10, 2] 14 | classes: ['chair'] 15 | 16 | loss: 17 | focal: 18 | gamma: 2.0 19 | alpha: 0.25 20 | loss_weight: 1.0 21 | smooth_l1: 22 | beta: 0.11 23 | loss_weight: 2.0 24 | cross_entropy: 25 | loss_weight: 0.2 26 | 27 | voxelize: 28 | max_num_points: 32 29 | voxel_size: &vsize 30 | [0.05, 0.05, 4] 31 | max_voxels: [16000, 40000] 32 | 33 | voxel_encoder: 34 | in_channels: 3 35 | feat_channels: [64] 36 | voxel_size: *vsize 37 | 38 | scatter: 39 | in_channels: 64 40 | output_shape: [496, 432] 41 | 42 | backbone: 43 | in_channels: 64 44 | out_channels: [64, 128, 256] 45 | layer_nums: [3, 5, 5] 46 | layer_strides: [2, 2, 2] 47 | 48 | neck: 49 | in_channels: [64, 128, 256] 50 | out_channels: [128, 128, 128] 51 | upsample_strides: [1, 2, 4] 52 | use_conv_for_no_stride: false 53 | 54 | head: 55 | in_channels: 384 56 | feat_channels: 384 57 | nms_pre: 100 58 | score_thr: 0.1 59 | ranges: [ 60 | [0, -10.0, 0, 10, 10, 0] 61 | ] 62 | sizes: [[0.8, 0.8, 0.95]] 63 | rotations: [0, 1.57] 64 | iou_thr: [[0.35, 0.5]] 65 | 66 | augment: 67 | PointShuffle: True 68 | ObjectRangeFilter: True 69 | ObjectSample: 70 | min_points_dict: 71 | chair: 10 72 | sample_dict: 73 | chair: 10 74 | 75 | 76 | pipeline: 77 | name: ObjectDetection 78 | test_compute_metric: true 79 | batch_size: 6 80 | val_batch_size: 1 81 | test_batch_size: 1 82 | save_ckpt_freq: 2 83 | validation_freq: 5 84 | max_epoch: 300 85 | main_log_dir: ./logs_sparse_01_wd_005 86 | train_sum_dir: train_log_sparse_01_wd_005 87 | grad_clip_norm: 2 88 | 89 | optimizer: 90 | lr: 0.0001 91 | betas: [0.95, 0.99] 92 | weight_decay: 0.05
93 | 94 | # evaluation properties 95 | overlaps: [0.5] 96 | similar_classes: {} 97 | difficulties: [0, 1, 2] 98 | 99 | -------------------------------------------------------------------------------- /ml3d/configs/pointpillars_habitat_sampling_sparse_01_wd_01.yml: -------------------------------------------------------------------------------- 1 | dataset: 2 | name: HabitatSampling 3 | dataset_path: # path/to/your/dataset 4 | cache_dir: ./logs/cache 5 | steps_per_epoch_train: 5000 6 | 7 | model: 8 | name: SparsePointPillars 9 | ckpt_path: # path/to/your/checkpoint 10 | 11 | batcher: "ignore" 12 | 13 | point_cloud_range: [0, -10.0, -2, 10, 10, 2] 14 | classes: ['chair'] 15 | 16 | loss: 17 | focal: 18 | gamma: 2.0 19 | alpha: 0.25 20 | loss_weight: 1.0 21 | smooth_l1: 22 | beta: 0.11 23 | loss_weight: 2.0 24 | cross_entropy: 25 | loss_weight: 0.2 26 | 27 | voxelize: 28 | max_num_points: 32 29 | voxel_size: &vsize 30 | [0.05, 0.05, 4] 31 | max_voxels: [16000, 40000] 32 | 33 | voxel_encoder: 34 | in_channels: 3 35 | feat_channels: [64] 36 | voxel_size: *vsize 37 | 38 | scatter: 39 | in_channels: 64 40 | output_shape: [496, 432] 41 | 42 | backbone: 43 | in_channels: 64 44 | out_channels: [64, 128, 256] 45 | layer_nums: [3, 5, 5] 46 | layer_strides: [2, 2, 2] 47 | 48 | neck: 49 | in_channels: [64, 128, 256] 50 | out_channels: [128, 128, 128] 51 | upsample_strides: [1, 2, 4] 52 | use_conv_for_no_stride: false 53 | 54 | head: 55 | in_channels: 384 56 | feat_channels: 384 57 | nms_pre: 100 58 | score_thr: 0.1 59 | ranges: [ 60 | [0, -10.0, 0, 10, 10, 0] 61 | ] 62 | sizes: [[0.8, 0.8, 0.95]] 63 | rotations: [0, 1.57] 64 | iou_thr: [[0.35, 0.5]] 65 | 66 | augment: 67 | PointShuffle: True 68 | ObjectRangeFilter: True 69 | ObjectSample: 70 | min_points_dict: 71 | chair: 10 72 | sample_dict: 73 | chair: 10 74 | 75 | 76 | pipeline: 77 | name: ObjectDetection 78 | test_compute_metric: true 79 | batch_size: 6 80 | val_batch_size: 1 81 | test_batch_size: 1 82 | save_ckpt_freq: 2 83 | validation_freq: 5 84 | max_epoch: 300 85 | main_log_dir: ./logs_sparse_01_wd_01 86 | train_sum_dir: train_log_sparse_01_wd_01 87 | grad_clip_norm: 2 88 | 89 | optimizer: 90 | lr: 0.0001 91 | betas: [0.95, 0.99] 92 | weight_decay: 0.1 93 | 94 | # evaluation properties 95 | overlaps: [0.5] 96 | similar_classes: {} 97 | difficulties: [0, 1, 2] 98 | 99 | -------------------------------------------------------------------------------- /ml3d/configs/pointpillars_habitat_sampling_sparse_01_wd_16.yml: -------------------------------------------------------------------------------- 1 | dataset: 2 | name: HabitatSampling 3 | dataset_path: # path/to/your/dataset 4 | cache_dir: ./logs/cache 5 | steps_per_epoch_train: 5000 6 | 7 | model: 8 | name: SparsePointPillars 9 | ckpt_path: # path/to/your/checkpoint 10 | 11 | batcher: "ignore" 12 | 13 | point_cloud_range: [0, -10.0, -2, 10, 10, 2] 14 | classes: ['chair'] 15 | 16 | loss: 17 | focal: 18 | gamma: 2.0 19 | alpha: 0.25 20 | loss_weight: 1.0 21 | smooth_l1: 22 | beta: 0.11 23 | loss_weight: 2.0 24 | cross_entropy: 25 | loss_weight: 0.2 26 | 27 | voxelize: 28 | max_num_points: 32 29 | voxel_size: &vsize 30 | [0.05, 0.05, 4] 31 | max_voxels: [16000, 40000] 32 | 33 | voxel_encoder: 34 | in_channels: 3 35 | feat_channels: [64] 36 | voxel_size: *vsize 37 | 38 | scatter: 39 | in_channels: 64 40 | output_shape: [496, 432] 41 | 42 | backbone: 43 | in_channels: 64 44 | out_channels: [64, 128, 256] 45 | layer_nums: [3, 5, 5] 46 | layer_strides: [2, 2, 2] 47 | 48 | neck: 49 | in_channels: [64, 128, 
256] 50 | out_channels: [128, 128, 128] 51 | upsample_strides: [1, 2, 4] 52 | use_conv_for_no_stride: false 53 | 54 | head: 55 | in_channels: 384 56 | feat_channels: 384 57 | nms_pre: 100 58 | score_thr: 0.1 59 | ranges: [ 60 | [0, -10.0, 0, 10, 10, 0] 61 | ] 62 | sizes: [[0.8, 0.8, 0.95]] 63 | rotations: [0, 1.57] 64 | iou_thr: [[0.35, 0.5]] 65 | 66 | augment: 67 | PointShuffle: True 68 | ObjectRangeFilter: True 69 | ObjectSample: 70 | min_points_dict: 71 | chair: 10 72 | sample_dict: 73 | chair: 10 74 | 75 | 76 | pipeline: 77 | name: ObjectDetection 78 | test_compute_metric: true 79 | batch_size: 6 80 | val_batch_size: 1 81 | test_batch_size: 1 82 | save_ckpt_freq: 2 83 | validation_freq: 5 84 | max_epoch: 300 85 | main_log_dir: ./logs_sparse_01_wd_16 86 | train_sum_dir: train_log_sparse_01_wd_16 87 | grad_clip_norm: 2 88 | 89 | optimizer: 90 | lr: 0.0001 91 | betas: [0.95, 0.99] 92 | weight_decay: 0.000625 93 | 94 | # evaluation properties 95 | overlaps: [0.5] 96 | similar_classes: {} 97 | difficulties: [0, 1, 2] 98 | 99 | -------------------------------------------------------------------------------- /ml3d/configs/pointpillars_habitat_sampling_sparse_01_wd_2.yml: -------------------------------------------------------------------------------- 1 | dataset: 2 | name: HabitatSampling 3 | dataset_path: # path/to/your/dataset 4 | cache_dir: ./logs/cache 5 | steps_per_epoch_train: 5000 6 | 7 | model: 8 | name: SparsePointPillars 9 | ckpt_path: # path/to/your/checkpoint 10 | 11 | batcher: "ignore" 12 | 13 | point_cloud_range: [0, -10.0, -2, 10, 10, 2] 14 | classes: ['chair'] 15 | 16 | loss: 17 | focal: 18 | gamma: 2.0 19 | alpha: 0.25 20 | loss_weight: 1.0 21 | smooth_l1: 22 | beta: 0.11 23 | loss_weight: 2.0 24 | cross_entropy: 25 | loss_weight: 0.2 26 | 27 | voxelize: 28 | max_num_points: 32 29 | voxel_size: &vsize 30 | [0.05, 0.05, 4] 31 | max_voxels: [16000, 40000] 32 | 33 | voxel_encoder: 34 | in_channels: 3 35 | feat_channels: [64] 36 | voxel_size: *vsize 37 | 38 | scatter: 39 | in_channels: 64 40 | output_shape: [496, 432] 41 | 42 | backbone: 43 | in_channels: 64 44 | out_channels: [64, 128, 256] 45 | layer_nums: [3, 5, 5] 46 | layer_strides: [2, 2, 2] 47 | 48 | neck: 49 | in_channels: [64, 128, 256] 50 | out_channels: [128, 128, 128] 51 | upsample_strides: [1, 2, 4] 52 | use_conv_for_no_stride: false 53 | 54 | head: 55 | in_channels: 384 56 | feat_channels: 384 57 | nms_pre: 100 58 | score_thr: 0.1 59 | ranges: [ 60 | [0, -10.0, 0, 10, 10, 0] 61 | ] 62 | sizes: [[0.8, 0.8, 0.95]] 63 | rotations: [0, 1.57] 64 | iou_thr: [[0.35, 0.5]] 65 | 66 | augment: 67 | PointShuffle: True 68 | ObjectRangeFilter: True 69 | ObjectSample: 70 | min_points_dict: 71 | chair: 10 72 | sample_dict: 73 | chair: 10 74 | 75 | 76 | pipeline: 77 | name: ObjectDetection 78 | test_compute_metric: true 79 | batch_size: 6 80 | val_batch_size: 1 81 | test_batch_size: 1 82 | save_ckpt_freq: 2 83 | validation_freq: 5 84 | max_epoch: 300 85 | main_log_dir: ./logs_sparse_01_wd_2 86 | train_sum_dir: train_log_sparse_01_wd_2 87 | grad_clip_norm: 2 88 | 89 | optimizer: 90 | lr: 0.0001 91 | betas: [0.95, 0.99] 92 | weight_decay: 0.005 93 | 94 | # evaluation properties 95 | overlaps: [0.5] 96 | similar_classes: {} 97 | difficulties: [0, 1, 2] 98 | 99 | -------------------------------------------------------------------------------- /ml3d/configs/pointpillars_habitat_sampling_sparse_01_wd_32.yml: -------------------------------------------------------------------------------- 1 | dataset: 2 | name: HabitatSampling 3 | 
dataset_path: # path/to/your/dataset 4 | cache_dir: ./logs/cache 5 | steps_per_epoch_train: 5000 6 | 7 | model: 8 | name: SparsePointPillars 9 | ckpt_path: # path/to/your/checkpoint 10 | 11 | batcher: "ignore" 12 | 13 | point_cloud_range: [0, -10.0, -2, 10, 10, 2] 14 | classes: ['chair'] 15 | 16 | loss: 17 | focal: 18 | gamma: 2.0 19 | alpha: 0.25 20 | loss_weight: 1.0 21 | smooth_l1: 22 | beta: 0.11 23 | loss_weight: 2.0 24 | cross_entropy: 25 | loss_weight: 0.2 26 | 27 | voxelize: 28 | max_num_points: 32 29 | voxel_size: &vsize 30 | [0.05, 0.05, 4] 31 | max_voxels: [16000, 40000] 32 | 33 | voxel_encoder: 34 | in_channels: 3 35 | feat_channels: [64] 36 | voxel_size: *vsize 37 | 38 | scatter: 39 | in_channels: 64 40 | output_shape: [496, 432] 41 | 42 | backbone: 43 | in_channels: 64 44 | out_channels: [64, 128, 256] 45 | layer_nums: [3, 5, 5] 46 | layer_strides: [2, 2, 2] 47 | 48 | neck: 49 | in_channels: [64, 128, 256] 50 | out_channels: [128, 128, 128] 51 | upsample_strides: [1, 2, 4] 52 | use_conv_for_no_stride: false 53 | 54 | head: 55 | in_channels: 384 56 | feat_channels: 384 57 | nms_pre: 100 58 | score_thr: 0.1 59 | ranges: [ 60 | [0, -10.0, 0, 10, 10, 0] 61 | ] 62 | sizes: [[0.8, 0.8, 0.95]] 63 | rotations: [0, 1.57] 64 | iou_thr: [[0.35, 0.5]] 65 | 66 | augment: 67 | PointShuffle: True 68 | ObjectRangeFilter: True 69 | ObjectSample: 70 | min_points_dict: 71 | chair: 10 72 | sample_dict: 73 | chair: 10 74 | 75 | 76 | pipeline: 77 | name: ObjectDetection 78 | test_compute_metric: true 79 | batch_size: 6 80 | val_batch_size: 1 81 | test_batch_size: 1 82 | save_ckpt_freq: 2 83 | validation_freq: 5 84 | max_epoch: 300 85 | main_log_dir: ./logs_sparse_01_wd_32 86 | train_sum_dir: train_log_sparse_01_wd_32 87 | grad_clip_norm: 2 88 | 89 | optimizer: 90 | lr: 0.0001 91 | betas: [0.95, 0.99] 92 | weight_decay: 0.0003125 93 | 94 | # evaluation properties 95 | overlaps: [0.5] 96 | similar_classes: {} 97 | difficulties: [0, 1, 2] 98 | 99 | -------------------------------------------------------------------------------- /ml3d/configs/pointpillars_habitat_sampling_sparse_01_wd_4.yml: -------------------------------------------------------------------------------- 1 | dataset: 2 | name: HabitatSampling 3 | dataset_path: # path/to/your/dataset 4 | cache_dir: ./logs/cache 5 | steps_per_epoch_train: 5000 6 | 7 | model: 8 | name: SparsePointPillars 9 | ckpt_path: # path/to/your/checkpoint 10 | 11 | batcher: "ignore" 12 | 13 | point_cloud_range: [0, -10.0, -2, 10, 10, 2] 14 | classes: ['chair'] 15 | 16 | loss: 17 | focal: 18 | gamma: 2.0 19 | alpha: 0.25 20 | loss_weight: 1.0 21 | smooth_l1: 22 | beta: 0.11 23 | loss_weight: 2.0 24 | cross_entropy: 25 | loss_weight: 0.2 26 | 27 | voxelize: 28 | max_num_points: 32 29 | voxel_size: &vsize 30 | [0.05, 0.05, 4] 31 | max_voxels: [16000, 40000] 32 | 33 | voxel_encoder: 34 | in_channels: 3 35 | feat_channels: [64] 36 | voxel_size: *vsize 37 | 38 | scatter: 39 | in_channels: 64 40 | output_shape: [496, 432] 41 | 42 | backbone: 43 | in_channels: 64 44 | out_channels: [64, 128, 256] 45 | layer_nums: [3, 5, 5] 46 | layer_strides: [2, 2, 2] 47 | 48 | neck: 49 | in_channels: [64, 128, 256] 50 | out_channels: [128, 128, 128] 51 | upsample_strides: [1, 2, 4] 52 | use_conv_for_no_stride: false 53 | 54 | head: 55 | in_channels: 384 56 | feat_channels: 384 57 | nms_pre: 100 58 | score_thr: 0.1 59 | ranges: [ 60 | [0, -10.0, 0, 10, 10, 0] 61 | ] 62 | sizes: [[0.8, 0.8, 0.95]] 63 | rotations: [0, 1.57] 64 | iou_thr: [[0.35, 0.5]] 65 | 66 | augment: 67 | PointShuffle: 
True 68 | ObjectRangeFilter: True 69 | ObjectSample: 70 | min_points_dict: 71 | chair: 10 72 | sample_dict: 73 | chair: 10 74 | 75 | 76 | pipeline: 77 | name: ObjectDetection 78 | test_compute_metric: true 79 | batch_size: 6 80 | val_batch_size: 1 81 | test_batch_size: 1 82 | save_ckpt_freq: 2 83 | validation_freq: 5 84 | max_epoch: 300 85 | main_log_dir: ./logs_sparse_01_wd_4 86 | train_sum_dir: train_log_sparse_01_wd_4 87 | grad_clip_norm: 2 88 | 89 | optimizer: 90 | lr: 0.0001 91 | betas: [0.95, 0.99] 92 | weight_decay: 0.0025 93 | 94 | # evaluation properties 95 | overlaps: [0.5] 96 | similar_classes: {} 97 | difficulties: [0, 1, 2] 98 | 99 | -------------------------------------------------------------------------------- /ml3d/configs/pointpillars_habitat_sampling_sparse_01_wd_4_5000_2x2.yml: -------------------------------------------------------------------------------- 1 | dataset: 2 | name: HabitatSampling 3 | dataset_path: # path/to/your/dataset 4 | cache_dir: ./logs/cache 5 | steps_per_epoch_train: 5000 6 | 7 | model: 8 | name: SparsePointPillars 9 | ckpt_path: # path/to/your/checkpoint 10 | 11 | batcher: "ignore" 12 | 13 | point_cloud_range: [0, -10.0, -2, 10, 10, 2] 14 | classes: ['chair'] 15 | 16 | loss: 17 | focal: 18 | gamma: 2.0 19 | alpha: 0.25 20 | loss_weight: 1.0 21 | smooth_l1: 22 | beta: 0.11 23 | loss_weight: 2.0 24 | cross_entropy: 25 | loss_weight: 0.2 26 | 27 | voxelize: 28 | max_num_points: 32 29 | voxel_size: &vsize 30 | [0.05, 0.05, 4] 31 | max_voxels: [16000, 40000] 32 | 33 | voxel_encoder: 34 | in_channels: 3 35 | feat_channels: [64] 36 | voxel_size: *vsize 37 | 38 | scatter: 39 | in_channels: 64 40 | output_shape: [496, 432] 41 | 42 | backbone: 43 | in_channels: 64 44 | out_channels: [64, 128, 256] 45 | layer_nums: [3, 5, 5] 46 | layer_strides: [2, 2, 2] 47 | 48 | neck: 49 | in_channels: [64, 128, 256] 50 | out_channels: [128, 128, 128] 51 | upsample_strides: [1, 2, 4] 52 | use_conv_for_no_stride: false 53 | 54 | head: 55 | in_channels: 384 56 | feat_channels: 384 57 | nms_pre: 100 58 | score_thr: 0.1 59 | ranges: [ 60 | [0, -10.0, 0, 10, 10, 0] 61 | ] 62 | sizes: [[0.8, 0.8, 0.95]] 63 | rotations: [0, 1.57] 64 | iou_thr: [[0.35, 0.5]] 65 | 66 | augment: 67 | PointShuffle: True 68 | ObjectRangeFilter: True 69 | ObjectSample: 70 | min_points_dict: 71 | chair: 10 72 | sample_dict: 73 | chair: 10 74 | 75 | 76 | pipeline: 77 | name: ObjectDetection 78 | test_compute_metric: true 79 | batch_size: 6 80 | val_batch_size: 1 81 | test_batch_size: 1 82 | save_ckpt_freq: 2 83 | validation_freq: 5 84 | max_epoch: 300 85 | main_log_dir: ./logs_sparse_01_wd_4_2x2 86 | train_sum_dir: train_log_sparse_01_wd_4_2x2 87 | grad_clip_norm: 2 88 | 89 | optimizer: 90 | lr: 0.0001 91 | betas: [0.95, 0.99] 92 | weight_decay: 0.0025 93 | 94 | # evaluation properties 95 | overlaps: [0.5] 96 | similar_classes: {} 97 | difficulties: [0, 1, 2] 98 | 99 | -------------------------------------------------------------------------------- /ml3d/configs/pointpillars_habitat_sampling_sparse_01_wd_4_5000_2x2_mean_box.yml: -------------------------------------------------------------------------------- 1 | dataset: 2 | name: HabitatSampling 3 | dataset_path: # path/to/your/dataset 4 | cache_dir: ./logs/cache 5 | steps_per_epoch_train: 5000 6 | 7 | model: 8 | name: SparsePointPillars 9 | ckpt_path: # path/to/your/checkpoint 10 | 11 | batcher: "ignore" 12 | 13 | point_cloud_range: [0, -19.0, -2, 25, 19, 2] 14 | classes: ['chair'] 15 | 16 | loss: 17 | focal: 18 | gamma: 2.0 19 | alpha: 0.25 20 | 
loss_weight: 1.0 21 | smooth_l1: 22 | beta: 0.11 23 | loss_weight: 2.0 24 | cross_entropy: 25 | loss_weight: 0.2 26 | 27 | voxelize: 28 | max_num_points: 32 29 | voxel_size: &vsize 30 | [0.05, 0.05, 4] 31 | max_voxels: [16000, 40000] 32 | 33 | voxel_encoder: 34 | in_channels: 3 35 | feat_channels: [64] 36 | voxel_size: *vsize 37 | 38 | scatter: 39 | in_channels: 64 40 | output_shape: [768, 512] 41 | 42 | backbone: 43 | in_channels: 64 44 | out_channels: [64, 128, 256] 45 | layer_nums: [3, 5, 5] 46 | layer_strides: [2, 2, 2] 47 | 48 | neck: 49 | in_channels: [64, 128, 256] 50 | out_channels: [128, 128, 128] 51 | upsample_strides: [1, 2, 4] 52 | use_conv_for_no_stride: false 53 | 54 | head: 55 | in_channels: 384 56 | feat_channels: 384 57 | nms_pre: 100 58 | score_thr: 0.1 59 | ranges: [ 60 | [0, -19.0, 0, 25, 19, 0] 61 | ] 62 | sizes: [[0.960005934232471, 0.6668719666706728, 0.8838318530669755]] 63 | rotations: [0, 1.57] 64 | iou_thr: [[0.35, 0.5]] 65 | 66 | augment: 67 | PointShuffle: True 68 | ObjectRangeFilter: True 69 | ObjectSample: 70 | min_points_dict: 71 | chair: 20 72 | sample_dict: 73 | chair: 10 74 | 75 | 76 | pipeline: 77 | name: ObjectDetection 78 | test_compute_metric: true 79 | batch_size: 6 80 | val_batch_size: 1 81 | test_batch_size: 1 82 | save_ckpt_freq: 2 83 | validation_freq: 5 84 | max_epoch: 300 85 | main_log_dir: ./logs_sparse_01_wd_4_mean_box 86 | train_sum_dir: train_log_sparse_01_wd_4_mean_box 87 | grad_clip_norm: 2 88 | 89 | optimizer: 90 | lr: 0.0001 91 | betas: [0.95, 0.99] 92 | weight_decay: 0.0025 93 | 94 | # evaluation properties 95 | overlaps: [0.5] 96 | similar_classes: {} 97 | difficulties: [0, 1, 2] 98 | 99 | -------------------------------------------------------------------------------- /ml3d/configs/pointpillars_habitat_sampling_sparse_01_wd_4_7500.yml: -------------------------------------------------------------------------------- 1 | dataset: 2 | name: HabitatSampling 3 | dataset_path: # path/to/your/dataset 4 | cache_dir: ./logs/cache 5 | steps_per_epoch_train: 7500 6 | 7 | model: 8 | name: SparsePointPillars 9 | ckpt_path: # path/to/your/checkpoint 10 | 11 | batcher: "ignore" 12 | 13 | point_cloud_range: [0, -10.0, -2, 10, 10, 2] 14 | classes: ['chair'] 15 | 16 | loss: 17 | focal: 18 | gamma: 2.0 19 | alpha: 0.25 20 | loss_weight: 1.0 21 | smooth_l1: 22 | beta: 0.11 23 | loss_weight: 2.0 24 | cross_entropy: 25 | loss_weight: 0.2 26 | 27 | voxelize: 28 | max_num_points: 32 29 | voxel_size: &vsize 30 | [0.05, 0.05, 4] 31 | max_voxels: [16000, 40000] 32 | 33 | voxel_encoder: 34 | in_channels: 3 35 | feat_channels: [64] 36 | voxel_size: *vsize 37 | 38 | scatter: 39 | in_channels: 64 40 | output_shape: [496, 432] 41 | 42 | backbone: 43 | in_channels: 64 44 | out_channels: [64, 128, 256] 45 | layer_nums: [3, 5, 5] 46 | layer_strides: [2, 2, 2] 47 | 48 | neck: 49 | in_channels: [64, 128, 256] 50 | out_channels: [128, 128, 128] 51 | upsample_strides: [1, 2, 4] 52 | use_conv_for_no_stride: false 53 | 54 | head: 55 | in_channels: 384 56 | feat_channels: 384 57 | nms_pre: 100 58 | score_thr: 0.1 59 | ranges: [ 60 | [0, -10.0, 0, 10, 10, 0] 61 | ] 62 | sizes: [[0.8, 0.8, 0.95]] 63 | rotations: [0, 1.57] 64 | iou_thr: [[0.35, 0.5]] 65 | 66 | augment: 67 | PointShuffle: True 68 | ObjectRangeFilter: True 69 | ObjectSample: 70 | min_points_dict: 71 | chair: 10 72 | sample_dict: 73 | chair: 10 74 | 75 | 76 | pipeline: 77 | name: ObjectDetection 78 | test_compute_metric: true 79 | batch_size: 6 80 | val_batch_size: 1 81 | test_batch_size: 1 82 | save_ckpt_freq: 2 
83 | validation_freq: 5 84 | max_epoch: 300 85 | main_log_dir: ./logs_sparse_01_wd_4_7500 86 | train_sum_dir: train_log_sparse_01_wd_4_7500 87 | grad_clip_norm: 2 88 | 89 | optimizer: 90 | lr: 0.0001 91 | betas: [0.95, 0.99] 92 | weight_decay: 0.0025 93 | 94 | # evaluation properties 95 | overlaps: [0.5] 96 | similar_classes: {} 97 | difficulties: [0, 1, 2] 98 | 99 | -------------------------------------------------------------------------------- /ml3d/configs/pointpillars_habitat_sampling_sparse_01_wd_4_7500_2x2.yml: -------------------------------------------------------------------------------- 1 | dataset: 2 | name: HabitatSampling 3 | dataset_path: # path/to/your/dataset 4 | cache_dir: ./logs/cache 5 | steps_per_epoch_train: 7500 6 | 7 | model: 8 | name: SparsePointPillars 9 | ckpt_path: # path/to/your/checkpoint 10 | 11 | batcher: "ignore" 12 | 13 | point_cloud_range: [0, -10.0, -2, 10, 10, 2] 14 | classes: ['chair'] 15 | 16 | loss: 17 | focal: 18 | gamma: 2.0 19 | alpha: 0.25 20 | loss_weight: 1.0 21 | smooth_l1: 22 | beta: 0.11 23 | loss_weight: 2.0 24 | cross_entropy: 25 | loss_weight: 0.2 26 | 27 | voxelize: 28 | max_num_points: 32 29 | voxel_size: &vsize 30 | [0.05, 0.05, 4] 31 | max_voxels: [16000, 40000] 32 | 33 | voxel_encoder: 34 | in_channels: 3 35 | feat_channels: [64] 36 | voxel_size: *vsize 37 | 38 | scatter: 39 | in_channels: 64 40 | output_shape: [496, 432] 41 | 42 | backbone: 43 | in_channels: 64 44 | out_channels: [64, 128, 256] 45 | layer_nums: [3, 5, 5] 46 | layer_strides: [2, 2, 2] 47 | 48 | neck: 49 | in_channels: [64, 128, 256] 50 | out_channels: [128, 128, 128] 51 | upsample_strides: [1, 2, 4] 52 | use_conv_for_no_stride: false 53 | 54 | head: 55 | in_channels: 384 56 | feat_channels: 384 57 | nms_pre: 100 58 | score_thr: 0.1 59 | ranges: [ 60 | [0, -10.0, 0, 10, 10, 0] 61 | ] 62 | sizes: [[0.8, 0.8, 0.95]] 63 | rotations: [0, 1.57] 64 | iou_thr: [[0.35, 0.5]] 65 | 66 | augment: 67 | PointShuffle: True 68 | ObjectRangeFilter: True 69 | ObjectSample: 70 | min_points_dict: 71 | chair: 10 72 | sample_dict: 73 | chair: 10 74 | 75 | 76 | pipeline: 77 | name: ObjectDetection 78 | test_compute_metric: true 79 | batch_size: 6 80 | val_batch_size: 1 81 | test_batch_size: 1 82 | save_ckpt_freq: 2 83 | validation_freq: 5 84 | max_epoch: 300 85 | main_log_dir: ./logs_sparse_01_wd_4_7500_2x2 86 | train_sum_dir: train_log_sparse_01_wd_4_7500_2x2 87 | grad_clip_norm: 2 88 | 89 | optimizer: 90 | lr: 0.0001 91 | betas: [0.95, 0.99] 92 | weight_decay: 0.0025 93 | 94 | # evaluation properties 95 | overlaps: [0.5] 96 | similar_classes: {} 97 | difficulties: [0, 1, 2] 98 | 99 | -------------------------------------------------------------------------------- /ml3d/configs/pointpillars_habitat_sampling_sparse_01_wd_8.yml: -------------------------------------------------------------------------------- 1 | dataset: 2 | name: HabitatSampling 3 | dataset_path: # path/to/your/dataset 4 | cache_dir: ./logs/cache 5 | steps_per_epoch_train: 5000 6 | 7 | model: 8 | name: SparsePointPillars 9 | ckpt_path: # path/to/your/checkpoint 10 | 11 | batcher: "ignore" 12 | 13 | point_cloud_range: [0, -10.0, -2, 10, 10, 2] 14 | classes: ['chair'] 15 | 16 | loss: 17 | focal: 18 | gamma: 2.0 19 | alpha: 0.25 20 | loss_weight: 1.0 21 | smooth_l1: 22 | beta: 0.11 23 | loss_weight: 2.0 24 | cross_entropy: 25 | loss_weight: 0.2 26 | 27 | voxelize: 28 | max_num_points: 32 29 | voxel_size: &vsize 30 | [0.05, 0.05, 4] 31 | max_voxels: [16000, 40000] 32 | 33 | voxel_encoder: 34 | in_channels: 3 35 | 
feat_channels: [64] 36 | voxel_size: *vsize 37 | 38 | scatter: 39 | in_channels: 64 40 | output_shape: [496, 432] 41 | 42 | backbone: 43 | in_channels: 64 44 | out_channels: [64, 128, 256] 45 | layer_nums: [3, 5, 5] 46 | layer_strides: [2, 2, 2] 47 | 48 | neck: 49 | in_channels: [64, 128, 256] 50 | out_channels: [128, 128, 128] 51 | upsample_strides: [1, 2, 4] 52 | use_conv_for_no_stride: false 53 | 54 | head: 55 | in_channels: 384 56 | feat_channels: 384 57 | nms_pre: 100 58 | score_thr: 0.1 59 | ranges: [ 60 | [0, -10.0, 0, 10, 10, 0] 61 | ] 62 | sizes: [[0.8, 0.8, 0.95]] 63 | rotations: [0, 1.57] 64 | iou_thr: [[0.35, 0.5]] 65 | 66 | augment: 67 | PointShuffle: True 68 | ObjectRangeFilter: True 69 | ObjectSample: 70 | min_points_dict: 71 | chair: 10 72 | sample_dict: 73 | chair: 10 74 | 75 | 76 | pipeline: 77 | name: ObjectDetection 78 | test_compute_metric: true 79 | batch_size: 6 80 | val_batch_size: 1 81 | test_batch_size: 1 82 | save_ckpt_freq: 2 83 | validation_freq: 5 84 | max_epoch: 300 85 | main_log_dir: ./logs_sparse_01_wd_8 86 | train_sum_dir: train_log_sparse_01_wd_8 87 | grad_clip_norm: 2 88 | 89 | optimizer: 90 | lr: 0.0001 91 | betas: [0.95, 0.99] 92 | weight_decay: 0.00125 93 | 94 | # evaluation properties 95 | overlaps: [0.5] 96 | similar_classes: {} 97 | difficulties: [0, 1, 2] 98 | 99 | -------------------------------------------------------------------------------- /ml3d/configs/pointpillars_habitat_sampling_sparse_05.yml: -------------------------------------------------------------------------------- 1 | dataset: 2 | name: HabitatSampling 3 | dataset_path: # path/to/your/dataset 4 | cache_dir: ./logs/cache 5 | steps_per_epoch_train: 5000 6 | 7 | model: 8 | name: SparsePointPillars 9 | ckpt_path: # path/to/your/checkpoint 10 | 11 | batcher: "ignore" 12 | 13 | point_cloud_range: [0, -10.0, -2, 10, 10, 2] 14 | classes: ['chair'] 15 | 16 | loss: 17 | focal: 18 | gamma: 2.0 19 | alpha: 0.25 20 | loss_weight: 1.0 21 | smooth_l1: 22 | beta: 0.11 23 | loss_weight: 2.0 24 | cross_entropy: 25 | loss_weight: 0.2 26 | 27 | voxelize: 28 | max_num_points: 32 29 | voxel_size: &vsize 30 | [0.05, 0.05, 4] 31 | max_voxels: [16000, 40000] 32 | 33 | voxel_encoder: 34 | in_channels: 3 35 | feat_channels: [64] 36 | voxel_size: *vsize 37 | 38 | scatter: 39 | in_channels: 64 40 | output_shape: [496, 432] 41 | 42 | backbone: 43 | in_channels: 64 44 | out_channels: [64, 128, 256] 45 | layer_nums: [3, 5, 5] 46 | layer_strides: [2, 2, 2] 47 | 48 | neck: 49 | in_channels: [64, 128, 256] 50 | out_channels: [128, 128, 128] 51 | upsample_strides: [1, 2, 4] 52 | use_conv_for_no_stride: false 53 | 54 | head: 55 | in_channels: 384 56 | feat_channels: 384 57 | nms_pre: 100 58 | score_thr: 0.1 59 | ranges: [ 60 | [0, -10.0, 0, 10, 10, 0] 61 | ] 62 | sizes: [[0.8, 0.8, 0.95]] 63 | rotations: [0, 1.57] 64 | iou_thr: [[0.35, 0.5]] 65 | 66 | augment: 67 | PointShuffle: True 68 | ObjectRangeFilter: True 69 | ObjectSample: 70 | min_points_dict: 71 | chair: 10 72 | sample_dict: 73 | chair: 10 74 | 75 | 76 | pipeline: 77 | name: ObjectDetection 78 | test_compute_metric: true 79 | batch_size: 6 80 | val_batch_size: 1 81 | test_batch_size: 1 82 | save_ckpt_freq: 2 83 | validation_freq: 5 84 | max_epoch: 800 85 | main_log_dir: ./logs_sparse_05 86 | train_sum_dir: train_log 87 | grad_clip_norm: 2 88 | 89 | optimizer: 90 | lr: 0.0005 91 | betas: [0.95, 0.99] 92 | weight_decay: 0.01 93 | 94 | # evaluation properties 95 | overlaps: [0.5] 96 | similar_classes: {} 97 | difficulties: [0, 1, 2] 98 | 99 | 
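Aside on naming: the lr/wd suffixes in these filenames are not documented anywhere in the repo, but the pattern is recoverable from the configs themselves. sparse_01 trains with lr 0.0001, sparse_05 (above) with lr 0.0005, and sparse_3 with lr 0.003, so the suffix is the multiplier of the 0.001 base lr with the decimal point dropped. Integer wd_N suffixes divide the 0.01 base weight decay by N (wd_4 -> 0.0025, wd_32 -> 0.0003125, wd_100 -> 0.0001), while the zero-led ones (wd_005, wd_01) are literal values. A small sketch encoding that inference; the helper names are mine, nothing in the repo:

# Hypothetical decoders for the experiment-name suffixes used above;
# the rules are inferred from the YAML files, not documented anywhere.
def lr_from_suffix(s: str) -> float:
    """'01' -> 1e-4, '001' -> 1e-5, '3' -> 3e-3, '10' -> 1e-2."""
    mult = float("0." + s[1:]) if s.startswith("0") else float(s)
    return 1e-3 * mult

def wd_from_suffix(s: str) -> float:
    """'4' -> 0.01/4 = 0.0025, '100' -> 1e-4, '0' -> 0.0;
    zero-led suffixes read literally: '005' -> 0.05, '01' -> 0.1."""
    if s != "0" and s.startswith("0"):
        return float("0." + s[1:])
    n = int(s)
    return 0.0 if n == 0 else 0.01 / n

assert abs(lr_from_suffix("05") - 0.0005) < 1e-12    # matches sparse_05 above
assert abs(wd_from_suffix("16") - 0.000625) < 1e-12  # matches sparse_01_wd_16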
-------------------------------------------------------------------------------- /ml3d/configs/pointpillars_habitat_sampling_sparse_1.yml: -------------------------------------------------------------------------------- 1 | dataset: 2 | name: HabitatSampling 3 | dataset_path: # path/to/your/dataset 4 | cache_dir: ./logs/cache 5 | steps_per_epoch_train: 5000 6 | 7 | model: 8 | name: SparsePointPillars 9 | ckpt_path: # path/to/your/checkpoint 10 | 11 | batcher: "ignore" 12 | 13 | point_cloud_range: [0, -10.0, -2, 10, 10, 2] 14 | classes: ['chair'] 15 | 16 | loss: 17 | focal: 18 | gamma: 2.0 19 | alpha: 0.25 20 | loss_weight: 1.0 21 | smooth_l1: 22 | beta: 0.11 23 | loss_weight: 2.0 24 | cross_entropy: 25 | loss_weight: 0.2 26 | 27 | voxelize: 28 | max_num_points: 32 29 | voxel_size: &vsize 30 | [0.05, 0.05, 4] 31 | max_voxels: [16000, 40000] 32 | 33 | voxel_encoder: 34 | in_channels: 3 35 | feat_channels: [64] 36 | voxel_size: *vsize 37 | 38 | scatter: 39 | in_channels: 64 40 | output_shape: [496, 432] 41 | 42 | backbone: 43 | in_channels: 64 44 | out_channels: [64, 128, 256] 45 | layer_nums: [3, 5, 5] 46 | layer_strides: [2, 2, 2] 47 | 48 | neck: 49 | in_channels: [64, 128, 256] 50 | out_channels: [128, 128, 128] 51 | upsample_strides: [1, 2, 4] 52 | use_conv_for_no_stride: false 53 | 54 | head: 55 | in_channels: 384 56 | feat_channels: 384 57 | nms_pre: 100 58 | score_thr: 0.1 59 | ranges: [ 60 | [0, -10.0, 0, 10, 10, 0] 61 | ] 62 | sizes: [[0.8, 0.8, 0.95]] 63 | rotations: [0, 1.57] 64 | iou_thr: [[0.35, 0.5]] 65 | 66 | augment: 67 | PointShuffle: True 68 | ObjectRangeFilter: True 69 | ObjectSample: 70 | min_points_dict: 71 | chair: 10 72 | sample_dict: 73 | chair: 10 74 | 75 | 76 | pipeline: 77 | name: ObjectDetection 78 | test_compute_metric: true 79 | batch_size: 6 80 | val_batch_size: 1 81 | test_batch_size: 1 82 | save_ckpt_freq: 2 83 | validation_freq: 5 84 | max_epoch: 800 85 | main_log_dir: ./logs_sparse_1 86 | train_sum_dir: train_log 87 | grad_clip_norm: 2 88 | 89 | optimizer: 90 | lr: 0.001 91 | betas: [0.95, 0.99] 92 | weight_decay: 0.01 93 | 94 | # evaluation properties 95 | overlaps: [0.5] 96 | similar_classes: {} 97 | difficulties: [0, 1, 2] 98 | 99 | -------------------------------------------------------------------------------- /ml3d/configs/pointpillars_habitat_sampling_sparse_10.yml: -------------------------------------------------------------------------------- 1 | dataset: 2 | name: HabitatSampling 3 | dataset_path: # path/to/your/dataset 4 | cache_dir: ./logs/cache 5 | steps_per_epoch_train: 5000 6 | 7 | model: 8 | name: SparsePointPillars 9 | ckpt_path: # path/to/your/checkpoint 10 | 11 | batcher: "ignore" 12 | 13 | point_cloud_range: [0, -10.0, -2, 10, 10, 2] 14 | classes: ['chair'] 15 | 16 | loss: 17 | focal: 18 | gamma: 2.0 19 | alpha: 0.25 20 | loss_weight: 1.0 21 | smooth_l1: 22 | beta: 0.11 23 | loss_weight: 2.0 24 | cross_entropy: 25 | loss_weight: 0.2 26 | 27 | voxelize: 28 | max_num_points: 32 29 | voxel_size: &vsize 30 | [0.05, 0.05, 4] 31 | max_voxels: [16000, 40000] 32 | 33 | voxel_encoder: 34 | in_channels: 3 35 | feat_channels: [64] 36 | voxel_size: *vsize 37 | 38 | scatter: 39 | in_channels: 64 40 | output_shape: [496, 432] 41 | 42 | backbone: 43 | in_channels: 64 44 | out_channels: [64, 128, 256] 45 | layer_nums: [3, 5, 5] 46 | layer_strides: [2, 2, 2] 47 | 48 | neck: 49 | in_channels: [64, 128, 256] 50 | out_channels: [128, 128, 128] 51 | upsample_strides: [1, 2, 4] 52 | use_conv_for_no_stride: false 53 | 54 | head: 55 | in_channels: 384 56 | 
feat_channels: 384 57 | nms_pre: 100 58 | score_thr: 0.1 59 | ranges: [ 60 | [0, -10.0, 0, 10, 10, 0] 61 | ] 62 | sizes: [[0.8, 0.8, 0.95]] 63 | rotations: [0, 1.57] 64 | iou_thr: [[0.35, 0.5]] 65 | 66 | augment: 67 | PointShuffle: True 68 | ObjectRangeFilter: True 69 | ObjectSample: 70 | min_points_dict: 71 | chair: 10 72 | sample_dict: 73 | chair: 10 74 | 75 | 76 | pipeline: 77 | name: ObjectDetection 78 | test_compute_metric: true 79 | batch_size: 6 80 | val_batch_size: 1 81 | test_batch_size: 1 82 | save_ckpt_freq: 2 83 | validation_freq: 5 84 | max_epoch: 800 85 | main_log_dir: ./logs_sparse_10 86 | train_sum_dir: train_log_sparse_10 87 | grad_clip_norm: 2 88 | 89 | optimizer: 90 | lr: 0.01 91 | betas: [0.95, 0.99] 92 | weight_decay: 0.01 93 | 94 | # evaluation properties 95 | overlaps: [0.5] 96 | similar_classes: {} 97 | difficulties: [0, 1, 2] 98 | 99 | -------------------------------------------------------------------------------- /ml3d/configs/pointpillars_habitat_sampling_sparse_3.yml: -------------------------------------------------------------------------------- 1 | dataset: 2 | name: HabitatSampling 3 | dataset_path: # path/to/your/dataset 4 | cache_dir: ./logs/cache 5 | steps_per_epoch_train: 5000 6 | 7 | model: 8 | name: SparsePointPillars 9 | ckpt_path: # path/to/your/checkpoint 10 | 11 | batcher: "ignore" 12 | 13 | point_cloud_range: [0, -10.0, -2, 10, 10, 2] 14 | classes: ['chair'] 15 | 16 | loss: 17 | focal: 18 | gamma: 2.0 19 | alpha: 0.25 20 | loss_weight: 1.0 21 | smooth_l1: 22 | beta: 0.11 23 | loss_weight: 2.0 24 | cross_entropy: 25 | loss_weight: 0.2 26 | 27 | voxelize: 28 | max_num_points: 32 29 | voxel_size: &vsize 30 | [0.05, 0.05, 4] 31 | max_voxels: [16000, 40000] 32 | 33 | voxel_encoder: 34 | in_channels: 3 35 | feat_channels: [64] 36 | voxel_size: *vsize 37 | 38 | scatter: 39 | in_channels: 64 40 | output_shape: [496, 432] 41 | 42 | backbone: 43 | in_channels: 64 44 | out_channels: [64, 128, 256] 45 | layer_nums: [3, 5, 5] 46 | layer_strides: [2, 2, 2] 47 | 48 | neck: 49 | in_channels: [64, 128, 256] 50 | out_channels: [128, 128, 128] 51 | upsample_strides: [1, 2, 4] 52 | use_conv_for_no_stride: false 53 | 54 | head: 55 | in_channels: 384 56 | feat_channels: 384 57 | nms_pre: 100 58 | score_thr: 0.1 59 | ranges: [ 60 | [0, -10.0, 0, 10, 10, 0] 61 | ] 62 | sizes: [[0.8, 0.8, 0.95]] 63 | rotations: [0, 1.57] 64 | iou_thr: [[0.35, 0.5]] 65 | 66 | augment: 67 | PointShuffle: True 68 | ObjectRangeFilter: True 69 | ObjectSample: 70 | min_points_dict: 71 | chair: 10 72 | sample_dict: 73 | chair: 10 74 | 75 | 76 | pipeline: 77 | name: ObjectDetection 78 | test_compute_metric: true 79 | batch_size: 6 80 | val_batch_size: 1 81 | test_batch_size: 1 82 | save_ckpt_freq: 2 83 | validation_freq: 5 84 | max_epoch: 800 85 | main_log_dir: ./logs_sparse_3 86 | train_sum_dir: train_log_sparse_3 87 | grad_clip_norm: 2 88 | 89 | optimizer: 90 | lr: 0.003 91 | betas: [0.95, 0.99] 92 | weight_decay: 0.01 93 | 94 | # evaluation properties 95 | overlaps: [0.5] 96 | similar_classes: {} 97 | difficulties: [0, 1, 2] 98 | 99 | -------------------------------------------------------------------------------- /ml3d/configs/pointpillars_habitat_sampling_sparse_5.yml: -------------------------------------------------------------------------------- 1 | dataset: 2 | name: HabitatSampling 3 | dataset_path: # path/to/your/dataset 4 | cache_dir: ./logs/cache 5 | steps_per_epoch_train: 5000 6 | 7 | model: 8 | name: SparsePointPillars 9 | ckpt_path: # path/to/your/checkpoint 10 | 11 | batcher: 
"ignore" 12 | 13 | point_cloud_range: [0, -10.0, -2, 10, 10, 2] 14 | classes: ['chair'] 15 | 16 | loss: 17 | focal: 18 | gamma: 2.0 19 | alpha: 0.25 20 | loss_weight: 1.0 21 | smooth_l1: 22 | beta: 0.11 23 | loss_weight: 2.0 24 | cross_entropy: 25 | loss_weight: 0.2 26 | 27 | voxelize: 28 | max_num_points: 32 29 | voxel_size: &vsize 30 | [0.05, 0.05, 4] 31 | max_voxels: [16000, 40000] 32 | 33 | voxel_encoder: 34 | in_channels: 3 35 | feat_channels: [64] 36 | voxel_size: *vsize 37 | 38 | scatter: 39 | in_channels: 64 40 | output_shape: [496, 432] 41 | 42 | backbone: 43 | in_channels: 64 44 | out_channels: [64, 128, 256] 45 | layer_nums: [3, 5, 5] 46 | layer_strides: [2, 2, 2] 47 | 48 | neck: 49 | in_channels: [64, 128, 256] 50 | out_channels: [128, 128, 128] 51 | upsample_strides: [1, 2, 4] 52 | use_conv_for_no_stride: false 53 | 54 | head: 55 | in_channels: 384 56 | feat_channels: 384 57 | nms_pre: 100 58 | score_thr: 0.1 59 | ranges: [ 60 | [0, -10.0, 0, 10, 10, 0] 61 | ] 62 | sizes: [[0.8, 0.8, 0.95]] 63 | rotations: [0, 1.57] 64 | iou_thr: [[0.35, 0.5]] 65 | 66 | augment: 67 | PointShuffle: True 68 | ObjectRangeFilter: True 69 | ObjectSample: 70 | min_points_dict: 71 | chair: 10 72 | sample_dict: 73 | chair: 10 74 | 75 | 76 | pipeline: 77 | name: ObjectDetection 78 | test_compute_metric: true 79 | batch_size: 6 80 | val_batch_size: 1 81 | test_batch_size: 1 82 | save_ckpt_freq: 2 83 | validation_freq: 5 84 | max_epoch: 800 85 | main_log_dir: ./logs_sparse_5 86 | train_sum_dir: train_log_sparse_5 87 | grad_clip_norm: 2 88 | 89 | optimizer: 90 | lr: 0.005 91 | betas: [0.95, 0.99] 92 | weight_decay: 0.01 93 | 94 | # evaluation properties 95 | overlaps: [0.5] 96 | similar_classes: {} 97 | difficulties: [0, 1, 2] 98 | 99 | -------------------------------------------------------------------------------- /ml3d/configs/pointpillars_habitat_sampling_sparse_5000_2x2_mean_box.yml: -------------------------------------------------------------------------------- 1 | dataset: 2 | name: HabitatSampling 3 | dataset_path: # path/to/your/dataset 4 | cache_dir: ./logs/cache 5 | steps_per_epoch_train: 5000 6 | 7 | model: 8 | name: SparsePointPillars 9 | ckpt_path: # path/to/your/checkpoint 10 | 11 | batcher: "ignore" 12 | 13 | point_cloud_range: [0, -19.0, -2, 25, 19, 2] 14 | classes: ['chair'] 15 | 16 | loss: 17 | focal: 18 | gamma: 2.0 19 | alpha: 0.25 20 | loss_weight: 1.0 21 | smooth_l1: 22 | beta: 0.11 23 | loss_weight: 2.0 24 | cross_entropy: 25 | loss_weight: 0.2 26 | 27 | voxelize: 28 | max_num_points: 32 29 | voxel_size: &vsize 30 | [0.05, 0.05, 4] 31 | max_voxels: [16000, 40000] 32 | 33 | voxel_encoder: 34 | in_channels: 3 35 | feat_channels: [64] 36 | voxel_size: *vsize 37 | 38 | scatter: 39 | in_channels: 64 40 | output_shape: [768, 512] 41 | 42 | backbone: 43 | in_channels: 64 44 | out_channels: [64, 128, 256] 45 | layer_nums: [3, 5, 5] 46 | layer_strides: [2, 2, 2] 47 | 48 | neck: 49 | in_channels: [64, 128, 256] 50 | out_channels: [128, 128, 128] 51 | upsample_strides: [1, 2, 4] 52 | use_conv_for_no_stride: false 53 | 54 | head: 55 | in_channels: 384 56 | feat_channels: 384 57 | nms_pre: 100 58 | score_thr: 0.1 59 | ranges: [ 60 | [0, -19.0, 0, 25, 19, 0] 61 | ] 62 | sizes: [[0.960005934232471, 0.6668719666706728, 0.8838318530669755]] 63 | rotations: [0, 1.57] 64 | iou_thr: [[0.35, 0.5]] 65 | 66 | augment: 67 | PointShuffle: True 68 | ObjectRangeFilter: True 69 | ObjectSample: 70 | min_points_dict: 71 | chair: 20 72 | sample_dict: 73 | chair: 10 74 | 75 | 76 | pipeline: 77 | name: 
ObjectDetection 78 | test_compute_metric: false 79 | batch_size: 6 80 | val_batch_size: 1 81 | test_batch_size: 1 82 | save_ckpt_freq: 2 83 | validation_freq: 5 84 | max_epoch: 250 85 | main_log_dir: ./logs_sparse_5000_2x2_mean_box 86 | train_sum_dir: train_log_sparse_5000_2x2_mean_box 87 | grad_clip_norm: 2 88 | 89 | optimizer: 90 | lr: 0.001 91 | betas: [0.95, 0.99] 92 | weight_decay: 0.01 93 | 94 | # evaluation properties 95 | overlaps: [0.5] 96 | similar_classes: {} 97 | difficulties: [0, 1, 2] 98 | 99 | -------------------------------------------------------------------------------- /ml3d/configs/pointpillars_habitat_sampling_sparse_5000_2x2_mean_box_wd_100.yml: -------------------------------------------------------------------------------- 1 | dataset: 2 | name: HabitatSampling 3 | dataset_path: # path/to/your/dataset 4 | cache_dir: ./logs/cache 5 | steps_per_epoch_train: 5000 6 | 7 | model: 8 | name: SparsePointPillars 9 | ckpt_path: # path/to/your/checkpoint 10 | 11 | batcher: "ignore" 12 | 13 | point_cloud_range: [0, -19.0, -2, 25, 19, 2] 14 | classes: ['chair'] 15 | 16 | loss: 17 | focal: 18 | gamma: 2.0 19 | alpha: 0.25 20 | loss_weight: 1.0 21 | smooth_l1: 22 | beta: 0.11 23 | loss_weight: 2.0 24 | cross_entropy: 25 | loss_weight: 0.2 26 | 27 | voxelize: 28 | max_num_points: 32 29 | voxel_size: &vsize 30 | [0.05, 0.05, 4] 31 | max_voxels: [16000, 40000] 32 | 33 | voxel_encoder: 34 | in_channels: 3 35 | feat_channels: [64] 36 | voxel_size: *vsize 37 | 38 | scatter: 39 | in_channels: 64 40 | output_shape: [768, 512] 41 | 42 | backbone: 43 | in_channels: 64 44 | out_channels: [64, 128, 256] 45 | layer_nums: [3, 5, 5] 46 | layer_strides: [2, 2, 2] 47 | 48 | neck: 49 | in_channels: [64, 128, 256] 50 | out_channels: [128, 128, 128] 51 | upsample_strides: [1, 2, 4] 52 | use_conv_for_no_stride: false 53 | 54 | head: 55 | in_channels: 384 56 | feat_channels: 384 57 | nms_pre: 100 58 | score_thr: 0.1 59 | ranges: [ 60 | [0, -19.0, 0, 25, 19, 0] 61 | ] 62 | sizes: [[0.960005934232471, 0.6668719666706728, 0.8838318530669755]] 63 | rotations: [0, 1.57] 64 | iou_thr: [[0.35, 0.5]] 65 | 66 | augment: 67 | PointShuffle: True 68 | ObjectRangeFilter: True 69 | ObjectSample: 70 | min_points_dict: 71 | chair: 20 72 | sample_dict: 73 | chair: 10 74 | 75 | 76 | pipeline: 77 | name: ObjectDetection 78 | test_compute_metric: false 79 | batch_size: 6 80 | val_batch_size: 1 81 | test_batch_size: 1 82 | save_ckpt_freq: 2 83 | validation_freq: 5 84 | max_epoch: 200 85 | main_log_dir: ./logs_sparse_5000_2x2_mean_box_wd_100 86 | train_sum_dir: train_log_sparse_5000_2x2_mean_box_wd_100 87 | grad_clip_norm: 2 88 | 89 | optimizer: 90 | lr: 0.001 91 | betas: [0.95, 0.99] 92 | weight_decay: 0.0001 93 | 94 | # evaluation properties 95 | overlaps: [0.5] 96 | similar_classes: {} 97 | difficulties: [0, 1, 2] 98 | 99 | -------------------------------------------------------------------------------- /ml3d/configs/randlanet_parislille3d.yml: -------------------------------------------------------------------------------- 1 | dataset: 2 | name: ParisLille3D 3 | dataset_path: # path/to/your/dataset 4 | cache_dir: ./logs/cache 5 | class_weights: [53033221, 25821560, 546190, 54417, 6 | 180638, 3998129, 160899, 3441844, 9681606] 7 | ignored_label_inds: 8 | - 0 9 | num_points: 65536 10 | test_result_folder: ./test 11 | val_files: 12 | - Lille2.ply 13 | use_cache: true 14 | steps_per_epoch_train: 50 15 | steps_per_epoch_valid: 10 16 | sampler: 17 | name: SemSegSpatiallyRegularSampler 18 | model: 19 | name: RandLANet 20 | 
batcher: DefaultBatcher 21 | ckpt_path: # path/to/your/checkpoint 22 | dim_feature: 8 23 | dim_input: 3 24 | dim_output: 25 | - 16 26 | - 64 27 | - 128 28 | - 256 29 | - 512 30 | grid_size: 0.08 31 | ignored_label_inds: 32 | - 0 33 | k_n: 16 34 | num_classes: 9 35 | num_layers: 5 36 | num_points: 65536 37 | sub_sampling_ratio: 38 | - 4 39 | - 4 40 | - 4 41 | - 4 42 | - 2 43 | weight_decay: 0.001 44 | t_align: true 45 | t_normalize: 46 | recentering: [0, 1] 47 | t_augment: 48 | turn_on: false 49 | rotation_method: vertical 50 | scale_anisotropic: false 51 | symmetries: true 52 | noise_level: 0.01 53 | min_s: 0.9 54 | max_s: 1.1 55 | pipeline: 56 | name: SemanticSegmentation 57 | adam_lr: 0.004 58 | batch_size: 2 59 | main_log_dir: ./logs 60 | max_epoch: 200 61 | save_ckpt_freq: 5 62 | scheduler_gamma: 0.9886 63 | test_batch_size: 2 64 | train_sum_dir: train_log 65 | val_batch_size: 2 66 | -------------------------------------------------------------------------------- /ml3d/configs/randlanet_s3dis.yml: -------------------------------------------------------------------------------- 1 | dataset: 2 | name: S3DIS 3 | dataset_path: # path/to/your/dataset 4 | cache_dir: ./logs/cache 5 | class_weights: [3370714, 2856755, 4919229, 318158, 375640, 6 | 478001, 974733, 650464, 791496, 88727, 1284130, 229758, 2272837] 7 | ignored_label_inds: [] 8 | num_points: 40960 9 | test_area_idx: 3 10 | test_result_folder: ./test 11 | use_cache: False 12 | model: 13 | name: RandLANet 14 | batcher: DefaultBatcher 15 | ckpt_path: # path/to/your/checkpoint 16 | dim_feature: 8 17 | dim_input: 6 18 | dim_output: 19 | - 16 20 | - 64 21 | - 128 22 | - 256 23 | - 512 24 | grid_size: 0.04 25 | ignored_label_inds: [] 26 | k_n: 16 27 | num_classes: 13 28 | num_layers: 5 29 | num_points: 40960 30 | sub_sampling_ratio: 31 | - 4 32 | - 4 33 | - 4 34 | - 4 35 | - 2 36 | t_normalize: 37 | method: linear 38 | normalize_points: False 39 | feat_bias: 0 40 | feat_scale: 1 41 | pipeline: 42 | name: SemanticSegmentation 43 | adam_lr: 0.01 44 | batch_size: 2 45 | learning_rate: 0.01 46 | main_log_dir: ./logs 47 | max_epoch: 100 48 | save_ckpt_freq: 20 49 | scheduler_gamma: 0.95 50 | test_batch_size: 3 51 | train_sum_dir: train_log 52 | val_batch_size: 2 53 | -------------------------------------------------------------------------------- /ml3d/configs/randlanet_semantic3d.yml: -------------------------------------------------------------------------------- 1 | dataset: 2 | name: Semantic3D 3 | dataset_path: # path/to/your/dataset 4 | cache_dir: ./logs/cache_small3d/ 5 | class_weights: [5181602, 5012952, 6830086, 1311528, 10476365, 946982, 334860, 269353] 6 | ignored_label_inds: 7 | - 0 8 | num_points: 65536 9 | test_result_folder: ./test 10 | use_cache: true 11 | val_files: 12 | - bildstein_station1_xyz_intensity_rgb 13 | - domfountain_station1_xyz_intensity_rgb 14 | steps_per_epoch_train: 500 15 | steps_per_epoch_valid: 10 16 | model: 17 | name: RandLANet 18 | batcher: DefaultBatcher 19 | ckpt_path: # path/to/your/checkpoint 20 | dim_feature: 8 21 | dim_input: 6 22 | dim_output: 23 | - 16 24 | - 64 25 | - 128 26 | - 256 27 | - 512 28 | grid_size: 0.06 29 | ignored_label_inds: 30 | - 0 31 | k_n: 16 32 | num_classes: 8 33 | num_layers: 5 34 | num_points: 65536 35 | sub_sampling_ratio: 36 | - 4 37 | - 4 38 | - 4 39 | - 4 40 | - 2 41 | t_normalize: 42 | method: linear 43 | feat_bias: 0 44 | feat_scale: 255 45 | t_augment: 46 | turn_on: true 47 | rotation_method: vertical 48 | scale_anisotropic: true 49 | symmetries: [true, false, false] 
50 | noise_level: 0.001 51 | min_s: 0.8 52 | max_s: 1.2 53 | pipeline: 54 | name: SemanticSegmentation 55 | adam_lr: 0.01 56 | batch_size: 2 57 | learning_rate: 0.01 58 | main_log_dir: ./logs 59 | max_epoch: 100 60 | save_ckpt_freq: 5 61 | scheduler_gamma: 0.95 62 | test_batch_size: 1 63 | train_sum_dir: train_log 64 | val_batch_size: 1 65 | test_compute_metric: false 66 | -------------------------------------------------------------------------------- /ml3d/configs/randlanet_semantickitti.yml: -------------------------------------------------------------------------------- 1 | dataset: 2 | name: SemanticKITTI 3 | dataset_path: # path/to/your/dataset 4 | cache_dir: ./logs/cache 5 | class_weights: [55437630, 320797, 541736, 2578735, 3274484, 552662, 184064, 6 | 78858, 240942562, 17294618, 170599734, 6369672, 230413074, 101130274, 7 | 476491114, 9833174, 129609852, 4506626, 1168181] 8 | test_result_folder: ./test 9 | test_split: ['11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21'] 10 | training_split: ['00', '01', '02', '03', '04', '05', '06', '07', '09', '10'] 11 | all_split: ['00', '01', '02', '03', '04', '05', '06', '07', '09', 12 | '08', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21'] 13 | validation_split: ['08'] 14 | use_cache: true 15 | sampler: 16 | name: 'SemSegRandomSampler' 17 | model: 18 | name: RandLANet 19 | batcher: DefaultBatcher 20 | ckpt_path: # path/to/your/checkpoint 21 | dim_feature: 8 22 | dim_input: 3 23 | dim_output: 24 | - 16 25 | - 64 26 | - 128 27 | - 256 28 | grid_size: 0.06 29 | ignored_label_inds: 30 | - 0 31 | k_n: 16 32 | num_classes: 19 33 | num_layers: 4 34 | num_points: 45056 35 | sub_sampling_ratio: 36 | - 4 37 | - 4 38 | - 4 39 | - 4 40 | t_normalize: 41 | recentering: [] 42 | pipeline: 43 | name: SemanticSegmentation 44 | adam_lr: 0.01 45 | batch_size: 1 46 | learning_rate: 0.01 47 | main_log_dir: ./logs 48 | max_epoch: 100 49 | save_ckpt_freq: 2 50 | scheduler_gamma: 0.95 51 | test_batch_size: 1 52 | train_sum_dir: train_log 53 | val_batch_size: 1 54 | -------------------------------------------------------------------------------- /ml3d/configs/randlanet_toronto3d.yml: -------------------------------------------------------------------------------- 1 | dataset: 2 | name: Toronto3D 3 | cache_dir: ./logs/cache 4 | dataset_path: # path/to/your/dataset 5 | class_weights: [41697357, 1745448, 6572572, 19136493, 674897, 897825, 4634634, 374721] 6 | ignored_label_inds: 7 | - 0 8 | num_classes: 8 9 | num_points: 65536 10 | test_files: 11 | - L002.ply 12 | test_result_folder: ./test 13 | train_files: 14 | - L001.ply 15 | - L003.ply 16 | - L004.ply 17 | use_cache: true 18 | val_files: 19 | - L002.ply 20 | steps_per_epoch_train: 100 21 | steps_per_epoch_valid: 10 22 | model: 23 | name: RandLANet 24 | batcher: DefaultBatcher 25 | ckpt_path: # path/to/your/checkpoint 26 | dim_feature: 8 27 | dim_input: 6 28 | dim_output: 29 | - 16 30 | - 64 31 | - 128 32 | - 256 33 | - 512 34 | grid_size: 0.06 35 | ignored_label_inds: 36 | - 0 37 | k_n: 16 38 | num_classes: 8 39 | num_layers: 5 40 | num_points: 65536 41 | sub_sampling_ratio: 42 | - 4 43 | - 4 44 | - 4 45 | - 4 46 | - 2 47 | weight_decay: 0 48 | t_augment: 49 | turn_on: false 50 | rotation_method: vertical 51 | scale_anisotropic: false 52 | symmetries: true 53 | noise_level: 0.001 54 | min_s: 0.9 55 | max_s: 1.1 56 | t_normalize: 57 | method: linear 58 | normalize_points: False 59 | feat_bias: 0 60 | feat_scale: 1 61 | pipeline: 62 | name: SemanticSegmentation 63 | adam_lr: 0.01 
64 | batch_size: 2 65 | learning_rate: 0.01 66 | main_log_dir: ./logs 67 | max_epoch: 200 68 | save_ckpt_freq: 5 69 | scheduler_gamma: 0.9886 70 | test_batch_size: 1 71 | train_sum_dir: train_log 72 | val_batch_size: 2 73 | test_continue: false 74 | test_compute_metric: true 75 | -------------------------------------------------------------------------------- /ml3d/configs/sparseconvunet_scannet.yml: -------------------------------------------------------------------------------- 1 | dataset: 2 | name: Scannet 3 | dataset_path: # path/to/your/dataset 4 | cache_dir: ./logs/cache 5 | class_weights: [52539651, 40780759, 6961796, 4665739, 13155460, 4172698, 6368886, 7869501, 6780748, 4418153, 883498, 883847, 3330654, 3637628, 939700, 593984, 547679, 460448, 567893, 6490881] 6 | test_result_folder: ./test 7 | use_cache: False 8 | sampler: 9 | name: 'SemSegRandomSampler' 10 | model: 11 | name: SparseConvUnet 12 | batcher: ConcatBatcher 13 | ckpt_path: # path/to/your/checkpoint 14 | multiplier: 32 15 | voxel_size: 0.02 16 | residual_blocks: True 17 | conv_block_reps: 1 18 | in_channels: 3 19 | num_classes: 20 20 | grid_size: 4096 21 | ignored_label_inds: [-1] 22 | augment: 23 | rotate: 24 | method: vertical 25 | scale: 26 | min_s: 0.9 27 | max_s: 1.1 28 | noise: 29 | noise_std: 0.01 30 | RandomDropout: 31 | dropout_ratio: 0.2 32 | RandomHorizontalFlip: 33 | axes: [0, 1] 34 | ChromaticAutoContrast: 35 | randomize_blend_factor: True 36 | blend_factor: 0.5 37 | ChromaticTranslation: 38 | trans_range_ratio: 0.1 39 | ChromaticJitter: 40 | std: 0.05 41 | pipeline: 42 | name: SemanticSegmentation 43 | adam_lr: 0.001 44 | batch_size: 8 45 | learning_rate: 0.01 46 | main_log_dir: ./logs 47 | max_epoch: 512 48 | save_ckpt_freq: 5 49 | scheduler_gamma: 0.99 50 | test_batch_size: 1 51 | train_sum_dir: train_log 52 | val_batch_size: 8 53 | -------------------------------------------------------------------------------- /ml3d/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | """I/O, attributes, and processing for different datasets.""" 2 | 3 | from .semantickitti import SemanticKITTI 4 | from .s3dis import S3DIS 5 | from .parislille3d import ParisLille3D 6 | from .toronto3d import Toronto3D 7 | from .customdataset import Custom3D 8 | from .semantic3d import Semantic3D 9 | from .inference_dummy import InferenceDummySplit 10 | from .samplers import SemSegRandomSampler, SemSegSpatiallyRegularSampler 11 | from . 
import utils 12 | 13 | from .kitti import KITTI 14 | from .nuscenes import NuScenes 15 | from .waymo import Waymo 16 | from .lyft import Lyft 17 | from .shapenet import ShapeNet 18 | from .argoverse import Argoverse 19 | from .scannet import Scannet 20 | from .sunrgbd import SunRGBD 21 | from .habitat_sampling import HabitatSampling 22 | 23 | __all__ = [ 24 | 'SemanticKITTI', 'S3DIS', 'Toronto3D', 'ParisLille3D', 'Semantic3D', 25 | 'Custom3D', 'utils', 'KITTI', 'Waymo', 'NuScenes', 'Lyft', 'ShapeNet', 26 | 'SemSegRandomSampler', 'InferenceDummySplit', 27 | 'SemSegSpatiallyRegularSampler', 'Argoverse', 'Scannet', 'SunRGBD', 28 | 'HabitatSampling' 29 | ] 30 | -------------------------------------------------------------------------------- /ml3d/datasets/_resources/download_paris_lille3d.sh: -------------------------------------------------------------------------------- 1 | export BASE_DIR="/home/sanskar_agrawal/data/test/Paris_Lille3D" 2 | 3 | mkdir -p $BASE_DIR 4 | 5 | export url_train="https://cloud.mines-paristech.fr/index.php/s/JhIxgyt0ALgRZ1O/download?path=%2F&files=training_10_classes" 6 | export url_test="https://cloud.mines-paristech.fr/index.php/s/JhIxgyt0ALgRZ1O/download?path=%2F&files=test_10_classes" 7 | 8 | wget -c -N -O $BASE_DIR'/training_10_classes.zip' $url_train 9 | wget -c -N -O $BASE_DIR'/test_10_classes.zip' $url_test 10 | 11 | cd $BASE_DIR 12 | 13 | unzip test_10_classes.zip 14 | unzip training_10_classes.zip 15 | 16 | rm test_10_classes.zip 17 | rm training_10_classes.zip 18 | 19 | -------------------------------------------------------------------------------- /ml3d/datasets/_resources/download_toronto3d.sh: -------------------------------------------------------------------------------- 1 | export BASE_DIR="/home/sanskar_agrawal/data/test/" 2 | export url="https://xx9lca.sn.files.1drv.com/y4mUm9-LiY3vULTW79zlB3xp0wzCPASzteId4wdUZYpzWiw6Jp4IFoIs6ADjLREEk1-IYH8KRGdwFZJrPlIebwytHBYVIidsCwkHhW39aQkh3Vh0OWWMAcLVxYwMTjXwDxHl-CDVDau420OG4iMiTzlsK_RTC_ypo3z-Adf-h0gp2O8j5bOq-2TZd9FD1jPLrkf3759rB-BWDGFskF3AsiB3g" 3 | 4 | mkdir -p $BASE_DIR 5 | 6 | wget -c -N -O $BASE_DIR'/Toronto_3D.zip' $url 7 | 8 | cd $BASE_DIR 9 | 10 | unzip Toronto_3D.zip 11 | 12 | rm Toronto_3D.zip 13 | -------------------------------------------------------------------------------- /ml3d/datasets/_resources/lyft/val.txt: -------------------------------------------------------------------------------- 1 | host-a004-lidar0-1233080749298771736-1233080774198118416 2 | host-a004-lidar0-1232905197298264546-1232905222198133856 3 | host-a011-lidar0-1232732468299489666-1232732493199050666 4 | host-a101-lidar0-1241561147998866622-1241561172899320654 5 | host-a006-lidar0-1237322885198285226-1237322910098576786 6 | host-a004-lidar0-1233963848198981116-1233963873098642176 7 | host-a011-lidar0-1232752543198025666-1232752568099126026 8 | host-a004-lidar0-1232842367198056546-1232842392097783226 9 | host-a004-lidar0-1233615989298293586-1233616014198854636 10 | host-a011-lidar0-1233965426299054906-1233965451199121906 11 | host-a011-lidar0-1236104034298928316-1236104059198988026 12 | host-a007-lidar0-1233946614199227636-1233946639098289666 13 | host-a015-lidar0-1235423696198069636-1235423721098551296 14 | host-a004-lidar0-1233014843199117706-1233014868098023786 15 | host-a011-lidar0-1236093962299300416-1236093987199363346 16 | host-a011-lidar0-1234639296198260986-1234639321099417316 17 | host-a011-lidar0-1233524871199389346-1233524896098591466 18 | host-a011-lidar0-1235933781298838116-1235933806199517736 19 | 
host-a011-lidar0-1233965312298542226-1233965337198958586 20 | host-a011-lidar0-1233090567199118316-1233090592098933996 21 | host-a007-lidar0-1233621256298511876-1233621281197988026 22 | host-a007-lidar0-1233079617197863906-1233079642098533586 23 | host-a015-lidar0-1236112516098396876-1236112540999028556 24 | host-a008-lidar0-1236016333197799906-1236016358099063636 25 | host-a101-lidar0-1240710366399037786-1240710391298976894 26 | host-a102-lidar0-1242755350298764586-1242755375198787666 27 | host-a101-lidar0-1240877587199107226-1240877612099413030 28 | host-a101-lidar0-1242583745399163026-1242583770298821706 29 | host-a011-lidar0-1232817034199342856-1232817059098800346 30 | host-a004-lidar0-1232905117299287546-1232905142198246226 -------------------------------------------------------------------------------- /ml3d/datasets/_resources/scannet/scannetv2_test.txt: -------------------------------------------------------------------------------- 1 | scene0707_00 2 | scene0708_00 3 | scene0709_00 4 | scene0710_00 5 | scene0711_00 6 | scene0712_00 7 | scene0713_00 8 | scene0714_00 9 | scene0715_00 10 | scene0716_00 11 | scene0717_00 12 | scene0718_00 13 | scene0719_00 14 | scene0720_00 15 | scene0721_00 16 | scene0722_00 17 | scene0723_00 18 | scene0724_00 19 | scene0725_00 20 | scene0726_00 21 | scene0727_00 22 | scene0728_00 23 | scene0729_00 24 | scene0730_00 25 | scene0731_00 26 | scene0732_00 27 | scene0733_00 28 | scene0734_00 29 | scene0735_00 30 | scene0736_00 31 | scene0737_00 32 | scene0738_00 33 | scene0739_00 34 | scene0740_00 35 | scene0741_00 36 | scene0742_00 37 | scene0743_00 38 | scene0744_00 39 | scene0745_00 40 | scene0746_00 41 | scene0747_00 42 | scene0748_00 43 | scene0749_00 44 | scene0750_00 45 | scene0751_00 46 | scene0752_00 47 | scene0753_00 48 | scene0754_00 49 | scene0755_00 50 | scene0756_00 51 | scene0757_00 52 | scene0758_00 53 | scene0759_00 54 | scene0760_00 55 | scene0761_00 56 | scene0762_00 57 | scene0763_00 58 | scene0764_00 59 | scene0765_00 60 | scene0766_00 61 | scene0767_00 62 | scene0768_00 63 | scene0769_00 64 | scene0770_00 65 | scene0771_00 66 | scene0772_00 67 | scene0773_00 68 | scene0774_00 69 | scene0775_00 70 | scene0776_00 71 | scene0777_00 72 | scene0778_00 73 | scene0779_00 74 | scene0780_00 75 | scene0781_00 76 | scene0782_00 77 | scene0783_00 78 | scene0784_00 79 | scene0785_00 80 | scene0786_00 81 | scene0787_00 82 | scene0788_00 83 | scene0789_00 84 | scene0790_00 85 | scene0791_00 86 | scene0792_00 87 | scene0793_00 88 | scene0794_00 89 | scene0795_00 90 | scene0796_00 91 | scene0797_00 92 | scene0798_00 93 | scene0799_00 94 | scene0800_00 95 | scene0801_00 96 | scene0802_00 97 | scene0803_00 98 | scene0804_00 99 | scene0805_00 100 | scene0806_00 101 | -------------------------------------------------------------------------------- /ml3d/datasets/augment/__init__.py: -------------------------------------------------------------------------------- 1 | from .augmentation import SemsegAugmentation, ObjdetAugmentation 2 | -------------------------------------------------------------------------------- /ml3d/datasets/inference_dummy.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import os, sys, glob, pickle 3 | import logging 4 | 5 | from .base_dataset import BaseDataset, BaseDatasetSplit 6 | from ..utils import make_dir, DATASET, get_module 7 | 8 | logging.basicConfig( 9 | level=logging.INFO, 10 | format='%(levelname)s - %(asctime)s - %(module)s - %(message)s', 11 | ) 12 | log = 
logging.getLogger(__name__) 13 | 14 | 15 | class InferenceDummySplit(BaseDatasetSplit): 16 | 17 | def __init__(self, inference_data): 18 | self.split = 'test' 19 | self.inference_data = inference_data 20 | self.cfg = {} 21 | sampler_cls = get_module('sampler', 'SemSegSpatiallyRegularSampler') 22 | self.sampler = sampler_cls(self) 23 | 24 | def __len__(self): 25 | return 1 26 | 27 | def get_data(self, idx): 28 | return self.inference_data 29 | 30 | def get_attr(self, idx): 31 | pc_path = 'inference_data' 32 | split = self.split 33 | attr = {'idx': 0, 'name': 'inference', 'path': pc_path, 'split': split} 34 | return attr 35 | 36 | 37 | DATASET._register_module(InferenceDummySplit) 38 | -------------------------------------------------------------------------------- /ml3d/datasets/samplers/__init__.py: -------------------------------------------------------------------------------- 1 | from .semseg_random import SemSegRandomSampler 2 | from .semseg_spatially_regular import SemSegSpatiallyRegularSampler 3 | 4 | __all__ = ['SemSegRandomSampler', 'SemSegSpatiallyRegularSampler'] 5 | -------------------------------------------------------------------------------- /ml3d/datasets/samplers/semseg_random.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import random 3 | 4 | from ...utils import SAMPLER 5 | 6 | 7 | class SemSegRandomSampler(object): 8 | """Random sampler for semantic segmentation datasets.""" 9 | 10 | def __init__(self, dataset): 11 | self.dataset = dataset 12 | self.length = len(dataset) 13 | self.split = self.dataset.split 14 | 15 | def __len__(self): 16 | return self.length 17 | 18 | def initialize_with_dataloader(self, dataloader): 19 | self.length = len(dataloader) 20 | 21 | def get_cloud_sampler(self): 22 | 23 | def gen(): 24 | ids = np.random.permutation(self.length) 25 | for i in ids: 26 | yield i 27 | 28 | return gen() 29 | 30 | @staticmethod 31 | def get_point_sampler(): 32 | 33 | def _random_centered_gen(**kwargs): 34 | pc = kwargs.get('pc', None) 35 | num_points = kwargs.get('num_points', None) 36 | search_tree = kwargs.get('search_tree', None) 37 | if pc is None or num_points is None or search_tree is None: 38 | raise KeyError("Please provide pc, num_points, and search_tree \ 39 | for point_sampler in SemSegRandomSampler") 40 | 41 | center_idx = np.random.choice(len(pc), 1) 42 | center_point = pc[center_idx, :].reshape(1, -1) 43 | 44 | if (pc.shape[0] < num_points): 45 | diff = num_points - pc.shape[0] 46 | idxs = np.array(range(pc.shape[0])) 47 | idxs = list(idxs) + list(random.choices(idxs, k=diff)) 48 | idxs = np.asarray(idxs) 49 | else: 50 | idxs = search_tree.query(center_point, k=num_points)[1][0] 51 | random.shuffle(idxs) 52 | pc = pc[idxs] 53 | return pc, idxs, center_point 54 | 55 | return _random_centered_gen 56 | 57 | 58 | SAMPLER._register_module(SemSegRandomSampler) 59 | -------------------------------------------------------------------------------- /ml3d/datasets/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .dataprocessing import DataProcessing 2 | from .transforms import trans_normalize, trans_augment, trans_crop_pc, ObjdetAugmentation 3 | from .operations import create_3D_rotations, get_min_bbox 4 | from .bev_box import BEVBox3D 5 | 6 | __all__ = [ 7 | 'DataProcessing', 'trans_normalize', 'create_3D_rotations', 'trans_augment', 8 | 'trans_crop_pc', 'ObjdetAugmentation', 'get_min_bbox', 'BEVBox3D' 9 | ] 10 | 
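The point sampler above is easiest to see end-to-end with a small driver. A minimal sketch of hypothetical standalone usage (in the real pipeline the dataloader supplies pc, num_points, and the dataset's cached KD-tree; here sklearn.neighbors.KDTree stands in for that cached search tree):

import numpy as np
from sklearn.neighbors import KDTree

from ml3d.datasets import SemSegRandomSampler

pc = np.random.rand(10000, 3).astype(np.float32)  # toy point cloud
search_tree = KDTree(pc)  # stand-in for the dataset's cached search tree

sampler_fn = SemSegRandomSampler.get_point_sampler()
sub_pc, idxs, center = sampler_fn(pc=pc, num_points=4096,
                                  search_tree=search_tree)
# sub_pc holds the 4096 nearest neighbors of a random center point, shuffled.
assert sub_pc.shape == (4096, 3)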
-------------------------------------------------------------------------------- /ml3d/metrics/__init__.py: -------------------------------------------------------------------------------- 1 | import open3d 2 | 3 | print("open3d.core.cuda.device_count()", open3d.core.cuda.device_count()) 4 | print("open3d.core.cuda.is_available()", open3d.core.cuda.is_available()) 5 | if open3d.core.cuda.device_count() > 0: 6 | # Open3D is built with CUDA and the machine has a CUDA device. 7 | from open3d.ml.contrib import iou_bev_cuda as iou_bev 8 | from open3d.ml.contrib import iou_3d_cuda as iou_3d 9 | print("CUDA iou_bev") 10 | else: 11 | from open3d.ml.contrib import iou_bev_cpu as iou_bev 12 | from open3d.ml.contrib import iou_3d_cpu as iou_3d 13 | print("CPU iou_bev") 14 | 15 | from .mAP import precision_3d, mAP 16 | 17 | __all__ = ['precision_3d', 'mAP', 'iou_bev', 'iou_3d'] 18 | -------------------------------------------------------------------------------- /ml3d/tf/__init__.py: -------------------------------------------------------------------------------- 1 | from .pipelines import * 2 | from .models import * 3 | from .dataloaders import * 4 | from .modules import * 5 | -------------------------------------------------------------------------------- /ml3d/tf/dataloaders/__init__.py: -------------------------------------------------------------------------------- 1 | """Dataloader for TensorFlow.""" 2 | 3 | from .tf_dataloader import TFDataloader 4 | __all__ = ['TFDataloader'] 5 | -------------------------------------------------------------------------------- /ml3d/tf/models/__init__.py: -------------------------------------------------------------------------------- 1 | """Tensorflow network models.""" 2 | 3 | from .randlanet import RandLANet 4 | from .kpconv import KPFCNN 5 | from .point_pillars import PointPillars 6 | from .sparseconvnet import SparseConvUnet 7 | from .point_rcnn import PointRCNN 8 | 9 | __all__ = ['RandLANet', 'KPFCNN', 'PointPillars', 'SparseConvUnet', 'PointRCNN'] 10 | -------------------------------------------------------------------------------- /ml3d/tf/models/utils/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /ml3d/tf/models/utils/kernels/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /ml3d/tf/modules/__init__.py: -------------------------------------------------------------------------------- 1 | """Functional modules for tensorflow.""" 2 | 3 | from .losses import * 4 | from .metrics import * 5 | -------------------------------------------------------------------------------- /ml3d/tf/modules/losses/__init__.py: -------------------------------------------------------------------------------- 1 | from .semseg_loss import SemSegLoss 2 | from .cross_entropy import CrossEntropyLoss 3 | from .focal_loss import FocalLoss 4 | from .smooth_L1 import SmoothL1Loss 5 | 6 | __all__ = ['SemSegLoss', 'CrossEntropyLoss', 'FocalLoss', 'SmoothL1Loss'] 7 | -------------------------------------------------------------------------------- /ml3d/tf/modules/losses/cross_entropy.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | 3 | 4 | class CrossEntropyLoss(tf.Module): 5 | 6 | def __init__(self, loss_weight=1.0): 7 | """CrossEntropyLoss. 
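Computes sparse categorical cross-entropy directly on logits; the
per-sample losses are scaled by loss_weight and reduced by the mean,
or summed and divided by avg_factor when one is given.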
8 | 9 | Args: 10 | loss_weight (float, optional): Weight of the loss. Defaults to 1.0. 11 | """ 12 | super(CrossEntropyLoss, self).__init__() 13 | self.loss_weight = loss_weight 14 | 15 | self.loss_fn = tf.keras.losses.SparseCategoricalCrossentropy( 16 | from_logits=True, reduction=tf.keras.losses.Reduction.NONE) 17 | 18 | def __call__(self, 19 | cls_score, 20 | label, 21 | weight=None, 22 | avg_factor=None, 23 | **kwargs): 24 | """Forward function. 25 | 26 | Args: 27 | cls_score (tf.Tensor): The prediction. 28 | label (tf.Tensor): The learning label of the prediction. 29 | weight (tf.Tensor, optional): Sample-wise loss weight. 30 | avg_factor (int, optional): Average factor that is used to average 31 | the loss. Defaults to None. 32 | 33 | Returns: 34 | tf.Tensor: The calculated loss 35 | """ 36 | if weight is not None: 37 | loss = self.loss_fn(label, cls_score, sample_weight=weight) 38 | else: 39 | loss = self.loss_fn(label, cls_score) 40 | 41 | loss = loss * self.loss_weight 42 | 43 | if avg_factor: 44 | return tf.reduce_sum(loss) / avg_factor 45 | else: 46 | return tf.reduce_mean(loss) 47 | -------------------------------------------------------------------------------- /ml3d/tf/modules/losses/focal_loss.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | 3 | 4 | class FocalLoss(tf.Module): 5 | 6 | def __init__(self, gamma=2.0, alpha=0.25, loss_weight=1.0): 7 | """`Focal Loss <https://arxiv.org/abs/1708.02002>`_ 8 | 9 | Args: 10 | gamma (float, optional): The gamma for calculating the modulating 11 | factor. Defaults to 2.0. 12 | alpha (float, optional): A balanced form for Focal Loss. 13 | Defaults to 0.25. 14 | loss_weight (float, optional): Weight of loss. Defaults to 1.0. 15 | """ 16 | super(FocalLoss, self).__init__() 17 | self.gamma = gamma 18 | self.alpha = alpha 19 | self.loss_weight = loss_weight 20 | 21 | def __call__(self, pred, target, weight=None, avg_factor=None): 22 | 23 | pred_sigmoid = tf.math.sigmoid(pred) 24 | 25 | if len(pred.shape) > 1 and int(pred.shape[-1]) > 1: 26 | target = tf.one_hot(target, int(pred.shape[-1])) 27 | target = tf.cast(target, pred.dtype) 28 | 29 | pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target) 30 | 31 | focal_weight = (self.alpha * target + (1 - self.alpha) * 32 | (1 - target)) * tf.pow(pt, self.gamma) 33 | 34 | loss = tf.nn.sigmoid_cross_entropy_with_logits(target, 35 | pred) * focal_weight 36 | 37 | if weight is not None: 38 | loss = loss * weight 39 | 40 | loss = loss * self.loss_weight 41 | 42 | if avg_factor: 43 | return tf.reduce_sum(loss) / avg_factor 44 | else: 45 | return tf.reduce_mean(loss) 46 | -------------------------------------------------------------------------------- /ml3d/tf/modules/losses/smooth_L1.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | 3 | 4 | class SmoothL1Loss(tf.Module): 5 | """Smooth L1 loss. 6 | 7 | Args: 8 | beta (float, optional): The threshold in the piecewise function. 9 | Defaults to 1.0. 10 | loss_weight (float, optional): The weight of loss. 11 | """ 12 | 13 | def __init__(self, beta=1.0, loss_weight=1.0): 14 | super(SmoothL1Loss, self).__init__() 15 | self.beta = beta 16 | self.loss_weight = loss_weight 17 | 18 | def __call__(self, pred, target, weight=None, avg_factor=None, **kwargs): 19 | """Forward function. 20 | 21 | Args: 22 | pred (tf.Tensor): The prediction. 23 | target (tf.Tensor): The learning target of the prediction. 
24 | weight (tf.Tensor, optional): The weight of loss for each 25 | prediction. Defaults to None. 26 | avg_factor (int, optional): Average factor that is used to average 27 | the loss. Defaults to None. 31 | """ 32 | assert pred.shape == target.shape and tf.size(target) > 0 33 | 34 | diff = tf.abs(pred - target) 35 | 36 | loss = tf.where(diff < self.beta, 0.5 * diff * diff / self.beta, 37 | diff - 0.5 * self.beta) 38 | 39 | if weight is not None: 40 | loss = loss * weight 41 | 42 | loss = loss * self.loss_weight 43 | 44 | if avg_factor: 45 | return tf.reduce_sum(loss) / avg_factor 46 | else: 47 | return tf.reduce_mean(loss) 48 | -------------------------------------------------------------------------------- /ml3d/tf/modules/metrics/__init__.py: -------------------------------------------------------------------------------- 1 | from .semseg_metric import SemSegMetric 2 | 3 | __all__ = ['SemSegMetric'] 4 | -------------------------------------------------------------------------------- /ml3d/tf/modules/optimizers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kylevedder/SparsePointPillars/ebe2960b1d2591a031f50a5d5782595dd1b4e8e4/ml3d/tf/modules/optimizers/__init__.py -------------------------------------------------------------------------------- /ml3d/tf/modules/schedulers/__init__.py: -------------------------------------------------------------------------------- 1 | from .bn_momentum_scheduler import BNMomentumScheduler 2 | from .lr_one_cycle_scheduler import OneCycleScheduler 3 | from .cosine_warmup_scheduler import CosineWarmupLR 4 | 5 | __all__ = ['BNMomentumScheduler', 'OneCycleScheduler', 'CosineWarmupLR'] 6 | -------------------------------------------------------------------------------- /ml3d/tf/modules/schedulers/bn_momentum_scheduler.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | 3 | 4 | def set_bn_momentum_default(bn_momentum): 5 | 6 | def fn(m): 7 | if isinstance(m, tf.keras.layers.BatchNormalization): 8 | m.momentum = bn_momentum 9 | 10 | return fn 11 | 12 | 13 | class BNMomentumScheduler(object): 14 | 15 | def __init__(self, 16 | model, 17 | bn_lambda, 18 | last_epoch=-1, 19 | setter=set_bn_momentum_default): 20 | if not isinstance(model, tf.keras.layers.Layer): 21 | raise RuntimeError( 22 | "Class '{}' is not a Tensorflow Keras Layer".format( 23 | type(model).__name__)) 24 | 25 | self.model = model 26 | self.setter = setter 27 | self.lmbd = bn_lambda 28 | 29 | self.step(last_epoch + 1) 30 | self.last_epoch = last_epoch 31 | 32 | def step(self, epoch=None): 33 | if epoch is None: 34 | epoch = self.last_epoch + 1 35 | 36 | self.last_epoch = epoch 37 | self.model.apply(self.setter(self.lmbd(epoch))) 38 | -------------------------------------------------------------------------------- /ml3d/tf/modules/schedulers/cosine_warmup_scheduler.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import math 3 | 4 | 5 | class CosineWarmupLR(tf.keras.optimizers.schedules.LearningRateSchedule): 6 | """Cosine ramp of the learning rate from eta_min up to base_lr over T_max steps: 7 | lr(step) = eta_min + (base_lr - eta_min) * (1 - cos(pi * step / T_max)) / 2 8 | 9 | Args: 10 | base_lr: learning rate reached at step T_max. 11 | T_max: number of steps in the ramp. 12 | eta_min: starting learning rate. Defaults to 0. 13 | """ 14 | 15 | def __init__(self, base_lr, T_max, eta_min=0): 16 | self.base_lr = base_lr 17 | self.T_max = T_max 18 | self.eta_min = eta_min 19 | super(CosineWarmupLR, self).__init__() 20 | 21 | def __call__(self, step): 22 | # Evaluate the cosine ramp at the given global step.
 23 | step = tf.cast(step, tf.float32) 24 | return self.eta_min + (self.base_lr - self.eta_min) * ( 25 | 1 - tf.math.cos(math.pi * step / self.T_max)) / 2 26 | -------------------------------------------------------------------------------- /ml3d/tf/modules/schedulers/lr_one_cycle_scheduler.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import numpy as np 3 | 4 | 5 | class OneCycleScheduler(tf.keras.optimizers.schedules.LearningRateSchedule): 6 | """Scheduler class for cyclic learning rate scheduling. 7 | 8 | Args: 9 | total_step: number of steps for one cycle. 10 | lr_max: maximum cyclic learning rate. 11 | div_factor: factor by which initial learning starts. 12 | """ 13 | 14 | def __init__(self, total_step, lr_max=0.002, div_factor=10.0): 15 | 16 | self.lr_max = lr_max 17 | self.div_factor = div_factor 18 | self.total_step = total_step 19 | super(OneCycleScheduler, self).__init__() 20 | 21 | def __call__(self, step): 22 | lr_low = self.lr_max / self.div_factor 23 | 24 | angle = (np.pi / self.total_step * step) 25 | lr1 = tf.abs(lr_low + (self.lr_max - lr_low) * tf.math.sin(angle)) 26 | 27 | angle = (np.pi / self.total_step) * ( 28 | (step - self.total_step / 2) % self.total_step) 29 | 30 | lr2 = tf.abs(self.lr_max * tf.math.cos(angle)) 31 | 32 | lr = tf.where(step < self.total_step / 2, lr1, lr2) 33 | 34 | return lr 35 | -------------------------------------------------------------------------------- /ml3d/tf/pipelines/__init__.py: -------------------------------------------------------------------------------- 1 | """3D ML pipelines for TensorFlow.""" 2 | 3 | from .semantic_segmentation import SemanticSegmentation 4 | from .object_detection import ObjectDetection 5 | 6 | __all__ = ['SemanticSegmentation', 'ObjectDetection'] 7 | -------------------------------------------------------------------------------- /ml3d/tf/pipelines/base_pipeline.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import yaml 3 | import tensorflow as tf 4 | from abc import ABC, abstractmethod 5 | 6 | from os.path import join, exists, dirname, abspath 7 | 8 | from ...utils import Config, make_dir 9 | 10 | 11 | class BasePipeline(ABC): 12 | """Base pipeline class.""" 13 | 14 | def __init__(self, model, dataset=None, **kwargs): 15 | """Initialize. 16 | 17 | Args: 18 | model: network 19 | dataset: dataset, or None for inference model 21 | kwargs: 22 | 23 | Returns: 24 | class: The corresponding class. 25 | """ 26 | if kwargs['name'] is None: 27 | raise KeyError("Please give a name to the pipeline") 28 | 29 | self.cfg = Config(kwargs) 30 | self.name = self.cfg.name 31 | 32 | self.model = model 33 | self.dataset = dataset 34 | 35 | make_dir(self.cfg.main_log_dir) 36 | dataset_name = dataset.name if dataset is not None else '' 37 | self.cfg.logs_dir = join( 38 | self.cfg.main_log_dir, 39 | model.__class__.__name__ + '_' + dataset_name + '_tf') 40 | make_dir(self.cfg.logs_dir) 41 | 42 | @abstractmethod 43 | def run_inference(self, data): 44 | """Run inference on given data. 45 | 46 | Args: 47 | data: Raw data. 48 | 49 | Returns: 50 | Returns the inference results. 
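Example (sketch; assumes a concrete pipeline such as
SemanticSegmentation and a dataset with a test split)::

    data = dataset.get_split('test').get_data(0)
    results = pipeline.run_inference(data)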
51 | """ 52 | return 53 | 54 | @abstractmethod 55 | def run_test(self): 56 | """Run testing on test sets.""" 57 | return 58 | 59 | @abstractmethod 60 | def run_train(self): 61 | """Run training on train sets.""" 62 | return 63 | -------------------------------------------------------------------------------- /ml3d/tf/utils/__init__.py: -------------------------------------------------------------------------------- 1 | """Utils for TensorFlow networks.""" 2 | -------------------------------------------------------------------------------- /ml3d/tf/utils/roipool3d/roipool3d_utils.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | from tensorflow.python.framework import ops 3 | import open3d 4 | if open3d.core.cuda.device_count() > 0: 5 | from open3d.ml.tf.ops import roi_pool 6 | import numpy as np 7 | 8 | 9 | def enlarge_box3d(boxes3d, extra_width): 10 | """Enlarge 3D boxes. 11 | 12 | Args: 13 | boxes3d: (N, 7) [x, y, z, h, w, l, ry] 14 | extra_width: Extra width. 15 | """ 16 | trans = np.zeros((boxes3d.shape[-1],)) 17 | trans[1] = extra_width 18 | trans[3:6] = extra_width * 2 19 | 20 | return boxes3d + trans 21 | 22 | 23 | def roipool3d_gpu(pts, 24 | pts_feature, 25 | boxes3d, 26 | pool_extra_width, 27 | sampled_pt_num=512): 28 | """ 29 | :param pts: (B, N, 3) 30 | :param pts_feature: (B, N, C) 31 | :param boxes3d: (B, M, 7) 32 | :param pool_extra_width: float 33 | :param sampled_pt_num: int 34 | :return: 35 | pooled_features: (B, M, 512, 3 + C) 36 | pooled_empty_flag: (B, M) 37 | """ 38 | if not open3d.core.cuda.device_count() > 0: 39 | raise NotImplementedError 40 | 41 | batch_size = pts.shape[0] 42 | pooled_boxes3d = tf.reshape( 43 | enlarge_box3d(tf.reshape(boxes3d, (-1, 7)), pool_extra_width), 44 | (batch_size, -1, 7)) 45 | 46 | pooled_features, pooled_empty_flag = roi_pool(pts, pooled_boxes3d, 47 | pts_feature, sampled_pt_num) 48 | 49 | return pooled_features, pooled_empty_flag 50 | 51 | 52 | ops.NoGradient('Open3DRoiPool') 53 | -------------------------------------------------------------------------------- /ml3d/tf/utils/tf_utils.py: -------------------------------------------------------------------------------- 1 | import re 2 | import tensorflow as tf 3 | 4 | 5 | def atoi(text): 6 | return int(text) if text.isdigit() else text 7 | 8 | 9 | def natural_keys(text): 10 | return [atoi(c) for c in re.split(r'(\d+)', text)] 11 | 12 | 13 | def gen_CNN(channels, 14 | conv=tf.keras.layers.Conv1D, 15 | use_bias=True, 16 | activation=tf.keras.layers.ReLU, 17 | batch_norm=False): 18 | layers = [] 19 | for i in range(len(channels) - 1): 20 | in_size, out_size = channels[i:i + 2] 21 | layers.append( 22 | conv(out_size, 1, use_bias=use_bias, data_format="channels_first")) 23 | if batch_norm: 24 | layers.append( 25 | tf.keras.layers.BatchNormalization(axis=1, 26 | momentum=0.9, 27 | epsilon=1e-05)) 28 | if activation is not None: 29 | layers.append(activation()) 30 | 31 | return tf.keras.Sequential(layers) 32 | -------------------------------------------------------------------------------- /ml3d/torch/__init__.py: -------------------------------------------------------------------------------- 1 | from .pipelines import * 2 | from .models import * 3 | from .dataloaders import * 4 | from .modules import * 5 | -------------------------------------------------------------------------------- /ml3d/torch/dataloaders/__init__.py: -------------------------------------------------------------------------------- 1 | """Dataloader for 
PyTorch.""" 2 | 3 | from .torch_dataloader import TorchDataloader 4 | from .torch_sampler import get_sampler 5 | from .default_batcher import DefaultBatcher 6 | from .concat_batcher import ConcatBatcher 7 | 8 | __all__ = ['TorchDataloader', 'DefaultBatcher', 'ConcatBatcher', 'get_sampler'] 9 | -------------------------------------------------------------------------------- /ml3d/torch/dataloaders/torch_sampler.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch.utils.data import Sampler 3 | 4 | 5 | class TorchSamplerWrapper(Sampler): 6 | 7 | def __init__(self, sampler): 8 | self.sampler = sampler 9 | 10 | def __iter__(self): 11 | return self.sampler.get_cloud_sampler() 12 | 13 | def __len__(self): 14 | return len(self.sampler) 15 | 16 | 17 | def get_sampler(sampler): 18 | return TorchSamplerWrapper(sampler) 19 | -------------------------------------------------------------------------------- /ml3d/torch/models/__init__.py: -------------------------------------------------------------------------------- 1 | """Networks for torch.""" 2 | 3 | from .randlanet import RandLANet 4 | from .kpconv import KPFCNN 5 | from .point_pillars import PointPillars 6 | from .sparse_point_pillars import SparsePointPillars 7 | from .sparseconvnet import SparseConvUnet 8 | from .point_rcnn import PointRCNN 9 | from .sparse_point_pillars_wide import SparsePointPillarsWide 10 | from .sparse_point_pillars_sparse1_dense23 import SparsePointPillarsSparse1Dense23 11 | from .sparse_point_pillars_sparse12_dense3 import SparsePointPillarsSparse12Dense3 12 | 13 | __all__ = [ 14 | 'RandLANet', 'KPFCNN', 'PointPillars', 'PointRCNN', 'SparseConvUnet', 15 | 'SparsePointPillars', 'SparsePointPillarsWide', 16 | 'SparsePointPillarsSparse1Dense23', 'SparsePointPillarsSparse12Dense3' 17 | ] 18 | -------------------------------------------------------------------------------- /ml3d/torch/modules/__init__.py: -------------------------------------------------------------------------------- 1 | """Functional modules for torch.""" 2 | 3 | from .losses import * 4 | from .metrics import * 5 | -------------------------------------------------------------------------------- /ml3d/torch/modules/losses/__init__.py: -------------------------------------------------------------------------------- 1 | from .semseg_loss import filter_valid_label, SemSegLoss 2 | from .cross_entropy import CrossEntropyLoss 3 | from .focal_loss import FocalLoss 4 | from .smooth_L1 import SmoothL1Loss 5 | 6 | __all__ = [ 7 | 'filter_valid_label', 'SemSegLoss', 'CrossEntropyLoss', 'FocalLoss', 8 | 'SmoothL1Loss' 9 | ] 10 | -------------------------------------------------------------------------------- /ml3d/torch/modules/losses/cross_entropy.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | 5 | 6 | def one_hot(index, classes): 7 | out_idx = torch.arange(classes, device=index.device) 8 | out_idx = torch.unsqueeze(out_idx, 0) 9 | index = torch.unsqueeze(index, -1) 10 | return (index == out_idx).float() 11 | 12 | 13 | class CrossEntropyLoss(nn.Module): 14 | 15 | def __init__(self, loss_weight=1.0): 16 | """CrossEntropyLoss. 17 | 18 | Args: 19 | loss_weight (float, optional): Weight of the loss. Defaults to 1.0. 
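Example (sketch)::

    criterion = CrossEntropyLoss(loss_weight=0.5)
    loss = criterion(logits, labels)  # scaled, mean-reduced scalar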
20 | """ 21 | super(CrossEntropyLoss, self).__init__() 22 | self.loss_weight = loss_weight 23 | 24 | def forward(self, cls_score, label, weight=None, avg_factor=None, **kwargs): 25 | """Forward function. 26 | 27 | Args: 28 | cls_score (torch.Tensor): The prediction. 29 | label (torch.Tensor): The learning label of the prediction. 30 | weight (torch.Tensor, optional): Sample-wise loss weight. 31 | avg_factor (int, optional): Average factor that is used to average 32 | the loss. Defaults to None. 33 | 34 | Returns: 35 | torch.Tensor: The calculated loss 36 | """ 37 | loss = F.cross_entropy(cls_score, label, reduction='none') 38 | 39 | if weight is not None: 40 | loss = loss * weight 41 | 42 | loss = loss * self.loss_weight 43 | 44 | if avg_factor: 45 | return loss.sum() / avg_factor 46 | else: 47 | return loss.mean() 48 | -------------------------------------------------------------------------------- /ml3d/torch/modules/losses/focal_loss.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | from torch.autograd import Variable 5 | 6 | 7 | def one_hot(index, classes): 8 | out_idx = torch.arange(classes, device=index.device) 9 | out_idx = torch.unsqueeze(out_idx, 0) 10 | index = torch.unsqueeze(index, -1) 11 | return (index == out_idx).float() 12 | 13 | 14 | class FocalLoss(nn.Module): 15 | 16 | def __init__(self, gamma=2.0, alpha=0.25, loss_weight=1.0): 17 | """`Focal Loss <https://arxiv.org/abs/1708.02002>`_ 18 | 19 | Args: 20 | gamma (float, optional): The gamma for calculating the modulating 21 | factor. Defaults to 2.0. 22 | alpha (float, optional): A balanced form for Focal Loss. 23 | Defaults to 0.25. 24 | loss_weight (float, optional): Weight of loss. Defaults to 1.0. 
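The forward pass multiplies binary cross-entropy with the weight
(alpha * t + (1 - alpha) * (1 - t)) * pt**gamma, where
pt = (1 - p) * t + p * (1 - t), so confidently classified examples
contribute little to the loss.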
25 | """ 26 | super(FocalLoss, self).__init__() 27 | self.gamma = gamma 28 | self.alpha = alpha 29 | self.loss_weight = loss_weight 30 | 31 | def forward(self, pred, target, weight=None, avg_factor=None): 32 | pred_sigmoid = pred.sigmoid() 33 | 34 | target = one_hot(target, int(pred.shape[-1])) 35 | target = target.type_as(pred) 36 | 37 | pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target) 38 | focal_weight = (self.alpha * target + (1 - self.alpha) * 39 | (1 - target)) * pt.pow(self.gamma) 40 | loss = F.binary_cross_entropy_with_logits( 41 | pred, target, reduction='none') * focal_weight 42 | 43 | if weight is not None: 44 | loss = loss * weight 45 | 46 | loss = loss * self.loss_weight 47 | 48 | if avg_factor is None: 49 | return loss.mean() 50 | elif avg_factor > 0: 51 | return loss.sum() / avg_factor 52 | else: 53 | return loss 54 | -------------------------------------------------------------------------------- /ml3d/torch/modules/losses/semseg_loss.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import numpy as np 4 | from ....datasets.utils import DataProcessing 5 | 6 | 7 | def filter_valid_label(scores, labels, num_classes, ignored_label_inds, device): 8 | """Filter out points with ignored labels and remap label values to the logit range.""" 9 | valid_scores = scores.reshape(-1, num_classes) 10 | valid_labels = labels.reshape(-1).to(device) 11 | 12 | ignored_bool = torch.zeros_like(valid_labels, dtype=torch.bool) 13 | for ign_label in ignored_label_inds: 14 | ignored_bool = torch.logical_or(ignored_bool, 15 | torch.eq(valid_labels, ign_label)) 16 | 17 | valid_idx = torch.where(torch.logical_not(ignored_bool))[0].to(device) 18 | 19 | valid_scores = torch.gather(valid_scores, 0, 20 | valid_idx.unsqueeze(-1).expand(-1, num_classes)) 21 | valid_labels = torch.gather(valid_labels, 0, valid_idx) 22 | 23 | # Reduce label values in the range of logit shape 24 | reducing_list = torch.arange(0, num_classes, dtype=torch.int64) 25 | inserted_value = torch.zeros([1], dtype=torch.int64) 26 | 27 | for ign_label in ignored_label_inds: 28 | if ign_label >= 0: 29 | 30 | reducing_list = torch.cat([ 31 | reducing_list[:ign_label], inserted_value, 32 | reducing_list[ign_label:] 33 | ], 0) 34 | valid_labels = torch.gather(reducing_list.to(device), 0, 35 | valid_labels.long()) 36 | 37 | return valid_scores, valid_labels 38 | 39 | 40 | class SemSegLoss(object): 41 | """Loss functions for semantic segmentation.""" 42 | 43 | def __init__(self, pipeline, model, dataset, device): 44 | super(SemSegLoss, self).__init__() 45 | # weighted_CrossEntropyLoss 46 | if 'class_weights' in dataset.cfg.keys() and len( 47 | dataset.cfg.class_weights) != 0: 48 | class_wt = DataProcessing.get_class_weights( 49 | dataset.cfg.class_weights) 50 | weights = torch.tensor(class_wt, dtype=torch.float, device=device) 51 | 52 | self.weighted_CrossEntropyLoss = nn.CrossEntropyLoss(weight=weights) 53 | else: 54 | self.weighted_CrossEntropyLoss = nn.CrossEntropyLoss() 55 | -------------------------------------------------------------------------------- /ml3d/torch/modules/losses/smooth_L1.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | 5 | class SmoothL1Loss(nn.Module): 6 | """Smooth L1 loss. 7 | 8 | Args: 9 | beta (float, optional): The threshold in the piecewise function. 10 | Defaults to 1.0. 
13 | loss_weight (float, optional): The weight of loss. 14 | """ 15 | 16 | def __init__(self, beta=1.0, loss_weight=1.0): 17 | super(SmoothL1Loss, self).__init__() 18 | self.beta = beta 19 | self.loss_weight = loss_weight 20 | 21 | def forward(self, pred, target, weight=None, avg_factor=None, **kwargs): 22 | """Forward function. 23 | 24 | Args: 25 | pred (torch.Tensor): The prediction. 26 | target (torch.Tensor): The learning target of the prediction. 27 | weight (torch.Tensor, optional): The weight of loss for each 28 | prediction. Defaults to None. 29 | avg_factor (int, optional): Average factor that is used to average 30 | the loss. Defaults to None. 34 | """ 35 | assert pred.size() == target.size() and target.numel() > 0 36 | diff = torch.abs(pred - target) 37 | loss = torch.where(diff < self.beta, 0.5 * diff * diff / self.beta, 38 | diff - 0.5 * self.beta) 39 | if weight is not None: 40 | loss = loss * weight 41 | 42 | loss = loss * self.loss_weight 43 | 44 | if avg_factor: 45 | return loss.sum() / avg_factor 46 | else: 47 | return loss.mean() 48 | -------------------------------------------------------------------------------- /ml3d/torch/modules/metrics/__init__.py: -------------------------------------------------------------------------------- 1 | from .semseg_metric import SemSegMetric 2 | 3 | __all__ = ['SemSegMetric'] 4 | -------------------------------------------------------------------------------- /ml3d/torch/modules/optimizers/__init__.py: -------------------------------------------------------------------------------- 1 | from .optim_wrapper import OptimWrapper 2 | 3 | __all__ = ['OptimWrapper'] 4 | -------------------------------------------------------------------------------- /ml3d/torch/modules/schedulers/__init__.py: -------------------------------------------------------------------------------- 1 | from .bn_momentum_scheduler import BNMomentumScheduler 2 | from .lr_one_cycle_scheduler import OneCycleScheduler 3 | from .cosine_warmup_scheduler import CosineWarmupLR 4 | 5 | __all__ = ['BNMomentumScheduler', 'OneCycleScheduler', 'CosineWarmupLR'] 6 | -------------------------------------------------------------------------------- /ml3d/torch/modules/schedulers/cosine_warmup_scheduler.py: -------------------------------------------------------------------------------- 1 | #***************************************************************************************/ 2 | # 3 | # Based on PointRCNN Library (MIT license): 4 | # https://github.com/sshaoshuai/PointRCNN 5 | # 6 | # Copyright (c) 2019 Shaoshuai Shi 7 | 8 | # Permission is hereby granted, free of charge, to any person obtaining a copy 9 | # of this software and associated documentation files (the "Software"), to deal 10 | # in the Software without restriction, including without limitation the rights 11 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 12 | # copies of the Software, and to permit persons to whom the Software is 13 | # furnished to do so, subject to the following conditions: 14 | 15 | # The above copyright notice and this permission notice shall be included in all 16 | # copies or substantial portions of the Software. 
17 | 18 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 21 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 22 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 23 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 24 | # SOFTWARE. 25 | # 26 | #***************************************************************************************/ 27 | 28 | import torch.optim.lr_scheduler as lr_sched 29 | import math 30 | 31 | 32 | class CosineWarmupLR(lr_sched._LRScheduler): 33 | 34 | def __init__(self, optimizer, T_max, eta_min=0, last_epoch=-1): 35 | self.T_max = T_max 36 | self.eta_min = eta_min 37 | super(CosineWarmupLR, self).__init__(optimizer, last_epoch) 38 | 39 | def get_lr(self): 40 | return [ 41 | self.eta_min + (base_lr - self.eta_min) * 42 | (1 - math.cos(math.pi * self.last_epoch / self.T_max)) / 2 43 | for base_lr in self.base_lrs 44 | ] 45 | -------------------------------------------------------------------------------- /ml3d/torch/pipelines/__init__.py: -------------------------------------------------------------------------------- 1 | """3D ML pipelines for torch.""" 2 | 3 | from .semantic_segmentation import SemanticSegmentation 4 | from .object_detection import ObjectDetection 5 | 6 | __all__ = ['SemanticSegmentation', 'ObjectDetection'] 7 | -------------------------------------------------------------------------------- /ml3d/torch/pipelines/base_pipeline.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import yaml 3 | import torch 4 | from abc import ABC, abstractmethod 5 | 6 | from os.path import join, exists, dirname, abspath 7 | 8 | # use relative import for being compatible with Open3d main repo 9 | from ...utils import Config, make_dir 10 | 11 | 12 | class BasePipeline(ABC): 13 | """Base pipeline class.""" 14 | 15 | def __init__(self, model, dataset=None, device='gpu', **kwargs): 16 | """Initialize. 17 | 18 | Args: 19 | model: A network model. 20 | dataset: A dataset, or None for inference model. 21 | device: 'gpu' or 'cpu'. 22 | kwargs: 23 | 24 | Returns: 25 | class: The corresponding class. 26 | """ 27 | self.cfg = Config(kwargs) 28 | 29 | if kwargs['name'] is None: 30 | raise KeyError("Please give a name to the pipeline") 31 | self.name = self.cfg.name 32 | 33 | self.model = model 34 | self.dataset = dataset 35 | 36 | make_dir(self.cfg.main_log_dir) 37 | dataset_name = dataset.name if dataset is not None else '' 38 | self.cfg.logs_dir = join( 39 | self.cfg.main_log_dir, 40 | model.__class__.__name__ + '_' + dataset_name + '_torch') 41 | make_dir(self.cfg.logs_dir) 42 | 43 | if device == 'cpu' or not torch.cuda.is_available(): 44 | self.device = torch.device('cpu') 45 | else: 46 | self.device = torch.device('cuda' if len(device.split(':')) == 47 | 1 else 'cuda:' + device.split(':')[1]) 48 | 49 | @abstractmethod 50 | def run_inference(self, data): 51 | """Run inference on given data. 52 | 53 | Args: 54 | data: Raw data. 55 | 56 | Returns: 57 | Returns the inference results. 
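Example (sketch; assumes a concrete pipeline whose checkpoint has
been restored, e.g. via its load_ckpt method)::

    results = pipeline.run_inference(data)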
58 | """ 59 | return 60 | 61 | @abstractmethod 62 | def run_test(self): 63 | """Run testing on test sets.""" 64 | return 65 | 66 | @abstractmethod 67 | def run_train(self): 68 | """Run training on train sets.""" 69 | return 70 | -------------------------------------------------------------------------------- /ml3d/torch/utils/__init__.py: -------------------------------------------------------------------------------- 1 | """Utils for torch networks.""" 2 | 3 | from .torch_utils import latest_torch_ckpt 4 | 5 | __all__ = ['latest_torch_ckpt'] 6 | -------------------------------------------------------------------------------- /ml3d/torch/utils/roipool3d/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kylevedder/SparsePointPillars/ebe2960b1d2591a031f50a5d5782595dd1b4e8e4/ml3d/torch/utils/roipool3d/__init__.py -------------------------------------------------------------------------------- /ml3d/torch/utils/roipool3d/roipool3d_utils.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import open3d 3 | if open3d.core.cuda.device_count() > 0: 4 | from open3d.ml.torch.ops import roi_pool 5 | import numpy as np 6 | 7 | 8 | def enlarge_box3d(boxes3d, extra_width): 9 | """Enlarge 3D box. 10 | 11 | Args: 12 | boxes3d: (N, 7) [x, y, z, h, w, l, ry] 13 | extra_width: extra width 14 | """ 15 | if isinstance(boxes3d, np.ndarray): 16 | large_boxes3d = boxes3d.copy() 17 | else: 18 | large_boxes3d = boxes3d.clone() 19 | large_boxes3d[:, 3:6] += extra_width * 2 20 | large_boxes3d[:, 1] += extra_width 21 | return large_boxes3d 22 | 23 | 24 | def roipool3d_gpu(pts, 25 | pts_feature, 26 | boxes3d, 27 | pool_extra_width, 28 | sampled_pt_num=512): 29 | """Roipool3D GPU. 
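Samples a fixed number of points (with their features) from inside
each enlarged box, producing fixed-size per-box feature volumes for
box refinement.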
30 | 31 | Args: 32 | pts: (B, N, 3) 33 | pts_feature: (B, N, C) 34 | boxes3d: (B, M, 7) 35 | pool_extra_width: float 36 | sampled_pt_num: int 37 | 38 | Returns: 39 | pooled_features: (B, M, 512, 3 + C) 40 | pooled_empty_flag: (B, M) 41 | """ 42 | if not open3d.core.cuda.device_count() > 0: 43 | raise NotImplementedError 44 | 45 | batch_size = pts.shape[0] 46 | pooled_boxes3d = enlarge_box3d(boxes3d.view(-1, 7), 47 | pool_extra_width).view(batch_size, -1, 7) 48 | 49 | pooled_features, pooled_empty_flag = roi_pool(pts.contiguous(), 50 | pooled_boxes3d.contiguous(), 51 | pts_feature.contiguous(), 52 | sampled_pt_num) 53 | 54 | return pooled_features, pooled_empty_flag 55 | -------------------------------------------------------------------------------- /ml3d/torch/utils/torch_utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | import re 3 | from torch import nn 4 | import torch.nn.functional as F 5 | 6 | 7 | def atoi(text): 8 | return int(text) if text.isdigit() else text 9 | 10 | 11 | def natural_keys(text): 12 | return [atoi(c) for c in re.split(r'(\d+)', text)] 13 | 14 | 15 | def latest_torch_ckpt(train_ckpt_dir): 16 | files = os.listdir(train_ckpt_dir) 17 | ckpt_list = [f for f in files if f.endswith('.pth')] 18 | if len(ckpt_list) == 0: 19 | return None 20 | ckpt_list.sort(key=natural_keys) 21 | 22 | ckpt_name = ckpt_list[-1] 23 | return os.path.join(train_ckpt_dir, ckpt_name) 24 | 25 | 26 | def gen_CNN(channels, 27 | conv=nn.Conv1d, 28 | bias=True, 29 | activation=nn.ReLU, 30 | batch_norm=None, 31 | instance_norm=None): 32 | layers = [] 33 | for i in range(len(channels) - 1): 34 | in_size, out_size = channels[i:i + 2] 35 | layers.append(conv(in_size, out_size, 1, bias=bias)) 36 | if batch_norm is not None: 37 | layers.append(batch_norm(out_size)) 38 | if activation is not None: 39 | layers.append(activation(inplace=True)) 40 | if instance_norm is not None: 41 | layers.append( 42 | instance_norm(out_size, affine=False, 43 | track_running_stats=False)) 44 | 45 | return nn.Sequential(*layers) 46 | -------------------------------------------------------------------------------- /ml3d/utils/__init__.py: -------------------------------------------------------------------------------- 1 | """Utils for 3D ML.""" 2 | 3 | from .config import Config 4 | from .log import LogRecord, get_runid, code2md 5 | from .builder import (MODEL, PIPELINE, DATASET, SAMPLER, get_module, 6 | convert_framework_name, convert_device_name) 7 | from .dataset_helper import get_hash, make_dir, Cache 8 | 9 | __all__ = [ 10 | 'Config', 'make_dir', 'LogRecord', 'MODEL', 'SAMPLER', 'PIPELINE', 11 | 'DATASET', 'get_module', 'convert_framework_name', 'get_hash', 12 | 'Cache', 'convert_device_name' 13 | ] 14 | -------------------------------------------------------------------------------- /ml3d/utils/builder.py: -------------------------------------------------------------------------------- 1 | from .registry import Registry, get_from_name 2 | 3 | MODEL = Registry('model') 4 | DATASET = Registry('dataset') 5 | PIPELINE = Registry('pipeline') 6 | SAMPLER = Registry('sampler') 7 | 8 | 9 | def build(cfg, registry, args=None): 10 | return build_from_cfg(cfg, registry, args) 11 | 12 | 13 | def build_network(cfg): 14 | return build(cfg, NETWORK) 15 | 16 | 17 | def convert_device_name(framework): 18 | """Convert device to either cpu or cuda.""" 19 | gpu_names = ["gpu", "cuda"] 20 | cpu_names = ["cpu"] 21 | if framework not in cpu_names + gpu_names: 22 | raise 
KeyError("the device should either " 23 | "be cuda or cpu but got {}".format(framework)) 24 | if framework in gpu_names: 25 | return "cuda" 26 | else: 27 | return "cpu" 28 | 29 | 30 | def convert_framework_name(framework): 31 | """Convert framework to either tf or torch.""" 32 | tf_names = ["tf", "tensorflow", "TF"] 33 | torch_names = ["torch", "pytorch", "PyTorch"] 34 | if framework not in tf_names + torch_names: 35 | raise KeyError("the framework should either " 36 | "be tf or torch but got {}".format(framework)) 37 | if framework in tf_names: 38 | return "tf" 39 | else: 40 | return "torch" 41 | 42 | 43 | def get_module(module_type, module_name, framework=None, **kwargs): 44 | """Fetch modules (pipeline, model, dataset, or sampler) from registry.""" 45 | if module_type == 'pipeline': 46 | framework = convert_framework_name(framework) 47 | return get_from_name(module_name, PIPELINE, framework) 48 | 49 | elif module_type == "dataset": 50 | return get_from_name(module_name, DATASET, framework) 51 | 52 | elif module_type == "sampler": 53 | return get_from_name(module_name, SAMPLER, framework) 54 | 55 | elif module_type == "model": 56 | framework = convert_framework_name(framework) 57 | return get_from_name(module_name, MODEL, framework) 58 | else: 59 | raise KeyError("module type should be model, dataset, or pipeline but " 60 | "got {}".format(module_type)) 61 | -------------------------------------------------------------------------------- /ml3d/utils/dataset_helper.py: -------------------------------------------------------------------------------- 1 | import hashlib 2 | from pathlib import Path 3 | from typing import Callable 4 | import numpy as np 5 | 6 | from os import makedirs, listdir 7 | from os.path import exists, join, isfile, dirname, abspath, splitext 8 | 9 | 10 | def make_dir(folder_name): 11 | """Create a directory. 12 | 13 | If it already exists, do nothing. 14 | """ 15 | if not exists(folder_name): 16 | makedirs(folder_name) 17 | 18 | 19 | def get_hash(x: str): 20 | """Generate a hash from a string.""" 21 | h = hashlib.md5(x.encode()) 22 | return h.hexdigest() 23 | 24 | 25 | class Cache(object): 26 | """Cache converter for preprocessed data.""" 27 | 28 | def __init__(self, func: Callable, cache_dir: str, cache_key: str): 29 | """Initialize. 30 | 31 | Args: 32 | func: preprocess function of a model. 33 | cache_dir: directory to store the cache. 34 | cache_key: key of this cache 35 | Returns: 36 | class: The corresponding class. 37 | """ 38 | self.func = func 39 | self.cache_dir = join(cache_dir, cache_key) 40 | make_dir(self.cache_dir) 41 | self.cached_ids = [splitext(p)[0] for p in listdir(self.cache_dir)] 42 | 43 | def __call__(self, unique_id: str, *data): 44 | """Call the converter. If the cache exists, load and return the cache, 45 | otherwise run the preprocess function and store the cache. 46 | 47 | Args: 48 | unique_id: A unique key of this data. 49 | data: Input to the preprocess function. 50 | 51 | Returns: 52 | class: Preprocessed (cache) data. 
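Example (sketch; preprocess_fn is a hypothetical preprocess
function)::

    cache = Cache(preprocess_fn, cache_dir='./logs/cache', cache_key='abc')
    out = cache('frame_000', raw_data)  # computed once, then served from disk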
53 | """ 54 | fpath = join(self.cache_dir, '{}.npy'.format(unique_id)) 55 | 56 | if not exists(fpath): 57 | output = self.func(*data) 58 | self._write(output, fpath) 59 | self.cached_ids.append(unique_id) 60 | 61 | return self._read(fpath) 62 | 63 | def _write(self, x, fpath): 64 | np.save(fpath, x) 65 | 66 | def _read(self, fpath): 67 | return np.load(fpath, allow_pickle=True).item() 68 | -------------------------------------------------------------------------------- /ml3d/utils/log.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | from pathlib import Path 4 | 5 | 6 | class LogRecord(logging.LogRecord): 7 | """Class for logging information.""" 8 | 9 | def getMessage(self): 10 | msg = self.msg 11 | if self.args: 12 | if isinstance(self.args, dict): 13 | msg = msg.format(**self.args) 14 | else: 15 | msg = msg.format(*self.args) 16 | return msg 17 | 18 | 19 | def get_runid(path): 20 | """Get the run id of an experiment.""" 21 | name = Path(path).name 22 | if not os.path.exists(Path(path).parent): 23 | return '00001' 24 | files = os.listdir(Path(path).parent) 25 | runid = 0 26 | for f in files: 27 | try: 28 | id, val = f.split('_', 1) 29 | runid = max(runid, int(id)) 30 | except ValueError: 31 | pass 32 | runid = str(runid + 1) 33 | runid = '0' * (5 - len(runid)) + runid 34 | return runid 35 | 36 | 37 | def code2md(code_text, language=None): 38 | """Format code as markdown for display (e.g. in tensorboard)""" 39 | four_spaces = ' ' 40 | code_md = four_spaces + code_text.replace(os.linesep, 41 | os.linesep + four_spaces) 42 | return code_md[:-4] 43 | -------------------------------------------------------------------------------- /ml3d/vis/__init__.py: -------------------------------------------------------------------------------- 1 | """Visualizer for 3D ML.""" 2 | 3 | from .boundingbox import * 4 | from .colormap import * 5 | from .visualizer import * 6 | -------------------------------------------------------------------------------- /ml3d/vis/labellut.py: -------------------------------------------------------------------------------- 1 | class LabelLUT: 2 | """The class to manage look-up table for assigning colors to labels.""" 3 | 4 | class Label: 5 | 6 | def __init__(self, name, value, color): 7 | self.name = name 8 | self.value = value 9 | self.color = color 10 | 11 | Colors = [[0., 0., 0.], [0.96078431, 0.58823529, 0.39215686], 12 | [0.96078431, 0.90196078, 0.39215686], 13 | [0.58823529, 0.23529412, 0.11764706], 14 | [0.70588235, 0.11764706, 0.31372549], [1., 0., 0.], 15 | [0.11764706, 0.11764706, 1.], [0.78431373, 0.15686275, 1.], 16 | [0.35294118, 0.11764706, 0.58823529], [1., 0., 1.], 17 | [1., 0.58823529, 1.], [0.29411765, 0., 0.29411765], 18 | [0.29411765, 0., 0.68627451], [0., 0.78431373, 1.], 19 | [0.19607843, 0.47058824, 1.], [0., 0.68627451, 0.], 20 | [0., 0.23529412, 21 | 0.52941176], [0.31372549, 0.94117647, 0.58823529], 22 | [0.58823529, 0.94117647, 1.], [0., 0., 1.], [1.0, 1.0, 0.25], 23 | [0.5, 1.0, 0.25], [0.25, 1.0, 0.25], [0.25, 1.0, 0.5], 24 | [0.25, 1.0, 1.25], [0.25, 0.5, 1.25], [0.25, 0.25, 1.0], 25 | [0.125, 0.125, 0.125], [0.25, 0.25, 0.25], [0.375, 0.375, 0.375], 26 | [0.5, 0.5, 0.5], [0.625, 0.625, 0.625], [0.75, 0.75, 0.75], 27 | [0.875, 0.875, 0.875]] 28 | 29 | def __init__(self): 30 | self._next_color = 0 31 | self.labels = {} 32 | 33 | def add_label(self, name, value, color=None): 34 | """Adds a label to the table. 
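If color is omitted, the next entry of the built-in palette is used
(falling back to a fixed color once the palette is exhausted).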
-------------------------------------------------------------------------------- /ml3d/vis/__init__.py: -------------------------------------------------------------------------------- 1 | """Visualizer for 3D ML.""" 2 | 3 | from .boundingbox import * 4 | from .colormap import * 5 | from .visualizer import * 6 |
-------------------------------------------------------------------------------- /ml3d/vis/labellut.py: -------------------------------------------------------------------------------- 1 | class LabelLUT: 2 | """Manages the look-up table used to assign colors to labels.""" 3 | 4 | class Label: 5 | 6 | def __init__(self, name, value, color): 7 | self.name = name 8 | self.value = value 9 | self.color = color 10 | 11 | Colors = [[0., 0., 0.], [0.96078431, 0.58823529, 0.39215686], 12 | [0.96078431, 0.90196078, 0.39215686], 13 | [0.58823529, 0.23529412, 0.11764706], 14 | [0.70588235, 0.11764706, 0.31372549], [1., 0., 0.], 15 | [0.11764706, 0.11764706, 1.], [0.78431373, 0.15686275, 1.], 16 | [0.35294118, 0.11764706, 0.58823529], [1., 0., 1.], 17 | [1., 0.58823529, 1.], [0.29411765, 0., 0.29411765], 18 | [0.29411765, 0., 0.68627451], [0., 0.78431373, 1.], 19 | [0.19607843, 0.47058824, 1.], [0., 0.68627451, 0.], 20 | [0., 0.23529412, 21 | 0.52941176], [0.31372549, 0.94117647, 0.58823529], 22 | [0.58823529, 0.94117647, 1.], [0., 0., 1.], [1.0, 1.0, 0.25], 23 | [0.5, 1.0, 0.25], [0.25, 1.0, 0.25], [0.25, 1.0, 0.5], 24 | [0.25, 1.0, 1.25], [0.25, 0.5, 1.25], [0.25, 0.25, 1.0], 25 | [0.125, 0.125, 0.125], [0.25, 0.25, 0.25], [0.375, 0.375, 0.375], 26 | [0.5, 0.5, 0.5], [0.625, 0.625, 0.625], [0.75, 0.75, 0.75], 27 | [0.875, 0.875, 0.875]] 28 | 29 | def __init__(self): 30 | self._next_color = 0 31 | self.labels = {} 32 | 33 | def add_label(self, name, value, color=None): 34 | """Adds a label to the table. 35 | 36 | **Example:** 37 | The following sample creates a LUT with 3 labels:: 38 | 39 | lut = ml3d.vis.LabelLUT() 40 | lut.add_label('one', 1) 41 | lut.add_label('two', 2) 42 | lut.add_label('three', 3, [0,0,1]) # use blue for label 'three' 43 | 44 | **Args:** 45 | name: The label name as a string. 46 | value: The value associated with the label. 47 | color: Optional RGB color, e.g., [0.2, 0.4, 1.0]. 48 | """ 49 | if color is None: 50 | if self._next_color >= len(self.Colors): 51 | color = [0.85, 1.0, 1.0] 52 | else: 53 | color = self.Colors[self._next_color] 54 | self._next_color += 1 55 | self.labels[value] = self.Label(name, value, color) 56 |
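# Editor's note: a small sketch of the automatic palette, complementing the
# docstring example above. Labels added without an explicit color take the
# next entry from LabelLUT.Colors.
if __name__ == '__main__':
    lut = LabelLUT()
    for value, name in enumerate(['unlabeled', 'car', 'road']):
        lut.add_label(name, value)
    print(lut.labels[1].name, lut.labels[1].color)  # 'car' with Colors[1]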
-------------------------------------------------------------------------------- /requirements-tensorflow.txt: -------------------------------------------------------------------------------- 1 | tensorflow~=2.4.1 2 |
-------------------------------------------------------------------------------- /requirements-torch-cuda.txt: -------------------------------------------------------------------------------- 1 | https://github.com/isl-org/open3d_downloads/releases/download/torch1.7.1/torch-1.7.1-cp36-cp36m-linux_x86_64.whl ; python_version == '3.6' 2 | https://github.com/isl-org/open3d_downloads/releases/download/torch1.7.1/torch-1.7.1-cp37-cp37m-linux_x86_64.whl ; python_version == '3.7' 3 | https://github.com/isl-org/open3d_downloads/releases/download/torch1.7.1/torch-1.7.1-cp38-cp38-linux_x86_64.whl ; python_version == '3.8' 4 | -f https://download.pytorch.org/whl/torch_stable.html 5 | torchvision==0.8.2+cu110 6 | tensorboard 7 |
-------------------------------------------------------------------------------- /requirements-torch.txt: -------------------------------------------------------------------------------- 1 | -f https://download.pytorch.org/whl/torch_stable.html 2 | torch==1.7.1+cpu ; sys_platform != 'darwin' 3 | torchvision==0.8.2+cpu ; sys_platform != 'darwin' 4 | torch==1.7.1 ; sys_platform == 'darwin' 5 | torchvision==0.8.2 ; sys_platform == 'darwin' 6 | tensorboard 7 |
-------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | addict 2 | pillow>=8.2.0 3 | matplotlib>=3 4 | numpy>1.15 5 | pandas>=1.0 6 | pyyaml>=5.4.1 7 | scikit-learn>=0.21 8 | tqdm 9 |
-------------------------------------------------------------------------------- /scripts/demo_api_train.py: -------------------------------------------------------------------------------- 1 | from open3d.ml.datasets import (SemanticKITTI, ParisLille3D, Semantic3D, S3DIS, 2 | Toronto3D) 3 | from open3d.ml.torch.pipelines import SemanticSegmentation 4 | from open3d.ml.torch.models import RandLANet 5 | from open3d.ml.utils import Config, get_module 6 | 7 | import argparse 8 | 9 | 10 | def parse_args(): 11 | parser = argparse.ArgumentParser( 12 | description='Demo for training and inference') 13 | parser.add_argument('--path_semantickitti', 14 | help='path to SemanticKITTI', 15 | required=True) 16 | parser.add_argument('--path_ckpt_randlanet', 17 | help='path to RandLANet checkpoint') 18 | 19 | args, _ = parser.parse_known_args() 20 | 21 | dict_args = vars(args) 22 | for k in dict_args: 23 | v = dict_args[k] 24 | print("{}: {}".format(k, v) if v is not None else "{} not given". 25 | format(k)) 26 | 27 | return args 28 | 29 | 30 | def demo_train(args): 31 | # Initialize the training by passing parameters 32 | dataset = SemanticKITTI(args.path_semantickitti, use_cache=True) 33 | 34 | model = RandLANet(dim_input=3) 35 | 36 | pipeline = SemanticSegmentation(model=model, dataset=dataset, max_epoch=100) 37 | 38 | pipeline.run_train() 39 | 40 | 41 | def demo_inference(args): 42 | # Inference and test example 43 | Pipeline = get_module("pipeline", "SemanticSegmentation", "tf") 44 | Model = get_module("model", "RandLANet", "tf") 45 | Dataset = get_module("dataset", "SemanticKITTI") 46 | 47 | # Initialize the model and restore the weights from the checkpoint 48 | model = Model(ckpt_path=args.path_ckpt_randlanet) 49 | 50 | # Initialize the dataset by passing the dataset path 51 | dataset = Dataset(args.path_semantickitti, use_cache=False) 52 | 53 | pipeline = Pipeline(model=model, dataset=dataset) 54 | 55 | # Run inference on the first frame of the train split 56 | train_split = dataset.get_split("train") 57 | data = train_split.get_data(0) 58 | results = pipeline.run_inference(data) 59 | print(results) 60 | 61 | # Run evaluation on the test split 62 | pipeline.run_test() 63 | 64 | 65 | if __name__ == '__main__': 66 | args = parse_args() 67 | demo_train(args) 68 | demo_inference(args) 69 |
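# Editor's note: a config-driven variant of the setup above (sketch only; the
# config path and Config attribute access follow the usage in
# tests/test_integration.py, everything else is assumed):
#
#     cfg = Config.load_from_file('ml3d/configs/randlanet_semantickitti.yml')
#     Model = get_module('model', 'RandLANet', 'torch')
#     model = Model(**cfg.model)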
-------------------------------------------------------------------------------- /scripts/download_datasets/download_kitti.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ "$#" -ne 1 ]; then 4 | echo "Please provide the base directory to store the dataset." 5 | exit 1 6 | fi 7 | 8 | if ! command -v unzip &> /dev/null 9 | then 10 | echo "Error: unzip could not be found. Please install it to continue." 11 | exit 12 | fi 13 | 14 | BASE_DIR="$1"/Kitti 15 | 16 | mkdir -p $BASE_DIR 17 | 18 | url_velodyne="https://s3.eu-central-1.amazonaws.com/avg-kitti/data_object_velodyne.zip" 19 | url_calib="https://s3.eu-central-1.amazonaws.com/avg-kitti/data_object_calib.zip" 20 | url_label="https://s3.eu-central-1.amazonaws.com/avg-kitti/data_object_label_2.zip" 21 | 22 | wget -c -N -O $BASE_DIR'/data_object_velodyne.zip' $url_velodyne 23 | wget -c -N -O $BASE_DIR'/data_object_calib.zip' $url_calib 24 | wget -c -N -O $BASE_DIR'/data_object_label_2.zip' $url_label 25 | 26 | cd $BASE_DIR 27 | 28 | unzip data_object_velodyne.zip 29 | unzip data_object_calib.zip 30 | unzip data_object_label_2.zip 31 | 32 | mkdir zip_files 33 | mv data_object_velodyne.zip zip_files 34 | mv data_object_calib.zip zip_files 35 | mv data_object_label_2.zip zip_files 36 |
-------------------------------------------------------------------------------- /scripts/download_datasets/download_lyft.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ "$#" -ne 1 ]; then 4 | echo "Please provide the base directory to store the dataset." 5 | exit 1 6 | fi 7 | 8 | BASE_DIR="$1"/Lyft 9 | 10 | mkdir -p $BASE_DIR 11 | 12 | url_train="https://lyft-l5-datasets-public.s3-us-west-2.amazonaws.com/3d-object-detection/train.tar" 13 | url_test="https://lyft-l5-datasets-public.s3-us-west-2.amazonaws.com/3d-object-detection/test.tar" 14 | 15 | wget -c -N -O $BASE_DIR'/train.tar' $url_train 16 | wget -c -N -O $BASE_DIR'/test.tar' $url_test 17 | 18 | cd $BASE_DIR 19 | 20 | tar -xvf train.tar 21 | tar -xvf test.tar 22 | 23 | mkdir tar_files 24 | mv train.tar tar_files 25 | mv test.tar tar_files 26 | 27 | mkdir v1.01-train 28 | mkdir v1.01-test 29 | 30 | mv train_data v1.01-train/data 31 | mv train_images v1.01-train/images 32 | mv train_lidar v1.01-train/lidar 33 | mv train_maps v1.01-train/maps 34 | 35 | mv test_data v1.01-test/data 36 | mv test_images v1.01-test/images 37 | mv test_lidar v1.01-test/lidar 38 | mv test_maps v1.01-test/maps
-------------------------------------------------------------------------------- /scripts/download_datasets/download_parislille3d.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ "$#" -ne 1 ]; then 4 | echo "Please provide the base directory to store the dataset." 5 | exit 1 6 | fi 7 | 8 | if ! command -v unzip &> /dev/null 9 | then 10 | echo "Error: unzip could not be found. Please install it to continue." 11 | exit 12 | fi 13 | 14 | BASE_DIR="$1"/Paris_Lille3D 15 | 16 | mkdir -p $BASE_DIR 17 | 18 | export url_train="https://cloud.mines-paristech.fr/index.php/s/JhIxgyt0ALgRZ1O/download?path=%2F&files=training_10_classes" 19 | export url_test="https://cloud.mines-paristech.fr/index.php/s/JhIxgyt0ALgRZ1O/download?path=%2F&files=test_10_classes" 20 | 21 | wget -c -N -O $BASE_DIR'/training_10_classes.zip' $url_train 22 | wget -c -N -O $BASE_DIR'/test_10_classes.zip' $url_test 23 | 24 | cd $BASE_DIR 25 | 26 | unzip test_10_classes.zip 27 | unzip training_10_classes.zip 28 | 29 | mkdir -p $BASE_DIR/zip_files 30 | mv test_10_classes.zip $BASE_DIR/zip_files 31 | mv training_10_classes.zip $BASE_DIR/zip_files 32 |
-------------------------------------------------------------------------------- /scripts/download_datasets/download_semantickitti.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ "$#" -ne 1 ]; then 4 | echo "Please provide the base directory to store the dataset." 5 | exit 1 6 | fi 7 | 8 | if ! command -v unzip &> /dev/null 9 | then 10 | echo "Error: unzip could not be found. Please install it to continue." 11 | exit 12 | fi 13 | 14 | BASE_DIR="$1"/SemanticKitti 15 | 16 | mkdir -p $BASE_DIR 17 | 18 | url_data="https://s3.eu-central-1.amazonaws.com/avg-kitti/data_odometry_velodyne.zip" 19 | url_label="http://semantic-kitti.org/assets/data_odometry_labels.zip" 20 | 21 | wget -c -N -O $BASE_DIR'/data_odometry_velodyne.zip' $url_data 22 | wget -c -N -O $BASE_DIR'/data_odometry_labels.zip' $url_label 23 | 24 | cd $BASE_DIR 25 | 26 | unzip data_odometry_velodyne.zip 27 | unzip data_odometry_labels.zip 28 | 29 | mkdir zip_files 30 | mv data_odometry_labels.zip zip_files 31 | mv data_odometry_velodyne.zip zip_files 32 |
-------------------------------------------------------------------------------- /scripts/download_datasets/download_shapenet.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [[ "$#" -ne 1 ]]; then 4 | echo "Please provide the base directory to store the dataset." 5 | exit 1 6 | fi 7 | 8 | if ! command -v unzip &> /dev/null 9 | then 10 | echo "Error: unzip could not be found. Please install it to continue." 11 | exit 12 | fi 13 | 14 | BASE_DIR="$1"/ShapeNet 15 | 16 | mkdir -p ${BASE_DIR} 17 | 18 | export url="https://shapenet.cs.stanford.edu/ericyi/shapenetcore_partanno_segmentation_benchmark_v0.zip" 19 | 20 | wget -c -N -O ${BASE_DIR}'/shapenetcore_partanno_segmentation_benchmark_v0.zip' ${url} --no-check-certificate 21 | 22 | cd ${BASE_DIR} 23 | 24 | unzip shapenetcore_partanno_segmentation_benchmark_v0.zip 25 | 26 | mkdir -p ${BASE_DIR}/zip_files 27 | mv shapenetcore_partanno_segmentation_benchmark_v0.zip ${BASE_DIR}/zip_files 28 |
-------------------------------------------------------------------------------- /scripts/download_datasets/download_sunrgbd.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [[ "$#" -ne 1 ]]; then 4 | echo "Please provide the base directory to store the dataset." 5 | exit 1 6 | fi 7 | 8 | if ! command -v unzip &> /dev/null 9 | then 10 | echo "Error: unzip could not be found. Please install it to continue." 11 | exit 12 | fi 13 | 14 | BASE_DIR="$1"/sunrgbd 15 | 16 | mkdir -p ${BASE_DIR} 17 | 18 | url_sunrgbd="http://rgbd.cs.princeton.edu/data/SUNRGBD.zip" 19 | url_2dbb="http://rgbd.cs.princeton.edu/data/SUNRGBDMeta2DBB_v2.mat" 20 | url_3dbb="http://rgbd.cs.princeton.edu/data/SUNRGBDMeta3DBB_v2.mat" 21 | url_toolbox="http://rgbd.cs.princeton.edu/data/SUNRGBDtoolbox.zip" 22 | 23 | wget -c -N -O ${BASE_DIR}'/SUNRGBD.zip' ${url_sunrgbd} --no-check-certificate 24 | wget -c -N -O ${BASE_DIR}'/SUNRGBDMeta2DBB_v2.mat' ${url_2dbb} --no-check-certificate 25 | wget -c -N -O ${BASE_DIR}'/SUNRGBDMeta3DBB_v2.mat' ${url_3dbb} --no-check-certificate 26 | wget -c -N -O ${BASE_DIR}'/SUNRGBDtoolbox.zip' ${url_toolbox} --no-check-certificate 27 | 28 | cd ${BASE_DIR} 29 | 30 | unzip SUNRGBD.zip 31 | unzip SUNRGBDtoolbox.zip 32 | 33 | mkdir -p ${BASE_DIR}/zip_files 34 | mv SUNRGBDtoolbox.zip SUNRGBD.zip ${BASE_DIR}/zip_files 35 |
-------------------------------------------------------------------------------- /scripts/download_datasets/download_toronto3d.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ "$#" -ne 1 ]; then 4 | echo "Please provide the base directory to store the dataset." 5 | exit 1 6 | fi 7 | 8 | if ! command -v unzip &> /dev/null 9 | then 10 | echo "Error: unzip could not be found. Please install it to continue." 11 | exit 12 | fi 13 | 14 | BASE_DIR="$1"/Toronto3D 15 | 16 | export url="https://xx9lca.sn.files.1drv.com/y4mUm9-LiY3vULTW79zlB3xp0wzCPASzteId4wdUZYpzWiw6Jp4IFoIs6ADjLREEk1-IYH8KRGdwFZJrPlIebwytHBYVIidsCwkHhW39aQkh3Vh0OWWMAcLVxYwMTjXwDxHl-CDVDau420OG4iMiTzlsK_RTC_ypo3z-Adf-h0gp2O8j5bOq-2TZd9FD1jPLrkf3759rB-BWDGFskF3AsiB3g" 17 | 18 | mkdir -p $BASE_DIR 19 | 20 | wget -c -N -O $BASE_DIR'/Toronto_3D.zip' $url 21 | 22 | cd $BASE_DIR 23 | 24 | unzip -j Toronto_3D.zip 25 | 26 | # cleanup 27 | mkdir -p $BASE_DIR/zip_files 28 | mv Toronto_3D.zip $BASE_DIR/zip_files 29 | 30 |
-------------------------------------------------------------------------------- /scripts/train_scripts/kpconv_kitti.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p gpu 3 | #SBATCH -c 4 4 | #SBATCH --gres=gpu:1 5 | 6 | if [ "$#" -ne 2 ]; then 7 | echo "Please provide the training framework (torch/tf) and the dataset path" 8 | exit 1 9 | fi 10 | 11 | cd ../.. 12 | python scripts/run_pipeline.py $1 -c ml3d/configs/kpconv_semantickitti.yml \ 13 | --dataset_path $2 14 |
-------------------------------------------------------------------------------- /scripts/train_scripts/kpconv_paris.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p gpu 3 | #SBATCH -c 4 4 | #SBATCH --gres=gpu:1 5 | 6 | if [ "$#" -ne 2 ]; then 7 | echo "Please provide the training framework (torch/tf) and the dataset path" 8 | exit 1 9 | fi 10 | 11 | cd ../.. 12 | python scripts/run_pipeline.py $1 -c ml3d/configs/kpconv_parislille3d.yml \ 13 | --dataset_path $2 14 |
-------------------------------------------------------------------------------- /scripts/train_scripts/kpconv_s3dis.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p gpu 3 | #SBATCH -c 4 4 | #SBATCH --gres=gpu:1 5 | 6 | if [ "$#" -ne 2 ]; then 7 | echo "Please provide the training framework (torch/tf) and the dataset path" 8 | exit 1 9 | fi 10 | 11 | cd ../.. 12 | python scripts/run_pipeline.py $1 -c ml3d/configs/kpconv_s3dis.yml \ 13 | --dataset_path $2 14 |
-------------------------------------------------------------------------------- /scripts/train_scripts/kpconv_semantic3d.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p gpu 3 | #SBATCH -c 4 4 | #SBATCH --gres=gpu:1 5 | 6 | if [ "$#" -ne 2 ]; then 7 | echo "Please provide the training framework (torch/tf) and the dataset path" 8 | exit 1 9 | fi 10 | 11 | cd ../.. 12 | python scripts/run_pipeline.py $1 -c ml3d/configs/kpconv_semantic3d.yml \ 13 | --dataset_path $2 14 |
-------------------------------------------------------------------------------- /scripts/train_scripts/kpconv_toronto.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p gpu 3 | #SBATCH -c 4 4 | #SBATCH --gres=gpu:1 5 | 6 | if [ "$#" -ne 2 ]; then 7 | echo "Please provide the training framework (torch/tf) and the dataset path" 8 | exit 1 9 | fi 10 | 11 | cd ../.. 12 | python scripts/run_pipeline.py $1 -c ml3d/configs/kpconv_toronto3d.yml \ 13 | --dataset_path $2 14 |
-------------------------------------------------------------------------------- /scripts/train_scripts/pointpillars_kitti.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p gpu 3 | #SBATCH -c 4 4 | #SBATCH --gres=gpu:1 5 | 6 | if [ "$#" -ne 2 ]; then 7 | echo "Please provide the training framework (torch/tf) and the dataset path" 8 | exit 1 9 | fi 10 | 11 | cd ../.. 12 | python scripts/run_pipeline.py $1 -c ml3d/configs/pointpillars_kitti.yml \ 13 | --dataset_path $2 --pipeline ObjectDetection 14 |
-------------------------------------------------------------------------------- /scripts/train_scripts/randlanet_kitti.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p gpu 3 | #SBATCH -c 4 4 | #SBATCH --gres=gpu:1 5 | 6 | if [ "$#" -ne 2 ]; then 7 | echo "Please provide the training framework (torch/tf) and the dataset path" 8 | exit 1 9 | fi 10 | 11 | cd ../.. 12 | python scripts/run_pipeline.py $1 -c ml3d/configs/randlanet_semantickitti.yml \ 13 | --dataset_path $2 14 |
-------------------------------------------------------------------------------- /scripts/train_scripts/randlanet_paris.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p gpu 3 | #SBATCH -c 4 4 | #SBATCH --gres=gpu:1 5 | 6 | if [ "$#" -ne 2 ]; then 7 | echo "Please provide the training framework (torch/tf) and the dataset path" 8 | exit 1 9 | fi 10 | 11 | cd ../.. 12 | python scripts/run_pipeline.py $1 -c ml3d/configs/randlanet_parislille3d.yml \ 13 | --dataset_path $2 14 |
-------------------------------------------------------------------------------- /scripts/train_scripts/randlanet_s3dis.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p gpu 3 | #SBATCH -c 4 4 | #SBATCH --gres=gpu:1 5 | 6 | if [ "$#" -ne 2 ]; then 7 | echo "Please provide the training framework (torch/tf) and the dataset path" 8 | exit 1 9 | fi 10 | 11 | cd ../.. 12 | python scripts/run_pipeline.py $1 -c ml3d/configs/randlanet_s3dis.yml \ 13 | --dataset_path $2 14 |
-------------------------------------------------------------------------------- /scripts/train_scripts/randlanet_semantic3d.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p gpu 3 | #SBATCH -c 4 4 | #SBATCH --gres=gpu:1 5 | 6 | 7 | if [ "$#" -ne 2 ]; then 8 | echo "Please provide the training framework (torch/tf) and the dataset path" 9 | exit 1 10 | fi 11 | 12 | cd ../.. 13 | python scripts/run_pipeline.py $1 -c ml3d/configs/randlanet_semantic3d.yml \ 14 | --dataset_path $2 15 |
-------------------------------------------------------------------------------- /scripts/train_scripts/randlanet_toronto.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p gpu 3 | #SBATCH -c 4 4 | #SBATCH --gres=gpu:1 5 | 6 | if [ "$#" -ne 2 ]; then 7 | echo "Please provide the training framework (torch/tf) and the dataset path" 8 | exit 1 9 | fi 10 | 11 | cd ../.. 12 | python scripts/run_pipeline.py $1 -c ml3d/configs/randlanet_toronto3d.yml \ 13 | --dataset_path $2 14 |
-------------------------------------------------------------------------------- /set_open3d_ml_root.sh: -------------------------------------------------------------------------------- 1 | # Sets the env var OPEN3D_ML_ROOT to the directory of this file. 2 | # The open3d package will use this var to integrate ml3d into a common namespace. 3 | export OPEN3D_ML_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 4 | if [[ $0 == $BASH_SOURCE ]]; then 5 | echo "Source this script to set the OPEN3D_ML_ROOT env var."
6 | else 7 | echo "OPEN3D_ML_ROOT is now $OPEN3D_ML_ROOT" 8 | fi 9 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import glob 2 | import os 3 | import shutil 4 | from os import path 5 | from setuptools import find_packages, setup 6 | from typing import List 7 | import torch 8 | from torch.utils.cpp_extension import CUDA_HOME, CppExtension, CUDAExtension 9 | 10 | torch_ver = [int(x) for x in torch.__version__.split(".")[:2]] 11 | assert torch_ver >= [1, 4], "Requires PyTorch >= 1.4" 12 | 13 | if __name__ == '__main__': 14 | setup( 15 | name='ml3d', 16 | description='An extension of Open3D for 3D machine learning tasks', 17 | author='yi', 18 | packages=find_packages(exclude=('configs', 'tools', 'demo')), 19 | ) 20 | -------------------------------------------------------------------------------- /tb.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | job_id=$(sbatch tensorboard.bash $1 | awk '{print $4}') 3 | sleep 4 4 | echo "Job $job_id" 5 | cat "slurm-$job_id.out" 6 | rm "slurm-$job_id.out" -------------------------------------------------------------------------------- /tensorboard.bash: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --qos=viz 3 | #SBATCH --partition=viz 4 | #SBATCH --cores=1 5 | #SBATCH --mem=2G 6 | 7 | PORT_MAP=/tmp/tensorboard_port_map 8 | TB_PORT=$(cat $PORT_MAP | grep "$SLURM_JOBID," | cut -d',' -f2) 9 | IP_ADDRESS=$(hostname -I | cut -d' ' -f1) 10 | 11 | TB_FOLDER=$1 12 | 13 | echo "Go to http://$IP_ADDRESS:$TB_PORT" 14 | 15 | tensorboard --bind_all --logdir $TB_FOLDER --port $TB_PORT 16 | 17 | -------------------------------------------------------------------------------- /test_cpu_dense.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | source set_open3d_ml_root.sh; OMP_NUM_THREADS=1 python scripts/run_pipeline.py torch -c ml3d/configs/pointpillars_habitat_sampling.yml --dataset_path /data/habitat_sampling_dataset/ --pipeline ObjectDetection --split test --device cpu -------------------------------------------------------------------------------- /test_cpu_sparse.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | source set_open3d_ml_root.sh; OMP_NUM_THREADS=1 python scripts/run_pipeline.py torch -c ml3d/configs/pointpillars_habitat_sampling_sparse.yml --dataset_path /data/habitat_sampling_dataset/ --pipeline ObjectDetection --split test --device cpu -------------------------------------------------------------------------------- /test_gpu_dense.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | source set_open3d_ml_root.sh; OMP_NUM_THREADS=1 python scripts/run_pipeline.py torch -c ml3d/configs/pointpillars_habitat_sampling.yml --dataset_path /data/habitat_sampling_dataset/ --pipeline ObjectDetection --split test -------------------------------------------------------------------------------- /test_gpu_sparse.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | source set_open3d_ml_root.sh; OMP_NUM_THREADS=1 python scripts/run_pipeline.py torch -c ml3d/configs/pointpillars_habitat_sampling_sparse.yml --dataset_path /data/habitat_sampling_dataset/ --pipeline ObjectDetection --split test 
-------------------------------------------------------------------------------- /test_open3d.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from open3d.ml.contrib import iou_3d_cpu 3 | 4 | a1 = np.array([[0., 0., 0., 1., 1., 1., 0.], [0., 0., 0., 1., 1., 1., 5 | 0.]]).astype(np.float32) 6 | a2 = np.array([[0., 0., 0., 1., 1., 1., 0.], [0., 0., 0., 1., 1., 1., 0.], 7 | [0., 0., 0., 1., 1., 1., 0.]]).astype(np.float32) 8 | a3 = np.array([[3., 0., 0., 1., 1., 1., 0.]]).astype(np.float32) 9 | print(iou_3d_cpu(a1, a2).shape) 10 | print(iou_3d_cpu(a1, a3)) 11 |
-------------------------------------------------------------------------------- /test_robot_dense_habitat.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | x=1 3 | while [ $x -le 3 ] 4 | do 5 | echo "Step $x" 6 | source set_open3d_ml_root.sh; OMP_NUM_THREADS=`nproc` python scripts/run_pipeline.py torch -c ml3d/configs/pointpillars_habitat_sampling_dense_5000_mean_box.yml --dataset_path /data/habitat_sampling_dataset --pipeline ObjectDetection --split test --ckpt_path=./logs_dense_5000_mean_box/PointPillars_HabitatSampling_torch/checkpoint/ckpt_00200.pth --device cpu 7 | x=$(( $x + 1 )) 8 | done 9 |
-------------------------------------------------------------------------------- /test_robot_sparse_habitat.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | x=1 3 | while [ $x -le 3 ] 4 | do 5 | echo "Step $x" 6 | source set_open3d_ml_root.sh; OMP_NUM_THREADS=`nproc` python scripts/run_pipeline.py torch -c ml3d/configs/pointpillars_habitat_sampling_sparse_5000_2x2_mean_box.yml --dataset_path /data/habitat_sampling_dataset --pipeline ObjectDetection --split test --ckpt_path=./logs_sparse_5000_2x2_mean_box/SparsePointPillars_HabitatSampling_torch/checkpoint/ckpt_00200.pth --device cpu 7 | x=$(( $x + 1 )) 8 | done 9 |
-------------------------------------------------------------------------------- /test_slurm_dense.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | srun --gpus=1\ 3 | --mem-per-gpu=32G\ 4 | --cpus-per-gpu=16\ 5 | --qos=eaton-high\ 6 | --time=00:10:00\ 7 | --partition=eaton-compute \ 8 | --exclude=node-1080ti-3\ 9 | --container-mounts=/scratch:/scratch,/home/kvedder/code/Open3D-ML:/project\ 10 | --container-image=docker-registry.grasp.cluster#open3dml \ 11 | bash -c "source set_open3d_ml_root.sh; OMP_NUM_THREADS=1 python scripts/run_pipeline.py torch -c ml3d/configs/pointpillars_habitat_sampling.yml --dataset_path /scratch/kvedder/habitat_sampling_large --pipeline ObjectDetection --split test --ckpt_path=./logs_dense_01_wd_4_two_thirds/PointPillars_HabitatSampling_torch/checkpoint/ckpt_00300.pth" 12 | #bash -c "source set_open3d_ml_root.sh; OMP_NUM_THREADS=1 python scripts/run_pipeline.py torch -c ml3d/configs/pointpillars_habitat_sampling.yml --dataset_path /scratch/kvedder/habitat_sampling_large --pipeline ObjectDetection --split test --ckpt_path=./logs_dense_01/PointPillars_HabitatSampling_torch/checkpoint/ckpt_00300.pth" 13 |
-------------------------------------------------------------------------------- /test_slurm_sparse.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | srun --gpus=1\ 3 | --mem-per-gpu=32G\ 4 | --cpus-per-gpu=16\ 5 | --qos=eaton-high\ 6 | --time=00:10:00\ 7 | --partition=eaton-compute \ 8 | --exclude=node-1080ti-3\ 9 | --container-mounts=/scratch:/scratch,/home/kvedder/code/Open3D-ML:/project\ 10 | --container-image=docker-registry.grasp.cluster#open3dml \ 11 | bash -c "source set_open3d_ml_root.sh; OMP_NUM_THREADS=1 python scripts/run_pipeline.py torch -c ml3d/configs/pointpillars_habitat_sampling_sparse_01.yml --dataset_path /scratch/kvedder/habitat_sampling_large --pipeline ObjectDetection --split test --ckpt_path=./logs_sparse_01_wd_4_2x2/SparsePointPillars_HabitatSampling_torch/checkpoint/ckpt_00300.pth" 12 | #bash -c "source set_open3d_ml_root.sh; OMP_NUM_THREADS=1 python scripts/run_pipeline.py torch -c ml3d/configs/pointpillars_habitat_sampling_sparse_01.yml --dataset_path /scratch/kvedder/habitat_sampling_large --pipeline ObjectDetection --split test --ckpt_path=./logs_sparse_01_wd_4/SparsePointPillars_HabitatSampling_torch/checkpoint/ckpt_00300.pth" 13 | #bash -c "source set_open3d_ml_root.sh; OMP_NUM_THREADS=1 python scripts/run_pipeline.py torch -c ml3d/configs/pointpillars_habitat_sampling_sparse_01.yml --dataset_path /scratch/kvedder/habitat_sampling_large --pipeline ObjectDetection --split test --ckpt_path=./logs_sparse_01/SparsePointPillars_HabitatSampling_torch/checkpoint/ckpt_00500.pth" 14 |
-------------------------------------------------------------------------------- /test_xavier_dense_habitat.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | x=1 3 | while [ $x -le 10 ] 4 | do 5 | echo "Step $x" 6 | source set_open3d_ml_root.sh; OMP_NUM_THREADS=1 python scripts/run_pipeline.py torch -c ml3d/configs/pointpillars_habitat_sampling_dense_5000_mean_box.yml --dataset_path /data/habitat_sampling_dataset --pipeline ObjectDetection --split test --ckpt_path=./logs_dense_5000_mean_box/PointPillars_HabitatSampling_torch/checkpoint/ckpt_00200.pth 7 | x=$(( $x + 1 )) 8 | done 9 |
-------------------------------------------------------------------------------- /test_xavier_sparse_habitat.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | x=1 3 | while [ $x -le 10 ] 4 | do 5 | echo "Step $x" 6 | source set_open3d_ml_root.sh; OMP_NUM_THREADS=1 python scripts/run_pipeline.py torch -c ml3d/configs/pointpillars_habitat_sampling_sparse_5000_2x2_mean_box.yml --dataset_path /data/habitat_sampling_dataset --pipeline ObjectDetection --split test --ckpt_path=./logs_sparse_5000_2x2_mean_box/SparsePointPillars_HabitatSampling_torch/checkpoint/ckpt_00200.pth 7 | x=$(( $x + 1 )) 8 | done 9 |
-------------------------------------------------------------------------------- /tests/test_integration.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import os 3 | 4 | if 'PATH_TO_OPEN3D_ML' in os.environ.keys(): 5 | base = os.environ['PATH_TO_OPEN3D_ML'] 6 | else: 7 | base = '.'
8 | # base = '../Open3D-ML' 9 | 10 | 11 | def test_integration_torch(): 12 | import torch 13 | import open3d.ml.torch as ml3d 14 | from open3d.ml.datasets import S3DIS 15 | from open3d.ml.utils import Config, get_module 16 | from open3d.ml.torch.models import RandLANet, KPFCNN 17 | from open3d.ml.torch.pipelines import SemanticSegmentation 18 | print(dir(ml3d)) 19 | 20 | config = base + '/ml3d/configs/randlanet_toronto3d.yml' 21 | cfg = Config.load_from_file(config) 22 | 23 | model = ml3d.models.RandLANet(**cfg.model) 24 | 25 | print(model) 26 | 27 | 28 | def test_integration_tf(): 29 | import tensorflow as tf 30 | import open3d.ml.tf as ml3d 31 | from open3d.ml.datasets import S3DIS 32 | from open3d.ml.utils import Config, get_module 33 | from open3d.ml.tf.models import RandLANet, KPFCNN 34 | from open3d.ml.tf.pipelines import SemanticSegmentation 35 | print(dir(ml3d)) 36 | 37 | config = base + '/ml3d/configs/randlanet_toronto3d.yml' 38 | cfg = Config.load_from_file(config) 39 | 40 | model = ml3d.models.RandLANet(**cfg.model) 41 | 42 | print(model) 43 | 44 | 45 | test_integration_torch() 46 | -------------------------------------------------------------------------------- /train_slurm_dense.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | srun --gpus=1\ 3 | --mem-per-gpu=32G\ 4 | --cpus-per-gpu=16\ 5 | --qos=eaton-high\ 6 | --time=36:10:00\ 7 | --partition=eaton-compute \ 8 | --container-mounts=/scratch:/scratch,/home/kvedder/code/Open3D-ML:/project\ 9 | --container-image=docker-registry.grasp.cluster#open3dml \ 10 | bash -c "source set_open3d_ml_root.sh; OMP_NUM_THREADS=1 python scripts/run_pipeline.py torch -c ml3d/configs/pointpillars_habitat_sampling.yml --dataset_path /scratch/kvedder/habitat_sampling_large --pipeline ObjectDetection" 11 | -------------------------------------------------------------------------------- /train_slurm_dense_01.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | srun --gpus=1\ 3 | --mem-per-gpu=24G\ 4 | --cpus-per-gpu=16\ 5 | --qos=eaton-high\ 6 | --time=18:10:00\ 7 | --partition=eaton-compute \ 8 | --exclude=node-1080ti-3\ 9 | --container-mounts=/scratch:/scratch,/home/kvedder/code/Open3D-ML:/project\ 10 | --container-image=docker-registry.grasp.cluster#open3dml \ 11 | bash -c "source set_open3d_ml_root.sh; OMP_NUM_THREADS=1 python scripts/run_pipeline.py torch -c ml3d/configs/pointpillars_habitat_sampling_dense_01.yml --dataset_path /scratch/kvedder/habitat_sampling_large --pipeline ObjectDetection" 12 | -------------------------------------------------------------------------------- /train_slurm_dense_01_wd_4.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | srun --gpus=1\ 3 | --mem-per-gpu=24G\ 4 | --cpus-per-gpu=16\ 5 | --qos=eaton-high\ 6 | --time=18:10:00\ 7 | --partition=eaton-compute \ 8 | --exclude=node-1080ti-3\ 9 | --container-mounts=/scratch:/scratch,/home/kvedder/code/Open3D-ML:/project\ 10 | --container-image=docker-registry.grasp.cluster#open3dml \ 11 | bash -c "source set_open3d_ml_root.sh; OMP_NUM_THREADS=1 python scripts/run_pipeline.py torch -c ml3d/configs/pointpillars_habitat_sampling_dense_01_wd_4.yml --dataset_path /scratch/kvedder/habitat_sampling_large --pipeline ObjectDetection" 12 | -------------------------------------------------------------------------------- /train_slurm_dense_01_wd_4_5000_mean_box.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | srun --gpus=1\ 3 | --mem-per-gpu=24G\ 4 | --cpus-per-gpu=16\ 5 | --qos=eaton-high\ 6 | --time=18:10:00\ 7 | --partition=eaton-compute \ 8 | --exclude=node-1080ti-3\ 9 | --container-mounts=/scratch:/scratch,/home/kvedder/code/Open3D-ML:/project\ 10 | --container-image=docker-registry.grasp.cluster#open3dml \ 11 | bash -c "source set_open3d_ml_root.sh; OMP_NUM_THREADS=1 python scripts/run_pipeline.py torch -c ml3d/configs/pointpillars_habitat_sampling_dense_01_wd_4_mean_box.yml --dataset_path /scratch/kvedder/habitat_sampling_large --pipeline ObjectDetection" 12 | -------------------------------------------------------------------------------- /train_slurm_dense_01_wd_4_7500.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | srun --gpus=1\ 3 | --mem-per-gpu=24G\ 4 | --cpus-per-gpu=16\ 5 | --qos=eaton-high\ 6 | --time=32:10:00\ 7 | --partition=eaton-compute \ 8 | --exclude=node-1080ti-3\ 9 | --container-mounts=/scratch:/scratch,/home/kvedder/code/Open3D-ML:/project\ 10 | --container-image=docker-registry.grasp.cluster#open3dml \ 11 | bash -c "source set_open3d_ml_root.sh; OMP_NUM_THREADS=1 python scripts/run_pipeline.py torch -c ml3d/configs/pointpillars_habitat_sampling_dense_01_wd_4_7500.yml --dataset_path /scratch/kvedder/habitat_sampling_large --pipeline ObjectDetection" 12 | -------------------------------------------------------------------------------- /train_slurm_dense_5000_mean_box.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | srun --gpus=1\ 3 | --mem-per-gpu=24G\ 4 | --cpus-per-gpu=16\ 5 | --qos=eaton-high\ 6 | --time=18:10:00\ 7 | --partition=eaton-compute \ 8 | --exclude=node-1080ti-3\ 9 | --container-mounts=/scratch:/scratch,/home/kvedder/code/Open3D-ML:/project\ 10 | --container-image=docker-registry.grasp.cluster#open3dml \ 11 | bash -c "source set_open3d_ml_root.sh; OMP_NUM_THREADS=1 python scripts/run_pipeline.py torch -c ml3d/configs/pointpillars_habitat_sampling_dense_5000_mean_box.yml --dataset_path /scratch/kvedder/habitat_sampling_large --pipeline ObjectDetection" 12 | -------------------------------------------------------------------------------- /train_slurm_kitti_dense.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | srun --gpus=1\ 3 | --mem-per-gpu=20G\ 4 | --cpus-per-gpu=12\ 5 | --qos=eaton-high\ 6 | --time=48:10:00\ 7 | --partition=eaton-compute \ 8 | --container-mounts=/scratch:/scratch,/Datasets:/Datasets,/home/kvedder/code/Open3D-ML:/project\ 9 | --container-image=docker-registry.grasp.cluster#open3dml \ 10 | bash -c "source set_open3d_ml_root.sh; apt install -y time; time OMP_NUM_THREADS=1 python scripts/run_pipeline.py torch -c ml3d/configs/pointpillars_kitti_car_only.yml --dataset_path /Datasets/kitti_object_detect/ --pipeline ObjectDetection" 11 | -------------------------------------------------------------------------------- /train_slurm_kitti_sparse.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | srun --gpus=1\ 3 | --mem-per-gpu=20G\ 4 | --cpus-per-gpu=12\ 5 | --qos=eaton-high\ 6 | --time=48:10:00\ 7 | --partition=eaton-compute \ 8 | --container-mounts=/scratch:/scratch,/Datasets:/Datasets,/home/kvedder/code/Open3D-ML:/project\ 9 | --container-image=docker-registry.grasp.cluster#open3dml \ 10 | bash -c "source set_open3d_ml_root.sh; 
apt install -y time; time OMP_NUM_THREADS=1 python scripts/run_pipeline.py torch -c ml3d/configs/sparse_pointpillars_kitti_car_only.yml --dataset_path /Datasets/kitti_object_detect/ --pipeline ObjectDetection" 11 | -------------------------------------------------------------------------------- /train_slurm_kitti_sparse_lr_10.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | srun --gpus=1\ 3 | --mem-per-gpu=20G\ 4 | --cpus-per-gpu=12\ 5 | --qos=eaton-high\ 6 | --time=48:10:00\ 7 | --partition=eaton-compute \ 8 | --container-mounts=/scratch:/scratch,/Datasets:/Datasets,/home/kvedder/code/Open3D-ML:/project\ 9 | --container-image=docker-registry.grasp.cluster#open3dml \ 10 | bash -c "source set_open3d_ml_root.sh; OMP_NUM_THREADS=1 python scripts/run_pipeline.py torch -c ml3d/configs/sparse_pointpillars_kitti_car_only_lr_10.yml --dataset_path /Datasets/kitti_object_detect/ --pipeline ObjectDetection" 11 | -------------------------------------------------------------------------------- /train_slurm_kitti_sparse_sparse12_dense3.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | srun --gpus=1\ 3 | --mem-per-gpu=20G\ 4 | --cpus-per-gpu=12\ 5 | --qos=eaton-high\ 6 | --time=48:10:00\ 7 | --partition=eaton-compute \ 8 | --container-mounts=/scratch:/scratch,/Datasets:/Datasets,/home/kvedder/code/Open3D-ML:/project\ 9 | --container-image=docker-registry.grasp.cluster#open3dml \ 10 | bash -c "source set_open3d_ml_root.sh; OMP_NUM_THREADS=1 python scripts/run_pipeline.py torch -c ml3d/configs/sparse_pointpillars_kitti_car_only_sparse12_dense3.yml --dataset_path /Datasets/kitti_object_detect/ --pipeline ObjectDetection" 11 | -------------------------------------------------------------------------------- /train_slurm_kitti_sparse_sparse1_dense23.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | srun --gpus=1\ 3 | --mem-per-gpu=20G\ 4 | --cpus-per-gpu=12\ 5 | --qos=eaton-high\ 6 | --time=48:10:00\ 7 | --partition=eaton-compute \ 8 | --container-mounts=/scratch:/scratch,/Datasets:/Datasets,/home/kvedder/code/Open3D-ML:/project\ 9 | --container-image=docker-registry.grasp.cluster#open3dml \ 10 | bash -c "source set_open3d_ml_root.sh; OMP_NUM_THREADS=1 python scripts/run_pipeline.py torch -c ml3d/configs/sparse_pointpillars_kitti_car_only_sparse1_dense23.yml --dataset_path /Datasets/kitti_object_detect/ --pipeline ObjectDetection" 11 | -------------------------------------------------------------------------------- /train_slurm_kitti_sparse_wd_100.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | srun --gpus=1\ 3 | --mem-per-gpu=20G\ 4 | --cpus-per-gpu=12\ 5 | --qos=eaton-high\ 6 | --time=48:10:00\ 7 | --partition=eaton-compute \ 8 | --container-mounts=/scratch:/scratch,/Datasets:/Datasets,/home/kvedder/code/Open3D-ML:/project\ 9 | --container-image=docker-registry.grasp.cluster#open3dml \ 10 | bash -c "source set_open3d_ml_root.sh; OMP_NUM_THREADS=1 python scripts/run_pipeline.py torch -c ml3d/configs/sparse_pointpillars_kitti_car_only_wd_100.yml --dataset_path /Datasets/kitti_object_detect/ --pipeline ObjectDetection" 11 | -------------------------------------------------------------------------------- /train_slurm_kitti_sparse_wd_100_lr_10.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | srun --gpus=1\ 3 | --mem-per-gpu=20G\ 4 | 
--cpus-per-gpu=12\ 5 | --qos=eaton-high\ 6 | --time=48:10:00\ 7 | --partition=eaton-compute \ 8 | --container-mounts=/scratch:/scratch,/Datasets:/Datasets,/home/kvedder/code/Open3D-ML:/project\ 9 | --container-image=docker-registry.grasp.cluster#open3dml \ 10 | bash -c "source set_open3d_ml_root.sh; OMP_NUM_THREADS=1 python scripts/run_pipeline.py torch -c ml3d/configs/sparse_pointpillars_kitti_car_only_wd_100_lr_10.yml --dataset_path /Datasets/kitti_object_detect/ --pipeline ObjectDetection" 11 | -------------------------------------------------------------------------------- /train_slurm_kitti_sparse_wide.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | srun --gpus=1\ 3 | --mem-per-gpu=20G\ 4 | --cpus-per-gpu=12\ 5 | --qos=eaton-high\ 6 | --time=48:10:00\ 7 | --partition=eaton-compute \ 8 | --container-mounts=/scratch:/scratch,/Datasets:/Datasets,/home/kvedder/code/Open3D-ML:/project\ 9 | --container-image=docker-registry.grasp.cluster#open3dml \ 10 | bash -c "source set_open3d_ml_root.sh; OMP_NUM_THREADS=1 python scripts/run_pipeline.py torch -c ml3d/configs/sparse_pointpillars_kitti_car_only_wide.yml --dataset_path /Datasets/kitti_object_detect/ --pipeline ObjectDetection" 11 | -------------------------------------------------------------------------------- /train_slurm_nuscenes_dense.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | srun --gpus=1\ 3 | --mem-per-gpu=20G\ 4 | --cpus-per-gpu=12\ 5 | --qos=eaton-high\ 6 | --time=48:10:00\ 7 | --partition=eaton-compute \ 8 | --container-mounts=/scratch:/scratch,/Datasets:/Datasets,/home/kvedder/code/Open3D-ML:/project\ 9 | --container-image=docker-registry.grasp.cluster#open3dml \ 10 | bash -c "source set_open3d_ml_root.sh; apt install -y time; time OMP_NUM_THREADS=1 python scripts/run_pipeline.py torch -c ml3d/configs/pointpillars_nuscenes.yml --dataset_path /Datasets/nuscenes/ --pipeline ObjectDetection" 11 | -------------------------------------------------------------------------------- /train_slurm_nuscenes_sparse.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | srun --gpus=1\ 3 | --mem-per-gpu=20G\ 4 | --cpus-per-gpu=12\ 5 | --qos=eaton-high\ 6 | --time=48:10:00\ 7 | --partition=eaton-compute \ 8 | --container-mounts=/scratch:/scratch,/Datasets:/Datasets,/home/kvedder/code/Open3D-ML:/project\ 9 | --container-image=docker-registry.grasp.cluster#open3dml \ 10 | bash -c "source set_open3d_ml_root.sh; apt install -y time; time OMP_NUM_THREADS=1 python scripts/run_pipeline.py torch -c ml3d/configs/pointpillars_nuscenes_sparse.yml --dataset_path /Datasets/nuscenes/ --pipeline ObjectDetection" 11 | -------------------------------------------------------------------------------- /train_slurm_sparse.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | srun --gpus=1\ 3 | --mem-per-gpu=32G\ 4 | --cpus-per-gpu=16\ 5 | --qos=eaton-high\ 6 | --time=48:10:00\ 7 | --partition=eaton-compute \ 8 | --exclude=node-1080ti-3\ 9 | --container-mounts=/scratch:/scratch,/home/kvedder/code/Open3D-ML:/project\ 10 | --container-image=docker-registry.grasp.cluster#open3dml \ 11 | bash -c "source set_open3d_ml_root.sh; OMP_NUM_THREADS=1 python scripts/run_pipeline.py torch -c ml3d/configs/pointpillars_habitat_sampling_sparse.yml --dataset_path /scratch/kvedder/habitat_sampling_large --pipeline ObjectDetection" 12 | 
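# Editor's note: the train_slurm_sparse_* scripts below differ only in the
# config file and the time limit; a parameterized sweep sketch (the generic
# launcher name `train_slurm_generic.sh` is hypothetical, not part of the repo):
#
#   for cfg in pointpillars_habitat_sampling_sparse_01_wd_{0,2,4,8,16,32}.yml; do
#       sbatch train_slurm_generic.sh "$cfg"
#   done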
-------------------------------------------------------------------------------- /train_slurm_sparse_001_adam_defaults_0.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | srun --gpus=1\ 3 | --mem-per-gpu=24G\ 4 | --cpus-per-gpu=16\ 5 | --qos=eaton-high\ 6 | --time=18:10:00\ 7 | --partition=eaton-compute \ 8 | --exclude=node-1080ti-3\ 9 | --container-mounts=/scratch:/scratch,/home/kvedder/code/Open3D-ML:/project\ 10 | --container-image=docker-registry.grasp.cluster#open3dml \ 11 | bash -c "source set_open3d_ml_root.sh; OMP_NUM_THREADS=1 python scripts/run_pipeline.py torch -c ml3d/configs/pointpillars_habitat_sampling_sparse_001_adam_defaults_0.yml --dataset_path /scratch/kvedder/habitat_sampling_large --pipeline ObjectDetection" 12 | -------------------------------------------------------------------------------- /train_slurm_sparse_001_wd_4.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | srun --gpus=1\ 3 | --mem-per-gpu=24G\ 4 | --cpus-per-gpu=16\ 5 | --qos=eaton-high\ 6 | --time=18:10:00\ 7 | --partition=eaton-compute \ 8 | --exclude=node-1080ti-3\ 9 | --container-mounts=/scratch:/scratch,/home/kvedder/code/Open3D-ML:/project\ 10 | --container-image=docker-registry.grasp.cluster#open3dml \ 11 | bash -c "source set_open3d_ml_root.sh; OMP_NUM_THREADS=1 python scripts/run_pipeline.py torch -c ml3d/configs/pointpillars_habitat_sampling_sparse_001_wd_4.yml --dataset_path /scratch/kvedder/habitat_sampling_large --pipeline ObjectDetection" 12 | -------------------------------------------------------------------------------- /train_slurm_sparse_01.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | srun --gpus=1\ 3 | --mem-per-gpu=32G\ 4 | --cpus-per-gpu=16\ 5 | --qos=eaton-high\ 6 | --time=48:10:00\ 7 | --partition=eaton-compute \ 8 | --exclude=node-1080ti-3\ 9 | --container-mounts=/scratch:/scratch,/home/kvedder/code/Open3D-ML:/project\ 10 | --container-image=docker-registry.grasp.cluster#open3dml \ 11 | bash -c "source set_open3d_ml_root.sh; OMP_NUM_THREADS=1 python scripts/run_pipeline.py torch -c ml3d/configs/pointpillars_habitat_sampling_sparse_01.yml --dataset_path /scratch/kvedder/habitat_sampling_large --pipeline ObjectDetection" 12 | -------------------------------------------------------------------------------- /train_slurm_sparse_01_adam_defaults_0.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | srun --gpus=1\ 3 | --mem-per-gpu=24G\ 4 | --cpus-per-gpu=16\ 5 | --qos=eaton-high\ 6 | --time=18:10:00\ 7 | --partition=eaton-compute \ 8 | --exclude=node-1080ti-3\ 9 | --container-mounts=/scratch:/scratch,/home/kvedder/code/Open3D-ML:/project\ 10 | --container-image=docker-registry.grasp.cluster#open3dml \ 11 | bash -c "source set_open3d_ml_root.sh; OMP_NUM_THREADS=1 python scripts/run_pipeline.py torch -c ml3d/configs/pointpillars_habitat_sampling_sparse_01_adam_defaults_0.yml --dataset_path /scratch/kvedder/habitat_sampling_large --pipeline ObjectDetection" 12 | -------------------------------------------------------------------------------- /train_slurm_sparse_01_wd_0.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | srun --gpus=1\ 3 | --mem-per-gpu=24G\ 4 | --cpus-per-gpu=16\ 5 | --qos=eaton-high\ 6 | --time=18:10:00\ 7 | --partition=eaton-compute \ 8 | --exclude=node-1080ti-3\ 9 | 
--container-mounts=/scratch:/scratch,/home/kvedder/code/Open3D-ML:/project\ 10 | --container-image=docker-registry.grasp.cluster#open3dml \ 11 | bash -c "source set_open3d_ml_root.sh; OMP_NUM_THREADS=1 python scripts/run_pipeline.py torch -c ml3d/configs/pointpillars_habitat_sampling_sparse_01_wd_0.yml --dataset_path /scratch/kvedder/habitat_sampling_large --pipeline ObjectDetection" 12 | -------------------------------------------------------------------------------- /train_slurm_sparse_01_wd_005.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | srun --gpus=1\ 3 | --mem-per-gpu=24G\ 4 | --cpus-per-gpu=16\ 5 | --qos=eaton-high\ 6 | --time=18:10:00\ 7 | --partition=eaton-compute \ 8 | --exclude=node-1080ti-3\ 9 | --container-mounts=/scratch:/scratch,/home/kvedder/code/Open3D-ML:/project\ 10 | --container-image=docker-registry.grasp.cluster#open3dml \ 11 | bash -c "source set_open3d_ml_root.sh; OMP_NUM_THREADS=1 python scripts/run_pipeline.py torch -c ml3d/configs/pointpillars_habitat_sampling_sparse_01_wd_005.yml --dataset_path /scratch/kvedder/habitat_sampling_large --pipeline ObjectDetection" 12 | -------------------------------------------------------------------------------- /train_slurm_sparse_01_wd_01.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | srun --gpus=1\ 3 | --mem-per-gpu=24G\ 4 | --cpus-per-gpu=16\ 5 | --qos=eaton-high\ 6 | --time=18:10:00\ 7 | --partition=eaton-compute \ 8 | --exclude=node-1080ti-3\ 9 | --container-mounts=/scratch:/scratch,/home/kvedder/code/Open3D-ML:/project\ 10 | --container-image=docker-registry.grasp.cluster#open3dml \ 11 | bash -c "source set_open3d_ml_root.sh; OMP_NUM_THREADS=1 python scripts/run_pipeline.py torch -c ml3d/configs/pointpillars_habitat_sampling_sparse_01_wd_01.yml --dataset_path /scratch/kvedder/habitat_sampling_large --pipeline ObjectDetection" 12 | -------------------------------------------------------------------------------- /train_slurm_sparse_01_wd_16.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | srun --gpus=1\ 3 | --mem-per-gpu=24G\ 4 | --cpus-per-gpu=16\ 5 | --qos=eaton-high\ 6 | --time=14:10:00\ 7 | --partition=eaton-compute \ 8 | --exclude=node-1080ti-3\ 9 | --container-mounts=/scratch:/scratch,/home/kvedder/code/Open3D-ML:/project\ 10 | --container-image=docker-registry.grasp.cluster#open3dml \ 11 | bash -c "source set_open3d_ml_root.sh; OMP_NUM_THREADS=1 python scripts/run_pipeline.py torch -c ml3d/configs/pointpillars_habitat_sampling_sparse_01_wd_16.yml --dataset_path /scratch/kvedder/habitat_sampling_large --pipeline ObjectDetection" 12 | -------------------------------------------------------------------------------- /train_slurm_sparse_01_wd_2.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | srun --gpus=1\ 3 | --mem-per-gpu=24G\ 4 | --cpus-per-gpu=16\ 5 | --qos=eaton-high\ 6 | --time=18:10:00\ 7 | --partition=eaton-compute \ 8 | --exclude=node-1080ti-3\ 9 | --container-mounts=/scratch:/scratch,/home/kvedder/code/Open3D-ML:/project\ 10 | --container-image=docker-registry.grasp.cluster#open3dml \ 11 | bash -c "source set_open3d_ml_root.sh; OMP_NUM_THREADS=1 python scripts/run_pipeline.py torch -c ml3d/configs/pointpillars_habitat_sampling_sparse_01_wd_2.yml --dataset_path /scratch/kvedder/habitat_sampling_large --pipeline ObjectDetection" 12 | 
-------------------------------------------------------------------------------- /train_slurm_sparse_01_wd_32.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | srun --gpus=1\ 3 | --mem-per-gpu=24G\ 4 | --cpus-per-gpu=16\ 5 | --qos=eaton-high\ 6 | --time=14:10:00\ 7 | --partition=eaton-compute \ 8 | --exclude=node-1080ti-3\ 9 | --container-mounts=/scratch:/scratch,/home/kvedder/code/Open3D-ML:/project\ 10 | --container-image=docker-registry.grasp.cluster#open3dml \ 11 | bash -c "source set_open3d_ml_root.sh; OMP_NUM_THREADS=1 python scripts/run_pipeline.py torch -c ml3d/configs/pointpillars_habitat_sampling_sparse_01_wd_32.yml --dataset_path /scratch/kvedder/habitat_sampling_large --pipeline ObjectDetection" 12 | -------------------------------------------------------------------------------- /train_slurm_sparse_01_wd_4.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | srun --gpus=1\ 3 | --mem-per-gpu=24G\ 4 | --cpus-per-gpu=16\ 5 | --qos=eaton-high\ 6 | --time=18:10:00\ 7 | --partition=eaton-compute \ 8 | --exclude=node-1080ti-3\ 9 | --container-mounts=/scratch:/scratch,/home/kvedder/code/Open3D-ML:/project\ 10 | --container-image=docker-registry.grasp.cluster#open3dml \ 11 | bash -c "source set_open3d_ml_root.sh; OMP_NUM_THREADS=1 python scripts/run_pipeline.py torch -c ml3d/configs/pointpillars_habitat_sampling_sparse_01_wd_4.yml --dataset_path /scratch/kvedder/habitat_sampling_large --pipeline ObjectDetection" 12 | -------------------------------------------------------------------------------- /train_slurm_sparse_01_wd_4_5000_2x2.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | srun --gpus=1\ 3 | --mem-per-gpu=24G\ 4 | --cpus-per-gpu=16\ 5 | --qos=eaton-high\ 6 | --time=18:10:00\ 7 | --partition=eaton-compute \ 8 | --exclude=node-1080ti-3\ 9 | --container-mounts=/scratch:/scratch,/home/kvedder/code/Open3D-ML:/project\ 10 | --container-image=docker-registry.grasp.cluster#open3dml \ 11 | bash -c "source set_open3d_ml_root.sh; OMP_NUM_THREADS=1 python scripts/run_pipeline.py torch -c ml3d/configs/pointpillars_habitat_sampling_sparse_01_wd_4_5000_2x2.yml --dataset_path /scratch/kvedder/habitat_sampling_large --pipeline ObjectDetection" 12 | -------------------------------------------------------------------------------- /train_slurm_sparse_01_wd_4_5000_2x2_mean_box.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | srun --gpus=1\ 3 | --mem-per-gpu=24G\ 4 | --cpus-per-gpu=16\ 5 | --qos=eaton-high\ 6 | --time=18:10:00\ 7 | --partition=eaton-compute \ 8 | --exclude=node-1080ti-3\ 9 | --container-mounts=/scratch:/scratch,/home/kvedder/code/Open3D-ML:/project\ 10 | --container-image=docker-registry.grasp.cluster#open3dml \ 11 | bash -c "source set_open3d_ml_root.sh; OMP_NUM_THREADS=1 python scripts/run_pipeline.py torch -c ml3d/configs/pointpillars_habitat_sampling_sparse_01_wd_4_5000_2x2_mean_box.yml --dataset_path /scratch/kvedder/habitat_sampling_large --pipeline ObjectDetection" 12 | -------------------------------------------------------------------------------- /train_slurm_sparse_01_wd_4_7500.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | srun --gpus=1\ 3 | --mem-per-gpu=24G\ 4 | --cpus-per-gpu=16\ 5 | --qos=eaton-high\ 6 | --time=28:10:00\ 7 | --partition=eaton-compute \ 8 | --exclude=node-1080ti-3\ 9 | 
--container-mounts=/scratch:/scratch,/home/kvedder/code/Open3D-ML:/project\ 10 | --container-image=docker-registry.grasp.cluster#open3dml \ 11 | bash -c "source set_open3d_ml_root.sh; OMP_NUM_THREADS=1 python scripts/run_pipeline.py torch -c ml3d/configs/pointpillars_habitat_sampling_sparse_01_wd_4_7500.yml --dataset_path /scratch/kvedder/habitat_sampling_large --pipeline ObjectDetection" 12 | -------------------------------------------------------------------------------- /train_slurm_sparse_01_wd_4_7500_2x2.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | srun --gpus=1\ 3 | --mem-per-gpu=24G\ 4 | --cpus-per-gpu=16\ 5 | --qos=eaton-high\ 6 | --time=28:10:00\ 7 | --partition=eaton-compute \ 8 | --exclude=node-1080ti-3\ 9 | --container-mounts=/scratch:/scratch,/home/kvedder/code/Open3D-ML:/project\ 10 | --container-image=docker-registry.grasp.cluster#open3dml \ 11 | bash -c "source set_open3d_ml_root.sh; OMP_NUM_THREADS=1 python scripts/run_pipeline.py torch -c ml3d/configs/pointpillars_habitat_sampling_sparse_01_wd_4_7500_2x2.yml --dataset_path /scratch/kvedder/habitat_sampling_large --pipeline ObjectDetection" 12 | -------------------------------------------------------------------------------- /train_slurm_sparse_01_wd_8.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | srun --gpus=1\ 3 | --mem-per-gpu=24G\ 4 | --cpus-per-gpu=16\ 5 | --qos=eaton-high\ 6 | --time=14:10:00\ 7 | --partition=eaton-compute \ 8 | --exclude=node-1080ti-3\ 9 | --container-mounts=/scratch:/scratch,/home/kvedder/code/Open3D-ML:/project\ 10 | --container-image=docker-registry.grasp.cluster#open3dml \ 11 | bash -c "source set_open3d_ml_root.sh; OMP_NUM_THREADS=1 python scripts/run_pipeline.py torch -c ml3d/configs/pointpillars_habitat_sampling_sparse_01_wd_8.yml --dataset_path /scratch/kvedder/habitat_sampling_large --pipeline ObjectDetection" 12 | -------------------------------------------------------------------------------- /train_slurm_sparse_05.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | srun --gpus=1\ 3 | --mem-per-gpu=32G\ 4 | --cpus-per-gpu=16\ 5 | --qos=eaton-high\ 6 | --time=48:10:00\ 7 | --partition=eaton-compute \ 8 | --exclude=node-1080ti-3\ 9 | --container-mounts=/scratch:/scratch,/home/kvedder/code/Open3D-ML:/project\ 10 | --container-image=docker-registry.grasp.cluster#open3dml \ 11 | bash -c "source set_open3d_ml_root.sh; OMP_NUM_THREADS=1 python scripts/run_pipeline.py torch -c ml3d/configs/pointpillars_habitat_sampling_sparse_05.yml --dataset_path /scratch/kvedder/habitat_sampling_large --pipeline ObjectDetection" 12 | -------------------------------------------------------------------------------- /train_slurm_sparse_1.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | srun --gpus=1\ 3 | --mem-per-gpu=32G\ 4 | --cpus-per-gpu=16\ 5 | --qos=eaton-high\ 6 | --time=48:10:00\ 7 | --partition=eaton-compute \ 8 | --exclude=node-1080ti-3\ 9 | --container-mounts=/scratch:/scratch,/home/kvedder/code/Open3D-ML:/project\ 10 | --container-image=docker-registry.grasp.cluster#open3dml \ 11 | bash -c "source set_open3d_ml_root.sh; OMP_NUM_THREADS=1 python scripts/run_pipeline.py torch -c ml3d/configs/pointpillars_habitat_sampling_sparse_1.yml --dataset_path /scratch/kvedder/habitat_sampling_large --pipeline ObjectDetection" 12 | 
--------------------------------------------------------------------------------
/train_slurm_sparse_10.sh:
--------------------------------------------------------------------------------
#!/bin/bash
srun --gpus=1 \
  --mem-per-gpu=32G \
  --cpus-per-gpu=16 \
  --qos=eaton-high \
  --time=48:10:00 \
  --partition=eaton-compute \
  --exclude=node-1080ti-3 \
  --container-mounts=/scratch:/scratch,/home/kvedder/code/Open3D-ML:/project \
  --container-image=docker-registry.grasp.cluster#open3dml \
  bash -c "source set_open3d_ml_root.sh; OMP_NUM_THREADS=1 python scripts/run_pipeline.py torch -c ml3d/configs/pointpillars_habitat_sampling_sparse_10.yml --dataset_path /scratch/kvedder/habitat_sampling_large --pipeline ObjectDetection"
--------------------------------------------------------------------------------
/train_slurm_sparse_3.sh:
--------------------------------------------------------------------------------
#!/bin/bash
srun --gpus=1 \
  --mem-per-gpu=32G \
  --cpus-per-gpu=16 \
  --qos=eaton-high \
  --time=48:10:00 \
  --partition=eaton-compute \
  --exclude=node-1080ti-3 \
  --container-mounts=/scratch:/scratch,/home/kvedder/code/Open3D-ML:/project \
  --container-image=docker-registry.grasp.cluster#open3dml \
  bash -c "source set_open3d_ml_root.sh; OMP_NUM_THREADS=1 python scripts/run_pipeline.py torch -c ml3d/configs/pointpillars_habitat_sampling_sparse_3.yml --dataset_path /scratch/kvedder/habitat_sampling_large --pipeline ObjectDetection"
--------------------------------------------------------------------------------
/train_slurm_sparse_5.sh:
--------------------------------------------------------------------------------
#!/bin/bash
srun --gpus=1 \
  --mem-per-gpu=32G \
  --cpus-per-gpu=16 \
  --qos=eaton-high \
  --time=48:10:00 \
  --partition=eaton-compute \
  --exclude=node-1080ti-3 \
  --container-mounts=/scratch:/scratch,/home/kvedder/code/Open3D-ML:/project \
  --container-image=docker-registry.grasp.cluster#open3dml \
  bash -c "source set_open3d_ml_root.sh; OMP_NUM_THREADS=1 python scripts/run_pipeline.py torch -c ml3d/configs/pointpillars_habitat_sampling_sparse_5.yml --dataset_path /scratch/kvedder/habitat_sampling_large --pipeline ObjectDetection"
--------------------------------------------------------------------------------
/train_slurm_sparse_5000_2x2_mean_box.sh:
--------------------------------------------------------------------------------
#!/bin/bash
srun --gpus=1 \
  --mem-per-gpu=24G \
  --cpus-per-gpu=16 \
  --qos=eaton-high \
  --time=18:10:00 \
  --partition=eaton-compute \
  --exclude=node-1080ti-3 \
  --container-mounts=/scratch:/scratch,/home/kvedder/code/Open3D-ML:/project \
  --container-image=docker-registry.grasp.cluster#open3dml \
  bash -c "source set_open3d_ml_root.sh; OMP_NUM_THREADS=1 python scripts/run_pipeline.py torch -c ml3d/configs/pointpillars_habitat_sampling_sparse_5000_2x2_mean_box.yml --dataset_path /scratch/kvedder/habitat_sampling_large --pipeline ObjectDetection"
--------------------------------------------------------------------------------
/train_slurm_sparse_5000_2x2_mean_box_wd_100.sh:
--------------------------------------------------------------------------------
#!/bin/bash
srun --gpus=1 \
  --mem-per-gpu=24G \
  --cpus-per-gpu=16 \
  --qos=eaton-high \
  --time=18:10:00 \
  --partition=eaton-compute \
  --exclude=node-1080ti-3 \
  --container-mounts=/scratch:/scratch,/home/kvedder/code/Open3D-ML:/project \
  --container-image=docker-registry.grasp.cluster#open3dml \
  bash -c "source set_open3d_ml_root.sh; OMP_NUM_THREADS=1 python scripts/run_pipeline.py torch -c ml3d/configs/pointpillars_habitat_sampling_sparse_5000_2x2_mean_box_wd_100.yml --dataset_path /scratch/kvedder/habitat_sampling_large --pipeline ObjectDetection"
--------------------------------------------------------------------------------
/version.txt:
--------------------------------------------------------------------------------
OPEN3D_VERSION_MAJOR 0
OPEN3D_VERSION_MINOR 12
OPEN3D_VERSION_PATCH 0
--------------------------------------------------------------------------------
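Note: version.txt stores the version as three space-separated key/value fields,
corresponding to Open3D 0.12.0. A minimal sketch of assembling the dotted
version string from that format (the snippet is illustrative, not part of the
repo):

#!/bin/bash
# Read the three fields of version.txt into a "MAJOR.MINOR.PATCH" string.
awk '{v[$1]=$2} END {printf "%d.%d.%d\n", v["OPEN3D_VERSION_MAJOR"], v["OPEN3D_VERSION_MINOR"], v["OPEN3D_VERSION_PATCH"]}' version.txt
# prints: 0.12.0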