├── .dockerignore ├── .gitignore ├── Dockerfile ├── Dockerfile_cuda:11.8.0 ├── LICENSE ├── LICENSE.md ├── Makefile ├── README.md ├── bash_helpers ├── get_sample_labelled_data.sh ├── get_sample_small_unlabelled.sh ├── merge_multiple_results.sh └── run_remove_outer_points.sh ├── big_table_creation ├── __init__.py ├── merge_files.py ├── run_compute_per_class_metric_for_instance.sh ├── run_compute_per_class_metric_other.sh └── vizualize_small_trees.py ├── build.sh ├── compute_capacity.sh ├── conf ├── config.yaml ├── config_debug.yaml ├── data │ ├── default.yaml │ └── panoptic │ │ ├── default.yaml │ │ ├── npm3d-sparseconv_grid_012_R_10_cylinder_area4.yaml │ │ ├── npm3d-sparseconv_grid_012_R_12_cylinder_area4.yaml │ │ ├── npm3d-sparseconv_grid_012_R_14_cylinder_area4.yaml │ │ ├── npm3d-sparseconv_grid_012_R_16_cylinder_area1.yaml │ │ ├── npm3d-sparseconv_grid_012_R_16_cylinder_area2.yaml │ │ ├── npm3d-sparseconv_grid_012_R_16_cylinder_area3.yaml │ │ ├── npm3d-sparseconv_grid_012_R_16_cylinder_area4.yaml │ │ ├── npm3d-sparseconv_grid_014_R_16_cylinder_area4.yaml │ │ └── treeins_rad8.yaml ├── debugging │ ├── default.yaml │ ├── early_break.yaml │ └── find_neighbour_dist.yaml ├── eval.yaml ├── hydra │ ├── job_logging │ │ └── custom.yaml │ └── output │ │ └── custom.yaml ├── lr_scheduler │ ├── cosine.yaml │ ├── cyclic.yaml │ ├── exponential.yaml │ ├── multi_step.yaml │ ├── multi_step_reg.yaml │ ├── plateau.yaml │ ├── poly_lr.yaml │ └── step.yaml ├── models │ ├── default.yaml │ └── panoptic │ │ ├── area4_ablation_14.yaml │ │ ├── area4_ablation_15.yaml │ │ ├── area4_ablation_19.yaml │ │ ├── area4_ablation_3heads_5.yaml │ │ └── area4_ablation_3heads_6.yaml ├── sota.yaml ├── task │ ├── default.yaml │ ├── panoptic.yaml │ └── segmentation.yaml ├── training │ ├── 1_area1.yaml │ ├── 1_area2.yaml │ ├── 1_area3.yaml │ ├── 1_area4.yaml │ ├── 4_area1.yaml │ ├── 4_area2.yaml │ ├── 4_area3.yaml │ ├── 4_area4.yaml │ ├── 6_area1.yaml │ ├── 6_area2.yaml │ ├── 6_area3.yaml │ ├── 6_area4.yaml │ ├── 7_area1.yaml │ ├── 7_area2.yaml │ ├── 7_area3.yaml │ ├── 7_area4.yaml │ ├── 8_area1.yaml │ ├── 8_area2.yaml │ ├── 8_area3.yaml │ ├── 8_area4.yaml │ ├── default.yaml │ └── treeins.yaml └── visualization │ ├── default.yaml │ └── eval.yaml ├── eval.py ├── evaluation_stats_FOR.py ├── evaluation_stats_NPM3D.py ├── forward_scripts ├── __init__.py ├── conf │ ├── config.yaml │ └── dataset │ │ └── shapenet.yaml ├── forward.py └── notebooks │ └── viz_shapenet.ipynb ├── merge_all.sh ├── metrics ├── __init__.py ├── aggregate_cvs_metrics.py ├── attach_labels_to_las_file_pred2gt.py ├── inst_seg_visualizer.py ├── instance_segmentation_metrics.py ├── instance_segmentation_metrics_in_folder.py ├── metric.py └── metrics_sem_seg.py ├── model_file ├── .gitattributes ├── .hydra │ └── overrides.yaml └── PointGroup-PAPER.pt ├── mypy.ini ├── nibio_inference ├── __init__.py ├── attach_gt_2_pred.py ├── bring_back_to_utm_coordinates.py ├── clear_cache.py ├── distance_filtering_dem_based.py ├── fix_naming_of_input_files.py ├── las_to_pandas.py ├── merge_inference_results.py ├── merge_inference_results_in_folders.py ├── merge_point_cloud.py ├── merge_pt_ss_is.py ├── merge_pt_ss_is_in_folders.py ├── merge_pt_ss_is_in_folders_parallel.py ├── merge_pt_ss_is_knn.py ├── modify_eval.py ├── pandas_to_las.py ├── pandas_to_ply.py ├── pipeline_local2utm.py ├── pipeline_utm2local.py ├── pipeline_utm2local_parallel.py ├── ply_to_pandas.py ├── remove_outer_points_for_instance.py ├── rename_result_files_instance.py ├── rename_result_files_segmentation.py └── 
split_point_cloud.py ├── nibio_sparsify ├── __init__.py ├── sparsify_las_based_sq_m.py └── sparsify_las_based_sq_m_in_folder.py ├── oracle_wrapper.py ├── poetry.lock ├── pyproject.toml ├── run_bash_in_podman_with_gpu.sh ├── run_batch_inference.sh ├── run_docker_locally.sh ├── run_inference.sh ├── run_oracle_pipeline.sh ├── run_paper_test.sh ├── run_pipeline.sh ├── run_podman_with_gpu.sh ├── sample_data_conversion.py ├── scripts ├── cv_s3dis.py ├── datasets │ └── download-scannet.py ├── find_env.py ├── find_runs.py ├── omegaconf2dict.py ├── sanity_check │ └── scannet_check.py ├── switch_cuda.sh ├── test_registration_scripts │ ├── conf │ │ ├── config.yaml │ │ └── fpfh.yaml │ ├── demo_kitti.py │ ├── descriptor_matcher.py │ ├── evaluate.py │ ├── fpfh.py │ ├── misc.py │ ├── save_feature.py │ └── see_matches.py └── visualizations │ ├── __init__.py │ ├── modelnet.py │ ├── o3d_utils.py │ ├── s3dis_panoptic.py │ └── scannet_panoptic.py ├── torch_points3d ├── __init__.py ├── applications │ ├── __init__.py │ ├── conf │ │ ├── kpconv │ │ │ ├── encoder_4.yaml │ │ │ └── unet_4.yaml │ │ ├── pointnet2 │ │ │ ├── encoder_3_ms.yaml │ │ │ ├── unet_3_ms.yaml │ │ │ ├── unet_3_ss.yaml │ │ │ └── unet_4_ss.yaml │ │ ├── rsconv │ │ │ ├── encoder_4.yaml │ │ │ └── unet_4.yaml │ │ └── sparseconv3d │ │ │ ├── encoder_2.yaml │ │ │ ├── encoder_4.yaml │ │ │ ├── unet_2.yaml │ │ │ └── unet_4.yaml │ ├── kpconv.py │ ├── minkowski.py │ ├── modelfactory.py │ ├── models.py │ ├── pointnet2.py │ ├── pretrained_api.py │ ├── rsconv.py │ ├── sparseconv3d.py │ └── utils.py ├── core │ ├── __init__.py │ ├── base_conv │ │ ├── __init__.py │ │ ├── base_conv.py │ │ ├── dense.py │ │ ├── message_passing.py │ │ └── partial_dense.py │ ├── common_modules │ │ ├── __init__.py │ │ ├── base_modules.py │ │ ├── dense_modules.py │ │ ├── gathering.py │ │ └── spatial_transform.py │ ├── data_transform │ │ ├── __init__.py │ │ ├── feature_augment.py │ │ ├── features.py │ │ ├── filters.py │ │ ├── grid_transform.py │ │ ├── inference_transforms.py │ │ ├── prebatchcollate.py │ │ ├── precollate.py │ │ ├── sparse_transforms.py │ │ ├── transforms.py │ │ └── transforms_backup.py │ ├── initializer │ │ ├── __init__.py │ │ └── initializer.py │ ├── losses │ │ ├── __init__.py │ │ ├── dirichlet_loss.py │ │ ├── huber_loss.py │ │ ├── losses.py │ │ ├── metric_losses.py │ │ └── panoptic_losses.py │ ├── regularizer │ │ ├── __init__.py │ │ └── regularizers.py │ ├── schedulers │ │ ├── __init__.py │ │ ├── bn_schedulers.py │ │ └── lr_schedulers.py │ └── spatial_ops │ │ ├── __init__.py │ │ ├── interpolate.py │ │ ├── neighbour_finder.py │ │ └── sampling.py ├── datasets │ ├── __init__.py │ ├── base_dataset.py │ ├── batch.py │ ├── classification │ │ ├── __init__.py │ │ └── modelnet.py │ ├── dataset_factory.py │ ├── multiscale_data.py │ ├── object_detection │ │ ├── __init__.py │ │ ├── box_data.py │ │ ├── scannet.py │ │ └── scannet_metadata │ │ │ └── scannet_means.npz │ ├── panoptic │ │ ├── __init__.py │ │ ├── npm3d.py │ │ ├── treeins.py │ │ ├── treeins_set1.py │ │ └── utils.py │ ├── registration │ │ ├── __init__.py │ │ ├── base3dmatch.py │ │ ├── base_kitti.py │ │ ├── base_siamese_dataset.py │ │ ├── basetest.py │ │ ├── detector.py │ │ ├── fusion.py │ │ ├── general3dmatch.py │ │ ├── kitti.py │ │ ├── modelnet.py │ │ ├── pair.py │ │ ├── test3dmatch.py │ │ ├── testeth.py │ │ ├── testkaist.py │ │ ├── testplanetary.py │ │ ├── testtum.py │ │ ├── urls │ │ │ ├── split_test.txt │ │ │ ├── split_train.txt │ │ │ ├── url_7-scenes.txt │ │ │ ├── url_analysis-by-synthesis.txt │ │ │ ├── 
url_bundlefusion.txt │ │ │ ├── url_rgbd-scenes-v2.txt │ │ │ ├── url_sun3d.txt │ │ │ ├── url_test.txt │ │ │ ├── url_train.txt │ │ │ ├── url_train_small.txt │ │ │ ├── url_train_tiny.txt │ │ │ └── url_val.txt │ │ └── utils.py │ ├── samplers.py │ └── segmentation │ │ ├── __init__.py │ │ ├── forward │ │ ├── __init__.py │ │ └── shapenet.py │ │ ├── npm3d.py │ │ ├── s3dis.py │ │ ├── scannet.py │ │ ├── semantickitti.py │ │ ├── shapenet.py │ │ ├── treeins.py │ │ └── treeins_set1.py ├── metrics │ ├── __init__.py │ ├── base_tracker.py │ ├── box_detection │ │ ├── __init__.py │ │ └── ap.py │ ├── classification_tracker.py │ ├── colored_tqdm.py │ ├── confusion_matrix.py │ ├── meters.py │ ├── model_checkpoint.py │ ├── object_detection_tracker.py │ ├── panoptic_tracker.py │ ├── panoptic_tracker_italy.py │ ├── panoptic_tracker_mine.py │ ├── panoptic_tracker_npm3d.py │ ├── panoptic_tracker_pointgroup.py │ ├── panoptic_tracker_pointgroup_npm3d.py │ ├── panoptic_tracker_pointgroup_stpls3d.py │ ├── panoptic_tracker_pointgroup_stpls3d_old.py │ ├── panoptic_tracker_pointgroup_treeins.py │ ├── panoptic_tracker_pointgroup_treeins_partseg.py │ ├── panoptic_tracker_s3dis.py │ ├── registration_metrics.py │ ├── registration_tracker.py │ ├── s3dis_tracker.py │ ├── scannet_segmentation_tracker.py │ ├── segmentation_helpers.py │ ├── segmentation_tracker.py │ └── shapenet_part_tracker.py ├── models │ ├── __init__.py │ ├── base_architectures │ │ ├── __init__.py │ │ ├── backbone.py │ │ └── unet.py │ ├── base_model.py │ ├── model_factory.py │ ├── model_interface.py │ ├── panoptic │ │ ├── PointGroup3heads.py │ │ ├── PointGroup3heads_backup.py │ │ ├── __init__.py │ │ ├── kpconv.py │ │ ├── minkowski.py │ │ ├── ply.py │ │ ├── pointgroup.py │ │ ├── pointgroupbackup.py │ │ ├── pointgroupembed.py │ │ ├── pointnet2.py │ │ ├── structure_3heads.py │ │ ├── structure_3heads_backup.py │ │ ├── structures.py │ │ ├── structures_embed.py │ │ └── structures_mine.py │ └── segmentation │ │ ├── __init__.py │ │ ├── base.py │ │ ├── kpconv.py │ │ ├── minkowski.py │ │ ├── ms_svconv3d.py │ │ ├── pointcnn.py │ │ ├── pointnet.py │ │ ├── pointnet2.py │ │ ├── ppnet.py │ │ ├── pvcnn.py │ │ ├── randlanet.py │ │ ├── rsconv.py │ │ └── sparseconv3d.py ├── modules │ ├── KPConv │ │ ├── __init__.py │ │ ├── blocks.py │ │ ├── convolution_ops.py │ │ ├── kernel_utils.py │ │ ├── kernels.py │ │ ├── losses.py │ │ └── plyutils.py │ ├── MinkowskiEngine │ │ ├── __init__.py │ │ ├── api_modules.py │ │ ├── common.py │ │ ├── modules.py │ │ ├── networks.py │ │ ├── res16unet.py │ │ └── resunet.py │ ├── PPNet │ │ ├── __init__.py │ │ ├── blocks.py │ │ └── ops.py │ ├── PVCNN │ │ ├── blocks.py │ │ ├── pvcnn.py │ │ └── utils.py │ ├── PointCNN │ │ ├── __init__.py │ │ └── modules.py │ ├── PointNet │ │ ├── __init__.py │ │ └── modules.py │ ├── RSConv │ │ ├── __init__.py │ │ ├── dense.py │ │ ├── message_passing.py │ │ └── original_model.txt │ ├── RandLANet │ │ ├── __init__.py │ │ └── modules.py │ ├── SparseConv3d │ │ ├── __init__.py │ │ ├── modules.py │ │ └── nn │ │ │ ├── __init__.py │ │ │ ├── minkowski.py │ │ │ └── torchsparse.py │ ├── VoteNet │ │ ├── __init__.py │ │ ├── dense_samplers.py │ │ ├── loss_helper.py │ │ ├── proposal_module.py │ │ ├── votenet_results.py │ │ └── voting_module.py │ ├── __init__.py │ └── pointnet2 │ │ ├── __init__.py │ │ ├── dense.py │ │ └── message_passing.py ├── trainer.py ├── utils │ ├── __init__.py │ ├── batch_seed.py │ ├── batch_seed_euc.py │ ├── box_utils.py │ ├── colors.py │ ├── config.py │ ├── debugging_vars.py │ ├── download.py │ ├── enums.py │ ├── 
geometry.py │ ├── hdbscan_cluster.py │ ├── hdbscan_cluster_gpu.py │ ├── mean_shift_cos_gpu.py │ ├── mean_shift_euc_gpu.py │ ├── meanshift_cluster.py │ ├── mock.py │ ├── model_building_utils │ │ ├── activation_resolver.py │ │ ├── model_definition_resolver.py │ │ └── resolver_utils.py │ ├── o3d_utils.py │ ├── registration.py │ ├── running_stats.py │ ├── timer.py │ ├── transform_utils.py │ └── wandb_utils.py └── visualization │ ├── __init__.py │ ├── experiment_manager.py │ └── visualizer.py ├── train.py └── visualization ├── __init__.py └── viz.py /.dockerignore: -------------------------------------------------------------------------------- 1 | # add data folder 2 | /data_bucket/ 3 | /data/ 4 | /processed_data_full/ 5 | /processed_data_ready_for_training/ 6 | /processed_data_ready_for_training_sparse_1000/ 7 | /processed_data_ready_for_training_sparse_1000_500_100_10/processed_0.2_test/ 8 | /B_temp_folder/ 9 | /sample_test_data/ 10 | /small_data_for_test/ 11 | # /processed_data_ready_for_training_sparse_1000_500_100_10/ 12 | /outputs/from_gpu26/ 13 | # !/outputs/from_gpu26/mls_data_run-PointGroup-PAPER-20230922_083127 14 | /bucket_in_folder/ 15 | /bucket_out_folder/ 16 | /docker_in_folder/ 17 | /docker_out_folder/ 18 | /for_instance_no_outer_sparse_many_times/ 19 | /for_instance_no_outer_sparse/ 20 | /maciek/ 21 | /data_for_test_results_final/ 22 | /data_for_test/ 23 | /outputs/from_gpu26/mls_data_run-PointGroup-PAPER-20230922_083127/eval/ 24 | /processed_data_ready_for_training_sparse_1000_500_100_10/treeinsfused/processed_0.2_test/ 25 | /temp_folder/ 26 | /helios_for_instance/ 27 | /Stefano/ 28 | 29 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 NIBIO 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | License 2 | ========== 3 | Unless otherwise indicated, all files this repository are 4 | 5 | Copyright (c) 2020, Principia Labs Ltd 6 | (nicolas.chaulet@gmail.com & thomas.chaton.ai@gmail.com) 7 | 8 | and are released under the terms of the BSD open source license. 
Overall license (BSD)
---------------------

Copyright (c) 2020, Principia Labs Ltd
(nicolas.chaulet@gmail.com & thomas.chaton.ai@gmail.com)

All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright
  notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
  notice, this list of conditions and the following disclaimer in
  the documentation and/or other materials provided with the distribution.
* Neither the name of Principia Labs Ltd nor the names of its
  contributors may be used to endorse or promote products derived
  from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
OF SUCH DAMAGE.
-------------------------------------------------------------------------------- /Makefile: --------------------------------------------------------------------------------
.PHONY: staticchecks
staticchecks:
	flake8 . --count --select=E9,F402,F6,F7,F5,F8,F9 --show-source --statistics
	mypy torch_points3d
-------------------------------------------------------------------------------- /bash_helpers/get_sample_labelled_data.sh: --------------------------------------------------------------------------------
#!/bin/bash

# This script is used to get the labelled data from the sample data
SOURCE_DIR="/home/nibio/mutable-outside-world/sample_test_data/labelled/"
DEST_DIR="/home/nibio/mutable-outside-world/data_for_test"

# clear the destination directory
rm -rf $DEST_DIR/*

# Copy the files
cp -r $SOURCE_DIR/* $DEST_DIR

# print the files copied
echo "Files copied:"
ls $DEST_DIR
-------------------------------------------------------------------------------- /bash_helpers/get_sample_small_unlabelled.sh: --------------------------------------------------------------------------------
#!/bin/bash

# This script is used to get the small unlabelled data from the sample data
SOURCE_DIR="/home/nibio/mutable-outside-world/sample_test_data/small_unlabelled/"
DEST_DIR="/home/nibio/mutable-outside-world/data_for_test"

# clear the destination directory
rm -rf $DEST_DIR/*

# Copy the files
cp -r $SOURCE_DIR/* $DEST_DIR

# print the files copied
echo "Files copied:"
ls $DEST_DIR
-------------------------------------------------------------------------------- /bash_helpers/merge_multiple_results.sh: --------------------------------------------------------------------------------
#!/bin/bash

# Assign command-line arguments to variables
results_folder=$1
sparse_folder=$2

# Ensure the results folder exists
mkdir -p "$results_folder"

# Base directory where the sparse results are located
base_dir="/home/nibio/mutable-outside-world/for_instance_no_outer_sparse_many_times/${sparse_folder}"

# Call the Python script for each results directory
for i in {10,25,50,75,100}; do
    python3 nibio_inference/merge_inference_results.py -i "${base_dir}/results_${i}" -o "${results_folder}/results_${i}.csv"
done

# Merge CSV files
{
    # Handle the first file separately to keep its header
    head -n 1 "${results_folder}/results_10.csv" &&

    # Loop through the files; tail -n +2 skips the header of each file
    for i in 100 75 50 25 10; do
        tail -n +2 "${results_folder}/results_${i}.csv"
    done
} > "${results_folder}/merged_results.csv"
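The brace-group trick in merge_multiple_results.sh (one header taken from results_10.csv, headers stripped from every other file) is the part that is easiest to get wrong. A minimal Python sketch of the same merge, assuming only that the per-level CSVs share a header row; pandas is an extra dependency the shell version does not need:

```python
from pathlib import Path

import pandas as pd

results_folder = Path("results")   # same role as $results_folder above
levels = [100, 75, 50, 25, 10]     # concatenation order used by the script

# pandas keeps exactly one header row when concatenating data frames,
# which is what the head/tail pipeline above reproduces by hand.
frames = [pd.read_csv(results_folder / f"results_{i}.csv") for i in levels]
merged = pd.concat(frames, ignore_index=True)
merged.to_csv(results_folder / "merged_results.csv", index=False)
```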
-------------------------------------------------------------------------------- /bash_helpers/run_remove_outer_points.sh: --------------------------------------------------------------------------------
# python3 nibio_inference/remove_outer_points_for_instance.py -i ~/data/m1_forstance_test/original_as_is/for_instance/ -o ~/data/m1_forstance_test/original_as_is/for_instance_no_outer

# run the same as above but for ~/data/m1_forstance_test/sparse_10
python3 nibio_inference/remove_outer_points_for_instance.py -i ~/data/m1_forstance_test/sparse_10/for_instance/ -o ~/data/m1_forstance_test/sparse_10/for_instance_no_outer

# run the same as above but for ~/data/m1_forstance_test/sparse_100
python3 nibio_inference/remove_outer_points_for_instance.py -i ~/data/m1_forstance_test/sparse_100/for_instance/ -o ~/data/m1_forstance_test/sparse_100/for_instance_no_outer

# run the same as above but for ~/data/m1_forstance_test/sparse_500
python3 nibio_inference/remove_outer_points_for_instance.py -i ~/data/m1_forstance_test/sparse_500/for_instance/ -o ~/data/m1_forstance_test/sparse_500/for_instance_no_outer

# run the same as above but for ~/data/m1_forstance_test/sparse_1000
python3 nibio_inference/remove_outer_points_for_instance.py -i ~/data/m1_forstance_test/sparse_1000/for_instance/ -o ~/data/m1_forstance_test/sparse_1000/for_instance_no_outer
-------------------------------------------------------------------------------- /big_table_creation/__init__.py: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/SmartForest-no/SegmentAnyTree/361650805e3d7c9664b9d5c44ac6af8047942832/big_table_creation/__init__.py
-------------------------------------------------------------------------------- /big_table_creation/run_compute_per_class_metric_other.sh: --------------------------------------------------------------------------------
#!/bin/bash
TMP_DIR=/home/nibio/mutable-outside-world/tmp_per_class_metric

# remove tmp dir if exists
rm -rf ${TMP_DIR}

# create tmp dir
mkdir ${TMP_DIR}

mkdir ${TMP_DIR}/austrian_plot_out
mkdir ${TMP_DIR}/english_plot_out
mkdir ${TMP_DIR}/german_plot_out
mkdir ${TMP_DIR}/mls_out

cp /home/nibio/data/test_data_agnostic_instanceSeg_treeins_agnostic_sparse_1000_500_100_10/results_/austrian_plot_out/final_results/*.laz ${TMP_DIR}/austrian_plot_out
cp /home/nibio/data/test_data_agnostic_instanceSeg_treeins_agnostic_sparse_1000_500_100_10/results_/english_plot_out/final_results/*.laz ${TMP_DIR}/english_plot_out
cp /home/nibio/data/test_data_agnostic_instanceSeg_treeins_agnostic_sparse_1000_500_100_10/results_/german_plot_out/final_results/*.laz ${TMP_DIR}/german_plot_out
cp /home/nibio/data/test_data_agnostic_instanceSeg_treeins_agnostic_sparse_1000_500_100_10/results_/mls_out/final_results/*.laz ${TMP_DIR}/mls_out

# copy metrics folder for each
cp -r /home/nibio/data/test_data_agnostic_instanceSeg_treeins_agnostic_sparse_1000_500_100_10/results_/austrian_plot_out/metrics_out ${TMP_DIR}/austrian_plot_out
cp -r /home/nibio/data/test_data_agnostic_instanceSeg_treeins_agnostic_sparse_1000_500_100_10/results_/english_plot_out/metrics_out ${TMP_DIR}/english_plot_out
cp -r /home/nibio/data/test_data_agnostic_instanceSeg_treeins_agnostic_sparse_1000_500_100_10/results_/german_plot_out/metrics_out ${TMP_DIR}/german_plot_out
cp -r /home/nibio/data/test_data_agnostic_instanceSeg_treeins_agnostic_sparse_1000_500_100_10/results_/mls_out/metrics_out ${TMP_DIR}/mls_out

echo "Done copying files"
-------------------------------------------------------------------------------- /build.sh: --------------------------------------------------------------------------------
#!/bin/sh
docker build -t nibio/e2e-oracle-inst-seg:latest .  # build for the Oracle registry; switch the flag out of debug mode before building
# docker build -t nibio/e2e-instance:latest .
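The four near-identical calls in run_remove_outer_points.sh above differ only in the sparsity level. A short Python sketch of the equivalent loop, relying only on the -i/-o interface those calls already use (paths as in the script):

```python
import subprocess
from pathlib import Path

BASE = Path.home() / "data" / "m1_forstance_test"

# One call per sparsity level, mirroring run_remove_outer_points.sh above.
for level in ("sparse_10", "sparse_100", "sparse_500", "sparse_1000"):
    subprocess.run(
        [
            "python3",
            "nibio_inference/remove_outer_points_for_instance.py",
            "-i", str(BASE / level / "for_instance"),
            "-o", str(BASE / level / "for_instance_no_outer"),
        ],
        check=True,  # abort on the first failing level
    )
```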
4 | -------------------------------------------------------------------------------- /compute_capacity.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Base directories 4 | BASE_DIR=/home/nibio/data/timing_check 5 | 6 | # Plot names 7 | declare -a PLOTS=("austrian_plot" "english_plot" "for_instance_no_outer" "german_plot" "mls") 8 | 9 | # Function to calculate folder sizes 10 | calculate_folder_sizes() { 11 | local ORIGINAL_DIR=$1 12 | 13 | for PLOT in "${PLOTS[@]}"; do 14 | echo "Calculating size for ${PLOT}" 15 | du -sh ${ORIGINAL_DIR}/${PLOT}/ | awk '{print $1 " GB\t" $2}' 16 | done 17 | } 18 | 19 | # Run size calculation for a given dataset (e.g., sparse_1000 or sparse_500) 20 | run_size_calculation() { 21 | local DATASET_NAME=$1 22 | 23 | echo "Calculating sizes for ${DATASET_NAME}..." 24 | 25 | local ORIGINAL_DIR=${BASE_DIR}/${DATASET_NAME} 26 | 27 | calculate_folder_sizes ${ORIGINAL_DIR} 28 | 29 | echo "Done calculating sizes for ${DATASET_NAME}" 30 | } 31 | 32 | # Execute the size calculations 33 | run_size_calculation "sparse_10" 34 | run_size_calculation "sparse_100" 35 | run_size_calculation "sparse_500" 36 | run_size_calculation "sparse_1000" 37 | run_size_calculation "original_as_is" 38 | 39 | # Additional datasets can be added as needed 40 | -------------------------------------------------------------------------------- /conf/config.yaml: -------------------------------------------------------------------------------- 1 | defaults: # for loading the default.yaml config 2 | - task: panoptic 3 | 4 | - visualization: default 5 | - lr_scheduler: exponential 6 | - training: ablation_area4_set2 #pointgroup_NPM3D-embed-cluster1_scratch1 7 | # 8 | - debugging: default 9 | - models: panoptic/area4_ablation_2 #panoptic/pointgroup-embed_clustertype1_scratch1 10 | - data: panoptic/npm3d-sparseconv_grid_012_R_8_area4 11 | - sota # Contains current SOTA results on different datasets (extracted from papers !). 12 | # - hydra/job_logging: custom 13 | - hydra/output: custom # add the support for user-defined experiment folder (where to save the experiment files) 14 | 15 | 16 | job_name: ablation_area4_set2 # prefix name for saving the experiment file. 17 | model_name: PointGroup-PAPER # Name of the specific model to load 18 | update_lr_scheduler_on: "on_epoch" # ["on_epoch", "on_num_batch", "on_num_sample"] 19 | selection_stage: "" 20 | pretty_print: False 21 | eval_frequency: 1 22 | 23 | tracker_options: # Extra options for the tracker 24 | full_res: False 25 | make_submission: False 26 | track_boxes: False -------------------------------------------------------------------------------- /conf/config_debug.yaml: -------------------------------------------------------------------------------- 1 | defaults: # for loading the default.yaml config 2 | - task: panoptic 3 | 4 | - visualization: default 5 | - lr_scheduler: exponential 6 | - training: npm3d_benchmark/kpconv-panoptic #kpconv_panoptic 7 | # 8 | - debugging: default 9 | - models: panoptic/kpconv 10 | - data: panoptic/npm3d-kpconv #panoptic/Italy-kpconv 11 | - sota # Contains current SOTA results on different datasets (extracted from papers !). 12 | # - hydra/job_logging: custom 13 | # - hydra/output: custom # add the support for user-defined experiment folder (where to save the experiment files) 14 | 15 | job_name: benchmark # prefix name for saving the experiment file. 
16 | model_name: KPConvPaperNPM3D # Name of the specific model to load 17 | update_lr_scheduler_on: "on_epoch" # ["on_epoch", "on_num_batch", "on_num_sample"] 18 | selection_stage: "" 19 | pretty_print: False 20 | eval_frequency: 1 21 | 22 | tracker_options: # Extra options for the tracker 23 | full_res: False 24 | make_submission: False 25 | track_boxes: False -------------------------------------------------------------------------------- /conf/data/default.yaml: -------------------------------------------------------------------------------- 1 | # @package data 2 | task: ${task.name} -------------------------------------------------------------------------------- /conf/data/panoptic/default.yaml: -------------------------------------------------------------------------------- 1 | # @package data 2 | defaults: 3 | - /data/default -------------------------------------------------------------------------------- /conf/data/panoptic/npm3d-sparseconv_grid_012_R_10_cylinder_area4.yaml: -------------------------------------------------------------------------------- 1 | # @package data 2 | defaults: 3 | - panoptic/default 4 | 5 | task: panoptic 6 | class: npm3d.NPM3DFusedDataset 7 | dataroot: data 8 | fold: 4 9 | first_subsampling: 0.12 10 | radius: 10 11 | grid_size: ${data.first_subsampling} 12 | keep_instance: True 13 | use_category: False 14 | sampling_format: 'cylinder' 15 | mode: last 16 | pre_collate_transform: 17 | #- transform: PointCloudFusion # One point cloud per area 18 | - transform: SaveOriginalPosId # Required so that one can recover the original point in the fused point cloud 19 | - transform: GridSampling3D # Samples on a grid 20 | params: 21 | size: ${data.first_subsampling} 22 | mode: ${data.mode} 23 | train_transforms: 24 | - transform: RandomNoise 25 | params: 26 | sigma: 0.01 27 | - transform: RandomRotate 28 | params: 29 | degrees: 180 30 | axis: 2 31 | - transform: RandomScaleAnisotropic 32 | params: 33 | scales: [0.9, 1.1] 34 | - transform: RandomSymmetry 35 | params: 36 | axis: [True, False, False] 37 | - transform: XYZRelaFeature 38 | params: 39 | add_x: True 40 | add_y: True 41 | add_z: True 42 | - transform: XYZFeature 43 | params: 44 | add_x: False 45 | add_y: False 46 | add_z: True 47 | - transform: AddFeatsByKeys 48 | params: 49 | list_add_to_x: [True, True, True, True] 50 | feat_names: [pos_x_rela, pos_y_rela, pos_z_rela, pos_z] 51 | delete_feats: [True, True, True, True] 52 | - transform: Center 53 | - transform: GridSampling3D 54 | params: 55 | size: ${data.first_subsampling} 56 | quantize_coords: True 57 | mode: ${data.mode} 58 | - transform: ShiftVoxels 59 | test_transform: 60 | - transform: XYZRelaFeature 61 | params: 62 | add_x: True 63 | add_y: True 64 | add_z: True 65 | - transform: XYZFeature 66 | params: 67 | add_x: False 68 | add_y: False 69 | add_z: True 70 | - transform: AddFeatsByKeys 71 | params: 72 | list_add_to_x: [True, True, True, True] 73 | feat_names: [pos_x_rela, pos_y_rela, pos_z_rela, pos_z] 74 | delete_feats: [True, True, True, True] 75 | - transform: Center 76 | - transform: GridSampling3D 77 | params: 78 | size: ${data.first_subsampling} 79 | quantize_coords: True 80 | mode: ${data.mode} 81 | val_transform: ${data.test_transform} -------------------------------------------------------------------------------- /conf/data/panoptic/npm3d-sparseconv_grid_012_R_12_cylinder_area4.yaml: -------------------------------------------------------------------------------- 1 | # @package data 2 | defaults: 3 | - panoptic/default 4 | 5 | task: 
panoptic 6 | class: npm3d.NPM3DFusedDataset 7 | dataroot: data 8 | fold: 4 9 | first_subsampling: 0.12 10 | radius: 12 11 | grid_size: ${data.first_subsampling} 12 | keep_instance: True 13 | use_category: False 14 | sampling_format: 'cylinder' 15 | mode: last 16 | pre_collate_transform: 17 | #- transform: PointCloudFusion # One point cloud per area 18 | - transform: SaveOriginalPosId # Required so that one can recover the original point in the fused point cloud 19 | - transform: GridSampling3D # Samples on a grid 20 | params: 21 | size: ${data.first_subsampling} 22 | mode: ${data.mode} 23 | train_transforms: 24 | - transform: RandomNoise 25 | params: 26 | sigma: 0.01 27 | - transform: RandomRotate 28 | params: 29 | degrees: 180 30 | axis: 2 31 | - transform: RandomScaleAnisotropic 32 | params: 33 | scales: [0.9, 1.1] 34 | - transform: RandomSymmetry 35 | params: 36 | axis: [True, False, False] 37 | - transform: XYZRelaFeature 38 | params: 39 | add_x: True 40 | add_y: True 41 | add_z: True 42 | - transform: XYZFeature 43 | params: 44 | add_x: False 45 | add_y: False 46 | add_z: True 47 | - transform: AddFeatsByKeys 48 | params: 49 | list_add_to_x: [True, True, True, True] 50 | feat_names: [pos_x_rela, pos_y_rela, pos_z_rela, pos_z] 51 | delete_feats: [True, True, True, True] 52 | - transform: Center 53 | - transform: GridSampling3D 54 | params: 55 | size: ${data.first_subsampling} 56 | quantize_coords: True 57 | mode: ${data.mode} 58 | - transform: ShiftVoxels 59 | test_transform: 60 | - transform: XYZRelaFeature 61 | params: 62 | add_x: True 63 | add_y: True 64 | add_z: True 65 | - transform: XYZFeature 66 | params: 67 | add_x: False 68 | add_y: False 69 | add_z: True 70 | - transform: AddFeatsByKeys 71 | params: 72 | list_add_to_x: [True, True, True, True] 73 | feat_names: [pos_x_rela, pos_y_rela, pos_z_rela, pos_z] 74 | delete_feats: [True, True, True, True] 75 | - transform: Center 76 | - transform: GridSampling3D 77 | params: 78 | size: ${data.first_subsampling} 79 | quantize_coords: True 80 | mode: ${data.mode} 81 | val_transform: ${data.test_transform} -------------------------------------------------------------------------------- /conf/data/panoptic/npm3d-sparseconv_grid_012_R_14_cylinder_area4.yaml: -------------------------------------------------------------------------------- 1 | # @package data 2 | defaults: 3 | - panoptic/default 4 | 5 | task: panoptic 6 | class: npm3d.NPM3DFusedDataset 7 | dataroot: data 8 | fold: 4 9 | first_subsampling: 0.12 10 | radius: 14 11 | grid_size: ${data.first_subsampling} 12 | keep_instance: True 13 | use_category: False 14 | sampling_format: 'cylinder' 15 | mode: last 16 | pre_collate_transform: 17 | #- transform: PointCloudFusion # One point cloud per area 18 | - transform: SaveOriginalPosId # Required so that one can recover the original point in the fused point cloud 19 | - transform: GridSampling3D # Samples on a grid 20 | params: 21 | size: ${data.first_subsampling} 22 | mode: ${data.mode} 23 | train_transforms: 24 | - transform: RandomNoise 25 | params: 26 | sigma: 0.01 27 | - transform: RandomRotate 28 | params: 29 | degrees: 180 30 | axis: 2 31 | - transform: RandomScaleAnisotropic 32 | params: 33 | scales: [0.9, 1.1] 34 | - transform: RandomSymmetry 35 | params: 36 | axis: [True, False, False] 37 | - transform: XYZRelaFeature 38 | params: 39 | add_x: True 40 | add_y: True 41 | add_z: True 42 | - transform: XYZFeature 43 | params: 44 | add_x: False 45 | add_y: False 46 | add_z: True 47 | - transform: AddFeatsByKeys 48 | params: 49 | 
list_add_to_x: [True, True, True, True] 50 | feat_names: [pos_x_rela, pos_y_rela, pos_z_rela, pos_z] 51 | delete_feats: [True, True, True, True] 52 | - transform: Center 53 | - transform: GridSampling3D 54 | params: 55 | size: ${data.first_subsampling} 56 | quantize_coords: True 57 | mode: ${data.mode} 58 | - transform: ShiftVoxels 59 | test_transform: 60 | - transform: XYZRelaFeature 61 | params: 62 | add_x: True 63 | add_y: True 64 | add_z: True 65 | - transform: XYZFeature 66 | params: 67 | add_x: False 68 | add_y: False 69 | add_z: True 70 | - transform: AddFeatsByKeys 71 | params: 72 | list_add_to_x: [True, True, True, True] 73 | feat_names: [pos_x_rela, pos_y_rela, pos_z_rela, pos_z] 74 | delete_feats: [True, True, True, True] 75 | - transform: Center 76 | - transform: GridSampling3D 77 | params: 78 | size: ${data.first_subsampling} 79 | quantize_coords: True 80 | mode: ${data.mode} 81 | val_transform: ${data.test_transform} -------------------------------------------------------------------------------- /conf/data/panoptic/npm3d-sparseconv_grid_012_R_16_cylinder_area1.yaml: -------------------------------------------------------------------------------- 1 | # @package data 2 | defaults: 3 | - panoptic/default 4 | 5 | task: panoptic 6 | class: npm3d.NPM3DFusedDataset 7 | dataroot: data 8 | fold: 1 9 | first_subsampling: 0.12 10 | radius: 16 11 | grid_size: ${data.first_subsampling} 12 | keep_instance: True 13 | use_category: False 14 | sampling_format: 'cylinder' 15 | mode: last 16 | pre_collate_transform: 17 | #- transform: PointCloudFusion # One point cloud per area 18 | - transform: SaveOriginalPosId # Required so that one can recover the original point in the fused point cloud 19 | - transform: GridSampling3D # Samples on a grid 20 | params: 21 | size: ${data.first_subsampling} 22 | mode: ${data.mode} 23 | train_transforms: 24 | - transform: RandomNoise 25 | params: 26 | sigma: 0.01 27 | - transform: RandomRotate 28 | params: 29 | degrees: 180 30 | axis: 2 31 | - transform: RandomScaleAnisotropic 32 | params: 33 | scales: [0.9, 1.1] 34 | - transform: RandomSymmetry 35 | params: 36 | axis: [True, False, False] 37 | - transform: XYZRelaFeature 38 | params: 39 | add_x: True 40 | add_y: True 41 | add_z: True 42 | - transform: XYZFeature 43 | params: 44 | add_x: False 45 | add_y: False 46 | add_z: True 47 | - transform: AddFeatsByKeys 48 | params: 49 | list_add_to_x: [True, True, True, True] 50 | feat_names: [pos_x_rela, pos_y_rela, pos_z_rela, pos_z] 51 | delete_feats: [True, True, True, True] 52 | - transform: Center 53 | - transform: GridSampling3D 54 | params: 55 | size: ${data.first_subsampling} 56 | quantize_coords: True 57 | mode: ${data.mode} 58 | - transform: ShiftVoxels 59 | test_transform: 60 | - transform: XYZRelaFeature 61 | params: 62 | add_x: True 63 | add_y: True 64 | add_z: True 65 | - transform: XYZFeature 66 | params: 67 | add_x: False 68 | add_y: False 69 | add_z: True 70 | - transform: AddFeatsByKeys 71 | params: 72 | list_add_to_x: [True, True, True, True] 73 | feat_names: [pos_x_rela, pos_y_rela, pos_z_rela, pos_z] 74 | delete_feats: [True, True, True, True] 75 | - transform: Center 76 | - transform: GridSampling3D 77 | params: 78 | size: ${data.first_subsampling} 79 | quantize_coords: True 80 | mode: ${data.mode} 81 | val_transform: ${data.test_transform} -------------------------------------------------------------------------------- /conf/data/panoptic/npm3d-sparseconv_grid_012_R_16_cylinder_area2.yaml: 
-------------------------------------------------------------------------------- 1 | # @package data 2 | defaults: 3 | - panoptic/default 4 | 5 | task: panoptic 6 | class: npm3d.NPM3DFusedDataset 7 | dataroot: data 8 | fold: 2 9 | first_subsampling: 0.12 10 | radius: 16 11 | grid_size: ${data.first_subsampling} 12 | keep_instance: True 13 | use_category: False 14 | sampling_format: 'cylinder' 15 | mode: last 16 | pre_collate_transform: 17 | #- transform: PointCloudFusion # One point cloud per area 18 | - transform: SaveOriginalPosId # Required so that one can recover the original point in the fused point cloud 19 | - transform: GridSampling3D # Samples on a grid 20 | params: 21 | size: ${data.first_subsampling} 22 | mode: ${data.mode} 23 | train_transforms: 24 | - transform: RandomNoise 25 | params: 26 | sigma: 0.01 27 | - transform: RandomRotate 28 | params: 29 | degrees: 180 30 | axis: 2 31 | - transform: RandomScaleAnisotropic 32 | params: 33 | scales: [0.9, 1.1] 34 | - transform: RandomSymmetry 35 | params: 36 | axis: [True, False, False] 37 | - transform: XYZRelaFeature 38 | params: 39 | add_x: True 40 | add_y: True 41 | add_z: True 42 | - transform: XYZFeature 43 | params: 44 | add_x: False 45 | add_y: False 46 | add_z: True 47 | - transform: AddFeatsByKeys 48 | params: 49 | list_add_to_x: [True, True, True, True] 50 | feat_names: [pos_x_rela, pos_y_rela, pos_z_rela, pos_z] 51 | delete_feats: [True, True, True, True] 52 | - transform: Center 53 | - transform: GridSampling3D 54 | params: 55 | size: ${data.first_subsampling} 56 | quantize_coords: True 57 | mode: ${data.mode} 58 | - transform: ShiftVoxels 59 | test_transform: 60 | - transform: XYZRelaFeature 61 | params: 62 | add_x: True 63 | add_y: True 64 | add_z: True 65 | - transform: XYZFeature 66 | params: 67 | add_x: False 68 | add_y: False 69 | add_z: True 70 | - transform: AddFeatsByKeys 71 | params: 72 | list_add_to_x: [True, True, True, True] 73 | feat_names: [pos_x_rela, pos_y_rela, pos_z_rela, pos_z] 74 | delete_feats: [True, True, True, True] 75 | - transform: Center 76 | - transform: GridSampling3D 77 | params: 78 | size: ${data.first_subsampling} 79 | quantize_coords: True 80 | mode: ${data.mode} 81 | val_transform: ${data.test_transform} -------------------------------------------------------------------------------- /conf/data/panoptic/npm3d-sparseconv_grid_012_R_16_cylinder_area3.yaml: -------------------------------------------------------------------------------- 1 | # @package data 2 | defaults: 3 | - panoptic/default 4 | 5 | task: panoptic 6 | class: npm3d.NPM3DFusedDataset 7 | dataroot: data 8 | fold: 3 9 | first_subsampling: 0.12 10 | radius: 16 11 | grid_size: ${data.first_subsampling} 12 | keep_instance: True 13 | use_category: False 14 | sampling_format: 'cylinder' 15 | mode: last 16 | pre_collate_transform: 17 | #- transform: PointCloudFusion # One point cloud per area 18 | - transform: SaveOriginalPosId # Required so that one can recover the original point in the fused point cloud 19 | - transform: GridSampling3D # Samples on a grid 20 | params: 21 | size: ${data.first_subsampling} 22 | mode: ${data.mode} 23 | train_transforms: 24 | - transform: RandomNoise 25 | params: 26 | sigma: 0.01 27 | - transform: RandomRotate 28 | params: 29 | degrees: 180 30 | axis: 2 31 | - transform: RandomScaleAnisotropic 32 | params: 33 | scales: [0.9, 1.1] 34 | - transform: RandomSymmetry 35 | params: 36 | axis: [True, False, False] 37 | - transform: XYZRelaFeature 38 | params: 39 | add_x: True 40 | add_y: True 41 | add_z: 
True 42 | - transform: XYZFeature 43 | params: 44 | add_x: False 45 | add_y: False 46 | add_z: True 47 | - transform: AddFeatsByKeys 48 | params: 49 | list_add_to_x: [True, True, True, True] 50 | feat_names: [pos_x_rela, pos_y_rela, pos_z_rela, pos_z] 51 | delete_feats: [True, True, True, True] 52 | - transform: Center 53 | - transform: GridSampling3D 54 | params: 55 | size: ${data.first_subsampling} 56 | quantize_coords: True 57 | mode: ${data.mode} 58 | - transform: ShiftVoxels 59 | test_transform: 60 | - transform: XYZRelaFeature 61 | params: 62 | add_x: True 63 | add_y: True 64 | add_z: True 65 | - transform: XYZFeature 66 | params: 67 | add_x: False 68 | add_y: False 69 | add_z: True 70 | - transform: AddFeatsByKeys 71 | params: 72 | list_add_to_x: [True, True, True, True] 73 | feat_names: [pos_x_rela, pos_y_rela, pos_z_rela, pos_z] 74 | delete_feats: [True, True, True, True] 75 | - transform: Center 76 | - transform: GridSampling3D 77 | params: 78 | size: ${data.first_subsampling} 79 | quantize_coords: True 80 | mode: ${data.mode} 81 | val_transform: ${data.test_transform} -------------------------------------------------------------------------------- /conf/data/panoptic/npm3d-sparseconv_grid_012_R_16_cylinder_area4.yaml: -------------------------------------------------------------------------------- 1 | # @package data 2 | defaults: 3 | - panoptic/default 4 | 5 | task: panoptic 6 | class: npm3d.NPM3DFusedDataset 7 | dataroot: data 8 | fold: 4 9 | first_subsampling: 0.12 10 | radius: 16 11 | grid_size: ${data.first_subsampling} 12 | keep_instance: True 13 | use_category: False 14 | sampling_format: 'cylinder' 15 | mode: last 16 | pre_collate_transform: 17 | #- transform: PointCloudFusion # One point cloud per area 18 | - transform: SaveOriginalPosId # Required so that one can recover the original point in the fused point cloud 19 | - transform: GridSampling3D # Samples on a grid 20 | params: 21 | size: ${data.first_subsampling} 22 | mode: ${data.mode} 23 | train_transforms: 24 | - transform: RandomNoise 25 | params: 26 | sigma: 0.01 27 | - transform: RandomRotate 28 | params: 29 | degrees: 180 30 | axis: 2 31 | - transform: RandomScaleAnisotropic 32 | params: 33 | scales: [0.9, 1.1] 34 | - transform: RandomSymmetry 35 | params: 36 | axis: [True, False, False] 37 | - transform: XYZRelaFeature 38 | params: 39 | add_x: True 40 | add_y: True 41 | add_z: True 42 | - transform: XYZFeature 43 | params: 44 | add_x: False 45 | add_y: False 46 | add_z: True 47 | - transform: AddFeatsByKeys 48 | params: 49 | list_add_to_x: [True, True, True, True] 50 | feat_names: [pos_x_rela, pos_y_rela, pos_z_rela, pos_z] 51 | delete_feats: [True, True, True, True] 52 | - transform: Center 53 | - transform: GridSampling3D 54 | params: 55 | size: ${data.first_subsampling} 56 | quantize_coords: True 57 | mode: ${data.mode} 58 | - transform: ShiftVoxels 59 | test_transform: 60 | - transform: XYZRelaFeature 61 | params: 62 | add_x: True 63 | add_y: True 64 | add_z: True 65 | - transform: XYZFeature 66 | params: 67 | add_x: False 68 | add_y: False 69 | add_z: True 70 | - transform: AddFeatsByKeys 71 | params: 72 | list_add_to_x: [True, True, True, True] 73 | feat_names: [pos_x_rela, pos_y_rela, pos_z_rela, pos_z] 74 | delete_feats: [True, True, True, True] 75 | - transform: Center 76 | - transform: GridSampling3D 77 | params: 78 | size: ${data.first_subsampling} 79 | quantize_coords: True 80 | mode: ${data.mode} 81 | val_transform: ${data.test_transform} 
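A recurring pattern in the data configs above is OmegaConf interpolation: grid_size and both GridSampling3D sizes all resolve to data.first_subsampling, so changing that one number retunes the whole pipeline. A minimal sketch of that behaviour with a trimmed-down stand-in for the YAML (not the full file):

```python
from omegaconf import OmegaConf

# Trimmed stand-in for the npm3d-sparseconv_*.yaml files above; interpolations
# are rooted at the top-level `data` key, as in the real config tree.
cfg = OmegaConf.create(
    {
        "data": {
            "first_subsampling": 0.12,
            "grid_size": "${data.first_subsampling}",
            "pre_collate_transform": [
                {"transform": "GridSampling3D",
                 "params": {"size": "${data.first_subsampling}"}},
            ],
        }
    }
)

print(cfg.data.grid_size)  # 0.12 -- resolved lazily on access
print(OmegaConf.to_container(cfg, resolve=True)["data"]["pre_collate_transform"])
```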
-------------------------------------------------------------------------------- /conf/data/panoptic/npm3d-sparseconv_grid_014_R_16_cylinder_area4.yaml: -------------------------------------------------------------------------------- 1 | # @package data 2 | defaults: 3 | - panoptic/default 4 | 5 | task: panoptic 6 | class: npm3d.NPM3DFusedDataset 7 | dataroot: data 8 | fold: 4 9 | first_subsampling: 0.14 10 | radius: 16 11 | grid_size: ${data.first_subsampling} 12 | keep_instance: True 13 | use_category: False 14 | sampling_format: 'cylinder' 15 | mode: last 16 | pre_collate_transform: 17 | #- transform: PointCloudFusion # One point cloud per area 18 | - transform: SaveOriginalPosId # Required so that one can recover the original point in the fused point cloud 19 | - transform: GridSampling3D # Samples on a grid 20 | params: 21 | size: ${data.first_subsampling} 22 | mode: ${data.mode} 23 | train_transforms: 24 | - transform: RandomNoise 25 | params: 26 | sigma: 0.01 27 | - transform: RandomRotate 28 | params: 29 | degrees: 180 30 | axis: 2 31 | - transform: RandomScaleAnisotropic 32 | params: 33 | scales: [0.9, 1.1] 34 | - transform: RandomSymmetry 35 | params: 36 | axis: [True, False, False] 37 | - transform: XYZRelaFeature 38 | params: 39 | add_x: True 40 | add_y: True 41 | add_z: True 42 | - transform: XYZFeature 43 | params: 44 | add_x: False 45 | add_y: False 46 | add_z: True 47 | - transform: AddFeatsByKeys 48 | params: 49 | list_add_to_x: [True, True, True, True] 50 | feat_names: [pos_x_rela, pos_y_rela, pos_z_rela, pos_z] 51 | delete_feats: [True, True, True, True] 52 | - transform: Center 53 | - transform: GridSampling3D 54 | params: 55 | size: ${data.first_subsampling} 56 | quantize_coords: True 57 | mode: ${data.mode} 58 | - transform: ShiftVoxels 59 | test_transform: 60 | - transform: XYZRelaFeature 61 | params: 62 | add_x: True 63 | add_y: True 64 | add_z: True 65 | - transform: XYZFeature 66 | params: 67 | add_x: False 68 | add_y: False 69 | add_z: True 70 | - transform: AddFeatsByKeys 71 | params: 72 | list_add_to_x: [True, True, True, True] 73 | feat_names: [pos_x_rela, pos_y_rela, pos_z_rela, pos_z] 74 | delete_feats: [True, True, True, True] 75 | - transform: Center 76 | - transform: GridSampling3D 77 | params: 78 | size: ${data.first_subsampling} 79 | quantize_coords: True 80 | mode: ${data.mode} 81 | val_transform: ${data.test_transform} -------------------------------------------------------------------------------- /conf/debugging/default.yaml: -------------------------------------------------------------------------------- 1 | # @package debugging 2 | find_neighbour_dist: False 3 | num_batches: 50 4 | early_break: False 5 | profiling: False -------------------------------------------------------------------------------- /conf/debugging/early_break.yaml: -------------------------------------------------------------------------------- 1 | # @package _group_ 2 | early_break: True -------------------------------------------------------------------------------- /conf/debugging/find_neighbour_dist.yaml: -------------------------------------------------------------------------------- 1 | # @package _group_ 2 | find_neighbour_dist: True 3 | num_batches: 20 -------------------------------------------------------------------------------- /conf/hydra/job_logging/custom.yaml: -------------------------------------------------------------------------------- 1 | # @package _group_ 2 | formatters: 3 | simple: 4 | format: "%(message)s" 5 | root: 6 | handlers: [debug_console_handler, 
file_handler]
version: 1
handlers:
  debug_console_handler:
    level: DEBUG
    formatter: simple
    class: logging.StreamHandler
    stream: ext://sys.stdout
  file_handler:
    level: DEBUG
    formatter: simple
    class: logging.FileHandler
    filename: train.log
disable_existing_loggers: False
-------------------------------------------------------------------------------- /conf/hydra/output/custom.yaml: --------------------------------------------------------------------------------
# @package _global_
hydra:
  run:
    dir: ./outputs/${job_name}/${job_name}-${model_name}-${now:%Y%m%d_%H%M%S}
-------------------------------------------------------------------------------- /conf/lr_scheduler/cosine.yaml: --------------------------------------------------------------------------------
# @package _group_
class: CosineAnnealingLR
params:
  T_max: 10
-------------------------------------------------------------------------------- /conf/lr_scheduler/cyclic.yaml: --------------------------------------------------------------------------------
# @package _group_
class: CyclicLR
params:
  base_lr: ${training.optim.base_lr}
  max_lr: 0.1
-------------------------------------------------------------------------------- /conf/lr_scheduler/exponential.yaml: --------------------------------------------------------------------------------
# @package _group_
class: ExponentialLR
params:
  gamma: 0.9885 # = 0.1**(1/200.), i.e. divide the learning rate by 10 every 200 epochs
-------------------------------------------------------------------------------- /conf/lr_scheduler/multi_step.yaml: --------------------------------------------------------------------------------
# @package _group_
class: MultiStepLR
params:
  milestones: [80, 120, 160]
  gamma: 0.2
-------------------------------------------------------------------------------- /conf/lr_scheduler/multi_step_reg.yaml: --------------------------------------------------------------------------------
# @package _group_
class: MultiStepLR
params:
  milestones: [600, 1200, 1800, 3000]
  gamma: 0.5
-------------------------------------------------------------------------------- /conf/lr_scheduler/plateau.yaml: --------------------------------------------------------------------------------
# @package _group_
class: ReduceLROnPlateau
params:
  mode: "min"
-------------------------------------------------------------------------------- /conf/lr_scheduler/poly_lr.yaml: --------------------------------------------------------------------------------
# @package _group_
class: PolyLR
params:
  on_epoch:
    max_iter: 150
    power: 0.9
  on_num_batch:
    max_iter: 60000
    power: 2
-------------------------------------------------------------------------------- /conf/lr_scheduler/step.yaml: --------------------------------------------------------------------------------
# @package _group_
class: StepLR
params:
  step_size: 10
  gamma: 0.9
  last_epoch: -1
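The gamma in exponential.yaml is chosen so the learning rate decays by 10x every 200 epochs. A quick PyTorch check of that claim (the Linear module is only a placeholder to give the optimizer some parameters):

```python
import torch

model = torch.nn.Linear(4, 2)  # placeholder parameters
opt = torch.optim.Adam(model.parameters(), lr=0.001)  # base_lr used by the training configs
sched = torch.optim.lr_scheduler.ExponentialLR(opt, gamma=0.9885)

for _ in range(200):
    opt.step()    # optimizer step before scheduler step, as PyTorch expects
    sched.step()

# 0.9885 ** 200 ~= 0.099, so after 200 epochs lr ~= 0.001 / 10.
print(opt.param_groups[0]["lr"])  # ~1e-4
```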
-------------------------------------------------------------------------------- /conf/models/default.yaml: --------------------------------------------------------------------------------
# @package models
-------------------------------------------------------------------------------- /conf/sota.yaml: --------------------------------------------------------------------------------
# @package sota
s3dis5:
  miou: 67.1
  mrec: 72.8

s3dis:
  acc: 88.2
  macc: 81.5
  miou: 70.6

scannet:
  miou: 72.5

semantic3d:
  miou: 76.0
  acc: 94.4

semantickitti:
  miou: 50.3

modelnet40:
  acc: 92.9

shapenet:
  mciou: 85.1
  miou: 86.4
-------------------------------------------------------------------------------- /conf/task/default.yaml: --------------------------------------------------------------------------------
# @package task
defaults:
  - /data@_group_: default
  - /models@_group_: default

# By default we turn off recursive instantiation, allowing the user to instantiate objects themselves at the appropriate time.
_recursive_: false

#_target_: lightning_transformers.core.model.TaskTransformer
lr_scheduler: ${lr_scheduler}
-------------------------------------------------------------------------------- /conf/task/panoptic.yaml: --------------------------------------------------------------------------------
# @package task
defaults:
  - /task/default
  - override /data@_group_: panoptic/default
  - override /models@_group_: panoptic/default

name: panoptic
-------------------------------------------------------------------------------- /conf/task/segmentation.yaml: --------------------------------------------------------------------------------
# @package task
defaults:
  - /task/default
  - override /data@_group_: segmentation/default
  - override /models@_group_: segmentation/default

name: segmentation
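The task files above are Hydra defaults lists: panoptic.yaml pulls in task/default and then overrides the data and models groups. A sketch of composing the full config the way train.py would, using Hydra's compose API; the overrides name files that appear in the tree above, but whether the whole tree composes cleanly outside the repo is an assumption:

```python
from hydra import compose, initialize

# config_path is relative to this file; version_base=None needs Hydra >= 1.2.
with initialize(config_path="conf", version_base=None):
    cfg = compose(
        config_name="config",
        overrides=[
            "data=panoptic/treeins_rad8",             # files from the tree above
            "models=panoptic/area4_ablation_3heads_5",
            "training=treeins",
        ],
    )

print(cfg.task.name)  # "panoptic", packaged by the `# @package task` directive
```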
-------------------------------------------------------------------------------- /conf/training/1_area1.yaml: --------------------------------------------------------------------------------
# Ref: https://github.com/chrischoy/SpatioTemporalSegmentation/blob/master/config.py
# @package training
epochs: 100
num_workers: 4
batch_size: 4
shuffle: True
cuda: 0
precompute_multi_scale: False # Compute multiscale features on CPU for faster training / inference
optim:
  base_lr: 0.001
  grad_clip: -1
  # accumulated_gradient: -1 # Accumulate gradient accumulated_gradient * batch_size
  optimizer:
    class: Adam
    params:
      lr: ${training.optim.base_lr} # The path is cut from training
      weight_decay: 0
  lr_scheduler: ${lr_scheduler}
  bn_scheduler:
    bn_policy: "step_decay"
    params:
      bn_momentum: 0.1
      bn_decay: 0.5
      decay_step: 20
      bn_clip: 1e-2
weight_name: "F1" # Used during resume; selects which model to load from [miou, macc, acc..., latest]
enable_cudnn: True
checkpoint_dir: ""

# These arguments define which model, dataset and task are created for benchmarking
# parameters for Weights and Biases
wandb:
  entity: "binbin"
  project: compare-area1 #panoptic-boost
  log: True
  notes: "s_1"
  name: "s_1"
  id:
  public: True # If True, the model will be displayed within the wandb log; otherwise not.
  config:
    grid_size: ${data.grid_size}

# parameters for TensorBoard Visualization
tensorboard:
  log: False
-------------------------------------------------------------------------------- /conf/training/1_area2.yaml: --------------------------------------------------------------------------------
# Ref: https://github.com/chrischoy/SpatioTemporalSegmentation/blob/master/config.py
# @package training
epochs: 100
num_workers: 4
batch_size: 4
shuffle: True
cuda: 0
precompute_multi_scale: False # Compute multiscale features on CPU for faster training / inference
optim:
  base_lr: 0.001
  grad_clip: -1
  # accumulated_gradient: -1 # Accumulate gradient accumulated_gradient * batch_size
  optimizer:
    class: Adam
    params:
      lr: ${training.optim.base_lr} # The path is cut from training
      weight_decay: 0
  lr_scheduler: ${lr_scheduler}
  bn_scheduler:
    bn_policy: "step_decay"
    params:
      bn_momentum: 0.1
      bn_decay: 0.5
      decay_step: 20
      bn_clip: 1e-2
weight_name: "F1" # Used during resume; selects which model to load from [miou, macc, acc..., latest]
enable_cudnn: True
checkpoint_dir: ""

# These arguments define which model, dataset and task are created for benchmarking
# parameters for Weights and Biases
wandb:
  entity: "binbin"
  project: compare-area2 #panoptic-boost
  log: True
  notes: "s_1"
  name: "s_1"
  id:
  public: True # If True, the model will be displayed within the wandb log; otherwise not.
  config:
    grid_size: ${data.grid_size}

# parameters for TensorBoard Visualization
tensorboard:
  log: False
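All of these training files feed the same optimizer setup: the class field is a name on torch.optim and params are passed through. A hedged sketch of that lookup (a simplification under stated assumptions, not the repo's actual factory code):

```python
import torch
from omegaconf import OmegaConf

# Trimmed stand-in for the `optim` block of the training configs above.
optim_cfg = OmegaConf.create(
    {"optimizer": {"class": "Adam", "params": {"lr": 0.001, "weight_decay": 0}}}
)

model = torch.nn.Linear(4, 2)  # placeholder parameters

# `class` is a Python keyword, hence the bracket access.
opt_cls = getattr(torch.optim, optim_cfg.optimizer["class"])
optimizer = opt_cls(model.parameters(), **optim_cfg.optimizer.params)
print(optimizer)
```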
40 | config: 41 | grid_size: ${data.grid_size} 42 | 43 | # parameters for TensorBoard Visualization 44 | tensorboard: 45 | log: False 46 | -------------------------------------------------------------------------------- /conf/training/1_area4.yaml: -------------------------------------------------------------------------------- 1 | # Ref: https://github.com/chrischoy/SpatioTemporalSegmentation/blob/master/config.py 2 | # @package training 3 | epochs: 100 4 | num_workers: 4 5 | batch_size: 4 6 | shuffle: True 7 | cuda: 0 8 | precompute_multi_scale: False # Compute multiscale features on CPU for faster training / inference 9 | optim: 10 | base_lr: 0.001 11 | grad_clip: -1 12 | # accumulated_gradient: -1 # Accumulate gradient accumulated_gradient * batch_size 13 | optimizer: 14 | class: Adam 15 | params: 16 | lr: ${training.optim.base_lr} # The path is cut from training 17 | weight_decay: 0 18 | lr_scheduler: ${lr_scheduler} 19 | bn_scheduler: 20 | bn_policy: "step_decay" 21 | params: 22 | bn_momentum: 0.1 23 | bn_decay: 0.5 24 | decay_step: 20 25 | bn_clip: 1e-2 26 | weight_name: "F1" # Used during resume; selects which model to load from [miou, macc, acc..., latest] 27 | enable_cudnn: True 28 | checkpoint_dir: "" 29 | 30 | # These arguments define which model, dataset and task are created for benchmarking 31 | # parameters for Weights and Biases 32 | wandb: 33 | entity: "binbin" 34 | project: compare-area4 #panoptic-boost 35 | log: True 36 | notes: "s_1" 37 | name: "s_1" 38 | id: 39 | public: True # If True, the run is displayed in the wandb log; otherwise not. 40 | config: 41 | grid_size: ${data.grid_size} 42 | 43 | # parameters for TensorBoard Visualization 44 | tensorboard: 45 | log: False 46 | -------------------------------------------------------------------------------- /conf/training/4_area1.yaml: -------------------------------------------------------------------------------- 1 | # Ref: https://github.com/chrischoy/SpatioTemporalSegmentation/blob/master/config.py 2 | # @package training 3 | epochs: 100 4 | num_workers: 4 5 | batch_size: 4 6 | shuffle: True 7 | cuda: 0 8 | precompute_multi_scale: False # Compute multiscale features on CPU for faster training / inference 9 | optim: 10 | base_lr: 0.001 11 | grad_clip: -1 12 | # accumulated_gradient: -1 # Accumulate gradient accumulated_gradient * batch_size 13 | optimizer: 14 | class: Adam 15 | params: 16 | lr: ${training.optim.base_lr} # The path is cut from training 17 | weight_decay: 0 18 | lr_scheduler: ${lr_scheduler} 19 | bn_scheduler: 20 | bn_policy: "step_decay" 21 | params: 22 | bn_momentum: 0.1 23 | bn_decay: 0.5 24 | decay_step: 20 25 | bn_clip: 1e-2 26 | weight_name: "F1" # Used during resume; selects which model to load from [miou, macc, acc..., latest] 27 | enable_cudnn: True 28 | checkpoint_dir: "" 29 | 30 | # These arguments define which model, dataset and task are created for benchmarking 31 | # parameters for Weights and Biases 32 | wandb: 33 | entity: "binbin" 34 | project: compare-area1 #panoptic-boost 35 | log: True 36 | notes: "s_4" 37 | name: "s_4" 38 | id: 39 | public: True # If True, the run is displayed in the wandb log; otherwise not.
40 | config: 41 | grid_size: ${data.grid_size} 42 | 43 | # parameters for TensorBoard Visualization 44 | tensorboard: 45 | log: False 46 | -------------------------------------------------------------------------------- /conf/training/4_area2.yaml: -------------------------------------------------------------------------------- 1 | # Ref: https://github.com/chrischoy/SpatioTemporalSegmentation/blob/master/config.py 2 | # @package training 3 | epochs: 100 4 | num_workers: 4 5 | batch_size: 4 6 | shuffle: True 7 | cuda: 0 8 | precompute_multi_scale: False # Compute multiscale features on CPU for faster training / inference 9 | optim: 10 | base_lr: 0.001 11 | grad_clip: -1 12 | # accumulated_gradient: -1 # Accumulate gradient accumulated_gradient * batch_size 13 | optimizer: 14 | class: Adam 15 | params: 16 | lr: ${training.optim.base_lr} # The path is cut from training 17 | weight_decay: 0 18 | lr_scheduler: ${lr_scheduler} 19 | bn_scheduler: 20 | bn_policy: "step_decay" 21 | params: 22 | bn_momentum: 0.1 23 | bn_decay: 0.5 24 | decay_step: 20 25 | bn_clip: 1e-2 26 | weight_name: "F1" # Used during resume; selects which model to load from [miou, macc, acc..., latest] 27 | enable_cudnn: True 28 | checkpoint_dir: "" 29 | 30 | # These arguments define which model, dataset and task are created for benchmarking 31 | # parameters for Weights and Biases 32 | wandb: 33 | entity: "binbin" 34 | project: compare-area2 #panoptic-boost 35 | log: True 36 | notes: "s_4" 37 | name: "s_4" 38 | id: 39 | public: True # If True, the run is displayed in the wandb log; otherwise not. 40 | config: 41 | grid_size: ${data.grid_size} 42 | 43 | # parameters for TensorBoard Visualization 44 | tensorboard: 45 | log: False 46 | -------------------------------------------------------------------------------- /conf/training/4_area3.yaml: -------------------------------------------------------------------------------- 1 | # Ref: https://github.com/chrischoy/SpatioTemporalSegmentation/blob/master/config.py 2 | # @package training 3 | epochs: 100 4 | num_workers: 4 5 | batch_size: 4 6 | shuffle: True 7 | cuda: 0 8 | precompute_multi_scale: False # Compute multiscale features on CPU for faster training / inference 9 | optim: 10 | base_lr: 0.001 11 | grad_clip: -1 12 | # accumulated_gradient: -1 # Accumulate gradient accumulated_gradient * batch_size 13 | optimizer: 14 | class: Adam 15 | params: 16 | lr: ${training.optim.base_lr} # The path is cut from training 17 | weight_decay: 0 18 | lr_scheduler: ${lr_scheduler} 19 | bn_scheduler: 20 | bn_policy: "step_decay" 21 | params: 22 | bn_momentum: 0.1 23 | bn_decay: 0.5 24 | decay_step: 20 25 | bn_clip: 1e-2 26 | weight_name: "F1" # Used during resume; selects which model to load from [miou, macc, acc..., latest] 27 | enable_cudnn: True 28 | checkpoint_dir: "" 29 | 30 | # These arguments define which model, dataset and task are created for benchmarking 31 | # parameters for Weights and Biases 32 | wandb: 33 | entity: "binbin" 34 | project: compare-area3 #panoptic-boost 35 | log: True 36 | notes: "s_4" 37 | name: "s_4" 38 | id: 39 | public: True # If True, the run is displayed in the wandb log; otherwise not.
40 | config: 41 | grid_size: ${data.grid_size} 42 | 43 | # parameters for TensorBoard Visualization 44 | tensorboard: 45 | log: False 46 | -------------------------------------------------------------------------------- /conf/training/4_area4.yaml: -------------------------------------------------------------------------------- 1 | # Ref: https://github.com/chrischoy/SpatioTemporalSegmentation/blob/master/config.py 2 | # @package training 3 | epochs: 100 4 | num_workers: 4 5 | batch_size: 4 6 | shuffle: True 7 | cuda: 0 8 | precompute_multi_scale: False # Compute multiscale features on CPU for faster training / inference 9 | optim: 10 | base_lr: 0.001 11 | grad_clip: -1 12 | # accumulated_gradient: -1 # Accumulate gradient accumulated_gradient * batch_size 13 | optimizer: 14 | class: Adam 15 | params: 16 | lr: ${training.optim.base_lr} # The path is cut from training 17 | weight_decay: 0 18 | lr_scheduler: ${lr_scheduler} 19 | bn_scheduler: 20 | bn_policy: "step_decay" 21 | params: 22 | bn_momentum: 0.1 23 | bn_decay: 0.5 24 | decay_step: 20 25 | bn_clip: 1e-2 26 | weight_name: "F1" # Used during resume; selects which model to load from [miou, macc, acc..., latest] 27 | enable_cudnn: True 28 | checkpoint_dir: "" 29 | 30 | # These arguments define which model, dataset and task are created for benchmarking 31 | # parameters for Weights and Biases 32 | wandb: 33 | entity: "binbin" 34 | project: compare-area4 #panoptic-boost 35 | log: True 36 | notes: "s_4" 37 | name: "s_4" 38 | id: 39 | public: True # If True, the run is displayed in the wandb log; otherwise not. 40 | config: 41 | grid_size: ${data.grid_size} 42 | 43 | # parameters for TensorBoard Visualization 44 | tensorboard: 45 | log: False 46 | -------------------------------------------------------------------------------- /conf/training/6_area1.yaml: -------------------------------------------------------------------------------- 1 | # Ref: https://github.com/chrischoy/SpatioTemporalSegmentation/blob/master/config.py 2 | # @package training 3 | epochs: 100 4 | num_workers: 4 5 | batch_size: 4 6 | shuffle: True 7 | cuda: 0 8 | precompute_multi_scale: False # Compute multiscale features on CPU for faster training / inference 9 | optim: 10 | base_lr: 0.001 11 | grad_clip: -1 12 | # accumulated_gradient: -1 # Accumulate gradient accumulated_gradient * batch_size 13 | optimizer: 14 | class: Adam 15 | params: 16 | lr: ${training.optim.base_lr} # The path is cut from training 17 | weight_decay: 0 18 | lr_scheduler: ${lr_scheduler} 19 | bn_scheduler: 20 | bn_policy: "step_decay" 21 | params: 22 | bn_momentum: 0.1 23 | bn_decay: 0.5 24 | decay_step: 20 25 | bn_clip: 1e-2 26 | weight_name: "F1" # Used during resume; selects which model to load from [miou, macc, acc..., latest] 27 | enable_cudnn: True 28 | checkpoint_dir: "" 29 | 30 | # These arguments define which model, dataset and task are created for benchmarking 31 | # parameters for Weights and Biases 32 | wandb: 33 | entity: "binbin" 34 | project: compare-area1 #panoptic-boost 35 | log: True 36 | notes: "s_6" 37 | name: "s_6" 38 | id: 39 | public: True # If True, the run is displayed in the wandb log; otherwise not.
40 | config: 41 | grid_size: ${data.grid_size} 42 | 43 | # parameters for TensorBoard Visualization 44 | tensorboard: 45 | log: False 46 | -------------------------------------------------------------------------------- /conf/training/6_area2.yaml: -------------------------------------------------------------------------------- 1 | # Ref: https://github.com/chrischoy/SpatioTemporalSegmentation/blob/master/config.py 2 | # @package training 3 | epochs: 100 4 | num_workers: 4 5 | batch_size: 4 6 | shuffle: True 7 | cuda: 0 8 | precompute_multi_scale: False # Compute multiscale features on CPU for faster training / inference 9 | optim: 10 | base_lr: 0.001 11 | grad_clip: -1 12 | # accumulated_gradient: -1 # Accumulate gradient accumulated_gradient * batch_size 13 | optimizer: 14 | class: Adam 15 | params: 16 | lr: ${training.optim.base_lr} # The path is cut from training 17 | weight_decay: 0 18 | lr_scheduler: ${lr_scheduler} 19 | bn_scheduler: 20 | bn_policy: "step_decay" 21 | params: 22 | bn_momentum: 0.1 23 | bn_decay: 0.5 24 | decay_step: 20 25 | bn_clip: 1e-2 26 | weight_name: "F1" # Used during resume; selects which model to load from [miou, macc, acc..., latest] 27 | enable_cudnn: True 28 | checkpoint_dir: "" 29 | 30 | # These arguments define which model, dataset and task are created for benchmarking 31 | # parameters for Weights and Biases 32 | wandb: 33 | entity: "binbin" 34 | project: compare-area2 #panoptic-boost 35 | log: True 36 | notes: "s_6" 37 | name: "s_6" 38 | id: 39 | public: True # If True, the run is displayed in the wandb log; otherwise not. 40 | config: 41 | grid_size: ${data.grid_size} 42 | 43 | # parameters for TensorBoard Visualization 44 | tensorboard: 45 | log: False 46 | -------------------------------------------------------------------------------- /conf/training/6_area3.yaml: -------------------------------------------------------------------------------- 1 | # Ref: https://github.com/chrischoy/SpatioTemporalSegmentation/blob/master/config.py 2 | # @package training 3 | epochs: 100 4 | num_workers: 4 5 | batch_size: 4 6 | shuffle: True 7 | cuda: 0 8 | precompute_multi_scale: False # Compute multiscale features on CPU for faster training / inference 9 | optim: 10 | base_lr: 0.001 11 | grad_clip: -1 12 | # accumulated_gradient: -1 # Accumulate gradient accumulated_gradient * batch_size 13 | optimizer: 14 | class: Adam 15 | params: 16 | lr: ${training.optim.base_lr} # The path is cut from training 17 | weight_decay: 0 18 | lr_scheduler: ${lr_scheduler} 19 | bn_scheduler: 20 | bn_policy: "step_decay" 21 | params: 22 | bn_momentum: 0.1 23 | bn_decay: 0.5 24 | decay_step: 20 25 | bn_clip: 1e-2 26 | weight_name: "F1" # Used during resume; selects which model to load from [miou, macc, acc..., latest] 27 | enable_cudnn: True 28 | checkpoint_dir: "" 29 | 30 | # These arguments define which model, dataset and task are created for benchmarking 31 | # parameters for Weights and Biases 32 | wandb: 33 | entity: "binbin" 34 | project: compare-area3 #panoptic-boost 35 | log: True 36 | notes: "s_6" 37 | name: "s_6" 38 | id: 39 | public: True # If True, the run is displayed in the wandb log; otherwise not.
40 | config: 41 | grid_size: ${data.grid_size} 42 | 43 | # parameters for TensorBoard Visualization 44 | tensorboard: 45 | log: False 46 | -------------------------------------------------------------------------------- /conf/training/6_area4.yaml: -------------------------------------------------------------------------------- 1 | # Ref: https://github.com/chrischoy/SpatioTemporalSegmentation/blob/master/config.py 2 | # @package training 3 | epochs: 100 4 | num_workers: 4 5 | batch_size: 4 6 | shuffle: True 7 | cuda: 0 8 | precompute_multi_scale: False # Compute multiscale features on CPU for faster training / inference 9 | optim: 10 | base_lr: 0.001 11 | grad_clip: -1 12 | # accumulated_gradient: -1 # Accumulate gradient accumulated_gradient * batch_size 13 | optimizer: 14 | class: Adam 15 | params: 16 | lr: ${training.optim.base_lr} # The path is cut from training 17 | weight_decay: 0 18 | lr_scheduler: ${lr_scheduler} 19 | bn_scheduler: 20 | bn_policy: "step_decay" 21 | params: 22 | bn_momentum: 0.1 23 | bn_decay: 0.5 24 | decay_step: 20 25 | bn_clip: 1e-2 26 | weight_name: "F1" # Used during resume; selects which model to load from [miou, macc, acc..., latest] 27 | enable_cudnn: True 28 | checkpoint_dir: "" 29 | 30 | # These arguments define which model, dataset and task are created for benchmarking 31 | # parameters for Weights and Biases 32 | wandb: 33 | entity: "binbin" 34 | project: compare-area4 #panoptic-boost 35 | log: True 36 | notes: "s_6" 37 | name: "s_6" 38 | id: 39 | public: True # If True, the run is displayed in the wandb log; otherwise not. 40 | config: 41 | grid_size: ${data.grid_size} 42 | 43 | # parameters for TensorBoard Visualization 44 | tensorboard: 45 | log: False 46 | -------------------------------------------------------------------------------- /conf/training/7_area1.yaml: -------------------------------------------------------------------------------- 1 | # Ref: https://github.com/chrischoy/SpatioTemporalSegmentation/blob/master/config.py 2 | # @package training 3 | epochs: 100 4 | num_workers: 4 5 | batch_size: 4 6 | shuffle: True 7 | cuda: 0 8 | precompute_multi_scale: False # Compute multiscale features on CPU for faster training / inference 9 | optim: 10 | base_lr: 0.001 11 | grad_clip: -1 12 | # accumulated_gradient: -1 # Accumulate gradient accumulated_gradient * batch_size 13 | optimizer: 14 | class: Adam 15 | params: 16 | lr: ${training.optim.base_lr} # The path is cut from training 17 | weight_decay: 0 18 | lr_scheduler: ${lr_scheduler} 19 | bn_scheduler: 20 | bn_policy: "step_decay" 21 | params: 22 | bn_momentum: 0.1 23 | bn_decay: 0.5 24 | decay_step: 20 25 | bn_clip: 1e-2 26 | weight_name: "F1" # Used during resume; selects which model to load from [miou, macc, acc..., latest] 27 | enable_cudnn: True 28 | checkpoint_dir: "" 29 | 30 | # These arguments define which model, dataset and task are created for benchmarking 31 | # parameters for Weights and Biases 32 | wandb: 33 | entity: "binbin" 34 | project: compare-area1 #panoptic-boost 35 | log: True 36 | notes: "s_7" 37 | name: "s_7" 38 | id: 39 | public: True # If True, the run is displayed in the wandb log; otherwise not.
40 | config: 41 | grid_size: ${data.grid_size} 42 | 43 | # parameters for TensorBoard Visualization 44 | tensorboard: 45 | log: False 46 | -------------------------------------------------------------------------------- /conf/training/7_area2.yaml: -------------------------------------------------------------------------------- 1 | # Ref: https://github.com/chrischoy/SpatioTemporalSegmentation/blob/master/config.py 2 | # @package training 3 | epochs: 100 4 | num_workers: 4 5 | batch_size: 4 6 | shuffle: True 7 | cuda: 0 8 | precompute_multi_scale: False # Compute multiscale features on CPU for faster training / inference 9 | optim: 10 | base_lr: 0.001 11 | grad_clip: -1 12 | # accumulated_gradient: -1 # Accumulate gradient accumulated_gradient * batch_size 13 | optimizer: 14 | class: Adam 15 | params: 16 | lr: ${training.optim.base_lr} # The path is cut from training 17 | weight_decay: 0 18 | lr_scheduler: ${lr_scheduler} 19 | bn_scheduler: 20 | bn_policy: "step_decay" 21 | params: 22 | bn_momentum: 0.1 23 | bn_decay: 0.5 24 | decay_step: 20 25 | bn_clip: 1e-2 26 | weight_name: "F1" # Used during resume; selects which model to load from [miou, macc, acc..., latest] 27 | enable_cudnn: True 28 | checkpoint_dir: "" 29 | 30 | # These arguments define which model, dataset and task are created for benchmarking 31 | # parameters for Weights and Biases 32 | wandb: 33 | entity: "binbin" 34 | project: compare-area2 #panoptic-boost 35 | log: True 36 | notes: "s_7" 37 | name: "s_7" 38 | id: 39 | public: True # If True, the run is displayed in the wandb log; otherwise not. 40 | config: 41 | grid_size: ${data.grid_size} 42 | 43 | # parameters for TensorBoard Visualization 44 | tensorboard: 45 | log: False 46 | -------------------------------------------------------------------------------- /conf/training/7_area3.yaml: -------------------------------------------------------------------------------- 1 | # Ref: https://github.com/chrischoy/SpatioTemporalSegmentation/blob/master/config.py 2 | # @package training 3 | epochs: 100 4 | num_workers: 4 5 | batch_size: 4 6 | shuffle: True 7 | cuda: 0 8 | precompute_multi_scale: False # Compute multiscale features on CPU for faster training / inference 9 | optim: 10 | base_lr: 0.001 11 | grad_clip: -1 12 | # accumulated_gradient: -1 # Accumulate gradient accumulated_gradient * batch_size 13 | optimizer: 14 | class: Adam 15 | params: 16 | lr: ${training.optim.base_lr} # The path is cut from training 17 | weight_decay: 0 18 | lr_scheduler: ${lr_scheduler} 19 | bn_scheduler: 20 | bn_policy: "step_decay" 21 | params: 22 | bn_momentum: 0.1 23 | bn_decay: 0.5 24 | decay_step: 20 25 | bn_clip: 1e-2 26 | weight_name: "F1" # Used during resume; selects which model to load from [miou, macc, acc..., latest] 27 | enable_cudnn: True 28 | checkpoint_dir: "" 29 | 30 | # These arguments define which model, dataset and task are created for benchmarking 31 | # parameters for Weights and Biases 32 | wandb: 33 | entity: "binbin" 34 | project: compare-area3 #panoptic-boost 35 | log: True 36 | notes: "s_7" 37 | name: "s_7" 38 | id: 39 | public: True # If True, the run is displayed in the wandb log; otherwise not.
40 | config: 41 | grid_size: ${data.grid_size} 42 | 43 | # parameters for TensorBoard Visualization 44 | tensorboard: 45 | log: False 46 | -------------------------------------------------------------------------------- /conf/training/7_area4.yaml: -------------------------------------------------------------------------------- 1 | # Ref: https://github.com/chrischoy/SpatioTemporalSegmentation/blob/master/config.py 2 | # @package training 3 | epochs: 100 4 | num_workers: 4 5 | batch_size: 4 6 | shuffle: True 7 | cuda: 0 8 | precompute_multi_scale: False # Compute multiscale features on CPU for faster training / inference 9 | optim: 10 | base_lr: 0.001 11 | grad_clip: -1 12 | # accumulated_gradient: -1 # Accumulate gradient accumulated_gradient * batch_size 13 | optimizer: 14 | class: Adam 15 | params: 16 | lr: ${training.optim.base_lr} # The path is cut from training 17 | weight_decay: 0 18 | lr_scheduler: ${lr_scheduler} 19 | bn_scheduler: 20 | bn_policy: "step_decay" 21 | params: 22 | bn_momentum: 0.1 23 | bn_decay: 0.5 24 | decay_step: 20 25 | bn_clip: 1e-2 26 | weight_name: "F1" # Used during resume; selects which model to load from [miou, macc, acc..., latest] 27 | enable_cudnn: True 28 | checkpoint_dir: "" 29 | 30 | # These arguments define which model, dataset and task are created for benchmarking 31 | # parameters for Weights and Biases 32 | wandb: 33 | entity: "binbin" 34 | project: compare-area4 #panoptic-boost 35 | log: True 36 | notes: "s_7" 37 | name: "s_7" 38 | id: 39 | public: True # If True, the run is displayed in the wandb log; otherwise not. 40 | config: 41 | grid_size: ${data.grid_size} 42 | 43 | # parameters for TensorBoard Visualization 44 | tensorboard: 45 | log: False 46 | -------------------------------------------------------------------------------- /conf/training/8_area1.yaml: -------------------------------------------------------------------------------- 1 | # Ref: https://github.com/chrischoy/SpatioTemporalSegmentation/blob/master/config.py 2 | # @package training 3 | epochs: 100 4 | num_workers: 4 5 | batch_size: 4 6 | shuffle: True 7 | cuda: 0 8 | precompute_multi_scale: False # Compute multiscale features on CPU for faster training / inference 9 | optim: 10 | base_lr: 0.001 11 | grad_clip: -1 12 | # accumulated_gradient: -1 # Accumulate gradient accumulated_gradient * batch_size 13 | optimizer: 14 | class: Adam 15 | params: 16 | lr: ${training.optim.base_lr} # The path is cut from training 17 | weight_decay: 0 18 | lr_scheduler: ${lr_scheduler} 19 | bn_scheduler: 20 | bn_policy: "step_decay" 21 | params: 22 | bn_momentum: 0.1 23 | bn_decay: 0.5 24 | decay_step: 20 25 | bn_clip: 1e-2 26 | weight_name: "F1" # Used during resume; selects which model to load from [miou, macc, acc..., latest] 27 | enable_cudnn: True 28 | checkpoint_dir: "" 29 | 30 | # These arguments define which model, dataset and task are created for benchmarking 31 | # parameters for Weights and Biases 32 | wandb: 33 | entity: "binbin" 34 | project: compare-area1 #panoptic-boost 35 | log: True 36 | notes: "s_8" 37 | name: "s_8" 38 | id: 39 | public: True # If True, the run is displayed in the wandb log; otherwise not.
40 | config: 41 | grid_size: ${data.grid_size} 42 | 43 | # parameters for TensorBoard Visualization 44 | tensorboard: 45 | log: False 46 | -------------------------------------------------------------------------------- /conf/training/8_area2.yaml: -------------------------------------------------------------------------------- 1 | # Ref: https://github.com/chrischoy/SpatioTemporalSegmentation/blob/master/config.py 2 | # @package training 3 | epochs: 100 4 | num_workers: 4 5 | batch_size: 4 6 | shuffle: True 7 | cuda: 0 8 | precompute_multi_scale: False # Compute multiscale features on CPU for faster training / inference 9 | optim: 10 | base_lr: 0.001 11 | grad_clip: -1 12 | # accumulated_gradient: -1 # Accumulate gradient accumulated_gradient * batch_size 13 | optimizer: 14 | class: Adam 15 | params: 16 | lr: ${training.optim.base_lr} # The path is cut from training 17 | weight_decay: 0 18 | lr_scheduler: ${lr_scheduler} 19 | bn_scheduler: 20 | bn_policy: "step_decay" 21 | params: 22 | bn_momentum: 0.1 23 | bn_decay: 0.5 24 | decay_step: 20 25 | bn_clip: 1e-2 26 | weight_name: "F1" # Used during resume; selects which model to load from [miou, macc, acc..., latest] 27 | enable_cudnn: True 28 | checkpoint_dir: "" 29 | 30 | # These arguments define which model, dataset and task are created for benchmarking 31 | # parameters for Weights and Biases 32 | wandb: 33 | entity: "binbin" 34 | project: compare-area2 #panoptic-boost 35 | log: True 36 | notes: "s_8" 37 | name: "s_8" 38 | id: 39 | public: True # If True, the run is displayed in the wandb log; otherwise not. 40 | config: 41 | grid_size: ${data.grid_size} 42 | 43 | # parameters for TensorBoard Visualization 44 | tensorboard: 45 | log: False 46 | -------------------------------------------------------------------------------- /conf/training/8_area3.yaml: -------------------------------------------------------------------------------- 1 | # Ref: https://github.com/chrischoy/SpatioTemporalSegmentation/blob/master/config.py 2 | # @package training 3 | epochs: 100 4 | num_workers: 4 5 | batch_size: 4 6 | shuffle: True 7 | cuda: 0 8 | precompute_multi_scale: False # Compute multiscale features on CPU for faster training / inference 9 | optim: 10 | base_lr: 0.001 11 | grad_clip: -1 12 | # accumulated_gradient: -1 # Accumulate gradient accumulated_gradient * batch_size 13 | optimizer: 14 | class: Adam 15 | params: 16 | lr: ${training.optim.base_lr} # The path is cut from training 17 | weight_decay: 0 18 | lr_scheduler: ${lr_scheduler} 19 | bn_scheduler: 20 | bn_policy: "step_decay" 21 | params: 22 | bn_momentum: 0.1 23 | bn_decay: 0.5 24 | decay_step: 20 25 | bn_clip: 1e-2 26 | weight_name: "F1" # Used during resume; selects which model to load from [miou, macc, acc..., latest] 27 | enable_cudnn: True 28 | checkpoint_dir: "" 29 | 30 | # These arguments define which model, dataset and task are created for benchmarking 31 | # parameters for Weights and Biases 32 | wandb: 33 | entity: "binbin" 34 | project: compare-area3 #panoptic-boost 35 | log: True 36 | notes: "s_8" 37 | name: "s_8" 38 | id: 39 | public: True # If True, the run is displayed in the wandb log; otherwise not.
40 | config: 41 | grid_size: ${data.grid_size} 42 | 43 | # parameters for TensorBoard Visualization 44 | tensorboard: 45 | log: False 46 | -------------------------------------------------------------------------------- /conf/training/8_area4.yaml: -------------------------------------------------------------------------------- 1 | # Ref: https://github.com/chrischoy/SpatioTemporalSegmentation/blob/master/config.py 2 | # @package training 3 | epochs: 100 4 | num_workers: 4 5 | batch_size: 4 6 | shuffle: True 7 | cuda: 0 8 | precompute_multi_scale: False # Compute multiscale features on CPU for faster training / inference 9 | optim: 10 | base_lr: 0.001 11 | grad_clip: -1 12 | # accumulated_gradient: -1 # Accumulate gradient accumulated_gradient * batch_size 13 | optimizer: 14 | class: Adam 15 | params: 16 | lr: ${training.optim.base_lr} # The path is cut from training 17 | weight_decay: 0 18 | lr_scheduler: ${lr_scheduler} 19 | bn_scheduler: 20 | bn_policy: "step_decay" 21 | params: 22 | bn_momentum: 0.1 23 | bn_decay: 0.5 24 | decay_step: 20 25 | bn_clip: 1e-2 26 | weight_name: "F1" # Used during resume; selects which model to load from [miou, macc, acc..., latest] 27 | enable_cudnn: True 28 | checkpoint_dir: "" 29 | 30 | # These arguments define which model, dataset and task are created for benchmarking 31 | # parameters for Weights and Biases 32 | wandb: 33 | entity: "binbin" 34 | project: compare-area4 #panoptic-boost 35 | log: True 36 | notes: "s_8" 37 | name: "s_8" 38 | id: 39 | public: True # If True, the run is displayed in the wandb log; otherwise not. 40 | config: 41 | grid_size: ${data.grid_size} 42 | 43 | # parameters for TensorBoard Visualization 44 | tensorboard: 45 | log: False 46 | -------------------------------------------------------------------------------- /conf/training/default.yaml: -------------------------------------------------------------------------------- 1 | # @package training 2 | # These arguments define the training hyper-parameters 3 | epochs: 100 4 | num_workers: 6 5 | batch_size: 16 6 | shuffle: True 7 | cuda: 0 # -1 -> no CUDA; otherwise use the specified device index 8 | precompute_multi_scale: False # Compute multiscale features on CPU for faster training / inference 9 | optim: 10 | base_lr: 0.001 11 | # accumulated_gradient: -1 # Accumulate gradient accumulated_gradient * batch_size 12 | grad_clip: -1 13 | optimizer: 14 | class: Adam 15 | params: 16 | lr: ${training.optim.base_lr} # The path is cut from training 17 | lr_scheduler: ${lr_scheduler} 18 | bn_scheduler: 19 | bn_policy: "step_decay" 20 | params: 21 | bn_momentum: 0.1 22 | bn_decay: 0.9 23 | decay_step: 10 24 | bn_clip: 1e-2 25 | weight_name: "latest" # Used during resume; selects which model to load from [miou, macc, acc..., latest] 26 | enable_cudnn: True 27 | checkpoint_dir: "" 28 | 29 | # These arguments define which model, dataset and task are created for benchmarking 30 | # parameters for Weights and Biases 31 | wandb: 32 | entity: "" 33 | project: default 34 | log: True 35 | notes: 36 | name: 37 | public: True # If True, the run is displayed in the wandb log; otherwise not.
38 | config: 39 | model_name: ${model_name} 40 | 41 | # parameters for TensorBoard Visualization 42 | tensorboard: 43 | log: True 44 | -------------------------------------------------------------------------------- /conf/training/treeins.yaml: -------------------------------------------------------------------------------- 1 | # Ref: https://github.com/chrischoy/SpatioTemporalSegmentation/blob/master/config.py 2 | # @package training 3 | epochs: 2 #TO ADAPT: specify number of training epochs (e.g. 150) 4 | num_workers: 0 5 | batch_size: 4 #TO ADAPT: specify batch size 6 | shuffle: True 7 | cuda: 0 8 | precompute_multi_scale: False # Compute multiscale features on CPU for faster training / inference 9 | optim: 10 | base_lr: 0.001 11 | grad_clip: -1 12 | # accumulated_gradient: -1 # Accumulate gradient accumulated_gradient * batch_size 13 | optimizer: 14 | class: Adam 15 | params: 16 | lr: ${training.optim.base_lr} # The path is cut from training 17 | weight_decay: 0 18 | lr_scheduler: ${lr_scheduler} 19 | bn_scheduler: 20 | bn_policy: "step_decay" 21 | params: 22 | bn_momentum: 0.1 23 | bn_decay: 0.5 24 | decay_step: 20 25 | bn_clip: 1e-2 26 | weight_name: "latest" # Used during resume; selects which model to load from [miou, macc, acc..., latest] 27 | enable_cudnn: True 28 | checkpoint_dir: "" 29 | 30 | # These arguments define which model, dataset and task are created for benchmarking 31 | # parameters for Weights and Biases 32 | wandb: 33 | entity: "maciej-wielgosz-nibio" #TO ADAPT: change to your own wandb account name 34 | project: test 35 | log: True 36 | notes: "treeins" 37 | name: "treeins" #TO ADAPT: specify name of experiment that will be shown on wandb 38 | id: 39 | public: True # If True, the run is displayed in the wandb log; otherwise not.
40 | config: 41 | grid_size: ${data.grid_size} 42 | 43 | # parameters for TensorBoard Visualization 44 | tensorboard: 45 | log: False 46 | -------------------------------------------------------------------------------- /conf/visualization/default.yaml: -------------------------------------------------------------------------------- 1 | # @package _group_ 2 | activate: True 3 | format: "pointcloud" # image will come later 4 | num_samples_per_epoch: 10 5 | deterministic: True # False -> Randomly sample elements from epoch to epoch 6 | saved_keys: 7 | pos: [['x', 'float'], ['y', 'float'], ['z', 'float']] 8 | y: [['l', 'float']] 9 | pred: [['p', 'float']] 10 | -------------------------------------------------------------------------------- /conf/visualization/eval.yaml: -------------------------------------------------------------------------------- 1 | # @package _group_ 2 | activate: True 3 | format: "pointcloud" # image will come later 4 | num_samples_per_epoch: -1 5 | deterministic: True # False -> Randomly sample elements from epoch to epoch 6 | saved_keys: 7 | pos: [['x', 'float'], ['y', 'float'], ['z', 'float']] 8 | y: [['l', 'float']] 9 | pred: [['p', 'float']] 10 | 11 | -------------------------------------------------------------------------------- /eval.py: -------------------------------------------------------------------------------- 1 | import hydra 2 | from hydra.core.global_hydra import GlobalHydra 3 | from omegaconf import OmegaConf 4 | from torch_points3d.trainer import Trainer 5 | 6 | 7 | @hydra.main(config_path="conf", config_name="eval") 8 | def main(cfg): 9 | OmegaConf.set_struct(cfg, False) # This allows the getattr and hasattr methods to function correctly 10 | if cfg.pretty_print: 11 | print(OmegaConf.to_yaml(cfg)) 12 | 13 | trainer = Trainer(cfg) 14 | trainer.eval(stage_name="test") 15 | 16 | # https://github.com/facebookresearch/hydra/issues/440 17 | GlobalHydra.get_state().clear() 18 | return 0 19 | 20 | 21 | if __name__ == "__main__": 22 | main() 23 | -------------------------------------------------------------------------------- /forward_scripts/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SmartForest-no/SegmentAnyTree/361650805e3d7c9664b9d5c44ac6af8047942832/forward_scripts/__init__.py -------------------------------------------------------------------------------- /forward_scripts/conf/config.yaml: -------------------------------------------------------------------------------- 1 | num_workers: 2 2 | batch_size: 16 3 | cuda: 0 4 | weight_name: "miou" # Used during resume; selects which model to load from [miou, macc, acc..., latest] 5 | enable_cudnn: True 6 | checkpoint_dir: "/home/nicolas/deeppointcloud-benchmarks/outputs/2020-02-24/15-02-47" # "{your_path}/outputs/2020-01-28/11-04-13" for example 7 | model_name: TOTO 8 | enable_dropout: False 9 | output_path: "/home/nicolas/deeppointcloud-benchmarks/forward_scripts/out" # Where the output goes 10 | input_path: "/home/nicolas/deeppointcloud-benchmarks/forward_scripts/test_data" # Folder containing the input data 11 | 12 | # Dataset specific 13 | defaults: 14 | - dataset: "" 15 | optional: True -------------------------------------------------------------------------------- /forward_scripts/conf/dataset/shapenet.yaml: -------------------------------------------------------------------------------- 1 | data: 2 | forward_category: "Cap" # Category of the data in the folder to be inferred
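As a usage sketch only (the checkpoint directory, model name and category below are placeholders to adapt), the forward config above is meant to be consumed by forward_scripts/forward.py, and any of its keys can be overridden on the Hydra command line, e.g.:

    python forward_scripts/forward.py dataset=shapenet checkpoint_dir=/your_path/outputs/2020-01-28/11-04-13 model_name=YourTrainedModel data.forward_category=Cap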
-------------------------------------------------------------------------------- /metrics/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SmartForest-no/SegmentAnyTree/361650805e3d7c9664b9d5c44ac6af8047942832/metrics/__init__.py -------------------------------------------------------------------------------- /metrics/metric.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | from nibio_inference.las_to_pandas import las_to_pandas 3 | 4 | 5 | 6 | class Metric(object): 7 | gt_label = 'treeID' 8 | preds_label = 'preds_instance_segmentation' 9 | 10 | def __init__(self, laz_file_path, verbose=False): 11 | self.df = las_to_pandas(laz_file_path) 12 | self.verbose = verbose 13 | 14 | 15 | def confusion_matrix(self): 16 | # Create a confusion matrix 17 | confusion = pd.crosstab(self.df[self.gt_label], self.df[self.preds_label]) 18 | # save the confusion matrix to a csv file 19 | confusion.to_csv('confusion_matrix.csv') 20 | 21 | return confusion 22 | 23 | 24 | def __call__(self): 25 | self.confusion_matrix() 26 | 27 | if __name__ == "__main__": 28 | import argparse 29 | parser = argparse.ArgumentParser() 30 | parser.add_argument('-i', '--input_file', type=str, required=True, help="Input file.") 31 | parser.add_argument('-v', '--verbose', action='store_true', help="Print verbose output.") 32 | args = parser.parse_args() 33 | 34 | 35 | metric = Metric(args.input_file, verbose=args.verbose) 36 | metric() 37 | print(metric.confusion_matrix()) 38 | -------------------------------------------------------------------------------- /model_file/.gitattributes: -------------------------------------------------------------------------------- 1 | PointGroup-PAPER.pt filter=lfs diff=lfs merge=lfs -text 2 | -------------------------------------------------------------------------------- /model_file/.hydra/overrides.yaml: -------------------------------------------------------------------------------- 1 | - task=panoptic 2 | - data=panoptic/treeins_rad8 3 | - models=panoptic/area4_ablation_3heads_5 4 | - model_name=PointGroup-PAPER 5 | - training=treeins 6 | - job_name=mls_data_run 7 | - data.dataroot=/home/datascience/tmp_out_folder/utm2local 8 | - batch_size=6 9 | - epochs=100 10 | -------------------------------------------------------------------------------- /model_file/PointGroup-PAPER.pt: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:0b4d74b4644e37a16f59008ad0f5c62894fc4d2d906f3abd803bbfc5b5dd803a 3 | size 665666007 4 | -------------------------------------------------------------------------------- /mypy.ini: -------------------------------------------------------------------------------- 1 | [mypy] 2 | ignore_missing_imports = True 3 | 4 | [mypy-torch_points3d.modules.MinkowskiEngine.*] 5 | ignore_errors = True 6 | -------------------------------------------------------------------------------- /nibio_inference/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SmartForest-no/SegmentAnyTree/361650805e3d7c9664b9d5c44ac6af8047942832/nibio_inference/__init__.py -------------------------------------------------------------------------------- /nibio_inference/bring_back_to_utm_coordinates.py: -------------------------------------------------------------------------------- 1 | import json 2 | from 
nibio_inference.ply_to_pandas import ply_to_pandas 3 | from nibio_inference.pandas_to_ply import pandas_to_ply 4 | 5 | 6 | def bring_back_to_utm_coordinates(path_to_new_file, path_to_the_old_file): 7 | points_df = ply_to_pandas(path_to_new_file) 8 | 9 | min_values_path = path_to_the_old_file.replace('.ply', '_min_values.json') 10 | 11 | with open(min_values_path, 'r') as f: 12 | min_values = json.load(f) 13 | 14 | min_x, min_y, min_z = min_values 15 | 16 | # add the min values back to x, y, z 17 | points_df['x'] = points_df['x'] + min_x 18 | points_df['y'] = points_df['y'] + min_y 19 | points_df['z'] = points_df['z'] + min_z 20 | 21 | # save the modified file 22 | pandas_to_ply(points_df, output_file_path=path_to_new_file) -------------------------------------------------------------------------------- /nibio_inference/clear_cache.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import os 3 | import yaml 4 | 5 | parser = argparse.ArgumentParser(description='Read eval.yaml location') 6 | parser.add_argument('--eval_yaml', type=str, required=True, help='Path to eval.yaml file') 7 | args = parser.parse_args() 8 | 9 | with open(args.eval_yaml) as f: 10 | data = yaml.load(f, Loader=yaml.FullLoader) 11 | 12 | # go to checkpoint_dir and the '.hydra' folder, open overrides.yaml and find data.dataroot there 13 | 14 | with open(os.path.join(data['checkpoint_dir'], '.hydra/overrides.yaml')) as f: 15 | data = yaml.load(f, Loader=yaml.FullLoader) 16 | 17 | path = None 18 | for item in data: 19 | if 'data.dataroot' in item: 20 | path = item.split('=')[1].strip() 21 | 22 | if path is None: 23 | raise ValueError('data.dataroot not found in overrides.yaml') 24 | 25 | # add treeinsfused/processed_0.2_test to the path 26 | path = os.path.join(path, 'treeinsfused') 27 | path = os.path.join(path, 'processed_0.2_test') 28 | 29 | print(f"Clearing: {path}") 30 | 31 | # clear all the files in the directory if the path exists 32 | if os.path.exists(path): 33 | for filename in os.listdir(path): 34 | filepath = os.path.join(path, filename) 35 | if os.path.isfile(filepath): 36 | os.remove(filepath) 37 | print(f"Removed: {filepath}") 38 | 39 | else: 40 | print(f"Path does not exist: {path}") 41 | -------------------------------------------------------------------------------- /nibio_inference/fix_naming_of_input_files.py: -------------------------------------------------------------------------------- 1 | import os 2 | import argparse 3 | import random 4 | 5 | def rename_files(input_folder): 6 | # Generate as many unique four-digit random numbers as there are entries in the directory, once, so each file gets a distinct prefix 7 | random_numbers = random.sample(range(1000, 9999), len(os.listdir(input_folder))) 8 | # List all files in the given directory 9 | for filename in os.listdir(input_folder): 10 | # Construct the file path 11 | filepath = os.path.join(input_folder, filename) 12 | # Check if it's a file 13 | if os.path.isfile(filepath): 14 | # Prepend a random four-digit number to the filename, as a string 15 | new_filename = str(random_numbers.pop()) + '_' + filename 16 | # Replace '-' and spaces with '_' in the new filename 17 | new_filename = new_filename.replace('-', '_') 18 | new_filename = new_filename.replace(' ', '_')
19 | # Construct the new file path 20 | new_filepath = os.path.join(input_folder, new_filename) 21 | # Rename the file 22 | os.rename(filepath, new_filepath) 23 | print(f"Renamed: {filepath} to {new_filepath}") 24 | 25 | def main(): 26 | parser = argparse.ArgumentParser(description='Normalize filenames within a directory: prefix a random number and replace "-" and spaces with "_".') 27 | parser.add_argument('input_folder', type=str, help='Path to the input folder') 28 | 29 | args = parser.parse_args() 30 | rename_files(args.input_folder) 31 | 32 | if __name__ == '__main__': 33 | main() 34 | -------------------------------------------------------------------------------- /nibio_inference/merge_inference_results_in_folders.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil # used to remove the temporary folder at the end 3 | import pandas as pd 4 | 5 | from nibio_inference.merge_inference_results import main as merge_main 6 | 7 | 8 | def process_folders(list_of_folders, output_file, verbose): 9 | """Process the list of folders and merge metrics.""" 10 | 11 | # Create a temporary folder to hold the merged metrics 12 | tmp_folder = 'tmp_merged_metrics' 13 | os.makedirs(tmp_folder, exist_ok=True) 14 | 15 | # Traverse directories 16 | for folder in list_of_folders: 17 | if verbose: 18 | print(f"Processing folder {folder}") 19 | # Remove trailing slash if it exists 20 | folder = folder.rstrip('/') 21 | # perform merge and generate a temporary file which is named after the folder 22 | temp_output_file = os.path.join(tmp_folder, os.path.basename(folder) + '.csv') 23 | merge_main(folder, temp_output_file) 24 | 25 | # read the per-folder csv files back in the same order as list_of_folders and concatenate them 26 | frames = [] 27 | for folder in list_of_folders: 28 | temp_output_file = os.path.join(tmp_folder, os.path.basename(folder.rstrip('/')) + '.csv') 29 | frames.append(pd.read_csv(temp_output_file)) 30 | df = pd.concat(frames, ignore_index=True) 31 | 32 | # Delete the temporary folder 33 | shutil.rmtree(tmp_folder) 34 | 35 | # Write to csv 36 | df.to_csv(output_file, index=False) 37 | 38 | 39 | if __name__ == '__main__': 40 | list_of_folders = [ 41 | "/home/nibio/data/test_data_agnostic_instanceSeg/results_/", 42 | "/home/nibio/data/test_data_agnostic_instanceSeg/results_1000/", 43 | "/home/nibio/data/test_data_agnostic_instanceSeg/results_500/", 44 | "/home/nibio/data/test_data_agnostic_instanceSeg/results_100/", 45 | "/home/nibio/data/test_data_agnostic_instanceSeg/results_10/" 46 | ] 47 | 48 | output_file = 'merged_metrics_all.csv' 49 | verbose = True 50 | 51 | process_folders(list_of_folders, output_file, verbose) 52 | -------------------------------------------------------------------------------- /nibio_inference/pandas_to_ply.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pandas as pd 3 | from plyfile import PlyElement, PlyData 4 | 5 | def pandas_to_ply(csv, csv_file_provided=False, output_file_path=None): 6 | # If a csv file path was provided, read it; otherwise 'csv' is assumed to already be a DataFrame 7 | if csv_file_provided: 8 | df = pd.read_csv(csv) 9 | else: 10 | df = csv 11 | 12 | # remove duplicated columns 13 | df = df.loc[:,~df.columns.duplicated()] 14 | 15 | # Replace spaces in column names with underscores 16 | df.columns = [col.replace(' ', '_') for col in df.columns] 17 | 18 | # Create a structured numpy array with dtype based on the columns of the DataFrame
19 | dtypes = [(col, 'f4') for col in df.columns] 20 | data = np.array(list(map(tuple, df.to_records(index=False))), dtype=dtypes) 21 | 22 | # Create a new PlyElement 23 | vertex = PlyElement.describe(data, 'vertex') 24 | 25 | # Derive the output path from the csv path if none was given 26 | if output_file_path is None: 27 | if csv_file_provided: 28 | output_file_path = csv.replace('.csv', '.ply') 29 | else: 30 | raise ValueError('output_file_path must be provided when a DataFrame is passed in') 31 | 32 | # Save the data to a PLY file 33 | ply_data = PlyData([vertex], text=False) 34 | ply_data.write(output_file_path) 35 | 36 | if __name__ == "__main__": 37 | csv_path = "/path/to/your/file.csv" 38 | ply_path = "/path/to/your/file.ply" 39 | 40 | pandas_to_ply(csv_path, csv_file_provided=True, output_file_path=ply_path) 41 | -------------------------------------------------------------------------------- /nibio_inference/pipeline_utm2local_parallel.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import json 3 | import os 4 | from joblib import Parallel, delayed 5 | from nibio_inference.las_to_pandas import las_to_pandas 6 | from nibio_inference.pandas_to_ply import pandas_to_ply 7 | from nibio_inference.ply_to_pandas import ply_to_pandas 8 | 9 | 10 | def process_file(filename, input_folder, output_folder): 11 | input_file_path = os.path.join(input_folder, filename) 12 | base_filename = os.path.splitext(filename)[0] 13 | output_file_path = os.path.join(output_folder, f"{base_filename}_out.ply") 14 | json_file_path = os.path.join(output_folder, f"{base_filename}_out_min_values.json") 15 | 16 | if filename.endswith((".ply", ".las", ".laz")): 17 | modification_pipeline(input_file_path, output_file_path, json_file_path, filename.endswith(".ply")) 18 | 19 | 20 | def modification_pipeline(input_file_path, output_file_path, json_file_path, is_ply): 21 | coord_names = ['x', 'y', 'z'] if is_ply else ['X', 'Y', 'Z'] 22 | print(f"Processing in utm2local: {input_file_path}") 23 | 24 | points_df = ply_to_pandas(input_file_path) if is_ply else las_to_pandas(input_file_path) 25 | 26 | min_values = {name: points_df[name].min() for name in coord_names} 27 | for name in coord_names: 28 | points_df[name] -= min_values[name] 29 | 30 | min_values_list = [float(val) for val in min_values.values()] 31 | 32 | with open(json_file_path, 'w') as f: 33 | print(f"Saving min values to: {json_file_path}") 34 | json.dump(min_values_list, f) 35 | 36 | pandas_to_ply(points_df, csv_file_provided=False, output_file_path=output_file_path) 37 | 38 | 39 | if __name__ == "__main__": 40 | parser = argparse.ArgumentParser(description='Process las or laz files and save results as ply files.') 41 | parser.add_argument('-i', '--input_folder', type=str, help='Path to the input folder containing ply files.') 42 | parser.add_argument('-o', '--output_folder', type=str, help='Path to the output folder to save las files.') 43 | 44 | args = parser.parse_args() 45 | 46 | os.makedirs(args.output_folder, exist_ok=True) 47 | 48 | filenames = os.listdir(args.input_folder) 49 | 50 | print(f"Processing {len(filenames)} files...") 51 | Parallel(n_jobs=4)( 52 | delayed(process_file)(filename, args.input_folder, args.output_folder) for filename in filenames 53 | ) 54 | print(f"Output files are saved in: {args.output_folder}") 55 | -------------------------------------------------------------------------------- /nibio_inference/remove_outer_points_for_instance.py: -------------------------------------------------------------------------------- 1 | import os 2 | import jaklas 3 | from nibio_inference.las_to_pandas import las_to_pandas 4 | import argparse 5 | 6 | 7 | def remove_outer_points_for_instance(las_file, output_file_path):
8 | """ 9 | Remove points outside of the instance bounding box 10 | """ 11 | df = las_to_pandas(las_file) 12 | 13 | # keep only the points that are not flagged as outer points (classification == 3) 14 | df = df[df['classification'] != 3] 15 | 16 | # use jaklas to save the dataframe to a las file under the new name 17 | jaklas.write(df, output_file_path) 18 | 19 | 20 | if __name__ == "__main__": 21 | parser = argparse.ArgumentParser(description='Remove points outside of the instance bounding box.') 22 | 23 | # read a folder name 24 | parser.add_argument('-i', '--input_folder', type=str, help='Path to the input folder containing modified files.') 25 | parser.add_argument('-o', '--output_folder', type=str, help='Path to the output folder to save reverted files.') 26 | 27 | args = parser.parse_args() 28 | 29 | # read all the files in the folder 30 | las_files = os.listdir(args.input_folder) 31 | 32 | # keep only las or laz files 33 | las_files = [f for f in las_files if f.endswith(".las") or f.endswith(".laz")] 34 | 35 | # loop over all the files 36 | for las_file in las_files: 37 | print("Processing file: {}".format(las_file)) 38 | # remove the outer points 39 | remove_outer_points_for_instance(os.path.join(args.input_folder, las_file), os.path.join(args.output_folder, las_file)) 40 | 41 | # print the number of files 42 | print("Processed {} files. Done.".format(len(las_files))) 43 | -------------------------------------------------------------------------------- /nibio_inference/rename_result_files_instance.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os 3 | import yaml 4 | 5 | from nibio_inference.bring_back_to_utm_coordinates import bring_back_to_utm_coordinates 6 | 7 | def rename_files(yaml_file, directory): 8 | try: 9 | with open(yaml_file, 'r') as file: 10 | # Load the YAML file 11 | data = yaml.load(file, Loader=yaml.FullLoader) 12 | 13 | # Get the fold section 14 | fold_section = data.get('data', {}).get('fold', []) 15 | 16 | for index, file_path in enumerate(fold_section): 17 | 18 | # Extract the file name from the path 19 | file_name = os.path.basename(file_path) 20 | 21 | # The inference step wrote this file as result_<index>.ply 22 | old_file_name = f'result_{index}.ply' 23 | 24 | # use file_name to create the new name by prepending instance_segmentation_ 25 | new_file_name = 'instance_segmentation_' + file_name 26 | 27 | old_file_path = os.path.join(directory, old_file_name) 28 | new_file_path = os.path.join(directory, new_file_name) 29 | 30 | # Rename the file 31 | os.rename(old_file_path, new_file_path) 32 | 33 | # bring_back_to_utm_coordinates(new_file_path, file_path) 34 | 35 | print(f'Renamed {old_file_path} to {new_file_path}') 36 | 37 | except Exception as e: 38 | print(f'An error occurred: {e}') 39 | 40 | if __name__ == '__main__': 41 | if len(sys.argv) != 3: 42 | print('Usage: python rename_result_files_instance.py <yaml_file> <directory>') 43 | sys.exit(1) 44 | 45 | yaml_file = sys.argv[1] # Path to the YAML file that contains the paths of the .ply files 46 | directory = sys.argv[2] # Path to the directory containing the .ply files after inference 47 | 48 | rename_files(yaml_file, directory) 49 |
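A minimal invocation sketch for the renaming script above (both paths are placeholders): pass the eval-time config whose data.fold section lists the original input files, plus the folder holding the inference outputs; each result_<index>.ply is then renamed to instance_segmentation_<original_file_name>:

    python nibio_inference/rename_result_files_instance.py /path/to/eval_config.yaml /path/to/inference_results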
-------------------------------------------------------------------------------- /nibio_inference/rename_result_files_segmentation.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os 3 | import yaml 4 | 5 | from nibio_inference.bring_back_to_utm_coordinates import bring_back_to_utm_coordinates 6 | 7 | def rename_files(yaml_file, directory): 8 | try: 9 | with open(yaml_file, 'r') as file: 10 | # Load the YAML file 11 | data = yaml.load(file, Loader=yaml.FullLoader) 12 | 13 | # Get the fold section 14 | fold_section = data.get('data', {}).get('fold', []) 15 | 16 | for index, file_path in enumerate(fold_section): 17 | # Extract the file name from the path 18 | file_name = os.path.basename(file_path) 19 | 20 | # The inference step wrote this file as semantic_result_<index>.ply 21 | old_file_name = f'semantic_result_{index}.ply' 22 | old_file_path = os.path.join(directory, old_file_name) 23 | 24 | # put semantic_segmentation_ in front of the file name 25 | new_file_name = 'semantic_segmentation_' + file_name 26 | new_file_path = os.path.join(directory, new_file_name) 27 | 28 | # Rename the file 29 | os.rename(old_file_path, new_file_path) 30 | 31 | # bring_back_to_utm_coordinates(new_file_path, file_path) 32 | 33 | print(f'Renamed {old_file_path} to {new_file_path}') 34 | 35 | except Exception as e: 36 | print(f'An error occurred: {e}') 37 | 38 | if __name__ == '__main__': 39 | if len(sys.argv) != 3: 40 | print('Usage: python rename_result_files_segmentation.py <yaml_file> <directory>') 41 | sys.exit(1) 42 | 43 | yaml_file = sys.argv[1] 44 | directory = sys.argv[2] 45 | 46 | rename_files(yaml_file, directory) 47 | -------------------------------------------------------------------------------- /nibio_sparsify/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SmartForest-no/SegmentAnyTree/361650805e3d7c9664b9d5c44ac6af8047942832/nibio_sparsify/__init__.py -------------------------------------------------------------------------------- /oracle_wrapper.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | 4 | PATH_DATA = '/home/datascience' # TODO: change this to the path taken from the config file 5 | DEBUG_MODE = False 6 | 7 | # copy the input data from the bucket to the local data folder 8 | def run_oracle_wrapper(): 9 | 10 | if DEBUG_MODE: 11 | # this is mapped in the docker run 12 | data_location = "/home/data_bucket" 13 | 14 | else: 15 | # get the input and output locations from the environment variables 16 | data_location = os.environ['OBJ_INPUT_LOCATION'] 17 | 18 | # remap the input and output locations 19 | data_location = data_location.replace("@axqlz2potslu", "").replace("oci://", "/mnt/") 20 | 21 | # create the output folder if it does not exist 22 | os.makedirs(PATH_DATA, exist_ok=True) 23 | 24 | # copy files from input_location to the input folder 25 | shutil.copytree(data_location, os.path.join(PATH_DATA, 'data')) 26 | 27 | if __name__ == '__main__': 28 | run_oracle_wrapper() -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "torch_points3d" 3 | version = "0.2.0" # This will be overridden by the CI at publish time 4 | description = "Point Cloud Deep Learning Extension Library for PyTorch" 5 | authors = ["Thomas Chaton ", "Nicolas Chaulet "] 6 | packages = [ 7 | {include = "torch_points3d"}, 8 | ] 9 | readme = "README.md"
10 | documentation = "https://torch-points3d.readthedocs.io/en/latest/" 11 | 12 | [tool.poetry.dependencies] 13 | python = "^3.7" 14 | matplotlib = "^3.1" 15 | hydra-core = "~1.0.0" 16 | wandb = "^0.8.18" 17 | tqdm = "^4.40" 18 | open3d = "0.12.0" 19 | torchnet = "^0.0.4" 20 | tensorboard = "^2.1" 21 | torch = "^1.7.0" 22 | torch-scatter = "^2.0.0" 23 | torch-sparse = "^0.6.10" 24 | torch-cluster = "^1.5.6" 25 | torch-geometric = "^1.7.1" 26 | pytorch_metric_learning = "^0.9.87.dev0" 27 | torch-points-kernels = "^0.7.0" 28 | numpy = "<1.20.0" 29 | scikit-image = "^0.16.2" 30 | numba = "^0.50.0" 31 | plyfile = "^0.7.2" 32 | gdown = "^3.12.0" 33 | types-six = "^0.1.6" 34 | types-requests = "^0.1.11" 35 | h5py = "^3.3.0" 36 | 37 | [tool.poetry.dev-dependencies] 38 | pylint = "^2.4" 39 | autopep8 = "^1.4" 40 | flake8 = "^3.7" 41 | rope = "^0.14.0" 42 | pre-commit = "^1.21.0" 43 | black = "^19.10b0" 44 | jupyterlab = "^1.2.6" 45 | pyvista = "^0.23.1" 46 | panel = "^0.8.0" 47 | param = "^1.9.3" 48 | codecov = "^2.0.16" 49 | gpustat = "^0.6.0" 50 | snakeviz = "^2.0.1" 51 | sphinx_rtd_theme = "^0.4.3" 52 | sphinx = "^2.4.4" 53 | sphinx-autobuild = "^0.7.1" 54 | mypy = "^0.770" 55 | ipywidgets = "^7.5.1" 56 | 57 | [tool.black] 58 | line-length = 120 59 | 60 | [build-system] 61 | requires = ["poetry>=1.0.0"] 62 | build-backend = "poetry.masonry.api" 63 | -------------------------------------------------------------------------------- /run_bash_in_podman_with_gpu.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | BASEDIR="$(dirname $(realpath ${0}))/" 4 | podman run --rm -it --device nvidia.com/gpu=all --security-opt=label=disable \ 5 | -v ${BASEDIR}/bucket_in_folder:/home/nibio/mutable-outside-world/bucket_in_folder:z \ 6 | -v ${BASEDIR}/bucket_out_folder:/home/nibio/mutable-outside-world/bucket_out_folder:z \ 7 | localhost/nibio/e2e-oracle-inst-seg:latest bash 8 | 9 | 10 | -------------------------------------------------------------------------------- /run_batch_inference.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Check if an argument is provided 4 | # if [ "$#" -ne 1 ]; then 5 | # echo "Usage: $0 <folder>" 6 | # exit 1 7 | # fi 8 | 9 | # Assign folder A to a variable; if it is empty, fall back to the default /home/nibio/mutable-outside-world/data_for_test 10 | folderA="$1" 11 | : "${folderA:=/home/nibio/mutable-outside-world/data_for_test}" 12 | 13 | 14 | # Check if folder A exists 15 | if [ ! -d "$folderA" ]; then 16 | echo "Folder A does not exist."
17 | exit 1 18 | fi 19 | 20 | # Create folder B and C 21 | folderB="/home/nibio/mutable-outside-world/B_temp_folder" 22 | folderC="/home/nibio/mutable-outside-world/data_for_test_results_final" 23 | folderTemp="/home/nibio/mutable-outside-world/temp_folder" 24 | 25 | # remove old folders 26 | rm -rf "$folderB" 27 | rm -rf "$folderC" 28 | rm -rf "$folderTemp" 29 | 30 | mkdir -p "$folderB" 31 | mkdir -p "$folderC" 32 | mkdir -p "$folderTemp" 33 | 34 | # Function to copy and process files 35 | process_files() { 36 | # Copy files to folder B 37 | cp "$@" "$folderB/" 38 | 39 | # Run inference script on folder B 40 | bash /home/nibio/mutable-outside-world/run_inference.sh "$folderB" "$folderTemp" 41 | 42 | # Copy results to folder C 43 | cp "$folderTemp/final_results"/* "$folderC/" 44 | } 45 | 46 | # Get a list of files in folder A 47 | files=($(find "$folderA" -maxdepth 1 -type f)) 48 | 49 | # Process files in chunks of 10 50 | for ((i=0; i<${#files[@]}; i+=10)); do 51 | # check if folder B is empty if not remove all files 52 | if [ "$(ls -A "$folderB")" ]; then 53 | rm -rf "$folderB"/* 54 | fi 55 | process_files "${files[@]:i:10}" 56 | done 57 | 58 | echo "Processing complete." 59 | -------------------------------------------------------------------------------- /run_docker_locally.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | CONTAINER_NAME="test_e2e_instance" 4 | IMAGE_NAME="nibio/e2e-instance" 5 | 6 | # Check if the container exists 7 | if [ $(docker container ls -a -q -f name=$CONTAINER_NAME) ]; then 8 | echo "Removing existing container $CONTAINER_NAME" 9 | docker container rm $CONTAINER_NAME 10 | else 11 | echo "Container $CONTAINER_NAME does not exist." 12 | fi 13 | 14 | # Check if the image exists 15 | # if [ $(docker image ls -q -f reference=$IMAGE_NAME) ]; then 16 | # echo "Removing existing image $IMAGE_NAME" 17 | # docker image rm $IMAGE_NAME 18 | # else 19 | # echo "Image $IMAGE_NAME does not exist." 20 | # fi 21 | 22 | # ./build.sh 23 | docker build -t $IMAGE_NAME . 
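# NOTE: this rebuild reuses the Docker layer cache; uncomment the image-removal
# block above when a completely clean rebuild of $IMAGE_NAME is needed.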
24 | 25 | echo "Running the container" 26 | # docker run -it --gpus all --name $CONTAINER_NAME $IMAGE_NAME > e2e-instance.log 2>&1 27 | 28 | docker run -it --gpus all \ 29 | --name $CONTAINER_NAME \ 30 | --mount type=bind,source=/home/nibio/mutable-outside-world/code/PanopticSegForLargeScalePointCloud_maciej/bucket_in_folder,target=/home/nibio/mutable-outside-world/bucket_in_folder \ 31 | --mount type=bind,source=/home/nibio/mutable-outside-world/code/PanopticSegForLargeScalePointCloud_maciej/bucket_out_folder,target=/home/nibio/mutable-outside-world/bucket_out_folder \ 32 | $IMAGE_NAME 33 | 34 | 35 | 36 | 37 | -------------------------------------------------------------------------------- /run_pipeline.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | 4 | python3 oracle_wrapper.py 5 | 6 | python3 train.py task=panoptic \ 7 | data=panoptic/treeins_rad8 \ 8 | models=panoptic/area4_ablation_3heads_5 \ 9 | model_name=PointGroup-PAPER \ 10 | training=treeins \ 11 | job_name=treeins_my_first_run \ 12 | epochs=$epochs \ 13 | batch_size=$batch_size \ 14 | cuda=$cuda 15 | 16 | -------------------------------------------------------------------------------- /run_podman_with_gpu.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | BASEDIR="$(dirname $(realpath ${0}))/" 4 | podman run --rm -it --device nvidia.com/gpu=all --security-opt=label=disable \ 5 | -v ${BASEDIR}/bucket_in_folder:/home/nibio/mutable-outside-world/bucket_in_folder:z \ 6 | -v ${BASEDIR}/bucket_out_folder:/home/nibio/mutable-outside-world/bucket_out_folder:z \ 7 | localhost/nibio/e2e-oracle-inst-seg:latest 8 | -------------------------------------------------------------------------------- /scripts/omegaconf2dict.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import os 3 | import os.path as osp 4 | import torch 5 | from omegaconf import OmegaConf 6 | import omegaconf 7 | 8 | 9 | def parse_args(): 10 | 11 | parser = argparse.ArgumentParser("a simple script to convert an omegaconf-based .pt checkpoint to plain dicts; you need omegaconf v1.4.1 or older in order to convert files") 12 | parser.add_argument('-f', help='path to the input .pt file', dest="file", type=str) 13 | parser.add_argument('--old', help='optional path where a backup of the original .pt file is saved', dest="old", type=str) 14 | parser.add_argument('-o', help='path to the output .pt file', dest="out", type=str) 15 | args = parser.parse_args() 16 | return args 17 | 18 | 19 | def convert(dico, exclude_keys=["models", "optimizer"], depth=0, verbose=True): 20 | if isinstance(dico, dict): 21 | for k, v in dico.items(): 22 | if k not in exclude_keys: 23 | print(depth * " ", k, type(v)) 24 | convert(v, depth=depth+1) 25 | if isinstance(v, omegaconf.dictconfig.DictConfig): 26 | dico[k] = OmegaConf.to_container(v) 27 | 28 | elif isinstance(dico, list): 29 | for i, v in enumerate(dico): 30 | print(depth * " ", i, type(v)) 31 | convert(v, depth=depth+1) 32 | if isinstance(v, omegaconf.dictconfig.DictConfig): 33 | dico[i] = OmegaConf.to_container(v) 34 | 35 | 36 | 37 | 38 | 39 | if __name__ == "__main__": 40 | args = parse_args() 41 | 42 | assert omegaconf.__version__ <= "1.4.1" 43 | dict_model = torch.load(args.file) 44 | if (args.old): 45 | torch.save(dict_model, args.old) 46 | convert(dict_model) 47 | torch.save(dict_model, args.out) 48 | 49 | # print(omegaconf.OmegaConf.to_container) 50 | --------------------------------------------------------------------------------
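For reference, a minimal sketch of the conversion that scripts/omegaconf2dict.py performs. This snippet is not a file from the repository, and the checkpoint layout at the bottom is made up for illustration: nested DictConfig values inside a checkpoint dict are swapped for plain containers so the .pt file can later be loaded without a matching omegaconf version.

import omegaconf
from omegaconf import OmegaConf


def to_plain(obj, exclude_keys=("models", "optimizer")):
    # Walk dicts and lists recursively and replace every DictConfig value
    # with a plain container, mirroring convert() in the script above.
    if isinstance(obj, dict):
        for k, v in obj.items():
            if k in exclude_keys:
                continue
            if isinstance(v, omegaconf.dictconfig.DictConfig):
                obj[k] = OmegaConf.to_container(v)
            else:
                to_plain(v, exclude_keys)
    elif isinstance(obj, list):
        for i, v in enumerate(obj):
            if isinstance(v, omegaconf.dictconfig.DictConfig):
                obj[i] = OmegaConf.to_container(v)
            else:
                to_plain(v, exclude_keys)


# Made-up checkpoint layout, for illustration only:
checkpoint = {"run_config": OmegaConf.create({"task": "panoptic", "epochs": 60})}
to_plain(checkpoint)
assert isinstance(checkpoint["run_config"], dict)  # now loadable without omegaconf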
/scripts/test_registration_scripts/conf/config.yaml: -------------------------------------------------------------------------------- 1 | 2 | path_raw_fragment: "../../../data/test3dmatch/raw/raw_fragment" 3 | path_results: "../../2020-03-19/14-07-35/3DMatch" 4 | list_tau1: [0.1, 0.05, 0.15, 0.2, 0.25, 0.3] 5 | list_tau2: [0.05, 0.1, 0.15, 0.20, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65] 6 | -------------------------------------------------------------------------------- /scripts/test_registration_scripts/conf/fpfh.yaml: -------------------------------------------------------------------------------- 1 | input_path: '../../../data/test3dmatch/processed/fragment' 2 | output_path: '../../fpfh/3DMatch' 3 | radius: 0.3 4 | max_nn: 256 5 | radius_normal: 0.093 6 | max_nn_normal: 17 7 | -------------------------------------------------------------------------------- /scripts/test_registration_scripts/fpfh.py: -------------------------------------------------------------------------------- 1 | import open3d 2 | import torch 3 | import numpy as np 4 | import hydra 5 | import os 6 | import os.path as osp 7 | import sys 8 | import json 9 | from omegaconf import OmegaConf 10 | 11 | # Import building function for model and dataset 12 | DIR = os.path.dirname(os.path.realpath(__file__)) 13 | ROOT = os.path.join(DIR, "..") 14 | sys.path.insert(0, ROOT) 15 | 16 | from test_registration_scripts.save_feature import save 17 | 18 | 19 | class FPFH(object): 20 | def __init__(self, radius=0.3, max_nn=128, radius_normal=0.3, max_nn_normal=17): 21 | """ 22 | given a fragment, compute FPFH descriptor for keypoints 23 | """ 24 | self.kdtree = open3d.geometry.KDTreeSearchParamHybrid(radius, max_nn) 25 | self.kdtree_normal = open3d.geometry.KDTreeSearchParamHybrid(radius_normal, max_nn_normal) 26 | 27 | def __call__(self, data): 28 | pcd = open3d.geometry.PointCloud() 29 | pcd.points = open3d.utility.Vector3dVector(data.pos.numpy()) 30 | pcd.estimate_normals(self.kdtree_normal) 31 | fpfh_feature = open3d.pipelines.registration.compute_fpfh_feature(pcd, self.kdtree) 32 | return np.asarray(fpfh_feature.data).T[data.keypoints.numpy()] 33 | 34 | 35 | @hydra.main(config_path="conf/fpfh.yaml") 36 | def main(cfg): 37 | OmegaConf.set_struct(cfg, False) 38 | print(cfg) 39 | input_path = cfg.input_path 40 | output_path = cfg.output_path 41 | radius = cfg.radius 42 | max_nn = cfg.max_nn 43 | radius_normal = cfg.radius_normal 44 | max_nn_normal = cfg.max_nn_normal 45 | 46 | fpfh = FPFH(radius, max_nn, radius_normal, max_nn_normal) 47 | 48 | list_frag = sorted([f for f in os.listdir(input_path) if "fragment" in f]) 49 | path_table = osp.join(input_path, "table.json") 50 | with open(path_table, "r") as f: 51 | table = json.load(f) 52 | 53 | for i in range(len(list_frag)): 54 | print(i, table[str(i)], list_frag[i]) 55 | data = torch.load(osp.join(input_path, list_frag[i])) 56 | feat = fpfh(data) 57 | save(osp.join(output_path, "features"), table[str(i)]["scene_path"], table[str(i)]["fragment_name"], data, feat) 58 | 59 | 60 | if __name__ == "__main__": 61 | main() 62 | -------------------------------------------------------------------------------- /scripts/test_registration_scripts/misc.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def read_gt_log(path): 5 | """ 6 | read the gt.log of evaluation set of 3DMatch or ETH Dataset and parse it. 
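    Each block of five lines holds a tab-separated header (the two fragment ids
    plus a trailing count, which the parser drops) followed by the four rows of
    a 4x4 transformation matrix.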
7 | """ 8 | list_pair = [] 9 | list_mat = [] 10 | with open(path, "r") as f: 11 | all_mat = f.readlines() 12 | mat = np.zeros((4, 4)) 13 | for i in range(len(all_mat)): 14 | if i % 5 == 0: 15 | if i != 0: 16 | list_mat.append(mat) 17 | mat = np.zeros((4, 4)) 18 | list_pair.append(list(map(int, all_mat[i].split("\t")[:-1]))) 19 | else: 20 | line = all_mat[i].split("\t") 21 | 22 | mat[i % 5 - 1] = np.asarray(line[:4], dtype=np.float64) 23 | list_mat.append(mat) 24 | return list_pair, list_mat 25 | -------------------------------------------------------------------------------- /scripts/visualizations/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SmartForest-no/SegmentAnyTree/361650805e3d7c9664b9d5c44ac6af8047942832/scripts/visualizations/__init__.py -------------------------------------------------------------------------------- /scripts/visualizations/modelnet.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import importlib 4 | 5 | DIR = os.path.dirname(os.path.realpath(__file__)) 6 | torch_points3d = os.path.join(DIR, "..", "..", "torch_points3d") 7 | assert os.path.exists(torch_points3d) 8 | 9 | MODULE_PATH = os.path.join(torch_points3d, "__init__.py") 10 | MODULE_NAME = "torch_points3d" 11 | spec = importlib.util.spec_from_file_location(MODULE_NAME, MODULE_PATH) 12 | module = importlib.util.module_from_spec(spec) 13 | sys.modules[spec.name] = module 14 | spec.loader.exec_module(module) 15 | 16 | from omegaconf import OmegaConf 17 | import numpy as np 18 | import open3d 19 | 20 | from torch_points3d.datasets.classification.modelnet import SampledModelNet 21 | import torch_points3d.core.data_transform as T3D 22 | import torch_geometric.transforms as T 23 | from torch_points3d.utils.o3d_utils import * 24 | 25 | dataroot = os.path.join(DIR, "../data/modelnet") 26 | pre_transform = T.Compose([T.NormalizeScale(), T3D.GridSampling3D(0.02)]) 27 | dataset = SampledModelNet(dataroot, name="40", train=True, transform=None, pre_transform=pre_transform, pre_filter=None) 28 | 29 | colors = {} 30 | while True: 31 | try: 32 | pcds = [] 33 | for idx in range(40): 34 | print(idx) 35 | i = np.random.randint(0, len(dataset)) 36 | sample = dataset[i] 37 | label = sample.y.item() 38 | if label not in colors: 39 | color = np.asarray([np.random.uniform(0, 1), np.random.uniform(0, 1), np.random.uniform(0, 1)]) 40 | colors[label] = color 41 | else: 42 | color = colors[label] 43 | pcd = torch2o3d(sample) 44 | pcd.paint_uniform_color(color) 45 | points = np.asarray(pcd.points) + np.tile( 46 | np.asarray([4 * ((idx * 1) % 5), 3 * ((idx * 1) // 5), 0])[np.newaxis, ...], 47 | (np.asarray(pcd.points).shape[0], 1), 48 | ) 49 | pcd.points = open3d.utility.Vector3dVector(points) 50 | pcd.estimate_normals(search_param=open3d.geometry.KDTreeSearchParamHybrid(radius=0.1, max_nn=70)) 51 | pcds.append(pcd) 52 | open3d.visualization.draw_geometries(pcds) 53 | except KeyboardInterrupt: 54 | break 55 | -------------------------------------------------------------------------------- /scripts/visualizations/o3d_utils.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SmartForest-no/SegmentAnyTree/361650805e3d7c9664b9d5c44ac6af8047942832/scripts/visualizations/o3d_utils.py -------------------------------------------------------------------------------- /scripts/visualizations/s3dis_panoptic.py: 
-------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import importlib 4 | 5 | DIR = os.path.dirname(os.path.realpath(__file__)) 6 | torch_points3d = os.path.join(DIR, "..", "..", "torch_points3d") 7 | assert os.path.exists(torch_points3d) 8 | 9 | MODULE_PATH = os.path.join(torch_points3d, "__init__.py") 10 | MODULE_NAME = "torch_points3d" 11 | spec = importlib.util.spec_from_file_location(MODULE_NAME, MODULE_PATH) 12 | module = importlib.util.module_from_spec(spec) 13 | sys.modules[spec.name] = module 14 | spec.loader.exec_module(module) 15 | 16 | from omegaconf import OmegaConf 17 | import numpy as np 18 | import torch 19 | import open3d 20 | 21 | from torch_points3d.datasets.panoptic.s3dis import S3DISFusedDataset 22 | from torch_points3d.utils.o3d_utils import * 23 | 24 | 25 | dataset_options = OmegaConf.load(os.path.join(DIR, "../../conf/data/panoptic/s3disfused.yaml")) 26 | 27 | dataset_options.data.dataroot = os.path.join(DIR, "../../data") 28 | dataset = S3DISFusedDataset(dataset_options.data) 29 | print(dataset) 30 | 31 | dataset._train_dataset.transform = None 32 | 33 | while True: 34 | try: 35 | i = np.random.randint(0, len(dataset.train_dataset)) 36 | sample = dataset.train_dataset[i] 37 | pcd = torch2o3d(sample) 38 | pcd.estimate_normals(search_param=open3d.geometry.KDTreeSearchParamHybrid(radius=0.1, max_nn=70)) 39 | list_objects = [] 40 | existing_colors = [] 41 | mask = sample.instance_mask 42 | scene = apply_mask(sample, torch.logical_not(mask)) 43 | scene_pcd = torch2o3d(scene, color=[0.8, 0.8, 0.8]) 44 | 45 | scene_pcd.estimate_normals(search_param=open3d.geometry.KDTreeSearchParamHybrid(radius=0.1, max_nn=70)) 46 | scene_pcd = scene_pcd.voxel_down_sample(0.07) 47 | 48 | for i in range(1, sample.num_instances.item() + 1): 49 | instance_mask = sample.instance_labels == i 50 | obj = apply_mask(sample, instance_mask) 51 | new_color = generate_new_color(existing_colors) 52 | pcd = torch2o3d(obj, color=new_color) 53 | existing_colors.append(new_color) 54 | pcd.estimate_normals(search_param=open3d.geometry.KDTreeSearchParamHybrid(radius=0.1, max_nn=70)) 55 | list_objects.append(pcd) 56 | 57 | print() 58 | print(sample) 59 | open3d.visualization.draw_geometries([scene_pcd, *list_objects]) 60 | except KeyboardInterrupt: 61 | break 62 | -------------------------------------------------------------------------------- /scripts/visualizations/scannet_panoptic.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import importlib 4 | 5 | DIR = os.path.dirname(os.path.realpath(__file__)) 6 | torch_points3d = os.path.join(DIR, "..", "..", "torch_points3d") 7 | assert os.path.exists(torch_points3d) 8 | 9 | MODULE_PATH = os.path.join(torch_points3d, "__init__.py") 10 | MODULE_NAME = "torch_points3d" 11 | spec = importlib.util.spec_from_file_location(MODULE_NAME, MODULE_PATH) 12 | module = importlib.util.module_from_spec(spec) 13 | sys.modules[spec.name] = module 14 | spec.loader.exec_module(module) 15 | 16 | from omegaconf import OmegaConf 17 | import numpy as np 18 | import torch 19 | import open3d 20 | 21 | from torch_points3d.datasets.panoptic.scannet import ScannetDataset 22 | from torch_points3d.utils.o3d_utils import * 23 | 24 | 25 | dataset_options = OmegaConf.load(os.path.join(DIR, "../../conf/data/panoptic/scannet-sparse.yaml")) 26 | 27 | dataset_options.data.dataroot = os.path.join(DIR, "../../data") 28 | dataset = ScannetDataset(dataset_options.data) 29 | 
print(dataset) 30 | 31 | dataset._train_dataset.transform = None 32 | 33 | while True: 34 | try: 35 | i = np.random.randint(0, len(dataset.train_dataset)) 36 | sample = dataset.train_dataset[i] 37 | pcd = torch2o3d(sample) 38 | pcd.estimate_normals(search_param=open3d.geometry.KDTreeSearchParamHybrid(radius=0.1, max_nn=70)) 39 | list_objects = [] 40 | existing_colors = [] 41 | mask = sample.instance_mask 42 | scene = apply_mask(sample, torch.logical_not(mask)) 43 | scene_pcd = torch2o3d(scene, color=[0.8, 0.8, 0.8]) 44 | 45 | scene_pcd.estimate_normals(search_param=open3d.geometry.KDTreeSearchParamHybrid(radius=0.1, max_nn=70)) 46 | scene_pcd = scene_pcd.voxel_down_sample(0.07) 47 | 48 | for i in range(1, sample.num_instances.item() + 1): 49 | instance_mask = sample.instance_labels == i 50 | obj = apply_mask(sample, instance_mask) 51 | new_color = generate_new_color(existing_colors) 52 | pcd = torch2o3d(obj, color=new_color) 53 | existing_colors.append(new_color) 54 | pcd.estimate_normals(search_param=open3d.geometry.KDTreeSearchParamHybrid(radius=0.1, max_nn=70)) 55 | list_objects.append(pcd) 56 | 57 | print() 58 | print(sample) 59 | open3d.visualization.draw_geometries([scene_pcd, *list_objects]) 60 | except KeyboardInterrupt: 61 | break 62 | -------------------------------------------------------------------------------- /torch_points3d/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SmartForest-no/SegmentAnyTree/361650805e3d7c9664b9d5c44ac6af8047942832/torch_points3d/__init__.py -------------------------------------------------------------------------------- /torch_points3d/applications/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SmartForest-no/SegmentAnyTree/361650805e3d7c9664b9d5c44ac6af8047942832/torch_points3d/applications/__init__.py -------------------------------------------------------------------------------- /torch_points3d/applications/conf/kpconv/encoder_4.yaml: -------------------------------------------------------------------------------- 1 | class: kpconv.KPConvPaper 2 | conv_type: "PARTIAL_DENSE" 3 | define_constants: 4 | in_grid_size: 0.02 5 | in_feat: 64 6 | bn_momentum: 0.2 7 | output_nc: 256 8 | max_neighbors: 25 9 | down_conv: 10 | down_conv_nn: 11 | [ 12 | [[FEAT + 1, in_feat], [in_feat, 2*in_feat]], 13 | [[2*in_feat, 2*in_feat], [2*in_feat, 4*in_feat]], 14 | [[4*in_feat, 4*in_feat], [4*in_feat, 8*in_feat]], 15 | [[8*in_feat, 8*in_feat], [8*in_feat, 16*in_feat]], 16 | [[16*in_feat, 16*in_feat], [16*in_feat, 32 * in_feat]], 17 | ] 18 | grid_size: 19 | [ 20 | [in_grid_size, in_grid_size], 21 | [2*in_grid_size, 2*in_grid_size], 22 | [4*in_grid_size, 4*in_grid_size], 23 | [8*in_grid_size, 8*in_grid_size], 24 | [16*in_grid_size, 16*in_grid_size], 25 | ] 26 | prev_grid_size: 27 | [ 28 | [in_grid_size, in_grid_size], 29 | [in_grid_size, 2*in_grid_size], 30 | [2*in_grid_size, 4*in_grid_size], 31 | [4*in_grid_size, 8*in_grid_size], 32 | [8*in_grid_size, 16*in_grid_size], 33 | ] 34 | block_names: 35 | [ 36 | ["SimpleBlock", "ResnetBBlock"], 37 | ["ResnetBBlock", "ResnetBBlock"], 38 | ["ResnetBBlock", "ResnetBBlock"], 39 | ["ResnetBBlock", "ResnetBBlock"], 40 | ["ResnetBBlock", "ResnetBBlock"], 41 | ] 42 | has_bottleneck: 43 | [ 44 | [False, True], 45 | [True, True], 46 | [True, True], 47 | [True, True], 48 | [True, True], 49 | ] 50 | deformable: 51 | [ 52 | [False, False], 53 | [False, False], 54 | [False, 
False], 55 | [False, False], 56 | [False, False], 57 | ] 58 | max_num_neighbors: 59 | [[max_neighbors,max_neighbors], [max_neighbors, max_neighbors], [max_neighbors, max_neighbors], [max_neighbors, max_neighbors], [max_neighbors, max_neighbors]] 60 | module_name: KPDualBlock 61 | innermost: 62 | module_name: GlobalBaseModule 63 | activation: 64 | name: LeakyReLU 65 | negative_slope: 0.2 66 | aggr: "mean" 67 | nn: [32 * in_feat + 3, 32 * in_feat] 68 | 69 | -------------------------------------------------------------------------------- /torch_points3d/applications/conf/pointnet2/encoder_3_ms.yaml: -------------------------------------------------------------------------------- 1 | conv_type: "DENSE" 2 | define_constants: 3 | in_feat: 64 4 | down_conv: 5 | module_name: PointNetMSGDown 6 | npoint: [512, 128] 7 | radii: [[0.1, 0.2, 0.4], [0.4, 0.8]] 8 | nsamples: [[32, 64, 128], [64, 128]] 9 | down_conv_nn: 10 | [ 11 | [ 12 | [FEAT + 3, in_feat // 2, in_feat // 2, in_feat], 13 | [FEAT+ 3, in_feat, in_feat, in_feat * 2], 14 | [FEAT+ 3, in_feat, in_feat + in_feat // 2 , in_feat * 2], 15 | ], 16 | [ 17 | [in_feat + in_feat * 2 + in_feat * 2 + 3, in_feat * 2, in_feat * 2, in_feat * 4], 18 | [in_feat + in_feat * 2 + in_feat * 2 + 3, in_feat * 2, in_feat * 3, in_feat * 4], 19 | ], 20 | ] 21 | innermost: 22 | module_name: GlobalDenseBaseModule 23 | nn: [in_feat * 4 * 2 + 3, in_feat * 4, in_feat * 8] -------------------------------------------------------------------------------- /torch_points3d/applications/conf/pointnet2/unet_3_ms.yaml: -------------------------------------------------------------------------------- 1 | conv_type: "DENSE" 2 | down_conv: 3 | module_name: PointNetMSGDown 4 | npoint: [512, 128] 5 | radii: [[0.1, 0.2, 0.4], [0.4, 0.8]] 6 | nsamples: [[32, 64, 128], [64, 128]] 7 | down_conv_nn: 8 | [ 9 | [ 10 | [FEAT+3, 32, 32, 64], 11 | [FEAT+3, 64, 64, 128], 12 | [FEAT+3, 64, 96, 128], 13 | ], 14 | [ 15 | [64 + 128 + 128+3, 128, 128, 256], 16 | [64 + 128 + 128+3, 128, 196, 256], 17 | ], 18 | ] 19 | innermost: 20 | module_name: GlobalDenseBaseModule 21 | nn: [256 * 2 + 3, 256, 512, 1024] 22 | up_conv: 23 | module_name: DenseFPModule 24 | up_conv_nn: 25 | [ 26 | [1024 + 256*2, 256, 256], 27 | [256 + 128 * 2 + 64, 256, 128], 28 | [128 + FEAT, 128, 128], 29 | ] 30 | skip: True -------------------------------------------------------------------------------- /torch_points3d/applications/conf/pointnet2/unet_3_ss.yaml: -------------------------------------------------------------------------------- 1 | conv_type: "DENSE" 2 | down_conv: 3 | module_name: PointNetMSGDown 4 | npoint: [512, 128] 5 | radii: [[0.2], [0.4]] 6 | nsamples: [[64], [64]] 7 | down_conv_nn: [[[FEAT + 3, 64, 64, 128]], [[128+3, 128, 128, 256]]] 8 | innermost: 9 | module_name: GlobalDenseBaseModule 10 | nn: [256 + 3, 256, 512, 1024] 11 | up_conv: 12 | module_name: DenseFPModule 13 | up_conv_nn: 14 | [ 15 | [1024 + 256, 256, 256], 16 | [256 + 128, 256, 128], 17 | [128 + FEAT, 128, 128, 128], 18 | ] 19 | skip: True 20 | -------------------------------------------------------------------------------- /torch_points3d/applications/conf/pointnet2/unet_4_ss.yaml: -------------------------------------------------------------------------------- 1 | conv_type: "DENSE" 2 | define_constants: 3 | in_feat: 64 4 | down_conv: 5 | module_name: PointNetMSGDown 6 | npoint: [2048, 1024, 512, 256] 7 | radii: [[0.2], [0.4], [0.8], [1.2]] 8 | nsamples: [[64], [32], [16], [16]] 9 | down_conv_nn: [[[FEAT + 3, in_feat, in_feat, in_feat * 2]], 10 | 
[[in_feat * 2 + 3, in_feat * 2, in_feat * 2, in_feat * 4]], 11 | [[in_feat * 4 + 3, in_feat * 2, in_feat * 2, in_feat * 4]]] 12 | save_sampling_id: [True, False, False, False] 13 | normalize_xyz: [True, True, True, True] 14 | innermost: 15 | module_name: GlobalDenseBaseModule 16 | nn: [ in_feat * 4 + 3, in_feat * 8, in_feat * 16] 17 | up_conv: 18 | module_name: DenseFPModule 19 | up_conv_nn: 20 | [ 21 | [in_feat * 16 + in_feat * 4, in_feat * 8, in_feat * 8], 22 | [in_feat * 8 + in_feat * 4, in_feat * 8, in_feat * 8], 23 | [in_feat * 8 + in_feat * 2, in_feat * 4, in_feat * 4], 24 | [in_feat * 4 + FEAT, in_feat * 2, in_feat * 2] 25 | ] 26 | skip: True -------------------------------------------------------------------------------- /torch_points3d/applications/conf/rsconv/encoder_4.yaml: -------------------------------------------------------------------------------- 1 | conv_type: "DENSE" 2 | define_constants: 3 | in_feat: 64 4 | down_conv: 5 | module_name: RSConvOriginalMSGDown 6 | npoint: [1024, 256, 64, 16] 7 | radii: 8 | [ 9 | [0.075, 0.1, 0.125], 10 | [0.1, 0.15, 0.2], 11 | [0.2, 0.3, 0.4], 12 | [0.4, 0.6, 0.8], 13 | ] 14 | nsamples: [[16, 32, 48], [16, 48, 64], [16, 32, 48], [16, 24, 32]] 15 | down_conv_nn: 16 | [ 17 | [[10, in_feat//2, 16], [FEAT + 3, 16]], 18 | [10, in_feat//2, in_feat * 3 + 3], 19 | [10, in_feat, (in_feat * 2) * 3 + 3], 20 | [10, 2 * in_feat, (in_feat * 4) * 3 + 3], 21 | ] 22 | channel_raising_nn: 23 | [ 24 | [16, in_feat], 25 | [in_feat * 3 + 3, (in_feat * 2)], 26 | [(in_feat * 2) * 3 + 3, (in_feat * 4)], 27 | [(in_feat * 4) * 3 + 3, (in_feat * 8)], 28 | ] 29 | innermost: 30 | module_name: GlobalDenseBaseModule 31 | nn: [(in_feat * 8) * 3 + 3, in_feat * 8] 32 | aggr: "mean" 33 | -------------------------------------------------------------------------------- /torch_points3d/applications/conf/rsconv/unet_4.yaml: -------------------------------------------------------------------------------- 1 | conv_type: "DENSE" 2 | down_conv: 3 | module_name: RSConvOriginalMSGDown 4 | npoint: [1024, 256, 64, 16] 5 | radii: 6 | [ 7 | [0.075, 0.1, 0.125], 8 | [0.1, 0.15, 0.2], 9 | [0.2, 0.3, 0.4], 10 | [0.4, 0.6, 0.8], 11 | ] 12 | nsamples: [[16, 32, 48], [16, 48, 64], [16, 32, 48], [16, 24, 32]] 13 | down_conv_nn: 14 | [ 15 | [[10, 64//2, 16], [FEAT + 3, 16]], 16 | [10, 128//4, 64 * 3 + 3], 17 | [10, 256//4, 128 * 3 + 3], 18 | [10, 512//4, 256 * 3 + 3], 19 | ] 20 | channel_raising_nn: 21 | [ 22 | [16, 64], 23 | [64 * 3 + 3, 128], 24 | [128 * 3 + 3, 256], 25 | [256 * 3 + 3, 512], 26 | ] 27 | innermost: 28 | - module_name: GlobalDenseBaseModule 29 | nn: [512 * 3 + 3, 128] 30 | aggr: "mean" 31 | - module_name: GlobalDenseBaseModule 32 | nn: [256 * 3 + 3, 128] 33 | aggr: "mean" 34 | up_conv: 35 | bn: True 36 | bias: False 37 | module_name: DenseFPModule 38 | up_conv_nn: 39 | [ 40 | [512 * 3 + 256 * 3, 512, 512], 41 | [128 * 3 + 512, 512, 512], 42 | [64 * 3 + 512, 256, 256], 43 | [256 + FEAT , 128, 128], 44 | ] 45 | skip: True -------------------------------------------------------------------------------- /torch_points3d/applications/conf/sparseconv3d/encoder_2.yaml: -------------------------------------------------------------------------------- 1 | conv_type: "SPARSE" 2 | define_constants: 3 | in_feat: 32 4 | block: ResBlock # Can be any of the blocks in modules/SparseConv3d/modules.py 5 | down_conv: 6 | module_name: ResNetDown 7 | block: block 8 | N: [0, 1, 2] 9 | down_conv_nn: [[FEAT, in_feat], [in_feat, in_feat], [in_feat, 2*in_feat]] 10 | kernel_size: [3, 3, 3] 11 | stride: [1, 
2, 2] 12 | innermost: 13 | module_name: GlobalBaseModule 14 | activation: 15 | name: LeakyReLU 16 | negative_slope: 0.2 17 | aggr: "mean" 18 | nn: [2*in_feat, 2*in_feat] 19 | -------------------------------------------------------------------------------- /torch_points3d/applications/conf/sparseconv3d/encoder_4.yaml: -------------------------------------------------------------------------------- 1 | conv_type: "SPARSE" 2 | define_constants: 3 | in_feat: 32 4 | block: ResBlock # Can be any of the blocks in modules/SparseConv3d/modules.py 5 | down_conv: 6 | module_name: ResNetDown 7 | block: block 8 | N: [0, 1, 2, 2, 3] 9 | down_conv_nn: 10 | [ 11 | [FEAT, in_feat], 12 | [in_feat, in_feat], 13 | [in_feat, 2*in_feat], 14 | [2*in_feat, 4*in_feat], 15 | [4*in_feat, 8*in_feat], 16 | ] 17 | kernel_size: [3, 3, 3, 3, 3] 18 | stride: [1, 2, 2, 2, 2] 19 | innermost: 20 | module_name: GlobalBaseModule 21 | activation: 22 | name: LeakyReLU 23 | negative_slope: 0.2 24 | aggr: "mean" 25 | nn: [8*in_feat, 8*in_feat] 26 | -------------------------------------------------------------------------------- /torch_points3d/applications/conf/sparseconv3d/unet_2.yaml: -------------------------------------------------------------------------------- 1 | conv_type: "SPARSE" 2 | define_constants: 3 | in_feat: 32 4 | block: ResBlock # Can be any of the blocks in modules/SparseConv3d/modules.py 5 | down_conv: 6 | block: block 7 | module_name: ResNetDown 8 | N: [0, 1, 2] 9 | down_conv_nn: [[FEAT, in_feat], [in_feat, in_feat], [in_feat, 2*in_feat]] 10 | kernel_size: [2, 2] 11 | stride: [1, 2, 2] 12 | up_conv: 13 | block: block 14 | module_name: ResNetUp 15 | N: [1, 1, 0] 16 | up_conv_nn: 17 | [ 18 | [4*in_feat + 2*in_feat, 3*in_feat], 19 | [3*in_feat + in_feat, 3*in_feat], 20 | [3*in_feat + in_feat, 3*in_feat], 21 | ] 22 | kernel_size: [2, 2, 3] 23 | stride: [2, 2, 1] 24 | -------------------------------------------------------------------------------- /torch_points3d/applications/conf/sparseconv3d/unet_4.yaml: -------------------------------------------------------------------------------- 1 | conv_type: "SPARSE" 2 | define_constants: 3 | in_feat: 32 4 | block: ResBlock # Can be any of the blocks in modules/SparseConv3d/modules.py 5 | down_conv: 6 | module_name: ResNetDown 7 | block: block 8 | N: [0, 1, 2, 2, 3] 9 | down_conv_nn: 10 | [ 11 | [FEAT, in_feat], 12 | [in_feat, in_feat], 13 | [in_feat, 2*in_feat], 14 | [2*in_feat, 4*in_feat], 15 | [4*in_feat, 8*in_feat], 16 | ] 17 | kernel_size: [3, 3, 3, 3, 3] 18 | stride: [1, 2, 2, 2, 2] 19 | up_conv: 20 | block: block 21 | module_name: ResNetUp 22 | N: [1, 1, 1, 1, 0] 23 | up_conv_nn: 24 | [ 25 | [8*in_feat, 4*in_feat], 26 | [4*in_feat + 4*in_feat, 4*in_feat], 27 | [4*in_feat + 2*in_feat, 3*in_feat], 28 | [3*in_feat + in_feat, 3*in_feat], 29 | [3*in_feat + in_feat, 3*in_feat], 30 | ] 31 | kernel_size: [3, 3, 3, 3, 3] 32 | stride: [2, 2, 2, 2, 1] 33 | -------------------------------------------------------------------------------- /torch_points3d/applications/models.py: -------------------------------------------------------------------------------- 1 | from .kpconv import KPConv 2 | from .pointnet2 import PointNet2 3 | from .rsconv import RSConv 4 | import logging 5 | 6 | log = logging.getLogger(__name__) 7 | 8 | try: 9 | from .sparseconv3d import SparseConv3d 10 | except: 11 | log.warning( 12 | "Sparse convolutions are not supported, please install one of the available backends, MinkowskiEngine or MIT SparseConv" 13 | ) 14 | 15 | try: 16 | from .minkowski import 
Minkowski 17 | except: 18 | log.warning("MinkowskiEngine is not installed.") 19 | -------------------------------------------------------------------------------- /torch_points3d/applications/utils.py: -------------------------------------------------------------------------------- 1 | def extract_output_nc(model_config): 2 | """ Extracts the number of channels at the output of the network from the model config 3 | """ 4 | if model_config.get('up_conv') is not None: 5 | output_nc = model_config.up_conv.up_conv_nn[-1][-1] 6 | elif model_config.get('innermost') is not None: 7 | output_nc = model_config.innermost.nn[-1] 8 | else: 9 | raise ValueError("Input model_config does not match expected pattern") 10 | return output_nc 11 | -------------------------------------------------------------------------------- /torch_points3d/core/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SmartForest-no/SegmentAnyTree/361650805e3d7c9664b9d5c44ac6af8047942832/torch_points3d/core/__init__.py -------------------------------------------------------------------------------- /torch_points3d/core/base_conv/__init__.py: -------------------------------------------------------------------------------- 1 | from .base_conv import * 2 | -------------------------------------------------------------------------------- /torch_points3d/core/base_conv/base_conv.py: -------------------------------------------------------------------------------- 1 | from abc import ABC 2 | 3 | from torch_points3d.core.common_modules.base_modules import BaseModule 4 | 5 | 6 | class BaseConvolution(ABC, BaseModule): 7 | def __init__(self, sampler, neighbour_finder, *args, **kwargs): 8 | BaseModule.__init__(self) 9 | self.sampler = sampler 10 | self.neighbour_finder = neighbour_finder 11 | -------------------------------------------------------------------------------- /torch_points3d/core/common_modules/__init__.py: -------------------------------------------------------------------------------- 1 | from .base_modules import * 2 | from .spatial_transform import * 3 | -------------------------------------------------------------------------------- /torch_points3d/core/common_modules/dense_modules.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | from .base_modules import Seq 3 | 4 | 5 | class Conv2D(Seq): 6 | def __init__(self, in_channels, out_channels, bias=True, bn=True, activation=nn.LeakyReLU(negative_slope=0.01)): 7 | super().__init__() 8 | self.append(nn.Conv2d(in_channels, out_channels, kernel_size=(1, 1), stride=(1, 1), bias=bias)) 9 | if bn: 10 | self.append(nn.BatchNorm2d(out_channels)) 11 | if activation: 12 | self.append(activation) 13 | 14 | 15 | class Conv1D(Seq): 16 | def __init__(self, in_channels, out_channels, bias=True, bn=True, activation=nn.LeakyReLU(negative_slope=0.01)): 17 | super().__init__() 18 | self.append(nn.Conv1d(in_channels, out_channels, kernel_size=1, bias=bias)) 19 | if bn: 20 | self.append(nn.BatchNorm1d(out_channels)) 21 | if activation: 22 | self.append(activation) 23 | 24 | 25 | class MLP2D(Seq): 26 | def __init__(self, channels, bias=False, bn=True, activation=nn.LeakyReLU(negative_slope=0.01)): 27 | super().__init__() 28 | for i in range(len(channels) - 1): 29 | self.append(Conv2D(channels[i], channels[i + 1], bn=bn, bias=bias, activation=activation)) 30 | --------------------------------------------------------------------------------
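A quick sanity-check sketch of how extract_output_nc reads a config. This snippet is not part of the repository, and the config values are made up for illustration:

from omegaconf import OmegaConf

from torch_points3d.applications.utils import extract_output_nc

# Made-up model config: the last entry of the last up_conv layer is the
# number of channels the network outputs.
cfg = OmegaConf.create({"up_conv": {"up_conv_nn": [[256, 128], [131, 64, 64]]}})
assert extract_output_nc(cfg) == 64

# Without an up_conv section, the innermost module decides instead.
cfg = OmegaConf.create({"innermost": {"nn": [256, 512]}})
assert extract_output_nc(cfg) == 512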
/torch_points3d/core/common_modules/gathering.py: -------------------------------------------------------------------------------- 1 | def gather(x, idx, method=2): 2 | """ 3 | https://github.com/pytorch/pytorch/issues/15245 4 | implementation of a custom gather operation for faster backwards. 5 | :param x: input with shape [N, D_1, ... D_d] 6 | :param idx: indexing with shape [n_1, ..., n_m] 7 | :param method: Choice of the method 8 | :return: x[idx] with shape [n_1, ..., n_m, D_1, ... D_d] 9 | """ 10 | idx[idx == -1] = x.shape[0] - 1 # Shadow point 11 | if method == 0: 12 | return x[idx] 13 | elif method == 1: 14 | x = x.unsqueeze(1) 15 | x = x.expand((-1, idx.shape[-1], -1)) 16 | idx = idx.unsqueeze(2) 17 | idx = idx.expand((-1, -1, x.shape[-1])) 18 | return x.gather(0, idx) 19 | elif method == 2: 20 | for i, ni in enumerate(idx.size()[1:]): 21 | x = x.unsqueeze(i + 1) 22 | new_s = list(x.size()) 23 | new_s[i + 1] = ni 24 | x = x.expand(new_s) 25 | n = len(idx.size()) 26 | for i, di in enumerate(x.size()[n:]): 27 | idx = idx.unsqueeze(i + n) 28 | new_s = list(idx.size()) 29 | new_s[i + n] = di 30 | idx = idx.expand(new_s) 31 | return x.gather(0, idx) 32 | else: 33 | raise ValueError("Unknown method") 34 | -------------------------------------------------------------------------------- /torch_points3d/core/data_transform/prebatchcollate.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | log = logging.getLogger(__name__) 4 | 5 | 6 | class ClampBatchSize: 7 | """ Drops samples from a batch if the batch gets too large 8 | 9 | Parameters 10 | ---------- 11 | num_points : int, optional 12 | Maximum number of points per batch, by default 100000 13 | """ 14 | 15 | def __init__(self, num_points=100000): 16 | self._num_points = num_points 17 | 18 | def __call__(self, datas): 19 | assert isinstance(datas, list) 20 | batch_id = 0 21 | batch_num_points = 0 22 | removed_sample = False 23 | datas_out = [] 24 | for batch_id, d in enumerate(datas): 25 | num_points = datas[batch_id].pos.shape[0] 26 | batch_num_points += num_points 27 | if self._num_points and batch_num_points > self._num_points: 28 | batch_num_points -= num_points 29 | removed_sample = True 30 | continue 31 | datas_out.append(d) 32 | 33 | if removed_sample: 34 | num_full_points = sum(len(d.pos) for d in datas) 35 | num_full_batch_size = len(datas_out) 36 | log.warning( 37 | f"\t\tCannot fit {num_full_points} points into the {self._num_points} points " 38 | f"limit. Truncating batch to {num_full_batch_size} of the {len(datas)} samples, keeping {batch_num_points} points." 39 | ) 40 | return datas_out 41 | 42 | def __repr__(self): 43 | return "{}(num_points={})".format(self.__class__.__name__, self._num_points) 44 | -------------------------------------------------------------------------------- /torch_points3d/core/data_transform/precollate.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | class NormalizeFeature(object): 4 | """Normalize a feature. By default, features will be scaled between [0,1]. Should only be applied at the dataset level. 5 | 6 | Parameters 7 | ---------- 8 | standardize: bool: Will use standardization rather than scaling.
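        (Scaling computes (x - x.min()) / (x.max() - x.min()); standardization
        computes (x - x.mean()) / x.std(), matching the two branches of __call__
        below.)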
9 | """ 10 | 11 | def __init__(self, feature_name, standardize=False): 12 | self._feature_name = feature_name 13 | self._standardize = standardize 14 | 15 | def __call__(self, data): 16 | assert hasattr(data, self._feature_name) 17 | feature = data[self._feature_name] 18 | if self._standardize: 19 | feature = (feature - feature.mean()) / (feature.std()) 20 | else: 21 | feature = (feature - feature.min()) / (feature.max() - feature.min()) 22 | data[self._feature_name] = feature 23 | return data 24 | 25 | def __repr__(self): 26 | return "{}(feature_name={}, standardize={})".format(self.__class__.__name__, self._feature_name, self._standardize) -------------------------------------------------------------------------------- /torch_points3d/core/data_transform/sparse_transforms.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | import itertools 3 | import numpy as np 4 | import math 5 | import re 6 | import torch 7 | import scipy 8 | import random 9 | from tqdm.auto import tqdm as tq 10 | from torch.nn import functional as F 11 | from functools import partial 12 | from torch_geometric.nn import fps, radius, knn, voxel_grid 13 | from torch_geometric.nn.pool.consecutive import consecutive_cluster 14 | from torch_geometric.nn.pool.pool import pool_pos, pool_batch 15 | from torch_scatter import scatter_add, scatter_mean 16 | from torch_cluster import grid_cluster 17 | 18 | from torch_points3d.datasets.multiscale_data import MultiScaleData 19 | from torch_points3d.utils.config import is_list 20 | from torch_points3d.utils import is_iterable 21 | from .grid_transform import group_data, GridSampling3D, shuffle_data 22 | 23 | 24 | class RandomCoordsFlip(object): 25 | def __init__(self, ignored_axis, is_temporal=False, p=0.95): 26 | """This transform randomly flips sparse coords along every axis that is not ignored. Usually, the flipped axes would be x or y 27 | 28 | Parameters 29 | ---------- 30 | ignored_axis: list of str 31 | Axes (among x, y, z) that will never be flipped 32 | is_temporal : bool 33 | Used to indicate if the pointcloud is actually 4 dimensional 34 | 35 | Returns 36 | ------- 37 | data: Data 38 | Returns the same data object with the remaining coordinate axes randomly flipped 39 | """ 40 | assert 0 <= p <= 1, "p should be within 0 and 1; it is the probability of flipping each non-ignored axis" 41 | self._is_temporal = is_temporal 42 | self._D = 4 if is_temporal else 3 43 | mapping = {"x": 0, "y": 1, "z": 2} 44 | self._ignored_axis = [mapping[axis] for axis in ignored_axis] 45 | # Use the rest of axes for flipping.
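        # e.g. ignored_axis=["z"] gives _ignored_axis=[2], so _horz_axes == {0, 1}
        # and only the x and y coordinates are candidates for flipping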
46 | self._horz_axes = set(range(self._D)) - set(self._ignored_axis) 47 | self._p = p 48 | 49 | def __call__(self, data): 50 | for curr_ax in self._horz_axes: 51 | if random.random() < self._p: 52 | coords = data.coords 53 | coord_max = torch.max(coords[:, curr_ax]) 54 | data.coords[:, curr_ax] = coord_max - coords[:, curr_ax] 55 | return data 56 | 57 | def __repr__(self): 58 | return "{}(flip_axis={}, prob={}, is_temporal={})".format( 59 | self.__class__.__name__, self._horz_axes, self._p, self._is_temporal 60 | ) 61 | -------------------------------------------------------------------------------- /torch_points3d/core/initializer/__init__.py: -------------------------------------------------------------------------------- 1 | from .initializer import * 2 | -------------------------------------------------------------------------------- /torch_points3d/core/initializer/initializer.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch.nn import init 3 | 4 | 5 | def init_weights(net, init_type="normal", gain=0.02): 6 | def init_func(m): 7 | classname = m.__class__.__name__ 8 | if hasattr(m, "weight") and (classname.find("Conv") != -1 or classname.find("Linear") != -1): 9 | if init_type == "normal": 10 | init.normal_(m.weight.data, 0.0, gain) 11 | elif init_type == "xavier": 12 | init.xavier_normal_(m.weight.data, gain=gain) 13 | elif init_type == "kaiming": 14 | init.kaiming_normal_(m.weight.data, a=0, mode="fan_in") 15 | elif init_type == "orthogonal": 16 | init.orthogonal_(m.weight.data, gain=gain) 17 | else: 18 | raise NotImplementedError("initialization method [%s] is not implemented" % init_type) 19 | if hasattr(m, "bias") and m.bias is not None: 20 | init.constant_(m.bias.data, 0.0) 21 | elif classname.find("BatchNorm2d") != -1: 22 | init.normal_(m.weight.data, 1.0, gain) 23 | init.constant_(m.bias.data, 0.0) 24 | 25 | print("initialize network with %s" % init_type) 26 | net.apply(init_func) 27 | 28 | 29 | def init_net(net, init_type="normal", init_gain=0.02, gpu_ids=[]): 30 | if len(gpu_ids) > 0: 31 | assert torch.cuda.is_available() 32 | net.to(gpu_ids[0]) 33 | net = torch.nn.DataParallel(net, gpu_ids) 34 | init_weights(net, init_type, gain=init_gain) 35 | return net 36 | -------------------------------------------------------------------------------- /torch_points3d/core/losses/__init__.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | from .losses import * 4 | from .huber_loss import * 5 | from .panoptic_losses import * 6 | from pytorch_metric_learning.miners import * 7 | from pytorch_metric_learning.losses import * 8 | 9 | _custom_losses = sys.modules["torch_points3d.core.losses.losses"] 10 | _torch_metric_learning_losses = sys.modules["pytorch_metric_learning.losses"] 11 | _torch_metric_learning_miners = sys.modules["pytorch_metric_learning.miners"] 12 | _intersection = set(_custom_losses.__dict__) & set(_torch_metric_learning_losses.__dict__) 13 | _intersection = set([module for module in _intersection if not module.startswith("_")]) 14 | if _intersection: 15 | raise Exception( 16 | "It seems that you are overriding a transform from pytorch metric learning, \ 17 | this is forbidden, please rename your classes {}".format( 18 | _intersection 19 | ) 20 | ) 21 | 22 | 23 | def instantiate_loss_or_miner(option, mode="loss"): 24 | """ 25 | create a loss from an OmegaConf dict such as 26 | TripletMarginLoss.
27 | params: 28 | margin=0.1 29 | It can also instantiate a miner to better learn a loss 30 | """ 31 | class_ = getattr(option, "class", None) 32 | try: 33 | params = option.get('params') 34 | except KeyError: 35 | params = None 36 | 37 | try: 38 | lparams = option.get('lparams') 39 | except KeyError: 40 | lparams = None 41 | 42 | if "loss" in mode: 43 | cls = getattr(_custom_losses, class_, None) 44 | if not cls: 45 | cls = getattr(_torch_metric_learning_losses, class_, None) 46 | if not cls: 47 | raise ValueError("loss %s is nowhere to be found" % class_) 48 | elif mode == "miner": 49 | cls = getattr(_torch_metric_learning_miners, class_, None) 50 | if not cls: 51 | raise ValueError("miner %s is nowhere to be found" % class_) 52 | else: 53 | raise NotImplementedError("Cannot instantiate this mode {}".format(mode)) 54 | 55 | if params and lparams: 56 | return cls(*lparams, **params) 57 | if params: 58 | return cls(**params) 59 | if lparams: 60 | return cls(*lparams) 61 | return cls() 62 | -------------------------------------------------------------------------------- /torch_points3d/core/losses/huber_loss.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | def nn_distance(pc1, pc2, l1smooth=False, delta=1.0, l1=False): 5 | """ 6 | Input: 7 | pc1: (B,N,C) torch tensor 8 | pc2: (B,M,C) torch tensor 9 | l1smooth: bool, whether to use l1smooth loss 10 | delta: scalar, the delta used in l1smooth loss 11 | Output: 12 | dist1: (B,N) torch float32 tensor 13 | idx1: (B,N) torch int64 tensor 14 | dist2: (B,M) torch float32 tensor 15 | idx2: (B,M) torch int64 tensor 16 | """ 17 | N = pc1.shape[1] 18 | M = pc2.shape[1] 19 | pc1_expand_tile = pc1.unsqueeze(2).repeat(1, 1, M, 1) 20 | pc2_expand_tile = pc2.unsqueeze(1).repeat(1, N, 1, 1) 21 | pc_diff = pc1_expand_tile - pc2_expand_tile 22 | 23 | if l1smooth: 24 | pc_dist = torch.sum(huber_loss(pc_diff, delta), dim=-1) # (B,N,M) 25 | elif l1: 26 | pc_dist = torch.sum(torch.abs(pc_diff), dim=-1) # (B,N,M) 27 | else: 28 | pc_dist = torch.sum(pc_diff ** 2, dim=-1) # (B,N,M) 29 | dist1, idx1 = torch.min(pc_dist, dim=2) # (B,N) 30 | dist2, idx2 = torch.min(pc_dist, dim=1) # (B,M) 31 | return dist1, idx1, dist2, idx2 32 | 33 | 34 | def huber_loss(error, delta=1.0): 35 | """ 36 | Args: 37 | error: Torch tensor (d1,d2,...,dk) 38 | Returns: 39 | loss: Torch tensor (d1,d2,...,dk) 40 | 41 | x = error = pred - gt or dist(pred,gt) 42 | 0.5 * |x|^2 if |x|<=d 43 | 0.5 * d^2 + d * (|x|-d) if |x|>d 44 | Ref: https://github.com/charlesq34/frustum-pointnets/blob/master/models/model_util.py 45 | """ 46 | abs_error = torch.abs(error) 47 | # quadratic = torch.min(abs_error, torch.FloatTensor([delta])) 48 | quadratic = torch.clamp(abs_error, max=delta) 49 | linear = abs_error - quadratic 50 | loss = 0.5 * quadratic ** 2 + delta * linear 51 | return loss 52 | 53 | 54 | class HuberLoss(torch.nn.Module): 55 | def __init__(self, delta=0.1): 56 | super().__init__() 57 | self._delta = delta 58 | 59 | def forward(self, error): 60 | return huber_loss(error, self._delta) 61 | -------------------------------------------------------------------------------- /torch_points3d/core/regularizer/__init__.py: -------------------------------------------------------------------------------- 1 | from .regularizers import * 2 | -------------------------------------------------------------------------------- /torch_points3d/core/schedulers/__init__.py: -------------------------------------------------------------------------------- 1 |
from .lr_schedulers import * 2 | from .bn_schedulers import * 3 | -------------------------------------------------------------------------------- /torch_points3d/core/spatial_ops/__init__.py: -------------------------------------------------------------------------------- 1 | from .neighbour_finder import * 2 | from .sampling import * 3 | from .interpolate import * 4 | -------------------------------------------------------------------------------- /torch_points3d/datasets/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SmartForest-no/SegmentAnyTree/361650805e3d7c9664b9d5c44ac6af8047942832/torch_points3d/datasets/__init__.py -------------------------------------------------------------------------------- /torch_points3d/datasets/batch.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch_geometric.data import Data 3 | 4 | 5 | class SimpleBatch(Data): 6 | r""" A classic batch object wrapper with :class:`torch_geometric.data.Data` being the 7 | base class, all its methods can also be used here. 8 | """ 9 | 10 | def __init__(self, batch=None, **kwargs): 11 | super(SimpleBatch, self).__init__(**kwargs) 12 | 13 | self.batch = batch 14 | self.__data_class__ = Data 15 | 16 | @staticmethod 17 | def from_data_list(data_list): 18 | r"""Constructs a batch object from a python list holding 19 | :class:`torch_geometric.data.Data` objects. 20 | """ 21 | keys = [set(data.keys) for data in data_list] 22 | keys = list(set.union(*keys)) 23 | 24 | # Check if all dimensions matches and we can concatenate data 25 | # if len(data_list) > 0: 26 | # for data in data_list[1:]: 27 | # for key in keys: 28 | # assert data_list[0][key].shape == data[key].shape 29 | 30 | batch = SimpleBatch() 31 | batch.__data_class__ = data_list[0].__class__ 32 | 33 | for key in keys: 34 | batch[key] = [] 35 | 36 | for _, data in enumerate(data_list): 37 | for key in data.keys: 38 | item = data[key] 39 | batch[key].append(item) 40 | 41 | for key in batch.keys: 42 | item = batch[key][0] 43 | if ( 44 | torch.is_tensor(item) 45 | or isinstance(item, int) 46 | or isinstance(item, float) 47 | ): 48 | batch[key] = torch.stack(batch[key]) 49 | else: 50 | raise ValueError("Unsupported attribute type") 51 | 52 | return batch.contiguous() 53 | # return [batch.x.transpose(1, 2).contiguous(), batch.pos, batch.y.view(-1)] 54 | 55 | @property 56 | def num_graphs(self): 57 | """Returns the number of graphs in the batch.""" 58 | return self.batch[-1].item() + 1 59 | -------------------------------------------------------------------------------- /torch_points3d/datasets/classification/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SmartForest-no/SegmentAnyTree/361650805e3d7c9664b9d5c44ac6af8047942832/torch_points3d/datasets/classification/__init__.py -------------------------------------------------------------------------------- /torch_points3d/datasets/dataset_factory.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | import copy 3 | import hydra 4 | import logging 5 | 6 | from torch_points3d.datasets.base_dataset import BaseDataset 7 | 8 | log = logging.getLogger(__name__) 9 | 10 | 11 | def get_dataset_class(dataset_config): 12 | task = dataset_config.task 13 | # Find and create associated dataset 14 | try: 15 | dataset_config.dataroot = hydra.utils.to_absolute_path( 
16 | dataset_config.dataroot) 17 | except Exception: 18 | log.error("Could not resolve dataroot to an absolute path; this should only happen during testing") 19 | dataset_class = getattr(dataset_config, "class") 20 | dataset_paths = dataset_class.split(".") 21 | module = ".".join(dataset_paths[:-1]) 22 | class_name = dataset_paths[-1] 23 | dataset_module = ".".join(["torch_points3d.datasets", task, module]) 24 | datasetlib = importlib.import_module(dataset_module) 25 | dataset_cls = None # stays None if no matching class is found below 26 | target_dataset_name = class_name 27 | for name, cls in datasetlib.__dict__.items(): 28 | if name.lower() == target_dataset_name.lower() and issubclass(cls, BaseDataset): 29 | dataset_cls = cls 30 | 31 | if dataset_cls is None: 32 | raise NotImplementedError( 33 | "In %s.py, there should be a subclass of BaseDataset with class name that matches %s in lowercase." 34 | % (module, class_name) 35 | ) 36 | return dataset_cls 37 | 38 | 39 | def instantiate_dataset(dataset_config) -> BaseDataset: 40 | """Import the module "data/[module].py". 41 | In the file, the class called {class_name}() will 42 | be instantiated. It has to be a subclass of BaseDataset, 43 | and it is case-insensitive. 44 | """ 45 | dataset_cls = get_dataset_class(dataset_config) 46 | dataset = dataset_cls(dataset_config) 47 | return dataset 48 | -------------------------------------------------------------------------------- /torch_points3d/datasets/object_detection/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SmartForest-no/SegmentAnyTree/361650805e3d7c9664b9d5c44ac6af8047942832/torch_points3d/datasets/object_detection/__init__.py -------------------------------------------------------------------------------- /torch_points3d/datasets/object_detection/box_data.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | 4 | 5 | class BoxData: 6 | """ Basic data structure to hold a box prediction or ground truth 7 | if a score is provided then it will be treated as a prediction.
Else, it is a ground truth box 8 | """ 9 | 10 | def __init__(self, classname, corners3d, score=None): 11 | assert corners3d.shape == (8, 3) 12 | assert score is None or (0 <= score <= 1) 13 | 14 | if torch.is_tensor(classname): 15 | classname = classname.cpu().item() 16 | self.classname = classname 17 | 18 | if torch.is_tensor(corners3d): 19 | corners3d = corners3d.cpu().numpy() 20 | self.corners3d = corners3d 21 | 22 | if torch.is_tensor(score): 23 | score = score.cpu().item() 24 | self.score = score 25 | 26 | @property 27 | def is_gt(self): 28 | return self.score is None 29 | 30 | def __repr__(self): 31 | return "{}: (score={})".format(self.__class__.__name__, self.score) 32 | -------------------------------------------------------------------------------- /torch_points3d/datasets/object_detection/scannet_metadata/scannet_means.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SmartForest-no/SegmentAnyTree/361650805e3d7c9664b9d5c44ac6af8047942832/torch_points3d/datasets/object_detection/scannet_metadata/scannet_means.npz -------------------------------------------------------------------------------- /torch_points3d/datasets/panoptic/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SmartForest-no/SegmentAnyTree/361650805e3d7c9664b9d5c44ac6af8047942832/torch_points3d/datasets/panoptic/__init__.py -------------------------------------------------------------------------------- /torch_points3d/datasets/panoptic/utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | 4 | def set_extra_labels(data, instance_classes, num_max_objects): 5 | """ Adds extra labels for the instance and object segmentation tasks 6 | - num_instances: number of instances 7 | - center_label: [num_max_objects, 3] one centre per instance 8 | - instance_labels: [num_points] 9 | - vote_label: [num_points, 3] displacement between each point and the center. 10 | - instance_mask: [num_points] boolean mask 11 | """ 12 | # Initialise variables 13 | num_points = data.pos.shape[0] 14 | semantic_labels = data.y 15 | 16 | # compute votes *AFTER* augmentation 17 | instances = np.unique(data.instance_labels) 18 | centers = [] 19 | point_votes = torch.zeros([num_points, 3]) 20 | instance_labels = torch.zeros(num_points, dtype=torch.long) 21 | instance_idx = 1 22 | for i_instance in instances: 23 | # find all points belonging to that instance 24 | ind = np.where(data.instance_labels == i_instance)[0] 25 | # find the semantic label 26 | instance_class = semantic_labels[ind[0]].item() 27 | if instance_class in instance_classes: # We keep this instance 28 | pos = data.pos[ind, :3] 29 | max_pos = pos.max(0)[0] 30 | min_pos = pos.min(0)[0] 31 | center = 0.5 * (min_pos + max_pos) 32 | point_votes[ind, :] = center - pos 33 | centers.append(center.clone().detach()) 34 | instance_labels[ind] = instance_idx 35 | instance_idx += 1 36 | 37 | num_instances = len(centers) 38 | if num_instances > num_max_objects: 39 | raise ValueError( 40 | "We have more objects than expected.
Please increase the NUM_MAX_OBJECTS variable.") 41 | data.center_label = torch.zeros((num_max_objects, 3)) 42 | if num_instances: 43 | data.center_label[:num_instances, :] = torch.stack(centers) 44 | 45 | data.vote_label = point_votes.float() 46 | data.instance_labels = instance_labels 47 | data.instance_mask = instance_labels != 0 48 | data.num_instances = torch.tensor([num_instances]) 49 | return data -------------------------------------------------------------------------------- /torch_points3d/datasets/registration/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SmartForest-no/SegmentAnyTree/361650805e3d7c9664b9d5c44ac6af8047942832/torch_points3d/datasets/registration/__init__.py -------------------------------------------------------------------------------- /torch_points3d/datasets/registration/detector.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | class RandomDetector(object): 5 | """ 6 | Random selector for test points 7 | """ 8 | 9 | def __init__(self, num_points=5000): 10 | self.num_points = num_points 11 | 12 | def __call__(self, data): 13 | keypoints_idx = torch.randint(0, 14 | data.pos.shape[0], 15 | (self.num_points, )) 16 | data.keypoints = keypoints_idx 17 | return data 18 | -------------------------------------------------------------------------------- /torch_points3d/datasets/registration/urls/split_test.txt: -------------------------------------------------------------------------------- 1 | 7-scenes-redkitchen 2 | sun3d-home_at-home_at_scan1_2013_jan_1 3 | sun3d-home_md-home_md_scan9_2012_sep_30 4 | sun3d-hotel_uc-scan3 5 | sun3d-hotel_umd-maryland_hotel1 6 | sun3d-hotel_umd-maryland_hotel3 7 | sun3d-mit_76_studyroom-76-1studyroom2 8 | sun3d-mit_lab_hj-lab_hj_tea_nov_2_2012_scan1_erika 9 | -------------------------------------------------------------------------------- /torch_points3d/datasets/registration/urls/split_train.txt: -------------------------------------------------------------------------------- 1 | sun3d-brown_bm_1-brown_bm_1 2 | sun3d-brown_bm_4-brown_bm_4 3 | sun3d-brown_cogsci_1-brown_cogsci_1 4 | sun3d-brown_cs_2-brown_cs2 5 | sun3d-brown_cs_3-brown_cs3 6 | sun3d-harvard_c3-hv_c3_1 7 | sun3d-harvard_c5-hv_c5_1 8 | sun3d-harvard_c6-hv_c6_1 9 | sun3d-harvard_c8-hv_c8_3 10 | sun3d-harvard_c11-hv_c11_2 11 | sun3d-home_bksh-home_bksh_oct_30_2012_scan2_erika 12 | sun3d-hotel_nips2012-nips_4 13 | sun3d-hotel_sf-scan1 14 | sun3d-mit_32_d507-d507_2 15 | sun3d-mit_46_ted_lab1-ted_lab_2 16 | sun3d-mit_76_417-76-417b 17 | sun3d-mit_dorm_next_sj-dorm_next_sj_oct_30_2012_scan1_erika 18 | sun3d-mit_w20_athena-sc_athena_oct_29_2012_scan1_erika 19 | 7-scenes-chess 20 | 7-scenes-fire 21 | 7-scenes-heads 22 | 7-scenes-office 23 | 7-scenes-pumpkin 24 | 7-scenes-stairs 25 | rgbd-scenes-v2-scene_01 26 | rgbd-scenes-v2-scene_02 27 | rgbd-scenes-v2-scene_03 28 | rgbd-scenes-v2-scene_04 29 | rgbd-scenes-v2-scene_05 30 | rgbd-scenes-v2-scene_06 31 | rgbd-scenes-v2-scene_07 32 | rgbd-scenes-v2-scene_08 33 | rgbd-scenes-v2-scene_09 34 | rgbd-scenes-v2-scene_10 35 | rgbd-scenes-v2-scene_11 36 | rgbd-scenes-v2-scene_12 37 | rgbd-scenes-v2-scene_13 38 | rgbd-scenes-v2-scene_14 39 | bundlefusion-apt0 40 | bundlefusion-apt1 41 | bundlefusion-apt2 42 | bundlefusion-copyroom 43 | bundlefusion-office0 44 | bundlefusion-office1 45 | bundlefusion-office2 46 | bundlefusion-office3 47 | analysis-by-synthesis-apt1-kitchen 48 | 
analysis-by-synthesis-apt1-living 49 | analysis-by-synthesis-apt2-bed 50 | analysis-by-synthesis-apt2-kitchen 51 | analysis-by-synthesis-apt2-living 52 | analysis-by-synthesis-apt2-luke 53 | analysis-by-synthesis-office2-5a 54 | analysis-by-synthesis-office2-5b 55 | -------------------------------------------------------------------------------- /torch_points3d/datasets/registration/urls/url_7-scenes.txt: -------------------------------------------------------------------------------- 1 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/7-scenes-chess.zip 2 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/7-scenes-fire.zip 3 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/7-scenes-heads.zip 4 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/7-scenes-office.zip 5 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/7-scenes-pumpkin.zip 6 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/7-scenes-redkitchen.zip 7 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/7-scenes-stairs.zip 8 | -------------------------------------------------------------------------------- /torch_points3d/datasets/registration/urls/url_analysis-by-synthesis.txt: -------------------------------------------------------------------------------- 1 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/analysis-by-synthesis-apt1-kitchen.zip 2 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/analysis-by-synthesis-apt1-living.zip 3 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/analysis-by-synthesis-apt2-bed.zip 4 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/analysis-by-synthesis-apt2-kitchen.zip 5 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/analysis-by-synthesis-apt2-living.zip 6 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/analysis-by-synthesis-apt2-luke.zip 7 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/analysis-by-synthesis-office2-5a.zip 8 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/analysis-by-synthesis-office2-5b.zip 9 | -------------------------------------------------------------------------------- /torch_points3d/datasets/registration/urls/url_bundlefusion.txt: -------------------------------------------------------------------------------- 1 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/bundlefusion-apt0.zip 2 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/bundlefusion-apt1.zip 3 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/bundlefusion-apt2.zip 4 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/bundlefusion-copyroom.zip 5 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/bundlefusion-office0.zip 6 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/bundlefusion-office1.zip 7 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/bundlefusion-office2.zip 8 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/bundlefusion-office3.zip 9 | -------------------------------------------------------------------------------- /torch_points3d/datasets/registration/urls/url_rgbd-scenes-v2.txt: 
-------------------------------------------------------------------------------- 1 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/rgbd-scenes-v2-scene_01.zip 2 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/rgbd-scenes-v2-scene_02.zip 3 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/rgbd-scenes-v2-scene_03.zip 4 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/rgbd-scenes-v2-scene_04.zip 5 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/rgbd-scenes-v2-scene_05.zip 6 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/rgbd-scenes-v2-scene_06.zip 7 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/rgbd-scenes-v2-scene_07.zip 8 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/rgbd-scenes-v2-scene_08.zip 9 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/rgbd-scenes-v2-scene_09.zip 10 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/rgbd-scenes-v2-scene_10.zip 11 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/rgbd-scenes-v2-scene_11.zip 12 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/rgbd-scenes-v2-scene_12.zip 13 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/rgbd-scenes-v2-scene_13.zip 14 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/rgbd-scenes-v2-scene_14.zip 15 | -------------------------------------------------------------------------------- /torch_points3d/datasets/registration/urls/url_test.txt: -------------------------------------------------------------------------------- 1 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/scene-fragments/7-scenes-redkitchen.zip 2 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/scene-fragments/sun3d-home_at-home_at_scan1_2013_jan_1.zip 3 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/scene-fragments/sun3d-home_md-home_md_scan9_2012_sep_30.zip 4 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/scene-fragments/sun3d-hotel_uc-scan3.zip 5 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/scene-fragments/sun3d-hotel_umd-maryland_hotel1.zip 6 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/scene-fragments/sun3d-hotel_umd-maryland_hotel3.zip 7 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/scene-fragments/sun3d-mit_76_studyroom-76-1studyroom2.zip 8 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/scene-fragments/sun3d-mit_lab_hj-lab_hj_tea_nov_2_2012_scan1_erika.zip 9 | -------------------------------------------------------------------------------- /torch_points3d/datasets/registration/urls/url_train_small.txt: -------------------------------------------------------------------------------- 1 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/7-scenes-chess.zip 2 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/7-scenes-heads.zip 3 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/7-scenes-pumpkin.zip 4 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/analysis-by-synthesis-apt2-luke.zip 5 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/analysis-by-synthesis-office2-5a.zip 6 | 
http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/analysis-by-synthesis-office2-5b.zip 7 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/bundlefusion-apt2.zip 8 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/bundlefusion-copyroom.zip 9 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/bundlefusion-office2.zip 10 | 11 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/rgbd-scenes-v2-scene_02.zip 12 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/rgbd-scenes-v2-scene_08.zip 13 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/sun3d-brown_cs_2-brown_cs2.zip 14 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/sun3d-harvard_c11-hv_c11_2.zip 15 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/sun3d-home_bksh-home_bksh_oct_30_2012_scan2_erika.zip 16 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/sun3d-hotel_nips2012-nips_4.zip 17 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/sun3d-mit_32_d507-d507_2.zip 18 | -------------------------------------------------------------------------------- /torch_points3d/datasets/registration/urls/url_train_tiny.txt: -------------------------------------------------------------------------------- 1 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/7-scenes-chess.zip 2 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/sun3d-mit_32_d507-d507_2.zip 3 | -------------------------------------------------------------------------------- /torch_points3d/datasets/registration/urls/url_val.txt: -------------------------------------------------------------------------------- 1 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/sun3d-brown_bm_4-brown_bm_4.zip 2 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/sun3d-harvard_c11-hv_c11_2.zip 3 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/7-scenes-heads.zip 4 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/bundlefusion-office0.zip 5 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/analysis-by-synthesis-apt2-kitchen.zip 6 | http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/rgbd-scenes-v2-scene_10.zip 7 | -------------------------------------------------------------------------------- /torch_points3d/datasets/samplers.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | from torch.utils.data import Sampler 4 | 5 | class BalancedRandomSampler(Sampler): 6 | r"""This sampler creates balanced batches based on the class distribution. 7 | It samples with replacement: each draw picks a class uniformly at random, then an index within that class. 8 | """ 9 | def __init__(self, labels, replacement=True): 10 | # sampling always uses replacement; the argument is kept for API compatibility 11 | self.num_samples = len(labels) 12 | 13 | self.idx_classes, self.counts = np.unique(labels, return_counts=True) 14 | self.indices = { 15 | idx: np.argwhere(labels == idx).flatten() for idx in self.idx_classes 16 | } 17 | 18 | def __iter__(self): 19 | indices = [] 20 | for _ in range(self.num_samples): 21 | idx = np.random.choice(self.idx_classes) 22 | indice = int(np.random.choice(self.indices[idx])) 23 | indices.append(indice) 24 | return iter(indices) 25 | 26 | def __len__(self): 27 | return self.num_samples 28 | 29 | def __repr__(self): 30 | return "{}(num_samples={})".format(self.__class__.__name__, self.num_samples) 31 | 32 | 
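A usage sketch for the sampler above (not part of the repository): handing BalancedRandomSampler to a standard PyTorch DataLoader so that minority classes are drawn about as often as majority ones. The toy labels and sizes below are invented for illustration.

import numpy as np
import torch
from torch.utils.data import DataLoader, TensorDataset
from torch_points3d.datasets.samplers import BalancedRandomSampler

labels = np.array([0] * 95 + [1] * 5)  # heavily imbalanced toy labels
dataset = TensorDataset(torch.arange(100), torch.as_tensor(labels))
loader = DataLoader(dataset, batch_size=16, sampler=BalancedRandomSampler(labels))
# one epoch draws len(labels) indices with replacement; classes 0 and 1 are
# each picked with probability ~0.5 regardless of their frequency in `labels`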
-------------------------------------------------------------------------------- /torch_points3d/datasets/segmentation/__init__.py: -------------------------------------------------------------------------------- 1 | IGNORE_LABEL: int = -1 2 | 3 | from .shapenet import ShapeNet, ShapeNetDataset 4 | from .s3dis import S3DISFusedDataset, S3DIS1x1Dataset, S3DISOriginalFused, S3DISSphere 5 | from .scannet import ScannetDataset, Scannet 6 | -------------------------------------------------------------------------------- /torch_points3d/datasets/segmentation/forward/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SmartForest-no/SegmentAnyTree/361650805e3d7c9664b9d5c44ac6af8047942832/torch_points3d/datasets/segmentation/forward/__init__.py -------------------------------------------------------------------------------- /torch_points3d/metrics/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SmartForest-no/SegmentAnyTree/361650805e3d7c9664b9d5c44ac6af8047942832/torch_points3d/metrics/__init__.py -------------------------------------------------------------------------------- /torch_points3d/metrics/box_detection/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SmartForest-no/SegmentAnyTree/361650805e3d7c9664b9d5c44ac6af8047942832/torch_points3d/metrics/box_detection/__init__.py -------------------------------------------------------------------------------- /torch_points3d/metrics/colored_tqdm.py: -------------------------------------------------------------------------------- 1 | from tqdm.auto import tqdm 2 | from collections import OrderedDict 3 | from numbers import Number 4 | import numpy as np 5 | 6 | from torch_points3d.utils.colors import COLORS 7 | 8 | 9 | class Coloredtqdm(tqdm): 10 | def set_postfix(self, ordered_dict=None, refresh=True, color=None, round=4, **kwargs): 11 | postfix = OrderedDict([] if ordered_dict is None else ordered_dict) 12 | 13 | for key in sorted(kwargs.keys()): 14 | postfix[key] = kwargs[key] 15 | 16 | for key in postfix.keys(): 17 | if isinstance(postfix[key], Number): 18 | postfix[key] = self.format_num_to_k(np.round(postfix[key], round), k=round + 1) 19 | if isinstance(postfix[key], str): 20 | postfix[key] = str(postfix[key]) 21 | if len(postfix[key]) != round: 22 | postfix[key] += (round - len(postfix[key])) * " " 23 | 24 | if color is not None: 25 | self.postfix = color 26 | else: 27 | self.postfix = "" 28 | 29 | self.postfix += ", ".join(key + "=" + postfix[key] for key in postfix.keys()) 30 | if color is not None: 31 | self.postfix += COLORS.END_TOKEN 32 | 33 | if refresh: 34 | self.refresh() 35 | 36 | def format_num_to_k(self, seq, k=4): 37 | seq = str(seq) 38 | length = len(seq) 39 | out = seq + " " * (k - length) if length < k else seq 40 | return out if length < k else seq[:k] 41 | -------------------------------------------------------------------------------- /torch_points3d/models/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SmartForest-no/SegmentAnyTree/361650805e3d7c9664b9d5c44ac6af8047942832/torch_points3d/models/__init__.py -------------------------------------------------------------------------------- /torch_points3d/models/base_architectures/__init__.py: -------------------------------------------------------------------------------- 1 | from .unet import * 2 | from .backbone import * 3 | -------------------------------------------------------------------------------- /torch_points3d/models/model_factory.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | import hydra 3 | 4 | from .base_model import BaseModel 5 | from torch_points3d.utils.model_building_utils.model_definition_resolver import resolve_model 6 | 7 | 8 | def instantiate_model(config, dataset) -> BaseModel: 9 | """ Creates a model given a dataset and a training config. The config should contain the following: 10 | - config.data.task: task that will be evaluated 11 | - config.model_name: model to instantiate 12 | - config.models: All models available 13 | """ 14 | 15 | # Get task and model_name 16 | task = config.data.task 17 | tested_model_name = config.model_name 18 | 19 | # Find configs 20 | models = config.get('models') 21 | model_config = getattr(models, tested_model_name, None) 22 | if model_config is None: 23 | models_keys = models.keys() if models is not None else "" 24 | raise Exception("The model_name {} isn't within {}".format(tested_model_name, list(models_keys))) 25 | resolve_model(model_config, dataset, task) 26 | 27 | model_class = getattr(model_config, "class") 28 | model_paths = model_class.split(".") 29 | module = ".".join(model_paths[:-1]) 30 | class_name = model_paths[-1] 31 | model_module = ".".join(["torch_points3d.models", task, module]) 32 | modellib = importlib.import_module(model_module) 33 | 34 | model_cls = None 35 | for name, cls in modellib.__dict__.items(): 36 | if name.lower() == class_name.lower(): 37 | model_cls = cls 38 | 39 | if model_cls is None: 40 | raise NotImplementedError( 41 | "In %s.py, there should be a subclass of BaseModel with a class name that matches %s in lowercase." 42 | % (model_module, class_name) 43 | ) 44 | model = model_cls(model_config, "dummy", dataset, modellib) 45 | return model 46 | -------------------------------------------------------------------------------- /torch_points3d/models/model_interface.py: -------------------------------------------------------------------------------- 1 | from abc import abstractmethod, abstractproperty, ABC 2 | 3 | 4 | class CheckpointInterface(ABC): 5 | """This class is a minimal interface class for models. 
6 | """ 7 | 8 | @abstractproperty # type: ignore 9 | def schedulers(self): 10 | pass 11 | 12 | @schedulers.setter 13 | def schedulers(self, schedulers): 14 | pass 15 | 16 | @abstractproperty # type: ignore 17 | def optimizer(self): 18 | pass 19 | 20 | @optimizer.setter 21 | def optimizer(self, optimizer): 22 | pass 23 | 24 | @abstractmethod 25 | def state_dict(self): 26 | pass 27 | 28 | @abstractmethod 29 | def load_state_dict(self, state, strict=False): 30 | pass 31 | 32 | 33 | class DatasetInterface(ABC): 34 | @abstractproperty 35 | def conv_type(self): 36 | pass 37 | 38 | def get_spatial_ops(self): 39 | pass 40 | 41 | 42 | class TrackerInterface(ABC): 43 | @property 44 | @abstractmethod 45 | def conv_type(self): 46 | pass 47 | 48 | @abstractmethod 49 | def get_labels(self): 50 | """ returns a tensor of size ``[N_points]`` where each value is the label of a point 51 | """ 52 | 53 | @abstractmethod 54 | def get_batch(self): 55 | """ returns a tensor of size ``[N_points]`` where each value is the batch index of a point 56 | """ 57 | 58 | @abstractmethod 59 | def get_output(self): 60 | """ returns a tensor of size ``[N_points,...]`` where each value is the output 61 | of the network for a point (output of the last layer in general) 62 | """ 63 | 64 | @abstractmethod 65 | def get_input(self): 66 | """ returns the last input that was given to the model or raises an error 67 | """ 68 | 69 | @abstractmethod 70 | def get_current_losses(self): 71 | """Return training losses / errors. train.py will print out these errors on console""" 72 | 73 | @abstractproperty 74 | def device(self): 75 | """ Returns the device on which the model lives (cpu or gpu) 76 | """ 77 | -------------------------------------------------------------------------------- /torch_points3d/models/panoptic/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SmartForest-no/SegmentAnyTree/361650805e3d7c9664b9d5c44ac6af8047942832/torch_points3d/models/panoptic/__init__.py -------------------------------------------------------------------------------- /torch_points3d/models/panoptic/structures_mine.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | from typing import NamedTuple, List 4 | 5 | class PanopticResults(NamedTuple): 6 | semantic_logits: torch.Tensor 7 | offset_logits: torch.Tensor 8 | embedding_logits: torch.Tensor 9 | embed_clusters: List[torch.Tensor] # Each item contains the list of indices in the cluster 10 | offset_clusters: List[torch.Tensor] # Each item contains the list of indices in the cluster 11 | embed_pre: torch.Tensor 12 | offset_pre: torch.Tensor 13 | 14 | class PanopticLabels(NamedTuple): 15 | center_label: torch.Tensor 16 | y: torch.Tensor 17 | num_instances: torch.Tensor 18 | instance_labels: torch.Tensor 19 | instance_mask: torch.Tensor 20 | vote_label: torch.Tensor 21 | -------------------------------------------------------------------------------- /torch_points3d/models/segmentation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SmartForest-no/SegmentAnyTree/361650805e3d7c9664b9d5c44ac6af8047942832/torch_points3d/models/segmentation/__init__.py -------------------------------------------------------------------------------- /torch_points3d/models/segmentation/minkowski.py: -------------------------------------------------------------------------------- 1 | import 
logging 2 | import torch.nn.functional as F 3 | import torch 4 | 5 | from torch_points3d.modules.MinkowskiEngine import * 6 | from torch_points3d.models.base_architectures import UnwrappedUnetBasedModel 7 | from torch_points3d.models.base_model import BaseModel 8 | from torch_points3d.datasets.segmentation import IGNORE_LABEL 9 | from torch_points3d.applications.minkowski import Minkowski 10 | 11 | 12 | log = logging.getLogger(__name__) 13 | 14 | 15 | class Minkowski_Baseline_Model(BaseModel): 16 | def __init__(self, option, model_type, dataset, modules): 17 | super(Minkowski_Baseline_Model, self).__init__(option) 18 | self._weight_classes = dataset.weight_classes 19 | self.model = initialize_minkowski_unet( 20 | option.model_name, dataset.feature_dimension, dataset.num_classes, **option.get("extra_options", {}) 21 | ) 22 | self.loss_names = ["loss_seg"] 23 | 24 | def set_input(self, data, device): 25 | 26 | self.batch_idx = data.batch.squeeze() 27 | coords = torch.cat([data.batch.unsqueeze(-1).int(), data.coords.int()], -1) 28 | self.input = ME.SparseTensor(features=data.x, coordinates=coords, device=device) 29 | self.labels = data.y.to(device) 30 | 31 | def forward(self, *args, **kwargs): 32 | self.output = F.log_softmax(self.model(self.input).features, dim=-1) 33 | if self._weight_classes is not None: 34 | self._weight_classes = self._weight_classes.to(self.device) 35 | if self.labels is not None: 36 | self.loss_seg = F.nll_loss(self.output, self.labels, ignore_index=IGNORE_LABEL, weight=self._weight_classes) 37 | 38 | def backward(self): 39 | self.loss_seg.backward() 40 | -------------------------------------------------------------------------------- /torch_points3d/models/segmentation/pointcnn.py: -------------------------------------------------------------------------------- 1 | from .base import Segmentation_MP 2 | from torch_points3d.modules.PointCNN import * 3 | 4 | 5 | class PointCNNSeg(Segmentation_MP): 6 | """ Unet base implementation of PointCNN 7 | https://arxiv.org/abs/1801.07791 8 | """ 9 | -------------------------------------------------------------------------------- /torch_points3d/models/segmentation/pvcnn.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import torch.nn.functional as F 3 | import torch 4 | 5 | from torch_points3d.modules.PVCNN import pvcnn 6 | from torch_points3d.models.base_model import BaseModel 7 | from torch_points3d.datasets.segmentation import IGNORE_LABEL 8 | 9 | from torchsparse import SparseTensor 10 | 11 | log = logging.getLogger(__name__) 12 | 13 | 14 | class PVCNN(BaseModel): 15 | def __init__(self, option, model_type, dataset, modules): 16 | super(PVCNN, self).__init__(option) 17 | self._weight_classes = dataset.weight_classes 18 | 19 | self.model = pvcnn.PVCNN(option, model_type, dataset, modules) 20 | 21 | self.loss_names = ["loss_seg"] 22 | 23 | def set_input(self, data, device): 24 | if data.batch.dim() == 1: 25 | data.batch = data.batch.unsqueeze(-1) 26 | coords = torch.cat([data.pos, data.batch], -1) 27 | self.batch_idx = data.batch.squeeze() 28 | self.input = SparseTensor(data.x, coords).to(self.device) 29 | self.labels = data.y.to(self.device) 30 | 31 | def forward(self, *args, **kwargs): 32 | self.output = self.model(self.input) 33 | if self._weight_classes is not None: 34 | self._weight_classes = self._weight_classes.to(self.device) 35 | if self.labels is not None: 36 | self.loss_seg = F.cross_entropy( 37 | self.output, self.labels, weight=self._weight_classes, 
ignore_index=IGNORE_LABEL 38 | ) 39 | 40 | def backward(self): 41 | self.loss_seg.backward() 42 | -------------------------------------------------------------------------------- /torch_points3d/models/segmentation/randlanet.py: -------------------------------------------------------------------------------- 1 | from .base import Segmentation_MP 2 | from torch_points3d.modules.RandLANet import * 3 | 4 | 5 | class RandLANetSeg(Segmentation_MP): 6 | """ Unet base implementation of RandLANet 7 | """ 8 | -------------------------------------------------------------------------------- /torch_points3d/models/segmentation/sparseconv3d.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import torch.nn.functional as F 3 | import torch.nn as nn 4 | import torchsparse as TS 5 | 6 | 7 | from torch_points3d.models.base_model import BaseModel 8 | from torch_points3d.datasets.segmentation import IGNORE_LABEL 9 | from torch_points3d.applications.sparseconv3d import SparseConv3d 10 | import torch_points3d.modules.SparseConv3d as sp3d 11 | 12 | from torch_points3d.core.common_modules import FastBatchNorm1d, Seq 13 | 14 | log = logging.getLogger(__name__) 15 | 16 | 17 | class APIModel(BaseModel): 18 | def __init__(self, option, model_type, dataset, modules): 19 | # call the initialization method of BaseModel 20 | super().__init__(option) 21 | self._weight_classes = dataset.weight_classes 22 | self.backbone = SparseConv3d( 23 | "unet", dataset.feature_dimension, config=option.backbone, backend=option.get("backend", "minkowski") 24 | ) 25 | self._supports_mixed = sp3d.nn.get_backend() == "torchsparse" 26 | self.head = nn.Sequential(nn.Linear(self.backbone.output_nc, dataset.num_classes)) 27 | self.loss_names = ["loss_seg"] 28 | 29 | def set_input(self, data, device): 30 | self.batch_idx = data.batch.squeeze() 31 | self.input = data 32 | if data.y is not None: 33 | self.labels = data.y.to(self.device) 34 | else: 35 | self.labels = None 36 | 37 | def forward(self, *args, **kwargs): 38 | features = self.backbone(self.input).x 39 | logits = self.head(features) 40 | self.output = F.log_softmax(logits, dim=-1) 41 | if self._weight_classes is not None: 42 | self._weight_classes = self._weight_classes.to(self.device) 43 | if self.labels is not None: 44 | self.loss_seg = F.nll_loss(self.output, self.labels, ignore_index=IGNORE_LABEL, weight=self._weight_classes) 45 | 46 | def backward(self): 47 | self.loss_seg.backward() 48 | 
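A minimal sketch (not from the repository) of the loss convention APIModel uses above: logits go through log_softmax, and points labelled IGNORE_LABEL (-1, defined in torch_points3d/datasets/segmentation/__init__.py) are simply excluded from the NLL loss.

import torch
import torch.nn.functional as F

IGNORE_LABEL = -1
logits = torch.randn(5, 3)  # 5 points, 3 classes
labels = torch.tensor([0, 2, IGNORE_LABEL, 1, IGNORE_LABEL])
loss = F.nll_loss(F.log_softmax(logits, dim=-1), labels, ignore_index=IGNORE_LABEL)
# only the three labelled points contribute; this is equivalent to
# F.cross_entropy(logits, labels, ignore_index=IGNORE_LABEL)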
-------------------------------------------------------------------------------- /torch_points3d/modules/KPConv/__init__.py: -------------------------------------------------------------------------------- 1 | from .blocks import * 2 | from .kernels import * 3 | -------------------------------------------------------------------------------- /torch_points3d/modules/KPConv/losses.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | def fitting_loss(sq_distance, radius): 5 | """ KPConv fitting loss. For each query point it ensures that at least one neighbour is 6 | close to each kernel point 7 | 8 | Arguments: 9 | sq_distance - For each query point, from all neighbours to all KP points [N_query, N_neighbours, N_KPoints] 10 | radius - Radius of the convolution 11 | """ 12 | kpmin = sq_distance.min(dim=1)[0] 13 | normalised_kpmin = kpmin / (radius ** 2) 14 | return torch.mean(normalised_kpmin) 15 | 16 | 17 | def repulsion_loss(deformed_kpoints, radius): 18 | """ Ensures that the deformed points within the kernel remain equidistant 19 | 20 | Arguments: 21 | deformed_kpoints - deformed points for each query point 22 | radius - Radius of the kernel 23 | """ 24 | deformed_kpoints = deformed_kpoints / float(radius)  # normalise by the kernel radius 25 | n_points = deformed_kpoints.shape[1] 26 | repulsive_loss = 0 27 | for i in range(n_points): 28 | with torch.no_grad(): 29 | other_points = torch.cat([deformed_kpoints[:, :i, :], deformed_kpoints[:, i + 1 :, :]], dim=1) 30 | distances = torch.sqrt(torch.sum((other_points - deformed_kpoints[:, i : i + 1, :]) ** 2, dim=-1)) 31 | repulsion_force = torch.sum(torch.pow(torch.relu(1.5 - distances), 2), dim=1) 32 | repulsive_loss += torch.mean(repulsion_force) 33 | return repulsive_loss 34 | 35 | 36 | def permissive_loss(deformed_kpoints, radius): 37 | """This loss penalizes deformed_kpoints that 38 | move outside of the radius defined for the convolution 39 | """ 40 | norm_deformed_normalized = torch.norm(deformed_kpoints, p=2, dim=-1) / float(radius) 41 | permissive_loss = torch.mean(norm_deformed_normalized[norm_deformed_normalized > 1.0]) 42 | return permissive_loss 43 | -------------------------------------------------------------------------------- /torch_points3d/modules/MinkowskiEngine/__init__.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | try: 4 | from .networks import * 5 | from .res16unet import * 6 | from .resunet import * 7 | 8 | _custom_models = sys.modules[__name__] 9 | 10 | def initialize_minkowski_unet( 11 | model_name, in_channels, out_channels, D=3, conv1_kernel_size=3, dilations=[1, 1, 1, 1], **kwargs 12 | ): 13 | net_cls = getattr(_custom_models, model_name) 14 | return net_cls( 15 | in_channels=in_channels, out_channels=out_channels, D=D, conv1_kernel_size=conv1_kernel_size, **kwargs 16 | ) 17 | 18 | 19 | except: 20 | import logging 21 | 22 | log = logging.getLogger(__name__) 23 | log.warning("Could not load Minkowski Engine, please check that it is installed correctly") 24 | -------------------------------------------------------------------------------- /torch_points3d/modules/PPNet/__init__.py: -------------------------------------------------------------------------------- 1 | from .blocks import * 2 | from .ops import * 3 | -------------------------------------------------------------------------------- /torch_points3d/modules/PVCNN/blocks.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | 3 | import torchsparse.nn as spnn 4 | 5 | 6 | class BasicConvolutionBlock(nn.Module): 7 | def __init__(self, inc, outc, ks=3, stride=1, dilation=1): 8 | super().__init__() 9 | self.net = nn.Sequential( 10 | spnn.Conv3d(inc, outc, kernel_size=ks, dilation=dilation, stride=stride), 11 | spnn.BatchNorm(outc), 12 | spnn.ReLU(True), 13 | ) 14 | 15 | def forward(self, x): 16 | out = self.net(x) 17 | return out 18 | 19 | 20 | class BasicDeconvolutionBlock(nn.Module): 21 | def __init__(self, inc, outc, ks=3, stride=1): 22 | super().__init__() 23 | self.net 
= nn.Sequential( 24 | spnn.Conv3d(inc, outc, kernel_size=ks, stride=stride, transpose=True), spnn.BatchNorm(outc), spnn.ReLU(True) 25 | ) 26 | 27 | def forward(self, x): 28 | return self.net(x) 29 | 30 | 31 | class ResidualBlock(nn.Module): 32 | def __init__(self, inc, outc, ks=3, stride=1, dilation=1): 33 | super().__init__() 34 | self.net = nn.Sequential( 35 | spnn.Conv3d(inc, outc, kernel_size=ks, dilation=dilation, stride=stride), 36 | spnn.BatchNorm(outc), 37 | spnn.ReLU(True), 38 | spnn.Conv3d(outc, outc, kernel_size=ks, dilation=dilation, stride=1), 39 | spnn.BatchNorm(outc), 40 | ) 41 | 42 | self.downsample = ( 43 | nn.Sequential() 44 | if (inc == outc and stride == 1) 45 | else nn.Sequential(spnn.Conv3d(inc, outc, kernel_size=1, dilation=1, stride=stride), spnn.BatchNorm(outc)) 46 | ) 47 | 48 | self.relu = spnn.ReLU(True) 49 | 50 | def forward(self, x): 51 | out = self.relu(self.net(x) + self.downsample(x)) 52 | return out 53 | -------------------------------------------------------------------------------- /torch_points3d/modules/PointCNN/__init__.py: -------------------------------------------------------------------------------- 1 | from .modules import * 2 | -------------------------------------------------------------------------------- /torch_points3d/modules/PointNet/__init__.py: -------------------------------------------------------------------------------- 1 | from .modules import * 2 | -------------------------------------------------------------------------------- /torch_points3d/modules/RSConv/__init__.py: -------------------------------------------------------------------------------- 1 | from .dense import * 2 | from .message_passing import * 3 | -------------------------------------------------------------------------------- /torch_points3d/modules/RSConv/message_passing.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch.nn import ReLU 3 | from torch_geometric.nn import MessagePassing 4 | 5 | 6 | from torch_points3d.core.base_conv.message_passing import * 7 | from torch_points3d.core.spatial_ops import * 8 | 9 | 10 | class Convolution(MessagePassing): 11 | r"""The Relation Shape Convolution layer from "Relation-Shape Convolutional Neural Network for Point Cloud Analysis" 12 | https://arxiv.org/pdf/1904.07601 13 | 14 | local_nn - an MLP which is applied to the relation vector h_ij between points i and j to determine 15 | the weights applied to each element of the feature for x_j 16 | 17 | global_nn - an optional MLP for channel-raising following the convolution 18 | 19 | """ 20 | 21 | def __init__(self, local_nn, activation=ReLU(), global_nn=None, aggr="max", **kwargs): 22 | super(Convolution, self).__init__(aggr=aggr) 23 | self.local_nn = MLP(local_nn) 24 | self.activation = activation 25 | self.global_nn = MLP(global_nn) if global_nn is not None else None 26 | 27 | def forward(self, x, pos, edge_index): 28 | return self.propagate(edge_index, x=x, pos=pos) 29 | 30 | def message(self, pos_i, pos_j, x_j): 31 | 32 | if x_j is None: 33 | x_j = pos_j 34 | 35 | vij = pos_i - pos_j 36 | dij = torch.norm(vij, dim=1).unsqueeze(1) 37 | 38 | hij = torch.cat([dij, vij, pos_i, pos_j,], dim=1) 39 | 40 | M_hij = self.local_nn(hij) 41 | 42 | msg = M_hij * x_j 43 | 44 | return msg 45 | 46 | def update(self, aggr_out): 47 | x = self.activation(aggr_out) 48 | if self.global_nn is not None: 49 | x = self.global_nn(x) 50 | return x 51 | 52 | 53 | class RSConvDown(BaseConvolutionDown): 54 | def __init__(self, ratio=None, 
radius=None, local_nn=None, down_conv_nn=None, *args, **kwargs): 55 | super(RSConvDown, self).__init__(FPSSampler(ratio), RadiusNeighbourFinder(radius), *args, **kwargs) 56 | 57 | self._conv = Convolution(local_nn=local_nn, global_nn=down_conv_nn) 58 | 59 | def conv(self, x, pos, edge_index, batch): 60 | return self._conv(x, pos, edge_index) 61 | -------------------------------------------------------------------------------- /torch_points3d/modules/RandLANet/__init__.py: -------------------------------------------------------------------------------- 1 | from .modules import * 2 | -------------------------------------------------------------------------------- /torch_points3d/modules/SparseConv3d/__init__.py: -------------------------------------------------------------------------------- 1 | from .nn import * 2 | -------------------------------------------------------------------------------- /torch_points3d/modules/SparseConv3d/nn/__init__.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import logging 4 | import importlib 5 | 6 | ROOT = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../../..") 7 | sys.path.insert(0, ROOT) 8 | 9 | log = logging.getLogger(__name__) 10 | 11 | # Import torchsparse for documentation and linting purposes 12 | try: 13 | from .torchsparse import * # type: ignore 14 | except: 15 | try: 16 | from .minkowski import * # type: ignore 17 | except: 18 | pass 19 | 20 | 21 | __all__ = ["cat", "Conv3d", "Conv3dTranspose", "ReLU", "SparseTensor", "BatchNorm"] 22 | for val in __all__: 23 | exec(val + "=None") 24 | 25 | def backend_valid(_backend): 26 | return _backend in {"torchsparse", "minkowski"} 27 | 28 | sp3d_backend = None 29 | 30 | def get_backend(): 31 | return sp3d_backend 32 | 33 | def set_backend(_backend): 34 | """ Use this method to switch sparse backend dynamically. When importing this module with a wildcard such as 35 | from torch_points3d.modules.SparseConv3d.nn import * 36 | make sure that you import it again after calling this method. 37 | 38 | 39 | Parameters 40 | ---------- 41 | backend : str 42 | "torchsparse" or "minkowski" 43 | """ 44 | assert backend_valid(_backend) 45 | try: 46 | modules = importlib.import_module("." 
+ _backend, __name__)  # noqa: F841 47 | global sp3d_backend 48 | sp3d_backend = _backend 49 | except: 50 | log.exception("Could not import %s backend for sparse convolutions" % _backend) 51 | for val in __all__: 52 | exec("globals()['%s'] = modules.%s" % (val, val)) 53 | -------------------------------------------------------------------------------- /torch_points3d/modules/SparseConv3d/nn/minkowski.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import MinkowskiEngine as ME 3 | 4 | 5 | class Conv3d(ME.MinkowskiConvolution): 6 | def __init__( 7 | self, 8 | in_channels: int, 9 | out_channels: int, 10 | kernel_size: int = 3, 11 | stride: int = 1, 12 | dilation: int = 1, 13 | bias: bool = False, 14 | ) -> None: 15 | super().__init__( 16 | in_channels, 17 | out_channels, 18 | kernel_size=kernel_size, 19 | stride=stride, 20 | dilation=dilation, 21 | bias=bias, 22 | dimension=3, 23 | ) 24 | 25 | 26 | class Conv3dTranspose(ME.MinkowskiConvolutionTranspose): 27 | def __init__( 28 | self, 29 | in_channels: int, 30 | out_channels: int, 31 | kernel_size: int = 3, 32 | stride: int = 1, 33 | dilation: int = 1, 34 | bias: bool = False, 35 | ) -> None: 36 | super().__init__( 37 | in_channels, 38 | out_channels, 39 | kernel_size=kernel_size, 40 | stride=stride, 41 | dilation=dilation, 42 | bias=bias, 43 | dimension=3, 44 | ) 45 | 46 | 47 | class BatchNorm(ME.MinkowskiBatchNorm): 48 | def __repr__(self): 49 | return self.bn.__repr__() 50 | 51 | 52 | class ReLU(ME.MinkowskiReLU): 53 | def __init__(self, inplace=False): 54 | super().__init__(inplace=inplace) 55 | 56 | 57 | def cat(*args): 58 | return ME.cat(*args) 59 | 60 | 61 | def SparseTensor(feats, coordinates, batch, device=torch.device("cpu")): 62 | if batch.dim() == 1: 63 | batch = batch.unsqueeze(-1) 64 | coords = torch.cat([batch.int(), coordinates.int()], -1) 65 | return ME.SparseTensor(features=feats, coordinates=coords, device=device) 66 | -------------------------------------------------------------------------------- /torch_points3d/modules/SparseConv3d/nn/torchsparse.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torchsparse as TS 3 | import torchsparse.nn 4 | 5 | 6 | class Conv3d(TS.nn.Conv3d): 7 | def __init__( 8 | self, 9 | in_channels: int, 10 | out_channels: int, 11 | kernel_size: int = 3, 12 | stride: int = 1, 13 | dilation: int = 1, 14 | bias: bool = False, 15 | ) -> None: 16 | super().__init__( 17 | in_channels, out_channels, kernel_size=kernel_size, stride=stride, dilation=dilation, bias=bias, 18 | ) 19 | 20 | 21 | class Conv3dTranspose(TS.nn.Conv3d): 22 | def __init__( 23 | self, 24 | in_channels: int, 25 | out_channels: int, 26 | kernel_size: int = 3, 27 | stride: int = 1, 28 | dilation: int = 1, 29 | bias: bool = False, 30 | transpose: bool = False, 31 | ) -> None: 32 | super().__init__( 33 | in_channels, 34 | out_channels, 35 | kernel_size=kernel_size, 36 | stride=stride, 37 | dilation=dilation, 38 | bias=bias, 39 | transposed=True, 40 | ) 41 | 42 | 43 | class BatchNorm(torch.nn.Module): 44 | def __init__(self, num_features: int, *, eps: float = 1e-5, momentum: float = 0.1) -> None: 45 | super().__init__() 46 | self.bn = TS.nn.BatchNorm(num_features=num_features, eps=eps, momentum=momentum) 47 | 48 | def forward(self, feats): 49 | return self.bn(feats) 50 | 51 | def __repr__(self): 52 | return self.bn.__repr__() 53 | 54 | 55 | class ReLU(TS.nn.ReLU): 56 | def __init__(self, inplace=True): 
super().__init__(inplace=inplace) 58 | 59 | 60 | def cat(*args): 61 | return TS.cat(args) 62 | 63 | 64 | def SparseTensor(feats, coordinates, batch, device=torch.device("cpu")): 65 | if batch.dim() == 1: 66 | batch = batch.unsqueeze(-1) 67 | coords = torch.cat([coordinates.int(), batch.int()], -1) 68 | return TS.SparseTensor(feats, coords).to(device) 69 | -------------------------------------------------------------------------------- /torch_points3d/modules/VoteNet/__init__.py: -------------------------------------------------------------------------------- 1 | from .voting_module import * 2 | from .proposal_module import * 3 | from .loss_helper import get_loss 4 | from .votenet_results import VoteNetResults 5 | -------------------------------------------------------------------------------- /torch_points3d/modules/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SmartForest-no/SegmentAnyTree/361650805e3d7c9664b9d5c44ac6af8047942832/torch_points3d/modules/__init__.py -------------------------------------------------------------------------------- /torch_points3d/modules/pointnet2/__init__.py: -------------------------------------------------------------------------------- 1 | from .dense import * 2 | from .message_passing import * 3 | -------------------------------------------------------------------------------- /torch_points3d/modules/pointnet2/message_passing.py: -------------------------------------------------------------------------------- 1 | from torch_geometric.nn import PointConv 2 | 3 | from torch_points3d.core.base_conv.base_conv import * 4 | from torch_points3d.core.base_conv.message_passing import * 5 | from torch_points3d.core.common_modules.base_modules import * 6 | from torch_points3d.core.spatial_ops import FPSSampler, RandomSampler, MultiscaleRadiusNeighbourFinder 7 | 8 | 9 | class SAModule(BaseMSConvolutionDown): 10 | def __init__(self, ratio=None, radius=None, radius_num_point=None, down_conv_nn=None, *args, **kwargs): 11 | super(SAModule, self).__init__( 12 | FPSSampler(ratio=ratio), 13 | MultiscaleRadiusNeighbourFinder(radius, max_num_neighbors=radius_num_point), 14 | *args, 15 | **kwargs 16 | ) 17 | 18 | local_nn = MLP(down_conv_nn) if down_conv_nn is not None else None 19 | 20 | self._conv = PointConv(local_nn=local_nn, global_nn=None) 21 | self._radius = radius 22 | self._ratio = ratio 23 | self._num_points = radius_num_point 24 | 25 | def conv(self, x, pos, edge_index, batch): 26 | return self._conv(x, pos, edge_index) 27 | 28 | def extra_repr(self): 29 | return "{}(ratio {}, radius {}, radius_points {})".format( 30 | self.__class__.__name__, self._ratio, self._radius, self._num_points 31 | ) 32 | -------------------------------------------------------------------------------- /torch_points3d/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .colors import * 2 | from .config import * 3 | from .enums import * 4 | from .running_stats import * 5 | from .timer import * 6 | from .transform_utils import * 7 | -------------------------------------------------------------------------------- /torch_points3d/utils/batch_seed.py: -------------------------------------------------------------------------------- 1 | # Author Mengyang Zhao 2 | 3 | import math 4 | import operator 5 | 6 | import numpy as np 7 | import matplotlib.pyplot as plt 8 | 9 | import torch 10 | from torch import exp, sqrt 11 | 12 | def cos_batch(a, b): 13 | #return 
sqrt(((a[None,:] - b[:,None]) ** 2).sum(2)) 14 | 15 | num = a@b.T 16 | denom = torch.norm(a, dim=1).reshape(-1, 1) * torch.norm(b, dim=1) 17 | return num / denom 18 | 19 | def get_weight(sim, bandwidth): 20 | 21 | thr = 1-bandwidth 22 | #max = torch.tensor(1.0e+10).double().cuda() 23 | max = torch.tensor(1.0).double().cuda() 24 | min = torch.tensor(0.0).double().cuda() 25 | #dis=torch.where(sim>thr, 1-sim, max) 26 | dis=torch.where(sim>thr, max, min) 27 | 28 | return dis 29 | 30 | def gaussian(dist, bandwidth): 31 | return exp(-0.5 * ((dist / bandwidth))**2) / (bandwidth * math.sqrt(2 * math.pi)) 32 | 33 | def meanshift_torch(data, seed , bandwidth, max_iter=300): 34 | 35 | stop_thresh = 1e-3 * bandwidth 36 | iter=0 37 | 38 | X = torch.from_numpy(np.copy(data)).double().cuda() 39 | S = torch.from_numpy(np.copy(seed)).double().cuda() 40 | B = torch.tensor(bandwidth).double().cuda() 41 | 42 | while True: 43 | #cosine = cos_batch(S, X) 44 | 45 | weight = get_weight(cos_batch(S, X),B) 46 | 47 | #torch.where(distances>(1-bandwidth)) 48 | #weight = gaussian(distances, B) 49 | num = (weight[:, :, None] * X).sum(dim=1) 50 | S_old = S 51 | S = num / weight.sum(1)[:, None] 52 | #cosine2 = torch.norm(S - S_old, dim=1).mean() 53 | iter+=1 54 | 55 | if (torch.norm(S - S_old, dim=1).mean() < stop_thresh or iter == max_iter): 56 | break 57 | 58 | p_num=[] 59 | for line in weight: 60 | p_num.append(line[line==1].size()[0]) 61 | 62 | my_mean = S.cpu().numpy() 63 | 64 | return my_mean, p_num -------------------------------------------------------------------------------- /torch_points3d/utils/batch_seed_euc.py: -------------------------------------------------------------------------------- 1 | # Author Mengyang Zhao 2 | 3 | import math 4 | import operator 5 | 6 | import numpy as np 7 | import matplotlib.pyplot as plt 8 | 9 | import torch 10 | from torch import exp, sqrt 11 | 12 | def euc_batch(a, b): 13 | result = sqrt(((b[None,:] - a[:,None]) ** 2).sum(2)) 14 | #pdist = torch.nn.PairwiseDistance(p=2) 15 | #result = pdist(a, b) 16 | return result 17 | 18 | #num = a@b.T 19 | #denom = torch.norm(a, dim=1).reshape(-1, 1) * torch.norm(b, dim=1) 20 | #return num / denom 21 | 22 | def get_weight(sim, bandwidth): 23 | 24 | thr = 1-bandwidth 25 | #max = torch.tensor(1.0e+10).double().cuda() 26 | max = torch.tensor(1.0).double().cuda() 27 | min = torch.tensor(0.0).double().cuda() 28 | #dis=torch.where(sim>thr, 1-sim, max) 29 | dis=torch.where(sim>thr, max, min) 30 | 31 | return dis 32 | 33 | def gaussian(dist, bandwidth): 34 | return exp(-0.5 * ((dist / bandwidth))**2) / (bandwidth * math.sqrt(2 * math.pi)) 35 | 36 | def meanshift_torch(data, seed , bandwidth, max_iter=300): 37 | 38 | stop_thresh = 1e-3 * bandwidth 39 | iter=0 40 | 41 | X = torch.from_numpy(np.copy(data)).double().cuda() 42 | S = torch.from_numpy(np.copy(seed)).double().cuda() 43 | B = torch.tensor(bandwidth).double().cuda() 44 | 45 | while True: 46 | #cosine = cos_batch(S, X) 47 | 48 | weight = gaussian(euc_batch(S, X),B) 49 | 50 | #torch.where(distances>(1-bandwidth)) 51 | #weight = gaussian(distances, B) 52 | num = (weight[:, :, None] * X).sum(dim=1) 53 | S_old = S 54 | S = num / weight.sum(1)[:, None] 55 | #cosine2 = torch.norm(S - S_old, dim=1).mean() 56 | iter+=1 57 | 58 | if (torch.norm(S - S_old, dim=1).mean() < stop_thresh or iter == max_iter): 59 | break 60 | 61 | p_num=[] 62 | for line in weight: 63 | p_num.append(line[line==1].size()[0]) 64 | 65 | my_mean = S.cpu().numpy() 66 | 67 | return my_mean, p_num 
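A usage sketch for the mean-shift routines above (not part of the repository; it assumes a CUDA device, since both batch_seed.py and batch_seed_euc.py move their tensors to the GPU, and the blob data below is invented for illustration):

import numpy as np
from torch_points3d.utils.batch_seed_euc import meanshift_torch

# two well-separated blobs in R^3; every point doubles as a seed
pts = np.concatenate([np.random.randn(100, 3) * 0.05,
                      np.random.randn(100, 3) * 0.05 + 1.0])
modes, p_num = meanshift_torch(pts, pts, bandwidth=0.3)
# modes holds one converged mode per seed (rows collapse onto ~2 centres);
# p_num counts the points whose kernel weight is exactly 1 per seed, which is
# mostly meaningful for the binary-weight variant in batch_seed.py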
-------------------------------------------------------------------------------- /torch_points3d/utils/debugging_vars.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | DEBUGGING_VARS = {"FIND_NEIGHBOUR_DIST": False} 4 | 5 | 6 | def extract_histogram(spatial_ops, normalize=True): 7 | out = [] 8 | for idx, nf in enumerate(spatial_ops["neighbour_finder"]): 9 | dist_meters = nf.dist_meters 10 | temp = {} 11 | for dist_meter in dist_meters: 12 | hist = dist_meter.histogram.copy() 13 | if normalize: 14 | hist /= hist.sum() 15 | temp[str(dist_meter.radius)] = hist.tolist() 16 | dist_meter.reset() 17 | out.append(temp) 18 | return out 19 | 20 | 21 | class DistributionNeighbour(object): 22 | def __init__(self, radius, bins=1000): 23 | self._radius = radius 24 | self._bins = bins 25 | self._histogram = np.zeros(self._bins) 26 | 27 | def reset(self): 28 | self._histogram = np.zeros(self._bins) 29 | 30 | @property 31 | def radius(self): 32 | return self._radius 33 | 34 | @property 35 | def histogram(self): 36 | return self._histogram 37 | 38 | @property 39 | def histogram_non_zero(self): 40 | idx = len(self._histogram) - np.cumsum(self._histogram[::-1]).nonzero()[0][0] 41 | return self._histogram[:idx] 42 | 43 | def add_valid_neighbours(self, points): 44 | for num_valid in points: 45 | self._histogram[num_valid] += 1 46 | 47 | def __repr__(self): 48 | return "{}(radius={}, bins={})".format(self.__class__.__name__, self._radius, self._bins) 49 | -------------------------------------------------------------------------------- /torch_points3d/utils/download.py: -------------------------------------------------------------------------------- 1 | import os 2 | import os.path as osp 3 | from six.moves import urllib 4 | import ssl 5 | 6 | 7 | def download_url(url, folder, log=True): 8 | r"""Downloads the content of an URL to a specific folder. 9 | 10 | Args: 11 | url (string): The url. 12 | folder (string): The folder. 13 | log (bool, optional): If :obj:`False`, will not print anything to the 14 | console. 
(default: :obj:`True`) 15 | """ 16 | 17 | filename = url.rpartition("/")[2] 18 | path = osp.join(folder, filename) 19 | 20 | if osp.exists(path): # pragma: no cover 21 | if log: 22 | print("Using existing file", filename) 23 | return path 24 | 25 | if log: 26 | print("Downloading", url) 27 | 28 | try: 29 | os.makedirs(folder) 30 | except: 31 | pass 32 | context = ssl._create_unverified_context() 33 | data = urllib.request.urlopen(url, context=context) 34 | 35 | with open(path, "wb") as f: 36 | f.write(data.read()) 37 | 38 | return path 39 | -------------------------------------------------------------------------------- /torch_points3d/utils/enums.py: -------------------------------------------------------------------------------- 1 | import enum 2 | 3 | 4 | class SchedulerUpdateOn(enum.Enum): 5 | ON_EPOCH = "on_epoch" 6 | ON_NUM_BATCH = "on_num_batch" 7 | ON_NUM_SAMPLE = "on_num_sample" 8 | 9 | 10 | class ConvolutionFormat(enum.Enum): 11 | DENSE = "dense" 12 | PARTIAL_DENSE = "partial_dense" 13 | MESSAGE_PASSING = "message_passing" 14 | SPARSE = "sparse" 15 | -------------------------------------------------------------------------------- /torch_points3d/utils/geometry.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import random 3 | 4 | 5 | def euler_angles_to_rotation_matrix(theta, random_order=False): 6 | R_x = torch.tensor( 7 | [[1, 0, 0], [0, torch.cos(theta[0]), -torch.sin(theta[0])], [0, torch.sin(theta[0]), torch.cos(theta[0])]] 8 | ) 9 | 10 | R_y = torch.tensor( 11 | [[torch.cos(theta[1]), 0, torch.sin(theta[1])], [0, 1, 0], [-torch.sin(theta[1]), 0, torch.cos(theta[1])]] 12 | ) 13 | 14 | R_z = torch.tensor( 15 | [[torch.cos(theta[2]), -torch.sin(theta[2]), 0], [torch.sin(theta[2]), torch.cos(theta[2]), 0], [0, 0, 1]] 16 | ) 17 | 18 | matrices = [R_x, R_y, R_z] 19 | if random_order: 20 | random.shuffle(matrices) 21 | R = torch.mm(matrices[2], torch.mm(matrices[1], matrices[0])) 22 | return R 23 | 24 | 25 | def get_cross_product_matrix(k): 26 | return torch.tensor([[0, -k[2], k[1]], [k[2], 0, -k[0]], [-k[1], k[0], 0]], device=k.device) 27 | 28 | 29 | def rodrigues(axis, theta): 30 | """ 31 | given an axis of norm one and an angle, compute the rotation matrix using rodrigues formula 32 | source : https://en.wikipedia.org/wiki/Rodrigues%27_rotation_formula 33 | """ 34 | K = get_cross_product_matrix(axis) 35 | t = torch.tensor([theta], device=axis.device) 36 | R = torch.eye(3, device=axis.device) + torch.sin(t) * K + (1 - torch.cos(t)) * K.mm(K) 37 | return R 38 | 39 | 40 | def get_trans(x): 41 | """ 42 | get the rotation matrix from the vector representation using the rodrigues formula 43 | """ 44 | T = torch.eye(4, device=x.device) 45 | T[:3, 3] = x[3:] 46 | axis = x[:3] 47 | theta = torch.norm(axis) 48 | if theta > 0: 49 | axis = axis / theta 50 | T[:3, :3] = rodrigues(axis, theta) 51 | return T 52 | 
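A quick numeric check (not from the repository) of the Rodrigues helper above: rotating the x axis a quarter turn about z should give the y axis.

import math
import torch
from torch_points3d.utils.geometry import rodrigues

axis = torch.tensor([0.0, 0.0, 1.0])  # unit z axis
R = rodrigues(axis, math.pi / 2)
print(R @ torch.tensor([1.0, 0.0, 0.0]))  # ~tensor([0., 1., 0.])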
-------------------------------------------------------------------------------- /torch_points3d/utils/model_building_utils/activation_resolver.py: -------------------------------------------------------------------------------- 1 | import torch.nn 2 | 3 | from torch_points3d.utils.config import is_dict 4 | 5 | 6 | def get_activation(act_opt, create_cls=True): 7 | if is_dict(act_opt): 8 | act_opt = dict(act_opt) 9 | act = getattr(torch.nn, act_opt["name"]) 10 | del act_opt["name"] 11 | args = dict(act_opt) 12 | else: 13 | act = getattr(torch.nn, act_opt) 14 | args = {} 15 | 16 | if create_cls: 17 | return act(**args) 18 | else: 19 | return act 20 | -------------------------------------------------------------------------------- /torch_points3d/utils/model_building_utils/model_definition_resolver.py: -------------------------------------------------------------------------------- 1 | from omegaconf.dictconfig import DictConfig 2 | from omegaconf.listconfig import ListConfig 3 | #import torch 4 | 5 | def resolve_model(model_config, dataset, tested_task): 6 | """ Parses the model config and evaluates any expression that may contain constants 7 | """ 8 | # placeholders to substitute 9 | #if dataset.feature_dimension==None: 10 | # dataset.feature_dimension=4 ### 11 | #if dataset.num_classes==None: 12 | # dataset.num_classes=9 ### 13 | #if dataset.stuff_classes==None: 14 | # a= [0,1,5] 15 | # dataset.stuff_classes = torch.as_tensor(a) 16 | constants = { 17 | "FEAT": max(dataset.feature_dimension, 0), 18 | "TASK": tested_task, 19 | "N_CLS": dataset.num_classes if hasattr(dataset, "num_classes") else None, 20 | } 21 | 22 | # user-defined constants to substitute 23 | if "define_constants" in model_config.keys(): 24 | constants.update(dict(model_config.define_constants)) 25 | 26 | resolve(model_config, constants) 27 | 28 | 29 | def resolve(obj, constants): 30 | """ Resolves expressions and constants in obj. 31 | Returns False if obj is a ListConfig or DictConfig, True if obj is a primitive type. 32 | """ 33 | if type(obj) == DictConfig: 34 | it = (k for k in obj) 35 | elif type(obj) == ListConfig: 36 | it = range(len(obj)) 37 | else: 38 | # obj is a single element 39 | return True 40 | 41 | # recursively resolve all children of obj 42 | for k in it: 43 | 44 | # if obj[k] is a primitive type, evaluate it 45 | if resolve(obj[k], constants): 46 | if type(obj[k]) is str: 47 | try: 48 | obj[k] = eval(obj[k], constants) 49 | except NameError: 50 | # we tried to resolve a string which isn't an expression 51 | pass 52 | except ValueError: 53 | # we tried to resolve a string which is also a builtin (e.g. max) 54 | pass 55 | except Exception as e: 56 | print(e) 57 | 58 | return False 59 | 
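A small sketch (not from the repository) of what resolve does above: string leaves of a model config are eval'd against the constants dict, so layer sizes can be written as expressions of FEAT and of user-defined constants. resolve_model normally builds that dict from the dataset and define_constants; here it is passed directly, and the config key below is invented.

from omegaconf import OmegaConf
from torch_points3d.utils.model_building_utils.model_definition_resolver import resolve

cfg = OmegaConf.create({"down_conv_nn": "[FEAT + 3, HIDDEN, 2 * HIDDEN]"})
resolve(cfg, {"FEAT": 4, "HIDDEN": 64})
print(cfg.down_conv_nn)  # [7, 64, 128]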
-------------------------------------------------------------------------------- /torch_points3d/utils/model_building_utils/resolver_utils.py: -------------------------------------------------------------------------------- 1 | import collections 2 | 3 | # from https://stackoverflow.com/questions/6027558/flatten-nested-dictionaries-compressing-keys 4 | # flattens nested dicts to a single dict, with keys concatenated 5 | # e.g. flatten_dict({'a': 1, 'c': {'a': 2, 'b': {'x': 5, 'y' : 10}}, 'd': [1, 2, 3]}) 6 | # {'a': 1, 'c_a': 2, 'c_b_x': 5, 'd': [1, 2, 3], 'c_b_y': 10} 7 | def flatten_dict(d, parent_key="", sep="_"): 8 | items = [] 9 | for k, v in d.items(): 10 | new_key = parent_key + sep + k if parent_key else k 11 | if isinstance(v, collections.abc.MutableMapping): 12 | items.extend(flatten_dict(v, new_key, sep=sep).items()) 13 | else: 14 | items.append((new_key, v)) 15 | return dict(items) 16 | -------------------------------------------------------------------------------- /torch_points3d/utils/o3d_utils.py: -------------------------------------------------------------------------------- 1 | import open3d 2 | import random 3 | 4 | 5 | def get_random_color(pastel_factor=0.5): 6 | return [(x + pastel_factor) / (1.0 + pastel_factor) for x in [random.uniform(0, 1.0) for i in [1, 2, 3]]] 7 | 8 | 9 | def color_distance(c1, c2): 10 | return sum([abs(x[0] - x[1]) for x in zip(c1, c2)]) 11 | 12 | 13 | def generate_new_color(existing_colors, pastel_factor=0.5): 14 | max_distance = None 15 | best_color = None 16 | for i in range(0, 100): 17 | color = get_random_color(pastel_factor=pastel_factor) 18 | if not existing_colors: 19 | return color 20 | best_distance = min([color_distance(color, c) for c in existing_colors]) 21 | if not max_distance or best_distance > max_distance: 22 | max_distance = best_distance 23 | best_color = color 24 | return best_color 25 | 26 | 27 | def torch2o3d(data, color=[1, 0, 0]): 28 | xyz = data.pos 29 | norm = getattr(data, "norm", None) 30 | pcd = open3d.geometry.PointCloud() 31 | pcd.points = open3d.utility.Vector3dVector(xyz.detach().cpu().numpy()) 32 | if norm is not None: 33 | pcd.normals = open3d.utility.Vector3dVector(norm.detach().cpu().numpy()) 34 | pcd.paint_uniform_color(color) 35 | return pcd 36 | 37 | 38 | def apply_mask(d, mask, skip_keys=[]): 39 | data = d.clone() 40 | size_pos = len(data.pos) 41 | for k in data.keys: 42 | if size_pos == len(data[k]) and k not in skip_keys: 43 | data[k] = data[k][mask] 44 | return data 45 | -------------------------------------------------------------------------------- /torch_points3d/utils/running_stats.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | class RunningStats:  # Welford's online algorithm for running mean / variance 5 | def __init__(self): 6 | self.n = 0 7 | self.old_m = 0 8 | self.new_m = 0 9 | self.old_s = 0 10 | self.new_s = 0 11 | 12 | def clear(self): 13 | self.n = 0 14 | 15 | def push(self, x): 16 | self.n += 1 17 | 18 | if self.n == 1: 19 | self.old_m = self.new_m = x 20 | self.old_s = 0 21 | else: 22 | self.new_m = self.old_m + (x - self.old_m) / self.n 23 | self.new_s = self.old_s + (x - self.old_m) * (x - self.new_m) 24 | 25 | self.old_m = self.new_m 26 | self.old_s = self.new_s 27 | 28 | def mean(self): 29 | return self.new_m if self.n else 0.0 30 | 31 | def variance(self): 32 | return self.new_s / (self.n - 1) if self.n > 1 else 0.0 33 | 34 | def std(self): 35 | return np.sqrt(self.variance()) 36 | -------------------------------------------------------------------------------- /torch_points3d/utils/timer.py: -------------------------------------------------------------------------------- 1 | from time import time 2 | from collections import defaultdict 3 | import functools 4 | from .running_stats import RunningStats 5 | 6 | FunctionStats: defaultdict = defaultdict(RunningStats) 7 | 8 | 9 | def time_func(*outer_args, **outer_kwargs): 10 | print_rec = outer_kwargs.get("print_rec", 100) 11 | 
-------------------------------------------------------------------------------- /torch_points3d/utils/timer.py: --------------------------------------------------------------------------------

from time import time
from collections import defaultdict
import functools

from .running_stats import RunningStats

# per-function runtime statistics, keyed by function name
FunctionStats: defaultdict = defaultdict(RunningStats)


def time_func(*outer_args, **outer_kwargs):
    # decorator factory: records each call's runtime in FunctionStats and
    # prints the aggregate every print_rec recorded calls
    print_rec = outer_kwargs.get("print_rec", 100)
    measure_runtime = outer_kwargs.get("measure_runtime", False)
    name = outer_kwargs.get("name", "")

    def time_func_inner(func):
        @functools.wraps(func)
        def func_wrapper(*args, **kwargs):
            if measure_runtime:
                func_name = name if name else func.__name__
                if FunctionStats.get(func_name, None) is not None:
                    if FunctionStats[func_name].n % print_rec == 0:
                        stats = FunctionStats[func_name]
                        stats_mean = stats.mean()
                        print(
                            "{} run in {} | {} over {} runs".format(
                                func_name, stats_mean, stats_mean * stats.n, stats.n
                            )
                        )
                t0 = time()
                out = func(*args, **kwargs)
                diff = time() - t0
                FunctionStats[func_name].push(diff)
                return out
            else:
                return func(*args, **kwargs)

        return func_wrapper

    return time_func_inner


@time_func(print_rec=50, measure_runtime=True)
def do_nothing():
    pass


def iteration():
    for _ in range(10000):
        do_nothing()


if __name__ == "__main__":
    iteration()

-------------------------------------------------------------------------------- /torch_points3d/utils/transform_utils.py: --------------------------------------------------------------------------------

import numpy as np


class SamplingStrategy(object):

    STRATEGIES = ["random", "freq_class_based"]
    CLASS_WEIGHT_METHODS = ["sqrt"]

    def __init__(self, strategy="random", class_weight_method="sqrt"):
        if strategy.lower() in self.STRATEGIES:
            self._strategy = strategy.lower()
        else:
            raise ValueError("strategy should be one of {}".format(self.STRATEGIES))

        if class_weight_method.lower() in self.CLASS_WEIGHT_METHODS:
            self._class_weight_method = class_weight_method.lower()
        else:
            raise ValueError("class_weight_method should be one of {}".format(self.CLASS_WEIGHT_METHODS))

    def __call__(self, data):
        if self._strategy == "random":
            random_center = np.random.randint(0, len(data.pos))

        elif self._strategy == "freq_class_based":
            labels = np.asarray(data.y)
            uni, uni_counts = np.unique(labels, return_counts=True)
            # inverse-frequency weights: rarer classes are sampled more often
            uni_counts = uni_counts.mean() / uni_counts
            if self._class_weight_method == "sqrt":
                uni_counts = np.sqrt(uni_counts)
            uni_counts /= np.sum(uni_counts)
            chosen_label = np.random.choice(uni, p=uni_counts)
            random_center = np.random.choice(np.argwhere(labels == chosen_label).flatten())
        else:
            raise NotImplementedError

        return random_center

    def __repr__(self):
        return "{}(strategy={}, class_weight_method={})".format(
            self.__class__.__name__, self._strategy, self._class_weight_method
        )
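A hypothetical usage sketch for SamplingStrategy (not part of the repository; the toy point cloud is invented, and Data is assumed to be the torch_geometric container used by the repository's datasets):

# hypothetical example, not repository code
import torch
from torch_geometric.data import Data

data = Data(pos=torch.rand(100, 3), y=torch.randint(0, 3, (100,)))
sampler = SamplingStrategy(strategy="freq_class_based")
center_idx = sampler(data)  # point index, biased towards points of rare classes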
-------------------------------------------------------------------------------- /torch_points3d/visualization/__init__.py: --------------------------------------------------------------------------------

from .visualizer import *
from .experiment_manager import ExperimentManager

-------------------------------------------------------------------------------- /train.py: --------------------------------------------------------------------------------

import logging

import hydra
from hydra.core.global_hydra import GlobalHydra
from omegaconf import OmegaConf

from torch_points3d.trainer import Trainer


@hydra.main(config_path="conf", config_name="config")
def main(cfg):
    numba_logger = logging.getLogger("numba")
    numba_logger.setLevel(logging.WARNING)
    OmegaConf.set_struct(cfg, False)  # this allows the getattr and hasattr methods to function correctly
    if cfg.pretty_print:
        print(OmegaConf.to_yaml(cfg))

    trainer = Trainer(cfg)
    trainer.train()

    # https://github.com/facebookresearch/hydra/issues/440
    GlobalHydra.get_state().clear()
    return 0


if __name__ == "__main__":
    main()

-------------------------------------------------------------------------------- /visualization/__init__.py: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/SmartForest-no/SegmentAnyTree/361650805e3d7c9664b9d5c44ac6af8047942832/visualization/__init__.py
--------------------------------------------------------------------------------
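Usage note: train.py is a Hydra entry point, so configuration groups under conf/ can be overridden from the command line. A hypothetical invocation (the exact group values are assumptions based on the config layout):

python train.py task=panoptic data=panoptic/treeins_rad8 training=treeins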