├── LICENSE ├── README.md ├── V2B_main ├── __pycache__ │ ├── test_tracking.cpython-37.pyc │ ├── test_tracking.cpython-38.pyc │ ├── train_tracking.cpython-37.pyc │ └── train_tracking.cpython-38.pyc ├── datasets │ ├── __init__.py │ ├── __pycache__ │ │ ├── __init__.cpython-37.pyc │ │ ├── __init__.cpython-38.pyc │ │ ├── base_dataset.cpython-37.pyc │ │ ├── get_dataset_v2b.cpython-38.pyc │ │ ├── get_v2b_db.cpython-37.pyc │ │ ├── get_v2b_db.cpython-38.pyc │ │ └── v2b_dataset.cpython-37.pyc │ ├── base_dataset.py │ ├── get_v2b_db.py │ └── v2b_dataset.py ├── lib │ ├── __init__.py │ ├── __pycache__ │ │ └── __init__.cpython-37.pyc │ ├── pointops │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-36.pyc │ │ │ └── __init__.cpython-37.pyc │ │ ├── functions │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-36.pyc │ │ │ │ ├── __init__.cpython-37.pyc │ │ │ │ ├── pointops.cpython-36.pyc │ │ │ │ └── pointops.cpython-37.pyc │ │ │ └── pointops.py │ │ ├── setup.py │ │ └── src │ │ │ ├── __init__.py │ │ │ ├── ballquery │ │ │ ├── ballquery_cuda.cpp │ │ │ ├── ballquery_cuda_kernel.cu │ │ │ └── ballquery_cuda_kernel.h │ │ │ ├── cuda_utils.h │ │ │ ├── featuredistribute │ │ │ ├── featuredistribute_cuda.cpp │ │ │ ├── featuredistribute_cuda_kernel.cu │ │ │ └── featuredistribute_cuda_kernel.h │ │ │ ├── grouping │ │ │ ├── grouping_cuda.cpp │ │ │ ├── grouping_cuda_kernel.cu │ │ │ └── grouping_cuda_kernel.h │ │ │ ├── grouping_int │ │ │ ├── grouping_int_cuda.cpp │ │ │ ├── grouping_int_cuda_kernel.cu │ │ │ └── grouping_int_cuda_kernel.h │ │ │ ├── interpolation │ │ │ ├── interpolation_cuda.cpp │ │ │ ├── interpolation_cuda_kernel.cu │ │ │ └── interpolation_cuda_kernel.h │ │ │ ├── knnquery │ │ │ ├── __init__.py │ │ │ ├── knnquery_cuda.cpp │ │ │ ├── knnquery_cuda_kernel.cu │ │ │ └── knnquery_cuda_kernel.h │ │ │ ├── labelstat │ │ │ ├── labelstat_cuda.cpp │ │ │ ├── labelstat_cuda_kernel.cu │ │ │ └── labelstat_cuda_kernel.h │ │ │ ├── pointops_api.cpp │ │ │ └── sampling │ 
│ │ ├── sampling_cuda.cpp │ │ │ ├── sampling_cuda_kernel.cu │ │ │ └── sampling_cuda_kernel.h │ └── sync_bn │ │ ├── __init__.py │ │ ├── batchnorm.py │ │ ├── comm.py │ │ ├── replicate.py │ │ └── unittest.py ├── main.py ├── modules │ ├── __init__.py │ ├── __pycache__ │ │ ├── __init__.cpython-37.pyc │ │ ├── backbone_net.cpython-37.pyc │ │ ├── completion_net.cpython-37.pyc │ │ └── v2b_net.cpython-37.pyc │ ├── backbone_net.py │ ├── completion_net.py │ ├── pointnet2 │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-36.pyc │ │ │ └── __init__.cpython-37.pyc │ │ ├── _ext-src │ │ │ ├── include │ │ │ │ ├── ball_query.h │ │ │ │ ├── ball_query_score.h │ │ │ │ ├── cuda_utils.h │ │ │ │ ├── group_points.h │ │ │ │ ├── interpolate.h │ │ │ │ ├── sampling.h │ │ │ │ └── utils.h │ │ │ └── src │ │ │ │ ├── ball_query.cpp │ │ │ │ ├── ball_query_gpu.cu │ │ │ │ ├── ball_query_score.cpp │ │ │ │ ├── ball_query_score_gpu.cu │ │ │ │ ├── bindings.cpp │ │ │ │ ├── group_points.cpp │ │ │ │ ├── group_points_gpu.cu │ │ │ │ ├── interpolate.cpp │ │ │ │ ├── interpolate_gpu.cu │ │ │ │ ├── sampling.cpp │ │ │ │ └── sampling_gpu.cu │ │ └── utils │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-36.pyc │ │ │ ├── __init__.cpython-37.pyc │ │ │ ├── kalman_filter.cpython-36.pyc │ │ │ ├── kalman_filter.cpython-37.pyc │ │ │ ├── non_local.cpython-36.pyc │ │ │ ├── non_local.cpython-37.pyc │ │ │ ├── pointnet2_modules.cpython-36.pyc │ │ │ └── pointnet2_modules.cpython-37.pyc │ │ │ ├── linalg_utils.py │ │ │ └── pointnet2_modules.py │ ├── v2b_net.py │ └── voxel_utils │ │ ├── __init__.py │ │ ├── __pycache__ │ │ ├── __init__.cpython-36.pyc │ │ ├── __init__.cpython-37.pyc │ │ ├── block.cpython-36.pyc │ │ ├── block.cpython-37.pyc │ │ ├── region_proposal_network.cpython-36.pyc │ │ ├── region_proposal_network.cpython-37.pyc │ │ ├── se.cpython-37.pyc │ │ ├── torch_util.cpython-36.pyc │ │ ├── torch_util.cpython-37.pyc │ │ ├── voxel_grid.cpython-36.pyc │ │ ├── voxel_grid.cpython-37.pyc │ │ ├── 
voxelization.cpython-37.pyc │ │ ├── voxelnet.cpython-36.pyc │ │ └── voxelnet.cpython-37.pyc │ │ ├── functional │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-36.pyc │ │ │ ├── __init__.cpython-37.pyc │ │ │ ├── backend.cpython-36.pyc │ │ │ ├── backend.cpython-37.pyc │ │ │ ├── voxelization.cpython-36.pyc │ │ │ └── voxelization.cpython-37.pyc │ │ ├── backend.py │ │ ├── setup.py │ │ ├── src │ │ │ ├── bindings.cpp │ │ │ ├── cuda_utils.cuh │ │ │ ├── utils.hpp │ │ │ ├── voxelization │ │ │ │ ├── vox.cpp │ │ │ │ ├── vox.cu │ │ │ │ ├── vox.cuh │ │ │ │ └── vox.hpp │ │ │ └── voxelize_float │ │ │ │ ├── voxf.cpp │ │ │ │ ├── voxf.cu │ │ │ │ ├── voxf.cuh │ │ │ │ └── voxf.hpp │ │ └── voxelization.py │ │ ├── se.py │ │ ├── voxel │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-37.pyc │ │ │ ├── block.cpython-37.pyc │ │ │ ├── region_proposal_network.cpython-37.pyc │ │ │ ├── torch_util.cpython-37.pyc │ │ │ └── voxelnet.cpython-37.pyc │ │ ├── block.py │ │ ├── region_proposal_network.py │ │ ├── torch_util.py │ │ ├── voxel_grid.py │ │ ├── voxel_grid_test.py │ │ └── voxelnet.py │ │ └── voxelization.py ├── results │ ├── kitti_car │ │ └── Epoch30.pth │ └── kitti_pedestrian │ │ └── Epoch30.pth ├── test_tracking.py ├── train_tracking.py ├── trainers │ ├── __init__.py │ ├── __pycache__ │ │ ├── __init__.cpython-37.pyc │ │ ├── tester.cpython-37.pyc │ │ └── trainer.cpython-37.pyc │ ├── tester.py │ └── trainer.py ├── utils │ ├── __pycache__ │ │ ├── attr_dict.cpython-37.pyc │ │ ├── attr_dict.cpython-38.pyc │ │ ├── data_classes.cpython-37.pyc │ │ ├── decode.cpython-37.pyc │ │ ├── kitti_utils.cpython-37.pyc │ │ ├── metrics.cpython-37.pyc │ │ ├── options.cpython-37.pyc │ │ ├── options.cpython-38.pyc │ │ ├── searchspace.cpython-37.pyc │ │ ├── show_line.cpython-37.pyc │ │ └── show_line.cpython-38.pyc │ ├── attr_dict.py │ ├── data_classes.py │ ├── decode.py │ ├── kitti_utils.py │ ├── loss │ │ ├── PCLosses.py │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── 
PCLosses.cpython-37.pyc │ │ │ ├── PCLosses.cpython-38.pyc │ │ │ ├── __init__.cpython-36.pyc │ │ │ ├── __init__.cpython-37.pyc │ │ │ ├── __init__.cpython-38.pyc │ │ │ ├── losses.cpython-37.pyc │ │ │ ├── losses.cpython-38.pyc │ │ │ ├── utils.cpython-36.pyc │ │ │ ├── utils.cpython-37.pyc │ │ │ └── utils.cpython-38.pyc │ │ ├── losses.py │ │ └── utils.py │ ├── metrics.py │ ├── options.py │ ├── searchspace.py │ └── show_line.py └── visualization │ ├── LineMesh.py │ ├── __pycache__ │ ├── LineMesh.cpython-37.pyc │ └── LineMesh.cpython-38.pyc │ ├── result │ └── kitti_car_2 │ │ ├── 0000.jpg │ │ ├── 0005.jpg │ │ ├── 0010.jpg │ │ ├── 0015.jpg │ │ ├── 0020.jpg │ │ ├── 0025.jpg │ │ ├── 0030.jpg │ │ ├── 0035.jpg │ │ ├── 0040.jpg │ │ ├── 0045.jpg │ │ └── 0050.jpg │ └── visual.py ├── nuscenes-devkit-master ├── .gitignore ├── LICENSE.txt ├── README.md ├── docs │ ├── faqs.md │ ├── installation.md │ ├── instructions_lidarseg.md │ ├── instructions_nuimages.md │ ├── instructions_nuscenes.md │ ├── schema_nuimages.md │ └── schema_nuscenes.md ├── python-sdk │ ├── nuimages │ │ ├── __init__.py │ │ ├── export │ │ │ └── export_release.py │ │ ├── nuimages.py │ │ ├── scripts │ │ │ ├── render_images.py │ │ │ └── render_rare_classes.py │ │ ├── tests │ │ │ ├── __init__.py │ │ │ ├── assert_download.py │ │ │ ├── test_attributes.py │ │ │ └── test_foreign_keys.py │ │ └── utils │ │ │ ├── __init__.py │ │ │ ├── test_nuimages.py │ │ │ └── utils.py │ ├── nuscenes │ │ ├── __init__.py │ │ ├── can_bus │ │ │ ├── README.md │ │ │ └── can_bus_api.py │ │ ├── eval │ │ │ ├── __init__.py │ │ │ ├── common │ │ │ │ ├── __init__.py │ │ │ │ ├── config.py │ │ │ │ ├── data_classes.py │ │ │ │ ├── loaders.py │ │ │ │ ├── render.py │ │ │ │ └── utils.py │ │ │ ├── detection │ │ │ │ ├── README.md │ │ │ │ ├── __init__.py │ │ │ │ ├── algo.py │ │ │ │ ├── config.py │ │ │ │ ├── configs │ │ │ │ │ └── detection_cvpr_2019.json │ │ │ │ ├── constants.py │ │ │ │ ├── data_classes.py │ │ │ │ ├── evaluate.py │ │ │ │ ├── render.py │ │ │ │ ├── 
tests │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── test_algo.py │ │ │ │ │ ├── test_data_classes.py │ │ │ │ │ ├── test_evaluate.py │ │ │ │ │ ├── test_loader.py │ │ │ │ │ └── test_utils.py │ │ │ │ └── utils.py │ │ │ ├── lidarseg │ │ │ │ ├── README.md │ │ │ │ ├── __init__.py │ │ │ │ ├── evaluate.py │ │ │ │ ├── render.py │ │ │ │ ├── tests │ │ │ │ │ └── __init__.py │ │ │ │ ├── utils.py │ │ │ │ └── validate_submission.py │ │ │ ├── panoptic │ │ │ │ ├── README.md │ │ │ │ ├── __init__.py │ │ │ │ ├── evaluate.py │ │ │ │ ├── panoptic_seg_evaluator.py │ │ │ │ ├── panoptic_track_evaluator.py │ │ │ │ └── utils.py │ │ │ ├── prediction │ │ │ │ ├── README.md │ │ │ │ ├── __init__.py │ │ │ │ ├── baseline_model_inference.py │ │ │ │ ├── compute_metrics.py │ │ │ │ ├── config.py │ │ │ │ ├── configs │ │ │ │ │ └── predict_2020_icra.json │ │ │ │ ├── data_classes.py │ │ │ │ ├── docker_container │ │ │ │ │ ├── README.md │ │ │ │ │ └── docker │ │ │ │ │ │ ├── Dockerfile │ │ │ │ │ │ └── docker-compose.yml │ │ │ │ ├── metrics.py │ │ │ │ ├── splits.py │ │ │ │ ├── submission │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── do_inference.py │ │ │ │ │ └── extra_packages.txt │ │ │ │ └── tests │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── test_dataclasses.py │ │ │ │ │ └── test_metrics.py │ │ │ └── tracking │ │ │ │ ├── README.md │ │ │ │ ├── __init__.py │ │ │ │ ├── algo.py │ │ │ │ ├── configs │ │ │ │ └── tracking_nips_2019.json │ │ │ │ ├── constants.py │ │ │ │ ├── data_classes.py │ │ │ │ ├── evaluate.py │ │ │ │ ├── loaders.py │ │ │ │ ├── metrics.py │ │ │ │ ├── mot.py │ │ │ │ ├── render.py │ │ │ │ ├── tests │ │ │ │ ├── __init__.py │ │ │ │ ├── scenarios.py │ │ │ │ ├── test_algo.py │ │ │ │ └── test_evaluate.py │ │ │ │ └── utils.py │ │ ├── lidarseg │ │ │ ├── __init__.py │ │ │ ├── class_histogram.py │ │ │ └── lidarseg_utils.py │ │ ├── map_expansion │ │ │ ├── __init__.py │ │ │ ├── arcline_path_utils.py │ │ │ ├── bitmap.py │ │ │ ├── map_api.py │ │ │ ├── tests │ │ │ │ ├── __init__.py │ │ │ │ ├── test_all_maps.py │ │ │ │ └── 
test_arcline_path_utils.py │ │ │ └── utils.py │ │ ├── nuscenes.py │ │ ├── panoptic │ │ │ ├── __init__.py │ │ │ ├── generate_panoptic_labels.py │ │ │ └── panoptic_utils.py │ │ ├── prediction │ │ │ ├── __init__.py │ │ │ ├── helper.py │ │ │ ├── input_representation │ │ │ │ ├── __init__.py │ │ │ │ ├── agents.py │ │ │ │ ├── combinators.py │ │ │ │ ├── interface.py │ │ │ │ ├── static_layers.py │ │ │ │ ├── tests │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── test_agents.py │ │ │ │ │ ├── test_combinators.py │ │ │ │ │ ├── test_static_layers.py │ │ │ │ │ └── test_utils.py │ │ │ │ └── utils.py │ │ │ ├── models │ │ │ │ ├── __init__.py │ │ │ │ ├── backbone.py │ │ │ │ ├── covernet.py │ │ │ │ ├── mtp.py │ │ │ │ └── physics.py │ │ │ └── tests │ │ │ │ ├── __init__.py │ │ │ │ ├── run_covernet.py │ │ │ │ ├── run_image_generation.py │ │ │ │ ├── run_mtp.py │ │ │ │ ├── test_backbone.py │ │ │ │ ├── test_covernet.py │ │ │ │ ├── test_mtp.py │ │ │ │ ├── test_mtp_loss.py │ │ │ │ ├── test_physics_models.py │ │ │ │ └── test_predict_helper.py │ │ ├── scripts │ │ │ ├── README.md │ │ │ ├── __init__.py │ │ │ ├── export_2d_annotations_as_json.py │ │ │ ├── export_egoposes_on_map.py │ │ │ ├── export_instance_videos.py │ │ │ ├── export_kitti.py │ │ │ ├── export_pointclouds_as_obj.py │ │ │ ├── export_poses.py │ │ │ └── export_scene_videos.py │ │ ├── tests │ │ │ ├── __init__.py │ │ │ ├── assert_download.py │ │ │ ├── test_lidarseg.py │ │ │ ├── test_nuscenes.py │ │ │ └── test_predict_helper.py │ │ └── utils │ │ │ ├── __init__.py │ │ │ ├── color_map.py │ │ │ ├── data_classes.py │ │ │ ├── data_io.py │ │ │ ├── geometry_utils.py │ │ │ ├── kitti.py │ │ │ ├── map_mask.py │ │ │ ├── splits.py │ │ │ └── tests │ │ │ ├── __init__.py │ │ │ ├── test_data_classes.py │ │ │ ├── test_geometry_utils.py │ │ │ └── test_map_mask.py │ └── tutorials │ │ ├── README.md │ │ ├── can_bus_tutorial.ipynb │ │ ├── map_expansion_tutorial.ipynb │ │ ├── nuimages_tutorial.ipynb │ │ ├── nuscenes_lidarseg_panoptic_tutorial.ipynb │ │ ├── 
nuscenes_tutorial.ipynb │ │ ├── prediction_tutorial.ipynb │ │ └── trajectory.gif └── setup │ ├── Dockerfile │ ├── Jenkinsfile │ ├── requirements.txt │ ├── requirements │ ├── requirements_base.txt │ ├── requirements_nuimages.txt │ ├── requirements_prediction.txt │ └── requirements_tracking.txt │ ├── setup.py │ └── test_tutorial.sh ├── nuscenes_json ├── category.json └── lidarseg.json └── requirements.txt /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 fpthink 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /V2B_main/__pycache__/test_tracking.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/__pycache__/test_tracking.cpython-37.pyc -------------------------------------------------------------------------------- /V2B_main/__pycache__/test_tracking.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/__pycache__/test_tracking.cpython-38.pyc -------------------------------------------------------------------------------- /V2B_main/__pycache__/train_tracking.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/__pycache__/train_tracking.cpython-37.pyc -------------------------------------------------------------------------------- /V2B_main/__pycache__/train_tracking.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/__pycache__/train_tracking.cpython-38.pyc -------------------------------------------------------------------------------- /V2B_main/datasets/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/datasets/__init__.py -------------------------------------------------------------------------------- /V2B_main/datasets/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/datasets/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /V2B_main/datasets/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/datasets/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /V2B_main/datasets/__pycache__/base_dataset.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/datasets/__pycache__/base_dataset.cpython-37.pyc -------------------------------------------------------------------------------- /V2B_main/datasets/__pycache__/get_dataset_v2b.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/datasets/__pycache__/get_dataset_v2b.cpython-38.pyc -------------------------------------------------------------------------------- /V2B_main/datasets/__pycache__/get_v2b_db.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/datasets/__pycache__/get_v2b_db.cpython-37.pyc -------------------------------------------------------------------------------- /V2B_main/datasets/__pycache__/get_v2b_db.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/datasets/__pycache__/get_v2b_db.cpython-38.pyc 
from torch.utils.data import DataLoader
from datasets.v2b_dataset import TrainDataset, TestDataset, TestDataset_WOD


def get_dataset(opts, partition, shuffle=False):
    """Build the dataset (and, when applicable, a DataLoader) for one partition.

    Args:
        opts: options object; fields read here are ``use_tiny``, ``batch_size``,
            ``n_workers`` and ``which_dataset``.
        partition: partition name, e.g. "Train", "Valid" or "Test".
        shuffle: whether the DataLoader shuffles samples (default False).

    Returns:
        (loader, db) tuple. ``loader`` is ``None`` for the Waymo test path,
        where the dataset object is consumed directly.
    """
    # Split name is the partition prefixed by the dataset size variant.
    prefix = "Tiny_" if opts.use_tiny else "Full_"
    split = prefix + partition

    if partition in ("Train", "Valid"):
        db = TrainDataset(opts, split)
        loader = DataLoader(
            db,
            batch_size=opts.batch_size,
            shuffle=shuffle,
            num_workers=opts.n_workers,
            pin_memory=True,
        )
    elif opts.which_dataset.upper() in ("KITTI", "NUSCENES"):
        # KITTI / nuScenes test: one tracklet per batch, identity collate so
        # variable-length tracklets are returned untouched.
        db = TestDataset(opts, split)
        loader = DataLoader(
            db,
            batch_size=1,
            shuffle=shuffle,
            num_workers=opts.n_workers,
            pin_memory=True,
            collate_fn=lambda x: x,
        )
    else:
        # Waymo test: no DataLoader is built for this dataset.
        db = TestDataset_WOD(opts, pc_type='raw_pc')
        loader = None

    return loader, db
https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/lib/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /V2B_main/lib/pointops/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/lib/pointops/__init__.py -------------------------------------------------------------------------------- /V2B_main/lib/pointops/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/lib/pointops/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /V2B_main/lib/pointops/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/lib/pointops/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /V2B_main/lib/pointops/functions/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/lib/pointops/functions/__init__.py -------------------------------------------------------------------------------- /V2B_main/lib/pointops/functions/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/lib/pointops/functions/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- 
/V2B_main/lib/pointops/functions/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/lib/pointops/functions/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /V2B_main/lib/pointops/functions/__pycache__/pointops.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/lib/pointops/functions/__pycache__/pointops.cpython-36.pyc -------------------------------------------------------------------------------- /V2B_main/lib/pointops/functions/__pycache__/pointops.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/lib/pointops/functions/__pycache__/pointops.cpython-37.pyc -------------------------------------------------------------------------------- /V2B_main/lib/pointops/setup.py: -------------------------------------------------------------------------------- 1 | #python3 setup.py install 2 | from setuptools import setup 3 | from torch.utils.cpp_extension import BuildExtension, CUDAExtension 4 | 5 | setup( 6 | name='pointops', 7 | ext_modules=[ 8 | CUDAExtension('pointops_cuda', [ 9 | 'src/pointops_api.cpp', 10 | 11 | 'src/ballquery/ballquery_cuda.cpp', 12 | 'src/ballquery/ballquery_cuda_kernel.cu', 13 | 'src/knnquery/knnquery_cuda.cpp', 14 | 'src/knnquery/knnquery_cuda_kernel.cu', 15 | 'src/grouping/grouping_cuda.cpp', 16 | 'src/grouping/grouping_cuda_kernel.cu', 17 | 'src/grouping_int/grouping_int_cuda.cpp', 18 | 'src/grouping_int/grouping_int_cuda_kernel.cu', 19 | 'src/interpolation/interpolation_cuda.cpp', 20 | 'src/interpolation/interpolation_cuda_kernel.cu', 21 | 
'src/sampling/sampling_cuda.cpp', 22 | 'src/sampling/sampling_cuda_kernel.cu', 23 | 24 | 'src/labelstat/labelstat_cuda.cpp', 25 | 'src/labelstat/labelstat_cuda_kernel.cu', 26 | 27 | 'src/featuredistribute/featuredistribute_cuda.cpp', 28 | 'src/featuredistribute/featuredistribute_cuda_kernel.cu' 29 | ], 30 | extra_compile_args={'cxx': ['-g'], 31 | 'nvcc': ['-O2']}) 32 | ], 33 | cmdclass={'build_ext': BuildExtension}) 34 | -------------------------------------------------------------------------------- /V2B_main/lib/pointops/src/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/lib/pointops/src/__init__.py -------------------------------------------------------------------------------- /V2B_main/lib/pointops/src/ballquery/ballquery_cuda.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | #include "ballquery_cuda_kernel.h" 7 | 8 | extern THCState *state; 9 | 10 | #define CHECK_CUDA(x) AT_CHECK(x.type().is_cuda(), #x, " must be a CUDAtensor ") 11 | #define CHECK_CONTIGUOUS(x) AT_CHECK(x.is_contiguous(), #x, " must be contiguous ") 12 | #define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x) 13 | 14 | void ballquery_cuda(int b, int n, int m, float radius, int nsample, at::Tensor new_xyz_tensor, at::Tensor xyz_tensor, at::Tensor idx_tensor) 15 | { 16 | const float *new_xyz = new_xyz_tensor.data(); 17 | const float *xyz = xyz_tensor.data(); 18 | int *idx = idx_tensor.data(); 19 | 20 | ballquery_cuda_launcher(b, n, m, radius, nsample, new_xyz, xyz, idx); 21 | } 22 | 23 | 24 | void ballquery_cuda_fast(int b, int n, int m, float radius, int nsample, at::Tensor new_xyz_tensor, at::Tensor xyz_tensor, at::Tensor idx_tensor) 25 | { 26 | CHECK_INPUT(new_xyz_tensor); 27 | CHECK_INPUT(xyz_tensor); 28 | 29 | const float *new_xyz = 
new_xyz_tensor.data(); 30 | const float *xyz = xyz_tensor.data(); 31 | int *idx = idx_tensor.data(); 32 | 33 | cudaStream_t stream = THCState_getCurrentStream(state); 34 | 35 | ballquery_cuda_launcher_fast(b, n, m, radius, nsample, new_xyz, xyz, idx, stream); 36 | } 37 | -------------------------------------------------------------------------------- /V2B_main/lib/pointops/src/ballquery/ballquery_cuda_kernel.h: -------------------------------------------------------------------------------- 1 | #ifndef _BALLQUERY_CUDA_KERNEL 2 | #define _BALLQUERY_CUDA_KERNEL 3 | #include 4 | #include 5 | #include 6 | 7 | void ballquery_cuda(int b, int n, int m, float radius, int nsample, at::Tensor new_xyz_tensor, at::Tensor xyz_tensor, at::Tensor idx_tensor); 8 | 9 | void ballquery_cuda_fast(int b, int n, int m, float radius, int nsample, at::Tensor new_xyz_tensor, at::Tensor xyz_tensor, at::Tensor idx_tensor); 10 | 11 | #ifdef __cplusplus 12 | extern "C" { 13 | #endif 14 | 15 | void ballquery_cuda_launcher(int b, int n, int m, float radius, int nsample, const float *xyz, const float *new_xyz, int *idx); 16 | 17 | void ballquery_cuda_launcher_fast(int b, int n, int m, float radius, int nsample, const float *new_xyz, const float *xyz, int *idx, cudaStream_t stream); 18 | 19 | #ifdef __cplusplus 20 | } 21 | #endif 22 | 23 | #endif 24 | -------------------------------------------------------------------------------- /V2B_main/lib/pointops/src/cuda_utils.h: -------------------------------------------------------------------------------- 1 | #ifndef _CUDA_UTILS_H 2 | #define _CUDA_UTILS_H 3 | 4 | #include 5 | 6 | #define TOTAL_THREADS 1024 7 | 8 | #define CHECK_CUDA(x) AT_CHECK(x.type().is_cuda(), #x, " must be a CUDAtensor ") 9 | #define CHECK_CONTIGUOUS(x) AT_CHECK(x.is_contiguous(), #x, " must be contiguous ") 10 | #define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x) 11 | 12 | #define THREADS_PER_BLOCK 256 13 | #define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0)) 14 | 15 | inline 
int opt_n_threads(int work_size) { 16 | const int pow_2 = std::log(static_cast(work_size)) / std::log(2.0); 17 | return max(min(1 << pow_2, TOTAL_THREADS), 1); 18 | } 19 | 20 | inline dim3 opt_block_config(int x, int y) { 21 | const int x_threads = opt_n_threads(x); 22 | const int y_threads = max(min(opt_n_threads(y), TOTAL_THREADS / x_threads), 1); 23 | dim3 block_config(x_threads, y_threads, 1); 24 | return block_config; 25 | } 26 | 27 | #endif -------------------------------------------------------------------------------- /V2B_main/lib/pointops/src/featuredistribute/featuredistribute_cuda.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | #include "featuredistribute_cuda_kernel.h" 7 | 8 | extern THCState *state; 9 | 10 | #define CHECK_CUDA(x) AT_CHECK(x.type().is_cuda(), #x, " must be a CUDAtensor ") 11 | #define CHECK_CONTIGUOUS(x) AT_CHECK(x.is_contiguous(), #x, " must be contiguous ") 12 | #define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x) 13 | 14 | 15 | void featuredistribute_cuda(int b, int n, int m, at::Tensor max_xyz_tensor, at::Tensor xyz_tensor, at::Tensor distribute_idx_tensor) 16 | { 17 | CHECK_INPUT(max_xyz_tensor); 18 | CHECK_INPUT(xyz_tensor); 19 | 20 | const float *max_xyz = max_xyz_tensor.data(); 21 | const float *xyz = xyz_tensor.data(); 22 | int *distribute_idx = distribute_idx_tensor.data(); 23 | 24 | cudaStream_t stream = THCState_getCurrentStream(state); 25 | 26 | featuredistribute_cuda_launcher(b, n, m, max_xyz, xyz, distribute_idx, stream); 27 | } 28 | 29 | 30 | void featuregather_forward_cuda(int b, int n, int m, int c, at::Tensor max_feature_tensor, at::Tensor distribute_idx_tensor, at::Tensor distribute_feature_tensor) 31 | { 32 | CHECK_INPUT(max_feature_tensor); 33 | CHECK_INPUT(distribute_idx_tensor); 34 | 35 | const float *max_feature = max_feature_tensor.data(); 36 | const int *distribute_idx = distribute_idx_tensor.data(); 37 | 
float *distribute_feature = distribute_feature_tensor.data(); 38 | 39 | cudaStream_t stream = THCState_getCurrentStream(state); 40 | 41 | featuregather_forward_cuda_launcher(b, n, m, c, max_feature, distribute_idx, distribute_feature, stream); 42 | } 43 | 44 | 45 | void featuregather_backward_cuda(int b, int n, int m, int c, at::Tensor grad_distribute_feature_tensor, at::Tensor distribute_idx_tensor, at::Tensor grad_max_feature_tensor) 46 | { 47 | CHECK_INPUT(grad_distribute_feature_tensor); 48 | CHECK_INPUT(distribute_idx_tensor); 49 | 50 | const float *grad_distribute_feature = grad_distribute_feature_tensor.data(); 51 | const int *distribute_idx = distribute_idx_tensor.data(); 52 | float *grad_max_feature = grad_max_feature_tensor.data(); 53 | 54 | cudaStream_t stream = THCState_getCurrentStream(state); 55 | 56 | featuregather_backward_cuda_launcher(b, n, m, c, grad_distribute_feature, distribute_idx, grad_max_feature, stream); 57 | } -------------------------------------------------------------------------------- /V2B_main/lib/pointops/src/featuredistribute/featuredistribute_cuda_kernel.h: -------------------------------------------------------------------------------- 1 | #ifndef _FEATUREDISTRIBUTE_CUDA_KERNEL 2 | #define _FEATUREDISTRIBUTE_CUDA_KERNEL 3 | #include 4 | #include 5 | #include 6 | 7 | void featuredistribute_cuda(int b, int n, int m, at::Tensor max_xyz_tensor, at::Tensor xyz_tensor, at::Tensor distribute_idx_tensor); 8 | void featuregather_forward_cuda(int b, int n, int m, int c, at::Tensor max_feature_tensor, at::Tensor distribute_idx_tensor, at::Tensor distribute_feature_tensor); 9 | void featuregather_backward_cuda(int b, int n, int m, int c, at::Tensor grad_distribute_feature_tensor, at::Tensor distribute_idx_tensor, at::Tensor grad_max_feature_tensor); 10 | 11 | #ifdef __cplusplus 12 | extern "C" { 13 | #endif 14 | 15 | void featuredistribute_cuda_launcher(int b, int n, int m, const float *max_xyz, const float *xyz, int *distribute_idx, 
cudaStream_t stream); 16 | void featuregather_forward_cuda_launcher(int b, int n, int m, int c, const float *max_feature, const int *distribute_idx, float *distribute_feature, cudaStream_t stream); 17 | void featuregather_backward_cuda_launcher(int b, int n, int m, int c, const float *grad_distribute_feature, const int *distribute_idx, float *grad_max_feature, cudaStream_t stream); 18 | 19 | #ifdef __cplusplus 20 | } 21 | #endif 22 | 23 | #endif 24 | -------------------------------------------------------------------------------- /V2B_main/lib/pointops/src/grouping/grouping_cuda.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | #include "grouping_cuda_kernel.h" 7 | 8 | extern THCState *state; 9 | 10 | void grouping_forward_cuda(int b, int c, int n, int m, int nsample, at::Tensor points_tensor, at::Tensor idx_tensor, at::Tensor out_tensor) 11 | { 12 | const float *points = points_tensor.data(); 13 | const int *idx = idx_tensor.data(); 14 | float *out = out_tensor.data(); 15 | grouping_forward_cuda_launcher(b, c, n, m, nsample, points, idx, out); 16 | } 17 | 18 | void grouping_backward_cuda(int b, int c, int n, int m, int nsample, at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor grad_points_tensor) 19 | { 20 | float *grad_points = grad_points_tensor.data(); 21 | const int *idx = idx_tensor.data(); 22 | const float *grad_out = grad_out_tensor.data(); 23 | grouping_backward_cuda_launcher(b, c, n, m, nsample, grad_out, idx, grad_points); 24 | } 25 | 26 | void grouping_forward_cuda_fast(int b, int c, int n, int npoints, int nsample, at::Tensor points_tensor, at::Tensor idx_tensor, at::Tensor out_tensor) { 27 | 28 | const float *points = points_tensor.data(); 29 | const int *idx = idx_tensor.data(); 30 | float *out = out_tensor.data(); 31 | grouping_forward_cuda_launcher_fast(b, c, n, npoints, nsample, points, idx, out); 32 | } 
-------------------------------------------------------------------------------- /V2B_main/lib/pointops/src/grouping/grouping_cuda_kernel.h: -------------------------------------------------------------------------------- 1 | #ifndef _GROUPING_CUDA_KERNEL 2 | #define _GROUPING_CUDA_KERNEL 3 | #include 4 | #include 5 | #include 6 | 7 | void grouping_forward_cuda(int b, int c, int n, int m, int nsample, at::Tensor points_tensor, at::Tensor idx_tensor, at::Tensor out); 8 | void grouping_backward_cuda(int b, int c, int n, int m, int nsample, at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor grad_points_tensor); 9 | 10 | void grouping_forward_cuda_fast(int b, int c, int n, int npoints, int nsample, at::Tensor points_tensor, at::Tensor idx_tensor, at::Tensor out_tensor); 11 | 12 | #ifdef __cplusplus 13 | extern "C" { 14 | #endif 15 | 16 | void grouping_forward_cuda_launcher(int b, int c, int n, int m, int nsample, const float *points, const int *idx, float *out); 17 | void grouping_backward_cuda_launcher(int b, int c, int n, int m, int nsample, const float *grad_out, const int *idx, float *grad_points); 18 | 19 | void grouping_forward_cuda_launcher_fast(int b, int c, int n, int npoints, int nsample, const float *points, const int *idx, float *out); 20 | 21 | #ifdef __cplusplus 22 | } 23 | #endif 24 | #endif 25 | -------------------------------------------------------------------------------- /V2B_main/lib/pointops/src/grouping_int/grouping_int_cuda.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | #include "grouping_int_cuda_kernel.h" 7 | 8 | extern THCState *state; 9 | 10 | void grouping_int_forward_cuda(int b, int c, int n, int m, int nsample, at::Tensor points_tensor, at::Tensor idx_tensor, at::Tensor out_tensor) 11 | { 12 | const long int *points = points_tensor.data(); 13 | const int *idx = idx_tensor.data(); 14 | long int *out = out_tensor.data(); 15 | 
grouping_int_forward_cuda_launcher(b, c, n, m, nsample, points, idx, out); 16 | } 17 | 18 | void grouping_int_forward_cuda_fast(int b, int c, int n, int m, int nsample, at::Tensor points_tensor, at::Tensor idx_tensor, at::Tensor out_tensor) 19 | { 20 | const long int *points = points_tensor.data(); 21 | const int *idx = idx_tensor.data(); 22 | long int *out = out_tensor.data(); 23 | grouping_int_forward_cuda_launcher_fast(b, c, n, m, nsample, points, idx, out); 24 | } -------------------------------------------------------------------------------- /V2B_main/lib/pointops/src/grouping_int/grouping_int_cuda_kernel.cu: -------------------------------------------------------------------------------- 1 | #include "../cuda_utils.h" 2 | #include "grouping_int_cuda_kernel.h" 3 | 4 | // input: points(b, c, n) idx(b, m, nsample) 5 | // output: out(b, c, m, nsample) 6 | __global__ void grouping_int_forward_cuda_kernel(int b, int c, int n, int m, int nsample, const long int *points, const int *idx, long int *out) 7 | { 8 | int batch_index = blockIdx.x; 9 | points += batch_index * n * c; 10 | idx += batch_index * m * nsample; 11 | out += batch_index * m * nsample * c; 12 | const int index = threadIdx.y * blockDim.x + threadIdx.x; 13 | const int stride = blockDim.y * blockDim.x; 14 | for (int i = index; i < c * m; i += stride) 15 | { 16 | const int l = i / m; 17 | const int j = i % m; 18 | for (int k = 0; k < nsample; ++k) 19 | { 20 | int ii = idx[j * nsample + k]; 21 | out[(l * m + j) * nsample + k] = points[l * n + ii]; 22 | } 23 | } 24 | } 25 | 26 | 27 | void grouping_int_forward_cuda_launcher(int b, int c, int n, int m, int nsample, const long int *points, const int *idx, long int *out) 28 | { 29 | grouping_int_forward_cuda_kernel<<>>(b, c, n, m, nsample, points, idx, out); 30 | } 31 | 32 | 33 | __global__ void grouping_int_forward_cuda_kernel_fast(int b, int c, int n, int npoints, int nsample, const long int *__restrict__ points, const int *__restrict__ idx, long int 
*__restrict__ out) 34 | { 35 | int bs_idx = blockIdx.z; 36 | int c_idx = blockIdx.y; 37 | int index = blockIdx.x * blockDim.x + threadIdx.x; 38 | int pt_idx = index / nsample; 39 | if (bs_idx >= b || c_idx >= c || pt_idx >= npoints) return; 40 | 41 | int sample_idx = index % nsample; 42 | 43 | idx += bs_idx * npoints * nsample + pt_idx * nsample + sample_idx; 44 | int in_idx = bs_idx * c * n + c_idx * n + idx[0]; 45 | int out_idx = bs_idx * c * npoints * nsample + c_idx * npoints * nsample + pt_idx * nsample + sample_idx; 46 | 47 | out[out_idx] = points[in_idx]; 48 | } 49 | 50 | 51 | void grouping_int_forward_cuda_launcher_fast(int b, int c, int n, int npoints, int nsample, const long int *points, const int *idx, long int *out) 52 | { 53 | cudaError_t err; 54 | 55 | dim3 blocks(DIVUP(npoints * nsample, THREADS_PER_BLOCK), c, b); // blockIdx.x(col), blockIdx.y(row) 56 | dim3 threads(THREADS_PER_BLOCK); 57 | 58 | grouping_int_forward_cuda_kernel_fast<<>>(b, c, n, npoints, nsample, points, idx, out); 59 | // cudaDeviceSynchronize(); // for using printf in kernel function 60 | err = cudaGetLastError(); 61 | if (cudaSuccess != err) { 62 | fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); 63 | exit(-1); 64 | } 65 | } -------------------------------------------------------------------------------- /V2B_main/lib/pointops/src/grouping_int/grouping_int_cuda_kernel.h: -------------------------------------------------------------------------------- 1 | #ifndef _GROUPING_INT_CUDA_KERNEL 2 | #define _GROUPING_INT_CUDA_KERNEL 3 | #include 4 | #include 5 | #include 6 | 7 | void grouping_int_forward_cuda(int b, int c, int n, int m, int nsample, at::Tensor points_tensor, at::Tensor idx_tensor, at::Tensor out); 8 | 9 | void grouping_int_forward_cuda_fast(int b, int c, int n, int m, int nsample, at::Tensor points_tensor, at::Tensor idx_tensor, at::Tensor out_tensor); 10 | 11 | #ifdef __cplusplus 12 | extern "C" { 13 | #endif 14 | 15 | void 
grouping_int_forward_cuda_launcher(int b, int c, int n, int m, int nsample, const long int *points, const int *idx, long int *out); 16 | 17 | void grouping_int_forward_cuda_launcher_fast(int b, int c, int n, int npoints, int nsample, const long int *points, const int *idx, long int *out); 18 | 19 | #ifdef __cplusplus 20 | } 21 | #endif 22 | #endif 23 | -------------------------------------------------------------------------------- /V2B_main/lib/pointops/src/interpolation/interpolation_cuda.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include "interpolation_cuda_kernel.h" 6 | 7 | extern THCState *state; 8 | 9 | void nearestneighbor_cuda(int b, int n, int m, at::Tensor unknown_tensor, at::Tensor known_tensor, at::Tensor dist2_tensor, at::Tensor idx_tensor) 10 | { 11 | const float *unknown = unknown_tensor.data(); 12 | const float *known = known_tensor.data(); 13 | float *dist2 = dist2_tensor.data(); 14 | int *idx = idx_tensor.data(); 15 | nearestneighbor_cuda_launcher(b, n, m, unknown, known, dist2, idx); 16 | } 17 | 18 | void interpolation_forward_cuda(int b, int c, int m, int n, at::Tensor points_tensor, at::Tensor idx_tensor, at::Tensor weight_tensor, at::Tensor out_tensor) 19 | { 20 | const float *points = points_tensor.data(); 21 | const float *weight = weight_tensor.data(); 22 | float *out = out_tensor.data(); 23 | const int *idx = idx_tensor.data(); 24 | interpolation_forward_cuda_launcher(b, c, m, n, points, idx, weight, out); 25 | } 26 | 27 | void interpolation_backward_cuda(int b, int c, int n, int m, at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor weight_tensor, at::Tensor grad_points_tensor) 28 | { 29 | const float *grad_out = grad_out_tensor.data(); 30 | const float *weight = weight_tensor.data(); 31 | float *grad_points = grad_points_tensor.data(); 32 | const int *idx = idx_tensor.data(); 33 | interpolation_backward_cuda_launcher(b, c, n, m, 
grad_out, idx, weight, grad_points); 34 | } 35 | 36 | void nearestneighbor_cuda_fast(int b, int n, int m, at::Tensor unknown_tensor, at::Tensor known_tensor, at::Tensor dist2_tensor, at::Tensor idx_tensor) { 37 | const float *unknown = unknown_tensor.data(); 38 | const float *known = known_tensor.data(); 39 | float *dist2 = dist2_tensor.data(); 40 | int *idx = idx_tensor.data(); 41 | nearestneighbor_cuda_launcher_fast(b, n, m, unknown, known, dist2, idx); 42 | } 43 | 44 | void interpolation_forward_cuda_fast(int b, int c, int m, int n, at::Tensor points_tensor, at::Tensor idx_tensor, at::Tensor weight_tensor, at::Tensor out_tensor) { 45 | 46 | const float *points = points_tensor.data(); 47 | const float *weight = weight_tensor.data(); 48 | float *out = out_tensor.data(); 49 | const int *idx = idx_tensor.data(); 50 | interpolation_forward_cuda_launcher_fast(b, c, m, n, points, idx, weight, out); 51 | } -------------------------------------------------------------------------------- /V2B_main/lib/pointops/src/interpolation/interpolation_cuda_kernel.h: -------------------------------------------------------------------------------- 1 | #ifndef _INTERPOLATION_CUDA_KERNEL 2 | #define _INTERPOLATION_CUDA_KERNEL 3 | #include 4 | #include 5 | #include 6 | 7 | void nearestneighbor_cuda(int b, int n, int m, at::Tensor unknown_tensor, at::Tensor known_tensor, at::Tensor dist2_tensor, at::Tensor idx_tensor); 8 | void interpolation_forward_cuda(int b, int c, int m, int n, at::Tensor points_tensor, at::Tensor idx_tensor, at::Tensor weight_tensor, at::Tensor out_tensor); 9 | void interpolation_backward_cuda(int b, int c, int n, int m, at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor weight_tensor, at::Tensor grad_points_tensor); 10 | 11 | void nearestneighbor_cuda_fast(int b, int n, int m, at::Tensor unknown_tensor, at::Tensor known_tensor, at::Tensor dist2_tensor, at::Tensor idx_tensor); 12 | void interpolation_forward_cuda_fast(int b, int c, int m, int n, 
at::Tensor points_tensor, at::Tensor idx_tensor, at::Tensor weight_tensor, at::Tensor out_tensor); 13 | 14 | #ifdef __cplusplus 15 | extern "C" { 16 | #endif 17 | 18 | void nearestneighbor_cuda_launcher(int b, int n, int m, const float *unknown, const float *known, float *dist2, int *idx); 19 | void interpolation_forward_cuda_launcher(int b, int c, int m, int n, const float *points, const int *idx, const float *weight, float *out); 20 | void interpolation_backward_cuda_launcher(int b, int c, int n, int m, const float *grad_out, const int *idx, const float *weight, float *grad_points); 21 | 22 | void nearestneighbor_cuda_launcher_fast(int b, int n, int m, const float *unknown, const float *known, float *dist2, int *idx); 23 | void interpolation_forward_cuda_launcher_fast(int b, int c, int m, int n, const float *points, const int *idx, const float *weight, float *out); 24 | 25 | #ifdef __cplusplus 26 | } 27 | #endif 28 | #endif 29 | -------------------------------------------------------------------------------- /V2B_main/lib/pointops/src/knnquery/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/lib/pointops/src/knnquery/__init__.py -------------------------------------------------------------------------------- /V2B_main/lib/pointops/src/knnquery/knnquery_cuda.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | #include "knnquery_cuda_kernel.h" 7 | 8 | extern THCState *state; 9 | 10 | #define CHECK_CUDA(x) AT_CHECK(x.type().is_cuda(), #x, " must be a CUDAtensor ") 11 | #define CHECK_CONTIGUOUS(x) AT_CHECK(x.is_contiguous(), #x, " must be contiguous ") 12 | #define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x) 13 | 14 | 15 | void knnquery_cuda(int b, int n, int m, int nsample, at::Tensor xyz_tensor, at::Tensor new_xyz_tensor, 
at::Tensor idx_tensor, at::Tensor dist2_tensor) 16 | { 17 | CHECK_INPUT(new_xyz_tensor); 18 | CHECK_INPUT(xyz_tensor); 19 | 20 | const float *new_xyz = new_xyz_tensor.data(); 21 | const float *xyz = xyz_tensor.data(); 22 | int *idx = idx_tensor.data(); 23 | float *dist2 = dist2_tensor.data(); 24 | 25 | cudaStream_t stream = THCState_getCurrentStream(state); 26 | 27 | knnquery_cuda_launcher(b, n, m, nsample, xyz, new_xyz, idx, dist2, stream); 28 | } 29 | -------------------------------------------------------------------------------- /V2B_main/lib/pointops/src/knnquery/knnquery_cuda_kernel.cu: -------------------------------------------------------------------------------- 1 | #include "../cuda_utils.h" 2 | #include "knnquery_cuda_kernel.h" 3 | 4 | // input: xyz (b, n, 3) new_xyz (b, m, 3) 5 | // output: idx (b, m, nsample) dist2 (b, m, nsample) 6 | __global__ void knnquery_cuda_kernel(int b, int n, int m, int nsample, const float *__restrict__ xyz, const float *__restrict__ new_xyz, int *__restrict__ idx, float *__restrict__ dist2) { 7 | int bs_idx = blockIdx.y; 8 | int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; 9 | if (bs_idx >= b || pt_idx >= m) return; 10 | 11 | new_xyz += bs_idx * m * 3 + pt_idx * 3; 12 | xyz += bs_idx * n * 3; 13 | idx += bs_idx * m * nsample + pt_idx * nsample; 14 | 15 | float new_x = new_xyz[0]; 16 | float new_y = new_xyz[1]; 17 | float new_z = new_xyz[2]; 18 | 19 | //double* best = new double[nsample]; 20 | //int* besti = new int[nsample]; 21 | double best[200]; 22 | int besti[200]; 23 | for(int i = 0; i < nsample; i++){ 24 | best[i] = 1e40; 25 | besti[i] = 0; 26 | } 27 | for(int k = 0; k < n; k++){ 28 | float x = xyz[k * 3 + 0]; 29 | float y = xyz[k * 3 + 1]; 30 | float z = xyz[k * 3 + 2]; 31 | float d2 = (new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) + (new_z - z) * (new_z - z); 32 | for(int j = 0; j < nsample; j++){ 33 | if(d2 < best[j]){ 34 | for(int i = nsample - 1; i > j; i--){ 35 | best[i] = best[i - 1]; 36 | besti[i] = 
besti[i - 1]; 37 | } 38 | best[j] = d2; 39 | besti[j] = k; 40 | break; 41 | } 42 | } 43 | } 44 | for(int i = 0; i < nsample; i++){ 45 | idx[i] = besti[i]; 46 | dist2[i] = best[i]; 47 | } 48 | //delete []best; 49 | //delete []besti; 50 | } 51 | 52 | 53 | void knnquery_cuda_launcher(int b, int n, int m, int nsample, const float *xyz, const float *new_xyz, int *idx, float *dist2, cudaStream_t stream) { 54 | // param new_xyz: (B, m, 3) 55 | // param xyz: (B, n, 3) 56 | // param idx: (B, m, nsample) 57 | 58 | cudaError_t err; 59 | 60 | dim3 blocks(DIVUP(m, THREADS_PER_BLOCK), b); // blockIdx.x(col), blockIdx.y(row) 61 | dim3 threads(THREADS_PER_BLOCK); 62 | 63 | knnquery_cuda_kernel<<>>(b, n, m, nsample, xyz, new_xyz, idx, dist2); 64 | // cudaDeviceSynchronize(); // for using printf in kernel function 65 | 66 | err = cudaGetLastError(); 67 | if (cudaSuccess != err) { 68 | fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); 69 | exit(-1); 70 | } 71 | } -------------------------------------------------------------------------------- /V2B_main/lib/pointops/src/knnquery/knnquery_cuda_kernel.h: -------------------------------------------------------------------------------- 1 | #ifndef _KNNQUERY_CUDA_KERNEL 2 | #define _KNNQUERY_CUDA_KERNEL 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | void knnquery_cuda(int b, int n, int m, int nsample, at::Tensor xyz_tensor, at::Tensor new_xyz_tensor, at::Tensor idx_tensor, at::Tensor dist2_tensor); 9 | 10 | #ifdef __cplusplus 11 | extern "C" { 12 | #endif 13 | 14 | void knnquery_cuda_launcher(int b, int n, int m, int nsample, const float *xyz, const float *new_xyz, int *idx, float *dist2, cudaStream_t stream); 15 | 16 | #ifdef __cplusplus 17 | } 18 | #endif 19 | 20 | #endif -------------------------------------------------------------------------------- /V2B_main/lib/pointops/src/labelstat/labelstat_cuda.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 
3 | #include 4 | #include 5 | 6 | #include "labelstat_cuda_kernel.h" 7 | 8 | extern THCState *state; 9 | 10 | #define CHECK_CUDA(x) AT_CHECK(x.type().is_cuda(), #x, " must be a CUDAtensor ") 11 | #define CHECK_CONTIGUOUS(x) AT_CHECK(x.is_contiguous(), #x, " must be contiguous ") 12 | #define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x) 13 | 14 | void labelstat_idx_cuda_fast(int b, int n, int m, int nsample, int nclass, 15 | at::Tensor label_stat_tensor, at::Tensor idx_tensor, at::Tensor new_label_stat_tensor) 16 | { 17 | CHECK_INPUT(label_stat_tensor); 18 | CHECK_INPUT(idx_tensor); 19 | 20 | const int *label_stat = label_stat_tensor.data(); 21 | const int *idx = idx_tensor.data(); 22 | int *new_label_stat = new_label_stat_tensor.data(); 23 | 24 | cudaStream_t stream = THCState_getCurrentStream(state); 25 | 26 | labelstat_idx_cuda_launcher_fast(b, n, m, nsample, nclass, label_stat, idx, new_label_stat, stream); 27 | } 28 | 29 | void labelstat_ballrange_cuda_fast(int b, int n, int m, float radius, int nclass, 30 | at::Tensor new_xyz_tensor, at::Tensor xyz_tensor, at::Tensor label_stat_tensor, at::Tensor new_label_stat_tensor) 31 | { 32 | CHECK_INPUT(new_xyz_tensor); 33 | CHECK_INPUT(xyz_tensor); 34 | CHECK_INPUT(label_stat_tensor); 35 | 36 | const float *new_xyz = new_xyz_tensor.data(); 37 | const float *xyz = xyz_tensor.data(); 38 | const int *label_stat = label_stat_tensor.data(); 39 | int *new_label_stat = new_label_stat_tensor.data(); 40 | 41 | cudaStream_t stream = THCState_getCurrentStream(state); 42 | 43 | labelstat_ballrange_cuda_launcher_fast(b, n, m, radius, nclass, new_xyz, xyz, label_stat, new_label_stat, stream); 44 | } 45 | 46 | void labelstat_and_ballquery_cuda_fast(int b, int n, int m, float radius, int nsample, int nclass, 47 | at::Tensor new_xyz_tensor, at::Tensor xyz_tensor, at::Tensor label_stat_tensor, at::Tensor idx_tensor, at::Tensor new_label_stat_tensor) 48 | { 49 | CHECK_INPUT(new_xyz_tensor); 50 | CHECK_INPUT(xyz_tensor); 51 | 
CHECK_INPUT(label_stat_tensor); 52 | CHECK_INPUT(idx_tensor); 53 | 54 | const float *new_xyz = new_xyz_tensor.data(); 55 | const float *xyz = xyz_tensor.data(); 56 | const int *label_stat = label_stat_tensor.data(); 57 | int *idx = idx_tensor.data(); 58 | int *new_label_stat = new_label_stat_tensor.data(); 59 | 60 | cudaStream_t stream = THCState_getCurrentStream(state); 61 | 62 | labelstat_and_ballquery_cuda_launcher_fast(b, n, m, radius, nsample, nclass, new_xyz, xyz, label_stat, idx, new_label_stat, stream); 63 | } 64 | -------------------------------------------------------------------------------- /V2B_main/lib/pointops/src/labelstat/labelstat_cuda_kernel.h: -------------------------------------------------------------------------------- 1 | #ifndef _LABELSTAT_CUDA_KERNEL 2 | #define _LABELSTAT_CUDA_KERNEL 3 | #include 4 | #include 5 | #include 6 | 7 | void labelstat_and_ballquery_cuda_fast(int b, int n, int m, float radius, int nsample, int nclass, 8 | at::Tensor new_xyz_tensor, at::Tensor xyz_tensor, at::Tensor label_stat_tensor, at::Tensor idx_tensor, at::Tensor new_label_stat_tensor); 9 | 10 | void labelstat_ballrange_cuda_fast(int b, int n, int m, float radius, int nclass, 11 | at::Tensor new_xyz_tensor, at::Tensor xyz_tensor, at::Tensor label_stat_tensor, at::Tensor new_label_stat_tensor); 12 | 13 | void labelstat_idx_cuda_fast(int b, int n, int m, int nsample, int nclass, 14 | at::Tensor label_stat_tensor, at::Tensor idx_tensor, at::Tensor new_label_stat_tensor); 15 | 16 | #ifdef __cplusplus 17 | extern "C" { 18 | #endif 19 | 20 | void labelstat_and_ballquery_cuda_launcher_fast(int b, int n, int m, float radius, int nsample, int nclass, \ 21 | const float *new_xyz, const float *xyz, const int *label_stat, int *idx, int *new_label_stat, cudaStream_t stream); 22 | 23 | void labelstat_ballrange_cuda_launcher_fast(int b, int n, int m, float radius, int nclass, \ 24 | const float *new_xyz, const float *xyz, const int *label_stat, int *new_label_stat, 
cudaStream_t stream); 25 | 26 | void labelstat_idx_cuda_launcher_fast(int b, int n, int m, int nsample, int nclass, \ 27 | const int *label_stat, const int *idx, int *new_label_stat, cudaStream_t stream); 28 | 29 | #ifdef __cplusplus 30 | } 31 | #endif 32 | 33 | #endif 34 | -------------------------------------------------------------------------------- /V2B_main/lib/pointops/src/pointops_api.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "ballquery/ballquery_cuda_kernel.h" 5 | #include "grouping/grouping_cuda_kernel.h" 6 | #include "grouping_int/grouping_int_cuda_kernel.h" 7 | #include "sampling/sampling_cuda_kernel.h" 8 | #include "interpolation/interpolation_cuda_kernel.h" 9 | #include "knnquery/knnquery_cuda_kernel.h" 10 | 11 | #include "labelstat/labelstat_cuda_kernel.h" 12 | #include "featuredistribute/featuredistribute_cuda_kernel.h" 13 | 14 | 15 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 16 | m.def("ballquery_cuda", &ballquery_cuda_fast, "ballquery_cuda_fast"); // name in python, cpp function address, docs 17 | 18 | m.def("knnquery_cuda", &knnquery_cuda, "knnquery_cuda"); 19 | 20 | m.def("grouping_forward_cuda", &grouping_forward_cuda_fast, "grouping_forward_cuda_fast"); 21 | m.def("grouping_backward_cuda", &grouping_backward_cuda, "grouping_backward_cuda"); 22 | 23 | m.def("grouping_int_forward_cuda", &grouping_int_forward_cuda_fast, "grouping_int_forward_cuda_fast"); 24 | 25 | m.def("gathering_forward_cuda", &gathering_forward_cuda, "gathering_forward_cuda"); 26 | m.def("gathering_backward_cuda", &gathering_backward_cuda, "gathering_backward_cuda"); 27 | m.def("furthestsampling_cuda", &furthestsampling_cuda, "furthestsampling_cuda"); 28 | 29 | m.def("nearestneighbor_cuda", &nearestneighbor_cuda_fast, "nearestneighbor_cuda_fast"); 30 | m.def("interpolation_forward_cuda", &interpolation_forward_cuda_fast, "interpolation_forward_cuda_fast"); 31 | 
m.def("interpolation_backward_cuda", &interpolation_backward_cuda, "interpolation_backward_cuda"); 32 | 33 | m.def("labelstat_idx_cuda", &labelstat_idx_cuda_fast, "labelstat_idx_cuda_fast"); 34 | m.def("labelstat_ballrange_cuda", &labelstat_ballrange_cuda_fast, "labelstat_ballrange_cuda_fast"); 35 | m.def("labelstat_and_ballquery_cuda", &labelstat_and_ballquery_cuda_fast, "labelstat_and_ballquery_cuda_fast"); 36 | 37 | m.def("featuredistribute_cuda", &featuredistribute_cuda, "featuredistribute_cuda"); 38 | m.def("featuregather_forward_cuda", &featuregather_forward_cuda, "featuregather_forward_cuda"); 39 | m.def("featuregather_backward_cuda", &featuregather_backward_cuda, "featuregather_backward_cuda"); 40 | } 41 | -------------------------------------------------------------------------------- /V2B_main/lib/pointops/src/sampling/sampling_cuda.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include "sampling_cuda_kernel.h" 6 | 7 | extern THCState *state; 8 | 9 | void gathering_forward_cuda(int b, int c, int n, int m, at::Tensor points_tensor, at::Tensor idx_tensor, at::Tensor out_tensor) 10 | { 11 | const float *points = points_tensor.data(); 12 | const int *idx = idx_tensor.data(); 13 | float *out = out_tensor.data(); 14 | gathering_forward_cuda_launcher(b, c, n, m, points, idx, out); 15 | } 16 | 17 | void gathering_backward_cuda(int b, int c, int n, int m, at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor grad_points_tensor) 18 | { 19 | 20 | const float *grad_out = grad_out_tensor.data(); 21 | const int *idx = idx_tensor.data(); 22 | float *grad_points = grad_points_tensor.data(); 23 | gathering_backward_cuda_launcher(b, c, n, m, grad_out, idx, grad_points); 24 | } 25 | 26 | void furthestsampling_cuda(int b, int n, int m, at::Tensor points_tensor, at::Tensor temp_tensor, at::Tensor idx_tensor) 27 | { 28 | const float *points = points_tensor.data(); 29 | float 
*temp = temp_tensor.data(); 30 | int *idx = idx_tensor.data(); 31 | furthestsampling_cuda_launcher(b, n, m, points, temp, idx); 32 | } 33 | -------------------------------------------------------------------------------- /V2B_main/lib/pointops/src/sampling/sampling_cuda_kernel.h: -------------------------------------------------------------------------------- 1 | #ifndef _SAMPLING_CUDA_KERNEL 2 | #define _SAMPLING_CUDA_KERNEL 3 | #include 4 | #include 5 | #include 6 | 7 | void gathering_forward_cuda(int b, int c, int n, int m, at::Tensor points_tensor, at::Tensor idx_tensor, at::Tensor out_tensor); 8 | void gathering_backward_cuda(int b, int c, int n, int m, at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor grad_points_tensor); 9 | void furthestsampling_cuda(int b, int n, int m, at::Tensor points_tensor, at::Tensor temp_tensor, at::Tensor idx_tensor); 10 | 11 | #ifdef __cplusplus 12 | extern "C" { 13 | #endif 14 | 15 | void gathering_forward_cuda_launcher(int b, int c, int n, int m, const float *points, const int *idx, float *out); 16 | void gathering_backward_cuda_launcher(int b, int c, int n, int m, const float *grad_out, const int *idx, float *grad_points); 17 | void furthestsampling_cuda_launcher(int b, int n, int m, const float *dataset, float *temp, int *idxs); 18 | 19 | #ifdef __cplusplus 20 | } 21 | #endif 22 | #endif 23 | -------------------------------------------------------------------------------- /V2B_main/lib/sync_bn/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # File : __init__.py 3 | # Author : Jiayuan Mao 4 | # Email : maojiayuan@gmail.com 5 | # Date : 27/01/2018 6 | # 7 | # This file is part of Synchronized-BatchNorm-PyTorch. 8 | # https://github.com/vacancy/Synchronized-BatchNorm-PyTorch 9 | # Distributed under MIT License. 
10 | 11 | from .batchnorm import SynchronizedBatchNorm1d, SynchronizedBatchNorm2d, SynchronizedBatchNorm3d 12 | from .replicate import DataParallelWithCallback, patch_replication_callback 13 | -------------------------------------------------------------------------------- /V2B_main/lib/sync_bn/unittest.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # File : unittest.py 3 | # Author : Jiayuan Mao 4 | # Email : maojiayuan@gmail.com 5 | # Date : 27/01/2018 6 | # 7 | # This file is part of Synchronized-BatchNorm-PyTorch. 8 | # https://github.com/vacancy/Synchronized-BatchNorm-PyTorch 9 | # Distributed under MIT License. 10 | 11 | import unittest 12 | 13 | import numpy as np 14 | from torch.autograd import Variable 15 | 16 | 17 | def as_numpy(v): 18 | if isinstance(v, Variable): 19 | v = v.data 20 | return v.cpu().numpy() 21 | 22 | 23 | class TorchTestCase(unittest.TestCase): 24 | def assertTensorClose(self, a, b, atol=1e-3, rtol=1e-3): 25 | npa, npb = as_numpy(a), as_numpy(b) 26 | self.assertTrue( 27 | np.allclose(npa, npb, atol=atol), 28 | 'Tensor close check failed\n{}\n{}\nadiff={}, rdiff={}'.format(a, b, np.abs(npa - npb).max(), np.abs((npa - npb) / np.fmax(npa, 1e-5)).max()) 29 | ) 30 | -------------------------------------------------------------------------------- /V2B_main/modules/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import ( 2 | division, 3 | absolute_import, 4 | with_statement, 5 | print_function, 6 | unicode_literals, 7 | ) -------------------------------------------------------------------------------- /V2B_main/modules/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/modules/__pycache__/__init__.cpython-37.pyc 
-------------------------------------------------------------------------------- /V2B_main/modules/__pycache__/backbone_net.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/modules/__pycache__/backbone_net.cpython-37.pyc -------------------------------------------------------------------------------- /V2B_main/modules/__pycache__/completion_net.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/modules/__pycache__/completion_net.cpython-37.pyc -------------------------------------------------------------------------------- /V2B_main/modules/__pycache__/v2b_net.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/modules/__pycache__/v2b_net.cpython-37.pyc -------------------------------------------------------------------------------- /V2B_main/modules/pointnet2/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import ( 2 | division, 3 | absolute_import, 4 | with_statement, 5 | print_function, 6 | unicode_literals, 7 | ) 8 | 9 | __version__ = "2.1.1" 10 | 11 | try: 12 | __POINTNET2_SETUP__ 13 | except NameError: 14 | __POINTNET2_SETUP__ = False 15 | 16 | if not __POINTNET2_SETUP__: 17 | from modules.pointnet2 import utils 18 | -------------------------------------------------------------------------------- /V2B_main/modules/pointnet2/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/modules/pointnet2/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /V2B_main/modules/pointnet2/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/modules/pointnet2/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /V2B_main/modules/pointnet2/_ext-src/include/ball_query.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | 4 | at::Tensor ball_query(at::Tensor new_xyz, at::Tensor xyz, const float radius, 5 | const int nsample); 6 | -------------------------------------------------------------------------------- /V2B_main/modules/pointnet2/_ext-src/include/ball_query_score.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | 4 | at::Tensor ball_query_score(at::Tensor new_xyz, at::Tensor xyz, at::Tensor score, const float radius, 5 | const int nsample); -------------------------------------------------------------------------------- /V2B_main/modules/pointnet2/_ext-src/include/cuda_utils.h: -------------------------------------------------------------------------------- 1 | #ifndef _CUDA_UTILS_H 2 | #define _CUDA_UTILS_H 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | #include 9 | #include 10 | 11 | #include 12 | 13 | #define TOTAL_THREADS 512 14 | 15 | inline int opt_n_threads(int work_size) { 16 | const int pow_2 = std::log(static_cast(work_size)) / std::log(2.0); 17 | 18 | return max(min(1 << pow_2, TOTAL_THREADS), 1); 19 | } 20 | 21 | inline dim3 opt_block_config(int x, int y) { 22 | const int x_threads = opt_n_threads(x); 23 | const int y_threads = 
24 | max(min(opt_n_threads(y), TOTAL_THREADS / x_threads), 1); 25 | dim3 block_config(x_threads, y_threads, 1); 26 | 27 | return block_config; 28 | } 29 | 30 | #define CUDA_CHECK_ERRORS() \ 31 | do { \ 32 | cudaError_t err = cudaGetLastError(); \ 33 | if (cudaSuccess != err) { \ 34 | fprintf(stderr, "CUDA kernel failed : %s\n%s at L:%d in %s\n", \ 35 | cudaGetErrorString(err), __PRETTY_FUNCTION__, __LINE__, \ 36 | __FILE__); \ 37 | exit(-1); \ 38 | } \ 39 | } while (0) 40 | 41 | #endif 42 | -------------------------------------------------------------------------------- /V2B_main/modules/pointnet2/_ext-src/include/group_points.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | 4 | at::Tensor group_points(at::Tensor points, at::Tensor idx); 5 | at::Tensor group_points_grad(at::Tensor grad_out, at::Tensor idx, const int n); 6 | -------------------------------------------------------------------------------- /V2B_main/modules/pointnet2/_ext-src/include/interpolate.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | 6 | std::vector three_nn(at::Tensor unknowns, at::Tensor knows); 7 | at::Tensor three_interpolate(at::Tensor points, at::Tensor idx, 8 | at::Tensor weight); 9 | at::Tensor three_interpolate_grad(at::Tensor grad_out, at::Tensor idx, 10 | at::Tensor weight, const int m); 11 | -------------------------------------------------------------------------------- /V2B_main/modules/pointnet2/_ext-src/include/sampling.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | 4 | at::Tensor gather_points(at::Tensor points, at::Tensor idx); 5 | at::Tensor gather_points_grad(at::Tensor grad_out, at::Tensor idx, const int n); 6 | at::Tensor furthest_point_sampling(at::Tensor points, const int nsamples); 7 | 
-------------------------------------------------------------------------------- /V2B_main/modules/pointnet2/_ext-src/include/utils.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | #include 4 | 5 | #define CHECK_CUDA(x) \ 6 | do { \ 7 | AT_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor"); \ 8 | } while (0) 9 | 10 | #define CHECK_CONTIGUOUS(x) \ 11 | do { \ 12 | AT_CHECK(x.is_contiguous(), #x " must be a contiguous tensor"); \ 13 | } while (0) 14 | 15 | #define CHECK_IS_INT(x) \ 16 | do { \ 17 | AT_CHECK(x.scalar_type() == at::ScalarType::Int, \ 18 | #x " must be an int tensor"); \ 19 | } while (0) 20 | 21 | #define CHECK_IS_FLOAT(x) \ 22 | do { \ 23 | AT_CHECK(x.scalar_type() == at::ScalarType::Float, \ 24 | #x " must be a float tensor"); \ 25 | } while (0) 26 | -------------------------------------------------------------------------------- /V2B_main/modules/pointnet2/_ext-src/src/ball_query.cpp: -------------------------------------------------------------------------------- 1 | #include "ball_query.h" 2 | #include "utils.h" 3 | 4 | void query_ball_point_kernel_wrapper(int b, int n, int m, float radius, 5 | int nsample, const float *new_xyz, 6 | const float *xyz, int *idx); 7 | 8 | at::Tensor ball_query(at::Tensor new_xyz, at::Tensor xyz, const float radius, 9 | const int nsample) { 10 | CHECK_CONTIGUOUS(new_xyz); 11 | CHECK_CONTIGUOUS(xyz); 12 | CHECK_IS_FLOAT(new_xyz); 13 | CHECK_IS_FLOAT(xyz); 14 | 15 | if (new_xyz.type().is_cuda()) { 16 | CHECK_CUDA(xyz); 17 | } 18 | 19 | at::Tensor idx = 20 | torch::zeros({new_xyz.size(0), new_xyz.size(1), nsample}, 21 | at::device(new_xyz.device()).dtype(at::ScalarType::Int)); 22 | 23 | if (new_xyz.type().is_cuda()) { 24 | query_ball_point_kernel_wrapper(xyz.size(0), xyz.size(1), new_xyz.size(1), 25 | radius, nsample, new_xyz.data(), 26 | xyz.data(), idx.data()); 27 | } else { 28 | AT_CHECK(false, "CPU not supported"); 29 | } 30 | 31 | return idx; 32 
| } 33 | -------------------------------------------------------------------------------- /V2B_main/modules/pointnet2/_ext-src/src/ball_query_gpu.cu: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #include "cuda_utils.h" 6 | 7 | // input: new_xyz(b, m, 3) xyz(b, n, 3) 8 | // output: idx(b, m, nsample) 9 | __global__ void query_ball_point_kernel(int b, int n, int m, float radius, 10 | int nsample, 11 | const float *__restrict__ new_xyz, 12 | const float *__restrict__ xyz, 13 | int *__restrict__ idx) { 14 | int batch_index = blockIdx.x; 15 | xyz += batch_index * n * 3; 16 | new_xyz += batch_index * m * 3; 17 | idx += m * nsample * batch_index; 18 | 19 | int index = threadIdx.x; 20 | int stride = blockDim.x; 21 | 22 | float radius2 = radius * radius; 23 | for (int j = index; j < m; j += stride) { 24 | float new_x = new_xyz[j * 3 + 0]; 25 | float new_y = new_xyz[j * 3 + 1]; 26 | float new_z = new_xyz[j * 3 + 2]; 27 | for (int k = 0, cnt = 0; k < n && cnt < nsample; ++k) { 28 | float x = xyz[k * 3 + 0]; 29 | float y = xyz[k * 3 + 1]; 30 | float z = xyz[k * 3 + 2]; 31 | float d2 = (new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) + 32 | (new_z - z) * (new_z - z); 33 | if (d2 < radius2) { 34 | if (cnt == 0) { 35 | for (int l = 0; l < nsample; ++l) { 36 | idx[j * nsample + l] = k; 37 | } 38 | } 39 | idx[j * nsample + cnt] = k; 40 | ++cnt; 41 | } 42 | } 43 | } 44 | } 45 | 46 | void query_ball_point_kernel_wrapper(int b, int n, int m, float radius, 47 | int nsample, const float *new_xyz, 48 | const float *xyz, int *idx) { 49 | cudaStream_t stream = at::cuda::getCurrentCUDAStream(); 50 | query_ball_point_kernel<<>>( 51 | b, n, m, radius, nsample, new_xyz, xyz, idx); 52 | 53 | CUDA_CHECK_ERRORS(); 54 | } 55 | -------------------------------------------------------------------------------- /V2B_main/modules/pointnet2/_ext-src/src/ball_query_score.cpp: 
-------------------------------------------------------------------------------- 1 | #include "ball_query_score.h" 2 | #include "utils.h" 3 | 4 | void query_ball_point_score_kernel_wrapper(int b, int n, int m, float radius, 5 | int nsample, const float *new_xyz, 6 | const float *xyz, const float *score, float *unique_score); 7 | 8 | 9 | at::Tensor ball_query_score(at::Tensor new_xyz, at::Tensor xyz, at::Tensor score, const float radius, 10 | const int nsample) { 11 | CHECK_CONTIGUOUS(new_xyz); 12 | CHECK_CONTIGUOUS(xyz); 13 | CHECK_CONTIGUOUS(score); 14 | CHECK_IS_FLOAT(new_xyz); 15 | CHECK_IS_FLOAT(xyz); 16 | CHECK_IS_FLOAT(score); 17 | 18 | if (new_xyz.type().is_cuda()) { 19 | CHECK_CUDA(xyz); 20 | CHECK_CUDA(score); 21 | } 22 | 23 | at::Tensor unique_score = 24 | torch::zeros({new_xyz.size(0), new_xyz.size(1)}, 25 | at::device(new_xyz.device()).dtype(at::ScalarType::Float)); 26 | 27 | if (new_xyz.type().is_cuda()) { 28 | query_ball_point_score_kernel_wrapper(xyz.size(0), xyz.size(1), new_xyz.size(1), 29 | radius, nsample, new_xyz.data(), 30 | xyz.data(), score.data(), unique_score.data()); 31 | } else { 32 | AT_CHECK(false, "CPU not supported"); 33 | } 34 | 35 | return unique_score; 36 | } 37 | -------------------------------------------------------------------------------- /V2B_main/modules/pointnet2/_ext-src/src/ball_query_score_gpu.cu: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #include "cuda_utils.h" 6 | 7 | // input: new_xyz(b, m, 3) xyz(b, n, 3) 8 | // output: idx(b, m, nsample) 9 | __global__ void query_ball_point_score_kernel(int b, int n, int m, float radius, 10 | int nsample, 11 | const float *__restrict__ new_xyz, 12 | const float *__restrict__ xyz, 13 | const float *__restrict__ score, 14 | float *__restrict__ unique_score) { 15 | int batch_index = blockIdx.x; 16 | xyz += batch_index * n * 3; 17 | new_xyz += batch_index * m * 3; 18 | score += batch_index * n; 19 | 
unique_score += m * batch_index; 20 | 21 | int index = threadIdx.x; 22 | int stride = blockDim.x; 23 | 24 | float radius2 = radius * radius; 25 | for (int j = index; j < m; j += stride) { 26 | float new_x = new_xyz[j * 3 + 0]; 27 | float new_y = new_xyz[j * 3 + 1]; 28 | float new_z = new_xyz[j * 3 + 2]; 29 | for (int k = 0, cnt = 0; k < n && cnt < nsample; ++k) { 30 | float x = xyz[k * 3 + 0]; 31 | float y = xyz[k * 3 + 1]; 32 | float z = xyz[k * 3 + 2]; 33 | float s = score[k]; 34 | float d2 = (new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) + 35 | (new_z - z) * (new_z - z); 36 | if (d2 < radius2) { 37 | unique_score[j] += s; 38 | ++cnt; 39 | } 40 | } 41 | } 42 | } 43 | 44 | void query_ball_point_score_kernel_wrapper(int b, int n, int m, float radius, 45 | int nsample, const float *new_xyz, 46 | const float *xyz, const float *score, float *unique_score) { 47 | cudaStream_t stream = at::cuda::getCurrentCUDAStream(); 48 | query_ball_point_score_kernel<<>>( 49 | b, n, m, radius, nsample, new_xyz, xyz, score, unique_score); 50 | 51 | CUDA_CHECK_ERRORS(); 52 | } 53 | -------------------------------------------------------------------------------- /V2B_main/modules/pointnet2/_ext-src/src/bindings.cpp: -------------------------------------------------------------------------------- 1 | #include "ball_query.h" 2 | #include "group_points.h" 3 | #include "interpolate.h" 4 | #include "sampling.h" 5 | #include "ball_query_score.h" 6 | 7 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 8 | m.def("gather_points", &gather_points); 9 | m.def("gather_points_grad", &gather_points_grad); 10 | m.def("furthest_point_sampling", &furthest_point_sampling); 11 | 12 | m.def("three_nn", &three_nn); 13 | m.def("three_interpolate", &three_interpolate); 14 | m.def("three_interpolate_grad", &three_interpolate_grad); 15 | 16 | m.def("ball_query", &ball_query); 17 | m.def("ball_query_score", &ball_query_score); 18 | 19 | m.def("group_points", &group_points); 20 | m.def("group_points_grad", 
&group_points_grad); 21 | } 22 | -------------------------------------------------------------------------------- /V2B_main/modules/pointnet2/_ext-src/src/group_points.cpp: -------------------------------------------------------------------------------- 1 | #include "group_points.h" 2 | #include "utils.h" 3 | 4 | void group_points_kernel_wrapper(int b, int c, int n, int npoints, int nsample, 5 | const float *points, const int *idx, 6 | float *out); 7 | 8 | void group_points_grad_kernel_wrapper(int b, int c, int n, int npoints, 9 | int nsample, const float *grad_out, 10 | const int *idx, float *grad_points); 11 | 12 | at::Tensor group_points(at::Tensor points, at::Tensor idx) { 13 | CHECK_CONTIGUOUS(points); 14 | CHECK_CONTIGUOUS(idx); 15 | CHECK_IS_FLOAT(points); 16 | CHECK_IS_INT(idx); 17 | 18 | if (points.type().is_cuda()) { 19 | CHECK_CUDA(idx); 20 | } 21 | 22 | at::Tensor output = 23 | torch::zeros({points.size(0), points.size(1), idx.size(1), idx.size(2)}, 24 | at::device(points.device()).dtype(at::ScalarType::Float)); 25 | 26 | if (points.type().is_cuda()) { 27 | group_points_kernel_wrapper(points.size(0), points.size(1), points.size(2), 28 | idx.size(1), idx.size(2), points.data(), 29 | idx.data(), output.data()); 30 | } else { 31 | AT_CHECK(false, "CPU not supported"); 32 | } 33 | 34 | return output; 35 | } 36 | 37 | at::Tensor group_points_grad(at::Tensor grad_out, at::Tensor idx, const int n) { 38 | CHECK_CONTIGUOUS(grad_out); 39 | CHECK_CONTIGUOUS(idx); 40 | CHECK_IS_FLOAT(grad_out); 41 | CHECK_IS_INT(idx); 42 | 43 | if (grad_out.type().is_cuda()) { 44 | CHECK_CUDA(idx); 45 | } 46 | 47 | at::Tensor output = 48 | torch::zeros({grad_out.size(0), grad_out.size(1), n}, 49 | at::device(grad_out.device()).dtype(at::ScalarType::Float)); 50 | 51 | if (grad_out.type().is_cuda()) { 52 | group_points_grad_kernel_wrapper( 53 | grad_out.size(0), grad_out.size(1), n, idx.size(1), idx.size(2), 54 | grad_out.data(), idx.data(), output.data()); 55 | } else { 56 | 
AT_CHECK(false, "CPU not supported"); 57 | } 58 | 59 | return output; 60 | } 61 | -------------------------------------------------------------------------------- /V2B_main/modules/pointnet2/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import ( 2 | division, 3 | absolute_import, 4 | with_statement, 5 | print_function, 6 | unicode_literals, 7 | ) 8 | -------------------------------------------------------------------------------- /V2B_main/modules/pointnet2/utils/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/modules/pointnet2/utils/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /V2B_main/modules/pointnet2/utils/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/modules/pointnet2/utils/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /V2B_main/modules/pointnet2/utils/__pycache__/kalman_filter.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/modules/pointnet2/utils/__pycache__/kalman_filter.cpython-36.pyc -------------------------------------------------------------------------------- /V2B_main/modules/pointnet2/utils/__pycache__/kalman_filter.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/modules/pointnet2/utils/__pycache__/kalman_filter.cpython-37.pyc -------------------------------------------------------------------------------- /V2B_main/modules/pointnet2/utils/__pycache__/non_local.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/modules/pointnet2/utils/__pycache__/non_local.cpython-36.pyc -------------------------------------------------------------------------------- /V2B_main/modules/pointnet2/utils/__pycache__/non_local.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/modules/pointnet2/utils/__pycache__/non_local.cpython-37.pyc -------------------------------------------------------------------------------- /V2B_main/modules/pointnet2/utils/__pycache__/pointnet2_modules.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/modules/pointnet2/utils/__pycache__/pointnet2_modules.cpython-36.pyc -------------------------------------------------------------------------------- /V2B_main/modules/pointnet2/utils/__pycache__/pointnet2_modules.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/modules/pointnet2/utils/__pycache__/pointnet2_modules.cpython-37.pyc -------------------------------------------------------------------------------- /V2B_main/modules/pointnet2/utils/linalg_utils.py: -------------------------------------------------------------------------------- 1 | from __future__ import ( 
2 | division, 3 | absolute_import, 4 | with_statement, 5 | print_function, 6 | unicode_literals, 7 | ) 8 | import torch 9 | from enum import Enum 10 | import numpy as np 11 | 12 | PDist2Order = Enum("PDist2Order", "d_first d_second") 13 | 14 | 15 | def pdist2(X, Z=None, order=PDist2Order.d_second): 16 | # type: (torch.Tensor, torch.Tensor, PDist2Order) -> torch.Tensor 17 | r""" Calculates the pairwise distance between X and Z 18 | 19 | D[b, i, j] = l2 distance X[b, i] and Z[b, j] 20 | 21 | Parameters 22 | --------- 23 | X : torch.Tensor 24 | X is a (B, N, d) tensor. There are B batches, and N vectors of dimension d 25 | Z: torch.Tensor 26 | Z is a (B, M, d) tensor. If Z is None, then Z = X 27 | 28 | Returns 29 | ------- 30 | torch.Tensor 31 | Distance matrix is size (B, N, M) 32 | """ 33 | 34 | if order == PDist2Order.d_second: 35 | if X.dim() == 2: 36 | X = X.unsqueeze(0) 37 | if Z is None: 38 | Z = X 39 | G = np.matmul(X, Z.transpose(-2, -1)) 40 | S = (X * X).sum(-1, keepdim=True) 41 | R = S.transpose(-2, -1) 42 | else: 43 | if Z.dim() == 2: 44 | Z = Z.unsqueeze(0) 45 | G = np.matmul(X, Z.transpose(-2, -1)) 46 | S = (X * X).sum(-1, keepdim=True) 47 | R = (Z * Z).sum(-1, keepdim=True).transpose(-2, -1) 48 | else: 49 | if X.dim() == 2: 50 | X = X.unsqueeze(0) 51 | if Z is None: 52 | Z = X 53 | G = np.matmul(X.transpose(-2, -1), Z) 54 | R = (X * X).sum(-2, keepdim=True) 55 | S = R.transpose(-2, -1) 56 | else: 57 | if Z.dim() == 2: 58 | Z = Z.unsqueeze(0) 59 | G = np.matmul(X.transpose(-2, -1), Z) 60 | S = (X * X).sum(-2, keepdim=True).transpose(-2, -1) 61 | R = (Z * Z).sum(-2, keepdim=True) 62 | 63 | return torch.abs(R + S - 2 * G).squeeze(0) 64 | 65 | 66 | def pdist2_slow(X, Z=None): 67 | if Z is None: 68 | Z = X 69 | D = torch.zeros(X.size(0), X.size(2), Z.size(2)) 70 | 71 | for b in range(D.size(0)): 72 | for i in range(D.size(1)): 73 | for j in range(D.size(2)): 74 | D[b, i, j] = torch.dist(X[b, :, i], Z[b, :, j]) 75 | return D 76 | 77 | 78 | if __name__ == 
"__main__": 79 | X = torch.randn(2, 3, 5) 80 | Z = torch.randn(2, 3, 3) 81 | 82 | print(pdist2(X, order=PDist2Order.d_first)) 83 | print(pdist2_slow(X)) 84 | print(torch.dist(pdist2(X, order=PDist2Order.d_first), pdist2_slow(X))) 85 | -------------------------------------------------------------------------------- /V2B_main/modules/voxel_utils/__init__.py: -------------------------------------------------------------------------------- 1 | from modules.voxel_utils.se import SE3d 2 | from modules.voxel_utils.voxelization import Voxelization 3 | -------------------------------------------------------------------------------- /V2B_main/modules/voxel_utils/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/modules/voxel_utils/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /V2B_main/modules/voxel_utils/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/modules/voxel_utils/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /V2B_main/modules/voxel_utils/__pycache__/block.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/modules/voxel_utils/__pycache__/block.cpython-36.pyc -------------------------------------------------------------------------------- /V2B_main/modules/voxel_utils/__pycache__/block.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/modules/voxel_utils/__pycache__/block.cpython-37.pyc -------------------------------------------------------------------------------- /V2B_main/modules/voxel_utils/__pycache__/region_proposal_network.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/modules/voxel_utils/__pycache__/region_proposal_network.cpython-36.pyc -------------------------------------------------------------------------------- /V2B_main/modules/voxel_utils/__pycache__/region_proposal_network.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/modules/voxel_utils/__pycache__/region_proposal_network.cpython-37.pyc -------------------------------------------------------------------------------- /V2B_main/modules/voxel_utils/__pycache__/se.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/modules/voxel_utils/__pycache__/se.cpython-37.pyc -------------------------------------------------------------------------------- /V2B_main/modules/voxel_utils/__pycache__/torch_util.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/modules/voxel_utils/__pycache__/torch_util.cpython-36.pyc -------------------------------------------------------------------------------- /V2B_main/modules/voxel_utils/__pycache__/torch_util.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/modules/voxel_utils/__pycache__/torch_util.cpython-37.pyc -------------------------------------------------------------------------------- /V2B_main/modules/voxel_utils/__pycache__/voxel_grid.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/modules/voxel_utils/__pycache__/voxel_grid.cpython-36.pyc -------------------------------------------------------------------------------- /V2B_main/modules/voxel_utils/__pycache__/voxel_grid.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/modules/voxel_utils/__pycache__/voxel_grid.cpython-37.pyc -------------------------------------------------------------------------------- /V2B_main/modules/voxel_utils/__pycache__/voxelization.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/modules/voxel_utils/__pycache__/voxelization.cpython-37.pyc -------------------------------------------------------------------------------- /V2B_main/modules/voxel_utils/__pycache__/voxelnet.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/modules/voxel_utils/__pycache__/voxelnet.cpython-36.pyc -------------------------------------------------------------------------------- /V2B_main/modules/voxel_utils/__pycache__/voxelnet.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/modules/voxel_utils/__pycache__/voxelnet.cpython-37.pyc -------------------------------------------------------------------------------- /V2B_main/modules/voxel_utils/functional/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /V2B_main/modules/voxel_utils/functional/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/modules/voxel_utils/functional/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /V2B_main/modules/voxel_utils/functional/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/modules/voxel_utils/functional/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /V2B_main/modules/voxel_utils/functional/__pycache__/backend.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/modules/voxel_utils/functional/__pycache__/backend.cpython-36.pyc -------------------------------------------------------------------------------- /V2B_main/modules/voxel_utils/functional/__pycache__/backend.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/modules/voxel_utils/functional/__pycache__/backend.cpython-37.pyc 
-------------------------------------------------------------------------------- /V2B_main/modules/voxel_utils/functional/__pycache__/voxelization.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/modules/voxel_utils/functional/__pycache__/voxelization.cpython-36.pyc -------------------------------------------------------------------------------- /V2B_main/modules/voxel_utils/functional/__pycache__/voxelization.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/modules/voxel_utils/functional/__pycache__/voxelization.cpython-37.pyc -------------------------------------------------------------------------------- /V2B_main/modules/voxel_utils/functional/backend.py: -------------------------------------------------------------------------------- 1 | import os 2 | from torch.utils.cpp_extension import load 3 | 4 | _src_path = os.path.dirname(os.path.abspath(__file__)) 5 | _backend = load(name='_backend4', 6 | extra_cflags=['-O3', '-std=c++17'], 7 | sources=[os.path.join(_src_path,'src', f) for f in [ 8 | 'voxelization/vox.cpp', 9 | 'voxelization/vox.cu', 10 | 'voxelize_float/voxf.cpp', 11 | 'voxelize_float/voxf.cu', 12 | 'bindings.cpp', 13 | ]] 14 | ) 15 | 16 | __all__ = ['_backend'] 17 | -------------------------------------------------------------------------------- /V2B_main/modules/voxel_utils/functional/setup.py: -------------------------------------------------------------------------------- 1 | #python3 setup.py install 2 | from setuptools import setup 3 | from torch.utils.cpp_extension import BuildExtension, CUDAExtension 4 | 5 | setup( 6 | name='voxelization', 7 | ext_modules=[ 8 | CUDAExtension('voxelize_cuda', [ 9 | 'src/bindings.cpp', 10 | 11 | 'src/voxelization/vox.cpp', 12 | 
'src/voxelization/vox.cu' 13 | ],) 14 | ], 15 | cmdclass={'build_ext': BuildExtension}) 16 | -------------------------------------------------------------------------------- /V2B_main/modules/voxel_utils/functional/src/bindings.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "voxelization/vox.hpp" 4 | #include "voxelize_float/voxf.hpp" 5 | 6 | PYBIND11_MODULE(_backend4, m) { 7 | m.def("avg_voxelize_forward", &avg_voxelize_forward, 8 | "Voxelization forward with average pooling (CUDA)"); 9 | m.def("avg_voxelize_backward", &avg_voxelize_backward, 10 | "Voxelization backward (CUDA)"); 11 | m.def("favg_voxelize_forward", &favg_voxelize_forward, 12 | "fVoxelization forward with average pooling (CUDA)"); 13 | m.def("favg_voxelize_backward", &favg_voxelize_backward, 14 | "fVoxelization backward (CUDA)"); 15 | 16 | } 17 | -------------------------------------------------------------------------------- /V2B_main/modules/voxel_utils/functional/src/cuda_utils.cuh: -------------------------------------------------------------------------------- 1 | #ifndef _CUDA_UTILS_H 2 | #define _CUDA_UTILS_H 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | #include 9 | #include 10 | 11 | #include 12 | 13 | #define MAXIMUM_THREADS 512 14 | 15 | inline int optimal_num_threads(int work_size) { 16 | const int pow_2 = std::log2(static_cast(work_size)); 17 | return max(min(1 << pow_2, MAXIMUM_THREADS), 1); 18 | } 19 | 20 | inline dim3 optimal_block_config(int x, int y) { 21 | const int x_threads = optimal_num_threads(x); 22 | const int y_threads = 23 | max(min(optimal_num_threads(y), MAXIMUM_THREADS / x_threads), 1); 24 | dim3 block_config(x_threads, y_threads, 1); 25 | return block_config; 26 | } 27 | 28 | #define CUDA_CHECK_ERRORS() \ 29 | { \ 30 | cudaError_t err = cudaGetLastError(); \ 31 | if (cudaSuccess != err) { \ 32 | fprintf(stderr, "CUDA kernel failed : %s\n%s at L:%d in %s\n", \ 33 | cudaGetErrorString(err), 
__PRETTY_FUNCTION__, __LINE__, \ 34 | __FILE__); \ 35 | exit(-1); \ 36 | } \ 37 | } 38 | 39 | #endif 40 | -------------------------------------------------------------------------------- /V2B_main/modules/voxel_utils/functional/src/utils.hpp: -------------------------------------------------------------------------------- 1 | #ifndef _UTILS_HPP 2 | #define _UTILS_HPP 3 | 4 | #include 5 | #include 6 | 7 | #define CHECK_CUDA(x) TORCH_CHECK(x.device().is_cuda(), #x " must be a CUDA tensor") 8 | 9 | #define CHECK_CONTIGUOUS(x) \ 10 | TORCH_CHECK(x.is_contiguous(), #x " must be a contiguous tensor") 11 | 12 | #define CHECK_IS_INT(x) \ 13 | TORCH_CHECK(x.scalar_type() == at::ScalarType::Int, \ 14 | #x " must be an int tensor") 15 | 16 | #define CHECK_IS_FLOAT(x) \ 17 | TORCH_CHECK(x.scalar_type() == at::ScalarType::Float, \ 18 | #x " must be a float tensor") 19 | 20 | #endif 21 | -------------------------------------------------------------------------------- /V2B_main/modules/voxel_utils/functional/src/voxelization/vox.cpp: -------------------------------------------------------------------------------- 1 | #include "vox.hpp" 2 | #include "vox.cuh" 3 | 4 | #include "../utils.hpp" 5 | 6 | /* 7 | Function: average pool voxelization (forward) 8 | Args: 9 | features: features, FloatTensor[b, c, n] 10 | coords : coords of each point, IntTensor[b, 3, n] 11 | resolution : voxel resolution 12 | Return: 13 | out : outputs, FloatTensor[b, c, s], s = r ** 3 14 | ind : voxel index of each point, IntTensor[b, n] 15 | cnt : #points in each voxel index, IntTensor[b, s] 16 | */ 17 | std::vector avg_voxelize_forward(const at::Tensor features, 18 | const at::Tensor coords, 19 | const int x, 20 | const int y, 21 | const int z) { 22 | CHECK_CUDA(features); 23 | CHECK_CUDA(coords); 24 | CHECK_CONTIGUOUS(features); 25 | CHECK_CONTIGUOUS(coords); 26 | //CHECK_IS_FLOAT(features); 27 | CHECK_IS_INT(coords); 28 | 29 | int b = features.size(0); 30 | int c = features.size(1); 31 | int n = 
features.size(2); 32 | int r = z; 33 | int r2 = z * y; 34 | int r3 = r2 * x; 35 | at::Tensor ind = torch::zeros( 36 | {b, n}, at::device(features.device()).dtype(at::ScalarType::Int)); 37 | at::Tensor out = torch::zeros( 38 | {b, c, r3}, at::device(features.device()).dtype(at::ScalarType::Double)); 39 | at::Tensor cnt = torch::zeros( 40 | {b, r3}, at::device(features.device()).dtype(at::ScalarType::Int)); 41 | avg_voxelize(b, c, n, r, r2, r3, coords.data_ptr(), 42 | features.data_ptr(), ind.data_ptr(), 43 | cnt.data_ptr(), out.data_ptr()); 44 | return {out, ind, cnt}; 45 | } 46 | 47 | /* 48 | Function: average pool voxelization (backward) 49 | Args: 50 | grad_y : grad outputs, FloatTensor[b, c, s] 51 | indices: voxel index of each point, IntTensor[b, n] 52 | cnt : #points in each voxel index, IntTensor[b, s] 53 | Return: 54 | grad_x : grad inputs, FloatTensor[b, c, n] 55 | */ 56 | at::Tensor avg_voxelize_backward(const at::Tensor grad_y, 57 | const at::Tensor indices, 58 | const at::Tensor cnt) { 59 | CHECK_CUDA(grad_y); 60 | CHECK_CUDA(indices); 61 | CHECK_CUDA(cnt); 62 | CHECK_CONTIGUOUS(grad_y); 63 | CHECK_CONTIGUOUS(indices); 64 | CHECK_CONTIGUOUS(cnt); 65 | CHECK_IS_FLOAT(grad_y); 66 | CHECK_IS_INT(indices); 67 | CHECK_IS_INT(cnt); 68 | 69 | int b = grad_y.size(0); 70 | int c = grad_y.size(1); 71 | int s = grad_y.size(2); 72 | int n = indices.size(1); 73 | at::Tensor grad_x = torch::zeros( 74 | {b, c, n}, at::device(grad_y.device()).dtype(at::ScalarType::Float)); 75 | avg_voxelize_grad(b, c, n, s, indices.data_ptr(), cnt.data_ptr(), 76 | grad_y.data_ptr(), grad_x.data_ptr()); 77 | return grad_x; 78 | } 79 | -------------------------------------------------------------------------------- /V2B_main/modules/voxel_utils/functional/src/voxelization/vox.cuh: -------------------------------------------------------------------------------- 1 | #ifndef _VOX_CUH 2 | #define _VOX_CUH 3 | 4 | // CUDA function declarations 5 | void avg_voxelize(int b, int c, int n, int r, 
int r2, int r3, const int *coords, 6 | const double *feat, int *ind, int *cnt, double *out); 7 | void avg_voxelize_grad(int b, int c, int n, int s, const int *idx, 8 | const int *cnt, const float *grad_y, float *grad_x); 9 | 10 | #endif 11 | -------------------------------------------------------------------------------- /V2B_main/modules/voxel_utils/functional/src/voxelization/vox.hpp: -------------------------------------------------------------------------------- 1 | #ifndef _VOX_HPP 2 | #define _VOX_HPP 3 | 4 | #include 5 | #include 6 | 7 | std::vector avg_voxelize_forward(const at::Tensor features, 8 | const at::Tensor coords, 9 | const int x, 10 | const int y, 11 | const int z); 12 | 13 | at::Tensor avg_voxelize_backward(const at::Tensor grad_y, 14 | const at::Tensor indices, 15 | const at::Tensor cnt); 16 | 17 | #endif 18 | -------------------------------------------------------------------------------- /V2B_main/modules/voxel_utils/functional/src/voxelize_float/voxf.cpp: -------------------------------------------------------------------------------- 1 | #include "voxf.hpp" 2 | #include "voxf.cuh" 3 | 4 | #include "../utils.hpp" 5 | 6 | /* 7 | Function: average pool voxelization (forward) 8 | Args: 9 | features: features, FloatTensor[b, c, n] 10 | coords : coords of each point, IntTensor[b, 3, n] 11 | resolution : voxel resolution 12 | Return: 13 | out : outputs, FloatTensor[b, c, s], s = r ** 3 14 | ind : voxel index of each point, IntTensor[b, n] 15 | cnt : #points in each voxel index, IntTensor[b, s] 16 | */ 17 | std::vector favg_voxelize_forward(const at::Tensor features, 18 | const at::Tensor coords, 19 | const int x, 20 | const int y, 21 | const int z) { 22 | CHECK_CUDA(features); 23 | CHECK_CUDA(coords); 24 | CHECK_CONTIGUOUS(features); 25 | CHECK_CONTIGUOUS(coords); 26 | CHECK_IS_FLOAT(features); 27 | CHECK_IS_INT(coords); 28 | 29 | int b = features.size(0); 30 | int c = features.size(1); 31 | int n = features.size(2); 32 | int r = z; 33 | int r2 = 
z * y; 34 | int r3 = r2 * x; 35 | at::Tensor ind = torch::zeros( 36 | {b, n}, at::device(features.device()).dtype(at::ScalarType::Int)); 37 | at::Tensor out = torch::zeros( 38 | {b, c, r3}, at::device(features.device()).dtype(at::ScalarType::Float)); 39 | at::Tensor cnt = torch::zeros( 40 | {b, r3}, at::device(features.device()).dtype(at::ScalarType::Int)); 41 | favg_voxelize(b, c, n, r, r2, r3, coords.data_ptr(), 42 | features.data_ptr(), ind.data_ptr(), 43 | cnt.data_ptr(), out.data_ptr()); 44 | return {out, ind, cnt}; 45 | } 46 | 47 | /* 48 | Function: average pool voxelization (backward) 49 | Args: 50 | grad_y : grad outputs, FloatTensor[b, c, s] 51 | indices: voxel index of each point, IntTensor[b, n] 52 | cnt : #points in each voxel index, IntTensor[b, s] 53 | Return: 54 | grad_x : grad inputs, FloatTensor[b, c, n] 55 | */ 56 | at::Tensor favg_voxelize_backward(const at::Tensor grad_y, 57 | const at::Tensor indices, 58 | const at::Tensor cnt) { 59 | CHECK_CUDA(grad_y); 60 | CHECK_CUDA(indices); 61 | CHECK_CUDA(cnt); 62 | CHECK_CONTIGUOUS(grad_y); 63 | CHECK_CONTIGUOUS(indices); 64 | CHECK_CONTIGUOUS(cnt); 65 | CHECK_IS_FLOAT(grad_y); 66 | CHECK_IS_INT(indices); 67 | CHECK_IS_INT(cnt); 68 | 69 | int b = grad_y.size(0); 70 | int c = grad_y.size(1); 71 | int s = grad_y.size(2); 72 | int n = indices.size(1); 73 | at::Tensor grad_x = torch::zeros( 74 | {b, c, n}, at::device(grad_y.device()).dtype(at::ScalarType::Float)); 75 | favg_voxelize_grad(b, c, n, s, indices.data_ptr(), cnt.data_ptr(), 76 | grad_y.data_ptr(), grad_x.data_ptr()); 77 | return grad_x; 78 | } 79 | -------------------------------------------------------------------------------- /V2B_main/modules/voxel_utils/functional/src/voxelize_float/voxf.cuh: -------------------------------------------------------------------------------- 1 | #ifndef _VOXF_CUH 2 | #define _VOXF_CUH 3 | 4 | // CUDA function declarations 5 | void favg_voxelize(int b, int c, int n, int r, int r2, int r3, const int *coords, 6 | 
const float *feat, int *ind, int *cnt, float *out); 7 | void favg_voxelize_grad(int b, int c, int n, int s, const int *idx, 8 | const int *cnt, const float *grad_y, float *grad_x); 9 | 10 | #endif 11 | -------------------------------------------------------------------------------- /V2B_main/modules/voxel_utils/functional/src/voxelize_float/voxf.hpp: -------------------------------------------------------------------------------- 1 | #ifndef _VOXF_HPP 2 | #define _VOXF_HPP 3 | 4 | #include 5 | #include 6 | 7 | std::vector favg_voxelize_forward(const at::Tensor features, 8 | const at::Tensor coords, 9 | const int x, 10 | const int y, 11 | const int z); 12 | 13 | at::Tensor favg_voxelize_backward(const at::Tensor grad_y, 14 | const at::Tensor indices, 15 | const at::Tensor cnt); 16 | 17 | #endif 18 | -------------------------------------------------------------------------------- /V2B_main/modules/voxel_utils/functional/voxelization.py: -------------------------------------------------------------------------------- 1 | from torch.autograd import Function 2 | 3 | from modules.voxel_utils.functional.backend import _backend 4 | 5 | __all__ = ['avg_voxelize'] 6 | 7 | 8 | class AvgVoxelization(Function): 9 | @staticmethod 10 | def forward(ctx, features, coords, x,y,z): 11 | """ 12 | :param ctx: 13 | :param features: Features of the point cloud, FloatTensor[B, C, N] 14 | :param coords: Voxelized Coordinates of each point, IntTensor[B, 3, N] 15 | :param resolution: Voxel resolution 16 | :return: 17 | Voxelized Features, FloatTensor[B, C, x, y, z] 18 | """ 19 | features = features.contiguous() 20 | coords = coords.int().contiguous() 21 | b, c, _ = features.shape 22 | features_d=features.double() 23 | out, indices, counts = _backend.avg_voxelize_forward(features_d, coords, x,y,z) 24 | out=out.float() 25 | ctx.save_for_backward(indices, counts) 26 | return out.view(b, c, x, y, z) 27 | 28 | @staticmethod 29 | def backward(ctx, grad_output): 30 | """ 31 | :param ctx: 32 | 
:param grad_output: gradient of output, FloatTensor[B, C, R, R, R] 33 | :return: 34 | gradient of inputs, FloatTensor[B, C, N] 35 | """ 36 | b, c = grad_output.shape[:2] 37 | indices, counts = ctx.saved_tensors 38 | grad_features = _backend.avg_voxelize_backward(grad_output.contiguous().view(b, c, -1), indices, counts) 39 | return grad_features, None, None,None,None 40 | 41 | 42 | avg_voxelize = AvgVoxelization.apply 43 | 44 | class fAvgVoxelization(Function): 45 | @staticmethod 46 | def forward(ctx, features, coords, x,y,z): 47 | """ 48 | :param ctx: 49 | :param features: Features of the point cloud, FloatTensor[B, C, N] 50 | :param coords: Voxelized Coordinates of each point, IntTensor[B, 3, N] 51 | :param resolution: Voxel resolution 52 | :return: 53 | Voxelized Features, FloatTensor[B, C, x, y, z] 54 | """ 55 | features = features.contiguous() 56 | coords = coords.int().contiguous() 57 | b, c, _ = features.shape 58 | out, indices, counts = _backend.favg_voxelize_forward(features, coords, x,y,z) 59 | ctx.save_for_backward(indices, counts) 60 | return out.view(b, c, x, y, z) 61 | 62 | @staticmethod 63 | def backward(ctx, grad_output): 64 | """ 65 | :param ctx: 66 | :param grad_output: gradient of output, FloatTensor[B, C, R, R, R] 67 | :return: 68 | gradient of inputs, FloatTensor[B, C, N] 69 | """ 70 | b, c = grad_output.shape[:2] 71 | indices, counts = ctx.saved_tensors 72 | grad_features = _backend.favg_voxelize_backward(grad_output.contiguous().view(b, c, -1), indices, counts) 73 | return grad_features, None, None,None,None 74 | 75 | favg_voxelize = fAvgVoxelization.apply -------------------------------------------------------------------------------- /V2B_main/modules/voxel_utils/se.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | 3 | __all__ = ['SE3d'] 4 | 5 | 6 | class SE3d(nn.Module): 7 | def __init__(self, channel, reduction=8): 8 | super().__init__() 9 | self.fc = nn.Sequential( 10 | 
nn.Linear(channel, channel // reduction, bias=False), 11 | nn.ReLU(inplace=True), 12 | nn.Linear(channel // reduction, channel, bias=False), 13 | nn.Sigmoid() 14 | ) 15 | 16 | def forward(self, inputs): 17 | return inputs * self.fc(inputs.mean(-1).mean(-1).mean(-1)).view(inputs.shape[0], inputs.shape[1], 1, 1, 1) 18 | -------------------------------------------------------------------------------- /V2B_main/modules/voxel_utils/voxel/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/modules/voxel_utils/voxel/__init__.py -------------------------------------------------------------------------------- /V2B_main/modules/voxel_utils/voxel/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/modules/voxel_utils/voxel/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /V2B_main/modules/voxel_utils/voxel/__pycache__/block.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/modules/voxel_utils/voxel/__pycache__/block.cpython-37.pyc -------------------------------------------------------------------------------- /V2B_main/modules/voxel_utils/voxel/__pycache__/region_proposal_network.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/modules/voxel_utils/voxel/__pycache__/region_proposal_network.cpython-37.pyc -------------------------------------------------------------------------------- 
/V2B_main/modules/voxel_utils/voxel/__pycache__/torch_util.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/modules/voxel_utils/voxel/__pycache__/torch_util.cpython-37.pyc -------------------------------------------------------------------------------- /V2B_main/modules/voxel_utils/voxel/__pycache__/voxelnet.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/modules/voxel_utils/voxel/__pycache__/voxelnet.cpython-37.pyc -------------------------------------------------------------------------------- /V2B_main/modules/voxel_utils/voxel/block.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | class GroupCompletion(nn.Module): 4 | def __init__(self,inplane): 5 | super(GroupCompletion, self).__init__() 6 | self.one=nn.Sequential( 7 | nn.Conv1d(inplane,2*inplane,1,1), 8 | nn.ReLU(inplace=True), 9 | nn.Conv1d(2*inplane,3*2048,1,1) 10 | ) 11 | # self.two= 12 | # self.three= 13 | def forward(self, x): 14 | out=self.one(x) 15 | return out -------------------------------------------------------------------------------- /V2B_main/modules/voxel_utils/voxel/torch_util.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | 3 | class Conv2d(nn.Module): 4 | def __init__(self, inplanes, planes, stride, padding): 5 | super(Conv2d, self).__init__() 6 | self.conv1 = nn.Conv2d(inplanes, planes, 3, stride=stride, padding=padding) 7 | self.bn1 = nn.BatchNorm2d(planes) 8 | self.relu = nn.ReLU(inplace=True) 9 | 10 | def forward(self, x): 11 | out = self.conv1(x) 12 | out = self.bn1(out) 13 | out = self.relu(out) 14 | return out 15 | 16 | class Conv3d(nn.Module): 17 | def 
__init__(self, inplanes, planes, stride, padding): 18 | super(Conv3d, self).__init__() 19 | self.conv1 = nn.Conv3d(inplanes, planes, 3, stride=stride, padding=padding) 20 | self.bn1 = nn.BatchNorm3d(planes) 21 | self.relu = nn.ReLU(inplace=True) 22 | 23 | def forward(self, x): 24 | out = self.conv1(x) 25 | out = self.bn1(out) 26 | out = self.relu(out) 27 | return out -------------------------------------------------------------------------------- /V2B_main/modules/voxel_utils/voxel/voxelnet.py: -------------------------------------------------------------------------------- 1 | from .torch_util import Conv2d,Conv3d 2 | import torch.nn.functional as F 3 | import torch.nn as nn 4 | import torch 5 | import time 6 | from collections import OrderedDict 7 | import logging 8 | logger = logging.getLogger('global') 9 | 10 | class FCN(nn.Module): 11 | def __init__(self, inplanes, planes): 12 | super(FCN, self).__init__() 13 | planes = int(planes/2) 14 | self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, bias=True) 15 | self.bn1 = nn.BatchNorm2d(planes) 16 | self.relu = nn.ReLU(inplace=True) 17 | 18 | def forward(self, x): 19 | out = self.conv1(x) 20 | out = self.bn1(out) 21 | out = self.relu(out) 22 | return out 23 | 24 | class VFE(nn.Module): 25 | def __init__(self, inplanes, planes): 26 | super(VFE, self).__init__() 27 | self.fcn1 = FCN(inplanes, planes) 28 | 29 | def forward(self, x): 30 | batch, channel, voxels, num_T = x.size() 31 | out = self.fcn1(x) 32 | point_wise_feature = F.max_pool2d(out, kernel_size=[1, num_T], stride=[1, num_T]) 33 | logger.debug('point_wise_feature size: {}'.format(point_wise_feature.size())) 34 | out = torch.cat((out, point_wise_feature.repeat(1, 1, 1, num_T)), 1) 35 | logger.debug('VFE size: {}'.format(out.size())) 36 | return out 37 | 38 | class Conv_Middle_layers(nn.Module): 39 | def __init__(self,inplanes): 40 | super(Conv_Middle_layers, self).__init__() 41 | self.conv1 = Conv3d(inplanes, 64, stride=(2, 1, 1), padding=(1, 1, 
1)) 42 | self.conv2 = Conv3d(64, 64, stride=(1, 1, 1), padding=(0, 1, 1)) 43 | self.conv3 = Conv3d(64, 64, stride=(2, 1, 1), padding=(1, 1, 1)) 44 | self.conv4 = nn.Sequential(OrderedDict([ 45 | # ('conv3d',nn.Conv3d(64,128,kernel_size=(1,1,1),stride=(1,1,1),padding=(0,0,0))), 46 | ('conv3d',nn.Conv3d(64,128,kernel_size=(3,1,1),stride=(1,1,1),padding=(0,0,0))), 47 | ('bn',nn.BatchNorm3d(128)), 48 | ('relu',nn.ReLU(inplace=True)) 49 | ])) 50 | 51 | def forward(self, x): 52 | out = self.conv1(x) 53 | out = self.conv2(out) 54 | out = self.conv3(out) 55 | out=self.conv4(out) 56 | shape = out.size() 57 | # print("conv3d feature before maxpool: {}".format(shape)) 58 | out=F.max_pool3d(out,kernel_size=[shape[2], 1, 1]) 59 | out=out.squeeze(2) 60 | # print("conv3d feature size: {}".format(out.size())) 61 | return out 62 | 63 | -------------------------------------------------------------------------------- /V2B_main/modules/voxel_utils/voxelization.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from modules.voxel_utils.functional.voxelization import avg_voxelize,favg_voxelize 4 | 5 | __all__ = ['Voxelization'] 6 | 7 | 8 | class Voxelization(nn.Module): 9 | def __init__(self, x,y,z,scene_ground=torch.tensor([-5.6, -3.6, -2.4]),voxel_size=torch.tensor([0.3, 0.3, 0.2]),mode=True): 10 | super().__init__() 11 | self.x = int(x) 12 | self.y = int(y) 13 | self.z = int(z) 14 | self.scene_ground=scene_ground 15 | self.voxel_size=voxel_size 16 | self.min_voxel_coord=torch.floor(self.scene_ground/ self.voxel_size) 17 | self.resolution=(-2*self.min_voxel_coord).int() 18 | self.mode=mode 19 | 20 | def forward(self, features, coords): 21 | #(b,c,n)(b,n,3) 22 | coords_detach = coords.detach() 23 | discrete_pts = torch.floor(coords_detach / self.voxel_size.cuda()) 24 | voxel_indices = (discrete_pts - self.min_voxel_coord.cuda()).int() 25 | voxel_indices=voxel_indices.transpose(1, 2).contiguous() 26 | if 
self.mode: 27 | return favg_voxelize(features, voxel_indices, self.x,self.y,self.z) 28 | else: 29 | return avg_voxelize(features, voxel_indices, self.x, self.y, self.z) 30 | 31 | def extra_repr(self): 32 | print('information:x {} y {} z {} min_voxel_coord {} voxel_size {} '.format(self.x,self.y,self.z,self.min_voxel_coord,self.voxel_size)) 33 | # if __name__ == '__main__': 34 | # conv=nn.Conv1d(128,128,1,1) 35 | # voxel=Voxelization(56,36,24) 36 | # voxel.cuda() 37 | # conv.cuda() 38 | # coords_x=torch.rand((1,2048,1),dtype=torch.float32)*5.6 39 | # coords_y=torch.rand((1,2048,1),dtype=torch.float32)*3.6 40 | # coords_z=torch.rand((1,2048,1),dtype=torch.float32)*2.4 41 | # # coord=torch.tensor([[0.1,0.1,0.1],[0,0,0],[0.05,0.05,0.05]],dtype=torch.float32).unsqueeze(dim=0).cuda() 42 | # coord=torch.cat([coords_x,coords_y,coords_z],dim=2).cuda() 43 | # output1 = [[], []] 44 | # for i in range(2): 45 | # import time 46 | # import random 47 | # ti1=time.time() 48 | # random.seed(0) 49 | # torch.manual_seed(0) 50 | # # torch.cuda.manual_seed(0) 51 | # features = torch.rand(1, 128, 2048, dtype=torch.float32).cuda().requires_grad_() 52 | # out = conv(features) 53 | # voxels=voxel(out,coord) 54 | # ti2 = time.time() 55 | # print(ti2-ti1) 56 | # output1[i].append(voxels.clone().detach()) 57 | # for t1, t2 in zip(output1[0], output1[1]): 58 | # print(t1.equal(t2)) 59 | # # print(voxels[0,:,28,18,12]) 60 | # voxels=voxels.permute(0, 1, 4, 3, 2).contiguous() 61 | # voxels=voxels.sum() 62 | # voxels.backward() 63 | # print(features.grad,features.dtype,voxels.dtype) -------------------------------------------------------------------------------- /V2B_main/results/kitti_car/Epoch30.pth: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/results/kitti_car/Epoch30.pth -------------------------------------------------------------------------------- 
/V2B_main/results/kitti_pedestrian/Epoch30.pth: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/results/kitti_pedestrian/Epoch30.pth -------------------------------------------------------------------------------- /V2B_main/trainers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/trainers/__init__.py -------------------------------------------------------------------------------- /V2B_main/trainers/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/trainers/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /V2B_main/trainers/__pycache__/tester.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/trainers/__pycache__/tester.cpython-37.pyc -------------------------------------------------------------------------------- /V2B_main/trainers/__pycache__/trainer.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/trainers/__pycache__/trainer.cpython-37.pyc -------------------------------------------------------------------------------- /V2B_main/utils/__pycache__/attr_dict.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/utils/__pycache__/attr_dict.cpython-37.pyc 
-------------------------------------------------------------------------------- /V2B_main/utils/__pycache__/attr_dict.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/utils/__pycache__/attr_dict.cpython-38.pyc -------------------------------------------------------------------------------- /V2B_main/utils/__pycache__/data_classes.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/utils/__pycache__/data_classes.cpython-37.pyc -------------------------------------------------------------------------------- /V2B_main/utils/__pycache__/decode.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/utils/__pycache__/decode.cpython-37.pyc -------------------------------------------------------------------------------- /V2B_main/utils/__pycache__/kitti_utils.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/utils/__pycache__/kitti_utils.cpython-37.pyc -------------------------------------------------------------------------------- /V2B_main/utils/__pycache__/metrics.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/utils/__pycache__/metrics.cpython-37.pyc -------------------------------------------------------------------------------- /V2B_main/utils/__pycache__/options.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/utils/__pycache__/options.cpython-37.pyc -------------------------------------------------------------------------------- /V2B_main/utils/__pycache__/options.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/utils/__pycache__/options.cpython-38.pyc -------------------------------------------------------------------------------- /V2B_main/utils/__pycache__/searchspace.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/utils/__pycache__/searchspace.cpython-37.pyc -------------------------------------------------------------------------------- /V2B_main/utils/__pycache__/show_line.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/utils/__pycache__/show_line.cpython-37.pyc -------------------------------------------------------------------------------- /V2B_main/utils/__pycache__/show_line.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/utils/__pycache__/show_line.cpython-38.pyc -------------------------------------------------------------------------------- /V2B_main/utils/attr_dict.py: -------------------------------------------------------------------------------- 1 | class AttrDict(dict): 2 | 3 | def __init__(self, *args, **kwargs): 4 | super(AttrDict, self).__init__(*args, **kwargs) 5 | 6 | def __getattr__(self, key): 7 | if key.startswith('__'): 8 | raise AttributeError 9 | return self.get(key, None) 10 | 
11 | def __setattr__(self, key, value): 12 | if key.startswith('__'): 13 | raise AttributeError("Cannot set magic attribute '{}'".format(key)) 14 | self[key] = value -------------------------------------------------------------------------------- /V2B_main/utils/decode.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 5 | import torch 6 | import torch.nn as nn 7 | from utils.loss.utils import _gather_feat, _tranpose_and_gather_feat 8 | 9 | def _nms(heat, kernel=3): 10 | pad = (kernel - 1) // 2 11 | 12 | hmax = nn.functional.max_pool2d( 13 | heat, (kernel, kernel), stride=1, padding=pad) 14 | keep = (hmax == heat).float() 15 | return heat * keep 16 | 17 | 18 | def _topk_channel(scores, K=40): 19 | batch, cat, height, width = scores.size() 20 | 21 | topk_scores, topk_inds = torch.topk(scores.view(batch, cat, -1), K) 22 | 23 | topk_inds = topk_inds % (height * width) 24 | topk_ys = (topk_inds / width).int().float() 25 | topk_xs = (topk_inds % width).int().float() 26 | 27 | return topk_scores, topk_inds, topk_ys, topk_xs 28 | 29 | def _topk(scores, K=40): 30 | batch, cat, height, width = scores.size() 31 | 32 | topk_scores, topk_inds = torch.topk(scores.view(batch,-1), K) 33 | 34 | topk_inds = topk_inds % (height * width) 35 | topk_ys = (topk_inds / width).int().float() 36 | topk_xs = (topk_inds % width).int().float() 37 | 38 | 39 | return topk_scores, topk_inds, topk_ys, topk_xs 40 | 41 | 42 | def mot_decode(heat, reg=None, z=None, K=5): 43 | batch, cat, height, width = heat.size() 44 | 45 | # heat = torch.sigmoid(heat) 46 | # perform nms on heatmaps 47 | # heat = _nms(heat) 48 | 49 | scores, inds, ys, xs = _topk(heat, K=K) 50 | if reg is not None: 51 | reg = _tranpose_and_gather_feat(reg, inds) 52 | reg = reg.view(batch, K, 3) 53 | xs = xs.view(batch, K, 1) + reg[:, :, 0:1] 54 | ys = ys.view(batch, K, 1) + 
reg[:, :, 1:2] 55 | ry=reg[:,:,2].unsqueeze(dim=2) 56 | else: 57 | xs = xs.view(batch, K, 1) + 0.5 58 | ys = ys.view(batch, K, 1) + 0.5 59 | z = _tranpose_and_gather_feat(z, inds) 60 | 61 | scores = scores.view(batch, K, 1) 62 | xy_img_z = torch.cat([xs,ys,z,ry,scores],dim=2) 63 | 64 | return xy_img_z 65 | if __name__ == '__main__': 66 | hm=torch.rand(1,1,36,56) 67 | wh=torch.rand(1,3,36,56)*3 68 | reg=torch.rand(1,3,36,56) 69 | z=torch.rand(1,1,36,56) 70 | _,_=mot_decode(hm,wh,reg,z) 71 | -------------------------------------------------------------------------------- /V2B_main/utils/loss/PCLosses.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | 5 | # Calculate the pairwise distance between point clouds in a batch 6 | def batch_pairwise_dist(x, y, use_cuda=True): 7 | x = x.transpose(2, 1) 8 | y = y.transpose(2, 1) 9 | bs, num_points_x, points_dim = x.size() 10 | _, num_points_y, _ = y.size() 11 | xx = torch.bmm(x, x.transpose(2, 1)) 12 | yy = torch.bmm(y, y.transpose(2, 1)) 13 | zz = torch.bmm(x, y.transpose(2, 1)) 14 | if use_cuda: 15 | dtype = torch.cuda.LongTensor 16 | else: 17 | dtype = torch.LongTensor 18 | diag_ind_x = torch.arange(0, num_points_x).cuda() 19 | diag_ind_y = torch.arange(0, num_points_y).cuda() 20 | rx = xx[:, diag_ind_x, diag_ind_x].unsqueeze(1).expand_as( 21 | zz.transpose(2, 1)) 22 | ry = yy[:, diag_ind_y, diag_ind_y].unsqueeze(1).expand_as(zz) 23 | # torch.cuda.empty_cache() 24 | P = (rx.transpose(2, 1) + ry - 2 * zz) 25 | return P 26 | 27 | 28 | # Calculate Chamfer Loss 29 | class ChamferLoss(nn.Module): 30 | 31 | def __init__(self): 32 | super(ChamferLoss, self).__init__() 33 | self.use_cuda = torch.cuda.is_available() 34 | 35 | def forward(self, preds, gts,idx): 36 | # preds & gts of size (BS, 3, N) 37 | 38 | bs,points_dim,num_points_x,seeds = preds.size() 39 | if idx is not None: 40 | num = idx.size(0) 41 | 
preds=preds.permute(0,3,1,2).contiguous()[idx[:,0],idx[:,1]] 42 | gts=gts.unsqueeze(-1).expand(bs,points_dim,num_points_x,seeds).permute(0,3,1,2).contiguous()[idx[:,0],idx[:,1]] 43 | elif idx is None: 44 | preds=preds.permute(0,3,1,2).contiguous().view(bs*seeds,points_dim,num_points_x) 45 | gts = gts.unsqueeze(-1).expand(bs, points_dim, num_points_x, seeds).permute(0, 3, 1, 2).contiguous().view(bs*seeds,points_dim,num_points_x) 46 | num = preds.size(0) 47 | 48 | P = batch_pairwise_dist(preds, gts, self.use_cuda) 49 | # P of size (BS, 3, N) 50 | mins1, _ = torch.min(P, 1) 51 | # mins1=mins1.view(bs,-1,num_points_x) 52 | loss_1 = torch.sum(mins1) # sum of all batches 53 | mins2, _ = torch.min(P, 2) 54 | # mins2 = mins2.view(bs, -1, num_points_x) 55 | loss_2 = torch.sum(mins2) # sum of all batches 56 | 57 | return (loss_1 + loss_2)/num 58 | 59 | 60 | # Calculate accuracy and completeness between two point clouds 61 | def acc_comp(preds, gts, rho=0.02): 62 | P = batch_pairwise_dist(preds, gts).abs().sqrt() 63 | pred_mins, _ = torch.min(P, 2) 64 | gts_mins, _ = torch.min(P, 1) 65 | acc = pred_mins.mean(dim=1, dtype=torch.float) 66 | comp = gts_mins.mean(dim=1, dtype=torch.float) 67 | return acc, comp 68 | -------------------------------------------------------------------------------- /V2B_main/utils/loss/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/utils/loss/__init__.py -------------------------------------------------------------------------------- /V2B_main/utils/loss/__pycache__/PCLosses.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/utils/loss/__pycache__/PCLosses.cpython-37.pyc -------------------------------------------------------------------------------- 
/V2B_main/utils/loss/__pycache__/PCLosses.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/utils/loss/__pycache__/PCLosses.cpython-38.pyc -------------------------------------------------------------------------------- /V2B_main/utils/loss/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/utils/loss/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /V2B_main/utils/loss/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/utils/loss/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /V2B_main/utils/loss/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/utils/loss/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /V2B_main/utils/loss/__pycache__/losses.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/utils/loss/__pycache__/losses.cpython-37.pyc -------------------------------------------------------------------------------- /V2B_main/utils/loss/__pycache__/losses.cpython-38.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/utils/loss/__pycache__/losses.cpython-38.pyc -------------------------------------------------------------------------------- /V2B_main/utils/loss/__pycache__/utils.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/utils/loss/__pycache__/utils.cpython-36.pyc -------------------------------------------------------------------------------- /V2B_main/utils/loss/__pycache__/utils.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/utils/loss/__pycache__/utils.cpython-37.pyc -------------------------------------------------------------------------------- /V2B_main/utils/loss/__pycache__/utils.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/utils/loss/__pycache__/utils.cpython-38.pyc -------------------------------------------------------------------------------- /V2B_main/utils/loss/utils.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 5 | import torch 6 | 7 | 8 | def _sigmoid(x): 9 | y = torch.clamp(x.sigmoid_(), min=1e-4, max=1-1e-4) 10 | return y 11 | 12 | def _gather_feat(feat, ind, mask=None): 13 | dim = feat.size(2) 14 | ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim) 15 | feat = feat.gather(1, ind) 16 | if mask is not None: 17 | mask = mask.unsqueeze(2).expand_as(feat) 18 | feat = feat[mask] 19 | feat = feat.view(-1, dim) 20 | return feat 21 | 22 | def 
def flip_lr(x, flip_idx):
    """Mirror a (B, C, H, W) tensor along its last axis and swap paired channels.

    ``flip_idx`` holds (left, right) channel-index pairs whose feature maps must
    trade places after the horizontal flip (e.g. left/right keypoint heatmaps).
    The work happens on a detached CPU numpy copy; the result is moved back to
    ``x``'s device.
    """
    flipped = x.detach().cpu().numpy()[..., ::-1].copy()
    for pair in flip_idx:
        left, right = pair[0], pair[1]
        # Classic three-step swap of the two channel slices.
        held = flipped[:, left, ...].copy()
        flipped[:, left, ...] = flipped[:, right, ...]
        flipped[:, right, ...] = held
    return torch.from_numpy(flipped).to(x.device)
def print_info(ncols, info, placeholder='='):
    """Print ``info`` centered on a rule of ``placeholder`` characters spanning
    ``ncols`` terminal columns, e.g. ``======== epoch 3 ========``."""
    pad = ncols - len(info)
    # One leading/trailing space on each side accounts for the "-1" in the
    # placeholder counts; the right half absorbs the odd column when pad is odd.
    left = int(pad / 2) - 1
    right = int((pad + 1) / 2) - 1
    print(placeholder * left, info, placeholder * right)
-------------------------------------------------------------------------------- /V2B_main/visualization/__pycache__/LineMesh.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/visualization/__pycache__/LineMesh.cpython-38.pyc -------------------------------------------------------------------------------- /V2B_main/visualization/result/kitti_car_2/0000.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/visualization/result/kitti_car_2/0000.jpg -------------------------------------------------------------------------------- /V2B_main/visualization/result/kitti_car_2/0005.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/visualization/result/kitti_car_2/0005.jpg -------------------------------------------------------------------------------- /V2B_main/visualization/result/kitti_car_2/0010.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/visualization/result/kitti_car_2/0010.jpg -------------------------------------------------------------------------------- /V2B_main/visualization/result/kitti_car_2/0015.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/visualization/result/kitti_car_2/0015.jpg -------------------------------------------------------------------------------- /V2B_main/visualization/result/kitti_car_2/0020.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/visualization/result/kitti_car_2/0020.jpg -------------------------------------------------------------------------------- /V2B_main/visualization/result/kitti_car_2/0025.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/visualization/result/kitti_car_2/0025.jpg -------------------------------------------------------------------------------- /V2B_main/visualization/result/kitti_car_2/0030.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/visualization/result/kitti_car_2/0030.jpg -------------------------------------------------------------------------------- /V2B_main/visualization/result/kitti_car_2/0035.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/visualization/result/kitti_car_2/0035.jpg -------------------------------------------------------------------------------- /V2B_main/visualization/result/kitti_car_2/0040.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/visualization/result/kitti_car_2/0040.jpg -------------------------------------------------------------------------------- /V2B_main/visualization/result/kitti_car_2/0045.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/V2B_main/visualization/result/kitti_car_2/0045.jpg -------------------------------------------------------------------------------- 
def show_one_frame( pc,
                    gt_bbox,
                    pred_bbox,
                    capture_path=None,
                    window_pose=(960, 540, 100, 100),
                    ):
    """Render one tracking frame: scene point cloud plus GT and predicted boxes.

    :param pc: (N, 3) array of scene points — assumed from the ``pc[i].T`` call
        site below; TODO confirm against the dataset pickle layout.
    :param gt_bbox: (8, 3) ground-truth box corners; corner order must match
        ``lines_box``.
    :param pred_bbox: (8, 3) predicted box corners.
    :param capture_path: If given, save a screenshot of the window to this path.
    :param window_pose: (width, height, left, top) of the visualizer window.
        The tuple default replaces the original mutable-list default, which is
        shared across calls (classic Python pitfall); callers passing lists
        still work since it is only indexed.
    """
    vis = o3d.visualization.Visualizer()
    vis.create_window(width=window_pose[0], height=window_pose[1], left=window_pose[2], top=window_pose[3])

    # Uniform grey for all scene points.
    scene_color = np.ones((pc.shape[0], 3)) * 0.4
    point_cloud = o3d.geometry.PointCloud()
    point_cloud.points = o3d.utility.Vector3dVector(pc)
    point_cloud.colors = o3d.utility.Vector3dVector(scene_color)

    # Edges of a 3D box over its 8 corners: bottom face, top face, verticals.
    lines_box = np.array([[0, 1], [1, 2], [2, 3], [3, 0],
                          [4, 5], [5, 6], [6, 7], [7, 4],
                          [0, 4], [1, 5], [2, 6], [3, 7]])

    gt_colors = np.array([[0., 1., 0.] for _ in range(len(lines_box))])  # green
    gt_line_mesh = LineMesh(gt_bbox, lines_box, gt_colors, radius=0.02)

    pred_colors = np.array([[1., 0., 0.] for _ in range(len(lines_box))])  # red
    pred_line_mesh = LineMesh(pred_bbox, lines_box, pred_colors, radius=0.02)

    vis.add_geometry(point_cloud)
    gt_line_mesh.add_line(vis)
    pred_line_mesh.add_line(vis)

    # Blocks until the user closes the window.
    vis.run()

    # Save picture after the interactive session so it captures the final view.
    if capture_path is not None:
        vis.capture_screen_image(capture_path)

    vis.destroy_window()
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | -------------------------------------------------------------------------------- /nuscenes-devkit-master/docs/faqs.md: -------------------------------------------------------------------------------- 1 | # Frequently asked questions 2 | On this page we try to answer questions frequently asked by our users. 3 | 4 | - How can I get in contact? 5 | - For questions about commercialization, collaboration and marketing, please contact [nuScenes@nuTonomy.com](mailto:nuScenes@nuTonomy.com). 6 | - For issues and bugs *with the devkit*, file an issue on [Github](https://github.com/nutonomy/nuscenes-devkit/issues). 7 | - For any other questions, please post in the [nuScenes user forum](https://forum.nuscenes.org/). 8 | 9 | - Can I use nuScenes and nuImages for free? 10 | - For non-commercial use [nuScenes and nuImages are free](https://www.nuscenes.org/terms-of-use), e.g. for educational use and some research use. 11 | - For commercial use please contact [nuScenes@nuTonomy.com](mailto:nuScenes@nuTonomy.com). To allow startups to use our dataset, we adjust the pricing terms to the use case and company size. 12 | 13 | - How can I participate in the nuScenes challenges? 14 | - See the overview site for the [object detection challenge](https://www.nuscenes.org/object-detection). 15 | - See the overview site for the [tracking challenge](https://www.nuscenes.org/tracking). 16 | - See the overview site for the [prediction challenge](https://www.nuscenes.org/prediction). 17 | 18 | - How can I get more information on the sensors used? 
19 | - Read the [Data collection](https://www.nuscenes.org/data-collection) page. 20 | - Note that we do not *publicly* reveal the vendor name and model to avoid endorsing a particular vendor. All sensors are publicly available from third-party vendors. 21 | - For more information, please contact [nuScenes@nuTonomy.com](mailto:nuScenes@nuTonomy.com). 22 | 23 | - Can I use nuScenes for 2d object detection? 24 | - Objects in nuScenes are annotated in 3d. 25 | - You can use [this script](https://github.com/nutonomy/nuscenes-devkit/blob/master/python-sdk/nuscenes/scripts/export_2d_annotations_as_json.py) to project them to 2d, but note that such 2d boxes are not generally tight. 26 | 27 | - How can I share my new dataset / paper for Autonomous Driving? 28 | - Please contact [nuScenes@nuTonomy.com](mailto:nuScenes@nuTonomy.com) to discuss possible collaborations and listing your work on the [Publications](https://www.nuscenes.org/publications) page. 29 | - To discuss it with the community, please post in the [nuScenes user forum](https://forum.nuscenes.org/). -------------------------------------------------------------------------------- /nuscenes-devkit-master/python-sdk/nuimages/__init__.py: -------------------------------------------------------------------------------- 1 | from .nuimages import NuImages 2 | -------------------------------------------------------------------------------- /nuscenes-devkit-master/python-sdk/nuimages/export/export_release.py: -------------------------------------------------------------------------------- 1 | # nuScenes dev-kit. 2 | # Code written by Holger Caesar, 2020. 3 | 4 | import fire 5 | import os 6 | import json 7 | import tarfile 8 | from typing import List 9 | 10 | 11 | def export_release(dataroot='/data/sets/nuimages', version: str = 'v1.0') -> None: 12 | """ 13 | This script tars the image and metadata files for release on https://www.nuscenes.org/download. 14 | :param dataroot: The nuImages folder. 
def pack_folder(out_path: str, dataroot: str, folder_list: List[str], tar_format: str = 'w:gz') -> None:
    """
    Archive the given files/folders (relative to dataroot) into one tar file.
    :param out_path: The output path where we write the tar file.
    :param dataroot: The nuImages folder.
    :param folder_list: List of files or folders to include in the archive.
    :param tar_format: The compression format to use. See tarfile package for more options.
    """
    # Context manager guarantees the archive is finalized and closed even if
    # tar.add() raises (e.g. a missing folder) — the original manual
    # open()/close() pair leaked the handle and could leave a truncated,
    # unreadable .tgz on error.
    with tarfile.open(out_path, tar_format) as tar:
        for name in folder_list:
            folder_path = os.path.join(dataroot, name)
            tar.add(folder_path, arcname=name)
28 | parser = argparse.ArgumentParser(description='Test that the installed dataset is complete.', 29 | formatter_class=argparse.ArgumentDefaultsHelpFormatter) 30 | parser.add_argument('--dataroot', type=str, default='/data/sets/nuimages', 31 | help='Default nuImages data directory.') 32 | parser.add_argument('--version', type=str, default='v1.0-train', 33 | help='Which version of the nuImages dataset to evaluate on, e.g. v1.0-train.') 34 | parser.add_argument('--verbose', type=int, default=1, 35 | help='Whether to print to stdout.') 36 | 37 | args = parser.parse_args() 38 | dataroot = args.dataroot 39 | version = args.version 40 | verbose = bool(args.verbose) 41 | 42 | # Init. 43 | nuim_ = NuImages(version=version, verbose=verbose, dataroot=dataroot) 44 | 45 | # Verify data blobs. 46 | verify_setup(nuim_) 47 | -------------------------------------------------------------------------------- /nuscenes-devkit-master/python-sdk/nuimages/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/nuscenes-devkit-master/python-sdk/nuimages/utils/__init__.py -------------------------------------------------------------------------------- /nuscenes-devkit-master/python-sdk/nuimages/utils/test_nuimages.py: -------------------------------------------------------------------------------- 1 | # nuScenes dev-kit. 2 | # Code written by Holger Caesar, 2020. 3 | 4 | import os 5 | import unittest 6 | 7 | from nuimages import NuImages 8 | 9 | 10 | class TestNuImages(unittest.TestCase): 11 | 12 | def test_load(self): 13 | """ 14 | Loads up NuImages. 15 | This is intended to simply run the NuImages class to check for import errors, typos, etc. 16 | """ 17 | 18 | assert 'NUIMAGES' in os.environ, 'Set NUIMAGES env. variable to enable tests.' 
19 | nuim = NuImages(version='v1.0-mini', dataroot=os.environ['NUIMAGES'], verbose=False) 20 | 21 | # Trivial assert statement 22 | self.assertEqual(nuim.table_root, os.path.join(os.environ['NUIMAGES'], 'v1.0-mini')) 23 | 24 | 25 | if __name__ == '__main__': 26 | unittest.main() 27 | -------------------------------------------------------------------------------- /nuscenes-devkit-master/python-sdk/nuscenes/__init__.py: -------------------------------------------------------------------------------- 1 | from .nuscenes import NuScenes, NuScenesExplorer 2 | -------------------------------------------------------------------------------- /nuscenes-devkit-master/python-sdk/nuscenes/eval/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/nuscenes-devkit-master/python-sdk/nuscenes/eval/__init__.py -------------------------------------------------------------------------------- /nuscenes-devkit-master/python-sdk/nuscenes/eval/common/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/nuscenes-devkit-master/python-sdk/nuscenes/eval/common/__init__.py -------------------------------------------------------------------------------- /nuscenes-devkit-master/python-sdk/nuscenes/eval/common/config.py: -------------------------------------------------------------------------------- 1 | # nuScenes dev-kit. 2 | # Code written by Holger Caesar, 2019. 
def config_factory(configuration_name: str) -> Union[DetectionConfig, TrackingConfig]:
    """
    Creates a *Config instance that can be used to initialize a *Eval instance, where * stands for Detection/Tracking.
    Note that this only works if the config file is located in the nuscenes/eval/common/configs folder.
    :param configuration_name: Name of desired configuration in eval_detection_configs.
    :return: *Config instance.
    """
    # The task prefix ("detection" / "tracking") selects both the config
    # sub-folder and the deserializer class.
    tokens = configuration_name.split('_')
    assert len(tokens) > 1, 'Error: Configuration name must be have prefix "detection_" or "tracking_"!'
    task = tokens[0]

    here = os.path.dirname(os.path.abspath(__file__))
    cfg_path = os.path.join(here, '..', task, 'configs', '%s.json' % configuration_name)
    assert os.path.exists(cfg_path), 'Requested unknown configuration {}'.format(configuration_name)

    with open(cfg_path, 'r') as f:
        data = json.load(f)

    # Table-driven dispatch replaces the original if/elif chain; an unknown
    # task that somehow has a config file on disk is still rejected here.
    deserializers = {'detection': DetectionConfig, 'tracking': TrackingConfig}
    if task not in deserializers:
        raise Exception('Error: Invalid config file name: %s' % configuration_name)
    return deserializers[task].deserialize(data)
3 | 4 | from typing import Any 5 | 6 | import matplotlib.pyplot as plt 7 | 8 | Axis = Any 9 | 10 | 11 | def setup_axis(xlabel: str = None, 12 | ylabel: str = None, 13 | xlim: int = None, 14 | ylim: int = None, 15 | title: str = None, 16 | min_precision: float = None, 17 | min_recall: float = None, 18 | ax: Axis = None, 19 | show_spines: str = 'none'): 20 | """ 21 | Helper method that sets up the axis for a plot. 22 | :param xlabel: x label text. 23 | :param ylabel: y label text. 24 | :param xlim: Upper limit for x axis. 25 | :param ylim: Upper limit for y axis. 26 | :param title: Axis title. 27 | :param min_precision: Visualize minimum precision as horizontal line. 28 | :param min_recall: Visualize minimum recall as vertical line. 29 | :param ax: (optional) an existing axis to be modified. 30 | :param show_spines: Whether to show axes spines, set to 'none' by default. 31 | :return: The axes object. 32 | """ 33 | if ax is None: 34 | ax = plt.subplot() 35 | 36 | ax.get_xaxis().tick_bottom() 37 | ax.tick_params(labelsize=16) 38 | ax.get_yaxis().tick_left() 39 | 40 | # Hide the selected axes spines. 
def config_factory(configuration_name: str) -> DetectionConfig:
    """
    Creates a DetectionConfig instance that can be used to initialize a NuScenesEval instance.
    Note that this only works if the config file is located in the nuscenes/eval/detection/configs folder.
    :param configuration_name: Name of desired configuration in eval_detection_configs.
    :return: DetectionConfig instance.
    """
    # Resolve the JSON config file relative to this module's directory.
    cfg_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                            'configs', '%s.json' % configuration_name)
    assert os.path.exists(cfg_path), \
        'Requested unknown configuration {}'.format(configuration_name)

    # Load and deserialize in one pass.
    with open(cfg_path, 'r') as f:
        return DetectionConfig.deserialize(json.load(f))
Veh.', 12 | 'pedestrian': 'Pedestrian', 13 | 'motorcycle': 'Motorcycle', 14 | 'bicycle': 'Bicycle', 15 | 'traffic_cone': 'Traffic Cone', 16 | 'barrier': 'Barrier'} 17 | 18 | DETECTION_COLORS = {'car': 'C0', 19 | 'truck': 'C1', 20 | 'bus': 'C2', 21 | 'trailer': 'C3', 22 | 'construction_vehicle': 'C4', 23 | 'pedestrian': 'C5', 24 | 'motorcycle': 'C6', 25 | 'bicycle': 'C7', 26 | 'traffic_cone': 'C8', 27 | 'barrier': 'C9'} 28 | 29 | ATTRIBUTE_NAMES = ['pedestrian.moving', 'pedestrian.sitting_lying_down', 'pedestrian.standing', 'cycle.with_rider', 30 | 'cycle.without_rider', 'vehicle.moving', 'vehicle.parked', 'vehicle.stopped'] 31 | 32 | PRETTY_ATTRIBUTE_NAMES = {'pedestrian.moving': 'Ped. Moving', 33 | 'pedestrian.sitting_lying_down': 'Ped. Sitting', 34 | 'pedestrian.standing': 'Ped. Standing', 35 | 'cycle.with_rider': 'Cycle w/ Rider', 36 | 'cycle.without_rider': 'Cycle w/o Rider', 37 | 'vehicle.moving': 'Veh. Moving', 38 | 'vehicle.parked': 'Veh. Parked', 39 | 'vehicle.stopped': 'Veh. Stopped'} 40 | 41 | TP_METRICS = ['trans_err', 'scale_err', 'orient_err', 'vel_err', 'attr_err'] 42 | 43 | PRETTY_TP_METRICS = {'trans_err': 'Trans.', 'scale_err': 'Scale', 'orient_err': 'Orient.', 'vel_err': 'Vel.', 44 | 'attr_err': 'Attr.'} 45 | 46 | TP_METRICS_UNITS = {'trans_err': 'm', 47 | 'scale_err': '1-IOU', 48 | 'orient_err': 'rad.', 49 | 'vel_err': 'm/s', 50 | 'attr_err': '1-acc.'} 51 | -------------------------------------------------------------------------------- /nuscenes-devkit-master/python-sdk/nuscenes/eval/detection/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/nuscenes-devkit-master/python-sdk/nuscenes/eval/detection/tests/__init__.py -------------------------------------------------------------------------------- /nuscenes-devkit-master/python-sdk/nuscenes/eval/detection/utils.py: 
def category_to_detection_name(category_name: str) -> Optional[str]:
    """
    Default label mapping from nuScenes to nuScenes detection classes.
    Note that pedestrian does not include personal_mobility, stroller and wheelchair.
    :param category_name: Generic nuScenes class.
    :return: nuScenes detection class, or None when the category is not evaluated.
    """
    detection_mapping = {
        'movable_object.barrier': 'barrier',
        'vehicle.bicycle': 'bicycle',
        'vehicle.bus.bendy': 'bus',
        'vehicle.bus.rigid': 'bus',
        'vehicle.car': 'car',
        'vehicle.construction': 'construction_vehicle',
        'vehicle.motorcycle': 'motorcycle',
        'human.pedestrian.adult': 'pedestrian',
        'human.pedestrian.child': 'pedestrian',
        'human.pedestrian.construction_worker': 'pedestrian',
        'human.pedestrian.police_officer': 'pedestrian',
        'movable_object.trafficcone': 'traffic_cone',
        'vehicle.trailer': 'trailer',
        'vehicle.truck': 'truck'
    }
    # dict.get already yields None for unmapped categories, replacing the
    # explicit membership test of the original implementation.
    return detection_mapping.get(category_name)


def detection_name_to_rel_attributes(detection_name: str) -> List[str]:
    """
    Returns a list of relevant attributes for a given detection class.
    :param detection_name: The detection class.
    :return: List of relevant attributes.
    """
    # Table form of the original if/elif chain. A fresh list is returned on
    # every call so callers may mutate the result safely, as before.
    rel_attributes_by_class = {
        'pedestrian': ['pedestrian.moving', 'pedestrian.sitting_lying_down', 'pedestrian.standing'],
        'bicycle': ['cycle.with_rider', 'cycle.without_rider'],
        'motorcycle': ['cycle.with_rider', 'cycle.without_rider'],
        'car': ['vehicle.moving', 'vehicle.parked', 'vehicle.stopped'],
        'bus': ['vehicle.moving', 'vehicle.parked', 'vehicle.stopped'],
        'construction_vehicle': ['vehicle.moving', 'vehicle.parked', 'vehicle.stopped'],
        'trailer': ['vehicle.moving', 'vehicle.parked', 'vehicle.stopped'],
        'truck': ['vehicle.moving', 'vehicle.parked', 'vehicle.stopped'],
        # Classes without attributes: barrier, traffic_cone.
        'barrier': [],
        'traffic_cone': [],
    }
    if detection_name not in rel_attributes_by_class:
        raise ValueError('Error: %s is not a valid detection class.' % detection_name)
    return list(rel_attributes_by_class[detection_name])
# nuScenes dev-kit.
# Code written by Freddy Boulton, 2020.

""" Script for running baseline models on a given nuscenes-split. """

import argparse
import json
import os

from nuscenes import NuScenes
from nuscenes.eval.prediction.config import load_prediction_config
from nuscenes.eval.prediction.splits import get_prediction_challenge_split
from nuscenes.prediction import PredictHelper
from nuscenes.prediction.models.physics import ConstantVelocityHeading, PhysicsOracle


def main(version: str, data_root: str,
         split_name: str, output_dir: str, config_name: str = 'predict_2020_icra.json') -> None:
    """
    Performs inference for all of the baseline models defined in the physics model module.
    :param version: nuScenes dataset version.
    :param data_root: Directory where the NuScenes data is stored.
    :param split_name: nuScenes data split name, e.g. train, val, mini_train, etc.
    :param output_dir: Directory where predictions should be stored.
    :param config_name: Name of config file.
    """

    nusc = NuScenes(version=version, dataroot=data_root)
    helper = PredictHelper(nusc)
    # Pass data_root through; the original used the function's default dataroot,
    # silently ignoring --data_root when it differed from /data/sets/nuscenes.
    dataset = get_prediction_challenge_split(split_name, dataroot=data_root)
    config = load_prediction_config(helper, config_name)
    oracle = PhysicsOracle(config.seconds, helper)
    cv_heading = ConstantVelocityHeading(config.seconds, helper)

    cv_preds = []
    oracle_preds = []
    for token in dataset:
        cv_preds.append(cv_heading(token).serialize())
        oracle_preds.append(oracle(token).serialize())

    # Use context managers so the output files are flushed and closed even on
    # error; the original json.dump(..., open(...)) left the handles dangling.
    with open(os.path.join(output_dir, "cv_preds.json"), "w") as f:
        json.dump(cv_preds, f)
    with open(os.path.join(output_dir, "oracle_preds.json"), "w") as f:
        json.dump(oracle_preds, f)


if __name__ == "__main__":

    parser = argparse.ArgumentParser(description='Perform Inference with baseline models.')
    parser.add_argument('--version', help='nuScenes version number.')
    parser.add_argument('--data_root', help='Directory storing NuScenes data.', default='/data/sets/nuscenes')
    parser.add_argument('--split_name', help='Data split to run inference on.')
    parser.add_argument('--output_dir', help='Directory to store output files.')
    parser.add_argument('--config_name', help='Config file to use.', default='predict_2020_icra.json')

    args = parser.parse_args()
    main(args.version, args.data_root, args.split_name, args.output_dir, args.config_name)
import json
import os
from typing import List, Dict, Any

from nuscenes.eval.prediction.metrics import Metric, deserialize_metric
from nuscenes.prediction import PredictHelper


class PredictionConfig:

    def __init__(self,
                 metrics: List[Metric],
                 seconds: int = 6,
                 frequency: int = 2):
        """
        Data class holding the prediction evaluation settings.
        :param metrics: List of nuscenes.eval.prediction.metric.Metric objects.
        :param seconds: Number of seconds to predict for each agent.
        :param frequency: Rate at which prediction is made, in Hz.
        """
        self.metrics = metrics
        self.seconds = seconds
        self.frequency = frequency  # Hz

    def serialize(self) -> Dict[str, Any]:
        """ Serialize instance into json-friendly format. """
        serialized_metrics = [metric.serialize() for metric in self.metrics]
        # NOTE(review): frequency is not serialized, matching the on-disk config
        # schema which only stores 'seconds' -- confirm before round-tripping.
        return {'metrics': serialized_metrics,
                'seconds': self.seconds}

    @classmethod
    def deserialize(cls, content: Dict[str, Any], helper: PredictHelper):
        """ Initialize from serialized dictionary. """
        metrics = [deserialize_metric(metric, helper) for metric in content['metrics']]
        return cls(metrics, seconds=content['seconds'])


def load_prediction_config(helper: PredictHelper, config_name: str = 'predict_2020_icra.json') -> PredictionConfig:
    """
    Loads a PredictionConfig from json file stored in eval/prediction/configs.
    :param helper: Instance of PredictHelper. Needed for OffRoadRate metric.
    :param config_name: Name of json config file.
    :return: PredictionConfig.
    """
    # Config files live next to this module, under configs/.
    configs_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "configs")
    cfg_path = os.path.join(configs_dir, config_name)
    assert os.path.exists(cfg_path), f'Requested unknown configuration {cfg_path}'

    # Load config file and deserialize it.
    with open(cfg_path, 'r') as f:
        config = json.load(f)

    return PredictionConfig.deserialize(config, helper)
7 | 8 | ## Requirements 9 | 10 | - Docker version >= 19 (We tested with 19.03.7) 11 | - machine with GPU card, nvidia drivers and CUDA 10.1 (for GPU support) 12 | - nvidia-docker https://github.com/NVIDIA/nvidia-docker. You can use generic docker image if you don't need GPU support 13 | - nuScenes dataset 14 | - cloned nuScenes-devkit repo https://github.com/nutonomy/nuscenes-devkit 15 | 16 | ## Usage 17 | - Pull docker image. For CUDA 10.1 use: 18 | ``` 19 | docker pull nuscenes/dev-challenge:10.1 20 | ``` 21 | - Pull docker image. For CUDA 9.2 use: 22 | ``` 23 | docker pull nuscenes/dev-challenge:9.2 24 | ``` 25 | 26 | 27 | - Create directory for output data 28 | ``` 29 | mkdir -p ~/Documents/submissions 30 | ``` 31 | 32 | - Create home directory for the image (needed if you need to install extra packages). 33 | ``` 34 | mkdir -p ~/Desktop/home_directory 35 | ``` 36 | 37 | - Modify `do_inference.py` in `nuscenes/eval/prediction/submission` to 38 | run your model. Place your model weights in 39 | `nuscenes/eval/prediction/submission` as well. If you need to install any 40 | extra packages, add them (along with the **exact** version number) to 41 | `nuscenes/eval/prediction/submission/extra_packages.txt`. 
42 | 43 | - Run docker container 44 | ``` 45 | cd 46 | docker run [ --gpus all ] -ti --rm \ 47 | -v :/data/sets/nuscenes \ 48 | -v /python-sdk:/nuscenes-dev/python-sdk \ 49 | -v :/nuscenes-dev/prediction \ 50 | -v ~/Documents/:/nuscenes-dev/Documents \ 51 | -v ~/Desktop/home_directory:/home/ 52 | 53 | ``` 54 | 55 | NOTE: The docker image uses 1000:1000 uid:gid 56 | If this is different from your local setup, you may want to add this options into `docker run` command 57 | ``` 58 | --user `id -u`:`id -g` -v /etc/passwd:/etc/passwd -v /etc/group:/etc/group 59 | ``` 60 | 61 | - Execute your script inside docker container 62 | ``` 63 | source activate /home/nuscenes/.conda/envs/nuscenes 64 | 65 | pip install -r submission/extra_packages.txt 66 | 67 | # Use v1.0-trainval and split_name val to run on the entire val set 68 | 69 | python do_inference.py --version v1.0-mini \ 70 | --data_root /data/sets/nuscenes \ 71 | --split_name mini_val \ 72 | --output_dir /nuscenes-dev/Documents/submissions \ 73 | --submission_name 74 | ``` 75 | -------------------------------------------------------------------------------- /nuscenes-devkit-master/python-sdk/nuscenes/eval/prediction/docker_container/docker/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG FROM 2 | FROM ${FROM} 3 | 4 | MAINTAINER nutonomy.com 5 | 6 | RUN apt-get update && apt-get install -y --no-install-recommends \ 7 | curl \ 8 | libsm6 \ 9 | libxext6 \ 10 | libxrender-dev \ 11 | libgl1-mesa-glx \ 12 | libglib2.0-0 \ 13 | xvfb \ 14 | && rm -rf /var/lib/apt/lists/* 15 | 16 | RUN curl -OL https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh \ 17 | && bash ./Miniconda3-latest-Linux-x86_64.sh -b -p /opt/miniconda3 \ 18 | && rm -f Miniconda3-latest-Linux-x86_64.sh 19 | 20 | ENV PATH /opt/miniconda3/bin:$PATH 21 | 22 | RUN conda update -n base -c defaults conda 23 | 24 | RUN groupadd -g 1000 dev \ 25 | && useradd -d /home/nuscenes -u 1000 -g 1000 -m -s 
# nuScenes dev-kit.
# Code written by Freddy Boulton.

import json
import os
from itertools import chain
from typing import List

from nuscenes.utils.splits import create_splits_scenes

# Number of scenes taken from the front of the official train split to form train_val.
NUM_IN_TRAIN_VAL = 200


def get_prediction_challenge_split(split: str, dataroot: str = '/data/sets/nuscenes') -> List[str]:
    """
    Gets a list of {instance_token}_{sample_token} strings for each split.
    :param split: One of 'mini_train', 'mini_val', 'train', 'train_val', 'val'.
        (Docstring fixed: 'train_val' was accepted by the code but undocumented.)
    :param dataroot: Path to the nuScenes dataset.
    :return: List of tokens belonging to the split. Format {instance_token}_{sample_token}.
    :raises ValueError: If split is not one of the accepted names.
    """
    if split not in {'mini_train', 'mini_val', 'train', 'train_val', 'val'}:
        raise ValueError("split must be one of (mini_train, mini_val, train, train_val, val)")

    # Both 'train' and 'train_val' draw scenes from the official 'train' split.
    if split == 'train_val':
        split_name = 'train'
    else:
        split_name = split

    path_to_file = os.path.join(dataroot, "maps", "prediction", "prediction_scenes.json")
    # Use a context manager so the file handle is closed deterministically;
    # the original json.load(open(...)) leaked it until garbage collection.
    with open(path_to_file, "r") as f:
        prediction_scenes = json.load(f)
    scenes = create_splits_scenes()
    scenes_for_split = scenes[split_name]

    # Carve the official train split: first NUM_IN_TRAIN_VAL scenes form
    # train_val, the remainder form train.
    if split == 'train':
        scenes_for_split = scenes_for_split[NUM_IN_TRAIN_VAL:]
    if split == 'train_val':
        scenes_for_split = scenes_for_split[:NUM_IN_TRAIN_VAL]

    # Scenes absent from the prediction file contribute no tokens.
    token_list_for_scenes = map(lambda scene: prediction_scenes.get(scene, []), scenes_for_split)

    return list(chain.from_iterable(token_list_for_scenes))
import unittest

import numpy as np

from nuscenes.eval.prediction.data_classes import Prediction


class TestPrediction(unittest.TestCase):
    """Sanity checks for the Prediction data class."""

    def test(self):
        # Two modes, each a 2x2 trajectory of ones; both assigned probability zero.
        pred = Prediction('instance', 'sample', np.ones((2, 2, 2)), np.zeros(2))

        expected = {'instance': 'instance',
                    'sample': 'sample',
                    'prediction': [[[1, 1], [1, 1]],
                                   [[1, 1], [1, 1]]],
                    'probabilities': [0, 0]}

        self.assertEqual(pred.number_of_modes, 2)
        self.assertDictEqual(pred.serialize(), expected)
"dist_fcn": "center_distance", 12 | "dist_th_tp": 2.0, 13 | "min_recall": 0.1, 14 | "max_boxes_per_sample": 500, 15 | "metric_worst": { 16 | "amota": 0.0, 17 | "amotp": 2.0, 18 | "recall": 0.0, 19 | "motar": 0.0, 20 | "mota": 0.0, 21 | "motp": 2.0, 22 | "mt": 0.0, 23 | "ml": -1.0, 24 | "faf": 500, 25 | "gt": -1, 26 | "tp": 0.0, 27 | "fp": -1.0, 28 | "fn": -1.0, 29 | "ids": -1.0, 30 | "frag": -1.0, 31 | "tid": 20, 32 | "lgd": 20 33 | }, 34 | "num_thresholds": 40 35 | } 36 | -------------------------------------------------------------------------------- /nuscenes-devkit-master/python-sdk/nuscenes/eval/tracking/constants.py: -------------------------------------------------------------------------------- 1 | # nuScenes dev-kit. 2 | # Code written by Holger Caesar, Caglayan Dicle and Oscar Beijbom, 2019. 3 | 4 | TRACKING_NAMES = ['bicycle', 'bus', 'car', 'motorcycle', 'pedestrian', 'trailer', 'truck'] 5 | 6 | AMOT_METRICS = ['amota', 'amotp'] 7 | INTERNAL_METRICS = ['recall', 'motar', 'gt'] 8 | LEGACY_METRICS = ['mota', 'motp', 'mt', 'ml', 'faf', 'tp', 'fp', 'fn', 'ids', 'frag', 'tid', 'lgd'] 9 | TRACKING_METRICS = [*AMOT_METRICS, *INTERNAL_METRICS, *LEGACY_METRICS] 10 | 11 | PRETTY_TRACKING_NAMES = { 12 | 'bicycle': 'Bicycle', 13 | 'bus': 'Bus', 14 | 'car': 'Car', 15 | 'motorcycle': 'Motorcycle', 16 | 'pedestrian': 'Pedestrian', 17 | 'trailer': 'Trailer', 18 | 'truck': 'Truck' 19 | } 20 | 21 | TRACKING_COLORS = { 22 | 'bicycle': 'C9', # Differs from detection. 23 | 'bus': 'C2', 24 | 'car': 'C0', 25 | 'motorcycle': 'C6', 26 | 'pedestrian': 'C5', 27 | 'trailer': 'C3', 28 | 'truck': 'C1' 29 | } 30 | 31 | # Define mapping for metrics averaged over classes. 32 | AVG_METRIC_MAP = { # Mapping from average metric name to individual per-threshold metric name. 33 | 'amota': 'motar', 34 | 'amotp': 'motp' 35 | } 36 | 37 | # Define mapping for metrics that use motmetrics library. 38 | MOT_METRIC_MAP = { # Mapping from motmetrics names to metric names used here. 
39 | 'num_frames': '', # Used in FAF. 40 | 'num_objects': 'gt', # Used in MOTAR computation. 41 | 'num_predictions': '', # Only printed out. 42 | 'num_matches': 'tp', # Used in MOTAR computation and printed out. 43 | 'motar': 'motar', # Only used in AMOTA. 44 | 'mota_custom': 'mota', # Traditional MOTA, but clipped below 0. 45 | 'motp_custom': 'motp', # Traditional MOTP. 46 | 'faf': 'faf', 47 | 'mostly_tracked': 'mt', 48 | 'mostly_lost': 'ml', 49 | 'num_false_positives': 'fp', 50 | 'num_misses': 'fn', 51 | 'num_switches': 'ids', 52 | 'num_fragmentations_custom': 'frag', 53 | 'recall': 'recall', 54 | 'tid': 'tid', 55 | 'lgd': 'lgd' 56 | } 57 | -------------------------------------------------------------------------------- /nuscenes-devkit-master/python-sdk/nuscenes/eval/tracking/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/nuscenes-devkit-master/python-sdk/nuscenes/eval/tracking/tests/__init__.py -------------------------------------------------------------------------------- /nuscenes-devkit-master/python-sdk/nuscenes/lidarseg/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/nuscenes-devkit-master/python-sdk/nuscenes/lidarseg/__init__.py -------------------------------------------------------------------------------- /nuscenes-devkit-master/python-sdk/nuscenes/map_expansion/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/nuscenes-devkit-master/python-sdk/nuscenes/map_expansion/__init__.py -------------------------------------------------------------------------------- /nuscenes-devkit-master/python-sdk/nuscenes/map_expansion/bitmap.py: 
import os
from typing import Tuple, Any

import numpy as np

# Type alias for a matplotlib axis; kept loose (Any) so that importing this
# module does not require matplotlib.
Axis = Any


class BitMap:

    def __init__(self, dataroot: str, map_name: str, layer_name: str):
        """
        This class is used to render bitmap map layers. Currently these are:
        - semantic_prior: The semantic prior (driveable surface and sidewalks) mask from nuScenes 1.0.
        - basemap: The HD lidar basemap used for localization and as general context.

        :param dataroot: Path of the nuScenes dataset.
        :param map_name: Which map out of `singapore-onenorth`, `singapore-hollandvillage`, `singapore-queenstown` and
            `boston-seaport`.  (Docstring typo `singepore-hollandvillage` fixed.)
        :param layer_name: The type of bitmap map, `semantic_prior` or `basemap`.
        """
        self.dataroot = dataroot
        self.map_name = map_name
        self.layer_name = layer_name

        self.image = self.load_bitmap()

    def load_bitmap(self) -> np.ndarray:
        """
        Load the specified bitmap.
        :return: The bitmap as a numpy array.
        :raises ValueError: If self.layer_name is not a known bitmap layer.
        :raises FileNotFoundError: If the bitmap file is not installed.
        """
        # Resolve the on-disk path of the bitmap.
        if self.layer_name == 'basemap':
            map_path = os.path.join(self.dataroot, 'maps', 'basemap', self.map_name + '.png')
        elif self.layer_name == 'semantic_prior':
            # Semantic prior masks are stored under their md5 hashes.
            map_hashes = {
                'singapore-onenorth': '53992ee3023e5494b90c316c183be829',
                'singapore-hollandvillage': '37819e65e09e5547b8a3ceaefba56bb2',
                'singapore-queenstown': '93406b464a165eaba6d9de76ca09f5da',
                'boston-seaport': '36092f0b03a857c6a3403e25b4b7aab3'
            }
            map_hash = map_hashes[self.map_name]
            map_path = os.path.join(self.dataroot, 'maps', map_hash + '.png')
        else:
            # Was a generic Exception; ValueError is more precise and remains
            # backward compatible with callers catching Exception.
            raise ValueError('Error: Invalid bitmap layer: %s' % self.layer_name)

        # Convert to numpy.
        if os.path.exists(map_path):
            # Import locally so the error paths above work without PIL installed.
            from PIL import Image
            image = np.array(Image.open(map_path))
        else:
            # FileNotFoundError (an OSError) replaces the generic Exception.
            raise FileNotFoundError('Error: Cannot find %s %s! Please make sure that the map is correctly installed.'
                                    % (self.layer_name, map_path))

        # Invert semantic prior colors.
        if self.layer_name == 'semantic_prior':
            image = image.max() - image

        return image

    def render(self, canvas_edge: Tuple[float, float], ax: Axis = None):
        """
        Render the bitmap.
        Note: Regardless of the image dimensions, the image will be rendered to occupy the entire map.
        :param canvas_edge: The dimension of the current map in meters (width, height).
        :param ax: Optional axis to render to.
        """
        # Import locally so headless, non-plotting use of BitMap does not
        # require matplotlib at module import time.
        import matplotlib.pyplot as plt

        if ax is None:
            ax = plt.subplot()
        x, y = canvas_edge
        if len(self.image.shape) == 2:
            # Single-channel masks are rendered in grayscale.
            ax.imshow(self.image, extent=[0, x, 0, y], cmap='gray')
        else:
            ax.imshow(self.image, extent=[0, x, 0, y])
# nuScenes dev-kit.
# Code written by Freddy Boulton, 2020.
from functools import reduce
from typing import List

import cv2
import numpy as np

from nuscenes.prediction.input_representation.interface import Combinator


def add_foreground_to_image(base_image: np.ndarray,
                            foreground_image: np.ndarray) -> np.ndarray:
    """
    Overlays a foreground image on top of a base image without mixing colors. Type uint8.
    :param base_image: Image that will be the background. Type uint8.
    :param foreground_image: Image that will be the foreground.
    :return: Image Numpy array of type uint8.
    """
    if base_image.shape != foreground_image.shape:
        raise ValueError("base_image and foreground image must have the same shape."
                         " Received {} and {}".format(base_image.shape, foreground_image.shape))

    if not (base_image.dtype == "uint8" and foreground_image.dtype == "uint8"):
        raise ValueError("base_image and foreground image must be of type 'uint8'."
                         " Received {} and {}".format(base_image.dtype, foreground_image.dtype))

    # Every non-black foreground pixel becomes part of the overlay mask.
    foreground_gray = cv2.cvtColor(foreground_image, cv2.COLOR_BGR2GRAY)
    _, overlay_mask = cv2.threshold(foreground_gray, 0, 255, cv2.THRESH_BINARY)
    background_mask = cv2.bitwise_not(overlay_mask)

    # Keep the background only where the foreground is black, then paste the
    # masked foreground on top.
    background_part = cv2.bitwise_and(base_image, base_image, mask=background_mask)
    foreground_part = cv2.bitwise_and(foreground_image, foreground_image, mask=overlay_mask)
    return cv2.add(background_part, foreground_part)


class Rasterizer(Combinator):
    """
    Combines images into a three channel image.
    """

    def combine(self, data: List[np.ndarray]) -> np.ndarray:
        """
        Combine three channel images into a single image.
        :param data: List of images to combine.
        :return: Numpy array representing image (type 'uint8')
        """
        # All images in the list share one shape; start from a black canvas and
        # fold each layer onto the previous result, in order.
        canvas = np.zeros(data[0].shape).astype("uint8")
        return reduce(add_foreground_to_image, [canvas] + data)
""" 19 | 20 | @abc.abstractmethod 21 | def make_representation(self, instance_token: str, sample_token: str) -> np.ndarray: 22 | raise NotImplementedError() 23 | 24 | 25 | class Combinator(abc.ABC): 26 | """ Combines the StaticLayer and Agent representations into a single one. """ 27 | 28 | @abc.abstractmethod 29 | def combine(self, data: List[np.ndarray]) -> np.ndarray: 30 | raise NotImplementedError() 31 | 32 | 33 | class InputRepresentation: 34 | """ 35 | Specifies how to represent the input for a prediction model. 36 | Need to provide a StaticLayerRepresentation - how the map is represented, 37 | an AgentRepresentation - how agents in the scene are represented, 38 | and a Combinator, how the StaticLayerRepresentation and AgentRepresentation should be combined. 39 | """ 40 | 41 | def __init__(self, static_layer: StaticLayerRepresentation, agent: AgentRepresentation, 42 | combinator: Combinator): 43 | 44 | self.static_layer_rasterizer = static_layer 45 | self.agent_rasterizer = agent 46 | self.combinator = combinator 47 | 48 | def make_input_representation(self, instance_token: str, sample_token: str) -> np.ndarray: 49 | 50 | static_layers = self.static_layer_rasterizer.make_representation(instance_token, sample_token) 51 | agents = self.agent_rasterizer.make_representation(instance_token, sample_token) 52 | 53 | return self.combinator.combine([static_layers, agents]) 54 | 55 | -------------------------------------------------------------------------------- /nuscenes-devkit-master/python-sdk/nuscenes/prediction/input_representation/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/nuscenes-devkit-master/python-sdk/nuscenes/prediction/input_representation/tests/__init__.py -------------------------------------------------------------------------------- 
# nuScenes dev-kit.
# Code written by Freddy Boulton, 2020.

import unittest

import cv2
import numpy as np

from nuscenes.prediction.input_representation.combinators import Rasterizer


class TestRasterizer(unittest.TestCase):
    # Verifies that Rasterizer.combine paints layers onto a black canvas in
    # list order, with later layers drawn on top.

    def test(self):

        # Layer 1: white 20x20 box centered at (50, 50) on a 100x100 canvas.
        layer_1 = np.zeros((100, 100, 3))
        box_1 = cv2.boxPoints(((50, 50), (20, 20), 0))
        # NOTE(review): np.int0 is an alias removed in NumPy 2.0 (np.intp is the
        # forward-compatible spelling) -- confirm the project's NumPy floor.
        layer_1 = cv2.fillPoly(layer_1, pts=[np.int0(box_1)], color=(255, 255, 255))

        # Layer 2: red (BGR) 10x10 box centered at (70, 30), combined on top.
        layer_2 = np.zeros((100, 100, 3))
        box_2 = cv2.boxPoints(((70, 30), (10, 10), 0))
        layer_2 = cv2.fillPoly(layer_2, pts=[np.int0(box_2)], color=(0, 0, 255))

        rasterizer = Rasterizer()
        image = rasterizer.combine([layer_1.astype('uint8'), layer_2.astype('uint8')])

        # Expected: both boxes drawn directly onto one black canvas, in order.
        answer = np.zeros((100, 100, 3))
        answer = cv2.fillPoly(answer, pts=[np.int0(box_1)], color=(255, 255, 255))
        answer = cv2.fillPoly(answer, pts=[np.int0(box_2)], color=(0, 0, 255))
        answer = answer.astype('uint8')

        np.testing.assert_allclose(answer, image)
self.assertTupleEqual(pixels, answer) 20 | 21 | pixels = utils.convert_to_pixel_coords(location, 22 | center_of_image_in_global, 23 | center_of_image_in_pixels, 24 | resolution=0.2) 25 | answer = (350, 275) 26 | self.assertTupleEqual(pixels, answer) 27 | 28 | def test_above_and_to_the_left(self): 29 | 30 | location = (40, 70) 31 | center_of_image_in_global = (50, 50) 32 | center_of_image_in_pixels = (300, 300) 33 | 34 | pixels = utils.convert_to_pixel_coords(location, center_of_image_in_global, 35 | center_of_image_in_pixels) 36 | answer = (100, 200) 37 | self.assertTupleEqual(pixels, answer) 38 | 39 | pixels = utils.convert_to_pixel_coords(location, center_of_image_in_global, 40 | center_of_image_in_pixels, resolution=0.2) 41 | answer = (200, 250) 42 | self.assertTupleEqual(answer, pixels) 43 | 44 | def test_below_and_to_the_right(self): 45 | 46 | location = (60, 45) 47 | center_of_image_in_global = (50, 50) 48 | center_of_image_in_pixels = (400, 250) 49 | 50 | pixels = utils.convert_to_pixel_coords(location, center_of_image_in_global, center_of_image_in_pixels) 51 | answer = (450, 350) 52 | self.assertTupleEqual(pixels, answer) 53 | 54 | def test_below_and_to_the_left(self): 55 | 56 | location = (30, 40) 57 | center_of_image_in_global = (50, 50) 58 | center_of_image_in_pixels = (400, 250) 59 | 60 | pixels = utils.convert_to_pixel_coords(location, center_of_image_in_global, center_of_image_in_pixels) 61 | answer = (500, 50) 62 | self.assertTupleEqual(pixels, answer) 63 | 64 | def test_same_location(self): 65 | 66 | location = (50, 50) 67 | center_of_image_in_global = (50, 50) 68 | center_of_image_in_pixels = (400, 250) 69 | 70 | pixels = utils.convert_to_pixel_coords(location, center_of_image_in_global, center_of_image_in_pixels) 71 | self.assertTupleEqual(pixels, (400, 250)) 72 | 73 | class Test_get_crops(unittest.TestCase): 74 | 75 | def test(self): 76 | 77 | row_crop, col_crop = utils.get_crops(40, 10, 25, 25, 0.1, 800) 78 | 79 | self.assertEqual(row_crop, 
slice(0, 500)) 80 | self.assertEqual(col_crop, slice(150, 650)) 81 | -------------------------------------------------------------------------------- /nuscenes-devkit-master/python-sdk/nuscenes/prediction/models/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/nuscenes-devkit-master/python-sdk/nuscenes/prediction/models/__init__.py -------------------------------------------------------------------------------- /nuscenes-devkit-master/python-sdk/nuscenes/prediction/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/nuscenes-devkit-master/python-sdk/nuscenes/prediction/tests/__init__.py -------------------------------------------------------------------------------- /nuscenes-devkit-master/python-sdk/nuscenes/prediction/tests/run_covernet.py: -------------------------------------------------------------------------------- 1 | # nuScenes dev-kit. 2 | # Code written by Freddy Boulton, 2020. 3 | 4 | """ 5 | Regression test to see if CoverNet implementation can overfit on a single example. 
6 | """ 7 | 8 | import argparse 9 | import math 10 | 11 | import numpy as np 12 | import torch 13 | import torch.optim as optim 14 | from torch.utils.data import DataLoader, IterableDataset 15 | 16 | from nuscenes.prediction.models.backbone import MobileNetBackbone 17 | from nuscenes.prediction.models.covernet import CoverNet, ConstantLatticeLoss 18 | 19 | 20 | def generate_trajectory(theta: float) -> torch.Tensor: 21 | trajectory = torch.zeros(6, 2) 22 | trajectory[:, 0] = torch.arange(6) * math.cos(theta) 23 | trajectory[:, 1] = torch.arange(6) * math.sin(theta) 24 | return trajectory 25 | 26 | 27 | class Dataset(IterableDataset): 28 | """ Implements an infinite dataset of the same input image, agent state vector and ground truth label. """ 29 | 30 | def __iter__(self,): 31 | 32 | while True: 33 | image = torch.zeros((3, 100, 100)) 34 | agent_state_vector = torch.ones(3) 35 | ground_truth = generate_trajectory(math.pi / 2) 36 | 37 | yield image, agent_state_vector, ground_truth.unsqueeze(0) 38 | 39 | 40 | if __name__ == "__main__": 41 | 42 | parser = argparse.ArgumentParser(description='Run CoverNet to make sure it overfits on a single test case.') 43 | parser.add_argument('--use_gpu', type=int, help='Whether to use gpu', default=0) 44 | args = parser.parse_args() 45 | 46 | if args.use_gpu: 47 | device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') 48 | else: 49 | device = torch.device('cpu') 50 | 51 | dataset = Dataset() 52 | dataloader = DataLoader(dataset, batch_size=16, num_workers=0) 53 | 54 | backbone = MobileNetBackbone('mobilenet_v2') 55 | model = CoverNet(backbone, num_modes=3, input_shape=(3, 100, 100)) 56 | model = model.to(device) 57 | 58 | lattice = torch.zeros(3, 6, 2) 59 | lattice[0] = generate_trajectory(math.pi / 2) 60 | lattice[1] = generate_trajectory(math.pi / 4) 61 | lattice[2] = generate_trajectory(3 * math.pi / 4) 62 | 63 | loss_function = ConstantLatticeLoss(lattice) 64 | 65 | optimizer = optim.SGD(model.parameters(), 
lr=0.1) 66 | 67 | n_iter = 0 68 | 69 | minimum_loss = 0 70 | 71 | for img, agent_state_vector, ground_truth in dataloader: 72 | 73 | img = img.to(device) 74 | agent_state_vector = agent_state_vector.to(device) 75 | ground_truth = ground_truth.to(device) 76 | 77 | optimizer.zero_grad() 78 | 79 | logits = model(img, agent_state_vector) 80 | loss = loss_function(logits, ground_truth) 81 | loss.backward() 82 | optimizer.step() 83 | 84 | current_loss = loss.cpu().detach().numpy() 85 | 86 | print(f"Current loss is {current_loss:.2f}") 87 | if np.allclose(current_loss, minimum_loss, atol=1e-2): 88 | print(f"Achieved near-zero loss after {n_iter} iterations.") 89 | break 90 | 91 | n_iter += 1 92 | 93 | -------------------------------------------------------------------------------- /nuscenes-devkit-master/python-sdk/nuscenes/prediction/tests/test_backbone.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | try: 4 | import torch 5 | from torchvision.models.resnet import BasicBlock, Bottleneck 6 | except ModuleNotFoundError: 7 | raise unittest.SkipTest('Skipping test as torch was not found!') 8 | 9 | from nuscenes.prediction.models.backbone import ResNetBackbone, MobileNetBackbone 10 | 11 | 12 | class TestBackBones(unittest.TestCase): 13 | 14 | def count_layers(self, model): 15 | if isinstance(model[4][0], BasicBlock): 16 | n_convs = 2 17 | elif isinstance(model[4][0], Bottleneck): 18 | n_convs = 3 19 | else: 20 | raise ValueError("Backbone layer block not supported!") 21 | 22 | return sum([len(model[i]) for i in range(4, 8)]) * n_convs + 2 23 | 24 | def test_resnet(self): 25 | 26 | rn_18 = ResNetBackbone('resnet18') 27 | rn_34 = ResNetBackbone('resnet34') 28 | rn_50 = ResNetBackbone('resnet50') 29 | rn_101 = ResNetBackbone('resnet101') 30 | rn_152 = ResNetBackbone('resnet152') 31 | 32 | tensor = torch.ones((1, 3, 100, 100)) 33 | 34 | self.assertEqual(rn_18(tensor).shape[1], 512) 35 | 
self.assertEqual(rn_34(tensor).shape[1], 512) 36 | self.assertEqual(rn_50(tensor).shape[1], 2048) 37 | self.assertEqual(rn_101(tensor).shape[1], 2048) 38 | self.assertAlmostEqual(rn_152(tensor).shape[1], 2048) 39 | 40 | self.assertEqual(self.count_layers(list(rn_18.backbone.children())), 18) 41 | self.assertEqual(self.count_layers(list(rn_34.backbone.children())), 34) 42 | self.assertEqual(self.count_layers(list(rn_50.backbone.children())), 50) 43 | self.assertEqual(self.count_layers(list(rn_101.backbone.children())), 101) 44 | self.assertEqual(self.count_layers(list(rn_152.backbone.children())), 152) 45 | 46 | with self.assertRaises(ValueError): 47 | ResNetBackbone('resnet51') 48 | 49 | def test_mobilenet(self): 50 | 51 | mobilenet = MobileNetBackbone('mobilenet_v2') 52 | 53 | tensor = torch.ones((1, 3, 100, 100)) 54 | 55 | self.assertEqual(mobilenet(tensor).shape[1], 1280) -------------------------------------------------------------------------------- /nuscenes-devkit-master/python-sdk/nuscenes/prediction/tests/test_mtp.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | try: 4 | import torch 5 | except ModuleNotFoundError: 6 | raise unittest.SkipTest('Skipping test as torch was not found!') 7 | 8 | from nuscenes.prediction.models import backbone 9 | from nuscenes.prediction.models import mtp 10 | 11 | 12 | class TestMTP(unittest.TestCase): 13 | 14 | def setUp(self): 15 | self.image = torch.ones((1, 3, 100, 100)) 16 | self.agent_state_vector = torch.ones((1, 3)) 17 | self.image_5 = torch.ones((5, 3, 100, 100)) 18 | self.agent_state_vector_5 = torch.ones((5, 3)) 19 | 20 | def _run(self, model): 21 | pred = model(self.image, self.agent_state_vector) 22 | pred_5 = model(self.image_5, self.agent_state_vector_5) 23 | 24 | self.assertTupleEqual(pred.shape, (1, 75)) 25 | self.assertTupleEqual(pred_5.shape, (5, 75)) 26 | 27 | model.training = False 28 | pred = model(self.image, self.agent_state_vector) 29 | 
self.assertTrue(torch.allclose(pred[:, -3:].sum(axis=1), torch.ones(pred.shape[0]))) 30 | 31 | def test_works_with_resnet_18(self,): 32 | rn_18 = backbone.ResNetBackbone('resnet18') 33 | model = mtp.MTP(rn_18, 3, 6, 2, input_shape=(3, 100, 100)) 34 | self._run(model) 35 | 36 | def test_works_with_resnet_34(self,): 37 | rn_34 = backbone.ResNetBackbone('resnet34') 38 | model = mtp.MTP(rn_34, 3, 6, 2, input_shape=(3, 100, 100)) 39 | self._run(model) 40 | 41 | def test_works_with_resnet_50(self,): 42 | rn_50 = backbone.ResNetBackbone('resnet50') 43 | model = mtp.MTP(rn_50, 3, 6, 2, input_shape=(3, 100, 100)) 44 | self._run(model) 45 | 46 | def test_works_with_resnet_101(self,): 47 | rn_101 = backbone.ResNetBackbone('resnet101') 48 | model = mtp.MTP(rn_101, 3, 6, 2, input_shape=(3, 100, 100)) 49 | self._run(model) 50 | 51 | def test_works_with_resnet_152(self,): 52 | rn_152 = backbone.ResNetBackbone('resnet152') 53 | model = mtp.MTP(rn_152, 3, 6, 2, input_shape=(3, 100, 100)) 54 | self._run(model) 55 | 56 | def test_works_with_mobilenet_v2(self,): 57 | mobilenet = backbone.MobileNetBackbone('mobilenet_v2') 58 | model = mtp.MTP(mobilenet, 3, 6, 2, input_shape=(3, 100, 100)) 59 | self._run(model) 60 | 61 | 62 | 63 | -------------------------------------------------------------------------------- /nuscenes-devkit-master/python-sdk/nuscenes/scripts/README.md: -------------------------------------------------------------------------------- 1 | Misc scripts not part of the core code-base. 
def export_ego_poses(nusc: NuScenes, out_dir: str):
    """ Render where the ego vehicle drives on each map and save one PNG per location. """

    # Each log record knows which map location it was recorded in.
    locations = np.unique([log['location'] for log in nusc.log])

    # Create output directory (no-op if it already exists).
    os.makedirs(out_dir, exist_ok=True)

    for location in locations:
        print('Rendering map {}...'.format(location))
        nusc.render_egoposes_on_map(location)
        plt.tight_layout()
        plt.savefig(os.path.join(out_dir, 'egoposes-{}.png'.format(location)))
def export_videos(nusc: NuScenes, out_dir: str):
    """ Render one annotated .avi video per scene and write it to out_dir. """

    # Collect every scene token up front.
    scene_tokens = [s['token'] for s in nusc.scene]

    # Create output directory
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)

    # Write videos to disk, skipping scenes that were already rendered.
    for scene_token in scene_tokens:
        scene = nusc.get('scene', scene_token)
        print('Writing scene %s' % scene['name'])
        out_path = os.path.join(out_dir, scene['name']) + '.avi'
        if not os.path.exists(out_path):
            nusc.render_scene(scene['token'], out_path=out_path)
def verify_setup(nusc: NuScenes):
    """
    Script to verify that the nuScenes installation is complete.

    :param nusc: Initialized NuScenes instance whose sample_data and map
        records are checked against the files on disk.
    :raises AssertionError: If any referenced file is missing.
    """

    # Check that each sample_data file exists.
    print('Checking that sample_data files are complete...')
    for sd in tqdm(nusc.sample_data):
        file_path = os.path.join(nusc.dataroot, sd['filename'])
        assert os.path.exists(file_path), 'Error: Missing sample_data at: %s' % file_path

    # Check that each map file exists.
    # Loop variable renamed from `map` to avoid shadowing the builtin.
    print('Checking that map files are complete...')
    for map_record in tqdm(nusc.map):
        file_path = os.path.join(nusc.dataroot, map_record['filename'])
        assert os.path.exists(file_path), 'Error: Missing map at: %s' % file_path
class TestNuScenesLidarseg(unittest.TestCase):
    def setUp(self):
        # Tests only run against a real v1.0-mini install pointed to by $NUSCENES.
        assert 'NUSCENES' in os.environ, 'Set NUSCENES env. variable to enable tests.'
        self.nusc = NuScenes(version='v1.0-mini', dataroot=os.environ['NUSCENES'], verbose=False)

    def test_num_classes(self) -> None:
        """
        Check that the correct number of classes (32 classes) are loaded.
        """
        self.assertEqual(len(self.nusc.lidarseg_idx2name_mapping), 32)

    def test_num_colors(self) -> None:
        """
        Check that the number of colors in the colormap matches the number of classes.
        """
        self.assertEqual(len(self.nusc.colormap), len(self.nusc.lidarseg_idx2name_mapping))

    def test_classes(self) -> None:
        """
        Check that the class names match the ones in the colormap, and are in the same order.
        """
        colormap_names = list(self.nusc.colormap.keys())
        for name, idx in self.nusc.lidarseg_name2idx_mapping.items():
            self.assertEqual(name, colormap_names[idx])
def get_colormap() -> Dict[str, Tuple[int, int, int]]:
    """
    Get the defined colormap.
    :return: A mapping from the class names to the respective RGB values.
    """

    # (class name, RGB triple) pairs; insertion order defines the class index order.
    color_table = (
        ("noise", (0, 0, 0)),  # Black.
        ("animal", (70, 130, 180)),  # Steelblue
        ("human.pedestrian.adult", (0, 0, 230)),  # Blue
        ("human.pedestrian.child", (135, 206, 235)),  # Skyblue
        ("human.pedestrian.construction_worker", (100, 149, 237)),  # Cornflowerblue
        ("human.pedestrian.personal_mobility", (219, 112, 147)),  # Palevioletred
        ("human.pedestrian.police_officer", (0, 0, 128)),  # Navy
        ("human.pedestrian.stroller", (240, 128, 128)),  # Lightcoral
        ("human.pedestrian.wheelchair", (138, 43, 226)),  # Blueviolet
        ("movable_object.barrier", (112, 128, 144)),  # Slategrey
        ("movable_object.debris", (210, 105, 30)),  # Chocolate
        ("movable_object.pushable_pullable", (105, 105, 105)),  # Dimgrey
        ("movable_object.trafficcone", (47, 79, 79)),  # Darkslategrey
        ("static_object.bicycle_rack", (188, 143, 143)),  # Rosybrown
        ("vehicle.bicycle", (220, 20, 60)),  # Crimson
        ("vehicle.bus.bendy", (255, 127, 80)),  # Coral
        ("vehicle.bus.rigid", (255, 69, 0)),  # Orangered
        ("vehicle.car", (255, 158, 0)),  # Orange
        ("vehicle.construction", (233, 150, 70)),  # Darksalmon
        ("vehicle.emergency.ambulance", (255, 83, 0)),
        ("vehicle.emergency.police", (255, 215, 0)),  # Gold
        ("vehicle.motorcycle", (255, 61, 99)),  # Red
        ("vehicle.trailer", (255, 140, 0)),  # Darkorange
        ("vehicle.truck", (255, 99, 71)),  # Tomato
        ("flat.driveable_surface", (0, 207, 191)),  # nuTonomy green
        ("flat.other", (175, 0, 75)),
        ("flat.sidewalk", (75, 0, 75)),
        ("flat.terrain", (112, 180, 60)),
        ("static.manmade", (222, 184, 135)),  # Burlywood
        ("static.other", (255, 228, 196)),  # Bisque
        ("static.vegetation", (0, 175, 0)),  # Green
        ("vehicle.ego", (255, 240, 245)),
    )

    return {name: rgb for name, rgb in color_table}
def panoptic_to_lidarseg(panoptic_labels: np.ndarray) -> np.ndarray:
    """
    Convert panoptic label array to lidarseg label array.
    :param panoptic_labels: <np.array: points>, encoded as instance_id + 1000 * category_idx,
        where instance_id is 0 for stuff points.
    :return: <np.uint8: points>, the semantic (category) labels.
    """
    # Integer division strips the instance id and recovers the category index.
    semantic = np.floor_divide(panoptic_labels, 1000)
    return semantic.astype(np.uint8)
class TestDataClasses(unittest.TestCase):

    def test_load_pointclouds(self):
        """
        Loads up lidar and radar pointclouds.
        """
        assert 'NUSCENES' in os.environ, 'Set NUSCENES env. variable to enable tests.'
        dataroot = os.environ['NUSCENES']
        nusc = NuScenes(version='v1.0-mini', dataroot=dataroot, verbose=False)
        sample_rec = nusc.sample[0]

        def channel_path(channel):
            # Resolve the on-disk file for a sample-data channel.
            return os.path.join(dataroot, nusc.get('sample_data', sample_rec['data'][channel])['filename'])

        pc1 = LidarPointCloud.from_file(channel_path('LIDAR_TOP'))
        pc2 = RadarPointCloud.from_file(channel_path('RADAR_FRONT'))
        pc3, _ = LidarPointCloud.from_file_multisweep(nusc, sample_rec, 'LIDAR_TOP', 'LIDAR_TOP', nsweeps=2)
        pc4, _ = RadarPointCloud.from_file_multisweep(nusc, sample_rec, 'RADAR_FRONT', 'RADAR_FRONT', nsweeps=2)

        # Check for valid dimensions.
        assert pc1.points.shape[0] == pc3.points.shape[0] == 4, 'Error: Invalid dimension for lidar pointcloud!'
        assert pc2.points.shape[0] == pc4.points.shape[0] == 18, 'Error: Invalid dimension for radar pointcloud!'
        assert pc1.points.dtype == pc3.points.dtype, 'Error: Invalid dtype for lidar pointcloud!'
        assert pc2.points.dtype == pc4.points.dtype, 'Error: Invalid dtype for radar pointcloud!'
35 | 36 | 37 | if __name__ == '__main__': 38 | unittest.main() 39 | -------------------------------------------------------------------------------- /nuscenes-devkit-master/python-sdk/tutorials/README.md: -------------------------------------------------------------------------------- 1 | # Tutorials 2 | This folder contains all the tutorials for the devkit of the [nuScenes](https://www.nuscenes.org/nuscenes) and [nuImages](https://www.nuscenes.org/nuimages) datasets. 3 | 4 | All the tutorials are also [available on Google Colab](https://colab.research.google.com/github/nutonomy/nuscenes-devkit/). 5 | -------------------------------------------------------------------------------- /nuscenes-devkit-master/python-sdk/tutorials/trajectory.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fpthink/V2B/0c5f20cd7260bbfa878e194197bacf5ff8d63730/nuscenes-devkit-master/python-sdk/tutorials/trajectory.gif -------------------------------------------------------------------------------- /nuscenes-devkit-master/setup/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM continuumio/miniconda3:4.6.14 2 | ENV PATH /opt/conda/bin:$PATH 3 | 4 | RUN apt-get update && \ 5 | apt-get install -y --no-install-recommends \ 6 | libsm6 \ 7 | libxext6 \ 8 | libxrender-dev \ 9 | libgl1-mesa-glx \ 10 | libglib2.0-0 \ 11 | xvfb && \ 12 | rm -rf /var/lib/apt/lists/* 13 | 14 | WORKDIR /nuscenes-dev 15 | # create conda nuscenes env 16 | ARG PYTHON_VERSION 17 | RUN bash -c "conda create -y -n nuscenes python=${PYTHON_VERSION} \ 18 | && source activate nuscenes \ 19 | && conda clean --yes --all" 20 | 21 | COPY setup/requirements.txt . 22 | COPY setup/requirements/ requirements/ 23 | # Install Python dependencies inside of the Docker image via pip & Conda. 24 | # pycocotools installed from conda-forge 25 | RUN bash -c "source activate nuscenes \ 26 | && find . 
-name "\\*.txt" -exec sed -i -e '/pycocotools/d' {} \; \ 27 | && pip install --no-cache -r /nuscenes-dev/requirements.txt \ 28 | && conda config --append channels conda-forge \ 29 | && conda install --yes pycocotools \ 30 | && conda clean --yes --all" -------------------------------------------------------------------------------- /nuscenes-devkit-master/setup/requirements.txt: -------------------------------------------------------------------------------- 1 | -r requirements/requirements_base.txt 2 | -r requirements/requirements_nuimages.txt 3 | # -r requirements/requirements_prediction.txt # Uncomment this for the prediction code 4 | # -r requirements/requirements_tracking.txt # Uncomment this for the tracking code 5 | -------------------------------------------------------------------------------- /nuscenes-devkit-master/setup/requirements/requirements_base.txt: -------------------------------------------------------------------------------- 1 | cachetools 2 | descartes 3 | fire 4 | jupyter 5 | matplotlib 6 | numpy 7 | opencv-python 8 | Pillow>6.2.1 9 | pyquaternion>=0.9.5 10 | scikit-learn 11 | scipy 12 | Shapely 13 | tqdm 14 | -------------------------------------------------------------------------------- /nuscenes-devkit-master/setup/requirements/requirements_nuimages.txt: -------------------------------------------------------------------------------- 1 | pycocotools>=2.0.1 2 | -------------------------------------------------------------------------------- /nuscenes-devkit-master/setup/requirements/requirements_prediction.txt: -------------------------------------------------------------------------------- 1 | torch>=1.3.1 2 | torchvision>=0.4.2 3 | -------------------------------------------------------------------------------- /nuscenes-devkit-master/setup/requirements/requirements_tracking.txt: -------------------------------------------------------------------------------- 1 | motmetrics<=1.1.3 2 | pandas>=0.24 3 | 
def get_dirlist(_rootdir):
    """ Recursively collect the paths of every non-hidden subdirectory under _rootdir. """
    dirlist = []

    with os.scandir(_rootdir) as entries:
        for entry in entries:
            # Skip hidden entries and plain files.
            if entry.name.startswith('.') or not entry.is_dir():
                continue
            dirlist.append(entry.path)
            dirlist.extend(get_dirlist(entry.path))

    return dirlist
Lang, Sourabh Vora, Venice Erin Liong, ' 44 | 'Sergi Widjaja, Kiwoo Shin, Caglayan Dicle, Freddy Boulton, Whye Kit Fong, Asha Asvathaman, Lubing Zhou ' 45 | 'et al.', 46 | author_email='nuscenes@motional.com', 47 | description='The official devkit of the nuScenes dataset (www.nuscenes.org).', 48 | long_description=long_description, 49 | long_description_content_type='text/markdown', 50 | url='https://github.com/nutonomy/nuscenes-devkit', 51 | python_requires='>=3.6', 52 | install_requires=requirements, 53 | packages=packages, 54 | package_dir={'': 'python-sdk'}, 55 | package_data={'': ['*.json']}, 56 | include_package_data=True, 57 | classifiers=[ 58 | 'Programming Language :: Python :: 3.6', 59 | 'Operating System :: OS Independent', 60 | 'License :: Free for non-commercial use' 61 | ], 62 | license='cc-by-nc-sa-4.0' 63 | ) 64 | -------------------------------------------------------------------------------- /nuscenes-devkit-master/setup/test_tutorial.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -ex 3 | 4 | # This script is to be executed inside a Docker container 5 | source activate nuscenes 6 | 7 | # Generate python script from Jupyter notebook and then copy into Docker image. 
8 | jupyter nbconvert --to python python-sdk/tutorials/nuscenes_tutorial.ipynb || { echo "Failed to convert nuscenes_tutorial notebook to python script"; exit 1; } 9 | jupyter nbconvert --to python python-sdk/tutorials/nuimages_tutorial.ipynb || { echo "Failed to convert nuimages_tutorial notebook to python script"; exit 1; } 10 | jupyter nbconvert --to python python-sdk/tutorials/can_bus_tutorial.ipynb || { echo "Failed to convert can_bus_tutorial notebook to python script"; exit 1; } 11 | jupyter nbconvert --to python python-sdk/tutorials/map_expansion_tutorial.ipynb || { echo "Failed to convert map_expansion_tutorial notebook to python script"; exit 1; } 12 | jupyter nbconvert --to python python-sdk/tutorials/prediction_tutorial.ipynb || { echo "Failed to convert prediction notebook to python script"; exit 1; } 13 | 14 | # Remove extraneous matplot inline command and comment out any render* methods. 15 | sed -i.bak "/get_ipython.*/d; s/\(nusc\.render.*\)/#\1/" python-sdk/tutorials/nuscenes_tutorial.py || { echo "error in sed command"; exit 1; } 16 | sed -i.bak "/get_ipython.*/d; s/\(nusc\.render.*\)/#\1/" python-sdk/tutorials/nuimages_tutorial.py || { echo "error in sed command"; exit 1; } 17 | sed -i.bak "/get_ipython.*/d; s/\(nusc_can.plot.*\)/#\1/" python-sdk/tutorials/can_bus_tutorial.py || { echo "error in sed command"; exit 1; } 18 | sed -i.bak "/get_ipython.*/d; s/\(^plt.*\)/#\1/" python-sdk/tutorials/can_bus_tutorial.py || { echo "error in sed command"; exit 1; } 19 | sed -i.bak "/get_ipython.*/d; s/\(fig, ax.*\)/#\1/" python-sdk/tutorials/map_expansion_tutorial.py || { echo "error in sed command"; exit 1; } 20 | sed -i.bak "/get_ipython.*/d; s/\(nusc_map.render.*\)/#\1/" python-sdk/tutorials/map_expansion_tutorial.py || { echo "error in sed command"; exit 1; } 21 | sed -i.bak "/get_ipython.*/d; s/\(ego_poses = .*\)/#\1/" python-sdk/tutorials/map_expansion_tutorial.py || { echo "error in sed command"; exit 1; } 22 | sed -i.bak "/get_ipython.*/d; 
s/\(plt.imshow.*\)/#\1/" python-sdk/tutorials/prediction_tutorial.py || { echo "error in sed command"; exit 1; } 23 | 24 | # Run tutorial 25 | xvfb-run python python-sdk/tutorials/nuscenes_tutorial.py 26 | # xvfb-run python python-sdk/tutorials/nuimages_tutorial.py # skip until PR-440 merged 27 | xvfb-run python python-sdk/tutorials/can_bus_tutorial.py 28 | xvfb-run python python-sdk/tutorials/map_expansion_tutorial.py 29 | xvfb-run python python-sdk/tutorials/prediction_tutorial.py 30 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | git+git://github.com/DualMoonBird/pytorch_etw.git@v1.1.1#egg=etw_pytorch_utils 2 | h5py 3 | numpy 4 | torchvision 5 | pprint 6 | enum34 7 | future 8 | pandas 9 | shapely 10 | matplotlib 11 | pomegranate 12 | ipykernel 13 | imageio 14 | pyquaternion 15 | # Python >= 3.7 16 | --------------------------------------------------------------------------------