├── .gitignore
├── .readthedocs.yaml
├── LICENSE
├── README.md
├── delete_png.py
├── docs
├── Makefile
├── conf.py
├── index.rst
├── make.bat
├── md_files
│ ├── config_tutorial.md
│ ├── contributor.md
│ ├── data_annotation_tutorial.md
│ ├── data_intro.md
│ ├── data_pipeline.png
│ ├── installation.md
│ ├── lidar_benchmark.md
│ └── logic_flow.md
└── requirements.txt
├── environment.yml
├── images
├── camera_demo.gif
├── demo1.gif
└── pytorch-logo-dark.png
├── opencood
├── __init__.py
├── data_utils
│ ├── __init__.py
│ ├── augmentor
│ │ ├── __init__.py
│ │ ├── augment_utils.py
│ │ └── data_augmentor.py
│ ├── datasets
│ │ ├── __init__.py
│ │ ├── backup
│ │ │ ├── __init__.py.backup
│ │ │ ├── basedataset.py
│ │ │ ├── camera_basedataset.py
│ │ │ ├── camera_intermediate_fusion_dataset.py
│ │ │ ├── camera_intermediate_fusion_dataset_dair.py
│ │ │ ├── camera_late_fusion_dataset.py
│ │ │ ├── camera_late_fusion_dataset_dair.py
│ │ │ ├── early_fusion_dataset.py
│ │ │ ├── early_fusion_dataset_dair.py
│ │ │ ├── early_fusion_dataset_v2x.py
│ │ │ ├── early_fusion_vis_dataset.py
│ │ │ ├── infer_only
│ │ │ │ ├── camera_intermediate_fusion_dataset_.py
│ │ │ │ ├── camera_late_fusion_dataset_.py
│ │ │ │ └── late_fusion_dataset_.py
│ │ │ ├── intermediate_fusion_dataset.py
│ │ │ ├── intermediate_fusion_dataset_dair.py
│ │ │ ├── intermediate_fusion_dataset_v2.py
│ │ │ ├── intermediate_fusion_dataset_v2_dair.py
│ │ │ ├── intermediate_fusion_dataset_v2_v2x.py
│ │ │ ├── intermediate_fusion_dataset_v2x.py
│ │ │ ├── intermediate_fusion_dataset_v3.py
│ │ │ ├── late_fusion_dataset.py
│ │ │ ├── late_fusion_dataset_dair.py
│ │ │ ├── late_fusion_dataset_v2x.py
│ │ │ └── lidar_camera_intermediate_fusion_dataset.py
│ │ ├── basedataset
│ │ │ ├── dairv2x_basedataset.py
│ │ │ ├── dairv2x_basedataset_backup.py
│ │ │ ├── dairv2x_basedataset_latency.py
│ │ │ ├── dairv2x_basedataset_ori.py
│ │ │ ├── dairv2x_basedataset_sifei.py
│ │ │ ├── irv2v_basedataset.py
│ │ │ ├── opv2v_basedataset.py
│ │ │ ├── opv2v_basedataset_error_fusion.py
│ │ │ ├── opv2v_basedataset_ori.py
│ │ │ ├── opv2vh_basedataset.py
│ │ │ ├── test_v2xsim.py
│ │ │ ├── uav_dataset
│ │ │ │ ├── __init__.py
│ │ │ │ ├── airsim_camera.py
│ │ │ │ ├── decode.py
│ │ │ │ ├── eval_utils.py
│ │ │ │ ├── get_dataset.py
│ │ │ │ ├── images.py
│ │ │ │ ├── multiDet.py
│ │ │ │ ├── processing.py
│ │ │ │ └── transformation.py
│ │ │ ├── v2v4real_basedataset.py
│ │ │ ├── v2v4real_basedataset_ori.py
│ │ │ ├── v2xseq_basedataset.py
│ │ │ ├── v2xset_basedataset.py
│ │ │ ├── v2xsim_basedataset.py
│ │ │ └── v2xsim_basedataset_ori.py
│ │ ├── basedataset_ori
│ │ │ ├── dairv2x_basedataset.py
│ │ │ ├── opv2v_basedataset.py
│ │ │ ├── opv2v_basedataset_heter.py
│ │ │ ├── v2v4real_basedataset.py
│ │ │ ├── v2xseq_basedataset.py
│ │ │ ├── v2xset_basedataset.py
│ │ │ └── v2xsim_basedataset.py
│ │ ├── early_fusion_dataset.py
│ │ ├── intermediate_2stage_fusion_dataset.py
│ │ ├── intermediate_fusion_dataset.py
│ │ ├── intermediate_fusion_multisweep_dataset.py
│ │ ├── intermediate_fusion_multisweep_dataset_ms.py
│ │ ├── intermediate_heter_fusion_dataset.py
│ │ ├── intermediate_heter_fusion_dataset_1101.py
│ │ ├── intermediate_heter_fusion_dataset_ori.py
│ │ ├── late_fusion_dataset.py
│ │ ├── late_heter_fusion_dataset.py
│ │ ├── late_heter_fusion_dataset_ori.py
│ │ ├── late_heter_fusion_dataset_pjt.py
│ │ └── test.py
│ ├── post_processor
│ │ ├── __init__.py
│ │ ├── base_postprocessor.py
│ │ ├── base_postprocessor_ori.py
│ │ ├── bev_postprocessor.py
│ │ ├── ciassd_postprocessor.py
│ │ ├── fpvrcnn_postprocessor.py
│ │ ├── uncertainty_voxel_postprocessor.py
│ │ ├── voxel_postprocessor.py
│ │ └── voxel_postprocessor_oti.py
│ └── pre_processor
│ │ ├── __init__.py
│ │ ├── base_preprocessor.py
│ │ ├── bev_preprocessor.py
│ │ ├── sp_voxel_preprocessor.py
│ │ ├── sp_voxel_preprocessor_ori.py
│ │ └── voxel_preprocessor.py
├── extra_file_plus
│ └── opv2v_4modality.json
├── hypes_yaml
│ ├── __init__.py
│ ├── backup
│ │ ├── ciassd_early_fusion.yaml
│ │ ├── ciassd_intermediate_fusion.yaml
│ │ ├── ciassd_late_fusion.yaml
│ │ ├── dair-v2x
│ │ │ ├── camera
│ │ │ │ ├── dair_lss_max.yaml
│ │ │ │ ├── dair_lss_single.yaml
│ │ │ │ └── dair_lss_single_with_fg.yaml
│ │ │ ├── npj
│ │ │ │ ├── dair_disconet.yaml
│ │ │ │ ├── dair_early.yaml
│ │ │ │ ├── dair_fcooper.yaml
│ │ │ │ ├── dair_fpvrcnn.yaml
│ │ │ │ ├── dair_mash.yaml
│ │ │ │ ├── dair_ms_ba.yaml
│ │ │ │ ├── dair_ms_ba_cd.yaml
│ │ │ │ ├── dair_ms_ba_cd_dcnb.yaml
│ │ │ │ ├── dair_ms_ba_cd_dcne.yaml
│ │ │ │ ├── dair_ms_max_ba_cd.yaml
│ │ │ │ ├── dair_ms_upperbound.yaml
│ │ │ │ ├── dair_selfattn.yaml
│ │ │ │ ├── dair_v2vnet.yaml
│ │ │ │ ├── dair_v2vnet_robust.yaml
│ │ │ │ └── dair_v2xvit.yaml
│ │ │ ├── pj
│ │ │ │ ├── dair_disconet.yaml
│ │ │ │ ├── dair_fcooper.yaml
│ │ │ │ ├── dair_ms_ba.yaml
│ │ │ │ ├── dair_ms_ba_cd.yaml
│ │ │ │ ├── dair_selfattn.yaml
│ │ │ │ ├── dair_v2vnet.yaml
│ │ │ │ ├── dair_v2vnet_avg.yaml
│ │ │ │ └── dair_v2xvit.yaml
│ │ │ └── uncertainty
│ │ │ │ ├── dair_single.yaml
│ │ │ │ ├── dair_uncertainty_d3_dir_l2_von.yaml
│ │ │ │ └── pose_graph_pre_calc_dair.yaml
│ │ ├── fpvrcnn_intermediate_fusion.yaml
│ │ ├── opv2v
│ │ │ ├── ablation
│ │ │ │ ├── point_pillar_ms_deform_fusion_ba.yaml
│ │ │ │ ├── point_pillar_ms_deform_fusion_ba_cutting_dropping.yaml
│ │ │ │ ├── point_pillar_ms_deform_fusion_no_ba.yaml
│ │ │ │ ├── point_pillar_ms_fusion_ba.yaml
│ │ │ │ ├── point_pillar_ms_fusion_ba_cutting_dropping.yaml
│ │ │ │ ├── point_pillar_ms_fusion_ba_no_uncertainty.yaml
│ │ │ │ ├── point_pillar_ms_fusion_ba_no_uncertainty_boxthres1_5.yaml
│ │ │ │ ├── point_pillar_ms_fusion_ba_proj_first.yaml
│ │ │ │ ├── point_pillar_ms_fusion_no_ba.yaml
│ │ │ │ └── point_pillar_ss_fusion_no_ba.yaml
│ │ │ ├── camera
│ │ │ │ ├── ablation_opv2v_single_lss_wo_gt_depth_8x_fgmask.yaml
│ │ │ │ ├── ablation_opv2v_single_lss_wo_gt_depth_8x_label_smoothing.yaml
│ │ │ │ ├── lidar_single.yaml
│ │ │ │ ├── opv2v_att_fusion_w_single_wo_gt_depth_LID_w_depth_sup.yaml
│ │ │ │ ├── opv2v_att_ms_fusion_w_single_w_gt_depth_LID.yaml
│ │ │ │ ├── opv2v_att_ms_fusion_w_single_wo_gt_depth_LID.yaml
│ │ │ │ ├── opv2v_disconet_fusion_w_single_wo_gt_depth_LID_w_depth_sup.yaml
│ │ │ │ ├── opv2v_max_fusion_w_single_w_gt_depth_LID.yaml
│ │ │ │ ├── opv2v_max_fusion_w_single_wo_gt_depth_LID_w_depth_sup.yaml
│ │ │ │ ├── opv2v_max_ms_fusion_w_single_w_gt_depth_LID.yaml
│ │ │ │ ├── opv2v_max_ms_fusion_w_single_wo_gt_depth_LID.yaml
│ │ │ │ ├── opv2v_max_ms_fusion_w_single_wo_gt_depth_UD.yaml
│ │ │ │ ├── opv2v_single_lss_w_gt_depth_8x.yaml
│ │ │ │ ├── opv2v_single_lss_wo_gt_depth_8x.yaml
│ │ │ │ ├── opv2v_single_lss_wo_gt_depth_8x_UD.yaml
│ │ │ │ ├── opv2v_v2vnet_fusion_w_single_wo_gt_depth_LID_w_depth_sup.yaml
│ │ │ │ ├── opv2v_v2xvit_fusion_w_single_wo_gt_depth_LID_w_depth_sup.yaml
│ │ │ │ ├── opv2v_when2comm_fusion_w_single_wo_gt_depth_LID_w_depth_sup.yaml
│ │ │ │ └── test_opv2v_max_ms_fusion_w_single_wo_gt_depth_LID.yaml
│ │ │ ├── npj
│ │ │ │ ├── opv2v_disconet.yaml
│ │ │ │ ├── opv2v_early.yaml
│ │ │ │ ├── opv2v_fcooper.yaml
│ │ │ │ ├── opv2v_fpvrcnn.yaml
│ │ │ │ ├── opv2v_fvoxelrcnn.yaml
│ │ │ │ ├── opv2v_mash.yaml
│ │ │ │ ├── opv2v_ms_ba.yaml
│ │ │ │ ├── opv2v_ms_ba_boxthres1_5.yaml
│ │ │ │ ├── opv2v_ms_ba_dcnb.yaml
│ │ │ │ ├── opv2v_ms_ba_dcne.yaml
│ │ │ │ ├── opv2v_ms_max_ba.yaml
│ │ │ │ ├── opv2v_ms_max_boxthres1_5_ba.yaml
│ │ │ │ ├── opv2v_self_att.yaml
│ │ │ │ ├── opv2v_v2vnet.yaml
│ │ │ │ ├── opv2v_v2vnet_robust.yaml
│ │ │ │ └── opv2v_v2xvit.yaml
│ │ │ └── uncertainty
│ │ │ │ ├── [deprecated]point_pillar_d3_l2_dir_limit.yaml
│ │ │ │ ├── [deprecated]point_pillar_d3_l2_dir_nolimit.yaml
│ │ │ │ ├── [deprecated]point_pillar_d3_von_dir_nolimit.yaml
│ │ │ │ ├── point_pillar_d3_von_dir_limit.yaml
│ │ │ │ └── pose_graph_pre_calc_opv2v.yaml
│ │ ├── pixor_early_fusion.yaml
│ │ ├── pixor_intermediate_fusion.yaml
│ │ ├── pixor_late_fusion.yaml
│ │ ├── point_pillar_disconet.yaml
│ │ ├── point_pillar_early_fusion.yaml
│ │ ├── point_pillar_fcooper.yaml
│ │ ├── point_pillar_intermediate_fusion.yaml
│ │ ├── point_pillar_intermediate_fusion_v2x.yaml
│ │ ├── point_pillar_late_fusion.yaml
│ │ ├── point_pillar_v2vnet.yaml
│ │ ├── point_pillar_v2xvit.yaml
│ │ ├── pose_graph_pre_calc_dair.yaml
│ │ ├── pose_graph_pre_calc_opv2v.yaml
│ │ ├── pose_graph_pre_calc_v2x2.yaml
│ │ ├── second_early_fusion.yaml
│ │ ├── second_intermediate_fusion.yaml
│ │ ├── second_late_fusion.yaml
│ │ ├── v2x_v2
│ │ │ ├── npj
│ │ │ │ ├── v2x2_disconet.yaml
│ │ │ │ ├── v2x2_early.yaml
│ │ │ │ ├── v2x2_fcooper.yaml
│ │ │ │ ├── v2x2_fpvrcnn.yaml
│ │ │ │ ├── v2x2_mash.yaml
│ │ │ │ ├── v2x2_ms_ba.yaml
│ │ │ │ ├── v2x2_ms_ba_boxthres1_5.yaml
│ │ │ │ ├── v2x2_ms_ba_cd.yaml
│ │ │ │ ├── v2x2_ms_ba_dcnb.yaml
│ │ │ │ ├── v2x2_ms_ba_dcne.yaml
│ │ │ │ ├── v2x2_ms_max_ba.yaml
│ │ │ │ ├── v2x2_ms_max_ba_boxthres1_5.yaml
│ │ │ │ ├── v2x2_ms_upbound.yaml
│ │ │ │ ├── v2x2_selfattn.yaml
│ │ │ │ ├── v2x2_v2vnet.yaml
│ │ │ │ ├── v2x2_v2vnet_robust.yaml
│ │ │ │ └── v2x2_v2xvit.yaml
│ │ │ ├── pj
│ │ │ │ ├── v2v2_mash.yaml
│ │ │ │ ├── v2x2_disconet.yaml
│ │ │ │ ├── v2x2_early.yaml
│ │ │ │ ├── v2x2_fcooper.yaml
│ │ │ │ ├── v2x2_fpvrcnn.yaml
│ │ │ │ ├── v2x2_selfattn.yaml
│ │ │ │ ├── v2x2_v2vnet.yaml
│ │ │ │ ├── v2x2_v2vnet_robust.yaml
│ │ │ │ └── v2x2_v2xvit.yaml
│ │ │ └── uncertainty
│ │ │ │ ├── pose_graph_pre_calc_v2x2.yaml
│ │ │ │ ├── v2x2_uncertainty_d3_dir_l2_von.yaml
│ │ │ │ ├── v2x2_uncertainty_d3_dir_l2_von_s0.yaml
│ │ │ │ └── v2x2_uncertainty_d3_dir_l2_von_v0.01.yaml
│ │ ├── visualization_dair.yaml
│ │ ├── visualization_opv2v.yaml
│ │ ├── visualization_v2x.yaml
│ │ ├── voxelnet_early_fusion.yaml
│ │ ├── voxelnet_intermediate_fusion.yaml
│ │ └── voxelnet_late_fusion.yaml
│ ├── dairv2x
│ │ ├── lidar_only
│ │ │ ├── coalign
│ │ │ │ ├── SECOND_uncertainty.yaml
│ │ │ │ ├── pointpillar_coalign.yaml
│ │ │ │ ├── pointpillar_coalign_delay.yaml
│ │ │ │ └── precalc.yaml
│ │ │ ├── dair_centerpoint_disconet.yaml
│ │ │ ├── dair_centerpoint_early.yaml
│ │ │ ├── dair_centerpoint_fcooper.yaml
│ │ │ ├── dair_centerpoint_inter.yaml
│ │ │ ├── dair_centerpoint_late.yaml
│ │ │ ├── dair_centerpoint_multiscale_attn.yaml
│ │ │ ├── dair_centerpoint_multiscale_max.yaml
│ │ │ ├── dair_centerpoint_v2vnet.yaml
│ │ │ ├── dair_centerpoint_v2xvit.yaml
│ │ │ ├── dair_centerpoint_when2com.yaml
│ │ │ ├── dair_centerpoint_where2comm.yaml
│ │ │ ├── dair_pointpillar_disconet_delay.yaml
│ │ │ ├── dair_pointpillar_early_delay.yaml
│ │ │ ├── dair_pointpillar_fcooper_delay.yaml
│ │ │ ├── dair_pointpillar_late_delay.yaml
│ │ │ ├── dair_pointpillar_single_delay.yaml
│ │ │ ├── dair_pointpillar_v2xnet_delay.yaml
│ │ │ ├── dair_pointpillar_v2xnet_delay_wsz.yaml
│ │ │ ├── dair_pointpillar_v2xvit_delay.yaml
│ │ │ ├── dair_pointpillar_v2xvit_delay_wsz.yaml
│ │ │ └── dair_pointpillar_where2com_delay.yaml
│ │ └── visualization_dair.yaml
│ ├── opv2v
│ │ ├── HEAL
│ │ │ ├── end_to_end_training_convnext_singlesup
│ │ │ │ ├── m1_end2end.yaml
│ │ │ │ ├── m1m2_end2end.yaml
│ │ │ │ ├── m1m2m3_end2end.yaml
│ │ │ │ ├── m1m2m3m4_end2end.yaml
│ │ │ │ ├── m1m3_end2end.yaml
│ │ │ │ ├── m2_end2end.yaml
│ │ │ │ └── m3_end2end.yaml
│ │ │ ├── final
│ │ │ │ ├── 4_modality_late.yaml
│ │ │ │ ├── m1_base.yaml
│ │ │ │ ├── m1m2_base.yaml
│ │ │ │ ├── m1m2m3_base.yaml
│ │ │ │ ├── m1m3_base.yaml
│ │ │ │ ├── m2_base.yaml
│ │ │ │ └── m3_base.yaml
│ │ │ ├── pretrain_align_to_m1
│ │ │ │ ├── m2_alignto_m1.yaml
│ │ │ │ ├── m3_alignto_m1.yaml
│ │ │ │ └── m4_alignto_m1.yaml
│ │ │ ├── pretrain_align_to_m1m2
│ │ │ │ ├── m3_alignto_m1m2.yaml
│ │ │ │ └── m4_alignto_m1m2.yaml
│ │ │ ├── pretrain_align_to_m1m2m3
│ │ │ │ └── m4_align_to_m1m2m3.yaml
│ │ │ ├── pretrain_align_to_m1m3
│ │ │ │ ├── m2_align_to_m1m3.yaml
│ │ │ │ └── m4_align_to_m1m3.yaml
│ │ │ ├── pretrain_align_to_m2
│ │ │ │ ├── m1_alignto_m2.yaml
│ │ │ │ ├── m3_alignto_m2.yaml
│ │ │ │ └── m4_alignto_m2.yaml
│ │ │ ├── pretrain_align_to_m3
│ │ │ │ ├── m1_align_to_m3.yaml
│ │ │ │ ├── m2_align_to_m3.yaml
│ │ │ │ └── m4_align_to_m3.yaml
│ │ │ └── single_model
│ │ │ │ ├── m1_pointpillar_pretrain.yaml
│ │ │ │ ├── m2_LSSeff48_pretrain.yaml
│ │ │ │ ├── m3_SECOND32_pretrain.yaml
│ │ │ │ └── m4_LSSres48_pretrain.yaml
│ │ ├── cam_only
│ │ │ ├── lss_att.yaml
│ │ │ ├── lss_disco.yaml
│ │ │ ├── lss_fcooper.yaml
│ │ │ ├── lss_ms_att.yaml
│ │ │ ├── lss_ms_max.yaml
│ │ │ ├── lss_single_efficientnet.yaml
│ │ │ ├── lss_single_efficientnet_72_48.yaml
│ │ │ ├── lss_single_efficientnet_UD.yaml
│ │ │ ├── lss_single_resnet101.yaml
│ │ │ ├── lss_v2vnet.yaml
│ │ │ ├── lss_v2xvit.yaml
│ │ │ ├── lss_voxel_single_efficientnet.yaml
│ │ │ └── lss_voxel_single_resnet101.yaml
│ │ ├── heter
│ │ │ ├── SECOND_lss_fcooper.yaml
│ │ │ ├── SECOND_lss_selfatt.yaml
│ │ │ ├── heterception_.yaml
│ │ │ └── late
│ │ │ │ ├── SECOND_lss_LID_72_48.yaml
│ │ │ │ ├── pointpillar_lss_LID.yaml
│ │ │ │ ├── pointpillar_lss_LID_48_48.yaml
│ │ │ │ ├── pointpillar_lss_LID_72_48.yaml
│ │ │ │ ├── pointpillar_lss_UD.yaml
│ │ │ │ └── pointpillar_lss_UD_72_48.yaml
│ │ ├── lidar_only
│ │ │ ├── SECOND.yaml
│ │ │ ├── SECOND_early.yaml
│ │ │ ├── center_point_disconet.yaml
│ │ │ ├── center_point_early.yaml
│ │ │ ├── center_point_fcooper.yaml
│ │ │ ├── center_point_late.yaml
│ │ │ ├── center_point_single.yaml
│ │ │ ├── center_point_v2vnet.yaml
│ │ │ ├── center_point_v2xvit.yaml
│ │ │ ├── center_point_when2com.yaml
│ │ │ ├── centerpoint_v2xvit.yaml
│ │ │ ├── coalign
│ │ │ │ ├── SECOND_uncertainty.yaml
│ │ │ │ ├── pointpillar_coalign.yaml
│ │ │ │ ├── pointpillar_coalign_delay.yaml
│ │ │ │ └── precalc.yaml
│ │ │ ├── fpvrcnn.yaml
│ │ │ ├── fvoxelrcnn.yaml
│ │ │ ├── opv2v_centerpoint_where2comm_multisweep_w_shrink_wflow.yaml
│ │ │ ├── pointpillar_disconet.yaml
│ │ │ ├── pointpillar_disconet_delay.yaml
│ │ │ ├── pointpillar_early.yaml
│ │ │ ├── pointpillar_early_delay.yaml
│ │ │ ├── pointpillar_fcooper.yaml
│ │ │ ├── pointpillar_fcooper_delay.yaml
│ │ │ ├── pointpillar_fcooper_resnet.yaml
│ │ │ ├── pointpillar_late_delay.yaml
│ │ │ ├── pointpillar_multiscale_att.yaml
│ │ │ ├── pointpillar_multiscale_max.yaml
│ │ │ ├── pointpillar_no_fusion_delay.yaml
│ │ │ ├── pointpillar_single.yaml
│ │ │ ├── pointpillar_v2vnet.yaml
│ │ │ ├── pointpillar_v2vnet_delay.yaml
│ │ │ ├── pointpillar_v2vnet_delay_wsz.yaml
│ │ │ ├── pointpillar_v2vnet_delay_wsz_2.yaml
│ │ │ ├── pointpillar_v2xvit.yaml
│ │ │ ├── pointpillar_v2xvit_delay.yaml
│ │ │ ├── pointpillar_when2com_delay.yaml
│ │ │ ├── pointpillar_where2com_delay.yaml
│ │ │ ├── readme.md
│ │ │ └── where2comm_codebook.yaml
│ │ └── visualization_opv2v.yaml
│ ├── uav
│ │ └── uav_centerpoint_where2comm.yaml
│ ├── v2v4real
│ │ ├── lidar_only
│ │ │ ├── pointpillar_early.yaml
│ │ │ ├── pointpillar_fcooper.yaml
│ │ │ ├── pointpillar_multiscale_att.yaml
│ │ │ ├── pointpillar_multiscale_max.yaml
│ │ │ ├── pointpillar_single.yaml
│ │ │ ├── pointpillar_v2vnet.yaml
│ │ │ └── pointpillar_v2xvit.yaml
│ │ └── visualization_v2v4real.yaml
│ ├── v2xset
│ │ ├── cam_only
│ │ │ ├── lss_att.yaml
│ │ │ ├── lss_disco.yaml
│ │ │ ├── lss_fcooper.yaml
│ │ │ ├── lss_ms_att.yaml
│ │ │ ├── lss_ms_max.yaml
│ │ │ ├── lss_single_efficientnet.yaml
│ │ │ ├── lss_single_resnet101.yaml
│ │ │ ├── lss_v2vnet.yaml
│ │ │ └── lss_v2xvit.yaml
│ │ ├── heter
│ │ │ ├── LModel1_CModel1
│ │ │ │ ├── identity_layer1.yaml
│ │ │ │ ├── identity_layer1_att.yaml
│ │ │ │ ├── identity_layer1_singlesup.yaml
│ │ │ │ ├── identity_layer3.yaml
│ │ │ │ ├── lss_eff_pretrain.yaml
│ │ │ │ ├── lss_resnet101_pretrain_m3.yaml
│ │ │ │ ├── pointpillar_pretrain.yaml
│ │ │ │ ├── resnet1x1_layer1.yaml
│ │ │ │ ├── resnet1x1_layer1_att.yaml
│ │ │ │ ├── resnet1x1_layer1_singlesup.yaml
│ │ │ │ ├── sdta_layer1.yaml
│ │ │ │ ├── sdta_layer1_aligner6.yaml
│ │ │ │ ├── sdta_layer1_aligner6_singlesup.yaml
│ │ │ │ ├── sdta_layer1_aligner6_singlesup_kd10.yaml
│ │ │ │ ├── sdta_layer1_aligner6_singlesup_kd100.yaml
│ │ │ │ ├── sdta_layer1_aligner6_singlesup_kd1000.yaml
│ │ │ │ ├── sdta_layer1_singlesup.yaml
│ │ │ │ ├── sdta_layer3.yaml
│ │ │ │ └── second_pretrain_m3.yaml
│ │ │ ├── LModel1_CModel1_old
│ │ │ │ ├── identity_layer1.yaml
│ │ │ │ ├── identity_layer1_att.yaml
│ │ │ │ ├── identity_layer3.yaml
│ │ │ │ ├── resnet1x1_layer1.yaml
│ │ │ │ ├── resnet1x1_layer1_att.yaml
│ │ │ │ ├── resnet1x1_layer1_singlesup.yaml
│ │ │ │ ├── sdta_layer1.yaml
│ │ │ │ ├── sdta_layer1_aligner6.yaml
│ │ │ │ ├── sdta_layer1_aligner6_singlesup.yaml
│ │ │ │ ├── sdta_layer1_aligner6_singlesup_kd10.yaml
│ │ │ │ ├── sdta_layer1_aligner6_singlesup_kd100.yaml
│ │ │ │ ├── sdta_layer1_aligner6_singlesup_kd1000.yaml
│ │ │ │ ├── sdta_layer1_singlesup.yaml
│ │ │ │ └── sdta_layer3.yaml
│ │ │ ├── backup
│ │ │ │ ├── SECOND_lss_LID_72_48.yaml
│ │ │ │ ├── pointpillars_lss_LID_72_48.yaml
│ │ │ │ ├── pointpillars_lss_LID_72_48_2.yaml
│ │ │ │ ├── pointpillars_lss_LID_72_48_down16x.yaml
│ │ │ │ └── v2
│ │ │ │ │ ├── lidaronly_resnet1x1_lss_16x.yaml
│ │ │ │ │ └── lidaronly_resnet1x1_lss_8x.yaml
│ │ │ ├── cam_liftsplat_pretrain.yaml
│ │ │ ├── cam_liftsplat_pretrain_16x_64c.yaml
│ │ │ ├── cam_liftsplat_pretrain_resnet101.yaml
│ │ │ ├── lidar_SECOND_pretrain.yaml
│ │ │ └── lidar_pointpillars_pretrain.yaml
│ │ └── lidar_only
│ │ │ ├── SECOND.yaml
│ │ │ ├── SECOND_early.yaml
│ │ │ ├── coalign
│ │ │ ├── SECOND_uncertainty.yaml
│ │ │ ├── pointpillar_coalign.yaml
│ │ │ └── precalc.yaml
│ │ │ ├── fpvrcnn.yaml
│ │ │ ├── fvoxelrcnn.yaml
│ │ │ ├── pointpillar_disconet.yaml
│ │ │ ├── pointpillar_early.yaml
│ │ │ ├── pointpillar_fcooper.yaml
│ │ │ ├── pointpillar_fcooper_resnet.yaml
│ │ │ ├── pointpillar_multiscale_att.yaml
│ │ │ ├── pointpillar_multiscale_max.yaml
│ │ │ ├── pointpillar_multiscale_max_noresnet.yaml
│ │ │ ├── pointpillar_single.yaml
│ │ │ ├── pointpillar_single_resnetbackbone.yaml
│ │ │ ├── pointpillar_v2vnet.yaml
│ │ │ ├── pointpillar_v2xvit.yaml
│ │ │ ├── pointpillar_when2comm.yaml
│ │ │ ├── pointpillar_where2comm.yaml
│ │ │ └── readme.md
│ ├── v2xsim2
│ │ ├── lidaronly
│ │ │ └── coalign
│ │ │ │ ├── SECOND_uncertainty.yaml
│ │ │ │ ├── pointpillar_coalign.yaml
│ │ │ │ └── precalc.yaml
│ │ └── visualization.yaml
│ ├── yaml_utils.py
│ └── yaml_utils_ori.py
├── loss
│ ├── __init__.py
│ ├── backup
│ │ ├── heterception_loss.py.2stage
│ │ ├── heterception_redet_loss.py
│ │ ├── point_pillar_dir_depth_loss.py
│ │ ├── point_pillar_mash_loss.py
│ │ ├── point_pillar_uncertainty_loss.py
│ │ └── point_pillar_v2v_robust_loss.py
│ ├── center_point_codebook_loss.py
│ ├── center_point_flow_loss.py
│ ├── center_point_loss.py
│ ├── center_point_test_loss.py
│ ├── ciassd_loss.py
│ ├── fpvrcnn_loss.py
│ ├── heterception_loss.py
│ ├── pixor_loss.py
│ ├── point_pillar_depth_adv_loss.py
│ ├── point_pillar_depth_loss.py
│ ├── point_pillar_disconet_loss.py
│ ├── point_pillar_loss.py
│ ├── point_pillar_uncertainty_loss.py
│ ├── uav_base_loss.py
│ ├── uav_center_point_loss.py
│ ├── uncertainty_loss_old.py
│ └── voxel_net_loss.py
├── models
│ ├── __init__.py
│ ├── center_point.py
│ ├── center_point_baseline.py
│ ├── center_point_baseline_max.py
│ ├── center_point_baseline_multiscale.py
│ ├── center_point_intermediate.py
│ ├── center_point_where2comm.py
│ ├── center_point_where2comm_kalman.py
│ ├── center_point_where2comm_multisweep.py
│ ├── center_point_where2comm_multisweep_flow.py
│ ├── center_point_where2comm_multisweep_flow_attn.py
│ ├── center_point_where2comm_multisweep_flow_bp.py
│ ├── center_point_where2comm_multisweep_flow_infer.py
│ ├── center_point_where2comm_multisweep_flow_ms.py
│ ├── center_point_where2comm_multisweep_flow_ss.py
│ ├── center_point_where2comm_multisweep_kalman_track.py
│ ├── ciassd.py
│ ├── comm_modules
│ │ ├── where2comm.py
│ │ ├── where2comm1.py
│ │ └── where2comm_ori.py
│ ├── da_modules
│ │ └── gsl.py
│ ├── fpvrcnn.py
│ ├── fuse_modules
│ │ ├── __init__.py
│ │ ├── att_fuse.py
│ │ ├── deform_fuse.py
│ │ ├── deform_transformer_fuse.py
│ │ ├── disco_fuse.py
│ │ ├── f_cooper_fuse.py
│ │ ├── fuse_utils.py
│ │ ├── fuse_utils_ori.py
│ │ ├── fusion_in_one.py
│ │ ├── fusion_in_one_ori.py
│ │ ├── fusion_in_one_pjt.py
│ │ ├── hmvit
│ │ │ ├── base_transformer.py
│ │ │ ├── hetero_decoder.py
│ │ │ ├── hetero_fusion.py
│ │ │ ├── split_attn.py
│ │ │ └── torch_transformation_utils.py
│ │ ├── max_fuse.py
│ │ ├── mean_fuse.py
│ │ ├── modality_aware_fusion.py
│ │ ├── ms_max_fuse.py
│ │ ├── pointwise_fuse.py
│ │ ├── pyramid_fuse.py
│ │ ├── self_attn.py
│ │ ├── swap_fusion_modules.py
│ │ ├── temporal_fuse.py
│ │ ├── transformer.py
│ │ ├── transformer_fuse.py
│ │ ├── v2v_fuse.py
│ │ ├── v2xvit_fuse[not_use].py
│ │ ├── when2com_fuse.py
│ │ ├── where2comm.py
│ │ ├── where2comm1.py
│ │ └── where2comm_attn.py
│ ├── fvoxelrcnn.py
│ ├── heter_SECOND_late.py
│ ├── heter_SECOND_lift_splat.py
│ ├── heter_SECOND_lift_splat_2stage.py
│ ├── heter_SECOND_lift_splat_late.py
│ ├── heter_encoders.py
│ ├── heter_model_baseline.py
│ ├── heter_model_baseline_dair_error.py
│ ├── heter_model_baseline_new.py
│ ├── heter_model_baseline_ori.py
│ ├── heter_model_late.py
│ ├── heter_model_shared_head.py
│ ├── heter_model_shared_head_changeable.py
│ ├── heter_model_shared_head_channel16.py
│ ├── heter_model_shared_head_featuremap.py
│ ├── heter_model_shared_head_late.py
│ ├── heter_model_shared_head_ori.py
│ ├── heter_model_shared_head_solver.py
│ ├── heter_model_shared_head_withoutcodebook.py
│ ├── heter_model_sharedhead.py
│ ├── heter_model_sharedhead_get_feature.py
│ ├── heter_model_sharedhead_loadcodebook.py
│ ├── heter_model_sharedhead_with_feature.py
│ ├── heter_pointpillars_lift_splat.py
│ ├── heter_pointpillars_lift_splat.py.backup
│ ├── heter_pointpillars_lift_splat_late.py
│ ├── heter_pointpillars_lift_splat_late.py.backup
│ ├── heter_pointpillars_lift_splat_v2 copy.py
│ ├── heter_pointpillars_lift_splat_v2.py
│ ├── heter_pointpillars_lift_splat_v2_fordebug.py
│ ├── heter_pointpillars_lift_splat_v2_kd.py
│ ├── heter_pointpillars_lift_splat_v2_layer0.py
│ ├── heter_pointpillars_lift_splat_v2_loadcodebook.py
│ ├── heter_pointpillars_lift_splat_v2_with_feature.py
│ ├── heter_pointpillars_lift_splat_v2_without_codebook.py
│ ├── heter_pointpillars_lift_splat_v3.py
│ ├── hmvit.py
│ ├── lift_splat_shoot.py
│ ├── lift_splat_shoot_intermediate.py
│ ├── lift_splat_shoot_voxel.py
│ ├── pixor.py
│ ├── pixor_intermediate.py
│ ├── point_pillar.py
│ ├── point_pillar_.py
│ ├── point_pillar_baseline.py
│ ├── point_pillar_baseline_multiscale.py
│ ├── point_pillar_deform_transformer.py
│ ├── point_pillar_disconet.py
│ ├── point_pillar_disconet_teacher.py
│ ├── point_pillar_intermediate.py
│ ├── point_pillar_mash.py
│ ├── point_pillar_uncertainty.py
│ ├── point_pillar_v2vnet_robust.py
│ ├── point_pillar_where2comm.py
│ ├── point_pillar_where2comm_bp.py
│ ├── point_pillar_where2comm_kalman.py
│ ├── point_pillar_where2comm_kalman_.py
│ ├── point_pillar_where2comm_kalman_allocate.py
│ ├── point_pillar_where2comm_kalman_allocate1.py
│ ├── point_pillar_where2comm_kalman_allocate_frame.py
│ ├── point_pillar_where2comm_kalman_allocate_latency.py
│ ├── point_pillar_where2comm_kalman_bp.py
│ ├── point_pillar_where2comm_kalman_ms.py
│ ├── point_pillar_where2comm_kalman_multiscale.py
│ ├── point_pillar_where2comm_kalman_szwei.py
│ ├── point_pillar_where2comm_multisweep_flow.py
│ ├── second.py
│ ├── second_intermediate.py
│ ├── second_ssfa.py
│ ├── second_ssfa_.py
│ ├── second_ssfa_uncertainty.py
│ ├── sub_modules
│ │ ├── MotionNet.py
│ │ ├── SyncLSTM.py
│ │ ├── att_bev_backbone.py
│ │ ├── auto_encoder.py
│ │ ├── base_bev_backbone.py
│ │ ├── base_bev_backbone_resnet.py
│ │ ├── base_bev_backbone_resnet_channel16.py
│ │ ├── base_bev_backbone_resnet_v2.py
│ │ ├── base_transformer.py
│ │ ├── base_transformer_ori.py
│ │ ├── bev_roi_head.py
│ │ ├── bevformer.py
│ │ ├── box_align.py
│ │ ├── box_align_v2.py
│ │ ├── cbam.py
│ │ ├── cia_ssd_utils.py
│ │ ├── codebook.py
│ │ ├── codebook_test.py
│ │ ├── convgru.py
│ │ ├── convgru_ori.py
│ │ ├── dcn_net.py
│ │ ├── deformable_transformer_backbone.py
│ │ ├── dense_head.py
│ │ ├── detr_module.py
│ │ ├── discriminator.py
│ │ ├── downsample_conv.py
│ │ ├── eqmotion
│ │ │ ├── __init__.py
│ │ │ ├── gcl_t.py
│ │ │ └── model_t.py
│ │ ├── feature_alignnet.py
│ │ ├── feature_alignnet_modules.py
│ │ ├── functions
│ │ │ ├── __init__.py
│ │ │ └── ms_deform_attn_func.py
│ │ ├── height_compression.py
│ │ ├── hmsa.py
│ │ ├── lss_submodule.py
│ │ ├── mash_utils.py
│ │ ├── matcher.py
│ │ ├── matcher_sizhe.py
│ │ ├── matcher_v2.py
│ │ ├── matcher_v3.py
│ │ ├── mean_vfe.py
│ │ ├── ms_deform_attn.py
│ │ ├── mswin.py
│ │ ├── naive_compress.py
│ │ ├── pillar_vfe.py
│ │ ├── point_pillar_scatter.py
│ │ ├── pose_graph_optim.py
│ │ ├── quantizer.py
│ │ ├── refactor.py
│ │ ├── resblock.py
│ │ ├── resblock_new.py
│ │ ├── roi_head.py
│ │ ├── sparse_backbone_3d.py
│ │ ├── split_attn.py
│ │ ├── temporal_compensation.py
│ │ ├── torch_transformation_utils.py
│ │ ├── v2v_robust_module.py
│ │ ├── v2xvit_basic.py
│ │ ├── view_embedding.py
│ │ ├── voxel_rcnn_head.py
│ │ ├── voxel_roi_pooling.py
│ │ └── vsa.py
│ ├── voxel_net.py
│ └── voxel_net_intermediate.py
├── pcdet_utils
│ ├── __init__.py
│ ├── iou3d_nms
│ │ ├── iou3d_nms_utils.py
│ │ └── src
│ │ │ ├── iou3d_cpu.cpp
│ │ │ ├── iou3d_cpu.h
│ │ │ ├── iou3d_nms.cpp
│ │ │ ├── iou3d_nms.h
│ │ │ ├── iou3d_nms_api.cpp
│ │ │ └── iou3d_nms_kernel.cu
│ ├── pointnet2
│ │ ├── pointnet2_batch
│ │ │ ├── pointnet2_modules.py
│ │ │ ├── pointnet2_utils.py
│ │ │ └── src
│ │ │ │ ├── ball_query.cpp
│ │ │ │ ├── ball_query_gpu.cu
│ │ │ │ ├── ball_query_gpu.h
│ │ │ │ ├── cuda_utils.h
│ │ │ │ ├── group_points.cpp
│ │ │ │ ├── group_points_gpu.cu
│ │ │ │ ├── group_points_gpu.h
│ │ │ │ ├── interpolate.cpp
│ │ │ │ ├── interpolate_gpu.cu
│ │ │ │ ├── interpolate_gpu.h
│ │ │ │ ├── pointnet2_api.cpp
│ │ │ │ ├── sampling.cpp
│ │ │ │ ├── sampling_gpu.cu
│ │ │ │ └── sampling_gpu.h
│ │ └── pointnet2_stack
│ │ │ ├── pointnet2_modules.py
│ │ │ ├── pointnet2_utils.py
│ │ │ ├── src
│ │ │ ├── ball_query.cpp
│ │ │ ├── ball_query_gpu.cu
│ │ │ ├── ball_query_gpu.h
│ │ │ ├── cuda_utils.h
│ │ │ ├── group_points.cpp
│ │ │ ├── group_points_gpu.cu
│ │ │ ├── group_points_gpu.h
│ │ │ ├── interpolate.cpp
│ │ │ ├── interpolate_gpu.cu
│ │ │ ├── interpolate_gpu.h
│ │ │ ├── pointnet2_api.cpp
│ │ │ ├── sampling.cpp
│ │ │ ├── sampling_gpu.cu
│ │ │ ├── sampling_gpu.h
│ │ │ ├── vector_pool.cpp
│ │ │ ├── vector_pool_gpu.cu
│ │ │ ├── vector_pool_gpu.h
│ │ │ ├── voxel_query.cpp
│ │ │ ├── voxel_query_gpu.cu
│ │ │ └── voxel_query_gpu.h
│ │ │ ├── voxel_pool_modules.py
│ │ │ └── voxel_query_utils.py
│ ├── roiaware_pool3d
│ │ ├── roiaware_pool3d_utils.py
│ │ └── src
│ │ │ ├── roiaware_pool3d.cpp
│ │ │ └── roiaware_pool3d_kernel.cu
│ └── setup.py
├── tools
│ ├── __init__.py
│ ├── codebook_trainer.py
│ ├── config_generate.py
│ ├── corp.py
│ ├── data_process.py
│ ├── dataset_calculation.py
│ ├── debug_utils.py
│ ├── eval_utils.py
│ ├── get_data.py
│ ├── get_feature.py
│ ├── heal_tools.py
│ ├── inference_2_modality_dairv2x.py
│ ├── inference_4_modality.py
│ ├── inference_assigned_modality.py
│ ├── inference_comm_bp.py
│ ├── inference_comm_bp_plus.py
│ ├── inference_comm_w_noise.py
│ ├── inference_comm_w_noise_withsolver.py
│ ├── inference_for_visualization.py
│ ├── inference_speed.py
│ ├── inference_test.py
│ ├── inference_tiny.py
│ ├── inference_track.py
│ ├── inference_uav.py
│ ├── inference_utils.py
│ ├── inference_w_noise.py
│ ├── multi_gpu_utils.py
│ ├── params_calc.py
│ ├── pose_graph_evaluate.py
│ ├── pose_graph_pre_calc.py
│ ├── track
│ │ ├── AB3DMOT.py
│ │ ├── covariance.py
│ │ ├── eval_mot.py
│ │ ├── eval_track.sh
│ │ ├── run_mot_challenge.py
│ │ ├── sort.py
│ │ ├── v2v4real
│ │ │ └── run.sh
│ │ ├── vis_track.py
│ │ └── vis_track_2.py
│ ├── train.py
│ ├── train2.py
│ ├── train_codebook_with_feature.py
│ ├── train_ddp.py
│ ├── train_utils.py
│ ├── train_w_kd.py
│ └── vis_comm_utils.py
├── track
│ ├── Makefile
│ ├── README.md
│ ├── TrackEval
│ │ ├── .gitignore
│ │ ├── LICENSE
│ │ ├── Readme.md
│ │ ├── docs
│ │ │ ├── How_To
│ │ │ │ └── Add_a_new_metric.md
│ │ │ ├── MOTChallenge-Official
│ │ │ │ └── Readme.md
│ │ │ └── RobMOTS-Official
│ │ │ │ └── Readme.md
│ │ ├── scripts
│ │ │ ├── comparison_plots.py
│ │ │ ├── run_bdd.py
│ │ │ ├── run_davis.py
│ │ │ ├── run_headtracking_challenge.py
│ │ │ ├── run_kitti.py
│ │ │ ├── run_kitti_mots.py
│ │ │ ├── run_mot_challenge.py
│ │ │ ├── run_mots_challenge.py
│ │ │ ├── run_rob_mots.py
│ │ │ ├── run_tao.py
│ │ │ └── run_youtube_vis.py
│ │ ├── tests
│ │ │ ├── test_all_quick.py
│ │ │ ├── test_davis.py
│ │ │ ├── test_metrics.py
│ │ │ ├── test_mot17.py
│ │ │ └── test_mots.py
│ │ └── trackeval
│ │ │ ├── __init__.py
│ │ │ ├── _timing.py
│ │ │ ├── baselines
│ │ │ ├── __init__.py
│ │ │ ├── baseline_utils.py
│ │ │ ├── non_overlap.py
│ │ │ ├── pascal_colormap.py
│ │ │ ├── stp.py
│ │ │ ├── thresholder.py
│ │ │ └── vizualize.py
│ │ │ ├── datasets
│ │ │ ├── __init__.py
│ │ │ ├── _base_dataset.py
│ │ │ ├── bdd100k.py
│ │ │ ├── davis.py
│ │ │ ├── head_tracking_challenge.py
│ │ │ ├── kitti_2d_box.py
│ │ │ ├── kitti_mots.py
│ │ │ ├── mot_challenge_2d_box.py
│ │ │ ├── mots_challenge.py
│ │ │ ├── rob_mots.py
│ │ │ ├── rob_mots_classmap.py
│ │ │ ├── run_rob_mots.py
│ │ │ ├── tao.py
│ │ │ └── youtube_vis.py
│ │ │ ├── eval.py
│ │ │ ├── metrics
│ │ │ ├── __init__.py
│ │ │ ├── _base_metric.py
│ │ │ ├── clear.py
│ │ │ ├── count.py
│ │ │ ├── hota.py
│ │ │ ├── identity.py
│ │ │ ├── ideucl.py
│ │ │ ├── j_and_f.py
│ │ │ ├── track_map.py
│ │ │ └── vace.py
│ │ │ ├── plotting.py
│ │ │ └── utils.py
│ ├── create_data_com.py
│ ├── create_seqmaps.py
│ ├── prep_det_res.py
│ ├── run_multi_agent_mot_challenge.py
│ └── sort
│ │ ├── .gitignore
│ │ ├── LICENSE
│ │ ├── README.md
│ │ └── sort.py
├── utils
│ ├── __init__.py
│ ├── bessel_utils.py
│ ├── box_overlaps.pyx
│ ├── box_utils.py
│ ├── box_utils_ori.py
│ ├── camera_utils.py
│ ├── cleanup_utils.py
│ ├── codebook_utils.py
│ ├── common_utils.py
│ ├── draco_compression.py
│ ├── eval_utils.py
│ ├── eval_utils.py.backup
│ ├── flow_utils.py
│ ├── heter_utils.py
│ ├── heter_utils_ori.py
│ ├── img2hdf5.py
│ ├── keypoint_utils.py
│ ├── max_consensus.py
│ ├── model_utils.py
│ ├── pcd_utils.py
│ ├── pose_utils.py
│ ├── setup.py
│ ├── spconv_utils.py
│ ├── subsampling_utils.py
│ └── transformation_utils.py
├── version.py
└── visualization
│ ├── __init__.py
│ ├── debug_plot.py
│ ├── draw_box_align
│ ├── draw_optimization.py
│ ├── img2video.py
│ └── inference_for_video_vis.py
│ ├── draw_fancy
│ ├── collaboration_view.py
│ ├── draw_fancy_dataset.py
│ ├── draw_fancy_datasetv2x.py
│ ├── img2video.py
│ ├── location_in_bev.py
│ ├── scene_overview.py
│ └── single_view.py
│ ├── my_vis.py
│ ├── simple_plot3d
│ ├── __init__.py
│ ├── canvas_3d.py
│ └── canvas_bev.py
│ ├── simple_vis.py
│ ├── vis_comm.py
│ ├── vis_data_sequence.py
│ ├── vis_data_sequence_allcav.py
│ ├── vis_data_sequence_dairv2x.py
│ ├── vis_data_sequence_opv2v.py
│ ├── vis_data_sequence_opv2v_one_sample.py
│ ├── vis_data_sequence_v2v4real.py
│ ├── vis_data_sequence_v2xsim2.py
│ ├── vis_data_sequence_v2xsim2_delay.py
│ └── vis_utils.py
├── requirements.txt
└── setup.py
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | .DS_Store
7 |
8 | # C extensions
9 | *.so
10 |
11 | # Distribution / packaging
12 | .Python
13 | build/
14 | develop-eggs/
15 | dist/
16 | downloads/
17 | eggs/
18 | .eggs/
19 | lib/
20 | lib64/
21 | parts/
22 | sdist/
23 | var/
24 | wheels/
25 | pip-wheel-metadata/
26 | share/python-wheels/
27 | *.egg-info/
28 | .installed.cfg
29 | *.egg
30 | MANIFEST
31 |
32 | # PyInstaller
33 | # Usually these files are written by a python script from a template
34 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
35 | *.manifest
36 | *.spec
37 |
38 | # Installer logs
39 | pip-log.txt
40 | pip-delete-this-directory.txt
41 |
42 | # Unit test / coverage reports
43 | htmlcov/
44 | .tox/
45 | .nox/
46 | .coverage
47 | .coverage.*
48 | .cache
49 | nosetests.xml
50 | coverage.xml
51 | *.cover
52 | *.py,cover
53 | .hypothesis/
54 | .pytest_cache/
55 |
56 | # Translations
57 | *.mo
58 | *.pot
59 |
60 | # Django stuff:
61 | *.log
62 | local_settings.py
63 | db.sqlite3
64 | db.sqlite3-journal
65 |
66 | # Flask stuff:
67 | instance/
68 | .webassets-cache
69 |
70 | # Scrapy stuff:
71 | .scrapy
72 |
73 | # Sphinx documentation
74 | docs/_build/
75 |
76 | # PyBuilder
77 | target/
78 |
79 | # Jupyter Notebook
80 | .ipynb_checkpoints
81 |
82 | # IPython
83 | profile_default/
84 | ipython_config.py
85 |
86 | # pyenv
87 | .python-version
88 |
89 | # pipenv
90 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
91 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
92 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
93 | # install all needed dependencies.
94 | #Pipfile.lock
95 |
96 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
97 | __pypackages__/
98 |
99 | # Celery stuff
100 | celerybeat-schedule
101 | celerybeat.pid
102 |
103 | # SageMath parsed files
104 | *.sage.py
105 |
106 | # Environments
107 | .env
108 | .venv
109 | env/
110 | venv/
111 | ENV/
112 | env.bak/
113 | venv.bak/
114 |
115 | # Spyder project settings
116 | .spyderproject
117 | .spyproject
118 |
119 | # Rope project settings
120 | .ropeproject
121 |
122 | # mkdocs documentation
123 | /site
124 |
125 | # mypy
126 | .mypy_cache/
127 | .dmypy.json
128 | dmypy.json
129 |
130 | # Pyre type checker
131 | .pyre/
132 | logs/
133 | *.c
134 | *.so
135 | .idea
136 |
137 | # Checkpoint
138 | opencood/logs/
139 |
140 |
--------------------------------------------------------------------------------
/.readthedocs.yaml:
--------------------------------------------------------------------------------
1 | # .readthedocs.yaml
2 | # Read the Docs configuration file
3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
4 |
5 | # Required
6 | version: 2
7 |
8 | # Build documentation in the docs/ directory with Sphinx
9 | sphinx:
10 | configuration: docs/conf.py
11 |
12 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 |
2 | Academic Software License: © 2021 UCLA Mobility Lab (“Institution”). Academic or nonprofit researchers are permitted to use this Software (as defined below) subject to Paragraphs 1-3:
3 |
4 | Institution hereby grants to you free of charge, so long as you are an academic or nonprofit researcher, a nonexclusive license under Institution’s copyright ownership interest in this software and any derivative works made by you thereof (collectively, the “Software”) to use, copy, and make derivative works of the Software solely for educational or academic research purposes, in all cases subject to the terms of this Academic Software License. Except as granted herein, all rights are reserved by Institution, including the right to pursue patent protection of the Software.
5 |
6 | Please note you are prohibited from further transferring the Software -- including any derivatives you make thereof -- to any person or entity. Failure by you to adhere to the requirements in Paragraphs 1 and 2 will result in immediate termination of the license granted to you pursuant to this Academic Software License effective as of the date you first used the Software.
7 |
8 | IN NO EVENT SHALL INSTITUTION BE LIABLE TO ANY ENTITY OR PERSON FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE, EVEN IF INSTITUTION HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. INSTITUTION SPECIFICALLY DISCLAIMS ANY AND ALL WARRANTIES, EXPRESS AND IMPLIED, INCLUDING, BUT NOT LIMITED TO, ANY IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE IS PROVIDED “AS IS.” INSTITUTION HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS OF THIS SOFTWARE.
9 |
10 | Commercial entities: please contact the UCLA Mobility Lab at jiaqima@ucla.edu for licensing opportunities.
11 |
--------------------------------------------------------------------------------
/delete_png.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | def del_files(path):
4 |     for root, dirs, files in os.walk(path):
5 |         for name in files:
6 |             if name.endswith(".png"):
7 |                 os.remove(os.path.join(root, name))
8 |                 print("Delete File: " + os.path.join(root, name))
9 |
10 | # test
11 | if __name__ == "__main__":
12 | path = '/GPFS/public/sifeiliu/OpenCOODv2_new/opencood/logs_HEAL'
13 | del_files(path)
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line, and also
5 | # from the environment for the first two.
6 | SPHINXOPTS ?=
7 | SPHINXBUILD ?= sphinx-build
8 | SOURCEDIR = .
9 | BUILDDIR = _build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
21 |
--------------------------------------------------------------------------------
/docs/conf.py:
--------------------------------------------------------------------------------
1 | # Configuration file for the Sphinx documentation builder.
2 | #
3 | # This file only contains a selection of the most common options. For a full
4 | # list see the documentation:
5 | # https://www.sphinx-doc.org/en/master/usage/configuration.html
6 |
7 | # -- Path setup --------------------------------------------------------------
8 |
9 | # If extensions (or modules to document with autodoc) are in another directory,
10 | # add these directories to sys.path here. If the directory is relative to the
11 | # documentation root, use os.path.abspath to make it absolute, like shown here.
12 | #
13 | import os
14 | import sys
15 | # sys.path.insert(0, os.path.abspath('.'))
16 | sys.path.insert(0, os.path.abspath('./../'))
17 |
18 |
19 | # -- Project information -----------------------------------------------------
20 |
21 | project = 'OpenCOOD'
22 | copyright = '2021, UCLA Mobility Lab'
23 | author = 'Runsheng Xu, Hao Xiang, Jiaqi Ma'
24 |
25 | # The full version, including alpha/beta/rc tags
26 | release = '0.1'
27 |
28 |
29 | # -- General configuration ---------------------------------------------------
30 |
31 | # Add any Sphinx extension module names here, as strings. They can be
32 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
33 | # ones.
34 | extensions = [
35 | 'sphinx.ext.autodoc',
36 | 'sphinx.ext.napoleon',
37 | 'sphinx.ext.doctest',
38 | 'sphinx.ext.intersphinx',
39 | 'sphinx.ext.todo',
40 | 'sphinx.ext.coverage',
41 | 'sphinx.ext.mathjax',
42 | 'recommonmark',
43 | ]
44 |
45 | # Add any paths that contain templates here, relative to this directory.
46 | templates_path = ['_templates']
47 |
48 | # List of patterns, relative to source directory, that match files and
49 | # directories to ignore when looking for source files.
50 | # This pattern also affects html_static_path and html_extra_path.
51 | exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
52 |
53 | todo_include_todos = False
54 |
55 | # -- Options for HTML output -------------------------------------------------
56 |
57 | # The theme to use for HTML and HTML Help pages. See the documentation for
58 | # a list of builtin themes.
59 | #
60 | html_theme = 'sphinx_rtd_theme'
61 |
62 | # Add any paths that contain custom static files (such as style sheets) here,
63 | # relative to this directory. They are copied after the builtin static files,
64 | # so a file named "default.css" will overwrite the builtin "default.css".
65 | html_static_path = ['_static']
66 |
--------------------------------------------------------------------------------
/docs/index.rst:
--------------------------------------------------------------------------------
1 | .. OpenCDA documentation master file, created by
2 | sphinx-quickstart on Fri Jul 2 11:48:53 2021.
3 | You can adapt this file completely to your liking, but it should at least
4 | contain the root `toctree` directive.
5 |
6 | Welcome to OpenCOOD's documentation!
7 | ======================================
8 | OpenCOOD is an open-source cooperative detection framework for autonomous driving. It provides SOTA cooperative detection algorithms,
9 | convenient APIs for the large-scale simulated V2V perception dataset `OPV2V `_, and a set of useful tools for log replay.
10 |
11 | In collaboration with `OpenCDA `_, OpenCOOD mainly focuses on offline cooperative perception training and testing. If you are
12 | interested in online cooperative perception and the corresponding closed-loop simulation testing, OpenCDA is the better tool.
13 |
14 | OpenCOOD is a work in progress, and many features on the roadmap are under active development. We welcome your contributions; please visit our GitHub repo
15 | for the latest release.
16 |
17 | `OpenCOOD source code on Github `_
18 |
19 | .. toctree::
20 | :maxdepth: 1
21 | :caption: Getting Started
22 |
23 | md_files/data_intro.md
24 | md_files/installation.md
25 |
26 | .. toctree::
27 | :maxdepth: 1
28 | :caption: Tutorials
29 |
30 | md_files/config_tutorial.md
31 | md_files/data_annotation_tutorial.md
32 | md_files/logic_flow.md
33 |
34 | .. toctree::
35 | :maxdepth: 1
36 | :caption: Additional Information
37 |
38 | md_files/contributor.md
39 |
40 | **Citing OpenCOOD**\ :
41 |
42 | If you are using our OpenCOOD framework or code for your development, please cite the following paper::
43 |
44 | @inproceedings{xu2022opencood,
45 | author = {Runsheng Xu and Hao Xiang and Xin Xia and Xu Han and Jinlong Li and Jiaqi Ma},
46 | title = {OPV2V: An Open Benchmark Dataset and Fusion Pipeline for Perception with Vehicle-to-Vehicle Communication},
47 | booktitle = {2022 IEEE International Conference on Robotics and Automation (ICRA)},
48 | year = {2022}}
49 |
50 | Also, under this LICENSE, OpenCOOD is for non-commercial research only. Researchers can modify the source code for their own research only.
51 |
52 | Indices and tables
53 | ==================
54 |
55 | * :ref:`genindex`
56 | * :ref:`modindex`
57 | * :ref:`search`
58 |
--------------------------------------------------------------------------------
/docs/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=.
11 | set BUILDDIR=_build
12 |
13 | if "%1" == "" goto help
14 |
15 | %SPHINXBUILD% >NUL 2>NUL
16 | if errorlevel 9009 (
17 | echo.
18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
19 | echo.installed, then set the SPHINXBUILD environment variable to point
20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
21 | echo.may add the Sphinx directory to PATH.
22 | echo.
23 | echo.If you don't have Sphinx installed, grab it from
24 | echo.http://sphinx-doc.org/
25 | exit /b 1
26 | )
27 |
28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
29 | goto end
30 |
31 | :help
32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
33 |
34 | :end
35 | popd
36 |
--------------------------------------------------------------------------------
/docs/md_files/contributor.md:
--------------------------------------------------------------------------------
1 | ## About Us
2 | OpenCOOD is brought to you by [UCLA Mobility Lab](https://mobility-lab.seas.ucla.edu/).
3 |
4 | ### Supervisor
5 | Dr. Jiaqi Ma (Associate Professor @ UCLA)
6 | - Lab Principal Investigator
7 | - [Linkedin](https://www.linkedin.com/in/jiaqi-ma-17037838/)
8 | - [Google Scholar](https://scholar.google.com/citations?user=S3cQz1AAAAAJ&hl=en)
9 |
10 |
11 | ### Core Developer
12 | Runsheng Xu (Ph.D. Student @ UCLA):
13 | - [Homepage](https://derrickxunu.github.io/)
14 | - [Linkedin](https://www.linkedin.com/in/runsheng-xu/)
15 |
16 | Hao Xiang (Ph.D. Student @ UCLA):
17 | - [Homepage](https://xhwind.github.io/)
18 | - [Linkedin](https://www.linkedin.com/in/hao-xiang-42bb5a1b2/)
19 |
20 |
--------------------------------------------------------------------------------
/docs/md_files/data_annotation_tutorial.md:
--------------------------------------------------------------------------------
1 | ## Tutorial 2: Data Annotation Introduction
2 |
3 | ---
4 | We save all ground-truth annotations per agent per timestamp in YAML files. For instance,
5 | `2021_08_24_21_29_28/4805/000069.yaml` refers to the data annotations from the perspective of the
6 | agent 4805 at timestamp 69 in the scenario database `2021_08_24_21_29_28`. Here we go through an example:
7 |
8 | ```yaml
9 | camera0: # parameters for frontal camera
10 | cords: # the x,y,z,roll,yaw,pitch under CARLA map coordinate
11 | - 141.35067749023438
12 | - -388.642578125
13 | - 1.0410505533218384
14 | - 0.07589337974786758
15 | - 174.18048095703125
16 | - 0.20690691471099854
17 | extrinsic: # extrinsic matrix from camera to LiDAR
18 | - - 0.9999999999999999
19 | - -5.1230071481984265e-18
20 | - 9.322129061605055e-20
21 | - -2.999993025731527
22 | - - -2.5011383190939924e-18
23 | - 1.0
24 | - 1.1458579204685086e-19
25 | - -3.934422863949294e-06
26 | - - 2.7713237218713775e-20
27 | - 3.7310309839064755e-20
28 | - 1.0
29 | - 0.8999999040861146
30 | - - 0.0
31 | - 0.0
32 | - 0.0
33 | - 1.0
34 | intrinsic: # camera intrinsic matrix
35 | - - 335.639852470912
36 | - 0.0
37 | - 400.0
38 | - - 0.0
39 | - 335.639852470912
40 | - 300.0
41 | - - 0.0
42 | - 0.0
43 | - 1.0
44 | camera1: ... # params of right rear camera
45 | camera2: ... # params of left rear camera
46 | camera3: ... # params of back camera
47 | ego_speed: 18.13 # agent's current speed, km/h
48 | lidar_pose: # LiDAR pose under CARLA map coordinate system
49 | - 144.33
50 | - -388.94
51 | - 1.93
52 | - 0.078
53 | - 174.18
54 | - 0.21
55 | plan_trajectory: # agent's planning trajectory
56 | - - 140.
57 | - -388
58 | - 87
59 | predicted_ego_pos: # agent's localization (x, y, z, roll, yaw, pitch) obtained from GPS
60 | - 143.78
61 | - -388.94
62 | - 0.036
63 | - 0.080
64 | - -185.95
65 | - 0.18
66 | true_ego_pos: # agent's true localization
67 | - 143.83
68 | - -388.89
69 | - 0.032
70 | - 0.075
71 | - 174.18
72 | - 0.21
73 | vehicles: # the surrounding vehicles that have at least one LiDAR point hit from the agent
74 | 4796: # the id of the vehicle (i.e. object)
75 | angle: # roll, yaw, pitch under CARLA map coordinate system
76 | - 0.096
77 | - -177.86
78 | - 0.197
79 | center: # the relative position from bounding box center to the frontal axis of this vehicle
80 | - 0.0004
81 | - 0.0005
82 | - 0.71
83 | extent: # half length, width and height of the vehicle in meter
84 | - 2.45
85 | - 1.06
86 | - 0.75
87 | location: # x, y, z position of the center of the vehicle's frontal axis under the CARLA map coordinate system
88 | - 158.55
89 | - -385.75
90 | - 0.032
91 | speed: 19.47 # vehicle's speed
92 | 4880: ...
93 | ```
94 |
95 |
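96 |
97 | To work with these annotations programmatically, you can parse them with any YAML library. Below is a minimal sketch that loads one annotation file and reads the object boxes; it assumes PyYAML is installed and that the path points to a local copy of the dataset (the exact loading helpers used inside OpenCOOD may differ):
98 |
99 | ```python
100 | import yaml
101 |
102 | # Hypothetical path to a local annotation file.
103 | anno_path = "2021_08_24_21_29_28/4805/000069.yaml"
104 |
105 | with open(anno_path, "r") as f:
106 |     anno = yaml.safe_load(f)
107 |
108 | # Ego LiDAR pose: [x, y, z, roll, yaw, pitch] under the CARLA map coordinate system.
109 | lidar_pose = anno["lidar_pose"]
110 |
111 | # Each entry in "vehicles" describes one annotated object.
112 | for obj_id, obj in anno["vehicles"].items():
113 |     half_l, half_w, half_h = obj["extent"]   # half length / width / height in meters
114 |     x, y, z = obj["location"]                # position of the frontal-axis center in the map frame
115 |     roll, yaw, pitch = obj["angle"]          # rotation in degrees
116 |     print(obj_id, (2 * half_l, 2 * half_w, 2 * half_h), (x, y, z), yaw)
117 | ```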
--------------------------------------------------------------------------------
/docs/md_files/data_pipeline.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PhyllisH/CodeFilling/57f83ccc7d8457da74f06424811c130c3020e19e/docs/md_files/data_pipeline.png
--------------------------------------------------------------------------------
/docs/md_files/installation.md:
--------------------------------------------------------------------------------
1 | # Installation
2 |
3 | * [__System/Hardware Requirements__](#requirements)
4 | * [__Installation__](#installation)
5 | * [__1. Dependency Installation__](#1-dependency-installation)
6 | * [__2. Install Pytorch__](#2-pytorch-installation-18)
7 | * [__3. Install Spconv__](#3-spconv-121-required)
8 |
9 |
10 |
11 |
12 | ---
13 | ## System/Hardware Requirements
14 | To get started, the following requirements should be fulfilled.
15 | * __System requirements.__ OpenCOOD is tested on Ubuntu 18.04.
16 | * __Adequate GPU.__ A GPU with at least 6 GB of memory is recommended.
17 | * __Disk Space.__ An estimated 100 GB of free disk space is recommended for downloading the data.
18 | * __Python.__ Python 3.7 is required.
19 |
20 |
21 | ---
22 | ## Installation
23 | ### 1. Dependency Installation
24 | First, clone the OpenCOOD repository to your local machine if you haven't already:
25 | ```sh
26 | git clone https://github.com/DerrickXuNu/OpenCOOD.git
27 | cd OpenCOOD
28 | ```
29 | Next, create a conda environment and install the requirements:
30 |
31 | ```sh
32 | conda env create -f environment.yml
33 | conda activate opencood
34 | python setup.py develop
35 | ```
36 |
37 | If the conda installation fails, install the requirements through pip instead:
38 | ```sh
39 | pip install -r requirements.txt
40 | ```
41 |
42 | ### 2. Pytorch Installation (>=1.8)
43 | Go to https://pytorch.org/ and install the CUDA version of PyTorch (>= 1.8).
44 |
45 | ### 3. Spconv (1.2.1 required)
46 | OpenCOOD currently uses an old spconv version to generate voxel features. We will
47 | upgrade to spconv 2.0 in the near future. To install spconv 1.2.1, please follow the guide at https://github.com/traveller59/spconv/tree/v1.2.1.
48 |
49 | #### Tips for installing spconv 1.2.1:
50 | 1. Make sure your cmake version is >= 3.13.2.
51 | 2. cuDNN and the CUDA runtime library (use `nvcc --version` to check) need to be installed on your machine.
52 |
53 | ### 4. Bbx IoU CUDA Extension
54 | Compile the CUDA extension for bounding-box IoU/NMS calculation:
55 |
56 | ```bash
57 | python opencood/utils/setup.py build_ext --inplace
58 | ```
59 |
60 |
61 |
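62 | After the steps above, a quick import check can confirm the core dependencies are visible to Python. This is only a minimal sanity-check sketch, not an official part of the setup:
63 |
64 | ```python
65 | import torch
66 | import spconv  # spconv 1.2.1
67 |
68 | # A CUDA-enabled PyTorch build is needed for GPU training.
69 | print("torch:", torch.__version__, "| CUDA available:", torch.cuda.is_available())
70 | print("spconv imported from:", spconv.__file__)
71 | ```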
--------------------------------------------------------------------------------
/docs/requirements.txt:
--------------------------------------------------------------------------------
1 | matplotlib==3.4.2
2 | numpy
3 | open3d
4 | opencv-python
5 | cython
6 | tensorboardX
7 | shapely
8 |
--------------------------------------------------------------------------------
/environment.yml:
--------------------------------------------------------------------------------
1 | name: opencood
2 | channels:
3 | - defaults
4 | dependencies:
5 | - pip==21.1.2
6 | - python==3.7.11
7 | - pip:
8 | - matplotlib==3.4.2
9 | - numpy
10 | - open3d
11 | - opencv-python==4.5.5.62
12 | - cython
13 | - pygame
14 | - tensorboardX
15 | - shapely
16 | - einops
17 |
--------------------------------------------------------------------------------
/images/camera_demo.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PhyllisH/CodeFilling/57f83ccc7d8457da74f06424811c130c3020e19e/images/camera_demo.gif
--------------------------------------------------------------------------------
/images/demo1.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PhyllisH/CodeFilling/57f83ccc7d8457da74f06424811c130c3020e19e/images/demo1.gif
--------------------------------------------------------------------------------
/images/pytorch-logo-dark.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PhyllisH/CodeFilling/57f83ccc7d8457da74f06424811c130c3020e19e/images/pytorch-logo-dark.png
--------------------------------------------------------------------------------
/opencood/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PhyllisH/CodeFilling/57f83ccc7d8457da74f06424811c130c3020e19e/opencood/__init__.py
--------------------------------------------------------------------------------
/opencood/data_utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PhyllisH/CodeFilling/57f83ccc7d8457da74f06424811c130c3020e19e/opencood/data_utils/__init__.py
--------------------------------------------------------------------------------
/opencood/data_utils/augmentor/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PhyllisH/CodeFilling/57f83ccc7d8457da74f06424811c130c3020e19e/opencood/data_utils/augmentor/__init__.py
--------------------------------------------------------------------------------
/opencood/data_utils/augmentor/augment_utils.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # Author: OpenPCDet
3 |
4 | import numpy as np
5 |
6 | from opencood.utils import common_utils
7 |
8 |
9 | def random_flip_along_x(gt_boxes, points):
10 | """
11 | Args:
12 | gt_boxes: (N, 7 + C), [x, y, z, dx, dy, dz, heading, [vx], [vy]]
13 | points: (M, 3 + C)
14 | Returns:
15 | """
16 | enable = np.random.choice([False, True], replace=False, p=[0.5, 0.5])
17 | if enable:
18 | gt_boxes[:, 1] = -gt_boxes[:, 1]
19 | gt_boxes[:, 6] = -gt_boxes[:, 6]
20 | points[:, 1] = -points[:, 1]
21 |
22 | if gt_boxes.shape[1] > 7:
23 | gt_boxes[:, 8] = -gt_boxes[:, 8]
24 |
25 | return gt_boxes, points
26 |
27 |
28 | def random_flip_along_y(gt_boxes, points):
29 | """
30 | Args:
31 | gt_boxes: (N, 7 + C), [x, y, z, dx, dy, dz, heading, [vx], [vy]]
32 | points: (M, 3 + C)
33 | Returns:
34 | """
35 | enable = np.random.choice([False, True], replace=False, p=[0.5, 0.5])
36 | if enable:
37 | gt_boxes[:, 0] = -gt_boxes[:, 0]
38 | gt_boxes[:, 6] = -(gt_boxes[:, 6] + np.pi)
39 | points[:, 0] = -points[:, 0]
40 |
41 | if gt_boxes.shape[1] > 7:
42 | gt_boxes[:, 7] = -gt_boxes[:, 7]
43 |
44 | return gt_boxes, points
45 |
46 |
47 | def global_rotation(gt_boxes, points, rot_range):
48 | """
49 | Args:
50 | gt_boxes: (N, 7 + C), [x, y, z, dx, dy, dz, heading, [vx], [vy]]
51 | points: (M, 3 + C),
52 | rot_range: [min, max]
53 | Returns:
54 | """
55 | noise_rotation = np.random.uniform(rot_range[0],
56 | rot_range[1])
57 | points = common_utils.rotate_points_along_z(points[np.newaxis, :, :],
58 | np.array([noise_rotation]))[0]
59 |
60 | gt_boxes[:, 0:3] = \
61 | common_utils.rotate_points_along_z(gt_boxes[np.newaxis, :, 0:3],
62 | np.array([noise_rotation]))[0]
63 | gt_boxes[:, 6] += noise_rotation
64 |
65 | if gt_boxes.shape[1] > 7:
66 | gt_boxes[:, 7:9] = common_utils.rotate_points_along_z(
67 | np.hstack((gt_boxes[:, 7:9], np.zeros((gt_boxes.shape[0], 1))))[
68 | np.newaxis, :, :],
69 | np.array([noise_rotation]))[0][:, 0:2]
70 |
71 | return gt_boxes, points
72 |
73 |
74 | def global_scaling(gt_boxes, points, scale_range):
75 | """
76 | Args:
77 | gt_boxes: (N, 7), [x, y, z, dx, dy, dz, heading]
78 | points: (M, 3 + C),
79 | scale_range: [min, max]
80 | Returns:
81 | """
82 | if scale_range[1] - scale_range[0] < 1e-3:
83 | return gt_boxes, points
84 | noise_scale = np.random.uniform(scale_range[0], scale_range[1])
85 | points[:, :3] *= noise_scale
86 | gt_boxes[:, :6] *= noise_scale
87 |
88 | return gt_boxes, points
89 |
--------------------------------------------------------------------------------
/opencood/data_utils/datasets/__init__.py:
--------------------------------------------------------------------------------
1 | from opencood.data_utils.datasets.late_fusion_dataset import getLateFusionDataset
2 | from opencood.data_utils.datasets.late_heter_fusion_dataset import getLateheterFusionDataset
3 | from opencood.data_utils.datasets.early_fusion_dataset import getEarlyFusionDataset
4 | from opencood.data_utils.datasets.intermediate_fusion_dataset import getIntermediateFusionDataset
5 | from opencood.data_utils.datasets.intermediate_2stage_fusion_dataset import getIntermediate2stageFusionDataset
6 | from opencood.data_utils.datasets.intermediate_heter_fusion_dataset import getIntermediateheterFusionDataset
7 | from opencood.data_utils.datasets.basedataset.opv2v_basedataset import OPV2VBaseDataset
8 | from opencood.data_utils.datasets.basedataset.v2xsim_basedataset import V2XSIMBaseDataset
9 | from opencood.data_utils.datasets.basedataset.dairv2x_basedataset import DAIRV2XBaseDataset
10 | from opencood.data_utils.datasets.basedataset.dairv2x_basedataset_latency import DAIRV2XBaseDatasetLatency
11 | from opencood.data_utils.datasets.basedataset.v2xset_basedataset import V2XSETBaseDataset
12 | import os
13 | import numpy as np
14 | import matplotlib.pyplot as plt
15 |
16 | def build_dataset(dataset_cfg, visualize=False, train=True):
17 | fusion_name = dataset_cfg['fusion']['core_method']
18 | dataset_name = dataset_cfg['fusion']['dataset']
19 |
20 | assert fusion_name in ['late', 'lateheter', 'intermediate', 'intermediate2stage', 'intermediateheter', 'early']
21 | assert dataset_name in ['opv2v', 'v2xsim', 'dairv2x', 'v2xset']
22 |
23 |     fusion_dataset_func = "get" + fusion_name.capitalize() + "FusionDataset"  # e.g. 'intermediate' -> getIntermediateFusionDataset
24 | fusion_dataset_func = eval(fusion_dataset_func)
25 | base_dataset_cls = dataset_name.upper() + "BaseDataset"
26 | base_dataset_cls = eval(base_dataset_cls)
27 | print("base_dataset_cls:", base_dataset_cls)
28 | dataset = fusion_dataset_func(base_dataset_cls)(
29 | params=dataset_cfg,
30 | visualize=visualize,
31 | train=train
32 | )
33 |
34 |
35 | return dataset
36 |
37 | def build_dataset_latency(dataset_cfg, visualize=False, train=True):
38 | fusion_name = dataset_cfg['fusion']['core_method']
39 | dataset_name = dataset_cfg['fusion']['dataset']
40 |
41 | assert fusion_name in ['late', 'lateheter', 'intermediate', 'intermediate2stage', 'intermediateheter', 'early']
42 | assert dataset_name in ['opv2v', 'v2xsim', 'dairv2x', 'v2xset']
43 |
44 | fusion_dataset_func = "get" + fusion_name.capitalize() + "FusionDataset"
45 | fusion_dataset_func = eval(fusion_dataset_func)
46 |     base_dataset_cls = dataset_name.upper() + "BaseDataset" + "Latency"  # note: only DAIRV2XBaseDatasetLatency is imported above, so other dataset names will fail here
47 | base_dataset_cls = eval(base_dataset_cls)
48 | print("base_dataset_cls:", base_dataset_cls)
49 | dataset = fusion_dataset_func(base_dataset_cls)(
50 | params=dataset_cfg,
51 | visualize=visualize,
52 | train=train
53 | )
54 |
55 |
56 | return dataset
57 |
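A minimal sketch of driving build_dataset from one of the hypes yamls. The path below points at a backup config in this repo and is only illustrative; real configs are normally routed through the repository's yaml parser so every field the dataset classes read is populated, and PyYAML is assumed to be installed:

import yaml
from opencood.data_utils.datasets import build_dataset

with open("opencood/hypes_yaml/backup/point_pillar_intermediate_fusion.yaml") as f:
    dataset_cfg = yaml.safe_load(f)

# build_dataset dispatches on exactly these two keys:
#   core_method in {late, lateheter, intermediate, intermediate2stage, intermediateheter, early}
#   dataset     in {opv2v, v2xsim, dairv2x, v2xset}
dataset_cfg["fusion"]["core_method"] = "intermediate"
dataset_cfg["fusion"]["dataset"] = "opv2v"

train_set = build_dataset(dataset_cfg, visualize=False, train=True)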
--------------------------------------------------------------------------------
/opencood/data_utils/datasets/basedataset/test_v2xsim.py:
--------------------------------------------------------------------------------
1 | import pickle
2 | import numpy as np
3 | from collections import OrderedDict
4 |
5 | root_dir="/remote-home/share/junhaoge/v2xsim_infos_train.pkl" # not dir, but file
6 | with open(root_dir, 'rb') as f:
7 | dataset_info = pickle.load(f)
8 | scene_database = dict()
9 | min_timestap = 1e10
10 | scene_flag = False
11 | scenes_info = OrderedDict()
12 | last_scene_idx = -1
13 | last_scene_info_idx = -1
14 | for i, scene_info in enumerate(dataset_info):
15 | # # print(i)
16 | # # print(scene_info["token"])
17 | # timestamp = scene_info["timestamp"]
18 | # if timestamp < 20:
19 | # scene_flag = True
20 | # if timestamp < min_timestap:
21 | # min_timestap = timestamp
22 | # scene_database.update({i: OrderedDict()})
23 | # cav_num = scene_info['agent_num']
24 | # # print(cav_num)
25 | # # print(timestamp)
26 | # # print(type(scene_info))
27 | # # print(scene_info.keys())
28 | # # print(scene_info["lidar_path_1"])
29 | # scene_idx = scene_info['lidar_path_1'].split('/')[-1].split('.')[0].split('_')[1]
30 | # print(scene_idx)
31 | cav_num = scene_info['agent_num']
32 | scene_idx = scene_info['lidar_path_1'].split('/')[-1].split('.')[0].split('_')[1]
33 | scene_info_idx = int(scene_info['lidar_path_1'].split('/')[-1].split('.')[0].split('_')[2])
34 |
35 | if last_scene_idx != scene_idx:
36 | scenes_info[scene_idx] = OrderedDict()
37 | scenes_info[scene_idx]["min_idx"] = scene_info_idx
38 | if last_scene_idx != -1:
39 | scenes_info[last_scene_idx]["max_idx"] = last_scene_info_idx
40 | last_scene_idx = scene_idx
41 | else:
42 | pass
43 |
44 | last_scene_info_idx = scene_info_idx
45 | # break
46 | # assert cav_num > 0
47 | # # with open(".txt", 'rb') as f:
48 | # # lidar_data = pickle.load(f)
49 | # cav_ids = list(range(1, cav_num + 1))
50 | # max_cav = 5
51 |
52 | # for j, cav_id in enumerate(cav_ids):
53 | # if j > max_cav - 1:
54 | # print('too many cavs reinitialize')
55 | # break
56 |
57 | # scene_database[i][cav_id] = OrderedDict()
58 |
59 | # scene_database[i][cav_id]['ego'] = j==0
60 |
61 | # scene_database[i][cav_id]['lidar'] = scene_info[f'lidar_path_{cav_id}']
62 | # # need to delete this line is running in /GPFS
63 | # scene_database[i][cav_id]['lidar'] = \
64 | # scene_database[i][cav_id]['lidar'].replace("/GPFS/rhome/yifanlu/workspace/dataset/v2xsim2-complete", "dataset/V2X-Sim-2.0")
65 |
66 | # scene_database[i][cav_id]['params'] = OrderedDict()
67 | # scene_database[i][cav_id]['params'][
68 | # 'vehicles'] = scene_info[f'labels_{cav_id}'][
69 | # 'gt_boxes_global']
70 | # scene_database[i][cav_id]['params'][
71 | # 'object_ids'] = scene_info[f'labels_{cav_id}'][
72 | # 'gt_object_ids'].tolist()
73 | if last_scene_idx != -1:  # close the final scene, whose max_idx is never set inside the loop
74 |     scenes_info[last_scene_idx]["max_idx"] = last_scene_info_idx
75 | print(scenes_info)
--------------------------------------------------------------------------------
/opencood/data_utils/datasets/basedataset/uav_dataset/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PhyllisH/CodeFilling/57f83ccc7d8457da74f06424811c130c3020e19e/opencood/data_utils/datasets/basedataset/uav_dataset/__init__.py
--------------------------------------------------------------------------------
/opencood/data_utils/datasets/basedataset/uav_dataset/get_dataset.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | from .airsim_camera import MULTIAGENTAIRSIMCAM
6 | from .multiDet import MultiAgentDetDataset
7 |
8 |
9 | def get_dataset():
10 | class Dataset(MULTIAGENTAIRSIMCAM, MultiAgentDetDataset):
11 | def collate_batch_train(self):
12 | pass
13 | return Dataset
14 |
15 |
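getEarlyFusionDataset and its siblings in datasets/__init__.py follow the same idea as get_dataset above: a factory closes over a base dataset class and returns a subclass that layers fusion-specific behaviour on top. A generic sketch of the pattern (names are illustrative, not from the repo):

def make_fusion_dataset(base_cls):
    """Return a dataset class that mixes fusion logic into base_cls."""
    class FusionDataset(base_cls):
        def collate_batch_train(self, batch):
            # fusion-specific batching would live here
            raise NotImplementedError
    return FusionDataset

# usage: DatasetCls = make_fusion_dataset(SomeBaseDataset); dataset = DatasetCls(...)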
--------------------------------------------------------------------------------
/opencood/data_utils/datasets/basedataset/v2xset_basedataset.py:
--------------------------------------------------------------------------------
1 | from opencood.data_utils.datasets.basedataset.opv2v_basedataset import OPV2VBaseDataset
2 |
3 | # All the same as OPV2V
4 | class V2XSETBaseDataset(OPV2VBaseDataset):
5 |     def __init__(self, params, visualize, train=True):
6 |         super().__init__(params, visualize, train)
7 |
8 |         if self.load_camera_file is True:  # scenario '2021_09_09_13_20_58' seems to have only 3 camera files, so skip it
9 | scenario_folders_new = [x for x in self.scenario_folders if '2021_09_09_13_20_58' not in x]
10 | self.scenario_folders = scenario_folders_new
11 | self.reinitialize()
12 |
13 |
14 | def generate_object_center_camera(self,
15 | cav_contents,
16 | reference_lidar_pose):
17 | """
18 |         Since V2XSet has not released the bev_visibility map, we can only filter objects by range.
19 | 
20 |         Assume the detection range of the camera is within 50 m.
21 | """
22 | return self.post_processor.generate_object_center_v2xset_camera(
23 | cav_contents, reference_lidar_pose
24 | )
--------------------------------------------------------------------------------
/opencood/data_utils/datasets/basedataset_ori/v2xset_basedataset.py:
--------------------------------------------------------------------------------
1 | from opencood.data_utils.datasets.basedataset.opv2v_basedataset import OPV2VBaseDataset
2 |
3 | # All the same as OPV2V
4 | class V2XSETBaseDataset(OPV2VBaseDataset):
5 |     def __init__(self, params, visualize, train=True):
6 |         super().__init__(params, visualize, train)
7 |
8 |         if self.load_camera_file is True:  # scenario '2021_09_09_13_20_58' seems to have only 3 camera files, so skip it
9 | scenario_folders_new = [x for x in self.scenario_folders if '2021_09_09_13_20_58' not in x]
10 | self.scenario_folders = scenario_folders_new
11 | self.reinitialize()
12 |
13 |
14 | def generate_object_center_camera(self,
15 | cav_contents,
16 | reference_lidar_pose):
17 | """
18 |         Since V2XSet has not released the bev_visibility map, we can only filter objects by range.
19 | 
20 |         Assume the detection range of the camera is within 50 m.
21 | """
22 | return self.post_processor.generate_object_center_v2xset_camera(
23 | cav_contents, reference_lidar_pose
24 | )
--------------------------------------------------------------------------------
/opencood/data_utils/post_processor/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # Author: Runsheng Xu
3 | # License: TDG-Attribution-NonCommercial-NoDistrib
4 |
5 | from opencood.data_utils.post_processor.voxel_postprocessor import VoxelPostprocessor
6 | from opencood.data_utils.post_processor.bev_postprocessor import BevPostprocessor
7 | from opencood.data_utils.post_processor.ciassd_postprocessor import CiassdPostprocessor
8 | from opencood.data_utils.post_processor.fpvrcnn_postprocessor import FpvrcnnPostprocessor
9 | from opencood.data_utils.post_processor.uncertainty_voxel_postprocessor import UncertaintyVoxelPostprocessor
10 |
11 | __all__ = {
12 | 'VoxelPostprocessor': VoxelPostprocessor,
13 | 'BevPostprocessor': BevPostprocessor,
14 | 'CiassdPostprocessor': CiassdPostprocessor,
15 | 'FpvrcnnPostprocessor': FpvrcnnPostprocessor,
16 | 'UncertaintyVoxelPostprocessor': UncertaintyVoxelPostprocessor,
17 | }
18 |
19 |
20 | def build_postprocessor(anchor_cfg, train):
21 | process_method_name = anchor_cfg['core_method']
22 | anchor_generator = __all__[process_method_name](
23 | anchor_params=anchor_cfg,
24 | train=train
25 | )
26 |
27 | return anchor_generator
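A minimal sketch of building a postprocessor straight from a dict. The values mirror the postprocess blocks of the yaml configs later in this dump; whether this reduced dict is sufficient depends on what VoxelPostprocessor actually reads, so treat it as illustrative rather than a complete config:

from opencood.data_utils.post_processor import build_postprocessor

anchor_cfg = {
    "core_method": "VoxelPostprocessor",
    "gt_range": [-140.8, -40, -3, 140.8, 40, 1],
    "anchor_args": {
        "cav_lidar_range": [-140.8, -40, -3, 140.8, 40, 1],
        "l": 3.9, "w": 1.6, "h": 1.56,
        "r": [0, 90],
        "feature_stride": 2,
        "num": 2,
    },
    "target_args": {"pos_threshold": 0.6,
                    "neg_threshold": 0.45,
                    "score_threshold": 0.20},
    "order": "hwl",
    "max_num": 100,
    "nms_thresh": 0.15,
}
postprocessor = build_postprocessor(anchor_cfg, train=True)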
--------------------------------------------------------------------------------
/opencood/data_utils/pre_processor/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # Author: Runsheng Xu
3 | # License: TDG-Attribution-NonCommercial-NoDistrib
4 |
5 | from opencood.data_utils.pre_processor.base_preprocessor import BasePreprocessor
6 | from opencood.data_utils.pre_processor.voxel_preprocessor import VoxelPreprocessor
7 | from opencood.data_utils.pre_processor.bev_preprocessor import BevPreprocessor
8 | from opencood.data_utils.pre_processor.sp_voxel_preprocessor import SpVoxelPreprocessor
9 |
10 | __all__ = {
11 | 'BasePreprocessor': BasePreprocessor,
12 | 'VoxelPreprocessor': VoxelPreprocessor,
13 | 'BevPreprocessor': BevPreprocessor,
14 | 'SpVoxelPreprocessor': SpVoxelPreprocessor
15 | }
16 |
17 |
18 | def build_preprocessor(preprocess_cfg, train):
19 | process_method_name = preprocess_cfg['core_method']
20 |     error_message = f"{process_method_name} is not found. " \
21 |                     f"Please add your preprocessor's name in opencood/" \
22 |                     f"data_utils/pre_processor/__init__.py"
23 | assert process_method_name in ['BasePreprocessor', 'VoxelPreprocessor',
24 | 'BevPreprocessor', 'SpVoxelPreprocessor'], \
25 | error_message
26 |
27 | processor = __all__[process_method_name](
28 | preprocess_params=preprocess_cfg,
29 | train=train
30 | )
31 |
32 | return processor
33 |
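Likewise, a minimal sketch of build_preprocessor with a dict shaped like the preprocess blocks of the yamls below; the values are copied from those configs, and any derived fields the repository's yaml parser would normally add are omitted, so this is an illustration rather than a guaranteed-complete config:

from opencood.data_utils.pre_processor import build_preprocessor

preprocess_cfg = {
    "core_method": "SpVoxelPreprocessor",
    "args": {
        "voxel_size": [0.4, 0.4, 4],
        "max_points_per_voxel": 32,
        "max_voxel_train": 32000,
        "max_voxel_test": 70000,
    },
    "cav_lidar_range": [-140.8, -40, -3, 140.8, 40, 1],
}
preprocessor = build_preprocessor(preprocess_cfg, train=True)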
--------------------------------------------------------------------------------
/opencood/data_utils/pre_processor/base_preprocessor.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # Author: Runsheng Xu
3 | # License: TDG-Attribution-NonCommercial-NoDistrib
4 |
5 | import numpy as np
6 |
7 | from opencood.utils import pcd_utils
8 |
9 |
10 | class BasePreprocessor(object):
11 | """
12 | Basic Lidar pre-processor.
13 |
14 | Parameters
15 | ----------
16 | preprocess_params : dict
17 | The dictionary containing all parameters of the preprocessing.
18 |
19 | train : bool
20 | Train or test mode.
21 | """
22 |
23 | def __init__(self, preprocess_params, train):
24 | self.params = preprocess_params
25 | self.train = train
26 |
27 | def preprocess(self, pcd_np):
28 | """
29 | Preprocess the lidar points by simple sampling.
30 |
31 | Parameters
32 | ----------
33 | pcd_np : np.ndarray
34 | The raw lidar.
35 |
36 | Returns
37 | -------
38 | data_dict : the output dictionary.
39 | """
40 | data_dict = {}
41 | sample_num = self.params['args']['sample_num']
42 |
43 | pcd_np = pcd_utils.downsample_lidar(pcd_np, sample_num)
44 | data_dict['downsample_lidar'] = pcd_np
45 |
46 | return data_dict
47 |
48 | def project_points_to_bev_map(self, points, ratio=0.1):
49 | """
50 | Project points to BEV occupancy map with default ratio=0.1.
51 |
52 | Parameters
53 | ----------
54 | points : np.ndarray
55 | (N, 3) / (N, 4)
56 |
57 | ratio : float
58 |             Discretization parameter, i.e. the size of each BEV cell in meters. Default is 0.1.
59 |
60 | Returns
61 | -------
62 | bev_map : np.ndarray
63 | BEV occupancy map including projected points with shape
64 | (img_row, img_col).
65 |
66 | """
67 | L1, W1, H1, L2, W2, H2 = self.params["cav_lidar_range"]
68 | img_row = int((L2 - L1) / ratio)
69 | img_col = int((W2 - W1) / ratio)
70 | bev_map = np.zeros((img_row, img_col))
71 | bev_origin = np.array([L1, W1, H1]).reshape(1, -1)
72 | # (N, 3)
73 | indices = ((points[:, :3] - bev_origin) / ratio).astype(int)
74 | mask = np.logical_and(indices[:, 0] > 0, indices[:, 0] < img_row)
75 | mask = np.logical_and(mask, np.logical_and(indices[:, 1] > 0,
76 | indices[:, 1] < img_col))
77 | indices = indices[mask, :]
78 | bev_map[indices[:, 0], indices[:, 1]] = 1
79 | return bev_map
80 |
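A short sketch of project_points_to_bev_map on random points; sample_num and the lidar range are illustrative values. With ratio 0.1 and a [-70.4, -40, -3, 70.4, 40, 1] range, the occupancy map is 1408 x 800:

import numpy as np
from opencood.data_utils.pre_processor.base_preprocessor import BasePreprocessor

params = {"args": {"sample_num": 2048},
          "cav_lidar_range": [-70.4, -40, -3, 70.4, 40, 1]}
preprocessor = BasePreprocessor(params, train=False)

points = np.random.uniform(-40, 40, size=(5000, 4)).astype(np.float32)
bev_map = preprocessor.project_points_to_bev_map(points, ratio=0.1)
print(bev_map.shape)  # (1408, 800)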
--------------------------------------------------------------------------------
/opencood/hypes_yaml/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PhyllisH/CodeFilling/57f83ccc7d8457da74f06424811c130c3020e19e/opencood/hypes_yaml/__init__.py
--------------------------------------------------------------------------------
/opencood/hypes_yaml/backup/ciassd_intermediate_fusion.yaml:
--------------------------------------------------------------------------------
1 | name: ciassd
2 | yaml_parser: "load_voxel_params"
3 |
4 | root_dir: "/GPFS/rhome/yifanlu/workspace/dataset/OPV2V/train"
5 | validate_dir: "/GPFS/rhome/yifanlu/workspace/dataset/OPV2V/validate"
6 | test_dir: "/GPFS/rhome/yifanlu/workspace/dataset/OPV2V/test"
7 |
8 | noise_setting:
9 | add_noise: false
10 |
11 | comm_range: 70
12 |
13 | train_params:
14 | batch_size: &batch_size 2
15 | epoches: 30
16 | eval_freq: 1
17 | save_freq: 1
18 |
19 | fusion:
20 | core_method: 'IntermediateFusionDataset' # LateFusionDataset, EarlyFusionDataset, IntermediateFusionDataset supported
21 | args:
22 | proj_first: true
23 |
24 | # preprocess-related
25 | preprocess:
26 | # options: BasePreprocessor, VoxelPreprocessor, BevPreprocessor
27 | core_method: 'SpVoxelPreprocessor'
28 | args:
29 | voxel_size: &voxel_size [0.4, 0.4, 0.4]
30 | max_points_per_voxel: &T 32
31 | max_voxel_train: 36000
32 | max_voxel_test: 70000
33 | # lidar range for each individual cav.
34 | cav_lidar_range: &cav_lidar [-140.8, -41.6, -3, 140.8, 41.6, 1]
35 |
36 | data_augment:
37 | - NAME: random_world_flip
38 | ALONG_AXIS_LIST: [ 'x' ]
39 |
40 | - NAME: random_world_rotation
41 | WORLD_ROT_ANGLE: [ -0.78539816, 0.78539816 ]
42 |
43 | - NAME: random_world_scaling
44 | WORLD_SCALE_RANGE: [ 0.95, 1.05 ]
45 |
46 | # anchor box related
47 | postprocess:
48 | core_method: 'VoxelPostprocessor' # VoxelPostprocessor, BevPostprocessor supported
49 | gt_range: *cav_lidar
50 | anchor_args:
51 | cav_lidar_range: *cav_lidar
52 | l: 3.9
53 | w: 1.6
54 | h: 1.56
55 | r: [0, 90]
56 | num: &achor_num 2
57 | target_args:
58 | pos_threshold: 0.6
59 | neg_threshold: 0.45
60 | score_threshold: 0.25
61 | order: 'hwl' # hwl or lwh
62 | max_num: 100 # maximum number of objects in a single frame. use this number to make sure different frames has the same dimension in the same batch
63 | nms_thresh: 0.15
64 |
65 | # model related
66 | model:
67 | core_method: voxel_net_intermediate # corresponding to VoxelNet
68 | gt_range: *cav_lidar
69 | args:
70 | N: *batch_size
71 | T: *T
72 | lidar_range: *cav_lidar
73 | voxel_size: *voxel_size
74 | anchor_num: *achor_num
75 | compression: 4
76 | pillar_vfe:
77 | use_norm: true
78 | with_distance: false
79 | use_absolute_xyz: true
80 | num_filters: [ 64 ]
81 |
82 | loss:
83 | core_method: point_pillar_loss
84 | args:
85 | cls_weight: 1.0
86 | reg: 2.0
87 |
88 | optimizer:
89 | core_method: Adam
90 | lr: 0.002
91 | args:
92 | eps: 1e-10
93 | weight_decay: 1e-4
94 |
95 | lr_scheduler:
96 | core_method: multistep #step, multistep and Exponential support
97 | gamma: 0.1
98 | step_size: [15, 30]
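The &name / *name pairs in this and the following configs are ordinary YAML anchors, so a value such as the cav lidar range is written once and reused by the preprocess, postprocess and model blocks. A short sketch of how they resolve at load time (PyYAML assumed; path as listed in this dump):

import yaml

with open("opencood/hypes_yaml/backup/ciassd_intermediate_fusion.yaml") as f:
    cfg = yaml.safe_load(f)

# *cav_lidar and *achor_num are replaced by the anchored values when the file is loaded
assert cfg["preprocess"]["args"]["cav_lidar_range"] == cfg["model"]["args"]["lidar_range"]
assert cfg["postprocess"]["anchor_args"]["num"] == cfg["model"]["args"]["anchor_num"] == 2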
--------------------------------------------------------------------------------
/opencood/hypes_yaml/backup/opv2v/camera/lidar_single.yaml:
--------------------------------------------------------------------------------
1 | name: OPV2V_lidar
2 | root_dir: "/GPFS/rhome/yifanlu/OpenCOOD/dataset/OPV2V/train"
3 | validate_dir: "/GPFS/rhome/yifanlu/OpenCOOD/dataset/OPV2V/validate"
4 | test_dir: "/GPFS/rhome/yifanlu/OpenCOOD/dataset/OPV2V/test"
5 |
6 | noise_setting:
7 | add_noise: False
8 |
9 | yaml_parser: "load_point_pillar_params"
10 | train_params:
11 | batch_size: &batch_size 4
12 | epoches: 20
13 | eval_freq: 1
14 | save_freq: 1
15 |
16 | comm_range: 70
17 |
18 | fusion:
19 | core_method: 'LateFusionDataset' # LateFusionDataset, EarlyFusionDataset, IntermediateFusionDataset supported
20 | args: []
21 |
22 | # preprocess-related
23 | preprocess:
24 | # options: BasePreprocessor, VoxelPreprocessor, BevPreprocessor
25 | core_method: 'SpVoxelPreprocessor'
26 | args:
27 | voxel_size: &voxel_size [0.4, 0.4, 4]
28 | max_points_per_voxel: 32
29 | max_voxel_train: 16000
30 | max_voxel_test: 40000
31 | # lidar range for each individual cav.
32 | cav_lidar_range: &cav_lidar [-70.4, -40, -3, 70.4, 40, 1]
33 |
34 | data_augment:
35 | - NAME: random_world_flip
36 | ALONG_AXIS_LIST: [ 'x' ]
37 |
38 | - NAME: random_world_rotation
39 | WORLD_ROT_ANGLE: [ -0.78539816, 0.78539816 ]
40 |
41 | - NAME: random_world_scaling
42 | WORLD_SCALE_RANGE: [ 0.95, 1.05 ]
43 |
44 | # anchor box related
45 | postprocess:
46 | core_method: 'VoxelPostprocessor' # VoxelPostprocessor, BevPostprocessor supported
47 | gt_range: *cav_lidar
48 | anchor_args:
49 | cav_lidar_range: *cav_lidar
50 | l: 3.9
51 | w: 1.6
52 | h: 1.56
53 | r: [0, 90]
54 | num: &achor_num 2
55 | target_args:
56 | pos_threshold: 0.6
57 | neg_threshold: 0.45
58 | score_threshold: 0.20
59 | order: 'hwl' # hwl or lwh
60 | max_num: 100 # maximum number of objects in a single frame. use this number to make sure different frames has the same dimension in the same batch
61 | nms_thresh: 0.15
62 |
63 | # model related
64 | model:
65 | core_method: point_pillar
66 | args:
67 | voxel_size: *voxel_size
68 | lidar_range: *cav_lidar
69 | anchor_number: *achor_num
70 | pillar_vfe:
71 | use_norm: true
72 | with_distance: false
73 | use_absolute_xyz: true
74 | num_filters: [64]
75 | point_pillar_scatter:
76 | num_features: 64
77 |
78 | base_bev_backbone:
79 | layer_nums: [3, 5, 8]
80 | layer_strides: [2, 2, 2]
81 | num_filters: [64, 128, 256]
82 | upsample_strides: [1, 2, 4]
83 | num_upsample_filter: [128, 128, 128]
84 |
85 | anchor_num: *achor_num
86 |
87 | loss:
88 | core_method: point_pillar_loss
89 | args:
90 | cls_weight: 1.0
91 | reg: 2.0
92 |
93 | optimizer:
94 | core_method: Adam
95 | lr: 0.002
96 | args:
97 | eps: 1e-10
98 | weight_decay: 1e-4
99 |
100 | lr_scheduler:
101 | core_method: multistep #step, multistep and Exponential support
102 | gamma: 0.1
103 | step_size: [10, 15]
104 |
105 |
--------------------------------------------------------------------------------
/opencood/hypes_yaml/backup/opv2v/npj/opv2v_early.yaml:
--------------------------------------------------------------------------------
1 | name: point_pillar_early_fusion
2 | root_dir: "/GPFS/rhome/yifanlu/workspace/dataset/OPV2V/train"
3 | validate_dir: "/GPFS/rhome/yifanlu/workspace/dataset/OPV2V/validate"
4 | test_dir: "/GPFS/rhome/yifanlu/workspace/dataset/OPV2V/test"
5 |
6 | yaml_parser: "load_point_pillar_params"
7 | train_params:
8 | batch_size: &batch_size 4
9 | epoches: 15
10 | eval_freq: 1
11 | save_freq: 1
12 |
13 | comm_range: 70
14 |
15 | fusion:
16 | core_method: 'EarlyFusionDataset' # LateFusionDataset, EarlyFusionDataset, IntermediateFusionDataset supported
17 | args:
18 | proj_first: false
19 |
20 | # preprocess-related
21 | preprocess:
22 | # options: BasePreprocessor, VoxelPreprocessor, BevPreprocessor
23 | core_method: 'SpVoxelPreprocessor'
24 | args:
25 | voxel_size: &voxel_size [0.4, 0.4, 4]
26 | max_points_per_voxel: 32
27 | max_voxel_train: 32000
28 | max_voxel_test: 70000
29 | # lidar range for each individual cav.
30 | cav_lidar_range: &cav_lidar [-140.8, -40, -3, 140.8, 40, 1]
31 |
32 | data_augment:
33 | - NAME: random_world_flip
34 | ALONG_AXIS_LIST: [ 'x' ]
35 |
36 | - NAME: random_world_rotation
37 | WORLD_ROT_ANGLE: [ -0.78539816, 0.78539816 ]
38 |
39 | - NAME: random_world_scaling
40 | WORLD_SCALE_RANGE: [ 0.95, 1.05 ]
41 |
42 | # anchor box related
43 | postprocess:
44 | core_method: 'VoxelPostprocessor' # VoxelPostprocessor, BevPostprocessor supported
45 | anchor_args:
46 | cav_lidar_range: *cav_lidar
47 | l: 3.9
48 | w: 1.6
49 | h: 1.56
50 | r: [0, 90]
51 | num: &achor_num 2
52 | target_args:
53 | pos_threshold: 0.6
54 | neg_threshold: 0.45
55 | score_threshold: 0.20
56 | order: 'hwl' # hwl or lwh
57 | max_num: 100 # maximum number of objects in a single frame. use this number to make sure different frames has the same dimension in the same batch
58 | nms_thresh: 0.15
59 |
60 | # model related
61 | model:
62 | core_method: point_pillar
63 | args:
64 | voxel_size: *voxel_size
65 | lidar_range: *cav_lidar
66 | anchor_number: *achor_num
67 | pillar_vfe:
68 | use_norm: true
69 | with_distance: false
70 | use_absolute_xyz: true
71 | num_filters: [64]
72 | point_pillar_scatter:
73 | num_features: 64
74 |
75 | base_bev_backbone:
76 | layer_nums: [3, 5, 8]
77 | layer_strides: [2, 2, 2]
78 | num_filters: [64, 128, 256]
79 | upsample_strides: [1, 2, 4]
80 | num_upsample_filter: [128, 128, 128]
81 |
82 | anchor_num: *achor_num
83 |
84 | loss:
85 | core_method: point_pillar_loss
86 | args:
87 | cls_weight: 1.0
88 | reg: 2.0
89 |
90 | optimizer:
91 | core_method: Adam
92 | lr: 0.002
93 | args:
94 | eps: 1e-10
95 | weight_decay: 1e-4
96 |
97 | lr_scheduler:
98 | core_method: multistep #step, multistep and Exponential support
99 | gamma: 0.1
100 | step_size: [10, 15]
101 |
102 |
--------------------------------------------------------------------------------
/opencood/hypes_yaml/backup/pixor_early_fusion.yaml:
--------------------------------------------------------------------------------
1 | name: pixor_early_fusion
2 |
3 | yaml_parser: "load_bev_params"
4 | root_dir: "/GPFS/rhome/yifanlu/workspace/dataset/OPV2V/train"
5 | validate_dir: "/GPFS/rhome/yifanlu/workspace/dataset/OPV2V/train/validate"
6 | train_params:
7 | batch_size: &batch_size 2
8 | epoches: 15
9 | eval_freq: 1
10 | save_freq: 1
11 | fusion:
12 | core_method: 'EarlyFusionDataset' # LateFusionDataset, EarlyFusionDataset, IntermediateFusionDataset supported
13 | args: []
14 |
15 | # preprocess-related
16 | preprocess:
17 | # options: BasePreprocessor, VoxelPreprocessor, BevPreprocessor
18 | core_method: 'BevPreprocessor'
19 | args:
20 |     res: &res 0.2 # discretization resolution
21 | downsample_rate: &downsample_rate 4 # pixor downsample ratio
22 | # lidar range for each individual cav.
23 | cav_lidar_range: &cav_lidar [-160, -40, -3, 160, 40, 1] # must be divisible by 16
24 |
25 | data_augment: []
26 |
27 | # anchor box related
28 | postprocess:
29 | core_method: 'BevPostprocessor' # VoxelPostprocessor, BevPostprocessor supported
30 | nms_thresh: 0.15
31 | anchor_args:
32 | cav_lidar_range: *cav_lidar
33 | res: *res
34 | downsample_rate: *downsample_rate # pixor downsample ratio
35 | target_args:
36 | score_threshold: 0.5
37 |
38 |
39 | order: 'lwh' # hwl or lwh
40 | max_num: 100 # maximum number of objects in a single frame. use this number to make sure different frames has the same dimension in the same batch
41 |
42 | # model related
43 | model:
44 |   core_method: pixor # corresponding to PIXOR
45 | args:
46 | use_bn: True
47 | decode: False
48 |
49 | loss:
50 | core_method: pixor_loss
51 | args:
52 | alpha: 1.0
53 | beta: 1.0
54 |
55 | optimizer:
56 | core_method: Adam
57 | lr: 0.001
58 | args:
59 | eps: 1e-10
60 | weight_decay: 1e-4
61 |
62 | lr_scheduler:
63 | core_method: Exponential #step, multistep and Exponential support
64 | gamma: 0.99
65 |
--------------------------------------------------------------------------------
/opencood/hypes_yaml/backup/pixor_intermediate_fusion.yaml:
--------------------------------------------------------------------------------
1 | name: pixor_intermediate_fusion
2 |
3 | yaml_parser: "load_bev_params"
4 | root_dir: "/GPFS/rhome/yifanlu/workspace/dataset/OPV2V/train"
5 | validate_dir: "/GPFS/rhome/yifanlu/workspace/dataset/OPV2V/validate"
6 | train_params:
7 | batch_size: &batch_size 2
8 | epoches: 15
9 | eval_freq: 1
10 | save_freq: 1
11 | max_cav: &max_cav 5
12 | fusion:
13 | core_method: 'IntermediateFusionDataset' # LateFusionDataset, EarlyFusionDataset, IntermediateFusionDataset supported
14 | args: []
15 |
16 | # preprocess-related
17 | preprocess:
18 | # options: BasePreprocessor, VoxelPreprocessor, BevPreprocessor
19 | core_method: 'BevPreprocessor'
20 | args:
21 |     res: &res 0.2 # discretization resolution
22 | downsample_rate: &downsample_rate 4 # pixor downsample ratio
23 | # lidar range for each individual cav.
24 | cav_lidar_range: &cav_lidar [-160, -40, -3, 160, 40, 1] # must be divisible by 16
25 |
26 | data_augment: []
27 |
28 | # anchor box related
29 | postprocess:
30 | core_method: 'BevPostprocessor' # VoxelPostprocessor, BevPostprocessor supported
31 | nms_thresh: 0.15
32 | anchor_args:
33 | cav_lidar_range: *cav_lidar
34 | res: *res
35 | downsample_rate: *downsample_rate # pixor downsample ratio
36 | target_args:
37 | score_threshold: 0.7
38 |
39 |
40 | order: 'lwh' # hwl or lwh
41 | max_num: 100 # maximum number of objects in a single frame. use this number to make sure different frames has the same dimension in the same batch
42 |
43 | # model related
44 | model:
45 | core_method: pixor_intermediate
46 | args:
47 | use_bn: True
48 | decode: False
49 |
50 | loss:
51 | core_method: pixor_loss
52 | args:
53 | alpha: 1.0
54 | beta: 1.0
55 |
56 | optimizer:
57 | core_method: Adam
58 | lr: 0.001
59 | args:
60 | eps: 1e-10
61 | weight_decay: 1e-4
62 |
63 | lr_scheduler:
64 | core_method: Exponential #step, multistep and Exponential support
65 | gamma: 0.99
66 |
--------------------------------------------------------------------------------
/opencood/hypes_yaml/backup/pixor_late_fusion.yaml:
--------------------------------------------------------------------------------
1 | name: pixor_post_fusion
2 |
3 | yaml_parser: "load_bev_params"
4 | root_dir: "/GPFS/rhome/yifanlu/workspace/dataset/OPV2V/train"
5 | validate_dir: "/GPFS/rhome/yifanlu/workspace/dataset/OPV2V/validate"
6 | train_params:
7 | batch_size: &batch_size 2
8 | epoches: 15
9 | eval_freq: 1
10 | save_freq: 1
11 | fusion:
12 | core_method: 'LateFusionDataset' # LateFusionDataset, EarlyFusionDataset, IntermediateFusionDataset supported
13 | args: []
14 |
15 | # preprocess-related
16 | preprocess:
17 | # options: BasePreprocessor, VoxelPreprocessor, BevPreprocessor
18 | core_method: 'BevPreprocessor'
19 | args:
20 |     res: &res 0.2 # discretization resolution
21 | downsample_rate: &downsample_rate 4 # pixor downsample ratio
22 | # lidar range for each individual cav.
23 | cav_lidar_range: &cav_lidar [-80, -40, -3, 80, 40, 1] # must be divisible by 16
24 |
25 | data_augment: []
26 |
27 | # anchor box related
28 | postprocess:
29 | core_method: 'BevPostprocessor' # VoxelPostprocessor, BevPostprocessor supported
30 | nms_thresh: 0.15
31 | anchor_args:
32 | cav_lidar_range: *cav_lidar
33 | res: *res
34 | downsample_rate: *downsample_rate # pixor downsample ratio
35 | target_args:
36 | score_threshold: 0.3
37 |
38 |
39 | order: 'lwh' # hwl or lwh
40 | max_num: 100 # maximum number of objects in a single frame. use this number to make sure different frames has the same dimension in the same batch
41 |
42 | # model related
43 | model:
44 |   core_method: pixor # corresponding to PIXOR
45 | args:
46 | use_bn: True
47 | decode: False
48 |
49 | loss:
50 | core_method: pixor_loss
51 | args:
52 | alpha: 1.0
53 | beta: 1.0
54 |
55 | optimizer:
56 | core_method: Adam
57 | lr: 0.001
58 | args:
59 | eps: 1e-10
60 | weight_decay: 1e-4
--------------------------------------------------------------------------------
/opencood/hypes_yaml/backup/point_pillar_early_fusion.yaml:
--------------------------------------------------------------------------------
1 | name: point_pillar_early_fusion
2 | root_dir: "/GPFS/rhome/yifanlu/workspace/dataset/OPV2V/train"
3 | validate_dir: "/GPFS/rhome/yifanlu/workspace/dataset/OPV2V/validate"
4 | test_dir: "/GPFS/rhome/yifanlu/workspace/dataset/OPV2V/test"
5 |
6 | yaml_parser: "load_point_pillar_params"
7 | train_params:
8 | batch_size: &batch_size 4
9 | epoches: 15
10 | eval_freq: 1
11 | save_freq: 1
12 |
13 | comm_range: 70
14 |
15 | fusion:
16 | core_method: 'EarlyFusionDataset' # LateFusionDataset, EarlyFusionDataset, IntermediateFusionDataset supported
17 | args:
18 | proj_first: true
19 |
20 | # preprocess-related
21 | preprocess:
22 | # options: BasePreprocessor, VoxelPreprocessor, BevPreprocessor
23 | core_method: 'SpVoxelPreprocessor'
24 | args:
25 | voxel_size: &voxel_size [0.4, 0.4, 4]
26 | max_points_per_voxel: 32
27 | max_voxel_train: 32000
28 | max_voxel_test: 70000
29 | # lidar range for each individual cav.
30 | cav_lidar_range: &cav_lidar [-140.8, -40, -3, 140.8, 40, 1]
31 |
32 | data_augment:
33 | - NAME: random_world_flip
34 | ALONG_AXIS_LIST: [ 'x' ]
35 |
36 | - NAME: random_world_rotation
37 | WORLD_ROT_ANGLE: [ -0.78539816, 0.78539816 ]
38 |
39 | - NAME: random_world_scaling
40 | WORLD_SCALE_RANGE: [ 0.95, 1.05 ]
41 |
42 | # anchor box related
43 | postprocess:
44 | core_method: 'VoxelPostprocessor' # VoxelPostprocessor, BevPostprocessor supported
45 | anchor_args:
46 | cav_lidar_range: *cav_lidar
47 | l: 3.9
48 | w: 1.6
49 | h: 1.56
50 | r: [0, 90]
51 | num: &achor_num 2
52 | target_args:
53 | pos_threshold: 0.6
54 | neg_threshold: 0.45
55 | score_threshold: 0.20
56 | order: 'hwl' # hwl or lwh
57 | max_num: 100 # maximum number of objects in a single frame. use this number to make sure different frames has the same dimension in the same batch
58 | nms_thresh: 0.15
59 |
60 | # model related
61 | model:
62 | core_method: point_pillar
63 | args:
64 | voxel_size: *voxel_size
65 | lidar_range: *cav_lidar
66 | anchor_number: *achor_num
67 | pillar_vfe:
68 | use_norm: true
69 | with_distance: false
70 | use_absolute_xyz: true
71 | num_filters: [64]
72 | point_pillar_scatter:
73 | num_features: 64
74 |
75 | base_bev_backbone:
76 | layer_nums: [3, 5, 8]
77 | layer_strides: [2, 2, 2]
78 | num_filters: [64, 128, 256]
79 | upsample_strides: [1, 2, 4]
80 | num_upsample_filter: [128, 128, 128]
81 |
82 | anchor_num: *achor_num
83 |
84 | loss:
85 | core_method: point_pillar_loss
86 | args:
87 | cls_weight: 1.0
88 | reg: 2.0
89 |
90 | optimizer:
91 | core_method: Adam
92 | lr: 0.002
93 | args:
94 | eps: 1e-10
95 | weight_decay: 1e-4
96 |
97 | lr_scheduler:
98 | core_method: multistep #step, multistep and Exponential support
99 | gamma: 0.1
100 | step_size: [10, 15]
101 |
102 |
--------------------------------------------------------------------------------
/opencood/hypes_yaml/backup/point_pillar_fcooper.yaml:
--------------------------------------------------------------------------------
1 | name: point_pillar_fcooper
2 | root_dir: 'opv2v_data_dumping/train'
3 | validate_dir: 'opv2v_data_dumping/validate'
4 |
5 | yaml_parser: "load_point_pillar_params"
6 | train_params:
7 | batch_size: &batch_size 2
8 | epoches: 15
9 | eval_freq: 1
10 | save_freq: 1
11 | max_cav: &max_cav 5
12 |
13 | fusion:
14 | core_method: 'IntermediateFusionDataset' # LateFusionDataset, EarlyFusionDataset, IntermediateFusionDataset supported
15 | args: []
16 |
17 | # preprocess-related
18 | preprocess:
19 | # options: BasePreprocessor, VoxelPreprocessor, BevPreprocessor
20 | core_method: 'SpVoxelPreprocessor'
21 | args:
22 | voxel_size: &voxel_size [0.4, 0.4, 4]
23 | max_points_per_voxel: 32
24 | max_voxel_train: 32000
25 | max_voxel_test: 70000
26 | # lidar range for each individual cav.
27 | cav_lidar_range: &cav_lidar [-140.8, -40, -3, 140.8, 40, 1]
28 |
29 | data_augment:
30 | - NAME: random_world_flip
31 | ALONG_AXIS_LIST: [ 'x' ]
32 |
33 | - NAME: random_world_rotation
34 | WORLD_ROT_ANGLE: [ -0.78539816, 0.78539816 ]
35 |
36 | - NAME: random_world_scaling
37 | WORLD_SCALE_RANGE: [ 0.95, 1.05 ]
38 |
39 | # anchor box related
40 | postprocess:
41 | core_method: 'VoxelPostprocessor' # VoxelPostprocessor, BevPostprocessor supported
42 | anchor_args:
43 | cav_lidar_range: *cav_lidar
44 | l: 3.9
45 | w: 1.6
46 | h: 1.56
47 | r: [0, 90]
48 | feature_stride: 2
49 | num: &achor_num 2
50 | target_args:
51 | pos_threshold: 0.6
52 | neg_threshold: 0.45
53 | score_threshold: 0.20
54 | order: 'hwl' # hwl or lwh
55 | max_num: 100 # maximum number of objects in a single frame. use this number to make sure different frames has the same dimension in the same batch
56 | nms_thresh: 0.15
57 |
58 | # model related
59 | model:
60 | core_method: point_pillar_fcooper
61 | args:
62 | voxel_size: *voxel_size
63 | lidar_range: *cav_lidar
64 | anchor_number: *achor_num
65 | max_cav: *max_cav
66 | compression: 0 # compression rate
67 | backbone_fix: false
68 |
69 | pillar_vfe:
70 | use_norm: true
71 | with_distance: false
72 | use_absolute_xyz: true
73 | num_filters: [64]
74 | point_pillar_scatter:
75 | num_features: 64
76 |
77 | base_bev_backbone:
78 | layer_nums: [3, 5, 8]
79 | layer_strides: [2, 2, 2]
80 | num_filters: [64, 128, 256]
81 | upsample_strides: [1, 2, 4]
82 | num_upsample_filter: [128, 128, 128]
83 | shrink_header:
84 | kernal_size: [ 1 ]
85 | stride: [ 1 ]
86 | padding: [ 0 ]
87 | dim: [ 256 ]
88 | input_dim: 384 # 128 * 3
89 |
90 | loss:
91 | core_method: point_pillar_loss
92 | args:
93 | cls_weight: 1.0
94 | reg: 2.0
95 |
96 | optimizer:
97 | core_method: Adam
98 | lr: 0.001
99 | args:
100 | eps: 1e-10
101 | weight_decay: 1e-4
102 |
103 | lr_scheduler:
104 | core_method: multistep #step, multistep and Exponential support
105 | gamma: 0.1
106 | step_size: [10, 15]
107 |
108 |
--------------------------------------------------------------------------------
/opencood/hypes_yaml/backup/point_pillar_intermediate_fusion.yaml:
--------------------------------------------------------------------------------
1 | name: point_pillar_intermediate_fusion
2 | root_dir: "/GPFS/rhome/yifanlu/workspace/dataset/OPV2V/train"
3 | validate_dir: "/GPFS/rhome/yifanlu/workspace/dataset/OPV2V/validate"
4 |
5 | noise_setting:
6 | add_noise: false
7 |
8 | yaml_parser: "load_point_pillar_params"
9 | train_params:
10 | batch_size: &batch_size 2
11 | epoches: 15
12 | eval_freq: 1
13 | save_freq: 1
14 |
15 | fusion:
16 | core_method: 'IntermediateFusionDataset' # LateFusionDataset, EarlyFusionDataset, IntermediateFusionDataset supported
17 | args: []
18 |
19 | # preprocess-related
20 | preprocess:
21 | # options: BasePreprocessor, VoxelPreprocessor, BevPreprocessor
22 | core_method: 'SpVoxelPreprocessor'
23 | args:
24 | voxel_size: &voxel_size [0.4, 0.4, 4]
25 | max_points_per_voxel: 32
26 | max_voxel_train: 32000
27 | max_voxel_test: 70000
28 | # lidar range for each individual cav.
29 | cav_lidar_range: &cav_lidar [-140.8, -40, -3, 140.8, 40, 1]
30 |
31 | data_augment:
32 | - NAME: random_world_flip
33 | ALONG_AXIS_LIST: [ 'x' ]
34 |
35 | - NAME: random_world_rotation
36 | WORLD_ROT_ANGLE: [ -0.78539816, 0.78539816 ]
37 |
38 | - NAME: random_world_scaling
39 | WORLD_SCALE_RANGE: [ 0.95, 1.05 ]
40 |
41 | # anchor box related
42 | postprocess:
43 | core_method: 'VoxelPostprocessor' # VoxelPostprocessor, BevPostprocessor supported
44 | anchor_args:
45 | cav_lidar_range: *cav_lidar
46 | l: 3.9
47 | w: 1.6
48 | h: 1.56
49 | r: [0, 90]
50 | feature_stride: 2
51 | num: &achor_num 2
52 | target_args:
53 | pos_threshold: 0.6
54 | neg_threshold: 0.45
55 | score_threshold: 0.20
56 | order: 'hwl' # hwl or lwh
57 | max_num: 100 # maximum number of objects in a single frame. use this number to make sure different frames has the same dimension in the same batch
58 | nms_thresh: 0.15
59 |
60 | # model related
61 | model:
62 | core_method: point_pillar_intermediate
63 | args:
64 | voxel_size: *voxel_size
65 | lidar_range: *cav_lidar
66 | anchor_number: *achor_num
67 |
68 | pillar_vfe:
69 | use_norm: true
70 | with_distance: false
71 | use_absolute_xyz: true
72 | num_filters: [64]
73 | point_pillar_scatter:
74 | num_features: 64
75 |
76 | base_bev_backbone:
77 | layer_nums: [3, 5, 8]
78 | layer_strides: [2, 2, 2]
79 | num_filters: [64, 128, 256]
80 | upsample_strides: [1, 2, 4]
81 | num_upsample_filter: [128, 128, 128]
82 | compression: 0
83 |
84 | anchor_num: *achor_num
85 |
86 | loss:
87 | core_method: point_pillar_loss
88 | args:
89 | cls_weight: 1.0
90 | reg: 2.0
91 |
92 | optimizer:
93 | core_method: Adam
94 | lr: 0.002
95 | args:
96 | eps: 1e-10
97 | weight_decay: 1e-4
98 |
99 | lr_scheduler:
100 | core_method: multistep #step, multistep and Exponential support
101 | gamma: 0.1
102 | step_size: [10, 15]
103 |
104 |
--------------------------------------------------------------------------------
/opencood/hypes_yaml/backup/point_pillar_late_fusion.yaml:
--------------------------------------------------------------------------------
1 | name: point_pillar_late_fusion
2 | root_dir: "opv2v_data_dumping/train"
3 | validate_dir: "opv2v_data_dumping/validate"
4 |
5 | yaml_parser: "load_point_pillar_params"
6 | train_params:
7 | batch_size: &batch_size 2
8 | epoches: 15
9 | eval_freq: 1
10 | save_freq: 1
11 |
12 | fusion:
13 | core_method: 'LateFusionDataset' # LateFusionDataset, EarlyFusionDataset, IntermediateFusionDataset supported
14 | args: []
15 |
16 | # preprocess-related
17 | preprocess:
18 | # options: BasePreprocessor, VoxelPreprocessor, BevPreprocessor
19 | core_method: 'SpVoxelPreprocessor'
20 | args:
21 | voxel_size: &voxel_size [0.4, 0.4, 4]
22 | max_points_per_voxel: 32
23 | max_voxel_train: 16000
24 | max_voxel_test: 40000
25 | # lidar range for each individual cav.
26 | cav_lidar_range: &cav_lidar [-70.4, -40, -3, 70.4, 40, 1]
27 |
28 | data_augment:
29 | - NAME: random_world_flip
30 | ALONG_AXIS_LIST: [ 'x' ]
31 |
32 | - NAME: random_world_rotation
33 | WORLD_ROT_ANGLE: [ -0.78539816, 0.78539816 ]
34 |
35 | - NAME: random_world_scaling
36 | WORLD_SCALE_RANGE: [ 0.95, 1.05 ]
37 |
38 | # anchor box related
39 | postprocess:
40 | core_method: 'VoxelPostprocessor' # VoxelPostprocessor, BevPostprocessor supported
41 | anchor_args:
42 | cav_lidar_range: *cav_lidar
43 | l: 3.9
44 | w: 1.6
45 | h: 1.56
46 | r: [0, 90]
47 | num: &achor_num 2
48 | target_args:
49 | pos_threshold: 0.6
50 | neg_threshold: 0.45
51 | score_threshold: 0.20
52 | order: 'hwl' # hwl or lwh
53 | max_num: 100 # maximum number of objects in a single frame. use this number to make sure different frames has the same dimension in the same batch
54 | nms_thresh: 0.15
55 |
56 | # model related
57 | model:
58 | core_method: point_pillar
59 | args:
60 | voxel_size: *voxel_size
61 | lidar_range: *cav_lidar
62 | anchor_number: *achor_num
63 | pillar_vfe:
64 | use_norm: true
65 | with_distance: false
66 | use_absolute_xyz: true
67 | num_filters: [64]
68 | point_pillar_scatter:
69 | num_features: 64
70 |
71 | base_bev_backbone:
72 | layer_nums: [3, 5, 8]
73 | layer_strides: [2, 2, 2]
74 | num_filters: [64, 128, 256]
75 | upsample_strides: [1, 2, 4]
76 | num_upsample_filter: [128, 128, 128]
77 |
78 | anchor_num: *achor_num
79 |
80 | loss:
81 | core_method: point_pillar_loss
82 | args:
83 | cls_weight: 1.0
84 | reg: 2.0
85 |
86 | optimizer:
87 | core_method: Adam
88 | lr: 0.002
89 | args:
90 | eps: 1e-10
91 | weight_decay: 1e-4
92 |
93 | lr_scheduler:
94 | core_method: multistep #step, multistep and Exponential support
95 | gamma: 0.1
96 | step_size: [10, 15]
97 |
98 |
--------------------------------------------------------------------------------
/opencood/hypes_yaml/backup/second_early_fusion.yaml:
--------------------------------------------------------------------------------
1 | name: second_early_fusion
2 | root_dir: "opv2v_data_dumping/train"
3 | validate_dir: "opv2v_data_dumping/validate"
4 |
5 | yaml_parser: "load_second_params"
6 | train_params:
7 | batch_size: &batch_size 2
8 | epoches: 30
9 | eval_freq: 1
10 | save_freq: 1
11 |
12 | fusion:
13 | core_method: 'EarlyFusionDataset' # LateFusionDataset, EarlyFusionDataset, IntermediateFusionDataset supported
14 | args: []
15 |
16 | # preprocess-related
17 | preprocess:
18 | # options: BasePreprocessor, VoxelPreprocessor, BevPreprocessor
19 | core_method: 'SpVoxelPreprocessor'
20 | args:
21 | voxel_size: &voxel_size [0.1, 0.1, 0.1]
22 | max_points_per_voxel: 5
23 | max_voxel_train: 32000
24 | max_voxel_test: 70000
25 | # lidar range for each individual cav.
26 | cav_lidar_range: &cav_lidar [-140.8, -40, -3, 140.8, 40, 1]
27 |
28 | data_augment:
29 | - NAME: random_world_flip
30 | ALONG_AXIS_LIST: [ 'x' ]
31 |
32 | - NAME: random_world_rotation
33 | WORLD_ROT_ANGLE: [ -0.78539816, 0.78539816 ]
34 |
35 | - NAME: random_world_scaling
36 | WORLD_SCALE_RANGE: [ 0.95, 1.05 ]
37 |
38 | # anchor box related
39 | postprocess:
40 | core_method: 'VoxelPostprocessor' # VoxelPostprocessor, BevPostprocessor supported
41 | anchor_args:
42 | cav_lidar_range: *cav_lidar
43 | l: 3.9
44 | w: 1.6
45 | h: 1.56
46 | r: [0, 90]
47 | feature_stride: 8
48 | num: &achor_num 2
49 | target_args:
50 | pos_threshold: 0.6
51 | neg_threshold: 0.45
52 | score_threshold: 0.20
53 | order: 'hwl' # hwl or lwh
54 | max_num: 100 # maximum number of objects in a single frame. use this number to make sure different frames has the same dimension in the same batch
55 | nms_thresh: 0.15
56 |
57 | # model related
58 | model:
59 | core_method: second
60 | args:
61 | batch_size: *batch_size
62 | voxel_size: *voxel_size
63 | lidar_range: *cav_lidar
64 | anchor_number: *achor_num
65 | mean_vfe:
66 | feature_points: 4
67 |
68 | backbone_3d: []
69 |
70 | height_compression:
71 | feature_num: 256
72 |
73 | base_bev_backbone:
74 | layer_nums: [5, 5]
75 | layer_strides: [1, 2]
76 | num_filters: [128, 256]
77 | upsample_strides: [1, 2]
78 | num_upsample_filter: [256, 256]
79 |
80 | anchor_num: *achor_num
81 |
82 | loss:
83 | core_method: point_pillar_loss
84 | args:
85 | cls_weight: 1.0
86 | reg: 2.0
87 |
88 | optimizer:
89 | core_method: Adam
90 | lr: 0.002
91 | args:
92 | eps: 1e-10
93 | weight_decay: 1e-4
94 |
95 | lr_scheduler:
96 | core_method: multistep #step, multistep and Exponential support
97 | gamma: 0.1
98 | step_size: [15, 30]
99 |
100 |
--------------------------------------------------------------------------------
/opencood/hypes_yaml/backup/second_intermediate_fusion.yaml:
--------------------------------------------------------------------------------
1 | name: second_intermediate_fusion
2 | root_dir: "opv2v_data_dumping/train"
3 | validate_dir: "opv2v_data_dumping/validate"
4 |
5 | yaml_parser: "load_second_params"
6 | train_params:
7 | batch_size: &batch_size 2
8 | epoches: 60
9 | eval_freq: 1
10 | save_freq: 1
11 |
12 | fusion:
13 | core_method: 'IntermediateFusionDataset' # LateFusionDataset, EarlyFusionDataset, IntermediateFusionDataset supported
14 | args: []
15 |
16 | # preprocess-related
17 | preprocess:
18 | # options: BasePreprocessor, VoxelPreprocessor, BevPreprocessor
19 | core_method: 'SpVoxelPreprocessor'
20 | args:
21 | voxel_size: &voxel_size [0.1, 0.1, 0.1]
22 | max_points_per_voxel: 5
23 | max_voxel_train: 36000
24 | max_voxel_test: 70000
25 | # lidar range for each individual cav.
26 | cav_lidar_range: &cav_lidar [-140.8, -40, -3, 140.8, 40, 1]
27 |
28 | data_augment:
29 | - NAME: random_world_flip
30 | ALONG_AXIS_LIST: [ 'x' ]
31 |
32 | - NAME: random_world_rotation
33 | WORLD_ROT_ANGLE: [ -0.78539816, 0.78539816 ]
34 |
35 | - NAME: random_world_scaling
36 | WORLD_SCALE_RANGE: [ 0.95, 1.05 ]
37 |
38 | # anchor box related
39 | postprocess:
40 | core_method: 'VoxelPostprocessor' # VoxelPostprocessor, BevPostprocessor supported
41 | anchor_args:
42 | cav_lidar_range: *cav_lidar
43 | l: 3.9
44 | w: 1.6
45 | h: 1.56
46 | r: [0, 90]
47 | feature_stride: 8
48 | num: &achor_num 2
49 | target_args:
50 | pos_threshold: 0.6
51 | neg_threshold: 0.45
52 | score_threshold: 0.20
53 | order: 'hwl' # hwl or lwh
54 | max_num: 100 # maximum number of objects in a single frame. use this number to make sure different frames has the same dimension in the same batch
55 | nms_thresh: 0.15
56 |
57 | # model related
58 | model:
59 | core_method: second_intermediate
60 | args:
61 | batch_size: *batch_size
62 | voxel_size: *voxel_size
63 | lidar_range: *cav_lidar
64 | anchor_number: *achor_num
65 | mean_vfe:
66 | feature_points: 4
67 |
68 | backbone_3d: []
69 |
70 | height_compression:
71 | feature_num: 256
72 |
73 | base_bev_backbone:
74 | layer_nums: [5, 5]
75 | layer_strides: [1, 2]
76 | num_filters: [128, 256]
77 | upsample_strides: [1, 2]
78 | num_upsample_filter: [256, 256]
79 |
80 | anchor_num: *achor_num
81 |
82 | loss:
83 | core_method: point_pillar_loss
84 | args:
85 | cls_weight: 1.0
86 | reg: 2.0
87 |
88 | optimizer:
89 | core_method: Adam
90 | lr: 0.002
91 | args:
92 | eps: 1e-10
93 | weight_decay: 1e-4
94 |
95 | lr_scheduler:
96 | core_method: multistep #step, multistep and Exponential support
97 | gamma: 0.1
98 | step_size: [15, 30]
99 |
100 |
--------------------------------------------------------------------------------
/opencood/hypes_yaml/backup/second_late_fusion.yaml:
--------------------------------------------------------------------------------
1 | name: second_late_fusion_low_res
2 | root_dir: "opv2v_data_dumping/train"
3 | validate_dir: "opv2v_data_dumping/validate"
4 |
5 | yaml_parser: "load_second_params"
6 | train_params:
7 | batch_size: &batch_size 2
8 | epoches: 40
9 | eval_freq: 1
10 | save_freq: 1
11 |
12 | fusion:
13 | core_method: 'LateFusionDataset' # LateFusionDataset, EarlyFusionDataset, IntermediateFusionDataset supported
14 | args: []
15 |
16 | # preprocess-related
17 | preprocess:
18 | # options: BasePreprocessor, VoxelPreprocessor, BevPreprocessor
19 | core_method: 'SpVoxelPreprocessor'
20 | args:
21 | voxel_size: &voxel_size [0.1, 0.1, 0.1]
22 | max_points_per_voxel: 5
23 | max_voxel_train: 16000
24 | max_voxel_test: 40000
25 | # lidar range for each individual cav.
26 | cav_lidar_range: &cav_lidar [-70.4, -40, -3, 70.4, 40, 1]
27 |
28 | data_augment:
29 | - NAME: random_world_flip
30 | ALONG_AXIS_LIST: [ 'x' ]
31 |
32 | - NAME: random_world_rotation
33 | WORLD_ROT_ANGLE: [ -0.78539816, 0.78539816 ]
34 |
35 | - NAME: random_world_scaling
36 | WORLD_SCALE_RANGE: [ 0.95, 1.05 ]
37 |
38 | # anchor box related
39 | postprocess:
40 | core_method: 'VoxelPostprocessor' # VoxelPostprocessor, BevPostprocessor supported
41 | anchor_args:
42 | cav_lidar_range: *cav_lidar
43 | l: 3.9
44 | w: 1.6
45 | h: 1.56
46 | r: [0, 90]
47 | feature_stride: 8
48 | num: &achor_num 2
49 | target_args:
50 | pos_threshold: 0.6
51 | neg_threshold: 0.45
52 | score_threshold: 0.20
53 | order: 'hwl' # hwl or lwh
54 | max_num: 100 # maximum number of objects in a single frame. use this number to make sure different frames has the same dimension in the same batch
55 | nms_thresh: 0.15
56 |
57 | # model related
58 | model:
59 | core_method: second
60 | args:
61 | batch_size: *batch_size
62 | voxel_size: *voxel_size
63 | lidar_range: *cav_lidar
64 | anchor_number: *achor_num
65 | mean_vfe:
66 | feature_points: 4
67 |
68 | backbone_3d: []
69 |
70 | height_compression:
71 | feature_num: 256
72 |
73 | base_bev_backbone:
74 | layer_nums: [5, 5]
75 | layer_strides: [1, 2]
76 | num_filters: [128, 256]
77 | upsample_strides: [1, 2]
78 | num_upsample_filter: [256, 256]
79 |
80 | anchor_num: *achor_num
81 |
82 | loss:
83 | core_method: point_pillar_loss
84 | args:
85 | cls_weight: 1.0
86 | reg: 2.0
87 |
88 | optimizer:
89 | core_method: Adam
90 | lr: 0.002
91 | args:
92 | eps: 1e-10
93 | weight_decay: 1e-4
94 |
95 | lr_scheduler:
96 | core_method: multistep #step, multistep and Exponential support
97 | gamma: 0.1
98 | step_size: [15, 30]
99 |
100 |
--------------------------------------------------------------------------------
/opencood/hypes_yaml/backup/visualization_dair.yaml:
--------------------------------------------------------------------------------
1 | # this yaml is only for visualization
2 | name: visualization
3 |
4 | yaml_parser: "load_voxel_params"
5 | data_dir: "/GPFS/rhome/quanhaoli/workspace/dataset/my_dair_v2x/v2x_c/cooperative-vehicle-infrastructure"
6 | root_dir: "/GPFS/rhome/quanhaoli/workspace/dataset/my_dair_v2x/v2x_c/cooperative-vehicle-infrastructure/train.json"
7 | validate_dir: "/GPFS/rhome/quanhaoli/workspace/dataset/my_dair_v2x/v2x_c/cooperative-vehicle-infrastructure/val.json"
8 | test_dir: "/GPFS/rhome/quanhaoli/workspace/dataset/my_dair_v2x/v2x_c/cooperative-vehicle-infrastructure/val.json"
9 |
10 | noise_setting:
11 | add_noise: false
12 |
13 | train_params:
14 | batch_size: &batch_size 4
15 | epoches: 100
16 | eval_freq: 1
17 | save_freq: 1
18 |
19 | only_vis_ego: False
20 | comm_range: 200
21 |
22 | fusion:
23 | core_method: 'LateFusionDatasetDAIR' # LateFusionDataset, EarlyFusionDataset, IntermediateFusionDataset supported
24 | args:
25 | proj_first: false
26 | clip_pc: false
27 |
28 | # preprocess-related
29 | preprocess:
30 | # options: BasePreprocessor, VoxelPreprocessor, BevPreprocessor
31 | core_method: 'SpVoxelPreprocessor'
32 | args:
33 | voxel_size: &voxel_size [0.4, 0.4, 4]
34 | max_points_per_voxel: 32
35 | max_voxel_train: 32000
36 | max_voxel_test: 70000
37 | # lidar range for each individual cav. Format: xyzxyz minmax
38 | cav_lidar_range: &cav_lidar [-90.8, -40, -3, 90.8, 40, 1]
39 |
40 | data_augment:
41 | - NAME: random_world_flip
42 | ALONG_AXIS_LIST: [ 'x' ]
43 |
44 | - NAME: random_world_rotation
45 | WORLD_ROT_ANGLE: [ -0.78539816, 0.78539816 ]
46 |
47 | - NAME: random_world_scaling
48 | WORLD_SCALE_RANGE: [ 0.95, 1.05 ]
49 |
50 | # anchor box related
51 | postprocess:
52 | core_method: 'VoxelPostprocessor' # VoxelPostprocessor, BevPostprocessor supported
53 | gt_range: [-90, -40, -3, 90, 40, 1]
54 | anchor_args:
55 | cav_lidar_range: *cav_lidar
56 | l: 3.9
57 | w: 1.6
58 | h: 1.56
59 | r: [0, 90]
60 | num: &achor_num 2
61 | target_args:
62 | pos_threshold: 0.6
63 | neg_threshold: 0.45
64 | score_threshold: 0.20
65 | order: 'hwl' # hwl or lwh
66 | max_num: 100 # maximum number of objects in a single frame. use this number to make sure different frames has the same dimension in the same batch
67 | nms_thresh: 0.15
68 |
69 | # model related
70 | model:
71 | core_method: point_pillar
72 | args:
73 | voxel_size: *voxel_size
74 | lidar_range: *cav_lidar
75 | anchor_number: *achor_num
76 | pillar_vfe:
77 | use_norm: true
78 | with_distance: false
79 | use_absolute_xyz: true
80 | num_filters: [64]
81 | point_pillar_scatter:
82 | num_features: 64
83 |
84 | base_bev_backbone:
85 | layer_nums: [3, 5, 8]
86 | layer_strides: [2, 2, 2]
87 | num_filters: [64, 128, 256]
88 | upsample_strides: [1, 2, 4]
89 | num_upsample_filter: [128, 128, 128]
90 |
91 | anchor_num: *achor_num
--------------------------------------------------------------------------------
/opencood/hypes_yaml/backup/visualization_opv2v.yaml:
--------------------------------------------------------------------------------
1 | # this yaml is only for visualization
2 | name: visualization
3 |
4 | yaml_parser: "load_voxel_params"
5 | root_dir: "/GPFS/rhome/yifanlu/workspace/dataset/OPV2V/train"
6 | validate_dir: "/GPFS/rhome/yifanlu/workspace/dataset/OPV2V/test"
7 |
8 | noise_setting:
9 | add_noise: false
10 |
11 | train_params:
12 | batch_size: &batch_size 4
13 | epoches: 100
14 | eval_freq: 1
15 | save_freq: 1
16 |
17 | only_vis_ego: False
18 | comm_range: 70
19 |
20 | fusion:
21 | core_method: 'LateFusionDataset' # LateFusionDataset, EarlyFusionDataset, IntermediateFusionDataset supported
22 | args:
23 | proj_first: false
24 |
25 | # preprocess-related
26 | preprocess:
27 | # options: BasePreprocessor, VoxelPreprocessor, BevPreprocessor
28 | core_method: 'SpVoxelPreprocessor'
29 | args:
30 | voxel_size: &voxel_size [0.4, 0.4, 4]
31 | max_points_per_voxel: 32
32 | max_voxel_train: 32000
33 | max_voxel_test: 70000
34 | # lidar range for each individual cav.
35 | cav_lidar_range: &cav_lidar [-140.8, -40, -3, 140.8, 40, 1]
36 |
37 | data_augment:
38 | - NAME: random_world_flip
39 | ALONG_AXIS_LIST: [ 'x' ]
40 |
41 | - NAME: random_world_rotation
42 | WORLD_ROT_ANGLE: [ -0.78539816, 0.78539816 ]
43 |
44 | - NAME: random_world_scaling
45 | WORLD_SCALE_RANGE: [ 0.95, 1.05 ]
46 |
47 | # anchor box related
48 | postprocess:
49 | core_method: 'VoxelPostprocessor' # VoxelPostprocessor, BevPostprocessor supported
50 | gt_range: [-140, -40, -3, 140, 40, 1]
51 | anchor_args:
52 | cav_lidar_range: *cav_lidar
53 | l: 3.9
54 | w: 1.6
55 | h: 1.56
56 | r: [0, 90]
57 | num: &achor_num 2
58 | target_args:
59 | pos_threshold: 0.6
60 | neg_threshold: 0.45
61 | score_threshold: 0.20
62 | order: 'hwl' # hwl or lwh
63 | max_num: 100 # maximum number of objects in a single frame. use this number to make sure different frames has the same dimension in the same batch
64 | nms_thresh: 0.15
65 |
66 | # model related
67 | model:
68 | core_method: point_pillar
69 | args:
70 | voxel_size: *voxel_size
71 | lidar_range: *cav_lidar
72 | anchor_number: *achor_num
73 | pillar_vfe:
74 | use_norm: true
75 | with_distance: false
76 | use_absolute_xyz: true
77 | num_filters: [64]
78 | point_pillar_scatter:
79 | num_features: 64
80 |
81 | base_bev_backbone:
82 | layer_nums: [3, 5, 8]
83 | layer_strides: [2, 2, 2]
84 | num_filters: [64, 128, 256]
85 | upsample_strides: [1, 2, 4]
86 | num_upsample_filter: [128, 128, 128]
87 |
88 | anchor_num: *achor_num
--------------------------------------------------------------------------------
/opencood/hypes_yaml/backup/visualization_v2x.yaml:
--------------------------------------------------------------------------------
1 | # this yaml is only for visualization
2 | name: visualization
3 |
4 | yaml_parser: "load_voxel_params"
5 | root_dir: "/GPFS/rhome/yifanlu/workspace/dataset/v2xsim2_info/v2xsim_infos_train.pkl"
6 | # validate_dir: "/GPFS/rhome/yifanlu/workspace/dataset/v2xsim_info/v2xsim_infos_train.pkl"
7 | validate_dir: "/GPFS/rhome/yifanlu/workspace/dataset/v2xsim2_info/v2xsim_infos_train.pkl"
8 |
9 | train_params:
10 | batch_size: &batch_size 4
11 | epoches: 100
12 | eval_freq: 1
13 | save_freq: 1
14 |
15 | only_vis_ego: False
16 | comm_range: 50
17 |
18 | fusion:
19 | core_method: 'LateFusionDatasetV2X' # LateFusionDataset, EarlyFusionDataset, IntermediateFusionDataset supported
20 | args:
21 | proj_first: True
22 |
23 | # preprocess-related
24 | preprocess:
25 | # options: BasePreprocessor, VoxelPreprocessor, BevPreprocessor
26 | core_method: 'SpVoxelPreprocessor'
27 | args:
28 | voxel_size: &voxel_size [0.4, 0.4, 0.4]
29 | max_points_per_voxel: &T 32
30 | max_voxel_train: 36000
31 | max_voxel_test: 70000
32 | # lidar range for each individual cav.
33 | cav_lidar_range: &cav_lidar [-32, -32, -3, 32, 32, 1]
34 |
35 | data_augment:
36 | - NAME: random_world_flip
37 | ALONG_AXIS_LIST: [ 'x' ]
38 |
39 | - NAME: random_world_rotation
40 | WORLD_ROT_ANGLE: [ -0.78539816, 0.78539816 ]
41 |
42 | - NAME: random_world_scaling
43 | WORLD_SCALE_RANGE: [ 0.95, 1.05 ]
44 |
45 | # anchor box related
46 | postprocess:
47 | core_method: 'VoxelPostprocessor' # VoxelPostprocessor, BevPostprocessor supported
48 | gt_range: [-32,-32,-3,32,32,1]
49 | anchor_args:
50 | cav_lidar_range: *cav_lidar
51 | l: 3.9
52 | w: 1.6
53 | h: 1.56
54 | r: [0, 90]
55 | num: &achor_num 2
56 | target_args:
57 | pos_threshold: 0.6
58 | neg_threshold: 0.45
59 | score_threshold: 0.96
60 | order: 'hwl' # hwl or lwh
61 | max_num: 100 # maximum number of objects in a single frame. use this number to make sure different frames has the same dimension in the same batch
62 | nms_thresh: 0.15
--------------------------------------------------------------------------------
/opencood/hypes_yaml/backup/voxelnet_early_fusion.yaml:
--------------------------------------------------------------------------------
1 | name: voxelnet_early_fusion
2 |
3 | yaml_parser: "load_voxel_params"
4 | root_dir: "opv2v_data_dumping/train"
5 | validate_dir: "opv2v_data_dumping/validate"
6 |
7 | train_params:
8 | batch_size: &batch_size 4
9 | epoches: 100
10 | eval_freq: 1
11 | save_freq: 1
12 |
13 | fusion:
14 | core_method: 'EarlyFusionDataset' # LateFusionDataset, EarlyFusionDataset, IntermediateFusionDataset supported
15 | args: []
16 |
17 | # preprocess-related
18 | preprocess:
19 | # options: BasePreprocessor, VoxelPreprocessor, BevPreprocessor
20 | core_method: 'SpVoxelPreprocessor'
21 | args:
22 | voxel_size: &voxel_size [0.4, 0.4, 0.4]
23 | max_points_per_voxel: &T 32
24 | max_voxel_train: 36000
25 | max_voxel_test: 70000
26 | # lidar range for each individual cav.
27 | cav_lidar_range: &cav_lidar [-140.8, -40, -3, 140.8, 40, 1]
28 |
29 | data_augment:
30 | - NAME: random_world_flip
31 | ALONG_AXIS_LIST: [ 'x' ]
32 |
33 | - NAME: random_world_rotation
34 | WORLD_ROT_ANGLE: [ -0.78539816, 0.78539816 ]
35 |
36 | - NAME: random_world_scaling
37 | WORLD_SCALE_RANGE: [ 0.95, 1.05 ]
38 |
39 | # anchor box related
40 | postprocess:
41 | core_method: 'VoxelPostprocessor' # VoxelPostprocessor, BevPostprocessor supported
42 | anchor_args:
43 | cav_lidar_range: *cav_lidar
44 | l: 3.9
45 | w: 1.6
46 | h: 1.56
47 | r: [0, 90]
48 | num: &achor_num 2
49 | target_args:
50 | pos_threshold: 0.6
51 | neg_threshold: 0.45
52 | score_threshold: 0.96
53 | order: 'hwl' # hwl or lwh
54 | max_num: 100 # maximum number of objects in a single frame. use this number to make sure different frames have the same dimension in the same batch
55 | nms_thresh: 0.20
56 |
57 | # model related
58 | model:
59 | core_method: voxel_net # corresponding to VoxelNet
60 | args:
61 | N: *batch_size
62 | T: *T
63 | lidar_range: *cav_lidar
64 | voxel_size: *voxel_size
65 | anchor_num: *achor_num
66 | pillar_vfe:
67 | use_norm: true
68 | with_distance: false
69 | use_absolute_xyz: true
70 | num_filters: [ 64 ]
71 |
72 | loss:
73 | core_method: point_pillar_loss
74 | args:
75 | cls_weight: 1.0
76 | reg: 2.0
77 |
78 | optimizer:
79 | core_method: Adam
80 | lr: 0.002
81 | args:
82 | eps: 1e-10
83 | weight_decay: 1e-4
84 |
85 | lr_scheduler:
86 | core_method: multistep # step, multistep and Exponential supported
87 | gamma: 0.1
88 | step_size: [15, 30]
--------------------------------------------------------------------------------
/opencood/hypes_yaml/backup/voxelnet_intermediate_fusion.yaml:
--------------------------------------------------------------------------------
1 | name: voxelnet_intermediate_fusion
2 |
3 | yaml_parser: "load_voxel_params"
4 | root_dir: "/GPFS/rhome/yifanlu/workspace/dataset/OPV2V/train"
5 | validate_dir: "/GPFS/rhome/yifanlu/workspace/dataset/OPV2V/validate"
6 | test_dir: "/GPFS/rhome/yifanlu/workspace/dataset/OPV2V/test"
7 |
8 | comm_range: 70
9 |
10 | train_params:
11 | batch_size: &batch_size 2
12 | epoches: 30
13 | eval_freq: 1
14 | save_freq: 1
15 |
16 | fusion:
17 | core_method: 'IntermediateFusionDataset' # LateFusionDataset, EarlyFusionDataset, IntermediateFusionDataset supported
18 | args: []
19 |
20 | # preprocess-related
21 | preprocess:
22 | # options: BasePreprocessor, VoxelPreprocessor, BevPreprocessor
23 | core_method: 'SpVoxelPreprocessor'
24 | args:
25 | voxel_size: &voxel_size [0.4, 0.4, 0.4]
26 | max_points_per_voxel: &T 32
27 | max_voxel_train: 36000
28 | max_voxel_test: 70000
29 | # lidar range for each individual cav.
30 | cav_lidar_range: &cav_lidar [-140.8, -41.6, -3, 140.8, 41.6, 1]
31 |
32 | data_augment:
33 | - NAME: random_world_flip
34 | ALONG_AXIS_LIST: [ 'x' ]
35 |
36 | - NAME: random_world_rotation
37 | WORLD_ROT_ANGLE: [ -0.78539816, 0.78539816 ]
38 |
39 | - NAME: random_world_scaling
40 | WORLD_SCALE_RANGE: [ 0.95, 1.05 ]
41 |
42 | # anchor box related
43 | postprocess:
44 | core_method: 'VoxelPostprocessor' # VoxelPostprocessor, BevPostprocessor supported
45 | anchor_args:
46 | cav_lidar_range: *cav_lidar
47 | l: 3.9
48 | w: 1.6
49 | h: 1.56
50 | r: [0, 90]
51 | num: &achor_num 2
52 | target_args:
53 | pos_threshold: 0.6
54 | neg_threshold: 0.45
55 | score_threshold: 0.25
56 | order: 'hwl' # hwl or lwh
57 | max_num: 100 # maximum number of objects in a single frame. use this number to make sure different frames have the same dimension in the same batch
58 | nms_thresh: 0.15
59 |
60 | # model related
61 | model:
62 | core_method: voxel_net_intermediate # corresponding to VoxelNet
63 | args:
64 | N: *batch_size
65 | T: *T
66 | lidar_range: *cav_lidar
67 | voxel_size: *voxel_size
68 | anchor_num: *achor_num
69 | compression: 0
70 | pillar_vfe:
71 | use_norm: true
72 | with_distance: false
73 | use_absolute_xyz: true
74 | num_filters: [ 64 ]
75 |
76 | loss:
77 | core_method: point_pillar_loss
78 | args:
79 | cls_weight: 1.0
80 | reg: 2.0
81 |
82 | optimizer:
83 | core_method: Adam
84 | lr: 0.002
85 | args:
86 | eps: 1e-10
87 | weight_decay: 1e-4
88 |
89 | lr_scheduler:
90 | core_method: multistep # step, multistep and Exponential supported
91 | gamma: 0.1
92 | step_size: [15, 30]
--------------------------------------------------------------------------------
/opencood/hypes_yaml/backup/voxelnet_late_fusion.yaml:
--------------------------------------------------------------------------------
1 | name: voxelnet_late_fusion
2 |
3 | yaml_parser: "load_voxel_params"
4 | root_dir: "opv2v_data_dumping/train"
5 | validate_dir: "opv2v_data_dumping/validate"
6 |
7 | train_params:
8 | batch_size: &batch_size 4
9 | epoches: 30
10 | eval_freq: 1
11 | save_freq: 1
12 |
13 | fusion:
14 | core_method: 'LateFusionDataset' # LateFusionDataset, EarlyFusionDataset, IntermediateFusionDataset supported
15 | args: []
16 |
17 | # preprocess-related
18 | preprocess:
19 | # options: BasePreprocessor, VoxelPreprocessor, BevPreprocessor
20 | core_method: 'SpVoxelPreprocessor'
21 | args:
22 | voxel_size: &voxel_size [0.4, 0.4, 0.4]
23 | max_points_per_voxel: &T 32
24 | max_voxel_train: 16000
25 | max_voxel_test: 40000
26 | # lidar range for each individual cav.
27 | cav_lidar_range: &cav_lidar [-70.4, -40, -3, 70.4, 40, 1]
28 |
29 | data_augment:
30 | - NAME: random_world_flip
31 | ALONG_AXIS_LIST: [ 'x' ]
32 |
33 | - NAME: random_world_rotation
34 | WORLD_ROT_ANGLE: [ -0.78539816, 0.78539816 ]
35 |
36 | - NAME: random_world_scaling
37 | WORLD_SCALE_RANGE: [ 0.95, 1.05 ]
38 |
39 | # anchor box related
40 | postprocess:
41 | core_method: 'VoxelPostprocessor' # VoxelPostprocessor, BevPostprocessor supported
42 | anchor_args:
43 | cav_lidar_range: *cav_lidar
44 | l: 3.9
45 | w: 1.6
46 | h: 1.56
47 | r: [0, 90]
48 | num: &achor_num 2
49 | target_args:
50 | pos_threshold: 0.6
51 | neg_threshold: 0.45
52 | score_threshold: 0.25
53 | order: 'hwl' # hwl or lwh
54 | max_num: 100 # maximum number of objects in a single frame. use this number to make sure different frames have the same dimension in the same batch
55 | nms_thresh: 0.05
56 |
57 | # model related
58 | model:
59 | core_method: voxel_net # corresponding to VoxelNet
60 | args:
61 | N: *batch_size
62 | T: *T
63 | lidar_range: *cav_lidar
64 | voxel_size: *voxel_size
65 | anchor_num: *achor_num
66 | pillar_vfe:
67 | use_norm: true
68 | with_distance: false
69 | use_absolute_xyz: true
70 | num_filters: [ 64 ]
71 |
72 | loss:
73 | core_method: point_pillar_loss
74 | args:
75 | cls_weight: 1.0
76 | reg: 2.0
77 |
78 | optimizer:
79 | core_method: Adam
80 | lr: 0.002
81 | args:
82 | eps: 1e-10
83 | weight_decay: 1e-4
84 |
85 | lr_scheduler:
86 | core_method: multistep # step, multistep and Exponential supported
87 | gamma: 0.1
88 | step_size: [15, 30]
--------------------------------------------------------------------------------
/opencood/hypes_yaml/opv2v/lidar_only/readme.md:
--------------------------------------------------------------------------------
1 | # To train fpv-rcnn & fvoxel-rcnn
2 |
3 | To train FPV-RCNN efficiently, it is recommended to first train the first stage and then train the whole network.
4 | An example training schedule could be:
5 |
6 | 1. Train stage1 for 20 epochs: use fpvrcnn.yaml as a reference and make sure that stage2 is inactive:
7 |
8 | ```yaml
9 | model:
10 | args:
11 | activate_stage2: False
12 | ```
13 |
14 | 2. Train stage1 and stage2 **for another 20 epochs**: set ```activate_stage2``` to ```True``` and ```epoches``` to ```40```, resume the parameters from step 1 and train further.
15 |
16 |
17 | Note that fvoxel-rcnn stage2 seems to only accept a batch size of 1.
--------------------------------------------------------------------------------
/opencood/hypes_yaml/v2xset/lidar_only/readme.md:
--------------------------------------------------------------------------------
1 | # To train fpv-rcnn & fvoxel-rcnn
2 |
3 | To train FPV-RCNN efficiently, it is recommended to first train the first stage and then train the whole network.
4 | An example training schedule could be:
5 |
6 | 1. Train stage1 for 20 epochs: use fpvrcnn.yaml as a reference and make sure that stage2 is inactive:
7 |
8 | ```yaml
9 | model:
10 | args:
11 | activate_stage2: False
12 | ```
13 |
14 | 2. Train stage1 and stage2 **for another 20 epochs**: set ```activate_stage2``` to ```True``` and ```epoches``` to ```40```, resume the parameters from step 1 and train further.
15 |
16 |
17 | Note that fvoxel-rcnn stage2 seems to only accept a batch size of 1.
--------------------------------------------------------------------------------
/opencood/loss/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PhyllisH/CodeFilling/57f83ccc7d8457da74f06424811c130c3020e19e/opencood/loss/__init__.py
--------------------------------------------------------------------------------
/opencood/loss/heterception_loss.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import nn
3 | import numpy as np
4 | from opencood.loss.point_pillar_loss import PointPillarLoss
5 | from icecream import ic
6 |
7 | class HeterceptionLoss(PointPillarLoss):
8 | def __init__(self, args):
9 | super(HeterceptionLoss, self).__init__(args)
10 | self.ssl = args['ssl']
11 | self.l2_loss = nn.MSELoss()
12 |
13 | def forward(self, output_dict, target_dict, suffix=""):
14 | """
15 | Parameters
16 | ----------
17 | output_dict : dict
18 | target_dict : dict
19 | """
20 | total_loss = super().forward(output_dict, target_dict, suffix)
21 |
22 | ###### self-supervise learning loss ######
23 | if f'poi_feature_pred{suffix}' in output_dict and output_dict[f'poi_feature_pred{suffix}'].shape[0]!=0:
24 | ssl_loss = self.l2_loss(output_dict[f'poi_feature_pred{suffix}'],
25 | output_dict[f'poi_feature_gt{suffix}'],)
26 | ssl_loss *= self.ssl['weight']
27 | total_loss += ssl_loss
28 | self.loss_dict.update({'total_loss': total_loss.item(),
29 | 'ssl_loss': ssl_loss.item()})
30 | return total_loss
31 |
32 | def logging(self, epoch, batch_id, batch_len, writer = None, suffix=""):
33 | """
34 | Print out the loss function for current iteration.
35 |
36 | Parameters
37 | ----------
38 | epoch : int
39 | Current epoch for training.
40 | batch_id : int
41 | The current batch.
42 | batch_len : int
43 | Total batch length in one iteration of training,
44 | writer : SummaryWriter
45 | Used to visualize on tensorboard
46 | """
47 | total_loss = self.loss_dict.get('total_loss', 0)
48 | reg_loss = self.loss_dict.get('reg_loss', 0)
49 | cls_loss = self.loss_dict.get('cls_loss', 0)
50 | dir_loss = self.loss_dict.get('dir_loss', 0)
51 | iou_loss = self.loss_dict.get('iou_loss', 0)
52 | ssl_loss = self.loss_dict.get('ssl_loss', 0)
53 |
54 |
55 | print("[epoch %d][%d/%d]%s || Loss: %.4f || Conf Loss: %.4f"
56 | " || Loc Loss: %.4f || Dir Loss: %.4f || IoU Loss: %.4f || SSL Loss: %.4f" % (
57 | epoch, batch_id + 1, batch_len, suffix,
58 | total_loss, cls_loss, reg_loss, dir_loss, iou_loss, ssl_loss))
59 |
60 | if not writer is None:
61 | writer.add_scalar('Regression_loss', reg_loss,
62 | epoch*batch_len + batch_id)
63 | writer.add_scalar('Confidence_loss', cls_loss,
64 | epoch*batch_len + batch_id)
65 | writer.add_scalar('Dir_loss', dir_loss,
66 | epoch*batch_len + batch_id)
67 | writer.add_scalar('Iou_loss', iou_loss,
68 | epoch*batch_len + batch_id)
69 | writer.add_scalar('SSL_loss', ssl_loss,
70 | epoch*batch_len + batch_id)
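A minimal, self-contained sketch of the SSL term used above (not the full PointPillarLoss pipeline): an MSE between predicted and ground-truth point-of-interest features, scaled by the configured weight and added to the detection loss. The weight, feature shapes, and the stand-in detection loss value are illustrative only.

```python
import torch
import torch.nn as nn

ssl_weight = 0.5                       # plays the role of args['ssl']['weight'] (illustrative)
l2_loss = nn.MSELoss()

detection_loss = torch.tensor(1.25)    # stand-in for super().forward(...)
poi_feature_pred = torch.randn(8, 64)  # 8 POIs with 64-dim features (illustrative shapes)
poi_feature_gt = torch.randn(8, 64)

ssl_loss = l2_loss(poi_feature_pred, poi_feature_gt) * ssl_weight
total_loss = detection_loss + ssl_loss
print(f"ssl: {ssl_loss.item():.4f}, total: {total_loss.item():.4f}")
```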
--------------------------------------------------------------------------------
/opencood/models/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PhyllisH/CodeFilling/57f83ccc7d8457da74f06424811c130c3020e19e/opencood/models/__init__.py
--------------------------------------------------------------------------------
/opencood/models/ciassd.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import nn
3 | import numpy as np
4 |
5 | from opencood.models.sub_modules.mean_vfe import MeanVFE
6 | from opencood.models.sub_modules.sparse_backbone_3d import VoxelBackBone8x
7 | from opencood.models.sub_modules.height_compression import HeightCompression
8 | from opencood.models.sub_modules.cia_ssd_utils import SSFA, Head
9 |
10 |
11 | class CIASSD(nn.Module):
12 | def __init__(self, args):
13 | super(CIASSD, self).__init__()
14 | lidar_range = np.array(args['lidar_range'])
15 | grid_size = np.round((lidar_range[3:6] - lidar_range[:3]) /
16 | np.array(args['voxel_size'])).astype(np.int64)
17 | self.vfe = MeanVFE(args['mean_vfe'], args['mean_vfe']['num_point_features'])
18 | self.spconv_block = VoxelBackBone8x(args['spconv'],
19 | input_channels=args['spconv']['num_features_in'],
20 | grid_size=grid_size)
21 | self.map_to_bev = HeightCompression(args['map2bev'])
22 | self.ssfa = SSFA(args['ssfa'])
23 | self.head = Head(**args['head'])
24 |
25 | def forward(self, batch_dict):
26 | voxel_features = batch_dict['processed_lidar']['voxel_features']
27 | voxel_coords = batch_dict['processed_lidar']['voxel_coords']
28 | voxel_num_points = batch_dict['processed_lidar']['voxel_num_points']
29 |
30 | # save memory
31 | batch_dict.pop('processed_lidar')
32 | batch_dict.update({'voxel_features': voxel_features,
33 | 'voxel_coords': voxel_coords,
34 | 'voxel_num_points': voxel_num_points})
35 |
36 | batch_dict['batch_size'] = batch_dict['object_bbx_center'].shape[0]
37 |
38 | batch_dict = self.vfe(batch_dict)
39 | batch_dict = self.spconv_block(batch_dict)
40 | batch_dict = self.map_to_bev(batch_dict)
41 | out = self.ssfa(batch_dict['spatial_features'])
42 | out = self.head(out)
43 | batch_dict['preds_dict_stage1'] = out
44 |
45 | return batch_dict
46 |
47 |
48 |
49 | if __name__=="__main__":
50 | model = SSFA(None)
51 | print(model)
--------------------------------------------------------------------------------
/opencood/models/da_modules/gsl.py:
--------------------------------------------------------------------------------
1 | """
2 | https://github.com/DerrickXuNu/MPDA/blob/9879d4b615/opencood/models/da_modules/gradient_layer.py
3 | """
4 |
5 | import torch
6 |
7 |
8 | class _GradientScalarLayer(torch.autograd.Function):
9 | @staticmethod
10 | def forward(ctx, input, weight):
11 | ctx.weight = weight
12 | return input.view_as(input)
13 |
14 | @staticmethod
15 | def backward(ctx, grad_output):
16 | grad_input = grad_output.clone()
17 | return ctx.weight * grad_input, None
18 |
19 |
20 | gradient_scalar = _GradientScalarLayer.apply
21 |
22 |
23 | class GradientScalarLayer(torch.nn.Module):
24 | def __init__(self, weight):
25 | super(GradientScalarLayer, self).__init__()
26 | self.weight = weight
27 |
28 | def forward(self, input):
29 | return gradient_scalar(input, self.weight)
30 |
31 | def __repr__(self):
32 | tmpstr = self.__class__.__name__ + "("
33 | tmpstr += "weight=" + str(self.weight)
34 | tmpstr += ")"
35 | return tmpstr
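A quick usage sketch (assuming the opencood package is importable): the forward pass is an identity, while the backward pass multiplies incoming gradients by `weight`, so a negative weight acts as a gradient reversal layer for domain adaptation.

```python
import torch
from opencood.models.da_modules.gsl import GradientScalarLayer

grl = GradientScalarLayer(weight=-1.0)
x = torch.ones(3, requires_grad=True)
y = grl(x).sum()        # forward: identity, so y == 3
y.backward()
print(x.grad)           # tensor([-1., -1., -1.]) -> gradients flipped, values unchanged
```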
--------------------------------------------------------------------------------
/opencood/models/fuse_modules/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PhyllisH/CodeFilling/57f83ccc7d8457da74f06424811c130c3020e19e/opencood/models/fuse_modules/__init__.py
--------------------------------------------------------------------------------
/opencood/models/fuse_modules/f_cooper_fuse.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # Author: Runsheng Xu
3 | # License: TDG-Attribution-NonCommercial-NoDistrib
4 |
5 |
6 | """
7 | Implementation of F-cooper maxout fusing.
8 | """
9 | import torch
10 | import torch.nn as nn
11 |
12 |
13 | class SpatialFusion(nn.Module):
14 | def __init__(self):
15 | super(SpatialFusion, self).__init__()
16 |
17 | def regroup(self, x, record_len):
18 | cum_sum_len = torch.cumsum(record_len, dim=0)
19 | split_x = torch.tensor_split(x, cum_sum_len[:-1].cpu())
20 | return split_x
21 |
22 | def forward(self, x, record_len):
23 | # x: B, C, H, W, split x:[(B1, C, W, H), (B2, C, W, H)]
24 | split_x = self.regroup(x, record_len)
25 | out = []
26 |
27 | for xx in split_x:
28 | xx = torch.max(xx, dim=0, keepdim=True)[0]
29 | out.append(xx)
30 | return torch.cat(out, dim=0)
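A usage sketch for the F-Cooper maxout fusion above, with illustrative feature-map shapes: features from all CAVs in a batch are stacked along dim 0, `record_len` says how many belong to each sample, and each group is reduced with an element-wise max.

```python
import torch
from opencood.models.fuse_modules.f_cooper_fuse import SpatialFusion

fusion = SpatialFusion()
x = torch.randn(5, 64, 100, 252)     # 5 CAV feature maps in total (illustrative)
record_len = torch.tensor([2, 3])    # sample 1 has 2 CAVs, sample 2 has 3
fused = fusion(x, record_len)
print(fused.shape)                   # torch.Size([2, 64, 100, 252])
```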
--------------------------------------------------------------------------------
/opencood/models/fuse_modules/fuse_utils.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # Author: Runsheng Xu
3 | # License: TDG-Attribution-NonCommercial-NoDistrib
4 |
5 |
6 | import torch
7 | import numpy as np
8 |
9 | from einops import rearrange
10 | from opencood.utils.common_utils import torch_tensor_to_numpy
11 |
12 |
13 | def regroup(dense_feature, record_len, max_len):
14 | """
15 | Regroup the data based on the record_len.
16 | Parameters
17 | ----------
18 | dense_feature : torch.Tensor
19 | N, C, H, W
20 | record_len : list
21 | [sample1_len, sample2_len, ...]
22 | max_len : int
23 | Maximum cav number
24 | Returns
25 | -------
26 | regroup_feature : torch.Tensor
27 | B, L, C, H, W
28 | """
29 | cum_sum_len = list(np.cumsum(torch_tensor_to_numpy(record_len)))
30 | split_features = torch.tensor_split(dense_feature,
31 | cum_sum_len[:-1])
32 | regroup_features = []
33 | mask = []
34 |
35 | for split_feature in split_features:
36 | # M, C, H, W
37 | feature_shape = split_feature.shape
38 |
39 | # the maximum M is 5 as most 5 cavs
40 | padding_len = max_len - feature_shape[0]
41 | mask.append([1] * feature_shape[0] + [0] * padding_len)
42 |
43 | padding_tensor = torch.zeros(padding_len, feature_shape[1],
44 | feature_shape[2], feature_shape[3])
45 | padding_tensor = padding_tensor.to(split_feature.device)
46 |
47 | split_feature = torch.cat([split_feature, padding_tensor],
48 | dim=0)
49 |
50 | # 1, 5C, H, W
51 | split_feature = split_feature.view(-1,
52 | feature_shape[2],
53 | feature_shape[3]).unsqueeze(0)
54 | regroup_features.append(split_feature)
55 |
56 | # B, 5C, H, W
57 | regroup_features = torch.cat(regroup_features, dim=0)
58 | # B, L, C, H, W
59 | regroup_features = rearrange(regroup_features,
60 | 'b (l c) h w -> b l c h w',
61 | l=max_len)
62 | mask = torch.from_numpy(np.array(mask)).to(regroup_features.device)
63 |
64 | return regroup_features, mask
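A usage sketch for `regroup`, assuming the opencood package (and its einops dependency) is importable and using illustrative shapes: every sample's CAV features are padded up to `max_len`, so a batch with a varying number of CAVs becomes a dense (B, L, C, H, W) tensor plus a validity mask.

```python
import torch
from opencood.models.fuse_modules.fuse_utils import regroup

dense_feature = torch.randn(5, 64, 48, 176)   # 2 + 3 CAV feature maps (illustrative)
record_len = torch.tensor([2, 3])
regroup_feature, mask = regroup(dense_feature, record_len, max_len=5)
print(regroup_feature.shape)                  # torch.Size([2, 5, 64, 48, 176])
print(mask)                                   # [[1, 1, 0, 0, 0], [1, 1, 1, 0, 0]]
```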
--------------------------------------------------------------------------------
/opencood/models/fuse_modules/fuse_utils_ori.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # Author: Runsheng Xu
3 | # License: TDG-Attribution-NonCommercial-NoDistrib
4 |
5 |
6 | import torch
7 | import numpy as np
8 |
9 | from einops import rearrange
10 | from opencood.utils.common_utils import torch_tensor_to_numpy
11 |
12 |
13 | def regroup(dense_feature, record_len, max_len):
14 | """
15 | Regroup the data based on the record_len.
16 | Parameters
17 | ----------
18 | dense_feature : torch.Tensor
19 | N, C, H, W
20 | record_len : list
21 | [sample1_len, sample2_len, ...]
22 | max_len : int
23 | Maximum cav number
24 | Returns
25 | -------
26 | regroup_feature : torch.Tensor
27 | B, L, C, H, W
28 | """
29 | cum_sum_len = list(np.cumsum(torch_tensor_to_numpy(record_len)))
30 | split_features = torch.tensor_split(dense_feature,
31 | cum_sum_len[:-1])
32 | regroup_features = []
33 | mask = []
34 |
35 | for split_feature in split_features:
36 | # M, C, H, W
37 | feature_shape = split_feature.shape
38 |
39 | # the maximum M is 5 as most 5 cavs
40 | padding_len = max_len - feature_shape[0]
41 | mask.append([1] * feature_shape[0] + [0] * padding_len)
42 |
43 | padding_tensor = torch.zeros(padding_len, feature_shape[1],
44 | feature_shape[2], feature_shape[3])
45 | padding_tensor = padding_tensor.to(split_feature.device)
46 |
47 | split_feature = torch.cat([split_feature, padding_tensor],
48 | dim=0)
49 |
50 | # 1, 5C, H, W
51 | split_feature = split_feature.view(-1,
52 | feature_shape[2],
53 | feature_shape[3]).unsqueeze(0)
54 | regroup_features.append(split_feature)
55 |
56 | # B, 5C, H, W
57 | regroup_features = torch.cat(regroup_features, dim=0)
58 | # B, L, C, H, W
59 | regroup_features = rearrange(regroup_features,
60 | 'b (l c) h w -> b l c h w',
61 | l=max_len)
62 | mask = torch.from_numpy(np.array(mask)).to(regroup_features.device)
63 |
64 | return regroup_features, mask
--------------------------------------------------------------------------------
/opencood/models/fuse_modules/hmvit/split_attn.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import torch
4 | import torch.nn as nn
5 | import torch.nn.functional as F
6 |
7 |
8 | class RadixSoftmax(nn.Module):
9 | def __init__(self, radix, cardinality):
10 | super(RadixSoftmax, self).__init__()
11 | self.radix = radix
12 | self.cardinality = cardinality
13 |
14 | def forward(self, x):
15 | # x: (B, L, 1, 1, 3C)
16 | batch = x.size(0)
17 | cav_num = x.size(1)
18 |
19 | if self.radix > 1:
20 | # x: (B, L, 1, 3, C)
21 | x = x.view(batch,
22 | cav_num,
23 | self.cardinality, self.radix, -1)
24 | x = F.softmax(x, dim=3)
25 | # B, 3LC
26 | x = x.reshape(batch, -1)
27 | else:
28 | x = torch.sigmoid(x)
29 | return x
30 |
31 |
32 | class SplitAttn(nn.Module):
33 | def __init__(self, input_dim, num_windows=3):
34 | super(SplitAttn, self).__init__()
35 | self.input_dim = input_dim
36 |
37 | self.fc1 = nn.Linear(input_dim, input_dim, bias=False)
38 | self.bn1 = nn.LayerNorm(input_dim)
39 | self.act1 = nn.ReLU()
40 | self.fc2 = nn.Linear(input_dim, input_dim * num_windows, bias=False)
41 |
42 | self.num_windows = num_windows
43 |
44 | self.rsoftmax = RadixSoftmax(num_windows, 1)
45 |
46 | def forward(self, window_list):
47 | # window list: [(B, L, H, W, C) * 3]
48 | # assert len(window_list) == 3, 'only 3 windows are supported'
49 |
50 | B, L = window_list[0].shape[0], window_list[0].shape[1]
51 |
52 | # global average pooling, B, L, H, W, C
53 | x_gap = sum(window_list)
54 | # B, L, 1, 1, C
55 | x_gap = x_gap.mean((2, 3), keepdim=True)
56 | x_gap = self.act1(self.bn1(self.fc1(x_gap)))
57 | # B, L, 1, 1, C*num_window
58 | x_attn = self.fc2(x_gap)
59 | # B L 1 1 3C
60 | x_attn = self.rsoftmax(x_attn).view(B, L, 1, 1, -1)
61 | out = 0
62 | for i in range(len(window_list)):
63 | start = i * self.input_dim
64 | end = (i + 1) * self.input_dim
65 | out += window_list[i] * x_attn[:, :, :, :, start: end]
66 |
67 | return out
68 |
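A usage sketch for the split-attention fusion above, with illustrative shapes: the three window branches of identical shape (B, L, H, W, C) are pooled, turned into per-branch attention weights by RadixSoftmax, and recombined into a single feature map.

```python
import torch
from opencood.models.fuse_modules.hmvit.split_attn import SplitAttn

attn = SplitAttn(input_dim=64, num_windows=3)
windows = [torch.randn(2, 5, 32, 32, 64) for _ in range(3)]   # illustrative shapes
out = attn(windows)
print(out.shape)    # torch.Size([2, 5, 32, 32, 64])
```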
--------------------------------------------------------------------------------
/opencood/models/heter_pointpillars_lift_splat_v2_fordebug.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PhyllisH/CodeFilling/57f83ccc7d8457da74f06424811c130c3020e19e/opencood/models/heter_pointpillars_lift_splat_v2_fordebug.py
--------------------------------------------------------------------------------
/opencood/models/point_pillar_.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # Author: Runsheng Xu , OpenPCDet
3 | # License: TDG-Attribution-NonCommercial-NoDistrib
4 |
5 |
6 | import torch
7 | import torch.nn as nn
8 |
9 |
10 | from opencood.models.sub_modules.pillar_vfe import PillarVFE
11 | from opencood.models.sub_modules.point_pillar_scatter import PointPillarScatter
12 | from opencood.models.sub_modules.base_bev_backbone_resnet import ResNetBEVBackbone
13 | from opencood.models.sub_modules.base_bev_backbone import BaseBEVBackbone
14 | from opencood.models.sub_modules.downsample_conv import DownsampleConv
15 |
16 |
17 | class PointPillar_(nn.Module):
18 | def __init__(self, args, ):
19 | super(PointPillar_, self).__init__()
20 | # PIllar VFE
21 | self.pillar_vfe = PillarVFE(args['pillar_vfe'],
22 | num_point_features=4,
23 | voxel_size=args['voxel_size'],
24 | point_cloud_range=args['lidar_range'])
25 | self.scatter = PointPillarScatter(args['point_pillar_scatter'])
26 |
27 |
28 | def forward(self, data_dict):
29 | pass
--------------------------------------------------------------------------------
/opencood/models/second.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # Author: Runsheng Xu , OpenPCDet
3 | # License: TDG-Attribution-NonCommercial-NoDistrib
4 |
5 |
6 | import torch.nn as nn
7 |
8 | from opencood.models.sub_modules.mean_vfe import MeanVFE
9 | from opencood.models.sub_modules.sparse_backbone_3d import VoxelBackBone8x
10 | from opencood.models.sub_modules.height_compression import HeightCompression
11 | from opencood.models.sub_modules.base_bev_backbone import BaseBEVBackbone
12 |
13 |
14 | class Second(nn.Module):
15 | def __init__(self, args):
16 | super(Second, self).__init__()
17 |
18 | # mean_vfe
19 | self.mean_vfe = MeanVFE(args['mean_vfe'], 4)
20 | # sparse 3d backbone
21 | self.backbone_3d = VoxelBackBone8x(args['backbone_3d'],
22 | 4, args['grid_size'])
23 | # height compression
24 | self.height_compression = HeightCompression(args['height_compression'])
25 | # base bev backbone
26 | self.backbone_2d = BaseBEVBackbone(args['base_bev_backbone'], 256)
27 |
28 | # head
29 | self.cls_head = nn.Conv2d(256 * 2, args['anchor_number'],
30 | kernel_size=1)
31 | self.reg_head = nn.Conv2d(256 * 2, 7 * args['anchor_num'],
32 | kernel_size=1)
33 |
34 | def forward(self, data_dict):
35 |
36 | voxel_features = data_dict['processed_lidar']['voxel_features']
37 | voxel_coords = data_dict['processed_lidar']['voxel_coords']
38 | voxel_num_points = data_dict['processed_lidar']['voxel_num_points']
39 | batch_size = voxel_coords[:,0].max() + 1 # the batch index is stored in the first column of voxel_coords
40 |
41 | batch_dict = {'voxel_features': voxel_features,
42 | 'voxel_coords': voxel_coords,
43 | 'voxel_num_points': voxel_num_points,
44 | 'batch_size': batch_size}
45 |
46 | batch_dict = self.mean_vfe(batch_dict)
47 | batch_dict = self.backbone_3d(batch_dict)
48 | batch_dict = self.height_compression(batch_dict)
49 | batch_dict = self.backbone_2d(batch_dict)
50 |
51 | spatial_features_2d = batch_dict['spatial_features_2d']
52 |
53 | psm = self.cls_head(spatial_features_2d)
54 | rm = self.reg_head(spatial_features_2d)
55 |
56 | output_dict = {'psm': psm,
57 | 'rm': rm}
58 |
59 | return output_dict
--------------------------------------------------------------------------------
/opencood/models/second_intermediate.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # Author: Runsheng Xu
3 | # License: TDG-Attribution-NonCommercial-NoDistrib
4 |
5 |
6 | import torch
7 | import torch.nn as nn
8 |
9 | from opencood.models.sub_modules.mean_vfe import MeanVFE
10 | from opencood.models.sub_modules.sparse_backbone_3d import VoxelBackBone8x
11 | from opencood.models.sub_modules.height_compression import HeightCompression
12 | from opencood.models.sub_modules.att_bev_backbone import AttBEVBackbone
13 |
14 |
15 | class SecondIntermediate(nn.Module):
16 | def __init__(self, args):
17 | super(SecondIntermediate, self).__init__()
18 |
19 | self.batch_size = args['batch_size']
20 | # mean_vfe
21 | self.mean_vfe = MeanVFE(args['mean_vfe'], 4)
22 | # sparse 3d backbone
23 | self.backbone_3d = VoxelBackBone8x(args['backbone_3d'],
24 | 4, args['grid_size'])
25 | # height compression
26 | self.height_compression = HeightCompression(args['height_compression'])
27 | # base bev backbone
28 | self.backbone_2d = AttBEVBackbone(args['base_bev_backbone'], 256)
29 |
30 | # head
31 | self.cls_head = nn.Conv2d(256 * 2, args['anchor_number'],
32 | kernel_size=1)
33 | self.reg_head = nn.Conv2d(256 * 2, 7 * args['anchor_num'],
34 | kernel_size=1)
35 |
36 | def forward(self, data_dict):
37 |
38 | voxel_features = data_dict['processed_lidar']['voxel_features']
39 | voxel_coords = data_dict['processed_lidar']['voxel_coords']
40 | voxel_num_points = data_dict['processed_lidar']['voxel_num_points']
41 | record_len = data_dict['record_len']
42 |
43 | batch_dict = {'voxel_features': voxel_features,
44 | 'voxel_coords': voxel_coords,
45 | 'voxel_num_points': voxel_num_points,
46 | 'batch_size': torch.sum(record_len).cpu().numpy(),
47 | 'record_len': record_len}
48 |
49 | batch_dict = self.mean_vfe(batch_dict)
50 | batch_dict = self.backbone_3d(batch_dict)
51 | batch_dict = self.height_compression(batch_dict)
52 | batch_dict = self.backbone_2d(batch_dict)
53 |
54 | spatial_features_2d = batch_dict['spatial_features_2d']
55 |
56 | psm = self.cls_head(spatial_features_2d)
57 | rm = self.reg_head(spatial_features_2d)
58 |
59 | output_dict = {'psm': psm,
60 | 'rm': rm}
61 |
62 | return output_dict
--------------------------------------------------------------------------------
/opencood/models/second_ssfa.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # Author: Runsheng Xu , OpenPCDet
3 | # License: TDG-Attribution-NonCommercial-NoDistrib
4 |
5 |
6 | import torch.nn as nn
7 |
8 | from opencood.models.sub_modules.mean_vfe import MeanVFE
9 | from opencood.models.sub_modules.sparse_backbone_3d import VoxelBackBone8x
10 | from opencood.models.sub_modules.height_compression import HeightCompression
11 | from opencood.models.sub_modules.base_bev_backbone import BaseBEVBackbone
12 | from opencood.models.sub_modules.cia_ssd_utils import SSFA, Head
13 | from opencood.models.sub_modules.downsample_conv import DownsampleConv
14 | import numpy as np
15 |
16 | class SecondSSFA(nn.Module):
17 | def __init__(self, args):
18 | super(SecondSSFA, self).__init__()
19 | lidar_range = np.array(args['lidar_range'])
20 | grid_size = np.round((lidar_range[3:6] - lidar_range[:3]) /
21 | np.array(args['voxel_size'])).astype(np.int64)
22 | self.vfe = MeanVFE(args['mean_vfe'],
23 | args['mean_vfe']['num_point_features'])
24 | self.spconv_block = VoxelBackBone8x(args['spconv'],
25 | input_channels=args['spconv'][
26 | 'num_features_in'],
27 | grid_size=grid_size)
28 | self.map_to_bev = HeightCompression(args['map2bev'])
29 | self.ssfa = SSFA(args['ssfa'])
30 |
31 | self.shrink_flag = False
32 | if 'shrink_header' in args:
33 | self.shrink_flag = True
34 | self.shrink_conv = DownsampleConv(args['shrink_header'])
35 | self.out_channel = args['shrink_header']['dim'][-1]
36 |
37 | self.head = Head(**args['head'])
38 |
39 | def forward(self, data_dict):
40 |
41 | voxel_features = data_dict['processed_lidar']['voxel_features']
42 | voxel_coords = data_dict['processed_lidar']['voxel_coords']
43 | voxel_num_points = data_dict['processed_lidar']['voxel_num_points']
44 | batch_size = voxel_coords[:,0].max() + 1 # the batch index is stored in the first column of voxel_coords
45 |
46 | batch_dict = {'voxel_features': voxel_features,
47 | 'voxel_coords': voxel_coords,
48 | 'voxel_num_points': voxel_num_points,
49 | 'batch_size': batch_size}
50 |
51 | batch_dict = self.vfe(batch_dict)
52 | batch_dict = self.spconv_block(batch_dict)
53 | batch_dict = self.map_to_bev(batch_dict)
54 | out = self.ssfa(batch_dict['spatial_features'])
55 | if self.shrink_flag:
56 | out = self.shrink_conv(out)
57 |
58 | return self.head(out)
59 |
--------------------------------------------------------------------------------
/opencood/models/second_ssfa_.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # Author: Runsheng Xu , OpenPCDet
3 | # License: TDG-Attribution-NonCommercial-NoDistrib
4 |
5 |
6 | import torch.nn as nn
7 |
8 | from opencood.models.sub_modules.mean_vfe import MeanVFE
9 | from opencood.models.sub_modules.sparse_backbone_3d import VoxelBackBone8x
10 | from opencood.models.sub_modules.height_compression import HeightCompression
11 | from opencood.models.sub_modules.base_bev_backbone import BaseBEVBackbone
12 | from opencood.models.sub_modules.cia_ssd_utils import SSFA, Head
13 | from opencood.models.sub_modules.downsample_conv import DownsampleConv
14 | import numpy as np
15 |
16 | class SecondSSFA_(nn.Module):
17 | def __init__(self, args):
18 | super(SecondSSFA_, self).__init__()
19 | lidar_range = np.array(args['lidar_range'])
20 | grid_size = np.round((lidar_range[3:6] - lidar_range[:3]) /
21 | np.array(args['voxel_size'])).astype(np.int64)
22 | self.vfe = MeanVFE(args['mean_vfe'],
23 | args['mean_vfe']['num_point_features'])
24 | self.spconv_block = VoxelBackBone8x(args['spconv'],
25 | input_channels=args['spconv'][
26 | 'num_features_in'],
27 | grid_size=grid_size)
28 | self.map_to_bev = HeightCompression(args['map2bev'])
29 |
30 | def forward(self, data_dict):
31 | pass
--------------------------------------------------------------------------------
/opencood/models/sub_modules/auto_encoder.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 |
5 | class AutoEncoder(nn.Module):
6 | def __init__(self, feature_num, layer_num):
7 | super().__init__()
8 | self.feature_num = feature_num
9 | self.feature_stride = 2
10 |
11 | self.encoder = nn.ModuleList()
12 | self.decoder = nn.ModuleList()
13 |
14 | for i in range(layer_num):
15 | cur_layers = [
16 | nn.ZeroPad2d(1),
17 | nn.Conv2d(
18 | feature_num, feature_num, kernel_size=3,
19 | stride=2, padding=0, bias=False
20 | ),
21 | nn.BatchNorm2d(feature_num, eps=1e-3, momentum=0.01),
22 | nn.ReLU()]
23 |
24 | cur_layers.extend([
25 | nn.Conv2d(feature_num, feature_num // self.feature_stride,
26 | kernel_size=3, padding=1, bias=False),
27 | nn.BatchNorm2d(feature_num // self.feature_stride,
28 | eps=1e-3, momentum=0.01),
29 | nn.ReLU()
30 | ])
31 |
32 | self.encoder.append(nn.Sequential(*cur_layers))
33 | feature_num = feature_num // self.feature_stride
34 |
35 | feature_num = self.feature_num
36 | for i in range(layer_num):
37 | cur_layers = [nn.Sequential(
38 | nn.ConvTranspose2d(
39 | feature_num // 2, feature_num,
40 | kernel_size=2,
41 | stride=2, bias=False
42 | ),
43 | nn.BatchNorm2d(feature_num,
44 | eps=1e-3, momentum=0.01),
45 | nn.ReLU()
46 | )]
47 |
48 | cur_layers.extend([nn.Sequential(
49 | nn.Conv2d(
50 | feature_num, feature_num, kernel_size=3,
51 | stride=1, bias=False, padding=1
52 | ),
53 | nn.BatchNorm2d(feature_num, eps=1e-3,
54 | momentum=0.01),
55 | nn.ReLU()
56 | )])
57 | self.decoder.append(nn.Sequential(*cur_layers))
58 | feature_num //= 2
59 |
60 | def forward(self, x):
61 | for i in range(len(self.encoder)):
62 | x = self.encoder[i](x)
63 |
64 | for i in range(len(self.decoder)-1, -1, -1):
65 | x = self.decoder[i](x)
66 |
67 | return x
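A shape round-trip sketch for the AutoEncoder above (opencood package assumed importable, sizes illustrative): each encoder stage halves the spatial size and the channel count, and the decoder stages, applied in reverse, restore both.

```python
import torch
from opencood.models.sub_modules.auto_encoder import AutoEncoder

ae = AutoEncoder(feature_num=64, layer_num=2)
x = torch.randn(1, 64, 64, 176)     # illustrative BEV feature map
y = ae(x)
print(x.shape, '->', y.shape)       # both torch.Size([1, 64, 64, 176])
```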
--------------------------------------------------------------------------------
/opencood/models/sub_modules/bevformer.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PhyllisH/CodeFilling/57f83ccc7d8457da74f06424811c130c3020e19e/opencood/models/sub_modules/bevformer.py
--------------------------------------------------------------------------------
/opencood/models/sub_modules/dcn_net.py:
--------------------------------------------------------------------------------
1 | from mmcv.ops import DeformConv2dPack as DCN
2 | import torch
3 | import torch.nn as nn
4 |
5 | class DCNNet(nn.Module):
6 | def __init__(self, args):
7 | super(DCNNet,self).__init__()
8 |
9 | module_list =[]
10 | in_channels = args['in_channels']
11 | out_channels = args['out_channels']
12 | stride = args['stride']
13 | kernel_size = args['kernel_size']
14 | padding = args['padding']
15 |
16 | for i in range(args['n_blocks']):
17 | module_list.append(DCN(in_channels[i],out_channels[i],kernel_size[i],stride=stride[i],padding=padding[i]))
18 | self.model = nn.Sequential(*module_list)
19 |
20 | def forward(self, x):
21 | return self.model(x)
--------------------------------------------------------------------------------
/opencood/models/sub_modules/dense_head.py:
--------------------------------------------------------------------------------
1 | # author: Yifan Lu
2 | # dense head for stage1, predict cls, reg, dir
3 | import torch.nn as nn
4 | import torch
5 |
6 | class Head(nn.Module):
7 | def __init__(self, args):
8 | super(Head, self).__init__()
9 |
10 | self.conv_box = nn.Conv2d(args['num_input'], args['num_pred'], 1) # 128 -> 14
11 | self.conv_cls = nn.Conv2d(args['num_input'], args['num_cls'], 1) # 128 -> 2
12 | self.conv_dir = nn.Conv2d(args['num_input'], args['num_dir'], 1) # 128 -> 4
13 | self.conv_iou = nn.Conv2d(args['num_input'], args['num_dir'], 1, bias=False)
14 |
15 | def forward(self, x):
16 | box_preds = self.conv_box(x)
17 | cls_preds = self.conv_cls(x)
18 | dir_preds = self.conv_dir(x) # dir_preds.shape=[8, w, h, 4]
19 | iou_preds = self.conv_iou(x)
20 |
21 | ret_dict = {"reg_preds": box_preds, \
22 | "cls_preds": cls_preds, \
23 | "dir_preds": dir_preds, \
24 | "iou_preds": iou_preds}
25 |
26 | return ret_dict
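A usage sketch for this stage-1 head, with channel counts matching the inline comments (128 input channels, 14 regression, 2 classification and 4 direction outputs); the spatial size is illustrative.

```python
import torch
from opencood.models.sub_modules.dense_head import Head

head = Head({'num_input': 128, 'num_pred': 14, 'num_cls': 2, 'num_dir': 4})
feat = torch.randn(1, 128, 100, 252)          # illustrative BEV feature map
out = head(feat)
print({k: tuple(v.shape) for k, v in out.items()})
# reg: (1, 14, 100, 252), cls: (1, 2, 100, 252), dir/iou: (1, 4, 100, 252)
```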
--------------------------------------------------------------------------------
/opencood/models/sub_modules/discriminator.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | from opencood.models.da_modules.gsl import GradientScalarLayer
4 |
5 | def weights_init(m):
6 | classname = m.__class__.__name__
7 | if classname.find('Conv') != -1:
8 | nn.init.normal_(m.weight.data, 0.0, 0.02)
9 | elif classname.find('BatchNorm') != -1:
10 | nn.init.normal_(m.weight.data, 1.0, 0.02)
11 | nn.init.constant_(m.bias.data, 0)
12 |
13 | class Discriminator(nn.Module):
14 | def __init__(self, args):
15 | super().__init__()
16 | self.indim = args['indim']
17 | self.roi_size = args['roi_align_size']
18 | self.netD = nn.Sequential(
19 | nn.Conv2d(self.indim, self.indim//2, kernel_size=1, stride=1, padding=0),
20 | nn.BatchNorm2d(self.indim//2),
21 | nn.LeakyReLU(0.2, inplace=True),
22 | nn.Conv2d(self.indim//2, self.indim//4, kernel_size=1, stride=1, padding=0),
23 | nn.BatchNorm2d(self.indim//4),
24 | nn.LeakyReLU(0.2, inplace=True),
25 | nn.AvgPool2d(kernel_size=self.roi_size, stride=1, padding=0), # [N, self.indim//4, 1, 1],
26 | nn.Flatten(start_dim=1),
27 | nn.Linear(self.indim//4, self.indim//8),
28 | nn.LeakyReLU(0.2, inplace=True),
29 | nn.Linear(self.indim//8, 1),
30 | nn.Sigmoid()
31 | )
32 | self.grl = GradientScalarLayer(- args.get('scale', 1))
33 |
34 | self.netD.apply(weights_init)
35 |
36 | def forward(self, x):
37 | """
38 | Input:
39 | x: [N, indim, RoIsize, RoIsize]
40 | Output:
41 | cls: [N, 1]
42 | """
43 | x = self.grl(x)
44 | return self.netD(x)
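A usage sketch for the RoI-level domain discriminator above, with illustrative argument values: pooled RoI features pass through the gradient-reversal layer (a GradientScalarLayer with negative weight) and a small conv/MLP head that outputs one domain probability per RoI.

```python
import torch
from opencood.models.sub_modules.discriminator import Discriminator

disc = Discriminator({'indim': 256, 'roi_align_size': 7, 'scale': 1.0})
roi_feats = torch.randn(10, 256, 7, 7)   # 10 RoIs (illustrative)
domain_prob = disc(roi_feats)
print(domain_prob.shape)                 # torch.Size([10, 1])
```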
--------------------------------------------------------------------------------
/opencood/models/sub_modules/downsample_conv.py:
--------------------------------------------------------------------------------
1 | """
2 | Class used to downsample features by 3*3 conv
3 | """
4 | import torch.nn as nn
5 |
6 |
7 | class DoubleConv(nn.Module):
8 | """
9 | Double convolution
10 | Args:
11 | in_channels: input channel num
12 | out_channels: output channel num
13 | """
14 |
15 | def __init__(self, in_channels, out_channels, kernel_size,
16 | stride, padding):
17 | super().__init__()
18 | self.double_conv = nn.Sequential(
19 | nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size,
20 | stride=stride, padding=padding),
21 | nn.ReLU(inplace=True),
22 | nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),
23 | nn.ReLU(inplace=True)
24 | )
25 |
26 | def forward(self, x):
27 | return self.double_conv(x)
28 |
29 |
30 | class DownsampleConv(nn.Module):
31 | def __init__(self, config):
32 | super(DownsampleConv, self).__init__()
33 | self.layers = nn.ModuleList([])
34 | input_dim = config['input_dim']
35 |
36 | for (ksize, dim, stride, padding) in zip(config['kernal_size'],
37 | config['dim'],
38 | config['stride'],
39 | config['padding']):
40 | self.layers.append(DoubleConv(input_dim,
41 | dim,
42 | kernel_size=ksize,
43 | stride=stride,
44 | padding=padding))
45 | input_dim = dim
46 |
47 | def forward(self, x):
48 | for i in range(len(self.layers)):
49 | x = self.layers[i](x)
50 | return x
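A usage sketch for DownsampleConv, with an illustrative single-stage config: each entry in the zipped config lists adds one DoubleConv stage, and a stride-2 stage halves the feature map. Note the config key is spelled 'kernal_size' in this codebase.

```python
import torch
from opencood.models.sub_modules.downsample_conv import DownsampleConv

config = {'input_dim': 384,
          'kernal_size': [3],
          'dim': [256],
          'stride': [2],
          'padding': [1]}
shrink = DownsampleConv(config)
x = torch.randn(1, 384, 100, 252)
print(shrink(x).shape)    # torch.Size([1, 256, 50, 126])
```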
--------------------------------------------------------------------------------
/opencood/models/sub_modules/eqmotion/__init__.py:
--------------------------------------------------------------------------------
1 | from .model_t import EqMotion
2 |
3 |
--------------------------------------------------------------------------------
/opencood/models/sub_modules/functions/__init__.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------------
2 | # Deformable DETR
3 | # Copyright (c) 2020 SenseTime. All Rights Reserved.
4 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5 | # ------------------------------------------------------------------------------------------------
6 | # Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
7 | # ------------------------------------------------------------------------------------------------
8 |
9 | from .ms_deform_attn_func import MSDeformAttnFunction
10 |
11 |
--------------------------------------------------------------------------------
/opencood/models/sub_modules/height_compression.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 |
3 |
4 | class HeightCompression(nn.Module):
5 | def __init__(self, model_cfg, **kwargs):
6 | super().__init__()
7 | self.model_cfg = model_cfg
8 | self.num_bev_features = self.model_cfg['feature_num']
9 |
10 | def forward(self, batch_dict):
11 | """
12 | Args:
13 | batch_dict:
14 | encoded_spconv_tensor: sparse tensor
15 | Returns:
16 | batch_dict:
17 | spatial_features:
18 |
19 | """
20 | encoded_spconv_tensor = batch_dict['encoded_spconv_tensor']
21 | spatial_features = encoded_spconv_tensor.dense()
22 | N, C, D, H, W = spatial_features.shape
23 | spatial_features = spatial_features.view(N, C * D, H, W)
24 | batch_dict['spatial_features'] = spatial_features
25 | batch_dict['spatial_features_stride'] = \
26 | batch_dict['encoded_spconv_tensor_stride']
27 | return batch_dict
28 |
--------------------------------------------------------------------------------
/opencood/models/sub_modules/mean_vfe.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 | class MeanVFE(nn.Module):
5 | def __init__(self, model_cfg, num_point_features, **kwargs):
6 | super().__init__()
7 | self.model_cfg = model_cfg
8 | self.num_point_features = num_point_features
9 |
10 | def get_output_feature_dim(self):
11 | return self.num_point_features
12 |
13 | def forward(self, batch_dict, **kwargs):
14 | """
15 | Args:
16 | batch_dict:
17 | voxels: (num_voxels, max_points_per_voxel, C)
18 | voxel_num_points: optional (num_voxels)
19 | **kwargs:
20 |
21 | Returns:
22 | vfe_features: (num_voxels, C)
23 | """
24 | voxel_features, voxel_num_points = batch_dict['voxel_features'], \
25 | batch_dict['voxel_num_points']
26 | points_mean = voxel_features[:, :, :].sum(dim=1, keepdim=False)
27 | normalizer = torch.clamp_min(voxel_num_points.view(-1, 1), min=1.0).\
28 | type_as(voxel_features)
29 | points_mean = points_mean / normalizer
30 | batch_dict['voxel_features'] = points_mean.contiguous()
31 |
32 | return batch_dict
33 |
34 |
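A usage sketch for MeanVFE with illustrative shapes: each voxel's (possibly zero-padded) points are averaged into one feature vector, normalized by `voxel_num_points`. The voxel counts are given as floats here just to keep the sketch simple.

```python
import torch
from opencood.models.sub_modules.mean_vfe import MeanVFE

vfe = MeanVFE(model_cfg={}, num_point_features=4)
batch_dict = {
    'voxel_features': torch.randn(1000, 32, 4),            # (num_voxels, max_pts, C)
    'voxel_num_points': torch.randint(1, 33, (1000,)).float(),
}
batch_dict = vfe(batch_dict)
print(batch_dict['voxel_features'].shape)                  # torch.Size([1000, 4])
```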
--------------------------------------------------------------------------------
/opencood/models/sub_modules/naive_compress.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 |
5 | class NaiveCompressor(nn.Module):
6 | """
7 | A very naive compressor that only compresses the channel dimension.
8 | """
9 | def __init__(self, input_dim, compress_raito):
10 | super().__init__()
11 | self.encoder = nn.Sequential(
12 | nn.Conv2d(input_dim, input_dim//compress_raito, kernel_size=3,
13 | stride=1, padding=1),
14 | nn.BatchNorm2d(input_dim//compress_raito, eps=1e-3, momentum=0.01),
15 | nn.ReLU()
16 | )
17 | self.decoder = nn.Sequential(
18 | nn.Conv2d(input_dim//compress_raito, input_dim, kernel_size=3,
19 | stride=1, padding=1),
20 | nn.BatchNorm2d(input_dim, eps=1e-3, momentum=0.01),
21 | nn.ReLU(),
22 | nn.Conv2d(input_dim, input_dim, kernel_size=3, stride=1, padding=1),
23 | nn.BatchNorm2d(input_dim, eps=1e-3,
24 | momentum=0.01),
25 | nn.ReLU()
26 | )
27 |
28 | def forward(self, x):
29 | x = self.encoder(x)
30 | x = self.decoder(x)
31 |
32 | return x
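A usage sketch for NaiveCompressor with illustrative shapes: the encoder shrinks the channel dimension by `compress_raito` (sic) before transmission and the decoder restores it, while the spatial size is untouched.

```python
import torch
from opencood.models.sub_modules.naive_compress import NaiveCompressor

compressor = NaiveCompressor(input_dim=256, compress_raito=4)
x = torch.randn(1, 256, 100, 252)
print(compressor(x).shape)    # torch.Size([1, 256, 100, 252])
```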
--------------------------------------------------------------------------------
/opencood/models/sub_modules/point_pillar_scatter.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 |
5 | class PointPillarScatter(nn.Module):
6 | def __init__(self, model_cfg):
7 | super().__init__()
8 |
9 | self.model_cfg = model_cfg
10 | self.num_bev_features = self.model_cfg['num_features']
11 | self.nx, self.ny, self.nz = model_cfg['grid_size'] # [704, 200, 1]
12 |
13 | assert self.nz == 1
14 |
15 | def forward(self, batch_dict):
16 | """ 将生成的pillar按照坐标索引还原到原空间中
17 | Args:
18 | pillar_features:(M, 64)
19 | coords:(M, 4) the first column is the batch index
20 |
21 | Returns:
22 | batch_spatial_features:(4, 64, H, W)
23 |
24 | |-------|
25 | | | |-------------|
26 | | | -> | * |
27 | | | | |
28 | | * | |-------------|
29 | |-------|
30 |
31 | Lidar Point Cloud Feature Map
32 | x-axis up Along with W
33 | y-axis right Along with H
34 |
35 | Something like clockwise rotation of 90 degree.
36 |
37 | """
38 | pillar_features, coords = batch_dict['pillar_features'], batch_dict[
39 | 'voxel_coords']
40 | batch_spatial_features = []
41 | batch_size = coords[:, 0].max().int().item() + 1
42 |
43 | for batch_idx in range(batch_size):
44 | spatial_feature = torch.zeros(
45 | self.num_bev_features,
46 | self.nz * self.nx * self.ny,
47 | dtype=pillar_features.dtype,
48 | device=pillar_features.device)
49 | # mask for this batch_index
50 | batch_mask = coords[:, 0] == batch_idx
51 | # extract the coordinates with the mask
52 | this_coords = coords[batch_mask, :] # (batch_idx_voxel,4) # zyx order, x in [0,706], y in [0,200]
53 | # coords are stored as (b, z, y, x) with a single z layer, so the flat index is computed as follows
54 | indices = this_coords[:, 1] + this_coords[:, 2] * self.nx + this_coords[:, 3]
55 | # convert the data type
56 | indices = indices.type(torch.long)
57 | # extract the pillar_features with the mask
58 | pillars = pillar_features[batch_mask, :] # (batch_idx_voxel,64)
59 | pillars = pillars.t() # (64,batch_idx_voxel)
60 | # fill the pillars at the computed indices
61 | spatial_feature[:, indices] = pillars
62 | # append the spatial feature to the list, each element is (64, self.nz * self.nx * self.ny)
63 | batch_spatial_features.append(spatial_feature)
64 |
65 | batch_spatial_features = \
66 | torch.stack(batch_spatial_features, 0)
67 | batch_spatial_features = \
68 | batch_spatial_features.view(batch_size, self.num_bev_features *
69 | self.nz, self.ny, self.nx) # It put y axis(in lidar frame) as image height. [..., 200, 704]
70 | batch_dict['spatial_features'] = batch_spatial_features
71 |
72 | return batch_dict
73 |
74 |
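A usage sketch for PointPillarScatter, using the grid size noted in the inline comment ([704, 200, 1]); the number of pillars is illustrative. Pillar features indexed by (batch, z, y, x) voxel coordinates are scattered into a dense BEV grid of shape (batch, num_features, ny, nx).

```python
import torch
from opencood.models.sub_modules.point_pillar_scatter import PointPillarScatter

scatter = PointPillarScatter({'num_features': 64, 'grid_size': [704, 200, 1]})
coords = torch.stack([
    torch.zeros(500, dtype=torch.long),            # batch index (single sample)
    torch.zeros(500, dtype=torch.long),            # z index (nz == 1)
    torch.randint(0, 200, (500,)),                 # y index
    torch.randint(0, 704, (500,)),                 # x index
], dim=1)
batch_dict = {'pillar_features': torch.randn(500, 64), 'voxel_coords': coords}
batch_dict = scatter(batch_dict)
print(batch_dict['spatial_features'].shape)        # torch.Size([1, 64, 200, 704])
```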
--------------------------------------------------------------------------------
/opencood/models/sub_modules/refactor.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import numpy as np
4 | from icecream import ic
5 |
6 | def flatten(l):
7 | return [item for sublist in l for item in sublist]
8 |
9 | def refactor(batch_dict, lidar_agent_indicator):
10 | agent_num = len(lidar_agent_indicator)
11 | proposal_agentids_sample_list = batch_dict['agentid_fused'] # [sample1, sample2, ..., sample{batchnum}]
12 |
13 | lidar_matrix_list = []
14 | camera_matrix_list = []
15 |
16 | # scatter agentid
17 | for proposal_agentids_list in proposal_agentids_sample_list: # [[0,1,2],[1,2],[0,2],...]
18 | proposal_num = len(proposal_agentids_list)
19 |
20 | sp_row = [[i]*len(proposal_agentids_list[i]) for i in range(len(proposal_agentids_list))]
21 | sp_row = flatten(sp_row)
22 | sp_col = torch.cat(proposal_agentids_list).tolist()
23 |
24 | indice = np.array([sp_row, sp_col], dtype=np.int32)
25 | value = np.ones_like(sp_row)
26 |
27 | lidar_matrix = torch.sparse_coo_tensor(indice, value, (proposal_num, agent_num), device=lidar_agent_indicator.device).to_dense()
28 | camera_matrix = torch.sparse_coo_tensor(indice, value, (proposal_num, agent_num), device=lidar_agent_indicator.device).to_dense()
29 |
30 | lidar_mask = (lidar_agent_indicator)
31 | camera_mask = (1 - lidar_agent_indicator)
32 |
33 | lidar_matrix *= lidar_mask
34 | camera_matrix *= camera_mask
35 |
36 | lidar_matrix_list.append(lidar_matrix)
37 | camera_matrix_list.append(camera_matrix)
38 |
39 | batch_dict['lidar_matrix_list'] = lidar_matrix_list
40 | batch_dict['camera_matrix_list'] = camera_matrix_list
41 |
42 | return batch_dict
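A toy usage sketch for `refactor` (opencood and its icecream dependency assumed importable): for each fused proposal, the list of contributing agent ids is scattered into a proposal-by-agent matrix, which is then split into a lidar part and a camera part using `lidar_agent_indicator`.

```python
import torch
from opencood.models.sub_modules.refactor import refactor

lidar_agent_indicator = torch.tensor([1, 0, 1])    # agents 0 and 2 are lidar agents
batch_dict = {'agentid_fused': [[torch.tensor([0, 1]), torch.tensor([2])]]}  # one sample, two proposals
batch_dict = refactor(batch_dict, lidar_agent_indicator)
print(batch_dict['lidar_matrix_list'][0])
# tensor([[1, 0, 0],
#         [0, 0, 1]])  -> per proposal, which lidar agents contributed
```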
--------------------------------------------------------------------------------
/opencood/models/sub_modules/split_attn.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 |
5 |
6 | class RadixSoftmax(nn.Module):
7 | def __init__(self, radix, cardinality):
8 | super(RadixSoftmax, self).__init__()
9 | self.radix = radix
10 | self.cardinality = cardinality
11 |
12 | def forward(self, x):
13 | # x: (B, L, 1, 1, 3C)
14 | batch = x.size(0)
15 | cav_num = x.size(1)
16 |
17 | if self.radix > 1:
18 | # x: (B, L, 1, 3, C)
19 | x = x.view(batch,
20 | cav_num,
21 | self.cardinality, self.radix, -1)
22 | x = F.softmax(x, dim=3)
23 | # B, 3LC
24 | x = x.reshape(batch, -1)
25 | else:
26 | x = torch.sigmoid(x)
27 | return x
28 |
29 |
30 | class SplitAttn(nn.Module):
31 | def __init__(self, input_dim):
32 | super(SplitAttn, self).__init__()
33 | self.input_dim = input_dim
34 |
35 | self.fc1 = nn.Linear(input_dim, input_dim, bias=False)
36 | self.bn1 = nn.LayerNorm(input_dim)
37 | self.act1 = nn.ReLU()
38 | self.fc2 = nn.Linear(input_dim, input_dim * 3, bias=False)
39 |
40 | self.rsoftmax = RadixSoftmax(3, 1)
41 |
42 | def forward(self, window_list):
43 | # window list: [(B, L, H, W, C) * 3]
44 | assert len(window_list) == 3, 'only 3 windows are supported'
45 |
46 | sw, mw, bw = window_list[0], window_list[1], window_list[2]
47 | B, L = sw.shape[0], sw.shape[1]
48 |
49 | # global average pooling, B, L, H, W, C
50 | x_gap = sw + mw + bw
51 | # B, L, 1, 1, C
52 | x_gap = x_gap.mean((2, 3), keepdim=True)
53 | x_gap = self.act1(self.bn1(self.fc1(x_gap)))
54 | # B, L, 1, 1, 3C
55 | x_attn = self.fc2(x_gap)
56 | # B L 1 1 3C
57 | x_attn = self.rsoftmax(x_attn).view(B, L, 1, 1, -1)
58 |
59 | out = sw * x_attn[:, :, :, :, 0:self.input_dim] + \
60 | mw * x_attn[:, :, :, :, self.input_dim:2*self.input_dim] +\
61 | bw * x_attn[:, :, :, :, self.input_dim*2:]
62 |
63 | return out
--------------------------------------------------------------------------------
/opencood/pcdet_utils/__init__.py:
--------------------------------------------------------------------------------
1 | """This is used to save the utility functions for OpenPCDet
2 | """
--------------------------------------------------------------------------------
/opencood/pcdet_utils/iou3d_nms/src/iou3d_cpu.h:
--------------------------------------------------------------------------------
1 | #ifndef IOU3D_CPU_H
2 | #define IOU3D_CPU_H
3 |
4 | #include <torch/serialize/tensor.h>
5 | #include <vector>
6 | #include <cuda.h>
7 | #include <cuda_runtime_api.h>
8 |
9 | int boxes_iou_bev_cpu(at::Tensor boxes_a_tensor, at::Tensor boxes_b_tensor, at::Tensor ans_iou_tensor);
10 |
11 | #endif
12 |
--------------------------------------------------------------------------------
/opencood/pcdet_utils/iou3d_nms/src/iou3d_nms.h:
--------------------------------------------------------------------------------
1 | #ifndef IOU3D_NMS_H
2 | #define IOU3D_NMS_H
3 |
4 | #include <torch/serialize/tensor.h>
5 | #include <vector>
6 | #include <cuda.h>
7 | #include <cuda_runtime_api.h>
8 |
9 | int boxes_overlap_bev_gpu(at::Tensor boxes_a, at::Tensor boxes_b, at::Tensor ans_overlap);
10 | int boxes_iou_bev_gpu(at::Tensor boxes_a, at::Tensor boxes_b, at::Tensor ans_iou);
11 | int nms_gpu(at::Tensor boxes, at::Tensor keep, float nms_overlap_thresh);
12 | int nms_normal_gpu(at::Tensor boxes, at::Tensor keep, float nms_overlap_thresh);
13 |
14 | #endif
15 |
--------------------------------------------------------------------------------
/opencood/pcdet_utils/iou3d_nms/src/iou3d_nms_api.cpp:
--------------------------------------------------------------------------------
1 | #include <torch/serialize/tensor.h>
2 | #include <torch/extension.h>
3 | #include <vector>
4 | #include <cuda.h>
5 | #include <cuda_runtime_api.h>
6 |
7 | #include "iou3d_cpu.h"
8 | #include "iou3d_nms.h"
9 |
10 |
11 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
12 | m.def("boxes_overlap_bev_gpu", &boxes_overlap_bev_gpu, "oriented boxes overlap");
13 | m.def("boxes_iou_bev_gpu", &boxes_iou_bev_gpu, "oriented boxes iou");
14 | m.def("nms_gpu", &nms_gpu, "oriented nms gpu");
15 | m.def("nms_normal_gpu", &nms_normal_gpu, "nms gpu");
16 | m.def("boxes_iou_bev_cpu", &boxes_iou_bev_cpu, "oriented boxes iou");
17 | }
18 |
--------------------------------------------------------------------------------
/opencood/pcdet_utils/pointnet2/pointnet2_batch/src/ball_query.cpp:
--------------------------------------------------------------------------------
1 | /*
2 | batch version of ball query, modified from the original implementation of official PointNet++ codes.
3 | Written by Shaoshuai Shi
4 | All Rights Reserved 2018.
5 | */
6 |
7 |
8 | #include <torch/serialize/tensor.h>
9 | #include <vector>
10 | #include <THC/THC.h>
11 | #include <cuda.h>
12 | #include <cuda_runtime_api.h>
13 | #include "ball_query_gpu.h"
14 |
15 | extern THCState *state;
16 |
17 | #define CHECK_CUDA(x) do { \
18 | if (!x.type().is_cuda()) { \
19 | fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, __LINE__); \
20 | exit(-1); \
21 | } \
22 | } while (0)
23 | #define CHECK_CONTIGUOUS(x) do { \
24 | if (!x.is_contiguous()) { \
25 | fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x, __FILE__, __LINE__); \
26 | exit(-1); \
27 | } \
28 | } while (0)
29 | #define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x)
30 |
31 |
32 | int ball_query_wrapper_fast(int b, int n, int m, float radius, int nsample,
33 | at::Tensor new_xyz_tensor, at::Tensor xyz_tensor, at::Tensor idx_tensor) {
34 | CHECK_INPUT(new_xyz_tensor);
35 | CHECK_INPUT(xyz_tensor);
36 | const float *new_xyz = new_xyz_tensor.data<float>();
37 | const float *xyz = xyz_tensor.data<float>();
38 | int *idx = idx_tensor.data<int>();
39 |
40 | ball_query_kernel_launcher_fast(b, n, m, radius, nsample, new_xyz, xyz, idx);
41 | return 1;
42 | }
43 |
--------------------------------------------------------------------------------
/opencood/pcdet_utils/pointnet2/pointnet2_batch/src/ball_query_gpu.cu:
--------------------------------------------------------------------------------
1 | /*
2 | batch version of ball query, modified from the original implementation of official PointNet++ codes.
3 | Written by Shaoshuai Shi
4 | All Rights Reserved 2018.
5 | */
6 |
7 | #include <math.h>
8 | #include <stdio.h>
9 | #include <stdlib.h>
10 |
11 | #include "ball_query_gpu.h"
12 | #include "cuda_utils.h"
13 |
14 |
15 | __global__ void ball_query_kernel_fast(int b, int n, int m, float radius, int nsample,
16 | const float *__restrict__ new_xyz, const float *__restrict__ xyz, int *__restrict__ idx) {
17 | // new_xyz: (B, M, 3)
18 | // xyz: (B, N, 3)
19 | // output:
20 | // idx: (B, M, nsample)
21 | int bs_idx = blockIdx.y;
22 | int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
23 | if (bs_idx >= b || pt_idx >= m) return;
24 |
25 | new_xyz += bs_idx * m * 3 + pt_idx * 3;
26 | xyz += bs_idx * n * 3;
27 | idx += bs_idx * m * nsample + pt_idx * nsample;
28 |
29 | float radius2 = radius * radius;
30 | float new_x = new_xyz[0];
31 | float new_y = new_xyz[1];
32 | float new_z = new_xyz[2];
33 |
34 | int cnt = 0;
35 | for (int k = 0; k < n; ++k) {
36 | float x = xyz[k * 3 + 0];
37 | float y = xyz[k * 3 + 1];
38 | float z = xyz[k * 3 + 2];
39 | float d2 = (new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) + (new_z - z) * (new_z - z);
40 | if (d2 < radius2){
41 | if (cnt == 0){
42 | for (int l = 0; l < nsample; ++l) {
43 | idx[l] = k;
44 | }
45 | }
46 | idx[cnt] = k;
47 | ++cnt;
48 | if (cnt >= nsample) break;
49 | }
50 | }
51 | }
52 |
53 |
54 | void ball_query_kernel_launcher_fast(int b, int n, int m, float radius, int nsample, \
55 | const float *new_xyz, const float *xyz, int *idx) {
56 | // new_xyz: (B, M, 3)
57 | // xyz: (B, N, 3)
58 | // output:
59 | // idx: (B, M, nsample)
60 |
61 | cudaError_t err;
62 |
63 | dim3 blocks(DIVUP(m, THREADS_PER_BLOCK), b); // blockIdx.x(col), blockIdx.y(row)
64 | dim3 threads(THREADS_PER_BLOCK);
65 |
66 | ball_query_kernel_fast<<<blocks, threads>>>(b, n, m, radius, nsample, new_xyz, xyz, idx);
67 | // cudaDeviceSynchronize(); // for using printf in kernel function
68 | err = cudaGetLastError();
69 | if (cudaSuccess != err) {
70 | fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
71 | exit(-1);
72 | }
73 | }
74 |
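Note: a minimal Python-side sketch of how this batch ball query is typically driven once the extension is built (module path taken from opencood/pcdet_utils/setup.py; tensor names and sizes here are illustrative, not from the repo). The idx tensor is pre-allocated and filled in place by the kernel, following the (B, M, nsample) layout in the comments above.

```python
# Hypothetical usage sketch; assumes `pointnet2_batch_cuda` was built via setup.py.
import torch
from opencood.pcdet_utils.pointnet2.pointnet2_batch import pointnet2_batch_cuda

B, N, M, nsample, radius = 2, 1024, 256, 16, 0.8
xyz = torch.rand(B, N, 3, device="cuda").contiguous()      # source points (B, N, 3)
new_xyz = torch.rand(B, M, 3, device="cuda").contiguous()  # query centers (B, M, 3)
idx = torch.zeros(B, M, nsample, dtype=torch.int32, device="cuda")  # output (B, M, nsample)

pointnet2_batch_cuda.ball_query_wrapper(B, N, M, radius, nsample, new_xyz, xyz, idx)
```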
--------------------------------------------------------------------------------
/opencood/pcdet_utils/pointnet2/pointnet2_batch/src/ball_query_gpu.h:
--------------------------------------------------------------------------------
1 | #ifndef _BALL_QUERY_GPU_H
2 | #define _BALL_QUERY_GPU_H
3 |
4 | #include <torch/serialize/tensor.h>
5 | #include <vector>
6 | #include <cuda.h>
7 | #include <cuda_runtime_api.h>
8 |
9 | int ball_query_wrapper_fast(int b, int n, int m, float radius, int nsample,
10 | at::Tensor new_xyz_tensor, at::Tensor xyz_tensor, at::Tensor idx_tensor);
11 |
12 | void ball_query_kernel_launcher_fast(int b, int n, int m, float radius, int nsample,
13 | const float *xyz, const float *new_xyz, int *idx);
14 |
15 | #endif
16 |
--------------------------------------------------------------------------------
/opencood/pcdet_utils/pointnet2/pointnet2_batch/src/cuda_utils.h:
--------------------------------------------------------------------------------
1 | #ifndef _CUDA_UTILS_H
2 | #define _CUDA_UTILS_H
3 |
4 | #include <cmath>
5 |
6 | #define TOTAL_THREADS 1024
7 | #define THREADS_PER_BLOCK 256
8 | #define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
9 |
10 | inline int opt_n_threads(int work_size) {
11 | const int pow_2 = std::log(static_cast<double>(work_size)) / std::log(2.0);
12 |
13 | return max(min(1 << pow_2, TOTAL_THREADS), 1);
14 | }
15 | #endif
16 |
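For reference, the launch-geometry helpers above amount to the following (a Python transcription for illustration only; the C++ header stays the source of truth): DIVUP is a ceiling division used to size the grid, and opt_n_threads rounds the workload down to the nearest power of two, clamped to [1, TOTAL_THREADS].

```python
import math

TOTAL_THREADS = 1024
THREADS_PER_BLOCK = 256

def divup(m: int, n: int) -> int:
    # ceil(m / n) without floating point, as in the DIVUP macro
    return m // n + (1 if m % n > 0 else 0)

def opt_n_threads(work_size: int) -> int:
    pow_2 = int(math.log(work_size) / math.log(2.0))
    return max(min(1 << pow_2, TOTAL_THREADS), 1)

assert divup(1000, THREADS_PER_BLOCK) == 4  # e.g. 1000 query points -> 4 blocks
assert opt_n_threads(300) == 256
```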
--------------------------------------------------------------------------------
/opencood/pcdet_utils/pointnet2/pointnet2_batch/src/group_points.cpp:
--------------------------------------------------------------------------------
1 | /*
2 | batch version of point grouping, modified from the original implementation of official PointNet++ codes.
3 | Written by Shaoshuai Shi
4 | All Rights Reserved 2018.
5 | */
6 |
7 |
8 | #include <torch/serialize/tensor.h>
9 | #include <cuda.h>
10 | #include <cuda_runtime_api.h>
11 | #include <vector>
12 | #include <THC/THC.h>
13 | #include "group_points_gpu.h"
14 |
15 | extern THCState *state;
16 |
17 |
18 | int group_points_grad_wrapper_fast(int b, int c, int n, int npoints, int nsample,
19 | at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor grad_points_tensor) {
20 |
21 | float *grad_points = grad_points_tensor.data<float>();
22 | const int *idx = idx_tensor.data<int>();
23 | const float *grad_out = grad_out_tensor.data<float>();
24 |
25 | group_points_grad_kernel_launcher_fast(b, c, n, npoints, nsample, grad_out, idx, grad_points);
26 | return 1;
27 | }
28 |
29 |
30 | int group_points_wrapper_fast(int b, int c, int n, int npoints, int nsample,
31 | at::Tensor points_tensor, at::Tensor idx_tensor, at::Tensor out_tensor) {
32 |
33 | const float *points = points_tensor.data<float>();
34 | const int *idx = idx_tensor.data<int>();
35 | float *out = out_tensor.data<float>();
36 |
37 | group_points_kernel_launcher_fast(b, c, n, npoints, nsample, points, idx, out);
38 | return 1;
39 | }
40 |
--------------------------------------------------------------------------------
/opencood/pcdet_utils/pointnet2/pointnet2_batch/src/group_points_gpu.h:
--------------------------------------------------------------------------------
1 | #ifndef _GROUP_POINTS_GPU_H
2 | #define _GROUP_POINTS_GPU_H
3 |
4 | #include <torch/serialize/tensor.h>
5 | #include <cuda.h>
6 | #include <cuda_runtime_api.h>
7 | #include <vector>
8 |
9 |
10 | int group_points_wrapper_fast(int b, int c, int n, int npoints, int nsample,
11 | at::Tensor points_tensor, at::Tensor idx_tensor, at::Tensor out_tensor);
12 |
13 | void group_points_kernel_launcher_fast(int b, int c, int n, int npoints, int nsample,
14 | const float *points, const int *idx, float *out);
15 |
16 | int group_points_grad_wrapper_fast(int b, int c, int n, int npoints, int nsample,
17 | at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor grad_points_tensor);
18 |
19 | void group_points_grad_kernel_launcher_fast(int b, int c, int n, int npoints, int nsample,
20 | const float *grad_out, const int *idx, float *grad_points);
21 |
22 | #endif
23 |
--------------------------------------------------------------------------------
/opencood/pcdet_utils/pointnet2/pointnet2_batch/src/interpolate.cpp:
--------------------------------------------------------------------------------
1 | /*
2 | batch version of point interpolation, modified from the original implementation of official PointNet++ codes.
3 | Written by Shaoshuai Shi
4 | All Rights Reserved 2018.
5 | */
6 |
7 |
8 | #include <torch/serialize/tensor.h>
9 | #include <vector>
10 | #include <math.h>
11 | #include <stdio.h>
12 | #include <stdlib.h>
13 | #include <cuda.h>
14 | #include <cuda_runtime_api.h>
15 | #include <THC/THC.h>
16 | #include "interpolate_gpu.h"
17 |
18 | extern THCState *state;
19 |
20 |
21 | void three_nn_wrapper_fast(int b, int n, int m, at::Tensor unknown_tensor,
22 | at::Tensor known_tensor, at::Tensor dist2_tensor, at::Tensor idx_tensor) {
23 | const float *unknown = unknown_tensor.data<float>();
24 | const float *known = known_tensor.data<float>();
25 | float *dist2 = dist2_tensor.data<float>();
26 | int *idx = idx_tensor.data<int>();
27 |
28 | three_nn_kernel_launcher_fast(b, n, m, unknown, known, dist2, idx);
29 | }
30 |
31 |
32 | void three_interpolate_wrapper_fast(int b, int c, int m, int n,
33 | at::Tensor points_tensor,
34 | at::Tensor idx_tensor,
35 | at::Tensor weight_tensor,
36 | at::Tensor out_tensor) {
37 |
38 | const float *points = points_tensor.data<float>();
39 | const float *weight = weight_tensor.data<float>();
40 | float *out = out_tensor.data<float>();
41 | const int *idx = idx_tensor.data<int>();
42 |
43 | three_interpolate_kernel_launcher_fast(b, c, m, n, points, idx, weight, out);
44 | }
45 |
46 | void three_interpolate_grad_wrapper_fast(int b, int c, int n, int m,
47 | at::Tensor grad_out_tensor,
48 | at::Tensor idx_tensor,
49 | at::Tensor weight_tensor,
50 | at::Tensor grad_points_tensor) {
51 |
52 | const float *grad_out = grad_out_tensor.data<float>();
53 | const float *weight = weight_tensor.data<float>();
54 | float *grad_points = grad_points_tensor.data<float>();
55 | const int *idx = idx_tensor.data<int>();
56 |
57 | three_interpolate_grad_kernel_launcher_fast(b, c, n, m, grad_out, idx, weight, grad_points);
58 | }
59 |
--------------------------------------------------------------------------------
/opencood/pcdet_utils/pointnet2/pointnet2_batch/src/interpolate_gpu.h:
--------------------------------------------------------------------------------
1 | #ifndef _INTERPOLATE_GPU_H
2 | #define _INTERPOLATE_GPU_H
3 |
4 | #include <torch/serialize/tensor.h>
5 | #include <vector>
6 | #include <cuda.h>
7 | #include <cuda_runtime_api.h>
8 |
9 |
10 | void three_nn_wrapper_fast(int b, int n, int m, at::Tensor unknown_tensor,
11 | at::Tensor known_tensor, at::Tensor dist2_tensor, at::Tensor idx_tensor);
12 |
13 | void three_nn_kernel_launcher_fast(int b, int n, int m, const float *unknown,
14 | const float *known, float *dist2, int *idx);
15 |
16 |
17 | void three_interpolate_wrapper_fast(int b, int c, int m, int n, at::Tensor points_tensor,
18 | at::Tensor idx_tensor, at::Tensor weight_tensor, at::Tensor out_tensor);
19 |
20 | void three_interpolate_kernel_launcher_fast(int b, int c, int m, int n,
21 | const float *points, const int *idx, const float *weight, float *out);
22 |
23 |
24 | void three_interpolate_grad_wrapper_fast(int b, int c, int n, int m, at::Tensor grad_out_tensor,
25 | at::Tensor idx_tensor, at::Tensor weight_tensor, at::Tensor grad_points_tensor);
26 |
27 | void three_interpolate_grad_kernel_launcher_fast(int b, int c, int n, int m, const float *grad_out,
28 | const int *idx, const float *weight, float *grad_points);
29 |
30 | #endif
31 |
--------------------------------------------------------------------------------
/opencood/pcdet_utils/pointnet2/pointnet2_batch/src/pointnet2_api.cpp:
--------------------------------------------------------------------------------
1 | #include <torch/serialize/tensor.h>
2 | #include <torch/extension.h>
3 |
4 | #include "ball_query_gpu.h"
5 | #include "group_points_gpu.h"
6 | #include "sampling_gpu.h"
7 | #include "interpolate_gpu.h"
8 |
9 |
10 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
11 | m.def("ball_query_wrapper", &ball_query_wrapper_fast, "ball_query_wrapper_fast");
12 |
13 | m.def("group_points_wrapper", &group_points_wrapper_fast, "group_points_wrapper_fast");
14 | m.def("group_points_grad_wrapper", &group_points_grad_wrapper_fast, "group_points_grad_wrapper_fast");
15 |
16 | m.def("gather_points_wrapper", &gather_points_wrapper_fast, "gather_points_wrapper_fast");
17 | m.def("gather_points_grad_wrapper", &gather_points_grad_wrapper_fast, "gather_points_grad_wrapper_fast");
18 |
19 | m.def("furthest_point_sampling_wrapper", &furthest_point_sampling_wrapper, "furthest_point_sampling_wrapper");
20 |
21 | m.def("three_nn_wrapper", &three_nn_wrapper_fast, "three_nn_wrapper_fast");
22 | m.def("three_interpolate_wrapper", &three_interpolate_wrapper_fast, "three_interpolate_wrapper_fast");
23 | m.def("three_interpolate_grad_wrapper", &three_interpolate_grad_wrapper_fast, "three_interpolate_grad_wrapper_fast");
24 | }
25 |
--------------------------------------------------------------------------------
/opencood/pcdet_utils/pointnet2/pointnet2_batch/src/sampling.cpp:
--------------------------------------------------------------------------------
1 | /*
2 | batch version of point sampling and gathering, modified from the original implementation of official PointNet++ codes.
3 | Written by Shaoshuai Shi
4 | All Rights Reserved 2018.
5 | */
6 |
7 |
8 | #include <torch/serialize/tensor.h>
9 | #include <ATen/cuda/CUDAContext.h>
10 | #include <vector>
11 | #include <THC/THC.h>
12 |
13 | #include "sampling_gpu.h"
14 |
15 | extern THCState *state;
16 |
17 |
18 | int gather_points_wrapper_fast(int b, int c, int n, int npoints,
19 | at::Tensor points_tensor, at::Tensor idx_tensor, at::Tensor out_tensor){
20 | const float *points = points_tensor.data<float>();
21 | const int *idx = idx_tensor.data<int>();
22 | float *out = out_tensor.data<float>();
23 |
24 | gather_points_kernel_launcher_fast(b, c, n, npoints, points, idx, out);
25 | return 1;
26 | }
27 |
28 |
29 | int gather_points_grad_wrapper_fast(int b, int c, int n, int npoints,
30 | at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor grad_points_tensor) {
31 |
32 | const float *grad_out = grad_out_tensor.data<float>();
33 | const int *idx = idx_tensor.data<int>();
34 | float *grad_points = grad_points_tensor.data<float>();
35 |
36 | gather_points_grad_kernel_launcher_fast(b, c, n, npoints, grad_out, idx, grad_points);
37 | return 1;
38 | }
39 |
40 |
41 | int furthest_point_sampling_wrapper(int b, int n, int m,
42 | at::Tensor points_tensor, at::Tensor temp_tensor, at::Tensor idx_tensor) {
43 |
44 | const float *points = points_tensor.data<float>();
45 | float *temp = temp_tensor.data<float>();
46 | int *idx = idx_tensor.data<int>();
47 |
48 | furthest_point_sampling_kernel_launcher(b, n, m, points, temp, idx);
49 | return 1;
50 | }
51 |
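A hypothetical Python-side sketch of the furthest point sampling wrapper above (module path assumed from setup.py; sizes are illustrative). `temp` is scratch space that should start at a large value, and `idx` receives the selected point indices in place.

```python
import torch
from opencood.pcdet_utils.pointnet2.pointnet2_batch import pointnet2_batch_cuda

B, N, npoint = 2, 4096, 512
points = torch.rand(B, N, 3, device="cuda").contiguous()       # (B, N, 3) input cloud
temp = torch.full((B, N), 1e10, device="cuda")                  # running min distance per point
idx = torch.zeros(B, npoint, dtype=torch.int32, device="cuda")  # (B, npoint) sampled indices

pointnet2_batch_cuda.furthest_point_sampling_wrapper(B, N, npoint, points, temp, idx)
```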
--------------------------------------------------------------------------------
/opencood/pcdet_utils/pointnet2/pointnet2_batch/src/sampling_gpu.h:
--------------------------------------------------------------------------------
1 | #ifndef _SAMPLING_GPU_H
2 | #define _SAMPLING_GPU_H
3 |
4 | #include <torch/serialize/tensor.h>
5 | #include <ATen/cuda/CUDAContext.h>
6 | #include <vector>
7 |
8 |
9 | int gather_points_wrapper_fast(int b, int c, int n, int npoints,
10 | at::Tensor points_tensor, at::Tensor idx_tensor, at::Tensor out_tensor);
11 |
12 | void gather_points_kernel_launcher_fast(int b, int c, int n, int npoints,
13 | const float *points, const int *idx, float *out);
14 |
15 |
16 | int gather_points_grad_wrapper_fast(int b, int c, int n, int npoints,
17 | at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor grad_points_tensor);
18 |
19 | void gather_points_grad_kernel_launcher_fast(int b, int c, int n, int npoints,
20 | const float *grad_out, const int *idx, float *grad_points);
21 |
22 |
23 | int furthest_point_sampling_wrapper(int b, int n, int m,
24 | at::Tensor points_tensor, at::Tensor temp_tensor, at::Tensor idx_tensor);
25 |
26 | void furthest_point_sampling_kernel_launcher(int b, int n, int m,
27 | const float *dataset, float *temp, int *idxs);
28 |
29 | #endif
30 |
--------------------------------------------------------------------------------
/opencood/pcdet_utils/pointnet2/pointnet2_stack/src/ball_query.cpp:
--------------------------------------------------------------------------------
1 | /*
2 | Stacked-batch-data version of ball query, modified from the original implementation of official PointNet++ codes.
3 | Written by Shaoshuai Shi
4 | All Rights Reserved 2019-2020.
5 | */
6 |
7 |
8 | #include <torch/serialize/tensor.h>
9 | #include <vector>
10 | #include <THC/THC.h>
11 | #include <cuda.h>
12 | #include <cuda_runtime_api.h>
13 | #include "ball_query_gpu.h"
14 |
15 | extern THCState *state;
16 |
17 | #define CHECK_CUDA(x) do { \
18 | if (!x.type().is_cuda()) { \
19 | fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, __LINE__); \
20 | exit(-1); \
21 | } \
22 | } while (0)
23 | #define CHECK_CONTIGUOUS(x) do { \
24 | if (!x.is_contiguous()) { \
25 | fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x, __FILE__, __LINE__); \
26 | exit(-1); \
27 | } \
28 | } while (0)
29 | #define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x)
30 |
31 | int ball_query_wrapper_stack(int B, int M, float radius, int nsample,
32 | at::Tensor new_xyz_tensor, at::Tensor new_xyz_batch_cnt_tensor,
33 | at::Tensor xyz_tensor, at::Tensor xyz_batch_cnt_tensor, at::Tensor idx_tensor) {
34 | CHECK_INPUT(new_xyz_tensor);
35 | CHECK_INPUT(xyz_tensor);
36 | CHECK_INPUT(new_xyz_batch_cnt_tensor);
37 | CHECK_INPUT(xyz_batch_cnt_tensor);
38 |
39 | const float *new_xyz = new_xyz_tensor.data<float>();
40 | const float *xyz = xyz_tensor.data<float>();
41 | const int *new_xyz_batch_cnt = new_xyz_batch_cnt_tensor.data<int>();
42 | const int *xyz_batch_cnt = xyz_batch_cnt_tensor.data<int>();
43 | int *idx = idx_tensor.data<int>();
44 |
45 | ball_query_kernel_launcher_stack(B, M, radius, nsample, new_xyz, new_xyz_batch_cnt, xyz, xyz_batch_cnt, idx);
46 | return 1;
47 | }
48 |
--------------------------------------------------------------------------------
/opencood/pcdet_utils/pointnet2/pointnet2_stack/src/ball_query_gpu.h:
--------------------------------------------------------------------------------
1 | /*
2 | Stacked-batch-data version of ball query, modified from the original implementation of official PointNet++ codes.
3 | Written by Shaoshuai Shi
4 | All Rights Reserved 2019-2020.
5 | */
6 |
7 |
8 | #ifndef _STACK_BALL_QUERY_GPU_H
9 | #define _STACK_BALL_QUERY_GPU_H
10 |
11 | #include <torch/serialize/tensor.h>
12 | #include <vector>
13 | #include <cuda.h>
14 | #include <cuda_runtime_api.h>
15 |
16 | int ball_query_wrapper_stack(int B, int M, float radius, int nsample,
17 | at::Tensor new_xyz_tensor, at::Tensor new_xyz_batch_cnt_tensor,
18 | at::Tensor xyz_tensor, at::Tensor xyz_batch_cnt_tensor, at::Tensor idx_tensor);
19 |
20 |
21 | void ball_query_kernel_launcher_stack(int B, int M, float radius, int nsample,
22 | const float *new_xyz, const int *new_xyz_batch_cnt, const float *xyz, const int *xyz_batch_cnt, int *idx);
23 |
24 |
25 | #endif
26 |
--------------------------------------------------------------------------------
/opencood/pcdet_utils/pointnet2/pointnet2_stack/src/cuda_utils.h:
--------------------------------------------------------------------------------
1 | #ifndef _STACK_CUDA_UTILS_H
2 | #define _STACK_CUDA_UTILS_H
3 |
4 | #include <cmath>
5 |
6 | #define THREADS_PER_BLOCK 256
7 | #define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
8 |
9 | #endif
10 |
--------------------------------------------------------------------------------
/opencood/pcdet_utils/pointnet2/pointnet2_stack/src/group_points.cpp:
--------------------------------------------------------------------------------
1 | /*
2 | Stacked-batch-data version of point grouping, modified from the original implementation of official PointNet++ codes.
3 | Written by Shaoshuai Shi
4 | All Rights Reserved 2019-2020.
5 | */
6 |
7 |
8 | #include <torch/serialize/tensor.h>
9 | #include <cuda.h>
10 | #include <cuda_runtime_api.h>
11 | #include <vector>
12 | #include <THC/THC.h>
13 | #include "group_points_gpu.h"
14 |
15 | extern THCState *state;
16 | #define CHECK_CUDA(x) do { \
17 | if (!x.type().is_cuda()) { \
18 | fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, __LINE__); \
19 | exit(-1); \
20 | } \
21 | } while (0)
22 | #define CHECK_CONTIGUOUS(x) do { \
23 | if (!x.is_contiguous()) { \
24 | fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x, __FILE__, __LINE__); \
25 | exit(-1); \
26 | } \
27 | } while (0)
28 | #define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x)
29 |
30 |
31 | int group_points_grad_wrapper_stack(int B, int M, int C, int N, int nsample,
32 | at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor idx_batch_cnt_tensor,
33 | at::Tensor features_batch_cnt_tensor, at::Tensor grad_features_tensor) {
34 |
35 | CHECK_INPUT(grad_out_tensor);
36 | CHECK_INPUT(idx_tensor);
37 | CHECK_INPUT(idx_batch_cnt_tensor);
38 | CHECK_INPUT(features_batch_cnt_tensor);
39 | CHECK_INPUT(grad_features_tensor);
40 |
41 | const float *grad_out = grad_out_tensor.data<float>();
42 | const int *idx = idx_tensor.data<int>();
43 | const int *idx_batch_cnt = idx_batch_cnt_tensor.data<int>();
44 | const int *features_batch_cnt = features_batch_cnt_tensor.data<int>();
45 | float *grad_features = grad_features_tensor.data<float>();
46 |
47 | group_points_grad_kernel_launcher_stack(B, M, C, N, nsample, grad_out, idx, idx_batch_cnt, features_batch_cnt, grad_features);
48 | return 1;
49 | }
50 |
51 |
52 | int group_points_wrapper_stack(int B, int M, int C, int nsample,
53 | at::Tensor features_tensor, at::Tensor features_batch_cnt_tensor,
54 | at::Tensor idx_tensor, at::Tensor idx_batch_cnt_tensor, at::Tensor out_tensor) {
55 |
56 | CHECK_INPUT(features_tensor);
57 | CHECK_INPUT(features_batch_cnt_tensor);
58 | CHECK_INPUT(idx_tensor);
59 | CHECK_INPUT(idx_batch_cnt_tensor);
60 | CHECK_INPUT(out_tensor);
61 |
62 | const float *features = features_tensor.data<float>();
63 | const int *idx = idx_tensor.data<int>();
64 | const int *features_batch_cnt = features_batch_cnt_tensor.data<int>();
65 | const int *idx_batch_cnt = idx_batch_cnt_tensor.data<int>();
66 | float *out = out_tensor.data<float>();
67 |
68 | group_points_kernel_launcher_stack(B, M, C, nsample, features, features_batch_cnt, idx, idx_batch_cnt, out);
69 | return 1;
70 | }
--------------------------------------------------------------------------------
/opencood/pcdet_utils/pointnet2/pointnet2_stack/src/group_points_gpu.h:
--------------------------------------------------------------------------------
1 | /*
2 | Stacked-batch-data version of point grouping, modified from the original implementation of official PointNet++ codes.
3 | Written by Shaoshuai Shi
4 | All Rights Reserved 2019-2020.
5 | */
6 |
7 |
8 | #ifndef _STACK_GROUP_POINTS_GPU_H
9 | #define _STACK_GROUP_POINTS_GPU_H
10 |
11 | #include <torch/serialize/tensor.h>
12 | #include <cuda.h>
13 | #include <cuda_runtime_api.h>
14 | #include <vector>
15 |
16 |
17 | int group_points_wrapper_stack(int B, int M, int C, int nsample,
18 | at::Tensor features_tensor, at::Tensor features_batch_cnt_tensor,
19 | at::Tensor idx_tensor, at::Tensor idx_batch_cnt_tensor, at::Tensor out_tensor);
20 |
21 | void group_points_kernel_launcher_stack(int B, int M, int C, int nsample,
22 | const float *features, const int *features_batch_cnt, const int *idx, const int *idx_batch_cnt, float *out);
23 |
24 | int group_points_grad_wrapper_stack(int B, int M, int C, int N, int nsample,
25 | at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor idx_batch_cnt_tensor,
26 | at::Tensor features_batch_cnt_tensor, at::Tensor grad_features_tensor);
27 |
28 | void group_points_grad_kernel_launcher_stack(int B, int M, int C, int N, int nsample,
29 | const float *grad_out, const int *idx, const int *idx_batch_cnt, const int *features_batch_cnt, float *grad_features);
30 |
31 | #endif
32 |
--------------------------------------------------------------------------------
/opencood/pcdet_utils/pointnet2/pointnet2_stack/src/interpolate_gpu.h:
--------------------------------------------------------------------------------
1 | #ifndef _INTERPOLATE_GPU_H
2 | #define _INTERPOLATE_GPU_H
3 |
4 | #include <torch/serialize/tensor.h>
5 | #include <vector>
6 | #include <cuda.h>
7 | #include <cuda_runtime_api.h>
8 |
9 |
10 | void three_nn_wrapper_stack(at::Tensor unknown_tensor,
11 | at::Tensor unknown_batch_cnt_tensor, at::Tensor known_tensor,
12 | at::Tensor known_batch_cnt_tensor, at::Tensor dist2_tensor, at::Tensor idx_tensor);
13 |
14 |
15 | void three_interpolate_wrapper_stack(at::Tensor features_tensor,
16 | at::Tensor idx_tensor, at::Tensor weight_tensor, at::Tensor out_tensor);
17 |
18 |
19 |
20 | void three_interpolate_grad_wrapper_stack(at::Tensor grad_out_tensor, at::Tensor idx_tensor,
21 | at::Tensor weight_tensor, at::Tensor grad_features_tensor);
22 |
23 |
24 | void three_nn_kernel_launcher_stack(int batch_size, int N, int M, const float *unknown,
25 | const int *unknown_batch_cnt, const float *known, const int *known_batch_cnt,
26 | float *dist2, int *idx);
27 |
28 |
29 | void three_interpolate_kernel_launcher_stack(int N, int channels,
30 | const float *features, const int *idx, const float *weight, float *out);
31 |
32 |
33 |
34 | void three_interpolate_grad_kernel_launcher_stack(int N, int channels, const float *grad_out,
35 | const int *idx, const float *weight, float *grad_features);
36 |
37 |
38 |
39 | #endif
--------------------------------------------------------------------------------
/opencood/pcdet_utils/pointnet2/pointnet2_stack/src/pointnet2_api.cpp:
--------------------------------------------------------------------------------
1 | #include <torch/serialize/tensor.h>
2 | #include <torch/extension.h>
3 |
4 | #include "ball_query_gpu.h"
5 | #include "group_points_gpu.h"
6 | #include "sampling_gpu.h"
7 | #include "voxel_query_gpu.h"
8 | #include "interpolate_gpu.h"
9 |
10 |
11 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
12 | m.def("ball_query_wrapper", &ball_query_wrapper_stack, "ball_query_wrapper_stack");
13 | m.def("voxel_query_wrapper", &voxel_query_wrapper_stack, "voxel_query_wrapper_stack");
14 |
15 | m.def("furthest_point_sampling_wrapper", &furthest_point_sampling_wrapper, "furthest_point_sampling_wrapper");
16 |
17 | m.def("group_points_wrapper", &group_points_wrapper_stack, "group_points_wrapper_stack");
18 | m.def("group_points_grad_wrapper", &group_points_grad_wrapper_stack, "group_points_grad_wrapper_stack");
19 |
20 | m.def("three_nn_wrapper", &three_nn_wrapper_stack, "three_nn_wrapper_stack");
21 | m.def("three_interpolate_wrapper", &three_interpolate_wrapper_stack, "three_interpolate_wrapper_stack");
22 | m.def("three_interpolate_grad_wrapper", &three_interpolate_grad_wrapper_stack, "three_interpolate_grad_wrapper_stack");
23 | }
24 |
--------------------------------------------------------------------------------
/opencood/pcdet_utils/pointnet2/pointnet2_stack/src/sampling.cpp:
--------------------------------------------------------------------------------
1 | #include <torch/serialize/tensor.h>
2 | #include <ATen/cuda/CUDAContext.h>
3 | #include <vector>
4 | #include <THC/THC.h>
5 |
6 | #include "sampling_gpu.h"
7 |
8 | extern THCState *state;
9 | #define CHECK_CUDA(x) do { \
10 | if (!x.type().is_cuda()) { \
11 | fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, __LINE__); \
12 | exit(-1); \
13 | } \
14 | } while (0)
15 | #define CHECK_CONTIGUOUS(x) do { \
16 | if (!x.is_contiguous()) { \
17 | fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x, __FILE__, __LINE__); \
18 | exit(-1); \
19 | } \
20 | } while (0)
21 | #define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x)
22 |
23 |
24 | int furthest_point_sampling_wrapper(int b, int n, int m,
25 | at::Tensor points_tensor, at::Tensor temp_tensor, at::Tensor idx_tensor) {
26 |
27 | CHECK_INPUT(points_tensor);
28 | CHECK_INPUT(temp_tensor);
29 | CHECK_INPUT(idx_tensor);
30 |
31 | const float *points = points_tensor.data<float>();
32 | float *temp = temp_tensor.data<float>();
33 | int *idx = idx_tensor.data<int>();
34 |
35 | furthest_point_sampling_kernel_launcher(b, n, m, points, temp, idx);
36 | return 1;
37 | }
38 |
--------------------------------------------------------------------------------
/opencood/pcdet_utils/pointnet2/pointnet2_stack/src/sampling_gpu.h:
--------------------------------------------------------------------------------
1 | #ifndef _SAMPLING_GPU_H
2 | #define _SAMPLING_GPU_H
3 |
4 | #include <torch/serialize/tensor.h>
5 | #include <ATen/cuda/CUDAContext.h>
6 | #include <vector>
7 |
8 |
9 | int furthest_point_sampling_wrapper(int b, int n, int m,
10 | at::Tensor points_tensor, at::Tensor temp_tensor, at::Tensor idx_tensor);
11 |
12 | void furthest_point_sampling_kernel_launcher(int b, int n, int m,
13 | const float *dataset, float *temp, int *idxs);
14 |
15 | #endif
16 |
--------------------------------------------------------------------------------
/opencood/pcdet_utils/pointnet2/pointnet2_stack/src/voxel_query.cpp:
--------------------------------------------------------------------------------
1 | #include <math.h>
2 | #include <stdio.h>
3 | #include <stdlib.h>
4 | #include <torch/serialize/tensor.h>
5 | #include <vector>
6 | #include <THC/THC.h>
7 | #include <cuda.h>
8 | #include <cuda_runtime_api.h>
9 | #include "voxel_query_gpu.h"
10 |
11 | extern THCState *state;
12 |
13 | #define CHECK_CUDA(x) do { \
14 | if (!x.type().is_cuda()) { \
15 | fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, __LINE__); \
16 | exit(-1); \
17 | } \
18 | } while (0)
19 | #define CHECK_CONTIGUOUS(x) do { \
20 | if (!x.is_contiguous()) { \
21 | fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x, __FILE__, __LINE__); \
22 | exit(-1); \
23 | } \
24 | } while (0)
25 | #define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x)
26 |
27 |
28 | int voxel_query_wrapper_stack(int M, int R1, int R2, int R3, int nsample, float radius,
29 | int z_range, int y_range, int x_range, at::Tensor new_xyz_tensor, at::Tensor xyz_tensor,
30 | at::Tensor new_coords_tensor, at::Tensor point_indices_tensor, at::Tensor idx_tensor) {
31 | CHECK_INPUT(new_coords_tensor);
32 | CHECK_INPUT(point_indices_tensor);
33 | CHECK_INPUT(new_xyz_tensor);
34 | CHECK_INPUT(xyz_tensor);
35 |
36 | const float *new_xyz = new_xyz_tensor.data<float>();
37 | const float *xyz = xyz_tensor.data<float>();
38 | const int *new_coords = new_coords_tensor.data<int>();
39 | const int *point_indices = point_indices_tensor.data<int>();
40 | int *idx = idx_tensor.data<int>();
41 |
42 | voxel_query_kernel_launcher_stack(M, R1, R2, R3, nsample, radius, z_range, y_range, x_range, new_xyz, xyz, new_coords, point_indices, idx);
43 | return 1;
44 | }
--------------------------------------------------------------------------------
/opencood/pcdet_utils/pointnet2/pointnet2_stack/src/voxel_query_gpu.h:
--------------------------------------------------------------------------------
1 | #ifndef _STACK_VOXEL_QUERY_GPU_H
2 | #define _STACK_VOXEL_QUERY_GPU_H
3 |
4 | #include <torch/serialize/tensor.h>
5 | #include <vector>
6 | #include <cuda.h>
7 | #include <cuda_runtime_api.h>
8 |
9 | int voxel_query_wrapper_stack(int M, int R1, int R2, int R3, int nsample, float radius,
10 | int z_range, int y_range, int x_range, at::Tensor new_xyz_tensor, at::Tensor xyz_tensor,
11 | at::Tensor new_coords_tensor, at::Tensor point_indices_tensor, at::Tensor idx_tensor);
12 |
13 |
14 | void voxel_query_kernel_launcher_stack(int M, int R1, int R2, int R3, int nsample,
15 | float radius, int z_range, int y_range, int x_range, const float *new_xyz,
16 | const float *xyz, const int *new_coords, const int *point_indices, int *idx);
17 |
18 |
19 | #endif
--------------------------------------------------------------------------------
/opencood/pcdet_utils/setup.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from setuptools import find_packages, setup
4 | from torch.utils.cpp_extension import BuildExtension, CUDAExtension
5 |
6 |
7 | def make_cuda_ext(name, module, sources):
8 | cuda_ext = CUDAExtension(
9 | name='%s.%s' % (module, name),
10 | sources=[os.path.join(*module.split('.'), src) for src in sources]
11 | )
12 | return cuda_ext
13 |
14 |
15 | setup(
16 | name='pcd utils',
17 | cmdclass={'build_ext': BuildExtension},
18 | ext_modules=[make_cuda_ext(
19 | name='iou3d_nms_cuda',
20 | module='opencood.pcdet_utils.iou3d_nms',
21 | sources=[
22 | 'src/iou3d_cpu.cpp',
23 | 'src/iou3d_nms_api.cpp',
24 | 'src/iou3d_nms.cpp',
25 | 'src/iou3d_nms_kernel.cu'
26 | ]),
27 | make_cuda_ext(
28 | name='roiaware_pool3d_cuda',
29 | module='opencood.pcdet_utils.roiaware_pool3d',
30 | sources=[
31 | 'src/roiaware_pool3d.cpp',
32 | 'src/roiaware_pool3d_kernel.cu',
33 | ]
34 | ),
35 | make_cuda_ext(
36 | name='pointnet2_stack_cuda',
37 | module='opencood.pcdet_utils.pointnet2.pointnet2_stack',
38 | sources=[
39 | 'src/pointnet2_api.cpp',
40 | 'src/ball_query.cpp',
41 | 'src/ball_query_gpu.cu',
42 | 'src/group_points.cpp',
43 | 'src/group_points_gpu.cu',
44 | 'src/sampling.cpp',
45 | 'src/sampling_gpu.cu',
46 | 'src/interpolate.cpp',
47 | 'src/interpolate_gpu.cu',
48 | 'src/voxel_query_gpu.cu',
49 | 'src/voxel_query.cpp'
50 | ],
51 | ),
52 | make_cuda_ext(
53 | name='pointnet2_batch_cuda',
54 | module='opencood.pcdet_utils.pointnet2.pointnet2_batch',
55 | sources=[
56 | 'src/pointnet2_api.cpp',
57 | 'src/ball_query.cpp',
58 | 'src/ball_query_gpu.cu',
59 | 'src/group_points.cpp',
60 | 'src/group_points_gpu.cu',
61 | 'src/interpolate.cpp',
62 | 'src/interpolate_gpu.cu',
63 | 'src/sampling.cpp',
64 | 'src/sampling_gpu.cu',
65 | ],
66 | )]
67 |
68 | )
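After building in place (typically `python opencood/pcdet_utils/setup.py build_ext --inplace`), the four extensions declared above can be smoke-tested with a short import loop. This is only a hypothetical check; the module paths simply join the `module` and `name` arguments passed to make_cuda_ext.

```python
import importlib

ext_modules = [
    "opencood.pcdet_utils.iou3d_nms.iou3d_nms_cuda",
    "opencood.pcdet_utils.roiaware_pool3d.roiaware_pool3d_cuda",
    "opencood.pcdet_utils.pointnet2.pointnet2_stack.pointnet2_stack_cuda",
    "opencood.pcdet_utils.pointnet2.pointnet2_batch.pointnet2_batch_cuda",
]
for mod in ext_modules:
    importlib.import_module(mod)  # raises ImportError if the CUDA build failed
    print(f"ok: {mod}")
```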
--------------------------------------------------------------------------------
/opencood/tools/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PhyllisH/CodeFilling/57f83ccc7d8457da74f06424811c130c3020e19e/opencood/tools/__init__.py
--------------------------------------------------------------------------------
/opencood/tools/config_generate.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import yaml
3 | import opencood.hypes_yaml.yaml_utils as yaml_utils
4 |
5 | def get_parser():
6 | parser = argparse.ArgumentParser(description="synthetic data generation")
7 | parser.add_argument("--hypes_yaml", "-y", type=str, required=True,
8 | help='data generation yaml file needed ')
9 | parser.add_argument('--model_dir', default='',
10 | help='Continued training path')
11 | parser.add_argument("--save_name", "-s", type=str, required=True,
12 | help='yaml save path ')
13 | opt = parser.parse_args()
14 | return opt
15 | if __name__ == "__main__":
16 | opt = get_parser()
17 | hypes = yaml_utils.load_yaml(opt.hypes_yaml, opt)
18 | with open(opt.save_name, 'w') as outfile:
19 | yaml.dump(hypes, outfile)
--------------------------------------------------------------------------------
/opencood/tools/debug_utils.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # Author: Runsheng Xu , Hao Xiang ,
3 | # License: TDG-Attribution-NonCommercial-NoDistrib
4 |
5 |
6 | import argparse
7 |
8 | import torch
9 | from torch.utils.data import DataLoader
10 |
11 | import opencood.hypes_yaml.yaml_utils as yaml_utils
12 | from opencood.tools import train_utils
13 | from opencood.data_utils.datasets import build_dataset
14 | from opencood.visualization import vis_utils
15 |
16 |
17 | def test_parser():
18 | parser = argparse.ArgumentParser(description="synthetic data generation")
19 | parser.add_argument('--model_dir', type=str, required=True,
20 | help='Continued training path')
21 | parser.add_argument('--fusion_method', type=str, default='late',
22 | help='late, early or intermediate')
23 | opt = parser.parse_args()
24 | return opt
25 |
26 |
27 | def test_bev_post_processing():
28 | opt = test_parser()
29 | assert opt.fusion_method in ['late', 'early', 'intermediate']
30 |
31 | hypes = yaml_utils.load_yaml(None, opt)
32 |
33 | print('Dataset Building')
34 | opencood_dataset = build_dataset(hypes, visualize=True, train=False)
35 | data_loader = DataLoader(opencood_dataset,
36 | batch_size=1,
37 | num_workers=0,
38 | collate_fn=opencood_dataset.collate_batch_test,
39 | shuffle=False,
40 | pin_memory=False,
41 | drop_last=False)
42 |
43 | print('Creating Model')
44 | model = train_utils.create_model(hypes)
45 | # we assume gpu is necessary
46 | if torch.cuda.is_available():
47 | model.cuda()
48 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
49 |
50 | print('Loading Model from checkpoint')
51 | saved_path = opt.model_dir
52 | _, model = train_utils.load_saved_model(saved_path, model)
53 | model.eval()
54 | for i, batch_data in enumerate(data_loader):
55 | batch_data = train_utils.to_device(batch_data, device)
56 | label_map = batch_data["ego"]["label_dict"]["label_map"]
57 | output_dict = {
58 | "cls": label_map[:, 0, :, :],
59 | "reg": label_map[:, 1:, :, :]
60 | }
61 | gt_box_tensor, _ = opencood_dataset.post_processor.post_process_debug(
62 | batch_data["ego"], output_dict)
63 | vis_utils.visualize_single_sample_output_bev(gt_box_tensor,
64 | batch_data['ego'][
65 | 'origin_lidar'].squeeze(
66 | 0),
67 | opencood_dataset)
68 |
69 |
70 | if __name__ == '__main__':
71 | test_bev_post_processing()
72 |
--------------------------------------------------------------------------------
/opencood/tools/multi_gpu_utils.py:
--------------------------------------------------------------------------------
1 | import os
2 | import torch
3 | import torch.distributed as dist
4 |
5 |
6 | def get_dist_info():
7 | if dist.is_available() and dist.is_initialized():
8 | rank = dist.get_rank()
9 | world_size = dist.get_world_size()
10 | else:
11 | rank = 0
12 | world_size = 1
13 | return rank, world_size
14 |
15 |
16 | def init_distributed_mode(args):
17 | if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
18 | args.rank = int(os.environ["RANK"])
19 | args.world_size = int(os.environ['WORLD_SIZE'])
20 | args.gpu = int(os.environ['LOCAL_RANK'])
21 | elif 'SLURM_PROCID' in os.environ:
22 | args.rank = int(os.environ['SLURM_PROCID'])
23 | args.gpu = args.rank % torch.cuda.device_count()
24 | else:
25 | print('Not using distributed mode')
26 | args.distributed = False
27 | return
28 |
29 | args.distributed = True
30 |
31 | torch.cuda.set_device(args.gpu)
32 | args.dist_backend = 'nccl'
33 | print('| distributed init (rank {}): {}'.format(
34 | args.rank, args.dist_url), flush=True)
35 | torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
36 | world_size=args.world_size, rank=args.rank)
37 | torch.distributed.barrier()
38 | setup_for_distributed(args.rank == 0)
39 |
40 |
41 | def setup_for_distributed(is_master):
42 | """
43 | This function disables printing when not in master process
44 | """
45 | import builtins as __builtin__
46 | builtin_print = __builtin__.print
47 |
48 | def print(*args, **kwargs):
49 | force = kwargs.pop('force', False)
50 | if is_master or force:
51 | builtin_print(*args, **kwargs)
52 |
53 | __builtin__.print = print
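A hypothetical launch sketch for init_distributed_mode above: under `torchrun`, RANK/WORLD_SIZE/LOCAL_RANK are set in the environment, so the function picks the NCCL backend and binds each process to its local GPU. The `--dist_url` argument mirrors what the function reads; the rest is illustrative.

```python
import argparse
from opencood.tools.multi_gpu_utils import init_distributed_mode, get_dist_info

parser = argparse.ArgumentParser()
parser.add_argument("--dist_url", default="env://")  # passed to init_process_group
args = parser.parse_args()

init_distributed_mode(args)  # sets args.rank / args.gpu / args.distributed
rank, world_size = get_dist_info()
print(f"rank {rank} of {world_size}, distributed={args.distributed}")
# launch with e.g.: torchrun --nproc_per_node=4 this_script.py
```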
--------------------------------------------------------------------------------
/opencood/tools/params_calc.py:
--------------------------------------------------------------------------------
1 | """
2 | Todo:
3 |
4 | Given a yaml file (from hypes_yaml)
5 |
6 | Calculate the following params number:
7 |
8 | M1 Single
9 | M2 Single
10 | M3 Single
11 | M4 Single
12 | M1M2M3M4 End-to-end Training
13 | M1M2M3 End-to-end Training
14 | M1M2 End-to-end Training
15 |
16 | ConvNeXt Aligner with WarpNet
17 | ConvNeXt Aligner without WarpNet
18 |
19 | """
20 | import torch
21 | import argparse
22 | from opencood.tools import train_utils
23 | import opencood.hypes_yaml.yaml_utils as yaml_utils
24 | from efficientnet_pytorch import EfficientNet
25 |
26 | def calc_parser():
27 | parser = argparse.ArgumentParser(description="synthetic data generation")
28 | parser.add_argument("--hypes_yaml", "-y", type=str, required=True,
29 | help='data generation yaml file needed ')
30 | parser.add_argument('--model_dir', default='')
31 | opt = parser.parse_args()
32 | return opt
33 |
34 |
35 | def main():
36 | opt = calc_parser()
37 | hypes = yaml_utils.load_yaml(opt.hypes_yaml, opt)
38 | print('creating model')
39 | model = train_utils.create_model(hypes)
40 | total_params = sum([param.nelement() for param in model.encoder_m2.camencode.trunk.parameters()])
41 |
42 | print(f'total params number : {total_params/1e6}')
43 |
44 | total_params = sum([param.nelement() for param in model.encoder_m2.camencode.up1.conv.parameters()])
45 |
46 | print(f'total params number : {total_params/1e6}')
47 |
48 | total_params = sum([param.nelement() for param in model.encoder_m2.camencode.up2.parameters()])
49 |
50 | print(f'total params number : {total_params/1e6}')
51 |
52 | total_params = sum([param.nelement() for param in model.encoder_m2.camencode.depth_head.parameters()])
53 |
54 | print(f'total params number : {total_params/1e6}')
55 |
56 | total_params = sum([param.nelement() for param in model.encoder_m2.camencode.image_head.parameters()])
57 |
58 | print(f'total params number : {total_params/1e6}')
59 | # b = EfficientNet.from_pretrained("efficientnet-b0")
60 | # total_b = sum([param.nelement() for param in b.parameters()])
61 | # print(total_b/1e6)
62 |
63 | # total_b = sum([param.nelement() for param in b._blocks.parameters()])
64 | # print(total_b/1e6)
65 |
66 |
67 |
68 |
69 | if __name__=='__main__':
70 | main()
71 |
--------------------------------------------------------------------------------
/opencood/tools/track/eval_track.sh:
--------------------------------------------------------------------------------
1 | model_dir=$1
2 | model_name=$2
3 | note=$3
4 |
5 | # python opencood/tools/track/sort.py --det_logs_path $model_dir'/npy'
6 |
7 | python opencood/tools/track/AB3DMOT.py --det_logs_path $model_dir'/npy'
8 |
9 | python opencood/tools/track/eval_mot.py --model_dir $model_dir --model_name $model_name --note $note
10 |
--------------------------------------------------------------------------------
/opencood/tools/track/v2v4real/run.sh:
--------------------------------------------------------------------------------
1 | d=3
2 |
3 | # # No
4 | # dir=''
5 | # # Late
6 | # dir=''
7 | # # Early
8 | # dir='/remote-home/share/sizhewei/logs_v2/v2v4real_point_pillar_lidar_early_2023_06_21_19_57_47'
9 | # # F-Cooper
10 | # dir='/remote-home/share/sizhewei/logs_v2/v2v4real_point_pillar_lidar_fcooper_2023_06_21_07_51_44'
11 | # # V2VNet
12 | dir='/remote-home/share/sizhewei/logs_v2/v2v4real_point_pillar_lidar_v2vnet_2023_06_28_01_47_15'
13 | # # V2XViT
14 | # dir='/remote-home/share/sizhewei/logs_v2/v2v4real_point_pillar_lidar_v2xvit_2023_06_21_07_55_11'
15 | # # where2comm(max)
16 | # dir='/remote-home/share/sizhewei/logs_v2/v2v4real_point_pillar_lidar_multiscale_max_2023_06_21_07_51_38'
17 | # # where2comm(attn)
18 | # dir='/remote-home/share/sizhewei/logs_v2/v2v4real_point_pillar_lidar_multiscale_att_2023_06_28_09_32_06'
19 |
20 | # opencood/tools/track/opv2v/0.sh
21 | # chmod +x opencood/tools/track/opv2v/0.sh
22 | #clear
23 |
24 | # CUDA_VISIBLE_DEVICES=$d python opencood/tools/inference_track.py --model_dir "$dir/" --save_track
25 |
26 | # CUDA_VISIBLE_DEVICES=$d python opencood/tools/track/AB3DMOT.py --det_logs_path "$dir/intermediate_epoch27/npy"
27 |
28 | CUDA_VISIBLE_DEVICES=$d python opencood/tools/track/eval_mot.py --model_dir "$dir/intermediate_epoch60"
29 |
--------------------------------------------------------------------------------
/opencood/track/Makefile:
--------------------------------------------------------------------------------
1 | # Path to the original V2X-Sim dataset
2 | original_data_path := /scratch/dm4524/data/V2X-Sim-2
3 | # Path to the dataset for detection
4 | det_data_path := /scratch/dm4524/data/V2X-Sim-det
5 | # [lowerbound / upperbound/ v2v / disco / when2com / when2com_warp / who2com / who2com_warp]
6 | mode := disco
7 | # [with_rsu / no_rsu]
8 | rsu := no_rsu
9 | # Index of current agent
10 | current_agent := 0
11 | # [train / test / val]
12 | split := test
13 | # det path
14 | det_path := /scratch/dm4524/ai4ce/coperception/tools/det
15 | # file containing idxes of scenes to run tracking
16 | scene_idxes_file := /scratch/dm4524/ai4ce/coperception/coperception/utils/test_scenes.txt
17 | # Index of the start agent
18 | from_agent := 1
19 | # Index of the end agent + 1
20 | to_agent := 6
21 | # Det logs path (to get the tracking input)
22 | det_logs_path := $(det_path)/logs_compress_0
23 |
24 | .PHONY: sort
25 |
26 | create_data:
27 | python create_data_com.py --root $(original_data_path) --data $(det_data_path)/$(split) --split $(split) --from_agent $(from_agent) --to_agent $(to_agent) --scene_idxes_file $(scene_idxes_file)
28 |
29 | # needed by SORT codebase
30 | create_seqmaps:
31 | python create_seqmaps.py --split $(split) --scene_idxes_file $(scene_idxes_file) --from_agent $(from_agent) --to_agent $(to_agent)
32 |
33 | sort:
34 | cd sort && python sort.py --mode $(mode)/$(rsu) --split $(split) --from_agent $(from_agent) --to_agent $(to_agent) --scene_idxes_file $(scene_idxes_file) --det_logs_path $(det_logs_path)
35 |
36 | eval:
37 | python run_multi_agent_mot_challenge.py --mode $(mode) --rsu $(rsu) --from_agent $(from_agent) --to_agent $(to_agent) --scene_idxes_file $(scene_idxes_file) --split $(split)
--------------------------------------------------------------------------------
/opencood/track/TrackEval/.gitignore:
--------------------------------------------------------------------------------
1 | gt_data/*
2 | !gt_data/Readme.md
3 | tracker_output/*
4 | !tracker_output/Readme.md
5 | output/*
6 | data/*
7 | !goutput/Readme.md
8 | **/__pycache__
9 | .idea
10 | error_log.txt
--------------------------------------------------------------------------------
/opencood/track/TrackEval/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2020 Jonathon Luiten
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/opencood/track/TrackEval/docs/How_To/Add_a_new_metric.md:
--------------------------------------------------------------------------------
1 | # How to add a new or custom family of evaluation metrics to TrackEval
2 |
3 | - Create your metrics code in ```trackeval/metrics/<your_metric>.py```.
4 | - It's probably easiest to start by copying an existing metrics code and editing it, e.g. ```trackeval/metrics/identity.py``` is probably the simplest.
5 | - Your metric should be a class, and it should inherit from the ```trackeval.metrics._base_metric._BaseMetric``` class.
6 | - Define an ```__init__``` function that defines the different ```fields``` (values) that your metric will calculate. See ```trackeval/metrics/_base_metric.py``` for a list of currently used field types. Feel free to add new types.
7 | - Define your code to actually calculate your metric for a single sequence and single class in a function called ```eval_sequence```, which takes a data dictionary as input, and returns a results dictionary as output.
8 | - Define functions for how to combine your metric field values over a) sequences ```combine_sequences```, b) over classes ```combine_classes_class_averaged```, and c) over classes weighted by the number of detections ```combine_classes_det_averaged```.
9 | - We find that a helper such as the ```_compute_final_fields``` function used in the current metrics is convenient, because it is typically needed both for the per-sequence metric calculation and for the different metric combinations; however, this is not required.
10 | - Register your new metric by adding it to ```trackeval/metrics/__init__.py```.
11 | - Your new metric can be used by passing the metrics class to a list of metrics which is passed to the evaluator (see files in ```scripts/*```).
12 |
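A minimal sketch of the steps above, with a hypothetical metric name and field (existing metrics such as ```trackeval/metrics/identity.py``` and ```trackeval/metrics/count.py``` remain the reference):

```python
from trackeval.metrics._base_metric import _BaseMetric
from trackeval import _timing


class DetCount(_BaseMetric):
    """Toy metric: counts tracker detections per sequence."""

    def __init__(self, config=None):
        super().__init__()
        self.integer_fields = ['TrackerDets']
        self.fields = self.integer_fields
        self.summary_fields = self.fields

    @_timing.time
    def eval_sequence(self, data):
        return {'TrackerDets': data['num_tracker_dets']}

    def combine_sequences(self, all_res):
        return {'TrackerDets': self._combine_sum(all_res, 'TrackerDets')}

    def combine_classes_class_averaged(self, all_res, ignore_empty_classes=None):
        return {'TrackerDets': self._combine_sum(all_res, 'TrackerDets')}

    def combine_classes_det_averaged(self, all_res):
        return {'TrackerDets': self._combine_sum(all_res, 'TrackerDets')}
```

The class would then be imported in ```trackeval/metrics/__init__.py``` and passed in the metrics list handed to the evaluator, as described above.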
--------------------------------------------------------------------------------
/opencood/track/TrackEval/scripts/comparison_plots.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import os
3 |
4 | sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
5 | import trackeval # noqa: E402
6 |
7 | plots_folder = os.path.abspath(
8 | os.path.join(os.path.dirname(__file__), "..", "data", "plots")
9 | )
10 | tracker_folder = os.path.abspath(
11 | os.path.join(os.path.dirname(__file__), "..", "data", "trackers")
12 | )
13 |
14 | # dataset = os.path.join('kitti', 'kitti_2d_box_train')
15 | # classes = ['cars', 'pedestrian']
16 |
17 | dataset = os.path.join("mot_challenge", "MOT17-train")
18 | classes = ["pedestrian"]
19 |
20 | data_fol = os.path.join(tracker_folder, dataset)
21 | trackers = os.listdir(data_fol)
22 | out_loc = os.path.join(plots_folder, dataset)
23 | for cls in classes:
24 | trackeval.plotting.plot_compare_trackers(data_fol, trackers, cls, out_loc)
25 |
--------------------------------------------------------------------------------
/opencood/track/TrackEval/tests/test_mot17.py:
--------------------------------------------------------------------------------
1 | """ Test to ensure that the code is working correctly.
2 | Runs all metrics on 14 trackers for the MOT Challenge MOT17 benchmark.
3 | """
4 |
5 |
6 | import sys
7 | import os
8 | import numpy as np
9 | from multiprocessing import freeze_support
10 |
11 | sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
12 | import trackeval # noqa: E402
13 |
14 | # Fixes multiprocessing on windows, does nothing otherwise
15 | if __name__ == "__main__":
16 | freeze_support()
17 |
18 | eval_config = {
19 | "USE_PARALLEL": False,
20 | "NUM_PARALLEL_CORES": 8,
21 | }
22 | evaluator = trackeval.Evaluator(eval_config)
23 | metrics_list = [
24 | trackeval.metrics.HOTA(),
25 | trackeval.metrics.CLEAR(),
26 | trackeval.metrics.Identity(),
27 | ]
28 | test_data_loc = os.path.join(
29 | os.path.dirname(__file__), "..", "data", "tests", "mot_challenge", "MOT17-train"
30 | )
31 | trackers = [
32 | "DPMOT",
33 | "GNNMatch",
34 | "IA",
35 | "ISE_MOT17R",
36 | "Lif_T",
37 | "Lif_TsimInt",
38 | "LPC_MOT",
39 | "MAT",
40 | "MIFTv2",
41 | "MPNTrack",
42 | "SSAT",
43 | "TracktorCorr",
44 | "Tracktorv2",
45 | "UnsupTrack",
46 | ]
47 |
48 | for tracker in trackers:
49 | # Run code on tracker
50 | dataset_config = {"TRACKERS_TO_EVAL": [tracker], "BENCHMARK": "MOT17"}
51 | dataset_list = [trackeval.datasets.MotChallenge2DBox(dataset_config)]
52 | raw_results, messages = evaluator.evaluate(dataset_list, metrics_list)
53 |
54 | results = {
55 | seq: raw_results["MotChallenge2DBox"][tracker][seq]["pedestrian"]
56 | for seq in raw_results["MotChallenge2DBox"][tracker].keys()
57 | }
58 | current_metrics_list = metrics_list + [trackeval.metrics.Count()]
59 | metric_names = trackeval.utils.validate_metrics_list(current_metrics_list)
60 |
61 | # Load expected results:
62 | test_data = trackeval.utils.load_detail(
63 | os.path.join(test_data_loc, tracker, "pedestrian_detailed.csv")
64 | )
65 | assert len(test_data.keys()) == 22, len(test_data.keys())
66 |
67 | # Do checks
68 | for seq in test_data.keys():
69 | assert len(test_data[seq].keys()) > 250, len(test_data[seq].keys())
70 |
71 | details = []
72 | for metric, metric_name in zip(current_metrics_list, metric_names):
73 | table_res = {
74 | seq_key: seq_value[metric_name]
75 | for seq_key, seq_value in results.items()
76 | }
77 | details.append(metric.detailed_results(table_res))
78 | res_fields = sum([list(s["COMBINED_SEQ"].keys()) for s in details], [])
79 | res_values = sum([list(s[seq].values()) for s in details], [])
80 | res_dict = dict(zip(res_fields, res_values))
81 |
82 | for field in test_data[seq].keys():
83 | if not np.isclose(res_dict[field], test_data[seq][field]):
84 | print(tracker, seq, res_dict[field], test_data[seq][field], field)
85 | raise AssertionError
86 |
87 | print("Tracker %s tests passed" % tracker)
88 | print("All tests passed")
89 |
--------------------------------------------------------------------------------
/opencood/track/TrackEval/trackeval/__init__.py:
--------------------------------------------------------------------------------
1 | from .eval import Evaluator
2 | from . import datasets
3 | from . import metrics
4 | from . import plotting
5 | from . import utils
6 |
--------------------------------------------------------------------------------
/opencood/track/TrackEval/trackeval/_timing.py:
--------------------------------------------------------------------------------
1 | from functools import wraps
2 | from time import perf_counter
3 | import inspect
4 |
5 | DO_TIMING = False
6 | DISPLAY_LESS_PROGRESS = False
7 | timer_dict = {}
8 | counter = 0
9 |
10 |
11 | def time(f):
12 | @wraps(f)
13 | def wrap(*args, **kw):
14 | if DO_TIMING:
15 | # Run function with timing
16 | ts = perf_counter()
17 | result = f(*args, **kw)
18 | te = perf_counter()
19 | tt = te - ts
20 |
21 | # Get function name
22 | arg_names = inspect.getfullargspec(f)[0]
23 | if arg_names[0] == "self" and DISPLAY_LESS_PROGRESS:
24 | return result
25 | elif arg_names[0] == "self":
26 | method_name = type(args[0]).__name__ + "." + f.__name__
27 | else:
28 | method_name = f.__name__
29 |
30 | # Record accumulative time in each function for analysis
31 | if method_name in timer_dict.keys():
32 | timer_dict[method_name] += tt
33 | else:
34 | timer_dict[method_name] = tt
35 |
36 | # If code is finished, display timing summary
37 | if method_name == "Evaluator.evaluate":
38 | print("")
39 | print("Timing analysis:")
40 | for key, value in timer_dict.items():
41 | print("%-70s %2.4f sec" % (key, value))
42 | else:
43 | # Get function argument values for printing special arguments of interest
44 | arg_titles = ["tracker", "seq", "cls"]
45 | arg_vals = []
46 | for i, a in enumerate(arg_names):
47 | if a in arg_titles:
48 | arg_vals.append(args[i])
49 | arg_text = "(" + ", ".join(arg_vals) + ")"
50 |
51 | # Display methods and functions with different indentation.
52 | if arg_names[0] == "self":
53 | print("%-74s %2.4f sec" % (" " * 4 + method_name + arg_text, tt))
54 | elif arg_names[0] == "test":
55 | pass
56 | else:
57 | global counter
58 | counter += 1
59 | print("%i %-70s %2.4f sec" % (counter, method_name + arg_text, tt))
60 |
61 | return result
62 | else:
63 | # If config["TIME_PROGRESS"] is false, or config["USE_PARALLEL"] is true, run functions normally without timing.
64 | return f(*args, **kw)
65 |
66 | return wrap
67 |
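A hypothetical usage sketch for the decorator above: timing is off by default, so DO_TIMING has to be flipped before the wrapped call, and arguments named tracker/seq/cls are echoed in the printed line.

```python
from trackeval import _timing

_timing.DO_TIMING = True

@_timing.time
def process(seq):
    return sum(i * i for i in range(100_000))

process("MOT17-02")  # prints e.g. "1 process(MOT17-02)    0.0123 sec"
```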
--------------------------------------------------------------------------------
/opencood/track/TrackEval/trackeval/baselines/__init__.py:
--------------------------------------------------------------------------------
1 | import baseline_utils
2 | import stp
3 | import non_overlap
4 | import pascal_colormap
5 | import thresholder
6 | import vizualize
7 |
--------------------------------------------------------------------------------
/opencood/track/TrackEval/trackeval/datasets/__init__.py:
--------------------------------------------------------------------------------
1 | from .kitti_2d_box import Kitti2DBox
2 | from .kitti_mots import KittiMOTS
3 | from .mot_challenge_2d_box import MotChallenge2DBox
4 | from .mots_challenge import MOTSChallenge
5 | from .bdd100k import BDD100K
6 | from .davis import DAVIS
7 | from .tao import TAO
8 | from .youtube_vis import YouTubeVIS
9 | from .head_tracking_challenge import HeadTrackingChallenge
10 | from .rob_mots import RobMOTS
11 |
--------------------------------------------------------------------------------
/opencood/track/TrackEval/trackeval/datasets/rob_mots_classmap.py:
--------------------------------------------------------------------------------
1 | cls_id_to_name = {
2 | 1: "person",
3 | 2: "bicycle",
4 | 3: "car",
5 | 4: "motorcycle",
6 | 5: "airplane",
7 | 6: "bus",
8 | 7: "train",
9 | 8: "truck",
10 | 9: "boat",
11 | 10: "traffic light",
12 | 11: "fire hydrant",
13 | 12: "stop sign",
14 | 13: "parking meter",
15 | 14: "bench",
16 | 15: "bird",
17 | 16: "cat",
18 | 17: "dog",
19 | 18: "horse",
20 | 19: "sheep",
21 | 20: "cow",
22 | 21: "elephant",
23 | 22: "bear",
24 | 23: "zebra",
25 | 24: "giraffe",
26 | 25: "backpack",
27 | 26: "umbrella",
28 | 27: "handbag",
29 | 28: "tie",
30 | 29: "suitcase",
31 | 30: "frisbee",
32 | 31: "skis",
33 | 32: "snowboard",
34 | 33: "sports ball",
35 | 34: "kite",
36 | 35: "baseball bat",
37 | 36: "baseball glove",
38 | 37: "skateboard",
39 | 38: "surfboard",
40 | 39: "tennis racket",
41 | 40: "bottle",
42 | 41: "wine glass",
43 | 42: "cup",
44 | 43: "fork",
45 | 44: "knife",
46 | 45: "spoon",
47 | 46: "bowl",
48 | 47: "banana",
49 | 48: "apple",
50 | 49: "sandwich",
51 | 50: "orange",
52 | 51: "broccoli",
53 | 52: "carrot",
54 | 53: "hot dog",
55 | 54: "pizza",
56 | 55: "donut",
57 | 56: "cake",
58 | 57: "chair",
59 | 58: "couch",
60 | 59: "potted plant",
61 | 60: "bed",
62 | 61: "dining table",
63 | 62: "toilet",
64 | 63: "tv",
65 | 64: "laptop",
66 | 65: "mouse",
67 | 66: "remote",
68 | 67: "keyboard",
69 | 68: "cell phone",
70 | 69: "microwave",
71 | 70: "oven",
72 | 71: "toaster",
73 | 72: "sink",
74 | 73: "refrigerator",
75 | 74: "book",
76 | 75: "clock",
77 | 76: "vase",
78 | 77: "scissors",
79 | 78: "teddy bear",
80 | 79: "hair drier",
81 | 80: "toothbrush",
82 | }
83 |
--------------------------------------------------------------------------------
/opencood/track/TrackEval/trackeval/metrics/__init__.py:
--------------------------------------------------------------------------------
1 | from .hota import HOTA
2 | from .clear import CLEAR
3 | from .identity import Identity
4 | from .count import Count
5 | from .j_and_f import JAndF
6 | from .track_map import TrackMAP
7 | from .vace import VACE
8 | from .ideucl import IDEucl
9 |
--------------------------------------------------------------------------------
/opencood/track/TrackEval/trackeval/metrics/count.py:
--------------------------------------------------------------------------------
1 | from ._base_metric import _BaseMetric
2 | from .. import _timing
3 |
4 |
5 | class Count(_BaseMetric):
6 | """Class which simply counts the number of tracker and gt detections and ids."""
7 |
8 | def __init__(self, config=None):
9 | super().__init__()
10 | self.integer_fields = ["Dets", "GT_Dets", "IDs", "GT_IDs"]
11 | self.fields = self.integer_fields
12 | self.summary_fields = self.fields
13 |
14 | @_timing.time
15 | def eval_sequence(self, data):
16 | """Returns counts for one sequence"""
17 | # Get results
18 | res = {
19 | "Dets": data["num_tracker_dets"],
20 | "GT_Dets": data["num_gt_dets"],
21 | "IDs": data["num_tracker_ids"],
22 | "GT_IDs": data["num_gt_ids"],
23 | "Frames": data["num_timesteps"],
24 | }
25 | return res
26 |
27 | def combine_sequences(self, all_res):
28 | """Combines metrics across all sequences"""
29 | res = {}
30 | for field in self.integer_fields:
31 | res[field] = self._combine_sum(all_res, field)
32 | return res
33 |
34 | def combine_classes_class_averaged(self, all_res, ignore_empty_classes=None):
35 | """Combines metrics across all classes by averaging over the class values"""
36 | res = {}
37 | for field in self.integer_fields:
38 | res[field] = self._combine_sum(all_res, field)
39 | return res
40 |
41 | def combine_classes_det_averaged(self, all_res):
42 | """Combines metrics across all classes by averaging over the detection values"""
43 | res = {}
44 | for field in self.integer_fields:
45 | res[field] = self._combine_sum(all_res, field)
46 | return res
47 |
--------------------------------------------------------------------------------
/opencood/track/create_seqmaps.py:
--------------------------------------------------------------------------------
1 | import os
2 | import argparse
3 |
4 |
5 | def parse_args():
6 | """Parse input arguments."""
7 | parser = argparse.ArgumentParser(description="SORT demo")
8 | parser.add_argument(
9 | "--scene_idxes_file",
10 | type=str,
11 | help="File containing idxes of scenes to run tracking",
12 | )
13 | parser.add_argument(
14 | "--from_agent", default=0, type=int, help="start from which agent"
15 | )
16 | parser.add_argument(
17 | "--to_agent", default=6, type=int, help="until which agent (index + 1)"
18 | )
19 | parser.add_argument("--split", type=str, help="[test/val]")
20 | args = parser.parse_args()
21 | return args
22 |
23 |
24 | if __name__ == "__main__":
25 | args = parse_args()
26 | scene_idxes_file = args.scene_idxes_file
27 | from_agent = args.from_agent
28 | to_agent = args.to_agent
29 | split = args.split
30 |
31 |     # Read the scene indexes, closing the file when done.
32 |     with open(scene_idxes_file, "r") as f:
33 |         scene_idxes = [int(line.strip()) for line in f]
34 | 
35 |     seqmaps_dir = "TrackEval/data/gt/mot_challenge/seqmaps"
36 |     os.makedirs(seqmaps_dir, exist_ok=True)
37 |     for ii in range(from_agent, to_agent):
38 |         seqmap_path = os.path.join(seqmaps_dir, f"V2X-{split}{ii}.txt")
39 |         with open(seqmap_path, "w") as seqmap_file:
40 |             seqmap_file.write("name\n")
41 |             for scene_idx in scene_idxes:
42 |                 seqmap_file.write(f"{scene_idx}\n")
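
A sketch of the intended usage, with illustrative paths and scene indexes (none of these values come from the repository):

```python
# Illustrative only: build a toy scene-index file, then run the script.
# Each generated seqmap starts with a "name" header line followed by one
# scene index per line, e.g.
#   name
#   90
#   91
#   ...
with open("scene_idxes_test.txt", "w") as f:
    f.write("\n".join(str(i) for i in range(90, 100)) + "\n")

# python opencood/track/create_seqmaps.py \
#     --scene_idxes_file scene_idxes_test.txt --split test
# -> writes TrackEval/data/gt/mot_challenge/seqmaps/V2X-test0.txt ... V2X-test5.txt
```
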
--------------------------------------------------------------------------------
/opencood/track/prep_det_res.py:
--------------------------------------------------------------------------------
1 | import os
2 | import shutil
3 | import argparse
4 |
5 |
6 | def main(args):
7 | if args.split == "val":
8 | seqs = range(80, 90)
9 | elif args.split == "test":
10 | seqs = range(90, 100)
11 |
12 | save_dir = f"./TrackEval/data/trackers/mot_challenge/V2X-{args.split}/sort-{args.mode}/data"
13 | os.makedirs(save_dir, exist_ok=True)
14 | for seq in seqs:
15 | shutil.copy(
16 | os.path.join(args.root, f"{seq}.txt"), os.path.join(save_dir, f"{seq}.txt")
17 | )
18 |
19 |
20 | if __name__ == "__main__":
21 | parser = argparse.ArgumentParser()
22 | parser.add_argument("--mode", type=str)
23 | parser.add_argument("--split", type=str)
24 | parser.add_argument("--root", type=str)
25 | args = parser.parse_args()
26 | main(args)
27 |
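
An illustrative invocation; the --root, --mode and --split values below are assumptions, not repository defaults:

```python
# python opencood/track/prep_det_res.py \
#     --root ./sort/output/disco --mode disco --split test
#
# Copies 90.txt ... 99.txt from --root into
# ./TrackEval/data/trackers/mot_challenge/V2X-test/sort-disco/data/
# so that TrackEval can read them as a tracker named "sort-disco".
```
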
--------------------------------------------------------------------------------
/opencood/track/sort/.gitignore:
--------------------------------------------------------------------------------
1 | output/
2 | mot_benchmark
3 | disco/
4 | lowerbound/
5 | output/
6 | upperbound/
7 | v2v/
8 | when2com/
9 | when2com_warp/
10 | who2com/
11 | who2com_warp/
--------------------------------------------------------------------------------
/opencood/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PhyllisH/CodeFilling/57f83ccc7d8457da74f06424811c130c3020e19e/opencood/utils/__init__.py
--------------------------------------------------------------------------------
/opencood/utils/cleanup_utils.py:
--------------------------------------------------------------------------------
1 | import glob
2 | import os
3 | import sys
4 |
5 | def clean_all_numeric_checkpoint(path):
6 | """
7 |     Remove all intermediate checkpoints except bestval.
8 | 
9 |     path : str
10 |         a path to the log directory
11 | """
12 | file_list = glob.glob(os.path.join(path, "net_epoch[0-9]*.pth"))
13 | for file in file_list:
14 | os.remove(file)
15 |
16 |
17 | if __name__ == "__main__":
18 | path = sys.argv[1]
19 | assert os.path.isdir(path)
20 | clean_all_numeric_checkpoint(path)
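
A hedged usage sketch (the log-directory path is an assumption). The glob pattern only matches checkpoints with a digit directly after "net_epoch", so a best-validation checkpoint with a non-numeric suffix is left in place:

```python
from opencood.utils.cleanup_utils import clean_all_numeric_checkpoint

# Removes net_epoch1.pth, net_epoch2.pth, ... from the given log directory;
# equivalent to: python opencood/utils/cleanup_utils.py <log_dir>
clean_all_numeric_checkpoint("opencood/logs/my_experiment")
```
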
--------------------------------------------------------------------------------
/opencood/utils/draco_compression.py:
--------------------------------------------------------------------------------
1 | """
2 | To use this script, draco [https://github.com/google/draco.git] should be installed.
3 | 1. During testing, keypoint coordinates and features should be saved as .ply files using
4 |    the function save_ply.
5 | 2. Compress and analyze the CPM size using the function draco_compression.
6 | """
7 | import random, os, re
8 | import numpy as np
9 | import torch
10 | from glob import glob
11 | import subprocess
12 |
13 | draco = "/media/hdd/yuan/draco/build_dir/draco_encoder"
14 |
15 |
16 | def save_ply(path, batch_coords, batch_features):
17 | # path = "/media/hdd/yuan/OpenCOOD/opencood/logs/fpvrcnn_intermediate_fusion/cpms/"
18 | dirname = "{:06d}".format(random.randint(0, 999999))
19 | os.mkdir(path + dirname)
20 | for bi, (coords, features) in enumerate(zip(batch_coords[1:],
21 | batch_features[1:])):
22 | header = "ply\n" \
23 | "format ascii 1.0\n" \
24 | f"element vertex {len(coords)}\n" \
25 | "property float x\n" \
26 | "property float y\n" \
27 | "property float z\n"
28 | header = header + "".join([f"property float feat{i}\n" for i in range(32)]) + "end_header"
29 | data = torch.cat([coords, features], dim=1).detach().cpu().numpy()
30 | np.savetxt(path + dirname + f"/{bi + 1}.ply", data,
31 | delimiter=' ', header=header, comments='')
32 |
33 |
34 | def draco_compression(ply_path):
35 | files = glob(os.path.join(ply_path, '*/*.ply'))
36 | cpm_sizes = list(map(draco_compression_one, files))
37 | return cpm_sizes
38 |
39 |
40 | def draco_compression_one(file):
41 | out_file = file.replace('ply', 'drc')
42 | std_out = subprocess.getoutput(f"{draco} -point_cloud -i {file} -o {out_file}")
43 | size_str = re.findall('[0-9]+ bytes', std_out)
44 | if len(size_str)<1:
45 | print("Compression failed:", file)
46 | cpm_size = 0
47 | else:
48 | cpm_size = int(size_str[0].split(' ')[0])
49 |
50 | return cpm_size
51 |
52 |
53 | def cal_avg_num_kpts(ply_path):
54 | files = glob(os.path.join(ply_path, '*/*.ply'))
55 |
56 | def read_vertex_num(file):
57 | with open(file, 'r') as f:
58 | size_str = re.findall('element vertex [0-9]+', f.read())[0]
59 | return float(size_str.split(' ')[-1]) * 4 * 32 / 1024
60 |
61 | sizes = list(map(read_vertex_num, files))
62 |
63 | return sizes
64 |
65 |
66 | if __name__=="__main__":
67 | cpm_sizes = cal_avg_num_kpts("/media/hdd/yuan/OpenCOOD/opencood/logs/fpvrcnn_intermediate_fusion/cpms")
68 | # cpm_sizes = draco_compression("/media/hdd/yuan/OpenCOOD/opencood/logs/fpvrcnn_intermediate_fusion/cpms")
69 | print(np.array(cpm_sizes).mean())
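
A sketch tying the two steps from the module docstring together; the output directory, tensor shapes and the draco binary location are assumptions:

```python
import torch
from opencood.utils import draco_compression as dc

cpm_dir = "./cpms/"  # must already exist; save_ply creates a random subdir inside it

# Step 1 (during testing): per-agent keypoint coordinates (N, 3) and 32-dim
# features (N, 32); save_ply skips index 0 (the ego agent).
batch_coords = [torch.rand(100, 3) for _ in range(3)]
batch_features = [torch.rand(100, 32) for _ in range(3)]
dc.save_ply(cpm_dir, batch_coords, batch_features)

# Step 2 (offline): compress every saved .ply with the draco encoder pointed
# to by dc.draco and report the mean compressed CPM size in bytes.
sizes = dc.draco_compression(cpm_dir)
print(sum(sizes) / max(len(sizes), 1))
```
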
--------------------------------------------------------------------------------
/opencood/utils/heter_utils_ori.py:
--------------------------------------------------------------------------------
1 | """
2 | Agent Selection Module for Heterogeneous Collaboration.
3 |
4 | Possibly extend later with data augmentation: one sample with different selection settings.
5 | """
6 | import numpy as np
7 | import torch
8 |
9 |
10 | class AgentSelector:
11 | def __init__(self, args, max_cav):
12 | self.lidar_ratio = args['lidar_ratio']
13 | self.ego_modality = args['ego_modality'] # 'random' / 'lidar'/ 'camera'
14 | self.max_cav = max_cav
15 |
16 | self.preset = None
17 | if "preset_file" in args:
18 | self.preset_file = args['preset_file'] # txt file
19 | self.preset = np.loadtxt(self.preset_file)
20 |
21 |
22 | def select_agent(self, i):
23 | """
24 | select agent to be equipped with LiDAR / Camera according to the strategy
25 | 1 indicates lidar
26 | 0 indicates camera
27 | """
28 | lidar_agent = np.random.choice(2, self.max_cav, p=[1 - self.lidar_ratio, self.lidar_ratio])
29 |
30 | if self.ego_modality == 'lidar':
31 | lidar_agent[0] = 1
32 |
33 | if self.ego_modality == 'camera':
34 | lidar_agent[0] = 0
35 |
36 |         if self.preset is not None:  # np.loadtxt returns an array; compare against None explicitly
37 | lidar_agent = self.preset[i]
38 |
39 | return lidar_agent, 1 - lidar_agent
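
A minimal sketch of driving the selector; the config values are illustrative rather than taken from a shipped yaml:

```python
from opencood.utils.heter_utils_ori import AgentSelector

args = {"lidar_ratio": 0.5, "ego_modality": "lidar"}  # no preset_file given
selector = AgentSelector(args, max_cav=5)

# Per-agent modality mask for sample 0: 1 = LiDAR, 0 = camera.
lidar_agent, camera_agent = selector.select_agent(0)
print(lidar_agent)   # e.g. [1 0 1 1 0]; index 0 is forced to LiDAR by ego_modality
print(camera_agent)  # the complementary camera mask
```
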
--------------------------------------------------------------------------------
/opencood/utils/setup.py:
--------------------------------------------------------------------------------
1 | from distutils.core import setup
2 | from Cython.Build import cythonize
3 | import numpy
4 | setup(
5 | name='box overlaps',
6 | ext_modules=cythonize('opencood/utils/box_overlaps.pyx'),
7 | include_dirs=[numpy.get_include()]
8 | )
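
This is a small Cython build script for the box_overlaps extension; it is normally built in place before running the rest of the toolchain (the exact command may differ from the project's installation docs):

```python
# Build the Cython extension in place (command shown as a comment):
#   python opencood/utils/setup.py build_ext --inplace
```
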
--------------------------------------------------------------------------------
/opencood/version.py:
--------------------------------------------------------------------------------
1 | """Specifies the current version number of OpenCOOD."""
2 |
3 | __version__ = "0.1.0"
4 |
--------------------------------------------------------------------------------
/opencood/visualization/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PhyllisH/CodeFilling/57f83ccc7d8457da74f06424811c130c3020e19e/opencood/visualization/__init__.py
--------------------------------------------------------------------------------
/opencood/visualization/debug_plot.py:
--------------------------------------------------------------------------------
1 | """
2 | Use this tool to plot the feature map for verifying code.
3 | """
4 |
5 | import matplotlib.pyplot as plt
6 | import torch
7 | import os
8 |
9 | def plot_feature(feature, channel, save_path, flag="", vmin=None, vmax=None, colorbar=False):
10 | """
11 | Args:
12 |         feature : torch.Tensor or np.ndarray
13 |             assumed to be of shape [N, C, H, W]
14 | 
15 |         channel : int or list of int
16 |             channel(s) to plot
17 | 
18 |         save_path : str
19 |             directory to save the visualization results in.
20 | """
21 | if isinstance(feature, torch.Tensor):
22 | feature = feature.detach().cpu().numpy()
23 |
24 | if isinstance(channel, int):
25 | channel = [channel]
26 |
27 | if not os.path.exists(save_path):
28 | os.mkdir(save_path)
29 |
30 | N, C, H, W = feature.shape
31 | for c in channel:
32 | for n in range(N):
33 | plt.imshow(feature[n,c], vmin=vmin, vmax=vmax)
34 | file_path = os.path.join(save_path, f"{flag}_agent_{n}_channel_{c}.png")
35 | if colorbar:
36 | plt.colorbar()
37 | plt.savefig(file_path, dpi=400)
38 | plt.close()
39 | print(f"Saving to {file_path}")
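
A quick sketch of calling the helper on a random feature map; the shape, channels and output directory are illustrative:

```python
import torch
from opencood.visualization.debug_plot import plot_feature

feature = torch.rand(2, 64, 100, 352)  # [N, C, H, W], e.g. two agents' BEV features
plot_feature(feature, channel=[0, 8], save_path="./feat_vis",
             flag="fused", colorbar=True)
# -> ./feat_vis/fused_agent_0_channel_0.png, ./feat_vis/fused_agent_1_channel_0.png, ...
```
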
--------------------------------------------------------------------------------
/opencood/visualization/draw_box_align/img2video.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import numpy as np
3 | import glob
4 | import os
5 |
6 |
7 | projnames = ['/GPFS/rhome/yifanlu/OpenCOOD/opencood/logs/OPV2V_npj_v2xvit_w_2022_09_05_18_52_53/vis_0.4_0.4_0_0_video_vis',
8 | '/GPFS/rhome/yifanlu/OpenCOOD/opencood/logs/OPV2V_npj_v2xvit_w_2022_09_05_18_52_53/vis_0.6_0.6_0_0_video_vis']
9 | # projnames = ['/GPFS/rhome/yifanlu/OpenCOOD/opencood/logs/OPV2V_npj_disconet_w_2022_09_02_16_19_51/vis_0.4_0.4_0_0_video_vis',
10 | # '/GPFS/rhome/yifanlu/OpenCOOD/opencood/logs/OPV2V_npj_disconet_w_2022_09_02_16_19_51/vis_0.6_0.6_0_0_video_vis']
11 | # projnames = ['/GPFS/rhome/yifanlu/OpenCOOD/opencood/logs/OPV2V_npj_v2vnet_robust_new/vis_0.4_0.4_0_0_video_vis',
12 | # '/GPFS/rhome/yifanlu/OpenCOOD/opencood/logs/OPV2V_npj_v2vnet_robust_new/vis_0.6_0.6_0_0_video_vis']
13 | # projnames = ['/GPFS/rhome/yifanlu/OpenCOOD/opencood/logs/OPV2V_npj_ms_ba/vis_0.4_0.4_0_0_video_vis',
14 | # '/GPFS/rhome/yifanlu/OpenCOOD/opencood/logs/OPV2V_npj_ms_ba/vis_0.6_0.6_0_0_video_vis']
15 |
16 | print(projnames)
17 |
18 | for projname in projnames:
19 | img_array = []
20 | for filename in sorted(glob.glob(f'{projname}/3d_*'))[30:75]:
21 | print(filename)
22 | img = cv2.imread(filename, cv2.IMREAD_COLOR)
23 | height, width, layers = img.shape
24 | size = (width,height)
25 | img_array.append(img)
26 |
27 | size = (2560, 1920)
28 | out = cv2.VideoWriter(f'./result_video_cut_bev/v2xvit_{projname.split("/")[-1]}'+".mp4",cv2.VideoWriter_fourcc(*'mp4v'), 10, size)
29 |
30 | for i in range(len(img_array)):
31 | out.write(img_array[i])
32 | out.release()
--------------------------------------------------------------------------------
/opencood/visualization/draw_fancy/img2video.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import numpy as np
3 | import glob
4 | import os
5 |
6 | projnames = os.listdir("./result")
7 | projnames = ['scene_overview_Mixed2','location_in_bev2']
8 | print(projnames)
9 |
10 | for projname in projnames:
11 | img_array = []
12 | for filename in sorted(glob.glob(f'./result/{projname}/*.png')):
13 | print(filename)
14 | img = cv2.imread(filename)
15 | height, width, layers = img.shape
16 | size = (width,height)
17 | img_array.append(img)
18 |
19 |
20 | out = cv2.VideoWriter(f'./result_video/{projname}.mp4',cv2.VideoWriter_fourcc(*'mp4v'), 15, size)
21 |
22 | for i in range(len(img_array)):
23 | out.write(img_array[i])
24 | out.release()
--------------------------------------------------------------------------------
/opencood/visualization/draw_fancy/location_in_bev.py:
--------------------------------------------------------------------------------
1 |
2 | from torch.utils.data import Subset
3 | from matplotlib import pyplot as plt
4 | import matplotlib.patches as mpatches
5 | import numpy as np
6 | from matplotlib import pyplot as plt
7 | import matplotlib
8 | import matplotlib.patches as mpatches
9 | import os
10 |
11 | v2x = True
12 | if v2x:
13 | from opencood.visualization.draw_fancy.draw_fancy_datasetv2x import SimpleDataset
14 | else:
15 | from opencood.visualization.draw_fancy.draw_fancy_dataset import SimpleDataset
16 |
17 | COLOR = ['red','springgreen','dodgerblue', 'darkviolet']
18 | COLOR_RGB = [ tuple([int(cc * 255) for cc in matplotlib.colors.to_rgb(c)]) for c in COLOR]
19 | COLOR_PC = [tuple([int(cc*0.2 + 255*0.8) for cc in c]) for c in COLOR_RGB]
20 | classes = ['agent1', 'agent2', 'agent3', 'agent4']
21 |
22 | def main():
23 | ## basic setting
24 | dataset = SimpleDataset()
25 | data_dict_demo = dataset[0]
26 | cav_ids = list(data_dict_demo.keys())
27 | color_map = dict()
28 | posx = dict()
29 | posy = dict()
30 | for (idx, cav_id) in enumerate(cav_ids):
31 | color_map[cav_id] = COLOR[idx]
32 | posx[cav_id] = []
33 | posy[cav_id] = []
34 | recs = []
35 | for i in range(0,len(cav_ids)):
36 | recs.append(mpatches.Rectangle((0,0),1,1,fc=COLOR[i]))
37 |
38 |
39 |
40 |
41 | ## matplotlib setting
42 | plt.figure()
43 |
44 | ## draw
45 | print("loop over dataset")
46 | dataset_len = len(dataset)
47 | for idx in range(dataset_len):
48 | print(idx)
49 | base_data_dict = dataset[idx]
50 |
51 |
52 | plt.style.use('dark_background')
53 | plt.xlim((-100,120))
54 | plt.ylim((-70,250))
55 | plt.xticks([])
56 | plt.yticks([])
57 | if not v2x:
58 | plt.gca().invert_xaxis()
59 | plt.legend(recs,classes,loc='lower left')
60 |
61 | for cav_id, cav_content in base_data_dict.items():
62 | pos = cav_content['params']['lidar_pose'] # list [6,]
63 | posx[cav_id].append(pos[0])
64 | posy[cav_id].append(pos[1])
65 |
66 | start_idx = max(0, idx-10)
67 | end_idx = idx
68 | for inner_idx in range(start_idx, end_idx + 1):
69 | plt.scatter(np.array(posx[cav_id][inner_idx:inner_idx+1]),
70 | np.array(posy[cav_id][inner_idx:inner_idx+1]),
71 | s=(inner_idx-start_idx + 1)*4,
72 | alpha=(1 - (end_idx - inner_idx) * 0.09),
73 | c=color_map[cav_id])
74 |
75 | if v2x:
76 | save_path = f"./result_v2x/location_in_bev"
77 | else:
78 | save_path = f"./result/location_in_bev"
79 |
80 | if not os.path.exists(save_path):
81 | os.mkdir(save_path)
82 |
83 | plt.savefig(f"{save_path}/trajectory_{idx:02d}.png", dpi=300)
84 | plt.clf()
85 |
86 |
87 | if __name__ == "__main__":
88 | main()
--------------------------------------------------------------------------------
/opencood/visualization/my_vis.py:
--------------------------------------------------------------------------------
1 | from operator import gt
2 | import numpy as np
3 | import pickle
4 | from pyquaternion import Quaternion
5 | from matplotlib import pyplot as plt
6 | from icecream import ic
7 | from torch import margin_ranking_loss
8 |
9 |
10 |
11 | def visualize(pred_box_tensor, gt_tensor, pcd, show_vis, save_path, dataset=None):
12 | """
13 | Visualize the prediction, ground truth with point cloud together.
14 |
15 | Parameters
16 | ----------
17 | pred_box_tensor : torch.Tensor
18 | (N, 8, 3) prediction.
19 |
20 | gt_tensor : torch.Tensor
21 | (N, 8, 3) groundtruth bbx
22 |
23 | pcd : torch.Tensor
24 | PointCloud, (N, 4).
25 |
26 | show_vis : bool
27 | Whether to show visualization.
28 |
29 | save_path : str
30 | Save the visualization results to given path.
31 |
32 | dataset : BaseDataset
33 | opencood dataset object.
34 |
35 | """
36 |
37 | pcd_np = pcd.cpu().numpy()
38 | pred_box_np = pred_box_tensor.cpu().numpy()
39 | gt_box_np = gt_tensor.cpu().numpy()
40 |
41 | plt.figure(dpi=400)
42 | # draw point cloud. It's in lidar coordinate
43 | plt.scatter(pcd_np[:,0], pcd_np[:,1], s=0.5)
44 |
45 | N = gt_tensor.shape[0]
46 | for i in range(N):
47 | plt.plot(gt_box_np[i,:,0], gt_box_np[i,:,1], c= "r", marker='.', linewidth=1, markersize=1.5)
48 |
49 | N = pred_box_tensor.shape[0]
50 | for i in range(N):
51 | plt.plot(pred_box_np[i,:,0], pred_box_np[i,:,1], c= "g", marker='.', linewidth=1, markersize=1.5)
52 |
53 |
54 | plt.savefig(save_path)
55 | plt.clf()
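
A sketch using random tensors just to exercise the plotting path (shapes and the save path are illustrative; the module's extra imports such as pyquaternion and icecream must be installed for the import to succeed):

```python
import torch
from opencood.visualization.my_vis import visualize

pred_box = torch.rand(5, 8, 3) * 50   # (N, 8, 3) predicted box corners
gt_box = torch.rand(5, 8, 3) * 50     # (N, 8, 3) ground-truth box corners
pcd = torch.rand(2000, 4) * 100       # (M, 4) point cloud in the lidar frame
visualize(pred_box, gt_box, pcd, show_vis=False, save_path="./my_vis_demo.png")
```
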
--------------------------------------------------------------------------------
/opencood/visualization/simple_plot3d/__init__.py:
--------------------------------------------------------------------------------
1 | from .canvas_3d import Canvas_3D
2 | from .canvas_bev import Canvas_BEV
3 |
--------------------------------------------------------------------------------
/opencood/visualization/vis_data_sequence.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # Author: Runsheng Xu
3 | # License: TDG-Attribution-NonCommercial-NoDistrib
4 |
5 |
6 | import os
7 | from torch.utils.data import DataLoader, Subset
8 | from opencood.data_utils import datasets
9 | import torch
10 | from opencood.tools import train_utils, inference_utils
11 | from opencood.hypes_yaml.yaml_utils import load_yaml
12 | from opencood.visualization import vis_utils, simple_vis
13 | from opencood.data_utils.datasets.late_fusion_dataset_v2x import \
14 | LateFusionDatasetV2X
15 | from opencood.data_utils.datasets.late_fusion_dataset import \
16 | LateFusionDataset
17 | import numpy as np
18 |
19 | if __name__ == '__main__':
20 | current_path = os.path.dirname(os.path.realpath(__file__))
21 | params = load_yaml(os.path.join(current_path,
22 | '../hypes_yaml/visualization_v2x.yaml'))
23 | output_path = "/GPFS/rhome/yifanlu/OpenCOOD/data_vis/v2x_2.0_new/train"
24 |
25 | opencda_dataset = LateFusionDatasetV2X(params, visualize=True,
26 | train=False)
27 |     dataset_len = len(opencda_dataset)
28 |     sampled_indices = np.random.permutation(dataset_len)[:100]
29 | subset = Subset(opencda_dataset, sampled_indices)
30 |
31 | data_loader = DataLoader(subset, batch_size=1, num_workers=2,
32 | collate_fn=opencda_dataset.collate_batch_test,
33 | shuffle=False,
34 | pin_memory=False)
35 | vis_gt_box = True
36 | vis_pred_box = False
37 | hypes = params
38 |
39 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
40 |
41 | for i, batch_data in enumerate(data_loader):
42 | print(i)
43 | batch_data = train_utils.to_device(batch_data, device)
44 | gt_box_tensor = opencda_dataset.post_processor.generate_gt_bbx(batch_data)
45 |
46 | vis_save_path = os.path.join(output_path, '3d_%05d.png' % i)
47 | simple_vis.visualize(None,
48 | gt_box_tensor,
49 | batch_data['ego']['origin_lidar'][0],
50 | hypes['postprocess']['gt_range'],
51 | vis_save_path,
52 | method='3d',
53 | vis_gt_box = vis_gt_box,
54 | vis_pred_box = vis_pred_box,
55 | left_hand=False)
56 |
57 | vis_save_path = os.path.join(output_path, 'bev_%05d.png' % i)
58 | simple_vis.visualize(None,
59 | gt_box_tensor,
60 | batch_data['ego']['origin_lidar'][0],
61 | hypes['postprocess']['gt_range'],
62 | vis_save_path,
63 | method='bev',
64 | vis_gt_box = vis_gt_box,
65 | vis_pred_box = vis_pred_box,
66 | left_hand=False)
--------------------------------------------------------------------------------
/opencood/visualization/vis_data_sequence_v2xsim2.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # Author: Yangheng Zhao
3 |
4 | import os
5 | from torch.utils.data import DataLoader, Subset
6 | from opencood.data_utils import datasets
7 | import torch
8 | from opencood.tools import train_utils, inference_utils
9 | from opencood.hypes_yaml.yaml_utils import load_yaml
10 | from opencood.visualization import simple_vis
11 | from opencood.data_utils.datasets import build_dataset
12 | import numpy as np
13 |
14 | if __name__ == '__main__':
15 | current_path = os.path.dirname(os.path.realpath(__file__))
16 | params = load_yaml(os.path.join(current_path,
17 | "../hypes_yaml/visualization_v2x.yaml"))
18 | output_path = "/DB/data/yanghengzhao/coperception/OpenCOODv2/data_vis"
19 | opencda_dataset = build_dataset(params, visualize=True, train=False)
20 |     dataset_len = len(opencda_dataset)
21 |     sampled_indices = np.random.permutation(dataset_len)[:100]
22 | subset = Subset(opencda_dataset, sampled_indices)
23 |
24 | data_loader = DataLoader(subset, batch_size=1, num_workers=2,
25 | collate_fn=opencda_dataset.collate_batch_test,
26 | shuffle=False,
27 | pin_memory=False)
28 | vis_gt_box = True
29 | vis_pred_box = False
30 | hypes = params
31 |
32 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
33 |
34 | for i, batch_data in enumerate(data_loader):
35 | print(i)
36 | batch_data = train_utils.to_device(batch_data, device)
37 | gt_box_tensor = opencda_dataset.post_processor.generate_gt_bbx(batch_data)
38 |
39 | vis_save_path = os.path.join(output_path, '3d_%05d.png' % i)
40 | simple_vis.visualize({},
41 | batch_data['ego']['origin_lidar'][0],
42 | hypes['postprocess']['gt_range'],
43 | vis_save_path,
44 | method='3d',
45 | left_hand=False)
46 |
47 | vis_save_path = os.path.join(output_path, 'bev_%05d.png' % i)
48 | simple_vis.visualize({},
49 | batch_data['ego']['origin_lidar'][0],
50 | hypes['postprocess']['gt_range'],
51 | vis_save_path,
52 | method='bev',
53 | left_hand=False)
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy
2 | cython
3 | tensorboardX
4 | shapely
5 | einops
6 | imageio
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # Author: Runsheng Xu
3 | # License: TDG-Attribution-NonCommercial-NoDistrib
4 |
5 |
6 | from os.path import dirname, realpath
7 | from setuptools import setup, find_packages, Distribution
8 | from opencood.version import __version__
9 |
10 |
11 | def _read_requirements_file():
12 | """Return the elements in requirements.txt."""
13 | req_file_path = '%s/requirements.txt' % dirname(realpath(__file__))
14 | with open(req_file_path) as f:
15 | return [line.strip() for line in f]
16 |
17 |
18 | setup(
19 | name='OpenCOOD',
20 | version=__version__,
21 | packages=find_packages(),
22 | license='MIT',
23 | author='Runsheng Xu, Hao Xiang, Yifan Lu',
24 | author_email='yifan_lu@sjtu.edu.cn',
25 | description='An opensource pytorch framework for autonomous driving '
26 | 'cooperative detection',
27 | long_description=open("README.md").read(),
28 | install_requires=_read_requirements_file(),
29 | )
30 |
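
For a development setup, the usual pattern is to install the requirements and then the package itself in editable mode (shown as comments; the project's installation docs are the authoritative reference):

```python
# pip install -r requirements.txt
# python setup.py develop        # or: pip install -e .
```
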
--------------------------------------------------------------------------------