├── README.md ├── examples ├── AutoCls2D_Cifar100 │ ├── Baseline │ │ ├── experiment.py │ │ ├── final_info.json │ │ └── launcher.sh │ └── HARCNet │ │ ├── experiment.py │ │ ├── harcnet.py │ │ ├── idea.json │ │ ├── launcher.sh │ │ └── res │ │ └── final_info.json ├── AutoCls3D_ModelNet40 │ ├── Baseline │ │ ├── data_transforms.py │ │ ├── experiment.py │ │ ├── final_info.json │ │ ├── launcher.sh │ │ └── metrics.py │ └── HIRE-Net │ │ ├── data_transforms.py │ │ ├── experiment.py │ │ ├── idea.json │ │ ├── launcher.sh │ │ ├── metrics.py │ │ └── res │ │ └── final_info.json ├── AutoClsSST_SST-2 │ ├── Baseline │ │ ├── experiment.py │ │ ├── final_info.json │ │ └── launcher.sh │ └── Transformer-Hybrid-Augmentation-Sentiment │ │ ├── experiment.py │ │ ├── idea.json │ │ ├── launcher.sh │ │ ├── psycholinguistic_utils.py │ │ └── res │ │ ├── config.json │ │ └── final_info.json ├── AutoEAP_UMI-STARR-seq │ ├── Baseline │ │ ├── config │ │ │ └── config-conv-117.json │ │ ├── experiment.py │ │ ├── final_info.json │ │ └── launcher.sh │ └── HyenaMSTA+ │ │ ├── config │ │ └── config-conv-117.json │ │ ├── experiment.py │ │ ├── hyenamsta_model.py │ │ ├── idea.json │ │ ├── launcher.sh │ │ └── res │ │ └── final_info.json ├── AutoMolecule3D_MD17 │ ├── Baseline │ │ ├── examples │ │ │ └── ViSNet-MD17.yml │ │ ├── experiment.py │ │ ├── final_info.json │ │ ├── launcher.sh │ │ ├── metrics.py │ │ └── visnet │ │ │ ├── data.py │ │ │ ├── datasets │ │ │ ├── __init__.py │ │ │ ├── chignolin.py │ │ │ ├── md17.py │ │ │ ├── md22.py │ │ │ ├── molecule3d.py │ │ │ ├── qm9.py │ │ │ └── rmd17.py │ │ │ ├── models │ │ │ ├── __init__.py │ │ │ ├── output_modules.py │ │ │ └── utils.py │ │ │ ├── priors.py │ │ │ └── utils.py │ └── HEDGE-Net │ │ ├── examples │ │ └── ViSNet-MD17.yml │ │ ├── experiment.py │ │ ├── idea.json │ │ ├── launcher.sh │ │ ├── metrics.py │ │ ├── res │ │ ├── final_info.json │ │ ├── input.yaml │ │ └── splits.npz │ │ └── visnet │ │ ├── data.py │ │ ├── datasets │ │ ├── __init__.py │ │ ├── chignolin.py │ │ ├── 
md17.py │ │ ├── md22.py │ │ ├── molecule3d.py │ │ ├── qm9.py │ │ └── rmd17.py │ │ ├── models │ │ ├── __init__.py │ │ ├── output_modules.py │ │ └── utils.py │ │ ├── priors.py │ │ └── utils.py ├── AutoPCDet_Once │ ├── Baseline │ │ ├── README.md │ │ ├── final_infos.json │ │ ├── launcher.sh │ │ ├── pcdet │ │ │ ├── __init__.py │ │ │ ├── config.py │ │ │ ├── datasets │ │ │ │ ├── __init__.py │ │ │ │ ├── augmentor │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── augmentor_utils.py │ │ │ │ │ ├── data_augmentor.py │ │ │ │ │ └── database_sampler.py │ │ │ │ ├── dataset.py │ │ │ │ ├── once │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── once_dataset.py │ │ │ │ │ ├── once_eval │ │ │ │ │ │ ├── eval_utils.py │ │ │ │ │ │ ├── evaluation.py │ │ │ │ │ │ └── iou_utils.py │ │ │ │ │ └── once_toolkits.py │ │ │ │ └── processor │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── data_processor.py │ │ │ │ │ └── point_feature_encoder.py │ │ │ ├── models │ │ │ │ ├── __init__.py │ │ │ │ ├── backbones_2d │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── base_bev_backbone.py │ │ │ │ │ └── map_to_bev │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ └── height_compression.py │ │ │ │ ├── backbones_3d │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── pfe │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ └── voxel_set_abstraction.py │ │ │ │ │ ├── spconv_backbone.py │ │ │ │ │ └── vfe │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── mean_vfe.py │ │ │ │ │ │ └── vfe_template.py │ │ │ │ ├── dense_heads │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── center_head.py │ │ │ │ ├── detectors │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── centerpoint.py │ │ │ │ │ └── detector3d_template.py │ │ │ │ └── model_utils │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── centernet_utils.py │ │ │ │ │ └── model_nms_utils.py │ │ │ ├── ops │ │ │ │ ├── __init__.py │ │ │ │ ├── bev_pool │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── bev_pool.py │ │ │ │ │ ├── bev_pool_ext.cpython-39-x86_64-linux-gnu.so │ │ │ │ │ └── src │ │ │ │ │ │ ├── bev_pool.cpp │ │ │ │ │ │ └── bev_pool_cuda.cu │ │ │ │ ├── ingroup_inds │ │ │ │ │ ├── 
ingroup_inds_cuda.cpython-39-x86_64-linux-gnu.so │ │ │ │ │ ├── ingroup_inds_op.py │ │ │ │ │ └── src │ │ │ │ │ │ ├── error.cuh │ │ │ │ │ │ ├── ingroup_inds.cpp │ │ │ │ │ │ └── ingroup_inds_kernel.cu │ │ │ │ ├── iou3d_nms │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── iou3d_nms_cuda.cpython-39-x86_64-linux-gnu.so │ │ │ │ │ ├── iou3d_nms_utils.py │ │ │ │ │ └── src │ │ │ │ │ │ ├── iou3d_cpu.cpp │ │ │ │ │ │ ├── iou3d_cpu.h │ │ │ │ │ │ ├── iou3d_nms.cpp │ │ │ │ │ │ ├── iou3d_nms.h │ │ │ │ │ │ ├── iou3d_nms_api.cpp │ │ │ │ │ │ └── iou3d_nms_kernel.cu │ │ │ │ ├── pointnet2 │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── pointnet2_batch │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── pointnet2_batch_cuda.cpython-39-x86_64-linux-gnu.so │ │ │ │ │ │ ├── pointnet2_modules.py │ │ │ │ │ │ ├── pointnet2_utils.py │ │ │ │ │ │ └── src │ │ │ │ │ │ │ ├── ball_query.cpp │ │ │ │ │ │ │ ├── ball_query_gpu.cu │ │ │ │ │ │ │ ├── ball_query_gpu.h │ │ │ │ │ │ │ ├── cuda_utils.h │ │ │ │ │ │ │ ├── group_points.cpp │ │ │ │ │ │ │ ├── group_points_gpu.cu │ │ │ │ │ │ │ ├── group_points_gpu.h │ │ │ │ │ │ │ ├── interpolate.cpp │ │ │ │ │ │ │ ├── interpolate_gpu.cu │ │ │ │ │ │ │ ├── interpolate_gpu.h │ │ │ │ │ │ │ ├── pointnet2_api.cpp │ │ │ │ │ │ │ ├── sampling.cpp │ │ │ │ │ │ │ ├── sampling_gpu.cu │ │ │ │ │ │ │ └── sampling_gpu.h │ │ │ │ │ └── pointnet2_stack │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── pointnet2_modules.py │ │ │ │ │ │ ├── pointnet2_stack_cuda.cpython-39-x86_64-linux-gnu.so │ │ │ │ │ │ ├── pointnet2_utils.py │ │ │ │ │ │ ├── src │ │ │ │ │ │ ├── ball_query.cpp │ │ │ │ │ │ ├── ball_query_gpu.cu │ │ │ │ │ │ ├── ball_query_gpu.h │ │ │ │ │ │ ├── cuda_utils.h │ │ │ │ │ │ ├── group_points.cpp │ │ │ │ │ │ ├── group_points_gpu.cu │ │ │ │ │ │ ├── group_points_gpu.h │ │ │ │ │ │ ├── interpolate.cpp │ │ │ │ │ │ ├── interpolate_gpu.cu │ │ │ │ │ │ ├── interpolate_gpu.h │ │ │ │ │ │ ├── pointnet2_api.cpp │ │ │ │ │ │ ├── sampling.cpp │ │ │ │ │ │ ├── sampling_gpu.cu │ │ │ │ │ │ ├── sampling_gpu.h │ │ │ │ │ │ ├── 
vector_pool.cpp │ │ │ │ │ │ ├── vector_pool_gpu.cu │ │ │ │ │ │ ├── vector_pool_gpu.h │ │ │ │ │ │ ├── voxel_query.cpp │ │ │ │ │ │ ├── voxel_query_gpu.cu │ │ │ │ │ │ └── voxel_query_gpu.h │ │ │ │ │ │ ├── voxel_pool_modules.py │ │ │ │ │ │ └── voxel_query_utils.py │ │ │ │ ├── roiaware_pool3d │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── roiaware_pool3d_cuda.cpython-39-x86_64-linux-gnu.so │ │ │ │ │ ├── roiaware_pool3d_utils.py │ │ │ │ │ └── src │ │ │ │ │ │ ├── roiaware_pool3d.cpp │ │ │ │ │ │ └── roiaware_pool3d_kernel.cu │ │ │ │ └── roipoint_pool3d │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── roipoint_pool3d_cuda.cpython-39-x86_64-linux-gnu.so │ │ │ │ │ ├── roipoint_pool3d_utils.py │ │ │ │ │ └── src │ │ │ │ │ ├── roipoint_pool3d.cpp │ │ │ │ │ └── roipoint_pool3d_kernel.cu │ │ │ ├── utils │ │ │ │ ├── __init__.py │ │ │ │ ├── box_utils.py │ │ │ │ ├── common_utils.py │ │ │ │ ├── commu_utils.py │ │ │ │ ├── loss_utils.py │ │ │ │ └── spconv_utils.py │ │ │ └── version.py │ │ └── tools │ │ │ ├── _init_path.py │ │ │ ├── cfgs │ │ │ ├── dataset_configs │ │ │ │ └── once_dataset.yaml │ │ │ └── once_models │ │ │ │ └── centerpoint.yaml │ │ │ ├── eval_utils │ │ │ └── eval_utils.py │ │ │ ├── scripts │ │ │ └── dist_train.sh │ │ │ ├── train.py │ │ │ └── train_utils │ │ │ ├── optimization │ │ │ ├── __init__.py │ │ │ ├── fastai_optim.py │ │ │ └── learning_schedules_fastai.py │ │ │ └── train_utils.py │ └── SARA3D │ │ ├── idea.json │ │ ├── launcher.sh │ │ ├── pcdet │ │ ├── __init__.py │ │ ├── config.py │ │ ├── datasets │ │ │ ├── __init__.py │ │ │ ├── augmentor │ │ │ │ ├── __init__.py │ │ │ │ ├── augmentor_utils.py │ │ │ │ ├── data_augmentor.py │ │ │ │ └── database_sampler.py │ │ │ ├── dataset.py │ │ │ ├── once │ │ │ │ ├── __init__.py │ │ │ │ ├── once_dataset.py │ │ │ │ ├── once_eval │ │ │ │ │ ├── eval_utils.py │ │ │ │ │ ├── evaluation.py │ │ │ │ │ └── iou_utils.py │ │ │ │ └── once_toolkits.py │ │ │ └── processor │ │ │ │ ├── __init__.py │ │ │ │ ├── data_processor.py │ │ │ │ └── point_feature_encoder.py │ 
│ ├── models │ │ │ ├── __init__.py │ │ │ ├── backbones_2d │ │ │ │ ├── __init__.py │ │ │ │ ├── base_bev_backbone.py │ │ │ │ └── map_to_bev │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── height_compression.py │ │ │ ├── backbones_3d │ │ │ │ ├── __init__.py │ │ │ │ ├── pfe │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── voxel_set_abstraction.py │ │ │ │ ├── spconv_backbone.py │ │ │ │ └── vfe │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── mean_vfe.py │ │ │ │ │ ├── resa_vfe.py │ │ │ │ │ └── vfe_template.py │ │ │ ├── dense_heads │ │ │ │ ├── __init__.py │ │ │ │ └── center_head.py │ │ │ ├── detectors │ │ │ │ ├── __init__.py │ │ │ │ ├── centerpoint.py │ │ │ │ ├── detector3d_template.py │ │ │ │ └── sara3d.py │ │ │ └── model_utils │ │ │ │ ├── __init__.py │ │ │ │ ├── aca_utils.py │ │ │ │ ├── centernet_utils.py │ │ │ │ └── model_nms_utils.py │ │ ├── ops │ │ │ ├── __init__.py │ │ │ ├── bev_pool │ │ │ │ ├── __init__.py │ │ │ │ ├── bev_pool.py │ │ │ │ ├── bev_pool_ext.cpython-39-x86_64-linux-gnu.so │ │ │ │ └── src │ │ │ │ │ ├── bev_pool.cpp │ │ │ │ │ └── bev_pool_cuda.cu │ │ │ ├── ingroup_inds │ │ │ │ ├── ingroup_inds_cuda.cpython-39-x86_64-linux-gnu.so │ │ │ │ ├── ingroup_inds_op.py │ │ │ │ └── src │ │ │ │ │ ├── error.cuh │ │ │ │ │ ├── ingroup_inds.cpp │ │ │ │ │ └── ingroup_inds_kernel.cu │ │ │ ├── iou3d_nms │ │ │ │ ├── __init__.py │ │ │ │ ├── iou3d_nms_cuda.cpython-39-x86_64-linux-gnu.so │ │ │ │ ├── iou3d_nms_utils.py │ │ │ │ └── src │ │ │ │ │ ├── iou3d_cpu.cpp │ │ │ │ │ ├── iou3d_cpu.h │ │ │ │ │ ├── iou3d_nms.cpp │ │ │ │ │ ├── iou3d_nms.h │ │ │ │ │ ├── iou3d_nms_api.cpp │ │ │ │ │ └── iou3d_nms_kernel.cu │ │ │ ├── pointnet2 │ │ │ │ ├── __init__.py │ │ │ │ ├── pointnet2_batch │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── pointnet2_batch_cuda.cpython-39-x86_64-linux-gnu.so │ │ │ │ │ ├── pointnet2_modules.py │ │ │ │ │ ├── pointnet2_utils.py │ │ │ │ │ └── src │ │ │ │ │ │ ├── ball_query.cpp │ │ │ │ │ │ ├── ball_query_gpu.cu │ │ │ │ │ │ ├── ball_query_gpu.h │ │ │ │ │ │ ├── cuda_utils.h │ │ │ │ │ │ ├── 
group_points.cpp │ │ │ │ │ │ ├── group_points_gpu.cu │ │ │ │ │ │ ├── group_points_gpu.h │ │ │ │ │ │ ├── interpolate.cpp │ │ │ │ │ │ ├── interpolate_gpu.cu │ │ │ │ │ │ ├── interpolate_gpu.h │ │ │ │ │ │ ├── pointnet2_api.cpp │ │ │ │ │ │ ├── sampling.cpp │ │ │ │ │ │ ├── sampling_gpu.cu │ │ │ │ │ │ └── sampling_gpu.h │ │ │ │ └── pointnet2_stack │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── pointnet2_modules.py │ │ │ │ │ ├── pointnet2_stack_cuda.cpython-39-x86_64-linux-gnu.so │ │ │ │ │ ├── pointnet2_utils.py │ │ │ │ │ ├── src │ │ │ │ │ ├── ball_query.cpp │ │ │ │ │ ├── ball_query_gpu.cu │ │ │ │ │ ├── ball_query_gpu.h │ │ │ │ │ ├── cuda_utils.h │ │ │ │ │ ├── group_points.cpp │ │ │ │ │ ├── group_points_gpu.cu │ │ │ │ │ ├── group_points_gpu.h │ │ │ │ │ ├── interpolate.cpp │ │ │ │ │ ├── interpolate_gpu.cu │ │ │ │ │ ├── interpolate_gpu.h │ │ │ │ │ ├── pointnet2_api.cpp │ │ │ │ │ ├── sampling.cpp │ │ │ │ │ ├── sampling_gpu.cu │ │ │ │ │ ├── sampling_gpu.h │ │ │ │ │ ├── vector_pool.cpp │ │ │ │ │ ├── vector_pool_gpu.cu │ │ │ │ │ ├── vector_pool_gpu.h │ │ │ │ │ ├── voxel_query.cpp │ │ │ │ │ ├── voxel_query_gpu.cu │ │ │ │ │ └── voxel_query_gpu.h │ │ │ │ │ ├── voxel_pool_modules.py │ │ │ │ │ └── voxel_query_utils.py │ │ │ ├── roiaware_pool3d │ │ │ │ ├── __init__.py │ │ │ │ ├── roiaware_pool3d_cuda.cpython-39-x86_64-linux-gnu.so │ │ │ │ ├── roiaware_pool3d_utils.py │ │ │ │ └── src │ │ │ │ │ ├── roiaware_pool3d.cpp │ │ │ │ │ └── roiaware_pool3d_kernel.cu │ │ │ └── roipoint_pool3d │ │ │ │ ├── __init__.py │ │ │ │ ├── roipoint_pool3d_cuda.cpython-39-x86_64-linux-gnu.so │ │ │ │ ├── roipoint_pool3d_utils.py │ │ │ │ └── src │ │ │ │ ├── roipoint_pool3d.cpp │ │ │ │ └── roipoint_pool3d_kernel.cu │ │ ├── utils │ │ │ ├── __init__.py │ │ │ ├── box_utils.py │ │ │ ├── common_utils.py │ │ │ ├── commu_utils.py │ │ │ ├── loss_utils.py │ │ │ └── spconv_utils.py │ │ └── version.py │ │ ├── res │ │ ├── final_info.json │ │ ├── sara3d.yaml │ │ └── train_20250510-105903.log │ │ └── tools │ │ ├── _init_path.py │ │ 
├── cfgs │ │ ├── dataset_configs │ │ │ └── once_dataset.yaml │ │ └── once_models │ │ │ ├── centerpoint.yaml │ │ │ └── sara3d.yaml │ │ ├── eval_utils │ │ └── eval_utils.py │ │ ├── scripts │ │ └── dist_train.sh │ │ ├── train.py │ │ └── train_utils │ │ ├── optimization │ │ ├── __init__.py │ │ ├── fastai_optim.py │ │ └── learning_schedules_fastai.py │ │ └── train_utils.py ├── AutoPower_IEEE39_bus │ ├── AdaptiveHierarchicalGraphTransformer │ │ ├── configs │ │ │ └── test_senseflow_39.yaml │ │ ├── experiment.py │ │ ├── idea.json │ │ ├── launcher.sh │ │ ├── metrics.py │ │ ├── res │ │ │ └── final_info.json │ │ └── src │ │ │ ├── __init__.py │ │ │ ├── dataset │ │ │ └── powerflow_dataset.py │ │ │ ├── oven.py │ │ │ └── utils.py │ └── Baseline │ │ ├── configs │ │ └── test_senseflow_39.yaml │ │ ├── experiment.py │ │ ├── launcher.sh │ │ ├── metrics.py │ │ ├── res │ │ └── final_info.json │ │ └── src │ │ ├── __init__.py │ │ ├── dataset │ │ └── powerflow_dataset.py │ │ ├── oven.py │ │ └── utils.py ├── AutoSeg_VOC12 │ ├── Baseline │ │ ├── .gitignore │ │ ├── LICENSE │ │ ├── README.md │ │ ├── datasets │ │ │ ├── __init__.py │ │ │ ├── cityscapes.py │ │ │ ├── data │ │ │ │ └── train_aug.txt │ │ │ ├── utils.py │ │ │ └── voc.py │ │ ├── launcher.sh │ │ ├── main.py │ │ ├── metrics │ │ │ ├── __init__.py │ │ │ └── stream_metrics.py │ │ ├── network │ │ │ ├── .DS_Store │ │ │ ├── __init__.py │ │ │ ├── _deeplab.py │ │ │ ├── backbone │ │ │ │ ├── __init__.py │ │ │ │ ├── hrnetv2.py │ │ │ │ ├── mobilenetv2.py │ │ │ │ ├── resnet.py │ │ │ │ └── xception.py │ │ │ ├── modeling.py │ │ │ └── utils.py │ │ ├── predict.py │ │ ├── requirements.txt │ │ └── utils │ │ │ ├── __init__.py │ │ │ ├── ext_transforms.py │ │ │ ├── loss.py │ │ │ ├── scheduler.py │ │ │ ├── utils.py │ │ │ └── visualizer.py │ └── EntropyOptimizedAttentionNet │ │ ├── LICENSE │ │ ├── README.md │ │ ├── datasets │ │ ├── __init__.py │ │ ├── cityscapes.py │ │ ├── data │ │ │ └── train_aug.txt │ │ ├── utils.py │ │ └── voc.py │ │ ├── idea.json │ │ ├── 
launcher.sh │ │ ├── main.py │ │ ├── metrics │ │ ├── __init__.py │ │ └── stream_metrics.py │ │ ├── network │ │ ├── .DS_Store │ │ ├── __init__.py │ │ ├── _deeplab.py │ │ ├── backbone │ │ │ ├── __init__.py │ │ │ ├── hrnetv2.py │ │ │ ├── mobilenetv2.py │ │ │ ├── resnet.py │ │ │ └── xception.py │ │ ├── enhanced_deeplab.py │ │ ├── enhanced_modules.py │ │ ├── modeling.py │ │ └── utils.py │ │ ├── predict.py │ │ ├── requirements.txt │ │ ├── res │ │ └── final_info.json │ │ └── utils │ │ ├── __init__.py │ │ ├── ext_transforms.py │ │ ├── loss.py │ │ ├── scheduler.py │ │ ├── utils.py │ │ └── visualizer.py ├── AutoTPPR_Perturb-seq │ ├── Baseline │ │ ├── experiment.py │ │ ├── final_info.json │ │ └── launcher.sh │ └── GEARS_LocalRegularization │ │ ├── experiment.py │ │ ├── idea.json │ │ ├── launcher.sh │ │ └── res │ │ └── final_info.json └── AutoTSF_ETTh1 │ ├── AdaptiveHybridDFTNet │ ├── data_provider │ │ ├── __init__.py │ │ ├── data_factory.py │ │ └── data_loader.py │ ├── exp │ │ └── exp_main.py │ ├── experiment.py │ ├── idea.json │ ├── launcher.sh │ ├── res │ │ └── final_info.json │ └── utils │ │ ├── masking.py │ │ ├── metrics.py │ │ ├── timefeatures.py │ │ └── tools.py │ └── Baseline │ ├── data_provider │ ├── __init__.py │ ├── data_factory.py │ └── data_loader.py │ ├── exp │ └── exp_main.py │ ├── experiment.py │ ├── launcher.sh │ ├── res │ └── final_info.json │ └── utils │ ├── masking.py │ ├── metrics.py │ ├── timefeatures.py │ └── tools.py └── images ├── framework.png └── novelseek.png /examples/AutoCls2D_Cifar100/Baseline/final_info.json: -------------------------------------------------------------------------------- 1 | {"cifar100": {"means": {"best_acc": 0.8120, "epoch": 190}}} -------------------------------------------------------------------------------- /examples/AutoCls2D_Cifar100/Baseline/launcher.sh: -------------------------------------------------------------------------------- 1 | python experiment.py \ 2 | --num_workers 4 \ 3 | --out_dir run_1 \ 4 | 
--in_channels 3 \ 5 | --data_root ./datasets/cifar100/ \ 6 | --max_epoch 200 \ 7 | --val_per_epoch 5 -------------------------------------------------------------------------------- /examples/AutoCls2D_Cifar100/HARCNet/launcher.sh: -------------------------------------------------------------------------------- 1 | python experiment.py \ 2 | --num_workers 4 \ 3 | --out_dir run_1 \ 4 | --in_channels 3 \ 5 | --data_root ./datasets/cifar100 \ 6 | --val_per_epoch 5 -------------------------------------------------------------------------------- /examples/AutoCls2D_Cifar100/HARCNet/res/final_info.json: -------------------------------------------------------------------------------- 1 | {"cifar100": {"means": {"best_acc": 0.833299994468689, "epoch": 199}, "config": {"alpha": 0.6, "beta": 0.6, "gamma": 2.2, "memory_size": 5, "decay_rate": 2.0, "consistency_weight": 0.05, "auxiliary_weight": 0.05, "use_adaptive_aug": true, "use_temporal_consistency": true}}} -------------------------------------------------------------------------------- /examples/AutoCls3D_ModelNet40/Baseline/data_transforms.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def random_point_dropout(batch_pc, max_dropout_ratio=0.875): 5 | ''' batch_pc: BxNx3 ''' 6 | for b in range(batch_pc.shape[0]): 7 | dropout_ratio = np.random.random()*max_dropout_ratio # 0~0.875 8 | drop_idx = np.where(np.random.random((batch_pc.shape[1]))<=dropout_ratio)[0] 9 | if len(drop_idx)>0: 10 | batch_pc[b,drop_idx,:] = batch_pc[b,0,:] # set to the first point 11 | return batch_pc 12 | 13 | def random_scale_point_cloud(batch_data, scale_low=0.8, scale_high=1.25): 14 | """ Randomly scale the point cloud. Scale is per point cloud. 
15 | Input: 16 | BxNx3 array, original batch of point clouds 17 | Return: 18 | BxNx3 array, scaled batch of point clouds 19 | """ 20 | B, N, C = batch_data.shape 21 | scales = np.random.uniform(scale_low, scale_high, B) 22 | for batch_index in range(B): 23 | batch_data[batch_index,:,:] *= scales[batch_index] 24 | return batch_data 25 | 26 | def shift_point_cloud(batch_data, shift_range=0.1): 27 | """ Randomly shift point cloud. Shift is per point cloud. 28 | Input: 29 | BxNx3 array, original batch of point clouds 30 | Return: 31 | BxNx3 array, shifted batch of point clouds 32 | """ 33 | B, N, C = batch_data.shape 34 | shifts = np.random.uniform(-shift_range, shift_range, (B,3)) 35 | for batch_index in range(B): 36 | batch_data[batch_index,:,:] += shifts[batch_index,:] 37 | return batch_data -------------------------------------------------------------------------------- /examples/AutoCls3D_ModelNet40/Baseline/final_info.json: -------------------------------------------------------------------------------- 1 | { 2 | "modelnet40":{ 3 | "means":{ 4 | "best_oa": 91.0, 5 | "best_acc": 87.6, 6 | "epoch": 120 7 | } 8 | } 9 | } -------------------------------------------------------------------------------- /examples/AutoCls3D_ModelNet40/Baseline/launcher.sh: -------------------------------------------------------------------------------- 1 | python experiment.py \ 2 | --out_dir run_0 \ 3 | --data_root ./datasets/modelnet40 \ 4 | --max_epoch 200 \ 5 | --val_per_epoch 5 6 | -------------------------------------------------------------------------------- /examples/AutoCls3D_ModelNet40/HIRE-Net/data_transforms.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def random_point_dropout(batch_pc, max_dropout_ratio=0.875): 5 | ''' batch_pc: BxNx3 ''' 6 | for b in range(batch_pc.shape[0]): 7 | dropout_ratio = np.random.random()*max_dropout_ratio # 0~0.875 8 | drop_idx = 
np.where(np.random.random((batch_pc.shape[1]))<=dropout_ratio)[0] 9 | if len(drop_idx)>0: 10 | batch_pc[b,drop_idx,:] = batch_pc[b,0,:] # set to the first point 11 | return batch_pc 12 | 13 | def random_scale_point_cloud(batch_data, scale_low=0.8, scale_high=1.25): 14 | """ Randomly scale the point cloud. Scale is per point cloud. 15 | Input: 16 | BxNx3 array, original batch of point clouds 17 | Return: 18 | BxNx3 array, scaled batch of point clouds 19 | """ 20 | B, N, C = batch_data.shape 21 | scales = np.random.uniform(scale_low, scale_high, B) 22 | for batch_index in range(B): 23 | batch_data[batch_index,:,:] *= scales[batch_index] 24 | return batch_data 25 | 26 | def shift_point_cloud(batch_data, shift_range=0.1): 27 | """ Randomly shift point cloud. Shift is per point cloud. 28 | Input: 29 | BxNx3 array, original batch of point clouds 30 | Return: 31 | BxNx3 array, shifted batch of point clouds 32 | """ 33 | B, N, C = batch_data.shape 34 | shifts = np.random.uniform(-shift_range, shift_range, (B,3)) 35 | for batch_index in range(B): 36 | batch_data[batch_index,:,:] += shifts[batch_index,:] 37 | return batch_data -------------------------------------------------------------------------------- /examples/AutoCls3D_ModelNet40/HIRE-Net/launcher.sh: -------------------------------------------------------------------------------- 1 | python experiment.py \ 2 | --out_dir run_1 \ 3 | --data_root ./datasets/modelnet40 \ 4 | --max_epoch 200 \ 5 | --val_per_epoch 5 6 | -------------------------------------------------------------------------------- /examples/AutoCls3D_ModelNet40/HIRE-Net/res/final_info.json: -------------------------------------------------------------------------------- 1 | {"modelnet40": {"means": {"best_oa": 95.50243377685547, "best_acc": 92.41918182373047, "epoch": 70}}} -------------------------------------------------------------------------------- /examples/AutoClsSST_SST-2/Baseline/final_info.json: 
-------------------------------------------------------------------------------- 1 | {"sentiment": {"means": {"best_acc": 0.9105504587155964}}} -------------------------------------------------------------------------------- /examples/AutoClsSST_SST-2/Baseline/launcher.sh: -------------------------------------------------------------------------------- 1 | python experiment.py -------------------------------------------------------------------------------- /examples/AutoClsSST_SST-2/Transformer-Hybrid-Augmentation-Sentiment/launcher.sh: -------------------------------------------------------------------------------- 1 | python experiment.py -------------------------------------------------------------------------------- /examples/AutoClsSST_SST-2/Transformer-Hybrid-Augmentation-Sentiment/res/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "max_seq_len": 50, 3 | "epochs": 3, 4 | "batch_size": 32, 5 | "learning_rate": 2e-05, 6 | "patience": 1, 7 | "max_grad_norm": 10.0, 8 | "warmup_ratio": 0.1, 9 | "model_path": "./hug_ckpts/BERT_ckpt", 10 | "num_labels": 2, 11 | "if_save_model": true, 12 | "out_dir": "run_1", 13 | "use_hybrid_augmentation": true, 14 | "sigma": 0.1, 15 | "alpha": 0.5, 16 | "gamma": 0.1, 17 | "evaluate_adversarial": true, 18 | "adversarial_types": [ 19 | "sarcasm", 20 | "negation", 21 | "polysemy" 22 | ] 23 | } -------------------------------------------------------------------------------- /examples/AutoClsSST_SST-2/Transformer-Hybrid-Augmentation-Sentiment/res/final_info.json: -------------------------------------------------------------------------------- 1 | { 2 | "sentiment": { 3 | "means": { 4 | "best_acc": 0.9346512904997254, 5 | "best_f1": 0.934620573857732, 6 | "best_auc": 0.9836853202864146 7 | } 8 | } 9 | } -------------------------------------------------------------------------------- /examples/AutoEAP_UMI-STARR-seq/Baseline/config/config-conv-117.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "batch_size": 64, 3 | "encode": "one-hot", 4 | "epochs": 100, 5 | "early_stop": 20, 6 | "lr": 0.001, 7 | "convolution_layers": { 8 | "n_layers": 4, 9 | "filters": [1024, 512, 256, 128], 10 | "kernel_sizes": [8, 16, 32, 64] 11 | }, 12 | "transformer_layers": { 13 | "n_layers": 0, 14 | "attn_key_dim": [16, 16, 16], 15 | "attn_heads": [2048, 2048, 2048] 16 | }, 17 | "n_dense_layer": 1, 18 | "dense_neurons1": 64, 19 | "dropout_conv": "yes", 20 | "dropout_prob": 0.4, 21 | "pad": "same" 22 | } 23 | -------------------------------------------------------------------------------- /examples/AutoEAP_UMI-STARR-seq/Baseline/final_info.json: -------------------------------------------------------------------------------- 1 | { 2 | "AutoDNA":{ 3 | "means":{ 4 | "PCC(Dev)": 0.52, 5 | "PCC(Hk)": 0.65 6 | } 7 | } 8 | } 9 | -------------------------------------------------------------------------------- /examples/AutoEAP_UMI-STARR-seq/Baseline/launcher.sh: -------------------------------------------------------------------------------- 1 | python experiment.py --out_dir $1 > $1/train.log 2>&1 2 | -------------------------------------------------------------------------------- /examples/AutoEAP_UMI-STARR-seq/HyenaMSTA+/config/config-conv-117.json: -------------------------------------------------------------------------------- 1 | { 2 | "batch_size": 64, 3 | "encode": "one-hot", 4 | "epochs": 100, 5 | "early_stop": 20, 6 | "lr": 0.001, 7 | "convolution_layers": { 8 | "n_layers": 4, 9 | "filters": [1024, 512, 256, 128], 10 | "kernel_sizes": [8, 16, 32, 64] 11 | }, 12 | "transformer_layers": { 13 | "n_layers": 0, 14 | "attn_key_dim": [16, 16, 16], 15 | "attn_heads": [2048, 2048, 2048] 16 | }, 17 | "n_dense_layer": 1, 18 | "dense_neurons1": 64, 19 | "dropout_conv": "yes", 20 | "dropout_prob": 0.4, 21 | "pad": "same" 22 | } 23 | 
-------------------------------------------------------------------------------- /examples/AutoEAP_UMI-STARR-seq/HyenaMSTA+/launcher.sh: -------------------------------------------------------------------------------- 1 | python experiment.py --out_dir $1 > $1/train.log 2>&1 2 | -------------------------------------------------------------------------------- /examples/AutoEAP_UMI-STARR-seq/HyenaMSTA+/res/final_info.json: -------------------------------------------------------------------------------- 1 | { 2 | "AutoDNA": { 3 | "means": { 4 | "PCC(Dev)": "0.71", 5 | "PCC(Hk)": "0.79" 6 | } 7 | } 8 | } -------------------------------------------------------------------------------- /examples/AutoMolecule3D_MD17/Baseline/examples/ViSNet-MD17.yml: -------------------------------------------------------------------------------- 1 | load_model: null 2 | 3 | # training settings 4 | num_epochs: 1000 5 | lr_warmup_steps: 1000 6 | lr: 0.0004 7 | lr_patience: 30 8 | lr_min: 1.e-07 9 | lr_factor: 0.8 10 | weight_decay: 0.0 11 | early_stopping_patience: 600 12 | loss_type: MSE 13 | loss_scale_y: 0.05 14 | loss_scale_dy: 1.0 15 | energy_weight: 0.05 16 | force_weight: 0.95 17 | 18 | # dataset specific 19 | dataset: MD17 20 | dataset_arg: aspirin 21 | dataset_root: /path/to/data 22 | derivative: true 23 | split_mode: null 24 | 25 | # dataloader specific 26 | reload: 0 27 | batch_size: 4 28 | inference_batch_size: 16 29 | standardize: true 30 | splits: null 31 | train_size: 950 32 | val_size: 50 33 | test_size: null 34 | num_workers: 12 35 | 36 | # model architecture specific 37 | model: ViSNetBlock 38 | output_model: Scalar 39 | prior_model: null 40 | 41 | # architectural specific 42 | embedding_dimension: 256 43 | num_layers: 9 44 | num_rbf: 32 45 | activation: silu 46 | rbf_type: expnorm 47 | trainable_rbf: false 48 | attn_activation: silu 49 | num_heads: 8 50 | cutoff: 5.0 51 | max_z: 100 52 | max_num_neighbors: 32 53 | reduce_op: add 54 | lmax: 2 55 | vecnorm_type: none 56 | 
trainable_vecnorm: false 57 | vertex_type: None 58 | 59 | # other specific 60 | ngpus: -1 61 | num_nodes: 1 62 | precision: 32 63 | log_dir: aspirin_log 64 | task: train 65 | seed: 1 66 | distributed_backend: ddp 67 | redirect: false 68 | accelerator: gpu 69 | test_interval: 1500 70 | save_interval: 1 71 | out_dir: run_0 -------------------------------------------------------------------------------- /examples/AutoMolecule3D_MD17/Baseline/final_info.json: -------------------------------------------------------------------------------- 1 | { 2 | "AutoMolecule3D":{ 3 | "means":{ 4 | "Scalar MAE": 0.120, 5 | "Forces MAE": 0.157 6 | } 7 | } 8 | } 9 | -------------------------------------------------------------------------------- /examples/AutoMolecule3D_MD17/Baseline/launcher.sh: -------------------------------------------------------------------------------- 1 | python experiment.py --conf examples/ViSNet-MD17.yml --dataset-arg aspirin --dataset-root ./datasets/molecule_data/aspirin_data --out_dir $1 2 | -------------------------------------------------------------------------------- /examples/AutoMolecule3D_MD17/Baseline/metrics.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | def calculate_mae(y_true, y_pred): 4 | 5 | mae = np.abs(y_true - y_pred).mean() 6 | return mae 7 | -------------------------------------------------------------------------------- /examples/AutoMolecule3D_MD17/Baseline/visnet/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | from .chignolin import Chignolin 2 | from .md17 import MD17 3 | from .md22 import MD22 4 | from .molecule3d import Molecule3D 5 | from .qm9 import QM9 6 | from .rmd17 import rMD17 7 | 8 | __all__ = ["Chignolin", "MD17", "MD22", "Molecule3D", "QM9", "rMD17"] 9 | -------------------------------------------------------------------------------- 
/examples/AutoMolecule3D_MD17/Baseline/visnet/datasets/chignolin.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | from ase.units import Bohr, Hartree 4 | from torch_geometric.data import Data, InMemoryDataset 5 | from tqdm import trange 6 | 7 | 8 | class Chignolin(InMemoryDataset): 9 | 10 | self_energies = { 11 | 1: -0.496665677271, 12 | 6: -37.8289474402, 13 | 7: -54.5677547104, 14 | 8: -75.0321126521, 15 | 16: -398.063946327, 16 | } 17 | 18 | def __init__(self, root, transform=None, pre_transform=None): 19 | 20 | super(Chignolin, self).__init__(root, transform, pre_transform) 21 | 22 | self.data, self.slices = torch.load(self.processed_paths[0]) 23 | 24 | @property 25 | def raw_file_names(self): 26 | return [f'chignolin.npz'] 27 | 28 | @property 29 | def processed_file_names(self): 30 | return [f'chignolin.pt'] 31 | 32 | def process(self): 33 | for path, processed_path in zip(self.raw_paths, self.processed_paths): 34 | 35 | data_npz = np.load(path) 36 | concat_z = torch.from_numpy(data_npz["Z"]).long() 37 | concat_positions = torch.from_numpy(data_npz["R"]).float() 38 | energies = torch.from_numpy(data_npz["E"]).float() 39 | concat_forces = torch.from_numpy(data_npz["F"]).float() * Hartree / Bohr 40 | num_atoms = 166 41 | 42 | samples = [] 43 | for index in trange(energies.shape[0]): 44 | z = concat_z[index * num_atoms:(index + 1) * num_atoms] 45 | ref_energy = torch.sum(torch.tensor([self.self_energies[int(atom)] for atom in z])) 46 | pos = concat_positions[index * num_atoms:(index + 1) * num_atoms, :] 47 | y = (energies[index] - ref_energy) * Hartree 48 | # ! 
NOTE: Convert Engrad to Force 49 | dy = -concat_forces[index * num_atoms:(index + 1) * num_atoms, :] 50 | data = Data(z=z, pos=pos, y=y.reshape(1, 1), dy=dy) 51 | 52 | if self.pre_filter is not None: 53 | data = self.pre_filter(data) 54 | 55 | if self.pre_transform is not None: 56 | data = self.pre_transform(data) 57 | 58 | samples.append(data) 59 | 60 | data, slices = self.collate(samples) 61 | torch.save((data, slices), processed_path) -------------------------------------------------------------------------------- /examples/AutoMolecule3D_MD17/Baseline/visnet/datasets/qm9.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch_geometric.datasets import QM9 as QM9_geometric 3 | from torch_geometric.nn.models.schnet import qm9_target_dict 4 | from torch_geometric.transforms import Compose 5 | 6 | 7 | class QM9(QM9_geometric): 8 | def __init__(self, root, transform=None, pre_transform=None, pre_filter=None, dataset_arg=None): 9 | assert dataset_arg is not None, ( 10 | "Please pass the desired property to " 11 | 'train on via "dataset_arg". Available ' 12 | f'properties are {", ".join(qm9_target_dict.values())}.' 
13 | ) 14 | 15 | self.label = dataset_arg 16 | label2idx = dict(zip(qm9_target_dict.values(), qm9_target_dict.keys())) 17 | self.label_idx = label2idx[self.label] 18 | 19 | if transform is None: 20 | transform = self._filter_label 21 | else: 22 | transform = Compose([transform, self._filter_label]) 23 | 24 | super(QM9, self).__init__(root, transform=transform, pre_transform=pre_transform, pre_filter=pre_filter) 25 | 26 | def get_atomref(self, max_z=100): 27 | atomref = self.atomref(self.label_idx) 28 | if atomref is None: 29 | return None 30 | if atomref.size(0) != max_z: 31 | tmp = torch.zeros(max_z).unsqueeze(1) 32 | idx = min(max_z, atomref.size(0)) 33 | tmp[:idx] = atomref[:idx] 34 | return tmp 35 | return atomref 36 | 37 | def _filter_label(self, batch): 38 | batch.y = batch.y[:, self.label_idx].unsqueeze(1) 39 | return batch -------------------------------------------------------------------------------- /examples/AutoMolecule3D_MD17/Baseline/visnet/models/__init__.py: -------------------------------------------------------------------------------- 1 | __all__ = ["ViSNetBlock"] 2 | -------------------------------------------------------------------------------- /examples/AutoMolecule3D_MD17/Baseline/visnet/priors.py: -------------------------------------------------------------------------------- 1 | from abc import ABCMeta, abstractmethod 2 | 3 | import torch 4 | import torch.nn as nn 5 | from pytorch_lightning.utilities import rank_zero_warn 6 | 7 | __all__ = ["Atomref"] 8 | 9 | 10 | class BasePrior(nn.Module, metaclass=ABCMeta): 11 | """ 12 | Base class for prior models. 13 | Derive this class to make custom prior models, which take some arguments and a dataset as input. 14 | As an example, have a look at the `torchmdnet.priors.Atomref` prior. 
class Atomref(BasePrior):
    """
    Atomref prior model.
    When using this in combination with some dataset, the dataset class must implement
    the function `get_atomref`, which returns the atomic reference values as a tensor.
    """

    def __init__(self, max_z=None, dataset=None):
        super(Atomref, self).__init__()
        if max_z is None and dataset is None:
            raise ValueError("Can't instantiate Atomref prior, all arguments are None.")
        if dataset is None:
            # No dataset: start from an all-zero reference table of size max_z.
            table = torch.zeros(max_z, 1)
        else:
            table = dataset.get_atomref()
            if table is None:
                rank_zero_warn(
                    "The atomref returned by the dataset is None, defaulting to zeros with max. "
                    "atomic number 99. Maybe atomref is not defined for the current target."
                )
                table = torch.zeros(100, 1)

        if table.ndim == 1:
            table = table.view(-1, 1)
        # Immutable copy for reset_parameters(); the embedding itself is trainable.
        self.register_buffer("initial_atomref", table)
        self.atomref = nn.Embedding(len(table), 1)
        self.atomref.weight.data.copy_(table)

    def reset_parameters(self):
        """Restore the embedding weights to the initial reference values."""
        self.atomref.weight.data.copy_(self.initial_atomref)

    def get_init_args(self):
        """Constructor arguments needed to rebuild this prior from a checkpoint."""
        return dict(max_z=self.initial_atomref.size(0))

    def forward(self, x, z):
        """Add the per-element reference value for atom types z to predictions x."""
        return x + self.atomref(z)
def calculate_mae(y_true, y_pred):
    """Return the mean absolute error between y_true and y_pred.

    Args:
        y_true: array-like of reference values.
        y_pred: array-like of predicted values, broadcastable against y_true.

    Returns:
        Scalar mean absolute error (numpy floating scalar).
    """
    # np.asarray generalizes the function to plain Python sequences while
    # leaving ndarray inputs untouched (no copy is made for arrays).
    y_true = np.asarray(y_true)
    y_pred = np.asarray(y_pred)
    return np.abs(y_true - y_pred).mean()
# Public dataset registry for visnet: importing this package exposes every
# supported molecular dataset under a single namespace.
from .chignolin import Chignolin
from .md17 import MD17
from .md22 import MD22
from .molecule3d import Molecule3D
from .qm9 import QM9
from .rmd17 import rMD17

# Explicit public API of visnet.datasets.
__all__ = ["Chignolin", "MD17", "MD22", "Molecule3D", "QM9", "rMD17"]
class Chignolin(InMemoryDataset):
    # Chignolin protein dataset (166 atoms per frame) stored as one npz archive.
    # process() subtracts per-atom self energies, converts energies to eV and
    # negates the stored energy gradients to obtain forces.

    # Atomic self energies in Hartree, keyed by atomic number (H, C, N, O, S).
    self_energies = {
        1: -0.496665677271,
        6: -37.8289474402,
        7: -54.5677547104,
        8: -75.0321126521,
        16: -398.063946327,
    }

    def __init__(self, root, transform=None, pre_transform=None):

        super(Chignolin, self).__init__(root, transform, pre_transform)

        # InMemoryDataset guarantees process() has run before this load.
        self.data, self.slices = torch.load(self.processed_paths[0])

    @property
    def raw_file_names(self):
        return [f'chignolin.npz']

    @property
    def processed_file_names(self):
        return [f'chignolin.pt']

    def process(self):
        """Convert the raw npz archive into a collated torch_geometric dataset."""
        for path, processed_path in zip(self.raw_paths, self.processed_paths):

            data_npz = np.load(path)
            concat_z = torch.from_numpy(data_npz["Z"]).long()
            concat_positions = torch.from_numpy(data_npz["R"]).float()
            energies = torch.from_numpy(data_npz["E"]).float()
            # Convert gradients from Hartree/Bohr to eV/Angstrom.
            concat_forces = torch.from_numpy(data_npz["F"]).float() * Hartree / Bohr
            # Frames are stored concatenated; chignolin has a fixed atom count.
            num_atoms = 166

            samples = []
            for index in trange(energies.shape[0]):
                z = concat_z[index * num_atoms:(index + 1) * num_atoms]
                # Sum of per-element reference energies for this frame.
                ref_energy = torch.sum(torch.tensor([self.self_energies[int(atom)] for atom in z]))
                pos = concat_positions[index * num_atoms:(index + 1) * num_atoms, :]
                y = (energies[index] - ref_energy) * Hartree
                # ! NOTE: Convert Engrad to Force
                dy = -concat_forces[index * num_atoms:(index + 1) * num_atoms, :]
                data = Data(z=z, pos=pos, y=y.reshape(1, 1), dy=dy)

                if self.pre_filter is not None:
                    data = self.pre_filter(data)

                if self.pre_transform is not None:
                    data = self.pre_transform(data)

                samples.append(data)

            data, slices = self.collate(samples)
            torch.save((data, slices), processed_path)
13 | ) 14 | 15 | self.label = dataset_arg 16 | label2idx = dict(zip(qm9_target_dict.values(), qm9_target_dict.keys())) 17 | self.label_idx = label2idx[self.label] 18 | 19 | if transform is None: 20 | transform = self._filter_label 21 | else: 22 | transform = Compose([transform, self._filter_label]) 23 | 24 | super(QM9, self).__init__(root, transform=transform, pre_transform=pre_transform, pre_filter=pre_filter) 25 | 26 | def get_atomref(self, max_z=100): 27 | atomref = self.atomref(self.label_idx) 28 | if atomref is None: 29 | return None 30 | if atomref.size(0) != max_z: 31 | tmp = torch.zeros(max_z).unsqueeze(1) 32 | idx = min(max_z, atomref.size(0)) 33 | tmp[:idx] = atomref[:idx] 34 | return tmp 35 | return atomref 36 | 37 | def _filter_label(self, batch): 38 | batch.y = batch.y[:, self.label_idx].unsqueeze(1) 39 | return batch -------------------------------------------------------------------------------- /examples/AutoMolecule3D_MD17/HEDGE-Net/visnet/models/__init__.py: -------------------------------------------------------------------------------- 1 | __all__ = ["ViSNetBlock"] 2 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/final_infos.json: -------------------------------------------------------------------------------- 1 | { 2 | "Once": { 3 | "means": { 4 | "mAP": 64.99 5 | } 6 | } 7 | } -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/launcher.sh: -------------------------------------------------------------------------------- 1 | source activate pcdet 2 | 3 | cd tools 4 | 5 | # Check if $1 exists, if not create the directory 6 | if [ -z "$1" ]; then 7 | echo "Error: Output directory not specified" 8 | exit 1 9 | fi 10 | 11 | if [ ! 
def get_git_commit_number():
    """Return the 7-character git commit hash of the repo containing this package.

    Returns '0000000' when the package is not inside a git checkout, when git
    is not installed, or when the rev-parse command fails.
    """
    package_dir = Path(__file__).parent
    if not (package_dir / '../.git').exists():
        return '0000000'

    try:
        cmd_out = subprocess.run(
            ['git', 'rev-parse', 'HEAD'],
            stdout=subprocess.PIPE,
            stderr=subprocess.DEVNULL,
            # Bug fix: the original ran git in the caller's CWD, so it could
            # report the commit of an unrelated repository (or fail) even
            # though the .git check above is relative to this package.
            cwd=package_dir,
        )
    except OSError:
        # git binary missing or not executable.
        return '0000000'

    if cmd_out.returncode != 0:
        return '0000000'
    return cmd_out.stdout.decode('utf-8')[:7]
def build_dataloader(dataset_cfg, class_names, batch_size, dist, root_path=None, workers=4, seed=None,
                     logger=None, training=True, merge_all_iters_to_one_epoch=False, total_epochs=0):
    """Build the dataset, DataLoader and (optionally distributed) sampler.

    Args:
        dataset_cfg: dataset config; dataset_cfg.DATASET selects the class from
            this module's __all__ registry.
        class_names: detection class names forwarded to the dataset.
        batch_size: samples per GPU.
        dist: whether distributed training/testing is enabled.
        root_path: optional dataset root override.
        workers: DataLoader worker count.
        seed: forwarded to the per-worker init fn for reproducibility.
        logger: logger forwarded to the dataset.
        training: True for the train split (enables shuffling when no sampler).
        merge_all_iters_to_one_epoch: collapse all epochs into one long epoch;
            the dataset must implement merge_all_iters_to_one_epoch.
        total_epochs: epoch count used by merge_all_iters_to_one_epoch.

    Returns:
        (dataset, dataloader, sampler) — sampler is None in non-distributed mode.
    """

    # Look up the dataset class by name from the module-level registry.
    dataset = __all__[dataset_cfg.DATASET](
        dataset_cfg=dataset_cfg,
        class_names=class_names,
        root_path=root_path,
        training=training,
        logger=logger,
    )

    if merge_all_iters_to_one_epoch:
        assert hasattr(dataset, 'merge_all_iters_to_one_epoch')
        dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs)

    if dist:
        if training:
            # Training: stock torch sampler (shuffles per epoch).
            sampler = torch.utils.data.distributed.DistributedSampler(dataset)
        else:
            # Evaluation: local sampler with shuffling disabled for determinism.
            rank, world_size = common_utils.get_dist_info()
            sampler = DistributedSampler(dataset, world_size, rank, shuffle=False)
    else:
        sampler = None
    dataloader = DataLoader(
        dataset, batch_size=batch_size, pin_memory=True, num_workers=workers,
        # Shuffle only when training and no sampler already handles ordering.
        shuffle=(sampler is None) and training, collate_fn=dataset.collate_batch,
        drop_last=False, sampler=sampler, timeout=0, worker_init_fn=partial(common_utils.worker_init_fn, seed=seed)
    )

    return dataset, dataloader, sampler
def compute_split_parts(num_samples, num_parts):
    """Split num_samples into evaluation chunks of num_samples // num_parts each.

    Returns a list of chunk sizes: a single chunk when num_samples < num_parts,
    exactly num_parts equal chunks when the division is exact, and num_parts
    equal chunks plus one remainder chunk otherwise.
    """
    chunk, remainder = divmod(num_samples, num_parts)
    if chunk == 0:
        # Fewer samples than parts: evaluate everything in one chunk.
        return [num_samples]
    if remainder == 0:
        return [chunk] * num_parts
    return [chunk] * num_parts + [remainder]
class PointFeatureEncoder(object):
    """Selects and encodes per-point features according to the dataset config.

    The config names an encoding method on this class (encoding_type); that
    method reports the output feature count when called with points=None and
    otherwise returns (encoded_points, use_lead_xyz).
    """

    def __init__(self, config, point_cloud_range=None):
        super().__init__()
        self.point_encoding_config = config
        # The first three source features must be the raw coordinates.
        assert list(self.point_encoding_config.src_feature_list[0:3]) == ['x', 'y', 'z']
        self.used_feature_list = self.point_encoding_config.used_feature_list
        self.src_feature_list = self.point_encoding_config.src_feature_list
        self.point_cloud_range = point_cloud_range

    @property
    def num_point_features(self):
        """Number of features produced by the configured encoding."""
        encoder = getattr(self, self.point_encoding_config.encoding_type)
        return encoder(points=None)

    def forward(self, data_dict):
        """Encode data_dict['points'] in place and record use_lead_xyz.

        Args:
            data_dict: dict with 'points' of shape (N, 3 + C_in).

        Returns:
            The same dict with 'points' re-encoded to (N, 3 + C_out) and
            'use_lead_xyz' set.
        """
        encoder = getattr(self, self.point_encoding_config.encoding_type)
        data_dict['points'], use_lead_xyz = encoder(data_dict['points'])
        data_dict['use_lead_xyz'] = use_lead_xyz

        # Optionally drop points from sweeps older than the newest max_sweeps
        # distinct timestamps.
        if self.point_encoding_config.get('filter_sweeps', False) and 'timestamp' in self.src_feature_list:
            max_sweeps = self.point_encoding_config.max_sweeps
            ts_col = self.src_feature_list.index('timestamp')
            dt = np.round(data_dict['points'][:, ts_col], 2)
            unique_dt = np.unique(dt)
            max_dt = sorted(unique_dt)[min(len(unique_dt) - 1, max_sweeps - 1)]
            data_dict['points'] = data_dict['points'][dt <= max_dt]

        return data_dict

    def absolute_coordinates_encoding(self, points=None):
        """Keep xyz plus every configured extra feature column, in order."""
        if points is None:
            # Feature-count query mode.
            return len(self.used_feature_list)

        assert points.shape[-1] == len(self.src_feature_list)
        columns = [points[:, 0:3]]
        for feat in self.used_feature_list:
            if feat in ('x', 'y', 'z'):
                continue
            src_col = self.src_feature_list.index(feat)
            columns.append(points[:, src_col:src_col + 1])
        encoded = np.concatenate(columns, axis=1)

        return encoded, True
def load_data_to_gpu(batch_dict):
    """Move array-valued entries of batch_dict onto the GPU, in place.

    Non-ndarray values (except 'camera_imgs') and metadata keys are left
    untouched; 'images' goes through kornia, 'image_shape' stays integer,
    everything else becomes a float CUDA tensor.
    """
    metadata_keys = ('frame_id', 'metadata', 'calib', 'image_paths', 'ori_shape', 'img_process_infos')
    for key, val in batch_dict.items():
        if key == 'camera_imgs':
            batch_dict[key] = val.cuda()
        elif not isinstance(val, np.ndarray):
            continue
        elif key in metadata_keys:
            continue
        elif key == 'images':
            batch_dict[key] = kornia.image_to_tensor(val).float().cuda().contiguous()
        elif key == 'image_shape':
            batch_dict[key] = torch.from_numpy(val).int().cuda()
        else:
            batch_dict[key] = torch.from_numpy(val).float().cuda()
class HeightCompression(nn.Module):
    """Collapses the depth axis of a 3D sparse backbone output into BEV channels."""

    def __init__(self, model_cfg, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        # Expected channel count of the produced BEV feature map (C * D).
        self.num_bev_features = self.model_cfg.NUM_BEV_FEATURES

    def forward(self, batch_dict):
        """Densify the sparse tensor and fold depth into the channel dimension.

        Args:
            batch_dict: must contain 'encoded_spconv_tensor' (sparse tensor
                whose .dense() is (N, C, D, H, W)) and
                'encoded_spconv_tensor_stride'.

        Returns:
            batch_dict with 'spatial_features' of shape (N, C*D, H, W) and
            'spatial_features_stride' copied from the backbone stride.
        """
        sparse_tensor = batch_dict['encoded_spconv_tensor']
        dense = sparse_tensor.dense()
        batch, channels, depth, height, width = dense.shape
        batch_dict['spatial_features'] = dense.view(batch, channels * depth, height, width)
        batch_dict['spatial_features_stride'] = batch_dict['encoded_spconv_tensor_stride']
        return batch_dict
class MeanVFE(VFETemplate):
    """Voxel feature encoder that averages the raw point features in each voxel."""

    def __init__(self, model_cfg, num_point_features, **kwargs):
        super().__init__(model_cfg=model_cfg)
        self.num_point_features = num_point_features

    def get_output_feature_dim(self):
        """Output dim equals the input point feature dim (mean preserves C)."""
        return self.num_point_features

    def forward(self, batch_dict, **kwargs):
        """Compute per-voxel mean features.

        Args:
            batch_dict: with 'voxels' (num_voxels, max_points_per_voxel, C)
                and 'voxel_num_points' (num_voxels).

        Returns:
            batch_dict with 'voxel_features' of shape (num_voxels, C).
        """
        voxel_features = batch_dict['voxels']
        voxel_num_points = batch_dict['voxel_num_points']
        summed = voxel_features.sum(dim=1, keepdim=False)
        # Clamp to 1 so empty voxels divide by one instead of zero.
        counts = torch.clamp_min(voxel_num_points.view(-1, 1), min=1.0).type_as(voxel_features)
        batch_dict['voxel_features'] = (summed / counts).contiguous()
        return batch_dict
def build_detector(model_cfg, num_class, dataset):
    """Instantiate the detector class named by model_cfg.NAME from the registry."""
    detector_cls = __all__[model_cfg.NAME]
    return detector_cls(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
} 19 | return ret_dict, tb_dict, disp_dict 20 | else: 21 | pred_dicts, recall_dicts = self.post_processing(batch_dict) 22 | return pred_dicts, recall_dicts 23 | 24 | def get_training_loss(self): 25 | disp_dict = {} 26 | 27 | loss_rpn, tb_dict = self.dense_head.get_loss() 28 | tb_dict = { 29 | 'loss_rpn': loss_rpn.item(), 30 | **tb_dict 31 | } 32 | 33 | loss = loss_rpn 34 | return loss, tb_dict, disp_dict 35 | 36 | def post_processing(self, batch_dict): 37 | post_process_cfg = self.model_cfg.POST_PROCESSING 38 | batch_size = batch_dict['batch_size'] 39 | final_pred_dict = batch_dict['final_box_dicts'] 40 | recall_dict = {} 41 | for index in range(batch_size): 42 | pred_boxes = final_pred_dict[index]['pred_boxes'] 43 | 44 | recall_dict = self.generate_recall_record( 45 | box_preds=pred_boxes, 46 | recall_dict=recall_dict, batch_index=index, data_dict=batch_dict, 47 | thresh_list=post_process_cfg.RECALL_THRESH_LIST 48 | ) 49 | 50 | return final_pred_dict, recall_dict 51 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/pcdet/models/model_utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Alpha-Innovator/NovelSeek/0787bf8b5a114d0ad2a5539c02bc9c8ef7504173/examples/AutoPCDet_Once/Baseline/pcdet/models/model_utils/__init__.py -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/pcdet/ops/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Alpha-Innovator/NovelSeek/0787bf8b5a114d0ad2a5539c02bc9c8ef7504173/examples/AutoPCDet_Once/Baseline/pcdet/ops/__init__.py -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/pcdet/ops/bev_pool/__init__.py: -------------------------------------------------------------------------------- 1 | 
from .bev_pool import bev_pool -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/pcdet/ops/bev_pool/bev_pool_ext.cpython-39-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Alpha-Innovator/NovelSeek/0787bf8b5a114d0ad2a5539c02bc9c8ef7504173/examples/AutoPCDet_Once/Baseline/pcdet/ops/bev_pool/bev_pool_ext.cpython-39-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/pcdet/ops/ingroup_inds/ingroup_inds_cuda.cpython-39-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Alpha-Innovator/NovelSeek/0787bf8b5a114d0ad2a5539c02bc9c8ef7504173/examples/AutoPCDet_Once/Baseline/pcdet/ops/ingroup_inds/ingroup_inds_cuda.cpython-39-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/pcdet/ops/ingroup_inds/ingroup_inds_op.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | try: 4 | from . 
import ingroup_inds_cuda 5 | # import ingroup_indices 6 | except ImportError: 7 | ingroup_indices = None 8 | print('Can not import ingroup indices') 9 | 10 | ingroup_indices = ingroup_inds_cuda 11 | 12 | from torch.autograd import Function 13 | class IngroupIndicesFunction(Function): 14 | 15 | @staticmethod 16 | def forward(ctx, group_inds): 17 | 18 | out_inds = torch.zeros_like(group_inds) - 1 19 | 20 | ingroup_indices.forward(group_inds, out_inds) 21 | 22 | ctx.mark_non_differentiable(out_inds) 23 | 24 | return out_inds 25 | 26 | @staticmethod 27 | def backward(ctx, g): 28 | 29 | return None 30 | 31 | ingroup_inds = IngroupIndicesFunction.apply -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/pcdet/ops/ingroup_inds/src/error.cuh: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | 4 | #define CHECK_CALL(call) \ 5 | do \ 6 | { \ 7 | const cudaError_t error_code = call; \ 8 | if (error_code != cudaSuccess) \ 9 | { \ 10 | printf("CUDA Error:\n"); \ 11 | printf(" File: %s\n", __FILE__); \ 12 | printf(" Line: %d\n", __LINE__); \ 13 | printf(" Error code: %d\n", error_code); \ 14 | printf(" Error text: %s\n", \ 15 | cudaGetErrorString(error_code)); \ 16 | exit(1); \ 17 | } \ 18 | } while (0) -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/pcdet/ops/ingroup_inds/src/ingroup_inds.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | #define CHECK_CUDA(x) \ 7 | TORCH_CHECK(x.device().is_cuda(), #x, " must be a CUDAtensor ") 8 | #define CHECK_CONTIGUOUS(x) \ 9 | TORCH_CHECK(x.is_contiguous(), #x, " must be contiguous ") 10 | #define CHECK_INPUT(x) \ 11 | CHECK_CUDA(x); \ 12 | CHECK_CONTIGUOUS(x) 13 | 14 | 15 | void ingroup_inds_launcher( 16 | const long *group_inds_data, 17 | long 
*out_inds_data, 18 | int N, 19 | int max_group_id 20 | ); 21 | 22 | 23 | void ingroup_inds_gpu( 24 | at::Tensor group_inds, 25 | at::Tensor out_inds 26 | ); 27 | 28 | void ingroup_inds_gpu( 29 | at::Tensor group_inds, 30 | at::Tensor out_inds 31 | ) { 32 | 33 | CHECK_INPUT(group_inds); 34 | CHECK_INPUT(out_inds); 35 | int N = group_inds.size(0); 36 | int max_group_id = group_inds.max().item().toLong(); 37 | 38 | 39 | long *group_inds_data = group_inds.data_ptr(); 40 | long *out_inds_data = out_inds.data_ptr(); 41 | 42 | ingroup_inds_launcher( 43 | group_inds_data, 44 | out_inds_data, 45 | N, 46 | max_group_id 47 | ); 48 | 49 | } 50 | 51 | 52 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 53 | m.def("forward", &ingroup_inds_gpu, "cuda version of get_inner_win_inds of SST"); 54 | } -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/pcdet/ops/ingroup_inds/src/ingroup_inds_kernel.cu: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include "cuda_fp16.h" 8 | 9 | #define CHECK_CALL(call) \ 10 | do \ 11 | { \ 12 | const cudaError_t error_code = call; \ 13 | if (error_code != cudaSuccess) \ 14 | { \ 15 | printf("CUDA Error:\n"); \ 16 | printf(" File: %s\n", __FILE__); \ 17 | printf(" Line: %d\n", __LINE__); \ 18 | printf(" Error code: %d\n", error_code); \ 19 | printf(" Error text: %s\n", \ 20 | cudaGetErrorString(error_code)); \ 21 | exit(1); \ 22 | } \ 23 | } while (0) 24 | 25 | #define THREADS_PER_BLOCK 256 26 | #define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0)) 27 | 28 | // #define DEBUG 29 | // #define ASSERTION 30 | 31 | __global__ void ingroup_inds_kernel( 32 | const long *group_inds, 33 | long *out_inds, 34 | int *ingroup_counter, 35 | int N 36 | ) { 37 | 38 | int idx = blockIdx.x * blockDim.x + threadIdx.x; 39 | if (idx >= N) return; 40 | long this_group_id = group_inds[idx]; 41 | 42 | int 
cnt = atomicAdd(&ingroup_counter[this_group_id], 1); 43 | out_inds[idx] = cnt; 44 | } 45 | 46 | 47 | void ingroup_inds_launcher( 48 | const long *group_inds, 49 | long *out_inds, 50 | int N, 51 | int max_group_id 52 | ) { 53 | 54 | int *ingroup_counter = NULL; 55 | CHECK_CALL(cudaMalloc(&ingroup_counter, (max_group_id + 1) * sizeof(int))); 56 | CHECK_CALL(cudaMemset(ingroup_counter, 0, (max_group_id + 1) * sizeof(int))); 57 | 58 | dim3 blocks(DIVUP(N, THREADS_PER_BLOCK)); 59 | dim3 threads(THREADS_PER_BLOCK); 60 | 61 | ingroup_inds_kernel<<>>( 62 | group_inds, 63 | out_inds, 64 | ingroup_counter, 65 | N 66 | ); 67 | 68 | cudaFree(ingroup_counter); 69 | 70 | #ifdef DEBUG 71 | CHECK_CALL(cudaGetLastError()); 72 | CHECK_CALL(cudaDeviceSynchronize()); 73 | #endif 74 | 75 | return; 76 | 77 | } -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/pcdet/ops/iou3d_nms/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Alpha-Innovator/NovelSeek/0787bf8b5a114d0ad2a5539c02bc9c8ef7504173/examples/AutoPCDet_Once/Baseline/pcdet/ops/iou3d_nms/__init__.py -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/pcdet/ops/iou3d_nms/iou3d_nms_cuda.cpython-39-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Alpha-Innovator/NovelSeek/0787bf8b5a114d0ad2a5539c02bc9c8ef7504173/examples/AutoPCDet_Once/Baseline/pcdet/ops/iou3d_nms/iou3d_nms_cuda.cpython-39-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/pcdet/ops/iou3d_nms/src/iou3d_cpu.h: -------------------------------------------------------------------------------- 1 | #ifndef IOU3D_CPU_H 2 | #define IOU3D_CPU_H 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | 
9 | int boxes_iou_bev_cpu(at::Tensor boxes_a_tensor, at::Tensor boxes_b_tensor, at::Tensor ans_iou_tensor); 10 | int boxes_aligned_iou_bev_cpu(at::Tensor boxes_a_tensor, at::Tensor boxes_b_tensor, at::Tensor ans_iou_tensor); 11 | #endif 12 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/pcdet/ops/iou3d_nms/src/iou3d_nms.h: -------------------------------------------------------------------------------- 1 | #ifndef IOU3D_NMS_H 2 | #define IOU3D_NMS_H 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | int boxes_aligned_overlap_bev_gpu(at::Tensor boxes_a, at::Tensor boxes_b, at::Tensor ans_overlap); 11 | int boxes_overlap_bev_gpu(at::Tensor boxes_a, at::Tensor boxes_b, at::Tensor ans_overlap); 12 | int paired_boxes_overlap_bev_gpu(at::Tensor boxes_a, at::Tensor boxes_b, at::Tensor ans_overlap); 13 | int boxes_iou_bev_gpu(at::Tensor boxes_a, at::Tensor boxes_b, at::Tensor ans_iou); 14 | int nms_gpu(at::Tensor boxes, at::Tensor keep, float nms_overlap_thresh); 15 | int nms_normal_gpu(at::Tensor boxes, at::Tensor keep, float nms_overlap_thresh); 16 | 17 | #endif 18 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/pcdet/ops/iou3d_nms/src/iou3d_nms_api.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | 7 | #include "iou3d_cpu.h" 8 | #include "iou3d_nms.h" 9 | 10 | 11 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 12 | m.def("boxes_aligned_overlap_bev_gpu", &boxes_aligned_overlap_bev_gpu, "aligned oriented boxes overlap"); 13 | m.def("boxes_overlap_bev_gpu", &boxes_overlap_bev_gpu, "oriented boxes overlap"); 14 | m.def("paired_boxes_overlap_bev_gpu", &paired_boxes_overlap_bev_gpu, "oriented boxes overlap"); 15 | m.def("boxes_iou_bev_gpu", &boxes_iou_bev_gpu, "oriented boxes iou"); 16 | m.def("nms_gpu", &nms_gpu, 
"oriented nms gpu"); 17 | m.def("nms_normal_gpu", &nms_normal_gpu, "nms gpu"); 18 | m.def("boxes_aligned_iou_bev_cpu", &boxes_aligned_iou_bev_cpu, "aligned oriented boxes iou"); 19 | m.def("boxes_iou_bev_cpu", &boxes_iou_bev_cpu, "oriented boxes iou"); 20 | } 21 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/pcdet/ops/pointnet2/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Alpha-Innovator/NovelSeek/0787bf8b5a114d0ad2a5539c02bc9c8ef7504173/examples/AutoPCDet_Once/Baseline/pcdet/ops/pointnet2/__init__.py -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/pcdet/ops/pointnet2/pointnet2_batch/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Alpha-Innovator/NovelSeek/0787bf8b5a114d0ad2a5539c02bc9c8ef7504173/examples/AutoPCDet_Once/Baseline/pcdet/ops/pointnet2/pointnet2_batch/__init__.py -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/pcdet/ops/pointnet2/pointnet2_batch/pointnet2_batch_cuda.cpython-39-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Alpha-Innovator/NovelSeek/0787bf8b5a114d0ad2a5539c02bc9c8ef7504173/examples/AutoPCDet_Once/Baseline/pcdet/ops/pointnet2/pointnet2_batch/pointnet2_batch_cuda.cpython-39-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/pcdet/ops/pointnet2/pointnet2_batch/src/ball_query.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | batch version of ball query, modified from the original implementation of official PointNet++ codes. 
3 | Written by Shaoshuai Shi 4 | All Rights Reserved 2018. 5 | */ 6 | 7 | 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include "ball_query_gpu.h" 13 | 14 | #define CHECK_CUDA(x) do { \ 15 | if (!x.type().is_cuda()) { \ 16 | fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, __LINE__); \ 17 | exit(-1); \ 18 | } \ 19 | } while (0) 20 | #define CHECK_CONTIGUOUS(x) do { \ 21 | if (!x.is_contiguous()) { \ 22 | fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x, __FILE__, __LINE__); \ 23 | exit(-1); \ 24 | } \ 25 | } while (0) 26 | #define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x) 27 | 28 | 29 | int ball_query_wrapper_fast(int b, int n, int m, float radius, int nsample, 30 | at::Tensor new_xyz_tensor, at::Tensor xyz_tensor, at::Tensor idx_tensor) { 31 | CHECK_INPUT(new_xyz_tensor); 32 | CHECK_INPUT(xyz_tensor); 33 | const float *new_xyz = new_xyz_tensor.data(); 34 | const float *xyz = xyz_tensor.data(); 35 | int *idx = idx_tensor.data(); 36 | 37 | ball_query_kernel_launcher_fast(b, n, m, radius, nsample, new_xyz, xyz, idx); 38 | return 1; 39 | } 40 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/pcdet/ops/pointnet2/pointnet2_batch/src/ball_query_gpu.cu: -------------------------------------------------------------------------------- 1 | /* 2 | batch version of ball query, modified from the original implementation of official PointNet++ codes. 3 | Written by Shaoshuai Shi 4 | All Rights Reserved 2018. 
5 | */ 6 | 7 | #include 8 | #include 9 | #include 10 | 11 | #include "ball_query_gpu.h" 12 | #include "cuda_utils.h" 13 | 14 | 15 | __global__ void ball_query_kernel_fast(int b, int n, int m, float radius, int nsample, 16 | const float *__restrict__ new_xyz, const float *__restrict__ xyz, int *__restrict__ idx) { 17 | // new_xyz: (B, M, 3) 18 | // xyz: (B, N, 3) 19 | // output: 20 | // idx: (B, M, nsample) 21 | int bs_idx = blockIdx.y; 22 | int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; 23 | if (bs_idx >= b || pt_idx >= m) return; 24 | 25 | new_xyz += bs_idx * m * 3 + pt_idx * 3; 26 | xyz += bs_idx * n * 3; 27 | idx += bs_idx * m * nsample + pt_idx * nsample; 28 | 29 | float radius2 = radius * radius; 30 | float new_x = new_xyz[0]; 31 | float new_y = new_xyz[1]; 32 | float new_z = new_xyz[2]; 33 | 34 | int cnt = 0; 35 | for (int k = 0; k < n; ++k) { 36 | float x = xyz[k * 3 + 0]; 37 | float y = xyz[k * 3 + 1]; 38 | float z = xyz[k * 3 + 2]; 39 | float d2 = (new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) + (new_z - z) * (new_z - z); 40 | if (d2 < radius2){ 41 | if (cnt == 0){ 42 | for (int l = 0; l < nsample; ++l) { 43 | idx[l] = k; 44 | } 45 | } 46 | idx[cnt] = k; 47 | ++cnt; 48 | if (cnt >= nsample) break; 49 | } 50 | } 51 | } 52 | 53 | 54 | void ball_query_kernel_launcher_fast(int b, int n, int m, float radius, int nsample, \ 55 | const float *new_xyz, const float *xyz, int *idx) { 56 | // new_xyz: (B, M, 3) 57 | // xyz: (B, N, 3) 58 | // output: 59 | // idx: (B, M, nsample) 60 | 61 | cudaError_t err; 62 | 63 | dim3 blocks(DIVUP(m, THREADS_PER_BLOCK), b); // blockIdx.x(col), blockIdx.y(row) 64 | dim3 threads(THREADS_PER_BLOCK); 65 | 66 | ball_query_kernel_fast<<>>(b, n, m, radius, nsample, new_xyz, xyz, idx); 67 | // cudaDeviceSynchronize(); // for using printf in kernel function 68 | err = cudaGetLastError(); 69 | if (cudaSuccess != err) { 70 | fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); 71 | exit(-1); 72 | } 73 | } 74 | 
-------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/pcdet/ops/pointnet2/pointnet2_batch/src/ball_query_gpu.h: -------------------------------------------------------------------------------- 1 | #ifndef _BALL_QUERY_GPU_H 2 | #define _BALL_QUERY_GPU_H 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | int ball_query_wrapper_fast(int b, int n, int m, float radius, int nsample, 10 | at::Tensor new_xyz_tensor, at::Tensor xyz_tensor, at::Tensor idx_tensor); 11 | 12 | void ball_query_kernel_launcher_fast(int b, int n, int m, float radius, int nsample, 13 | const float *xyz, const float *new_xyz, int *idx); 14 | 15 | #endif 16 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/pcdet/ops/pointnet2/pointnet2_batch/src/cuda_utils.h: -------------------------------------------------------------------------------- 1 | #ifndef _CUDA_UTILS_H 2 | #define _CUDA_UTILS_H 3 | 4 | #include 5 | 6 | #define TOTAL_THREADS 1024 7 | #define THREADS_PER_BLOCK 256 8 | #define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0)) 9 | 10 | inline int opt_n_threads(int work_size) { 11 | const int pow_2 = std::log(static_cast(work_size)) / std::log(2.0); 12 | 13 | return max(min(1 << pow_2, TOTAL_THREADS), 1); 14 | } 15 | #endif 16 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/pcdet/ops/pointnet2/pointnet2_batch/src/group_points.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | batch version of point grouping, modified from the original implementation of official PointNet++ codes. 3 | Written by Shaoshuai Shi 4 | All Rights Reserved 2018. 
5 | */ 6 | 7 | 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include "group_points_gpu.h" 13 | 14 | 15 | int group_points_grad_wrapper_fast(int b, int c, int n, int npoints, int nsample, 16 | at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor grad_points_tensor) { 17 | 18 | float *grad_points = grad_points_tensor.data(); 19 | const int *idx = idx_tensor.data(); 20 | const float *grad_out = grad_out_tensor.data(); 21 | 22 | group_points_grad_kernel_launcher_fast(b, c, n, npoints, nsample, grad_out, idx, grad_points); 23 | return 1; 24 | } 25 | 26 | 27 | int group_points_wrapper_fast(int b, int c, int n, int npoints, int nsample, 28 | at::Tensor points_tensor, at::Tensor idx_tensor, at::Tensor out_tensor) { 29 | 30 | const float *points = points_tensor.data(); 31 | const int *idx = idx_tensor.data(); 32 | float *out = out_tensor.data(); 33 | 34 | group_points_kernel_launcher_fast(b, c, n, npoints, nsample, points, idx, out); 35 | return 1; 36 | } 37 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/pcdet/ops/pointnet2/pointnet2_batch/src/group_points_gpu.h: -------------------------------------------------------------------------------- 1 | #ifndef _GROUP_POINTS_GPU_H 2 | #define _GROUP_POINTS_GPU_H 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | 10 | int group_points_wrapper_fast(int b, int c, int n, int npoints, int nsample, 11 | at::Tensor points_tensor, at::Tensor idx_tensor, at::Tensor out_tensor); 12 | 13 | void group_points_kernel_launcher_fast(int b, int c, int n, int npoints, int nsample, 14 | const float *points, const int *idx, float *out); 15 | 16 | int group_points_grad_wrapper_fast(int b, int c, int n, int npoints, int nsample, 17 | at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor grad_points_tensor); 18 | 19 | void group_points_grad_kernel_launcher_fast(int b, int c, int n, int npoints, int nsample, 20 | const float *grad_out, const 
int *idx, float *grad_points); 21 | 22 | #endif 23 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/pcdet/ops/pointnet2/pointnet2_batch/src/interpolate.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | batch version of point interpolation, modified from the original implementation of official PointNet++ codes. 3 | Written by Shaoshuai Shi 4 | All Rights Reserved 2018. 5 | */ 6 | 7 | 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include "interpolate_gpu.h" 16 | 17 | 18 | void three_nn_wrapper_fast(int b, int n, int m, at::Tensor unknown_tensor, 19 | at::Tensor known_tensor, at::Tensor dist2_tensor, at::Tensor idx_tensor) { 20 | const float *unknown = unknown_tensor.data(); 21 | const float *known = known_tensor.data(); 22 | float *dist2 = dist2_tensor.data(); 23 | int *idx = idx_tensor.data(); 24 | 25 | three_nn_kernel_launcher_fast(b, n, m, unknown, known, dist2, idx); 26 | } 27 | 28 | 29 | void three_interpolate_wrapper_fast(int b, int c, int m, int n, 30 | at::Tensor points_tensor, 31 | at::Tensor idx_tensor, 32 | at::Tensor weight_tensor, 33 | at::Tensor out_tensor) { 34 | 35 | const float *points = points_tensor.data(); 36 | const float *weight = weight_tensor.data(); 37 | float *out = out_tensor.data(); 38 | const int *idx = idx_tensor.data(); 39 | 40 | three_interpolate_kernel_launcher_fast(b, c, m, n, points, idx, weight, out); 41 | } 42 | 43 | 44 | void three_interpolate_grad_wrapper_fast(int b, int c, int n, int m, 45 | at::Tensor grad_out_tensor, 46 | at::Tensor idx_tensor, 47 | at::Tensor weight_tensor, 48 | at::Tensor grad_points_tensor) { 49 | 50 | const float *grad_out = grad_out_tensor.data(); 51 | const float *weight = weight_tensor.data(); 52 | float *grad_points = grad_points_tensor.data(); 53 | const int *idx = idx_tensor.data(); 54 | 55 | 
three_interpolate_grad_kernel_launcher_fast(b, c, n, m, grad_out, idx, weight, grad_points); 56 | } 57 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/pcdet/ops/pointnet2/pointnet2_batch/src/interpolate_gpu.h: -------------------------------------------------------------------------------- 1 | #ifndef _INTERPOLATE_GPU_H 2 | #define _INTERPOLATE_GPU_H 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | 10 | void three_nn_wrapper_fast(int b, int n, int m, at::Tensor unknown_tensor, 11 | at::Tensor known_tensor, at::Tensor dist2_tensor, at::Tensor idx_tensor); 12 | 13 | void three_nn_kernel_launcher_fast(int b, int n, int m, const float *unknown, 14 | const float *known, float *dist2, int *idx); 15 | 16 | 17 | void three_interpolate_wrapper_fast(int b, int c, int m, int n, at::Tensor points_tensor, 18 | at::Tensor idx_tensor, at::Tensor weight_tensor, at::Tensor out_tensor); 19 | 20 | void three_interpolate_kernel_launcher_fast(int b, int c, int m, int n, 21 | const float *points, const int *idx, const float *weight, float *out); 22 | 23 | 24 | void three_interpolate_grad_wrapper_fast(int b, int c, int n, int m, at::Tensor grad_out_tensor, 25 | at::Tensor idx_tensor, at::Tensor weight_tensor, at::Tensor grad_points_tensor); 26 | 27 | void three_interpolate_grad_kernel_launcher_fast(int b, int c, int n, int m, const float *grad_out, 28 | const int *idx, const float *weight, float *grad_points); 29 | 30 | #endif 31 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/pcdet/ops/pointnet2/pointnet2_batch/src/pointnet2_api.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "ball_query_gpu.h" 5 | #include "group_points_gpu.h" 6 | #include "sampling_gpu.h" 7 | #include "interpolate_gpu.h" 8 | 9 | 10 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 11 | 
m.def("ball_query_wrapper", &ball_query_wrapper_fast, "ball_query_wrapper_fast"); 12 | 13 | m.def("group_points_wrapper", &group_points_wrapper_fast, "group_points_wrapper_fast"); 14 | m.def("group_points_grad_wrapper", &group_points_grad_wrapper_fast, "group_points_grad_wrapper_fast"); 15 | 16 | m.def("gather_points_wrapper", &gather_points_wrapper_fast, "gather_points_wrapper_fast"); 17 | m.def("gather_points_grad_wrapper", &gather_points_grad_wrapper_fast, "gather_points_grad_wrapper_fast"); 18 | 19 | m.def("farthest_point_sampling_wrapper", &farthest_point_sampling_wrapper, "farthest_point_sampling_wrapper"); 20 | 21 | m.def("three_nn_wrapper", &three_nn_wrapper_fast, "three_nn_wrapper_fast"); 22 | m.def("three_interpolate_wrapper", &three_interpolate_wrapper_fast, "three_interpolate_wrapper_fast"); 23 | m.def("three_interpolate_grad_wrapper", &three_interpolate_grad_wrapper_fast, "three_interpolate_grad_wrapper_fast"); 24 | } 25 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/pcdet/ops/pointnet2/pointnet2_batch/src/sampling.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | batch version of point sampling and gathering, modified from the original implementation of official PointNet++ codes. 3 | Written by Shaoshuai Shi 4 | All Rights Reserved 2018. 
5 | */ 6 | 7 | 8 | #include 9 | #include 10 | #include 11 | #include "sampling_gpu.h" 12 | 13 | 14 | int gather_points_wrapper_fast(int b, int c, int n, int npoints, 15 | at::Tensor points_tensor, at::Tensor idx_tensor, at::Tensor out_tensor){ 16 | const float *points = points_tensor.data(); 17 | const int *idx = idx_tensor.data(); 18 | float *out = out_tensor.data(); 19 | 20 | gather_points_kernel_launcher_fast(b, c, n, npoints, points, idx, out); 21 | return 1; 22 | } 23 | 24 | 25 | int gather_points_grad_wrapper_fast(int b, int c, int n, int npoints, 26 | at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor grad_points_tensor) { 27 | 28 | const float *grad_out = grad_out_tensor.data(); 29 | const int *idx = idx_tensor.data(); 30 | float *grad_points = grad_points_tensor.data(); 31 | 32 | gather_points_grad_kernel_launcher_fast(b, c, n, npoints, grad_out, idx, grad_points); 33 | return 1; 34 | } 35 | 36 | 37 | int farthest_point_sampling_wrapper(int b, int n, int m, 38 | at::Tensor points_tensor, at::Tensor temp_tensor, at::Tensor idx_tensor) { 39 | 40 | const float *points = points_tensor.data(); 41 | float *temp = temp_tensor.data(); 42 | int *idx = idx_tensor.data(); 43 | 44 | farthest_point_sampling_kernel_launcher(b, n, m, points, temp, idx); 45 | return 1; 46 | } 47 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/pcdet/ops/pointnet2/pointnet2_batch/src/sampling_gpu.h: -------------------------------------------------------------------------------- 1 | #ifndef _SAMPLING_GPU_H 2 | #define _SAMPLING_GPU_H 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | 9 | int gather_points_wrapper_fast(int b, int c, int n, int npoints, 10 | at::Tensor points_tensor, at::Tensor idx_tensor, at::Tensor out_tensor); 11 | 12 | void gather_points_kernel_launcher_fast(int b, int c, int n, int npoints, 13 | const float *points, const int *idx, float *out); 14 | 15 | 16 | int 
gather_points_grad_wrapper_fast(int b, int c, int n, int npoints, 17 | at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor grad_points_tensor); 18 | 19 | void gather_points_grad_kernel_launcher_fast(int b, int c, int n, int npoints, 20 | const float *grad_out, const int *idx, float *grad_points); 21 | 22 | 23 | int farthest_point_sampling_wrapper(int b, int n, int m, 24 | at::Tensor points_tensor, at::Tensor temp_tensor, at::Tensor idx_tensor); 25 | 26 | void farthest_point_sampling_kernel_launcher(int b, int n, int m, 27 | const float *dataset, float *temp, int *idxs); 28 | 29 | #endif 30 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/pcdet/ops/pointnet2/pointnet2_stack/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Alpha-Innovator/NovelSeek/0787bf8b5a114d0ad2a5539c02bc9c8ef7504173/examples/AutoPCDet_Once/Baseline/pcdet/ops/pointnet2/pointnet2_stack/__init__.py -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/pcdet/ops/pointnet2/pointnet2_stack/pointnet2_stack_cuda.cpython-39-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Alpha-Innovator/NovelSeek/0787bf8b5a114d0ad2a5539c02bc9c8ef7504173/examples/AutoPCDet_Once/Baseline/pcdet/ops/pointnet2/pointnet2_stack/pointnet2_stack_cuda.cpython-39-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/pcdet/ops/pointnet2/pointnet2_stack/src/ball_query.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | Stacked-batch-data version of ball query, modified from the original implementation of official PointNet++ codes. 3 | Written by Shaoshuai Shi 4 | All Rights Reserved 2019-2020. 
5 | */ 6 | 7 | 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include "ball_query_gpu.h" 13 | 14 | #define CHECK_CUDA(x) do { \ 15 | if (!x.type().is_cuda()) { \ 16 | fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, __LINE__); \ 17 | exit(-1); \ 18 | } \ 19 | } while (0) 20 | #define CHECK_CONTIGUOUS(x) do { \ 21 | if (!x.is_contiguous()) { \ 22 | fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x, __FILE__, __LINE__); \ 23 | exit(-1); \ 24 | } \ 25 | } while (0) 26 | #define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x) 27 | 28 | 29 | int ball_query_wrapper_stack(int B, int M, float radius, int nsample, 30 | at::Tensor new_xyz_tensor, at::Tensor new_xyz_batch_cnt_tensor, 31 | at::Tensor xyz_tensor, at::Tensor xyz_batch_cnt_tensor, at::Tensor idx_tensor) { 32 | CHECK_INPUT(new_xyz_tensor); 33 | CHECK_INPUT(xyz_tensor); 34 | CHECK_INPUT(new_xyz_batch_cnt_tensor); 35 | CHECK_INPUT(xyz_batch_cnt_tensor); 36 | 37 | const float *new_xyz = new_xyz_tensor.data(); 38 | const float *xyz = xyz_tensor.data(); 39 | const int *new_xyz_batch_cnt = new_xyz_batch_cnt_tensor.data(); 40 | const int *xyz_batch_cnt = xyz_batch_cnt_tensor.data(); 41 | int *idx = idx_tensor.data(); 42 | 43 | ball_query_kernel_launcher_stack(B, M, radius, nsample, new_xyz, new_xyz_batch_cnt, xyz, xyz_batch_cnt, idx); 44 | return 1; 45 | } 46 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/pcdet/ops/pointnet2/pointnet2_stack/src/ball_query_gpu.h: -------------------------------------------------------------------------------- 1 | /* 2 | Stacked-batch-data version of ball query, modified from the original implementation of official PointNet++ codes. 3 | Written by Shaoshuai Shi 4 | All Rights Reserved 2019-2020. 
5 | */ 6 | 7 | 8 | #ifndef _STACK_BALL_QUERY_GPU_H 9 | #define _STACK_BALL_QUERY_GPU_H 10 | 11 | #include 12 | #include 13 | #include 14 | #include 15 | 16 | int ball_query_wrapper_stack(int B, int M, float radius, int nsample, 17 | at::Tensor new_xyz_tensor, at::Tensor new_xyz_batch_cnt_tensor, 18 | at::Tensor xyz_tensor, at::Tensor xyz_batch_cnt_tensor, at::Tensor idx_tensor); 19 | 20 | 21 | void ball_query_kernel_launcher_stack(int B, int M, float radius, int nsample, 22 | const float *new_xyz, const int *new_xyz_batch_cnt, const float *xyz, const int *xyz_batch_cnt, int *idx); 23 | 24 | 25 | #endif 26 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/pcdet/ops/pointnet2/pointnet2_stack/src/cuda_utils.h: -------------------------------------------------------------------------------- 1 | #ifndef _STACK_CUDA_UTILS_H 2 | #define _STACK_CUDA_UTILS_H 3 | 4 | #include 5 | 6 | #define THREADS_PER_BLOCK 256 7 | #define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0)) 8 | 9 | #endif 10 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/pcdet/ops/pointnet2/pointnet2_stack/src/group_points.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | Stacked-batch-data version of point grouping, modified from the original implementation of official PointNet++ codes. 3 | Written by Shaoshuai Shi 4 | All Rights Reserved 2019-2020. 
5 | */ 6 | 7 | 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include "group_points_gpu.h" 13 | 14 | #define CHECK_CUDA(x) do { \ 15 | if (!x.type().is_cuda()) { \ 16 | fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, __LINE__); \ 17 | exit(-1); \ 18 | } \ 19 | } while (0) 20 | #define CHECK_CONTIGUOUS(x) do { \ 21 | if (!x.is_contiguous()) { \ 22 | fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x, __FILE__, __LINE__); \ 23 | exit(-1); \ 24 | } \ 25 | } while (0) 26 | #define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x) 27 | 28 | 29 | int group_points_grad_wrapper_stack(int B, int M, int C, int N, int nsample, 30 | at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor idx_batch_cnt_tensor, 31 | at::Tensor features_batch_cnt_tensor, at::Tensor grad_features_tensor) { 32 | 33 | CHECK_INPUT(grad_out_tensor); 34 | CHECK_INPUT(idx_tensor); 35 | CHECK_INPUT(idx_batch_cnt_tensor); 36 | CHECK_INPUT(features_batch_cnt_tensor); 37 | CHECK_INPUT(grad_features_tensor); 38 | 39 | const float *grad_out = grad_out_tensor.data(); 40 | const int *idx = idx_tensor.data(); 41 | const int *idx_batch_cnt = idx_batch_cnt_tensor.data(); 42 | const int *features_batch_cnt = features_batch_cnt_tensor.data(); 43 | float *grad_features = grad_features_tensor.data(); 44 | 45 | group_points_grad_kernel_launcher_stack(B, M, C, N, nsample, grad_out, idx, idx_batch_cnt, features_batch_cnt, grad_features); 46 | return 1; 47 | } 48 | 49 | 50 | int group_points_wrapper_stack(int B, int M, int C, int nsample, 51 | at::Tensor features_tensor, at::Tensor features_batch_cnt_tensor, 52 | at::Tensor idx_tensor, at::Tensor idx_batch_cnt_tensor, at::Tensor out_tensor) { 53 | 54 | CHECK_INPUT(features_tensor); 55 | CHECK_INPUT(features_batch_cnt_tensor); 56 | CHECK_INPUT(idx_tensor); 57 | CHECK_INPUT(idx_batch_cnt_tensor); 58 | CHECK_INPUT(out_tensor); 59 | 60 | const float *features = features_tensor.data(); 61 | const int *idx = idx_tensor.data(); 62 
| const int *features_batch_cnt = features_batch_cnt_tensor.data(); 63 | const int *idx_batch_cnt = idx_batch_cnt_tensor.data(); 64 | float *out = out_tensor.data(); 65 | 66 | group_points_kernel_launcher_stack(B, M, C, nsample, features, features_batch_cnt, idx, idx_batch_cnt, out); 67 | return 1; 68 | } -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/pcdet/ops/pointnet2/pointnet2_stack/src/group_points_gpu.h: -------------------------------------------------------------------------------- 1 | /* 2 | Stacked-batch-data version of point grouping, modified from the original implementation of official PointNet++ codes. 3 | Written by Shaoshuai Shi 4 | All Rights Reserved 2019-2020. 5 | */ 6 | 7 | 8 | #ifndef _STACK_GROUP_POINTS_GPU_H 9 | #define _STACK_GROUP_POINTS_GPU_H 10 | 11 | #include 12 | #include 13 | #include 14 | #include 15 | 16 | 17 | int group_points_wrapper_stack(int B, int M, int C, int nsample, 18 | at::Tensor features_tensor, at::Tensor features_batch_cnt_tensor, 19 | at::Tensor idx_tensor, at::Tensor idx_batch_cnt_tensor, at::Tensor out_tensor); 20 | 21 | void group_points_kernel_launcher_stack(int B, int M, int C, int nsample, 22 | const float *features, const int *features_batch_cnt, const int *idx, const int *idx_batch_cnt, float *out); 23 | 24 | int group_points_grad_wrapper_stack(int B, int M, int C, int N, int nsample, 25 | at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor idx_batch_cnt_tensor, 26 | at::Tensor features_batch_cnt_tensor, at::Tensor grad_features_tensor); 27 | 28 | void group_points_grad_kernel_launcher_stack(int B, int M, int C, int N, int nsample, 29 | const float *grad_out, const int *idx, const int *idx_batch_cnt, const int *features_batch_cnt, float *grad_features); 30 | 31 | #endif 32 | -------------------------------------------------------------------------------- 
/examples/AutoPCDet_Once/Baseline/pcdet/ops/pointnet2/pointnet2_stack/src/interpolate_gpu.h: -------------------------------------------------------------------------------- 1 | #ifndef _INTERPOLATE_GPU_H 2 | #define _INTERPOLATE_GPU_H 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | 10 | void three_nn_wrapper_stack(at::Tensor unknown_tensor, 11 | at::Tensor unknown_batch_cnt_tensor, at::Tensor known_tensor, 12 | at::Tensor known_batch_cnt_tensor, at::Tensor dist2_tensor, at::Tensor idx_tensor); 13 | 14 | 15 | void three_interpolate_wrapper_stack(at::Tensor features_tensor, 16 | at::Tensor idx_tensor, at::Tensor weight_tensor, at::Tensor out_tensor); 17 | 18 | 19 | 20 | void three_interpolate_grad_wrapper_stack(at::Tensor grad_out_tensor, at::Tensor idx_tensor, 21 | at::Tensor weight_tensor, at::Tensor grad_features_tensor); 22 | 23 | 24 | void three_nn_kernel_launcher_stack(int batch_size, int N, int M, const float *unknown, 25 | const int *unknown_batch_cnt, const float *known, const int *known_batch_cnt, 26 | float *dist2, int *idx); 27 | 28 | 29 | void three_interpolate_kernel_launcher_stack(int N, int channels, 30 | const float *features, const int *idx, const float *weight, float *out); 31 | 32 | 33 | 34 | void three_interpolate_grad_kernel_launcher_stack(int N, int channels, const float *grad_out, 35 | const int *idx, const float *weight, float *grad_features); 36 | 37 | 38 | 39 | #endif -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/pcdet/ops/pointnet2/pointnet2_stack/src/pointnet2_api.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "ball_query_gpu.h" 5 | #include "group_points_gpu.h" 6 | #include "sampling_gpu.h" 7 | #include "interpolate_gpu.h" 8 | #include "voxel_query_gpu.h" 9 | #include "vector_pool_gpu.h" 10 | 11 | 12 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 13 | 
m.def("ball_query_wrapper", &ball_query_wrapper_stack, "ball_query_wrapper_stack"); 14 | m.def("voxel_query_wrapper", &voxel_query_wrapper_stack, "voxel_query_wrapper_stack"); 15 | 16 | m.def("farthest_point_sampling_wrapper", &farthest_point_sampling_wrapper, "farthest_point_sampling_wrapper"); 17 | m.def("stack_farthest_point_sampling_wrapper", &stack_farthest_point_sampling_wrapper, "stack_farthest_point_sampling_wrapper"); 18 | 19 | m.def("group_points_wrapper", &group_points_wrapper_stack, "group_points_wrapper_stack"); 20 | m.def("group_points_grad_wrapper", &group_points_grad_wrapper_stack, "group_points_grad_wrapper_stack"); 21 | 22 | m.def("three_nn_wrapper", &three_nn_wrapper_stack, "three_nn_wrapper_stack"); 23 | m.def("three_interpolate_wrapper", &three_interpolate_wrapper_stack, "three_interpolate_wrapper_stack"); 24 | m.def("three_interpolate_grad_wrapper", &three_interpolate_grad_wrapper_stack, "three_interpolate_grad_wrapper_stack"); 25 | 26 | m.def("query_stacked_local_neighbor_idxs_wrapper_stack", &query_stacked_local_neighbor_idxs_wrapper_stack, "query_stacked_local_neighbor_idxs_wrapper_stack"); 27 | m.def("query_three_nn_by_stacked_local_idxs_wrapper_stack", &query_three_nn_by_stacked_local_idxs_wrapper_stack, "query_three_nn_by_stacked_local_idxs_wrapper_stack"); 28 | 29 | m.def("vector_pool_wrapper", &vector_pool_wrapper_stack, "vector_pool_grad_wrapper_stack"); 30 | m.def("vector_pool_grad_wrapper", &vector_pool_grad_wrapper_stack, "vector_pool_grad_wrapper_stack"); 31 | } 32 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/pcdet/ops/pointnet2/pointnet2_stack/src/sampling.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include "sampling_gpu.h" 5 | 6 | #define CHECK_CUDA(x) do { \ 7 | if (!x.type().is_cuda()) { \ 8 | fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, __LINE__); \ 
9 | exit(-1); \ 10 | } \ 11 | } while (0) 12 | #define CHECK_CONTIGUOUS(x) do { \ 13 | if (!x.is_contiguous()) { \ 14 | fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x, __FILE__, __LINE__); \ 15 | exit(-1); \ 16 | } \ 17 | } while (0) 18 | #define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x) 19 | 20 | 21 | int farthest_point_sampling_wrapper(int b, int n, int m, 22 | at::Tensor points_tensor, at::Tensor temp_tensor, at::Tensor idx_tensor) { 23 | 24 | CHECK_INPUT(points_tensor); 25 | CHECK_INPUT(temp_tensor); 26 | CHECK_INPUT(idx_tensor); 27 | 28 | const float *points = points_tensor.data(); 29 | float *temp = temp_tensor.data(); 30 | int *idx = idx_tensor.data(); 31 | 32 | farthest_point_sampling_kernel_launcher(b, n, m, points, temp, idx); 33 | return 1; 34 | } 35 | 36 | 37 | int stack_farthest_point_sampling_wrapper(at::Tensor points_tensor, 38 | at::Tensor temp_tensor, at::Tensor xyz_batch_cnt_tensor, at::Tensor idx_tensor, 39 | at::Tensor num_sampled_points_tensor) { 40 | 41 | CHECK_INPUT(points_tensor); 42 | CHECK_INPUT(temp_tensor); 43 | CHECK_INPUT(idx_tensor); 44 | CHECK_INPUT(xyz_batch_cnt_tensor); 45 | CHECK_INPUT(num_sampled_points_tensor); 46 | 47 | int batch_size = xyz_batch_cnt_tensor.size(0); 48 | int N = points_tensor.size(0); 49 | const float *points = points_tensor.data(); 50 | float *temp = temp_tensor.data(); 51 | int *xyz_batch_cnt = xyz_batch_cnt_tensor.data(); 52 | int *idx = idx_tensor.data(); 53 | int *num_sampled_points = num_sampled_points_tensor.data(); 54 | 55 | stack_farthest_point_sampling_kernel_launcher(N, batch_size, points, temp, xyz_batch_cnt, idx, num_sampled_points); 56 | return 1; 57 | } -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/pcdet/ops/pointnet2/pointnet2_stack/src/sampling_gpu.h: -------------------------------------------------------------------------------- 1 | #ifndef _SAMPLING_GPU_H 2 | #define _SAMPLING_GPU_H 3 | 4 | #include 5 
| #include 6 | #include 7 | 8 | 9 | int farthest_point_sampling_wrapper(int b, int n, int m, 10 | at::Tensor points_tensor, at::Tensor temp_tensor, at::Tensor idx_tensor); 11 | 12 | void farthest_point_sampling_kernel_launcher(int b, int n, int m, 13 | const float *dataset, float *temp, int *idxs); 14 | 15 | int stack_farthest_point_sampling_wrapper( 16 | at::Tensor points_tensor, at::Tensor temp_tensor, at::Tensor xyz_batch_cnt_tensor, 17 | at::Tensor idx_tensor, at::Tensor num_sampled_points_tensor); 18 | 19 | 20 | void stack_farthest_point_sampling_kernel_launcher(int N, int batch_size, 21 | const float *dataset, float *temp, int *xyz_batch_cnt, int *idxs, int *num_sampled_points); 22 | 23 | #endif 24 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/pcdet/ops/pointnet2/pointnet2_stack/src/voxel_query.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include "voxel_query_gpu.h" 9 | 10 | #define CHECK_CUDA(x) do { \ 11 | if (!x.type().is_cuda()) { \ 12 | fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, __LINE__); \ 13 | exit(-1); \ 14 | } \ 15 | } while (0) 16 | #define CHECK_CONTIGUOUS(x) do { \ 17 | if (!x.is_contiguous()) { \ 18 | fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x, __FILE__, __LINE__); \ 19 | exit(-1); \ 20 | } \ 21 | } while (0) 22 | #define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x) 23 | 24 | 25 | int voxel_query_wrapper_stack(int M, int R1, int R2, int R3, int nsample, float radius, 26 | int z_range, int y_range, int x_range, at::Tensor new_xyz_tensor, at::Tensor xyz_tensor, 27 | at::Tensor new_coords_tensor, at::Tensor point_indices_tensor, at::Tensor idx_tensor) { 28 | CHECK_INPUT(new_coords_tensor); 29 | CHECK_INPUT(point_indices_tensor); 30 | CHECK_INPUT(new_xyz_tensor); 31 | CHECK_INPUT(xyz_tensor); 32 | 
33 | const float *new_xyz = new_xyz_tensor.data(); 34 | const float *xyz = xyz_tensor.data(); 35 | const int *new_coords = new_coords_tensor.data(); 36 | const int *point_indices = point_indices_tensor.data(); 37 | int *idx = idx_tensor.data(); 38 | 39 | voxel_query_kernel_launcher_stack(M, R1, R2, R3, nsample, radius, z_range, y_range, x_range, new_xyz, xyz, new_coords, point_indices, idx); 40 | return 1; 41 | } 42 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/pcdet/ops/pointnet2/pointnet2_stack/src/voxel_query_gpu.h: -------------------------------------------------------------------------------- 1 | #ifndef _STACK_VOXEL_QUERY_GPU_H 2 | #define _STACK_VOXEL_QUERY_GPU_H 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | int voxel_query_wrapper_stack(int M, int R1, int R2, int R3, int nsample, float radius, 10 | int z_range, int y_range, int x_range, at::Tensor new_xyz_tensor, at::Tensor xyz_tensor, 11 | at::Tensor new_coords_tensor, at::Tensor point_indices_tensor, at::Tensor idx_tensor); 12 | 13 | 14 | void voxel_query_kernel_launcher_stack(int M, int R1, int R2, int R3, int nsample, 15 | float radius, int z_range, int y_range, int x_range, const float *new_xyz, 16 | const float *xyz, const int *new_coords, const int *point_indices, int *idx); 17 | 18 | 19 | #endif 20 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/pcdet/ops/roiaware_pool3d/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Alpha-Innovator/NovelSeek/0787bf8b5a114d0ad2a5539c02bc9c8ef7504173/examples/AutoPCDet_Once/Baseline/pcdet/ops/roiaware_pool3d/__init__.py -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/pcdet/ops/roiaware_pool3d/roiaware_pool3d_cuda.cpython-39-x86_64-linux-gnu.so: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Alpha-Innovator/NovelSeek/0787bf8b5a114d0ad2a5539c02bc9c8ef7504173/examples/AutoPCDet_Once/Baseline/pcdet/ops/roiaware_pool3d/roiaware_pool3d_cuda.cpython-39-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/pcdet/ops/roipoint_pool3d/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Alpha-Innovator/NovelSeek/0787bf8b5a114d0ad2a5539c02bc9c8ef7504173/examples/AutoPCDet_Once/Baseline/pcdet/ops/roipoint_pool3d/__init__.py -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/pcdet/ops/roipoint_pool3d/roipoint_pool3d_cuda.cpython-39-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Alpha-Innovator/NovelSeek/0787bf8b5a114d0ad2a5539c02bc9c8ef7504173/examples/AutoPCDet_Once/Baseline/pcdet/ops/roipoint_pool3d/roipoint_pool3d_cuda.cpython-39-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/pcdet/ops/roipoint_pool3d/roipoint_pool3d_utils.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from torch.autograd import Function 4 | 5 | from ...utils import box_utils 6 | from . 
import roipoint_pool3d_cuda 7 | 8 | 9 | class RoIPointPool3d(nn.Module): 10 | def __init__(self, num_sampled_points=512, pool_extra_width=1.0): 11 | super().__init__() 12 | self.num_sampled_points = num_sampled_points 13 | self.pool_extra_width = pool_extra_width 14 | 15 | def forward(self, points, point_features, boxes3d): 16 | """ 17 | Args: 18 | points: (B, N, 3) 19 | point_features: (B, N, C) 20 | boxes3d: (B, M, 7), [x, y, z, dx, dy, dz, heading] 21 | 22 | Returns: 23 | pooled_features: (B, M, 512, 3 + C) 24 | pooled_empty_flag: (B, M) 25 | """ 26 | return RoIPointPool3dFunction.apply( 27 | points, point_features, boxes3d, self.pool_extra_width, self.num_sampled_points 28 | ) 29 | 30 | 31 | class RoIPointPool3dFunction(Function): 32 | @staticmethod 33 | def forward(ctx, points, point_features, boxes3d, pool_extra_width, num_sampled_points=512): 34 | """ 35 | Args: 36 | ctx: 37 | points: (B, N, 3) 38 | point_features: (B, N, C) 39 | boxes3d: (B, num_boxes, 7), [x, y, z, dx, dy, dz, heading] 40 | pool_extra_width: 41 | num_sampled_points: 42 | 43 | Returns: 44 | pooled_features: (B, num_boxes, 512, 3 + C) 45 | pooled_empty_flag: (B, num_boxes) 46 | """ 47 | assert points.shape.__len__() == 3 and points.shape[2] == 3 48 | batch_size, boxes_num, feature_len = points.shape[0], boxes3d.shape[1], point_features.shape[2] 49 | pooled_boxes3d = box_utils.enlarge_box3d(boxes3d.view(-1, 7), pool_extra_width).view(batch_size, -1, 7) 50 | 51 | pooled_features = point_features.new_zeros((batch_size, boxes_num, num_sampled_points, 3 + feature_len)) 52 | pooled_empty_flag = point_features.new_zeros((batch_size, boxes_num)).int() 53 | 54 | roipoint_pool3d_cuda.forward( 55 | points.contiguous(), pooled_boxes3d.contiguous(), 56 | point_features.contiguous(), pooled_features, pooled_empty_flag 57 | ) 58 | 59 | return pooled_features, pooled_empty_flag 60 | 61 | @staticmethod 62 | def backward(ctx, grad_out): 63 | raise NotImplementedError 64 | 65 | 66 | if __name__ == '__main__': 
67 | pass 68 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/pcdet/ops/roipoint_pool3d/src/roipoint_pool3d.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #define CHECK_CUDA(x) do { \ 5 | if (!x.type().is_cuda()) { \ 6 | fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, __LINE__); \ 7 | exit(-1); \ 8 | } \ 9 | } while (0) 10 | #define CHECK_CONTIGUOUS(x) do { \ 11 | if (!x.is_contiguous()) { \ 12 | fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x, __FILE__, __LINE__); \ 13 | exit(-1); \ 14 | } \ 15 | } while (0) 16 | #define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x) 17 | 18 | 19 | void roipool3dLauncher(int batch_size, int pts_num, int boxes_num, int feature_in_len, int sampled_pts_num, 20 | const float *xyz, const float *boxes3d, const float *pts_feature, float *pooled_features, int *pooled_empty_flag); 21 | 22 | 23 | int roipool3d_gpu(at::Tensor xyz, at::Tensor boxes3d, at::Tensor pts_feature, at::Tensor pooled_features, at::Tensor pooled_empty_flag){ 24 | // params xyz: (B, N, 3) 25 | // params boxes3d: (B, M, 7) 26 | // params pts_feature: (B, N, C) 27 | // params pooled_features: (B, M, 512, 3+C) 28 | // params pooled_empty_flag: (B, M) 29 | CHECK_INPUT(xyz); 30 | CHECK_INPUT(boxes3d); 31 | CHECK_INPUT(pts_feature); 32 | CHECK_INPUT(pooled_features); 33 | CHECK_INPUT(pooled_empty_flag); 34 | 35 | int batch_size = xyz.size(0); 36 | int pts_num = xyz.size(1); 37 | int boxes_num = boxes3d.size(1); 38 | int feature_in_len = pts_feature.size(2); 39 | int sampled_pts_num = pooled_features.size(2); 40 | 41 | 42 | const float * xyz_data = xyz.data(); 43 | const float * boxes3d_data = boxes3d.data(); 44 | const float * pts_feature_data = pts_feature.data(); 45 | float * pooled_features_data = pooled_features.data(); 46 | int * pooled_empty_flag_data = pooled_empty_flag.data(); 47 | 48 | 
roipool3dLauncher(batch_size, pts_num, boxes_num, feature_in_len, sampled_pts_num, 49 | xyz_data, boxes3d_data, pts_feature_data, pooled_features_data, pooled_empty_flag_data); 50 | 51 | 52 | 53 | return 1; 54 | } 55 | 56 | 57 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 58 | m.def("forward", &roipool3d_gpu, "roipool3d forward (CUDA)"); 59 | } 60 | 61 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/pcdet/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Alpha-Innovator/NovelSeek/0787bf8b5a114d0ad2a5539c02bc9c8ef7504173/examples/AutoPCDet_Once/Baseline/pcdet/utils/__init__.py -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/pcdet/utils/spconv_utils.py: -------------------------------------------------------------------------------- 1 | from typing import Set 2 | 3 | import spconv 4 | if float(spconv.__version__[2:]) >= 2.2: 5 | spconv.constants.SPCONV_USE_DIRECT_TABLE = False 6 | 7 | try: 8 | import spconv.pytorch as spconv 9 | except: 10 | import spconv as spconv 11 | 12 | import torch.nn as nn 13 | 14 | 15 | def find_all_spconv_keys(model: nn.Module, prefix="") -> Set[str]: 16 | """ 17 | Finds all spconv keys that need to have weight's transposed 18 | """ 19 | found_keys: Set[str] = set() 20 | for name, child in model.named_children(): 21 | new_prefix = f"{prefix}.{name}" if prefix != "" else name 22 | 23 | if isinstance(child, spconv.conv.SparseConvolution): 24 | new_prefix = f"{new_prefix}.weight" 25 | found_keys.add(new_prefix) 26 | 27 | found_keys.update(find_all_spconv_keys(child, prefix=new_prefix)) 28 | 29 | return found_keys 30 | 31 | 32 | def replace_feature(out, new_features): 33 | if "replace_feature" in out.__dir__(): 34 | # spconv 2.x behaviour 35 | return out.replace_feature(new_features) 36 | else: 37 | out.features = new_features 38 
| return out 39 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/pcdet/version.py: -------------------------------------------------------------------------------- 1 | __version__ = "0.6.0+8caccce" 2 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/tools/_init_path.py: -------------------------------------------------------------------------------- 1 | import sys 2 | sys.path.insert(0, '../') -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/tools/cfgs/dataset_configs/once_dataset.yaml: -------------------------------------------------------------------------------- 1 | DATASET: 'ONCEDataset' 2 | DATA_PATH: './datasets/once' 3 | CLOUD_DATA_PATH: './datasets/once' 4 | 5 | POINT_CLOUD_RANGE: [-75.2, -75.2, -5.0, 75.2, 75.2, 3.0] 6 | 7 | INFO_PATH: { 8 | 'train': [once_infos_train.pkl], 9 | 'val': [once_infos_val.pkl], 10 | 'test': [once_infos_test.pkl], 11 | } 12 | 13 | DATA_SPLIT: { 14 | 'train': train, 15 | 'test': val 16 | } 17 | 18 | DATA_AUGMENTOR: 19 | DISABLE_AUG_LIST: ['placeholder'] 20 | AUG_CONFIG_LIST: 21 | - NAME: gt_sampling 22 | USE_ROAD_PLANE: False 23 | DB_INFO_PATH: 24 | - once_dbinfos_train.pkl 25 | PREPARE: { 26 | filter_by_min_points: ['Car:5', 'Bus:5', 'Truck:5', 'Pedestrian:5', 'Cyclist:5'], 27 | } 28 | 29 | SAMPLE_GROUPS: ['Car:1', 'Bus:4', 'Truck:3', 'Pedestrian:2', 'Cyclist:2'] 30 | NUM_POINT_FEATURES: 4 31 | REMOVE_EXTRA_WIDTH: [0.0, 0.0, 0.0] 32 | LIMIT_WHOLE_SCENE: True 33 | 34 | - NAME: random_world_flip 35 | ALONG_AXIS_LIST: ['x', 'y'] 36 | 37 | - NAME: random_world_rotation 38 | WORLD_ROT_ANGLE: [-0.78539816, 0.78539816] 39 | 40 | - NAME: random_world_scaling 41 | WORLD_SCALE_RANGE: [0.95, 1.05] 42 | 43 | 44 | POINT_FEATURE_ENCODING: { 45 | encoding_type: absolute_coordinates_encoding, 46 | used_feature_list: ['x', 'y', 'z', 'intensity'], 
47 | src_feature_list: ['x', 'y', 'z', 'intensity'], 48 | } 49 | 50 | 51 | DATA_PROCESSOR: 52 | - NAME: mask_points_and_boxes_outside_range 53 | REMOVE_OUTSIDE_BOXES: True 54 | 55 | - NAME: shuffle_points 56 | SHUFFLE_ENABLED: { 57 | 'train': True, 58 | 'test': False 59 | } 60 | 61 | - NAME: transform_points_to_voxels 62 | VOXEL_SIZE: [0.1, 0.1, 0.2] 63 | MAX_POINTS_PER_VOXEL: 5 64 | MAX_NUMBER_OF_VOXELS: { 65 | 'train': 60000, 66 | 'test': 60000 67 | } -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/Baseline/tools/scripts/dist_train.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -x 4 | NGPUS=$1 5 | PY_ARGS=${@:2} 6 | 7 | while true 8 | do 9 | PORT=$(( ((RANDOM<<15)|RANDOM) % 49152 + 10000 )) 10 | status="$(nc -z 127.0.0.1 $PORT < /dev/null &>/dev/null; echo $?)" 11 | if [ "${status}" != "0" ]; then 12 | break; 13 | fi 14 | done 15 | echo $PORT 16 | 17 | python -m torch.distributed.launch --nproc_per_node=${NGPUS} --rdzv_endpoint=localhost:${PORT} train.py --launcher pytorch ${PY_ARGS} 18 | 19 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/launcher.sh: -------------------------------------------------------------------------------- 1 | conda activate openpcdet 2 | cd tools 3 | 4 | # Check if $1 exists, if not create the directory 5 | if [ -z "$1" ]; then 6 | echo "Error: Output directory not specified" 7 | exit 1 8 | fi 9 | 10 | if [ ! 
-d "$1" ]; then 11 | echo "Creating output directory: $1" 12 | mkdir -p "$1" 13 | fi 14 | 15 | bash scripts/dist_train.sh 2 --cfg_file ./cfgs/once_models/sara3d.yaml --out_dir $1 --extra_tag $1 16 | cd ../ 17 | cp -r tools/$1/* ./ -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/__init__.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | from pathlib import Path 3 | 4 | from .version import __version__ 5 | 6 | __all__ = [ 7 | '__version__' 8 | ] 9 | 10 | 11 | def get_git_commit_number(): 12 | if not (Path(__file__).parent / '../.git').exists(): 13 | return '0000000' 14 | 15 | cmd_out = subprocess.run(['git', 'rev-parse', 'HEAD'], stdout=subprocess.PIPE) 16 | git_commit_number = cmd_out.stdout.decode('utf-8')[:7] 17 | return git_commit_number 18 | 19 | 20 | script_version = get_git_commit_number() 21 | 22 | 23 | if script_version not in __version__: 24 | __version__ = __version__ + '+py%s' % script_version 25 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from functools import partial 3 | from torch.utils.data import DataLoader 4 | from torch.utils.data import DistributedSampler as _DistributedSampler 5 | 6 | from pcdet.utils import common_utils 7 | 8 | from .dataset import DatasetTemplate 9 | from .once.once_dataset import ONCEDataset 10 | 11 | __all__ = { 12 | 'DatasetTemplate': DatasetTemplate, 13 | 'ONCEDataset': ONCEDataset 14 | } 15 | 16 | 17 | class DistributedSampler(_DistributedSampler): 18 | 19 | def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True): 20 | super().__init__(dataset, num_replicas=num_replicas, rank=rank) 21 | self.shuffle = shuffle 22 | 23 | def __iter__(self): 24 | if self.shuffle: 25 | g = 
torch.Generator() 26 | g.manual_seed(self.epoch) 27 | indices = torch.randperm(len(self.dataset), generator=g).tolist() 28 | else: 29 | indices = torch.arange(len(self.dataset)).tolist() 30 | 31 | indices += indices[:(self.total_size - len(indices))] 32 | assert len(indices) == self.total_size 33 | 34 | indices = indices[self.rank:self.total_size:self.num_replicas] 35 | assert len(indices) == self.num_samples 36 | 37 | return iter(indices) 38 | 39 | 40 | def build_dataloader(dataset_cfg, class_names, batch_size, dist, root_path=None, workers=4, seed=None, 41 | logger=None, training=True, merge_all_iters_to_one_epoch=False, total_epochs=0): 42 | 43 | dataset = __all__[dataset_cfg.DATASET]( 44 | dataset_cfg=dataset_cfg, 45 | class_names=class_names, 46 | root_path=root_path, 47 | training=training, 48 | logger=logger, 49 | ) 50 | 51 | if merge_all_iters_to_one_epoch: 52 | assert hasattr(dataset, 'merge_all_iters_to_one_epoch') 53 | dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs) 54 | 55 | if dist: 56 | if training: 57 | sampler = torch.utils.data.distributed.DistributedSampler(dataset) 58 | else: 59 | rank, world_size = common_utils.get_dist_info() 60 | sampler = DistributedSampler(dataset, world_size, rank, shuffle=False) 61 | else: 62 | sampler = None 63 | dataloader = DataLoader( 64 | dataset, batch_size=batch_size, pin_memory=True, num_workers=workers, 65 | shuffle=(sampler is None) and training, collate_fn=dataset.collate_batch, 66 | drop_last=False, sampler=sampler, timeout=0, worker_init_fn=partial(common_utils.worker_init_fn, seed=seed) 67 | ) 68 | 69 | return dataset, dataloader, sampler 70 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/datasets/augmentor/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Alpha-Innovator/NovelSeek/0787bf8b5a114d0ad2a5539c02bc9c8ef7504173/examples/AutoPCDet_Once/SARA3D/pcdet/datasets/augmentor/__init__.py -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/datasets/once/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Alpha-Innovator/NovelSeek/0787bf8b5a114d0ad2a5539c02bc9c8ef7504173/examples/AutoPCDet_Once/SARA3D/pcdet/datasets/once/__init__.py -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/datasets/once/once_eval/eval_utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def compute_split_parts(num_samples, num_parts): 5 | part_samples = num_samples // num_parts 6 | remain_samples = num_samples % num_parts 7 | if part_samples == 0: 8 | return [num_samples] 9 | if remain_samples == 0: 10 | return [part_samples] * num_parts 11 | else: 12 | return [part_samples] * num_parts + [remain_samples] 13 | 14 | 15 | def overall_filter(boxes): 16 | ignore = np.zeros(boxes.shape[0], dtype=bool) # all false 17 | return ignore 18 | 19 | 20 | def distance_filter(boxes, level): 21 | ignore = np.ones(boxes.shape[0], dtype=bool) # all true 22 | dist = np.sqrt(np.sum(boxes[:, 0:3] * boxes[:, 0:3], axis=1)) 23 | 24 | if level == 0: # 0-30m 25 | flag = dist < 30 26 | elif level == 1: # 30-50m 27 | flag = (dist >= 30) & (dist < 50) 28 | elif level == 2: # 50m-inf 29 | flag = dist >= 50 30 | else: 31 | assert False, 'level < 3 for distance metric, found level %s' % (str(level)) 32 | 33 | ignore[flag] = False 34 | return ignore 35 | 36 | 37 | def overall_distance_filter(boxes, level): 38 | ignore = np.ones(boxes.shape[0], dtype=bool) # all true 39 | dist = np.sqrt(np.sum(boxes[:, 0:3] * boxes[:, 0:3], axis=1)) 40 | 41 | if level == 0: 42 | 
flag = np.ones(boxes.shape[0], dtype=bool) 43 | elif level == 1: # 0-30m 44 | flag = dist < 30 45 | elif level == 2: # 30-50m 46 | flag = (dist >= 30) & (dist < 50) 47 | elif level == 3: # 50m-inf 48 | flag = dist >= 50 49 | else: 50 | assert False, 'level < 4 for overall & distance metric, found level %s' % (str(level)) 51 | 52 | ignore[flag] = False 53 | return ignore -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/datasets/processor/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Alpha-Innovator/NovelSeek/0787bf8b5a114d0ad2a5539c02bc9c8ef7504173/examples/AutoPCDet_Once/SARA3D/pcdet/datasets/processor/__init__.py -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/datasets/processor/point_feature_encoder.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | class PointFeatureEncoder(object): 5 | def __init__(self, config, point_cloud_range=None): 6 | super().__init__() 7 | self.point_encoding_config = config 8 | assert list(self.point_encoding_config.src_feature_list[0:3]) == ['x', 'y', 'z'] 9 | self.used_feature_list = self.point_encoding_config.used_feature_list 10 | self.src_feature_list = self.point_encoding_config.src_feature_list 11 | self.point_cloud_range = point_cloud_range 12 | 13 | @property 14 | def num_point_features(self): 15 | return getattr(self, self.point_encoding_config.encoding_type)(points=None) 16 | 17 | def forward(self, data_dict): 18 | """ 19 | Args: 20 | data_dict: 21 | points: (N, 3 + C_in) 22 | ... 23 | Returns: 24 | data_dict: 25 | points: (N, 3 + C_out), 26 | use_lead_xyz: whether to use xyz as point-wise features 27 | ... 
28 | """ 29 | data_dict['points'], use_lead_xyz = getattr(self, self.point_encoding_config.encoding_type)( 30 | data_dict['points'] 31 | ) 32 | data_dict['use_lead_xyz'] = use_lead_xyz 33 | 34 | if self.point_encoding_config.get('filter_sweeps', False) and 'timestamp' in self.src_feature_list: 35 | max_sweeps = self.point_encoding_config.max_sweeps 36 | idx = self.src_feature_list.index('timestamp') 37 | dt = np.round(data_dict['points'][:, idx], 2) 38 | max_dt = sorted(np.unique(dt))[min(len(np.unique(dt))-1, max_sweeps-1)] 39 | data_dict['points'] = data_dict['points'][dt <= max_dt] 40 | 41 | return data_dict 42 | 43 | def absolute_coordinates_encoding(self, points=None): 44 | if points is None: 45 | num_output_features = len(self.used_feature_list) 46 | return num_output_features 47 | 48 | assert points.shape[-1] == len(self.src_feature_list) 49 | point_feature_list = [points[:, 0:3]] 50 | for x in self.used_feature_list: 51 | if x in ['x', 'y', 'z']: 52 | continue 53 | idx = self.src_feature_list.index(x) 54 | point_feature_list.append(points[:, idx:idx+1]) 55 | point_features = np.concatenate(point_feature_list, axis=1) 56 | 57 | return point_features, True 58 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/models/__init__.py: -------------------------------------------------------------------------------- 1 | from collections import namedtuple 2 | 3 | import numpy as np 4 | import torch 5 | 6 | from .detectors import build_detector 7 | 8 | try: 9 | import kornia 10 | except: 11 | pass 12 | # print('Warning: kornia is not installed. 
This package is only required by CaDDN') 13 | 14 | 15 | 16 | def build_network(model_cfg, num_class, dataset): 17 | model = build_detector( 18 | model_cfg=model_cfg, num_class=num_class, dataset=dataset 19 | ) 20 | return model 21 | 22 | 23 | def load_data_to_gpu(batch_dict): 24 | for key, val in batch_dict.items(): 25 | if key == 'camera_imgs': 26 | batch_dict[key] = val.cuda() 27 | elif not isinstance(val, np.ndarray): 28 | continue 29 | elif key in ['frame_id', 'metadata', 'calib', 'image_paths','ori_shape','img_process_infos']: 30 | continue 31 | elif key in ['images']: 32 | batch_dict[key] = kornia.image_to_tensor(val).float().cuda().contiguous() 33 | elif key in ['image_shape']: 34 | batch_dict[key] = torch.from_numpy(val).int().cuda() 35 | else: 36 | batch_dict[key] = torch.from_numpy(val).float().cuda() 37 | 38 | 39 | def model_fn_decorator(): 40 | ModelReturn = namedtuple('ModelReturn', ['loss', 'tb_dict', 'disp_dict']) 41 | 42 | def model_func(model, batch_dict): 43 | load_data_to_gpu(batch_dict) 44 | ret_dict, tb_dict, disp_dict = model(batch_dict) 45 | 46 | loss = ret_dict['loss'].mean() 47 | if hasattr(model, 'update_global_step'): 48 | model.update_global_step() 49 | else: 50 | model.module.update_global_step() 51 | 52 | return ModelReturn(loss, tb_dict, disp_dict) 53 | 54 | return model_func 55 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/models/backbones_2d/__init__.py: -------------------------------------------------------------------------------- 1 | from .base_bev_backbone import BaseBEVBackbone 2 | 3 | __all__ = { 4 | 'BaseBEVBackbone': BaseBEVBackbone 5 | } 6 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/models/backbones_2d/map_to_bev/__init__.py: -------------------------------------------------------------------------------- 1 | from .height_compression import HeightCompression 2 | 3 | __all__ 
= { 4 | 'HeightCompression': HeightCompression 5 | } 6 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/models/backbones_2d/map_to_bev/height_compression.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | 3 | 4 | class HeightCompression(nn.Module): 5 | def __init__(self, model_cfg, **kwargs): 6 | super().__init__() 7 | self.model_cfg = model_cfg 8 | self.num_bev_features = self.model_cfg.NUM_BEV_FEATURES 9 | 10 | def forward(self, batch_dict): 11 | """ 12 | Args: 13 | batch_dict: 14 | encoded_spconv_tensor: sparse tensor 15 | Returns: 16 | batch_dict: 17 | spatial_features: 18 | 19 | """ 20 | encoded_spconv_tensor = batch_dict['encoded_spconv_tensor'] 21 | spatial_features = encoded_spconv_tensor.dense() 22 | N, C, D, H, W = spatial_features.shape 23 | spatial_features = spatial_features.view(N, C * D, H, W) 24 | batch_dict['spatial_features'] = spatial_features 25 | batch_dict['spatial_features_stride'] = batch_dict['encoded_spconv_tensor_stride'] 26 | return batch_dict 27 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/models/backbones_3d/__init__.py: -------------------------------------------------------------------------------- 1 | from .spconv_backbone import VoxelResBackBone8x 2 | 3 | 4 | __all__ = { 5 | 'VoxelResBackBone8x': VoxelResBackBone8x 6 | } 7 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/models/backbones_3d/pfe/__init__.py: -------------------------------------------------------------------------------- 1 | from .voxel_set_abstraction import VoxelSetAbstraction 2 | 3 | __all__ = { 4 | 'VoxelSetAbstraction': VoxelSetAbstraction 5 | } 6 | -------------------------------------------------------------------------------- 
/examples/AutoPCDet_Once/SARA3D/pcdet/models/backbones_3d/vfe/__init__.py: -------------------------------------------------------------------------------- 1 | from .mean_vfe import MeanVFE 2 | from .vfe_template import VFETemplate 3 | from .resa_vfe import RESAVFE 4 | 5 | __all__ = { 6 | 'VFETemplate': VFETemplate, 7 | 'MeanVFE': MeanVFE, 8 | 'RESAVFE': RESAVFE 9 | } 10 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/models/backbones_3d/vfe/mean_vfe.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from .vfe_template import VFETemplate 4 | 5 | 6 | class MeanVFE(VFETemplate): 7 | def __init__(self, model_cfg, num_point_features, **kwargs): 8 | super().__init__(model_cfg=model_cfg) 9 | self.num_point_features = num_point_features 10 | 11 | def get_output_feature_dim(self): 12 | return self.num_point_features 13 | 14 | def forward(self, batch_dict, **kwargs): 15 | """ 16 | Args: 17 | batch_dict: 18 | voxels: (num_voxels, max_points_per_voxel, C) 19 | voxel_num_points: optional (num_voxels) 20 | **kwargs: 21 | 22 | Returns: 23 | vfe_features: (num_voxels, C) 24 | """ 25 | voxel_features, voxel_num_points = batch_dict['voxels'], batch_dict['voxel_num_points'] 26 | points_mean = voxel_features[:, :, :].sum(dim=1, keepdim=False) 27 | normalizer = torch.clamp_min(voxel_num_points.view(-1, 1), min=1.0).type_as(voxel_features) 28 | points_mean = points_mean / normalizer 29 | batch_dict['voxel_features'] = points_mean.contiguous() 30 | 31 | return batch_dict 32 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/models/backbones_3d/vfe/vfe_template.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | 3 | 4 | class VFETemplate(nn.Module): 5 | def __init__(self, model_cfg, **kwargs): 6 | super().__init__() 7 | 
self.model_cfg = model_cfg 8 | 9 | def get_output_feature_dim(self): 10 | raise NotImplementedError 11 | 12 | def forward(self, **kwargs): 13 | """ 14 | Args: 15 | **kwargs: 16 | 17 | Returns: 18 | batch_dict: 19 | ... 20 | vfe_features: (num_voxels, C) 21 | """ 22 | raise NotImplementedError 23 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/models/dense_heads/__init__.py: -------------------------------------------------------------------------------- 1 | from .center_head import CenterHead 2 | 3 | __all__ = { 4 | 'CenterHead': CenterHead 5 | } 6 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/models/detectors/__init__.py: -------------------------------------------------------------------------------- 1 | from .detector3d_template import Detector3DTemplate 2 | from .centerpoint import CenterPoint 3 | from .sara3d import SARA3D 4 | 5 | __all__ = { 6 | 'Detector3DTemplate': Detector3DTemplate, 7 | 'CenterPoint': CenterPoint, 8 | 'SARA3D': SARA3D 9 | } 10 | 11 | 12 | def build_detector(model_cfg, num_class, dataset): 13 | model = __all__[model_cfg.NAME]( 14 | model_cfg=model_cfg, num_class=num_class, dataset=dataset 15 | ) 16 | 17 | return model 18 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/models/detectors/centerpoint.py: -------------------------------------------------------------------------------- 1 | from .detector3d_template import Detector3DTemplate 2 | 3 | 4 | class CenterPoint(Detector3DTemplate): 5 | def __init__(self, model_cfg, num_class, dataset): 6 | super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset) 7 | self.module_list = self.build_networks() 8 | 9 | def forward(self, batch_dict): 10 | for cur_module in self.module_list: 11 | batch_dict = cur_module(batch_dict) 12 | 13 | if self.training: 14 | loss, 
tb_dict, disp_dict = self.get_training_loss() 15 | 16 | ret_dict = { 17 | 'loss': loss 18 | } 19 | return ret_dict, tb_dict, disp_dict 20 | else: 21 | pred_dicts, recall_dicts = self.post_processing(batch_dict) 22 | return pred_dicts, recall_dicts 23 | 24 | def get_training_loss(self): 25 | disp_dict = {} 26 | 27 | loss_rpn, tb_dict = self.dense_head.get_loss() 28 | tb_dict = { 29 | 'loss_rpn': loss_rpn.item(), 30 | **tb_dict 31 | } 32 | 33 | loss = loss_rpn 34 | return loss, tb_dict, disp_dict 35 | 36 | def post_processing(self, batch_dict): 37 | post_process_cfg = self.model_cfg.POST_PROCESSING 38 | batch_size = batch_dict['batch_size'] 39 | final_pred_dict = batch_dict['final_box_dicts'] 40 | recall_dict = {} 41 | for index in range(batch_size): 42 | pred_boxes = final_pred_dict[index]['pred_boxes'] 43 | 44 | recall_dict = self.generate_recall_record( 45 | box_preds=pred_boxes, 46 | recall_dict=recall_dict, batch_index=index, data_dict=batch_dict, 47 | thresh_list=post_process_cfg.RECALL_THRESH_LIST 48 | ) 49 | 50 | return final_pred_dict, recall_dict 51 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/models/model_utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .aca_utils import AdaptiveConfidenceAggregation 2 | 3 | __all__ = { 4 | 'AdaptiveConfidenceAggregation': AdaptiveConfidenceAggregation 5 | } -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/ops/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Alpha-Innovator/NovelSeek/0787bf8b5a114d0ad2a5539c02bc9c8ef7504173/examples/AutoPCDet_Once/SARA3D/pcdet/ops/__init__.py -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/ops/bev_pool/__init__.py: 
-------------------------------------------------------------------------------- 1 | from .bev_pool import bev_pool -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/ops/bev_pool/bev_pool_ext.cpython-39-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Alpha-Innovator/NovelSeek/0787bf8b5a114d0ad2a5539c02bc9c8ef7504173/examples/AutoPCDet_Once/SARA3D/pcdet/ops/bev_pool/bev_pool_ext.cpython-39-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/ops/ingroup_inds/ingroup_inds_cuda.cpython-39-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Alpha-Innovator/NovelSeek/0787bf8b5a114d0ad2a5539c02bc9c8ef7504173/examples/AutoPCDet_Once/SARA3D/pcdet/ops/ingroup_inds/ingroup_inds_cuda.cpython-39-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/ops/ingroup_inds/ingroup_inds_op.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | try: 4 | from . 
import ingroup_inds_cuda 5 | # import ingroup_indices 6 | except ImportError: 7 | ingroup_indices = None 8 | print('Can not import ingroup indices') 9 | 10 | ingroup_indices = ingroup_inds_cuda 11 | 12 | from torch.autograd import Function 13 | class IngroupIndicesFunction(Function): 14 | 15 | @staticmethod 16 | def forward(ctx, group_inds): 17 | 18 | out_inds = torch.zeros_like(group_inds) - 1 19 | 20 | ingroup_indices.forward(group_inds, out_inds) 21 | 22 | ctx.mark_non_differentiable(out_inds) 23 | 24 | return out_inds 25 | 26 | @staticmethod 27 | def backward(ctx, g): 28 | 29 | return None 30 | 31 | ingroup_inds = IngroupIndicesFunction.apply -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/ops/ingroup_inds/src/error.cuh: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | 4 | #define CHECK_CALL(call) \ 5 | do \ 6 | { \ 7 | const cudaError_t error_code = call; \ 8 | if (error_code != cudaSuccess) \ 9 | { \ 10 | printf("CUDA Error:\n"); \ 11 | printf(" File: %s\n", __FILE__); \ 12 | printf(" Line: %d\n", __LINE__); \ 13 | printf(" Error code: %d\n", error_code); \ 14 | printf(" Error text: %s\n", \ 15 | cudaGetErrorString(error_code)); \ 16 | exit(1); \ 17 | } \ 18 | } while (0) -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/ops/ingroup_inds/src/ingroup_inds.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | #define CHECK_CUDA(x) \ 7 | TORCH_CHECK(x.device().is_cuda(), #x, " must be a CUDAtensor ") 8 | #define CHECK_CONTIGUOUS(x) \ 9 | TORCH_CHECK(x.is_contiguous(), #x, " must be contiguous ") 10 | #define CHECK_INPUT(x) \ 11 | CHECK_CUDA(x); \ 12 | CHECK_CONTIGUOUS(x) 13 | 14 | 15 | void ingroup_inds_launcher( 16 | const long *group_inds_data, 17 | long 
*out_inds_data, 18 | int N, 19 | int max_group_id 20 | ); 21 | 22 | 23 | void ingroup_inds_gpu( 24 | at::Tensor group_inds, 25 | at::Tensor out_inds 26 | ); 27 | 28 | void ingroup_inds_gpu( 29 | at::Tensor group_inds, 30 | at::Tensor out_inds 31 | ) { 32 | 33 | CHECK_INPUT(group_inds); 34 | CHECK_INPUT(out_inds); 35 | int N = group_inds.size(0); 36 | int max_group_id = group_inds.max().item().toLong(); 37 | 38 | 39 | long *group_inds_data = group_inds.data_ptr(); 40 | long *out_inds_data = out_inds.data_ptr(); 41 | 42 | ingroup_inds_launcher( 43 | group_inds_data, 44 | out_inds_data, 45 | N, 46 | max_group_id 47 | ); 48 | 49 | } 50 | 51 | 52 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 53 | m.def("forward", &ingroup_inds_gpu, "cuda version of get_inner_win_inds of SST"); 54 | } -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/ops/ingroup_inds/src/ingroup_inds_kernel.cu: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include "cuda_fp16.h" 8 | 9 | #define CHECK_CALL(call) \ 10 | do \ 11 | { \ 12 | const cudaError_t error_code = call; \ 13 | if (error_code != cudaSuccess) \ 14 | { \ 15 | printf("CUDA Error:\n"); \ 16 | printf(" File: %s\n", __FILE__); \ 17 | printf(" Line: %d\n", __LINE__); \ 18 | printf(" Error code: %d\n", error_code); \ 19 | printf(" Error text: %s\n", \ 20 | cudaGetErrorString(error_code)); \ 21 | exit(1); \ 22 | } \ 23 | } while (0) 24 | 25 | #define THREADS_PER_BLOCK 256 26 | #define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0)) 27 | 28 | // #define DEBUG 29 | // #define ASSERTION 30 | 31 | __global__ void ingroup_inds_kernel( 32 | const long *group_inds, 33 | long *out_inds, 34 | int *ingroup_counter, 35 | int N 36 | ) { 37 | 38 | int idx = blockIdx.x * blockDim.x + threadIdx.x; 39 | if (idx >= N) return; 40 | long this_group_id = group_inds[idx]; 41 | 42 | int cnt 
= atomicAdd(&ingroup_counter[this_group_id], 1); 43 | out_inds[idx] = cnt; 44 | } 45 | 46 | 47 | void ingroup_inds_launcher( 48 | const long *group_inds, 49 | long *out_inds, 50 | int N, 51 | int max_group_id 52 | ) { 53 | 54 | int *ingroup_counter = NULL; 55 | CHECK_CALL(cudaMalloc(&ingroup_counter, (max_group_id + 1) * sizeof(int))); 56 | CHECK_CALL(cudaMemset(ingroup_counter, 0, (max_group_id + 1) * sizeof(int))); 57 | 58 | dim3 blocks(DIVUP(N, THREADS_PER_BLOCK)); 59 | dim3 threads(THREADS_PER_BLOCK); 60 | 61 | ingroup_inds_kernel<<>>( 62 | group_inds, 63 | out_inds, 64 | ingroup_counter, 65 | N 66 | ); 67 | 68 | cudaFree(ingroup_counter); 69 | 70 | #ifdef DEBUG 71 | CHECK_CALL(cudaGetLastError()); 72 | CHECK_CALL(cudaDeviceSynchronize()); 73 | #endif 74 | 75 | return; 76 | 77 | } -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/ops/iou3d_nms/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Alpha-Innovator/NovelSeek/0787bf8b5a114d0ad2a5539c02bc9c8ef7504173/examples/AutoPCDet_Once/SARA3D/pcdet/ops/iou3d_nms/__init__.py -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/ops/iou3d_nms/iou3d_nms_cuda.cpython-39-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Alpha-Innovator/NovelSeek/0787bf8b5a114d0ad2a5539c02bc9c8ef7504173/examples/AutoPCDet_Once/SARA3D/pcdet/ops/iou3d_nms/iou3d_nms_cuda.cpython-39-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/ops/iou3d_nms/src/iou3d_cpu.h: -------------------------------------------------------------------------------- 1 | #ifndef IOU3D_CPU_H 2 | #define IOU3D_CPU_H 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | int 
boxes_iou_bev_cpu(at::Tensor boxes_a_tensor, at::Tensor boxes_b_tensor, at::Tensor ans_iou_tensor); 10 | int boxes_aligned_iou_bev_cpu(at::Tensor boxes_a_tensor, at::Tensor boxes_b_tensor, at::Tensor ans_iou_tensor); 11 | #endif 12 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/ops/iou3d_nms/src/iou3d_nms.h: -------------------------------------------------------------------------------- 1 | #ifndef IOU3D_NMS_H 2 | #define IOU3D_NMS_H 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | int boxes_aligned_overlap_bev_gpu(at::Tensor boxes_a, at::Tensor boxes_b, at::Tensor ans_overlap); 11 | int boxes_overlap_bev_gpu(at::Tensor boxes_a, at::Tensor boxes_b, at::Tensor ans_overlap); 12 | int paired_boxes_overlap_bev_gpu(at::Tensor boxes_a, at::Tensor boxes_b, at::Tensor ans_overlap); 13 | int boxes_iou_bev_gpu(at::Tensor boxes_a, at::Tensor boxes_b, at::Tensor ans_iou); 14 | int nms_gpu(at::Tensor boxes, at::Tensor keep, float nms_overlap_thresh); 15 | int nms_normal_gpu(at::Tensor boxes, at::Tensor keep, float nms_overlap_thresh); 16 | 17 | #endif 18 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/ops/iou3d_nms/src/iou3d_nms_api.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | 7 | #include "iou3d_cpu.h" 8 | #include "iou3d_nms.h" 9 | 10 | 11 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 12 | m.def("boxes_aligned_overlap_bev_gpu", &boxes_aligned_overlap_bev_gpu, "aligned oriented boxes overlap"); 13 | m.def("boxes_overlap_bev_gpu", &boxes_overlap_bev_gpu, "oriented boxes overlap"); 14 | m.def("paired_boxes_overlap_bev_gpu", &paired_boxes_overlap_bev_gpu, "oriented boxes overlap"); 15 | m.def("boxes_iou_bev_gpu", &boxes_iou_bev_gpu, "oriented boxes iou"); 16 | m.def("nms_gpu", &nms_gpu, "oriented 
nms gpu"); 17 | m.def("nms_normal_gpu", &nms_normal_gpu, "nms gpu"); 18 | m.def("boxes_aligned_iou_bev_cpu", &boxes_aligned_iou_bev_cpu, "aligned oriented boxes iou"); 19 | m.def("boxes_iou_bev_cpu", &boxes_iou_bev_cpu, "oriented boxes iou"); 20 | } 21 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/ops/pointnet2/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Alpha-Innovator/NovelSeek/0787bf8b5a114d0ad2a5539c02bc9c8ef7504173/examples/AutoPCDet_Once/SARA3D/pcdet/ops/pointnet2/__init__.py -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/ops/pointnet2/pointnet2_batch/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Alpha-Innovator/NovelSeek/0787bf8b5a114d0ad2a5539c02bc9c8ef7504173/examples/AutoPCDet_Once/SARA3D/pcdet/ops/pointnet2/pointnet2_batch/__init__.py -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/ops/pointnet2/pointnet2_batch/pointnet2_batch_cuda.cpython-39-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Alpha-Innovator/NovelSeek/0787bf8b5a114d0ad2a5539c02bc9c8ef7504173/examples/AutoPCDet_Once/SARA3D/pcdet/ops/pointnet2/pointnet2_batch/pointnet2_batch_cuda.cpython-39-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/ops/pointnet2/pointnet2_batch/src/ball_query.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | batch version of ball query, modified from the original implementation of official PointNet++ codes. 3 | Written by Shaoshuai Shi 4 | All Rights Reserved 2018. 
5 | */ 6 | 7 | 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include "ball_query_gpu.h" 13 | 14 | #define CHECK_CUDA(x) do { \ 15 | if (!x.type().is_cuda()) { \ 16 | fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, __LINE__); \ 17 | exit(-1); \ 18 | } \ 19 | } while (0) 20 | #define CHECK_CONTIGUOUS(x) do { \ 21 | if (!x.is_contiguous()) { \ 22 | fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x, __FILE__, __LINE__); \ 23 | exit(-1); \ 24 | } \ 25 | } while (0) 26 | #define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x) 27 | 28 | 29 | int ball_query_wrapper_fast(int b, int n, int m, float radius, int nsample, 30 | at::Tensor new_xyz_tensor, at::Tensor xyz_tensor, at::Tensor idx_tensor) { 31 | CHECK_INPUT(new_xyz_tensor); 32 | CHECK_INPUT(xyz_tensor); 33 | const float *new_xyz = new_xyz_tensor.data(); 34 | const float *xyz = xyz_tensor.data(); 35 | int *idx = idx_tensor.data(); 36 | 37 | ball_query_kernel_launcher_fast(b, n, m, radius, nsample, new_xyz, xyz, idx); 38 | return 1; 39 | } 40 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/ops/pointnet2/pointnet2_batch/src/ball_query_gpu.cu: -------------------------------------------------------------------------------- 1 | /* 2 | batch version of ball query, modified from the original implementation of official PointNet++ codes. 3 | Written by Shaoshuai Shi 4 | All Rights Reserved 2018. 
5 | */ 6 | 7 | #include 8 | #include 9 | #include 10 | 11 | #include "ball_query_gpu.h" 12 | #include "cuda_utils.h" 13 | 14 | 15 | __global__ void ball_query_kernel_fast(int b, int n, int m, float radius, int nsample, 16 | const float *__restrict__ new_xyz, const float *__restrict__ xyz, int *__restrict__ idx) { 17 | // new_xyz: (B, M, 3) 18 | // xyz: (B, N, 3) 19 | // output: 20 | // idx: (B, M, nsample) 21 | int bs_idx = blockIdx.y; 22 | int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; 23 | if (bs_idx >= b || pt_idx >= m) return; 24 | 25 | new_xyz += bs_idx * m * 3 + pt_idx * 3; 26 | xyz += bs_idx * n * 3; 27 | idx += bs_idx * m * nsample + pt_idx * nsample; 28 | 29 | float radius2 = radius * radius; 30 | float new_x = new_xyz[0]; 31 | float new_y = new_xyz[1]; 32 | float new_z = new_xyz[2]; 33 | 34 | int cnt = 0; 35 | for (int k = 0; k < n; ++k) { 36 | float x = xyz[k * 3 + 0]; 37 | float y = xyz[k * 3 + 1]; 38 | float z = xyz[k * 3 + 2]; 39 | float d2 = (new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) + (new_z - z) * (new_z - z); 40 | if (d2 < radius2){ 41 | if (cnt == 0){ 42 | for (int l = 0; l < nsample; ++l) { 43 | idx[l] = k; 44 | } 45 | } 46 | idx[cnt] = k; 47 | ++cnt; 48 | if (cnt >= nsample) break; 49 | } 50 | } 51 | } 52 | 53 | 54 | void ball_query_kernel_launcher_fast(int b, int n, int m, float radius, int nsample, \ 55 | const float *new_xyz, const float *xyz, int *idx) { 56 | // new_xyz: (B, M, 3) 57 | // xyz: (B, N, 3) 58 | // output: 59 | // idx: (B, M, nsample) 60 | 61 | cudaError_t err; 62 | 63 | dim3 blocks(DIVUP(m, THREADS_PER_BLOCK), b); // blockIdx.x(col), blockIdx.y(row) 64 | dim3 threads(THREADS_PER_BLOCK); 65 | 66 | ball_query_kernel_fast<<>>(b, n, m, radius, nsample, new_xyz, xyz, idx); 67 | // cudaDeviceSynchronize(); // for using printf in kernel function 68 | err = cudaGetLastError(); 69 | if (cudaSuccess != err) { 70 | fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); 71 | exit(-1); 72 | } 73 | } 74 | 
-------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/ops/pointnet2/pointnet2_batch/src/ball_query_gpu.h: -------------------------------------------------------------------------------- 1 | #ifndef _BALL_QUERY_GPU_H 2 | #define _BALL_QUERY_GPU_H 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | int ball_query_wrapper_fast(int b, int n, int m, float radius, int nsample, 10 | at::Tensor new_xyz_tensor, at::Tensor xyz_tensor, at::Tensor idx_tensor); 11 | 12 | void ball_query_kernel_launcher_fast(int b, int n, int m, float radius, int nsample, 13 | const float *xyz, const float *new_xyz, int *idx); 14 | 15 | #endif 16 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/ops/pointnet2/pointnet2_batch/src/cuda_utils.h: -------------------------------------------------------------------------------- 1 | #ifndef _CUDA_UTILS_H 2 | #define _CUDA_UTILS_H 3 | 4 | #include 5 | 6 | #define TOTAL_THREADS 1024 7 | #define THREADS_PER_BLOCK 256 8 | #define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0)) 9 | 10 | inline int opt_n_threads(int work_size) { 11 | const int pow_2 = std::log(static_cast(work_size)) / std::log(2.0); 12 | 13 | return max(min(1 << pow_2, TOTAL_THREADS), 1); 14 | } 15 | #endif 16 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/ops/pointnet2/pointnet2_batch/src/group_points.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | batch version of point grouping, modified from the original implementation of official PointNet++ codes. 3 | Written by Shaoshuai Shi 4 | All Rights Reserved 2018. 
5 | */ 6 | 7 | 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include "group_points_gpu.h" 13 | 14 | 15 | int group_points_grad_wrapper_fast(int b, int c, int n, int npoints, int nsample, 16 | at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor grad_points_tensor) { 17 | 18 | float *grad_points = grad_points_tensor.data(); 19 | const int *idx = idx_tensor.data(); 20 | const float *grad_out = grad_out_tensor.data(); 21 | 22 | group_points_grad_kernel_launcher_fast(b, c, n, npoints, nsample, grad_out, idx, grad_points); 23 | return 1; 24 | } 25 | 26 | 27 | int group_points_wrapper_fast(int b, int c, int n, int npoints, int nsample, 28 | at::Tensor points_tensor, at::Tensor idx_tensor, at::Tensor out_tensor) { 29 | 30 | const float *points = points_tensor.data(); 31 | const int *idx = idx_tensor.data(); 32 | float *out = out_tensor.data(); 33 | 34 | group_points_kernel_launcher_fast(b, c, n, npoints, nsample, points, idx, out); 35 | return 1; 36 | } 37 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/ops/pointnet2/pointnet2_batch/src/group_points_gpu.h: -------------------------------------------------------------------------------- 1 | #ifndef _GROUP_POINTS_GPU_H 2 | #define _GROUP_POINTS_GPU_H 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | 10 | int group_points_wrapper_fast(int b, int c, int n, int npoints, int nsample, 11 | at::Tensor points_tensor, at::Tensor idx_tensor, at::Tensor out_tensor); 12 | 13 | void group_points_kernel_launcher_fast(int b, int c, int n, int npoints, int nsample, 14 | const float *points, const int *idx, float *out); 15 | 16 | int group_points_grad_wrapper_fast(int b, int c, int n, int npoints, int nsample, 17 | at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor grad_points_tensor); 18 | 19 | void group_points_grad_kernel_launcher_fast(int b, int c, int n, int npoints, int nsample, 20 | const float *grad_out, const 
int *idx, float *grad_points); 21 | 22 | #endif 23 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/ops/pointnet2/pointnet2_batch/src/interpolate.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | batch version of point interpolation, modified from the original implementation of official PointNet++ codes. 3 | Written by Shaoshuai Shi 4 | All Rights Reserved 2018. 5 | */ 6 | 7 | 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include "interpolate_gpu.h" 16 | 17 | 18 | void three_nn_wrapper_fast(int b, int n, int m, at::Tensor unknown_tensor, 19 | at::Tensor known_tensor, at::Tensor dist2_tensor, at::Tensor idx_tensor) { 20 | const float *unknown = unknown_tensor.data(); 21 | const float *known = known_tensor.data(); 22 | float *dist2 = dist2_tensor.data(); 23 | int *idx = idx_tensor.data(); 24 | 25 | three_nn_kernel_launcher_fast(b, n, m, unknown, known, dist2, idx); 26 | } 27 | 28 | 29 | void three_interpolate_wrapper_fast(int b, int c, int m, int n, 30 | at::Tensor points_tensor, 31 | at::Tensor idx_tensor, 32 | at::Tensor weight_tensor, 33 | at::Tensor out_tensor) { 34 | 35 | const float *points = points_tensor.data(); 36 | const float *weight = weight_tensor.data(); 37 | float *out = out_tensor.data(); 38 | const int *idx = idx_tensor.data(); 39 | 40 | three_interpolate_kernel_launcher_fast(b, c, m, n, points, idx, weight, out); 41 | } 42 | 43 | 44 | void three_interpolate_grad_wrapper_fast(int b, int c, int n, int m, 45 | at::Tensor grad_out_tensor, 46 | at::Tensor idx_tensor, 47 | at::Tensor weight_tensor, 48 | at::Tensor grad_points_tensor) { 49 | 50 | const float *grad_out = grad_out_tensor.data(); 51 | const float *weight = weight_tensor.data(); 52 | float *grad_points = grad_points_tensor.data(); 53 | const int *idx = idx_tensor.data(); 54 | 55 | 
three_interpolate_grad_kernel_launcher_fast(b, c, n, m, grad_out, idx, weight, grad_points); 56 | } 57 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/ops/pointnet2/pointnet2_batch/src/interpolate_gpu.h: -------------------------------------------------------------------------------- 1 | #ifndef _INTERPOLATE_GPU_H 2 | #define _INTERPOLATE_GPU_H 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | 10 | void three_nn_wrapper_fast(int b, int n, int m, at::Tensor unknown_tensor, 11 | at::Tensor known_tensor, at::Tensor dist2_tensor, at::Tensor idx_tensor); 12 | 13 | void three_nn_kernel_launcher_fast(int b, int n, int m, const float *unknown, 14 | const float *known, float *dist2, int *idx); 15 | 16 | 17 | void three_interpolate_wrapper_fast(int b, int c, int m, int n, at::Tensor points_tensor, 18 | at::Tensor idx_tensor, at::Tensor weight_tensor, at::Tensor out_tensor); 19 | 20 | void three_interpolate_kernel_launcher_fast(int b, int c, int m, int n, 21 | const float *points, const int *idx, const float *weight, float *out); 22 | 23 | 24 | void three_interpolate_grad_wrapper_fast(int b, int c, int n, int m, at::Tensor grad_out_tensor, 25 | at::Tensor idx_tensor, at::Tensor weight_tensor, at::Tensor grad_points_tensor); 26 | 27 | void three_interpolate_grad_kernel_launcher_fast(int b, int c, int n, int m, const float *grad_out, 28 | const int *idx, const float *weight, float *grad_points); 29 | 30 | #endif 31 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/ops/pointnet2/pointnet2_batch/src/pointnet2_api.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "ball_query_gpu.h" 5 | #include "group_points_gpu.h" 6 | #include "sampling_gpu.h" 7 | #include "interpolate_gpu.h" 8 | 9 | 10 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 11 | 
m.def("ball_query_wrapper", &ball_query_wrapper_fast, "ball_query_wrapper_fast"); 12 | 13 | m.def("group_points_wrapper", &group_points_wrapper_fast, "group_points_wrapper_fast"); 14 | m.def("group_points_grad_wrapper", &group_points_grad_wrapper_fast, "group_points_grad_wrapper_fast"); 15 | 16 | m.def("gather_points_wrapper", &gather_points_wrapper_fast, "gather_points_wrapper_fast"); 17 | m.def("gather_points_grad_wrapper", &gather_points_grad_wrapper_fast, "gather_points_grad_wrapper_fast"); 18 | 19 | m.def("farthest_point_sampling_wrapper", &farthest_point_sampling_wrapper, "farthest_point_sampling_wrapper"); 20 | 21 | m.def("three_nn_wrapper", &three_nn_wrapper_fast, "three_nn_wrapper_fast"); 22 | m.def("three_interpolate_wrapper", &three_interpolate_wrapper_fast, "three_interpolate_wrapper_fast"); 23 | m.def("three_interpolate_grad_wrapper", &three_interpolate_grad_wrapper_fast, "three_interpolate_grad_wrapper_fast"); 24 | } 25 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/ops/pointnet2/pointnet2_batch/src/sampling.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | batch version of point sampling and gathering, modified from the original implementation of official PointNet++ codes. 3 | Written by Shaoshuai Shi 4 | All Rights Reserved 2018. 
5 | */ 6 | 7 | 8 | #include 9 | #include 10 | #include 11 | #include "sampling_gpu.h" 12 | 13 | 14 | int gather_points_wrapper_fast(int b, int c, int n, int npoints, 15 | at::Tensor points_tensor, at::Tensor idx_tensor, at::Tensor out_tensor){ 16 | const float *points = points_tensor.data(); 17 | const int *idx = idx_tensor.data(); 18 | float *out = out_tensor.data(); 19 | 20 | gather_points_kernel_launcher_fast(b, c, n, npoints, points, idx, out); 21 | return 1; 22 | } 23 | 24 | 25 | int gather_points_grad_wrapper_fast(int b, int c, int n, int npoints, 26 | at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor grad_points_tensor) { 27 | 28 | const float *grad_out = grad_out_tensor.data(); 29 | const int *idx = idx_tensor.data(); 30 | float *grad_points = grad_points_tensor.data(); 31 | 32 | gather_points_grad_kernel_launcher_fast(b, c, n, npoints, grad_out, idx, grad_points); 33 | return 1; 34 | } 35 | 36 | 37 | int farthest_point_sampling_wrapper(int b, int n, int m, 38 | at::Tensor points_tensor, at::Tensor temp_tensor, at::Tensor idx_tensor) { 39 | 40 | const float *points = points_tensor.data(); 41 | float *temp = temp_tensor.data(); 42 | int *idx = idx_tensor.data(); 43 | 44 | farthest_point_sampling_kernel_launcher(b, n, m, points, temp, idx); 45 | return 1; 46 | } 47 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/ops/pointnet2/pointnet2_batch/src/sampling_gpu.h: -------------------------------------------------------------------------------- 1 | #ifndef _SAMPLING_GPU_H 2 | #define _SAMPLING_GPU_H 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | 9 | int gather_points_wrapper_fast(int b, int c, int n, int npoints, 10 | at::Tensor points_tensor, at::Tensor idx_tensor, at::Tensor out_tensor); 11 | 12 | void gather_points_kernel_launcher_fast(int b, int c, int n, int npoints, 13 | const float *points, const int *idx, float *out); 14 | 15 | 16 | int 
gather_points_grad_wrapper_fast(int b, int c, int n, int npoints, 17 | at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor grad_points_tensor); 18 | 19 | void gather_points_grad_kernel_launcher_fast(int b, int c, int n, int npoints, 20 | const float *grad_out, const int *idx, float *grad_points); 21 | 22 | 23 | int farthest_point_sampling_wrapper(int b, int n, int m, 24 | at::Tensor points_tensor, at::Tensor temp_tensor, at::Tensor idx_tensor); 25 | 26 | void farthest_point_sampling_kernel_launcher(int b, int n, int m, 27 | const float *dataset, float *temp, int *idxs); 28 | 29 | #endif 30 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/ops/pointnet2/pointnet2_stack/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Alpha-Innovator/NovelSeek/0787bf8b5a114d0ad2a5539c02bc9c8ef7504173/examples/AutoPCDet_Once/SARA3D/pcdet/ops/pointnet2/pointnet2_stack/__init__.py -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/ops/pointnet2/pointnet2_stack/pointnet2_stack_cuda.cpython-39-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Alpha-Innovator/NovelSeek/0787bf8b5a114d0ad2a5539c02bc9c8ef7504173/examples/AutoPCDet_Once/SARA3D/pcdet/ops/pointnet2/pointnet2_stack/pointnet2_stack_cuda.cpython-39-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/ops/pointnet2/pointnet2_stack/src/ball_query.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | Stacked-batch-data version of ball query, modified from the original implementation of official PointNet++ codes. 3 | Written by Shaoshuai Shi 4 | All Rights Reserved 2019-2020. 
5 | */ 6 | 7 | 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include "ball_query_gpu.h" 13 | 14 | #define CHECK_CUDA(x) do { \ 15 | if (!x.type().is_cuda()) { \ 16 | fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, __LINE__); \ 17 | exit(-1); \ 18 | } \ 19 | } while (0) 20 | #define CHECK_CONTIGUOUS(x) do { \ 21 | if (!x.is_contiguous()) { \ 22 | fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x, __FILE__, __LINE__); \ 23 | exit(-1); \ 24 | } \ 25 | } while (0) 26 | #define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x) 27 | 28 | 29 | int ball_query_wrapper_stack(int B, int M, float radius, int nsample, 30 | at::Tensor new_xyz_tensor, at::Tensor new_xyz_batch_cnt_tensor, 31 | at::Tensor xyz_tensor, at::Tensor xyz_batch_cnt_tensor, at::Tensor idx_tensor) { 32 | CHECK_INPUT(new_xyz_tensor); 33 | CHECK_INPUT(xyz_tensor); 34 | CHECK_INPUT(new_xyz_batch_cnt_tensor); 35 | CHECK_INPUT(xyz_batch_cnt_tensor); 36 | 37 | const float *new_xyz = new_xyz_tensor.data(); 38 | const float *xyz = xyz_tensor.data(); 39 | const int *new_xyz_batch_cnt = new_xyz_batch_cnt_tensor.data(); 40 | const int *xyz_batch_cnt = xyz_batch_cnt_tensor.data(); 41 | int *idx = idx_tensor.data(); 42 | 43 | ball_query_kernel_launcher_stack(B, M, radius, nsample, new_xyz, new_xyz_batch_cnt, xyz, xyz_batch_cnt, idx); 44 | return 1; 45 | } 46 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/ops/pointnet2/pointnet2_stack/src/ball_query_gpu.h: -------------------------------------------------------------------------------- 1 | /* 2 | Stacked-batch-data version of ball query, modified from the original implementation of official PointNet++ codes. 3 | Written by Shaoshuai Shi 4 | All Rights Reserved 2019-2020. 
5 | */ 6 | 7 | 8 | #ifndef _STACK_BALL_QUERY_GPU_H 9 | #define _STACK_BALL_QUERY_GPU_H 10 | 11 | #include 12 | #include 13 | #include 14 | #include 15 | 16 | int ball_query_wrapper_stack(int B, int M, float radius, int nsample, 17 | at::Tensor new_xyz_tensor, at::Tensor new_xyz_batch_cnt_tensor, 18 | at::Tensor xyz_tensor, at::Tensor xyz_batch_cnt_tensor, at::Tensor idx_tensor); 19 | 20 | 21 | void ball_query_kernel_launcher_stack(int B, int M, float radius, int nsample, 22 | const float *new_xyz, const int *new_xyz_batch_cnt, const float *xyz, const int *xyz_batch_cnt, int *idx); 23 | 24 | 25 | #endif 26 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/ops/pointnet2/pointnet2_stack/src/cuda_utils.h: -------------------------------------------------------------------------------- 1 | #ifndef _STACK_CUDA_UTILS_H 2 | #define _STACK_CUDA_UTILS_H 3 | 4 | #include 5 | 6 | #define THREADS_PER_BLOCK 256 7 | #define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0)) 8 | 9 | #endif 10 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/ops/pointnet2/pointnet2_stack/src/group_points.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | Stacked-batch-data version of point grouping, modified from the original implementation of official PointNet++ codes. 3 | Written by Shaoshuai Shi 4 | All Rights Reserved 2019-2020. 
5 | */ 6 | 7 | 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include "group_points_gpu.h" 13 | 14 | #define CHECK_CUDA(x) do { \ 15 | if (!x.type().is_cuda()) { \ 16 | fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, __LINE__); \ 17 | exit(-1); \ 18 | } \ 19 | } while (0) 20 | #define CHECK_CONTIGUOUS(x) do { \ 21 | if (!x.is_contiguous()) { \ 22 | fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x, __FILE__, __LINE__); \ 23 | exit(-1); \ 24 | } \ 25 | } while (0) 26 | #define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x) 27 | 28 | 29 | int group_points_grad_wrapper_stack(int B, int M, int C, int N, int nsample, 30 | at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor idx_batch_cnt_tensor, 31 | at::Tensor features_batch_cnt_tensor, at::Tensor grad_features_tensor) { 32 | 33 | CHECK_INPUT(grad_out_tensor); 34 | CHECK_INPUT(idx_tensor); 35 | CHECK_INPUT(idx_batch_cnt_tensor); 36 | CHECK_INPUT(features_batch_cnt_tensor); 37 | CHECK_INPUT(grad_features_tensor); 38 | 39 | const float *grad_out = grad_out_tensor.data(); 40 | const int *idx = idx_tensor.data(); 41 | const int *idx_batch_cnt = idx_batch_cnt_tensor.data(); 42 | const int *features_batch_cnt = features_batch_cnt_tensor.data(); 43 | float *grad_features = grad_features_tensor.data(); 44 | 45 | group_points_grad_kernel_launcher_stack(B, M, C, N, nsample, grad_out, idx, idx_batch_cnt, features_batch_cnt, grad_features); 46 | return 1; 47 | } 48 | 49 | 50 | int group_points_wrapper_stack(int B, int M, int C, int nsample, 51 | at::Tensor features_tensor, at::Tensor features_batch_cnt_tensor, 52 | at::Tensor idx_tensor, at::Tensor idx_batch_cnt_tensor, at::Tensor out_tensor) { 53 | 54 | CHECK_INPUT(features_tensor); 55 | CHECK_INPUT(features_batch_cnt_tensor); 56 | CHECK_INPUT(idx_tensor); 57 | CHECK_INPUT(idx_batch_cnt_tensor); 58 | CHECK_INPUT(out_tensor); 59 | 60 | const float *features = features_tensor.data(); 61 | const int *idx = idx_tensor.data(); 62 
| const int *features_batch_cnt = features_batch_cnt_tensor.data(); 63 | const int *idx_batch_cnt = idx_batch_cnt_tensor.data(); 64 | float *out = out_tensor.data(); 65 | 66 | group_points_kernel_launcher_stack(B, M, C, nsample, features, features_batch_cnt, idx, idx_batch_cnt, out); 67 | return 1; 68 | } -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/ops/pointnet2/pointnet2_stack/src/group_points_gpu.h: -------------------------------------------------------------------------------- 1 | /* 2 | Stacked-batch-data version of point grouping, modified from the original implementation of official PointNet++ codes. 3 | Written by Shaoshuai Shi 4 | All Rights Reserved 2019-2020. 5 | */ 6 | 7 | 8 | #ifndef _STACK_GROUP_POINTS_GPU_H 9 | #define _STACK_GROUP_POINTS_GPU_H 10 | 11 | #include 12 | #include 13 | #include 14 | #include 15 | 16 | 17 | int group_points_wrapper_stack(int B, int M, int C, int nsample, 18 | at::Tensor features_tensor, at::Tensor features_batch_cnt_tensor, 19 | at::Tensor idx_tensor, at::Tensor idx_batch_cnt_tensor, at::Tensor out_tensor); 20 | 21 | void group_points_kernel_launcher_stack(int B, int M, int C, int nsample, 22 | const float *features, const int *features_batch_cnt, const int *idx, const int *idx_batch_cnt, float *out); 23 | 24 | int group_points_grad_wrapper_stack(int B, int M, int C, int N, int nsample, 25 | at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor idx_batch_cnt_tensor, 26 | at::Tensor features_batch_cnt_tensor, at::Tensor grad_features_tensor); 27 | 28 | void group_points_grad_kernel_launcher_stack(int B, int M, int C, int N, int nsample, 29 | const float *grad_out, const int *idx, const int *idx_batch_cnt, const int *features_batch_cnt, float *grad_features); 30 | 31 | #endif 32 | -------------------------------------------------------------------------------- 
/examples/AutoPCDet_Once/SARA3D/pcdet/ops/pointnet2/pointnet2_stack/src/interpolate_gpu.h: -------------------------------------------------------------------------------- 1 | #ifndef _INTERPOLATE_GPU_H 2 | #define _INTERPOLATE_GPU_H 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | 10 | void three_nn_wrapper_stack(at::Tensor unknown_tensor, 11 | at::Tensor unknown_batch_cnt_tensor, at::Tensor known_tensor, 12 | at::Tensor known_batch_cnt_tensor, at::Tensor dist2_tensor, at::Tensor idx_tensor); 13 | 14 | 15 | void three_interpolate_wrapper_stack(at::Tensor features_tensor, 16 | at::Tensor idx_tensor, at::Tensor weight_tensor, at::Tensor out_tensor); 17 | 18 | 19 | 20 | void three_interpolate_grad_wrapper_stack(at::Tensor grad_out_tensor, at::Tensor idx_tensor, 21 | at::Tensor weight_tensor, at::Tensor grad_features_tensor); 22 | 23 | 24 | void three_nn_kernel_launcher_stack(int batch_size, int N, int M, const float *unknown, 25 | const int *unknown_batch_cnt, const float *known, const int *known_batch_cnt, 26 | float *dist2, int *idx); 27 | 28 | 29 | void three_interpolate_kernel_launcher_stack(int N, int channels, 30 | const float *features, const int *idx, const float *weight, float *out); 31 | 32 | 33 | 34 | void three_interpolate_grad_kernel_launcher_stack(int N, int channels, const float *grad_out, 35 | const int *idx, const float *weight, float *grad_features); 36 | 37 | 38 | 39 | #endif -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/ops/pointnet2/pointnet2_stack/src/pointnet2_api.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "ball_query_gpu.h" 5 | #include "group_points_gpu.h" 6 | #include "sampling_gpu.h" 7 | #include "interpolate_gpu.h" 8 | #include "voxel_query_gpu.h" 9 | #include "vector_pool_gpu.h" 10 | 11 | 12 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 13 | m.def("ball_query_wrapper", 
&ball_query_wrapper_stack, "ball_query_wrapper_stack"); 14 | m.def("voxel_query_wrapper", &voxel_query_wrapper_stack, "voxel_query_wrapper_stack"); 15 | 16 | m.def("farthest_point_sampling_wrapper", &farthest_point_sampling_wrapper, "farthest_point_sampling_wrapper"); 17 | m.def("stack_farthest_point_sampling_wrapper", &stack_farthest_point_sampling_wrapper, "stack_farthest_point_sampling_wrapper"); 18 | 19 | m.def("group_points_wrapper", &group_points_wrapper_stack, "group_points_wrapper_stack"); 20 | m.def("group_points_grad_wrapper", &group_points_grad_wrapper_stack, "group_points_grad_wrapper_stack"); 21 | 22 | m.def("three_nn_wrapper", &three_nn_wrapper_stack, "three_nn_wrapper_stack"); 23 | m.def("three_interpolate_wrapper", &three_interpolate_wrapper_stack, "three_interpolate_wrapper_stack"); 24 | m.def("three_interpolate_grad_wrapper", &three_interpolate_grad_wrapper_stack, "three_interpolate_grad_wrapper_stack"); 25 | 26 | m.def("query_stacked_local_neighbor_idxs_wrapper_stack", &query_stacked_local_neighbor_idxs_wrapper_stack, "query_stacked_local_neighbor_idxs_wrapper_stack"); 27 | m.def("query_three_nn_by_stacked_local_idxs_wrapper_stack", &query_three_nn_by_stacked_local_idxs_wrapper_stack, "query_three_nn_by_stacked_local_idxs_wrapper_stack"); 28 | 29 | m.def("vector_pool_wrapper", &vector_pool_wrapper_stack, "vector_pool_grad_wrapper_stack"); 30 | m.def("vector_pool_grad_wrapper", &vector_pool_grad_wrapper_stack, "vector_pool_grad_wrapper_stack"); 31 | } 32 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/ops/pointnet2/pointnet2_stack/src/sampling.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include "sampling_gpu.h" 5 | 6 | #define CHECK_CUDA(x) do { \ 7 | if (!x.type().is_cuda()) { \ 8 | fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, __LINE__); \ 9 | exit(-1); \ 10 | } \ 11 | 
} while (0) 12 | #define CHECK_CONTIGUOUS(x) do { \ 13 | if (!x.is_contiguous()) { \ 14 | fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x, __FILE__, __LINE__); \ 15 | exit(-1); \ 16 | } \ 17 | } while (0) 18 | #define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x) 19 | 20 | 21 | int farthest_point_sampling_wrapper(int b, int n, int m, 22 | at::Tensor points_tensor, at::Tensor temp_tensor, at::Tensor idx_tensor) { 23 | 24 | CHECK_INPUT(points_tensor); 25 | CHECK_INPUT(temp_tensor); 26 | CHECK_INPUT(idx_tensor); 27 | 28 | const float *points = points_tensor.data(); 29 | float *temp = temp_tensor.data(); 30 | int *idx = idx_tensor.data(); 31 | 32 | farthest_point_sampling_kernel_launcher(b, n, m, points, temp, idx); 33 | return 1; 34 | } 35 | 36 | 37 | int stack_farthest_point_sampling_wrapper(at::Tensor points_tensor, 38 | at::Tensor temp_tensor, at::Tensor xyz_batch_cnt_tensor, at::Tensor idx_tensor, 39 | at::Tensor num_sampled_points_tensor) { 40 | 41 | CHECK_INPUT(points_tensor); 42 | CHECK_INPUT(temp_tensor); 43 | CHECK_INPUT(idx_tensor); 44 | CHECK_INPUT(xyz_batch_cnt_tensor); 45 | CHECK_INPUT(num_sampled_points_tensor); 46 | 47 | int batch_size = xyz_batch_cnt_tensor.size(0); 48 | int N = points_tensor.size(0); 49 | const float *points = points_tensor.data(); 50 | float *temp = temp_tensor.data(); 51 | int *xyz_batch_cnt = xyz_batch_cnt_tensor.data(); 52 | int *idx = idx_tensor.data(); 53 | int *num_sampled_points = num_sampled_points_tensor.data(); 54 | 55 | stack_farthest_point_sampling_kernel_launcher(N, batch_size, points, temp, xyz_batch_cnt, idx, num_sampled_points); 56 | return 1; 57 | } -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/ops/pointnet2/pointnet2_stack/src/sampling_gpu.h: -------------------------------------------------------------------------------- 1 | #ifndef _SAMPLING_GPU_H 2 | #define _SAMPLING_GPU_H 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | 
9 | int farthest_point_sampling_wrapper(int b, int n, int m, 10 | at::Tensor points_tensor, at::Tensor temp_tensor, at::Tensor idx_tensor); 11 | 12 | void farthest_point_sampling_kernel_launcher(int b, int n, int m, 13 | const float *dataset, float *temp, int *idxs); 14 | 15 | int stack_farthest_point_sampling_wrapper( 16 | at::Tensor points_tensor, at::Tensor temp_tensor, at::Tensor xyz_batch_cnt_tensor, 17 | at::Tensor idx_tensor, at::Tensor num_sampled_points_tensor); 18 | 19 | 20 | void stack_farthest_point_sampling_kernel_launcher(int N, int batch_size, 21 | const float *dataset, float *temp, int *xyz_batch_cnt, int *idxs, int *num_sampled_points); 22 | 23 | #endif 24 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/ops/pointnet2/pointnet2_stack/src/voxel_query.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include "voxel_query_gpu.h" 9 | 10 | #define CHECK_CUDA(x) do { \ 11 | if (!x.type().is_cuda()) { \ 12 | fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, __LINE__); \ 13 | exit(-1); \ 14 | } \ 15 | } while (0) 16 | #define CHECK_CONTIGUOUS(x) do { \ 17 | if (!x.is_contiguous()) { \ 18 | fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x, __FILE__, __LINE__); \ 19 | exit(-1); \ 20 | } \ 21 | } while (0) 22 | #define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x) 23 | 24 | 25 | int voxel_query_wrapper_stack(int M, int R1, int R2, int R3, int nsample, float radius, 26 | int z_range, int y_range, int x_range, at::Tensor new_xyz_tensor, at::Tensor xyz_tensor, 27 | at::Tensor new_coords_tensor, at::Tensor point_indices_tensor, at::Tensor idx_tensor) { 28 | CHECK_INPUT(new_coords_tensor); 29 | CHECK_INPUT(point_indices_tensor); 30 | CHECK_INPUT(new_xyz_tensor); 31 | CHECK_INPUT(xyz_tensor); 32 | 33 | const float *new_xyz = 
new_xyz_tensor.data(); 34 | const float *xyz = xyz_tensor.data(); 35 | const int *new_coords = new_coords_tensor.data(); 36 | const int *point_indices = point_indices_tensor.data(); 37 | int *idx = idx_tensor.data(); 38 | 39 | voxel_query_kernel_launcher_stack(M, R1, R2, R3, nsample, radius, z_range, y_range, x_range, new_xyz, xyz, new_coords, point_indices, idx); 40 | return 1; 41 | } 42 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/ops/pointnet2/pointnet2_stack/src/voxel_query_gpu.h: -------------------------------------------------------------------------------- 1 | #ifndef _STACK_VOXEL_QUERY_GPU_H 2 | #define _STACK_VOXEL_QUERY_GPU_H 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | int voxel_query_wrapper_stack(int M, int R1, int R2, int R3, int nsample, float radius, 10 | int z_range, int y_range, int x_range, at::Tensor new_xyz_tensor, at::Tensor xyz_tensor, 11 | at::Tensor new_coords_tensor, at::Tensor point_indices_tensor, at::Tensor idx_tensor); 12 | 13 | 14 | void voxel_query_kernel_launcher_stack(int M, int R1, int R2, int R3, int nsample, 15 | float radius, int z_range, int y_range, int x_range, const float *new_xyz, 16 | const float *xyz, const int *new_coords, const int *point_indices, int *idx); 17 | 18 | 19 | #endif 20 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/ops/roiaware_pool3d/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Alpha-Innovator/NovelSeek/0787bf8b5a114d0ad2a5539c02bc9c8ef7504173/examples/AutoPCDet_Once/SARA3D/pcdet/ops/roiaware_pool3d/__init__.py -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/ops/roiaware_pool3d/roiaware_pool3d_cuda.cpython-39-x86_64-linux-gnu.so: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Alpha-Innovator/NovelSeek/0787bf8b5a114d0ad2a5539c02bc9c8ef7504173/examples/AutoPCDet_Once/SARA3D/pcdet/ops/roiaware_pool3d/roiaware_pool3d_cuda.cpython-39-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/ops/roipoint_pool3d/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Alpha-Innovator/NovelSeek/0787bf8b5a114d0ad2a5539c02bc9c8ef7504173/examples/AutoPCDet_Once/SARA3D/pcdet/ops/roipoint_pool3d/__init__.py -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/ops/roipoint_pool3d/roipoint_pool3d_cuda.cpython-39-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Alpha-Innovator/NovelSeek/0787bf8b5a114d0ad2a5539c02bc9c8ef7504173/examples/AutoPCDet_Once/SARA3D/pcdet/ops/roipoint_pool3d/roipoint_pool3d_cuda.cpython-39-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/ops/roipoint_pool3d/roipoint_pool3d_utils.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from torch.autograd import Function 4 | 5 | from ...utils import box_utils 6 | from . 
class RoIPointPool3d(nn.Module):
    """Gather a fixed number of raw points (plus features) inside each enlarged 3D box."""

    def __init__(self, num_sampled_points=512, pool_extra_width=1.0):
        super().__init__()
        self.num_sampled_points = num_sampled_points
        self.pool_extra_width = pool_extra_width

    def forward(self, points, point_features, boxes3d):
        """
        Args:
            points: (B, N, 3)
            point_features: (B, N, C)
            boxes3d: (B, M, 7), [x, y, z, dx, dy, dz, heading]

        Returns:
            pooled_features: (B, M, num_sampled_points, 3 + C)
            pooled_empty_flag: (B, M)
        """
        return RoIPointPool3dFunction.apply(
            points, point_features, boxes3d,
            self.pool_extra_width, self.num_sampled_points,
        )


class RoIPointPool3dFunction(Function):
    @staticmethod
    def forward(ctx, points, point_features, boxes3d, pool_extra_width, num_sampled_points=512):
        """
        Args:
            ctx:
            points: (B, N, 3)
            point_features: (B, N, C)
            boxes3d: (B, num_boxes, 7), [x, y, z, dx, dy, dz, heading]
            pool_extra_width: margin added around each box before pooling
            num_sampled_points: number of points kept per box

        Returns:
            pooled_features: (B, num_boxes, num_sampled_points, 3 + C)
            pooled_empty_flag: (B, num_boxes)
        """
        assert len(points.shape) == 3 and points.shape[2] == 3
        batch_size = points.shape[0]
        boxes_num = boxes3d.shape[1]
        feature_len = point_features.shape[2]

        # Enlarge every box by pool_extra_width before collecting points.
        enlarged = box_utils.enlarge_box3d(boxes3d.view(-1, 7), pool_extra_width)
        pooled_boxes3d = enlarged.view(batch_size, -1, 7)

        pooled_features = point_features.new_zeros(
            (batch_size, boxes_num, num_sampled_points, 3 + feature_len))
        pooled_empty_flag = point_features.new_zeros((batch_size, boxes_num)).int()

        # The CUDA kernel fills pooled_features / pooled_empty_flag in place.
        roipoint_pool3d_cuda.forward(
            points.contiguous(), pooled_boxes3d.contiguous(),
            point_features.contiguous(), pooled_features, pooled_empty_flag,
        )

        return pooled_features, pooled_empty_flag

    @staticmethod
    def backward(ctx, grad_out):
        # No gradient is defined for the pooling op.
        raise NotImplementedError
67 | pass 68 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/ops/roipoint_pool3d/src/roipoint_pool3d.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #define CHECK_CUDA(x) do { \ 5 | if (!x.type().is_cuda()) { \ 6 | fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, __LINE__); \ 7 | exit(-1); \ 8 | } \ 9 | } while (0) 10 | #define CHECK_CONTIGUOUS(x) do { \ 11 | if (!x.is_contiguous()) { \ 12 | fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x, __FILE__, __LINE__); \ 13 | exit(-1); \ 14 | } \ 15 | } while (0) 16 | #define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x) 17 | 18 | 19 | void roipool3dLauncher(int batch_size, int pts_num, int boxes_num, int feature_in_len, int sampled_pts_num, 20 | const float *xyz, const float *boxes3d, const float *pts_feature, float *pooled_features, int *pooled_empty_flag); 21 | 22 | 23 | int roipool3d_gpu(at::Tensor xyz, at::Tensor boxes3d, at::Tensor pts_feature, at::Tensor pooled_features, at::Tensor pooled_empty_flag){ 24 | // params xyz: (B, N, 3) 25 | // params boxes3d: (B, M, 7) 26 | // params pts_feature: (B, N, C) 27 | // params pooled_features: (B, M, 512, 3+C) 28 | // params pooled_empty_flag: (B, M) 29 | CHECK_INPUT(xyz); 30 | CHECK_INPUT(boxes3d); 31 | CHECK_INPUT(pts_feature); 32 | CHECK_INPUT(pooled_features); 33 | CHECK_INPUT(pooled_empty_flag); 34 | 35 | int batch_size = xyz.size(0); 36 | int pts_num = xyz.size(1); 37 | int boxes_num = boxes3d.size(1); 38 | int feature_in_len = pts_feature.size(2); 39 | int sampled_pts_num = pooled_features.size(2); 40 | 41 | 42 | const float * xyz_data = xyz.data(); 43 | const float * boxes3d_data = boxes3d.data(); 44 | const float * pts_feature_data = pts_feature.data(); 45 | float * pooled_features_data = pooled_features.data(); 46 | int * pooled_empty_flag_data = pooled_empty_flag.data(); 47 | 48 | 
def find_all_spconv_keys(model: nn.Module, prefix="") -> Set[str]:
    """Collect the state-dict keys of spconv convolution weights.

    These weights are stored transposed relative to dense convolutions, so
    checkpoint-loading code needs the full set of their dotted key paths.
    """
    keys: Set[str] = set()
    for child_name, child in model.named_children():
        path = child_name if prefix == "" else f"{prefix}.{child_name}"

        if isinstance(child, spconv.conv.SparseConvolution):
            path = f"{path}.weight"
            keys.add(path)

        # Mirrors the original traversal: recursion continues with the
        # (possibly ".weight"-suffixed) path as the new prefix.
        keys.update(find_all_spconv_keys(child, prefix=path))

    return keys
return out 39 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/pcdet/version.py: -------------------------------------------------------------------------------- 1 | __version__ = "0.6.0+8caccce" 2 | -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/res/final_info.json: -------------------------------------------------------------------------------- 1 | { 2 | "Once": { 3 | "means": { 4 | "mAP": 65.8904875754768, 5 | "mAP_vehicle": 78.220132291265, 6 | "mAP_pedestrian": 51.77878046417283, 7 | "mAP_cyclist": 67.67254997099255 8 | } 9 | } 10 | } -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/tools/_init_path.py: -------------------------------------------------------------------------------- 1 | import sys 2 | sys.path.insert(0, '../') -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/tools/cfgs/dataset_configs/once_dataset.yaml: -------------------------------------------------------------------------------- 1 | DATASET: 'ONCEDataset' 2 | DATA_PATH: './datasets/once' 3 | CLOUD_DATA_PATH: './datasets/once' 4 | 5 | POINT_CLOUD_RANGE: [-75.2, -75.2, -5.0, 75.2, 75.2, 3.0] 6 | 7 | INFO_PATH: { 8 | 'train': [once_infos_train.pkl], 9 | 'val': [once_infos_val.pkl], 10 | 'test': [once_infos_test.pkl], 11 | } 12 | 13 | DATA_SPLIT: { 14 | 'train': train, 15 | 'test': val 16 | } 17 | 18 | DATA_AUGMENTOR: 19 | DISABLE_AUG_LIST: ['placeholder'] 20 | AUG_CONFIG_LIST: 21 | - NAME: gt_sampling 22 | USE_ROAD_PLANE: False 23 | DB_INFO_PATH: 24 | - once_dbinfos_train.pkl 25 | PREPARE: { 26 | filter_by_min_points: ['Car:5', 'Bus:5', 'Truck:5', 'Pedestrian:5', 'Cyclist:5'], 27 | } 28 | 29 | SAMPLE_GROUPS: ['Car:1', 'Bus:4', 'Truck:3', 'Pedestrian:2', 'Cyclist:2'] 30 | NUM_POINT_FEATURES: 4 31 | REMOVE_EXTRA_WIDTH: [0.0, 0.0, 0.0] 32 | 
LIMIT_WHOLE_SCENE: True 33 | 34 | - NAME: random_world_flip 35 | ALONG_AXIS_LIST: ['x', 'y'] 36 | 37 | - NAME: random_world_rotation 38 | WORLD_ROT_ANGLE: [-0.78539816, 0.78539816] 39 | 40 | - NAME: random_world_scaling 41 | WORLD_SCALE_RANGE: [0.95, 1.05] 42 | 43 | 44 | POINT_FEATURE_ENCODING: { 45 | encoding_type: absolute_coordinates_encoding, 46 | used_feature_list: ['x', 'y', 'z', 'intensity'], 47 | src_feature_list: ['x', 'y', 'z', 'intensity'], 48 | } 49 | 50 | 51 | DATA_PROCESSOR: 52 | - NAME: mask_points_and_boxes_outside_range 53 | REMOVE_OUTSIDE_BOXES: True 54 | 55 | - NAME: shuffle_points 56 | SHUFFLE_ENABLED: { 57 | 'train': True, 58 | 'test': False 59 | } 60 | 61 | - NAME: transform_points_to_voxels 62 | VOXEL_SIZE: [0.1, 0.1, 0.2] 63 | MAX_POINTS_PER_VOXEL: 5 64 | MAX_NUMBER_OF_VOXELS: { 65 | 'train': 60000, 66 | 'test': 60000 67 | } 68 | COMPUTE_VOXEL_CENTERS: True -------------------------------------------------------------------------------- /examples/AutoPCDet_Once/SARA3D/tools/scripts/dist_train.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -x 4 | NGPUS=$1 5 | PY_ARGS=${@:2} 6 | 7 | while true 8 | do 9 | PORT=$(( ((RANDOM<<15)|RANDOM) % 49152 + 10000 )) 10 | status="$(nc -z 127.0.0.1 $PORT < /dev/null &>/dev/null; echo $?)" 11 | if [ "${status}" != "0" ]; then 12 | break; 13 | fi 14 | done 15 | echo $PORT 16 | 17 | python -m torch.distributed.launch --nproc_per_node=${NGPUS} --rdzv_endpoint=localhost:${PORT} train.py --launcher pytorch ${PY_ARGS} 18 | 19 | -------------------------------------------------------------------------------- /examples/AutoPower_IEEE39_bus/AdaptiveHierarchicalGraphTransformer/configs/test_senseflow_39.yaml: -------------------------------------------------------------------------------- 1 | log_path: ./results 2 | flag_return_losses: True 3 | 4 | pq_len: &pq_len 29 5 | pv_len: &pv_len 9 6 | slack_len: &slack_len 1 7 | mask_num: &mask_num 0 8 | 
batch_size: &batch_size 256 9 | 10 | data: 11 | meta: 12 | node: ['PQ', 'PV', 'Slack'] 13 | edge: 14 | - ['PQ', 'default', 'PQ'] 15 | - ['PQ', 'default', 'PV'] 16 | - ['PQ', 'default', 'Slack'] 17 | - ['PV', 'default', 'PQ'] 18 | - ['PV', 'default', 'PV'] 19 | - ['PV', 'default', 'Slack'] 20 | - ['Slack', 'default', 'PQ'] 21 | - ['Slack', 'default', 'PV'] 22 | train: 23 | dataset_type: PowerFlowDataset 24 | data_root: / 25 | split_txt: ./datasets/power/case39_data/10w_case39_n_n_1.json 26 | pq_len: *pq_len 27 | pv_len: *pv_len 28 | slack_len: *slack_len 29 | mask_num: *mask_num 30 | val: 31 | dataset_type: PowerFlowDataset 32 | data_root: / 33 | split_txt: ./datasets/power/case39_data/2w_case39_n_2.json 34 | pq_len: *pq_len 35 | pv_len: *pv_len 36 | slack_len: *slack_len 37 | mask_num: *mask_num 38 | batch_size: *batch_size 39 | batch_size_test: *batch_size 40 | num_workers: 4 41 | 42 | train: 43 | logs_freq: 10 44 | epochs: 100 45 | optimizer_type: "Adam" 46 | learning_rate: 0.001 47 | momentum: 0.9 48 | weight_decay: 0.0 49 | 50 | model: 51 | type: senseflow 52 | hidden_channels: 128 53 | num_block: 4 54 | layers_per_graph: 2 55 | heads_ca: 8 56 | batch_size: *batch_size 57 | flag_use_edge_feat: False 58 | with_norm: True 59 | num_loops_train: 1 60 | num_loops_test: -1 61 | scaling_factor_vm: 0.01 62 | scaling_factor_va: 0.01 63 | loss_type: l1 64 | flag_weighted_loss: True 65 | loss_weight_equ: 0.1 66 | loss_weight_vm: 10.0 67 | loss_weight_va: 1.0 68 | matrix: vm_va 69 | resume_ckpt_path: "" 70 | flag_use_ema: True 71 | ema_warmup_epoch: 10 72 | ema_decay_param: 0.99 73 | 74 | 75 | scheduler: 76 | type: Cosine 77 | eta_min: 1e-5 78 | 79 | 80 | loss: 81 | type: bi_deltapq_loss 82 | filt_type: True 83 | aggr: abs 84 | -------------------------------------------------------------------------------- /examples/AutoPower_IEEE39_bus/AdaptiveHierarchicalGraphTransformer/launcher.sh: -------------------------------------------------------------------------------- 1 | 
def RMSE(predictions, targets):
    """Return the root-mean-square error between two same-shaped tensors.

    Args:
        predictions: torch.Tensor of predicted values.
        targets: torch.Tensor of ground-truth values, same shape.

    Returns:
        float: sqrt(mean((predictions - targets) ** 2)) as a Python scalar.
    """
    # Compute directly with tensor ops instead of instantiating an
    # nn.MSELoss module on every call — identical result, less overhead.
    return torch.sqrt(torch.mean((predictions - targets) ** 2)).item()
class PowerFlowDataset(Dataset):
    """Per-sample power-flow graphs listed in a JSON-lines split file.

    Each line of ``split_txt`` is a JSON object with at least ``file_path``
    (a torch-saved heterogeneous graph with 'PQ', 'PV' and 'Slack' node types)
    and optionally ``masked_node``. Node feature columns are assumed to be
    [Vm, Va, P_net, Q_net, Gs, Bs] — TODO confirm against the data generator.
    """

    def __init__(self, data_root, split_txt, pq_len, pv_len, slack_len, mask_num=0):
        self.data_root = data_root
        with open(split_txt, 'r') as f:
            self.file_list = [json.loads(line) for line in f]
        self.pq_len = pq_len
        self.pv_len = pv_len
        self.slack_len = slack_len
        # Number of PQ nodes whose Q_net measurement is hidden per sample.
        self.mask_num = mask_num

        # Shortest-path bookkeeping; populated lazily by external code.
        self.flag_distance_once_calculated = False
        self.shortest_paths = None
        self.node_type_to_global_index = None
        self.max_depth = 16

    def __len__(self):
        return len(self.file_list)

    def update_max_depth(self):
        """Shrink ``max_depth`` to the largest observed shortest-path distance.

        Requires ``self.shortest_paths`` to be populated first.
        """
        tmp_distance = max(list(self.shortest_paths.values()))
        if tmp_distance < self.max_depth:
            self.max_depth = tmp_distance

    def __getitem__(self, idx):
        file_dict = self.file_list[idx]
        # Cleanup: single-argument os.path.join was a no-op; load directly.
        data = torch.load(file_dict['file_path'])
        pq_num = data['PQ'].x.shape[0]

        # Column layout of the node feature matrix (Gs/Bs kept for reference).
        Vm, Va, P_net, Q_net, Gs, Bs = 0, 1, 2, 3, 4, 5

        # ------- add initial values --------
        # Targets: y = [Vm, Va, P_net, Q_net]
        data['PQ'].y = data['PQ'].x[:, [Vm, Va, P_net, Q_net]].clone().detach()
        data['PQ'].x[:, Vm] = 1.0  # Vm unknown for PQ nodes: flat start
        data['PQ'].x[:, Va] = data['Slack'].x[0, Va].item()  # Va unknown, seeded from Slack

        non_zero_indices = torch.nonzero(data['PQ'].x[:, Q_net])
        data['PQ'].q_mask = torch.ones((pq_num,), dtype=torch.bool)
        if self.mask_num > 0:
            if file_dict.get('masked_node') is None:
                # Randomly hide mask_num of the PQ nodes with nonzero Q_net.
                mask_indices = non_zero_indices[torch.randperm(non_zero_indices.shape[0])[:self.mask_num]]
            else:
                mask_indices = file_dict['masked_node'][:self.mask_num]
            data['PQ'].q_mask[mask_indices] = False
            data['PQ'].x[~data['PQ'].q_mask, Q_net] = 0

        data['PV'].y = data['PV'].x[:, [Vm, Va, P_net, Q_net]].clone().detach()
        data['PV'].x[:, Va] = data['Slack'].x[0, Va].item()  # Va unknown, seeded from Slack
        data['PV'].x[:, Q_net] = 0  # Q unknown for PV nodes, zeroed

        data['Slack'].y = data['Slack'].x[:, [Vm, Va, P_net, Q_net]].clone().detach()
        data['Slack'].x[:, P_net] = 0  # P_net unknown for the slack node
        data['Slack'].x[:, Q_net] = 0  # Q_net unknown for the slack node

        return data
def RMSE(predictions, targets):
    """Return the root-mean-square error between two same-shaped tensors.

    Args:
        predictions: torch.Tensor of predicted values.
        targets: torch.Tensor of ground-truth values, same shape.

    Returns:
        float: sqrt(mean((predictions - targets) ** 2)) as a Python scalar.
    """
    # Compute directly with tensor ops instead of instantiating an
    # nn.MSELoss module on every call — identical result, less overhead.
    return torch.sqrt(torch.mean((predictions - targets) ** 2)).item()
class PowerFlowDataset(Dataset):
    """Per-sample power-flow graphs listed in a JSON-lines split file.

    Each line of ``split_txt`` is a JSON object with at least ``file_path``
    (a torch-saved heterogeneous graph with 'PQ', 'PV' and 'Slack' node types)
    and optionally ``masked_node``. Node feature columns are assumed to be
    [Vm, Va, P_net, Q_net, Gs, Bs] — TODO confirm against the data generator.
    """

    def __init__(self, data_root, split_txt, pq_len, pv_len, slack_len, mask_num=0):
        self.data_root = data_root
        with open(split_txt, 'r') as f:
            self.file_list = [json.loads(line) for line in f]
        self.pq_len = pq_len
        self.pv_len = pv_len
        self.slack_len = slack_len
        # Number of PQ nodes whose Q_net measurement is hidden per sample.
        self.mask_num = mask_num

        # Shortest-path bookkeeping; populated lazily by external code.
        self.flag_distance_once_calculated = False
        self.shortest_paths = None
        self.node_type_to_global_index = None
        self.max_depth = 16

    def __len__(self):
        return len(self.file_list)

    def update_max_depth(self):
        """Shrink ``max_depth`` to the largest observed shortest-path distance.

        Requires ``self.shortest_paths`` to be populated first.
        """
        tmp_distance = max(list(self.shortest_paths.values()))
        if tmp_distance < self.max_depth:
            self.max_depth = tmp_distance

    def __getitem__(self, idx):
        file_dict = self.file_list[idx]
        # Cleanup: single-argument os.path.join was a no-op; load directly.
        data = torch.load(file_dict['file_path'])
        pq_num = data['PQ'].x.shape[0]

        # Column layout of the node feature matrix (Gs/Bs kept for reference).
        Vm, Va, P_net, Q_net, Gs, Bs = 0, 1, 2, 3, 4, 5

        # ------- add initial values --------
        # Targets: y = [Vm, Va, P_net, Q_net]
        data['PQ'].y = data['PQ'].x[:, [Vm, Va, P_net, Q_net]].clone().detach()
        data['PQ'].x[:, Vm] = 1.0  # Vm unknown for PQ nodes: flat start
        data['PQ'].x[:, Va] = data['Slack'].x[0, Va].item()  # Va unknown, seeded from Slack

        non_zero_indices = torch.nonzero(data['PQ'].x[:, Q_net])
        data['PQ'].q_mask = torch.ones((pq_num,), dtype=torch.bool)
        if self.mask_num > 0:
            if file_dict.get('masked_node') is None:
                # Randomly hide mask_num of the PQ nodes with nonzero Q_net.
                mask_indices = non_zero_indices[torch.randperm(non_zero_indices.shape[0])[:self.mask_num]]
            else:
                mask_indices = file_dict['masked_node'][:self.mask_num]
            data['PQ'].q_mask[mask_indices] = False
            data['PQ'].x[~data['PQ'].q_mask, Q_net] = 0

        data['PV'].y = data['PV'].x[:, [Vm, Va, P_net, Q_net]].clone().detach()
        data['PV'].x[:, Va] = data['Slack'].x[0, Va].item()  # Va unknown, seeded from Slack
        data['PV'].x[:, Q_net] = 0  # Q unknown for PV nodes, zeroed

        data['Slack'].y = data['Slack'].x[:, [Vm, Va, P_net, Q_net]].clone().detach()
        data['Slack'].x[:, P_net] = 0  # P_net unknown for the slack node
        data['Slack'].x[:, Q_net] = 0  # Q_net unknown for the slack node

        return data
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /examples/AutoSeg_VOC12/Baseline/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | from .voc import VOCSegmentation 2 | from .cityscapes import Cityscapes -------------------------------------------------------------------------------- /examples/AutoSeg_VOC12/Baseline/launcher.sh: -------------------------------------------------------------------------------- 1 | python main.py \ 2 | --out_dir $1 \ 3 | --data_root ./datasets \ 4 | --batch_size 64 \ 5 | --lr 0.04 6 | -------------------------------------------------------------------------------- /examples/AutoSeg_VOC12/Baseline/metrics/__init__.py: -------------------------------------------------------------------------------- 1 | from .stream_metrics import StreamSegMetrics, AverageMeter 2 | 3 | -------------------------------------------------------------------------------- /examples/AutoSeg_VOC12/Baseline/network/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Alpha-Innovator/NovelSeek/0787bf8b5a114d0ad2a5539c02bc9c8ef7504173/examples/AutoSeg_VOC12/Baseline/network/.DS_Store -------------------------------------------------------------------------------- /examples/AutoSeg_VOC12/Baseline/network/__init__.py: -------------------------------------------------------------------------------- 1 | from 
class FocalLoss(nn.Module):
    """Focal loss built on per-element cross-entropy.

    With the defaults (alpha=1, gamma=0) this reduces to plain cross-entropy;
    gamma > 0 down-weights well-classified examples.
    """

    def __init__(self, alpha=1, gamma=0, size_average=True, ignore_index=255):
        super(FocalLoss, self).__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.ignore_index = ignore_index
        self.size_average = size_average

    def forward(self, inputs, targets):
        per_elem_ce = F.cross_entropy(
            inputs, targets, reduction='none', ignore_index=self.ignore_index)
        # p_t: model's probability for the true class, recovered from the CE.
        prob_true = torch.exp(-per_elem_ce)
        loss = self.alpha * (1 - prob_true) ** self.gamma * per_elem_ce
        return loss.mean() if self.size_average else loss.sum()
class PolyLR(_LRScheduler):
    """Polynomial decay: lr = base_lr * (1 - epoch / max_iters) ** power, floored at min_lr."""

    def __init__(self, optimizer, max_iters, power=0.9, last_epoch=-1, min_lr=1e-6):
        self.power = power
        self.max_iters = max_iters
        self.min_lr = min_lr  # floor avoids the lr reaching exactly zero
        super(PolyLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        decay = (1 - self.last_epoch / self.max_iters) ** self.power
        return [max(base_lr * decay, self.min_lr) for base_lr in self.base_lrs]
-------------------------------------------------------------------------------- /examples/AutoSeg_VOC12/EntropyOptimizedAttentionNet/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 Gongfan Fang 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /examples/AutoSeg_VOC12/EntropyOptimizedAttentionNet/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | from .voc import VOCSegmentation 2 | from .cityscapes import Cityscapes -------------------------------------------------------------------------------- /examples/AutoSeg_VOC12/EntropyOptimizedAttentionNet/launcher.sh: -------------------------------------------------------------------------------- 1 | CUDA_VISIBLE_DEVICES=4,5,6,7 python main.py \ 2 | --out_dir $1 \ 3 | --data_root ./datasets \ 4 | --batch_size 128 \ 5 | --lr 0.06 \ 6 | --use_eoaNet \ 7 | --msa_scales 1 2 4 \ 8 | --eog_beta 0.5 \ 9 | -------------------------------------------------------------------------------- /examples/AutoSeg_VOC12/EntropyOptimizedAttentionNet/metrics/__init__.py: -------------------------------------------------------------------------------- 1 | from .stream_metrics import StreamSegMetrics, AverageMeter 2 | 3 | -------------------------------------------------------------------------------- /examples/AutoSeg_VOC12/EntropyOptimizedAttentionNet/network/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Alpha-Innovator/NovelSeek/0787bf8b5a114d0ad2a5539c02bc9c8ef7504173/examples/AutoSeg_VOC12/EntropyOptimizedAttentionNet/network/.DS_Store -------------------------------------------------------------------------------- /examples/AutoSeg_VOC12/EntropyOptimizedAttentionNet/network/__init__.py: -------------------------------------------------------------------------------- 1 | from .modeling import * 2 | from ._deeplab import convert_to_separable_conv 3 | from .enhanced_deeplab import convert_to_separable_conv 4 | from .enhanced_modules import NormalizedMultiScaleAttention, EntropyOptimizedGating, EOANetModule 
-------------------------------------------------------------------------------- /examples/AutoSeg_VOC12/EntropyOptimizedAttentionNet/network/backbone/__init__.py: -------------------------------------------------------------------------------- 1 | from . import resnet 2 | from . import mobilenetv2 3 | from . import hrnetv2 4 | from . import xception 5 | -------------------------------------------------------------------------------- /examples/AutoSeg_VOC12/EntropyOptimizedAttentionNet/requirements.txt: -------------------------------------------------------------------------------- 1 | torch 2 | torchvision 3 | numpy 4 | pillow 5 | scikit-learn 6 | tqdm 7 | matplotlib 8 | visdom -------------------------------------------------------------------------------- /examples/AutoSeg_VOC12/EntropyOptimizedAttentionNet/res/final_info.json: -------------------------------------------------------------------------------- 1 | { 2 | "voc12_aug": { 3 | "means": { 4 | "mIoU": 0.80875763561515, 5 | "OA": 0.9573472222065693, 6 | "mAcc": 0.80945763561515 7 | } 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /examples/AutoSeg_VOC12/EntropyOptimizedAttentionNet/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .utils import * 2 | from .visualizer import Visualizer 3 | from .scheduler import PolyLR 4 | from .loss import FocalLoss -------------------------------------------------------------------------------- /examples/AutoSeg_VOC12/EntropyOptimizedAttentionNet/utils/loss.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import torch.nn.functional as F 3 | import torch 4 | 5 | class FocalLoss(nn.Module): 6 | def __init__(self, alpha=1, gamma=0, size_average=True, ignore_index=255): 7 | super(FocalLoss, self).__init__() 8 | self.alpha = alpha 9 | self.gamma = gamma 10 | self.ignore_index = ignore_index 11 | 
self.size_average = size_average 12 | 13 | def forward(self, inputs, targets): 14 | ce_loss = F.cross_entropy( 15 | inputs, targets, reduction='none', ignore_index=self.ignore_index) 16 | pt = torch.exp(-ce_loss) 17 | focal_loss = self.alpha * (1-pt)**self.gamma * ce_loss 18 | if self.size_average: 19 | return focal_loss.mean() 20 | else: 21 | return focal_loss.sum() -------------------------------------------------------------------------------- /examples/AutoSeg_VOC12/EntropyOptimizedAttentionNet/utils/scheduler.py: -------------------------------------------------------------------------------- 1 | from torch.optim.lr_scheduler import _LRScheduler, StepLR 2 | 3 | class PolyLR(_LRScheduler): 4 | def __init__(self, optimizer, max_iters, power=0.9, last_epoch=-1, min_lr=1e-6): 5 | self.power = power 6 | self.max_iters = max_iters # avoid zero lr 7 | self.min_lr = min_lr 8 | super(PolyLR, self).__init__(optimizer, last_epoch) 9 | 10 | def get_lr(self): 11 | return [ max( base_lr * ( 1 - self.last_epoch/self.max_iters )**self.power, self.min_lr) 12 | for base_lr in self.base_lrs] -------------------------------------------------------------------------------- /examples/AutoSeg_VOC12/EntropyOptimizedAttentionNet/utils/utils.py: -------------------------------------------------------------------------------- 1 | from torchvision.transforms.functional import normalize 2 | import torch.nn as nn 3 | import numpy as np 4 | import os 5 | 6 | def denormalize(tensor, mean, std): 7 | mean = np.array(mean) 8 | std = np.array(std) 9 | 10 | _mean = -mean/std 11 | _std = 1/std 12 | return normalize(tensor, _mean, _std) 13 | 14 | class Denormalize(object): 15 | def __init__(self, mean, std): 16 | mean = np.array(mean) 17 | std = np.array(std) 18 | self._mean = -mean/std 19 | self._std = 1/std 20 | 21 | def __call__(self, tensor): 22 | if isinstance(tensor, np.ndarray): 23 | return (tensor - self._mean.reshape(-1,1,1)) / self._std.reshape(-1,1,1) 24 | return normalize(tensor, 
self._mean, self._std) 25 | 26 | def set_bn_momentum(model, momentum=0.1): 27 | for m in model.modules(): 28 | if isinstance(m, nn.BatchNorm2d): 29 | m.momentum = momentum 30 | 31 | def fix_bn(model): 32 | for m in model.modules(): 33 | if isinstance(m, nn.BatchNorm2d): 34 | m.eval() 35 | 36 | def mkdir(path): 37 | if not os.path.exists(path): 38 | os.mkdir(path) 39 | -------------------------------------------------------------------------------- /examples/AutoTPPR_Perturb-seq/Baseline/final_info.json: -------------------------------------------------------------------------------- 1 | { 2 | "Gears": { 3 | "means": { 4 | "Test Top 20 DE MSE": 0.1974669247865677 5 | } 6 | } 7 | } -------------------------------------------------------------------------------- /examples/AutoTPPR_Perturb-seq/Baseline/launcher.sh: -------------------------------------------------------------------------------- 1 | python experiment.py \ 2 | --data_path "./GEARS/data" \ 3 | --device "cuda:3" \ 4 | --epochs 20 \ 5 | --out_dir $1 -------------------------------------------------------------------------------- /examples/AutoTPPR_Perturb-seq/GEARS_LocalRegularization/launcher.sh: -------------------------------------------------------------------------------- 1 | python experiment.py \ 2 | --data_path "./hexiaohan/GEARS/data" \ 3 | --device "cuda:3" \ 4 | --epochs 20 \ 5 | --out_dir $1 -------------------------------------------------------------------------------- /examples/AutoTPPR_Perturb-seq/GEARS_LocalRegularization/res/final_info.json: -------------------------------------------------------------------------------- 1 | { 2 | "GEARS_LocalRegularization": { 3 | "means": { 4 | "Test Top 20 DE MSE": 0.14649905264377594 5 | } 6 | } 7 | } -------------------------------------------------------------------------------- /examples/AutoTSF_ETTh1/AdaptiveHybridDFTNet/data_provider/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | 
-------------------------------------------------------------------------------- /examples/AutoTSF_ETTh1/AdaptiveHybridDFTNet/data_provider/data_factory.py: -------------------------------------------------------------------------------- 1 | from data_provider.data_loader import Dataset_ETT_hour, Dataset_ETT_minute, Dataset_Custom, Dataset_Pred 2 | from torch.utils.data import DataLoader 3 | 4 | data_dict = { 5 | 'ETTh1': Dataset_ETT_hour, 6 | 'ETTh2': Dataset_ETT_hour, 7 | 'ETTm1': Dataset_ETT_minute, 8 | 'ETTm2': Dataset_ETT_minute, 9 | 'custom': Dataset_Custom, 10 | } 11 | 12 | 13 | def data_provider(args, flag): 14 | Data = data_dict[args.data] 15 | timeenc = 0 if args.embed != 'timeF' else 1 16 | train_only = args.train_only 17 | 18 | if flag == 'test': 19 | shuffle_flag = False 20 | drop_last = False 21 | batch_size = args.batch_size 22 | freq = args.freq 23 | elif flag == 'pred': 24 | shuffle_flag = False 25 | drop_last = False 26 | batch_size = 1 27 | freq = args.freq 28 | Data = Dataset_Pred 29 | else: 30 | shuffle_flag = True 31 | drop_last = True 32 | batch_size = args.batch_size 33 | freq = args.freq 34 | 35 | data_set = Data( 36 | root_path=args.root_path, 37 | data_path=args.data_path, 38 | flag=flag, 39 | size=[args.seq_len, args.label_len, args.pred_len], 40 | features=args.features, 41 | target=args.target, 42 | timeenc=timeenc, 43 | freq=freq, 44 | train_only=train_only 45 | ) 46 | print(flag, len(data_set)) 47 | data_loader = DataLoader( 48 | data_set, 49 | batch_size=batch_size, 50 | shuffle=shuffle_flag, 51 | num_workers=args.num_workers, 52 | drop_last=drop_last) 53 | return data_set, data_loader 54 | -------------------------------------------------------------------------------- /examples/AutoTSF_ETTh1/AdaptiveHybridDFTNet/launcher.sh: -------------------------------------------------------------------------------- 1 | 2 | if [ ! -d "./logs" ]; then 3 | mkdir ./logs 4 | fi 5 | 6 | if [ ! 
-d "./logs/LongForecasting" ]; then 7 | mkdir ./logs/LongForecasting 8 | fi 9 | seq_len=336 10 | model_name=DLinear 11 | out_dir=$1 12 | python -u experiment.py \ 13 | --out_dir ${out_dir} \ 14 | --is_training 1 \ 15 | --root_path ./datasets/tsf/dataset/ \ 16 | --data_path ETTh1.csv \ 17 | --data ETTh1 \ 18 | --features M \ 19 | --seq_len $seq_len \ 20 | --enc_in 7 \ 21 | --des 'Exp' \ 22 | --itr 1 --batch_size 32 --learning_rate 0.005 >logs/LongForecasting/$model_name'_'Etth1.log 23 | 24 | -------------------------------------------------------------------------------- /examples/AutoTSF_ETTh1/AdaptiveHybridDFTNet/res/final_info.json: -------------------------------------------------------------------------------- 1 | {"ETTh1": {"means": {"mae": 0.4331462010741234, "mse": 0.42021340131759644}}} -------------------------------------------------------------------------------- /examples/AutoTSF_ETTh1/AdaptiveHybridDFTNet/utils/masking.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | class TriangularCausalMask(): 5 | def __init__(self, B, L, device="cpu"): 6 | mask_shape = [B, 1, L, L] 7 | with torch.no_grad(): 8 | self._mask = torch.triu(torch.ones(mask_shape, dtype=torch.bool), diagonal=1).to(device) 9 | 10 | @property 11 | def mask(self): 12 | return self._mask 13 | 14 | 15 | class ProbMask(): 16 | def __init__(self, B, H, L, index, scores, device="cpu"): 17 | _mask = torch.ones(L, scores.shape[-1], dtype=torch.bool).to(device).triu(1) 18 | _mask_ex = _mask[None, None, :].expand(B, H, L, scores.shape[-1]) 19 | indicator = _mask_ex[torch.arange(B)[:, None, None], 20 | torch.arange(H)[None, :, None], 21 | index, :].to(device) 22 | self._mask = indicator.view(scores.shape).to(device) 23 | 24 | @property 25 | def mask(self): 26 | return self._mask 27 | -------------------------------------------------------------------------------- /examples/AutoTSF_ETTh1/AdaptiveHybridDFTNet/utils/metrics.py: 
-------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def RSE(pred, true): 5 | return np.sqrt(np.sum((true - pred) ** 2)) / np.sqrt(np.sum((true - true.mean()) ** 2)) 6 | 7 | 8 | def CORR(pred, true): 9 | u = ((true - true.mean(0)) * (pred - pred.mean(0))).sum(0) 10 | d = np.sqrt(((true - true.mean(0)) ** 2 * (pred - pred.mean(0)) ** 2).sum(0)) 11 | d += 1e-12 12 | return 0.01*(u / d).mean(-1) 13 | 14 | 15 | def MAE(pred, true): 16 | return np.mean(np.abs(pred - true)) 17 | 18 | 19 | def MSE(pred, true): 20 | return np.mean((pred - true) ** 2) 21 | 22 | 23 | def RMSE(pred, true): 24 | return np.sqrt(MSE(pred, true)) 25 | 26 | 27 | def MAPE(pred, true): 28 | return np.mean(np.abs((pred - true) / true)) 29 | 30 | 31 | def MSPE(pred, true): 32 | return np.mean(np.square((pred - true) / true)) 33 | 34 | 35 | def metric(pred, true): 36 | mae = MAE(pred, true) 37 | mse = MSE(pred, true) 38 | rmse = RMSE(pred, true) 39 | mape = MAPE(pred, true) 40 | mspe = MSPE(pred, true) 41 | rse = RSE(pred, true) 42 | corr = CORR(pred, true) 43 | 44 | return mae, mse, rmse, mape, mspe, rse, corr 45 | -------------------------------------------------------------------------------- /examples/AutoTSF_ETTh1/Baseline/data_provider/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /examples/AutoTSF_ETTh1/Baseline/data_provider/data_factory.py: -------------------------------------------------------------------------------- 1 | from data_provider.data_loader import Dataset_ETT_hour, Dataset_ETT_minute, Dataset_Custom, Dataset_Pred 2 | from torch.utils.data import DataLoader 3 | 4 | data_dict = { 5 | 'ETTh1': Dataset_ETT_hour, 6 | 'ETTh2': Dataset_ETT_hour, 7 | 'ETTm1': Dataset_ETT_minute, 8 | 'ETTm2': Dataset_ETT_minute, 9 | 'custom': Dataset_Custom, 10 | } 11 | 12 | 13 | def data_provider(args, 
flag): 14 | Data = data_dict[args.data] 15 | timeenc = 0 if args.embed != 'timeF' else 1 16 | train_only = args.train_only 17 | 18 | if flag == 'test': 19 | shuffle_flag = False 20 | drop_last = False 21 | batch_size = args.batch_size 22 | freq = args.freq 23 | elif flag == 'pred': 24 | shuffle_flag = False 25 | drop_last = False 26 | batch_size = 1 27 | freq = args.freq 28 | Data = Dataset_Pred 29 | else: 30 | shuffle_flag = True 31 | drop_last = True 32 | batch_size = args.batch_size 33 | freq = args.freq 34 | 35 | data_set = Data( 36 | root_path=args.root_path, 37 | data_path=args.data_path, 38 | flag=flag, 39 | size=[args.seq_len, args.label_len, args.pred_len], 40 | features=args.features, 41 | target=args.target, 42 | timeenc=timeenc, 43 | freq=freq, 44 | train_only=train_only 45 | ) 46 | print(flag, len(data_set)) 47 | data_loader = DataLoader( 48 | data_set, 49 | batch_size=batch_size, 50 | shuffle=shuffle_flag, 51 | num_workers=args.num_workers, 52 | drop_last=drop_last) 53 | return data_set, data_loader 54 | -------------------------------------------------------------------------------- /examples/AutoTSF_ETTh1/Baseline/launcher.sh: -------------------------------------------------------------------------------- 1 | 2 | if [ ! -d "./logs" ]; then 3 | mkdir ./logs 4 | fi 5 | 6 | if [ ! 
-d "./logs/LongForecasting" ]; then 7 | mkdir ./logs/LongForecasting 8 | fi 9 | seq_len=336 10 | model_name=DLinear 11 | out_dir=$1 12 | python -u experiment.py \ 13 | --out_dir ${out_dir} \ 14 | --is_training 1 \ 15 | --root_path ./datasets/tsf/dataset/ \ 16 | --data_path ETTh1.csv \ 17 | --data ETTh1 \ 18 | --features M \ 19 | --seq_len $seq_len \ 20 | --enc_in 7 \ 21 | --des 'Exp' \ 22 | --itr 1 --batch_size 32 --learning_rate 0.005 >logs/LongForecasting/$model_name'_'Etth1.log 23 | 24 | -------------------------------------------------------------------------------- /examples/AutoTSF_ETTh1/Baseline/res/final_info.json: -------------------------------------------------------------------------------- 1 | {"ETTh1": {"means": {"mae": 0.43822748214006424, "mse": 0.4266631305217743}}} -------------------------------------------------------------------------------- /examples/AutoTSF_ETTh1/Baseline/utils/masking.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | class TriangularCausalMask(): 5 | def __init__(self, B, L, device="cpu"): 6 | mask_shape = [B, 1, L, L] 7 | with torch.no_grad(): 8 | self._mask = torch.triu(torch.ones(mask_shape, dtype=torch.bool), diagonal=1).to(device) 9 | 10 | @property 11 | def mask(self): 12 | return self._mask 13 | 14 | 15 | class ProbMask(): 16 | def __init__(self, B, H, L, index, scores, device="cpu"): 17 | _mask = torch.ones(L, scores.shape[-1], dtype=torch.bool).to(device).triu(1) 18 | _mask_ex = _mask[None, None, :].expand(B, H, L, scores.shape[-1]) 19 | indicator = _mask_ex[torch.arange(B)[:, None, None], 20 | torch.arange(H)[None, :, None], 21 | index, :].to(device) 22 | self._mask = indicator.view(scores.shape).to(device) 23 | 24 | @property 25 | def mask(self): 26 | return self._mask 27 | -------------------------------------------------------------------------------- /examples/AutoTSF_ETTh1/Baseline/utils/metrics.py: 
-------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def RSE(pred, true): 5 | return np.sqrt(np.sum((true - pred) ** 2)) / np.sqrt(np.sum((true - true.mean()) ** 2)) 6 | 7 | 8 | def CORR(pred, true): 9 | u = ((true - true.mean(0)) * (pred - pred.mean(0))).sum(0) 10 | d = np.sqrt(((true - true.mean(0)) ** 2 * (pred - pred.mean(0)) ** 2).sum(0)) 11 | d += 1e-12 12 | return 0.01*(u / d).mean(-1) 13 | 14 | 15 | def MAE(pred, true): 16 | return np.mean(np.abs(pred - true)) 17 | 18 | 19 | def MSE(pred, true): 20 | return np.mean((pred - true) ** 2) 21 | 22 | 23 | def RMSE(pred, true): 24 | return np.sqrt(MSE(pred, true)) 25 | 26 | 27 | def MAPE(pred, true): 28 | return np.mean(np.abs((pred - true) / true)) 29 | 30 | 31 | def MSPE(pred, true): 32 | return np.mean(np.square((pred - true) / true)) 33 | 34 | 35 | def metric(pred, true): 36 | mae = MAE(pred, true) 37 | mse = MSE(pred, true) 38 | rmse = RMSE(pred, true) 39 | mape = MAPE(pred, true) 40 | mspe = MSPE(pred, true) 41 | rse = RSE(pred, true) 42 | corr = CORR(pred, true) 43 | 44 | return mae, mse, rmse, mape, mspe, rse, corr 45 | -------------------------------------------------------------------------------- /images/framework.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Alpha-Innovator/NovelSeek/0787bf8b5a114d0ad2a5539c02bc9c8ef7504173/images/framework.png -------------------------------------------------------------------------------- /images/novelseek.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Alpha-Innovator/NovelSeek/0787bf8b5a114d0ad2a5539c02bc9c8ef7504173/images/novelseek.png --------------------------------------------------------------------------------