├── .devcontainer.json ├── .github └── workflows │ └── python_install.yml ├── .gitignore ├── .vscode └── settings.json ├── LICENSE ├── README.md ├── bucketed_scene_flow_eval ├── __init__.py ├── datasets │ ├── __init__.py │ ├── argoverse2 │ │ ├── __init__.py │ │ ├── argoverse_box_annotations.py │ │ ├── argoverse_raw_data.py │ │ ├── argoverse_scene_flow.py │ │ ├── av2_metacategories.py │ │ ├── dataset.py │ │ └── symlink_camera_data.py │ ├── nuscenes │ │ ├── __init__.py │ │ ├── dataset.py │ │ ├── nuscenes_metacategories.py │ │ ├── nuscenes_raw_data.py │ │ ├── nuscenes_scene_flow.py │ │ └── nuscenes_utils.py │ ├── orbbec_astra │ │ ├── __init__.py │ │ └── dataset.py │ └── waymoopen │ │ ├── __init__.py │ │ ├── dataset.py │ │ └── waymo_supervised_flow.py ├── datastructures │ ├── __init__.py │ ├── camera_projection.py │ ├── dataclasses.py │ ├── line_mesh.py │ ├── o3d_visualizer.py │ ├── pointcloud.py │ ├── rgb_image.py │ ├── se2.py │ └── se3.py ├── eval │ ├── __init__.py │ ├── base_per_frame_sceneflow_eval.py │ ├── bucketed_epe.py │ ├── eval.py │ └── threeway_epe.py ├── interfaces │ ├── __init__.py │ ├── abstract_dataset.py │ ├── abstract_sequence_loader.py │ └── base_dataset_abstract_seq_loader.py └── utils │ ├── __init__.py │ ├── glfw_key_ids.py │ └── loaders.py ├── build_pypi.sh ├── data_prep_scripts ├── argo │ ├── count_boxes.py │ ├── create_gt_flow.py │ ├── create_symlink_tree.py │ ├── duplicate_without_annotations.py │ └── plot_boxes.py ├── nuscenes │ ├── create_gt_flow.py │ └── visualize_nuscenes.py └── waymo │ ├── extract_flow_and_remove_ground.py │ └── rasterize_heightmap.py ├── docker ├── Dockerfile ├── Dockerfileav2 ├── Dockerfilewaymo └── bashrc ├── docs ├── AV2_EVAL_FORMAT.md ├── DATASTRUCTURES.md ├── GETTING_STARTED.md └── imgs │ ├── av2_gt_flow.gif │ ├── av2_lidar.gif │ └── av2_multicam.png ├── flow_lab ├── flow_lab.py └── o3d_raw_vis_demo.py ├── launch.sh ├── pyproject.toml ├── scripts ├── demo_3d.py ├── demo_rgb.py └── evals │ ├── __init__.py │ ├── av2_eval.py │ ├── av2_occ.py │ ├── setup_sparse_user_submission.py │ └── waymo_eval.py └── tests ├── datasets ├── argoverse2 │ ├── av2_box_tests.py │ ├── av2_small_tests.py │ └── av2_tiny_tests.py └── nuscenes │ └── nuscenes_tests.py_bak ├── datastructures └── rgb.py ├── eval ├── bucketed_epe.py └── threeway_epe.py ├── integration_tests.py ├── integration_tests.sh └── setup.sh /.devcontainer.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "bucketed_scene_flow_eval", 3 | "initializeCommand": "touch ${localWorkspaceFolder}/docker_history.txt", 4 | "build": { "dockerfile": "docker/Dockerfile" }, 5 | "privileged": true, 6 | "workspaceMount": "source=${localWorkspaceFolder},target=/project,type=bind,consistency=default", 7 | "workspaceFolder": "/project", 8 | "customizations": 9 | { 10 | "vscode": { 11 | "extensions": [ 12 | "ms-python.python", 13 | "ms-python.black-formatter", 14 | "ms-python.isort", 15 | "ms-python.vscode-pylance", 16 | "ms-python.mypy-type-checker", 17 | "ms-azuretools.vscode-docker", 18 | ] 19 | } 20 | }, 21 | "postStartCommand": "git config --global --add safe.directory ${containerWorkspaceFolder}", 22 | "mounts": [ 23 | "source=/bigdata,target=/bigdata,type=bind,consistency=default", 24 | "source=/efs,target=/efs,type=bind,consistency=default", 25 | "source=/efs,target=/Datasets,type=bind,consistency=default", 26 | "source=/efs2,target=/efs2,type=bind,consistency=default", 27 | 
"source=${localWorkspaceFolder}/docker_history.txt,target=/root/.bash_history,type=bind,consistency=default" 28 | ], 29 | "runArgs": [ 30 | "--gpus=all", 31 | "-h=bucketedflow", 32 | "--shm-size=16gb" 33 | ] 34 | } -------------------------------------------------------------------------------- /.github/workflows/python_install.yml: -------------------------------------------------------------------------------- 1 | name: Bucketed Scene Flow Eval full tests 2 | 3 | on: [push] 4 | 5 | jobs: 6 | run-test: 7 | runs-on: ubuntu-latest 8 | 9 | steps: 10 | - name: Checkout Repository 11 | uses: actions/checkout@v4 12 | 13 | - name: Install OpenGL 14 | run: sudo apt-get update && sudo apt-get install -y freeglut3-dev # This is required for Open3D to not blow up on startup 15 | 16 | - name: Set up Python 17 | uses: actions/setup-python@v5 18 | with: 19 | python-version: '3.10' 20 | 21 | - name: Pip Install 22 | run: | 23 | python -m pip install --upgrade pip 24 | pip install -e . 25 | 26 | - name: Download and Setup Data Files 27 | run: | 28 | ./tests/setup.sh 29 | 30 | - name: Run Tests 31 | run: | 32 | ./tests/integration_tests.sh 33 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | ### VisualStudioCode ### 2 | .vscode/**/* 3 | !.vscode/settings.json 4 | !.vscode/tasks.json 5 | !.vscode/launch.json 6 | !.vscode/extensions.json 7 | 8 | ### VisualStudioCode Patch ### 9 | # Ignore all local history of files 10 | **/.history 11 | 12 | .mypy_cache/ 13 | .pytest_cache/ 14 | scene_trajectory_benchmark/ 15 | **/__pycache__/ 16 | docker_history.txt 17 | dist/ 18 | **.pkl 19 | **.png 20 | **.zip 21 | eval_results/ -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "isort.args": [ 3 | "--profile", 4 | "black" 5 | ], 6 | "black-formatter.importStrategy": "fromEnvironment", 7 | "black-formatter.args": [ 8 | "--line-length", 9 | "100" 10 | ], 11 | "editor.formatOnSave": true, 12 | "editor.defaultFormatter": "ms-python.black-formatter", 13 | "editor.codeActionsOnSave": { 14 | "source.organizeImports": "explicit" 15 | }, 16 | "files.trimTrailingWhitespace": true, 17 | } -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2023 Kyle Vedder 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining 4 | a copy of this software and associated documentation files (the 5 | "Software"), to deal in the Software without restriction, including 6 | without limitation the rights to use, copy, modify, merge, publish, 7 | distribute, sublicense, and/or sell copies of the Software, and to 8 | permit persons to whom the Software is furnished to do so, subject to 9 | the following conditions: 10 | 11 | The above copyright notice and this permission notice shall be 12 | included in all copies or substantial portions of the Software. 13 | 14 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 15 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 16 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 17 | NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE 18 | LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 19 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION 20 | WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Bucketed Scene Flow Evaluation 2 | 3 | This repo provides the official implementation of _Bucket Normalized EPE_, as described in our paper [_I Can't Believe It's Not Scene Flow!_](https://vedder.io/trackflow.html) 4 | 5 | This repo provides: 6 | - A speed- and class-aware evaluation protocol called _Bucket Normalized EPE_. See our [paper](https://vedder.io/trackflow.html) for more details. 7 | - A standardized interface for working with Scene Flow datasets. 8 | - Evaluation infrastructure for the [Argoverse 2 2024 Scene Flow Challenge](https://eval.ai/web/challenges/challenge-page/2210/overview). 9 | 10 | Currently supported datasets: 11 | 12 | - Argoverse 2 (Human Labeled and [NSFP Pseudolabeled](https://github.com/kylevedder/BucketedSceneFlowEval/blob/master/docs/GETTING_STARTED.md#argoverse-2-nsfp-pseudolabels-new)) 13 | - Waymo Open (LiDAR only) 14 | - NuScenes (LiDAR only, beta) 15 | 16 | 17 | If you use this repository as part of a publication, please cite: 18 | 19 | ``` 20 | @inproceedings{khatri2024trackflow, 21 | author = {Khatri, Ishan and Vedder, Kyle and Peri, Neehar and Ramanan, Deva and Hays, James}, 22 | title = {{I Can't Believe It's Not Scene Flow!}}, 23 | booktitle = {European Conference on Computer Vision (ECCV)}, 24 | year = {2024}, 25 | pdf = {https://arxiv.org/abs/2403.04739}, 26 | website = {http://vedder.io/trackflow.html}, 27 | } 28 | ``` 29 | 30 | ## Installation 31 | 32 | ``` 33 | pip install bucketed-scene-flow-eval 34 | ``` 35 | 36 | ## Setup 37 | 38 | Follow our [Getting Started](docs/GETTING_STARTED.md) guide for setup instructions. 39 | 40 | ## Demo 41 | 42 | We provide demo scripts that show off the various features of the API.
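The datasets can also be constructed programmatically. Below is a minimal sketch of the Python API via `construct_dataset` (the `/efs/argoverse2/val` path is a placeholder for your local Argoverse 2 split, and this assumes the standard index-based dataset interface used by the demo scripts):

```
from bucketed_scene_flow_eval.datasets import construct_dataset

# Dataset name lookup is case-insensitive; args are forwarded to the constructor.
dataset = construct_dataset(
    name="Argoverse2CausalSceneFlow",
    args=dict(root_dir="/efs/argoverse2/val", subsequence_length=2),
)
frames = dataset[0]  # a list of TimeSyncedSceneFlowFrame objects
```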
43 | 44 | ### Argoverse 2: 45 | 46 | To render the LiDAR and multiple camera views of an Argoverse 2 sequence in 3D, run: 47 | 48 | ``` 49 | python scripts/demo_3d.py --dataset Argoverse2CausalSceneFlow --root_dir /efs/argoverse2/val/ --with_rgb --sequence_length 4 50 | ``` 51 | 52 | ![Argoverse 2 MultiCam](docs/imgs/av2_multicam.png) 53 | 54 | To render RGB frames with LiDAR superimposed on top, run: 55 | 56 | ``` 57 | python scripts/demo_rgb.py --dataset Argoverse2CausalSceneFlow --mode project_lidar --reduction_factor 16 --root_dir /efs/argoverse2/val --sequence_length 150 --save_dir /efs/av2_camera_render/ 58 | ``` 59 | 60 | ![Argoverse 2 LiDAR](docs/imgs/av2_lidar.gif) 61 | 62 | To render the flow field of an Argoverse 2 sequence, run: 63 | 64 | ``` 65 | python scripts/demo_rgb.py --dataset Argoverse2CausalSceneFlow --mode project_flow --reduction_factor 16 --root_dir /efs/argoverse2/val --sequence_length 150 --save_dir /efs/av2_camera_render/ --flow_dir 66 | ``` 67 | 68 | ![Argoverse 2 Flow](docs/imgs/av2_gt_flow.gif) 69 | 70 | ### Waymo Open: 71 | 72 | ``` 73 | python scripts/demo_3d.py --dataset WaymoOpenCausalSceneFlow --root_dir /efs/waymo_open_processed_flow/validation/ 74 | ``` 75 | 76 | ## Evaluating AV2 flow submissions 77 | 78 | To evaluate an AV2 Scene Flow challenge entry named `./submission_val.zip` against validation dataset masks `/efs/argoverse2/val_official_masks.zip`, run: 79 | 80 | ``` 81 | python scripts/evals/av2_eval.py /efs/argoverse2/val /efs/argoverse2/val_official_masks.zip ./submission_val.zip 82 | ``` 83 | 84 | ## Documentation 85 | 86 | See `docs/` for more documentation. 87 | -------------------------------------------------------------------------------- /bucketed_scene_flow_eval/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kylevedder/BucketedSceneFlowEval/a3ce5cf7226e467e0cea76cbc3ec443ddb733b28/bucketed_scene_flow_eval/__init__.py -------------------------------------------------------------------------------- /bucketed_scene_flow_eval/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | from bucketed_scene_flow_eval.datasets.argoverse2 import ( 2 | Argoverse2CausalSceneFlow, 3 | Argoverse2NonCausalSceneFlow, 4 | ) 5 | from bucketed_scene_flow_eval.datasets.orbbec_astra import OrbbecAstra 6 | from bucketed_scene_flow_eval.datasets.waymoopen import ( 7 | WaymoOpenCausalSceneFlow, 8 | WaymoOpenNonCausalSceneFlow, 9 | ) 10 | 11 | # from bucketed_scene_flow_eval.datasets.nuscenes import ( 12 | # NuScenesCausalSceneFlow, 13 | # NuScenesNonCausalSceneFlow, 14 | # ) 15 | from bucketed_scene_flow_eval.interfaces import AbstractDataset 16 | 17 | importable_classes = [ 18 | Argoverse2CausalSceneFlow, 19 | Argoverse2NonCausalSceneFlow, 20 | # NuScenesCausalSceneFlow, 21 | # NuScenesNonCausalSceneFlow, 22 | WaymoOpenCausalSceneFlow, 23 | WaymoOpenNonCausalSceneFlow, 24 | OrbbecAstra, 25 | ] 26 | name_to_class_lookup = {cls.__name__.lower(): cls for cls in importable_classes} 27 | 28 | 29 | def construct_dataset(name: str, args: dict) -> AbstractDataset: 30 | name = name.lower() 31 | if name not in name_to_class_lookup: 32 | raise ValueError(f"Unknown dataset name: {name}") 33 | 34 | cls = name_to_class_lookup[name] 35 | return cls(**args) 36 | -------------------------------------------------------------------------------- /bucketed_scene_flow_eval/datasets/argoverse2/__init__.py: --------------------------------------------------------------------------------
1 | from .argoverse_raw_data import ArgoverseRawSequence, ArgoverseRawSequenceLoader 2 | from .argoverse_scene_flow import ( 3 | ArgoverseNoFlowSequence, 4 | ArgoverseNoFlowSequenceLoader, 5 | ArgoverseSceneFlowSequence, 6 | ArgoverseSceneFlowSequenceLoader, 7 | ) 8 | 9 | from .argoverse_box_annotations import ( 10 | ArgoverseBoxAnnotationSequence, 11 | ArgoverseBoxAnnotationSequenceLoader, 12 | ) 13 | from .dataset import Argoverse2CausalSceneFlow, Argoverse2NonCausalSceneFlow 14 | 15 | __all__ = [ 16 | "Argoverse2CausalSceneFlow", 17 | "Argoverse2NonCausalSceneFlow", 18 | "ArgoverseNoFlowSequence", 19 | "ArgoverseNoFlowSequenceLoader", 20 | "ArgoverseRawSequence", 21 | "ArgoverseRawSequenceLoader", 22 | "ArgoverseSceneFlowSequence", 23 | "ArgoverseSceneFlowSequenceLoader", 24 | ] 25 | -------------------------------------------------------------------------------- /bucketed_scene_flow_eval/datasets/argoverse2/argoverse_box_annotations.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from pathlib import Path 3 | 4 | from bucketed_scene_flow_eval.datastructures import ( 5 | SE3, 6 | BoundingBox, 7 | TimeSyncedAVLidarData, 8 | TimeSyncedSceneFlowBoxFrame, 9 | TimeSyncedSceneFlowFrame, 10 | ) 11 | from bucketed_scene_flow_eval.utils import load_feather 12 | 13 | from .argoverse_scene_flow import ArgoverseNoFlowSequence, ArgoverseNoFlowSequenceLoader 14 | 15 | 16 | class ArgoverseBoxAnnotationSequence(ArgoverseNoFlowSequence): 17 | def __init__(self, *args, **kwargs): 18 | super().__init__(*args, **kwargs) 19 | self.timestamp_to_boxes = self._prep_bbox_annotations() 20 | 21 | def _prep_bbox_annotations(self) -> dict[int, list[BoundingBox]]: 22 | annotations_file = self.dataset_dir / "annotations.feather" 23 | assert annotations_file.exists(), f"Annotations file {annotations_file} does not exist" 24 | annotation_df = load_feather(annotations_file) 25 | # Index(['timestamp_ns', 'track_uuid', 'category', 'length_m', 'width_m', 26 | # 'height_m', 'qw', 'qx', 'qy', 'qz', 'tx_m', 'ty_m', 'tz_m', 27 | # 'num_interior_pts'], 28 | # dtype='object') 29 | 30 | # Convert to dictionary keyed by timestamp_ns int 31 | timestamp_to_annotations: dict[int, list[BoundingBox]] = {} 32 | for _, row in annotation_df.iterrows(): 33 | timestamp_ns = row["timestamp_ns"] 34 | if timestamp_ns not in timestamp_to_annotations: 35 | timestamp_to_annotations[timestamp_ns] = [] 36 | pose = SE3.from_rot_w_x_y_z_translation_x_y_z( 37 | row["qw"], 38 | row["qx"], 39 | row["qy"], 40 | row["qz"], 41 | row["tx_m"], 42 | row["ty_m"], 43 | row["tz_m"], 44 | ) 45 | timestamp_to_annotations[timestamp_ns].append( 46 | BoundingBox( 47 | pose=pose, 48 | length=row["length_m"], 49 | width=row["width_m"], 50 | height=row["height_m"], 51 | track_uuid=row["track_uuid"], 52 | category=row["category"], 53 | ) 54 | ) 55 | return timestamp_to_annotations 56 | 57 | def load( 58 | self, idx: int, relative_to_idx: int, with_flow: bool = False 59 | ) -> tuple[TimeSyncedSceneFlowBoxFrame, TimeSyncedAVLidarData]: 60 | scene_flow_frame, lidar_data = super().load(idx, relative_to_idx, with_flow) 61 | timestamp = self.timestamp_list[idx] 62 | boxes = self.timestamp_to_boxes.get(timestamp, []) 63 | return TimeSyncedSceneFlowBoxFrame(**vars(scene_flow_frame), boxes=boxes), lidar_data 64 | 65 | 66 | class ArgoverseBoxAnnotationSequenceLoader(ArgoverseNoFlowSequenceLoader): 67 | 68 | def _load_sequence_uncached(self, sequence_id: str) -> ArgoverseBoxAnnotationSequence: 69 | assert 
( 70 | sequence_id in self.sequence_id_to_raw_data 71 | ), f"sequence_id {sequence_id} does not exist" 72 | return ArgoverseBoxAnnotationSequence( 73 | sequence_id, 74 | self.sequence_id_to_raw_data[sequence_id], 75 | self.sequence_id_to_raw_data[sequence_id], 76 | with_classes=False, 77 | **self.load_sequence_kwargs, 78 | ) 79 | 80 | def cache_folder_name(self) -> str: 81 | return f"av2_box_data_use_gt_flow_{self.use_gt_flow}_raw_data_path_{self.raw_data_path}_No_flow_data_path" 82 | -------------------------------------------------------------------------------- /bucketed_scene_flow_eval/datasets/argoverse2/av2_metacategories.py: -------------------------------------------------------------------------------- 1 | BACKGROUND_CATEGORIES = ["BACKGROUND"] 2 | 3 | # These categories are ignored because of labeling oddities (e.g., moving stop signs mounted on the side of a school bus) 4 | ROAD_SIGNS = [ 5 | "BOLLARD", 6 | "CONSTRUCTION_BARREL", 7 | "CONSTRUCTION_CONE", 8 | "MOBILE_PEDESTRIAN_CROSSING_SIGN", 9 | "SIGN", 10 | "STOP_SIGN", 11 | "MESSAGE_BOARD_TRAILER", 12 | "TRAFFIC_LIGHT_TRAILER", 13 | ] 14 | 15 | PEDESTRIAN_CATEGORIES = ["PEDESTRIAN", "STROLLER", "WHEELCHAIR", "OFFICIAL_SIGNALER"] 16 | 17 | WHEELED_VRU = [ 18 | "BICYCLE", 19 | "BICYCLIST", 20 | "MOTORCYCLE", 21 | "MOTORCYCLIST", 22 | "WHEELED_DEVICE", 23 | "WHEELED_RIDER", 24 | ] 25 | 26 | CAR = ["REGULAR_VEHICLE"] 27 | 28 | OTHER_VEHICLES = [ 29 | "BOX_TRUCK", 30 | "LARGE_VEHICLE", 31 | "RAILED_VEHICLE", 32 | "TRUCK", 33 | "TRUCK_CAB", 34 | "VEHICULAR_TRAILER", 35 | "ARTICULATED_BUS", 36 | "BUS", 37 | "SCHOOL_BUS", 38 | ] 39 | 40 | BUCKETED_METACATAGORIES = { 41 | "BACKGROUND": BACKGROUND_CATEGORIES, 42 | "CAR": CAR, 43 | "PEDESTRIAN": PEDESTRIAN_CATEGORIES, 44 | "WHEELED_VRU": WHEELED_VRU, 45 | "OTHER_VEHICLES": OTHER_VEHICLES, 46 | } 47 | 48 | THREEWAY_EPE_METACATAGORIES = { 49 | "BACKGROUND": BACKGROUND_CATEGORIES, 50 | "FOREGROUND": PEDESTRIAN_CATEGORIES + WHEELED_VRU + CAR + OTHER_VEHICLES, 51 | } 52 | 53 | BUCKETED_VOLUME_METACATAGORIES = { 54 | "BACKGROUND": ["BACKGROUND"], 55 | "SMALL": ["SMALL"], 56 | "MEDIUM": ["MEDIUM"], 57 | "LARGE": ["LARGE"], 58 | } 59 | -------------------------------------------------------------------------------- /bucketed_scene_flow_eval/datasets/argoverse2/dataset.py: -------------------------------------------------------------------------------- 1 | import copy 2 | from pathlib import Path 3 | from typing import Optional, Union 4 | 5 | from bucketed_scene_flow_eval.datastructures import * 6 | from bucketed_scene_flow_eval.eval import ( 7 | BucketedEPEEvaluator, 8 | Evaluator, 9 | ThreeWayEPEEvaluator, 10 | ) 11 | from bucketed_scene_flow_eval.interfaces import ( 12 | CausalSeqLoaderDataset, 13 | EvalType, 14 | NonCausalSeqLoaderDataset, 15 | ) 16 | 17 | from .argoverse_box_annotations import ArgoverseBoxAnnotationSequenceLoader 18 | from .argoverse_raw_data import DEFAULT_POINT_CLOUD_RANGE, PointCloudRange 19 | from .argoverse_scene_flow import ( 20 | CATEGORY_MAP, 21 | ArgoverseNoFlowSequenceLoader, 22 | ArgoverseSceneFlowSequenceLoader, 23 | ) 24 | from .av2_metacategories import ( 25 | BUCKETED_METACATAGORIES, 26 | BUCKETED_VOLUME_METACATAGORIES, 27 | THREEWAY_EPE_METACATAGORIES, 28 | ) 29 | 30 | 31 | def _make_av2_evaluator(eval_type: EvalType, eval_args: dict) -> Evaluator: 32 | eval_args_copy = copy.deepcopy(eval_args) 33 | # Builds the evaluator object for this dataset.
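# The defaults below are injected only if the caller has not already set
# "meta_class_lookup" / "class_id_to_name" in eval_args, so user-supplied overrides
# always take precedence. BUCKETED_VOLUME_EPE reuses the bucketed evaluator but swaps
# the semantic categories for size-based pseudo-classes (SMALL / MEDIUM / LARGE).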
34 | if eval_type == EvalType.BUCKETED_EPE: 35 | if "meta_class_lookup" not in eval_args_copy: 36 | eval_args_copy["meta_class_lookup"] = BUCKETED_METACATAGORIES 37 | if "class_id_to_name" not in eval_args_copy: 38 | eval_args_copy["class_id_to_name"] = CATEGORY_MAP 39 | return BucketedEPEEvaluator(**eval_args_copy) 40 | elif eval_type == EvalType.THREEWAY_EPE: 41 | if "meta_class_lookup" not in eval_args_copy: 42 | eval_args_copy["meta_class_lookup"] = THREEWAY_EPE_METACATAGORIES 43 | if "class_id_to_name" not in eval_args_copy: 44 | eval_args_copy["class_id_to_name"] = CATEGORY_MAP 45 | return ThreeWayEPEEvaluator(**eval_args_copy) 46 | elif eval_type == EvalType.BUCKETED_VOLUME_EPE: 47 | if "meta_class_lookup" not in eval_args_copy: 48 | eval_args_copy["meta_class_lookup"] = BUCKETED_VOLUME_METACATAGORIES 49 | if "class_id_to_name" not in eval_args_copy: 50 | eval_args_copy["class_id_to_name"] = { 51 | -1: "BACKGROUND", 52 | 0: "SMALL", 53 | 1: "MEDIUM", 54 | 2: "LARGE", 55 | } 56 | return BucketedEPEEvaluator(**eval_args_copy) 57 | else: 58 | raise ValueError(f"Unknown eval type {eval_type}") 59 | 60 | 61 | class Argoverse2CausalSceneFlow(CausalSeqLoaderDataset): 62 | def __init__( 63 | self, 64 | root_dir: Union[Path, list[Path]], 65 | subsequence_length: int = 2, 66 | sliding_window_step_size: int | None = 1, 67 | with_ground: bool = True, 68 | cache_root: Path = Path("/tmp/"), 69 | use_gt_flow: bool = True, 70 | flow_data_path: Optional[Union[Path, list[Path]]] = None, 71 | eval_type: str = "bucketed_epe", 72 | eval_args=dict(), 73 | load_boxes: bool = False, 74 | load_flow: bool = True, 75 | use_cache=True, 76 | **kwargs, 77 | ) -> None: 78 | if load_boxes: 79 | self.sequence_loader = ArgoverseBoxAnnotationSequenceLoader( 80 | root_dir, 81 | **kwargs, 82 | ) 83 | elif load_flow: 84 | self.sequence_loader = ArgoverseSceneFlowSequenceLoader( 85 | root_dir, 86 | use_gt_flow=use_gt_flow, 87 | flow_data_path=flow_data_path, 88 | **kwargs, 89 | ) 90 | else: 91 | self.sequence_loader = ArgoverseNoFlowSequenceLoader( 92 | root_dir, 93 | **kwargs, 94 | ) 95 | super().__init__( 96 | sequence_loader=self.sequence_loader, 97 | subsequence_length=subsequence_length, 98 | with_ground=with_ground, 99 | idx_lookup_cache_root=cache_root, 100 | eval_type=eval_type, 101 | eval_args=eval_args, 102 | use_cache=use_cache, 103 | sliding_window_step_size=sliding_window_step_size, 104 | ) 105 | 106 | def evaluator(self) -> Evaluator: 107 | return _make_av2_evaluator(self.eval_type, self.eval_args) 108 | 109 | 110 | class Argoverse2NonCausalSceneFlow(NonCausalSeqLoaderDataset): 111 | def __init__( 112 | self, 113 | root_dir: Union[Path, list[Path]], 114 | subsequence_length: int = 2, 115 | sliding_window_step_size: int | None = None, 116 | with_ground: bool = True, 117 | cache_root: Path = Path("/tmp/"), 118 | use_gt_flow: bool = True, 119 | flow_data_path: Optional[Union[Path, list[Path]]] = None, 120 | eval_type: str = "bucketed_epe", 121 | eval_args=dict(), 122 | use_cache=True, 123 | load_boxes: bool = False, 124 | load_flow: bool = True, 125 | **kwargs, 126 | ) -> None: 127 | if load_boxes: 128 | self.sequence_loader = ArgoverseBoxAnnotationSequenceLoader( 129 | raw_data_path=root_dir, 130 | **kwargs, 131 | ) 132 | elif load_flow: 133 | self.sequence_loader = ArgoverseSceneFlowSequenceLoader( 134 | raw_data_path=root_dir, 135 | use_gt_flow=use_gt_flow, 136 | flow_data_path=flow_data_path, 137 | **kwargs, 138 | ) 139 | else: 140 | self.sequence_loader = ArgoverseNoFlowSequenceLoader( 141 | 
raw_data_path=root_dir, 142 | **kwargs, 143 | ) 144 | super().__init__( 145 | sequence_loader=self.sequence_loader, 146 | subsequence_length=subsequence_length, 147 | with_ground=with_ground, 148 | idx_lookup_cache_root=cache_root, 149 | eval_type=eval_type, 150 | eval_args=eval_args, 151 | use_cache=use_cache, 152 | sliding_window_step_size=sliding_window_step_size, 153 | ) 154 | 155 | def evaluator(self) -> Evaluator: 156 | return _make_av2_evaluator(self.eval_type, self.eval_args) 157 | -------------------------------------------------------------------------------- /bucketed_scene_flow_eval/datasets/argoverse2/symlink_camera_data.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import shutil 3 | from pathlib import Path 4 | 5 | import tqdm 6 | 7 | # Get path to missing_cam_frames AV2 and with_cam_frames AV2 copies. 8 | parser = argparse.ArgumentParser() 9 | parser.add_argument("missing_cam_frames", type=Path) 10 | parser.add_argument("with_cam_frames", type=Path) 11 | args = parser.parse_args() 12 | 13 | assert args.missing_cam_frames.is_dir(), f"{args.missing_cam_frames} is not a directory" 14 | assert args.with_cam_frames.is_dir(), f"{args.with_cam_frames} is not a directory" 15 | 16 | split_names = ["train", "val", "test"] 17 | 18 | for split in split_names: 19 | missing_frames_dir = args.missing_cam_frames / split 20 | with_frames_dir = args.with_cam_frames / split 21 | 22 | # iterate through directories in missing_frames_dir 23 | for missing_dir in tqdm.tqdm( 24 | list(missing_frames_dir.iterdir()), desc=f"Processing {split} split" 25 | ): 26 | # Corresponding data dir 27 | with_dir = with_frames_dir / missing_dir.name 28 | assert missing_dir.is_dir(), f"{missing_dir} is not a directory" 29 | assert with_dir.is_dir(), f"{with_dir} is not a directory" 30 | 31 | # Symlink the "sensors/cameras" directory from with_dir to missing_dir. 32 | # Remove the "sensors/cameras" directory from missing_dir if it exists. 
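# (Path.symlink_to's target_is_directory flag only has an effect on Windows; it is
# ignored on POSIX systems.)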
33 | missing_cameras_dir = missing_dir / "sensors/cameras" 34 | with_cameras_dir = with_dir / "sensors/cameras" 35 | assert with_cameras_dir.is_dir(), f"{with_cameras_dir} is not a directory" 36 | 37 | if missing_cameras_dir.is_dir(): 38 | shutil.rmtree(missing_cameras_dir) 39 | 40 | missing_cameras_dir.symlink_to(with_cameras_dir, target_is_directory=True) 41 | -------------------------------------------------------------------------------- /bucketed_scene_flow_eval/datasets/nuscenes/__init__.py: -------------------------------------------------------------------------------- 1 | from .nuscenes_raw_data import ( 2 | NuScenesRawSequence, 3 | NuScenesRawSequenceLoader, 4 | ) 5 | from .nuscenes_scene_flow import ( 6 | NuScenesNoFlowSequence, 7 | NuScenesNoFlowSequenceLoader, 8 | NuScenesSceneFlowSequence, 9 | NuScenesSceneFlowSequenceLoader 10 | ) 11 | from .dataset import NuScenesCausalSceneFlow, NuScenesNonCausalSceneFlow 12 | 13 | __all__ = [ 14 | "NuScenesCausalSceneFlow", 15 | "NuScenesNonCausalSceneFlow", 16 | "NuScenesNoFlowSequence", 17 | "NuScenesNoFlowSequenceLoader", 18 | "NuScenesRawSequence", 19 | "NuScenesRawSequenceLoader", 20 | "NuScenesSceneFlowSequence", 21 | "NuScenesSceneFlowSequenceLoader", 22 | ] 23 | -------------------------------------------------------------------------------- /bucketed_scene_flow_eval/datasets/nuscenes/dataset.py: -------------------------------------------------------------------------------- 1 | import copy 2 | from pathlib import Path 3 | from typing import Optional, Union 4 | 5 | from bucketed_scene_flow_eval.datasets.argoverse2.argoverse_raw_data import ( 6 | DEFAULT_POINT_CLOUD_RANGE, 7 | PointCloudRange, 8 | ) 9 | from bucketed_scene_flow_eval.datastructures import * 10 | from bucketed_scene_flow_eval.eval import ( 11 | BucketedEPEEvaluator, 12 | Evaluator, 13 | ThreeWayEPEEvaluator, 14 | ) 15 | from bucketed_scene_flow_eval.interfaces import ( 16 | CausalSeqLoaderDataset, 17 | EvalType, 18 | NonCausalSeqLoaderDataset, 19 | ) 20 | 21 | from .nuscenes_metacategories import ( 22 | BUCKETED_METACATAGORIES, 23 | THREEWAY_EPE_METACATAGORIES, 24 | ) 25 | from .nuscenes_scene_flow import ( 26 | CATEGORY_MAP, 27 | NuScenesNoFlowSequenceLoader, 28 | NuScenesSceneFlowSequenceLoader, 29 | ) 30 | 31 | 32 | def _make_evaluator(eval_type: EvalType, eval_args: dict) -> Evaluator: 33 | eval_args_copy = copy.deepcopy(eval_args) 34 | # Builds the evaluator object for this dataset. 
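# Note: unlike the Argoverse 2 version of this helper, there is no BUCKETED_VOLUME_EPE
# branch here, so requesting that eval type falls through to the ValueError below.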
35 | if eval_type == EvalType.BUCKETED_EPE: 36 | if "meta_class_lookup" not in eval_args_copy: 37 | eval_args_copy["meta_class_lookup"] = BUCKETED_METACATAGORIES 38 | if "class_id_to_name" not in eval_args_copy: 39 | eval_args_copy["class_id_to_name"] = CATEGORY_MAP 40 | return BucketedEPEEvaluator(**eval_args_copy) 41 | elif eval_type == EvalType.THREEWAY_EPE: 42 | if "meta_class_lookup" not in eval_args_copy: 43 | eval_args_copy["meta_class_lookup"] = THREEWAY_EPE_METACATAGORIES 44 | if "class_id_to_name" not in eval_args_copy: 45 | eval_args_copy["class_id_to_name"] = CATEGORY_MAP 46 | return ThreeWayEPEEvaluator(**eval_args_copy) 47 | else: 48 | raise ValueError(f"Unknown eval type {eval_type}") 49 | 50 | 51 | class NuScenesCausalSceneFlow(CausalSeqLoaderDataset): 52 | def __init__( 53 | self, 54 | root_dir: Union[Path, list[Path]], 55 | nuscenes_version: str, 56 | split: str, 57 | subsequence_length: int = 2, 58 | with_ground: bool = True, 59 | with_rgb: bool = False, 60 | cache_root: Path = Path("/tmp/"), 61 | use_gt_flow: bool = True, 62 | flow_data_path: Optional[Union[Path, list[Path]]] = None, 63 | eval_type: str = "bucketed_epe", 64 | eval_args=dict(), 65 | expected_camera_shape: tuple[int, int, int] = (1550, 2048, 3), 66 | point_cloud_range: Optional[PointCloudRange] = DEFAULT_POINT_CLOUD_RANGE, 67 | use_cache=True, 68 | load_flow: bool = True, 69 | ) -> None: 70 | if load_flow: 71 | self.sequence_loader = NuScenesSceneFlowSequenceLoader( 72 | raw_data_path=root_dir, 73 | nuscenes_version=nuscenes_version, 74 | split=split, 75 | with_rgb=with_rgb, 76 | use_gt_flow=use_gt_flow, 77 | flow_data_path=flow_data_path, 78 | expected_camera_shape=expected_camera_shape, 79 | point_cloud_range=point_cloud_range, 80 | ) 81 | else: 82 | self.sequence_loader = NuScenesNoFlowSequenceLoader( 83 | raw_data_path=root_dir, 84 | nuscenes_version=nuscenes_version, 85 | split=split, 86 | with_rgb=with_rgb, 87 | expected_camera_shape=expected_camera_shape, 88 | point_cloud_range=point_cloud_range, 89 | ) 90 | super().__init__( 91 | sequence_loader=self.sequence_loader, 92 | subsequence_length=subsequence_length, 93 | with_ground=with_ground, 94 | idx_lookup_cache_root=cache_root, 95 | eval_type=eval_type, 96 | eval_args=eval_args, 97 | use_cache=use_cache, 98 | ) 99 | 100 | def evaluator(self) -> Evaluator: 101 | return _make_evaluator(self.eval_type, self.eval_args) 102 | 103 | 104 | class NuScenesNonCausalSceneFlow(NonCausalSeqLoaderDataset): 105 | def __init__( 106 | self, 107 | root_dir: Union[Path, list[Path]], 108 | nuscenes_version: str, 109 | split: str, 110 | subsequence_length: int = 2, 111 | with_ground: bool = True, 112 | with_rgb: bool = False, 113 | cache_root: Path = Path("/tmp/"), 114 | use_gt_flow: bool = True, 115 | flow_data_path: Optional[Union[Path, list[Path]]] = None, 116 | eval_type: str = "bucketed_epe", 117 | eval_args=dict(), 118 | expected_camera_shape: tuple[int, int, int] = (1550, 2048, 3), 119 | use_cache=True, 120 | load_flow: bool = True, 121 | ) -> None: 122 | if load_flow: 123 | self.sequence_loader = NuScenesSceneFlowSequenceLoader( 124 | root_dir, 125 | nuscenes_version=nuscenes_version, 126 | split=split, 127 | with_rgb=with_rgb, 128 | use_gt_flow=use_gt_flow, 129 | flow_data_path=flow_data_path, 130 | expected_camera_shape=expected_camera_shape, 131 | ) 132 | else: 133 | self.sequence_loader = NuScenesNoFlowSequenceLoader( 134 | root_dir, nuscenes_version=nuscenes_version, split=split, with_rgb=with_rgb, expected_camera_shape=expected_camera_shape 135 | ) 136 | 
super().__init__( 137 | sequence_loader=self.sequence_loader, 138 | subsequence_length=subsequence_length, 139 | with_ground=with_ground, 140 | idx_lookup_cache_root=cache_root, 141 | eval_type=eval_type, 142 | eval_args=eval_args, 143 | use_cache=use_cache, 144 | ) 145 | 146 | def evaluator(self) -> Evaluator: 147 | return _make_evaluator(self.eval_type, self.eval_args) 148 | -------------------------------------------------------------------------------- /bucketed_scene_flow_eval/datasets/nuscenes/nuscenes_metacategories.py: -------------------------------------------------------------------------------- 1 | BACKGROUND_CATEGORIES = ["background"] 2 | 3 | # These categories are ignored because of labeling oddities 4 | STATIC_OBJECTS = [ 5 | "movable_object.barrier", 6 | "movable_object.debris", 7 | "movable_object.pushable_pullable", 8 | "movable_object.trafficcone", 9 | "static_object.bicycle_rack", 10 | ] 11 | 12 | PEDESTRIAN_CATEGORIES = [ 13 | "animal", 14 | "human.pedestrian.adult", 15 | "human.pedestrian.child", 16 | "human.pedestrian.construction_worker", 17 | "human.pedestrian.personal_mobility", 18 | "human.pedestrian.police_officer", 19 | "human.pedestrian.stroller", 20 | "human.pedestrian.wheelchair", 21 | ] 22 | 23 | WHEELED_VRU = ["vehicle.bicycle", "vehicle.motorcycle"] 24 | 25 | CAR = ["vehicle.car"] 26 | 27 | OTHER_VEHICLES = [ 28 | "vehicle.bus.bendy", 29 | "vehicle.bus.rigid", 30 | "vehicle.construction", 31 | "vehicle.emergency.ambulance", 32 | "vehicle.emergency.police", 33 | "vehicle.trailer", 34 | "vehicle.truck", 35 | ] 36 | 37 | BUCKETED_METACATAGORIES = { 38 | "BACKGROUND": BACKGROUND_CATEGORIES, 39 | "CAR": CAR, 40 | "PEDESTRIAN": PEDESTRIAN_CATEGORIES, 41 | "WHEELED_VRU": WHEELED_VRU, 42 | "OTHER_VEHICLES": OTHER_VEHICLES, 43 | } 44 | 45 | THREEWAY_EPE_METACATAGORIES = { 46 | "BACKGROUND": BACKGROUND_CATEGORIES, 47 | "FOREGROUND": PEDESTRIAN_CATEGORIES + WHEELED_VRU + CAR + OTHER_VEHICLES, 48 | } 49 | -------------------------------------------------------------------------------- /bucketed_scene_flow_eval/datasets/orbbec_astra/__init__.py: -------------------------------------------------------------------------------- 1 | from .dataset import OrbbecAstra 2 | -------------------------------------------------------------------------------- /bucketed_scene_flow_eval/datasets/orbbec_astra/dataset.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | import numpy as np 4 | import open3d as o3d 5 | import tqdm 6 | 7 | from bucketed_scene_flow_eval.datastructures import ( 8 | SE3, 9 | ColoredSupervisedPointCloudFrame, 10 | EgoLidarFlow, 11 | PointCloud, 12 | PoseInfo, 13 | RGBFrameLookup, 14 | SemanticClassId, 15 | SupervisedPointCloudFrame, 16 | TimeSyncedSceneFlowFrame, 17 | ) 18 | from bucketed_scene_flow_eval.eval import EmptyEvaluator, Evaluator 19 | from bucketed_scene_flow_eval.interfaces import AbstractDataset, LoaderType 20 | from bucketed_scene_flow_eval.utils import load_feather, load_pickle 21 | 22 | 23 | class OrbbecAstra(AbstractDataset): 24 | def __init__( 25 | self, 26 | root_dir: Path, 27 | flow_dir: Path | None, 28 | subsequence_length: int = 2, 29 | extension_name: str = ".pkl", 30 | ) -> None: 31 | root_dir = Path(root_dir) 32 | self.data_dir = root_dir 33 | self.flow_dir = Path(flow_dir) if flow_dir is not None else None 34 | self.subsequence_length = subsequence_length 35 | self.pointclouds = [ 36 | self._load_file(pcd_file) 37 | for pcd_file in tqdm.tqdm( 38 |
sorted(root_dir.glob(f"*{extension_name}")), desc="Loading ORBBEC pointclouds" 39 | ) 40 | ] 41 | assert ( 42 | len(self.pointclouds) >= subsequence_length 43 | ), f"Need at least {subsequence_length} frames, found {len(self.pointclouds)} in {root_dir}/*{extension_name}" 44 | 45 | # Magic numbers to scale the pointclouds to be in the same range as the argoverse data we are training on 46 | # These numbers are derived from looking at the pointclouds themselves 47 | 48 | self.scale = 35 49 | self.center_translation = -np.array([1.13756592, 0.21126675, -1.04425789]) 50 | self.bg_delete_x_max = (1.7 + self.center_translation[0]) * self.scale 51 | self.bg_delete_z_min = (-1.2 + self.center_translation[2]) * self.scale 52 | 53 | def __len__(self): 54 | return len(self.pointclouds) - self.subsequence_length + 1 55 | 56 | def _load_file(self, data_file: Path) -> tuple[PointCloud, np.ndarray]: 57 | data = load_pickle(data_file, verbose=False) 58 | points = data[:, :3] 59 | colors = data[:, 3:] 60 | return PointCloud(points), colors 61 | 62 | def evaluator(self) -> Evaluator: 63 | return EmptyEvaluator() 64 | 65 | def loader_type(self): 66 | return LoaderType.NON_CAUSAL 67 | 68 | def _load_pose_info(self) -> PoseInfo: 69 | # Convert from standard sensor coordinate system to right hand coordinate system we use. 70 | sensor_to_right_hand = SE3( 71 | # fmt: off 72 | rotation_matrix=np.array([[0, 0, 1], 73 | [-1, 0, 0], 74 | [0, -1, 0]]), 75 | # fmt: on 76 | translation=np.array([0.0, 0.0, 0.0]), 77 | ) 78 | 79 | # The sensor is rotated down 30 degrees, so we need to rotate it back up so the table is level. 80 | theta_degrees = 30 81 | theta_radians = np.radians(theta_degrees) 82 | rotation_matrix_y = np.array( 83 | [ 84 | [np.cos(theta_radians), 0, np.sin(theta_radians)], 85 | [0, 1, 0], 86 | [-np.sin(theta_radians), 0, np.cos(theta_radians)], 87 | ] 88 | ) 89 | right_hand_to_ego = SE3( 90 | rotation_matrix=rotation_matrix_y, 91 | translation=np.array([0.0, 0.0, 0.0]), 92 | ) 93 | 94 | return PoseInfo( 95 | sensor_to_ego=right_hand_to_ego.compose(sensor_to_right_hand) 96 | .translate(self.center_translation) 97 | .scale(self.scale), 98 | ego_to_global=SE3.identity(), 99 | ) 100 | 101 | def _load_flow(self, idx: int, pc_frame: SupervisedPointCloudFrame) -> EgoLidarFlow: 102 | if self.flow_dir is None: 103 | return EgoLidarFlow(full_flow=np.zeros_like(pc_frame.full_pc), mask=pc_frame.mask) 104 | flow_file = self.flow_dir / f"{idx:010d}.feather" 105 | if not flow_file.exists(): 106 | return EgoLidarFlow(full_flow=np.zeros_like(pc_frame.full_pc), mask=pc_frame.mask) 107 | flow_feather = load_feather(flow_file, verbose=False) 108 | flow_x = flow_feather["flow_tx_m"].to_numpy() 109 | flow_y = flow_feather["flow_ty_m"].to_numpy() 110 | flow_z = flow_feather["flow_tz_m"].to_numpy() 111 | flow = np.stack([flow_x, flow_y, flow_z], axis=-1) 112 | flow_mask = flow_feather["is_valid"].to_numpy() 113 | assert len(flow) == len( 114 | pc_frame.full_pc 115 | ), f"Expected {len(pc_frame.full_pc)} points, found {len(flow)}" 116 | assert np.all( 117 | flow_mask == pc_frame.mask 118 | ), f"Founds {np.sum(flow_mask)} masked points, expected {np.sum(pc_frame.mask)}" 119 | return EgoLidarFlow(full_flow=flow, mask=pc_frame.mask) 120 | 121 | def _get_sequence_frame(self, idx: int) -> TimeSyncedSceneFlowFrame: 122 | pc, color = self.pointclouds[idx] 123 | 124 | semantics = np.zeros(pc.shape[0], dtype=SemanticClassId) 125 | pose_info = self._load_pose_info() 126 | 127 | ego_pc = pc.transform(pose_info.sensor_to_ego) 128 | 129 | 
is_valid_mask = (ego_pc.points[:, 0] < self.bg_delete_x_max) & ( 130 | ego_pc.points[:, 2] > self.bg_delete_z_min 131 | ) 132 | pc_frame = ColoredSupervisedPointCloudFrame(pc, pose_info, is_valid_mask, semantics, color) 133 | rgb_frame_lookup = RGBFrameLookup.empty() 134 | gt_flow = self._load_flow(idx, pc_frame) 135 | 136 | return TimeSyncedSceneFlowFrame( 137 | pc=pc_frame, 138 | auxillary_pc=None, 139 | rgbs=rgb_frame_lookup, 140 | log_id=self.data_dir.name, 141 | log_idx=idx, 142 | log_timestamp=idx, 143 | flow=gt_flow, 144 | ) 145 | 146 | def __getitem__(self, idx: int) -> list[TimeSyncedSceneFlowFrame]: 147 | # Minibatch logic 148 | return [self._get_sequence_frame(idx + i) for i in range(self.subsequence_length)] 149 | -------------------------------------------------------------------------------- /bucketed_scene_flow_eval/datasets/waymoopen/__init__.py: -------------------------------------------------------------------------------- 1 | from .dataset import WaymoOpenCausalSceneFlow, WaymoOpenNonCausalSceneFlow 2 | from .waymo_supervised_flow import ( 3 | CATEGORY_MAP, 4 | WaymoSupervisedSceneFlowSequence, 5 | WaymoSupervisedSceneFlowSequenceLoader, 6 | ) 7 | 8 | __all__ = [ 9 | "WaymoOpenCausalSceneFlow", 10 | "WaymoOpenNonCausalSceneFlow", 11 | ] 12 | -------------------------------------------------------------------------------- /bucketed_scene_flow_eval/datasets/waymoopen/dataset.py: -------------------------------------------------------------------------------- 1 | import copy 2 | from pathlib import Path 3 | 4 | from bucketed_scene_flow_eval.datastructures import * 5 | from bucketed_scene_flow_eval.eval import ( 6 | BucketedEPEEvaluator, 7 | Evaluator, 8 | ThreeWayEPEEvaluator, 9 | ) 10 | from bucketed_scene_flow_eval.interfaces import ( 11 | CausalSeqLoaderDataset, 12 | EvalType, 13 | NonCausalSeqLoaderDataset, 14 | ) 15 | 16 | from .waymo_supervised_flow import CATEGORY_MAP, WaymoSupervisedSceneFlowSequenceLoader 17 | 18 | THREEWAY_EPE_METACATAGORIES = { 19 | "FOREGROUND": ["VEHICLE", "PEDESTRIAN", "SIGN", "CYCLIST"], 20 | "BACKGROUND": ["BACKGROUND"], 21 | } 22 | 23 | 24 | def _make_waymo_evaluator(eval_type: EvalType, eval_args: dict) -> Evaluator: 25 | eval_args_copy = copy.deepcopy(eval_args) 26 | # Builds the evaluator object for this dataset. 
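# Unlike the AV2 evaluator, BUCKETED_EPE here keeps the raw Waymo categories (no
# meta_class_lookup default is injected); only THREEWAY_EPE remaps them into
# FOREGROUND / BACKGROUND.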
27 | if eval_type == EvalType.BUCKETED_EPE: 28 | if "class_id_to_name" not in eval_args_copy: 29 | eval_args_copy["class_id_to_name"] = CATEGORY_MAP 30 | return BucketedEPEEvaluator(**eval_args_copy) 31 | elif eval_type == EvalType.THREEWAY_EPE: 32 | if "meta_class_lookup" not in eval_args_copy: 33 | eval_args_copy["meta_class_lookup"] = THREEWAY_EPE_METACATAGORIES 34 | if "class_id_to_name" not in eval_args_copy: 35 | eval_args_copy["class_id_to_name"] = CATEGORY_MAP 36 | return ThreeWayEPEEvaluator(**eval_args_copy) 37 | else: 38 | raise ValueError(f"Unknown eval type {eval_type}") 39 | 40 | 41 | class WaymoOpenCausalSceneFlow(CausalSeqLoaderDataset): 42 | def __init__( 43 | self, 44 | root_dir: Path, 45 | flow_folder: Path | None = None, 46 | subsequence_length: int = 2, 47 | cache_root: Path = Path("/tmp/"), 48 | eval_type: str = "bucketed_epe", 49 | with_rgb: bool = True, 50 | use_cache: bool = True, 51 | log_subset: list[str] | None = None, 52 | eval_args=dict(), 53 | ) -> None: 54 | self.sequence_loader = WaymoSupervisedSceneFlowSequenceLoader( 55 | root_dir, log_subset=log_subset, with_rgb=with_rgb, flow_dir=flow_folder 56 | ) 57 | super().__init__( 58 | sequence_loader=self.sequence_loader, 59 | subsequence_length=subsequence_length, 60 | with_ground=True, 61 | idx_lookup_cache_root=cache_root, 62 | eval_type=eval_type, 63 | eval_args=eval_args, 64 | use_cache=use_cache, 65 | ) 66 | 67 | def evaluator(self) -> Evaluator: 68 | return _make_waymo_evaluator(self.eval_type, self.eval_args) 69 | 70 | 71 | class WaymoOpenNonCausalSceneFlow(NonCausalSeqLoaderDataset): 72 | def __init__( 73 | self, 74 | root_dir: Path, 75 | subsequence_length: int = 2, 76 | cache_root: Path = Path("/tmp/"), 77 | eval_type: str = "bucketed_epe", 78 | with_rgb: bool = True, 79 | use_cache: bool = True, 80 | log_subset: list[str] | None = None, 81 | eval_args=dict(), 82 | ) -> None: 83 | self.sequence_loader = WaymoSupervisedSceneFlowSequenceLoader( 84 | root_dir, log_subset=log_subset 85 | ) 86 | super().__init__( 87 | sequence_loader=self.sequence_loader, 88 | subsequence_length=subsequence_length, 89 | with_ground=True, 90 | idx_lookup_cache_root=cache_root, 91 | eval_type=eval_type, 92 | eval_args=eval_args, 93 | use_cache=use_cache, 94 | ) 95 | 96 | def evaluator(self) -> Evaluator: 97 | return _make_waymo_evaluator(self.eval_type, self.eval_args) 98 | -------------------------------------------------------------------------------- /bucketed_scene_flow_eval/datastructures/__init__.py: -------------------------------------------------------------------------------- 1 | from .camera_projection import CameraModel, CameraProjection 2 | from .dataclasses import ( 3 | BoundingBox, 4 | ColoredSupervisedPointCloudFrame, 5 | EgoLidarDistance, 6 | EgoLidarFlow, 7 | MaskArray, 8 | PointCloudFrame, 9 | PoseInfo, 10 | RGBFrame, 11 | RGBFrameLookup, 12 | SemanticClassId, 13 | SemanticClassIdArray, 14 | SupervisedPointCloudFrame, 15 | TimeSyncedAVLidarData, 16 | TimeSyncedBaseAuxilaryData, 17 | TimeSyncedRawFrame, 18 | TimeSyncedSceneFlowBoxFrame, 19 | TimeSyncedSceneFlowFrame, 20 | VectorArray, 21 | ) 22 | from .line_mesh import LineMesh 23 | from .o3d_visualizer import O3DVisualizer 24 | from .pointcloud import PointCloud, from_fixed_array, to_fixed_array 25 | from .rgb_image import RGBImage, RGBImageCrop 26 | from .se2 import SE2 27 | from .se3 import SE3 28 | 29 | __all__ = [ 30 | "CameraModel", 31 | "CameraProjection", 32 | "EgoLidarFlow", 33 | "EgoLidarDistance", 34 | "MaskArray", 35 | "PointCloudFrame", 36 | 
"PoseInfo", 37 | "RGBFrame", 38 | "RGBFrameLookup", 39 | "SemanticClassId", 40 | "SemanticClassIdArray", 41 | "SupervisedPointCloudFrame", 42 | "ColoredSupervisedPointCloudFrame", 43 | "TimeSyncedSceneFlowBoxFrame", 44 | "TimeSyncedSceneFlowFrame", 45 | "TimeSyncedAVLidarData", 46 | "TimeSyncedBaseAuxilaryData", 47 | "TimeSyncedRawFrame", 48 | "TimeSyncedSceneFlowFrame", 49 | "VectorArray", 50 | "O3DVisualizer", 51 | "PointCloud", 52 | "from_fixed_array", 53 | "to_fixed_array", 54 | "RGBImage", 55 | "RGBImageCrop", 56 | "SE2", 57 | "SE3", 58 | ] 59 | -------------------------------------------------------------------------------- /bucketed_scene_flow_eval/datastructures/line_mesh.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import open3d as o3d 3 | 4 | 5 | def align_vector_to_another(a=np.array([0, 0, 1]), b=np.array([1, 0, 0])): 6 | """ 7 | Aligns vector a to vector b with axis angle rotation 8 | """ 9 | if np.array_equal(a, b): 10 | return None, None 11 | axis_ = np.cross(a, b) 12 | axis_ = axis_ / np.linalg.norm(axis_) 13 | angle = np.arccos(np.dot(a, b)) 14 | 15 | return axis_, angle 16 | 17 | 18 | def normalized(a, axis=-1, order=2): 19 | """Normalizes a numpy array of points""" 20 | l2 = np.atleast_1d(np.linalg.norm(a, order, axis)) 21 | l2[l2 == 0] = 1 22 | return a / np.expand_dims(l2, axis), l2 23 | 24 | 25 | class LineMesh(object): 26 | def __init__(self, points, lines=None, colors=[0, 1, 0], radius=0.15): 27 | """Creates a line represented as sequence of cylinder triangular meshes 28 | 29 | Arguments: 30 | points {ndarray} -- Numpy array of ponts Nx3. 31 | 32 | Keyword Arguments: 33 | lines {list[list] or None} -- List of point index pairs denoting line segments. If None, implicit lines from ordered pairwise points. 
(default: {None}) 34 | colors {list} -- list of colors, or single color of the line (default: {[0, 1, 0]}) 35 | radius {float} -- radius of cylinder (default: {0.15}) 36 | """ 37 | self.points = np.array(points) 38 | self.lines = ( 39 | np.array(lines) if lines is not None else self.lines_from_ordered_points(self.points) 40 | ) 41 | self.colors = np.array(colors) 42 | self.radius = radius 43 | self.cylinder_segments = [] 44 | 45 | self._create_line_mesh() 46 | 47 | @staticmethod 48 | def lines_from_ordered_points(points): 49 | lines = [[i, i + 1] for i in range(0, points.shape[0] - 1, 1)] 50 | return np.array(lines) 51 | 52 | def _create_line_mesh(self): 53 | first_points = self.points[self.lines[:, 0], :] 54 | second_points = self.points[self.lines[:, 1], :] 55 | line_segments = second_points - first_points 56 | line_segments_unit, line_lengths = normalized(line_segments) 57 | 58 | z_axis = np.array([0, 0, 1]) 59 | # Create triangular mesh cylinder segments of line 60 | for i in range(line_segments_unit.shape[0]): 61 | line_segment = line_segments_unit[i, :] 62 | line_length = line_lengths[i] 63 | # get axis angle rotation to align cylinder with line segment 64 | axis, angle = align_vector_to_another(z_axis, line_segment) 65 | # Get translation vector 66 | translation = first_points[i, :] + line_segment * line_length * 0.5 67 | # create cylinder and apply transformations 68 | cylinder_segment = o3d.geometry.TriangleMesh.create_cylinder(self.radius, line_length) 69 | cylinder_segment = cylinder_segment.translate(translation, relative=False) 70 | if axis is not None: 71 | axis_a = axis * angle 72 | cylinder_segment = cylinder_segment.rotate( 73 | R=o3d.geometry.get_rotation_matrix_from_axis_angle(axis_a), 74 | center=cylinder_segment.get_center(), 75 | ) 76 | # cylinder_segment = cylinder_segment.rotate( 77 | # axis_a, center=True, type=o3d.geometry.RotationType.AxisAngle) 78 | # color cylinder 79 | color = self.colors if self.colors.ndim == 1 else self.colors[i, :] 80 | cylinder_segment.paint_uniform_color(color) 81 | 82 | self.cylinder_segments.append(cylinder_segment) 83 | 84 | def add_line(self, vis): 85 | """Adds this line to the visualizer""" 86 | for cylinder in self.cylinder_segments: 87 | vis.add_geometry(cylinder) 88 | 89 | def remove_line(self, vis): 90 | """Removes this line from the visualizer""" 91 | for cylinder in self.cylinder_segments: 92 | vis.remove_geometry(cylinder) 93 | -------------------------------------------------------------------------------- /bucketed_scene_flow_eval/datastructures/o3d_visualizer.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | from pathlib import Path 3 | from typing import Optional, Union 4 | 5 | import numpy as np 6 | import open3d as o3d 7 | 8 | from .dataclasses import EgoLidarFlow, PointCloudFrame, RGBFrame, VectorArray 9 | from .line_mesh import LineMesh 10 | from .pointcloud import PointCloud 11 | from .se3 import SE3 12 | 13 | ColorType = Union[np.ndarray, tuple[float, float, float], list[tuple[float, float, float]]] 14 | 15 | 16 | class O3DVisualizer: 17 | def __init__( 18 | self, point_size: float = 0.1, line_width: float = 1.0, add_world_frame: bool = True 19 | ): 20 | self.point_size = point_size 21 | self.line_width = line_width 22 | self.geometry_list = [] 23 | 24 | if add_world_frame: 25 | self.add_world_frame() 26 | 27 | def add_world_frame(self): 28 | world_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=1) 29 | self.add_geometry(world_frame) 30 | 31
| def add_geometry(self, geometry): 32 | if isinstance(geometry, list): 33 | for g in geometry: 34 | if "to_o3d" in dir(g): 35 | g = g.to_o3d() 36 | self.add_geometry(g) 37 | else: 38 | self.geometry_list.append(geometry) 39 | 40 | def add_global_pc_frame( 41 | self, 42 | pc_frame: PointCloudFrame, 43 | color: Optional[ColorType] = None, 44 | ): 45 | self.add_pointcloud(pc_frame.global_pc, color=color) 46 | 47 | def _paint_o3d_color(self, o3d_geom, color: ColorType): 48 | assert color is not None, "Expected color to be not None" 49 | color = np.array(color) 50 | if color.ndim == 1: 51 | o3d_geom.paint_uniform_color(color) 52 | elif color.ndim == 2: 53 | assert len(color) == len( 54 | o3d_geom.points 55 | ), f"Expected color to have length {len(o3d_geom.points)}, got {len(color)} instead" 56 | o3d_geom.colors = o3d.utility.Vector3dVector(color) 57 | 58 | def add_lineset( 59 | self, 60 | p1s: Union[VectorArray, PointCloud], 61 | p2s: Union[VectorArray, PointCloud], 62 | color: Optional[ColorType] = None, 63 | ): 64 | # Convert to PointClouds 65 | if isinstance(p1s, np.ndarray): 66 | # Ensure it's Nx3 67 | assert ( 68 | p1s.ndim == 2 and p1s.shape[1] == 3 69 | ), f"Expected p1s to be a Nx3 array, got {p1s.shape} instead" 70 | p1s = PointCloud(p1s) 71 | if isinstance(p2s, np.ndarray): 72 | # Ensure it's Nx3 73 | assert ( 74 | p2s.ndim == 2 and p2s.shape[1] == 3 75 | ), f"Expected p2s to be a Nx3 array, got {p2s.shape} instead" 76 | p2s = PointCloud(p2s) 77 | 78 | assert len(p1s) == len( 79 | p2s 80 | ), f"Expected p1s and p2s to have the same length, got {len(p1s)} and {len(p2s)} instead" 81 | 82 | # Convert to o3d 83 | p1s_o3d = p1s.to_o3d() 84 | p2s_o3d = p2s.to_o3d() 85 | 86 | corrispondences = [(i, i) for i in range(len(p1s))] 87 | lineset = o3d.geometry.LineSet.create_from_point_cloud_correspondences( 88 | p1s_o3d, p2s_o3d, corrispondences 89 | ) 90 | if color is not None: 91 | self._paint_o3d_color(lineset, color) 92 | 93 | self.add_geometry(lineset) 94 | 95 | def add_global_flow( 96 | self, pc_frame: PointCloudFrame, ego_flow: EgoLidarFlow, color: Optional[ColorType] = None 97 | ): 98 | # Add lineset for flow vectors 99 | ego_pc1 = pc_frame.full_pc.mask_points(ego_flow.mask) 100 | ego_p2 = ego_pc1.flow(ego_flow.valid_flow) 101 | global_pc1 = ego_pc1.transform(pc_frame.global_pose) 102 | global_pc2 = ego_p2.transform(pc_frame.global_pose) 103 | # self.add_pointcloud(global_pc1, color=(0, 1, 0)) 104 | # self.add_pointcloud(global_pc2, color=(0, 0, 1)) 105 | self.add_lineset(global_pc1, global_pc2, color=color) 106 | 107 | def add_global_rgb_frame(self, rgb_frame: RGBFrame): 108 | image_plane_pc, colors = rgb_frame.camera_projection.image_to_image_plane_pc( 109 | rgb_frame.rgb, depth=20 110 | ) 111 | image_plane_pc = image_plane_pc.transform(rgb_frame.pose.sensor_to_global) 112 | self.add_pointcloud(image_plane_pc, color=colors) 113 | 114 | def add_pointcloud( 115 | self, 116 | pc: PointCloud, 117 | pose: SE3 = SE3.identity(), 118 | color: Optional[ColorType] = None, 119 | ): 120 | pc = pc.transform(pose) 121 | pc_o3d = pc.to_o3d() 122 | if color is not None: 123 | self._paint_o3d_color(pc_o3d, color) 124 | self.add_geometry(pc_o3d) 125 | 126 | def add_sphere(self, location: np.ndarray, radius: float, color: tuple[float, float, float]): 127 | sphere = o3d.geometry.TriangleMesh.create_sphere(radius=radius, resolution=2) 128 | sphere = sphere.translate(location) 129 | sphere.paint_uniform_color(color) 130 | self.add_geometry(sphere) 131 | 132 | def add_spheres( 133 | self, 134 | locations: 
list[np.ndarray], 135 | radius: float, 136 | colors: list[tuple[float, float, float]], 137 | ): 138 | assert len(locations) == len( 139 | colors 140 | ), f"Expected locations and colors to have the same length, got {len(locations)} and {len(colors)} instead" 141 | triangle_mesh = o3d.geometry.TriangleMesh() 142 | for i, location in enumerate(locations): 143 | sphere = o3d.geometry.TriangleMesh.create_sphere(radius=radius, resolution=2) 144 | sphere = sphere.translate(location) 145 | sphere.paint_uniform_color(colors[i]) 146 | triangle_mesh += sphere 147 | self.add_geometry(triangle_mesh) 148 | 149 | def add_pose(self, pose: SE3): 150 | self.add_geometry(pose.to_o3d(simple=True)) 151 | 152 | def add_trajectories(self, points_array: np.ndarray): 153 | # points_array: (n_trajectories, n_points, 3) 154 | assert ( 155 | points_array.ndim == 3 156 | ), f"Expected points_array to have shape (n_trajectories, n_points, 3), got {points_array.shape} instead" 157 | assert ( 158 | points_array.shape[2] == 3 159 | ), f"Expected points_array to have shape (n_trajectories, n_points, 3), got {points_array.shape} instead" 160 | 161 | n_trajectories = points_array.shape[0] 162 | n_points_per_trajectory = points_array.shape[1] 163 | 164 | # trajectories are now in sequence 165 | flat_point_array = points_array.reshape(-1, 3) 166 | 167 | n_to_np1_array = np.array( 168 | [ 169 | np.arange(n_trajectories * n_points_per_trajectory), 170 | np.arange(n_trajectories * n_points_per_trajectory) + 1, 171 | ] 172 | ).T 173 | keep_mask = np.ones(len(n_to_np1_array), dtype=bool) 174 | keep_mask[(n_points_per_trajectory - 1) :: n_points_per_trajectory] = False 175 | 176 | # print(n_to_np1_array) 177 | # print(keep_mask) 178 | flat_index_array = n_to_np1_array[keep_mask] 179 | 180 | # print(flat_point_array) 181 | # print(flat_index_array) 182 | 183 | line_set = o3d.geometry.LineSet() 184 | line_set.points = o3d.utility.Vector3dVector(flat_point_array) 185 | line_set.lines = o3d.utility.Vector2iVector(flat_index_array) 186 | # line_set.colors = o3d.utility.Vector3dVector( 187 | # np.tile(np.array(color), (len(trajectory) - 1, 1))) 188 | self.add_geometry(line_set) 189 | 190 | def add_trajectory( 191 | self, 192 | trajectory: list[np.ndarray], 193 | color: tuple[float, float, float], 194 | radius: float = 0.05, 195 | ): 196 | for i in range(len(trajectory) - 1): 197 | self.add_sphere(trajectory[i], radius, color) 198 | 199 | points = o3d.utility.Vector3dVector(trajectory) 200 | lines = o3d.utility.Vector2iVector( 201 | np.array([[i, i + 1] for i in range(len(trajectory) - 1)]) 202 | ) 203 | colors = o3d.utility.Vector3dVector(np.tile(np.array(color), (len(trajectory) - 1, 1))) 204 | 205 | line_mesh = LineMesh(points=points, lines=lines, colors=colors, radius=self.line_width / 20) 206 | self.add_geometry(line_mesh.cylinder_segments) 207 | 208 | def render(self, vis, reset_view: bool = True): 209 | for geometry in self.geometry_list: 210 | vis.add_geometry(geometry, reset_bounding_box=reset_view) 211 | 212 | def run(self, vis=o3d.visualization.Visualizer()): 213 | print("Running visualizer on geometry list of length", len(self.geometry_list)) 214 | vis.create_window(window_name="Benchmark Visualizer") 215 | 216 | ro = vis.get_render_option() 217 | ro.point_size = self.point_size 218 | 219 | self.render(vis) 220 | 221 | vis.run() 222 | 223 | 224 | class O3DCallbackVisualizer(O3DVisualizer): 225 | def __init__(self, screenshot_path: Path = Path() / "screenshots", *args, **kwargs): 226 | self.screenshot_path = screenshot_path 227 
| super().__init__(*args, **kwargs) 228 | 229 | def _get_screenshot_path(self) -> Path: 230 | return self.screenshot_path / f"{datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}.png" 231 | 232 | def save_screenshot(self, vis: o3d.visualization.VisualizerWithKeyCallback): 233 | save_name = self._get_screenshot_path() 234 | save_name.parent.mkdir(exist_ok=True, parents=True) 235 | vis.capture_screen_image(str(save_name)) 236 | 237 | def _register_callbacks(self, vis: o3d.visualization.VisualizerWithKeyCallback): 238 | vis.register_key_callback(ord("S"), self.save_screenshot) 239 | 240 | def run(self, vis=o3d.visualization.VisualizerWithKeyCallback()): 241 | return super().run(vis) 242 | -------------------------------------------------------------------------------- /bucketed_scene_flow_eval/datastructures/pointcloud.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import open3d as o3d 3 | 4 | from .se3 import SE3 5 | 6 | 7 | def to_fixed_array(array: np.ndarray, max_len: int, pad_val=np.nan) -> np.ndarray: 8 | if len(array) > max_len: 9 | np.random.RandomState(len(array)).shuffle(array) 10 | sliced_pts = array[:max_len] 11 | return sliced_pts 12 | else: 13 | pad_tuples = [(0, max_len - len(array))] 14 | for _ in range(array.ndim - 1): 15 | pad_tuples.append((0, 0)) 16 | return np.pad(array, pad_tuples, constant_values=pad_val) 17 | 18 | 19 | def from_fixed_array(array: np.ndarray) -> np.ndarray: 20 | if isinstance(array, np.ndarray): 21 | if len(array.shape) == 2: 22 | check_array = array[:, 0] 23 | elif len(array.shape) == 1: 24 | check_array = array 25 | else: 26 | raise ValueError(f"unknown array shape {array.shape}") 27 | are_valid_points = np.logical_not(np.isnan(check_array)) 28 | are_valid_points = are_valid_points.astype(bool) 29 | else: 30 | import torch 31 | 32 | if len(array.shape) == 2: 33 | check_array = array[:, 0] 34 | elif len(array.shape) == 1: 35 | check_array = array 36 | else: 37 | raise ValueError(f"unknown array shape {array.shape}") 38 | are_valid_points = torch.logical_not(torch.isnan(check_array)) 39 | are_valid_points = are_valid_points.bool() 40 | return array[are_valid_points] 41 | 42 | 43 | def make_image_pixel_coordinate_grid(image_shape: tuple) -> np.ndarray: 44 | assert len(image_shape) == 2, f"image_shape must be a 2-tuple, got {image_shape}" 45 | # X positions repeated for each row 46 | x_positions = np.tile(np.arange(image_shape[1]), (image_shape[0], 1)) 47 | # Y positions repeated for each column 48 | y_positions = np.tile(np.arange(image_shape[0]), (image_shape[1], 1)).T 49 | 50 | image_coordinates = ( 51 | np.stack([x_positions, y_positions], axis=2).astype(np.float32).reshape(-1, 2) 52 | ) 53 | return image_coordinates 54 | 55 | 56 | def camera_to_world_coordiantes(points: np.ndarray) -> np.ndarray: 57 | world_T_camera = np.array( 58 | [ 59 | [0, 0, 1], 60 | [-1, 0, 0], 61 | [0, -1, 0], 62 | ] 63 | ) 64 | return (world_T_camera @ points.T).T 65 | 66 | 67 | class PointCloud: 68 | def __init__(self, points: np.ndarray) -> None: 69 | assert points.ndim == 2, f"points must be a 2D array, got {points.ndim}" 70 | assert points.shape[1] == 3, f"points must be a Nx3 array, got {points.shape}" 71 | self.points = points 72 | 73 | def __eq__(self, o: object) -> bool: 74 | if not isinstance(o, PointCloud): 75 | return False 76 | return np.allclose(self.points, o.points) 77 | 78 | def __len__(self): 79 | return self.points.shape[0] 80 | 81 | def __repr__(self) -> str: 82 | return f"PointCloud with 
{len(self)} points" 83 | 84 | def __getitem__(self, idx): 85 | return self.points[idx] 86 | 87 | @staticmethod 88 | def from_depth_image(depth: np.ndarray, camera_projection: "CameraProjection") -> "PointCloud": 89 | assert depth.ndim == 2, f"depth must be a 2D array, got {depth.ndim}" 90 | image_coordinates = make_image_pixel_coordinate_grid(depth.shape) 91 | image_coordinate_depths = depth.reshape(-1, 1) 92 | 93 | points = camera_projection.to_camera(image_coordinates, image_coordinate_depths) 94 | finite_points = points[np.isfinite(points).all(axis=1)] 95 | return PointCloud(finite_points) 96 | 97 | @staticmethod 98 | def from_points_and_depth( 99 | image_coordinates: np.ndarray, 100 | image_coordinate_depths: np.ndarray, 101 | camera_projection: "CameraProjection", 102 | ) -> "PointCloud": 103 | return PointCloud(camera_projection.to_camera(image_coordinates, image_coordinate_depths)) 104 | 105 | def transform(self, se3: SE3) -> "PointCloud": 106 | assert isinstance(se3, SE3), f"se3 must be an SE3, got {type(se3)}, expected {SE3}" 107 | return PointCloud(se3.transform_points(self.points)) 108 | 109 | def transform_masked(self, se3: SE3, mask: np.ndarray) -> "PointCloud": 110 | assert isinstance(se3, SE3) 111 | assert mask.ndim == 1 112 | assert mask.shape[0] == len(self) 113 | updated_points = self.points.copy() 114 | updated_points[mask] = se3.transform_points(self.points[mask]) 115 | return PointCloud(updated_points) 116 | 117 | def translate(self, translation: np.ndarray) -> "PointCloud": 118 | assert translation.shape == (3,) 119 | return PointCloud(self.points + translation) 120 | 121 | def flow(self, flow: np.ndarray) -> "PointCloud": 122 | assert ( 123 | flow.shape == self.points.shape 124 | ), f"flow shape {flow.shape} must match point cloud shape {self.points.shape}" 125 | return PointCloud(self.points + flow) 126 | 127 | def flow_masked(self, flow: np.ndarray, mask: np.ndarray) -> "PointCloud": 128 | assert mask.ndim == 1, f"mask must be 1D, got {mask.ndim}" 129 | assert mask.dtype == bool, f"mask must be boolean, got {mask.dtype}" 130 | assert ( 131 | self.points.shape[0] == mask.shape[0] 132 | ), f"mask must have same length as point cloud, got {mask.shape[0]} and {self.points.shape[0]}" 133 | # check that flow has the same number of entries as the boolean mask. 
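# For example (hypothetical values): with mask = [True, False, True],
# flow must have shape (2, 3); row 0 displaces point 0 and row 1
# displaces point 2, while the masked-out point keeps its coordinates.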
134 | assert ( 135 | flow.shape[0] == mask.sum() 136 | ), f"flow must have same number of entries as the number of True values in the mask, got flow shape of {flow.shape[0]} and mask sum of {mask.sum()}" 137 | flow = flow.astype(np.float32) 138 | updated_points = self.points.copy() 139 | updated_points[mask] = self.points[mask] + flow 140 | return PointCloud(updated_points) 141 | 142 | def to_fixed_array(self, max_points: int) -> np.ndarray: 143 | return to_fixed_array(self.points, max_points) 144 | 145 | def matched_point_diffs(self, other: "PointCloud") -> np.ndarray: 146 | assert len(self) == len(other) 147 | return self.points - other.points 148 | 149 | def matched_point_distance(self, other: "PointCloud") -> np.ndarray: 150 | assert len(self) == len(other) 151 | return np.linalg.norm(self.matched_point_diffs(other), axis=1) 152 | 153 | @staticmethod 154 | def from_fixed_array(points) -> "PointCloud": 155 | return PointCloud(from_fixed_array(points)) 156 | 157 | def to_array(self) -> np.ndarray: 158 | return self.points 159 | 160 | def copy(self) -> "PointCloud": 161 | return PointCloud(self.points.copy()) 162 | 163 | def mask_points(self, mask: np.ndarray) -> "PointCloud": 164 | assert isinstance(mask, np.ndarray) 165 | assert mask.ndim == 1 166 | if mask.dtype == bool: 167 | assert mask.shape[0] == len(self) 168 | else: 169 | in_bounds = mask < len(self) 170 | assert np.all( 171 | in_bounds 172 | ), f"mask values must be in bounds, got {(~in_bounds).sum()} indices not in bounds out of {len(self)} points" 173 | 174 | return PointCloud(self.points[mask]) 175 | 176 | def within_region_mask(self, x_min, x_max, y_min, y_max, z_min, z_max) -> np.ndarray: 177 | mask = np.logical_and(self.points[:, 0] < x_max, self.points[:, 0] > x_min) 178 | mask = np.logical_and(mask, self.points[:, 1] < y_max) 179 | mask = np.logical_and(mask, self.points[:, 1] > y_min) 180 | mask = np.logical_and(mask, self.points[:, 2] < z_max) 181 | mask = np.logical_and(mask, self.points[:, 2] > z_min) 182 | return mask 183 | 184 | def within_region(self, x_min, x_max, y_min, y_max, z_min, z_max) -> "PointCloud": 185 | mask = self.within_region_mask(x_min, x_max, y_min, y_max, z_min, z_max) 186 | return self.mask_points(mask) 187 | 188 | @property 189 | def shape(self) -> tuple[int, int]: 190 | return self.points.shape 191 | 192 | def to_o3d(self) -> o3d.geometry.PointCloud: 193 | return o3d.geometry.PointCloud(o3d.utility.Vector3dVector(self.points)) 194 | -------------------------------------------------------------------------------- /bucketed_scene_flow_eval/datastructures/rgb_image.py: -------------------------------------------------------------------------------- 1 | import math 2 | from dataclasses import dataclass 3 | from typing import Optional 4 | 5 | import cv2 6 | import numpy as np 7 | 8 | 9 | @dataclass 10 | class RGBImageCrop: 11 | min_x: int 12 | min_y: int 13 | max_x: int 14 | max_y: int 15 | 16 | def __post_init__(self): 17 | assert ( 18 | self.min_x < self.max_x 19 | ), f"min_x must be less than max_x, got {self.min_x} and {self.max_x}" 20 | assert ( 21 | self.min_y < self.max_y 22 | ), f"min_y must be less than max_y, got {self.min_y} and {self.max_y}" 23 | # Ensure that the crop is non-negative 24 | assert self.min_x >= 0, f"min_x must be non-negative, got {self.min_x}" 25 | assert self.min_y >= 0, f"min_y must be non-negative, got {self.min_y}" 26 | 27 | @staticmethod 28 | def from_full_image(image: "RGBImage") -> "RGBImageCrop": 29 | return RGBImageCrop(0, 0, image.full_image.shape[1], 
image.full_image.shape[0]) 30 | 31 | def apply_to_image(self, image: "RGBImage") -> "RGBImage": 32 | return RGBImage(image.full_image[self.min_y : self.max_y, self.min_x : self.max_x]) 33 | 34 | def get_is_valid_mask(self, image: "RGBImage") -> np.ndarray: 35 | mask = np.zeros(image.full_image.shape[:2], dtype=bool) 36 | mask[self.min_y : self.max_y, self.min_x : self.max_x] = True 37 | return mask 38 | 39 | def resize(self, reduction_factor: float) -> "RGBImageCrop": 40 | return RGBImageCrop( 41 | int(math.floor(self.min_x / reduction_factor)), 42 | int(math.floor(self.min_y / reduction_factor)), 43 | int(math.ceil(self.max_x / reduction_factor)), 44 | int(math.ceil(self.max_y / reduction_factor)), 45 | ) 46 | 47 | 48 | class RGBImage: 49 | """ 50 | RGBImage is a wrapper around a numpy array of shape (H, W, 3) representing an RGB image. 51 | 52 | Each pixel is assumed to be in the range [0, 1] as a float32. 53 | """ 54 | 55 | def __init__(self, full_image: np.ndarray, valid_crop: Optional[RGBImageCrop] = None): 56 | assert ( 57 | len(full_image.shape) == 3 58 | ), f"image must have shape (H, W, 3), got {full_image.shape}" 59 | assert full_image.shape[2] == 3, f"image must have shape (H, W, 3), got {full_image.shape}" 60 | 61 | assert ( 62 | full_image.dtype == np.float32 63 | ), f"image must have dtype float32, got {full_image.dtype} with a min of {np.min(full_image)} and a max of {np.max(full_image)}" 64 | 65 | assert np.all(full_image >= 0) and np.all( 66 | full_image <= 1 67 | ), f"image must have values in range [0, 1], got min {np.min(full_image)} and max {np.max(full_image)}" 68 | 69 | self.full_image = full_image.astype(np.float32) 70 | 71 | if valid_crop is None: 72 | self.valid_crop = RGBImageCrop.from_full_image(self) 73 | else: 74 | assert isinstance( 75 | valid_crop, RGBImageCrop 76 | ), f"valid_crop must be an RGBImageCrop, got {type(valid_crop)}" 77 | self.valid_crop = valid_crop 78 | 79 | @staticmethod 80 | def white_image(shape: tuple[int, int]) -> "RGBImage": 81 | assert len(shape) == 2, f"shape must be a 2-tuple, got {shape}" 82 | return RGBImage(np.ones(shape + (3,), dtype=np.float32)) 83 | 84 | @staticmethod 85 | def white_image_like(image: "RGBImage") -> "RGBImage": 86 | return RGBImage.white_image(image.shape[:2]) 87 | 88 | @staticmethod 89 | def black_image(shape: tuple[int, int]) -> "RGBImage": 90 | assert len(shape) == 2, f"shape must be a 2-tuple, got {shape}" 91 | return RGBImage(np.zeros(shape + (3,), dtype=np.float32)) 92 | 93 | @staticmethod 94 | def black_image_like(image: "RGBImage") -> "RGBImage": 95 | return RGBImage.black_image(image.shape[:2]) 96 | 97 | def __repr__(self) -> str: 98 | return f"RGBImage(shape={self.full_image.shape}, dtype={self.full_image.dtype})" 99 | 100 | def copy(self) -> "RGBImage": 101 | return RGBImage(self.full_image.copy(), self.valid_crop) 102 | 103 | def rescale(self, reduction_factor: float) -> "RGBImage": 104 | new_shape = ( 105 | int(math.ceil(self.full_image.shape[1] / reduction_factor)), 106 | int(math.ceil(self.full_image.shape[0] / reduction_factor)), 107 | ) 108 | new_img = cv2.resize(self.full_image, new_shape) 109 | valid_crop = None 110 | if self.valid_crop is not None: 111 | valid_crop = self.valid_crop.resize(reduction_factor) 112 | return RGBImage(new_img, valid_crop) 113 | 114 | @property 115 | def masked_image(self) -> "RGBImage": 116 | return self.valid_crop.apply_to_image(self) 117 | 118 | def get_is_valid_mask(self) -> np.ndarray: 119 | return self.valid_crop.get_is_valid_mask(self) 120 | 121 | @property 122 |
def shape(self) -> tuple[int, int, int]: 123 | return self.full_image.shape 124 | -------------------------------------------------------------------------------- /bucketed_scene_flow_eval/datastructures/se2.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | class SE2: 5 | def __init__(self, rotation: np.ndarray, translation: np.ndarray) -> None: 6 | """Initialize. 7 | Args: 8 | rotation: np.ndarray of shape (2,2). 9 | translation: np.ndarray of shape (2,). 10 | Raises: 11 | AssertionError: if rotation or translation do not have the required shapes. 12 | """ 13 | assert rotation.shape == (2, 2) 14 | assert translation.shape == (2,) 15 | self.rotation = rotation 16 | self.translation = translation 17 | self.transform_matrix = np.eye(3) 18 | self.transform_matrix[:2, :2] = self.rotation 19 | self.transform_matrix[:2, 2] = self.translation 20 | 21 | def transform_point_cloud(self, point_cloud: np.ndarray) -> np.ndarray: 22 | """Apply the SE(2) transformation to point_cloud. 23 | Args: 24 | point_cloud: np.ndarray of shape (N, 2). 25 | Returns: 26 | transformed_point_cloud: np.ndarray of shape (N, 2). 27 | Raises: 28 | AssertionError: if point_cloud does not have the required shape. 29 | """ 30 | assert point_cloud.ndim == 2 31 | assert point_cloud.shape[1] == 2 32 | num_points = point_cloud.shape[0] 33 | homogeneous_pts = np.hstack([point_cloud, np.ones((num_points, 1))]) 34 | transformed_point_cloud = homogeneous_pts.dot(self.transform_matrix.T) 35 | return transformed_point_cloud[:, :2] 36 | 37 | def inverse(self) -> "SE2": 38 | """Return the inverse of the current SE2 transformation. 39 | For example, if the current object represents target_SE2_src, we will return instead src_SE2_target. 40 | Returns: 41 | inverse of this SE2 transformation. 42 | """ 43 | return SE2(rotation=self.rotation.T, translation=self.rotation.T.dot(-self.translation)) 44 | 45 | def inverse_transform_point_cloud(self, point_cloud: np.ndarray) -> np.ndarray: 46 | """Transform the point_cloud by the inverse of this SE2. 47 | Args: 48 | point_cloud: Numpy array of shape (N,2). 49 | Returns: 50 | point_cloud transformed by the inverse of this SE2. 51 | """ 52 | return self.inverse().transform_point_cloud(point_cloud) 53 | 54 | def compose(self, right_se2: "SE2") -> "SE2": 55 | """Right-multiply this SE2 by right_se2 and return the composed transformation. 56 | Args: 57 | right_se2: SE2 object to multiply this object by from the right. 58 | Returns: 59 | The composed transformation. 60 | """ 61 | chained_transform_matrix = self.transform_matrix.dot(right_se2.transform_matrix) 62 | chained_se2 = SE2( 63 | rotation=chained_transform_matrix[:2, :2], 64 | translation=chained_transform_matrix[:2, 2], 65 | ) 66 | return chained_se2 67 | -------------------------------------------------------------------------------- /bucketed_scene_flow_eval/datastructures/se3.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from pyquaternion import Quaternion 3 | 4 | 5 | class SE3: 6 | """An SE3 class allows point cloud rotation and translation operations.""" 7 | 8 | def __init__(self, rotation_matrix: np.ndarray, translation: np.ndarray) -> None: 9 | """Initialize an SE3 instance with its rotation matrix and translation vector.
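The resulting 4x4 transform_matrix maps homogeneous points from the
source frame to the destination frame: if this object represents
dst_SE3_src, transform_points() takes points in frame src to frame dst.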
10 | Args: 11 | rotation: Array of shape (3, 3) 12 | translation: Array of shape (3,) 13 | """ 14 | assert rotation_matrix.shape == (3, 3) 15 | assert translation.shape == (3,) 16 | 17 | self.transform_matrix = np.eye(4) 18 | self.transform_matrix[:3, :3] = rotation_matrix 19 | self.transform_matrix[:3, 3] = translation 20 | 21 | @property 22 | def rotation_matrix(self) -> np.ndarray: 23 | return self.transform_matrix[:3, :3] 24 | 25 | @property 26 | def translation(self) -> np.ndarray: 27 | return self.transform_matrix[:3, 3] 28 | 29 | @staticmethod 30 | def identity() -> "SE3": 31 | """Return the identity transformation.""" 32 | return SE3(rotation_matrix=np.eye(3), translation=np.zeros(3)) 33 | 34 | @staticmethod 35 | def from_rot_x_y_z_translation_x_y_z(rx, ry, rz, tx, ty, tz) -> "SE3": 36 | rotation_matrix = ( 37 | Quaternion(axis=[1, 0, 0], angle=rx).rotation_matrix 38 | @ Quaternion(axis=[0, 1, 0], angle=ry).rotation_matrix 39 | @ Quaternion(axis=[0, 0, 1], angle=rz).rotation_matrix 40 | ) 41 | translation = np.array([tx, ty, tz]) 42 | return SE3(rotation_matrix, translation) 43 | 44 | @staticmethod 45 | def from_rot_w_x_y_z_translation_x_y_z(rw, rx, ry, rz, tx, ty, tz) -> "SE3": 46 | rotation_matrix = Quaternion(w=rw, x=rx, y=ry, z=rz).rotation_matrix 47 | translation = np.array([tx, ty, tz]) 48 | return SE3(rotation_matrix, translation) 49 | 50 | def __eq__(self, __value: object) -> bool: 51 | if not isinstance(__value, SE3): 52 | return False 53 | return np.allclose(self.rotation_matrix, __value.rotation_matrix) and np.allclose( 54 | self.translation, __value.translation 55 | ) 56 | 57 | def translate(self, translation: np.ndarray) -> "SE3": 58 | """Return a new SE3 instance with the given translation applied.""" 59 | if isinstance(translation, list): 60 | translation = np.array(translation) 61 | assert translation.shape == ( 62 | 3, 63 | ), f"Translation must be a 3D vector, got {translation.shape}" 64 | return SE3( 65 | rotation_matrix=self.rotation_matrix, 66 | translation=self.translation + translation, 67 | ) 68 | 69 | def scale(self, scale: float) -> "SE3": 70 | """Return a new SE3 instance with the given scale applied.""" 71 | return SE3( 72 | rotation_matrix=self.rotation_matrix * scale, 73 | translation=self.translation * scale, 74 | ) 75 | 76 | def transform_points(self, point_cloud: np.ndarray) -> np.ndarray: 77 | """Apply the SE(3) transformation to this point cloud. 78 | Args: 79 | point_cloud: Array of shape (N, 3). If the transform represents dst_SE3_src, 80 | then point_cloud should consist of points in frame `src` 81 | Returns: 82 | Array of shape (N, 3) representing the transformed point cloud, i.e. points in frame `dst` 83 | """ 84 | return point_cloud @ self.rotation_matrix.T + self.translation 85 | 86 | def transform_flow(self, flow: np.ndarray) -> np.ndarray: 87 | """Apply the SE(3)'s rotation transformation to this flow field. 88 | Args: 89 | flow: Array of shape (N, 3). If the transform represents dst_SE3_src, 90 | then flow should consist of flow vectors in frame `src` 91 | Returns: 92 | Array of shape (N, 3) representing the transformed flow field, i.e. flow vectors in frame `dst` 93 | """ 94 | return flow @ self.rotation_matrix.T 95 | 96 | def inverse(self) -> "SE3": 97 | """Return the inverse of the current SE3 transformation. 98 | For example, if the current object represents target_SE3_src, we will return instead src_SE3_target. 
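Uses the closed form [R | t]^(-1) = [R^T | -R^T t], so no general 4x4
matrix inversion is needed.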
99 | Returns: 100 | src_SE3_target: instance of SE3 class, representing 101 | inverse of SE3 transformation target_SE3_src 102 | """ 103 | return SE3( 104 | rotation_matrix=self.rotation_matrix.T, 105 | translation=self.rotation_matrix.T.dot(-self.translation), 106 | ) 107 | 108 | def compose(self, right_se3: "SE3") -> "SE3": 109 | """Compose (right multiply) this class' transformation matrix T with another SE3 instance. 110 | Algebraic representation: chained_se3 = T * right_se3 111 | Args: 112 | right_se3: another instance of SE3 class 113 | Returns: 114 | chained_se3: new instance of SE3 class 115 | """ 116 | return SE3.from_array(self.transform_matrix @ right_se3.transform_matrix) 117 | 118 | def __matmul__(self, right_se3: "SE3") -> "SE3": 119 | return self.compose(right_se3) 120 | 121 | def to_array(self) -> np.ndarray: 122 | """Return the SE3 transformation matrix as a numpy array.""" 123 | return self.transform_matrix 124 | 125 | @staticmethod 126 | def from_array(transform_matrix: np.ndarray) -> "SE3": 127 | """Initialize an SE3 instance from a numpy array.""" 128 | return SE3( 129 | rotation_matrix=transform_matrix[:3, :3], 130 | translation=transform_matrix[:3, 3], 131 | ) 132 | 133 | def __repr__(self) -> str: 134 | return f"SE3(rotation_matrix={self.rotation_matrix}, translation={self.translation})" 135 | 136 | def to_o3d(self, simple: bool = True): 137 | import open3d as o3d 138 | 139 | # Draw ball at origin 140 | origin_ball = o3d.geometry.TriangleMesh.create_sphere(radius=0.1) 141 | origin_ball = origin_ball.translate(self.translation) 142 | origin_ball.paint_uniform_color([0, 0, 0]) 143 | 144 | cone = o3d.geometry.TriangleMesh.create_cone(radius=0.1, height=0.5) 145 | point_forward = np.array( 146 | [ 147 | [0, 0, 1], 148 | [0, 1, 0], 149 | [1, 0, 0], 150 | ] 151 | ) 152 | cone = cone.rotate(self.rotation_matrix @ point_forward, center=(0, 0, 0)) 153 | cone = cone.translate(self.translation) 154 | cone = cone.compute_vertex_normals() 155 | 156 | if simple: 157 | return [origin_ball, cone] 158 | 159 | forward_vec = np.array([1, 0, 0]) 160 | forward_rotated_vec = self.rotation_matrix @ forward_vec 161 | left_vec = np.array([0, 1, 0]) 162 | left_rotated_vec = self.rotation_matrix @ left_vec 163 | up_vec = np.array([0, 0, 1]) 164 | up_rotated_vec = self.rotation_matrix @ up_vec 165 | 166 | # Draw ball at unit length in x direction 167 | forward_ball = o3d.geometry.TriangleMesh.create_sphere(radius=0.1) 168 | forward_ball = forward_ball.translate(self.translation + forward_rotated_vec) 169 | forward_ball.paint_uniform_color([1, 0, 0]) 170 | 171 | left_ball = o3d.geometry.TriangleMesh.create_sphere(radius=0.1) 172 | left_ball = left_ball.translate(self.translation + left_rotated_vec) 173 | left_ball.paint_uniform_color([0, 1, 0]) 174 | 175 | up_ball = o3d.geometry.TriangleMesh.create_sphere(radius=0.1) 176 | up_ball = up_ball.translate(self.translation + up_rotated_vec) 177 | up_ball.paint_uniform_color([0, 0, 1]) 178 | 179 | # Draw line between balls 180 | line = o3d.geometry.LineSet() 181 | line.points = o3d.utility.Vector3dVector( 182 | np.vstack((self.translation, self.translation + forward_rotated_vec)) 183 | ) 184 | line.lines = o3d.utility.Vector2iVector(np.array([[0, 1]])) 185 | 186 | return [origin_ball, forward_ball, left_ball, up_ball, line, cone] 187 | -------------------------------------------------------------------------------- /bucketed_scene_flow_eval/eval/__init__.py: -------------------------------------------------------------------------------- 1 | from 
.bucketed_epe import BucketedEPEEvaluator 2 | from .eval import EmptyEvaluator, Evaluator 3 | from .threeway_epe import ThreeWayEPEEvaluator 4 | 5 | __all__ = [ 6 | "Evaluator", 7 | "BucketedEPEEvaluator", 8 | "ThreeWayEPEEvaluator", 9 | ] 10 | -------------------------------------------------------------------------------- /bucketed_scene_flow_eval/eval/eval.py: -------------------------------------------------------------------------------- 1 | # Import abstract base class for evaluator 2 | from abc import ABC, abstractmethod 3 | from typing import Any 4 | 5 | from bucketed_scene_flow_eval.datastructures import ( 6 | EgoLidarFlow, 7 | TimeSyncedSceneFlowFrame, 8 | ) 9 | 10 | 11 | class Evaluator(ABC): 12 | @abstractmethod 13 | def eval(self, predictions: EgoLidarFlow, gt: TimeSyncedSceneFlowFrame): 14 | raise NotImplementedError 15 | 16 | @abstractmethod 17 | def compute_results(self, save_results: bool = True) -> dict[Any, Any]: 18 | raise NotImplementedError 19 | 20 | 21 | class EmptyEvaluator(Evaluator): 22 | def __init__(self): 23 | pass 24 | 25 | def eval(self, predictions: EgoLidarFlow, gt: TimeSyncedSceneFlowFrame): 26 | pass 27 | 28 | def compute_results(self, save_results: bool = True) -> dict[Any, Any]: 29 | return {} 30 | -------------------------------------------------------------------------------- /bucketed_scene_flow_eval/eval/threeway_epe.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | import numpy as np 4 | 5 | from bucketed_scene_flow_eval.datastructures import SemanticClassId 6 | 7 | from .bucketed_epe import BucketedEPEEvaluator 8 | 9 | 10 | class ThreeWayEPEEvaluator(BucketedEPEEvaluator): 11 | def __init__( 12 | self, 13 | class_id_to_name: dict[SemanticClassId, str], 14 | meta_class_lookup: dict[str, list[str]], 15 | dynamic_threshold_meters_per_frame=0.5 / 10, 16 | output_path: Path = Path("/tmp/frame_results/threeway_epe"), 17 | ) -> None: 18 | assert meta_class_lookup is not None, "meta_class_lookup must be provided" 19 | assert isinstance(meta_class_lookup, dict), "meta_class_lookup must be a dictionary" 20 | assert ( 21 | len(meta_class_lookup.keys()) == 2 22 | ), f"Threeway EPE meta_class_lookup must have 2 keys, instead found {len(meta_class_lookup.keys())} keys: {meta_class_lookup.keys()}" 23 | super().__init__( 24 | class_id_to_name=class_id_to_name, 25 | output_path=output_path, 26 | meta_class_lookup=meta_class_lookup, 27 | ) 28 | bucket_edges = [0.0, dynamic_threshold_meters_per_frame, np.inf] 29 | self.speed_thresholds = list(zip(bucket_edges, bucket_edges[1:])) 30 | 31 | def _save_stats_tables(self, average_stats): 32 | super()._save_stats_tables(average_stats, normalized=False) 33 | 34 | def compute_results( 35 | self, save_results: bool = True, return_distance_threshold: int = 35 36 | ) -> dict[str, tuple[float, float]]: 37 | super().compute_results(save_results) 38 | 39 | category_to_per_frame_stats = self._category_to_per_frame_stats() 40 | category_to_average_stats = self._category_to_average_stats(category_to_per_frame_stats) 41 | matrix = self._build_stat_table(category_to_average_stats, return_distance_threshold) 42 | assert len(matrix.class_names) == 2, f"Expected 2 classes, found {len(matrix.class_names)}" 43 | return { 44 | str(k): (v.static_epe, v.dynamic_error) 45 | for k, v in matrix.get_overall_class_errors(normalized=False).items() 46 | } 47 | -------------------------------------------------------------------------------- 
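A minimal construction sketch for the ThreeWayEPEEvaluator above (the class ids and meta-class names are hypothetical placeholders; the constructor arguments, the two-meta-class requirement, and the shape of compute_results() come from the source):

from pathlib import Path

from bucketed_scene_flow_eval.eval import ThreeWayEPEEvaluator

# Hypothetical id -> name mapping; real ids come from the dataset's category map.
class_id_to_name = {0: "BACKGROUND", 1: "REGULAR_VEHICLE"}
# The constructor asserts exactly two meta classes.
meta_class_lookup = {
    "BACKGROUND": ["BACKGROUND"],
    "FOREGROUND": ["REGULAR_VEHICLE"],
}
evaluator = ThreeWayEPEEvaluator(
    class_id_to_name=class_id_to_name,
    meta_class_lookup=meta_class_lookup,
    dynamic_threshold_meters_per_frame=0.5 / 10,  # the default: 0.05 m per frame
    output_path=Path("/tmp/threeway_epe_demo"),
)
# After repeated evaluator.eval(predictions, gt) calls, compute_results()
# returns {meta_class_name: (static_epe, dynamic_error)}.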
/bucketed_scene_flow_eval/interfaces/__init__.py: -------------------------------------------------------------------------------- 1 | from .abstract_dataset import AbstractDataset, LoaderType 2 | from .abstract_sequence_loader import ( 3 | AbstractAVLidarSequence, 4 | AbstractSequence, 5 | AbstractSequenceLoader, 6 | CachedSequenceLoader, 7 | ) 8 | from .base_dataset_abstract_seq_loader import ( 9 | CausalSeqLoaderDataset, 10 | EvalType, 11 | NonCausalSeqLoaderDataset, 12 | ) 13 | 14 | __all__ = [ 15 | "AbstractDataset", 16 | "AbstractSequence", 17 | "AbstractAVLidarSequence", 18 | "AbstractSequenceLoader", 19 | "CachedSequenceLoader", 20 | "EvalType", 21 | "LoaderType", 22 | "NonCausalSeqLoaderDataset", 23 | "CausalSeqLoaderDataset", 24 | ] 25 | -------------------------------------------------------------------------------- /bucketed_scene_flow_eval/interfaces/abstract_dataset.py: -------------------------------------------------------------------------------- 1 | # Import abstract base class 2 | import enum 3 | from abc import ABC, abstractmethod 4 | 5 | from bucketed_scene_flow_eval.datastructures import TimeSyncedSceneFlowFrame 6 | from bucketed_scene_flow_eval.eval import Evaluator 7 | 8 | 9 | class LoaderType(enum.Enum): 10 | CAUSAL = 0 11 | NON_CAUSAL = 1 12 | 13 | 14 | class AbstractDataset(ABC): 15 | @abstractmethod 16 | def __getitem__(self, idx: int) -> list[TimeSyncedSceneFlowFrame]: 17 | raise NotImplementedError 18 | 19 | @abstractmethod 20 | def __len__(self) -> int: 21 | raise NotImplementedError 22 | 23 | @abstractmethod 24 | def evaluator(self) -> Evaluator: 25 | raise NotImplementedError 26 | 27 | @abstractmethod 28 | def loader_type(self) -> LoaderType: 29 | raise NotImplementedError 30 | -------------------------------------------------------------------------------- /bucketed_scene_flow_eval/interfaces/abstract_sequence_loader.py: -------------------------------------------------------------------------------- 1 | # Import abstract base class 2 | from abc import ABC, abstractmethod 3 | 4 | from bucketed_scene_flow_eval.datastructures import ( 5 | TimeSyncedAVLidarData, 6 | TimeSyncedBaseAuxilaryData, 7 | TimeSyncedRawFrame, 8 | TimeSyncedSceneFlowFrame, 9 | ) 10 | 11 | 12 | class AbstractSequence(ABC): 13 | def __init__(self): 14 | pass 15 | 16 | @abstractmethod 17 | def load( 18 | self, idx: int, relative_to_idx: int 19 | ) -> tuple[TimeSyncedRawFrame, TimeSyncedBaseAuxilaryData]: 20 | pass 21 | 22 | @abstractmethod 23 | def __len__(self): 24 | pass 25 | 26 | 27 | class AbstractAVLidarSequence(AbstractSequence): 28 | @abstractmethod 29 | def load( 30 | self, idx: int, relative_to_idx: int, with_flow: bool = True 31 | ) -> tuple[TimeSyncedSceneFlowFrame, TimeSyncedAVLidarData]: 32 | pass 33 | 34 | 35 | class AbstractSequenceLoader(ABC): 36 | def __init__(self): 37 | pass 38 | 39 | @abstractmethod 40 | def get_sequence_ids(self) -> list: 41 | pass 42 | 43 | @abstractmethod 44 | def load_sequence(self, sequence_identifier) -> AbstractSequence: 45 | pass 46 | 47 | @abstractmethod 48 | def cache_folder_name(self) -> str: 49 | pass 50 | 51 | @abstractmethod 52 | def __len__(self) -> int: 53 | pass 54 | 55 | def __getitem__(self, idx): 56 | return self.load_sequence(self.sequence_id_lst[idx]) # sequence_id_lst is supplied by concrete subclasses 57 | 58 | 59 | class CachedSequenceLoader(AbstractSequenceLoader): 60 | def __init__(self): 61 | self.last_loaded_sequence = None 62 | self.last_loaded_sequence_id = None 63 | 64 | @abstractmethod 65 | def _load_sequence_uncached(self, sequence_identifier) -> AbstractSequence: 66 | pass
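# Usage sketch (hypothetical sequence ids): calling load_sequence("log_a")
# twice in a row performs a single uncached load, while alternating
# load_sequence("log_a") / load_sequence("log_b") reloads on every call,
# since only the most recently loaded sequence is retained.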
67 | 68 | def load_sequence(self, sequence_identifier) -> AbstractSequence: 69 | # Basic caching mechanism for repeated loads of the same sequence 70 | if self.last_loaded_sequence_id != sequence_identifier: 71 | self.last_loaded_sequence = self._load_sequence_uncached(sequence_identifier) 72 | self.last_loaded_sequence_id = sequence_identifier 73 | 74 | return self.last_loaded_sequence 75 | -------------------------------------------------------------------------------- /bucketed_scene_flow_eval/interfaces/base_dataset_abstract_seq_loader.py: -------------------------------------------------------------------------------- 1 | import copy 2 | import enum 3 | from abc import abstractmethod 4 | from pathlib import Path 5 | from typing import Any, Optional, Sequence, Union 6 | 7 | import numpy as np 8 | 9 | from bucketed_scene_flow_eval.datastructures import * 10 | from bucketed_scene_flow_eval.interfaces import AbstractDataset, LoaderType 11 | from bucketed_scene_flow_eval.utils import load_pickle, save_pickle 12 | 13 | from .abstract_sequence_loader import ( 14 | AbstractAVLidarSequence, 15 | AbstractSequence, 16 | AbstractSequenceLoader, 17 | ) 18 | 19 | 20 | class EvalType(enum.Enum): 21 | BUCKETED_EPE = 0 22 | THREEWAY_EPE = 1 23 | BUCKETED_VOLUME_EPE = 2 24 | 25 | 26 | CacheLookup = list[tuple[int, tuple[int, int]]] 27 | 28 | 29 | class BaseAbstractSeqLoaderDataset(AbstractDataset): 30 | def __init__( 31 | self, 32 | sequence_loader: AbstractSequenceLoader, 33 | subsequence_length: int = 2, 34 | sliding_window_step_size: int | None = 1, 35 | with_ground: bool = True, 36 | idx_lookup_cache_root: Path = Path("/tmp/idx_lookup_cache/"), 37 | eval_type: str = "bucketed_epe", 38 | eval_args=dict(), 39 | use_cache=True, 40 | ) -> None: 41 | self.use_cache = use_cache 42 | self.with_ground = with_ground 43 | self.sequence_loader = sequence_loader 44 | self.subsequence_length = subsequence_length 45 | 46 | if sliding_window_step_size is None: 47 | sliding_window_step_size = subsequence_length 48 | self.sliding_window_step_size = sliding_window_step_size 49 | self.idx_lookup_cache_path = ( 50 | idx_lookup_cache_root / self.sequence_loader.cache_folder_name() 51 | ) 52 | 53 | self.dataset_to_sequence_subsequence_idx = self._load_dataset_to_sequence_subsequence_idx() 54 | self.sequence_subsequence_idx_to_dataset_idx = { 55 | value: key for key, value in enumerate(self.dataset_to_sequence_subsequence_idx) 56 | } 57 | 58 | self.eval_type = EvalType[eval_type.strip().upper()] 59 | self.eval_args = eval_args 60 | 61 | def _build_new_cache(self) -> CacheLookup: 62 | cache_file = self._get_idx_lookup_cache_file() 63 | dataset_idx_to_sequence_subsequence_range: CacheLookup = [] 64 | 65 | for sequence_idx, sequence in enumerate(self.sequence_loader): 66 | for subsequence_start_idx in range( 67 | 0, len(sequence) - self.subsequence_length + 1, self.sliding_window_step_size 68 | ): 69 | dataset_idx_to_sequence_subsequence_range.append( 70 | ( 71 | sequence_idx, 72 | (subsequence_start_idx, subsequence_start_idx + self.subsequence_length), 73 | ) 74 | ) 75 | 76 | print( 77 | f"Loaded {len(dataset_idx_to_sequence_subsequence_range)} subsequence pairs. 
Saving it to {cache_file}" 78 | ) 79 | save_pickle(cache_file, dataset_idx_to_sequence_subsequence_range) 80 | return dataset_idx_to_sequence_subsequence_range 81 | 82 | @abstractmethod 83 | def _get_idx_lookup_cache_file(self) -> Path: 84 | raise NotImplementedError 85 | 86 | def _load_existing_cache(self) -> Optional[CacheLookup]: 87 | cache_file = self._get_idx_lookup_cache_file() 88 | if cache_file.exists() and self.use_cache: 89 | cache_pkl = load_pickle(cache_file) 90 | return cache_pkl 91 | return None 92 | 93 | def _load_dataset_to_sequence_subsequence_idx(self) -> CacheLookup: 94 | existing_cache = self._load_existing_cache() 95 | if existing_cache is not None: 96 | return existing_cache 97 | 98 | return self._build_new_cache() 99 | 100 | def __len__(self): 101 | return len(self.dataset_to_sequence_subsequence_idx) 102 | 103 | def _process_frame_with_metadata( 104 | self, frame: TimeSyncedSceneFlowFrame, metadata: TimeSyncedAVLidarData 105 | ) -> TimeSyncedSceneFlowFrame: 106 | # Typecheck 107 | assert isinstance(frame, TimeSyncedSceneFlowFrame), f"item is {type(frame)}" 108 | assert isinstance(metadata, TimeSyncedAVLidarData), f"metadata is {type(metadata)}" 109 | # Falsify PC mask for ground points. 110 | frame.pc.mask = frame.pc.mask & metadata.in_range_mask 111 | # Falsify Flow mask for ground points. 112 | frame.flow.mask = frame.flow.mask & metadata.in_range_mask 113 | 114 | if not self.with_ground: 115 | frame.pc.mask = frame.pc.mask & ~metadata.is_ground_points 116 | frame.flow.mask = frame.flow.mask & ~metadata.is_ground_points 117 | 118 | return frame 119 | 120 | @abstractmethod 121 | def _load_from_sequence( 122 | self, 123 | sequence: AbstractAVLidarSequence, 124 | relative_idx: int, 125 | subsequence_start_idx: int, 126 | ) -> tuple[TimeSyncedSceneFlowFrame, TimeSyncedAVLidarData]: 127 | raise NotImplementedError 128 | 129 | def __getitem__(self, dataset_idx, verbose: bool = False) -> list[TimeSyncedSceneFlowFrame]: 130 | sequence_idx, ( 131 | subsequence_start_idx, 132 | _, 133 | ) = self.dataset_to_sequence_subsequence_idx[dataset_idx] 134 | 135 | # Load sequence 136 | sequence = self.sequence_loader[sequence_idx] 137 | 138 | # Load subsequence 139 | 140 | subsequence_frames = [ 141 | self._load_from_sequence(sequence, i, subsequence_start_idx) 142 | for i in range(self.subsequence_length) 143 | ] 144 | 145 | scene_flow_items = [item for item, _ in subsequence_frames] 146 | scene_flow_metadata = [metadata for _, metadata in subsequence_frames] 147 | 148 | scene_flow_items = [ 149 | self._process_frame_with_metadata(item, metadata) 150 | for item, metadata in zip(scene_flow_items, scene_flow_metadata) 151 | ] 152 | 153 | return scene_flow_items 154 | 155 | 156 | class CausalSeqLoaderDataset(BaseAbstractSeqLoaderDataset): 157 | def _load_from_sequence( 158 | self, 159 | sequence: AbstractAVLidarSequence, 160 | relative_idx: int, 161 | subsequence_start_idx: int, 162 | ) -> tuple[TimeSyncedSceneFlowFrame, TimeSyncedAVLidarData]: 163 | assert isinstance( 164 | sequence, AbstractAVLidarSequence 165 | ), f"sequence is {type(sequence)}, not AbstractAVLidarSequence" 166 | # As a causal loader, the central frame is the last frame in the sequence. 
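# For example, with subsequence_length=3 and subsequence_start_idx=10,
# frames 10, 11, and 12 are each loaded relative to frame 12, and
# with_flow is False only for that final frame.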
167 | in_subsequence_tgt_index = self.subsequence_length - 1 168 | return sequence.load( 169 | subsequence_start_idx + relative_idx, 170 | subsequence_start_idx + in_subsequence_tgt_index, 171 | with_flow=relative_idx != self.subsequence_length - 1, 172 | ) 173 | 174 | def _get_idx_lookup_cache_file(self) -> Path: 175 | cache_file = ( 176 | self.idx_lookup_cache_path 177 | / f"causal_subsequence_{self.subsequence_length}_sliding_window_{self.sliding_window_step_size}_lookup.pkl" 178 | ) 179 | return cache_file 180 | 181 | def loader_type(self) -> LoaderType: 182 | return LoaderType.CAUSAL 183 | 184 | 185 | class NonCausalSeqLoaderDataset(BaseAbstractSeqLoaderDataset): 186 | def __init__(self, *args, sliding_window_step_size: int | None = None, **kwargs): 187 | super().__init__(*args, sliding_window_step_size=sliding_window_step_size, **kwargs) 188 | 189 | def _load_from_sequence( 190 | self, 191 | sequence: AbstractAVLidarSequence, 192 | relative_idx: int, 193 | subsequence_start_idx: int, 194 | ) -> tuple[TimeSyncedSceneFlowFrame, TimeSyncedAVLidarData]: 195 | assert isinstance( 196 | sequence, AbstractAVLidarSequence 197 | ), f"sequence is {type(sequence)}, not AbstractAVLidarSequence" 198 | # As a non-causal loader, the central frame is the middle frame in the sequence. 199 | in_subsequence_src_index = (self.subsequence_length - 1) // 2 200 | in_subsequence_tgt_index = in_subsequence_src_index + 1 201 | return sequence.load( 202 | subsequence_start_idx + relative_idx, 203 | subsequence_start_idx + in_subsequence_tgt_index, 204 | with_flow=relative_idx != self.subsequence_length - 1, 205 | ) 206 | 207 | def _get_idx_lookup_cache_file(self) -> Path: 208 | cache_file = ( 209 | self.idx_lookup_cache_path 210 | / f"non_causal_subsequence_{self.subsequence_length}_sliding_window_{self.sliding_window_step_size}_lookup.pkl" 211 | ) 212 | return cache_file 213 | 214 | def loader_type(self) -> LoaderType: 215 | return LoaderType.NON_CAUSAL 216 | -------------------------------------------------------------------------------- /bucketed_scene_flow_eval/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .loaders import ( 2 | load_by_extension, 3 | load_csv, 4 | load_feather, 5 | load_json, 6 | load_npy, 7 | load_npz, 8 | load_pickle, 9 | load_txt, 10 | run_cmd, 11 | save_by_extension, 12 | save_csv, 13 | save_feather, 14 | save_json, 15 | save_npy, 16 | save_npz, 17 | save_pickle, 18 | save_txt, 19 | ) 20 | 21 | __all__ = [ 22 | "load_pickle", 23 | "save_pickle", 24 | "load_json", 25 | "save_json", 26 | "load_csv", 27 | "save_csv", 28 | "load_npz", 29 | "save_npz", 30 | "load_npy", 31 | "save_npy", 32 | "run_cmd", 33 | "load_txt", 34 | "save_txt", 35 | "save_by_extension", 36 | "load_by_extension", 37 | "load_feather", 38 | "save_feather", 39 | ] 40 | -------------------------------------------------------------------------------- /bucketed_scene_flow_eval/utils/glfw_key_ids.py: -------------------------------------------------------------------------------- 1 | GLFW_KEY_SPACE = 32 2 | GLFW_KEY_APOSTROPHE = 39 3 | GLFW_KEY_COMMA = 44 4 | GLFW_KEY_MINUS = 45 5 | GLFW_KEY_PERIOD = 46 6 | GLFW_KEY_SLASH = 47 7 | GLFW_KEY_0 = 48 8 | GLFW_KEY_1 = 49 9 | GLFW_KEY_2 = 50 10 | GLFW_KEY_3 = 51 11 | GLFW_KEY_4 = 52 12 | GLFW_KEY_5 = 53 13 | GLFW_KEY_6 = 54 14 | GLFW_KEY_7 = 55 15 | GLFW_KEY_8 = 56 16 | GLFW_KEY_9 = 57 17 | GLFW_KEY_SEMICOLON = 59 18 | GLFW_KEY_EQUAL = 61 19 | GLFW_KEY_A = 65 20 | GLFW_KEY_B = 66 21 | GLFW_KEY_C = 67 22 | GLFW_KEY_D = 
68 23 | GLFW_KEY_E = 69 24 | GLFW_KEY_F = 70 25 | GLFW_KEY_G = 71 26 | GLFW_KEY_H = 72 27 | GLFW_KEY_I = 73 28 | GLFW_KEY_J = 74 29 | GLFW_KEY_K = 75 30 | GLFW_KEY_L = 76 31 | GLFW_KEY_M = 77 32 | GLFW_KEY_N = 78 33 | GLFW_KEY_O = 79 34 | GLFW_KEY_P = 80 35 | GLFW_KEY_Q = 81 36 | GLFW_KEY_R = 82 37 | GLFW_KEY_S = 83 38 | GLFW_KEY_T = 84 39 | GLFW_KEY_U = 85 40 | GLFW_KEY_V = 86 41 | GLFW_KEY_W = 87 42 | GLFW_KEY_X = 88 43 | GLFW_KEY_Y = 89 44 | GLFW_KEY_Z = 90 45 | GLFW_KEY_LEFT_BRACKET = 91 46 | GLFW_KEY_BACKSLASH = 92 47 | GLFW_KEY_RIGHT_BRACKET = 93 48 | GLFW_KEY_GRAVE_ACCENT = 96 49 | GLFW_KEY_WORLD_1 = 161 # /* non-US #1 */ 50 | GLFW_KEY_WORLD_2 = 162 # /* non-US #2 */ 51 | GLFW_KEY_ESCAPE = 256 52 | GLFW_KEY_ENTER = 257 53 | GLFW_KEY_TAB = 258 54 | GLFW_KEY_BACKSPACE = 259 55 | GLFW_KEY_INSERT = 260 56 | GLFW_KEY_DELETE = 261 57 | GLFW_KEY_RIGHT = 262 58 | GLFW_KEY_LEFT = 263 59 | GLFW_KEY_DOWN = 264 60 | GLFW_KEY_UP = 265 61 | GLFW_KEY_PAGE_UP = 266 62 | GLFW_KEY_PAGE_DOWN = 267 63 | GLFW_KEY_HOME = 268 64 | GLFW_KEY_END = 269 65 | GLFW_KEY_CAPS_LOCK = 280 66 | GLFW_KEY_SCROLL_LOCK = 281 67 | GLFW_KEY_NUM_LOCK = 282 68 | GLFW_KEY_PRINT_SCREEN = 283 69 | GLFW_KEY_PAUSE = 284 70 | GLFW_KEY_F1 = 290 71 | GLFW_KEY_F2 = 291 72 | GLFW_KEY_F3 = 292 73 | GLFW_KEY_F4 = 293 74 | GLFW_KEY_F5 = 294 75 | GLFW_KEY_F6 = 295 76 | GLFW_KEY_F7 = 296 77 | GLFW_KEY_F8 = 297 78 | GLFW_KEY_F9 = 298 79 | GLFW_KEY_F10 = 299 80 | GLFW_KEY_F11 = 300 81 | GLFW_KEY_F12 = 301 82 | GLFW_KEY_F13 = 302 83 | GLFW_KEY_F14 = 303 84 | GLFW_KEY_F15 = 304 85 | GLFW_KEY_F16 = 305 86 | GLFW_KEY_F17 = 306 87 | GLFW_KEY_F18 = 307 88 | GLFW_KEY_F19 = 308 89 | GLFW_KEY_F20 = 309 90 | GLFW_KEY_F21 = 310 91 | GLFW_KEY_F22 = 311 92 | GLFW_KEY_F23 = 312 93 | GLFW_KEY_F24 = 313 94 | GLFW_KEY_F25 = 314 95 | GLFW_KEY_KP_0 = 320 96 | GLFW_KEY_KP_1 = 321 97 | GLFW_KEY_KP_2 = 322 98 | GLFW_KEY_KP_3 = 323 99 | GLFW_KEY_KP_4 = 324 100 | GLFW_KEY_KP_5 = 325 101 | GLFW_KEY_KP_6 = 326 102 | GLFW_KEY_KP_7 = 327 103 | GLFW_KEY_KP_8 = 328 104 | GLFW_KEY_KP_9 = 329 105 | GLFW_KEY_KP_DECIMAL = 330 106 | GLFW_KEY_KP_DIVIDE = 331 107 | GLFW_KEY_KP_MULTIPLY = 332 108 | GLFW_KEY_KP_SUBTRACT = 333 109 | GLFW_KEY_KP_ADD = 334 110 | GLFW_KEY_KP_ENTER = 335 111 | GLFW_KEY_KP_EQUAL = 336 112 | GLFW_KEY_LEFT_SHIFT = 340 113 | GLFW_KEY_LEFT_CONTROL = 341 114 | GLFW_KEY_LEFT_ALT = 342 115 | GLFW_KEY_LEFT_SUPER = 343 116 | GLFW_KEY_RIGHT_SHIFT = 344 117 | GLFW_KEY_RIGHT_CONTROL = 345 118 | GLFW_KEY_RIGHT_ALT = 346 119 | GLFW_KEY_RIGHT_SUPER = 347 120 | GLFW_KEY_MENU = 348 121 | -------------------------------------------------------------------------------- /build_pypi.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | rm -rf ./dist/ 3 | python3 -m build 4 | python3 -m twine upload --repository pypi dist/* -------------------------------------------------------------------------------- /data_prep_scripts/argo/count_boxes.py: -------------------------------------------------------------------------------- 1 | import json 2 | from argparse import ArgumentParser 3 | from collections import defaultdict 4 | from pathlib import Path 5 | 6 | import matplotlib.pyplot as plt 7 | import numpy as np 8 | 9 | 10 | def load_cuboid_metadata(json_file: Path): 11 | """Load cuboid metadata from a JSON file.""" 12 | with open(json_file, "r") as f: 13 | cuboid_metadata = json.load(f) 14 | return cuboid_metadata 15 | 16 | 17 | def plot_histogram(cuboid_metadata, output_file: Path): 18 | """Plot a histogram of boxes by volume, 
colored by class name.""" 19 | volumes_by_class = defaultdict(list) 20 | 21 | # Collect volumes by class 22 | for entry in cuboid_metadata: 23 | class_name = entry["class_name"] 24 | volume = entry["volume"] 25 | volumes_by_class[class_name].append(volume) 26 | 27 | # Prepare data for the histogram 28 | classes = list(volumes_by_class.keys()) 29 | volumes = [volumes_by_class[class_name] for class_name in classes] 30 | 31 | # Create histogram bins 32 | all_volumes = np.concatenate(volumes) 33 | bin_edges = np.histogram_bin_edges(all_volumes, bins="auto") 34 | 35 | # Compute histogram data for each class 36 | histogram_data = [] 37 | for class_volumes in volumes: 38 | counts, _ = np.histogram(class_volumes, bins=bin_edges) 39 | histogram_data.append(counts) 40 | 41 | histogram_data = np.array(histogram_data) 42 | 43 | # Plot stacked histogram 44 | plt.figure(figsize=(10, 6), dpi=300) 45 | bottom = np.zeros(len(bin_edges) - 1) 46 | 47 | for class_name, class_counts in zip(classes, histogram_data): 48 | plt.bar( 49 | bin_edges[:-1], 50 | class_counts, 51 | width=np.diff(bin_edges), 52 | bottom=bottom, 53 | label=class_name, 54 | align="edge", 55 | ) 56 | bottom += class_counts 57 | 58 | plt.xlabel("Volume") 59 | plt.ylabel("Count") 60 | plt.title("Histogram of Boxes by Volume and Class") 61 | plt.legend() 62 | plt.grid(True) 63 | 64 | # Save plot as high-resolution PNG 65 | plt.savefig(output_file, format="png") 66 | plt.close() 67 | 68 | 69 | if __name__ == "__main__": 70 | parser = ArgumentParser(description="Plot histogram of cuboid volumes by class") 71 | parser.add_argument( 72 | "--json_file", 73 | type=str, 74 | required=True, 75 | help="Path to the JSON file containing cuboid metadata", 76 | ) 77 | parser.add_argument( 78 | "--output_file", 79 | type=str, 80 | required=True, 81 | help="Path to save the output PNG file", 82 | ) 83 | 84 | args = parser.parse_args() 85 | json_file = Path(args.json_file) 86 | output_file = Path(args.output_file) 87 | 88 | cuboid_metadata = load_cuboid_metadata(json_file) 89 | plot_histogram(cuboid_metadata, output_file) 90 | -------------------------------------------------------------------------------- /data_prep_scripts/argo/create_symlink_tree.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | from pathlib import Path 4 | 5 | 6 | def create_symlink_tree(input_folder: str, target_folder: str, total_length: int | None = None): 7 | # Convert to Path objects for easier manipulation 8 | input_folder = Path(input_folder) 9 | target_folder = Path(target_folder) 10 | 11 | # Iterate through the sequence subfolders in the input folder 12 | for sequence_folder in input_folder.iterdir(): 13 | if sequence_folder.is_dir(): 14 | # Create corresponding folder in the target directory 15 | target_sequence_folder = target_folder / sequence_folder.name 16 | target_sequence_folder.mkdir(parents=True, exist_ok=True) 17 | 18 | # List all feather files in the sequence folder, sorted by name 19 | feather_files = sorted(sequence_folder.glob("*.feather")) 20 | 21 | modified_feather_files = feather_files 22 | if total_length is not None: 23 | modified_feather_files = feather_files[:total_length] 24 | else: 25 | modified_feather_files = feather_files[:-1] 26 | 27 | # Create symlinks for all but the last file in the sequence 28 | for file in modified_feather_files: 29 | symlink_target = target_sequence_folder / file.name 30 | symlink_target.symlink_to(file.resolve()) 31 | 32 | 33 | if __name__ == "__main__": 34 | 
import argparse 35 | 36 | # Argument parsing 37 | parser = argparse.ArgumentParser( 38 | description="Create a folder tree with symlinks to all but the last file in each sequence." 39 | ) 40 | parser.add_argument( 41 | "input_folder", type=str, help="Path to the input folder containing sequence subfolders." 42 | ) 43 | parser.add_argument( 44 | "target_folder", 45 | type=str, 46 | help="Path to the target folder where the symlink tree will be created.", 47 | ) 48 | parser.add_argument( 49 | "--total_length", 50 | type=int, 51 | default=None, 52 | ) 53 | 54 | args = parser.parse_args() 55 | 56 | # Run the symlink creation 57 | create_symlink_tree(args.input_folder, args.target_folder, args.total_length) 58 | -------------------------------------------------------------------------------- /data_prep_scripts/argo/duplicate_without_annotations.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | from pathlib import Path 3 | 4 | import tqdm 5 | 6 | 7 | def parse_args(): 8 | parser = argparse.ArgumentParser() 9 | parser.add_argument( 10 | "source_root", 11 | type=Path, 12 | help="Path to the root directory of the source dataset.", 13 | ) 14 | parser.add_argument( 15 | "target_root", 16 | type=Path, 17 | help="Path to the root directory of the target dataset.", 18 | ) 19 | return parser.parse_args() 20 | 21 | 22 | def symlink_raw_data_split(source_root: Path, target_root: Path): 23 | assert source_root.is_dir(), f"{source_root} is not a directory" 24 | target_root.mkdir(exist_ok=True, parents=True) 25 | 26 | sequence_dirs = sorted(source_root.glob("*")) 27 | for sequence_dir in tqdm.tqdm(sequence_dirs): 28 | sequence_id = sequence_dir.name 29 | target_sequence_dir = target_root / sequence_id 30 | target_sequence_dir.mkdir(exist_ok=True) 31 | 32 | # Relevant files / folders are: 33 | # - calibration 34 | # - map 35 | # - sensors 36 | # - city_SE3_egovehicle.feather 37 | 38 | symlink_targets = [ 39 | "calibration", 40 | "map", 41 | "sensors", 42 | "city_SE3_egovehicle.feather", 43 | ] 44 | for symlink_target in symlink_targets: 45 | source_path = sequence_dir / symlink_target 46 | target_path = target_sequence_dir / symlink_target 47 | assert source_path.exists(), f"{source_path} not found" 48 | # If the target path already exists, remove it 49 | if target_path.exists(): 50 | target_path.unlink() 51 | target_path.symlink_to(source_path) 52 | 53 | 54 | def main(source_root: Path, target_root: Path): 55 | subdirs = ["train", "val", "test"] 56 | for subdir in subdirs: 57 | source_subdir = source_root / subdir 58 | target_subdir = target_root / subdir 59 | 60 | if not source_subdir.exists(): 61 | print(f"{source_subdir} not found") 62 | continue 63 | 64 | symlink_raw_data_split(source_subdir, target_subdir) 65 | 66 | label_subdirs = ["train_sceneflow_feather", "val_sceneflow_feather"] 67 | for label_subdir in label_subdirs: 68 | source_label_subdir = source_root / label_subdir 69 | target_label_subdir = target_root / label_subdir 70 | 71 | if not source_label_subdir.exists(): 72 | print(f"{source_label_subdir} not found") 73 | continue 74 | 75 | # Symlink directly 76 | # If the target path already exists, remove it 77 | if target_label_subdir.exists(): 78 | target_label_subdir.unlink() 79 | target_label_subdir.symlink_to(source_label_subdir) 80 | 81 | 82 | if __name__ == "__main__": 83 | args = parse_args() 84 | main(args.source_root, args.target_root) 85 | -------------------------------------------------------------------------------- 
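A sketch of the tree duplicate_without_annotations.py produces for one split (the paths and sequence id are hypothetical placeholders; the linked entries come from the symlink_targets list above):

# target_root/train/SEQ/calibration -> source_root/train/SEQ/calibration
# target_root/train/SEQ/map -> source_root/train/SEQ/map
# target_root/train/SEQ/sensors -> source_root/train/SEQ/sensors
# target_root/train/SEQ/city_SE3_egovehicle.feather -> source_root/train/SEQ/city_SE3_egovehicle.feather
# Per-sequence annotation files are deliberately not linked, while the
# train/val *_sceneflow_feather label directories are symlinked wholesale when present.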
/data_prep_scripts/argo/plot_boxes.py: -------------------------------------------------------------------------------- 1 | import json 2 | from argparse import ArgumentParser 3 | from collections import defaultdict 4 | from pathlib import Path 5 | 6 | import matplotlib.pyplot as plt 7 | import numpy as np 8 | 9 | # Define the category mappings 10 | BACKGROUND_CATEGORIES = ["BACKGROUND"] 11 | 12 | ROAD_SIGNS = [ 13 | "BOLLARD", 14 | "CONSTRUCTION_BARREL", 15 | "CONSTRUCTION_CONE", 16 | "MOBILE_PEDESTRIAN_CROSSING_SIGN", 17 | "SIGN", 18 | "STOP_SIGN", 19 | "MESSAGE_BOARD_TRAILER", 20 | "TRAFFIC_LIGHT_TRAILER", 21 | ] 22 | 23 | PEDESTRIAN_CATEGORIES = ["PEDESTRIAN", "STROLLER", "WHEELCHAIR", "OFFICIAL_SIGNALER"] 24 | 25 | WHEELED_VRU = [ 26 | "BICYCLE", 27 | "BICYCLIST", 28 | "MOTORCYCLE", 29 | "MOTORCYCLIST", 30 | "WHEELED_DEVICE", 31 | "WHEELED_RIDER", 32 | ] 33 | 34 | CAR = ["REGULAR_VEHICLE"] 35 | 36 | OTHER_VEHICLES = [ 37 | "BOX_TRUCK", 38 | "LARGE_VEHICLE", 39 | "RAILED_VEHICLE", 40 | "TRUCK", 41 | "TRUCK_CAB", 42 | "VEHICULAR_TRAILER", 43 | "ARTICULATED_BUS", 44 | "BUS", 45 | "SCHOOL_BUS", 46 | ] 47 | 48 | BUCKETED_METACATAGORIES = { 49 | "BACKGROUND": BACKGROUND_CATEGORIES, 50 | "CAR": CAR, 51 | "PEDESTRIAN": PEDESTRIAN_CATEGORIES, 52 | "WHEELED_VRU": WHEELED_VRU, 53 | "OTHER_VEHICLES": OTHER_VEHICLES, 54 | } 55 | 56 | # Reverse mapping from specific category to meta category 57 | CATEGORY_TO_META = { 58 | category: meta 59 | for meta, categories in BUCKETED_METACATAGORIES.items() 60 | for category in categories 61 | } 62 | 63 | 64 | def load_cuboid_metadata(json_file: Path): 65 | """Load cuboid metadata from a JSON file.""" 66 | print("Loading cuboid metadata from", json_file) 67 | with open(json_file, "r") as f: 68 | cuboid_metadata = json.load(f) 69 | print("Loaded", len(cuboid_metadata), "cuboids") 70 | return cuboid_metadata 71 | 72 | 73 | def plot_histogram(cuboid_metadata, output_file: Path): 74 | """Plot a histogram of boxes by volume, colored by class name.""" 75 | volumes_by_class = defaultdict(list) 76 | 77 | # Collect volumes by class 78 | for entry in cuboid_metadata: 79 | class_name = entry["class_name"] 80 | if class_name in CATEGORY_TO_META: 81 | meta_class_name = CATEGORY_TO_META[class_name] 82 | volume = entry["volume"] 83 | volumes_by_class[meta_class_name].append(volume) 84 | 85 | # Prepare data for the histogram 86 | classes = list(volumes_by_class.keys()) 87 | volumes = [volumes_by_class[class_name] for class_name in classes] 88 | 89 | # Create histogram bins 90 | all_volumes = np.concatenate(volumes) 91 | bin_edges = np.histogram_bin_edges(all_volumes, bins="auto") 92 | 93 | # Compute histogram data for each class 94 | histogram_data = [] 95 | for class_volumes in volumes: 96 | counts, _ = np.histogram(class_volumes, bins=bin_edges) 97 | histogram_data.append(counts) 98 | 99 | histogram_data = np.array(histogram_data) 100 | 101 | # Plot stacked histogram 102 | plt.figure(figsize=(10, 6), dpi=300) 103 | bottom = np.zeros(len(bin_edges) - 1) 104 | 105 | for class_name, class_counts in zip(classes, histogram_data): 106 | plt.bar( 107 | bin_edges[:-1], 108 | class_counts, 109 | width=np.diff(bin_edges), 110 | bottom=bottom, 111 | label=class_name, 112 | align="edge", 113 | ) 114 | bottom += class_counts 115 | 116 | plt.yscale("log") 117 | plt.xlabel("Volume") 118 | plt.ylabel("Count") 119 | plt.title("Histogram of Boxes by Volume and Class") 120 | # Set x-axis to limit of 0 to 60 121 | plt.xlim(0, 60) 122 | plt.legend() 123 | plt.grid(True, which="both", ls="--") 
124 | 125 | # Save plot as high-resolution PNG 126 | plt.savefig(output_file, format="png") 127 | plt.close() 128 | 129 | 130 | if __name__ == "__main__": 131 | parser = ArgumentParser(description="Plot histogram of cuboid volumes by class") 132 | parser.add_argument( 133 | "--json_file", 134 | type=str, 135 | required=True, 136 | help="Path to the JSON file containing cuboid metadata", 137 | ) 138 | parser.add_argument( 139 | "--output_file", 140 | type=str, 141 | required=True, 142 | help="Path to save the output PNG file", 143 | ) 144 | 145 | args = parser.parse_args() 146 | json_file = Path(args.json_file) 147 | output_file = Path(args.output_file) 148 | 149 | cuboid_metadata = load_cuboid_metadata(json_file) 150 | plot_histogram(cuboid_metadata, output_file) 151 | -------------------------------------------------------------------------------- /data_prep_scripts/nuscenes/create_gt_flow.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | os.environ["OMP_NUM_THREADS"] = "1" 4 | 5 | import multiprocessing 6 | from argparse import ArgumentParser 7 | from multiprocessing import Pool, current_process 8 | from pathlib import Path 9 | from typing import Optional 10 | 11 | import numpy as np 12 | import pandas as pd 13 | from tqdm import tqdm 14 | 15 | from bucketed_scene_flow_eval.datastructures.dataclasses import PoseInfo 16 | from bucketed_scene_flow_eval.datastructures.pointcloud import PointCloud 17 | from bucketed_scene_flow_eval.datasets.nuscenes.nuscenes_scene_flow import ( 18 | CATEGORY_MAP_INV, 19 | ) 20 | from bucketed_scene_flow_eval.utils.loaders import save_feather 21 | from bucketed_scene_flow_eval.datasets.nuscenes import NuScenesRawSequenceLoader 22 | from bucketed_scene_flow_eval.datasets.nuscenes.nuscenes_utils import InstanceBox 23 | 24 | def compute_sceneflow( 25 | dataset: NuScenesRawSequenceLoader, log_id: str, timestamps: tuple[int, int] 26 | ) -> tuple[np.ndarray, np.ndarray, np.ndarray]: 27 | """Compute sceneflow between the sweeps at the given timestamps. 28 | Args: 29 | dataset: Sensor dataset. 30 | log_id: unique id. 
31 | timestamps: the timestamps of the lidar sweeps to compute flow between 32 | Returns: 33 | tuple with fields: 34 | flow_0_1: Nx3 array containing flow from timestamp 0 to 1, 35 | expressed in the sensor frame for storage 36 | classes_0: Nx1 array containing the class ids for each point in sweep 0 37 | valid_0: Nx1 array indicating if the returned flow from 0 to 1 is valid 38 | (1 for valid, 0 otherwise); points in objects that disappear between 39 | the two sweeps are marked invalid 40 | """ 41 | 42 | def compute_flow(sweeps, cuboids: list[list[InstanceBox]], poses: list[PoseInfo]): 43 | ego1_SE3_ego0 = poses[1].ego_to_global.inverse().compose(poses[0].ego_to_global) 44 | 45 | flow_0_1 = np.zeros_like(sweeps[0].points, dtype=np.float32) 46 | 47 | valid_0 = np.ones(len(sweeps[0].points), dtype=bool) 48 | classes_0 = np.ones(len(sweeps[0].points), dtype=np.int8) * CATEGORY_MAP_INV["background"] 49 | 50 | c1_instance_tokens = {c.instance_token: i for i, c in enumerate(cuboids[1])} 51 | 52 | for c0 in cuboids[0]: 53 | c0.wlh += np.array([0.2, 0.2, 0.0]) # The bounding boxes are a little too tight, so some points are missed; expand width and length by 0.2 m 54 | obj_pts, obj_mask = c0.compute_interior_points(sweeps[0].points) 55 | classes_0[obj_mask] = CATEGORY_MAP_INV[c0.name] 56 | 57 | if c0.instance_token in c1_instance_tokens: 58 | c1 = cuboids[1][c1_instance_tokens[c0.instance_token]] 59 | c1_SE3_c0_ego_frame = ego1_SE3_ego0.inverse().compose( 60 | c1.dst_SE3_object.compose(c0.dst_SE3_object.inverse()) 61 | ) 62 | obj_flow = c1_SE3_c0_ego_frame.transform_points(obj_pts) - obj_pts 63 | flow_0_1[obj_mask] = obj_flow.astype(np.float32) 64 | else: 65 | valid_0[obj_mask] = 0 66 | 67 | # Convert flow from ego -> sensor frame for storage 68 | flow_0_1 = PointCloud(flow_0_1) 69 | flow_0_1_sensor = poses[0].sensor_to_ego.inverse().transform_flow(flow_0_1) 70 | 71 | return flow_0_1_sensor, classes_0, valid_0, ego1_SE3_ego0 72 | 73 | sequence = dataset._load_sequence_uncached(log_id) 74 | 75 | pc_objects = [sequence.load(ts, 0)[0].pc for ts in timestamps] 76 | 77 | # Sweeps are stored in sensor frame so we must transform to ego frame 78 | sweeps = [pc_obj.full_ego_pc for pc_obj in pc_objects] 79 | 80 | # Ego to map poses, used for computing ego motion 81 | poses = [sequence._load_pose(ts) for ts in timestamps] 82 | 83 | # Cuboids are fetched in global frame initially 84 | lidar_sensor_tokens = [sequence.synced_sensors[ts].lidar_top['token'] for ts in timestamps] 85 | cuboids = [sequence.nusc.get_boxes_with_instance_token(lidar_token) for lidar_token in lidar_sensor_tokens] 86 | # Here we convert cuboids from global frame to ego frame 87 | for cuboid_list, pose in zip(cuboids, poses): 88 | for c in cuboid_list: 89 | c.transform(pose.ego_to_global.inverse()) 90 | 91 | flow_0_1, classes_0, valid_0, _ = compute_flow(sweeps, cuboids, poses) 92 | return flow_0_1, classes_0, valid_0 93 | 94 | 95 | def process_log( 96 | dataset: NuScenesRawSequenceLoader, log_id: str, output_dir: Path, n: Optional[int] = None 97 | ): 98 | """Outputs sceneflow and auxiliary information for each pair of pointclouds in the 99 | dataset. Output files are written as output_dir/log_id/timestamp0.feather 100 | Args: 101 | dataset: Sensor dataset to process. 102 | log_id: Log unique id. 103 | output_dir: Output directory.
104 | n: the position to use for the progress bar 105 | Returns: 106 | None 107 | """ 108 | timestamps = range(len(dataset._load_sequence_uncached(log_id))) 109 | 110 | iter_bar = zip(timestamps, timestamps[1:]) 111 | if n is not None: 112 | iter_bar = tqdm( 113 | iter_bar, 114 | leave=False, 115 | total=len(timestamps) - 1, 116 | position=n, 117 | desc=f"Log {log_id}", 118 | ) 119 | 120 | for ts0, ts1 in iter_bar: 121 | flow_0_1, classes_0, valid_0 = compute_sceneflow(dataset, log_id, (ts0, ts1)) 122 | df = pd.DataFrame( 123 | { 124 | "flow_tx_m": flow_0_1[:, 0], 125 | "flow_ty_m": flow_0_1[:, 1], 126 | "flow_tz_m": flow_0_1[:, 2], 127 | "is_valid": valid_0, 128 | "classes_0": classes_0, 129 | } 130 | ) 131 | save_feather(output_dir / log_id / f"{ts0}.feather", df, verbose=False) 132 | 133 | 134 | def process_log_wrapper(x, ignore_current_process=False): 135 | if not ignore_current_process: 136 | current = current_process() 137 | pos = current._identity[0] 138 | else: 139 | pos = 1 140 | process_log(*x, n=pos) 141 | 142 | 143 | def process_logs(data_dir: Path, nusc_ver: str, output_dir: Path, nproc: int): 144 | """Compute sceneflow for all logs in the dataset. Logs are processed in parallel. 145 | Args: 146 | data_dir: NuScenes directory. nusc_ver: NuScenes version string. 147 | output_dir: Output directory. nproc: number of worker processes. 148 | """ 149 | 150 | if not data_dir.exists(): 151 | print(f"{data_dir} not found") 152 | return 153 | 154 | split_output_dir = output_dir 155 | split_output_dir.mkdir(exist_ok=True, parents=True) 156 | 157 | dataset = NuScenesRawSequenceLoader(version=nusc_ver, sequence_dir=str(data_dir), point_cloud_range=None) 158 | logs = dataset.get_sequence_ids() 159 | args = sorted([(dataset, log, split_output_dir) for log in logs]) 160 | 161 | print(f"Using {nproc} processes") 162 | if nproc <= 1: 163 | for x in tqdm(args): 164 | process_log_wrapper(x, ignore_current_process=True) 165 | else: 166 | with Pool(processes=nproc) as p: 167 | list(tqdm(p.imap_unordered(process_log_wrapper, args), total=len(logs))) 168 | 169 | 170 | if __name__ == "__main__": 171 | multiprocessing.set_start_method("spawn") 172 | parser = ArgumentParser( 173 | prog="create", 174 | description="Create a LiDAR sceneflow dataset from NuScenes", 175 | ) 176 | parser.add_argument( 177 | "--data_dir", 178 | type=str, 179 | help="The top-level directory containing the input dataset", 180 | ) 181 | parser.add_argument( 182 | "--nusc_ver", 183 | type=str, 184 | help="The version of NuScenes to use.", 185 | ) 186 | parser.add_argument( 187 | "--output_dir", type=str, help="The location to output the sceneflow files to" 188 | ) 189 | parser.add_argument("--nproc", type=int, default=(multiprocessing.cpu_count() - 1)) 190 | 191 | args = parser.parse_args() 192 | data_root = Path(args.data_dir) 193 | output_dir = Path(args.output_dir) 194 | 195 | process_logs(data_root, args.nusc_ver, output_dir, args.nproc) 196 | -------------------------------------------------------------------------------- /data_prep_scripts/nuscenes/visualize_nuscenes.py: -------------------------------------------------------------------------------- 1 | from bucketed_scene_flow_eval.datasets.nuscenes import NuScenesRawSequenceLoader 2 | import numpy as np 3 | import open3d as o3d 4 | from pathlib import Path 5 | 6 | from bucketed_scene_flow_eval.datasets.nuscenes.nuscenes_metacategories import BUCKETED_METACATAGORIES 7 | from bucketed_scene_flow_eval.utils.loaders import load_feather 8 | 9 | raw_sequence_loader = NuScenesRawSequenceLoader(version='v1.0-mini',
sequence_dir="/efs/nuscenes_mini") 10 | sequence = raw_sequence_loader[0] 11 | 12 | starter_idx = 0 13 | timestamps = range(len(sequence)) 14 | 15 | def increase_starter_idx(vis): 16 | global starter_idx 17 | starter_idx += 1 18 | if starter_idx >= len(timestamps) - 1: 19 | starter_idx = 0 20 | # print("Index: ", starter_idx) 21 | vis.clear_geometries() 22 | draw_frames(vis, reset_view=False) 23 | 24 | 25 | def decrease_starter_idx(vis): 26 | global starter_idx 27 | starter_idx -= 1 28 | if starter_idx < 0: 29 | starter_idx = len(timestamps) - 2 30 | # print("Index: ", starter_idx) 31 | vis.clear_geometries() 32 | draw_frames(vis, reset_view=False) 33 | 34 | 35 | def setup_vis(): 36 | # # make open3d visualizer 37 | vis = o3d.visualization.VisualizerWithKeyCallback() 38 | vis.create_window() 39 | vis.get_render_option().point_size = 1.5 40 | vis.get_render_option().background_color = (0.1, 0.1, 0.1) 41 | # vis.get_render_option().show_coordinate_frame = True 42 | # set up vector 43 | vis.get_view_control().set_up([0, 0, 1]) 44 | # left arrow decrease starter_idx 45 | vis.register_key_callback(263, decrease_starter_idx) 46 | # right arrow increase starter_idx 47 | vis.register_key_callback(262, increase_starter_idx) 48 | 49 | return vis 50 | 51 | def _colorize_pc(pc: o3d.geometry.PointCloud, color_tuple: tuple[float, float, float]): 52 | pc_color = np.ones_like(pc.points) * np.array(color_tuple) 53 | return o3d.utility.Vector3dVector(pc_color) 54 | 55 | def draw_frames_cuboids(vis, reset_view=False): 56 | ts = timestamps[starter_idx] 57 | pc_object = sequence.load(ts, 0)[0].pc 58 | lidar_pc = pc_object.full_ego_pc 59 | pose = pc_object.pose.ego_to_global 60 | lidar_sensor_token = sequence.synced_sensors[ts].lidar_top['token'] 61 | cuboids = sequence.nusc.get_boxes_with_instance_token(lidar_sensor_token) 62 | # Add base point cloud 63 | pcd = lidar_pc.to_o3d() 64 | pcd.colors = _colorize_pc(pcd, (1, 1, 1)) 65 | vis.add_geometry(pcd, reset_bounding_box=reset_view) 66 | # # Draw the cuboids 67 | cuboids = [c.transform(pose.inverse()) for c in cuboids] 68 | draw_cuboids(vis, cuboids) 69 | 70 | def draw_frames(vis, reset_view=False): 71 | ts = timestamps[starter_idx:starter_idx+2] 72 | color_list = [(0, 0, 1), (0, 1, 0)] 73 | pc_objects = [sequence.load(t, 0)[0].pc for t in ts] 74 | 75 | lidar_pc = [pc_obj.full_ego_pc for pc_obj in pc_objects] 76 | 77 | groundish_points_mask = lidar_pc[0].within_region_mask(-1000, 1000, -1000, 1000, -2, 0.5) 78 | groundish_points = lidar_pc[0].mask_points(groundish_points_mask) 79 | actual_ground_from_groundish_mask = np.zeros((groundish_points.shape[0]), dtype=bool) 80 | 81 | o3d.utility.random.seed(42) 82 | _, inliers0 = groundish_points.to_o3d().segment_plane(distance_threshold=0.2, ransac_n=3, num_iterations=100, probability=1.0) 83 | actual_ground_from_groundish_mask[inliers0] = 1 84 | 85 | ground_mask = np.zeros_like(groundish_points_mask, dtype=bool) 86 | ground_mask[groundish_points_mask] = actual_ground_from_groundish_mask 87 | 88 | final_rendered_pc = lidar_pc[0].to_o3d() 89 | final_pc_color = np.tile([0, 0, 1], (ground_mask.shape[0], 1)) 90 | final_pc_color[ground_mask] = [0, 1, 1] 91 | final_rendered_pc.colors = o3d.utility.Vector3dVector(final_pc_color) 92 | 93 | vis.add_geometry(final_rendered_pc, reset_bounding_box=reset_view) 94 | 95 | # flow_data = load_feather(Path(f"/efs/nuscenes_mini_sceneflow_feather/{sequence.log_id}/{ts[0]}.feather")) 96 | flow_data = 
load_feather(Path(f"/efs/nuscenes_mini_nsfp_flow/sequence_len_002/{sequence.log_id}/{ts[0]:010d}.feather")) 97 | # flow_data = load_feather(Path(f"/efs/nuscenes_mini_fast_nsf_flow/sequence_len_002/{sequence.log_id}/{ts[0]:010d}.feather")) 98 | # is_valid_arr = flow_data["is_valid"].values 99 | 100 | # The flow data is stored as 3 1D arrays, one for each dimension. 101 | xs = flow_data["flow_tx_m"].values 102 | ys = flow_data["flow_ty_m"].values 103 | zs = flow_data["flow_tz_m"].values 104 | flow_0_1 = np.stack([xs, ys, zs], axis=1) 105 | # ego_frame_flow = flow_0_1 106 | ego_frame_flow = pc_objects[0].pose.sensor_to_ego.transform_flow(flow_0_1) 107 | 108 | pc = lidar_pc[0].to_array() 109 | flowed_pc = pc + ego_frame_flow 110 | 111 | line_set = o3d.geometry.LineSet() 112 | line_set_points = np.concatenate([pc, flowed_pc], axis=0) 113 | lines = np.array([[i, i + len(pc)] for i in range(len(pc))]) 114 | line_set.points = o3d.utility.Vector3dVector(line_set_points) 115 | line_set.lines = o3d.utility.Vector2iVector(lines) 116 | draw_color = (1, 0, 0) 117 | line_set.colors = o3d.utility.Vector3dVector( 118 | [draw_color for _ in range(len(lines))] 119 | ) 120 | vis.add_geometry(line_set, reset_bounding_box=reset_view) 121 | 122 | 123 | def draw_cuboids(vis, box_list, reset_view=False): 124 | # Our lines span from points 0 to 1, 1 to 2, 2 to 3, etc... 125 | lines = [ 126 | [0, 1], 127 | [1, 2], 128 | [2, 3], 129 | [0, 3], 130 | [4, 5], 131 | [5, 6], 132 | [6, 7], 133 | [4, 7], 134 | [0, 4], 135 | [1, 5], 136 | [2, 6], 137 | [3, 7], 138 | ] 139 | 140 | # Use the same color for all lines 141 | red = [[1, 0, 0] for _ in range(len(lines))] 142 | green = [[0, 1, 0] for _ in range(len(lines))] 143 | blue = [[0, 0.8, 0.8] for _ in range(len(lines))] 144 | magenta = [[1, 0, 1] for _ in range(len(lines))] 145 | 146 | for bbox in box_list: 147 | corner_box = bbox.corners().T 148 | 149 | line_set = o3d.geometry.LineSet() 150 | line_set.points = o3d.utility.Vector3dVector(corner_box) 151 | line_set.lines = o3d.utility.Vector2iVector(lines) 152 | 153 | if bbox.name in BUCKETED_METACATAGORIES["PEDESTRIAN"]: 154 | colors = red 155 | elif bbox.name in BUCKETED_METACATAGORIES["CAR"]: 156 | colors = blue 157 | elif bbox.name in BUCKETED_METACATAGORIES["WHEELED_VRU"]: 158 | colors = green 159 | elif bbox.name in BUCKETED_METACATAGORIES["OTHER_VEHICLES"]: 160 | colors = magenta 161 | else: # Background/static 162 | colors = [[1, 1, 0] for _ in range(len(lines))] 163 | line_set.colors = o3d.utility.Vector3dVector(colors) 164 | 165 | # Display the bounding boxes: 166 | vis.add_geometry(line_set, reset_bounding_box=reset_view) 167 | 168 | if __name__ == "__main__": 169 | vis = setup_vis() 170 | draw_frames(vis, reset_view=True) 171 | vis.run() 172 | -------------------------------------------------------------------------------- /data_prep_scripts/waymo/rasterize_heightmap.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from sklearn.neighbors import NearestNeighbors 4 | 5 | os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" 6 | import argparse 7 | import json 8 | import multiprocessing 9 | from pathlib import Path 10 | 11 | import numpy as np 12 | import tensorflow as tf 13 | from joblib import Parallel, delayed 14 | from waymo_open_dataset import dataset_pb2 15 | 16 | parser = argparse.ArgumentParser() 17 | parser.add_argument("waymo_directory", type=Path, help="Path to Waymo Open directory.") 18 | parser.add_argument("output_directory", type=Path, help="Path to output 
directory.") 19 | parser.add_argument( 20 | "--cells_per_meter", 21 | type=int, 22 | default=10.0 / 3.0, 23 | help="Cells per meter for discritization. Default is Argoverse 2 Sensor dataset default is 30cm (10/3 cells per meter)", 24 | ) 25 | parser.add_argument( 26 | "--num_neighbors", 27 | type=int, 28 | default=20, 29 | help="Number of neighbors to use to compute height", 30 | ) 31 | parser.add_argument( 32 | "--cpus", 33 | type=int, 34 | default=multiprocessing.cpu_count(), 35 | help="Number of cpus to use for parallel processing", 36 | ) 37 | 38 | args = parser.parse_args() 39 | 40 | assert args.cells_per_meter > 0, "Cells per meter must be positive" 41 | assert args.num_neighbors > 0, "Number of neighbors must be greater than zero" 42 | assert args.waymo_directory.is_dir(), f"{args.waymo_directory} is not a directory" 43 | 44 | print("Waymo directory:", args.waymo_directory) 45 | print("Output directory:", args.output_directory) 46 | 47 | 48 | def build_knn(points, num_neighbors): 49 | return NearestNeighbors(n_neighbors=num_neighbors, radius=20, leaf_size=num_neighbors).fit( 50 | points 51 | ) 52 | 53 | 54 | def build_global_grid(points, cells_per_meter): 55 | xs, ys, _ = zip(*points) 56 | min_x, max_x = min(xs), max(xs) 57 | min_y, max_y = min(ys), max(ys) 58 | 59 | grid_max_global_frame = np.array([max_x, max_y]) 60 | grid_min_global_frame = np.array([min_x, min_y]) 61 | 62 | area_grid_global_frame = grid_max_global_frame - grid_min_global_frame 63 | grid_shape = np.ceil(area_grid_global_frame * cells_per_meter).astype(int) + 1 64 | 65 | def global_to_grid_float(pts): 66 | assert (pts <= grid_max_global_frame).all(), f"({pts} <= {grid_max_global_frame})" 67 | assert (pts >= grid_min_global_frame).all(), f"({pts} >= {grid_min_global_frame})" 68 | relative_to_grid_origin = pts - grid_min_global_frame 69 | floating_point_grid_coordinate = relative_to_grid_origin * cells_per_meter 70 | return floating_point_grid_coordinate 71 | 72 | def global_to_grid_index(pts): 73 | coords = global_to_grid_float(pts) 74 | return np.round(coords).astype(int) 75 | 76 | return grid_shape, global_to_grid_index, global_to_grid_float, grid_min_global_frame 77 | 78 | 79 | def render_heightmap(points, cells_per_meter, num_neighbors): 80 | # We construct this full heightmap with 0, 0 at xy_min_offset in the global coordinate frame. 81 | ( 82 | grid_shape, 83 | global_to_grid_index, 84 | global_to_grid_float, 85 | grid_min_global_frame, 86 | ) = build_global_grid(points, cells_per_meter) 87 | 88 | polygon_grid_points = np.array([(*global_to_grid_index(p[:2]), p[2]) for p in points]) 89 | mean_z = np.mean([e[2] for e in polygon_grid_points]) 90 | knn = build_knn(polygon_grid_points, num_neighbors) 91 | 92 | # Construct a grid shaped array whose last axis holds the X, Y index value for that square, 93 | # with the average Z value for the purposes of querying. 
94 | xs_lin = np.arange(0, grid_shape[0], 1) 95 | ys_lin = np.arange(0, grid_shape[1], 1) 96 | xs_square = np.expand_dims(np.tile(xs_lin, (grid_shape[1], 1)), 2) 97 | ys_square = np.expand_dims(np.tile(ys_lin, (grid_shape[0], 1)).T, 2) 98 | zs_square = np.ones_like(xs_square) * mean_z 99 | pts_square = np.concatenate((xs_square, ys_square, zs_square), 2) 100 | 101 | # Flatten the pts square into an N x 3 array for querying KNN 102 | pts_square_shape = pts_square.shape 103 | pts_line = pts_square.reshape(pts_square_shape[0] * pts_square_shape[1], 3) 104 | 105 | _, indices = knn.kneighbors(pts_line) 106 | neighbor_values = polygon_grid_points[indices] 107 | avg_neighbor_z_values = np.mean(neighbor_values[:, :, 2], axis=1) 108 | 109 | # Reshape flattened average Z values back into grid shape. 110 | grid = avg_neighbor_z_values.reshape(pts_square_shape[0], pts_square_shape[1]) 111 | 112 | return grid, grid_min_global_frame 113 | 114 | 115 | def save_grid_global_offset( 116 | file_path: Path, grid, grid_min_global_frame, cells_per_meter, verbose=False 117 | ): 118 | se2 = { 119 | "R": [1.0, 0.0, 0.0, 1.0], # Identity rotation matrix flattened 120 | "t": [-grid_min_global_frame[0], -grid_min_global_frame[1]], 121 | "s": cells_per_meter, 122 | } 123 | 124 | save_folder = args.output_directory / file_path.parent.name / (file_path.stem + "_map") 125 | save_folder.mkdir(parents=True, exist_ok=True) 126 | 127 | se2_name = "se2.json" 128 | height_map_name = "ground_height.npy" 129 | 130 | height_map_file = save_folder / height_map_name 131 | if height_map_file.exists(): 132 | height_map_file.unlink() 133 | 134 | se2_file = save_folder / se2_name 135 | if se2_file.exists(): 136 | se2_file.unlink() 137 | 138 | np.save(height_map_file, grid.astype(np.float16)) 139 | if verbose: 140 | print(f"Saving heightmap to {height_map_file}") 141 | with open(se2_file, "w") as fp: 142 | json.dump(se2, fp) 143 | 144 | 145 | def polygon_to_points(polygon) -> list[np.ndarray]: 146 | return [np.array([e.x, e.y, e.z]) for e in polygon] 147 | 148 | 149 | def collect_points(frame: dataset_pb2.Frame) -> np.ndarray: 150 | map_features = frame.map_features 151 | points = [] 152 | for feature in map_features: 153 | if feature.HasField("road_edge"): 154 | points.extend(polygon_to_points(feature.road_edge.polyline)) 155 | elif feature.HasField("crosswalk"): 156 | points.extend(polygon_to_points(feature.crosswalk.polygon)) 157 | elif feature.HasField("road_line"): 158 | points.extend(polygon_to_points(feature.road_line.polyline)) 159 | return np.array(points) 160 | 161 | 162 | def build_work_queue(waymo_directory): 163 | waymo_directory = Path(waymo_directory) 164 | assert waymo_directory.is_dir(), f"{waymo_directory} is not a directory" 165 | 166 | train_records = sorted((waymo_directory / "training").glob("*.tfrecord")) 167 | val_records = sorted((waymo_directory / "validation").glob("*.tfrecord")) 168 | 169 | queue = train_records + val_records 170 | for record in queue: 171 | assert record.is_file(), f"{record} is not a file" 172 | 173 | return queue 174 | 175 | 176 | def process_record(file_path: Path): 177 | print("Processing", file_path) 178 | dataset = tf.data.TFRecordDataset(file_path, compression_type="") 179 | 180 | # Hack because I can't figure out how to extract the first frame from the dataset.
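    # (Editor's note, hedged: an equivalent one-liner, assuming the TFRecordDataset
    # is a plain eager-mode iterable, would be
    #   frame = dataset_pb2.Frame.FromString(bytearray(next(iter(dataset)).numpy()))
    # the loop-and-break below does the same thing.)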
181 | for data in dataset: 182 | frame = dataset_pb2.Frame.FromString(bytearray(data.numpy())) 183 | break 184 | 185 | points = collect_points(frame) 186 | grid, grid_min_global_frame = render_heightmap(points, args.cells_per_meter, args.num_neighbors) 187 | save_grid_global_offset( 188 | file_path, grid, grid_min_global_frame, args.cells_per_meter, verbose=True 189 | ) 190 | 191 | 192 | work_queue = build_work_queue(args.waymo_directory) 193 | print("Work queue size:", len(work_queue)) 194 | 195 | num_processes = min(args.cpus, len(work_queue)) 196 | Parallel(num_processes)(delayed(process_record)(record) for record in work_queue) 197 | -------------------------------------------------------------------------------- /docker/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM kylevedder/cudagl:11.8.0-devel-ubuntu22.04 2 | SHELL ["/bin/bash", "-c"] 3 | # Set the timezone info because otherwise tzinfo blocks install 4 | # flow and ignores the non-interactive frontend command 🤬🤬🤬 5 | RUN ln -snf /usr/share/zoneinfo/America/New_York /etc/localtime && echo "/usr/share/zoneinfo/America/New_York" > /etc/timezone 6 | 7 | # Core system packages 8 | RUN apt-get update --fix-missing 9 | RUN apt install -y software-properties-common wget curl gpg gcc git make g++ unzip apt-utils 10 | 11 | # Install miniconda to /miniconda 12 | RUN curl -LO http://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh 13 | RUN bash Miniconda3-latest-Linux-x86_64.sh -p /miniconda -b 14 | RUN rm Miniconda3-latest-Linux-x86_64.sh 15 | ENV PATH=/miniconda/bin:${PATH} 16 | RUN conda update -y conda 17 | 18 | 19 | ENV FORCE_CUDA="1" 20 | ENV TORCH_CUDA_ARCH_LIST="7.0 7.5 8.0 8.6 8.9 9.0" 21 | RUN conda install -y numpy=1.26 python=3.11 pytorch==2.3 torchvision torchaudio pytorch-cuda=11.8 fvcore iopath -c pytorch -c nvidia -c fvcore -c iopath -c conda-forge 22 | # Install nuscenes-devkit, forked to get rid of the matplotlib pegged dependency 23 | RUN conda install -y matplotlib=3.8 24 | RUN apt install unzip -y 25 | RUN pip install iopath fvcore pandas opencv-python pyquaternion pyarrow pytest av2 black isort 26 | 27 | RUN git clone https://github.com/kylevedder/nuscenes-devkit.git /nuscenes-devkit 28 | WORKDIR /nuscenes-devkit 29 | RUN pip install -e setup 30 | 31 | # Latest Open3D, because 0.18.0 has a bunch of bugs related to missing visualization functions 32 | RUN pip install https://github.com/isl-org/Open3D/releases/download/main-devel/open3d-0.18.0+18a47ef-cp311-cp311-manylinux_2_31_x86_64.whl 33 | 34 | ENV PYTHONPATH=/bucketed_scene_flow_eval:${PYTHONPATH} 35 | COPY bashrc /root/.bashrc 36 | WORKDIR /bucketed_scene_flow_eval -------------------------------------------------------------------------------- /docker/Dockerfileav2: -------------------------------------------------------------------------------- 1 | FROM nvidia/cudagl:11.3.0-devel-ubuntu20.04 2 | SHELL ["/bin/bash", "-c"] 3 | # Set the timezone info because otherwise tzinfo blocks install 4 | # flow and ignores the non-interactive frontend command 🤬🤬🤬 5 | RUN ln -snf /usr/share/zoneinfo/America/New_York /etc/localtime && echo "/usr/share/zoneinfo/America/New_York" > /etc/timezone 6 | 7 | # Core system packages 8 | RUN apt-get update --fix-missing 9 | RUN apt install -y software-properties-common wget curl gpg gcc git make 10 | 11 | # Install miniconda to /miniconda 12 | RUN curl -LO http://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh 13 | RUN bash Miniconda3-latest-Linux-x86_64.sh 
-p /miniconda -b 14 | RUN rm Miniconda3-latest-Linux-x86_64.sh 15 | ENV PATH=/miniconda/bin:${PATH} 16 | RUN conda update -y conda 17 | 18 | RUN apt install -y apt-utils 19 | 20 | ENV TORCH_CUDA_ARCH_LIST="Ampere;Turing;Pascal" 21 | ENV FORCE_CUDA="1" 22 | RUN conda update -y conda 23 | RUN conda install numpy python=3.10 pytorch==1.12.1 torchvision torchaudio cudatoolkit=11.3 -c pytorch -y 24 | RUN conda install -c fvcore -c iopath -c conda-forge fvcore iopath -y 25 | RUN pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/py310_cu113_pyt1121/download.html 26 | RUN pip install mmcv-full==1.7.1 -f https://download.openmmlab.com/mmcv/dist/cu113/torch1.12.0/index.html 27 | RUN pip install pyarrow pyquaternion open3d 28 | RUN pip install tensorboard 29 | RUN pip install pytorch-lightning==1.9.4 30 | RUN pip install nntime 31 | 32 | RUN git clone https://github.com/argoverse/av2-api.git /av2-api 33 | WORKDIR /av2-api 34 | RUN git checkout 74f949ea8f09df2e3145abf9bf4a14232804584e 35 | RUN conda install -c conda-forge mamba -y 36 | RUN mamba install -c conda-forge conda-forge/label/rust_dev::rust av click joblib kornia maturin nox numba polars pyarrow pyproj universal_pathlib -y 37 | RUN pip install git+https://github.com/JonathonLuiten/TrackEval.git 38 | ENV OPENSSL_DIR=/av2-api 39 | RUN pip install -e . 40 | RUN apt install -y nano 41 | ENV PYTHONPATH=/project:${PYTHONPATH} 42 | COPY bashrc /root/.bashrc 43 | WORKDIR /project 44 | -------------------------------------------------------------------------------- /docker/Dockerfilewaymo: -------------------------------------------------------------------------------- 1 | FROM nvidia/cudagl:11.3.0-devel-ubuntu20.04 2 | SHELL ["/bin/bash", "-c"] 3 | # Set the timezone info because otherwise tzinfo blocks install 4 | # flow and ignores the non-interactive frontend command 🤬🤬🤬 5 | RUN ln -snf /usr/share/zoneinfo/America/New_York /etc/localtime && echo "/usr/share/zoneinfo/America/New_York" > /etc/timezone 6 | 7 | # Core system packages 8 | RUN apt-get update --fix-missing 9 | RUN apt install -y software-properties-common wget curl gpg gcc git make 10 | 11 | # Install miniconda to /miniconda 12 | RUN curl -LO http://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh 13 | RUN bash Miniconda3-latest-Linux-x86_64.sh -p /miniconda -b 14 | RUN rm Miniconda3-latest-Linux-x86_64.sh 15 | ENV PATH=/miniconda/bin:${PATH} 16 | RUN conda update -y conda 17 | 18 | RUN apt install -y apt-utils 19 | 20 | ENV TORCH_CUDA_ARCH_LIST="Ampere;Turing;Pascal" 21 | ENV FORCE_CUDA="1" 22 | RUN conda update -y conda 23 | 24 | RUN conda install python=3.10 pip -y 25 | RUN pip install --upgrade pip 26 | # ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$CONDA_PREFIX/lib/ 27 | 28 | RUN apt install -y openexr libopenexr-dev 29 | RUN pip install numpy==1.21.5 waymo-open-dataset-tf-2.11.0 30 | 31 | RUN pip install pytorch-lightning opencv-python open3d 32 | RUN pip install bucketed-scene-flow-eval==2.0.20 33 | 34 | ENV PYTHONPATH=/project/data_prep_scripts/waymo:/project/:${PYTHONPATH} 35 | COPY bashrc /root/.bashrc 36 | WORKDIR /project/data_prep_scripts/waymo -------------------------------------------------------------------------------- /docker/bashrc: -------------------------------------------------------------------------------- 1 | # ~/.bashrc: executed by bash(1) for non-login shells. 
2 | # see /usr/share/doc/bash/examples/startup-files (in the package bash-doc) 3 | # for examples 4 | # If not running interactively, don't do anything 5 | case $- in 6 | *i*) ;; 7 | *) return;; 8 | esac 9 | # don't put duplicate lines or lines starting with space in the history. 10 | # See bash(1) for more options 11 | HISTCONTROL=ignoreboth 12 | 13 | # append to the history file, don't overwrite it 14 | shopt -s histappend 15 | 16 | # check the window size after each command and, if necessary, 17 | # update the values of LINES and COLUMNS. 18 | shopt -s checkwinsize 19 | 20 | # If set, the pattern "**" used in a pathname expansion context will 21 | # match all files and zero or more directories and subdirectories. 22 | #shopt -s globstar 23 | 24 | # make less more friendly for non-text input files, see lesspipe(1) 25 | [ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)" 26 | 27 | # set variable identifying the chroot you work in (used in the prompt below) 28 | if [ -z "${debian_chroot:-}" ] && [ -r /etc/debian_chroot ]; then 29 | debian_chroot=$(cat /etc/debian_chroot) 30 | fi 31 | 32 | # set a fancy prompt (non-color, unless we know we "want" color) 33 | case "$TERM" in 34 | xterm-color|*-256color) color_prompt=yes;; 35 | esac 36 | 37 | # uncomment for a colored prompt, if the terminal has the capability; turned 38 | # off by default to not distract the user: the focus in a terminal window 39 | # should be on the output of commands, not on the prompt 40 | force_color_prompt=yes 41 | 42 | if [ -n "$force_color_prompt" ]; then 43 | if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then 44 | # We have color support; assume it's compliant with Ecma-48 45 | # (ISO/IEC-6429). (Lack of such support is extremely rare, and such 46 | # a case would tend to support setf rather than setaf.) 47 | color_prompt=yes 48 | else 49 | color_prompt= 50 | fi 51 | fi 52 | 53 | INFO_STR="\$? " 54 | if [ "$color_prompt" = yes ]; then 55 | PS1='${debian_chroot:+($debian_chroot)}'"$INFO_STR"'\[\033[01;31m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ ' 56 | #PS1='${debian_chroot:+($debian_chroot)}\[\033[38;5;46m\]\u@\h\[\033[00m\]:\[\033[38;5;33m\]\w\[\033[00m\]\$ ' 57 | else 58 | PS1='${debian_chroot:+($debian_chroot)}'"$INFO_STR"'\u@\h:\w\$ ' 59 | fi 60 | unset color_prompt force_color_prompt 61 | 62 | # If this is an xterm set the title to user@host:dir 63 | case "$TERM" in 64 | xterm*|rxvt*) 65 | PS1="\[\e]0;${debian_chroot:+($debian_chroot)}\u@\h: \w\a\]$PS1" 66 | ;; 67 | *) 68 | ;; 69 | esac 70 | 71 | # enable color support of ls and also add handy aliases 72 | if [ -x /usr/bin/dircolors ]; then 73 | test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)" 74 | alias ls='ls --color=auto' 75 | #alias dir='dir --color=auto' 76 | #alias vdir='vdir --color=auto' 77 | 78 | alias grep='grep --color=auto' 79 | alias fgrep='fgrep --color=auto' 80 | alias egrep='egrep --color=auto' 81 | fi 82 | 83 | # colored GCC warnings and errors 84 | export GCC_COLORS='error=01;31:warning=01;35:note=01;36:caret=01;32:locus=01:quote=01' 85 | 86 | # some more ls aliases 87 | alias ll='ls -alF' 88 | alias la='ls -A' 89 | alias l='ls -CF' 90 | 91 | # Add an "alert" alias for long running commands. Use like so: 92 | # sleep 10; alert 93 | alias alert='notify-send --urgency=low -i "$([ $? = 0 ] && echo terminal || echo error)" "$(history|tail -n1|sed -e '\''s/^\s*[0-9]\+\s*//;s/[;&|]\s*alert$//'\'')"' 94 | 95 | # Alias definitions. 
96 | # You may want to put all your additions into a separate file like 97 | # ~/.bash_aliases, instead of adding them here directly. 98 | # See /usr/share/doc/bash-doc/examples in the bash-doc package. 99 | 100 | if [ -f ~/.bash_aliases ]; then 101 | . ~/.bash_aliases 102 | fi 103 | 104 | # enable programmable completion features (you don't need to enable 105 | # this, if it's already enabled in /etc/bash.bashrc and /etc/profile 106 | # sources /etc/bash.bashrc). 107 | if ! shopt -oq posix; then 108 | if [ -f /usr/share/bash-completion/bash_completion ]; then 109 | . /usr/share/bash-completion/bash_completion 110 | elif [ -f /etc/bash_completion ]; then 111 | . /etc/bash_completion 112 | fi 113 | fi 114 | 115 | # Eternal bash history. 116 | # --------------------- 117 | # Undocumented feature which sets the size to "unlimited". 118 | # http://stackoverflow.com/questions/9457233/unlimited-bash-history 119 | export HISTFILESIZE= 120 | export HISTSIZE= 121 | export HISTTIMEFORMAT="[%F %T] " 122 | # Change the file location because certain bash sessions truncate .bash_history file upon close. 123 | # http://superuser.com/questions/575479/bash-history-truncated-to-500-lines-on-each-login 124 | export HISTFILE=/root/.bash_history 125 | # Force prompt to write history after every command. 126 | # http://superuser.com/questions/20900/bash-history-loss 127 | PROMPT_COMMAND="history -a; $PROMPT_COMMAND" -------------------------------------------------------------------------------- /docs/AV2_EVAL_FORMAT.md: -------------------------------------------------------------------------------- 1 | # AV2 Eval Format 2 | 3 | This repo provides the standalone evaluation infrastructure for the [Argoverse 2 2024 Scene Flow Challenge](https://argoverse.org/sceneflow). The script used for evaluation is `scripts/evals/av2_eval.py`. 4 | 5 | This script makes several assumptions: 6 | 7 | - The input zip folder is structured like the supervised AV2 scene flow labels, but with only every 5th frame provided (this reduces submission size). 8 | - Every feather file is in the same format and same frame as the supervised labels, but without the `classes` column. 9 | 10 | An autobundler for this format is provided as part of the [SceneFlowZoo](https://github.com/kylevedder/SceneFlowZoo/blob/master/av2_scene_flow_competition_submit.py) utilities. -------------------------------------------------------------------------------- /docs/DATASTRUCTURES.md: -------------------------------------------------------------------------------- 1 | ## Data Structures: 2 | 3 | Located in `datastructures/scene_sequence.py` 4 | 5 | ### `RawSceneSequence` 6 | 7 | `RawSceneSequence` describes the raw scene -- raw observations and their global frame poses. 8 | 9 | `RawSceneSequence` presents a map interface from `Timestamp` to `RawSceneItem`. 10 | 11 | ### `QuerySceneSequence` 12 | 13 | `QuerySceneSequence` is a self-contained description of: 14 | 15 | - the raw scene 16 | - query particles 17 | - the requested timestamps the prediction method should solve for 18 | 19 | Query particles consist of a series of particles, each associated with a particle ID, and a single query timestamp. The query timestamp associates the particles with the requested timestamps. In principle these particles could be at any point in the requested series, although datasets may provide stronger guarantees (e.g.
scene flow datasets will have these be the first of two timestamps) 20 | 21 | `QuerySceneSequence` presents a map interface from `ParticleID` to `tuple[WorldParticle, Timestamp]`. 22 | 23 | ### `EstimatedParticleTrajectories` 24 | 25 | `EstimatedParticleTrajectories` describes trajectories for every `ParticleID` over the given timestamps. 26 | 27 | ### `EstimatedParticleTrajectories` 28 | 29 | `EstimatedParticleTrajectories` describes trajectories for every `ParticleID` over the given timestamps, along with semantic class IDs for each particle. 30 | -------------------------------------------------------------------------------- /docs/GETTING_STARTED.md: -------------------------------------------------------------------------------- 1 | # File system assumptions 2 | 3 | ### Argoverse 2 Sensor Dataset 4 | 5 | Somewhere on disk, have an `argoverse2/` folder so that the downloaded files live inside 6 | 7 | ``` 8 | argoverse2/train 9 | argoverse2/val 10 | argoverse2/test 11 | ``` 12 | 13 | Please note that when downloaded from the cloud, these files may have a different top level directory format (their stored format keeps changing); you can solve this by moving the files or symlinking the appropriate directories into a different tree. 14 | 15 | Generate the train and val supervision labels into 16 | 17 | ``` 18 | argoverse2/train_sceneflow_feather 19 | argoverse2/val_sceneflow_feather 20 | ``` 21 | 22 | To generate these supervision labels, use the generation script in `data_prep_scripts/argo/create_gt_flow.py`. We have uploaded [a prebuilt DockerHub image](https://hub.docker.com/repository/docker/kylevedder/zeroflow_av2/general) for running the generation script; it can be run using `./launch.sh`. 23 | 24 | ### Argoverse 2 NSFP Pseudolabels (New!) 25 | 26 | We provide the Argoverse 2 NSFP Pseudolabels for the _Sensor_ split in the S3 bucket 27 | 28 | ``` 29 | s3://argoverse/assets/av2/scene_flow/sensor/ 30 | ``` 31 | 32 | and for our subsets of the _Lidar_ split 33 | 34 | ``` 35 | s3://argoverse/assets/av2/scene_flow/lidar/subsample/ 36 | ``` 37 | 38 | 39 | ### Argoverse 2 Tiny Demo Dataset 40 | 41 | To get started, we provide a directly downloadable [tiny demo dataset](https://github.com/kylevedder/BucketedSceneFlowEval/files/13881746/argoverse2_tiny.zip) (5.5MB). 42 | 43 | `argoverse2_tiny` contains four subfolders: 44 | 45 | - `argoverse2_tiny/val`: a single sequence with a single frame pair 46 | - `argoverse2_tiny/val_sceneflow_feather`: the supervised ground truth for this frame pair 47 | - `argoverse2_tiny/val_nsfp_flow_feather`: the NSFP pseudolabels for this frame pair 48 | - `argoverse2_tiny/val_supervised_out`: the output of the forward pass of [FastFlow3D, a supervised scene flow estimator](http://vedder.io/zeroflow). 49 | 50 | ### Waymo Open 51 | 52 | Download Waymo Open v1.4.2 (earlier versions lack map information) and the scene flow labels contributed by _Scalable Scene Flow from Point Clouds in the Real World_, from the [Waymo Open download page](https://waymo.com/open/). We preprocess these files, both to convert them from an annoying proto file format to a standard Python format and to remove the ground points. 53 | 54 | Do this using 55 | 56 | 1. `data_prep_scripts/waymo/rasterize_heightmap.py` -- generate heightmaps in a separate folder used for ground removal 57 | 2.
`data_prep_scripts/waymo/extract_flow_and_remove_ground.py` -- extracts the points into a pickle format and removes the ground plane using the generated heightmaps 58 | 59 | We have uploaded [a prebuilt DockerHub image](https://hub.docker.com/repository/docker/kylevedder/zeroflow_waymo/general) for running the Waymo conversion scripts. 60 | 61 | ### Waymo Open Tiny Demo Dataset 62 | 63 | We have also provided a directly downloadable [tiny demo dataset](https://github.com/kylevedder/BucketedSceneFlowEval/files/13924555/waymo_open_processed_flow_tiny.zip) (3.1MB). 64 | 65 | `waymo_open_processed_flow_tiny` contains two subfolders: 66 | 67 | - `training`: a single frame pair of Waymo data 68 | - `train_nsfp_flow`: the flow labels for the frame pair 69 | 70 | ### NuScenes 71 | 72 | TODO -------------------------------------------------------------------------------- /docs/imgs/av2_gt_flow.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kylevedder/BucketedSceneFlowEval/a3ce5cf7226e467e0cea76cbc3ec443ddb733b28/docs/imgs/av2_gt_flow.gif -------------------------------------------------------------------------------- /docs/imgs/av2_lidar.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kylevedder/BucketedSceneFlowEval/a3ce5cf7226e467e0cea76cbc3ec443ddb733b28/docs/imgs/av2_lidar.gif -------------------------------------------------------------------------------- /docs/imgs/av2_multicam.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kylevedder/BucketedSceneFlowEval/a3ce5cf7226e467e0cea76cbc3ec443ddb733b28/docs/imgs/av2_multicam.png -------------------------------------------------------------------------------- /launch.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | touch docker_history.txt 3 | xhost + 4 | docker run --gpus=all --rm -it \ 5 | --shm-size=16gb \ 6 | -v `pwd`:/bucketed_scene_flow_eval \ 7 | -v /efs:/efs \ 8 | -v /efs2:/efs2 \ 9 | -v /bigdata:/bigdata \ 10 | -v /tmp/.X11-unix:/tmp/.X11-unix \ 11 | -v /tmp:/tmp \ 12 | -v `pwd`/docker_history.txt:/root/.bash_history \ 13 | -e DISPLAY=$DISPLAY \ 14 | -h $HOSTNAME \ 15 | --privileged \ 16 | kylevedder/bucketed_scene_flow_eval:latest 17 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["hatchling"] 3 | build-backend = "hatchling.build" 4 | 5 | [tool.hatch.build] 6 | exclude = [ 7 | "/.*", 8 | "/docker", 9 | "/scripts", 10 | "/docs", 11 | "/data_prep_scripts", 12 | "/launch.sh", 13 | "/README.md", 14 | ] 15 | 16 | [project] 17 | name = "bucketed_scene_flow_eval" 18 | version = "2.0.25" 19 | authors = [ 20 | { name="Kyle Vedder", email="kvedder@seas.upenn.edu" }, 21 | ] 22 | description = "Bucketed Scene Flow Evaluation" 23 | readme = "README.md" 24 | requires-python = ">=3.10" 25 | dependencies = [ 26 | "numpy", 27 | "iopath", 28 | "fvcore", 29 | "pandas", 30 | "opencv-python", 31 | "pyquaternion", 32 | "open3d", 33 | "pyarrow", 34 | "pytest", 35 | ] 36 | classifiers = [ 37 | "Programming Language :: Python :: 3", 38 | "License :: OSI Approved :: MIT License", 39 | "Operating System :: OS Independent", 40 | ] 41 | 42 | [tool.black] 43 | line-length = 100 44 | 45 | [tool.isort] 46 | profile = "black" 47 |
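# Editor's usage note (an assumption, not part of the original file): with the
# hatchling backend declared above, a local editable install is `pip install -e .`
# and a distribution build is `python -m build` (requires the `build` package).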
-------------------------------------------------------------------------------- /scripts/demo_3d.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | from pathlib import Path 3 | 4 | import numpy as np 5 | import tqdm 6 | 7 | from bucketed_scene_flow_eval.datasets import construct_dataset 8 | from bucketed_scene_flow_eval.datastructures import ( 9 | EgoLidarFlow, 10 | O3DVisualizer, 11 | TimeSyncedSceneFlowFrame, 12 | ) 13 | 14 | 15 | def visualize_lidar_3d( 16 | frame_list: list[TimeSyncedSceneFlowFrame], downscale_rgb_factor: int, with_aux: bool 17 | ): 18 | o3d_vis = O3DVisualizer(point_size=2) 19 | 20 | print("Visualizing", len(frame_list), "frames") 21 | 22 | for frame_idx, frame in enumerate(frame_list): 23 | rgb_frames = frame.rgbs 24 | pc_frame = frame.pc 25 | aux_pc_frame = frame.auxillary_pc 26 | flow_frame = frame.flow 27 | 28 | # Set constant flow for debug 29 | # flow_frame.full_flow = np.ones_like(flow_frame.full_flow) * 0.1 30 | 31 | o3d_vis.add_global_pc_frame(pc_frame, color=[1, 0, 0]) 32 | if aux_pc_frame is not None and with_aux: 33 | o3d_vis.add_global_pc_frame(aux_pc_frame, color=[0, 0, 1]) 34 | o3d_vis.add_global_flow(pc_frame, flow_frame) 35 | for name, rgb_frame in rgb_frames.items(): 36 | print(f"Adding RGB frame {frame_idx} {name}") 37 | rgb_frame = rgb_frame.rescale(downscale_rgb_factor) 38 | # print("RGB Frame ego pose:", rgb_frame.pose.ego_to_global.translation) 39 | o3d_vis.add_pose(rgb_frame.pose.ego_to_global) 40 | o3d_vis.add_global_rgb_frame(rgb_frame) 41 | o3d_vis.run() 42 | del o3d_vis 43 | 44 | 45 | if __name__ == "__main__": 46 | # Take arguments to specify dataset and root directory 47 | parser = argparse.ArgumentParser() 48 | parser.add_argument("--dataset", type=str, default="Argoverse2CausalSceneFlow") 49 | parser.add_argument("--root_dir", type=Path, default="/efs/argoverse2/test") 50 | parser.add_argument("--flow_dir", type=Path, default="/efs/argoverse2/test_sceneflow_feather") 51 | parser.add_argument("--with_rgb", action="store_true") 52 | parser.add_argument("--with_aux", action="store_true") 53 | parser.add_argument("--no_ground", action="store_true") 54 | parser.add_argument("--sequence_length", type=int, default=2) 55 | parser.add_argument("--downscale_rgb_factor", type=int, default=8) 56 | parser.add_argument("--log_id", type=str, default=None) 57 | args = parser.parse_args() 58 | 59 | dataset = construct_dataset( 60 | args.dataset, 61 | dict( 62 | root_dir=args.root_dir, 63 | flow_data_path=args.flow_dir, 64 | with_rgb=args.with_rgb, 65 | subsequence_length=args.sequence_length, 66 | use_gt_flow=False, 67 | with_ground=not args.no_ground, 68 | range_crop_type="ego", 69 | point_cloud_range=[-35, -35, -2.5, 35, 35, 2.5], 70 | log_subset=None if args.log_id is None else [args.log_id], 71 | ), 72 | ) 73 | assert len(dataset) > 0, "Dataset is empty" 74 | print("Dataset contains", len(dataset), "samples") 75 | 76 | vis_index = 0 77 | 78 | print("Loading sequence idx", vis_index) 79 | frame_list = dataset[vis_index] 80 | visualize_lidar_3d(frame_list, args.downscale_rgb_factor, args.with_aux) 81 | -------------------------------------------------------------------------------- /scripts/evals/__init__.py: -------------------------------------------------------------------------------- 1 | from .av2_eval import run_eval 2 | from .setup_sparse_user_submission import run_setup_sparse_user_submission 3 | 4 | __all__ = [ 5 | "run_eval", 6 | "run_setup_sparse_user_submission", 7 | ] 8 | 
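# Editor's usage sketch for the exported entry point (hedged; paths are
# hypothetical, and importing `scripts.evals` assumes the repo root is on sys.path):
#   from pathlib import Path
#   from scripts.evals import run_eval
#   run_eval(data_dir=Path("/data/av2/val"),
#            gt_flow_dir=Path("/data/av2/val_sceneflow_feather"),
#            est_flow_dir=Path("/tmp/est_flow"),
#            output_path=Path("/tmp/eval_out"),
#            cpu_count=4,
#            cache_root=Path("/tmp/av2_eval_cache"))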
-------------------------------------------------------------------------------- /scripts/evals/av2_eval.py: -------------------------------------------------------------------------------- 1 | # Set OMP_NUM_THREADS=1 to avoid slamming the CPU 2 | import os 3 | 4 | os.environ["OMP_NUM_THREADS"] = "1" 5 | 6 | import argparse 7 | import multiprocessing 8 | from pathlib import Path 9 | 10 | import tqdm 11 | 12 | from bucketed_scene_flow_eval.datasets import Argoverse2CausalSceneFlow 13 | from bucketed_scene_flow_eval.eval import Evaluator 14 | 15 | 16 | def _make_range_shards(total_len: int, num_shards: int) -> list[tuple[int, int]]: 17 | """ 18 | Return a list of tuples of (start, end) indices for each shard. 19 | 20 | The function divides the range specified by total_len into num_shards shards. 21 | Each shard is represented by a tuple of (start, end) indices. 22 | The division tries to distribute the elements as evenly as possible among the shards. 23 | """ 24 | shards = [] 25 | shard_len = total_len // num_shards 26 | remainder = total_len % num_shards 27 | 28 | start = 0 29 | for _ in range(num_shards): 30 | end = start + shard_len + (1 if remainder > 0 else 0) 31 | shards.append((start, min(end, total_len))) 32 | start = end 33 | remainder -= 1 34 | 35 | return shards 36 | 37 | 38 | def _make_index_shards( 39 | dataset: Argoverse2CausalSceneFlow, num_shards: int, every_kth_in_sequence: int 40 | ) -> list[list[int]]: 41 | dataset_valid_indices: list[int] = [ 42 | dataset_idx 43 | for ( 44 | _, 45 | (subsequence_start_idx, subsequence_end_idx), 46 | ), dataset_idx in dataset.sequence_subsequence_idx_to_dataset_idx.items() 47 | if (subsequence_start_idx % every_kth_in_sequence) == 0 48 | ] 49 | 50 | tuple_shards = _make_range_shards(len(dataset_valid_indices), num_shards) 51 | return [dataset_valid_indices[start:end] for start, end in tuple_shards] 52 | 53 | 54 | def _work( 55 | shard_idx: int, 56 | shard_list: list[int], 57 | gt_dataset: Argoverse2CausalSceneFlow, 58 | est_dataset: Argoverse2CausalSceneFlow, 59 | evaluator: Evaluator, 60 | verbose: bool = True, 61 | ) -> Evaluator: 62 | # Set tqdm bar on the row of the terminal corresponding to the shard index 63 | iterator = shard_list 64 | if verbose: 65 | iterator = tqdm.tqdm(shard_list, position=shard_idx + 1, desc=f"Shard {shard_idx}") 66 | 67 | for idx in iterator: 68 | gt_lst = gt_dataset[idx] 69 | est_lst = est_dataset[idx] 70 | assert len(gt_lst) == len(est_lst) == 2, f"GT and estimated lists must have length 2." 71 | gt_frame0, gt_frame1 = gt_lst 72 | est_frame0, est_frame1 = est_lst 73 | evaluator.eval(est_frame0.flow, gt_frame0) 74 | 75 | return evaluator 76 | 77 | 78 | def _work_wrapper( 79 | args: tuple[ 80 | int, list[int], Argoverse2CausalSceneFlow, Argoverse2CausalSceneFlow, Evaluator, bool 81 | ] 82 | ) -> Evaluator: 83 | return _work(*args) 84 | 85 | 86 | def run_eval( 87 | data_dir: Path, 88 | gt_flow_dir: Path, 89 | est_flow_dir: Path, 90 | output_path: Path, 91 | cpu_count: int, 92 | cache_root: Path, 93 | every_kth: int = 5, 94 | eval_type: str = "bucketed_epe", 95 | verbose: bool = True, 96 | ) -> None: 97 | assert data_dir.exists(), f"Data directory {data_dir} does not exist." 98 | assert gt_flow_dir.exists(), f"GT flow directory {gt_flow_dir} does not exist." 99 | assert est_flow_dir.exists(), f"Estimated flow directory {est_flow_dir} does not exist." 
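    # (Editor's comment: the same dataset class is constructed twice below -- once
    # pointed at the ground truth flow and once at the estimated flow -- so that
    # frames from the two datasets align by index during evaluation.)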
100 | 101 | # Make the output directory if it doesn't exist 102 | output_path.mkdir(parents=True, exist_ok=True) 103 | 104 | gt_dataset = Argoverse2CausalSceneFlow( 105 | root_dir=data_dir, 106 | flow_data_path=gt_flow_dir, 107 | with_ground=False, 108 | with_rgb=False, 109 | use_gt_flow=True, 110 | eval_type=eval_type, 111 | eval_args=dict(output_path=output_path), 112 | cache_root=cache_root, 113 | ) 114 | 115 | est_dataset = Argoverse2CausalSceneFlow( 116 | root_dir=data_dir, 117 | flow_data_path=est_flow_dir, 118 | with_ground=False, 119 | with_rgb=False, 120 | use_gt_flow=False, 121 | use_cache=False, 122 | eval_type=eval_type, 123 | cache_root=cache_root, 124 | ) 125 | 126 | dataset_evaluator = gt_dataset.evaluator() 127 | 128 | assert len(gt_dataset) == len( 129 | est_dataset 130 | ), f"GT and estimated datasets must be the same length, but are {len(gt_dataset)} and {len(est_dataset)} respectively." 131 | 132 | # Shard the dataset into pieces for each CPU 133 | shard_lists = _make_index_shards(gt_dataset, cpu_count, every_kth) 134 | args_list = [ 135 | (shard_idx, shard_list, gt_dataset, est_dataset, dataset_evaluator, verbose) 136 | for shard_idx, shard_list in enumerate(shard_lists) 137 | ] 138 | 139 | if cpu_count > 1: 140 | print(f"Running evaluation on {len(gt_dataset)} scenes using {cpu_count} CPUs.") 141 | # Run the evaluation in parallel 142 | with multiprocessing.Pool(cpu_count) as pool: 143 | sharded_evaluators = pool.map(_work_wrapper, args_list) 144 | else: 145 | print(f"Running evaluation on {len(gt_dataset)} scenes using 1 CPU.") 146 | # Run the evaluation serially 147 | sharded_evaluators = [_work_wrapper(args) for args in args_list] 148 | 149 | # Combine the sharded evaluators 150 | gathered_evaluator: Evaluator = sum(sharded_evaluators) 151 | gathered_evaluator.compute_results() 152 | 153 | 154 | if __name__ == "__main__": 155 | multiprocessing.set_start_method("spawn") 156 | parser = argparse.ArgumentParser( 157 | description="Evaluate estimated scene flow .feather files against the ground truth flow."
158 | ) 159 | parser.add_argument("data_dir", type=Path, help="Path to the root directory of the dataset") 160 | parser.add_argument("gt_flow_dir", type=Path, help="Path to the ground truth flow directory") 161 | parser.add_argument("est_flow_dir", type=Path, help="Path to the estimated flow directory") 162 | parser.add_argument("output_path", type=Path, help="Path to save the results") 163 | parser.add_argument( 164 | "--cpu_count", 165 | type=int, 166 | default=multiprocessing.cpu_count(), 167 | help="Number of CPUs to use", 168 | ) 169 | parser.add_argument( 170 | "--every_kth", type=int, default=5, help="Only evaluate every kth scene in a sequence" 171 | ) 172 | parser.add_argument("--eval_type", type=str, default="bucketed_epe", help="Type of evaluation") 173 | parser.add_argument( 174 | "--cache_root", 175 | type=Path, 176 | default=Path("/tmp/av2_eval_cache/"), 177 | help="Path to the cache root directory", 178 | ) 179 | parser.add_argument( 180 | "--quiet", 181 | action="store_true", 182 | help="Suppress output", 183 | ) 184 | 185 | args = parser.parse_args() 186 | 187 | run_eval( 188 | data_dir=args.data_dir, 189 | gt_flow_dir=args.gt_flow_dir, 190 | est_flow_dir=args.est_flow_dir, 191 | output_path=args.output_path, 192 | cpu_count=args.cpu_count, 193 | every_kth=args.every_kth, 194 | eval_type=args.eval_type, 195 | cache_root=args.cache_root, 196 | verbose=not args.quiet, 197 | ) 198 | -------------------------------------------------------------------------------- /scripts/evals/av2_occ.py: -------------------------------------------------------------------------------- 1 | # Set OMP_NUM_THREADS=1 to avoid slamming the CPU 2 | import os 3 | 4 | os.environ["OMP_NUM_THREADS"] = "1" 5 | 6 | import argparse 7 | import multiprocessing 8 | from pathlib import Path 9 | 10 | import numpy as np 11 | import pandas as pd 12 | import tqdm 13 | 14 | from bucketed_scene_flow_eval.datasets import Argoverse2NonCausalSceneFlow 15 | from bucketed_scene_flow_eval.eval import Evaluator 16 | from bucketed_scene_flow_eval.utils import load_feather 17 | 18 | 19 | def _make_range_shards(total_len: int, num_shards: int) -> list[tuple[int, int]]: 20 | """ 21 | Return a list of tuples of (start, end) indices for each shard. 22 | 23 | The function divides the range specified by total_len into num_shards shards. 24 | Each shard is represented by a tuple of (start, end) indices. 25 | The division tries to distribute the elements as evenly as possible among the shards.
26 | """ 27 | shards = [] 28 | shard_len = total_len // num_shards 29 | remainder = total_len % num_shards 30 | 31 | start = 0 32 | for _ in range(num_shards): 33 | end = start + shard_len + (1 if remainder > 0 else 0) 34 | shards.append((start, min(end, total_len))) 35 | start = end 36 | remainder -= 1 37 | 38 | return shards 39 | 40 | 41 | def _make_index_shards( 42 | dataset: Argoverse2NonCausalSceneFlow, num_shards: int, every_kth_in_sequence: int 43 | ) -> list[list[int]]: 44 | dataset_valid_indices: list[int] = [ 45 | dataset_idx 46 | for ( 47 | _, 48 | (subsequence_start_idx, subsequence_end_idx), 49 | ), dataset_idx in dataset.sequence_subsequence_idx_to_dataset_idx.items() 50 | if (subsequence_start_idx % every_kth_in_sequence) == 0 51 | ] 52 | 53 | tuple_shards = _make_range_shards(len(dataset_valid_indices), num_shards) 54 | return [dataset_valid_indices[start:end] for start, end in tuple_shards] 55 | 56 | 57 | def _work( 58 | shard_idx: int, 59 | shard_list: list[int], 60 | gt_dataset: Argoverse2NonCausalSceneFlow, 61 | occ_folder: Path, 62 | verbose: bool = True, 63 | ) -> list[float]: 64 | # Set tqdm bar on the row of the terminal corresponding to the shard index 65 | iterator = shard_list 66 | if verbose: 67 | iterator = tqdm.tqdm(shard_list, position=shard_idx + 1, desc=f"Shard {shard_idx}") 68 | 69 | per_frame_l1_errors = [] 70 | 71 | for idx in iterator: 72 | gt_lst = gt_dataset[idx] 73 | assert len(gt_lst) == 2, f"GT list must have length 2." 74 | source_frame = gt_lst[0] 75 | log_id = source_frame.log_id 76 | log_idx = source_frame.log_idx 77 | est_occ_path = occ_folder / f"{log_id}/{log_idx:010d}_occ.feather" 78 | 79 | assert est_occ_path.exists(), f"Estimated occ file {est_occ_path} does not exist." 80 | est_occ_df = load_feather(est_occ_path, verbose=False) 81 | est_occ_is_valid = est_occ_df["is_valid"].values 82 | est_occ_distances = est_occ_df["distances_m"].values 83 | est_occ_is_colliding = est_occ_df["is_colliding"].values 84 | 85 | gt_pc = source_frame.pc.full_ego_pc 86 | # Convert to a set of distances using L2 norm 87 | gt_distances = np.linalg.norm(gt_pc.points, axis=1) 88 | gt_is_valid_mask = source_frame.pc.mask 89 | 90 | # Ensure that the est_occ_df is the same length as the ground truth point cloud 91 | assert len(est_occ_df) == len( 92 | gt_pc 93 | ), "Estimated occ and ground truth point cloud must have the same length." 94 | 95 | # Ensure that for all entries, if gt_mask is true, then is_valid is true in the df 96 | assert np.all( 97 | np.logical_and(gt_is_valid_mask, est_occ_is_valid) == gt_is_valid_mask 98 | ), f"If gt_mask is true, then is_valid must be true in the estimated occ. Num differences: {np.sum(np.logical_and(gt_is_valid_mask, est_occ_is_valid) != gt_is_valid_mask)}" 99 | 100 | valid_gt_distances = gt_distances[gt_is_valid_mask] 101 | valid_est_distances = est_occ_distances[gt_is_valid_mask] 102 | 103 | l1_differences = np.abs(valid_gt_distances - valid_est_distances) 104 | per_frame_l1_errors.append(np.mean(l1_differences)) 105 | 106 | return per_frame_l1_errors 107 | 108 | 109 | def _work_wrapper( 110 | args: tuple[int, list[int], Argoverse2NonCausalSceneFlow, Path, bool] 111 | ) -> list[float]: 112 | return _work(*args) 113 | 114 | 115 | def run_eval( 116 | data_dir: Path, 117 | est_occ_dir: Path, 118 | output_path: Path, 119 | cpu_count: int, 120 | every_kth: int = 5, 121 | eval_type: str = "bucketed_epe", 122 | verbose: bool = True, 123 | ) -> None: 124 | assert data_dir.exists(), f"Data directory {data_dir} does not exist." 
125 | assert est_occ_dir.exists(), f"Estimated occ directory {est_occ_dir} does not exist." 126 | 127 | # Make the output directory if it doesn't exist 128 | output_path.mkdir(parents=True, exist_ok=True) 129 | 130 | gt_dataset = Argoverse2NonCausalSceneFlow( 131 | root_dir=data_dir, 132 | with_ground=False, 133 | with_rgb=False, 134 | load_flow=False, 135 | eval_type=eval_type, 136 | eval_args=dict(output_path=output_path), 137 | use_cache=False, 138 | range_crop_type="ego", 139 | ) 140 | 141 | # Shard the dataset into pieces for each CPU 142 | shard_lists = _make_index_shards(gt_dataset, cpu_count, every_kth) 143 | args_list = [ 144 | (shard_idx, shard_list, gt_dataset, est_occ_dir, verbose) 145 | for shard_idx, shard_list in enumerate(shard_lists) 146 | ] 147 | 148 | if cpu_count > 1: 149 | print(f"Running evaluation on {len(gt_dataset)} scenes using {cpu_count} CPUs.") 150 | # Run the evaluation in parallel 151 | with multiprocessing.Pool(cpu_count) as pool: 152 | sharded_results = pool.map(_work_wrapper, args_list) 153 | else: 154 | print(f"Running evaluation on {len(gt_dataset)} scenes using 1 CPU.") 155 | # Run the evaluation serially 156 | sharded_results = [_work_wrapper(args) for args in args_list] 157 | 158 | # Combine the results from each shard 159 | all_l1_error_results = [] 160 | for shard_results in sharded_results: 161 | all_l1_error_results.extend(shard_results) 162 | 163 | mean_l1_error = np.mean(all_l1_error_results) 164 | std_l1_error = np.std(all_l1_error_results) 165 | 166 | # Save mean and std output to a CSV file 167 | csv_file_path = output_path / "l1_error_results.csv" 168 | df = pd.DataFrame( 169 | { 170 | "mean_l1_error": [mean_l1_error], 171 | "std_l1_error": [std_l1_error], 172 | } 173 | ) 174 | df.to_csv(csv_file_path, index=False) 175 | print(f"Saved results to {csv_file_path}") 176 | 177 | 178 | if __name__ == "__main__": 179 | multiprocessing.set_start_method("spawn") 180 | parser = argparse.ArgumentParser( 181 | description="Evaluate estimated occupancy .feather files against ground truth point clouds."
182 | ) 183 | parser.add_argument("data_dir", type=Path, help="Path to the root directory of the dataset") 184 | parser.add_argument("est_occ_dir", type=Path, help="Path to the estimated occupancy directory") 185 | parser.add_argument("output_path", type=Path, help="Path to save the results") 186 | parser.add_argument( 187 | "--cpu_count", 188 | type=int, 189 | default=multiprocessing.cpu_count(), 190 | help="Number of CPUs to use", 191 | ) 192 | parser.add_argument( 193 | "--every_kth", type=int, default=5, help="Only evaluate every kth scene in a sequence" 194 | ) 195 | parser.add_argument("--eval_type", type=str, default="bucketed_epe", help="Type of evaluation") 196 | parser.add_argument( 197 | "--quiet", 198 | action="store_true", 199 | help="Suppress output", 200 | ) 201 | 202 | args = parser.parse_args() 203 | 204 | run_eval( 205 | data_dir=args.data_dir, 206 | est_occ_dir=args.est_occ_dir, 207 | output_path=args.output_path, 208 | cpu_count=args.cpu_count, 209 | every_kth=args.every_kth, 210 | eval_type=args.eval_type, 211 | verbose=not args.quiet, 212 | ) 213 | -------------------------------------------------------------------------------- /scripts/evals/setup_sparse_user_submission.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import math 3 | import shutil 4 | import time 5 | import zipfile 6 | from pathlib import Path 7 | 8 | import tqdm 9 | 10 | 11 | def _unzip_submission(working_dir: Path, submission_zip: Path, every_kth: int) -> Path: 12 | assert submission_zip.exists(), f"Submission zip {submission_zip} does not exist." 13 | submission_dir = working_dir / "submission" 14 | 15 | submission_dir.mkdir(parents=True, exist_ok=False) 16 | 17 | # If the submission zip is actually a directory, symlink it to the submission dir 18 | if submission_zip.is_dir(): 19 | print("Submission zip is a directory, symlinking it to the submission dir.") 20 | # Iterate over every sequence folder 21 | for sequence_folder in tqdm.tqdm(sorted(submission_zip.glob("*"))): 22 | sequence_folder_name = sequence_folder.name 23 | user_sequence_folder = submission_dir / sequence_folder_name 24 | user_sequence_folder.mkdir(parents=True, exist_ok=False) 25 | for idx, user_file in enumerate(sorted(sequence_folder.glob("*.feather"))): 26 | if idx % every_kth == 0: 27 | # Symlink the file to the user sequence folder 28 | user_file_symlink = user_sequence_folder / user_file.name 29 | user_file_symlink.symlink_to(user_file) 30 | 31 | return submission_dir 32 | 33 | print(f"Unzipping {submission_zip} to {submission_dir}") 34 | before_unzip = time.time() 35 | with zipfile.ZipFile(submission_zip, "r") as zip_ref: 36 | zip_ref.extractall(submission_dir) 37 | after_unzip = time.time() 38 | print( 39 | f"Unzipped {submission_zip} to {submission_dir} in {after_unzip - before_unzip:.2f} seconds." 40 | ) 41 | return submission_dir 42 | 43 | 44 | def _validate_sequence_folder_and_create_dummy_entries( 45 | user_sequence_folder: Path, ground_truth_sequence_folder: Path, divisor: int = 5 46 | ): 47 | assert ( 48 | user_sequence_folder.exists() 49 | ), f"User sequence folder {user_sequence_folder} does not exist." 50 | assert ( 51 | ground_truth_sequence_folder.exists() 52 | ), f"Ground truth sequence folder {ground_truth_sequence_folder} does not exist."
53 | 54 | # Check that they have the same name 55 | assert ( 56 | user_sequence_folder.name == ground_truth_sequence_folder.name 57 | ), f"User sequence folder {user_sequence_folder} and ground truth sequence folder {ground_truth_sequence_folder} do not have the same name." 58 | 59 | # Check that the user sequence folder has roughly 1/divisor as many feather files as the ground truth sequence folder 60 | user_sequence_files = sorted(user_sequence_folder.glob("*.feather")) 61 | gt_sequence_files = sorted(ground_truth_sequence_folder.glob("*.feather")) 62 | 63 | expected_num_user_files = int(math.ceil(len(gt_sequence_files) / divisor)) 64 | assert ( 65 | len(user_sequence_files) == expected_num_user_files 66 | ), f"User sequence folder {user_sequence_folder} has {len(user_sequence_files)} files, expected {expected_num_user_files} files." 67 | 68 | # Ensure that all user file stems are 10 characters long to match the expected %010d format and that their integer values are divisible by divisor. 69 | for user_file in user_sequence_files: 70 | assert len(user_file.stem) == 10, f"User file {user_file} does not have 10 characters." 71 | assert ( 72 | int(user_file.stem) % divisor == 0 73 | ), f"User file int {int(user_file.stem)} is not divisible by {divisor}." 74 | 75 | for idx in range(len(gt_sequence_files)): 76 | user_file = user_sequence_folder / f"{idx:010d}.feather" 77 | if idx % divisor == 0: 78 | # Check that file exists 79 | assert user_file.exists(), f"User file {user_file} does not exist." 80 | else: 81 | # Create dummy file 82 | with open(user_file, "w") as f: 83 | f.write("") 84 | 85 | # Check that the user sequence folder has the same number of files as the ground truth sequence folder 86 | user_sequence_files = sorted(user_sequence_folder.glob("*.feather")) 87 | assert len(user_sequence_files) == len( 88 | gt_sequence_files 89 | ), f"User sequence folder {user_sequence_folder} has {len(user_sequence_files)} files, expected {len(gt_sequence_files)} files." 90 | 91 | 92 | def run_setup_sparse_user_submission( 93 | working_dir: Path, 94 | user_submission_zip: Path, 95 | ground_truth_root_folder: Path, 96 | every_kth_entry: int, 97 | ) -> Path: 98 | working_dir = Path(working_dir) 99 | user_submission_zip = Path(user_submission_zip) 100 | ground_truth_root_folder = Path(ground_truth_root_folder) 101 | 102 | working_dir.mkdir(parents=True, exist_ok=True) 103 | assert ( 104 | user_submission_zip.exists() 105 | ), f"User submission zip {user_submission_zip} does not exist." 106 | assert ( 107 | ground_truth_root_folder.exists() 108 | ), f"Ground truth root folder {ground_truth_root_folder} does not exist." 109 | 110 | unzipped_submission_dir = _unzip_submission(working_dir, user_submission_zip, every_kth_entry) 111 | 112 | # Iterate over the sequence folders, validating each and creating dummy entries 113 | for gt_sequence_folder in tqdm.tqdm(sorted(ground_truth_root_folder.glob("*"))): 114 | user_sequence_folder = unzipped_submission_dir / gt_sequence_folder.name 115 | _validate_sequence_folder_and_create_dummy_entries( 116 | user_sequence_folder, gt_sequence_folder, divisor=every_kth_entry 117 | ) 118 | 119 | return unzipped_submission_dir 120 | 121 | 122 | if __name__ == "__main__": 123 | # Get arguments for the script 124 | parser = argparse.ArgumentParser( 125 | description="Setup a sparse user submission for the Argoverse 2.0 Scene Flow Prediction Challenge."
126 | ) 127 | parser.add_argument( 128 | "working_dir", 129 | type=Path, 130 | help="The working directory to unzip the user submission and create dummy entries.", 131 | ) 132 | parser.add_argument( 133 | "user_submission_zip", 134 | type=Path, 135 | help="The user submission zip file to unzip and create dummy entries.", 136 | ) 137 | parser.add_argument( 138 | "ground_truth_root_folder", 139 | type=Path, 140 | help="The root folder containing the ground truth sequence folders.", 141 | ) 142 | parser.add_argument( 143 | "--every_kth_entry", 144 | type=int, 145 | default=5, 146 | help="The stride of the sparse submission: only every kth entry is expected to be present.", 147 | ) 148 | args = parser.parse_args() 149 | 150 | run_setup_sparse_user_submission( 151 | args.working_dir, 152 | args.user_submission_zip, 153 | args.ground_truth_root_folder, 154 | args.every_kth_entry, 155 | ) 156 | -------------------------------------------------------------------------------- /scripts/evals/waymo_eval.py: -------------------------------------------------------------------------------- 1 | # Set OMP_NUM_THREADS=1 to avoid slamming the CPU 2 | import os 3 | 4 | os.environ["OMP_NUM_THREADS"] = "1" 5 | 6 | import argparse 7 | import multiprocessing 8 | from pathlib import Path 9 | 10 | import tqdm 11 | 12 | from bucketed_scene_flow_eval.datasets import WaymoOpenCausalSceneFlow 13 | from bucketed_scene_flow_eval.eval import Evaluator 14 | 15 | 16 | def _make_range_shards(total_len: int, num_shards: int) -> list[tuple[int, int]]: 17 | """ 18 | Return a list of tuples of (start, end) indices for each shard. 19 | 20 | The function divides the range specified by total_len into num_shards shards. 21 | Each shard is represented by a tuple of (start, end) indices. 22 | The division tries to distribute the elements as evenly as possible among the shards. 23 | """ 24 | shards = [] 25 | shard_len = total_len // num_shards 26 | remainder = total_len % num_shards 27 | 28 | start = 0 29 | for _ in range(num_shards): 30 | end = start + shard_len + (1 if remainder > 0 else 0) 31 | shards.append((start, min(end, total_len))) 32 | start = end 33 | remainder -= 1 34 | 35 | return shards 36 | 37 | 38 | def _make_index_shards( 39 | dataset: WaymoOpenCausalSceneFlow, num_shards: int, every_kth_in_sequence: int 40 | ) -> list[list[int]]: 41 | dataset_valid_indices: list[int] = [ 42 | dataset_idx 43 | for ( 44 | _, 45 | (subsequence_start_idx, subsequence_end_idx), 46 | ), dataset_idx in dataset.sequence_subsequence_idx_to_dataset_idx.items() 47 | if (subsequence_start_idx % every_kth_in_sequence) == 0 48 | ] 49 | 50 | tuple_shards = _make_range_shards(len(dataset_valid_indices), num_shards) 51 | return [dataset_valid_indices[start:end] for start, end in tuple_shards] 52 | 53 | 54 | def _work( 55 | shard_idx: int, 56 | shard_list: list[int], 57 | gt_dataset: WaymoOpenCausalSceneFlow, 58 | est_dataset: WaymoOpenCausalSceneFlow, 59 | evaluator: Evaluator, 60 | verbose: bool = True, 61 | ) -> Evaluator: 62 | # Set tqdm bar on the row of the terminal corresponding to the shard index 63 | iterator = shard_list 64 | if verbose: 65 | iterator = tqdm.tqdm(shard_list, position=shard_idx + 1, desc=f"Shard {shard_idx}") 66 | 67 | for idx in iterator: 68 | gt_lst = gt_dataset[idx] 69 | try: 70 | est_lst = est_dataset[idx] 71 | except AssertionError as e: 72 | print(e) 73 | continue 74 | assert len(gt_lst) == len(est_lst) == 2, f"GT and estimated lists must have length 2, got {len(gt_lst)} and {len(est_lst)}."
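# Each causal sample is a (frame_t, frame_t+1) pair; only the flow attached to
# the first frame is scored against the ground truth, so the second frame of
# each pair goes unused below.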
75 | gt_frame0, _gt_frame1 = gt_lst 76 | est_frame0, _est_frame1 = est_lst 77 | evaluator.eval(est_frame0.flow, gt_frame0) 78 | 79 | return evaluator 80 | 81 | 82 | def _work_wrapper( 83 | args: tuple[int, list[int], WaymoOpenCausalSceneFlow, WaymoOpenCausalSceneFlow, Evaluator, bool] 84 | ) -> Evaluator: 85 | return _work(*args) 86 | 87 | 88 | def run_eval( 89 | data_dir: Path, 90 | est_flow_dir: Path, 91 | output_path: Path, 92 | cpu_count: int, 93 | cache_root: Path, 94 | every_kth: int = 5, 95 | eval_type: str = "bucketed_epe", 96 | verbose: bool = True, 97 | ) -> None: 98 | assert data_dir.exists(), f"Data directory {data_dir} does not exist." 99 | assert est_flow_dir.exists(), f"Estimated flow directory {est_flow_dir} does not exist." 100 | 101 | # Make the output directory if it doesn't exist 102 | output_path.mkdir(parents=True, exist_ok=True) 103 | 104 | gt_dataset = WaymoOpenCausalSceneFlow( 105 | root_dir=data_dir, 106 | flow_folder=None, 107 | with_rgb=False, 108 | eval_type=eval_type, 109 | eval_args=dict(output_path=output_path), 110 | cache_root=cache_root, 111 | ) 112 | 113 | est_dataset = WaymoOpenCausalSceneFlow( 114 | root_dir=data_dir, 115 | flow_folder=est_flow_dir, 116 | with_rgb=False, 117 | use_cache=False, 118 | eval_type=eval_type, 119 | cache_root=cache_root, 120 | ) 121 | 122 | dataset_evaluator = gt_dataset.evaluator() 123 | 124 | assert len(gt_dataset) == len( 125 | est_dataset 126 | ), f"GT and estimated datasets must be the same length, but are {len(gt_dataset)} and {len(est_dataset)} respectively." 127 | 128 | # Shard the dataset into pieces for each CPU 129 | shard_lists = _make_index_shards(gt_dataset, cpu_count, every_kth) 130 | args_list = [ 131 | (shard_idx, shard_list, gt_dataset, est_dataset, dataset_evaluator, verbose) 132 | for shard_idx, shard_list in enumerate(shard_lists) 133 | ] 134 | 135 | if cpu_count > 1: 136 | print(f"Running evaluation on {len(gt_dataset)} scenes using {cpu_count} CPUs.") 137 | # Run the evaluation in parallel 138 | with multiprocessing.Pool(cpu_count) as pool: 139 | sharded_evaluators = pool.map(_work_wrapper, args_list) 140 | else: 141 | print(f"Running evaluation on {len(gt_dataset)} scenes using 1 CPU.") 142 | # Run the evaluation serially 143 | sharded_evaluators = [_work_wrapper(args) for args in args_list] 144 | 145 | # Combine the sharded evaluators 146 | gathered_evaluator: Evaluator = sum(sharded_evaluators) 147 | gathered_evaluator.compute_results() 148 | 149 | 150 | if __name__ == "__main__": 151 | multiprocessing.set_start_method("spawn") 152 | parser = argparse.ArgumentParser( 153 | description="Evaluate estimated Waymo Open scene flow .feather files against the ground truth."
154 | ) 155 | parser.add_argument("data_dir", type=Path, help="Path to the root data directory of the dataset") 156 | parser.add_argument("est_flow_dir", type=Path, help="Path to the estimated flow directory") 157 | parser.add_argument("output_path", type=Path, help="Path to save the results") 158 | parser.add_argument( 159 | "--cpu_count", 160 | type=int, 161 | default=multiprocessing.cpu_count(), 162 | help="Number of CPUs to use", 163 | ) 164 | parser.add_argument( 165 | "--every_kth", type=int, default=5, help="Only evaluate every kth frame in a sequence" 166 | ) 167 | parser.add_argument("--eval_type", type=str, default="bucketed_epe", help="Type of evaluation") 168 | parser.add_argument( 169 | "--cache_root", 170 | type=Path, 171 | default=Path("/tmp/av2_eval_cache/"), 172 | help="Path to the cache root directory", 173 | ) 174 | parser.add_argument( 175 | "--quiet", 176 | action="store_true", 177 | help="Suppress output", 178 | ) 179 | 180 | args = parser.parse_args() 181 | 182 | run_eval( 183 | data_dir=args.data_dir, 184 | est_flow_dir=args.est_flow_dir, 185 | output_path=args.output_path, 186 | cpu_count=args.cpu_count, 187 | every_kth=args.every_kth, 188 | eval_type=args.eval_type, 189 | cache_root=args.cache_root, 190 | verbose=not args.quiet, 191 | ) 192 | -------------------------------------------------------------------------------- /tests/datasets/argoverse2/av2_box_tests.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | import pytest 4 | 5 | from bucketed_scene_flow_eval.datasets.argoverse2 import ( 6 | ArgoverseBoxAnnotationSequenceLoader, 7 | ArgoverseSceneFlowSequenceLoader, 8 | ) 9 | 10 | 11 | @pytest.fixture 12 | def av2_box_sequence_loader() -> ArgoverseBoxAnnotationSequenceLoader: 13 | return ArgoverseBoxAnnotationSequenceLoader( 14 | raw_data_path=Path("/tmp/argoverse2_small/val"), 15 | ) 16 | 17 | 18 | def test_load_box_sequence_length( 19 | av2_box_sequence_loader: ArgoverseBoxAnnotationSequenceLoader, 20 | ): 21 | sequence = av2_box_sequence_loader.load_sequence("02678d04-cc9f-3148-9f95-1ba66347dff9") 22 | assert len(sequence) == 157, f"expected 157 frames, got {len(sequence)}" 23 | first_frame, lidar_data = sequence.load(0, 0) 24 | assert len(first_frame.boxes) == 23, f"expected 23 boxes, got {len(first_frame.boxes)}" 25 | 26 | sequence = av2_box_sequence_loader.load_sequence("02a00399-3857-444e-8db3-a8f58489c394") 27 | assert len(sequence) == 159, f"expected 159 frames, got {len(sequence)}" 28 | first_frame, lidar_data = sequence.load(0, 0) 29 | assert len(first_frame.boxes) == 10, f"expected 10 boxes, got {len(first_frame.boxes)}" 30 | -------------------------------------------------------------------------------- /tests/datasets/argoverse2/av2_small_tests.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | import numpy as np 4 | import pytest 5 | 6 | from bucketed_scene_flow_eval.datasets import ( 7 | Argoverse2CausalSceneFlow, 8 | Argoverse2NonCausalSceneFlow, 9 | ) 10 | from bucketed_scene_flow_eval.datasets.argoverse2 import ( 11 | ArgoverseBoxAnnotationSequenceLoader, 12 | ArgoverseSceneFlowSequenceLoader, 13 | ) 14 | from bucketed_scene_flow_eval.datastructures import ( 15 | SE3, 16 | PoseInfo, 17 | TimeSyncedAVLidarData, 18 | TimeSyncedSceneFlowFrame, 19 | ) 20 | 21 | 22 | @pytest.fixture 23 | def av2_sequence_loader() -> ArgoverseSceneFlowSequenceLoader: 24 | return ArgoverseSceneFlowSequenceLoader(
25 | raw_data_path=Path("/tmp/argoverse2_small/val"), 26 | flow_data_path=Path("/tmp/argoverse2_small/val_sceneflow_feather/"), 27 | with_rgb=True, 28 | expected_camera_shape=(194, 256, 3), 29 | ) 30 | 31 | 32 | @pytest.fixture 33 | def av2_dataset_seq_2_causal() -> Argoverse2CausalSceneFlow: 34 | return Argoverse2CausalSceneFlow( 35 | root_dir=Path("/tmp/argoverse2_small/val"), 36 | with_rgb=True, 37 | use_gt_flow=True, 38 | flow_data_path=Path("/tmp/argoverse2_small/val_sceneflow_feather/"), 39 | subsequence_length=2, 40 | expected_camera_shape=(194, 256, 3), 41 | ) 42 | 43 | 44 | @pytest.fixture 45 | def av2_dataset_seq_5_causal() -> Argoverse2CausalSceneFlow: 46 | return Argoverse2CausalSceneFlow( 47 | root_dir=Path("/tmp/argoverse2_small/val"), 48 | with_rgb=True, 49 | use_gt_flow=True, 50 | flow_data_path=Path("/tmp/argoverse2_small/val_sceneflow_feather/"), 51 | subsequence_length=5, 52 | expected_camera_shape=(194, 256, 3), 53 | ) 54 | 55 | 56 | @pytest.fixture 57 | def av2_dataset_seq_2_noncausal() -> Argoverse2NonCausalSceneFlow: 58 | return Argoverse2NonCausalSceneFlow( 59 | root_dir=Path("/tmp/argoverse2_small/val"), 60 | with_rgb=True, 61 | use_gt_flow=True, 62 | flow_data_path=Path("/tmp/argoverse2_small/val_sceneflow_feather/"), 63 | subsequence_length=2, 64 | expected_camera_shape=(194, 256, 3), 65 | ) 66 | 67 | 68 | @pytest.fixture 69 | def av2_dataset_seq_5_noncausal() -> Argoverse2NonCausalSceneFlow: 70 | return Argoverse2NonCausalSceneFlow( 71 | root_dir=Path("/tmp/argoverse2_small/val"), 72 | with_rgb=True, 73 | use_gt_flow=True, 74 | flow_data_path=Path("/tmp/argoverse2_small/val_sceneflow_feather/"), 75 | subsequence_length=5, 76 | expected_camera_shape=(194, 256, 3), 77 | ) 78 | 79 | 80 | def test_load_full_sequence_size_causal(av2_sequence_loader: ArgoverseSceneFlowSequenceLoader): 81 | sequence = av2_sequence_loader.load_sequence("02678d04-cc9f-3148-9f95-1ba66347dff9") 82 | assert len(sequence) == 157, f"expected 157 frames, got {len(sequence)}" 83 | sequence = av2_sequence_loader.load_sequence("02a00399-3857-444e-8db3-a8f58489c394") 84 | assert len(sequence) == 159, f"expected 159 frames, got {len(sequence)}" 85 | 86 | 87 | def test_av2_dataset_seq_2_size_noncausal( 88 | av2_dataset_seq_2_noncausal: Argoverse2NonCausalSceneFlow, 89 | ): 90 | # Length of the two subsequences, minus 1 because of flow between frames. 91 | expected_len = (157 - 1) // 2 + (159 - 1) // 2 92 | assert ( 93 | len(av2_dataset_seq_2_noncausal) == expected_len 94 | ), f"expected {expected_len} frames, got {len(av2_dataset_seq_2_noncausal)}" 95 | 96 | for frame_list in av2_dataset_seq_2_noncausal: 97 | assert isinstance(frame_list, list) 98 | assert len(frame_list) == 2, f"expected 2 entries, got {len(frame_list)}" 99 | 100 | 101 | def test_av2_dataset_seq_5_size_noncausal( 102 | av2_dataset_seq_5_noncausal: Argoverse2NonCausalSceneFlow, 103 | ): 104 | # Length of the two subsequences, minus 1 because of flow between frames. 105 | expected_len = (157 - 1) // 5 + (159 - 1) // 5 106 | assert ( 107 | len(av2_dataset_seq_5_noncausal) == expected_len 108 | ), f"expected {expected_len} frames, got {len(av2_dataset_seq_5_noncausal)}" 109 | 110 | for frame_list in av2_dataset_seq_5_noncausal: 111 | assert isinstance(frame_list, list) 112 | assert len(frame_list) == 5, f"expected 5 entries, got {len(frame_list)}" 113 | 114 | 115 | def test_av2_dataset_seq_2_size_causal(av2_dataset_seq_2_causal: Argoverse2CausalSceneFlow): 116 | # Length of the two subsequences, minus 1 because of flow between frames.
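# (Causal mode slides a length-2 window one frame at a time, so a sequence of
# N frames yields N - 1 samples: 157 - 1 and 159 - 1 below.)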
117 | expected_len = 157 - 1 + 159 - 1 118 | assert ( 119 | len(av2_dataset_seq_2_causal) == expected_len 120 | ), f"expected {expected_len} frames, got {len(av2_dataset_seq_2_causal)}" 121 | 122 | for frame_list in av2_dataset_seq_2_causal: 123 | assert isinstance(frame_list, list) 124 | assert len(frame_list) == 2, f"expected 2 entries, got {len(frame_list)}" 125 | 126 | 127 | def test_av2_dataset_seq_5_size_causal(av2_dataset_seq_5_causal: Argoverse2CausalSceneFlow): 128 | # Length of the two subsequences, minus (subsequence_length - 1) because of flow between frames. 129 | expected_len = 157 - 4 + 159 - 4 130 | assert ( 131 | len(av2_dataset_seq_5_causal) == expected_len 132 | ), f"expected {expected_len} frames, got {len(av2_dataset_seq_5_causal)}" 133 | 134 | for frame_list in av2_dataset_seq_5_causal: 135 | assert isinstance(frame_list, list) 136 | assert len(frame_list) == 5, f"expected 5 entries, got {len(frame_list)}" 137 | -------------------------------------------------------------------------------- /tests/datasets/nuscenes/nuscenes_tests.py_bak: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | import pytest 4 | 5 | from bucketed_scene_flow_eval.datasets.nuscenes import NuScenesRawSequenceLoader 6 | 7 | 8 | @pytest.fixture 9 | def nuscenes_loader() -> NuScenesRawSequenceLoader: 10 | return NuScenesRawSequenceLoader( 11 | sequence_dir=Path("/tmp/nuscenes"), 12 | version="v1.0-mini", 13 | split="mini_train", 14 | verbose=False, 15 | ) 16 | 17 | 18 | def test_nuscenes_loader_basic_load_and_len_check(nuscenes_loader: NuScenesRawSequenceLoader): 19 | assert len(nuscenes_loader) > 0, f"no sequences found in {nuscenes_loader}" 20 | expected_lens = [236, 236, 236, 233, 223, 239, 231, 231] 21 | assert len(nuscenes_loader) == len( 22 | expected_lens 23 | ), f"expected {len(expected_lens)} sequences, got {len(nuscenes_loader)}" 24 | 25 | num_loop_iterations = 0 26 | for sequence_id, expected_len in zip(nuscenes_loader.get_sequence_ids(), expected_lens): 27 | num_loop_iterations += 1 28 | nusc_seq = nuscenes_loader.load_sequence(sequence_id) 29 | assert ( 30 | len(nusc_seq) == expected_len 31 | ), f"expected {expected_len} frames, got {len(nusc_seq)} for {sequence_id}" 32 | 33 | assert num_loop_iterations == len( 34 | expected_lens 35 | ), f"expected {len(expected_lens)} loop iterations, got {num_loop_iterations}" 36 | -------------------------------------------------------------------------------- /tests/datastructures/rgb.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pytest 3 | 4 | from bucketed_scene_flow_eval.datastructures import RGBImage, RGBImageCrop 5 | 6 | 7 | @pytest.fixture 8 | def cropped_square() -> RGBImage: 9 | # Create a 100x100 image with a red square in the middle 10 | image = np.zeros((100, 100, 3), dtype=np.float32) 11 | image[25:75, 25:75] = [1, 0, 0] 12 | mask = RGBImageCrop(25, 25, 75, 75) 13 | return RGBImage(image, mask) 14 | 15 | 16 | def test_masked_image_extract(cropped_square: RGBImage): 17 | center_image = cropped_square.masked_image 18 | 19 | assert center_image.shape == (50, 50, 3), f"expected 50x50x3, got {center_image.shape}" 20 | assert np.all(center_image.full_image == [1, 0, 0]), f"expected all red, got {center_image}" 21 | -------------------------------------------------------------------------------- /tests/eval/bucketed_epe.py: -------------------------------------------------------------------------------- 1 | import numpy as np
2 | import pytest 3 | 4 | from bucketed_scene_flow_eval.datasets import ( 5 | Argoverse2CausalSceneFlow, 6 | construct_dataset, 7 | ) 8 | from bucketed_scene_flow_eval.datastructures import ( 9 | EgoLidarFlow, 10 | TimeSyncedSceneFlowFrame, 11 | ) 12 | 13 | 14 | @pytest.fixture 15 | def argo_dataset_gt_with_ground(): 16 | return construct_dataset( 17 | "argoverse2causalsceneflow", 18 | dict( 19 | root_dir="/tmp/argoverse2_tiny/val", 20 | with_rgb=False, 21 | use_gt_flow=True, 22 | with_ground=True, 23 | ), 24 | ) 25 | 26 | 27 | @pytest.fixture 28 | def argo_dataset_pseudo_with_ground(): 29 | return construct_dataset( 30 | "argoverse2causalsceneflow", 31 | dict( 32 | root_dir="/tmp/argoverse2_tiny/val", 33 | with_rgb=False, 34 | use_gt_flow=False, 35 | with_ground=True, 36 | ), 37 | ) 38 | 39 | 40 | @pytest.fixture 41 | def argo_dataset_gt_no_ground(): 42 | return construct_dataset( 43 | "argoverse2causalsceneflow", 44 | dict( 45 | root_dir="/tmp/argoverse2_tiny/val", 46 | with_rgb=False, 47 | use_gt_flow=True, 48 | with_ground=False, 49 | ), 50 | ) 51 | 52 | 53 | @pytest.fixture 54 | def argo_dataset_pseudo_no_ground(): 55 | return construct_dataset( 56 | "argoverse2causalsceneflow", 57 | dict( 58 | root_dir="/tmp/argoverse2_tiny/val", 59 | with_rgb=False, 60 | use_gt_flow=False, 61 | with_ground=False, 62 | ), 63 | ) 64 | 65 | 66 | def _run_eval_on_target_and_gt_datasets( 67 | gt_dataset: Argoverse2CausalSceneFlow, 68 | target_dataset: Argoverse2CausalSceneFlow, 69 | EXPECTED_RESULTS_DICT: dict[str, tuple[float, float]], 70 | ): 71 | assert len(gt_dataset) == len(target_dataset), ( 72 | f"Ground truth and pseudo datasets must have the same number of samples. " 73 | f"Found {len(gt_dataset)} ground truth samples and " 74 | f"{len(target_dataset)} pseudo samples." 75 | ) 76 | 77 | evaluator = gt_dataset.evaluator() 78 | 79 | # Iterate over both datasets, treating the pseudo dataset as the "prediction" 80 | # and the ground truth dataset as the "target" 81 | iterations = 0 82 | for target_lst, gt_lst in zip(target_dataset, gt_dataset): 83 | assert len(target_lst) == len(gt_lst) == 2, ( 84 | f"Each sample must be a list of length 2. " 85 | f"Found {len(target_lst)} and {len(gt_lst)}." 86 | ) 87 | target_item1: TimeSyncedSceneFlowFrame = target_lst[0] 88 | gt_item1: TimeSyncedSceneFlowFrame = gt_lst[0] 89 | 90 | evaluator.eval( 91 | target_item1.flow, 92 | gt_item1, 93 | ) 94 | iterations += 1 95 | 96 | assert iterations == len( 97 | gt_dataset 98 | ), f"Expected to iterate over {len(gt_dataset)} samples, but only iterated over {iterations}." 99 | 100 | out_results_dict: dict[str, tuple[float, float]] = evaluator.compute_results() 101 | 102 | # Ensure that output results are a dictionary of the expected type 103 | assert isinstance( 104 | out_results_dict, dict 105 | ), f"Results must be a dictionary. Found {out_results_dict}." 106 | 107 | # Check overlap of keys 108 | assert out_results_dict.keys() == EXPECTED_RESULTS_DICT.keys(), ( 109 | f"Results must be computed for the same classes. " 110 | f"Found {out_results_dict.keys()} and {EXPECTED_RESULTS_DICT.keys()}."
111 | ) 112 | 113 | print(out_results_dict) 114 | 115 | for key in EXPECTED_RESULTS_DICT: 116 | out_static_epe, out_dynamic_epe = out_results_dict[key] 117 | exp_static_epe, exp_dynamic_epe = EXPECTED_RESULTS_DICT[key] 118 | 119 | # Check that floats are equal, but be aware of NaNs (which are not equal to anything) 120 | assert np.isnan(out_static_epe) == np.isnan( 121 | exp_static_epe 122 | ), f"Static EPEs must both be NaN or not NaN. Found output is {out_static_epe} but expected {exp_static_epe}." 123 | 124 | assert np.isnan(out_dynamic_epe) == np.isnan( 125 | exp_dynamic_epe 126 | ), f"Dynamic EPEs must both be NaN or not NaN. Found output is {out_dynamic_epe} but expected {exp_dynamic_epe}." 127 | 128 | if not np.isnan(exp_static_epe): 129 | assert out_static_epe == pytest.approx( 130 | exp_static_epe, rel=1e-6 131 | ), f"Static EPEs must be equal. Found {out_static_epe} and {exp_static_epe}." 132 | if not np.isnan(exp_dynamic_epe): 133 | assert out_dynamic_epe == pytest.approx( 134 | exp_dynamic_epe, rel=1e-6 135 | ), f"Dynamic EPEs must be equal. Found {out_dynamic_epe} and {exp_dynamic_epe}." 136 | 137 | 138 | def test_bucketed_eval_av2_with_ground( 139 | argo_dataset_gt_with_ground: Argoverse2CausalSceneFlow, 140 | argo_dataset_pseudo_with_ground: Argoverse2CausalSceneFlow, 141 | ): 142 | EXPECTED_RESULTS_DICT = { 143 | "BACKGROUND": (0.017420833175797096, float("nan")), 144 | "CAR": (0.00715087565425712, 0.9549859068245323), 145 | "OTHER_VEHICLES": (float("nan"), float("nan")), 146 | "PEDESTRIAN": (float("nan"), 0.8860363751576089), 147 | "WHEELED_VRU": (float("nan"), 0.9848588530322282), 148 | } 149 | _run_eval_on_target_and_gt_datasets( 150 | argo_dataset_gt_with_ground, argo_dataset_pseudo_with_ground, EXPECTED_RESULTS_DICT 151 | ) 152 | 153 | 154 | def test_bucketed_eval_av2_no_ground( 155 | argo_dataset_gt_no_ground: Argoverse2CausalSceneFlow, 156 | argo_dataset_pseudo_no_ground: Argoverse2CausalSceneFlow, 157 | ): 158 | EXPECTED_RESULTS_DICT = { 159 | "BACKGROUND": (0.01975785995262935, float("nan")), 160 | "CAR": (0.008681314962881582, 0.9460171305709397), 161 | "OTHER_VEHICLES": (float("nan"), float("nan")), 162 | "PEDESTRIAN": (float("nan"), 0.8834896978129233), 163 | "WHEELED_VRU": (float("nan"), 0.9758072524985107), 164 | } 165 | _run_eval_on_target_and_gt_datasets( 166 | argo_dataset_gt_no_ground, argo_dataset_pseudo_no_ground, EXPECTED_RESULTS_DICT 167 | ) 168 | -------------------------------------------------------------------------------- /tests/eval/threeway_epe.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from bucketed_scene_flow_eval.datasets import ( 4 | Argoverse2CausalSceneFlow, 5 | construct_dataset, 6 | ) 7 | from tests.eval.bucketed_epe import _run_eval_on_target_and_gt_datasets 8 | 9 | 10 | @pytest.fixture 11 | def argo_dataset_gt_with_ground(): 12 | return construct_dataset( 13 | "argoverse2causalsceneflow", 14 | dict( 15 | root_dir="/tmp/argoverse2_tiny/val", 16 | with_rgb=False, 17 | use_gt_flow=True, 18 | with_ground=True, 19 | eval_type="threeway_epe", 20 | ), 21 | ) 22 | 23 | 24 | @pytest.fixture 25 | def argo_dataset_pseudo_with_ground(): 26 | return construct_dataset( 27 | "argoverse2causalsceneflow", 28 | dict( 29 | root_dir="/tmp/argoverse2_tiny/val", 30 | with_rgb=False, 31 | use_gt_flow=False, 32 | with_ground=True, 33 | eval_type="threeway_epe", 34 | ), 35 | ) 36 | 37 | 38 | @pytest.fixture 39 | def argo_dataset_gt_no_ground(): 40 | return construct_dataset(
"argoverse2causalsceneflow", 42 | dict( 43 | root_dir="/tmp/argoverse2_tiny/val", 44 | with_rgb=False, 45 | use_gt_flow=True, 46 | with_ground=False, 47 | eval_type="threeway_epe", 48 | ), 49 | ) 50 | 51 | 52 | @pytest.fixture 53 | def argo_dataset_pseudo_no_ground(): 54 | return construct_dataset( 55 | "argoverse2causalsceneflow", 56 | dict( 57 | root_dir="/tmp/argoverse2_tiny/val", 58 | with_rgb=False, 59 | use_gt_flow=False, 60 | with_ground=False, 61 | eval_type="threeway_epe", 62 | ), 63 | ) 64 | 65 | 66 | def test_bucketed_eval_av2_with_ground( 67 | argo_dataset_gt_with_ground: Argoverse2CausalSceneFlow, 68 | argo_dataset_pseudo_with_ground: Argoverse2CausalSceneFlow, 69 | ): 70 | EXPECTED_RESULTS_DICT = { 71 | "BACKGROUND": (0.017420833175797096, float("nan")), 72 | "FOREGROUND": (0.00715087565425712, 0.5442804620019708), 73 | } 74 | 75 | _run_eval_on_target_and_gt_datasets( 76 | argo_dataset_gt_with_ground, argo_dataset_pseudo_with_ground, EXPECTED_RESULTS_DICT 77 | ) 78 | 79 | 80 | def test_bucketed_eval_av2_no_ground( 81 | argo_dataset_gt_no_ground: Argoverse2CausalSceneFlow, 82 | argo_dataset_pseudo_no_ground: Argoverse2CausalSceneFlow, 83 | ): 84 | EXPECTED_RESULTS_DICT = { 85 | "BACKGROUND": (0.01975785995262935, float("nan")), 86 | "FOREGROUND": (0.008681314962881582, 0.5248476027085919), 87 | } 88 | _run_eval_on_target_and_gt_datasets( 89 | argo_dataset_gt_no_ground, argo_dataset_pseudo_no_ground, EXPECTED_RESULTS_DICT 90 | ) 91 | -------------------------------------------------------------------------------- /tests/integration_tests.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | 3 | import numpy as np 4 | import pytest 5 | import tqdm 6 | 7 | from bucketed_scene_flow_eval.datasets import construct_dataset 8 | from bucketed_scene_flow_eval.datastructures import * 9 | from bucketed_scene_flow_eval.interfaces import AbstractDataset 10 | 11 | 12 | @pytest.fixture 13 | def waymo_dataset_gt(): 14 | return construct_dataset( 15 | "waymoopencausalsceneflow", 16 | dict(root_dir="/tmp/waymo_open_processed_flow_tiny/training"), 17 | ) 18 | 19 | 20 | @pytest.fixture 21 | def argo_dataset_gt_with_ground(): 22 | return construct_dataset( 23 | "argoverse2causalsceneflow", 24 | dict( 25 | root_dir="/tmp/argoverse2_tiny/val", 26 | with_rgb=False, 27 | use_gt_flow=True, 28 | with_ground=True, 29 | ), 30 | ) 31 | 32 | 33 | @pytest.fixture 34 | def argo_dataset_pseudo_with_ground(): 35 | return construct_dataset( 36 | "argoverse2causalsceneflow", 37 | dict( 38 | root_dir="/tmp/argoverse2_tiny/val", 39 | with_rgb=False, 40 | use_gt_flow=False, 41 | with_ground=True, 42 | ), 43 | ) 44 | 45 | 46 | @pytest.fixture 47 | def argo_dataset_gt_no_ground(): 48 | return construct_dataset( 49 | "argoverse2causalsceneflow", 50 | dict( 51 | root_dir="/tmp/argoverse2_tiny/val", 52 | with_rgb=False, 53 | use_gt_flow=True, 54 | with_ground=False, 55 | ), 56 | ) 57 | 58 | 59 | @pytest.fixture 60 | def argo_dataset_pseudo_no_ground(): 61 | return construct_dataset( 62 | "argoverse2causalsceneflow", 63 | dict( 64 | root_dir="/tmp/argoverse2_tiny/val", 65 | with_rgb=False, 66 | use_gt_flow=False, 67 | with_ground=False, 68 | ), 69 | ) 70 | 71 | 72 | @pytest.fixture 73 | def argo_dataset_test_no_flow_no_ground(): 74 | return construct_dataset( 75 | "argoverse2causalsceneflow", 76 | dict( 77 | root_dir="/tmp/argoverse2_tiny/test", 78 | with_rgb=False, 79 | with_ground=False, 80 | load_flow=False, 81 | ), 82 | ) 83 | 84 | 85 | @pytest.fixture 
86 | def argo_dataset_test_no_flow_with_ground(): 87 | return construct_dataset( 88 | "argoverse2causalsceneflow", 89 | dict( 90 | root_dir="/tmp/argoverse2_tiny/test", 91 | with_rgb=False, 92 | with_ground=True, 93 | load_flow=False, 94 | ), 95 | ) 96 | 97 | 98 | @pytest.fixture 99 | def argo_box_dataset(): 100 | return construct_dataset( 101 | "argoverse2noncausalsceneflow", 102 | dict( 103 | root_dir="/tmp/argoverse2_small/val", 104 | subsequence_length=150, 105 | load_boxes=True, 106 | range_crop_type="ego", 107 | log_subset=["02678d04-cc9f-3148-9f95-1ba66347dff9"], 108 | ), 109 | ) 110 | 111 | 112 | def _validate_dataloader( 113 | dataloader: AbstractDataset, 114 | full_pc_size: int, 115 | masked_pc_size: int, 116 | expected_len: int = 1, 117 | expected_num_frames: int = 2, 118 | ): 119 | assert len(dataloader) == expected_len, f"Expected {expected_len} scenes, got {len(dataloader)}" 120 | 121 | dataloader_entries = dataloader[0] 122 | assert ( 123 | len(dataloader_entries) == expected_num_frames 124 | ), f"Expected list of length {expected_num_frames}, got {len(dataloader_entries)}" 125 | # Failure of the indexing above indicates that the __getitem__ method is broken. 126 | 127 | num_iteration_entries = 0 128 | for entry in dataloader: 129 | assert isinstance(entry, list), f"Expected list, got {type(entry)}" 130 | assert ( 131 | len(entry) == expected_num_frames 132 | ), f"Expected list of length {expected_num_frames}, got {len(entry)}" 133 | item_t1 = entry[0] 134 | 135 | assert ( 136 | full_pc_size == item_t1.pc.full_pc.shape[0] 137 | ), f"Expected full pc to be of size {full_pc_size}, got {item_t1.pc.full_pc.shape[0]}" 138 | 139 | assert ( 140 | masked_pc_size == item_t1.pc.pc.shape[0] 141 | ), f"Expected masked pc to be of size {masked_pc_size}, got {item_t1.pc.pc.shape[0]}" 142 | 143 | num_iteration_entries += 1 144 | 145 | # Check that we actually iterated over the dataset.
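# (Guards against the dataloader's iterator terminating early, which would make
# the per-entry assertions above vacuous.)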
146 | assert ( 147 | num_iteration_entries == expected_len 148 | ), f"Expected {expected_len} iterations, got {num_iteration_entries}" 149 | 150 | 151 | def test_argo_box_dataset(argo_box_dataset): 152 | _validate_dataloader(argo_box_dataset, 95381, 69600, 1, 150) 153 | 154 | 155 | def test_waymo_dataset(waymo_dataset_gt): 156 | _validate_dataloader(waymo_dataset_gt, 124364, 124364) 157 | 158 | 159 | def test_argo_dataset_gt_with_ground(argo_dataset_gt_with_ground): 160 | _validate_dataloader(argo_dataset_gt_with_ground, 90430, 74218) 161 | 162 | 163 | def test_argo_dataset_pseudo_with_ground(argo_dataset_pseudo_with_ground): 164 | _validate_dataloader(argo_dataset_pseudo_with_ground, 90430, 74218) 165 | 166 | 167 | def test_argo_dataset_test_no_flow_with_ground(argo_dataset_test_no_flow_with_ground): 168 | _validate_dataloader(argo_dataset_test_no_flow_with_ground, 90430, 74218) 169 | 170 | 171 | def test_argo_dataset_gt_no_ground(argo_dataset_gt_no_ground): 172 | _validate_dataloader(argo_dataset_gt_no_ground, 90430, 65225) 173 | 174 | 175 | def test_argo_dataset_pseudo_no_ground(argo_dataset_pseudo_no_ground): 176 | _validate_dataloader(argo_dataset_pseudo_no_ground, 90430, 65225) 177 | 178 | 179 | def test_argo_dataset_test_no_flow_no_ground(argo_dataset_test_no_flow_no_ground): 180 | _validate_dataloader(argo_dataset_test_no_flow_no_ground, 90430, 65225) 181 | -------------------------------------------------------------------------------- /tests/integration_tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "Running integration tests" 4 | pytest tests/integration_tests.py tests/eval/*.py tests/datasets/*/*.py tests/datastructures/*.py 5 | -------------------------------------------------------------------------------- /tests/setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Set up fake environment 4 | 5 | # Prepare /tmp/argoverse2_small/val 6 | rm -rf /tmp/argoverse2_small 7 | echo "Downloading the 23 chunks of argoverse2_small" 8 | wget -q https://github.com/kylevedder/BucketedSceneFlowEval/files/14668591/argove01.zip -O /tmp/argoverse_small_part_01.zip 9 | wget -q https://github.com/kylevedder/BucketedSceneFlowEval/files/14668605/argove02.zip -O /tmp/argoverse_small_part_02.zip 10 | wget -q https://github.com/kylevedder/BucketedSceneFlowEval/files/14668609/argove03.zip -O /tmp/argoverse_small_part_03.zip 11 | wget -q https://github.com/kylevedder/BucketedSceneFlowEval/files/14668613/argove04.zip -O /tmp/argoverse_small_part_04.zip 12 | wget -q https://github.com/kylevedder/BucketedSceneFlowEval/files/14668619/argove05.zip -O /tmp/argoverse_small_part_05.zip 13 | wget -q https://github.com/kylevedder/BucketedSceneFlowEval/files/14668624/argove06.zip -O /tmp/argoverse_small_part_06.zip 14 | wget -q https://github.com/kylevedder/BucketedSceneFlowEval/files/14672605/argove07.zip -O /tmp/argoverse_small_part_07.zip 15 | wget -q https://github.com/kylevedder/BucketedSceneFlowEval/files/14668635/argove08.zip -O /tmp/argoverse_small_part_08.zip 16 | wget -q https://github.com/kylevedder/BucketedSceneFlowEval/files/14668642/argove09.zip -O /tmp/argoverse_small_part_09.zip 17 | wget -q https://github.com/kylevedder/BucketedSceneFlowEval/files/14668643/argove10.zip -O /tmp/argoverse_small_part_10.zip 18 | wget -q https://github.com/kylevedder/BucketedSceneFlowEval/files/14668650/argove11.zip -O /tmp/argoverse_small_part_11.zip 19 | wget -q
https://github.com/kylevedder/BucketedSceneFlowEval/files/14672612/argove12.zip -O /tmp/argoverse_small_part_12.zip 20 | wget -q https://github.com/kylevedder/BucketedSceneFlowEval/files/14668654/argove13.zip -O /tmp/argoverse_small_part_13.zip 21 | wget -q https://github.com/kylevedder/BucketedSceneFlowEval/files/14668656/argove14.zip -O /tmp/argoverse_small_part_14.zip 22 | wget -q https://github.com/kylevedder/BucketedSceneFlowEval/files/14668657/argove15.zip -O /tmp/argoverse_small_part_15.zip 23 | wget -q https://github.com/kylevedder/BucketedSceneFlowEval/files/14668658/argove16.zip -O /tmp/argoverse_small_part_16.zip 24 | wget -q https://github.com/kylevedder/BucketedSceneFlowEval/files/14668660/argove17.zip -O /tmp/argoverse_small_part_17.zip 25 | wget -q https://github.com/kylevedder/BucketedSceneFlowEval/files/14668661/argove18.zip -O /tmp/argoverse_small_part_18.zip 26 | wget -q https://github.com/kylevedder/BucketedSceneFlowEval/files/14668663/argove19.zip -O /tmp/argoverse_small_part_19.zip 27 | wget -q https://github.com/kylevedder/BucketedSceneFlowEval/files/14668665/argove20.zip -O /tmp/argoverse_small_part_20.zip 28 | wget -q https://github.com/kylevedder/BucketedSceneFlowEval/files/14668667/argove21.zip -O /tmp/argoverse_small_part_21.zip 29 | wget -q https://github.com/kylevedder/BucketedSceneFlowEval/files/14668668/argove22.zip -O /tmp/argoverse_small_part_22.zip 30 | wget -q https://github.com/kylevedder/BucketedSceneFlowEval/files/14668671/argove23.zip -O /tmp/argoverse_small_part_23.zip 31 | for i in {1..23} 32 | do 33 | echo "Unzipping argoverse_small part $i" 34 | unzip -q /tmp/argoverse_small_part_$(printf %02d $i).zip -d /tmp/ 35 | done 36 | 37 | 38 | # Prepare /tmp/argoverse2_tiny/val 39 | rm -rf /tmp/argoverse2_tiny 40 | echo "Downloading argoverse2_tiny" 41 | wget -q https://github.com/kylevedder/BucketedSceneFlowEval/files/14576619/argoverse2_tiny.zip -O /tmp/argoverse2_tiny.zip 42 | unzip -q /tmp/argoverse2_tiny.zip -d /tmp/ 43 | 44 | # For testing the raw data loader only (no flow) mode of the argoverse2 dataset, we symlink a "test" split to the val split 45 | ln -s /tmp/argoverse2_tiny/val /tmp/argoverse2_tiny/test 46 | 47 | # Prepare /tmp/waymo_open_processed_flow_tiny 48 | rm -rf /tmp/waymo_open_processed_flow_tiny 49 | echo "Downloading waymo_open_processed_flow_tiny" 50 | wget -q https://github.com/kylevedder/BucketedSceneFlowEval/files/13924555/waymo_open_processed_flow_tiny.zip -O /tmp/waymo_open_processed_flow_tiny.zip 51 | unzip -q /tmp/waymo_open_processed_flow_tiny.zip -d /tmp/ 52 | 53 | 54 | # # Prepare /tmp/nuscenes v1.0-mini 55 | # rm -rf /tmp/nuscenes 56 | # mkdir -p /tmp/nuscenes 57 | # echo "Downloading nuscenes v1.0-mini" 58 | # wget -q https://www.nuscenes.org/data/v1.0-mini.tgz -O /tmp/nuscenes/nuscenes_v1.0-mini.tgz 59 | # tar -xzf /tmp/nuscenes/nuscenes_v1.0-mini.tgz -C /tmp/nuscenes 60 | --------------------------------------------------------------------------------