├── .flake8 ├── .gitignore ├── LICENSE ├── README.md ├── assets └── sledge_logo_transparent.png ├── docs ├── autoencoder.md ├── diffusion.md ├── installation.md └── simulation.md ├── environment.yml ├── requirements.txt ├── scripts ├── autoencoder │ ├── rvae │ │ ├── feature_caching_rvae.sh │ │ ├── latent_caching_rvae.sh │ │ └── training_rvae.sh │ └── vae │ │ └── training_vae.sh ├── diffusion │ ├── scenario_caching_diffusion.sh │ └── training_diffusion.sh ├── download │ ├── download_cache.sh │ └── download_nuplan.sh └── simulation │ └── simple_simulation.sh ├── setup.py └── sledge ├── __init__.py ├── autoencoder ├── __init__.py ├── callbacks │ ├── __init__.py │ ├── rvae_visualization_callback.py │ └── vae_visualization_callback.py ├── data_augmentation │ ├── __init__.py │ ├── augmentation_utils.py │ ├── rvae_augmentation.py │ └── vae_augmentation.py ├── data_loader │ ├── __init__.py │ └── autoencoder_datamodule.py ├── experiments │ ├── __init__.py │ ├── feature_caching.py │ ├── latent_caching.py │ └── training.py ├── modeling │ ├── __init__.py │ ├── autoencoder_lightning_module_wrapper.py │ ├── autoencoder_torch_module_wrapper.py │ ├── matching │ │ ├── __init__.py │ │ ├── abstract_matching.py │ │ └── rvae_matching.py │ ├── metrics │ │ ├── __init__.py │ │ ├── abstract_custom_metric.py │ │ └── kl_metric.py │ ├── models │ │ ├── __init__.py │ │ ├── rvae │ │ │ ├── __init__.py │ │ │ ├── rvae_config.py │ │ │ ├── rvae_decoder.py │ │ │ ├── rvae_encoder.py │ │ │ ├── rvae_model.py │ │ │ └── utils │ │ │ │ ├── rvae_position_encoding.py │ │ │ │ └── rvae_transformer.py │ │ └── vae │ │ │ ├── __init__.py │ │ │ ├── vae_config.py │ │ │ └── vae_model.py │ └── objectives │ │ ├── __init__.py │ │ ├── abstract_custom_objective.py │ │ ├── kl_objective.py │ │ ├── rvae_objective.py │ │ └── vae_objective.py └── preprocessing │ ├── __init__.py │ ├── feature_builders │ ├── __init__.py │ ├── sledge │ │ ├── __init__.py │ │ ├── sledge_agent_feature.py │ │ ├── sledge_feature_processing.py │ │ ├── sledge_line_feature.py │ │ └── sledge_utils.py │ └── sledge_raw_feature_builder.py │ ├── features │ ├── __init__.py │ ├── latent_feature.py │ ├── map_id_feature.py │ ├── rvae_matching_feature.py │ ├── sledge_raster_feature.py │ └── sledge_vector_feature.py │ └── target_builders │ ├── __init__.py │ └── map_id_target_builder.py ├── common ├── __init__.py ├── helper │ ├── __init__.py │ └── cache_helper.py └── visualization │ ├── __init__.py │ ├── sledge_colors.py │ └── sledge_visualization_utils.py ├── diffusion ├── __init__.py ├── dataset │ ├── __init__.py │ ├── rvae_latent_builder_config.py │ └── rvae_latent_dataset.py ├── experiments │ ├── __init__.py │ ├── scenario_caching.py │ └── training.py └── modelling │ ├── __init__.py │ └── ldm_pipeline.py ├── script ├── __init__.py ├── builders │ ├── __init__.py │ ├── autoencoder_builder.py │ ├── diffusion_builder.py │ ├── matching_builder.py │ ├── metric_builder.py │ ├── model_builder.py │ ├── scenario_builder.py │ ├── simulation_builder.py │ └── utils │ │ ├── __init__.py │ │ └── utils_config.py ├── config │ ├── __init__.py │ ├── autoencoder │ │ ├── __init__.py │ │ ├── callbacks │ │ │ ├── default_callbacks.yaml │ │ │ ├── learning_rate_monitor_callback.yaml │ │ │ ├── model_checkpoint_callback.yaml │ │ │ ├── rvae_visualization_callback.yaml │ │ │ ├── time_logging_callback.yaml │ │ │ └── vae_visualization_callback.yaml │ │ ├── data_augmentation │ │ │ ├── rvae_augmentation.yaml │ │ │ ├── rvae_no_augmentation.yaml │ │ │ └── vae_augmentation.yaml │ │ ├── data_loader │ │ │ └── 
default_data_loader.yaml │ │ ├── default_autoencoder.yaml │ │ ├── lightning │ │ │ └── default_lightning.yaml │ │ ├── lr_scheduler │ │ │ ├── multistep_lr.yaml │ │ │ └── one_cycle_lr.yaml │ │ ├── matching │ │ │ ├── rvae_green_lights_matching.yaml │ │ │ ├── rvae_lines_matching.yaml │ │ │ ├── rvae_pedestrians_matching.yaml │ │ │ ├── rvae_red_lights_matching.yaml │ │ │ ├── rvae_static_objects_matching.yaml │ │ │ └── rvae_vehicles_matching.yaml │ │ ├── objective │ │ │ ├── kl_objective.yaml │ │ │ ├── rvae_ego_objective.yaml │ │ │ ├── rvae_green_lights_objective.yaml │ │ │ ├── rvae_lines_objective.yaml │ │ │ ├── rvae_pedestrians_objective.yaml │ │ │ ├── rvae_red_lights_objective.yaml │ │ │ ├── rvae_static_objects_objective.yaml │ │ │ ├── rvae_vehicles_objective.yaml │ │ │ ├── vae_bce_objective.yaml │ │ │ └── vae_l1_objective.yaml │ │ ├── optimizer │ │ │ ├── adam.yaml │ │ │ ├── adamw.yaml │ │ │ └── sgd.yaml │ │ ├── scenario_type_weights │ │ │ └── default_scenario_type_weights.yaml │ │ └── training_metric │ │ │ └── kl_metric.yaml │ ├── common │ │ ├── __init__.py │ │ ├── autoencoder_model │ │ │ ├── rvae_model.yaml │ │ │ └── vae_model.yaml │ │ ├── default_common.yaml │ │ ├── default_experiment.yaml │ │ ├── diffusion_model │ │ │ ├── dit_b_model.yaml │ │ │ ├── dit_l_model.yaml │ │ │ ├── dit_s_model.yaml │ │ │ └── dit_xl_model.yaml │ │ ├── scenario_builder │ │ │ ├── mock_abstract_scenario_builder.yaml │ │ │ ├── nuplan.yaml │ │ │ ├── nuplan_challenge.yaml │ │ │ ├── nuplan_mini.yaml │ │ │ ├── scenario_mapping │ │ │ │ ├── nuplan_challenge_scenario_mapping.yaml │ │ │ │ └── nuplan_scenario_mapping.yaml │ │ │ └── vehicle_parameters │ │ │ │ └── nuplan_vehicle_parameters.yaml │ │ ├── scenario_filter │ │ │ ├── filter_bos.yaml │ │ │ ├── filter_lav.yaml │ │ │ ├── filter_pgh.yaml │ │ │ ├── filter_sgp.yaml │ │ │ ├── one_continuous_log.yaml │ │ │ ├── reduced_val14_split.yaml │ │ │ ├── val14_split.yaml │ │ │ └── vegas.yaml │ │ ├── simulation_metric │ │ │ ├── common_metrics.yaml │ │ │ ├── default_metrics.yaml │ │ │ ├── ego_in_stop_line │ │ │ │ └── ego_stop_at_stop_line_statistics.yaml │ │ │ ├── high_level │ │ │ │ ├── drivable_area_compliance_statistics.yaml │ │ │ │ ├── driving_direction_compliance_statistics.yaml │ │ │ │ ├── ego_is_comfortable_statistics.yaml │ │ │ │ ├── ego_is_making_progress_statistics.yaml │ │ │ │ ├── no_ego_at_fault_collisions_statistics.yaml │ │ │ │ ├── planner_expert_average_heading_error_within_bound_statistics.yaml │ │ │ │ ├── planner_expert_final_heading_error_within_bound_statistics.yaml │ │ │ │ ├── planner_expert_final_l2_error_within_bound_statistics.yaml │ │ │ │ ├── planner_miss_rate_within_bound_statistics.yaml │ │ │ │ ├── speed_limit_compliance_statistics.yaml │ │ │ │ └── time_to_collision_within_bound_statistics.yaml │ │ │ ├── low_level │ │ │ │ ├── ego_acceleration_statistics.yaml │ │ │ │ ├── ego_expert_l2_error_statistics.yaml │ │ │ │ ├── ego_expert_l2_error_with_yaw_statistics.yaml │ │ │ │ ├── ego_jerk_statistics.yaml │ │ │ │ ├── ego_lane_change_statistics.yaml │ │ │ │ ├── ego_lat_acceleration_statistics.yaml │ │ │ │ ├── ego_lat_jerk_statistics.yaml │ │ │ │ ├── ego_lon_acceleration_statistics.yaml │ │ │ │ ├── ego_lon_jerk_statistics.yaml │ │ │ │ ├── ego_mean_speed_statistics.yaml │ │ │ │ ├── ego_progress_along_expert_route_statistics.yaml │ │ │ │ ├── ego_yaw_acceleration_statistics.yaml │ │ │ │ ├── ego_yaw_rate_statistics.yaml │ │ │ │ └── planner_expert_average_l2_error_within_bound_statistics.yaml │ │ │ ├── simulation_closed_loop_nonreactive_agents.yaml │ │ │ ├── 
simulation_closed_loop_reactive_agents.yaml │ │ │ └── simulation_open_loop_boxes.yaml │ │ ├── splitter │ │ │ └── nuplan.yaml │ │ └── worker │ │ │ ├── ray_distributed.yaml │ │ │ ├── sequential.yaml │ │ │ └── single_machine_thread_pool.yaml │ ├── diffusion │ │ ├── __init__.py │ │ ├── accelerator │ │ │ └── default_accelerator.yaml │ │ ├── default_diffusion.yaml │ │ ├── noise_scheduler │ │ │ └── ddpm_scheduler.yaml │ │ └── optimizer │ │ │ └── adamw.yaml │ ├── simulation │ │ ├── __init__.py │ │ ├── callback │ │ │ ├── serialization_callback.yaml │ │ │ ├── simulation_log_callback.yaml │ │ │ └── timing_callback.yaml │ │ ├── default_simulation.yaml │ │ ├── ego_controller │ │ │ ├── log_play_back_controller.yaml │ │ │ ├── motion_model │ │ │ │ └── kinematic_bicycle_model.yaml │ │ │ ├── perfect_tracking_controller.yaml │ │ │ ├── tracker │ │ │ │ ├── ilqr_tracker.yaml │ │ │ │ └── lqr_tracker.yaml │ │ │ └── two_stage_controller.yaml │ │ ├── main_callback │ │ │ ├── completion_callback.yaml │ │ │ ├── metric_aggregator_callback.yaml │ │ │ ├── metric_file_callback.yaml │ │ │ ├── metric_summary_callback.yaml │ │ │ ├── publisher_callback.yaml │ │ │ ├── time_callback.yaml │ │ │ └── validation_callback.yaml │ │ ├── metric_aggregator │ │ │ ├── closed_loop_reactive_agents_weighted_average.yaml │ │ │ └── default_weighted_average.yaml │ │ ├── observation │ │ │ └── sledge_agents_observation.yaml │ │ ├── planner │ │ │ └── pdm_closed_planner.yaml │ │ └── simulation_time_controller │ │ │ └── step_simulation_time_controller.yaml │ └── sledgeboard │ │ ├── __init__.py │ │ └── default_sledgeboard.yaml ├── experiments │ ├── __init__.py │ ├── autoencoder │ │ ├── training_rvae_model.yaml │ │ └── training_vae_model.yaml │ ├── diffusion │ │ └── training_dit_model.yaml │ └── simulation │ │ └── sledge_reactive_agents.yaml ├── run_autoencoder.py ├── run_diffusion.py ├── run_simulation.py └── run_sledgeboard.py ├── simulation ├── __init__.py ├── maps │ ├── __init__.py │ └── sledge_map │ │ ├── __init__.py │ │ ├── sledge_lane.py │ │ ├── sledge_map.py │ │ ├── sledge_map_graph.py │ │ ├── sledge_path.py │ │ ├── sledge_polyline.py │ │ └── sledge_roadblock.py ├── observation │ ├── __init__.py │ ├── sledge_idm │ │ ├── __init__.py │ │ ├── sledge_idm_agent.py │ │ ├── sledge_idm_agent_manager.py │ │ ├── sledge_idm_agents_builder.py │ │ └── utils.py │ └── sledge_idm_agents.py ├── planner │ ├── __init__.py │ └── pdm_planner │ │ ├── __init__.py │ │ ├── abstract_pdm_closed_planner.py │ │ ├── abstract_pdm_planner.py │ │ ├── observation │ │ ├── __init__.py │ │ ├── pdm_object_manager.py │ │ ├── pdm_observation.py │ │ ├── pdm_observation_utils.py │ │ └── pdm_occupancy_map.py │ │ ├── pdm_closed_planner.py │ │ ├── proposal │ │ ├── __init__.py │ │ ├── batch_idm_policy.py │ │ ├── pdm_generator.py │ │ └── pdm_proposal.py │ │ ├── scoring │ │ ├── __init__.py │ │ ├── pdm_comfort_metrics.py │ │ ├── pdm_scorer.py │ │ └── pdm_scorer_utils.py │ │ ├── simulation │ │ ├── __init__.py │ │ ├── batch_kinematic_bicycle.py │ │ ├── batch_lqr.py │ │ ├── batch_lqr_utils.py │ │ └── pdm_simulator.py │ │ └── utils │ │ ├── graph_search │ │ ├── bfs_roadblock.py │ │ └── dijkstra.py │ │ ├── pdm_array_representation.py │ │ ├── pdm_emergency_brake.py │ │ ├── pdm_enums.py │ │ ├── pdm_geometry_utils.py │ │ ├── pdm_path.py │ │ └── route_utils.py └── scenarios │ ├── __init__.py │ └── sledge_scenario │ ├── __init__.py │ ├── sledge_scenario.py │ └── sledge_scenario_utils.py └── sledgeboard ├── __init__.py ├── base ├── __init__.py ├── base_tab.py ├── data_class.py ├── 
experiment_file_data.py ├── plot_data.py └── simulation_tile.py ├── resource ├── __init__.py ├── css │ ├── cloud.css │ ├── histogram.css │ ├── overview.css │ └── scenario.css ├── scripts │ └── utils.js ├── sledge_logo_transparent.png ├── spectre-exp.min.css ├── spectre-icons.min.css ├── spectre.min.css └── style.css ├── sledgeboard.py ├── style.py ├── tabs ├── __init__.py ├── cloud_tab.py ├── config │ ├── __init__.py │ ├── cloud_tab_config.py │ ├── histogram_tab_config.py │ ├── overview_tab_config.py │ └── scenario_tab_config.py ├── configuration_tab.py ├── histogram_tab.py ├── js_code │ ├── __init__.py │ ├── cloud_tab_js_code.py │ ├── histogram_tab_js_code.py │ └── scenario_tab_js_code.py ├── overview_tab.py └── scenario_tab.py ├── templates ├── index.html └── tabs │ ├── cloud.html │ ├── histogram.html │ ├── overview.html │ └── scenario.html └── utils ├── __init__.py ├── sledgeboard_cloud_utils.py ├── sledgeboard_histogram_utils.py └── utils.py /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | ignore = 3 | # Whitespace before ':' 4 | E203, 5 | # Module level import not at top of file 6 | E402, 7 | # Line break occurred before a binary operator 8 | W503, 9 | # Line break occurred after a binary operator 10 | W504 11 | # line break before binary operator 12 | E203 13 | # line too long 14 | E501 15 | # No lambdas — too strict 16 | E731 17 | max-line-length = 120 18 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # python 2 | *.pyc 3 | **/__pycache__/ 4 | .pytest_cache/* 5 | .pydevproject 6 | 7 | # IDE 8 | .vscode/* 9 | 10 | # Pip 11 | *.egg-info 12 | 13 | # Log files 14 | *.out 15 | *.err 16 | *.gz 17 | 18 | # Custom 19 | *.ckpt 20 | *.zip -------------------------------------------------------------------------------- /assets/sledge_logo_transparent.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/assets/sledge_logo_transparent.png -------------------------------------------------------------------------------- /docs/autoencoder.md: -------------------------------------------------------------------------------- 1 | # Autoencoder 2 | 3 | This guide provides instructions on how to run an autoencoder in SLEDGE. 4 | The tutorial below shows the key functionalities of the raster-to-vector autoencoder (RVAE). 5 | 6 | ### 1. Feature Caching 7 | Similar to the [nuplan-devkit](https://github.com/motional/nuplan-devkit), 8 | pre-processing of the training data is recommended. 9 | The cache for the RVAE can be created by running: 10 | ```bash 11 | cd $SLEDGE_DEVKIT_ROOT/scripts/autoencoder/rvae/ 12 | bash feature_caching_rvae.sh 13 | ``` 14 | This script pre-processes the vector features of several maps sequentially. The cached features only store the local map and agents in a general vector format. The features are further processed and rasterized on the fly during training. This two-step processing enables fast access to training data and allows data augmentation (e.g. random rotation and translation) for RVAE training. The feature cache is compatible with other autoencoders. 15 | 16 | ### 2. Training Autoencoder 17 | After creating or downloading the autoencoder cache, you can start the training. We provide an example script in the same folder. 
18 | ```bash 19 | bash training_rvae.sh 20 | ``` 21 | You can find the experiment folder of training in `$SLEDGE_EXP_ROOT/exp` and monitor the run with tensorboard. 22 | 23 | ### 3. Latent Caching 24 | You must first cache the latent variables to run a latent diffusion model with the trained autoencoder. In SLEDGE, we cache the latent variables into the autoencoder cache directory (i.e. `$SLEDGE_EXP_ROOT/caches/autoencoder_cache`). The bash script is provided in the RVAE folder. 25 | ```bash 26 | bash latent_caching_rvae.sh 27 | ``` 28 | Importantly, data augmentation is disabled for latent caching. We also only cache the samples from the training split. 29 | 30 | ### 4. Evaluating Autoencoder 31 | Coming soon! -------------------------------------------------------------------------------- /docs/diffusion.md: -------------------------------------------------------------------------------- 1 | # Diffusion 2 | 3 | This section provides instructions on how to utilize diffusion models within the SLEDGE framework. 4 | 5 | ### 1. Training Diffusion 6 | Before training a diffusion model, make sure you have a trained autoencoder checkpoint and latent cache as described in `docs/autoencoder.md`. 7 | You can start a training experiment by running the script: 8 | ```bash 9 | cd $SLEDGE_DEVKIT_ROOT/scripts/diffusion/ 10 | bash training_diffusion.sh 11 | ``` 12 | Please make sure you added the autoencoder checkpoint path to the bash script. Before training starts, the latent variables will be stored in a Hugging Face dataset format and saved to `$SLEDGE_EXP_ROOT/caches/diffusion_cache`. This format is compatible with the [`accelerate`](https://github.com/huggingface/accelerate) framework and has performance advantages. Read more [here](https://huggingface.co/docs/datasets/about_arrow) if you are interested. Our training pipeline supports [diffusion transformers (DiT)](https://arxiv.org/abs/2212.09748) in four sizes (S, B, L, XL). You can find the experiment folder and checkpoints in `$SLEDGE_EXP_ROOT/exp`. You can also monitor the training with tensorboard. 13 | 14 | ### 2. Scenario Synthesis 15 | Given the trained diffusion model, you can generate a set of samples used for driving simulation or the generative metrics. You can set the diffuser checkpoint path and run the following: 16 | ```bash 17 | bash scenario_caching_diffusion.sh 18 | ``` 19 | The samples are stored in `$SLEDGE_EXP_ROOT/caches/scenario_cache` by default. These samples can be simulated in the v0.1 release. 20 | Additional options for route extrapolation by inpainting will be added in a future update. 21 | 22 | ### 3. Evaluating Diffusion 23 | Coming soon! -------------------------------------------------------------------------------- /docs/simulation.md: -------------------------------------------------------------------------------- 1 | # Simulation 2 | 3 | This section provides instructions for simulating scenarios in SLEDGE. 4 | 5 | ### Simple Simulation 6 | 7 | For the v0.1 release, you can simulate simple 64mx64m patches. You first need to train a diffusion model and run scenario caching, as described in `docs/diffusion.md`. 8 | Consequently, you can simulate the scenarios by running. 9 | ```bash 10 | cd $SLEDGE_DEVKIT_ROOT/scripts/simulation/ 11 | bash simple_simulation.sh 12 | ``` 13 | By default, we simulate the [PDM-Closed](https://arxiv.org/abs/2306.07962) planner for 15 seconds. The experiment folder can be found in `$SLEDGE_EXP_ROOT/exp`. Further simulation modes and configurations will follow in future updates. 
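Since `run_simulation.py` is a Hydra entry point, the settings baked into `simple_simulation.sh` can also be adjusted from the command line instead of editing config files. The sketch below is a hypothetical example, not one of the release scripts: it assumes the `worker` config group visible in the configuration tree above is exposed under that name (as in the nuplan-devkit) and merely switches to a sequential worker, which is easier to debug.
```bash
# Hypothetical override sketch: the worker config group name is assumed from the
# config folders above (ray_distributed, sequential, single_machine_thread_pool).
python $SLEDGE_DEVKIT_ROOT/sledge/script/run_simulation.py \
    +simulation=sledge_reactive_agents \
    planner=pdm_closed_planner \
    observation=sledge_agents_observation \
    scenario_builder=nuplan \
    worker=sequential
```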
14 | 15 | ### Visualization 16 | 17 | The simulated scenarios can be visualized with SledgeBoard. Simply run: 18 | ```bash 19 | python $SLEDGE_DEVKIT_ROOT/sledge/script/run_sledgeboard.py 20 | ``` 21 | Open the `.nuboard` file in the experiment folder, view simulations, and render videos of scenarios. Note that SledgeBoard is primarily of a skin of nuBoard, with small adaptations to view synthetic scenarios in SLEDGE. 22 | -------------------------------------------------------------------------------- /environment.yml: -------------------------------------------------------------------------------- 1 | name: sledge 2 | channels: 3 | - conda-forge 4 | dependencies: 5 | - python=3.9 6 | - pip=23.3.1 7 | - nb_conda_kernels 8 | - pip: 9 | - -r requirements.txt -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | nuplan-devkit @ git+https://github.com/motional/nuplan-devkit/@nuplan-devkit-v1.2 2 | 3 | # nuplan requirements 4 | aioboto3 5 | aiofiles 6 | bokeh==2.4.3 7 | casadi 8 | control==0.9.1 9 | Fiona 10 | geopandas>=0.12.1 11 | guppy3==3.1.2 12 | hydra-core==1.1.0rc1 13 | joblib 14 | matplotlib 15 | nest_asyncio 16 | numpy==1.23.4 17 | opencv-python==4.9.0.80 18 | pandas 19 | Pillow 20 | psutil 21 | pyarrow 22 | pyinstrument 23 | pyogrio 24 | pyquaternion>=0.9.5 25 | pytest 26 | rasterio 27 | ray 28 | retry 29 | rtree 30 | scipy 31 | selenium 32 | setuptools==65.5.1 33 | Shapely>=2.0.0 34 | SQLAlchemy==1.4.27 35 | sympy 36 | tornado 37 | tqdm 38 | ujson 39 | notebook==7.2.1 40 | 41 | # torch & lighting 42 | torch==2.0.1 43 | torchvision==0.15.2 44 | pytorch-lightning==2.2.1 45 | tensorboard 46 | protobuf==4.25.3 47 | 48 | # hugging face 49 | accelerate==0.33.0 50 | datasets==2.20.0 51 | transformers==4.43.3 52 | dataset==1.6.2 53 | diffusers==0.29.2 54 | timm==1.0.8 -------------------------------------------------------------------------------- /scripts/autoencoder/rvae/feature_caching_rvae.sh: -------------------------------------------------------------------------------- 1 | JOB_NAME=feature_caching 2 | AUTOENCODER_CACHE_PATH=/path/to/exp/caches/autoencoder_cache 3 | USE_CACHE_WITHOUT_DATASET=True 4 | SEED=0 5 | 6 | 7 | for MAP in "filter_pgh" "filter_lav" "filter_sgp" "filter_bos" 8 | do 9 | python $SLEDGE_DEVKIT_ROOT/sledge/script/run_autoencoder.py \ 10 | py_func=feature_caching \ 11 | seed=$SEED \ 12 | job_name=$JOB_NAME \ 13 | +autoencoder=training_rvae_model \ 14 | scenario_builder=nuplan \ 15 | scenario_filter=$MAP \ 16 | cache.autoencoder_cache_path=$AUTOENCODER_CACHE_PATH \ 17 | cache.use_cache_without_dataset=$USE_CACHE_WITHOUT_DATASET \ 18 | callbacks="[]" 19 | done -------------------------------------------------------------------------------- /scripts/autoencoder/rvae/latent_caching_rvae.sh: -------------------------------------------------------------------------------- 1 | JOB_NAME=latent_caching 2 | AUTOENCODER_CACHE_PATH=/path/to/exp/caches/autoencoder_cache 3 | AUTOENCODER_CHECKPOINT=/path/to/rvae_checkpoint.ckpt 4 | USE_CACHE_WITHOUT_DATASET=True 5 | SEED=0 6 | 7 | 8 | python $SLEDGE_DEVKIT_ROOT/sledge/script/run_autoencoder.py \ 9 | py_func=latent_caching \ 10 | seed=$SEED \ 11 | job_name=$JOB_NAME \ 12 | +autoencoder=training_rvae_model \ 13 | data_augmentation=rvae_no_augmentation \ 14 | autoencoder_checkpoint=$AUTOENCODER_CHECKPOINT \ 15 | cache.autoencoder_cache_path=$AUTOENCODER_CACHE_PATH \ 16 | cache.latent_name="rvae_latent" \ 
17 | cache.use_cache_without_dataset=$USE_CACHE_WITHOUT_DATASET \ 18 | callbacks="[]" -------------------------------------------------------------------------------- /scripts/autoencoder/rvae/training_rvae.sh: -------------------------------------------------------------------------------- 1 | JOB_NAME=training_rvae_model 2 | AUTOENCODER_CACHE_PATH=/path/to/exp/caches/autoencoder_cache 3 | AUTOENCODER_CHECKPOINT=null # set for weight initialization / continue training 4 | USE_CACHE_WITHOUT_DATASET=True 5 | SEED=0 6 | 7 | python $SLEDGE_DEVKIT_ROOT/sledge/script/run_autoencoder.py \ 8 | py_func=training \ 9 | seed=$SEED \ 10 | job_name=$JOB_NAME \ 11 | +autoencoder=training_rvae_model \ 12 | autoencoder_checkpoint=$AUTOENCODER_CHECKPOINT \ 13 | cache.autoencoder_cache_path=$AUTOENCODER_CACHE_PATH \ 14 | cache.use_cache_without_dataset=$USE_CACHE_WITHOUT_DATASET \ 15 | callbacks="[learning_rate_monitor_callback, model_checkpoint_callback, time_logging_callback, rvae_visualization_callback]" -------------------------------------------------------------------------------- /scripts/autoencoder/vae/training_vae.sh: -------------------------------------------------------------------------------- 1 | JOB_NAME=training_vae_model 2 | AUTOENCODER_CACHE_PATH=/path/to/exp/caches/autoencoder_cache 3 | USE_CACHE_WITHOUT_DATASET=True 4 | SEED=0 5 | 6 | 7 | python $SLEDGE_DEVKIT_ROOT/sledge/script/run_autoencoder.py \ 8 | py_func=training \ 9 | seed=$SEED \ 10 | job_name=$JOB_NAME \ 11 | +autoencoder=training_vae_model \ 12 | cache.autoencoder_cache_path=$AUTOENCODER_CACHE_PATH \ 13 | cache.use_cache_without_dataset=$USE_CACHE_WITHOUT_DATASET \ 14 | callbacks="[learning_rate_monitor_callback, model_checkpoint_callback, time_logging_callback, vae_visualization_callback]" -------------------------------------------------------------------------------- /scripts/diffusion/scenario_caching_diffusion.sh: -------------------------------------------------------------------------------- 1 | JOB_NAME=scenario_caching 2 | AUTOENCODER_CHECKPOINT=/path/to/rvae_checkpoint.ckpt 3 | DIFFUSION_CHECKPOINT=/path/to/diffusion/checkpoint 4 | DIFFUSION_MODEL=dit_b_model # [dit_s_model, dit_b_model, dit_l_model, dit_xl_model] 5 | SEED=0 6 | 7 | 8 | python $SLEDGE_DEVKIT_ROOT/sledge/script/run_diffusion.py \ 9 | py_func=scenario_caching \ 10 | seed=$SEED \ 11 | job_name=$JOB_NAME \ 12 | +diffusion=training_dit_model \ 13 | diffusion_model=$DIFFUSION_MODEL \ 14 | autoencoder_checkpoint=$AUTOENCODER_CHECKPOINT \ 15 | diffusion_checkpoint=$DIFFUSION_CHECKPOINT -------------------------------------------------------------------------------- /scripts/diffusion/training_diffusion.sh: -------------------------------------------------------------------------------- 1 | JOB_NAME=training_dit_diffusion 2 | AUTOENCODER_CACHE_PATH=/path/to/exp/caches/autoencoder_cache 3 | AUTOENCODER_CHECKPOINT=/path/to/rvae_checkpoint.ckpt 4 | DIFFUSION_CHECKPOINT=null # set for weight initialization / continue training 5 | DIFFUSION_MODEL=dit_b_model # [dit_s_model, dit_b_model, dit_l_model, dit_xl_model] 6 | CLEANUP_DIFFUSION_CACHE=False 7 | SEED=0 8 | 9 | accelerate launch $SLEDGE_DEVKIT_ROOT/sledge/script/run_diffusion.py \ 10 | py_func=training \ 11 | seed=$SEED \ 12 | job_name=$JOB_NAME \ 13 | +diffusion=training_dit_model \ 14 | diffusion_model=$DIFFUSION_MODEL \ 15 | cache.autoencoder_cache_path=$AUTOENCODER_CACHE_PATH \ 16 | cache.cleanup_diffusion_cache=$CLEANUP_DIFFUSION_CACHE \ 17 |
autoencoder_checkpoint=$AUTOENCODER_CHECKPOINT \ 18 | diffusion_checkpoint=$DIFFUSION_CHECKPOINT -------------------------------------------------------------------------------- /scripts/download/download_cache.sh: -------------------------------------------------------------------------------- 1 | # autoencoder cache 2 | wget https://s3.eu-central-1.amazonaws.com/avg-projects-2/sledge/caches/autoencoder_cache.zip -------------------------------------------------------------------------------- /scripts/download/download_nuplan.sh: -------------------------------------------------------------------------------- 1 | # NOTE: Please check the LICENSE file when downloading the nuPlan dataset 2 | wget https://motional-nuplan.s3-ap-northeast-1.amazonaws.com/LICENSE 3 | 4 | # maps 5 | wget https://motional-nuplan.s3-ap-northeast-1.amazonaws.com/public/nuplan-v1.1/nuplan-maps-v1.1.zip 6 | 7 | # train 8 | wget https://motional-nuplan.s3-ap-northeast-1.amazonaws.com/public/nuplan-v1.1/nuplan-v1.1_train_boston.zip 9 | wget https://motional-nuplan.s3-ap-northeast-1.amazonaws.com/public/nuplan-v1.1/nuplan-v1.1_train_pittsburgh.zip 10 | wget https://motional-nuplan.s3-ap-northeast-1.amazonaws.com/public/nuplan-v1.1/nuplan-v1.1_train_singapore.zip 11 | for split in {1..6}; do 12 | wget https://motional-nuplan.s3-ap-northeast-1.amazonaws.com/public/nuplan-v1.1/nuplan-v1.1_train_vegas_${split}.zip 13 | done 14 | 15 | # val 16 | wget https://motional-nuplan.s3-ap-northeast-1.amazonaws.com/public/nuplan-v1.1/nuplan-v1.1_test.zip 17 | 18 | # test 19 | wget https://motional-nuplan.s3-ap-northeast-1.amazonaws.com/public/nuplan-v1.1/nuplan-v1.1_val.zip 20 | 21 | # mini 22 | wget https://motional-nuplan.s3-ap-northeast-1.amazonaws.com/public/nuplan-v1.1/nuplan-v1.1_mini.zip -------------------------------------------------------------------------------- /scripts/simulation/simple_simulation.sh: -------------------------------------------------------------------------------- 1 | CHALLENGE=sledge_reactive_agents 2 | 3 | python $SLEDGE_DEVKIT_ROOT/sledge/script/run_simulation.py \ 4 | +simulation=$CHALLENGE \ 5 | planner=pdm_closed_planner \ 6 | observation=sledge_agents_observation \ 7 | scenario_builder=nuplan -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import os 2 | import setuptools 3 | 4 | # Change directory to allow installation from anywhere 5 | script_folder = os.path.dirname(os.path.realpath(__file__)) 6 | os.chdir(script_folder) 7 | 8 | with open("requirements.txt") as f: 9 | requirements = f.read().splitlines() 10 | 11 | # Installs 12 | setuptools.setup( 13 | name="sledge", 14 | version="0.1", 15 | author="University of Tuebingen", 16 | author_email="daniel.dauner@uni-tuebingen.de", 17 | description="Simulation Environments for Vehicle Motion Planning with Generative Models.", 18 | url="https://github.com/autonomousvision/sledge", 19 | python_requires=">=3.9", 20 | packages=["sledge"], 21 | package_dir={"": "."}, 22 | classifiers=[ 23 | "Programming Language :: Python :: 3.9", 24 | "Operating System :: OS Independent", 25 | "License :: Free for non-commercial use", 26 | ], 27 | license="apache-2.0", 28 | install_requires=requirements, 29 | ) 30 | -------------------------------------------------------------------------------- /sledge/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/__init__.py -------------------------------------------------------------------------------- /sledge/autoencoder/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/autoencoder/__init__.py -------------------------------------------------------------------------------- /sledge/autoencoder/callbacks/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/autoencoder/callbacks/__init__.py -------------------------------------------------------------------------------- /sledge/autoencoder/data_augmentation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/autoencoder/data_augmentation/__init__.py -------------------------------------------------------------------------------- /sledge/autoencoder/data_loader/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/autoencoder/data_loader/__init__.py -------------------------------------------------------------------------------- /sledge/autoencoder/experiments/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/autoencoder/experiments/__init__.py -------------------------------------------------------------------------------- /sledge/autoencoder/experiments/latent_caching.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from pathlib import Path 3 | from omegaconf import DictConfig 4 | 5 | import torch 6 | from tqdm import tqdm 7 | 8 | from nuplan.planning.utils.multithreading.worker_pool import WorkerPool 9 | from nuplan.planning.training.preprocessing.utils.feature_cache import FeatureCachePickle 10 | 11 | from sledge.script.builders.model_builder import build_autoencoder_torch_module_wrapper 12 | from sledge.script.builders.autoencoder_builder import build_autoencoder_lightning_datamodule 13 | from sledge.autoencoder.preprocessing.features.latent_feature import Latent 14 | from sledge.autoencoder.modeling.autoencoder_lightning_module_wrapper import AutoencoderLightningModuleWrapper 15 | 16 | logger = logging.getLogger(__name__) 17 | 18 | 19 | def cache_latent(cfg: DictConfig, worker: WorkerPool) -> None: 20 | """ 21 | Build the lightning datamodule and cache the latent of all training samples. 22 | :param cfg: omegaconf dictionary 23 | :param worker: Worker to submit tasks which can be executed in parallel 24 | """ 25 | 26 | assert cfg.autoencoder_checkpoint is not None, "cfg.autoencoder_checkpoint is not specified for latent caching!" 
27 | 28 | # Create model 29 | logger.info("Building Autoencoder Module...") 30 | torch_module_wrapper = build_autoencoder_torch_module_wrapper(cfg) 31 | torch_module_wrapper = AutoencoderLightningModuleWrapper.load_from_checkpoint( 32 | cfg.autoencoder_checkpoint, model=torch_module_wrapper 33 | ).model 34 | logger.info("Building Autoencoder Module...DONE!") 35 | 36 | # Build the datamodule 37 | logger.info("Building Datamodule Module...") 38 | datamodule = build_autoencoder_lightning_datamodule(cfg, worker, torch_module_wrapper) 39 | datamodule.setup("fit") 40 | dataloader = datamodule.train_dataloader() 41 | logger.info("Building Datamodule Module...DONE!") 42 | 43 | autoencoder_cache_path = Path(cfg.cache.autoencoder_cache_path) 44 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 45 | 46 | storing_mechanism = FeatureCachePickle() 47 | torch_module_wrapper = torch_module_wrapper.to(device) 48 | 49 | # Perform inference 50 | with torch.no_grad(): 51 | for batch in tqdm(dataloader, total=len(dataloader), desc="Cache Latents (batch-wise)"): 52 | # Assuming batch is a tuple of (inputs, labels, indices) where indices track sample order 53 | features, targets, scenarios = datamodule.transfer_batch_to_device(batch, device, 0) 54 | predictions = torch_module_wrapper.forward(features, encode_only=True) 55 | assert "latent" in predictions 56 | 57 | latents: Latent = predictions["latent"] 58 | latents = latents.torch_to_numpy() 59 | 60 | for latent, scenario in zip(latents.unpack(), scenarios): 61 | file_name = ( 62 | autoencoder_cache_path 63 | / scenario.log_name 64 | / scenario.scenario_type 65 | / scenario.token 66 | / cfg.cache.latent_name 67 | ) 68 | storing_mechanism.store_computed_feature_to_folder(file_name, latent) 69 | 70 | return None 71 | -------------------------------------------------------------------------------- /sledge/autoencoder/experiments/training.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from omegaconf import DictConfig 3 | 4 | from torch.optim.lr_scheduler import OneCycleLR 5 | import pytorch_lightning as pl 6 | 7 | from nuplan.planning.utils.multithreading.worker_pool import WorkerPool 8 | from nuplan.planning.training.experiments.training import TrainingEngine 9 | from nuplan.planning.script.builders.utils.utils_config import get_num_gpus_used 10 | from nuplan.planning.script.builders.utils.utils_type import is_target_type 11 | 12 | from sledge.script.builders.model_builder import build_autoencoder_torch_module_wrapper 13 | from sledge.script.builders.autoencoder_builder import ( 14 | build_autoencoder_lightning_datamodule, 15 | build_autoencoder_lightning_module, 16 | build_autoencoder_trainer, 17 | ) 18 | 19 | logger = logging.getLogger(__name__) 20 | 21 | 22 | def build_training_engine(cfg: DictConfig, worker: WorkerPool) -> TrainingEngine: 23 | """ 24 | Build the three core lightning modules: LightningDataModule, LightningModule and Trainer 25 | :param cfg: omegaconf dictionary 26 | :param worker: Worker to submit tasks which can be executed in parallel 27 | :return: TrainingEngine 28 | """ 29 | logger.info("Building training engine...") 30 | 31 | # Create model 32 | torch_module_wrapper = build_autoencoder_torch_module_wrapper(cfg) 33 | 34 | # Build the datamodule 35 | datamodule = build_autoencoder_lightning_datamodule(cfg, worker, torch_module_wrapper) 36 | 37 | cfg = scale_cfg_for_distributed_training(cfg, datamodule=datamodule, worker=worker) 38 | 39 | # Build lightning 
module 40 | model = build_autoencoder_lightning_module(cfg, torch_module_wrapper) 41 | 42 | # Build trainer 43 | trainer = build_autoencoder_trainer(cfg) 44 | 45 | engine = TrainingEngine(trainer=trainer, datamodule=datamodule, model=model) 46 | 47 | return engine 48 | 49 | 50 | def scale_cfg_for_distributed_training( 51 | cfg: DictConfig, datamodule: pl.LightningDataModule, worker: WorkerPool 52 | ) -> DictConfig: 53 | """ 54 | Adjusts parameters in cfg for ddp. 55 | :param cfg: Config with parameters for instantiation. 56 | :param datamodule: Datamodule which will be used for updating the lr_scheduler parameters. 57 | :return cfg: Updated config. 58 | """ 59 | number_gpus = get_num_gpus_used(cfg) 60 | 61 | # Setup learning rate and momentum schedulers 62 | if is_target_type(cfg.lr_scheduler, OneCycleLR): 63 | num_train_samples = int( 64 | len(datamodule._splitter.get_train_samples(datamodule._all_samples, worker)) * datamodule._train_fraction 65 | ) 66 | 67 | cfg.lr_scheduler.steps_per_epoch = (num_train_samples // cfg.data_loader.params.batch_size) // number_gpus 68 | 69 | return cfg 70 | -------------------------------------------------------------------------------- /sledge/autoencoder/modeling/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/autoencoder/modeling/__init__.py -------------------------------------------------------------------------------- /sledge/autoencoder/modeling/autoencoder_torch_module_wrapper.py: -------------------------------------------------------------------------------- 1 | import abc 2 | from typing import List 3 | 4 | import torch.nn as nn 5 | 6 | from nuplan.planning.training.modeling.types import FeaturesType, TargetsType 7 | from nuplan.planning.training.modeling.torch_module_wrapper import TorchModuleWrapper 8 | from nuplan.planning.training.preprocessing.feature_builders.abstract_feature_builder import AbstractFeatureBuilder 9 | from nuplan.planning.training.preprocessing.target_builders.abstract_target_builder import AbstractTargetBuilder 10 | 11 | 12 | class AutoencoderTorchModuleWrapper(TorchModuleWrapper): 13 | """Torch module wrapper that encapsulates builders for constructing model features and targets.""" 14 | 15 | def __init__( 16 | self, 17 | feature_builders: List[AbstractFeatureBuilder], 18 | target_builders: List[AbstractTargetBuilder], 19 | ): 20 | """ 21 | Construct a model with feature and target builders. 22 | :param feature_builders: The list of builders which will compute features for this model. 23 | :param target_builders: The list of builders which will compute targets for this model. 24 | """ 25 | super().__init__( 26 | future_trajectory_sampling=None, # dummy value 27 | feature_builders=feature_builders, 28 | target_builders=target_builders, 29 | ) 30 | 31 | self.feature_builders = feature_builders 32 | self.target_builders = target_builders 33 | 34 | @abc.abstractmethod 35 | def forward(self, features: FeaturesType, encoder_only: bool) -> TargetsType: 36 | """ 37 | The main inference call for the model. 38 | :param features: _description_ 39 | :param encoder_only: whether to only encode input in autoencoder 40 | :return: The results of the inference as a TargetsType. 
41 | """ 42 | pass 43 | 44 | @abc.abstractmethod 45 | def encode(self, features: FeaturesType) -> FeaturesType: 46 | """ 47 | Encode input features into a latent representation. 48 | """ 49 | pass 50 | 51 | @abc.abstractmethod 52 | def decode(self, features: FeaturesType) -> TargetsType: 53 | """ 54 | Decode a latent representation into output features. 55 | """ 56 | pass 57 | 58 | @abc.abstractmethod 59 | def get_encoder(self, features: FeaturesType) -> nn.Module: 60 | """ 61 | Return the encoder network of the autoencoder. 62 | """ 63 | pass 64 | 65 | @abc.abstractmethod 66 | def get_decoder(self, features: FeaturesType) -> nn.Module: 67 | """ 68 | Return the decoder network of the autoencoder. 69 | """ 70 | pass 71 | -------------------------------------------------------------------------------- /sledge/autoencoder/modeling/matching/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/autoencoder/modeling/matching/__init__.py -------------------------------------------------------------------------------- /sledge/autoencoder/modeling/matching/abstract_matching.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | import torch 3 | 4 | from nuplan.planning.training.modeling.types import FeaturesType, TargetsType 5 | 6 | 7 | class AbstractMatching(ABC): 8 | """Matching interface used for Hungarian matching losses during training.""" 9 | 10 | @abstractmethod 11 | @torch.no_grad() 12 | def compute(self, predictions: FeaturesType, targets: TargetsType) -> TargetsType: 13 | """ 14 | Run matching between model predictions and targets for loss computation. 15 | :param predictions: Predicted feature tensors for matching. 16 | :param targets: Target feature tensors for matching. 17 | :return: Matching formulation between predictions and targets. 18 | """ 19 | pass 20 | -------------------------------------------------------------------------------- /sledge/autoencoder/modeling/metrics/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/autoencoder/modeling/metrics/__init__.py -------------------------------------------------------------------------------- /sledge/autoencoder/modeling/metrics/abstract_custom_metric.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from typing import Dict 3 | 4 | import torch 5 | 6 | from nuplan.planning.training.modeling.types import FeaturesType, TargetsType, ScenarioListType 7 | 8 | 9 | class AbstractCustomMetric(ABC): 10 | """Abstract class for custom metric definitions. Allows multiple metrics in one object.""" 11 | 12 | @abstractmethod 13 | def compute( 14 | self, 15 | predictions: FeaturesType, 16 | targets: TargetsType, 17 | matchings: TargetsType, 18 | scenarios: ScenarioListType, 19 | ) -> Dict[str, torch.Tensor]: 20 | """ 21 | Computes the metric given the ground truth targets and the model's predictions. 22 | :param predictions: dictionary of model's predictions 23 | :param targets: dictionary of ground-truth targets from the dataset 24 | :param matchings: dictionary of matchings between targets and predictions 25 | :param scenarios: list of scenario types (for type-specific weighting) 26 | :return: dictionary of metric name and scalar.
27 | """ 28 | pass 29 | -------------------------------------------------------------------------------- /sledge/autoencoder/modeling/metrics/kl_metric.py: -------------------------------------------------------------------------------- 1 | from typing import Dict 2 | 3 | import torch 4 | 5 | from nuplan.planning.training.modeling.types import FeaturesType, ScenarioListType, TargetsType 6 | 7 | from sledge.autoencoder.preprocessing.features.latent_feature import Latent 8 | from sledge.autoencoder.modeling.metrics.abstract_custom_metric import AbstractCustomMetric 9 | 10 | 11 | class KLMetric(AbstractCustomMetric): 12 | def __init__(self, scenario_type_loss_weighting: Dict[str, float]): 13 | """ 14 | Initializes the class 15 | :param scenario_type_loss_weighting: loss weight per scenario (ignored) 16 | """ 17 | self._scenario_type_loss_weighting = scenario_type_loss_weighting 18 | 19 | def compute( 20 | self, predictions: FeaturesType, targets: TargetsType, matchings: TargetsType, scenarios: ScenarioListType 21 | ) -> Dict[str, torch.Tensor]: 22 | """Inherited, see superclass.""" 23 | 24 | pred_latent: Latent = predictions["latent"] 25 | mu, log_var = pred_latent.mu, pred_latent.log_var 26 | kl_metric = -0.5 * torch.mean(1 + log_var - mu**2 - log_var.exp()) 27 | 28 | return {"kl_metric": kl_metric} 29 | -------------------------------------------------------------------------------- /sledge/autoencoder/modeling/models/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/autoencoder/modeling/models/__init__.py -------------------------------------------------------------------------------- /sledge/autoencoder/modeling/models/rvae/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/autoencoder/modeling/models/rvae/__init__.py -------------------------------------------------------------------------------- /sledge/autoencoder/modeling/models/rvae/rvae_encoder.py: -------------------------------------------------------------------------------- 1 | # Code mainly from: https://github.com/facebookresearch/detr (Apache-2.0 license) 2 | # TODO: Refactor & add docstring's 3 | 4 | from typing import Union 5 | import torch 6 | import torchvision 7 | 8 | from torch import nn 9 | from torchvision.models._utils import IntermediateLayerGetter 10 | 11 | from sledge.autoencoder.modeling.models.rvae.rvae_config import RVAEConfig 12 | from sledge.autoencoder.modeling.models.vae.vae_config import VAEConfig 13 | from sledge.autoencoder.preprocessing.features.latent_feature import Latent 14 | 15 | 16 | class FrozenBatchNorm2d(torch.nn.Module): 17 | """ 18 | BatchNorm2d where the batch statistics and the affine parameters are fixed. 19 | 20 | Copy-paste from torchvision.misc.ops with added eps before rqsrt, 21 | without which any other models than torchvision.models.resnet[18,34,50,101] 22 | produce nans. 
23 | """ 24 | 25 | def __init__(self, n): 26 | super(FrozenBatchNorm2d, self).__init__() 27 | self.register_buffer("weight", torch.ones(n)) 28 | self.register_buffer("bias", torch.zeros(n)) 29 | self.register_buffer("running_mean", torch.zeros(n)) 30 | self.register_buffer("running_var", torch.ones(n)) 31 | 32 | def _load_from_state_dict( 33 | self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs 34 | ): 35 | num_batches_tracked_key = prefix + "num_batches_tracked" 36 | if num_batches_tracked_key in state_dict: 37 | del state_dict[num_batches_tracked_key] 38 | 39 | super(FrozenBatchNorm2d, self)._load_from_state_dict( 40 | state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs 41 | ) 42 | 43 | def forward(self, x): 44 | # move reshapes to the beginning 45 | # to make it user-friendly 46 | w = self.weight.reshape(1, -1, 1, 1) 47 | b = self.bias.reshape(1, -1, 1, 1) 48 | rv = self.running_var.reshape(1, -1, 1, 1) 49 | rm = self.running_mean.reshape(1, -1, 1, 1) 50 | eps = 1e-5 51 | scale = w * (rv + eps).rsqrt() 52 | bias = b - rm * scale 53 | return x * scale + bias 54 | 55 | 56 | class RVAEEncoder(nn.Module): 57 | """ResNet backbone with frozen BatchNorm.""" 58 | 59 | def __init__(self, config: Union[RVAEConfig, VAEConfig]): 60 | """ 61 | Initialize encoder module. 62 | :param config: config of RVAE or VAE. 63 | """ 64 | super().__init__() 65 | 66 | # TODO: support more backbones. 67 | backbone = getattr(torchvision.models, config.model_name)( 68 | replace_stride_with_dilation=[False, False, False], 69 | weights="DEFAULT", 70 | norm_layer=FrozenBatchNorm2d, 71 | ) 72 | if config.num_input_channels != 3: 73 | backbone.conv1 = nn.Conv2d(config.num_input_channels, 64, 7, stride=2, padding=3, bias=False) 74 | self._backbone = IntermediateLayerGetter(backbone, return_layers={"layer4": "0"}) 75 | output_channels = 512 if config.model_name in ["resnet18", "resnet34"] else 2048 76 | 77 | # TODO: add params to config 78 | self._group_norm = nn.GroupNorm(num_groups=32, num_channels=output_channels, eps=1e-6, affine=True) 79 | self._latent = nn.Conv2d(output_channels, 2 * config.latent_channel, kernel_size=3, stride=1, padding=1) 80 | 81 | def forward(self, raster: torch.Tensor) -> Latent: 82 | 83 | cnn_features = self._backbone(raster)["0"] 84 | normed_features = self._group_norm(cnn_features) 85 | 86 | latent = self._latent(normed_features) 87 | mu, log_var = torch.chunk(latent, 2, dim=1) 88 | 89 | return Latent(mu, log_var) 90 | -------------------------------------------------------------------------------- /sledge/autoencoder/modeling/models/rvae/rvae_model.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | from nuplan.planning.training.modeling.types import FeaturesType, TargetsType 5 | 6 | from sledge.autoencoder.modeling.autoencoder_torch_module_wrapper import AutoencoderTorchModuleWrapper 7 | from sledge.autoencoder.modeling.models.rvae.rvae_encoder import RVAEEncoder 8 | from sledge.autoencoder.modeling.models.rvae.rvae_decoder import RVAEDecoder 9 | from sledge.autoencoder.modeling.models.rvae.rvae_config import RVAEConfig 10 | from sledge.autoencoder.preprocessing.features.latent_feature import Latent 11 | from sledge.autoencoder.preprocessing.feature_builders.sledge_raw_feature_builder import SledgeRawFeatureBuilder 12 | from sledge.autoencoder.preprocessing.target_builders.map_id_target_builder import MapIDTargetBuilder 13 | 14 | 15 | 
class RVAEModel(AutoencoderTorchModuleWrapper): 16 | """Raster-Vector Autoencoder in of SLEDGE.""" 17 | 18 | def __init__(self, config: RVAEConfig): 19 | """ 20 | Initialize Raster-Vector Autoencoder. 21 | :param config: configuration dataclass of RVAE. 22 | """ 23 | feature_builders = [SledgeRawFeatureBuilder(config)] 24 | target_builders = [MapIDTargetBuilder()] 25 | 26 | super().__init__(feature_builders=feature_builders, target_builders=target_builders) 27 | 28 | self._config = config 29 | 30 | self._raster_encoder = RVAEEncoder(config) 31 | self._vector_decoder = RVAEDecoder(config) 32 | 33 | @staticmethod 34 | def _reparameterize(latent: Latent) -> torch.Tensor: 35 | """ 36 | Reparameterization method for variational autoencoder's. 37 | :param latent: dataclass for mu, logvar tensors. 38 | :return: combined latent tensor. 39 | """ 40 | mu, log_var = latent.mu, latent.log_var 41 | assert mu.shape == log_var.shape 42 | std = torch.exp(0.5 * log_var) 43 | eps = torch.randn_like(std) 44 | return mu + eps * std 45 | 46 | def forward(self, features: FeaturesType, encode_only: bool = False) -> TargetsType: 47 | """Inherited, see superclass.""" 48 | predictions: TargetsType = {} 49 | 50 | # encoding 51 | predictions["latent"] = self._raster_encoder(features["sledge_raster"].data) 52 | latent = self._reparameterize(predictions["latent"]) 53 | if encode_only: 54 | return predictions 55 | 56 | # decoding 57 | predictions["sledge_vector"] = self._vector_decoder(latent) 58 | return predictions 59 | 60 | def get_encoder(self) -> nn.Module: 61 | """Inherited, see superclass.""" 62 | return self._raster_encoder 63 | 64 | def get_decoder(self) -> nn.Module: 65 | """Inherited, see superclass.""" 66 | return self._vector_decoder 67 | -------------------------------------------------------------------------------- /sledge/autoencoder/modeling/models/vae/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/autoencoder/modeling/models/vae/__init__.py -------------------------------------------------------------------------------- /sledge/autoencoder/modeling/models/vae/vae_config.py: -------------------------------------------------------------------------------- 1 | from typing import Tuple 2 | from dataclasses import dataclass 3 | 4 | from sledge.autoencoder.preprocessing.features.sledge_vector_feature import SledgeConfig 5 | 6 | 7 | @dataclass 8 | class VAEConfig(SledgeConfig): 9 | """Configuration dataclass for Raster VAE.""" 10 | 11 | # 1. features raw 12 | radius: int = 100 13 | pose_interval: int = 1.0 14 | 15 | # 2. features raster & vector 16 | frame: Tuple[int, int] = (64, 64) 17 | 18 | num_lines: int = 50 19 | num_vehicles: int = 50 20 | num_pedestrians: int = 20 21 | num_static_objects: int = 30 22 | num_green_lights: int = 20 23 | num_red_lights: int = 20 24 | 25 | num_line_poses: int = 20 26 | vehicle_max_velocity: float = 15 27 | pedestrian_max_velocity: float = 2 28 | 29 | pixel_size: float = 0.25 30 | line_dots_radius: int = 0 31 | 32 | # 3. 
raster encoder π 33 | model_name: str = "resnet50" 34 | down_factor: int = 32 # NOTE: specific to resnet 35 | num_input_channels: int = 12 36 | latent_channel: int = 64 37 | 38 | # loss 39 | reconstruction_weight: float = 1.0 40 | kl_weight: float = 0.1 41 | 42 | # output 43 | threshold: float = 0.3 44 | 45 | def __post_init__(self): 46 | super().__post_init__() 47 | 48 | @property 49 | def pixel_frame(self) -> Tuple[int, int]: 50 | frame_width, frame_height = self.frame 51 | return int(frame_width / self.pixel_size), int(frame_height / self.pixel_size) 52 | 53 | @property 54 | def latent_frame(self) -> Tuple[int, int]: 55 | pixel_width, pixel_height = self.pixel_frame 56 | return int(pixel_width / self.down_factor), int(pixel_height / self.down_factor) 57 | -------------------------------------------------------------------------------- /sledge/autoencoder/modeling/objectives/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/autoencoder/modeling/objectives/__init__.py -------------------------------------------------------------------------------- /sledge/autoencoder/modeling/objectives/abstract_custom_objective.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from typing import Dict 3 | import torch 4 | 5 | from nuplan.planning.training.modeling.types import FeaturesType, ScenarioListType, TargetsType 6 | 7 | 8 | class AbstractCustomObjective(ABC): 9 | """Abstract class for custom objectives in sledge package. Allows to multiple objectives via dict.""" 10 | 11 | @abstractmethod 12 | def compute( 13 | self, 14 | predictions: FeaturesType, 15 | targets: TargetsType, 16 | matchings: TargetsType, 17 | scenarios: ScenarioListType, 18 | ) -> Dict[str, torch.Tensor]: 19 | """ 20 | Computes the objective given the ground truth targets and the model's predictions. 21 | :param predictions: dictionary of model's predictions 22 | :param targets: dictionary of ground-truth targets from the dataset 23 | :param matchings: dictionary of matchings between targets and predictions 24 | :param scenarios: list if scenario types (for type-specific weighting) 25 | :return: dictionary of metric name and scalar. 26 | """ 27 | pass 28 | -------------------------------------------------------------------------------- /sledge/autoencoder/modeling/objectives/kl_objective.py: -------------------------------------------------------------------------------- 1 | from typing import Dict 2 | 3 | import torch 4 | 5 | from nuplan.planning.training.modeling.types import FeaturesType, ScenarioListType, TargetsType 6 | 7 | from sledge.autoencoder.preprocessing.features.latent_feature import Latent 8 | from sledge.autoencoder.modeling.objectives.abstract_custom_objective import AbstractCustomObjective 9 | 10 | 11 | class KLObjective(AbstractCustomObjective): 12 | """Kullback-Leibler divergence objective for VAEs.""" 13 | 14 | def __init__(self, weight: float, scenario_type_loss_weighting: Dict[str, float]): 15 | """ 16 | Initialize KL objective. 17 | :param weight: scalar for loss weighting (aka. β) 18 | :param scenario_type_loss_weighting: scenario-type specific loss weights (ignored). 
19 | """ 20 | self._weight = weight 21 | self._scenario_type_loss_weighting = scenario_type_loss_weighting 22 | 23 | def compute( 24 | self, predictions: FeaturesType, targets: TargetsType, matchings: TargetsType, scenarios: ScenarioListType 25 | ) -> Dict[str, torch.Tensor]: 26 | """Inherited, see superclass.""" 27 | 28 | pred_latent: Latent = predictions["latent"] 29 | mu, log_var = pred_latent.mu, pred_latent.log_var 30 | kl_loss = -0.5 * torch.mean(1 + log_var - mu**2 - log_var.exp()) 31 | 32 | return {"kl_latent": self._weight * kl_loss} 33 | -------------------------------------------------------------------------------- /sledge/autoencoder/modeling/objectives/vae_objective.py: -------------------------------------------------------------------------------- 1 | from typing import Dict 2 | 3 | import torch 4 | import torch.nn.functional as F 5 | 6 | from nuplan.planning.training.modeling.types import FeaturesType, ScenarioListType, TargetsType 7 | 8 | from sledge.autoencoder.preprocessing.features.sledge_raster_feature import SledgeRaster 9 | from sledge.autoencoder.modeling.objectives.abstract_custom_objective import AbstractCustomObjective 10 | 11 | 12 | class VAEL1Objective(AbstractCustomObjective): 13 | """Object for image reconstruction loss (ie. l1).""" 14 | 15 | def __init__(self, weight: float, scenario_type_loss_weighting: Dict[str, float]): 16 | """ 17 | Initialize l1 objective for raster reconstruction. 18 | :param weight: scalar for loss weighting 19 | :param scenario_type_loss_weighting: scenario-type specific loss weights (ignored) 20 | """ 21 | self._weight = weight 22 | self._scenario_type_loss_weighting = scenario_type_loss_weighting 23 | 24 | def compute( 25 | self, predictions: FeaturesType, targets: TargetsType, matchings: TargetsType, scenarios: ScenarioListType 26 | ) -> Dict[str, torch.Tensor]: 27 | 28 | gt_raster: SledgeRaster = targets["sledge_raster"] 29 | pred_raster: SledgeRaster = predictions["sledge_raster"] 30 | 31 | l1_loss = F.l1_loss(gt_raster.data, pred_raster.data) 32 | 33 | return {"l1_loss": self._weight * l1_loss} 34 | 35 | 36 | class VAEBCEObjective(AbstractCustomObjective): 37 | """Object for image reconstruction loss (ie. binary cross-entropy).""" 38 | 39 | def __init__(self, weight: float, scenario_type_loss_weighting: Dict[str, float]): 40 | """ 41 | Initialize binary cross-entropy objective for raster reconstruction. 
42 | :param weight: scalar for loss weighting 43 | :param scenario_type_loss_weighting: scenario-type specific loss weights (ignored) 44 | """ 45 | self._weight = weight 46 | self._scenario_type_loss_weighting = scenario_type_loss_weighting 47 | 48 | def compute( 49 | self, predictions: FeaturesType, targets: TargetsType, matchings: TargetsType, scenarios: ScenarioListType 50 | ) -> Dict[str, torch.Tensor]: 51 | """Inherited, see superclass.""" 52 | 53 | pred_raster: SledgeRaster = predictions["sledge_raster"] 54 | gt_raster: SledgeRaster = targets["sledge_raster"] 55 | bce_loss = F.binary_cross_entropy_with_logits(pred_raster.data, gt_raster.data) 56 | 57 | return {"bce_loss": self._weight * bce_loss} 58 | -------------------------------------------------------------------------------- /sledge/autoencoder/preprocessing/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/autoencoder/preprocessing/__init__.py -------------------------------------------------------------------------------- /sledge/autoencoder/preprocessing/feature_builders/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/autoencoder/preprocessing/feature_builders/__init__.py -------------------------------------------------------------------------------- /sledge/autoencoder/preprocessing/feature_builders/sledge/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/autoencoder/preprocessing/feature_builders/sledge/__init__.py -------------------------------------------------------------------------------- /sledge/autoencoder/preprocessing/feature_builders/sledge/sledge_utils.py: -------------------------------------------------------------------------------- 1 | # TODO: Move these functions for general use. 2 | # eg. sledge.common.visualization 3 | 4 | from typing import Tuple 5 | 6 | import cv2 7 | import numpy as np 8 | import numpy.typing as npt 9 | 10 | from nuplan.common.actor_state.oriented_box import OrientedBox 11 | from sledge.autoencoder.preprocessing.features.sledge_vector_feature import SledgeConfig 12 | 13 | 14 | def raster_mask_oriented_box(oriented_box: OrientedBox, config: SledgeConfig) -> npt.NDArray[np.bool_]: 15 | """ 16 | Create raster mask if a oriented bounding box in BEV 17 | :param oriented_box: class of a bounding box with heading 18 | :param config: config dataclass of a sledge autoencoder 19 | :return: creates raster mask of oriented bounding box 20 | """ 21 | 22 | pixel_width, pixel_height = config.pixel_frame 23 | corners = np.asarray([[corner.x, corner.y] for corner in oriented_box.all_corners()]) 24 | corner_idcs = coords_to_pixel(corners, config.frame, config.pixel_size) 25 | 26 | # TODO: check if float32 is really necessary here. 
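# NOTE (illustrative): cv2.fillPoly below expects integer pixel coordinates and a writable
# float canvas, which is why the box corners were converted with coords_to_pixel above.
# With the default config (frame=(64, 64) m, pixel_size=0.25 m) the canvas is 256x256 pixels
# and a corner at (10.0, -5.0) m maps to index (10 / 0.25 + 128, -5 / 0.25 + 128) = (168, 108).
# After the anti-aliased fill, np.rot90(...)[::-1] re-orients the image from OpenCV's
# top-left (row, col) convention to the feature frame, and "> 0" yields the boolean mask.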
27 | raster_mask = np.zeros((pixel_width, pixel_height), dtype=np.float32) 28 | cv2.fillPoly(raster_mask, [corner_idcs], color=1.0, lineType=cv2.LINE_AA) 29 | 30 | # NOTE: OpenCV has origin on top-left corner 31 | raster_mask = np.rot90(raster_mask)[::-1] 32 | return raster_mask > 0 33 | 34 | 35 | def coords_in_frame(coords: npt.NDArray[np.float32], frame: Tuple[float, float]) -> npt.NDArray[np.bool_]: 36 | """ 37 | Checks which coordinates are within the given 2D frame extend. 38 | :param coords: coordinate array in numpy (x,y) in last axis 39 | :param frame: tuple of frame extend in meter 40 | :return: numpy array of boolean's 41 | """ 42 | assert coords.shape[-1] == 2, "Coordinate array must have last dim size of 2 (ie. x,y)" 43 | width, height = frame 44 | 45 | within_width = np.logical_and(-width / 2 <= coords[..., 0], coords[..., 0] <= width / 2) 46 | within_height = np.logical_and(-height / 2 <= coords[..., 1], coords[..., 1] <= height / 2) 47 | 48 | return np.logical_and(within_width, within_height) 49 | 50 | 51 | def pixel_in_frame(pixel: npt.NDArray[np.int32], pixel_frame: Tuple[int, int]) -> npt.NDArray[np.bool_]: 52 | """ 53 | Checks if pixels indices are within the image. 54 | :param pixel: pixel indices as numpy array 55 | :param pixel_frame: tuple of raster width and height 56 | :return: numpy array of boolean's 57 | """ 58 | assert pixel.shape[-1] == 2, "Coordinate array must have last dim size of 2 (ie. x,y)" 59 | pixel_width, pixel_height = pixel_frame 60 | 61 | within_width = np.logical_and(0 <= pixel[..., 0], pixel[..., 0] < pixel_width) 62 | within_height = np.logical_and(0 <= pixel[..., 1], pixel[..., 1] < pixel_height) 63 | 64 | return np.logical_and(within_width, within_height) 65 | 66 | 67 | def coords_to_pixel( 68 | coords: npt.NDArray[np.float32], frame: Tuple[float, float], pixel_size: float 69 | ) -> npt.NDArray[np.int32]: 70 | """ 71 | Converts ego-centric coordinates into pixel coordinates (ie. 
indices) 72 | :param coords: coordinate array in numpy (x,y) in last axis 73 | :param frame: tuple of frame extend in meter 74 | :param pixel_size: size of a pixel 75 | :return: indices of pixel coordinates 76 | """ 77 | assert coords.shape[-1] == 2 78 | 79 | width, height = frame 80 | pixel_width, pixel_height = int(width / pixel_size), int(height / pixel_size) 81 | pixel_center = np.array([[pixel_width / 2.0, pixel_height / 2.0]]) 82 | coords_idcs = (coords / pixel_size) + pixel_center 83 | 84 | return coords_idcs.astype(np.int32) 85 | -------------------------------------------------------------------------------- /sledge/autoencoder/preprocessing/features/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/autoencoder/preprocessing/features/__init__.py -------------------------------------------------------------------------------- /sledge/autoencoder/preprocessing/features/latent_feature.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from typing import Any, Dict, List 4 | from dataclasses import dataclass 5 | import torch 6 | 7 | from nuplan.planning.script.builders.utils.utils_type import validate_type 8 | from nuplan.planning.training.preprocessing.feature_builders.abstract_feature_builder import AbstractModelFeature 9 | from nuplan.planning.training.preprocessing.features.abstract_model_feature import FeatureDataType, to_tensor 10 | 11 | 12 | @dataclass 13 | class Latent(AbstractModelFeature): 14 | """Feature class of latent variable.""" 15 | 16 | mu: FeatureDataType 17 | log_var: FeatureDataType 18 | 19 | def to_device(self, device: torch.device) -> Latent: 20 | """Implemented. See interface.""" 21 | validate_type(self.mu, torch.Tensor) 22 | validate_type(self.log_var, torch.Tensor) 23 | return Latent(mu=self.mu.to(device=device), log_var=self.log_var.to(device=device)) 24 | 25 | def to_feature_tensor(self) -> Latent: 26 | """Inherited, see superclass.""" 27 | return Latent(mu=to_tensor(self.mu), log_var=to_tensor(self.log_var)) 28 | 29 | @classmethod 30 | def deserialize(cls, data: Dict[str, Any]) -> Latent: 31 | """Implemented. See interface.""" 32 | return Latent(mu=data["mu"], log_var=data["log_var"]) 33 | 34 | def unpack(self) -> List[Latent]: 35 | """Implemented. 
See interface.""" 36 | return [Latent(mu, log_var) for mu, log_var in zip(self.mu, self.log_var)] 37 | 38 | def torch_to_numpy(self) -> Latent: 39 | """Helper method to convert feature from torch tensor to numpy array.""" 40 | return Latent(mu=self.mu.detach().cpu().numpy(), log_var=self.log_var.detach().cpu().numpy()) 41 | 42 | def squeeze(self) -> Latent: 43 | """Helper method to apply .squeeze() on features.""" 44 | return Latent(mu=self.mu.squeeze(0), log_var=self.log_var.squeeze(0)) 45 | -------------------------------------------------------------------------------- /sledge/autoencoder/preprocessing/features/map_id_feature.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from dataclasses import dataclass 4 | from typing import Any, Dict, List 5 | 6 | import torch 7 | 8 | from nuplan.planning.script.builders.utils.utils_type import validate_type 9 | from nuplan.planning.training.preprocessing.feature_builders.abstract_feature_builder import AbstractModelFeature 10 | from nuplan.planning.training.preprocessing.features.abstract_model_feature import FeatureDataType, to_tensor 11 | 12 | MAP_NAME_ID_ABBR = [ 13 | (0, "us-nv-las-vegas-strip", "LAV"), 14 | (1, "us-pa-pittsburgh-hazelwood", "PGH"), 15 | (2, "sg-one-north", "SGP"), 16 | (3, "us-ma-boston", "BOS"), 17 | ] 18 | 19 | MAP_NAME_TO_ID = {name: id for id, name, abbr in MAP_NAME_ID_ABBR} 20 | MAP_ID_TO_NAME = {id: name for id, name, abbr in MAP_NAME_ID_ABBR} 21 | MAP_ID_TO_ABBR = {id: abbr for id, name, abbr in MAP_NAME_ID_ABBR} 22 | 23 | 24 | @dataclass 25 | class MapID(AbstractModelFeature): 26 | """Feature class of to store map id.""" 27 | 28 | id: FeatureDataType 29 | 30 | def to_device(self, device: torch.device) -> MapID: 31 | """Implemented. See interface.""" 32 | validate_type(self.id, torch.Tensor) 33 | return MapID(id=self.id.to(device=device)) 34 | 35 | def to_feature_tensor(self) -> MapID: 36 | """Inherited, see superclass.""" 37 | return MapID(id=to_tensor(self.id)) 38 | 39 | @classmethod 40 | def deserialize(cls, data: Dict[str, Any]) -> MapID: 41 | """Implemented. See interface.""" 42 | return MapID(id=data["id"]) 43 | 44 | def unpack(self) -> List[MapID]: 45 | """Implemented. 
See interface.""" 46 | return [MapID(id) for id in zip(self.id)] 47 | 48 | def torch_to_numpy(self) -> MapID: 49 | """Helper method to convert feature from torch tensor to numpy array.""" 50 | return MapID(id=self.id.detach().cpu().numpy()) 51 | 52 | def squeeze(self) -> MapID: 53 | """Helper method to apply .squeeze() on features.""" 54 | return MapID(id=self.squeeze(0)) 55 | -------------------------------------------------------------------------------- /sledge/autoencoder/preprocessing/features/rvae_matching_feature.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from dataclasses import dataclass 4 | from typing import Any, Dict, List 5 | import torch 6 | 7 | from nuplan.planning.script.builders.utils.utils_type import validate_type 8 | from nuplan.planning.training.preprocessing.feature_builders.abstract_feature_builder import AbstractModelFeature 9 | from nuplan.planning.training.preprocessing.features.abstract_model_feature import FeatureDataType, to_tensor 10 | 11 | 12 | @dataclass 13 | class RVAEMatchingFeature(AbstractModelFeature): 14 | """Feature class to score matched entities during RVAE training.""" 15 | 16 | indices: FeatureDataType 17 | 18 | def to_device(self, device: torch.device) -> RVAEMatchingFeature: 19 | """Implemented. See interface.""" 20 | validate_type(self.indices, torch.Tensor) 21 | return RVAEMatchingFeature(indices=self.indices.to(device=device)) 22 | 23 | def to_feature_tensor(self) -> RVAEMatchingFeature: 24 | """Inherited, see superclass.""" 25 | return RVAEMatchingFeature(indices=to_tensor(self.indices)) 26 | 27 | @classmethod 28 | def deserialize(cls, data: Dict[str, Any]) -> RVAEMatchingFeature: 29 | """Implemented. See interface.""" 30 | return RVAEMatchingFeature(indices=data["indices"]) 31 | 32 | def unpack(self) -> List[RVAEMatchingFeature]: 33 | """Implemented. 
See interface.""" 34 | return [RVAEMatchingFeature(indices) for indices in zip(self.indices)] 35 | -------------------------------------------------------------------------------- /sledge/autoencoder/preprocessing/target_builders/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/autoencoder/preprocessing/target_builders/__init__.py -------------------------------------------------------------------------------- /sledge/autoencoder/preprocessing/target_builders/map_id_target_builder.py: -------------------------------------------------------------------------------- 1 | from typing import Type 2 | import numpy as np 3 | 4 | from nuplan.planning.scenario_builder.abstract_scenario import AbstractScenario 5 | from nuplan.planning.training.preprocessing.feature_builders.abstract_feature_builder import AbstractModelFeature 6 | from nuplan.planning.training.preprocessing.target_builders.abstract_target_builder import AbstractTargetBuilder 7 | 8 | from sledge.autoencoder.preprocessing.features.map_id_feature import MapID, MAP_NAME_TO_ID 9 | 10 | 11 | class MapIDTargetBuilder(AbstractTargetBuilder): 12 | def __init__(self) -> None: 13 | pass 14 | 15 | @classmethod 16 | def get_feature_unique_name(cls) -> str: 17 | """Inherited, see superclass.""" 18 | return "map_id" 19 | 20 | @classmethod 21 | def get_feature_type(cls) -> Type[AbstractModelFeature]: 22 | """Inherited, see superclass.""" 23 | return MapID 24 | 25 | def get_targets(self, scenario: AbstractScenario) -> MapID: 26 | """Inherited, see superclass.""" 27 | id = np.array(MAP_NAME_TO_ID[scenario.map_api.map_name], dtype=np.int64) 28 | return MapID(id) 29 | -------------------------------------------------------------------------------- /sledge/common/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/common/__init__.py -------------------------------------------------------------------------------- /sledge/common/helper/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/common/helper/__init__.py -------------------------------------------------------------------------------- /sledge/common/helper/cache_helper.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | from pathlib import Path 3 | 4 | 5 | def find_feature_paths(root_path: Path, feature_name: str) -> List[Path]: 6 | """ 7 | Simple helper function, collecting all available gzip files in a cache. 
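The loop below assumes the nuPlan-style cache layout <root>/<log>/<scenario_type>/<token>/<feature>.gz, skips the metadata folder, and returns the token paths joined with the feature name (without the .gz suffix). A hedged usage sketch; the cache root and the feature name "sledge_raster" are illustrative assumptions:

from pathlib import Path

from sledge.common.helper.cache_helper import find_feature_paths

# By default the autoencoder cache is created under $SLEDGE_EXP_ROOT/caches/autoencoder_cache
# (see default_autoencoder.yaml further below); any cache with the same layout works here.
cache_root = Path("/tmp/exp/caches/autoencoder_cache")

feature_paths = find_feature_paths(cache_root, "sledge_raster")
print(f"found {len(feature_paths)} cached features")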
8 | :param root_path: path of feature cache 9 | :param feature_name: name of feature, excluding file ending 10 | :return: list of paths 11 | """ 12 | 13 | # TODO: move somewhere else 14 | file_paths: List[Path] = [] 15 | for log_path in root_path.iterdir(): 16 | if log_path.name == "metadata": 17 | continue 18 | for scenario_type_path in log_path.iterdir(): 19 | for token_path in scenario_type_path.iterdir(): 20 | feature_path = token_path / f"{feature_name}.gz" 21 | if feature_path.is_file(): 22 | file_paths.append(token_path / feature_name) 23 | 24 | return file_paths 25 | -------------------------------------------------------------------------------- /sledge/common/visualization/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/common/visualization/__init__.py -------------------------------------------------------------------------------- /sledge/diffusion/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/diffusion/__init__.py -------------------------------------------------------------------------------- /sledge/diffusion/dataset/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/diffusion/dataset/__init__.py -------------------------------------------------------------------------------- /sledge/diffusion/dataset/rvae_latent_builder_config.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from pathlib import Path 3 | from typing import List 4 | from datasets.builder import BuilderConfig 5 | from sledge.autoencoder.modeling.models.rvae.rvae_config import RVAEConfig 6 | 7 | 8 | @dataclass 9 | class RVAELatentBuilderConfig(BuilderConfig): 10 | 11 | feature_name: str = "rvae_latent" 12 | label_name: str = "map_id" 13 | rvae_config: RVAEConfig = RVAEConfig() 14 | 15 | def find_file_paths(self, root_path: Path) -> List[Path]: 16 | """ 17 | Search for latent features in cache. 
18 | :param root_path: root path of cache 19 | :return: list of latent file paths 20 | """ 21 | 22 | # TODO: move somewhere else 23 | file_paths: List[Path] = [] 24 | for log_path in root_path.iterdir(): 25 | if log_path.name == "metadata": 26 | continue 27 | for scenario_type_path in log_path.iterdir(): 28 | for token_path in scenario_type_path.iterdir(): 29 | if (token_path / f"{self.feature_name}.gz").is_file() and ( 30 | token_path / f"{self.label_name}.gz" 31 | ).is_file(): 32 | file_paths.append(token_path) 33 | return file_paths 34 | -------------------------------------------------------------------------------- /sledge/diffusion/experiments/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/diffusion/experiments/__init__.py -------------------------------------------------------------------------------- /sledge/diffusion/experiments/scenario_caching.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | from tqdm import tqdm 3 | from omegaconf import DictConfig 4 | from accelerate.logging import get_logger 5 | 6 | from nuplan.planning.training.preprocessing.utils.feature_cache import FeatureCachePickle 7 | 8 | from sledge.autoencoder.preprocessing.features.sledge_vector_feature import SledgeVector 9 | from sledge.autoencoder.preprocessing.features.map_id_feature import MAP_ID_TO_NAME 10 | from sledge.script.builders.diffusion_builder import build_pipeline_from_checkpoint 11 | 12 | logger = get_logger(__name__, log_level="INFO") 13 | 14 | 15 | def run_scenario_caching(cfg: DictConfig) -> None: 16 | """ 17 | Applies the diffusion model to generate and cache scenarios. 18 | :param cfg: DictConfig. Configuration that is used to run the experiment.
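The body below reads only a handful of entries from this config: num_classes, inference_batch_size, num_inference_timesteps, guidance_scale, and the cache.scenario_cache_path / cache.scenario_cache_size pair. A minimal sketch of such a config with illustrative values (the actual defaults come from the project's Hydra configs, and build_pipeline_from_checkpoint additionally needs its checkpoint-related fields):

from omegaconf import OmegaConf

# Illustrative values only; num_classes matches the four nuPlan maps in MAP_NAME_ID_ABBR,
# and inference_batch_size should be divisible by num_classes so class_labels fills a full batch.
cfg = OmegaConf.create(
    {
        "num_classes": 4,
        "inference_batch_size": 16,
        "num_inference_timesteps": 100,
        "guidance_scale": 4.0,
        "cache": {
            "scenario_cache_path": "/tmp/exp/caches/scenario_cache",
            "scenario_cache_size": 64,
        },
    }
)
# run_scenario_caching(cfg)  # requires the additional fields consumed by build_pipeline_from_checkpoint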
19 | """ 20 | 21 | logger.info("Building pipeline from checkpoint...") 22 | pipeline = build_pipeline_from_checkpoint(cfg) 23 | pipeline.to("cuda") 24 | logger.info("Building pipeline from checkpoint...DONE!") 25 | 26 | logger.info("Scenario caching...") 27 | storing_mechanism = FeatureCachePickle() 28 | current_cache_size: int = 0 29 | class_labels = list(range(cfg.num_classes)) * (cfg.inference_batch_size // cfg.num_classes) 30 | num_total_batches = (cfg.cache.scenario_cache_size // cfg.inference_batch_size) + 1 31 | for _ in tqdm(range(num_total_batches), desc="Load cache files..."): 32 | sledge_vector_list = pipeline( 33 | class_labels=class_labels, 34 | num_inference_timesteps=cfg.num_inference_timesteps, 35 | guidance_scale=cfg.guidance_scale, 36 | num_classes=cfg.num_classes, 37 | ) 38 | for sledge_vector, map_id in zip(sledge_vector_list, class_labels): 39 | sledge_vector_numpy: SledgeVector = sledge_vector.torch_to_numpy() 40 | file_name = ( 41 | Path(cfg.cache.scenario_cache_path) 42 | / "log" 43 | / MAP_ID_TO_NAME[map_id] 44 | / str(current_cache_size) 45 | / "sledge_vector" 46 | ) 47 | file_name.parent.mkdir(parents=True, exist_ok=True) 48 | storing_mechanism.store_computed_feature_to_folder(file_name, sledge_vector_numpy) 49 | current_cache_size += 1 50 | if current_cache_size >= cfg.cache.scenario_cache_size: 51 | break 52 | logger.info("Scenario caching...DONE!") 53 | return None 54 | -------------------------------------------------------------------------------- /sledge/diffusion/modelling/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/diffusion/modelling/__init__.py -------------------------------------------------------------------------------- /sledge/script/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/script/__init__.py -------------------------------------------------------------------------------- /sledge/script/builders/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/script/builders/__init__.py -------------------------------------------------------------------------------- /sledge/script/builders/matching_builder.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from typing import List 3 | 4 | from hydra.utils import instantiate 5 | from omegaconf import DictConfig 6 | 7 | from nuplan.planning.script.builders.utils.utils_type import validate_type 8 | 9 | from sledge.autoencoder.modeling.matching.abstract_matching import AbstractMatching 10 | 11 | logger = logging.getLogger(__name__) 12 | 13 | 14 | def build_matching(cfg: DictConfig) -> List[AbstractMatching]: 15 | """ 16 | Build list of matchings based on config. 17 | :param cfg: Dict config. 18 | :return List of augmentor objects. 
19 | """ 20 | logger.info("Building matchings...") 21 | 22 | instantiated_matchings = [] 23 | for matching_type in cfg.values(): 24 | matching: AbstractMatching = instantiate(matching_type) 25 | validate_type(matching, AbstractMatching) 26 | instantiated_matchings.append(matching) 27 | 28 | logger.info("Building matchings...DONE!") 29 | return instantiated_matchings 30 | -------------------------------------------------------------------------------- /sledge/script/builders/metric_builder.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from typing import List 3 | 4 | from hydra.utils import instantiate 5 | from omegaconf import DictConfig 6 | 7 | from nuplan.planning.script.builders.utils.utils_type import validate_type 8 | 9 | from sledge.autoencoder.modeling.metrics.abstract_custom_metric import AbstractCustomMetric 10 | from sledge.autoencoder.modeling.objectives.abstract_custom_objective import AbstractCustomObjective 11 | 12 | logger = logging.getLogger(__name__) 13 | 14 | 15 | def build_custom_training_metrics(cfg: DictConfig) -> List[AbstractCustomMetric]: 16 | """ 17 | Build objectives based on config 18 | :param cfg: config 19 | :return list of objectives. 20 | """ 21 | instantiated_metrics = [] 22 | 23 | scenario_type_loss_weighting = ( 24 | cfg.scenario_type_weights.scenario_type_loss_weights 25 | if ("scenario_type_weights" in cfg and "scenario_type_loss_weights" in cfg.scenario_type_weights) 26 | else {} 27 | ) 28 | for metric_name, metric_type in cfg.training_metric.items(): 29 | new_metric: AbstractCustomMetric = instantiate( 30 | metric_type, scenario_type_loss_weighting=scenario_type_loss_weighting 31 | ) 32 | validate_type(new_metric, AbstractCustomMetric) 33 | instantiated_metrics.append(new_metric) 34 | return instantiated_metrics 35 | 36 | 37 | def build_custom_objectives(cfg: DictConfig) -> List[AbstractCustomObjective]: 38 | """ 39 | Build objectives based on config 40 | :param cfg: config 41 | :return list of objectives. 42 | """ 43 | instantiated_objectives = [] 44 | 45 | scenario_type_loss_weighting = ( 46 | cfg.scenario_type_weights.scenario_type_loss_weights 47 | if ("scenario_type_weights" in cfg and "scenario_type_loss_weights" in cfg.scenario_type_weights) 48 | else {} 49 | ) 50 | for objective_name, objective_type in cfg.objective.items(): 51 | new_objective: AbstractCustomObjective = instantiate( 52 | objective_type, scenario_type_loss_weighting=scenario_type_loss_weighting 53 | ) 54 | validate_type(new_objective, AbstractCustomObjective) 55 | instantiated_objectives.append(new_objective) 56 | return instantiated_objectives 57 | -------------------------------------------------------------------------------- /sledge/script/builders/model_builder.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from hydra.utils import instantiate 4 | from omegaconf import DictConfig 5 | 6 | from nuplan.planning.script.builders.utils.utils_type import validate_type 7 | 8 | from sledge.autoencoder.modeling.autoencoder_torch_module_wrapper import AutoencoderTorchModuleWrapper 9 | from sledge.autoencoder.modeling.autoencoder_lightning_module_wrapper import AutoencoderLightningModuleWrapper 10 | 11 | logger = logging.getLogger(__name__) 12 | 13 | 14 | def build_autoencoder_torch_module_wrapper(cfg: DictConfig) -> AutoencoderTorchModuleWrapper: 15 | """ 16 | Builds the autoencoder module. 17 | :param cfg: DictConfig. 
Configuration that is used to run the experiment. 18 | :return: Instance of AutoencoderTorchModuleWrapper. 19 | """ 20 | logger.info("Building AutoencoderTorchModuleWrapper...") 21 | model = instantiate(cfg.autoencoder_model) 22 | validate_type(model, AutoencoderTorchModuleWrapper) 23 | if cfg.autoencoder_checkpoint: 24 | model = AutoencoderLightningModuleWrapper.load_from_checkpoint(cfg.autoencoder_checkpoint, model=model).model 25 | logger.info(f"Load from checkpoint {cfg.autoencoder_checkpoint}...DONE!") 26 | logger.info("Building AutoencoderTorchModuleWrapper...DONE!") 27 | 28 | return model 29 | -------------------------------------------------------------------------------- /sledge/script/builders/scenario_builder.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/script/builders/scenario_builder.py -------------------------------------------------------------------------------- /sledge/script/builders/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/script/builders/utils/__init__.py -------------------------------------------------------------------------------- /sledge/script/builders/utils/utils_config.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from pathlib import Path 3 | from shutil import rmtree 4 | 5 | from omegaconf import DictConfig, OmegaConf 6 | 7 | logger = logging.getLogger(__name__) 8 | 9 | 10 | # TODO: maybe remove this function 11 | def update_config_for_autoencoder_training(cfg: DictConfig) -> None: 12 | """ 13 | Updates the config based on some conditions. 14 | :param cfg: omegaconf dictionary that is used to run the experiment. 15 | """ 16 | # Make the configuration editable. 17 | OmegaConf.set_struct(cfg, False) 18 | 19 | if cfg.cache.autoencoder_cache_path is None: 20 | logger.warning("Parameter autoencoder_cache_path is not set, caching is disabled") 21 | else: 22 | if cfg.cache.cleanup_autoencoder_cache and Path(cfg.cache.autoencoder_cache_path).exists(): 23 | rmtree(cfg.cache.autoencoder_cache_path) 24 | 25 | Path(cfg.cache.autoencoder_cache_path).mkdir(parents=True, exist_ok=True) 26 | 27 | cfg.cache.cache_path = cfg.cache.autoencoder_cache_path # TODO: remove this workaround 28 | cfg.lightning.trainer.params.gpus = -1 # TODO: remove this workaround 29 | 30 | # Save all interpolations and remove keys that were only used for interpolation and have no further use. 31 | OmegaConf.resolve(cfg) 32 | 33 | # Finalize the configuration and make it non-editable. 34 | OmegaConf.set_struct(cfg, True) 35 | 36 | # Log the final configuration after all overrides, interpolations and updates. 
37 | if cfg.log_config: 38 | logger.info(f"Creating experiment name [{cfg.experiment}] in group [{cfg.group}] with config...") 39 | logger.info("\n" + OmegaConf.to_yaml(cfg)) 40 | -------------------------------------------------------------------------------- /sledge/script/config/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/script/config/__init__.py -------------------------------------------------------------------------------- /sledge/script/config/autoencoder/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/script/config/autoencoder/__init__.py -------------------------------------------------------------------------------- /sledge/script/config/autoencoder/callbacks/default_callbacks.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - learning_rate_monitor_callback 3 | - model_checkpoint_callback 4 | - time_logging_callback 5 | -------------------------------------------------------------------------------- /sledge/script/config/autoencoder/callbacks/learning_rate_monitor_callback.yaml: -------------------------------------------------------------------------------- 1 | learning_rate_monitor_callback: 2 | _target_: pytorch_lightning.callbacks.LearningRateMonitor 3 | _convert_: 'all' 4 | 5 | logging_interval: 'step' 6 | log_momentum: true 7 | -------------------------------------------------------------------------------- /sledge/script/config/autoencoder/callbacks/model_checkpoint_callback.yaml: -------------------------------------------------------------------------------- 1 | model_checkpoint_callback: 2 | _target_: nuplan.planning.training.callbacks.checkpoint_callback.ModelCheckpointAtEpochEnd 3 | _convert_: 'all' 4 | 5 | save_last: false 6 | dirpath: ${output_dir}/best_model 7 | 8 | # see default_lightning.yaml 9 | save_top_k: ${lightning.trainer.checkpoint.save_top_k} 10 | monitor: ${lightning.trainer.checkpoint.monitor} 11 | mode: ${lightning.trainer.checkpoint.mode} 12 | -------------------------------------------------------------------------------- /sledge/script/config/autoencoder/callbacks/rvae_visualization_callback.yaml: -------------------------------------------------------------------------------- 1 | rvae_visualization_callback: 2 | _target_: sledge.autoencoder.callbacks.rvae_visualization_callback.RVAEVisualizationCallback 3 | _convert_: 'all' 4 | 5 | images_per_tile: 6 # number of images per row 6 | num_train_tiles: 5 # number of rows of training images 7 | num_val_tiles: 5 # number of rows of validation images 8 | config: ${autoencoder_model.config} 9 | -------------------------------------------------------------------------------- /sledge/script/config/autoencoder/callbacks/time_logging_callback.yaml: -------------------------------------------------------------------------------- 1 | time_logging_callback: 2 | _target_: nuplan.planning.training.callbacks.time_logging_callback.TimeLoggingCallback 3 | _convert_: 'all' 4 | -------------------------------------------------------------------------------- /sledge/script/config/autoencoder/callbacks/vae_visualization_callback.yaml: -------------------------------------------------------------------------------- 1 | vae_visualization_callback: 2 | _target_: 
sledge.autoencoder.callbacks.vae_visualization_callback.VAEVisualizationCallback 3 | _convert_: 'all' 4 | 5 | images_per_tile: 6 # number of images per row 6 | num_train_tiles: 5 # number of rows of training images 7 | num_val_tiles: 5 # number of rows of validation images 8 | config: ${autoencoder_model.config} 9 | 10 | -------------------------------------------------------------------------------- /sledge/script/config/autoencoder/data_augmentation/rvae_augmentation.yaml: -------------------------------------------------------------------------------- 1 | rvae_augmentation: 2 | _target_: sledge.autoencoder.data_augmentation.rvae_augmentation.RVAEAugmenter 3 | _convert_: 'all' 4 | 5 | config: ${autoencoder_model.config} 6 | 7 | se2_noise: [1,1,2.5] # meter, meter, degree 8 | p_vehicle_dropout: 0.1 9 | p_pedestrian_dropout: 0.1 10 | p_static_dropout: 0.1 -------------------------------------------------------------------------------- /sledge/script/config/autoencoder/data_augmentation/rvae_no_augmentation.yaml: -------------------------------------------------------------------------------- 1 | rvae_augmentation: 2 | _target_: sledge.autoencoder.data_augmentation.rvae_augmentation.RVAEAugmenter 3 | _convert_: 'all' 4 | 5 | config: ${autoencoder_model.config} 6 | 7 | se2_noise: null 8 | p_vehicle_dropout: null 9 | p_pedestrian_dropout: null 10 | p_static_dropout: null 11 | -------------------------------------------------------------------------------- /sledge/script/config/autoencoder/data_augmentation/vae_augmentation.yaml: -------------------------------------------------------------------------------- 1 | vae_augmentation: 2 | _target_: sledge.autoencoder.data_augmentation.vae_augmentation.VAEAugmenter 3 | _convert_: 'all' 4 | 5 | config: ${autoencoder_model.config} 6 | 7 | se2_noise: [1,1,2.5] # meter, meter, degree 8 | p_vehicle_dropout: 0.1 9 | p_pedestrian_dropout: 0.1 10 | p_static_dropout: 0.1 -------------------------------------------------------------------------------- /sledge/script/config/autoencoder/data_loader/default_data_loader.yaml: -------------------------------------------------------------------------------- 1 | datamodule: 2 | train_fraction: 1.0 # [%] fraction of training samples to use 3 | val_fraction: 1.0 # [%] fraction of validation samples to use 4 | test_fraction: 1.0 # [%] fraction of test samples to use 5 | 6 | params: 7 | batch_size: 64 # batch size per GPU 8 | num_workers: 8 # number of dataloader workers 9 | pin_memory: ${gpu} # allocate dataloader examples in a page-locked memory for faster host-to-device transfer 10 | drop_last: true # drop the last examples if the batch is not complete 11 | -------------------------------------------------------------------------------- /sledge/script/config/autoencoder/default_autoencoder.yaml: -------------------------------------------------------------------------------- 1 | hydra: 2 | run: 3 | dir: ${output_dir} 4 | output_subdir: ${output_dir}/code/hydra # Store hydra's config breakdown here for debugging 5 | searchpath: # Only in these paths are discoverable 6 | - pkg://sledge.script.config.common 7 | - pkg://sledge.script.experiments # Put experiments configs in script/experiments/ 8 | 9 | defaults: 10 | - default_experiment 11 | - default_common 12 | 13 | # Trainer and callbacks 14 | - lightning: default_lightning 15 | - callbacks: default_callbacks 16 | 17 | # Optimizer settings 18 | - optimizer: adamw # [adam, adamw] supported optimizers 19 | - lr_scheduler: one_cycle_lr # [one_cycle_lr, multistep_lr] 
supported lr_schedulers 20 | - warm_up_lr_scheduler: null # [linear_warm_up, constant_warm_up] supported warm up lr schedulers 21 | 22 | # Data Loading 23 | - data_loader: default_data_loader 24 | - splitter: ??? 25 | 26 | # Objectives and metrics 27 | - objective: ??? 28 | - training_metric: null 29 | - matching: null 30 | - data_augmentation: null 31 | - data_augmentation_scheduler: null # [default_augmentation_schedulers, stepwise_augmentation_probability_scheduler, stepwise_noise_parameter_scheduler] supported data augmentation schedulers 32 | - scenario_type_weights: default_scenario_type_weights 33 | 34 | experiment_name: ${py_func}_autoencoder 35 | objective_aggregate_mode: ??? # How to aggregate multiple objectives, can be 'mean', 'max', 'sum' 36 | 37 | autoencoder_checkpoint: null 38 | 39 | # Cache parameters 40 | cache: 41 | autoencoder_cache_path: ${oc.env:SLEDGE_EXP_ROOT}/caches/autoencoder_cache # Local/remote path to store all preprocessed artifacts from the data pipeline 42 | latent_name: null 43 | use_cache_without_dataset: false # Load all existing features from a local/remote cache without loading the dataset 44 | force_feature_computation: false # Recompute features even if a cache exists 45 | 46 | cleanup_autoencoder_cache: false # Cleanup cached data in the cache_path, this ensures that new data are generated if the same cache_path is passed 47 | 48 | # Mandatory parameters 49 | py_func: ??? # Function to be run inside main (can be "train", "test", "cache") 50 | -------------------------------------------------------------------------------- /sledge/script/config/autoencoder/lightning/default_lightning.yaml: -------------------------------------------------------------------------------- 1 | distributed_training: 2 | equal_variance_scaling_strategy: true # scales lr and betas either linearly if false (multiply by num GPUs) or with equal_variance if true (multiply by sqaure root of num GPUs) 3 | 4 | trainer: 5 | checkpoint: 6 | resume_training: false # load the model from the last epoch and resume training 7 | save_top_k: -1 # save the top K models in terms of performance 8 | monitor: loss/val_loss # metric to monitor for performance 9 | mode: min # minimize/maximize metric 10 | 11 | params: 12 | max_time: "04:00:00:00" # training time before the process is terminated 13 | 14 | max_epochs: 50 # maximum number of training epochs 15 | check_val_every_n_epoch: 1 # run validation set every n training epochs 16 | val_check_interval: 1.0 # [%] run validation set every X% of training set 17 | 18 | limit_train_batches: 1.0 # how much of training dataset to check (float = fraction, int = num_batches) 19 | limit_val_batches: 1.0 # how much of validation dataset to check (float = fraction, int = num_batches) 20 | limit_test_batches: 1.0 # how much of test dataset to check (float = fraction, int = num_batches) 21 | 22 | devices: -1 # The devices to use. 
-1 to indicate all available devices 23 | # accelerator: ddp # distribution method 24 | precision: 32-true # floating point precision 25 | # amp_level: O2 # AMP optimization level 26 | num_nodes: 1 # Number of nodes used for training 27 | 28 | # auto_scale_batch_size: false 29 | # auto_lr_find: false # tunes LR before beginning training 30 | # terminate_on_nan: true # terminates training if a nan is encountered in loss/weights 31 | 32 | num_sanity_val_steps: 0 # number of validation steps to run before training begins 33 | fast_dev_run: false # runs 1 batch of train/val/test for sanity 34 | 35 | accumulate_grad_batches: 1 # accumulates gradients every n batches 36 | # track_grad_norm: -1 # logs the p-norm for inspection 37 | gradient_clip_val: 0.0 # value to clip gradients 38 | gradient_clip_algorithm: norm # [value, norm] method to clip gradients 39 | 40 | # checkpoint_callback: true # enab le default checkpoint 41 | 42 | overfitting: 43 | enable: false # run an overfitting test instead of training 44 | 45 | params: 46 | max_epochs: 150 # number of epochs to overfit the same batches 47 | overfit_batches: 1 # number of batches to overfit 48 | -------------------------------------------------------------------------------- /sledge/script/config/autoencoder/lr_scheduler/multistep_lr.yaml: -------------------------------------------------------------------------------- 1 | _target_: torch.optim.lr_scheduler.MultiStepLR 2 | _convert_: all 3 | 4 | milestones: [100, 170] # decays the learning rate of each parameter group by gamma once the number of epochs equals one of the milestones 5 | gamma: 0.1 # multiplicative factor of learning rate decay 6 | -------------------------------------------------------------------------------- /sledge/script/config/autoencoder/lr_scheduler/one_cycle_lr.yaml: -------------------------------------------------------------------------------- 1 | _target_: torch.optim.lr_scheduler.OneCycleLR 2 | _convert_: 'all' 3 | 4 | # Complete details found here: https://pytorch.org/docs/master/generated/torch.optim.lr_scheduler.OneCycleLR.html 5 | # Either total_steps OR (epochs AND steps_per_epoch) must be provided. 6 | 7 | # Updated through code in the model with configure_optimizers() 8 | optimizer: null 9 | 10 | # Upper learning rate boundaries in the cycle for each parameter group. 11 | max_lr: 1e-4 12 | 13 | # The number of epochs to train for. 14 | epochs: ${lightning.trainer.params.max_epochs} 15 | 16 | # The number of steps per epoch to train for. This is used along with epochs in order to infer the total number of steps 17 | # in the cycle if a value for total_steps is not provided. 18 | # Updated through code in update_distributed_lr_scheduler_config(). 19 | steps_per_epoch: null 20 | 21 | # The percentage of the cycle (in number of steps) spent increasing the learning rate. 22 | pct_start: 0.0 23 | 24 | # {‘cos’, ‘linear’} Specifies the annealing strategy: “cos” for cosine annealing, “linear” for linear annealing. 25 | anneal_strategy: cos 26 | 27 | # If True, momentum is cycled inversely to learning rate between ‘base_momentum’ and ‘max_momentum’. 28 | cycle_momentum: true 29 | 30 | # Lower momentum boundaries in the cycle for each parameter group. Note that momentum is cycled inversely to 31 | # learning rate; at the peak of a cycle, momentum is ‘base_momentum’ and learning rate is ‘max_lr’. 32 | base_momentum: 0.85 33 | 34 | # Upper momentum boundaries in the cycle for each parameter group. 
Functionally, it defines the cycle amplitude 35 | # (max_momentum - base_momentum). Note that momentum is cycled inversely to learning rate; at the start of a cycle, 36 | # momentum is ‘max_momentum’ and learning rate is ‘base_lr’ 37 | max_momentum: 0.95 38 | 39 | # Determines the initial learning rate via initial_lr = max_lr/div_factor 40 | div_factor: 10 41 | 42 | # Determines the final initial learning rate to be used via final_initial_lr = initial_lr/final_div_factor 43 | final_div_factor: 10 44 | 45 | # The index of the last batch. This parameter is used when resuming a training job. Since step() should be invoked after 46 | # each batch instead of after each epoch, this number represents the total number of batches computed, not the total 47 | # number of epochs computed. When last_epoch=-1, the schedule is started from the beginning. 48 | 49 | last_epoch: -1 # Unclear if lightning uses for step-level checkpoint resume but kept for completion. KIV. 50 | -------------------------------------------------------------------------------- /sledge/script/config/autoencoder/matching/rvae_green_lights_matching.yaml: -------------------------------------------------------------------------------- 1 | rvae_green_lights_matching: 2 | _target_: sledge.autoencoder.modeling.matching.rvae_matching.RVAEHungarianMatching 3 | _convert_: "all" 4 | 5 | key: "green_lights" 6 | config: ${autoencoder_model.config} 7 | -------------------------------------------------------------------------------- /sledge/script/config/autoencoder/matching/rvae_lines_matching.yaml: -------------------------------------------------------------------------------- 1 | rvae_lines_matching: 2 | _target_: sledge.autoencoder.modeling.matching.rvae_matching.RVAEHungarianMatching 3 | _convert_: "all" 4 | 5 | key: "lines" 6 | config: ${autoencoder_model.config} 7 | -------------------------------------------------------------------------------- /sledge/script/config/autoencoder/matching/rvae_pedestrians_matching.yaml: -------------------------------------------------------------------------------- 1 | rvae_pedestrians_matching: 2 | _target_: sledge.autoencoder.modeling.matching.rvae_matching.RVAEHungarianMatching 3 | _convert_: "all" 4 | 5 | key: "pedestrians" 6 | config: ${autoencoder_model.config} -------------------------------------------------------------------------------- /sledge/script/config/autoencoder/matching/rvae_red_lights_matching.yaml: -------------------------------------------------------------------------------- 1 | rvae_red_lights_matching: 2 | _target_: sledge.autoencoder.modeling.matching.rvae_matching.RVAEHungarianMatching 3 | _convert_: "all" 4 | 5 | key: "red_lights" 6 | config: ${autoencoder_model.config} -------------------------------------------------------------------------------- /sledge/script/config/autoencoder/matching/rvae_static_objects_matching.yaml: -------------------------------------------------------------------------------- 1 | rvae_static_objects_matching: 2 | _target_: sledge.autoencoder.modeling.matching.rvae_matching.RVAEHungarianMatching 3 | _convert_: "all" 4 | 5 | key: "static_objects" 6 | config: ${autoencoder_model.config} -------------------------------------------------------------------------------- /sledge/script/config/autoencoder/matching/rvae_vehicles_matching.yaml: -------------------------------------------------------------------------------- 1 | rvae_vehicles_matching: 2 | _target_: sledge.autoencoder.modeling.matching.rvae_matching.RVAEHungarianMatching 3 | _convert_: 
"all" 4 | 5 | key: "vehicles" 6 | config: ${autoencoder_model.config} -------------------------------------------------------------------------------- /sledge/script/config/autoencoder/objective/kl_objective.yaml: -------------------------------------------------------------------------------- 1 | kl_objective: 2 | _target_: sledge.autoencoder.modeling.objectives.kl_objective.KLObjective 3 | _convert_: 'all' 4 | weight: ${autoencoder_model.config.kl_weight} 5 | -------------------------------------------------------------------------------- /sledge/script/config/autoencoder/objective/rvae_ego_objective.yaml: -------------------------------------------------------------------------------- 1 | rvae_ego_objective: 2 | _target_: sledge.autoencoder.modeling.objectives.rvae_objective.RVAEEgoObjective 3 | _convert_: "all" 4 | 5 | weight: ${autoencoder_model.config.ego_reconstruction_weight} -------------------------------------------------------------------------------- /sledge/script/config/autoencoder/objective/rvae_green_lights_objective.yaml: -------------------------------------------------------------------------------- 1 | rvae_green_lights_objective: 2 | _target_: sledge.autoencoder.modeling.objectives.rvae_objective.RVAEHungarianObjective 3 | _convert_: "all" 4 | 5 | key: "green_lights" 6 | config: ${autoencoder_model.config} -------------------------------------------------------------------------------- /sledge/script/config/autoencoder/objective/rvae_lines_objective.yaml: -------------------------------------------------------------------------------- 1 | rvae_lines_objective: 2 | _target_: sledge.autoencoder.modeling.objectives.rvae_objective.RVAEHungarianObjective 3 | _convert_: "all" 4 | 5 | key: "lines" 6 | config: ${autoencoder_model.config} -------------------------------------------------------------------------------- /sledge/script/config/autoencoder/objective/rvae_pedestrians_objective.yaml: -------------------------------------------------------------------------------- 1 | rvae_pedestrians_objective: 2 | _target_: sledge.autoencoder.modeling.objectives.rvae_objective.RVAEHungarianObjective 3 | _convert_: "all" 4 | 5 | key: "pedestrians" 6 | config: ${autoencoder_model.config} -------------------------------------------------------------------------------- /sledge/script/config/autoencoder/objective/rvae_red_lights_objective.yaml: -------------------------------------------------------------------------------- 1 | rvae_red_lights_objective: 2 | _target_: sledge.autoencoder.modeling.objectives.rvae_objective.RVAEHungarianObjective 3 | _convert_: "all" 4 | 5 | key: "red_lights" 6 | config: ${autoencoder_model.config} -------------------------------------------------------------------------------- /sledge/script/config/autoencoder/objective/rvae_static_objects_objective.yaml: -------------------------------------------------------------------------------- 1 | rvae_static_objects_objective: 2 | _target_: sledge.autoencoder.modeling.objectives.rvae_objective.RVAEHungarianObjective 3 | _convert_: "all" 4 | 5 | key: "static_objects" 6 | config: ${autoencoder_model.config} -------------------------------------------------------------------------------- /sledge/script/config/autoencoder/objective/rvae_vehicles_objective.yaml: -------------------------------------------------------------------------------- 1 | rvae_vehicles_objective: 2 | _target_: sledge.autoencoder.modeling.objectives.rvae_objective.RVAEHungarianObjective 3 | _convert_: "all" 4 | 5 | key: "vehicles" 6 | config: 
${autoencoder_model.config} 7 | -------------------------------------------------------------------------------- /sledge/script/config/autoencoder/objective/vae_bce_objective.yaml: -------------------------------------------------------------------------------- 1 | vae_bce_objective: 2 | _target_: sledge.autoencoder.modeling.objectives.vae_objective.VAEBCEObjective 3 | _convert_: 'all' 4 | weight: ${autoencoder_model.config.reconstruction_weight} -------------------------------------------------------------------------------- /sledge/script/config/autoencoder/objective/vae_l1_objective.yaml: -------------------------------------------------------------------------------- 1 | vae_bce_objective: 2 | _target_: sledge.autoencoder.modeling.objectives.vae_objective.VAEL1Objective 3 | _convert_: 'all' 4 | weight: ${autoencoder_model.config.reconstruction_weight} -------------------------------------------------------------------------------- /sledge/script/config/autoencoder/optimizer/adam.yaml: -------------------------------------------------------------------------------- 1 | _target_: torch.optim.Adam 2 | _convert_: 'all' 3 | 4 | lr: 5e-5 # learning rate 5 | weight_decay: 5e-4 # l2 norm penalty 6 | betas: [0.9, 0.999] # coefficients used for computing running averages of gradient and its square 7 | -------------------------------------------------------------------------------- /sledge/script/config/autoencoder/optimizer/adamw.yaml: -------------------------------------------------------------------------------- 1 | _target_: torch.optim.AdamW 2 | _convert_: 'all' 3 | 4 | lr: 5e-5 # learning rate 5 | weight_decay: 5e-4 # weight decay coefficient 6 | betas: [0.9, 0.999] # coefficients used for computing running averages of gradient and its square 7 | -------------------------------------------------------------------------------- /sledge/script/config/autoencoder/optimizer/sgd.yaml: -------------------------------------------------------------------------------- 1 | _target_: torch.optim.SGD 2 | _convert_: 'all' 3 | 4 | lr: 5e-5 # learning rate 5 | momentum: 0.95 # momentum factor 6 | weight_decay: 1e-5 # l2 penalty 7 | dampening: 0.0 # dampening used for momentum 8 | nesterov: true # enables nesterov momentum if true, otherwise nesterov momentum is not used 9 | -------------------------------------------------------------------------------- /sledge/script/config/autoencoder/scenario_type_weights/default_scenario_type_weights.yaml: -------------------------------------------------------------------------------- 1 | scenario_type_sampling_weights: 2 | # scenario_name: scenario_type_weights 3 | # If a scenario types weight is not specified by default its weight is 1.0. 
4 | # To sample with a lower probability set a weight lower than 1.0 5 | # To sample with a higher probability set a weight higher than 1.0 6 | enable: false 7 | scenario_type_weights: 8 | unknown: 1.0 9 | 10 | scenario_type_loss_weights: 11 | unknown: 1.0 12 | -------------------------------------------------------------------------------- /sledge/script/config/autoencoder/training_metric/kl_metric.yaml: -------------------------------------------------------------------------------- 1 | kl_metric: 2 | _target_: sledge.autoencoder.modeling.metrics.kl_metric.KLMetric 3 | _convert_: 'all' 4 | -------------------------------------------------------------------------------- /sledge/script/config/common/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/script/config/common/__init__.py -------------------------------------------------------------------------------- /sledge/script/config/common/autoencoder_model/rvae_model.yaml: -------------------------------------------------------------------------------- 1 | _target_: sledge.autoencoder.modeling.models.rvae.rvae_model.RVAEModel 2 | _convert_: 'all' 3 | 4 | config: 5 | _target_: sledge.autoencoder.modeling.models.rvae.rvae_config.RVAEConfig 6 | 7 | # 1. features raw 8 | radius: 100 9 | pose_interval: 1.0 10 | 11 | # 2. features in frame 12 | frame: [64, 64] 13 | num_lines: 50 14 | num_vehicles: 50 15 | num_pedestrians: 20 16 | num_static_objects: 30 17 | num_green_lights: 20 18 | num_red_lights: 20 19 | 20 | num_line_poses: 20 21 | vehicle_max_velocity: 15 22 | pedestrian_max_velocity: 2 23 | 24 | pixel_size: 0.25 25 | line_dots_radius: 0 26 | 27 | # 3. raster encoder π 28 | model_name: "resnet50" 29 | down_factor: 32 # NOTE: specific to resnet 30 | num_input_channels: 12 31 | latent_channel: 64 32 | 33 | # 4. vector decoder φ 34 | num_encoder_layers: 0 35 | num_decoder_layers: 6 36 | 37 | patch_size: 1 38 | dropout: 0.1 39 | num_head: 8 40 | d_model: 512 41 | d_ffn: 2048 42 | activation: "relu" 43 | normalize_before: False 44 | positional_embedding: "sine" 45 | split_latent: True 46 | 47 | head_d_ffn: 1024 48 | head_num_layers: 1 49 | 50 | num_line_queries: 50 51 | num_vehicle_queries: 50 52 | num_pedestrian_queries: 20 53 | num_static_object_queries: 30 54 | num_green_light_queries: 20 55 | num_red_light_queries: 20 56 | 57 | # matching & loss 58 | line_reconstruction_weight: 2 59 | line_ce_weight: 5 60 | 61 | box_reconstruction_weight: 2 62 | box_ce_weight: 5 63 | 64 | ego_reconstruction_weight: 1 65 | kl_weight: 0.1 66 | 67 | norm_by_count: False 68 | 69 | threshold: 0.3 70 | -------------------------------------------------------------------------------- /sledge/script/config/common/autoencoder_model/vae_model.yaml: -------------------------------------------------------------------------------- 1 | _target_: sledge.autoencoder.modeling.models.vae.vae_model.VAEModel 2 | _convert_: 'all' 3 | 4 | 5 | config: 6 | _target_: sledge.autoencoder.modeling.models.vae.vae_config.VAEConfig 7 | 8 | # 1. features raw 9 | radius: 100 10 | pose_interval: 1.0 11 | 12 | # 2. 
features in frame 13 | frame: [64, 64] 14 | num_lines: 50 15 | num_vehicles: 50 16 | num_pedestrians: 20 17 | num_static_objects: 30 18 | num_green_lights: 20 19 | num_red_lights: 20 20 | 21 | num_line_poses: 20 22 | vehicle_max_velocity: 15 23 | pedestrian_max_velocity: 2 24 | 25 | pixel_size: 0.25 26 | line_dots_radius: 0 27 | 28 | # 3. raster encoder π 29 | model_name: "resnet50" 30 | down_factor: 32 # NOTE: specific to resnet 31 | num_input_channels: 12 32 | latent_channel: 64 33 | 34 | # loss 35 | reconstruction_weight: 1.0 36 | kl_weight: 0.1 37 | 38 | # output 39 | threshold: 0.3 -------------------------------------------------------------------------------- /sledge/script/config/common/default_common.yaml: -------------------------------------------------------------------------------- 1 | # Default common configs 2 | defaults: 3 | # Add common items 4 | - scenario_builder: nuplan_mini 5 | - scenario_filter: one_continuous_log 6 | - model: null 7 | - autoencoder_model: null 8 | - diffusion_model: null 9 | # Worker that is used to run simulations 10 | - worker: ray_distributed 11 | 12 | distribute_by_scenario: true 13 | distributed_timeout_seconds: 7200 # Sets how long to wait while synchronizing across worker nodes in a distributed context. 14 | selected_simulation_metrics: null 15 | 16 | # Sets verbosity level, in particular determines if progress bars are shown or not. 17 | verbose: false 18 | -------------------------------------------------------------------------------- /sledge/script/config/common/default_experiment.yaml: -------------------------------------------------------------------------------- 1 | # Common experiment configs 2 | group: ${oc.env:SLEDGE_EXP_ROOT}/exp # This is where results, logs, config, etc. are saved 3 | experiment_name: ??? # Experiment name, by default 'simulation' or 'training' 4 | job_name: ??? # Job name, as defined in the specific yaml files. 5 | 6 | # Directory structure 7 | date_format: '%Y.%m.%d.%H.%M.%S' 8 | experiment_uid: ${now:${date_format}} # Unique Id of the experiment, default to timestamp 9 | experiment: ${experiment_name}/${job_name}/${experiment_uid} # Unique name of the experiment 10 | output_dir: ${group}/${experiment} # Output directory to save all training artifacts 11 | metric_dir: metrics # Metric dir name to save metric results. 12 | aggregator_metric_dir: aggregator_metric # Aggregator metric dir name to save aggregated metrics. 13 | runner_report_file: runner_report.parquet # Name of the parquet file the RunnerReport will be stored to 14 | log_config: false # Whether to log the final config after all overrides and interpolations 15 | 16 | # Execution 17 | max_number_of_workers: null # Set null to disable threading for simulation execution 18 | seed: 0 # Random seed value. 
19 | enable_profiling: false # Whether to enable profiler which will be dumped to "profiling" folder 20 | gpu: true # Whether to use available GPUs during training/simulation 21 | 22 | # Logger 23 | logger_level: info # Level of logger 24 | logger_format_string: null # Logger format string, set null to use the default format string 25 | -------------------------------------------------------------------------------- /sledge/script/config/common/diffusion_model/dit_b_model.yaml: -------------------------------------------------------------------------------- 1 | _target_: diffusers.models.DiTTransformer2DModel 2 | _convert_: 'all' 3 | 4 | activation_fn: "gelu-approximate" 5 | attention_bias: true 6 | attention_head_dim: 64 7 | dropout: 0.0 8 | in_channels: 64 9 | norm_elementwise_affine: false 10 | norm_eps: 1e-05 11 | norm_num_groups: 32 12 | norm_type: "ada_norm_zero" 13 | num_attention_heads: 12 14 | num_embeds_ada_norm: ${num_classes} 15 | num_layers: 12 16 | out_channels: 64 17 | patch_size: 1 18 | sample_size: 8 19 | upcast_attention: false 20 | -------------------------------------------------------------------------------- /sledge/script/config/common/diffusion_model/dit_l_model.yaml: -------------------------------------------------------------------------------- 1 | _target_: diffusers.models.DiTTransformer2DModel 2 | _convert_: 'all' 3 | 4 | activation_fn: "gelu-approximate" 5 | attention_bias: true 6 | attention_head_dim: 64 7 | dropout: 0.0 8 | in_channels: 64 9 | norm_elementwise_affine: false 10 | norm_eps: 1e-05 11 | norm_num_groups: 32 12 | norm_type: "ada_norm_zero" 13 | num_attention_heads: 16 14 | num_embeds_ada_norm: ${num_classes} 15 | num_layers: 24 16 | out_channels: 64 17 | patch_size: 1 18 | sample_size: 8 19 | upcast_attention: false 20 | -------------------------------------------------------------------------------- /sledge/script/config/common/diffusion_model/dit_s_model.yaml: -------------------------------------------------------------------------------- 1 | _target_: diffusers.models.DiTTransformer2DModel 2 | _convert_: 'all' 3 | 4 | activation_fn: "gelu-approximate" 5 | attention_bias: true 6 | attention_head_dim: 64 7 | dropout: 0.0 8 | in_channels: 64 9 | norm_elementwise_affine: false 10 | norm_eps: 1e-05 11 | norm_num_groups: 32 12 | norm_type: "ada_norm_zero" 13 | num_attention_heads: 16 14 | num_embeds_ada_norm: ${num_classes} 15 | num_layers: 6 16 | out_channels: 64 17 | patch_size: 1 18 | sample_size: 8 19 | upcast_attention: false 20 | -------------------------------------------------------------------------------- /sledge/script/config/common/diffusion_model/dit_xl_model.yaml: -------------------------------------------------------------------------------- 1 | _target_: diffusers.models.DiTTransformer2DModel 2 | _convert_: 'all' 3 | 4 | activation_fn: "gelu-approximate" 5 | attention_bias: true 6 | attention_head_dim: 64 7 | dropout: 0.0 8 | in_channels: 64 9 | norm_elementwise_affine: false 10 | norm_eps: 1e-05 11 | norm_num_groups: 32 12 | norm_type: "ada_norm_zero" 13 | num_attention_heads: 16 14 | num_embeds_ada_norm: ${num_classes} 15 | num_layers: 28 16 | out_channels: 64 17 | patch_size: 1 18 | sample_size: 8 19 | upcast_attention: false 20 | -------------------------------------------------------------------------------- /sledge/script/config/common/scenario_builder/mock_abstract_scenario_builder.yaml: -------------------------------------------------------------------------------- 1 | _target_: 
nuplan.planning.scenario_builder.test.mock_abstract_scenario_builder.MockAbstractScenarioBuilder 2 | _convert_: 'all' 3 | 4 | num_scenarios: 5 # The number of scenarios to return from get_scenarios() 5 | -------------------------------------------------------------------------------- /sledge/script/config/common/scenario_builder/nuplan.yaml: -------------------------------------------------------------------------------- 1 | _target_: nuplan.planning.scenario_builder.nuplan_db.nuplan_scenario_builder.NuPlanScenarioBuilder 2 | _convert_: 'all' 3 | 4 | data_root: ${oc.env:NUPLAN_DATA_ROOT}/nuplan-v1.1/splits/trainval 5 | map_root: ${oc.env:NUPLAN_MAPS_ROOT} 6 | sensor_root: ${oc.env:NUPLAN_DATA_ROOT}/nuplan-v1.1/sensor_blobs 7 | 8 | db_files: null # if db file(s) exist locally, the data_root is ignored 9 | 10 | map_version: nuplan-maps-v1.0 11 | 12 | include_cameras: false # Include camera data in the scenarios. 13 | 14 | max_workers: null 15 | verbose: ${verbose} 16 | 17 | defaults: 18 | - vehicle_parameters: nuplan_vehicle_parameters 19 | - scenario_mapping: nuplan_scenario_mapping 20 | -------------------------------------------------------------------------------- /sledge/script/config/common/scenario_builder/nuplan_challenge.yaml: -------------------------------------------------------------------------------- 1 | _target_: nuplan.planning.scenario_builder.nuplan_db.nuplan_scenario_builder.NuPlanScenarioBuilder 2 | _convert_: 'all' 3 | 4 | data_root: ${oc.env:NUPLAN_DATA_ROOT}/nuplan-v1.1/test/ 5 | map_root: ${oc.env:NUPLAN_MAPS_ROOT} 6 | sensor_root: ${oc.env:NUPLAN_DATA_ROOT}/nuplan-v1.1/sensor_blobs 7 | 8 | db_files: null # if db file(s) exist locally, the data_root is ignored 9 | 10 | map_version: nuplan-maps-v1.0 11 | 12 | include_cameras: false # Include camera data in the scenarios. 13 | 14 | max_workers: null 15 | verbose: ${verbose} 16 | 17 | defaults: 18 | - vehicle_parameters: nuplan_vehicle_parameters 19 | - scenario_mapping: nuplan_challenge_scenario_mapping 20 | -------------------------------------------------------------------------------- /sledge/script/config/common/scenario_builder/nuplan_mini.yaml: -------------------------------------------------------------------------------- 1 | _target_: nuplan.planning.scenario_builder.nuplan_db.nuplan_scenario_builder.NuPlanScenarioBuilder 2 | _convert_: 'all' 3 | 4 | data_root: ${oc.env:NUPLAN_DATA_ROOT}/nuplan-v1.1/splits/mini 5 | map_root: ${oc.env:NUPLAN_MAPS_ROOT} 6 | sensor_root: ${oc.env:NUPLAN_DATA_ROOT}/nuplan-v1.1/sensor_blobs 7 | 8 | db_files: null # if db file(s) exist locally, the data_root is ignored 9 | 10 | map_version: nuplan-maps-v1.0 11 | 12 | include_cameras: false # Include camera data in the scenarios. 
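The NuPlanScenarioBuilder configs above resolve their paths from the NUPLAN_DATA_ROOT and NUPLAN_MAPS_ROOT environment variables through OmegaConf's oc.env resolver. A minimal sketch of that resolution, with placeholder paths:

# Sketch only: how the ${oc.env:...} interpolation used by the scenario_builder
# configs resolves. The paths below are placeholders, not project defaults.
import os
from omegaconf import OmegaConf

os.environ.setdefault("NUPLAN_DATA_ROOT", "/data/nuplan")  # placeholder if unset
cfg = OmegaConf.create({"data_root": "${oc.env:NUPLAN_DATA_ROOT}/nuplan-v1.1/splits/mini"})
print(cfg.data_root)  # e.g. /data/nuplan/nuplan-v1.1/splits/mini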
13 | 14 | max_workers: null 15 | verbose: ${verbose} 16 | 17 | defaults: 18 | - vehicle_parameters: nuplan_vehicle_parameters 19 | - scenario_mapping: nuplan_scenario_mapping 20 | -------------------------------------------------------------------------------- /sledge/script/config/common/scenario_builder/vehicle_parameters/nuplan_vehicle_parameters.yaml: -------------------------------------------------------------------------------- 1 | _target_: nuplan.common.actor_state.vehicle_parameters.VehicleParameters 2 | _convert_: 'all' 3 | width: 2.297 4 | front_length: 4.049 5 | rear_length: 1.127 6 | cog_position_from_rear_axle: 1.67 7 | height: 1.777 8 | wheel_base: 3.089 9 | vehicle_name: "pacifica" 10 | vehicle_type: "gen1" 11 | -------------------------------------------------------------------------------- /sledge/script/config/common/scenario_filter/filter_bos.yaml: -------------------------------------------------------------------------------- 1 | _target_: nuplan.planning.scenario_builder.scenario_filter.ScenarioFilter 2 | _convert_: 'all' 3 | 4 | scenario_types: null # List of scenario types to include 5 | 6 | log_names: null # Filter scenarios by log names 7 | map_names: ["us-ma-boston"] # ["us-nv-las-vegas-strip", "us-pa-pittsburgh-hazelwood", "sg-one-north", "us-ma-boston"] 8 | 9 | num_scenarios_per_type: null # Number of scenarios per type 10 | limit_total_scenarios: null # Limit total scenarios (float = fraction, int = num) - this filter can be applied on top of num_scenarios_per_type 11 | timestamp_threshold_s: 1 # Filter scenarios to ensure scenarios have more than `timestamp_threshold_s` seconds between their initial lidar timestamps 12 | ego_displacement_minimum_m: null # Whether to remove scenarios where the ego moves less than a certain amount 13 | ego_start_speed_threshold: null # Limit to scenarios where the ego reaches a certain speed from below 14 | ego_stop_speed_threshold: null # Limit to scenarios where the ego reaches a certain speed from above 15 | speed_noise_tolerance: null # Value at or below which a speed change between two timepoints should be ignored as noise. 
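The nuplan_vehicle_parameters.yaml entry above maps one-to-one onto the constructor of nuplan's VehicleParameters, so Hydra's instantiation is equivalent to the direct construction sketched below (assuming the nuplan-devkit is installed).

# Sketch only: direct construction equivalent to Hydra instantiating
# nuplan_vehicle_parameters.yaml (assumes the nuplan-devkit is installed).
from nuplan.common.actor_state.vehicle_parameters import VehicleParameters

pacifica = VehicleParameters(
    width=2.297,
    front_length=4.049,
    rear_length=1.127,
    cog_position_from_rear_axle=1.67,
    height=1.777,
    wheel_base=3.089,
    vehicle_name="pacifica",
    vehicle_type="gen1",
)
print(pacifica.width, pacifica.wheel_base)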
16 | 17 | expand_scenarios: true # Whether to expand multi-sample scenarios to multiple single-sample scenarios 18 | remove_invalid_goals: true # Whether to remove scenarios where the mission goal is invalid 19 | shuffle: true 20 | 21 | scenario_tokens: null # List of scenario tokens to include -------------------------------------------------------------------------------- /sledge/script/config/common/scenario_filter/filter_lav.yaml: -------------------------------------------------------------------------------- 1 | _target_: nuplan.planning.scenario_builder.scenario_filter.ScenarioFilter 2 | _convert_: 'all' 3 | 4 | scenario_types: null # List of scenario types to include 5 | 6 | log_names: null # Filter scenarios by log names 7 | map_names: ["us-nv-las-vegas-strip"] # ["us-nv-las-vegas-strip", "us-pa-pittsburgh-hazelwood", "sg-one-north", "us-ma-boston"] 8 | 9 | num_scenarios_per_type: null # Number of scenarios per type 10 | limit_total_scenarios: null # Limit total scenarios (float = fraction, int = num) - this filter can be applied on top of num_scenarios_per_type 11 | timestamp_threshold_s: 30 # Filter scenarios to ensure scenarios have more than `timestamp_threshold_s` seconds between their initial lidar timestamps 12 | ego_displacement_minimum_m: null # Whether to remove scenarios where the ego moves less than a certain amount 13 | ego_start_speed_threshold: null # Limit to scenarios where the ego reaches a certain speed from below 14 | ego_stop_speed_threshold: null # Limit to scenarios where the ego reaches a certain speed from above 15 | speed_noise_tolerance: null # Value at or below which a speed change between two timepoints should be ignored as noise. 16 | 17 | expand_scenarios: true # Whether to expand multi-sample scenarios to multiple single-sample scenarios 18 | remove_invalid_goals: true # Whether to remove scenarios where the mission goal is invalid 19 | shuffle: true 20 | 21 | scenario_tokens: null # List of scenario tokens to include -------------------------------------------------------------------------------- /sledge/script/config/common/scenario_filter/filter_pgh.yaml: -------------------------------------------------------------------------------- 1 | _target_: nuplan.planning.scenario_builder.scenario_filter.ScenarioFilter 2 | _convert_: 'all' 3 | 4 | scenario_types: null # List of scenario types to include 5 | 6 | log_names: null # Filter scenarios by log names 7 | map_names: ["us-pa-pittsburgh-hazelwood"] # ["us-nv-las-vegas-strip", "us-pa-pittsburgh-hazelwood", "sg-one-north", "us-ma-boston"] 8 | 9 | num_scenarios_per_type: null # Number of scenarios per type 10 | limit_total_scenarios: null # Limit total scenarios (float = fraction, int = num) - this filter can be applied on top of num_scenarios_per_type 11 | timestamp_threshold_s: 2 # Filter scenarios to ensure scenarios have more than `timestamp_threshold_s` seconds between their initial lidar timestamps 12 | ego_displacement_minimum_m: null # Whether to remove scenarios where the ego moves less than a certain amount 13 | ego_start_speed_threshold: null # Limit to scenarios where the ego reaches a certain speed from below 14 | ego_stop_speed_threshold: null # Limit to scenarios where the ego reaches a certain speed from above 15 | speed_noise_tolerance: null # Value at or below which a speed change between two timepoints should be ignored as noise. 
16 | 17 | expand_scenarios: true # Whether to expand multi-sample scenarios to multiple single-sample scenarios 18 | remove_invalid_goals: true # Whether to remove scenarios where the mission goal is invalid 19 | shuffle: true 20 | 21 | scenario_tokens: null # List of scenario tokens to include -------------------------------------------------------------------------------- /sledge/script/config/common/scenario_filter/filter_sgp.yaml: -------------------------------------------------------------------------------- 1 | _target_: nuplan.planning.scenario_builder.scenario_filter.ScenarioFilter 2 | _convert_: 'all' 3 | 4 | scenario_types: null # List of scenario types to include 5 | 6 | log_names: null # Filter scenarios by log names 7 | map_names: ["sg-one-north"] # ["us-nv-las-vegas-strip", "us-pa-pittsburgh-hazelwood", "sg-one-north", "us-ma-boston"] 8 | 9 | num_scenarios_per_type: null # Number of scenarios per type 10 | limit_total_scenarios: null # Limit total scenarios (float = fraction, int = num) - this filter can be applied on top of num_scenarios_per_type 11 | timestamp_threshold_s: 2 # Filter scenarios to ensure scenarios have more than `timestamp_threshold_s` seconds between their initial lidar timestamps 12 | ego_displacement_minimum_m: null # Whether to remove scenarios where the ego moves less than a certain amount 13 | ego_start_speed_threshold: null # Limit to scenarios where the ego reaches a certain speed from below 14 | ego_stop_speed_threshold: null # Limit to scenarios where the ego reaches a certain speed from above 15 | speed_noise_tolerance: null # Value at or below which a speed change between two timepoints should be ignored as noise. 16 | 17 | expand_scenarios: true # Whether to expand multi-sample scenarios to multiple single-sample scenarios 18 | remove_invalid_goals: true # Whether to remove scenarios where the mission goal is invalid 19 | shuffle: true 20 | 21 | scenario_tokens: null # List of scenario tokens to include -------------------------------------------------------------------------------- /sledge/script/config/common/scenario_filter/one_continuous_log.yaml: -------------------------------------------------------------------------------- 1 | _target_: nuplan.planning.scenario_builder.scenario_filter.ScenarioFilter 2 | _convert_: 'all' 3 | 4 | scenario_types: null # List of scenario types to include 5 | scenario_tokens: null # List of scenario tokens to include 6 | 7 | log_names: # Filter scenarios by log names 8 | - 2021.07.16.20.45.29_veh-35_01095_01486 9 | map_names: null # Filter scenarios by map names 10 | 11 | num_scenarios_per_type: null # Number of scenarios per type 12 | limit_total_scenarios: null # Limit total scenarios (float = fraction, int = num) - this filter can be applied on top of num_scenarios_per_type 13 | timestamp_threshold_s: null # Filter scenarios to ensure scenarios have more than `timestamp_threshold_s` seconds between their initial lidar timestamps 14 | ego_displacement_minimum_m: null # Whether to remove scenarios where the ego moves less than a certain amount 15 | ego_start_speed_threshold: null # Limit to scenarios where the ego reaches a certain speed from below 16 | ego_stop_speed_threshold: null # Limit to scenarios where the ego reaches a certain speed from above 17 | speed_noise_tolerance: null # Value at or below which a speed change between two timepoints should be ignored as noise. 
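The per-city filters above (filter_bos, filter_lav, filter_pgh, filter_sgp) differ only in map_names and timestamp_threshold_s; each one instantiates nuplan's ScenarioFilter. As a sketch (the relative path and installed nuplan-devkit/hydra-core packages are assumptions), one of them can be loaded and built standalone:

# Sketch only: build a ScenarioFilter from one of the city filter configs above.
from omegaconf import OmegaConf
from hydra.utils import instantiate

cfg = OmegaConf.load("sledge/script/config/common/scenario_filter/filter_bos.yaml")
scenario_filter = instantiate(cfg)
print(scenario_filter.map_names)  # ['us-ma-boston']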
18 | 19 | expand_scenarios: false # Whether to expand multi-sample scenarios to multiple single-sample scenarios 20 | remove_invalid_goals: true # Whether to remove scenarios where the mission goal is invalid 21 | shuffle: false # Whether to shuffle the scenarios 22 | -------------------------------------------------------------------------------- /sledge/script/config/common/scenario_filter/vegas.yaml: -------------------------------------------------------------------------------- 1 | _target_: nuplan.planning.scenario_builder.scenario_filter.ScenarioFilter 2 | _convert_: 'all' 3 | 4 | scenario_types: # List of scenario types to include 5 | - starting_left_turn 6 | - starting_right_turn 7 | 8 | scenario_tokens: null 9 | 10 | log_names: null # Filter scenarios by log names 11 | map_names: ["us-nv-las-vegas-strip"] # ["us-nv-las-vegas-strip", "us-pa-pittsburgh-hazelwood", "sg-one-north", "us-ma-boston"] 12 | 13 | num_scenarios_per_type: 100 # Number of scenarios per type 14 | limit_total_scenarios: null # Limit total scenarios (float = fraction, int = num) - this filter can be applied on top of num_scenarios_per_type 15 | timestamp_threshold_s: 15 # Filter scenarios to ensure scenarios have more than `timestamp_threshold_s` seconds between their initial lidar timestamps 16 | ego_displacement_minimum_m: null # Whether to remove scenarios where the ego moves less than a certain amount 17 | ego_start_speed_threshold: null # Limit to scenarios where the ego reaches a certain speed from below 18 | ego_stop_speed_threshold: null # Limit to scenarios where the ego reaches a certain speed from above 19 | speed_noise_tolerance: null # Value at or below which a speed change between two timepoints should be ignored as noise. 20 | 21 | expand_scenarios: false # Whether to expand multi-sample scenarios to multiple single-sample scenarios 22 | remove_invalid_goals: true # Whether to remove scenarios where the mission goal is invalid 23 | shuffle: false 24 | -------------------------------------------------------------------------------- /sledge/script/config/common/simulation_metric/common_metrics.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - low_level: 3 | - ego_mean_speed_statistics 4 | - ego_expert_l2_error_statistics 5 | - ego_expert_l2_error_with_yaw_statistics 6 | -------------------------------------------------------------------------------- /sledge/script/config/common/simulation_metric/default_metrics.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - common_metrics 3 | -------------------------------------------------------------------------------- /sledge/script/config/common/simulation_metric/ego_in_stop_line/ego_stop_at_stop_line_statistics.yaml: -------------------------------------------------------------------------------- 1 | ego_stop_at_stop_line_statistics: 2 | _target_: nuplan.planning.metrics.evaluation_metrics.scenario_dependent.ego_stop_at_stop_line.EgoStopAtStopLineStatistics 3 | _convert_: 'all' 4 | name: 'ego_stop_at_stop_line' 5 | category: 'Scenario dependent' 6 | distance_threshold: 5.0 # m 7 | velocity_threshold: 0.1 # m/s^2 8 | max_violation_threshold: 0 9 | -------------------------------------------------------------------------------- /sledge/script/config/common/simulation_metric/high_level/drivable_area_compliance_statistics.yaml: -------------------------------------------------------------------------------- 1 | drivable_area_compliance_statistics: 2 
| _target_: nuplan.planning.metrics.evaluation_metrics.common.drivable_area_compliance.DrivableAreaComplianceStatistics 3 | _convert_: 'all' 4 | name: 'drivable_area_compliance' 5 | category: 'Planning' 6 | metric_score_unit: 'bool' 7 | 8 | max_violation_threshold: 0.3 # The violation tolerance threshold in meters 9 | 10 | required_metrics: 11 | # Parameter: base metric name and other high level metrics used in this metric 12 | lane_change_metric: ego_lane_change_statistics 13 | -------------------------------------------------------------------------------- /sledge/script/config/common/simulation_metric/high_level/driving_direction_compliance_statistics.yaml: -------------------------------------------------------------------------------- 1 | driving_direction_compliance_statistics: 2 | _target_: nuplan.planning.metrics.evaluation_metrics.common.driving_direction_compliance.DrivingDirectionComplianceStatistics 3 | _convert_: 'all' 4 | name: 'driving_direction_compliance' 5 | category: 'Planning' 6 | metric_score_unit: 'bool' 7 | 8 | driving_direction_compliance_threshold: 2 # [m] Driving in opposite direction up to this threshold isn't considered a violation 9 | driving_direction_violation_threshold: 6 # [m] Driving in opposite direction above this threshold isn't tolerated 10 | time_horizon: 1 # [s] time horizon in which movement of the vehicle along baseline direction is computed. 11 | 12 | required_metrics: 13 | # Parameter: base metric name and other high level metrics used in this metric 14 | lane_change_metric: ego_lane_change_statistics 15 | -------------------------------------------------------------------------------- /sledge/script/config/common/simulation_metric/high_level/ego_is_comfortable_statistics.yaml: -------------------------------------------------------------------------------- 1 | ego_is_comfortable_statistics: 2 | _target_: nuplan.planning.metrics.evaluation_metrics.common.ego_is_comfortable.EgoIsComfortableStatistics 3 | _convert_: 'all' 4 | name: 'ego_is_comfortable' 5 | category: 'Violations' 6 | metric_score_unit: 'bool' 7 | 8 | required_metrics: 9 | # Parameter: base metric name 10 | ego_jerk_metric: ego_jerk_statistics 11 | ego_lat_acceleration_metric: ego_lat_acceleration_statistics 12 | ego_lon_acceleration_metric: ego_lon_acceleration_statistics 13 | ego_lon_jerk_metric: ego_lon_jerk_statistics 14 | ego_yaw_acceleration_metric: ego_yaw_acceleration_statistics 15 | ego_yaw_rate_metric: ego_yaw_rate_statistics 16 | -------------------------------------------------------------------------------- /sledge/script/config/common/simulation_metric/high_level/ego_is_making_progress_statistics.yaml: -------------------------------------------------------------------------------- 1 | ego_is_making_progress_statistics: 2 | _target_: nuplan.planning.metrics.evaluation_metrics.common.ego_is_making_progress.EgoIsMakingProgressStatistics 3 | _convert_: 'all' 4 | name: 'ego_is_making_progress' 5 | category: 'Planning' 6 | metric_score_unit: 'bool' 7 | min_progress_threshold: 0.2 8 | 9 | required_metrics: 10 | # Parameter: base metric name and other high level metrics used in this metric 11 | ego_progress_along_expert_route_metric: ego_progress_along_expert_route_statistics 12 | -------------------------------------------------------------------------------- /sledge/script/config/common/simulation_metric/high_level/no_ego_at_fault_collisions_statistics.yaml: -------------------------------------------------------------------------------- 1 |
no_ego_at_fault_collisions_statistics: 2 | _target_: nuplan.planning.metrics.evaluation_metrics.common.no_ego_at_fault_collisions.EgoAtFaultCollisionStatistics 3 | _convert_: 'all' 4 | name: 'no_ego_at_fault_collisions' 5 | category: 'Dynamics' 6 | metric_score_unit: 'float' 7 | 8 | max_violation_threshold_vru: 0 9 | max_violation_threshold_vehicle: 0 10 | max_violation_threshold_object: 1 11 | 12 | required_metrics: 13 | # Parameter: base metric name and other high level metrics used in this metric 14 | ego_lane_change_metric: ego_lane_change_statistics 15 | -------------------------------------------------------------------------------- /sledge/script/config/common/simulation_metric/high_level/planner_expert_average_heading_error_within_bound_statistics.yaml: -------------------------------------------------------------------------------- 1 | planner_expert_average_heading_error_within_bound_statistics: 2 | _target_: nuplan.planning.metrics.evaluation_metrics.common.planner_expert_average_heading_error_within_bound.PlannerExpertAverageHeadingErrorStatistics 3 | _convert_: 'all' 4 | name: 'planner_expert_average_heading_error_within_bound' 5 | category: 'Planning' 6 | metric_score_unit: 'float' 7 | max_average_heading_error_threshold: 0.8 #radian 8 | 9 | required_metrics: 10 | # Parameter: base metric name and other high level metrics used in this metric 11 | planner_expert_average_l2_error_within_bound_metric: planner_expert_average_l2_error_within_bound_statistics 12 | -------------------------------------------------------------------------------- /sledge/script/config/common/simulation_metric/high_level/planner_expert_final_heading_error_within_bound_statistics.yaml: -------------------------------------------------------------------------------- 1 | planner_expert_final_heading_error_within_bound_statistics: 2 | _target_: nuplan.planning.metrics.evaluation_metrics.common.planner_expert_final_heading_error_within_bound.PlannerExpertFinalHeadingErrorStatistics 3 | _convert_: 'all' 4 | name: 'planner_expert_final_heading_error_within_bound' 5 | category: 'Planning' 6 | metric_score_unit: 'float' 7 | max_final_heading_error_threshold: 0.8 #radian 8 | 9 | required_metrics: 10 | # Parameter: base metric name and other high level metrics used in this metric 11 | planner_expert_average_l2_error_within_bound_metric: planner_expert_average_l2_error_within_bound_statistics 12 | -------------------------------------------------------------------------------- /sledge/script/config/common/simulation_metric/high_level/planner_expert_final_l2_error_within_bound_statistics.yaml: -------------------------------------------------------------------------------- 1 | planner_expert_final_l2_error_within_bound_statistics: 2 | _target_: nuplan.planning.metrics.evaluation_metrics.common.planner_expert_final_l2_error_within_bound.PlannerExpertFinalL2ErrorStatistics 3 | _convert_: 'all' 4 | name: 'planner_expert_final_l2_error_within_bound' 5 | category: 'Planning' 6 | metric_score_unit: 'float' 7 | max_final_l2_error_threshold: 8 #meter 8 | 9 | required_metrics: 10 | # Parameter: base metric name and other high level metrics used in this metric 11 | planner_expert_average_l2_error_within_bound_metric: planner_expert_average_l2_error_within_bound_statistics 12 | -------------------------------------------------------------------------------- /sledge/script/config/common/simulation_metric/high_level/planner_miss_rate_within_bound_statistics.yaml: 
-------------------------------------------------------------------------------- 1 | planner_miss_rate_within_bound_statistics: 2 | _target_: nuplan.planning.metrics.evaluation_metrics.common.planner_miss_rate_within_bound.PlannerMissRateStatistics 3 | _convert_: 'all' 4 | name: 'planner_miss_rate_within_bound' 5 | category: 'Planning' 6 | metric_score_unit: 'bool' 7 | 8 | max_displacement_threshold: [6.0, 8.0, 16.0] #meter 9 | max_miss_rate_threshold: 0.3 10 | 11 | required_metrics: 12 | # Parameter: base metric name and other high level metrics used in this metric 13 | planner_expert_average_l2_error_within_bound_metric: planner_expert_average_l2_error_within_bound_statistics 14 | -------------------------------------------------------------------------------- /sledge/script/config/common/simulation_metric/high_level/speed_limit_compliance_statistics.yaml: -------------------------------------------------------------------------------- 1 | speed_limit_compliance_statistics: 2 | _target_: nuplan.planning.metrics.evaluation_metrics.common.speed_limit_compliance.SpeedLimitComplianceStatistics 3 | _convert_: 'all' 4 | name: 'speed_limit_compliance' 5 | category: 'Violations' 6 | metric_score_unit: 'float' 7 | max_violation_threshold: 1.0 8 | max_overspeed_value_threshold: 2.23 9 | 10 | required_metrics: 11 | # Parameter: base metric name and other high level metrics used in this metric 12 | lane_change_metric: ego_lane_change_statistics 13 | -------------------------------------------------------------------------------- /sledge/script/config/common/simulation_metric/high_level/time_to_collision_within_bound_statistics.yaml: -------------------------------------------------------------------------------- 1 | time_to_collision_within_bound_statistics: 2 | _target_: nuplan.planning.metrics.evaluation_metrics.common.time_to_collision_within_bound.TimeToCollisionStatistics 3 | _convert_: 'all' 4 | name: 'time_to_collision_within_bound' 5 | category: 'Planning' 6 | metric_score_unit: 'bool' 7 | 8 | time_step_size: 0.1 9 | time_horizon: 3.0 10 | least_min_ttc: 0.95 11 | 12 | required_metrics: 13 | # Parameter: base metric name and other high level metrics used in this metric 14 | ego_lane_change_metric: ego_lane_change_statistics 15 | no_ego_at_fault_collisions_metric: no_ego_at_fault_collisions_statistics 16 | -------------------------------------------------------------------------------- /sledge/script/config/common/simulation_metric/low_level/ego_acceleration_statistics.yaml: -------------------------------------------------------------------------------- 1 | ego_acceleration_statistics: 2 | _target_: nuplan.planning.metrics.evaluation_metrics.common.ego_acceleration.EgoAccelerationStatistics 3 | _convert_: 'all' 4 | name: 'ego_acceleration' 5 | category: 'Dynamics' 6 | -------------------------------------------------------------------------------- /sledge/script/config/common/simulation_metric/low_level/ego_expert_l2_error_statistics.yaml: -------------------------------------------------------------------------------- 1 | ego_expert_l2_error_statistics: 2 | _target_: nuplan.planning.metrics.evaluation_metrics.common.ego_expert_l2_error.EgoExpertL2ErrorStatistics 3 | _convert_: 'all' 4 | name: 'ego_expert_L2_error' 5 | category: 'Planning' 6 | discount_factor: 1 7 | -------------------------------------------------------------------------------- /sledge/script/config/common/simulation_metric/low_level/ego_expert_l2_error_with_yaw_statistics.yaml: 
-------------------------------------------------------------------------------- 1 | ego_expert_l2_error_with_yaw_statistics: 2 | _target_: nuplan.planning.metrics.evaluation_metrics.common.ego_expert_l2_error_with_yaw.EgoExpertL2ErrorWithYawStatistics 3 | _convert_: 'all' 4 | name: 'ego_expert_l2_error_with_yaw' 5 | category: 'Planning' 6 | discount_factor: 1 7 | heading_diff_weight: 2.5 8 | -------------------------------------------------------------------------------- /sledge/script/config/common/simulation_metric/low_level/ego_jerk_statistics.yaml: -------------------------------------------------------------------------------- 1 | ego_jerk_statistics: 2 | _target_: nuplan.planning.metrics.evaluation_metrics.common.ego_jerk.EgoJerkStatistics 3 | _convert_: 'all' 4 | name: 'ego_jerk' 5 | category: 'Dynamics' 6 | 7 | max_abs_mag_jerk: 8.37 8 | -------------------------------------------------------------------------------- /sledge/script/config/common/simulation_metric/low_level/ego_lane_change_statistics.yaml: -------------------------------------------------------------------------------- 1 | ego_lane_change_statistics: 2 | _target_: nuplan.planning.metrics.evaluation_metrics.common.ego_lane_change.EgoLaneChangeStatistics 3 | _convert_: 'all' 4 | name: 'ego_lane_change' 5 | category: 'Planning' 6 | max_fail_rate: 0.3 7 | -------------------------------------------------------------------------------- /sledge/script/config/common/simulation_metric/low_level/ego_lat_acceleration_statistics.yaml: -------------------------------------------------------------------------------- 1 | ego_lat_acceleration_statistics: 2 | _target_: nuplan.planning.metrics.evaluation_metrics.common.ego_lat_acceleration.EgoLatAccelerationStatistics 3 | _convert_: 'all' 4 | name: 'ego_lat_acceleration' 5 | category: 'Dynamics' 6 | 7 | max_abs_lat_accel: 4.89 8 | -------------------------------------------------------------------------------- /sledge/script/config/common/simulation_metric/low_level/ego_lat_jerk_statistics.yaml: -------------------------------------------------------------------------------- 1 | ego_lat_jerk_statistics: 2 | _target_: nuplan.planning.metrics.evaluation_metrics.common.ego_lat_jerk.EgoLatJerkStatistics 3 | _convert_: 'all' 4 | name: 'ego_lat_jerk' 5 | category: 'Dynamics' 6 | -------------------------------------------------------------------------------- /sledge/script/config/common/simulation_metric/low_level/ego_lon_acceleration_statistics.yaml: -------------------------------------------------------------------------------- 1 | ego_lon_acceleration_statistics: 2 | _target_: nuplan.planning.metrics.evaluation_metrics.common.ego_lon_acceleration.EgoLonAccelerationStatistics 3 | _convert_: 'all' 4 | name: 'ego_lon_acceleration' 5 | category: 'Dynamics' 6 | 7 | min_lon_accel: -4.05 8 | max_lon_accel: 2.40 9 | -------------------------------------------------------------------------------- /sledge/script/config/common/simulation_metric/low_level/ego_lon_jerk_statistics.yaml: -------------------------------------------------------------------------------- 1 | ego_lon_jerk_statistics: 2 | _target_: nuplan.planning.metrics.evaluation_metrics.common.ego_lon_jerk.EgoLonJerkStatistics 3 | _convert_: 'all' 4 | name: 'ego_lon_jerk' 5 | category: 'Dynamics' 6 | 7 | max_abs_lon_jerk: 4.13 8 | -------------------------------------------------------------------------------- /sledge/script/config/common/simulation_metric/low_level/ego_mean_speed_statistics.yaml: 
-------------------------------------------------------------------------------- 1 | ego_mean_speed_statistics: 2 | _target_: nuplan.planning.metrics.evaluation_metrics.common.ego_mean_speed.EgoMeanSpeedStatistics 3 | _convert_: 'all' 4 | name: 'ego_mean_speed' 5 | category: 'Dynamics' 6 | -------------------------------------------------------------------------------- /sledge/script/config/common/simulation_metric/low_level/ego_progress_along_expert_route_statistics.yaml: -------------------------------------------------------------------------------- 1 | ego_progress_along_expert_route_statistics: 2 | _target_: nuplan.planning.metrics.evaluation_metrics.common.ego_progress_along_expert_route.EgoProgressAlongExpertRouteStatistics 3 | _convert_: 'all' 4 | name: 'ego_progress_along_expert_route' 5 | category: 'Planning' 6 | score_progress_threshold: 2 # [m] 7 | -------------------------------------------------------------------------------- /sledge/script/config/common/simulation_metric/low_level/ego_yaw_acceleration_statistics.yaml: -------------------------------------------------------------------------------- 1 | ego_yaw_acceleration_statistics: 2 | _target_: nuplan.planning.metrics.evaluation_metrics.common.ego_yaw_acceleration.EgoYawAccelerationStatistics 3 | _convert_: 'all' 4 | name: 'ego_yaw_acceleration' 5 | category: 'Dynamics' 6 | 7 | max_abs_yaw_accel: 1.93 8 | -------------------------------------------------------------------------------- /sledge/script/config/common/simulation_metric/low_level/ego_yaw_rate_statistics.yaml: -------------------------------------------------------------------------------- 1 | ego_yaw_rate_statistics: 2 | _target_: nuplan.planning.metrics.evaluation_metrics.common.ego_yaw_rate.EgoYawRateStatistics 3 | _convert_: 'all' 4 | name: 'ego_yaw_rate' 5 | category: 'Dynamics' 6 | 7 | max_abs_yaw_rate: 0.95 8 | -------------------------------------------------------------------------------- /sledge/script/config/common/simulation_metric/low_level/planner_expert_average_l2_error_within_bound_statistics.yaml: -------------------------------------------------------------------------------- 1 | planner_expert_average_l2_error_within_bound_statistics: 2 | _target_: nuplan.planning.metrics.evaluation_metrics.common.planner_expert_average_l2_error_within_bound.PlannerExpertAverageL2ErrorStatistics 3 | _convert_: 'all' 4 | name: 'planner_expert_average_l2_error_within_bound' 5 | category: 'Planning' 6 | metric_score_unit: 'float' 7 | comparison_horizon: [3, 5, 8] #seconds 8 | comparison_frequency: 1 #Hz 9 | max_average_l2_error_threshold: 8 #meter 10 | -------------------------------------------------------------------------------- /sledge/script/config/common/simulation_metric/simulation_closed_loop_nonreactive_agents.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | # - common_metrics # Uncomment this for common information about the scenario as specified in the config 3 | - low_level: # Low level metrics 4 | - ego_lane_change_statistics 5 | - ego_jerk_statistics 6 | - ego_lat_acceleration_statistics 7 | - ego_lon_acceleration_statistics 8 | - ego_lon_jerk_statistics 9 | - ego_yaw_acceleration_statistics 10 | - ego_yaw_rate_statistics 11 | - ego_progress_along_expert_route_statistics 12 | - high_level: # High level metrics that depend on low level metrics, they can also rely on the previously called high level metrics 13 | - drivable_area_compliance_statistics 14 | - no_ego_at_fault_collisions_statistics 15 
| - time_to_collision_within_bound_statistics 16 | - speed_limit_compliance_statistics 17 | - ego_is_comfortable_statistics 18 | - ego_is_making_progress_statistics 19 | - driving_direction_compliance_statistics 20 | -------------------------------------------------------------------------------- /sledge/script/config/common/simulation_metric/simulation_closed_loop_reactive_agents.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | # - common_metrics # Uncomment this for common information about the scenario as specified in the config 3 | - low_level: # Low level metrics 4 | - ego_lane_change_statistics 5 | - ego_jerk_statistics 6 | - ego_lat_acceleration_statistics 7 | - ego_lon_acceleration_statistics 8 | - ego_lon_jerk_statistics 9 | - ego_yaw_acceleration_statistics 10 | - ego_yaw_rate_statistics 11 | - ego_progress_along_expert_route_statistics 12 | - high_level: # High level metrics that depend on low level metrics, they can also rely on the previously called high level metrics 13 | - drivable_area_compliance_statistics 14 | - no_ego_at_fault_collisions_statistics 15 | - time_to_collision_within_bound_statistics 16 | - speed_limit_compliance_statistics 17 | - ego_is_comfortable_statistics 18 | - ego_is_making_progress_statistics 19 | - driving_direction_compliance_statistics 20 | -------------------------------------------------------------------------------- /sledge/script/config/common/simulation_metric/simulation_open_loop_boxes.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | # - common_metrics # Uncomment this for common information about the scenario as specified in the config 3 | - low_level: # Low level metrics 4 | - planner_expert_average_l2_error_within_bound_statistics 5 | - high_level: # High level metrics that depend on low level metrics, they can also rely on the previously called high level metrics 6 | - planner_expert_final_l2_error_within_bound_statistics 7 | - planner_miss_rate_within_bound_statistics 8 | - planner_expert_final_heading_error_within_bound_statistics 9 | - planner_expert_average_heading_error_within_bound_statistics 10 | -------------------------------------------------------------------------------- /sledge/script/config/common/worker/ray_distributed.yaml: -------------------------------------------------------------------------------- 1 | _target_: nuplan.planning.utils.multithreading.worker_ray.RayDistributed 2 | _convert_: 'all' 3 | master_node_ip: null # Set to a master node IP if you desire to connect to cluster remotely 4 | threads_per_node: null # Number of CPU threads to use per node, "null" means all threads available 5 | debug_mode: false # If true all tasks will be executed serially, mainly for testing 6 | log_to_driver: true # If true, all printouts from ray threads will be displayed in driver 7 | logs_subdir: 'logs' # Subdirectory to store logs inside the experiment directory 8 | use_distributed: false # Whether to use the built-in distributed mode of ray 9 | -------------------------------------------------------------------------------- /sledge/script/config/common/worker/sequential.yaml: -------------------------------------------------------------------------------- 1 | _target_: nuplan.planning.utils.multithreading.worker_sequential.Sequential 2 | _convert_: 'all' 3 | -------------------------------------------------------------------------------- /sledge/script/config/common/worker/single_machine_thread_pool.yaml: 
-------------------------------------------------------------------------------- 1 | _target_: nuplan.planning.utils.multithreading.worker_parallel.SingleMachineParallelExecutor 2 | _convert_: 'all' 3 | use_process_pool: False # If true, use ProcessPoolExecutor as the backend, otherwise uses ThreadPoolExecutor 4 | max_workers: null # Number of CPU workers (threads/processes) to use per node, "null" means all available 5 | -------------------------------------------------------------------------------- /sledge/script/config/diffusion/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/script/config/diffusion/__init__.py -------------------------------------------------------------------------------- /sledge/script/config/diffusion/accelerator/default_accelerator.yaml: -------------------------------------------------------------------------------- 1 | 2 | # accelerator: 3 | _target_: accelerate.Accelerator 4 | _convert_: all 5 | 6 | gradient_accumulation_steps: 1 7 | mixed_precision: "no" 8 | log_with: "tensorboard" 9 | 10 | project_config: 11 | _target_: accelerate.utils.ProjectConfiguration 12 | _convert_: all 13 | project_dir: ${output_dir} 14 | 15 | -------------------------------------------------------------------------------- /sledge/script/config/diffusion/default_diffusion.yaml: -------------------------------------------------------------------------------- 1 | hydra: 2 | run: 3 | dir: ${output_dir} 4 | output_subdir: ${output_dir}/code/hydra # Store hydra's config breakdown here for debugging 5 | searchpath: # Only configs in these paths are discoverable 6 | - pkg://sledge.script.config.common 7 | - pkg://sledge.script.experiments # Put experiment configs in script/experiments/ 8 | 9 | defaults: 10 | - default_experiment 11 | - default_common 12 | 13 | # Hugging Face accelerator config 14 | - accelerator: default_accelerator 15 | 16 | # optimizer settings 17 | - noise_scheduler: ddpm_scheduler 18 | - optimizer: adamw 19 | 20 | experiment_name: ${py_func}_diffusion 21 | 22 | num_epochs: 100 23 | 24 | debug_mode: true # runs one train batch and inference if true 25 | 26 | inference_epochs: 1 27 | inference_batch_size: 16 28 | num_classes: 4 29 | 30 | guidance_scale: 4.0 31 | num_inference_timesteps: 100 32 | 33 | autoencoder_checkpoint: ??? 34 | diffusion_checkpoint: null 35 | 36 | data_loader: 37 | params: 38 | batch_size: 64 39 | shuffle: true 40 | num_workers: 8 41 | 42 | lr_scheduler: 43 | name: "constant" 44 | step_rules: null 45 | num_warmup_steps: 1 46 | num_cycles: 1 47 | power: 1.0 48 | last_epoch: -1 49 | 50 | ema: 51 | use_ema: true 52 | max_decay: 0.9999 53 | inv_gamma: 1.0 54 | power: 0.75 55 | 56 | 57 | # Cache parameters 58 | cache: 59 | autoencoder_cache_path: ${oc.env:SLEDGE_EXP_ROOT}/caches/autoencoder_cache 60 | diffusion_cache_path: ${oc.env:SLEDGE_EXP_ROOT}/caches/diffusion_cache 61 | cleanup_diffusion_cache: false 62 | 63 | scenario_cache_path: ${oc.env:SLEDGE_EXP_ROOT}/caches/scenario_cache 64 | scenario_cache_size: 32 65 | 66 | # Mandatory parameters 67 | py_func: ???
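default_diffusion.yaml above is the top-level Hydra config for the diffusion stage; py_func and autoencoder_checkpoint are mandatory (???) and are normally supplied as command-line overrides by the scripts under scripts/diffusion. A sketch of composing it from Python, where the override values are placeholders and an editable install of the sledge package is assumed:

# Sketch only: compose the diffusion config with the mandatory "???" fields filled in.
# The SLEDGE entry points in sledge/script perform this composition in practice.
from hydra import compose, initialize_config_module

with initialize_config_module(config_module="sledge.script.config.diffusion", version_base=None):
    cfg = compose(
        config_name="default_diffusion",
        overrides=["py_func=training", "autoencoder_checkpoint=/path/to/autoencoder.ckpt"],
    )
print(cfg.data_loader.params.batch_size)  # 64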
68 | -------------------------------------------------------------------------------- /sledge/script/config/diffusion/noise_scheduler/ddpm_scheduler.yaml: -------------------------------------------------------------------------------- 1 | _target_: diffusers.schedulers.DDPMScheduler 2 | _convert_: 'all' 3 | 4 | # https://huggingface.co/docs/diffusers/api/schedulers/ddpm 5 | num_train_timesteps: 1000 # number of diffusion steps for training 6 | beta_start: 0.0015 # starting `beta` value of inference 7 | beta_end: 0.015 # final `beta` value 8 | beta_schedule: "linear" # linear, scaled_linear, or squaredcos_cap_v2 9 | clip_sample: False # Clip the predicted sample for numerical stability. (unsuitable for latent-space diffusion models) -------------------------------------------------------------------------------- /sledge/script/config/diffusion/optimizer/adamw.yaml: -------------------------------------------------------------------------------- 1 | _target_: torch.optim.AdamW 2 | _convert_: 'all' 3 | 4 | lr: 1e-4 # learning rate 5 | betas: [0.95, 0.999] # coefficients used for computing running averages of gradient and its square 6 | eps: 1e-8 # term added to the denominator to improve numerical stability 7 | weight_decay: 1e-6 # weight decay coefficient 8 | -------------------------------------------------------------------------------- /sledge/script/config/simulation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/script/config/simulation/__init__.py -------------------------------------------------------------------------------- /sledge/script/config/simulation/callback/serialization_callback.yaml: -------------------------------------------------------------------------------- 1 | serialization_callback: 2 | _target_: nuplan.planning.simulation.callback.serialization_callback.SerializationCallback 3 | _convert_: 'all' 4 | 5 | folder_name: simulation # Name of a folder in which the serialization will take place 6 | serialize_into_single_file: false # If true, the output will be a single file, if False, the data will be serialized 7 | # into one file per time step 8 | serialization_type: "json" # A way to serialize output, options: ["json", "pickle", "msgpack"] 9 | -------------------------------------------------------------------------------- /sledge/script/config/simulation/callback/simulation_log_callback.yaml: -------------------------------------------------------------------------------- 1 | simulation_log_callback: 2 | _target_: nuplan.planning.simulation.callback.simulation_log_callback.SimulationLogCallback 3 | _convert_: 'all' 4 | 5 | output_directory: ${output_dir} 6 | simulation_log_dir: simulation_log # Simulation log dir 7 | serialization_type: "msgpack" # A way to serialize output, options: ["pickle", "msgpack"] 8 | -------------------------------------------------------------------------------- /sledge/script/config/simulation/callback/timing_callback.yaml: -------------------------------------------------------------------------------- 1 | timing_callback: 2 | _target_: nuplan.planning.simulation.callback.timing_callback.TimingCallback 3 | _convert_: 'all' 4 | -------------------------------------------------------------------------------- /sledge/script/config/simulation/ego_controller/log_play_back_controller.yaml: -------------------------------------------------------------------------------- 1 | _target_: 
nuplan.planning.simulation.controller.log_playback.LogPlaybackController 2 | _convert_: 'all' 3 | -------------------------------------------------------------------------------- /sledge/script/config/simulation/ego_controller/motion_model/kinematic_bicycle_model.yaml: -------------------------------------------------------------------------------- 1 | _target_: nuplan.planning.simulation.controller.motion_model.kinematic_bicycle.KinematicBicycleModel 2 | _convert_: 'all' 3 | 4 | vehicle: ${scenario_builder.vehicle_parameters} 5 | -------------------------------------------------------------------------------- /sledge/script/config/simulation/ego_controller/perfect_tracking_controller.yaml: -------------------------------------------------------------------------------- 1 | _target_: nuplan.planning.simulation.controller.perfect_tracking.PerfectTrackingController 2 | _convert_: 'all' 3 | -------------------------------------------------------------------------------- /sledge/script/config/simulation/ego_controller/tracker/ilqr_tracker.yaml: -------------------------------------------------------------------------------- 1 | _target_: nuplan.planning.simulation.controller.tracker.ilqr_tracker.ILQRTracker 2 | _convert_: all 3 | 4 | n_horizon: 40 # Maximum time horizon (number of discrete time steps) that we should plan ahead. 5 | 6 | ilqr_solver: 7 | _target_: nuplan.planning.simulation.controller.tracker.ilqr.ilqr_solver.ILQRSolver 8 | _convert_: all 9 | 10 | solver_params: 11 | _target_: nuplan.planning.simulation.controller.tracker.ilqr.ilqr_solver.ILQRSolverParameters 12 | _convert_: all 13 | 14 | discretization_time: 0.2 # [s] Time discretization used for integration. 15 | 16 | # Cost weights for state variables [x, y, heading, velocity, steering angle] 17 | state_cost_diagonal_entries: [1.0, 1.0, 10.0, 0.0, 0.0] 18 | 19 | # Cost weights for input variables [acceleration, steering rate] 20 | input_cost_diagonal_entries: [1.0, 10.0] 21 | 22 | # Trust region cost weights for states and inputs. 23 | state_trust_region_entries: [1.0, 1.0, 1.0, 1.0, 1.0] 24 | input_trust_region_entries: [1.0, 1.0] 25 | 26 | max_ilqr_iterations: 20 # Maximum number of iterations to run iLQR before timeout. 27 | convergence_threshold: 1e-6 # Threshold for delta inputs below which we can terminate iLQR early. 28 | max_solve_time: 0.05 # [s] If defined, sets a maximum time to run a solve call of iLQR before terminating. 29 | 30 | max_acceleration: 3.0 # [m/s^2] Absolute value threshold on acceleration input. 31 | max_steering_angle: 1.047197 # [rad] Absolute value threshold on steering angle. 32 | max_steering_angle_rate: 0.5 # [rad/s] Absolute value threshold on steering rate input. 33 | min_velocity_linearization: 0.01 # [m/s] Absolute value threshold below which linearization velocity is modified. 34 | 35 | warm_start_params: 36 | _target_: nuplan.planning.simulation.controller.tracker.ilqr.ilqr_solver.ILQRWarmStartParameters 37 | _convert_: all 38 | 39 | k_velocity_error_feedback: 0.5 # Gain for initial velocity error for warm start acceleration. 40 | k_steering_angle_error_feedback: 0.05 # Gain for initial steering angle error for warm start steering rate. 41 | lookahead_distance_lateral_error: 15.0 # [m] Distance ahead for which we estimate lateral error. 42 | k_lateral_error: 0.1 # Gain for lateral error to compute steering angle feedback. 43 | jerk_penalty_warm_start_fit: 1e-4 # Penalty for jerk in velocity profile estimation. 
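ilqr_tracker.yaml above is a nested object config: Hydra recursively builds the solver and warm-start parameter objects, wraps them in an ILQRSolver, and passes that solver to the ILQRTracker. A sketch of building it standalone and tweaking one solver parameter (the relative path and installed nuplan-devkit/hydra-core packages are assumptions):

# Sketch only: recursive instantiation of the nested iLQR tracker config above,
# with one illustrative override.
from omegaconf import OmegaConf
from hydra.utils import instantiate

cfg = OmegaConf.load("sledge/script/config/simulation/ego_controller/tracker/ilqr_tracker.yaml")
cfg.ilqr_solver.solver_params.max_ilqr_iterations = 10  # example override
tracker = instantiate(cfg)  # ILQRTracker wrapping an ILQRSolver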
44 | curvature_rate_penalty_warm_start_fit: 1e-2 # Penalty for curvature rate in curvature profile estimation. 45 | -------------------------------------------------------------------------------- /sledge/script/config/simulation/ego_controller/tracker/lqr_tracker.yaml: -------------------------------------------------------------------------------- 1 | _target_: nuplan.planning.simulation.controller.tracker.lqr.LQRTracker 2 | _convert_: 'all' 3 | 4 | # LQR tuning 5 | q_longitudinal: [10.0] # velocity tracking cost gain 6 | r_longitudinal: [1.0] # acceleration tracking cost gain 7 | q_lateral: [1.0, 10.0, 0.0] # [lateral_error, heading_error, steering_angle] tracking cost gains 8 | r_lateral: [1.0] # steering_rate tracking cost gain 9 | discretization_time: 0.1 # [s] The time interval used for discretizing the continuous time dynamics. 10 | tracking_horizon: 10 # The number of time steps (at discretization_time interval) ahead we consider for LQR. 11 | 12 | # Parameters for velocity and curvature estimation. 13 | jerk_penalty: 1e-4 # Penalty for jerk in velocity profile estimation. 14 | curvature_rate_penalty: 1e-2 # Penalty for curvature rate in curvature profile estimation. 15 | 16 | # Stopping logic 17 | stopping_proportional_gain: 0.5 # Proportional controller tuning for stopping controller 18 | stopping_velocity: 0.2 # [m/s] Velocity threshold for stopping 19 | -------------------------------------------------------------------------------- /sledge/script/config/simulation/ego_controller/two_stage_controller.yaml: -------------------------------------------------------------------------------- 1 | _target_: nuplan.planning.simulation.controller.two_stage_controller.TwoStageController 2 | _convert_: 'all' 3 | 4 | defaults: 5 | - tracker: lqr_tracker 6 | - motion_model: kinematic_bicycle_model 7 | -------------------------------------------------------------------------------- /sledge/script/config/simulation/main_callback/completion_callback.yaml: -------------------------------------------------------------------------------- 1 | completion_callback: 2 | _target_: nuplan.planning.simulation.main_callback.completion_callback.CompletionCallback 3 | _convert_: 'all' 4 | 5 | output_dir: ${output_dir} 6 | challenge_name: ${job_name} 7 | -------------------------------------------------------------------------------- /sledge/script/config/simulation/main_callback/metric_aggregator_callback.yaml: -------------------------------------------------------------------------------- 1 | metric_aggregator_callback: 2 | _target_: nuplan.planning.simulation.main_callback.metric_aggregator_callback.MetricAggregatorCallback 3 | _convert_: 'all' 4 | 5 | metric_save_path: ${output_dir}/${metric_dir} # Path of a folder in which the metrics are saved 6 | -------------------------------------------------------------------------------- /sledge/script/config/simulation/main_callback/metric_file_callback.yaml: -------------------------------------------------------------------------------- 1 | metric_file_callback: 2 | _target_: nuplan.planning.simulation.main_callback.metric_file_callback.MetricFileCallback 3 | _convert_: 'all' 4 | 5 | metric_file_output_path: ${output_dir}/${metric_dir} # Path to save metric files 6 | scenario_metric_paths: # A list of paths in which scenario metrics are saved 7 | - ${output_dir}/${metric_dir} 8 | delete_scenario_metric_files: True # Delete previous scenario metric files 9 | -------------------------------------------------------------------------------- 
/sledge/script/config/simulation/main_callback/metric_summary_callback.yaml: -------------------------------------------------------------------------------- 1 | metric_summary_callback: 2 | _target_: nuplan.planning.simulation.main_callback.metric_summary_callback.MetricSummaryCallback 3 | _convert_: 'all' 4 | 5 | metric_save_path: ${output_dir}/${metric_dir} # Path to saved metric files 6 | metric_aggregator_save_path: ${aggregator_save_path} # Path to saved aggregated files 7 | summary_output_path: ${output_dir}/summary 8 | num_bins: 20 9 | pdf_file_name: 'summary.pdf' 10 | -------------------------------------------------------------------------------- /sledge/script/config/simulation/main_callback/publisher_callback.yaml: -------------------------------------------------------------------------------- 1 | publisher_callback: 2 | _target_: nuplan.planning.simulation.main_callback.publisher_callback.PublisherCallback 3 | _convert_: 'all' 4 | 5 | s3_client: null 6 | s3_bucket: null 7 | remote_prefix: null 8 | 9 | # Path to the directories 10 | uploads: 11 | metrics: 12 | upload: True 13 | save_path: ${output_dir}/${metric_dir} # Path of a folder in which the metrics are saved 14 | remote_path: ${job_name}/${metric_dir} # Path of a folder in which the metrics are saved 15 | aggregator_metric: 16 | upload: True 17 | save_path: ${output_dir}/${aggregator_metric_dir} # Path of a folder in which the aggregated metrics are saved 18 | remote_path: ${aggregator_metric_dir} # Path of a folder in which the aggregated metrics are saved 19 | simulation: 20 | upload: False 21 | save_path: ${output_dir}/simulation_log # Path of a folder in which the simulation output is saved 22 | remote_path: ${job_name}/simulation_log # Path of a folder in which the simulation output is saved 23 | runner_report: 24 | upload: True 25 | save_path: ${output_dir}/runner_report.parquet # Path of a folder in which the simulation output is saved 26 | remote_path: ${job_name} # Path of a folder in which the simulation output is saved 27 | passed: 28 | upload: True 29 | save_path: ${output_dir}/validation-results/passed.txt # Path of a folder in which the validation result is saved 30 | remote_path: validation-results # Path of a folder in which the simulation output is saved 31 | failed: 32 | upload: True 33 | save_path: ${output_dir}/validation-results/failed.txt # Path of a folder in which the validation result is saved 34 | remote_path: validation-results # Path of a folder in which the simulation output is saved 35 | completed: 36 | upload: True 37 | save_path: ${output_dir}/simulation-results # Path of a folder in which the validation result is saved 38 | remote_path: simulation-results # Path of a folder in which the simulation output is saved 39 | submission_logs: 40 | upload: True 41 | save_path: /tmp/submission.log # Path of a folder in which the validation result is saved 42 | remote_path: ${job_name}/submission.log # Path of a folder in which the simulation output is saved 43 | -------------------------------------------------------------------------------- /sledge/script/config/simulation/main_callback/time_callback.yaml: -------------------------------------------------------------------------------- 1 | time_callback: 2 | _target_: nuplan.planning.simulation.main_callback.time_callback.TimeCallback 3 | _convert_: 'all' 4 | -------------------------------------------------------------------------------- /sledge/script/config/simulation/main_callback/validation_callback.yaml: 
-------------------------------------------------------------------------------- 1 | validation_callback: 2 | _target_: nuplan.planning.simulation.main_callback.validation_callback.ValidationCallback 3 | _convert_: 'all' 4 | 5 | output_dir: ${output_dir} 6 | validation_dir_name: 'validation-results' 7 | -------------------------------------------------------------------------------- /sledge/script/config/simulation/metric_aggregator/closed_loop_reactive_agents_weighted_average.yaml: -------------------------------------------------------------------------------- 1 | closed_loop_reactive_agents_weighted_average: 2 | _target_: nuplan.planning.metrics.aggregator.weighted_average_metric_aggregator.WeightedAverageMetricAggregator 3 | name: 'closed_loop_reactive_agents_weighted_average' 4 | metric_weights: # Below we list the metrics used in the scenario scoring function and their corresponding weights to calculate a weighted average score for each scenario, 5 | # if not specified, the weight is set as default. 6 | # metric name : metric weight in the weighted average function 7 | ego_progress_along_expert_route: 5.0 # This metric has the highest weight equal to 5.0 in the weighted average function, its base score can take a value in [0,1] depending on the ratio of ego to expert progress 8 | time_to_collision_within_bound: 5.0 # This metric has the highest weight equal to 5.0 in the weighted average function, its base score can be 0 or 1 depending on the minimum time to collision threshold 9 | speed_limit_compliance: 4.0 # This metric has a weight equal to 4.0 in the weighted average function, its base score can take a value in [0,1] depending on the amount and duration of over-speeding 10 | ego_is_comfortable: 2.0 # This metric has the lowest weight equal to 2.0 in the weighted average function, its base score can be 0 or 1 depending on the comfort thresholds on acceleration, jerk and yaw. 11 | default: 1.0 12 | file_name: closed_loop_reactive_agents_weighted_average_metrics_${now:${date_format}} 13 | # The scenario score is defined as the weighted average score of the metrics listed above, multiplied by the score of the multiple_metrics below. 14 | multiple_metrics: 15 | - no_ego_at_fault_collisions # This metric score can be 0, 0.5 or 1 depending on whether there is an at-fault collision with VRUs, vehicles or objects 16 | - drivable_area_compliance # This metric score can be 0 or 1 depending on whether ego drives outside the drivable area 17 | - ego_is_making_progress # This metric score can be 0 or 1 depending on whether ego makes progress more than a minimum threshold compared to expert's progress 18 | - driving_direction_compliance # This metric score can be 0 or 0.5 or 1 depending on how much ego drives in the opposite direction if any 19 | challenge_name: ${job_name} 20 | -------------------------------------------------------------------------------- /sledge/script/config/simulation/metric_aggregator/default_weighted_average.yaml: -------------------------------------------------------------------------------- 1 | weighted_average_metric_aggregator: 2 | _target_: nuplan.planning.metrics.aggregator.weighted_average_metric_aggregator.WeightedAverageMetricAggregator 3 | name: 'weighted_average_metric_aggregator' 4 | metric_weights: # Below we list the metrics used in the scenario scoring function and their corresponding weights to calculate a weighted average score for each scenario, 5 | # if not specified, the weight is set as default.
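# Worked example of the aggregation (illustrative numbers, not taken from any run): the aggregator computes a
# weighted average sum_i(w_i * s_i) / sum_i(w_i) over the per-metric scores s_i and then multiplies the result by
# the product of the multiple_metrics scores. For instance, weights {5, 5, 4, 2} with base scores {0.8, 1, 1, 0}
# and a multiplier of 1 yield (5*0.8 + 5*1 + 4*1 + 2*0) / (5 + 5 + 4 + 2) = 13 / 16 = 0.8125.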
6 | # metric name : metric weight in the weighted average function 7 | default: 1.0 8 | file_name: weighted_average_metrics_${now:${date_format}} 9 | # The scenario score is defined as the weighted average score of the metrics listed above, multiplied by the score of the multiple_metrics below. 10 | multiple_metrics: null 11 | challenge_name: null 12 | -------------------------------------------------------------------------------- /sledge/script/config/simulation/observation/sledge_agents_observation.yaml: -------------------------------------------------------------------------------- 1 | _target_: sledge.simulation.observation.sledge_idm_agents.SledgeIDMAgents 2 | _convert_: 'all' 3 | target_velocity: 10 # Desired velocity in free traffic [m/s] 4 | min_gap_to_lead_agent: 1.0 # Minimum relative distance to lead vehicle [m] 5 | headway_time: 1.5 # Desired time headway. The minimum possible time to the vehicle in front [s] 6 | accel_max: 1.0 # maximum acceleration [m/s^2] 7 | decel_max: 2.0 # maximum deceleration (positive value) [m/s^2] 8 | static_detections_types: ["BARRIER", "CZONE_SIGN", "TRAFFIC_CONE", "GENERIC_OBJECT"] # Open-loop detections to include 9 | minimum_path_length: 30 # [m] The minimum path length to maintain 10 | radius: 64 # [m] Only agents within this radius around the ego will be simulated. 11 | -------------------------------------------------------------------------------- /sledge/script/config/simulation/planner/pdm_closed_planner.yaml: -------------------------------------------------------------------------------- 1 | pdm_closed_planner: 2 | _target_: sledge.simulation.planner.pdm_planner.pdm_closed_planner.PDMClosedPlanner 3 | _convert_: 'all' 4 | 5 | # parameters for output trajectory 6 | trajectory_sampling: 7 | _target_: nuplan.planning.simulation.trajectory.trajectory_sampling.TrajectorySampling 8 | _convert_: 'all' 9 | num_poses: 80 # target future poses 10 | interval_length: 0.1 # interval of samples [s] 11 | 12 | # parameters for proposals 13 | proposal_sampling: 14 | _target_: nuplan.planning.simulation.trajectory.trajectory_sampling.TrajectorySampling 15 | _convert_: 'all' 16 | num_poses: 40 # target future poses 17 | interval_length: 0.1 # interval of samples [s] 18 | 19 | idm_policies: 20 | _target_: sledge.simulation.planner.pdm_planner.proposal.batch_idm_policy.BatchIDMPolicy 21 | _convert_: 'all' 22 | speed_limit_fraction: [0.2,0.4,0.6,0.8,1.0] # Target velocity as fractions of current speed-limit 23 | fallback_target_velocity: 15.0 # Desired fallback velocity in free traffic [m/s] 24 | min_gap_to_lead_agent: 1.0 # Minimum relative distance to lead vehicle [m] 25 | headway_time: 1.5 # Desired time headway. 
The minimum possible time to the vehicle in front [s] 26 | accel_max: 1.5 # Maximum acceleration [m/s^2] 27 | decel_max: 3.0 # Maximum deceleration (positive value) [m/s^2] 28 | 29 | lateral_offsets: [-1.0, 1.0] # Signed offsets from centerline (or null) [m] 30 | map_radius: 50 # Radius to consider around ego [m] 31 | -------------------------------------------------------------------------------- /sledge/script/config/simulation/simulation_time_controller/step_simulation_time_controller.yaml: -------------------------------------------------------------------------------- 1 | _target_: nuplan.planning.simulation.simulation_time_controller.step_simulation_time_controller.StepSimulationTimeController 2 | _convert_: 'all' 3 | -------------------------------------------------------------------------------- /sledge/script/config/sledgeboard/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/script/config/sledgeboard/__init__.py -------------------------------------------------------------------------------- /sledge/script/config/sledgeboard/default_sledgeboard.yaml: -------------------------------------------------------------------------------- 1 | hydra: 2 | run: 3 | dir: . 4 | output_subdir: null # Store hydra's config breakdown here for debugging 5 | searchpath: # Only in these paths are discoverable 6 | - pkg://sledge.script.config.common 7 | - pkg://sledge.script.experiments # Put experiments configs in script/experiments/ 8 | 9 | 10 | defaults: 11 | - default_common 12 | - simulation_metric: null 13 | # - default_metrics # TODO 14 | - override hydra/job_logging: none # Disable hydra's logging 15 | - override hydra/hydra_logging: none # Disable hydra's logging 16 | 17 | log_config: False # Whether to log the final config after all overrides and interpolations 18 | port_number: 5006 19 | simulation_path: null 20 | resource_prefix: null 21 | profiler_path: null 22 | async_scenario_rendering: True # Setting this to True will improve UX by showing the scenario canvas as early as possible and then drawing individual plots as their data become available 23 | 24 | # Maximum frames to render in the scenario tab per second, must be between 1-60. 25 | # Use lower values when running SledgeBoard in the cloud to prevent frame queues due to latency. 
The rule of thumb 26 | # is to match the frame rate with the expected latency, e.g 5Hz for 200ms round-trip latency: 27 | scenario_rendering_frame_rate_cap_hz: 60 28 | -------------------------------------------------------------------------------- /sledge/script/experiments/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/script/experiments/__init__.py -------------------------------------------------------------------------------- /sledge/script/experiments/autoencoder/training_rvae_model.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | experiment_name: training_rvae_model 3 | py_func: training 4 | objective_aggregate_mode: sum 5 | 6 | defaults: 7 | - override /data_augmentation: 8 | - rvae_augmentation 9 | 10 | - override /objective: 11 | - rvae_lines_objective 12 | - rvae_vehicles_objective 13 | - rvae_pedestrians_objective 14 | - rvae_static_objects_objective 15 | - rvae_green_lights_objective 16 | - rvae_red_lights_objective 17 | - rvae_ego_objective 18 | - kl_objective 19 | 20 | - override /matching: 21 | - rvae_lines_matching 22 | - rvae_vehicles_matching 23 | - rvae_pedestrians_matching 24 | - rvae_static_objects_matching 25 | - rvae_green_lights_matching 26 | - rvae_red_lights_matching 27 | 28 | - override /splitter: nuplan 29 | - override /autoencoder_model: rvae_model 30 | - override /training_metric: -------------------------------------------------------------------------------- /sledge/script/experiments/autoencoder/training_vae_model.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | experiment_name: training_vae_model 3 | py_func: training 4 | objective_aggregate_mode: sum 5 | 6 | defaults: 7 | - override /data_augmentation: 8 | - vae_augmentation 9 | 10 | - override /objective: 11 | - vae_bce_objective 12 | - kl_objective 13 | 14 | - override /splitter: nuplan 15 | - override /autoencoder_model: vae_model 16 | - override /training_metric: 17 | - override /matching: -------------------------------------------------------------------------------- /sledge/script/experiments/diffusion/training_dit_model.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | experiment_name: training_dit_model 3 | py_func: training 4 | 5 | defaults: 6 | 7 | - override /autoencoder_model: rvae_model 8 | - override /diffusion_model: dit_b_model 9 | - override /scenario_filter: null 10 | -------------------------------------------------------------------------------- /sledge/script/experiments/simulation/sledge_reactive_agents.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | job_name: sledge_reactive_agents 3 | 4 | defaults: 5 | - override /observation: sledge_agents_observation 6 | - override /ego_controller: two_stage_controller 7 | - override /planner: pdm_closed_planner 8 | - override /simulation_metric: simulation_closed_loop_reactive_agents 9 | - override /metric_aggregator: closed_loop_reactive_agents_weighted_average -------------------------------------------------------------------------------- /sledge/script/run_diffusion.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | from typing import Optional 4 | 5 | import 
hydra 6 | import pytorch_lightning as pl 7 | from omegaconf import DictConfig 8 | 9 | from nuplan.planning.training.experiments.training import TrainingEngine 10 | from nuplan.planning.script.builders.folder_builder import build_training_experiment_folder 11 | from nuplan.planning.script.builders.logging_builder import build_logger 12 | from nuplan.planning.script.utils import set_default_path 13 | 14 | from sledge.diffusion.experiments.training import run_training_diffusion 15 | from sledge.diffusion.experiments.scenario_caching import run_scenario_caching 16 | 17 | logging.getLogger("numba").setLevel(logging.WARNING) 18 | logger = logging.getLogger(__name__) 19 | 20 | # If set, use the env. variable to overwrite the default dataset and experiment paths 21 | set_default_path() 22 | 23 | # If set, use the env. variable to overwrite the Hydra config 24 | CONFIG_PATH = os.getenv("NUPLAN_HYDRA_CONFIG_PATH", "config/diffusion") 25 | 26 | if os.environ.get("NUPLAN_HYDRA_CONFIG_PATH") is not None: 27 | CONFIG_PATH = os.path.join("../../../../", CONFIG_PATH) 28 | 29 | if os.path.basename(CONFIG_PATH) != "diffusion": 30 | CONFIG_PATH = os.path.join(CONFIG_PATH, "diffusion") 31 | CONFIG_NAME = "default_diffusion" 32 | 33 | 34 | @hydra.main(config_path=CONFIG_PATH, config_name=CONFIG_NAME) 35 | def main(cfg: DictConfig) -> Optional[TrainingEngine]: 36 | """ 37 | Main entrypoint for diffusion experiments. 38 | :param cfg: omegaconf dictionary 39 | """ 40 | # Fix random seed 41 | pl.seed_everything(cfg.seed, workers=True) 42 | 43 | # Configure logger 44 | build_logger(cfg) 45 | 46 | # Create output storage folder 47 | build_training_experiment_folder(cfg=cfg) 48 | 49 | # Build worker 50 | # worker = build_worker(cfg) 51 | print(cfg.py_func) 52 | 53 | if cfg.py_func == "training": 54 | run_training_diffusion(cfg) 55 | elif cfg.py_func == "scenario_caching": 56 | run_scenario_caching(cfg) 57 | else: 58 | raise NameError(f"Function {cfg.py_func} does not exist") 59 | 60 | 61 | if __name__ == "__main__": 62 | main() 63 | -------------------------------------------------------------------------------- /sledge/script/run_sledgeboard.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | from pathlib import Path 4 | 5 | import hydra 6 | import nest_asyncio 7 | from hydra.utils import instantiate 8 | from omegaconf import DictConfig 9 | 10 | from nuplan.common.actor_state.vehicle_parameters import VehicleParameters 11 | from sledge.sledgeboard.sledgeboard import SledgeBoard 12 | from nuplan.planning.script.builders.scenario_building_builder import build_scenario_builder 13 | from nuplan.planning.script.builders.utils.utils_config import update_config_for_nuboard 14 | from nuplan.planning.script.utils import set_default_path 15 | 16 | logging.basicConfig(level=logging.INFO) 17 | logger = logging.getLogger(__name__) 18 | 19 | # If set, use the env. variable to overwrite the default dataset and experiment paths 20 | set_default_path() 21 | 22 | # If set, use the env. 
variable to overwrite the Hydra config 23 | CONFIG_PATH = os.getenv("NUPLAN_HYDRA_CONFIG_PATH", "config/sledgeboard") 24 | 25 | if os.environ.get("NUPLAN_HYDRA_CONFIG_PATH") is not None: 26 | CONFIG_PATH = os.path.join("../../../../", CONFIG_PATH) 27 | 28 | if os.path.basename(CONFIG_PATH) != "sledgeboard": 29 | CONFIG_PATH = os.path.join(CONFIG_PATH, "sledgeboard") 30 | CONFIG_NAME = "default_sledgeboard" 31 | 32 | nest_asyncio.apply() 33 | 34 | 35 | def initialize_sledgeboard(cfg: DictConfig) -> SledgeBoard: 36 | """ 37 | Sets up dependencies and instantiates a SledgeBoard object. 38 | :param cfg: DictConfig. Configuration that is used to run the experiment. 39 | :return: SledgeBoard object. 40 | """ 41 | # Update and override configs for sledge board 42 | update_config_for_nuboard(cfg=cfg) 43 | 44 | scenario_builder = build_scenario_builder(cfg) 45 | 46 | # Build vehicle parameters 47 | vehicle_parameters: VehicleParameters = instantiate(cfg.scenario_builder.vehicle_parameters) 48 | profiler_path = None 49 | if cfg.profiler_path: 50 | profiler_path = Path(cfg.profiler_path) 51 | 52 | sledgeboard = SledgeBoard( 53 | profiler_path=profiler_path, 54 | sledgeboard_paths=cfg.simulation_path, 55 | scenario_builder=scenario_builder, 56 | port_number=cfg.port_number, 57 | resource_prefix=cfg.resource_prefix, 58 | vehicle_parameters=vehicle_parameters, 59 | async_scenario_rendering=cfg.async_scenario_rendering, 60 | scenario_rendering_frame_rate_cap_hz=cfg.scenario_rendering_frame_rate_cap_hz, 61 | ) 62 | 63 | return sledgeboard 64 | 65 | 66 | @hydra.main(config_path=CONFIG_PATH, config_name=CONFIG_NAME) 67 | def main(cfg: DictConfig) -> None: 68 | """ 69 | Execute all available challenges simultaneously on the same scenario. 70 | :param cfg: DictConfig. Configuration that is used to run the experiment. 
71 | """ 72 | sledgeboard = initialize_sledgeboard(cfg) 73 | sledgeboard.run() 74 | 75 | 76 | if __name__ == "__main__": 77 | main() 78 | -------------------------------------------------------------------------------- /sledge/simulation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/simulation/__init__.py -------------------------------------------------------------------------------- /sledge/simulation/maps/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/simulation/maps/__init__.py -------------------------------------------------------------------------------- /sledge/simulation/maps/sledge_map/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/simulation/maps/sledge_map/__init__.py -------------------------------------------------------------------------------- /sledge/simulation/maps/sledge_map/sledge_roadblock.py: -------------------------------------------------------------------------------- 1 | from functools import cached_property 2 | from typing import List 3 | 4 | from shapely.geometry import Polygon 5 | 6 | from nuplan.common.maps.abstract_map_objects import LaneGraphEdgeMapObject, RoadBlockGraphEdgeMapObject, StopLine 7 | 8 | from sledge.simulation.maps.sledge_map.sledge_map_graph import SledgeMapGraph 9 | import sledge.simulation.maps.sledge_map.sledge_lane as sledge_lane 10 | 11 | 12 | class SledgeRoadBlock(RoadBlockGraphEdgeMapObject): 13 | """Implementation of Roadblock in sledge.""" 14 | 15 | def __init__(self, roadblock_id: str, sledge_map_graph: SledgeMapGraph): 16 | """ 17 | Initialize roadblock interface of sledge. 18 | NOTE: SledgeRoadBlock wrapper of a single lane with same id. 19 | :param roadblock_id: unique identifier of roadblock. 20 | :param sledge_map_graph: lane map graph interface in sledge. 21 | """ 22 | super().__init__(roadblock_id) 23 | self._sledge_map_graph = sledge_map_graph 24 | 25 | @cached_property 26 | def incoming_edges(self) -> List[RoadBlockGraphEdgeMapObject]: 27 | """Inherited from superclass.""" 28 | incoming_ids = list(self._sledge_map_graph.directed_lane_graph.predecessors(self.id)) 29 | return [SledgeRoadBlock(incoming_id, self._sledge_map_graph) for incoming_id in incoming_ids] 30 | 31 | @cached_property 32 | def outgoing_edges(self) -> List[RoadBlockGraphEdgeMapObject]: 33 | """Inherited from superclass.""" 34 | outgoing_ids = list(self._sledge_map_graph.directed_lane_graph.successors(self.id)) 35 | return [SledgeRoadBlock(outgoing_id, self._sledge_map_graph) for outgoing_id in outgoing_ids] 36 | 37 | @cached_property 38 | def interior_edges(self) -> List[LaneGraphEdgeMapObject]: 39 | """Inherited from superclass.""" 40 | # NOTE: Additional heuristic of grouping lanes in roadblock could be added. 
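        # In sledge, a roadblock id maps to exactly one lane with the same id (see the constructor note above),
        # so the interior edge list reduces to that single wrapped lane.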
41 | lane_ids = [self.id] 42 | return [sledge_lane.SledgeLane(lane_id, self._sledge_map_graph) for lane_id in lane_ids] 43 | 44 | @cached_property 45 | def polygon(self) -> Polygon: 46 | """Inherited from superclass.""" 47 | return self._sledge_map_graph.polygon_dict[self.id] 48 | 49 | @cached_property 50 | def children_stop_lines(self) -> List[StopLine]: 51 | """Inherited from superclass.""" 52 | raise NotImplementedError 53 | 54 | @cached_property 55 | def parallel_edges(self) -> List[RoadBlockGraphEdgeMapObject]: 56 | """Inherited from superclass.""" 57 | raise NotImplementedError 58 | -------------------------------------------------------------------------------- /sledge/simulation/observation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/simulation/observation/__init__.py -------------------------------------------------------------------------------- /sledge/simulation/observation/sledge_idm/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/simulation/observation/sledge_idm/__init__.py -------------------------------------------------------------------------------- /sledge/simulation/planner/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/simulation/planner/__init__.py -------------------------------------------------------------------------------- /sledge/simulation/planner/pdm_planner/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/simulation/planner/pdm_planner/__init__.py -------------------------------------------------------------------------------- /sledge/simulation/planner/pdm_planner/observation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/simulation/planner/pdm_planner/observation/__init__.py -------------------------------------------------------------------------------- /sledge/simulation/planner/pdm_planner/observation/pdm_observation_utils.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | 3 | from shapely.geometry import Polygon 4 | 5 | from nuplan.common.actor_state.ego_state import EgoState 6 | from nuplan.common.actor_state.state_representation import Point2D 7 | from nuplan.common.maps.abstract_map import AbstractMap 8 | from nuplan.common.maps.maps_datatypes import SemanticMapLayer 9 | 10 | from sledge.simulation.planner.pdm_planner.observation.pdm_occupancy_map import PDMOccupancyMap 11 | 12 | DRIVABLE_MAP_LAYERS = [SemanticMapLayer.ROADBLOCK, SemanticMapLayer.ROADBLOCK_CONNECTOR, SemanticMapLayer.CARPARK_AREA] 13 | 14 | 15 | def get_drivable_area_map( 16 | map_api: AbstractMap, 17 | ego_state: EgoState, 18 | map_radius: float = 50, 19 | ) -> PDMOccupancyMap: 20 | 21 | # query all drivable map elements around ego position 22 | position: Point2D = ego_state.center.point 23 | drivable_area = map_api.get_proximal_map_objects(position, map_radius, 
DRIVABLE_MAP_LAYERS) 24 | 25 | # collect lane polygons in list, save on-route indices 26 | drivable_polygons: List[Polygon] = [] 27 | drivable_polygon_ids: List[str] = [] 28 | 29 | for type in [SemanticMapLayer.ROADBLOCK, SemanticMapLayer.ROADBLOCK_CONNECTOR]: 30 | for roadblock in drivable_area[type]: 31 | for lane in roadblock.interior_edges: 32 | drivable_polygons.append(lane.polygon) 33 | drivable_polygon_ids.append(lane.id) 34 | 35 | for carpark in drivable_area[SemanticMapLayer.CARPARK_AREA]: 36 | drivable_polygons.append(carpark.polygon) 37 | drivable_polygon_ids.append(carpark.id) 38 | 39 | # create occupancy map with lane polygons 40 | drivable_area_map = PDMOccupancyMap(drivable_polygon_ids, drivable_polygons) 41 | 42 | return drivable_area_map 43 | -------------------------------------------------------------------------------- /sledge/simulation/planner/pdm_planner/pdm_closed_planner.py: -------------------------------------------------------------------------------- 1 | import gc 2 | import logging 3 | import warnings 4 | from typing import List, Optional, Type 5 | 6 | from nuplan.planning.simulation.observation.observation_type import DetectionsTracks, Observation 7 | from nuplan.planning.simulation.planner.abstract_planner import PlannerInitialization, PlannerInput 8 | from nuplan.planning.simulation.trajectory.abstract_trajectory import AbstractTrajectory 9 | from nuplan.planning.simulation.trajectory.trajectory_sampling import TrajectorySampling 10 | 11 | from sledge.simulation.planner.pdm_planner.abstract_pdm_closed_planner import AbstractPDMClosedPlanner 12 | from sledge.simulation.planner.pdm_planner.observation.pdm_observation_utils import get_drivable_area_map 13 | from sledge.simulation.planner.pdm_planner.proposal.batch_idm_policy import BatchIDMPolicy 14 | 15 | warnings.filterwarnings("ignore", category=RuntimeWarning) 16 | logger = logging.getLogger(__name__) 17 | 18 | 19 | class PDMClosedPlanner(AbstractPDMClosedPlanner): 20 | """PDM-Closed planner class.""" 21 | 22 | # Inherited property, see superclass. 
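    # requires_scenario=False: PDM-Closed is built without privileged access to the ground-truth scenario.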
23 | requires_scenario: bool = False 24 | 25 | def __init__( 26 | self, 27 | trajectory_sampling: TrajectorySampling, 28 | proposal_sampling: TrajectorySampling, 29 | idm_policies: BatchIDMPolicy, 30 | lateral_offsets: Optional[List[float]], 31 | map_radius: float, 32 | ): 33 | """ 34 | Constructor for PDMClosedPlanner 35 | :param trajectory_sampling: Sampling parameters for final trajectory 36 | :param proposal_sampling: Sampling parameters for proposals 37 | :param idm_policies: BatchIDMPolicy class 38 | :param lateral_offsets: centerline offsets for proposals (optional) 39 | :param map_radius: radius around ego to consider 40 | """ 41 | super(PDMClosedPlanner, self).__init__( 42 | trajectory_sampling, 43 | proposal_sampling, 44 | idm_policies, 45 | lateral_offsets, 46 | map_radius, 47 | ) 48 | 49 | def initialize(self, initialization: PlannerInitialization) -> None: 50 | """Inherited, see superclass.""" 51 | self._iteration = 0 52 | self._map_api = initialization.map_api 53 | self._load_route_dicts(initialization.route_roadblock_ids) 54 | gc.collect() 55 | 56 | def name(self) -> str: 57 | """Inherited, see superclass.""" 58 | return self.__class__.__name__ 59 | 60 | def observation_type(self) -> Type[Observation]: 61 | """Inherited, see superclass.""" 62 | return DetectionsTracks # type: ignore 63 | 64 | def compute_planner_trajectory(self, current_input: PlannerInput) -> AbstractTrajectory: 65 | """Inherited, see superclass.""" 66 | 67 | gc.disable() 68 | ego_state, _ = current_input.history.current_state 69 | 70 | # Apply route correction on first iteration (ego_state required) 71 | if self._iteration == 0: 72 | self._route_roadblock_correction(ego_state) 73 | 74 | # Update/Create drivable area polygon map 75 | self._drivable_area_map = get_drivable_area_map(self._map_api, ego_state, self._map_radius) 76 | 77 | trajectory = self._get_closed_loop_trajectory(current_input) 78 | 79 | self._iteration += 1 80 | return trajectory 81 | -------------------------------------------------------------------------------- /sledge/simulation/planner/pdm_planner/proposal/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/simulation/planner/pdm_planner/proposal/__init__.py -------------------------------------------------------------------------------- /sledge/simulation/planner/pdm_planner/proposal/pdm_proposal.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from typing import List 3 | 4 | from shapely.geometry import LineString 5 | 6 | from sledge.simulation.planner.pdm_planner.proposal.batch_idm_policy import BatchIDMPolicy 7 | from sledge.simulation.planner.pdm_planner.utils.pdm_path import PDMPath 8 | 9 | 10 | @dataclass 11 | class PDMProposal: 12 | """Dataclass for storing proposal information.""" 13 | 14 | proposal_idx: int 15 | lateral_idx: int 16 | longitudinal_idx: int 17 | path: PDMPath 18 | 19 | @property 20 | def linestring(self) -> LineString: 21 | """Getter for linestring of proposal's path.""" 22 | return self.path.linestring 23 | 24 | @property 25 | def length(self): 26 | """Getter for length [m] of proposal's path.""" 27 | return self.path.length 28 | 29 | 30 | class PDMProposalManager: 31 | """Class to store and manage lateral and longitudinal combination of proposals.""" 32 | 33 | def __init__( 34 | self, 35 | lateral_proposals: List[PDMPath], 36 | 
longitudinal_policies: BatchIDMPolicy, 37 | ): 38 | """ 39 | Constructor for PDMProposalManager 40 | :param lateral_proposals: list of path's to follow 41 | :param longitudinal_policies: IDM policy class (batch-wise) 42 | """ 43 | 44 | self._num_lateral_proposals: int = len(lateral_proposals) 45 | self._num_longitudinal_proposals: int = longitudinal_policies.num_policies 46 | self._longitudinal_policies: BatchIDMPolicy = longitudinal_policies 47 | 48 | self._proposals: List[PDMProposal] = [] 49 | proposal_idx = 0 50 | 51 | for lateral_idx in range(self._num_lateral_proposals): 52 | for longitudinal_idx in range(self._num_longitudinal_proposals): 53 | self._proposals.append( 54 | PDMProposal( 55 | proposal_idx=proposal_idx, 56 | lateral_idx=lateral_idx, 57 | longitudinal_idx=longitudinal_idx, 58 | path=lateral_proposals[lateral_idx], 59 | ) 60 | ) 61 | proposal_idx += 1 62 | 63 | def __len__(self) -> int: 64 | """Returns number of proposals (paths x policies).""" 65 | return len(self._proposals) 66 | 67 | def __getitem__(self, proposal_idx) -> PDMProposal: 68 | """ 69 | Returns the requested proposal. 70 | :param proposal_idx: index for each proposal 71 | :return: PDMProposal dataclass 72 | """ 73 | return self._proposals[proposal_idx] 74 | 75 | def update(self, speed_limit_mps: float) -> None: 76 | """ 77 | Updates target velocities of IDM policies with current speed-limit. 78 | :param speed_limit_mps: current speed-limit [m/s] 79 | """ 80 | self._longitudinal_policies.update(speed_limit_mps) 81 | 82 | @property 83 | def num_lateral_proposals(self) -> int: 84 | return self._num_lateral_proposals 85 | 86 | @property 87 | def num_longitudinal_proposals(self) -> int: 88 | return self._longitudinal_policies._num_longitudinal_proposals 89 | 90 | @property 91 | def max_target_velocity(self) -> float: 92 | return self._longitudinal_policies.max_target_velocity 93 | 94 | @property 95 | def longitudinal_policies(self) -> BatchIDMPolicy: 96 | return self._longitudinal_policies 97 | -------------------------------------------------------------------------------- /sledge/simulation/planner/pdm_planner/scoring/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/simulation/planner/pdm_planner/scoring/__init__.py -------------------------------------------------------------------------------- /sledge/simulation/planner/pdm_planner/scoring/pdm_scorer_utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import numpy.typing as npt 3 | from shapely import LineString, Polygon 4 | 5 | from nuplan.common.actor_state.state_representation import StateSE2 6 | from nuplan.common.actor_state.tracked_objects import TrackedObject 7 | from nuplan.planning.metrics.utils.collision_utils import CollisionType 8 | from nuplan.planning.simulation.observation.idm.utils import is_agent_behind, is_track_stopped 9 | 10 | from sledge.simulation.planner.pdm_planner.utils.pdm_enums import StateIndex 11 | 12 | 13 | def get_collision_type( 14 | state: npt.NDArray[np.float64], 15 | ego_polygon: Polygon, 16 | tracked_object: TrackedObject, 17 | tracked_object_polygon: Polygon, 18 | stopped_speed_threshold: float = 5e-02, 19 | ) -> CollisionType: 20 | """ 21 | Classify collision between ego and the track. 22 | :param ego_state: Ego's state at the current timestamp. 23 | :param tracked_object: Tracked object. 
24 | :param stopped_speed_threshold: Threshold for 0 speed due to noise. 25 | :return Collision type. 26 | """ 27 | 28 | ego_speed = np.hypot( 29 | state[StateIndex.VELOCITY_X], 30 | state[StateIndex.VELOCITY_Y], 31 | ) 32 | 33 | is_ego_stopped = float(ego_speed) <= stopped_speed_threshold 34 | 35 | center_point = tracked_object_polygon.centroid 36 | tracked_object_center = StateSE2(center_point.x, center_point.y, tracked_object.box.center.heading) 37 | 38 | ego_rear_axle_pose: StateSE2 = StateSE2(*state[StateIndex.STATE_SE2]) 39 | 40 | # Collisions at (close-to) zero ego speed 41 | if is_ego_stopped: 42 | collision_type = CollisionType.STOPPED_EGO_COLLISION 43 | 44 | # Collisions at (close-to) zero track speed 45 | elif is_track_stopped(tracked_object): 46 | collision_type = CollisionType.STOPPED_TRACK_COLLISION 47 | 48 | # Rear collision when both ego and track are not stopped 49 | elif is_agent_behind(ego_rear_axle_pose, tracked_object_center): 50 | collision_type = CollisionType.ACTIVE_REAR_COLLISION 51 | 52 | # Front bumper collision when both ego and track are not stopped 53 | elif LineString( 54 | [ 55 | ego_polygon.exterior.coords[0], 56 | ego_polygon.exterior.coords[3], 57 | ] 58 | ).intersects(tracked_object_polygon): 59 | collision_type = CollisionType.ACTIVE_FRONT_COLLISION 60 | 61 | # Lateral collision when both ego and track are not stopped 62 | else: 63 | collision_type = CollisionType.ACTIVE_LATERAL_COLLISION 64 | 65 | return collision_type 66 | -------------------------------------------------------------------------------- /sledge/simulation/planner/pdm_planner/simulation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/simulation/planner/pdm_planner/simulation/__init__.py -------------------------------------------------------------------------------- /sledge/simulation/planner/pdm_planner/simulation/pdm_simulator.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import numpy.typing as npt 3 | 4 | from nuplan.common.actor_state.ego_state import EgoState 5 | from nuplan.common.actor_state.state_representation import TimeDuration, TimePoint 6 | from nuplan.planning.simulation.simulation_time_controller.simulation_iteration import SimulationIteration 7 | from nuplan.planning.simulation.trajectory.trajectory_sampling import TrajectorySampling 8 | 9 | from sledge.simulation.planner.pdm_planner.simulation.batch_kinematic_bicycle import BatchKinematicBicycleModel 10 | from sledge.simulation.planner.pdm_planner.simulation.batch_lqr import BatchLQRTracker 11 | from sledge.simulation.planner.pdm_planner.utils.pdm_array_representation import ego_state_to_state_array 12 | 13 | 14 | class PDMSimulator: 15 | """ 16 | Re-implementation of nuPlan's simulation pipeline. Enables batch-wise simulation. 17 | """ 18 | 19 | def __init__(self, proposal_sampling: TrajectorySampling): 20 | """ 21 | Constructor of PDMSimulator. 
22 | :param proposal_sampling: Sampling parameters for proposals 23 | """ 24 | 25 | # time parameters 26 | self._proposal_sampling = proposal_sampling 27 | 28 | # simulation objects 29 | self._motion_model = BatchKinematicBicycleModel() 30 | self._tracker = BatchLQRTracker() 31 | 32 | def simulate_proposals( 33 | self, states: npt.NDArray[np.float64], initial_ego_state: EgoState 34 | ) -> npt.NDArray[np.float64]: 35 | """ 36 | Simulate all proposals over batch-dim 37 | :param initial_ego_state: ego-vehicle state at current iteration 38 | :param states: proposal states as array 39 | :return: simulated proposal states as array 40 | """ 41 | 42 | # TODO: find cleaner way to load parameters 43 | # set parameters of motion model and tracker 44 | self._motion_model._vehicle = initial_ego_state.car_footprint.vehicle_parameters 45 | self._tracker._discretization_time = self._proposal_sampling.interval_length 46 | 47 | proposal_states = states[:, : self._proposal_sampling.num_poses + 1] 48 | self._tracker.update(proposal_states) 49 | 50 | # state array representation for simulated vehicle states 51 | simulated_states = np.zeros(proposal_states.shape, dtype=np.float64) 52 | simulated_states[:, 0] = ego_state_to_state_array(initial_ego_state) 53 | 54 | # timing objects 55 | current_time_point = initial_ego_state.time_point 56 | delta_time_point = TimeDuration.from_s(self._proposal_sampling.interval_length) 57 | 58 | current_iteration = SimulationIteration(current_time_point, 0) 59 | next_iteration = SimulationIteration(current_time_point + delta_time_point, 1) 60 | 61 | for time_idx in range(1, self._proposal_sampling.num_poses + 1): 62 | sampling_time: TimePoint = next_iteration.time_point - current_iteration.time_point 63 | 64 | command_states = self._tracker.track_trajectory( 65 | current_iteration, 66 | next_iteration, 67 | simulated_states[:, time_idx - 1], 68 | ) 69 | 70 | simulated_states[:, time_idx] = self._motion_model.propagate_state( 71 | states=simulated_states[:, time_idx - 1], 72 | command_states=command_states, 73 | sampling_time=sampling_time, 74 | ) 75 | 76 | current_iteration = next_iteration 77 | next_iteration = SimulationIteration(current_iteration.time_point + delta_time_point, 1 + time_idx) 78 | 79 | return simulated_states 80 | -------------------------------------------------------------------------------- /sledge/simulation/planner/pdm_planner/utils/pdm_geometry_utils.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | 3 | import numpy as np 4 | import numpy.typing as npt 5 | 6 | from nuplan.common.actor_state.state_representation import StateSE2 7 | 8 | from sledge.simulation.planner.pdm_planner.utils.pdm_enums import SE2Index 9 | 10 | 11 | def normalize_angle(angle): 12 | """ 13 | Map an angle into the range [-π, π] 14 | :param angle: any angle as float 15 | :return: normalized angle 16 | """ 17 | return np.arctan2(np.sin(angle), np.cos(angle)) 18 | 19 | 20 | def parallel_discrete_path(discrete_path: List[StateSE2], offset: float) -> List[StateSE2]: 21 | """ 22 | Creates a parallel discrete path for a given offset.
23 | :param discrete_path: baseline path (x,y,θ) 24 | :param offset: parallel offset [m] 25 | :return: parallel discrete path 26 | """ 27 | parallel_discrete_path = [] 28 | for state in discrete_path: 29 | theta = state.heading + np.pi / 2 30 | x_new = state.x + np.cos(theta) * offset 31 | y_new = state.y + np.sin(theta) * offset 32 | parallel_discrete_path.append(StateSE2(x_new, y_new, state.heading)) 33 | return parallel_discrete_path 34 | 35 | 36 | def translate_lon_and_lat( 37 | centers: npt.NDArray[np.float64], 38 | headings: npt.NDArray[np.float64], 39 | lon: float, 40 | lat: float, 41 | ) -> npt.NDArray[np.float64]: 42 | """ 43 | Translate the position component of a centers point array 44 | :param centers: array to be translated 45 | :param headings: array with heading angles 46 | :param lon: [m] distance by which a point should be translated in longitudinal direction 47 | :param lat: [m] distance by which a point should be translated in lateral direction 48 | :return: array of translated coordinates 49 | """ 50 | half_pi = np.pi / 2.0 51 | translation: npt.NDArray[np.float64] = np.stack( 52 | [ 53 | (lat * np.cos(headings + half_pi)) + (lon * np.cos(headings)), 54 | (lat * np.sin(headings + half_pi)) + (lon * np.sin(headings)), 55 | ], 56 | axis=-1, 57 | ) 58 | return centers + translation 59 | 60 | 61 | def calculate_progress(path: List[StateSE2]) -> List[float]: 62 | """ 63 | Calculate the cumulative progress of a given path. 64 | :param path: a path consisting of StateSE2 as waypoints 65 | :return: a cumulative list of progress 66 | """ 67 | x_position = [point.x for point in path] 68 | y_position = [point.y for point in path] 69 | x_diff = np.diff(x_position) 70 | y_diff = np.diff(y_position) 71 | points_diff: npt.NDArray[np.float64] = np.concatenate(([x_diff], [y_diff]), axis=0, dtype=np.float64) 72 | progress_diff = np.append(0.0, np.linalg.norm(points_diff, axis=0)) 73 | return np.cumsum(progress_diff, dtype=np.float64) # type: ignore 74 | 75 | 76 | def convert_absolute_to_relative_se2_array( 77 | origin: StateSE2, state_se2_array: npt.NDArray[np.float64] 78 | ) -> npt.NDArray[np.float64]: 79 | """ 80 | Converts a StateSE2 array from global to relative coordinates.
81 | :param origin: origin pose of relative coords system 82 | :param state_se2_array: array of SE2 states with (x,y,θ) in last dim 83 | :return: SE2 coords array in relative coordinates 84 | """ 85 | assert len(SE2Index) == state_se2_array.shape[-1] 86 | 87 | theta = -origin.heading 88 | origin_array = np.array([[origin.x, origin.y, origin.heading]], dtype=np.float64) 89 | 90 | R = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) 91 | 92 | points_rel = state_se2_array - origin_array 93 | points_rel[..., :2] = points_rel[..., :2] @ R.T 94 | points_rel[:, 2] = normalize_angle(points_rel[:, 2]) 95 | 96 | return points_rel 97 | -------------------------------------------------------------------------------- /sledge/simulation/scenarios/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/simulation/scenarios/__init__.py -------------------------------------------------------------------------------- /sledge/simulation/scenarios/sledge_scenario/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/simulation/scenarios/sledge_scenario/__init__.py -------------------------------------------------------------------------------- /sledge/sledgeboard/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/sledgeboard/__init__.py -------------------------------------------------------------------------------- /sledge/sledgeboard/base/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/sledgeboard/base/__init__.py -------------------------------------------------------------------------------- /sledge/sledgeboard/resource/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/sledgeboard/resource/__init__.py -------------------------------------------------------------------------------- /sledge/sledgeboard/resource/css/cloud.css: -------------------------------------------------------------------------------- 1 | .cloud-input-form { 2 | display: None; 3 | position: fixed; 4 | left: 30%; 5 | top: 20%; 6 | z-index: 3000; 7 | } 8 | 9 | .cloud-tab-section { 10 | height: 95vh; 11 | width: 100%; 12 | } 13 | 14 | .cloud-tab-section a { 15 | color: #DE7061; 16 | } 17 | 18 | .col-extend-height { 19 | height: 100%; 20 | width: 100%; 21 | } 22 | 23 | .panel-body-top-input-form { 24 | margin-top: 10px; 25 | height: 8%; 26 | } 27 | 28 | .panel-extend-height { 29 | height: 100%; 30 | width: 100%; 31 | } 32 | 33 | .panel-body-extend-height { 34 | height: 100%; 35 | width: 100%; 36 | } 37 | 38 | .cloud-table-section { 39 | margin-top: 5px; 40 | height: 90% !important; 41 | } 42 | 43 | .cloud-tab-section .cloud-table-section .bk-root { 44 | height: 100%; 45 | box-sizing: border-box !important; 46 | } 47 | 48 | .s3-data-table { 49 | width: 100% !important; 50 | height: 100% !important; 51 | margin: auto !important; 52 | overflow-y: auto; 53 | overflow-x: auto; 54 | 
box-sizing: border-box !important; 55 | } 56 | 57 | .s3-data-table .slick-cell { 58 | border-right: 1px solid #ddd; 59 | word-wrap: break-word; 60 | word-break: break-all; 61 | overflow-wrap: break-word; 62 | white-space: normal; 63 | font-size: 14px; 64 | cursor: pointer; 65 | } 66 | 67 | .s3-data-table .slick-row { 68 | border-left: 1px solid #ddd !important; 69 | border-bottom: 1px solid #ddd !important; 70 | box-sizing: border-box !important; 71 | } 72 | 73 | .s3-data-table .slick-pane-header { 74 | height: 30px; 75 | border-top: 1px solid #ddd; 76 | width: 100% !important; 77 | box-sizing: border-box !important; 78 | } 79 | 80 | .s3-data-table .slick-pane-top { 81 | border-top: 3px solid #ddd; 82 | } 83 | 84 | .s3-data-table .slick-header-column { 85 | height: 30px !important; 86 | word-wrap: break-word; 87 | word-break: break-all; 88 | overflow-wrap: break-word; 89 | white-space: normal; 90 | font-size: 12px; 91 | font-weight: bold; 92 | box-sizing: border-box !important; 93 | } 94 | 95 | .s3-tab-modal-query-btn { 96 | width: unset !important; 97 | height: unset !important; 98 | margin: unset !important; 99 | } 100 | 101 | .s3-tab-modal-query-btn .bk-btn-default { 102 | background-color: #DE7061 !important; 103 | border-color: #DE7061 !important; 104 | color: white; 105 | } 106 | 107 | .title { 108 | padding: 6px 12px; 109 | } 110 | 111 | .modal-input-group .input-group .bk-root .bk { 112 | height: unset !important; 113 | margin: unset !important; 114 | } 115 | 116 | .cloud-input-form .modal-input-group .input-group .bk-root .bk { 117 | width: 100% !important; 118 | } 119 | 120 | .input-group-addon { 121 | min-width: 150px; 122 | } 123 | 124 | .bottom-marg-10 { 125 | margin-bottom: 10px; 126 | } 127 | 128 | .breadcrumb { 129 | margin-top: 0; 130 | margin-bottom: 0; 131 | } 132 | 133 | .s3-download-text-input { 134 | display: inline-block; 135 | } 136 | 137 | .cloud-download-section .bk-root { 138 | display: inline-block; 139 | } 140 | .panel-body-top-error-message { 141 | height: 8%; 142 | color: red; 143 | } 144 | 145 | .s3-download-btn .bk-btn-default { 146 | background-color: #DE7061 !important; 147 | border-color: #DE7061 !important; 148 | color: white; 149 | } 150 | 151 | .s3-download-btn .bk-btn[disabled] { 152 | opacity: unset !important; 153 | pointer-events: unset !important; 154 | } 155 | 156 | .s3-error-text { 157 | width: 100% !important; 158 | } 159 | 160 | #navbuttons .btn { 161 | padding: 3px 6px; 162 | } 163 | 164 | #cloud-modal-loading { 165 | position: absolute; 166 | top: 20%; 167 | left: 50%; 168 | visibility: hidden; 169 | } 170 | -------------------------------------------------------------------------------- /sledge/sledgeboard/resource/css/histogram.css: -------------------------------------------------------------------------------- 1 | .histogram-setting-form { 2 | display: None; 3 | position: fixed; 4 | left: 30%; 5 | top: 20%; 6 | z-index: 3000; 7 | } 8 | 9 | .histogram-setting-form .modal-body .input-group .bk-root { 10 | width: 100%; 11 | } 12 | 13 | .histogram-bin-spinner { 14 | width: 100% !important; 15 | } 16 | 17 | .histogram-tab-modal-query-btn { 18 | width: unset !important; 19 | height: unset !important; 20 | margin: unset !important; 21 | } 22 | 23 | .histogram-tab-modal-query-btn .bk-btn-default { 24 | background-color: #DE7061 !important; 25 | border-color: #DE7061 !important; 26 | color: white; 27 | } 28 | 29 | .histogram-plot-section { 30 | width: 100%; 31 | box-sizing: border-box; 32 | border: 1px solid #f0f0f0; 33 | height: 95%; 34 | } 35 
| 36 | .histogram-plot-section .planner-search-row { 37 | text-align: right; 38 | padding: 5px 24px; 39 | height: 30px; 40 | background: #fff; 41 | border-bottom: 1px solid #f0f0f0; 42 | } 43 | 44 | .histogram-plot-section .histogram-table-row { 45 | margin-top: 10px; 46 | height: 95%; 47 | } 48 | 49 | .histogram-plot-section .histogram-table-row .bk-root { 50 | height: 100%; 51 | } 52 | 53 | .histogram-default-div { 54 | width: 80% !important; 55 | font-size: 1.2em !important; 56 | } 57 | 58 | .histogram-plots { 59 | width: 100% !important; 60 | height: 100% !important; 61 | margin: auto !important; 62 | overflow-y: auto; 63 | overflow-x: auto; 64 | } 65 | 66 | #histogram-loading { 67 | position: absolute; 68 | top: 20%; 69 | left: 50%; 70 | visibility: hidden; 71 | } 72 | 73 | .histogram-plots .bk-tooltip { 74 | overflow-y: auto; 75 | overflow-x: hidden; 76 | min-height: 100px; 77 | max-height: 250px; 78 | } 79 | 80 | .histogram-plots .bk .bk-left { 81 | left: 15px !important; 82 | } 83 | 84 | .histogram-plots .bk .bk-right { 85 | right: 15px !important; 86 | } 87 | -------------------------------------------------------------------------------- /sledge/sledgeboard/resource/css/overview.css: -------------------------------------------------------------------------------- 1 | .overview-table { 2 | width: 100% !important; 3 | height: 100% !important; 4 | margin: auto !important; 5 | overflow-y: auto; 6 | overflow-x: auto; 7 | box-sizing: border-box !important; 8 | } 9 | 10 | .overview-table .slick-cell { 11 | border-right: 1px solid #ddd; 12 | word-wrap: break-word; 13 | word-break: break-all; 14 | overflow-wrap: break-word; 15 | white-space: normal; 16 | font-size: 14px; 17 | cursor: pointer; 18 | } 19 | 20 | .overview-table .slick-row { 21 | border-left: 1px solid #ddd !important; 22 | border-bottom: 1px solid #ddd !important; 23 | box-sizing: border-box !important; 24 | } 25 | 26 | .overview-table .slick-pane-header { 27 | height: 30px; 28 | border-top: 1px solid #ddd; 29 | width: 100% !important; 30 | box-sizing: border-box !important; 31 | } 32 | 33 | .overview-table .slick-pane-top { 34 | border-top: 3px solid #ddd; 35 | } 36 | 37 | .overview-table .slick-header-column { 38 | height: 30px !important; 39 | word-wrap: break-word; 40 | word-break: break-all; 41 | overflow-wrap: break-word; 42 | white-space: normal; 43 | font-size: 12px; 44 | font-weight: bold; 45 | box-sizing: border-box !important; 46 | } 47 | 48 | .overview-section { 49 | width: 100%; 50 | box-sizing: border-box; 51 | border: 1px solid #f0f0f0; 52 | height: 95%; 53 | } 54 | 55 | .overview-section .planner-search-row { 56 | text-align: right; 57 | padding: 5px 24px; 58 | height: 30px; 59 | background: #fff; 60 | border-bottom: 1px solid #f0f0f0; 61 | } 62 | 63 | .overview-section .overview-table-row { 64 | margin-top: 10px; 65 | height: 95%; 66 | } 67 | 68 | .overview-section .overview-table-row .bk-root { 69 | height: 100%; 70 | } 71 | 72 | #overview-loading { 73 | position: absolute; 74 | top: 20%; 75 | left: 50%; 76 | visibility: hidden; 77 | } 78 | -------------------------------------------------------------------------------- /sledge/sledgeboard/resource/scripts/utils.js: -------------------------------------------------------------------------------- 1 | function openTab(evt, tab_name) { 2 | var i, tab_content, header_bar_tab_btn; 3 | tab_content = document.getElementsByClassName("tab-content"); 4 | const navEles = document.getElementsByClassName("nav-item"); 5 | 6 | for (const navEle of navEles) { 7 | const 
navName = navEle.innerText; 8 | navEle.style.fontWeight = 9 | tab_name === navName.toLowerCase() ? "bold" : "unset"; 10 | } 11 | for (i = 0; i < tab_content.length; i++) { 12 | tab_content[i].style.display = "none"; 13 | } 14 | document.getElementById(tab_name).style.display = "block"; 15 | evt.currentTarget.className += " active"; 16 | } 17 | 18 | function toggleNav() { 19 | const navbar = document.getElementsByClassName("navbar")[0]; 20 | const menuIcon = document.getElementById("menu-icon"); 21 | const file_header = document.getElementById("file-header"); 22 | 23 | // Close the menu bar 24 | if (file_header.style.marginLeft == "10rem" || file_header.style.marginLeft == "") { 25 | navbar.style.marginLeft = "-10rem"; 26 | file_header.style.marginLeft = 0; 27 | menuIcon.classList.add("icon-menu"); 28 | menuIcon.classList.remove("icon-arrow-left"); 29 | } else { 30 | navbar.style.marginLeft = 0; 31 | file_header.style.marginLeft = "10rem"; 32 | menuIcon.classList.add("icon-arrow-left"); 33 | menuIcon.classList.remove("icon-menu"); 34 | } 35 | } 36 | 37 | function openScenarioBar(evt, bar_name, svg_path_name) { 38 | const scenario_bar = document.getElementById(bar_name); 39 | const scenario_bar_svg_path = document.getElementById(svg_path_name); 40 | if (scenario_bar.style.display == "none" || scenario_bar.style.display == "") { 41 | scenario_bar.style.display = "block"; 42 | scenario_bar_svg_path.setAttribute("d", "M12 8l-6 6 1.41 1.41L12 10.83l4.59 4.58L18 14z"); 43 | } else { 44 | scenario_bar.style.display = "none"; 45 | scenario_bar_svg_path.setAttribute("d", "M16.59 8.59L12 13.17 7.41 8.59 6 10l6 6 6-6z"); 46 | } 47 | } 48 | 49 | function openModal(evt, modal_name) { 50 | const overlay = document.getElementsByClassName("overlay")[0]; 51 | overlay.style.display = "block"; 52 | 53 | document.getElementsByClassName(modal_name)[0].style.display = "block"; 54 | } 55 | 56 | function closeModal(evt, modal_name) { 57 | const overlay = document.getElementsByClassName("overlay")[0]; 58 | overlay.style.display = "none"; 59 | 60 | document.getElementsByClassName(modal_name)[0].style.display = "none"; 61 | } 62 | -------------------------------------------------------------------------------- /sledge/sledgeboard/resource/sledge_logo_transparent.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/sledgeboard/resource/sledge_logo_transparent.png -------------------------------------------------------------------------------- /sledge/sledgeboard/tabs/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/sledgeboard/tabs/__init__.py -------------------------------------------------------------------------------- /sledge/sledgeboard/tabs/config/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/sledgeboard/tabs/config/__init__.py -------------------------------------------------------------------------------- /sledge/sledgeboard/tabs/config/overview_tab_config.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from typing import Any, ClassVar, Dict, List, Optional 3 | 4 | 
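# The frozen dataclasses in this module bundle keyword arguments for the overview tab's Bokeh widgets; each
# get_config() returns a plain dict so it can be unpacked into the corresponding widget constructor, e.g.
# DataTable(**OverviewTabDataTableConfig.get_config(), source=source, columns=columns) -- illustrative usage,
# the actual wiring lives in the overview tab implementation.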
OVERVIEW_PLANNER_CHECKBOX_GROUP_NAME = 'overview_planner_checkbox_group' 5 | 6 | 7 | @dataclass 8 | class OverviewAggregatorData: 9 | """Aggregator metric data in the overview tab.""" 10 | 11 | aggregator_file_name: str # Aggregator output file name 12 | aggregator_type: str # Aggregator type 13 | planner_name: str # Planner name 14 | scenario_type: str # Scenario type 15 | num_scenarios: int # Number of scenarios in the type 16 | score: float # The aggregator scores for the scenario type 17 | 18 | 19 | @dataclass(frozen=True) 20 | class OverviewTabDefaultDataSourceDictConfig: 21 | """Config for the overview tab default data source tag.""" 22 | 23 | experiment: ClassVar[List[str]] = ['-'] 24 | scenario_type: ClassVar[List[str]] = ['-'] 25 | planner: ClassVar[List[str]] = [ 26 | 'No metric aggregator results, please add more experiments ' 'or adjust the search filter' 27 | ] 28 | 29 | @classmethod 30 | def get_config(cls) -> Dict[str, Any]: 31 | """Get configs as a dict.""" 32 | return {'experiment': cls.experiment, 'scenario_type': cls.scenario_type, 'planner': cls.planner} 33 | 34 | 35 | @dataclass(frozen=True) 36 | class OverviewTabExperimentTableColumnConfig: 37 | """Config for the overview tab experiment table column tag.""" 38 | 39 | field: ClassVar[str] = 'experiment' 40 | title: ClassVar[str] = 'Experiment' 41 | width: ClassVar[int] = 150 42 | sortable: ClassVar[bool] = False 43 | 44 | @classmethod 45 | def get_config(cls) -> Dict[str, Any]: 46 | """Get configs as a dict.""" 47 | return {'field': cls.field, 'title': cls.title, 'width': cls.width, 'sortable': cls.sortable} 48 | 49 | 50 | @dataclass(frozen=True) 51 | class OverviewTabScenarioTypeTableColumnConfig: 52 | """Config for the overview tab scenario type table column tag.""" 53 | 54 | field: ClassVar[str] = 'scenario_type' 55 | title: ClassVar[str] = 'Scenario Type (Number of Scenarios)' 56 | width: ClassVar[int] = 200 57 | sortable: ClassVar[bool] = False 58 | 59 | @classmethod 60 | def get_config(cls) -> Dict[str, Any]: 61 | """Get configs as a dict.""" 62 | return {'field': cls.field, 'title': cls.title, 'width': cls.width, 'sortable': cls.sortable} 63 | 64 | 65 | @dataclass(frozen=True) 66 | class OverviewTabPlannerTableColumnConfig: 67 | """Config for the overview tab planner table column tag.""" 68 | 69 | field: ClassVar[str] = 'planner' 70 | title: ClassVar[str] = 'Evaluation Score' 71 | sortable: ClassVar[bool] = False 72 | 73 | @classmethod 74 | def get_config(cls) -> Dict[str, Any]: 75 | """Get configs as a dict.""" 76 | return {'field': cls.field, 'title': cls.title, 'sortable': cls.sortable} 77 | 78 | 79 | @dataclass(frozen=True) 80 | class OverviewTabDataTableConfig: 81 | """Config for the overview tab planner data table tag.""" 82 | 83 | selectable: ClassVar[bool] = True 84 | row_height: ClassVar[int] = 80 85 | index_position: ClassVar[Optional[int]] = None 86 | name: ClassVar[str] = 'overview_table' 87 | css_classes: ClassVar[List[str]] = ['overview-table'] 88 | 89 | @classmethod 90 | def get_config(cls) -> Dict[str, Any]: 91 | """Get configs as a dict.""" 92 | return { 93 | 'selectable': cls.selectable, 94 | 'row_height': cls.row_height, 95 | 'index_position': cls.index_position, 96 | 'name': cls.name, 97 | 'css_classes': cls.css_classes, 98 | } 99 | -------------------------------------------------------------------------------- /sledge/sledgeboard/tabs/config/scenario_tab_config.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass, field 2 
--------------------------------------------------------------------------------
/sledge/sledgeboard/tabs/config/scenario_tab_config.py:
--------------------------------------------------------------------------------
from dataclasses import dataclass, field
from typing import Any, ClassVar, Dict, List, Tuple


@dataclass(frozen=True)
class ScenarioTabTitleDivConfig:
    """Config for the scenario tab title div tag."""

    text: ClassVar[str] = "-"
    name: ClassVar[str] = 'scenario_title_div'
    css_classes: ClassVar[List[str]] = ['scenario-tab-title-div']

    @classmethod
    def get_config(cls) -> Dict[str, Any]:
        """Get configs as a dict."""
        return {'text': cls.text, 'name': cls.name, 'css_classes': cls.css_classes}


@dataclass(frozen=True)
class ScenarioTabScenarioTokenMultiChoiceConfig:
    """Config for scenario tab scenario token multi choice tag."""

    max_items: ClassVar[int] = 1
    option_limit: ClassVar[int] = 10
    height: ClassVar[int] = 40
    placeholder: ClassVar[str] = "Scenario token"
    name: ClassVar[str] = 'scenario_token_multi_choice'
    css_classes: ClassVar[List[str]] = ['scenario-token-multi-choice']

    @classmethod
    def get_config(cls) -> Dict[str, Any]:
        """Get configs as a dict."""
        return {
            'max_items': cls.max_items,
            'option_limit': cls.option_limit,
            'height': cls.height,
            'placeholder': cls.placeholder,
            'name': cls.name,
            'css_classes': cls.css_classes,
        }


@dataclass(frozen=True)
class ScenarioTabModalQueryButtonConfig:
    """Config for scenario tab modal query button tag."""

    name: ClassVar[str] = 'scenario_modal_query_btn'
    label: ClassVar[str] = 'Query Scenario'
    css_classes: ClassVar[List[str]] = ['btn', 'btn-primary', 'modal-btn', 'scenario-tab-modal-query-btn']

    @classmethod
    def get_config(cls) -> Dict[str, Any]:
        """Get configs as a dict."""
        return {'name': cls.name, 'label': cls.label, 'css_classes': cls.css_classes}


@dataclass(frozen=True)
class ScenarioTabFrameButtonConfig:
    """Config for scenario tab's frame control buttons."""

    label: str
    margin: Tuple[int, int, int, int] = field(default_factory=lambda: (5, 19, 5, 35))  # Top, right, bottom, left
    css_classes: List[str] = field(default_factory=lambda: ["frame-control-button"])
    width: int = field(default_factory=lambda: 56)


# Global config instances
first_button_config = ScenarioTabFrameButtonConfig(label="first")
prev_button_config = ScenarioTabFrameButtonConfig(label="prev")
play_button_config = ScenarioTabFrameButtonConfig(label="play")
next_button_config = ScenarioTabFrameButtonConfig(label="next")
last_button_config = ScenarioTabFrameButtonConfig(label="last")
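Unlike the ClassVar-style configs above, ScenarioTabFrameButtonConfig is an ordinary dataclass, so its global instances can be expanded straight into Bokeh Button keyword arguments. A minimal sketch under that assumption (the real tab may construct its buttons differently):

# Illustrative sketch only: frame-control buttons from the global config instances above.
from dataclasses import asdict

from bokeh.models import Button

from sledge.sledgeboard.tabs.config.scenario_tab_config import first_button_config, next_button_config

first_button = Button(**asdict(first_button_config))  # label="first", shared margin/width/css_classes
next_button = Button(**asdict(next_button_config))    # label="next"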
--------------------------------------------------------------------------------
/sledge/sledgeboard/tabs/js_code/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/sledgeboard/tabs/js_code/__init__.py
--------------------------------------------------------------------------------
/sledge/sledgeboard/tabs/js_code/histogram_tab_js_code.py:
--------------------------------------------------------------------------------
from dataclasses import dataclass

from bokeh.models.callbacks import CustomJS


@dataclass(frozen=True)
class HistogramTabLoadingJSCode:
    """JS when loading in the histogram tab."""

    @classmethod
    def get_js_code(cls) -> CustomJS:
        """Get js code."""
        return CustomJS(
            args={},
            code="""
            cb_obj.tags = [window.outerWidth, window.outerHeight];
            document.getElementById('histogram-loading').style.visibility = 'visible';
            document.getElementById('histogram-plot-section').style.visibility = 'hidden';
            document.getElementById('histogram-setting-form').style.display = 'none';
            """,
        )


@dataclass(frozen=True)
class HistogramTabUpdateWindowsSizeJSCode:
    """JS when updating the window size in the histogram tab."""

    @classmethod
    def get_js_code(cls) -> CustomJS:
        """Get js code."""
        return CustomJS(
            args={},
            code="""
            console.log(cb_obj.tags);
            cb_obj.tags = [window.outerWidth, window.outerHeight];
            """,
        )


@dataclass(frozen=True)
class HistogramTabLoadingEndJSCode:
    """JS when loading is done in the histogram tab."""

    @classmethod
    def get_js_code(cls) -> CustomJS:
        """Get js code."""
        return CustomJS(
            args={},
            code="""
            document.getElementById('histogram-loading').style.visibility = 'hidden';
            document.getElementById('histogram-plot-section').style.visibility = 'visible';
            document.getElementById('overlay').style.display = 'none';
            """,
        )
--------------------------------------------------------------------------------
/sledge/sledgeboard/tabs/js_code/scenario_tab_js_code.py:
--------------------------------------------------------------------------------
from dataclasses import dataclass

from bokeh.models.callbacks import CustomJS


@dataclass(frozen=True)
class ScenarioTabLoadingJSCode:
    """JS when loading a simulation in the scenario tab."""

    @classmethod
    def get_js_code(cls) -> CustomJS:
        """Get js code."""
        return CustomJS(
            args={},
            code="""
            cb_obj.tags = [window.outerWidth, window.outerHeight];
            document.getElementById('scenario-loading').style.visibility = 'visible';
            document.getElementById('scenario-plot-section').style.visibility = 'hidden';
            document.getElementById('scenario-setting-form').style.display = 'none';
            """,
        )


@dataclass(frozen=True)
class ScenarioTabUpdateWindowsSizeJSCode:
    """JS when updating the window size in the scenario tab."""

    @classmethod
    def get_js_code(cls) -> CustomJS:
        """Get js code."""
        return CustomJS(
            args={},
            code="""
            cb_obj.tags = [window.outerWidth, window.outerHeight];
            """,
        )


@dataclass(frozen=True)
class ScenarioTabLoadingEndJSCode:
    """JS when loading a simulation is done in the scenario tab."""

    @classmethod
    def get_js_code(cls) -> CustomJS:
        """Get js code."""
        return CustomJS(
            args={},
            code="""
            document.getElementById('scenario-loading').style.visibility = 'hidden';
            document.getElementById('scenario-plot-section').style.visibility = 'visible';
            document.getElementById('overlay').style.display = 'none';
            """,
        )
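Both js_code modules follow the same pattern: a frozen dataclass whose get_js_code() returns a CustomJS snippet that toggles the tab's loading overlay and records the window size in cb_obj.tags. A hedged sketch of how such a snippet could be attached to a widget, here the scenario-token multi choice from scenario_tab_config.py (the event name and wiring are illustrative, not the tab's actual code):

# Illustrative sketch only: attach the loading JS to the scenario-token multi choice.
from bokeh.models import MultiChoice

from sledge.sledgeboard.tabs.config.scenario_tab_config import ScenarioTabScenarioTokenMultiChoiceConfig
from sledge.sledgeboard.tabs.js_code.scenario_tab_js_code import ScenarioTabLoadingJSCode

scenario_token_multi_choice = MultiChoice(**ScenarioTabScenarioTokenMultiChoiceConfig.get_config())
# Show the loading overlay as soon as the user picks a scenario token.
scenario_token_multi_choice.js_on_change("value", ScenarioTabLoadingJSCode.get_js_code())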
--------------------------------------------------------------------------------
/sledge/sledgeboard/templates/tabs/overview.html:
--------------------------------------------------------------------------------
[markup stripped; visible text and Jinja embeds only:]
Overview
{{ embed(roots.overview_planner_checkbox_group) }}
{{ embed(roots.overview_table) }}
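The embed(roots.*) expressions above look up Bokeh document roots by their name attribute, which is presumably why the Python configs pin names such as 'overview_table' and why OVERVIEW_PLANNER_CHECKBOX_GROUP_NAME matches the template. A speculative sketch of the server-side counterpart (module path, widget choice, and wiring are assumptions):

# Illustrative sketch only: named roots that the Jinja template can embed.
from bokeh.io import curdoc
from bokeh.models import CheckboxGroup, ColumnDataSource, DataTable

from sledge.sledgeboard.tabs.config.overview_tab_config import (  # assumed module path
    OVERVIEW_PLANNER_CHECKBOX_GROUP_NAME,
    OverviewTabDataTableConfig,
)

planner_checkbox_group = CheckboxGroup(labels=[], name=OVERVIEW_PLANNER_CHECKBOX_GROUP_NAME)
overview_table = DataTable(source=ColumnDataSource(), columns=[], **OverviewTabDataTableConfig.get_config())

# In a Bokeh directory-style app, roots added to the document are exposed to the
# template as roots.<name>, matching the embed() calls in overview.html.
curdoc().add_root(planner_checkbox_group)
curdoc().add_root(overview_table)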
--------------------------------------------------------------------------------
/sledge/sledgeboard/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/autonomousvision/sledge/3d72828a1895d1a467f11a8343988c6a1eb4b684/sledge/sledgeboard/utils/__init__.py
--------------------------------------------------------------------------------
/sledge/sledgeboard/utils/utils.py:
--------------------------------------------------------------------------------
import logging
from pathlib import Path
from typing import List

import pandas as pd

from nuplan.planning.metrics.metric_dataframe import MetricStatisticsDataFrame
from sledge.sledgeboard.base.data_class import SledgeBoardFile

logger = logging.getLogger(__name__)


def metric_statistics_reader(parquet_file: Path) -> MetricStatisticsDataFrame:
    """
    Reader for a metric statistics parquet file.
    :param parquet_file: Parquet file path to read.
    :return: MetricStatisticsDataFrame.
    """
    data_frame = MetricStatisticsDataFrame.load_parquet(parquet_file)
    return data_frame


def metric_aggregator_reader(parquet_file: Path) -> pd.DataFrame:
    """
    Reader for a metric aggregator parquet file.
    :param parquet_file: Parquet file path to read.
    :return: Pandas data frame.
    """
    data_frame = pd.read_parquet(parquet_file)
    return data_frame


def check_sledgeboard_file_paths(main_paths: List[str]) -> List[Path]:
    """
    Check if the given file paths are valid SledgeBoard files.
    :param main_paths: A list of file paths.
    :return: A list of available SledgeBoard files.
    """
    available_paths = []
    for main_path in main_paths:
        main_folder_path: Path = Path(main_path)
        if main_folder_path.is_dir():
            # Search for sledgeboard event files.
            files = list(main_folder_path.iterdir())
            event_files = [file for file in files if file.name.endswith(SledgeBoardFile.extension())]

            if len(event_files) > 0:
                # Descending order.
                event_files = sorted(event_files, reverse=True)
                # Load the first file only.
                available_paths.append(event_files[0])
        elif main_folder_path.is_file() and main_folder_path.name.endswith(SledgeBoardFile.extension()):
            available_paths.append(main_folder_path)
        else:
            raise RuntimeError(f"{str(main_folder_path)} is not a valid SledgeBoard file")

    if len(available_paths) == 0:
        logger.info("No available SledgeBoard files were found.")

    return available_paths


def read_sledgeboard_file_paths(file_paths: List[Path]) -> List[SledgeBoardFile]:
    """
    Read a list of file paths into SledgeBoardFile data classes.
    :param file_paths: A list of file paths.
    :return: A list of SledgeBoard files.
    """
    sledgeboard_files = []
    for file_path in file_paths:
        sledgeboard_file = SledgeBoardFile.load_sledgeboard_file(file_path)
        sledgeboard_file.current_path = file_path.parents[0]
        sledgeboard_files.append(sledgeboard_file)

    return sledgeboard_files
--------------------------------------------------------------------------------