├── .github └── FUNDING.yml ├── .gitignore ├── LICENSE ├── README.md ├── agents ├── __init__.py ├── navigation │ ├── __init__.py │ ├── agent.py │ ├── basic_agent.py │ ├── behavior_agent.py │ ├── controller.py │ ├── global_route_planner.py │ ├── global_route_planner_dao.py │ ├── local_planner.py │ ├── local_planner_behavior.py │ ├── roaming_agent.py │ └── types_behavior.py └── tools │ ├── __init__.py │ └── misc.py ├── camera_calibration ├── README.md ├── calculate_distortion_parameters.py ├── collect_data_for_calibration.py ├── test.png ├── undistort.png └── undistort.py ├── dataset └── tools │ ├── generate_metadata.py │ └── generate_random_routes.py ├── docs ├── DATA_PREP.md ├── EVAL.md ├── INSTALL.md └── TRAIN.md ├── leaderboard ├── LICENSE ├── README.md ├── data │ ├── routes_for_evaluation │ │ ├── routes_longest6.xml │ │ └── routes_town05_long.xml │ ├── routes_for_open_loop_training │ │ ├── routes_town01_00.xml │ │ ├── routes_town01_01.xml │ │ ├── routes_town01_02.xml │ │ ├── routes_town01_03.xml │ │ ├── routes_town01_04.xml │ │ ├── routes_town01_05.xml │ │ ├── routes_town01_06.xml │ │ ├── routes_town01_07.xml │ │ ├── routes_town01_08.xml │ │ ├── routes_town01_09.xml │ │ ├── routes_town01_10.xml │ │ ├── routes_town01_11.xml │ │ ├── routes_town01_val.xml │ │ ├── routes_town02_00.xml │ │ ├── routes_town02_01.xml │ │ ├── routes_town02_02.xml │ │ ├── routes_town02_03.xml │ │ ├── routes_town02_04.xml │ │ ├── routes_town02_05.xml │ │ ├── routes_town02_06.xml │ │ ├── routes_town02_07.xml │ │ ├── routes_town02_08.xml │ │ ├── routes_town02_09.xml │ │ ├── routes_town02_10.xml │ │ ├── routes_town02_11.xml │ │ ├── routes_town02_val.xml │ │ ├── routes_town03_00.xml │ │ ├── routes_town03_01.xml │ │ ├── routes_town03_02.xml │ │ ├── routes_town03_03.xml │ │ ├── routes_town03_04.xml │ │ ├── routes_town03_05.xml │ │ ├── routes_town03_06.xml │ │ ├── routes_town03_07.xml │ │ ├── routes_town03_08.xml │ │ ├── routes_town03_09.xml │ │ ├── routes_town03_10.xml │ │ ├── 
routes_town03_11.xml │ │ ├── routes_town03_val.xml │ │ ├── routes_town04_00.xml │ │ ├── routes_town04_01.xml │ │ ├── routes_town04_02.xml │ │ ├── routes_town04_03.xml │ │ ├── routes_town04_04.xml │ │ ├── routes_town04_05.xml │ │ ├── routes_town04_06.xml │ │ ├── routes_town04_07.xml │ │ ├── routes_town04_08.xml │ │ ├── routes_town04_09.xml │ │ ├── routes_town04_10.xml │ │ ├── routes_town04_11.xml │ │ ├── routes_town04_val.xml │ │ ├── routes_town05_00.xml │ │ ├── routes_town05_01.xml │ │ ├── routes_town05_02.xml │ │ ├── routes_town05_03.xml │ │ ├── routes_town05_04.xml │ │ ├── routes_town05_05.xml │ │ ├── routes_town05_06.xml │ │ ├── routes_town05_07.xml │ │ ├── routes_town05_08.xml │ │ ├── routes_town05_09.xml │ │ ├── routes_town05_10.xml │ │ ├── routes_town05_11.xml │ │ ├── routes_town05_val.xml │ │ ├── routes_town06_00.xml │ │ ├── routes_town06_01.xml │ │ ├── routes_town06_02.xml │ │ ├── routes_town06_03.xml │ │ ├── routes_town06_04.xml │ │ ├── routes_town06_05.xml │ │ ├── routes_town06_06.xml │ │ ├── routes_town06_07.xml │ │ ├── routes_town06_08.xml │ │ ├── routes_town06_09.xml │ │ ├── routes_town06_10.xml │ │ ├── routes_town06_11.xml │ │ ├── routes_town06_val.xml │ │ ├── routes_town07_00.xml │ │ ├── routes_town07_01.xml │ │ ├── routes_town07_02.xml │ │ ├── routes_town07_03.xml │ │ ├── routes_town07_04.xml │ │ ├── routes_town07_05.xml │ │ ├── routes_town07_06.xml │ │ ├── routes_town07_07.xml │ │ ├── routes_town07_08.xml │ │ ├── routes_town07_09.xml │ │ ├── routes_town07_10.xml │ │ ├── routes_town07_11.xml │ │ ├── routes_town07_val.xml │ │ ├── routes_town10_00.xml │ │ ├── routes_town10_01.xml │ │ ├── routes_town10_02.xml │ │ ├── routes_town10_03.xml │ │ ├── routes_town10_04.xml │ │ ├── routes_town10_05.xml │ │ ├── routes_town10_06.xml │ │ ├── routes_town10_07.xml │ │ ├── routes_town10_08.xml │ │ ├── routes_town10_09.xml │ │ ├── routes_town10_10.xml │ │ ├── routes_town10_11.xml │ │ └── routes_town10_val.xml │ └── scenarios │ │ ├── 
all_towns_traffic_scenarios_no256.json │ │ └── longest6_eval_scenarios.json ├── leaderboard │ ├── __init__.py │ ├── autoagents │ │ ├── __init__.py │ │ ├── agent_wrapper.py │ │ ├── autonomous_agent.py │ │ ├── dummy_agent.py │ │ ├── human_agent.py │ │ ├── human_agent_config.txt │ │ ├── npc_agent.py │ │ └── ros_agent.py │ ├── envs │ │ ├── __init__.py │ │ └── sensor_interface.py │ ├── leaderboard_evaluator.py │ ├── scenarios │ │ ├── __init__.py │ │ ├── background_activity.py │ │ ├── master_scenario.py │ │ ├── route_scenario.py │ │ ├── scenario_manager.py │ │ └── scenarioatomics │ │ │ ├── __init__.py │ │ │ └── atomic_criteria.py │ └── utils │ │ ├── __init__.py │ │ ├── checkpoint_tools.py │ │ ├── result_writer.py │ │ ├── route_indexer.py │ │ ├── route_manipulation.py │ │ ├── route_parser.py │ │ └── statistics_manager.py ├── scripts │ ├── collect_data.sh │ ├── evaluation_longest6.sh │ └── evaluation_town05long.sh └── team_code │ ├── auto_pilot.py │ ├── base_agent.py │ ├── driveadapter_agent.py │ ├── map_agent.py │ ├── pid_controller.py │ ├── planner.py │ └── roach_ap_agent_data_collection.py ├── open_loop_training ├── __init__.py ├── code │ ├── __init__.py │ ├── apis │ │ ├── __init__.py │ │ ├── mmdet_train.py │ │ └── train.py │ ├── core │ │ └── evaluation │ │ │ ├── __init__.py │ │ │ ├── epoch_hook.py │ │ │ ├── eval_hooks.py │ │ │ └── eval_tool.py │ ├── datasets │ │ ├── __init__.py │ │ ├── base_dataset.py │ │ ├── builder.py │ │ ├── carla_dataset.py │ │ ├── pipelines │ │ │ ├── __init__.py │ │ │ ├── formating.py │ │ │ ├── loading.py │ │ │ └── transform.py │ │ └── samplers │ │ │ ├── __init__.py │ │ │ ├── distributed_sampler.py │ │ │ ├── group_sampler.py │ │ │ └── sampler.py │ ├── encoder_decoder_framework.py │ ├── model_code │ │ ├── backbones │ │ │ ├── __init__.py │ │ │ ├── lidarnet.py │ │ │ └── lss.py │ │ └── dense_heads │ │ │ ├── __init__.py │ │ │ ├── driver_adapter_head.py │ │ │ └── mask2former4seg.py │ └── utils.py ├── configs │ ├── _base_ │ │ ├── default_runtime.py │ │ 
└── schedules │ │ │ ├── cosine.py │ │ │ ├── cyclic_20e.py │ │ │ ├── cyclic_40e.py │ │ │ ├── mmdet_schedule_1x.py │ │ │ ├── schedule_2x.py │ │ │ ├── schedule_3x.py │ │ │ ├── seg_cosine_150e.py │ │ │ ├── seg_cosine_200e.py │ │ │ └── seg_cosine_50e.py │ └── driveadapter.py ├── ev_mask.npy ├── ops │ └── voxel_pooling │ │ ├── __init__.py │ │ ├── src │ │ ├── voxel_pooling_forward.cpp │ │ └── voxel_pooling_forward_cuda.cu │ │ └── voxel_pooling.py ├── setup.py └── train.py ├── roach ├── config │ └── config_agent.yaml ├── criteria │ ├── blocked.py │ ├── collision.py │ ├── encounter_light.py │ ├── outside_route_lane.py │ ├── route_deviation.py │ ├── run_red_light.py │ └── run_stop_sign.py ├── log │ └── ckpt_11833344.pth ├── models │ ├── distributions.py │ ├── ppo.py │ ├── ppo_buffer.py │ ├── ppo_policy.py │ ├── torch_layers.py │ └── torch_util.py ├── obs_manager │ ├── actor_state │ │ ├── control.py │ │ ├── route.py │ │ ├── speed.py │ │ └── velocity.py │ └── birdview │ │ ├── chauffeurnet.py │ │ ├── hdmap_generate.py │ │ └── maps │ │ ├── Town01.h5 │ │ ├── Town02.h5 │ │ ├── Town03.h5 │ │ ├── Town04.h5 │ │ ├── Town05.h5 │ │ ├── Town06.h5 │ │ ├── Town07.h5 │ │ └── Town10HD.h5 ├── rl_birdview_agent.py └── utils │ ├── config_utils.py │ ├── expert_noiser.py │ ├── rl_birdview_wrapper.py │ ├── traffic_light.py │ ├── transforms.py │ └── wandb_callback.py ├── scenario_runner ├── CARLA_VER ├── Dockerfile ├── Docs │ ├── CHANGELOG.md │ ├── CODE_OF_CONDUCT.md │ ├── CONTRIBUTING.md │ ├── FAQ.md │ ├── agent_evaluation.md │ ├── coding_standard.md │ ├── creating_new_scenario.md │ ├── extra.css │ ├── getting_scenariorunner.md │ ├── getting_started.md │ ├── img │ │ ├── OSC_catalogs.png │ │ ├── OSC_entities_1.png │ │ ├── OSC_entities_2.png │ │ ├── OSC_main.png │ │ ├── OSC_params.png │ │ ├── OSC_roadnetwork.png │ │ ├── OSC_storyboard.png │ │ ├── OSC_storyboard_endconditions.png │ │ ├── OSC_storyboard_event.png │ │ ├── OSC_storyboard_init_1.png │ │ ├── OSC_storyboard_init_2.png │ │ ├── 
OSC_storyboard_story.png │ │ ├── metrics_example.jpg │ │ ├── metrics_example.png │ │ └── scenario_runner_video.png │ ├── index.md │ ├── list_of_scenarios.md │ ├── metrics_module.md │ ├── openscenario_support.md │ ├── requirements.txt │ └── ros_agent.md ├── Jenkinsfile ├── LICENSE ├── README.md ├── manual_control.py ├── metrics_manager.py ├── mkdocs.yml ├── no_rendering_mode.py ├── requirements.txt ├── scenario_runner.py └── srunner │ ├── __init__.py │ ├── autoagents │ ├── __init__.py │ ├── agent_wrapper.py │ ├── autonomous_agent.py │ ├── dummy_agent.py │ ├── human_agent.py │ ├── npc_agent.py │ ├── ros_agent.py │ └── sensor_interface.py │ ├── data │ ├── all_towns_traffic_scenarios1_3_4.json │ ├── all_towns_traffic_scenarios1_3_4_8.json │ ├── no_scenarios.json │ ├── routes_debug.xml │ ├── routes_devtest.xml │ └── routes_training.xml │ ├── examples │ ├── CatalogExample.xosc │ ├── ChangeLane.xml │ ├── ChangingWeather.xosc │ ├── ControlLoss.xml │ ├── CutIn.xml │ ├── CyclistCrossing.xosc │ ├── FollowLeadingVehicle.xml │ ├── FollowLeadingVehicle.xosc │ ├── FreeRide.xml │ ├── LaneChangeSimple.xosc │ ├── LeadingVehicle.xml │ ├── NoSignalJunction.xml │ ├── ObjectCrossing.xml │ ├── OppositeDirection.xml │ ├── OscControllerExample.xosc │ ├── PedestrianCrossingFront.xosc │ ├── RunningRedLight.xml │ ├── SignalizedJunctionLeftTurn.xml │ ├── SignalizedJunctionRightTurn.xml │ ├── VehicleTurning.xml │ └── catalogs │ │ ├── ControllerCatalog.xosc │ │ ├── EnvironmentCatalog.xosc │ │ ├── ManeuverCatalog.xosc │ │ ├── MiscObjectCatalog.xosc │ │ ├── PedestrianCatalog.xosc │ │ └── VehicleCatalog.xosc │ ├── metrics │ ├── data │ │ ├── CriteriaFilter_criteria.json │ │ ├── DistanceBetweenVehicles_criteria.json │ │ └── DistanceToLaneCenter_criteria.json │ ├── examples │ │ ├── basic_metric.py │ │ ├── criteria_filter.py │ │ ├── distance_between_vehicles.py │ │ └── distance_to_lane_center.py │ └── tools │ │ ├── metrics_log.py │ │ └── metrics_parser.py │ ├── openscenario │ ├── 0.9.x │ │ ├── 
OpenSCENARIO_Catalog.xsd │ │ ├── OpenSCENARIO_TypeDefs.xsd │ │ ├── OpenSCENARIO_v0.9.1.xsd │ │ └── migration0_9_1to1_0.xslt │ └── OpenSCENARIO.xsd │ ├── scenarioconfigs │ ├── __init__.py │ ├── openscenario_configuration.py │ ├── route_scenario_configuration.py │ └── scenario_configuration.py │ ├── scenariomanager │ ├── __init__.py │ ├── actorcontrols │ │ ├── __init__.py │ │ ├── actor_control.py │ │ ├── basic_control.py │ │ ├── external_control.py │ │ ├── npc_vehicle_control.py │ │ ├── pedestrian_control.py │ │ ├── simple_vehicle_control.py │ │ └── vehicle_longitudinal_control.py │ ├── carla_data_provider.py │ ├── result_writer.py │ ├── scenario_manager.py │ ├── scenarioatomics │ │ ├── __init__.py │ │ ├── atomic_behaviors.py │ │ ├── atomic_criteria.py │ │ └── atomic_trigger_conditions.py │ ├── timer.py │ ├── traffic_events.py │ ├── watchdog.py │ └── weather_sim.py │ ├── scenarios │ ├── __init__.py │ ├── background_activity.py │ ├── basic_scenario.py │ ├── change_lane.py │ ├── control_loss.py │ ├── cut_in.py │ ├── follow_leading_vehicle.py │ ├── freeride.py │ ├── junction_crossing_route.py │ ├── maneuver_opposite_direction.py │ ├── master_scenario.py │ ├── no_signal_junction_crossing.py │ ├── object_crash_intersection.py │ ├── object_crash_vehicle.py │ ├── open_scenario.py │ ├── opposite_vehicle_taking_priority.py │ ├── other_leading_vehicle.py │ ├── route_scenario.py │ ├── signalized_junction_left_turn.py │ └── signalized_junction_right_turn.py │ ├── tools │ ├── __init__.py │ ├── openscenario_parser.py │ ├── py_trees_port.py │ ├── route_manipulation.py │ ├── route_parser.py │ ├── scenario_helper.py │ └── scenario_parser.py │ └── utilities │ └── code_check_and_formatting.sh └── src ├── opendrivelab_e2e_update.png └── pipeline.PNG /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | github: [OpenDriveLab] # Replace with up to 4 GitHub Sponsors-enabled usernames 
e.g., [user1, user2] 4 | patreon: # Replace with a single Patreon username 5 | open_collective: # Replace with a single Open Collective username 6 | ko_fi: # Replace with a single Ko-fi username 7 | tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel 8 | community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry 9 | liberapay: # Replace with a single Liberapay username 10 | issuehunt: # Replace with a single IssueHunt username 11 | otechie: # Replace with a single Otechie username 12 | lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry 13 | custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2'] 14 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.log 2 | *results.json 3 | *results*.json 4 | *.pyc 5 | *.out 6 | nohup.out 7 | *.out 8 | events* 9 | *hparams.yaml 10 | ckpt/ 11 | *checkpoints* 12 | __pycache__ 13 | *work_dirs* 14 | *batchscript* 15 | build 16 | *egg-info* 17 | *cpython* 18 | collect_data_json/*.json 19 | dataset/town??_?? 
# Copyright (c) # Copyright (c) 2018-2020 CVC.
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.

"""
This module provides implementation for GlobalRoutePlannerDAO
"""

import numpy as np


class GlobalRoutePlannerDAO(object):
    """
    Data-access layer between the CARLA server and GlobalRoutePlanner:
    fetches the raw road topology and waypoints from the simulator.
    """

    def __init__(self, wmap, sampling_resolution):
        """
        Constructor method.

        :param wmap: carla.world map object to query
        :param sampling_resolution: sampling distance between waypoints (meters)
        """
        self._sampling_resolution = sampling_resolution
        self._wmap = wmap

    def _sample_path(self, entry_wp, exit_wp):
        """Return the intermediate waypoints between entry and exit, spaced by
        the sampling resolution (always contains at least one waypoint)."""
        step = self._sampling_resolution
        end_location = exit_wp.transform.location
        path = []
        if entry_wp.transform.location.distance(end_location) > step:
            current = entry_wp.next(step)[0]
            while current.transform.location.distance(end_location) > step:
                path.append(current)
                current = current.next(step)[0]
        else:
            path.append(entry_wp.next(step)[0])
        return path

    def get_topology(self):
        """
        Retrieve the topology from the server as pairs of waypoints and
        post-process it into a list of dictionary objects.

        :return topology: list of dicts with the following keys
            entry    - waypoint at the entry of the road segment
            entryxyz - rounded (x, y, z) of the entry point
            exit     - waypoint at the exit of the road segment
            exitxyz  - rounded (x, y, z) of the exit point
            path     - waypoints sampled between entry and exit
        """
        topology = []
        for entry_wp, exit_wp in self._wmap.get_topology():
            loc_in, loc_out = entry_wp.transform.location, exit_wp.transform.location
            # Round coordinates so the xyz tuples are stable dictionary keys
            # despite floating point imprecision.
            x1, y1, z1, x2, y2, z2 = np.round(
                [loc_in.x, loc_in.y, loc_in.z, loc_out.x, loc_out.y, loc_out.z], 0)
            entry_wp.transform.location, exit_wp.transform.location = loc_in, loc_out
            topology.append({
                'entry': entry_wp,
                'exit': exit_wp,
                'entryxyz': (x1, y1, z1),
                'exitxyz': (x2, y2, z2),
                'path': self._sample_path(entry_wp, exit_wp),
            })
        return topology

    def get_waypoint(self, location):
        """
        Return the waypoint closest to the given location.

        :param location: vehicle location
        :return waypoint: generated waypoint close to location
        """
        return self._wmap.get_waypoint(location)

    def get_resolution(self):
        """Accessor for self._sampling_resolution."""
        return self._sampling_resolution
#!/usr/bin/env python

# Copyright (c) 2018 Intel Labs.
# authors: German Ros (german.ros@intel.com)
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.

""" This module implements an agent that roams around a track following random
waypoints and avoiding other vehicles. The agent also responds to traffic lights. """

from agents.navigation.agent import Agent, AgentState
from agents.navigation.local_planner import LocalPlanner


class RoamingAgent(Agent):
    """
    RoamingAgent navigates the scene by making random choices when facing an
    intersection, while respecting traffic lights and other vehicles.
    """

    def __init__(self, vehicle):
        """
        :param vehicle: actor to apply to local planner logic onto
        """
        super(RoamingAgent, self).__init__(vehicle)
        self._proximity_threshold = 10.0  # meters
        self._state = AgentState.NAVIGATING
        self._local_planner = LocalPlanner(self._vehicle)

    def run_step(self, debug=False):
        """
        Execute one step of navigation.

        :param debug: when True, print detected hazards
        :return: carla.VehicleControl
        """
        actors = self._world.get_actors()

        # Is there a vehicle blocking the way ahead?
        blocking, blocking_vehicle = self._is_vehicle_hazard(actors.filter("*vehicle*"))
        if blocking:
            if debug:
                print('!!! VEHICLE BLOCKING AHEAD [{}])'.format(blocking_vehicle.id))
            self._state = AgentState.BLOCKED_BY_VEHICLE

        # Is there a red traffic light affecting us?
        red_light, light = self._is_light_red(actors.filter("*traffic_light*"))
        if red_light:
            if debug:
                print('=== RED LIGHT AHEAD [{}])'.format(light.id))
            self._state = AgentState.BLOCKED_RED_LIGHT

        if blocking or red_light:
            return self.emergency_stop()

        # No hazard: fall back to the standard local planner behavior.
        self._state = AgentState.NAVIGATING
        return self._local_planner.run_step()
""" 5 | 6 | 7 | class Cautious(object): 8 | """Class for Cautious agent.""" 9 | max_speed = 40 10 | speed_lim_dist = 6 11 | speed_decrease = 12 12 | safety_time = 3 13 | min_proximity_threshold = 12 14 | braking_distance = 6 15 | overtake_counter = -1 16 | tailgate_counter = 0 17 | 18 | 19 | class Normal(object): 20 | """Class for Normal agent.""" 21 | max_speed = 50 22 | speed_lim_dist = 3 23 | speed_decrease = 10 24 | safety_time = 3 25 | min_proximity_threshold = 10 26 | braking_distance = 5 27 | overtake_counter = 0 28 | tailgate_counter = 0 29 | 30 | 31 | class Aggressive(object): 32 | """Class for Aggressive agent.""" 33 | max_speed = 70 34 | speed_lim_dist = 1 35 | speed_decrease = 8 36 | safety_time = 3 37 | min_proximity_threshold = 8 38 | braking_distance = 4 39 | overtake_counter = 0 40 | tailgate_counter = -1 41 | -------------------------------------------------------------------------------- /agents/tools/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OpenDriveLab/DriveAdapter/4d480bef06073786c1844757e7734b1bff1ff443/agents/tools/__init__.py -------------------------------------------------------------------------------- /camera_calibration/README.md: -------------------------------------------------------------------------------- 1 | # Camera Calibration 2 | 3 | Under the current setting, all four cameras have **fov 150**. If you want to change their fov, you could use the following code to estimate the corresponding distortion parameters. More discussions here: https://github.com/carla-simulator/carla/issues/3412. 4 | 5 | 1. 
Build Carla from source code with commit 0c97e9a5de5b35b759cc5b0955801244ed76791f: 6 | - git clone the latest commit 7 | - git checkout -b branch_id commit_id 8 | - following the guidance in https://carla.readthedocs.io/en/0.9.10/build_linux/ 9 | - make launch (so that the Carla is opened with an UE4 editor) 10 | 11 | Note that the Carla built from source and its corresponding environment (Python, environment variables, etc) are only used for estimating the distortion parameters. In all other experiments, we use the official pre-build version 0.9.10.1 and the environment under our setting. If there is any problem during building the Carla, please refer to https://carla.readthedocs.io/en/0.9.10/build_linux/. 12 | 13 | 2. Conduct Checkerboard Calibration 14 | - Download the checkerboard asset for Carla Town3 15 | https://github.com/AbanobSoliman/IBISCape and put it in the suggested folder of Carla from source 16 | - In the Carla UE4 editor, click make launch -> Compile -> Build -> Play 17 | - **python collect_data_for_calibration.py** (sensor setting) to collect images for calibration (For more information about the data collecting process, please refer to tutorials about checkerboard camera calibration: https://docs.opencv.org/4.x/dc/dbb/tutorial_py_calibration.html and the instruction in the original code https://github.com/AbanobSoliman/IBISCape) 18 | - **python calculate_distortion_parameters.py** (line 26-35 to calculate intrinsic matrix, line 32 to select image index for calibration) to output distortion matrix 19 | - Put an image collected by your Carla into this folder and rename it as "test.png". 20 | - **python undistort.py** (line 4-10 to set intrinsics matrix, line 14 to input the distortion matrix from the last step) to verify the effectiveness of the undistortion. Note that the Tangential Distortion effects need extra attention when collecting images. 
#!/usr/bin/env python
"""Estimate camera distortion parameters from checkerboard images.

The intrinsic matrix is derived analytically from the CARLA pinhole camera
model (image size + horizontal FOV) and held fixed during calibration, so
that cv2.calibrateCamera only estimates the distortion coefficients.
"""

import cv2
import numpy as np
import os
import glob

# Number of *inner* corners per checkerboard row/column.
CHECKERBOARD = (7, 7)
# Termination criteria for the sub-pixel corner refinement.
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

# 3D points (checkerboard frame) and matching 2D detections, one entry per image.
objpoints = []
imgpoints = []

# World coordinates of the checkerboard corners: a planar z=0 grid.
objp = np.zeros((1, CHECKERBOARD[0] * CHECKERBOARD[1], 3), np.float32)
objp[0, :, :2] = np.mgrid[0:CHECKERBOARD[0], 0:CHECKERBOARD[1]].T.reshape(-1, 2)

# Directory containing the calibration captures from Carla built from source.
img_path = None
print("Image Path:", img_path, "(Please Change it to your path of Carla **built from source**!!!)")
if img_path is None:
    # Fail early with a clear message instead of an opaque TypeError in glob().
    raise SystemExit("Please set img_path to the folder containing the "
                     "calibration images before running this script.")

images = glob.glob(img_path + '/*.png')

# CARLA pinhole model: f = W / (2 * tan(FOV / 2)), principal point at the center.
ImageSizeX = 1600
ImageSizeY = 900
CameraFOV = 150
f = ImageSizeX / (2 * np.tan(CameraFOV * np.pi / 360))
Cx = ImageSizeX / 2
Cy = ImageSizeY / 2
intrinsics = np.array([[f, 0, Cx],
                       [0, f, Cy],
                       [0, 0, 1]])
print(intrinsics)

for fname in images:
    # Keep only frames 8028..8068: the file name ends in a zero-padded 4-digit
    # frame id, so lexicographic comparison matches numeric order here.
    frame_id = fname[:-4].split("/")[-1][-4:]
    if frame_id < "8028" or frame_id > "8068":
        continue
    img = cv2.imread(fname)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Find the chess board corners; ret is True iff the full set of inner
    # corners was detected in this image.
    ret, corners = cv2.findChessboardCorners(
        gray, CHECKERBOARD,
        cv2.CALIB_CB_ADAPTIVE_THRESH + cv2.CALIB_CB_NORMALIZE_IMAGE + cv2.CALIB_CB_FAST_CHECK)
    print(fname, ret)
    if ret:
        objpoints.append(objp)
        # Refine the detected corner positions to sub-pixel accuracy.
        corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
        imgpoints.append(corners2)
        # Draw the corners for visual inspection (imshow left disabled).
        img = cv2.drawChessboardCorners(img, CHECKERBOARD, corners2, ret)
        # cv2.imshow('img', img)
        # cv2.waitKey(0)

cv2.destroyAllWindows()

if not imgpoints:
    # Without this guard, img/gray below would be undefined (NameError).
    raise SystemExit("No checkerboard corners were detected in the selected "
                     "frame range; cannot calibrate.")

h, w = img.shape[:2]

# Calibrate with the analytic intrinsics held fixed so that only the
# distortion coefficients are estimated from the correspondences.
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
    objpoints, imgpoints, gray.shape[::-1], intrinsics, None,
    flags=cv2.CALIB_FIX_INTRINSIC + cv2.CALIB_FIX_PRINCIPAL_POINT)

print("Camera matrix : \n")
print(mtx)
print("Distortion Matrix : \n")
print(dist)
print("rvecs : \n")
print(rvecs)
print("tvecs : \n")
print(tvecs)
= np.array([[f, 0, Cx], 11 | [0, f, Cy], 12 | [0, 0, 1 ]]) 13 | 14 | distortion_matrix = np.array([[ 0.00888296, -0.00130899, 0.00012061, -0.00338673, 0.00028834]]) 15 | newcameramtx, roi = cv.getOptimalNewCameraMatrix(intrinsics, distortion_matrix, (1600, 900), 1, (1600, 900)) 16 | # newcameramtx = np.array([[304.14395142, 0, 788.25758876,], 17 | # [ 0, 221.49429321, 449.78972161,], 18 | # [ 0, 0, 1, ],]) 19 | #print(roi, newcameramtx) 20 | 21 | 22 | original_img = cv.imread("test" + '.png') 23 | mapx, mapy = cv.initUndistortRectifyMap(intrinsics, distortion_matrix, None, newcameramtx, (ImageSizeX, ImageSizeY), 5) 24 | undistorted_img = cv.remap(original_img, mapx, mapy, cv.INTER_LINEAR) 25 | cv.imwrite("undistort.png", undistorted_img) -------------------------------------------------------------------------------- /docs/EVAL.md: -------------------------------------------------------------------------------- 1 | # Clsoed-Loop Evaluation 2 | 3 | To evaluate in the town05long with the 2M checkpoint: 4 | ```shell 5 | ## In the DriveAdapter/ directory 6 | port_for_carla=22023 ## Change the port for each running script to avoid cofliction 7 | port_for_traffic_manager=22033 ## Change the port for each running script to avoid cofliction 8 | team_agent=driveadapter_agent 9 | is_resume=False ## If there is the corresponding json file in the folder closed_loop_eval_log, you could set it as True to continue after the last finished route. 
10 | is_local=True 11 | ckpt_and_config_path=open_loop_training/ckpt/driveadapter_2m.pth+open_loop_training/configs/driveadapter.py 12 | scenario_file=all_towns_traffic_scenarios_no256 13 | cuda_device=0 14 | setting_name=driveadapter_town05long 15 | CUDA_VISIBLE_DEVICES=$cuda_device nohup bash ./leaderboard/scripts/evaluation_town05long.sh $port_for_carla $port_for_traffic_manager $team_agent $is_resume $is_local $ckpt_and_config_path $scenario_file $setting_name 2>&1 > $setting_name.log & 16 | ``` 17 | 18 | or simply: 19 | ```shell 20 | ## In the DriveAdapter/ directory 21 | CUDA_VISIBLE_DEVICES=0 nohup bash ./leaderboard/scripts/evaluation_town05long.sh 22023 22033 driveadapter_agent False True open_loop_training/ckpt/driveadapter_2m.pth+open_loop_training/configs/driveadapter.py all_towns_traffic_scenarios_no256 driveadapter_town05long 2>&1 > driveadapter_town05long.log & 22 | ``` 23 | 24 | To evaluate in the longest6 with the 2M checkpoint, you can simply use: 25 | ```shell 26 | ## In the DriveAdapter/ directory 27 | CUDA_VISIBLE_DEVICES=0 nohup bash ./leaderboard/scripts/evaluation_longest6.sh 23023 23033 driveadapter_agent False True open_loop_training/ckpt/driveadapter_2m.pth+open_loop_training/configs/driveadapter.py longest6_eval_scenarios driveadapter_longest6 2>&1 > driveadapter_longest6.log & 28 | ``` 29 | 30 | Note that the evaluation result is in the directory **closed_loop_eval_log/results_$setting_name.json** and the visualizations and recordings for debug (top-down view, front view, and canbus) are in the directory **closed_loop_eval_log/eval_log/$setting_name**. 31 | 32 | Warning: The visualizations and recordings could take lots of disk space. Please monitor those folders in the [closed_loop_eval_log/eval_log/](../closed_loop_eval_log/eval_log/) and delete those useless ones in time. 
You could also modify the **save** function of [leaderboard/team_code/driveadapter_agent.py](../leaderboard/team_code/driveadapter_agent.py) to change the saved information during evaluation. 33 | 34 | Update: As kindly reminded by the authors of [carla_garage](https://github.com/autonomousvision/carla_garage) (One awesome repo for e2e ad! Check it out for more details), we update the implementation of longest6 to align with the one used by [Transfuser](https://github.com/autonomousvision/transfuser/tree/2022/leaderboard). Specifically, they ignore the penalty score of running stop signs and increase the number of agents in the scene. DriveAdapter (2M frames) in the updated longest6 achieves DS 63.27, IS 0.87, RC 71.92. -------------------------------------------------------------------------------- /docs/TRAIN.md: -------------------------------------------------------------------------------- 1 | # Train Your Model 2 | 3 | ## Script For Training 4 | 5 | Our training pipeline is based on [mmcv](https://github.com/open-mmlab/mmcv) and [mmdet3d](https://github.com/open-mmlab/mmdetection3d). To train a DriveAdapter model, you could use: 6 | ```shell 7 | #In DriveAdapter/open_loop_training/ directory 8 | #We train on 16 A100 for 4 days 9 | CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7, python -m torch.distributed.launch --nproc_per_node=8 --master_port=22023 train.py configs/driveadapter.py --work-dir=work_dirs/driveadapter --launcher="pytorch" 10 | ``` 11 | 12 | For single GPU debug, you could simply use: 13 | ```shell 14 | #In driveadapter/open_loop_training/ directory 15 | CUDA_VISIBLE_DEVICES=0 python train.py configs/driveadapter.py --work-dir=work_dirs/debug 16 | ``` 17 | 18 | ## Code Structure 19 | We give the structure of the training code. Note that we only introduce the folders/files that are commonly used and modified. 
20 | 21 | DriveAdapter/open_loop_training 22 | ├── ckpt # Checkpoints 23 | ├── configs # Hyper-Parameter 24 | ├── work_dirs # Training Log 25 | ├── code # Preprocessing, DataLoader, Model 26 | │ ├── apis # Training pipeline for mmdet3D 27 | │ ├── core # The hooks for mmdet3D 28 | │ ├── datasets # Preprocessing and DataLoader 29 | | | ├── pipelines # Functions of Preprocessing and DataLoader 30 | │ | ├── samplers # For DDP 31 | │ | └── carla_dataset.py # Framework of Preprocessing and DataLoading 32 | │ ├── model_code # Neural Network 33 | | | ├── backbones # Module of Encoder 34 | | | └── dense_heads # Module of Decoder and Loss Functions 35 | │ └── encoder_decoder_framework.py # Entrance of Neural Network 36 | └── train.py # Entrance of Training 37 | 38 | 39 | 40 | ## Tips 41 | - Change **is_dev** in [open_loop_training/configs/driveadapter.py](../open_loop_training/configs/driveadapter.py) to True when you develop your model and to False during training 42 | - Set **is_full** in [open_loop_training/configs/driveadapter.py](../open_loop_training/configs/driveadapter.py) to False would only use the same number of data as TCP while to True would use all possible data recorded in **dataset/dataset_metadata.pkl**. 43 | - Your could start with [open_loop_training/code/encoder_decoder_framework.py](../open_loop_training/configs/driveadapter.py) when you want to learn about the neural network and [open_loop_training/code/datasets/carla_dataset.py](../open_loop_training/configs/driveadapter.py) when you want to learn about data. 
-------------------------------------------------------------------------------- /leaderboard/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 CARLA 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /leaderboard/README.md: -------------------------------------------------------------------------------- 1 | ## Note by DriveAdapter 2 | We have changed/added some log related functions of the leaderboard for easier debug. For example, each simulated route would have a index in its folder name. For all infractions, the location and timestep would be recorded in the json file. 3 | 4 | ## Original README.md 5 | 6 | The main goal of the CARLA Autonomous Driving Leaderboard is to evaluate the driving proficiency of autonomous agents in realistic traffic situations. 
The leaderboard serves as an open platform for the community to perform fair and reproducible evaluations, simplifying the comparison between different approaches. 7 | 8 | Autonomous agents have to drive through a set of predefined routes. For each route, agents are initialized at a starting point and have to drive to a destination point. The agents will be provided with a description of the route. Routes will happen in a variety of areas, including freeways, urban scenes, and residential districts. 9 | 10 | Agents will face multiple traffic situations based in the NHTSA typology, such as: 11 | 12 | * Lane merging 13 | * Lane changing 14 | * Negotiations at traffic intersections 15 | * Negotiations at roundabouts 16 | * Handling traffic lights and traffic signs 17 | * Coping with pedestrians, cyclists and other elements 18 | 19 | The user can change the weather of the simulation, allowing the evaluation of the agent in a variety of weather conditions, including daylight scenes, sunset, rain, fog, and night, among others. 
20 | 21 | More information can be found [here](https://leaderboard.carla.org/) -------------------------------------------------------------------------------- /leaderboard/leaderboard/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OpenDriveLab/DriveAdapter/4d480bef06073786c1844757e7734b1bff1ff443/leaderboard/leaderboard/__init__.py -------------------------------------------------------------------------------- /leaderboard/leaderboard/autoagents/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OpenDriveLab/DriveAdapter/4d480bef06073786c1844757e7734b1bff1ff443/leaderboard/leaderboard/autoagents/__init__.py -------------------------------------------------------------------------------- /leaderboard/leaderboard/autoagents/dummy_agent.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # This work is licensed under the terms of the MIT license. 4 | # For a copy, see . 
def get_entry_point():
    """Entry point queried by the leaderboard to locate the agent class."""
    return 'DummyAgent'


class DummyAgent(AutonomousAgent):
    """Do-nothing agent: logs its sensor input and always returns a neutral control."""

    def setup(self, path_to_conf_file):
        """Configure the agent; this dummy competes on the MAP track."""
        self.track = Track.MAP

    def sensors(self):
        """Return the sensor suite required by the agent.

        Three RGB cameras (center/left/right), a lidar, a radar, GNSS, IMU and
        the OpenDRIVE map pseudo-sensor, each as a dict understood by the
        leaderboard sensor factory.
        """
        return [
            {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': 0.0, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0,
             'width': 800, 'height': 600, 'fov': 100, 'id': 'Center'},
            {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': -0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': -45.0,
             'width': 800, 'height': 600, 'fov': 100, 'id': 'Left'},
            {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': 0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 45.0,
             'width': 800, 'height': 600, 'fov': 100, 'id': 'Right'},
            {'type': 'sensor.lidar.ray_cast', 'x': 0.7, 'y': -0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0,
             'yaw': -45.0, 'id': 'LIDAR'},
            {'type': 'sensor.other.radar', 'x': 0.7, 'y': -0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0,
             'yaw': -45.0, 'fov': 30, 'id': 'RADAR'},
            {'type': 'sensor.other.gnss', 'x': 0.7, 'y': -0.4, 'z': 1.60, 'id': 'GPS'},
            {'type': 'sensor.other.imu', 'x': 0.7, 'y': -0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0,
             'yaw': -45.0, 'id': 'IMU'},
            {'type': 'sensor.opendrive_map', 'reading_frequency': 1, 'id': 'OpenDRIVE'},
        ]

    def run_step(self, input_data, timestamp):
        """Print a summary of the incoming sensor data, then emit a neutral control."""
        print("=====================>")
        for sensor_id, val in input_data.items():
            # val is indexed as (frame, payload); arrays carry a .shape attribute.
            payload = val[1]
            if hasattr(payload, 'shape'):
                print("[{} -- {:06d}] with shape {}".format(sensor_id, val[0], payload.shape))
            else:
                print("[{} -- {:06d}] ".format(sensor_id, val[0]))
        print("<=====================")

        # Neutral control: no steering, no throttle, no brake.
        control = carla.VehicleControl()
        control.steer = 0.0
        control.throttle = 0.0
        control.brake = 0.0
        control.hand_brake = False
        return control
def get_entry_point():
    """Entry point queried by the leaderboard to locate the agent class."""
    return 'NpcAgent'


class NpcAgent(AutonomousAgent):
    """Autonomous agent that delegates driving to the BasicAgent NPC controller."""

    _agent = None
    _route_assigned = False

    def setup(self, path_to_conf_file):
        """Configure the agent; competes on the SENSORS track."""
        self.track = Track.SENSORS

        self._route_assigned = False
        self._agent = None

    def sensors(self):
        """Return the (minimal) sensor suite: a single left-facing camera."""
        return [
            {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': -0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0,
             'width': 300, 'height': 200, 'fov': 100, 'id': 'Left'},
        ]

    def run_step(self, input_data, timestamp):
        """Execute one step of navigation.

        First call(s): locate the ego ('hero') actor and wrap it in a
        BasicAgent. Following call: trace and assign the global route.
        Afterwards: defer every step to the BasicAgent controller.
        """
        idle = carla.VehicleControl()
        idle.steer = 0.0
        idle.throttle = 0.0
        idle.brake = 0.0
        idle.hand_brake = False

        if not self._agent:
            hero = None
            for actor in CarlaDataProvider.get_world().get_actors():
                if 'role_name' in actor.attributes and actor.attributes['role_name'] == 'hero':
                    hero = actor
                    break
            if hero:
                self._agent = BasicAgent(hero)
            # Stay idle this tick whether or not the hero was found.
            return idle

        if self._route_assigned:
            return self._agent.run_step()

        if self._global_plan:
            plan = []
            prev_wp = None
            # Trace a dense route between each pair of consecutive sparse waypoints.
            for transform, _ in self._global_plan_world_coord:
                wp = CarlaDataProvider.get_map().get_waypoint(transform.location)
                if prev_wp:
                    plan.extend(self._agent._trace_route(prev_wp, wp))
                prev_wp = wp

            self._agent._local_planner.set_global_plan(plan)  # pylint: disable=protected-access
            self._route_assigned = True

        return idle
BACKGROUND_ACTIVITY_SCENARIOS = ["BackgroundActivity"]


class BackgroundActivity(BasicScenario):

    """
    Implementation of a scenario to spawn a set of background actors,
    and to remove traffic jams in background traffic

    This is a single ego vehicle scenario
    """

    category = "BackgroundActivity"

    # Default number of autopilot background vehicles per town; towns missing
    # from this table get 0 (unless a benchmark override applies below).
    town_amount = {
        'Town01': 120,
        'Town02': 100,
        'Town03': 120,
        'Town04': 200,
        'Town05': 120,
        'Town06': 150,
        'Town07': 110,
        'Town08': 180,
        'Town09': 300,
        'Town10': 120,
    }

    def __init__(self, world, ego_vehicles, config, randomize=False, debug_mode=False, timeout=35 * 60):
        """
        Setup all relevant parameters and create scenario
        """
        self.config = config
        self.debug = debug_mode

        self.timeout = timeout  # Timeout of scenario in seconds

        super(BackgroundActivity, self).__init__("BackgroundActivity",
                                                 ego_vehicles,
                                                 config,
                                                 world,
                                                 debug_mode,
                                                 terminate_on_failure=True,
                                                 criteria_enable=True)

    def _initialize_actors(self, config):
        """Spawn the background traffic for the current town.

        The vehicle count comes from `town_amount` and is overridden by the
        active benchmark: town05long -> 120, longest6 -> 500.

        Raises:
            Exception: if no background vehicles could be spawned.
        """
        town_name = config.town
        amount = self.town_amount.get(town_name, 0)

        # Fix: use .get() so a missing BENCHMARK environment variable falls
        # back to the per-town default instead of raising KeyError.
        benchmark = os.environ.get("BENCHMARK", "")
        if benchmark == "town05long":
            amount = 120
            print("----------------Eval with Town05 Long, amount=120", flush=True)
        if benchmark == "longest6":
            amount = 500
            print("----------------Eval with Longest6, amount=500", flush=True)

        new_actors = CarlaDataProvider.request_new_batch_actors('vehicle.*',
                                                                amount,
                                                                carla.Transform(),
                                                                autopilot=True,
                                                                random_location=True,
                                                                rolename='background')

        if new_actors is None:
            raise Exception("Error: Unable to add the background activity, all spawn points were occupied")

        for _actor in new_actors:
            self.other_actors.append(_actor)

    def _create_behavior(self):
        """
        Basic behavior do nothing, i.e. Idle
        """
        pass

    def _create_test_criteria(self):
        """
        A list of all test criteria will be created that is later used
        in parallel behavior tree.
        """
        pass

    def __del__(self):
        """
        Remove all actors upon deletion
        """
        self.remove_all_actors()
simplejson as json 4 | except ImportError: 5 | import json 6 | import requests 7 | import os.path 8 | 9 | 10 | def autodetect_proxy(): 11 | proxies = {} 12 | 13 | proxy_https = os.getenv('HTTPS_PROXY', os.getenv('https_proxy', None)) 14 | proxy_http = os.getenv('HTTP_PROXY', os.getenv('http_proxy', None)) 15 | 16 | if proxy_https: 17 | proxies['https'] = proxy_https 18 | if proxy_http: 19 | proxies['http'] = proxy_http 20 | 21 | return proxies 22 | 23 | 24 | def fetch_dict(endpoint): 25 | data = None 26 | if endpoint.startswith(('http:', 'https:', 'ftp:')): 27 | proxies = autodetect_proxy() 28 | 29 | if proxies: 30 | response = requests.get(url=endpoint, proxies=proxies) 31 | else: 32 | response = requests.get(url=endpoint) 33 | 34 | try: 35 | data = response.json() 36 | except json.decoder.JSONDecodeError: 37 | data = {} 38 | else: 39 | data = {} 40 | if os.path.exists(endpoint): 41 | with open(endpoint) as fd: 42 | try: 43 | data = json.load(fd) 44 | except json.JSONDecodeError: 45 | data = {} 46 | 47 | return data 48 | 49 | 50 | def create_default_json_msg(): 51 | msg = { 52 | "sensors": [], 53 | "values": [], 54 | "labels": [], 55 | "entry_status": "", 56 | "eligible": "", 57 | "_checkpoint": { 58 | "progress": [], 59 | "records": [], 60 | "global_record": {} 61 | }, 62 | } 63 | 64 | return msg 65 | 66 | 67 | def save_dict(endpoint, data): 68 | if endpoint.startswith(('http:', 'https:', 'ftp:')): 69 | proxies = autodetect_proxy() 70 | 71 | if proxies: 72 | _ = requests.patch(url=endpoint, headers={'content-type':'application/json'}, data=json.dumps(data, indent=4, sort_keys=True), proxies=proxies) 73 | else: 74 | _ = requests.patch(url=endpoint, headers={'content-type':'application/json'}, data=json.dumps(data, indent=4, sort_keys=True)) 75 | else: 76 | with open(endpoint, 'w') as fd: 77 | json.dump(data, fd, indent=4, sort_keys=True) 78 | -------------------------------------------------------------------------------- 
class RouteIndexer():
    """Iterates over (route, repetition) configurations parsed from a routes file,
    with checkpoint-based resume/save support."""

    def __init__(self, routes_file, scenarios_file, repetitions):
        self._routes_file = routes_file
        self._scenarios_file = scenarios_file
        self._repetitions = repetitions
        self._configs_dict = OrderedDict()
        self._configs_list = []
        self.routes_length = []
        self._index = 0

        # Parse every route once; each route is replicated `repetitions` times.
        route_configurations = RouteParser.parse_routes_file(self._routes_file, self._scenarios_file, False)

        self.n_routes = len(route_configurations)
        self.total = self.n_routes * self._repetitions

        for route_id, config in enumerate(route_configurations):
            for rep in range(repetitions):
                config.index = route_id * self._repetitions + rep
                config.repetition_index = rep
                key = '{}.{}'.format(config.name, rep)
                self._configs_dict[key] = copy.copy(config)

        self._configs_list = list(self._configs_dict.items())

    def peek(self):
        """Return True while at least one configuration is left to consume."""
        return self._index < len(self._configs_list)

    def next(self):
        """Return the next configuration and advance, or None when exhausted."""
        if not self.peek():
            return None

        _, config = self._configs_list[self._index]
        self._index += 1
        return config

    def resume(self, endpoint):
        """Restore the iteration index from the checkpoint at *endpoint*."""
        data = fetch_dict(endpoint)
        if not data:
            return

        checkpoint_dict = dictor(data, '_checkpoint')
        if not (checkpoint_dict and 'progress' in checkpoint_dict):
            return

        progress = checkpoint_dict['progress']
        if not progress:
            current_route = 0
        else:
            current_route, _ = progress

        if current_route <= self.total:
            self._index = current_route
        else:
            print('Problem reading checkpoint. Route id {} '
                  'larger than maximum number of routes {}'.format(current_route, self.total))

    def save_state(self, endpoint):
        """Write the current progress back into the checkpoint at *endpoint*."""
        data = fetch_dict(endpoint)
        if not data:
            data = create_default_json_msg()
        data['_checkpoint']['progress'] = [self._index, self.total]

        save_dict(endpoint, data)
--track=${CHALLENGE_TRACK_CODENAME} \ 36 | --checkpoint=collect_data_json/${CHECKPOINT_ENDPOINT} \ 37 | --agent=${TEAM_AGENT} \ 38 | --agent-config=${TEAM_CONFIG} \ 39 | --debug=${DEBUG_CHALLENGE} \ 40 | --record=${RECORD_PATH} \ 41 | --resume=${RESUME} \ 42 | --port=${PORT} \ 43 | --trafficManagerPort=${TM_PORT} \ 44 | --is_local=$5 \ 45 | --is_eval=False -------------------------------------------------------------------------------- /leaderboard/scripts/evaluation_longest6.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | export CARLA_SERVER=${CARLA_ROOT}/CarlaUE4.sh 3 | export PYTHONPATH=$PYTHONPATH:${CARLA_ROOT}/PythonAPI 4 | export PYTHONPATH=$PYTHONPATH:${CARLA_ROOT}/PythonAPI/carla 5 | export PYTHONPATH=$PYTHONPATH:${CARLA_ROOT}/PythonAPI/carla/dist/carla-0.9.10-py3.7-linux-x86_64.egg 6 | export PYTHONPATH=$PYTHONPATH:leaderboard 7 | export PYTHONPATH=$PYTHONPATH:leaderboard/team_code 8 | export PYTHONPATH=$PYTHONPATH:scenario_runner 9 | export PYTHONPATH=$PYTHONPATH:open_loop_training 10 | export PYTHONPATH=/mnt/lustre/share/pymc/new:$PYTHONPATH 11 | 12 | export LEADERBOARD_ROOT=leaderboard 13 | export CHALLENGE_TRACK_CODENAME=SENSORS 14 | export PORT=$1 15 | export TM_PORT=$2 16 | export DEBUG_CHALLENGE=0 17 | export REPETITIONS=1 # multiple evaluation runs 18 | export RESUME=$4 19 | 20 | #evaluation 21 | export ROUTES=leaderboard/data/routes_for_evaluation/routes_longest6.xml 22 | export TEAM_AGENT=team_code/$3.py 23 | export TEAM_CONFIG=$6 24 | export CHECKPOINT_ENDPOINT=closed_loop_eval_log/results_$8.json 25 | export SCENARIOS=leaderboard/data/scenarios/$7.json 26 | export SAVE_PATH=closed_loop_eval_log/eval_log/$8 27 | export BENCHMARK=longest6 28 | 29 | 30 | 31 | python3 -q -X faulthandler ${LEADERBOARD_ROOT}/leaderboard/leaderboard_evaluator.py \ 32 | --scenarios=${SCENARIOS} \ 33 | --routes=${ROUTES} \ 34 | --repetitions=${REPETITIONS} \ 35 | --track=${CHALLENGE_TRACK_CODENAME} \ 36 | 
--checkpoint=${CHECKPOINT_ENDPOINT} \ 37 | --agent=${TEAM_AGENT} \ 38 | --agent-config=${TEAM_CONFIG} \ 39 | --debug=${DEBUG_CHALLENGE} \ 40 | --record=${RECORD_PATH} \ 41 | --resume=${RESUME} \ 42 | --port=${PORT} \ 43 | --trafficManagerPort=${TM_PORT} \ 44 | --is_local=$5 \ 45 | --is_eval=True 46 | -------------------------------------------------------------------------------- /leaderboard/scripts/evaluation_town05long.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | export CARLA_SERVER=${CARLA_ROOT}/CarlaUE4.sh 3 | export PYTHONPATH=$PYTHONPATH:${CARLA_ROOT}/PythonAPI 4 | export PYTHONPATH=$PYTHONPATH:${CARLA_ROOT}/PythonAPI/carla 5 | export PYTHONPATH=$PYTHONPATH:${CARLA_ROOT}/PythonAPI/carla/dist/carla-0.9.10-py3.7-linux-x86_64.egg 6 | export PYTHONPATH=$PYTHONPATH:leaderboard 7 | export PYTHONPATH=$PYTHONPATH:leaderboard/team_code 8 | export PYTHONPATH=$PYTHONPATH:scenario_runner 9 | export PYTHONPATH=$PYTHONPATH:open_loop_training 10 | export PYTHONPATH=/mnt/lustre/share/pymc/new:$PYTHONPATH 11 | 12 | export LEADERBOARD_ROOT=leaderboard 13 | export CHALLENGE_TRACK_CODENAME=SENSORS 14 | export PORT=$1 15 | export TM_PORT=$2 16 | export DEBUG_CHALLENGE=0 17 | export REPETITIONS=1 # multiple evaluation runs 18 | export RESUME=$4 19 | 20 | # evaluation 21 | export ROUTES=leaderboard/data/routes_for_evaluation/routes_town05_long.xml 22 | export TEAM_AGENT=team_code/$3.py 23 | export TEAM_CONFIG=$6 24 | export CHECKPOINT_ENDPOINT=closed_loop_eval_log/results_$8.json 25 | export SCENARIOS=leaderboard/data/scenarios/$7.json 26 | export SAVE_PATH=closed_loop_eval_log/eval_log/$8 27 | export BENCHMARK=town05long 28 | 29 | 30 | 31 | python3 -q -X faulthandler ${LEADERBOARD_ROOT}/leaderboard/leaderboard_evaluator.py \ 32 | --scenarios=${SCENARIOS} \ 33 | --routes=${ROUTES} \ 34 | --repetitions=${REPETITIONS} \ 35 | --track=${CHALLENGE_TRACK_CODENAME} \ 36 | --checkpoint=${CHECKPOINT_ENDPOINT} \ 37 | 
--agent=${TEAM_AGENT} \ 38 | --agent-config=${TEAM_CONFIG} \ 39 | --debug=${DEBUG_CHALLENGE} \ 40 | --record=${RECORD_PATH} \ 41 | --resume=${RESUME} \ 42 | --port=${PORT} \ 43 | --trafficManagerPort=${TM_PORT} \ 44 | --is_local=$5 \ 45 | --is_eval=True 46 | -------------------------------------------------------------------------------- /leaderboard/team_code/pid_controller.py: -------------------------------------------------------------------------------- 1 | from collections import deque 2 | 3 | import numpy as np 4 | 5 | 6 | class PIDController(object): 7 | def __init__(self, K_P=1.0, K_I=0.0, K_D=0.0, n=20): 8 | self._K_P = K_P 9 | self._K_I = K_I 10 | self._K_D = K_D 11 | 12 | self._window = deque([0 for _ in range(n)], maxlen=n) 13 | self._max = 0.0 14 | self._min = 0.0 15 | 16 | def step(self, error): 17 | self._window.append(error) 18 | self._max = max(self._max, abs(error)) 19 | self._min = -abs(self._max) 20 | 21 | if len(self._window) >= 2: 22 | integral = np.mean(self._window) 23 | derivative = (self._window[-1] - self._window[-2]) 24 | else: 25 | integral = 0.0 26 | derivative = 0.0 27 | 28 | return self._K_P * error + self._K_I * integral + self._K_D * derivative 29 | -------------------------------------------------------------------------------- /leaderboard/team_code/planner.py: -------------------------------------------------------------------------------- 1 | import os 2 | from collections import deque 3 | 4 | import numpy as np 5 | 6 | 7 | DEBUG = int(os.environ.get('HAS_DISPLAY', 0)) 8 | 9 | 10 | class Plotter(object): 11 | def __init__(self, size): 12 | self.size = size 13 | self.clear() 14 | self.title = str(self.size) 15 | 16 | def clear(self): 17 | from PIL import Image, ImageDraw 18 | 19 | self.img = Image.fromarray(np.zeros((self.size, self.size, 3), dtype=np.uint8)) 20 | self.draw = ImageDraw.Draw(self.img) 21 | 22 | def dot(self, pos, node, color=(255, 255, 255), r=2): 23 | x, y = 5.5 * (pos - node) 24 | x += self.size / 2 25 | y += 
class RoutePlanner(object):
    """Maintains a queue of sparse route waypoints and pops those already passed."""

    def __init__(self, min_distance, max_distance, debug_size=256):
        self.route = deque()
        self.min_distance = min_distance
        self.max_distance = max_distance

        # GPS (lat, lon) -> metric conversion constants for CARLA 9.10
        # (CARLA 9.9 used mean [49.0, 8.0] and a different scale).
        self.mean = np.array([0.0, 0.0])
        self.scale = np.array([111324.60662786, 111319.490945])

        self.debug = Plotter(debug_size)

    def set_route(self, global_plan, gps=False, global_plan_world = None):
        """Fill the route queue from a global plan.

        With gps=True, positions are {'lat', 'lon'} dicts converted to metric
        coordinates; otherwise they are transforms whose location is used
        directly (mean-shifted only). When global_plan_world is given, the
        corresponding world-frame positions are stored alongside each entry.
        """
        self.route.clear()

        def _to_local(pos):
            # NOTE: only the GPS branch applies the scale factor, matching the
            # metric conversion; transforms are already metric.
            if gps:
                arr = np.array([pos['lat'], pos['lon']])
                arr -= self.mean
                arr *= self.scale
            else:
                arr = np.array([pos.location.x, pos.location.y])
                arr -= self.mean
            return arr

        if global_plan_world:
            for (pos, cmd), (pos_world, _) in zip(global_plan, global_plan_world):
                self.route.append((_to_local(pos), cmd, pos_world))
        else:
            for pos, cmd in global_plan:
                self.route.append((_to_local(pos), cmd))

    def run_step(self, gps):
        """Drop waypoints the ego has passed and return the current target entry."""
        self.debug.clear()

        if len(self.route) == 1:
            return self.route[0]

        num_to_pop = 0
        best_in_range = -np.inf
        travelled = 0.0

        for idx in range(1, len(self.route)):
            # Stop scanning once the lookahead horizon is exceeded.
            if travelled > self.max_distance:
                break

            travelled += np.linalg.norm(self.route[idx][0] - self.route[idx - 1][0])
            dist_to_ego = np.linalg.norm(self.route[idx][0] - gps)

            # Candidate to pop: the farthest waypoint still within min_distance.
            if dist_to_ego <= self.min_distance and dist_to_ego > best_in_range:
                best_in_range = dist_to_ego
                num_to_pop = idx

            red = 255 * int(dist_to_ego > self.min_distance)
            green = 255 * int(self.route[idx][1].value == 4)
            self.debug.dot(gps, self.route[idx][0], (red, green, 255))

        # Always keep at least two entries so route[1] below stays valid.
        while num_to_pop > 0 and len(self.route) > 2:
            self.route.popleft()
            num_to_pop -= 1

        self.debug.dot(gps, self.route[0][0], (0, 255, 0))
        self.debug.dot(gps, self.route[1][0], (255, 0, 0))
        self.debug.dot(gps, gps, (0, 0, 255))
        self.debug.show()

        return self.route[1]
def custom_train_model(model,
                       dataset,
                       cfg,
                       distributed=False,
                       validate=False,
                       timestamp=None,
                       eval_model=None,
                       meta=None):
    """A function wrapper for launching model training according to cfg.

    Because we need different eval_hook in runner. Should be deprecated in the
    future.

    Args:
        model: Model to train.
        dataset: Dataset (or list of datasets) to train on.
        cfg: Config object exposing ``cfg.model.type`` to select the branch.
        distributed (bool): Whether training is launched in distributed mode.
        validate (bool): Whether to run evaluation during training.
        timestamp (str | None): Timestamp used for log/checkpoint naming.
        eval_model: Optional separate model instance used for evaluation.
        meta (dict | None): Extra metadata recorded by the runner.

    Raises:
        NotImplementedError: If ``cfg.model.type`` is 'EncoderDecoder3D',
            which this custom wrapper does not support.
    """
    if cfg.model.type in ['EncoderDecoder3D']:
        # `assert False` is silently stripped under `python -O`; raise
        # explicitly so unsupported configs always fail loudly.
        raise NotImplementedError(
            'custom_train_model does not support EncoderDecoder3D')
    custom_train_detector(
        model,
        dataset,
        cfg,
        distributed=distributed,
        validate=validate,
        timestamp=timestamp,
        eval_model=eval_model,
        meta=meta)
@HOOKS.register_module()
class MySetEpochInfoHook(Hook):
    """Propagate the runner's current epoch number to the model."""

    def before_train_epoch(self, runner):
        # Unwrap DataParallel/DistributedDataParallel-style containers so
        # set_epoch is invoked on the actual model instance.
        model = runner.model
        if is_module_wrapper(model):
            model = model.module
        model.set_epoch(runner.epoch)
def expanduser(path):
    """Expand a leading ``~`` in *path*; non-path objects pass through unchanged."""
    if isinstance(path, (str, PathLike)):
        return osp.expanduser(path)
    else:
        return path


class BaseDataset(Dataset):
    """Base dataset.

    Args:
        data_prefix (str): the prefix of data path
        pipeline (list): a list of dict, where each element represents
            an operation defined in `mmcls.datasets.pipelines`
        ann_file (str | None): the annotation file. When ann_file is str,
            the subclass is expected to read from the ann_file. When ann_file
            is None, the subclass is expected to read according to data_prefix
        test_mode (bool): in train mode or test mode
    """


    def __init__(self,
                 ann_file,
                 pipeline,
                 data_prefix='',
                 test_mode=False):
        super(BaseDataset, self).__init__()
        self.data_prefix = expanduser(data_prefix)
        self.pipeline = Compose(pipeline)
        self.ann_file = expanduser(ann_file)
        self.test_mode = test_mode
        # Filled in by subclasses (e.g. via load_annotations); empty until then.
        self.data_infos = []

    @abstractmethod
    def load_annotations(self):
        """Load annotation records; default implementation reads ``ann_file``."""
        return mmcv.load(self.ann_file)


    def pre_pipeline(self, results):
        """Prepare results dict for pipeline."""
        # NOTE(review): `img_prefix` is never assigned in this base class —
        # presumably subclasses set it before __getitem__ runs; confirm.
        results['img_prefix'] = self.img_prefix



    def __len__(self):
        """Number of loaded samples."""
        return len(self.data_infos)

    def __getitem__(self, idx):
        """Deep-copy the info dict for *idx* and run it through the pipeline."""
        results = copy.deepcopy(self.data_infos[idx])
        self.pre_pipeline(results)
        return self.pipeline(results)


    def evaluate(self,
                 results,
                 metric='accuracy',
                 metric_options=None,
                 jsonfile_prefix=None,
                 indices=None,
                 logger=None):
        """Evaluation hook; intentionally a no-op in this base class."""
        pass
@PIPELINES.register_module()
class CarlaFormatBundle(object):
    """Wrap point clouds in DataContainers and stack multi-view image lists."""

    def __init__(self):
        pass

    def __call__(self, results):
        # Point clouds become DataContainers holding a plain tensor.
        if 'points' in results:
            pts = results['points']
            tensor = pts.tensor if isinstance(pts, BasePoints) else torch.Tensor(pts)
            results['points'] = DC(tensor)
        # A list of per-view images is stacked into one (V, H, W, C) array.
        if 'img' in results and isinstance(results['img'], list):
            results['img'] = np.stack(results['img'], axis=0)
        return results

    def __repr__(self):
        return self.__class__.__name__
@SAMPLER.register_module()
class DistributedSampler(_DistributedSampler):
    """Deterministic distributed sampler yielding contiguous per-rank shards.

    Differences from torch's stock sampler: shuffling is deliberately
    unsupported (``shuffle=True`` trips an assert), and each rank receives a
    contiguous block of indices rather than a strided slice.
    """

    def __init__(self,
                 dataset=None,
                 num_replicas=None,
                 rank=None,
                 shuffle=True,
                 seed=0):
        super().__init__(
            dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
        # for the compatibility from PyTorch 1.3+
        self.seed = seed if seed is not None else 0

    def __iter__(self):
        # deterministically shuffle based on epoch
        if self.shuffle:
            # Shuffling is intentionally unsupported by this sampler.
            assert False
        else:
            indices = torch.arange(len(self.dataset)).tolist()

        # add extra samples to make it evenly divisible
        # in case that indices is shorter than half of total_size
        indices = (indices *
                   math.ceil(self.total_size / len(indices)))[:self.total_size]
        assert len(indices) == self.total_size

        # subsample: contiguous block per rank (the strided variant used by
        # torch's default sampler is kept below for reference)
        per_replicas = self.total_size//self.num_replicas
        # indices = indices[self.rank:self.total_size:self.num_replicas]
        indices = indices[self.rank*per_replicas:(self.rank+1)*per_replicas]
        assert len(indices) == self.num_samples

        return iter(indices)
#To avoid strange FP16 inf norm bug
@MIDDLE_ENCODERS.register_module()
class SparseEncoder_fp32(SparseEncoder):
    """SparseEncoder variant whose forward pass is pinned to FP32.

    Identical to mmdet3d's SparseEncoder except that ``@force_fp32`` keeps the
    sparse convolutions out of AMP, working around the inf-norm instability
    noted in the comment above.
    """

    @force_fp32()
    def forward(self, voxel_features, coors, batch_size):
        """Forward of SparseEncoder.

        Args:
            voxel_features (torch.Tensor): Voxel features in shape (N, C).
            coors (torch.Tensor): Coordinates in shape (N, 4),
                the columns in the order of (batch_idx, z_idx, y_idx, x_idx).
            batch_size (int): Batch size.

        Returns:
            torch.Tensor: BEV feature map of shape (N, C*D, H, W).
        """
        # SparseConvTensor requires integer coordinates.
        coors = coors.int()
        input_sp_tensor = SparseConvTensor(voxel_features, coors,
                                           self.sparse_shape, batch_size)
        x = self.conv_input(input_sp_tensor)

        encode_features = []
        for encoder_layer in self.encoder_layers:
            x = encoder_layer(x)
            encode_features.append(x)

        # for detection head
        # [200, 176, 5] -> [200, 176, 2]
        out = self.conv_out(encode_features[-1])
        spatial_features = out.dense()

        # Fold the depth axis into channels to obtain a 2-D BEV feature map.
        N, C, D, H, W = spatial_features.shape
        spatial_features = spatial_features.view(N, C * D, H, W)

        return spatial_features
betas=(0.95, 0.99),  # the momentum is changed during training
# The learning rate set in the cyclic schedule is the initial learning rate
# rather than the max learning rate. Since the target_ratio is (10, 1e-4),
# the learning rate will change from 0.0018 to 0.018, then go to 0.0018*1e-4
lr = 0.0018
# The optimizer follows the setting in SECOND.Pytorch, but here we use
# the official AdamW optimizer implemented by PyTorch.
# Although the max_epochs is 40, this schedule is usually used with
# RepeatDataset with repeat ratio N, thus the actual max epoch
# number could be Nx40
-------------------------------------------------------------------------------- 1 | # optimizer 2 | # This schedule is mainly used by models on nuScenes dataset 3 | optimizer = dict(type='AdamW', lr=0.001, weight_decay=0.01) 4 | # max_norm=10 is better for SECOND 5 | optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) 6 | lr_config = dict( 7 | policy='step', 8 | warmup='linear', 9 | warmup_iters=1000, 10 | warmup_ratio=1.0 / 1000, 11 | step=[20, 23]) 12 | momentum_config = None 13 | # runtime settings 14 | runner = dict(type='EpochBasedRunner', max_epochs=24) 15 | -------------------------------------------------------------------------------- /open_loop_training/configs/_base_/schedules/schedule_3x.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | # This schedule is mainly used by models on indoor dataset, 3 | # e.g., VoteNet on SUNRGBD and ScanNet 4 | lr = 0.008 # max learning rate 5 | optimizer = dict(type='AdamW', lr=lr, weight_decay=0.01) 6 | optimizer_config = dict(grad_clip=dict(max_norm=10, norm_type=2)) 7 | lr_config = dict(policy='step', warmup=None, step=[24, 32]) 8 | # runtime settings 9 | runner = dict(type='EpochBasedRunner', max_epochs=36) 10 | -------------------------------------------------------------------------------- /open_loop_training/configs/_base_/schedules/seg_cosine_150e.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | # This schedule is mainly used on S3DIS dataset in segmentation task 3 | optimizer = dict(type='SGD', lr=0.2, weight_decay=0.0001, momentum=0.9) 4 | optimizer_config = dict(grad_clip=None) 5 | lr_config = dict(policy='CosineAnnealing', warmup=None, min_lr=0.002) 6 | momentum_config = None 7 | 8 | # runtime settings 9 | runner = dict(type='EpochBasedRunner', max_epochs=150) 10 | -------------------------------------------------------------------------------- 
/open_loop_training/configs/_base_/schedules/seg_cosine_200e.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | # This schedule is mainly used on ScanNet dataset in segmentation task 3 | optimizer = dict(type='Adam', lr=0.001, weight_decay=0.01) 4 | optimizer_config = dict(grad_clip=None) 5 | lr_config = dict(policy='CosineAnnealing', warmup=None, min_lr=1e-5) 6 | momentum_config = None 7 | 8 | # runtime settings 9 | runner = dict(type='EpochBasedRunner', max_epochs=200) 10 | -------------------------------------------------------------------------------- /open_loop_training/configs/_base_/schedules/seg_cosine_50e.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | # This schedule is mainly used on S3DIS dataset in segmentation task 3 | optimizer = dict(type='Adam', lr=0.001, weight_decay=0.001) 4 | optimizer_config = dict(grad_clip=None) 5 | lr_config = dict(policy='CosineAnnealing', warmup=None, min_lr=1e-5) 6 | momentum_config = None 7 | 8 | # runtime settings 9 | runner = dict(type='EpochBasedRunner', max_epochs=50) 10 | -------------------------------------------------------------------------------- /open_loop_training/ev_mask.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OpenDriveLab/DriveAdapter/4d480bef06073786c1844757e7734b1bff1ff443/open_loop_training/ev_mask.npy -------------------------------------------------------------------------------- /open_loop_training/ops/voxel_pooling/__init__.py: -------------------------------------------------------------------------------- 1 | from .voxel_pooling import voxel_pooling 2 | 3 | __all__ = ['voxel_pooling'] 4 | -------------------------------------------------------------------------------- /open_loop_training/ops/voxel_pooling/src/voxel_pooling_forward.cpp: 
// Python-facing entry point: validates inputs and launches the CUDA kernel
// that scatter-adds per-point features into a (batch, y, x) BEV grid.
//
// Args (shapes per voxel_pooling_forward_cuda.cu):
//   geom_xyz_tensor: int tensor of packed (x, y, z) voxel coords per point.
//   input_features_tensor: float tensor of per-point features.
//   output_features_tensor: float tensor, zero-initialised accumulator.
//   pos_memo_tensor: int tensor recording (batch, y, x) for each kept point.
//
// NOTE(review): the `data_ptr()` calls below appear to have lost their
// template arguments (likely `<int>` / `<float>` stripped by an HTML/markdown
// export) — confirm against the upstream source before compiling.
int voxel_pooling_forward_wrapper(int batch_size, int num_points, int num_channels, int num_voxel_x, int num_voxel_y, int num_voxel_z, at::Tensor geom_xyz_tensor,
                                  at::Tensor input_features_tensor, at::Tensor output_features_tensor, at::Tensor pos_memo_tensor) {
  // Inputs must be CUDA tensors and contiguous (CHECK_INPUT macro).
  CHECK_INPUT(geom_xyz_tensor);
  CHECK_INPUT(input_features_tensor);
  // Raw device pointers handed to the kernel launcher.
  const int *geom_xyz = geom_xyz_tensor.data_ptr();
  const float *input_features = input_features_tensor.data_ptr();
  float *output_features = output_features_tensor.data_ptr();
  int *pos_memo = pos_memo_tensor.data_ptr();

  // Launch on the current PyTorch CUDA stream so ordering with other ops holds.
  cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
  voxel_pooling_forward_kernel_launcher(batch_size, num_points, num_channels, num_voxel_x, num_voxel_y, num_voxel_z, geom_xyz, input_features,
                                        output_features, pos_memo, stream);
  return 1;
}
class VoxelPooling(Function):
    """Autograd wrapper around the CUDA voxel-pooling kernel.

    Forward scatter-adds per-point features into a BEV grid; backward gathers
    the output gradient back to every point that landed inside the grid.
    """

    @staticmethod
    def forward(ctx, geom_xyz: torch.Tensor, input_features: torch.Tensor,
                voxel_num: torch.Tensor) -> torch.Tensor:
        """Forward function for `voxel pooling.

        Args:
            geom_xyz (Tensor): xyz coord for each voxel with the shape
                of [B, N, 3].
            input_features (Tensor): feature for each voxel with the
                shape of [B, N, C].
            voxel_num (Tensor): Number of voxels for each dim with the
                shape of [3].

        Returns:
            Tensor: (B, C, H, W) bev feature map.
        """
        assert geom_xyz.is_contiguous()
        assert input_features.is_contiguous()
        # no gradient for input_features and geom_feats
        ctx.mark_non_differentiable(geom_xyz)
        grad_input_features = torch.zeros_like(input_features)
        # Flatten any intermediate point dimensions down to [B, N, ...].
        geom_xyz = geom_xyz.reshape(geom_xyz.shape[0], -1, geom_xyz.shape[-1])
        input_features = input_features.reshape(
            (geom_xyz.shape[0], -1, input_features.shape[-1]))
        assert geom_xyz.shape[1] == input_features.shape[1]
        batch_size = input_features.shape[0]
        num_points = input_features.shape[1]
        num_channels = input_features.shape[2]
        output_features = input_features.new_zeros(batch_size, voxel_num[1],
                                                   voxel_num[0], num_channels)
        # Save the position of bev_feature_map for each input point.
        # -1 marks points outside the grid (the kernel leaves them untouched).
        pos_memo = geom_xyz.new_ones(batch_size, num_points, 3) * -1
        voxel_pooling_ext.voxel_pooling_forward_wrapper(
            batch_size,
            num_points,
            num_channels,
            voxel_num[0],
            voxel_num[1],
            voxel_num[2],
            geom_xyz,
            input_features,
            output_features,
            pos_memo,
        )
        # save grad_input_features and pos_memo for backward
        ctx.save_for_backward(grad_input_features, pos_memo)
        return output_features.permute(0, 3, 1, 2)

    @staticmethod
    def backward(ctx, grad_output_features):
        """Gather each point's gradient from its (b, y, x) cell in the output."""
        (grad_input_features, pos_memo) = ctx.saved_tensors
        # Points whose recorded batch entry is still -1 never hit the grid.
        kept = (pos_memo != -1)[..., 0]
        grad_input_features_shape = grad_input_features.shape
        grad_input_features = grad_input_features.reshape(
            grad_input_features.shape[0], -1, grad_input_features.shape[-1])
        grad_input_features[kept] = grad_output_features[
            pos_memo[kept][..., 0].long(), :, pos_memo[kept][..., 1].long(),
            pos_memo[kept][..., 2].long()]
        grad_input_features = grad_input_features.reshape(
            grad_input_features_shape)
        # No gradients for geom_xyz or voxel_num.
        return None, grad_input_features, None


voxel_pooling = VoxelPooling.apply
'-D__CUDA_NO_HALF_CONVERSIONS__', 23 | '-D__CUDA_NO_HALF2_OPERATORS__', 24 | "-gencode=arch=compute_35,code=sm_35", 25 | "-gencode=arch=compute_37,code=sm_37", 26 | "-gencode=arch=compute_50,code=sm_50", 27 | "-gencode=arch=compute_52,code=sm_52", 28 | "-gencode=arch=compute_53,code=sm_53", 29 | "-gencode=arch=compute_60,code=sm_60", 30 | "-gencode=arch=compute_61,code=sm_61", 31 | "-gencode=arch=compute_62,code=sm_62", 32 | "-gencode=arch=compute_75,code=sm_75", 33 | "-gencode=arch=compute_80,code=sm_80", 34 | "-gencode=arch=compute_86,code=sm_86", 35 | ] 36 | sources += sources_cuda 37 | else: 38 | raise EnvironmentError('CUDA is required to compile MMDetection!') 39 | 40 | return extension( 41 | name='{}.{}'.format(module, name), 42 | sources=[os.path.join(*module.split('.'), p) for p in sources], 43 | include_dirs=extra_include_path, 44 | define_macros=define_macros, 45 | extra_compile_args=extra_compile_args, 46 | ) 47 | 48 | setup( 49 | name='voxel_pooling', 50 | ext_modules=[ 51 | make_cuda_ext( 52 | name='voxel_pooling_ext', 53 | module='ops.voxel_pooling', 54 | sources=['src/voxel_pooling_forward.cpp'], 55 | sources_cuda=['src/voxel_pooling_forward_cuda.cu'], 56 | ), 57 | ], 58 | cmdclass={'build_ext': BuildExtension}) 59 | 60 | -------------------------------------------------------------------------------- /roach/config/config_agent.yaml: -------------------------------------------------------------------------------- 1 | entry_point: roach.rl_birdview_agent:RlBirdviewAgent 2 | wb_run_path: null 3 | wb_ckpt_step: null 4 | env_wrapper: 5 | entry_point: roach.utils.rl_birdview_wrapper:RlBirdviewWrapper 6 | kwargs: 7 | input_states: 8 | - control 9 | - vel_xy 10 | acc_as_action: true 11 | policy: 12 | entry_point: roach.models.ppo_policy:PpoPolicy 13 | kwargs: 14 | policy_head_arch: 15 | - 256 16 | - 256 17 | value_head_arch: 18 | - 256 19 | - 256 20 | features_extractor_entry_point: roach.models.torch_layers:XtMaCNN 21 | features_extractor_kwargs: 22 | 
states_neurons: 23 | - 256 24 | - 256 25 | distribution_entry_point: roach.models.distributions:BetaDistribution 26 | distribution_kwargs: 27 | dist_init: null 28 | training: 29 | entry_point: roach.models.ppo:PPO 30 | kwargs: 31 | learning_rate: 1.0e-05 32 | n_steps_total: 12288 33 | batch_size: 256 34 | n_epochs: 20 35 | gamma: 0.99 36 | gae_lambda: 0.9 37 | clip_range: 0.2 38 | clip_range_vf: null 39 | ent_coef: 0.01 40 | explore_coef: 0.05 41 | vf_coef: 0.5 42 | max_grad_norm: 0.5 43 | target_kl: 0.01 44 | update_adv: false 45 | lr_schedule_step: 8 46 | obs_configs: 47 | birdview: 48 | module: birdview.chauffeurnet 49 | width_in_pixels: 192 50 | pixels_ev_to_bottom: 40 51 | pixels_per_meter: 5.0 52 | history_idx: 53 | - -16 54 | - -11 55 | - -6 56 | - -1 57 | scale_bbox: true 58 | scale_mask_col: 1.0 59 | speed: 60 | module: actor_state.speed 61 | control: 62 | module: actor_state.control 63 | velocity: 64 | module: actor_state.velocity 65 | -------------------------------------------------------------------------------- /roach/criteria/blocked.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | class Blocked(): 5 | 6 | def __init__(self, speed_threshold=0.1, below_threshold_max_time=90.0): 7 | self._speed_threshold = speed_threshold 8 | self._below_threshold_max_time = below_threshold_max_time 9 | self._time_last_valid_state = None 10 | 11 | def tick(self, vehicle, timestamp): 12 | info = None 13 | linear_speed = self._calculate_speed(vehicle.get_velocity()) 14 | 15 | if linear_speed < self._speed_threshold and self._time_last_valid_state: 16 | if (timestamp['relative_simulation_time'] - self._time_last_valid_state) > self._below_threshold_max_time: 17 | # The actor has been "blocked" for too long 18 | ev_loc = vehicle.get_location() 19 | info = { 20 | 'step': timestamp['step'], 21 | 'simulation_time': timestamp['relative_simulation_time'], 22 | 'ev_loc': [ev_loc.x, ev_loc.y, ev_loc.z] 23 | } 
24 | else: 25 | self._time_last_valid_state = timestamp['relative_simulation_time'] 26 | return info 27 | 28 | @staticmethod 29 | def _calculate_speed(carla_velocity): 30 | return np.linalg.norm([carla_velocity.x, carla_velocity.y]) 31 | -------------------------------------------------------------------------------- /roach/criteria/encounter_light.py: -------------------------------------------------------------------------------- 1 | from carla_gym.utils.traffic_light import TrafficLightHandler 2 | 3 | 4 | class EncounterLight(): 5 | 6 | def __init__(self, dist_threshold=7.5): 7 | self._last_light_id = None 8 | self._dist_threshold = dist_threshold 9 | 10 | def tick(self, vehicle, timestamp): 11 | info = None 12 | 13 | light_state, light_loc, light_id = TrafficLightHandler.get_light_state( 14 | vehicle, dist_threshold=self._dist_threshold) 15 | 16 | if light_id is not None: 17 | if light_id != self._last_light_id: 18 | self._last_light_id = light_id 19 | info = { 20 | 'step': timestamp['step'], 21 | 'simulation_time': timestamp['relative_simulation_time'], 22 | 'id': light_id, 23 | 'tl_loc': light_loc.tolist() 24 | } 25 | 26 | return info 27 | -------------------------------------------------------------------------------- /roach/criteria/route_deviation.py: -------------------------------------------------------------------------------- 1 | class RouteDeviation(): 2 | 3 | def __init__(self, offroad_min=15, offroad_max=30, max_route_percentage=0.3): 4 | self._offroad_min = offroad_min 5 | self._offroad_max = offroad_max 6 | self._max_route_percentage = max_route_percentage 7 | self._out_route_distance = 0.0 8 | 9 | def tick(self, vehicle, timestamp, ref_waypoint, distance_traveled, route_length): 10 | ev_loc = vehicle.get_location() 11 | 12 | distance = ev_loc.distance(ref_waypoint.transform.location) 13 | 14 | # fail if off_route is True 15 | off_route_max = distance > self._offroad_max 16 | 17 | # fail if off_safe_route more than 30% of total route length 18 | 
off_route_min = False 19 | if distance > self._offroad_min: 20 | self._out_route_distance += distance_traveled 21 | out_route_percentage = self._out_route_distance / route_length 22 | if out_route_percentage > self._max_route_percentage: 23 | off_route_min = True 24 | 25 | info = None 26 | if off_route_max or off_route_min: 27 | info = { 28 | 'step': timestamp['step'], 29 | 'simulation_time': timestamp['relative_simulation_time'], 30 | 'ev_loc': [ev_loc.x, ev_loc.y, ev_loc.z], 31 | 'off_route_max': off_route_max, 32 | 'off_route_min': off_route_min 33 | } 34 | return info 35 | -------------------------------------------------------------------------------- /roach/criteria/run_red_light.py: -------------------------------------------------------------------------------- 1 | import carla 2 | import shapely.geometry 3 | from carla_gym.utils.traffic_light import TrafficLightHandler 4 | 5 | 6 | class RunRedLight(): 7 | 8 | def __init__(self, carla_map, distance_light=30): 9 | self._map = carla_map 10 | self._last_red_light_id = None 11 | self._distance_light = distance_light 12 | 13 | def tick(self, vehicle, timestamp): 14 | ev_tra = vehicle.get_transform() 15 | ev_loc = ev_tra.location 16 | ev_dir = ev_tra.get_forward_vector() 17 | ev_extent = vehicle.bounding_box.extent.x 18 | 19 | tail_close_pt = ev_tra.transform(carla.Location(x=-0.8 * ev_extent)) 20 | tail_far_pt = ev_tra.transform(carla.Location(x=-ev_extent - 1.0)) 21 | tail_wp = self._map.get_waypoint(tail_far_pt) 22 | 23 | info = None 24 | for idx_tl in range(TrafficLightHandler.num_tl): 25 | traffic_light = TrafficLightHandler.list_tl_actor[idx_tl] 26 | tl_tv_loc = TrafficLightHandler.list_tv_loc[idx_tl] 27 | if tl_tv_loc.distance(ev_loc) > self._distance_light: 28 | continue 29 | if traffic_light.state != carla.TrafficLightState.Red: 30 | continue 31 | if self._last_red_light_id and self._last_red_light_id == traffic_light.id: 32 | continue 33 | 34 | for idx_wp in 
range(len(TrafficLightHandler.list_stopline_wps[idx_tl])): 35 | wp = TrafficLightHandler.list_stopline_wps[idx_tl][idx_wp] 36 | wp_dir = wp.transform.get_forward_vector() 37 | dot_ve_wp = ev_dir.x * wp_dir.x + ev_dir.y * wp_dir.y + ev_dir.z * wp_dir.z 38 | 39 | if tail_wp.road_id == wp.road_id and tail_wp.lane_id == wp.lane_id and dot_ve_wp > 0: 40 | # This light is red and is affecting our lane 41 | stop_left_loc, stop_right_loc = TrafficLightHandler.list_stopline_vtx[idx_tl][idx_wp] 42 | # Is the vehicle traversing the stop line? 43 | if self._is_vehicle_crossing_line((tail_close_pt, tail_far_pt), (stop_left_loc, stop_right_loc)): 44 | tl_loc = traffic_light.get_location() 45 | # loc_in_ev = trans_utils.loc_global_to_ref(tl_loc, ev_tra) 46 | self._last_red_light_id = traffic_light.id 47 | info = { 48 | 'step': timestamp['step'], 49 | 'simulation_time': timestamp['relative_simulation_time'], 50 | 'id': traffic_light.id, 51 | 'tl_loc': [tl_loc.x, tl_loc.y, tl_loc.z], 52 | 'ev_loc': [ev_loc.x, ev_loc.y, ev_loc.z] 53 | } 54 | return info 55 | 56 | @staticmethod 57 | def _is_vehicle_crossing_line(seg1, seg2): 58 | """ 59 | check if vehicle crosses a line segment 60 | """ 61 | line1 = shapely.geometry.LineString([(seg1[0].x, seg1[0].y), (seg1[1].x, seg1[1].y)]) 62 | line2 = shapely.geometry.LineString([(seg2[0].x, seg2[0].y), (seg2[1].x, seg2[1].y)]) 63 | inter = line1.intersection(line2) 64 | return not inter.is_empty 65 | -------------------------------------------------------------------------------- /roach/log/ckpt_11833344.pth: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OpenDriveLab/DriveAdapter/4d480bef06073786c1844757e7734b1bff1ff443/roach/log/ckpt_11833344.pth -------------------------------------------------------------------------------- /roach/models/torch_util.py: -------------------------------------------------------------------------------- 1 | import torch as th 2 | from torch 
import nn 3 | import math 4 | import torch.nn.functional as F 5 | 6 | 7 | def NormedLinear(*args, scale=1.0, dtype=th.float32, **kwargs): 8 | """ 9 | nn.Linear but with normalized fan-in init 10 | """ 11 | out = nn.Linear(*args, **kwargs) 12 | out.weight.data *= scale / out.weight.norm(dim=1, p=2, keepdim=True) 13 | if kwargs.get("bias", True): 14 | out.bias.data *= 0 15 | return out 16 | 17 | 18 | def NormedConv2d(*args, scale=1, **kwargs): 19 | """ 20 | nn.Conv2d but with normalized fan-in init 21 | """ 22 | out = nn.Conv2d(*args, **kwargs) 23 | out.weight.data *= scale / out.weight.norm(dim=(1, 2, 3), p=2, keepdim=True) 24 | if kwargs.get("bias", True): 25 | out.bias.data *= 0 26 | return out 27 | 28 | 29 | def intprod(xs): 30 | """ 31 | Product of a sequence of integers 32 | """ 33 | out = 1 34 | for x in xs: 35 | out *= x 36 | return out 37 | 38 | 39 | class CnnBasicBlock(nn.Module): 40 | """ 41 | Residual basic block (without batchnorm), as in ImpalaCNN 42 | Preserves channel number and shape 43 | """ 44 | 45 | def __init__(self, inchan, scale=1, batch_norm=False): 46 | super().__init__() 47 | self.inchan = inchan 48 | self.batch_norm = batch_norm 49 | s = math.sqrt(scale) 50 | self.conv0 = NormedConv2d(self.inchan, self.inchan, 3, padding=1, scale=s) 51 | self.conv1 = NormedConv2d(self.inchan, self.inchan, 3, padding=1, scale=s) 52 | if self.batch_norm: 53 | self.bn0 = nn.BatchNorm2d(self.inchan) 54 | self.bn1 = nn.BatchNorm2d(self.inchan) 55 | 56 | def residual(self, x): 57 | # inplace should be False for the first relu, so that it does not change the input, 58 | # which will be used for skip connection. 
59 | # getattr is for backwards compatibility with loaded models 60 | if getattr(self, "batch_norm", False): 61 | x = self.bn0(x) 62 | x = F.relu(x, inplace=False) 63 | x = self.conv0(x) 64 | if getattr(self, "batch_norm", False): 65 | x = self.bn1(x) 66 | x = F.relu(x, inplace=True) 67 | x = self.conv1(x) 68 | return x 69 | 70 | def forward(self, x): 71 | return x + self.residual(x) 72 | 73 | 74 | class CnnDownStack(nn.Module): 75 | """ 76 | Downsampling stack from Impala CNN 77 | """ 78 | 79 | def __init__(self, inchan, nblock, outchan, scale=1, pool=True, **kwargs): 80 | super().__init__() 81 | self.inchan = inchan 82 | self.outchan = outchan 83 | self.pool = pool 84 | self.firstconv = NormedConv2d(inchan, outchan, 3, padding=1) 85 | s = scale / math.sqrt(nblock) 86 | self.blocks = nn.ModuleList( 87 | [CnnBasicBlock(outchan, scale=s, **kwargs) for _ in range(nblock)] 88 | ) 89 | 90 | def forward(self, x): 91 | x = self.firstconv(x) 92 | if getattr(self, "pool", True): 93 | x = F.max_pool2d(x, kernel_size=3, stride=2, padding=1) 94 | for block in self.blocks: 95 | x = block(x) 96 | return x 97 | 98 | def output_shape(self, inshape): 99 | c, h, w = inshape 100 | assert c == self.inchan 101 | if getattr(self, "pool", True): 102 | return (self.outchan, (h + 1) // 2, (w + 1) // 2) 103 | else: 104 | return (self.outchan, h, w) -------------------------------------------------------------------------------- /roach/obs_manager/actor_state/control.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from gym import spaces 3 | 4 | from roach.obs_manager.obs_manager import ObsManagerBase 5 | 6 | 7 | class ObsManager(): 8 | 9 | def __init__(self, obs_configs): 10 | self._parent_actor = None 11 | def attach_ego_vehicle(self, parent_actor): 12 | self._parent_actor = parent_actor 13 | 14 | def get_observation(self): 15 | control = self._parent_actor.vehicle.get_control() 16 | speed_limit = 
self._parent_actor.vehicle.get_speed_limit() / 3.6 * 0.8 17 | obs = { 18 | 'throttle': np.array([control.throttle], dtype=np.float32), 19 | 'steer': np.array([control.steer], dtype=np.float32), 20 | 'brake': np.array([control.brake], dtype=np.float32), 21 | 'gear': np.array([control.gear], dtype=np.float32), 22 | 'speed_limit': np.array([speed_limit], dtype=np.float32), 23 | } 24 | return obs 25 | 26 | def clean(self): 27 | self._parent_actor = None 28 | -------------------------------------------------------------------------------- /roach/obs_manager/actor_state/route.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from gym import spaces 3 | 4 | from carla_gym.core.obs_manager.obs_manager import ObsManagerBase 5 | import carla_gym.utils.transforms as trans_utils 6 | 7 | 8 | class ObsManager(ObsManagerBase): 9 | 10 | def __init__(self, obs_configs): 11 | self._parent_actor = None 12 | self._route_steps = 5 13 | super(ObsManager, self).__init__() 14 | 15 | def _define_obs_space(self): 16 | self.obs_space = spaces.Dict({ 17 | 'lateral_dist': spaces.Box(low=0.0, high=2.0, shape=(1,), dtype=np.float32), 18 | 'angle_diff': spaces.Box(low=-2.0, high=2.0, shape=(1,), dtype=np.float32), 19 | 'route_locs': spaces.Box(low=-5.0, high=5.0, shape=(self._route_steps*2,), dtype=np.float32), 20 | 'dist_remaining': spaces.Box(low=0.0, high=100, shape=(1,), dtype=np.float32) 21 | }) 22 | 23 | def attach_ego_vehicle(self, parent_actor): 24 | self._parent_actor = parent_actor 25 | 26 | def get_observation(self): 27 | ev_transform = self._parent_actor.vehicle.get_transform() 28 | route_plan = self._parent_actor.route_plan 29 | 30 | # lateral_dist 31 | waypoint, road_option = route_plan[0] 32 | wp_transform = waypoint.transform 33 | 34 | d_vec = ev_transform.location - wp_transform.location 35 | np_d_vec = np.array([d_vec.x, d_vec.y], dtype=np.float32) 36 | wp_unit_forward = wp_transform.rotation.get_forward_vector() 37 | 
np_wp_unit_right = np.array([-wp_unit_forward.y, wp_unit_forward.x], dtype=np.float32) 38 | 39 | lateral_dist = np.abs(np.dot(np_wp_unit_right, np_d_vec)) 40 | lateral_dist = np.clip(lateral_dist, 0, 2) 41 | 42 | # angle_diff 43 | angle_diff = np.deg2rad(np.abs(trans_utils.cast_angle(ev_transform.rotation.yaw - wp_transform.rotation.yaw))) 44 | angle_diff = np.clip(angle_diff, -2, 2) 45 | 46 | # route_locs 47 | location_list = [] 48 | route_length = len(route_plan) 49 | for i in range(self._route_steps): 50 | if i < route_length: 51 | waypoint, road_option = route_plan[i] 52 | else: 53 | waypoint, road_option = route_plan[-1] 54 | 55 | wp_location_world_coord = waypoint.transform.location 56 | wp_location_actor_coord = trans_utils.loc_global_to_ref(wp_location_world_coord, ev_transform) 57 | location_list += [wp_location_actor_coord.x, wp_location_actor_coord.y] 58 | 59 | # dist_remaining_in_km 60 | dist_remaining_in_km = (self._parent_actor.route_length - self._parent_actor.route_completed) / 1000.0 61 | 62 | obs = { 63 | 'lateral_dist': np.array([lateral_dist], dtype=np.float32), 64 | 'angle_diff': np.array([angle_diff], dtype=np.float32), 65 | 'route_locs': np.array(location_list, dtype=np.float32), 66 | 'dist_remaining': np.array([dist_remaining_in_km], dtype=np.float32) 67 | } 68 | return obs 69 | 70 | def clean(self): 71 | self._parent_actor = None 72 | -------------------------------------------------------------------------------- /roach/obs_manager/actor_state/speed.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from gym import spaces 3 | 4 | from carla_gym.core.obs_manager.obs_manager import ObsManagerBase 5 | 6 | 7 | class ObsManager(ObsManagerBase): 8 | """ 9 | in m/s 10 | """ 11 | 12 | def __init__(self, obs_configs): 13 | self._parent_actor = None 14 | super(ObsManager, self).__init__() 15 | 16 | def _define_obs_space(self): 17 | self.obs_space = spaces.Dict({ 18 | 'speed': 
spaces.Box(low=-10.0, high=30.0, shape=(1,), dtype=np.float32), 19 | 'speed_xy': spaces.Box(low=-10.0, high=30.0, shape=(1,), dtype=np.float32), 20 | 'forward_speed': spaces.Box(low=-10.0, high=30.0, shape=(1,), dtype=np.float32) 21 | }) 22 | 23 | def attach_ego_vehicle(self, parent_actor): 24 | self._parent_actor = parent_actor 25 | 26 | def get_observation(self): 27 | velocity = self._parent_actor.vehicle.get_velocity() 28 | transform = self._parent_actor.vehicle.get_transform() 29 | forward_vec = transform.get_forward_vector() 30 | 31 | np_vel = np.array([velocity.x, velocity.y, velocity.z]) 32 | np_fvec = np.array([forward_vec.x, forward_vec.y, forward_vec.z]) 33 | 34 | speed = np.linalg.norm(np_vel) 35 | speed_xy = np.linalg.norm(np_vel[0:2]) 36 | forward_speed = np.dot(np_vel, np_fvec) 37 | 38 | obs = { 39 | 'speed': np.array([speed], dtype=np.float32), 40 | 'speed_xy': np.array([speed_xy], dtype=np.float32), 41 | 'forward_speed': np.array([forward_speed], dtype=np.float32) 42 | } 43 | return obs 44 | 45 | def clean(self): 46 | self._parent_actor = None 47 | -------------------------------------------------------------------------------- /roach/obs_manager/actor_state/velocity.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from gym import spaces 3 | 4 | from carla_gym.core.obs_manager.obs_manager import ObsManagerBase 5 | import carla_gym.utils.transforms as trans_utils 6 | 7 | 8 | class ObsManager(ObsManagerBase): 9 | 10 | def __init__(self, obs_configs): 11 | super(ObsManager, self).__init__() 12 | 13 | def _define_obs_space(self): 14 | # acc_x, acc_y: m/s2 15 | # vel_x, vel_y: m/s 16 | # vel_angular z: rad/s 17 | self.obs_space = spaces.Dict({ 18 | 'acc_xy': spaces.Box(low=-1e3, high=1e3, shape=(2,), dtype=np.float32), 19 | 'vel_xy': spaces.Box(low=-1e2, high=1e2, shape=(2,), dtype=np.float32), 20 | 'vel_ang_z': spaces.Box(low=-1e3, high=1e3, shape=(1,), dtype=np.float32) 21 | }) 22 | 23 | 
def attach_ego_vehicle(self, parent_actor): 24 | self._parent_actor = parent_actor 25 | 26 | def get_observation(self): 27 | ev_transform = self._parent_actor.vehicle.get_transform() 28 | acc_w = self._parent_actor.vehicle.get_acceleration() 29 | vel_w = self._parent_actor.vehicle.get_velocity() 30 | ang_w = self._parent_actor.vehicle.get_angular_velocity() 31 | 32 | acc_ev = trans_utils.vec_global_to_ref(acc_w, ev_transform.rotation) 33 | vel_ev = trans_utils.vec_global_to_ref(vel_w, ev_transform.rotation) 34 | 35 | obs = { 36 | 'acc_xy': np.array([acc_ev.x, acc_ev.y], dtype=np.float32), 37 | 'vel_xy': np.array([vel_ev.x, vel_ev.y], dtype=np.float32), 38 | 'vel_ang_z': np.array([ang_w.z], dtype=np.float32) 39 | } 40 | return obs 41 | 42 | def clean(self): 43 | self._parent_actor = None 44 | -------------------------------------------------------------------------------- /roach/obs_manager/birdview/hdmap_generate.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import carla 3 | from gym import spaces 4 | import cv2 as cv 5 | from collections import deque 6 | from pathlib import Path 7 | import h5py 8 | 9 | 10 | COLOR_BLACK = (0, 0, 0) 11 | COLOR_RED = (255, 0, 0) 12 | COLOR_GREEN = (0, 255, 0) 13 | COLOR_BLUE = (0, 0, 255) 14 | COLOR_CYAN = (0, 255, 255) 15 | COLOR_MAGENTA = (255, 0, 255) 16 | COLOR_MAGENTA_2 = (255, 140, 255) 17 | COLOR_YELLOW = (255, 255, 0) 18 | COLOR_YELLOW_2 = (160, 160, 0) 19 | COLOR_WHITE = (255, 255, 255) 20 | COLOR_ALUMINIUM_0 = (238, 238, 236) 21 | COLOR_ALUMINIUM_3 = (136, 138, 133) 22 | COLOR_ALUMINIUM_5 = (46, 52, 54) 23 | 24 | def tint(color, factor): 25 | r, g, b = color 26 | r = int(r + (255-r) * factor) 27 | g = int(g + (255-g) * factor) 28 | b = int(b + (255-b) * factor) 29 | r = min(r, 255) 30 | g = min(g, 255) 31 | b = min(b, 255) 32 | return (r, g, b) 33 | 34 | -------------------------------------------------------------------------------- 
/roach/obs_manager/birdview/maps/Town01.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OpenDriveLab/DriveAdapter/4d480bef06073786c1844757e7734b1bff1ff443/roach/obs_manager/birdview/maps/Town01.h5 -------------------------------------------------------------------------------- /roach/obs_manager/birdview/maps/Town02.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OpenDriveLab/DriveAdapter/4d480bef06073786c1844757e7734b1bff1ff443/roach/obs_manager/birdview/maps/Town02.h5 -------------------------------------------------------------------------------- /roach/obs_manager/birdview/maps/Town03.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OpenDriveLab/DriveAdapter/4d480bef06073786c1844757e7734b1bff1ff443/roach/obs_manager/birdview/maps/Town03.h5 -------------------------------------------------------------------------------- /roach/obs_manager/birdview/maps/Town04.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OpenDriveLab/DriveAdapter/4d480bef06073786c1844757e7734b1bff1ff443/roach/obs_manager/birdview/maps/Town04.h5 -------------------------------------------------------------------------------- /roach/obs_manager/birdview/maps/Town05.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OpenDriveLab/DriveAdapter/4d480bef06073786c1844757e7734b1bff1ff443/roach/obs_manager/birdview/maps/Town05.h5 -------------------------------------------------------------------------------- /roach/obs_manager/birdview/maps/Town06.h5: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/OpenDriveLab/DriveAdapter/4d480bef06073786c1844757e7734b1bff1ff443/roach/obs_manager/birdview/maps/Town06.h5 -------------------------------------------------------------------------------- /roach/obs_manager/birdview/maps/Town07.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OpenDriveLab/DriveAdapter/4d480bef06073786c1844757e7734b1bff1ff443/roach/obs_manager/birdview/maps/Town07.h5 -------------------------------------------------------------------------------- /roach/obs_manager/birdview/maps/Town10HD.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OpenDriveLab/DriveAdapter/4d480bef06073786c1844757e7734b1bff1ff443/roach/obs_manager/birdview/maps/Town10HD.h5 -------------------------------------------------------------------------------- /roach/rl_birdview_agent.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import numpy as np 3 | from omegaconf import OmegaConf 4 | import copy 5 | 6 | from roach.utils.config_utils import load_entry_point 7 | 8 | 9 | class RlBirdviewAgent(): 10 | def __init__(self, path_to_conf_file='config_agent.yaml', ckpt = None): 11 | 12 | self._render_dict = None 13 | self.supervision_dict = None 14 | self._ckpt = ckpt 15 | self.setup(path_to_conf_file) 16 | 17 | def setup(self, path_to_conf_file): 18 | cfg = OmegaConf.load(path_to_conf_file) 19 | cfg = OmegaConf.to_container(cfg) 20 | 21 | self._obs_configs = cfg['obs_configs'] 22 | self._train_cfg = cfg['training'] 23 | 24 | 25 | self._policy_class = load_entry_point(cfg['policy']['entry_point']) 26 | self._policy_kwargs = cfg['policy']['kwargs'] 27 | if self._ckpt is None: 28 | self._policy = None 29 | else: 30 | self._policy, self._train_cfg['kwargs'] = self._policy_class.load(self._ckpt) 31 | self._policy = self._policy.eval() 32 | 33 | 
self._wrapper_class = load_entry_point(cfg['env_wrapper']['entry_point']) 34 | self._wrapper_kwargs = cfg['env_wrapper']['kwargs'] 35 | 36 | def run_step(self, input_data, timestamp): 37 | input_data = copy.deepcopy(input_data) 38 | 39 | policy_input = self._wrapper_class.process_obs(input_data, self._wrapper_kwargs['input_states'], train=False) 40 | 41 | actions, values, log_probs, mu, sigma, features = self._policy.forward( 42 | policy_input, deterministic=True, clip_action=True) 43 | control = self._wrapper_class.process_act(actions, self._wrapper_kwargs['acc_as_action'], train=False) 44 | self.supervision_dict = { 45 | 'action': np.array([control.throttle, control.steer, control.brake], dtype=np.float32), 46 | 'value': values[0], 47 | 'action_mu': mu[0], 48 | 'action_sigma': sigma[0], 49 | 'features': features[0], 50 | 'speed': input_data['speed']['forward_speed'] 51 | } 52 | self.supervision_dict = copy.deepcopy(self.supervision_dict) 53 | 54 | return control 55 | 56 | @property 57 | def obs_configs(self): 58 | return self._obs_configs 59 | -------------------------------------------------------------------------------- /scenario_runner/CARLA_VER: -------------------------------------------------------------------------------- 1 | HOST = https://carla-releases.s3.eu-west-3.amazonaws.com/Linux 2 | RELEASE=CARLA_0.9.9 3 | -------------------------------------------------------------------------------- /scenario_runner/Dockerfile: -------------------------------------------------------------------------------- 1 | from ubuntu:18.04 2 | 3 | # Install base libs 4 | run apt-get update && apt-get install --no-install-recommends -y libpng16-16=1.6.34-1ubuntu0.18.04.2 \ 5 | libtiff5=4.0.9-5ubuntu0.3 libjpeg8=8c-2ubuntu8 build-essential=12.4ubuntu1 wget=1.19.4-1ubuntu2.2 git=1:2.17.1-1ubuntu0.7 \ 6 | python3.6=3.6.9-1~18.04ubuntu1 python3.6-dev=3.6.9-1~18.04ubuntu1 python3-pip=9.0.1-2.3~ubuntu1.18.04.1 \ 7 | && rm -rf /var/lib/apt/lists/* 8 | 9 | # Install python 
requirements 10 | run pip3 install --user setuptools==46.3.0 wheel==0.34.2 && pip3 install py_trees==0.8.3 networkx==2.2 pygame==1.9.6 \ 11 | six==1.14.0 numpy==1.18.4 psutil==5.7.0 shapely==1.7.0 xmlschema==1.1.3 ephem==3.7.6.0 tabulate==0.8.7\ 12 | && mkdir -p /app/scenario_runner 13 | 14 | # Install scenario_runner 15 | copy . /app/scenario_runner 16 | 17 | # setup environment : 18 | # 19 | # CARLA_HOST : uri for carla package without trailing slash. 20 | # For example, "https://carla-releases.s3.eu-west-3.amazonaws.com/Linux". 21 | # If this environment is not passed to docker build, the value 22 | # is taken from CARLA_VER file inside the repository. 23 | # 24 | # CARLA_RELEASE : Name of the package to be used. For example, "CARLA_0.9.9". 25 | # If this environment is not passed to docker build, the value 26 | # is taken from CARLA_VER file inside the repository. 27 | # 28 | # 29 | # It's expected that $(CARLA_HOST)/$(CARLA_RELEASE).tar.gz is a downloadable resource. 30 | # 31 | 32 | env CARLA_HOST "" 33 | env CARLA_RELEASE "" 34 | 35 | # Extract and install python API and resources from CARLA 36 | run export DEFAULT_CARLA_HOST="$(sed -e 's/^\s*HOST\s*=\s*//;t;d' /app/scenario_runner/CARLA_VER)" && \ 37 | echo "$DEFAULT_CARLA_HOST" && \ 38 | export CARLA_HOST="${CARLA_HOST:-$DEFAULT_CARLA_HOST}" && \ 39 | export DEFAULT_CARLA_RELEASE="$(sed -e 's/^\s*RELEASE\s*=\s*//;t;d' /app/scenario_runner/CARLA_VER)" && \ 40 | export CARLA_RELEASE="${CARLA_RELEASE:-$DEFAULT_CARLA_RELEASE}" && \ 41 | echo "$CARLA_HOST/$CARLA_RELEASE.tar.gz" && \ 42 | wget -qO- "$CARLA_HOST/$CARLA_RELEASE.tar.gz" | tar -xzv PythonAPI/carla -C / && \ 43 | mv /PythonAPI/carla /app/ && \ 44 | python3 -m easy_install --no-find-links --no-deps "$(find /app/carla/ -iname '*py3.*.egg' )" 45 | 46 | 47 | # Setup working environment 48 | workdir /app/scenario_runner 49 | env PYTHONPATH "${PYTHONPATH}:/app/carla/agents:/app/carla" 50 | entrypoint ["/bin/sh" ] 51 | 52 | 
-------------------------------------------------------------------------------- /scenario_runner/Docs/CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as 6 | contributors and maintainers pledge to making participation in our project and 7 | our community a harassment-free experience for everyone, regardless of age, body 8 | size, disability, ethnicity, gender identity and expression, level of experience, 9 | education, socio-economic status, nationality, personal appearance, race, 10 | religion, or sexual identity and orientation. 11 | 12 | ## Our Standards 13 | 14 | Examples of behavior that contributes to creating a positive environment 15 | include: 16 | 17 | * Using welcoming and inclusive language 18 | * Being respectful of differing viewpoints and experiences 19 | * Gracefully accepting constructive criticism 20 | * Focusing on what is best for the community 21 | * Showing empathy towards other community members 22 | 23 | Examples of unacceptable behavior by participants include: 24 | 25 | * The use of sexualized language or imagery and unwelcome sexual attention or 26 | advances 27 | * Trolling, insulting/derogatory comments, and personal or political attacks 28 | * Public or private harassment 29 | * Publishing others' private information, such as a physical or electronic 30 | address, without explicit permission 31 | * Other conduct which could reasonably be considered inappropriate in a 32 | professional setting 33 | 34 | ## Our Responsibilities 35 | 36 | Project maintainers are responsible for clarifying the standards of acceptable 37 | behavior and are expected to take appropriate and fair corrective action in 38 | response to any instances of unacceptable behavior. 
39 | 40 | Project maintainers have the right and responsibility to remove, edit, or 41 | reject comments, commits, code, wiki edits, issues, and other contributions 42 | that are not aligned to this Code of Conduct, or to ban temporarily or 43 | permanently any contributor for other behaviors that they deem inappropriate, 44 | threatening, offensive, or harmful. 45 | 46 | ## Scope 47 | 48 | This Code of Conduct applies both within project spaces and in public spaces 49 | when an individual is representing the project or its community. Examples of 50 | representing a project or community include using an official project e-mail 51 | address, posting via an official social media account, or acting as an appointed 52 | representative at an online or offline event. Representation of a project may be 53 | further defined and clarified by project maintainers. 54 | 55 | ## Enforcement 56 | 57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 58 | reported by contacting the project team at [INSERT EMAIL ADDRESS]. All 59 | complaints will be reviewed and investigated and will result in a response that 60 | is deemed necessary and appropriate to the circumstances. The project team is 61 | obligated to maintain confidentiality with regard to the reporter of an incident. 62 | Further details of specific enforcement policies may be posted separately. 63 | 64 | Project maintainers who do not follow or enforce the Code of Conduct in good 65 | faith may face temporary or permanent repercussions as determined by other 66 | members of the project's leadership. 
67 | 68 | ## Attribution 69 | 70 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 71 | available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html 72 | 73 | [homepage]: https://www.contributor-covenant.org 74 | -------------------------------------------------------------------------------- /scenario_runner/Docs/CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | Contributing to CARLA 2 | ===================== 3 | 4 | We are more than happy to accept contributions! 5 | 6 | How can I contribute? 7 | 8 | * Reporting bugs 9 | * Feature requests 10 | * Improving documentation 11 | * Code contributions 12 | 13 | Reporting bugs 14 | -------------- 15 | 16 | Use our [issue section][issueslink] on GitHub. Please check before that the 17 | issue is not already reported, and make sure you have read our CARLA 18 | [Documentation][docslink] and [FAQ][faqlink]. 19 | 20 | [issueslink]: https://github.com/carla-simulator/scenario_runner/issues 21 | [docslink]: http://carla.readthedocs.io 22 | [faqlink]: http://carla.readthedocs.io/en/latest/faq/ 23 | 24 | Feature requests 25 | ---------------- 26 | 27 | Please check first the list of [feature requests][frlink]. If it is not there 28 | and you think is a feature that might be interesting for users, please submit 29 | your request as a new issue. 30 | 31 | [frlink]: https://github.com/carla-simulator/scenario_runner/issues?q=is%3Aissue+is%3Aopen+label%3A%22feature+request%22+sort%3Acomments-desc 32 | 33 | Improving documentation 34 | ----------------------- 35 | 36 | If you feel something is missing in the documentation, please don't hesitate to 37 | open an issue to let us know. Even better, if you think you can improve it 38 | yourself, it would be a great contribution to the community! 39 | 40 | We build our documentation with [MkDocs](http://www.mkdocs.org/) based on the 41 | Markdown files inside the "Docs" folder. 
You can either directly modify them on 42 | GitHub or locally in your machine. 43 | 44 | Once you are done with your changes, please submit a pull-request. 45 | 46 | **TIP:** You can build and serve it locally by running `mkdocs` in the project's 47 | main folder 48 | 49 | $ sudo pip install mkdocs 50 | $ mkdocs serve 51 | 52 | Code contributions 53 | ------------------ 54 | 55 | So you are considering making a code contribution, great! we love to have 56 | contributions from the community. 57 | 58 | Before starting hands-on on coding, please check out our 59 | [issue board][wafflelink] to see if we are already working on that, it would 60 | be a pity putting an effort into something just to discover that someone else 61 | was already working on that. In case of doubt or to discuss how to proceed, 62 | please contact one of us (or send an email to carla.simulator@gmail.com). 63 | 64 | [wafflelink]: https://waffle.io/carla-simulator/scenario_runner 65 | 66 | #### What should I know before I get started? 67 | 68 | Check out the ["CARLA Documentation"][docslink] to get an idea on CARLA. In 69 | addition you may want to check the [Getting started](getting_started.md) document. 70 | 71 | [docslink]: http://carla.readthedocs.io 72 | 73 | #### Coding standard 74 | 75 | Please follow the current [coding standard](coding_standard.md) when submitting 76 | new code. 77 | 78 | #### Pull-requests 79 | 80 | Once you think your contribution is ready to be added to CARLA, please submit a 81 | pull-request. 82 | 83 | Try to be as descriptive as possible when filling the pull-request description. 84 | Adding images and gifs may help people to understand your changes or new 85 | features. 86 | 87 | Please note that there are some checks that the new code is required to pass 88 | before we can do the merge. The checks are automatically run by the continuous 89 | integration system, you will see a green tick mark if all the checks succeeded. 
90 | If you see a red mark, please correct your code accordingly. 91 | 92 | ###### Checklist 93 | 94 | - [ ] Your branch is up-to-date with the `master` branch and tested with latest changes 95 | - [ ] Extended the README / documentation, if necessary 96 | - [ ] Code compiles correctly 97 | -------------------------------------------------------------------------------- /scenario_runner/Docs/FAQ.md: -------------------------------------------------------------------------------- 1 | # Frequently Asked Questions 2 | 3 | ## I receive the error "TypeError: 'instancemethod' object has no attribute '__getitem__'" in the agent navigation 4 | 5 | This issue is most likely caused by an outdated version of the Python Networkx package. Please remove the current installation 6 | (e.g. sudo apt-get remove python-networkx) and install it using "pip install --user networkx==2.2". 7 | 8 | ## No scenario visible and I receive the message "No more scenarios .... Exiting" 9 | 10 | In case you receive the following output 11 | ``` 12 | Preparing scenario: FollowLeadingVehicle_1 13 | ScenarioManager: Running scenario FollowVehicle 14 | Resetting ego-vehicle! 15 | Failure! 16 | Resetting ego-vehicle! 17 | ERROR: failed to destroy actor 527 : unable to destroy actor: not found 18 | No more scenarios .... Exiting 19 | ``` 20 | and you see nothing happening, it is most likely due to the fact, that you did not launch a program to control 21 | the ego vehicle. Run for example manual_control.py, and you should now see something happening. 
22 | 23 | 24 | ## Scenario Runner exits with error when using --debug commandline parameter 25 | 26 | In case you receive the following output 27 | ``` 28 | UnicodeEncodeError: 'ascii' codec can't encode character '\u2713' in position 58: ordinal not in range(128) 29 | ``` 30 | Please set environment variable 31 | ``` 32 | PYTHONIOENCODING=utf-8 33 | ``` 34 | 35 | -------------------------------------------------------------------------------- /scenario_runner/Docs/coding_standard.md: -------------------------------------------------------------------------------- 1 |

Coding standard

2 | 3 | > _This document is a work in progress and might be incomplete._ 4 | 5 | General 6 | ------- 7 | 8 | * Use spaces, not tabs. 9 | * Avoid adding trailing whitespace as it creates noise in the diffs. 10 | * Comments should not exceed 120 columns, code may exceed this limit a bit in 11 | rare occasions if it results in clearer code. 12 | 13 | Python 14 | ------ 15 | 16 | * All code must be compatible with Python 2.7, 3.5, and 3.6. 17 | * [Pylint][pylintlink] should not give any error or warning (few exceptions 18 | apply with external classes like `numpy`, see our `.pylintrc`). 19 | * Python code follows [PEP8 style guide][pep8link] (use `autopep8` whenever 20 | possible). 21 | 22 | [pylintlink]: https://www.pylint.org/ 23 | [pep8link]: https://www.python.org/dev/peps/pep-0008/ 24 | 25 | C++ 26 | --- 27 | 28 | * Compilation should not give any error or warning 29 | (`clang++ -Wall -Wextra -std=C++14 -Wno-missing-braces`). 30 | * Unreal C++ code (CarlaUE4 and Carla plugin) follow the 31 | [Unreal Engine's Coding Standard][ue4link] with the exception of using 32 | spaces instead of tabs. 33 | * LibCarla uses a variation of [Google's style guide][googlelink]. 
34 | 35 | [ue4link]: https://docs.unrealengine.com/latest/INT/Programming/Development/CodingStandard/ 36 | [googlelink]: https://google.github.io/styleguide/cppguide.html 37 | -------------------------------------------------------------------------------- /scenario_runner/Docs/extra.css: -------------------------------------------------------------------------------- 1 | .build-buttons{ 2 | text-align: center; 3 | } 4 | 5 | .build-buttons > p { 6 | display: inline-block; 7 | vertical-align: top; 8 | padding: 5px; 9 | } 10 | 11 | .vector-zero { 12 | text-align: center; 13 | } 14 | 15 | 16 | /************************* DEFAULT TABLES **************************/ 17 | 18 | table.defTable { 19 | border: 1px solid #242424; 20 | background-color: #f3f6f6; 21 | text-align: left; 22 | border-collapse: collapse; 23 | } 24 | 25 | table.defTable thead { 26 | background: #ffffff; 27 | border-bottom: 1px solid #444444; 28 | } 29 | 30 | table.defTable tr:nth-child(even) { 31 | background: #ffffff; 32 | } 33 | 34 | table.defTable thead th { 35 | padding: 7px 13px; 36 | } 37 | 38 | table.defTable tbody td{ 39 | padding: 7px 13px; 40 | } 41 | 42 | /************************* TOWN SLIDER **************************/ 43 | 44 | * {box-sizing:border-box} 45 | 46 | /* Container */ 47 | .townslider-container { 48 | max-width: 1000px; 49 | position: relative; 50 | margin: auto; 51 | } 52 | 53 | /* Hide the images by default */ 54 | .townslide { 55 | display: none; 56 | text-align: center; 57 | 58 | } 59 | 60 | /* Fading animation for slides */ 61 | .fade { 62 | -webkit-animation-name: fade; 63 | -webkit-animation-duration: 1.5s; 64 | animation-name: fade; 65 | animation-duration: 1.5s; 66 | } 67 | 68 | @-webkit-keyframes fade { 69 | from {opacity: .4} 70 | to {opacity: 1} 71 | } 72 | 73 | @keyframes fade { 74 | from {opacity: .4} 75 | to {opacity: 1} 76 | } 77 | 78 | /* "next" and "previous" buttons */ 79 | .prev, .next { 80 | cursor: pointer; 81 | position: absolute; 82 | top: 50%; 83 | 
width: auto; 84 | margin-top: -22px; 85 | padding: 16px; 86 | color: white; 87 | font-weight: bold; 88 | font-size: 18px; 89 | transition: 0.6s ease; 90 | border-radius: 0 3px 3px 0; 91 | user-select: none; 92 | } 93 | 94 | /* Position the "next" button*/ 95 | .next { 96 | right: 0; 97 | border-radius: 3px 0 0 3px; 98 | } 99 | 100 | /* Black background color to buttons when hovering*/ 101 | .prev:hover, .next:hover { 102 | background-color: rgba(0,0,0,0.8); 103 | } 104 | 105 | /* Caption text for towns */ 106 | .text { 107 | color: #f2f2f2; 108 | font-size: 15px; 109 | padding: 8px 12px; 110 | position: absolute; 111 | bottom: 8px; 112 | width: 100%; 113 | text-align: center; 114 | /*background-color:rgba(0,0,0,0.5);*/ 115 | } 116 | 117 | /* The dot indicators for slides */ 118 | .dot { 119 | cursor: pointer; 120 | height: 15px; 121 | width: 15px; 122 | margin: 0 2px; 123 | background-color: #bbb; 124 | border-radius: 50%; 125 | display: inline-block; 126 | transition: background-color 0.6s ease; 127 | } 128 | 129 | .active, .dot:hover { 130 | background-color: #717171; 131 | } 132 | 133 | -------------------------------------------------------------------------------- /scenario_runner/Docs/img/OSC_catalogs.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OpenDriveLab/DriveAdapter/4d480bef06073786c1844757e7734b1bff1ff443/scenario_runner/Docs/img/OSC_catalogs.png -------------------------------------------------------------------------------- /scenario_runner/Docs/img/OSC_entities_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OpenDriveLab/DriveAdapter/4d480bef06073786c1844757e7734b1bff1ff443/scenario_runner/Docs/img/OSC_entities_1.png -------------------------------------------------------------------------------- /scenario_runner/Docs/img/OSC_entities_2.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/OpenDriveLab/DriveAdapter/4d480bef06073786c1844757e7734b1bff1ff443/scenario_runner/Docs/img/OSC_entities_2.png -------------------------------------------------------------------------------- /scenario_runner/Docs/img/OSC_main.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OpenDriveLab/DriveAdapter/4d480bef06073786c1844757e7734b1bff1ff443/scenario_runner/Docs/img/OSC_main.png -------------------------------------------------------------------------------- /scenario_runner/Docs/img/OSC_params.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OpenDriveLab/DriveAdapter/4d480bef06073786c1844757e7734b1bff1ff443/scenario_runner/Docs/img/OSC_params.png -------------------------------------------------------------------------------- /scenario_runner/Docs/img/OSC_roadnetwork.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OpenDriveLab/DriveAdapter/4d480bef06073786c1844757e7734b1bff1ff443/scenario_runner/Docs/img/OSC_roadnetwork.png -------------------------------------------------------------------------------- /scenario_runner/Docs/img/OSC_storyboard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OpenDriveLab/DriveAdapter/4d480bef06073786c1844757e7734b1bff1ff443/scenario_runner/Docs/img/OSC_storyboard.png -------------------------------------------------------------------------------- /scenario_runner/Docs/img/OSC_storyboard_endconditions.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/OpenDriveLab/DriveAdapter/4d480bef06073786c1844757e7734b1bff1ff443/scenario_runner/Docs/img/OSC_storyboard_endconditions.png -------------------------------------------------------------------------------- /scenario_runner/Docs/img/OSC_storyboard_event.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OpenDriveLab/DriveAdapter/4d480bef06073786c1844757e7734b1bff1ff443/scenario_runner/Docs/img/OSC_storyboard_event.png -------------------------------------------------------------------------------- /scenario_runner/Docs/img/OSC_storyboard_init_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OpenDriveLab/DriveAdapter/4d480bef06073786c1844757e7734b1bff1ff443/scenario_runner/Docs/img/OSC_storyboard_init_1.png -------------------------------------------------------------------------------- /scenario_runner/Docs/img/OSC_storyboard_init_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OpenDriveLab/DriveAdapter/4d480bef06073786c1844757e7734b1bff1ff443/scenario_runner/Docs/img/OSC_storyboard_init_2.png -------------------------------------------------------------------------------- /scenario_runner/Docs/img/OSC_storyboard_story.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OpenDriveLab/DriveAdapter/4d480bef06073786c1844757e7734b1bff1ff443/scenario_runner/Docs/img/OSC_storyboard_story.png -------------------------------------------------------------------------------- /scenario_runner/Docs/img/metrics_example.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OpenDriveLab/DriveAdapter/4d480bef06073786c1844757e7734b1bff1ff443/scenario_runner/Docs/img/metrics_example.jpg 
-------------------------------------------------------------------------------- /scenario_runner/Docs/img/metrics_example.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OpenDriveLab/DriveAdapter/4d480bef06073786c1844757e7734b1bff1ff443/scenario_runner/Docs/img/metrics_example.png -------------------------------------------------------------------------------- /scenario_runner/Docs/img/scenario_runner_video.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OpenDriveLab/DriveAdapter/4d480bef06073786c1844757e7734b1bff1ff443/scenario_runner/Docs/img/scenario_runner_video.png -------------------------------------------------------------------------------- /scenario_runner/Docs/index.md: -------------------------------------------------------------------------------- 1 | [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) 2 | ![GitHub tag (latest SemVer)](https://img.shields.io/github/tag/carla-simulator/scenario_runner.svg) 3 | [![Build Status](https://travis-ci.com/carla-simulator/scenario_runner.svg?branch=master)](https://travis-ci.com/carla/scenario_runner) 4 | 5 | # ScenarioRunner 6 | 7 | ScenarioRunner is a module that allows traffic scenario definition and execution for the [CARLA](http://carla.org/ ) simulator. The scenarios can be defined through a Python interface or using the [OpenSCENARIO](http://www.openscenario.org/) standard. 8 | 9 | ScenarioRunner can also be used to prepare AD agents for their evaluation, by easily creating complex traffic scenarios and routes for the agents to navigate through. These results can be validated and shared in the [CARLA Leaderboard](https://leaderboard.carla.org/), an open platform for the community to fairly compare their progress, evaluating agents in realistic traffic situations.
10 | 11 | 12 | The CARLA forum has a specific section regarding ScenarioRunner, for users to post any doubts or suggestions that may arise during the reading of this documentation. 13 | 17 | 18 | --- 19 | 20 | ## Quick start 21 |

22 | [__Get ScenarioRunner__](getting_scenariorunner.md) 23 | — Tutorial on how to download and launch ScenarioRunner. 24 | [__First steps__](getting_started.md) 25 | — Brief tutorials on how to run different types of scenarios. 26 | [__Create a new scenario__](creating_new_scenario.md) 27 | — Tutorial on how to create a new scenario using ScenarioRunner. 28 | [__Metrics module__](metrics_module.md) 29 | — Explanation of the metrics module. 30 | [__F.A.Q.__](FAQ.md) 31 | — Some of the most frequent installation issues. 32 | [__Release notes__](CHANGELOG.md) 33 | — Features, fixes and other changes listed per release. 34 |

35 | 36 | ## References 37 |

38 | [__List of scenarios__](list_of_scenarios.md) 39 | — Example scenarios available in ScenarioRunner. 40 | [__OpenScenario support__](getting_started.md) 41 | — Support status of OpenSCENARIO features. 42 |

43 | 44 | ## Contributing 45 |

46 | [__Code of conduct__](CODE_OF_CONDUCT.md) 47 | — Standard rights and duties for contributors. 48 | [__Coding standard__](coding_standard.md) 49 | — Guidelines to write proper code. 50 | [__Contribution guidelines__](CONTRIBUTING.md) 51 | — The different ways to contribute to ScenarioRunner. 52 |

53 | 54 | -------------------------------------------------------------------------------- /scenario_runner/Docs/requirements.txt: -------------------------------------------------------------------------------- 1 | mkdocs >= 1.0 2 | markdown-include 3 | mkdocs-redirects 4 | -------------------------------------------------------------------------------- /scenario_runner/Docs/ros_agent.md: -------------------------------------------------------------------------------- 1 | # ROS-based Challenge Agent 2 | 3 | Interfacing CARLA from ROS is normally done via [CARLA ROS Bridge](https://github.com/carla-simulator/ros-bridge). 4 | In Challenge Mode this bridging functionality is provided by a RosAgent. It uses the same topics and message-types for the sensors but does not publish tf-transformations. 5 | 6 | # Requirements 7 | 8 | * `roscore` is expected to be running in the docker container. Please adapt your entrypoint. 9 | 10 | ## Setup 11 | 12 | To enable your stack within challenge mode, the following steps need to be taken: 13 | 14 | 1. Define Sensor Setup 15 | 2. Define Startup 16 | 17 | ### Define Sensor Setup 18 | 19 | Derive from RosAgent and implement the sensors() method. 20 | 21 | from srunner.autoagents.ros_agent import RosAgent 22 | 23 | class MyRosAgent(RosAgent): 24 | 25 | def sensors(self): 26 | return [ ] 27 | 28 | As an example for the sensor definition, see [HumanAgent.py](../srunner/autoagents/human_agent.py). 29 | 30 | 31 | ### Define Startup 32 | 33 | The startup of the stack is done within the shell script `$TEAM_CODE_ROOT/start.sh`. 34 | Therefore the environment variable `TEAM_CODE_ROOT` must be set. 35 | 36 | RosAgent takes care of executing and monitoring. The script shall remain running as long as the stack is active. 37 | 38 | Example for start.sh 39 | 40 | #!/bin/bash -e 41 | roslaunch $TEAM_CODE_ROOT/challenge.launch 42 | 43 | 44 | ## Testing 45 | 46 | In general, the challenge execution is headless. 
For diagnosis you're still able to use ros-tools like rviz or rqt. Additionally you 47 | can use [carla_manual_control](https://github.com/carla-simulator/ros-bridge/tree/master/carla_manual_control) from the carla_ros_bridge for visualization (and also controlling the vehicle). 48 | 49 | -------------------------------------------------------------------------------- /scenario_runner/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 CARLA 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /scenario_runner/mkdocs.yml: -------------------------------------------------------------------------------- 1 | site_name: CARLA ScenarioRunner 2 | repo_url: https://github.com/carla-simulator/scenario_runner 3 | docs_dir: Docs 4 | edit_uri: 'edit/master/Docs/' 5 | theme: readthedocs 6 | extra_css: [extra.css] 7 | 8 | nav: 9 | - Home: index.md 10 | - Quick start: 11 | - Get ScenarioRunner: getting_scenariorunner.md 12 | - First steps: getting_started.md 13 | - Create a new scenario: creating_new_scenario.md 14 | - Metrics module: metrics_module.md 15 | - FAQ: FAQ.md 16 | - Release Notes: CHANGELOG.md 17 | - References: 18 | - List of scenarios: list_of_scenarios.md 19 | - OpenSCENARIO support: openscenario_support.md 20 | - Contributing: 21 | - Code of conduct: CODE_OF_CONDUCT.md 22 | - Coding standard: coding_standard.md 23 | - Contribution guidelines: CONTRIBUTING.md 24 | 25 | markdown_extensions: 26 | - admonition 27 | - markdown_include.include: 28 | base_path: '.' 
29 | 30 | -------------------------------------------------------------------------------- /scenario_runner/requirements.txt: -------------------------------------------------------------------------------- 1 | py-trees==0.8.3 2 | networkx==2.2 3 | Shapely==1.6.4.post2 4 | psutil 5 | xmlschema==1.0.18 6 | carla 7 | ephem 8 | tabulate 9 | opencv-python==4.2.0.32 10 | numpy 11 | matplotlib 12 | six 13 | -------------------------------------------------------------------------------- /scenario_runner/srunner/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OpenDriveLab/DriveAdapter/4d480bef06073786c1844757e7734b1bff1ff443/scenario_runner/srunner/__init__.py -------------------------------------------------------------------------------- /scenario_runner/srunner/autoagents/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OpenDriveLab/DriveAdapter/4d480bef06073786c1844757e7734b1bff1ff443/scenario_runner/srunner/autoagents/__init__.py -------------------------------------------------------------------------------- /scenario_runner/srunner/autoagents/dummy_agent.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # This work is licensed under the terms of the MIT license. 4 | # For a copy, see . 
5 | 6 | """ 7 | This module provides a dummy agent to control the ego vehicle 8 | """ 9 | 10 | from __future__ import print_function 11 | 12 | import carla 13 | 14 | from srunner.autoagents.autonomous_agent import AutonomousAgent 15 | 16 | 17 | class DummyAgent(AutonomousAgent): 18 | 19 | """ 20 | Dummy autonomous agent to control the ego vehicle 21 | """ 22 | 23 | def setup(self, path_to_conf_file): 24 | """ 25 | Setup the agent parameters 26 | """ 27 | 28 | def sensors(self): 29 | """ 30 | Define the sensor suite required by the agent 31 | 32 | :return: a list containing the required sensors in the following format: 33 | 34 | [ 35 | {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': -0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0, 36 | 'width': 300, 'height': 200, 'fov': 100, 'id': 'Left'}, 37 | 38 | {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': 0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0, 39 | 'width': 300, 'height': 200, 'fov': 100, 'id': 'Right'}, 40 | 41 | {'type': 'sensor.lidar.ray_cast', 'x': 0.7, 'y': 0.0, 'z': 1.60, 'yaw': 0.0, 'pitch': 0.0, 'roll': 0.0, 42 | 'id': 'LIDAR'} 43 | 44 | 45 | """ 46 | sensors = [{'type': 'sensor.camera.rgb', 'x': 0.7, 'y': 0.0, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0, 47 | 'width': 800, 'height': 600, 'fov': 100, 'id': 'Center'}, 48 | {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': -0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 49 | 'yaw': -45.0, 'width': 800, 'height': 600, 'fov': 100, 'id': 'Left'}, 50 | {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': 0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 45.0, 51 | 'width': 800, 'height': 600, 'fov': 100, 'id': 'Right'}, 52 | {'type': 'sensor.lidar.ray_cast', 'x': 0.7, 'y': -0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 53 | 'yaw': -45.0, 'id': 'LIDAR'}, 54 | {'type': 'sensor.other.gnss', 'x': 0.7, 'y': -0.4, 'z': 1.60, 'id': 'GPS'}, 55 | {'type': 'sensor.can_bus', 'reading_frequency': 25, 'id': 'can_bus'}, 56 | {'type': 'sensor.hd_map', 'reading_frequency': 1, 'id': 
'hdmap'}, 57 | ] 58 | 59 | return sensors 60 | 61 | def run_step(self, input_data, timestamp): 62 | """ 63 | Execute one step of navigation. 64 | """ 65 | print("=====================>") 66 | for key, val in input_data.items(): 67 | if hasattr(val[1], 'shape'): 68 | shape = val[1].shape 69 | print("[{} -- {:06d}] with shape {}".format(key, val[0], shape)) 70 | else: 71 | print("[{} -- {:06d}] ".format(key, val[0])) 72 | print("<=====================") 73 | 74 | # DO SOMETHING SMART 75 | 76 | # RETURN CONTROL 77 | control = carla.VehicleControl() 78 | control.steer = 0.0 79 | control.throttle = 0.0 80 | control.brake = 0.0 81 | control.hand_brake = False 82 | 83 | return control 84 | -------------------------------------------------------------------------------- /scenario_runner/srunner/autoagents/npc_agent.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # This work is licensed under the terms of the MIT license. 4 | # For a copy, see . 
5 | 6 | """ 7 | This module provides an NPC agent to control the ego vehicle 8 | """ 9 | 10 | from __future__ import print_function 11 | 12 | import carla 13 | from agents.navigation.basic_agent import BasicAgent 14 | 15 | from srunner.autoagents.autonomous_agent import AutonomousAgent 16 | from srunner.scenariomanager.carla_data_provider import CarlaDataProvider 17 | 18 | 19 | class NpcAgent(AutonomousAgent): 20 | 21 | """ 22 | NPC autonomous agent to control the ego vehicle 23 | """ 24 | 25 | _agent = None 26 | _route_assigned = False 27 | 28 | def setup(self, path_to_conf_file): 29 | """ 30 | Setup the agent parameters 31 | """ 32 | 33 | self._route_assigned = False 34 | self._agent = None 35 | 36 | def sensors(self): 37 | """ 38 | Define the sensor suite required by the agent 39 | 40 | :return: a list containing the required sensors in the following format: 41 | 42 | [ 43 | {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': -0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0, 44 | 'width': 300, 'height': 200, 'fov': 100, 'id': 'Left'}, 45 | 46 | {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': 0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0, 47 | 'width': 300, 'height': 200, 'fov': 100, 'id': 'Right'}, 48 | 49 | {'type': 'sensor.lidar.ray_cast', 'x': 0.7, 'y': 0.0, 'z': 1.60, 'yaw': 0.0, 'pitch': 0.0, 'roll': 0.0, 50 | 'id': 'LIDAR'} 51 | 52 | 53 | """ 54 | 55 | sensors = [ 56 | {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': -0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0, 57 | 'width': 300, 'height': 200, 'fov': 100, 'id': 'Left'}, 58 | ] 59 | 60 | return sensors 61 | 62 | def run_step(self, input_data, timestamp): 63 | """ 64 | Execute one step of navigation. 
65 | """ 66 | control = carla.VehicleControl() 67 | control.steer = 0.0 68 | control.throttle = 0.0 69 | control.brake = 0.0 70 | control.hand_brake = False 71 | 72 | if not self._agent: 73 | hero_actor = None 74 | for actor in CarlaDataProvider.get_world().get_actors(): 75 | if 'role_name' in actor.attributes and actor.attributes['role_name'] == 'hero': 76 | hero_actor = actor 77 | break 78 | if hero_actor: 79 | self._agent = BasicAgent(hero_actor) 80 | 81 | return control 82 | 83 | if not self._route_assigned: 84 | if self._global_plan: 85 | plan = [] 86 | 87 | for transform, road_option in self._global_plan_world_coord: 88 | wp = CarlaDataProvider.get_map().get_waypoint(transform.location) 89 | plan.append((wp, road_option)) 90 | 91 | self._agent._local_planner.set_global_plan(plan) # pylint: disable=protected-access 92 | self._route_assigned = True 93 | 94 | else: 95 | control = self._agent.run_step() 96 | 97 | return control 98 | -------------------------------------------------------------------------------- /scenario_runner/srunner/data/no_scenarios.json: -------------------------------------------------------------------------------- 1 | { 2 | "available_scenarios": [ 3 | { 4 | } 5 | ] 6 | } -------------------------------------------------------------------------------- /scenario_runner/srunner/examples/ChangeLane.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | -------------------------------------------------------------------------------- /scenario_runner/srunner/examples/ControlLoss.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 
-------------------------------------------------------------------------------- /scenario_runner/srunner/examples/CutIn.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | -------------------------------------------------------------------------------- /scenario_runner/srunner/examples/FreeRide.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | -------------------------------------------------------------------------------- /scenario_runner/srunner/examples/LeadingVehicle.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | -------------------------------------------------------------------------------- /scenario_runner/srunner/examples/NoSignalJunction.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | -------------------------------------------------------------------------------- /scenario_runner/srunner/examples/ObjectCrossing.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | -------------------------------------------------------------------------------- /scenario_runner/srunner/examples/OppositeDirection.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 
-------------------------------------------------------------------------------- /scenario_runner/srunner/examples/RunningRedLight.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | -------------------------------------------------------------------------------- /scenario_runner/srunner/examples/SignalizedJunctionLeftTurn.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | -------------------------------------------------------------------------------- /scenario_runner/srunner/examples/SignalizedJunctionRightTurn.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | -------------------------------------------------------------------------------- /scenario_runner/srunner/examples/VehicleTurning.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | -------------------------------------------------------------------------------- /scenario_runner/srunner/examples/catalogs/ControllerCatalog.xosc: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | -------------------------------------------------------------------------------- /scenario_runner/srunner/examples/catalogs/EnvironmentCatalog.xosc: 
-------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | -------------------------------------------------------------------------------- /scenario_runner/srunner/examples/catalogs/ManeuverCatalog.xosc: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | -------------------------------------------------------------------------------- /scenario_runner/srunner/examples/catalogs/MiscObjectCatalog.xosc: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
9 | 10 | 11 | 12 | 13 | 14 | 15 | -------------------------------------------------------------------------------- /scenario_runner/srunner/examples/catalogs/PedestrianCatalog.xosc: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 |
8 | 9 | 10 | 11 | 12 | 13 | 14 | -------------------------------------------------------------------------------- /scenario_runner/srunner/examples/catalogs/VehicleCatalog.xosc: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 |
27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | -------------------------------------------------------------------------------- /scenario_runner/srunner/metrics/data/DistanceBetweenVehicles_criteria.json: -------------------------------------------------------------------------------- 1 | { 2 | "CollisionTest": { 3 | "children": [], 4 | "feedback_message": "", 5 | "blackbox_level": 4, 6 | "_terminate_on_failure": false, 7 | "test_status": "SUCCESS", 8 | "expected_value_success": 0, 9 | "expected_value_acceptable": null, 10 | "actual_value": 0, 11 | "optional": false, 12 | "list_traffic_events": [], 13 | "_collision_sensor": null, 14 | "other_actor": null, 15 | "other_actor_type": null, 16 | "registered_collisions": [], 17 | "last_id": null, 18 | "collision_time": null, 19 | "terminate_on_failure": false 20 | } 21 | } -------------------------------------------------------------------------------- /scenario_runner/srunner/metrics/data/DistanceToLaneCenter_criteria.json: -------------------------------------------------------------------------------- 1 | { 2 | "CollisionTest": { 3 | "children": [], 4 | "feedback_message": "", 5 | "blackbox_level": 4, 6 | "_terminate_on_failure": false, 7 | "test_status": "SUCCESS", 8 | "expected_value_success": 0, 9 | "expected_value_acceptable": null, 10 | "actual_value": 0, 11 | "optional": false, 12 | "list_traffic_events": [], 13 | "_collision_sensor": null, 14 | "other_actor": null, 15 | "other_actor_type": null, 16 | "registered_collisions": [], 17 | "last_id": null, 18 | "collision_time": null, 19 | "terminate_on_failure": false 20 | } 21 | } -------------------------------------------------------------------------------- /scenario_runner/srunner/metrics/examples/basic_metric.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright (c) 2020 Computer Vision Center (CVC) at the Universitat Autonoma de 4 | # Barcelona (UAB). 
class BasicMetric(object):
    """
    Base class of all the metrics.
    """

    def __init__(self, town_map, log, criteria=None):
        """
        Initialization of the metric class. This calls the metrics log and creates the metrics

        Args:
            town_map (carla.Map): Map of the simulation. Used to access the Waypoint API.
            log (srunner.metrics.tools.Metricslog): instance of a class used to access the recorder information
            criteria (dict): list of dictionaries with all the criteria information
        """

        # Create the metrics of the simulation. This part is left to the user
        self._create_metric(town_map, log, criteria)

    def _create_metric(self, town_map, log, criteria):
        """
        Pure virtual function to setup the metrics by the user.

        Args:
            town_map (carla.Map): Map of the simulation. Used to access the Waypoint API.
            log (srunner.metrics.tools.Metricslog): instance of a class used to access the recorder information
            criteria (dict): dictionaries with all the criteria information

        Raises:
            NotImplementedError: always; subclasses must override this method.
        """
        # The original message was two adjacent string literals with no
        # separator, rendering as "...all metricsIf this error...".
        raise NotImplementedError(
            "This function should be re-implemented by all metrics. "
            "If this error becomes visible the class hierarchy is somehow broken")
class CriteriaFilter(BasicMetric):
    """
    Metric that distills each criterion down to its test status, actual value
    and expected success value, and writes the result to a JSON file.
    """

    def _create_metric(self, town_map, log, criteria):
        """
        Filter the criteria dictionaries, keeping only the useful fields,
        and dump them to srunner/metrics/data/CriteriaFilter_results.json.
        """
        results = {
            name: {
                "test_status": data["test_status"],
                "actual_value": data["actual_value"],
                "success_value": data["expected_value_success"],
            }
            for name, data in criteria.items()
        }

        with open('srunner/metrics/data/CriteriaFilter_results.json', 'w') as fw:
            json.dump(results, fw, sort_keys=False, indent=4)
class DistanceBetweenVehicles(BasicMetric):
    """
    Example metric that plots the distance between the ego vehicle and the
    scenario adversary over the frames in which both actors were alive.
    """

    def _create_metric(self, town_map, log, criteria):
        """
        Compute the ego-to-adversary distance per frame from the recorder log
        and show the resulting curve with matplotlib.
        """
        # Identify both actors (the adversary could also be found by type_id)
        ego_id = log.get_ego_vehicle_id()
        adv_id = log.get_actor_ids_with_role_name("scenario")[0]

        # Overlapping frame interval in which both actors existed
        ego_start, ego_end = log.get_actor_alive_frames(ego_id)
        adv_start, adv_end = log.get_actor_alive_frames(adv_id)
        first_frame = max(ego_start, adv_start)
        last_frame = min(ego_end, adv_end)

        frames = []
        distances = []
        for frame in range(first_frame, last_frame):
            ego_loc = log.get_actor_transform(ego_id, frame).location
            adv_loc = log.get_actor_transform(adv_id, frame).location

            # Skip frames where the adversary is far below ground level,
            # which would distort the graph
            if adv_loc.z < -10:
                continue

            delta = ego_loc - adv_loc
            distances.append(math.sqrt(delta.x * delta.x + delta.y * delta.y + delta.z * delta.z))
            frames.append(frame)

        # Use matplotlib to show the results
        plt.plot(frames, distances)
        plt.ylabel('Distance [m]')
        plt.xlabel('Frame number')
        plt.title('Distance between the ego vehicle and the adversary over time')
        plt.show()
class DistanceToLaneCenter(BasicMetric):
    """
    Example metric that computes the signed lateral distance between the ego
    vehicle and the center of its lane, saving the series to a JSON file.
    """

    def _create_metric(self, town_map, log, criteria):
        """
        For every frame the ego vehicle was alive, project its offset from the
        lane-center waypoint onto the lane's right vector and record the
        signed distance (positive when the ego is on the left side).
        """
        ego_id = log.get_ego_vehicle_id()

        frames = []
        distances = []

        # Frame interval in which the ego actor existed
        first_frame, last_frame = log.get_actor_alive_frames(ego_id)

        for frame in range(first_frame, last_frame + 1):
            ego_location = log.get_actor_transform(ego_id, frame).location
            waypoint = town_map.get_waypoint(ego_location)

            # Offset from the lane center, and the lane's perpendicular axis
            offset = ego_location - waypoint.transform.location
            right = waypoint.transform.get_right_vector()
            right_norm = math.sqrt(right.x * right.x + right.y * right.y + right.z * right.z)

            # Project the offset vector onto the right vector
            dot = offset.x * right.x + offset.y * right.y + offset.z * right.z
            projected = dot / (right_norm * right_norm) * right
            distance = math.sqrt(projected.x * projected.x + projected.y * projected.y + projected.z * projected.z)

            # Sign of the distance: left side of the lane is positive
            forward = waypoint.transform.get_forward_vector()
            if forward.x * offset.y - forward.y * offset.x < 0:
                distance = -distance

            distances.append(distance)
            frames.append(frame)

        # Save the results to a file
        results = {'frames': frames, 'distance': distances}
        with open('srunner/metrics/data/DistanceToLaneCenter_results.json', 'w') as fw:
            json.dump(results, fw, sort_keys=False, indent=4)
class RouteConfiguration(object):

    """
    This class provides the basic configuration for a route
    """

    def __init__(self, route=None):
        self.data = route
        self.folder_name = None

    def parse_xml(self, node):
        """
        Parse route config XML

        Args:
            node: ElementTree element whose <waypoint> children carry
                'x', 'y', 'z' and 'connection' attributes.

        Raises:
            ValueError: if a waypoint has a missing or unknown
                'connection' attribute.
        """
        self.data = []

        for waypoint in node.iter("waypoint"):
            x = float(waypoint.attrib.get('x', 0))
            y = float(waypoint.attrib.get('y', 0))
            z = float(waypoint.attrib.get('z', 0))
            c = waypoint.attrib.get('connection', '')

            # Accept both 'RoadOption.LEFT' and plain 'LEFT'. The original
            # "c.split('.')[1]" raised IndexError whenever the attribute was
            # missing (its own default is '') or lacked the 'RoadOption.'
            # prefix; report the bad value explicitly instead.
            option_name = c.split('.')[-1]
            try:
                connection = RoadOption[option_name]
            except KeyError:
                raise ValueError(
                    "Invalid 'connection' attribute '{}' in route waypoint".format(c))

            self.data.append((carla.Location(x, y, z), connection))
class ActorConfigurationData(object):

    """
    This is a configuration base class to hold model and transform attributes
    """

    def __init__(self, model, transform, rolename='other', speed=0, autopilot=False,
                 random=False, color=None, category="car", args=None):
        self.model = model
        self.rolename = rolename
        self.transform = transform
        self.speed = speed
        self.autopilot = autopilot
        self.random_location = random
        self.color = color
        self.category = category
        self.args = args

    @staticmethod
    def parse_from_node(node, rolename):
        """
        static method to initialize an ActorConfigurationData from a given ET tree

        Args:
            node: ElementTree element describing the actor.
            rolename (str): fallback role name if the node provides none.

        Returns:
            ActorConfigurationData: configuration built from the node attributes.
        """

        model = node.attrib.get('model', 'vehicle.*')

        pos_x = float(node.attrib.get('x', 0))
        pos_y = float(node.attrib.get('y', 0))
        pos_z = float(node.attrib.get('z', 0))
        yaw = float(node.attrib.get('yaw', 0))

        transform = carla.Transform(carla.Location(x=pos_x, y=pos_y, z=pos_z), carla.Rotation(yaw=yaw))

        rolename = node.attrib.get('rolename', rolename)

        # Convert to float for consistency with x/y/z/yaw above; the original
        # left the value as a string whenever the attribute was present.
        speed = float(node.attrib.get('speed', 0))

        # Mere presence of these attributes enables the flags (value ignored)
        autopilot = 'autopilot' in node.keys()
        random_location = 'random_location' in node.keys()

        color = node.attrib.get('color', None)

        return ActorConfigurationData(model, transform, rolename, speed, autopilot, random_location, color)
class BasicControl(object):

    """
    This class is the base class for user-defined actor controllers
    All user-defined agents must be derived from this class.

    Args:
        actor (carla.Actor): Actor that should be controlled by the controller.

    Attributes:
        _actor (carla.Actor): Controlled actor.
            Defaults to None.
        _target_speed (float): Longitudinal target speed of the controller.
            Defaults to 0.
        _init_speed (float): Initial longitudinal speed of the controller.
            Defaults to 0.
        _waypoints (list of carla.Transform): List of target waypoints the actor
            should travel along. A waypoint here is of type carla.Transform!
            Defaults to [].
        _waypoints_updated (boolean):
            Defaults to False.
        _reached_goal (boolean):
            Defaults to False.
    """

    def __init__(self, actor):
        """
        Initialize the actor
        """
        self._actor = actor
        # State is initialized per instance. These used to be class
        # attributes, which made the mutable _waypoints list shared between
        # every controller instance.
        self._waypoints = []
        self._waypoints_updated = False
        self._target_speed = 0
        self._reached_goal = False
        self._init_speed = False

    def update_target_speed(self, speed):
        """
        Update the actor's target speed and set _init_speed to False.

        Args:
            speed (float): New target speed [m/s].
        """
        self._target_speed = speed
        self._init_speed = False

    def update_waypoints(self, waypoints, start_time=None):
        """
        Update the actor's waypoints

        Args:
            waypoints (List of carla.Transform): List of new waypoints.
        """
        self._waypoints = waypoints
        self._waypoints_updated = True

    def set_init_speed(self):
        """
        Set _init_speed to True
        """
        self._init_speed = True

    def check_reached_waypoint_goal(self):
        """
        Check if the actor reached the end of the waypoint list

        returns:
            True if the end was reached, False otherwise.
        """
        return self._reached_goal

    def reset(self):
        """
        Pure virtual function to reset the controller. This should be implemented
        in the user-defined agent implementation.
        """
        raise NotImplementedError(
            "This function must be re-implemented by the user-defined actor control."
            "If this error becomes visible the class hierarchy is somehow broken")

    def run_step(self):
        """
        Pure virtual function to run one step of the controllers's control loop.
        This should be implemented in the user-defined agent implementation.
        """
        raise NotImplementedError(
            "This function must be re-implemented by the user-defined actor control."
            "If this error becomes visible the class hierarchy is somehow broken")
class PedestrianControl(BasicControl):

    """
    Controller class for pedestrians derived from BasicControl.

    Args:
        actor (carla.Actor): Pedestrian actor that should be controlled.

    Raises:
        RuntimeError: if the given actor is not a carla.Walker.
    """

    def __init__(self, actor, args=None):
        if not isinstance(actor, carla.Walker):
            raise RuntimeError("PedestrianControl: The to be controlled actor is not a pedestrian")

        super(PedestrianControl, self).__init__(actor)

    def reset(self):
        """
        Reset the controller
        """
        if self._actor and self._actor.is_alive:
            self._actor = None

    def run_step(self):
        """
        Execute one tick of the controller's control loop

        If _waypoints are provided, the pedestrian moves towards the next waypoint
        with the given _target_speed, until reaching the final waypoint. Upon reaching
        the final waypoint, _reached_goal is set to True.

        If _waypoints is empty, the pedestrian moves in its current direction with
        the given _target_speed.
        """
        if not self._actor or not self._actor.is_alive:
            return

        control = self._actor.get_control()
        control.speed = self._target_speed

        if self._waypoints:
            self._reached_goal = False
            location = self._waypoints[0].location
            direction = location - self._actor.get_location()
            # NOTE(review): distance is measured in the ground plane only
            # (z is ignored) — presumably intentional for walkers; confirm.
            direction_norm = math.sqrt(direction.x**2 + direction.y**2)
            # Guard against a zero-length vector: the original divided by
            # direction_norm unconditionally, raising ZeroDivisionError when
            # the actor stands exactly on the waypoint.
            if direction_norm > 0.0:
                control.direction = direction / direction_norm
            self._actor.apply_control(control)
            if direction_norm < 1.0:
                self._waypoints = self._waypoints[1:]
                if not self._waypoints:
                    self._reached_goal = True
        else:
            control.direction = self._actor.get_transform().rotation.get_forward_vector()
            self._actor.apply_control(control)
28 | """ 29 | 30 | def __init__(self, actor, args=None): 31 | super(VehicleLongitudinalControl, self).__init__(actor) 32 | 33 | def reset(self): 34 | """ 35 | Reset the controller 36 | """ 37 | if self._actor and self._actor.is_alive: 38 | self._actor = None 39 | 40 | def run_step(self): 41 | """ 42 | Execute on tick of the controller's control loop 43 | 44 | The control loop is very simplistic: 45 | If the actor speed is below the _target_speed, set throttle to 1.0, 46 | otherwise, set throttle to 0.0 47 | Note, that this is a longitudinal controller only. 48 | 49 | If _init_speed is True, the control command is post-processed to ensure that 50 | the initial actor velocity is maintained independent of physics. 51 | """ 52 | 53 | control = self._actor.get_control() 54 | 55 | velocity = self._actor.get_velocity() 56 | current_speed = math.sqrt(velocity.x**2 + velocity.y**2) 57 | if current_speed < self._target_speed: 58 | control.throttle = 1.0 59 | else: 60 | control.throttle = 0.0 61 | 62 | self._actor.apply_control(control) 63 | 64 | if self._init_speed: 65 | if abs(self._target_speed - current_speed) > 3: 66 | yaw = self._actor.get_transform().rotation.yaw * (math.pi / 180) 67 | vx = math.cos(yaw) * self._target_speed 68 | vy = math.sin(yaw) * self._target_speed 69 | self._actor.set_target_velocity(carla.Vector3D(vx, vy, 0)) 70 | -------------------------------------------------------------------------------- /scenario_runner/srunner/scenariomanager/scenarioatomics/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OpenDriveLab/DriveAdapter/4d480bef06073786c1844757e7734b1bff1ff443/scenario_runner/srunner/scenariomanager/scenarioatomics/__init__.py -------------------------------------------------------------------------------- /scenario_runner/srunner/scenariomanager/traffic_events.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env 
class TrafficEvent(object):

    """
    TrafficEvent definition
    """

    def __init__(self, event_type, message=None, dictionary=None):
        """
        Initialize object

        :param event_type: TrafficEventType defining the type of traffic event
        :param message: optional message to inform users of the event
        :param dictionary: optional dictionary with arbitrary keys and values
        """
        self._type = event_type
        self._message = message
        self._dict = dictionary

    def get_type(self):
        """
        @return type
        """
        return self._type

    def get_message(self):
        """
        @return message, or an empty string when no message is set
        """
        return self._message if self._message else ""

    def set_message(self, message):
        """
        Set message
        """
        self._message = message

    def get_dict(self):
        """
        @return dictionary
        """
        return self._dict

    def set_dict(self, dictionary):
        """
        Set dictionary
        """
        self._dict = dictionary
class Watchdog(object):

    """
    Simple watchdog timer to detect timeouts

    Args:
        timeout (float): Timeout value of the watchdog [seconds].
            If it is not reset before exceeding this value, a KeyboardInterrupt is raised.

    Attributes:
        _timeout (float): Timeout value of the watchdog [seconds].
        _failed (bool): True if watchdog exception occured, false otherwise
    """

    def __init__(self, timeout=1.0):
        """
        Class constructor
        """
        self._timeout = timeout + 1.0  # Let's add one second here to avoid overlap with other CARLA timeouts
        self._failed = False
        self._timer = None

    def start(self):
        """
        Start the watchdog
        """
        self._timer = Timer(self._timeout, self._event)
        self._timer.daemon = True
        self._timer.start()

    def update(self):
        """
        Reset watchdog.
        """
        self.stop()
        self.start()

    def _event(self):
        """
        This method is called when the timer triggers. A KeyboardInterrupt
        is generated on the main thread and the watchdog is stopped.
        """
        print('Watchdog exception - Timeout of {} seconds occured'.format(self._timeout))
        self._failed = True
        self.stop()
        thread.interrupt_main()

    def stop(self):
        """
        Stops the watchdog. Safe to call even when the watchdog was never
        started; the original dereferenced the None timer and raised
        AttributeError in that case.
        """
        if self._timer is not None:
            self._timer.cancel()

    def get_status(self):
        """
        returns:
            bool: False if watchdog exception occured, True otherwise
        """
        return not self._failed
self).__init__("BackgroundActivity", 48 | ego_vehicles, 49 | config, 50 | world, 51 | debug_mode, 52 | terminate_on_failure=True, 53 | criteria_enable=True) 54 | 55 | def _initialize_actors(self, config): 56 | 57 | town_name = config.town 58 | if town_name in self.town_amount: 59 | amount = self.town_amount[town_name] 60 | else: 61 | amount = 0 62 | if os.environ["BENCHMARK"] == "town05long": 63 | amount = 120 64 | print("----------------Eval with Town05 Long, amount=120", flush=True) 65 | if os.environ["BENCHMARK"] == "longest6": 66 | amount = 500 67 | print("----------------Eval with Longest6, amount=500", flush=True) 68 | 69 | new_actors = CarlaDataProvider.request_new_batch_actors('vehicle.*', 70 | amount, 71 | carla.Transform(), 72 | autopilot=True, 73 | random_location=True, 74 | rolename='background') 75 | 76 | if new_actors is None: 77 | raise Exception("Error: Unable to add the background activity, all spawn points were occupied") 78 | 79 | for _actor in new_actors: 80 | self.other_actors.append(_actor) 81 | 82 | def _create_behavior(self): 83 | """ 84 | Basic behavior do nothing, i.e. Idle 85 | """ 86 | pass 87 | 88 | def _create_test_criteria(self): 89 | """ 90 | A list of all test criteria will be created that is later used 91 | in parallel behavior tree. 92 | """ 93 | pass 94 | 95 | def __del__(self): 96 | """ 97 | Remove all actors upon deletion 98 | """ 99 | self.remove_all_actors() 100 | -------------------------------------------------------------------------------- /scenario_runner/srunner/scenarios/freeride.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright (c) 2019-2020 Intel Corporation 4 | # 5 | # This work is licensed under the terms of the MIT license. 6 | # For a copy, see . 7 | 8 | """ 9 | Simple freeride scenario. No action, no triggers. Ego vehicle can simply cruise around. 
class FreeRide(BasicScenario):

    """
    Implementation of a simple free ride scenario that consists only of the ego vehicle
    """

    def __init__(self, world, ego_vehicles, config, randomize=False, debug_mode=False, criteria_enable=True,
                 timeout=10000000):
        """
        Setup all relevant parameters and create scenario
        """
        self.timeout = timeout  # Timeout of scenario in seconds
        super(FreeRide, self).__init__("FreeRide",
                                       ego_vehicles,
                                       config,
                                       world,
                                       debug_mode,
                                       criteria_enable=criteria_enable)

    def _setup_scenario_trigger(self, config):
        """
        No trigger condition is needed for a free ride.
        """
        return None

    def _create_behavior(self):
        """
        The ego vehicle simply idles for the whole scenario.
        """
        root = py_trees.composites.Sequence("Sequence Behavior")
        root.add_child(Idle())
        return root

    def _create_test_criteria(self):
        """
        A list of all test criteria will be created that is later used
        in parallel behavior tree.
        """
        return [CollisionTest(vehicle) for vehicle in self.ego_vehicles]

    def __del__(self):
        """
        Remove all actors upon deletion
        """
        self.remove_all_actors()
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/OpenDriveLab/DriveAdapter/4d480bef06073786c1844757e7734b1bff1ff443/src/opendrivelab_e2e_update.png -------------------------------------------------------------------------------- /src/pipeline.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OpenDriveLab/DriveAdapter/4d480bef06073786c1844757e7734b1bff1ff443/src/pipeline.PNG --------------------------------------------------------------------------------