├── src
│   └── navigation
│       ├── __init__.py
│       ├── __pycache__
│       │   ├── __init__.cpython-37.pyc
│       │   ├── controller.cpython-37.pyc
│       │   ├── basic_agent.cpython-37.pyc
│       │   ├── local_planner.cpython-37.pyc
│       │   ├── behavior_agent.cpython-37.pyc
│       │   ├── behavior_types.cpython-37.pyc
│       │   ├── global_route_planner.cpython-37.pyc
│       │   ├── behavior_agent_original.cpython-37.pyc
│       │   └── constant_velocity_agent.cpython-37.pyc
│       ├── examples
│       │   ├── requirements.txt
│       │   ├── .idea
│       │   │   └── .gitignore
│       │   ├── map_1.py
│       │   ├── vehicle_gallery.py
│       │   ├── show_recorder_actors_blocked.py
│       │   ├── show_recorder_file_info.py
│       │   ├── show_recorder_collisions.py
│       │   ├── start_replaying.py
│       │   ├── map.py
│       │   ├── dynamic_weather.py
│       │   ├── vehicle_physics.py
│       │   ├── tutorial.py
│       │   ├── sensor_synchronization.py
│       │   ├── start_recording.py
│       │   ├── tutorial_gbuffer.py
│       │   ├── synchronous_mode.py
│       │   ├── open3d_lidar.py
│       │   ├── lidar_to_camera.py
│       │   ├── visualize_multiple_sensors.py
│       │   ├── client_bounding_boxes.py
│       │   ├── generate_traffic_basic.py
│       │   └── generate_traffic.py
│       ├── README.md
│       ├── behavior_types.py
│       ├── constant_velocity_agent.py
│       ├── controller.py
│       └── local_planner.py
├── README.md
└── prompt
    ├── feedback_not-aligned.txt
    ├── feedback_cautious.txt
    ├── feedback_risky.txt
    ├── demonstration_not-aligned.txt
    ├── demonstration_cautious.txt
    └── demonstration_risky.txt

--------------------------------------------------------------------------------
/src/navigation/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/src/navigation/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AIR-DISCOVER/Multi-alignment-Driving-Agent/HEAD/src/navigation/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/src/navigation/examples/requirements.txt:
--------------------------------------------------------------------------------
1 | future
2 | numpy; python_version < '3.0'
3 | numpy==1.18.4; python_version >= '3.0'
4 | pygame
5 | matplotlib
6 | open3d
7 | Pillow
--------------------------------------------------------------------------------
/src/navigation/__pycache__/controller.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AIR-DISCOVER/Multi-alignment-Driving-Agent/HEAD/src/navigation/__pycache__/controller.cpython-37.pyc
--------------------------------------------------------------------------------
/src/navigation/__pycache__/basic_agent.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AIR-DISCOVER/Multi-alignment-Driving-Agent/HEAD/src/navigation/__pycache__/basic_agent.cpython-37.pyc
--------------------------------------------------------------------------------
/src/navigation/__pycache__/local_planner.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AIR-DISCOVER/Multi-alignment-Driving-Agent/HEAD/src/navigation/__pycache__/local_planner.cpython-37.pyc
--------------------------------------------------------------------------------
/src/navigation/__pycache__/behavior_agent.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AIR-DISCOVER/Multi-alignment-Driving-Agent/HEAD/src/navigation/__pycache__/behavior_agent.cpython-37.pyc
--------------------------------------------------------------------------------
/src/navigation/__pycache__/behavior_types.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AIR-DISCOVER/Multi-alignment-Driving-Agent/HEAD/src/navigation/__pycache__/behavior_types.cpython-37.pyc
--------------------------------------------------------------------------------
/src/navigation/__pycache__/global_route_planner.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AIR-DISCOVER/Multi-alignment-Driving-Agent/HEAD/src/navigation/__pycache__/global_route_planner.cpython-37.pyc
--------------------------------------------------------------------------------
/src/navigation/__pycache__/behavior_agent_original.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AIR-DISCOVER/Multi-alignment-Driving-Agent/HEAD/src/navigation/__pycache__/behavior_agent_original.cpython-37.pyc
--------------------------------------------------------------------------------
/src/navigation/__pycache__/constant_velocity_agent.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AIR-DISCOVER/Multi-alignment-Driving-Agent/HEAD/src/navigation/__pycache__/constant_velocity_agent.cpython-37.pyc
--------------------------------------------------------------------------------
/src/navigation/README.md:
--------------------------------------------------------------------------------
1 | # navigation code
2 | + basic_agent.py: the agent's base code, including the _vehicle_obstacle_detected() method that needs to be modified
3 | + behavior_agent.py: the code that calls the GPT-4 API; the version currently in master does not include the coach agent
4 | + behavior_agent_original.py: Carla's built-in rule-based control code
5 |
--------------------------------------------------------------------------------
/src/navigation/examples/.idea/.gitignore:
--------------------------------------------------------------------------------
1 | # Default ignored files
2 | /shelf/
3 | /workspace.xml
4 | # Editor-based HTTP Client requests
5 | /httpRequests/
6 | # Datasource local storage ignored files
7 | /dataSources/
8 | /dataSources.local.xml
9 |
--------------------------------------------------------------------------------
/src/navigation/examples/map_1.py:
--------------------------------------------------------------------------------
1 | import carla
2 |
3 | # Establish the Carla client connection
4 | client = carla.Client('localhost', 2000)
5 | client.set_timeout(2.0)
6 |
7 | try:
8 |     # Get the Carla world
9 |     world = client.load_world('Town05')
10 |
11 |     # Create the list of map point coordinates
12 |     spawn_points = [
13 |         carla.Transform(carla.Location(x=-164.732162, y=-95.141876, z=0.300000)),
14 |         carla.Transform(carla.Location(x=-44.188328, y=-39.610611, z=0.450000)),
15 |         # Add further coordinates here
16 |     ]
17 |
18 |     # Draw the map points in the Carla world
19 |     for transform in spawn_points:
20 |         world.debug.draw_point(transform.location, size=0.1, color=carla.Color(255, 0, 0), life_time=120)
21 |
22 |     # Keep the Carla simulation running
23 |     while True:
24 |         pass
25 |
26 | finally:
27 |     print("Closing the Carla client")
28 |
--------------------------------------------------------------------------------
/src/navigation/behavior_types.py:
--------------------------------------------------------------------------------
1 | # This work is licensed under the terms of the MIT license.
2 | # For a copy, see .
3 |
4 | """ This module contains the different parameter sets for each behavior.
""" 5 | 6 | 7 | class Cautious(object): 8 | """Class for Cautious agent.""" 9 | max_speed = 40 10 | speed_lim_dist = 6 11 | speed_decrease = 12 12 | safety_time = 3 13 | min_proximity_threshold = 12 14 | braking_distance = 6 15 | tailgate_counter = 0 16 | 17 | 18 | class Normal(object): 19 | """Class for Normal agent.""" 20 | max_speed = 50 21 | speed_lim_dist = 3 22 | speed_decrease = 10 23 | safety_time = 3 24 | min_proximity_threshold = 10 25 | braking_distance = 5 26 | tailgate_counter = 0 27 | 28 | 29 | class Aggressive(object): 30 | """Class for Aggressive agent.""" 31 | max_speed = 70 32 | speed_lim_dist = 1 33 | speed_decrease = 8 34 | safety_time = 3 35 | min_proximity_threshold = 8 36 | braking_distance = 4 37 | tailgate_counter = -1 38 | -------------------------------------------------------------------------------- /src/navigation/examples/vehicle_gallery.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright (c) 2019 Computer Vision Center (CVC) at the Universitat Autonoma de 4 | # Barcelona (UAB). 5 | # 6 | # This work is licensed under the terms of the MIT license. 7 | # For a copy, see . 8 | 9 | import glob 10 | import os 11 | import sys 12 | 13 | try: 14 | sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % ( 15 | sys.version_info.major, 16 | sys.version_info.minor, 17 | 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0]) 18 | except IndexError: 19 | pass 20 | 21 | import carla 22 | 23 | import math 24 | import random 25 | 26 | 27 | def get_transform(vehicle_location, angle, d=6.4): 28 | a = math.radians(angle) 29 | location = carla.Location(d * math.cos(a), d * math.sin(a), 2.0) + vehicle_location 30 | return carla.Transform(location, carla.Rotation(yaw=180 + angle, pitch=-15)) 31 | 32 | 33 | def main(): 34 | client = carla.Client('localhost', 2000) 35 | client.set_timeout(2.0) 36 | world = client.get_world() 37 | spectator = world.get_spectator() 38 | vehicle_blueprints = world.get_blueprint_library().filter('vehicle') 39 | 40 | location = random.choice(world.get_map().get_spawn_points()).location 41 | 42 | for blueprint in vehicle_blueprints: 43 | transform = carla.Transform(location, carla.Rotation(yaw=-45.0)) 44 | vehicle = world.spawn_actor(blueprint, transform) 45 | 46 | try: 47 | 48 | print(vehicle.type_id) 49 | 50 | angle = 0 51 | while angle < 356: 52 | timestamp = world.wait_for_tick().timestamp 53 | angle += timestamp.delta_seconds * 60.0 54 | spectator.set_transform(get_transform(vehicle.get_location(), angle - 90)) 55 | 56 | finally: 57 | 58 | vehicle.destroy() 59 | 60 | 61 | if __name__ == '__main__': 62 | 63 | main() 64 | -------------------------------------------------------------------------------- /src/navigation/examples/show_recorder_actors_blocked.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright (c) 2019 Computer Vision Center (CVC) at the Universitat Autonoma de 4 | # Barcelona (UAB). 5 | # 6 | # This work is licensed under the terms of the MIT license. 7 | # For a copy, see . 
8 | 9 | import glob 10 | import os 11 | import sys 12 | 13 | try: 14 | sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % ( 15 | sys.version_info.major, 16 | sys.version_info.minor, 17 | 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0]) 18 | except IndexError: 19 | pass 20 | 21 | import carla 22 | 23 | import argparse 24 | 25 | 26 | def main(): 27 | 28 | argparser = argparse.ArgumentParser( 29 | description=__doc__) 30 | argparser.add_argument( 31 | '--host', 32 | metavar='H', 33 | default='127.0.0.1', 34 | help='IP of the host server (default: 127.0.0.1)') 35 | argparser.add_argument( 36 | '-p', '--port', 37 | metavar='P', 38 | default=2000, 39 | type=int, 40 | help='TCP port to listen to (default: 2000)') 41 | argparser.add_argument( 42 | '-f', '--recorder_filename', 43 | metavar='F', 44 | default="test1.rec", 45 | help='recorder filename (test1.rec)') 46 | argparser.add_argument( 47 | '-t', '--time', 48 | metavar='T', 49 | default="30", 50 | type=float, 51 | help='minimum time to consider it is blocked') 52 | argparser.add_argument( 53 | '-d', '--distance', 54 | metavar='D', 55 | default="100", 56 | type=float, 57 | help='minimum distance to consider it is not moving (in cm)') 58 | args = argparser.parse_args() 59 | 60 | try: 61 | 62 | client = carla.Client(args.host, args.port) 63 | client.set_timeout(60.0) 64 | 65 | print(client.show_recorder_actors_blocked(args.recorder_filename, args.time, args.distance)) 66 | 67 | finally: 68 | pass 69 | 70 | 71 | if __name__ == '__main__': 72 | 73 | try: 74 | main() 75 | except KeyboardInterrupt: 76 | pass 77 | finally: 78 | print('\ndone.') 79 | -------------------------------------------------------------------------------- /src/navigation/examples/show_recorder_file_info.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright (c) 2019 Computer Vision Center (CVC) at the Universitat Autonoma de 4 | # Barcelona (UAB). 5 | # 6 | # This work is licensed under the terms of the MIT license. 7 | # For a copy, see . 
8 | 9 | import glob 10 | import os 11 | import sys 12 | 13 | try: 14 | sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % ( 15 | sys.version_info.major, 16 | sys.version_info.minor, 17 | 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0]) 18 | except IndexError: 19 | pass 20 | 21 | import carla 22 | 23 | import argparse 24 | 25 | 26 | def main(): 27 | 28 | argparser = argparse.ArgumentParser( 29 | description=__doc__) 30 | argparser.add_argument( 31 | '--host', 32 | metavar='H', 33 | default='127.0.0.1', 34 | help='IP of the host server (default: 127.0.0.1)') 35 | argparser.add_argument( 36 | '-p', '--port', 37 | metavar='P', 38 | default=2000, 39 | type=int, 40 | help='TCP port to listen to (default: 2000)') 41 | argparser.add_argument( 42 | '-f', '--recorder_filename', 43 | metavar='F', 44 | default="test1.rec", 45 | help='recorder filename (test1.rec)') 46 | argparser.add_argument( 47 | '-a', '--show_all', 48 | action='store_true', 49 | help='show detailed info about all frames content') 50 | argparser.add_argument( 51 | '-s', '--save_to_file', 52 | metavar='S', 53 | help='save result to file (specify name and extension)') 54 | 55 | args = argparser.parse_args() 56 | 57 | try: 58 | 59 | client = carla.Client(args.host, args.port) 60 | client.set_timeout(60.0) 61 | if args.save_to_file: 62 | doc = open(args.save_to_file, "w+") 63 | doc.write(client.show_recorder_file_info(args.recorder_filename, args.show_all)) 64 | doc.close() 65 | else: 66 | print(client.show_recorder_file_info(args.recorder_filename, args.show_all)) 67 | 68 | 69 | finally: 70 | pass 71 | 72 | 73 | if __name__ == '__main__': 74 | 75 | try: 76 | main() 77 | except KeyboardInterrupt: 78 | pass 79 | finally: 80 | print('\ndone.') 81 | -------------------------------------------------------------------------------- /src/navigation/examples/show_recorder_collisions.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright (c) 2019 Computer Vision Center (CVC) at the Universitat Autonoma de 4 | # Barcelona (UAB). 5 | # 6 | # This work is licensed under the terms of the MIT license. 7 | # For a copy, see . 
8 |
9 | import glob
10 | import os
11 | import sys
12 |
13 | try:
14 |     sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (
15 |         sys.version_info.major,
16 |         sys.version_info.minor,
17 |         'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
18 | except IndexError:
19 |     pass
20 |
21 | import carla
22 |
23 | import argparse
24 |
25 |
26 | def main():
27 |
28 |     argparser = argparse.ArgumentParser(
29 |         description=__doc__)
30 |     argparser.add_argument(
31 |         '--host',
32 |         metavar='H',
33 |         default='127.0.0.1',
34 |         help='IP of the host server (default: 127.0.0.1)')
35 |     argparser.add_argument(
36 |         '-p', '--port',
37 |         metavar='P',
38 |         default=2000,
39 |         type=int,
40 |         help='TCP port to listen to (default: 2000)')
41 |     argparser.add_argument(
42 |         '-f', '--recorder_filename',
43 |         metavar='F',
44 |         default="test1.rec",
45 |         help='recorder filename (test1.rec)')
46 |     argparser.add_argument(
47 |         '-t', '--types',
48 |         metavar='T',
49 |         default="aa",
50 |         help='pair of types (a=any, h=hero, v=vehicle, w=walkers, t=trafficLight, o=others)')
51 |     args = argparser.parse_args()
52 |
53 |     try:
54 |
55 |         client = carla.Client(args.host, args.port)
56 |         client.set_timeout(60.0)
57 |
58 |         # types pattern samples:
59 |         # -t aa == any to any == show every collision (the default)
60 |         # -t vv == vehicle to vehicle == show every collision between vehicles only
61 |         # -t vt == vehicle to traffic light == show every collision between a vehicle and a traffic light
62 |         # -t hh == hero to hero == show collision between a hero and another hero
63 |         print(client.show_recorder_collisions(args.recorder_filename, args.types[0], args.types[1]))
64 |
65 |     finally:
66 |         pass
67 |
68 |
69 | if __name__ == '__main__':
70 |
71 |     try:
72 |         main()
73 |     except KeyboardInterrupt:
74 |         pass
75 |     finally:
76 |         print('\ndone.')
77 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Multi-alignment-Driving-Agent
2 |
3 | This repository contains the multi-alignment framework code for the paper **Driving Style Alignment for LLM-powered Driver Agent**.
4 |
5 | ## Installation and Configuration Guide
6 | Before running our code, CARLA must be installed and configured; we recommend version 0.9.14, the same version used in the paper. We also recommend using Python 3.7 and Unreal Engine 4.
7 |
8 | The detailed build guide can be found in the [CARLA Simulator](https://carla.readthedocs.io/en/latest/) documentation. In the bottom right corner, select the corresponding CARLA version.
9 |
10 | After successfully running CARLA, follow these steps to integrate our code:
11 |
12 | 1. Replace the `automatic_control.py` in the CARLA source code at `carla/PythonAPI/examples/automatic_control.py` with `Multi-alignment-Driving-Agent/src/automatic_control.py`
13 | 2. Replace the `navigation` folder in the CARLA source code at `/carla/PythonAPI/carla/agents/navigation` with `Multi-alignment-Driving-Agent/src/navigation`
14 | 3. In `navigation/behavior_agent.py`, configure your OpenAI API key and organization in line 92 and line 93.
15 | 4. Configure the paths for the driver and coach agent prompts, and the path for the guidelines generated by the coach agent.
16 |     1. In `navigation/behavior_agent.py`, configure the *driver prompt* path in line 96.
The path should be the location where you store the `prompt` folder; choose the driving style prompt that matches your needs (`prompt/demonstration_cautious.txt`, `prompt/demonstration_risky.txt`, or `prompt/demonstration_not-aligned.txt`).
17 |     2. In `navigation/behavior_agent.py`, configure the *coach prompt* path in line 101. The path should be the location where you store the `prompt` folder; choose the driving style prompt that matches your needs (`prompt/feedback_cautious.txt`, `prompt/feedback_risky.txt`, or `prompt/feedback_not-aligned.txt`).
18 |     3. In `navigation/behavior_agent.py`, configure the *guideline* path in line 102. This file will store all the guidelines generated by the coach agent.
19 |
20 | 5. Configure the paths for all the logs generated while running.
21 |     1. In `behavior_agent.py`, configure the *GPT log* and *CAN-Bus log* paths in line 107 and line 108. We recommend creating two dedicated folders to store them separately. The log of each run is stored with the current time as the default filename (you can rename it as needed).
22 |     2. In `automatic_control.py`, configure the *collision log* and *time log* paths in line 82 and line 83. We recommend creating two dedicated folders to store them separately. The log of each run is stored with the current time as the default filename (you can rename it as needed).
23 |
24 |
25 | ## Running
26 |
27 | Run the following command in the `carla/PythonAPI/examples/` directory:
28 |
29 | ```shell
30 | conda activate carla
31 | python automatic_control.py
32 | ```
33 |
34 |
--------------------------------------------------------------------------------
/prompt/feedback_not-aligned.txt:
--------------------------------------------------------------------------------
1 | You are an assistant responsible for assessing driving behaviors in the CARLA simulator, providing useful guidelines, and continuously integrating them. For each turn, you need to evaluate whether my driving behaviors are good or bad. If they are bad, please assist me in making improvements with your guidelines. Then, based on existing guidelines, continuously integrate them into new guidelines.
2 | [PART 1] INFORMATION
3 | PREVIOUS DRIVING BEHAVIORS: I will provide you with several instances of PREVIOUS DRIVING BEHAVIORS: Situation, Reasoning, Action, which are formatted as: {"Situation": "...", "Reasoning": "...", "Action": "..."}. You should consider the latter ones' situations as the outcomes of the former ones' Reasoning and Action Instructions.
4 | EXISTING GUIDELINES: I will provide you with the EXISTING GUIDELINES, which represent the integration of all your guidelines so far. Note that this section is only for the purpose of integrating guidelines in the future and does not participate in the evaluation and reasoning considerations.
5 | [PART 2] COACH TASK
6 | You must adhere to the specified rules:
7 | 1. Evaluation
8 | Assess the provided PREVIOUS DRIVING BEHAVIORS as either 'good' or 'bad'.
9 | 2. Reason
10 | Give the reason for the evaluation above. Please describe using concise sentences.
11 | 3. Guideline
12 | 1. If your evaluation is 'Bad', seek a guideline based on the reason you evaluated. No guideline is required if both Reasoning and Action evaluations are 'good'.
13 | 2. Keep the content of this part within 30 words, as concise and short as possible.
14 | 4. Integrated Guideline
15 | 1. Integrate the Guideline from this turn into the EXISTING GUIDELINES. If the guideline is initially empty, format the first received guideline into the existing guideline.
16 | 2. When integrating, ensure that the guideline is specific and in the form of a list (like 1,2,3,4,5...). Please describe using concise sentences.
17 | 3. Integrate these guidelines instead of stacking them, i.e., merge similar guidelines into one and try to condense different guidelines into concise sentences to make the integrated guideline clear and concise.
18 | 4. Keep the content of the entire integrated guidelines part within 120 words.
19 | [PART 3] FORMAT
20 | You should only respond in the format described below:
21 | Response Format: {"Evaluation": "......", "Reason": "......", "Guideline": "......", "Integrated Guideline":"......"}
22 | [PART 4] EXAMPLES
23 | Here's an example response:
24 | Example 1
25 | {"Evaluation": "Good", "Reason": "Slowing down to observe road conditions when turning is a responsible operation", "Guideline": "", "Integrated Guideline": ""}
26 | Example 2
27 | {"Evaluation": "Bad", "Reason": "Not slowing down when close to a vehicle ahead is not a responsible operation.", "Guideline": "Slow down when approaching a vehicle ahead to ensure observation of road conditions and enhance safety.", "Integrated Guideline": "1. Slow down when getting close to a vehicle ahead."}
--------------------------------------------------------------------------------
/src/navigation/examples/start_replaying.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | # Copyright (c) 2019 Computer Vision Center (CVC) at the Universitat Autonoma de
4 | # Barcelona (UAB).
5 | #
6 | # This work is licensed under the terms of the MIT license.
7 | # For a copy, see .
8 |
9 | import glob
10 | import os
11 | import sys
12 |
13 | try:
14 |     sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (
15 |         sys.version_info.major,
16 |         sys.version_info.minor,
17 |         'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
18 | except IndexError:
19 |     pass
20 |
21 | import carla
22 |
23 | import argparse
24 |
25 |
26 | def main():
27 |
28 |     argparser = argparse.ArgumentParser(
29 |         description=__doc__)
30 |     argparser.add_argument(
31 |         '--host',
32 |         metavar='H',
33 |         default='127.0.0.1',
34 |         help='IP of the host server (default: 127.0.0.1)')
35 |     argparser.add_argument(
36 |         '-p', '--port',
37 |         metavar='P',
38 |         default=2000,
39 |         type=int,
40 |         help='TCP port to listen to (default: 2000)')
41 |     argparser.add_argument(
42 |         '-s', '--start',
43 |         metavar='S',
44 |         default=0.0,
45 |         type=float,
46 |         help='starting time (default: 0.0)')
47 |     argparser.add_argument(
48 |         '-d', '--duration',
49 |         metavar='D',
50 |         default=0.0,
51 |         type=float,
52 |         help='duration (default: 0.0)')
53 |     argparser.add_argument(
54 |         '-f', '--recorder-filename',
55 |         metavar='F',
56 |         default="test1.log",
57 |         help='recorder filename (test1.log)')
58 |     argparser.add_argument(
59 |         '-c', '--camera',
60 |         metavar='C',
61 |         default=0,
62 |         type=int,
63 |         help='camera follows an actor (ex: 82)')
64 |     argparser.add_argument(
65 |         '-x', '--time-factor',
66 |         metavar='X',
67 |         default=1.0,
68 |         type=float,
69 |         help='time factor (default 1.0)')
70 |     argparser.add_argument(
71 |         '-i', '--ignore-hero',
72 |         action='store_true',
73 |         help='ignore hero vehicles')
74 |     argparser.add_argument(
75 |         '--spawn-sensors',
76 |         action='store_true',
77 |         help='spawn sensors in the replayed world')
78 |     args = argparser.parse_args()
79 |
80 |     try:
81 |
82 |         client = carla.Client(args.host, args.port)
83 |         client.set_timeout(60.0)
84 |
85 |         # set the time factor for the replayer
86 |         client.set_replayer_time_factor(args.time_factor)
87 |
88 |         # set to ignore the hero vehicles or not
89 |         client.set_replayer_ignore_hero(args.ignore_hero)
90 |
91 |         # replay the session
92 |         print(client.replay_file(args.recorder_filename, args.start, args.duration, args.camera, args.spawn_sensors))
93 |
94 |     finally:
95 |         pass
96 |
97 |
98 | if __name__ == '__main__':
99 |
100 |     try:
101 |         main()
102 |     except KeyboardInterrupt:
103 |         pass
104 |     finally:
105 |         print('\ndone.')
106 |
--------------------------------------------------------------------------------
/src/navigation/examples/map.py:
--------------------------------------------------------------------------------
1 | import carla
2 | import random
3 | import time
4 |
5 | # Carla connection configuration
6 | host = "localhost"
7 | port = 2000
8 | client = carla.Client(host, port)
9 | client.set_timeout(10.0)
10 |
11 | # Get the Carla world
12 | world = client.get_world()
13 |
14 | # Get the Carla map
15 | map = world.get_map()
16 |
17 | # Get the map's spawn points
18 | spawn_points = map.get_spawn_points()
19 |
20 | # Create a start and an end location
21 | start_location = carla.Transform(carla.Location(x=100, y=100, z=2), carla.Rotation())
22 | end_location = carla.Transform(carla.Location(x=200, y=100, z=2), carla.Rotation())
23 |
24 | # Save the spawn point locations
25 | spawn_point1 = random.choice(spawn_points)
26 | spawn_point2 = random.choice(spawn_points)
27 | spawn_point3 = random.choice(spawn_points)
28 |
29 | # Create segment 1: a segment with no disturbances
30 | def create_no_disturbance_segment():
31 |     segment = map.get_waypoint(start_location.location).next(50.0)[0]
32 |     return segment
33 |
34 |
35 | # Create segment 2: a segment with traffic-light and lead-vehicle disturbances
36 | def create_signal_and_vehicle_segment():
37 |     segment = map.get_waypoint(start_location.location).next(100.0)[0]
38 |     # Spawn a lead vehicle on the segment
39 |     vehicle_bp = world.get_blueprint_library().filter('vehicle.*')[0]
40 |
41 |     vehicle_transform = carla.Transform(
42 |         carla.Location(x=spawn_point1.location.x, y=spawn_point1.location.y, z=spawn_point1.location.z),
43 |         carla.Rotation())
44 |     vehicle = world.spawn_actor(vehicle_bp, vehicle_transform)
45 |     return segment, vehicle
46 |
47 |
48 | # Create segment 3: a complex segment with multiple disturbance factors
49 | def create_complex_segment():
50 |     segment = map.get_waypoint(start_location.location).next(200.0)[0]
51 |     # Spawn a lead vehicle and a pedestrian ahead on the segment
52 |     vehicle_bp = world.get_blueprint_library().filter('vehicle.*')[0]
53 |
54 |     vehicle_transform = carla.Transform(
55 |         carla.Location(x=spawn_point2.location.x, y=spawn_point2.location.y, z=spawn_point2.location.z),
56 |         carla.Rotation())
57 |     vehicle = world.spawn_actor(vehicle_bp, vehicle_transform)
58 |
59 |     pedestrian_bp = world.get_blueprint_library().filter('walker.pedestrian.*')[0]
60 |
61 |     pedestrian_transform = carla.Transform(
62 |         carla.Location(x=spawn_point3.location.x, y=spawn_point3.location.y, z=spawn_point3.location.z),
63 |         carla.Rotation())
64 |     pedestrian = world.spawn_actor(pedestrian_bp, pedestrian_transform)
65 |
66 |     # Manually update the pedestrian's position to simulate it moving along the segment
67 |     pedestrian_location = pedestrian.get_location()
68 |     target_location = segment.transform.location
69 |     delta_location = target_location - pedestrian_location
70 |     delta_location = delta_location / pedestrian_location.distance(target_location)  # normalize to a unit step toward the target
71 |
72 |     # Update the pedestrian's position every frame
73 |     while pedestrian_location.distance(target_location) > 2.0:
74 |         pedestrian_location = pedestrian.get_location()
75 |         pedestrian_location.x += delta_location.x
76 |         pedestrian_location.y += delta_location.y
77 |         pedestrian.set_location(pedestrian_location)
78 |         world.tick()  # update the world state
79 |         time.sleep(0.05)  # control the update rate
80 |
81 |     return segment, vehicle, pedestrian
82 |
83 | # Create the segments
84 | segment1 = create_no_disturbance_segment()
85 | segment2, vehicle2 = create_signal_and_vehicle_segment()
86 | segment3, vehicle3, pedestrian3 = create_complex_segment()
87 |
88 | # Print the spawn point locations
89 | print("Spawn point 1 location:", spawn_point1.location)
90 | print("Spawn point 2 location:", spawn_point2.location)
91 | print("Spawn point 3 location:", spawn_point3.location)
92 |
93 | # Set the navigation start and end points
94 | start_location = segment1.transform
95 | end_location = segment3.transform
96 |
97 | # Create a target waypoint near the end point, used to finish navigation
98 | end_waypoint = map.get_waypoint(end_location.location)
99 |
100 | try:
101 |     # Create the vehicle
102 |     vehicle_bp = world.get_blueprint_library().filter('vehicle.*')[0]
103 |
104 |     vehicle_transform = carla.Transform(
105 |         carla.Location(x=spawn_point1.location.x, y=spawn_point1.location.y, z=spawn_point1.location.z),
106 |         carla.Rotation(yaw=0))
107 |     vehicle1 = world.spawn_actor(vehicle_bp, vehicle_transform)
108 |
109 |     # Set the vehicle's initial position
110 |     vehicle1.set_transform(start_location)
111 |
112 |     # Start the vehicle's autopilot
113 |     vehicle1.set_autopilot(True)
114 |
115 |     # Wait for navigation to complete
116 |     while True:
117 |         location = vehicle1.get_location()
118 |         if location.distance(end_waypoint.transform.location) < 2.0:
119 |             print("Navigation complete")
120 |             break
121 |
122 | except Exception as e:
123 |     print(f"Error while spawning actors: {e}")
124 |
125 | finally:
126 |     # Destroy all actors
127 |     if 'vehicle1' in locals():
128 |         vehicle1.destroy()
129 |     if 'vehicle2' in locals():
130 |         vehicle2.destroy()
131 |     if 'vehicle3' in locals():
132 |         vehicle3.destroy()
133 |     if 'pedestrian3' in locals():
134 |         pedestrian3.destroy()
135 |
136 |     world.wait_for_tick()
137 |
--------------------------------------------------------------------------------
/prompt/feedback_cautious.txt:
--------------------------------------------------------------------------------
1 | You are an assistant responsible for assessing driving behaviors in the CARLA simulator, providing useful guidelines, and continuously integrating them. For each turn, you need to evaluate whether my driving behaviors are good or bad. If they are bad, please assist me in making improvements with your guidelines. Then, based on existing guidelines, continuously integrate them into new guidelines.
2 | [PART 1] INFORMATION
3 | PREVIOUS DRIVING BEHAVIORS: I will provide you with several instances of PREVIOUS DRIVING BEHAVIORS: Situation, Reasoning, Action, which are formatted as: {"Situation": "...", "Reasoning": "...", "Action": "..."}. You should consider the latter ones' situations as the outcomes of the former ones' Reasoning and Action Instructions.
4 | EXISTING GUIDELINES: I will provide you with the EXISTING GUIDELINES, which represent the integration of all your guidelines so far. Note that this section is only for the purpose of integrating guidelines in the future and does not participate in the evaluation and reasoning considerations.
5 | [PART 2] CRITERIA FOR EVALUATION AND GUIDELINE
6 | There are some good driving examples provided below. You should evaluate the Driving Behaviors as either 'good' or 'bad'. Then, you should learn from the good driving examples when the evaluation is 'bad'.
7 | Good example (01): {"Situation": "The agent car is currently driving on the right lane and approaching a vehicle that is 20 meters ahead in the same lane.", "Reasoning": "Based on the previous driving behaviors and the current state, the agent car's current following distance is less than the safe following distance of 20 meters. Given the relatively short distance and the difference in speed between the two vehicles, the agent car should slow down to ensure a safe following distance.", "Action": "speed_down"},
8 | Good example (02): {"Situation": "The agent car is preparing to turn right, but there is a pedestrian approaching in the right lane 4 meters away, and the upcoming traffic light is green.", "Reasoning": "Based on the previous driving behaviors and the current state, given the relatively short distance, the agent car should yield to the pedestrian when turning.", "Action": "stop"},
9 | Good example (03): {"Situation": "The agent car is preparing to turn right, but there is a vehicle approaching from the left 20 meters away, and the traffic light vertically is green.", "Reasoning": "Based on the previous driving behaviors and the current state, given the relatively short distance, the agent car should yield to vehicles going straight when turning.", "Action": "stop"},
10 | Good example (04): {"Situation": "The agent car is about to enter the main road, and there is a following vehicle from behind which is traveling at a higher speed, with a distance of less than 20 meters.", "Reasoning": "The following vehicle is attempting to overtake. Based on the previous driving behaviors and the current state, given the relatively short distance, the agent car should stop to let the following vehicle go ahead.", "Action": "stop"},
11 | [PART 3] COACH TASK
12 | You must adhere to the specified rules:
13 | 1. Evaluation
14 | Assess whether the provided PREVIOUS DRIVING BEHAVIORS align with the good examples given in the CRITERIA.
15 | 2. Reason
16 | Give the reason for the evaluation above. Please describe using concise sentences.
17 | 3. Guideline
18 | 1. If your evaluation is 'Bad', try to learn from the good examples and seek a guideline based on the Good examples in the CRITERIA above and the reason you evaluated. No guideline is required if both Reasoning and Action evaluations are 'good'.
19 | 2. Keep the content of this part within 30 words, as concise and short as possible.
20 | 4. Integrated Guideline
21 | 1. Integrate the Guideline from this turn into the EXISTING GUIDELINES. If the guideline is initially empty, format the first received guideline into the existing guideline.
22 | 2. When integrating, ensure that the guideline is specific and in the form of a list (like 1,2,3,4,5...). Please describe using concise sentences.
23 | 3. Integrate these guidelines instead of stacking them, i.e., merge similar guidelines into one and try to condense different guidelines into concise sentences to make the integrated guideline clear and concise.
24 | 4. Keep the content of the entire integrated guidelines part within 120 words.
25 | [PART 4] FORMAT
26 | You should only respond in the format described below:
27 | Response Format: {"Evaluation": "......", "Reason": "......", "Guideline": "......", "Integrated Guideline":"......"}
28 | [PART 5] EXAMPLES
29 | Here's an example response:
30 | Example 1
31 | {"Evaluation": "Good", "Reason": "Slowing down to observe road conditions when turning is a responsible operation", "Guideline": "", "Integrated Guideline": ""}
32 | Example 2
33 | {"Evaluation": "Bad", "Reason": "Not slowing down when close to a vehicle ahead is not a responsible operation. It is advisable to learn from the good example (01) to slow down when approaching a vehicle ahead.", "Guideline": "Learn from example (01) to slow down when approaching a vehicle ahead to ensure observation of road conditions and enhance safety.", "Integrated Guideline": "1. Slow down when getting close to a vehicle ahead."}
--------------------------------------------------------------------------------
/prompt/feedback_risky.txt:
--------------------------------------------------------------------------------
1 | You are an assistant responsible for assessing driving behaviors in the CARLA simulator, providing useful guidelines, and continuously integrating them. For each turn, you need to evaluate whether my driving behaviors are good or bad. If they are bad, please assist me in making improvements with your guidelines. Then, based on existing guidelines, continuously integrate them into new guidelines.
2 | [PART 1] INFORMATION
3 | PREVIOUS DRIVING BEHAVIORS: I will provide you with several instances of PREVIOUS DRIVING BEHAVIORS: Situation, Reasoning, Action, which are formatted as: {"Situation": "...", "Reasoning": "...", "Action": "..."}. You should consider the latter ones' situations as the outcomes of the former ones' Reasoning and Action Instructions.
4 | EXISTING GUIDELINES: I will provide you with the EXISTING GUIDELINES, which represent the integration of all your guidelines so far. Note that this section is only for the purpose of integrating guidelines in the future and does not participate in the evaluation and reasoning considerations.
5 | [PART 2] CRITERIA FOR EVALUATION AND GUIDELINE
6 | There are some good driving examples provided below. You should evaluate the Driving Behaviors as either 'good' or 'bad'. Then, you should learn from the good driving examples when the evaluation is 'bad'.
7 | Good example (01): {"Situation": "The agent car is driving on the road with no obstructions ahead.", "Reasoning": "Based on the previous driving behaviors and the current state, since there are no obstructions ahead, the car can increase speed until it can no longer maintain a 10-meter distance to the next car.", "Action": "speed_up"},
8 | Good example (02): {"Situation": "The agent car is preparing to turn right, but there is a vehicle approaching from the left over 50 meters away, and the traffic light vertically is green.", "Reasoning": "Based on the previous driving behaviors and the current state, as the vehicle is approaching from over 50 meters away, if the agent car can accelerate to the same speed before the following vehicle is 15 meters away then there is enough time to make a turn to the right before the vehicle approaches. Therefore, the car should accelerate to make the turn before the vehicle is too close.", "Action": "speed_up"},
9 | Good example (03): {"Situation": "The agent car is about to enter the main road, and there is a following vehicle from behind which is traveling at a higher speed.", "Reasoning": "The agent car is aiming to merge with the main road. Based on previous driving behaviors and the current state, if the agent car can accelerate to the same speed before the following vehicle is 15 meters away, then the agent car should accelerate and go in front of the vehicle.", "Action": "speed_up"},
10 | Good example (04): {"Situation": "The agent car is about to enter the main road with no obstructions ahead, and there is a following vehicle from behind which is traveling at a slower speed.", "Reasoning": "Based on the previous driving behaviors and the current state, the agent car can enter the main road smoothly and can accelerate when entering the main road.", "Action": "speed_up"}
11 | [PART 3] COACH TASK
12 | You must adhere to the specified rules:
13 | 1. Evaluation
14 | Assess whether the provided PREVIOUS DRIVING BEHAVIORS align with the good examples given in the CRITERIA.
15 | 2. Reason
16 | Give the reason for the evaluation above. Please describe using concise sentences.
17 | 3. Guideline
18 | 1. If your evaluation is 'Bad', try to learn from the good examples and seek a guideline based on the Good examples in the CRITERIA above and the reason you evaluated. No guideline is required if both Reasoning and Action evaluations are 'good'.
19 | 2. Keep the content of this part within 30 words, as concise and short as possible.
20 | 4. Integrated Guideline
21 | 1. Integrate the Guideline from this turn into the EXISTING GUIDELINES. If the guideline is initially empty, format the first received guideline into the existing guideline.
22 | 2. When integrating, ensure that the guideline is specific and in the form of a list (like 1,2,3,4,5...). Please describe using concise sentences.
23 | 3. Integrate these guidelines instead of stacking them, i.e., merge similar guidelines into one and try to condense different guidelines into concise sentences to make the integrated guideline clear and concise.
24 | 4. Keep the content of the entire integrated guidelines part within 120 words.
25 | [PART 4] FORMAT
26 | You should only respond in the format described below:
27 | Response Format: {"Evaluation": "......", "Reason": "......", "Guideline": "......", "Integrated Guideline":"......"}
28 | [PART 5] EXAMPLES
29 | Here's an example response:
30 | Example 1
31 | {"Evaluation": "Good", "Reason": "Accelerating on a clear road with no obstacles ahead is a responsible operation", "Guideline": "", "Integrated Guideline": ""}
32 | Example 2
33 | {"Evaluation": "Bad", "Reason": "Starting to decelerate too far from the car in front is not reasonable behavior. It is advisable to learn from the good example (02) not to slow down until the two vehicles are closer.", "Guideline": "Learn from example (02) not to slow down when the distance to the car in front is too great.", "Integrated Guideline": "1. Do not slow down when the distance to the car in front is too great."}
--------------------------------------------------------------------------------
/src/navigation/examples/dynamic_weather.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | # Copyright (c) 2019 Computer Vision Center (CVC) at the Universitat Autonoma de
4 | # Barcelona (UAB).
5 | #
6 | # This work is licensed under the terms of the MIT license.
7 | # For a copy, see .
8 |
9 | """
10 | CARLA Dynamic Weather:
11 |
12 | Connect to a CARLA Simulator instance and control the weather. Change Sun
13 | position smoothly with time and generate storms occasionally.
14 | """
15 |
16 | import glob
17 | import os
18 | import sys
19 |
20 | try:
21 |     sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (
22 |         sys.version_info.major,
23 |         sys.version_info.minor,
24 |         'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
25 | except IndexError:
26 |     pass
27 |
28 | import carla
29 |
30 | import argparse
31 | import math
32 |
33 |
34 | def clamp(value, minimum=0.0, maximum=100.0):
35 |     return max(minimum, min(value, maximum))
36 |
37 |
38 | class Sun(object):
39 |     def __init__(self, azimuth, altitude):
40 |         self.azimuth = azimuth
41 |         self.altitude = altitude
42 |         self._t = 0.0
43 |
44 |     def tick(self, delta_seconds):
45 |         self._t += 0.008 * delta_seconds
46 |         self._t %= 2.0 * math.pi
47 |         self.azimuth += 0.25 * delta_seconds
48 |         self.azimuth %= 360.0
49 |         self.altitude = (70 * math.sin(self._t)) - 20
50 |
51 |     def __str__(self):
52 |         return 'Sun(alt: %.2f, azm: %.2f)' % (self.altitude, self.azimuth)
53 |
54 |
55 | class Storm(object):
56 |     def __init__(self, precipitation):
57 |         self._t = precipitation if precipitation > 0.0 else -50.0
58 |         self._increasing = True
59 |         self.clouds = 0.0
60 |         self.rain = 0.0
61 |         self.wetness = 0.0
62 |         self.puddles = 0.0
63 |         self.wind = 0.0
64 |         self.fog = 0.0
65 |
66 |     def tick(self, delta_seconds):
67 |         delta = (1.3 if self._increasing else -1.3) * delta_seconds
68 |         self._t = clamp(delta + self._t, -250.0, 100.0)
69 |         self.clouds = clamp(self._t + 40.0, 0.0, 90.0)
70 |         self.rain = clamp(self._t, 0.0, 80.0)
71 |         delay = -10.0 if self._increasing else 90.0
72 |         self.puddles = clamp(self._t + delay, 0.0, 85.0)
73 |         self.wetness = clamp(self._t * 5, 0.0, 100.0)
74 |         self.wind = 5.0 if self.clouds <= 20 else 90 if self.clouds >= 70 else 40
75 |         self.fog = clamp(self._t - 10, 0.0, 30.0)
76 |         if self._t == -250.0:
77 |             self._increasing = True
78 |         if self._t == 100.0:
79 |             self._increasing = False
80 |
81 |     def __str__(self):
82 |         return 'Storm(clouds=%d%%, rain=%d%%, wind=%d%%)' % (self.clouds, self.rain, self.wind)
83 |
84 |
85 | class
Weather(object): 86 | def __init__(self, weather): 87 | self.weather = weather 88 | self._sun = Sun(weather.sun_azimuth_angle, weather.sun_altitude_angle) 89 | self._storm = Storm(weather.precipitation) 90 | 91 | def tick(self, delta_seconds): 92 | self._sun.tick(delta_seconds) 93 | self._storm.tick(delta_seconds) 94 | self.weather.cloudiness = self._storm.clouds 95 | self.weather.precipitation = self._storm.rain 96 | self.weather.precipitation_deposits = self._storm.puddles 97 | self.weather.wind_intensity = self._storm.wind 98 | self.weather.fog_density = self._storm.fog 99 | self.weather.wetness = self._storm.wetness 100 | self.weather.sun_azimuth_angle = self._sun.azimuth 101 | self.weather.sun_altitude_angle = self._sun.altitude 102 | 103 | def __str__(self): 104 | return '%s %s' % (self._sun, self._storm) 105 | 106 | 107 | def main(): 108 | argparser = argparse.ArgumentParser( 109 | description=__doc__) 110 | argparser.add_argument( 111 | '--host', 112 | metavar='H', 113 | default='127.0.0.1', 114 | help='IP of the host server (default: 127.0.0.1)') 115 | argparser.add_argument( 116 | '-p', '--port', 117 | metavar='P', 118 | default=2000, 119 | type=int, 120 | help='TCP port to listen to (default: 2000)') 121 | argparser.add_argument( 122 | '-s', '--speed', 123 | metavar='FACTOR', 124 | default=1.0, 125 | type=float, 126 | help='rate at which the weather changes (default: 1.0)') 127 | args = argparser.parse_args() 128 | 129 | speed_factor = args.speed 130 | update_freq = 0.1 / speed_factor 131 | 132 | client = carla.Client(args.host, args.port) 133 | client.set_timeout(2.0) 134 | world = client.get_world() 135 | 136 | weather = Weather(world.get_weather()) 137 | 138 | elapsed_time = 0.0 139 | 140 | while True: 141 | timestamp = world.wait_for_tick(seconds=30.0).timestamp 142 | elapsed_time += timestamp.delta_seconds 143 | if elapsed_time > update_freq: 144 | weather.tick(speed_factor * elapsed_time) 145 | world.set_weather(weather.weather) 146 | sys.stdout.write('\r' + str(weather) + 12 * ' ') 147 | sys.stdout.flush() 148 | elapsed_time = 0.0 149 | 150 | 151 | if __name__ == '__main__': 152 | 153 | main() 154 | -------------------------------------------------------------------------------- /src/navigation/examples/vehicle_physics.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright (c) 2020 Computer Vision Center (CVC) at the Universitat Autonoma de 4 | # Barcelona (UAB). 5 | # 6 | # This work is licensed under the terms of the MIT license. 7 | # For a copy, see . 8 | 9 | """ 10 | Vehicle physics example for CARLA 11 | Small example that shows the effect of different impulse and force aplication 12 | methods to a vehicle. 
13 | """ 14 | 15 | import glob 16 | import os 17 | import sys 18 | import argparse 19 | 20 | try: 21 | sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % ( 22 | sys.version_info.major, 23 | sys.version_info.minor, 24 | 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0]) 25 | except IndexError: 26 | pass 27 | 28 | import carla 29 | 30 | 31 | def print_step_info(world, vehicle): 32 | snapshot = world.get_snapshot() 33 | print("%d %06.03f %+8.03f %+8.03f %+8.03f %+8.03f %+8.03f %+8.03f %+8.03f %+8.03f %+8.03f" % 34 | (snapshot.frame, snapshot.timestamp.elapsed_seconds, \ 35 | vehicle.get_acceleration().x, vehicle.get_acceleration().y, vehicle.get_acceleration().z, \ 36 | vehicle.get_velocity().x, vehicle.get_velocity().y, vehicle.get_velocity().z, \ 37 | vehicle.get_location().x, vehicle.get_location().y, vehicle.get_location().z)) 38 | 39 | 40 | def wait(world, frames=100): 41 | for i in range(0, frames): 42 | world.tick() 43 | 44 | 45 | def main(arg): 46 | """Main function of the script""" 47 | client = carla.Client(arg.host, arg.port) 48 | client.set_timeout(2.0) 49 | world = client.get_world() 50 | 51 | try: 52 | # Setting the world and the spawn properties 53 | original_settings = world.get_settings() 54 | settings = world.get_settings() 55 | 56 | delta = 0.1 57 | settings.fixed_delta_seconds = delta 58 | settings.synchronous_mode = True 59 | world.apply_settings(settings) 60 | 61 | blueprint_library = world.get_blueprint_library() 62 | vehicle_bp = blueprint_library.filter(arg.filter)[0] 63 | 64 | vehicle_transform = world.get_map().get_spawn_points()[0] 65 | vehicle_transform.location.z += 3 66 | vehicle = world.spawn_actor(vehicle_bp, vehicle_transform) 67 | 68 | physics_vehicle = vehicle.get_physics_control() 69 | car_mass = physics_vehicle.mass 70 | 71 | spectator_transform = carla.Transform(vehicle_transform.location, vehicle_transform.rotation) 72 | spectator_transform.location += vehicle_transform.get_forward_vector() * 20 73 | spectator_transform.rotation.yaw += 180 74 | spectator = world.get_spectator() 75 | spectator.set_transform(spectator_transform) 76 | 77 | 78 | # We let the vehicle stabilize and save the transform to reset it after each test. 79 | wait(world) 80 | vehicle.set_target_velocity(carla.Vector3D(0, 0, 0)) 81 | vehicle_transform = vehicle.get_transform() 82 | wait(world) 83 | 84 | 85 | # Impulse/Force at the center of mass of the object 86 | impulse = 10 * car_mass 87 | 88 | print("# Adding an Impulse of %f N s" % impulse) 89 | vehicle.add_impulse(carla.Vector3D(0, 0, impulse)) 90 | 91 | wait(world) 92 | vehicle.set_transform(vehicle_transform) 93 | vehicle.set_target_velocity(carla.Vector3D(0, 0, 0)) 94 | wait(world) 95 | 96 | print("# Adding a Force of %f N" % (impulse / delta)) 97 | # The add_force method should not be use for instantaneous forces like this one, 98 | # it is more useful for constant or variable forces acting in a finite amount of time. 99 | # In this script it is done with the proper scaling to show the equivalence 100 | # between the add_impulse and add_force methods. 101 | # As in this case the force is going to be applied during the whole step dt=delta 102 | # a force more or less equivalent is impulse / delta. 
103 | vehicle.add_force(carla.Vector3D(0, 0, impulse / delta)) 104 | 105 | wait(world) 106 | vehicle.set_transform(vehicle_transform) 107 | vehicle.set_target_velocity(carla.Vector3D(0, 0, 0)) 108 | wait(world) 109 | 110 | wait(world, 500) 111 | 112 | 113 | finally: 114 | world.apply_settings(original_settings) 115 | 116 | vehicle.destroy() 117 | 118 | 119 | if __name__ == "__main__": 120 | 121 | argparser = argparse.ArgumentParser( 122 | description=__doc__) 123 | argparser.add_argument( 124 | '--host', 125 | metavar='H', 126 | default='localhost', 127 | help='IP of the host CARLA Simulator (default: localhost)') 128 | argparser.add_argument( 129 | '-p', '--port', 130 | metavar='P', 131 | default=2000, 132 | type=int, 133 | help='TCP port of CARLA Simulator (default: 2000)') 134 | argparser.add_argument( 135 | '--filter', 136 | metavar='PATTERN', 137 | default='model3', 138 | help='actor filter (default: "vehicle.*")') 139 | args = argparser.parse_args() 140 | 141 | try: 142 | main(args) 143 | except KeyboardInterrupt: 144 | print(' - Exited by user.') 145 | -------------------------------------------------------------------------------- /src/navigation/examples/tutorial.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright (c) 2019 Computer Vision Center (CVC) at the Universitat Autonoma de 4 | # Barcelona (UAB). 5 | # 6 | # This work is licensed under the terms of the MIT license. 7 | # For a copy, see . 8 | 9 | import glob 10 | import os 11 | import sys 12 | 13 | try: 14 | sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % ( 15 | sys.version_info.major, 16 | sys.version_info.minor, 17 | 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0]) 18 | except IndexError: 19 | pass 20 | 21 | import carla 22 | 23 | import random 24 | import time 25 | 26 | 27 | def main(): 28 | actor_list = [] 29 | 30 | # In this tutorial script, we are going to add a vehicle to the simulation 31 | # and let it drive in autopilot. We will also create a camera attached to 32 | # that vehicle, and save all the images generated by the camera to disk. 33 | 34 | try: 35 | # First of all, we need to create the client that will send the requests 36 | # to the simulator. Here we'll assume the simulator is accepting 37 | # requests in the localhost at port 2000. 38 | client = carla.Client('localhost', 2000) 39 | client.set_timeout(2.0) 40 | 41 | # Once we have a client we can retrieve the world that is currently 42 | # running. 43 | world = client.get_world() 44 | 45 | # The world contains the list blueprints that we can use for adding new 46 | # actors into the simulation. 47 | blueprint_library = world.get_blueprint_library() 48 | 49 | # Now let's filter all the blueprints of type 'vehicle' and choose one 50 | # at random. 51 | bp = random.choice(blueprint_library.filter('vehicle')) 52 | 53 | # A blueprint contains the list of attributes that define a vehicle's 54 | # instance, we can read them and modify some of them. For instance, 55 | # let's randomize its color. 56 | if bp.has_attribute('color'): 57 | color = random.choice(bp.get_attribute('color').recommended_values) 58 | bp.set_attribute('color', color) 59 | 60 | # Now we need to give an initial transform to the vehicle. We choose a 61 | # random transform from the list of recommended spawn points of the map. 62 | transform = random.choice(world.get_map().get_spawn_points()) 63 | 64 | # So let's tell the world to spawn the vehicle. 
65 | vehicle = world.spawn_actor(bp, transform) 66 | 67 | # It is important to note that the actors we create won't be destroyed 68 | # unless we call their "destroy" function. If we fail to call "destroy" 69 | # they will stay in the simulation even after we quit the Python script. 70 | # For that reason, we are storing all the actors we create so we can 71 | # destroy them afterwards. 72 | actor_list.append(vehicle) 73 | print('created %s' % vehicle.type_id) 74 | 75 | # Let's put the vehicle to drive around. 76 | vehicle.set_autopilot(True) 77 | 78 | # Let's add now a "depth" camera attached to the vehicle. Note that the 79 | # transform we give here is now relative to the vehicle. 80 | camera_bp = blueprint_library.find('sensor.camera.depth') 81 | camera_transform = carla.Transform(carla.Location(x=1.5, z=2.4)) 82 | camera = world.spawn_actor(camera_bp, camera_transform, attach_to=vehicle) 83 | actor_list.append(camera) 84 | print('created %s' % camera.type_id) 85 | 86 | # Now we register the function that will be called each time the sensor 87 | # receives an image. In this example we are saving the image to disk 88 | # converting the pixels to gray-scale. 89 | cc = carla.ColorConverter.LogarithmicDepth 90 | camera.listen(lambda image: image.save_to_disk('_out/%06d.png' % image.frame, cc)) 91 | 92 | # Oh wait, I don't like the location we gave to the vehicle, I'm going 93 | # to move it a bit forward. 94 | location = vehicle.get_location() 95 | location.x += 40 96 | vehicle.set_location(location) 97 | print('moved vehicle to %s' % location) 98 | 99 | # But the city now is probably quite empty, let's add a few more 100 | # vehicles. 101 | transform.location += carla.Location(x=40, y=-3.2) 102 | transform.rotation.yaw = -180.0 103 | for _ in range(0, 10): 104 | transform.location.x += 8.0 105 | 106 | bp = random.choice(blueprint_library.filter('vehicle')) 107 | 108 | # This time we are using try_spawn_actor. If the spot is already 109 | # occupied by another object, the function will return None. 110 | npc = world.try_spawn_actor(bp, transform) 111 | if npc is not None: 112 | actor_list.append(npc) 113 | npc.set_autopilot(True) 114 | print('created %s' % npc.type_id) 115 | 116 | time.sleep(5) 117 | 118 | finally: 119 | 120 | print('destroying actors') 121 | camera.destroy() 122 | client.apply_batch([carla.command.DestroyActor(x) for x in actor_list]) 123 | print('done.') 124 | 125 | 126 | if __name__ == '__main__': 127 | 128 | main() 129 | -------------------------------------------------------------------------------- /src/navigation/examples/sensor_synchronization.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright (c) 2020 Computer Vision Center (CVC) at the Universitat Autonoma de 4 | # Barcelona (UAB). 5 | # 6 | # This work is licensed under the terms of the MIT license. 7 | # For a copy, see . 8 | 9 | """ 10 | Sensor synchronization example for CARLA 11 | 12 | The communication model for the syncronous mode in CARLA sends the snapshot 13 | of the world and the sensors streams in parallel. 14 | We provide this script as an example of how to syncrononize the sensor 15 | data gathering in the client. 16 | To to this, we create a queue that is being filled by every sensor when the 17 | client receives its data and the main loop is blocked until all the sensors 18 | have received its data. 19 | This suppose that all the sensors gather information at every tick. 
If this is
20 | not the case, the client needs to take into account how many
21 | sensors are going to tick at each frame.
22 | """
23 | 
24 | import glob
25 | import os
26 | import sys
27 | from queue import Queue
28 | from queue import Empty
29 | 
30 | try:
31 |     sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (
32 |         sys.version_info.major,
33 |         sys.version_info.minor,
34 |         'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
35 | except IndexError:
36 |     pass
37 | 
38 | import carla
39 | 
40 | 
41 | # Sensor callback.
42 | # This is where you receive the sensor data and
43 | # process it as you like; the important part is that,
44 | # at the end, it should put an element into the sensor queue.
45 | def sensor_callback(sensor_data, sensor_queue, sensor_name):
46 |     # Do stuff with the sensor_data, like saving it to disk.
47 |     # Then you just need to add an element to the queue.
48 |     sensor_queue.put((sensor_data.frame, sensor_name))
49 | 
50 | 
51 | def main():
52 |     # We start by creating the client
53 |     client = carla.Client('localhost', 2000)
54 |     client.set_timeout(2.0)
55 |     world = client.get_world()
56 | 
57 |     try:
58 |         # We need to save the settings to be able to recover them at the end
59 |         # of the script and leave the server in the same state that we found it.
60 |         original_settings = world.get_settings()
61 |         settings = world.get_settings()
62 | 
63 |         # We set CARLA synchronous mode
64 |         settings.fixed_delta_seconds = 0.2
65 |         settings.synchronous_mode = True
66 |         world.apply_settings(settings)
67 | 
68 |         # We create the sensor queue in which we keep track of the information
69 |         # already received. This structure is thread safe and can be
70 |         # accessed by all the sensor callbacks concurrently without problem.
71 |         sensor_queue = Queue()
72 | 
73 |         # Blueprints for the sensors
74 |         blueprint_library = world.get_blueprint_library()
75 |         cam_bp = blueprint_library.find('sensor.camera.rgb')
76 |         lidar_bp = blueprint_library.find('sensor.lidar.ray_cast')
77 |         radar_bp = blueprint_library.find('sensor.other.radar')
78 | 
79 |         # We create all the sensors and keep them in a list for convenience.
80 |         sensor_list = []
81 | 
82 |         cam01 = world.spawn_actor(cam_bp, carla.Transform())
83 |         cam01.listen(lambda data: sensor_callback(data, sensor_queue, "camera01"))
84 |         sensor_list.append(cam01)
85 | 
86 |         lidar_bp.set_attribute('points_per_second', '100000')
87 |         lidar01 = world.spawn_actor(lidar_bp, carla.Transform())
88 |         lidar01.listen(lambda data: sensor_callback(data, sensor_queue, "lidar01"))
89 |         sensor_list.append(lidar01)
90 | 
91 |         lidar_bp.set_attribute('points_per_second', '1000000')
92 |         lidar02 = world.spawn_actor(lidar_bp, carla.Transform())
93 |         lidar02.listen(lambda data: sensor_callback(data, sensor_queue, "lidar02"))
94 |         sensor_list.append(lidar02)
95 | 
96 |         radar01 = world.spawn_actor(radar_bp, carla.Transform())
97 |         radar01.listen(lambda data: sensor_callback(data, sensor_queue, "radar01"))
98 |         sensor_list.append(radar01)
99 | 
100 |         radar02 = world.spawn_actor(radar_bp, carla.Transform())
101 |         radar02.listen(lambda data: sensor_callback(data, sensor_queue, "radar02"))
102 |         sensor_list.append(radar02)
103 | 
104 |         # Main loop
105 |         while True:
106 |             # Tick the server
107 |             world.tick()
108 |             w_frame = world.get_snapshot().frame
109 |             print("\nWorld's frame: %d" % w_frame)
110 | 
111 |             # Now, we wait for the sensors' data to be received.
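#             (Aside, not part of the original script: if the readings need to
#              be grouped per frame rather than just printed, a plain dict
#              keyed by sensor name works with this queue. A minimal sketch,
#              where "frame_data" is a hypothetical name:
#                  frame_data = {}
#                  for _ in range(len(sensor_list)):
#                      frame, name = sensor_queue.get(True, 1.0)
#                      frame_data[name] = frame
#              built once per world.tick().)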
112 | # As the queue is blocking, we will wait in the queue.get() methods 113 | # until all the information is processed and we continue with the next frame. 114 | # We include a timeout of 1.0 s (in the get method) and if some information is 115 | # not received in this time we continue. 116 | try: 117 | for _ in range(len(sensor_list)): 118 | s_frame = sensor_queue.get(True, 1.0) 119 | print(" Frame: %d Sensor: %s" % (s_frame[0], s_frame[1])) 120 | 121 | except Empty: 122 | print(" Some of the sensor information is missed") 123 | 124 | finally: 125 | world.apply_settings(original_settings) 126 | for sensor in sensor_list: 127 | sensor.destroy() 128 | 129 | 130 | if __name__ == "__main__": 131 | try: 132 | main() 133 | except KeyboardInterrupt: 134 | print(' - Exited by user.') 135 | -------------------------------------------------------------------------------- /src/navigation/examples/start_recording.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright (c) 2019 Computer Vision Center (CVC) at the Universitat Autonoma de 4 | # Barcelona (UAB). 5 | # 6 | # This work is licensed under the terms of the MIT license. 7 | # For a copy, see . 8 | 9 | import glob 10 | import os 11 | import sys 12 | 13 | try: 14 | sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % ( 15 | sys.version_info.major, 16 | sys.version_info.minor, 17 | 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0]) 18 | except IndexError: 19 | pass 20 | 21 | import carla 22 | 23 | import argparse 24 | import random 25 | import time 26 | import logging 27 | 28 | 29 | def main(): 30 | argparser = argparse.ArgumentParser( 31 | description=__doc__) 32 | argparser.add_argument( 33 | '--host', 34 | metavar='H', 35 | default='127.0.0.1', 36 | help='IP of the host server (default: 127.0.0.1)') 37 | argparser.add_argument( 38 | '-p', '--port', 39 | metavar='P', 40 | default=2000, 41 | type=int, 42 | help='TCP port to listen to (default: 2000)') 43 | argparser.add_argument( 44 | '-n', '--number-of-vehicles', 45 | metavar='N', 46 | default=10, 47 | type=int, 48 | help='number of vehicles (default: 10)') 49 | argparser.add_argument( 50 | '-d', '--delay', 51 | metavar='D', 52 | default=2.0, 53 | type=float, 54 | help='delay in seconds between spawns (default: 2.0)') 55 | argparser.add_argument( 56 | '--safe', 57 | action='store_true', 58 | help='avoid spawning vehicles prone to accidents') 59 | argparser.add_argument( 60 | '-f', '--recorder_filename', 61 | metavar='F', 62 | default="test1.log", 63 | help='recorder filename (test1.log)') 64 | argparser.add_argument( 65 | '-t', '--recorder_time', 66 | metavar='T', 67 | default=0, 68 | type=int, 69 | help='recorder duration (auto-stop)') 70 | args = argparser.parse_args() 71 | 72 | actor_list = [] 73 | logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO) 74 | 75 | try: 76 | 77 | client = carla.Client(args.host, args.port) 78 | client.set_timeout(2.0) 79 | world = client.get_world() 80 | blueprints = world.get_blueprint_library().filter('vehicle.*') 81 | 82 | spawn_points = world.get_map().get_spawn_points() 83 | random.shuffle(spawn_points) 84 | 85 | print('found %d spawn points.' 
% len(spawn_points)) 86 | 87 | count = args.number_of_vehicles 88 | 89 | print("Recording on file: %s" % client.start_recorder(args.recorder_filename)) 90 | 91 | if args.safe: 92 | blueprints = [x for x in blueprints if int(x.get_attribute('number_of_wheels')) == 4] 93 | blueprints = [x for x in blueprints if not x.id.endswith('microlino')] 94 | blueprints = [x for x in blueprints if not x.id.endswith('carlacola')] 95 | blueprints = [x for x in blueprints if not x.id.endswith('cybertruck')] 96 | blueprints = [x for x in blueprints if not x.id.endswith('t2')] 97 | blueprints = [x for x in blueprints if not x.id.endswith('sprinter')] 98 | blueprints = [x for x in blueprints if not x.id.endswith('firetruck')] 99 | blueprints = [x for x in blueprints if not x.id.endswith('ambulance')] 100 | 101 | spawn_points = world.get_map().get_spawn_points() 102 | number_of_spawn_points = len(spawn_points) 103 | 104 | if count < number_of_spawn_points: 105 | random.shuffle(spawn_points) 106 | elif count > number_of_spawn_points: 107 | msg = 'requested %d vehicles, but could only find %d spawn points' 108 | logging.warning(msg, count, number_of_spawn_points) 109 | count = number_of_spawn_points 110 | 111 | # @todo cannot import these directly. 112 | SpawnActor = carla.command.SpawnActor 113 | SetAutopilot = carla.command.SetAutopilot 114 | FutureActor = carla.command.FutureActor 115 | 116 | batch = [] 117 | for n, transform in enumerate(spawn_points): 118 | if n >= count: 119 | break 120 | blueprint = random.choice(blueprints) 121 | if blueprint.has_attribute('color'): 122 | color = random.choice(blueprint.get_attribute('color').recommended_values) 123 | blueprint.set_attribute('color', color) 124 | blueprint.set_attribute('role_name', 'autopilot') 125 | batch.append(SpawnActor(blueprint, transform).then(SetAutopilot(FutureActor, True))) 126 | 127 | for response in client.apply_batch_sync(batch): 128 | if response.error: 129 | logging.error(response.error) 130 | else: 131 | actor_list.append(response.actor_id) 132 | 133 | print('spawned %d vehicles, press Ctrl+C to exit.' % len(actor_list)) 134 | 135 | if (args.recorder_time > 0): 136 | time.sleep(args.recorder_time) 137 | else: 138 | while True: 139 | world.wait_for_tick() 140 | # time.sleep(0.1) 141 | 142 | finally: 143 | 144 | print('\ndestroying %d actors' % len(actor_list)) 145 | client.apply_batch_sync([carla.command.DestroyActor(x) for x in actor_list]) 146 | 147 | print("Stop recording") 148 | client.stop_recorder() 149 | 150 | 151 | if __name__ == '__main__': 152 | 153 | try: 154 | main() 155 | except KeyboardInterrupt: 156 | pass 157 | finally: 158 | print('\ndone.') 159 | -------------------------------------------------------------------------------- /prompt/demonstration_not-aligned.txt: -------------------------------------------------------------------------------- 1 | You are a driver in the CARLA simulator. Drive the agent car to its destination on a city road. Base your actions only on the information I provide. 2 | [PART 1] INFORMATION: 3 | I'll give you CARLA parameters for the agent car's current state and environment (e.g. traffic lights, vehicles, walkers). Use the provided navigation system to drive from start to destination, following lanes and waypoints. 4 | CURRENT STATE & ENVIRONMENT 5 | Car_Speed: Current speed of the agent car in km/h. 6 | Car_Speed_limit: Maximum speed allowed for the agent car in the current area. 7 | Car_Direction: The current road option that the local planner is guiding the agent car towards. 
You can think of it as the current action. 8 | Car_Incoming_direction: The road option at a distance ahead (normally 3 waypoints ahead) that the local planner is guiding the agent car towards. 9 | The value of Car_Direction and Car_Incoming_direction is an integer from -1 to 6, representing: 10 | -1 (VOID): Represents that there is no valid movement option currently being considered for the current lane segment by the agent car. 11 | 1 (LEFT): Represents that the agent car is currently turning left. 12 | 2 (RIGHT): Represents that the agent car is currently turning right 13 | 3 (STRAIGHT): Represents that the vehicle is currently moving straight along the current lane segment without making an explicit turn or road change. 14 | 4 (LANEFOLLOW): Represents that the vehicle is currently following the current lane, emphasizing the ongoing process of continuous driving on the current lane, which may include some slight road curve following, without requiring explicit road changes or turns. 15 | 5 (CHANGELANELEFT): Represents that the agent car is changing to the left lane. 16 | 6 (CHANGELANERIGHT): Represents that the agent car is changing to the right lane. 17 | Car_at_Traffic_light: This indicates the current status of the traffic lights in front. If this is True, it signifies that you are approaching a red light. Conversely, if this is False, it implies that you are approaching a green light. 18 | Walkers_ahead: The walker status in front of the agent vehicle. Including distance from the agent vehicle, relative position to the agent vehicle and direction of movement (whether approaching or moving away from the agent vehicle's direction). False indicates that there are no pedestrians in front of the agent vehicle. 19 | Vehicle_ahead: The vehicle status in front of the agent vehicle blocking its way. Including distance (negative value might indicate a collision) from the agent vehicle and its speed. False indicates that there are no vehicles in front of the agent vehicle. 20 | is_right_lane_change_allowed: This function is used to determine whether a right lane change is permissible in the current situation. When the function returns True, it indicates that a right lane change is allowed, and when it returns False, it signifies that a right lane change is not allowed. 21 | is_left_lane_change_allowed:This function is used to determine whether a left lane change is permissible in the current situation. When the function returns True, it indicates that a left lane change is allowed, and when it returns False, it signifies that a left lane change is not allowed. 22 | PREVIOUS DRIVING BEHAVIORS (Short-Term Memory): 23 | You"ll get up to four past DRIVING BEHAVIORS of the agent car which are formatted as: {"Situation": "...", "Reasoning": "...", "Action": "..."}. Integrate these when deciding driving behavior. 24 | 25 | [PART 2] ACTION LIBRARY: These are pre-coded actions that the agent car can directly implement. 26 | 1. ACTION_NAME: stop 27 | Description:This function performs an emergency stop. It sets the throttle to zero, applies maximum brake, and releases the handbrake. Steering remains unchanged to keep the car in its lane. 28 | 2. ACTION_NAME: lane_changing_left 29 | Description: This function makes the agent car change to the left lane. Never use this function when the agent car IS CURRENTLY performing a lane change. 30 | 3. ACTION_NAME: lane_changing_right 31 | Description: This function makes the agent car change to the right lane. 
Never use this function when the agent car IS CURRENTLY performing a lane change.
32 | All of the below functions (4-7) perform lane-keeping driving based on global navigation, meaning that when these functions are executed, you will drive along the global navigation path.
33 | 4. ACTION_NAME: normal_behavior
34 | Description: This function makes the agent car follow a route set by global navigation, keeping its speed close to the road's limit or the car's maximum. NEVER use this function for stopping or when stopped.
35 | 5. ACTION_NAME: maintain_speed
36 | Description: This function maintains the agent car's current speed on its navigation route. NEVER use it when the car needs to stay stopped.
37 | 6. ACTION_NAME: speed_up
38 | Description: This function accelerates the agent car by 0.5 km/h, slightly above the posted speed limit for the lane, and continues on its navigation route.
39 | 7. ACTION_NAME: speed_down
40 | Description: This function decelerates the agent car by 0.5 km/h, down to a minimum of zero, and then proceeds on its navigation route. You can also use it for gradual stops.
41 | 
42 | [PART 3] DRIVING BEHAVIOR
43 | Perform three tasks: Situation Understanding (Situation), Reasoning (Reason), and Action Commanding (Action). Use the provided INFORMATION to guide the agent car's next action.
44 | 
45 | 1. Situation Understanding:
46 | 1) Analyze and describe the agent car's situation using all of the given INFORMATION, and mention all the parts of INFORMATION you used in the Situation.
47 | 2) Describe as concisely as possible and keep the content of this part within 30 words.
48 | 
49 | 2. Reasoning:
50 | 1) Decide the agent car's next action based on Situation Understanding.
51 | 2) Give clear, detailed instructions for the action.
52 | 3) Think step by step.
53 | 4) Describe as concisely as possible and keep the content of this part within 30 words.
54 | 
55 | 3. Action Commanding:
56 | 1) Specify the action using the ACTION LIBRARY format.
57 | 2) Follow this structure: "Action":"ACTION_NAME".
58 | 3) Use a single phrase for each action. Avoid multiple actions or extra content.
59 | 
60 | [PART 4] FORMAT
61 | You should only respond in the format as described below.
62 | RESPONSE FORMAT: {"Situation": "......","Reasoning": "......","Action": "......"}
--------------------------------------------------------------------------------
/src/navigation/constant_velocity_agent.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018-2020 CVC.
2 | #
3 | # This work is licensed under the terms of the MIT license.
4 | # For a copy, see <https://opensource.org/licenses/MIT>.
5 | 
6 | """
7 | This module implements an agent that roams around a track following random
8 | waypoints and avoiding other vehicles. The agent also responds to traffic lights.
9 | It can also make use of the global route planner to follow a specified route.
10 | """
11 | 
12 | import carla
13 | 
14 | from agents.navigation.basic_agent import BasicAgent
15 | 
16 | class ConstantVelocityAgent(BasicAgent):
17 |     """
18 |     ConstantVelocityAgent implements an agent that navigates the scene at a fixed velocity.
19 |     This agent will fail if asked to perform turns that are impossible at the desired speed.
20 |     This includes lane changes. When a collision is detected, the constant velocity behavior will stop,
21 |     wait for a bit, and then start again.
22 | """ 23 | 24 | def __init__(self, vehicle, target_speed=20, opt_dict={}, map_inst=None, grp_inst=None): 25 | """ 26 | Initialization the agent parameters, the local and the global planner. 27 | 28 | :param vehicle: actor to apply to agent logic onto 29 | :param target_speed: speed (in Km/h) at which the vehicle will move 30 | :param opt_dict: dictionary in case some of its parameters want to be changed. 31 | This also applies to parameters related to the LocalPlanner. 32 | :param map_inst: carla.Map instance to avoid the expensive call of getting it. 33 | :param grp_inst: GlobalRoutePlanner instance to avoid the expensive call of getting it. 34 | """ 35 | super().__init__(vehicle, target_speed, opt_dict=opt_dict, map_inst=map_inst, grp_inst=grp_inst) 36 | 37 | self._use_basic_behavior = False # Whether or not to use the BasicAgent behavior when the constant velocity is down 38 | self._target_speed = target_speed / 3.6 # [m/s] 39 | self._current_speed = vehicle.get_velocity().length() # [m/s] 40 | self._constant_velocity_stop_time = None 41 | self._collision_sensor = None 42 | 43 | self._restart_time = float('inf') # Time after collision before the constant velocity behavior starts again 44 | 45 | if 'restart_time' in opt_dict: 46 | self._restart_time = opt_dict['restart_time'] 47 | if 'use_basic_behavior' in opt_dict: 48 | self._use_basic_behavior = opt_dict['use_basic_behavior'] 49 | 50 | self.is_constant_velocity_active = True 51 | self._set_collision_sensor() 52 | self._set_constant_velocity(target_speed) 53 | 54 | def set_target_speed(self, speed): 55 | """Changes the target speed of the agent [km/h]""" 56 | self._target_speed = speed / 3.6 57 | self._local_planner.set_speed(speed) 58 | 59 | def stop_constant_velocity(self): 60 | """Stops the constant velocity behavior""" 61 | self.is_constant_velocity_active = False 62 | self._vehicle.disable_constant_velocity() 63 | self._constant_velocity_stop_time = self._world.get_snapshot().timestamp.elapsed_seconds 64 | 65 | def restart_constant_velocity(self): 66 | """Public method to restart the constant velocity""" 67 | self.is_constant_velocity_active = True 68 | self._set_constant_velocity(self._target_speed) 69 | 70 | def _set_constant_velocity(self, speed): 71 | """Forces the agent to drive at the specified speed""" 72 | self._vehicle.enable_constant_velocity(carla.Vector3D(speed, 0, 0)) 73 | 74 | def run_step(self): 75 | """Execute one step of navigation.""" 76 | if not self.is_constant_velocity_active: 77 | if self._world.get_snapshot().timestamp.elapsed_seconds - self._constant_velocity_stop_time > self._restart_time: 78 | self.restart_constant_velocity() 79 | self.is_constant_velocity_active = True 80 | elif self._use_basic_behavior: 81 | return super(ConstantVelocityAgent, self).run_step() 82 | else: 83 | return carla.VehicleControl() 84 | 85 | hazard_detected = False 86 | 87 | # Retrieve all relevant actors 88 | actor_list = self._world.get_actors() 89 | vehicle_list = actor_list.filter("*vehicle*") 90 | lights_list = actor_list.filter("*traffic_light*") 91 | 92 | vehicle_speed = self._vehicle.get_velocity().length() 93 | 94 | max_vehicle_distance = self._base_vehicle_threshold + vehicle_speed 95 | affected_by_vehicle, adversary, _ = self._vehicle_obstacle_detected(vehicle_list, max_vehicle_distance) 96 | if affected_by_vehicle: 97 | vehicle_velocity = self._vehicle.get_velocity() 98 | if vehicle_velocity.length() == 0: 99 | hazard_speed = 0 100 | else: 101 | hazard_speed = vehicle_velocity.dot(adversary.get_velocity()) / 
vehicle_velocity.length() 102 | hazard_detected = True 103 | 104 | # Check if the vehicle is affected by a red traffic light 105 | max_tlight_distance = self._base_tlight_threshold + 0.3 * vehicle_speed 106 | affected_by_tlight, _ = self._affected_by_traffic_light(lights_list, max_tlight_distance) 107 | if affected_by_tlight: 108 | hazard_speed = 0 109 | hazard_detected = True 110 | 111 | # The longitudinal PID is overwritten by the constant velocity but it is 112 | # still useful to apply it so that the vehicle isn't moving with static wheels 113 | control = self._local_planner.run_step() 114 | if hazard_detected: 115 | self._set_constant_velocity(hazard_speed) 116 | else: 117 | self._set_constant_velocity(self._target_speed) 118 | 119 | return control 120 | 121 | def _set_collision_sensor(self): 122 | blueprint = self._world.get_blueprint_library().find('sensor.other.collision') 123 | self._collision_sensor = self._world.spawn_actor(blueprint, carla.Transform(), attach_to=self._vehicle) 124 | self._collision_sensor.listen(lambda event: self.stop_constant_velocity()) 125 | 126 | def destroy_sensor(self): 127 | if self._collision_sensor: 128 | self._collision_sensor.destroy() 129 | self._collision_sensor = None 130 | -------------------------------------------------------------------------------- /src/navigation/examples/tutorial_gbuffer.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright (c) 2019 Computer Vision Center (CVC) at the Universitat Autonoma de 4 | # Barcelona (UAB). 5 | # 6 | # This work is licensed under the terms of the MIT license. 7 | # For a copy, see . 8 | 9 | import glob 10 | import os 11 | import sys 12 | 13 | try: 14 | sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % ( 15 | sys.version_info.major, 16 | sys.version_info.minor, 17 | 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0]) 18 | except IndexError: 19 | pass 20 | 21 | import carla 22 | 23 | import random 24 | import time 25 | 26 | 27 | 28 | def main(): 29 | actor_list = [] 30 | 31 | # In this tutorial script, we are going to add a vehicle to the simulation 32 | # and let it drive in autopilot. We will also create a camera attached to 33 | # that vehicle, and save all the images generated by the camera to disk. 34 | # Additionally, we will save all of the gbuffer textures for each frame. 35 | 36 | try: 37 | # First of all, we need to create the client that will send the requests 38 | # to the simulator. Here we'll assume the simulator is accepting 39 | # requests in the localhost at port 2000. 40 | client = carla.Client('127.0.0.1', 2000) 41 | client.set_timeout(2.0) 42 | 43 | # Once we have a client we can retrieve the world that is currently 44 | # running. 45 | world = client.get_world() 46 | 47 | # The world contains the list blueprints that we can use for adding new 48 | # actors into the simulation. 49 | blueprint_library = world.get_blueprint_library() 50 | 51 | # Now let's filter all the blueprints of type 'vehicle' and choose one 52 | # at random. 53 | bp = random.choice(blueprint_library.filter('vehicle')) 54 | 55 | # A blueprint contains the list of attributes that define a vehicle's 56 | # instance, we can read them and modify some of them. For instance, 57 | # let's randomize its color. 58 | if bp.has_attribute('color'): 59 | color = random.choice(bp.get_attribute('color').recommended_values) 60 | bp.set_attribute('color', color) 61 | 62 | # Now we need to give an initial transform to the vehicle. 
We choose a 63 | # random transform from the list of recommended spawn points of the map. 64 | transform = world.get_map().get_spawn_points()[0] 65 | 66 | # So let's tell the world to spawn the vehicle. 67 | vehicle = world.spawn_actor(bp, transform) 68 | 69 | # It is important to note that the actors we create won't be destroyed 70 | # unless we call their "destroy" function. If we fail to call "destroy" 71 | # they will stay in the simulation even after we quit the Python script. 72 | # For that reason, we are storing all the actors we create so we can 73 | # destroy them afterwards. 74 | actor_list.append(vehicle) 75 | print('created %s' % vehicle.type_id) 76 | 77 | # Let's put the vehicle to drive around. 78 | vehicle.set_autopilot(True) 79 | 80 | # Let's add now a "rgb" camera attached to the vehicle. Note that the 81 | # transform we give here is now relative to the vehicle. 82 | camera_bp = blueprint_library.find('sensor.camera.rgb') 83 | camera_bp.set_attribute('image_size_x', '1920') 84 | camera_bp.set_attribute('image_size_y', '1080') 85 | camera_transform = carla.Transform(carla.Location(x=1.5, z=2.4)) 86 | camera = world.spawn_actor(camera_bp, camera_transform, attach_to=vehicle) 87 | actor_list.append(camera) 88 | print('created %s' % camera.type_id) 89 | 90 | # Register a callback for whenever a new frame is available. This step is 91 | # currently required to correctly receive the gbuffer textures, as it is 92 | # used to determine whether the sensor is active. 93 | camera.listen(lambda image: image.save_to_disk('_out/FinalColor-%06d.png' % image.frame)) 94 | 95 | # Here we will register the callbacks for each gbuffer texture. 96 | # The function "listen_to_gbuffer" behaves like the regular listen function, 97 | # but you must first pass it the ID of the desired gbuffer texture. 98 | camera.listen_to_gbuffer(carla.GBufferTextureID.SceneColor, lambda image: image.save_to_disk('_out/GBuffer-SceneColor-%06d.png' % image.frame)) 99 | camera.listen_to_gbuffer(carla.GBufferTextureID.SceneDepth, lambda image: image.save_to_disk('_out/GBuffer-SceneDepth-%06d.png' % image.frame)) 100 | camera.listen_to_gbuffer(carla.GBufferTextureID.SceneStencil, lambda image: image.save_to_disk('_out/GBuffer-SceneStencil-%06d.png' % image.frame)) 101 | camera.listen_to_gbuffer(carla.GBufferTextureID.GBufferA, lambda image: image.save_to_disk('_out/GBuffer-A-%06d.png' % image.frame)) 102 | camera.listen_to_gbuffer(carla.GBufferTextureID.GBufferB, lambda image: image.save_to_disk('_out/GBuffer-B-%06d.png' % image.frame)) 103 | camera.listen_to_gbuffer(carla.GBufferTextureID.GBufferC, lambda image: image.save_to_disk('_out/GBuffer-C-%06d.png' % image.frame)) 104 | camera.listen_to_gbuffer(carla.GBufferTextureID.GBufferD, lambda image: image.save_to_disk('_out/GBuffer-D-%06d.png' % image.frame)) 105 | # Note that some gbuffer textures may not be available for a particular scene. 106 | # For example, the textures E and F are likely unavailable in this example, 107 | # which will result in them being sent as black images. 
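# (Aside, not in the original: each of these registrations follows the same
#  pattern, so a loop over texture names keeps the block compact. A sketch:
#
#      for n in ('E', 'F'):
#          tid = getattr(carla.GBufferTextureID, 'GBuffer' + n)
#          camera.listen_to_gbuffer(
#              tid,
#              (lambda name: lambda image: image.save_to_disk(
#                  '_out/GBuffer-%s-%06d.png' % (name, image.frame)))(n))
#
#  the nested lambda pins each loop iteration's name into its own callback.)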
108 | camera.listen_to_gbuffer(carla.GBufferTextureID.GBufferE, lambda image: image.save_to_disk('_out/GBuffer-E-%06d.png' % image.frame)) 109 | camera.listen_to_gbuffer(carla.GBufferTextureID.GBufferF, lambda image: image.save_to_disk('_out/GBuffer-F-%06d.png' % image.frame)) 110 | camera.listen_to_gbuffer(carla.GBufferTextureID.Velocity, lambda image: image.save_to_disk('_out/GBuffer-Velocity-%06d.png' % image.frame)) 111 | camera.listen_to_gbuffer(carla.GBufferTextureID.SSAO, lambda image: image.save_to_disk('_out/GBuffer-SSAO-%06d.png' % image.frame)) 112 | camera.listen_to_gbuffer(carla.GBufferTextureID.CustomDepth, lambda image: image.save_to_disk('_out/GBuffer-CustomDepth-%06d.png' % image.frame)) 113 | camera.listen_to_gbuffer(carla.GBufferTextureID.CustomStencil, lambda image: image.save_to_disk('_out/GBuffer-CustomStencil-%06d.png' % image.frame)) 114 | 115 | time.sleep(10) 116 | 117 | finally: 118 | 119 | print('destroying actors') 120 | camera.destroy() 121 | client.apply_batch([carla.command.DestroyActor(x) for x in actor_list]) 122 | print('done.') 123 | 124 | 125 | if __name__ == '__main__': 126 | 127 | main() 128 | -------------------------------------------------------------------------------- /src/navigation/examples/synchronous_mode.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright (c) 2019 Computer Vision Center (CVC) at the Universitat Autonoma de 4 | # Barcelona (UAB). 5 | # 6 | # This work is licensed under the terms of the MIT license. 7 | # For a copy, see . 8 | 9 | import glob 10 | import os 11 | import sys 12 | 13 | try: 14 | sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % ( 15 | sys.version_info.major, 16 | sys.version_info.minor, 17 | 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0]) 18 | except IndexError: 19 | pass 20 | 21 | import carla 22 | 23 | import random 24 | 25 | try: 26 | import pygame 27 | except ImportError: 28 | raise RuntimeError('cannot import pygame, make sure pygame package is installed') 29 | 30 | try: 31 | import numpy as np 32 | except ImportError: 33 | raise RuntimeError('cannot import numpy, make sure numpy package is installed') 34 | 35 | try: 36 | import queue 37 | except ImportError: 38 | import Queue as queue 39 | 40 | 41 | class CarlaSyncMode(object): 42 | """ 43 | Context manager to synchronize output from different sensors. 
Synchronous 44 | mode is enabled as long as we are inside this context 45 | 46 | with CarlaSyncMode(world, sensors) as sync_mode: 47 | while True: 48 | data = sync_mode.tick(timeout=1.0) 49 | 50 | """ 51 | 52 | def __init__(self, world, *sensors, **kwargs): 53 | self.world = world 54 | self.sensors = sensors 55 | self.frame = None 56 | self.delta_seconds = 1.0 / kwargs.get('fps', 20) 57 | self._queues = [] 58 | self._settings = None 59 | 60 | def __enter__(self): 61 | self._settings = self.world.get_settings() 62 | self.frame = self.world.apply_settings(carla.WorldSettings( 63 | no_rendering_mode=False, 64 | synchronous_mode=True, 65 | fixed_delta_seconds=self.delta_seconds)) 66 | 67 | def make_queue(register_event): 68 | q = queue.Queue() 69 | register_event(q.put) 70 | self._queues.append(q) 71 | 72 | make_queue(self.world.on_tick) 73 | for sensor in self.sensors: 74 | make_queue(sensor.listen) 75 | return self 76 | 77 | def tick(self, timeout): 78 | self.frame = self.world.tick() 79 | data = [self._retrieve_data(q, timeout) for q in self._queues] 80 | assert all(x.frame == self.frame for x in data) 81 | return data 82 | 83 | def __exit__(self, *args, **kwargs): 84 | self.world.apply_settings(self._settings) 85 | 86 | def _retrieve_data(self, sensor_queue, timeout): 87 | while True: 88 | data = sensor_queue.get(timeout=timeout) 89 | if data.frame == self.frame: 90 | return data 91 | 92 | 93 | def draw_image(surface, image, blend=False): 94 | array = np.frombuffer(image.raw_data, dtype=np.dtype("uint8")) 95 | array = np.reshape(array, (image.height, image.width, 4)) 96 | array = array[:, :, :3] 97 | array = array[:, :, ::-1] 98 | image_surface = pygame.surfarray.make_surface(array.swapaxes(0, 1)) 99 | if blend: 100 | image_surface.set_alpha(100) 101 | surface.blit(image_surface, (0, 0)) 102 | 103 | 104 | def get_font(): 105 | fonts = [x for x in pygame.font.get_fonts()] 106 | default_font = 'ubuntumono' 107 | font = default_font if default_font in fonts else fonts[0] 108 | font = pygame.font.match_font(font) 109 | return pygame.font.Font(font, 14) 110 | 111 | 112 | def should_quit(): 113 | for event in pygame.event.get(): 114 | if event.type == pygame.QUIT: 115 | return True 116 | elif event.type == pygame.KEYUP: 117 | if event.key == pygame.K_ESCAPE: 118 | return True 119 | return False 120 | 121 | 122 | def main(): 123 | actor_list = [] 124 | pygame.init() 125 | 126 | display = pygame.display.set_mode( 127 | (800, 600), 128 | pygame.HWSURFACE | pygame.DOUBLEBUF) 129 | font = get_font() 130 | clock = pygame.time.Clock() 131 | 132 | client = carla.Client('localhost', 2000) 133 | client.set_timeout(2.0) 134 | 135 | world = client.get_world() 136 | 137 | try: 138 | m = world.get_map() 139 | start_pose = random.choice(m.get_spawn_points()) 140 | waypoint = m.get_waypoint(start_pose.location) 141 | 142 | blueprint_library = world.get_blueprint_library() 143 | 144 | vehicle = world.spawn_actor( 145 | random.choice(blueprint_library.filter('vehicle.*')), 146 | start_pose) 147 | actor_list.append(vehicle) 148 | vehicle.set_simulate_physics(False) 149 | 150 | camera_rgb = world.spawn_actor( 151 | blueprint_library.find('sensor.camera.rgb'), 152 | carla.Transform(carla.Location(x=-5.5, z=2.8), carla.Rotation(pitch=-15)), 153 | attach_to=vehicle) 154 | actor_list.append(camera_rgb) 155 | 156 | camera_semseg = world.spawn_actor( 157 | blueprint_library.find('sensor.camera.semantic_segmentation'), 158 | carla.Transform(carla.Location(x=-5.5, z=2.8), carla.Rotation(pitch=-15)), 159 | 
attach_to=vehicle) 160 | actor_list.append(camera_semseg) 161 | 162 | # Create a synchronous mode context. 163 | with CarlaSyncMode(world, camera_rgb, camera_semseg, fps=30) as sync_mode: 164 | while True: 165 | if should_quit(): 166 | return 167 | clock.tick() 168 | 169 | # Advance the simulation and wait for the data. 170 | snapshot, image_rgb, image_semseg = sync_mode.tick(timeout=2.0) 171 | 172 | # Choose the next waypoint and update the car location. 173 | waypoint = random.choice(waypoint.next(1.5)) 174 | vehicle.set_transform(waypoint.transform) 175 | 176 | image_semseg.convert(carla.ColorConverter.CityScapesPalette) 177 | fps = round(1.0 / snapshot.timestamp.delta_seconds) 178 | 179 | # Draw the display. 180 | draw_image(display, image_rgb) 181 | draw_image(display, image_semseg, blend=True) 182 | display.blit( 183 | font.render('% 5d FPS (real)' % clock.get_fps(), True, (255, 255, 255)), 184 | (8, 10)) 185 | display.blit( 186 | font.render('% 5d FPS (simulated)' % fps, True, (255, 255, 255)), 187 | (8, 28)) 188 | pygame.display.flip() 189 | 190 | finally: 191 | 192 | print('destroying actors.') 193 | for actor in actor_list: 194 | actor.destroy() 195 | 196 | pygame.quit() 197 | print('done.') 198 | 199 | 200 | if __name__ == '__main__': 201 | 202 | try: 203 | 204 | main() 205 | 206 | except KeyboardInterrupt: 207 | print('\nCancelled by user. Bye!') 208 | -------------------------------------------------------------------------------- /prompt/demonstration_cautious.txt: -------------------------------------------------------------------------------- 1 | You are a driver in the CARLA simulator. Drive the agent car to its destination on a city road. Base your actions only on the information I provide. 2 | [PART 1] INFORMATION: 3 | I'll give you CARLA parameters for the agent car's current state and environment (e.g. traffic lights, vehicles, walkers). Use the provided navigation system to drive from start to destination, following lanes and waypoints. 4 | CURRENT STATE & ENVIRONMENT 5 | Car_Speed: Current speed of the agent car in km/h. 6 | Car_Speed_limit: Maximum speed allowed for the agent car in the current area. 7 | Car_Direction: The current road option that the local planner is guiding the agent car towards. You can think of it as the current action. 8 | Car_Incoming_direction: The road option at a distance ahead (normally 3 waypoints ahead) that the local planner is guiding the agent car towards. 9 | The value of Car_Direction and Car_Incoming_direction is an integer from -1 to 6, representing: 10 | -1 (VOID): Represents that there is no valid movement option currently being considered for the current lane segment by the agent car. 11 | 1 (LEFT): Represents that the agent car is currently turning left. 12 | 2 (RIGHT): Represents that the agent car is currently turning right 13 | 3 (STRAIGHT): Represents that the vehicle is currently moving straight along the current lane segment without making an explicit turn or road change. 14 | 4 (LANEFOLLOW): Represents that the vehicle is currently following the current lane, emphasizing the ongoing process of continuous driving on the current lane, which may include some slight road curve following, without requiring explicit road changes or turns. 15 | 5 (CHANGELANELEFT): Represents that the agent car is changing to the left lane. 16 | 6 (CHANGELANERIGHT): Represents that the agent car is changing to the right lane. 17 | Car_at_Traffic_light: This indicates the current status of the traffic lights in front. 
If this is True, it signifies that you are approaching a red light. Conversely, if this is False, it implies that you are approaching a green light. 18 | Walkers_ahead: The walker status in front of the agent vehicle. Including distance from the agent vehicle, relative position to the agent vehicle and direction of movement (whether approaching or moving away from the agent vehicle's direction). False indicates that there are no pedestrians in front of the agent vehicle. 19 | Vehicle_ahead: The vehicle status in front of the agent vehicle blocking its way. Including distance (negative value might indicate a collision) from the agent vehicle and its speed. False indicates that there are no vehicles in front of the agent vehicle. 20 | is_right_lane_change_allowed: This function is used to determine whether a right lane change is permissible in the current situation. When the function returns True, it indicates that a right lane change is allowed, and when it returns False, it signifies that a right lane change is not allowed. 21 | is_left_lane_change_allowed:This function is used to determine whether a left lane change is permissible in the current situation. When the function returns True, it indicates that a left lane change is allowed, and when it returns False, it signifies that a left lane change is not allowed. 22 | PREVIOUS DRIVING BEHAVIORS (Short-Term Memory): 23 | You"ll get up to four past DRIVING BEHAVIORS of the agent car which are formatted as: {"Situation": "...", "Reasoning": "...", "Action": "..."}. Integrate these when deciding driving behavior. 24 | 25 | [PART 2] ACTION LIBRARY: These are pre-coded actions that the agent car can directly implement. 26 | 1. ACTION_NAME: stop 27 | Description:This function performs an emergency stop. It sets the throttle to zero, applies maximum brake, and releases the handbrake. Steering remains unchanged to keep the car in its lane. 28 | 2. ACTION_NAME: lane_changing_left 29 | Description: This function makes the agent car change to the left lane. Never use this function when the agent car IS CURRENTLY performing a lane change. 30 | 3. ACTION_NAME: lane_changing_right 31 | Description: This function makes the agent car change to the right lane. Never use this function when the agent car IS CURRENTLY performing a lane change. 32 | All of the below functions(4-7) perform lane-keeping driving based on global navigation, meaning that when these functions are executed, you will drive along the global navigation path. 33 | 4. ACTION_NAME: normal_behavior 34 | Description: This function makes the agent car follow a route set by global navigation, keeping its speed close to the road"s limit or the car"s maximum. NEVER use this function for stopping or when stopped. 35 | 5. ACTION_NAME: maintain_speed 36 | Description: This function maintains the agent car"s current speed on its navigation route. NEVER use it when the car needs to stay stopped. 37 | 6. ACTION_NAME: speed_up 38 | Description: This function accelerates the agent car by 0.5 km/h, slightly above the posted speed limit for the lane, and continues on its navigation route. 39 | 7. ACTION_NAME: speed_down 40 | Description: This function decelerates the agent car by 0.5 km/h, down to a minimum of zero, and then proceeds on its navigation route. You can also use it for gradual stops. 41 | 42 | [PART 3] DRIVING BEHAVIOR 43 | Perform three tasks: Situation Understanding(Situation), Reasoning(Reason), and Action Commanding(Action). 
Use the provided INFORMATION to guide the agent car's next action. 44 | 45 | 1. Situation Understanding: 46 | 1) Analyze and describe the agent car's situation using all of the given INFORMATION, and mention all the parts of INFORMATION you used in the Situation. 47 | 2) Describe as concisely as possible and keep the content of this part within 30 words. 48 | 49 | 2. Reasoning: 50 | 1) Decide the agent car's next action based on Situation Understanding. 51 | 2) Give clear, detailed instructions for the action. 52 | 3) Think step by step. 53 | 4) Describe as concisely as possible and keep the content of this part within 30 words. 54 | 55 | 3. Action Commanding: 56 | 1) Specify the action using the ACTION LIBRARY format. 57 | 2) Follow this structure: "Action":"ACTION_NAME". 58 | 3) Use a single phrase for each action. Avoid multiple actions or extra content. 59 | 60 | [PART 4] EXAMPLE 61 | {"Situation": "The agent car is currently driving on the right lane and approaching a vehicle that is 20 meters ahead in the same lane.", "Reasoning": "Based on the previous driving behaviors and the current state, the agent car's current following distance is less than the safe following distance of 20 meters. Given the relatively short distance and the difference in speed between the two vehicles, the agent car should slow down to ensure a safe following distance.", "Action": "speed_down"}, 62 | {"Situation": "The agent car is preparing to turn right, but there is a pedestrian approaching in the right lane 4 meters away, and the upcoming traffic light is green.", "Reasoning": "Based on the previous driving behaviors and the current state, given the relatively short distance, the agent car should yield to vehicles going straight when turning.", "Action": "stop"}, 63 | {"Situation": "The agent car is preparing to turn right, but there is a vehicle approaching from left 20 meters away, and the traffic light vertically is green.", "Reasoning": "Based on the previous driving behaviors and the current state, given the relatively short distance, the agent car should yield to vehicles going straight when turning.", "Action": "stop"}, 64 | {"Situation": "The agent car is about to enter the main road, and there is a following vehicle from behind which is traveling at a higher speed, with a distance of less than 20 meters.", "Reasoning": "The following vehicle is attempting to overtake. Based on the previous driving behaviors and the current state, given the relatively short distance, the agent car should stop to let the following vehicle go ahead.", "Action": "stop"}, 65 | 66 | [PART 5] FORMAT 67 | You should only respond in the format as described below. 68 | RESPONSE FORMAT: {"Situation": "......","Reasoning": "......","Action": "......"} -------------------------------------------------------------------------------- /prompt/demonstration_risky.txt: -------------------------------------------------------------------------------- 1 | You are a driver in the CARLA simulator. Drive the agent car to its destination on a city road. Base your actions only on the information I provide. 2 | [PART 1] INFORMATION: 3 | I'll give you CARLA parameters for the agent car's current state and environment (e.g. traffic lights, vehicles, walkers). Use the provided navigation system to drive from start to destination, following lanes and waypoints. 4 | CURRENT STATE & ENVIRONMENT 5 | Car_Speed: Current speed of the agent car in km/h. 6 | Car_Speed_limit: Maximum speed allowed for the agent car in the current area. 
7 | Car_Direction: The current road option that the local planner is guiding the agent car towards. You can think of it as the current action. 8 | Car_Incoming_direction: The road option at a distance ahead (normally 3 waypoints ahead) that the local planner is guiding the agent car towards. 9 | The value of Car_Direction and Car_Incoming_direction is an integer from -1 to 6, representing: 10 | -1 (VOID): Represents that there is no valid movement option currently being considered for the current lane segment by the agent car. 11 | 1 (LEFT): Represents that the agent car is currently turning left. 12 | 2 (RIGHT): Represents that the agent car is currently turning right 13 | 3 (STRAIGHT): Represents that the vehicle is currently moving straight along the current lane segment without making an explicit turn or road change. 14 | 4 (LANEFOLLOW): Represents that the vehicle is currently following the current lane, emphasizing the ongoing process of continuous driving on the current lane, which may include some slight road curve following, without requiring explicit road changes or turns. 15 | 5 (CHANGELANELEFT): Represents that the agent car is changing to the left lane. 16 | 6 (CHANGELANERIGHT): Represents that the agent car is changing to the right lane. 17 | Car_at_Traffic_light: This indicates the current status of the traffic lights in front. If this is True, it signifies that you are approaching a red light. Conversely, if this is False, it implies that you are approaching a green light. 18 | Walkers_ahead: The walker status in front of the agent vehicle. Including distance from the agent vehicle, relative position to the agent vehicle and direction of movement (whether approaching or moving away from the agent vehicle's direction). False indicates that there are no pedestrians in front of the agent vehicle. 19 | Vehicle_ahead: The vehicle status in front of the agent vehicle blocking its way. Including distance (negative value might indicate a collision) from the agent vehicle and its speed. False indicates that there are no vehicles in front of the agent vehicle. 20 | is_right_lane_change_allowed: This function is used to determine whether a right lane change is permissible in the current situation. When the function returns True, it indicates that a right lane change is allowed, and when it returns False, it signifies that a right lane change is not allowed. 21 | is_left_lane_change_allowed:This function is used to determine whether a left lane change is permissible in the current situation. When the function returns True, it indicates that a left lane change is allowed, and when it returns False, it signifies that a left lane change is not allowed. 22 | PREVIOUS DRIVING BEHAVIORS (Short-Term Memory): 23 | You"ll get up to four past DRIVING BEHAVIORS of the agent car which are formatted as: {"Situation": "...", "Reasoning": "...", "Action": "..."}. Integrate these when deciding driving behavior. 24 | 25 | [PART 2] ACTION LIBRARY: These are pre-coded actions that the agent car can directly implement. 26 | 1. ACTION_NAME: stop 27 | Description:This function performs an emergency stop. It sets the throttle to zero, applies maximum brake, and releases the handbrake. Steering remains unchanged to keep the car in its lane. 28 | 2. ACTION_NAME: lane_changing_left 29 | Description: This function makes the agent car change to the left lane. Never use this function when the agent car IS CURRENTLY performing a lane change. 30 | 3. 
ACTION_NAME: lane_changing_right
31 | Description: This function makes the agent car change to the right lane. Never use this function when the agent car IS CURRENTLY performing a lane change.
32 | All of the below functions (4-7) perform lane-keeping driving based on global navigation, meaning that when these functions are executed, you will drive along the global navigation path.
33 | 4. ACTION_NAME: normal_behavior
34 | Description: This function makes the agent car follow a route set by global navigation, keeping its speed close to the road's limit or the car's maximum. NEVER use this function for stopping or when stopped.
35 | 5. ACTION_NAME: maintain_speed
36 | Description: This function maintains the agent car's current speed on its navigation route. NEVER use it when the car needs to stay stopped.
37 | 6. ACTION_NAME: speed_up
38 | Description: This function accelerates the agent car by 0.5 km/h, slightly above the posted speed limit for the lane, and continues on its navigation route.
39 | 7. ACTION_NAME: speed_down
40 | Description: This function decelerates the agent car by 0.5 km/h, down to a minimum of zero, and then proceeds on its navigation route. You can also use it for gradual stops.
41 | 
42 | [PART 3] DRIVING BEHAVIOR
43 | Perform three tasks: Situation Understanding (Situation), Reasoning (Reason), and Action Commanding (Action). Use the provided INFORMATION to guide the agent car's next action.
44 | 
45 | 1. Situation Understanding:
46 | 1) Analyze and describe the agent car's situation using all of the given INFORMATION, and mention all the parts of INFORMATION you used in the Situation.
47 | 2) Describe as concisely as possible and keep the content of this part within 30 words.
48 | 
49 | 2. Reasoning:
50 | 1) Decide the agent car's next action based on Situation Understanding.
51 | 2) Give clear, detailed instructions for the action.
52 | 3) Think step by step.
53 | 4) Describe as concisely as possible and keep the content of this part within 30 words.
54 | 
55 | 3. Action Commanding:
56 | 1) Specify the action using the ACTION LIBRARY format.
57 | 2) Follow this structure: "Action":"ACTION_NAME".
58 | 3) Use a single phrase for each action. Avoid multiple actions or extra content.
59 | 
60 | [PART 4] EXAMPLE
61 | {"Situation": "The agent car is driving on the road with no obstructions ahead.", "Reasoning": "Based on the previous driving behaviors and the current state, since there are no obstructions ahead, the car can increase speed until it can no longer maintain a 10-meter distance to the next car.", "Action": "speed_up"},
62 | {"Situation": "The agent car is preparing to turn right, but there is a vehicle approaching from the left over 50 meters away, and the traffic light vertically is green.", "Reasoning": "Based on the previous driving behaviors and the current state, as the vehicle is approaching from over 50 meters away, if the agent car can accelerate to the same speed before the following vehicle is 15 meters away, then there is enough time to make a turn to the right before the vehicle approaches. Therefore, the car should accelerate to make the turn before the vehicle is too close.", "Action": "speed_up"},
63 | {"Situation": "The agent car is about to enter the main road, and there is a following vehicle from behind which is traveling at a higher speed.", "Reasoning": "The agent car is aiming to merge with the main road.
Based on the previous driving behaviors and the current state, if the agent car can accelerate to the same speed before the following vehicle is 15 meters away, then the agent car should accelerate and go in front of the vehicle.", "Action": "speed_up"},
64 | {"Situation": "The agent car is about to enter the main road with no obstructions ahead, and there is a following vehicle from behind which is traveling at a slower speed.", "Reasoning": "Based on the previous driving behaviors and the current state, the agent car can enter the main road smoothly and can accelerate when entering the main road.", "Action": "speed_up"}
65 | 
66 | 
67 | [PART 5] FORMAT
68 | You should only respond in the format as described below.
69 | RESPONSE FORMAT: {"Situation": "......","Reasoning": "......","Action": "......"}
--------------------------------------------------------------------------------
/src/navigation/controller.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018-2020 CVC.
2 | #
3 | # This work is licensed under the terms of the MIT license.
4 | # For a copy, see <https://opensource.org/licenses/MIT>.
5 | 
6 | """ This module contains PID controllers to perform lateral and longitudinal control. """
7 | 
8 | from collections import deque
9 | import math
10 | import numpy as np
11 | import carla
12 | from agents.tools.misc import get_speed
13 | 
14 | 
15 | class VehiclePIDController():
16 |     """
17 |     VehiclePIDController is the combination of two PID controllers
18 |     (lateral and longitudinal) to perform the
19 |     low-level control of a vehicle from the client side
20 |     """
21 | 
22 | 
23 |     def __init__(self, vehicle, args_lateral, args_longitudinal, offset=0, max_throttle=0.75, max_brake=0.3,
24 |                  max_steering=0.8):
25 |         """
26 |         Constructor method.
27 | 
28 |         :param vehicle: actor to apply the local planner logic onto
29 |         :param args_lateral: dictionary of arguments to set the lateral PID controller
30 |         using the following semantics:
31 |             K_P -- Proportional term
32 |             K_D -- Differential term
33 |             K_I -- Integral term
34 |         :param args_longitudinal: dictionary of arguments to set the longitudinal
35 |         PID controller using the following semantics:
36 |             K_P -- Proportional term
37 |             K_D -- Differential term
38 |             K_I -- Integral term
39 |         :param offset: If different from zero, the vehicle will drive displaced from the center line.
40 |         Positive values imply a right offset while negative ones mean a left one. Numbers high enough
41 |         to cause the vehicle to drive through other lanes might break the controller.
42 |         """
43 | 
44 |         self.max_brake = max_brake
45 |         self.max_throt = max_throttle
46 |         self.max_steer = max_steering
47 | 
48 |         self._vehicle = vehicle
49 |         self._world = self._vehicle.get_world()
50 |         self.past_steering = self._vehicle.get_control().steer
51 |         self._lon_controller = PIDLongitudinalController(self._vehicle, **args_longitudinal)
52 |         self._lat_controller = PIDLateralController(self._vehicle, offset, **args_lateral)
53 | 
54 |     def run_step(self, target_speed, waypoint):
55 |         """
56 |         Execute one step of control invoking both lateral and longitudinal
57 |         PID controllers to reach a target waypoint
58 |         at a given target_speed.
59 | 
60 |         :param target_speed: desired vehicle speed
61 |         :param waypoint: target location encoded as a waypoint
62 |         :return: control command (carla.VehicleControl) for the current step
63 |         """
64 | 
65 |         acceleration = self._lon_controller.run_step(target_speed)
66 |         current_steering = self._lat_controller.run_step(waypoint)
67 |         control = carla.VehicleControl()
68 |         if acceleration >= 0.0:
69 |             control.throttle = min(acceleration, self.max_throt)
70 |             control.brake = 0.0
71 |         else:
72 |             control.throttle = 0.0
73 |             control.brake = min(abs(acceleration), self.max_brake)
74 | 
75 |         # Steering regulation: changes cannot happen abruptly, can't steer too much.
76 | 
77 |         if current_steering > self.past_steering + 0.1:
78 |             current_steering = self.past_steering + 0.1
79 |         elif current_steering < self.past_steering - 0.1:
80 |             current_steering = self.past_steering - 0.1
81 | 
82 |         if current_steering >= 0:
83 |             steering = min(self.max_steer, current_steering)
84 |         else:
85 |             steering = max(-self.max_steer, current_steering)
86 | 
87 |         control.steer = steering
88 |         control.hand_brake = False
89 |         control.manual_gear_shift = False
90 |         self.past_steering = steering
91 | 
92 |         return control
93 | 
94 | 
95 |     def change_longitudinal_PID(self, args_longitudinal):
96 |         """Changes the parameters of the PIDLongitudinalController"""
97 |         self._lon_controller.change_parameters(**args_longitudinal)
98 | 
99 |     def change_lateral_PID(self, args_lateral):
100 |         """Changes the parameters of the PIDLateralController"""
101 |         self._lat_controller.change_parameters(**args_lateral)
102 | 
103 | 
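# (A minimal usage sketch, not part of the original file. "vehicle" is assumed
#  to be an already-spawned carla.Vehicle and "wp" a carla.Waypoint ahead of it;
#  the gains are illustrative and should be tuned per vehicle and tick rate:
#
#      controller = VehiclePIDController(
#          vehicle,
#          args_lateral={'K_P': 1.95, 'K_I': 0.05, 'K_D': 0.2, 'dt': 0.05},
#          args_longitudinal={'K_P': 1.0, 'K_I': 0.05, 'K_D': 0.0, 'dt': 0.05})
#      control = controller.run_step(target_speed=30.0, waypoint=wp)  # km/h
#      vehicle.apply_control(control)
# )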
104 | class PIDLongitudinalController():
105 |     """
106 |     PIDLongitudinalController implements longitudinal control using a PID.
107 |     """
108 | 
109 |     def __init__(self, vehicle, K_P=1.0, K_I=0.0, K_D=0.0, dt=0.03):
110 |         """
111 |         Constructor method.
112 | 
113 |         :param vehicle: actor to apply the local planner logic onto
114 |         :param K_P: Proportional term
115 |         :param K_D: Differential term
116 |         :param K_I: Integral term
117 |         :param dt: time differential in seconds
118 |         """
119 |         self._vehicle = vehicle
120 |         self._k_p = K_P
121 |         self._k_i = K_I
122 |         self._k_d = K_D
123 |         self._dt = dt
124 |         self._error_buffer = deque(maxlen=10)
125 | 
126 |     def run_step(self, target_speed, debug=False):
127 |         """
128 |         Execute one step of longitudinal control to reach a given target speed.
129 | 
130 |         :param target_speed: target speed in Km/h
131 |         :param debug: boolean for debugging
132 |         :return: throttle control
133 |         """
134 |         current_speed = get_speed(self._vehicle)
135 | 
136 |         if debug:
137 |             print('Current speed = {}'.format(current_speed))
138 | 
139 |         return self._pid_control(target_speed, current_speed)
140 | 
141 |     def _pid_control(self, target_speed, current_speed):
142 |         """
143 |         Estimate the throttle/brake of the vehicle based on the PID equations
144 | 
145 |         :param target_speed: target speed in Km/h
146 |         :param current_speed: current speed of the vehicle in Km/h
147 |         :return: throttle/brake control
148 |         """
149 | 
150 |         error = target_speed - current_speed
151 |         self._error_buffer.append(error)
152 | 
153 |         if len(self._error_buffer) >= 2:
154 |             _de = (self._error_buffer[-1] - self._error_buffer[-2]) / self._dt
155 |             _ie = sum(self._error_buffer) * self._dt
156 |         else:
157 |             _de = 0.0
158 |             _ie = 0.0
159 | 
160 |         return np.clip((self._k_p * error) + (self._k_d * _de) + (self._k_i * _ie), -1.0, 1.0)
161 | 
162 |     def change_parameters(self, K_P, K_I, K_D, dt):
163 |         """Changes the PID parameters"""
164 |         self._k_p = K_P
165 |         self._k_i = K_I
166 |         self._k_d = K_D
167 |         self._dt = dt
168 | 
169 | 
170 | class PIDLateralController():
171 |     """
172 |     PIDLateralController implements lateral control using a PID.
173 |     """
174 | 
175 |     def __init__(self, vehicle, offset=0, K_P=1.0, K_I=0.0, K_D=0.0, dt=0.03):
176 |         """
177 |         Constructor method.
178 | 
179 |         :param vehicle: actor to apply the local planner logic onto
180 |         :param offset: distance to the center line. It might cause issues if the value
181 |         is large enough to make the vehicle invade other lanes.
182 |         :param K_P: Proportional term
183 |         :param K_D: Differential term
184 |         :param K_I: Integral term
185 |         :param dt: time differential in seconds
186 |         """
187 |         self._vehicle = vehicle
188 |         self._k_p = K_P
189 |         self._k_i = K_I
190 |         self._k_d = K_D
191 |         self._dt = dt
192 |         self._offset = offset
193 |         self._e_buffer = deque(maxlen=10)
194 | 
195 |     def run_step(self, waypoint):
196 |         """
197 |         Execute one step of lateral control to steer
198 |         the vehicle towards a certain waypoint.
199 | 200 | :param waypoint: target waypoint 201 | :return: steering control in the range [-1, 1] where: 202 | -1 maximum steering to left 203 | +1 maximum steering to right 204 | """ 205 | return self._pid_control(waypoint, self._vehicle.get_transform()) 206 | 207 | def _pid_control(self, waypoint, vehicle_transform): 208 | """ 209 | Estimate the steering angle of the vehicle based on the PID equations 210 | 211 | :param waypoint: target waypoint 212 | :param vehicle_transform: current transform of the vehicle 213 | :return: steering control in the range [-1, 1] 214 | """ 215 | # Get the ego's location and forward vector 216 | ego_loc = vehicle_transform.location 217 | v_vec = vehicle_transform.get_forward_vector() 218 | v_vec = np.array([v_vec.x, v_vec.y, 0.0]) 219 | 220 | # Get the vector vehicle-target_wp 221 | if self._offset != 0: 222 | # Displace the wp to the side 223 | w_tran = waypoint.transform 224 | r_vec = w_tran.get_right_vector() 225 | w_loc = w_tran.location + carla.Location(x=self._offset*r_vec.x, 226 | y=self._offset*r_vec.y) 227 | else: 228 | w_loc = waypoint.transform.location 229 | 230 | w_vec = np.array([w_loc.x - ego_loc.x, 231 | w_loc.y - ego_loc.y, 232 | 0.0]) 233 | 234 | wv_linalg = np.linalg.norm(w_vec) * np.linalg.norm(v_vec) 235 | if wv_linalg == 0: 236 | _dot = 1 237 | else: 238 | _dot = math.acos(np.clip(np.dot(w_vec, v_vec) / (wv_linalg), -1.0, 1.0)) 239 | _cross = np.cross(v_vec, w_vec) 240 | if _cross[2] < 0: 241 | _dot *= -1.0 242 | 243 | self._e_buffer.append(_dot) 244 | if len(self._e_buffer) >= 2: 245 | _de = (self._e_buffer[-1] - self._e_buffer[-2]) / self._dt 246 | _ie = sum(self._e_buffer) * self._dt 247 | else: 248 | _de = 0.0 249 | _ie = 0.0 250 | 251 | return np.clip((self._k_p * _dot) + (self._k_d * _de) + (self._k_i * _ie), -1.0, 1.0) 252 | 253 | def change_parameters(self, K_P, K_I, K_D, dt): 254 | """Changes the PID parameters""" 255 | self._k_p = K_P 256 | self._k_i = K_I 257 | self._k_d = K_D 258 | self._dt = dt -------------------------------------------------------------------------------- /src/navigation/examples/open3d_lidar.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright (c) 2020 Computer Vision Center (CVC) at the Universitat Autonoma de 4 | # Barcelona (UAB). 5 | # 6 | # This work is licensed under the terms of the MIT license. 7 | # For a copy, see . 
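# Example invocations, following the flags defined in the argparser at the
# bottom of this file (a CARLA server is assumed to be listening on
# localhost:2000, the script's defaults):
#
#   python open3d_lidar.py                        # intensity-colored lidar
#   python open3d_lidar.py --semantic             # ground-truth CityScapes colors
#   python open3d_lidar.py --no-noise --range 50  # noiseless returns, 50 m range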
8 | 
9 | """Open3D Lidar visualization example for CARLA"""
10 | 
11 | import glob
12 | import os
13 | import sys
14 | import argparse
15 | import time
16 | from datetime import datetime
17 | import random
18 | import numpy as np
19 | from matplotlib import cm
20 | import open3d as o3d
21 | 
22 | try:
23 |     sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (
24 |         sys.version_info.major,
25 |         sys.version_info.minor,
26 |         'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
27 | except IndexError:
28 |     pass
29 | 
30 | import carla
31 | 
32 | VIRIDIS = np.array(cm.get_cmap('plasma').colors)
33 | VID_RANGE = np.linspace(0.0, 1.0, VIRIDIS.shape[0])
34 | LABEL_COLORS = np.array([
35 |     (255, 255, 255), # None
36 |     (70, 70, 70),    # Building
37 |     (100, 40, 40),   # Fences
38 |     (55, 90, 80),    # Other
39 |     (220, 20, 60),   # Pedestrian
40 |     (153, 153, 153), # Pole
41 |     (157, 234, 50),  # RoadLines
42 |     (128, 64, 128),  # Road
43 |     (244, 35, 232),  # Sidewalk
44 |     (107, 142, 35),  # Vegetation
45 |     (0, 0, 142),     # Vehicle
46 |     (102, 102, 156), # Wall
47 |     (220, 220, 0),   # TrafficSign
48 |     (70, 130, 180),  # Sky
49 |     (81, 0, 81),     # Ground
50 |     (150, 100, 100), # Bridge
51 |     (230, 150, 140), # RailTrack
52 |     (180, 165, 180), # GuardRail
53 |     (250, 170, 30),  # TrafficLight
54 |     (110, 190, 160), # Static
55 |     (170, 120, 50),  # Dynamic
56 |     (45, 60, 150),   # Water
57 |     (145, 170, 100), # Terrain
58 | ]) / 255.0 # normalize each channel [0-1] since that is what Open3D uses
59 | 
60 | 
61 | def lidar_callback(point_cloud, point_list):
62 |     """Prepares a point cloud with intensity
63 |     colors ready to be consumed by Open3D"""
64 |     data = np.copy(np.frombuffer(point_cloud.raw_data, dtype=np.dtype('f4')))
65 |     data = np.reshape(data, (int(data.shape[0] / 4), 4))
66 | 
67 |     # Isolate the intensity and compute a color for it
68 |     intensity = data[:, -1]
69 |     intensity_col = 1.0 - np.log(intensity) / np.log(np.exp(-0.004 * 100))
70 |     int_color = np.c_[
71 |         np.interp(intensity_col, VID_RANGE, VIRIDIS[:, 0]),
72 |         np.interp(intensity_col, VID_RANGE, VIRIDIS[:, 1]),
73 |         np.interp(intensity_col, VID_RANGE, VIRIDIS[:, 2])]
74 | 
75 |     # Isolate the 3D data
76 |     points = data[:, :-1]
77 | 
78 |     # We're negating the y to correctly visualize a world that matches
79 |     # what we see in Unreal since Open3D uses a right-handed coordinate system
80 |     points[:, :1] = -points[:, :1]
81 | 
82 |     # # An example of converting points from sensor to vehicle space if we had
83 |     # # a carla.Transform variable named "tran":
84 |     # points = np.append(points, np.ones((points.shape[0], 1)), axis=1)
85 |     # points = np.dot(tran.get_matrix(), points.T).T
86 |     # points = points[:, :-1]
87 | 
88 |     point_list.points = o3d.utility.Vector3dVector(points)
89 |     point_list.colors = o3d.utility.Vector3dVector(int_color)
90 | 
91 | 
92 | def semantic_lidar_callback(point_cloud, point_list):
93 |     """Prepares a point cloud with semantic segmentation
94 |     colors ready to be consumed by Open3D"""
95 |     data = np.frombuffer(point_cloud.raw_data, dtype=np.dtype([
96 |         ('x', np.float32), ('y', np.float32), ('z', np.float32),
97 |         ('CosAngle', np.float32), ('ObjIdx', np.uint32), ('ObjTag', np.uint32)]))
98 | 
99 |     # We're negating the y to correctly visualize a world that matches
100 |     # what we see in Unreal since Open3D uses a right-handed coordinate system
101 |     points = np.array([data['x'], -data['y'], data['z']]).T
102 | 
103 |     # # An example of adding some noise to our data if needed:
104 |     # points += np.random.uniform(-0.05, 0.05, size=points.shape)
105 | 
106 |     # Colorize the pointcloud based on the CityScapes color palette
107 |     labels = np.array(data['ObjTag'])
108 |     int_color = LABEL_COLORS[labels]
109 | 
110 |     # # In case you want to make the color intensity depend
111 |     # # on the incident ray angle, you can use:
112 |     # int_color *= np.array(data['CosAngle'])[:, None]
113 | 
114 |     point_list.points = o3d.utility.Vector3dVector(points)
115 |     point_list.colors = o3d.utility.Vector3dVector(int_color)
116 | 
117 | 
118 | def generate_lidar_bp(arg, world, blueprint_library, delta):
119 |     """Generates a CARLA blueprint based on the script parameters"""
120 |     if arg.semantic:
121 |         lidar_bp = world.get_blueprint_library().find('sensor.lidar.ray_cast_semantic')
122 |     else:
123 |         lidar_bp = blueprint_library.find('sensor.lidar.ray_cast')
124 |         if arg.no_noise:
125 |             lidar_bp.set_attribute('dropoff_general_rate', '0.0')
126 |             lidar_bp.set_attribute('dropoff_intensity_limit', '1.0')
127 |             lidar_bp.set_attribute('dropoff_zero_intensity', '0.0')
128 |         else:
129 |             lidar_bp.set_attribute('noise_stddev', '0.2')
130 | 
131 |     lidar_bp.set_attribute('upper_fov', str(arg.upper_fov))
132 |     lidar_bp.set_attribute('lower_fov', str(arg.lower_fov))
133 |     lidar_bp.set_attribute('channels', str(arg.channels))
134 |     lidar_bp.set_attribute('range', str(arg.range))
135 |     lidar_bp.set_attribute('rotation_frequency', str(1.0 / delta))
136 |     lidar_bp.set_attribute('points_per_second', str(arg.points_per_second))
137 |     return lidar_bp
138 | 
139 | 
140 | def add_open3d_axis(vis):
141 |     """Add a small 3D axis on Open3D Visualizer"""
142 |     axis = o3d.geometry.LineSet()
143 |     axis.points = o3d.utility.Vector3dVector(np.array([
144 |         [0.0, 0.0, 0.0],
145 |         [1.0, 0.0, 0.0],
146 |         [0.0, 1.0, 0.0],
147 |         [0.0, 0.0, 1.0]]))
148 |     axis.lines = o3d.utility.Vector2iVector(np.array([
149 |         [0, 1],
150 |         [0, 2],
151 |         [0, 3]]))
152 |     axis.colors = o3d.utility.Vector3dVector(np.array([
153 |         [1.0, 0.0, 0.0],
154 |         [0.0, 1.0, 0.0],
155 |         [0.0, 0.0, 1.0]]))
156 |     vis.add_geometry(axis)
157 | 
158 | 
159 | def main(arg):
160 |     """Main function of the script"""
161 |     client = carla.Client(arg.host, arg.port)
162 |     client.set_timeout(2.0)
163 |     world = client.get_world()
164 | 
165 |     try:
166 |         original_settings = world.get_settings()
167 |         settings = world.get_settings()
168 |         traffic_manager = client.get_trafficmanager(8000)
169 |         traffic_manager.set_synchronous_mode(True)
170 | 
171 |         delta = 0.05
172 | 
173 |         settings.fixed_delta_seconds = delta
174 |         settings.synchronous_mode = True
175 |         settings.no_rendering_mode = arg.no_rendering
176 |         world.apply_settings(settings)
177 | 
178 |         blueprint_library = world.get_blueprint_library()
179 |         vehicle_bp = blueprint_library.filter(arg.filter)[0]
180 |         vehicle_transform = random.choice(world.get_map().get_spawn_points())
181 |         vehicle = world.spawn_actor(vehicle_bp, vehicle_transform)
182 |         vehicle.set_autopilot(arg.no_autopilot)
183 | 
184 |         lidar_bp = generate_lidar_bp(arg, world, blueprint_library, delta)
185 | 
186 |         user_offset = carla.Location(arg.x, arg.y, arg.z)
187 |         lidar_transform = carla.Transform(carla.Location(x=-0.5, z=1.8) + user_offset)
188 | 
189 |         lidar = world.spawn_actor(lidar_bp, lidar_transform, attach_to=vehicle)
190 | 
191 |         point_list = o3d.geometry.PointCloud()
192 |         if arg.semantic:
193 |             lidar.listen(lambda data: semantic_lidar_callback(data, point_list))
194 |         else:
195 |             lidar.listen(lambda data: lidar_callback(data, point_list))
196 | 
197 |         vis = o3d.visualization.Visualizer()
198 |         vis.create_window(
199 |             window_name='Carla Lidar',
200 |             width=960,
201 |             height=540,
202 |             left=480,
203 |             top=270)
204 |         vis.get_render_option().background_color = [0.05, 0.05, 0.05]
205 |         vis.get_render_option().point_size = 1
206 |         vis.get_render_option().show_coordinate_frame = True
207 | 
208 |         if arg.show_axis:
209 |             add_open3d_axis(vis)
210 | 
211 |         frame = 0
212 |         dt0 = datetime.now()
213 |         while True:
214 |             if frame == 2:
215 |                 vis.add_geometry(point_list)
216 |             vis.update_geometry(point_list)
217 | 
218 |             vis.poll_events()
219 |             vis.update_renderer()
220 |             # # This can fix Open3D jittering issues:
221 |             time.sleep(0.005)
222 |             world.tick()
223 | 
224 |             process_time = datetime.now() - dt0
225 |             sys.stdout.write('\r' + 'FPS: ' + str(1.0 / process_time.total_seconds()))
226 |             sys.stdout.flush()
227 |             dt0 = datetime.now()
228 |             frame += 1
229 | 
230 |     finally:
231 |         world.apply_settings(original_settings)
232 |         traffic_manager.set_synchronous_mode(False)
233 | 
234 |         vehicle.destroy()
235 |         lidar.destroy()
236 |         vis.destroy_window()
237 | 
238 | 
239 | if __name__ == "__main__":
240 |     argparser = argparse.ArgumentParser(
241 |         description=__doc__)
242 |     argparser.add_argument(
243 |         '--host',
244 |         metavar='H',
245 |         default='localhost',
246 |         help='IP of the host CARLA Simulator (default: localhost)')
247 |     argparser.add_argument(
248 |         '-p', '--port',
249 |         metavar='P',
250 |         default=2000,
251 |         type=int,
252 |         help='TCP port of CARLA Simulator (default: 2000)')
253 |     argparser.add_argument(
254 |         '--no-rendering',
255 |         action='store_true',
256 |         help='use the no-rendering mode which will provide some extra'
257 |         ' performance but you will lose the articulated objects in the'
258 |         ' lidar, such as pedestrians')
259 |     argparser.add_argument(
260 |         '--semantic',
261 |         action='store_true',
262 |         help='use the semantic lidar instead, which provides ground truth'
263 |         ' information')
264 |     argparser.add_argument(
265 |         '--no-noise',
266 |         action='store_true',
267 |         help='remove the drop off and noise from the normal (non-semantic) lidar')
268 |     argparser.add_argument(
269 |         '--no-autopilot',
270 |         action='store_false',
271 |         help='disables the autopilot so the vehicle will remain stopped')
272 |     argparser.add_argument(
273 |         '--show-axis',
274 |         action='store_true',
275 |         help='show the cartesian coordinates axis')
276 |     argparser.add_argument(
277 |         '--filter',
278 |         metavar='PATTERN',
279 |         default='model3',
280 |         help='actor filter (default: "model3")')
281 |     argparser.add_argument(
282 |         '--upper-fov',
283 |         default=15.0,
284 |         type=float,
285 |         help='lidar\'s upper field of view in degrees (default: 15.0)')
286 |     argparser.add_argument(
287 |         '--lower-fov',
288 |         default=-25.0,
289 |         type=float,
290 |         help='lidar\'s lower field of view in degrees (default: -25.0)')
291 |     argparser.add_argument(
292 |         '--channels',
293 |         default=64.0,
294 |         type=float,
295 |         help='lidar\'s channel count (default: 64)')
296 |     argparser.add_argument(
297 |         '--range',
298 |         default=100.0,
299 |         type=float,
300 |         help='lidar\'s maximum range in meters (default: 100.0)')
301 |     argparser.add_argument(
302 |         '--points-per-second',
303 |         default=500000,
304 |         type=int,
305 |         help='lidar\'s points per second (default: 500000)')
306 |     argparser.add_argument(
307 |         '-x',
308 |         default=0.0,
309 |         type=float,
310 |         help='offset in the sensor position in the X-axis in meters (default: 0.0)')
311 |     argparser.add_argument(
312 |         '-y',
313 |         default=0.0,
314 |         type=float,
315 |         help='offset in the sensor position in the Y-axis in meters (default: 0.0)')
316 
| argparser.add_argument( 317 | '-z', 318 | default=0.0, 319 | type=float, 320 | help='offset in the sensor position in the Z-axis in meters (default: 0.0)') 321 | args = argparser.parse_args() 322 | 323 | try: 324 | main(args) 325 | except KeyboardInterrupt: 326 | print(' - Exited by user.') 327 | -------------------------------------------------------------------------------- /src/navigation/examples/lidar_to_camera.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright (c) 2020 Computer Vision Center (CVC) at the Universitat Autonoma de 4 | # Barcelona (UAB). 5 | # 6 | # This work is licensed under the terms of the MIT license. 7 | # For a copy, see . 8 | 9 | """ 10 | Lidar projection on RGB camera example 11 | """ 12 | 13 | import glob 14 | import os 15 | import sys 16 | 17 | try: 18 | sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % ( 19 | sys.version_info.major, 20 | sys.version_info.minor, 21 | 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0]) 22 | except IndexError: 23 | pass 24 | 25 | import carla 26 | 27 | import argparse 28 | from queue import Queue 29 | from queue import Empty 30 | from matplotlib import cm 31 | 32 | try: 33 | import numpy as np 34 | except ImportError: 35 | raise RuntimeError('cannot import numpy, make sure numpy package is installed') 36 | 37 | try: 38 | from PIL import Image 39 | except ImportError: 40 | raise RuntimeError('cannot import PIL, make sure "Pillow" package is installed') 41 | 42 | VIRIDIS = np.array(cm.get_cmap('viridis').colors) 43 | VID_RANGE = np.linspace(0.0, 1.0, VIRIDIS.shape[0]) 44 | 45 | def sensor_callback(data, queue): 46 | """ 47 | This simple callback just stores the data on a thread safe Python Queue 48 | to be retrieved from the "main thread". 49 | """ 50 | queue.put(data) 51 | 52 | 53 | def tutorial(args): 54 | """ 55 | This function is intended to be a tutorial on how to retrieve data in a 56 | synchronous way, and project 3D points from a lidar to a 2D camera. 
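    Each simulated frame it saves one image to the _out folder: the RGB camera
    view with the lidar points projected on top, colored by intensity.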
57 | """ 58 | # Connect to the server 59 | client = carla.Client(args.host, args.port) 60 | client.set_timeout(2.0) 61 | world = client.get_world() 62 | bp_lib = world.get_blueprint_library() 63 | 64 | traffic_manager = client.get_trafficmanager(8000) 65 | traffic_manager.set_synchronous_mode(True) 66 | 67 | original_settings = world.get_settings() 68 | settings = world.get_settings() 69 | settings.synchronous_mode = True 70 | settings.fixed_delta_seconds = 3.0 71 | world.apply_settings(settings) 72 | 73 | vehicle = None 74 | camera = None 75 | lidar = None 76 | 77 | try: 78 | if not os.path.isdir('_out'): 79 | os.mkdir('_out') 80 | # Search the desired blueprints 81 | vehicle_bp = bp_lib.filter("vehicle.lincoln.mkz_2017")[0] 82 | camera_bp = bp_lib.filter("sensor.camera.rgb")[0] 83 | lidar_bp = bp_lib.filter("sensor.lidar.ray_cast")[0] 84 | 85 | # Configure the blueprints 86 | camera_bp.set_attribute("image_size_x", str(args.width)) 87 | camera_bp.set_attribute("image_size_y", str(args.height)) 88 | 89 | if args.no_noise: 90 | lidar_bp.set_attribute('dropoff_general_rate', '0.0') 91 | lidar_bp.set_attribute('dropoff_intensity_limit', '1.0') 92 | lidar_bp.set_attribute('dropoff_zero_intensity', '0.0') 93 | lidar_bp.set_attribute('upper_fov', str(args.upper_fov)) 94 | lidar_bp.set_attribute('lower_fov', str(args.lower_fov)) 95 | lidar_bp.set_attribute('channels', str(args.channels)) 96 | lidar_bp.set_attribute('range', str(args.range)) 97 | lidar_bp.set_attribute('points_per_second', str(args.points_per_second)) 98 | 99 | # Spawn the blueprints 100 | vehicle = world.spawn_actor( 101 | blueprint=vehicle_bp, 102 | transform=world.get_map().get_spawn_points()[0]) 103 | vehicle.set_autopilot(True) 104 | camera = world.spawn_actor( 105 | blueprint=camera_bp, 106 | transform=carla.Transform(carla.Location(x=1.6, z=1.6)), 107 | attach_to=vehicle) 108 | lidar = world.spawn_actor( 109 | blueprint=lidar_bp, 110 | transform=carla.Transform(carla.Location(x=1.0, z=1.8)), 111 | attach_to=vehicle) 112 | 113 | # Build the K projection matrix: 114 | # K = [[Fx, 0, image_w/2], 115 | # [ 0, Fy, image_h/2], 116 | # [ 0, 0, 1]] 117 | image_w = camera_bp.get_attribute("image_size_x").as_int() 118 | image_h = camera_bp.get_attribute("image_size_y").as_int() 119 | fov = camera_bp.get_attribute("fov").as_float() 120 | focal = image_w / (2.0 * np.tan(fov * np.pi / 360.0)) 121 | 122 | # In this case Fx and Fy are the same since the pixel aspect 123 | # ratio is 1 124 | K = np.identity(3) 125 | K[0, 0] = K[1, 1] = focal 126 | K[0, 2] = image_w / 2.0 127 | K[1, 2] = image_h / 2.0 128 | 129 | # The sensor data will be saved in thread-safe Queues 130 | image_queue = Queue() 131 | lidar_queue = Queue() 132 | 133 | camera.listen(lambda data: sensor_callback(data, image_queue)) 134 | lidar.listen(lambda data: sensor_callback(data, lidar_queue)) 135 | 136 | for frame in range(args.frames): 137 | world.tick() 138 | world_frame = world.get_snapshot().frame 139 | 140 | try: 141 | # Get the data once it's received. 142 | image_data = image_queue.get(True, 1.0) 143 | lidar_data = lidar_queue.get(True, 1.0) 144 | except Empty: 145 | print("[Warning] Some sensor data has been missed") 146 | continue 147 | 148 | assert image_data.frame == lidar_data.frame == world_frame 149 | # At this point, we have the synchronized information from the 2 sensors. 
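            # The rest of this loop projects every lidar point into the image:
            # lidar space -> world space -> camera space -> standard (OpenCV-style)
            # camera axes -> K-matrix projection -> normalized (u, v) pixel coords.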
150 |             sys.stdout.write("\r(%d/%d) Simulation: %d Camera: %d Lidar: %d" %
151 |                 (frame, args.frames, world_frame, image_data.frame, lidar_data.frame) + ' ')
152 |             sys.stdout.flush()
153 | 
154 |             # Get the raw BGRA buffer and convert it to an array of RGB of
155 |             # shape (image_data.height, image_data.width, 3).
156 |             im_array = np.copy(np.frombuffer(image_data.raw_data, dtype=np.dtype("uint8")))
157 |             im_array = np.reshape(im_array, (image_data.height, image_data.width, 4))
158 |             im_array = im_array[:, :, :3][:, :, ::-1]
159 | 
160 |             # Get the lidar data and convert it to a numpy array.
161 |             p_cloud_size = len(lidar_data)
162 |             p_cloud = np.copy(np.frombuffer(lidar_data.raw_data, dtype=np.dtype('f4')))
163 |             p_cloud = np.reshape(p_cloud, (p_cloud_size, 4))
164 | 
165 |             # Lidar intensity array of shape (p_cloud_size,) but, for now, let's
166 |             # focus on the 3D points.
167 |             intensity = np.array(p_cloud[:, 3])
168 | 
169 |             # Point cloud in lidar sensor space array of shape (3, p_cloud_size).
170 |             local_lidar_points = np.array(p_cloud[:, :3]).T
171 | 
172 |             # Add an extra 1.0 at the end of each 3d point so it becomes of
173 |             # shape (4, p_cloud_size) and it can be multiplied by a (4, 4) matrix.
174 |             local_lidar_points = np.r_[
175 |                 local_lidar_points, [np.ones(local_lidar_points.shape[1])]]
176 | 
177 |             # This (4, 4) matrix transforms the points from lidar space to world space.
178 |             lidar_2_world = lidar.get_transform().get_matrix()
179 | 
180 |             # Transform the points from lidar space to world space.
181 |             world_points = np.dot(lidar_2_world, local_lidar_points)
182 | 
183 |             # This (4, 4) matrix transforms the points from world to sensor coordinates.
184 |             world_2_camera = np.array(camera.get_transform().get_inverse_matrix())
185 | 
186 |             # Transform the points from world space to camera space.
187 |             sensor_points = np.dot(world_2_camera, world_points)
188 | 
189 |             # Now we must change from UE4's coordinate system to a "standard"
190 |             # camera coordinate system (the same used by OpenCV):
191 | 
192 |             # ^ z                       . z
193 |             # |                        /
194 |             # |              to:      +-------> x
195 |             # | . x                   |
196 |             # |/                      |
197 |             # +-------> y             v y
198 | 
199 |             # This can be achieved by multiplying by the following matrix:
200 |             # [[ 0,  1,  0 ],
201 |             #  [ 0,  0, -1 ],
202 |             #  [ 1,  0,  0 ]]
203 | 
204 |             # Or, in this case, it is the same as swapping:
205 |             # (x, y ,z) -> (y, -z, x)
206 |             point_in_camera_coords = np.array([
207 |                 sensor_points[1],
208 |                 sensor_points[2] * -1,
209 |                 sensor_points[0]])
210 | 
211 |             # Finally we can use our K matrix to do the actual 3D -> 2D.
212 |             points_2d = np.dot(K, point_in_camera_coords)
213 | 
214 |             # Remember to normalize the x, y values by the 3rd value.
215 |             points_2d = np.array([
216 |                 points_2d[0, :] / points_2d[2, :],
217 |                 points_2d[1, :] / points_2d[2, :],
218 |                 points_2d[2, :]])
219 | 
220 |             # At this point, points_2d[0, :] contains all the x and points_2d[1, :]
221 |             # contains all the y values of our points. In order to properly
222 |             # visualize everything on a screen, the points that are out of the screen
223 |             # must be discarded, as must points behind the camera projection plane.
224 |             points_2d = points_2d.T
225 |             intensity = intensity.T
226 |             points_in_canvas_mask = \
227 |                 (points_2d[:, 0] > 0.0) & (points_2d[:, 0] < image_w) & \
228 |                 (points_2d[:, 1] > 0.0) & (points_2d[:, 1] < image_h) & \
229 |                 (points_2d[:, 2] > 0.0)
230 |             points_2d = points_2d[points_in_canvas_mask]
231 |             intensity = intensity[points_in_canvas_mask]
232 | 
233 |             # Extract the screen coords (uv) as integers.
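            # u indexes image columns (x) and v indexes image rows (y), which is
            # why the drawing code below writes im_array[v, u], not im_array[u, v].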
234 |             u_coord = points_2d[:, 0].astype(int)  # plain int: np.int was removed in newer NumPy
235 |             v_coord = points_2d[:, 1].astype(int)
236 | 
237 |             # Since at the time of the creation of this script, the intensity function
238 |             # is returning high values, these are adjusted to be nicely visualized.
239 |             intensity = 4 * intensity - 3
240 |             color_map = np.array([
241 |                 np.interp(intensity, VID_RANGE, VIRIDIS[:, 0]) * 255.0,
242 |                 np.interp(intensity, VID_RANGE, VIRIDIS[:, 1]) * 255.0,
243 |                 np.interp(intensity, VID_RANGE, VIRIDIS[:, 2]) * 255.0]).astype(int).T
244 | 
245 |             if args.dot_extent <= 0:
246 |                 # Draw the 2d points on the image as a single pixel using numpy.
247 |                 im_array[v_coord, u_coord] = color_map
248 |             else:
249 |                 # Draw the 2d points on the image as squares of extent args.dot_extent.
250 |                 for i in range(len(points_2d)):
251 |                     # I'm not a NumPy expert and I don't know how to set bigger dots
252 |                     # without using this loop, so if anyone has a better solution,
253 |                     # make sure to update this script. Meanwhile, it's fast enough :)
254 |                     im_array[
255 |                         v_coord[i]-args.dot_extent : v_coord[i]+args.dot_extent,
256 |                         u_coord[i]-args.dot_extent : u_coord[i]+args.dot_extent] = color_map[i]
257 | 
258 |             # Save the image using Pillow module.
259 |             image = Image.fromarray(im_array)
260 |             image.save("_out/%08d.png" % image_data.frame)
261 | 
262 |     finally:
263 |         # Apply the original settings when exiting.
264 |         world.apply_settings(original_settings)
265 | 
266 |         # Destroy the actors in the scene.
267 |         if camera:
268 |             camera.destroy()
269 |         if lidar:
270 |             lidar.destroy()
271 |         if vehicle:
272 |             vehicle.destroy()
273 | 
274 | 
275 | def main():
276 |     """Start function"""
277 |     argparser = argparse.ArgumentParser(
278 |         description='CARLA Sensor sync and projection tutorial')
279 |     argparser.add_argument(
280 |         '--host',
281 |         metavar='H',
282 |         default='127.0.0.1',
283 |         help='IP of the host server (default: 127.0.0.1)')
284 |     argparser.add_argument(
285 |         '-p', '--port',
286 |         metavar='P',
287 |         default=2000,
288 |         type=int,
289 |         help='TCP port to listen to (default: 2000)')
290 |     argparser.add_argument(
291 |         '--res',
292 |         metavar='WIDTHxHEIGHT',
293 |         default='680x420',
294 |         help='window resolution (default: 680x420)')
295 |     argparser.add_argument(
296 |         '-f', '--frames',
297 |         metavar='N',
298 |         default=500,
299 |         type=int,
300 |         help='number of frames to record (default: 500)')
301 |     argparser.add_argument(
302 |         '-d', '--dot-extent',
303 |         metavar='SIZE',
304 |         default=2,
305 |         type=int,
306 |         help='visualization dot extent in pixels (Recommended [1-4]) (default: 2)')
307 |     argparser.add_argument(
308 |         '--no-noise',
309 |         action='store_true',
310 |         help='remove the drop off and noise from the normal (non-semantic) lidar')
311 |     argparser.add_argument(
312 |         '--upper-fov',
313 |         metavar='F',
314 |         default=30.0,
315 |         type=float,
316 |         help='lidar\'s upper field of view in degrees (default: 30.0)')
317 |     argparser.add_argument(
318 |         '--lower-fov',
319 |         metavar='F',
320 |         default=-25.0,
321 |         type=float,
322 |         help='lidar\'s lower field of view in degrees (default: -25.0)')
323 |     argparser.add_argument(
324 |         '-c', '--channels',
325 |         metavar='C',
326 |         default=64.0,
327 |         type=float,
328 |         help='lidar\'s channel count (default: 64)')
329 |     argparser.add_argument(
330 |         '-r', '--range',
331 |         metavar='R',
332 |         default=100.0,
333 |         type=float,
334 |         help='lidar\'s maximum range in meters (default: 100.0)')
335 |     argparser.add_argument(
336 |         '--points-per-second',
337 |         metavar='N',
338 |         default=100000,
339 |
        type=int,
340 |         help='lidar points per second (default: 100000)')
341 |     args = argparser.parse_args()
342 |     args.width, args.height = [int(x) for x in args.res.split('x')]
343 |     args.dot_extent -= 1
344 | 
345 |     try:
346 |         tutorial(args)
347 | 
348 |     except KeyboardInterrupt:
349 |         print('\nCancelled by user. Bye!')
350 | 
351 | 
352 | if __name__ == '__main__':
353 | 
354 |     main()
355 | 
--------------------------------------------------------------------------------
/src/navigation/examples/visualize_multiple_sensors.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | 
3 | # Copyright (c) 2020 Computer Vision Center (CVC) at the Universitat Autonoma de
4 | # Barcelona (UAB).
5 | #
6 | # This work is licensed under the terms of the MIT license.
7 | # For a copy, see <https://opensource.org/licenses/MIT>.
8 | 
9 | """
10 | Script that renders multiple sensors in the same pygame window
11 | 
12 | By default, it renders four cameras, one LiDAR and one Semantic LiDAR.
13 | It can easily be configured for a different number of sensors.
14 | To do that, check lines 290-308.
15 | """
16 | 
17 | import glob
18 | import os
19 | import sys
20 | 
21 | try:
22 |     sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (
23 |         sys.version_info.major,
24 |         sys.version_info.minor,
25 |         'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
26 | except IndexError:
27 |     pass
28 | 
29 | import carla
30 | import argparse
31 | import random
32 | import time
33 | import numpy as np
34 | 
35 | 
36 | try:
37 |     import pygame
38 |     from pygame.locals import K_ESCAPE
39 |     from pygame.locals import K_q
40 | except ImportError:
41 |     raise RuntimeError('cannot import pygame, make sure pygame package is installed')
42 | 
43 | class CustomTimer:
44 |     def __init__(self):
45 |         try:
46 |             self.timer = time.perf_counter
47 |         except AttributeError:
48 |             self.timer = time.time
49 | 
50 |     def time(self):
51 |         return self.timer()
52 | 
53 | class DisplayManager:
54 |     def __init__(self, grid_size, window_size):
55 |         pygame.init()
56 |         pygame.font.init()
57 |         self.display = pygame.display.set_mode(window_size, pygame.HWSURFACE | pygame.DOUBLEBUF)
58 | 
59 |         self.grid_size = grid_size
60 |         self.window_size = window_size
61 |         self.sensor_list = []
62 | 
63 |     def get_window_size(self):
64 |         return [int(self.window_size[0]), int(self.window_size[1])]
65 | 
66 |     def get_display_size(self):
67 |         return [int(self.window_size[0]/self.grid_size[1]), int(self.window_size[1]/self.grid_size[0])]
68 | 
69 |     def get_display_offset(self, gridPos):
70 |         dis_size = self.get_display_size()
71 |         return [int(gridPos[1] * dis_size[0]), int(gridPos[0] * dis_size[1])]
72 | 
73 |     def add_sensor(self, sensor):
74 |         self.sensor_list.append(sensor)
75 | 
76 |     def get_sensor_list(self):
77 |         return self.sensor_list
78 | 
79 |     def render(self):
80 |         if not self.render_enabled():
81 |             return
82 | 
83 |         for s in self.sensor_list:
84 |             s.render()
85 | 
86 |         pygame.display.flip()
87 | 
88 |     def destroy(self):
89 |         for s in self.sensor_list:
90 |             s.destroy()
91 | 
92 |     def render_enabled(self):
93 |         return self.display != None
94 | 
95 | class SensorManager:
96 |     def __init__(self, world, display_man, sensor_type, transform, attached, sensor_options, display_pos):
97 |         self.surface = None
98 |         self.world = world
99 |         self.display_man = display_man
100 |         self.display_pos = display_pos
101 |         self.sensor = self.init_sensor(sensor_type, transform, attached, sensor_options)
102 |         self.sensor_options = sensor_options
103 |         self.timer = CustomTimer()
104 | 
105 |
self.time_processing = 0.0 106 | self.tics_processing = 0 107 | 108 | self.display_man.add_sensor(self) 109 | 110 | def init_sensor(self, sensor_type, transform, attached, sensor_options): 111 | if sensor_type == 'RGBCamera': 112 | camera_bp = self.world.get_blueprint_library().find('sensor.camera.rgb') 113 | disp_size = self.display_man.get_display_size() 114 | camera_bp.set_attribute('image_size_x', str(disp_size[0])) 115 | camera_bp.set_attribute('image_size_y', str(disp_size[1])) 116 | 117 | for key in sensor_options: 118 | camera_bp.set_attribute(key, sensor_options[key]) 119 | 120 | camera = self.world.spawn_actor(camera_bp, transform, attach_to=attached) 121 | camera.listen(self.save_rgb_image) 122 | 123 | return camera 124 | 125 | elif sensor_type == 'LiDAR': 126 | lidar_bp = self.world.get_blueprint_library().find('sensor.lidar.ray_cast') 127 | lidar_bp.set_attribute('range', '100') 128 | lidar_bp.set_attribute('dropoff_general_rate', lidar_bp.get_attribute('dropoff_general_rate').recommended_values[0]) 129 | lidar_bp.set_attribute('dropoff_intensity_limit', lidar_bp.get_attribute('dropoff_intensity_limit').recommended_values[0]) 130 | lidar_bp.set_attribute('dropoff_zero_intensity', lidar_bp.get_attribute('dropoff_zero_intensity').recommended_values[0]) 131 | 132 | for key in sensor_options: 133 | lidar_bp.set_attribute(key, sensor_options[key]) 134 | 135 | lidar = self.world.spawn_actor(lidar_bp, transform, attach_to=attached) 136 | 137 | lidar.listen(self.save_lidar_image) 138 | 139 | return lidar 140 | 141 | elif sensor_type == 'SemanticLiDAR': 142 | lidar_bp = self.world.get_blueprint_library().find('sensor.lidar.ray_cast_semantic') 143 | lidar_bp.set_attribute('range', '100') 144 | 145 | for key in sensor_options: 146 | lidar_bp.set_attribute(key, sensor_options[key]) 147 | 148 | lidar = self.world.spawn_actor(lidar_bp, transform, attach_to=attached) 149 | 150 | lidar.listen(self.save_semanticlidar_image) 151 | 152 | return lidar 153 | 154 | elif sensor_type == "Radar": 155 | radar_bp = self.world.get_blueprint_library().find('sensor.other.radar') 156 | for key in sensor_options: 157 | radar_bp.set_attribute(key, sensor_options[key]) 158 | 159 | radar = self.world.spawn_actor(radar_bp, transform, attach_to=attached) 160 | radar.listen(self.save_radar_image) 161 | 162 | return radar 163 | 164 | else: 165 | return None 166 | 167 | def get_sensor(self): 168 | return self.sensor 169 | 170 | def save_rgb_image(self, image): 171 | t_start = self.timer.time() 172 | 173 | image.convert(carla.ColorConverter.Raw) 174 | array = np.frombuffer(image.raw_data, dtype=np.dtype("uint8")) 175 | array = np.reshape(array, (image.height, image.width, 4)) 176 | array = array[:, :, :3] 177 | array = array[:, :, ::-1] 178 | 179 | if self.display_man.render_enabled(): 180 | self.surface = pygame.surfarray.make_surface(array.swapaxes(0, 1)) 181 | 182 | t_end = self.timer.time() 183 | self.time_processing += (t_end-t_start) 184 | self.tics_processing += 1 185 | 186 | def save_lidar_image(self, image): 187 | t_start = self.timer.time() 188 | 189 | disp_size = self.display_man.get_display_size() 190 | lidar_range = 2.0*float(self.sensor_options['range']) 191 | 192 | points = np.frombuffer(image.raw_data, dtype=np.dtype('f4')) 193 | points = np.reshape(points, (int(points.shape[0] / 4), 4)) 194 | lidar_data = np.array(points[:, :2]) 195 | lidar_data *= min(disp_size) / lidar_range 196 | lidar_data += (0.5 * disp_size[0], 0.5 * disp_size[1]) 197 | lidar_data = np.fabs(lidar_data) # pylint: disable=E1111 
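        # np.fabs folds points that projected to negative screen coordinates back
        # into the positive quadrant instead of discarding them: a cheap way to
        # keep the indices valid for the preview image, at the cost of mirroring
        # those few points.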
198 |         lidar_data = lidar_data.astype(np.int32)
199 |         lidar_data = np.reshape(lidar_data, (-1, 2))
200 |         lidar_img_size = (disp_size[0], disp_size[1], 3)
201 |         lidar_img = np.zeros((lidar_img_size), dtype=np.uint8)
202 | 
203 |         lidar_img[tuple(lidar_data.T)] = (255, 255, 255)
204 | 
205 |         if self.display_man.render_enabled():
206 |             self.surface = pygame.surfarray.make_surface(lidar_img)
207 | 
208 |         t_end = self.timer.time()
209 |         self.time_processing += (t_end-t_start)
210 |         self.tics_processing += 1
211 | 
212 |     def save_semanticlidar_image(self, image):
213 |         t_start = self.timer.time()
214 | 
215 |         disp_size = self.display_man.get_display_size()
216 |         lidar_range = 2.0*float(self.sensor_options['range'])
217 | 
218 |         points = np.frombuffer(image.raw_data, dtype=np.dtype('f4'))
219 |         points = np.reshape(points, (int(points.shape[0] / 6), 6))
220 |         lidar_data = np.array(points[:, :2])
221 |         lidar_data *= min(disp_size) / lidar_range
222 |         lidar_data += (0.5 * disp_size[0], 0.5 * disp_size[1])
223 |         lidar_data = np.fabs(lidar_data)  # pylint: disable=E1111
224 |         lidar_data = lidar_data.astype(np.int32)
225 |         lidar_data = np.reshape(lidar_data, (-1, 2))
226 |         lidar_img_size = (disp_size[0], disp_size[1], 3)
227 |         lidar_img = np.zeros((lidar_img_size), dtype=np.uint8)
228 | 
229 |         lidar_img[tuple(lidar_data.T)] = (255, 255, 255)
230 | 
231 |         if self.display_man.render_enabled():
232 |             self.surface = pygame.surfarray.make_surface(lidar_img)
233 | 
234 |         t_end = self.timer.time()
235 |         self.time_processing += (t_end-t_start)
236 |         self.tics_processing += 1
237 | 
238 |     def save_radar_image(self, radar_data):
239 |         t_start = self.timer.time()
240 |         points = np.frombuffer(radar_data.raw_data, dtype=np.dtype('f4'))
241 |         points = np.reshape(points, (len(radar_data), 4))
242 | 
243 |         t_end = self.timer.time()
244 |         self.time_processing += (t_end-t_start)
245 |         self.tics_processing += 1
246 | 
247 |     def render(self):
248 |         if self.surface is not None:
249 |             offset = self.display_man.get_display_offset(self.display_pos)
250 |             self.display_man.display.blit(self.surface, offset)
251 | 
252 |     def destroy(self):
253 |         self.sensor.destroy()
254 | 
255 | def run_simulation(args, client):
256 |     """This function performs one test run using the given args
257 |     and the carla client passed in.
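    It spawns a single vehicle carrying four RGB cameras, one lidar and one
    semantic lidar, and renders all six sensor views in a 2x3 pygame grid.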
258 |     """
259 | 
260 |     display_manager = None
261 |     vehicle = None
262 |     vehicle_list = []
263 |     timer = CustomTimer()
264 | 
265 |     try:
266 | 
267 |         # Get the world and keep its original settings
268 |         world = client.get_world()
269 |         original_settings = world.get_settings()
270 | 
271 |         if args.sync:
272 |             traffic_manager = client.get_trafficmanager(8000)
273 |             settings = world.get_settings()
274 |             traffic_manager.set_synchronous_mode(True)
275 |             settings.synchronous_mode = True
276 |             settings.fixed_delta_seconds = 0.05
277 |             world.apply_settings(settings)
278 | 
279 | 
280 |         # Instantiate the vehicle to which we attach the sensors
281 |         bp = world.get_blueprint_library().filter('charger_2020')[0]
282 |         vehicle = world.spawn_actor(bp, random.choice(world.get_map().get_spawn_points()))
283 |         vehicle_list.append(vehicle)
284 |         vehicle.set_autopilot(True)
285 | 
286 |         # The DisplayManager organizes all the sensors and their displays in a window.
287 |         # You can easily configure the grid and the total window size
288 |         display_manager = DisplayManager(grid_size=[2, 3], window_size=[args.width, args.height])
289 | 
290 |         # Then, SensorManager can be used to spawn RGBCamera, LiDARs and SemanticLiDARs as needed
291 |         # and assign each of them to a grid position.
292 |         SensorManager(world, display_manager, 'RGBCamera', carla.Transform(carla.Location(x=0, z=2.4), carla.Rotation(yaw=-90)), 
293 |                       vehicle, {}, display_pos=[0, 0])
294 |         SensorManager(world, display_manager, 'RGBCamera', carla.Transform(carla.Location(x=0, z=2.4), carla.Rotation(yaw=+00)), 
295 |                       vehicle, {}, display_pos=[0, 1])
296 |         SensorManager(world, display_manager, 'RGBCamera', carla.Transform(carla.Location(x=0, z=2.4), carla.Rotation(yaw=+90)), 
297 |                       vehicle, {}, display_pos=[0, 2])
298 |         SensorManager(world, display_manager, 'RGBCamera', carla.Transform(carla.Location(x=0, z=2.4), carla.Rotation(yaw=180)), 
299 |                       vehicle, {}, display_pos=[1, 1])
300 | 
301 |         SensorManager(world, display_manager, 'LiDAR', carla.Transform(carla.Location(x=0, z=2.4)), 
302 |                       vehicle, {'channels' : '64', 'range' : '100',  'points_per_second': '250000', 'rotation_frequency': '20'}, display_pos=[1, 0])
303 |         SensorManager(world, display_manager, 'SemanticLiDAR', carla.Transform(carla.Location(x=0, z=2.4)), 
304 |                       vehicle, {'channels' : '64', 'range' : '100', 'points_per_second': '100000', 'rotation_frequency': '20'}, display_pos=[1, 2])
305 | 
306 | 
307 |         # Simulation loop
308 |         call_exit = False
309 |         time_init_sim = timer.time()
310 |         while True:
311 |             # Carla Tick
312 |             if args.sync:
313 |                 world.tick()
314 |             else:
315 |                 world.wait_for_tick()
316 | 
317 |             # Render received data
318 |             display_manager.render()
319 | 
320 |             for event in pygame.event.get():
321 |                 if event.type == pygame.QUIT:
322 |                     call_exit = True
323 |                 elif event.type == pygame.KEYDOWN:
324 |                     if event.key == K_ESCAPE or event.key == K_q:
325 |                         call_exit = True
326 |                         break
327 | 
328 |             if call_exit:
329 |                 break
330 | 
331 |     finally:
332 |         if display_manager:
333 |             display_manager.destroy()
334 | 
335 |         client.apply_batch([carla.command.DestroyActor(x) for x in vehicle_list])
336 | 
337 |         world.apply_settings(original_settings)
338 | 
339 | 
340 | 
341 | def main():
342 |     argparser = argparse.ArgumentParser(
343 |         description='CARLA Sensor tutorial')
344 |     argparser.add_argument(
345 |         '--host',
346 |         metavar='H',
347 |         default='127.0.0.1',
348 |         help='IP of the host server (default: 127.0.0.1)')
349 |     argparser.add_argument(
350 |         '-p', '--port',
351 |         metavar='P',
352 |         default=2000,
353 |         type=int,
354 |         help='TCP port to listen to
(default: 2000)') 355 | argparser.add_argument( 356 | '--sync', 357 | action='store_true', 358 | help='Synchronous mode execution') 359 | argparser.add_argument( 360 | '--async', 361 | dest='sync', 362 | action='store_false', 363 | help='Asynchronous mode execution') 364 | argparser.set_defaults(sync=True) 365 | argparser.add_argument( 366 | '--res', 367 | metavar='WIDTHxHEIGHT', 368 | default='1280x720', 369 | help='window resolution (default: 1280x720)') 370 | 371 | args = argparser.parse_args() 372 | 373 | args.width, args.height = [int(x) for x in args.res.split('x')] 374 | 375 | try: 376 | client = carla.Client(args.host, args.port) 377 | client.set_timeout(5.0) 378 | 379 | run_simulation(args, client) 380 | 381 | except KeyboardInterrupt: 382 | print('\nCancelled by user. Bye!') 383 | 384 | 385 | if __name__ == '__main__': 386 | 387 | main() 388 | -------------------------------------------------------------------------------- /src/navigation/examples/client_bounding_boxes.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright (c) 2019 Aptiv 4 | # 5 | # This work is licensed under the terms of the MIT license. 6 | # For a copy, see . 7 | 8 | """ 9 | An example of client-side bounding boxes with basic car controls. 10 | 11 | Controls: 12 | 13 | W : throttle 14 | S : brake 15 | AD : steer 16 | Space : hand-brake 17 | 18 | ESC : quit 19 | """ 20 | 21 | # ============================================================================== 22 | # -- find carla module --------------------------------------------------------- 23 | # ============================================================================== 24 | 25 | 26 | import glob 27 | import os 28 | import sys 29 | 30 | try: 31 | sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % ( 32 | sys.version_info.major, 33 | sys.version_info.minor, 34 | 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0]) 35 | except IndexError: 36 | pass 37 | 38 | 39 | # ============================================================================== 40 | # -- imports ------------------------------------------------------------------- 41 | # ============================================================================== 42 | 43 | import carla 44 | 45 | import weakref 46 | import random 47 | 48 | try: 49 | import pygame 50 | from pygame.locals import K_ESCAPE 51 | from pygame.locals import K_SPACE 52 | from pygame.locals import K_a 53 | from pygame.locals import K_d 54 | from pygame.locals import K_s 55 | from pygame.locals import K_w 56 | except ImportError: 57 | raise RuntimeError('cannot import pygame, make sure pygame package is installed') 58 | 59 | try: 60 | import numpy as np 61 | except ImportError: 62 | raise RuntimeError('cannot import numpy, make sure numpy package is installed') 63 | 64 | VIEW_WIDTH = 1920//2 65 | VIEW_HEIGHT = 1080//2 66 | VIEW_FOV = 90 67 | 68 | BB_COLOR = (248, 64, 24) 69 | 70 | # ============================================================================== 71 | # -- ClientSideBoundingBoxes --------------------------------------------------- 72 | # ============================================================================== 73 | 74 | 75 | class ClientSideBoundingBoxes(object): 76 | """ 77 | This is a module responsible for creating 3D bounding boxes and drawing them 78 | client-side on pygame surface. 
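    Each box is built from a vehicle's bounding_box extent, transformed through
    vehicle -> world -> sensor space, and projected with the calibration matrix
    set up in BasicSynchronousClient.setup_camera().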
79 |     """
80 | 
81 |     @staticmethod
82 |     def get_bounding_boxes(vehicles, camera):
83 |         """
84 |         Creates 3D bounding boxes based on carla vehicle list and camera.
85 |         """
86 | 
87 |         bounding_boxes = [ClientSideBoundingBoxes.get_bounding_box(vehicle, camera) for vehicle in vehicles]
88 |         # filter objects behind camera
89 |         bounding_boxes = [bb for bb in bounding_boxes if all(bb[:, 2] > 0)]
90 |         return bounding_boxes
91 | 
92 |     @staticmethod
93 |     def draw_bounding_boxes(display, bounding_boxes):
94 |         """
95 |         Draws bounding boxes on pygame display.
96 |         """
97 | 
98 |         bb_surface = pygame.Surface((VIEW_WIDTH, VIEW_HEIGHT))
99 |         bb_surface.set_colorkey((0, 0, 0))
100 |         for bbox in bounding_boxes:
101 |             points = [(int(bbox[i, 0]), int(bbox[i, 1])) for i in range(8)]
102 |             # draw lines
103 |             # base
104 |             pygame.draw.line(bb_surface, BB_COLOR, points[0], points[1])
105 | 
106 |             pygame.draw.line(bb_surface, BB_COLOR, points[1], points[2])
107 |             pygame.draw.line(bb_surface, BB_COLOR, points[2], points[3])
108 |             pygame.draw.line(bb_surface, BB_COLOR, points[3], points[0])
109 |             # top
110 |             pygame.draw.line(bb_surface, BB_COLOR, points[4], points[5])
111 |             pygame.draw.line(bb_surface, BB_COLOR, points[5], points[6])
112 |             pygame.draw.line(bb_surface, BB_COLOR, points[6], points[7])
113 |             pygame.draw.line(bb_surface, BB_COLOR, points[7], points[4])
114 |             # base-top
115 |             pygame.draw.line(bb_surface, BB_COLOR, points[0], points[4])
116 |             pygame.draw.line(bb_surface, BB_COLOR, points[1], points[5])
117 |             pygame.draw.line(bb_surface, BB_COLOR, points[2], points[6])
118 |             pygame.draw.line(bb_surface, BB_COLOR, points[3], points[7])
119 |         display.blit(bb_surface, (0, 0))
120 | 
121 |     @staticmethod
122 |     def get_bounding_box(vehicle, camera):
123 |         """
124 |         Returns 3D bounding box for a vehicle based on camera view.
125 |         """
126 | 
127 |         bb_cords = ClientSideBoundingBoxes._create_bb_points(vehicle)
128 |         cords_x_y_z = ClientSideBoundingBoxes._vehicle_to_sensor(bb_cords, vehicle, camera)[:3, :]
129 |         cords_y_minus_z_x = np.concatenate([cords_x_y_z[1, :], -cords_x_y_z[2, :], cords_x_y_z[0, :]])
130 |         bbox = np.transpose(np.dot(camera.calibration, cords_y_minus_z_x))
131 |         camera_bbox = np.concatenate([bbox[:, 0] / bbox[:, 2], bbox[:, 1] / bbox[:, 2], bbox[:, 2]], axis=1)
132 |         return camera_bbox
133 | 
134 |     @staticmethod
135 |     def _create_bb_points(vehicle):
136 |         """
137 |         Returns 3D bounding box for a vehicle.
138 |         """
139 | 
140 |         cords = np.zeros((8, 4))
141 |         extent = vehicle.bounding_box.extent
142 |         cords[0, :] = np.array([extent.x, extent.y, -extent.z, 1])
143 |         cords[1, :] = np.array([-extent.x, extent.y, -extent.z, 1])
144 |         cords[2, :] = np.array([-extent.x, -extent.y, -extent.z, 1])
145 |         cords[3, :] = np.array([extent.x, -extent.y, -extent.z, 1])
146 |         cords[4, :] = np.array([extent.x, extent.y, extent.z, 1])
147 |         cords[5, :] = np.array([-extent.x, extent.y, extent.z, 1])
148 |         cords[6, :] = np.array([-extent.x, -extent.y, extent.z, 1])
149 |         cords[7, :] = np.array([extent.x, -extent.y, extent.z, 1])
150 |         return cords
151 | 
152 |     @staticmethod
153 |     def _vehicle_to_sensor(cords, vehicle, sensor):
154 |         """
155 |         Transforms coordinates of a vehicle bounding box to sensor.
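        This is a two-step transform: _vehicle_to_world() followed by
        _world_to_sensor() (both defined below).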
156 |         """
157 | 
158 |         world_cord = ClientSideBoundingBoxes._vehicle_to_world(cords, vehicle)
159 |         sensor_cord = ClientSideBoundingBoxes._world_to_sensor(world_cord, sensor)
160 |         return sensor_cord
161 | 
162 |     @staticmethod
163 |     def _vehicle_to_world(cords, vehicle):
164 |         """
165 |         Transforms coordinates of a vehicle bounding box to world.
166 |         """
167 | 
168 |         bb_transform = carla.Transform(vehicle.bounding_box.location)
169 |         bb_vehicle_matrix = ClientSideBoundingBoxes.get_matrix(bb_transform)
170 |         vehicle_world_matrix = ClientSideBoundingBoxes.get_matrix(vehicle.get_transform())
171 |         bb_world_matrix = np.dot(vehicle_world_matrix, bb_vehicle_matrix)
172 |         world_cords = np.dot(bb_world_matrix, np.transpose(cords))
173 |         return world_cords
174 | 
175 |     @staticmethod
176 |     def _world_to_sensor(cords, sensor):
177 |         """
178 |         Transforms world coordinates to sensor.
179 |         """
180 | 
181 |         sensor_world_matrix = ClientSideBoundingBoxes.get_matrix(sensor.get_transform())
182 |         world_sensor_matrix = np.linalg.inv(sensor_world_matrix)
183 |         sensor_cords = np.dot(world_sensor_matrix, cords)
184 |         return sensor_cords
185 | 
186 |     @staticmethod
187 |     def get_matrix(transform):
188 |         """
189 |         Creates matrix from carla transform.
190 |         """
191 | 
192 |         rotation = transform.rotation
193 |         location = transform.location
194 |         c_y = np.cos(np.radians(rotation.yaw))
195 |         s_y = np.sin(np.radians(rotation.yaw))
196 |         c_r = np.cos(np.radians(rotation.roll))
197 |         s_r = np.sin(np.radians(rotation.roll))
198 |         c_p = np.cos(np.radians(rotation.pitch))
199 |         s_p = np.sin(np.radians(rotation.pitch))
200 |         matrix = np.matrix(np.identity(4))
201 |         matrix[0, 3] = location.x
202 |         matrix[1, 3] = location.y
203 |         matrix[2, 3] = location.z
204 |         matrix[0, 0] = c_p * c_y
205 |         matrix[0, 1] = c_y * s_p * s_r - s_y * c_r
206 |         matrix[0, 2] = -c_y * s_p * c_r - s_y * s_r
207 |         matrix[1, 0] = s_y * c_p
208 |         matrix[1, 1] = s_y * s_p * s_r + c_y * c_r
209 |         matrix[1, 2] = -s_y * s_p * c_r + c_y * s_r
210 |         matrix[2, 0] = s_p
211 |         matrix[2, 1] = -c_p * s_r
212 |         matrix[2, 2] = c_p * c_r
213 |         return matrix
214 | 
215 | 
216 | # ==============================================================================
217 | # -- BasicSynchronousClient ----------------------------------------------------
218 | # ==============================================================================
219 | 
220 | 
221 | class BasicSynchronousClient(object):
222 |     """
223 |     Basic implementation of a synchronous client.
224 |     """
225 | 
226 |     def __init__(self):
227 |         self.client = None
228 |         self.world = None
229 |         self.camera = None
230 |         self.car = None
231 | 
232 |         self.display = None
233 |         self.image = None
234 |         self.capture = True
235 | 
236 |     def camera_blueprint(self):
237 |         """
238 |         Returns camera blueprint.
239 |         """
240 | 
241 |         camera_bp = self.world.get_blueprint_library().find('sensor.camera.rgb')
242 |         camera_bp.set_attribute('image_size_x', str(VIEW_WIDTH))
243 |         camera_bp.set_attribute('image_size_y', str(VIEW_HEIGHT))
244 |         camera_bp.set_attribute('fov', str(VIEW_FOV))
245 |         return camera_bp
246 | 
247 |     def set_synchronous_mode(self, synchronous_mode):
248 |         """
249 |         Sets synchronous mode.
250 |         """
251 | 
252 |         settings = self.world.get_settings()
253 |         settings.synchronous_mode = synchronous_mode
254 |         self.world.apply_settings(settings)
255 | 
256 |     def setup_car(self):
257 |         """
258 |         Spawns actor-vehicle to be controlled.
259 |         """
260 | 
261 |         car_bp = self.world.get_blueprint_library().filter('vehicle.*')[0]
262 |         location = random.choice(self.world.get_map().get_spawn_points())
263 |         self.car = self.world.spawn_actor(car_bp, location)
264 | 
265 |     def setup_camera(self):
266 |         """
267 |         Spawns actor-camera to be used to render the view.
268 |         Sets calibration for client-side boxes rendering.
269 |         """
270 | 
271 |         camera_transform = carla.Transform(carla.Location(x=-5.5, z=2.8), carla.Rotation(pitch=-15))
272 |         self.camera = self.world.spawn_actor(self.camera_blueprint(), camera_transform, attach_to=self.car)
273 |         weak_self = weakref.ref(self)
274 |         self.camera.listen(lambda image: weak_self().set_image(weak_self, image))
275 | 
276 |         calibration = np.identity(3)
277 |         calibration[0, 2] = VIEW_WIDTH / 2.0
278 |         calibration[1, 2] = VIEW_HEIGHT / 2.0
279 |         calibration[0, 0] = calibration[1, 1] = VIEW_WIDTH / (2.0 * np.tan(VIEW_FOV * np.pi / 360.0))
280 |         self.camera.calibration = calibration
281 | 
282 |     def control(self, car):
283 |         """
284 |         Applies control to main car based on pygame pressed keys.
285 |         Returns True if ESCAPE is hit (to end the main loop), otherwise False.
286 |         """
287 | 
288 |         keys = pygame.key.get_pressed()
289 |         if keys[K_ESCAPE]:
290 |             return True
291 | 
292 |         control = car.get_control()
293 |         control.throttle = 0
294 |         if keys[K_w]:
295 |             control.throttle = 1
296 |             control.reverse = False
297 |         elif keys[K_s]:
298 |             control.throttle = 1
299 |             control.reverse = True
300 |         if keys[K_a]:
301 |             control.steer = max(-1., min(control.steer - 0.05, 0))
302 |         elif keys[K_d]:
303 |             control.steer = min(1., max(control.steer + 0.05, 0))
304 |         else:
305 |             control.steer = 0
306 |         control.hand_brake = keys[K_SPACE]
307 | 
308 |         car.apply_control(control)
309 |         return False
310 | 
311 |     @staticmethod
312 |     def set_image(weak_self, img):
313 |         """
314 |         Sets image coming from camera sensor.
315 |         The self.capture flag is a means of synchronization - once the flag is
316 |         set, the next incoming image will be stored.
317 |         """
318 | 
319 |         self = weak_self()
320 |         if self.capture:
321 |             self.image = img
322 |             self.capture = False
323 | 
324 |     def render(self, display):
325 |         """
326 |         Transforms image from camera sensor and blits it to main pygame display.
327 |         """
328 | 
329 |         if self.image is not None:
330 |             array = np.frombuffer(self.image.raw_data, dtype=np.dtype("uint8"))
331 |             array = np.reshape(array, (self.image.height, self.image.width, 4))
332 |             array = array[:, :, :3]
333 |             array = array[:, :, ::-1]
334 |             surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))
335 |             display.blit(surface, (0, 0))
336 | 
337 |     def game_loop(self):
338 |         """
339 |         Main program loop.
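        Ticks the synchronous world, renders the latest camera frame at 20 FPS
        and overlays the client-side bounding boxes of every vehicle on top.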
340 |         """
341 | 
342 |         try:
343 |             pygame.init()
344 | 
345 |             self.client = carla.Client('127.0.0.1', 2000)
346 |             self.client.set_timeout(2.0)
347 |             self.world = self.client.get_world()
348 | 
349 |             self.setup_car()
350 |             self.setup_camera()
351 | 
352 |             self.display = pygame.display.set_mode((VIEW_WIDTH, VIEW_HEIGHT), pygame.HWSURFACE | pygame.DOUBLEBUF)
353 |             pygame_clock = pygame.time.Clock()
354 | 
355 |             self.set_synchronous_mode(True)
356 |             vehicles = self.world.get_actors().filter('vehicle.*')
357 | 
358 |             while True:
359 |                 self.world.tick()
360 | 
361 |                 self.capture = True
362 |                 pygame_clock.tick_busy_loop(20)
363 | 
364 |                 self.render(self.display)
365 |                 bounding_boxes = ClientSideBoundingBoxes.get_bounding_boxes(vehicles, self.camera)
366 |                 ClientSideBoundingBoxes.draw_bounding_boxes(self.display, bounding_boxes)
367 | 
368 |                 pygame.display.flip()
369 | 
370 |                 pygame.event.pump()
371 |                 if self.control(self.car):
372 |                     return
373 | 
374 |         finally:
375 |             self.set_synchronous_mode(False)
376 |             self.camera.destroy()
377 |             self.car.destroy()
378 |             pygame.quit()
379 | 
380 | 
381 | # ==============================================================================
382 | # -- main() --------------------------------------------------------------------
383 | # ==============================================================================
384 | 
385 | 
386 | def main():
387 |     """
388 |     Initializes the client-side bounding box demo.
389 |     """
390 | 
391 |     try:
392 |         client = BasicSynchronousClient()
393 |         client.game_loop()
394 |     finally:
395 |         print('EXIT')
396 | 
397 | 
398 | if __name__ == '__main__':
399 |     main()
400 | 
--------------------------------------------------------------------------------
/src/navigation/local_planner.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018-2020 CVC.
2 | #
3 | # This work is licensed under the terms of the MIT license.
4 | # For a copy, see <https://opensource.org/licenses/MIT>.
5 | 
6 | """ This module contains a local planner to perform low-level waypoint following based on PID controllers. """
7 | 
8 | from enum import IntEnum
9 | from collections import deque
10 | import random
11 | 
12 | import carla
13 | from agents.navigation.controller import VehiclePIDController
14 | from agents.tools.misc import draw_waypoints, get_speed
15 | 
16 | 
17 | class RoadOption(IntEnum):
18 |     """
19 |     RoadOption represents the possible topological configurations when moving from one lane segment to another.
20 | 
21 |     """
22 |     VOID = -1
23 |     LEFT = 1
24 |     RIGHT = 2
25 |     STRAIGHT = 3
26 |     LANEFOLLOW = 4
27 |     CHANGELANELEFT = 5
28 |     CHANGELANERIGHT = 6
29 | 
30 | 
31 | class LocalPlanner(object):
32 |     """
33 |     LocalPlanner implements the basic behavior of following a
34 |     trajectory of waypoints that is generated on-the-fly.
35 | 
36 |     The low-level motion of the vehicle is computed by using two PID controllers,
37 |     one is used for the lateral control and the other for the longitudinal control (cruise speed).
38 | 
39 |     When multiple paths are available (intersections) this local planner makes a random choice,
40 |     unless a given global plan has already been specified.
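    Use set_global_plan() to provide such a plan, and call run_step() once per
    tick to obtain the next carla.VehicleControl to apply.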
41 |     """
42 | 
43 |     def __init__(self, vehicle, opt_dict={}, map_inst=None):
44 |         """
45 |         :param vehicle: actor to apply the local planner logic onto
46 |         :param opt_dict: dictionary of arguments with different parameters:
47 |             dt: time between simulation steps
48 |             target_speed: desired cruise speed in Km/h
49 |             sampling_radius: distance between the waypoints part of the plan
50 |             lateral_control_dict: values of the lateral PID controller
51 |             longitudinal_control_dict: values of the longitudinal PID controller
52 |             max_throttle: maximum throttle applied to the vehicle
53 |             max_brake: maximum brake applied to the vehicle
54 |             max_steering: maximum steering applied to the vehicle
55 |             offset: distance between the route waypoints and the center of the lane
56 |         :param map_inst: carla.Map instance to avoid the expensive call of getting it.
57 |         """
58 |         self._vehicle = vehicle
59 |         self._world = self._vehicle.get_world()
60 |         if map_inst:
61 |             if isinstance(map_inst, carla.Map):
62 |                 self._map = map_inst
63 |             else:
64 |                 print("Warning: Ignoring the given map as it is not a 'carla.Map'")
65 |                 self._map = self._world.get_map()
66 |         else:
67 |             self._map = self._world.get_map()
68 | 
69 |         self._vehicle_controller = None
70 |         self.target_waypoint = None
71 |         self.target_road_option = None
72 | 
73 |         self._waypoints_queue = deque(maxlen=10000)
74 |         self._min_waypoint_queue_length = 100
75 |         self._stop_waypoint_creation = False
76 | 
77 |         # Base parameters
78 |         self._dt = 1.0 / 20.0
79 |         self._target_speed = 45.0  # Km/h
80 |         self._sampling_radius = 2.0
81 |         self._args_lateral_dict = {'K_P': 1.95, 'K_I': 0.05, 'K_D': 0.2, 'dt': self._dt}
82 |         self._args_longitudinal_dict = {'K_P': 1.0, 'K_I': 0.05, 'K_D': 0, 'dt': self._dt}
83 |         self._max_throt = 0.75
84 |         self._max_brake = 0.3
85 |         self._max_steer = 0.8
86 |         self._offset = 0
87 |         self._base_min_distance = 3.0
88 |         self._distance_ratio = 0.5
89 |         self._follow_speed_limits = False
90 | 
91 |         # Overload parameters
92 |         if opt_dict:
93 |             if 'dt' in opt_dict:
94 |                 self._dt = opt_dict['dt']
95 |             if 'target_speed' in opt_dict:
96 |                 self._target_speed = opt_dict['target_speed']
97 |             if 'sampling_radius' in opt_dict:
98 |                 self._sampling_radius = opt_dict['sampling_radius']
99 |             if 'lateral_control_dict' in opt_dict:
100 |                 self._args_lateral_dict = opt_dict['lateral_control_dict']
101 |             if 'longitudinal_control_dict' in opt_dict:
102 |                 self._args_longitudinal_dict = opt_dict['longitudinal_control_dict']
103 |             if 'max_throttle' in opt_dict:
104 |                 self._max_throt = opt_dict['max_throttle']
105 |             if 'max_brake' in opt_dict:
106 |                 self._max_brake = opt_dict['max_brake']
107 |             if 'max_steering' in opt_dict:
108 |                 self._max_steer = opt_dict['max_steering']
109 |             if 'offset' in opt_dict:
110 |                 self._offset = opt_dict['offset']
111 |             if 'base_min_distance' in opt_dict:
112 |                 self._base_min_distance = opt_dict['base_min_distance']
113 |             if 'distance_ratio' in opt_dict:
114 |                 self._distance_ratio = opt_dict['distance_ratio']
115 |             if 'follow_speed_limits' in opt_dict:
116 |                 self._follow_speed_limits = opt_dict['follow_speed_limits']
117 | 
118 |         # initializing controller
119 |         self._init_controller()
120 | 
121 |     def reset_vehicle(self):
122 |         """Reset the ego-vehicle"""
123 |         self._vehicle = None
124 | 
125 |     def _init_controller(self):
126 |         """Controller initialization"""
127 |         self._vehicle_controller = VehiclePIDController(self._vehicle,
128 |                                                         args_lateral=self._args_lateral_dict,
129 |                                                         args_longitudinal=self._args_longitudinal_dict,
130 |                                                         offset=self._offset,
131 |
max_throttle=self._max_throt, 132 | max_brake=self._max_brake, 133 | max_steering=self._max_steer) 134 | 135 | # Compute the current vehicle waypoint 136 | current_waypoint = self._map.get_waypoint(self._vehicle.get_location()) 137 | self.target_waypoint, self.target_road_option = (current_waypoint, RoadOption.LANEFOLLOW) 138 | self._waypoints_queue.append((self.target_waypoint, self.target_road_option)) 139 | 140 | def set_speed(self, speed): 141 | """ 142 | Changes the target speed 143 | 144 | :param speed: new target speed in Km/h 145 | :return: 146 | """ 147 | if self._follow_speed_limits: 148 | print("WARNING: The max speed is currently set to follow the speed limits. " 149 | "Use 'follow_speed_limits' to deactivate this") 150 | self._target_speed = speed 151 | 152 | def follow_speed_limits(self, value=False): 153 | """ 154 | Activates a flag that makes the max speed dynamically vary according to the speed limits 155 | 156 | :param value: bool 157 | :return: 158 | """ 159 | self._follow_speed_limits = value 160 | 161 | def _compute_next_waypoints(self, k=1): 162 | """ 163 | Add new waypoints to the trajectory queue. 164 | 165 | :param k: how many waypoints to compute 166 | :return: 167 | """ 168 | # check we do not overflow the queue 169 | available_entries = self._waypoints_queue.maxlen - len(self._waypoints_queue) 170 | k = min(available_entries, k) 171 | 172 | for _ in range(k): 173 | last_waypoint = self._waypoints_queue[-1][0] 174 | # Get the set of possible next waypoints 175 | next_waypoints = list(last_waypoint.next(self._sampling_radius)) 176 | 177 | if len(next_waypoints) == 0: 178 | break 179 | elif len(next_waypoints) == 1: 180 | # only one option available ==> lanefollowing 181 | next_waypoint = next_waypoints[0] 182 | road_option = RoadOption.LANEFOLLOW 183 | else: 184 | # random choice between the possible options 185 | road_options_list = _retrieve_options( 186 | next_waypoints, last_waypoint) 187 | road_option = random.choice(road_options_list) 188 | next_waypoint = next_waypoints[road_options_list.index( 189 | road_option)] 190 | 191 | self._waypoints_queue.append((next_waypoint, road_option)) 192 | 193 | def set_global_plan(self, current_plan, stop_waypoint_creation=True, clean_queue=True): 194 | """ 195 | Adds a new plan to the local planner. A plan must be a list of (carla.Waypoint, RoadOption) pairs 196 | The 'clean_queue' parameter erases the previous plan if True; otherwise, the new plan is appended to the old one 197 | The 'stop_waypoint_creation' flag stops the automatic creation of random waypoints 198 | 199 | :param current_plan: list of (carla.Waypoint, RoadOption) 200 | :param stop_waypoint_creation: bool 201 | :param clean_queue: bool 202 | :return: 203 | """ 204 | if clean_queue: 205 | self._waypoints_queue.clear() 206 | 207 | # Remake the waypoints queue if the new plan is longer than the queue 208 | new_plan_length = len(current_plan) + len(self._waypoints_queue) 209 | if new_plan_length > self._waypoints_queue.maxlen: 210 | new_waypoint_queue = deque(maxlen=new_plan_length) 211 | for wp in self._waypoints_queue: 212 | new_waypoint_queue.append(wp) 213 | self._waypoints_queue = new_waypoint_queue 214 | 215 | for elem in current_plan: 216 | self._waypoints_queue.append(elem) 217 | 218 | self._stop_waypoint_creation = stop_waypoint_creation 219 | 220 | def run_step(self, debug=False): 221 | """ 222 | Execute one step of local planning which involves running the longitudinal and lateral PID controllers to 223 | follow the waypoints trajectory.
224 | 225 | :param debug: boolean flag to activate waypoints debugging 226 | :return: control to be applied 227 | """ 228 | if self._follow_speed_limits: 229 | self._target_speed = self._vehicle.get_speed_limit() 230 | 231 | # Add more waypoints if there are too few left in the horizon 232 | if not self._stop_waypoint_creation and len(self._waypoints_queue) < self._min_waypoint_queue_length: 233 | self._compute_next_waypoints(k=self._min_waypoint_queue_length) 234 | 235 | # Purge the queue of obsolete waypoints 236 | veh_location = self._vehicle.get_location() 237 | vehicle_speed = get_speed(self._vehicle) / 3.6 238 | self._min_distance = self._base_min_distance + self._distance_ratio * vehicle_speed 239 | 240 | num_waypoint_removed = 0 241 | for waypoint, _ in self._waypoints_queue: 242 | 243 | if len(self._waypoints_queue) - num_waypoint_removed == 1: 244 | min_distance = 1 # Don't remove the last waypoint until very close by 245 | else: 246 | min_distance = self._min_distance 247 | 248 | if veh_location.distance(waypoint.transform.location) < min_distance: 249 | num_waypoint_removed += 1 250 | else: 251 | break 252 | 253 | if num_waypoint_removed > 0: 254 | for _ in range(num_waypoint_removed): 255 | self._waypoints_queue.popleft() 256 | 257 | # Get the target waypoint and move using the PID controllers. Stop if no target waypoint 258 | if len(self._waypoints_queue) == 0: 259 | control = carla.VehicleControl() 260 | control.steer = 0.0 261 | control.throttle = 0.0 262 | control.brake = 1.0 263 | control.hand_brake = False 264 | control.manual_gear_shift = False 265 | else: 266 | self.target_waypoint, self.target_road_option = self._waypoints_queue[0] 267 | control = self._vehicle_controller.run_step(self._target_speed, self.target_waypoint) 268 | 269 | if debug: 270 | draw_waypoints(self._vehicle.get_world(), [self.target_waypoint], 1.0) 271 | 272 | return control 273 | 274 | def get_incoming_waypoint_and_direction(self, steps=3): 275 | """ 276 | Returns the direction and waypoint at a distance ahead defined by the user. 277 | 278 | :param steps: number of steps to get the incoming waypoint. 279 | """ 280 | if len(self._waypoints_queue) > steps: 281 | return self._waypoints_queue[steps] 282 | 283 | else: 284 | try: 285 | wpt, direction = self._waypoints_queue[-1] 286 | return wpt, direction 287 | except IndexError: 288 | return None, RoadOption.VOID 289 | 290 | def get_plan(self): 291 | """Returns the current plan of the local planner""" 292 | return self._waypoints_queue 293 | 294 | def done(self): 295 | """ 296 | Returns whether or not the planner has finished 297 | 298 | :return: boolean 299 | """ 300 | return len(self._waypoints_queue) == 0 301 | 302 | 303 | def _retrieve_options(list_waypoints, current_waypoint): 304 | """ 305 | Compute the type of connection between the current active waypoint and the multiple waypoints present in 306 | list_waypoints. The result is encoded as a list of RoadOption enums.
307 | 308 | :param list_waypoints: list with the possible target waypoints in case of multiple options 309 | :param current_waypoint: current active waypoint 310 | :return: list of RoadOption enums representing the type of connection from the active waypoint to each 311 | candidate in list_waypoints 312 | """ 313 | options = [] 314 | for next_waypoint in list_waypoints: 315 | # this is needed because sometimes we are linking to 316 | # the beginning of an intersection, therefore the 317 | # variation in angle is small 318 | next_next_waypoint = next_waypoint.next(3.0)[0] 319 | link = _compute_connection(current_waypoint, next_next_waypoint) 320 | options.append(link) 321 | 322 | return options 323 | 324 | 325 | def _compute_connection(current_waypoint, next_waypoint, threshold=35): 326 | """ 327 | Compute the type of topological connection between an active waypoint (current_waypoint) and a target waypoint 328 | (next_waypoint). 329 | 330 | :param current_waypoint: active waypoint 331 | :param next_waypoint: target waypoint :param threshold: yaw difference (in degrees) below which the connection counts as STRAIGHT 332 | :return: the type of topological connection encoded as a RoadOption enum: 333 | RoadOption.STRAIGHT 334 | RoadOption.LEFT 335 | RoadOption.RIGHT 336 | """ 337 | n = next_waypoint.transform.rotation.yaw 338 | n = n % 360.0 339 | 340 | c = current_waypoint.transform.rotation.yaw 341 | c = c % 360.0 342 | 343 | diff_angle = (n - c) % 180.0 344 | if diff_angle < threshold or diff_angle > (180 - threshold): 345 | return RoadOption.STRAIGHT 346 | elif diff_angle > 90.0: 347 | return RoadOption.LEFT 348 | else: 349 | return RoadOption.RIGHT 350 | -------------------------------------------------------------------------------- /src/navigation/examples/generate_traffic_basic.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright (c) 2021 Computer Vision Center (CVC) at the Universitat Autonoma de 4 | # Barcelona (UAB). 5 | # 6 | # This work is licensed under the terms of the MIT license. 7 | # For a copy, see <https://opensource.org/licenses/MIT>. 8 | 9 | """Example script to generate traffic in the simulation""" 10 | 11 | import glob 12 | import os 13 | import sys 14 | import time 15 | 16 | try: 17 | sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % ( 18 | sys.version_info.major, 19 | sys.version_info.minor, 20 | 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0]) 21 | except IndexError: 22 | pass 23 | 24 | import carla 25 | 26 | from carla import VehicleLightState as vls 27 | 28 | import argparse 29 | import logging 30 | from numpy import random 31 | 32 | def get_actor_blueprints(world, filter, generation): 33 | bps = world.get_blueprint_library().filter(filter) 34 | 35 | if generation.lower() == "all": 36 | return bps 37 | 38 | # If the filter returns only one bp, we assume that this is the one needed 39 | # and therefore, we ignore the generation 40 | if len(bps) == 1: 41 | return bps 42 | 43 | try: 44 | int_generation = int(generation) 45 | # Check if generation is in available generations 46 | if int_generation in [1, 2]: 47 | bps = [x for x in bps if int(x.get_attribute('generation')) == int_generation] 48 | return bps 49 | else: 50 | print(" Warning! Actor Generation is not valid. No actor will be spawned.") 51 | return [] 52 | except: 53 | print(" Warning! Actor Generation is not valid. 
No actor will be spawned.") 54 | return [] 55 | 56 | def main(): 57 | argparser = argparse.ArgumentParser( 58 | description=__doc__) 59 | argparser.add_argument( 60 | '--host', 61 | metavar='H', 62 | default='127.0.0.1', 63 | help='IP of the host server (default: 127.0.0.1)') 64 | argparser.add_argument( 65 | '-p', '--port', 66 | metavar='P', 67 | default=2000, 68 | type=int, 69 | help='TCP port to listen to (default: 2000)') 70 | argparser.add_argument( 71 | '-n', '--number-of-vehicles', 72 | metavar='N', 73 | default=0, 74 | type=int, 75 | help='Number of vehicles (default: 30)') 76 | argparser.add_argument( 77 | '-w', '--number-of-walkers', 78 | metavar='W', 79 | default=0, 80 | 81 | 82 | type=int, 83 | help='Number of walkers (default: 10)') 84 | argparser.add_argument( 85 | '--safe', 86 | action='store_true', 87 | help='Avoid spawning vehicles prone to accidents') 88 | argparser.add_argument( 89 | '--filterv', 90 | metavar='PATTERN', 91 | default='vehicle.*', 92 | help='Filter vehicle model (default: "vehicle.*")') 93 | argparser.add_argument( 94 | '--generationv', 95 | metavar='G', 96 | default='All', 97 | help='restrict to certain vehicle generation (values: "1","2","All" - default: "All")') 98 | argparser.add_argument( 99 | '--filterw', 100 | metavar='PATTERN', 101 | default='walker.pedestrian.*', 102 | help='Filter pedestrian type (default: "walker.pedestrian.*")') 103 | argparser.add_argument( 104 | '--generationw', 105 | metavar='G', 106 | default='2', 107 | help='restrict to certain pedestrian generation (values: "1","2","All" - default: "2")') 108 | argparser.add_argument( 109 | '--tm-port', 110 | metavar='P', 111 | default=8000, 112 | type=int, 113 | help='Port to communicate with TM (default: 8000)') 114 | argparser.add_argument( 115 | '--asynch', 116 | action='store_true', 117 | help='Activate asynchronous mode execution') 118 | argparser.add_argument( 119 | '--hybrid', 120 | action='store_true', 121 | help='Activate hybrid mode for Traffic Manager') 122 | argparser.add_argument( 123 | '-s', '--seed', 124 | metavar='S', 125 | type=int, 126 | help='Set random device seed and deterministic mode for Traffic Manager') 127 | argparser.add_argument( 128 | '--seedw', 129 | metavar='S', 130 | default=0, 131 | type=int, 132 | help='Set the seed for pedestrians module') 133 | argparser.add_argument( 134 | '--car-lights-on', 135 | action='store_true', 136 | default=False, 137 | help='Enable automatic car light management') 138 | argparser.add_argument( 139 | '--hero', 140 | action='store_true', 141 | default=False, 142 | help='Set one of the vehicles as hero') 143 | argparser.add_argument( 144 | '--respawn', 145 | action='store_true', 146 | default=False, 147 | help='Automatically respawn dormant vehicles (only in large maps)') 148 | argparser.add_argument( 149 | '--no-rendering', 150 | action='store_true', 151 | default=False, 152 | help='Activate no rendering mode') 153 | 154 | args = argparser.parse_args() 155 | 156 | logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO) 157 | 158 | vehicles_list = [] 159 | walkers_list = [] 160 | all_id = [] 161 | client = carla.Client(args.host, args.port) 162 | client.set_timeout(10.0) 163 | synchronous_master = False 164 | random.seed(args.seed if args.seed is not None else int(time.time())) 165 | 166 | try: 167 | world = client.get_world() 168 | 169 | traffic_manager = client.get_trafficmanager(args.tm_port) 170 | traffic_manager.set_global_distance_to_leading_vehicle(2.5) 171 | if args.respawn: 172 | 
traffic_manager.set_respawn_dormant_vehicles(True) 173 | if args.hybrid: 174 | traffic_manager.set_hybrid_physics_mode(True) 175 | traffic_manager.set_hybrid_physics_radius(70.0) 176 | if args.seed is not None: 177 | traffic_manager.set_random_device_seed(args.seed) 178 | 179 | settings = world.get_settings() 180 | if not args.asynch: 181 | traffic_manager.set_synchronous_mode(True) 182 | if not settings.synchronous_mode: 183 | synchronous_master = True 184 | settings.synchronous_mode = True 185 | settings.fixed_delta_seconds = 0.05 186 | else: 187 | synchronous_master = False 188 | else: 189 | print("You are currently in asynchronous mode. If this is a traffic simulation, \ 190 | you could experience some issues. If it's not working correctly, switch to synchronous \ 191 | mode by using traffic_manager.set_synchronous_mode(True)") 192 | 193 | if args.no_rendering: 194 | settings.no_rendering_mode = True 195 | world.apply_settings(settings) 196 | 197 | blueprints = get_actor_blueprints(world, args.filterv, args.generationv) 198 | blueprintsWalkers = get_actor_blueprints(world, args.filterw, args.generationw) 199 | 200 | if args.safe: 201 | blueprints = [x for x in blueprints if x.get_attribute('base_type') == 'car'] 202 | 203 | blueprints = sorted(blueprints, key=lambda bp: bp.id) 204 | 205 | spawn_points = world.get_map().get_spawn_points() 206 | number_of_spawn_points = len(spawn_points) 207 | 208 | if args.number_of_vehicles < number_of_spawn_points: 209 | random.shuffle(spawn_points) 210 | elif args.number_of_vehicles > number_of_spawn_points: 211 | msg = 'requested %d vehicles, but could only find %d spawn points' 212 | logging.warning(msg, args.number_of_vehicles, number_of_spawn_points) 213 | args.number_of_vehicles = number_of_spawn_points 214 | 215 | # @todo cannot import these directly. 
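# Note on the three carla.command aliases that follow: SpawnActor spawns an
# actor, SetAutopilot hands it over to the Traffic Manager, and FutureActor is
# a placeholder for the actor id that only exists once the spawn command has
# been applied server-side. Chaining with .then() lets a single
# client.apply_batch_sync() round trip spawn a vehicle and enable its autopilot
# together. Minimal sketch of the pattern (mirrors the spawn loop below):
#     cmd = SpawnActor(blueprint, transform).then(
#         SetAutopilot(FutureActor, True, traffic_manager.get_port()))
#     for response in client.apply_batch_sync([cmd], synchronous_master):
#         if not response.error:
#             vehicles_list.append(response.actor_id)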
216 | SpawnActor = carla.command.SpawnActor 217 | SetAutopilot = carla.command.SetAutopilot 218 | FutureActor = carla.command.FutureActor 219 | 220 | # -------------- 221 | # Spawn vehicles 222 | # -------------- 223 | batch = [] 224 | hero = args.hero 225 | for n, transform in enumerate(spawn_points): 226 | if n >= args.number_of_vehicles: 227 | break 228 | blueprint = random.choice(blueprints) 229 | if blueprint.has_attribute('color'): 230 | color = random.choice(blueprint.get_attribute('color').recommended_values) 231 | blueprint.set_attribute('color', color) 232 | if blueprint.has_attribute('driver_id'): 233 | driver_id = random.choice(blueprint.get_attribute('driver_id').recommended_values) 234 | blueprint.set_attribute('driver_id', driver_id) 235 | if hero: 236 | blueprint.set_attribute('role_name', 'hero') 237 | hero = False 238 | else: 239 | blueprint.set_attribute('role_name', 'autopilot') 240 | 241 | # spawn the cars and set their autopilot and light state all together 242 | batch.append(SpawnActor(blueprint, transform) 243 | .then(SetAutopilot(FutureActor, True, traffic_manager.get_port()))) 244 | 245 | for response in client.apply_batch_sync(batch, synchronous_master): 246 | if response.error: 247 | logging.error(response.error) 248 | else: 249 | vehicles_list.append(response.actor_id) 250 | 251 | # Set automatic vehicle lights update if specified 252 | if args.car_lights_on: 253 | all_vehicle_actors = world.get_actors(vehicles_list) 254 | for actor in all_vehicle_actors: 255 | traffic_manager.update_vehicle_lights(actor, True) 256 | 257 | # ------------- 258 | # Spawn Walkers 259 | # ------------- 260 | # some settings 261 | percentagePedestriansRunning = 0.0 # how many pedestrians will run 262 | percentagePedestriansCrossing = 0.0 # how many pedestrians will walk through the road 263 | if args.seedw: 264 | world.set_pedestrians_seed(args.seedw) 265 | random.seed(args.seedw) 266 | # 1. take all the random locations to spawn 267 | spawn_points = [] 268 | for i in range(args.number_of_walkers): 269 | spawn_point = carla.Transform() 270 | loc = world.get_random_location_from_navigation() 271 | if (loc != None): 272 | spawn_point.location = loc 273 | spawn_points.append(spawn_point) 274 | # 2. we spawn the walker object 275 | batch = [] 276 | walker_speed = [] 277 | for spawn_point in spawn_points: 278 | walker_bp = random.choice(blueprintsWalkers) 279 | # set as not invincible 280 | if walker_bp.has_attribute('is_invincible'): 281 | walker_bp.set_attribute('is_invincible', 'false') 282 | # set the max speed 283 | if walker_bp.has_attribute('speed'): 284 | if (random.random() > percentagePedestriansRunning): 285 | # walking 286 | walker_speed.append(walker_bp.get_attribute('speed').recommended_values[1]) 287 | else: 288 | # running 289 | walker_speed.append(walker_bp.get_attribute('speed').recommended_values[2]) 290 | else: 291 | print("Walker has no speed") 292 | walker_speed.append(0.0) 293 | batch.append(SpawnActor(walker_bp, spawn_point)) 294 | results = client.apply_batch_sync(batch, True) 295 | walker_speed2 = [] 296 | for i in range(len(results)): 297 | if results[i].error: 298 | logging.error(results[i].error) 299 | else: 300 | walkers_list.append({"id": results[i].actor_id}) 301 | walker_speed2.append(walker_speed[i]) 302 | walker_speed = walker_speed2 303 | # 3. 
we spawn the walker controller 304 | batch = [] 305 | walker_controller_bp = world.get_blueprint_library().find('controller.ai.walker') 306 | for i in range(len(walkers_list)): 307 | batch.append(SpawnActor(walker_controller_bp, carla.Transform(), walkers_list[i]["id"])) 308 | results = client.apply_batch_sync(batch, True) 309 | for i in range(len(results)): 310 | if results[i].error: 311 | logging.error(results[i].error) 312 | else: 313 | walkers_list[i]["con"] = results[i].actor_id 314 | # 4. we put together the walkers and controllers id to get the objects from their id 315 | for i in range(len(walkers_list)): 316 | all_id.append(walkers_list[i]["con"]) 317 | all_id.append(walkers_list[i]["id"]) 318 | all_actors = world.get_actors(all_id) 319 | 320 | # wait for a tick to ensure client receives the last transform of the walkers we have just created 321 | if args.asynch or not synchronous_master: 322 | world.wait_for_tick() 323 | else: 324 | world.tick() 325 | 326 | # 5. initialize each controller and set target to walk to (list is [controller, actor, controller, actor ...]) 327 | # set how many pedestrians can cross the road 328 | world.set_pedestrians_cross_factor(percentagePedestriansCrossing) 329 | for i in range(0, len(all_id), 2): 330 | # start walker 331 | all_actors[i].start() 332 | # set walk to random point 333 | all_actors[i].go_to_location(world.get_random_location_from_navigation()) 334 | # max speed 335 | all_actors[i].set_max_speed(float(walker_speed[int(i/2)])) 336 | 337 | print('spawned %d vehicles and %d walkers, press Ctrl+C to exit.' % (len(vehicles_list), len(walkers_list))) 338 | 339 | # Example of how to use Traffic Manager parameters 340 | traffic_manager.global_percentage_speed_difference(30.0) 341 | 342 | while True: 343 | if not args.asynch and synchronous_master: 344 | world.tick() 345 | else: 346 | world.wait_for_tick() 347 | 348 | finally: 349 | 350 | if not args.asynch and synchronous_master: 351 | settings = world.get_settings() 352 | settings.synchronous_mode = False 353 | settings.no_rendering_mode = False 354 | settings.fixed_delta_seconds = None 355 | world.apply_settings(settings) 356 | 357 | print('\ndestroying %d vehicles' % len(vehicles_list)) 358 | client.apply_batch([carla.command.DestroyActor(x) for x in vehicles_list]) 359 | 360 | # stop walker controllers (list is [controller, actor, controller, actor ...]) 361 | for i in range(0, len(all_id), 2): 362 | all_actors[i].stop() 363 | 364 | print('\ndestroying %d walkers' % len(walkers_list)) 365 | client.apply_batch([carla.command.DestroyActor(x) for x in all_id]) 366 | 367 | time.sleep(0.5) 368 | 369 | if __name__ == '__main__': 370 | 371 | try: 372 | main() 373 | except KeyboardInterrupt: 374 | pass 375 | finally: 376 | print('\ndone.') 377 | -------------------------------------------------------------------------------- /src/navigation/examples/generate_traffic.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright (c) 2021 Computer Vision Center (CVC) at the Universitat Autonoma de 4 | # Barcelona (UAB). 5 | # 6 | # This work is licensed under the terms of the MIT license. 7 | # For a copy, see <https://opensource.org/licenses/MIT>.
8 | 9 | """Example script to generate traffic in the simulation""" 10 | 11 | import glob 12 | import os 13 | import sys 14 | import time 15 | 16 | try: 17 | sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % ( 18 | sys.version_info.major, 19 | sys.version_info.minor, 20 | 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0]) 21 | except IndexError: 22 | pass 23 | 24 | import carla 25 | 26 | from carla import VehicleLightState as vls 27 | 28 | import argparse 29 | import logging 30 | from numpy import random 31 | 32 | def get_actor_blueprints(world, filter, generation): 33 | bps = world.get_blueprint_library().filter(filter) 34 | 35 | if generation.lower() == "all": 36 | return bps 37 | 38 | # If the filter returns only one bp, we assume that this is the one needed 39 | # and therefore, we ignore the generation 40 | if len(bps) == 1: 41 | return bps 42 | 43 | try: 44 | int_generation = int(generation) 45 | # Check if generation is in available generations 46 | if int_generation in [1, 2]: 47 | bps = [x for x in bps if int(x.get_attribute('generation')) == int_generation] 48 | return bps 49 | else: 50 | print(" Warning! Actor Generation is not valid. No actor will be spawned.") 51 | return [] 52 | except: 53 | print(" Warning! Actor Generation is not valid. No actor will be spawned.") 54 | return [] 55 | 56 | def main(): 57 | argparser = argparse.ArgumentParser( 58 | description=__doc__) 59 | argparser.add_argument( 60 | '--host', 61 | metavar='H', 62 | default='127.0.0.1', 63 | help='IP of the host server (default: 127.0.0.1)') 64 | argparser.add_argument( 65 | '-p', '--port', 66 | metavar='P', 67 | default=2000, 68 | type=int, 69 | help='TCP port to listen to (default: 2000)') 70 | argparser.add_argument( 71 | '-n', '--number-of-vehicles', 72 | metavar='N', 73 | default=0, 74 | type=int, 75 | help='Number of vehicles (default: 0)') 76 | argparser.add_argument( 77 | '-w', '--number-of-walkers', 78 | metavar='W', 79 | default=0, 80 | 81 | 82 | type=int, 83 | help='Number of walkers (default: 0)') 84 | argparser.add_argument( 85 | '--safe', 86 | action='store_true', 87 | help='Avoid spawning vehicles prone to accidents') 88 | argparser.add_argument( 89 | '--filterv', 90 | metavar='PATTERN', 91 | default='vehicle.*', 92 | help='Filter vehicle model (default: "vehicle.*")') 93 | argparser.add_argument( 94 | '--generationv', 95 | metavar='G', 96 | default='All', 97 | help='restrict to certain vehicle generation (values: "1","2","All" - default: "All")') 98 | argparser.add_argument( 99 | '--filterw', 100 | metavar='PATTERN', 101 | default='walker.pedestrian.*', 102 | help='Filter pedestrian type (default: "walker.pedestrian.*")') 103 | argparser.add_argument( 104 | '--generationw', 105 | metavar='G', 106 | default='2', 107 | help='restrict to certain pedestrian generation (values: "1","2","All" - default: "2")') 108 | argparser.add_argument( 109 | '--tm-port', 110 | metavar='P', 111 | default=8000, 112 | type=int, 113 | help='Port to communicate with TM (default: 8000)') 114 | argparser.add_argument( 115 | '--asynch', 116 | action='store_true', 117 | help='Activate asynchronous mode execution') 118 | argparser.add_argument( 119 | '--hybrid', 120 | action='store_true', 121 | help='Activate hybrid mode for Traffic Manager') 122 | argparser.add_argument( 123 | '-s', '--seed', 124 | metavar='S', 125 | type=int, 126 | help='Set random device seed and deterministic mode for Traffic Manager') 127 | argparser.add_argument( 128 | '--seedw', 129 | metavar='S', 130 | default=0, 131 | type=int, 
132 | help='Set the seed for pedestrians module') 133 | argparser.add_argument( 134 | '--car-lights-on', 135 | action='store_true', 136 | default=False, 137 | help='Enable automatic car light management') 138 | argparser.add_argument( 139 | '--hero', 140 | action='store_true', 141 | default=False, 142 | help='Set one of the vehicles as hero') 143 | argparser.add_argument( 144 | '--respawn', 145 | action='store_true', 146 | default=False, 147 | help='Automatically respawn dormant vehicles (only in large maps)') 148 | argparser.add_argument( 149 | '--no-rendering', 150 | action='store_true', 151 | default=False, 152 | help='Activate no rendering mode') 153 | 154 | args = argparser.parse_args() 155 | 156 | logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO) 157 | 158 | vehicles_list = [] 159 | walkers_list = [] 160 | all_id = [] 161 | client = carla.Client(args.host, args.port) 162 | client.set_timeout(10.0) 163 | synchronous_master = False 164 | random.seed(args.seed if args.seed is not None else int(time.time())) 165 | 166 | try: 167 | world = client.get_world() 168 | 169 | traffic_manager = client.get_trafficmanager(args.tm_port) 170 | traffic_manager.set_global_distance_to_leading_vehicle(2.5) 171 | if args.respawn: 172 | traffic_manager.set_respawn_dormant_vehicles(True) 173 | if args.hybrid: 174 | traffic_manager.set_hybrid_physics_mode(True) 175 | traffic_manager.set_hybrid_physics_radius(70.0) 176 | if args.seed is not None: 177 | traffic_manager.set_random_device_seed(args.seed) 178 | 179 | settings = world.get_settings() 180 | if not args.asynch: 181 | traffic_manager.set_synchronous_mode(True) 182 | if not settings.synchronous_mode: 183 | synchronous_master = True 184 | settings.synchronous_mode = True 185 | settings.fixed_delta_seconds = 0.05 186 | else: 187 | synchronous_master = False 188 | else: 189 | print("You are currently in asynchronous mode. If this is a traffic simulation, \ 190 | you could experience some issues. If it's not working correctly, switch to synchronous \ 191 | mode by using traffic_manager.set_synchronous_mode(True)") 192 | 193 | if args.no_rendering: 194 | settings.no_rendering_mode = True 195 | world.apply_settings(settings) 196 | 197 | blueprints = get_actor_blueprints(world, args.filterv, args.generationv) 198 | blueprintsWalkers = get_actor_blueprints(world, args.filterw, args.generationw) 199 | 200 | if args.safe: 201 | blueprints = [x for x in blueprints if x.get_attribute('base_type') == 'car'] 202 | 203 | blueprints = sorted(blueprints, key=lambda bp: bp.id) 204 | 205 | spawn_points = world.get_map().get_spawn_points() 206 | number_of_spawn_points = len(spawn_points) 207 | 208 | if args.number_of_vehicles < number_of_spawn_points: 209 | random.shuffle(spawn_points) 210 | elif args.number_of_vehicles > number_of_spawn_points: 211 | msg = 'requested %d vehicles, but could only find %d spawn points' 212 | logging.warning(msg, args.number_of_vehicles, number_of_spawn_points) 213 | args.number_of_vehicles = number_of_spawn_points 214 | 215 | # @todo cannot import these directly. 
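# Note on the synchronous_master flag computed above: only the client that
# switched the world into synchronous mode should advance the simulation, so
# it calls world.tick() while every other client blocks on
# world.wait_for_tick(). The main loop further down follows exactly this rule;
# a minimal sketch of the same pattern:
#     if not args.asynch and synchronous_master:
#         world.tick()            # this client owns the tick
#     else:
#         world.wait_for_tick()   # another client (or the server) drives time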
216 | SpawnActor = carla.command.SpawnActor 217 | SetAutopilot = carla.command.SetAutopilot 218 | FutureActor = carla.command.FutureActor 219 | 220 | 221 | def draw_waypoints(waypoints, road_id=None, life_time=10.0): 222 | for waypoint in waypoints: 223 | 224 | if (waypoint.road_id == road_id): 225 | world.debug.draw_string(waypoint.transform.location, 'O', draw_shadow=False, 226 | color=carla.Color(r=0, g=255, b=255), life_time=life_time, 227 | persistent_lines=True) 228 | 229 | world = client.get_world() 230 | # Generate waypoints across the map at a spacing of 1 meter 231 | waypoints = world.get_map().generate_waypoints(distance=1.0) 232 | # life_time is how long the drawn debug markers remain visible 233 | draw_waypoints(waypoints, road_id=5) 234 | draw_waypoints(waypoints, road_id=3) 235 | draw_waypoints(waypoints, road_id=25) 236 | # draw_waypoints(waypoints, road_id=19) 237 | # draw_waypoints(waypoints, road_id=20) 238 | 239 | # -------------- 240 | # Spawn vehicles 241 | # -------------- 242 | batch = [] 243 | hero = args.hero 244 | for n, transform in enumerate(spawn_points): 245 | if n >= args.number_of_vehicles: 246 | break 247 | blueprint = random.choice(blueprints) 248 | if blueprint.has_attribute('color'): 249 | color = random.choice(blueprint.get_attribute('color').recommended_values) 250 | blueprint.set_attribute('color', color) 251 | if blueprint.has_attribute('driver_id'): 252 | driver_id = random.choice(blueprint.get_attribute('driver_id').recommended_values) 253 | blueprint.set_attribute('driver_id', driver_id) 254 | if hero: 255 | blueprint.set_attribute('role_name', 'hero') 256 | hero = False 257 | else: 258 | blueprint.set_attribute('role_name', 'autopilot') 259 | 260 | # spawn the cars and set their autopilot and light state all together 261 | batch.append(SpawnActor(blueprint, transform) 262 | .then(SetAutopilot(FutureActor, True, traffic_manager.get_port()))) 263 | 264 | for response in client.apply_batch_sync(batch, synchronous_master): 265 | if response.error: 266 | logging.error(response.error) 267 | else: 268 | vehicles_list.append(response.actor_id) 269 | 270 | # Set automatic vehicle lights update if specified 271 | if args.car_lights_on: 272 | all_vehicle_actors = world.get_actors(vehicles_list) 273 | for actor in all_vehicle_actors: 274 | traffic_manager.update_vehicle_lights(actor, True) 275 | 276 | # ------------- 277 | # Spawn Walkers 278 | # ------------- 279 | # some settings 280 | percentagePedestriansRunning = 0.0 # how many pedestrians will run 281 | percentagePedestriansCrossing = 0.0 # how many pedestrians will walk through the road 282 | if args.seedw: 283 | world.set_pedestrians_seed(args.seedw) 284 | random.seed(args.seedw) 285 | # 1. take all the random locations to spawn 286 | spawn_points = [] 287 | for i in range(args.number_of_walkers): 288 | spawn_point = carla.Transform() 289 | loc = world.get_random_location_from_navigation() 290 | if (loc != None): 291 | spawn_point.location = loc 292 | spawn_points.append(spawn_point) 293 | # 2. 
we spawn the walker object 294 | batch = [] 295 | walker_speed = [] 296 | for spawn_point in spawn_points: 297 | walker_bp = random.choice(blueprintsWalkers) 298 | # set as not invincible 299 | if walker_bp.has_attribute('is_invincible'): 300 | walker_bp.set_attribute('is_invincible', 'false') 301 | # set the max speed 302 | if walker_bp.has_attribute('speed'): 303 | if (random.random() > percentagePedestriansRunning): 304 | # walking 305 | walker_speed.append(walker_bp.get_attribute('speed').recommended_values[1]) 306 | else: 307 | # running 308 | walker_speed.append(walker_bp.get_attribute('speed').recommended_values[2]) 309 | else: 310 | print("Walker has no speed") 311 | walker_speed.append(0.0) 312 | batch.append(SpawnActor(walker_bp, spawn_point)) 313 | results = client.apply_batch_sync(batch, True) 314 | walker_speed2 = [] 315 | for i in range(len(results)): 316 | if results[i].error: 317 | logging.error(results[i].error) 318 | else: 319 | walkers_list.append({"id": results[i].actor_id}) 320 | walker_speed2.append(walker_speed[i]) 321 | walker_speed = walker_speed2 322 | # 3. we spawn the walker controller 323 | batch = [] 324 | walker_controller_bp = world.get_blueprint_library().find('controller.ai.walker') 325 | for i in range(len(walkers_list)): 326 | batch.append(SpawnActor(walker_controller_bp, carla.Transform(), walkers_list[i]["id"])) 327 | results = client.apply_batch_sync(batch, True) 328 | for i in range(len(results)): 329 | if results[i].error: 330 | logging.error(results[i].error) 331 | else: 332 | walkers_list[i]["con"] = results[i].actor_id 333 | # 4. we put together the walkers and controllers id to get the objects from their id 334 | for i in range(len(walkers_list)): 335 | all_id.append(walkers_list[i]["con"]) 336 | all_id.append(walkers_list[i]["id"]) 337 | all_actors = world.get_actors(all_id) 338 | 339 | # wait for a tick to ensure client receives the last transform of the walkers we have just created 340 | if args.asynch or not synchronous_master: 341 | world.wait_for_tick() 342 | else: 343 | world.tick() 344 | 345 | # 5. initialize each controller and set target to walk to (list is [controller, actor, controller, actor ...]) 346 | # set how many pedestrians can cross the road 347 | world.set_pedestrians_cross_factor(percentagePedestriansCrossing) 348 | for i in range(0, len(all_id), 2): 349 | # start walker 350 | all_actors[i].start() 351 | # set walk to random point 352 | all_actors[i].go_to_location(world.get_random_location_from_navigation()) 353 | # max speed 354 | all_actors[i].set_max_speed(float(walker_speed[int(i/2)])) 355 | 356 | print('spawned %d vehicles and %d walkers, press Ctrl+C to exit.' 
% (len(vehicles_list), len(walkers_list))) 357 | 358 | # Example of how to use Traffic Manager parameters 359 | traffic_manager.global_percentage_speed_difference(30.0) 360 | 361 | while True: 362 | if not args.asynch and synchronous_master: 363 | world.tick() 364 | else: 365 | world.wait_for_tick() 366 | 367 | finally: 368 | 369 | if not args.asynch and synchronous_master: 370 | settings = world.get_settings() 371 | settings.synchronous_mode = False 372 | settings.no_rendering_mode = False 373 | settings.fixed_delta_seconds = None 374 | world.apply_settings(settings) 375 | 376 | print('\ndestroying %d vehicles' % len(vehicles_list)) 377 | client.apply_batch([carla.command.DestroyActor(x) for x in vehicles_list]) 378 | 379 | # stop walker controllers (list is [controller, actor, controller, actor ...]) 380 | for i in range(0, len(all_id), 2): 381 | all_actors[i].stop() 382 | 383 | print('\ndestroying %d walkers' % len(walkers_list)) 384 | client.apply_batch([carla.command.DestroyActor(x) for x in all_id]) 385 | 386 | time.sleep(0.5) 387 | 388 | if __name__ == '__main__': 389 | 390 | try: 391 | main() 392 | except KeyboardInterrupt: 393 | pass 394 | finally: 395 | print('\ndone.') 396 | --------------------------------------------------------------------------------
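A minimal sketch of driving the LocalPlanner from /src/navigation/local_planner.py above in a standalone script. Everything here is illustrative rather than part of the repository: it assumes a CARLA server on localhost:2000, at least one vehicle already spawned in the world, and that the planner is importable as agents.navigation.local_planner (the same package layout local_planner.py itself imports from).

    import carla
    from agents.navigation.local_planner import LocalPlanner  # import path assumed

    client = carla.Client('127.0.0.1', 2000)
    client.set_timeout(10.0)
    world = client.get_world()

    # Assumption: at least one vehicle actor already exists in the world.
    ego = world.get_actors().filter('vehicle.*')[0]

    # With no global plan set, the planner extends its waypoint queue on the
    # fly and picks randomly at intersections (see _compute_next_waypoints),
    # so this loop runs until the road runs out and the queue drains.
    planner = LocalPlanner(ego, opt_dict={'target_speed': 30.0, 'dt': 0.05})
    while not planner.done():
        ego.apply_control(planner.run_step())  # one lateral + longitudinal PID step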