├── helper ├── CARLA_PATH.txt ├── list_procs.py ├── CollisionManager.py ├── CarlaDebug.py ├── CarlaHelper.py └── CameraManager.py ├── docs └── thumbnail_CARLA_RAY.jpg ├── core ├── CarlaCore1.py ├── CarlaCore2.py └── BaseCarlaCore.py ├── algorithms ├── apex_vision.py ├── a3c_vision.py └── ppo_custom.py ├── readme.md ├── requirements.yml ├── carla_env.py ├── experiments ├── experiment3.py ├── experiment1.py ├── experiment2.py └── base_experiment.py ├── test_code ├── synchronous_mode.py └── manual_control.py └── LICENSE /helper/CARLA_PATH.txt: -------------------------------------------------------------------------------- 1 | /home/salstouhi/Downloads/CARLA_0.9.6 2 | -------------------------------------------------------------------------------- /docs/thumbnail_CARLA_RAY.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/layssi/Carla_Ray_Rlib/HEAD/docs/thumbnail_CARLA_RAY.jpg -------------------------------------------------------------------------------- /core/CarlaCore1.py: -------------------------------------------------------------------------------- 1 | from .BaseCarlaCore import BaseCarlaCore, CORE_CONFIG 2 | 3 | class CarlaCore(BaseCarlaCore): 4 | pass 5 | -------------------------------------------------------------------------------- /core/CarlaCore2.py: -------------------------------------------------------------------------------- 1 | """ 2 | This is a core that is used for the files under "Test_Code" folder. It just starts the server. 
3 | """ 4 | 5 | from .BaseCarlaCore import BaseCarlaCore, CORE_CONFIG 6 | 7 | class CarlaCore(BaseCarlaCore): 8 | def __init__(self, environment_config, experiment_config, core_config=None): 9 | """ 10 | Initialize the server, clients, hero and sensors 11 | :param environment_config: Environment Configuration 12 | :param experiment_config: Experiment Configuration 13 | """ 14 | self.core_config = core_config 15 | self.environment_config = environment_config 16 | self.experiment_config = experiment_config 17 | super().init_server() -------------------------------------------------------------------------------- /algorithms/apex_vision.py: -------------------------------------------------------------------------------- 1 | """Apex Algorithm. Not Tested yet with Carla 2 | You can visualize experiment results in ~/ray_results using TensorBoard. 3 | """ 4 | 5 | from __future__ import absolute_import 6 | from __future__ import division 7 | from __future__ import print_function 8 | 9 | 10 | import ray 11 | from ray import tune 12 | from carla_env import CarlaEnv 13 | from ray.rllib.models import FullyConnectedNetwork, Model, ModelCatalog 14 | from ray.tune import grid_search, run_experiments 15 | from helper.CarlaHelper import kill_server 16 | 17 | 18 | ENV_CONFIG = {"RAY": True, "DEBUG_MODE": False} # Are we running an experiment in Ray 19 | 20 | env_config = ENV_CONFIG.copy() 21 | env_config.update( 22 | { 23 | "RAY": True, # Are we running an experiment in Ray 24 | "DEBUG_MODE": False, 25 | "Experiment": "experiment2", 26 | 27 | } 28 | ) 29 | 30 | 31 | if __name__ == "__main__": 32 | print("THIS EXPERIMENT HAS NOT BEEN FULLY TESTED") 33 | kill_server() 34 | ray.init() 35 | run_experiments({ 36 | "apex-vision": { 37 | "run": "APEX", 38 | "env": CarlaEnv, 39 | "stop":{"episodes_total":30000000},#"training_iteration":5000000}, 40 | "checkpoint_at_end":True, 41 | "checkpoint_freq":100, 42 | "config": { 43 | "env_config": env_config, 44 | "num_gpus_per_worker": 0, 45 | 
"num_cpus_per_worker":2, 46 | "buffer_size":20000, 47 | "num_workers": 2, 48 | }, 49 | }, 50 | }, 51 | resume= False, 52 | ) -------------------------------------------------------------------------------- /algorithms/a3c_vision.py: -------------------------------------------------------------------------------- 1 | """A3C Algorithm. Tested yet with Carla. 2 | You can visualize experiment results in ~/ray_results using TensorBoard. 3 | """ 4 | 5 | from __future__ import absolute_import 6 | from __future__ import division 7 | from __future__ import print_function 8 | 9 | 10 | import ray 11 | from ray import tune 12 | from carla_env import CarlaEnv 13 | from ray.rllib.models import FullyConnectedNetwork, Model, ModelCatalog 14 | from ray.tune import grid_search, run_experiments 15 | from helper.CarlaHelper import kill_server 16 | 17 | ENV_CONFIG = {"RAY": True, # Are we running an experiment in Ray 18 | "DEBUG_MODE": False, 19 | "Experiment": "experiment3", 20 | } 21 | 22 | env_config = ENV_CONFIG.copy() 23 | env_config.update( 24 | { 25 | "RAY": True, # Are we running an experiment in Ray 26 | "DEBUG_MODE": False, 27 | } 28 | ) 29 | 30 | 31 | if __name__ == "__main__": 32 | kill_server() 33 | ray.init() 34 | run_experiments({ 35 | "carla-a3c": { 36 | "run": "A3C", 37 | "env": CarlaEnv, 38 | "stop": {"episodes_total":30000000}, #"training_iteration":5000000}, 39 | "checkpoint_at_end": True, 40 | "checkpoint_freq": 1, 41 | "config": { 42 | "env_config": env_config, 43 | "num_gpus_per_worker": 0.25, 44 | "num_cpus_per_worker": 2, 45 | "num_workers": 3, 46 | "gamma": 0.99, # random.choice([0.5, 0.8, 0.9, 0.95, 0.99]), 47 | 48 | }, 49 | }, 50 | }, 51 | resume= False, 52 | ) 53 | -------------------------------------------------------------------------------- /helper/list_procs.py: -------------------------------------------------------------------------------- 1 | # """ 2 | # Usage: 3 | # 4 | # >>> for pid, name in search_procs_by_name("python").items(): 5 | # ... 
import psutil


def proc_names():
    """Return a mapping of pid -> process name for all running processes.

    Processes that exit (or turn into zombies) while we iterate are
    silently skipped instead of crashing the scan.
    """
    names = {}
    for proc in psutil.process_iter():
        try:
            names[proc.pid] = proc.name()
        except (psutil.NoSuchProcess, psutil.ZombieProcess):
            # Process died between enumeration and the name() call.
            continue
    return names


def proc_cmdlines():
    """Return a mapping of pid -> command-line list.

    The value is None when access to the process is denied; processes
    that exit mid-iteration are skipped entirely.
    """
    cmdlines = {}
    for proc in psutil.process_iter():
        try:
            cmdlines[proc.pid] = proc.cmdline()
        except psutil.AccessDenied:
            # Keep the pid but mark the command line as unreadable.
            cmdlines[proc.pid] = None
        except (psutil.NoSuchProcess, psutil.ZombieProcess):
            # Process died between enumeration and the cmdline() call.
            continue
    return cmdlines


def to_regex(regex):
    """Compile *regex* if it is a plain string; pass compiled patterns through.

    Anything exposing a ``search`` attribute is treated as an
    already-compiled pattern.
    """
    if not hasattr(regex, "search"):
        import re

        regex = re.compile(regex)
    return regex


def search_procs_by_name(regex):
    """Return {pid: name} for processes whose name matches *regex*."""
    pid_names = {}
    regex = to_regex(regex)
    for pid, name in proc_names().items():
        if regex.search(name):
            pid_names[pid] = name
    return pid_names


def search_procs_by_cmdline(regex):
    """Return {pid: cmdline} for processes with a matching command-line part."""
    pid_cmdlines = {}
    regex = to_regex(regex)
    for pid, cmdline in proc_cmdlines().items():
        if cmdline is not None:
            for part in cmdline:
                if regex.search(part):
                    pid_cmdlines[pid] = cmdline
                    break
    return pid_cmdlines
This currently works with experiment one (1 Dimensional Observation) 2 | 3 | This example shows: 4 | - using a custom environment 5 | - using Tune for grid search 6 | You can visualize experiment results in ~/ray_results using TensorBoard. 7 | 8 | THIS EXPERIMENT HAS NOT BEEN FULLY TESTED 9 | 10 | """ 11 | from __future__ import absolute_import 12 | from __future__ import division 13 | from __future__ import print_function 14 | 15 | import ray 16 | from ray import tune 17 | from carla_env import CarlaEnv 18 | from ray.rllib.models import FullyConnectedNetwork, Model, ModelCatalog 19 | from ray.tune import run_experiments, grid_search 20 | from helper.CarlaHelper import kill_server 21 | 22 | 23 | 24 | ENV_CONFIG = {"RAY": True, "DEBUG_MODE": False} # Are we running an experiment in Ray 25 | 26 | env_config = ENV_CONFIG.copy() 27 | env_config.update( 28 | { 29 | "RAY": True, # Are we running an experiment in Ray 30 | "DEBUG_MODE": False, 31 | "Experiment": "experiment1", 32 | } 33 | ) 34 | 35 | 36 | 37 | 38 | 39 | class CustomModel(Model): 40 | 41 | """Example of a custom model. 42 | 43 | This model just delegates to the built-in fcnet. 
44 | """ 45 | 46 | def _build_layers_v2(self, input_dict, num_outputs, options): 47 | print(input_dict) 48 | self.obs_in = input_dict["obs"] 49 | self.fcnet = FullyConnectedNetwork( 50 | input_dict, self.obs_space, self.action_space, num_outputs, options 51 | ) 52 | return self.fcnet.outputs, self.fcnet.last_layer 53 | 54 | 55 | if __name__ == "__main__": 56 | print("THIS EXPERIMENT HAS NOT BEEN FULLY TESTED") 57 | kill_server() 58 | ray.init() 59 | ModelCatalog.register_custom_model("my_model", CustomModel) 60 | tune.run( 61 | "PPO", 62 | stop={"timesteps_total": 1000000}, 63 | checkpoint_freq=1, 64 | config={ 65 | "env": CarlaEnv, # CarlaEnv,SimpleCorridor, # or "corridor" if registered above 66 | "model": {"custom_model": "my_model"}, 67 | "lr": grid_search([1e-2, 1e-4, 1e-6]), # try different lrs 68 | "num_workers": 4, # parallelism 69 | "num_gpus_per_worker": 0.2, 70 | "env_config": env_config, 71 | }, 72 | resume=False, 73 | ) 74 | -------------------------------------------------------------------------------- /helper/CollisionManager.py: -------------------------------------------------------------------------------- 1 | import math 2 | import weakref 3 | 4 | import carla 5 | 6 | import queue 7 | 8 | # from helper.CarlaDebug import get_actor_display_name 9 | 10 | 11 | class CollisionManager(object): 12 | def __init__(self, parent_actor, synchronous_mode=True): 13 | self.sensor = None 14 | self.intensity = False 15 | self._parent = parent_actor 16 | self.synchronous_mode = synchronous_mode 17 | self.world = self._parent.get_world() 18 | self.bp = self.world.get_blueprint_library().find("sensor.other.collision") 19 | self.world = self._parent.get_world() 20 | self.sensor = self.world.spawn_actor( 21 | self.bp, carla.Transform(), attach_to=self._parent 22 | ) 23 | # We need to pass the lambda a weak reference to self to avoid circular 24 | # reference. 
25 | if not self.synchronous_mode: 26 | weak_self = weakref.ref(self) 27 | self.sensor.listen( 28 | lambda event: CollisionManager._on_collision(weak_self, event) 29 | ) 30 | else: 31 | self.collision_queue = None 32 | self.collision_queue = queue.Queue() 33 | self.sensor.listen(self.collision_queue.put) 34 | 35 | def read_collision_queue(self): 36 | weak_self = weakref.ref(self) 37 | if not self.synchronous_mode: 38 | return self.intensity 39 | else: 40 | try: 41 | CollisionManager._on_collision( 42 | weak_self, self.collision_queue.get(False) 43 | ) 44 | except: 45 | # print("We could not get collision sensor") 46 | # Ignore empty Que 47 | pass 48 | 49 | def destroy_sensor(self): 50 | if self.sensor is not None: 51 | self.sensor.destroy() 52 | self.sensor = None 53 | self.intensity = False 54 | 55 | def get_collision_data(self): 56 | if self.intensity is not False: 57 | return True 58 | else: 59 | return False 60 | 61 | @staticmethod 62 | def _on_collision(weak_self, event): 63 | self = weak_self() 64 | if not self: 65 | return 66 | # actor_type = get_actor_display_name(event.other_actor) 67 | impulse = event.normal_impulse 68 | self.intensity = math.sqrt(impulse.x ** 2 + impulse.y ** 2 + impulse.z ** 2) 69 | -------------------------------------------------------------------------------- /helper/CarlaDebug.py: -------------------------------------------------------------------------------- 1 | def print_spawn_point(world): 2 | spawn_points = list(world.get_map().get_spawn_points()) 3 | for i in range(len(spawn_points)): 4 | print( 5 | "spawn_point:", 6 | i, 7 | "x:", 8 | spawn_points[i].location.x, 9 | "y:", 10 | spawn_points[i].location.y, 11 | "z:", 12 | spawn_points[i].location.z, 13 | "pitch:", 14 | spawn_points[i].rotation.pitch, 15 | "roll:", 16 | spawn_points[i].rotation.roll, 17 | "yaw:", 18 | spawn_points[i].rotation.yaw, 19 | ) 20 | 21 | 22 | def draw_spawn_points(world): 23 | spawn_points = list(world.get_map().get_spawn_points()) 24 | i = 0 25 | for 
def split_actors(actors):
    """Partition *actors* into vehicles, traffic lights, speed limits, walkers.

    Classification is by substring of ``actor.type_id``; the first matching
    category wins and actors matching no category are dropped.

    :param actors: iterable of CARLA actors
    :return: tuple (vehicles, traffic_lights, speed_limits, walkers)
    """
    vehicles, traffic_lights, speed_limits, walkers = [], [], [], []
    # Bucket order mirrors the original elif chain: an actor lands in the
    # first bucket whose keyword appears in its type_id.
    buckets = (
        ("vehicle", vehicles),
        ("traffic_light", traffic_lights),
        ("speed_limit", speed_limits),
        ("walker", walkers),
    )
    for actor in actors:
        for keyword, bucket in buckets:
            if keyword in actor.type_id:
                bucket.append(actor)
                break
    return vehicles, traffic_lights, speed_limits, walkers
-------------------------------------------------------------------------------- 1 | # Carla-Ray-RLib 2 | Reinforcement Learning with Rlib and Carla 3 | 4 | ![Cloud Based Autonomous Driving RL](https://github.com/layssi/Carla_Ray_Rlib/blob/master/docs/thumbnail_CARLA_RAY.jpg "Cloud Based Autonomous Driving RL") 5 | 6 | 7 | # Setup Carla 8 | ## Download Carla Binaries 9 | ### (Option 1) Release 0.9.6: 10 | wget http://carla-assets-internal.s3.amazonaws.com/Releases/Linux/CARLA_0.9.6.tar.gz 11 | **Optional:** wget http://carla-assets-internal.s3.amazonaws.com/Releases/Linux/Town06_0.9.6.tar.gz 12 | **Optional:** wget http://carla-assets-internal.s3.amazonaws.com/Releases/Linux/Town07_0.9.6.tar.gz 13 | 14 | ### (Option 2)Latest Build (Warning. Not Tested) 15 | http://carla-assets-internal.s3.amazonaws.com/Releases/Linux/Dev/CARLA_Latest.tar.gz 16 | 17 | ## Setup Carla 18 | ### To lower Carla GPU usage (This might be already set): 19 | Under your Carla/CARLAUE4/Config, edit *DefaultEngine.ini*: 20 | **r.TextureStreaming=True** 21 | 22 | ### (Optional) Mouse Unlock 23 | To prevent the freezing of the mouse (which is very annoying).Under your Carla/CARLAUE4/Config, edit *"DefaultInput.ini"*: 24 | **bCaptureMouseOnLaunch=False** 25 | **DefaultViewportMouseCaptureMode=CaptureDuringMouseDown** 26 | **bDefaultViewportMouseLock=False** 27 | **DefaultViewportMouseLockMode=DoNotLock** 28 | 29 | # Setup Environment 30 | ## Install Environment 31 | Download and install conda: https://www.anaconda.com/distribution/ 32 | conda env create --name [**enviroment-name**] -f=requirements.yml 33 | conda activate [**enviroment-name**] 34 | conda install -c anaconda tensorflow-gpu 35 | 36 | ## Update Carla path 37 | In "helper" folder, change the location of your Carla path in file "CARLA_PATH.txt". Example: ~/home/Carla_Simulator 38 | ## Pycharm setup 39 | Start a new pycharm project. 40 | Point the project to this directory. 41 | Choose an existing anaconda enviroment. 
Pick the enviroment created in [**enviroment-name**] 42 | It will look like "/anaconda/env/[**enviroment-name**]/bin/python 43 | Open the folder and run "carla_env.py". Also run "a3c_vision.py" 44 | 45 | # Project Organization 46 | Algorithms: Allow you to setup different RL algorithms and models 47 | Core: Has Base Carla Functionality and can be extended or modified 48 | experiments: Allows you to setup your experiment. Example, observations and rewards 49 | helper: Help functions that are used used by multiple modules 50 | Test_code: Couple Carla script to allow you to run and test the core 51 | 52 | # Cloud Setup 53 | There is an ami available on ec2 with everything setup and no display. 54 | Find "ami-070f500a304414585" and start the machine. 55 | Make sure your security setting have the inbound and outbound ports open. 56 | Update the code to the latest. 57 | Run "source ~/.bashrc" and run "python3 carla_env.py" or "python3 vision_algorithm.py" 58 | 59 | # ToDO 60 | 1. Setup the AMI to properly run RLIB on the cloud with autoscales 61 | 2. Verify experiment 1 and experiment 2 and verify apex_vision and ppo_custom 62 | 3. 
def update_config(d, u):
    """Recursively merge mapping *u* into dict *d* (in place).

    Nested mappings are merged key by key; any other value in *u*
    overwrites the corresponding entry in *d*.

    :param d: destination dict, mutated in place
    :param u: mapping of overrides
    :return: the (mutated) destination dict *d*
    """
    # collections.Mapping was removed in Python 3.10; the ABC lives in
    # collections.abc.  Imported locally to keep this function self-contained.
    from collections.abc import Mapping

    for k, v in u.items():
        if isinstance(v, Mapping):
            d[k] = update_config(d.get(k, {}), v)
        else:
            d[k] = v
    return d
def spawn_vehicle_at(transform, vehicle_blueprint, world, autopilot=True, max_time=0.1):
    """
    Try to spawn a vehicle and wait until the world actually reports it
    before declaring success.

    :param transform: Location and Orientation of vehicle
    :param vehicle_blueprint: Vehicle Blueprint (We assign a random color)
    :param world: World
    :param autopilot: If True, AutoPilot is Enabled. If False, autopilot is disabled
    :param max_time: Maximum time in s to wait before giving up on the spawn
    :return: the spawned vehicle actor on success, False otherwise.
        NOTE(review): despite the mixed return type, callers can truth-test
        the result; on timeout False is returned even though the actor may
        already exist in the world -- confirm cleanup happens elsewhere.
    """

    # If the vehicle can not be spawned, it is OK
    previous_number_of_vehicles = len(world.get_actors().filter("*vehicle*"))

    # Get a random color
    color = random.choice(vehicle_blueprint.get_attribute("color").recommended_values)
    vehicle_blueprint.set_attribute("color", color)

    vehicle = world.try_spawn_actor(vehicle_blueprint, transform)

    wait_tick = 0.002  # Wait of 2ms to recheck if a vehicle is spawned
    if vehicle is not None:
        vehicle.set_autopilot(autopilot)
        # vehicle.set_simulate_physics(not(world.get_settings().synchronous_mode)) #Disable physics in synchronous mode
        world.tick()  # Tick the world so it creates the vehicle
        # Poll until the actor count grows; max_time acts as the deadline.
        while previous_number_of_vehicles >= len(
            world.get_actors().filter("*vehicle*")
        ):
            time.sleep(wait_tick)  # Wait 2ms and check again
            max_time = max_time - wait_tick
            if max_time <= 0:  # Check for expiration time
                return False
        return vehicle
    return False
/requirements.yml: -------------------------------------------------------------------------------- 1 | name: raytest2 2 | channels: 3 | - conda-forge 4 | - anaconda 5 | - defaults 6 | dependencies: 7 | - _libgcc_mutex=0.1=main 8 | - _tflow_select=2.1.0=gpu 9 | - absl-py=0.7.1=py36_0 10 | - appdirs=1.4.3=py_1 11 | - astor=0.7.1=py_0 12 | - astroid=2.2.5=py36_0 13 | - attrs=19.1.0=py_0 14 | - black=19.3b0=py_0 15 | - blas=1.0=mkl 16 | - bzip2=1.0.7=h7b6447c_0 17 | - c-ares=1.15.0=h14c3975_1001 18 | - ca-certificates=2019.5.15=0 19 | - cairo=1.14.12=h8948797_3 20 | - certifi=2019.6.16=py36_0 21 | - click=7.0=py_0 22 | - cudatoolkit=9.2=0 23 | - cudnn=7.6.0=cuda9.2_0 24 | - cupti=9.2.148=0 25 | - cycler=0.10.0=py36_0 26 | - dbus=1.13.6=h746ee38_0 27 | - decorator=4.4.0=py36_1 28 | - expat=2.2.6=he6710b0_0 29 | - ffmpeg=4.0=hcdf2ecd_0 30 | - fontconfig=2.13.0=h9420a91_0 31 | - freeglut=3.0.0=hf484d3e_5 32 | - freetype=2.9.1=h8a8886c_1 33 | - gast=0.2.2=py_0 34 | - glib=2.56.2=hd408876_0 35 | - graphite2=1.3.13=h23475e2_0 36 | - gst-plugins-base=1.14.0=hbbd80ab_1 37 | - gstreamer=1.14.0=hb453b48_1 38 | - h5py=2.8.0=py36h989c5e5_3 39 | - harfbuzz=1.8.8=hffaf4a1_0 40 | - hdf5=1.10.2=hba1933b_1 41 | - icu=58.2=h9c2bf20_1 42 | - intel-openmp=2019.4=243 43 | - isort=4.3.21=py36_0 44 | - jasper=2.0.14=h07fcdf6_1 45 | - jpeg=9b=h024ee3a_2 46 | - keras-applications=1.0.8=py_0 47 | - keras-preprocessing=1.1.0=py_1 48 | - kiwisolver=1.1.0=py36he6710b0_0 49 | - lazy-object-proxy=1.4.1=py36h7b6447c_0 50 | - libedit=3.1.20181209=hc058e9b_0 51 | - libffi=3.2.1=hd88cf55_4 52 | - libgcc-ng=9.1.0=hdf63c60_0 53 | - libgfortran-ng=7.3.0=hdf63c60_0 54 | - libglu=9.0.0=hf484d3e_1 55 | - libopencv=3.4.2=hb342d67_1 56 | - libopus=1.3=h7b6447c_0 57 | - libpng=1.6.37=hbc83047_0 58 | - libprotobuf=3.8.0=hd408876_0 59 | - libstdcxx-ng=9.1.0=hdf63c60_0 60 | - libtiff=4.0.10=h2733197_2 61 | - libuuid=1.0.3=h1bed415_2 62 | - libvpx=1.7.0=h439df22_0 63 | - libxcb=1.13=h1bed415_1 64 | - 
libxml2=2.9.9=hea5a465_1 65 | - lz4=2.1.6=py36ha8eefa0_1000 66 | - lz4-c=1.8.1.2=h14c3975_0 67 | - markdown=3.1.1=py_0 68 | - matplotlib=3.1.0=py36h5429711_0 69 | - mccabe=0.6.1=py36_1 70 | - mkl=2018.0.3=1 71 | - mkl_fft=1.0.10=py36_0 72 | - mkl_random=1.0.2=py36_0 73 | - mock=3.0.5=py36_0 74 | - ncurses=6.1=he6710b0_1 75 | - networkx=2.3=py_0 76 | - numpy=1.15.4=py36h1d66e8a_0 77 | - numpy-base=1.15.4=py36h81de0dd_0 78 | - opencv=3.4.2=py36h6fd60c2_1 79 | - openssl=1.1.1c=h7b6447c_1 80 | - pcre=8.43=he6710b0_0 81 | - pip=19.1.1=py36_0 82 | - pixman=0.38.0=h7b6447c_0 83 | - protobuf=3.8.0=py36he6710b0_0 84 | - py-opencv=3.4.2=py36hb342d67_1 85 | - pylint=2.3.1=py36_0 86 | - pyparsing=2.4.0=py_0 87 | - pyqt=5.9.2=py36h05f1152_2 88 | - python=3.6.8=h0371630_0 89 | - python-dateutil=2.8.0=py36_0 90 | - pytz=2019.1=py_0 91 | - qt=5.9.7=h5867ecd_1 92 | - readline=7.0=h7b6447c_5 93 | - setuptools=41.0.1=py36_0 94 | - sip=4.19.8=py36hf484d3e_0 95 | - six=1.12.0=py36_1000 96 | - sqlite=3.28.0=h7b6447c_0 97 | - tensorboard=1.13.1=py36hf484d3e_0 98 | - tensorflow=1.13.1=gpu_py36h9b25d83_0 99 | - tensorflow-base=1.13.1=gpu_py36h611c6d2_0 100 | - tensorflow-estimator=1.13.0=py_0 101 | - tensorflow-gpu=1.13.1=h0d30ee6_0 102 | - termcolor=1.1.0=py_2 103 | - tk=8.6.8=hbc83047_0 104 | - toml=0.10.0=py_0 105 | - tornado=6.0.3=py36h7b6447c_0 106 | - typed-ast=1.3.4=py36h7b6447c_0 107 | - werkzeug=0.15.4=py_0 108 | - wheel=0.33.4=py36_0 109 | - wrapt=1.11.2=py36h7b6447c_0 110 | - xz=5.2.4=h14c3975_4 111 | - zlib=1.2.11=h7b6447c_3 112 | - zstd=1.3.7=h0b5b093_0 113 | - pip: 114 | - atomicwrites==1.3.0 115 | - chardet==3.0.4 116 | - cloudpickle==1.2.1 117 | - colorama==0.4.1 118 | - docopt==0.6.2 119 | - filelock==3.0.12 120 | - flatbuffers==1.11 121 | - funcsigs==1.0.2 122 | - future==0.17.1 123 | - grpcio==1.22.0 124 | - gym==0.13.1 125 | - idna==2.8 126 | - importlib-metadata==0.18 127 | - more-itertools==7.1.0 128 | - packaging==19.0 129 | - pandas==0.24.2 130 | - pipreqs==0.4.9 
class CarlaEnv(gym.Env):
    """Gym wrapper around a CARLA experiment.

    The concrete experiment (action/observation spaces, sensors, rewards)
    is loaded dynamically from the ``experiments`` package, selected by the
    ``"Experiment"`` entry of the configuration dict.
    """

    def __init__(self, config=None):
        """
        :param config: environment configuration dict; falls back to the
            module-level ENV_CONFIG when None.
        """
        import importlib

        if config is None:
            config = ENV_CONFIG
        self.environment_config = config
        carla_server_binary = add_carla_path(ENV_CONFIG["CARLA_PATH_CONFIG_FILE"])
        self.environment_config.update({"SERVER_BINARY": carla_server_binary})
        # Resolve the experiment class dynamically.  importlib.import_module
        # + getattr replaces the previous __import__/exec string building,
        # which executed generated code and returned the top-level package.
        experiment_module = importlib.import_module(
            "experiments.{}".format(self.environment_config["Experiment"])
        )
        self.experiment = experiment_module.Experiment()
        self.action_space = self.experiment.get_action_space()
        self.observation_space = self.experiment.get_observation_space()
        self.experiment_config = self.experiment.get_experiment_config()

        self.core = CarlaCore(self.environment_config, self.experiment_config)
        self.experiment.spawn_actors(self.core)
        self.experiment.initialize_reward(self.core)
        self.reset()

    def reset(self):
        """Respawn the hero and its sensors; return the first observation."""
        self.core.reset_sensors(self.experiment_config)
        self.experiment.spawn_hero(self.core, self.experiment.start_location, autopilot=False)

        self.core.setup_sensors(
            self.experiment.experiment_config,
            self.experiment.get_hero(),
            self.core.get_core_world().get_settings().synchronous_mode,
        )
        self.experiment.initialize_reward(self.core)
        self.experiment.set_server_view(self.core)
        self.experiment.experiment_tick(self.core, action=None)
        obs, info = self.experiment.get_observation(self.core)
        obs = self.experiment.process_observation(self.core, obs)
        return obs

    def step(self, action):
        """Apply *action* for one tick; return (observation, reward, done, info)."""
        self.experiment.experiment_tick(self.core, action)
        observation, info = self.experiment.get_observation(self.core)
        observation = self.experiment.process_observation(self.core, observation)
        reward = self.experiment.compute_reward(self.core, observation)
        done = self.experiment.get_done_status()
        return observation, reward, done, info

    def seed(self, seed=None):
        """Seed gym's RNG helper; return the list of seeds used."""
        self.np_random, seed = seeding.np_random(seed)
        return [seed]
88 | try: 89 | if env.environment_config["DEBUG_MODE"]: 90 | dir(carla) 91 | dir(carla.LaneInvasionEvent) 92 | dir(carla.LaneInvasionEvent.crossed_lane_markings) 93 | 94 | world = env.core.get_core_world() 95 | hero = env.experiment.hero 96 | 97 | draw_spawn_points(world) 98 | print_spawn_point(world) 99 | position, velocity, control, heading = get_actor_status(hero) 100 | print( 101 | "Position:", 102 | [position.location.x, position.location.y], 103 | " Velocity:", 104 | [velocity.x, velocity.y, velocity.z], 105 | " Heading:", 106 | heading, 107 | ) 108 | print(get_actor_display_name(hero)) 109 | # print_blueprint_attributes(world.get_blueprint_library()) 110 | 111 | Vehicles, Traffic_lights, Speed_limits, walkers = split_actors( 112 | world.get_actors() 113 | ) 114 | print("vehicles", Vehicles) 115 | env.core.get_nearby_vehicles(world, hero, max_distance=200) 116 | 117 | for _ in range(100): 118 | obs = env.reset() 119 | if env.environment_config["DEBUG_MODE"]: 120 | if env.experiment_config["OBSERVATION_CONFIG"]["CAMERA_OBSERVATION"]: 121 | env.core.record_camera(True) 122 | env.core.render_camera_lidar(True) 123 | 124 | done = False 125 | while done is False: 126 | t = time.time() 127 | observation, reward, done, info = env.step(1) #Forward 128 | #observation, reward, done, info = env.step(randint(0,(env.action_space.n-1))) #Random 129 | # print ("observation:",observation," Reward::{:0.2f}".format(reward * 1000)) 130 | elapsed = time.time() - t 131 | print("Elapsed (ms):{:0.2f}".format(elapsed * 1000)) 132 | 133 | except (KeyboardInterrupt, SystemExit): 134 | kill_server() 135 | -------------------------------------------------------------------------------- /experiments/experiment3.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module is how to setup a sample experiment. 
3 | """ 4 | import numpy as np 5 | from gym.spaces import Box 6 | import cv2 7 | 8 | from experiments.base_experiment import * 9 | from helper.CarlaHelper import update_config 10 | import carla 11 | import matplotlib.pyplot as plt 12 | import random 13 | 14 | SERVER_VIEW_CONFIG = { 15 | } 16 | 17 | SENSOR_CONFIG = { 18 | "CAMERA_X": 84, 19 | "CAMERA_Y": 84, 20 | "CAMERA_NORMALIZED": True, 21 | "FRAMESTACK": 2, 22 | } 23 | 24 | 25 | OBSERVATION_CONFIG ={ 26 | "CAMERA_OBSERVATION": True, 27 | } 28 | 29 | EXPERIMENT_CONFIG = { 30 | "OBSERVATION_CONFIG": OBSERVATION_CONFIG, 31 | "Server_View": SERVER_VIEW_CONFIG, 32 | "SENSOR_CONFIG": SENSOR_CONFIG, 33 | "number_of_spawning_actors": 0, 34 | "server_map": "Town02", 35 | "start_pos_spawn_id": 95, 36 | "end_pos_spawn_id": 34, 37 | "Debug":False, 38 | } 39 | 40 | 41 | def preprocess_image(x_res,y_res,image): 42 | data = np.asarray(image) 43 | data = cv2.resize(data, (x_res, y_res), interpolation=cv2.INTER_AREA) 44 | data = (data.astype(np.float32) - 128) / 128 45 | return data 46 | 47 | 48 | def plot_observation_space(obs): 49 | #https://stackoverflow.com/questions/279561/what-is-the-python-equivalent-of-static-variables-inside-a-function 50 | if not hasattr(plot_observation_space, "counter"): 51 | plot_observation_space.counter = 0 # it doesn't exist yet, so initialize it 52 | plot_observation_space.counter += 1 53 | 54 | plt.close() 55 | data = obs[:, :, 0:3] 56 | plt.subplot(211) 57 | plt.imshow(((data * 128) + 128).astype("uint8")) 58 | data = obs[:, :, 3:6] 59 | plt.subplot(212) 60 | plt.imshow(((data * 128) + 128).astype("uint8")) 61 | 62 | 63 | 64 | class Experiment(BaseExperiment): 65 | def __init__(self): 66 | config=update_config(BASE_EXPERIMENT_CONFIG, EXPERIMENT_CONFIG) 67 | super().__init__(config) 68 | self.prev_image = None 69 | self.previous_distance = None 70 | self.start_location = None 71 | self.end_location = None 72 | 73 | 74 | def set_observation_space(self): 75 | num_of_channels = 3 76 | 77 | 
image_space = Box( 78 | low=-1.0, 79 | high=1.0, 80 | shape=( 81 | self.experiment_config["SENSOR_CONFIG"]["CAMERA_X"], 82 | self.experiment_config["SENSOR_CONFIG"]["CAMERA_Y"], 83 | num_of_channels * self.experiment_config["SENSOR_CONFIG"]["FRAMESTACK"], 84 | ), 85 | dtype=np.float32, 86 | ) 87 | self.observation_space = image_space 88 | 89 | def process_observation(self, core, observation): 90 | """ 91 | Process observations according to your experiment 92 | :param core: 93 | :param observation: 94 | :return: 95 | """ 96 | image = preprocess_image(self.experiment_config["SENSOR_CONFIG"]["CAMERA_X"], 97 | self.experiment_config["SENSOR_CONFIG"]["CAMERA_Y"], 98 | observation['camera']) 99 | 100 | assert self.experiment_config["SENSOR_CONFIG"]["FRAMESTACK"] in [1, 2] 101 | 102 | prev_image = self.prev_image 103 | self.prev_image = image 104 | 105 | if prev_image is None: 106 | prev_image = image 107 | if self.experiment_config["SENSOR_CONFIG"]["FRAMESTACK"] == 2: 108 | image = np.concatenate([prev_image, image], axis=2) 109 | 110 | if self.experiment_config["Debug"] and 0: 111 | plot_observation_space(image) 112 | return image 113 | 114 | def initialize_reward(self, core): 115 | """ 116 | Generic initialization of reward function 117 | :param core: 118 | :return: 119 | """ 120 | self.previous_distance=None 121 | 122 | 123 | def compute_reward(self, core, observation): 124 | """ 125 | Reward function 126 | :param core: 127 | :param observation: 128 | :return: 129 | """ 130 | current_distance_from_start = float(np.linalg.norm( 131 | [self.hero.get_location().x - self.start_location.location.x, 132 | self.hero.get_location().y - self.start_location.location.y]) / 100) 133 | 134 | if self.previous_distance is None: 135 | reward = 0 136 | else: 137 | reward = current_distance_from_start - self.previous_distance 138 | 139 | self.previous_distance = current_distance_from_start 140 | 141 | if (random.randint(1, 200) == 50): 142 | print("Current Reward is:", reward, "current 
distance", current_distance_from_start) 143 | 144 | if self.experiment_config["Debug"]: 145 | message = " Reward is {reward:.2f}" 146 | message = message.format( 147 | reward=reward, 148 | ) 149 | print(message) 150 | 151 | return reward 152 | 153 | def set_server_view(self, core): 154 | 155 | """ 156 | Apply server view to be in the sky between camera between start and end positions 157 | :param core: 158 | :return: 159 | """ 160 | # spectator pointing to the sky to reduce rendering impact 161 | 162 | server_view_x = ( 163 | self.experiment_config["Server_View"]["server_view_x_offset"] 164 | + (self.start_location.location.x + self.end_location.location.x) / 2 165 | ) 166 | server_view_y = ( 167 | self.experiment_config["Server_View"]["server_view_y_offset"] 168 | + (self.start_location.location.y + self.end_location.location.y) / 2 169 | ) 170 | server_view_z = self.experiment_config["Server_View"]["server_view_height"] 171 | server_view_pitch = self.experiment_config["Server_View"]["server_view_pitch"] 172 | 173 | world = core.get_core_world() 174 | self.spectator = world.get_spectator() 175 | self.spectator.set_transform( 176 | carla.Transform( 177 | carla.Location(x=server_view_x, y=server_view_y, z=server_view_z), 178 | carla.Rotation(pitch=server_view_pitch), 179 | ) 180 | ) -------------------------------------------------------------------------------- /experiments/experiment1.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module is how to setup a sample experiment. 
3 | """ 4 | import numpy as np 5 | from gym.spaces import Box 6 | 7 | from experiments.base_experiment import * 8 | from helper.CarlaHelper import update_config 9 | import carla 10 | 11 | SERVER_VIEW_CONFIG = { 12 | } 13 | 14 | SENSOR_CONFIG = { 15 | "CAMERA_X": 1280, 16 | "CAMERA_Y": 720, 17 | } 18 | OBSERVATION_CONFIG ={ 19 | "CAMERA_OBSERVATION": False, 20 | } 21 | 22 | EXPERIMENT_CONFIG = { 23 | "OBSERVATION_CONFIG": OBSERVATION_CONFIG, 24 | "Server_View": SERVER_VIEW_CONFIG, 25 | "SENSOR_CONFIG": SENSOR_CONFIG, 26 | "number_of_spawning_actors": 0, 27 | "Debug": False, 28 | } 29 | 30 | 31 | def calculate_forward_speed(vehicle): 32 | # https://github.com/carla-simulator/carla/issues/355 33 | yaw_global = np.radians(vehicle.get_transform().rotation.yaw) 34 | 35 | rotation_global = np.array([ 36 | [np.sin(yaw_global), np.cos(yaw_global)], 37 | [np.cos(yaw_global), -np.sin(yaw_global)] 38 | ]) 39 | 40 | velocity_global = vehicle.get_velocity() 41 | velocity_global = np.array([velocity_global.y, velocity_global.x]) 42 | velocity_local = rotation_global.T @ velocity_global 43 | return (velocity_local[0]) 44 | 45 | 46 | class Experiment(BaseExperiment): 47 | def __init__(self): 48 | config=update_config(BASE_EXPERIMENT_CONFIG, EXPERIMENT_CONFIG) 49 | super().__init__(config) 50 | 51 | 52 | def set_observation_space(self): 53 | """ 54 | Set observation space as location of vehicle im x,y starting at (0,0) and ending at (1,1) 55 | :return: 56 | """ 57 | self.observation_space = Box(low=np.array([0, 0,-1.0,0]), high=np.array([1.0, 1.0,1.0,1.0]), dtype=np.float32) 58 | 59 | def initialize_reward(self, core): 60 | """ 61 | Generic initialization of reward function 62 | :param core: 63 | :return: 64 | """ 65 | # Get vehicle start location so reward can be calculated as total distance traveled 66 | self.start_pos_normalized_x, self.start_pos_normalized_y = core.normalize_coordinates( 67 | self.start_location.location.x, 68 | self.start_location.location.y) 69 | 
self.previous_distance = None 70 | 71 | def compute_reward(self, core, observation): 72 | """ 73 | Reward function 74 | :return: 75 | :param core: 76 | :param observation: 77 | :return: 78 | """ 79 | # Get vehicle start location so reward can be calculated as total distance traveled 80 | normalized_x = observation[0] 81 | normalized_y = observation[1] 82 | 83 | distance_reward = float( 84 | np.linalg.norm( 85 | [ 86 | normalized_x - self.start_pos_normalized_x, 87 | normalized_y - self.start_pos_normalized_y, 88 | ]) 89 | ) 90 | 91 | if self.previous_distance is None: 92 | reward = 0 93 | else: 94 | reward = distance_reward - self.previous_distance 95 | 96 | self.previous_distance = distance_reward 97 | 98 | return reward 99 | 100 | def process_observation(self, core, observation): 101 | """ 102 | Post processing of raw CARLA observations 103 | :param core: Core Environment 104 | :param observation: CARLA observations 105 | :return: 106 | """ 107 | #self.set_server_view(core) 108 | 109 | x_pos, y_pos= core.normalize_coordinates(observation["location"].location.x, 110 | observation["location"].location.y) 111 | forward_velocity = np.clip(calculate_forward_speed(self.hero), 0, None) 112 | forward_velocity=np.clip(forward_velocity, 0, 50.0)/50 113 | heading = np.sin(observation['location'].rotation.yaw * np.pi / 180) 114 | 115 | post_observation = np.r_[x_pos, y_pos, heading, forward_velocity] 116 | if self.experiment_config["Debug"]: 117 | normalized_x = post_observation[0] 118 | normalized_y = post_observation[1] 119 | distance_reward = float( 120 | np.linalg.norm( 121 | [ 122 | normalized_x - self.start_pos_normalized_x, 123 | normalized_y - self.start_pos_normalized_y, 124 | ]) 125 | ) 126 | 127 | 128 | message = "Vehicle at ({pos_x:.2f}, {pos_y:.2f}), " 129 | message += "with speed {speed:.2f} km/h, and heading {heading:.2f} " 130 | message += " and reward is {reward:.2f}" 131 | 132 | message = message.format( 133 | pos_x = x_pos, 134 | pos_y = y_pos, 135 | speed 
= forward_velocity, 136 | heading = heading, 137 | reward = distance_reward, 138 | ) 139 | print(message) 140 | 141 | return post_observation 142 | 143 | 144 | 145 | def set_server_view(self, core): 146 | 147 | """ 148 | Apply server view to be in the sky between camera between start and end positions 149 | :param core: 150 | :return: 151 | """ 152 | # spectator pointing to the sky to reduce rendering impact 153 | 154 | server_view_x = ( 155 | self.experiment_config["Server_View"]["server_view_x_offset"] 156 | + (self.start_location.location.x + self.end_location.location.x) / 2 157 | ) 158 | server_view_y = ( 159 | self.experiment_config["Server_View"]["server_view_y_offset"] 160 | + (self.start_location.location.y + self.end_location.location.y) / 2 161 | ) 162 | server_view_z = self.experiment_config["Server_View"]["server_view_height"] 163 | server_view_pitch = self.experiment_config["Server_View"]["server_view_pitch"] 164 | 165 | world = core.get_core_world() 166 | self.spectator = world.get_spectator() 167 | self.spectator.set_transform( 168 | carla.Transform( 169 | carla.Location(x=server_view_x, y=server_view_y, z=server_view_z), 170 | carla.Rotation(pitch=server_view_pitch), 171 | ) 172 | ) 173 | -------------------------------------------------------------------------------- /test_code/synchronous_mode.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright (c) 2019 Computer Vision Center (CVC) at the Universitat Autonoma de 4 | # Barcelona (UAB). 5 | # 6 | # This work is licensed under the terms of the MIT license. 7 | # For a copy, see . 
8 | 9 | import glob 10 | import os 11 | import sys 12 | from helper.CarlaHelper import add_carla_path 13 | import time 14 | 15 | 16 | ENV_CONFIG = { 17 | "RAY": True, # True if you are running an experiment in Ray 18 | "DEBUG_MODE": True, 19 | "CARLA_PATH_CONFIG_FILE": "CARLA_PATH.txt", # IN this file, put the path to your CARLA FOLDER 20 | } 21 | 22 | CARLA_SERVER_BINARY = add_carla_path(ENV_CONFIG["CARLA_PATH_CONFIG_FILE"]) 23 | ENV_CONFIG.update({"SERVER_BINARY": CARLA_SERVER_BINARY}) 24 | 25 | import carla 26 | 27 | from core.CarlaCore2 import CarlaCore 28 | from experiments.experiment1 import Experiment 29 | 30 | 31 | import random 32 | 33 | try: 34 | import pygame 35 | except ImportError: 36 | raise RuntimeError('cannot import pygame, make sure pygame package is installed') 37 | 38 | try: 39 | import numpy as np 40 | except ImportError: 41 | raise RuntimeError('cannot import numpy, make sure numpy package is installed') 42 | 43 | try: 44 | import queue 45 | except ImportError: 46 | import Queue as queue 47 | 48 | 49 | 50 | class CarlaSyncMode(object): 51 | """ 52 | Context manager to synchronize output from different sensors. 
Synchronous 53 | mode is enabled as long as we are inside this context 54 | 55 | with CarlaSyncMode(world, sensors) as sync_mode: 56 | while True: 57 | data = sync_mode.tick(timeout=1.0) 58 | 59 | """ 60 | 61 | def __init__(self, world, *sensors, **kwargs): 62 | self.world = world 63 | self.sensors = sensors 64 | self.frame = None 65 | self.delta_seconds = 1.0 / kwargs.get('fps', 20) 66 | self._queues = [] 67 | self._settings = None 68 | 69 | 70 | def __enter__(self): 71 | self._settings = self.world.get_settings() 72 | self.frame = self.world.apply_settings(carla.WorldSettings( 73 | no_rendering_mode=False, 74 | synchronous_mode=True, 75 | fixed_delta_seconds=self.delta_seconds)) 76 | 77 | def make_queue(register_event): 78 | q = queue.Queue() 79 | register_event(q.put) 80 | self._queues.append(q) 81 | 82 | make_queue(self.world.on_tick) 83 | for sensor in self.sensors: 84 | make_queue(sensor.listen) 85 | return self 86 | 87 | def tick(self, timeout): 88 | self.frame = self.world.tick() 89 | data = [self._retrieve_data(q, timeout) for q in self._queues] 90 | assert all(x.frame == self.frame for x in data) 91 | return data 92 | 93 | def __exit__(self, *args, **kwargs): 94 | self.world.apply_settings(self._settings) 95 | 96 | def _retrieve_data(self, sensor_queue, timeout): 97 | while True: 98 | data = sensor_queue.get(timeout=timeout) 99 | if data.frame == self.frame: 100 | return data 101 | 102 | 103 | def draw_image(surface, image, blend=False): 104 | array = np.frombuffer(image.raw_data, dtype=np.dtype("uint8")) 105 | array = np.reshape(array, (image.height, image.width, 4)) 106 | array = array[:, :, :3] 107 | array = array[:, :, ::-1] 108 | image_surface = pygame.surfarray.make_surface(array.swapaxes(0, 1)) 109 | if blend: 110 | image_surface.set_alpha(100) 111 | surface.blit(image_surface, (0, 0)) 112 | 113 | 114 | def get_font(): 115 | fonts = [x for x in pygame.font.get_fonts()] 116 | default_font = 'ubuntumono' 117 | font = default_font if default_font in 
fonts else fonts[0] 118 | font = pygame.font.match_font(font) 119 | return pygame.font.Font(font, 14) 120 | 121 | 122 | def should_quit(): 123 | for event in pygame.event.get(): 124 | if event.type == pygame.QUIT: 125 | return True 126 | elif event.type == pygame.KEYUP: 127 | if event.key == pygame.K_ESCAPE: 128 | return True 129 | return False 130 | 131 | 132 | def main(): 133 | actor_list = [] 134 | pygame.init() 135 | 136 | display = pygame.display.set_mode( 137 | (900, 1200), 138 | pygame.HWSURFACE | pygame.DOUBLEBUF) 139 | font = get_font() 140 | clock = pygame.time.Clock() 141 | 142 | env_config = ENV_CONFIG 143 | carla_server_binary = add_carla_path(ENV_CONFIG["CARLA_PATH_CONFIG_FILE"]) 144 | env_config.update({"SERVER_BINARY": carla_server_binary}) 145 | 146 | experiment = Experiment() 147 | action_space = experiment.get_action_space() 148 | observation_space = experiment.get_observation_space() 149 | exp_config = experiment.get_experiment_config() 150 | CarlaCore(environment_config=env_config, experiment_config=exp_config,core_config=None) 151 | time.sleep(10) 152 | 153 | client = carla.Client('localhost', 2000) 154 | client.set_timeout(2.0) 155 | print(client) 156 | print("server_version", client.get_server_version()) 157 | print("client_version", client.get_client_version()) 158 | 159 | world = client.get_world() 160 | 161 | try: 162 | m = world.get_map() 163 | start_pose = random.choice(m.get_spawn_points()) 164 | waypoint = m.get_waypoint(start_pose.location) 165 | 166 | blueprint_library = world.get_blueprint_library() 167 | 168 | vehicle = world.spawn_actor( 169 | random.choice(blueprint_library.filter('vehicle.*')), 170 | start_pose) 171 | actor_list.append(vehicle) 172 | vehicle.set_simulate_physics(False) 173 | 174 | camera_rgb = world.spawn_actor( 175 | blueprint_library.find('sensor.camera.rgb'), 176 | carla.Transform(carla.Location(x=-5.5, z=2.8), carla.Rotation(pitch=-15)), 177 | attach_to=vehicle) 178 | actor_list.append(camera_rgb) 179 | 180 
| camera_semseg = world.spawn_actor( 181 | blueprint_library.find('sensor.camera.semantic_segmentation'), 182 | carla.Transform(carla.Location(x=-5.5, z=2.8), carla.Rotation(pitch=-15)), 183 | attach_to=vehicle) 184 | actor_list.append(camera_semseg) 185 | 186 | # Create a synchronous mode context. 187 | with CarlaSyncMode(world, camera_rgb, camera_semseg, fps=50) as sync_mode: 188 | while True: 189 | if should_quit(): 190 | return 191 | clock.tick() 192 | t = time.time() 193 | 194 | # Advance the simulation and wait for the data. 195 | snapshot, image_rgb, image_semseg = sync_mode.tick(timeout=2.0) 196 | 197 | # Choose the next waypoint and update the car location. 198 | waypoint = random.choice(waypoint.next(1.5)) 199 | vehicle.set_transform(waypoint.transform) 200 | 201 | image_semseg.convert(carla.ColorConverter.CityScapesPalette) 202 | fps = round(1.0 / snapshot.timestamp.delta_seconds) 203 | 204 | # Draw the display. 205 | draw_image(display, image_rgb) 206 | draw_image(display, image_semseg, blend=True) 207 | display.blit( 208 | font.render('% 5d FPS (real)' % clock.get_fps(), True, (255, 255, 255)), 209 | (8, 10)) 210 | display.blit( 211 | font.render('% 5d FPS (simulated)' % fps, True, (255, 255, 255)), 212 | (8, 28)) 213 | pygame.display.flip() 214 | elapsed = time.time() - t 215 | print("Elapsed (ms):{:0.2f}".format(elapsed * 1000)) 216 | 217 | finally: 218 | 219 | print('destroying actors.') 220 | for actor in actor_list: 221 | actor.destroy() 222 | 223 | pygame.quit() 224 | print('done.') 225 | 226 | 227 | if __name__ == '__main__': 228 | 229 | try: 230 | 231 | main() 232 | 233 | except KeyboardInterrupt: 234 | print('\nCancelled by user. 
Bye!') 235 | -------------------------------------------------------------------------------- /experiments/experiment2.py: -------------------------------------------------------------------------------- 1 | from experiments.base_experiment import * 2 | from helper.CarlaHelper import spawn_vehicle_at, post_process_image, update_config 3 | import random 4 | import numpy as np 5 | from gym.spaces import Box 6 | from itertools import cycle 7 | 8 | SERVER_VIEW_CONFIG = { 9 | } 10 | 11 | SENSOR_CONFIG = { 12 | "CAMERA_NORMALIZED": True, 13 | "FRAMESTACK": 4, 14 | } 15 | OBSERVATION_CONFIG ={ 16 | "CAMERA_OBSERVATION": True, 17 | } 18 | 19 | EXPERIMENT_CONFIG = { 20 | "OBSERVATION_CONFIG": OBSERVATION_CONFIG, 21 | "Server_View": SERVER_VIEW_CONFIG, 22 | "SENSOR_CONFIG": SENSOR_CONFIG, 23 | "number_of_spawning_actors": 10000, 24 | "hero_vehicle_model": "vehicle.mini.cooperst", 25 | } 26 | 27 | ENV_CONFIG = {"RAY": True, "DEBUG_MODE": False} # Are we running an experiment in Ray 28 | 29 | 30 | class Experiment(BaseExperiment): 31 | def __init__(self): 32 | config=update_config(BASE_EXPERIMENT_CONFIG, EXPERIMENT_CONFIG) 33 | super().__init__(config) 34 | 35 | 36 | self.max_actors = 30 37 | self.randomized_vehicle_spawn_point = None 38 | 39 | self.environment_config = ENV_CONFIG.copy() 40 | 41 | self.environment_config.update( 42 | { 43 | "RAY": True, # Are we running an experiment in Ray 44 | "DEBUG_MODE": False, 45 | "corridor_length": 5, 46 | } 47 | ) 48 | 49 | def initialize_reward(self, core): 50 | """ 51 | Generic initialization of reward function 52 | :param core: 53 | :return: 54 | """ 55 | self.previous_distance = 0 56 | self.base_x = 0 57 | self.base_y = 0 58 | 59 | self.frame_stack = 4 # can be 1,2,3,4 60 | self.prev_image_0 = None 61 | self.prev_image_1 = None 62 | self.prev_image_2 = None 63 | self.start_location = None 64 | 65 | 66 | def set_observation_space(self): 67 | num_of_channels = 1 68 | image_space = Box( 69 | low=-1.0, 70 | high=1.0, 71 | shape=( 72 | 
self.experiment_config["SENSOR_CONFIG"]["CAMERA_X"], 73 | self.experiment_config["SENSOR_CONFIG"]["CAMERA_Y"], 74 | num_of_channels * self.experiment_config["SENSOR_CONFIG"]["FRAMESTACK"], 75 | ), 76 | dtype=np.float32, 77 | ) 78 | self.observation_space = image_space 79 | 80 | def process_observation(self, core, observation): 81 | """ 82 | Process observations according to your experiment 83 | :param core: 84 | :param observation: 85 | :return: 86 | """ 87 | self.set_server_view(core) 88 | image = post_process_image(observation['camera'], 89 | normalized = self.experiment_config["SENSOR_CONFIG"]["CAMERA_NORMALIZED"], 90 | grayscale = self.experiment_config["SENSOR_CONFIG"]["CAMERA_GRAYSCALE"] 91 | ) 92 | image = image[:, :, np.newaxis] 93 | 94 | if self.prev_image_0 is None: # can be improved 95 | self.prev_image_0 = image 96 | self.prev_image_1 = self.prev_image_0 97 | self.prev_image_2 = self.prev_image_1 98 | #ToDO. Fix the images stack 99 | if self.frame_stack >= 2: 100 | images = np.concatenate([self.prev_image_0, image], axis=2) 101 | if self.frame_stack >= 3 and images is not None: 102 | images = np.concatenate([self.prev_image_1, images], axis=2) 103 | if self.frame_stack >= 4 and images is not None: 104 | images = np.concatenate([self.prev_image_2, images], axis=2) 105 | 106 | # uncomment to save the observations (Normalized must be False) 107 | ''' 108 | cv2.imwrite('./input_img0.jpg', image) 109 | cv2.imwrite('./input_img1.jpg', self.prev_image_0) 110 | cv2.imwrite('./input_img2.jpg', self.prev_image_1) 111 | cv2.imwrite('./input_img3.jpg', self.prev_image_2) 112 | ''' 113 | self.prev_image_2 = self.prev_image_1 114 | self.prev_image_1 = self.prev_image_0 115 | self.prev_image_0 = image 116 | 117 | return images 118 | 119 | 120 | def compute_reward(self, core, observation): 121 | """ 122 | Reward function 123 | :param observation: 124 | :param core: 125 | :return: 126 | """ 127 | c = float(np.sqrt(np.square(self.hero.get_location().x - self.base_x) + \ 
128 | np.square(self.hero.get_location().y - self.base_y))) 129 | if c > self.previous_distance + 1e-2: 130 | reward = 1 131 | else: 132 | reward = 0 133 | 134 | # print("\n", self.previous_distance) 135 | self.previous_distance = c 136 | # print("\n", c) 137 | 138 | if c > 50: 139 | # print("\n", self.base_x, hero.get_location().x, 140 | # "\n", self.base_y, hero.get_location().y) 141 | self.base_x = self.hero.get_location().x 142 | self.base_y = self.hero.get_location().y 143 | print("Reached the milestone!") 144 | self.previous_distance = 0 145 | return reward 146 | 147 | def spawn_actors(self, core): 148 | # Get a list of all the vehicle blueprints 149 | world = core.get_core_world() 150 | vehicle_blueprints = world.get_blueprint_library().filter("vehicle.*") 151 | car_blueprints = [ 152 | x 153 | for x in vehicle_blueprints 154 | if int(x.get_attribute("number_of_wheels")) == 4 155 | ] 156 | # Get all spawn Points 157 | spawn_points = list(world.get_map().get_spawn_points()) 158 | 159 | # Now we are ready to spawn all the vehicles (except the hero) 160 | count = 0 # self.experiment_config["number_of_spawning_actors"] 161 | 162 | self.randomized_vehicle_spawn_point = spawn_points.copy() 163 | 164 | while count > 1: 165 | random.shuffle(self.randomized_vehicle_spawn_point, random.random) 166 | next_spawn_point = self.randomized_vehicle_spawn_point[count] 167 | 168 | # Try to spawn but if you can't, just move on 169 | next_vehicle = spawn_vehicle_at( 170 | next_spawn_point, 171 | random.choice(car_blueprints), 172 | world, 173 | autopilot=True, 174 | max_time=0.1, 175 | ) 176 | print(count) 177 | if next_vehicle is not False: 178 | self.spawn_point_list.append(next_spawn_point) 179 | self.vehicle_list.append(next_vehicle) 180 | count -= 1 181 | if len(self.vehicle_list) > self.max_actors: 182 | for v in self.vehicle_list: # do we need this? 
183 | v.destroy() 184 | self.vehicle_list = [] 185 | self.spawn_point_list = [] 186 | 187 | # print(world.get_actors().filter("vehicle.*")) 188 | print('number of actors: ', len(self.vehicle_list)) 189 | 190 | # spawn hero 191 | # self.spawn_hero() 192 | # return self.hero 193 | 194 | def spawn_hero(self, core, transform, autopilot=False): 195 | world = core.get_core_world() 196 | 197 | self.hero_blueprints = world.get_blueprint_library().find(self.hero_model) 198 | self.hero_blueprints.set_attribute("role_name", "hero") 199 | 200 | random.shuffle(self.randomized_vehicle_spawn_point, random.random) 201 | next_spawn_point = self.randomized_vehicle_spawn_point[0] 202 | if next_spawn_point in self.spawn_point_list: 203 | random.shuffle(self.randomized_vehicle_spawn_point, random.random) 204 | next_spawn_point = self.randomized_vehicle_spawn_point[0] 205 | # spawn hero 206 | # Hamid what's the dif between spawn_vehicle_at and try_spawn_actor 207 | super().spawn_hero(core, next_spawn_point, autopilot=False) 208 | 209 | print("Hero spawned!") 210 | self.base_x = self.hero.get_location().x 211 | self.base_y = self.hero.get_location().y 212 | self.start_location = next_spawn_point -------------------------------------------------------------------------------- /helper/CameraManager.py: -------------------------------------------------------------------------------- 1 | import carla 2 | import weakref 3 | # noinspection PyUnresolvedReferences 4 | from carla import ColorConverter as cc 5 | import numpy as np 6 | import pygame 7 | 8 | import queue 9 | # ============================================================================== 10 | # -- CameraManager ------------------------------------------------------------- 11 | # ============================================================================== 12 | 13 | 14 | class CameraManager(object): 15 | def __init__(self, parent_actor, image_size_x, image_size_y, image_fov): 16 | self.sensor = None 17 | self.surface = None 18 | 
self._parent = parent_actor 19 | self.image_size_x = image_size_x 20 | self.image_size_y = image_size_y 21 | self.image_fov = image_fov 22 | self.dim = [self.image_size_x, self.image_size_y] 23 | self.image_calibration = None 24 | self.recording = False 25 | self.render = False 26 | self.display = None 27 | self.camera_data = None 28 | 29 | bound_y = 0.5 + self._parent.bounding_box.extent.y 30 | attachment = carla.AttachmentType 31 | self._camera_transforms = [ 32 | ( 33 | carla.Transform( 34 | carla.Location(x=-5.5, z=2.5), carla.Rotation(pitch=8.0) 35 | ), 36 | attachment.SpringArm, 37 | ), 38 | (carla.Transform(carla.Location(x=1.6, z=1.7)), attachment.Rigid), 39 | ( 40 | carla.Transform(carla.Location(x=5.5, y=1.5, z=1.5)), 41 | attachment.SpringArm, 42 | ), 43 | ( 44 | carla.Transform( 45 | carla.Location(x=-8.0, z=6.0), carla.Rotation(pitch=6.0) 46 | ), 47 | attachment.SpringArm, 48 | ), 49 | ( 50 | carla.Transform(carla.Location(x=-1, y=-bound_y, z=0.5)), 51 | attachment.Rigid, 52 | ), 53 | ] 54 | self.transform_index = 1 55 | self.sensors = [ 56 | ["sensor.camera.rgb", cc.Raw, "Camera RGB"], 57 | ["sensor.camera.depth", cc.Raw, "Camera Depth (Raw)"], 58 | ["sensor.camera.depth", cc.Depth, "Camera Depth (Gray Scale)"], 59 | [ 60 | "sensor.camera.depth", 61 | cc.LogarithmicDepth, 62 | "Camera Depth (Logarithmic Gray Scale)", 63 | ], 64 | [ 65 | "sensor.camera.semantic_segmentation", 66 | cc.Raw, 67 | "Camera Semantic Segmentation (Raw)", 68 | ], 69 | [ 70 | "sensor.camera.semantic_segmentation", 71 | cc.CityScapesPalette, 72 | "Camera Semantic Segmentation (CityScapes Palette)", 73 | ], 74 | ["sensor.lidar.ray_cast", None, "Lidar (Ray-Cast)"], 75 | ] 76 | world = self._parent.get_world() 77 | bp_library = world.get_blueprint_library() 78 | for item in self.sensors: 79 | bp = bp_library.find(item[0]) 80 | if item[0].startswith("sensor.camera"): 81 | bp.set_attribute("image_size_x", str(self.image_size_x)) 82 | bp.set_attribute("image_size_y", 
str(self.image_size_y)) 83 | bp.set_attribute("fov", str(self.image_fov)) 84 | 85 | calibration = np.identity(3) 86 | calibration[0, 2] = self.image_size_x / 2.0 87 | calibration[1, 2] = self.image_size_y / 2.0 88 | calibration[0, 0] = calibration[1, 1] = self.image_size_x / ( 89 | 2.0 * np.tan(self.image_fov * np.pi / 360.0) 90 | ) 91 | self.image_calibration = calibration 92 | 93 | elif item[0].startswith("sensor.lidar"): 94 | bp.set_attribute("range", "5000") 95 | item.append(bp) 96 | 97 | bp.set_attribute("sensor_tick", "0.00001") 98 | 99 | self.index = None 100 | 101 | def toggle_camera(self): 102 | self.transform_index = (self.transform_index + 1) % len(self._camera_transforms) 103 | self.set_sensor( 104 | self.index, force_respawn=True, synchronous_mode=True 105 | ) 106 | 107 | def destroy_sensor(self): 108 | if self.sensor is not None: 109 | self.sensor.destroy() 110 | self.surface = None 111 | self.sensor = None 112 | 113 | def set_sensor(self, index, force_respawn=False, synchronous_mode=True): 114 | self.synchronous_mode = synchronous_mode 115 | index = index % len(self.sensors) 116 | needs_respawn = ( 117 | True 118 | if self.index is None 119 | else ( 120 | force_respawn or (self.sensors[index][0] != self.sensors[self.index][0]) 121 | ) 122 | ) 123 | if needs_respawn: 124 | if self.sensor is not None: 125 | self.sensor.destroy() 126 | self.surface = None 127 | self.sensor = self._parent.get_world().spawn_actor( 128 | self.sensors[index][-1], 129 | self._camera_transforms[self.transform_index][0], 130 | attach_to=self._parent, 131 | attachment_type=self._camera_transforms[self.transform_index][1], 132 | ) 133 | if not self.synchronous_mode: 134 | # We need to pass the lambda a weak reference to self to avoid 135 | # circular reference. 136 | weak_self = weakref.ref(self) 137 | self.sensor.listen( 138 | lambda image: CameraManager._parse_image(weak_self, image) 139 | ) 140 | self.last_image = None 141 | else: 142 | # Make sync queue for sensor data. 
143 | self.camera_queue = queue.Queue() 144 | self.sensor.listen(self.camera_queue.put) 145 | 146 | self.index = index 147 | 148 | def next_sensor(self): 149 | self.set_sensor(self.index + 1) 150 | 151 | def toggle_recording(self): 152 | self.recording = not self.recording 153 | 154 | def set_recording(self, record_state): 155 | self.recording = record_state 156 | 157 | def set_rendering(self, render_state): 158 | self.render = render_state 159 | 160 | def read_image_queue(self): 161 | weak_self = weakref.ref(self) 162 | if (not self.synchronous_mode) and (self.last_image is not None): 163 | CameraManager._parse_image(weak_self, self.last_image) 164 | if self.synchronous_mode: 165 | try: 166 | CameraManager._parse_image(weak_self, self.camera_queue.get(True)) 167 | except: 168 | print("We couldn't read Image") 169 | # Ignore empty Que 170 | pass 171 | 172 | def get_camera_data(self): 173 | 174 | if self.camera_data is None: 175 | return None 176 | 177 | return self.camera_data.astype(np.float32) 178 | 179 | @staticmethod 180 | def _parse_image(weak_self, image): 181 | self = weak_self() 182 | if not self: 183 | return 184 | self.last_image = image 185 | if self.sensors[self.index][0].startswith("sensor.lidar"): 186 | points = np.frombuffer(image.raw_data, dtype=np.dtype("f4")) 187 | points = np.reshape(points, (int(points.shape[0] / 3), 3)) 188 | lidar_data = np.array(points[:, :2]) 189 | lidar_data *= min(self.dim) / 100.0 190 | lidar_data += (0.5 * self.image_size_x, 0.5 * self.image_size_y) 191 | lidar_data = np.fabs(lidar_data) 192 | lidar_data = lidar_data.astype(np.int32) 193 | lidar_data = np.reshape(lidar_data, (-1, 2)) 194 | lidar_img_size = (self.image_size_x, self.image_size_y, 3) 195 | lidar_img = np.zeros(lidar_img_size, dtype=int) 196 | lidar_img[tuple(lidar_data.T)] = (255, 255, 255) 197 | if self.render: 198 | self.surface = pygame.surfarray.make_surface(lidar_img) 199 | if self.surface is not None: 200 | if self.display is None: 201 | self.display 
= pygame.display.set_mode( 202 | (self.image_size_x, self.image_size_y), 203 | pygame.HWSURFACE | pygame.DOUBLEBUF, 204 | ) 205 | # ToDO save the output of the Lidar Image instead of real time visualization 206 | self.display.blit(self.surface, (0, 0)) 207 | pygame.display.flip() 208 | else: 209 | image.convert(self.sensors[self.index][1]) 210 | array = np.frombuffer(image.raw_data, dtype=np.dtype("uint8")) 211 | array = np.reshape(array, (image.height, image.width, 4)) 212 | array = array[:, :, :3] 213 | array = array[:, :, ::-1] 214 | self.camera_data = array 215 | if self.render: 216 | self.surface = pygame.surfarray.make_surface(array.swapaxes(0, 1)) 217 | if self.surface is not None: 218 | if self.display is None: 219 | self.display = pygame.display.set_mode( 220 | (self.image_size_x, self.image_size_y), 221 | pygame.HWSURFACE | pygame.DOUBLEBUF, 222 | ) 223 | # ToDO save the output of the Lidar Image instead of real time visualization 224 | self.display.blit(self.surface, (0, 0)) 225 | pygame.display.flip() 226 | if self.recording: 227 | image.save_to_disk("_out/%08d" % image.frame_number) 228 | -------------------------------------------------------------------------------- /core/BaseCarlaCore.py: -------------------------------------------------------------------------------- 1 | import math 2 | import os 3 | import random 4 | import signal 5 | import subprocess 6 | import time 7 | 8 | import carla 9 | import numpy as np 10 | 11 | from helper.CameraManager import CameraManager 12 | from helper.CarlaDebug import get_actor_display_name 13 | from helper.CollisionManager import CollisionManager 14 | from helper.list_procs import search_procs_by_name 15 | 16 | 17 | """ 18 | Generic Carla colors (Not being used but can be useful) 19 | """ 20 | RED = carla.Color(255, 0, 0) 21 | GREEN = carla.Color(0, 255, 0) 22 | BLUE = carla.Color(47, 210, 231) 23 | CYAN = carla.Color(0, 255, 255) 24 | YELLOW = carla.Color(255, 255, 0) 25 | ORANGE = carla.Color(255, 162, 0) 26 | 
WHITE = carla.Color(255, 255, 255)

CORE_CONFIG = {
    "RAY_DELAY": 3,  # Delay between 0 & RAY_DELAY before starting server so not all servers are launched simultaneously
    "RETRIES_ON_ERROR": 30,
    "timeout": 2.0,
    "host": "localhost",
    "map_buffer": 1.2,  # To find the minimum and maximum coordinates of the map
}


class BaseCarlaCore:
    def __init__(self, environment_config, experiment_config, core_config=None):
        """
        Initialize the server, clients, hero and sensors
        :param environment_config: Environment Configuration
        :param experiment_config: Experiment Configuration
        :param core_config: Core configuration; defaults to CORE_CONFIG
        """
        if core_config is None:
            core_config = CORE_CONFIG

        self.core_config = core_config
        self.environment_config = environment_config
        self.experiment_config = experiment_config

        self.init_server(self.core_config["RAY_DELAY"])

        self.client, self.world, self.town_map, self.actors = self.__connect_client(
            self.core_config["host"],
            self.server_port,
            self.core_config["timeout"],
            self.core_config["RETRIES_ON_ERROR"],
            self.experiment_config["Disable_Rendering_Mode"],
            True,
            self.experiment_config["Weather"],
            self.experiment_config["server_map"]
        )
        self.set_map_dimensions()
        self.camera_manager = None
        self.collision_manager = None

    # ==============================================================================
    # -- ServerSetup -----------------------------------------------------------
    # ==============================================================================
    def init_server(self, ray_delay=0):
        """
        Start a server on a random port
        :param ray_delay: Delay so not all servers start simultaneously causing race condition
        :return:
        """
        if self.environment_config["RAY"] is False:
            # Kill all PIDs that start with Carla. Do this if you are running a
            # single server or before an experiment.
            try:
                for pid, name in search_procs_by_name("Carla").items():
                    os.kill(pid, signal.SIGKILL)
            except Exception:  # best effort — never abort startup on cleanup failure
                pass

        # Generate a random port to connect to. You need one port for each server-client
        if self.environment_config["DEBUG_MODE"]:
            self.server_port = 2000
        else:
            self.server_port = random.randint(10000, 60000)

        # Create a new server process and start the client.
        if self.environment_config["RAY"] is True:
            # Ray tends to start all processes simultaneously. This causes problems
            # => random delay to start individual servers
            delay_sleep = random.uniform(0, ray_delay)
            time.sleep(delay_sleep)

        if self.environment_config["DEBUG_MODE"] is True:
            # Big Screen for Debugging
            self.experiment_config["SENSOR_CONFIG"]["CAMERA_X"] = 900
            self.experiment_config["SENSOR_CONFIG"]["CAMERA_Y"] = 1200
            self.experiment_config["quality_level"] = "High"

        # Run the server process.  Fix: the original emitted
        # "-quality-level =" / "-carla-server-timeout = 10000ms" with stray
        # spaces around "=", which the server does not parse as those flags.
        server_command = [
            self.environment_config["SERVER_BINARY"],
            "-windowed",
            "-ResX={}".format(self.experiment_config["SENSOR_CONFIG"]["CAMERA_X"]),
            "-ResY={}".format(self.experiment_config["SENSOR_CONFIG"]["CAMERA_Y"]),
            "-carla-server",
            "-carla-port={}".format(self.server_port),
            "-fps={}".format(self.experiment_config["fps"]),
            "-quality-level={}".format(self.experiment_config["quality_level"]),
            "--no-rendering",
            "-carla-server-timeout=10000ms",
        ]
        if not self.experiment_config["Disable_Rendering_Mode"]:
            server_command.remove("--no-rendering")

        server_command_text = " ".join(map(str, server_command))
        print(server_command_text)
        server_process = subprocess.Popen(
            server_command_text,
            shell=True,
            preexec_fn=os.setsid,
            stdout=subprocess.DEVNULL,  # fix: open(os.devnull) leaked a file handle
        )
============================================================================== 130 | # -- ClientSetup ----------------------------------------------------------- 131 | # ============================================================================== 132 | 133 | @staticmethod 134 | def __connect_client(host, port, timeout, num_retries, disable_rendering_mode, sync_mode, weather,map): 135 | """ 136 | Connect the client 137 | 138 | :param host: The host servers 139 | :param port: The server port to connect to 140 | :param timeout: The server takes time to get going, so wait a "timeout" and re-connect 141 | :param num_retries: Number of times to try before giving up 142 | :param disable_rendering_mode: True to disable rendering 143 | :param sync_mode: True for RL 144 | :param weather: The weather to start the world 145 | :param map: current map 146 | :return: 147 | """ 148 | for i in range(num_retries): 149 | try: 150 | carla_client = carla.Client(host, port) 151 | carla_client.set_timeout(timeout) 152 | carla_client.load_world(map) 153 | 154 | world = carla_client.get_world() 155 | 156 | settings = world.get_settings() 157 | settings.no_rendering_mode = disable_rendering_mode 158 | world.apply_settings(settings) 159 | 160 | settings = world.get_settings() 161 | settings.synchronous_mode = sync_mode 162 | settings.fixed_delta_seconds = 1/20 163 | 164 | world.apply_settings(settings) 165 | 166 | town_map = world.get_map() 167 | actors = world.get_actors() 168 | world.set_weather(weather) 169 | print("Server setup is complete") 170 | 171 | return carla_client, world, town_map, actors 172 | 173 | except Exception as e: 174 | print(" Waiting for server to be ready: {}, attempt {} of {}".format(e, i + 1, num_retries)) 175 | time.sleep(1) 176 | # if (i + 1) == num_retries: 177 | raise Exception("Trouble is brewing. Can not connect to server. 
Try increasing timeouts or num_retries") 178 | 179 | 180 | # ============================================================================== 181 | # -- MapDigestionsSetup ----------------------------------------------------------- 182 | # ============================================================================== 183 | 184 | def set_map_dimensions(self): 185 | """ 186 | From the spawn points, we get min and max and add some buffer so we can normalize the location of agents (0..1) 187 | This allows you to get the location of the vehicle between 0 and 1 188 | 189 | :input 190 | self.core_config["map_buffer"]. Because we use spawn points, we add a buffer as vehicle can drive off the road 191 | 192 | :output: 193 | self.coord_normalization["map_normalization"] = Using larger of (X,Y) axis to normalize x,y 194 | self.coord_normalization["map_min_x"] = minimum x coordinate 195 | self.coord_normalization["map_min_y"] = minimum y coordinate 196 | :return: None 197 | """ 198 | 199 | map_buffer = self.core_config["map_buffer"] 200 | spawn_points = list(self.world.get_map().get_spawn_points()) 201 | 202 | min_x = min_y = 1000000 203 | max_x = max_y = -1000000 204 | 205 | for spawn_point in spawn_points: 206 | min_x = min(min_x, spawn_point.location.x) 207 | max_x = max(max_x, spawn_point.location.x) 208 | 209 | min_y = min(min_y, spawn_point.location.y) 210 | max_y = max(max_y, spawn_point.location.y) 211 | 212 | center_x = (max_x+min_x)/2 213 | center_y = (max_y+min_y)/2 214 | 215 | x_buffer = (max_x - center_x) * map_buffer 216 | y_buffer = (max_y - center_y) * map_buffer 217 | 218 | min_x = center_x - x_buffer 219 | max_x = center_x + x_buffer 220 | 221 | min_y = center_y - y_buffer 222 | max_y = center_y + y_buffer 223 | 224 | self.coord_normalization = {"map_normalization": max(max_x - min_x, max_y - min_y), 225 | "map_min_x": min_x, 226 | "map_min_y": min_y} 227 | 228 | def normalize_coordinates(self, input_x, input_y): 229 | """ 230 | :param input_x: X location of 
your actor 231 | :param input_y: Y location of your actor 232 | :return: The normalized location of your actor 233 | """ 234 | output_x = (input_x - self.coord_normalization["map_min_x"]) / self.coord_normalization["map_normalization"] 235 | output_y = (input_y - self.coord_normalization["map_min_y"]) / self.coord_normalization["map_normalization"] 236 | 237 | # ToDO Possible bug (Clipped the observation and still didn't stop the observations from being under 238 | output_x = float(np.clip(output_x, 0, 1)) 239 | output_y = float(np.clip(output_y, 0, 1)) 240 | 241 | return output_x, output_y 242 | 243 | # ============================================================================== 244 | # -- SensorSetup ----------------------------------------------------------- 245 | # ============================================================================== 246 | 247 | def setup_sensors(self, experiment_config, hero, synchronous_mode=True): 248 | 249 | """ 250 | This function sets up hero vehicle sensors 251 | 252 | :param experiment_config: Sensor configuration for you sensors 253 | :param hero: Hero vehicle 254 | :param synchronous_mode: set to True for RL 255 | :return: 256 | """ 257 | 258 | if experiment_config["OBSERVATION_CONFIG"]["CAMERA_OBSERVATION"]: 259 | self.camera_manager = CameraManager( 260 | hero, 261 | experiment_config["SENSOR_CONFIG"]["CAMERA_X"], 262 | experiment_config["SENSOR_CONFIG"]["CAMERA_Y"], 263 | experiment_config["SENSOR_CONFIG"]["CAMERA_FOV"], 264 | ) 265 | sensor = experiment_config["SENSOR_CONFIG"]["SENSOR"].value 266 | self.camera_manager.set_sensor(sensor, synchronous_mode=synchronous_mode) 267 | transform_index = experiment_config["SENSOR_CONFIG"][ 268 | "SENSOR_TRANSFORM" 269 | ].value 270 | if experiment_config["OBSERVATION_CONFIG"]["COLLISION_OBSERVATION"]: 271 | self.collision_manager = CollisionManager( 272 | hero, synchronous_mode=synchronous_mode 273 | ) 274 | 275 | def reset_sensors(self, experiment_config): 276 | """ 277 | 
Destroys sensors that were setup in this class 278 | :param experiment_config: sensors configured in the experiment 279 | :return: 280 | """ 281 | if experiment_config["OBSERVATION_CONFIG"]["CAMERA_OBSERVATION"]: 282 | if self.camera_manager is not None: 283 | self.camera_manager.destroy_sensor() 284 | if experiment_config["OBSERVATION_CONFIG"]["COLLISION_OBSERVATION"]: 285 | if self.collision_manager is not None: 286 | self.collision_manager.destroy_sensor() 287 | 288 | # ============================================================================== 289 | # -- CameraSensor ----------------------------------------------------------- 290 | # ============================================================================== 291 | 292 | def record_camera(self, record_state): 293 | self.camera_manager.set_recording(record_state) 294 | 295 | def render_camera_lidar(self, render_state): 296 | self.camera_manager.set_rendering(render_state) 297 | 298 | def update_camera(self): 299 | self.camera_manager.read_image_queue() 300 | 301 | def get_camera_data(self): 302 | return self.camera_manager.get_camera_data() 303 | 304 | # ============================================================================== 305 | # -- CollisionSensor ----------------------------------------------------------- 306 | # ============================================================================== 307 | 308 | def update_collision(self): 309 | self.collision_manager.read_collision_queue() 310 | 311 | def get_collision_data(self): 312 | return self.collision_manager.get_collision_data() 313 | 314 | 315 | # ============================================================================== 316 | # -- OtherForNow ----------------------------------------------------------- 317 | # ============================================================================== 318 | 319 | def get_core_world(self): 320 | return self.world 321 | 322 | def get_nearby_vehicles(self, world, hero_actor, max_distance=200): 323 | vehicles 
= world.get_actors().filter("vehicle.*") 324 | surrounding_vehicles = [] 325 | surrounding_vehicle_actors = [] 326 | _info_text = [] 327 | if len(vehicles) > 1: 328 | _info_text += ["Nearby vehicles:"] 329 | for x in vehicles: 330 | if x.id != hero_actor: 331 | loc1 = hero_actor.get_location() 332 | loc2 = x.get_location() 333 | distance = math.sqrt( 334 | (loc1.x - loc2.x) ** 2 335 | + (loc1.y - loc2.y) ** 2 336 | + (loc1.z - loc2.z) ** 2 337 | ) 338 | vehicle = {} 339 | if distance < max_distance: 340 | vehicle["vehicle_type"] = get_actor_display_name(x, truncate=22) 341 | vehicle["vehicle_location"] = x.get_location() 342 | vehicle["vehicle_velocity"] = x.get_velocity() 343 | vehicle["vehicle_distance"] = distance 344 | surrounding_vehicles.append(vehicle) 345 | surrounding_vehicle_actors.append(x) 346 | -------------------------------------------------------------------------------- /experiments/base_experiment.py: -------------------------------------------------------------------------------- 1 | import random 2 | from enum import Enum 3 | from itertools import cycle 4 | 5 | import carla 6 | import numpy as np 7 | from gym.spaces import Discrete, Box 8 | 9 | from helper.CarlaHelper import spawn_vehicle_at, post_process_image 10 | 11 | 12 | class SensorsTransformEnum(Enum): 13 | Transform_A = 0 # (carla.Transform(carla.Location(x=-5.5, z=2.5), carla.Rotation(pitch=8.0)), Attachment.SpringArm) 14 | Transform_B = 1 # (carla.Transform(carla.Location(x=1.6, z=1.7)), Attachment.Rigid), 15 | Transform_c = 2 # (carla.Transform(carla.Location(x=5.5, y=1.5, z=1.5)), Attachment.SpringArm), 16 | Transform_D = 3 # (carla.Transform(carla.Location(x=-8.0, z=6.0), carla.Rotation(pitch=6.0)), Attachment.SpringArm) 17 | Transform_E = 4 # (carla.Transform(carla.Location(x=-1, y=-bound_y, z=0.5)), Attachment.Rigid)] 18 | 19 | 20 | class SensorsEnum(Enum): 21 | CAMERA_RGB = 0 22 | CAMERA_DEPTH_RAW = 1 23 | CAMERA_DEPTH_GRAY = 2 24 | CAMERA__DEPTH_LOG = 3 25 | CAMERA_SEMANTIC_RAW 
# Default server spectator placement (top-down view).
BASE_SERVER_VIEW_CONFIG = {
    "server_view_x_offset": 00,
    "server_view_y_offset": 00,
    "server_view_height": 200,
    "server_view_pitch": -90,
}
BASE_SENSOR_CONFIG = {
    "SENSOR": SensorsEnum.CAMERA_RGB,
    "SENSOR_TRANSFORM": SensorsTransformEnum.Transform_A,
    "CAMERA_X": 84,  # 1280 for full-resolution debugging
    "CAMERA_Y": 84,  # 720
    "CAMERA_FOV": 60,
    "CAMERA_NORMALIZED": True,
    "CAMERA_GRAYSCALE": True,
    "FRAMESTACK": 1,
}
BASE_OBSERVATION_CONFIG = {
    "CAMERA_OBSERVATION": False,
    "COLLISION_OBSERVATION": True,
    "LOCATION_OBSERVATION": True,
}
BASE_EXPERIMENT_CONFIG = {
    "OBSERVATION_CONFIG": BASE_OBSERVATION_CONFIG,
    "Server_View": BASE_SERVER_VIEW_CONFIG,
    "SENSOR_CONFIG": BASE_SENSOR_CONFIG,
    "server_map": "Town02",
    "quality_level": "Low",  # options are low or Epic #ToDO. This does not do anything + change to enum
    "Disable_Rendering_Mode": False,  # If you disable, you will not get camera images
    "number_of_spawning_actors": 10,
    "start_pos_spawn_id": 100,  # 82,
    "end_pos_spawn_id": 45,  # 34,
    "hero_vehicle_model": "vehicle.audi.tt",
    "fps": 30,
    "Weather": carla.WeatherParameters.ClearNoon,
    "RANDOM_RESPAWN": False,  # Actors are randomly Respawned or Not
    "DISCRETE_ACTION": True,
    "Debug": False,
}

# Each action: [throttle, steer, brake, reverse, hand_brake].
# NOTE(review): the Left/Right labels below are inherited from the original code
# but look inverted relative to CARLA's steer convention (positive steer = right)
# — confirm before relying on them.
DISCRETE_ACTIONS_SMALL = {
    0: [0.0, 0.00, 1.0, False, False],  # Apply Break
    1: [1.0, 0.00, 0.0, False, False],  # Straight
    2: [1.0, -0.70, 0.0, False, False],  # Right + Accelerate
    3: [1.0, -0.50, 0.0, False, False],  # Right + Accelerate
    4: [1.0, -0.30, 0.0, False, False],  # Right + Accelerate
    5: [1.0, -0.10, 0.0, False, False],  # Right + Accelerate
    6: [1.0, 0.10, 0.0, False, False],  # Left+Accelerate
    7: [1.0, 0.30, 0.0, False, False],  # Left+Accelerate
    8: [1.0, 0.50, 0.0, False, False],  # Left+Accelerate
    9: [1.0, 0.70, 0.0, False, False],  # Left+Accelerate
    10: [0.0, -0.70, 0.0, False, False],  # Left+Stop
    11: [0.0, -0.23, 0.0, False, False],  # Left+Stop
    12: [0.0, 0.23, 0.0, False, False],  # Right+Stop
    13: [0.0, 0.70, 0.0, False, False],  # Right+Stop
}
DISCRETE_ACTIONS = DISCRETE_ACTIONS_SMALL


class BaseExperiment:
    """Abstract experiment. Subclasses must implement set_observation_space,
    compute_reward and initialize_reward."""

    def __init__(self, config=BASE_EXPERIMENT_CONFIG):
        # NOTE(review): the default is a shared, mutable module-level dict; code
        # elsewhere mutates experiment_config in place (e.g. DEBUG camera sizes),
        # so all default-constructed experiments share that state — TODO confirm
        # whether a deep copy is intended.
        self.experiment_config = config
        self.observation = {}
        self.observation_space = None
        self.action = None  # last carla.VehicleControl applied; None until the first step
        self.action_space = None

        self.hero = None
        self.spectator = None

        self.spawn_point_list = []
        self.vehicle_list = []
        self.start_location = None
        self.end_location = None

        # ''.join(<str>) in the original was an identity no-op; assign directly.
        self.hero_model = self.experiment_config["hero_vehicle_model"]

        self.set_observation_space()
        self.set_action_space()

    def get_experiment_config(self):
        return self.experiment_config

    def set_observation_space(self):
        """
        observation_space_option: Camera Image
        :return: observation space:
        """
        raise NotImplementedError

    def get_observation_space(self):
        """
        :return: observation space
        """
        return self.observation_space

    def set_action_space(self):
        """
        :return: None. In this experiment it is a discrete space (for now)
        """
        self.action_space = Discrete(len(DISCRETE_ACTIONS))

    def get_action_space(self):
        """
        :return: action_space. In this experiment it is a discrete space (for now)
        """
        return self.action_space

    def respawn_actors(self, world):
        """
        Move every background vehicle back to a spawn point (its original one, or a
        random one when RANDOM_RESPAWN is set) and re-enable its autopilot.
        :param world: CARLA world
        :return: None
        """
        random_respawn = self.experiment_config["RANDOM_RESPAWN"]

        # Get all spawn Points
        spawn_points = list(world.get_map().get_spawn_points())

        randomized_vehicle_spawn_point = spawn_points.copy()
        # BUG FIX: the original passed random.random as shuffle's second argument;
        # that parameter was deprecated and removed in Python 3.11, and it was the
        # default anyway, so dropping it is behavior-identical.
        random.shuffle(randomized_vehicle_spawn_point)
        randomized_spawn_list = cycle(randomized_vehicle_spawn_point)

        # ToDo remove hero from this list. This should be already done if no random_respawn is False
        for i in range(len(self.spawn_point_list)):
            self.vehicle_list[i].set_autopilot(False)
            self.vehicle_list[i].set_velocity(carla.Vector3D(0, 0, 0))

            if random_respawn is True:
                next_spawn_point = next(randomized_spawn_list)
            else:
                next_spawn_point = self.spawn_point_list[i]
            self.vehicle_list[i].set_transform(next_spawn_point)

            # Toggle autopilot off/on to reset the traffic-manager state.
            self.vehicle_list[i].set_autopilot(False)
            self.vehicle_list[i].set_autopilot(True)

    def spawn_actors(self, core):
        """
        This experiment spawns vehicles randomly on a map based on a pre-set number of vehicles.
        To spawn, the spawn points are randomized and the vehicles are spawned with each vehicle occupying a
        single spawn point to avoid vehicles running on top of each other.
        This experiment does not spawn any vehicle where the hero is to be spawned.
        :param core: Carla core (provides the world)
        :return: None
        """
        world = core.get_core_world()
        # Get a list of all the four-wheeled vehicle blueprints
        vehicle_blueprints = world.get_blueprint_library().filter("vehicle.*")
        car_blueprints = [
            x
            for x in vehicle_blueprints
            if int(x.get_attribute("number_of_wheels")) == 4
        ]

        # Get all spawn Points
        spawn_points = list(world.get_map().get_spawn_points())

        # Now we are ready to spawn all the vehicles (except the hero)
        count = self.experiment_config["number_of_spawning_actors"]

        # Spawn cars at different spawn locations where the last car spawned is a hero.
        # a) random spawn
        # b) give vehicles time to spawn in place before you spawn a vehicle on top of it
        # ToDo: Every step or reset, consider removing vehicles that have crashed and re-spawn new vehicles

        randomized_vehicle_spawn_point = spawn_points.copy()
        # See respawn_actors: shuffle's second argument was deprecated/removed.
        random.shuffle(randomized_vehicle_spawn_point)
        randomized_spawn_list = cycle(randomized_vehicle_spawn_point)

        self.start_location = spawn_points[self.experiment_config["start_pos_spawn_id"]]
        self.end_location = spawn_points[self.experiment_config["end_pos_spawn_id"]]
        # ToDO SA This function should be split into two functions. One function is specific to the experiment
        # and another function should do the spawning, so the first can be replaced (inherited) per experiment.
        while count > 0:
            next_spawn_point = next(randomized_spawn_list)
            # Before you spawn, make sure you are not spawning in the hero location
            if (next_spawn_point.location.x != self.start_location.location.x) or (
                next_spawn_point.location.y != self.start_location.location.y
            ):
                # Try to spawn but if you can't, just move on
                next_vehicle = spawn_vehicle_at(
                    next_spawn_point,
                    random.choice(car_blueprints),
                    world,
                    autopilot=True,
                    max_time=0.1,
                )
                if next_vehicle is not False:
                    self.spawn_point_list.append(next_spawn_point)
                    self.vehicle_list.append(next_vehicle)
                    count -= 1

    def set_server_view(self, core):
        """
        Set server view to be behind the hero
        :param core: Carla Core
        :return: None
        """
        # spectator following the car
        transforms = self.hero.get_transform()
        server_view_x = self.hero.get_location().x - 5 * transforms.get_forward_vector().x
        server_view_y = self.hero.get_location().y - 5 * transforms.get_forward_vector().y
        server_view_z = self.hero.get_location().z + 3
        server_view_pitch = transforms.rotation.pitch
        server_view_yaw = transforms.rotation.yaw
        server_view_roll = transforms.rotation.roll
        self.spectator = core.get_core_world().get_spectator()
        self.spectator.set_transform(
            carla.Transform(
                carla.Location(x=server_view_x, y=server_view_y, z=server_view_z),
                carla.Rotation(pitch=server_view_pitch, yaw=server_view_yaw, roll=server_view_roll),
            )
        )

    def get_done_status(self):
        # Episode ends on collision (observation["collision"] is truthy).
        done = self.observation["collision"]
        return done

    def process_observation(self, core, observation):
        """
        Main function to do all the post processing of observations. This is an example.
        :param core: Carla core
        :param observation: raw observation dict (must contain 'camera')
        :return: observation with the camera image normalized/grayscaled per config
        """
        observation['camera'] = post_process_image(
            observation['camera'],
            normalized=self.experiment_config["SENSOR_CONFIG"]["CAMERA_NORMALIZED"],
            grayscale=self.experiment_config["SENSOR_CONFIG"]["CAMERA_GRAYSCALE"]
        )
        return observation

    def get_observation(self, core):
        """
        Assemble the observation dict from the enabled sensors plus control info.
        :param core: Carla core
        :return: (observation dict, info dict with the last applied control)
        """
        info = {}
        if self.experiment_config["OBSERVATION_CONFIG"]["CAMERA_OBSERVATION"]:
            self.observation["camera"] = core.get_camera_data()
        if self.experiment_config["OBSERVATION_CONFIG"]["COLLISION_OBSERVATION"]:
            self.observation["collision"] = core.get_collision_data()
        if self.experiment_config["OBSERVATION_CONFIG"]["LOCATION_OBSERVATION"]:
            self.observation["location"] = self.hero.get_transform()

        # Assumes update_actions ran first (experiment_tick guarantees this).
        info["control"] = {
            "steer": self.action.steer,
            "throttle": self.action.throttle,
            "brake": self.action.brake,
            "reverse": self.action.reverse,
            "hand_brake": self.action.hand_brake,
        }
        return self.observation, info

    def update_measurements(self, core):
        # Advance the sensor queues for every enabled observation source.
        if self.experiment_config["OBSERVATION_CONFIG"]["CAMERA_OBSERVATION"]:
            core.update_camera()
        if self.experiment_config["OBSERVATION_CONFIG"]["COLLISION_OBSERVATION"]:
            core.update_collision()

    def update_actions(self, action, hero):
        """
        Translate a discrete action id into a carla.VehicleControl and apply it.
        :param action: key into DISCRETE_ACTIONS, or None for a default (no-op) control
        :param hero: hero vehicle to control
        :return: None
        """
        # ToDO SA: These actions are not good, we should have incremental actions
        # (like current action = previous action + extra). This is absolutely necessary for realism.
        if action is None:
            self.action = carla.VehicleControl()
        else:
            if self.action is None:
                # BUG FIX: originally the first non-None action crashed with
                # AttributeError because self.action was still None from __init__.
                self.action = carla.VehicleControl()
            throttle, steer, brake, reverse, hand_brake = DISCRETE_ACTIONS[int(action)]
            self.action.throttle = float(np.clip(throttle, 0, 1))
            self.action.steer = float(np.clip(steer, -0.7, 0.7))
            self.action.brake = float(np.clip(brake, 0, 1))
            self.action.reverse = reverse
            self.action.hand_brake = hand_brake
        hero.apply_control(self.action)

    def compute_reward(self, core, observation):
        """
        Subclass hook: compute the per-step reward.
        :param core: Carla core
        :param observation: current observation dict
        :return: NotImplemented (kept for backward compatibility; subclasses override)
        """
        return NotImplemented

    def initialize_reward(self, core):
        """
        Generic initialization of reward function
        :param core: Carla core
        :return: None
        """
        print("This is a base experiment. Make sure you make you own reward initialization function")
        raise NotImplementedError

    # ==============================================================================
    # -- Hero -----------------------------------------------------------
    # ==============================================================================
    def spawn_hero(self, core, transform, autopilot=False):
        """
        This function spawns the hero vehicle. It makes sure that if a hero exists => destroy the hero and respawn
        :param core: Carla core
        :param transform: Hero location
        :param autopilot: Autopilot Status
        :return: None
        """
        world = core.get_core_world()

        if self.hero is not None:
            self.hero.destroy()
            self.hero = None

        hero_car_blueprint = world.get_blueprint_library().find(self.hero_model)
        hero_car_blueprint.set_attribute("role_name", "hero")

        # NOTE(review): this retries forever if the spawn point stays blocked —
        # consider a retry limit or ticking the world between attempts.
        while self.hero is None:
            self.hero = world.try_spawn_actor(hero_car_blueprint, transform)

        self.hero.set_autopilot(autopilot)

    def get_hero(self):
        """
        Get hero vehicle
        :return: the hero actor (or None before spawn_hero)
        """
        return self.hero

    # ==============================================================================
    # -- Tick -----------------------------------------------------------
    # ==============================================================================

    def experiment_tick(self, core, action):
        """
        This is the "tick" logic: advance the world one step, refresh sensor
        measurements, then apply the action to the hero.
        :param core: Carla core
        :param action: discrete action id (or None)
        :return: None
        """
        world = core.get_core_world()
        world.tick()
        self.update_measurements(core)
        self.update_actions(action, self.hero)


"""
#ToDO FOR NOW this stuff should be in a different function
self.end_location = spawn_points[self.environment_config["end_pos_spawn_id"]]

self.total_distance_to_goal_euclidean = float(np.linalg.norm(
    [self.start_location.location.x - self.end_location.location.x,
     self.start_location.location.y - self.end_location.location.y]) / 100)

self.x_dist = np.abs(self.start_location.location.x - self.end_location.location.x)
self.y_dist = np.abs(self.start_location.location.y - self.end_location.location.y)
"""
21 | 22 | When we speak of free software, we are referring to freedom, not 23 | price. Our General Public Licenses are designed to make sure that you 24 | have the freedom to distribute copies of free software (and charge for 25 | them if you wish), that you receive source code or can get it if you 26 | want it, that you can change the software or use pieces of it in new 27 | free programs, and that you know you can do these things. 28 | 29 | To protect your rights, we need to prevent others from denying you 30 | these rights or asking you to surrender the rights. Therefore, you have 31 | certain responsibilities if you distribute copies of the software, or if 32 | you modify it: responsibilities to respect the freedom of others. 33 | 34 | For example, if you distribute copies of such a program, whether 35 | gratis or for a fee, you must pass on to the recipients the same 36 | freedoms that you received. You must make sure that they, too, receive 37 | or can get the source code. And you must show them these terms so they 38 | know their rights. 39 | 40 | Developers that use the GNU GPL protect your rights with two steps: 41 | (1) assert copyright on the software, and (2) offer you this License 42 | giving you legal permission to copy, distribute and/or modify it. 43 | 44 | For the developers' and authors' protection, the GPL clearly explains 45 | that there is no warranty for this free software. For both users' and 46 | authors' sake, the GPL requires that modified versions be marked as 47 | changed, so that their problems will not be attributed erroneously to 48 | authors of previous versions. 49 | 50 | Some devices are designed to deny users access to install or run 51 | modified versions of the software inside them, although the manufacturer 52 | can do so. This is fundamentally incompatible with the aim of 53 | protecting users' freedom to change the software. 
The systematic 54 | pattern of such abuse occurs in the area of products for individuals to 55 | use, which is precisely where it is most unacceptable. Therefore, we 56 | have designed this version of the GPL to prohibit the practice for those 57 | products. If such problems arise substantially in other domains, we 58 | stand ready to extend this provision to those domains in future versions 59 | of the GPL, as needed to protect the freedom of users. 60 | 61 | Finally, every program is threatened constantly by software patents. 62 | States should not allow patents to restrict development and use of 63 | software on general-purpose computers, but in those that do, we wish to 64 | avoid the special danger that patents applied to a free program could 65 | make it effectively proprietary. To prevent this, the GPL assures that 66 | patents cannot be used to render the program non-free. 67 | 68 | The precise terms and conditions for copying, distribution and 69 | modification follow. 70 | 71 | TERMS AND CONDITIONS 72 | 73 | 0. Definitions. 74 | 75 | "This License" refers to version 3 of the GNU General Public License. 76 | 77 | "Copyright" also means copyright-like laws that apply to other kinds of 78 | works, such as semiconductor masks. 79 | 80 | "The Program" refers to any copyrightable work licensed under this 81 | License. Each licensee is addressed as "you". "Licensees" and 82 | "recipients" may be individuals or organizations. 83 | 84 | To "modify" a work means to copy from or adapt all or part of the work 85 | in a fashion requiring copyright permission, other than the making of an 86 | exact copy. The resulting work is called a "modified version" of the 87 | earlier work or a work "based on" the earlier work. 88 | 89 | A "covered work" means either the unmodified Program or a work based 90 | on the Program. 
91 | 92 | To "propagate" a work means to do anything with it that, without 93 | permission, would make you directly or secondarily liable for 94 | infringement under applicable copyright law, except executing it on a 95 | computer or modifying a private copy. Propagation includes copying, 96 | distribution (with or without modification), making available to the 97 | public, and in some countries other activities as well. 98 | 99 | To "convey" a work means any kind of propagation that enables other 100 | parties to make or receive copies. Mere interaction with a user through 101 | a computer network, with no transfer of a copy, is not conveying. 102 | 103 | An interactive user interface displays "Appropriate Legal Notices" 104 | to the extent that it includes a convenient and prominently visible 105 | feature that (1) displays an appropriate copyright notice, and (2) 106 | tells the user that there is no warranty for the work (except to the 107 | extent that warranties are provided), that licensees may convey the 108 | work under this License, and how to view a copy of this License. If 109 | the interface presents a list of user commands or options, such as a 110 | menu, a prominent item in the list meets this criterion. 111 | 112 | 1. Source Code. 113 | 114 | The "source code" for a work means the preferred form of the work 115 | for making modifications to it. "Object code" means any non-source 116 | form of a work. 117 | 118 | A "Standard Interface" means an interface that either is an official 119 | standard defined by a recognized standards body, or, in the case of 120 | interfaces specified for a particular programming language, one that 121 | is widely used among developers working in that language. 
122 | 123 | The "System Libraries" of an executable work include anything, other 124 | than the work as a whole, that (a) is included in the normal form of 125 | packaging a Major Component, but which is not part of that Major 126 | Component, and (b) serves only to enable use of the work with that 127 | Major Component, or to implement a Standard Interface for which an 128 | implementation is available to the public in source code form. A 129 | "Major Component", in this context, means a major essential component 130 | (kernel, window system, and so on) of the specific operating system 131 | (if any) on which the executable work runs, or a compiler used to 132 | produce the work, or an object code interpreter used to run it. 133 | 134 | The "Corresponding Source" for a work in object code form means all 135 | the source code needed to generate, install, and (for an executable 136 | work) run the object code and to modify the work, including scripts to 137 | control those activities. However, it does not include the work's 138 | System Libraries, or general-purpose tools or generally available free 139 | programs which are used unmodified in performing those activities but 140 | which are not part of the work. For example, Corresponding Source 141 | includes interface definition files associated with source files for 142 | the work, and the source code for shared libraries and dynamically 143 | linked subprograms that the work is specifically designed to require, 144 | such as by intimate data communication or control flow between those 145 | subprograms and other parts of the work. 146 | 147 | The Corresponding Source need not include anything that users 148 | can regenerate automatically from other parts of the Corresponding 149 | Source. 150 | 151 | The Corresponding Source for a work in source code form is that 152 | same work. 153 | 154 | 2. Basic Permissions. 
155 | 156 | All rights granted under this License are granted for the term of 157 | copyright on the Program, and are irrevocable provided the stated 158 | conditions are met. This License explicitly affirms your unlimited 159 | permission to run the unmodified Program. The output from running a 160 | covered work is covered by this License only if the output, given its 161 | content, constitutes a covered work. This License acknowledges your 162 | rights of fair use or other equivalent, as provided by copyright law. 163 | 164 | You may make, run and propagate covered works that you do not 165 | convey, without conditions so long as your license otherwise remains 166 | in force. You may convey covered works to others for the sole purpose 167 | of having them make modifications exclusively for you, or provide you 168 | with facilities for running those works, provided that you comply with 169 | the terms of this License in conveying all material for which you do 170 | not control copyright. Those thus making or running the covered works 171 | for you must do so exclusively on your behalf, under your direction 172 | and control, on terms that prohibit them from making any copies of 173 | your copyrighted material outside their relationship with you. 174 | 175 | Conveying under any other circumstances is permitted solely under 176 | the conditions stated below. Sublicensing is not allowed; section 10 177 | makes it unnecessary. 178 | 179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 180 | 181 | No covered work shall be deemed part of an effective technological 182 | measure under any applicable law fulfilling obligations under article 183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or 184 | similar laws prohibiting or restricting circumvention of such 185 | measures. 
186 | 187 | When you convey a covered work, you waive any legal power to forbid 188 | circumvention of technological measures to the extent such circumvention 189 | is effected by exercising rights under this License with respect to 190 | the covered work, and you disclaim any intention to limit operation or 191 | modification of the work as a means of enforcing, against the work's 192 | users, your or third parties' legal rights to forbid circumvention of 193 | technological measures. 194 | 195 | 4. Conveying Verbatim Copies. 196 | 197 | You may convey verbatim copies of the Program's source code as you 198 | receive it, in any medium, provided that you conspicuously and 199 | appropriately publish on each copy an appropriate copyright notice; 200 | keep intact all notices stating that this License and any 201 | non-permissive terms added in accord with section 7 apply to the code; 202 | keep intact all notices of the absence of any warranty; and give all 203 | recipients a copy of this License along with the Program. 204 | 205 | You may charge any price or no price for each copy that you convey, 206 | and you may offer support or warranty protection for a fee. 207 | 208 | 5. Conveying Modified Source Versions. 209 | 210 | You may convey a work based on the Program, or the modifications to 211 | produce it from the Program, in the form of source code under the 212 | terms of section 4, provided that you also meet all of these conditions: 213 | 214 | a) The work must carry prominent notices stating that you modified 215 | it, and giving a relevant date. 216 | 217 | b) The work must carry prominent notices stating that it is 218 | released under this License and any conditions added under section 219 | 7. This requirement modifies the requirement in section 4 to 220 | "keep intact all notices". 221 | 222 | c) You must license the entire work, as a whole, under this 223 | License to anyone who comes into possession of a copy. 
This 224 | License will therefore apply, along with any applicable section 7 225 | additional terms, to the whole of the work, and all its parts, 226 | regardless of how they are packaged. This License gives no 227 | permission to license the work in any other way, but it does not 228 | invalidate such permission if you have separately received it. 229 | 230 | d) If the work has interactive user interfaces, each must display 231 | Appropriate Legal Notices; however, if the Program has interactive 232 | interfaces that do not display Appropriate Legal Notices, your 233 | work need not make them do so. 234 | 235 | A compilation of a covered work with other separate and independent 236 | works, which are not by their nature extensions of the covered work, 237 | and which are not combined with it such as to form a larger program, 238 | in or on a volume of a storage or distribution medium, is called an 239 | "aggregate" if the compilation and its resulting copyright are not 240 | used to limit the access or legal rights of the compilation's users 241 | beyond what the individual works permit. Inclusion of a covered work 242 | in an aggregate does not cause this License to apply to the other 243 | parts of the aggregate. 244 | 245 | 6. Conveying Non-Source Forms. 246 | 247 | You may convey a covered work in object code form under the terms 248 | of sections 4 and 5, provided that you also convey the 249 | machine-readable Corresponding Source under the terms of this License, 250 | in one of these ways: 251 | 252 | a) Convey the object code in, or embodied in, a physical product 253 | (including a physical distribution medium), accompanied by the 254 | Corresponding Source fixed on a durable physical medium 255 | customarily used for software interchange. 
256 | 257 | b) Convey the object code in, or embodied in, a physical product 258 | (including a physical distribution medium), accompanied by a 259 | written offer, valid for at least three years and valid for as 260 | long as you offer spare parts or customer support for that product 261 | model, to give anyone who possesses the object code either (1) a 262 | copy of the Corresponding Source for all the software in the 263 | product that is covered by this License, on a durable physical 264 | medium customarily used for software interchange, for a price no 265 | more than your reasonable cost of physically performing this 266 | conveying of source, or (2) access to copy the 267 | Corresponding Source from a network server at no charge. 268 | 269 | c) Convey individual copies of the object code with a copy of the 270 | written offer to provide the Corresponding Source. This 271 | alternative is allowed only occasionally and noncommercially, and 272 | only if you received the object code with such an offer, in accord 273 | with subsection 6b. 274 | 275 | d) Convey the object code by offering access from a designated 276 | place (gratis or for a charge), and offer equivalent access to the 277 | Corresponding Source in the same way through the same place at no 278 | further charge. You need not require recipients to copy the 279 | Corresponding Source along with the object code. If the place to 280 | copy the object code is a network server, the Corresponding Source 281 | may be on a different server (operated by you or a third party) 282 | that supports equivalent copying facilities, provided you maintain 283 | clear directions next to the object code saying where to find the 284 | Corresponding Source. Regardless of what server hosts the 285 | Corresponding Source, you remain obligated to ensure that it is 286 | available for as long as needed to satisfy these requirements. 
287 | 288 | e) Convey the object code using peer-to-peer transmission, provided 289 | you inform other peers where the object code and Corresponding 290 | Source of the work are being offered to the general public at no 291 | charge under subsection 6d. 292 | 293 | A separable portion of the object code, whose source code is excluded 294 | from the Corresponding Source as a System Library, need not be 295 | included in conveying the object code work. 296 | 297 | A "User Product" is either (1) a "consumer product", which means any 298 | tangible personal property which is normally used for personal, family, 299 | or household purposes, or (2) anything designed or sold for incorporation 300 | into a dwelling. In determining whether a product is a consumer product, 301 | doubtful cases shall be resolved in favor of coverage. For a particular 302 | product received by a particular user, "normally used" refers to a 303 | typical or common use of that class of product, regardless of the status 304 | of the particular user or of the way in which the particular user 305 | actually uses, or expects or is expected to use, the product. A product 306 | is a consumer product regardless of whether the product has substantial 307 | commercial, industrial or non-consumer uses, unless such uses represent 308 | the only significant mode of use of the product. 309 | 310 | "Installation Information" for a User Product means any methods, 311 | procedures, authorization keys, or other information required to install 312 | and execute modified versions of a covered work in that User Product from 313 | a modified version of its Corresponding Source. The information must 314 | suffice to ensure that the continued functioning of the modified object 315 | code is in no case prevented or interfered with solely because 316 | modification has been made. 
317 | 318 | If you convey an object code work under this section in, or with, or 319 | specifically for use in, a User Product, and the conveying occurs as 320 | part of a transaction in which the right of possession and use of the 321 | User Product is transferred to the recipient in perpetuity or for a 322 | fixed term (regardless of how the transaction is characterized), the 323 | Corresponding Source conveyed under this section must be accompanied 324 | by the Installation Information. But this requirement does not apply 325 | if neither you nor any third party retains the ability to install 326 | modified object code on the User Product (for example, the work has 327 | been installed in ROM). 328 | 329 | The requirement to provide Installation Information does not include a 330 | requirement to continue to provide support service, warranty, or updates 331 | for a work that has been modified or installed by the recipient, or for 332 | the User Product in which it has been modified or installed. Access to a 333 | network may be denied when the modification itself materially and 334 | adversely affects the operation of the network or violates the rules and 335 | protocols for communication across the network. 336 | 337 | Corresponding Source conveyed, and Installation Information provided, 338 | in accord with this section must be in a format that is publicly 339 | documented (and with an implementation available to the public in 340 | source code form), and must require no special password or key for 341 | unpacking, reading or copying. 342 | 343 | 7. Additional Terms. 344 | 345 | "Additional permissions" are terms that supplement the terms of this 346 | License by making exceptions from one or more of its conditions. 347 | Additional permissions that are applicable to the entire Program shall 348 | be treated as though they were included in this License, to the extent 349 | that they are valid under applicable law. 
If additional permissions 350 | apply only to part of the Program, that part may be used separately 351 | under those permissions, but the entire Program remains governed by 352 | this License without regard to the additional permissions. 353 | 354 | When you convey a copy of a covered work, you may at your option 355 | remove any additional permissions from that copy, or from any part of 356 | it. (Additional permissions may be written to require their own 357 | removal in certain cases when you modify the work.) You may place 358 | additional permissions on material, added by you to a covered work, 359 | for which you have or can give appropriate copyright permission. 360 | 361 | Notwithstanding any other provision of this License, for material you 362 | add to a covered work, you may (if authorized by the copyright holders of 363 | that material) supplement the terms of this License with terms: 364 | 365 | a) Disclaiming warranty or limiting liability differently from the 366 | terms of sections 15 and 16 of this License; or 367 | 368 | b) Requiring preservation of specified reasonable legal notices or 369 | author attributions in that material or in the Appropriate Legal 370 | Notices displayed by works containing it; or 371 | 372 | c) Prohibiting misrepresentation of the origin of that material, or 373 | requiring that modified versions of such material be marked in 374 | reasonable ways as different from the original version; or 375 | 376 | d) Limiting the use for publicity purposes of names of licensors or 377 | authors of the material; or 378 | 379 | e) Declining to grant rights under trademark law for use of some 380 | trade names, trademarks, or service marks; or 381 | 382 | f) Requiring indemnification of licensors and authors of that 383 | material by anyone who conveys the material (or modified versions of 384 | it) with contractual assumptions of liability to the recipient, for 385 | any liability that these contractual assumptions directly impose on 
386 | those licensors and authors. 387 | 388 | All other non-permissive additional terms are considered "further 389 | restrictions" within the meaning of section 10. If the Program as you 390 | received it, or any part of it, contains a notice stating that it is 391 | governed by this License along with a term that is a further 392 | restriction, you may remove that term. If a license document contains 393 | a further restriction but permits relicensing or conveying under this 394 | License, you may add to a covered work material governed by the terms 395 | of that license document, provided that the further restriction does 396 | not survive such relicensing or conveying. 397 | 398 | If you add terms to a covered work in accord with this section, you 399 | must place, in the relevant source files, a statement of the 400 | additional terms that apply to those files, or a notice indicating 401 | where to find the applicable terms. 402 | 403 | Additional terms, permissive or non-permissive, may be stated in the 404 | form of a separately written license, or stated as exceptions; 405 | the above requirements apply either way. 406 | 407 | 8. Termination. 408 | 409 | You may not propagate or modify a covered work except as expressly 410 | provided under this License. Any attempt otherwise to propagate or 411 | modify it is void, and will automatically terminate your rights under 412 | this License (including any patent licenses granted under the third 413 | paragraph of section 11). 414 | 415 | However, if you cease all violation of this License, then your 416 | license from a particular copyright holder is reinstated (a) 417 | provisionally, unless and until the copyright holder explicitly and 418 | finally terminates your license, and (b) permanently, if the copyright 419 | holder fails to notify you of the violation by some reasonable means 420 | prior to 60 days after the cessation. 
421 | 422 | Moreover, your license from a particular copyright holder is 423 | reinstated permanently if the copyright holder notifies you of the 424 | violation by some reasonable means, this is the first time you have 425 | received notice of violation of this License (for any work) from that 426 | copyright holder, and you cure the violation prior to 30 days after 427 | your receipt of the notice. 428 | 429 | Termination of your rights under this section does not terminate the 430 | licenses of parties who have received copies or rights from you under 431 | this License. If your rights have been terminated and not permanently 432 | reinstated, you do not qualify to receive new licenses for the same 433 | material under section 10. 434 | 435 | 9. Acceptance Not Required for Having Copies. 436 | 437 | You are not required to accept this License in order to receive or 438 | run a copy of the Program. Ancillary propagation of a covered work 439 | occurring solely as a consequence of using peer-to-peer transmission 440 | to receive a copy likewise does not require acceptance. However, 441 | nothing other than this License grants you permission to propagate or 442 | modify any covered work. These actions infringe copyright if you do 443 | not accept this License. Therefore, by modifying or propagating a 444 | covered work, you indicate your acceptance of this License to do so. 445 | 446 | 10. Automatic Licensing of Downstream Recipients. 447 | 448 | Each time you convey a covered work, the recipient automatically 449 | receives a license from the original licensors, to run, modify and 450 | propagate that work, subject to this License. You are not responsible 451 | for enforcing compliance by third parties with this License. 452 | 453 | An "entity transaction" is a transaction transferring control of an 454 | organization, or substantially all assets of one, or subdividing an 455 | organization, or merging organizations. 
If propagation of a covered 456 | work results from an entity transaction, each party to that 457 | transaction who receives a copy of the work also receives whatever 458 | licenses to the work the party's predecessor in interest had or could 459 | give under the previous paragraph, plus a right to possession of the 460 | Corresponding Source of the work from the predecessor in interest, if 461 | the predecessor has it or can get it with reasonable efforts. 462 | 463 | You may not impose any further restrictions on the exercise of the 464 | rights granted or affirmed under this License. For example, you may 465 | not impose a license fee, royalty, or other charge for exercise of 466 | rights granted under this License, and you may not initiate litigation 467 | (including a cross-claim or counterclaim in a lawsuit) alleging that 468 | any patent claim is infringed by making, using, selling, offering for 469 | sale, or importing the Program or any portion of it. 470 | 471 | 11. Patents. 472 | 473 | A "contributor" is a copyright holder who authorizes use under this 474 | License of the Program or a work on which the Program is based. The 475 | work thus licensed is called the contributor's "contributor version". 476 | 477 | A contributor's "essential patent claims" are all patent claims 478 | owned or controlled by the contributor, whether already acquired or 479 | hereafter acquired, that would be infringed by some manner, permitted 480 | by this License, of making, using, or selling its contributor version, 481 | but do not include claims that would be infringed only as a 482 | consequence of further modification of the contributor version. For 483 | purposes of this definition, "control" includes the right to grant 484 | patent sublicenses in a manner consistent with the requirements of 485 | this License. 
486 | 487 | Each contributor grants you a non-exclusive, worldwide, royalty-free 488 | patent license under the contributor's essential patent claims, to 489 | make, use, sell, offer for sale, import and otherwise run, modify and 490 | propagate the contents of its contributor version. 491 | 492 | In the following three paragraphs, a "patent license" is any express 493 | agreement or commitment, however denominated, not to enforce a patent 494 | (such as an express permission to practice a patent or covenant not to 495 | sue for patent infringement). To "grant" such a patent license to a 496 | party means to make such an agreement or commitment not to enforce a 497 | patent against the party. 498 | 499 | If you convey a covered work, knowingly relying on a patent license, 500 | and the Corresponding Source of the work is not available for anyone 501 | to copy, free of charge and under the terms of this License, through a 502 | publicly available network server or other readily accessible means, 503 | then you must either (1) cause the Corresponding Source to be so 504 | available, or (2) arrange to deprive yourself of the benefit of the 505 | patent license for this particular work, or (3) arrange, in a manner 506 | consistent with the requirements of this License, to extend the patent 507 | license to downstream recipients. "Knowingly relying" means you have 508 | actual knowledge that, but for the patent license, your conveying the 509 | covered work in a country, or your recipient's use of the covered work 510 | in a country, would infringe one or more identifiable patents in that 511 | country that you have reason to believe are valid. 
512 | 513 | If, pursuant to or in connection with a single transaction or 514 | arrangement, you convey, or propagate by procuring conveyance of, a 515 | covered work, and grant a patent license to some of the parties 516 | receiving the covered work authorizing them to use, propagate, modify 517 | or convey a specific copy of the covered work, then the patent license 518 | you grant is automatically extended to all recipients of the covered 519 | work and works based on it. 520 | 521 | A patent license is "discriminatory" if it does not include within 522 | the scope of its coverage, prohibits the exercise of, or is 523 | conditioned on the non-exercise of one or more of the rights that are 524 | specifically granted under this License. You may not convey a covered 525 | work if you are a party to an arrangement with a third party that is 526 | in the business of distributing software, under which you make payment 527 | to the third party based on the extent of your activity of conveying 528 | the work, and under which the third party grants, to any of the 529 | parties who would receive the covered work from you, a discriminatory 530 | patent license (a) in connection with copies of the covered work 531 | conveyed by you (or copies made from those copies), or (b) primarily 532 | for and in connection with specific products or compilations that 533 | contain the covered work, unless you entered into that arrangement, 534 | or that patent license was granted, prior to 28 March 2007. 535 | 536 | Nothing in this License shall be construed as excluding or limiting 537 | any implied license or other defenses to infringement that may 538 | otherwise be available to you under applicable patent law. 539 | 540 | 12. No Surrender of Others' Freedom. 541 | 542 | If conditions are imposed on you (whether by court order, agreement or 543 | otherwise) that contradict the conditions of this License, they do not 544 | excuse you from the conditions of this License. 
If you cannot convey a 545 | covered work so as to satisfy simultaneously your obligations under this 546 | License and any other pertinent obligations, then as a consequence you may 547 | not convey it at all. For example, if you agree to terms that obligate you 548 | to collect a royalty for further conveying from those to whom you convey 549 | the Program, the only way you could satisfy both those terms and this 550 | License would be to refrain entirely from conveying the Program. 551 | 552 | 13. Use with the GNU Affero General Public License. 553 | 554 | Notwithstanding any other provision of this License, you have 555 | permission to link or combine any covered work with a work licensed 556 | under version 3 of the GNU Affero General Public License into a single 557 | combined work, and to convey the resulting work. The terms of this 558 | License will continue to apply to the part which is the covered work, 559 | but the special requirements of the GNU Affero General Public License, 560 | section 13, concerning interaction through a network will apply to the 561 | combination as such. 562 | 563 | 14. Revised Versions of this License. 564 | 565 | The Free Software Foundation may publish revised and/or new versions of 566 | the GNU General Public License from time to time. Such new versions will 567 | be similar in spirit to the present version, but may differ in detail to 568 | address new problems or concerns. 569 | 570 | Each version is given a distinguishing version number. If the 571 | Program specifies that a certain numbered version of the GNU General 572 | Public License "or any later version" applies to it, you have the 573 | option of following the terms and conditions either of that numbered 574 | version or of any later version published by the Free Software 575 | Foundation. If the Program does not specify a version number of the 576 | GNU General Public License, you may choose any version ever published 577 | by the Free Software Foundation. 
578 | 579 | If the Program specifies that a proxy can decide which future 580 | versions of the GNU General Public License can be used, that proxy's 581 | public statement of acceptance of a version permanently authorizes you 582 | to choose that version for the Program. 583 | 584 | Later license versions may give you additional or different 585 | permissions. However, no additional obligations are imposed on any 586 | author or copyright holder as a result of your choosing to follow a 587 | later version. 588 | 589 | 15. Disclaimer of Warranty. 590 | 591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY 592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT 593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY 594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, 595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM 597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF 598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 599 | 600 | 16. Limitation of Liability. 601 | 602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS 604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY 605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE 606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF 607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD 608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), 609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF 610 | SUCH DAMAGES. 611 | 612 | 17. Interpretation of Sections 15 and 16. 
613 | 614 | If the disclaimer of warranty and limitation of liability provided 615 | above cannot be given local legal effect according to their terms, 616 | reviewing courts shall apply local law that most closely approximates 617 | an absolute waiver of all civil liability in connection with the 618 | Program, unless a warranty or assumption of liability accompanies a 619 | copy of the Program in return for a fee. 620 | 621 | END OF TERMS AND CONDITIONS 622 | 623 | How to Apply These Terms to Your New Programs 624 | 625 | If you develop a new program, and you want it to be of the greatest 626 | possible use to the public, the best way to achieve this is to make it 627 | free software which everyone can redistribute and change under these terms. 628 | 629 | To do so, attach the following notices to the program. It is safest 630 | to attach them to the start of each source file to most effectively 631 | state the exclusion of warranty; and each file should have at least 632 | the "copyright" line and a pointer to where the full notice is found. 633 | 634 | 635 | Copyright (C) 636 | 637 | This program is free software: you can redistribute it and/or modify 638 | it under the terms of the GNU General Public License as published by 639 | the Free Software Foundation, either version 3 of the License, or 640 | (at your option) any later version. 641 | 642 | This program is distributed in the hope that it will be useful, 643 | but WITHOUT ANY WARRANTY; without even the implied warranty of 644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 645 | GNU General Public License for more details. 646 | 647 | You should have received a copy of the GNU General Public License 648 | along with this program. If not, see . 649 | 650 | Also add information on how to contact you by electronic and paper mail. 
651 | 652 | If the program does terminal interaction, make it output a short 653 | notice like this when it starts in an interactive mode: 654 | 655 | Copyright (C) 656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 657 | This is free software, and you are welcome to redistribute it 658 | under certain conditions; type `show c' for details. 659 | 660 | The hypothetical commands `show w' and `show c' should show the appropriate 661 | parts of the General Public License. Of course, your program's commands 662 | might be different; for a GUI interface, you would use an "about box". 663 | 664 | You should also get your employer (if you work as a programmer) or school, 665 | if any, to sign a "copyright disclaimer" for the program, if necessary. 666 | For more information on this, and how to apply and follow the GNU GPL, see 667 | . 668 | 669 | The GNU General Public License does not permit incorporating your program 670 | into proprietary programs. If your program is a subroutine library, you 671 | may consider it more useful to permit linking proprietary applications with 672 | the library. If this is what you want to do, use the GNU Lesser General 673 | Public License instead of this License. But first, please read 674 | . 675 | -------------------------------------------------------------------------------- /test_code/manual_control.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright (c) 2019 Computer Vision Center (CVC) at the Universitat Autonoma de 4 | # Barcelona (UAB). 5 | # 6 | # This work is licensed under the terms of the MIT license. 7 | # For a copy, see . 8 | 9 | # Allows controlling a vehicle with a keyboard. For a simpler and more 10 | # documented example, please take a look at tutorial.py. 11 | 12 | """ 13 | Welcome to CARLA manual control. 14 | 15 | Use ARROWS or WASD keys for control. 
16 | 17 | W : throttle 18 | S : brake 19 | AD : steer 20 | Q : toggle reverse 21 | Space : hand-brake 22 | P : toggle autopilot 23 | M : toggle manual transmission 24 | ,/. : gear up/down 25 | 26 | TAB : change sensor position 27 | ` : next sensor 28 | [1-9] : change to sensor [1-9] 29 | C : change weather (Shift+C reverse) 30 | Backspace : change vehicle 31 | 32 | R : toggle recording images to disk 33 | 34 | CTRL + R : toggle recording of simulation (replacing any previous) 35 | CTRL + P : start replaying last recorded simulation 36 | CTRL + + : increments the start time of the replay by 1 second (+SHIFT = 10 seconds) 37 | CTRL + - : decrements the start time of the replay by 1 second (+SHIFT = 10 seconds) 38 | 39 | F1 : toggle HUD 40 | H/? : toggle help 41 | ESC : quit 42 | """ 43 | 44 | from __future__ import print_function 45 | 46 | 47 | # ============================================================================== 48 | # -- find carla module --------------------------------------------------------- 49 | # ============================================================================== 50 | 51 | 52 | import glob 53 | import os 54 | import sys 55 | 56 | from helper.CarlaHelper import add_carla_path 57 | 58 | ENV_CONFIG = { 59 | "RAY": False, # True if you are running an experiment in Ray 60 | "DEBUG_MODE": True, 61 | "CARLA_PATH_CONFIG_FILE": "CARLA_PATH.txt", # IN this file, put the path to your CARLA FOLDER 62 | } 63 | 64 | CARLA_SERVER_BINARY = add_carla_path(ENV_CONFIG["CARLA_PATH_CONFIG_FILE"]) 65 | ENV_CONFIG.update({"SERVER_BINARY": CARLA_SERVER_BINARY}) 66 | 67 | import carla 68 | 69 | from core.CarlaCore2 import CarlaCore 70 | from experiments.experiment1 import Experiment 71 | import time 72 | # ============================================================================== 73 | # -- imports ------------------------------------------------------------------- 74 | # ============================================================================== 75 | 76 | 
77 | 78 | from carla import ColorConverter as cc 79 | 80 | import argparse 81 | import collections 82 | import datetime 83 | import logging 84 | import math 85 | import random 86 | import re 87 | import weakref 88 | 89 | try: 90 | import pygame 91 | from pygame.locals import KMOD_CTRL 92 | from pygame.locals import KMOD_SHIFT 93 | from pygame.locals import K_0 94 | from pygame.locals import K_9 95 | from pygame.locals import K_BACKQUOTE 96 | from pygame.locals import K_BACKSPACE 97 | from pygame.locals import K_COMMA 98 | from pygame.locals import K_DOWN 99 | from pygame.locals import K_ESCAPE 100 | from pygame.locals import K_F1 101 | from pygame.locals import K_LEFT 102 | from pygame.locals import K_PERIOD 103 | from pygame.locals import K_RIGHT 104 | from pygame.locals import K_SLASH 105 | from pygame.locals import K_SPACE 106 | from pygame.locals import K_TAB 107 | from pygame.locals import K_UP 108 | from pygame.locals import K_a 109 | from pygame.locals import K_c 110 | from pygame.locals import K_d 111 | from pygame.locals import K_h 112 | from pygame.locals import K_m 113 | from pygame.locals import K_p 114 | from pygame.locals import K_q 115 | from pygame.locals import K_r 116 | from pygame.locals import K_s 117 | from pygame.locals import K_w 118 | from pygame.locals import K_MINUS 119 | from pygame.locals import K_EQUALS 120 | except ImportError: 121 | raise RuntimeError('cannot import pygame, make sure pygame package is installed') 122 | 123 | try: 124 | import numpy as np 125 | except ImportError: 126 | raise RuntimeError('cannot import numpy, make sure numpy package is installed') 127 | 128 | 129 | # ============================================================================== 130 | # -- Global functions ---------------------------------------------------------- 131 | # ============================================================================== 132 | 133 | 134 | def find_weather_presets(): 135 | rgx = 
re.compile('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)') 136 | name = lambda x: ' '.join(m.group(0) for m in rgx.finditer(x)) 137 | presets = [x for x in dir(carla.WeatherParameters) if re.match('[A-Z].+', x)] 138 | return [(getattr(carla.WeatherParameters, x), name(x)) for x in presets] 139 | 140 | 141 | def get_actor_display_name(actor, truncate=250): 142 | name = ' '.join(actor.type_id.replace('_', '.').title().split('.')[1:]) 143 | return (name[:truncate - 1] + u'\u2026') if len(name) > truncate else name 144 | 145 | 146 | # ============================================================================== 147 | # -- World --------------------------------------------------------------------- 148 | # ============================================================================== 149 | 150 | 151 | class World(object): 152 | def __init__(self, carla_world, hud, args): 153 | self.world = carla_world 154 | self.actor_role_name = args.rolename 155 | self.map = self.world.get_map() 156 | self.hud = hud 157 | self.player = None 158 | self.collision_sensor = None 159 | self.lane_invasion_sensor = None 160 | self.gnss_sensor = None 161 | self.camera_manager = None 162 | self._weather_presets = find_weather_presets() 163 | self._weather_index = 0 164 | self._actor_filter = args.filter 165 | self._gamma = args.gamma 166 | self.restart() 167 | self.world.on_tick(hud.on_world_tick) 168 | self.recording_enabled = False 169 | self.recording_start = 0 170 | 171 | def restart(self): 172 | # Keep same camera config if the camera manager exists. 173 | cam_index = self.camera_manager.index if self.camera_manager is not None else 0 174 | cam_pos_index = self.camera_manager.transform_index if self.camera_manager is not None else 0 175 | # Get a random blueprint. 
176 | blueprint = random.choice(self.world.get_blueprint_library().filter(self._actor_filter)) 177 | blueprint.set_attribute('role_name', self.actor_role_name) 178 | if blueprint.has_attribute('color'): 179 | color = random.choice(blueprint.get_attribute('color').recommended_values) 180 | blueprint.set_attribute('color', color) 181 | if blueprint.has_attribute('driver_id'): 182 | driver_id = random.choice(blueprint.get_attribute('driver_id').recommended_values) 183 | blueprint.set_attribute('driver_id', driver_id) 184 | if blueprint.has_attribute('is_invincible'): 185 | blueprint.set_attribute('is_invincible', 'true') 186 | # Spawn the player. 187 | if self.player is not None: 188 | spawn_point = self.player.get_transform() 189 | spawn_point.location.z += 2.0 190 | spawn_point.rotation.roll = 0.0 191 | spawn_point.rotation.pitch = 0.0 192 | self.destroy() 193 | self.player = self.world.try_spawn_actor(blueprint, spawn_point) 194 | while self.player is None: 195 | spawn_points = self.map.get_spawn_points() 196 | spawn_point = random.choice(spawn_points) if spawn_points else carla.Transform() 197 | self.player = self.world.try_spawn_actor(blueprint, spawn_point) 198 | # Set up the sensors. 
199 | self.collision_sensor = CollisionSensor(self.player, self.hud) 200 | self.lane_invasion_sensor = LaneInvasionSensor(self.player, self.hud) 201 | self.gnss_sensor = GnssSensor(self.player) 202 | self.camera_manager = CameraManager(self.player, self.hud, self._gamma) 203 | self.camera_manager.transform_index = cam_pos_index 204 | self.camera_manager.set_sensor(cam_index, notify=False) 205 | actor_type = get_actor_display_name(self.player) 206 | self.hud.notification(actor_type) 207 | 208 | def next_weather(self, reverse=False): 209 | self._weather_index += -1 if reverse else 1 210 | self._weather_index %= len(self._weather_presets) 211 | preset = self._weather_presets[self._weather_index] 212 | self.hud.notification('Weather: %s' % preset[1]) 213 | self.player.get_world().set_weather(preset[0]) 214 | 215 | def tick(self, clock): 216 | self.hud.tick(self, clock) 217 | 218 | def render(self, display): 219 | self.camera_manager.render(display) 220 | self.hud.render(display) 221 | 222 | def destroy_sensors(self): 223 | self.camera_manager.sensor.destroy() 224 | self.camera_manager.sensor = None 225 | self.camera_manager.index = None 226 | 227 | def destroy(self): 228 | actors = [ 229 | self.camera_manager.sensor, 230 | self.collision_sensor.sensor, 231 | self.lane_invasion_sensor.sensor, 232 | self.gnss_sensor.sensor, 233 | self.player] 234 | for actor in actors: 235 | if actor is not None: 236 | actor.destroy() 237 | 238 | # ============================================================================== 239 | # -- KeyboardControl ----------------------------------------------------------- 240 | # ============================================================================== 241 | 242 | 243 | class KeyboardControl(object): 244 | def __init__(self, world, start_in_autopilot): 245 | self._autopilot_enabled = start_in_autopilot 246 | if isinstance(world.player, carla.Vehicle): 247 | self._control = carla.VehicleControl() 248 | 
world.player.set_autopilot(self._autopilot_enabled) 249 | elif isinstance(world.player, carla.Walker): 250 | self._control = carla.WalkerControl() 251 | self._autopilot_enabled = False 252 | self._rotation = world.player.get_transform().rotation 253 | else: 254 | raise NotImplementedError("Actor type not supported") 255 | self._steer_cache = 0.0 256 | world.hud.notification("Press 'H' or '?' for help.", seconds=4.0) 257 | 258 | def parse_events(self, client, world, clock): 259 | for event in pygame.event.get(): 260 | if event.type == pygame.QUIT: 261 | return True 262 | elif event.type == pygame.KEYUP: 263 | if self._is_quit_shortcut(event.key): 264 | return True 265 | elif event.key == K_BACKSPACE: 266 | world.restart() 267 | elif event.key == K_F1: 268 | world.hud.toggle_info() 269 | elif event.key == K_h or (event.key == K_SLASH and pygame.key.get_mods() & KMOD_SHIFT): 270 | world.hud.help.toggle() 271 | elif event.key == K_TAB: 272 | world.camera_manager.toggle_camera() 273 | elif event.key == K_c and pygame.key.get_mods() & KMOD_SHIFT: 274 | world.next_weather(reverse=True) 275 | elif event.key == K_c: 276 | world.next_weather() 277 | elif event.key == K_BACKQUOTE: 278 | world.camera_manager.next_sensor() 279 | elif event.key > K_0 and event.key <= K_9: 280 | world.camera_manager.set_sensor(event.key - 1 - K_0) 281 | elif event.key == K_r and not (pygame.key.get_mods() & KMOD_CTRL): 282 | world.camera_manager.toggle_recording() 283 | elif event.key == K_r and (pygame.key.get_mods() & KMOD_CTRL): 284 | if (world.recording_enabled): 285 | client.stop_recorder() 286 | world.recording_enabled = False 287 | world.hud.notification("Recorder is OFF") 288 | else: 289 | client.start_recorder("manual_recording.rec") 290 | world.recording_enabled = True 291 | world.hud.notification("Recorder is ON") 292 | elif event.key == K_p and (pygame.key.get_mods() & KMOD_CTRL): 293 | # stop recorder 294 | client.stop_recorder() 295 | world.recording_enabled = False 296 | # work 
around to fix camera at start of replaying 297 | currentIndex = world.camera_manager.index 298 | world.destroy_sensors() 299 | # disable autopilot 300 | self._autopilot_enabled = False 301 | world.player.set_autopilot(self._autopilot_enabled) 302 | world.hud.notification("Replaying file 'manual_recording.rec'") 303 | # replayer 304 | client.replay_file("manual_recording.rec", world.recording_start, 0, 0) 305 | world.camera_manager.set_sensor(currentIndex) 306 | elif event.key == K_MINUS and (pygame.key.get_mods() & KMOD_CTRL): 307 | if pygame.key.get_mods() & KMOD_SHIFT: 308 | world.recording_start -= 10 309 | else: 310 | world.recording_start -= 1 311 | world.hud.notification("Recording start time is %d" % (world.recording_start)) 312 | elif event.key == K_EQUALS and (pygame.key.get_mods() & KMOD_CTRL): 313 | if pygame.key.get_mods() & KMOD_SHIFT: 314 | world.recording_start += 10 315 | else: 316 | world.recording_start += 1 317 | world.hud.notification("Recording start time is %d" % (world.recording_start)) 318 | if isinstance(self._control, carla.VehicleControl): 319 | if event.key == K_q: 320 | self._control.gear = 1 if self._control.reverse else -1 321 | elif event.key == K_m: 322 | self._control.manual_gear_shift = not self._control.manual_gear_shift 323 | self._control.gear = world.player.get_control().gear 324 | world.hud.notification('%s Transmission' % 325 | ('Manual' if self._control.manual_gear_shift else 'Automatic')) 326 | elif self._control.manual_gear_shift and event.key == K_COMMA: 327 | self._control.gear = max(-1, self._control.gear - 1) 328 | elif self._control.manual_gear_shift and event.key == K_PERIOD: 329 | self._control.gear = self._control.gear + 1 330 | elif event.key == K_p and not (pygame.key.get_mods() & KMOD_CTRL): 331 | self._autopilot_enabled = not self._autopilot_enabled 332 | world.player.set_autopilot(self._autopilot_enabled) 333 | world.hud.notification('Autopilot %s' % ('On' if self._autopilot_enabled else 'Off')) 334 | if not 
self._autopilot_enabled: 335 | if isinstance(self._control, carla.VehicleControl): 336 | self._parse_vehicle_keys(pygame.key.get_pressed(), clock.get_time()) 337 | self._control.reverse = self._control.gear < 0 338 | elif isinstance(self._control, carla.WalkerControl): 339 | self._parse_walker_keys(pygame.key.get_pressed(), clock.get_time()) 340 | world.player.apply_control(self._control) 341 | 342 | def _parse_vehicle_keys(self, keys, milliseconds): 343 | self._control.throttle = 1.0 if keys[K_UP] or keys[K_w] else 0.0 344 | steer_increment = 5e-4 * milliseconds 345 | if keys[K_LEFT] or keys[K_a]: 346 | self._steer_cache -= steer_increment 347 | elif keys[K_RIGHT] or keys[K_d]: 348 | self._steer_cache += steer_increment 349 | else: 350 | self._steer_cache = 0.0 351 | self._steer_cache = min(0.7, max(-0.7, self._steer_cache)) 352 | self._control.steer = round(self._steer_cache, 1) 353 | self._control.brake = 1.0 if keys[K_DOWN] or keys[K_s] else 0.0 354 | self._control.hand_brake = keys[K_SPACE] 355 | 356 | def _parse_walker_keys(self, keys, milliseconds): 357 | self._control.speed = 0.0 358 | if keys[K_DOWN] or keys[K_s]: 359 | self._control.speed = 0.0 360 | if keys[K_LEFT] or keys[K_a]: 361 | self._control.speed = .01 362 | self._rotation.yaw -= 0.08 * milliseconds 363 | if keys[K_RIGHT] or keys[K_d]: 364 | self._control.speed = .01 365 | self._rotation.yaw += 0.08 * milliseconds 366 | if keys[K_UP] or keys[K_w]: 367 | self._control.speed = 3.333 if pygame.key.get_mods() & KMOD_SHIFT else 2.778 368 | self._control.jump = keys[K_SPACE] 369 | self._rotation.yaw = round(self._rotation.yaw, 1) 370 | self._control.direction = self._rotation.get_forward_vector() 371 | 372 | @staticmethod 373 | def _is_quit_shortcut(key): 374 | return (key == K_ESCAPE) or (key == K_q and pygame.key.get_mods() & KMOD_CTRL) 375 | 376 | 377 | # ============================================================================== 378 | # -- HUD 
----------------------------------------------------------------------- 379 | # ============================================================================== 380 | 381 | 382 | class HUD(object): 383 | def __init__(self, width, height): 384 | self.dim = (width, height) 385 | font = pygame.font.Font(pygame.font.get_default_font(), 20) 386 | fonts = [x for x in pygame.font.get_fonts() if 'mono' in x] 387 | default_font = 'ubuntumono' 388 | mono = default_font if default_font in fonts else fonts[0] 389 | mono = pygame.font.match_font(mono) 390 | self._font_mono = pygame.font.Font(mono, 14) 391 | self._notifications = FadingText(font, (width, 40), (0, height - 40)) 392 | self.help = HelpText(pygame.font.Font(mono, 24), width, height) 393 | self.server_fps = 0 394 | self.frame = 0 395 | self.simulation_time = 0 396 | self._show_info = True 397 | self._info_text = [] 398 | self._server_clock = pygame.time.Clock() 399 | 400 | def on_world_tick(self, timestamp): 401 | self._server_clock.tick() 402 | self.server_fps = self._server_clock.get_fps() 403 | self.frame = timestamp.frame 404 | self.simulation_time = timestamp.elapsed_seconds 405 | 406 | def tick(self, world, clock): 407 | self._notifications.tick(world, clock) 408 | if not self._show_info: 409 | return 410 | t = world.player.get_transform() 411 | v = world.player.get_velocity() 412 | c = world.player.get_control() 413 | heading = 'N' if abs(t.rotation.yaw) < 89.5 else '' 414 | heading += 'S' if abs(t.rotation.yaw) > 90.5 else '' 415 | heading += 'E' if 179.5 > t.rotation.yaw > 0.5 else '' 416 | heading += 'W' if -0.5 > t.rotation.yaw > -179.5 else '' 417 | colhist = world.collision_sensor.get_collision_history() 418 | collision = [colhist[x + self.frame - 200] for x in range(0, 200)] 419 | max_col = max(1.0, max(collision)) 420 | collision = [x / max_col for x in collision] 421 | vehicles = world.world.get_actors().filter('vehicle.*') 422 | self._info_text = [ 423 | 'Server: % 16.0f FPS' % self.server_fps, 424 | 
'Client: % 16.0f FPS' % clock.get_fps(), 425 | '', 426 | 'Vehicle: % 20s' % get_actor_display_name(world.player, truncate=20), 427 | 'Map: % 20s' % world.map.name, 428 | 'Simulation time: % 12s' % datetime.timedelta(seconds=int(self.simulation_time)), 429 | '', 430 | 'Speed: % 15.0f km/h' % (3.6 * math.sqrt(v.x**2 + v.y**2 + v.z**2)), 431 | u'Heading:% 16.0f\N{DEGREE SIGN} % 2s' % (t.rotation.yaw, heading), 432 | 'Location:% 20s' % ('(% 5.1f, % 5.1f)' % (t.location.x, t.location.y)), 433 | 'GNSS:% 24s' % ('(% 2.6f, % 3.6f)' % (world.gnss_sensor.lat, world.gnss_sensor.lon)), 434 | 'Height: % 18.0f m' % t.location.z, 435 | ''] 436 | if isinstance(c, carla.VehicleControl): 437 | self._info_text += [ 438 | ('Throttle:', c.throttle, 0.0, 1.0), 439 | ('Steer:', c.steer, -1.0, 1.0), 440 | ('Brake:', c.brake, 0.0, 1.0), 441 | ('Reverse:', c.reverse), 442 | ('Hand brake:', c.hand_brake), 443 | ('Manual:', c.manual_gear_shift), 444 | 'Gear: %s' % {-1: 'R', 0: 'N'}.get(c.gear, c.gear)] 445 | elif isinstance(c, carla.WalkerControl): 446 | self._info_text += [ 447 | ('Speed:', c.speed, 0.0, 5.556), 448 | ('Jump:', c.jump)] 449 | self._info_text += [ 450 | '', 451 | 'Collision:', 452 | collision, 453 | '', 454 | 'Number of vehicles: % 8d' % len(vehicles)] 455 | if len(vehicles) > 1: 456 | self._info_text += ['Nearby vehicles:'] 457 | distance = lambda l: math.sqrt((l.x - t.location.x)**2 + (l.y - t.location.y)**2 + (l.z - t.location.z)**2) 458 | vehicles = [(distance(x.get_location()), x) for x in vehicles if x.id != world.player.id] 459 | for d, vehicle in sorted(vehicles): 460 | if d > 200.0: 461 | break 462 | vehicle_type = get_actor_display_name(vehicle, truncate=22) 463 | self._info_text.append('% 4dm %s' % (d, vehicle_type)) 464 | 465 | def toggle_info(self): 466 | self._show_info = not self._show_info 467 | 468 | def notification(self, text, seconds=2.0): 469 | self._notifications.set_text(text, seconds=seconds) 470 | 471 | def error(self, text): 472 | 
self._notifications.set_text('Error: %s' % text, (255, 0, 0)) 473 | 474 | def render(self, display): 475 | if self._show_info: 476 | info_surface = pygame.Surface((220, self.dim[1])) 477 | info_surface.set_alpha(100) 478 | display.blit(info_surface, (0, 0)) 479 | v_offset = 4 480 | bar_h_offset = 100 481 | bar_width = 106 482 | for item in self._info_text: 483 | if v_offset + 18 > self.dim[1]: 484 | break 485 | if isinstance(item, list): 486 | if len(item) > 1: 487 | points = [(x + 8, v_offset + 8 + (1.0 - y) * 30) for x, y in enumerate(item)] 488 | pygame.draw.lines(display, (255, 136, 0), False, points, 2) 489 | item = None 490 | v_offset += 18 491 | elif isinstance(item, tuple): 492 | if isinstance(item[1], bool): 493 | rect = pygame.Rect((bar_h_offset, v_offset + 8), (6, 6)) 494 | pygame.draw.rect(display, (255, 255, 255), rect, 0 if item[1] else 1) 495 | else: 496 | rect_border = pygame.Rect((bar_h_offset, v_offset + 8), (bar_width, 6)) 497 | pygame.draw.rect(display, (255, 255, 255), rect_border, 1) 498 | f = (item[1] - item[2]) / (item[3] - item[2]) 499 | if item[2] < 0.0: 500 | rect = pygame.Rect((bar_h_offset + f * (bar_width - 6), v_offset + 8), (6, 6)) 501 | else: 502 | rect = pygame.Rect((bar_h_offset, v_offset + 8), (f * bar_width, 6)) 503 | pygame.draw.rect(display, (255, 255, 255), rect) 504 | item = item[0] 505 | if item: # At this point has to be a str. 
506 | surface = self._font_mono.render(item, True, (255, 255, 255)) 507 | display.blit(surface, (8, v_offset)) 508 | v_offset += 18 509 | self._notifications.render(display) 510 | self.help.render(display) 511 | 512 | 513 | # ============================================================================== 514 | # -- FadingText ---------------------------------------------------------------- 515 | # ============================================================================== 516 | 517 | 518 | class FadingText(object): 519 | def __init__(self, font, dim, pos): 520 | self.font = font 521 | self.dim = dim 522 | self.pos = pos 523 | self.seconds_left = 0 524 | self.surface = pygame.Surface(self.dim) 525 | 526 | def set_text(self, text, color=(255, 255, 255), seconds=2.0): 527 | text_texture = self.font.render(text, True, color) 528 | self.surface = pygame.Surface(self.dim) 529 | self.seconds_left = seconds 530 | self.surface.fill((0, 0, 0, 0)) 531 | self.surface.blit(text_texture, (10, 11)) 532 | 533 | def tick(self, _, clock): 534 | delta_seconds = 1e-3 * clock.get_time() 535 | self.seconds_left = max(0.0, self.seconds_left - delta_seconds) 536 | self.surface.set_alpha(500.0 * self.seconds_left) 537 | 538 | def render(self, display): 539 | display.blit(self.surface, self.pos) 540 | 541 | 542 | # ============================================================================== 543 | # -- HelpText ------------------------------------------------------------------ 544 | # ============================================================================== 545 | 546 | 547 | class HelpText(object): 548 | def __init__(self, font, width, height): 549 | lines = __doc__.split('\n') 550 | self.font = font 551 | self.dim = (680, len(lines) * 22 + 12) 552 | self.pos = (0.5 * width - 0.5 * self.dim[0], 0.5 * height - 0.5 * self.dim[1]) 553 | self.seconds_left = 0 554 | self.surface = pygame.Surface(self.dim) 555 | self.surface.fill((0, 0, 0, 0)) 556 | for n, line in enumerate(lines): 557 | 
text_texture = self.font.render(line, True, (255, 255, 255)) 558 | self.surface.blit(text_texture, (22, n * 22)) 559 | self._render = False 560 | self.surface.set_alpha(220) 561 | 562 | def toggle(self): 563 | self._render = not self._render 564 | 565 | def render(self, display): 566 | if self._render: 567 | display.blit(self.surface, self.pos) 568 | 569 | 570 | # ============================================================================== 571 | # -- CollisionSensor ----------------------------------------------------------- 572 | # ============================================================================== 573 | 574 | 575 | class CollisionSensor(object): 576 | def __init__(self, parent_actor, hud): 577 | self.sensor = None 578 | self.history = [] 579 | self._parent = parent_actor 580 | self.hud = hud 581 | world = self._parent.get_world() 582 | bp = world.get_blueprint_library().find('sensor.other.collision') 583 | self.sensor = world.spawn_actor(bp, carla.Transform(), attach_to=self._parent) 584 | # We need to pass the lambda a weak reference to self to avoid circular 585 | # reference. 
586 | weak_self = weakref.ref(self) 587 | self.sensor.listen(lambda event: CollisionSensor._on_collision(weak_self, event)) 588 | 589 | def get_collision_history(self): 590 | history = collections.defaultdict(int) 591 | for frame, intensity in self.history: 592 | history[frame] += intensity 593 | return history 594 | 595 | @staticmethod 596 | def _on_collision(weak_self, event): 597 | self = weak_self() 598 | if not self: 599 | return 600 | actor_type = get_actor_display_name(event.other_actor) 601 | self.hud.notification('Collision with %r' % actor_type) 602 | impulse = event.normal_impulse 603 | intensity = math.sqrt(impulse.x**2 + impulse.y**2 + impulse.z**2) 604 | self.history.append((event.frame, intensity)) 605 | if len(self.history) > 4000: 606 | self.history.pop(0) 607 | 608 | 609 | # ============================================================================== 610 | # -- LaneInvasionSensor -------------------------------------------------------- 611 | # ============================================================================== 612 | 613 | 614 | class LaneInvasionSensor(object): 615 | def __init__(self, parent_actor, hud): 616 | self.sensor = None 617 | self._parent = parent_actor 618 | self.hud = hud 619 | world = self._parent.get_world() 620 | bp = world.get_blueprint_library().find('sensor.other.lane_invasion') 621 | self.sensor = world.spawn_actor(bp, carla.Transform(), attach_to=self._parent) 622 | # We need to pass the lambda a weak reference to self to avoid circular 623 | # reference. 
624 | weak_self = weakref.ref(self) 625 | self.sensor.listen(lambda event: LaneInvasionSensor._on_invasion(weak_self, event)) 626 | 627 | @staticmethod 628 | def _on_invasion(weak_self, event): 629 | self = weak_self() 630 | if not self: 631 | return 632 | lane_types = set(x.type for x in event.crossed_lane_markings) 633 | text = ['%r' % str(x).split()[-1] for x in lane_types] 634 | self.hud.notification('Crossed line %s' % ' and '.join(text)) 635 | 636 | # ============================================================================== 637 | # -- GnssSensor -------------------------------------------------------- 638 | # ============================================================================== 639 | 640 | 641 | class GnssSensor(object): 642 | def __init__(self, parent_actor): 643 | self.sensor = None 644 | self._parent = parent_actor 645 | self.lat = 0.0 646 | self.lon = 0.0 647 | world = self._parent.get_world() 648 | bp = world.get_blueprint_library().find('sensor.other.gnss') 649 | self.sensor = world.spawn_actor(bp, carla.Transform(carla.Location(x=1.0, z=2.8)), attach_to=self._parent) 650 | # We need to pass the lambda a weak reference to self to avoid circular 651 | # reference. 
652 | weak_self = weakref.ref(self) 653 | self.sensor.listen(lambda event: GnssSensor._on_gnss_event(weak_self, event)) 654 | 655 | @staticmethod 656 | def _on_gnss_event(weak_self, event): 657 | self = weak_self() 658 | if not self: 659 | return 660 | self.lat = event.latitude 661 | self.lon = event.longitude 662 | 663 | 664 | # ============================================================================== 665 | # -- CameraManager ------------------------------------------------------------- 666 | # ============================================================================== 667 | 668 | 669 | class CameraManager(object): 670 | def __init__(self, parent_actor, hud, gamma_correction): 671 | self.sensor = None 672 | self.surface = None 673 | self._parent = parent_actor 674 | self.hud = hud 675 | self.recording = False 676 | bound_y = 0.5 + self._parent.bounding_box.extent.y 677 | Attachment = carla.AttachmentType 678 | self._camera_transforms = [ 679 | (carla.Transform(carla.Location(x=-5.5, z=2.5), carla.Rotation(pitch=8.0)), Attachment.SpringArm), 680 | (carla.Transform(carla.Location(x=1.6, z=1.7)), Attachment.Rigid), 681 | (carla.Transform(carla.Location(x=5.5, y=1.5, z=1.5)), Attachment.SpringArm), 682 | (carla.Transform(carla.Location(x=-8.0, z=6.0), carla.Rotation(pitch=6.0)), Attachment.SpringArm), 683 | (carla.Transform(carla.Location(x=-1, y=-bound_y, z=0.5)), Attachment.Rigid)] 684 | self.transform_index = 1 685 | self.sensors = [ 686 | ['sensor.camera.rgb', cc.Raw, 'Camera RGB'], 687 | ['sensor.camera.depth', cc.Raw, 'Camera Depth (Raw)'], 688 | ['sensor.camera.depth', cc.Depth, 'Camera Depth (Gray Scale)'], 689 | ['sensor.camera.depth', cc.LogarithmicDepth, 'Camera Depth (Logarithmic Gray Scale)'], 690 | ['sensor.camera.semantic_segmentation', cc.Raw, 'Camera Semantic Segmentation (Raw)'], 691 | ['sensor.camera.semantic_segmentation', cc.CityScapesPalette, 692 | 'Camera Semantic Segmentation (CityScapes Palette)'], 693 | ['sensor.lidar.ray_cast', None, 
'Lidar (Ray-Cast)']] 694 | world = self._parent.get_world() 695 | bp_library = world.get_blueprint_library() 696 | for item in self.sensors: 697 | bp = bp_library.find(item[0]) 698 | if item[0].startswith('sensor.camera'): 699 | bp.set_attribute('image_size_x', str(hud.dim[0])) 700 | bp.set_attribute('image_size_y', str(hud.dim[1])) 701 | if bp.has_attribute('gamma'): 702 | bp.set_attribute('gamma', str(gamma_correction)) 703 | elif item[0].startswith('sensor.lidar'): 704 | bp.set_attribute('range', '5000') 705 | item.append(bp) 706 | self.index = None 707 | 708 | def toggle_camera(self): 709 | self.transform_index = (self.transform_index + 1) % len(self._camera_transforms) 710 | self.set_sensor(self.index, notify=False, force_respawn=True) 711 | 712 | def set_sensor(self, index, notify=True, force_respawn=False): 713 | index = index % len(self.sensors) 714 | needs_respawn = True if self.index is None else \ 715 | (force_respawn or (self.sensors[index][0] != self.sensors[self.index][0])) 716 | if needs_respawn: 717 | if self.sensor is not None: 718 | self.sensor.destroy() 719 | self.surface = None 720 | self.sensor = self._parent.get_world().spawn_actor( 721 | self.sensors[index][-1], 722 | self._camera_transforms[self.transform_index][0], 723 | attach_to=self._parent, 724 | attachment_type=self._camera_transforms[self.transform_index][1]) 725 | # We need to pass the lambda a weak reference to self to avoid 726 | # circular reference. 
727 | weak_self = weakref.ref(self) 728 | self.sensor.listen(lambda image: CameraManager._parse_image(weak_self, image)) 729 | if notify: 730 | self.hud.notification(self.sensors[index][2]) 731 | self.index = index 732 | 733 | def next_sensor(self): 734 | self.set_sensor(self.index + 1) 735 | 736 | def toggle_recording(self): 737 | self.recording = not self.recording 738 | self.hud.notification('Recording %s' % ('On' if self.recording else 'Off')) 739 | 740 | def render(self, display): 741 | if self.surface is not None: 742 | display.blit(self.surface, (0, 0)) 743 | 744 | @staticmethod 745 | def _parse_image(weak_self, image): 746 | self = weak_self() 747 | if not self: 748 | return 749 | if self.sensors[self.index][0].startswith('sensor.lidar'): 750 | points = np.frombuffer(image.raw_data, dtype=np.dtype('f4')) 751 | points = np.reshape(points, (int(points.shape[0] / 3), 3)) 752 | lidar_data = np.array(points[:, :2]) 753 | lidar_data *= min(self.hud.dim) / 100.0 754 | lidar_data += (0.5 * self.hud.dim[0], 0.5 * self.hud.dim[1]) 755 | lidar_data = np.fabs(lidar_data) # pylint: disable=E1111 756 | lidar_data = lidar_data.astype(np.int32) 757 | lidar_data = np.reshape(lidar_data, (-1, 2)) 758 | lidar_img_size = (self.hud.dim[0], self.hud.dim[1], 3) 759 | lidar_img = np.zeros((lidar_img_size), dtype = int) 760 | lidar_img[tuple(lidar_data.T)] = (255, 255, 255) 761 | self.surface = pygame.surfarray.make_surface(lidar_img) 762 | else: 763 | image.convert(self.sensors[self.index][1]) 764 | array = np.frombuffer(image.raw_data, dtype=np.dtype("uint8")) 765 | array = np.reshape(array, (image.height, image.width, 4)) 766 | array = array[:, :, :3] 767 | array = array[:, :, ::-1] 768 | self.surface = pygame.surfarray.make_surface(array.swapaxes(0, 1)) 769 | if self.recording: 770 | image.save_to_disk('_out/%08d' % image.frame) 771 | 772 | 773 | # ============================================================================== 774 | # -- game_loop() 
# ==============================================================================


def game_loop(args):
    """Run the pygame render/input loop until the user quits.

    Connects to the CARLA server at ``args.host:args.port``, builds the HUD,
    :class:`World` and :class:`KeyboardControl`, then ticks, renders and flips
    the display at up to 60 FPS. On exit (normal return, exception, or
    KeyboardInterrupt propagating out) the ``finally`` block stops any active
    recorder, destroys the spawned actors and shuts pygame down.

    :param args: parsed argparse namespace (host, port, width, height,
        autopilot, rolename, filter, gamma, ...)
    """
    pygame.init()
    pygame.font.init()
    world = None

    try:
        client = carla.Client(args.host, args.port)
        client.set_timeout(2.0)

        display = pygame.display.set_mode(
            (args.width, args.height),
            pygame.HWSURFACE | pygame.DOUBLEBUF)

        hud = HUD(args.width, args.height)
        world = World(client.get_world(), hud, args)
        controller = KeyboardControl(world, args.autopilot)

        clock = pygame.time.Clock()
        while True:
            clock.tick_busy_loop(60)
            if controller.parse_events(client, world, clock):
                return
            world.tick(clock)
            world.render(display)
            pygame.display.flip()

    finally:
        # Clean up even on error: stop the recorder first so the .rec file is
        # flushed, then destroy the player and its sensors.
        if world and world.recording_enabled:
            client.stop_recorder()

        if world is not None:
            world.destroy()

        pygame.quit()


# ==============================================================================
# -- main() --------------------------------------------------------------------
# ==============================================================================


def main():
    """Entry point: start the CARLA server core, parse CLI args, run the loop."""
    # Copy so the SERVER_BINARY update below does not mutate the module-level
    # ENV_CONFIG dict (the original aliased it, silently changing the global).
    env_config = ENV_CONFIG.copy()
    carla_server_binary = add_carla_path(ENV_CONFIG["CARLA_PATH_CONFIG_FILE"])
    env_config.update({"SERVER_BINARY": carla_server_binary})

    experiment = Experiment()
    # NOTE(review): action/observation spaces are fetched but unused here; the
    # calls are kept in case the getters have side effects — confirm and prune.
    action_space = experiment.get_action_space()
    observation_space = experiment.get_observation_space()
    exp_config = experiment.get_experiment_config()
    # CarlaCore2 only launches the server process (see core/CarlaCore2.py).
    CarlaCore(environment_config=env_config, experiment_config=exp_config, core_config=None)
    time.sleep(5)  # give the server time to come up before the client connects

    argparser = argparse.ArgumentParser(
        description='CARLA Manual Control Client')
    argparser.add_argument(
        '-v', '--verbose',
        action='store_true',
        dest='debug',
        help='print debug information')
    argparser.add_argument(
        '--host',
        metavar='H',
        default='127.0.0.1',
        help='IP of the host server (default: 127.0.0.1)')
    argparser.add_argument(
        '-p', '--port',
        metavar='P',
        default=2000,
        type=int,
        help='TCP port to listen to (default: 2000)')
    argparser.add_argument(
        '-a', '--autopilot',
        action='store_true',
        help='enable autopilot')
    argparser.add_argument(
        '--res',
        metavar='WIDTHxHEIGHT',
        default='1280x720',
        help='window resolution (default: 1280x720)')
    argparser.add_argument(
        '--filter',
        metavar='PATTERN',
        default='vehicle.*',
        help='actor filter (default: "vehicle.*")')
    argparser.add_argument(
        '--rolename',
        metavar='NAME',
        default='hero',
        help='actor role name (default: "hero")')
    argparser.add_argument(
        '--gamma',
        default=2.2,
        type=float,
        help='Gamma correction of the camera (default: 2.2)')
    args = argparser.parse_args()

    args.width, args.height = [int(x) for x in args.res.split('x')]

    log_level = logging.DEBUG if args.debug else logging.INFO
    logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)

    logging.info('listening to server %s:%s', args.host, args.port)

    print(__doc__)

    try:

        game_loop(args)

    except KeyboardInterrupt:
        print('\nCancelled by user. Bye!')


if __name__ == '__main__':

    main()