├── carla ├── __init__.py ├── planner │ ├── __init__.py │ ├── Town01.png │ ├── Town02.png │ ├── Town02Big.png │ ├── Town01Lanes.png │ ├── Town02Lanes.png │ ├── Town01Central.png │ ├── Town02Central.png │ ├── Town02.txt │ ├── Town01.txt │ ├── grid.py │ ├── graph.py │ ├── city_track.py │ ├── astar.py │ ├── map.py │ ├── planner.py │ └── converter.py ├── driving_benchmark │ ├── __init__.py │ ├── experiment_suites │ │ ├── __init__.py │ │ ├── basic_experiment_suite.py │ │ ├── experiment_suite.py │ │ └── corl_2017.py │ ├── experiment.py │ ├── results_printer.py │ ├── recording.py │ ├── driving_benchmark.py │ └── metrics.py ├── tcp.pyc ├── util.pyc ├── agent │ ├── __init__.py │ ├── forward_agent.py │ └── agent.py ├── client.pyc ├── sensor.pyc ├── __init__.pyc ├── settings.pyc ├── transform.pyc ├── carla_server_pb2.pyc ├── util.py ├── tcp.py ├── settings.py ├── transform.py ├── image_converter.py ├── client.py └── sensor.py ├── .gitignore ├── readme_imgs └── driving_log_structure.png ├── settings.ini ├── enums.py ├── timer.py ├── HUD.py ├── environment.yml ├── non_player_objects.py ├── drive_models.py ├── disk_writer.py ├── README.md └── controller.py /carla/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /carla/planner/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .vscode 2 | __pycache__ 3 | output/ 4 | .pylintrc 5 | models/ -------------------------------------------------------------------------------- /carla/driving_benchmark/__init__.py: -------------------------------------------------------------------------------- 1 | from .driving_benchmark import run_driving_benchmark 2 | -------------------------------------------------------------------------------- /carla/tcp.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MaxJohnsen/carla-controller/HEAD/carla/tcp.pyc -------------------------------------------------------------------------------- /carla/util.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MaxJohnsen/carla-controller/HEAD/carla/util.pyc -------------------------------------------------------------------------------- /carla/agent/__init__.py: -------------------------------------------------------------------------------- 1 | from .forward_agent import ForwardAgent 2 | from .agent import Agent 3 | -------------------------------------------------------------------------------- /carla/client.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MaxJohnsen/carla-controller/HEAD/carla/client.pyc -------------------------------------------------------------------------------- /carla/sensor.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MaxJohnsen/carla-controller/HEAD/carla/sensor.pyc -------------------------------------------------------------------------------- /carla/__init__.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MaxJohnsen/carla-controller/HEAD/carla/__init__.pyc 
-------------------------------------------------------------------------------- /carla/settings.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MaxJohnsen/carla-controller/HEAD/carla/settings.pyc -------------------------------------------------------------------------------- /carla/transform.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MaxJohnsen/carla-controller/HEAD/carla/transform.pyc -------------------------------------------------------------------------------- /carla/planner/Town01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MaxJohnsen/carla-controller/HEAD/carla/planner/Town01.png -------------------------------------------------------------------------------- /carla/planner/Town02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MaxJohnsen/carla-controller/HEAD/carla/planner/Town02.png -------------------------------------------------------------------------------- /carla/carla_server_pb2.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MaxJohnsen/carla-controller/HEAD/carla/carla_server_pb2.pyc -------------------------------------------------------------------------------- /carla/planner/Town02Big.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MaxJohnsen/carla-controller/HEAD/carla/planner/Town02Big.png -------------------------------------------------------------------------------- /carla/planner/Town01Lanes.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MaxJohnsen/carla-controller/HEAD/carla/planner/Town01Lanes.png -------------------------------------------------------------------------------- /carla/planner/Town02Lanes.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MaxJohnsen/carla-controller/HEAD/carla/planner/Town02Lanes.png -------------------------------------------------------------------------------- /carla/planner/Town01Central.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MaxJohnsen/carla-controller/HEAD/carla/planner/Town01Central.png -------------------------------------------------------------------------------- /carla/planner/Town02Central.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MaxJohnsen/carla-controller/HEAD/carla/planner/Town02Central.png -------------------------------------------------------------------------------- /readme_imgs/driving_log_structure.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MaxJohnsen/carla-controller/HEAD/readme_imgs/driving_log_structure.png -------------------------------------------------------------------------------- /carla/driving_benchmark/experiment_suites/__init__.py: -------------------------------------------------------------------------------- 1 | from .basic_experiment_suite import BasicExperimentSuite 2 | from .corl_2017 import CoRL2017 3 | -------------------------------------------------------------------------------- 
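The exports above, together with carla.agent below, are all that is needed to wire up a benchmark run. A minimal sketch (illustrative only; it assumes the upstream run_driving_benchmark(agent, experiment_suite, city_name, log_name) signature and a CARLA server already listening on localhost:2000):

from carla.agent import ForwardAgent
from carla.driving_benchmark import run_driving_benchmark
from carla.driving_benchmark.experiment_suites import BasicExperimentSuite

# Benchmark the trivial straight-driving agent on the Town01 experiments.
agent = ForwardAgent()
suite = BasicExperimentSuite('Town01')
run_driving_benchmark(agent, suite, city_name='Town01', log_name='forward_agent_test')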
/carla/agent/forward_agent.py: -------------------------------------------------------------------------------- 1 | 2 | from carla.agent.agent import Agent 3 | from carla.client import VehicleControl 4 | 5 | 6 | class ForwardAgent(Agent): 7 | """ 8 | A simple derivation of the Agent class: 9 | a trivial agent that drives straight ahead. 10 | """ 11 | def run_step(self, measurements, sensor_data, directions, target): 12 | control = VehicleControl() 13 | control.throttle = 0.9 14 | 15 | return control 16 | -------------------------------------------------------------------------------- /settings.ini: -------------------------------------------------------------------------------- 1 | [Carla] 2 | NumberOfVehicles = 25 3 | NumberOfPedestrians = 0 4 | WeatherId = 1 5 | RandomizeWeather = no 6 | QualityLevel = Epic 7 | StartingPositions = 17 8 | 9 | [Pygame] 10 | WindowWidth = 1024 11 | WindowHeight = 768 12 | OutputImageWidth = 300 13 | OutputImageHeight = 180 14 | 15 | [Controller] 16 | AutoStartRecording = no 17 | FrameLimit = 0 18 | EpisodeLimit = 0 19 | 20 | [AutoPilot] 21 | SteerNoise = 0 22 | ThrottleNoise = 0.4 23 | 24 | [DriveModel] 25 | ControlSteer = yes 26 | ControlThrottle = yes 27 | ControlBrake = yes -------------------------------------------------------------------------------- /enums.py: -------------------------------------------------------------------------------- 1 | """ 2 | TODO: Write docstring 3 | """ 4 | from enum import Enum 5 | 6 | 7 | class GameState(Enum): 8 | """ TODO: Write Docstring """ 9 | 10 | NOT_RECORDING = 0 11 | RECORDING = 1 12 | WRITING = 2 13 | 14 | 15 | class HighLevelCommand(Enum): 16 | """ TODO: Write Docstring """ 17 | 18 | FOLLOW_ROAD = 0 19 | TURN_LEFT = 1 20 | TURN_RIGHT = 2 21 | STRAIGHT_AHEAD = 3 22 | 23 | 24 | class TrafficLight(Enum): 25 | """ TODO: Write Docstring """ 26 | 27 | GREEN = 0 28 | YELLOW = 1 29 | RED = 2 30 | NONE = 3 31 | 32 | -------------------------------------------------------------------------------- /carla/planner/Town02.txt: -------------------------------------------------------------------------------- 1 | 5.4400,-107.48000,-0.22000000 2 | 0.000000,0.000000,0.000000 3 | 1.000000,1.000000,1.000000 4 | -16.43022,-16.43022,0.000 5 | 25, 25 6 | 0,10 0,24 14 7 | 0,24 0,10 14 8 | 24,24 6,24 18 9 | 6,24 24,24 18 10 | 24,0 24,10 10 11 | 24,10 24,0 10 12 | 0,0 24,0 24 13 | 24,0 0,0 24 14 | 0,10 0,0 10 15 | 0,0 0,10 10 16 | 24,10 24,16 6 17 | 24,16 24,10 6 18 | 0,10 6,10 6 19 | 6,10 0,10 6 20 | 6,24 0,24 6 21 | 0,24 6,24 6 22 | 6,10 17,10 11 23 | 17,10 6,10 11 24 | 6,24 6,16 8 25 | 6,16 6,24 8 26 | 24,16 24,24 8 27 | 24,24 24,16 8 28 | 6,16 6,10 6 29 | 6,10 6,16 6 30 | 24,16 17,16 7 31 | 17,16 24,16 7 32 | 17,16 6,16 11 33 | 6,16 17,16 11 34 | 17,10 24,10 7 35 | 24,10 17,10 7 36 | 17,16 17,10 6 37 | 17,10 17,16 6 38 | -------------------------------------------------------------------------------- /carla/agent/agent.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 3 | # Barcelona (UAB). 4 | # 5 | # This work is licensed under the terms of the MIT license. 6 | # For a copy, see <https://opensource.org/licenses/MIT>.
7 | # @author: german,felipecode 8 | 9 | 10 | from __future__ import print_function 11 | import abc 12 | 13 | 14 | class Agent(object): 15 | def __init__(self): 16 | self.__metaclass__ = abc.ABCMeta 17 | 18 | @abc.abstractmethod 19 | def run_step(self, measurements, sensor_data, directions, target): 20 | """ 21 | Function to be redefined by an agent. 22 | :param The measurements like speed, the image data and a target 23 | :returns A carla Control object, with the steering/gas/brake for the agent 24 | """ 25 | -------------------------------------------------------------------------------- /timer.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | 4 | class Timer(object): 5 | def __init__(self): 6 | self._timestamp = time.time() 7 | self._episode_timestamp = time.time() 8 | 9 | self.episode_num = 0 10 | self.frame = 0 11 | self.timestamp_str = self._get_timestamp_str(self._timestamp) 12 | 13 | self.episode_frame = 0 14 | self.episode_timestamp_str = self._get_timestamp_str(self._episode_timestamp) 15 | 16 | def tick(self): 17 | self.frame += 1 18 | self.episode_frame += 1 19 | 20 | def new_episode(self): 21 | self.episode_frame = 0 22 | self._episode_timestamp = time.time() 23 | self.episode_timestamp_str = self._get_timestamp_str(self._episode_timestamp) 24 | self.episode_num += 1 25 | 26 | def _get_timestamp_str(self, timestamp): 27 | return time.strftime("%Y-%m-%d_%H-%M-%S", time.localtime(timestamp)) 28 | -------------------------------------------------------------------------------- /carla/planner/Town01.txt: -------------------------------------------------------------------------------- 1 | 0.0,0.0,-0.3811000000 2 | 0.000000,0.000000,0.0 3 | 1.000000,1.000000,1.000000 4 | -16.43022,-16.43022,0.000 5 | 49, 41 6 | 0,0 0,40 40 7 | 0,40 0,0 40 8 | 48,40 41,40 7 9 | 41,40 48,40 7 10 | 48,0 48,40 40 11 | 48,40 48,0 40 12 | 0,0 11,0 11 13 | 11,0 0,0 11 14 | 41,0 48,0 7 15 | 48,0 41,0 7 16 | 41,40 11,40 30 17 | 11,40 41,40 30 18 | 41,0 41,7 7 19 | 41,7 41,0 7 20 | 11,40 0,40 11 21 | 0,40 11,40 11 22 | 11,0 19,0 8 23 | 19,0 11,0 8 24 | 11,40 11,24 16 25 | 11,24 11,40 16 26 | 41,24 41,40 16 27 | 41,40 41,24 16 28 | 11,24 11,16 8 29 | 11,16 11,24 8 30 | 41,24 11,24 30 31 | 11,24 41,24 30 32 | 41,16 41,24 8 33 | 41,24 41,16 8 34 | 11,16 11,7 9 35 | 11,7 11,16 9 36 | 41,16 11,16 30 37 | 11,16 41,16 30 38 | 41,7 41,16 9 39 | 41,16 41,7 9 40 | 11,7 11,0 7 41 | 11,0 11,7 7 42 | 41,7 19,7 22 43 | 19,7 41,7 22 44 | 19,0 41,0 22 45 | 41,0 19,0 22 46 | 19,7 11,7 8 47 | 11,7 19,7 8 48 | 19,0 19,7 7 49 | 19,7 19,0 7 50 | -------------------------------------------------------------------------------- /HUD.py: -------------------------------------------------------------------------------- 1 | import pygame 2 | 3 | 4 | class InfoBox: 5 | def __init__(self, size, bg_color=(255, 255, 255, 100), text_color=(0, 0, 0)): 6 | pygame.font.init() 7 | self.size = size 8 | self._bg_color = bg_color 9 | self._text_color = text_color 10 | self._content = {} 11 | self._label_font = pygame.font.SysFont("Tahoma", 12, bold=False) 12 | self._value_font = pygame.font.SysFont("Tahoma", 12, bold=True) 13 | self._text_gap = 10 14 | 15 | def update_content(self, items): 16 | for label, value in items: 17 | self._content[label] = value 18 | 19 | def render_surface(self): 20 | rel_y = (self.size[1] - (16 * len(self._content))) / 2 21 | surface = pygame.Surface(self.size, pygame.SRCALPHA) 22 | surface.fill(self._bg_color) 23 | for label, value in self._content.items(): 24 | 
label = self._label_font.render(label, False, self._text_color) 25 | value = self._value_font.render(str(value), False, self._text_color) 26 | label_x = (self.size[0] / 2) - self._text_gap - label.get_width() 27 | value_x = (self.size[0] / 2) + self._text_gap 28 | surface.blit(label, (label_x, rel_y)) 29 | surface.blit(value, (value_x, rel_y)) 30 | rel_y += label.get_height() 31 | 32 | return surface 33 | -------------------------------------------------------------------------------- /environment.yml: -------------------------------------------------------------------------------- 1 | name: carla-controller 2 | channels: 3 | - conda-forge 4 | - defaults 5 | dependencies: 6 | - certifi=2018.10.15=py36_1000 7 | - pip=18.1=py36_1000 8 | - python=3.6.6=he025d50_0 9 | - setuptools=40.5.0=py36_0 10 | - vc=14=0 11 | - vs2015_runtime=14.0.25420=0 12 | - wheel=0.32.2=py36_0 13 | - wincertstore=0.2=py36_1002 14 | - pip: 15 | - absl-py==0.6.1 16 | - astor==0.7.1 17 | - astroid==2.0.4 18 | - bleach==1.5.0 19 | - colorama==0.4.0 20 | - cycler==0.10.0 21 | - future==0.17.1 22 | - gast==0.2.0 23 | - grpcio==1.16.0 24 | - h5py==2.8.0 25 | - html5lib==0.9999999 26 | - isort==4.3.4 27 | - keras-applications==1.0.6 28 | - keras-preprocessing==1.0.5 29 | - kiwisolver==1.0.1 30 | - lazy-object-proxy==1.3.1 31 | - markdown==3.0.1 32 | - matplotlib==3.0.1 33 | - mccabe==0.6.1 34 | - numexpr==2.6.8 35 | - numpy==1.14.5 36 | - pandas==0.23.4 37 | - pillow==5.3.0 38 | - protobuf==3.6.1 39 | - pygame==1.9.4 40 | - pyparsing==2.3.0 41 | - python-dateutil==2.7.5 42 | - pytz==2018.7 43 | - scipy==1.1.0 44 | - six==1.11.0 45 | - tables==3.4.4 46 | - tensorboard==1.12.0 47 | - tensorflow-gpu==1.12.0 48 | - tensorflow-tensorboard==1.5.1 49 | - termcolor==1.1.0 50 | - typed-ast==1.1.0 51 | - werkzeug==0.14.1 52 | - wrapt==1.10.11 53 | 54 | -------------------------------------------------------------------------------- /carla/driving_benchmark/experiment.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 2 | # Barcelona (UAB). 3 | # 4 | # This work is licensed under the terms of the MIT license. 5 | # For a copy, see . 6 | 7 | from carla.settings import CarlaSettings 8 | 9 | 10 | class Experiment(object): 11 | """ 12 | Experiment defines a certain task, under conditions 13 | A task is associated with a set of poses, containing start and end pose. 14 | 15 | Conditions are associated with a carla Settings and describe the following: 16 | 17 | Number Of Vehicles 18 | Number Of Pedestrians 19 | Weather 20 | Random Seed of the agents, describing their behaviour. 
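A pose is a pair [start_index, end_index] of player start positions: the episode begins at the first index and has the second as its goal (e.g. the pose [7, 3] starts at position 7 and drives towards position 3).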
21 | 22 | """ 23 | 24 | def __init__(self): 25 | self.Task = 0 26 | self.Conditions = CarlaSettings() 27 | self.Poses = [[]] 28 | self.Repetitions = 1 29 | 30 | def set(self, **kwargs): 31 | for key, value in kwargs.items(): 32 | if not hasattr(self, key): 33 | raise ValueError('Experiment: no key named %r' % key) 34 | setattr(self, key, value) 35 | 36 | if self.Repetitions != 1: 37 | raise NotImplementedError() 38 | 39 | @property 40 | def task(self): 41 | return self.Task 42 | 43 | @property 44 | def conditions(self): 45 | return self.Conditions 46 | 47 | @property 48 | def poses(self): 49 | return self.Poses 50 | 51 | @property 52 | def repetitions(self): 53 | return self.Repetitions 54 | -------------------------------------------------------------------------------- /non_player_objects.py: -------------------------------------------------------------------------------- 1 | """ 2 | TODO: Write docstring 3 | """ 4 | 5 | from scipy import spatial 6 | 7 | 8 | class NonPlayerObjects: 9 | def __init__(self, agent_type): 10 | self._agents = None 11 | self._KD_tree = None 12 | self._agent_type = agent_type 13 | 14 | self.valid = False 15 | 16 | def _is_valid_rot(self, rot1, rot2, rot_dif, thresh): 17 | yaw_diff = ((rot1 - rot2 + 180) % 360) - 180 + rot_dif 18 | return -thresh <= yaw_diff <= thresh 19 | 20 | def update_agents(self, all_agents): 21 | filtered = list( 22 | filter(lambda agent: hasattr(agent, self._agent_type), all_agents) 23 | ) 24 | self._agents = list(map(lambda a: getattr(a, self._agent_type), filtered)) 25 | 26 | def initialize_KD_tree(self): 27 | locations = [] 28 | for agent in self._agents: 29 | loc = agent.transform.location 30 | locations.append([loc.x, loc.y, loc.z]) 31 | 32 | if locations: 33 | self._KD_tree = spatial.KDTree(locations) 34 | self.valid = True 35 | 36 | def get_closest_with_rotation(self, player_trans, radius, rot_dif, thresh): 37 | if not self.valid: 38 | return None, None 39 | 40 | car_loc = [ 41 | player_trans.location.x, 42 | player_trans.location.y, 43 | player_trans.location.z, 44 | ] 45 | car_rot = player_trans.rotation.yaw 46 | 47 | distance, index = self._KD_tree.query(car_loc) 48 | 49 | if distance < radius: 50 | closest_agent = self._agents[index] 51 | agent_rot = closest_agent.transform.rotation.yaw 52 | 53 | if self._is_valid_rot(agent_rot, car_rot, rot_dif, thresh): 54 | return closest_agent, distance 55 | 56 | return None, None 57 | 58 | -------------------------------------------------------------------------------- /carla/util.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 2 | # Barcelona (UAB). 3 | # 4 | # This work is licensed under the terms of the MIT license. 5 | # For a copy, see . 
6 | 7 | import datetime 8 | import sys 9 | 10 | from contextlib import contextmanager 11 | 12 | 13 | @contextmanager 14 | def make_connection(client_type, *args, **kwargs): 15 | """Context manager to create and connect a networking client object.""" 16 | client = None 17 | try: 18 | client = client_type(*args, **kwargs) 19 | client.connect() 20 | yield client 21 | finally: 22 | if client is not None: 23 | client.disconnect() 24 | 25 | 26 | class StopWatch(object): 27 | def __init__(self): 28 | self.start = datetime.datetime.now() 29 | self.end = None 30 | 31 | def restart(self): 32 | self.start = datetime.datetime.now() 33 | self.end = None 34 | 35 | def stop(self): 36 | self.end = datetime.datetime.now() 37 | 38 | def seconds(self): 39 | return (self.end - self.start).total_seconds() 40 | 41 | def milliseconds(self): 42 | return 1000.0 * self.seconds() 43 | 44 | 45 | def to_hex_str(header): 46 | return ':'.join('{:02x}'.format(ord(c)) for c in header) 47 | 48 | 49 | if sys.version_info >= (3, 3): 50 | 51 | import shutil 52 | 53 | def print_over_same_line(text): 54 | terminal_width = shutil.get_terminal_size((80, 20)).columns 55 | empty_space = max(0, terminal_width - len(text)) 56 | sys.stdout.write('\r' + text + empty_space * ' ') 57 | sys.stdout.flush() 58 | 59 | else: 60 | 61 | # Workaround for older Python versions. 62 | def print_over_same_line(text): 63 | line_length = max(print_over_same_line.last_line_length, len(text)) 64 | empty_space = max(0, line_length - len(text)) 65 | sys.stdout.write('\r' + text + empty_space * ' ') 66 | sys.stdout.flush() 67 | print_over_same_line.last_line_length = line_length 68 | print_over_same_line.last_line_length = 0 69 | -------------------------------------------------------------------------------- /carla/driving_benchmark/experiment_suites/basic_experiment_suite.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 2 | # Barcelona (UAB). 3 | # 4 | # This work is licensed under the terms of the MIT license. 5 | # For a copy, see <https://opensource.org/licenses/MIT>. 6 | 7 | 8 | from __future__ import print_function 9 | 10 | from carla.driving_benchmark.experiment import Experiment 11 | from carla.sensor import Camera 12 | from carla.settings import CarlaSettings 13 | 14 | from .experiment_suite import ExperimentSuite 15 | 16 | 17 | class BasicExperimentSuite(ExperimentSuite): 18 | 19 | @property 20 | def train_weathers(self): 21 | return [1] 22 | 23 | @property 24 | def test_weathers(self): 25 | return [1] 26 | 27 | def build_experiments(self): 28 | """ 29 | Creates the whole set of experiment objects. 30 | The experiments created depend on the selected Town.
31 | 32 | """ 33 | 34 | # We check the town; based on that, we define the town-related parameters 35 | # The size of the vector is related to the number of tasks; inside each 36 | # task there are also multiple poses (start and end positions) 37 | if self._city_name == 'Town01': 38 | poses_tasks = [[[7, 3]], [[138, 17]], [[140, 134]], [[140, 134]]] 39 | vehicles_tasks = [0, 0, 0, 20] 40 | pedestrians_tasks = [0, 0, 0, 50] 41 | else: 42 | poses_tasks = [[[4, 2]], [[37, 76]], [[19, 66]], [[19, 66]]] 43 | vehicles_tasks = [0, 0, 0, 15] 44 | pedestrians_tasks = [0, 0, 0, 50] 45 | 46 | # We set the camera 47 | # This single RGB camera is used on every experiment 48 | 49 | camera = Camera('CameraRGB') 50 | camera.set(FOV=100) 51 | camera.set_image_size(800, 600) 52 | camera.set_position(2.0, 0.0, 1.4) 53 | camera.set_rotation(-15.0, 0, 0) 54 | 55 | # Based on the parameters, creates a vector with experiment objects. 56 | experiments_vector = [] 57 | for weather in self.weathers: 58 | 59 | for iteration in range(len(poses_tasks)): 60 | poses = poses_tasks[iteration] 61 | vehicles = vehicles_tasks[iteration] 62 | pedestrians = pedestrians_tasks[iteration] 63 | 64 | conditions = CarlaSettings() 65 | conditions.set( 66 | SendNonPlayerAgentsInfo=True, 67 | NumberOfVehicles=vehicles, 68 | NumberOfPedestrians=pedestrians, 69 | WeatherId=weather 70 | 71 | ) 72 | # Add all the cameras that were set for these experiments 73 | conditions.add_sensor(camera) 74 | experiment = Experiment() 75 | experiment.set( 76 | Conditions=conditions, 77 | Poses=poses, 78 | Task=iteration, 79 | Repetitions=1 80 | ) 81 | experiments_vector.append(experiment) 82 | 83 | return experiments_vector 84 | -------------------------------------------------------------------------------- /carla/driving_benchmark/experiment_suites/experiment_suite.py: -------------------------------------------------------------------------------- 1 | # Subclasses redefine how the timeout for an episode is calculated 2 | import abc 3 | 4 | 5 | class ExperimentSuite(object): 6 | 7 | def __init__(self, city_name): 8 | 9 | self._city_name = city_name 10 | self._experiments = self.build_experiments() 11 | 12 | def calculate_time_out(self, path_distance): 13 | """ 14 | Function to return the timeout, in seconds, calculated from the 15 | distance to the goal at an assumed 10 km/h (e.g. a 1000 m route 16 | gives (1.0 / 10.0) * 3600.0 + 10.0 = 370 s), as used on the CoRL paper. 17 | """ 18 | return ((path_distance / 1000.0) / 10.0) * 3600.0 + 10.0 19 | 20 | def get_number_of_poses_task(self): 21 | """ 22 | Get the number of poses a task has for this benchmark 23 | """ 24 | 25 | # Warning: assumes that all tasks have the same size 26 | 27 | return len(self._experiments[0].poses) 28 | 29 | def get_experiments(self): 30 | """ 31 | Getter for the experiment set. 32 | """ 33 | return self._experiments 34 | 35 | @property 36 | def dynamic_tasks(self): 37 | """ 38 | Returns the tasks that contain dynamic obstacles 39 | """ 40 | dynamic_tasks = set() 41 | for exp in self._experiments: 42 | if exp.conditions.NumberOfVehicles > 0 or exp.conditions.NumberOfPedestrians > 0: 43 | dynamic_tasks.add(exp.task) 44 | 45 | return list(dynamic_tasks) 46 | 47 | @property 48 | def metrics_parameters(self): 49 | """ 50 | Property to return the parameters for the metrics module 51 | Could be redefined depending on the needs of the user.
52 | """ 53 | return { 54 | 55 | 'intersection_offroad': {'frames_skip': 10, 56 | 'frames_recount': 20, 57 | 'threshold': 0.3 58 | }, 59 | 'intersection_otherlane': {'frames_skip': 10, 60 | 'frames_recount': 20, 61 | 'threshold': 0.4 62 | }, 63 | 'collision_other': {'frames_skip': 10, 64 | 'frames_recount': 20, 65 | 'threshold': 400 66 | }, 67 | 'collision_vehicles': {'frames_skip': 10, 68 | 'frames_recount': 30, 69 | 'threshold': 400 70 | }, 71 | 'collision_pedestrians': {'frames_skip': 5, 72 | 'frames_recount': 100, 73 | 'threshold': 300 74 | }, 75 | 76 | } 77 | 78 | @property 79 | def weathers(self): 80 | weathers = set(self.train_weathers) 81 | weathers.update(self.test_weathers) 82 | return weathers 83 | 84 | @abc.abstractmethod 85 | def build_experiments(self): 86 | """ 87 | Returns a set of experiments to be evaluated 88 | Must be redefined in an inherited class. 89 | 90 | """ 91 | 92 | @abc.abstractproperty 93 | def train_weathers(self): 94 | """ 95 | Return the weathers that are considered as training conditions 96 | """ 97 | 98 | @abc.abstractproperty 99 | def test_weathers(self): 100 | """ 101 | Return the weathers that are considered as testing conditions 102 | """ 103 | -------------------------------------------------------------------------------- /drive_models.py: -------------------------------------------------------------------------------- 1 | """ 2 | TODO: Write docstring 3 | """ 4 | from abc import ABC, abstractmethod 5 | from tensorflow.keras.models import load_model 6 | import cv2 7 | import numpy as np 8 | 9 | 10 | class ModelInterface(ABC): 11 | """ 12 | TODO: Write docstring 13 | """ 14 | 15 | @abstractmethod 16 | def load_model(self, path): 17 | pass 18 | 19 | @abstractmethod 20 | def get_prediction(self, images, info): 21 | pass 22 | 23 | 24 | class CNNKeras(ModelInterface): 25 | """ 26 | TODO: Write docstring 27 | """ 28 | 29 | def __init__(self): 30 | self._model = None 31 | self._one_hot_hlc = [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]] 32 | 33 | def load_model(self, path): 34 | self._model = load_model(path) 35 | 36 | def get_prediction(self, images, info): 37 | if self._model is None: 38 | return False 39 | img_input = cv2.cvtColor(images["rgb_center"], cv2.COLOR_BGR2LAB) 40 | info_input = [ 41 | info["speed"] / 100, 42 | info["speed_limit"] / 100, 43 | 1 if info["traffic_light"] == 2 else 0, 44 | ] 45 | hlc_input = self._one_hot_hlc[int(info["hlc"])] 46 | prediction = self._model.predict( 47 | [np.array([img_input]), np.array([info_input]), np.array([hlc_input])] 48 | ) 49 | prediction = prediction[0] 50 | steer = prediction[0] 51 | throttle = prediction[1] 52 | brake = prediction[2] 53 | return (steer, throttle, brake) 54 | 55 | 56 | class LSTMKeras(ModelInterface): 57 | """ 58 | TODO: Write docstring 59 | """ 60 | 61 | def __init__(self, seq_length, seq_space, late_hlc=False): 62 | self._model = None 63 | self._img_history = [] 64 | self._info_history = [] 65 | self._hlc_history = [] 66 | self._late_hlc = late_hlc 67 | self._seq_length = seq_length 68 | self._seq_space = seq_space 69 | self._one_hot_hlc = [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]] 70 | 71 | def load_model(self, path): 72 | self._model = load_model(path) 73 | 74 | def get_prediction(self, images, info): 75 | if self._model is None: 76 | return False 77 | img_input = cv2.cvtColor(images["rgb_center"], cv2.COLOR_BGR2LAB) 78 | info_input = [ 79 | info["speed"] / 100, 80 | info["speed_limit"] / 100, 81 | 1 if info["traffic_light"] == 2 else 0, 82 | ] 83 | hlc_input 
= self._one_hot_hlc[int(info["hlc"])] 84 | 85 | self._img_history.append(np.array(img_input)) 86 | self._info_history.append(np.array(info_input)) 87 | self._hlc_history.append(np.array(hlc_input)) 88 | 89 | req = (self._seq_length - 1) * (self._seq_space + 1) 90 | if len(self._img_history) > req: 91 | start = len(self._img_history) - 1 - req 92 | imgs = np.array([self._img_history[start :: self._seq_space + 1]]) 93 | infos = np.array([self._info_history[start :: self._seq_space + 1]]) 94 | 95 | if self._late_hlc: 96 | hlcs = np.array([self._hlc_history[-1]]) 97 | else: 98 | hlcs = np.array([self._hlc_history[start :: self._seq_space + 1]]) 99 | 100 | prediction = self._model.predict( 101 | {"image_input": imgs, "info_input": infos, "hlc_input": hlcs} 102 | ) 103 | prediction = prediction[0] 104 | steer = prediction[0] 105 | throttle = prediction[1] 106 | brake = prediction[2] 107 | return (steer, throttle, brake) 108 | return (0, 0, 0) 109 | -------------------------------------------------------------------------------- /carla/tcp.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 2 | # Barcelona (UAB). 3 | # 4 | # This work is licensed under the terms of the MIT license. 5 | # For a copy, see <https://opensource.org/licenses/MIT>. 6 | 7 | """Basic TCP client.""" 8 | 9 | import logging 10 | import socket 11 | import struct 12 | import time 13 | 14 | class TCPConnectionError(Exception): 15 | pass 16 | 17 | 18 | class TCPClient(object): 19 | """ 20 | Basic networking client for TCP connections. Errors that occur during 21 | networking operations are raised as TCPConnectionError. 22 | 23 | Received messages are expected to be prepended by an int32 defining the 24 | message size. Messages are sent following the same convention.
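For example, a message with a 6-byte payload travels on the wire as the 4-byte little-endian header struct.pack('<L', 6) followed by the payload itself; read() unpacks that header first and then receives exactly that many bytes.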
25 | """ 26 | 27 | def __init__(self, host, port, timeout): 28 | self._host = host 29 | self._port = port 30 | self._timeout = timeout 31 | self._socket = None 32 | self._logprefix = '(%s:%s) ' % (self._host, self._port) 33 | 34 | def connect(self, connection_attempts=10): 35 | """Try to establish a connection to the given host:port.""" 36 | connection_attempts = max(1, connection_attempts) 37 | error = None 38 | for attempt in range(1, connection_attempts + 1): 39 | try: 40 | self._socket = socket.create_connection(address=(self._host, self._port), timeout=self._timeout) 41 | self._socket.settimeout(self._timeout) 42 | logging.debug('%sconnected', self._logprefix) 43 | return 44 | except socket.error as exception: 45 | error = exception 46 | logging.debug('%sconnection attempt %d: %s', self._logprefix, attempt, error) 47 | time.sleep(1) 48 | self._reraise_exception_as_tcp_error('failed to connect', error) 49 | 50 | def disconnect(self): 51 | """Disconnect any active connection.""" 52 | if self._socket is not None: 53 | logging.debug('%sdisconnecting', self._logprefix) 54 | self._socket.close() 55 | self._socket = None 56 | 57 | def connected(self): 58 | """Return whether there is an active connection.""" 59 | return self._socket is not None 60 | 61 | def write(self, message): 62 | """Send message to the server.""" 63 | if self._socket is None: 64 | raise TCPConnectionError(self._logprefix + 'not connected') 65 | header = struct.pack(' 0: 86 | try: 87 | data = self._socket.recv(length) 88 | except socket.error as exception: 89 | self._reraise_exception_as_tcp_error('failed to read data', exception) 90 | if not data: 91 | raise TCPConnectionError(self._logprefix + 'connection closed') 92 | buf += data 93 | length -= len(data) 94 | return buf 95 | 96 | def _reraise_exception_as_tcp_error(self, message, exception): 97 | raise TCPConnectionError('%s%s: %s' % (self._logprefix, message, exception)) 98 | -------------------------------------------------------------------------------- /disk_writer.py: -------------------------------------------------------------------------------- 1 | """ 2 | TODO: Write Docstring 3 | """ 4 | from threading import Thread 5 | import os 6 | import cv2 7 | 8 | 9 | class ImageWriter(Thread): 10 | """ TODO: Write Docstring """ 11 | 12 | def __init__(self, episode_path, images, driving_log, frames, on_complete=None): 13 | Thread.__init__(self) 14 | self.progress = 0.0 15 | self._images = images 16 | self._driving_log = driving_log 17 | self._frames = frames 18 | self._episode_path = episode_path 19 | self._on_complete = on_complete 20 | 21 | def run(self): 22 | image_path = self._episode_path / "imgs" 23 | image_path.mkdir(parents=True, exist_ok=True) 24 | 25 | for i in range(len(self._images)): 26 | for key, value in self._images[i].items(): 27 | image = value 28 | filename = f"{self._frames[i]}_{key}.png" 29 | path = image_path / filename 30 | cv2.imwrite(str(path), image) 31 | self.progress = (i + 1) / len(self._images) 32 | 33 | csv_path = f"{str(self._episode_path)}/driving_log.csv" 34 | if not os.path.isfile(csv_path): 35 | self._driving_log.to_csv(csv_path) 36 | else: 37 | self._driving_log.to_csv(csv_path, mode="a", header=False) 38 | 39 | if self._on_complete is not None: 40 | self._on_complete() 41 | 42 | 43 | class VideoWriter(Thread): 44 | """ TODO: Write Docstring """ 45 | 46 | def __init__(self, episode_path, images, info, fps=30, on_complete=None): 47 | Thread.__init__(self) 48 | self.progress = 0.0 49 | self._images = images 50 | self._episode_path = 
episode_path 51 | self._on_complete = on_complete 52 | self._fps = fps 53 | self._info = info 54 | 55 | def _draw_info(self, img, info): 56 | font = cv2.FONT_HERSHEY_SIMPLEX 57 | fontScale = 1 58 | fontColor = (255, 255, 255) 59 | lineType = 2 60 | speed, speed_limit, traffic_light, hlc = info 61 | cv2.putText(img, "Speed:", (132, 30), font, fontScale, fontColor, lineType) 62 | cv2.putText(img, "Speed Limit:", (41, 65), font, fontScale, fontColor, lineType) 63 | cv2.putText( 64 | img, "Traffic Light:", (37, 100), font, fontScale, fontColor, lineType 65 | ) 66 | cv2.putText( 67 | img, "Activated HLC:", (10, 135), font, fontScale, fontColor, lineType 68 | ) 69 | cv2.putText(img, speed, (270, 30), font, fontScale, fontColor, lineType) 70 | cv2.putText(img, speed_limit, (270, 65), font, fontScale, fontColor, lineType) 71 | cv2.putText( 72 | img, traffic_light, (270, 100), font, fontScale, fontColor, lineType 73 | ) 74 | cv2.putText(img, hlc, (270, 135), font, fontScale, fontColor, lineType) 75 | return img 76 | 77 | def run(self): 78 | video_path = self._episode_path / "videos" 79 | video_path.mkdir(parents=True, exist_ok=True) 80 | 81 | for i in range(len(self._images)): 82 | imgs = self._images[i] 83 | shape = imgs[0].shape[:2] 84 | video_size = (shape[1], shape[0]) 85 | cap = cv2.VideoCapture(0) 86 | fourcc = cv2.VideoWriter_fourcc(*"XVID") 87 | path = video_path / f"camera{i}.avi" 88 | out = cv2.VideoWriter(str(path), fourcc, self._fps, video_size) 89 | 90 | for f in range(len(imgs)): 91 | img = imgs[f] 92 | img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR) 93 | img = self._draw_info(img, self._info[f]) 94 | out.write(img) 95 | self.progress = (len(imgs) * i + f + 1) / ( 96 | len(self._images) * len(imgs) 97 | ) 98 | cap.release() 99 | out.release() 100 | cv2.destroyAllWindows() 101 | 102 | if self._on_complete is not None: 103 | self._on_complete() 104 | -------------------------------------------------------------------------------- /carla/settings.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 2 | # Barcelona (UAB). 3 | # 4 | # This work is licensed under the terms of the MIT license. 5 | # For a copy, see . 6 | 7 | """CARLA Settings""" 8 | 9 | import io 10 | import random 11 | import sys 12 | 13 | 14 | if sys.version_info >= (3, 0): 15 | 16 | from configparser import ConfigParser 17 | 18 | else: 19 | 20 | from ConfigParser import RawConfigParser as ConfigParser 21 | 22 | 23 | from . import sensor as carla_sensor 24 | 25 | 26 | MAX_NUMBER_OF_WEATHER_IDS = 14 27 | 28 | 29 | class CarlaSettings(object): 30 | """ 31 | The CarlaSettings object controls the settings of an episode. The __str__ 32 | method retrieves an str with a CarlaSettings.ini file contents. 
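For example, str(CarlaSettings(NumberOfVehicles=25, WeatherId=3)) yields an INI document carrying those values in its [CARLA/LevelSettings] section.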
33 | """ 34 | 35 | def __init__(self, **kwargs): 36 | # [CARLA/Server] 37 | self.SynchronousMode = True 38 | self.SendNonPlayerAgentsInfo = False 39 | # [CARLA/QualitySettings] 40 | self.QualityLevel = 'Epic' 41 | # [CARLA/LevelSettings] 42 | self.PlayerVehicle = None 43 | self.NumberOfVehicles = 20 44 | self.NumberOfPedestrians = 30 45 | self.WeatherId = 1 46 | self.SeedVehicles = None 47 | self.SeedPedestrians = None 48 | self.set(**kwargs) 49 | self._sensors = [] 50 | 51 | def set(self, **kwargs): 52 | for key, value in kwargs.items(): 53 | if not hasattr(self, key): 54 | raise ValueError('CarlaSettings: no key named %r' % key) 55 | setattr(self, key, value) 56 | 57 | def randomize_seeds(self): 58 | """ 59 | Randomize the seeds of the new episode's pseudo-random number 60 | generators. 61 | """ 62 | self.SeedVehicles = random.getrandbits(16) 63 | self.SeedPedestrians = random.getrandbits(16) 64 | 65 | def randomize_weather(self): 66 | """Randomized the WeatherId.""" 67 | self.WeatherId = random.randint(0, MAX_NUMBER_OF_WEATHER_IDS) 68 | 69 | def add_sensor(self, sensor): 70 | """Add a sensor to the player vehicle (see sensor.py).""" 71 | if not isinstance(sensor, carla_sensor.Sensor): 72 | raise ValueError('Sensor not supported') 73 | self._sensors.append(sensor) 74 | 75 | def __str__(self): 76 | """Converts this object to an INI formatted string.""" 77 | ini = ConfigParser() 78 | ini.optionxform = str 79 | S_SERVER = 'CARLA/Server' 80 | S_QUALITY = 'CARLA/QualitySettings' 81 | S_LEVEL = 'CARLA/LevelSettings' 82 | S_SENSOR = 'CARLA/Sensor' 83 | 84 | def get_attribs(obj): 85 | return [a for a in dir(obj) if not a.startswith('_') and not callable(getattr(obj, a))] 86 | 87 | def add_section(section, obj, keys): 88 | for key in keys: 89 | if hasattr(obj, key) and getattr(obj, key) is not None: 90 | if not ini.has_section(section): 91 | ini.add_section(section) 92 | ini.set(section, key, str(getattr(obj, key))) 93 | 94 | add_section(S_SERVER, self, [ 95 | 'SynchronousMode', 96 | 'SendNonPlayerAgentsInfo']) 97 | add_section(S_QUALITY, self, [ 98 | 'QualityLevel']) 99 | add_section(S_LEVEL, self, [ 100 | 'NumberOfVehicles', 101 | 'NumberOfPedestrians', 102 | 'WeatherId', 103 | 'SeedVehicles', 104 | 'SeedPedestrians']) 105 | 106 | ini.add_section(S_SENSOR) 107 | ini.set(S_SENSOR, 'Sensors', ','.join(s.SensorName for s in self._sensors)) 108 | 109 | for sensor_def in self._sensors: 110 | section = S_SENSOR + '/' + sensor_def.SensorName 111 | add_section(section, sensor_def, get_attribs(sensor_def)) 112 | 113 | if sys.version_info >= (3, 0): 114 | text = io.StringIO() 115 | else: 116 | text = io.BytesIO() 117 | 118 | ini.write(text) 119 | return text.getvalue().replace(' = ', '=') 120 | -------------------------------------------------------------------------------- /carla/planner/grid.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 2 | # Barcelona (UAB). 3 | # 4 | # This work is licensed under the terms of the MIT license. 5 | # For a copy, see . 
6 | 7 | import copy 8 | import numpy as np 9 | 10 | 11 | def angle_between(v1, v2): 12 | return np.arccos(np.dot(v1, v2) / np.linalg.norm(v1) / np.linalg.norm(v2)) 13 | 14 | 15 | class Grid(object): 16 | 17 | def __init__(self, graph): 18 | 19 | self._graph = graph 20 | self._structure = self._make_structure() 21 | self._walls = self._make_walls() 22 | 23 | def search_on_grid(self, x, y): 24 | visit = [[0, 1], [0, -1], [1, 0], [1, 1], 25 | [1, -1], [-1, 0], [-1, 1], [-1, -1]] 26 | c_x, c_y = x, y 27 | scale = 1 28 | while self._structure[c_x, c_y] != 0: 29 | for offset in visit: 30 | c_x, c_y = x + offset[0] * scale, y + offset[1] * scale 31 | 32 | if c_x >= 0 and c_x < self._graph.get_resolution()[ 33 | 0] and c_y >= 0 and c_y < self._graph.get_resolution()[1]: 34 | if self._structure[c_x, c_y] == 0: 35 | break 36 | else: 37 | c_x, c_y = x, y 38 | scale += 1 39 | 40 | return c_x, c_y 41 | def get_walls(self): 42 | return self._walls 43 | 44 | def get_wall_source(self, pos, pos_ori, target): 45 | 46 | free_nodes = self._get_adjacent_free_nodes(pos) 47 | # print self._walls 48 | final_walls = copy.copy(self._walls) 49 | # print final_walls 50 | heading_start = np.array([pos_ori[0], pos_ori[1]]) 51 | for adj in free_nodes: 52 | 53 | start_to_goal = np.array([adj[0] - pos[0], adj[1] - pos[1]]) 54 | angle = angle_between(heading_start, start_to_goal) 55 | if (angle > 1.6 and adj != target): 56 | final_walls.add((adj[0], adj[1])) 57 | 58 | return final_walls 59 | 60 | def get_wall_target(self, pos, pos_ori, source): 61 | 62 | free_nodes = self._get_adjacent_free_nodes(pos) 63 | final_walls = copy.copy(self._walls) 64 | heading_start = np.array([pos_ori[0], pos_ori[1]]) 65 | for adj in free_nodes: 66 | 67 | start_to_goal = np.array([adj[0] - pos[0], adj[1] - pos[1]]) 68 | angle = angle_between(heading_start, start_to_goal) 69 | 70 | if (angle < 1.0 and adj != source): 71 | final_walls.add((adj[0], adj[1])) 72 | 73 | return final_walls 74 | 75 | def _draw_line(self, grid, xi, yi, xf, yf): 76 | 77 | if xf < xi: 78 | aux = xi 79 | xi = xf 80 | xf = aux 81 | 82 | if yf < yi: 83 | aux = yi 84 | yi = yf 85 | yf = aux 86 | 87 | for i in range(xi, xf + 1): 88 | 89 | for j in range(yi, yf + 1): 90 | grid[i, j] = 0.0 91 | 92 | return grid 93 | 94 | def _make_structure(self): 95 | structure = np.ones( 96 | (self._graph.get_resolution()[0], 97 | self._graph.get_resolution()[1])) 98 | 99 | for key, connections in self._graph.get_edges().items(): 100 | 101 | # draw a line 102 | for con in connections: 103 | # print key[0],key[1],con[0],con[1] 104 | structure = self._draw_line( 105 | structure, key[0], key[1], con[0], con[1]) 106 | # print grid 107 | return structure 108 | 109 | def _make_walls(self): 110 | walls = set() 111 | 112 | for i in range(self._structure.shape[0]): 113 | 114 | for j in range(self._structure.shape[1]): 115 | if self._structure[i, j] == 1.0: 116 | walls.add((i, j)) 117 | 118 | return walls 119 | 120 | def _get_adjacent_free_nodes(self, pos): 121 | """ Eight nodes in total """ 122 | visit = [[0, 1], [0, -1], [1, 0], [1, 1], 123 | [1, -1], [-1, 0], [-1, 1], [-1, -1]] 124 | 125 | adjacent = set() 126 | for offset in visit: 127 | node = (pos[0] + offset[0], pos[1] + offset[1]) 128 | 129 | if (node[0] >= 0 and node[0] < self._graph.get_resolution()[0] 130 | and node[1] >= 0 and node[1] < self._graph.get_resolution()[1]): 131 | 132 | if self._structure[node[0], node[1]] == 0.0: 133 | adjacent.add(node) 134 | 135 | return adjacent 136 | 
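The angle tests in get_wall_source() and get_wall_target() above use angle_between() to turn adjacent free nodes into virtual walls: a node whose direction is more than 1.6 rad away from the start heading is sealed off behind the source, and one less than 1.0 rad from the target heading is sealed off ahead of the target. A small self-contained check of that geometry (illustrative values only, not part of the module):

import numpy as np
from carla.planner.grid import angle_between

heading = np.array([1.0, 0.0])      # vehicle facing +x
step_back = np.array([-1.0, 0.0])   # towards an adjacent node directly behind
step_ahead = np.array([1.0, 0.0])   # towards an adjacent node directly ahead

assert angle_between(heading, step_back) > 1.6   # walled off by get_wall_source()
assert angle_between(heading, step_ahead) < 1.0  # walled off by get_wall_target()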
-------------------------------------------------------------------------------- /carla/planner/graph.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 2 | # Barcelona (UAB). 3 | # 4 | # This work is licensed under the terms of the MIT license. 5 | # For a copy, see <https://opensource.org/licenses/MIT>. 6 | 7 | import math 8 | import numpy as np 9 | 10 | 11 | def string_to_node(string): 12 | vec = string.split(',') 13 | return (int(vec[0]), int(vec[1])) 14 | 15 | 16 | def string_to_floats(string): 17 | vec = string.split(',') 18 | return (float(vec[0]), float(vec[1]), float(vec[2])) 19 | 20 | 21 | def sldist(c1, c2): 22 | return math.sqrt((c2[0] - c1[0]) ** 2 + (c2[1] - c1[1]) ** 2) 23 | 24 | 25 | def sldist3(c1, c2): 26 | return math.sqrt((c2[0] - c1[0]) ** 2 + (c2[1] - c1[1]) 27 | ** 2 + (c2[2] - c1[2]) ** 2) 28 | 29 | 30 | class Graph(object): 31 | """ 32 | A simple directed, weighted graph 33 | """ 34 | 35 | def __init__(self, graph_file=None, node_density=50): 36 | 37 | self._nodes = set() 38 | self._angles = {} 39 | self._edges = {} 40 | self._distances = {} 41 | self._node_density = node_density 42 | 43 | if graph_file is not None: 44 | with open(graph_file, 'r') as f: 45 | # Skip the first four lines; they contain the map metadata. 46 | lines_after_4 = f.readlines()[4:] 47 | 48 | # The next line is the graph resolution. 49 | linegraphres = lines_after_4[0] 50 | self._resolution = string_to_node(linegraphres) 51 | for line in lines_after_4[1:]: 52 | 53 | from_node, to_node, d = line.split() 54 | from_node = string_to_node(from_node) 55 | to_node = string_to_node(to_node) 56 | 57 | if from_node not in self._nodes: 58 | self.add_node(from_node) 59 | if to_node not in self._nodes: 60 | self.add_node(to_node) 61 | 62 | self._edges.setdefault(from_node, []) 63 | self._edges[from_node].append(to_node) 64 | self._distances[(from_node, to_node)] = float(d) 65 | 66 | def add_node(self, value): 67 | self._nodes.add(value) 68 | 69 | def make_orientations(self, node, heading): 70 | 71 | import collections 72 | distance_dic = {} 73 | for node_iter in self._nodes: 74 | if node_iter != node: 75 | distance_dic[sldist(node, node_iter)] = node_iter 76 | 77 | distance_dic = collections.OrderedDict( 78 | sorted(distance_dic.items())) 79 | 80 | self._angles[node] = heading 81 | for _, v in distance_dic.items(): 82 | start_to_goal = np.array([node[0] - v[0], node[1] - v[1]]) 83 | 84 | print(start_to_goal) 85 | 86 | self._angles[v] = start_to_goal / np.linalg.norm(start_to_goal) 87 | 88 | def add_edge(self, from_node, to_node, distance): 89 | self._add_edge(from_node, to_node, distance) 90 | 91 | def _add_edge(self, from_node, to_node, distance): 92 | self._edges.setdefault(from_node, []) 93 | self._edges[from_node].append(to_node) 94 | self._distances[(from_node, to_node)] = distance 95 | 96 | def get_resolution(self): 97 | return self._resolution 98 | def get_edges(self): 99 | return self._edges 100 | 101 | def intersection_nodes(self): 102 | 103 | intersect_nodes = [] 104 | for node in self._nodes: 105 | if len(self._edges[node]) > 2: 106 | intersect_nodes.append(node) 107 | 108 | return intersect_nodes 109 | 110 | # This also contains the non-intersection turns...
111 | 112 | def turn_nodes(self): 113 | 114 | return self._nodes 115 | 116 | def plot_ori(self, c): 117 | from matplotlib import collections as mc 118 | 119 | import matplotlib.pyplot as plt 120 | line_len = 1 121 | 122 | lines = [[(p[0], p[1]), (p[0] + line_len * self._angles[p][0], 123 | p[1] + line_len * self._angles[p][1])] for p in self._nodes] 124 | lc = mc.LineCollection(lines, linewidth=2, color='green') 125 | _, ax = plt.subplots() 126 | ax.add_collection(lc) 127 | 128 | ax.autoscale() 129 | ax.margins(0.1) 130 | 131 | xs = [p[0] for p in self._nodes] 132 | ys = [p[1] for p in self._nodes] 133 | 134 | plt.scatter(xs, ys, color=c) 135 | 136 | def plot(self, c): 137 | import matplotlib.pyplot as plt 138 | xs = [p[0] for p in self._nodes] 139 | ys = [p[1] for p in self._nodes] 140 | 141 | plt.scatter(xs, ys, color=c) 142 | -------------------------------------------------------------------------------- /carla/planner/city_track.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 2 | # Barcelona (UAB). 3 | # 4 | # This work is licensed under the terms of the MIT license. 5 | # For a copy, see . 6 | 7 | from carla.planner.graph import sldist 8 | 9 | from carla.planner.astar import AStar 10 | from carla.planner.map import CarlaMap 11 | 12 | 13 | class CityTrack(object): 14 | 15 | def __init__(self, city_name): 16 | 17 | # These values are fixed for every city. 18 | self._node_density = 50.0 19 | self._pixel_density = 0.1643 20 | 21 | self._map = CarlaMap(city_name, self._pixel_density, self._node_density) 22 | 23 | self._astar = AStar() 24 | 25 | # Refers to the start position of the previous route computation 26 | self._previous_node = [] 27 | 28 | # The current computed route 29 | self._route = None 30 | 31 | def project_node(self, position): 32 | """ 33 | Projecting the graph node into the city road 34 | """ 35 | 36 | node = self._map.convert_to_node(position) 37 | 38 | # To change the orientation with respect to the map standards 39 | 40 | node = tuple([int(x) for x in node]) 41 | 42 | # Set to zero if it is less than zero. 
43 | 44 | node = (max(0, node[0]), max(0, node[1])) 45 | node = (min(self._map.get_graph_resolution()[0] - 1, node[0]), 46 | min(self._map.get_graph_resolution()[1] - 1, node[1])) 47 | 48 | node = self._map.search_on_grid(node) 49 | 50 | return node 51 | 52 | def get_intersection_nodes(self): 53 | return self._map.get_intersection_nodes() 54 | 55 | def get_pixel_density(self): 56 | return self._pixel_density 57 | 58 | def get_node_density(self): 59 | return self._node_density 60 | 61 | def is_at_goal(self, source, target): 62 | return source == target 63 | 64 | def is_at_new_node(self, current_node): 65 | return current_node != self._previous_node 66 | 67 | def is_away_from_intersection(self, current_node): 68 | return self._closest_intersection_position(current_node) > 1 69 | 70 | def is_far_away_from_route_intersection(self, current_node): 71 | # CHECK FOR THE EMPTY CASE 72 | if self._route is None: 73 | raise RuntimeError('Impossible to find route' 74 | + ' Current planner is limited' 75 | + ' Try to select start points away from intersections') 76 | 77 | return self._closest_intersection_route_position(current_node, 78 | self._route) > 4 79 | 80 | def compute_route(self, node_source, source_ori, node_target, target_ori): 81 | 82 | self._previous_node = node_source 83 | 84 | a_star = AStar() 85 | a_star.init_grid(self._map.get_graph_resolution()[0], 86 | self._map.get_graph_resolution()[1], 87 | self._map.get_walls_directed(node_source, source_ori, 88 | node_target, target_ori), node_source, 89 | node_target) 90 | 91 | route = a_star.solve() 92 | 93 | # JuSt a Corner Case 94 | # Clean this to avoid having to use this function 95 | if route is None: 96 | a_star = AStar() 97 | a_star.init_grid(self._map.get_graph_resolution()[0], 98 | self._map.get_graph_resolution()[1], self._map.get_walls(), 99 | node_source, node_target) 100 | 101 | route = a_star.solve() 102 | 103 | self._route = route 104 | 105 | return route 106 | 107 | def get_distance_closest_node_route(self, pos, route): 108 | distance = [] 109 | 110 | for node_iter in route: 111 | 112 | if node_iter in self._map.get_intersection_nodes(): 113 | distance.append(sldist(node_iter, pos)) 114 | 115 | if not distance: 116 | return sldist(route[-1], pos) 117 | return sorted(distance)[0] 118 | 119 | 120 | def _closest_intersection_position(self, current_node): 121 | 122 | distance_vector = [] 123 | for node_iterator in self._map.get_intersection_nodes(): 124 | distance_vector.append(sldist(node_iterator, current_node)) 125 | 126 | return sorted(distance_vector)[0] 127 | 128 | 129 | def _closest_intersection_route_position(self, current_node, route): 130 | 131 | distance_vector = [] 132 | for _ in route: 133 | for node_iterator in self._map.get_intersection_nodes(): 134 | distance_vector.append(sldist(node_iterator, current_node)) 135 | 136 | return sorted(distance_vector)[0] 137 | 138 | -------------------------------------------------------------------------------- /carla/driving_benchmark/results_printer.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | import json 4 | 5 | 6 | def print_summary(metrics_summary, weathers, path): 7 | """ 8 | We plot the summary of the testing for the set selected weathers. 
9 | 10 | We take the raw data and print the way it was described on CORL 2017 paper 11 | 12 | """ 13 | 14 | # Improve readability by adding a weather dictionary 15 | weather_name_dict = {1: 'Clear Noon', 3: 'After Rain Noon', 16 | 6: 'Heavy Rain Noon', 8: 'Clear Sunset', 17 | 4: 'Cloudy After Rain', 14: 'Soft Rain Sunset'} 18 | 19 | # First we write the entire dictionary on the benchmark folder. 20 | with open(os.path.join(path, 'metrics.json'), 'w') as fo: 21 | fo.write(json.dumps(metrics_summary)) 22 | 23 | # Second we plot the metrics that are already ready by averaging 24 | 25 | metrics_to_average = [ 26 | 'episodes_fully_completed', 27 | 'episodes_completion' 28 | 29 | ] 30 | # We compute the number of episodes based on size of average completion 31 | number_of_episodes = len(list(metrics_summary['episodes_fully_completed'].items())[0][1]) 32 | 33 | for metric in metrics_to_average: 34 | 35 | if metric == 'episodes_completion': 36 | print ("Average Percentage of Distance to Goal Travelled ") 37 | else: 38 | print ("Percentage of Successful Episodes") 39 | 40 | print ("") 41 | values = metrics_summary[metric] 42 | 43 | metric_sum_values = np.zeros(number_of_episodes) 44 | for weather, tasks in values.items(): 45 | if weather in set(weathers): 46 | print(' Weather: ', weather_name_dict[weather]) 47 | count = 0 48 | for t in tasks: 49 | # if isinstance(t, np.ndarray) or isinstance(t, list): 50 | if t == []: 51 | print(' Metric Not Computed') 52 | else: 53 | print(' Task:', count, ' -> ', float(sum(t)) / float(len(t))) 54 | metric_sum_values[count] += (float(sum(t)) / float(len(t))) * 1.0 / float( 55 | len(weathers)) 56 | 57 | count += 1 58 | 59 | print (' Average Between Weathers') 60 | for i in range(len(metric_sum_values)): 61 | print(' Task ', i, ' -> ', metric_sum_values[i]) 62 | print ("") 63 | 64 | infraction_metrics = [ 65 | 'collision_pedestrians', 66 | 'collision_vehicles', 67 | 'collision_other', 68 | 'intersection_offroad', 69 | 'intersection_otherlane' 70 | 71 | ] 72 | 73 | # We need to collect the total number of kilometers for each task 74 | 75 | for metric in infraction_metrics: 76 | values_driven = metrics_summary['driven_kilometers'] 77 | values = metrics_summary[metric] 78 | metric_sum_values = np.zeros(number_of_episodes) 79 | summed_driven_kilometers = np.zeros(number_of_episodes) 80 | 81 | if metric == 'collision_pedestrians': 82 | print ('Avg. Kilometers driven before a collision to a PEDESTRIAN') 83 | elif metric == 'collision_vehicles': 84 | print('Avg. Kilometers driven before a collision to a VEHICLE') 85 | elif metric == 'collision_other': 86 | print('Avg. Kilometers driven before a collision to a STATIC OBSTACLE') 87 | elif metric == 'intersection_offroad': 88 | print('Avg. Kilometers driven before going OUTSIDE OF THE ROAD') 89 | else: 90 | print('Avg. 
Kilometers driven before invading the OPPOSITE LANE') 91 | 92 | # print (zip(values.items(), values_driven.items())) 93 | for items_metric, items_driven in zip(values.items(), values_driven.items()): 94 | weather = items_metric[0] 95 | tasks = items_metric[1] 96 | tasks_driven = items_driven[1] 97 | 98 | if weather in set(weathers): 99 | print(' Weather: ', weather_name_dict[weather]) 100 | count = 0 101 | for t, t_driven in zip(tasks, tasks_driven): 102 | # if isinstance(t, np.ndarray) or isinstance(t, list): 103 | if t == []: 104 | print('Metric Not Computed') 105 | else: 106 | if sum(t) > 0: 107 | print(' Task ', count, ' -> ', t_driven / float(sum(t))) 108 | else: 109 | print(' Task ', count, ' -> more than', t_driven) 110 | 111 | metric_sum_values[count] += float(sum(t)) 112 | summed_driven_kilometers[count] += t_driven 113 | 114 | count += 1 115 | print (' Average Between Weathers') 116 | for i in range(len(metric_sum_values)): 117 | if metric_sum_values[i] == 0: 118 | print(' Task ', i, ' -> more than ', summed_driven_kilometers[i]) 119 | else: 120 | print(' Task ', i, ' -> ', summed_driven_kilometers[i] / metric_sum_values[i]) 121 | print ("") 122 | 123 | print("") 124 | print("") 125 | -------------------------------------------------------------------------------- /carla/transform.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 2 | # Barcelona (UAB), and the INTEL Visual Computing Lab. 3 | # 4 | # This work is licensed under the terms of the MIT license. 5 | # For a copy, see . 6 | 7 | import math 8 | 9 | from collections import namedtuple 10 | 11 | try: 12 | import numpy 13 | except ImportError: 14 | raise RuntimeError( 15 | 'cannot import numpy, make sure numpy package is installed.') 16 | 17 | try: 18 | from . import carla_server_pb2 as carla_protocol 19 | except ImportError: 20 | raise RuntimeError('cannot import "carla_server_pb2.py", run ' 21 | 'the protobuf compiler to generate this file') 22 | 23 | 24 | Translation = namedtuple('Translation', 'x y z') 25 | Translation.__new__.__defaults__ = (0.0, 0.0, 0.0) 26 | 27 | Rotation = namedtuple('Rotation', 'pitch yaw roll') 28 | Rotation.__new__.__defaults__ = (0.0, 0.0, 0.0) 29 | 30 | Scale = namedtuple('Scale', 'x y z') 31 | Scale.__new__.__defaults__ = (1.0, 1.0, 1.0) 32 | 33 | 34 | class Transform(object): 35 | """A 3D transformation. 36 | 37 | The transformation is applied in the order: scale, rotation, translation. 38 | """ 39 | 40 | def __init__(self, *args, **kwargs): 41 | if 'matrix' in kwargs: 42 | self.matrix = kwargs['matrix'] 43 | return 44 | if isinstance(args[0], carla_protocol.Transform): 45 | args = [ 46 | Translation( 47 | args[0].location.x, 48 | args[0].location.y, 49 | args[0].location.z), 50 | Rotation( 51 | args[0].rotation.pitch, 52 | args[0].rotation.yaw, 53 | args[0].rotation.roll) 54 | ] 55 | self.matrix = numpy.matrix(numpy.identity(4)) 56 | self.set(*args, **kwargs) 57 | 58 | def set(self, *args): 59 | """Builds the transform matrix given a Translate, Rotation 60 | and Scale. 61 | """ 62 | translation = Translation() 63 | rotation = Rotation() 64 | scale = Scale() 65 | 66 | if len(args) > 3: 67 | raise ValueError("'Transform' accepts 3 values as maximum.") 68 | 69 | def get_single_obj_type(obj_type): 70 | """Returns the unique object contained in the 71 | arguments lists that is instance of 'obj_type'. 
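For example, get_single_obj_type(Rotation) returns the single Rotation passed to set(), or a default Rotation() when none was given.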
72 | """ 73 | obj = [x for x in args if isinstance(x, obj_type)] 74 | if len(obj) > 1: 75 | raise ValueError("Transform only accepts one instances of " + 76 | str(obj_type) + " as a parameter") 77 | elif not obj: 78 | # Create an instance of the type that is 'obj_type' 79 | return obj_type() 80 | return obj[0] 81 | 82 | translation = get_single_obj_type(Translation) 83 | rotation = get_single_obj_type(Rotation) 84 | scale = get_single_obj_type(Scale) 85 | 86 | for param in args: 87 | if not isinstance(param, Translation) and \ 88 | not isinstance(param, Rotation) and \ 89 | not isinstance(param, Scale): 90 | raise TypeError( 91 | "'" + str(type(param)) + "' type not match with \ 92 | 'Translation', 'Rotation' or 'Scale'") 93 | 94 | # Transformation matrix 95 | cy = math.cos(numpy.radians(rotation.yaw)) 96 | sy = math.sin(numpy.radians(rotation.yaw)) 97 | cr = math.cos(numpy.radians(rotation.roll)) 98 | sr = math.sin(numpy.radians(rotation.roll)) 99 | cp = math.cos(numpy.radians(rotation.pitch)) 100 | sp = math.sin(numpy.radians(rotation.pitch)) 101 | self.matrix[0, 3] = translation.x 102 | self.matrix[1, 3] = translation.y 103 | self.matrix[2, 3] = translation.z 104 | self.matrix[0, 0] = scale.x * (cp * cy) 105 | self.matrix[0, 1] = scale.y * (cy * sp * sr - sy * cr) 106 | self.matrix[0, 2] = -scale.z * (cy * sp * cr + sy * sr) 107 | self.matrix[1, 0] = scale.x * (sy * cp) 108 | self.matrix[1, 1] = scale.y * (sy * sp * sr + cy * cr) 109 | self.matrix[1, 2] = scale.z * (cy * sr - sy * sp * cr) 110 | self.matrix[2, 0] = scale.x * (sp) 111 | self.matrix[2, 1] = -scale.y * (cp * sr) 112 | self.matrix[2, 2] = scale.z * (cp * cr) 113 | 114 | def inverse(self): 115 | """Return the inverse transform.""" 116 | return Transform(matrix=numpy.linalg.inv(self.matrix)) 117 | 118 | def transform_points(self, points): 119 | """ 120 | Given a 4x4 transformation matrix, transform an array of 3D points. 121 | Expected point foramt: [[X0,Y0,Z0],..[Xn,Yn,Zn]] 122 | """ 123 | # Needed foramt: [[X0,..Xn],[Z0,..Zn],[Z0,..Zn]]. So let's transpose 124 | # the point matrix. 125 | points = points.transpose() 126 | # Add 0s row: [[X0..,Xn],[Y0..,Yn],[Z0..,Zn],[0,..0]] 127 | points = numpy.append(points, numpy.ones((1, points.shape[1])), axis=0) 128 | # Point transformation 129 | points = self.matrix * points 130 | # Return all but last row 131 | return points[0:3].transpose() 132 | 133 | def __mul__(self, other): 134 | return Transform(matrix=numpy.dot(self.matrix, other.matrix)) 135 | 136 | def __str__(self): 137 | return str(self.matrix) 138 | -------------------------------------------------------------------------------- /carla/planner/astar.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 2 | # Barcelona (UAB). 3 | # 4 | # This work is licensed under the terms of the MIT license. 5 | # For a copy, see . 6 | 7 | import heapq 8 | 9 | 10 | class Cell(object): 11 | def __init__(self, x, y, reachable): 12 | """Initialize new cell. 13 | 14 | @param reachable is cell reachable? not a wall? 15 | @param x cell x coordinate 16 | @param y cell y coordinate 17 | @param g cost to move from the starting cell to this cell. 18 | @param h estimation of the cost to move from this cell 19 | to the ending cell. 
20 | @param f f = g + h 21 | """ 22 | self.reachable = reachable 23 | self.x = x 24 | self.y = y 25 | self.parent = None 26 | self.g = 0 27 | self.h = 0 28 | self.f = 0 29 | 30 | def __lt__(self, other): 31 | return self.g < other.g 32 | 33 | 34 | class AStar(object): 35 | def __init__(self): 36 | # open list 37 | self.opened = [] 38 | heapq.heapify(self.opened) 39 | # visited cells list 40 | self.closed = set() 41 | # grid cells 42 | self.cells = [] 43 | self.grid_height = None 44 | self.grid_width = None 45 | self.start = None 46 | self.end = None 47 | 48 | def init_grid(self, width, height, walls, start, end): 49 | """Prepare grid cells, walls. 50 | 51 | @param width grid's width. 52 | @param height grid's height. 53 | @param walls list of wall x,y tuples. 54 | @param start grid starting point x,y tuple. 55 | @param end grid ending point x,y tuple. 56 | """ 57 | self.grid_height = height 58 | self.grid_width = width 59 | for x in range(self.grid_width): 60 | for y in range(self.grid_height): 61 | if (x, y) in walls: 62 | reachable = False 63 | else: 64 | reachable = True 65 | self.cells.append(Cell(x, y, reachable)) 66 | self.start = self.get_cell(*start) 67 | self.end = self.get_cell(*end) 68 | 69 | def get_heuristic(self, cell): 70 | """Compute the heuristic value H for a cell. 71 | 72 | Distance between this cell and the ending cell multiply by 10. 73 | 74 | @returns heuristic value H 75 | """ 76 | return 10 * (abs(cell.x - self.end.x) + abs(cell.y - self.end.y)) 77 | 78 | def get_cell(self, x, y): 79 | """Returns a cell from the cells list. 80 | 81 | @param x cell x coordinate 82 | @param y cell y coordinate 83 | @returns cell 84 | """ 85 | return self.cells[x * self.grid_height + y] 86 | 87 | def get_adjacent_cells(self, cell): 88 | """Returns adjacent cells to a cell. 89 | 90 | Clockwise starting from the one on the right. 91 | 92 | @param cell get adjacent cells for this cell 93 | @returns adjacent cells list. 94 | """ 95 | cells = [] 96 | if cell.x < self.grid_width - 1: 97 | cells.append(self.get_cell(cell.x + 1, cell.y)) 98 | if cell.y > 0: 99 | cells.append(self.get_cell(cell.x, cell.y - 1)) 100 | if cell.x > 0: 101 | cells.append(self.get_cell(cell.x - 1, cell.y)) 102 | if cell.y < self.grid_height - 1: 103 | cells.append(self.get_cell(cell.x, cell.y + 1)) 104 | return cells 105 | 106 | def get_path(self): 107 | cell = self.end 108 | path = [(cell.x, cell.y)] 109 | while cell.parent is not self.start: 110 | cell = cell.parent 111 | path.append((cell.x, cell.y)) 112 | 113 | path.append((self.start.x, self.start.y)) 114 | path.reverse() 115 | return path 116 | 117 | def update_cell(self, adj, cell): 118 | """Update adjacent cell. 119 | 120 | @param adj adjacent cell to current cell 121 | @param cell current cell being processed 122 | """ 123 | adj.g = cell.g + 10 124 | adj.h = self.get_heuristic(adj) 125 | adj.parent = cell 126 | adj.f = adj.h + adj.g 127 | 128 | def solve(self): 129 | """Solve maze, find path to ending cell. 130 | 131 | @returns path or None if not found. 
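
        A minimal usage sketch (a hypothetical 3x3 grid with a single
        wall; the exact path may vary with heap tie-breaking):

            astar = AStar()
            astar.init_grid(3, 3, [(1, 1)], (0, 0), (2, 2))
            path = astar.solve()
            # e.g. [(0, 0), (1, 0), (2, 0), (2, 1), (2, 2)]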
132 | """ 133 | # add starting cell to open heap queue 134 | heapq.heappush(self.opened, (self.start.f, self.start)) 135 | while len(self.opened): 136 | # pop cell from heap queue 137 | _, cell = heapq.heappop(self.opened) 138 | # add cell to closed list so we don't process it twice 139 | self.closed.add(cell) 140 | # if ending cell, return found path 141 | if cell is self.end: 142 | return self.get_path() 143 | # get adjacent cells for cell 144 | adj_cells = self.get_adjacent_cells(cell) 145 | for adj_cell in adj_cells: 146 | if adj_cell.reachable and adj_cell not in self.closed: 147 | if (adj_cell.f, adj_cell) in self.opened: 148 | # if adj cell in open list, check if current path is 149 | # better than the one previously found 150 | # for this adj cell. 151 | if adj_cell.g > cell.g + 10: 152 | self.update_cell(adj_cell, cell) 153 | else: 154 | self.update_cell(adj_cell, cell) 155 | # add adj cell to open list 156 | heapq.heappush(self.opened, (adj_cell.f, adj_cell)) 157 | -------------------------------------------------------------------------------- /carla/planner/map.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 2 | # Barcelona (UAB). 3 | # 4 | # This work is licensed under the terms of the MIT license. 5 | # For a copy, see . 6 | 7 | """Class used for operating the city map.""" 8 | 9 | import math 10 | import os 11 | 12 | try: 13 | import numpy as np 14 | except ImportError: 15 | raise RuntimeError('cannot import numpy, make sure numpy package is installed') 16 | 17 | try: 18 | from PIL import Image 19 | except ImportError: 20 | raise RuntimeError('cannot import PIL, make sure pillow package is installed') 21 | 22 | from carla.planner.graph import Graph 23 | from carla.planner.graph import sldist 24 | from carla.planner.grid import Grid 25 | from carla.planner.converter import Converter 26 | 27 | 28 | def color_to_angle(color): 29 | return (float(color) / 255.0) * 2 * math.pi 30 | 31 | 32 | class CarlaMap(object): 33 | 34 | def __init__(self, city, pixel_density, node_density): 35 | dir_path = os.path.dirname(__file__) 36 | city_file = os.path.join(dir_path, city + '.txt') 37 | 38 | city_map_file = os.path.join(dir_path, city + '.png') 39 | city_map_file_lanes = os.path.join(dir_path, city + 'Lanes.png') 40 | city_map_file_center = os.path.join(dir_path, city + 'Central.png') 41 | 42 | # The built graph. This is the exact same graph that unreal builds. This 43 | # is a generic structure used for many cases 44 | self._graph = Graph(city_file, node_density) 45 | 46 | self._pixel_density = pixel_density 47 | self._grid = Grid(self._graph) 48 | # The number of game units per pixel. For now this is fixed. 
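        # A minimal usage sketch of this class (the density values here are
        # illustrative, not the project's canonical settings):
        #
        #   carla_map = CarlaMap('Town01', 0.1643, 50.0)
        #   pixel = carla_map.convert_to_pixel([x, y, z])  # world -> map pixel
        #   node = carla_map.convert_to_node([x, y, z])    # world -> graph node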
49 | 50 | self._converter = Converter(city_file, pixel_density, node_density) 51 | 52 | # Load the lanes image 53 | self.map_image_lanes = Image.open(city_map_file_lanes) 54 | self.map_image_lanes.load() 55 | self.map_image_lanes = np.asarray(self.map_image_lanes, dtype="int32") 56 | # Load the image 57 | self.map_image = Image.open(city_map_file) 58 | self.map_image.load() 59 | self.map_image = np.asarray(self.map_image, dtype="int32") 60 | 61 | # Load the lanes image 62 | self.map_image_center = Image.open(city_map_file_center) 63 | self.map_image_center.load() 64 | self.map_image_center = np.asarray(self.map_image_center, dtype="int32") 65 | 66 | def get_graph_resolution(self): 67 | 68 | return self._graph.get_resolution() 69 | 70 | def get_map(self, height=None): 71 | if height is not None: 72 | img = Image.fromarray(self.map_image.astype(np.uint8)) 73 | 74 | aspect_ratio = height / float(self.map_image.shape[0]) 75 | 76 | img = img.resize((int(aspect_ratio * self.map_image.shape[1]), height), Image.ANTIALIAS) 77 | img.load() 78 | return np.asarray(img, dtype="int32") 79 | return np.fliplr(self.map_image) 80 | 81 | def get_map_lanes(self, size=None): 82 | if size is not None: 83 | img = Image.fromarray(self.map_image_lanes.astype(np.uint8)) 84 | img = img.resize((size[1], size[0]), Image.ANTIALIAS) 85 | img.load() 86 | return np.fliplr(np.asarray(img, dtype="int32")) 87 | return np.fliplr(self.map_image_lanes) 88 | 89 | def get_lane_orientation(self, world): 90 | """Get the lane orientation of a certain world position.""" 91 | pixel = self.convert_to_pixel(world) 92 | 93 | ori = self.map_image_lanes[int(pixel[1]), int(pixel[0]), 2] 94 | ori = color_to_angle(ori) 95 | 96 | return (-math.cos(ori), -math.sin(ori)) 97 | 98 | def convert_to_node(self, input_data): 99 | """ 100 | Receives a data type (Can Be Pixel or World ) 101 | :param input_data: position in some coordinate 102 | :return: A node object 103 | """ 104 | return self._converter.convert_to_node(input_data) 105 | 106 | def convert_to_pixel(self, input_data): 107 | """ 108 | Receives a data type (Can Be Node or World ) 109 | :param input_data: position in some coordinate 110 | :return: A node object 111 | """ 112 | return self._converter.convert_to_pixel(input_data) 113 | 114 | def convert_to_world(self, input_data): 115 | """ 116 | Receives a data type (Can Be Pixel or Node ) 117 | :param input_data: position in some coordinate 118 | :return: A node object 119 | """ 120 | return self._converter.convert_to_world(input_data) 121 | 122 | def get_walls_directed(self, node_source, source_ori, node_target, target_ori): 123 | """ 124 | This is the most hacky function. Instead of planning on two ways, 125 | we basically use a one way road and interrupt the other road by adding 126 | an artificial wall. 
127 | 128 | """ 129 | 130 | final_walls = self._grid.get_wall_source(node_source, source_ori, node_target) 131 | 132 | final_walls = final_walls.union(self._grid.get_wall_target( 133 | node_target, target_ori, node_source)) 134 | return final_walls 135 | 136 | def get_walls(self): 137 | 138 | return self._grid.get_walls() 139 | 140 | def get_distance_closest_node(self, pos): 141 | 142 | distance = [] 143 | for node_iter in self._graph.intersection_nodes(): 144 | distance.append(sldist(node_iter, pos)) 145 | 146 | return sorted(distance)[0] 147 | 148 | def get_intersection_nodes(self): 149 | return self._graph.intersection_nodes() 150 | 151 | def search_on_grid(self,node): 152 | return self._grid.search_on_grid(node[0], node[1]) 153 | -------------------------------------------------------------------------------- /carla/driving_benchmark/experiment_suites/corl_2017.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 2 | # Barcelona (UAB). 3 | # 4 | # This work is licensed under the terms of the MIT license. 5 | # For a copy, see . 6 | 7 | # CORL experiment set. 8 | 9 | from __future__ import print_function 10 | 11 | from carla.driving_benchmark.experiment import Experiment 12 | from carla.sensor import Camera 13 | from carla.settings import CarlaSettings 14 | from carla.driving_benchmark.experiment_suites.experiment_suite import ExperimentSuite 15 | 16 | 17 | class CoRL2017(ExperimentSuite): 18 | 19 | @property 20 | def train_weathers(self): 21 | return [1, 3, 6, 8] 22 | 23 | @property 24 | def test_weathers(self): 25 | return [4, 14] 26 | 27 | def _poses_town01(self): 28 | """ 29 | Each matrix is a new task. We have all the four tasks 30 | 31 | """ 32 | 33 | def _poses_straight(): 34 | return [[36, 40], [39, 35], [110, 114], [7, 3], [0, 4], 35 | [68, 50], [61, 59], [47, 64], [147, 90], [33, 87], 36 | [26, 19], [80, 76], [45, 49], [55, 44], [29, 107], 37 | [95, 104], [84, 34], [53, 67], [22, 17], [91, 148], 38 | [20, 107], [78, 70], [95, 102], [68, 44], [45, 69]] 39 | 40 | def _poses_one_curve(): 41 | return [[138, 17], [47, 16], [26, 9], [42, 49], [140, 124], 42 | [85, 98], [65, 133], [137, 51], [76, 66], [46, 39], 43 | [40, 60], [0, 29], [4, 129], [121, 140], [2, 129], 44 | [78, 44], [68, 85], [41, 102], [95, 70], [68, 129], 45 | [84, 69], [47, 79], [110, 15], [130, 17], [0, 17]] 46 | 47 | def _poses_navigation(): 48 | return [[105, 29], [27, 130], [102, 87], [132, 27], [24, 44], 49 | [96, 26], [34, 67], [28, 1], [140, 134], [105, 9], 50 | [148, 129], [65, 18], [21, 16], [147, 97], [42, 51], 51 | [30, 41], [18, 107], [69, 45], [102, 95], [18, 145], 52 | [111, 64], [79, 45], [84, 69], [73, 31], [37, 81]] 53 | 54 | return [_poses_straight(), 55 | _poses_one_curve(), 56 | _poses_navigation(), 57 | _poses_navigation()] 58 | 59 | def _poses_town02(self): 60 | 61 | def _poses_straight(): 62 | return [[38, 34], [4, 2], [12, 10], [62, 55], [43, 47], 63 | [64, 66], [78, 76], [59, 57], [61, 18], [35, 39], 64 | [12, 8], [0, 18], [75, 68], [54, 60], [45, 49], 65 | [46, 42], [53, 46], [80, 29], [65, 63], [0, 81], 66 | [54, 63], [51, 42], [16, 19], [17, 26], [77, 68]] 67 | 68 | def _poses_one_curve(): 69 | return [[37, 76], [8, 24], [60, 69], [38, 10], [21, 1], 70 | [58, 71], [74, 32], [44, 0], [71, 16], [14, 24], 71 | [34, 11], [43, 14], [75, 16], [80, 21], [3, 23], 72 | [75, 59], [50, 47], [11, 19], [77, 34], [79, 25], 73 | [40, 63], [58, 76], [79, 55], [16, 61], [27, 11]] 74 | 75 | 
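        # Each pose is a [start, end] pair of player-start indices in the
        # town; each of the four returned lists is one task (straight, one
        # turn, navigation, navigation with dynamic obstacles). A simplified
        # sketch of how a single pose is consumed (for illustration only):
        #
        #   start_idx, end_idx = poses_task[0]  # e.g. [19, 66]
        #   client.start_episode(start_idx)     # then drive towards end_idx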
def _poses_navigation(): 76 | return [[19, 66], [79, 14], [19, 57], [23, 1], 77 | [53, 76], [42, 13], [31, 71], [33, 5], 78 | [54, 30], [10, 61], [66, 3], [27, 12], 79 | [79, 19], [2, 29], [16, 14], [5, 57], 80 | [70, 73], [46, 67], [57, 50], [61, 49], [21, 12], 81 | [51, 81], [77, 68], [56, 65], [43, 54]] 82 | 83 | return [_poses_straight(), 84 | _poses_one_curve(), 85 | _poses_navigation(), 86 | _poses_navigation() 87 | ] 88 | 89 | def build_experiments(self): 90 | """ 91 | Creates the whole set of experiment objects, 92 | The experiments created depend on the selected Town. 93 | 94 | 95 | """ 96 | 97 | # We set the camera 98 | # This single RGB camera is used on every experiment 99 | 100 | camera = Camera('CameraRGB') 101 | camera.set(FOV=100) 102 | camera.set_image_size(800, 600) 103 | camera.set_position(2.0, 0.0, 1.4) 104 | camera.set_rotation(-15.0, 0, 0) 105 | 106 | if self._city_name == 'Town01': 107 | poses_tasks = self._poses_town01() 108 | vehicles_tasks = [0, 0, 0, 20] 109 | pedestrians_tasks = [0, 0, 0, 50] 110 | else: 111 | poses_tasks = self._poses_town02() 112 | vehicles_tasks = [0, 0, 0, 15] 113 | pedestrians_tasks = [0, 0, 0, 50] 114 | 115 | experiments_vector = [] 116 | 117 | for weather in self.weathers: 118 | 119 | for iteration in range(len(poses_tasks)): 120 | poses = poses_tasks[iteration] 121 | vehicles = vehicles_tasks[iteration] 122 | pedestrians = pedestrians_tasks[iteration] 123 | 124 | conditions = CarlaSettings() 125 | conditions.set( 126 | SendNonPlayerAgentsInfo=True, 127 | NumberOfVehicles=vehicles, 128 | NumberOfPedestrians=pedestrians, 129 | WeatherId=weather 130 | ) 131 | # Add all the cameras that were set for this experiments 132 | 133 | conditions.add_sensor(camera) 134 | 135 | experiment = Experiment() 136 | experiment.set( 137 | Conditions=conditions, 138 | Poses=poses, 139 | Task=iteration, 140 | Repetitions=1 141 | ) 142 | experiments_vector.append(experiment) 143 | 144 | return experiments_vector 145 | -------------------------------------------------------------------------------- /carla/planner/planner.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 2 | # Barcelona (UAB). 3 | # 4 | # This work is licensed under the terms of the MIT license. 5 | # For a copy, see . 6 | 7 | import collections 8 | import math 9 | 10 | import numpy as np 11 | 12 | from . 
import city_track 13 | 14 | 15 | def compare(x, y): 16 | return collections.Counter(x) == collections.Counter(y) 17 | 18 | 19 | 20 | # Constants Used for the high level commands 21 | 22 | 23 | REACH_GOAL = 0.0 24 | GO_STRAIGHT = 5.0 25 | TURN_RIGHT = 4.0 26 | TURN_LEFT = 3.0 27 | LANE_FOLLOW = 2.0 28 | 29 | 30 | # Auxiliary algebra function 31 | def angle_between(v1, v2): 32 | return np.arccos(np.dot(v1, v2) / np.linalg.norm(v1) / np.linalg.norm(v2)) 33 | 34 | 35 | def sldist(c1, c2): return math.sqrt((c2[0] - c1[0]) ** 2 + (c2[1] - c1[1]) ** 2) 36 | 37 | 38 | def signal(v1, v2): 39 | return np.cross(v1, v2) / np.linalg.norm(v1) / np.linalg.norm(v2) 40 | 41 | 42 | class Planner(object): 43 | 44 | def __init__(self, city_name): 45 | 46 | self._city_track = city_track.CityTrack(city_name) 47 | 48 | self._commands = [] 49 | 50 | def get_next_command(self, source, source_ori, target, target_ori): 51 | """ 52 | Computes the full plan and returns the next command, 53 | Args 54 | source: source position 55 | source_ori: source orientation 56 | target: target position 57 | target_ori: target orientation 58 | Returns 59 | a command ( Straight,Lane Follow, Left or Right) 60 | """ 61 | 62 | track_source = self._city_track.project_node(source) 63 | track_target = self._city_track.project_node(target) 64 | 65 | # reach the goal 66 | 67 | if self._city_track.is_at_goal(track_source, track_target): 68 | return REACH_GOAL 69 | 70 | if (self._city_track.is_at_new_node(track_source) 71 | and self._city_track.is_away_from_intersection(track_source)): 72 | 73 | route = self._city_track.compute_route(track_source, source_ori, 74 | track_target, target_ori) 75 | if route is None: 76 | raise RuntimeError('Impossible to find route') 77 | 78 | self._commands = self._route_to_commands(route) 79 | 80 | if self._city_track.is_far_away_from_route_intersection( 81 | track_source): 82 | return LANE_FOLLOW 83 | else: 84 | if self._commands: 85 | return self._commands[0] 86 | else: 87 | return LANE_FOLLOW 88 | else: 89 | 90 | if self._city_track.is_far_away_from_route_intersection( 91 | track_source): 92 | return LANE_FOLLOW 93 | 94 | # If there is computed commands 95 | if self._commands: 96 | return self._commands[0] 97 | else: 98 | return LANE_FOLLOW 99 | 100 | def get_shortest_path_distance( 101 | self, 102 | source, 103 | source_ori, 104 | target, 105 | target_ori): 106 | 107 | distance = 0 108 | track_source = self._city_track.project_node(source) 109 | track_target = self._city_track.project_node(target) 110 | 111 | current_pos = track_source 112 | 113 | route = self._city_track.compute_route(track_source, source_ori, 114 | track_target, target_ori) 115 | # No Route, distance is zero 116 | if route is None: 117 | return 0.0 118 | 119 | for node_iter in route: 120 | distance += sldist(node_iter, current_pos) 121 | current_pos = node_iter 122 | 123 | # We multiply by these values to convert distance to world coordinates 124 | return distance * self._city_track.get_pixel_density() \ 125 | * self._city_track.get_node_density() 126 | 127 | def is_there_posible_route(self, source, source_ori, target, target_ori): 128 | 129 | track_source = self._city_track.project_node(source) 130 | track_target = self._city_track.project_node(target) 131 | 132 | return not self._city_track.compute_route( 133 | track_source, source_ori, track_target, target_ori) is None 134 | 135 | def test_position(self, source): 136 | 137 | node_source = self._city_track.project_node(source) 138 | 139 | return 
self._city_track.is_away_from_intersection(node_source) 140 | 141 | def _route_to_commands(self, route): 142 | 143 | """ 144 | from the shortest path graph, transform it into a list of commands 145 | 146 | :param route: the sub graph containing the shortest path 147 | :return: list of commands encoded from 0-5 148 | """ 149 | 150 | commands_list = [] 151 | 152 | for i in range(0, len(route)): 153 | if route[i] not in self._city_track.get_intersection_nodes(): 154 | continue 155 | 156 | current = route[i] 157 | past = route[i - 1] 158 | future = route[i + 1] 159 | 160 | past_to_current = np.array( 161 | [current[0] - past[0], current[1] - past[1]]) 162 | current_to_future = np.array( 163 | [future[0] - current[0], future[1] - current[1]]) 164 | angle = signal(current_to_future, past_to_current) 165 | 166 | if angle < -0.1: 167 | command = TURN_RIGHT 168 | elif angle > 0.1: 169 | command = TURN_LEFT 170 | else: 171 | command = GO_STRAIGHT 172 | 173 | commands_list.append(command) 174 | 175 | return commands_list 176 | -------------------------------------------------------------------------------- /carla/planner/converter.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 2 | # Barcelona (UAB). 3 | # 4 | # This work is licensed under the terms of the MIT license. 5 | # For a copy, see . 6 | 7 | import math 8 | import numpy as np 9 | 10 | from carla.planner.graph import string_to_floats 11 | 12 | # Constant definition enumeration 13 | 14 | PIXEL = 0 15 | WORLD = 1 16 | NODE = 2 17 | 18 | 19 | class Converter(object): 20 | 21 | def __init__(self, city_file, pixel_density, node_density): 22 | 23 | self._node_density = node_density 24 | self._pixel_density = pixel_density 25 | with open(city_file, 'r') as f: 26 | # The offset of the world from the zero coordinates ( The 27 | # coordinate we consider zero) 28 | self._worldoffset = string_to_floats(f.readline()) 29 | 30 | angles = string_to_floats(f.readline()) 31 | 32 | # If there is an rotation between the world and map coordinates. 33 | self._worldrotation = np.array([ 34 | [math.cos(math.radians(angles[2])), -math.sin(math.radians(angles[2])), 0.0], 35 | [math.sin(math.radians(angles[2])), math.cos(math.radians(angles[2])), 0.0], 36 | [0.0, 0.0, 1.0]]) 37 | 38 | # Ignore for now, these are offsets for map coordinates and scale 39 | # (not used). 40 | _ = f.readline() 41 | 42 | # The offset of the map zero coordinate. 
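        # The map offset read below, together with the world offset above,
        # defines the conversion chain world <-> pixel <-> node. Roughly:
        #   pixel = (world + world_offset - map_offset) / pixel_density
        #   node  = pixel / node_density - 2
        # A hypothetical round trip (illustrative density values):
        #   conv = Converter('Town01.txt', 0.1643, 50.0)
        #   node = conv.convert_to_node([20.0, 5.0, 38.0])  # world -> node
        #   back = conv.convert_to_world(node)              # lossy inverse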
43 | self._mapoffset = string_to_floats(f.readline()) 44 | 45 | def convert_to_node(self, input_data): 46 | """ 47 | Receives a data type (Can Be Pixel or World ) 48 | :param input_data: position in some coordinate 49 | :return: A vector representing a node 50 | """ 51 | 52 | input_type = self._check_input_type(input_data) 53 | if input_type == PIXEL: 54 | return self._pixel_to_node(input_data) 55 | elif input_type == WORLD: 56 | return self._world_to_node(input_data) 57 | else: 58 | raise ValueError('Invalid node to be converted') 59 | 60 | def convert_to_pixel(self, input_data): 61 | 62 | """ 63 | Receives a data type (Can Be Node or World ) 64 | :param input_data: position in some coordinate 65 | :return: A vector with pixel coordinates 66 | """ 67 | 68 | input_type = self._check_input_type(input_data) 69 | 70 | if input_type == NODE: 71 | return self._node_to_pixel(input_data) 72 | elif input_type == WORLD: 73 | return self._world_to_pixel(input_data) 74 | else: 75 | raise ValueError('Invalid node to be converted') 76 | 77 | def convert_to_world(self, input_data): 78 | 79 | """ 80 | Receives a data type (Can Be Pixel or Node ) 81 | :param input_data: position in some coordinate 82 | :return: vector with world coordinates 83 | """ 84 | 85 | input_type = self._check_input_type(input_data) 86 | if input_type == NODE: 87 | return self._node_to_world(input_data) 88 | elif input_type == PIXEL: 89 | return self._pixel_to_world(input_data) 90 | else: 91 | raise ValueError('Invalid node to be converted') 92 | 93 | def _node_to_pixel(self, node): 94 | """ 95 | Conversion from node format (graph) to pixel (image) 96 | :param node: 97 | :return: pixel 98 | """ 99 | pixel = [((node[0] + 2) * self._node_density) 100 | , ((node[1] + 2) * self._node_density)] 101 | return pixel 102 | 103 | def _pixel_to_node(self, pixel): 104 | """ 105 | Conversion from pixel format (image) to node (graph) 106 | :param node: 107 | :return: pixel 108 | """ 109 | node = [int(((pixel[0]) / self._node_density) - 2) 110 | , int(((pixel[1]) / self._node_density) - 2)] 111 | 112 | return tuple(node) 113 | 114 | def _pixel_to_world(self, pixel): 115 | """ 116 | Conversion from pixel format (image) to world (3D) 117 | :param pixel: 118 | :return: world 119 | """ 120 | 121 | relative_location = [pixel[0] * self._pixel_density, 122 | pixel[1] * self._pixel_density] 123 | 124 | world = [ 125 | relative_location[0] + self._mapoffset[0] - self._worldoffset[0], 126 | relative_location[1] + self._mapoffset[1] - self._worldoffset[1], 127 | 22 128 | ] 129 | 130 | return world 131 | 132 | def _world_to_pixel(self, world): 133 | """ 134 | Conversion from world format (3D) to pixel 135 | :param world: 136 | :return: pixel 137 | """ 138 | 139 | rotation = np.array([world[0], world[1], world[2]]) 140 | rotation = rotation.dot(self._worldrotation) 141 | 142 | relative_location = [rotation[0] + self._worldoffset[0] - self._mapoffset[0], 143 | rotation[1] + self._worldoffset[1] - self._mapoffset[1], 144 | rotation[2] + self._worldoffset[2] - self._mapoffset[2]] 145 | 146 | 147 | 148 | pixel = [math.floor(relative_location[0] / float(self._pixel_density)), 149 | math.floor(relative_location[1] / float(self._pixel_density))] 150 | 151 | return pixel 152 | 153 | def _world_to_node(self, world): 154 | return self._pixel_to_node(self._world_to_pixel(world)) 155 | 156 | def _node_to_world(self, node): 157 | 158 | return self._pixel_to_world(self._node_to_pixel(node)) 159 | 160 | def _check_input_type(self, input_data): 161 | if len(input_data) > 2: 
162 | return WORLD 163 | elif type(input_data[0]) is int: 164 | return NODE 165 | else: 166 | return PIXEL 167 | -------------------------------------------------------------------------------- /carla/image_converter.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 2 | # Barcelona (UAB). 3 | # 4 | # This work is licensed under the terms of the MIT license. 5 | # For a copy, see . 6 | 7 | """ 8 | Handy conversions for CARLA images. 9 | 10 | The functions here are provided for real-time display, if you want to save the 11 | converted images, save the images from Python without conversion and convert 12 | them afterwards with the C++ implementation at "Util/ImageConverter" as it 13 | provides considerably better performance. 14 | """ 15 | 16 | import math 17 | 18 | try: 19 | import numpy 20 | from numpy.matlib import repmat 21 | except ImportError: 22 | raise RuntimeError('cannot import numpy, make sure numpy package is installed') 23 | 24 | 25 | from . import sensor 26 | 27 | 28 | def to_bgra_array(image): 29 | """Convert a CARLA raw image to a BGRA numpy array.""" 30 | if not isinstance(image, sensor.Image): 31 | raise ValueError("Argument must be a carla.sensor.Image") 32 | array = numpy.frombuffer(image.raw_data, dtype=numpy.dtype("uint8")) 33 | array = numpy.reshape(array, (image.height, image.width, 4)) 34 | return array 35 | 36 | 37 | def to_rgb_array(image): 38 | """Convert a CARLA raw image to a RGB numpy array.""" 39 | array = to_bgra_array(image) 40 | # Convert BGRA to RGB. 41 | array = array[:, :, :3] 42 | array = array[:, :, ::-1] 43 | return array 44 | 45 | 46 | def labels_to_array(image): 47 | """ 48 | Convert an image containing CARLA semantic segmentation labels to a 2D array 49 | containing the label of each pixel. 50 | """ 51 | return to_bgra_array(image)[:, :, 2] 52 | 53 | 54 | def labels_to_cityscapes_palette(image): 55 | """ 56 | Convert an image containing CARLA semantic segmentation labels to 57 | Cityscapes palette. 58 | """ 59 | classes = { 60 | 0: [0, 0, 0], # None 61 | 1: [70, 70, 70], # Buildings 62 | 2: [190, 153, 153], # Fences 63 | 3: [72, 0, 90], # Other 64 | 4: [220, 20, 60], # Pedestrians 65 | 5: [153, 153, 153], # Poles 66 | 6: [157, 234, 50], # RoadLines 67 | 7: [128, 64, 128], # Roads 68 | 8: [244, 35, 232], # Sidewalks 69 | 9: [107, 142, 35], # Vegetation 70 | 10: [0, 0, 255], # Vehicles 71 | 11: [102, 102, 156], # Walls 72 | 12: [220, 220, 0] # TrafficSigns 73 | } 74 | array = labels_to_array(image) 75 | result = numpy.zeros((array.shape[0], array.shape[1], 3)) 76 | for key, value in classes.items(): 77 | result[numpy.where(array == key)] = value 78 | return result 79 | 80 | 81 | def depth_to_array(image): 82 | """ 83 | Convert an image containing CARLA encoded depth-map to a 2D array containing 84 | the depth value of each pixel normalized between [0.0, 1.0]. 85 | """ 86 | array = to_bgra_array(image) 87 | array = array.astype(numpy.float32) 88 | # Apply (R + G * 256 + B * 256 * 256) / (256 * 256 * 256 - 1). 89 | normalized_depth = numpy.dot(array[:, :, :3], [65536.0, 256.0, 1.0]) 90 | normalized_depth /= 16777215.0 # (256.0 * 256.0 * 256.0 - 1.0) 91 | return normalized_depth 92 | 93 | 94 | def depth_to_logarithmic_grayscale(image): 95 | """ 96 | Convert an image containing CARLA encoded depth-map to a logarithmic 97 | grayscale image array. 98 | "max_depth" is used to omit the points that are far enough. 
99 | """ 100 | normalized_depth = depth_to_array(image) 101 | # Convert to logarithmic depth. 102 | logdepth = numpy.ones(normalized_depth.shape) + \ 103 | (numpy.log(normalized_depth) / 5.70378) 104 | logdepth = numpy.clip(logdepth, 0.0, 1.0) 105 | logdepth *= 255.0 106 | # Expand to three colors. 107 | return numpy.repeat(logdepth[:, :, numpy.newaxis], 3, axis=2) 108 | 109 | 110 | def depth_to_local_point_cloud(image, color=None, max_depth=0.9): 111 | """ 112 | Convert an image containing CARLA encoded depth-map to a 2D array containing 113 | the 3D position (relative to the camera) of each pixel and its corresponding 114 | RGB color of an array. 115 | "max_depth" is used to omit the points that are far enough. 116 | """ 117 | far = 1000.0 # max depth in meters. 118 | normalized_depth = depth_to_array(image) 119 | 120 | # (Intrinsic) K Matrix 121 | k = numpy.identity(3) 122 | k[0, 2] = image.width / 2.0 123 | k[1, 2] = image.height / 2.0 124 | k[0, 0] = k[1, 1] = image.width / \ 125 | (2.0 * math.tan(image.fov * math.pi / 360.0)) 126 | 127 | # 2d pixel coordinates 128 | pixel_length = image.width * image.height 129 | u_coord = repmat(numpy.r_[image.width-1:-1:-1], 130 | image.height, 1).reshape(pixel_length) 131 | v_coord = repmat(numpy.c_[image.height-1:-1:-1], 132 | 1, image.width).reshape(pixel_length) 133 | if color is not None: 134 | color = color.reshape(pixel_length, 3) 135 | normalized_depth = numpy.reshape(normalized_depth, pixel_length) 136 | 137 | # Search for pixels where the depth is greater than max_depth to 138 | # delete them 139 | max_depth_indexes = numpy.where(normalized_depth > max_depth) 140 | normalized_depth = numpy.delete(normalized_depth, max_depth_indexes) 141 | u_coord = numpy.delete(u_coord, max_depth_indexes) 142 | v_coord = numpy.delete(v_coord, max_depth_indexes) 143 | if color is not None: 144 | color = numpy.delete(color, max_depth_indexes, axis=0) 145 | 146 | # pd2 = [u,v,1] 147 | p2d = numpy.array([u_coord, v_coord, numpy.ones_like(u_coord)]) 148 | 149 | # P = [X,Y,Z] 150 | p3d = numpy.dot(numpy.linalg.inv(k), p2d) 151 | p3d *= normalized_depth * far 152 | 153 | # Formating the output to: 154 | # [[X1,Y1,Z1,R1,G1,B1],[X2,Y2,Z2,R2,G2,B2], ... [Xn,Yn,Zn,Rn,Gn,Bn]] 155 | if color is not None: 156 | # numpy.concatenate((numpy.transpose(p3d), color), axis=1) 157 | return sensor.PointCloud( 158 | image.frame_number, 159 | numpy.transpose(p3d), 160 | color_array=color) 161 | # [[X1,Y1,Z1],[X2,Y2,Z2], ... [Xn,Yn,Zn]] 162 | return sensor.PointCloud(image.frame_number, numpy.transpose(p3d)) 163 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Carla Controller 2 | 3 | The purpose of this repository is two-fold: 4 | 1) To generate driving datasets using the CARLA simulator 5 | 2) Use the CARLA simulator to test the performance of deep learning models trained to drive autonomously 6 | 7 | ## Setup Guide (Windows) 8 | 9 | 1. Download and install Anaconda. 10 | 2. Download and extract [Carla v. 0.8.2](https://github.com/carla-simulator/carla/releases/tag/0.8.2) 11 | 3. Navigate to the extracted folder and create a new file named `CarlaSettings.ini`: 12 | ``` ini 13 | [CARLA/Server] 14 | ; Time-out in milliseconds for the networking operations. 15 | ServerTimeOut=999999999; 16 | ``` 17 | 4. Navigate to the directory where you want to store the project. 18 | 5. 
Clone and open the repo: 19 | 20 | ``` 21 | git clone https://github.com/MaxJohnsen/carla-controller && cd carla-controller 22 | ``` 23 | 24 | 6. Install dependencies in a new Conda environment: 25 | 26 | ``` 27 | conda env create -f environment.yml 28 | ``` 29 | 30 | 7. Activate the new environment: 31 | 32 | ``` 33 | conda activate carla-controller 34 | ``` 35 | 36 | --- 37 | 38 | ## Data Generator 39 | The data generator is used to control the CARLA simulator (either manually or via the autopilot) while saving driving data to disk. The generated datasets can then be used to train deep learning models for autonomous driving. 40 | 41 | ### Running the Data Generator 42 | 43 | 1. Navigate to the extracted _Carla v. 0.8.2_ folder. 44 | 2. Run the server: 45 | ``` 46 | CarlaUE4.exe -Carla-server -windowed -carla-settings="CarlaSettings.ini" 47 | ``` 48 | 3. A CARLA simulator window should open, displaying a black background. The server is now waiting for a client to connect. 49 | 4. In a new cmd window, navigate to the project repository. 50 | 5. Run the Carla controller: 51 | ``` 52 | python data_generator.py 53 | ``` 54 | 55 | For help run: 56 | ``` 57 | python data_generator.py --help 58 | ``` 59 | 6. A Pygame window should open. You are now ready to start generating data. See [Controlling the Simulator](#controlling-the-simulator). 60 | 61 | ### Arguments 62 | ``` 63 | Usage: data_generator.py [-h] [-v] [--host H] [-j] [-p P] [-o PATH] 64 | ``` 65 | You can run the data generator with different arguments to customize the controller: 66 | 67 | Argument | Description | Default 68 | --- | --- | --- 69 | -h, --help | Displays help message | 70 | -v, --verbose | Print debug info | 71 | --host | IP of the host server | localhost 72 | -j, --joystick | Control the vehicle with an external joystick (e.g. a steering wheel) | 73 | -p, --port | TCP port to listen on | 2000 74 | -o, --output | Output folder for driving data | 75 | 76 | Example: `python data_generator.py -v -p 2001 -o data_output` will listen on port 2001, print debug information, and save recorded driving data to a folder called _data_output_. 77 | 78 | 79 | ### Controller and simulator configuration 80 | 81 | Update the project's configuration file (`settings.ini`) to customize the behavior of the controller and the simulator. 82 | 83 | Section | Key | Description | Default Value 84 | --- | --- | --- | --- 85 | Carla | NumberOfVehicles | The number of non-player vehicles in the simulated world. | 50 86 | Carla | NumberOfPedastrians | The number of pedestrians in the simulated world. | 30 87 | Carla | WeatherId | Weather and lighting configuration. Read more [here](https://carla.readthedocs.io/en/latest/carla_settings/). | 0 88 | Carla | RandomizeWeather | Randomly select weather and lighting conditions. | no 89 | Carla | QualityLevel | The quality level of the simulated world. | Epic 90 | Pygame | WindowHeight | The height of the Pygame window. | 1024 91 | Pygame | WindowWidht | The width of the Pygame window. | 768 92 | Pygame | OutputImageHeight | The height of the output images. | 300 93 | Pygame | OutputImageWidth | The width of the output images. | 180 94 | Controller | AutoStartRecording | Automatically enable recording at the beginning of a new episode (after a small delay). | no 95 | Controller | FrameLimit | Restart the episode when the frame limit is reached. | 0 96 | Controller | EpisodeLimit | Exit the program when the episode limit is reached. | 0 97 | AutoPilot | Noise | Noise applied to the autopilot's steering angle to prevent perfect driving. _Note: The noise is not applied to the logged autopilot data._ | 0
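
For reference, a minimal `settings.ini` putting these keys together might look like the sketch below (the values are just the defaults listed above; check the project's actual `settings.ini` for the authoritative key names):

``` ini
[Carla]
NumberOfVehicles=50
NumberOfPedastrians=30
WeatherId=0
RandomizeWeather=no
QualityLevel=Epic

[Pygame]
WindowHeight=1024
WindowWidht=768
OutputImageHeight=300
OutputImageWidth=180

[Controller]
AutoStartRecording=no
FrameLimit=0
EpisodeLimit=0

[AutoPilot]
Noise=0
```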
98 | 99 | ### Controlling the simulator 100 | 101 | Key | Action 102 | --- | --- 103 | A, ← | Steer left 104 | D, → | Steer right 105 | W, ↑ | Throttle 106 | S, ↓ | Brake 107 | Space | Handbrake 108 | Q | Toggle reverse 109 | P | Toggle autopilot 110 | R | Toggle driving data recording 111 | E | Start a new episode 112 | 113 | ### Data Recording 114 | 115 | #### Usage example 116 | 1) Start the data generator (the `--output` argument is required for data recording) 117 | ``` 118 | python data_generator.py -o data_output 119 | ``` 120 | 2) Start recording by pressing the `R` key. 121 | 3) Drive around. 122 | 4) Stop recording by pressing `R` again. 123 | 5) The simulator freezes while the data is being written to disk. 124 | 125 | _Note:_ 126 | - Starting a new episode while recording will end the recording and write the data to disk 127 | - The driving data from any additional recordings within a single episode will be appended to the end of that episode's driving log 128 | 129 | #### What data is recorded? 130 | 131 | Data | Description | Driving Log Format | Driving Log Header 132 | --- | --- | --- | --- 133 | RGB Image, Center | A forward facing RGB image from the center of the car. | Relative path | CenterRGB 134 | RGB Image, Left | A forward facing RGB image from the left side of the car. | Relative path | LeftRGB 135 | RGB Image, Right | A forward facing RGB image from the right side of the car. | Relative path | RightRGB 136 | Depth Map | A forward facing depth map, represented by a grayscale image. | Relative path | Depth 137 | Semantic Segmentation | A perfectly classified forward facing image. Objects are displayed in different colors according to the object class. | Relative Path | SemSeg 138 | Location | The location of the vehicle. | (X-pos, Y-pos, Z-pos) | Location 139 | Speed | The speed of the vehicle. | | Speed 140 | Player Controls | The vehicle's current control signals. | (Steering angle, throttle, brake, reverse_enabled) | Controls 141 | Autopilot Controls | The CARLA server's proposed control signals. | (Steering angle, throttle, brake, reverse_enabled) | APControls 142 | High-Level Command | The currently activated high-level command (see section #todo). | | HLC 143 | Speed-Limit | The current speed limit. | | SpeedLimit 144 | Traffic Light | The traffic light the car is currently facing. | | TrafficLight 145 | 146 | #### Directory Structure 147 | - `[output-folder]/` 148 | - `[episode timestamp]/` 149 | - `imgs/` 150 | - `[frame]_rgb_center.png` 151 | - `[frame]_rgb_left.png` 152 | - `[frame]_rgb_right.png` 153 | - `[frame]_rgb_depth.png` 154 | - `[frame]_rgb_sem_seg.png` 155 | - `driving_log.csv` 156 | 157 | #### Driving Log Structure 158 | _Example image from a `driving_log.csv` file:_ 159 | 160 | ![Driving Log Example](readme_imgs/driving_log_structure.png) 161 | -------------------------------------------------------------------------------- /carla/client.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 2 | # Barcelona (UAB). 3 | # 4 | # This work is licensed under the terms of the MIT license. 5 | # For a copy, see <https://opensource.org/licenses/MIT>. 6 | 7 | """CARLA Client.""" 8 | 9 | import logging 10 | import struct 11 | 12 | from contextlib import contextmanager 13 | 14 | from . import sensor 15 | from . 
import tcp 16 | from . import util 17 | 18 | try: 19 | from . import carla_server_pb2 as carla_protocol 20 | except ImportError: 21 | raise RuntimeError('cannot import "carla_server_pb2.py", run the protobuf compiler to generate this file') 22 | 23 | try: 24 | import numpy 25 | except ImportError: 26 | raise RuntimeError('cannot import numpy, make sure numpy package is installed.') 27 | 28 | 29 | VehicleControl = carla_protocol.Control 30 | 31 | 32 | @contextmanager 33 | def make_carla_client(host, world_port, timeout=15): 34 | """Context manager for creating and connecting a CarlaClient.""" 35 | with util.make_connection(CarlaClient, host, world_port, timeout) as client: 36 | yield client 37 | 38 | 39 | class CarlaClient(object): 40 | """The CARLA client. Manages communications with the CARLA server.""" 41 | 42 | def __init__(self, host, world_port, timeout=15): 43 | self._world_client = tcp.TCPClient(host, world_port, timeout) 44 | self._stream_client = tcp.TCPClient(host, world_port + 1, timeout) 45 | self._control_client = tcp.TCPClient(host, world_port + 2, timeout) 46 | self._current_settings = None 47 | self._is_episode_requested = False 48 | self._sensors = {} 49 | 50 | def connect(self, connection_attempts=10): 51 | """ 52 | Try to establish a connection to a CARLA server at the given host:port. 53 | """ 54 | self._world_client.connect(connection_attempts) 55 | 56 | def disconnect(self): 57 | """Disconnect from server.""" 58 | self._control_client.disconnect() 59 | self._stream_client.disconnect() 60 | self._world_client.disconnect() 61 | 62 | def connected(self): 63 | """Return whether there is an active connection.""" 64 | return self._world_client.connected() 65 | 66 | def load_settings(self, carla_settings): 67 | """ 68 | Load new settings and request a new episode based on these settings. 69 | carla_settings object must be convertible to a str holding the contents 70 | of a CarlaSettings.ini file. 71 | 72 | Return a protobuf object holding the scene description. 73 | """ 74 | self._current_settings = carla_settings 75 | return self._request_new_episode(carla_settings) 76 | 77 | def start_episode(self, player_start_index): 78 | """ 79 | Start the new episode at the player start given by the 80 | player_start_index. The list of player starts is retrieved by 81 | "load_settings". 82 | 83 | The new episode is started based on the last settings loaded by 84 | "load_settings". 85 | 86 | This function waits until the server answers with an EpisodeReady. 87 | """ 88 | if self._current_settings is None: 89 | raise RuntimeError('no settings loaded, cannot start episode') 90 | 91 | # if no new settings are loaded, request new episode with previous 92 | if not self._is_episode_requested: 93 | self._request_new_episode(self._current_settings) 94 | 95 | try: 96 | pb_message = carla_protocol.EpisodeStart() 97 | pb_message.player_start_spot_index = player_start_index 98 | self._world_client.write(pb_message.SerializeToString()) 99 | # Wait for EpisodeReady. 100 | data = self._world_client.read() 101 | if not data: 102 | raise RuntimeError('failed to read data from server') 103 | pb_message = carla_protocol.EpisodeReady() 104 | pb_message.ParseFromString(data) 105 | if not pb_message.ready: 106 | raise RuntimeError('cannot start episode: server failed to start episode') 107 | # We can start the agent clients now. 
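            # For reference, a typical session with this client, sketched with
            # hypothetical host/port values (see make_carla_client above):
            #
            #   with make_carla_client('localhost', 2000) as client:
            #       scene = client.load_settings(CarlaSettings())
            #       client.start_episode(0)
            #       measurements, sensor_data = client.read_data()
            #       client.send_control(steer=0.0, throttle=0.5)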
108 | self._stream_client.connect() 109 | self._control_client.connect() 110 | # Set again the status for no episode requested 111 | finally: 112 | self._is_episode_requested = False 113 | 114 | def read_data(self): 115 | """ 116 | Read the data sent from the server this frame. The episode must be 117 | started. Return a pair containing the protobuf object containing the 118 | measurements followed by the raw data of the sensors. 119 | """ 120 | # Read measurements. 121 | data = self._stream_client.read() 122 | if not data: 123 | raise RuntimeError('failed to read data from server') 124 | pb_message = carla_protocol.Measurements() 125 | pb_message.ParseFromString(data) 126 | # Read sensor data. 127 | return pb_message, dict(x for x in self._read_sensor_data()) 128 | 129 | def send_control(self, *args, **kwargs): 130 | """ 131 | Send the VehicleControl to be applied this frame. 132 | 133 | If synchronous mode was requested, the server will pause the simulation 134 | until this message is received. 135 | """ 136 | if isinstance(args[0] if args else None, carla_protocol.Control): 137 | pb_message = args[0] 138 | else: 139 | pb_message = carla_protocol.Control() 140 | pb_message.steer = kwargs.get('steer', 0.0) 141 | pb_message.throttle = kwargs.get('throttle', 0.0) 142 | pb_message.brake = kwargs.get('brake', 0.0) 143 | pb_message.hand_brake = kwargs.get('hand_brake', False) 144 | pb_message.reverse = kwargs.get('reverse', False) 145 | self._control_client.write(pb_message.SerializeToString()) 146 | 147 | def _request_new_episode(self, carla_settings): 148 | """ 149 | Internal function to request a new episode. Prepare the client for a new 150 | episode by disconnecting agent clients. 151 | """ 152 | # Disconnect agent clients. 153 | self._stream_client.disconnect() 154 | self._control_client.disconnect() 155 | # Send new episode request. 156 | pb_message = carla_protocol.RequestNewEpisode() 157 | pb_message.ini_file = str(carla_settings) 158 | self._world_client.write(pb_message.SerializeToString()) 159 | # Read scene description. 
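            # (The SceneDescription read below is what load_settings ultimately
            # returns to the caller; among other things it carries the sensor
            # definitions that are parsed into reader objects further down.)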
160 | data = self._world_client.read() 161 | if not data: 162 | raise RuntimeError('failed to read data from server') 163 | pb_message = carla_protocol.SceneDescription() 164 | pb_message.ParseFromString(data) 165 | self._sensors = dict((sensor.id, sensor) \ 166 | for sensor in _make_sensor_parsers(pb_message.sensors)) 167 | self._is_episode_requested = True 168 | return pb_message 169 | 170 | def _read_sensor_data(self): 171 | while True: 172 | data = self._stream_client.read() 173 | if not data: 174 | raise StopIteration 175 | yield self._parse_sensor_data(data) 176 | 177 | def _parse_sensor_data(self, data): 178 | sensor_id = struct.unpack(' id else 'Unknown' 186 | getint32 = lambda data, index: struct.unpack('5d}.jpg') 66 | 67 | @property 68 | def path(self): 69 | return self._path 70 | 71 | def log_poses(self, start_index, end_index, weather_id): 72 | with open(self._internal_log_name, 'a+') as log: 73 | log.write(' Start Poses (%d %d ) on weather %d \n ' % 74 | (start_index, end_index, weather_id)) 75 | 76 | def log_poses_finish(self): 77 | with open(self._internal_log_name, 'a+') as log: 78 | log.write('Finished Task') 79 | 80 | def log_start(self, id_experiment): 81 | 82 | with open(self._internal_log_name, 'a+') as log: 83 | log.write('Start Task %d \n' % id_experiment) 84 | 85 | def log_end(self): 86 | with open(self._internal_log_name, 'a+') as log: 87 | log.write('====== Finished Entire Benchmark ======') 88 | 89 | def write_summary_results(self, experiment, pose, rep, 90 | path_distance, remaining_distance, 91 | final_time, time_out, result): 92 | """ 93 | Method to record the summary of an episode(pose) execution 94 | """ 95 | 96 | self._dict_summary['exp_id'] = experiment.task 97 | self._dict_summary['rep'] = rep 98 | self._dict_summary['weather'] = experiment.Conditions.WeatherId 99 | self._dict_summary['start_point'] = pose[0] 100 | self._dict_summary['end_point'] = pose[1] 101 | self._dict_summary['result'] = result 102 | self._dict_summary['initial_distance'] = path_distance 103 | self._dict_summary['final_distance'] = remaining_distance 104 | self._dict_summary['final_time'] = final_time 105 | self._dict_summary['time_out'] = time_out 106 | 107 | with open(os.path.join(self._path, 'summary.csv'), 'a+') as ofd: 108 | w = csv.DictWriter(ofd, self._dict_summary.keys()) 109 | 110 | w.writerow(self._dict_summary) 111 | 112 | def write_measurements_results(self, experiment, rep, pose, reward_vec, control_vec): 113 | """ 114 | Method to record the measurements, sensors, 115 | controls and status of the entire benchmark. 
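
        One row is appended to measurements.csv for every entry of
        reward_vec, i.e. (presumably) one per recorded simulation frame,
        so the file grows quickly for long benchmarks.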
116 | """ 117 | with open(os.path.join(self._path, 'measurements.csv'), 'a+') as rfd: 118 | rw = csv.DictWriter(rfd, self._dict_measurements.keys()) 119 | 120 | for i in range(len(reward_vec)): 121 | self._dict_measurements['exp_id'] = experiment.task 122 | self._dict_measurements['rep'] = rep 123 | self._dict_measurements['start_point'] = pose[0] 124 | self._dict_measurements['end_point'] = pose[1] 125 | self._dict_measurements['weather'] = experiment.Conditions.WeatherId 126 | self._dict_measurements['collision_other'] = reward_vec[ 127 | i].collision_other 128 | self._dict_measurements['collision_pedestrians'] = reward_vec[ 129 | i].collision_pedestrians 130 | self._dict_measurements['collision_vehicles'] = reward_vec[ 131 | i].collision_vehicles 132 | self._dict_measurements['intersection_otherlane'] = reward_vec[ 133 | i].intersection_otherlane 134 | self._dict_measurements['intersection_offroad'] = reward_vec[ 135 | i].intersection_offroad 136 | self._dict_measurements['pos_x'] = reward_vec[ 137 | i].transform.location.x 138 | self._dict_measurements['pos_y'] = reward_vec[ 139 | i].transform.location.y 140 | self._dict_measurements['steer'] = control_vec[ 141 | i].steer 142 | self._dict_measurements['throttle'] = control_vec[ 143 | i].throttle 144 | self._dict_measurements['brake'] = control_vec[ 145 | i].brake 146 | 147 | rw.writerow(self._dict_measurements) 148 | 149 | def _create_log_files(self): 150 | """ 151 | Just create the log files and add the necessary header for it. 152 | """ 153 | 154 | if not self._experiment_exist(): 155 | os.mkdir(self._path) 156 | 157 | with open(os.path.join(self._path, 'summary.csv'), 'w') as ofd: 158 | w = csv.DictWriter(ofd, self._dict_summary.keys()) 159 | w.writeheader() 160 | 161 | with open(os.path.join(self._path, 'measurements.csv'), 'w') as rfd: 162 | rw = csv.DictWriter(rfd, self._dict_measurements.keys()) 163 | rw.writeheader() 164 | 165 | def _continue_experiment(self, continue_experiment): 166 | """ 167 | Get the line on the file for the experiment. 168 | If continue_experiment is false and experiment exist, generates a new file path 169 | 170 | """ 171 | 172 | def get_non_existent_path(f_name_path): 173 | """ 174 | Get the path to a filename which does not exist by incrementing path. 
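
            For example, given 'results/test': if 'results/test' exists,
            this returns 'results/test-1'; if that also exists,
            'results/test-2', and so on.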
175 | """ 176 | if not os.path.exists(f_name_path): 177 | return f_name_path 178 | filename, file_extension = os.path.splitext(f_name_path) 179 | i = 1 180 | new_f_name = "{}-{}{}".format(filename, i, file_extension) 181 | while os.path.exists(new_f_name): 182 | i += 1 183 | new_f_name = "{}-{}{}".format(filename, i, file_extension) 184 | return new_f_name 185 | 186 | # start the new path as the same one as before 187 | new_path = self._path 188 | 189 | # if the experiment exist 190 | if self._experiment_exist(): 191 | 192 | # If you want to continue just get the last position 193 | if continue_experiment: 194 | line_on_file = self._get_last_position() 195 | 196 | else: 197 | # Get a new non_conflicting path name 198 | new_path = get_non_existent_path(new_path) 199 | line_on_file = 1 200 | 201 | else: 202 | line_on_file = 1 203 | return new_path, line_on_file 204 | 205 | def save_images(self, sensor_data, episode_name, frame): 206 | """ 207 | Save a image during the experiment 208 | """ 209 | if self._save_images: 210 | for name, image in sensor_data.items(): 211 | image.save_to_disk(self._image_filename_format.format( 212 | episode_name, name, frame)) 213 | 214 | def get_pose_and_experiment(self, number_poses_task): 215 | """ 216 | Based on the line in log file, return the current pose and experiment. 217 | If the line is zero, create new log files. 218 | 219 | """ 220 | # Warning: assumes that all tasks have the same size 221 | line_on_file = self._get_last_position() - 1 222 | if line_on_file == 0: 223 | return 0, 0 224 | else: 225 | return line_on_file % number_poses_task, line_on_file // number_poses_task 226 | 227 | def _experiment_exist(self): 228 | 229 | return os.path.exists(self._path) 230 | 231 | def _get_last_position(self): 232 | """ 233 | Get the last position on the summary experiment file 234 | With this you are able to continue from there 235 | 236 | Returns: 237 | int, position: 238 | """ 239 | # Try to open, if the file is not found 240 | try: 241 | with open(os.path.join(self._path, 'summary.csv')) as f: 242 | return sum(1 for _ in f) 243 | except IOError: 244 | return 0 245 | -------------------------------------------------------------------------------- /carla/sensor.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 2 | # Barcelona (UAB). 3 | # 4 | # This work is licensed under the terms of the MIT license. 5 | # For a copy, see . 
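# For reference, the sensors defined below are typically attached to the
# simulation like this (a sketch; the FOV and image size values are
# illustrative, mirroring the CoRL2017 suite earlier in this repo):
#
#   camera = Camera('CameraRGB')
#   camera.set(FOV=100)
#   camera.set_image_size(800, 600)
#   camera.set_position(2.0, 0.0, 1.4)
#   settings.add_sensor(camera)  # settings: a CarlaSettings instance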
6 | 7 | """CARLA sensors.""" 8 | 9 | 10 | import os 11 | 12 | from collections import namedtuple 13 | 14 | try: 15 | import numpy 16 | except ImportError: 17 | raise RuntimeError('cannot import numpy, make sure numpy package is installed.') 18 | 19 | from .transform import Transform, Translation, Rotation, Scale 20 | 21 | 22 | # ============================================================================== 23 | # -- Helpers ------------------------------------------------------------------- 24 | # ============================================================================== 25 | 26 | 27 | Color = namedtuple('Color', 'r g b') 28 | Color.__new__.__defaults__ = (0, 0, 0) 29 | 30 | 31 | Point = namedtuple('Point', 'x y z color') 32 | Point.__new__.__defaults__ = (0.0, 0.0, 0.0, None) 33 | 34 | 35 | def _append_extension(filename, ext): 36 | return filename if filename.lower().endswith(ext.lower()) else filename + ext 37 | 38 | 39 | # ============================================================================== 40 | # -- Sensor -------------------------------------------------------------------- 41 | # ============================================================================== 42 | 43 | 44 | class Sensor(object): 45 | """ 46 | Base class for sensor descriptions. Used to add sensors to CarlaSettings. 47 | """ 48 | 49 | def __init__(self, name, sensor_type): 50 | self.SensorName = name 51 | self.SensorType = sensor_type 52 | self.PositionX = 0.2 53 | self.PositionY = 0.0 54 | self.PositionZ = 1.3 55 | self.RotationPitch = 0.0 56 | self.RotationRoll = 0.0 57 | self.RotationYaw = 0.0 58 | 59 | def set(self, **kwargs): 60 | for key, value in kwargs.items(): 61 | if not hasattr(self, key): 62 | raise ValueError('sensor.Sensor: no key named %r' % key) 63 | setattr(self, key, value) 64 | 65 | def set_position(self, x, y, z): 66 | self.PositionX = x 67 | self.PositionY = y 68 | self.PositionZ = z 69 | 70 | def set_rotation(self, pitch, yaw, roll): 71 | self.RotationPitch = pitch 72 | self.RotationYaw = yaw 73 | self.RotationRoll = roll 74 | 75 | def get_transform(self): 76 | ''' 77 | Returns the camera to [whatever the camera is attached to] 78 | transformation. 79 | ''' 80 | return Transform( 81 | Translation(self.PositionX, self.PositionY, self.PositionZ), 82 | Rotation(self.RotationPitch, self.RotationYaw, self.RotationRoll)) 83 | 84 | def get_unreal_transform(self): 85 | ''' 86 | Returns the camera to [whatever the camera is attached to] 87 | transformation with the Unreal necessary corrections applied. 88 | 89 | @todo Do we need to expose this? 90 | ''' 91 | to_unreal_transform = Transform(Rotation(roll=-90, yaw=90), Scale(x=-1)) 92 | return self.get_transform() * to_unreal_transform 93 | 94 | 95 | class Camera(Sensor): 96 | """ 97 | Camera description. This class can be added to a CarlaSettings object to add 98 | a camera to the player vehicle. 99 | """ 100 | 101 | def __init__(self, name, **kwargs): 102 | super(Camera, self).__init__(name, sensor_type="CAMERA") 103 | self.PostProcessing = 'SceneFinal' 104 | self.ImageSizeX = 720 105 | self.ImageSizeY = 512 106 | self.FOV = 90.0 107 | self.set(**kwargs) 108 | 109 | def set_image_size(self, pixels_x, pixels_y): 110 | '''Sets the image size in pixels''' 111 | self.ImageSizeX = pixels_x 112 | self.ImageSizeY = pixels_y 113 | 114 | 115 | class Lidar(Sensor): 116 | """ 117 | Lidar description. This class can be added to a CarlaSettings object to add 118 | a Lidar to the player vehicle. 
119 | """ 120 | 121 | def __init__(self, name, **kwargs): 122 | super(Lidar, self).__init__(name, sensor_type="LIDAR_RAY_CAST") 123 | self.Channels = 32 124 | self.Range = 50.0 125 | self.PointsPerSecond = 56000 126 | self.RotationFrequency = 10.0 127 | self.UpperFovLimit = 10.0 128 | self.LowerFovLimit = -30.0 129 | self.ShowDebugPoints = False 130 | self.set(**kwargs) 131 | 132 | 133 | # ============================================================================== 134 | # -- SensorData ---------------------------------------------------------------- 135 | # ============================================================================== 136 | 137 | 138 | class SensorData(object): 139 | """Base class for sensor data returned from the server.""" 140 | def __init__(self, frame_number): 141 | self.frame_number = frame_number 142 | 143 | 144 | class Image(SensorData): 145 | """Data generated by a Camera.""" 146 | 147 | def __init__(self, frame_number, width, height, image_type, fov, raw_data): 148 | super(Image, self).__init__(frame_number=frame_number) 149 | assert len(raw_data) == 4 * width * height 150 | self.width = width 151 | self.height = height 152 | self.type = image_type 153 | self.fov = fov 154 | self.raw_data = raw_data 155 | self._converted_data = None 156 | 157 | @property 158 | def data(self): 159 | """ 160 | Lazy initialization for data property, stores converted data in its 161 | default format. 162 | """ 163 | if self._converted_data is None: 164 | from . import image_converter 165 | 166 | if self.type == 'Depth': 167 | self._converted_data = image_converter.depth_to_array(self) 168 | elif self.type == 'SemanticSegmentation': 169 | self._converted_data = image_converter.labels_to_array(self) 170 | else: 171 | self._converted_data = image_converter.to_rgb_array(self) 172 | return self._converted_data 173 | 174 | def save_to_disk(self, filename): 175 | """Save this image to disk (requires PIL installed).""" 176 | filename = _append_extension(filename, '.png') 177 | 178 | try: 179 | from PIL import Image as PImage 180 | except ImportError: 181 | raise RuntimeError( 182 | 'cannot import PIL, make sure pillow package is installed') 183 | 184 | image = PImage.frombytes( 185 | mode='RGBA', 186 | size=(self.width, self.height), 187 | data=self.raw_data, 188 | decoder_name='raw') 189 | color = image.split() 190 | image = PImage.merge("RGB", color[2::-1]) 191 | 192 | folder = os.path.dirname(filename) 193 | if not os.path.isdir(folder): 194 | os.makedirs(folder) 195 | image.save(filename) 196 | 197 | 198 | class PointCloud(SensorData): 199 | """A list of points.""" 200 | 201 | def __init__(self, frame_number, array, color_array=None): 202 | super(PointCloud, self).__init__(frame_number=frame_number) 203 | self._array = array 204 | self._color_array = color_array 205 | self._has_colors = color_array is not None 206 | 207 | @property 208 | def array(self): 209 | """The numpy array holding the point-cloud. 210 | 211 | 3D points format for n elements: 212 | [ [X0,Y0,Z0], 213 | ..., 214 | [Xn,Yn,Zn] ] 215 | """ 216 | return self._array 217 | 218 | @property 219 | def color_array(self): 220 | """The numpy array holding the colors corresponding to each point. 221 | It is None if there are no colors. 
222 | 223 | Colors format for n elements: 224 | [ [R0,G0,B0], 225 | ..., 226 | [Rn,Gn,Bn] ] 227 | """ 228 | return self._color_array 229 | 230 | def has_colors(self): 231 | """Return whether the points have color.""" 232 | return self._has_colors 233 | 234 | def apply_transform(self, transformation): 235 | """Modify the PointCloud instance transforming its points""" 236 | self._array = transformation.transform_points(self._array) 237 | 238 | def save_to_disk(self, filename): 239 | """Save this point-cloud to disk as PLY format.""" 240 | filename = _append_extension(filename, '.ply') 241 | 242 | def construct_ply_header(): 243 | """Generates a PLY header given a total number of 3D points and 244 | coloring property if specified 245 | """ 246 | points = len(self) # Total point number 247 | header = ['ply', 248 | 'format ascii 1.0', 249 | 'element vertex {}', 250 | 'property float32 x', 251 | 'property float32 y', 252 | 'property float32 z', 253 | 'property uchar diffuse_red', 254 | 'property uchar diffuse_green', 255 | 'property uchar diffuse_blue', 256 | 'end_header'] 257 | if not self._has_colors: 258 | return '\n'.join(header[0:6] + [header[-1]]).format(points) 259 | return '\n'.join(header).format(points) 260 | 261 | if not self._has_colors: 262 | ply = '\n'.join(['{:.2f} {:.2f} {:.2f}'.format( 263 | *p) for p in self._array.tolist()]) 264 | else: 265 | points_3d = numpy.concatenate( 266 | (self._array, self._color_array), axis=1) 267 | ply = '\n'.join(['{:.2f} {:.2f} {:.2f} {:.0f} {:.0f} {:.0f}' 268 | .format(*p) for p in points_3d.tolist()]) 269 | 270 | # Create the folder to save to, if it does not exist. 271 | folder = os.path.dirname(filename) 272 | if folder and not os.path.isdir(folder): 273 | os.makedirs(folder) 274 | 275 | # Open the file and save with the specific PLY format. 276 | with open(filename, 'w+') as ply_file: 277 | ply_file.write('\n'.join([construct_ply_header(), ply])) 278 | 279 | def __len__(self): 280 | return len(self.array) 281 | 282 | def __getitem__(self, key): 283 | color = None if self._color_array is None else Color( 284 | *self._color_array[key]) 285 | return Point(*self._array[key], color=color) 286 | 287 | def __iter__(self): 288 | class PointIterator(object): 289 | """Iterator class for PointCloud""" 290 | 291 | def __init__(self, point_cloud): 292 | self.point_cloud = point_cloud 293 | self.index = -1 294 | 295 | def __next__(self): 296 | self.index += 1 297 | if self.index >= len(self.point_cloud): 298 | raise StopIteration 299 | return self.point_cloud[self.index] 300 | 301 | def next(self): 302 | return self.__next__() 303 | 304 | return PointIterator(self) 305 | 306 | def __str__(self): 307 | return str(self.array) 308 | 309 | 310 | class LidarMeasurement(SensorData): 311 | """Data generated by a Lidar.""" 312 | 313 | def __init__(self, frame_number, horizontal_angle, channels, point_count_by_channel, point_cloud): 314 | super(LidarMeasurement, self).__init__(frame_number=frame_number) 315 | assert numpy.sum(point_count_by_channel) == len(point_cloud.array) 316 | self.horizontal_angle = horizontal_angle 317 | self.channels = channels 318 | self.point_count_by_channel = point_count_by_channel 319 | self.point_cloud = point_cloud 320 | 321 | @property 322 | def data(self): 323 | """The numpy array holding the point-cloud.
324 | 325 | 3D points format for n elements: 326 | [ [X0,Y0,Z0], 327 | ..., 328 | [Xn,Yn,Zn] ] 329 | """ 330 | return self.point_cloud.array 331 | 332 | def save_to_disk(self, filename): 333 | """Save point-cloud to disk as PLY format.""" 334 | self.point_cloud.save_to_disk(filename) 335 | -------------------------------------------------------------------------------- /carla/driving_benchmark/driving_benchmark.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 2 | # Barcelona (UAB). 3 | # 4 | # This work is licensed under the terms of the MIT license. 5 | # For a copy, see <https://opensource.org/licenses/MIT>. 6 | 7 | 8 | import abc 9 | import logging 10 | import math 11 | import time 12 | 13 | from carla.client import VehicleControl 14 | from carla.client import make_carla_client 15 | from carla.driving_benchmark.metrics import Metrics 16 | from carla.planner.planner import Planner 17 | from carla.settings import CarlaSettings 18 | from carla.tcp import TCPConnectionError 19 | 20 | from . import results_printer 21 | from .recording import Recording 22 | 23 | 24 | def sldist(c1, c2): 25 | return math.sqrt((c2[0] - c1[0]) ** 2 + (c2[1] - c1[1]) ** 2) 26 | 27 | 28 | class DrivingBenchmark(object): 29 | """ 30 | The Benchmark class controls the execution of the benchmark, interfacing 31 | an Agent class with an experiment suite. 32 | 33 | 34 | The benchmark class must be inherited by a class that defines all 35 | the experiments to be run by the agent 36 | """ 37 | 38 | def __init__( 39 | self, 40 | city_name='Town01', 41 | name_to_save='Test', 42 | continue_experiment=False, 43 | save_images=False, 44 | distance_for_success=2.0 45 | ): 46 | 47 | self.__metaclass__ = abc.ABCMeta 48 | 49 | self._city_name = city_name 50 | self._base_name = name_to_save 51 | # The minimum distance to the goal point required in 52 | # order to consider the episode a success 53 | self._distance_for_success = distance_for_success 54 | # The object used to record the benchmark and to be able to continue afterwards 55 | self._recording = Recording(name_to_save=name_to_save, 56 | continue_experiment=continue_experiment, 57 | save_images=save_images 58 | ) 59 | 60 | # We have a default planner instantiated that produces high level commands 61 | self._planner = Planner(city_name) 62 | 63 | def benchmark_agent(self, experiment_suite, agent, client): 64 | """ 65 | Function to benchmark the agent. 66 | It first checks the log file for this benchmark; 67 | if it exists, it continues from the experiment where it stopped. 68 | 69 | 70 | Args: 71 | experiment_suite: the suite defining the experiments and poses to run 72 | agent: an agent object with the run_step method implemented. 73 | client: the CARLA client used to communicate with the simulator 74 | 75 | 76 | Return: 77 | A dictionary with all the metrics computed from the 78 | agent running the set of experiments. 79 | """ 80 | 81 | # Instantiate a metric object that will be used to compute the metrics for 82 | # the benchmark afterwards. 83 | metrics_object = Metrics(experiment_suite.metrics_parameters, 84 | experiment_suite.dynamic_tasks) 85 | 86 | # Returns the pose and experiment to resume this benchmark from.
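# A worked example of the resume arithmetic below (illustrative numbers, not
# from a real run): with 25 poses per task and 87 lines already in summary.csv,
# get_pose_and_experiment computes (87 - 1) % 25 = 11 and (87 - 1) // 25 = 3,
# i.e. the benchmark resumes at pose 11 of experiment 3. This only holds
# because every task is assumed to log the same number of poses.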
87 | start_pose, start_experiment = self._recording.get_pose_and_experiment( 88 | experiment_suite.get_number_of_poses_task()) 89 | 90 | logging.info('START') 91 | 92 | for experiment in experiment_suite.get_experiments()[int(start_experiment):]: 93 | 94 | positions = client.load_settings( 95 | experiment.conditions).player_start_spots 96 | 97 | self._recording.log_start(experiment.task) 98 | 99 | for pose in experiment.poses[start_pose:]: 100 | for rep in range(experiment.repetitions): 101 | 102 | start_index = pose[0] 103 | end_index = pose[1] 104 | 105 | client.start_episode(start_index) 106 | # Print information on the episode being run 107 | logging.info('======== !!!! ==========') 108 | logging.info(' Start Position %d End Position %d ', 109 | start_index, end_index) 110 | 111 | self._recording.log_poses(start_index, end_index, 112 | experiment.Conditions.WeatherId) 113 | 114 | # Calculate the initial distance for this episode 115 | initial_distance = \ 116 | sldist( 117 | [positions[start_index].location.x, positions[start_index].location.y], 118 | [positions[end_index].location.x, positions[end_index].location.y]) 119 | 120 | time_out = experiment_suite.calculate_time_out( 121 | self._get_shortest_path(positions[start_index], positions[end_index])) 122 | 123 | # Run the agent 124 | (result, reward_vec, control_vec, final_time, remaining_distance) = \ 125 | self._run_navigation_episode( 126 | agent, client, time_out, positions[end_index], 127 | str(experiment.Conditions.WeatherId) + '_' 128 | + str(experiment.task) + '_' + str(start_index) 129 | + '.' + str(end_index)) 130 | 131 | # Write the general status of the episode that just ran 132 | self._recording.write_summary_results( 133 | experiment, pose, rep, initial_distance, 134 | remaining_distance, final_time, time_out, result) 135 | 136 | # Write the details of this episode. 137 | self._recording.write_measurements_results(experiment, rep, pose, reward_vec, 138 | control_vec) 139 | if result > 0: 140 | logging.info('+++++ Target achieved in %f seconds! +++++', 141 | final_time) 142 | else: 143 | logging.info('----- Timeout! -----') 144 | 145 | start_pose = 0 146 | 147 | self._recording.log_end() 148 | 149 | return metrics_object.compute(self._recording.path) 150 | 151 | def get_path(self): 152 | """ 153 | Returns the path where the log was saved.
154 | """ 155 | return self._recording.path 156 | 157 | def _get_directions(self, current_point, end_point): 158 | """ 159 | Class that should return the directions to reach a certain goal 160 | """ 161 | 162 | directions = self._planner.get_next_command( 163 | (current_point.location.x, 164 | current_point.location.y, 0.22), 165 | (current_point.orientation.x, 166 | current_point.orientation.y, 167 | current_point.orientation.z), 168 | (end_point.location.x, end_point.location.y, 0.22), 169 | (end_point.orientation.x, end_point.orientation.y, end_point.orientation.z)) 170 | return directions 171 | 172 | def _get_shortest_path(self, start_point, end_point): 173 | """ 174 | Calculates the shortest path between two points considering the road netowrk 175 | """ 176 | 177 | return self._planner.get_shortest_path_distance( 178 | [ 179 | start_point.location.x, start_point.location.y, 0.22], [ 180 | start_point.orientation.x, start_point.orientation.y, 0.22], [ 181 | end_point.location.x, end_point.location.y, end_point.location.z], [ 182 | end_point.orientation.x, end_point.orientation.y, end_point.orientation.z]) 183 | 184 | def _run_navigation_episode( 185 | self, 186 | agent, 187 | client, 188 | time_out, 189 | target, 190 | episode_name): 191 | """ 192 | Run one episode of the benchmark (Pose) for a certain agent. 193 | 194 | 195 | Args: 196 | agent: the agent object 197 | client: an object of the carla client to communicate 198 | with the CARLA simulator 199 | time_out: the time limit to complete this episode 200 | target: the target to reach 201 | episode_name: The name for saving images of this episode 202 | 203 | """ 204 | 205 | # Send an initial command. 206 | measurements, sensor_data = client.read_data() 207 | client.send_control(VehicleControl()) 208 | 209 | initial_timestamp = measurements.game_timestamp 210 | current_timestamp = initial_timestamp 211 | 212 | # The vector containing all measurements produced on this episode 213 | measurement_vec = [] 214 | # The vector containing all controls produced on this episode 215 | control_vec = [] 216 | frame = 0 217 | distance = 10000 218 | success = False 219 | 220 | while (current_timestamp - initial_timestamp) < (time_out * 1000) and not success: 221 | 222 | # Read data from server with the client 223 | measurements, sensor_data = client.read_data() 224 | # The directions to reach the goal are calculated. 225 | directions = self._get_directions(measurements.player_measurements.transform, target) 226 | # Agent process the data. 
227 | control = agent.run_step(measurements, sensor_data, directions, target) 228 | # Send the control commands to the vehicle 229 | client.send_control(control) 230 | 231 | # Save images if the flag is activated 232 | self._recording.save_images(sensor_data, episode_name, frame) 233 | 234 | current_x = measurements.player_measurements.transform.location.x 235 | current_y = measurements.player_measurements.transform.location.y 236 | 237 | logging.info("Controller is Inputting:") 238 | logging.info('Steer = %f Throttle = %f Brake = %f ', 239 | control.steer, control.throttle, control.brake) 240 | 241 | current_timestamp = measurements.game_timestamp 242 | # Compute the remaining distance to the target 243 | distance = sldist([current_x, current_y], 244 | [target.location.x, target.location.y]) 245 | # Write the status of the run in verbose mode 246 | logging.info('Status:') 247 | logging.info( 248 | '[d=%f] c_x = %f, c_y = %f ---> t_x = %f, t_y = %f', 249 | float(distance), current_x, current_y, target.location.x, 250 | target.location.y) 251 | # Check if the target was reached 252 | if distance < self._distance_for_success: 253 | success = True 254 | 255 | # Increment the frame counter and append the measurements and controls. 256 | frame += 1 257 | measurement_vec.append(measurements.player_measurements) 258 | control_vec.append(control) 259 | 260 | if success: 261 | return 1, measurement_vec, control_vec, float( 262 | current_timestamp - initial_timestamp) / 1000.0, distance 263 | return 0, measurement_vec, control_vec, time_out, distance 264 | 265 | 266 | def run_driving_benchmark(agent, 267 | experiment_suite, 268 | city_name='Town01', 269 | log_name='Test', 270 | continue_experiment=False, 271 | host='127.0.0.1', 272 | port=2000 273 | ): 274 | while True: 275 | try: 276 | 277 | with make_carla_client(host, port) as client: 278 | # Hack to fix issue 310: we force a reset, so it does not get 279 | # the positions from the first server reset. 280 | client.load_settings(CarlaSettings()) 281 | client.start_episode(0) 282 | 283 | # We instantiate the driving benchmark, that is the engine used to 284 | # benchmark an agent. The instantiation starts the log process and 285 | # sets up the paths where the results will be recorded. 286 | benchmark = DrivingBenchmark(city_name=city_name, 287 | name_to_save=log_name + '_' 288 | + type(experiment_suite).__name__ 289 | + '_' + city_name, 290 | continue_experiment=continue_experiment) 291 | # This function performs the benchmark. It returns a dictionary summarizing 292 | # the entire execution.
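# How this entry point is typically invoked from a script (a sketch: the three
# names are re-exported by the package __init__ modules in this repository, but
# the suite constructor taking the city name and the log name are assumptions):
#
#     from carla.agent import ForwardAgent
#     from carla.driving_benchmark import run_driving_benchmark
#     from carla.driving_benchmark.experiment_suites import BasicExperimentSuite
#
#     run_driving_benchmark(ForwardAgent(), BasicExperimentSuite('Town01'),
#                           city_name='Town01', log_name='forward_test')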
293 | 294 | benchmark_summary = benchmark.benchmark_agent(experiment_suite, agent, client) 295 | 296 | print("") 297 | print("") 298 | print("----- Printing results for training weathers (Seen in Training) -----") 299 | print("") 300 | print("") 301 | results_printer.print_summary(benchmark_summary, experiment_suite.train_weathers, 302 | benchmark.get_path()) 303 | 304 | print("") 305 | print("") 306 | print("----- Printing results for test weathers (Unseen in Training) -----") 307 | print("") 308 | print("") 309 | 310 | results_printer.print_summary(benchmark_summary, experiment_suite.test_weathers, 311 | benchmark.get_path()) 312 | 313 | break 314 | 315 | except TCPConnectionError as error: 316 | logging.error(error) 317 | time.sleep(1) 318 | -------------------------------------------------------------------------------- /carla/driving_benchmark/metrics.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 2 | # Barcelona (UAB). 3 | # 4 | # This work is licensed under the terms of the MIT license. 5 | # For a copy, see <https://opensource.org/licenses/MIT>. 6 | 7 | 8 | import numpy as np 9 | import math 10 | import os 11 | 12 | sldist = lambda c1, c2: math.sqrt((c2[0] - c1[0]) ** 2 + (c2[1] - c1[1]) ** 2) 13 | flatten = lambda l: [item for sublist in l for item in sublist] 14 | 15 | 16 | class Metrics(object): 17 | """ 18 | The Metrics class takes the driving measurements 19 | and calculates some specific performance metrics. 20 | 21 | """ 22 | 23 | def __init__(self, parameters, dynamic_tasks): 24 | """ 25 | Args: 26 | parameters: A dictionary with the parameters used for checking how to count infractions 27 | dynamic_tasks: A list of all the dynamic tasks (tasks that contain dynamic objects) 28 | """ 29 | 30 | self._parameters = parameters 31 | self._parameters['dynamic_tasks'] = dynamic_tasks 32 | 33 | def _divide_by_episodes(self, measurements_matrix, header): 34 | 35 | """ 36 | Divides the measurements matrix into different episodes. 37 | 38 | Args: 39 | measurements_matrix: The full measurements matrix 40 | header: The header from the measurements matrix 41 | 42 | """ 43 | 44 | # Read the previous values for position zero 45 | prev_start = measurements_matrix[0, header.index('start_point')] 46 | prev_end = measurements_matrix[0, header.index('end_point')] 47 | prev_exp_id = measurements_matrix[0, header.index('exp_id')] 48 | 49 | # Start at the position 1. 50 | i = 1 51 | prev_i_position = 0 52 | episode_matrix_metrics = [] 53 | 54 | while i < measurements_matrix.shape[0]: 55 | 56 | current_start = measurements_matrix[i, header.index('start_point')] 57 | current_end = measurements_matrix[i, header.index('end_point')] 58 | current_exp_id = measurements_matrix[i, header.index('exp_id')] 59 | 60 | # If there is a change in the positions, it means it is a new episode for sure.
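# e.g. consecutive rows whose (start_point, end_point, exp_id) columns read
# (10, 40, 0), (10, 40, 0), (25, 53, 0) are split before the third row: both
# the start and the end position changed, so a new episode began there
# (illustrative values).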
if (current_start != prev_start and current_end != prev_end) \ 62 | or current_exp_id != prev_exp_id: 63 | episode_matrix_metrics.append(measurements_matrix[prev_i_position:i, :]) 64 | prev_i_position = i 65 | 66 | prev_start = current_start 67 | prev_end = current_end 68 | prev_exp_id = current_exp_id 69 | 70 | i += 1 71 | 72 | episode_matrix_metrics.append(measurements_matrix[prev_i_position:-1, :]) 73 | 74 | return episode_matrix_metrics 75 | 76 | def _get_collisions(self, selected_matrix, header): 77 | """ 78 | Get the number of collisions for pedestrians, vehicles or other 79 | Args: 80 | selected_matrix: The matrix with all the experiments summary 81 | header: The header, to know the positions of the details 82 | 83 | 84 | """ 85 | count_collisions_general = 0 86 | count_collisions_pedestrian = 0 87 | count_collisions_vehicle = 0 88 | i = 1 89 | # Computing general collisions 90 | while i < selected_matrix.shape[0]: 91 | if (selected_matrix[i, header.index('collision_other')] 92 | - selected_matrix[ 93 | (i - self._parameters['collision_other']['frames_skip']), header.index( 94 | 'collision_other')]) > \ 95 | self._parameters['collision_other']['threshold']: 96 | count_collisions_general += 1 97 | i += self._parameters['collision_other']['frames_recount'] 98 | i += 1 99 | 100 | i = 1 101 | # Computing collisions for vehicles 102 | while i < selected_matrix.shape[0]: 103 | if (selected_matrix[i, header.index('collision_vehicles')] 104 | - selected_matrix[ 105 | (i - self._parameters['collision_vehicles']['frames_skip']), header.index( 106 | 'collision_vehicles')]) > \ 107 | self._parameters['collision_vehicles']['threshold']: 108 | count_collisions_vehicle += 1 109 | i += self._parameters['collision_vehicles']['frames_recount'] 110 | i += 1 111 | 112 | i = 1 113 | 114 | # Computing the collisions for pedestrians 115 | while i < selected_matrix.shape[0]: 116 | if (selected_matrix[i, header.index('collision_pedestrians')] 117 | - selected_matrix[i - self._parameters['collision_pedestrians']['frames_skip'], 118 | header.index('collision_pedestrians')]) > \ 119 | self._parameters['collision_pedestrians']['threshold']: 120 | count_collisions_pedestrian += 1 121 | i += self._parameters['collision_pedestrians']['frames_recount'] 122 | i += 1 123 | 124 | return count_collisions_general, count_collisions_vehicle, count_collisions_pedestrian 125 | 126 | def _get_distance_traveled(self, selected_matrix, header): 127 | """ 128 | Compute the total distance travelled 129 | Args: 130 | selected_matrix: The matrix with all the experiments summary 131 | header: The header, to know the positions of the details 132 | 133 | 134 | """ 135 | 136 | prev_x = selected_matrix[0, header.index('pos_x')] 137 | prev_y = selected_matrix[0, header.index('pos_y')] 138 | 139 | i = 1 140 | accumulated_distance = 0 141 | 142 | while i < selected_matrix.shape[0]: 143 | x = selected_matrix[i, header.index('pos_x')] 144 | y = selected_matrix[i, header.index('pos_y')] 145 | 146 | accumulated_distance += sldist((x, y), (prev_x, prev_y)) 147 | 148 | prev_x = x 149 | prev_y = y 150 | 151 | i += 1 152 | 153 | return accumulated_distance / 1000.0 154 | 155 | def _get_out_of_road_lane(self, selected_matrix, header): 156 | 157 | """ 158 | Check for the situations where the agent goes out of the road.
159 | Args: 160 | selected_matrix: The matrix with all the experiments summary 161 | header: The header, to know the positions of the details 162 | 163 | 164 | """ 165 | 166 | count_sidewalk_intersect = 0 167 | count_lane_intersect = 0 168 | 169 | i = 0 170 | 171 | while i < selected_matrix.shape[0]: 172 | 173 | if (selected_matrix[i, header.index('intersection_offroad')] 174 | - selected_matrix[(i - self._parameters['intersection_offroad']['frames_skip']), 175 | header.index('intersection_offroad')]) \ 176 | > self._parameters['intersection_offroad']['threshold']: 177 | count_sidewalk_intersect += 1 178 | i += self._parameters['intersection_offroad']['frames_recount'] 179 | if i >= selected_matrix.shape[0]: 180 | break 181 | 182 | if (selected_matrix[i, header.index('intersection_otherlane')] 183 | - selected_matrix[(i - self._parameters['intersection_otherlane']['frames_skip']), 184 | header.index('intersection_otherlane')]) \ 185 | > self._parameters['intersection_otherlane']['threshold']: 186 | count_lane_intersect += 1 187 | i += self._parameters['intersection_otherlane']['frames_recount'] 188 | 189 | i += 1 190 | 191 | return count_lane_intersect, count_sidewalk_intersect 192 | 193 | def compute(self, path): 194 | 195 | """ 196 | Compute a dictionary containing the following metrics: 197 | 198 | * Off Road Intersection: The number of times the agent goes out of the road. 199 | The intersection is only counted if the area of the vehicle outside 200 | of the road is bigger than a *threshold*. 201 | 202 | * Other Lane Intersection: The number of times the agent goes to the other 203 | lane. The intersection is only counted if the area of the vehicle on the 204 | other lane is bigger than a *threshold*. 205 | 206 | * Vehicle Collisions: The number of collisions with vehicles that have 207 | an impact bigger than a *threshold*. 208 | 209 | * Pedestrian Collisions: The number of collisions with pedestrians 210 | that have an impact bigger than a *threshold*. 211 | 212 | * General Collisions: The number of collisions with all other 213 | objects. 214 | 215 | 216 | Args: 217 | path: Path where the log files are.
218 | 219 | """ 220 | 221 | with open(os.path.join(path, 'summary.csv'), "rU") as f: 222 | header = f.readline() 223 | header = header.split(',') 224 | header[-1] = header[-1][:-1] 225 | 226 | with open(os.path.join(path, 'measurements.csv'), "rU") as f: 227 | 228 | header_metrics = f.readline() 229 | header_metrics = header_metrics.split(',') 230 | header_metrics[-1] = header_metrics[-1][:-1] 231 | 232 | result_matrix = np.loadtxt(os.path.join(path, 'summary.csv'), delimiter=",", skiprows=1) 233 | 234 | # Corner Case: The presented test just had one episode 235 | if result_matrix.ndim == 1: 236 | result_matrix = np.expand_dims(result_matrix, axis=0) 237 | 238 | tasks = np.unique(result_matrix[:, header.index('exp_id')]) 239 | 240 | all_weathers = np.unique(result_matrix[:, header.index('weather')]) 241 | 242 | measurements_matrix = np.loadtxt(os.path.join(path, 'measurements.csv'), delimiter=",", 243 | skiprows=1) 244 | 245 | metrics_dictionary = {'episodes_completion': {w: [0] * len(tasks) for w in all_weathers}, 246 | 'intersection_offroad': {w: [[] for i in range(len(tasks))] for w in 247 | all_weathers}, 248 | 'intersection_otherlane': {w: [[] for i in range(len(tasks))] for w in 249 | all_weathers}, 250 | 'collision_pedestrians': {w: [[] for i in range(len(tasks))] for w in 251 | all_weathers}, 252 | 'collision_vehicles': {w: [[] for i in range(len(tasks))] for w in 253 | all_weathers}, 254 | 'collision_other': {w: [[] for i in range(len(tasks))] for w in 255 | all_weathers}, 256 | 'episodes_fully_completed': {w: [0] * len(tasks) for w in 257 | all_weathers}, 258 | 'average_speed': {w: [0] * len(tasks) for w in all_weathers}, 259 | 'driven_kilometers': {w: [0] * len(tasks) for w in all_weathers} 260 | } 261 | 262 | for t in range(len(tasks)): 263 | experiment_results_matrix = result_matrix[ 264 | result_matrix[:, header.index('exp_id')] == tasks[t]] 265 | 266 | weathers = np.unique(experiment_results_matrix[:, header.index('weather')]) 267 | 268 | for w in weathers: 269 | 270 | experiment_results_matrix = result_matrix[ 271 | np.logical_and(result_matrix[:, header.index( 272 | 'exp_id')] == tasks[t], result_matrix[:, header.index('weather')] == w)] 273 | 274 | experiment_metrics_matrix = measurements_matrix[ 275 | np.logical_and(measurements_matrix[:, header_metrics.index( 276 | 'exp_id')] == float(tasks[t]), 277 | measurements_matrix[:, header_metrics.index('weather')] == float( 278 | w))] 279 | 280 | metrics_dictionary['episodes_fully_completed'][w][t] = \ 281 | experiment_results_matrix[:, header.index('result')].tolist() 282 | 283 | metrics_dictionary['episodes_completion'][w][t] = \ 284 | ((experiment_results_matrix[:, header.index('initial_distance')] 285 | - experiment_results_matrix[:, header.index('final_distance')]) 286 | / experiment_results_matrix[:, header.index('initial_distance')]).tolist() 287 | 288 | # Now we divide the experiment metrics matrix 289 | 290 | episode_experiment_metrics_matrix = self._divide_by_episodes( 291 | experiment_metrics_matrix, header_metrics) 292 | 293 | count = 0 294 | 295 | for episode_experiment_metrics in episode_experiment_metrics_matrix: 296 | 297 | km_run_episodes = self._get_distance_traveled( 298 | episode_experiment_metrics, header_metrics) 299 | metrics_dictionary['driven_kilometers'][w][t] += km_run_episodes 300 | metrics_dictionary['average_speed'][w][t] = \ 301 | km_run_episodes / (experiment_results_matrix[count, 302 | header.index( 303 | 'final_time')] / 3600.0) 304 | count += 1 305 | 306 | lane_road = 
self._get_out_of_road_lane( 307 | episode_experiment_metrics, header_metrics) 308 | 309 | metrics_dictionary['intersection_otherlane'][ 310 | w][t].append(lane_road[0]) 311 | metrics_dictionary['intersection_offroad'][ 312 | w][t].append(lane_road[1]) 313 | 314 | if tasks[t] in set(self._parameters['dynamic_tasks']): 315 | 316 | collisions = self._get_collisions(episode_experiment_metrics, 317 | header_metrics) 318 | 319 | metrics_dictionary['collision_pedestrians'][ 320 | w][t].append(collisions[2]) 321 | metrics_dictionary['collision_vehicles'][ 322 | w][t].append(collisions[1]) 323 | metrics_dictionary['collision_other'][ 324 | w][t].append(collisions[0]) 325 | 326 | else: 327 | 328 | metrics_dictionary['collision_pedestrians'][ 329 | w][t].append(0) 330 | metrics_dictionary['collision_vehicles'][ 331 | w][t].append(0) 332 | metrics_dictionary['collision_other'][ 333 | w][t].append(0) 334 | 335 | return metrics_dictionary 336 | -------------------------------------------------------------------------------- /controller.py: -------------------------------------------------------------------------------- 1 | """ 2 | TODO: Write Docstring 3 | """ 4 | from __future__ import print_function 5 | import time 6 | import argparse 7 | import logging 8 | import configparser 9 | from pathlib import Path 10 | import pandas as pd 11 | import pygame 12 | import pygame.locals as pl 13 | import numpy as np 14 | from carla.client import make_carla_client, VehicleControl 15 | from carla import sensor 16 | from carla.settings import CarlaSettings 17 | from carla.tcp import TCPConnectionError 18 | from carla import image_converter as ic 19 | from timer import Timer 20 | 21 | from disk_writer import ImageWriter, VideoWriter 22 | from HUD import InfoBox 23 | from enums import GameState, HighLevelCommand, TrafficLight 24 | from non_player_objects import NonPlayerObjects 25 | from drive_models import CNNKeras, LSTMKeras 26 | 27 | 28 | class CarlaController: 29 | """ TODO: Write Docstring """ 30 | 31 | def __init__(self, carla_client, args, settings): 32 | self.client = carla_client 33 | self._game_image = None 34 | self._game_image_3p = None 35 | self._measurements = None 36 | self._sensor_data = None 37 | self._image_history = None 38 | self._driving_history = None 39 | self._video_images = None 40 | self._video_info = None 41 | self._frame_history = None 42 | self._pygame_display = None 43 | self._carla_settings = None 44 | self._settings = self._initialize_settings(settings) 45 | self._timer = Timer() 46 | self._output_path = args.output_path 47 | self._game_state = GameState.NOT_RECORDING 48 | self._new_episode_flag = False 49 | self._exit_flag = False 50 | self._vehicle_in_reverse = False 51 | self._autopilot_enabled = False 52 | self._drive_model_enabled = False 53 | self._joystick_enabled = args.joystick 54 | self._joystick = None 55 | self._record_video = args.record_video 56 | self._drive_model_path = args.drive_model_path 57 | self._drive_model = None 58 | self._disk_writer_thread = None 59 | self._bottom_left_hud = InfoBox((200, 75)) 60 | self._bottom_right_hud = InfoBox((250, 75)) 61 | self._top_right_hud = InfoBox((200, 25)) 62 | self._current_traffic_light = None 63 | self._current_speed_limit = None 64 | self._current_hlc = None 65 | self._start_postition = None 66 | self._traffic_lights = NonPlayerObjects("traffic_light") 67 | self._speed_limits = NonPlayerObjects("speed_limit_sign") 68 | 69 | def _initialize_settings(self, f): 70 | s = {} 71 | s["quality_level"] = f.get("Carla", "QualityLevel", 
fallback="Epic") 72 | s["weather_id"] = int(f.get("Carla", "WeatherId", fallback=1)) 73 | s["number_of_vehicles"] = int(f.get("Carla", "NumberOfVehicles", fallback=50)) 74 | s["number_of_pedastrians"] = int( 75 | f.get("Carla", "NumberOfPedestrians", fallback=30) 76 | ) 77 | s["autopilot_steer_noise"] = float(f.get("AutoPilot", "SteerNoise", fallback=0)) 78 | s["autopilot_throttle_noise"] = float( 79 | f.get("AutoPilot", "ThrottleNoise", fallback=0) 80 | ) 81 | 82 | s["window_width"] = int(f.get("Pygame", "WindowWidth", fallback=1024)) 83 | s["window_height"] = int(f.get("Pygame", "WindowHeight", fallback=768)) 84 | s["output_image_width"] = int( 85 | f.get("Pygame", "OutputImageWidth", fallback=1024) 86 | ) 87 | s["output_image_height"] = int( 88 | f.get("Pygame", "OutputImageHeight", fallback=768) 89 | ) 90 | s["randomize_weather"] = f.getboolean( 91 | "Carla", "RandomizeWeather", fallback=False 92 | ) 93 | s["autostart_recording"] = f.getboolean( 94 | "Controller", "AutoStartRecording", fallback=False 95 | ) 96 | s["frame_limit"] = int(f.get("Controller", "FrameLimit", fallback=0)) 97 | s["episode_limit"] = int(f.get("Controller", "EpisodeLimit", fallback=0)) 98 | s["drive_model_steer"] = f.getboolean( 99 | "DriveModel", "ControlSteer", fallback=False 100 | ) 101 | s["drive_model_throttle"] = f.getboolean( 102 | "DriveModel", "ControlThrottle", fallback=False 103 | ) 104 | s["drive_model_brake"] = f.getboolean( 105 | "DriveModel", "ControlBrake", fallback=False 106 | ) 107 | s["starting_positions"] = f.get("Carla", "StartingPositions", fallback=None) 108 | 109 | if s["starting_positions"] is not None: 110 | s["starting_positions"] = list(map(int, s["starting_positions"].split(","))) 111 | return s 112 | 113 | def _initialize_pygame(self): 114 | self._pygame_display = pygame.display.set_mode( 115 | (self._settings["window_width"], self._settings["window_height"]), 116 | pygame.HWSURFACE | pygame.DOUBLEBUF, 117 | ) 118 | if self._joystick_enabled: 119 | pygame.joystick.init() 120 | self._joystick = pygame.joystick.Joystick(0) 121 | self._joystick.init() 122 | logging.info("Use steering wheel to control vehicle") 123 | else: 124 | logging.info("Use keyboard to control vehicle") 125 | 126 | self._on_new_episode() 127 | 128 | logging.debug("pygame initialized") 129 | 130 | def _initialize_carla(self): 131 | # Initialize settings 132 | settings = CarlaSettings() 133 | settings.set( 134 | SynchronousMode=True, 135 | NumberOfVehicles=self._settings["number_of_vehicles"], 136 | NumberOfPedestrians=self._settings["number_of_pedastrians"], 137 | WeatherId=self._settings["weather_id"], 138 | QualityLevel=self._settings["quality_level"], 139 | SendNonPlayerAgentsInfo=True, 140 | ) 141 | settings.randomize_seeds() 142 | 143 | output_image_width = self._settings["output_image_width"] 144 | output_image_height = self._settings["output_image_height"] 145 | 146 | # Add a game camera 1 and 2 147 | game_camera = sensor.Camera("GameCamera") 148 | game_camera.set_image_size( 149 | self._settings["window_width"], self._settings["window_height"] 150 | ) 151 | game_camera.set_position(2.0, 0.0, 1.4) 152 | game_camera.set_rotation(0.0, 0.0, 0.0) 153 | settings.add_sensor(game_camera) 154 | 155 | game_camera_3p = sensor.Camera("GameCamera3p") 156 | game_camera_3p.set_image_size( 157 | self._settings["window_width"], self._settings["window_height"] 158 | ) 159 | game_camera_3p.set_position(-5, 0.0, 3) 160 | game_camera_3p.set_rotation(-15, 0.0, 0) 161 | settings.add_sensor(game_camera_3p) 162 | 163 | # Add RGB 
center camera 164 | rgb_camera_center = sensor.Camera("RGBCameraCenter") 165 | rgb_camera_center.set_image_size(output_image_width, output_image_height) 166 | rgb_camera_center.set_position(2.0, 0.0, 1.4) 167 | rgb_camera_center.set_rotation(0.0, 0.0, 0.0) 168 | settings.add_sensor(rgb_camera_center) 169 | 170 | # Add RGB left camera 171 | rgb_camera_left = sensor.Camera("RGBCameraLeft") 172 | rgb_camera_left.set_image_size(output_image_width, output_image_height) 173 | rgb_camera_left.set_position(2.0, -1, 1.4) 174 | rgb_camera_left.set_rotation(0.0, 0.0, 0.0) 175 | settings.add_sensor(rgb_camera_left) 176 | 177 | # Add RGB right camera 178 | rgb_camera_right = sensor.Camera("RGBCameraRight") 179 | rgb_camera_right.set_image_size(output_image_width, output_image_height) 180 | rgb_camera_right.set_position(2.0, 1, 1.4) 181 | rgb_camera_right.set_rotation(0.0, 0.0, 0.0) 182 | settings.add_sensor(rgb_camera_right) 183 | 184 | # Add depth camera 185 | depth_camera = sensor.Camera("DepthCamera", PostProcessing="Depth") 186 | depth_camera.set_image_size(output_image_width, output_image_height) 187 | depth_camera.set_position(2.0, 0.0, 1.4) 188 | depth_camera.set_rotation(0.0, 0.0, 0.0) 189 | settings.add_sensor(depth_camera) 190 | 191 | # Add semantic segmentation camera 192 | sem_seg_camera = sensor.Camera( 193 | "SemSegCamera", PostProcessing="SemanticSegmentation" 194 | ) 195 | sem_seg_camera.set_image_size(output_image_width, output_image_height) 196 | sem_seg_camera.set_position(2.0, 0.0, 1.4) 197 | sem_seg_camera.set_rotation(0.0, 0.0, 0.0) 198 | settings.add_sensor(sem_seg_camera) 199 | 200 | self._carla_settings = settings 201 | 202 | logging.debug("Carla initialized") 203 | 204 | def _initialize_drive_model(self): 205 | if self._drive_model_path: 206 | self._drive_model = CNNKeras() 207 | logging.info("Loading drive model from: %s", self._drive_model_path) 208 | self._drive_model.load_model(self._drive_model_path) 209 | 210 | def _initialize_history(self): 211 | self._driving_history = pd.DataFrame( 212 | columns=[ 213 | "CenterRGB", 214 | "LeftRGB", 215 | "RightRGB", 216 | "Depth", 217 | "SemSeg", 218 | "Location", 219 | "Speed", 220 | "Controls", 221 | "APControls", 222 | "HLC", 223 | "SpeedLimit", 224 | "TrafficLight", 225 | "AutoPilotEnabled", 226 | "WeatherID", 227 | ] 228 | ) 229 | self._image_history = [] 230 | self._frame_history = [] 231 | 232 | def _on_new_episode(self): 233 | self._timer.new_episode() 234 | if self._settings["episode_limit"] != 0: 235 | if self._settings["episode_limit"] < self._timer.episode_num: 236 | self._exit_flag = True 237 | return 238 | scene = self.client.load_settings(self._carla_settings) 239 | number_of_start_positions = len(scene.player_start_spots) 240 | if self._settings["starting_positions"] is not None: 241 | if self._start_postition is None: 242 | current_index = 0 243 | else: 244 | current_index = self._settings["starting_positions"].index( 245 | self._start_postition 246 | ) 247 | if current_index >= len(self._settings["starting_positions"]) - 1: 248 | current_index = 0 249 | else: 250 | current_index += 1 251 | 252 | self._start_postition = self._settings["starting_positions"][current_index] 253 | else: 254 | self._start_postition = np.random.randint(number_of_start_positions) 255 | self.client.start_episode(self._start_postition) 256 | self._new_episode_flag = False 257 | self._disk_writer_thread = None 258 | self._initialize_history() 259 | self._video_images = [[], []] 260 | self._video_info = [] 261 | self._current_speed_limit = 30 
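# The tracked traffic light is kept as a (state, distance) tuple; the reset
# below pairs TrafficLight.NONE with 15, the same value that
# _update_current_traffic_light passes to get_closest_with_rotation and
# uses as its fallback distance.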
262 | self._current_traffic_light = (TrafficLight.NONE, 15) 263 | if self._settings["randomize_weather"]: 264 | self._carla_settings.set(WeatherId=np.random.randint(0, 15)) 265 | if self._drive_model: 266 | self._current_hlc = HighLevelCommand.FOLLOW_ROAD 267 | 268 | def _get_keyboard_control(self, keys): 269 | control = VehicleControl() 270 | if keys[pl.K_LEFT] or keys[pl.K_a]: 271 | control.steer = -1.0 272 | if keys[pl.K_RIGHT] or keys[pl.K_d]: 273 | control.steer = 1.0 274 | if keys[pl.K_UP] or keys[pl.K_w]: 275 | control.throttle = 1.0 276 | if keys[pl.K_DOWN] or keys[pl.K_s]: 277 | control.brake = 1.0 278 | if keys[pl.K_SPACE]: 279 | control.hand_brake = True 280 | control.reverse = self._vehicle_in_reverse 281 | 282 | return control 283 | 284 | def _get_joystick_control(self): 285 | control = VehicleControl() 286 | control.steer = self._joystick.get_axis(0) 287 | control.throttle = max(self._joystick.get_axis(1) * -1, 0) 288 | control.brake = max(self._joystick.get_axis(1), 0) 289 | 290 | return control 291 | 292 | def _get_autopilot_control(self): 293 | speed = int(self._measurements.player_measurements.forward_speed * 3.6) 294 | autopilot = self._measurements.player_measurements.autopilot_control 295 | control = VehicleControl() 296 | steer_noise = self._settings["autopilot_steer_noise"] 297 | if steer_noise != 0: 298 | steer_noise = np.random.uniform(-steer_noise, steer_noise) 299 | control.steer = autopilot.steer + steer_noise 300 | 301 | throttle_noise = ( 302 | self._settings["autopilot_throttle_noise"] if speed > 10 else 0 303 | ) 304 | 305 | if throttle_noise != 0: 306 | throttle_noise = np.random.uniform(-throttle_noise, throttle_noise) 307 | control.throttle = autopilot.throttle + throttle_noise 308 | control.brake = autopilot.brake 309 | return control 310 | 311 | def _get_drive_model_control(self, control): 312 | images = self._get_camera_images() 313 | info = { 314 | "speed": self._measurements.player_measurements.forward_speed * 3.6, 315 | "speed_limit": self._current_speed_limit, 316 | "traffic_light": self._current_traffic_light[0].value, 317 | "hlc": self._current_hlc.value, 318 | } 319 | steer, throttle, brake = self._drive_model.get_prediction(images, info) 320 | 321 | if self._settings["drive_model_steer"]: 322 | control.steer = steer 323 | if self._settings["drive_model_throttle"]: 324 | control.throttle = throttle 325 | if self._settings["drive_model_brake"]: 326 | if brake > 0.4: 327 | control.brake = brake 328 | return control 329 | 330 | def _writeback_hlc_to_history(self, command): 331 | look_back = 70 332 | for i, row in self._driving_history.iterrows(): 333 | if int(row["HLC"]) == 0: 334 | if i >= len(self._driving_history.index) - look_back: 335 | self._driving_history.at[i, "HLC"] = command.value 336 | 337 | def _handle_keydown_event(self, key): 338 | if self._game_state is not GameState.WRITING: 339 | if key == pl.K_p: 340 | self._autopilot_enabled = not self._autopilot_enabled 341 | elif key == pl.K_m: 342 | if self._drive_model: 343 | self._drive_model_enabled = not self._drive_model_enabled 344 | self._current_hlc = HighLevelCommand.FOLLOW_ROAD 345 | elif key == pl.K_q: 346 | self._vehicle_in_reverse = not self._vehicle_in_reverse 347 | elif key == pl.K_e: 348 | if self._game_state == GameState.RECORDING: 349 | self._game_state = GameState.WRITING 350 | if self._record_video: 351 | self._write_video_to_disk( 352 | on_complete=self._write_history_to_disk 353 | ) 354 | else: 355 | self._write_history_to_disk() 356 | else: 357 | 358 | if 
self._record_video: 359 | self._game_state = GameState.WRITING 360 | self._write_video_to_disk() 361 | self._new_episode_flag = True 362 | elif key == pl.K_r: 363 | if ( 364 | self._game_state == GameState.NOT_RECORDING 365 | and self._output_path is not None 366 | ): 367 | self._game_state = GameState.RECORDING 368 | elif self._game_state == GameState.RECORDING: 369 | self._game_state = GameState.WRITING 370 | self._write_history_to_disk() 371 | if self._game_state == GameState.RECORDING: 372 | if key == pl.K_KP8: 373 | self._writeback_hlc_to_history(HighLevelCommand.STRAIGHT_AHEAD) 374 | elif key == pl.K_KP4: 375 | self._writeback_hlc_to_history(HighLevelCommand.TURN_LEFT) 376 | elif key == pl.K_KP6: 377 | self._writeback_hlc_to_history(HighLevelCommand.TURN_RIGHT) 378 | if self._drive_model_enabled: 379 | if key == pl.K_KP8: 380 | self._current_hlc = HighLevelCommand.STRAIGHT_AHEAD 381 | elif key == pl.K_KP4: 382 | self._current_hlc = HighLevelCommand.TURN_LEFT 383 | elif key == pl.K_KP6: 384 | self._current_hlc = HighLevelCommand.TURN_RIGHT 385 | elif key == pl.K_KP5: 386 | self._current_hlc = HighLevelCommand.FOLLOW_ROAD 387 | 388 | def _render_HUD(self): 389 | speed = int(self._measurements.player_measurements.forward_speed * 3.6) 390 | autopilot_status = "Enabled" if self._autopilot_enabled else "Disabled" 391 | drive_model_status = "Enabled" if self._drive_model_enabled else "Disabled" 392 | reverse_status = "Enabled" if self._vehicle_in_reverse else "Disabled" 393 | speed_value = "{} km/h".format(speed) 394 | speed_limit = "{} km/h".format(self._current_speed_limit) 395 | traffic_light = self._current_traffic_light[0].name 396 | current_hlc = ( 397 | self._current_hlc.name if self._drive_model_enabled else "Disabled" 398 | ) 399 | self._bottom_left_hud.update_content( 400 | [ 401 | ("Speed", speed_value), 402 | ("Speed Limit", speed_limit), 403 | ("Reverse", reverse_status), 404 | ("Traffic Light", traffic_light), 405 | ] 406 | ) 407 | self._bottom_right_hud.update_content( 408 | [ 409 | ("Autopilot", autopilot_status), 410 | ("Recording State", self._game_state.name), 411 | ("Drive Model", drive_model_status), 412 | ("Drive Model HLC", current_hlc), 413 | ] 414 | ) 415 | self._top_right_hud.update_content([("Start Position", self._start_postition)]) 416 | 417 | sw_x = 20 418 | sw_y = self._settings["window_height"] - self._bottom_left_hud.size[1] - 20 419 | se_x = self._settings["window_width"] - self._bottom_right_hud.size[0] - 20 420 | se_y = self._settings["window_height"] - self._bottom_right_hud.size[1] - 20 421 | ne_x = self._settings["window_width"] - self._top_right_hud.size[0] - 20 422 | ne_y = 20 423 | self._pygame_display.blit(self._bottom_left_hud.render_surface(), (sw_x, sw_y)) 424 | self._pygame_display.blit(self._bottom_right_hud.render_surface(), (se_x, se_y)) 425 | self._pygame_display.blit(self._top_right_hud.render_surface(), (ne_x, ne_y)) 426 | 427 | def _render_pygame(self): 428 | if self._game_image is not None: 429 | array = ic.to_rgb_array(self._game_image) 430 | surface = pygame.surfarray.make_surface(array.swapaxes(0, 1)) 431 | 432 | if self._game_state == GameState.WRITING: 433 | self._render_progressbar( 434 | surface, 300, 40, self._disk_writer_thread.progress 435 | ) 436 | 437 | self._pygame_display.blit(surface, (0, 0)) 438 | 439 | self._render_HUD() 440 | 441 | pygame.display.flip() 442 | 443 | def _render_progressbar(self, surface, width, height, progress): 444 | left = (self._settings["window_width"] / 2) - (width / 2) 445 | top = 
(self._settings["window_height"] / 2) - (height / 2) 446 | pygame.draw.rect( 447 | surface, (255, 255, 255), pygame.Rect(left, top, width * progress, height) 448 | ) 449 | pygame.draw.rect( 450 | surface, (128, 128, 128), pygame.Rect(left, top, width, height), 1 451 | ) 452 | self._pygame_display.blit(surface, (0, 0)) 453 | 454 | def _get_camera_images(self): 455 | sensor_data = self._sensor_data 456 | 457 | image_object = { 458 | "rgb_center": ic.to_bgra_array(sensor_data.get("RGBCameraCenter", None)), 459 | "rgb_left": ic.to_bgra_array(sensor_data.get("RGBCameraLeft", None)), 460 | "rgb_right": ic.to_bgra_array(sensor_data.get("RGBCameraRight", None)), 461 | "depth": ic.depth_to_logarithmic_grayscale( 462 | sensor_data.get("DepthCamera", None) 463 | ), 464 | "sem_seg": ic.labels_to_cityscapes_palette( 465 | sensor_data.get("SemSegCamera", None) 466 | ), 467 | } 468 | return image_object 469 | 470 | def _prepare_video_images(self): 471 | speed = int(self._measurements.player_measurements.forward_speed * 3.6) 472 | speed = "{} km/h".format(speed) 473 | speed_limit = "{} km/h".format(self._current_speed_limit) 474 | traffic_light = self._current_traffic_light[0].name 475 | current_hlc = ( 476 | self._current_hlc.name if self._drive_model_enabled else "Disabled" 477 | ) 478 | info = [speed, speed_limit, traffic_light, current_hlc] 479 | self._video_info.append(info) 480 | self._video_images[0].append(ic.to_rgb_array(self._game_image)) 481 | self._video_images[1].append(ic.to_rgb_array(self._game_image_3p)) 482 | 483 | def _save_to_history(self, control): 484 | measurements = self._measurements 485 | 486 | frame = self._timer.episode_frame 487 | 488 | self._image_history.append(self._get_camera_images()) 489 | self._frame_history.append(frame) 490 | 491 | loc = measurements.player_measurements.transform.location 492 | speed = measurements.player_measurements.forward_speed * 3.6 493 | autopilot = measurements.player_measurements.autopilot_control 494 | 495 | self._driving_history = self._driving_history.append( 496 | pd.Series( 497 | [ 498 | f"imgs/{frame}_rgb_center.png", 499 | f"imgs/{frame}_rgb_left.png", 500 | f"imgs/{frame}_rgb_right.png", 501 | f"imgs/{frame}_depth.png", 502 | f"imgs/{frame}_sem_seg.png", 503 | (loc.x, loc.y), 504 | speed, 505 | ( 506 | control.steer, 507 | control.throttle, 508 | control.brake, 509 | int(control.reverse), 510 | ), 511 | ( 512 | autopilot.steer, 513 | autopilot.throttle, 514 | autopilot.brake, 515 | int(autopilot.reverse), 516 | ), 517 | 0, 518 | self._current_speed_limit, 519 | self._current_traffic_light[0].value, 520 | int(self._autopilot_enabled), 521 | self._settings["weather_id"], 522 | ], 523 | index=self._driving_history.columns, 524 | ), 525 | ignore_index=True, 526 | ) 527 | 528 | def _write_history_to_disk(self): 529 | path = Path(f"{self._output_path}/{self._timer.episode_timestamp_str}") 530 | self._disk_writer_thread = ImageWriter( 531 | path, 532 | self._image_history, 533 | self._driving_history, 534 | self._frame_history, 535 | on_complete=self._images_write_complete, 536 | ) 537 | self._disk_writer_thread.start() 538 | 539 | def _write_video_to_disk(self, on_complete=None): 540 | if on_complete is None: 541 | on_complete = self._video_write_complete 542 | 543 | path = Path(f"{self._output_path}/{self._timer.episode_timestamp_str}") 544 | self._disk_writer_thread = VideoWriter( 545 | path, self._video_images, self._video_info, on_complete=on_complete 546 | ) 547 | self._disk_writer_thread.start() 548 | 549 | def 
_images_write_complete(self): 550 | self._game_state = GameState.NOT_RECORDING 551 | self._initialize_history() 552 | 553 | def _video_write_complete(self): 554 | self._game_state = GameState.NOT_RECORDING 555 | 556 | def _update_current_traffic_light(self): 557 | old_state, old_dist = self._current_traffic_light 558 | agent, new_dist = self._traffic_lights.get_closest_with_rotation( 559 | self._measurements.player_measurements.transform, 12, -90, 15 560 | ) 561 | if agent is not None: 562 | new_state = agent.state 563 | 564 | if new_dist <= old_dist: 565 | self._current_traffic_light = (TrafficLight(new_state), new_dist) 566 | else: 567 | self._current_traffic_light = (TrafficLight(old_state), new_dist) 568 | else: 569 | self._current_traffic_light = (TrafficLight.NONE, 15) 570 | 571 | def _update_current_speed_limit(self): 572 | agent = self._speed_limits.get_closest_with_rotation( 573 | self._measurements.player_measurements.transform, 12, -90, 20 574 | )[0] 575 | if agent is not None: 576 | self._current_speed_limit = int(agent.speed_limit * 3.6) 577 | 578 | def _on_loop(self): 579 | 580 | if ( 581 | self._game_state is GameState.NOT_RECORDING 582 | and self._settings["autostart_recording"] 583 | ): 584 | if self._timer.episode_frame == 40: 585 | self._game_state = GameState.RECORDING 586 | 587 | if self._game_state is not GameState.WRITING: 588 | self._timer.tick() 589 | 590 | if self._new_episode_flag: 591 | self._on_new_episode() 592 | 593 | if self._exit_flag: 594 | return False 595 | 596 | measurements, sensor_data = self.client.read_data() 597 | self._measurements = measurements 598 | self._sensor_data = sensor_data 599 | 600 | self._game_image = sensor_data.get("GameCamera", None) 601 | self._game_image_3p = sensor_data.get("GameCamera3p", None) 602 | 603 | if self._record_video: 604 | self._prepare_video_images() 605 | 606 | self._traffic_lights.update_agents(measurements.non_player_agents) 607 | 608 | if not self._traffic_lights.valid: 609 | self._traffic_lights.initialize_KD_tree() 610 | else: 611 | self._update_current_traffic_light() 612 | 613 | if not self._speed_limits.valid: 614 | self._speed_limits.update_agents(measurements.non_player_agents) 615 | self._speed_limits.initialize_KD_tree() 616 | else: 617 | self._update_current_speed_limit() 618 | 619 | if not self._autopilot_enabled: 620 | if self._joystick_enabled: 621 | control = self._get_joystick_control() 622 | else: 623 | control = self._get_keyboard_control(pygame.key.get_pressed()) 624 | else: 625 | control = self._get_autopilot_control() 626 | 627 | if self._drive_model and self._drive_model_enabled: 628 | control = self._get_drive_model_control(control) 629 | print(control) 630 | self.client.send_control(control) 631 | 632 | if self._game_state == GameState.RECORDING: 633 | self._save_to_history(control) 634 | 635 | if self._settings["frame_limit"] != 0: 636 | if self._settings["frame_limit"] < self._timer.episode_frame: 637 | if self._game_state == GameState.RECORDING: 638 | self._game_state = GameState.WRITING 639 | if self._record_video: 640 | self._write_video_to_disk( 641 | on_complete=self._write_history_to_disk 642 | ) 643 | else: 644 | self._write_history_to_disk() 645 | if self._record_video: 646 | self._game_state = GameState.WRITING 647 | self._write_video_to_disk() 648 | self._new_episode_flag = True 649 | 650 | self._render_pygame() 651 | 652 | def execute(self): 653 | """ TODO: Write docstring """ 654 | pygame.init() 655 | 656 | self._initialize_carla() 657 | self._initialize_pygame() 658 |
self._initialize_drive_model() 659 | if self._output_path is not None: 660 | logging.info("Recorded data will be saved to: %s", self._output_path) 661 | try: 662 | while True: 663 | for event in pygame.event.get(): 664 | if event.type == pygame.QUIT: 665 | return 666 | if event.type == pl.KEYDOWN: 667 | self._handle_keydown_event(event.key) 668 | 669 | if self._on_loop() is False: 670 | break 671 | finally: 672 | pygame.quit() 673 | 674 | 675 | def main(): 676 | """ TODO: Write docstring """ 677 | argparser = argparse.ArgumentParser(description="CARLA Manual Control Client") 678 | argparser.add_argument( 679 | "-v", 680 | "--verbose", 681 | action="store_true", 682 | dest="debug", 683 | help="print debug information", 684 | ) 685 | argparser.add_argument( 686 | "--host", 687 | metavar="H", 688 | default="localhost", 689 | help="IP of the host server (default: localhost)", 690 | ) 691 | argparser.add_argument( 692 | "-j", 693 | "--joystick", 694 | action="store_true", 695 | help="control vehicle with an external steering wheel", 696 | ) 697 | argparser.add_argument( 698 | "-p", 699 | "--port", 700 | metavar="P", 701 | default=2000, 702 | type=int, 703 | help="TCP port to listen to (default: 2000)", 704 | ) 705 | argparser.add_argument( 706 | "--record-video", 707 | action="store_true", 708 | dest="record_video", 709 | default=False, 710 | help="record a video of the driving", 711 | ) 712 | argparser.add_argument( 713 | "-o", 714 | "--output", 715 | metavar="PATH", 716 | dest="output_path", 717 | default="output", 718 | help="recorded data will be saved to this path", 719 | ) 720 | argparser.add_argument( 721 | "-m", 722 | "--model", 723 | metavar="M", 724 | dest="drive_model_path", 725 | default=None, 726 | help="path to drive model", 727 | ) 728 | args = argparser.parse_args() 729 | 730 | settings = configparser.ConfigParser() 731 | settings.read("settings.ini") 732 | 733 | log_level = logging.DEBUG if args.debug else logging.INFO 734 | logging.basicConfig(format="%(levelname)s: %(message)s", level=log_level) 735 | 736 | logging.info("Listening to server %s:%s", args.host, args.port) 737 | 738 | while True: 739 | try: 740 | with make_carla_client(args.host, args.port) as client: 741 | game = CarlaController(client, args, settings) 742 | game.execute() 743 | break 744 | 745 | except TCPConnectionError as error: 746 | logging.error(error) 747 | time.sleep(1) 748 | 749 | 750 | if __name__ == "__main__": 751 | try: 752 | main() 753 | except KeyboardInterrupt: 754 | print("\nCancelled by user. Bye!") 755 | --------------------------------------------------------------------------------