├── PythonClient ├── MANIFEST.in ├── carla │ ├── driving_benchmark │ │ ├── __init__.py │ │ ├── experiment_suites │ │ │ ├── __init__.py │ │ │ ├── basic_experiment_suite.py │ │ │ ├── experiment_suite.py │ │ │ └── corl_2017.py │ │ ├── experiment.py │ │ ├── results_printer.py │ │ ├── recording.py │ │ ├── driving_benchmark.py │ │ └── metrics.py │ ├── agent │ │ ├── __init__.py │ │ ├── forward_agent.py │ │ └── agent.py │ ├── planner │ │ ├── Town01.png │ │ ├── Town02.png │ │ ├── Town02Big.png │ │ ├── Town01Lanes.png │ │ ├── Town02Lanes.png │ │ ├── Town01Central.png │ │ ├── Town02Central.png │ │ ├── Town02.txt │ │ ├── Town01.txt │ │ ├── grid.py │ │ ├── graph.py │ │ ├── city_track.py │ │ ├── astar.py │ │ ├── map.py │ │ ├── planner.py │ │ └── converter.py │ ├── controller │ │ ├── __pycache__ │ │ │ └── utils.cpython-36.pyc │ │ ├── utils.py │ │ └── utils_test.py │ ├── image_converter.py │ └── client.py ├── __pycache__ │ └── live_plotter.cpython-36.pyc ├── Minor Project │ ├── controller_output │ │ ├── trajectory.png │ │ ├── Speed_Profiles.png │ │ ├── brake_output.png │ │ ├── forward_speed.png │ │ ├── steer_output.png │ │ └── throttle_output.png │ └── controller2d.py ├── setup.py ├── driving_benchmark_example.py ├── view_start_positions.py ├── point_cloud_example.py ├── client_example.py ├── live_plotter.py └── manual_control.py └── README.md /PythonClient/MANIFEST.in: -------------------------------------------------------------------------------- 1 | include carla/planner/*.txt 2 | include carla/planner/*.png 3 | -------------------------------------------------------------------------------- /PythonClient/carla/driving_benchmark/__init__.py: -------------------------------------------------------------------------------- 1 | from .driving_benchmark import run_driving_benchmark 2 | -------------------------------------------------------------------------------- /PythonClient/carla/agent/__init__.py: 
-------------------------------------------------------------------------------- 1 | from .forward_agent import ForwardAgent 2 | from .agent import Agent 3 | -------------------------------------------------------------------------------- /PythonClient/carla/planner/Town01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adityasharma28/Self-Driving-Car/HEAD/PythonClient/carla/planner/Town01.png -------------------------------------------------------------------------------- /PythonClient/carla/planner/Town02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adityasharma28/Self-Driving-Car/HEAD/PythonClient/carla/planner/Town02.png -------------------------------------------------------------------------------- /PythonClient/carla/planner/Town02Big.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adityasharma28/Self-Driving-Car/HEAD/PythonClient/carla/planner/Town02Big.png -------------------------------------------------------------------------------- /PythonClient/carla/planner/Town01Lanes.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adityasharma28/Self-Driving-Car/HEAD/PythonClient/carla/planner/Town01Lanes.png -------------------------------------------------------------------------------- /PythonClient/carla/planner/Town02Lanes.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adityasharma28/Self-Driving-Car/HEAD/PythonClient/carla/planner/Town02Lanes.png -------------------------------------------------------------------------------- /PythonClient/carla/planner/Town01Central.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/adityasharma28/Self-Driving-Car/HEAD/PythonClient/carla/planner/Town01Central.png -------------------------------------------------------------------------------- /PythonClient/carla/planner/Town02Central.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adityasharma28/Self-Driving-Car/HEAD/PythonClient/carla/planner/Town02Central.png -------------------------------------------------------------------------------- /PythonClient/carla/driving_benchmark/experiment_suites/__init__.py: -------------------------------------------------------------------------------- 1 | from .basic_experiment_suite import BasicExperimentSuite 2 | from .corl_2017 import CoRL2017 3 | -------------------------------------------------------------------------------- /PythonClient/__pycache__/live_plotter.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adityasharma28/Self-Driving-Car/HEAD/PythonClient/__pycache__/live_plotter.cpython-36.pyc -------------------------------------------------------------------------------- /PythonClient/Minor Project/controller_output/trajectory.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adityasharma28/Self-Driving-Car/HEAD/PythonClient/Minor Project/controller_output/trajectory.png -------------------------------------------------------------------------------- /PythonClient/Minor Project/controller_output/Speed_Profiles.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adityasharma28/Self-Driving-Car/HEAD/PythonClient/Minor Project/controller_output/Speed_Profiles.png -------------------------------------------------------------------------------- /PythonClient/Minor Project/controller_output/brake_output.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/adityasharma28/Self-Driving-Car/HEAD/PythonClient/Minor Project/controller_output/brake_output.png -------------------------------------------------------------------------------- /PythonClient/Minor Project/controller_output/forward_speed.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adityasharma28/Self-Driving-Car/HEAD/PythonClient/Minor Project/controller_output/forward_speed.png -------------------------------------------------------------------------------- /PythonClient/Minor Project/controller_output/steer_output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adityasharma28/Self-Driving-Car/HEAD/PythonClient/Minor Project/controller_output/steer_output.png -------------------------------------------------------------------------------- /PythonClient/carla/controller/__pycache__/utils.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adityasharma28/Self-Driving-Car/HEAD/PythonClient/carla/controller/__pycache__/utils.cpython-36.pyc -------------------------------------------------------------------------------- /PythonClient/Minor Project/controller_output/throttle_output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adityasharma28/Self-Driving-Car/HEAD/PythonClient/Minor Project/controller_output/throttle_output.png -------------------------------------------------------------------------------- /PythonClient/carla/agent/forward_agent.py: -------------------------------------------------------------------------------- 1 | 2 | from carla.agent.agent import Agent 3 | from carla.client import VehicleControl 4 | 5 | 6 | class ForwardAgent(Agent): 7 | 
""" 8 | Simple derivation of Agent Class, 9 | A trivial agent agent that goes straight 10 | """ 11 | def run_step(self, measurements, sensor_data, directions, target): 12 | control = VehicleControl() 13 | control.throttle = 0.9 14 | 15 | return control 16 | -------------------------------------------------------------------------------- /PythonClient/setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | 3 | # @todo Dependencies are missing. 4 | 5 | setup( 6 | name='carla_client', 7 | version='0.8.4', 8 | packages=['carla', 'carla.driving_benchmark', 'carla.agent', 9 | 'carla.driving_benchmark.experiment_suites', 'carla.planner'], 10 | license='MIT License', 11 | description='Python API for communicating with the CARLA server.', 12 | url='https://github.com/carla-simulator/carla', 13 | author='The CARLA team', 14 | author_email='carla.simulator@gmail.com', 15 | include_package_data=True 16 | ) 17 | -------------------------------------------------------------------------------- /PythonClient/carla/planner/Town02.txt: -------------------------------------------------------------------------------- 1 | 5.4400,-107.48000,-0.22000000 2 | 0.000000,0.000000,0.000000 3 | 1.000000,1.000000,1.000000 4 | -16.43022,-16.43022,0.000 5 | 25, 25 6 | 0,10 0,24 14 7 | 0,24 0,10 14 8 | 24,24 6,24 18 9 | 6,24 24,24 18 10 | 24,0 24,10 10 11 | 24,10 24,0 10 12 | 0,0 24,0 24 13 | 24,0 0,0 24 14 | 0,10 0,0 10 15 | 0,0 0,10 10 16 | 24,10 24,16 6 17 | 24,16 24,10 6 18 | 0,10 6,10 6 19 | 6,10 0,10 6 20 | 6,24 0,24 6 21 | 0,24 6,24 6 22 | 6,10 17,10 11 23 | 17,10 6,10 11 24 | 6,24 6,16 8 25 | 6,16 6,24 8 26 | 24,16 24,24 8 27 | 24,24 24,16 8 28 | 6,16 6,10 6 29 | 6,10 6,16 6 30 | 24,16 17,16 7 31 | 17,16 24,16 7 32 | 17,16 6,16 11 33 | 6,16 17,16 11 34 | 17,10 24,10 7 35 | 24,10 17,10 7 36 | 17,16 17,10 6 37 | 17,10 17,16 6 38 | -------------------------------------------------------------------------------- 
/PythonClient/carla/agent/agent.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 3 | # Barcelona (UAB). 4 | # 5 | # This work is licensed under the terms of the MIT license. 6 | # For a copy, see . 7 | # @author: german,felipecode 8 | 9 | 10 | from __future__ import print_function 11 | import abc 12 | 13 | 14 | class Agent(object): 15 | def __init__(self): 16 | self.__metaclass__ = abc.ABCMeta 17 | 18 | @abc.abstractmethod 19 | def run_step(self, measurements, sensor_data, directions, target): 20 | """ 21 | Function to be redefined by an agent. 22 | :param The measurements like speed, the image data and a target 23 | :returns A carla Control object, with the steering/gas/brake for the agent 24 | """ 25 | -------------------------------------------------------------------------------- /PythonClient/carla/planner/Town01.txt: -------------------------------------------------------------------------------- 1 | 0.0,0.0,-0.3811000000 2 | 0.000000,0.000000,0.0 3 | 1.000000,1.000000,1.000000 4 | -16.43022,-16.43022,0.000 5 | 49, 41 6 | 0,0 0,40 40 7 | 0,40 0,0 40 8 | 48,40 41,40 7 9 | 41,40 48,40 7 10 | 48,0 48,40 40 11 | 48,40 48,0 40 12 | 0,0 11,0 11 13 | 11,0 0,0 11 14 | 41,0 48,0 7 15 | 48,0 41,0 7 16 | 41,40 11,40 30 17 | 11,40 41,40 30 18 | 41,0 41,7 7 19 | 41,7 41,0 7 20 | 11,40 0,40 11 21 | 0,40 11,40 11 22 | 11,0 19,0 8 23 | 19,0 11,0 8 24 | 11,40 11,24 16 25 | 11,24 11,40 16 26 | 41,24 41,40 16 27 | 41,40 41,24 16 28 | 11,24 11,16 8 29 | 11,16 11,24 8 30 | 41,24 11,24 30 31 | 11,24 41,24 30 32 | 41,16 41,24 8 33 | 41,24 41,16 8 34 | 11,16 11,7 9 35 | 11,7 11,16 9 36 | 41,16 11,16 30 37 | 11,16 41,16 30 38 | 41,7 41,16 9 39 | 41,16 41,7 9 40 | 11,7 11,0 7 41 | 11,0 11,7 7 42 | 41,7 19,7 22 43 | 19,7 41,7 22 44 | 19,0 41,0 22 45 | 41,0 19,0 22 46 | 19,7 11,7 8 47 | 11,7 19,7 8 48 | 19,0 19,7 7 49 | 19,7 19,0 7 50 | 
-------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Self-Driving-Car 2 | This is a model which will run on Carla simulator, My goal was to control the vehicle to follow a race track by navigating through preset waypoints. The vehicle needs to reach these waypoints at certain desired speeds, so both longitudinal and lateral control will be required. 3 | The trajectory feedback will contain the car, start and end positions, entire path/path traveled and a small shaded region which denotes the subset of interpolated points to be sent into the controller for control updates. Linear interpolation is used between waypoints to provide a finer resolution path/speed requests for the controller. 4 | The controls feedback shows the throttle, steering and brake outputs, as well as the speed response for the simulation (desired speed and current speed in the single plot). This is a general feedback for viewing what the client is sending to the CARLA server in terms of control commands. The desired speed is set to the closest interpolated speed point to the current position of the car. The speeds are in meters per second and the throttle (0 to 1), brake (0 to 1) and steering (-1 to 1, or left to right turn) are unitless. Note that the steering command output inside controller2d.py is automatically converted from radians (-1.22 to 1.22 rad) to a percentage (-1 to 1) before the command is sent to the CARLA server. The X axis for all four plots in the controls feedback is the in-game time, in seconds. 5 | -------------------------------------------------------------------------------- /PythonClient/carla/driving_benchmark/experiment.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 2 | # Barcelona (UAB). 
3 | # 4 | # This work is licensed under the terms of the MIT license. 5 | # For a copy, see . 6 | 7 | from carla.settings import CarlaSettings 8 | 9 | 10 | class Experiment(object): 11 | """ 12 | Experiment defines a certain task, under conditions 13 | A task is associated with a set of poses, containing start and end pose. 14 | 15 | Conditions are associated with a carla Settings and describe the following: 16 | 17 | Number Of Vehicles 18 | Number Of Pedestrians 19 | Weather 20 | Random Seed of the agents, describing their behaviour. 21 | 22 | """ 23 | 24 | def __init__(self): 25 | self.Task = 0 26 | self.Conditions = CarlaSettings() 27 | self.Poses = [[]] 28 | self.Repetitions = 1 29 | 30 | def set(self, **kwargs): 31 | for key, value in kwargs.items(): 32 | if not hasattr(self, key): 33 | raise ValueError('Experiment: no key named %r' % key) 34 | setattr(self, key, value) 35 | 36 | if self.Repetitions != 1: 37 | raise NotImplementedError() 38 | 39 | @property 40 | def task(self): 41 | return self.Task 42 | 43 | @property 44 | def conditions(self): 45 | return self.Conditions 46 | 47 | @property 48 | def poses(self): 49 | return self.Poses 50 | 51 | @property 52 | def repetitions(self): 53 | return self.Repetitions 54 | -------------------------------------------------------------------------------- /PythonClient/carla/driving_benchmark/experiment_suites/basic_experiment_suite.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 2 | # Barcelona (UAB). 3 | # 4 | # This work is licensed under the terms of the MIT license. 5 | # For a copy, see . 
6 | 7 | 8 | from __future__ import print_function 9 | 10 | from carla.driving_benchmark.experiment import Experiment 11 | from carla.sensor import Camera 12 | from carla.settings import CarlaSettings 13 | 14 | from .experiment_suite import ExperimentSuite 15 | 16 | 17 | class BasicExperimentSuite(ExperimentSuite): 18 | 19 | @property 20 | def train_weathers(self): 21 | return [1] 22 | 23 | @property 24 | def test_weathers(self): 25 | return [1] 26 | 27 | def build_experiments(self): 28 | """ 29 | Creates the whole set of experiment objects, 30 | The experiments created depends on the selected Town. 31 | 32 | """ 33 | 34 | # We check the town, based on that we define the town related parameters 35 | # The size of the vector is related to the number of tasks, inside each 36 | # task there is also multiple poses ( start end, positions ) 37 | if self._city_name == 'Town01': 38 | poses_tasks = [[[7, 3]], [[138, 17]], [[140, 134]], [[140, 134]]] 39 | vehicles_tasks = [0, 0, 0, 20] 40 | pedestrians_tasks = [0, 0, 0, 50] 41 | else: 42 | poses_tasks = [[[4, 2]], [[37, 76]], [[19, 66]], [[19, 66]]] 43 | vehicles_tasks = [0, 0, 0, 15] 44 | pedestrians_tasks = [0, 0, 0, 50] 45 | 46 | # We set the camera 47 | # This single RGB camera is used on every experiment 48 | 49 | camera = Camera('CameraRGB') 50 | camera.set(FOV=100) 51 | camera.set_image_size(800, 600) 52 | camera.set_position(2.0, 0.0, 1.4) 53 | camera.set_rotation(-15.0, 0, 0) 54 | 55 | # Based on the parameters, creates a vector with experiment objects. 
56 | experiments_vector = [] 57 | for weather in self.weathers: 58 | 59 | for iteration in range(len(poses_tasks)): 60 | poses = poses_tasks[iteration] 61 | vehicles = vehicles_tasks[iteration] 62 | pedestrians = pedestrians_tasks[iteration] 63 | 64 | conditions = CarlaSettings() 65 | conditions.set( 66 | SendNonPlayerAgentsInfo=True, 67 | NumberOfVehicles=vehicles, 68 | NumberOfPedestrians=pedestrians, 69 | WeatherId=weather 70 | 71 | ) 72 | # Add all the cameras that were set for this experiments 73 | conditions.add_sensor(camera) 74 | experiment = Experiment() 75 | experiment.set( 76 | Conditions=conditions, 77 | Poses=poses, 78 | Task=iteration, 79 | Repetitions=1 80 | ) 81 | experiments_vector.append(experiment) 82 | 83 | return experiments_vector 84 | -------------------------------------------------------------------------------- /PythonClient/driving_benchmark_example.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 4 | # Barcelona (UAB). 5 | # 6 | # This work is licensed under the terms of the MIT license. 7 | # For a copy, see . 
8 | 9 | import argparse 10 | import logging 11 | 12 | from carla.driving_benchmark import run_driving_benchmark 13 | from carla.driving_benchmark.experiment_suites import CoRL2017 14 | from carla.driving_benchmark.experiment_suites import BasicExperimentSuite 15 | from carla.agent import ForwardAgent 16 | 17 | if __name__ == '__main__': 18 | 19 | argparser = argparse.ArgumentParser(description=__doc__) 20 | argparser.add_argument( 21 | '-v', '--verbose', 22 | action='store_true', 23 | dest='verbose', 24 | help='print some extra status information') 25 | argparser.add_argument( 26 | '-db', '--debug', 27 | action='store_true', 28 | dest='debug', 29 | help='print debug information') 30 | argparser.add_argument( 31 | '--host', 32 | metavar='H', 33 | default='localhost', 34 | help='IP of the host server (default: localhost)') 35 | argparser.add_argument( 36 | '-p', '--port', 37 | metavar='P', 38 | default=2000, 39 | type=int, 40 | help='TCP port to listen to (default: 2000)') 41 | argparser.add_argument( 42 | '-c', '--city-name', 43 | metavar='C', 44 | default='Town01', 45 | help='The town that is going to be used on benchmark' 46 | + '(needs to match active town in server, options: Town01 or Town02)') 47 | argparser.add_argument( 48 | '-n', '--log_name', 49 | metavar='T', 50 | default='test', 51 | help='The name of the log file to be created by the benchmark' 52 | ) 53 | argparser.add_argument( 54 | '--corl-2017', 55 | action='store_true', 56 | help='If you want to benchmark the corl-2017 instead of the Basic one' 57 | ) 58 | argparser.add_argument( 59 | '--continue-experiment', 60 | action='store_true', 61 | help='If you want to continue the experiment with the same name' 62 | ) 63 | 64 | args = argparser.parse_args() 65 | if args.debug: 66 | log_level = logging.DEBUG 67 | elif args.verbose: 68 | log_level = logging.INFO 69 | else: 70 | log_level = logging.WARNING 71 | 72 | logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level) 73 | 
logging.info('listening to server %s:%s', args.host, args.port) 74 | 75 | # We instantiate a forward agent, a simple policy that just set 76 | # acceleration as 0.9 and steering as zero 77 | agent = ForwardAgent() 78 | 79 | # We instantiate an experiment suite. Basically a set of experiments 80 | # that are going to be evaluated on this benchmark. 81 | if args.corl_2017: 82 | experiment_suite = CoRL2017(args.city_name) 83 | else: 84 | print (' WARNING: running the basic driving benchmark, to run for CoRL 2017' 85 | ' experiment suites, you should run' 86 | ' python driving_benchmark_example.py --corl-2017') 87 | experiment_suite = BasicExperimentSuite(args.city_name) 88 | 89 | # Now actually run the driving_benchmark 90 | run_driving_benchmark(agent, experiment_suite, args.city_name, 91 | args.log_name, args.continue_experiment, 92 | args.host, args.port) 93 | -------------------------------------------------------------------------------- /PythonClient/carla/driving_benchmark/experiment_suites/experiment_suite.py: -------------------------------------------------------------------------------- 1 | # To be redefined on subclasses on how to calculate timeout for an episode 2 | import abc 3 | 4 | 5 | class ExperimentSuite(object): 6 | 7 | def __init__(self, city_name): 8 | 9 | self._city_name = city_name 10 | self._experiments = self.build_experiments() 11 | 12 | def calculate_time_out(self, path_distance): 13 | """ 14 | Function to return the timeout ,in milliseconds, 15 | that is calculated based on distance to goal. 16 | This is the same timeout as used on the CoRL paper. 17 | """ 18 | return ((path_distance / 1000.0) / 10.0) * 3600.0 + 10.0 19 | 20 | def get_number_of_poses_task(self): 21 | """ 22 | Get the number of poses a task have for this benchmark 23 | """ 24 | 25 | # Warning: assumes that all tasks have the same size 26 | 27 | return len(self._experiments[0].poses) 28 | 29 | def get_experiments(self): 30 | """ 31 | Getter for the experiment set. 
32 | """ 33 | return self._experiments 34 | 35 | @property 36 | def dynamic_tasks(self): 37 | """ 38 | Returns the episodes that contain dynamic obstacles 39 | """ 40 | dynamic_tasks = set() 41 | for exp in self._experiments: 42 | if exp.conditions.NumberOfVehicles > 0 or exp.conditions.NumberOfPedestrians > 0: 43 | dynamic_tasks.add(exp.task) 44 | 45 | return list(dynamic_tasks) 46 | 47 | @property 48 | def metrics_parameters(self): 49 | """ 50 | Property to return the parameters for the metric module 51 | Could be redefined depending on the needs of the user. 52 | """ 53 | return { 54 | 55 | 'intersection_offroad': {'frames_skip': 10, 56 | 'frames_recount': 20, 57 | 'threshold': 0.3 58 | }, 59 | 'intersection_otherlane': {'frames_skip': 10, 60 | 'frames_recount': 20, 61 | 'threshold': 0.4 62 | }, 63 | 'collision_other': {'frames_skip': 10, 64 | 'frames_recount': 20, 65 | 'threshold': 400 66 | }, 67 | 'collision_vehicles': {'frames_skip': 10, 68 | 'frames_recount': 30, 69 | 'threshold': 400 70 | }, 71 | 'collision_pedestrians': {'frames_skip': 5, 72 | 'frames_recount': 100, 73 | 'threshold': 300 74 | }, 75 | 76 | } 77 | 78 | @property 79 | def weathers(self): 80 | weathers = set(self.train_weathers) 81 | weathers.update(self.test_weathers) 82 | return weathers 83 | 84 | @abc.abstractmethod 85 | def build_experiments(self): 86 | """ 87 | Returns a set of experiments to be evaluated 88 | Must be redefined in an inherited class. 
89 | 90 | """ 91 | 92 | @abc.abstractproperty 93 | def train_weathers(self): 94 | """ 95 | Return the weathers that are considered as training conditions 96 | """ 97 | 98 | @abc.abstractproperty 99 | def test_weathers(self): 100 | """ 101 | Return the weathers that are considered as testing conditions 102 | """ 103 | -------------------------------------------------------------------------------- /PythonClient/carla/planner/grid.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 2 | # Barcelona (UAB). 3 | # 4 | # This work is licensed under the terms of the MIT license. 5 | # For a copy, see . 6 | 7 | import copy 8 | import numpy as np 9 | 10 | 11 | def angle_between(v1, v2): 12 | return np.arccos(np.dot(v1, v2) / np.linalg.norm(v1) / np.linalg.norm(v2)) 13 | 14 | 15 | class Grid(object): 16 | 17 | def __init__(self, graph): 18 | 19 | self._graph = graph 20 | self._structure = self._make_structure() 21 | self._walls = self._make_walls() 22 | 23 | def search_on_grid(self, x, y): 24 | visit = [[0, 1], [0, -1], [1, 0], [1, 1], 25 | [1, -1], [-1, 0], [-1, 1], [-1, -1]] 26 | c_x, c_y = x, y 27 | scale = 1 28 | while self._structure[c_x, c_y] != 0: 29 | for offset in visit: 30 | c_x, c_y = x + offset[0] * scale, y + offset[1] * scale 31 | 32 | if c_x >= 0 and c_x < self._graph.get_resolution()[ 33 | 0] and c_y >= 0 and c_y < self._graph.get_resolution()[1]: 34 | if self._structure[c_x, c_y] == 0: 35 | break 36 | else: 37 | c_x, c_y = x, y 38 | scale += 1 39 | 40 | return c_x, c_y 41 | def get_walls(self): 42 | return self._walls 43 | 44 | def get_wall_source(self, pos, pos_ori, target): 45 | 46 | free_nodes = self._get_adjacent_free_nodes(pos) 47 | # print self._walls 48 | final_walls = copy.copy(self._walls) 49 | # print final_walls 50 | heading_start = np.array([pos_ori[0], pos_ori[1]]) 51 | for adj in free_nodes: 52 | 53 | start_to_goal = 
np.array([adj[0] - pos[0], adj[1] - pos[1]]) 54 | angle = angle_between(heading_start, start_to_goal) 55 | if (angle > 1.6 and adj != target): 56 | final_walls.add((adj[0], adj[1])) 57 | 58 | return final_walls 59 | 60 | def get_wall_target(self, pos, pos_ori, source): 61 | 62 | free_nodes = self._get_adjacent_free_nodes(pos) 63 | final_walls = copy.copy(self._walls) 64 | heading_start = np.array([pos_ori[0], pos_ori[1]]) 65 | for adj in free_nodes: 66 | 67 | start_to_goal = np.array([adj[0] - pos[0], adj[1] - pos[1]]) 68 | angle = angle_between(heading_start, start_to_goal) 69 | 70 | if (angle < 1.0 and adj != source): 71 | final_walls.add((adj[0], adj[1])) 72 | 73 | return final_walls 74 | 75 | def _draw_line(self, grid, xi, yi, xf, yf): 76 | 77 | if xf < xi: 78 | aux = xi 79 | xi = xf 80 | xf = aux 81 | 82 | if yf < yi: 83 | aux = yi 84 | yi = yf 85 | yf = aux 86 | 87 | for i in range(xi, xf + 1): 88 | 89 | for j in range(yi, yf + 1): 90 | grid[i, j] = 0.0 91 | 92 | return grid 93 | 94 | def _make_structure(self): 95 | structure = np.ones( 96 | (self._graph.get_resolution()[0], 97 | self._graph.get_resolution()[1])) 98 | 99 | for key, connections in self._graph.get_edges().items(): 100 | 101 | # draw a line 102 | for con in connections: 103 | # print key[0],key[1],con[0],con[1] 104 | structure = self._draw_line( 105 | structure, key[0], key[1], con[0], con[1]) 106 | # print grid 107 | return structure 108 | 109 | def _make_walls(self): 110 | walls = set() 111 | 112 | for i in range(self._structure.shape[0]): 113 | 114 | for j in range(self._structure.shape[1]): 115 | if self._structure[i, j] == 1.0: 116 | walls.add((i, j)) 117 | 118 | return walls 119 | 120 | def _get_adjacent_free_nodes(self, pos): 121 | """ Eight nodes in total """ 122 | visit = [[0, 1], [0, -1], [1, 0], [1, 1], 123 | [1, -1], [-1, 0], [-1, 1], [-1, -1]] 124 | 125 | adjacent = set() 126 | for offset in visit: 127 | node = (pos[0] + offset[0], pos[1] + offset[1]) 128 | 129 | if (node[0] >= 0 
and node[0] < self._graph.get_resolution()[0] 130 | and node[1] >= 0 and node[1] < self._graph.get_resolution()[1]): 131 | 132 | if self._structure[node[0], node[1]] == 0.0: 133 | adjacent.add(node) 134 | 135 | return adjacent 136 | -------------------------------------------------------------------------------- /PythonClient/carla/planner/graph.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 2 | # Barcelona (UAB). 3 | # 4 | # This work is licensed under the terms of the MIT license. 5 | # For a copy, see . 6 | 7 | import math 8 | import numpy as np 9 | 10 | 11 | def string_to_node(string): 12 | vec = string.split(',') 13 | return (int(vec[0]), int(vec[1])) 14 | 15 | 16 | def string_to_floats(string): 17 | vec = string.split(',') 18 | return (float(vec[0]), float(vec[1]), float(vec[2])) 19 | 20 | 21 | def sldist(c1, c2): 22 | return math.sqrt((c2[0] - c1[0]) ** 2 + (c2[1] - c1[1]) ** 2) 23 | 24 | 25 | def sldist3(c1, c2): 26 | return math.sqrt((c2[0] - c1[0]) ** 2 + (c2[1] - c1[1]) 27 | ** 2 + (c2[2] - c1[2]) ** 2) 28 | 29 | 30 | class Graph(object): 31 | """ 32 | A simple directed, weighted graph 33 | """ 34 | 35 | def __init__(self, graph_file=None, node_density=50): 36 | 37 | self._nodes = set() 38 | self._angles = {} 39 | self._edges = {} 40 | self._distances = {} 41 | self._node_density = node_density 42 | 43 | if graph_file is not None: 44 | with open(graph_file, 'r') as f: 45 | # Skipe the first four lines that 46 | lines_after_4 = f.readlines()[4:] 47 | 48 | # the graph resolution. 
49 | linegraphres = lines_after_4[0] 50 | self._resolution = string_to_node(linegraphres) 51 | for line in lines_after_4[1:]: 52 | 53 | from_node, to_node, d = line.split() 54 | from_node = string_to_node(from_node) 55 | to_node = string_to_node(to_node) 56 | 57 | if from_node not in self._nodes: 58 | self.add_node(from_node) 59 | if to_node not in self._nodes: 60 | self.add_node(to_node) 61 | 62 | self._edges.setdefault(from_node, []) 63 | self._edges[from_node].append(to_node) 64 | self._distances[(from_node, to_node)] = float(d) 65 | 66 | def add_node(self, value): 67 | self._nodes.add(value) 68 | 69 | def make_orientations(self, node, heading): 70 | 71 | import collections 72 | distance_dic = {} 73 | for node_iter in self._nodes: 74 | if node_iter != node: 75 | distance_dic[sldist(node, node_iter)] = node_iter 76 | 77 | distance_dic = collections.OrderedDict( 78 | sorted(distance_dic.items())) 79 | 80 | self._angles[node] = heading 81 | for _, v in distance_dic.items(): 82 | start_to_goal = np.array([node[0] - v[0], node[1] - v[1]]) 83 | 84 | print(start_to_goal) 85 | 86 | self._angles[v] = start_to_goal / np.linalg.norm(start_to_goal) 87 | 88 | def add_edge(self, from_node, to_node, distance): 89 | self._add_edge(from_node, to_node, distance) 90 | 91 | def _add_edge(self, from_node, to_node, distance): 92 | self._edges.setdefault(from_node, []) 93 | self._edges[from_node].append(to_node) 94 | self._distances[(from_node, to_node)] = distance 95 | 96 | def get_resolution(self): 97 | return self._resolution 98 | def get_edges(self): 99 | return self._edges 100 | 101 | def intersection_nodes(self): 102 | 103 | intersect_nodes = [] 104 | for node in self._nodes: 105 | if len(self._edges[node]) > 2: 106 | intersect_nodes.append(node) 107 | 108 | return intersect_nodes 109 | 110 | # This contains also the non-intersection turns... 

    def turn_nodes(self):
        # Returns every node in the graph (includes non-intersection turns).
        return self._nodes

    def plot_ori(self, c):
        """Scatter-plot the nodes in color *c*, drawing a short green
        segment at each node showing its stored orientation."""
        from matplotlib import collections as mc

        import matplotlib.pyplot as plt
        line_len = 1

        lines = [[(p[0], p[1]), (p[0] + line_len * self._angles[p][0],
                                 p[1] + line_len * self._angles[p][1])] for p in self._nodes]
        lc = mc.LineCollection(lines, linewidth=2, color='green')
        _, ax = plt.subplots()
        ax.add_collection(lc)

        ax.autoscale()
        ax.margins(0.1)

        xs = [p[0] for p in self._nodes]
        ys = [p[1] for p in self._nodes]

        plt.scatter(xs, ys, color=c)

    def plot(self, c):
        """Scatter-plot all graph nodes in color *c*."""
        import matplotlib.pyplot as plt
        xs = [p[0] for p in self._nodes]
        ys = [p[1] for p in self._nodes]

        plt.scatter(xs, ys, color=c)
--------------------------------------------------------------------------------
/PythonClient/view_start_positions.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3

# Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.

"""Connects with a CARLA simulator and displays the available start positions
for the current map."""

from __future__ import print_function

import argparse
import logging
import sys
import time

import matplotlib.image as mpimg
import matplotlib.pyplot as plt

from matplotlib.patches import Circle

from carla.client import make_carla_client
from carla.planner.map import CarlaMap
from carla.settings import CarlaSettings
from carla.tcp import TCPConnectionError


def view_start_positions(args):
    # We assume the CARLA server is already waiting for a client to connect at
    # host:port. The same way as in the client example.
    with make_carla_client(args.host, args.port) as client:
        print('CarlaClient connected')

        # We load the default settings to the client.
        scene = client.load_settings(CarlaSettings())
        print("Received the start positions")

        try:
            image = mpimg.imread('carla/planner/%s.png' % scene.map_name)
            carla_map = CarlaMap(scene.map_name, 0.1653, 50)
        except IOError as exception:
            logging.error(exception)
            logging.error('Cannot find map "%s"', scene.map_name)
            sys.exit(1)

        fig, ax = plt.subplots(1)

        ax.imshow(image)

        if args.positions == 'all':
            positions_to_plot = range(len(scene.player_start_spots))
        else:
            positions_to_plot = map(int, args.positions.split(','))

        for position in positions_to_plot:
            # Check if position is valid
            if position >= len(scene.player_start_spots):
                raise RuntimeError('Selected position is invalid')

            # Convert world to pixel coordinates
            pixel = carla_map.convert_to_pixel([scene.player_start_spots[position].location.x,
                                                scene.player_start_spots[position].location.y,
                                                scene.player_start_spots[position].location.z])

            circle = Circle((pixel[0], pixel[1]), 12, color='r', label='A point')
            ax.add_patch(circle)

            if not args.no_labels:
                plt.text(pixel[0], pixel[1], str(position), size='x-small')

        plt.axis('off')
        plt.show()

        fig.savefig('town_positions.pdf', orientation='landscape', bbox_inches='tight')


def main():
    argparser = argparse.ArgumentParser(description=__doc__)
    argparser.add_argument(
        '-v', '--verbose',
        action='store_true',
        dest='debug',
        help='print debug information')
    argparser.add_argument(
        '--host',
        metavar='H',
        default='localhost',
        help='IP of the host server (default: localhost)')
    argparser.add_argument(
        '-p', '--port',
        metavar='P',
        default=2000,
        type=int,
        help='TCP port to listen to (default: 2000)')
    argparser.add_argument(
        '-pos', '--positions',
        metavar='P',
        default='all',
        help='Indices of the positions that you want to plot on the map. '
             'The indices must be separated by commas (default = all positions)')
    argparser.add_argument(
        '--no-labels',
        action='store_true',
        help='do not display position indices')

    args = argparser.parse_args()

    log_level = logging.DEBUG if args.debug else logging.INFO
    logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)

    logging.info('listening to server %s:%s', args.host, args.port)

    # Keep retrying until the simulator accepts the connection; an invalid
    # position (RuntimeError) aborts instead of retrying.
    while True:
        try:

            view_start_positions(args)
            print('Done.')
            return

        except TCPConnectionError as error:
            logging.error(error)
            time.sleep(1)
        except RuntimeError as error:
            logging.error(error)
            break


if __name__ == '__main__':

    try:
        main()
    except KeyboardInterrupt:
        print('\nCancelled by user. Bye!')
--------------------------------------------------------------------------------
/PythonClient/carla/planner/city_track.py:
--------------------------------------------------------------------------------
# Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.

from carla.planner.graph import sldist

from carla.planner.astar import AStar
from carla.planner.map import CarlaMap


class CityTrack(object):

    def __init__(self, city_name):

        # These values are fixed for every city.
        self._node_density = 50.0
        self._pixel_density = 0.1643

        self._map = CarlaMap(city_name, self._pixel_density, self._node_density)

        # NOTE(review): this instance appears unused; compute_route creates
        # its own AStar objects. Confirm before removing.
        self._astar = AStar()

        # Refers to the start position of the previous route computation
        self._previous_node = []

        # The current computed route
        self._route = None

    def project_node(self, position):
        """
        Projecting the graph node into the city road
        """

        node = self._map.convert_to_node(position)

        # To change the orientation with respect to the map standards

        node = tuple([int(x) for x in node])

        # Set to zero if it is less than zero.

        node = (max(0, node[0]), max(0, node[1]))
        # Clamp to the grid resolution so the node is always on the map.
        node = (min(self._map.get_graph_resolution()[0] - 1, node[0]),
                min(self._map.get_graph_resolution()[1] - 1, node[1]))

        node = self._map.search_on_grid(node)

        return node

    def get_intersection_nodes(self):
        return self._map.get_intersection_nodes()

    def get_pixel_density(self):
        return self._pixel_density

    def get_node_density(self):
        return self._node_density

    def is_at_goal(self, source, target):
        return source == target

    def is_at_new_node(self, current_node):
        return current_node != self._previous_node

    def is_away_from_intersection(self, current_node):
        # "Away" means more than one grid cell from the nearest intersection.
        return self._closest_intersection_position(current_node) > 1

    def is_far_away_from_route_intersection(self, current_node):
        # CHECK FOR THE EMPTY CASE
        if self._route is None:
            raise RuntimeError('Impossible to find route'
                               + ' Current planner is limited'
                               + ' Try to select start points away from intersections')

        return self._closest_intersection_route_position(current_node,
                                                         self._route) > 4

    def compute_route(self, node_source, source_ori, node_target, target_ori):
        # Run A* on the directed grid between source and target nodes.
        self._previous_node = node_source

        a_star = AStar()
        a_star.init_grid(self._map.get_graph_resolution()[0],
                         self._map.get_graph_resolution()[1],
                         self._map.get_walls_directed(node_source, source_ori,
                                                      node_target, target_ori), node_source,
                         node_target)

        route = a_star.solve()

        # JuSt a Corner Case
        # Clean this to avoid having to use this function
        if route is None:
            # Fall back to the undirected wall set when the directed
            # search finds no path.
            a_star = AStar()
            a_star.init_grid(self._map.get_graph_resolution()[0],
                             self._map.get_graph_resolution()[1], self._map.get_walls(),
                             node_source, node_target)

            route = a_star.solve()

        self._route = route

        return route

    def get_distance_closest_node_route(self, pos, route):
        # Distance from pos to the closest intersection node on the route;
        # if the route contains no intersections, distance to its last node.
        distance = []

        for node_iter in route:

            if node_iter in self._map.get_intersection_nodes():
                distance.append(sldist(node_iter, pos))

        if not distance:
            return sldist(route[-1], pos)
        return sorted(distance)[0]

    def _closest_intersection_position(self, current_node):
        # Minimum distance from current_node to any intersection node.
        distance_vector = []
        for node_iterator in self._map.get_intersection_nodes():
            distance_vector.append(sldist(node_iterator, current_node))

        return sorted(distance_vector)[0]

    def _closest_intersection_route_position(self, current_node, route):
        # NOTE(review): the outer loop ignores the route nodes, so the same
        # distances are recomputed len(route) times and the result equals
        # _closest_intersection_position(current_node). Verify intent.
        distance_vector = []
        for _ in route:
            for node_iterator in self._map.get_intersection_nodes():
                distance_vector.append(sldist(node_iterator, current_node))

        return sorted(distance_vector)[0]
--------------------------------------------------------------------------------
/PythonClient/carla/controller/utils.py:
--------------------------------------------------------------------------------
import numpy as np

from math import pi
from numpy import linalg as LA


def project_point(vector, point):
    """Given a line vector and a point, projects the point
    on the line, resulting to a point that is closest to
    the given point.

    Args:
        vector: A 2D array of points in the form [[x1, y1], [x2, y2]]
        point: A 2D point in the form [x, y]

    Returns:
        closest_point: A 2D point in the form [x, y] that lies on
        given vector.
    """

    p0 = vector[0]
    p1 = vector[1]

    v1 = np.subtract(point, p0)
    v2 = np.subtract(p1, p0)

    # Scalar projection of v1 onto v2, normalized by the segment length.
    distance = np.dot(v1, v2) / np.power(LA.norm(v2), 2)

    # Clamp the projection to the segment end points.
    if distance < 0.0:
        closest_point = p0
    elif distance > 1.0:
        closest_point = p1
    else:
        closest_point = p0 + distance * v2

    return closest_point


def next_carrot(vector, pose_2d, lookahead_dis):
    """Given a line vector, position and look-ahead distance,
    determine the next carrot point.

    Args:
        vector: A 2D array of points in the form [[x1, y1], [x2, y2]]
        pose_2d: A 2D point in the form [x, y]
        lookahead_dis: A float distance determining how far ahead
            we want to look.

    Returns:
        carrot: A 2D point in the form [x, y].
    """
    p0 = vector[0]
    p1 = vector[1]

    projected_point = project_point(vector, pose_2d)

    # Calculate unit vector of trajectory
    vec_diff = np.subtract(p1, p0)
    unit_vec = vec_diff / LA.norm(vec_diff)

    # Step lookahead_dis ahead of the projection along the trajectory.
    carrot = projected_point + lookahead_dis * unit_vec
    return carrot


def calculate_delta(position, carrot, delta_max):
    """Given a 2D position and carrot pose, determine the steering
    angle delta.
    This angle should be constrained by `delta_max`, determined based
    on the model. For instance for a car, this will depend on the properties
    of the car (for instance using Ackermann steering geometry you can
    calculate the center of the turning circle).

    Args:
        position: A 2D array of points in the form [[x1, y1], [x2, y2]]
        carrot: A 2D point in the form [x, y]
        delta_max: A float distance determining how far ahead we want to look.

    Returns:
        delta: A float representing the steering angle in unit radians.
    """

    # Heading angle is carried as the third element of the pose.
    theta = position[2]

    # Calculate the angle between position and carrot
    x = carrot[0] - position[0]
    y = carrot[1] - position[1]
    angle_of_vec = np.arctan2(y, x)

    # Limit delta to pi and -pi
    delta = -(theta - angle_of_vec)
    delta = np.mod(delta + pi, 2 * pi) - pi

    # Limit delta to steering angle max
    if delta > delta_max:
        delta = delta_max
    elif delta < -delta_max:
        delta = -delta_max

    return delta


def update_waypoint_trajectory(waypoints, waypoint_counter):
    """Given a list of waypoints, and waypoint_counter, determine
    the next set up waypoints.

    Args:
        waypoints: An array of waypoints in the format [wp1, wp2, ..., wpn]
            where each wp is a 2D point in the form [x, y].
        waypoint_counter: A counter representing a pointer to the current
            waypoint. This should not exceed the total size of waypoint_counter.

    Returns:
        wp1 : First waypoint of the updated trajectory.
        wp2: Second waypoint of the updated trajectory.
        update_trajectory: A flag to determine whether we should continue.
    """

    update_trajectory = True
    if waypoint_counter >= len(waypoints):
        # Counter past the end: signal the caller to stop.
        print('Ran out of waypoints.')
        update_trajectory = False
        wp1 = wp2 = None

    elif waypoint_counter == len(waypoints) - 1:
        # Grab the last waypoint and the initial to get back
        # to the starting point
        wp1 = waypoints[waypoint_counter]
        wp2 = waypoints[0]

    else:
        wp1 = waypoints[waypoint_counter]
        wp2 = waypoints[waypoint_counter + 1]

    return wp1, wp2, update_trajectory


def calculate_distance(point1, point2):
    """Given two 2D points, calculate the distance.

    Args:
        point1: A 2D array in the form [x, y]
        point2: A 2D array in the form [x, y]

    Returns:
        distance: A float representing the distance between
        the points.
    """

    # Plain Euclidean distance.
    distance = np.sqrt(np.power((point2[1] - point1[1]), 2) +
                       np.power((point2[0] - point1[0]), 2))
    return distance
--------------------------------------------------------------------------------
/PythonClient/carla/driving_benchmark/results_printer.py:
--------------------------------------------------------------------------------
import os
import numpy as np
import json


def print_summary(metrics_summary, weathers, path):
    """
    We plot the summary of the testing for the set selected weathers.

    We take the raw data and print the way it was described on CORL 2017 paper

    """

    # Improve readability by adding a weather dictionary
    weather_name_dict = {1: 'Clear Noon', 3: 'After Rain Noon',
                         6: 'Heavy Rain Noon', 8: 'Clear Sunset',
                         4: 'Cloudy After Rain', 14: 'Soft Rain Sunset'}

    # First we write the entire dictionary on the benchmark folder.
    with open(os.path.join(path, 'metrics.json'), 'w') as fo:
        fo.write(json.dumps(metrics_summary))

    # Second we plot the metrics that are already ready by averaging

    metrics_to_average = [
        'episodes_fully_completed',
        'episodes_completion'

    ]
    # We compute the number of episodes based on size of average completion
    number_of_episodes = len(list(metrics_summary['episodes_fully_completed'].items())[0][1])

    for metric in metrics_to_average:

        if metric == 'episodes_completion':
            print ("Average Percentage of Distance to Goal Travelled ")
        else:
            print ("Percentage of Successful Episodes")

        print ("")
        values = metrics_summary[metric]

        # One accumulator per task, averaged over the selected weathers.
        metric_sum_values = np.zeros(number_of_episodes)
        for weather, tasks in values.items():
            if weather in set(weathers):
                print(' Weather: ', weather_name_dict[weather])
                count = 0
                for t in tasks:
                    # if isinstance(t, np.ndarray) or isinstance(t, list):
                    if t == []:
                        print(' Metric Not Computed')
                    else:
                        print(' Task:', count, ' -> ', float(sum(t)) / float(len(t)))
                        # Each weather contributes 1/len(weathers) of the mean.
                        metric_sum_values[count] += (float(sum(t)) / float(len(t))) * 1.0 / float(
                            len(weathers))

                    count += 1

        print (' Average Between Weathers')
        for i in range(len(metric_sum_values)):
            print(' Task ', i, ' -> ', metric_sum_values[i])
        print ("")

    infraction_metrics = [
        'collision_pedestrians',
        'collision_vehicles',
        'collision_other',
        'intersection_offroad',
        'intersection_otherlane'

    ]

    # We need to collect the total number of kilometers for each task

    for metric in infraction_metrics:
        values_driven = metrics_summary['driven_kilometers']
        values = metrics_summary[metric]
        metric_sum_values = np.zeros(number_of_episodes)
        summed_driven_kilometers = np.zeros(number_of_episodes)

        if metric == 'collision_pedestrians':
            print ('Avg. Kilometers driven before a collision to a PEDESTRIAN')
        elif metric == 'collision_vehicles':
            print('Avg. Kilometers driven before a collision to a VEHICLE')
        elif metric == 'collision_other':
            print('Avg. Kilometers driven before a collision to a STATIC OBSTACLE')
        elif metric == 'intersection_offroad':
            print('Avg. Kilometers driven before going OUTSIDE OF THE ROAD')
        else:
            print('Avg. Kilometers driven before invading the OPPOSITE LANE')

        # print (zip(values.items(), values_driven.items()))
        for items_metric, items_driven in zip(values.items(), values_driven.items()):
            weather = items_metric[0]
            tasks = items_metric[1]
            tasks_driven = items_driven[1]

            if weather in set(weathers):
                print(' Weather: ', weather_name_dict[weather])
                count = 0
                for t, t_driven in zip(tasks, tasks_driven):
                    # if isinstance(t, np.ndarray) or isinstance(t, list):
                    if t == []:
                        print('Metric Not Computed')
                    else:
                        if sum(t) > 0:
                            print(' Task ', count, ' -> ', t_driven / float(sum(t)))
                        else:
                            # No infraction recorded: report the distance as
                            # a lower bound.
                            print(' Task ', count, ' -> more than', t_driven)

                        metric_sum_values[count] += float(sum(t))
                        summed_driven_kilometers[count] += t_driven

                    count += 1
        print (' Average Between Weathers')
        for i in range(len(metric_sum_values)):
            if metric_sum_values[i] == 0:
                print(' Task ', i, ' -> more than ', summed_driven_kilometers[i])
            else:
                print(' Task ', i, ' -> ', summed_driven_kilometers[i] / metric_sum_values[i])
        print ("")

    print("")
    print("")
--------------------------------------------------------------------------------
/PythonClient/carla/planner/astar.py:
--------------------------------------------------------------------------------
# Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.

import heapq


class Cell(object):
    def __init__(self, x, y, reachable):
        """Initialize new cell.

        @param reachable is cell reachable? not a wall?
        @param x cell x coordinate
        @param y cell y coordinate
        @param g cost to move from the starting cell to this cell.
        @param h estimation of the cost to move from this cell
                 to the ending cell.
        @param f f = g + h
        """
        self.reachable = reachable
        self.x = x
        self.y = y
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __lt__(self, other):
        # Tie-breaker used by the heap when two entries share the same f.
        return self.g < other.g


class AStar(object):
    def __init__(self):
        # open list
        self.opened = []
        heapq.heapify(self.opened)
        # visited cells list
        self.closed = set()
        # grid cells
        self.cells = []
        self.grid_height = None
        self.grid_width = None
        self.start = None
        self.end = None

    def init_grid(self, width, height, walls, start, end):
        """Prepare grid cells, walls.

        @param width grid's width.
        @param height grid's height.
        @param walls list of wall x,y tuples.
        @param start grid starting point x,y tuple.
        @param end grid ending point x,y tuple.
        """
        self.grid_height = height
        self.grid_width = width
        # Cells are stored in x-major order; see get_cell().
        for x in range(self.grid_width):
            for y in range(self.grid_height):
                if (x, y) in walls:
                    reachable = False
                else:
                    reachable = True
                self.cells.append(Cell(x, y, reachable))
        self.start = self.get_cell(*start)
        self.end = self.get_cell(*end)

    def get_heuristic(self, cell):
        """Compute the heuristic value H for a cell.

        Distance between this cell and the ending cell multiply by 10.

        @returns heuristic value H
        """
        # Manhattan distance, scaled to match the move cost of 10.
        return 10 * (abs(cell.x - self.end.x) + abs(cell.y - self.end.y))

    def get_cell(self, x, y):
        """Returns a cell from the cells list.

        @param x cell x coordinate
        @param y cell y coordinate
        @returns cell
        """
        return self.cells[x * self.grid_height + y]

    def get_adjacent_cells(self, cell):
        """Returns adjacent cells to a cell.

        Clockwise starting from the one on the right.

        @param cell get adjacent cells for this cell
        @returns adjacent cells list.
        """
        cells = []
        if cell.x < self.grid_width - 1:
            cells.append(self.get_cell(cell.x + 1, cell.y))
        if cell.y > 0:
            cells.append(self.get_cell(cell.x, cell.y - 1))
        if cell.x > 0:
            cells.append(self.get_cell(cell.x - 1, cell.y))
        if cell.y < self.grid_height - 1:
            cells.append(self.get_cell(cell.x, cell.y + 1))
        return cells

    def get_path(self):
        # Walk the parent links back from the end cell, then reverse.
        cell = self.end
        path = [(cell.x, cell.y)]
        while cell.parent is not self.start:
            cell = cell.parent
            path.append((cell.x, cell.y))

        path.append((self.start.x, self.start.y))
        path.reverse()
        return path

    def update_cell(self, adj, cell):
        """Update adjacent cell.

        @param adj adjacent cell to current cell
        @param cell current cell being processed
        """
        adj.g = cell.g + 10
        adj.h = self.get_heuristic(adj)
        adj.parent = cell
        adj.f = adj.h + adj.g

    def solve(self):
        """Solve maze, find path to ending cell.

        @returns path or None if not found.
        """
        # add starting cell to open heap queue
        heapq.heappush(self.opened, (self.start.f, self.start))
        while len(self.opened):
            # pop cell from heap queue
            _, cell = heapq.heappop(self.opened)
            # add cell to closed list so we don't process it twice
            self.closed.add(cell)
            # if ending cell, return found path
            if cell is self.end:
                return self.get_path()
            # get adjacent cells for cell
            adj_cells = self.get_adjacent_cells(cell)
            for adj_cell in adj_cells:
                if adj_cell.reachable and adj_cell not in self.closed:
                    # NOTE(review): this membership test uses the f value the
                    # cell had when it was pushed; if f changed since then the
                    # cell is treated as not-in-open and pushed again. Verify.
                    if (adj_cell.f, adj_cell) in self.opened:
                        # if adj cell in open list, check if current path is
                        # better than the one previously found
                        # for this adj cell.
                        if adj_cell.g > cell.g + 10:
                            self.update_cell(adj_cell, cell)
                    else:
                        self.update_cell(adj_cell, cell)
                        # add adj cell to open list
                        heapq.heappush(self.opened, (adj_cell.f, adj_cell))
        # Falls through with no path found: implicitly returns None.
--------------------------------------------------------------------------------
/PythonClient/carla/planner/map.py:
--------------------------------------------------------------------------------
# Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.

"""Class used for operating the city map."""

import math
import os

try:
    import numpy as np
except ImportError:
    raise RuntimeError('cannot import numpy, make sure numpy package is installed')

try:
    from PIL import Image
except ImportError:
    raise RuntimeError('cannot import PIL, make sure pillow package is installed')

from carla.planner.graph import Graph
from carla.planner.graph import sldist
from carla.planner.grid import Grid
from carla.planner.converter import Converter


def color_to_angle(color):
    # Map a 0-255 channel value onto [0, 2*pi) radians.
    return (float(color) / 255.0) * 2 * math.pi


class CarlaMap(object):

    def __init__(self, city, pixel_density, node_density):
        # Map assets live next to this module: <city>.txt graph plus the
        # rendered map, lane and center images.
        dir_path = os.path.dirname(__file__)
        city_file = os.path.join(dir_path, city + '.txt')

        city_map_file = os.path.join(dir_path, city + '.png')
        city_map_file_lanes = os.path.join(dir_path, city + 'Lanes.png')
        city_map_file_center = os.path.join(dir_path, city + 'Central.png')

        # The built graph. This is the exact same graph that unreal builds. This
        # is a generic structure used for many cases
        self._graph = Graph(city_file, node_density)

        self._pixel_density = pixel_density
        self._grid = Grid(self._graph)
        # The number of game units per pixel. For now this is fixed.

        self._converter = Converter(city_file, pixel_density, node_density)

        # Load the lanes image
        self.map_image_lanes = Image.open(city_map_file_lanes)
        self.map_image_lanes.load()
        self.map_image_lanes = np.asarray(self.map_image_lanes, dtype="int32")
        # Load the image
        self.map_image = Image.open(city_map_file)
        self.map_image.load()
        self.map_image = np.asarray(self.map_image, dtype="int32")

        # Load the lanes image
        self.map_image_center = Image.open(city_map_file_center)
        self.map_image_center.load()
        self.map_image_center = np.asarray(self.map_image_center, dtype="int32")

    def get_graph_resolution(self):

        return self._graph.get_resolution()

    def get_map(self, height=None):
        # Optionally resize to the requested height, keeping aspect ratio.
        if height is not None:
            img = Image.fromarray(self.map_image.astype(np.uint8))

            aspect_ratio = height / float(self.map_image.shape[0])

            # NOTE(review): Image.ANTIALIAS is deprecated/removed in modern
            # Pillow (use Image.LANCZOS) — confirm the pinned Pillow version.
            img = img.resize((int(aspect_ratio * self.map_image.shape[1]), height), Image.ANTIALIAS)
            img.load()
            return np.asarray(img, dtype="int32")
        return np.fliplr(self.map_image)

    def get_map_lanes(self, size=None):
        if size is not None:
            img = Image.fromarray(self.map_image_lanes.astype(np.uint8))
            img = img.resize((size[1], size[0]), Image.ANTIALIAS)
            img.load()
            return np.fliplr(np.asarray(img, dtype="int32"))
        return np.fliplr(self.map_image_lanes)

    def get_lane_orientation(self, world):
        """Get the lane orientation of a certain world position."""
        pixel = self.convert_to_pixel(world)

        # Orientation angle is encoded in channel 2 of the lanes image.
        ori = self.map_image_lanes[int(pixel[1]), int(pixel[0]), 2]
        ori = color_to_angle(ori)

        return (-math.cos(ori), -math.sin(ori))

    def convert_to_node(self, input_data):
        """
        Receives a data type (Can Be Pixel or World )
        :param input_data: position in some coordinate
        :return: A node object
        """
        return self._converter.convert_to_node(input_data)

    def convert_to_pixel(self, input_data):
        """
        Receives a data type (Can Be Node or World )
        :param input_data: position in some coordinate
        :return: A node object
        """
        return self._converter.convert_to_pixel(input_data)

    def convert_to_world(self, input_data):
        """
        Receives a data type (Can Be Pixel or Node )
        :param input_data: position in some coordinate
        :return: A node object
        """
        return self._converter.convert_to_world(input_data)

    def get_walls_directed(self, node_source, source_ori, node_target, target_ori):
        """
        This is the most hacky function. Instead of planning on two ways,
        we basically use a one way road and interrupt the other road by adding
        an artificial wall.

        """

        final_walls = self._grid.get_wall_source(node_source, source_ori, node_target)

        final_walls = final_walls.union(self._grid.get_wall_target(
            node_target, target_ori, node_source))
        return final_walls

    def get_walls(self):

        return self._grid.get_walls()

    def get_distance_closest_node(self, pos):
        # Minimum distance from pos to any intersection node of the graph.
        distance = []
        for node_iter in self._graph.intersection_nodes():
            distance.append(sldist(node_iter, pos))

        return sorted(distance)[0]

    def get_intersection_nodes(self):
        return self._graph.intersection_nodes()

    def search_on_grid(self,node):
        return self._grid.search_on_grid(node[0], node[1])
--------------------------------------------------------------------------------
/PythonClient/carla/driving_benchmark/experiment_suites/corl_2017.py:
--------------------------------------------------------------------------------
# Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.

# CORL experiment set.

from __future__ import print_function

from carla.driving_benchmark.experiment import Experiment
from carla.sensor import Camera
from carla.settings import CarlaSettings
from carla.driving_benchmark.experiment_suites.experiment_suite import ExperimentSuite


class CoRL2017(ExperimentSuite):

    @property
    def train_weathers(self):
        # Weather ids used as training conditions.
        return [1, 3, 6, 8]

    @property
    def test_weathers(self):
        # Weather ids held out for testing.
        return [4, 14]

    def _poses_town01(self):
        """
        Each matrix is a new task. We have all the four tasks

        """

        def _poses_straight():
            return [[36, 40], [39, 35], [110, 114], [7, 3], [0, 4],
                    [68, 50], [61, 59], [47, 64], [147, 90], [33, 87],
                    [26, 19], [80, 76], [45, 49], [55, 44], [29, 107],
                    [95, 104], [84, 34], [53, 67], [22, 17], [91, 148],
                    [20, 107], [78, 70], [95, 102], [68, 44], [45, 69]]

        def _poses_one_curve():
            return [[138, 17], [47, 16], [26, 9], [42, 49], [140, 124],
                    [85, 98], [65, 133], [137, 51], [76, 66], [46, 39],
                    [40, 60], [0, 29], [4, 129], [121, 140], [2, 129],
                    [78, 44], [68, 85], [41, 102], [95, 70], [68, 129],
                    [84, 69], [47, 79], [110, 15], [130, 17], [0, 17]]

        def _poses_navigation():
            return [[105, 29], [27, 130], [102, 87], [132, 27], [24, 44],
                    [96, 26], [34, 67], [28, 1], [140, 134], [105, 9],
                    [148, 129], [65, 18], [21, 16], [147, 97], [42, 51],
                    [30, 41], [18, 107], [69, 45], [102, 95], [18, 145],
                    [111, 64], [79, 45], [84, 69], [73, 31], [37, 81]]

        # Tasks: straight, one turn, navigation, dynamic navigation
        # (navigation poses reused for the dynamic task).
        return [_poses_straight(),
                _poses_one_curve(),
                _poses_navigation(),
                _poses_navigation()]

    def _poses_town02(self):

        def _poses_straight():
            return [[38, 34], [4, 2], [12, 10], [62, 55], [43, 47],
                    [64, 66], [78, 76], [59, 57], [61, 18], [35, 39],
                    [12, 8], [0, 18], [75, 68], [54, 60], [45, 49],
                    [46, 42], [53, 46], [80, 29], [65, 63], [0, 81],
                    [54, 63], [51, 42], [16, 19], [17, 26], [77, 68]]

        def _poses_one_curve():
            return [[37, 76], [8, 24], [60, 69], [38, 10], [21, 1],
                    [58, 71], [74, 32], [44, 0], [71, 16], [14, 24],
                    [34, 11], [43, 14], [75, 16], [80, 21], [3, 23],
                    [75, 59], [50, 47], [11, 19], [77, 34], [79, 25],
                    [40, 63], [58, 76], [79, 55], [16, 61], [27, 11]]

        def _poses_navigation():
            return [[19, 66], [79, 14], [19, 57], [23, 1],
                    [53, 76], [42, 13], [31, 71], [33, 5],
                    [54, 30], [10, 61], [66, 3], [27, 12],
                    [79, 19], [2, 29], [16, 14], [5, 57],
                    [70, 73], [46, 67], [57, 50], [61, 49], [21, 12],
                    [51, 81], [77, 68], [56, 65], [43, 54]]

        # Same task layout as Town01: navigation poses are reused for the
        # dynamic-navigation task.
        return [_poses_straight(),
                _poses_one_curve(),
                _poses_navigation(),
                _poses_navigation()
                ]

    def build_experiments(self):
        """
        Creates the whole set of experiment objects,
        The experiments created depend on the selected Town.
93 | 94 | 95 | """ 96 | 97 | # We set the camera 98 | # This single RGB camera is used on every experiment 99 | 100 | camera = Camera('CameraRGB') 101 | camera.set(FOV=100) 102 | camera.set_image_size(800, 600) 103 | camera.set_position(2.0, 0.0, 1.4) 104 | camera.set_rotation(-15.0, 0, 0) 105 | 106 | if self._city_name == 'Town01': 107 | poses_tasks = self._poses_town01() 108 | vehicles_tasks = [0, 0, 0, 20] 109 | pedestrians_tasks = [0, 0, 0, 50] 110 | else: 111 | poses_tasks = self._poses_town02() 112 | vehicles_tasks = [0, 0, 0, 15] 113 | pedestrians_tasks = [0, 0, 0, 50] 114 | 115 | experiments_vector = [] 116 | 117 | for weather in self.weathers: 118 | 119 | for iteration in range(len(poses_tasks)): 120 | poses = poses_tasks[iteration] 121 | vehicles = vehicles_tasks[iteration] 122 | pedestrians = pedestrians_tasks[iteration] 123 | 124 | conditions = CarlaSettings() 125 | conditions.set( 126 | SendNonPlayerAgentsInfo=True, 127 | NumberOfVehicles=vehicles, 128 | NumberOfPedestrians=pedestrians, 129 | WeatherId=weather 130 | ) 131 | # Add all the cameras that were set for this experiments 132 | 133 | conditions.add_sensor(camera) 134 | 135 | experiment = Experiment() 136 | experiment.set( 137 | Conditions=conditions, 138 | Poses=poses, 139 | Task=iteration, 140 | Repetitions=1 141 | ) 142 | experiments_vector.append(experiment) 143 | 144 | return experiments_vector 145 | -------------------------------------------------------------------------------- /PythonClient/carla/planner/planner.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 2 | # Barcelona (UAB). 3 | # 4 | # This work is licensed under the terms of the MIT license. 5 | # For a copy, see . 6 | 7 | import collections 8 | import math 9 | 10 | import numpy as np 11 | 12 | from . 
class Planner(object):
    """
    High-level command planner.

    Wraps a CityTrack (graph representation of the town) and converts the
    shortest route between a source and a target into driving commands
    (REACH_GOAL, GO_STRAIGHT, TURN_RIGHT, TURN_LEFT, LANE_FOLLOW).
    """

    def __init__(self, city_name):

        # Graph/track representation of the selected town.
        self._city_track = city_track.CityTrack(city_name)

        # Commands (one per route intersection) computed for the current
        # route; refreshed whenever a new route is computed.
        self._commands = []

    def get_next_command(self, source, source_ori, target, target_ori):
        """
        Computes the full plan and returns the next command,
        Args
            source: source position
            source_ori: source orientation
            target: target position
            target_ori: target orientation
        Returns
            a command ( Straight,Lane Follow, Left or Right)
        """

        track_source = self._city_track.project_node(source)
        track_target = self._city_track.project_node(target)

        # reach the goal

        if self._city_track.is_at_goal(track_source, track_target):
            return REACH_GOAL

        # Only (re)compute the route when entering a new node away from an
        # intersection; otherwise reuse the previously computed commands.
        if (self._city_track.is_at_new_node(track_source)
                and self._city_track.is_away_from_intersection(track_source)):

            route = self._city_track.compute_route(track_source, source_ori,
                                                   track_target, target_ori)
            if route is None:
                raise RuntimeError('Impossible to find route')

            self._commands = self._route_to_commands(route)

            if self._city_track.is_far_away_from_route_intersection(
                    track_source):
                return LANE_FOLLOW
            else:
                if self._commands:
                    return self._commands[0]
                else:
                    return LANE_FOLLOW
        else:

            if self._city_track.is_far_away_from_route_intersection(
                    track_source):
                return LANE_FOLLOW

            # If there is computed commands
            if self._commands:
                return self._commands[0]
            else:
                return LANE_FOLLOW

    def get_shortest_path_distance(
            self,
            source,
            source_ori,
            target,
            target_ori):
        """
        Compute the length of the shortest route between source and target,
        converted to world units via the track's pixel and node densities.
        Returns 0.0 when no route exists.
        """

        distance = 0
        track_source = self._city_track.project_node(source)
        track_target = self._city_track.project_node(target)

        current_pos = track_source

        route = self._city_track.compute_route(track_source, source_ori,
                                               track_target, target_ori)
        # No Route, distance is zero
        if route is None:
            return 0.0

        # Accumulate the euclidean distance between consecutive route nodes.
        for node_iter in route:
            distance += sldist(node_iter, current_pos)
            current_pos = node_iter

        # We multiply by these values to convert distance to world coordinates
        return distance * self._city_track.get_pixel_density() \
            * self._city_track.get_node_density()

    def is_there_posible_route(self, source, source_ori, target, target_ori):
        """Return True when a route from source to target exists.

        NOTE(review): the misspelling ('posible') is kept -- it is part of
        the public interface and callers may depend on it.
        """

        track_source = self._city_track.project_node(source)
        track_target = self._city_track.project_node(target)

        return not self._city_track.compute_route(
            track_source, source_ori, track_target, target_ori) is None

    def test_position(self, source):
        """Return True when the projected source node is away from any
        intersection."""

        node_source = self._city_track.project_node(source)

        return self._city_track.is_away_from_intersection(node_source)

    def _route_to_commands(self, route):

        """
        from the shortest path graph, transform it into a list of commands

        :param route: the sub graph containing the shortest path
        :return: list of commands encoded from 0-5
        """

        commands_list = []

        for i in range(0, len(route)):
            # Commands are only emitted at intersection nodes.
            if route[i] not in self._city_track.get_intersection_nodes():
                continue

            # NOTE(review): assumes an intersection node is never the first
            # or last element of the route -- otherwise route[i - 1] wraps
            # around and route[i + 1] raises IndexError. Confirm CityTrack
            # guarantees this.
            current = route[i]
            past = route[i - 1]
            future = route[i + 1]

            past_to_current = np.array(
                [current[0] - past[0], current[1] - past[1]])
            current_to_future = np.array(
                [future[0] - current[0], future[1] - current[1]])
            # Sign of the normalized 2D cross product decides turn direction.
            angle = signal(current_to_future, past_to_current)

            if angle < -0.1:
                command = TURN_RIGHT
            elif angle > 0.1:
                command = TURN_LEFT
            else:
                command = GO_STRAIGHT

            commands_list.append(command)

        return commands_list
40 | _ = f.readline() 41 | 42 | # The offset of the map zero coordinate. 43 | self._mapoffset = string_to_floats(f.readline()) 44 | 45 | def convert_to_node(self, input_data): 46 | """ 47 | Receives a data type (Can Be Pixel or World ) 48 | :param input_data: position in some coordinate 49 | :return: A vector representing a node 50 | """ 51 | 52 | input_type = self._check_input_type(input_data) 53 | if input_type == PIXEL: 54 | return self._pixel_to_node(input_data) 55 | elif input_type == WORLD: 56 | return self._world_to_node(input_data) 57 | else: 58 | raise ValueError('Invalid node to be converted') 59 | 60 | def convert_to_pixel(self, input_data): 61 | 62 | """ 63 | Receives a data type (Can Be Node or World ) 64 | :param input_data: position in some coordinate 65 | :return: A vector with pixel coordinates 66 | """ 67 | 68 | input_type = self._check_input_type(input_data) 69 | 70 | if input_type == NODE: 71 | return self._node_to_pixel(input_data) 72 | elif input_type == WORLD: 73 | return self._world_to_pixel(input_data) 74 | else: 75 | raise ValueError('Invalid node to be converted') 76 | 77 | def convert_to_world(self, input_data): 78 | 79 | """ 80 | Receives a data type (Can Be Pixel or Node ) 81 | :param input_data: position in some coordinate 82 | :return: vector with world coordinates 83 | """ 84 | 85 | input_type = self._check_input_type(input_data) 86 | if input_type == NODE: 87 | return self._node_to_world(input_data) 88 | elif input_type == PIXEL: 89 | return self._pixel_to_world(input_data) 90 | else: 91 | raise ValueError('Invalid node to be converted') 92 | 93 | def _node_to_pixel(self, node): 94 | """ 95 | Conversion from node format (graph) to pixel (image) 96 | :param node: 97 | :return: pixel 98 | """ 99 | pixel = [((node[0] + 2) * self._node_density) 100 | , ((node[1] + 2) * self._node_density)] 101 | return pixel 102 | 103 | def _pixel_to_node(self, pixel): 104 | """ 105 | Conversion from pixel format (image) to node (graph) 106 | :param 
node: 107 | :return: pixel 108 | """ 109 | node = [int(((pixel[0]) / self._node_density) - 2) 110 | , int(((pixel[1]) / self._node_density) - 2)] 111 | 112 | return tuple(node) 113 | 114 | def _pixel_to_world(self, pixel): 115 | """ 116 | Conversion from pixel format (image) to world (3D) 117 | :param pixel: 118 | :return: world 119 | """ 120 | 121 | relative_location = [pixel[0] * self._pixel_density, 122 | pixel[1] * self._pixel_density] 123 | 124 | world = [ 125 | relative_location[0] + self._mapoffset[0] - self._worldoffset[0], 126 | relative_location[1] + self._mapoffset[1] - self._worldoffset[1], 127 | 22 128 | ] 129 | 130 | return world 131 | 132 | def _world_to_pixel(self, world): 133 | """ 134 | Conversion from world format (3D) to pixel 135 | :param world: 136 | :return: pixel 137 | """ 138 | 139 | rotation = np.array([world[0], world[1], world[2]]) 140 | rotation = rotation.dot(self._worldrotation) 141 | 142 | relative_location = [rotation[0] + self._worldoffset[0] - self._mapoffset[0], 143 | rotation[1] + self._worldoffset[1] - self._mapoffset[1], 144 | rotation[2] + self._worldoffset[2] - self._mapoffset[2]] 145 | 146 | 147 | 148 | pixel = [math.floor(relative_location[0] / float(self._pixel_density)), 149 | math.floor(relative_location[1] / float(self._pixel_density))] 150 | 151 | return pixel 152 | 153 | def _world_to_node(self, world): 154 | return self._pixel_to_node(self._world_to_pixel(world)) 155 | 156 | def _node_to_world(self, node): 157 | 158 | return self._pixel_to_world(self._node_to_pixel(node)) 159 | 160 | def _check_input_type(self, input_data): 161 | if len(input_data) > 2: 162 | return WORLD 163 | elif type(input_data[0]) is int: 164 | return NODE 165 | else: 166 | return PIXEL 167 | -------------------------------------------------------------------------------- /PythonClient/carla/image_converter.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017 Computer Vision Center (CVC) 
def to_bgra_array(image):
    """Convert a CARLA raw image to a (height, width, 4) BGRA uint8 array."""
    if not isinstance(image, sensor.Image):
        raise ValueError("Argument must be a carla.sensor.Image")
    # The raw buffer is a flat byte stream; view it as unsigned bytes and
    # fold it into rows x columns x BGRA channels.
    flat = numpy.frombuffer(image.raw_data, dtype=numpy.uint8)
    return flat.reshape((image.height, image.width, 4))
def depth_to_array(image):
    """
    Convert an image containing CARLA encoded depth-map to a 2D array containing
    the depth value of each pixel normalized between [0.0, 1.0].
    """
    bgra = to_bgra_array(image).astype(numpy.float32)
    # Depth is packed across the B, G, R channels:
    # depth = (R + G*256 + B*256*256) / (256^3 - 1).
    weights = [65536.0, 256.0, 1.0]
    scaled = numpy.dot(bgra[:, :, :3], weights)
    return scaled / 16777215.0
def depth_to_local_point_cloud(image, color=None, max_depth=0.9):
    """
    Convert an image containing CARLA encoded depth-map to a point cloud of
    3D positions (relative to the camera), one per kept pixel, optionally
    carrying the corresponding RGB color.
    "max_depth" (fraction of the far plane) is used to omit the points that
    are far enough.
    Returns a sensor.PointCloud.
    """
    far = 1000.0  # max depth in meters.
    normalized_depth = depth_to_array(image)

    # (Intrinsic) K Matrix
    k = numpy.identity(3)
    k[0, 2] = image.width / 2.0
    k[1, 2] = image.height / 2.0
    # Focal length derived from the horizontal field of view (degrees).
    k[0, 0] = k[1, 1] = image.width / \
        (2.0 * math.tan(image.fov * math.pi / 360.0))

    # 2d pixel coordinates. The reversed ranges (width-1 down to 0,
    # height-1 down to 0) flip u/v to match the camera convention used by
    # the back-projection below.
    pixel_length = image.width * image.height
    u_coord = repmat(numpy.r_[image.width-1:-1:-1],
                     image.height, 1).reshape(pixel_length)
    v_coord = repmat(numpy.c_[image.height-1:-1:-1],
                     1, image.width).reshape(pixel_length)
    if color is not None:
        color = color.reshape(pixel_length, 3)
    normalized_depth = numpy.reshape(normalized_depth, pixel_length)

    # Search for pixels where the depth is greater than max_depth to
    # delete them
    max_depth_indexes = numpy.where(normalized_depth > max_depth)
    normalized_depth = numpy.delete(normalized_depth, max_depth_indexes)
    u_coord = numpy.delete(u_coord, max_depth_indexes)
    v_coord = numpy.delete(v_coord, max_depth_indexes)
    if color is not None:
        color = numpy.delete(color, max_depth_indexes, axis=0)

    # pd2 = [u,v,1]
    p2d = numpy.array([u_coord, v_coord, numpy.ones_like(u_coord)])

    # P = [X,Y,Z]: back-project pixels through the inverse intrinsics,
    # then scale rays by metric depth (normalized depth * far plane).
    p3d = numpy.dot(numpy.linalg.inv(k), p2d)
    p3d *= normalized_depth * far

    # Formating the output to:
    # [[X1,Y1,Z1,R1,G1,B1],[X2,Y2,Z2,R2,G2,B2], ... [Xn,Yn,Zn,Rn,Gn,Bn]]
    if color is not None:
        return sensor.PointCloud(
            image.frame_number,
            numpy.transpose(p3d),
            color_array=color)
    # [[X1,Y1,Z1],[X2,Y2,Z2], ... [Xn,Yn,Zn]]
    return sensor.PointCloud(image.frame_number, numpy.transpose(p3d))
32 | number_of_frames = 3000 33 | frame_step = 100 # Save one image every 100 frames 34 | output_folder = '_out' 35 | image_size = [800, 600] 36 | camera_local_pos = [0.3, 0.0, 1.3] # [X, Y, Z] 37 | camera_local_rotation = [0, 0, 0] # [pitch(Y), yaw(Z), roll(X)] 38 | fov = 70 39 | 40 | # Connect with the server 41 | with make_carla_client(host, port) as client: 42 | print('CarlaClient connected') 43 | 44 | # Here we load the settings. 45 | settings = CarlaSettings() 46 | settings.set( 47 | SynchronousMode=True, 48 | SendNonPlayerAgentsInfo=False, 49 | NumberOfVehicles=20, 50 | NumberOfPedestrians=40, 51 | WeatherId=random.choice([1, 3, 7, 8, 14])) 52 | settings.randomize_seeds() 53 | 54 | camera1 = Camera('CameraDepth', PostProcessing='Depth', FOV=fov) 55 | camera1.set_image_size(*image_size) 56 | camera1.set_position(*camera_local_pos) 57 | camera1.set_rotation(*camera_local_rotation) 58 | settings.add_sensor(camera1) 59 | 60 | camera2 = Camera('CameraRGB', PostProcessing='SceneFinal', FOV=fov) 61 | camera2.set_image_size(*image_size) 62 | camera2.set_position(*camera_local_pos) 63 | camera2.set_rotation(*camera_local_rotation) 64 | settings.add_sensor(camera2) 65 | 66 | client.load_settings(settings) 67 | 68 | # Start at location index id '0' 69 | client.start_episode(0) 70 | 71 | # Compute the camera transform matrix 72 | camera_to_car_transform = camera2.get_unreal_transform() 73 | 74 | # Iterate every frame in the episode except for the first one. 75 | for frame in range(1, number_of_frames): 76 | # Read the data produced by the server this frame. 77 | measurements, sensor_data = client.read_data() 78 | 79 | # Save one image every 'frame_step' frames 80 | if not frame % frame_step: 81 | # Start transformations time mesure. 
82 | timer = StopWatch() 83 | 84 | # RGB image [[[r,g,b],..[r,g,b]],..[[r,g,b],..[r,g,b]]] 85 | image_RGB = to_rgb_array(sensor_data['CameraRGB']) 86 | 87 | # 2d to (camera) local 3d 88 | # We use the image_RGB to colorize each 3D point, this is optional. 89 | # "max_depth" is used to keep only the points that are near to the 90 | # camera, meaning 1.0 the farest points (sky) 91 | point_cloud = depth_to_local_point_cloud( 92 | sensor_data['CameraDepth'], 93 | image_RGB, 94 | max_depth=far 95 | ) 96 | 97 | # (Camera) local 3d to world 3d. 98 | # Get the transform from the player protobuf transformation. 99 | world_transform = Transform( 100 | measurements.player_measurements.transform 101 | ) 102 | 103 | # Compute the final transformation matrix. 104 | car_to_world_transform = world_transform * camera_to_car_transform 105 | 106 | # Car to World transformation given the 3D points and the 107 | # transformation matrix. 108 | point_cloud.apply_transform(car_to_world_transform) 109 | 110 | # End transformations time mesure. 111 | timer.stop() 112 | 113 | # Save PLY to disk 114 | # This generates the PLY string with the 3D points and the RGB colors 115 | # for each row of the file. 116 | point_cloud.save_to_disk(os.path.join( 117 | output_folder, '{:0>5}.ply'.format(frame)) 118 | ) 119 | 120 | print_message(timer.milliseconds(), len(point_cloud), frame) 121 | 122 | client.send_control( 123 | measurements.player_measurements.autopilot_control 124 | ) 125 | 126 | 127 | def print_message(elapsed_time, point_n, frame): 128 | message = ' '.join([ 129 | 'Transformations took {:>3.0f} ms.', 130 | 'Saved {:>6} points to "{:0>5}.ply".' 
def check_far(value):
    """argparse type-checker: parse *value* as a float in [0.0, 1.0].

    Returns the parsed float. Raises argparse.ArgumentTypeError when the
    value is out of range, so argparse reports a clean usage error.
    """
    fvalue = float(value)
    if fvalue < 0.0 or fvalue > 1.0:
        # BUGFIX: the placeholder was never filled -- the message printed a
        # literal "{}" instead of the offending value.
        raise argparse.ArgumentTypeError(
            "{} must be a float between 0.0 and 1.0".format(fvalue))
    return fvalue
| 9 | class UtilsTest(unittest.TestCase): 10 | 11 | def test_project_point(self): 12 | ########################### 13 | # Case 1 - Point above line 14 | ########################### 15 | waypoint_1 = [0, 1] 16 | waypoint_2 = [3, 4] 17 | 18 | line_vec = [waypoint_1, waypoint_2] 19 | position_x = [1, 4] 20 | exp_pos_on_vec = np.asarray([2.0, 3.0]) 21 | 22 | pos_on_vec = utils.project_point(line_vec, 23 | position_x) 24 | np.testing.assert_allclose(pos_on_vec, 25 | exp_pos_on_vec) 26 | 27 | ########################### 28 | # Case 2 - Point below line 29 | ########################### 30 | waypoint_1 = [5, 1] 31 | waypoint_2 = [2, 6] 32 | 33 | line_vec = [waypoint_1, waypoint_2] 34 | position_x = [2, 3] 35 | exp_pos_on_vec = np.asarray([3.25, 3.79]) 36 | 37 | pos_on_vec = utils.project_point(line_vec, 38 | position_x) 39 | np.testing.assert_allclose(pos_on_vec, 40 | exp_pos_on_vec, 41 | rtol=1e-01) 42 | 43 | ######################################## 44 | # Case 3 - Edge case - Point behind line 45 | ######################################## 46 | waypoint_1 = [1, 2] 47 | waypoint_2 = [4, 3] 48 | 49 | line_vec = [waypoint_1, waypoint_2] 50 | position_x = [-1, 1] 51 | exp_pos_on_vec = np.asarray(waypoint_1) 52 | 53 | pos_on_vec = utils.project_point(line_vec, 54 | position_x) 55 | np.testing.assert_allclose(pos_on_vec, 56 | exp_pos_on_vec) 57 | 58 | ######################################## 59 | # Case 3 - Edge case - Point beyond line 60 | ######################################## 61 | waypoint_1 = [1, 2] 62 | waypoint_2 = [4, 3] 63 | 64 | line_vec = [waypoint_1, waypoint_2] 65 | position_x = [6, 1] 66 | exp_pos_on_vec = np.asarray(waypoint_2) 67 | 68 | pos_on_vec = utils.project_point(line_vec, 69 | position_x) 70 | np.testing.assert_allclose(pos_on_vec, 71 | exp_pos_on_vec) 72 | 73 | def test_next_carrot(self): 74 | 75 | waypoint_1 = [0, 1] 76 | waypoint_2 = [2, 4] 77 | 78 | line_vec = [waypoint_1, waypoint_2] 79 | pose_2d = [2, 2] 80 | lookahead_dis = 5.0 81 | 82 | 
exp_carrot = [3.85, 6.77] 83 | 84 | carrot = utils.next_carrot(line_vec, pose_2d, 85 | lookahead_dis) 86 | 87 | np.testing.assert_allclose(carrot, 88 | exp_carrot, 89 | rtol=1e-01) 90 | 91 | def test_calculate_delta(self): 92 | ######################### 93 | # Case 1 - First quadrant 94 | ######################### 95 | position = [2, 2, 0] 96 | 97 | carrot = [3, 4] 98 | # Limit the steering bound by 90 degrees 99 | delta_max = math.radians(90) 100 | 101 | exp_delta_degrees = 63.43 102 | 103 | delta = utils.calculate_delta(position, 104 | carrot, 105 | delta_max) 106 | 107 | self.assertAlmostEqual(math.degrees(delta), 108 | exp_delta_degrees, 109 | places=2) 110 | 111 | ####################### 112 | # Case 2 - 2nd quadrant 113 | ####################### 114 | position = [2, 2, 0] 115 | 116 | carrot = [1, 5] 117 | # Limit the steering bound by 180 degrees 118 | delta_max = math.radians(180) 119 | 120 | exp_delta_degrees = 108.4 121 | 122 | delta = utils.calculate_delta(position, 123 | carrot, 124 | delta_max) 125 | 126 | self.assertAlmostEqual(math.degrees(delta), 127 | exp_delta_degrees, 128 | places=1) 129 | 130 | ####################### 131 | # Case 3 - 3rd quadrant 132 | ####################### 133 | position = [2, 2, 0] 134 | 135 | carrot = [1, 1] 136 | # Limit the steering bound by 60 degrees 137 | delta_max = math.radians(60) 138 | # The resulting delta should be negative since we 139 | # are in the 3rd quadrant. 140 | exp_delta_degrees = -60 141 | 142 | delta = utils.calculate_delta(position, 143 | carrot, 144 | delta_max) 145 | 146 | self.assertAlmostEqual(math.degrees(delta), 147 | exp_delta_degrees, 148 | places=1) 149 | 150 | ####################### 151 | # Case 4 - 4th quadrant 152 | ####################### 153 | position = [2, 2, 0] 154 | 155 | carrot = [3, 1] 156 | # Limit the steering bound by 60 degrees 157 | delta_max = math.radians(60) 158 | # The resulting delta should be negative since we 159 | # are in the 3rd quadrant. 
160 | exp_delta_degrees = -45 161 | 162 | delta = utils.calculate_delta(position, 163 | carrot, 164 | delta_max) 165 | 166 | self.assertAlmostEqual(math.degrees(delta), 167 | exp_delta_degrees, 168 | places=1) 169 | 170 | ####################### 171 | # Case 5 - Angle wrap 172 | ####################### 173 | 174 | position = [2, 2, 0] 175 | 176 | carrot = [3, 4] 177 | # Limit the steering bound by 30 degrees 178 | delta_max = math.radians(30) 179 | 180 | exp_delta_degrees = 30 181 | 182 | delta = utils.calculate_delta(position, 183 | carrot, 184 | delta_max) 185 | 186 | self.assertAlmostEqual(math.degrees(delta), 187 | exp_delta_degrees, 188 | places=1) 189 | 190 | def test_update_waypoint_trajectory(self): 191 | w1 = [0, 1] 192 | w2 = [2, 1] 193 | w3 = [2, 4] 194 | waypoints_array = [w1, w2, w3] 195 | waypoint_counter = 0 196 | 197 | wp1, wp2, update_traj = \ 198 | utils.update_waypoint_trajectory(waypoints_array, 199 | waypoint_counter) 200 | 201 | self.assertEqual(wp1, w1) 202 | self.assertEqual(wp2, w2) 203 | self.assertTrue(update_traj) 204 | 205 | waypoint_counter += 1 206 | wp1, wp2, update_traj = \ 207 | utils.update_waypoint_trajectory(waypoints_array, 208 | waypoint_counter) 209 | 210 | self.assertEqual(wp1, w2) 211 | self.assertEqual(wp2, w3) 212 | self.assertTrue(update_traj) 213 | 214 | waypoint_counter += 1 215 | wp1, wp2, update_traj = \ 216 | utils.update_waypoint_trajectory(waypoints_array, 217 | waypoint_counter) 218 | 219 | self.assertEqual(wp1, w3) 220 | self.assertEqual(wp2, w1) 221 | self.assertTrue(update_traj) 222 | 223 | waypoint_counter += 1 224 | wp1, wp2, update_traj = \ 225 | utils.update_waypoint_trajectory(waypoints_array, 226 | waypoint_counter) 227 | 228 | self.assertFalse(update_traj) 229 | 230 | def test_calculate_distance(self): 231 | point1 = [0, 0] 232 | point2 = [2, 2] 233 | exp_dist = 2.82 234 | 235 | distance = utils.calculate_distance(point1, point2) 236 | self.assertAlmostEqual(distance, exp_dist, places=1) 237 | 238 | 
@contextmanager
def make_carla_client(host, world_port, timeout=15):
    """Context manager for creating and connecting a CarlaClient.

    Yields the connected client; connection setup and teardown are handled
    by util.make_connection.
    """
    connection = util.make_connection(CarlaClient, host, world_port, timeout)
    with connection as client:
        yield client
    def start_episode(self, player_start_index):
        """
        Start the new episode at the player start given by the
        player_start_index. The list of player starts is retrieved by
        "load_settings".

        The new episode is started based on the last settings loaded by
        "load_settings".

        This function waits until the server answers with an EpisodeReady.

        Raises RuntimeError when no settings were loaded, when the server
        cannot be read, or when the server refuses to start the episode.
        """
        if self._current_settings is None:
            raise RuntimeError('no settings loaded, cannot start episode')

        # if no new settings are loaded, request new episode with previous
        if not self._is_episode_requested:
            self._request_new_episode(self._current_settings)

        try:
            # Tell the server which start spot to use.
            pb_message = carla_protocol.EpisodeStart()
            pb_message.player_start_spot_index = player_start_index
            self._world_client.write(pb_message.SerializeToString())
            # Wait for EpisodeReady.
            data = self._world_client.read()
            if not data:
                raise RuntimeError('failed to read data from server')
            pb_message = carla_protocol.EpisodeReady()
            pb_message.ParseFromString(data)
            if not pb_message.ready:
                raise RuntimeError('cannot start episode: server failed to start episode')
            # We can start the agent clients now.
            self._stream_client.connect()
            self._control_client.connect()
            # Set again the status for no episode requested
        finally:
            # Whether the handshake succeeded or not, the next start_episode
            # must request a fresh episode.
            self._is_episode_requested = False
135 | """ 136 | if isinstance(args[0] if args else None, carla_protocol.Control): 137 | pb_message = args[0] 138 | else: 139 | pb_message = carla_protocol.Control() 140 | pb_message.steer = kwargs.get('steer', 0.0) 141 | pb_message.throttle = kwargs.get('throttle', 0.0) 142 | pb_message.brake = kwargs.get('brake', 0.0) 143 | pb_message.hand_brake = kwargs.get('hand_brake', False) 144 | pb_message.reverse = kwargs.get('reverse', False) 145 | self._control_client.write(pb_message.SerializeToString()) 146 | 147 | def _request_new_episode(self, carla_settings): 148 | """ 149 | Internal function to request a new episode. Prepare the client for a new 150 | episode by disconnecting agent clients. 151 | """ 152 | # Disconnect agent clients. 153 | self._stream_client.disconnect() 154 | self._control_client.disconnect() 155 | # Send new episode request. 156 | pb_message = carla_protocol.RequestNewEpisode() 157 | pb_message.ini_file = str(carla_settings) 158 | self._world_client.write(pb_message.SerializeToString()) 159 | # Read scene description. 160 | data = self._world_client.read() 161 | if not data: 162 | raise RuntimeError('failed to read data from server') 163 | pb_message = carla_protocol.SceneDescription() 164 | pb_message.ParseFromString(data) 165 | self._sensors = dict((sensor.id, sensor) \ 166 | for sensor in _make_sensor_parsers(pb_message.sensors)) 167 | self._is_episode_requested = True 168 | return pb_message 169 | 170 | def _read_sensor_data(self): 171 | while True: 172 | data = self._stream_client.read() 173 | if not data: 174 | raise StopIteration 175 | yield self._parse_sensor_data(data) 176 | 177 | def _parse_sensor_data(self, data): 178 | sensor_id = struct.unpack(' id else 'Unknown' 186 | getint32 = lambda data, index: struct.unpack('. 
8 | 9 | """Basic CARLA client example.""" 10 | 11 | from __future__ import print_function 12 | 13 | import argparse 14 | import logging 15 | import random 16 | import time 17 | 18 | from carla.client import make_carla_client 19 | from carla.sensor import Camera, Lidar 20 | from carla.settings import CarlaSettings 21 | from carla.tcp import TCPConnectionError 22 | from carla.util import print_over_same_line 23 | 24 | 25 | def run_carla_client(args): 26 | # Here we will run 3 episodes with 300 frames each. 27 | number_of_episodes = 3 28 | frames_per_episode = 300 29 | 30 | # We assume the CARLA server is already waiting for a client to connect at 31 | # host:port. To create a connection we can use the `make_carla_client` 32 | # context manager, it creates a CARLA client object and starts the 33 | # connection. It will throw an exception if something goes wrong. The 34 | # context manager makes sure the connection is always cleaned up on exit. 35 | with make_carla_client(args.host, args.port) as client: 36 | print('CarlaClient connected') 37 | 38 | for episode in range(0, number_of_episodes): 39 | # Start a new episode. 40 | 41 | if args.settings_filepath is None: 42 | 43 | # Create a CarlaSettings object. This object is a wrapper around 44 | # the CarlaSettings.ini file. Here we set the configuration we 45 | # want for the new episode. 46 | settings = CarlaSettings() 47 | settings.set( 48 | SynchronousMode=True, 49 | SendNonPlayerAgentsInfo=True, 50 | NumberOfVehicles=20, 51 | NumberOfPedestrians=40, 52 | WeatherId=random.choice([1, 3, 7, 8, 14]), 53 | QualityLevel=args.quality_level) 54 | settings.randomize_seeds() 55 | 56 | # Now we want to add a couple of cameras to the player vehicle. 57 | # We will collect the images produced by these cameras every 58 | # frame. 59 | 60 | # The default camera captures RGB images of the scene. 61 | camera0 = Camera('CameraRGB') 62 | # Set image resolution in pixels. 
63 | camera0.set_image_size(800, 600) 64 | # Set its position relative to the car in meters. 65 | camera0.set_position(0.30, 0, 1.30) 66 | settings.add_sensor(camera0) 67 | 68 | # Let's add another camera producing ground-truth depth. 69 | camera1 = Camera('CameraDepth', PostProcessing='Depth') 70 | camera1.set_image_size(800, 600) 71 | camera1.set_position(0.30, 0, 1.30) 72 | settings.add_sensor(camera1) 73 | 74 | if args.lidar: 75 | lidar = Lidar('Lidar32') 76 | lidar.set_position(0, 0, 2.50) 77 | lidar.set_rotation(0, 0, 0) 78 | lidar.set( 79 | Channels=32, 80 | Range=50, 81 | PointsPerSecond=100000, 82 | RotationFrequency=10, 83 | UpperFovLimit=10, 84 | LowerFovLimit=-30) 85 | settings.add_sensor(lidar) 86 | 87 | else: 88 | 89 | # Alternatively, we can load these settings from a file. 90 | with open(args.settings_filepath, 'r') as fp: 91 | settings = fp.read() 92 | 93 | # Now we load these settings into the server. The server replies 94 | # with a scene description containing the available start spots for 95 | # the player. Here we can provide a CarlaSettings object or a 96 | # CarlaSettings.ini file as string. 97 | scene = client.load_settings(settings) 98 | 99 | # Choose one player start at random. 100 | number_of_player_starts = len(scene.player_start_spots) 101 | player_start = random.randint(0, max(0, number_of_player_starts - 1)) 102 | 103 | # Notify the server that we want to start the episode at the 104 | # player_start index. This function blocks until the server is ready 105 | # to start the episode. 106 | print('Starting new episode at %r...' % scene.map_name) 107 | client.start_episode(player_start) 108 | 109 | # Iterate every frame in the episode. 110 | for frame in range(0, frames_per_episode): 111 | 112 | # Read the data produced by the server this frame. 113 | measurements, sensor_data = client.read_data() 114 | 115 | # Print some of the measurements. 116 | print_measurements(measurements) 117 | 118 | # Save the images to disk if requested. 
119 | if args.save_images_to_disk: 120 | for name, measurement in sensor_data.items(): 121 | filename = args.out_filename_format.format(episode, name, frame) 122 | measurement.save_to_disk(filename) 123 | 124 | # We can access the encoded data of a given image as numpy 125 | # array using its "data" property. For instance, to get the 126 | # depth value (normalized) at pixel X, Y 127 | # 128 | # depth_array = sensor_data['CameraDepth'].data 129 | # value_at_pixel = depth_array[Y, X] 130 | # 131 | 132 | # Now we have to send the instructions to control the vehicle. 133 | # If we are in synchronous mode the server will pause the 134 | # simulation until we send this control. 135 | 136 | if not args.autopilot: 137 | 138 | client.send_control( 139 | steer=random.uniform(-1.0, 1.0), 140 | throttle=0.5, 141 | brake=0.0, 142 | hand_brake=False, 143 | reverse=False) 144 | 145 | else: 146 | 147 | # Together with the measurements, the server has sent the 148 | # control that the in-game autopilot would do this frame. We 149 | # can enable autopilot by sending back this control to the 150 | # server. We can modify it if wanted, here for instance we 151 | # will add some noise to the steer. 
152 | 153 | control = measurements.player_measurements.autopilot_control 154 | control.steer += random.uniform(-0.1, 0.1) 155 | client.send_control(control) 156 | 157 | 158 | def print_measurements(measurements): 159 | number_of_agents = len(measurements.non_player_agents) 160 | player_measurements = measurements.player_measurements 161 | message = 'Vehicle at ({pos_x:.1f}, {pos_y:.1f}), ' 162 | message += '{speed:.0f} km/h, ' 163 | message += 'Collision: {{vehicles={col_cars:.0f}, pedestrians={col_ped:.0f}, other={col_other:.0f}}}, ' 164 | message += '{other_lane:.0f}% other lane, {offroad:.0f}% off-road, ' 165 | message += '({agents_num:d} non-player agents in the scene)' 166 | message = message.format( 167 | pos_x=player_measurements.transform.location.x, 168 | pos_y=player_measurements.transform.location.y, 169 | speed=player_measurements.forward_speed * 3.6, # m/s -> km/h 170 | col_cars=player_measurements.collision_vehicles, 171 | col_ped=player_measurements.collision_pedestrians, 172 | col_other=player_measurements.collision_other, 173 | other_lane=100 * player_measurements.intersection_otherlane, 174 | offroad=100 * player_measurements.intersection_offroad, 175 | agents_num=number_of_agents) 176 | print_over_same_line(message) 177 | 178 | 179 | def main(): 180 | argparser = argparse.ArgumentParser(description=__doc__) 181 | argparser.add_argument( 182 | '-v', '--verbose', 183 | action='store_true', 184 | dest='debug', 185 | help='print debug information') 186 | argparser.add_argument( 187 | '--host', 188 | metavar='H', 189 | default='localhost', 190 | help='IP of the host server (default: localhost)') 191 | argparser.add_argument( 192 | '-p', '--port', 193 | metavar='P', 194 | default=2000, 195 | type=int, 196 | help='TCP port to listen to (default: 2000)') 197 | argparser.add_argument( 198 | '-a', '--autopilot', 199 | action='store_true', 200 | help='enable autopilot') 201 | argparser.add_argument( 202 | '-l', '--lidar', 203 | action='store_true', 204 | 
help='enable Lidar') 205 | argparser.add_argument( 206 | '-q', '--quality-level', 207 | choices=['Low', 'Epic'], 208 | type=lambda s: s.title(), 209 | default='Epic', 210 | help='graphics quality level, a lower level makes the simulation run considerably faster.') 211 | argparser.add_argument( 212 | '-i', '--images-to-disk', 213 | action='store_true', 214 | dest='save_images_to_disk', 215 | help='save images (and Lidar data if active) to disk') 216 | argparser.add_argument( 217 | '-c', '--carla-settings', 218 | metavar='PATH', 219 | dest='settings_filepath', 220 | default=None, 221 | help='Path to a "CarlaSettings.ini" file') 222 | 223 | args = argparser.parse_args() 224 | 225 | log_level = logging.DEBUG if args.debug else logging.INFO 226 | logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level) 227 | 228 | logging.info('listening to server %s:%s', args.host, args.port) 229 | 230 | args.out_filename_format = '_out/episode_{:0>4d}/{:s}/{:0>6d}' 231 | 232 | while True: 233 | try: 234 | 235 | run_carla_client(args) 236 | 237 | print('Done.') 238 | return 239 | 240 | except TCPConnectionError as error: 241 | logging.error(error) 242 | time.sleep(1) 243 | 244 | 245 | if __name__ == '__main__': 246 | 247 | try: 248 | main() 249 | except KeyboardInterrupt: 250 | print('\nCancelled by user. 
import csv
import datetime
import os


class Recording(object):
    """Persists driving-benchmark results to disk.

    Creates, inside ``_benchmarks_results/<name_to_save>``:
      * ``summary.csv``      -- one row per executed episode (pose),
      * ``measurements.csv`` -- one row per simulation frame,
      * ``log_<timestamp>``  -- human-readable progress log,
      * ``_images/...``      -- optional per-frame sensor images.
    """

    def __init__(self, name_to_save, continue_experiment, save_images):
        # Row templates for the two CSV files; -1 means "not filled yet".
        # The key order of these dicts defines the CSV column order.
        self._dict_summary = {'exp_id': -1,
                              'rep': -1,
                              'weather': -1,
                              'start_point': -1,
                              'end_point': -1,
                              'result': -1,
                              'initial_distance': -1,
                              'final_distance': -1,
                              'final_time': -1,
                              'time_out': -1
                              }
        self._dict_measurements = {'exp_id': -1,
                                   'rep': -1,
                                   'weather': -1,
                                   'start_point': -1,
                                   'end_point': -1,
                                   'collision_other': -1,
                                   'collision_pedestrians': -1,
                                   'collision_vehicles': -1,
                                   'intersection_otherlane': -1,
                                   'intersection_offroad': -1,
                                   'pos_x': -1,
                                   'pos_y': -1,
                                   'steer': -1,
                                   'throttle': -1,
                                   'brake': -1
                                   }

        # First run: the shared results folder may not exist yet.
        if not os.path.exists('_benchmarks_results'):
            os.mkdir('_benchmarks_results')

        # Full path for this benchmark's log files.
        self._path = os.path.join('_benchmarks_results', name_to_save)

        # Check for continuation of a previous experiment; when not
        # continuing, a new numbered path name is generated instead.
        self._path, _ = self._continue_experiment(continue_experiment)

        self._create_log_files()

        # Date-stamped log showing when the benchmark was last accessed
        # and what was tested.
        now = datetime.datetime.now()
        self._internal_log_name = os.path.join(self._path, 'log_' + now.strftime("%Y%m%d%H%M"))
        open(self._internal_log_name, 'w').close()

        # Image-saving flag plus the filename template used by save_images().
        self._save_images = save_images
        self._image_filename_format = os.path.join(
            self._path, '_images/episode_{:s}/{:s}/image_{:0>5d}.jpg')

    @property
    def path(self):
        # Directory where every result of this run is written.
        return self._path

    def log_poses(self, start_index, end_index, weather_id):
        """Log the start/end pose pair about to be driven."""
        with open(self._internal_log_name, 'a+') as log:
            log.write(' Start Poses (%d %d ) on weather %d \n ' %
                      (start_index, end_index, weather_id))

    def log_poses_finish(self):
        """Log that every pose of the current task was completed."""
        with open(self._internal_log_name, 'a+') as log:
            log.write('Finished Task')

    def log_start(self, id_experiment):
        """Log the beginning of a benchmark task."""
        with open(self._internal_log_name, 'a+') as log:
            log.write('Start Task %d \n' % id_experiment)

    def log_end(self):
        """Log that the entire benchmark finished."""
        with open(self._internal_log_name, 'a+') as log:
            log.write('====== Finished Entire Benchmark ======')

    def write_summary_results(self, experiment, pose, rep,
                              path_distance, remaining_distance,
                              final_time, time_out, result):
        """
        Record the summary of an episode (pose) execution as one row of
        summary.csv.
        """
        self._dict_summary['exp_id'] = experiment.task
        self._dict_summary['rep'] = rep
        self._dict_summary['weather'] = experiment.Conditions.WeatherId
        self._dict_summary['start_point'] = pose[0]
        self._dict_summary['end_point'] = pose[1]
        self._dict_summary['result'] = result
        self._dict_summary['initial_distance'] = path_distance
        self._dict_summary['final_distance'] = remaining_distance
        self._dict_summary['final_time'] = final_time
        self._dict_summary['time_out'] = time_out

        with open(os.path.join(self._path, 'summary.csv'), 'a+') as ofd:
            w = csv.DictWriter(ofd, self._dict_summary.keys())
            w.writerow(self._dict_summary)

    def write_measurements_results(self, experiment, rep, pose, reward_vec, control_vec):
        """
        Record the measurements, controls and status of the episode, one
        row of measurements.csv per frame.
        """
        with open(os.path.join(self._path, 'measurements.csv'), 'a+') as rfd:
            rw = csv.DictWriter(rfd, self._dict_measurements.keys())

            # The original indexed both vectors with range(len(reward_vec));
            # zip pairs them directly and cannot raise IndexError when the
            # control vector is shorter.
            for reward, control in zip(reward_vec, control_vec):
                self._dict_measurements['exp_id'] = experiment.task
                self._dict_measurements['rep'] = rep
                self._dict_measurements['start_point'] = pose[0]
                self._dict_measurements['end_point'] = pose[1]
                self._dict_measurements['weather'] = experiment.Conditions.WeatherId
                self._dict_measurements['collision_other'] = reward.collision_other
                self._dict_measurements['collision_pedestrians'] = reward.collision_pedestrians
                self._dict_measurements['collision_vehicles'] = reward.collision_vehicles
                self._dict_measurements['intersection_otherlane'] = reward.intersection_otherlane
                self._dict_measurements['intersection_offroad'] = reward.intersection_offroad
                self._dict_measurements['pos_x'] = reward.transform.location.x
                self._dict_measurements['pos_y'] = reward.transform.location.y
                self._dict_measurements['steer'] = control.steer
                self._dict_measurements['throttle'] = control.throttle
                self._dict_measurements['brake'] = control.brake

                rw.writerow(self._dict_measurements)

    def _create_log_files(self):
        """
        Create the experiment folder and the CSV files with their headers,
        unless the experiment already exists (continuation case).
        """
        if not self._experiment_exist():
            os.mkdir(self._path)

            with open(os.path.join(self._path, 'summary.csv'), 'w') as ofd:
                w = csv.DictWriter(ofd, self._dict_summary.keys())
                w.writeheader()

            with open(os.path.join(self._path, 'measurements.csv'), 'w') as rfd:
                rw = csv.DictWriter(rfd, self._dict_measurements.keys())
                rw.writeheader()

    def _continue_experiment(self, continue_experiment):
        """
        Return (path, line_on_file) for the experiment.
        If continue_experiment is False and the experiment already exists,
        a new non-conflicting path name is generated.
        """

        def get_non_existent_path(f_name_path):
            """Increment a numeric suffix until the path does not exist."""
            if not os.path.exists(f_name_path):
                return f_name_path
            filename, file_extension = os.path.splitext(f_name_path)
            i = 1
            new_f_name = "{}-{}{}".format(filename, i, file_extension)
            while os.path.exists(new_f_name):
                i += 1
                new_f_name = "{}-{}{}".format(filename, i, file_extension)
            return new_f_name

        # Start with the same path as before.
        new_path = self._path

        if self._experiment_exist():
            if continue_experiment:
                # Resume: pick up from the last recorded position.
                line_on_file = self._get_last_position()
            else:
                # Start fresh under a new, non-conflicting name.
                new_path = get_non_existent_path(new_path)
                line_on_file = 1
        else:
            line_on_file = 1
        return new_path, line_on_file

    def save_images(self, sensor_data, episode_name, frame):
        """Save every sensor image of this frame, if image saving is on."""
        if self._save_images:
            for name, image in sensor_data.items():
                image.save_to_disk(self._image_filename_format.format(
                    episode_name, name, frame))

    def get_pose_and_experiment(self, number_poses_task):
        """
        Based on the line in the summary file, return the current pose and
        experiment index.

        Warning: assumes that all tasks have the same number of poses.
        """
        line_on_file = self._get_last_position() - 1
        # <= 0 also covers the "no summary file yet" case, in which
        # _get_last_position() returns 0; the original compared == 0 and
        # would then return a negative experiment index (-1).
        if line_on_file <= 0:
            return 0, 0
        return line_on_file % number_poses_task, line_on_file // number_poses_task

    def _experiment_exist(self):
        # The experiment exists iff its results folder does.
        return os.path.exists(self._path)

    def _get_last_position(self):
        """
        Return the number of lines in summary.csv (header included), i.e.
        the position to continue from; 0 when the file does not exist yet.
        """
        try:
            with open(os.path.join(self._path, 'summary.csv')) as f:
                return sum(1 for _ in f)
        except IOError:
            return 0
#!/usr/bin/python3

import tkinter as tk
import os

#import matplotlib
#matplotlib.use("tkagg")
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.backends.tkagg as tkagg
from matplotlib.backends.backend_agg import FigureCanvasAgg
import pygame

class Dynamic2DFigure():
    """A matplotlib figure holding named 2D line plots that can be rolled
    or replaced frame by frame (used for the trajectory plot)."""

    def __init__(self,
                 figsize=(8,8),
                 edgecolor="black",
                 rect=[0.1, 0.1, 0.8, 0.8],
                 *args, **kwargs):
        self.graphs = {}   # name -> Line2D
        self.texts = {}    # name + "_TEXT" -> marker label Text
        self.fig = plt.Figure(figsize=figsize, edgecolor=edgecolor)
        self.ax = self.fig.add_axes(rect)
        self.fig.tight_layout()
        self.marker_text_offset = 0
        # .get() tolerates a missing "title" kwarg (the original indexed
        # kwargs["title"] directly and raised KeyError when absent).
        if kwargs.get("title") is not None:
            self.fig.suptitle(kwargs["title"])
        self.axis_equal = False
        self.invert_xaxis = False

    def set_invert_x_axis(self):
        # Flip the x axis on every rescale.
        self.invert_xaxis = True

    def set_axis_equal(self):
        # Keep a 1:1 aspect ratio on every rescale.
        self.axis_equal = True

    def add_graph(self, name, label="", window_size=10, x0=None, y0=None,
                  linestyle='-', linewidth=1, marker="", color="k",
                  markertext=None, marker_text_offset=2):
        """Create a named line (and optional marker label) on the axes.

        When no initial data is given, a zero-filled rolling window of
        window_size samples is used.
        """
        self.marker_text_offset = marker_text_offset

        if x0 is None or y0 is None:
            x0 = np.zeros(window_size)
            y0 = np.zeros(window_size)
        # NOTE: the original duplicated the plotting code in two identical
        # if/else branches; a single call is equivalent.
        new_graph, = self.ax.plot(x0, y0, label=label,
                                  linestyle=linestyle, linewidth=linewidth,
                                  marker=marker, color=color)
        self.graphs[name] = new_graph
        if markertext is not None:
            self.texts[name + "_TEXT"] = self.ax.text(
                x0[-1], y0[-1] + marker_text_offset, markertext)

    def roll(self, name, new_x, new_y):
        """Shift the named graph's window left by one sample and append
        the new point; move its marker label along with it."""
        graph = self.graphs[name]
        if graph is not None:
            x, y = graph.get_data()
            x = np.roll(x, -1)
            x[-1] = new_x
            y = np.roll(y, -1)
            y[-1] = new_y
            graph.set_data((x, y))
            self.rescale()
            if name + "_TEXT" in self.texts:
                graph_text = self.texts[name + "_TEXT"]
                graph_text.set_position((new_x, new_y + self.marker_text_offset))
                self.rescale()

    def update(self, name, new_x_vec, new_y_vec, new_colour='k'):
        """Replace the named graph's data (and colour) wholesale."""
        graph = self.graphs[name]
        if graph is not None:
            graph.set_data((np.array(new_x_vec), np.array(new_y_vec)))
            graph.set_color(new_colour)
            self.rescale()
            if name + "_TEXT" in self.texts:
                graph_text = self.texts[name + "_TEXT"]
                graph_text.set_position(
                    (new_x_vec[-1], new_y_vec[-1] + self.marker_text_offset))
                self.rescale()

    def rescale(self):
        """Fit the axis limits to all graphs, with 5% padding."""
        xmin = float("inf")
        xmax = -1*float("inf")
        ymin, ymax = self.ax.get_ylim()
        for name, graph in self.graphs.items():
            xvals, yvals = graph.get_data()
            xmin_data = xvals.min()
            xmax_data = xvals.max()
            ymin_data = yvals.min()
            ymax_data = yvals.max()
            xmin_padded = xmin_data-0.05*(xmax_data-xmin_data)
            xmax_padded = xmax_data+0.05*(xmax_data-xmin_data)
            ymin_padded = ymin_data-0.05*(ymax_data-ymin_data)
            ymax_padded = ymax_data+0.05*(ymax_data-ymin_data)
            xmin = min(xmin_padded, xmin)
            xmax = max(xmax_padded, xmax)
            ymin = min(ymin_padded, ymin)
            ymax = max(ymax_padded, ymax)
        self.ax.set_xlim(xmin, xmax)
        self.ax.set_ylim(ymin, ymax)
        if self.axis_equal:
            self.ax.set_aspect('equal')
        if self.invert_xaxis:
            self.ax.invert_xaxis()


class DynamicFigure():
    """A small matplotlib figure with named rolling line plots."""

    def __init__(self, *args, **kwargs):
        self.graphs = {}
        self.fig = plt.Figure(figsize=(3, 2), edgecolor="black")
        self.ax = self.fig.add_axes([0.2, 0.2, 0.6, 0.6])
        self.fig.tight_layout()
        # .get() tolerates a missing "title" kwarg.
        if kwargs.get("title") is not None:
            self.fig.suptitle(kwargs["title"])

    def add_graph(self, name, label="", window_size=10, x0=None, y0=None):
        """Create a named line; defaults to a zero-filled rolling window."""
        if y0 is None:
            x0 = np.zeros(window_size)
            y0 = np.zeros(window_size)
            new_graph, = self.ax.plot(x0, y0, label=label)
        elif x0 is None:
            # Only y given: matplotlib supplies the x index implicitly.
            new_graph, = self.ax.plot(y0, label=label)
        else:
            new_graph, = self.ax.plot(x0, y0, label=label)
        self.graphs[name] = new_graph

    def roll(self, name, new_x, new_y):
        """Shift the named graph's window left and append the new point."""
        graph = self.graphs[name]
        if graph is not None:
            x, y = graph.get_data()
            x = np.roll(x, -1)
            x[-1] = new_x
            y = np.roll(y, -1)
            y[-1] = new_y
            graph.set_data((x, y))
            self.rescale()

    def rescale(self):
        """Fit the axis limits to all graphs, with 5% padding."""
        xmin = float("inf")
        xmax = -1*float("inf")
        ymin, ymax = self.ax.get_ylim()
        for name, graph in self.graphs.items():
            xvals, yvals = graph.get_data()
            xmin_data = xvals.min()
            xmax_data = xvals.max()
            ymin_data = yvals.min()
            ymax_data = yvals.max()
            xmin_padded = xmin_data-0.05*(xmax_data-xmin_data)
            xmax_padded = xmax_data+0.05*(xmax_data-xmin_data)
            ymin_padded = ymin_data-0.05*(ymax_data-ymin_data)
            ymax_padded = ymax_data+0.05*(ymax_data-ymin_data)
            xmin = min(xmin_padded, xmin)
            xmax = max(xmax_padded, xmax)
            ymin = min(ymin_padded, ymin)
            ymax = max(ymax_padded, ymax)
        self.ax.set_xlim(xmin, xmax)
        self.ax.set_ylim(ymin, ymax)


class LivePlotter():
    """A Tk window that stacks matplotlib figures (and optionally embeds
    a pygame display) and refreshes them live."""

    def __init__(self, tk_title=None):
        self._default_w = 150
        self._default_h = 100
        self._graph_w = 0       # accumulated width/height of stacked figures
        self._graph_h = 0
        self._surf_w = 0        # accumulated size of pygame surfaces
        self._surf_h = 0

        self._figs = []
        self._fcas = {}         # fig -> FigureCanvasAgg
        self._photos = {}       # fig -> tk.PhotoImage backing store

        self._text_id = None
        self._empty = True

        self._root = tk.Tk()
        if tk_title is not None:
            self._root.title(tk_title)

        # Placeholder canvas shown until the first figure is plotted.
        self._canvas = tk.Canvas(self._root, width=self._default_w, height=self._default_h)
        self._canvas.config(bg="#6A6A6A")
        self._text_id = self._canvas.create_text(
            (self._default_w/2, self._default_h/2),
            text="No live plots\ncreated yet.")
        self._canvas.grid(row=0, column=0)

        self._display = None
        self._game_frame = None
        self._pygame_init = False

        self._surfs = []
        self._surf_coords = {}

    def plot_figure(self, fig):
        """Render a figure and stack it below the previously added ones."""
        if self._empty:
            self._empty = False
            self._canvas.delete(self._text_id)

        # Render the figure off-screen, then query the true canvas size.
        # (The original also pre-computed the size from get_window_extent()
        # and immediately overwrote it -- dead code, removed.)
        fca = FigureCanvasAgg(fig)
        fca.draw()

        f_w, f_h = fca.get_renderer().get_canvas_width_height()
        f_w, f_h = int(f_w), int(f_h)

        self._graph_h += f_h
        self._graph_w = max(self._graph_w, f_w)
        self._canvas.config(width=self._graph_w, height=self._graph_h)
        self._canvas.grid(row=0, column=0)

        photo = tk.PhotoImage(master=self._canvas, width=f_w, height=f_h)
        self._canvas.create_image(f_w/2, self._graph_h-f_h/2, image=photo)
        tkagg.blit(photo, fca.get_renderer()._renderer, colormode=2)
        self._root.update()

        self._figs.append(fig)
        self._fcas[fig] = fca
        self._photos[fig] = photo

    def plot_new_figure(self):
        """Create an empty figure, register it, and return (fig, ax)."""
        fig = plt.Figure(figsize=(3, 2), edgecolor="black")
        ax = fig.add_axes([0.2, 0.2, 0.6, 0.6])
        fig.tight_layout()
        # plot_figure() stores the figure locally as well.
        self.plot_figure(fig)
        return fig, ax

    def plot_new_dynamic_figure(self, title=""):
        """Create, register and return a DynamicFigure."""
        dyfig = DynamicFigure(title=title)
        self.plot_figure(dyfig.fig)
        return dyfig

    def plot_new_dynamic_2d_figure(self, title="", **kwargs):
        """Create, register and return a Dynamic2DFigure."""
        dy2dfig = Dynamic2DFigure(title=title, **kwargs)
        self.plot_figure(dy2dfig.fig)
        return dy2dfig

    def refresh_figure(self, fig):
        """Redraw one registered figure into its Tk photo image."""
        self._fcas[fig].draw()
        self._fcas[fig].flush_events()
        fig.canvas.draw()
        fig.canvas.flush_events()
        tkagg.blit(
            self._photos[fig],
            self._fcas[fig].get_renderer()._renderer,
            colormode=2)
        self._root.update()

    def init_pygame(self):
        """Embed the pygame display inside a Tk frame (done lazily on the
        first plot_surface call)."""
        self._game_frame = tk.Frame(
            self._root,
            width=self._surf_w,
            height=self._surf_h)
        self._game_frame.grid(row=0, column=1)

        # Point SDL at the Tk frame's window handle before init.
        os.environ['SDL_WINDOWID'] = str(self._game_frame.winfo_id())
        self._game_frame.update()
        pygame.display.init()

    def plot_surface(self, surf):
        """Add a pygame surface, laid out left-to-right, and blit it."""
        s_w, s_h = surf.get_size()

        self._surf_w += s_w
        self._surf_h = max(self._surf_h, s_h)

        if not self._pygame_init:
            self._pygame_init = True
            self.init_pygame()
        else:
            self._game_frame.config(width=self._surf_w, height=self._surf_h)
            self._game_frame.grid(row=0, column=1)

        self._display = pygame.display.set_mode((self._surf_w, self._surf_h))

        self._surfs.append(surf)
        self._surf_coords[surf] = (self._surf_w-s_w, 0)

        self._display.blits(list(self._surf_coords.items()))
not self._pygame_init: 287 | self._pygame_init = True 288 | self.init_pygame() 289 | else: 290 | self._game_frame.config(width=self._surf_w, height=self._surf_h) 291 | self._game_frame.grid(row=0, column=1) 292 | 293 | self._display = pygame.display.set_mode((self._surf_w, self._surf_h)) 294 | 295 | self._surfs.append(surf) 296 | self._surf_coords[surf] = (self._surf_w-s_w, 0) 297 | 298 | self._display.blits(list(self._surf_coords.items())) 299 | 300 | def refresh(self): 301 | for fig in list(self._figs): 302 | self.refresh_figure(fig) 303 | self._root.update() 304 | 305 | if not self._display is None: 306 | self._display.blits(list(self._surf_coords.items())) 307 | pygame.display.flip() 308 | -------------------------------------------------------------------------------- /PythonClient/Minor Project/controller2d.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | """ 4 | 2D Controller Class to be used for the CARLA waypoint follower demo. 
5 | """ 6 | 7 | import cutils 8 | import numpy as np 9 | 10 | class Controller2D(object): 11 | def __init__(self, waypoints): 12 | self.vars = cutils.CUtils() 13 | self._current_x = 0 14 | self._current_y = 0 15 | self._current_yaw = 0 16 | self._current_speed = 0 17 | self._desired_speed = 0 18 | self._current_frame = 0 19 | self._current_timestamp = 0 20 | self._start_control_loop = False 21 | self._set_throttle = 0 22 | self._set_brake = 0 23 | self._set_steer = 0 24 | self._waypoints = waypoints 25 | self._conv_rad_to_steer = 180.0 / 70.0 / np.pi 26 | self._pi = np.pi 27 | self._2pi = 2.0 * np.pi 28 | 29 | def update_values(self, x, y, yaw, speed, timestamp, frame): 30 | self._current_x = x 31 | self._current_y = y 32 | self._current_yaw = yaw 33 | self._current_speed = speed 34 | self._current_timestamp = timestamp 35 | self._current_frame = frame 36 | if self._current_frame: 37 | self._start_control_loop = True 38 | 39 | def update_desired_speed(self): 40 | min_idx = 0 41 | min_dist = float("inf") 42 | desired_speed = 0 43 | for i in range(len(self._waypoints)): 44 | dist = np.linalg.norm(np.array([ 45 | self._waypoints[i][0] - self._current_x, 46 | self._waypoints[i][1] - self._current_y])) 47 | if dist < min_dist: 48 | min_dist = dist 49 | min_idx = i 50 | if min_idx < len(self._waypoints)-1: 51 | desired_speed = self._waypoints[min_idx][2] 52 | else: 53 | desired_speed = self._waypoints[-1][2] 54 | self._desired_speed = desired_speed 55 | 56 | def update_waypoints(self, new_waypoints): 57 | self._waypoints = new_waypoints 58 | 59 | def get_commands(self): 60 | return self._set_throttle, self._set_steer, self._set_brake 61 | 62 | def set_throttle(self, input_throttle): 63 | # Clamp the throttle command to valid bounds 64 | throttle = np.fmax(np.fmin(input_throttle, 1.0), 0.0) 65 | self._set_throttle = throttle 66 | 67 | def set_steer(self, input_steer_in_rad): 68 | # Covnert radians to [-1, 1] 69 | input_steer = self._conv_rad_to_steer * input_steer_in_rad 
70 | 71 | # Clamp the steering command to valid bounds 72 | steer = np.fmax(np.fmin(input_steer, 1.0), -1.0) 73 | self._set_steer = steer 74 | 75 | def set_brake(self, input_brake): 76 | # Clamp the steering command to valid bounds 77 | brake = np.fmax(np.fmin(input_brake, 1.0), 0.0) 78 | self._set_brake = brake 79 | 80 | def update_controls(self): 81 | ###################################################### 82 | # RETRIEVE SIMULATOR FEEDBACK 83 | ###################################################### 84 | x = self._current_x 85 | y = self._current_y 86 | yaw = self._current_yaw 87 | v = self._current_speed 88 | self.update_desired_speed() 89 | v_desired = self._desired_speed 90 | t = self._current_timestamp 91 | waypoints = self._waypoints 92 | throttle_output = 0 93 | steer_output = 0 94 | brake_output = 0 95 | 96 | ###################################################### 97 | ###################################################### 98 | # MODULE 7: DECLARE USAGE VARIABLES HERE 99 | ###################################################### 100 | ###################################################### 101 | """ 102 | Use 'self.vars.create_var(, )' 103 | to create a persistent variable (not destroyed at each iteration). 104 | This means that the value can be stored for use in the next 105 | iteration of the control loop. 
106 | Example: Creation of 'v_previous', default value to be 0 107 | self.vars.create_var('v_previous', 0.0) 108 | Example: Setting 'v_previous' to be 1.0 109 | self.vars.v_previous = 1.0 110 | Example: Accessing the value from 'v_previous' to be used 111 | throttle_output = 0.5 * self.vars.v_previous 112 | """ 113 | self.vars.create_var('v_previous', 0.0) 114 | self.vars.create_var('t_previous', 0.0) 115 | self.vars.create_var('error_previous', 0.0) 116 | self.vars.create_var('integral_error_previous', 0.0) 117 | self.vars.create_var('throttle_previous', 0.0) 118 | 119 | 120 | # Skip the first frame to store previous values properly 121 | if self._start_control_loop: 122 | """ 123 | Controller iteration code block. 124 | Controller Feedback Variables: 125 | x : Current X position (meters) 126 | y : Current Y position (meters) 127 | yaw : Current yaw pose (radians) 128 | v : Current forward speed (meters per second) 129 | t : Current time (seconds) 130 | v_desired : Current desired speed (meters per second) 131 | (Computed as the speed to track at the 132 | closest waypoint to the vehicle.) 133 | waypoints : Current waypoints to track 134 | (Includes speed to track at each x,y 135 | location.) 136 | Format: [[x0, y0, v0], 137 | [x1, y1, v1], 138 | ... 
139 | [xn, yn, vn]] 140 | Example: 141 | waypoints[2][1]: 142 | Returns the 3rd waypoint's y position 143 | waypoints[5]: 144 | Returns [x5, y5, v5] (6th waypoint) 145 | 146 | Controller Output Variables: 147 | throttle_output : Throttle output (0 to 1) 148 | steer_output : Steer output (-1.22 rad to 1.22 rad) 149 | brake_output : Brake output (0 to 1) 150 | """ 151 | 152 | ###################################################### 153 | ###################################################### 154 | # MODULE 7: IMPLEMENTATION OF LONGITUDINAL CONTROLLER HERE 155 | ###################################################### 156 | ###################################################### 157 | """ 158 | Implement a longitudinal controller here. Remember that you can 159 | access the persistent variables declared above here. For 160 | example, can treat self.vars.v_previous like a "global variable". 161 | """ 162 | 163 | # Change these outputs with the longitudinal controller. Note that 164 | # brake_output is optional and is not required to pass the 165 | # assignment, as the car will naturally slow down over time. 
166 | 167 | kp = 1.0 168 | ki = 0.2 169 | kd = 0.01 170 | 171 | throttle_output = 0 172 | brake_output = 0 173 | 174 | # pid control 175 | st = t - self.vars.t_previous 176 | 177 | # error term 178 | e_v = v_desired - v 179 | 180 | # I 181 | inte_v = self.vars.integral_error_previous + e_v * st 182 | 183 | # D 184 | derivate = (e_v - self.vars.error_previous) / st 185 | 186 | acc = kp * e_v + ki * inte_v + kd * derivate 187 | 188 | if acc > 0: 189 | throttle_output = (np.tanh(acc) + 1)/2 190 | # throttle_output = max(0.0, min(1.0, throttle_output)) 191 | if throttle_output - self.vars.throttle_previous > 0.1: 192 | throttle_output = self.vars.throttle_previous + 0.1 193 | else: 194 | throttle_output = 0 195 | 196 | 197 | ###################################################### 198 | ###################################################### 199 | # MODULE 7: IMPLEMENTATION OF LATERAL CONTROLLER HERE 200 | ###################################################### 201 | ###################################################### 202 | """ 203 | Implement a lateral controller here. Remember that you can 204 | access the persistent variables declared above here. For 205 | example, can treat self.vars.v_previous like a "global variable". 206 | """ 207 | 208 | # Change the steer output with the lateral controller. 
209 | steer_output = 0 210 | 211 | # Use stanley controller for lateral control 212 | k_e = 0.3 213 | slope = (waypoints[-1][1]-waypoints[0][1])/ (waypoints[-1][0]-waypoints[0][0]) 214 | a = -slope 215 | b = 1.0 216 | c = (slope*waypoints[0][0]) - waypoints[0][1] 217 | 218 | # heading error 219 | yaw_path = np.arctan2(waypoints[-1][1]-waypoints[0][1], waypoints[-1][0]-waypoints[0][0]) 220 | # yaw_path = np.arctan2(slope, 1.0) # This was turning the vehicle only to the right (some error) 221 | yaw_diff_heading = yaw_path - yaw 222 | if yaw_diff_heading > np.pi: 223 | yaw_diff_heading -= 2 * np.pi 224 | if yaw_diff_heading < - np.pi: 225 | yaw_diff_heading += 2 * np.pi 226 | 227 | # crosstrack erroe 228 | current_xy = np.array([x, y]) 229 | crosstrack_error = np.min(np.sum((current_xy - np.array(waypoints)[:, :2])**2, axis=1)) 230 | yaw_cross_track = np.arctan2(y-waypoints[0][1], x-waypoints[0][0]) 231 | yaw_path2ct = yaw_path - yaw_cross_track 232 | if yaw_path2ct > np.pi: 233 | yaw_path2ct -= 2 * np.pi 234 | if yaw_path2ct < - np.pi: 235 | yaw_path2ct += 2 * np.pi 236 | if yaw_path2ct > 0: 237 | crosstrack_error = abs(crosstrack_error) 238 | else: 239 | crosstrack_error = - abs(crosstrack_error) 240 | yaw_diff_crosstrack = np.arctan(k_e * crosstrack_error / (v)) 241 | 242 | # final expected steering 243 | steer_expect = yaw_diff_crosstrack + yaw_diff_heading 244 | if steer_expect > np.pi: 245 | steer_expect -= 2 * np.pi 246 | if steer_expect < - np.pi: 247 | steer_expect += 2 * np.pi 248 | steer_expect = min(1.22, steer_expect) 249 | steer_expect = max(-1.22, steer_expect) 250 | 251 | #update 252 | steer_output = steer_expect 253 | 254 | ###################################################### 255 | # SET CONTROLS OUTPUT 256 | ###################################################### 257 | self.set_throttle(throttle_output) # in percent (0 to 1) 258 | self.set_steer(steer_output) # in rad (-1.22 to 1.22) 259 | self.set_brake(brake_output) # in percent (0 to 1) 260 | 261 
# Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.


import abc
import logging
import math
import time

from carla.client import VehicleControl
from carla.client import make_carla_client
from carla.driving_benchmark.metrics import Metrics
from carla.planner.planner import Planner
from carla.settings import CarlaSettings
from carla.tcp import TCPConnectionError

from . import results_printer
from .recording import Recording


def sldist(c1, c2):
    # Euclidean distance between two 2D points.
    return math.sqrt((c2[0] - c1[0]) ** 2 + (c2[1] - c1[1]) ** 2)


class DrivingBenchmark(object):
    """
    The Benchmark class controls the execution of the benchmark, interfacing
    an Agent class with a set experiment Suite.

    The benchmark class must be inherited by a class that defines all
    the experiments to be run by the agent.
    """

    def __init__(
            self,
            city_name='Town01',
            name_to_save='Test',
            continue_experiment=False,
            save_images=False,
            distance_for_success=2.0
    ):

        self.__metaclass__ = abc.ABCMeta

        self._city_name = city_name
        self._base_name = name_to_save
        # The minimum distance for arriving at the goal point in
        # order to consider it a success.
        self._distance_for_success = distance_for_success
        # The object used to record the benchmark and to be able to continue
        # a previously interrupted run.
        self._recording = Recording(name_to_save=name_to_save,
                                    continue_experiment=continue_experiment,
                                    save_images=save_images
                                    )

        # We have a default planner instantiated that produces high level commands.
        self._planner = Planner(city_name)

    def benchmark_agent(self, experiment_suite, agent, client):
        """
        Function to benchmark the agent.
        It first checks the log file for this benchmark;
        if it exists, it continues from the experiment where it stopped.

        Args:
            experiment_suite: the suite of experiments (poses/conditions) to run.
            agent: an agent object with the run_step method implemented.
            client: the CARLA client used to talk to the simulator.

        Return:
            A dictionary with all the metrics computed from the
            agent running the set of experiments.
        """

        # Instantiate a metric object that will be used to compute the metrics for
        # the benchmark afterwards.
        metrics_object = Metrics(experiment_suite.metrics_parameters,
                                 experiment_suite.dynamic_tasks)

        # Returns the current pose and task for this benchmark
        # (non-zero when continuing a previous run).
        start_pose, start_experiment = self._recording.get_pose_and_experiment(
            experiment_suite.get_number_of_poses_task())

        logging.info('START')

        for experiment in experiment_suite.get_experiments()[int(start_experiment):]:

            positions = client.load_settings(
                experiment.conditions).player_start_spots

            self._recording.log_start(experiment.task)

            for pose in experiment.poses[start_pose:]:
                for rep in range(experiment.repetitions):

                    # A pose is a (start_index, end_index) pair into the
                    # town's player start spots.
                    start_index = pose[0]
                    end_index = pose[1]

                    client.start_episode(start_index)
                    # Print information on the episode being run.
                    logging.info('======== !!!! ==========')
                    logging.info(' Start Position %d End Position %d ',
                                 start_index, end_index)

                    self._recording.log_poses(start_index, end_index,
                                              experiment.Conditions.WeatherId)

                    # Calculate the initial (straight-line) distance for this episode.
                    initial_distance = \
                        sldist(
                            [positions[start_index].location.x, positions[start_index].location.y],
                            [positions[end_index].location.x, positions[end_index].location.y])

                    # Time out is derived from the shortest path length on the road map.
                    time_out = experiment_suite.calculate_time_out(
                        self._get_shortest_path(positions[start_index], positions[end_index]))

                    # Run the agent on this episode.
                    (result, reward_vec, control_vec, final_time, remaining_distance) = \
                        self._run_navigation_episode(
                            agent, client, time_out, positions[end_index],
                            str(experiment.Conditions.WeatherId) + '_'
                            + str(experiment.task) + '_' + str(start_index)
                            + '.' + str(end_index))

                    # Write the general status of the episode that just ran.
                    self._recording.write_summary_results(
                        experiment, pose, rep, initial_distance,
                        remaining_distance, final_time, time_out, result)

                    # Write the per-frame details of this episode.
                    self._recording.write_measurements_results(experiment, rep, pose, reward_vec,
                                                               control_vec)
                    if result > 0:
                        logging.info('+++++ Target achieved in %f seconds! +++++',
                                     final_time)
                    else:
                        logging.info('----- Timeout! -----')

            # Only the first (possibly continued) experiment starts mid-way;
            # subsequent experiments start from pose 0.
            start_pose = 0

        self._recording.log_end()

        return metrics_object.compute(self._recording.path)

    def get_path(self):
        """
        Returns the path where the log was saved.
        """
        return self._recording.path

    def _get_directions(self, current_point, end_point):
        """
        Returns the high-level planner command to reach a certain goal
        from the current transform.
        """
        # NOTE(review): 0.22 appears to be a fixed z height used for
        # planner queries -- confirm against the planner implementation.
        directions = self._planner.get_next_command(
            (current_point.location.x,
             current_point.location.y, 0.22),
            (current_point.orientation.x,
             current_point.orientation.y,
             current_point.orientation.z),
            (end_point.location.x, end_point.location.y, 0.22),
            (end_point.orientation.x, end_point.orientation.y, end_point.orientation.z))
        return directions

    def _get_shortest_path(self, start_point, end_point):
        """
        Calculates the shortest path distance between two points
        considering the road network.
        """

        return self._planner.get_shortest_path_distance(
            [
                start_point.location.x, start_point.location.y, 0.22], [
                start_point.orientation.x, start_point.orientation.y, 0.22], [
                end_point.location.x, end_point.location.y, end_point.location.z], [
                end_point.orientation.x, end_point.orientation.y, end_point.orientation.z])

    def _run_navigation_episode(
            self,
            agent,
            client,
            time_out,
            target,
            episode_name):
        """
        Run one episode of the benchmark (one pose) for a certain agent.

        Args:
            agent: the agent object
            client: an object of the carla client to communicate
                with the CARLA simulator
            time_out: the time limit to complete this episode (seconds)
            target: the target transform to reach
            episode_name: the name used for saving images of this episode

        Return:
            (result, measurement_vec, control_vec, elapsed_time, distance)
            where result is 1 on success and 0 on timeout.
        """

        # Send an initial (neutral) command to kick off the episode.
        measurements, sensor_data = client.read_data()
        client.send_control(VehicleControl())

        # game_timestamp is in milliseconds.
        initial_timestamp = measurements.game_timestamp
        current_timestamp = initial_timestamp

        # The vector containing all measurements produced on this episode.
        measurement_vec = []
        # The vector containing all controls produced on this episode.
        control_vec = []
        frame = 0
        distance = 10000
        success = False

        while (current_timestamp - initial_timestamp) < (time_out * 1000) and not success:

            # Read data from server with the client.
            measurements, sensor_data = client.read_data()
            # The directions to reach the goal are calculated.
            directions = self._get_directions(measurements.player_measurements.transform, target)
            # Agent processes the data.
            control = agent.run_step(measurements, sensor_data, directions, target)
            # Send the control commands to the vehicle.
            client.send_control(control)

            # Save images if the flag is activated.
            self._recording.save_images(sensor_data, episode_name, frame)

            current_x = measurements.player_measurements.transform.location.x
            current_y = measurements.player_measurements.transform.location.y

            logging.info("Controller is Inputting:")
            logging.info('Steer = %f Throttle = %f Brake = %f ',
                         control.steer, control.throttle, control.brake)

            current_timestamp = measurements.game_timestamp
            # Get the remaining straight-line distance to the target.
            distance = sldist([current_x, current_y],
                              [target.location.x, target.location.y])
            # Write status of the run on verbose mode.
            logging.info('Status:')
            logging.info(
                '[d=%f] c_x = %f, c_y = %f ---> t_x = %f, t_y = %f',
                float(distance), current_x, current_y, target.location.x,
                target.location.y)
            # Check if the target was reached.
            if distance < self._distance_for_success:
                success = True

            # Increment the frame counter and append the measurements and controls.
            frame += 1
            measurement_vec.append(measurements.player_measurements)
            control_vec.append(control)

        if success:
            return 1, measurement_vec, control_vec, float(
                current_timestamp - initial_timestamp) / 1000.0, distance
        return 0, measurement_vec, control_vec, time_out, distance


def run_driving_benchmark(agent,
                          experiment_suite,
                          city_name='Town01',
                          log_name='Test',
                          continue_experiment=False,
                          host='127.0.0.1',
                          port=2000
                          ):
    """Run a full driving benchmark, retrying on TCP connection errors."""
    while True:
        try:

            with make_carla_client(host, port) as client:
                # Hack to fix issue 310: we force a reset, so it does not get
                # the positions on the first server reset.
                client.load_settings(CarlaSettings())
                client.start_episode(0)

                # We instantiate the driving benchmark, that is the engine used to
                # benchmark an agent. The instantiation starts the log process.
                benchmark = DrivingBenchmark(city_name=city_name,
                                             name_to_save=log_name + '_'
                                             + type(experiment_suite).__name__
                                             + '_' + city_name,
                                             continue_experiment=continue_experiment)
                # This function performs the benchmark. It returns a dictionary
                # summarizing the entire execution.
                benchmark_summary = benchmark.benchmark_agent(experiment_suite, agent, client)

                print("")
                print("")
                print("----- Printing results for training weathers (Seen in Training) -----")
                print("")
                print("")
                results_printer.print_summary(benchmark_summary, experiment_suite.train_weathers,
                                              benchmark.get_path())

                print("")
                print("")
                print("----- Printing results for test weathers (Unseen in Training) -----")
                print("")
                print("")

                results_printer.print_summary(benchmark_summary, experiment_suite.test_weathers,
                                              benchmark.get_path())

                break

        except TCPConnectionError as error:
            logging.error(error)
            time.sleep(1)
import numpy as np
import math
import os

# Euclidean distance between two 2D points.
sldist = lambda c1, c2: math.sqrt((c2[0] - c1[0]) ** 2 + (c2[1] - c1[1]) ** 2)
# Flatten one level of nesting in a list of lists.
flatten = lambda l: [item for sublist in l for item in sublist]


class Metrics(object):
    """
    The metrics class is made to take the driving measurements
    and calculate some specific performance metrics.

    """

    def __init__(self, parameters, dynamic_tasks):
        """
        Args
            parameters: A dictionary with the used parameters for checking how to count infractions
            dynamic_tasks: A list of all the dynamic tasks (that contain dynamic objects)
        """

        self._parameters = parameters
        self._parameters['dynamic_tasks'] = dynamic_tasks

    def _divide_by_episodes(self, measurements_matrix, header):

        """
        Divides the measurements matrix into the different episodes.

        An episode boundary is detected when the (start_point, end_point)
        pair or the exp_id changes between consecutive rows.

        Args:
            measurements_matrix: The full measurements matrix
            header: The header from the measurements matrix

        """

        # Read previous values for position zero.
        prev_start = measurements_matrix[0, header.index('start_point')]
        prev_end = measurements_matrix[0, header.index('end_point')]
        prev_exp_id = measurements_matrix[0, header.index('exp_id')]

        # Start at position 1.
        i = 1
        prev_i_position = 0
        episode_matrix_metrics = []

        while i < measurements_matrix.shape[0]:

            current_start = measurements_matrix[i, header.index('start_point')]
            current_end = measurements_matrix[i, header.index('end_point')]
            current_exp_id = measurements_matrix[i, header.index('exp_id')]

            # If there is a change in the position it means it is a new episode for sure.
            if (current_start != prev_start and current_end != prev_end) \
                    or current_exp_id != prev_exp_id:
                episode_matrix_metrics.append(measurements_matrix[prev_i_position:i, :])
                prev_i_position = i

            prev_start = current_start
            prev_end = current_end
            prev_exp_id = current_exp_id

            i += 1

        # NOTE(review): the final slice ends at -1, so the last row of the
        # last episode is dropped -- confirm this is intentional.
        episode_matrix_metrics.append(measurements_matrix[prev_i_position:-1, :])

        return episode_matrix_metrics

    def _get_collisions(self, selected_matrix, header):
        """
        Get the number of collisions for pedestrians, vehicles or other.

        A collision is counted when the cumulative collision intensity jumps
        by more than the configured threshold within 'frames_skip' frames;
        after a count, 'frames_recount' frames are skipped to avoid double
        counting the same event.

        Args:
            selected_matrix: The matrix with all the experiments summary
            header: The header, to know the positions of details


        """
        count_collisions_general = 0
        count_collisions_pedestrian = 0
        count_collisions_vehicle = 0
        i = 1
        # Computing general collisions
        while i < selected_matrix.shape[0]:
            if (selected_matrix[i, header.index('collision_other')]
                - selected_matrix[
                    (i - self._parameters['collision_other']['frames_skip']), header.index(
                        'collision_other')]) > \
                    self._parameters['collision_other']['threshold']:
                count_collisions_general += 1
                i += self._parameters['collision_other']['frames_recount']
            i += 1

        i = 1
        # Computing collisions for vehicles
        while i < selected_matrix.shape[0]:
            if (selected_matrix[i, header.index('collision_vehicles')]
                - selected_matrix[
                    (i - self._parameters['collision_vehicles']['frames_skip']), header.index(
                        'collision_vehicles')]) > \
                    self._parameters['collision_vehicles']['threshold']:
                count_collisions_vehicle += 1
                i += self._parameters['collision_vehicles']['frames_recount']
            i += 1

        i = 1

        # Computing the collisions for pedestrians
        while i < selected_matrix.shape[0]:
            if (selected_matrix[i, header.index('collision_pedestrians')]
                - selected_matrix[i -
                                  self._parameters['collision_pedestrians']['frames_skip'],
                                  header.index('collision_pedestrians')]) > \
                    self._parameters['collision_pedestrians']['threshold']:
                count_collisions_pedestrian += 1
                i += self._parameters['collision_pedestrians']['frames_recount']
            i += 1

        return count_collisions_general, count_collisions_vehicle, count_collisions_pedestrian

    def _get_distance_traveled(self, selected_matrix, header):
        """
        Compute the total distance travelled, in kilometers.

        Args:
            selected_matrix: The matrix with all the experiments summary
            header: The header, to know the positions of details


        """

        prev_x = selected_matrix[0, header.index('pos_x')]
        prev_y = selected_matrix[0, header.index('pos_y')]

        i = 1
        acummulated_distance = 0

        while i < selected_matrix.shape[0]:
            x = selected_matrix[i, header.index('pos_x')]
            y = selected_matrix[i, header.index('pos_y')]

            # Sum the step-by-step euclidean distances between frames.
            acummulated_distance += sldist((x, y), (prev_x, prev_y))

            prev_x = x
            prev_y = y

            i += 1

        # Positions are in meters; convert to kilometers.
        return acummulated_distance / (1000.0)

    def _get_out_of_road_lane(self, selected_matrix, header):

        """
        Check for the situations where the agent goes out of the road
        (sidewalk) or onto the opposite lane.

        Args:
            selected_matrix: The matrix with all the experiments summary
            header: The header, to know the positions of details


        """

        count_sidewalk_intersect = 0
        count_lane_intersect = 0

        # NOTE(review): i starts at 0, so (i - frames_skip) is negative on
        # the first iterations and wraps to the end of the matrix -- confirm
        # against the original CARLA implementation.
        i = 0

        while i < selected_matrix.shape[0]:

            if (selected_matrix[i, header.index('intersection_offroad')]
                - selected_matrix[(i - self._parameters['intersection_offroad']['frames_skip']),
                                  header.index('intersection_offroad')]) \
                    > self._parameters['intersection_offroad']['threshold']:
                count_sidewalk_intersect += 1
                i += self._parameters['intersection_offroad']['frames_recount']
            if i >= selected_matrix.shape[0]:
                break

            if (selected_matrix[i, header.index('intersection_otherlane')]
                - selected_matrix[(i - self._parameters['intersection_otherlane']['frames_skip']),
                                  header.index('intersection_otherlane')]) \
                    > self._parameters['intersection_otherlane']['threshold']:
                count_lane_intersect += 1
                i += self._parameters['intersection_otherlane']['frames_recount']

            i += 1

        return count_lane_intersect, count_sidewalk_intersect

    def compute(self, path):

        """
        Compute a dictionary containing the following metrics

        * Off Road Intersection: The number of times the agent goes out of the road.
         The intersection is only counted if the area of the vehicle outside
          of the road is bigger than a *threshold*.

        * Other Lane Intersection: The number of times the agent goes to the other
         lane. The intersection is only counted if the area of the vehicle on the
         other lane is bigger than a *threshold*.

        * Vehicle Collisions: The number of collisions with vehicles that have
          an impact bigger than a *threshold*.

        * Pedestrian Collisions: The number of collisions with pedestrians
         that have an impact bigger than a threshold.

        * General Collisions: The number of collisions with all other
        objects.


        Args:
            path: Path where the log files are.

        """

        # Read the column headers of both CSV logs (the trailing newline is
        # stripped from the last column name).
        with open(os.path.join(path, 'summary.csv'), "rU") as f:
            header = f.readline()
            header = header.split(',')
            header[-1] = header[-1][:-1]

        with open(os.path.join(path, 'measurements.csv'), "rU") as f:

            header_metrics = f.readline()
            header_metrics = header_metrics.split(',')
            header_metrics[-1] = header_metrics[-1][:-1]

        result_matrix = np.loadtxt(os.path.join(path, 'summary.csv'), delimiter=",", skiprows=1)

        # Corner Case: The presented test just had one episode
        if result_matrix.ndim == 1:
            result_matrix = np.expand_dims(result_matrix, axis=0)

        tasks = np.unique(result_matrix[:, header.index('exp_id')])

        all_weathers = np.unique(result_matrix[:, header.index('weather')])

        measurements_matrix = np.loadtxt(os.path.join(path, 'measurements.csv'), delimiter=",",
                                         skiprows=1)

        # One entry per (weather, task); list-valued metrics get one value
        # per episode appended below.
        metrics_dictionary = {'episodes_completion': {w: [0] * len(tasks) for w in all_weathers},
                              'intersection_offroad': {w: [[] for i in range(len(tasks))] for w in
                                                       all_weathers},
                              'intersection_otherlane': {w: [[] for i in range(len(tasks))] for w in
                                                         all_weathers},
                              'collision_pedestrians': {w: [[] for i in range(len(tasks))] for w in
                                                        all_weathers},
                              'collision_vehicles': {w: [[] for i in range(len(tasks))] for w in
                                                     all_weathers},
                              'collision_other': {w: [[] for i in range(len(tasks))] for w in
                                                  all_weathers},
                              'episodes_fully_completed': {w: [0] * len(tasks) for w in
                                                           all_weathers},
                              'average_speed': {w: [0] * len(tasks) for w in all_weathers},
                              'driven_kilometers': {w: [0] * len(tasks) for w in all_weathers}
                              }

        for t in range(len(tasks)):
            experiment_results_matrix = result_matrix[
                result_matrix[:, header.index('exp_id')] == tasks[t]]

            weathers = np.unique(experiment_results_matrix[:, header.index('weather')])

            for w in weathers:

                # Select the summary rows for this (task, weather) pair.
                experiment_results_matrix = result_matrix[
                    np.logical_and(result_matrix[:, header.index(
                        'exp_id')] == tasks[t], result_matrix[:, header.index('weather')] == w)]

                # And the corresponding per-frame measurement rows.
                experiment_metrics_matrix = measurements_matrix[
                    np.logical_and(measurements_matrix[:, header_metrics.index(
                        'exp_id')] == float(tasks[t]),
                                   measurements_matrix[:, header_metrics.index('weather')] == float(
                                       w))]

                metrics_dictionary['episodes_fully_completed'][w][t] = \
                    experiment_results_matrix[:, header.index('result')].tolist()

                # Fraction of the initial distance that was covered.
                metrics_dictionary['episodes_completion'][w][t] = \
                    ((experiment_results_matrix[:, header.index('initial_distance')]
                      - experiment_results_matrix[:, header.index('final_distance')])
                     / experiment_results_matrix[:, header.index('initial_distance')]).tolist()

                # Now we divide the experiment metrics matrix into episodes.

                episode_experiment_metrics_matrix = self._divide_by_episodes(
                    experiment_metrics_matrix, header_metrics)

                count = 0

                for episode_experiment_metrics in episode_experiment_metrics_matrix:

                    km_run_episodes = self._get_distance_traveled(
                        episode_experiment_metrics, header_metrics)
                    metrics_dictionary['driven_kilometers'][w][t] += km_run_episodes
                    # NOTE(review): this assignment overwrites the value on
                    # every episode, so only the last episode's average speed
                    # is kept -- confirm this is the intended behavior.
                    metrics_dictionary['average_speed'][w][t] = \
                        km_run_episodes / (experiment_results_matrix[count,
                                                                    header.index(
                                                                        'final_time')] / 3600.0)
                    count += 1

                    lane_road = self._get_out_of_road_lane(
                        episode_experiment_metrics, header_metrics)

                    metrics_dictionary['intersection_otherlane'][
                        w][t].append(lane_road[0])
                    metrics_dictionary['intersection_offroad'][
                        w][t].append(lane_road[1])

                    # Collisions are only meaningful for tasks with dynamic
                    # objects; static tasks record zeros.
                    if tasks[t] in set(self._parameters['dynamic_tasks']):

                        collisions = self._get_collisions(episode_experiment_metrics,
                                                          header_metrics)

                        metrics_dictionary['collision_pedestrians'][
                            w][t].append(collisions[2])
                        metrics_dictionary['collision_vehicles'][
                            w][t].append(collisions[1])
                        metrics_dictionary['collision_other'][
                            w][t].append(collisions[0])

                    else:

                        metrics_dictionary['collision_pedestrians'][
                            w][t].append(0)
                        metrics_dictionary['collision_vehicles'][
                            w][t].append(0)
                        metrics_dictionary['collision_other'][
                            w][t].append(0)

        return metrics_dictionary
27 | """ 28 | 29 | from __future__ import print_function 30 | 31 | import argparse 32 | import logging 33 | import random 34 | import time 35 | 36 | try: 37 | import pygame 38 | from pygame.locals import K_DOWN 39 | from pygame.locals import K_LEFT 40 | from pygame.locals import K_RIGHT 41 | from pygame.locals import K_SPACE 42 | from pygame.locals import K_UP 43 | from pygame.locals import K_a 44 | from pygame.locals import K_d 45 | from pygame.locals import K_p 46 | from pygame.locals import K_q 47 | from pygame.locals import K_r 48 | from pygame.locals import K_s 49 | from pygame.locals import K_w 50 | except ImportError: 51 | raise RuntimeError('cannot import pygame, make sure pygame package is installed') 52 | 53 | try: 54 | import numpy as np 55 | except ImportError: 56 | raise RuntimeError('cannot import numpy, make sure numpy package is installed') 57 | 58 | from carla import image_converter 59 | from carla import sensor 60 | from carla.client import make_carla_client, VehicleControl 61 | from carla.planner.map import CarlaMap 62 | from carla.settings import CarlaSettings 63 | from carla.tcp import TCPConnectionError 64 | from carla.util import print_over_same_line 65 | 66 | 67 | WINDOW_WIDTH = 800 68 | WINDOW_HEIGHT = 600 69 | MINI_WINDOW_WIDTH = 320 70 | MINI_WINDOW_HEIGHT = 180 71 | 72 | 73 | def make_carla_settings(args): 74 | """Make a CarlaSettings object with the settings we need.""" 75 | settings = CarlaSettings() 76 | settings.set( 77 | SynchronousMode=False, 78 | SendNonPlayerAgentsInfo=True, 79 | NumberOfVehicles=15, 80 | NumberOfPedestrians=30, 81 | WeatherId=random.choice([1, 3, 7, 8, 14]), 82 | QualityLevel=args.quality_level) 83 | settings.randomize_seeds() 84 | camera0 = sensor.Camera('CameraRGB') 85 | camera0.set_image_size(WINDOW_WIDTH, WINDOW_HEIGHT) 86 | camera0.set_position(2.0, 0.0, 1.4) 87 | camera0.set_rotation(0.0, 0.0, 0.0) 88 | settings.add_sensor(camera0) 89 | camera1 = sensor.Camera('CameraDepth', PostProcessing='Depth') 90 | 
camera1.set_image_size(MINI_WINDOW_WIDTH, MINI_WINDOW_HEIGHT) 91 | camera1.set_position(2.0, 0.0, 1.4) 92 | camera1.set_rotation(0.0, 0.0, 0.0) 93 | settings.add_sensor(camera1) 94 | camera2 = sensor.Camera('CameraSemSeg', PostProcessing='SemanticSegmentation') 95 | camera2.set_image_size(MINI_WINDOW_WIDTH, MINI_WINDOW_HEIGHT) 96 | camera2.set_position(2.0, 0.0, 1.4) 97 | camera2.set_rotation(0.0, 0.0, 0.0) 98 | settings.add_sensor(camera2) 99 | if args.lidar: 100 | lidar = sensor.Lidar('Lidar32') 101 | lidar.set_position(0, 0, 2.5) 102 | lidar.set_rotation(0, 0, 0) 103 | lidar.set( 104 | Channels=32, 105 | Range=50, 106 | PointsPerSecond=100000, 107 | RotationFrequency=10, 108 | UpperFovLimit=10, 109 | LowerFovLimit=-30) 110 | settings.add_sensor(lidar) 111 | return settings 112 | 113 | 114 | class Timer(object): 115 | def __init__(self): 116 | self.step = 0 117 | self._lap_step = 0 118 | self._lap_time = time.time() 119 | 120 | def tick(self): 121 | self.step += 1 122 | 123 | def lap(self): 124 | self._lap_step = self.step 125 | self._lap_time = time.time() 126 | 127 | def ticks_per_second(self): 128 | return float(self.step - self._lap_step) / self.elapsed_seconds_since_lap() 129 | 130 | def elapsed_seconds_since_lap(self): 131 | return time.time() - self._lap_time 132 | 133 | 134 | class CarlaGame(object): 135 | def __init__(self, carla_client, args): 136 | self.client = carla_client 137 | self._carla_settings = make_carla_settings(args) 138 | self._timer = None 139 | self._display = None 140 | self._main_image = None 141 | self._mini_view_image1 = None 142 | self._mini_view_image2 = None 143 | self._enable_autopilot = args.autopilot 144 | self._lidar_measurement = None 145 | self._map_view = None 146 | self._is_on_reverse = False 147 | self._display_map = args.map 148 | self._city_name = None 149 | self._map = None 150 | self._map_shape = None 151 | self._map_view = None 152 | self._position = None 153 | self._agent_positions = None 154 | 155 | def 
execute(self): 156 | """Launch the PyGame.""" 157 | pygame.init() 158 | self._initialize_game() 159 | try: 160 | while True: 161 | for event in pygame.event.get(): 162 | if event.type == pygame.QUIT: 163 | return 164 | self._on_loop() 165 | self._on_render() 166 | finally: 167 | pygame.quit() 168 | 169 | def _initialize_game(self): 170 | self._on_new_episode() 171 | 172 | if self._city_name is not None: 173 | self._map = CarlaMap(self._city_name, 0.1643, 50.0) 174 | self._map_shape = self._map.map_image.shape 175 | self._map_view = self._map.get_map(WINDOW_HEIGHT) 176 | 177 | extra_width = int((WINDOW_HEIGHT/float(self._map_shape[0]))*self._map_shape[1]) 178 | self._display = pygame.display.set_mode( 179 | (WINDOW_WIDTH + extra_width, WINDOW_HEIGHT), 180 | pygame.HWSURFACE | pygame.DOUBLEBUF) 181 | else: 182 | self._display = pygame.display.set_mode( 183 | (WINDOW_WIDTH, WINDOW_HEIGHT), 184 | pygame.HWSURFACE | pygame.DOUBLEBUF) 185 | 186 | logging.debug('pygame started') 187 | 188 | def _on_new_episode(self): 189 | self._carla_settings.randomize_seeds() 190 | self._carla_settings.randomize_weather() 191 | scene = self.client.load_settings(self._carla_settings) 192 | if self._display_map: 193 | self._city_name = scene.map_name 194 | number_of_player_starts = len(scene.player_start_spots) 195 | player_start = np.random.randint(number_of_player_starts) 196 | print('Starting new episode...') 197 | self.client.start_episode(player_start) 198 | self._timer = Timer() 199 | self._is_on_reverse = False 200 | 201 | def _on_loop(self): 202 | self._timer.tick() 203 | 204 | measurements, sensor_data = self.client.read_data() 205 | 206 | self._main_image = sensor_data.get('CameraRGB', None) 207 | self._mini_view_image1 = sensor_data.get('CameraDepth', None) 208 | self._mini_view_image2 = sensor_data.get('CameraSemSeg', None) 209 | self._lidar_measurement = sensor_data.get('Lidar32', None) 210 | 211 | # Print measurements every second. 
212 | if self._timer.elapsed_seconds_since_lap() > 1.0: 213 | if self._city_name is not None: 214 | # Function to get car position on map. 215 | map_position = self._map.convert_to_pixel([ 216 | measurements.player_measurements.transform.location.x, 217 | measurements.player_measurements.transform.location.y, 218 | measurements.player_measurements.transform.location.z]) 219 | # Function to get orientation of the road car is in. 220 | lane_orientation = self._map.get_lane_orientation([ 221 | measurements.player_measurements.transform.location.x, 222 | measurements.player_measurements.transform.location.y, 223 | measurements.player_measurements.transform.location.z]) 224 | 225 | self._print_player_measurements_map( 226 | measurements.player_measurements, 227 | map_position, 228 | lane_orientation) 229 | else: 230 | self._print_player_measurements(measurements.player_measurements) 231 | 232 | # Plot position on the map as well. 233 | 234 | self._timer.lap() 235 | 236 | control = self._get_keyboard_control(pygame.key.get_pressed()) 237 | # Set the player position 238 | if self._city_name is not None: 239 | self._position = self._map.convert_to_pixel([ 240 | measurements.player_measurements.transform.location.x, 241 | measurements.player_measurements.transform.location.y, 242 | measurements.player_measurements.transform.location.z]) 243 | self._agent_positions = measurements.non_player_agents 244 | 245 | if control is None: 246 | self._on_new_episode() 247 | elif self._enable_autopilot: 248 | self.client.send_control(measurements.player_measurements.autopilot_control) 249 | else: 250 | self.client.send_control(control) 251 | 252 | def _get_keyboard_control(self, keys): 253 | """ 254 | Return a VehicleControl message based on the pressed keys. Return None 255 | if a new episode was requested. 
256 | """ 257 | if keys[K_r]: 258 | return None 259 | control = VehicleControl() 260 | if keys[K_LEFT] or keys[K_a]: 261 | control.steer = -1.0 262 | if keys[K_RIGHT] or keys[K_d]: 263 | control.steer = 1.0 264 | if keys[K_UP] or keys[K_w]: 265 | control.throttle = 1.0 266 | if keys[K_DOWN] or keys[K_s]: 267 | control.brake = 1.0 268 | if keys[K_SPACE]: 269 | control.hand_brake = True 270 | if keys[K_q]: 271 | self._is_on_reverse = not self._is_on_reverse 272 | if keys[K_p]: 273 | self._enable_autopilot = not self._enable_autopilot 274 | control.reverse = self._is_on_reverse 275 | return control 276 | 277 | def _print_player_measurements_map( 278 | self, 279 | player_measurements, 280 | map_position, 281 | lane_orientation): 282 | message = 'Step {step} ({fps:.1f} FPS): ' 283 | message += 'Map Position ({map_x:.1f},{map_y:.1f}) ' 284 | message += 'Lane Orientation ({ori_x:.1f},{ori_y:.1f}) ' 285 | message += '{speed:.2f} km/h, ' 286 | message += '{other_lane:.0f}% other lane, {offroad:.0f}% off-road' 287 | message = message.format( 288 | map_x=map_position[0], 289 | map_y=map_position[1], 290 | ori_x=lane_orientation[0], 291 | ori_y=lane_orientation[1], 292 | step=self._timer.step, 293 | fps=self._timer.ticks_per_second(), 294 | speed=player_measurements.forward_speed * 3.6, 295 | other_lane=100 * player_measurements.intersection_otherlane, 296 | offroad=100 * player_measurements.intersection_offroad) 297 | print_over_same_line(message) 298 | 299 | def _print_player_measurements(self, player_measurements): 300 | message = 'Step {step} ({fps:.1f} FPS): ' 301 | message += '{speed:.2f} km/h, ' 302 | message += '{other_lane:.0f}% other lane, {offroad:.0f}% off-road' 303 | message = message.format( 304 | step=self._timer.step, 305 | fps=self._timer.ticks_per_second(), 306 | speed=player_measurements.forward_speed * 3.6, 307 | other_lane=100 * player_measurements.intersection_otherlane, 308 | offroad=100 * player_measurements.intersection_offroad) 309 | 
print_over_same_line(message) 310 | 311 | def _on_render(self): 312 | gap_x = (WINDOW_WIDTH - 2 * MINI_WINDOW_WIDTH) / 3 313 | mini_image_y = WINDOW_HEIGHT - MINI_WINDOW_HEIGHT - gap_x 314 | 315 | if self._main_image is not None: 316 | array = image_converter.to_rgb_array(self._main_image) 317 | surface = pygame.surfarray.make_surface(array.swapaxes(0, 1)) 318 | self._display.blit(surface, (0, 0)) 319 | 320 | if self._mini_view_image1 is not None: 321 | array = image_converter.depth_to_logarithmic_grayscale(self._mini_view_image1) 322 | surface = pygame.surfarray.make_surface(array.swapaxes(0, 1)) 323 | self._display.blit(surface, (gap_x, mini_image_y)) 324 | 325 | if self._mini_view_image2 is not None: 326 | array = image_converter.labels_to_cityscapes_palette( 327 | self._mini_view_image2) 328 | surface = pygame.surfarray.make_surface(array.swapaxes(0, 1)) 329 | 330 | self._display.blit( 331 | surface, (2 * gap_x + MINI_WINDOW_WIDTH, mini_image_y)) 332 | 333 | if self._lidar_measurement is not None: 334 | lidar_data = np.array(self._lidar_measurement.data[:, :2]) 335 | lidar_data *= 2.0 336 | lidar_data += 100.0 337 | lidar_data = np.fabs(lidar_data) 338 | lidar_data = lidar_data.astype(np.int32) 339 | lidar_data = np.reshape(lidar_data, (-1, 2)) 340 | #draw lidar 341 | lidar_img_size = (200, 200, 3) 342 | lidar_img = np.zeros(lidar_img_size) 343 | lidar_img[tuple(lidar_data.T)] = (255, 255, 255) 344 | surface = pygame.surfarray.make_surface(lidar_img) 345 | self._display.blit(surface, (10, 10)) 346 | 347 | if self._map_view is not None: 348 | array = self._map_view 349 | array = array[:, :, :3] 350 | 351 | new_window_width = \ 352 | (float(WINDOW_HEIGHT) / float(self._map_shape[0])) * \ 353 | float(self._map_shape[1]) 354 | surface = pygame.surfarray.make_surface(array.swapaxes(0, 1)) 355 | 356 | w_pos = int(self._position[0]*(float(WINDOW_HEIGHT)/float(self._map_shape[0]))) 357 | h_pos = int(self._position[1] *(new_window_width/float(self._map_shape[1]))) 358 | 
359 | pygame.draw.circle(surface, [255, 0, 0, 255], (w_pos, h_pos), 6, 0) 360 | for agent in self._agent_positions: 361 | if agent.HasField('vehicle'): 362 | agent_position = self._map.convert_to_pixel([ 363 | agent.vehicle.transform.location.x, 364 | agent.vehicle.transform.location.y, 365 | agent.vehicle.transform.location.z]) 366 | 367 | w_pos = int(agent_position[0]*(float(WINDOW_HEIGHT)/float(self._map_shape[0]))) 368 | h_pos = int(agent_position[1] *(new_window_width/float(self._map_shape[1]))) 369 | 370 | pygame.draw.circle(surface, [255, 0, 255, 255], (w_pos, h_pos), 4, 0) 371 | 372 | self._display.blit(surface, (WINDOW_WIDTH, 0)) 373 | 374 | pygame.display.flip() 375 | 376 | 377 | def main(): 378 | argparser = argparse.ArgumentParser( 379 | description='CARLA Manual Control Client') 380 | argparser.add_argument( 381 | '-v', '--verbose', 382 | action='store_true', 383 | dest='debug', 384 | help='print debug information') 385 | argparser.add_argument( 386 | '--host', 387 | metavar='H', 388 | default='localhost', 389 | help='IP of the host server (default: localhost)') 390 | argparser.add_argument( 391 | '-p', '--port', 392 | metavar='P', 393 | default=2000, 394 | type=int, 395 | help='TCP port to listen to (default: 2000)') 396 | argparser.add_argument( 397 | '-a', '--autopilot', 398 | action='store_true', 399 | help='enable autopilot') 400 | argparser.add_argument( 401 | '-l', '--lidar', 402 | action='store_true', 403 | help='enable Lidar') 404 | argparser.add_argument( 405 | '-q', '--quality-level', 406 | choices=['Low', 'Epic'], 407 | type=lambda s: s.title(), 408 | default='Epic', 409 | help='graphics quality level, a lower level makes the simulation run considerably faster') 410 | argparser.add_argument( 411 | '-m', '--map', 412 | action='store_true', 413 | help='plot the map of the current city') 414 | args = argparser.parse_args() 415 | 416 | log_level = logging.DEBUG if args.debug else logging.INFO 417 | logging.basicConfig(format='%(levelname)s: 
%(message)s', level=log_level) 418 | 419 | logging.info('listening to server %s:%s', args.host, args.port) 420 | 421 | print(__doc__) 422 | 423 | while True: 424 | try: 425 | 426 | with make_carla_client(args.host, args.port) as client: 427 | game = CarlaGame(client, args) 428 | game.execute() 429 | break 430 | 431 | except TCPConnectionError as error: 432 | logging.error(error) 433 | time.sleep(1) 434 | 435 | 436 | if __name__ == '__main__': 437 | 438 | try: 439 | main() 440 | except KeyboardInterrupt: 441 | print('\nCancelled by user. Bye!') 442 | --------------------------------------------------------------------------------