├── .gitignore ├── LICENSE ├── README.md ├── carla ├── .DS_Store ├── __init__.py ├── agent │ ├── .DS_Store │ ├── __init__.py │ ├── agent.py │ ├── command_follower.py │ ├── forward_agent.py │ ├── human_agent.py │ ├── lane_follower.py │ └── modules │ │ ├── __init__.py │ │ ├── controllers.py │ │ ├── obstacle_avoidance.py │ │ ├── utils.py │ │ └── waypointer.py ├── carla_game.py ├── carla_server_pb2.py ├── client.py ├── image_converter.py ├── planner │ ├── Town01.png │ ├── Town01.txt │ ├── Town01Central.png │ ├── Town01Lanes.png │ ├── Town02.png │ ├── Town02.txt │ ├── Town02Big.png │ ├── Town02Central.png │ ├── Town02Lanes.png │ ├── __init__.py │ ├── astar.py │ ├── bezier.py │ ├── city_track.py │ ├── converter.py │ ├── graph.py │ ├── grid.py │ ├── map.py │ └── planner.py ├── sensor.py ├── settings.py ├── tcp.py ├── transform.py └── util.py ├── carla08 ├── .DS_Store ├── __init__.py ├── agent │ ├── .DS_Store │ ├── __init__.py │ ├── agent.py │ ├── command_follower.py │ ├── forward_agent.py │ ├── human_agent.py │ ├── lane_follower.py │ └── modules │ │ ├── __init__.py │ │ ├── controllers.py │ │ ├── obstacle_avoidance.py │ │ ├── utils.py │ │ └── waypointer.py ├── carla_server_pb2.py ├── client.py ├── driving_benchmark │ ├── .DS_Store │ ├── __init__.py │ ├── driving_benchmark.py │ ├── experiment.py │ ├── experiment_suites │ │ ├── __init__.py │ │ ├── basic_experiment_suite.py │ │ ├── corl_2017.py │ │ ├── experiment_suite.py │ │ └── longcontrol_2018.py │ ├── metrics.py │ ├── recording.py │ └── results_printer.py ├── image_converter.py ├── planner │ ├── Town01.png │ ├── Town01.txt │ ├── Town01Central.png │ ├── Town01Lanes.png │ ├── Town02.png │ ├── Town02.txt │ ├── Town02Big.png │ ├── Town02Central.png │ ├── Town02Lanes.png │ ├── __init__.py │ ├── astar.py │ ├── bezier.py │ ├── city_track.py │ ├── converter.py │ ├── graph.py │ ├── grid.py │ ├── map.py │ └── planner.py ├── sensor.py ├── settings.py ├── tcp.py ├── transform.py └── util.py ├── coil_core ├── __init__.py ├── 
adabound.py ├── adamaio.py ├── executer.py ├── grad_cam.py ├── run_drive.py ├── run_entropy.py ├── save_activations.py ├── train.py ├── validate.py ├── validate_for_expert.py └── validate_single_model.py ├── coiltraine.py ├── coilutils ├── __init__.py ├── attribute_dict.py ├── checking.py ├── checkpoint_schedule.py ├── drive_utils.py ├── experiment_schedule.py ├── exporter.py └── general.py ├── collect.py ├── configs ├── .DS_Store ├── __init__.py ├── coil_global.py ├── demo │ └── resnet34imnet10S1.yaml └── namer.py ├── dataset_configurations ├── __init__.py └── coil_training_dataset.py ├── drive ├── .DS_Store ├── CoILBaseline.py ├── __init__.py ├── coil_agent.py ├── sample_agent.json └── suites │ ├── nocrash_new_town_suite.py │ ├── nocrash_new_weather_suite.py │ ├── nocrash_new_weather_town_suite.py │ └── nocrash_training_suite.py ├── input ├── __init__.py ├── augmenter.py ├── coil_dataset.py ├── coil_sampler.py ├── data_parser.py ├── scheduler.py └── splitter.py ├── logger ├── __init__.py ├── carla_metrics_parser.py ├── coil_logger.py ├── json_formatter.py ├── monitorer.py ├── namer.py ├── printer.py ├── tensorboard_logger.py └── utils.py ├── model_view ├── carla08interface.py └── carla09interface.py ├── modules ├── .DS_Store ├── __init__.py ├── collision_checker.py ├── data_writer.py ├── noiser.py └── screen_manager.py ├── multi_gpu_collection.py ├── network ├── .DS_Store ├── __init__.py ├── coil_model.py ├── loss.py ├── loss_functional.py ├── models │ ├── .DS_Store │ ├── __init__.py │ ├── building_blocks │ │ ├── __init__.py │ │ ├── branching.py │ │ ├── conv.py │ │ ├── fc.py │ │ ├── join.py │ │ └── resnet.py │ └── coil_icra.py └── optimizer.py ├── plotter ├── .DS_Store ├── __init__.py ├── data_reading.py ├── metrics.py ├── plot_on_map.py ├── plotter.py ├── plotting_params │ ├── eccv_online_offline_plots.py │ ├── plotting_all_cameras.py │ └── sample_plot.py └── scatter_plotter.py ├── requirements.yaml └── tools ├── batch_rename.py ├── copy_data_fast.py ├── 
count_time.py ├── create_plots.py ├── create_video.py ├── filter_dagger_data.py ├── filter_dagger_data_var.py ├── move_data_fast.py ├── plot_infractions.py ├── plot_on_map.py ├── post_process.py └── viewer.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 
89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 autonomousvision 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /carla/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/data_aggregation/76777156a465cbb77d6d5ab88da8f1812e7ff043/carla/.DS_Store -------------------------------------------------------------------------------- /carla/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/data_aggregation/76777156a465cbb77d6d5ab88da8f1812e7ff043/carla/__init__.py -------------------------------------------------------------------------------- /carla/agent/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/data_aggregation/76777156a465cbb77d6d5ab88da8f1812e7ff043/carla/agent/.DS_Store -------------------------------------------------------------------------------- /carla/agent/__init__.py: -------------------------------------------------------------------------------- 1 | from .forward_agent import ForwardAgent 2 | from .command_follower import CommandFollower 3 | from .lane_follower import LaneFollower 4 | from .human_agent import HumanAgent 5 | from .agent import Agent 6 | -------------------------------------------------------------------------------- /carla/agent/agent.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 3 | # Barcelona (UAB). 
4 | # 5 | # This work is licensed under the terms of the MIT license. 6 | # For a copy, see . 7 | # @author: germanros, felipecode 8 | 9 | 10 | from __future__ import print_function 11 | import abc 12 | 13 | 14 | class Agent(object): 15 | def __init__(self): 16 | self.__metaclass__ = abc.ABCMeta 17 | 18 | @abc.abstractmethod 19 | def run_step(self, measurements, sensor_data, directions, target): 20 | """ 21 | Function to be redefined by an agent. 22 | params The measurements like speed, the image data and a target 23 | :returns A carla Control object, with the steering/gas/brake for the agent 24 | """ 25 | -------------------------------------------------------------------------------- /carla/agent/forward_agent.py: -------------------------------------------------------------------------------- 1 | 2 | from carla.agent.agent import Agent 3 | from carla.client import VehicleControl 4 | 5 | 6 | class ForwardAgent(Agent): 7 | """ 8 | Simple derivation of Agent Class, 9 | A trivial agent agent that goes straight 10 | """ 11 | def run_step(self, measurements, sensor_data, directions, target): 12 | control = VehicleControl() 13 | control.throttle = 0.9 14 | 15 | return control 16 | -------------------------------------------------------------------------------- /carla/agent/human_agent.py: -------------------------------------------------------------------------------- 1 | from carla.agent.agent import Agent 2 | from carla.client import VehicleControl 3 | 4 | try: 5 | import pygame 6 | from pygame.locals import K_DOWN 7 | from pygame.locals import K_LEFT 8 | from pygame.locals import K_RIGHT 9 | from pygame.locals import K_SPACE 10 | from pygame.locals import K_UP 11 | from pygame.locals import K_a 12 | from pygame.locals import K_d 13 | from pygame.locals import K_q 14 | from pygame.locals import K_s 15 | from pygame.locals import K_w 16 | 17 | except ImportError: 18 | raise RuntimeError('cannot import pygame, make sure pygame package is installed') 19 | 20 | 21 | 
class HumanAgent(Agent): 22 | """ 23 | Derivation of Agent Class for human control, 24 | 25 | """ 26 | 27 | def __init__(self): 28 | """ 29 | TODO: add the parameter for a joystick to be used, default keyboard. 30 | """ 31 | super(HumanAgent).__init__() 32 | self._is_on_reverse = False 33 | 34 | def _get_keyboard_control(self, keys): 35 | """ 36 | Return a VehicleControl message based on the pressed keys. 37 | 38 | Return None 39 | if a new episode was requested. 40 | """ 41 | 42 | control = VehicleControl() 43 | if keys[K_LEFT] or keys[K_a]: 44 | control.steer = -1.0 45 | if keys[K_RIGHT] or keys[K_d]: 46 | control.steer = 1.0 47 | if keys[K_UP] or keys[K_w]: 48 | control.throttle = 1.0 49 | if keys[K_DOWN] or keys[K_s]: 50 | control.brake = 1.0 51 | if keys[K_SPACE]: 52 | control.hand_brake = True 53 | if keys[K_q]: 54 | self._is_on_reverse = not self._is_on_reverse 55 | control.reverse = self._is_on_reverse 56 | return control 57 | 58 | def run_step(self, measurements, sensor_data, directions, target): 59 | # We basically ignore all the parameters. 
60 | for event in pygame.event.get(): 61 | if event.type == pygame.QUIT: 62 | return VehicleControl() 63 | 64 | return self._get_keyboard_control(pygame.key.get_pressed()) 65 | -------------------------------------------------------------------------------- /carla/agent/lane_follower.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | from carla.agent.agent import Agent 5 | 6 | 7 | class LaneFollower(Agent): 8 | """ 9 | Simple derivation of Agent Class, 10 | A lane follower that randomly goes driving around the city 11 | Not yet implemented 12 | """ 13 | def __init__(self, town_name): 14 | 15 | pass 16 | 17 | 18 | def run_step(self, measurements, sensor_data, directions, target): 19 | 20 | raise NotImplementedError("Lane follower not yet implemented") 21 | -------------------------------------------------------------------------------- /carla/agent/modules/__init__.py: -------------------------------------------------------------------------------- 1 | from .obstacle_avoidance import ObstacleAvoidance 2 | from .controllers import Controller 3 | from .waypointer import Waypointer -------------------------------------------------------------------------------- /carla/agent/modules/controllers.py: -------------------------------------------------------------------------------- 1 | import math 2 | 3 | from pid_controller.pid import PID 4 | from carla.client import VehicleControl 5 | 6 | """ This module implements a 7 | PID based controller, in practice we can have different ones 8 | """ 9 | 10 | 11 | class Controller(object): 12 | # The vehicle controller, it receives waypoints and applies a PID control in order 13 | # to get the action. 
14 | 15 | def __init__(self, params): 16 | 17 | # The parameters for this controller, set by the agent 18 | self.params = params 19 | # PID speed controller 20 | self.pid = PID(p=params['pid_p'], i=params['pid_i'], d=params['pid_d']) 21 | 22 | def get_control(self, wp_angle, wp_angle_speed, speed_factor, current_speed): 23 | # NOTE! All the calculations inside this function are made assuming speed in Km/h 24 | control = VehicleControl() 25 | current_speed = max(current_speed, 0) 26 | 27 | steer = self.params['steer_gain'] * wp_angle 28 | if steer > 0: 29 | control.steer = min(steer, 1) 30 | else: 31 | control.steer = max(steer, -1) 32 | 33 | # Don't go to fast around corners 34 | if math.fabs(wp_angle_speed) < 0.1: 35 | target_speed_adjusted = self.params['target_speed'] * speed_factor 36 | # Depending on the angle of the curve the speed is either 20 (beginning) 15 (most of it) 37 | elif math.fabs(wp_angle_speed) < 0.5: 38 | target_speed_adjusted = 20 * speed_factor 39 | else: 40 | target_speed_adjusted = 15 * speed_factor 41 | 42 | self.pid.target = target_speed_adjusted 43 | pid_gain = self.pid(feedback=current_speed) 44 | # print ('Target: ', self.pid.target, 'Error: ', self.pid.error, 'Gain: ', pid_gain) 45 | # print ('Target Speed: ', target_speed_adjusted, 'Current Speed: ', 46 | # current_speed, 'Speed Factor: ', speed_factor) 47 | 48 | throttle = min(max(self.params['default_throttle'] - 1.3 * pid_gain, 0), 49 | self.params['throttle_max']) 50 | 51 | if pid_gain > 0.5: 52 | brake = min(0.35 * pid_gain * self.params['brake_strength'], 1) 53 | else: 54 | brake = 0 55 | 56 | control.throttle = max(throttle, 0) 57 | control.brake = brake 58 | 59 | # print ('Throttle: ', control.throttle, 'Brake: ', 60 | # control.brake, 'Steering Angle: ', control.steer) 61 | 62 | return control 63 | -------------------------------------------------------------------------------- /carla/agent/modules/utils.py: 
-------------------------------------------------------------------------------- 1 | import math 2 | import numpy as np 3 | 4 | def get_vec_dist(x_dst, y_dst, x_src, y_src): 5 | vec = np.array([x_dst, y_dst] - np.array([x_src, y_src])) 6 | dist = math.sqrt(vec[0] ** 2 + vec[1] ** 2) 7 | return vec / dist, dist 8 | 9 | 10 | def get_angle(vec_dst, vec_src): 11 | """ 12 | Get the angle between two vectors 13 | 14 | Returns: 15 | The angle between two vectors 16 | 17 | """ 18 | angle = math.atan2(vec_dst[1], vec_dst[0]) - math.atan2(vec_src[1], vec_src[0]) 19 | if angle > math.pi: 20 | angle -= 2 * math.pi 21 | elif angle < -math.pi: 22 | angle += 2 * math.pi 23 | return angle -------------------------------------------------------------------------------- /carla/planner/Town01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/data_aggregation/76777156a465cbb77d6d5ab88da8f1812e7ff043/carla/planner/Town01.png -------------------------------------------------------------------------------- /carla/planner/Town01.txt: -------------------------------------------------------------------------------- 1 | 0.0,0.0,-38.11000000 2 | 0.000000,0.000000,0.0 3 | 1.000000,1.000000,1.000000 4 | -16.43022,-16.43022,0.000 5 | 49, 41 6 | 0,0 0,40 40 7 | 0,40 0,0 40 8 | 48,40 41,40 7 9 | 41,40 48,40 7 10 | 48,0 48,40 40 11 | 48,40 48,0 40 12 | 0,0 11,0 11 13 | 11,0 0,0 11 14 | 41,0 48,0 7 15 | 48,0 41,0 7 16 | 41,40 11,40 30 17 | 11,40 41,40 30 18 | 41,0 41,7 7 19 | 41,7 41,0 7 20 | 11,40 0,40 11 21 | 0,40 11,40 11 22 | 11,0 19,0 8 23 | 19,0 11,0 8 24 | 11,40 11,24 16 25 | 11,24 11,40 16 26 | 41,24 41,40 16 27 | 41,40 41,24 16 28 | 11,24 11,16 8 29 | 11,16 11,24 8 30 | 41,24 11,24 30 31 | 11,24 41,24 30 32 | 41,16 41,24 8 33 | 41,24 41,16 8 34 | 11,16 11,7 9 35 | 11,7 11,16 9 36 | 41,16 11,16 30 37 | 11,16 41,16 30 38 | 41,7 41,16 9 39 | 41,16 41,7 9 40 | 11,7 11,0 7 41 | 11,0 11,7 7 42 | 41,7 19,7 22 43 
| 19,7 41,7 22 44 | 19,0 41,0 22 45 | 41,0 19,0 22 46 | 19,7 11,7 8 47 | 11,7 19,7 8 48 | 19,0 19,7 7 49 | 19,7 19,0 7 50 | -------------------------------------------------------------------------------- /carla/planner/Town01Central.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/data_aggregation/76777156a465cbb77d6d5ab88da8f1812e7ff043/carla/planner/Town01Central.png -------------------------------------------------------------------------------- /carla/planner/Town01Lanes.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/data_aggregation/76777156a465cbb77d6d5ab88da8f1812e7ff043/carla/planner/Town01Lanes.png -------------------------------------------------------------------------------- /carla/planner/Town02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/data_aggregation/76777156a465cbb77d6d5ab88da8f1812e7ff043/carla/planner/Town02.png -------------------------------------------------------------------------------- /carla/planner/Town02.txt: -------------------------------------------------------------------------------- 1 | 5.4400,-107.48000,-38.11000000 2 | 0.000000,0.000000,0.000000 3 | 1.000000,1.000000,1.000000 4 | -16.43022,-16.43022,0.000 5 | 25, 25 6 | 0,10 0,24 14 7 | 0,24 0,10 14 8 | 24,24 6,24 18 9 | 6,24 24,24 18 10 | 24,0 24,10 10 11 | 24,10 24,0 10 12 | 0,0 24,0 24 13 | 24,0 0,0 24 14 | 0,10 0,0 10 15 | 0,0 0,10 10 16 | 24,10 24,16 6 17 | 24,16 24,10 6 18 | 0,10 6,10 6 19 | 6,10 0,10 6 20 | 6,24 0,24 6 21 | 0,24 6,24 6 22 | 6,10 17,10 11 23 | 17,10 6,10 11 24 | 6,24 6,16 8 25 | 6,16 6,24 8 26 | 24,16 24,24 8 27 | 24,24 24,16 8 28 | 6,16 6,10 6 29 | 6,10 6,16 6 30 | 24,16 17,16 7 31 | 17,16 24,16 7 32 | 17,16 6,16 11 33 | 6,16 17,16 11 34 | 17,10 24,10 7 35 | 24,10 17,10 7 36 | 17,16 
17,10 6 37 | 17,10 17,16 6 38 | -------------------------------------------------------------------------------- /carla/planner/Town02Big.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/data_aggregation/76777156a465cbb77d6d5ab88da8f1812e7ff043/carla/planner/Town02Big.png -------------------------------------------------------------------------------- /carla/planner/Town02Central.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/data_aggregation/76777156a465cbb77d6d5ab88da8f1812e7ff043/carla/planner/Town02Central.png -------------------------------------------------------------------------------- /carla/planner/Town02Lanes.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/data_aggregation/76777156a465cbb77d6d5ab88da8f1812e7ff043/carla/planner/Town02Lanes.png -------------------------------------------------------------------------------- /carla/planner/__init__.py: -------------------------------------------------------------------------------- 1 | from .planner import Planner 2 | -------------------------------------------------------------------------------- /carla/planner/bezier.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from scipy.misc import comb 3 | 4 | def bernstein_poly(i, n, t): 5 | """ 6 | The Bernstein polynomial of n, i as a function of t 7 | """ 8 | 9 | return comb(n, i) * ( t**(n-i) ) * (1 - t)**i 10 | 11 | 12 | def bezier_curve(points, nTimes=1000): 13 | """ 14 | Given a set of control points, return the 15 | bezier curve defined by the control points. 
16 | 17 | points should be a list of lists, or list of tuples 18 | such as [ [1,1], 19 | [2,3], 20 | [4,5], ..[Xn, Yn] ] 21 | nTimes is the number of time steps, defaults to 1000 22 | 23 | See http://processingjs.nihongoresources.com/bezierinfo/ 24 | """ 25 | 26 | nPoints = len(points) 27 | xPoints = np.array([p[0] for p in points]) 28 | yPoints = np.array([p[1] for p in points]) 29 | 30 | t = np.linspace(0.0, 1.0, nTimes) 31 | 32 | polynomial_array = np.array([ bernstein_poly(i, nPoints-1, t) for i in range(0, nPoints) ]) 33 | 34 | xvals = np.dot(xPoints, polynomial_array) 35 | yvals = np.dot(yPoints, polynomial_array) 36 | 37 | return xvals, yvals 38 | -------------------------------------------------------------------------------- /carla/planner/graph.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 2 | # Barcelona (UAB). 3 | # 4 | # This work is licensed under the terms of the MIT license. 5 | # For a copy, see . 
6 | 7 | import math 8 | import numpy as np 9 | 10 | 11 | def string_to_node(string): 12 | vec = string.split(',') 13 | return (int(vec[0]), int(vec[1])) 14 | 15 | 16 | def string_to_floats(string): 17 | vec = string.split(',') 18 | return (float(vec[0]), float(vec[1]), float(vec[2])) 19 | 20 | 21 | def sldist(c1, c2): 22 | return math.sqrt((c2[0] - c1[0]) ** 2 + (c2[1] - c1[1]) ** 2) 23 | 24 | 25 | def sldist3(c1, c2): 26 | return math.sqrt((c2[0] - c1[0]) ** 2 + (c2[1] - c1[1]) 27 | ** 2 + (c2[2] - c1[2]) ** 2) 28 | 29 | 30 | class Graph(object): 31 | """ 32 | A simple directed, weighted graph 33 | """ 34 | 35 | def __init__(self, graph_file=None, node_density=50): 36 | 37 | self._nodes = set() 38 | self._angles = {} 39 | self._edges = {} 40 | self._distances = {} 41 | self._node_density = node_density 42 | 43 | if graph_file is not None: 44 | with open(graph_file, 'r') as f: 45 | # Skipe the first four lines that 46 | lines_after_4 = f.readlines()[4:] 47 | 48 | # the graph resolution. 
49 | linegraphres = lines_after_4[0] 50 | self._resolution = string_to_node(linegraphres) 51 | for line in lines_after_4[1:]: 52 | 53 | from_node, to_node, d = line.split() 54 | from_node = string_to_node(from_node) 55 | to_node = string_to_node(to_node) 56 | 57 | if from_node not in self._nodes: 58 | self.add_node(from_node) 59 | if to_node not in self._nodes: 60 | self.add_node(to_node) 61 | 62 | self._edges.setdefault(from_node, []) 63 | self._edges[from_node].append(to_node) 64 | self._distances[(from_node, to_node)] = float(d) 65 | 66 | def add_node(self, value): 67 | self._nodes.add(value) 68 | 69 | def make_orientations(self, node, heading): 70 | 71 | import collections 72 | distance_dic = {} 73 | for node_iter in self._nodes: 74 | if node_iter != node: 75 | distance_dic[sldist(node, node_iter)] = node_iter 76 | 77 | distance_dic = collections.OrderedDict( 78 | sorted(distance_dic.items())) 79 | 80 | self._angles[node] = heading 81 | for _, v in distance_dic.items(): 82 | start_to_goal = np.array([node[0] - v[0], node[1] - v[1]]) 83 | 84 | print(start_to_goal) 85 | 86 | self._angles[v] = start_to_goal / np.linalg.norm(start_to_goal) 87 | 88 | def add_edge(self, from_node, to_node, distance): 89 | self._add_edge(from_node, to_node, distance) 90 | 91 | def _add_edge(self, from_node, to_node, distance): 92 | self._edges.setdefault(from_node, []) 93 | self._edges[from_node].append(to_node) 94 | self._distances[(from_node, to_node)] = distance 95 | 96 | def get_resolution(self): 97 | return self._resolution 98 | def get_edges(self): 99 | return self._edges 100 | 101 | def intersection_nodes(self): 102 | 103 | intersect_nodes = [] 104 | for node in self._nodes: 105 | if len(self._edges[node]) > 2: 106 | intersect_nodes.append(node) 107 | 108 | return intersect_nodes 109 | 110 | def curve_nodes(self): 111 | 112 | intersect_nodes = [] 113 | for node in self._nodes: 114 | if len(self._edges[node]) > 1: 115 | intersect_nodes.append(node) 116 | 117 | return 
intersect_nodes 118 | 119 | # This contains also the non-intersection turns... 120 | 121 | def turn_nodes(self): 122 | 123 | return self._nodes 124 | 125 | def plot_ori(self, c): 126 | from matplotlib import collections as mc 127 | 128 | import matplotlib.pyplot as plt 129 | line_len = 1 130 | 131 | lines = [[(p[0], p[1]), (p[0] + line_len * self._angles[p][0], 132 | p[1] + line_len * self._angles[p][1])] for p in self._nodes] 133 | lc = mc.LineCollection(lines, linewidth=2, color='green') 134 | _, ax = plt.subplots() 135 | ax.add_collection(lc) 136 | 137 | ax.autoscale() 138 | ax.margins(0.1) 139 | 140 | xs = [p[0] for p in self._nodes] 141 | ys = [p[1] for p in self._nodes] 142 | 143 | plt.scatter(xs, ys, color=c) 144 | 145 | def plot(self, c): 146 | import matplotlib.pyplot as plt 147 | xs = [p[0] for p in self._nodes] 148 | ys = [p[1] for p in self._nodes] 149 | 150 | plt.scatter(xs, ys, color=c) 151 | -------------------------------------------------------------------------------- /carla/planner/grid.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 2 | # Barcelona (UAB). 3 | # 4 | # This work is licensed under the terms of the MIT license. 5 | # For a copy, see . 
6 | 7 | import copy 8 | import numpy as np 9 | 10 | 11 | def angle_between(v1, v2): 12 | return np.arccos(np.dot(v1, v2) / np.linalg.norm(v1) / np.linalg.norm(v2)) 13 | 14 | 15 | class Grid(object): 16 | 17 | def __init__(self, graph): 18 | 19 | self._graph = graph 20 | self._structure = self._make_structure() 21 | self._walls = self._make_walls() 22 | 23 | def search_on_grid(self, x, y): 24 | visit = [[0, 1], [0, -1], [1, 0], [-1, 0], 25 | [1, -1], [1, 1], [-1, 1], [-1, -1]] 26 | c_x, c_y = x, y 27 | scale = 1 28 | 29 | 30 | while self._structure[c_x, c_y] != 0: 31 | for offset in visit: 32 | c_x, c_y = x + offset[0] * scale, y + offset[1] * scale 33 | 34 | if c_x >= 0 and c_x < self._graph.get_resolution()[ 35 | 0] and c_y >= 0 and c_y < self._graph.get_resolution()[1]: 36 | if self._structure[c_x, c_y] == 0: 37 | break 38 | else: 39 | c_x, c_y = x, y 40 | scale += 1 41 | 42 | return c_x, c_y 43 | def get_walls(self): 44 | return self._walls 45 | 46 | def get_wall_source(self, pos, pos_ori, target): 47 | 48 | free_nodes = self.get_adjacent_free_nodes(pos) 49 | # print self._walls 50 | final_walls = copy.copy(self._walls) 51 | # print final_walls 52 | heading_start = np.array([pos_ori[0], pos_ori[1]]) 53 | for adj in free_nodes: 54 | 55 | start_to_goal = np.array([adj[0] - pos[0], adj[1] - pos[1]]) 56 | angle = angle_between(heading_start, start_to_goal) 57 | if (angle > 1.6 and adj != target): 58 | #print ("added source ", (adj[0], adj[1])) 59 | final_walls.add((adj[0], adj[1])) 60 | 61 | return final_walls 62 | 63 | def get_wall_target(self, pos, pos_ori, source): 64 | 65 | free_nodes = self.get_adjacent_free_nodes(pos) 66 | final_walls = copy.copy(self._walls) 67 | heading_start = np.array([pos_ori[0], pos_ori[1]]) 68 | for adj in free_nodes: 69 | 70 | start_to_goal = np.array([adj[0] - pos[0], adj[1] - pos[1]]) 71 | angle = angle_between(heading_start, start_to_goal) 72 | 73 | if (angle < 1.0 and adj != source): 74 | #print("added target ", (adj[0], adj[1])) 
75 | final_walls.add((adj[0], adj[1])) 76 | 77 | return final_walls 78 | 79 | def _draw_line(self, grid, xi, yi, xf, yf): 80 | 81 | if xf < xi: 82 | aux = xi 83 | xi = xf 84 | xf = aux 85 | 86 | if yf < yi: 87 | aux = yi 88 | yi = yf 89 | yf = aux 90 | 91 | for i in range(xi, xf + 1): 92 | 93 | for j in range(yi, yf + 1): 94 | grid[i, j] = 0.0 95 | 96 | return grid 97 | 98 | def _make_structure(self): 99 | structure = np.ones( 100 | (self._graph.get_resolution()[0], 101 | self._graph.get_resolution()[1])) 102 | 103 | for key, connections in self._graph.get_edges().items(): 104 | 105 | # draw a line 106 | for con in connections: 107 | # print key[0],key[1],con[0],con[1] 108 | structure = self._draw_line( 109 | structure, key[0], key[1], con[0], con[1]) 110 | # print grid 111 | return structure 112 | 113 | def _make_walls(self): 114 | walls = set() 115 | 116 | for i in range(self._structure.shape[0]): 117 | 118 | for j in range(self._structure.shape[1]): 119 | if self._structure[i, j] == 1.0: 120 | walls.add((i, j)) 121 | 122 | return walls 123 | 124 | def get_adjacent_free_nodes(self, pos): 125 | """ Eight nodes in total """ 126 | visit = [[0, 1], [0, -1], [1, 0], [1, 1], 127 | [1, -1], [-1, 0], [-1, 1], [-1, -1]] 128 | 129 | adjacent = set() 130 | for offset in visit: 131 | node = (pos[0] + offset[0], pos[1] + offset[1]) 132 | 133 | if (node[0] >= 0 and node[0] < self._graph.get_resolution()[0] 134 | and node[1] >= 0 and node[1] < self._graph.get_resolution()[1]): 135 | 136 | if self._structure[node[0], node[1]] == 0.0: 137 | adjacent.add(node) 138 | 139 | return adjacent 140 | -------------------------------------------------------------------------------- /carla/planner/planner.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 2 | # Barcelona (UAB). 3 | # 4 | # This work is licensed under the terms of the MIT license. 5 | # For a copy, see . 
import collections
import math

import numpy as np

from . import city_track


def compare(x, y):
    """Order-insensitive equality between two iterables."""
    return collections.Counter(x) == collections.Counter(y)


# Constants used for the high level commands
REACH_GOAL = 0.0
GO_STRAIGHT = 5.0
TURN_RIGHT = 4.0
TURN_LEFT = 3.0
LANE_FOLLOW = 2.0


# Auxiliary algebra functions
def angle_between(v1, v2):
    """Angle in radians between two 2D vectors."""
    return np.arccos(np.dot(v1, v2) / np.linalg.norm(v1) / np.linalg.norm(v2))


def sldist(c1, c2):
    """Straight-line (Euclidean) distance between two 2D points."""
    return math.sqrt((c2[0] - c1[0]) ** 2 + (c2[1] - c1[1]) ** 2)


def signal(v1, v2):
    """Signed sine of the angle between v1 and v2 (2D cross product of the
    normalized vectors); the sign encodes turn direction."""
    return np.cross(v1, v2) / np.linalg.norm(v1) / np.linalg.norm(v2)


class Planner(object):
    """Computes routes on a city map and converts them into high level
    driving commands (reach-goal / straight / lane-follow / left / right)."""

    def __init__(self, city_name):
        # Map/topology helper for the selected town.
        self._city_track = city_track.CityTrack(city_name)
        # Commands still pending from the last computed route.
        self._commands = []

    def get_next_command(self, source, source_ori, target, target_ori):
        """
        Computes the full plan and returns the next command,
        Args
            source: source position
            source_ori: source orientation
            target: target position
            target_ori: target orientation
        Returns
            a command ( Straight,Lane Follow, Left or Right)
        """
        track_source = self._city_track.project_node(source)
        track_target = self._city_track.project_node(target)

        # Reached the goal.
        if self._city_track.is_at_goal(track_source, track_target):
            return REACH_GOAL

        # Only (re)plan when we arrived at a new node and are clear of any
        # intersection; otherwise keep consuming the previous plan.
        # (The original duplicated the identical tail logic in both the
        # replanning branch and the else branch; merged here.)
        if (self._city_track.is_at_new_node(track_source)
                and self._city_track.is_away_from_intersection(track_source)):
            route = self._city_track.compute_route(track_source, source_ori,
                                                   track_target, target_ori)
            self._commands = self._route_to_commands(route)

        if self._city_track.is_far_away_from_route_intersection(track_source):
            return LANE_FOLLOW

        # Near an intersection: emit the first pending command, if any.
        if self._commands:
            return self._commands[0]
        return LANE_FOLLOW

    def get_shortest_path_distance(
            self,
            source,
            source_ori,
            target,
            target_ori):
        """Length of the shortest path between source and target converted
        to world coordinates; 0.0 when no route exists."""
        track_source = self._city_track.project_node(source)
        track_target = self._city_track.project_node(target)

        route = self._city_track.compute_route(track_source, source_ori,
                                               track_target, target_ori)
        # No route, distance is zero.
        if route is None:
            return 0.0

        distance = 0
        current_pos = track_source
        for node_iter in route:
            distance += sldist(node_iter, current_pos)
            current_pos = node_iter

        # Multiply by these densities to convert node-grid distance to
        # world coordinates.
        return distance * float(self._city_track.get_pixel_density()) \
            * float(self._city_track.get_node_density())

    def is_there_posible_route(self, source, source_ori, target, target_ori):
        """True when a route exists between source and target.

        (Name kept as-is, including the typo, for backward compatibility.)
        """
        track_source = self._city_track.project_node(source)
        track_target = self._city_track.project_node(target)

        return self._city_track.compute_route(
            track_source, source_ori, track_target, target_ori) is not None

    def test_position(self, source):
        """True when the projected source node is away from intersections."""
        node_source = self._city_track.project_node(source)
        return self._city_track.is_away_from_intersection(node_source)

    def _route_to_commands(self, route):
        """
        from the shortest path graph, transform it into a list of commands

        :param route: the sub graph containing the shortest path
        :return: list of commands encoded from 0-5
        """
        commands_list = []

        # BUGFIX: the original iterated range(0, len(route)) while indexing
        # route[i - 1] and route[i + 1]; at i == 0 the -1 silently wrapped
        # to the last node, and at the last index it raised IndexError
        # whenever an endpoint was an intersection node. Endpoints have no
        # valid predecessor/successor, so skip them.
        for i in range(1, len(route) - 1):
            if route[i] not in self._city_track.get_intersection_nodes():
                continue

            current = route[i]
            past = route[i - 1]
            future = route[i + 1]

            past_to_current = np.array(
                [current[0] - past[0], current[1] - past[1]])
            current_to_future = np.array(
                [future[0] - current[0], future[1] - current[1]])
            angle = signal(current_to_future, past_to_current)

            # Thresholds keep near-straight crossings classified as straight.
            if angle < -0.1:
                command = TURN_RIGHT
            elif angle > 0.1:
                command = TURN_LEFT
            else:
                command = GO_STRAIGHT

            commands_list.append(command)

        return commands_list


# ---------------------------------------------------------------------------
# carla/settings.py
# ---------------------------------------------------------------------------
# Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.

"""CARLA Settings"""

import io
import random
import sys


if sys.version_info >= (3, 0):
    from configparser import ConfigParser
else:
    from ConfigParser import RawConfigParser as ConfigParser


from . import sensor as carla_sensor


MAX_NUMBER_OF_WEATHER_IDS = 14
33 | """ 34 | 35 | def __init__(self, **kwargs): 36 | # [CARLA/Server] 37 | self.SynchronousMode = True 38 | self.SendNonPlayerAgentsInfo = False 39 | # [CARLA/QualitySettings] 40 | self.QualityLevel = 'Epic' 41 | # [CARLA/LevelSettings] 42 | self.PlayerVehicle = None 43 | self.NumberOfVehicles = 20 44 | self.NumberOfPedestrians = 30 45 | self.WeatherId = 1 46 | self.SeedVehicles = None 47 | self.SeedPedestrians = None 48 | self.DisableTwoWheeledVehicles = False 49 | self.set(**kwargs) 50 | self._sensors = [] 51 | 52 | def set(self, **kwargs): 53 | for key, value in kwargs.items(): 54 | if not hasattr(self, key): 55 | raise ValueError('CarlaSettings: no key named %r' % key) 56 | setattr(self, key, value) 57 | 58 | def randomize_seeds(self): 59 | """ 60 | Randomize the seeds of the new episode's pseudo-random number 61 | generators. 62 | """ 63 | self.SeedVehicles = random.getrandbits(16) 64 | self.SeedPedestrians = random.getrandbits(16) 65 | 66 | def randomize_weather(self): 67 | """Randomized the WeatherId.""" 68 | self.WeatherId = random.randint(0, MAX_NUMBER_OF_WEATHER_IDS) 69 | 70 | def add_sensor(self, sensor): 71 | """Add a sensor to the player vehicle (see sensor.py).""" 72 | if not isinstance(sensor, carla_sensor.Sensor): 73 | raise ValueError('Sensor not supported') 74 | self._sensors.append(sensor) 75 | 76 | def __str__(self): 77 | """Converts this object to an INI formatted string.""" 78 | ini = ConfigParser() 79 | ini.optionxform = str 80 | S_SERVER = 'CARLA/Server' 81 | S_QUALITY = 'CARLA/QualitySettings' 82 | S_LEVEL = 'CARLA/LevelSettings' 83 | S_SENSOR = 'CARLA/Sensor' 84 | 85 | def get_attribs(obj): 86 | return [a for a in dir(obj) if not a.startswith('_') and not callable(getattr(obj, a))] 87 | 88 | def add_section(section, obj, keys): 89 | for key in keys: 90 | if hasattr(obj, key) and getattr(obj, key) is not None: 91 | if not ini.has_section(section): 92 | ini.add_section(section) 93 | ini.set(section, key, str(getattr(obj, key))) 94 | 95 | 
add_section(S_SERVER, self, [ 96 | 'SynchronousMode', 97 | 'SendNonPlayerAgentsInfo']) 98 | add_section(S_QUALITY, self, [ 99 | 'QualityLevel']) 100 | add_section(S_LEVEL, self, [ 101 | 'NumberOfVehicles', 102 | 'NumberOfPedestrians', 103 | 'WeatherId', 104 | 'SeedVehicles', 105 | 'SeedPedestrians', 106 | 'DisableTwoWheeledVehicles']) 107 | 108 | ini.add_section(S_SENSOR) 109 | ini.set(S_SENSOR, 'Sensors', ','.join(s.SensorName for s in self._sensors)) 110 | 111 | for sensor_def in self._sensors: 112 | section = S_SENSOR + '/' + sensor_def.SensorName 113 | add_section(section, sensor_def, get_attribs(sensor_def)) 114 | 115 | if sys.version_info >= (3, 0): 116 | text = io.StringIO() 117 | else: 118 | text = io.BytesIO() 119 | 120 | ini.write(text) 121 | return text.getvalue().replace(' = ', '=') 122 | -------------------------------------------------------------------------------- /carla/tcp.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 2 | # Barcelona (UAB). 3 | # 4 | # This work is licensed under the terms of the MIT license. 5 | # For a copy, see . 6 | 7 | """Basic TCP client.""" 8 | 9 | import logging 10 | import socket 11 | import struct 12 | import time 13 | 14 | class TCPConnectionError(Exception): 15 | pass 16 | 17 | 18 | class TCPClient(object): 19 | """ 20 | Basic networking client for TCP connections. Errors occurred during 21 | networking operations are raised as TCPConnectionError. 22 | 23 | Received messages are expected to be prepended by a int32 defining the 24 | message size. Messages are sent following this convention. 
25 | """ 26 | 27 | def __init__(self, host, port, timeout): 28 | self._host = host 29 | self._port = port 30 | self._timeout = timeout 31 | self._socket = None 32 | self._logprefix = '(%s:%s) ' % (self._host, self._port) 33 | 34 | def connect(self, connection_attempts=1000000000): 35 | """Try to establish a connection to the given host:port.""" 36 | connection_attempts = max(1, connection_attempts) 37 | error = None 38 | for attempt in range(1, connection_attempts + 1): 39 | try: 40 | self._socket = socket.create_connection(address=(self._host, self._port), timeout=self._timeout) 41 | self._socket.settimeout(self._timeout) 42 | logging.debug('%sconnected', self._logprefix) 43 | return 44 | except socket.error as exception: 45 | error = exception 46 | logging.debug('%sconnection attempt %d: %s', self._logprefix, attempt, error) 47 | time.sleep(1) 48 | self._reraise_exception_as_tcp_error('failed to connect', error) 49 | 50 | def disconnect(self): 51 | """Disconnect any active connection.""" 52 | if self._socket is not None: 53 | logging.debug('%sdisconnecting', self._logprefix) 54 | self._socket.close() 55 | self._socket = None 56 | 57 | def connected(self): 58 | """Return whether there is an active connection.""" 59 | return self._socket is not None 60 | 61 | def write(self, message): 62 | """Send message to the server.""" 63 | if self._socket is None: 64 | raise TCPConnectionError(self._logprefix + 'not connected') 65 | # print ('message length: ', len(message)) 66 | header = struct.pack(' 0: 87 | try: 88 | data = self._socket.recv(length) 89 | except socket.error as exception: 90 | self._reraise_exception_as_tcp_error('failed to read data', exception) 91 | if not data: 92 | raise TCPConnectionError(self._logprefix + 'connection closed') 93 | buf += data 94 | length -= len(data) 95 | return buf 96 | 97 | def _reraise_exception_as_tcp_error(self, message, exception): 98 | raise TCPConnectionError('%s%s: %s' % (self._logprefix, message, exception)) 99 | 
# Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB), and the INTEL Visual Computing Lab.
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.

import math

from collections import namedtuple

try:
    import numpy
except ImportError:
    raise RuntimeError(
        'cannot import numpy, make sure numpy package is installed.')

try:
    from . import carla_server_pb2 as carla_protocol
except ImportError:
    raise RuntimeError('cannot import "carla_server_pb2.py", run '
                       'the protobuf compiler to generate this file')


# Value containers with identity defaults: zero translation/rotation,
# unit scale.
Translation = namedtuple('Translation', 'x y z')
Translation.__new__.__defaults__ = (0.0, 0.0, 0.0)

Rotation = namedtuple('Rotation', 'pitch yaw roll')
Rotation.__new__.__defaults__ = (0.0, 0.0, 0.0)

Scale = namedtuple('Scale', 'x y z')
Scale.__new__.__defaults__ = (1.0, 1.0, 1.0)


class Transform(object):
    """A 3D transformation.

    The transformation is applied in the order: scale, rotation, translation.
    """

    def __init__(self, *args, **kwargs):
        # A precomputed matrix short-circuits everything else.
        if 'matrix' in kwargs:
            self.matrix = kwargs['matrix']
            return
        # A protobuf Transform is unpacked into Translation + Rotation.
        if isinstance(args[0], carla_protocol.Transform):
            args = [
                Translation(
                    args[0].location.x,
                    args[0].location.y,
                    args[0].location.z),
                Rotation(
                    args[0].rotation.pitch,
                    args[0].rotation.yaw,
                    args[0].rotation.roll)
            ]
        self.matrix = numpy.matrix(numpy.identity(4))
        self.set(*args, **kwargs)

    def set(self, *args):
        """Builds the transform matrix given a Translate, Rotation
        and Scale.
        """
        if len(args) > 3:
            raise ValueError("'Transform' accepts 3 values as maximum.")

        def pick_unique(obj_type):
            """Returns the unique object contained in the
            arguments lists that is instance of 'obj_type'.
            """
            matches = [x for x in args if isinstance(x, obj_type)]
            if len(matches) > 1:
                raise ValueError("Transform only accepts one instances of " +
                                 str(obj_type) + " as a parameter")
            elif not matches:
                # Fall back to the type's identity default.
                return obj_type()
            return matches[0]

        translation = pick_unique(Translation)
        rotation = pick_unique(Rotation)
        scale = pick_unique(Scale)

        for param in args:
            if not isinstance(param, Translation) and \
                    not isinstance(param, Rotation) and \
                    not isinstance(param, Scale):
                raise TypeError(
                    "'" + str(type(param)) + "' type not match with \
                    'Translation', 'Rotation' or 'Scale'")

        # Transformation matrix
        cy = math.cos(numpy.radians(rotation.yaw))
        sy = math.sin(numpy.radians(rotation.yaw))
        cr = math.cos(numpy.radians(rotation.roll))
        sr = math.sin(numpy.radians(rotation.roll))
        cp = math.cos(numpy.radians(rotation.pitch))
        sp = math.sin(numpy.radians(rotation.pitch))
        self.matrix[0, 3] = translation.x
        self.matrix[1, 3] = translation.y
        self.matrix[2, 3] = translation.z
        self.matrix[0, 0] = scale.x * (cp * cy)
        self.matrix[0, 1] = scale.y * (cy * sp * sr - sy * cr)
        self.matrix[0, 2] = -scale.z * (cy * sp * cr + sy * sr)
        self.matrix[1, 0] = scale.x * (sy * cp)
        self.matrix[1, 1] = scale.y * (sy * sp * sr + cy * cr)
        self.matrix[1, 2] = scale.z * (cy * sr - sy * sp * cr)
        self.matrix[2, 0] = scale.x * (sp)
        self.matrix[2, 1] = -scale.y * (cp * sr)
        self.matrix[2, 2] = scale.z * (cp * cr)

    def inverse(self):
        """Return the inverse transform."""
        return Transform(matrix=numpy.linalg.inv(self.matrix))

    def transform_points(self, points):
        """
        Given a 4x4 transformation matrix, transform an array of 3D points.
        Expected point format: [[X0,Y0,Z0],..[Xn,Yn,Zn]]
        """
        # Needed format: [[X0,..Xn],[Y0,..Yn],[Z0,..Zn]]. So let's transpose
        # the point matrix.
        points = points.transpose()
        # Append a homogeneous row of ones: [[X..],[Y..],[Z..],[1,..,1]].
        points = numpy.append(points, numpy.ones((1, points.shape[1])), axis=0)
        # Point transformation
        points = self.matrix * points
        # Return all but last row
        return points[0:3].transpose()

    def __mul__(self, other):
        return Transform(matrix=numpy.dot(self.matrix, other.matrix))

    def __str__(self):
        return str(self.matrix)


# ---------------------------------------------------------------------------
# carla/util.py
# ---------------------------------------------------------------------------
# Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
import datetime
import sys

from contextlib import contextmanager


@contextmanager
def make_connection(client_type, *args, **kwargs):
    """Context manager to create and connect a networking client object."""
    client = None
    try:
        client = client_type(*args, **kwargs)
        client.connect()
        yield client
    finally:
        # Always disconnect, but only when the client was actually created.
        if client is not None:
            client.disconnect()


class StopWatch(object):
    """Wall-clock stopwatch based on datetime.datetime.now()."""

    def __init__(self):
        self.start = datetime.datetime.now()
        self.end = None

    def restart(self):
        """Reset the start time and clear the stop time."""
        self.start = datetime.datetime.now()
        self.end = None

    def stop(self):
        """Record the stop time."""
        self.end = datetime.datetime.now()

    def seconds(self):
        """Elapsed seconds between start and stop (call stop() first)."""
        return (self.end - self.start).total_seconds()

    def milliseconds(self):
        """Elapsed milliseconds between start and stop."""
        return 1000.0 * self.seconds()


def to_hex_str(header):
    """Render a string as colon-separated two-digit hex character codes."""
    return ':'.join('{:02x}'.format(ord(char)) for char in header)


if sys.version_info >= (3, 3):

    import shutil

    def print_over_same_line(text):
        """Print in place, padding to the current terminal width."""
        terminal_width = shutil.get_terminal_size((80, 20)).columns
        padding = max(0, terminal_width - len(text))
        sys.stdout.write('\r' + text + padding * ' ')
        sys.stdout.flush()

else:

    # Workaround for older Python versions: remember the longest line printed
    # so far and pad new text to that length so leftovers are overwritten.
    def print_over_same_line(text):
        line_length = max(print_over_same_line.last_line_length, len(text))
        padding = max(0, line_length - len(text))
        sys.stdout.write('\r' + text + padding * ' ')
        sys.stdout.flush()
        print_over_same_line.last_line_length = line_length
    print_over_same_line.last_line_length = 0
Barcelona (UAB). 4 | # 5 | # This work is licensed under the terms of the MIT license. 6 | # For a copy, see . 7 | # @author: germanros, felipecode 8 | 9 | 10 | from __future__ import print_function 11 | import abc 12 | 13 | 14 | class Agent(object): 15 | def __init__(self): 16 | self.__metaclass__ = abc.ABCMeta 17 | 18 | @abc.abstractmethod 19 | def run_step(self, measurements, sensor_data, directions, target): 20 | """ 21 | Function to be redefined by an agent. 22 | params The measurements like speed, the image data and a target 23 | :returns A carla Control object, with the steering/gas/brake for the agent 24 | """ 25 | -------------------------------------------------------------------------------- /carla08/agent/forward_agent.py: -------------------------------------------------------------------------------- 1 | 2 | from carla08.agent.agent import Agent 3 | from carla08.client import VehicleControl 4 | 5 | 6 | class ForwardAgent(Agent): 7 | """ 8 | Simple derivation of Agent Class, 9 | A trivial agent agent that goes straight 10 | """ 11 | def run_step(self, measurements, sensor_data, directions, target): 12 | control = VehicleControl() 13 | control.throttle = 0.9 14 | 15 | return control 16 | -------------------------------------------------------------------------------- /carla08/agent/human_agent.py: -------------------------------------------------------------------------------- 1 | from carla08.agent.agent import Agent 2 | from carla08.client import VehicleControl 3 | 4 | try: 5 | import pygame 6 | from pygame.locals import K_DOWN 7 | from pygame.locals import K_LEFT 8 | from pygame.locals import K_RIGHT 9 | from pygame.locals import K_SPACE 10 | from pygame.locals import K_UP 11 | from pygame.locals import K_a 12 | from pygame.locals import K_d 13 | from pygame.locals import K_q 14 | from pygame.locals import K_s 15 | from pygame.locals import K_w 16 | 17 | except ImportError: 18 | raise RuntimeError('cannot import pygame, make sure pygame package is 
class HumanAgent(Agent):
    """
    Derivation of Agent Class for human control,

    """

    def __init__(self):
        """
        TODO: add the parameter for a joystick to be used, default keyboard.
        """
        # BUGFIX: the original called super(HumanAgent).__init__(), which
        # initialises the *unbound super proxy* object instead of this
        # instance, so the base Agent.__init__ never ran. The two-argument
        # form is the correct Python 2/3-compatible spelling.
        super(HumanAgent, self).__init__()
        self._is_on_reverse = False

    def _get_keyboard_control(self, keys):
        """
        Return a VehicleControl message based on the pressed keys.

        Return None
        if a new episode was requested.
        """
        control = VehicleControl()
        if keys[K_LEFT] or keys[K_a]:
            control.steer = -1.0
        if keys[K_RIGHT] or keys[K_d]:
            control.steer = 1.0
        if keys[K_UP] or keys[K_w]:
            control.throttle = 1.0
        if keys[K_DOWN] or keys[K_s]:
            control.brake = 1.0
        if keys[K_SPACE]:
            control.hand_brake = True
        if keys[K_q]:
            # Toggle reverse gear on each 'q' press.
            self._is_on_reverse = not self._is_on_reverse
        control.reverse = self._is_on_reverse
        return control

    def run_step(self, measurements, sensor_data, directions, target):
        # We basically ignore all the parameters.
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                return VehicleControl()

        return self._get_keyboard_control(pygame.key.get_pressed())


# ---------------------------------------------------------------------------
# carla08/agent/lane_follower.py
# ---------------------------------------------------------------------------

from carla08.agent.agent import Agent


class LaneFollower(Agent):
    """
    Simple derivation of Agent Class,
    A lane follower that randomly goes driving around the city
    Not yet implemented
    """

    def __init__(self, town_name):
        pass

    def run_step(self, measurements, sensor_data, directions, target):
        raise NotImplementedError("Lane follower not yet implemented")


# ---------------------------------------------------------------------------
# carla08/agent/modules/__init__.py
# ---------------------------------------------------------------------------

from .obstacle_avoidance import ObstacleAvoidance
from .controllers import Controller
from .waypointer import Waypointer

# ---------------------------------------------------------------------------
# carla08/agent/modules/controllers.py
# ---------------------------------------------------------------------------

import math

from pid_controller.pid import PID
from carla08.client import VehicleControl


# PID based controller, we can have different ones
class Controller(object):
    """The vehicle controller: receives waypoint angles and applies a PID
    speed control in order to produce the throttle/brake/steer action."""

    def __init__(self, params):
        # The parameters for this controller, set by the agent
        self.params = params
        # PID speed controller
        self.pid = PID(p=params['pid_p'], i=params['pid_i'], d=params['pid_d'])

    def get_control(self, wp_angle, wp_angle_speed, speed_factor, current_speed):
        """Compute a VehicleControl from the waypoint angle, waypoint angular
        speed, a slowdown factor and the current forward speed."""
        control = VehicleControl()
        current_speed = max(current_speed, 0)

        steer = self.params['steer_gain'] * wp_angle
        if steer > 0:
            control.steer = min(steer, 1)
        else:
            control.steer = max(steer, -1)

        # Don't go too fast around corners.
        if math.fabs(wp_angle_speed) < 0.1:  # 0.04 works better
            target_speed_adjusted = self.params['target_speed'] * speed_factor
        elif math.fabs(wp_angle_speed) < 0.5:
            target_speed_adjusted = 20 * speed_factor  # 10 works better
        else:
            target_speed_adjusted = 15 * speed_factor  # 10 works better

        self.pid.target = target_speed_adjusted
        pid_gain = self.pid(feedback=current_speed)
        print('Target: ', self.pid.target, 'Error: ', self.pid.error, 'Gain: ', pid_gain)
        print('Target Speed: ', target_speed_adjusted, 'Current Speed: ', current_speed, 'Speed Factor: ',
              speed_factor)

        throttle = min(max(self.params['default_throttle'] - 1.3 * pid_gain, 0),
                       self.params['throttle_max'])

        # A large positive PID gain means we are above target speed: brake.
        if pid_gain > 0.5:
            brake = min(0.35 * pid_gain * self.params['brake_strength'], 1)
        else:
            brake = 0

        control.throttle = max(throttle, 0)  # Prevent N by putting at least 0.01
        control.brake = brake

        print('Throttle: ', control.throttle, 'Brake: ', control.brake, 'Steering Angle: ', control.steer)

        return control


# ---------------------------------------------------------------------------
# carla08/agent/modules/utils.py
# ---------------------------------------------------------------------------

import math
import numpy as np


def get_vec_dist(x_dst, y_dst, x_src, y_src):
    """Unit vector and Euclidean distance from (x_src, y_src) to
    (x_dst, y_dst).

    NOTE(review): divides by the distance, so coincident points yield a
    numpy divide-by-zero (nan/inf), exactly as the original did.
    """
    vec = np.array([x_dst, y_dst]) - np.array([x_src, y_src])
    dist = math.sqrt(vec[0] ** 2 + vec[1] ** 2)
    return vec / dist, dist


def get_angle(vec_dst, vec_src):
    """
    Get the angle between two vectors

    Returns:
        The angle between two vectors

    """
    angle = math.atan2(vec_dst[1], vec_dst[0]) - math.atan2(vec_src[1], vec_src[0])
    # Wrap the result into (-pi, pi].
    if angle > math.pi:
        angle -= 2 * math.pi
    elif angle < -math.pi:
        angle += 2 * math.pi
    return angle


# ---------------------------------------------------------------------------
# carla08/driving_benchmark/experiment.py
# ---------------------------------------------------------------------------
# Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
from ..settings import CarlaSettings


class Experiment(object):
    """
    Experiment defines a certain task, under conditions
    A task is associated with a set of poses, containing start and end pose.

    Conditions are associated with a carla Settings and describe the following:

    Number Of Vehicles
    Number Of Pedestrians
    Weather
    Random Seed of the agents, describing their behaviour.

    """

    def __init__(self):
        self.Task = 0
        self.TaskName = ''
        self.Conditions = CarlaSettings()
        self.Poses = [[]]
        self.Repetitions = 1
        self.Corruption = None
        self.Severity = None

    def set(self, **kwargs):
        """Override experiment attributes; unknown keys raise ValueError."""
        for key, value in kwargs.items():
            if not hasattr(self, key):
                raise ValueError('Experiment: no key named %r' % key)
            setattr(self, key, value)

    # Read-only snake_case accessors over the CamelCase attributes.

    @property
    def task(self):
        return self.Task

    @property
    def task_name(self):
        return self.TaskName

    @property
    def conditions(self):
        return self.Conditions

    @property
    def poses(self):
        return self.Poses

    @property
    def repetitions(self):
        return self.Repetitions

    @property
    def corruption(self):
        return self.Corruption

    @property
    def severity(self):
        return self.Severity


# ---------------------------------------------------------------------------
# carla08/driving_benchmark/experiment_suites/__init__.py
# ---------------------------------------------------------------------------

from .basic_experiment_suite import BasicExperimentSuite
from .corl_2017 import CoRL2017
from .longcontrol_2018 import LongitudinalControl2018
# Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.


from __future__ import print_function

from ..experiment import Experiment
from ...sensor import Camera
from ...settings import CarlaSettings

from .experiment_suite import ExperimentSuite


class BasicExperimentSuite(ExperimentSuite):
    """Minimal example suite: four tasks evaluated in a single weather."""

    @property
    def train_weathers(self):
        return [1]

    @property
    def test_weathers(self):
        return [1]

    def build_experiments(self):
        """
        Creates the whole set of experiment objects,
        The experiments created depends on the selected Town.

        """
        # We check the town, based on that we define the town related
        # parameters. The size of the vector is related to the number of
        # tasks; inside each task there are multiple poses (start, end).
        if self._city_name == 'Town01':
            poses_tasks = [[[7, 3], [7, 3]], [[138, 17], [138, 17]],
                           [[140, 134], [140, 134]], [[140, 134], [140, 134]]]
            vehicles_tasks = [0, 0, 0, 20]
            pedestrians_tasks = [0, 0, 0, 50]
        else:
            poses_tasks = [[[4, 2]], [[37, 76]], [[19, 66]], [[19, 66]]]
            vehicles_tasks = [0, 0, 0, 15]
            pedestrians_tasks = [0, 0, 0, 50]

        # This single RGB camera is used on every experiment.
        camera = Camera('CameraRGB')
        camera.set(FOV=100)
        camera.set_image_size(800, 600)
        camera.set_position(2.0, 0.0, 1.4)
        camera.set_rotation(-15.0, 0, 0)

        # Based on the parameters, creates a vector with experiment objects.
        experiments_vector = []
        for weather in self.weathers:
            for task_id, poses in enumerate(poses_tasks):
                conditions = CarlaSettings()
                conditions.set(
                    SendNonPlayerAgentsInfo=True,
                    NumberOfVehicles=vehicles_tasks[task_id],
                    NumberOfPedestrians=pedestrians_tasks[task_id],
                    WeatherId=weather
                )
                # Add all the cameras that were set for this experiment.
                conditions.add_sensor(camera)

                experiment = Experiment()
                experiment.set(
                    Conditions=conditions,
                    Poses=poses,
                    Task=task_id,
                    Repetitions=2
                )
                experiments_vector.append(experiment)

        return experiments_vector


# ---------------------------------------------------------------------------
# carla08/driving_benchmark/experiment_suites/corl_2017.py
# ---------------------------------------------------------------------------
# CORL experiment set.
# NOTE(review): the duplicated `from __future__ import print_function` of the
# original second file is omitted here; __future__ imports are only legal at
# the very top of a module.


class CoRL2017(ExperimentSuite):
    """Experiment suite used in the CoRL 2017 CARLA benchmark."""

    @property
    def train_weathers(self):
        return [1, 3, 6, 8]

    @property
    def test_weathers(self):
        return [4, 14]

    def _poses_town01(self):
        """
        Each matrix is a new task. We have all the four tasks

        """

        def _poses_straight():
            return [[36, 40], [39, 35], [110, 114], [7, 3], [0, 4],
                    [68, 50], [61, 59], [47, 64], [147, 90], [33, 87],
                    [26, 19], [80, 76], [45, 49], [55, 44], [29, 107],
                    [95, 104], [84, 34], [53, 67], [22, 17], [91, 148],
                    [20, 107], [78, 70], [95, 102], [68, 44], [45, 69]]

        def _poses_one_curve():
            return [[138, 17], [47, 16], [26, 9], [42, 49], [140, 124],
                    [85, 98], [65, 133], [137, 51], [76, 66], [46, 39],
                    [40, 60], [0, 29], [4, 129], [121, 140], [2, 129],
                    [78, 44], [68, 85], [41, 102], [95, 70], [68, 129],
                    [84, 69], [47, 79], [110, 15], [130, 17], [0, 17]]

        def _poses_navigation():
            return [[105, 29], [27, 130], [102, 87], [132, 27], [24, 44],
                    [96, 26], [34, 67], [28, 1], [140, 134], [105, 9],
                    [148, 129], [65, 18], [21, 16], [147, 97], [42, 51],
                    [30, 41], [18, 107], [69, 45], [102, 95], [18, 145],
                    [111, 64], [79, 45], [84, 69], [73, 31], [37, 81]]

        return [_poses_straight(),
                _poses_one_curve(),
                _poses_navigation(),
                _poses_navigation()]

    def _poses_town02(self):

        def _poses_straight():
            return [[38, 34], [4, 2], [12, 10], [62, 55], [43, 47],
                    [64, 66], [78, 76], [59, 57], [61, 18], [35, 39],
                    [12, 8], [0, 18], [75, 68], [54, 60], [45, 49],
                    [46, 42], [53, 46], [80, 29], [65, 63], [0, 81],
                    [54, 63], [51, 42], [16, 19], [17, 26], [77, 68]]

        def _poses_one_curve():
            return [[37, 76], [8, 24], [60, 69], [38, 10], [21, 1],
                    [58, 71], [74, 32], [44, 0], [71, 16], [14, 24],
                    [34, 11], [43, 14], [75, 16], [80, 21], [3, 23],
                    [75, 59], [50, 47], [11, 19], [77, 34], [79, 25],
                    [40, 63], [58, 76], [79, 55], [16, 61], [27, 11]]

        def _poses_navigation():
            return [[19, 66], [79, 14], [19, 57], [23, 1],
                    [53, 76], [42, 13], [31, 71], [33, 5],
                    [54, 30], [10, 61], [66, 3], [27, 12],
                    [79, 19], [2, 29], [16, 14], [5, 57],
                    [70, 73], [46, 67], [57, 50], [61, 49], [21, 12],
                    [51, 81], [77, 68], [56, 65], [43, 54]]

        return [_poses_straight(),
                _poses_one_curve(),
                _poses_navigation(),
                _poses_navigation()
                ]

    def build_experiments(self):
        """
        Creates the whole set of experiment objects,
        The experiments created depend on the selected Town.


        """
        # This single RGB camera is used on every experiment.
        camera = Camera('CameraRGB')
        camera.set(FOV=100)
        camera.set_image_size(800, 600)
        camera.set_position(2.0, 0.0, 1.4)
        camera.set_rotation(-15.0, 0, 0)

        if self._city_name == 'Town01':
            poses_tasks = self._poses_town01()
            vehicles_tasks = [0, 0, 0, 20]
            pedestrians_tasks = [0, 0, 0, 50]
        else:
            poses_tasks = self._poses_town02()
            vehicles_tasks = [0, 0, 0, 15]
            pedestrians_tasks = [0, 0, 0, 50]

        experiments_vector = []
        for weather in self.weathers:
            for task_id, poses in enumerate(poses_tasks):
                conditions = CarlaSettings()
                conditions.set(
                    SendNonPlayerAgentsInfo=True,
                    NumberOfVehicles=vehicles_tasks[task_id],
                    NumberOfPedestrians=pedestrians_tasks[task_id],
                    WeatherId=weather
                )
                # Add all the cameras that were set for this experiment.
                conditions.add_sensor(camera)

                experiment = Experiment()
                experiment.set(
                    Conditions=conditions,
                    Poses=poses,
                    Task=task_id,
                    Repetitions=1
                )
                experiments_vector.append(experiment)

        return experiments_vector
-------------------------------------------------------------------------------- 1 | # To be redefined on subclasses on how to calculate timeout for an episode 2 | import abc 3 | 4 | 5 | class ExperimentSuite(object): 6 | 7 | def __init__(self, city_name): 8 | 9 | self._city_name = city_name 10 | self._experiments = self.build_experiments() 11 | 12 | def calculate_time_out(self, path_distance): 13 | """ 14 | Function to return the timeout ,in milliseconds, 15 | that is calculated based on distance to goal. 16 | This is the same timeout as used on the CoRL paper. 17 | """ 18 | return ((path_distance / 1000.0) / 10.0) * 3600.0 + 10.0 19 | 20 | def get_number_of_poses_task(self): 21 | """ 22 | Get the number of poses a task have for this benchmark 23 | """ 24 | 25 | # Warning: assumes that all tasks have the same size 26 | 27 | return len(self._experiments[0].poses) 28 | 29 | def get_number_of_reps_poses(self): 30 | """ 31 | Get the number of poses a task have for this benchmark 32 | """ 33 | 34 | # Warning: assumes that all poses have the same number of repetitions 35 | 36 | return self._experiments[0].repetitions 37 | 38 | 39 | def get_experiments(self): 40 | """ 41 | Getter for the experiment set. 42 | """ 43 | return self._experiments 44 | 45 | @property 46 | def dynamic_tasks(self): 47 | """ 48 | Returns the episodes that contain dynamic obstacles 49 | """ 50 | dynamic_tasks = set() 51 | for exp in self._experiments: 52 | if exp.conditions.NumberOfVehicles > 0 or exp.conditions.NumberOfPedestrians > 0: 53 | dynamic_tasks.add(exp.task) 54 | 55 | return list(dynamic_tasks) 56 | 57 | @property 58 | def metrics_parameters(self): 59 | """ 60 | Property to return the parameters for the metric module 61 | Could be redefined depending on the needs of the user. 
62 | """ 63 | return { 64 | 65 | 'intersection_offroad': {'frames_skip': 10, 66 | 'frames_recount': 20, 67 | 'threshold': 0.3 68 | }, 69 | 'intersection_otherlane': {'frames_skip': 10, 70 | 'frames_recount': 20, 71 | 'threshold': 0.4 72 | }, 73 | 'collision_other': {'frames_skip': 10, 74 | 'frames_recount': 20, 75 | 'threshold': 400 76 | }, 77 | 'collision_vehicles': {'frames_skip': 10, 78 | 'frames_recount': 30, 79 | 'threshold': 400 80 | }, 81 | 'collision_pedestrians': {'frames_skip': 5, 82 | 'frames_recount': 100, 83 | 'threshold': 300 84 | }, 85 | 86 | } 87 | 88 | @property 89 | def weathers(self): 90 | weathers = set(self.train_weathers) 91 | weathers.update(self.test_weathers) 92 | return weathers 93 | 94 | @property 95 | def collision_as_failure(self): 96 | return False 97 | 98 | @property 99 | def traffic_light_as_failure(self): 100 | return False 101 | 102 | @abc.abstractmethod 103 | def build_experiments(self): 104 | """ 105 | Returns a set of experiments to be evaluated 106 | Must be redefined in an inherited class. 107 | 108 | """ 109 | 110 | @abc.abstractproperty 111 | def train_weathers(self): 112 | """ 113 | Return the weathers that are considered as training conditions 114 | """ 115 | 116 | @abc.abstractproperty 117 | def test_weathers(self): 118 | """ 119 | Return the weathers that are considered as testing conditions 120 | """ 121 | -------------------------------------------------------------------------------- /carla08/driving_benchmark/experiment_suites/longcontrol_2018.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 2 | # Barcelona (UAB). 3 | # 4 | # This work is licensed under the terms of the MIT license. 5 | # For a copy, see . 6 | 7 | # CORL experiment set. 
from __future__ import print_function

from ..experiment import Experiment
from ...sensor import Camera
from ...settings import CarlaSettings
from ...driving_benchmark.experiment_suites.experiment_suite import ExperimentSuite


class LongitudinalControl2018(ExperimentSuite):
    """
    Experiment suite for the 2018 longitudinal-control benchmark: three
    navigation tasks with increasing amounts of dynamic agents, where any
    collision counts as episode failure.
    """

    @property
    def train_weathers(self):
        return [1, 3, 6, 8]

    @property
    def test_weathers(self):
        return [10, 14]

    @property
    def collision_as_failure(self):
        # Unlike the base suite, any collision terminates the episode.
        return True

    @property
    def traffic_light_as_failure(self):
        return False

    def calculate_time_out(self, path_distance):
        """
        Return the timeout, in seconds, calculated based on the distance
        to goal (meters): time at 3 km/h plus a 20 s margin. The timeout
        is larger than the CoRL one since stopping for traffic lights is
        expected. (Docstring fixed: it previously said milliseconds.)
        """
        return ((path_distance / 1000.0) / 3.0) * 3600.0 + 20.0

    def _poses_town01(self):
        """
        Each matrix is a new task; the three tasks all reuse the same
        navigation poses. (Docstring fixed: it previously claimed four
        tasks while only three are returned.)
        """

        def _poses_navigation():
            return [[105, 29], [27, 130], [102, 87], [132, 27], [25, 44],
                    [4, 64], [34, 67], [54, 30], [140, 134], [105, 9],
                    [148, 129], [65, 18], [21, 16], [147, 97], [134, 49],
                    [30, 41], [81, 89], [69, 45], [102, 95], [18, 145],
                    [111, 64], [79, 45], [84, 69], [73, 31], [37, 81]]

        return [_poses_navigation(),
                _poses_navigation(),
                _poses_navigation()]

    def _poses_town02(self):

        def _poses_navigation():
            return [[19, 66], [79, 14], [19, 57], [39, 53], [60, 26],
                    [53, 76], [42, 13], [31, 71], [59, 35], [47, 16],
                    [10, 61], [66, 3], [20, 79], [14, 56], [26, 69],
                    [79, 19], [2, 29], [16, 14], [5, 57], [77, 68],
                    [70, 73], [46, 67], [34, 77], [61, 49], [21, 12]]

        return [_poses_navigation(),
                _poses_navigation(),
                _poses_navigation()]

    def build_experiments(self):
        """
        Creates the whole set of experiment objects.
        The experiments created depend on the selected Town.
        """
        # This single RGB camera is used on every experiment.
        camera = Camera('CameraRGB')
        camera.set(FOV=100)
        camera.set_image_size(800, 600)
        camera.set_position(2.0, 0.0, 1.4)
        camera.set_rotation(-15.0, 0, 0)

        if self._city_name == 'Town01':
            poses_tasks = self._poses_town01()
            vehicles_tasks = [0, 20, 100]
            pedestrians_tasks = [0, 50, 250]
        else:
            poses_tasks = self._poses_town02()
            vehicles_tasks = [0, 15, 70]
            pedestrians_tasks = [0, 50, 150]

        experiments_vector = []
        for weather in self.weathers:
            for iteration in range(len(poses_tasks)):
                conditions = CarlaSettings()
                conditions.set(
                    SendNonPlayerAgentsInfo=True,
                    NumberOfVehicles=vehicles_tasks[iteration],
                    NumberOfPedestrians=pedestrians_tasks[iteration],
                    WeatherId=weather
                )
                # Two-wheeled vehicles are excluded in this benchmark setting.
                conditions.set(DisableTwoWheeledVehicles=True)
                # Add all the cameras that were set for these experiments.
                conditions.add_sensor(camera)

                experiment = Experiment()
                experiment.set(
                    Conditions=conditions,
                    Poses=poses_tasks[iteration],
                    Task=iteration,
                    Repetitions=1
                )
                experiments_vector.append(experiment)

        return experiments_vector
def print_summary(metrics_summary, weathers, path):
    """
    Print the benchmark summary for the selected set of weathers.

    The raw metrics dictionary is printed the way it was described on the
    CoRL 2017 paper, and the full dictionary is also dumped to
    '<path>/metrics.json'.

    Args:
        metrics_summary: dict of metric name -> {weather id -> per-task
            lists of per-episode values} plus a 'driven_kilometers' entry.
        weathers: iterable of weather ids to include in the summary.
        path: directory where metrics.json is written.
    """
    # Improve readability by mapping weather ids to names.
    weather_name_dict = {1: 'Clear Noon', 3: 'After Rain Noon',
                         6: 'Heavy Rain Noon', 8: 'Clear Sunset', 10: 'rainy after rain',
                         4: 'Cloudy After Rain', 14: 'Soft Rain Sunset'}

    # First we write the entire dictionary on the benchmark folder.
    with open(os.path.join(path, 'metrics.json'), 'w') as fo:
        fo.write(json.dumps(metrics_summary))

    # Second we print the metrics that are summarized by averaging.
    metrics_to_average = [
        'episodes_fully_completed',
        'episodes_completion',
        'percentage_off_road',
        'percentage_green_lights'
    ]

    # We compute the number of tasks based on the size of the first entry.
    number_of_episodes = len(list(metrics_summary['episodes_fully_completed'].items())[0][1])
    # Hoisted: membership set built once instead of per weather.
    weather_set = set(weathers)

    for metric in metrics_to_average:
        if metric == 'episodes_completion':
            print("Average Percentage of Distance to Goal Travelled ")
        elif metric == 'percentage_off_road':
            print("Average Percentage of Distance to Percentage OffRoad")
        elif metric == 'percentage_green_lights':
            print("Average Percentage of Distance to Percentage Green Lights")
        else:
            print("Percentage of Successful Episodes")

        print("")
        values = metrics_summary[metric]
        print("    VALUES ")

        metric_sum_values = np.zeros(number_of_episodes)
        for weather, tasks in values.items():
            if weather in weather_set:
                print('  Weather: ', weather_name_dict[weather])
                # enumerate replaces the previous manual `count` counter.
                for count, t in enumerate(tasks):
                    # len(t) == 0 is robust for both lists and arrays,
                    # unlike the previous `t == []` comparison.
                    if len(t) == 0:
                        print('    Metric Not Computed')
                    else:
                        task_average = float(sum(t)) / float(len(t))
                        print('    Task:', count, ' -> ', task_average)
                        metric_sum_values[count] += task_average * 1.0 / float(len(weathers))

        print('  Average Between Weathers')
        for i in range(len(metric_sum_values)):
            print('    Task ', i, ' -> ', metric_sum_values[i])
        print("")

    infraction_metrics = [
        'collision_pedestrians',
        'collision_vehicles',
        'collision_other',
        'intersection_offroad',
        'intersection_otherlane'
    ]

    # We need to collect the total number of kilometers for each task.
    for metric in infraction_metrics:
        values_driven = metrics_summary['driven_kilometers']
        values = metrics_summary[metric]
        metric_sum_values = np.zeros(number_of_episodes)
        summed_driven_kilometers = np.zeros(number_of_episodes)

        if metric == 'collision_pedestrians':
            print('Avg. Kilometers driven before a collision to a PEDESTRIAN')
        elif metric == 'collision_vehicles':
            print('Avg. Kilometers driven before a collision to a VEHICLE')
        elif metric == 'collision_other':
            print('Avg. Kilometers driven before a collision to a STATIC OBSTACLE')
        elif metric == 'intersection_offroad':
            print('Avg. Kilometers driven before going OUTSIDE OF THE ROAD')
        else:
            print('Avg. Kilometers driven before invading the OPPOSITE LANE')

        for items_metric, items_driven in zip(values.items(), values_driven.items()):
            weather = items_metric[0]
            tasks = items_metric[1]
            tasks_driven = items_driven[1]

            if weather in weather_set:
                print('  Weather: ', weather_name_dict[weather])
                for count, (t, t_driven) in enumerate(zip(tasks, tasks_driven)):
                    if len(t) == 0:
                        print('Metric Not Computed')
                    else:
                        if sum(t) > 0:
                            print('    Task ', count, ' -> ', t_driven / float(sum(t)))
                        else:
                            # No infraction happened: report a lower bound.
                            print('    Task ', count, ' -> more than', t_driven)

                        metric_sum_values[count] += float(sum(t))
                        summed_driven_kilometers[count] += t_driven

        print('  Average Between Weathers')
        for i in range(len(metric_sum_values)):
            if metric_sum_values[i] == 0:
                print('    Task ', i, ' -> more than ', summed_driven_kilometers[i])
            else:
                print('    Task ', i, ' -> ', summed_driven_kilometers[i] / metric_sum_values[i])
        print("")

    print("")
    print("")
11 14 | 41,0 48,0 7 15 | 48,0 41,0 7 16 | 41,40 11,40 30 17 | 11,40 41,40 30 18 | 41,0 41,7 7 19 | 41,7 41,0 7 20 | 11,40 0,40 11 21 | 0,40 11,40 11 22 | 11,0 19,0 8 23 | 19,0 11,0 8 24 | 11,40 11,24 16 25 | 11,24 11,40 16 26 | 41,24 41,40 16 27 | 41,40 41,24 16 28 | 11,24 11,16 8 29 | 11,16 11,24 8 30 | 41,24 11,24 30 31 | 11,24 41,24 30 32 | 41,16 41,24 8 33 | 41,24 41,16 8 34 | 11,16 11,7 9 35 | 11,7 11,16 9 36 | 41,16 11,16 30 37 | 11,16 41,16 30 38 | 41,7 41,16 9 39 | 41,16 41,7 9 40 | 11,7 11,0 7 41 | 11,0 11,7 7 42 | 41,7 19,7 22 43 | 19,7 41,7 22 44 | 19,0 41,0 22 45 | 41,0 19,0 22 46 | 19,7 11,7 8 47 | 11,7 19,7 8 48 | 19,0 19,7 7 49 | 19,7 19,0 7 50 | -------------------------------------------------------------------------------- /carla08/planner/Town01Central.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/data_aggregation/76777156a465cbb77d6d5ab88da8f1812e7ff043/carla08/planner/Town01Central.png -------------------------------------------------------------------------------- /carla08/planner/Town01Lanes.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/data_aggregation/76777156a465cbb77d6d5ab88da8f1812e7ff043/carla08/planner/Town01Lanes.png -------------------------------------------------------------------------------- /carla08/planner/Town02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/data_aggregation/76777156a465cbb77d6d5ab88da8f1812e7ff043/carla08/planner/Town02.png -------------------------------------------------------------------------------- /carla08/planner/Town02.txt: -------------------------------------------------------------------------------- 1 | 5.4400,-107.48000,-38.11000000 2 | 0.000000,0.000000,0.000000 3 | 1.000000,1.000000,1.000000 4 | -16.43022,-16.43022,0.000 5 | 
25, 25 6 | 0,10 0,24 14 7 | 0,24 0,10 14 8 | 24,24 6,24 18 9 | 6,24 24,24 18 10 | 24,0 24,10 10 11 | 24,10 24,0 10 12 | 0,0 24,0 24 13 | 24,0 0,0 24 14 | 0,10 0,0 10 15 | 0,0 0,10 10 16 | 24,10 24,16 6 17 | 24,16 24,10 6 18 | 0,10 6,10 6 19 | 6,10 0,10 6 20 | 6,24 0,24 6 21 | 0,24 6,24 6 22 | 6,10 17,10 11 23 | 17,10 6,10 11 24 | 6,24 6,16 8 25 | 6,16 6,24 8 26 | 24,16 24,24 8 27 | 24,24 24,16 8 28 | 6,16 6,10 6 29 | 6,10 6,16 6 30 | 24,16 17,16 7 31 | 17,16 24,16 7 32 | 17,16 6,16 11 33 | 6,16 17,16 11 34 | 17,10 24,10 7 35 | 24,10 17,10 7 36 | 17,16 17,10 6 37 | 17,10 17,16 6 38 | -------------------------------------------------------------------------------- /carla08/planner/Town02Big.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/data_aggregation/76777156a465cbb77d6d5ab88da8f1812e7ff043/carla08/planner/Town02Big.png -------------------------------------------------------------------------------- /carla08/planner/Town02Central.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/data_aggregation/76777156a465cbb77d6d5ab88da8f1812e7ff043/carla08/planner/Town02Central.png -------------------------------------------------------------------------------- /carla08/planner/Town02Lanes.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/data_aggregation/76777156a465cbb77d6d5ab88da8f1812e7ff043/carla08/planner/Town02Lanes.png -------------------------------------------------------------------------------- /carla08/planner/__init__.py: -------------------------------------------------------------------------------- 1 | from .planner import Planner 2 | -------------------------------------------------------------------------------- /carla08/planner/bezier.py: 
import numpy as np
# Fix: scipy.misc.comb was deprecated in SciPy 1.0 and removed in 1.3;
# the binomial coefficient now lives in scipy.special.
from scipy.special import comb


def bernstein_poly(i, n, t):
    """
    The Bernstein polynomial of n, i as a function of t.
    """
    return comb(n, i) * (t ** (n - i)) * (1 - t) ** i


def bezier_curve(points, nTimes=1000):
    """
    Given a set of control points, return the
    bezier curve defined by the control points.

    points should be a list of lists, or list of tuples
    such as [ [1,1],
              [2,3],
              [4,5], ..[Xn, Yn] ]
    nTimes is the number of time steps, defaults to 1000

    See http://processingjs.nihongoresources.com/bezierinfo/
    """
    nPoints = len(points)
    xPoints = np.array([p[0] for p in points])
    yPoints = np.array([p[1] for p in points])

    t = np.linspace(0.0, 1.0, nTimes)

    # One row of Bernstein basis values per control point.
    polynomial_array = np.array(
        [bernstein_poly(i, nPoints - 1, t) for i in range(0, nPoints)])

    xvals = np.dot(xPoints, polynomial_array)
    yvals = np.dot(yPoints, polynomial_array)

    return xvals, yvals
import math

import numpy as np


def string_to_node(string):
    """Parse a 'x,y' string into an (int, int) tuple."""
    vec = string.split(',')
    return (int(vec[0]), int(vec[1]))


def string_to_floats(string):
    """Parse a 'x,y,z' string into a (float, float, float) tuple."""
    vec = string.split(',')
    return (float(vec[0]), float(vec[1]), float(vec[2]))


def sldist(c1, c2):
    """Straight-line (Euclidean) distance between two 2D points."""
    return math.sqrt((c2[0] - c1[0]) ** 2 + (c2[1] - c1[1]) ** 2)


def sldist3(c1, c2):
    """Straight-line (Euclidean) distance between two 3D points."""
    return math.sqrt((c2[0] - c1[0]) ** 2 + (c2[1] - c1[1]) ** 2
                     + (c2[2] - c1[2]) ** 2)


class Graph(object):
    """
    A simple directed, weighted graph.

    Nodes are (x, y) integer tuples; edges and their distances are read
    from a planner graph file or added manually with add_edge().
    """

    def __init__(self, graph_file=None, node_density=50):
        self._nodes = set()
        self._angles = {}
        self._edges = {}
        self._distances = {}
        self._node_density = node_density

        if graph_file is not None:
            with open(graph_file, 'r') as f:
                # Skip the first four header lines (offsets/rotation/scale).
                lines_after_4 = f.readlines()[4:]

            # The first remaining line holds the graph resolution.
            self._resolution = string_to_node(lines_after_4[0])
            for line in lines_after_4[1:]:
                from_node, to_node, d = line.split()
                from_node = string_to_node(from_node)
                to_node = string_to_node(to_node)

                if from_node not in self._nodes:
                    self.add_node(from_node)
                if to_node not in self._nodes:
                    self.add_node(to_node)

                self._edges.setdefault(from_node, [])
                self._edges[from_node].append(to_node)
                self._distances[(from_node, to_node)] = float(d)

    def add_node(self, value):
        self._nodes.add(value)

    def make_orientations(self, node, heading):
        """
        Assign an orientation to every node: ``heading`` for ``node``
        itself, and for the remaining nodes (visited in order of
        increasing distance) a unit vector pointing towards ``node``.
        """
        import collections

        # NOTE(review): keyed by distance, so equidistant nodes overwrite
        # each other -- preserved from the original implementation.
        distance_dic = {}
        for node_iter in self._nodes:
            if node_iter != node:
                distance_dic[sldist(node, node_iter)] = node_iter

        distance_dic = collections.OrderedDict(
            sorted(distance_dic.items()))

        self._angles[node] = heading
        for _, v in distance_dic.items():
            start_to_goal = np.array([node[0] - v[0], node[1] - v[1]])
            # Fix: removed a leftover debug print(start_to_goal).
            self._angles[v] = start_to_goal / np.linalg.norm(start_to_goal)

    def add_edge(self, from_node, to_node, distance):
        self._add_edge(from_node, to_node, distance)

    def _add_edge(self, from_node, to_node, distance):
        self._edges.setdefault(from_node, [])
        self._edges[from_node].append(to_node)
        self._distances[(from_node, to_node)] = distance

    def get_resolution(self):
        return self._resolution

    def get_edges(self):
        return self._edges

    def intersection_nodes(self):
        """Nodes with more than two outgoing edges (road intersections)."""
        # Fix: .get() guards against isolated nodes with no edge entry
        # (the previous direct indexing raised KeyError for them).
        return [node for node in self._nodes
                if len(self._edges.get(node, ())) > 2]

    def curve_nodes(self):
        """Nodes with more than one outgoing edge."""
        return [node for node in self._nodes
                if len(self._edges.get(node, ())) > 1]

    # This contains also the non-intersection turns...
    def turn_nodes(self):
        return self._nodes

    def plot_ori(self, c):
        """Scatter-plot the nodes plus a short segment along each orientation."""
        from matplotlib import collections as mc
        import matplotlib.pyplot as plt

        line_len = 1
        lines = [[(p[0], p[1]), (p[0] + line_len * self._angles[p][0],
                                 p[1] + line_len * self._angles[p][1])]
                 for p in self._nodes]
        lc = mc.LineCollection(lines, linewidth=2, color='green')
        _, ax = plt.subplots()
        ax.add_collection(lc)

        ax.autoscale()
        ax.margins(0.1)

        xs = [p[0] for p in self._nodes]
        ys = [p[1] for p in self._nodes]
        plt.scatter(xs, ys, color=c)

    def plot(self, c):
        """Scatter-plot the graph nodes."""
        import matplotlib.pyplot as plt
        xs = [p[0] for p in self._nodes]
        ys = [p[1] for p in self._nodes]
        plt.scatter(xs, ys, color=c)
import copy

import numpy as np


def angle_between(v1, v2):
    """Return the angle, in radians, between two 2D vectors."""
    return np.arccos(np.dot(v1, v2) / np.linalg.norm(v1) / np.linalg.norm(v2))


class Grid(object):
    """
    Occupancy grid derived from the road graph: cells lying on a graph
    edge are free (0.0), every remaining cell is a wall (1.0).
    """

    def __init__(self, graph):
        self._graph = graph
        self._structure = self._make_structure()
        self._walls = self._make_walls()

    def search_on_grid(self, x, y):
        """Return a free cell near (x, y), scanning the 8 neighbours at growing scale."""
        visit = [[0, 1], [0, -1], [1, 0], [-1, 0],
                 [1, -1], [1, 1], [-1, 1], [-1, -1]]
        c_x, c_y = x, y
        scale = 1

        while self._structure[c_x, c_y] != 0:
            for offset in visit:
                c_x, c_y = x + offset[0] * scale, y + offset[1] * scale

                if c_x >= 0 and c_x < self._graph.get_resolution()[
                        0] and c_y >= 0 and c_y < self._graph.get_resolution()[1]:
                    if self._structure[c_x, c_y] == 0:
                        break
                else:
                    # Out of bounds: fall back to the start cell.
                    c_x, c_y = x, y
            scale += 1

        return c_x, c_y

    def get_walls(self):
        """Return the set of wall cells."""
        return self._walls

    def get_wall_source(self, pos, pos_ori, target):
        """
        Walls augmented with the free neighbours of ``pos`` lying behind
        the heading (angle > 1.6 rad), excluding the target cell.
        """
        heading = np.array([pos_ori[0], pos_ori[1]])
        blocked = copy.copy(self._walls)
        for neighbour in self.get_adjacent_free_nodes(pos):
            towards = np.array([neighbour[0] - pos[0], neighbour[1] - pos[1]])
            if angle_between(heading, towards) > 1.6 and neighbour != target:
                blocked.add((neighbour[0], neighbour[1]))
        return blocked

    def get_wall_target(self, pos, pos_ori, source):
        """
        Walls augmented with the free neighbours of ``pos`` roughly in
        front of the heading (angle < 1.0 rad), excluding the source cell.
        """
        heading = np.array([pos_ori[0], pos_ori[1]])
        blocked = copy.copy(self._walls)
        for neighbour in self.get_adjacent_free_nodes(pos):
            towards = np.array([neighbour[0] - pos[0], neighbour[1] - pos[1]])
            if angle_between(heading, towards) < 1.0 and neighbour != source:
                blocked.add((neighbour[0], neighbour[1]))
        return blocked

    def _draw_line(self, grid, xi, yi, xf, yf):
        """Zero (mark as free) every cell in the rectangle spanned by the endpoints."""
        xi, xf = min(xi, xf), max(xi, xf)
        yi, yf = min(yi, yf), max(yi, yf)
        for i in range(xi, xf + 1):
            for j in range(yi, yf + 1):
                grid[i, j] = 0.0
        return grid

    def _make_structure(self):
        """Rasterize every graph edge onto a fresh all-walls grid."""
        resolution = self._graph.get_resolution()
        structure = np.ones((resolution[0], resolution[1]))
        for node, connections in self._graph.get_edges().items():
            for connection in connections:
                structure = self._draw_line(
                    structure, node[0], node[1], connection[0], connection[1])
        return structure

    def _make_walls(self):
        """Collect every cell that stayed marked as a wall."""
        rows, cols = self._structure.shape
        return {(i, j)
                for i in range(rows)
                for j in range(cols)
                if self._structure[i, j] == 1.0}

    def get_adjacent_free_nodes(self, pos):
        """Return the free cells among the eight neighbours of ``pos``."""
        visit = [[0, 1], [0, -1], [1, 0], [1, 1],
                 [1, -1], [-1, 0], [-1, 1], [-1, -1]]
        resolution = self._graph.get_resolution()
        adjacent = set()
        for offset in visit:
            node = (pos[0] + offset[0], pos[1] + offset[1])
            if 0 <= node[0] < resolution[0] and 0 <= node[1] < resolution[1]:
                if self._structure[node[0], node[1]] == 0.0:
                    adjacent.add(node)
        return adjacent
"""CARLA Settings"""

import io
import random
import sys


if sys.version_info >= (3, 0):

    from configparser import ConfigParser

else:

    from ConfigParser import RawConfigParser as ConfigParser


from . import sensor as carla_sensor


MAX_NUMBER_OF_WEATHER_IDS = 14


class CarlaSettings(object):
    """
    The CarlaSettings object controls the settings of an episode. The __str__
    method retrieves an str with a CarlaSettings.ini file contents.
    """

    def __init__(self, **kwargs):
        # [CARLA/Server]
        self.SynchronousMode = True
        self.SendNonPlayerAgentsInfo = False
        # [CARLA/QualitySettings]
        self.QualityLevel = 'Epic'
        # [CARLA/LevelSettings]
        self.PlayerVehicle = None
        self.NumberOfVehicles = 20
        self.NumberOfPedestrians = 30
        self.WeatherId = 1
        self.SeedVehicles = None
        self.SeedPedestrians = None
        self.DisableTwoWheeledVehicles = False
        self.set(**kwargs)
        # Created after set() so it cannot be clobbered via kwargs.
        self._sensors = []

    def set(self, **kwargs):
        """Assign any known setting by keyword; reject unknown keys."""
        for key, value in kwargs.items():
            if not hasattr(self, key):
                raise ValueError('CarlaSettings: no key named %r' % key)
            setattr(self, key, value)

    def randomize_seeds(self):
        """
        Randomize the seeds of the new episode's pseudo-random number
        generators.
        """
        self.SeedVehicles = random.getrandbits(16)
        self.SeedPedestrians = random.getrandbits(16)

    def randomize_weather(self):
        """Randomized the WeatherId."""
        self.WeatherId = random.randint(0, MAX_NUMBER_OF_WEATHER_IDS)

    def add_sensor(self, sensor):
        """Add a sensor to the player vehicle (see sensor.py)."""
        if not isinstance(sensor, carla_sensor.Sensor):
            raise ValueError('Sensor not supported')
        self._sensors.append(sensor)

    def __str__(self):
        """Converts this object to an INI formatted string."""
        ini = ConfigParser()
        ini.optionxform = str
        section_server = 'CARLA/Server'
        section_quality = 'CARLA/QualitySettings'
        section_level = 'CARLA/LevelSettings'
        section_sensor = 'CARLA/Sensor'

        def public_attribs(obj):
            # Every non-callable public attribute of the object.
            return [a for a in dir(obj)
                    if not a.startswith('_') and not callable(getattr(obj, a))]

        def write_section(section, obj, keys):
            # Only attributes that exist and are not None end up in the INI;
            # the section itself is created lazily on first write.
            for key in keys:
                if hasattr(obj, key) and getattr(obj, key) is not None:
                    if not ini.has_section(section):
                        ini.add_section(section)
                    ini.set(section, key, str(getattr(obj, key)))

        write_section(section_server, self, [
            'SynchronousMode',
            'SendNonPlayerAgentsInfo'])
        write_section(section_quality, self, [
            'QualityLevel'])
        write_section(section_level, self, [
            'NumberOfVehicles',
            'NumberOfPedestrians',
            'WeatherId',
            'SeedVehicles',
            'SeedPedestrians',
            'DisableTwoWheeledVehicles'])

        ini.add_section(section_sensor)
        ini.set(section_sensor, 'Sensors',
                ','.join(s.SensorName for s in self._sensors))

        for sensor_def in self._sensors:
            section = section_sensor + '/' + sensor_def.SensorName
            write_section(section, sensor_def, public_attribs(sensor_def))

        if sys.version_info >= (3, 0):
            text = io.StringIO()
        else:
            text = io.BytesIO()

        ini.write(text)
        return text.getvalue().replace(' = ', '=')
# Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.

"""Basic TCP client."""

import logging
import socket
import struct
import time


class TCPConnectionError(Exception):
    """Raised for any networking error on the TCP client."""
    pass


class TCPClient(object):
    """
    Basic networking client for TCP connections. Errors occurred during
    networking operations are raised as TCPConnectionError.

    Received messages are expected to be prepended by a int32 defining the
    message size. Messages are sent following this convention.
    """

    def __init__(self, host, port, timeout):
        self._host = host
        self._port = port
        self._timeout = timeout
        self._socket = None
        self._logprefix = '(%s:%s) ' % (self._host, self._port)

    def connect(self, connection_attempts=10):
        """Try to establish a connection to the given host:port."""
        connection_attempts = max(1, connection_attempts)
        error = None
        for attempt in range(1, connection_attempts + 1):
            try:
                self._socket = socket.create_connection(
                    address=(self._host, self._port), timeout=self._timeout)
                self._socket.settimeout(self._timeout)
                logging.debug('%sconnected', self._logprefix)
                return
            except socket.error as exception:
                error = exception
                logging.debug('%sconnection attempt %d: %s',
                              self._logprefix, attempt, error)
                time.sleep(1)
        self._reraise_exception_as_tcp_error('failed to connect', error)

    def disconnect(self):
        """Disconnect any active connection."""
        if self._socket is not None:
            logging.debug('%sdisconnecting', self._logprefix)
            self._socket.close()
            self._socket = None

    def connected(self):
        """Return whether there is an active connection."""
        return self._socket is not None

    def write(self, message):
        """Send message to the server, prefixed by its uint32 length."""
        if self._socket is None:
            raise TCPConnectionError(self._logprefix + 'not connected')
        # NOTE(review): the struct format was stripped by markup in the
        # dump; '<L' (little-endian uint32 length prefix) restored from the
        # class docstring contract and the matching unpack in read().
        header = struct.pack('<L', len(message))
        try:
            self._socket.sendall(header + message)
        except socket.error as exception:
            self._reraise_exception_as_tcp_error('failed to write data', exception)

    def read(self):
        """Read one length-prefixed message from the server."""
        header = self._read_n(4)
        if not header:
            raise TCPConnectionError(self._logprefix + 'connection closed')
        length = struct.unpack('<L', header)[0]
        return self._read_n(length)

    def _read_n(self, length):
        """Read exactly ``length`` bytes from the socket."""
        if self._socket is None:
            raise TCPConnectionError(self._logprefix + 'not connected')
        buf = bytes()
        # NOTE(review): the '>' comparison below was eaten by markup
        # stripping in the dump; restored from the surviving loop body
        # ("length -= len(data)" down to zero).
        while length > 0:
            try:
                data = self._socket.recv(length)
            except socket.error as exception:
                self._reraise_exception_as_tcp_error('failed to read data', exception)
            if not data:
                raise TCPConnectionError(self._logprefix + 'connection closed')
            buf += data
            length -= len(data)
        return buf

    def _reraise_exception_as_tcp_error(self, message, exception):
        """Wrap a socket error into TCPConnectionError with the log prefix."""
        raise TCPConnectionError('%s%s: %s' % (self._logprefix, message, exception))
import carla_server_pb2 as carla_protocol 19 | except ImportError: 20 | raise RuntimeError('cannot import "carla_server_pb2.py", run ' 21 | 'the protobuf compiler to generate this file') 22 | 23 | 24 | Translation = namedtuple('Translation', 'x y z') 25 | Translation.__new__.__defaults__ = (0.0, 0.0, 0.0) 26 | 27 | Rotation = namedtuple('Rotation', 'pitch yaw roll') 28 | Rotation.__new__.__defaults__ = (0.0, 0.0, 0.0) 29 | 30 | Scale = namedtuple('Scale', 'x y z') 31 | Scale.__new__.__defaults__ = (1.0, 1.0, 1.0) 32 | 33 | 34 | class Transform(object): 35 | """A 3D transformation. 36 | 37 | The transformation is applied in the order: scale, rotation, translation. 38 | """ 39 | 40 | def __init__(self, *args, **kwargs): 41 | if 'matrix' in kwargs: 42 | self.matrix = kwargs['matrix'] 43 | return 44 | if isinstance(args[0], carla_protocol.Transform): 45 | args = [ 46 | Translation( 47 | args[0].location.x, 48 | args[0].location.y, 49 | args[0].location.z), 50 | Rotation( 51 | args[0].rotation.pitch, 52 | args[0].rotation.yaw, 53 | args[0].rotation.roll) 54 | ] 55 | self.matrix = numpy.matrix(numpy.identity(4)) 56 | self.set(*args, **kwargs) 57 | 58 | def set(self, *args): 59 | """Builds the transform matrix given a Translate, Rotation 60 | and Scale. 61 | """ 62 | translation = Translation() 63 | rotation = Rotation() 64 | scale = Scale() 65 | 66 | if len(args) > 3: 67 | raise ValueError("'Transform' accepts 3 values as maximum.") 68 | 69 | def get_single_obj_type(obj_type): 70 | """Returns the unique object contained in the 71 | arguments lists that is instance of 'obj_type'. 
72 | """ 73 | obj = [x for x in args if isinstance(x, obj_type)] 74 | if len(obj) > 1: 75 | raise ValueError("Transform only accepts one instances of " + 76 | str(obj_type) + " as a parameter") 77 | elif not obj: 78 | # Create an instance of the type that is 'obj_type' 79 | return obj_type() 80 | return obj[0] 81 | 82 | translation = get_single_obj_type(Translation) 83 | rotation = get_single_obj_type(Rotation) 84 | scale = get_single_obj_type(Scale) 85 | 86 | for param in args: 87 | if not isinstance(param, Translation) and \ 88 | not isinstance(param, Rotation) and \ 89 | not isinstance(param, Scale): 90 | raise TypeError( 91 | "'" + str(type(param)) + "' type not match with \ 92 | 'Translation', 'Rotation' or 'Scale'") 93 | 94 | # Transformation matrix 95 | cy = math.cos(numpy.radians(rotation.yaw)) 96 | sy = math.sin(numpy.radians(rotation.yaw)) 97 | cr = math.cos(numpy.radians(rotation.roll)) 98 | sr = math.sin(numpy.radians(rotation.roll)) 99 | cp = math.cos(numpy.radians(rotation.pitch)) 100 | sp = math.sin(numpy.radians(rotation.pitch)) 101 | self.matrix[0, 3] = translation.x 102 | self.matrix[1, 3] = translation.y 103 | self.matrix[2, 3] = translation.z 104 | self.matrix[0, 0] = scale.x * (cp * cy) 105 | self.matrix[0, 1] = scale.y * (cy * sp * sr - sy * cr) 106 | self.matrix[0, 2] = -scale.z * (cy * sp * cr + sy * sr) 107 | self.matrix[1, 0] = scale.x * (sy * cp) 108 | self.matrix[1, 1] = scale.y * (sy * sp * sr + cy * cr) 109 | self.matrix[1, 2] = scale.z * (cy * sr - sy * sp * cr) 110 | self.matrix[2, 0] = scale.x * (sp) 111 | self.matrix[2, 1] = -scale.y * (cp * sr) 112 | self.matrix[2, 2] = scale.z * (cp * cr) 113 | 114 | def inverse(self): 115 | """Return the inverse transform.""" 116 | return Transform(matrix=numpy.linalg.inv(self.matrix)) 117 | 118 | def transform_points(self, points): 119 | """ 120 | Given a 4x4 transformation matrix, transform an array of 3D points. 
121 | Expected point foramt: [[X0,Y0,Z0],..[Xn,Yn,Zn]] 122 | """ 123 | # Needed foramt: [[X0,..Xn],[Z0,..Zn],[Z0,..Zn]]. So let's transpose 124 | # the point matrix. 125 | points = points.transpose() 126 | # Add 0s row: [[X0..,Xn],[Y0..,Yn],[Z0..,Zn],[0,..0]] 127 | points = numpy.append(points, numpy.ones((1, points.shape[1])), axis=0) 128 | # Point transformation 129 | points = self.matrix * points 130 | # Return all but last row 131 | return points[0:3].transpose() 132 | 133 | def __mul__(self, other): 134 | return Transform(matrix=numpy.dot(self.matrix, other.matrix)) 135 | 136 | def __str__(self): 137 | return str(self.matrix) 138 | -------------------------------------------------------------------------------- /carla08/util.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 2 | # Barcelona (UAB). 3 | # 4 | # This work is licensed under the terms of the MIT license. 5 | # For a copy, see . 
@contextmanager
def make_connection(client_type, *args, **kwargs):
    """Context manager that builds, connects and finally disconnects a client."""
    client = None
    try:
        client = client_type(*args, **kwargs)
        client.connect()
        yield client
    finally:
        # Disconnect even when construction succeeded but connect() raised.
        if client is not None:
            client.disconnect()


class StopWatch(object):
    """Wall-clock timer measuring the span between creation/restart and stop."""

    def __init__(self):
        self.start = datetime.datetime.now()
        self.end = None

    def restart(self):
        """Reset the timer as if it had just been created."""
        self.start = datetime.datetime.now()
        self.end = None

    def stop(self):
        """Record the end time; call before querying durations."""
        self.end = datetime.datetime.now()

    def seconds(self):
        """Elapsed seconds between start and the recorded end."""
        return (self.end - self.start).total_seconds()

    def milliseconds(self):
        """Elapsed time in milliseconds."""
        return self.seconds() * 1000.0


def to_hex_str(header):
    """Render a string as colon-separated two-digit hex character codes."""
    return ':'.join('{:02x}'.format(ord(char)) for char in header)


if sys.version_info >= (3, 3):

    import shutil

    def print_over_same_line(text):
        """Rewrite the current terminal line with *text*, padding to full width."""
        columns = shutil.get_terminal_size((80, 20)).columns
        padding = max(0, columns - len(text))
        sys.stdout.write('\r' + text + ' ' * padding)
        sys.stdout.flush()

else:

    # Workaround for older Python versions lacking shutil.get_terminal_size:
    # remember the longest line printed so far and pad up to that length.
    def print_over_same_line(text):
        longest = max(print_over_same_line.last_line_length, len(text))
        padding = max(0, longest - len(text))
        sys.stdout.write('\r' + text + ' ' * padding)
        sys.stdout.flush()
        print_over_same_line.last_line_length = longest
    print_over_same_line.last_line_length = 0
14 | 15 | Closing the Generalization Gap of Adaptive Gradient Methods in Training Deep Neural Networks 16 | https://arxiv.org/abs/1711.05101 17 | 18 | This can be set by the "partial" parameter, which controls how likely the optimizer acts similar to Adam (1.0) and 19 | SGD (0.0), which is very useful if hypertuned. One can also update (decay) this parameter online to switch between 20 | Adam and SGD optimizers in an easy way, which has been recommended by previous research for a better generalization. 21 | ''' 22 | 23 | import math 24 | import torch 25 | from torch.optim.optimizer import Optimizer 26 | 27 | class AdamAIO(Optimizer): 28 | def __init__(self, params, lr=2e-3, betas=(0.9, 0.999), eps=5e-3, weight_decay=5e-6, hypergrad=1e-7, partial=0.75): 29 | defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, hypergrad=hypergrad, partial=partial) 30 | super().__init__(params, defaults) 31 | def step(self, closure=None): 32 | loss = None if closure is None else closure() 33 | for group in self.param_groups: 34 | for p in group['params']: 35 | if p.grad is None: continue 36 | grad = p.grad.data 37 | state = self.state[p] 38 | if len(state) == 0: 39 | state['step'] = 0 40 | state['exp_avg'] = torch.zeros_like(p.data) 41 | state['exp_avg_sq'] = torch.zeros_like(p.data) 42 | exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] 43 | beta1, beta2 = group['betas'] 44 | state['step'] += 1 45 | if group['hypergrad'] > 0 and state['step'] > 1: 46 | prev_bias_correction1 = 1 - beta1 ** (state['step'] - 1) 47 | prev_bias_correction2 = 1 - beta2 ** (state['step'] - 1) 48 | h = torch.dot(grad.view(-1), torch.div(exp_avg, exp_avg_sq.sqrt().add_(group['eps'])).view(-1)) * math.sqrt(prev_bias_correction2) / prev_bias_correction1 49 | group['lr'] += group['hypergrad'] * h 50 | exp_avg.mul_(beta1).add_(1 - beta1, grad) 51 | exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad) 52 | denom = exp_avg_sq.sqrt().add_(group['eps']) 53 | bias_correction1 = 1 - 
"""
This script computes gradcam attentions maps for the images stored as per the standard training data format
"""

import os
import time
import sys
import random
import argparse

import torch
import traceback
import dlib

from configs import g_conf, set_type_of_process, merge_with_yaml
from network import CoILModel
from input import CoILDataset, Augmenter
from logger import coil_logger
from coilutils.checkpoint_schedule import get_latest_evaluated_checkpoint, is_next_checkpoint_ready,\
    maximun_checkpoint_reach, get_next_checkpoint

import matplotlib.pyplot as plt
import cv2
import numpy as np


parser = argparse.ArgumentParser()

parser.add_argument('--gpus', type=str, required=True, help='gpu id')
parser.add_argument('--dataset_path', type=str, required=True, help='path to carla dataset')
parser.add_argument('--preload_name', type=str, required=True, help='preload file name')
parser.add_argument('--config', type=str, required=True, help='configuration file')
parser.add_argument('--checkpoint', type=str, required=True, help='saved model checkpoint')
parser.add_argument('--gradcam_path', type=str, required=True, help='path to save gradcam heatmap')
parser.add_argument('--type', type=str, required=True, help='type of evaluation')

args = parser.parse_args()

merge_with_yaml(args.config)

os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus

# Build the dataset/loader exactly as in training (no augmentation).
augmenter = Augmenter(None)
dataset = CoILDataset(args.dataset_path, transform=augmenter, preload_name=args.preload_name)

dataloader = torch.utils.data.DataLoader(dataset, batch_size=g_conf.BATCH_SIZE, shuffle=False,
                                         num_workers=g_conf.NUMBER_OF_LOADING_WORKERS, pin_memory=True)

model = CoILModel(g_conf.MODEL_TYPE, g_conf.MODEL_CONFIGURATION)
model = model.cuda()

checkpoint = torch.load(args.checkpoint)
model.load_state_dict(checkpoint['state_dict'])

model.eval()
print(len(dataset))

save_dir = os.path.join(args.gradcam_path, args.type)
# Fix: os.mkdir fails when args.gradcam_path itself does not exist yet;
# makedirs creates the whole directory hierarchy.
if not os.path.isdir(save_dir):
    os.makedirs(save_dir)

count = 0
done = False  # set once the image budget below is exhausted
for data in dataloader:

    for i in range(g_conf.BATCH_SIZE):
        controls = data['directions']
        output = model.forward_branch(torch.squeeze(data['rgb']).cuda(),
                                      dataset.extract_inputs(data).cuda(),
                                      controls)
        activations = model.get_perception_activations(torch.squeeze(data['rgb']).cuda())[4].detach()
        # gradcam results in the suppmat are computed using brake
        output[i, 2].backward()  # backprop from the steer (0), throttle (1) or brake (2)
        gradients = model.get_perception_gradients()
        pooled_gradients = torch.mean(torch.mean(torch.mean(gradients, 3), 2), 0)

        for j in range(512):  # number of feature maps = 512 for conv4, 256 for conv3
            activations[:, j, :, :] *= pooled_gradients[j]

        # Channel-wise mean of the weighted activations, clamped to >= 0 and
        # normalised by the global max, as in the Grad-CAM paper.
        heatmap = torch.mean(activations, dim=1).squeeze()
        heatmap = np.maximum(heatmap, 0)
        heatmap /= torch.max(heatmap)
        curr_heatmap = heatmap[i]
        curr_heatmap = curr_heatmap.cpu().numpy()

        img = data['rgb'][i].numpy().transpose(1, 2, 0)
        img = np.uint8(255 * img)
        curr_heatmap = cv2.resize(curr_heatmap, (img.shape[1], img.shape[0]))
        curr_heatmap = np.uint8(255 * curr_heatmap)
        curr_heatmap = cv2.applyColorMap(curr_heatmap, cv2.COLORMAP_JET)
        superimposed_img = np.uint8(curr_heatmap * 0.4 + img)

        cv2.imwrite(os.path.join(save_dir, 'img_%d.jpg' % count), superimposed_img)
        count += 1
        if count % 100 == 0:
            print(count)

        # specify the number of images to be saved
        if count >= 20000:
            done = True
            break

    # Fix: the original `break` only left the inner loop, so iteration over
    # the dataloader kept going after the budget was reached, running one
    # extra forward/backward and image write per batch. Propagate the stop.
    if done:
        break
immutable'. 34 | format(name, value) 35 | ) 36 | 37 | def immutable(self, is_immutable): 38 | """Set immutability to is_immutable and recursively apply the setting 39 | to all nested AttributeDict. 40 | """ 41 | self.__dict__[AttributeDict.IMMUTABLE] = is_immutable 42 | # Recursively set immutable state 43 | for v in self.__dict__.values(): 44 | if isinstance(v, AttributeDict): 45 | v.immutable(is_immutable) 46 | for v in self.values(): 47 | if isinstance(v, AttributeDict): 48 | v.immutable(is_immutable) 49 | 50 | def is_immutable(self): 51 | return self.__dict__[AttributeDict.IMMUTABLE] 52 | 53 | 54 | def __repr__(self): 55 | return str(self.__dict__) -------------------------------------------------------------------------------- /coilutils/checking.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import h5py 3 | import sys 4 | import numpy as np 5 | import numbers 6 | 7 | 8 | def _is_tensor_image(img): 9 | return torch.is_tensor(img) and img.ndimension() == 3 10 | 11 | def _is_numpy_image(img): 12 | return isinstance(img, np.ndarray) and (img.ndim in {2, 3}) 13 | 14 | def do_assert(condition, message="Assertion failed."): 15 | """ 16 | Function that behaves equally to an `assert` statement, but raises an 17 | Exception. 18 | 19 | This is added because `assert` statements are removed in optimized code. 20 | It replaces `assert` statements throughout the library that should be 21 | kept even in optimized code. 22 | 23 | Parameters 24 | ---------- 25 | condition : bool 26 | If False, an exception is raised. 27 | 28 | message : string, optional(default="Assertion failed.") 29 | Error message. 30 | 31 | """ 32 | if not condition: 33 | raise AssertionError(str(message)) 34 | 35 | def is_single_number(val): 36 | """ 37 | Checks whether a variable is a number, i.e. an integer or float. 38 | 39 | Parameters 40 | ---------- 41 | val : anything 42 | The variable to 43 | check. 
44 | 45 | Returns 46 | ------- 47 | out : bool 48 | True if the variable is a number. Otherwise False. 49 | 50 | """ 51 | return isinstance(val, numbers.Integral) or isinstance(val, numbers.Real) 52 | 53 | 54 | def is_callable(val): 55 | """ 56 | Checks whether a variable is a callable, e.g. a function. 57 | 58 | Parameters 59 | ---------- 60 | val : anything 61 | The variable to 62 | check. 63 | 64 | Returns 65 | ------- 66 | out : bool 67 | True if the variable is a callable. Otherwise False. 68 | 69 | """ 70 | # python 3.x with x <= 2 does not support callable(), apparently 71 | if sys.version_info[0] == 3 and sys.version_info[1] <= 2: 72 | return hasattr(val, '__call__') 73 | else: 74 | return callable(val) 75 | 76 | # TODO Resource temporarily unavailable. 77 | 78 | def is_hdf5_prepared(filename): 79 | """ 80 | We add this checking to verify if the hdf5 file has all the necessary metadata needed for performing, 81 | our trainings. 82 | # TODO: I dont know the scope but maybe this can change depending on the system. 
BUt i want to keep this for 83 | CARLA 84 | 85 | """ 86 | 87 | data = h5py.File(filename, "r+") 88 | 89 | # Check if the number of metadata is correct, the current number is 28 90 | 91 | 92 | if len(data['metadata_targets']) < 28: 93 | return False 94 | if len(data['targets'][0]) < 28: 95 | return False 96 | 97 | 98 | # Check if the steering is fine 99 | if sum(data['targets'][0, :]) == 0.0: 100 | return False 101 | 102 | 103 | data.close() 104 | return True 105 | 106 | 107 | -------------------------------------------------------------------------------- /coilutils/checkpoint_schedule.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | 4 | from configs import g_conf 5 | from logger import monitorer 6 | 7 | from coilutils.general import sort_nicely 8 | 9 | 10 | def is_open(file_name): 11 | if os.path.exists(file_name): 12 | file1 = os.stat(file_name) # initial file size 13 | file1_size = file1.st_size 14 | 15 | # your script here that collects and writes data (increase file size) 16 | time.sleep(0.5) 17 | file2 = os.stat(file_name) # updated file size 18 | file2_size = file2.st_size 19 | comp = file2_size - file1_size # compares sizes 20 | if comp == 0: 21 | return False 22 | else: 23 | return True 24 | 25 | raise NameError 26 | 27 | 28 | 29 | def maximun_checkpoint_reach(iteration, checkpoint_schedule): 30 | if iteration is None: 31 | return False 32 | 33 | if iteration >= max(checkpoint_schedule): 34 | return True 35 | else: 36 | return False 37 | 38 | 39 | """ FUNCTIONS FOR SAVING THE CHECKPOINTS """ 40 | 41 | 42 | def is_ready_to_save(iteration): 43 | """ Returns if the iteration is a iteration for saving a checkpoint 44 | 45 | """ 46 | if iteration in set(g_conf.SAVE_SCHEDULE): 47 | return True 48 | else: 49 | return False 50 | 51 | def get_latest_saved_checkpoint(): 52 | """ 53 | Returns the , latest checkpoint number that was saved 54 | 55 | """ 56 | checkpoint_files = 
os.listdir(os.path.join('_logs', g_conf.EXPERIMENT_BATCH_NAME, 57 | g_conf.EXPERIMENT_NAME, 'checkpoints')) 58 | if checkpoint_files == []: 59 | return None 60 | else: 61 | sort_nicely(checkpoint_files) 62 | return checkpoint_files[-1] 63 | 64 | 65 | """ FUNCTIONS FOR GETTING THE CHECKPOINTS""" 66 | 67 | def get_latest_evaluated_checkpoint(filename=None): 68 | 69 | """ 70 | Get the latest checkpoint that was validated or tested. 71 | Args: 72 | """ 73 | 74 | return monitorer.get_latest_checkpoint(filename) 75 | 76 | 77 | def is_next_checkpoint_ready(checkpoint_schedule, control_filename=None): 78 | 79 | # IT needs 80 | ltst_check = get_latest_evaluated_checkpoint(control_filename) 81 | #ltst_check = None 82 | # This means that we got the last one, so we return false and go back to the loop 83 | if ltst_check == g_conf.TEST_SCHEDULE[-1]: 84 | return False 85 | if ltst_check is None: # This means no checkpoints were evaluated 86 | next_check = checkpoint_schedule[0] # Return the first one 87 | else: 88 | next_check = checkpoint_schedule[checkpoint_schedule.index(ltst_check)+1] 89 | 90 | # print ('list check: ', next_check) 91 | # Check if the file is in the checkpoints list. 92 | if os.path.exists(os.path.join('_logs', g_conf.EXPERIMENT_BATCH_NAME, 93 | g_conf.EXPERIMENT_NAME, 'checkpoints')): 94 | 95 | # test if the file exist: 96 | if str(next_check) + '.pth' in os.listdir(os.path.join('_logs', g_conf.EXPERIMENT_BATCH_NAME, 97 | g_conf.EXPERIMENT_NAME, 'checkpoints')): 98 | # now check if someone is writing to it, if it is the case return false 99 | return not is_open(os.path.join('_logs', g_conf.EXPERIMENT_BATCH_NAME, 100 | g_conf.EXPERIMENT_NAME, 'checkpoints', str(next_check) + '.pth')) 101 | 102 | else: 103 | return False 104 | else: 105 | # This mean the training part has not created the checkpoints yet. 
def checkpoint_parse_configuration_file(filename):
    """Read a driving-benchmark JSON config and return (yaml_path, checkpoint)."""
    with open(filename, 'r') as config_file:
        configuration = json.load(config_file)

    return configuration['yaml'], configuration['checkpoint']
16 | 17 | """ 18 | if configuration is None: 19 | return "None", None 20 | print ('conf', configuration) 21 | conf_dict = collections.OrderedDict(configuration) 22 | 23 | name = 'split' 24 | for key in conf_dict.keys(): 25 | if key != 'weights': 26 | name += '_' 27 | name += key 28 | 29 | 30 | 31 | return name, conf_dict 32 | 33 | def generate_name(g_conf): 34 | # TODO: Make a cool name generator, maybe in another class 35 | """ 36 | 37 | The name generator is currently formed by the following parts 38 | Dataset_name. 39 | THe type of network used, got directly from the class. 40 | The regularization 41 | The strategy with respect to time 42 | The type of output 43 | The preprocessing made in the data 44 | The type of loss function 45 | The parts of data that where used. 46 | 47 | Take into account if the variable was not set, it set the default name, from the global conf 48 | 49 | 50 | 51 | Returns: 52 | a string containing the name 53 | 54 | 55 | """ 56 | 57 | 58 | 59 | final_name_string = "" 60 | # Addind dataset 61 | final_name_string += g_conf.TRAIN_DATASET_NAME 62 | # Model type 63 | final_name_string += '_' + g_conf.MODEL_TYPE 64 | # Model Size 65 | #TODO: for now is just saying the number of convs, add a layer counting 66 | if 'conv' in g_conf.MODEL_CONFIGURATION['perception']: 67 | final_name_string += '_' + str(len(g_conf.MODEL_CONFIGURATION['perception']['conv']['kernels'])) +'conv' 68 | else: # FOR NOW IT IS A RES MODEL 69 | final_name_string += '_' + str(g_conf.MODEL_CONFIGURATION['perception']['res']['name']) 70 | 71 | # Model Regularization 72 | # We start by checking if there is some kind of augmentation, and the schedule name. 
73 | 74 | if 'conv' in g_conf.MODEL_CONFIGURATION['perception']: 75 | if g_conf.AUGMENTATION is not None and g_conf.AUGMENTATION != 'None': 76 | final_name_string += '_' + g_conf.AUGMENTATION 77 | else: 78 | # We check if there is dropout 79 | if get_dropout_sum(g_conf.MODEL_CONFIGURATION) > 4: 80 | final_name_string += '_highdropout' 81 | elif get_dropout_sum(g_conf.MODEL_CONFIGURATION) > 2: 82 | final_name_string += '_milddropout' 83 | elif get_dropout_sum(g_conf.MODEL_CONFIGURATION) > 0: 84 | final_name_string += '_lowdropout' 85 | else: 86 | final_name_string += '_none' 87 | 88 | 89 | # Temporal 90 | 91 | if g_conf.NUMBER_FRAMES_FUSION > 1 and g_conf.NUMBER_IMAGES_SEQUENCE > 1: 92 | final_name_string += '_lstm_fusion' 93 | elif g_conf.NUMBER_FRAMES_FUSION > 1: 94 | final_name_string += '_fusion' 95 | elif g_conf.NUMBER_IMAGES_SEQUENCE > 1: 96 | final_name_string += '_lstm' 97 | else: 98 | final_name_string += '_single' 99 | 100 | # THe type of output 101 | 102 | if 'waypoint1_angle' in set(g_conf.TARGETS): 103 | 104 | final_name_string += '_waypoints' 105 | else: 106 | final_name_string += '_control' 107 | 108 | # The pre processing ( Balance or not ) 109 | if g_conf.BALANCE_DATA and len(g_conf.STEERING_DIVISION) > 0: 110 | final_name_string += '_balancesteer' 111 | elif g_conf.BALANCE_DATA and g_conf.PEDESTRIAN_PERCENTAGE > 0: 112 | final_name_string += '_balancepedestrian' 113 | elif g_conf.BALANCE_DATA and len(g_conf.SPEED_DIVISION) > 0: 114 | final_name_string += '_balancespeed' 115 | else: 116 | final_name_string += '_random' 117 | 118 | 119 | # The type of loss function 120 | 121 | final_name_string += '_' + g_conf.LOSS_FUNCTION 122 | 123 | # the parts of the data that were used. 
124 | 125 | if g_conf.USE_NOISE_DATA: 126 | final_name_string += '_noise_' 127 | else: 128 | final_name_string += '_' 129 | 130 | final_name_string += g_conf.DATA_USED 131 | 132 | final_name_string += '_' + str(g_conf.AUGMENT_LATERAL_STEERINGS) 133 | name_splitter, _ = parse_split_configuration(g_conf.SPLIT) 134 | final_name_string += '_' + name_splitter 135 | 136 | 137 | final_name_string += '_' + str(g_conf.NUMBER_OF_HOURS) + 'hours' 138 | 139 | 140 | if g_conf.USE_FULL_ORACLE: 141 | return 'ORACLE' 142 | 143 | return final_name_string -------------------------------------------------------------------------------- /dataset_configurations/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/data_aggregation/76777156a465cbb77d6d5ab88da8f1812e7ff043/dataset_configurations/__init__.py -------------------------------------------------------------------------------- /drive/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/data_aggregation/76777156a465cbb77d6d5ab88da8f1812e7ff043/drive/.DS_Store -------------------------------------------------------------------------------- /drive/__init__.py: -------------------------------------------------------------------------------- 1 | from .coil_agent import CoILAgent 2 | -------------------------------------------------------------------------------- /drive/sample_agent.json: -------------------------------------------------------------------------------- 1 | { 2 | "yaml": "configs/baselines/resnet34imnet.yaml", 3 | "checkpoint": 180000 4 | } -------------------------------------------------------------------------------- /drive/suites/nocrash_new_town_suite.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 2 | # Barcelona 
class NocrashNewTown(ExperimentSuite):
    """NoCrash benchmark suite on the unseen town (Town02), training weathers only."""

    def __init__(self):
        super(NocrashNewTown, self).__init__('Town02')

    @property
    def train_weathers(self):
        # Weather ids used during training data collection.
        return [1, 3, 6, 8]

    @property
    def test_weathers(self):
        # This suite evaluates generalization to a new town, not new weather.
        return []

    @property
    def collision_as_failure(self):
        return True

    def calculate_time_out(self, path_distance):
        """
        Function to return the timeout ,in milliseconds,
        that is calculated based on distance to goal.
        This is the same timeout as used on the CoRL paper.
        """
        return ((path_distance / 1000.0) / 5.0) * 3600.0 + 20.0

    def _poses(self):
        """Return the navigation pose pairs (start, goal) for Town02."""

        def _poses_navigation():
            return [[19, 66], [79, 14], [19, 57], [39, 53], [60, 26],
                    [53, 76], [42, 13], [31, 71], [59, 35], [47, 16],
                    [10, 61], [66, 3], [20, 79], [14, 56], [26, 69],
                    [79, 19], [2, 29], [16, 14], [5, 57], [77, 68],
                    [70, 73], [46, 67], [34, 77], [61, 49], [21, 12]]

        return [_poses_navigation()]

    def build_experiments(self):
        """
        Creates the whole set of experiment objects,
        The experiments created depend on the selected Town.
        """

        # This single RGB camera is used on every experiment.
        camera = Camera('rgb')
        camera.set(FOV=100)
        camera.set_image_size(800, 600)
        camera.set_position(2.0, 0.0, 1.4)
        camera.set_rotation(-15.0, 0, 0)

        poses_tasks = self._poses()
        # The full benchmark defines empty/normal/cluttered conditions; this
        # suite is deliberately restricted to the cluttered one only (the
        # original assigned the full lists first and immediately overwrote
        # them — the dead assignments are removed here).
        vehicles_tasks = [70]
        pedestrians_tasks = [150]
        # Fix: task name was misspelled 'cuttered' in the original.
        task_names = ['cluttered']

        experiments_vector = []

        for weather in self.weathers:

            for iteration in range(len(poses_tasks)):
                poses = poses_tasks[iteration]
                vehicles = vehicles_tasks[iteration]
                pedestrians = pedestrians_tasks[iteration]

                conditions = CarlaSettings()
                conditions.set(
                    SendNonPlayerAgentsInfo=True,
                    NumberOfVehicles=vehicles,
                    NumberOfPedestrians=pedestrians,
                    WeatherId=weather

                )
                conditions.set(DisableTwoWheeledVehicles=True)
                # Add all the cameras that were set for this experiments
                conditions.add_sensor(camera)

                experiment = Experiment()
                experiment.set(
                    Conditions=conditions,
                    Poses=poses,
                    Task=iteration,
                    TaskName=task_names[iteration],
                    Repetitions=1
                )
                experiments_vector.append(experiment)

        return experiments_vector
class NocrashNewWeather(ExperimentSuite):
    """NoCrash benchmark suite: training town (Town01) under held-out weathers."""

    def __init__(self):
        super(NocrashNewWeather, self).__init__('Town01')

    @property
    def train_weathers(self):
        return [2, 4, 5, 7, 9, 10, 11, 12, 13, 14]

    @property
    def test_weathers(self):
        return []

    @property
    def collision_as_failure(self):
        # NoCrash rule: any collision terminates the episode as a failure.
        return True

    def calculate_time_out(self, path_distance):
        """Return the episode timeout, in milliseconds, computed from the
        distance to goal (same formula as the CoRL paper)."""
        return ((path_distance / 1000.0) / 5.0) * 3600.0 + 20.0

    def _poses(self):
        # Single navigation task: 25 (start, end) pose index pairs in Town01.
        navigation_poses = [[105, 29], [27, 130], [102, 87], [132, 27], [25, 44],
                            [4, 64], [34, 67], [54, 30], [140, 134], [105, 9],
                            [148, 129], [65, 18], [21, 16], [147, 97], [134, 49],
                            [30, 41], [81, 89], [69, 45], [102, 95], [18, 145],
                            [111, 64], [79, 45], [84, 69], [73, 31], [37, 81]]
        return [navigation_poses]

    def build_experiments(self):
        """Build one Experiment per (weather, task) combination.

        Only the 'cluttered' task (100 vehicles, 250 pedestrians) is active;
        the full NoCrash set would also include 'empty' and 'normal'.
        """
        # Single RGB camera shared by every experiment.
        rgb_camera = Camera('rgb')
        rgb_camera.set(FOV=100)
        rgb_camera.set_image_size(800, 600)
        rgb_camera.set_position(2.0, 0.0, 1.4)
        rgb_camera.set_rotation(-15.0, 0, 0)

        poses_tasks = self._poses()
        vehicles_tasks = [100]
        pedestrians_tasks = [250]
        task_names = ['cluttered']

        experiments_vector = []
        for weather in self.weathers:
            for task, poses in enumerate(poses_tasks):
                conditions = CarlaSettings()
                conditions.set(
                    SendNonPlayerAgentsInfo=True,
                    NumberOfVehicles=vehicles_tasks[task],
                    NumberOfPedestrians=pedestrians_tasks[task],
                    WeatherId=weather
                )
                conditions.set(DisableTwoWheeledVehicles=True)
                conditions.add_sensor(rgb_camera)

                experiment = Experiment()
                experiment.set(
                    Conditions=conditions,
                    Poses=poses,
                    Task=task,
                    TaskName=task_names[task],
                    Repetitions=1
                )
                experiments_vector.append(experiment)

        return experiments_vector
class NocrashNewWeatherTown(ExperimentSuite):
    """NoCrash benchmark suite: held-out weathers in the new town (Town02)."""

    def __init__(self):
        super(NocrashNewWeatherTown, self).__init__('Town02')

    @property
    def train_weathers(self):
        return []

    @property
    def test_weathers(self):
        return [2, 4, 5, 7, 9, 10, 11, 12, 13, 14]

    @property
    def collision_as_failure(self):
        # NoCrash rule: any collision terminates the episode as a failure.
        return True

    def calculate_time_out(self, path_distance):
        """Return the episode timeout, in milliseconds, computed from the
        distance to goal (same formula as the CoRL paper)."""
        return ((path_distance / 1000.0) / 5.0) * 3600.0 + 20.0

    def _poses(self):
        # Single navigation task: 25 (start, end) pose index pairs in Town02.
        navigation_poses = [[19, 66], [79, 14], [19, 57], [39, 53], [60, 26],
                            [53, 76], [42, 13], [31, 71], [59, 35], [47, 16],
                            [10, 61], [66, 3], [20, 79], [14, 56], [26, 69],
                            [79, 19], [2, 29], [16, 14], [5, 57], [77, 68],
                            [70, 73], [46, 67], [34, 77], [61, 49], [21, 12]]
        return [navigation_poses]

    def build_experiments(self):
        """Build one Experiment per (weather, task) combination.

        Only the 'cluttered' task (70 vehicles, 150 pedestrians) is active;
        the full NoCrash set would also include 'empty' and 'normal'.
        """
        # Single RGB camera shared by every experiment.
        rgb_camera = Camera('rgb')
        rgb_camera.set(FOV=100)
        rgb_camera.set_image_size(800, 600)
        rgb_camera.set_position(2.0, 0.0, 1.4)
        rgb_camera.set_rotation(-15.0, 0, 0)

        poses_tasks = self._poses()
        vehicles_tasks = [70]
        pedestrians_tasks = [150]
        task_names = ['cluttered']

        experiments_vector = []
        for weather in self.weathers:
            for task, poses in enumerate(poses_tasks):
                conditions = CarlaSettings()
                conditions.set(
                    SendNonPlayerAgentsInfo=True,
                    NumberOfVehicles=vehicles_tasks[task],
                    NumberOfPedestrians=pedestrians_tasks[task],
                    WeatherId=weather
                )
                conditions.set(DisableTwoWheeledVehicles=True)
                conditions.add_sensor(rgb_camera)

                experiment = Experiment()
                experiment.set(
                    Conditions=conditions,
                    Poses=poses,
                    Task=task,
                    TaskName=task_names[task],
                    Repetitions=1
                )
                experiments_vector.append(experiment)

        return experiments_vector
class NocrashTraining(ExperimentSuite):
    """NoCrash benchmark suite: training weathers in the training town (Town01)."""

    def __init__(self):
        super(NocrashTraining, self).__init__('Town01')

    @property
    def train_weathers(self):
        return [1, 3, 6, 8]

    @property
    def test_weathers(self):
        return []

    @property
    def collision_as_failure(self):
        # NoCrash rule: any collision terminates the episode as a failure.
        return True

    def calculate_time_out(self, path_distance):
        """Return the episode timeout, in milliseconds, computed from the
        distance to goal (same formula as the CoRL paper)."""
        return ((path_distance / 1000.0) / 5.0) * 3600.0 + 20.0

    def _poses(self):
        # Single navigation task: 25 (start, end) pose index pairs in Town01.
        # (The full benchmark would repeat this pose set once per task.)
        navigation_poses = [[105, 29], [27, 130], [102, 87], [132, 27], [25, 44],
                            [4, 64], [34, 67], [54, 30], [140, 134], [105, 9],
                            [148, 129], [65, 18], [21, 16], [147, 97], [134, 49],
                            [30, 41], [81, 89], [69, 45], [102, 95], [18, 145],
                            [111, 64], [79, 45], [84, 69], [73, 31], [37, 81]]
        return [navigation_poses]

    def build_experiments(self):
        """Build one Experiment per (weather, task) combination.

        Only the 'cluttered' task (100 vehicles, 250 pedestrians) is active;
        the full NoCrash set would also include 'empty' and 'normal'.
        """
        # Single RGB camera shared by every experiment.
        rgb_camera = Camera('rgb')
        rgb_camera.set(FOV=100)
        rgb_camera.set_image_size(800, 600)
        rgb_camera.set_position(2.0, 0.0, 1.4)
        rgb_camera.set_rotation(-15.0, 0, 0)

        poses_tasks = self._poses()
        vehicles_tasks = [100]
        pedestrians_tasks = [250]
        task_names = ['cluttered']

        experiments_vector = []
        for weather in self.weathers:
            for task, poses in enumerate(poses_tasks):
                conditions = CarlaSettings()
                conditions.set(
                    SendNonPlayerAgentsInfo=True,
                    NumberOfVehicles=vehicles_tasks[task],
                    NumberOfPedestrians=pedestrians_tasks[task],
                    WeatherId=weather
                )
                conditions.set(DisableTwoWheeledVehicles=True)
                conditions.add_sensor(rgb_camera)

                experiment = Experiment()
                experiment.set(
                    Conditions=conditions,
                    Poses=poses,
                    Task=task,
                    TaskName=task_names[task],
                    Repetitions=1
                )
                experiments_vector.append(experiment)

        return experiments_vector
-------------------------------------------------------------------------------- 1 | import numpy as np 2 | import input.scheduler 3 | 4 | 5 | class Augmenter(object): 6 | """ 7 | This class serve as a wrapper to apply augmentations from IMGAUG in CPU mode in 8 | the same way augmentations are applied when using the transform library from pytorch 9 | 10 | """ 11 | # Here besides just applying the list, the class should also apply the scheduling 12 | 13 | def __init__(self, scheduler_strategy): 14 | if scheduler_strategy is not None and scheduler_strategy != 'None': 15 | self.scheduler = getattr(input.scheduler, scheduler_strategy) 16 | else: 17 | self.scheduler = None 18 | 19 | def __call__(self, iteration, img, **kwargs): 20 | #TODO: Check this format issue 21 | 22 | # THe scheduler receives an iteration number and returns a transformation, vec 23 | 24 | #print (img.shape) 25 | if self.scheduler is not None: 26 | # print ('in augmenter') 27 | t = self.scheduler(iteration) 28 | #print (t) 29 | img = t.augment_image(img) 30 | 31 | img = np.swapaxes(img, 0, 2) 32 | img = np.swapaxes(img, 1, 2) 33 | 34 | return img 35 | 36 | def __repr__(self): 37 | format_string = self.__class__.__name__ + '(' 38 | for t in self.scheduler: 39 | format_string += '\n' 40 | format_string += ' {0}'.format(t) 41 | format_string += '\n)' 42 | return format_string -------------------------------------------------------------------------------- /input/data_parser.py: -------------------------------------------------------------------------------- 1 | import glob 2 | import os 3 | import json 4 | import numpy as np 5 | """ 6 | Module used to check attributes existent on data before incorporating them 7 | to the coil dataset 8 | """ 9 | 10 | 11 | def orientation_vector(measurement_data): 12 | pitch = np.deg2rad(measurement_data['rotation_pitch']) 13 | yaw = np.deg2rad(measurement_data['rotation_yaw']) 14 | orientation = np.array([np.cos(pitch)*np.cos(yaw), np.cos(pitch)*np.sin(yaw), 
np.sin(pitch)]) 15 | return orientation 16 | 17 | 18 | def forward_speed(measurement_data): 19 | vel_np = np.array([measurement_data['velocity_x'], measurement_data['velocity_y'], 20 | measurement_data['velocity_z']]) 21 | speed = np.dot(vel_np, orientation_vector(measurement_data)) 22 | 23 | return speed 24 | 25 | 26 | def get_speed(measurement_data): 27 | """ Extract the proper speed from the measurement data dict """ 28 | 29 | # If the forward speed is not on the dataset it is because speed is zero. 30 | if 'playerMeasurements' in measurement_data and \ 31 | 'forwardSpeed' in measurement_data['playerMeasurements']: 32 | return measurement_data['playerMeasurements']['forwardSpeed'] 33 | elif 'velocity_x' in measurement_data: # We have a 0.9.X data here 34 | return forward_speed(measurement_data) 35 | else: # There is no speed key, probably speed is zero. 36 | return 0 37 | 38 | 39 | def check_available_measurements(episode): 40 | """ Try to automatically check the measurements 41 | The ones named 'steer' are probably the steer for the vehicle 42 | This needs to be made more general to avoid possible mistakes on dataset reading 43 | """ 44 | 45 | measurements_list = glob.glob(os.path.join(episode, 'measurement*')) 46 | # Open a sample measurement 47 | with open(measurements_list[0]) as f: 48 | measurement_data = json.load(f) 49 | 50 | available_measurements = {} 51 | for meas_name in measurement_data.keys(): 52 | 53 | # Add steer 54 | if 'steer' in meas_name and 'noise' not in meas_name: 55 | available_measurements.update({'steer': meas_name}) 56 | 57 | # Add Throttle 58 | if 'throttle' in meas_name and 'noise' not in meas_name: 59 | available_measurements.update({'throttle': meas_name}) 60 | 61 | # Add brake ( Not hand brake) 62 | if 'brake' in meas_name and 'noise' not in meas_name and 'hand' not in meas_name: 63 | available_measurements.update({'brake': meas_name}) 64 | 65 | # add game time 66 | 67 | return available_measurements 68 | 69 | 
class Logger(object):
    """Minimal TensorBoard logger for scalar, image and histogram summaries.

    NOTE(review): this uses the TF 1.x summary API (tf.summary.FileWriter,
    tf.Summary protos) -- it will not run unmodified on TF 2.x; confirm the
    pinned tensorflow version.
    """

    def __init__(self, log_dir):
        """Create a summary writer logging to log_dir."""
        #from datetime import datetime
        #now = datetime.now()
        #log_dir = log_dir + now.strftime("%Y%m%d-%H%M%S")
        self.writer = tf.summary.FileWriter(log_dir)

    def scalar_summary(self, tag, value, step):
        """Log a scalar variable."""
        summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])
        self.writer.add_summary(summary, step)

    def image_summary(self, tag, images, step):
        """Log a list of images.

        Each image is transposed and multiplied by 255 below, so it is
        presumably a channel-first float array in [0, 1] -- TODO confirm
        against callers.
        """

        img_summaries = []
        for i, img in enumerate(images):
            # Write the image to an in-memory buffer. On Python 3 the name
            # StringIO is undefined (only BytesIO was imported at the top of
            # the file), so the NameError falls through to BytesIO.
            try:
                s = StringIO()
            except:
                s = BytesIO()
            # scipy.misc.toimage(img).save(s, format="png") # depreciated in new version
            # below 3 lines modified to be compatible with PIL
            img = img.transpose(2, 1, 0)
            img = (img*255).astype(np.uint8)
            Image.fromarray(img).save(s, format='png')

            # Create an Image proto carrying the encoded PNG bytes.
            img_sum = tf.Summary.Image(encoded_image_string=s.getvalue(),
                                       height=img.shape[0],
                                       width=img.shape[1])
            # Create a Summary value; images are tagged '<tag>/<index>'.
            img_summaries.append(tf.Summary.Value(tag='%s/%d' % (tag, i), image=img_sum))

        # Create and write Summary
        summary = tf.Summary(value=img_summaries)
        self.writer.add_summary(summary, step)

    def histo_summary(self, tag, values, step, bins=1000):
        """Log a histogram of the tensor of values."""

        # Create a histogram using numpy
        counts, bin_edges = np.histogram(values, bins=bins)

        # Fill the fields of the histogram proto
        hist = tf.HistogramProto()
        hist.min = float(np.min(values))
        hist.max = float(np.max(values))
        hist.num = int(np.prod(values.shape))
        hist.sum = float(np.sum(values))
        hist.sum_squares = float(np.sum(values ** 2))

        # Drop the start of the first bin (TensorBoard expects right edges only).
        bin_edges = bin_edges[1:]

        # Add bin edges and counts
        for edge in bin_edges:
            hist.bucket_limit.append(edge)
        for c in counts:
            hist.bucket.append(c)

        # Create and write Summary; flush so histograms appear immediately.
        summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=hist)])
        self.writer.add_summary(summary, step)
        self.writer.flush()
class CollisionChecker(object):
    """Detects per-frame collision events from CARLA's accumulated
    collision intensities by thresholding the increase since the last call."""

    def __init__(self):
        self.first_iter = True
        # Minimum intensity increase that counts as a new collision event.
        self._thresh_other = 400
        self._thresh_vehicle = 400
        self._thresh_pedestrian = 300
        # Accumulated intensities observed on the previous frame.
        self._previous_pedestrian_collision = 0
        self._previous_vehicle_collision = 0
        self._previous_other_collision = 0

        self._collision_time = -1
        self._count_collisions = 0

    def test_collision(self, player_measurements):
        """Return True if any collision intensity jumped above its threshold
        since the previous call (i.e. an instant collision happened)."""
        vehicle_delta = (player_measurements.collision_vehicles
                         - self._previous_vehicle_collision)
        pedestrian_delta = (player_measurements.collision_pedestrians
                            - self._previous_pedestrian_collision)
        other_delta = (player_measurements.collision_other
                       - self._previous_other_collision)

        collided = (vehicle_delta > self._thresh_vehicle
                    or pedestrian_delta > self._thresh_pedestrian
                    or other_delta > self._thresh_other)

        # Remember current accumulated values for the next frame's comparison.
        self._previous_pedestrian_collision = player_measurements.collision_pedestrians
        self._previous_vehicle_collision = player_measurements.collision_vehicles
        self._previous_other_collision = player_measurements.collision_other

        return collided
def write_sensor_data(episode_path, data_point_id, sensor_data, sensors_frequency):
    """Save every sensor reading of this data point to disk.

    A sensor named ``name`` is only written when ``data_point_id`` is a
    multiple of its sampling period (1 / sensors_frequency[name]).
    """
    try:
        from PIL import Image as PImage
    except ImportError:
        raise RuntimeError(
            'cannot import PIL, make sure pillow package is installed')

    for name, data in sensor_data.items():
        if int(data_point_id) % int((1/sensors_frequency[name])) == 0:
            # NOTE: 'format' shadows the builtin; PNG for images, PLY for lidar.
            format = '.png'
            if 'RGB' in name:
                format = '.png'
            if 'Lidar' in name:
                format = '.ply'
            data.save_to_disk(os.path.join(episode_path, name + '_' + data_point_id.zfill(5)), format)


def make_dataset_path(dataset_path):
    # Create the dataset root directory if it does not exist yet.
    # print ('apth: ', dataset_path)
    if not os.path.exists(dataset_path):
        os.makedirs(dataset_path)


def add_metadata(dataset_path, settings_module):
    """Write dataset-level metadata.json from the collection settings module."""
    with open(os.path.join(dataset_path, 'metadata.json'), 'w') as fo:
        jsonObj = {}
        jsonObj.update(settings_module.sensors_yaw)
        jsonObj.update({'fov': settings_module.FOV})
        jsonObj.update({'width': settings_module.WINDOW_WIDTH})
        jsonObj.update({'height': settings_module.WINDOW_HEIGHT})
        jsonObj.update({'lateral_noise_percentage': settings_module.lat_noise_percent})
        jsonObj.update({'longitudinal_noise_percentage': settings_module.long_noise_percent})
        jsonObj.update({'car range': settings_module.NumberOfVehicles})
        jsonObj.update({'pedestrian range': settings_module.NumberOfPedestrians})
        jsonObj.update({'set_of_weathers': settings_module.set_of_weathers})
        fo.write(json.dumps(jsonObj, sort_keys=True, indent=4))

def add_episode_metadata(dataset_path, episode_number, episode_aspects):
    """Write per-episode metadata.json, creating the episode folder if needed."""

    if not os.path.exists(os.path.join(dataset_path, 'episode_' + episode_number)):
        os.mkdir(os.path.join(dataset_path, 'episode_' + episode_number))

    with open(os.path.join(dataset_path, 'episode_' + episode_number, 'metadata.json'), 'w') as fo:

        jsonObj = {}
        jsonObj.update({'number_of_pedestrian': episode_aspects['number_of_pedestrians']})
        jsonObj.update({'number_of_vehicles': episode_aspects['number_of_vehicles']})
        jsonObj.update({'seeds_pedestrians': episode_aspects['seeds_pedestrians']})
        jsonObj.update({'seeds_vehicles': episode_aspects['seeds_vehicles']})
        jsonObj.update({'weather': episode_aspects['weather']})
        jsonObj.update({'start_pose': episode_aspects['start_pose']})
        jsonObj.update({'end_pose': episode_aspects['end_pose']})
        fo.write(json.dumps(jsonObj, sort_keys=True, indent=4))



def add_data_point(measurements, control, control_noise, sensor_data, state,
                   dataset_path, episode_number, data_point_id, sensors_frequency, directions=None):
    """Persist one data point: all sensor files plus the measurements json."""

    episode_path = os.path.join(dataset_path, 'episode_' + episode_number)
    if not os.path.exists(os.path.join(dataset_path, 'episode_' + episode_number)):
        os.mkdir(os.path.join(dataset_path, 'episode_' + episode_number))
    write_sensor_data(episode_path, data_point_id, sensor_data, sensors_frequency)
    write_json_measurements(episode_path, data_point_id, measurements, control, control_noise,
                            state, directions)

# Delete an episode in the case
def delete_episode(dataset_path, episode_number):
    # Remove the whole episode folder and everything inside it.
    shutil.rmtree(os.path.join(dataset_path, 'episode_' + episode_number))
class Arguments():
    """Plain container mimicking the argparse namespace that collect() expects."""

    def __init__(self, port, number_of_episodes, episode_number, path_name,
                 data_configuration_name, gpu_id, town_name, container_name, mode):
        self.port = port
        self.gpu = gpu_id
        self.host = 'localhost'
        self.number_of_episodes = number_of_episodes
        self.episode_number = episode_number
        self.not_record = False
        self.debug = False
        self.verbose = True
        self.controlling_agent = 'CommandFollower'
        self.data_path = path_name
        self.data_configuration_name = data_configuration_name
        self.town_name = town_name
        self.container_name = container_name
        self.mode = mode


def _shutdown_carla(carla_process, out):
    """Kill the CARLA server process and stop its docker container (if started)."""
    if carla_process is not None:
        carla_process.kill()
    if out is not None:
        # 'out' is the docker container id with a trailing newline.
        subprocess.call(['docker', 'stop', out[:-1]])


def collect_loop(args):
    """Start a dockerized CARLA server and run the collector against it,
    reconnecting on TCP errors until the collection finishes.

    Always shuts the server down afterwards to avoid zombie containers.
    """
    # Initialize before the try so cleanup never hits unbound names when
    # open_carla itself fails (the original code raised NameError here).
    carla_process, out = None, None
    try:
        carla_process, out = open_carla(args.port, args.town_name, args.gpu,
                                        args.container_name)

        while True:
            try:
                with make_carla_client(args.host, args.port) as client:
                    collect(client, args)
                    break

            except TCPConnectionError as error:
                # Server not ready yet / connection dropped: retry.
                logging.error(error)
                time.sleep(1)

        # KILL CARLA TO AVOID ZOMBIES
        _shutdown_carla(carla_process, out)

    except KeyboardInterrupt:
        print('Killed By User')
        _shutdown_carla(carla_process, out)

    except Exception:
        # BUG FIX: was a bare 'except:' that silently swallowed every error
        # (including the traceback). Record the failure before cleaning up.
        logging.exception('collector crashed, shutting down CARLA')
        _shutdown_carla(carla_process, out)


def execute_collector(args):
    """Run one collector in its own process so several can run in parallel."""
    p = multiprocessing.Process(target=collect_loop,
                                args=(args,))
    p.start()


# open a carla docker with the container_name
def open_carla(port, town_name, gpu, container_name):
    """Launch a CARLA server container; return (Popen handle, container id bytes)."""
    sp = subprocess.Popen(
        ['docker', 'run', '--rm', '-d', '-p',
         str(port) + '-' + str(port + 2) + ':' + str(port) + '-' + str(port + 2),
         '--runtime=nvidia', '-e', 'NVIDIA_VISIBLE_DEVICES=' + str(gpu), container_name,
         '/bin/bash', 'CarlaUE4.sh', '/Game/Maps/' + town_name, '-windowed',
         '-benchmark', '-fps=10', '-world-port=' + str(port)], shell=False,
        stdout=subprocess.PIPE)

    (out, err) = sp.communicate()

    return sp, out


if __name__ == '__main__':
    argparser = argparse.ArgumentParser(description='Release Data Collectors')

    argparser.add_argument('-ids', '--ids_gpus', type=str, required=True, help='string containing the gpu ids')
    argparser.add_argument('-n', '--number_collectors', default=1, type=int, help='number of collectors used')
    argparser.add_argument('-e', '--number_episodes', default=200, type=int, help='number of episodes per collector used')
    argparser.add_argument('-g', '--carlas_per_gpu', default=3, type=int, help='number of gpus per carla')
    argparser.add_argument('-s', '--start_episode', default=0, type=int, help='first episode number')
    argparser.add_argument('-d', '--data_configuration_name', default='coil_training_dataset', type=str, help='config file in dataset_configurations')
    argparser.add_argument('-pt', '--data_path', type=str, required=True, help='path used to save the data')
    argparser.add_argument('-ct', '--container_name', default='carlagear', type=str, help='docker container used to collect data')
    argparser.add_argument('-t', '--town_name', default=1, type=int, help='town name (1/2)')
    argparser.add_argument('-m', '--mode', default='expert', type=str, help='data collection mode - expert/dagger/dart')

    args = argparser.parse_args()

    town_name = 'Town0' + str(args.town_name)
    # distribute collectors over the gpus: 3 consecutive ports per collector,
    # carlas_per_gpu collectors share one gpu id.
    for i in range(args.number_collectors):
        port = 10000 + i * 3
        gpu = (int(i / args.carlas_per_gpu))
        gpu_id = args.ids_gpus[gpu % len(args.ids_gpus)]
        print('using gpu id: ', gpu_id)
        collector_args = Arguments(port, args.number_episodes,
                                   args.start_episode + (args.number_episodes) * (i),
                                   args.data_path,
                                   args.data_configuration_name,
                                   gpu_id,
                                   town_name,
                                   args.container_name,
                                   args.mode)
        execute_collector(collector_args)
https://raw.githubusercontent.com/autonomousvision/data_aggregation/76777156a465cbb77d6d5ab88da8f1812e7ff043/network/.DS_Store -------------------------------------------------------------------------------- /network/__init__.py: -------------------------------------------------------------------------------- 1 | from .loss import Loss 2 | from .coil_model import CoILModel 3 | from .optimizer import adjust_learning_rate, adjust_learning_rate_auto, adjust_learning_rate_cosine_annealing -------------------------------------------------------------------------------- /network/coil_model.py: -------------------------------------------------------------------------------- 1 | """ 2 | A simple factory module that returns instances of possible modules 3 | 4 | """ 5 | 6 | from .models import CoILICRA 7 | 8 | 9 | def CoILModel(architecture_name, architecture_configuration): 10 | """ Factory function 11 | 12 | Note: It is defined with the first letter as uppercase even though is a function to contrast 13 | the actual use of this function that is making classes 14 | """ 15 | # TODO: this could be extended to some more arbitrary definition 16 | 17 | if architecture_name == 'coil-icra': 18 | 19 | return CoILICRA(architecture_configuration) 20 | 21 | else: 22 | 23 | raise ValueError(" Not found architecture name") -------------------------------------------------------------------------------- /network/loss.py: -------------------------------------------------------------------------------- 1 | from . 
import torch


def l1(params):
    """Branched loss using the L1 functional."""
    return branched_loss(LF.l1_loss, params)


def l2(params):
    """Branched loss using the L2 functional."""
    return branched_loss(LF.l2_loss, params)


def l1_attention(params):
    """Branched L1 loss that also produces attention maps."""
    return branched_loss(LF.l1_attention_loss, params)


def branched_loss(loss_function, params):
    """Compute the weighted loss over all conditional branches.

    Args:
        loss_function: the loss functional that actually computes the loss.
        params: dict with (at least)
            branches: outputs of every network branch
            targets: ground-truth targets the network should produce
            controls: control command used for each data point
            branch weights: weight of each branch in the loss
            speed_gt: ground-truth speed for these data points
            variable_weights: per-variable (Steer/Gas/Brake) weights

    Returns:
        Tuple of (scalar loss, dict of plottable values for tensorboard).
    """
    batch_size = params['branches'][0].shape[0]

    controls_mask = LF.compute_branches_masks(params['controls'],
                                              params['branches'][0].shape[1])
    # Make the control masks available to the functional as well.
    params.update({'controls_mask': controls_mask})

    # Per-branch loss vectors with the branch-specific activation applied.
    loss_branches_vec, plotable_params = loss_function(params)

    # Weight Steer/Gas/Brake on the four control branches; the fifth branch
    # is the speed prediction. TODO: the 4 control branches are hardcoded.
    variable_weights = params['variable_weights']
    for branch in range(4):
        loss_branches_vec[branch] = (
            loss_branches_vec[branch][:, 0] * variable_weights['Steer']
            + loss_branches_vec[branch][:, 1] * variable_weights['Gas']
            + loss_branches_vec[branch][:, 2] * variable_weights['Brake'])

    control_loss = (loss_branches_vec[0] + loss_branches_vec[1]
                    + loss_branches_vec[2] + loss_branches_vec[3])

    # NOTE(review): the speed branch is divided by the batch size here AND
    # again in the return expression, i.e. it is scaled by 1/batch_size**2 —
    # confirm this double normalization is intended.
    speed_loss = loss_branches_vec[4] / batch_size

    return (torch.sum(control_loss) / batch_size
            + torch.sum(speed_loss) / batch_size,
            plotable_params)


def Loss(loss_name):
    """Factory returning the loss callable for *loss_name* ('L1' or 'L2').

    Named with a leading capital (despite being a function) to mirror the
    class-like way callers use it.

    Raises:
        ValueError: if the loss name is unknown.
    """
    # TODO: this could be extended to some more arbitrary definition
    dispatch = {'L1': l1, 'L2': l2}
    if loss_name not in dispatch:
        raise ValueError(" Not found Loss name")
    return dispatch[loss_name]
class Branching(nn.Module):
    """Container holding one sub-module per conditional command branch.

    forward() feeds the same input through every branch and returns the
    list of per-branch outputs.
    """

    def __init__(self, branched_modules=None):
        """
        Args:
            branched_modules: list of nn.Module instances, one per branch.

        Raises:
            ValueError: if no modules are provided.
        """
        # TODO: Make an auto naming function for this.
        super(Branching, self).__init__()

        if branched_modules is None:
            raise ValueError("No model provided after branching")
        self.branched_modules = nn.ModuleList(branched_modules)

    def forward(self, x):
        """Run *x* through every branch and return the outputs as a list."""
        return [branch(x) for branch in self.branched_modules]

    def load_network(self, checkpoint):
        """Log that a checkpoint was loaded; the loading itself is a TODO.

        Args:
            checkpoint: the checkpoint that the user wants to add.
        """
        coil_logger.add_message('Loading', {
            "Model": {"Loaded checkpoint: " + str(checkpoint)}
        })
        # TODO: implement
def num_flat_features(self, x):
    """Number of features per sample when *x* is flattened.

    Product of every dimension of *x* except the first (batch) dimension;
    used by forward() to reshape the conv output into a flat vector.
    """
    total = 1
    for dim_size in x.size()[1:]:
        total *= dim_size
    return total
def forward(self, x):
    """Apply the fully connected stack.

    A tuple input carries intermediate layers along for future attention
    plotting: only the first element goes through the layers, the second
    element is returned untouched.
    """
    if type(x) is tuple:
        return self.layers(x[0]), x[1]
    return self.layers(x)
def forward(self, x, m):
    """Fuse image features *x* with measurement features *m*.

    Only 'cat' mode (dim-1 concatenation) is supported; the joined tensor
    is then passed through the configured after_process module.

    Raises:
        ValueError: if the configured join mode is not 'cat'.
    """
    if self.mode != 'cat':
        raise ValueError("Mode to join networks not found")
    return self.after_process(torch.cat((x, m), 1))
def adjust_learning_rate(optimizer, num_iters):
    """Step-decay the learning rate based on the iteration count.

    Multiplies g_conf.LEARNING_RATE by LEARNING_RATE_DECAY_LEVEL once per
    completed LEARNING_RATE_DECAY_INTERVAL iterations, clamps to a minimum,
    and writes the result into every parameter group of *optimizer*.
    """
    minlr = 0.0000001
    remaining = num_iters
    learning_rate = g_conf.LEARNING_RATE
    decayinterval = g_conf.LEARNING_RATE_DECAY_INTERVAL
    decaylevel = g_conf.LEARNING_RATE_DECAY_LEVEL

    # "normal" was the only schedule implemented in the original code, so
    # the scheduler switch collapsed to this loop.
    while remaining >= decayinterval:
        learning_rate = learning_rate * decaylevel
        remaining = remaining - decayinterval
    learning_rate = max(learning_rate, minlr)

    for param_group in optimizer.param_groups:
        print("New Learning rate is ", learning_rate)
        param_group['lr'] = learning_rate
def adjust_learning_rate_cosine_annealing(optimizer, loss_window, iteration):
    """Cyclic cosine annealing of the learning rate.

    Follows https://arxiv.org/pdf/1704.00109.pdf with a cycle length of
    20k iterations (Adam converges within ~100k iterations, i.e. 5 cycles).
    *loss_window* is unused; it is kept for signature compatibility with
    the other schedulers.

    NOTE(review): the annealed rate peaks at 2 * LEARNING_RATE because the
    usual 1/2 factor of the cosine schedule is absent — confirm intended.
    """
    cycle_length = 20000
    initial_lr = g_conf.LEARNING_RATE
    learning_rate = initial_lr * (
        math.cos(math.pi * ((iteration - 1) % cycle_length) / cycle_length) + 1)

    for param_group in optimizer.param_groups:
        param_group['lr'] = learning_rate
The 10 | # validation dataset must exist on the COIL_DATASET_PATH 11 | data_params = {'control': '_auto', 'root_path': '_logs', 12 | 'validation_driving_pairs': {'Town01W1': 'ECCVTrainingSuite_Town01', 13 | 'Town01W1Noise': 'ECCVTrainingSuite_Town01', 14 | 'Town02W14': 'ECCVGeneralizationSuite_Town02', 15 | 'Town02W14Noise': 'ECCVGeneralizationSuite_Town02'}, 16 | 17 | } 18 | # There is not data filter 19 | data_filter = {} 20 | # The parameters processed that are going to be used for after plotting 21 | processing_params = {'Success rate': {'metric': 'control_success_rate', 'filter': {}, 'params': {}}, 22 | 'Steering absolute error': {'metric': 'steering_error', 'filter': data_filter, 23 | 'params': {}}, 24 | 'step': {'metric': 'step', 'filter': {}, 'params': {}}, 25 | 'town_id': {'metric': 'id', 'filter': {}, 'params': {}}, 26 | 'exp': {'metric': 'experiment', 'filter': {}, 'params': {}} 27 | } 28 | 29 | plot_params = collections.OrderedDict() 30 | 31 | 32 | 33 | #### Definition of the plots that are going to be made 34 | 35 | plot_params['ctrl_vs_steer_50'] = {'print': True, 36 | 'x': {'data': 'Steering absolute error', 'log': True}, 37 | 'y': {'data': 'Success rate', 'log': False}, 38 | 'size': {'data': 'step'}, 39 | 'color': {'data': 'town_id'}, 40 | 'plot_best_n_percent': 50 41 | } 42 | 43 | -------------------------------------------------------------------------------- /requirements.yaml: -------------------------------------------------------------------------------- 1 | name: coiltraine 2 | channels: 3 | - defaults 4 | dependencies: 5 | - asn1crypto=0.24.0=py35_0 6 | - ca-certificates=2018.12.5=0 7 | - certifi=2018.8.24=py35_1 8 | - cffi=1.11.5=py35he75722e_1 9 | - chardet=3.0.4=py35_1 10 | - cryptography=2.3.1=py35hc365091_0 11 | - idna=2.7=py35_0 12 | - libedit=3.1.20181209=hc058e9b_0 13 | - libffi=3.2.1=hd88cf55_4 14 | - libgcc-ng=8.2.0=hdf63c60_1 15 | - libstdcxx-ng=8.2.0=hdf63c60_1 16 | - ncurses=6.1=he6710b0_1 17 | - openssl=1.0.2p=h14c3975_0 18 | - 
pip=10.0.1=py35_0 19 | - pycparser=2.19=py35_0 20 | - pyopenssl=18.0.0=py35_0 21 | - pysocks=1.6.8=py35_0 22 | - python=3.5.6=hc3d631a_0 23 | - readline=7.0=h7b6447c_5 24 | - requests=2.19.1=py35_0 25 | - setuptools=40.2.0=py35_0 26 | - six=1.11.0=py35_1 27 | - sqlite=3.26.0=h7b6447c_0 28 | - tk=8.6.8=hbc83047_0 29 | - urllib3=1.23=py35_0 30 | - wheel=0.31.1=py35_0 31 | - xz=5.2.4=h14c3975_4 32 | - zlib=1.2.11=h7b6447c_3 33 | - pip: 34 | - absl-py==0.1.13 35 | - astor==0.6.2 36 | - backcall==0.1.0 37 | - bleach==1.5.0 38 | - configparser==3.5.0 39 | - cycler==0.10.0 40 | - cython==0.28.2 41 | - decorator==4.3.0 42 | - dlib==19.16.0 43 | - entrypoints==0.2.3 44 | - enum34==1.1.6 45 | - gast==0.2.0 46 | - grpcio==1.10.0 47 | - imgaug==0.2.5 48 | - ipykernel==4.8.2 49 | - ipython==6.3.0 50 | - ipython-genutils==0.2.0 51 | - ipywidgets==7.2.0 52 | - jedi==0.11.1 53 | - jinja2==2.10 54 | - jsonschema==2.6.0 55 | - kiwisolver==1.0.1 56 | - markdown==2.6.11 57 | - markupsafe==1.0 58 | - matplotlib==2.2.2 59 | - mistune==0.8.3 60 | - mock==2.0.0 61 | - nbconvert==5.3.1 62 | - nbformat==4.4.0 63 | - networkx==2.1 64 | - notebook==5.4.1 65 | - numpy==1.15.3 66 | - opencv-python==3.4.0.12 67 | - pandas==0.22.0 68 | - pandocfilters==1.4.2 69 | - parso==0.1.1 70 | - pbr==4.0.2 71 | - pexpect==4.4.0 72 | - pickleshare==0.7.4 73 | - pid==2.2.0 74 | - pid-controller==0.2.0 75 | - pillow==5.3.0 76 | - prompt-toolkit==1.0.15 77 | - protobuf==3.6.1 78 | - ptyprocess==0.5.2 79 | - pygame==1.9.4 80 | - pygments==2.2.0 81 | - pyparsing==2.2.0 82 | - python-dateutil==2.7.2 83 | - pytz==2018.4 84 | - pywavelets==0.5.2 85 | - pyyaml==3.12 86 | - pyzmq==17.0.0 87 | - qtconsole==4.3.1 88 | - scikit-image==0.13.1 89 | - scipy==1.0.1 90 | - seaborn==0.8.1 91 | - send2trash==1.5.0 92 | - simplegeneric==0.8.1 93 | - tensorboard==1.6.0 94 | - tensorflow==1.6.0 95 | - tensorflow-tensorboard==0.4.0 96 | - termcolor==1.1.0 97 | - terminado==0.8.1 98 | - testpath==0.3.1 99 | - torch==0.4.1 100 | - 
torchvision==0.2.1 101 | - tornado==5.0.1 102 | - traitlets==4.3.2 103 | - wcwidth==0.1.7 104 | - webencodings==0.5.1 105 | - werkzeug==0.14.1 106 | - widgetsnbextension==3.2.0 107 | 108 | -------------------------------------------------------------------------------- /tools/batch_rename.py: -------------------------------------------------------------------------------- 1 | """ 2 | Script to rename all the files from a folder. it forces the numbering to start on 0 3 | """ 4 | 5 | from __future__ import print_function 6 | 7 | import os 8 | import re 9 | import argparse 10 | 11 | 12 | if __name__ == "__main__": 13 | 14 | parser = argparse.ArgumentParser(description='Name changer') 15 | parser.add_argument('-pt', '--path', default="") 16 | args = parser.parse_args() 17 | path = args.path 18 | 19 | count = 0 20 | file_list = sorted(os.listdir(path)) 21 | 22 | print (file_list) 23 | 24 | for filename in file_list: 25 | print (filename) 26 | newfilename = os.path.join(path, 'episode_'+str(count+60).zfill(5)) 27 | 28 | os.rename(os.path.join(path, filename), newfilename ) 29 | count +=1 30 | 31 | -------------------------------------------------------------------------------- /tools/copy_data_fast.py: -------------------------------------------------------------------------------- 1 | """ 2 | Script to copy data fast using multiprocessing and shutil 3 | 4 | Usage: python copy_data_fast.py 5 | """ 6 | 7 | import os 8 | import sys 9 | import numpy as np 10 | import shutil 11 | import time 12 | import multiprocessing 13 | 14 | source_dir = sys.argv[1] 15 | target_dir = sys.argv[2] 16 | 17 | def copy_episodes(episode): 18 | if not os.path.isdir(os.path.join(target_dir, episode)): 19 | os.mkdir(os.path.join(target_dir, episode)) 20 | 21 | episode_data = sorted(os.listdir(os.path.join(source_dir, episode))) 22 | 23 | for file_name in episode_data: 24 | 25 | if 'metadata' in file_name: 26 | shutil.copy2(os.path.join(source_dir, episode, file_name), os.path.join(target_dir, 
if __name__ == '__main__':

    # Spawn one worker process per episode directory whose number lies in
    # [argv[3], argv[4]]. NOTE(review): this forks a process for every
    # matching episode at once (no pool), matching the original behavior.
    episodes = sorted(os.listdir(source_dir))
    workers = []

    start_time = time.time()
    first_episode = int(sys.argv[3])
    last_episode = int(sys.argv[4])

    for episode in episodes:
        if not os.path.isdir(os.path.join(source_dir, episode)):
            continue
        episode_number = int(episode.split('_')[-1])
        if first_episode <= episode_number <= last_episode:
            print (episode)
            proc = multiprocessing.Process(target=copy_episodes, args=(episode,))
            workers.append(proc)
            proc.start()

    # Wait for every copy worker to finish before reporting.
    for proc in workers:
        proc.join()

    print ('total time taken: ', time.time() - start_time)
def tryint(s):
    """Return *s* converted to int when possible, otherwise unchanged."""
    try:
        return int(s)
    except ValueError:  # narrowed from a bare except: re.split only yields strings
        return s


def alphanum_key(s):
    """
    Turn a string into a list of string and number chunks.
    "z23a" -> ["z", 23, "a"]
    """
    return [tryint(c) for c in re.split('([0-9]+)', s)]


def sort_nicely(l):
    """
    Sort the given list in place in the way that humans expect.
    """
    l.sort(key=alphanum_key)


# ***** main loop *****
if __name__ == "__main__":

    parser = argparse.ArgumentParser(description='Path viewer')
    parser.add_argument('-pt', '--path', default="")
    # BUG FIX: the two options below originally repeated the `type` keyword
    # (type=int, ..., type='starting episode number'), which is a
    # SyntaxError; the description strings belong in `help`.
    parser.add_argument('-e', '--st_episode', type=int, default=0,
                        help='starting episode number')
    parser.add_argument('-t', '--end_episode', type=int, default=1e10,
                        help='last episode number')
    parser.add_argument('--episodes', nargs='+', dest='episodes', type=str, default='all')

    args = parser.parse_args()
    path = args.path

    # By setting episodes as all, it means that all episodes should be visualized
    if args.episodes == 'all':
        episodes_list = glob.glob(os.path.join(path, 'episode_*'))
    else:
        episodes_list = args.episodes
    sort_nicely(episodes_list)

    total_number_of_seconds = 0

    for episode in episodes_list:

        if 'episode' not in episode:
            episode = 'episode_' + episode

        episode_number = int(episode.split('_')[-1])

        # only count the episodes in the specified range
        if episode_number < int(args.st_episode) or episode_number > int(args.end_episode):
            continue

        # Take all the measurements from a list
        measurements_list = glob.glob(os.path.join(episode, 'measurement*'))
        sort_nicely(measurements_list)

        # time is computed assuming 10 fps generation
        if len(measurements_list) > 0:
            data_point_number = len(measurements_list)  # total number of frames
            total_number_of_seconds += float(data_point_number) / 10.0

    print('Total Hours = ', total_number_of_seconds / 3600.0)
# The 11 model labels, one per row of `models` above.
# BUG FIX: `legend` was referenced below (len(legend), legend[j]) but every
# definition of it was commented out, so the script crashed with a NameError.
# NOTE(review): this ordering is taken from one of the commented-out legends
# of the original script — confirm it matches the row order of `models`.
legend = ['CILRS', 'CILRS+', 'DAgger', 'DAgger+', 'SMILe', 'SMILe+',
          'DART', 'DA-RB', 'DA-RB+', 'DA-RB+(E)', 'Expert']


import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec

# Grid layout: four result panels plus a thin top row hosting only the legend.
fig = plt.figure()
fig.set_size_inches(18, 12)
fig.set_dpi(640)

rows = 17  # the larger the number here, the smaller the spacing around the legend
start1 = 1
end1 = int((rows - 1) / 2)
start2 = end1
end2 = int(rows - 1)

gspec = gridspec.GridSpec(ncols=4, nrows=rows)

axes = []
axes.append(fig.add_subplot(gspec[start1:end1, 0:2]))
axes.append(fig.add_subplot(gspec[start1:end1, 2:4]))
axes.append(fig.add_subplot(gspec[start2:end2, 0:2]))
axes.append(fig.add_subplot(gspec[start2:end2, 2:4]))
axes.append(fig.add_subplot(gspec[0, 0:4]))  # legend-only axis

# Line style per method: expert is a solid black line, the two CILRS
# baselines are plain solid lines, every iterative method is dashed with
# triangle markers.
lines = []
for i in range(4):
    lines = []
    for j in range(len(legend)):
        key = legend[j]
        if key == 'Expert':
            line, = axes[i].plot(x, models[j][i], linestyle='-', color='black')
        elif key == 'CILRS' or key == 'CILRS+':
            line, = axes[i].plot(x, models[j][i], linestyle='-')
        else:
            line, = axes[i].plot(x, models[j][i], linestyle='--', marker='^')
        axes[i].set_ylabel('Success Rate', size='x-large')

        lines.append(line)

axes[0].set_title('Training Conditions', fontsize='xx-large')
axes[1].set_title('New Weather', fontsize='xx-large')
axes[2].set_title('New Town', fontsize='xx-large')
axes[3].set_title('New Town & Weather', fontsize='xx-large')

for i in range(4):
    axes[i].tick_params(axis='x', labelsize=12)
    axes[i].tick_params(axis='y', labelsize=12)

# create legend on bottommost axis
axes[-1].legend(lines, legend, loc='center', ncol=11, mode='expand', fontsize='x-large')
axes[-1].set_axis_off()  # don't show bottommost axis

fig.tight_layout()

# NOTE(review): hard-coded user-specific output path.
plt.savefig('/is/sg2/aprakash/Dataset/plots.pdf')

# plt.show()
"""
Script to generate video from a set of images in an episode

data_type: folder containing the data
episode_number: specific episode number containing the images
"""

from PIL import Image, ImageFont, ImageDraw
import csv
import os
import sys
import json
import numpy as np
import skvideo.io

DATA_DIR = '/is/rg/avg/aprakash'

if __name__ == '__main__':
    data_type = sys.argv[1]
    episode_number = sys.argv[2]

    episode_path = os.path.join(DATA_DIR, data_type, 'episode_%s' % (episode_number))

    episode_data = sorted(os.listdir(episode_path))

    # Per-frame parallel lists: parsed measurement json, expert controls,
    # agent ("noise") controls, speed, and the central camera image path.
    measurements_data = []
    expert_steer = []
    expert_throttle = []
    expert_brake = []
    agent_steer = []
    agent_throttle = []
    agent_brake = []
    directions = []
    speed_module = []
    central_image_path = []

    for file_name in episode_data:
        if 'measurements_' not in file_name:
            continue

        frame_number = file_name.split('_')[-1].split('.')[0]
        with open(os.path.join(episode_path, file_name)) as file:
            json_data = json.load(file)

        measurements_data.append(json_data)

        expert_steer.append(float(json_data['steer']))
        expert_throttle.append(float(json_data['throttle']))
        expert_brake.append(float(json_data['brake']))

        agent_steer.append(float(json_data['steer_noise']))
        agent_throttle.append(float(json_data['throttle_noise']))
        agent_brake.append(float(json_data['brake_noise']))

        # Speed defaults to 0 when the measurement lacks player data.
        if 'playerMeasurements' in json_data and 'forwardSpeed' in json_data['playerMeasurements']:
            speed_module.append(float(json_data['playerMeasurements']['forwardSpeed']))
        else:
            speed_module.append(0)

        central_image_path.append(
            os.path.join(episode_path, 'CentralRGB_%s.png' % (frame_number)))

    if not os.path.isdir(os.path.join(DATA_DIR, 'videos')):
        os.mkdir(os.path.join(DATA_DIR, 'videos'))

    # Encode at 10 fps; frame 0 is skipped, matching the original script.
    writer = skvideo.io.FFmpegWriter(
        os.path.join(DATA_DIR, 'videos', '%s_episode_%s.mp4' % (data_type, episode_number)),
        inputdict={'-r': '10'}, outputdict={'-r': '10'})

    for i in range(1, len(measurements_data)):
        img = Image.open(central_image_path[i])
        helvetica = ImageFont.truetype(
            "/usr/share/fonts/truetype/freefont/FreeSerif.ttf", size=20)
        d = ImageDraw.Draw(img)
        text_color = (255, 255, 255)

        # Overlay agent controls (left column), expert controls (right
        # column) and the vehicle speed onto the frame.
        overlay_items = [
            ((40, 10), "agent_steer = %0.4f" % agent_steer[i]),
            ((40, 35), "agent_throttle = %0.4f" % agent_throttle[i]),
            ((40, 60), "agent_brake = %0.4f" % agent_brake[i]),
            ((300, 10), "expert_steer = %0.4f" % expert_steer[i]),
            ((300, 35), "expert_throttle = %0.4f" % expert_throttle[i]),
            ((300, 60), "expert_brake = %0.4f" % expert_brake[i]),
            ((40, 85), "vehicle_speed = %0.4f" % speed_module[i]),
        ]
        for location, text in overlay_items:
            d.text(location, text, font=helvetica, fill=text_color)

        writer.writeFrame(img)

    writer.close()
-------------------------------------------------------------------------------- 1 | """ 2 | Script for policy-based sampling using uncertainty estimate. 3 | Uncertainty is measured by computing the variance in the predicted controls 4 | of 100 runs of model with test time dropout 5 | 6 | Requires: 7 | var_file: computed variance in the controls with test time dropout 8 | preload: npy preload file for the entire on-policy data 9 | """ 10 | 11 | import os 12 | import sys 13 | import time 14 | import json 15 | import shutil 16 | import argparse 17 | import multiprocessing 18 | import numpy as np 19 | 20 | def filter_episode(episode, episode_data): 21 | if not os.path.isdir(os.path.join(args.target_dir, episode)): 22 | os.mkdir(os.path.join(args.target_dir, episode)) 23 | 24 | files = sorted(os.listdir(os.path.join(args.source_dir, episode))) 25 | if 'metadata.json' in files: 26 | shutil.copy2(os.path.join(args.source_dir, episode, 'metadata.json'), os.path.join(args.target_dir, episode, 'metadata.json')) 27 | 28 | if 'processed2' in files: 29 | shutil.copy2(os.path.join(args.source_dir, episode, 'processed2'), os.path.join(args.target_dir, episode, 'processed2')) 30 | 31 | for filename in episode_data: 32 | 33 | episode_number = filename.split('_')[-1].split('.')[0] 34 | 35 | central_image = os.path.join(args.source_dir, episode, 'CentralRGB_%s.png'%episode_number) 36 | left_image = os.path.join(args.source_dir, episode, 'LeftRGB_%s.png'%episode_number) 37 | right_image = os.path.join(args.source_dir, episode, 'RightRGB_%s.png'%episode_number) 38 | 39 | shutil.copy2(central_image, os.path.join(args.target_dir, episode, 'CentralRGB_%s.png'%episode_number)) 40 | shutil.copy2(left_image, os.path.join(args.target_dir, episode, 'LeftRGB_%s.png'%episode_number)) 41 | shutil.copy2(right_image, os.path.join(args.target_dir, episode, 'RightRGB_%s.png'%episode_number)) 42 | 43 | measurements_file = os.path.join(args.source_dir, episode, 'measurements_%s.json'%episode_number) 
44 | 45 | shutil.copy2(measurements_file, os.path.join(args.target_dir, episode, 'measurements_%s.json'%episode_number)) 46 | 47 | 48 | # this function is used to get the sampled episodes in the first iteration 49 | def get_required_episodes(): 50 | computed_var = np.load(args.var_file) 51 | preload = np.load(args.preload) 52 | 53 | # take the max variance out of steer, throtte and brake 54 | max_var = np.max(computed_var, axis=1) 55 | print (max_var.shape) 56 | indices_var = np.argsort(max_var) 57 | required_var = max_var[indices_var[::-1]] 58 | threshold_index = 72507 # this is selected based on the proportion of data to be sampled in the first iteration 59 | threshold_var = required_var[threshold_index] 60 | print (threshold_var) 61 | new_preload = preload[0][indices_var[::-1]] 62 | required_preload = new_preload[:threshold_index] 63 | 64 | required_episodes = {} 65 | for i in range(len(required_preload)): 66 | curr_episode, curr_frame = required_preload[i].split('/') 67 | if curr_episode in required_episodes: 68 | required_episodes[curr_episode].append(curr_frame) 69 | else: 70 | required_episodes[curr_episode] = [curr_frame] 71 | 72 | print (len(required_episodes)) 73 | 74 | return required_episodes 75 | 76 | # once the threshold is fixed after the first iteration, use this function for sampling 77 | def get_required_episodes_thres(): 78 | computed_var = np.load(args.var_file) 79 | preload = np.load(args.preload) 80 | max_var = np.max(computed_var, axis=1) 81 | thres = 0.00963 82 | required_preload = preload[0][max_var>thres] 83 | 84 | ''' 85 | #indices_var = np.argsort(max_var) 86 | #required_var = max_var[indices_var[::-1]] 87 | #threshold_index = 72507 88 | #threshold_var = required_var[threshold_index] 89 | #print (threshold_var) 90 | #new_preload = preload[0][indices_var[::-1]] 91 | #required_preload = new_preload[:threshold_index] 92 | #print (required_preload) 93 | ''' 94 | 95 | required_episodes = {} 96 | for i in range(len(required_preload)): 97 | 
curr_episode, curr_frame = required_preload[i].split('/') 98 | if curr_episode in required_episodes: 99 | required_episodes[curr_episode].append(curr_frame) 100 | else: 101 | required_episodes[curr_episode] = [curr_frame] 102 | 103 | print (len(required_episodes)) 104 | 105 | return required_episodes 106 | 107 | def main(): 108 | 109 | manager = multiprocessing.Manager() 110 | return_dict = manager.dict() 111 | jobs = [] 112 | 113 | if not os.path.isdir(args.target_dir): 114 | os.mkdir(args.target_dir) 115 | 116 | # episodes_dict = get_required_episodes() # this is used for the first iteration 117 | episodes_dict = get_required_episodes_thres() # this is used for the sunsequent iteration 118 | 119 | # if 'metadata.json' in episodes_list: 120 | # shutil.copy2(os.path.join(source_dir, 'metadata.json'), os.path.join(target_dir, 'metadata.json')) 121 | 122 | st = time.time() 123 | for episode in episodes_dict: 124 | print ('episode: ', episode) 125 | p = multiprocessing.Process(target=filter_episode, args=(episode, episodes_dict[episode])) 126 | jobs.append(p) 127 | p.start() 128 | 129 | for process in jobs: 130 | process.join() 131 | print ('time for processing episodes: ', time.time()-st) 132 | 133 | for episode in episodes_dict: 134 | print (episode, len(episodes_dict[episode])) 135 | 136 | if __name__ == '__main__': 137 | global args 138 | parser = argparse.ArgumentParser() 139 | parser.add_argument('--source_dir', type=str, required=True, help='source directory') 140 | parser.add_argument('--target_dir', type=str, required=True, help='target directory') 141 | parser.add_argument('--preload', type=str, required=True, help='preload path of required dataset') 142 | parser.add_argument('--var_file', type=str, required=True, help='path of variance file') 143 | 144 | args = parser.parse_args() 145 | 146 | 147 | main() -------------------------------------------------------------------------------- /tools/move_data_fast.py: 
-------------------------------------------------------------------------------- 1 | """ 2 | Script to move data fast using multiprocessing and shutil 3 | 4 | Usage: python move_data_fast.py 5 | """ 6 | 7 | import os 8 | import sys 9 | import numpy as np 10 | import shutil 11 | import time 12 | import multiprocessing 13 | 14 | source_dir = sys.argv[1] 15 | target_dir = sys.argv[2] 16 | 17 | def copy_episodes(episode): 18 | if not os.path.isdir(os.path.join(target_dir, episode)): 19 | os.mkdir(os.path.join(target_dir, episode)) 20 | 21 | episode_data = sorted(os.listdir(os.path.join(source_dir, episode))) 22 | 23 | for file_name in episode_data: 24 | 25 | if 'metadata' in file_name: 26 | shutil.move(os.path.join(source_dir, episode, file_name), os.path.join(target_dir, episode, file_name)) 27 | 28 | if 'processed2' in file_name: 29 | shutil.move(os.path.join(source_dir, episode, file_name), os.path.join(target_dir, episode, file_name)) 30 | 31 | if 'measurements_' in file_name: 32 | 33 | # copy the data to other directory 34 | episode_number = file_name.split('.')[0].split('_')[-1] 35 | 36 | central_image = os.path.join(source_dir, episode, 'CentralRGB_%s.png'%episode_number) 37 | left_image = os.path.join(source_dir, episode, 'LeftRGB_%s.png'%episode_number) 38 | right_image = os.path.join(source_dir, episode, 'RightRGB_%s.png'%episode_number) 39 | 40 | shutil.move(central_image, os.path.join(target_dir, episode, 'CentralRGB_%s.png'%episode_number)) 41 | shutil.move(left_image, os.path.join(target_dir, episode, 'LeftRGB_%s.png'%episode_number)) 42 | shutil.move(right_image, os.path.join(target_dir, episode, 'RightRGB_%s.png'%episode_number)) 43 | 44 | shutil.move(os.path.join(source_dir, episode, file_name), os.path.join(target_dir, episode, file_name)) 45 | 46 | 47 | 48 | 49 | if __name__ == '__main__': 50 | 51 | episodes_list = sorted(os.listdir(source_dir)) 52 | 53 | jobs = [] 54 | 55 | st = time.time() 56 | for episode in episodes_list: 57 | if 
os.path.isdir(os.path.join(source_dir, episode)): 58 | episode_number = int(episode.split('_')[-1]) 59 | 60 | if episode_number >= int(sys.argv[3]) and episode_number <= int(sys.argv[4]): 61 | print (episode) 62 | p = multiprocessing.Process(target=copy_episodes, args=(episode,)) 63 | jobs.append(p) 64 | p.start() 65 | 66 | 67 | for process in jobs: 68 | process.join() 69 | 70 | print ('total time taken: ', time.time()-st) 71 | --------------------------------------------------------------------------------