├── .gitignore ├── Docs ├── benchmark_basic_results_town01.md ├── benchmark_basic_results_town02.md ├── benchmark_creating.md ├── benchmark_metrics.md ├── benchmark_start.md └── benchmark_structure.md ├── README.md ├── benchmarks_084.py ├── mkdocs.yml └── version084 ├── __init__.py ├── benchmark_tools ├── __init__.py ├── agent.py ├── benchmark_runner.py ├── experiment.py ├── experiment_suites │ ├── __init__.py │ ├── basic_experiment_suite.py │ └── experiment_suite.py ├── metrics.py ├── recording.py └── results_printer.py ├── carla ├── __init__.py ├── carla_server_pb2.py ├── client.py ├── image_converter.py ├── planner │ ├── Town01.png │ ├── Town01.txt │ ├── Town01Central.png │ ├── Town01Lanes.png │ ├── Town02.png │ ├── Town02.txt │ ├── Town02Big.png │ ├── Town02Central.png │ ├── Town02Lanes.png │ ├── __init__.py │ ├── astar.py │ ├── bezier.py │ ├── city_track.py │ ├── converter.py │ ├── graph.py │ ├── grid.py │ ├── map.py │ └── planner.py ├── sensor.py ├── settings.py ├── tcp.py ├── transform.py └── util.py ├── client_example.py ├── driving_benchmarks ├── __init__.py ├── carla100 │ ├── __init__.py │ └── carla100.py └── corl2017 │ ├── __init__.py │ └── corl_2017.py └── setup.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | .hypothesis/ 50 | .pytest_cache/ 51 | 52 | # Translations 53 | *.mo 54 | *.pot 55 | 56 | # Django stuff: 57 | *.log 58 | local_settings.py 59 | db.sqlite3 60 | 61 | # Flask stuff: 62 | instance/ 63 | .webassets-cache 64 | 65 | # Scrapy stuff: 66 | .scrapy 67 | 68 | # Sphinx documentation 69 | docs/_build/ 70 | 71 | # PyBuilder 72 | target/ 73 | 74 | # Jupyter Notebook 75 | .ipynb_checkpoints 76 | 77 | # IPython 78 | profile_default/ 79 | ipython_config.py 80 | 81 | # pyenv 82 | .python-version 83 | 84 | # celery beat schedule file 85 | celerybeat-schedule 86 | 87 | # SageMath parsed files 88 | *.sage.py 89 | 90 | # Environments 91 | .env 92 | .venv 93 | env/ 94 | venv/ 95 | ENV/ 96 | env.bak/ 97 | venv.bak/ 98 | 99 | # Spyder project settings 100 | .spyderproject 101 | .spyproject 102 | 103 | # Rope project settings 104 | .ropeproject 105 | 106 | # mkdocs documentation 107 | /site 108 | 109 | # mypy 110 | .mypy_cache/ 111 | .dmypy.json 112 | dmypy.json 113 | 114 | # Pyre type checker 115 | .pyre/ 116 | # Compiled source # 117 | ################### 118 | *.com 119 | *.class 120 | *.dll 121 | *.exe 122 | *.o 123 | *.so 124 | 125 | # Packages # 126 | ############ 127 | # it's better to unpack these files and commit the raw source 128 | # git has its own built in compression methods 129 | *.7z 130 | *.dmg 131 | *.gz 132 | *.iso 133 | *.jar 134 | *.rar 135 | *.tar 136 | *.zip 137 | 138 | # Logs and databases # 139 | ###################### 140 | *.log 141 | *.sql 142 | *.sqlite 143 | 144 | # OS generated files # 145 | ###################### 146 | .DS_Store 147 | .DS_Store? 148 | ._* 149 | .Spotlight-V100 150 | .Trashes 151 | ehthumbs.db 152 | Thumbs.db 153 | -------------------------------------------------------------------------------- /Docs/benchmark_basic_results_town01.md: -------------------------------------------------------------------------------- 1 | We show the results for test and train weathers when 2 | [running the simple example](benchmark_creating/#expected-results) for Town01. 3 | The following result should print on the screen after running the 4 | example. 5 | 6 | ----- Printing results for training weathers (Seen in Training) ----- 7 | 8 | 9 | Percentage of Successful Episodes 10 | 11 | Weather: Clear Noon 12 | Task: 0 -> 1.0 13 | Task: 1 -> 0.0 14 | Task: 2 -> 0.0 15 | Task: 3 -> 0.0 16 | Average Between Weathers 17 | Task 0 -> 1.0 18 | Task 1 -> 0.0 19 | Task 2 -> 0.0 20 | Task 3 -> 0.0 21 | 22 | Average Percentage of Distance to Goal Travelled 23 | 24 | Weather: Clear Noon 25 | Task: 0 -> 0.9643630125892909 26 | Task: 1 -> 0.6794216252808839 27 | Task: 2 -> 0.6593855166486696 28 | Task: 3 -> 0.6646695325122313 29 | Average Between Weathers 30 | Task 0 -> 0.9643630125892909 31 | Task 1 -> 0.6794216252808839 32 | Task 2 -> 0.6593855166486696 33 | Task 3 -> 0.6646695325122313 34 | 35 | Avg. 
Kilometers driven before a collision to a PEDESTRIAN 36 | Weather: Clear Noon 37 | Task 0 -> more than 0.04316352371637994 38 | Task 1 -> more than 0.12350085985904342 39 | Task 2 -> more than 0.2400373917146113 40 | Task 3 -> more than 0.22983408429063365 41 | Average Between Weathers 42 | Task 0 -> more than 0.04316352371637994 43 | Task 1 -> more than 0.12350085985904342 44 | Task 2 -> more than 0.2400373917146113 45 | Task 3 -> more than 0.22983408429063365 46 | 47 | Avg. Kilometers driven before a collision to a VEHICLE 48 | Weather: Clear Noon 49 | Task 0 -> more than 0.04316352371637994 50 | Task 1 -> more than 0.12350085985904342 51 | Task 2 -> more than 0.2400373917146113 52 | Task 3 -> 0.11491704214531683 53 | Average Between Weathers 54 | Task 0 -> more than 0.04316352371637994 55 | Task 1 -> more than 0.12350085985904342 56 | Task 2 -> more than 0.2400373917146113 57 | Task 3 -> 0.11491704214531683 58 | 59 | Avg. Kilometers driven before a collision to a STATIC OBSTACLE 60 | Weather: Clear Noon 61 | Task 0 -> more than 0.04316352371637994 62 | Task 1 -> more than 0.12350085985904342 63 | Task 2 -> more than 0.2400373917146113 64 | Task 3 -> 0.22983408429063365 65 | Average Between Weathers 66 | Task 0 -> more than 0.04316352371637994 67 | Task 1 -> more than 0.12350085985904342 68 | Task 2 -> more than 0.2400373917146113 69 | Task 3 -> 0.22983408429063365 70 | 71 | Avg. Kilometers driven before going OUTSIDE OF THE ROAD 72 | Weather: Clear Noon 73 | Task 0 -> more than 0.04316352371637994 74 | Task 1 -> 0.12350085985904342 75 | Task 2 -> 0.2400373917146113 76 | Task 3 -> more than 0.22983408429063365 77 | Average Between Weathers 78 | Task 0 -> more than 0.04316352371637994 79 | Task 1 -> 0.12350085985904342 80 | Task 2 -> 0.2400373917146113 81 | Task 3 -> more than 0.22983408429063365 82 | 83 | Avg. Kilometers driven before invading the OPPOSITE LANE 84 | Weather: Clear Noon 85 | Task 0 -> more than 0.04316352371637994 86 | Task 1 -> more than 0.12350085985904342 87 | Task 2 -> more than 0.2400373917146113 88 | Task 3 -> more than 0.22983408429063365 89 | Average Between Weathers 90 | Task 0 -> more than 0.04316352371637994 91 | Task 1 -> more than 0.12350085985904342 92 | Task 2 -> more than 0.2400373917146113 93 | Task 3 -> more than 0.22983408429063365 94 | 95 | 96 | 97 | 98 | 99 | ----- Printing results for test weathers (Unseen in Training) ----- 100 | 101 | 102 | Percentage of Successful Episodes 103 | 104 | Weather: Clear Noon 105 | Task: 0 -> 1.0 106 | Task: 1 -> 0.0 107 | Task: 2 -> 0.0 108 | Task: 3 -> 0.0 109 | Average Between Weathers 110 | Task 0 -> 1.0 111 | Task 1 -> 0.0 112 | Task 2 -> 0.0 113 | Task 3 -> 0.0 114 | 115 | Average Percentage of Distance to Goal Travelled 116 | 117 | Weather: Clear Noon 118 | Task: 0 -> 0.9643630125892909 119 | Task: 1 -> 0.6794216252808839 120 | Task: 2 -> 0.6593855166486696 121 | Task: 3 -> 0.6646695325122313 122 | Average Between Weathers 123 | Task 0 -> 0.9643630125892909 124 | Task 1 -> 0.6794216252808839 125 | Task 2 -> 0.6593855166486696 126 | Task 3 -> 0.6646695325122313 127 | 128 | Avg. 
Kilometers driven before a collision to a PEDESTRIAN 129 | Weather: Clear Noon 130 | Task 0 -> more than 0.04316352371637994 131 | Task 1 -> more than 0.12350085985904342 132 | Task 2 -> more than 0.2400373917146113 133 | Task 3 -> more than 0.22983408429063365 134 | Average Between Weathers 135 | Task 0 -> more than 0.04316352371637994 136 | Task 1 -> more than 0.12350085985904342 137 | Task 2 -> more than 0.2400373917146113 138 | Task 3 -> more than 0.22983408429063365 139 | 140 | Avg. Kilometers driven before a collision to a VEHICLE 141 | Weather: Clear Noon 142 | Task 0 -> more than 0.04316352371637994 143 | Task 1 -> more than 0.12350085985904342 144 | Task 2 -> more than 0.2400373917146113 145 | Task 3 -> 0.11491704214531683 146 | Average Between Weathers 147 | Task 0 -> more than 0.04316352371637994 148 | Task 1 -> more than 0.12350085985904342 149 | Task 2 -> more than 0.2400373917146113 150 | Task 3 -> 0.11491704214531683 151 | 152 | Avg. Kilometers driven before a collision to a STATIC OBSTACLE 153 | Weather: Clear Noon 154 | Task 0 -> more than 0.04316352371637994 155 | Task 1 -> more than 0.12350085985904342 156 | Task 2 -> more than 0.2400373917146113 157 | Task 3 -> 0.22983408429063365 158 | Average Between Weathers 159 | Task 0 -> more than 0.04316352371637994 160 | Task 1 -> more than 0.12350085985904342 161 | Task 2 -> more than 0.2400373917146113 162 | Task 3 -> 0.22983408429063365 163 | 164 | Avg. Kilometers driven before going OUTSIDE OF THE ROAD 165 | Weather: Clear Noon 166 | Task 0 -> more than 0.04316352371637994 167 | Task 1 -> 0.12350085985904342 168 | Task 2 -> 0.2400373917146113 169 | Task 3 -> more than 0.22983408429063365 170 | Average Between Weathers 171 | Task 0 -> more than 0.04316352371637994 172 | Task 1 -> 0.12350085985904342 173 | Task 2 -> 0.2400373917146113 174 | Task 3 -> more than 0.22983408429063365 175 | 176 | Avg. Kilometers driven before invading the OPPOSITE LANE 177 | Weather: Clear Noon 178 | Task 0 -> more than 0.04316352371637994 179 | Task 1 -> more than 0.12350085985904342 180 | Task 2 -> more than 0.2400373917146113 181 | Task 3 -> more than 0.22983408429063365 182 | Average Between Weathers 183 | Task 0 -> more than 0.04316352371637994 184 | Task 1 -> more than 0.12350085985904342 185 | Task 2 -> more than 0.2400373917146113 186 | Task 3 -> more than 0.22983408429063365 187 | 188 | 189 | 190 | -------------------------------------------------------------------------------- /Docs/benchmark_basic_results_town02.md: -------------------------------------------------------------------------------- 1 | We show the results for test and train weathers when 2 | [running the simple example](benchmark_creating/#expected-results) for Town02. 3 | The following result should print on the screen after running the 4 | example. 
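For reference, these numbers come from running the basic example against a simulator started with a fixed time-step (see the Expected Results section of the creating-a-benchmark tutorial); for Town02 that is, for instance:

    ./benchmarks_084.py -c Town02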
5 | 6 | 7 | ----- Printing results for training weathers (Seen in Training) ----- 8 | 9 | 10 | Percentage of Successful Episodes 11 | 12 | Weather: Clear Noon 13 | Task: 0 -> 1.0 14 | Task: 1 -> 0.0 15 | Task: 2 -> 0.0 16 | Task: 3 -> 0.0 17 | Average Between Weathers 18 | Task 0 -> 1.0 19 | Task 1 -> 0.0 20 | Task 2 -> 0.0 21 | Task 3 -> 0.0 22 | 23 | Average Percentage of Distance to Goal Travelled 24 | 25 | Weather: Clear Noon 26 | Task: 0 -> 0.8127653637426329 27 | Task: 1 -> 0.10658303206448155 28 | Task: 2 -> -0.20448736444348714 29 | Task: 3 -> -0.20446966646041384 30 | Average Between Weathers 31 | Task 0 -> 0.8127653637426329 32 | Task 1 -> 0.10658303206448155 33 | Task 2 -> -0.20448736444348714 34 | Task 3 -> -0.20446966646041384 35 | 36 | Avg. Kilometers driven before a collision to a PEDESTRIAN 37 | Weather: Clear Noon 38 | Task 0 -> more than 0.0071004936693366055 39 | Task 1 -> more than 0.03856641710143665 40 | Task 2 -> more than 0.03928511962584409 41 | Task 3 -> more than 0.039282971002912705 42 | Average Between Weathers 43 | Task 0 -> more than 0.0071004936693366055 44 | Task 1 -> more than 0.03856641710143665 45 | Task 2 -> more than 0.03928511962584409 46 | Task 3 -> more than 0.039282971002912705 47 | 48 | Avg. Kilometers driven before a collision to a VEHICLE 49 | Weather: Clear Noon 50 | Task 0 -> more than 0.0071004936693366055 51 | Task 1 -> more than 0.03856641710143665 52 | Task 2 -> more than 0.03928511962584409 53 | Task 3 -> more than 0.039282971002912705 54 | Average Between Weathers 55 | Task 0 -> more than 0.0071004936693366055 56 | Task 1 -> more than 0.03856641710143665 57 | Task 2 -> more than 0.03928511962584409 58 | Task 3 -> more than 0.039282971002912705 59 | 60 | Avg. Kilometers driven before a collision to a STATIC OBSTACLE 61 | Weather: Clear Noon 62 | Task 0 -> more than 0.0071004936693366055 63 | Task 1 -> more than 0.03856641710143665 64 | Task 2 -> more than 0.03928511962584409 65 | Task 3 -> 0.019641485501456352 66 | Average Between Weathers 67 | Task 0 -> more than 0.0071004936693366055 68 | Task 1 -> more than 0.03856641710143665 69 | Task 2 -> more than 0.03928511962584409 70 | Task 3 -> 0.019641485501456352 71 | 72 | Avg. Kilometers driven before going OUTSIDE OF THE ROAD 73 | Weather: Clear Noon 74 | Task 0 -> more than 0.0071004936693366055 75 | Task 1 -> 0.03856641710143665 76 | Task 2 -> 0.03928511962584409 77 | Task 3 -> 0.039282971002912705 78 | Average Between Weathers 79 | Task 0 -> more than 0.0071004936693366055 80 | Task 1 -> 0.03856641710143665 81 | Task 2 -> 0.03928511962584409 82 | Task 3 -> 0.039282971002912705 83 | 84 | Avg. 
Kilometers driven before invading the OPPOSITE LANE 85 | Weather: Clear Noon 86 | Task 0 -> more than 0.0071004936693366055 87 | Task 1 -> more than 0.03856641710143665 88 | Task 2 -> more than 0.03928511962584409 89 | Task 3 -> more than 0.039282971002912705 90 | Average Between Weathers 91 | Task 0 -> more than 0.0071004936693366055 92 | Task 1 -> more than 0.03856641710143665 93 | Task 2 -> more than 0.03928511962584409 94 | Task 3 -> more than 0.039282971002912705 95 | 96 | 97 | 98 | 99 | 100 | ----- Printing results for test weathers (Unseen in Training) ----- 101 | 102 | 103 | Percentage of Successful Episodes 104 | 105 | Weather: Clear Noon 106 | Task: 0 -> 1.0 107 | Task: 1 -> 0.0 108 | Task: 2 -> 0.0 109 | Task: 3 -> 0.0 110 | Average Between Weathers 111 | Task 0 -> 1.0 112 | Task 1 -> 0.0 113 | Task 2 -> 0.0 114 | Task 3 -> 0.0 115 | 116 | Average Percentage of Distance to Goal Travelled 117 | 118 | Weather: Clear Noon 119 | Task: 0 -> 0.8127653637426329 120 | Task: 1 -> 0.10658303206448155 121 | Task: 2 -> -0.20448736444348714 122 | Task: 3 -> -0.20446966646041384 123 | Average Between Weathers 124 | Task 0 -> 0.8127653637426329 125 | Task 1 -> 0.10658303206448155 126 | Task 2 -> -0.20448736444348714 127 | Task 3 -> -0.20446966646041384 128 | 129 | Avg. Kilometers driven before a collision to a PEDESTRIAN 130 | Weather: Clear Noon 131 | Task 0 -> more than 0.0071004936693366055 132 | Task 1 -> more than 0.03856641710143665 133 | Task 2 -> more than 0.03928511962584409 134 | Task 3 -> more than 0.039282971002912705 135 | Average Between Weathers 136 | Task 0 -> more than 0.0071004936693366055 137 | Task 1 -> more than 0.03856641710143665 138 | Task 2 -> more than 0.03928511962584409 139 | Task 3 -> more than 0.039282971002912705 140 | 141 | Avg. Kilometers driven before a collision to a VEHICLE 142 | Weather: Clear Noon 143 | Task 0 -> more than 0.0071004936693366055 144 | Task 1 -> more than 0.03856641710143665 145 | Task 2 -> more than 0.03928511962584409 146 | Task 3 -> more than 0.039282971002912705 147 | Average Between Weathers 148 | Task 0 -> more than 0.0071004936693366055 149 | Task 1 -> more than 0.03856641710143665 150 | Task 2 -> more than 0.03928511962584409 151 | Task 3 -> more than 0.039282971002912705 152 | 153 | Avg. Kilometers driven before a collision to a STATIC OBSTACLE 154 | Weather: Clear Noon 155 | Task 0 -> more than 0.0071004936693366055 156 | Task 1 -> more than 0.03856641710143665 157 | Task 2 -> more than 0.03928511962584409 158 | Task 3 -> 0.019641485501456352 159 | Average Between Weathers 160 | Task 0 -> more than 0.0071004936693366055 161 | Task 1 -> more than 0.03856641710143665 162 | Task 2 -> more than 0.03928511962584409 163 | Task 3 -> 0.019641485501456352 164 | 165 | Avg. Kilometers driven before going OUTSIDE OF THE ROAD 166 | Weather: Clear Noon 167 | Task 0 -> more than 0.0071004936693366055 168 | Task 1 -> 0.03856641710143665 169 | Task 2 -> 0.03928511962584409 170 | Task 3 -> 0.039282971002912705 171 | Average Between Weathers 172 | Task 0 -> more than 0.0071004936693366055 173 | Task 1 -> 0.03856641710143665 174 | Task 2 -> 0.03928511962584409 175 | Task 3 -> 0.039282971002912705 176 | 177 | Avg. 
Kilometers driven before invading the OPPOSITE LANE 178 | Weather: Clear Noon 179 | Task 0 -> more than 0.0071004936693366055 180 | Task 1 -> more than 0.03856641710143665 181 | Task 2 -> more than 0.03928511962584409 182 | Task 3 -> more than 0.039282971002912705 183 | Average Between Weathers 184 | Task 0 -> more than 0.0071004936693366055 185 | Task 1 -> more than 0.03856641710143665 186 | Task 2 -> more than 0.03928511962584409 187 | Task 3 -> more than 0.039282971002912705 188 | -------------------------------------------------------------------------------- /Docs/benchmark_creating.md: -------------------------------------------------------------------------------- 1 | Benchmarking your Agent 2 | --------------------------- 3 | 4 | 5 | In this tutorial we show: 6 | 7 | * [How to define a trivial agent with a forward going policy.](#defining-the-agent) 8 | * [How to define a basic experiment suite.](#defining-the-experiment-suite) 9 | 10 | 11 | #### Introduction 12 | 13 | ![Benchmark_structure](img/benchmark_diagram_small.png) 14 | 15 | The driving benchmark is associated with two other modules. 16 | The *agent* module is a controller that performs in 17 | another module: the *experiment suite*. 18 | Both modules are abstract classes that must be redefined by 19 | the user. 20 | 21 | The following code excerpt is 22 | an example of how to apply a driving benchmark: 23 | 24 | # We instantiate a forward agent, a simple policy that just sets 25 | # the throttle to 0.9 and the steering to zero 26 | agent = ForwardAgent() 27 | 28 | # We instantiate an experiment suite. Basically a set of experiments 29 | # that are going to be evaluated on this benchmark. 30 | experiment_suite = BasicExperimentSuite(city_name) 31 | 32 | # Now actually run the driving_benchmark 33 | # Besides the agent and experiment suite we should send 34 | # the city name (Town01, Town02) and the log name 35 | run_driving_benchmark(agent, experiment_suite, city_name, 36 | log_name, continue_experiment, 37 | host, port) 38 | 39 | 40 | 41 | Following this excerpt, there are two classes to be defined: 42 | the ForwardAgent() and the BasicExperimentSuite(). 43 | After that, the benchmark can be run with the "run_driving_benchmark" function. 44 | The summary of the execution, the [performance metrics](benchmark_metrics.md), is stored 45 | in a JSON file and printed to the screen. 46 | 47 | 48 | 49 | 50 | #### Defining the Agent 51 | 52 | The tested agent must inherit the base *Agent* class. 53 | Let's start by deriving a simple forward agent: 54 | 55 | from carla.agent.agent import Agent 56 | from carla.client import VehicleControl 57 | 58 | class ForwardAgent(Agent): 59 | 60 | 61 | To have its performance evaluated, the ForwardAgent derived class _must_ 62 | redefine the *run_step* function as it is done in the following excerpt: 63 | 64 | def run_step(self, measurements, sensor_data, directions, target): 65 | """ 66 | Function to run a control step in the CARLA vehicle. 67 | """ 68 | control = VehicleControl() 69 | control.throttle = 0.9 70 | return control 71 | 72 | 73 | This function receives the following parameters: 74 | 75 | * [Measurements](measurements.md): the entire state of the world received 76 | by the client from the CARLA Simulator. These measurements contain the agent position, orientation, 77 | dynamic objects information, etc. 78 | * [Sensor Data](cameras_and_sensors.md): The data measured by the defined sensors, 79 | such as LiDARs or RGB cameras. 80 | * Directions: Information from the high level planner.
Currently the planner sends 81 | a high level command from the following set: STRAIGHT, RIGHT, LEFT, NOTHING. 82 | * Target Position: The position and orientation of the target. 83 | 84 | With all this information, the *run_step* function is expected 85 | to return a [vehicle control message](measurements.md), containing: 86 | steering value, throttle value, brake value, etc. 87 | 88 | 89 | 90 | #### Defining the Experiment Suite 91 | 92 | 93 | To create an Experiment Suite class you need to perform 94 | the following steps: 95 | 96 | * Create your custom class by inheriting the ExperimentSuite base class. 97 | * Define the test and train weather conditions to be used. 98 | * Build the *Experiment* objects. 99 | 100 | 101 | 102 | ##### Definition 103 | 104 | 105 | The defined set of experiments must derive the *ExperimentSuite* class 106 | as in the following code excerpt: 107 | 108 | from carla.agent_benchmark.experiment import Experiment 109 | from carla.sensor import Camera 110 | from carla.settings import CarlaSettings 111 | 112 | from .experiment_suite import ExperimentSuite 113 | 114 | 115 | class BasicExperimentSuite(ExperimentSuite): 116 | 117 | ##### Define test and train weather conditions 118 | 119 | The user must select the weathers to be used. One should select the set 120 | of test weathers and the set of train weathers. This is defined as a 121 | class property as in the following example: 122 | 123 | @property 124 | def train_weathers(self): 125 | return [1] 126 | @property 127 | def test_weathers(self): 128 | return [1] 129 | 130 | 131 | ##### Building Experiments 132 | 133 | The [experiments are composed of a *task* that is defined by a set of *poses*](benchmark_structure.md). 134 | Let's start by selecting poses for one of the cities, Town01 for instance. 135 | First of all, we need to see all the possible positions; for that, with 136 | a CARLA simulator running, run in a terminal: 137 | 138 | python view_start_positions.py 139 | 140 | ![town01_positions](img/town01_positions.png) 141 | 142 | 143 | Now let's choose, for instance, 140 as the start position and 134 144 | as the end position. These two positions can be visualized by running: 145 | 146 | python view_start_positions.py --pos 140,134 --no-labels 147 | 148 | ![town01_positions](img/town01_140_134.png) 149 | 150 | Let's choose two more poses, one for going straight and another for a single turn. 151 | Let's also choose three poses for Town02: 152 | 153 | 154 | ![town01_positions](img/initial_positions.png) 155 | >Figure: The poses used in this basic *Experiment Suite* example. A pose is 156 | a tuple with the start and end position of a goal-directed episode. Start positions are 157 | shown in blue, end positions in red. Left: straight poses, 158 | where the goal is just straight away from the start position. Middle: one-turn 159 | episodes, where the goal is one turn away from the start point. Right: arbitrary position, 160 | where the goal is far away from the start position, usually more than one turn. 161 | 162 | 163 | We define each of these poses as a task. In addition, we set 164 | the number of dynamic objects for each of these tasks and repeat 165 | the arbitrary position task to have it also defined with dynamic 166 | objects.
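One more ingredient is needed: the experiment-building excerpt further below also attaches a `camera` sensor to the conditions. In the provided *BasicExperimentSuite* this is a single RGB camera, defined once and reused by every experiment (this definition is taken verbatim from the suite's source, shown later in this repository):

    # Single RGB camera used on every experiment
    camera = Camera('CameraRGB')
    camera.set(FOV=100)
    camera.set_image_size(800, 600)
    camera.set_position(2.0, 0.0, 1.4)
    camera.set_rotation(-15.0, 0, 0)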
In the following code excerpt we show the final 167 | defined positions and the number of dynamic objects for each task: 168 | 169 | # Define the start/end position below as tasks 170 | poses_task0 = [[7, 3]] 171 | poses_task1 = [[138, 17]] 172 | poses_task2 = [[140, 134]] 173 | poses_task3 = [[140, 134]] 174 | # Concatenate all the tasks 175 | poses_tasks = [poses_task0, poses_task1, poses_task2, poses_task3] 176 | # Add dynamic objects to tasks 177 | vehicles_tasks = [0, 0, 0, 20] 178 | pedestrians_tasks = [0, 0, 0, 50] 179 | 180 | 181 | Finally, by using the defined tasks, we can build the experiments 182 | vector as shown in the following code excerpt: 183 | 184 | 185 | experiments_vector = [] 186 | # The weathers used are the union of the test and train weathers 187 | for weather in used_weathers: 188 | for iteration in range(len(poses_tasks)): 189 | poses = poses_tasks[iteration] 190 | vehicles = vehicles_tasks[iteration] 191 | pedestrians = pedestrians_tasks[iteration] 192 | 193 | conditions = CarlaSettings() 194 | conditions.set( 195 | SendNonPlayerAgentsInfo=True, 196 | NumberOfVehicles=vehicles, 197 | NumberOfPedestrians=pedestrians, 198 | WeatherId=weather 199 | 200 | ) 201 | # Add all the cameras that were set for these experiments 202 | conditions.add_sensor(camera) 203 | experiment = Experiment() 204 | experiment.set( 205 | Conditions=conditions, 206 | Poses=poses, 207 | Task=iteration, 208 | Repetitions=1 209 | ) 210 | experiments_vector.append(experiment) 211 | 212 | 213 | 214 | 215 | 216 | The full code can be found at [basic_experiment_suite.py](https://github.com/carla-simulator/carla/blob/master/PythonClient/carla/driving_benchmark/experiment_suites/basic_experiment_suite.py) 217 | 218 | 219 | 220 | #### Expected Results 221 | 222 | First you need a CARLA Simulator running with a [fixed time-step](configuring_the_simulation/#fixed-time-step), 223 | so that the results you obtain are more or less reproducible. 224 | For that you should run the CARLA simulator as: 225 | 226 | ./CarlaUE4.sh /Game/Maps/ -windowed -world-port=2000 -benchmark -fps=10 227 | 228 | The example presented in this tutorial can be executed for Town01 as: 229 | 230 | ./benchmarks_084.py -c Town01 231 | 232 | You should expect these results: [town01_basic_forward_results](benchmark_basic_results_town01) 233 | 234 | For Town02: 235 | 236 | ./benchmarks_084.py -c Town02 237 | 238 | You should expect these results: [town02_basic_forward_results](benchmark_basic_results_town02) 239 | 240 | 241 | 242 | -------------------------------------------------------------------------------- /Docs/benchmark_metrics.md: -------------------------------------------------------------------------------- 1 | 2 | Driving Benchmark Performance Metrics 3 | ------------------------------ 4 | 5 | This page explains the performance metrics module. 6 | This module is used to compute a summary of results based on the actions 7 | performed by the agent during the benchmark. 8 | 9 | 10 | ### Provided performance metrics 11 | 12 | The driving benchmark performance metrics module provides the following performance metrics: 13 | 14 | * **Percentage of Success**: The percentage of episodes (poses from tasks) 15 | that the agent successfully completed. 16 | 17 | * **Average Completion**: The average distance towards the goal that the 18 | agent was able to travel. 19 | 20 | * **Off Road Intersection**: The number of times the agent goes out of the road.
21 | The intersection is only counted if the area of the vehicle outside 22 | of the road is bigger than a *threshold*. 23 | 24 | * **Other Lane Intersection**: The number of times the agent goes to the other 25 | lane. The intersection is only counted if the area of the vehicle on the 26 | other lane is bigger than a *threshold*. 27 | 28 | * **Vehicle Collisions**: The number of collisions with vehicles that had 29 | an impact bigger than a *threshold*. 30 | 31 | * **Pedestrian Collisions**: The number of collisions with pedestrians 32 | that had an impact bigger than a *threshold*. 33 | 34 | * **General Collisions**: The number of collisions with all other 35 | objects with an impact bigger than a *threshold*. 36 | 37 | 38 | ### Executing and Setting Parameters 39 | 40 | The metrics are computed as the final step of the benchmark, 41 | and a summary of the results is stored in a JSON file. 42 | Internally it is executed as follows: 43 | 44 | ```python 45 | metrics_object = Metrics(metrics_parameters) 46 | summary_dictionary = metrics_object.compute(path_to_execution_log) 47 | ``` 48 | 49 | The Metrics object's compute function 50 | receives the full path to the execution log. 51 | The Metrics class should be instantiated with some parameters. 52 | The parameters are: 53 | 54 | * **Threshold**: The threshold used by the metrics. 55 | * **Frames Recount**: The number of frames the agent needs to keep 56 | committing an infraction, after it starts, for 57 | it to be counted as another infraction. 58 | * **Frames Skip**: The number of frames that are 59 | skipped after a collision or an intersection starts. 60 | 61 | These parameters are defined as a property of the *Experiment Suite* 62 | base class and can be redefined in your 63 | [custom *Experiment Suite*](benchmark_creating/#defining-the-experiment-suite). 64 | 65 | The default parameters are: 66 | 67 | 68 | @property 69 | def metrics_parameters(self): 70 | """ 71 | Property to return the parameters for the metrics module 72 | Could be redefined depending on the needs of the user. 73 | """ 74 | return { 75 | 76 | 'intersection_offroad': {'frames_skip': 10, 77 | 'frames_recount': 20, 78 | 'threshold': 0.3 79 | }, 80 | 'intersection_otherlane': {'frames_skip': 10, 81 | 'frames_recount': 20, 82 | 'threshold': 0.4 83 | }, 84 | 'collision_other': {'frames_skip': 10, 85 | 'frames_recount': 20, 86 | 'threshold': 400 87 | }, 88 | 'collision_vehicles': {'frames_skip': 10, 89 | 'frames_recount': 30, 90 | 'threshold': 400 91 | }, 92 | 'collision_pedestrians': {'frames_skip': 5, 93 | 'frames_recount': 100, 94 | 'threshold': 300 95 | }, 96 | 97 | } 98 | -------------------------------------------------------------------------------- /Docs/benchmark_start.md: -------------------------------------------------------------------------------- 1 | Driving Benchmarks 2 | ================== 3 | 4 | The *benchmark tools* module is made 5 | to evaluate a driving controller (agent) and obtain 6 | metrics about its performance. 7 | 8 | This module is mainly designed for: 9 | 10 | * Users that work on developing autonomous driving agents and want 11 | to see how they perform in CARLA. 12 | 13 | In this section you will learn: 14 | 15 | * How to quickly get started and benchmark a trivial agent right away. 16 | * The general implementation [architecture of the driving 17 | benchmark module](benchmark_structure.md). 18 | * [How to set up your agent and create your 19 | own set of experiments](benchmark_creating.md).
20 | * The [performance metrics used](benchmark_metrics.md). 21 | 22 | 23 | 24 | 25 | Getting Started 26 | ---------------- 27 | 28 | As a way to familiarize yourself with the system we 29 | provide a trivial agent performing in a small 30 | set of experiments (Basic). To execute it, simply 31 | run: 32 | 33 | 34 | $ ./benchmarks_084.py 35 | 36 | 37 | Keep in mind that, to run the command above, you need a CARLA 0.8.4 simulator 38 | running at localhost and on port 2000. 39 | 40 | This benchmark example can be further configured. 41 | Run the help command to see the available options: 42 | 43 | $ ./benchmarks_084.py --help 44 | 45 | 46 | One of the available options is to continue 47 | from a previous benchmark execution. For example, 48 | to continue an experiment in the basic benchmark 49 | with a log name of "driving_benchmark_test", run: 50 | 51 | $ ./benchmarks_084.py --continue-experiment -n driving_benchmark_test 52 | 53 | 54 | !!! note 55 | If the log name already exists and you don't set it to continue, it 56 | will create another log under a different name. 57 | 58 | 59 | Benchmarks Available 60 | ------------------- 61 | 62 | ### CoRL 2017 63 | 64 | This suite is explained in the legacy CARLA paper: the 65 | [CoRL 66 | 2017 paper](http://proceedings.mlr.press/v78/dosovitskiy17a/dosovitskiy17a.pdf). 67 | The CoRL 2017 experiment suite can be run with a trivial agent by 68 | running: 69 | 70 | $ ./benchmarks_084.py --corl-2017 71 | 72 | When running the driving benchmark for the basic configuration 73 | you should [expect these results](benchmark_creating/#expected-results). 74 | 75 | 76 | ### CARLA 100 77 | 78 | 79 | The CARLA100 experiment suite can be run with a trivial agent by 80 | running: 81 | 82 | $ ./benchmarks_084.py --carla100 83 | 84 | -------------------------------------------------------------------------------- /Docs/benchmark_structure.md: -------------------------------------------------------------------------------- 1 | 2 | Benchmark Tools Structure 3 | ----------------------------------- 4 | 5 | The figure below shows the general structure of the driving 6 | benchmark module. 7 | 8 | 9 | 10 | ![Benchmark_structure](img/benchmark_diagram.png) 11 | >Figure: The general structure of the agent benchmark module. 12 | 13 | 14 | The *benchmark tools* module is responsible for evaluating a certain 15 | *agent* in an *experiment suite*. 16 | 17 | The *experiment suite* is an abstract module. 18 | Thus, the user must define their own derivation 19 | of *experiment suite*. We already provide the CoRL2017 suite and a simple 20 | *experiment suite* for testing. 21 | 22 | The *experiment suite* is composed of a set of *experiments*. 23 | Each *experiment* contains a *task* that consists of a set of navigation 24 | episodes, represented by a set of *poses*. 25 | These *poses* are tuples containing the start and end points of an 26 | episode. 27 | 28 | The *experiments* are also associated with a *condition*. A 29 | condition is represented by a [carla settings](carla_settings.md) object. 30 | The conditions specify simulation parameters such as: weather, sensor suite, number of 31 | vehicles and pedestrians, etc. 32 | 33 | 34 | The user should also derive an *agent* class. The *agent* is the active 35 | part which will be evaluated on the driving benchmark. 36 | 37 | The driving benchmark also contains two auxiliary modules. 38 | The *recording module* is used to keep track of all measurements and 39 | can be used to pause and continue a driving benchmark.
40 | The [*metrics module*](benchmark_metrics.md) is used to compute the performance metrics 41 | by using the recorded measurements. 42 | 43 | 44 | 45 | 46 | Example: CoRL 2017 47 | ---------------------- 48 | 49 | We already provide the CoRL 2017 experiment suite used to benchmark the 50 | agents for the [CoRL 2017 paper](http://proceedings.mlr.press/v78/dosovitskiy17a/dosovitskiy17a.pdf). 51 | 52 | The CoRL 2017 experiment suite has the following composition: 53 | 54 | * A total of 24 experiments for each CARLA town containing: 55 | * A task for going straight. 56 | * A task for making a single turn. 57 | * A task for going to an arbitrary position. 58 | * A task for going to an arbitrary position with dynamic objects. 59 | * Each task is composed of 25 poses that are repeated in 6 different weathers (Clear Noon, Heavy Rain Noon, Clear Sunset, After Rain Noon, Cloudy After Rain and Soft Rain Sunset). 60 | * The entire experiment set has 600 episodes. 61 | * The CoRL 2017 suite can take up to 24 hours to execute for Town01 and up to 15 62 | hours for Town02, depending on the agent performance. 63 | 64 | 65 | 66 | 67 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | CARLA Driving Benchmarks Repository 2 | =================================== 3 | 4 | 5 | This repository was made in order to store different driving benchmarks 6 | that run on the [CARLA simulator](https://github.com/carla-simulator/carla). 7 | 8 | Right now the following benchmarks are available: 9 | 10 | Version 0.8.4: 11 | 12 | * CoRL2017 - [Docs](Docs/benchmark_start.md/#corl-2017) / [Paper](http://proceedings.mlr.press/v78/dosovitskiy17a/dosovitskiy17a.pdf) 13 | 14 | * NoCrash - [Docs](Docs/benchmark_start.md/#carla-100) / [Paper](https://arxiv.org/abs/1904.08980) 15 | 16 | 17 | 18 | 19 | We are working on a 0.9.X version of the CoRL2017 benchmark. 20 | We happily accept new benchmarks as pull requests. 21 | 22 | 23 | License 24 | ------- 25 | 26 | CARLA Benchmarks specific code is distributed under the MIT License. 27 | -------------------------------------------------------------------------------- /benchmarks_084.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 4 | # Barcelona (UAB). 5 | # 6 | # This work is licensed under the terms of the MIT license. 7 | # For a copy, see <https://opensource.org/licenses/MIT>.
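# The following module docstring is shown by --help, since the argparse
# parser below is created with description=__doc__.
"""Run the CARLA 0.8.4 driving benchmarks (Basic, CoRL 2017 or CARLA100) with a trivial forward-driving agent."""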
8 | 9 | import argparse 10 | import logging 11 | 12 | from version084.benchmark_tools import run_driving_benchmark 13 | from version084.driving_benchmarks import CoRL2017, CARLA100 14 | from version084.benchmark_tools.experiment_suites.basic_experiment_suite import BasicExperimentSuite 15 | from version084.benchmark_tools.agent import ForwardAgent 16 | 17 | if __name__ == '__main__': 18 | 19 | argparser = argparse.ArgumentParser(description=__doc__) 20 | argparser.add_argument( 21 | '-v', '--verbose', 22 | action='store_true', 23 | dest='verbose', 24 | help='print some extra status information') 25 | argparser.add_argument( 26 | '-db', '--debug', 27 | action='store_true', 28 | dest='debug', 29 | help='print debug information') 30 | argparser.add_argument( 31 | '--host', 32 | metavar='H', 33 | default='localhost', 34 | help='IP of the host server (default: localhost)') 35 | argparser.add_argument( 36 | '-p', '--port', 37 | metavar='P', 38 | default=2000, 39 | type=int, 40 | help='TCP port to listen to (default: 2000)') 41 | argparser.add_argument( 42 | '-c', '--city-name', 43 | metavar='C', 44 | default='Town01', 45 | help='The town that is going to be used on the benchmark' 46 | + ' (needs to match the active town in the server, options: Town01 or Town02)') 47 | argparser.add_argument( 48 | '-n', '--log_name', 49 | metavar='T', 50 | default='test', 51 | help='The name of the log file to be created by the benchmark' 52 | ) 53 | argparser.add_argument( 54 | '--corl-2017', 55 | action='store_true', 56 | help='If you want to benchmark the corl-2017 suite instead of the Basic one' 57 | ) 58 | argparser.add_argument( 59 | '--carla100', 60 | action='store_true', 61 | help='If you want to use the carla100 benchmark instead of the Basic one' 62 | ) 63 | argparser.add_argument( 64 | '--continue-experiment', 65 | action='store_true', 66 | help='If you want to continue the experiment with the same name' 67 | ) 68 | 69 | args = argparser.parse_args() 70 | if args.debug: 71 | log_level = logging.DEBUG 72 | elif args.verbose: 73 | log_level = logging.INFO 74 | else: 75 | log_level = logging.WARNING 76 | 77 | logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level) 78 | logging.info('listening to server %s:%s', args.host, args.port) 79 | 80 | # We instantiate a forward agent, a simple policy that just sets 81 | # the throttle to 0.9 and the steering to zero 82 | agent = ForwardAgent() 83 | 84 | # We instantiate an experiment suite. Basically a set of experiments 85 | # that are going to be evaluated on this benchmark.
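# The suite is selected from the command-line flags: --corl-2017 and
# --carla100 pick the published suites; otherwise the small
# BasicExperimentSuite is used.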
86 | if args.corl_2017: 87 | experiment_suite = CoRL2017(args.city_name) 88 | elif args.carla100: 89 | experiment_suite = CARLA100(args.city_name) 90 | else: 91 | print(' WARNING: running the basic driving benchmark; to run the CoRL 2017' 92 | ' experiment suite, you should run' 93 | ' python benchmarks_084.py --corl-2017') 94 | experiment_suite = BasicExperimentSuite(args.city_name) 95 | 96 | # Now actually run the driving_benchmark 97 | run_driving_benchmark(agent, experiment_suite, args.city_name, 98 | args.log_name, args.continue_experiment, 99 | args.host, args.port) 100 | -------------------------------------------------------------------------------- /mkdocs.yml: -------------------------------------------------------------------------------- 1 | site_name: CARLA Benchmarks 2 | repo_url: https://github.com/carla-simulator/carla 3 | docs_dir: Docs 4 | theme: readthedocs 5 | 6 | pages: 7 | - Driving Benchmark: 8 | - 'Quick Start': 'benchmark_start.md' 9 | - 'General Structure': 'benchmark_structure.md' 10 | - 'Creating Your Benchmark': 'benchmark_creating.md' 11 | - 'Computed Performance Metrics': 'benchmark_metrics.md' 12 | - Appendix: 13 | - 'Driving Benchmark Sample Results Town01': 'benchmark_basic_results_town01.md' 14 | - 'Driving Benchmark Sample Results Town02': 'benchmark_basic_results_town02.md' 15 | 16 | 17 | markdown_extensions: 18 | - admonition 19 | -------------------------------------------------------------------------------- /version084/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/carla-simulator/driving-benchmarks/1614d19581d43eac9966e4001611cc8341bf5319/version084/__init__.py -------------------------------------------------------------------------------- /version084/benchmark_tools/__init__.py: -------------------------------------------------------------------------------- 1 | from .benchmark_runner import run_driving_benchmark 2 | -------------------------------------------------------------------------------- /version084/benchmark_tools/agent.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 3 | # Barcelona (UAB). 4 | # 5 | # This work is licensed under the terms of the MIT license. 6 | # For a copy, see <https://opensource.org/licenses/MIT>. 7 | # @author: german,felipecode 8 | 9 | 10 | from __future__ import print_function 11 | import abc 12 | from ..carla.client import VehicleControl 13 | 14 | 15 | class Agent(object): 16 | def __init__(self): 17 | self.__metaclass__ = abc.ABCMeta 18 | 19 | @abc.abstractmethod 20 | def run_step(self, measurements, sensor_data, directions, target): 21 | """ 22 | Function to be redefined by an agent.
23 | :param measurements: the state of the world, e.g. the vehicle speed 24 | :param sensor_data: the data from the defined sensors, e.g. images 25 | :param directions: the high level command from the planner 26 | :param target: the target position and orientation 27 | :returns: a carla VehicleControl object, with the steering/gas/brake for the agent 28 | """ 29 | 30 | class ForwardAgent(Agent): 31 | """ 32 | Simple derivation of the Agent class, 33 | a trivial agent that goes straight 34 | """ 35 | def run_step(self, measurements, sensor_data, directions, target): 36 | control = VehicleControl() 37 | control.throttle = 0.9 38 | 39 | return control -------------------------------------------------------------------------------- /version084/benchmark_tools/benchmark_runner.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 2 | # Barcelona (UAB). 3 | # 4 | # This work is licensed under the terms of the MIT license. 5 | # For a copy, see <https://opensource.org/licenses/MIT>. 6 | 7 | 8 | import abc 9 | import logging 10 | import math 11 | import time 12 | import numpy as np 13 | 14 | from ..carla.client import VehicleControl 15 | from ..carla.client import make_carla_client 16 | from ..carla.planner.planner import Planner 17 | from ..carla.settings import CarlaSettings 18 | from ..carla.tcp import TCPConnectionError 19 | 20 | from . import results_printer 21 | from .recording import Recording 22 | from .metrics import Metrics 23 | 24 | def get_vec_dist(x_dst, y_dst, x_src, y_src): 25 | vec = np.array([x_dst, y_dst]) - np.array([x_src, y_src]) 26 | dist = math.sqrt(vec[0] ** 2 + vec[1] ** 2) 27 | return vec / dist, dist 28 | 29 | 30 | def sldist(c1, c2): 31 | return math.sqrt((c2[0] - c1[0]) ** 2 + (c2[1] - c1[1]) ** 2) 32 | 33 | 34 | class BenchmarkRunner(object): 35 | """ 36 | The BenchmarkRunner class controls the execution of the benchmark, interfacing 37 | an Agent class with an experiment suite. 38 | 39 | 40 | The benchmark class must be inherited by a class that defines 41 | all the experiments to be run by the agent 42 | """ 43 | 44 | def __init__( 45 | self, 46 | city_name='Town01', 47 | name_to_save='Test', 48 | continue_experiment=False, 49 | save_images=False, 50 | distance_for_success=2.0 51 | ): 52 | """ 53 | Args 54 | city_name: 55 | name_to_save: 56 | continue_experiment: 57 | save_images: 58 | distance_for_success: 59 | collisions_as_failure: if this flag is set to true, episodes will terminate as failure when the car collides. 60 | """ 61 | 62 | self.__metaclass__ = abc.ABCMeta 63 | 64 | self._city_name = city_name 65 | self._base_name = name_to_save 66 | # The minimum distance for arriving at the goal point in 67 | # order to consider it a success 68 | self._distance_for_success = distance_for_success 69 | # The object used to record the benchmark and to be able to continue afterwards 70 | self._recording = Recording(name_to_save=name_to_save, 71 | continue_experiment=continue_experiment, 72 | save_images=save_images 73 | ) 74 | 75 | # We have a default planner instantiated that produces high level commands 76 | self._planner = Planner(city_name) 77 | self._map = self._planner._city_track.get_map() 78 | 79 | # To keep track of the previous collisions 80 | self._previous_pedestrian_collision = 0 81 | self._previous_vehicle_collision = 0 82 | self._previous_other_collision = 0 83 | 84 | def benchmark_agent(self, experiment_suite, agent, client): 85 | """ 86 | Function to benchmark the agent. 87 | It first checks the log file for this benchmark; 88 | if it exists, it continues from the experiment where it stopped.
89 | 90 | 91 | Args: 92 | experiment_suite 93 | agent: an agent object with the run step class implemented. 94 | client: 95 | 96 | 97 | Return: 98 | A dictionary with all the metrics computed from the 99 | agent running the set of experiments. 100 | """ 101 | 102 | # Instantiate a metric object that will be used to compute the metrics for 103 | # the benchmark afterwards. 104 | metrics_object = Metrics(experiment_suite.metrics_parameters, 105 | experiment_suite.dynamic_tasks) 106 | 107 | # Function that returns the current pose and task for this benchmark. 108 | start_pose, start_experiment, start_rep = self._recording.get_pose_experiment_rep( 109 | experiment_suite.get_number_of_poses_task(), experiment_suite.get_number_of_reps_poses()) 110 | 111 | logging.info('START') 112 | 113 | for experiment in experiment_suite.get_experiments()[int(start_experiment):]: 114 | positions = client.load_settings( 115 | experiment.conditions).player_start_spots 116 | 117 | self._recording.log_start(experiment.task) 118 | 119 | for pose in experiment.poses[start_pose:]: 120 | for rep in range(start_rep, experiment.repetitions): 121 | 122 | start_index = pose[0] 123 | end_index = pose[1] 124 | client.start_episode(start_index) 125 | # Print information on this episode 126 | logging.info('======== !!!! ==========') 127 | logging.info(' Start Position %d End Position %d ', 128 | start_index, end_index) 129 | 130 | self._recording.log_poses(start_index, end_index, 131 | experiment.Conditions.WeatherId) 132 | 133 | # Calculate the initial distance for this episode 134 | initial_distance = \ 135 | sldist( 136 | [positions[start_index].location.x, positions[start_index].location.y], 137 | [positions[end_index].location.x, positions[end_index].location.y]) 138 | 139 | time_out = experiment_suite.calculate_time_out( 140 | self._get_shortest_path(positions[start_index], positions[end_index])) 141 | 142 | logging.info('Timeout for Episode: %f', time_out) 143 | # running the agent 144 | (result, reward_vec, control_vec, final_time, remaining_distance, col_ped, 145 | col_veh, col_oth, number_of_red_lights, number_of_green_lights) = \ 146 | self._run_navigation_episode( 147 | agent, client, time_out, positions[end_index], 148 | str(experiment.Conditions.WeatherId) + '_' 149 | + str(experiment.task) + '_' + str(start_index) 150 | + '.' + str(end_index), experiment_suite.metrics_parameters, 151 | experiment_suite.collision_as_failure, 152 | experiment_suite.traffic_light_as_failure) 153 | 154 | # Write the general status of the episode that was just run 155 | self._recording.write_summary_results( 156 | experiment, pose, rep, initial_distance, 157 | remaining_distance, final_time, time_out, result, col_ped, col_veh, col_oth, 158 | number_of_red_lights, number_of_green_lights) 159 | 160 | # Write the details of this episode. 161 | self._recording.write_measurements_results(experiment, rep, pose, reward_vec, 162 | control_vec) 163 | if result > 0: 164 | logging.info('+++++ Target achieved in %f seconds! +++++', 165 | final_time) 166 | else: 167 | logging.info('----- Timeout! -----') 168 | 169 | start_rep = 0 170 | start_pose = 0 171 | 172 | 173 | self._recording.log_end() 174 | 175 | return metrics_object.compute(self._recording.path) 176 | 177 | def get_path(self): 178 | """ 179 | Returns the path where the log was saved.
180 | """ 181 | return self._recording.path 182 | 183 | def _get_directions(self, current_point, end_point): 184 | """ 185 | Returns the directions (high level command) to reach a certain goal 186 | """ 187 | 188 | directions = self._planner.get_next_command( 189 | (current_point.location.x, 190 | current_point.location.y, 0.22), 191 | (current_point.orientation.x, 192 | current_point.orientation.y, 193 | current_point.orientation.z), 194 | (end_point.location.x, end_point.location.y, 0.22), 195 | (end_point.orientation.x, end_point.orientation.y, end_point.orientation.z)) 196 | return directions 197 | 198 | def _get_shortest_path(self, start_point, end_point): 199 | """ 200 | Calculates the shortest path between two points considering the road network 201 | """ 202 | 203 | return self._planner.get_shortest_path_distance( 204 | [ 205 | start_point.location.x, start_point.location.y, 0.22], [ 206 | start_point.orientation.x, start_point.orientation.y, 0.22], [ 207 | end_point.location.x, end_point.location.y, end_point.location.z], [ 208 | end_point.orientation.x, end_point.orientation.y, end_point.orientation.z]) 209 | 210 | def _has_agent_collided(self, measurement, metrics_parameters): 211 | 212 | """ 213 | This function keeps state (the previous collision intensities) and 214 | only looks at one measurement at a time. 215 | """ 216 | collided_veh = 0 217 | collided_ped = 0 218 | collided_oth = 0 219 | 220 | if (measurement.collision_vehicles - self._previous_vehicle_collision) \ 221 | > metrics_parameters['collision_vehicles']['threshold']/2.0: 222 | collided_veh = 1 223 | if (measurement.collision_pedestrians - self._previous_pedestrian_collision) \ 224 | > metrics_parameters['collision_pedestrians']['threshold']/2.0: 225 | collided_ped = 1 226 | if (measurement.collision_other - self._previous_other_collision) \ 227 | > metrics_parameters['collision_other']['threshold']/2.0: 228 | collided_oth = 1 229 | 230 | self._previous_pedestrian_collision = measurement.collision_pedestrians 231 | self._previous_vehicle_collision = measurement.collision_vehicles 232 | self._previous_other_collision = measurement.collision_other 233 | 234 | return collided_ped, collided_veh, collided_oth 235 | 236 | def _is_traffic_light_active(self, agent, orientation): 237 | 238 | x_agent = agent.traffic_light.transform.location.x 239 | y_agent = agent.traffic_light.transform.location.y 240 | 241 | def search_closest_lane_point(x_agent, y_agent, depth):  # probes neighbouring points until one lies on a lane 242 | step_size = 4 243 | if depth > 1: 244 | return None 245 | try: 246 | degrees = self._map.get_lane_orientation_degrees([x_agent, y_agent, 38]) 247 | #print (degrees) 248 | except: 249 | return None 250 | 251 | if not self._map.is_point_on_lane([x_agent, y_agent, 38]): 252 | #print (" Not on lane ") 253 | result = search_closest_lane_point(x_agent + step_size, y_agent, depth+1) 254 | if result is not None: 255 | return result 256 | result = search_closest_lane_point(x_agent, y_agent + step_size, depth+1) 257 | if result is not None: 258 | return result 259 | result = search_closest_lane_point(x_agent + step_size, y_agent + step_size, depth+1) 260 | if result is not None: 261 | return result 262 | result = search_closest_lane_point(x_agent + step_size, y_agent - step_size, depth+1) 263 | if result is not None: 264 | return result 265 | result = search_closest_lane_point(x_agent - step_size, y_agent + step_size, depth+1) 266 | if result is not None: 267 | return result 268 | result = search_closest_lane_point(x_agent - step_size, y_agent, depth+1) 269 | if result is not None: 270 | return result
result = search_closest_lane_point(x_agent, y_agent - step_size, depth+1) 271 | if result is not None: 272 | return result 273 | result = search_closest_lane_point(x_agent - step_size, y_agent - step_size, depth+1) 274 | if result is not None: 275 | return result 276 | else: 277 | #print(" ON Lane ") 278 | if degrees < 6: 279 | return [x_agent, y_agent] 280 | else: 281 | return None 282 | 283 | closest_lane_point = search_closest_lane_point(x_agent, y_agent, 0) 284 | car_direction = math.atan2(orientation.y, orientation.x) + 3.1415 285 | if car_direction > 6.0: 286 | car_direction -= 6.0 287 | 288 | return math.fabs(car_direction - 289 | self._map.get_lane_orientation_degrees([closest_lane_point[0], closest_lane_point[1], 38]) 290 | ) < 1 291 | 292 | def _test_for_traffic_lights(self, measurement): 293 | """ 294 | 295 | This function tests if the car passed a traffic light, returning 'red' 296 | if it crossed a red light, 'green' if it crossed a green light, or None otherwise 297 | 298 | Args: 299 | measurement: all the measurements collected by carla 0.8.4 300 | 301 | Returns: 302 | 'red', 'green' or None 303 | """ 304 | 305 | def is_on_burning_point(_map, location): 306 | 307 | # We get the current lane orientation 308 | ori_x, ori_y = _map.get_lane_orientation([location.x, location.y, 38]) 309 | 310 | # We test walking in the direction of the lane 311 | future_location_x = location.x 312 | future_location_y = location.y 313 | 314 | for i in range(3): 315 | future_location_x += ori_x 316 | future_location_y += ori_y 317 | # Take a point on an intersection in the future 318 | location_on_intersection_x = future_location_x + 2 * ori_x 319 | location_on_intersection_y = future_location_y + 2 * ori_y 320 | 321 | if not _map.is_point_on_intersection([future_location_x, 322 | future_location_y, 323 | 38]) and \ 324 | _map.is_point_on_intersection([location_on_intersection_x, 325 | location_on_intersection_y, 326 | 38]): 327 | return True 328 | 329 | return False 330 | 331 | # Check nearest traffic light with the correct orientation state. 332 | player_x = measurement.player_measurements.transform.location.x 333 | player_y = measurement.player_measurements.transform.location.y 334 | 335 | # While the vehicle is not on an intersection, we verify if it burned a traffic light 336 | for agent in measurement.non_player_agents: 337 | if agent.HasField('traffic_light'): 338 | if not self._map.is_point_on_intersection([player_x, player_y, 38]): 339 | x_agent = agent.traffic_light.transform.location.x 340 | y_agent = agent.traffic_light.transform.location.y 341 | tl_vector, tl_dist = get_vec_dist(x_agent, y_agent, player_x, player_y) 342 | if self._is_traffic_light_active(agent, 343 | measurement.player_measurements. 344 | transform.orientation): 345 | if is_on_burning_point(self._map, 346 | measurement.player_measurements.transform.location)\ 347 | and tl_dist < 6.0: 348 | if agent.traffic_light.state != 0: # Not green 349 | return 'red' 350 | else: 351 | return 'green' 352 | 353 | return None 354 | 355 | def _run_navigation_episode( 356 | self, 357 | agent, 358 | client, 359 | time_out, 360 | target, 361 | episode_name, 362 | metrics_parameters, 363 | collision_as_failure, 364 | traffic_light_as_failure): 365 | """ 366 | Run one episode of the benchmark (pose) for a certain agent.
367 | 368 | 369 | Args: 370 | agent: the agent object 371 | client: an object of the carla client to communicate 372 | with the CARLA simulator 373 | time_out: the time limit to complete this episode 374 | target: the target to reach 375 | episode_name: The name for saving images of this episode 376 | metrics_parameters: The metrics parameters used to check for collisions 377 | 378 | """ 379 | 380 | # Send an initial command. 381 | measurements, sensor_data = client.read_data() 382 | client.send_control(VehicleControl()) 383 | 384 | initial_timestamp = measurements.game_timestamp 385 | current_timestamp = initial_timestamp 386 | 387 | # The vector containing all measurements produced on this episode 388 | measurement_vec = [] 389 | # The vector containing all controls produced on this episode 390 | control_vec = [] 391 | frame = 0 392 | distance = 10000 393 | col_ped, col_veh, col_oth = 0, 0, 0 394 | traffic_light_state, number_red_lights, number_green_lights = None, 0, 0 395 | fail = False 396 | success = False 397 | not_count = 0 398 | 399 | while not fail and not success: 400 | 401 | # Read data from server with the client 402 | measurements, sensor_data = client.read_data() 403 | # The directions to reach the goal are calculated. 404 | directions = self._get_directions(measurements.player_measurements.transform, target) 405 | # Agent processes the data. 406 | control = agent.run_step(measurements, sensor_data, directions, target) 407 | # Send the control commands to the vehicle 408 | client.send_control(control) 409 | 410 | # Save images if the flag is activated 411 | self._recording.save_images(sensor_data, episode_name, frame) 412 | 413 | current_x = measurements.player_measurements.transform.location.x 414 | current_y = measurements.player_measurements.transform.location.y 415 | 416 | logging.info("Controller is Inputting:") 417 | logging.info('Steer = %f Throttle = %f Brake = %f ', 418 | control.steer, control.throttle, control.brake) 419 | 420 | current_timestamp = measurements.game_timestamp 421 | logging.info('Timestamp %f', current_timestamp) 422 | # Get the current distance to the target 423 | 424 | distance = sldist([current_x, current_y], 425 | [target.location.x, target.location.y]) 426 | # Write the status of the run in verbose mode 427 | logging.info('Status:') 428 | logging.info( 429 | '[d=%f] c_x = %f, c_y = %f ---> t_x = %f, t_y = %f', 430 | float(distance), current_x, current_y, target.location.x, 431 | target.location.y) 432 | # Check if the agent has collided 433 | col_ped, col_veh, col_oth = self._has_agent_collided(measurements.player_measurements, 434 | metrics_parameters) 435 | # Test if the car crossed a traffic light 436 | traffic_light_state = self._test_for_traffic_lights(measurements) 437 | 438 | if traffic_light_state == 'red' and not_count == 0: 439 | number_red_lights += 1 440 | not_count = 20 441 | 442 | elif traffic_light_state == 'green' and not_count == 0: 443 | number_green_lights += 1 444 | not_count = 20 445 | 446 | else: 447 | not_count -= 1 448 | not_count = max(0, not_count) 449 | 450 | if distance < self._distance_for_success: 451 | success = True 452 | elif (current_timestamp - initial_timestamp) > (time_out * 1000): 453 | fail = True 454 | elif collision_as_failure and (col_ped or col_veh or col_oth): 455 | fail = True 456 | elif traffic_light_as_failure and traffic_light_state == 'red': 457 | fail = True 458 | logging.info('Traffic Lights:') 459 | logging.info( 460 | 'red %f green %f, total %f', 461 | number_red_lights, number_green_lights, number_red_lights +
number_green_lights) 462 | # Increment the vectors and append the measurements and controls. 463 | frame += 1 464 | measurement_vec.append(measurements.player_measurements) 465 | control_vec.append(control) 466 | 467 | if success: 468 | return 1, measurement_vec, control_vec, float( 469 | current_timestamp - initial_timestamp) / 1000.0, distance, col_ped, col_veh, col_oth, \ 470 | number_red_lights, number_green_lights 471 | return 0, measurement_vec, control_vec, time_out, distance, col_ped, col_veh, col_oth, \ 472 | number_red_lights, number_green_lights 473 | 474 | 475 | def run_driving_benchmark(agent, 476 | experiment_suite, 477 | city_name='Town01', 478 | log_name='Test', 479 | continue_experiment=False, 480 | host='127.0.0.1', 481 | port=2000 482 | ): 483 | while True: 484 | try: 485 | 486 | with make_carla_client(host, port) as client: 487 | # Hack to fix for the issue 310, we force a reset, so it does not get 488 | # the positions on first server reset. 489 | client.load_settings(CarlaSettings()) 490 | client.start_episode(0) 491 | 492 | # We instantiate the driving benchmark, that is the engine used to 493 | # benchmark an agent. The instantiation starts the log process, sets 494 | 495 | benchmark = BenchmarkRunner(city_name=city_name, 496 | name_to_save=log_name + '_' 497 | + type(experiment_suite).__name__ 498 | + '_' + city_name, 499 | continue_experiment=continue_experiment) 500 | # This function performs the benchmark. It returns a dictionary summarizing 501 | # the entire execution. 502 | 503 | benchmark_summary = benchmark.benchmark_agent(experiment_suite, agent, client) 504 | 505 | print("") 506 | print("") 507 | print("----- Printing results for training weathers (Seen in Training) -----") 508 | print("") 509 | print("") 510 | results_printer.print_summary(benchmark_summary, experiment_suite.train_weathers, 511 | benchmark.get_path()) 512 | 513 | print("") 514 | print("") 515 | print("----- Printing results for test weathers (Unseen in Training) -----") 516 | print("") 517 | print("") 518 | 519 | results_printer.print_summary(benchmark_summary, experiment_suite.test_weathers, 520 | benchmark.get_path()) 521 | 522 | break 523 | 524 | except TCPConnectionError as error: 525 | logging.error(error) 526 | time.sleep(1) 527 | -------------------------------------------------------------------------------- /version084/benchmark_tools/experiment.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 2 | # Barcelona (UAB). 3 | # 4 | # This work is licensed under the terms of the MIT license. 5 | # For a copy, see . 6 | 7 | from ..carla.settings import CarlaSettings 8 | 9 | 10 | class Experiment(object): 11 | """ 12 | Experiment defines a certain task, under conditions 13 | A task is associated with a set of poses, containing start and end pose. 14 | 15 | Conditions are associated with a carla Settings and describe the following: 16 | 17 | Number Of Vehicles 18 | Number Of Pedestrians 19 | Weather 20 | Random Seed of the agents, describing their behaviour. 
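Stepping back to `run_driving_benchmark` for a moment: a typical entry-point script wires an agent and a suite into it along the following lines. This is a sketch: the `ForwardAgent` class is illustrative, and the import paths (including the assumption that `run_driving_benchmark` lives in `benchmark_runner.py` and that `agent.py` exports an `Agent` base class) are inferred from the repository layout rather than taken from this file.

```python
# Hypothetical driver script; ForwardAgent stands in for any class
# implementing the run_step interface used by the runner above.
from version084.benchmark_tools.agent import Agent
from version084.benchmark_tools.benchmark_runner import run_driving_benchmark
from version084.benchmark_tools.experiment_suites.basic_experiment_suite import \
    BasicExperimentSuite
from version084.carla.client import VehicleControl


class ForwardAgent(Agent):
    """Trivial agent that always drives straight ahead."""

    def run_step(self, measurements, sensor_data, directions, target):
        control = VehicleControl()
        control.throttle = 0.9
        return control


if __name__ == '__main__':
    run_driving_benchmark(ForwardAgent(), BasicExperimentSuite('Town01'),
                          city_name='Town01', log_name='forward_agent_test')
```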
21 | 22 | """ 23 | 24 | def __init__(self): 25 | self.Task = 0 26 | self.TaskName = '' 27 | self.Conditions = CarlaSettings() 28 | self.Poses = [[]] 29 | self.Repetitions = 1 30 | 31 | def set(self, **kwargs): 32 | for key, value in kwargs.items(): 33 | if not hasattr(self, key): 34 | raise ValueError('Experiment: no key named %r' % key) 35 | setattr(self, key, value) 36 | 37 | 38 | 39 | @property 40 | def task(self): 41 | return self.Task 42 | @property 43 | def task_name(self): 44 | return self.TaskName 45 | 46 | @property 47 | def conditions(self): 48 | return self.Conditions 49 | 50 | @property 51 | def poses(self): 52 | return self.Poses 53 | 54 | @property 55 | def repetitions(self): 56 | return self.Repetitions 57 | -------------------------------------------------------------------------------- /version084/benchmark_tools/experiment_suites/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /version084/benchmark_tools/experiment_suites/basic_experiment_suite.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 2 | # Barcelona (UAB). 3 | # 4 | # This work is licensed under the terms of the MIT license. 5 | # For a copy, see . 6 | 7 | 8 | from __future__ import print_function 9 | 10 | from ..experiment import Experiment 11 | from ...carla.sensor import Camera 12 | from ...carla.settings import CarlaSettings 13 | 14 | from .experiment_suite import ExperimentSuite 15 | 16 | 17 | class BasicExperimentSuite(ExperimentSuite): 18 | 19 | @property 20 | def train_weathers(self): 21 | return [1] 22 | 23 | @property 24 | def test_weathers(self): 25 | return [1] 26 | 27 | def build_experiments(self): 28 | """ 29 | Creates the whole set of experiment objects, 30 | The experiments created depends on the selected Town. 31 | 32 | """ 33 | 34 | # We check the town, based on that we define the town related parameters 35 | # The size of the vector is related to the number of tasks, inside each 36 | # task there is also multiple poses ( start end, positions ) 37 | if self._city_name == 'Town01': 38 | poses_tasks = [[[7, 3]], [[138, 17]], [[140, 134]], [[140, 134]]] 39 | vehicles_tasks = [0, 0, 0, 20] 40 | pedestrians_tasks = [0, 0, 0, 50] 41 | else: 42 | poses_tasks = [[[4, 2]], [[37, 76]], [[19, 66]], [[19, 66]]] 43 | vehicles_tasks = [0, 0, 0, 15] 44 | pedestrians_tasks = [0, 0, 0, 50] 45 | 46 | # We set the camera 47 | # This single RGB camera is used on every experiment 48 | 49 | camera = Camera('CameraRGB') 50 | camera.set(FOV=100) 51 | camera.set_image_size(800, 600) 52 | camera.set_position(2.0, 0.0, 1.4) 53 | camera.set_rotation(-15.0, 0, 0) 54 | 55 | # Based on the parameters, creates a vector with experiment objects. 
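Before the construction loop, a brief note on the `Experiment.set` interface shown in experiment.py above: it rejects unknown keys, which catches typos in experiment definitions early. A quick illustration, using the class as defined above:

```python
# Illustrative use of the Experiment class defined above.
experiment = Experiment()
experiment.set(Task=2, Repetitions=3)  # fine: both attributes exist
try:
    experiment.set(Repetition=3)       # typo: missing the trailing 's'
except ValueError as err:
    print(err)                         # Experiment: no key named 'Repetition'
```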
56 | experiments_vector = []
57 | for weather in self.weathers:
58 | 
59 | for iteration in range(len(poses_tasks)):
60 | poses = poses_tasks[iteration]
61 | vehicles = vehicles_tasks[iteration]
62 | pedestrians = pedestrians_tasks[iteration]
63 | 
64 | conditions = CarlaSettings()
65 | conditions.set(
66 | SendNonPlayerAgentsInfo=True,
67 | NumberOfVehicles=vehicles,
68 | NumberOfPedestrians=pedestrians,
69 | WeatherId=weather
70 | 
71 | )
72 | # Add all the cameras that were set for this experiment
73 | conditions.add_sensor(camera)
74 | experiment = Experiment()
75 | experiment.set(
76 | Conditions=conditions,
77 | Poses=poses,
78 | Task=iteration,
79 | Repetitions=2
80 | )
81 | experiments_vector.append(experiment)
82 | 
83 | return experiments_vector
84 | -------------------------------------------------------------------------------- /version084/benchmark_tools/experiment_suites/experiment_suite.py: -------------------------------------------------------------------------------- 1 | # Subclasses redefine how the experiments are built and how the episode timeout is calculated
2 | import abc
3 | 
4 | 
5 | class ExperimentSuite(object):
6 | 
7 | def __init__(self, city_name):
8 | 
9 | self._city_name = city_name
10 | self._experiments = self.build_experiments()
11 | 
12 | def calculate_time_out(self, path_distance):
13 | """
14 | Return the episode timeout, in seconds, calculated from the
15 | distance to the goal (the caller converts it to milliseconds).
16 | This is the same timeout as used in the CoRL 2017 paper.
17 | """
18 | return ((path_distance / 1000.0) / 10.0) * 3600.0 + 10.0
19 | 
20 | def get_number_of_poses_task(self):
21 | """
22 | Get the number of poses each task has for this benchmark
23 | """
24 | 
25 | # Warning: assumes that all tasks have the same size
26 | 
27 | return len(self._experiments[0].poses)
28 | 
29 | def get_number_of_reps_poses(self):
30 | """
31 | Get the number of repetitions of each pose for this benchmark
32 | """
33 | 
34 | # Warning: assumes that all poses have the same number of repetitions
35 | 
36 | return self._experiments[0].repetitions
37 | 
38 | 
39 | def get_experiments(self):
40 | """
41 | Getter for the experiment set.
42 | """
43 | return self._experiments
44 | 
45 | @property
46 | def dynamic_tasks(self):
47 | """
48 | Returns the tasks that contain dynamic obstacles (vehicles or pedestrians)
49 | """
50 | dynamic_tasks = set()
51 | for exp in self._experiments:
52 | if exp.conditions.NumberOfVehicles > 0 or exp.conditions.NumberOfPedestrians > 0:
53 | dynamic_tasks.add(exp.task)
54 | 
55 | return list(dynamic_tasks)
56 | 
57 | @property
58 | def metrics_parameters(self):
59 | """
60 | Property to return the parameters for the metrics module.
61 | Can be redefined depending on the needs of the user.
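An aside with a worked example of `calculate_time_out` above, assuming `path_distance` is in meters (consistent with how the runner uses the result): a 700 m route is allotted the time needed to drive it at 10 km/h, plus a 10 second grace period.

```python
def calculate_time_out(path_distance):
    # Same formula as the method above: time to cover path_distance
    # meters at 10 km/h, expressed in seconds, plus 10 seconds of grace.
    return ((path_distance / 1000.0) / 10.0) * 3600.0 + 10.0

print(calculate_time_out(700.0))  # (0.7 km / 10 km/h) * 3600 + 10 = 262.0 s
```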
62 | """ 63 | return { 64 | 65 | 'intersection_offroad': {'frames_skip': 10, 66 | 'frames_recount': 20, 67 | 'threshold': 0.3 68 | }, 69 | 'intersection_otherlane': {'frames_skip': 10, 70 | 'frames_recount': 20, 71 | 'threshold': 0.4 72 | }, 73 | 'collision_other': {'frames_skip': 10, 74 | 'frames_recount': 20, 75 | 'threshold': 400 76 | }, 77 | 'collision_vehicles': {'frames_skip': 10, 78 | 'frames_recount': 30, 79 | 'threshold': 400 80 | }, 81 | 'collision_pedestrians': {'frames_skip': 5, 82 | 'frames_recount': 100, 83 | 'threshold': 300 84 | }, 85 | 86 | } 87 | 88 | @property 89 | def weathers(self): 90 | weathers = set(self.train_weathers) 91 | weathers.update(self.test_weathers) 92 | return weathers 93 | 94 | @property 95 | def collision_as_failure(self): 96 | return False 97 | 98 | @property 99 | def traffic_light_as_failure(self): 100 | return False 101 | 102 | @abc.abstractmethod 103 | def build_experiments(self): 104 | """ 105 | Returns a set of experiments to be evaluated 106 | Must be redefined in an inherited class. 107 | 108 | """ 109 | 110 | @abc.abstractproperty 111 | def train_weathers(self): 112 | """ 113 | Return the weathers that are considered as training conditions 114 | """ 115 | 116 | @abc.abstractproperty 117 | def test_weathers(self): 118 | """ 119 | Return the weathers that are considered as testing conditions 120 | """ 121 | -------------------------------------------------------------------------------- /version084/benchmark_tools/metrics.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 2 | # Barcelona (UAB). 3 | # 4 | # This work is licensed under the terms of the MIT license. 5 | # For a copy, see . 6 | 7 | 8 | import numpy as np 9 | import math 10 | import os 11 | 12 | sldist = lambda c1, c2: math.sqrt((c2[0] - c1[0]) ** 2 + (c2[1] - c1[1]) ** 2) 13 | flatten = lambda l: [item for sublist in l for item in sublist] 14 | 15 | 16 | class Metrics(object): 17 | """ 18 | The metrics class is made to take the driving measurements 19 | and calculate some specific performance metrics. 20 | 21 | """ 22 | 23 | def __init__(self, parameters, dynamic_tasks): 24 | """ 25 | Args 26 | parameters: A dictionary with the used parameters for checking how to count infractions 27 | dynamic_tasks: A list of the all dynamic tasks (That contain dynamic objects) 28 | """ 29 | 30 | self._parameters = parameters 31 | self._parameters['dynamic_tasks'] = dynamic_tasks 32 | 33 | def _divide_by_episodes(self, measurements_matrix, header): 34 | 35 | """ 36 | Divides the measurements matrix on different episodes. 37 | 38 | Args: 39 | measurements_matrix: The full measurements matrix 40 | header: The header from the measurements matrix 41 | 42 | """ 43 | 44 | # Read previous for position zero 45 | prev_start = measurements_matrix[0, header.index('start_point')] 46 | prev_end = measurements_matrix[0, header.index('end_point')] 47 | prev_exp_id = measurements_matrix[0, header.index('exp_id')] 48 | 49 | # Start at the position 1. 50 | i = 1 51 | prev_i_position = 0 52 | episode_matrix_metrics = [] 53 | 54 | while i < measurements_matrix.shape[0]: 55 | 56 | current_start = measurements_matrix[i, header.index('start_point')] 57 | current_end = measurements_matrix[i, header.index('end_point')] 58 | current_exp_id = measurements_matrix[i, header.index('exp_id')] 59 | 60 | # If there is a change in the position it means it is a new episode for sure. 
61 | if (current_start != prev_start and current_end != prev_end) \
62 | or current_exp_id != prev_exp_id:
63 | episode_matrix_metrics.append(measurements_matrix[prev_i_position:i, :])
64 | prev_i_position = i
65 | 
66 | prev_start = current_start
67 | prev_end = current_end
68 | prev_exp_id = current_exp_id
69 | 
70 | i += 1
71 | 
72 | episode_matrix_metrics.append(measurements_matrix[prev_i_position:, :])  # include the final row of the last episode
73 | 
74 | return episode_matrix_metrics
75 | 
76 | def _get_collisions(self, selected_matrix, header):
77 | """
78 | Get the number of collisions with pedestrians, vehicles or other objects
79 | Args:
80 | selected_matrix: The matrix with all the experiments summary
81 | header: The header, used to find the column positions
82 | 
83 | 
84 | """
85 | count_collisions_general = 0
86 | count_collisions_pedestrian = 0
87 | count_collisions_vehicle = 0
88 | i = 1
89 | # Computing general collisions
90 | while i < selected_matrix.shape[0]:
91 | if (selected_matrix[i, header.index('collision_other')]
92 | - selected_matrix[
93 | (i - self._parameters['collision_other']['frames_skip']), header.index(
94 | 'collision_other')]) > \
95 | self._parameters['collision_other']['threshold']:
96 | count_collisions_general += 1
97 | i += self._parameters['collision_other']['frames_recount']
98 | i += 1
99 | 
100 | i = 1
101 | # Computing collisions for vehicles
102 | while i < selected_matrix.shape[0]:
103 | if (selected_matrix[i, header.index('collision_vehicles')]
104 | - selected_matrix[
105 | (i - self._parameters['collision_vehicles']['frames_skip']), header.index(
106 | 'collision_vehicles')]) > \
107 | self._parameters['collision_vehicles']['threshold']:
108 | count_collisions_vehicle += 1
109 | i += self._parameters['collision_vehicles']['frames_recount']
110 | i += 1
111 | 
112 | i = 1
113 | 
114 | # Computing the collisions for pedestrians
115 | while i < selected_matrix.shape[0]:
116 | if (selected_matrix[i, header.index('collision_pedestrians')]
117 | - selected_matrix[i - self._parameters['collision_pedestrians']['frames_skip'],
118 | header.index('collision_pedestrians')]) > \
119 | self._parameters['collision_pedestrians']['threshold']:
120 | count_collisions_pedestrian += 1
121 | i += self._parameters['collision_pedestrians']['frames_recount']
122 | i += 1
123 | 
124 | return count_collisions_general, count_collisions_vehicle, count_collisions_pedestrian
125 | 
126 | def _get_distance_traveled(self, selected_matrix, header):
127 | """
128 | Compute the total distance travelled
129 | Args:
130 | selected_matrix: The matrix with all the experiments summary
131 | header: The header, used to find the column positions
132 | 
133 | 
134 | """
135 | 
136 | prev_x = selected_matrix[0, header.index('pos_x')]
137 | prev_y = selected_matrix[0, header.index('pos_y')]
138 | 
139 | i = 1
140 | accumulated_distance = 0
141 | 
142 | while i < selected_matrix.shape[0]:
143 | x = selected_matrix[i, header.index('pos_x')]
144 | y = selected_matrix[i, header.index('pos_y')]
145 | 
146 | accumulated_distance += sldist((x, y), (prev_x, prev_y))
147 | 
148 | prev_x = x
149 | prev_y = y
150 | 
151 | i += 1
152 | 
153 | return accumulated_distance / 1000.0  # meters to kilometers
154 | 
155 | def _get_out_of_road_lane(self, selected_matrix, header):
156 | 
157 | """
158 | Check for the situations where the agent goes out of the road.
159 | Args:
160 | selected_matrix: The matrix with all the experiments summary
161 | header: The header, used to find the column positions
162 | 
163 | 
164 | """
165 | 
166 | count_sidewalk_intersect = 0
167 | count_lane_intersect = 0
168 | 
169 | i = 0
170 | 
171 | while i < selected_matrix.shape[0]:
172 | 
173 | if (selected_matrix[i, header.index('intersection_offroad')]
174 | - selected_matrix[(i - self._parameters['intersection_offroad']['frames_skip']),
175 | header.index('intersection_offroad')]) \
176 | > self._parameters['intersection_offroad']['threshold']:
177 | count_sidewalk_intersect += 1
178 | i += self._parameters['intersection_offroad']['frames_recount']
179 | if i >= selected_matrix.shape[0]:
180 | break
181 | 
182 | if (selected_matrix[i, header.index('intersection_otherlane')]
183 | - selected_matrix[(i - self._parameters['intersection_otherlane']['frames_skip']),
184 | header.index('intersection_otherlane')]) \
185 | > self._parameters['intersection_otherlane']['threshold']:
186 | count_lane_intersect += 1
187 | i += self._parameters['intersection_otherlane']['frames_recount']
188 | 
189 | i += 1
190 | 
191 | return count_lane_intersect, count_sidewalk_intersect
192 | 
193 | 
194 | def _get_percentage_out_road(self, selected_matrix, header):
195 | 
196 | """
197 | Compute the percentage of frames in which the agent is off the road or on the other lane.
198 | Args:
199 | selected_matrix: The matrix with all the experiments summary
200 | header: The header, used to find the column positions
201 | """
202 | 
203 | # Counts the frames the agent spends outside the road or lane
204 | count_outside = 0
205 | 
206 | i = 0
207 | 
208 | # If the car stays outside for more than 50 consecutive frames we assume
209 | # it got stuck; counting those frames would distort the average
210 | consecutive_frames = 0
211 | 
212 | while i < selected_matrix.shape[0]:
213 | 
214 | if (selected_matrix[i, header.index('intersection_offroad')]) \
215 | > self._parameters['intersection_offroad']['threshold'] or \
216 | selected_matrix[i, header.index('intersection_otherlane')] \
217 | > self._parameters['intersection_otherlane']['threshold']:
218 | 
219 | if consecutive_frames < 50:
220 | count_outside += 1
221 | 
222 | consecutive_frames += 1
223 | else:
224 | consecutive_frames = 0
225 | i += 1
226 | 
227 | 
228 | return float(count_outside) / float(selected_matrix.shape[0])
229 | 
230 | 
231 | def compute(self, path):
232 | 
233 | """
234 | Compute a dictionary containing the following metrics
235 | 
236 | * Off Road Intersection: The number of times the agent goes out of the road.
237 | The intersection is only counted if the area of the vehicle outside
238 | of the road is bigger than a *threshold*.
239 | 
240 | * Other Lane Intersection: The number of times the agent goes to the other
241 | lane. The intersection is only counted if the area of the vehicle on the
242 | other lane is bigger than a *threshold*.
243 | 
244 | * Vehicle Collisions: The number of collisions with vehicles that have
245 | an impact bigger than a *threshold*.
246 | 
247 | * Pedestrian Collisions: The number of collisions with pedestrians
248 | that have an impact bigger than a *threshold*.
249 | 
250 | * General Collisions: The number of collisions with all other
251 | objects.
252 | 
253 | 
254 | Args:
255 | path: Path where the log files are.
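An aside on the three windowed counters in `_get_collisions` above, which share a single pattern: compare the cumulative infraction signal at frame `i` against its value `frames_skip` frames earlier, count an event when the jump exceeds `threshold`, then advance `frames_recount` frames so one crash is not counted many times over. Factored out, the pattern looks like this (a sketch, not how the class is organized; starting at `frames_skip` also avoids the negative indexing the original loops tolerate):

```python
import numpy as np

def count_events(cumulative, frames_skip, frames_recount, threshold):
    """Count threshold-crossing jumps in a cumulative per-frame signal."""
    count = 0
    i = frames_skip  # start late enough that i - frames_skip is a valid index
    while i < len(cumulative):
        if cumulative[i] - cumulative[i - frames_skip] > threshold:
            count += 1
            i += frames_recount  # skip ahead so one event is counted once
        i += 1
    return count

# Example: a collision intensity that jumps past the 400 threshold exactly once.
signal = np.concatenate([np.zeros(50), np.full(50, 500.0)])
print(count_events(signal, frames_skip=10, frames_recount=20, threshold=400))  # 1
```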
256 | 257 | """ 258 | 259 | with open(os.path.join(path, 'summary.csv'), "rU") as f: 260 | header = f.readline() 261 | header = header.split(',') 262 | header[-1] = header[-1][:-1] 263 | 264 | with open(os.path.join(path, 'measurements.csv'), "rU") as f: 265 | 266 | header_metrics = f.readline() 267 | header_metrics = header_metrics.split(',') 268 | header_metrics[-1] = header_metrics[-1][:-1] 269 | 270 | result_matrix = np.loadtxt(os.path.join(path, 'summary.csv'), delimiter=",", skiprows=1) 271 | 272 | # Corner Case: The presented test just had one episode 273 | if result_matrix.ndim == 1: 274 | result_matrix = np.expand_dims(result_matrix, axis=0) 275 | 276 | tasks = np.unique(result_matrix[:, header.index('exp_id')]) 277 | 278 | all_weathers = np.unique(result_matrix[:, header.index('weather')]) 279 | 280 | measurements_matrix = np.loadtxt(os.path.join(path, 'measurements.csv'), delimiter=",", 281 | skiprows=1) 282 | 283 | metrics_dictionary = {'episodes_completion': {w: [0] * len(tasks) for w in all_weathers}, 284 | 'intersection_offroad': {w: [[] for i in range(len(tasks))] for w in 285 | all_weathers}, 286 | 'intersection_otherlane': {w: [[] for i in range(len(tasks))] for w in 287 | all_weathers}, 288 | 'collision_pedestrians': {w: [[] for i in range(len(tasks))] for w in 289 | all_weathers}, 290 | 'collision_vehicles': {w: [[] for i in range(len(tasks))] for w in 291 | all_weathers}, 292 | 'collision_other': {w: [[] for i in range(len(tasks))] for w in 293 | all_weathers}, 294 | 'percentage_off_road': {w: [[] for i in range(len(tasks))] for w in 295 | all_weathers}, 296 | 'percentage_green_lights': {w: [0] * len(tasks) for w in 297 | all_weathers}, 298 | 'end_pedestrian_collision': {w: [0] * len(tasks) for w in 299 | all_weathers}, 300 | 'end_vehicle_collision': {w: [0] * len(tasks) for w in 301 | all_weathers}, 302 | 'end_other_collision': {w: [0] * len(tasks) for w in 303 | all_weathers}, 304 | 'episodes_fully_completed': {w: [0] * len(tasks) for w in 305 | all_weathers}, 306 | 'average_speed': {w: [0] * len(tasks) for w in all_weathers}, 307 | 'driven_kilometers': {w: [0] * len(tasks) for w in all_weathers} 308 | } 309 | 310 | for t in range(len(tasks)): 311 | experiment_results_matrix = result_matrix[ 312 | result_matrix[:, header.index('exp_id')] == tasks[t]] 313 | 314 | weathers = np.unique(experiment_results_matrix[:, header.index('weather')]) 315 | 316 | for w in weathers: 317 | 318 | experiment_results_matrix = result_matrix[ 319 | np.logical_and(result_matrix[:, header.index( 320 | 'exp_id')] == tasks[t], result_matrix[:, header.index('weather')] == w)] 321 | 322 | experiment_metrics_matrix = measurements_matrix[ 323 | np.logical_and(measurements_matrix[:, header_metrics.index( 324 | 'exp_id')] == float(tasks[t]), 325 | measurements_matrix[:, header_metrics.index('weather')] == float( 326 | w))] 327 | 328 | metrics_dictionary['end_pedestrian_collision'][w][t] = \ 329 | experiment_results_matrix[:, header.index('end_pedestrian_collision')].tolist() 330 | 331 | metrics_dictionary['end_vehicle_collision'][w][t] = \ 332 | experiment_results_matrix[:, header.index('end_vehicle_collision')].tolist() 333 | 334 | metrics_dictionary['end_other_collision'][w][t] = \ 335 | experiment_results_matrix[:, header.index('end_other_collision')].tolist() 336 | 337 | metrics_dictionary['percentage_green_lights'][w][t] = np.nan_to_num(( 338 | experiment_results_matrix[:, header.index('number_green_lights')]/ 339 | (experiment_results_matrix[:, header.index('number_green_lights')] + 340 | 
experiment_results_matrix[:, header.index('number_red_lights')]))).tolist() 341 | 342 | 343 | 344 | metrics_dictionary['episodes_fully_completed'][w][t] = \ 345 | experiment_results_matrix[:, header.index('result')].tolist() 346 | 347 | 348 | metrics_dictionary['episodes_completion'][w][t] = \ 349 | ((experiment_results_matrix[:, header.index('initial_distance')] 350 | - experiment_results_matrix[:, header.index('final_distance')]) 351 | / experiment_results_matrix[:, header.index('initial_distance')]).tolist() 352 | 353 | # Now we divide the experiment metrics matrix 354 | 355 | episode_experiment_metrics_matrix = self._divide_by_episodes( 356 | experiment_metrics_matrix, header_metrics) 357 | 358 | count = 0 359 | 360 | for episode_experiment_metrics in episode_experiment_metrics_matrix: 361 | 362 | km_run_episodes = self._get_distance_traveled( 363 | episode_experiment_metrics, header_metrics) 364 | metrics_dictionary['driven_kilometers'][w][t] += km_run_episodes 365 | metrics_dictionary['average_speed'][w][t] = \ 366 | km_run_episodes / (experiment_results_matrix[count, 367 | header.index( 368 | 'final_time')] / 3600.0) 369 | count += 1 370 | 371 | lane_road = self._get_out_of_road_lane( 372 | episode_experiment_metrics, header_metrics) 373 | 374 | metrics_dictionary['intersection_otherlane'][ 375 | w][t].append(lane_road[0]) 376 | metrics_dictionary['intersection_offroad'][ 377 | w][t].append(lane_road[1]) 378 | 379 | metrics_dictionary['percentage_off_road'][ 380 | w][t].append(self._get_percentage_out_road( 381 | episode_experiment_metrics, header_metrics)) 382 | 383 | 384 | if tasks[t] in set(self._parameters['dynamic_tasks']): 385 | 386 | collisions = self._get_collisions(episode_experiment_metrics, 387 | header_metrics) 388 | 389 | metrics_dictionary['collision_pedestrians'][ 390 | w][t].append(collisions[2]) 391 | metrics_dictionary['collision_vehicles'][ 392 | w][t].append(collisions[1]) 393 | metrics_dictionary['collision_other'][ 394 | w][t].append(collisions[0]) 395 | 396 | else: 397 | 398 | metrics_dictionary['collision_pedestrians'][ 399 | w][t].append(0) 400 | metrics_dictionary['collision_vehicles'][ 401 | w][t].append(0) 402 | metrics_dictionary['collision_other'][ 403 | w][t].append(0) 404 | 405 | return metrics_dictionary 406 | -------------------------------------------------------------------------------- /version084/benchmark_tools/recording.py: -------------------------------------------------------------------------------- 1 | import csv 2 | import datetime 3 | import os 4 | 5 | 6 | class Recording(object): 7 | 8 | def __init__(self, 9 | name_to_save, 10 | continue_experiment, 11 | save_images 12 | 13 | ): 14 | 15 | self._dict_summary = {'exp_id': -1, 16 | 'rep': -1, 17 | 'weather': -1, 18 | 'start_point': -1, 19 | 'end_point': -1, 20 | 'result': -1, 21 | 'initial_distance': -1, 22 | 'final_distance': -1, 23 | 'final_time': -1, 24 | 'time_out': -1, 25 | 'end_pedestrian_collision': -1, 26 | 'end_vehicle_collision': -1, 27 | 'end_other_collision': -1, 28 | 'number_red_lights': -1, 29 | 'number_green_lights': -1 30 | } 31 | self._dict_measurements = {'exp_id': -1, 32 | 'rep': -1, 33 | 'weather': -1, 34 | 'start_point': -1, 35 | 'end_point': -1, 36 | 'collision_other': -1, 37 | 'collision_pedestrians': -1, 38 | 'collision_vehicles': -1, 39 | 'intersection_otherlane': -1, 40 | 'intersection_offroad': -1, 41 | 'pos_x': -1, 42 | 'pos_y': -1, 43 | 'steer': -1, 44 | 'throttle': -1, 45 | 'brake': -1 46 | } 47 | 48 | # Just in the case is the first time and there is no 
benchmark results folder 49 | if not os.path.exists('_benchmarks_results'): 50 | os.mkdir('_benchmarks_results') 51 | 52 | # Generate the full path for the log files 53 | self._path = os.path.join('_benchmarks_results' 54 | , name_to_save 55 | ) 56 | 57 | # Check for continuation of experiment, also returns the last line, used for test purposes 58 | # If you don't want to continue it will create a new path name with a number. 59 | # Also returns the fieldnames for both measurements and summary, so you can keep the 60 | # previous order 61 | self._path, _, self._summary_fieldnames, self._measurements_fieldnames\ 62 | = self._continue_experiment(continue_experiment) 63 | 64 | self._create_log_files() 65 | 66 | # A log with a date file: to show when was the last access and log what was tested, 67 | now = datetime.datetime.now() 68 | self._internal_log_name = os.path.join(self._path, 'log_' + now.strftime("%Y%m%d%H%M")) 69 | open(self._internal_log_name, 'w').close() 70 | 71 | # store the save images flag, and already store the format for image saving 72 | self._save_images = save_images 73 | self._image_filename_format = os.path.join( 74 | self._path, '_images/episode_{:s}/{:s}/image_{:0>5d}.jpg') 75 | 76 | @property 77 | def path(self): 78 | return self._path 79 | 80 | def log_poses(self, start_index, end_index, weather_id): 81 | with open(self._internal_log_name, 'a+') as log: 82 | log.write(' Start Poses (%d %d ) on weather %d \n ' % 83 | (start_index, end_index, weather_id)) 84 | 85 | def log_poses_finish(self): 86 | with open(self._internal_log_name, 'a+') as log: 87 | log.write('Finished Task') 88 | 89 | def log_start(self, id_experiment): 90 | 91 | with open(self._internal_log_name, 'a+') as log: 92 | log.write('Start Task %s \n' % str(id_experiment)) 93 | 94 | 95 | def log_end(self): 96 | with open(self._internal_log_name, 'a+') as log: 97 | log.write('====== Finished Entire Benchmark ======') 98 | 99 | def write_summary_results(self, experiment, pose, rep, 100 | path_distance, remaining_distance, 101 | final_time, time_out, result, 102 | end_pedestrian, end_vehicle, end_other, 103 | number_red_lights, number_green_lights): 104 | """ 105 | Method to record the summary of an episode(pose) execution 106 | """ 107 | 108 | self._dict_summary['exp_id'] = experiment.task 109 | self._dict_summary['rep'] = rep 110 | self._dict_summary['weather'] = experiment.Conditions.WeatherId 111 | self._dict_summary['start_point'] = pose[0] 112 | self._dict_summary['end_point'] = pose[1] 113 | self._dict_summary['result'] = result 114 | self._dict_summary['initial_distance'] = path_distance 115 | self._dict_summary['final_distance'] = remaining_distance 116 | self._dict_summary['final_time'] = final_time 117 | self._dict_summary['time_out'] = time_out 118 | self._dict_summary['end_pedestrian_collision'] = end_pedestrian 119 | self._dict_summary['end_vehicle_collision'] = end_vehicle 120 | self._dict_summary['end_other_collision'] = end_other 121 | self._dict_summary['number_red_lights'] = number_red_lights 122 | self._dict_summary['number_green_lights'] = number_green_lights 123 | 124 | 125 | 126 | with open(os.path.join(self._path, 'summary.csv'), 'a+') as ofd: 127 | w = csv.DictWriter(ofd, self._dict_summary.keys()) 128 | w.fieldnames = self._summary_fieldnames 129 | 130 | w.writerow(self._dict_summary) 131 | 132 | def write_measurements_results(self, experiment, rep, pose, reward_vec, control_vec): 133 | """ 134 | Method to record the measurements, sensors, 135 | controls and status of the entire 
benchmark. 136 | """ 137 | with open(os.path.join(self._path, 'measurements.csv'), 'a+') as rfd: 138 | mw = csv.DictWriter(rfd, self._dict_measurements.keys()) 139 | mw.fieldnames = self._measurements_fieldnames 140 | for i in range(len(reward_vec)): 141 | self._dict_measurements['exp_id'] = experiment.task 142 | self._dict_measurements['rep'] = rep 143 | self._dict_measurements['start_point'] = pose[0] 144 | self._dict_measurements['end_point'] = pose[1] 145 | self._dict_measurements['weather'] = experiment.Conditions.WeatherId 146 | self._dict_measurements['collision_other'] = reward_vec[ 147 | i].collision_other 148 | self._dict_measurements['collision_pedestrians'] = reward_vec[ 149 | i].collision_pedestrians 150 | self._dict_measurements['collision_vehicles'] = reward_vec[ 151 | i].collision_vehicles 152 | self._dict_measurements['intersection_otherlane'] = reward_vec[ 153 | i].intersection_otherlane 154 | self._dict_measurements['intersection_offroad'] = reward_vec[ 155 | i].intersection_offroad 156 | self._dict_measurements['pos_x'] = reward_vec[ 157 | i].transform.location.x 158 | self._dict_measurements['pos_y'] = reward_vec[ 159 | i].transform.location.y 160 | self._dict_measurements['steer'] = control_vec[ 161 | i].steer 162 | self._dict_measurements['throttle'] = control_vec[ 163 | i].throttle 164 | self._dict_measurements['brake'] = control_vec[ 165 | i].brake 166 | 167 | mw.writerow(self._dict_measurements) 168 | 169 | def _create_log_files(self): 170 | """ 171 | Just create the log files and add the necessary header for it. 172 | """ 173 | 174 | if not self._experiment_exist(): 175 | os.mkdir(self._path) 176 | 177 | with open(os.path.join(self._path, 'summary.csv'), 'w') as ofd: 178 | sw = csv.DictWriter(ofd, self._dict_summary.keys()) 179 | sw.writeheader() 180 | if self._summary_fieldnames is None: 181 | self._summary_fieldnames = sw.fieldnames 182 | 183 | with open(os.path.join(self._path, 'measurements.csv'), 'w') as rfd: 184 | mw = csv.DictWriter(rfd, self._dict_measurements.keys()) 185 | mw.writeheader() 186 | if self._measurements_fieldnames is None: 187 | self._measurements_fieldnames = mw.fieldnames 188 | 189 | def _continue_experiment(self, continue_experiment): 190 | """ 191 | Get the line on the file for the experiment. 192 | If continue_experiment is false and experiment exist, generates a new file path 193 | 194 | """ 195 | 196 | def get_non_existent_path(f_name_path): 197 | """ 198 | Get the path to a filename which does not exist by incrementing path. 
199 | """ 200 | if not os.path.exists(f_name_path): 201 | return f_name_path 202 | filename, file_extension = os.path.splitext(f_name_path) 203 | i = 1 204 | new_f_name = "{}-{}{}".format(filename, i, file_extension) 205 | while os.path.exists(new_f_name): 206 | i += 1 207 | new_f_name = "{}-{}{}".format(filename, i, file_extension) 208 | return new_f_name 209 | 210 | # start the new path as the same one as before 211 | new_path = self._path 212 | summary_fieldnames = None 213 | measurements_fieldnames = None 214 | 215 | # if the experiment exist 216 | if self._experiment_exist(): 217 | 218 | # If you want to continue just get the last position 219 | if continue_experiment: 220 | line_on_file = self._get_last_position() 221 | # Get the previously used fileorder 222 | with open(os.path.join(self._path, 'summary.csv'), 'r') as ofd: 223 | summary_reader = csv.DictReader(ofd) 224 | summary_fieldnames = summary_reader.fieldnames 225 | with open(os.path.join(self._path, 'measurements.csv'), 'r') as ofd: 226 | measurements_reader = csv.DictReader(ofd) 227 | measurements_fieldnames = measurements_reader.fieldnames 228 | 229 | else: 230 | # Get a new non_conflicting path name 231 | new_path = get_non_existent_path(new_path) 232 | line_on_file = 1 233 | 234 | else: 235 | line_on_file = 1 236 | return new_path, line_on_file, summary_fieldnames, measurements_fieldnames 237 | 238 | def save_images(self, sensor_data, episode_name, frame): 239 | """ 240 | Save a image during the experiment 241 | """ 242 | if self._save_images: 243 | for name, image in sensor_data.items(): 244 | image.save_to_disk(self._image_filename_format.format( 245 | episode_name, name, frame)) 246 | 247 | def get_pose_experiment_rep(self, number_poses_task, repetitions): 248 | """ 249 | Based on the line in log file, return the current pose, experiment and repetition. 250 | If the line is zero, create new log files. 251 | 252 | """ 253 | # Warning: assumes that all tasks have the same size 254 | line_on_file = self._get_last_position() - 1 255 | if line_on_file == 0: 256 | return 0, 0, 0 257 | else: 258 | return int(line_on_file/repetitions) % number_poses_task, \ 259 | line_on_file // (number_poses_task * repetitions), \ 260 | line_on_file % repetitions 261 | 262 | 263 | def _experiment_exist(self): 264 | 265 | return os.path.exists(self._path) 266 | 267 | def _get_last_position(self): 268 | """ 269 | Get the last position on the summary experiment file 270 | With this you are able to continue from there 271 | 272 | Returns: 273 | int, position: 274 | """ 275 | # Try to open, if the file is not found 276 | try: 277 | with open(os.path.join(self._path, 'summary.csv')) as f: 278 | return sum(1 for _ in f) 279 | except IOError: 280 | return 0 281 | -------------------------------------------------------------------------------- /version084/benchmark_tools/results_printer.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | import json 4 | 5 | 6 | def print_summary(metrics_summary, weathers, path): 7 | """ 8 | We plot the summary of the testing for the set selected weathers. 
9 | 
10 | We take the raw data and print it the way it was described in the CoRL 2017 paper
11 | 
12 | """
13 | 
14 | # Improve readability by adding a weather dictionary
15 | weather_name_dict = {1: 'Clear Noon', 3: 'After Rain Noon',
16 | 6: 'Heavy Rain Noon', 8: 'Clear Sunset', 10: 'Rainy After Rain',
17 | 4: 'Cloudy After Rain', 14: 'Soft Rain Sunset'}
18 | 
19 | # First we write the entire dictionary to the benchmark folder.
20 | with open(os.path.join(path, 'metrics.json'), 'w') as fo:
21 | fo.write(json.dumps(metrics_summary))
22 | 
23 | # Second we print the metrics that are computed by averaging over episodes
24 | 
25 | metrics_to_average = [
26 | 'episodes_fully_completed',
27 | 'episodes_completion',
28 | 'percentage_off_road',
29 | 'percentage_green_lights'
30 | 
31 | ]
32 | # We compute the number of tasks based on the size of the completion metric
33 | number_of_tasks = len(list(metrics_summary['episodes_fully_completed'].items())[0][1])
34 | 
35 | for metric in metrics_to_average:
36 | 
37 | if metric == 'episodes_completion':
38 | print("Average Percentage of Distance to Goal Travelled ")
39 | elif metric == 'percentage_off_road':
40 | print("Average Percentage of Time Spent Off Road")
41 | elif metric == 'percentage_green_lights':
42 | print("Average Percentage of Green Traffic Lights Crossed")
43 | else:
44 | print("Percentage of Successful Episodes")
45 | 
46 | print("")
47 | values = metrics_summary[metric]
48 | print(" VALUES ")
49 | 
50 | metric_sum_values = np.zeros(number_of_tasks)
51 | for weather, tasks in values.items():
52 | if weather in set(weathers):
53 | print(' Weather: ', weather_name_dict[weather])
54 | count = 0
55 | for t in tasks:
56 | # if isinstance(t, np.ndarray) or isinstance(t, list):
57 | if t == []:
58 | print(' Metric Not Computed')
59 | else:
60 | print(' Task:', count, ' -> ', float(sum(t)) / float(len(t)))
61 | metric_sum_values[count] += (float(sum(t)) / float(len(t))) * 1.0 / float(
62 | len(weathers))
63 | 
64 | count += 1
65 | 
66 | print(' Average Between Weathers')
67 | for i in range(len(metric_sum_values)):
68 | print(' Task ', i, ' -> ', metric_sum_values[i])
69 | print("")
70 | 
71 | infraction_metrics = [
72 | 'collision_pedestrians',
73 | 'collision_vehicles',
74 | 'collision_other',
75 | 'intersection_offroad',
76 | 'intersection_otherlane'
77 | 
78 | ]
79 | 
80 | # We need to collect the total number of kilometers for each task
81 | 
82 | for metric in infraction_metrics:
83 | values_driven = metrics_summary['driven_kilometers']
84 | values = metrics_summary[metric]
85 | metric_sum_values = np.zeros(number_of_tasks)
86 | summed_driven_kilometers = np.zeros(number_of_tasks)
87 | 
88 | if metric == 'collision_pedestrians':
89 | print('Avg. Kilometers driven before a collision with a PEDESTRIAN')
90 | elif metric == 'collision_vehicles':
91 | print('Avg. Kilometers driven before a collision with a VEHICLE')
92 | elif metric == 'collision_other':
93 | print('Avg. Kilometers driven before a collision with a STATIC OBSTACLE')
94 | elif metric == 'intersection_offroad':
95 | print('Avg. Kilometers driven before going OUTSIDE OF THE ROAD')
96 | else:
97 | print('Avg.
Kilometers driven before invading the OPPOSITE LANE') 98 | 99 | # print (zip(values.items(), values_driven.items())) 100 | for items_metric, items_driven in zip(values.items(), values_driven.items()): 101 | weather = items_metric[0] 102 | tasks = items_metric[1] 103 | tasks_driven = items_driven[1] 104 | 105 | if weather in set(weathers): 106 | print(' Weather: ', weather_name_dict[weather]) 107 | count = 0 108 | for t, t_driven in zip(tasks, tasks_driven): 109 | # if isinstance(t, np.ndarray) or isinstance(t, list): 110 | if t == []: 111 | print('Metric Not Computed') 112 | else: 113 | if sum(t) > 0: 114 | print(' Task ', count, ' -> ', t_driven / float(sum(t))) 115 | else: 116 | print(' Task ', count, ' -> more than', t_driven) 117 | 118 | metric_sum_values[count] += float(sum(t)) 119 | summed_driven_kilometers[count] += t_driven 120 | 121 | count += 1 122 | print (' Average Between Weathers') 123 | for i in range(len(metric_sum_values)): 124 | if metric_sum_values[i] == 0: 125 | print(' Task ', i, ' -> more than ', summed_driven_kilometers[i]) 126 | else: 127 | print(' Task ', i, ' -> ', summed_driven_kilometers[i] / metric_sum_values[i]) 128 | print ("") 129 | 130 | print("") 131 | print("") 132 | -------------------------------------------------------------------------------- /version084/carla/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/carla-simulator/driving-benchmarks/1614d19581d43eac9966e4001611cc8341bf5319/version084/carla/__init__.py -------------------------------------------------------------------------------- /version084/carla/client.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 2 | # Barcelona (UAB). 3 | # 4 | # This work is licensed under the terms of the MIT license. 5 | # For a copy, see . 6 | 7 | """CARLA Client.""" 8 | 9 | import logging 10 | import struct 11 | 12 | from contextlib import contextmanager 13 | 14 | from . import sensor 15 | from . import tcp 16 | from . import util 17 | 18 | try: 19 | from . import carla_server_pb2 as carla_protocol 20 | except ImportError: 21 | raise RuntimeError('cannot import "carla_server_pb2.py", run the protobuf compiler to generate this file') 22 | 23 | try: 24 | import numpy 25 | except ImportError: 26 | raise RuntimeError('cannot import numpy, make sure numpy package is installed.') 27 | 28 | 29 | VehicleControl = carla_protocol.Control 30 | 31 | 32 | @contextmanager 33 | def make_carla_client(host, world_port, timeout=15): 34 | """Context manager for creating and connecting a CarlaClient.""" 35 | with util.make_connection(CarlaClient, host, world_port, timeout) as client: 36 | yield client 37 | 38 | 39 | class CarlaClient(object): 40 | """The CARLA client. Manages communications with the CARLA server.""" 41 | 42 | def __init__(self, host, world_port, timeout=15): 43 | self._world_client = tcp.TCPClient(host, world_port, timeout) 44 | self._stream_client = tcp.TCPClient(host, world_port + 1, timeout) 45 | self._control_client = tcp.TCPClient(host, world_port + 2, timeout) 46 | self._current_settings = None 47 | self._is_episode_requested = False 48 | self._sensors = {} 49 | 50 | def connect(self, connection_attempts=10): 51 | """ 52 | Try to establish a connection to a CARLA server at the given host:port. 
53 | """ 54 | self._world_client.connect(connection_attempts) 55 | 56 | def disconnect(self): 57 | """Disconnect from server.""" 58 | self._control_client.disconnect() 59 | self._stream_client.disconnect() 60 | self._world_client.disconnect() 61 | 62 | def connected(self): 63 | """Return whether there is an active connection.""" 64 | return self._world_client.connected() 65 | 66 | def load_settings(self, carla_settings): 67 | """ 68 | Load new settings and request a new episode based on these settings. 69 | carla_settings object must be convertible to a str holding the contents 70 | of a CarlaSettings.ini file. 71 | 72 | Return a protobuf object holding the scene description. 73 | """ 74 | self._current_settings = carla_settings 75 | return self._request_new_episode(carla_settings) 76 | 77 | def start_episode(self, player_start_index): 78 | """ 79 | Start the new episode at the player start given by the 80 | player_start_index. The list of player starts is retrieved by 81 | "load_settings". 82 | 83 | The new episode is started based on the last settings loaded by 84 | "load_settings". 85 | 86 | This function waits until the server answers with an EpisodeReady. 87 | """ 88 | if self._current_settings is None: 89 | raise RuntimeError('no settings loaded, cannot start episode') 90 | 91 | # if no new settings are loaded, request new episode with previous 92 | if not self._is_episode_requested: 93 | self._request_new_episode(self._current_settings) 94 | 95 | try: 96 | pb_message = carla_protocol.EpisodeStart() 97 | pb_message.player_start_spot_index = player_start_index 98 | self._world_client.write(pb_message.SerializeToString()) 99 | # Wait for EpisodeReady. 100 | data = self._world_client.read() 101 | if not data: 102 | raise RuntimeError('failed to read data from server') 103 | pb_message = carla_protocol.EpisodeReady() 104 | pb_message.ParseFromString(data) 105 | if not pb_message.ready: 106 | raise RuntimeError('cannot start episode: server failed to start episode') 107 | # We can start the agent clients now. 108 | self._stream_client.connect() 109 | self._control_client.connect() 110 | # Set again the status for no episode requested 111 | finally: 112 | self._is_episode_requested = False 113 | 114 | def read_data(self): 115 | """ 116 | Read the data sent from the server this frame. The episode must be 117 | started. Return a pair containing the protobuf object containing the 118 | measurements followed by the raw data of the sensors. 119 | """ 120 | # Read measurements. 121 | data = self._stream_client.read() 122 | if not data: 123 | raise RuntimeError('failed to read data from server') 124 | pb_message = carla_protocol.Measurements() 125 | pb_message.ParseFromString(data) 126 | # Read sensor data. 127 | return pb_message, dict(x for x in self._read_sensor_data()) 128 | 129 | def send_control(self, *args, **kwargs): 130 | """ 131 | Send the VehicleControl to be applied this frame. 132 | 133 | If synchronous mode was requested, the server will pause the simulation 134 | until this message is received. 
135 | """ 136 | if isinstance(args[0] if args else None, carla_protocol.Control): 137 | pb_message = args[0] 138 | else: 139 | pb_message = carla_protocol.Control() 140 | pb_message.steer = kwargs.get('steer', 0.0) 141 | pb_message.throttle = kwargs.get('throttle', 0.0) 142 | pb_message.brake = kwargs.get('brake', 0.0) 143 | pb_message.hand_brake = kwargs.get('hand_brake', False) 144 | pb_message.reverse = kwargs.get('reverse', False) 145 | self._control_client.write(pb_message.SerializeToString()) 146 | 147 | def _request_new_episode(self, carla_settings): 148 | """ 149 | Internal function to request a new episode. Prepare the client for a new 150 | episode by disconnecting agent clients. 151 | """ 152 | # Disconnect agent clients. 153 | self._stream_client.disconnect() 154 | self._control_client.disconnect() 155 | # Send new episode request. 156 | pb_message = carla_protocol.RequestNewEpisode() 157 | pb_message.ini_file = str(carla_settings) 158 | self._world_client.write(pb_message.SerializeToString()) 159 | # Read scene description. 160 | data = self._world_client.read() 161 | if not data: 162 | raise RuntimeError('failed to read data from server') 163 | pb_message = carla_protocol.SceneDescription() 164 | pb_message.ParseFromString(data) 165 | self._sensors = dict((sensor.id, sensor) \ 166 | for sensor in _make_sensor_parsers(pb_message.sensors)) 167 | self._is_episode_requested = True 168 | return pb_message 169 | 170 | def _read_sensor_data(self): 171 | while True: 172 | data = self._stream_client.read() 173 | if not data: 174 | raise StopIteration 175 | yield self._parse_sensor_data(data) 176 | 177 | def _parse_sensor_data(self, data): 178 | sensor_id = struct.unpack(' id else 'Unknown' 186 | getint32 = lambda data, index: struct.unpack('. 6 | 7 | """ 8 | Handy conversions for CARLA images. 9 | 10 | The functions here are provided for real-time display, if you want to save the 11 | converted images, save the images from Python without conversion and convert 12 | them afterwards with the C++ implementation at "Util/ImageConverter" as it 13 | provides considerably better performance. 14 | """ 15 | 16 | import math 17 | 18 | try: 19 | import numpy 20 | from numpy.matlib import repmat 21 | except ImportError: 22 | raise RuntimeError('cannot import numpy, make sure numpy package is installed') 23 | 24 | 25 | from . import sensor 26 | 27 | 28 | def to_bgra_array(image): 29 | """Convert a CARLA raw image to a BGRA numpy array.""" 30 | if not isinstance(image, sensor.Image): 31 | raise ValueError("Argument must be a carla.sensor.Image") 32 | array = numpy.frombuffer(image.raw_data, dtype=numpy.dtype("uint8")) 33 | array = numpy.reshape(array, (image.height, image.width, 4)) 34 | return array 35 | 36 | 37 | def to_rgb_array(image): 38 | """Convert a CARLA raw image to a RGB numpy array.""" 39 | array = to_bgra_array(image) 40 | # Convert BGRA to RGB. 41 | array = array[:, :, :3] 42 | array = array[:, :, ::-1] 43 | return array 44 | 45 | 46 | def labels_to_array(image): 47 | """ 48 | Convert an image containing CARLA semantic segmentation labels to a 2D array 49 | containing the label of each pixel. 50 | """ 51 | return to_bgra_array(image)[:, :, 2] 52 | 53 | 54 | def labels_to_cityscapes_palette(image): 55 | """ 56 | Convert an image containing CARLA semantic segmentation labels to 57 | Cityscapes palette. 
58 | """ 59 | classes = { 60 | 0: [0, 0, 0], # None 61 | 1: [70, 70, 70], # Buildings 62 | 2: [190, 153, 153], # Fences 63 | 3: [72, 0, 90], # Other 64 | 4: [220, 20, 60], # Pedestrians 65 | 5: [153, 153, 153], # Poles 66 | 6: [157, 234, 50], # RoadLines 67 | 7: [128, 64, 128], # Roads 68 | 8: [244, 35, 232], # Sidewalks 69 | 9: [107, 142, 35], # Vegetation 70 | 10: [0, 0, 255], # Vehicles 71 | 11: [102, 102, 156], # Walls 72 | 12: [220, 220, 0] # TrafficSigns 73 | } 74 | array = labels_to_array(image) 75 | result = numpy.zeros((array.shape[0], array.shape[1], 3)) 76 | for key, value in classes.items(): 77 | result[numpy.where(array == key)] = value 78 | return result 79 | 80 | 81 | def depth_to_array(image): 82 | """ 83 | Convert an image containing CARLA encoded depth-map to a 2D array containing 84 | the depth value of each pixel normalized between [0.0, 1.0]. 85 | """ 86 | array = to_bgra_array(image) 87 | array = array.astype(numpy.float32) 88 | # Apply (R + G * 256 + B * 256 * 256) / (256 * 256 * 256 - 1). 89 | normalized_depth = numpy.dot(array[:, :, :3], [65536.0, 256.0, 1.0]) 90 | normalized_depth /= 16777215.0 # (256.0 * 256.0 * 256.0 - 1.0) 91 | return normalized_depth 92 | 93 | 94 | def depth_to_logarithmic_grayscale(image): 95 | """ 96 | Convert an image containing CARLA encoded depth-map to a logarithmic 97 | grayscale image array. 98 | "max_depth" is used to omit the points that are far enough. 99 | """ 100 | normalized_depth = depth_to_array(image) 101 | # Convert to logarithmic depth. 102 | logdepth = numpy.ones(normalized_depth.shape) + \ 103 | (numpy.log(normalized_depth) / 5.70378) 104 | logdepth = numpy.clip(logdepth, 0.0, 1.0) 105 | logdepth *= 255.0 106 | # Expand to three colors. 107 | return numpy.repeat(logdepth[:, :, numpy.newaxis], 3, axis=2) 108 | 109 | 110 | def depth_to_local_point_cloud(image, color=None, max_depth=0.9): 111 | """ 112 | Convert an image containing CARLA encoded depth-map to a 2D array containing 113 | the 3D position (relative to the camera) of each pixel and its corresponding 114 | RGB color of an array. 115 | "max_depth" is used to omit the points that are far enough. 116 | """ 117 | far = 1000.0 # max depth in meters. 
118 | normalized_depth = depth_to_array(image) 119 | 120 | # (Intrinsic) K Matrix 121 | k = numpy.identity(3) 122 | k[0, 2] = image.width / 2.0 123 | k[1, 2] = image.height / 2.0 124 | k[0, 0] = k[1, 1] = image.width / \ 125 | (2.0 * math.tan(image.fov * math.pi / 360.0)) 126 | 127 | # 2d pixel coordinates 128 | pixel_length = image.width * image.height 129 | u_coord = repmat(numpy.r_[image.width-1:-1:-1], 130 | image.height, 1).reshape(pixel_length) 131 | v_coord = repmat(numpy.c_[image.height-1:-1:-1], 132 | 1, image.width).reshape(pixel_length) 133 | if color is not None: 134 | color = color.reshape(pixel_length, 3) 135 | normalized_depth = numpy.reshape(normalized_depth, pixel_length) 136 | 137 | # Search for pixels where the depth is greater than max_depth to 138 | # delete them 139 | max_depth_indexes = numpy.where(normalized_depth > max_depth) 140 | normalized_depth = numpy.delete(normalized_depth, max_depth_indexes) 141 | u_coord = numpy.delete(u_coord, max_depth_indexes) 142 | v_coord = numpy.delete(v_coord, max_depth_indexes) 143 | if color is not None: 144 | color = numpy.delete(color, max_depth_indexes, axis=0) 145 | 146 | # pd2 = [u,v,1] 147 | p2d = numpy.array([u_coord, v_coord, numpy.ones_like(u_coord)]) 148 | 149 | # P = [X,Y,Z] 150 | p3d = numpy.dot(numpy.linalg.inv(k), p2d) 151 | p3d *= normalized_depth * far 152 | 153 | # Formating the output to: 154 | # [[X1,Y1,Z1,R1,G1,B1],[X2,Y2,Z2,R2,G2,B2], ... [Xn,Yn,Zn,Rn,Gn,Bn]] 155 | if color is not None: 156 | # numpy.concatenate((numpy.transpose(p3d), color), axis=1) 157 | return sensor.PointCloud( 158 | image.frame_number, 159 | numpy.transpose(p3d), 160 | color_array=color) 161 | # [[X1,Y1,Z1],[X2,Y2,Z2], ... [Xn,Yn,Zn]] 162 | return sensor.PointCloud(image.frame_number, numpy.transpose(p3d)) 163 | -------------------------------------------------------------------------------- /version084/carla/planner/Town01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/carla-simulator/driving-benchmarks/1614d19581d43eac9966e4001611cc8341bf5319/version084/carla/planner/Town01.png -------------------------------------------------------------------------------- /version084/carla/planner/Town01.txt: -------------------------------------------------------------------------------- 1 | 0.0,0.0,-38.11000000 2 | 0.000000,0.000000,0.0 3 | 1.000000,1.000000,1.000000 4 | -16.43022,-16.43022,0.000 5 | 49, 41 6 | 0,0 0,40 40 7 | 0,40 0,0 40 8 | 48,40 41,40 7 9 | 41,40 48,40 7 10 | 48,0 48,40 40 11 | 48,40 48,0 40 12 | 0,0 11,0 11 13 | 11,0 0,0 11 14 | 41,0 48,0 7 15 | 48,0 41,0 7 16 | 41,40 11,40 30 17 | 11,40 41,40 30 18 | 41,0 41,7 7 19 | 41,7 41,0 7 20 | 11,40 0,40 11 21 | 0,40 11,40 11 22 | 11,0 19,0 8 23 | 19,0 11,0 8 24 | 11,40 11,24 16 25 | 11,24 11,40 16 26 | 41,24 41,40 16 27 | 41,40 41,24 16 28 | 11,24 11,16 8 29 | 11,16 11,24 8 30 | 41,24 11,24 30 31 | 11,24 41,24 30 32 | 41,16 41,24 8 33 | 41,24 41,16 8 34 | 11,16 11,7 9 35 | 11,7 11,16 9 36 | 41,16 11,16 30 37 | 11,16 41,16 30 38 | 41,7 41,16 9 39 | 41,16 41,7 9 40 | 11,7 11,0 7 41 | 11,0 11,7 7 42 | 41,7 19,7 22 43 | 19,7 41,7 22 44 | 19,0 41,0 22 45 | 41,0 19,0 22 46 | 19,7 11,7 8 47 | 11,7 19,7 8 48 | 19,0 19,7 7 49 | 19,7 19,0 7 50 | -------------------------------------------------------------------------------- /version084/carla/planner/Town01Central.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/carla-simulator/driving-benchmarks/1614d19581d43eac9966e4001611cc8341bf5319/version084/carla/planner/Town01Central.png -------------------------------------------------------------------------------- /version084/carla/planner/Town01Lanes.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/carla-simulator/driving-benchmarks/1614d19581d43eac9966e4001611cc8341bf5319/version084/carla/planner/Town01Lanes.png -------------------------------------------------------------------------------- /version084/carla/planner/Town02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/carla-simulator/driving-benchmarks/1614d19581d43eac9966e4001611cc8341bf5319/version084/carla/planner/Town02.png -------------------------------------------------------------------------------- /version084/carla/planner/Town02.txt: -------------------------------------------------------------------------------- 1 | 5.4400,-107.48000,-38.11000000 2 | 0.000000,0.000000,0.000000 3 | 1.000000,1.000000,1.000000 4 | -16.43022,-16.43022,0.000 5 | 25, 25 6 | 0,10 0,24 14 7 | 0,24 0,10 14 8 | 24,24 6,24 18 9 | 6,24 24,24 18 10 | 24,0 24,10 10 11 | 24,10 24,0 10 12 | 0,0 24,0 24 13 | 24,0 0,0 24 14 | 0,10 0,0 10 15 | 0,0 0,10 10 16 | 24,10 24,16 6 17 | 24,16 24,10 6 18 | 0,10 6,10 6 19 | 6,10 0,10 6 20 | 6,24 0,24 6 21 | 0,24 6,24 6 22 | 6,10 17,10 11 23 | 17,10 6,10 11 24 | 6,24 6,16 8 25 | 6,16 6,24 8 26 | 24,16 24,24 8 27 | 24,24 24,16 8 28 | 6,16 6,10 6 29 | 6,10 6,16 6 30 | 24,16 17,16 7 31 | 17,16 24,16 7 32 | 17,16 6,16 11 33 | 6,16 17,16 11 34 | 17,10 24,10 7 35 | 24,10 17,10 7 36 | 17,16 17,10 6 37 | 17,10 17,16 6 38 | -------------------------------------------------------------------------------- /version084/carla/planner/Town02Big.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/carla-simulator/driving-benchmarks/1614d19581d43eac9966e4001611cc8341bf5319/version084/carla/planner/Town02Big.png -------------------------------------------------------------------------------- /version084/carla/planner/Town02Central.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/carla-simulator/driving-benchmarks/1614d19581d43eac9966e4001611cc8341bf5319/version084/carla/planner/Town02Central.png -------------------------------------------------------------------------------- /version084/carla/planner/Town02Lanes.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/carla-simulator/driving-benchmarks/1614d19581d43eac9966e4001611cc8341bf5319/version084/carla/planner/Town02Lanes.png -------------------------------------------------------------------------------- /version084/carla/planner/__init__.py: -------------------------------------------------------------------------------- 1 | from .planner import Planner 2 | -------------------------------------------------------------------------------- /version084/carla/planner/astar.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 2 | # Barcelona (UAB). 3 | # 4 | # This work is licensed under the terms of the MIT license. 5 | # For a copy, see . 
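The `AStar` implementation that follows is self-contained. Exercising it on a toy grid looks like the sketch below, based on the `init_grid`/`solve` signatures defined next; note that `solve` takes a 2D array which it marks up for debug printing:

```python
import numpy as np

# Illustrative usage of the AStar class defined below.
astar = AStar()
walls = [(1, 1), (1, 2)]                      # cells marked unreachable
astar.init_grid(4, 4, walls, (0, 0), (3, 3))  # width, height, walls, start, end
path = astar.solve(np.zeros((4, 4)))          # solve() marks this array up while searching
print(path)                                   # e.g. [(0, 0), (1, 0), ..., (3, 3)]
```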
6 | 7 | import heapq 8 | 9 | 10 | class Cell(object): 11 | def __init__(self, x, y, reachable): 12 | """Initialize new cell. 13 | 14 | @param reachable is cell reachable? not a wall? 15 | @param x cell x coordinate 16 | @param y cell y coordinate 17 | @param g cost to move from the starting cell to this cell. 18 | @param h estimation of the cost to move from this cell 19 | to the ending cell. 20 | @param f f = g + h 21 | """ 22 | self.reachable = reachable 23 | self.x = x 24 | self.y = y 25 | self.parent = None 26 | self.g = 0 27 | self.h = 0 28 | self.f = 0 29 | 30 | def __lt__(self, other): 31 | return self.g < other.g 32 | 33 | 34 | class AStar(object): 35 | def __init__(self): 36 | # open list 37 | self.opened = [] 38 | heapq.heapify(self.opened) 39 | # visited cells list 40 | self.closed = set() 41 | # grid cells 42 | self.cells = [] 43 | self.grid_height = None 44 | self.grid_width = None 45 | self.start = None 46 | self.end = None 47 | 48 | def init_grid(self, width, height, walls, start, end): 49 | """Prepare grid cells, walls. 50 | 51 | @param width grid's width. 52 | @param height grid's height. 53 | @param walls list of wall x,y tuples. 54 | @param start grid starting point x,y tuple. 55 | @param end grid ending point x,y tuple. 56 | """ 57 | self.grid_height = height 58 | self.grid_width = width 59 | for x in range(self.grid_width): 60 | for y in range(self.grid_height): 61 | if (x, y) in walls: 62 | reachable = False 63 | else: 64 | reachable = True 65 | self.cells.append(Cell(x, y, reachable)) 66 | self.start = self.get_cell(*start) 67 | self.end = self.get_cell(*end) 68 | 69 | def get_heuristic(self, cell): 70 | """Compute the heuristic value H for a cell. 71 | 72 | Distance between this cell and the ending cell multiply by 10. 73 | 74 | @returns heuristic value H 75 | """ 76 | return 10 * (abs(cell.x - self.end.x) + abs(cell.y - self.end.y)) 77 | 78 | def get_cell(self, x, y): 79 | """Returns a cell from the cells list. 80 | 81 | @param x cell x coordinate 82 | @param y cell y coordinate 83 | @returns cell 84 | """ 85 | return self.cells[x * self.grid_height + y] 86 | 87 | def get_adjacent_cells(self, cell): 88 | """Returns adjacent cells to a cell. 89 | 90 | Clockwise starting from the one on the right. 91 | 92 | @param cell get adjacent cells for this cell 93 | @returns adjacent cells list. 94 | """ 95 | cells = [] 96 | if cell.x < self.grid_width - 1: 97 | cells.append(self.get_cell(cell.x + 1, cell.y)) 98 | if cell.y > 0: 99 | cells.append(self.get_cell(cell.x, cell.y - 1)) 100 | if cell.x > 0: 101 | cells.append(self.get_cell(cell.x - 1, cell.y)) 102 | if cell.y < self.grid_height - 1: 103 | cells.append(self.get_cell(cell.x, cell.y + 1)) 104 | return cells 105 | 106 | def get_path(self): 107 | cell = self.end 108 | path = [(cell.x, cell.y)] 109 | while cell.parent is not self.start: 110 | cell = cell.parent 111 | path.append((cell.x, cell.y)) 112 | 113 | path.append((self.start.x, self.start.y)) 114 | path.reverse() 115 | return path 116 | 117 | def update_cell(self, adj, cell): 118 | """Update adjacent cell. 119 | 120 | @param adj adjacent cell to current cell 121 | @param cell current cell being processed 122 | """ 123 | adj.g = cell.g + 10 124 | adj.h = self.get_heuristic(adj) 125 | adj.parent = cell 126 | adj.f = adj.h + adj.g 127 | 128 | def solve(self, printing_grid): 129 | """Solve maze, find path to ending cell. 130 | 131 | @returns path or None if not found. 
132 |         """
133 |         # add starting cell to open heap queue
134 |         heapq.heappush(self.opened, (self.start.f, self.start))
135 |         printing_grid[self.start.x, self.start.y] = 2.0
136 |         printing_grid[self.end.x, self.end.y] = 4.0
137 | 
138 |         while len(self.opened):
139 |             # pop cell from heap queue
140 |             _, cell = heapq.heappop(self.opened)
141 | 
142 |             # add cell to closed list so we don't process it twice
143 |             printing_grid[cell.x, cell.y] = 3.0
144 | 
145 |             self.closed.add(cell)
146 |             # if ending cell, return found path
147 |             if cell is self.end:
148 |                 return self.get_path()
149 |             # get adjacent cells for cell
150 |             adj_cells = self.get_adjacent_cells(cell)
151 |             for adj_cell in adj_cells:
152 |                 if adj_cell.reachable and adj_cell not in self.closed:
153 |                     if (adj_cell.f, adj_cell) in self.opened:
154 |                         # if adj cell in open list, check if current path is
155 |                         # better than the one previously found
156 |                         # for this adj cell.
157 |                         if adj_cell.g > cell.g + 10:
158 |                             self.update_cell(adj_cell, cell)
159 |                     else:
160 |                         self.update_cell(adj_cell, cell)
161 |                         # add adj cell to open list
162 |                         heapq.heappush(self.opened, (adj_cell.f, adj_cell))
163 | 
--------------------------------------------------------------------------------
/version084/carla/planner/bezier.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | try:
3 |     from scipy.special import comb
4 | except ImportError:  # scipy.misc.comb was removed in newer SciPy releases
5 |     from scipy.misc import comb
6 | 
7 | 
8 | def bernstein_poly(i, n, t):
9 |     """
10 |     The Bernstein polynomial of n, i as a function of t
11 |     """
12 | 
13 |     return comb(n, i) * (t ** (n - i)) * (1 - t) ** i
14 | 
15 | 
16 | def bezier_curve(points, nTimes=1000):
17 |     """
18 |     Given a set of control points, return the
19 |     bezier curve defined by the control points.
20 | 
21 |     points should be a list of lists, or list of tuples
22 |     such as [ [1,1],
23 |               [2,3],
24 |               [4,5], ..[Xn, Yn] ]
25 |     nTimes is the number of time steps, defaults to 1000
26 | 
27 |     See http://processingjs.nihongoresources.com/bezierinfo/
28 |     """
29 | 
30 |     nPoints = len(points)
31 |     xPoints = np.array([p[0] for p in points])
32 |     yPoints = np.array([p[1] for p in points])
33 | 
34 |     t = np.linspace(0.0, 1.0, nTimes)
35 | 
36 |     polynomial_array = np.array([bernstein_poly(i, nPoints - 1, t) for i in range(0, nPoints)])
37 | 
38 |     xvals = np.dot(xPoints, polynomial_array)
39 |     yvals = np.dot(yPoints, polynomial_array)
40 | 
41 |     return xvals, yvals
42 | 
--------------------------------------------------------------------------------
/version084/carla/planner/city_track.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
2 | # Barcelona (UAB).
3 | #
4 | # This work is licensed under the terms of the MIT license.
5 | # For a copy, see .
6 | import numpy as np
7 | 
8 | from .graph import sldist
9 | 
10 | from .astar import AStar
11 | from .map import CarlaMap
12 | import sys
13 | 
14 | 
15 | class CityTrack(object):
16 | 
17 |     def __init__(self, city_name):
18 | 
19 |         # These values are fixed for every city.
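        # (Annotation: node_density is the number of pixels per graph node
        # and pixel_density is world units per pixel, so one node-to-node
        # step spans roughly 50 * 0.1643 = 8.2 world units; the planner
        # multiplies route lengths by both factors to report distances in
        # world units.)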
20 |         self._node_density = 50.0
21 |         self._pixel_density = 0.1643
22 | 
23 |         self._map = CarlaMap(city_name, self._pixel_density, self._node_density)
24 | 
25 |         self._astar = AStar()
26 | 
27 |         # Refers to the start position of the previous route computation
28 |         self._previous_node = []
29 | 
30 |         # The current computed route
31 |         self._route = None
32 | 
33 |     def project_node(self, position):
34 |         """
35 |         Project a world position onto the closest node of the city road graph.
36 |         """
37 | 
38 |         node = self._map.convert_to_node(position)
39 |         # print ("Converted Node ", node)
40 | 
41 |         # To change the orientation with respect to the map standards
42 | 
43 |         node = tuple([int(round(x)) for x in node])
44 | 
45 |         # Set to zero if it is less than zero.
46 | 
47 |         node = (max(0, node[0]), max(0, node[1]))
48 |         node = (min(self._map.get_graph_resolution()[0] - 1, node[0]),
49 |                 min(self._map.get_graph_resolution()[1] - 1, node[1]))
50 | 
51 |         node = self._map.search_on_grid(node)
52 |         # print("Final Node ", node)
53 | 
54 |         return node
55 | 
56 |     def get_intersection_nodes(self):
57 |         return self._map.get_intersection_nodes()
58 | 
59 |     def get_map(self):
60 |         return self._map
61 | 
62 |     def get_pixel_density(self):
63 |         return self._pixel_density
64 | 
65 |     def get_node_density(self):
66 |         return self._node_density
67 | 
68 |     def is_at_goal(self, source, target):
69 |         return source == target
70 | 
71 |     def is_at_new_node(self, current_node):
72 | 
73 |         return current_node != self._previous_node
74 | 
75 |     def is_away_from_intersection(self, current_node):
76 |         return self.closest_intersection_position(current_node) > 1
77 | 
78 |     def is_far_away_from_route_intersection(self, current_node):
79 |         # CHECK FOR THE EMPTY CASE
80 |         if self._route is None:
81 |             raise RuntimeError('Impossible to find route.'
82 |                                + ' The current planner is limited.'
83 |                                + ' Try to select start points away from intersections.')
84 | 
85 |         return self._closest_intersection_route_position(current_node,
86 |                                                          self._route) > 4
87 | 
88 |     def move_node(self, node, direction, displacement):
89 | 
90 |         moved_node = [round(node[0] + displacement * direction[0]),
91 |                       round(node[1] + displacement * direction[1])]
92 | 
93 |         return moved_node
94 | 
95 |     def compute_route(self, node_source, source_ori, node_target, target_ori):
96 | 
97 |         self._previous_node = node_source
98 | 
99 |         printing_grid = np.copy(self._map._grid._structure)
100 | 
101 |         np.set_printoptions(edgeitems=3, infstr='inf', threshold=sys.maxsize, linewidth=129)  # np.nan is not a valid threshold
102 | 
103 |         a_star = AStar()
104 |         a_star.init_grid(self._map.get_graph_resolution()[0],
105 |                          self._map.get_graph_resolution()[1],
106 |                          self._map.get_walls_directed(node_source, source_ori,
107 |                                                       node_target, target_ori), node_source,
108 |                          node_target)
109 | 
110 |         route = a_star.solve(printing_grid)
111 |         printing_grid[node_source[0], node_source[1]] = 7
112 | 
113 |         printing_grid[node_target[0], node_target[1]] = 2
114 | 
115 |         if route is None:
116 |             printing_grid = np.copy(self._map._grid._structure)
117 |             # printing_grid[node_target[0], node_target[1]] = 3
118 |             printing_grid[node_source[0], node_source[1]] = 7
119 | 
120 |             printing_grid[node_target[0], node_target[1]] = 2
121 |             a_star = AStar()
122 |             a_star.init_grid(self._map.get_graph_resolution()[0],
123 |                              self._map.get_graph_resolution()[1],
124 |                              self._map.get_walls_directed(node_source, source_ori,
125 |                                                           node_target, target_ori,
126 |                                                           both_walls=False), node_source,
127 |                              node_target)
128 | 
129 |             route = a_star.solve(printing_grid)
130 | 
131 |             if route is None:
132 |                 print('Impossible to find route, returning previous route')
133 |                 return self._route
134 | 
135 |         self._route = route
136 | 
137 |         return route
138 | 
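    # Sketch of how compute_route() is typically driven (an annotation; the
    # positions and orientations below are hypothetical, and project_node()
    # is the usual way to obtain graph nodes from world positions):
    #
    #     track = CityTrack('Town01')
    #     source = track.project_node((20.0, 10.0, 38.0))
    #     target = track.project_node((100.0, 50.0, 38.0))
    #     route = track.compute_route(source, (1.0, 0.0), target, (0.0, 1.0))
    #
    # The first A* pass blocks the wrong driving directions around both the
    # source and the target (both_walls=True); if no route is found, a second
    # pass relaxes the walls around the target before giving up.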
139 |     def get_distance_closest_node_route(self, pos, route):
140 |         distance = []
141 | 
142 |         for node_iter in route:
143 | 
144 |             if node_iter in self._map.get_intersection_nodes():
145 |                 distance.append(sldist(node_iter, pos))
146 | 
147 |         if not distance:
148 |             return sldist(route[-1], pos)
149 |         return sorted(distance)[0]
150 | 
151 |     def closest_intersection_position(self, current_node):
152 | 
153 |         distance_vector = []
154 |         for node_iterator in self._map.get_intersection_nodes():
155 |             distance_vector.append(sldist(node_iterator, current_node))
156 | 
157 |         return sorted(distance_vector)[0]
158 | 
159 |     def closest_curve_position(self, current_node):
160 | 
161 |         distance_vector = []
162 |         for node_iterator in self._map.get_curve_nodes():
163 |             distance_vector.append(sldist(node_iterator, current_node))
164 | 
165 |         return sorted(distance_vector)[0]
166 | 
167 |     def _closest_intersection_route_position(self, current_node, route):
168 |         # Distance from the current node to the closest intersection node.
169 |         # A single pass over the intersection nodes yields the minimum; the
170 |         # route argument is kept for API compatibility.
171 |         distance_vector = []
172 |         for node_iterator in self._map.get_intersection_nodes():
173 |             distance_vector.append(sldist(node_iterator, current_node))
174 | 
175 |         return sorted(distance_vector)[0]
176 | 
--------------------------------------------------------------------------------
/version084/carla/planner/converter.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
2 | # Barcelona (UAB).
3 | #
4 | # This work is licensed under the terms of the MIT license.
5 | # For a copy, see .
6 | 
7 | import math
8 | import numpy as np
9 | 
10 | from .graph import string_to_floats
11 | 
12 | # Constant definition enumeration
13 | 
14 | PIXEL = 0
15 | WORLD = 1
16 | NODE = 2
17 | 
18 | 
19 | class Converter(object):
20 | 
21 |     def __init__(self, city_file, pixel_density, node_density):
22 | 
23 |         self._node_density = node_density
24 |         self._pixel_density = pixel_density
25 |         with open(city_file, 'r') as f:
26 |             # The offset of the world from the zero coordinate (the
27 |             # coordinate we consider to be zero)
28 |             self._worldoffset = string_to_floats(f.readline())
29 | 
30 |             angles = string_to_floats(f.readline())
31 | 
32 |             # The rotation, if any, between the world and map coordinates.
33 |             self._worldrotation = np.array([
34 |                 [math.cos(math.radians(angles[2])), -math.sin(math.radians(angles[2])), 0.0],
35 |                 [math.sin(math.radians(angles[2])), math.cos(math.radians(angles[2])), 0.0],
36 |                 [0.0, 0.0, 1.0]])
37 | 
38 |             # Ignore for now, these are offsets for map coordinates and scale
39 |             # (not used).
40 |             _ = f.readline()
41 | 
42 |             # The offset of the map zero coordinate.
43 |             self._mapoffset = string_to_floats(f.readline())
44 | 
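    # Conversion chain used throughout the planner (an annotation; the inputs
    # below are hypothetical). The three coordinate systems are graph nodes,
    # map-image pixels and 3D world positions:
    #
    #     conv = Converter('Town01.txt', 0.1643, 50)
    #     node = conv.convert_to_node((20.0, 10.0, 38.0))   # world -> node
    #     pixel = conv.convert_to_pixel(node)               # node -> pixel
    #     world = conv.convert_to_world(pixel)              # pixel -> world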
45 |     def convert_to_node(self, input_data):
46 |         """
47 |         Receives a position (can be Pixel or World)
48 |         :param input_data: position in some coordinate system
49 |         :return: A vector representing a node
50 |         """
51 | 
52 |         input_type = self._check_input_type(input_data)
53 |         if input_type == PIXEL:
54 |             return self._pixel_to_node(input_data)
55 |         elif input_type == WORLD:
56 |             return self._world_to_node(input_data)
57 |         else:
58 |             raise ValueError('Invalid input to be converted')
59 | 
60 |     def convert_to_pixel(self, input_data):
61 | 
62 |         """
63 |         Receives a position (can be Node or World)
64 |         :param input_data: position in some coordinate system
65 |         :return: A vector with pixel coordinates
66 |         """
67 | 
68 |         input_type = self._check_input_type(input_data)
69 | 
70 |         if input_type == NODE:
71 | 
72 |             return self._node_to_pixel(input_data)
73 |         elif input_type == WORLD:
74 |             pixel = self._world_to_pixel(input_data)
75 |             return [math.floor(pixel[0]), math.floor(pixel[1])]
76 |         else:
77 |             raise ValueError('Invalid input to be converted')
78 | 
79 |     def convert_to_world(self, input_data):
80 | 
81 |         """
82 |         Receives a position (can be Pixel or Node)
83 |         :param input_data: position in some coordinate system
84 |         :return: A vector with world coordinates
85 |         """
86 | 
87 |         input_type = self._check_input_type(input_data)
88 |         if input_type == NODE:
89 |             return self._node_to_world(input_data)
90 |         elif input_type == PIXEL:
91 |             return self._pixel_to_world(input_data)
92 |         else:
93 |             raise ValueError('Invalid input to be converted')
94 | 
95 |     def _node_to_pixel(self, node):
96 |         """
97 |         Conversion from node format (graph) to pixel (image)
98 |         :param node: node coordinates
99 |         :return: pixel coordinates
100 |         """
101 |         pixel = [(node[0] + 2) * self._node_density,
102 |                  (node[1] + 2) * self._node_density]
103 |         return pixel
104 | 
105 |     def _pixel_to_node(self, pixel):
106 |         """
107 |         Conversion from pixel format (image) to node (graph)
108 |         :param pixel: pixel coordinates
109 |         :return: node coordinates
110 |         """
111 |         node = [int(round((pixel[0]) / self._node_density, 0) - 2),
112 |                 int(round((pixel[1]) / self._node_density, 0) - 2)]
113 | 
114 |         return tuple(node)
115 | 
116 |     def _pixel_to_world(self, pixel):
117 |         """
118 |         Conversion from pixel format (image) to world (3D)
119 |         :param pixel: pixel coordinates
120 |         :return: world coordinates
121 |         """
122 | 
123 |         relative_location = [pixel[0] * self._pixel_density,
124 |                              pixel[1] * self._pixel_density]
125 | 
126 |         world = [
127 |             relative_location[0] + self._mapoffset[0] - self._worldoffset[0],
128 |             relative_location[1] + self._mapoffset[1] - self._worldoffset[1],
129 |             22
130 |         ]
131 | 
132 |         return world
133 | 
134 |     def _world_to_pixel(self, world):
135 |         """
136 |         Conversion from world format (3D) to pixel
137 |         :param world: world coordinates
138 |         :return: pixel coordinates
139 |         """
140 | 
141 |         rotation = np.array([world[0], world[1], world[2]])
142 |         rotation = rotation.dot(self._worldrotation)
143 | 
144 |         relative_location = [rotation[0] + self._worldoffset[0] - self._mapoffset[0],
145 |                              rotation[1] + self._worldoffset[1] - self._mapoffset[1],
146 |                              rotation[2] + self._worldoffset[2] - self._mapoffset[2]]
147 | 
148 | 
149 | 
150 |         pixel = [(relative_location[0] / float(self._pixel_density)),
151 |                  (relative_location[1] / float(self._pixel_density))]
152 | 
153 |         return pixel
154 | 
155 |     def _world_to_node(self, world):
156 |         return self._pixel_to_node(self._world_to_pixel(world))
157 | 
158 |     def _node_to_world(self, node):
159 | 
160 |         return self._pixel_to_world(self._node_to_pixel(node))
161 | 
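    # How the input formats are told apart (an annotation; see
    # _check_input_type() below): a 3-component input is WORLD, a
    # 2-component input whose first entry is an int is NODE, and any other
    # 2-component input is PIXEL. For example:
    #
    #     (12, 30)           -> NODE
    #     (102.5, 301.2)     -> PIXEL
    #     (20.0, 10.0, 38.0) -> WORLD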
162 |     def _check_input_type(self, input_data):
163 |         if len(input_data) > 2:
164 |             return WORLD
165 |         elif type(input_data[0]) is int:
166 |             return NODE
167 |         else:
168 |             return PIXEL
169 | 
--------------------------------------------------------------------------------
/version084/carla/planner/graph.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
2 | # Barcelona (UAB).
3 | #
4 | # This work is licensed under the terms of the MIT license.
5 | # For a copy, see .
6 | 
7 | import math
8 | import numpy as np
9 | 
10 | 
11 | def string_to_node(string):
12 |     vec = string.split(',')
13 |     return (int(vec[0]), int(vec[1]))
14 | 
15 | 
16 | def string_to_floats(string):
17 |     vec = string.split(',')
18 |     return (float(vec[0]), float(vec[1]), float(vec[2]))
19 | 
20 | 
21 | def sldist(c1, c2):
22 |     return math.sqrt((c2[0] - c1[0]) ** 2 + (c2[1] - c1[1]) ** 2)
23 | 
24 | 
25 | def sldist3(c1, c2):
26 |     return math.sqrt((c2[0] - c1[0]) ** 2 + (c2[1] - c1[1])
27 |                      ** 2 + (c2[2] - c1[2]) ** 2)
28 | 
29 | 
30 | class Graph(object):
31 |     """
32 |     A simple directed, weighted graph
33 |     """
34 | 
35 |     def __init__(self, graph_file=None, node_density=50):
36 | 
37 |         self._nodes = set()
38 |         self._angles = {}
39 |         self._edges = {}
40 |         self._distances = {}
41 |         self._node_density = node_density
42 | 
43 |         if graph_file is not None:
44 |             with open(graph_file, 'r') as f:
45 |                 # Skip the first four header lines of the file.
46 |                 lines_after_4 = f.readlines()[4:]
47 | 
48 |                 # The next line contains the graph resolution.
49 |                 linegraphres = lines_after_4[0]
50 |                 self._resolution = string_to_node(linegraphres)
51 |                 for line in lines_after_4[1:]:
52 | 
53 |                     from_node, to_node, d = line.split()
54 |                     from_node = string_to_node(from_node)
55 |                     to_node = string_to_node(to_node)
56 | 
57 |                     if from_node not in self._nodes:
58 |                         self.add_node(from_node)
59 |                     if to_node not in self._nodes:
60 |                         self.add_node(to_node)
61 | 
62 |                     self._edges.setdefault(from_node, [])
63 |                     self._edges[from_node].append(to_node)
64 |                     self._distances[(from_node, to_node)] = float(d)
65 | 
66 |     def add_node(self, value):
67 |         self._nodes.add(value)
68 | 
69 |     def make_orientations(self, node, heading):
70 | 
71 |         import collections
72 |         distance_dic = {}
73 |         for node_iter in self._nodes:
74 |             if node_iter != node:
75 |                 distance_dic[sldist(node, node_iter)] = node_iter
76 | 
77 |         distance_dic = collections.OrderedDict(
78 |             sorted(distance_dic.items()))
79 | 
80 |         self._angles[node] = heading
81 |         for _, v in distance_dic.items():
82 |             start_to_goal = np.array([node[0] - v[0], node[1] - v[1]])
83 | 
84 | 
85 |             self._angles[v] = start_to_goal / np.linalg.norm(start_to_goal)
86 | 
87 |     def add_edge(self, from_node, to_node, distance):
88 |         self._add_edge(from_node, to_node, distance)
89 | 
90 |     def _add_edge(self, from_node, to_node, distance):
91 |         self._edges.setdefault(from_node, [])
92 |         self._edges[from_node].append(to_node)
93 |         self._distances[(from_node, to_node)] = distance
94 | 
95 |     def get_resolution(self):
96 |         return self._resolution
97 |     def get_edges(self):
98 |         return self._edges
99 | 
100 |     def intersection_nodes(self):
101 | 
102 |         intersect_nodes = []
103 |         for node in self._nodes:
104 |             if len(self._edges[node]) > 2:
105 |                 intersect_nodes.append(node)
106 | 
107 |         return intersect_nodes
108 | 
109 |     def curve_nodes(self):
110 | 
111 |         intersect_nodes = []
112 |         for node in self._nodes:
113 |             if len(self._edges[node]) > 1:
114 |                 intersect_nodes.append(node)
115 | 
116 |         return
intersect_nodes 117 | 118 | # This contains also the non-intersection turns... 119 | 120 | def turn_nodes(self): 121 | 122 | return self._nodes 123 | 124 | def plot_ori(self, c): 125 | from matplotlib import collections as mc 126 | 127 | import matplotlib.pyplot as plt 128 | line_len = 1 129 | 130 | lines = [[(p[0], p[1]), (p[0] + line_len * self._angles[p][0], 131 | p[1] + line_len * self._angles[p][1])] for p in self._nodes] 132 | lc = mc.LineCollection(lines, linewidth=2, color='green') 133 | _, ax = plt.subplots() 134 | ax.add_collection(lc) 135 | 136 | ax.autoscale() 137 | ax.margins(0.1) 138 | 139 | xs = [p[0] for p in self._nodes] 140 | ys = [p[1] for p in self._nodes] 141 | 142 | plt.scatter(xs, ys, color=c) 143 | 144 | def plot(self, c): 145 | import matplotlib.pyplot as plt 146 | xs = [p[0] for p in self._nodes] 147 | ys = [p[1] for p in self._nodes] 148 | 149 | plt.scatter(xs, ys, color=c) 150 | -------------------------------------------------------------------------------- /version084/carla/planner/grid.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 2 | # Barcelona (UAB). 3 | # 4 | # This work is licensed under the terms of the MIT license. 5 | # For a copy, see . 6 | 7 | import copy 8 | import numpy as np 9 | 10 | 11 | def angle_between(v1, v2): 12 | return np.arccos(np.dot(v1, v2) / np.linalg.norm(v1) / np.linalg.norm(v2)) 13 | 14 | 15 | class Grid(object): 16 | 17 | def __init__(self, graph): 18 | 19 | self._graph = graph 20 | self._structure = self._make_structure() 21 | self._walls = self._make_walls() 22 | 23 | def search_on_grid(self, x, y): 24 | visit = [[0, 1], [0, -1], [1, 0], [-1, 0], 25 | [1, -1], [1, 1], [-1, 1], [-1, -1]] 26 | c_x, c_y = x, y 27 | scale = 1 28 | 29 | 30 | while self._structure[c_x, c_y] != 0: 31 | for offset in visit: 32 | c_x, c_y = x + offset[0] * scale, y + offset[1] * scale 33 | 34 | if c_x >= 0 and c_x < self._graph.get_resolution()[ 35 | 0] and c_y >= 0 and c_y < self._graph.get_resolution()[1]: 36 | if self._structure[c_x, c_y] == 0: 37 | break 38 | else: 39 | c_x, c_y = x, y 40 | scale += 1 41 | 42 | return c_x, c_y 43 | def get_walls(self): 44 | return self._walls 45 | 46 | def get_wall_source(self, pos, pos_ori, target): 47 | 48 | free_nodes = self.get_adjacent_free_nodes(pos) 49 | # print self._walls 50 | final_walls = copy.copy(self._walls) 51 | 52 | heading_start = np.array([pos_ori[0], pos_ori[1]]) 53 | for adj in free_nodes: 54 | 55 | start_to_goal = np.array([adj[0] - pos[0], adj[1] - pos[1]]) 56 | angle = angle_between(heading_start, start_to_goal) 57 | if (angle > 1.6 and adj != target): 58 | final_walls.add((adj[0], adj[1])) 59 | 60 | return final_walls 61 | 62 | def get_wall_target(self, pos, pos_ori, source): 63 | 64 | free_nodes = self.get_adjacent_free_nodes(pos) 65 | final_walls = copy.copy(self._walls) 66 | heading_start = np.array([pos_ori[0], pos_ori[1]]) 67 | for adj in free_nodes: 68 | 69 | start_to_goal = np.array([adj[0] - pos[0], adj[1] - pos[1]]) 70 | angle = angle_between(heading_start, start_to_goal) 71 | 72 | if (angle < 1.0 and adj != source): 73 | #print("added target ", (adj[0], adj[1])) 74 | final_walls.add((adj[0], adj[1])) 75 | 76 | return final_walls 77 | 78 | def _draw_line(self, grid, xi, yi, xf, yf): 79 | 80 | if xf < xi: 81 | aux = xi 82 | xi = xf 83 | xf = aux 84 | 85 | if yf < yi: 86 | aux = yi 87 | yi = yf 88 | yf = aux 89 | 90 | for i in range(xi, xf + 1): 91 | 92 | for j in 
range(yi, yf + 1): 93 | grid[i, j] = 0.0 94 | 95 | return grid 96 | 97 | def _make_structure(self): 98 | structure = np.ones( 99 | (self._graph.get_resolution()[0], 100 | self._graph.get_resolution()[1])) 101 | 102 | for key, connections in self._graph.get_edges().items(): 103 | 104 | # draw a line 105 | for con in connections: 106 | # print key[0],key[1],con[0],con[1] 107 | structure = self._draw_line( 108 | structure, key[0], key[1], con[0], con[1]) 109 | # print grid 110 | return structure 111 | 112 | def _make_walls(self): 113 | walls = set() 114 | 115 | for i in range(self._structure.shape[0]): 116 | 117 | for j in range(self._structure.shape[1]): 118 | if self._structure[i, j] == 1.0: 119 | walls.add((i, j)) 120 | 121 | return walls 122 | 123 | def get_adjacent_free_nodes(self, pos): 124 | """ Eight nodes in total """ 125 | visit = [[0, 1], [0, -1], [1, 0], [1, 1], 126 | [1, -1], [-1, 0], [-1, 1], [-1, -1]] 127 | 128 | adjacent = set() 129 | for offset in visit: 130 | node = (pos[0] + offset[0], pos[1] + offset[1]) 131 | 132 | if (node[0] >= 0 and node[0] < self._graph.get_resolution()[0] 133 | and node[1] >= 0 and node[1] < self._graph.get_resolution()[1]): 134 | if self._structure[node[0], node[1]] == 0.0: 135 | adjacent.add(node) 136 | 137 | return adjacent 138 | -------------------------------------------------------------------------------- /version084/carla/planner/map.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 2 | # Barcelona (UAB). 3 | # 4 | # This work is licensed under the terms of the MIT license. 5 | # For a copy, see . 6 | 7 | """Class used for operating the city map.""" 8 | 9 | import math 10 | import os 11 | 12 | try: 13 | import numpy as np 14 | except ImportError: 15 | raise RuntimeError('cannot import numpy, make sure numpy package is installed') 16 | 17 | try: 18 | from PIL import Image 19 | except ImportError: 20 | raise RuntimeError('cannot import PIL, make sure pillow package is installed') 21 | 22 | from .graph import Graph 23 | from .graph import sldist 24 | from .grid import Grid 25 | from .converter import Converter 26 | 27 | 28 | def color_to_angle(color): 29 | return (float(color) / 255.0) * 2 * math.pi 30 | 31 | 32 | class CarlaMap(object): 33 | 34 | def __init__(self, city, pixel_density=0.1643, node_density=50): 35 | dir_path = os.path.dirname(__file__) 36 | city_file = os.path.join(dir_path, city + '.txt') 37 | 38 | city_map_file = os.path.join(dir_path, city + '.png') 39 | city_map_file_lanes = os.path.join(dir_path, city + 'Lanes.png') 40 | city_map_file_center = os.path.join(dir_path, city + 'Central.png') 41 | 42 | # The built graph. This is the exact same graph that unreal builds. This 43 | # is a generic structure used for many cases 44 | self._graph = Graph(city_file, node_density) 45 | 46 | self._pixel_density = pixel_density 47 | self._grid = Grid(self._graph) 48 | # The number of game units per pixel. For now this is fixed. 
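        # (The comment above refers to pixel_density: the number of game
        # units per pixel, fixed per city.)
        # Illustrative sketch of the typical queries answered by CarlaMap;
        # the position used here is hypothetical:
        #
        #     carla_map = CarlaMap('Town01')
        #     if carla_map.is_point_on_lane([20.0, 10.0, 38.0]):
        #         ori = carla_map.get_lane_orientation([20.0, 10.0, 38.0])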
49 | 
50 |         self._converter = Converter(city_file, pixel_density, node_density)
51 | 
52 |         # Load the lanes image
53 |         self.map_image_lanes = Image.open(city_map_file_lanes)
54 |         self.map_image_lanes.load()
55 |         self.map_image_lanes = np.asarray(self.map_image_lanes, dtype="int32")
56 |         # Load the image
57 |         self.map_image = Image.open(city_map_file)
58 |         self.map_image.load()
59 |         self.map_image = np.asarray(self.map_image, dtype="int32")
60 | 
61 |         # Load the center-lines image
62 |         self.map_image_center = Image.open(city_map_file_center)
63 |         self.map_image_center.load()
64 |         self.map_image_center = np.asarray(self.map_image_center, dtype="int32")
65 | 
66 |     def get_graph_resolution(self):
67 | 
68 |         return self._graph.get_resolution()
69 |     def check_pixel_on_map(self, pixel):
70 | 
71 |         if pixel[0] < self.map_image_lanes.shape[1] and pixel[0] > 0 and \
72 |                 pixel[1] < self.map_image_lanes.shape[0] and pixel[1] > 0:
73 |             return True
74 |         else:
75 |             return False
76 | 
77 |     def get_map(self, height=None):
78 |         if height is not None:
79 |             img = Image.fromarray(self.map_image.astype(np.uint8))
80 | 
81 |             aspect_ratio = height / float(self.map_image.shape[0])
82 | 
83 |             img = img.resize((int(aspect_ratio * self.map_image.shape[1]), height), Image.ANTIALIAS)
84 |             img.load()
85 |             return np.asarray(img, dtype="int32")
86 |         return np.fliplr(self.map_image)
87 | 
88 |     def get_map_lanes(self, size=None):
89 |         if size is not None:
90 |             img = Image.fromarray(self.map_image_lanes.astype(np.uint8))
91 |             img = img.resize((size[1], size[0]), Image.ANTIALIAS)
92 |             img.load()
93 |             return np.fliplr(np.asarray(img, dtype="int32"))
94 |         return np.fliplr(self.map_image_lanes)
95 | 
96 |     def get_lane_orientation(self, world):
97 |         """Get the lane orientation of a certain world position."""
98 |         pixel = self.convert_to_pixel(world)
99 | 
100 |         ori = self.map_image_lanes[int(pixel[1]), int(pixel[0]), 2]
101 |         ori = color_to_angle(ori)
102 | 
103 |         return (-math.cos(ori), -math.sin(ori))
104 | 
105 |     def get_lane_orientation_degrees(self, world):
106 |         """Get the lane orientation of a certain world position."""
107 |         pixel = self.convert_to_pixel(world)
108 | 
109 |         ori = self.map_image_lanes[int(pixel[1]), int(pixel[0]), 2]
110 | 
111 |         return color_to_angle(ori)
112 | 
113 | 
114 |     def convert_to_node(self, input_data):
115 |         """
116 |         Receives a position (can be Pixel or World)
117 |         :param input_data: position in some coordinate system
118 |         :return: A vector representing a node
119 |         """
120 |         return self._converter.convert_to_node(input_data)
121 | 
122 |     def convert_to_pixel(self, input_data):
123 |         """
124 |         Receives a position (can be Node or World)
125 |         :param input_data: position in some coordinate system
126 |         :return: A vector with pixel coordinates
127 |         """
128 |         return self._converter.convert_to_pixel(input_data)
129 | 
130 |     def convert_to_world(self, input_data):
131 |         """
132 |         Receives a position (can be Pixel or Node)
133 |         :param input_data: position in some coordinate system
134 |         :return: A vector with world coordinates
135 |         """
136 |         return self._converter.convert_to_world(input_data)
137 | 
138 |     def get_walls_directed(self, node_source, source_ori, node_target, target_ori, both_walls=True):
139 |         """
140 |         The hackiest function here: instead of planning over a two-way
141 |         road, we keep one driving direction and interrupt the other by
142 |         adding an artificial wall.
143 | """ 144 | 145 | if both_walls: 146 | final_walls = self._grid.get_wall_source(node_source, source_ori, node_target) 147 | 148 | final_walls = final_walls.union(self._grid.get_wall_target( 149 | node_target, target_ori, node_source)) 150 | 151 | return final_walls 152 | else: 153 | 154 | return self._grid.get_wall_source(node_source, source_ori, node_target) 155 | 156 | def is_point_on_lane(self, world): 157 | pixel = self.convert_to_pixel(world) 158 | 159 | if not self.check_pixel_on_map(pixel): 160 | return False 161 | 162 | ori = self.map_image_lanes[int(pixel[1]), int(pixel[0]), 1] 163 | 164 | if ori == 0: 165 | return False 166 | else: 167 | return True 168 | 169 | def is_point_on_intersection(self, world): 170 | pixel = self.convert_to_pixel(world) 171 | if not self.check_pixel_on_map(pixel): 172 | return False 173 | ori = self.map_image_lanes[int(pixel[1]), int(pixel[0]), 0] 174 | 175 | 176 | if ori > 0: 177 | return True 178 | else: 179 | return False 180 | 181 | def get_walls(self): 182 | 183 | return self._grid.get_walls() 184 | 185 | def get_distance_closest_node(self, pos): 186 | 187 | distance = [] 188 | for node_iter in self._graph.intersection_nodes(): 189 | distance.append(sldist(node_iter, pos)) 190 | 191 | return sorted(distance)[0] 192 | 193 | def get_intersection_nodes(self): 194 | return self._graph.intersection_nodes() 195 | 196 | def get_curve_nodes(self): 197 | return self._graph.curve_nodes() 198 | 199 | def get_adjacent_free_nodes(self, pos): 200 | return self._grid.get_adjacent_free_nodes(pos) 201 | 202 | def search_on_grid(self, node): 203 | return self._grid.search_on_grid(node[0], node[1]) 204 | -------------------------------------------------------------------------------- /version084/carla/planner/planner.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 2 | # Barcelona (UAB). 3 | # 4 | # This work is licensed under the terms of the MIT license. 5 | # For a copy, see . 6 | 7 | import collections 8 | import math 9 | 10 | import numpy as np 11 | 12 | from . 
import city_track 13 | 14 | 15 | def compare(x, y): 16 | return collections.Counter(x) == collections.Counter(y) 17 | 18 | 19 | # Constants Used for the high level commands 20 | 21 | 22 | REACH_GOAL = 0.0 23 | GO_STRAIGHT = 5.0 24 | TURN_RIGHT = 4.0 25 | TURN_LEFT = 3.0 26 | LANE_FOLLOW = 2.0 27 | 28 | 29 | # Auxiliary algebra function 30 | def angle_between(v1, v2): 31 | return np.arccos(np.dot(v1, v2) / np.linalg.norm(v1) / np.linalg.norm(v2)) 32 | 33 | 34 | def sldist(c1, c2): return math.sqrt((c2[0] - c1[0]) ** 2 + (c2[1] - c1[1]) ** 2) 35 | 36 | 37 | def signal(v1, v2): 38 | return np.cross(v1, v2) / np.linalg.norm(v1) / np.linalg.norm(v2) 39 | 40 | 41 | class Planner(object): 42 | 43 | def __init__(self, city_name): 44 | 45 | self._city_track = city_track.CityTrack(city_name) 46 | 47 | self._commands = [] 48 | 49 | def get_next_command(self, source, source_ori, target, target_ori): 50 | """ 51 | Computes the full plan and returns the next command, 52 | Args 53 | source: source position 54 | source_ori: source orientation 55 | target: target position 56 | target_ori: target orientation 57 | Returns 58 | a command ( Straight,Lane Follow, Left or Right) 59 | """ 60 | 61 | track_source = self._city_track.project_node(source) 62 | track_target = self._city_track.project_node(target) 63 | 64 | # reach the goal 65 | 66 | if self._city_track.is_at_goal(track_source, track_target): 67 | return REACH_GOAL 68 | 69 | if (self._city_track.is_at_new_node(track_source) 70 | and self._city_track.is_away_from_intersection(track_source)): 71 | 72 | route = self._city_track.compute_route(track_source, source_ori, 73 | track_target, target_ori) 74 | 75 | self._commands = self._route_to_commands(route) 76 | 77 | if self._city_track.is_far_away_from_route_intersection( 78 | track_source): 79 | return LANE_FOLLOW 80 | else: 81 | if self._commands: 82 | return self._commands[0] 83 | else: 84 | return LANE_FOLLOW 85 | else: 86 | 87 | if self._city_track.is_far_away_from_route_intersection( 88 | track_source): 89 | return LANE_FOLLOW 90 | 91 | # If there are computed commands 92 | if self._commands: 93 | return self._commands[0] 94 | else: 95 | return LANE_FOLLOW 96 | 97 | def get_shortest_path_distance( 98 | self, 99 | source, 100 | source_ori, 101 | target, 102 | target_ori): 103 | 104 | distance = 0 105 | track_source = self._city_track.project_node(source) 106 | track_target = self._city_track.project_node(target) 107 | 108 | current_pos = track_source 109 | 110 | route = self._city_track.compute_route(track_source, source_ori, 111 | track_target, target_ori) 112 | # No Route, distance is zero 113 | if route is None: 114 | return 0.0 115 | 116 | for node_iter in route: 117 | distance += sldist(node_iter, current_pos) 118 | current_pos = node_iter 119 | 120 | # We multiply by these values to convert distance to world coordinates 121 | 122 | return distance * float(self._city_track.get_pixel_density()) \ 123 | * float(self._city_track.get_node_density()) 124 | 125 | def is_there_posible_route(self, source, source_ori, target, target_ori): 126 | 127 | track_source = self._city_track.project_node(source) 128 | track_target = self._city_track.project_node(target) 129 | 130 | return not self._city_track.compute_route( 131 | track_source, source_ori, track_target, target_ori) is None 132 | 133 | def test_position(self, source): 134 | 135 | node_source = self._city_track.project_node(source) 136 | 137 | return self._city_track.is_away_from_intersection(node_source) 138 | 139 | def _route_to_commands(self, route): 
140 | 141 | """ 142 | from the shortest path graph, transform it into a list of commands 143 | 144 | :param route: the sub graph containing the shortest path 145 | :return: list of commands encoded from 0-5 146 | """ 147 | 148 | commands_list = [] 149 | 150 | for i in range(0, len(route)): 151 | if route[i] not in self._city_track.get_intersection_nodes(): 152 | continue 153 | 154 | current = route[i] 155 | past = route[i - 1] 156 | future = route[i + 1] 157 | 158 | past_to_current = np.array( 159 | [current[0] - past[0], current[1] - past[1]]) 160 | current_to_future = np.array( 161 | [future[0] - current[0], future[1] - current[1]]) 162 | angle = signal(current_to_future, past_to_current) 163 | 164 | if angle < -0.1: 165 | command = TURN_RIGHT 166 | elif angle > 0.1: 167 | command = TURN_LEFT 168 | else: 169 | command = GO_STRAIGHT 170 | 171 | commands_list.append(command) 172 | 173 | return commands_list 174 | -------------------------------------------------------------------------------- /version084/carla/sensor.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 2 | # Barcelona (UAB). 3 | # 4 | # This work is licensed under the terms of the MIT license. 5 | # For a copy, see . 6 | 7 | """CARLA sensors.""" 8 | 9 | 10 | import os 11 | 12 | from collections import namedtuple 13 | 14 | try: 15 | import numpy 16 | except ImportError: 17 | raise RuntimeError('cannot import numpy, make sure numpy package is installed.') 18 | 19 | from .transform import Transform, Translation, Rotation, Scale 20 | 21 | 22 | # ============================================================================== 23 | # -- Helpers ------------------------------------------------------------------- 24 | # ============================================================================== 25 | 26 | 27 | Color = namedtuple('Color', 'r g b') 28 | Color.__new__.__defaults__ = (0, 0, 0) 29 | 30 | 31 | Point = namedtuple('Point', 'x y z color') 32 | Point.__new__.__defaults__ = (0.0, 0.0, 0.0, None) 33 | 34 | 35 | def _append_extension(filename, ext): 36 | return filename if filename.lower().endswith(ext.lower()) else filename + ext 37 | 38 | 39 | # ============================================================================== 40 | # -- Sensor -------------------------------------------------------------------- 41 | # ============================================================================== 42 | 43 | 44 | class Sensor(object): 45 | """ 46 | Base class for sensor descriptions. Used to add sensors to CarlaSettings. 47 | """ 48 | 49 | def __init__(self, name, sensor_type): 50 | self.SensorName = name 51 | self.SensorType = sensor_type 52 | self.PositionX = 0.2 53 | self.PositionY = 0.0 54 | self.PositionZ = 1.3 55 | self.RotationPitch = 0.0 56 | self.RotationRoll = 0.0 57 | self.RotationYaw = 0.0 58 | 59 | def set(self, **kwargs): 60 | for key, value in kwargs.items(): 61 | if not hasattr(self, key): 62 | raise ValueError('sensor.Sensor: no key named %r' % key) 63 | setattr(self, key, value) 64 | 65 | def set_position(self, x, y, z): 66 | self.PositionX = x 67 | self.PositionY = y 68 | self.PositionZ = z 69 | 70 | def set_rotation(self, pitch, yaw, roll): 71 | self.RotationPitch = pitch 72 | self.RotationYaw = yaw 73 | self.RotationRoll = roll 74 | 75 | def get_transform(self): 76 | ''' 77 | Returns the camera to [whatever the camera is attached to] 78 | transformation. 
79 | ''' 80 | return Transform( 81 | Translation(self.PositionX, self.PositionY, self.PositionZ), 82 | Rotation(self.RotationPitch, self.RotationYaw, self.RotationRoll)) 83 | 84 | def get_unreal_transform(self): 85 | ''' 86 | Returns the camera to [whatever the camera is attached to] 87 | transformation with the Unreal necessary corrections applied. 88 | 89 | @todo Do we need to expose this? 90 | ''' 91 | to_unreal_transform = Transform(Rotation(roll=-90, yaw=90), Scale(x=-1)) 92 | return self.get_transform() * to_unreal_transform 93 | 94 | 95 | class Camera(Sensor): 96 | """ 97 | Camera description. This class can be added to a CarlaSettings object to add 98 | a camera to the player vehicle. 99 | """ 100 | 101 | def __init__(self, name, **kwargs): 102 | super(Camera, self).__init__(name, sensor_type="CAMERA") 103 | self.PostProcessing = 'SceneFinal' 104 | self.ImageSizeX = 720 105 | self.ImageSizeY = 512 106 | self.FOV = 90.0 107 | self.set(**kwargs) 108 | 109 | def set_image_size(self, pixels_x, pixels_y): 110 | '''Sets the image size in pixels''' 111 | self.ImageSizeX = pixels_x 112 | self.ImageSizeY = pixels_y 113 | 114 | 115 | class Lidar(Sensor): 116 | """ 117 | Lidar description. This class can be added to a CarlaSettings object to add 118 | a Lidar to the player vehicle. 119 | """ 120 | 121 | def __init__(self, name, **kwargs): 122 | super(Lidar, self).__init__(name, sensor_type="LIDAR_RAY_CAST") 123 | self.Channels = 32 124 | self.Range = 50.0 125 | self.PointsPerSecond = 56000 126 | self.RotationFrequency = 10.0 127 | self.UpperFovLimit = 10.0 128 | self.LowerFovLimit = -30.0 129 | self.ShowDebugPoints = False 130 | self.set(**kwargs) 131 | 132 | 133 | # ============================================================================== 134 | # -- SensorData ---------------------------------------------------------------- 135 | # ============================================================================== 136 | 137 | 138 | class SensorData(object): 139 | """Base class for sensor data returned from the server.""" 140 | def __init__(self, frame_number): 141 | self.frame_number = frame_number 142 | 143 | 144 | class Image(SensorData): 145 | """Data generated by a Camera.""" 146 | 147 | def __init__(self, frame_number, width, height, image_type, fov, raw_data): 148 | super(Image, self).__init__(frame_number=frame_number) 149 | assert len(raw_data) == 4 * width * height 150 | self.width = width 151 | self.height = height 152 | self.type = image_type 153 | self.fov = fov 154 | self.raw_data = raw_data 155 | self._converted_data = None 156 | 157 | @property 158 | def data(self): 159 | """ 160 | Lazy initialization for data property, stores converted data in its 161 | default format. 162 | """ 163 | if self._converted_data is None: 164 | from . 
import image_converter 165 | 166 | if self.type == 'Depth': 167 | self._converted_data = image_converter.depth_to_array(self) 168 | elif self.type == 'SemanticSegmentation': 169 | self._converted_data = image_converter.labels_to_array(self) 170 | else: 171 | self._converted_data = image_converter.to_rgb_array(self) 172 | return self._converted_data 173 | 174 | def save_to_disk(self, filename, format='.png'): 175 | """Save this image to disk (requires PIL installed).""" 176 | filename = _append_extension(filename, format) 177 | 178 | try: 179 | from PIL import Image as PImage 180 | except ImportError: 181 | raise RuntimeError( 182 | 'cannot import PIL, make sure pillow package is installed') 183 | 184 | image = PImage.frombytes( 185 | mode='RGBA', 186 | size=(self.width, self.height), 187 | data=self.raw_data, 188 | decoder_name='raw') 189 | color = image.split() 190 | image = PImage.merge("RGB", color[2::-1]) 191 | 192 | folder = os.path.dirname(filename) 193 | if not os.path.isdir(folder): 194 | os.makedirs(folder) 195 | image.save(filename, quality=100) 196 | 197 | 198 | class PointCloud(SensorData): 199 | """A list of points.""" 200 | 201 | def __init__(self, frame_number, array, color_array=None): 202 | super(PointCloud, self).__init__(frame_number=frame_number) 203 | self._array = array 204 | self._color_array = color_array 205 | self._has_colors = color_array is not None 206 | 207 | @property 208 | def array(self): 209 | """The numpy array holding the point-cloud. 210 | 211 | 3D points format for n elements: 212 | [ [X0,Y0,Z0], 213 | ..., 214 | [Xn,Yn,Zn] ] 215 | """ 216 | return self._array 217 | 218 | @property 219 | def color_array(self): 220 | """The numpy array holding the colors corresponding to each point. 221 | It is None if there are no colors. 222 | 223 | Colors format for n elements: 224 | [ [R0,G0,B0], 225 | ..., 226 | [Rn,Gn,Bn] ] 227 | """ 228 | return self._color_array 229 | 230 | def has_colors(self): 231 | """Return whether the points have color.""" 232 | return self._has_colors 233 | 234 | def apply_transform(self, transformation): 235 | """Modify the PointCloud instance transforming its points""" 236 | self._array = transformation.transform_points(self._array) 237 | 238 | def save_to_disk(self, filename, format='.ply'): 239 | """Save this point-cloud to disk as PLY format.""" 240 | filename = _append_extension(filename, format) 241 | 242 | def construct_ply_header(): 243 | """Generates a PLY header given a total number of 3D points and 244 | coloring property if specified 245 | """ 246 | points = len(self) # Total point number 247 | header = ['ply', 248 | 'format ascii 1.0', 249 | 'element vertex {}', 250 | 'property float32 x', 251 | 'property float32 y', 252 | 'property float32 z', 253 | 'property uchar diffuse_red', 254 | 'property uchar diffuse_green', 255 | 'property uchar diffuse_blue', 256 | 'end_header'] 257 | if not self._has_colors: 258 | return '\n'.join(header[0:6] + [header[-1]]).format(points) 259 | return '\n'.join(header).format(points) 260 | 261 | if not self._has_colors: 262 | ply = '\n'.join(['{:.2f} {:.2f} {:.2f}'.format( 263 | *p) for p in self._array.tolist()]) 264 | else: 265 | points_3d = numpy.concatenate( 266 | (self._array, self._color_array), axis=1) 267 | ply = '\n'.join(['{:.2f} {:.2f} {:.2f} {:.0f} {:.0f} {:.0f}' 268 | .format(*p) for p in points_3d.tolist()]) 269 | 270 | # Create folder to save if does not exist. 
271 | folder = os.path.dirname(filename) 272 | if not os.path.isdir(folder): 273 | os.makedirs(folder) 274 | 275 | # Open the file and save with the specific PLY format. 276 | with open(filename, 'w+') as ply_file: 277 | ply_file.write('\n'.join([construct_ply_header(), ply])) 278 | 279 | def __len__(self): 280 | return len(self.array) 281 | 282 | def __getitem__(self, key): 283 | color = None if self._color_array is None else Color( 284 | *self._color_array[key]) 285 | return Point(*self._array[key], color=color) 286 | 287 | def __iter__(self): 288 | class PointIterator(object): 289 | """Iterator class for PointCloud""" 290 | 291 | def __init__(self, point_cloud): 292 | self.point_cloud = point_cloud 293 | self.index = -1 294 | 295 | def __next__(self): 296 | self.index += 1 297 | if self.index >= len(self.point_cloud): 298 | raise StopIteration 299 | return self.point_cloud[self.index] 300 | 301 | def next(self): 302 | return self.__next__() 303 | 304 | return PointIterator(self) 305 | 306 | def __str__(self): 307 | return str(self.array) 308 | 309 | 310 | class LidarMeasurement(SensorData): 311 | """Data generated by a Lidar.""" 312 | 313 | def __init__(self, frame_number, horizontal_angle, channels, point_count_by_channel, point_cloud): 314 | super(LidarMeasurement, self).__init__(frame_number=frame_number) 315 | assert numpy.sum(point_count_by_channel) == len(point_cloud.array) 316 | self.horizontal_angle = horizontal_angle 317 | self.channels = channels 318 | self.point_count_by_channel = point_count_by_channel 319 | self.point_cloud = point_cloud 320 | 321 | @property 322 | def data(self): 323 | """The numpy array holding the point-cloud. 324 | 325 | 3D points format for n elements: 326 | [ [X0,Y0,Z0], 327 | ..., 328 | [Xn,Yn,Zn] ] 329 | """ 330 | return self.point_cloud.array 331 | 332 | def save_to_disk(self, filename, format='.ply'): 333 | """Save point-cloud to disk as PLY format.""" 334 | self.point_cloud.save_to_disk(filename, format) 335 | -------------------------------------------------------------------------------- /version084/carla/settings.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 2 | # Barcelona (UAB). 3 | # 4 | # This work is licensed under the terms of the MIT license. 5 | # For a copy, see . 6 | 7 | """CARLA Settings""" 8 | 9 | import io 10 | import random 11 | import sys 12 | 13 | 14 | if sys.version_info >= (3, 0): 15 | 16 | from configparser import ConfigParser 17 | 18 | else: 19 | 20 | from ConfigParser import RawConfigParser as ConfigParser 21 | 22 | 23 | from . import sensor as carla_sensor 24 | 25 | 26 | MAX_NUMBER_OF_WEATHER_IDS = 14 27 | 28 | 29 | class CarlaSettings(object): 30 | """ 31 | The CarlaSettings object controls the settings of an episode. The __str__ 32 | method retrieves an str with a CarlaSettings.ini file contents. 
33 | """ 34 | 35 | def __init__(self, **kwargs): 36 | # [CARLA/Server] 37 | self.SynchronousMode = True 38 | self.SendNonPlayerAgentsInfo = False 39 | # [CARLA/QualitySettings] 40 | self.QualityLevel = 'Epic' 41 | # [CARLA/LevelSettings] 42 | self.PlayerVehicle = None 43 | self.NumberOfVehicles = 20 44 | self.NumberOfPedestrians = 30 45 | self.WeatherId = 1 46 | self.SeedVehicles = None 47 | self.SeedPedestrians = None 48 | self.DisableTwoWheeledVehicles = False 49 | self.set(**kwargs) 50 | self._sensors = [] 51 | 52 | def set(self, **kwargs): 53 | for key, value in kwargs.items(): 54 | if not hasattr(self, key): 55 | raise ValueError('CarlaSettings: no key named %r' % key) 56 | setattr(self, key, value) 57 | 58 | def randomize_seeds(self): 59 | """ 60 | Randomize the seeds of the new episode's pseudo-random number 61 | generators. 62 | """ 63 | self.SeedVehicles = random.getrandbits(16) 64 | self.SeedPedestrians = random.getrandbits(16) 65 | 66 | def randomize_weather(self): 67 | """Randomized the WeatherId.""" 68 | self.WeatherId = random.randint(0, MAX_NUMBER_OF_WEATHER_IDS) 69 | 70 | def add_sensor(self, sensor): 71 | """Add a sensor to the player vehicle (see sensor.py).""" 72 | if not isinstance(sensor, carla_sensor.Sensor): 73 | raise ValueError('Sensor not supported') 74 | self._sensors.append(sensor) 75 | 76 | def __str__(self): 77 | """Converts this object to an INI formatted string.""" 78 | ini = ConfigParser() 79 | ini.optionxform = str 80 | S_SERVER = 'CARLA/Server' 81 | S_QUALITY = 'CARLA/QualitySettings' 82 | S_LEVEL = 'CARLA/LevelSettings' 83 | S_SENSOR = 'CARLA/Sensor' 84 | 85 | def get_attribs(obj): 86 | return [a for a in dir(obj) if not a.startswith('_') and not callable(getattr(obj, a))] 87 | 88 | def add_section(section, obj, keys): 89 | for key in keys: 90 | if hasattr(obj, key) and getattr(obj, key) is not None: 91 | if not ini.has_section(section): 92 | ini.add_section(section) 93 | ini.set(section, key, str(getattr(obj, key))) 94 | 95 | add_section(S_SERVER, self, [ 96 | 'SynchronousMode', 97 | 'SendNonPlayerAgentsInfo']) 98 | add_section(S_QUALITY, self, [ 99 | 'QualityLevel']) 100 | add_section(S_LEVEL, self, [ 101 | 'NumberOfVehicles', 102 | 'NumberOfPedestrians', 103 | 'WeatherId', 104 | 'SeedVehicles', 105 | 'SeedPedestrians', 106 | 'DisableTwoWheeledVehicles']) 107 | 108 | ini.add_section(S_SENSOR) 109 | ini.set(S_SENSOR, 'Sensors', ','.join(s.SensorName for s in self._sensors)) 110 | 111 | for sensor_def in self._sensors: 112 | section = S_SENSOR + '/' + sensor_def.SensorName 113 | add_section(section, sensor_def, get_attribs(sensor_def)) 114 | 115 | if sys.version_info >= (3, 0): 116 | text = io.StringIO() 117 | else: 118 | text = io.BytesIO() 119 | 120 | ini.write(text) 121 | return text.getvalue().replace(' = ', '=') 122 | -------------------------------------------------------------------------------- /version084/carla/tcp.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 2 | # Barcelona (UAB). 3 | # 4 | # This work is licensed under the terms of the MIT license. 5 | # For a copy, see . 6 | 7 | """Basic TCP client.""" 8 | 9 | import logging 10 | import socket 11 | import struct 12 | import time 13 | 14 | class TCPConnectionError(Exception): 15 | pass 16 | 17 | 18 | class TCPClient(object): 19 | """ 20 | Basic networking client for TCP connections. Errors occurred during 21 | networking operations are raised as TCPConnectionError. 
22 | 
23 |     Received messages are expected to be prepended by an int32 defining the
24 |     message size. Messages are sent following this convention.
25 |     """
26 | 
27 |     def __init__(self, host, port, timeout):
28 |         self._host = host
29 |         self._port = port
30 |         self._timeout = timeout
31 |         self._socket = None
32 |         self._logprefix = '(%s:%s) ' % (self._host, self._port)
33 | 
34 |     def connect(self, connection_attempts=10):
35 |         """Try to establish a connection to the given host:port."""
36 |         connection_attempts = max(1, connection_attempts)
37 |         error = None
38 |         for attempt in range(1, connection_attempts + 1):
39 |             try:
40 |                 self._socket = socket.create_connection(address=(self._host, self._port), timeout=self._timeout)
41 |                 self._socket.settimeout(self._timeout)
42 |                 logging.debug('%sconnected', self._logprefix)
43 |                 return
44 |             except socket.error as exception:
45 |                 error = exception
46 |                 logging.debug('%sconnection attempt %d: %s', self._logprefix, attempt, error)
47 |                 time.sleep(1)
48 |         self._reraise_exception_as_tcp_error('failed to connect', error)
49 | 
50 |     def disconnect(self):
51 |         """Disconnect any active connection."""
52 |         if self._socket is not None:
53 |             logging.debug('%sdisconnecting', self._logprefix)
54 |             self._socket.close()
55 |             self._socket = None
56 | 
57 |     def connected(self):
58 |         """Return whether there is an active connection."""
59 |         return self._socket is not None
60 | 
61 |     def write(self, message):
62 |         """Send message to the server."""
63 |         if self._socket is None:
64 |             raise TCPConnectionError(self._logprefix + 'not connected')
65 |         header = struct.pack('<L', len(message))
66 |         try:
67 |             self._socket.sendall(header + message)
68 |         except socket.error as exception:
69 |             self._reraise_exception_as_tcp_error(
70 |                 'failed to write data', exception)
71 | 
72 |     def read(self):
73 |         """Read a message from the server."""
74 |         header = self._read_n(4)
75 |         if not header:
76 |             raise TCPConnectionError(self._logprefix + 'connection closed')
77 |         length = struct.unpack('<L', header)[0]
78 |         return self._read_n(length)
79 | 
80 |     def _read_n(self, length):
81 |         """Read n bytes from the socket."""
82 |         if self._socket is None:
83 |             raise TCPConnectionError(self._logprefix + 'not connected')
84 |         buf = bytes()
85 |         while length > 0:
86 |             try:
87 |                 data = self._socket.recv(length)
88 |             except socket.error as exception:
89 |                 self._reraise_exception_as_tcp_error('failed to read data', exception)
90 |             if not data:
91 |                 raise TCPConnectionError(self._logprefix + 'connection closed')
92 |             buf += data
93 |             length -= len(data)
94 |         return buf
95 | 
96 |     def _reraise_exception_as_tcp_error(self, message, exception):
97 |         raise TCPConnectionError('%s%s: %s' % (self._logprefix, message, exception))
98 | 
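A minimal usage sketch for TCPClient (an annotation; the host, port and
payload here are hypothetical, and the CARLA server actually expects
protobuf-encoded messages rather than this raw payload). Every message on
the wire is a little-endian uint32 length header followed by the payload:

    from carla.tcp import TCPClient, TCPConnectionError

    client = TCPClient('localhost', 2000, timeout=10)
    try:
        client.connect(connection_attempts=5)
        client.write(b'hello')   # prepends the 4-byte length header
        reply = client.read()    # reads a length header, then that many bytes
    except TCPConnectionError as error:
        print(error)
    finally:
        client.disconnect()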
--------------------------------------------------------------------------------
/version084/carla/transform.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
2 | # Barcelona (UAB), and the INTEL Visual Computing Lab.
3 | #
4 | # This work is licensed under the terms of the MIT license.
5 | # For a copy, see .
6 | 
7 | import math
8 | 
9 | from collections import namedtuple
10 | 
11 | try:
12 |     import numpy
13 | except ImportError:
14 |     raise RuntimeError(
15 |         'cannot import numpy, make sure numpy package is installed.')
16 | 
17 | try:
18 |     from . import carla_server_pb2 as carla_protocol
19 | except ImportError:
20 |     raise RuntimeError('cannot import "carla_server_pb2.py", run '
21 |                        'the protobuf compiler to generate this file')
22 | 
23 | 
24 | Translation = namedtuple('Translation', 'x y z')
25 | Translation.__new__.__defaults__ = (0.0, 0.0, 0.0)
26 | 
27 | Rotation = namedtuple('Rotation', 'pitch yaw roll')
28 | Rotation.__new__.__defaults__ = (0.0, 0.0, 0.0)
29 | 
30 | Scale = namedtuple('Scale', 'x y z')
31 | Scale.__new__.__defaults__ = (1.0, 1.0, 1.0)
32 | 
33 | 
34 | class Transform(object):
35 |     """A 3D transformation.
36 | 
37 |     The transformation is applied in the order: scale, rotation, translation.
38 |     """
39 | 
40 |     def __init__(self, *args, **kwargs):
41 |         if 'matrix' in kwargs:
42 |             self.matrix = kwargs['matrix']
43 |             return
44 |         if isinstance(args[0], carla_protocol.Transform):
45 |             args = [
46 |                 Translation(
47 |                     args[0].location.x,
48 |                     args[0].location.y,
49 |                     args[0].location.z),
50 |                 Rotation(
51 |                     args[0].rotation.pitch,
52 |                     args[0].rotation.yaw,
53 |                     args[0].rotation.roll)
54 |             ]
55 |         self.matrix = numpy.matrix(numpy.identity(4))
56 |         self.set(*args, **kwargs)
57 | 
58 |     def set(self, *args):
59 |         """Builds the transform matrix given a Translation, Rotation
60 |         and Scale.
61 |         """
62 |         translation = Translation()
63 |         rotation = Rotation()
64 |         scale = Scale()
65 | 
66 |         if len(args) > 3:
67 |             raise ValueError("'Transform' accepts at most 3 arguments.")
68 | 
69 |         def get_single_obj_type(obj_type):
70 |             """Returns the unique object contained in the
71 |             argument list that is an instance of 'obj_type'.
72 |             """
73 |             obj = [x for x in args if isinstance(x, obj_type)]
74 |             if len(obj) > 1:
75 |                 raise ValueError("Transform only accepts one instance of " +
76 |                                  str(obj_type) + " as a parameter")
77 |             elif not obj:
78 |                 # Create an instance of the type that is 'obj_type'
79 |                 return obj_type()
80 |             return obj[0]
81 | 
82 |         translation = get_single_obj_type(Translation)
83 |         rotation = get_single_obj_type(Rotation)
84 |         scale = get_single_obj_type(Scale)
85 | 
86 |         for param in args:
87 |             if not isinstance(param, Translation) and \
88 |                     not isinstance(param, Rotation) and \
89 |                     not isinstance(param, Scale):
90 |                 raise TypeError(
91 |                     "'" + str(type(param)) + "' does not match "
92 |                     "'Translation', 'Rotation' or 'Scale'")
93 | 
94 |         # Transformation matrix
95 |         cy = math.cos(numpy.radians(rotation.yaw))
96 |         sy = math.sin(numpy.radians(rotation.yaw))
97 |         cr = math.cos(numpy.radians(rotation.roll))
98 |         sr = math.sin(numpy.radians(rotation.roll))
99 |         cp = math.cos(numpy.radians(rotation.pitch))
100 |         sp = math.sin(numpy.radians(rotation.pitch))
101 |         self.matrix[0, 3] = translation.x
102 |         self.matrix[1, 3] = translation.y
103 |         self.matrix[2, 3] = translation.z
104 |         self.matrix[0, 0] = scale.x * (cp * cy)
105 |         self.matrix[0, 1] = scale.y * (cy * sp * sr - sy * cr)
106 |         self.matrix[0, 2] = -scale.z * (cy * sp * cr + sy * sr)
107 |         self.matrix[1, 0] = scale.x * (sy * cp)
108 |         self.matrix[1, 1] = scale.y * (sy * sp * sr + cy * cr)
109 |         self.matrix[1, 2] = scale.z * (cy * sr - sy * sp * cr)
110 |         self.matrix[2, 0] = scale.x * (sp)
111 |         self.matrix[2, 1] = -scale.y * (cp * sr)
112 |         self.matrix[2, 2] = scale.z * (cp * cr)
113 | 
114 |     def inverse(self):
115 |         """Return the inverse transform."""
116 |         return Transform(matrix=numpy.linalg.inv(self.matrix))
117 | 
118 |     def transform_points(self, points):
119 |         """
120 |         Given a 4x4 transformation matrix, transform an array of 3D points.
121 |         Expected point format: [[X0,Y0,Z0],..[Xn,Yn,Zn]]
122 |         """
123 |         # Needed format: [[X0,..Xn],[Y0,..Yn],[Z0,..Zn]]. So let's transpose
124 |         # the point matrix.
125 |         points = points.transpose()
126 |         # Add a row of 1s (homogeneous coordinates): [[X0,..Xn],[Y0,..Yn],[Z0,..Zn],[1,..1]]
127 |         points = numpy.append(points, numpy.ones((1, points.shape[1])), axis=0)
128 |         # Point transformation
129 |         points = self.matrix * points
130 |         # Return all but last row
131 |         return points[0:3].transpose()
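    # Illustrative composition sketch (an annotation; the values are made
    # up): transforms compose with '*', and transform_points() applies the
    # 4x4 matrix to an Nx3 array of points.
    #
    #     t = Transform(Translation(x=1.0), Rotation(yaw=90.0))
    #     pts = numpy.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]])
    #     moved = t.transform_points(pts)         # Nx3 result
    #     combined = t * Transform(Scale(z=2.0))  # scale applied first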
125 |         points = points.transpose()
126 |         # Add a ones row: [[X0,..Xn],[Y0,..Yn],[Z0,..Zn],[1,..1]]
127 |         points = numpy.append(points, numpy.ones((1, points.shape[1])), axis=0)
128 |         # Point transformation
129 |         points = self.matrix * points
130 |         # Return all but the last row
131 |         return points[0:3].transpose()
132 | 
133 |     def __mul__(self, other):
134 |         return Transform(matrix=numpy.dot(self.matrix, other.matrix))
135 | 
136 |     def __str__(self):
137 |         return str(self.matrix)
138 | 
-------------------------------------------------------------------------------- /version084/carla/util.py: --------------------------------------------------------------------------------
1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
2 | # Barcelona (UAB).
3 | #
4 | # This work is licensed under the terms of the MIT license.
5 | # For a copy, see <https://opensource.org/licenses/MIT>.
6 | 
7 | import datetime
8 | import sys
9 | 
10 | from contextlib import contextmanager
11 | 
12 | 
13 | @contextmanager
14 | def make_connection(client_type, *args, **kwargs):
15 |     """Context manager to create and connect a networking client object."""
16 |     client = None
17 |     try:
18 |         client = client_type(*args, **kwargs)
19 |         client.connect()
20 |         yield client
21 |     finally:
22 |         if client is not None:
23 |             client.disconnect()
24 | 
25 | 
26 | class StopWatch(object):
27 |     def __init__(self):
28 |         self.start = datetime.datetime.now()
29 |         self.end = None
30 | 
31 |     def restart(self):
32 |         self.start = datetime.datetime.now()
33 |         self.end = None
34 | 
35 |     def stop(self):
36 |         self.end = datetime.datetime.now()
37 | 
38 |     def seconds(self):
39 |         return (self.end - self.start).total_seconds()
40 | 
41 |     def milliseconds(self):
42 |         return 1000.0 * self.seconds()
43 | 
44 | 
45 | def to_hex_str(header):
46 |     return ':'.join('{:02x}'.format(c if isinstance(c, int) else ord(c)) for c in header)  # py2 str or py3 bytes
47 | 
48 | 
49 | if sys.version_info >= (3, 3):
50 | 
51 |     import shutil
52 | 
53 |     def print_over_same_line(text):
54 |         terminal_width = shutil.get_terminal_size((80, 20)).columns
55 |         empty_space = max(0, terminal_width - len(text))
56 |         sys.stdout.write('\r' + text + empty_space * ' ')
57 |         sys.stdout.flush()
58 | 
59 | else:
60 | 
61 |     # Workaround for older Python versions.
62 |     def print_over_same_line(text):
63 |         line_length = max(print_over_same_line.last_line_length, len(text))
64 |         empty_space = max(0, line_length - len(text))
65 |         sys.stdout.write('\r' + text + empty_space * ' ')
66 |         sys.stdout.flush()
67 |         print_over_same_line.last_line_length = line_length
68 |     print_over_same_line.last_line_length = 0
-------------------------------------------------------------------------------- /version084/client_example.py: --------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | 
3 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
4 | # Barcelona (UAB).
5 | #
6 | # This work is licensed under the terms of the MIT license.
7 | # For a copy, see <https://opensource.org/licenses/MIT>.
8 | 
9 | """Basic CARLA client example."""
10 | 
11 | from __future__ import print_function
12 | 
13 | import argparse
14 | import logging
15 | import random
16 | import time
17 | 
18 | from carla.client import make_carla_client
19 | from carla.sensor import Camera, Lidar
20 | from carla.settings import CarlaSettings
21 | from carla.tcp import TCPConnectionError
22 | from carla.util import print_over_same_line
23 | 
24 | 
25 | def run_carla_client(args):
26 |     # Here we will run 3 episodes with 300 frames each.
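    # Illustrative shape of the per-frame loop below (one read_data /
    # send_control exchange per frame; in synchronous mode the server pauses
    # the simulation until the control arrives):
    #
    #   measurements, sensor_data = client.read_data()
    #   ...inspect measurements, optionally save sensor_data...
    #   client.send_control(...)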
27 |     number_of_episodes = 3
28 |     frames_per_episode = 300
29 | 
30 |     # We assume the CARLA server is already waiting for a client to connect at
31 |     # host:port. To create a connection we can use the `make_carla_client`
32 |     # context manager, which creates a CARLA client object and starts the
33 |     # connection. It will throw an exception if something goes wrong. The
34 |     # context manager makes sure the connection is always cleaned up on exit.
35 |     with make_carla_client(args.host, args.port) as client:
36 |         print('CarlaClient connected')
37 | 
38 |         for episode in range(0, number_of_episodes):
39 |             # Start a new episode.
40 | 
41 |             if args.settings_filepath is None:
42 | 
43 |                 # Create a CarlaSettings object. This object is a wrapper around
44 |                 # the CarlaSettings.ini file. Here we set the configuration we
45 |                 # want for the new episode.
46 |                 settings = CarlaSettings()
47 |                 settings.set(
48 |                     SynchronousMode=True,
49 |                     SendNonPlayerAgentsInfo=True,
50 |                     NumberOfVehicles=20,
51 |                     NumberOfPedestrians=40,
52 |                     WeatherId=random.choice([1, 3, 7, 8, 14]),
53 |                     QualityLevel=args.quality_level)
54 |                 settings.randomize_seeds()
55 | 
56 |                 # Now we want to add a couple of cameras to the player vehicle.
57 |                 # We will collect the images produced by these cameras every
58 |                 # frame.
59 | 
60 |                 # The default camera captures RGB images of the scene.
61 |                 camera0 = Camera('CameraRGB')
62 |                 # Set image resolution in pixels.
63 |                 camera0.set_image_size(800, 600)
64 |                 # Set its position relative to the car in meters.
65 |                 camera0.set_position(0.30, 0, 1.30)
66 | 
67 |                 settings.add_sensor(camera0)
68 | 
69 |                 # Let's add another camera producing ground-truth depth.
70 |                 camera1 = Camera('CameraDepth', PostProcessing='Depth')
71 |                 camera1.set_image_size(800, 600)
72 |                 camera1.set_position(0.30, 0, 1.30)
73 |                 settings.add_sensor(camera1)
74 | 
75 |                 if args.lidar:
76 |                     lidar = Lidar('Lidar32')
77 |                     lidar.set_position(0, 0, 2.50)
78 |                     lidar.set_rotation(0, 0, 0)
79 |                     lidar.set(
80 |                         Channels=32,
81 |                         Range=50,
82 |                         PointsPerSecond=100000,
83 |                         RotationFrequency=10,
84 |                         UpperFovLimit=10,
85 |                         LowerFovLimit=-30)
86 |                     settings.add_sensor(lidar)
87 | 
88 |             else:
89 | 
90 |                 # Alternatively, we can load these settings from a file.
91 |                 with open(args.settings_filepath, 'r') as fp:
92 |                     settings = fp.read()
93 | 
94 |             # Now we load these settings into the server. The server replies
95 |             # with a scene description containing the available start spots for
96 |             # the player. Here we can provide a CarlaSettings object or a
97 |             # CarlaSettings.ini file as string.
98 | 
99 |             scene = client.load_settings(settings)
100 | 
101 |             # Choose one player start at random.
102 |             number_of_player_starts = len(scene.player_start_spots)
103 |             player_start = random.randint(0, max(0, number_of_player_starts - 1))
104 | 
105 |             # Notify the server that we want to start the episode at the
106 |             # player_start index. This function blocks until the server is ready
107 |             # to start the episode.
108 |             print('Starting new episode at %r...' % scene.map_name)
109 | 
110 |             client.start_episode(player_start)
111 | 
112 |             # Iterate every frame in the episode.
113 |             for frame in range(0, frames_per_episode):
114 | 
115 |                 # Read the data produced by the server this frame.
116 |                 measurements, sensor_data = client.read_data()
117 | 
118 |                 # Print some of the measurements.
119 |                 print_measurements(measurements)
120 | 
121 |                 # Save the images to disk if requested.
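                # For illustration, with the default out_filename_format set in
                # main(), episode 0, sensor 'CameraRGB' and frame 12 map to
                # '_out/episode_0000/CameraRGB/000012' (save_to_disk appends a
                # suitable extension for the sensor type).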
122 |                 if args.save_images_to_disk:
123 |                     for name, measurement in sensor_data.items():
124 |                         filename = args.out_filename_format.format(episode, name, frame)
125 |                         measurement.save_to_disk(filename)
126 | 
127 | 
128 | 
129 | 
130 | 
131 | 
132 |                 # We can access the encoded data of a given image as numpy
133 |                 # array using its "data" property. For instance, to get the
134 |                 # depth value (normalized) at pixel X, Y
135 |                 #
136 |                 # depth_array = sensor_data['CameraDepth'].data
137 |                 # value_at_pixel = depth_array[Y, X]
138 |                 #
139 | 
140 |                 # Now we have to send the instructions to control the vehicle.
141 |                 # If we are in synchronous mode the server will pause the
142 |                 # simulation until we send this control.
143 | 
144 |                 if not args.autopilot:
145 | 
146 |                     client.send_control(
147 |                         steer=random.uniform(-1.0, 1.0),
148 |                         throttle=0.5,
149 |                         brake=0.0,
150 |                         hand_brake=False,
151 |                         reverse=False)
152 | 
153 |                 else:
154 | 
155 |                     # Together with the measurements, the server has sent the
156 |                     # control that the in-game autopilot would apply this frame.
157 |                     # We can enable autopilot by sending back this control to
158 |                     # the server. We can modify it if we want; here, for
159 |                     # instance, we add some noise to the steer.
160 | 
161 |                     control = measurements.player_measurements.autopilot_control
162 |                     control.steer += random.uniform(-0.1, 0.1)
163 |                     client.send_control(control)
164 | 
165 | 
166 | def print_measurements(measurements):
167 |     number_of_agents = len(measurements.non_player_agents)
168 |     player_measurements = measurements.player_measurements
169 |     message = 'Vehicle at ({pos_x:.1f}, {pos_y:.1f}), '
170 |     message += '{speed:.0f} km/h, '
171 |     message += 'Collision: {{vehicles={col_cars:.0f}, pedestrians={col_ped:.0f}, other={col_other:.0f}}}, '
172 |     message += '{other_lane:.0f}% other lane, {offroad:.0f}% off-road, '
173 |     message += '({agents_num:d} non-player agents in the scene)'
174 |     message = message.format(
175 |         pos_x=player_measurements.transform.location.x,
176 |         pos_y=player_measurements.transform.location.y,
177 |         speed=player_measurements.forward_speed * 3.6,  # m/s -> km/h
178 |         col_cars=player_measurements.collision_vehicles,
179 |         col_ped=player_measurements.collision_pedestrians,
180 |         col_other=player_measurements.collision_other,
181 |         other_lane=100 * player_measurements.intersection_otherlane,
182 |         offroad=100 * player_measurements.intersection_offroad,
183 |         agents_num=number_of_agents)
184 |     print_over_same_line(message)
185 | 
186 | 
187 | def main():
188 |     argparser = argparse.ArgumentParser(description=__doc__)
189 |     argparser.add_argument(
190 |         '-v', '--verbose',
191 |         action='store_true',
192 |         dest='debug',
193 |         help='print debug information')
194 |     argparser.add_argument(
195 |         '--host',
196 |         metavar='H',
197 |         default='localhost',
198 |         help='IP of the host server (default: localhost)')
199 |     argparser.add_argument(
200 |         '-p', '--port',
201 |         metavar='P',
202 |         default=2000,
203 |         type=int,
204 |         help='TCP port to connect to (default: 2000)')
205 |     argparser.add_argument(
206 |         '-a', '--autopilot',
207 |         action='store_true',
208 |         help='enable autopilot')
209 |     argparser.add_argument(
210 |         '-l', '--lidar',
211 |         action='store_true',
212 |         help='enable Lidar')
213 |     argparser.add_argument(
214 |         '-q', '--quality-level',
215 |         choices=['Low', 'Epic'],
216 |         type=lambda s: s.title(),
217 |         default='Epic',
218 |         help='graphics quality level; a lower level makes the simulation run considerably faster.')
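    # Example invocation, for illustration, combining flags defined in this
    # parser:
    #   python client_example.py --autopilot --images-to-disk -q Low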
219 |     argparser.add_argument(
220 |         '-i', '--images-to-disk',
221 |         action='store_true',
222 |         dest='save_images_to_disk',
223 |         help='save images (and Lidar data if active) to disk')
224 |     argparser.add_argument(
225 |         '-c', '--carla-settings',
226 |         metavar='PATH',
227 |         dest='settings_filepath',
228 |         default=None,
229 |         help='Path to a "CarlaSettings.ini" file')
230 | 
231 |     args = argparser.parse_args()
232 | 
233 |     log_level = logging.DEBUG if args.debug else logging.INFO
234 |     logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)
235 | 
236 |     logging.info('connecting to server %s:%s', args.host, args.port)
237 | 
238 |     args.out_filename_format = '_out/episode_{:0>4d}/{:s}/{:0>6d}'
239 | 
240 |     while True:
241 |         try:
242 | 
243 |             run_carla_client(args)
244 | 
245 |             print('Done.')
246 |             return
247 | 
248 |         except TCPConnectionError as error:
249 |             logging.error(error)
250 |             time.sleep(1)
251 | 
252 | 
253 | if __name__ == '__main__':
254 | 
255 |     try:
256 |         main()
257 |     except KeyboardInterrupt:
258 |         print('\nCancelled by user. Bye!')
259 | 
-------------------------------------------------------------------------------- /version084/driving_benchmarks/__init__.py: --------------------------------------------------------------------------------
1 | from .carla100 import CARLA100
2 | from .corl2017 import CoRL2017
-------------------------------------------------------------------------------- /version084/driving_benchmarks/carla100/__init__.py: --------------------------------------------------------------------------------
1 | from .carla100 import CARLA100
-------------------------------------------------------------------------------- /version084/driving_benchmarks/carla100/carla100.py: --------------------------------------------------------------------------------
1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
2 | # Barcelona (UAB).
3 | #
4 | # This work is licensed under the terms of the MIT license.
5 | # For a copy, see <https://opensource.org/licenses/MIT>.
6 | 
7 | # CARLA 100 experiment set.
8 | 
9 | from __future__ import print_function
10 | 
11 | from ...benchmark_tools.experiment import Experiment
12 | from ...carla.sensor import Camera
13 | from ...carla.settings import CarlaSettings
14 | from ...benchmark_tools.experiment_suites.experiment_suite import ExperimentSuite
15 | 
16 | 
17 | class CARLA100(ExperimentSuite):
18 | 
19 |     @property
20 |     def train_weathers(self):
21 |         return [1, 3, 6, 8]
22 | 
23 |     @property
24 |     def test_weathers(self):
25 |         return [10, 14]
26 | 
27 |     @property
28 |     def collision_as_failure(self):
29 |         return True
30 |     @property
31 |     def traffic_light_as_failure(self):
32 |         return False
33 |     def calculate_time_out(self, path_distance):
34 |         """
35 |         Returns the timeout, in seconds: the distance to the goal (in km)
36 |         at an assumed average speed of 5 km/h, plus a 20 s buffer, since
37 |         stops at traffic lights are expected. E.g. 1000 m -> 740 s.
38 |         """
39 |         return ((path_distance / 1000.0) / 5.0) * 3600.0 + 20.0
40 | 
41 |     def _poses_town01(self):
42 |         """
43 |         Each list of poses defines one task; all three tasks here use the same navigation poses.
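        A pose pair such as [105, 29] denotes driving from player-start spot
        105 to spot 29; the three tasks differ only in the traffic density
        configured in build_experiments.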
44 | 
45 |         """
46 | 
47 |         def _poses_navigation():
48 |             return [[105, 29], [27, 130], [102, 87], [132, 27], [25, 44],
49 |                     [4, 64], [34, 67], [54, 30], [140, 134], [105, 9],
50 |                     [148, 129], [65, 18], [21, 16], [147, 97], [134, 49],
51 |                     [30, 41], [81, 89], [69, 45], [102, 95], [18, 145],
52 |                     [111, 64], [79, 45], [84, 69], [73, 31], [37, 81]]
53 | 
54 |         return [_poses_navigation(),
55 |                 _poses_navigation(),
56 |                 _poses_navigation()]
57 | 
58 |     def _poses_town02(self):
59 | 
60 |         def _poses_navigation():
61 |             return [[19, 66], [79, 14], [19, 57], [39, 53], [60, 26],
62 |                     [53, 76], [42, 13], [31, 71], [59, 35], [47, 16],
63 |                     [10, 61], [66, 3], [20, 79], [14, 56], [26, 69],
64 |                     [79, 19], [2, 29], [16, 14], [5, 57], [77, 68],
65 |                     [70, 73], [46, 67], [34, 77], [61, 49], [21, 12]]
66 |         return [_poses_navigation(),
67 |                 _poses_navigation(),
68 |                 _poses_navigation()
69 |                 ]
70 | 
71 |     def build_experiments(self):
72 |         """
73 |         Creates the whole set of experiment objects.
74 |         The experiments created depend on the selected town.
75 | 
76 | 
77 |         """
78 | 
79 |         # We set the camera.
80 |         # This single RGB camera is used in every experiment.
81 | 
82 |         camera = Camera('CameraRGB')
83 |         camera.set(FOV=100)
84 |         camera.set_image_size(800, 600)
85 |         camera.set_position(2.0, 0.0, 1.4)
86 |         camera.set_rotation(-15.0, 0, 0)
87 | 
88 |         if self._city_name == 'Town01':
89 |             poses_tasks = self._poses_town01()
90 |             vehicles_tasks = [0, 20, 100]
91 |             pedestrians_tasks = [0, 50, 250]
92 |         else:
93 |             poses_tasks = self._poses_town02()
94 |             vehicles_tasks = [0, 15, 70]
95 |             pedestrians_tasks = [0, 50, 150]
96 | 
97 |         experiments_vector = []
98 | 
99 |         for weather in self.weathers:
100 | 
101 |             for iteration in range(len(poses_tasks)):
102 |                 poses = poses_tasks[iteration]
103 |                 vehicles = vehicles_tasks[iteration]
104 |                 pedestrians = pedestrians_tasks[iteration]
105 | 
106 |                 conditions = CarlaSettings()
107 |                 conditions.set(
108 |                     SendNonPlayerAgentsInfo=True,
109 |                     NumberOfVehicles=vehicles,
110 |                     NumberOfPedestrians=pedestrians,
111 |                     WeatherId=weather
112 |                 )
113 | 
114 |                 conditions.set(DisableTwoWheeledVehicles=True)
115 |                 # Add all the cameras that were set for this experiment
116 | 
117 |                 conditions.add_sensor(camera)
118 | 
119 |                 experiment = Experiment()
120 |                 experiment.set(
121 |                     Conditions=conditions,
122 |                     Poses=poses,
123 |                     Task=iteration,
124 |                     Repetitions=1
125 |                 )
126 |                 experiments_vector.append(experiment)
127 | 
128 |         return experiments_vector
129 | 
-------------------------------------------------------------------------------- /version084/driving_benchmarks/corl2017/__init__.py: --------------------------------------------------------------------------------
1 | from .corl_2017 import CoRL2017
-------------------------------------------------------------------------------- /version084/driving_benchmarks/corl2017/corl_2017.py: --------------------------------------------------------------------------------
1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
2 | # Barcelona (UAB).
3 | #
4 | # This work is licensed under the terms of the MIT license.
5 | # For a copy, see <https://opensource.org/licenses/MIT>.
6 | 
7 | # CoRL 2017 experiment set.
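# The suite below follows the CoRL 2017 benchmark layout: four tasks per town
# (straight, one curve, navigation, and navigation with dynamic obstacles;
# only the last task spawns other vehicles and pedestrians), each run under
# the training and test weathers defined in the class.
#
# Minimal usage sketch (illustrative; assumes the ExperimentSuite base class
# takes the town name, as the use of self._city_name below suggests):
#
#   from version084.driving_benchmarks import CoRL2017
#   suite = CoRL2017('Town01')
#   experiments = suite.build_experiments()  # one Experiment per weather/task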
8 | 
9 | from __future__ import print_function
10 | 
11 | 
12 | from ...benchmark_tools.experiment import Experiment
13 | from ...carla.sensor import Camera
14 | from ...carla.settings import CarlaSettings
15 | from ...benchmark_tools.experiment_suites.experiment_suite import ExperimentSuite
16 | 
17 | 
18 | class CoRL2017(ExperimentSuite):
19 | 
20 |     @property
21 |     def train_weathers(self):
22 |         return [1, 3, 6, 8]
23 | 
24 |     @property
25 |     def test_weathers(self):
26 |         return [4, 14]
27 | 
28 |     def _poses_town01(self):
29 |         """
30 |         Each list of poses defines one task. We have all four tasks: straight,
31 |         one curve, navigation, and navigation with dynamic obstacles.
32 |         """
33 | 
34 |         def _poses_straight():
35 |             return [[36, 40], [39, 35], [110, 114], [7, 3], [0, 4],
36 |                     [68, 50], [61, 59], [47, 64], [147, 90], [33, 87],
37 |                     [26, 19], [80, 76], [45, 49], [55, 44], [29, 107],
38 |                     [95, 104], [84, 34], [53, 67], [22, 17], [91, 148],
39 |                     [20, 107], [78, 70], [95, 102], [68, 44], [45, 69]]
40 | 
41 |         def _poses_one_curve():
42 |             return [[138, 17], [47, 16], [26, 9], [42, 49], [140, 124],
43 |                     [85, 98], [65, 133], [137, 51], [76, 66], [46, 39],
44 |                     [40, 60], [0, 29], [4, 129], [121, 140], [2, 129],
45 |                     [78, 44], [68, 85], [41, 102], [95, 70], [68, 129],
46 |                     [84, 69], [47, 79], [110, 15], [130, 17], [0, 17]]
47 | 
48 |         def _poses_navigation():
49 |             return [[105, 29], [27, 130], [102, 87], [132, 27], [24, 44],
50 |                     [96, 26], [34, 67], [28, 1], [140, 134], [105, 9],
51 |                     [148, 129], [65, 18], [21, 16], [147, 97], [42, 51],
52 |                     [30, 41], [18, 107], [69, 45], [102, 95], [18, 145],
53 |                     [111, 64], [79, 45], [84, 69], [73, 31], [37, 81]]
54 | 
55 |         return [_poses_straight(),
56 |                 _poses_one_curve(),
57 |                 _poses_navigation(),
58 |                 _poses_navigation()]
59 | 
60 |     def _poses_town02(self):
61 | 
62 |         def _poses_straight():
63 |             return [[38, 34], [4, 2], [12, 10], [62, 55], [43, 47],
64 |                     [64, 66], [78, 76], [59, 57], [61, 18], [35, 39],
65 |                     [12, 8], [0, 18], [75, 68], [54, 60], [45, 49],
66 |                     [46, 42], [53, 46], [80, 29], [65, 63], [0, 81],
67 |                     [54, 63], [51, 42], [16, 19], [17, 26], [77, 68]]
68 | 
69 |         def _poses_one_curve():
70 |             return [[37, 76], [8, 24], [60, 69], [38, 10], [21, 1],
71 |                     [58, 71], [74, 32], [44, 0], [71, 16], [14, 24],
72 |                     [34, 11], [43, 14], [75, 16], [80, 21], [3, 23],
73 |                     [75, 59], [50, 47], [11, 19], [77, 34], [79, 25],
74 |                     [40, 63], [58, 76], [79, 55], [16, 61], [27, 11]]
75 | 
76 |         def _poses_navigation():
77 |             return [[19, 66], [79, 14], [19, 57], [23, 1],
78 |                     [53, 76], [42, 13], [31, 71], [33, 5],
79 |                     [54, 30], [10, 61], [66, 3], [27, 12],
80 |                     [79, 19], [2, 29], [16, 14], [5, 57],
81 |                     [70, 73], [46, 67], [57, 50], [61, 49], [21, 12],
82 |                     [51, 81], [77, 68], [56, 65], [43, 54]]
83 | 
84 |         return [_poses_straight(),
85 |                 _poses_one_curve(),
86 |                 _poses_navigation(),
87 |                 _poses_navigation()
88 |                 ]
89 | 
90 |     def build_experiments(self):
91 |         """
92 |         Creates the whole set of experiment objects.
93 |         The experiments created depend on the selected town.
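        For instance, with the six weathers defined above (four training, two
        test) and the four tasks per town, this returns 6 * 4 = 24 Experiment
        objects, each carrying 25 start-goal pose pairs.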
94 | 
95 | 
96 |         """
97 | 
98 |         # We set the camera.
99 |         # This single RGB camera is used in every experiment.
100 | 
101 |         camera = Camera('CameraRGB')
102 |         camera.set(FOV=100)
103 |         camera.set_image_size(800, 600)
104 |         camera.set_position(2.0, 0.0, 1.4)
105 |         camera.set_rotation(-15.0, 0, 0)
106 | 
107 |         if self._city_name == 'Town01':
108 |             poses_tasks = self._poses_town01()
109 |             vehicles_tasks = [0, 0, 0, 20]
110 |             pedestrians_tasks = [0, 0, 0, 50]
111 |         else:
112 |             poses_tasks = self._poses_town02()
113 |             vehicles_tasks = [0, 0, 0, 15]
114 |             pedestrians_tasks = [0, 0, 0, 50]
115 | 
116 |         experiments_vector = []
117 | 
118 |         for weather in self.weathers:
119 | 
120 |             for iteration in range(len(poses_tasks)):
121 |                 poses = poses_tasks[iteration]
122 |                 vehicles = vehicles_tasks[iteration]
123 |                 pedestrians = pedestrians_tasks[iteration]
124 | 
125 |                 conditions = CarlaSettings()
126 |                 conditions.set(
127 |                     SendNonPlayerAgentsInfo=True,
128 |                     NumberOfVehicles=vehicles,
129 |                     NumberOfPedestrians=pedestrians,
130 |                     WeatherId=weather
131 |                 )
132 |                 # Add all the cameras that were set for this experiment
133 | 
134 |                 conditions.add_sensor(camera)
135 | 
136 |                 experiment = Experiment()
137 |                 experiment.set(
138 |                     Conditions=conditions,
139 |                     Poses=poses,
140 |                     Task=iteration,
141 |                     Repetitions=1
142 |                 )
143 |                 experiments_vector.append(experiment)
144 | 
145 |         return experiments_vector
146 | 
-------------------------------------------------------------------------------- /version084/setup.py: --------------------------------------------------------------------------------
1 | from setuptools import setup
2 | 
3 | # @todo Dependencies are missing.
4 | 
5 | setup(
6 |     name='carla_client',
7 |     version='0.8.4',
8 |     packages=['carla', 'carla.driving_benchmark', 'carla.agent', 'carla.agent.modules',
9 |               'carla.driving_benchmark.experiment_suites', 'carla.planner', 'game'],
10 |     license='MIT License',
11 |     description='Python API for communicating with the CARLA server.',
12 |     url='https://github.com/carla-simulator/carla',
13 |     author='The CARLA team',
14 |     author_email='carla.simulator@gmail.com',
15 |     include_package_data=True
16 | )
17 | 
--------------------------------------------------------------------------------