├── driving-benchmarks-yaw
├── agents
│ ├── __init__.py
│ ├── yaw
│ │ ├── __init__.py
│ │ └── modules
│ │ │ └── __init__.py
│ └── auxiliary
│ │ ├── __init__.py
│ │ └── modules
│ │ │ └── __init__.py
├── version084
│ ├── __init__.py
│ ├── carla
│ │ ├── __init__.py
│ │ ├── planner
│ │ │ ├── __init__.py
│ │ │ ├── Town01.png
│ │ │ ├── Town02.png
│ │ │ ├── Town02Big.png
│ │ │ ├── Town01Lanes.png
│ │ │ ├── Town01_lite.png
│ │ │ ├── Town02Lanes.png
│ │ │ ├── Town01Central.png
│ │ │ ├── Town02Central.png
│ │ │ ├── Town01_liteLanes.png
│ │ │ ├── Town01_liteCentral.png
│ │ │ ├── Town02.txt
│ │ │ ├── Town01.txt
│ │ │ ├── Town01_lite.txt
│ │ │ ├── bezier.py
│ │ │ ├── grid.py
│ │ │ └── graph.py
│ │ ├── util.py
│ │ ├── tcp.py
│ │ ├── settings.py
│ │ └── transform.py
│ ├── benchmark_tools
│ │ ├── experiment_suites
│ │ │ ├── __init__.py
│ │ │ ├── basic_experiment_suite.py
│ │ │ └── experiment_suite.py
│ │ ├── __init__.py
│ │ ├── agent.py
│ │ └── experiment.py
│ ├── driving_benchmarks
│ │ ├── carla100
│ │ │ ├── __init__.py
│ │ │ └── carla100.py
│ │ ├── corl2017
│ │ │ └── __init__.py
│ │ └── __init__.py
│ └── setup.py
├── .idea
│ ├── inspectionProfiles
│ │ └── profiles_settings.xml
│ ├── modules.xml
│ ├── misc.xml
│ ├── deployment.xml
│ ├── webServers.xml
│ └── driving-benchmarks-yaw.iml
├── mkdocs.yml
├── README.md
├── utils
│ ├── weight_average.py
│ ├── convlstmcell.py
│ ├── max_branch_ver0_1_adj.py
│ ├── PCF_max_branch_net_ver0_1_adj.py
│ ├── auxi_ver2_adj.py
│ ├── auxi_ver3_adj.py
│ └── max_branch_ver0_adj.py
├── Docs
│ ├── benchmark_start.md
│ ├── benchmark_structure.md
│ └── benchmark_metrics.md
├── .gitignore
├── benchmarks_084.py
├── run_auxi_ver3.py
├── run_yaw_ver0.py
├── run_yaw_ver1.py
├── run_yaw_ver2.py
├── run_auxi_ver0.py
└── run_auxi_ver2.py
├── carla_cil_pytorch_eval-pytorch_eval
├── .gitmodules
├── carla
│ ├── __init__.py
│ ├── planner
│ │ ├── __init__.py
│ │ ├── Town01.png
│ │ ├── Town02.png
│ │ ├── Town02Big.png
│ │ ├── Town01Lanes.png
│ │ ├── Town01_lite.png
│ │ ├── Town02Lanes.png
│ │ ├── Town01Central.png
│ │ ├── Town02Central.png
│ │ ├── Town01_liteLanes.png
│ │ ├── Town01_liteCentral.png
│ │ ├── Town02.txt
│ │ ├── Town01.txt
│ │ ├── Town01_lite.txt
│ │ ├── grid.py
│ │ ├── graph.py
│ │ └── city_track.py
│ ├── driving_benchmark
│ │ ├── __init__.py
│ │ ├── experiment_suites
│ │ │ ├── __init__.py
│ │ │ ├── basic_experiment_suite.py
│ │ │ └── experiment_suite.py
│ │ ├── experiment.py
│ │ └── results_printer.py
│ ├── agent
│ │ ├── __init__.py
│ │ ├── forward_agent.py
│ │ └── agent.py
│ ├── util.py
│ ├── tcp.py
│ ├── settings.py
│ └── transform.py
├── agents
│ ├── __init__.py
│ └── imitation
│ │ ├── __init__.py
│ │ ├── modules
│ │ │ ├── __init__.py
│ │ │ └── carla_net.py
│ │ ├── .gitignore
│ │ ├── model
│ │ │ └── .gitignore
│ │ ├── runs
│ │ │ └── .gitignore
│ │ └── imitation_learning_pytorch.py
├── benchmarks
│ └── __init__.py
├── .gitignore
├── script.sh
├── docker
│ ├── docker_build
│ │ └── Dockerfile
│ └── carla_test_compose
│ │ └── docker-compose.yml
├── .idea
│ ├── misc.xml
│ ├── modules.xml
│ ├── deployment.xml
│ ├── carla_cil_pytorch_eval-pytorch_eval.iml
│ └── webServers.xml
├── README.md
├── tmp_code_kimna.txt
├── LICENSE
├── run_CIL.py
└── run_CIL_add_db.py
├── carla.png
├── Kinematic-bicycle-model-of-the-vehicle.png
├── README.md
└── helper_auxi_v0.py
/driving-benchmarks-yaw/agents/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/.gitmodules:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/agents/yaw/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/version084/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/carla/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/agents/auxiliary/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/agents/yaw/modules/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/version084/carla/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/agents/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/benchmarks/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/carla/planner/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/agents/auxiliary/modules/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/agents/imitation/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/agents/imitation/modules/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/.gitignore:
--------------------------------------------------------------------------------
1 | *pycache*
2 | _benchmarks_results
3 |
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/agents/imitation/.gitignore:
--------------------------------------------------------------------------------
1 | model_*
2 | !model
3 |
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/version084/benchmark_tools/experiment_suites/__init__.py:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/carla.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/POSTECH-IMLAB/PIMNET_Autonomous_Driving/HEAD/carla.png
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/version084/carla/planner/__init__.py:
--------------------------------------------------------------------------------
1 | from .planner import Planner
2 |
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/agents/imitation/model/.gitignore:
--------------------------------------------------------------------------------
1 | *
2 | */
3 | !.gitignore
4 |
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/agents/imitation/runs/.gitignore:
--------------------------------------------------------------------------------
1 | *
2 | */
3 | !.gitignore
4 |
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/version084/driving_benchmarks/carla100/__init__.py:
--------------------------------------------------------------------------------
1 | from .carla100 import CARLA100
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/version084/driving_benchmarks/corl2017/__init__.py:
--------------------------------------------------------------------------------
1 | from .corl_2017 import CoRL2017
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/version084/benchmark_tools/__init__.py:
--------------------------------------------------------------------------------
1 | from .benchmark_runner import run_driving_benchmark
2 |
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/carla/driving_benchmark/__init__.py:
--------------------------------------------------------------------------------
1 | from .driving_benchmark import run_driving_benchmark
2 |
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/carla/agent/__init__.py:
--------------------------------------------------------------------------------
1 | from .forward_agent import ForwardAgent
2 | from .agent import Agent
3 |
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/version084/driving_benchmarks/__init__.py:
--------------------------------------------------------------------------------
1 | from .carla100 import CARLA100
2 | from .corl2017 import CoRL2017
--------------------------------------------------------------------------------
/Kinematic-bicycle-model-of-the-vehicle.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/POSTECH-IMLAB/PIMNET_Autonomous_Driving/HEAD/Kinematic-bicycle-model-of-the-vehicle.png
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/carla/driving_benchmark/experiment_suites/__init__.py:
--------------------------------------------------------------------------------
1 | from .basic_experiment_suite import BasicExperimentSuite
2 | from .corl_2017 import CoRL2017
3 |
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/version084/carla/planner/Town01.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/POSTECH-IMLAB/PIMNET_Autonomous_Driving/HEAD/driving-benchmarks-yaw/version084/carla/planner/Town01.png
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/version084/carla/planner/Town02.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/POSTECH-IMLAB/PIMNET_Autonomous_Driving/HEAD/driving-benchmarks-yaw/version084/carla/planner/Town02.png
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/carla/planner/Town01.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/POSTECH-IMLAB/PIMNET_Autonomous_Driving/HEAD/carla_cil_pytorch_eval-pytorch_eval/carla/planner/Town01.png
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/carla/planner/Town02.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/POSTECH-IMLAB/PIMNET_Autonomous_Driving/HEAD/carla_cil_pytorch_eval-pytorch_eval/carla/planner/Town02.png
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/version084/carla/planner/Town02Big.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/POSTECH-IMLAB/PIMNET_Autonomous_Driving/HEAD/driving-benchmarks-yaw/version084/carla/planner/Town02Big.png
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/carla/planner/Town02Big.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/POSTECH-IMLAB/PIMNET_Autonomous_Driving/HEAD/carla_cil_pytorch_eval-pytorch_eval/carla/planner/Town02Big.png
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/version084/carla/planner/Town01Lanes.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/POSTECH-IMLAB/PIMNET_Autonomous_Driving/HEAD/driving-benchmarks-yaw/version084/carla/planner/Town01Lanes.png
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/version084/carla/planner/Town01_lite.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/POSTECH-IMLAB/PIMNET_Autonomous_Driving/HEAD/driving-benchmarks-yaw/version084/carla/planner/Town01_lite.png
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/version084/carla/planner/Town02Lanes.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/POSTECH-IMLAB/PIMNET_Autonomous_Driving/HEAD/driving-benchmarks-yaw/version084/carla/planner/Town02Lanes.png
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/carla/planner/Town01Lanes.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/POSTECH-IMLAB/PIMNET_Autonomous_Driving/HEAD/carla_cil_pytorch_eval-pytorch_eval/carla/planner/Town01Lanes.png
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/carla/planner/Town01_lite.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/POSTECH-IMLAB/PIMNET_Autonomous_Driving/HEAD/carla_cil_pytorch_eval-pytorch_eval/carla/planner/Town01_lite.png
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/carla/planner/Town02Lanes.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/POSTECH-IMLAB/PIMNET_Autonomous_Driving/HEAD/carla_cil_pytorch_eval-pytorch_eval/carla/planner/Town02Lanes.png
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/version084/carla/planner/Town01Central.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/POSTECH-IMLAB/PIMNET_Autonomous_Driving/HEAD/driving-benchmarks-yaw/version084/carla/planner/Town01Central.png
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/version084/carla/planner/Town02Central.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/POSTECH-IMLAB/PIMNET_Autonomous_Driving/HEAD/driving-benchmarks-yaw/version084/carla/planner/Town02Central.png
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/carla/planner/Town01Central.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/POSTECH-IMLAB/PIMNET_Autonomous_Driving/HEAD/carla_cil_pytorch_eval-pytorch_eval/carla/planner/Town01Central.png
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/carla/planner/Town02Central.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/POSTECH-IMLAB/PIMNET_Autonomous_Driving/HEAD/carla_cil_pytorch_eval-pytorch_eval/carla/planner/Town02Central.png
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/version084/carla/planner/Town01_liteLanes.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/POSTECH-IMLAB/PIMNET_Autonomous_Driving/HEAD/driving-benchmarks-yaw/version084/carla/planner/Town01_liteLanes.png
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/carla/planner/Town01_liteLanes.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/POSTECH-IMLAB/PIMNET_Autonomous_Driving/HEAD/carla_cil_pytorch_eval-pytorch_eval/carla/planner/Town01_liteLanes.png
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/version084/carla/planner/Town01_liteCentral.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/POSTECH-IMLAB/PIMNET_Autonomous_Driving/HEAD/driving-benchmarks-yaw/version084/carla/planner/Town01_liteCentral.png
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/carla/planner/Town01_liteCentral.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/POSTECH-IMLAB/PIMNET_Autonomous_Driving/HEAD/carla_cil_pytorch_eval-pytorch_eval/carla/planner/Town01_liteCentral.png
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/script.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # weathers 1 clear 3 wet 6 hardrain 8 sunset
4 |
5 | python run_CIL.py \
6 | --log-name local_test \
7 | --weathers 6 \
8 | --model-path "model/policy.pth"
9 |
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/docker/docker_build/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM pytorch/pytorch:0.4.1-cuda9-cudnn7-runtime
2 |
3 | RUN pip install tensorboardX
4 |
5 | RUN rm -rf /var/lib/apt/lists/*
6 | RUN rm -rf /tmp/*
7 |
8 | WORKDIR /home
9 |
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/.idea/inspectionProfiles/profiles_settings.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/.idea/misc.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/.idea/modules.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/.idea/modules.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/.idea/misc.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/carla/agent/forward_agent.py:
--------------------------------------------------------------------------------
1 |
2 | from carla.agent.agent import Agent
3 | from carla.client import VehicleControl
4 |
5 |
6 | class ForwardAgent(Agent):
7 | """
8 | Simple derivation of the Agent class:
9 | a trivial agent that always goes straight.
10 | """
11 | def run_step(self, measurements, sensor_data, directions, target):
12 | control = VehicleControl()
13 | control.throttle = 0.9
14 |
15 | return control
16 |
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/.idea/deployment.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/version084/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup
2 |
3 | # @todo Dependencies are missing.
4 |
5 | setup(
6 | name='carla_client',
7 | version='0.8.4',
8 | packages=['carla', 'carla.driving_benchmark', 'carla.agent','carla.agent.modules',
9 | 'carla.driving_benchmark.experiment_suites', 'carla.planner', 'game'],
10 | license='MIT License',
11 | description='Python API for communicating with the CARLA server.',
12 | url='https://github.com/carla-simulator/carla',
13 | author='The CARLA team',
14 | author_email='carla.simulator@gmail.com',
15 | include_package_data=True
16 | )
17 |
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/mkdocs.yml:
--------------------------------------------------------------------------------
1 | site_name: CARLA Benchmarks
2 | repo_url: https://github.com/carla-simulator/carla
3 | docs_dir: Docs
4 | theme: readthedocs
5 |
6 | pages:
7 | - Driving Benchmark:
8 | - 'Quick Start': 'benchmark_start.md'
9 | - 'General Structure': 'benchmark_structure.md'
10 | - 'Creating Your Benchmark': 'benchmark_creating.md'
11 | - 'Computed Performance Metrics': 'benchmark_metrics.md'
12 | - Appendix:
13 | - 'Driving Benchmark Sample Results Town01': 'benchmark_basic_results_town01.md'
14 | - 'Driving Benchmark Sample Results Town02': 'benchmark_basic_results_town02.md'
15 |
16 |
17 | markdown_extensions:
18 | - admonition
19 |
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/.idea/deployment.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/.idea/carla_cil_pytorch_eval-pytorch_eval.iml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/.idea/webServers.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
14 |
15 |
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/.idea/webServers.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/carla/planner/Town02.txt:
--------------------------------------------------------------------------------
1 | 5.4400,-107.48000,-0.22000000
2 | 0.000000,0.000000,0.000000
3 | 1.000000,1.000000,1.000000
4 | -16.43022,-16.43022,0.000
5 | 25, 25
6 | 0,10 0,24 14
7 | 0,24 0,10 14
8 | 24,24 6,24 18
9 | 6,24 24,24 18
10 | 24,0 24,10 10
11 | 24,10 24,0 10
12 | 0,0 24,0 24
13 | 24,0 0,0 24
14 | 0,10 0,0 10
15 | 0,0 0,10 10
16 | 24,10 24,16 6
17 | 24,16 24,10 6
18 | 0,10 6,10 6
19 | 6,10 0,10 6
20 | 6,24 0,24 6
21 | 0,24 6,24 6
22 | 6,10 17,10 11
23 | 17,10 6,10 11
24 | 6,24 6,16 8
25 | 6,16 6,24 8
26 | 24,16 24,24 8
27 | 24,24 24,16 8
28 | 6,16 6,10 6
29 | 6,10 6,16 6
30 | 24,16 17,16 7
31 | 17,16 24,16 7
32 | 17,16 6,16 11
33 | 6,16 17,16 11
34 | 17,10 24,10 7
35 | 24,10 17,10 7
36 | 17,16 17,10 6
37 | 17,10 17,16 6
38 |
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/version084/carla/planner/Town02.txt:
--------------------------------------------------------------------------------
1 | 5.4400,-107.48000,-38.11000000
2 | 0.000000,0.000000,0.000000
3 | 1.000000,1.000000,1.000000
4 | -16.43022,-16.43022,0.000
5 | 25, 25
6 | 0,10 0,24 14
7 | 0,24 0,10 14
8 | 24,24 6,24 18
9 | 6,24 24,24 18
10 | 24,0 24,10 10
11 | 24,10 24,0 10
12 | 0,0 24,0 24
13 | 24,0 0,0 24
14 | 0,10 0,0 10
15 | 0,0 0,10 10
16 | 24,10 24,16 6
17 | 24,16 24,10 6
18 | 0,10 6,10 6
19 | 6,10 0,10 6
20 | 6,24 0,24 6
21 | 0,24 6,24 6
22 | 6,10 17,10 11
23 | 17,10 6,10 11
24 | 6,24 6,16 8
25 | 6,16 6,24 8
26 | 24,16 24,24 8
27 | 24,24 24,16 8
28 | 6,16 6,10 6
29 | 6,10 6,16 6
30 | 24,16 17,16 7
31 | 17,16 24,16 7
32 | 17,16 6,16 11
33 | 6,16 17,16 11
34 | 17,10 24,10 7
35 | 24,10 17,10 7
36 | 17,16 17,10 6
37 | 17,10 17,16 6
38 |
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/.idea/driving-benchmarks-yaw.iml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/README.md:
--------------------------------------------------------------------------------
1 | CARLA Driving Benchmarks Repository
2 | ===================================
3 |
4 |
5 | This repository stores different driving benchmarks
6 | that run on the [CARLA simulator](https://github.com/carla-simulator/carla).
7 |
8 | The following benchmarks are currently available:
9 |
10 | Version 0.8.4:
11 |
12 | * CoRL2017 - [Docs](Docs/benchmark_start.md/#corl-2017) / [Paper](http://proceedings.mlr.press/v78/dosovitskiy17a/dosovitskiy17a.pdf).
13 |
14 | * CARLA100 - [Docs](Docs/benchmark_start.md/#carla-100) / [Paper] (out soon)
15 |
16 |
17 |
18 |
19 | We are working on a 0.9.X version of the CoRL2017 benchmark.
20 | We happily accept new benchmarks as pull requests.
21 |
22 |
23 | License
24 | -------
25 |
26 | CARLA Benchmarks specific code is distributed under MIT License.
27 |
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/carla/agent/agent.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
3 | # Barcelona (UAB).
4 | #
5 | # This work is licensed under the terms of the MIT license.
6 | # For a copy, see <https://opensource.org/licenses/MIT>.
7 | # @author: german,felipecode
8 |
9 |
10 | from __future__ import print_function
11 | import abc
12 |
13 |
14 | class Agent(object):
15 | def __init__(self):
16 | self.__metaclass__ = abc.ABCMeta
17 |
18 | @abc.abstractmethod
19 | def run_step(self, measurements, sensor_data, directions, target):
20 | """
21 | Function to be redefined by an agent.
22 | :param measurements: the measurements like speed, the image data and a target
23 | :returns: a carla Control object, with the steering/gas/brake for the agent
24 | """
25 |
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/docker/carla_test_compose/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '2.3'
2 | services:
3 | carla_env:
4 | image: carlasim/carla:0.8.2
5 | runtime: nvidia
6 | command: ./CarlaUE4.sh -carla-server -fps=30
7 | networks:
8 | - carla_net
9 | environment:
10 | - NVIDIA_VISIBLE_DEVICES=0
11 |
12 | policy_image:
13 | image: onlytailei/pytorch:0.4.1-cuda9-tensorboardx
14 | runtime: nvidia
15 | environment:
16 | - NVIDIA_VISIBLE_DEVICES=0
17 | volumes:
18 | - ${TAI_CARLA}:/home/carla/
19 | networks:
20 | - carla_net
21 | working_dir: /home/carla/PythonClient/imitation_learning/
22 | command:
23 | python run_CIL.py
24 | --host carla_env
25 | --log-name clear2wet
26 | --weathers 1
27 | --model-path "model/policy.pth"
28 |
29 | networks:
30 | carla_net:
31 | external: true
32 |
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/README.md:
--------------------------------------------------------------------------------
1 | carla_cil_pytorch_eval
2 | ===============
3 |
4 | A pytorch implementation to evaluate the conditional imitation learning policy in "End-to-end Driving via Conditional Imitation Learning" and "CARLA: An Open Urban Driving Simulator".
5 |
6 | Requirements
7 | -------
8 | pytorch > 0.4.0
9 | tensorboardX
10 |
11 |
12 | Running
13 | ------
14 | Start the CARLA simulator and place your trained policy weights in ***model/policy.pth***,
15 | then run:
16 | ```
17 | $ python run_CIL.py --log-name local_test --weathers 6 --model-path "model/policy.pth"
18 | ```
19 |
20 | Policy Training
21 | ------
22 | Please refer to [carla_cil_pytorch](https://github.com/onlytailei/carla_cil_pytorch).
23 | For the benchmark results, please check our RA-L paper [VR-Goggles for Robots: Real-to-sim Domain Adaptation for Visual Control](https://ram-lab.com/file/tailei/vr_goggles/index.html).
24 |
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/version084/carla/planner/Town01.txt:
--------------------------------------------------------------------------------
1 | 0.0,0.0,-38.11000000
2 | 0.000000,0.000000,0.0
3 | 1.000000,1.000000,1.000000
4 | -16.43022,-16.43022,0.000
5 | 49, 41
6 | 0,0 0,40 40
7 | 0,40 0,0 40
8 | 48,40 41,40 7
9 | 41,40 48,40 7
10 | 48,0 48,40 40
11 | 48,40 48,0 40
12 | 0,0 11,0 11
13 | 11,0 0,0 11
14 | 41,0 48,0 7
15 | 48,0 41,0 7
16 | 41,40 11,40 30
17 | 11,40 41,40 30
18 | 41,0 41,7 7
19 | 41,7 41,0 7
20 | 11,40 0,40 11
21 | 0,40 11,40 11
22 | 11,0 19,0 8
23 | 19,0 11,0 8
24 | 11,40 11,24 16
25 | 11,24 11,40 16
26 | 41,24 41,40 16
27 | 41,40 41,24 16
28 | 11,24 11,16 8
29 | 11,16 11,24 8
30 | 41,24 11,24 30
31 | 11,24 41,24 30
32 | 41,16 41,24 8
33 | 41,24 41,16 8
34 | 11,16 11,7 9
35 | 11,7 11,16 9
36 | 41,16 11,16 30
37 | 11,16 41,16 30
38 | 41,7 41,16 9
39 | 41,16 41,7 9
40 | 11,7 11,0 7
41 | 11,0 11,7 7
42 | 41,7 19,7 22
43 | 19,7 41,7 22
44 | 19,0 41,0 22
45 | 41,0 19,0 22
46 | 19,7 11,7 8
47 | 11,7 19,7 8
48 | 19,0 19,7 7
49 | 19,7 19,0 7
50 |
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/carla/planner/Town01.txt:
--------------------------------------------------------------------------------
1 | 0.0,0.0,-0.3811000000
2 | 0.000000,0.000000,0.0
3 | 1.000000,1.000000,1.000000
4 | -16.43022,-16.43022,0.000
5 | 49, 41
6 | 0,0 0,40 40
7 | 0,40 0,0 40
8 | 48,40 41,40 7
9 | 41,40 48,40 7
10 | 48,0 48,40 40
11 | 48,40 48,0 40
12 | 0,0 11,0 11
13 | 11,0 0,0 11
14 | 41,0 48,0 7
15 | 48,0 41,0 7
16 | 41,40 11,40 30
17 | 11,40 41,40 30
18 | 41,0 41,7 7
19 | 41,7 41,0 7
20 | 11,40 0,40 11
21 | 0,40 11,40 11
22 | 11,0 19,0 8
23 | 19,0 11,0 8
24 | 11,40 11,24 16
25 | 11,24 11,40 16
26 | 41,24 41,40 16
27 | 41,40 41,24 16
28 | 11,24 11,16 8
29 | 11,16 11,24 8
30 | 41,24 11,24 30
31 | 11,24 41,24 30
32 | 41,16 41,24 8
33 | 41,24 41,16 8
34 | 11,16 11,7 9
35 | 11,7 11,16 9
36 | 41,16 11,16 30
37 | 11,16 41,16 30
38 | 41,7 41,16 9
39 | 41,16 41,7 9
40 | 11,7 11,0 7
41 | 11,0 11,7 7
42 | 41,7 19,7 22
43 | 19,7 41,7 22
44 | 19,0 41,0 22
45 | 41,0 19,0 22
46 | 19,7 11,7 8
47 | 11,7 19,7 8
48 | 19,0 19,7 7
49 | 19,7 19,0 7
50 |
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/carla/planner/Town01_lite.txt:
--------------------------------------------------------------------------------
1 | 0.0,0.0,-0.3811000000
2 | 0.000000,0.000000,0.0
3 | 1.000000,1.000000,1.000000
4 | -16.43022,-16.43022,0.000
5 | 49, 41
6 | 0,0 0,40 40
7 | 0,40 0,0 40
8 | 48,40 41,40 7
9 | 41,40 48,40 7
10 | 48,0 48,40 40
11 | 48,40 48,0 40
12 | 0,0 11,0 11
13 | 11,0 0,0 11
14 | 41,0 48,0 7
15 | 48,0 41,0 7
16 | 41,40 11,40 30
17 | 11,40 41,40 30
18 | 41,0 41,7 7
19 | 41,7 41,0 7
20 | 11,40 0,40 11
21 | 0,40 11,40 11
22 | 11,0 19,0 8
23 | 19,0 11,0 8
24 | 11,40 11,24 16
25 | 11,24 11,40 16
26 | 41,24 41,40 16
27 | 41,40 41,24 16
28 | 11,24 11,16 8
29 | 11,16 11,24 8
30 | 41,24 11,24 30
31 | 11,24 41,24 30
32 | 41,16 41,24 8
33 | 41,24 41,16 8
34 | 11,16 11,7 9
35 | 11,7 11,16 9
36 | 41,16 11,16 30
37 | 11,16 41,16 30
38 | 41,7 41,16 9
39 | 41,16 41,7 9
40 | 11,7 11,0 7
41 | 11,0 11,7 7
42 | 41,7 19,7 22
43 | 19,7 41,7 22
44 | 19,0 41,0 22
45 | 41,0 19,0 22
46 | 19,7 11,7 8
47 | 11,7 19,7 8
48 | 19,0 19,7 7
49 | 19,7 19,0 7
50 |
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/version084/carla/planner/Town01_lite.txt:
--------------------------------------------------------------------------------
1 | 0.0,0.0,-0.3811000000
2 | 0.000000,0.000000,0.0
3 | 1.000000,1.000000,1.000000
4 | -16.43022,-16.43022,0.000
5 | 49, 41
6 | 0,0 0,40 40
7 | 0,40 0,0 40
8 | 48,40 41,40 7
9 | 41,40 48,40 7
10 | 48,0 48,40 40
11 | 48,40 48,0 40
12 | 0,0 11,0 11
13 | 11,0 0,0 11
14 | 41,0 48,0 7
15 | 48,0 41,0 7
16 | 41,40 11,40 30
17 | 11,40 41,40 30
18 | 41,0 41,7 7
19 | 41,7 41,0 7
20 | 11,40 0,40 11
21 | 0,40 11,40 11
22 | 11,0 19,0 8
23 | 19,0 11,0 8
24 | 11,40 11,24 16
25 | 11,24 11,40 16
26 | 41,24 41,40 16
27 | 41,40 41,24 16
28 | 11,24 11,16 8
29 | 11,16 11,24 8
30 | 41,24 11,24 30
31 | 11,24 41,24 30
32 | 41,16 41,24 8
33 | 41,24 41,16 8
34 | 11,16 11,7 9
35 | 11,7 11,16 9
36 | 41,16 11,16 30
37 | 11,16 41,16 30
38 | 41,7 41,16 9
39 | 41,16 41,7 9
40 | 11,7 11,0 7
41 | 11,0 11,7 7
42 | 41,7 19,7 22
43 | 19,7 41,7 22
44 | 19,0 41,0 22
45 | 41,0 19,0 22
46 | 19,7 11,7 8
47 | 11,7 19,7 8
48 | 19,0 19,7 7
49 | 19,7 19,0 7
50 |
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/tmp_code_kimna.txt:
--------------------------------------------------------------------------------
1 |
2 | ### Build it in (width, height, channel) order
3 | # tensor to image
4 | from PIL import Image
5 | # img_tmp = img[0,:,:,:].cpu().numpy() * 255
6 | # np.multiply(img, 255)
7 | img_tmp = Image.fromarray(img.astype('uint8'), 'RGB')
8 | img_tmp.save('img.png')
9 |
10 | img_tmp1 = np.array(h5_file['rgb'])[199]
11 | img_tmp1 = Image.fromarray(img_tmp1.astype('uint8'), 'RGB')
12 | img_tmp1.save('img1.png')
13 |
14 | ad_array_tmp = np.concatenate((img_tmp, img_tmp1), axis=2)
15 | ad_array_tmp_transform = self.transform(ad_array_tmp)
16 |
17 | ad_array_tmp2 = ad_array_tmp_transform.permute(1,2,0).cpu().numpy() * 255
18 | ad_array_tmp2_split = np.split(ad_array_tmp2, 2, axis=2)
19 |
20 | ### Tensors are Channel x Height x Width by default.
21 | ### To save as an image ==> Height x Width x Channel
22 | img_tmp1 = np.array(h5_file['rgb'])[199]
23 | img_tmp1 = img_tmp1.transpose(1, 2, 0) * 255  # numpy transpose (ndarrays have no torch-style .permute/.cpu)
24 | img_tmp1 = Image.fromarray(img_tmp1.astype('uint8'), 'RGB')
25 | img_tmp1.save('img1.png')
26 |
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/version084/carla/planner/bezier.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from scipy.special import comb  # scipy.misc.comb was removed in SciPy 1.0
3 |
4 | def bernstein_poly(i, n, t):
5 | """
6 | The Bernstein polynomial of n, i as a function of t
7 | """
8 |
9 | return comb(n, i) * ( t**(n-i) ) * (1 - t)**i
10 |
11 |
12 | def bezier_curve(points, nTimes=1000):
13 | """
14 | Given a set of control points, return the
15 | bezier curve defined by the control points.
16 |
17 | points should be a list of lists, or list of tuples
18 | such as [ [1,1],
19 | [2,3],
20 | [4,5], ..[Xn, Yn] ]
21 | nTimes is the number of time steps, defaults to 1000
22 |
23 | See http://processingjs.nihongoresources.com/bezierinfo/
24 | """
25 |
26 | nPoints = len(points)
27 | xPoints = np.array([p[0] for p in points])
28 | yPoints = np.array([p[1] for p in points])
29 |
30 | t = np.linspace(0.0, 1.0, nTimes)
31 |
32 | polynomial_array = np.array([ bernstein_poly(i, nPoints-1, t) for i in range(0, nPoints) ])
33 |
34 | xvals = np.dot(xPoints, polynomial_array)
35 | yvals = np.dot(yPoints, polynomial_array)
36 |
37 | return xvals, yvals
38 |
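39 | # Minimal usage sketch (the control points below are illustrative, not taken
40 | # from the planner):
41 | if __name__ == '__main__':
42 |     # Quadratic Bezier curve through three control points, sampled 100 times.
43 |     xvals, yvals = bezier_curve([[0, 0], [2, 3], [4, 0]], nTimes=100)
44 |     print(xvals[0], yvals[0])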
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/version084/benchmark_tools/agent.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
3 | # Barcelona (UAB).
4 | #
5 | # This work is licensed under the terms of the MIT license.
6 | # For a copy, see <https://opensource.org/licenses/MIT>.
7 | # @author: german,felipecode
8 |
9 |
10 | from __future__ import print_function
11 | import abc
12 | from ..carla.client import VehicleControl
13 |
14 |
15 | class Agent(object):
16 | def __init__(self):
17 | self.__metaclass__ = abc.ABCMeta
18 |
19 | @abc.abstractmethod
20 | def run_step(self, measurements, sensor_data, directions, target):
21 | """
22 | Function to be redefined by an agent.
23 | :param measurements: the measurements like speed, the image data and a target
24 | :returns: a carla Control object, with the steering/gas/brake for the agent
25 | """
26 |
27 | class ForwardAgent(Agent):
28 | """
29 | Simple derivation of the Agent class:
30 | a trivial agent that always goes straight.
31 | """
32 | def run_step(self, measurements, sensor_data, directions, target):
33 | control = VehicleControl()
34 | control.throttle = 0.9
35 |
36 | return control
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | # carla_yaw
4 |
5 |
6 | End-to-end Autonomous Driving System using yaw information.
7 |
8 | We implement a vision-based, end-to-end autonomous driving model in PyTorch that uses yaw and localization information.
9 |
10 | Implemented Version
11 | - Python 3.6
12 | - PyTorch 1.3
13 | - Torchvision 0.2
14 | - CARLA 0.8.2
15 |
16 |
17 | # Kinematic Vehicle Equation
18 |
19 | We utilize the bicycle model from kinematic vehicle dynamics.
20 | The yaw angle is calculated with the kinematic vehicle equations, as sketched below.
21 |
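A minimal sketch of the yaw update under the kinematic bicycle model (the function and variable names here are illustrative, not the exact code used in this repository):

```python
import math

def update_yaw(yaw, speed, steering_angle, wheelbase, dt):
    """Kinematic bicycle model, rear-axle reference point:
    yaw_rate = speed / wheelbase * tan(steering_angle)."""
    yaw_rate = speed / wheelbase * math.tan(steering_angle)
    return yaw + yaw_rate * dt
```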
22 |
23 |
24 | # How to Run
25 | - Train: python main_auxi_v0.py --lr=1e-4 --train-dir=<your dir> --eval-dir=<your dir>
26 | - Eval: python run_auxi_ver0.py --model-path=trained_model.pth
27 |
28 |
29 | # CARLA benchmark
30 | The CARLA simulator has a large variety of driving environments, such as traffic lights and dynamic obstacles, including dynamic vehicles and pedestrians.
31 |
32 | You can download the CARLA simulator from here ([benchmark]).
33 |
34 |
35 |
36 | [benchmark]:
37 |
38 | License: LGPL 2.1
39 |
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/carla/driving_benchmark/experiment.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
2 | # Barcelona (UAB).
3 | #
4 | # This work is licensed under the terms of the MIT license.
5 | # For a copy, see <https://opensource.org/licenses/MIT>.
6 |
7 | from carla.settings import CarlaSettings
8 |
9 |
10 | class Experiment(object):
11 | """
12 | Experiment defines a certain task, under conditions
13 | A task is associated with a set of poses, containing start and end pose.
14 |
15 | Conditions are associated with a carla Settings and describe the following:
16 |
17 | Number Of Vehicles
18 | Number Of Pedestrians
19 | Weather
20 | Random Seed of the agents, describing their behaviour.
21 |
22 | """
23 |
24 | def __init__(self):
25 | self.Task = 0
26 | self.Conditions = CarlaSettings()
27 | self.Poses = [[]]
28 | self.Repetitions = 1
29 |
30 | def set(self, **kwargs):
31 | for key, value in kwargs.items():
32 | if not hasattr(self, key):
33 | raise ValueError('Experiment: no key named %r' % key)
34 | setattr(self, key, value)
35 |
36 | if self.Repetitions != 1:
37 | raise NotImplementedError()
38 |
39 | @property
40 | def task(self):
41 | return self.Task
42 |
43 | @property
44 | def conditions(self):
45 | return self.Conditions
46 |
47 | @property
48 | def poses(self):
49 | return self.Poses
50 |
51 | @property
52 | def repetitions(self):
53 | return self.Repetitions
54 |
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/version084/benchmark_tools/experiment.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
2 | # Barcelona (UAB).
3 | #
4 | # This work is licensed under the terms of the MIT license.
5 | # For a copy, see <https://opensource.org/licenses/MIT>.
6 |
7 | from ..carla.settings import CarlaSettings
8 |
9 |
10 | class Experiment(object):
11 | """
12 | Experiment defines a certain task, under conditions
13 | A task is associated with a set of poses, containing start and end pose.
14 |
15 | Conditions are associated with a carla Settings and describe the following:
16 |
17 | Number Of Vehicles
18 | Number Of Pedestrians
19 | Weather
20 | Random Seed of the agents, describing their behaviour.
21 |
22 | """
23 |
24 | def __init__(self):
25 | self.Task = 0
26 | self.TaskName = ''
27 | self.Conditions = CarlaSettings()
28 | self.Poses = [[]]
29 | self.Repetitions = 1
30 |
31 | def set(self, **kwargs):
32 | for key, value in kwargs.items():
33 | if not hasattr(self, key):
34 | raise ValueError('Experiment: no key named %r' % key)
35 | setattr(self, key, value)
36 |
37 |
38 |
39 | @property
40 | def task(self):
41 | return self.Task
42 | @property
43 | def task_name(self):
44 | return self.TaskName
45 |
46 | @property
47 | def conditions(self):
48 | return self.Conditions
49 |
50 | @property
51 | def poses(self):
52 | return self.Poses
53 |
54 | @property
55 | def repetitions(self):
56 | return self.Repetitions
57 |
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/utils/weight_average.py:
--------------------------------------------------------------------------------
1 | '''
2 |
3 | The weights are precomputed and written out below.
4 | For each index i from 1 to N,
5 | take the log with base 0.5 (of i/10)
6 | =LOG(G16/10, 0.5)
7 | then exponentiate it
8 | =EXP(H28)
9 |
10 | and apply a softmax so the weights sum to 1.
11 | '''
12 |
13 |
14 | class predefined_weights():
15 |
16 | def getWeight(self, queue_size):
17 | if queue_size == 1:
18 | return [1]
19 | elif queue_size == 2:
20 | return [0.731058579 * 2, 0.268941421 * 2]
21 | elif queue_size == 3:
22 | return [0.635794633 * 3, 0.233895774 * 3, 0.130309593 * 3]
23 | elif queue_size == 4:
24 | return [0.585421756 * 4, 0.215364628 * 4, 0.119985396 * 4, 0.079228219 * 4]
25 | elif queue_size == 5:
26 | return [0.553631842 * 5, 0.203669773 * 5, 0.113469879 * 5, 0.074925922 * 5, 0.054302585 * 5]
27 | elif queue_size == 6:
28 | return [0.531447503 * 6, 0.195508611 * 6, 0.108923077 * 6, 0.071923598 * 6, 0.05212665 * 6, 0.040070561 * 6]
29 | elif queue_size == 7:
30 | return [0.514928326 * 7, 0.189431545 * 7, 0.105537382 * 7, 0.069687971 * 7, 0.050506378 * 7, 0.038825033 * 7, 0.031083365 * 7]
31 | elif queue_size == 8:
32 | return [0.502057201 * 8, 0.184696522 * 8, 0.102899374 * 8, 0.067946053 * 8, 0.049243923 * 8, 0.037854564 * 8, 0.030306407 * 8, 0.024995956 * 8]
33 | elif queue_size == 9:
34 | return [0.491687612 * 9, 0.180881764 * 9, 0.10077407 * 9, 0.066542682 * 9, 0.048226829 * 9, 0.037072708 * 9, 0.029680452 * 9, 0.024479685 * 9, 0.020654198 * 9]
35 | elif queue_size == 10:
36 | return [0.483116285 * 10, 0.177728549 * 10, 0.099017329 * 10, 0.065382679 * 10, 0.047386117 * 10, 0.03642644 * 10, 0.029163049 * 10, 0.024052944 * 10, 0.020294144 * 10, 0.017432378 * 10]
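# A minimal sketch showing how the table above could be generated at runtime,
# following the recipe in the module docstring: take the log base 0.5 of i/10,
# exponentiate, normalize so the weights sum to 1 (the "softmax" step), then
# scale by the queue size as the table does. This reproduces the hard-coded
# constants; the function name is illustrative, not part of the original code.
import math

def compute_weights(queue_size):
    scores = [math.exp(math.log(i / 10.0, 0.5)) for i in range(1, queue_size + 1)]
    total = sum(scores)
    return [queue_size * s / total for s in scores]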
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/version084/carla/util.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
2 | # Barcelona (UAB).
3 | #
4 | # This work is licensed under the terms of the MIT license.
5 | # For a copy, see <https://opensource.org/licenses/MIT>.
6 |
7 | import datetime
8 | import sys
9 |
10 | from contextlib import contextmanager
11 |
12 |
13 | @contextmanager
14 | def make_connection(client_type, *args, **kwargs):
15 | """Context manager to create and connect a networking client object."""
16 | client = None
17 | try:
18 | client = client_type(*args, **kwargs)
19 | client.connect()
20 | yield client
21 | finally:
22 | if client is not None:
23 | client.disconnect()
24 |
25 |
26 | class StopWatch(object):
27 | def __init__(self):
28 | self.start = datetime.datetime.now()
29 | self.end = None
30 |
31 | def restart(self):
32 | self.start = datetime.datetime.now()
33 | self.end = None
34 |
35 | def stop(self):
36 | self.end = datetime.datetime.now()
37 |
38 | def seconds(self):
39 | return (self.end - self.start).total_seconds()
40 |
41 | def milliseconds(self):
42 | return 1000.0 * self.seconds()
43 |
44 |
45 | def to_hex_str(header):
46 | return ':'.join('{:02x}'.format(ord(c)) for c in header)
47 |
48 |
49 | if sys.version_info >= (3, 3):
50 |
51 | import shutil
52 |
53 | def print_over_same_line(text):
54 | terminal_width = shutil.get_terminal_size((80, 20)).columns
55 | empty_space = max(0, terminal_width - len(text))
56 | sys.stdout.write('\r' + text + empty_space * ' ')
57 | sys.stdout.flush()
58 |
59 | else:
60 |
61 | # Workaround for older Python versions.
62 | def print_over_same_line(text):
63 | line_length = max(print_over_same_line.last_line_length, len(text))
64 | empty_space = max(0, line_length - len(text))
65 | sys.stdout.write('\r' + text + empty_space * ' ')
66 | sys.stdout.flush()
67 | print_over_same_line.last_line_length = line_length
68 | print_over_same_line.last_line_length = 0
69 |
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/carla/util.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
2 | # Barcelona (UAB).
3 | #
4 | # This work is licensed under the terms of the MIT license.
5 | # For a copy, see <https://opensource.org/licenses/MIT>.
6 |
7 | import datetime
8 | import sys
9 |
10 | from contextlib import contextmanager
11 |
12 |
13 | @contextmanager
14 | def make_connection(client_type, *args, **kwargs):
15 | """Context manager to create and connect a networking client object."""
16 | client = None
17 | try:
18 | client = client_type(*args, **kwargs)
19 | client.connect()
20 | yield client
21 | finally:
22 | if client is not None:
23 | client.disconnect()
24 |
25 |
26 | class StopWatch(object):
27 | def __init__(self):
28 | self.start = datetime.datetime.now()
29 | self.end = None
30 |
31 | def restart(self):
32 | self.start = datetime.datetime.now()
33 | self.end = None
34 |
35 | def stop(self):
36 | self.end = datetime.datetime.now()
37 |
38 | def seconds(self):
39 | return (self.end - self.start).total_seconds()
40 |
41 | def milliseconds(self):
42 | return 1000.0 * self.seconds()
43 |
44 |
45 | def to_hex_str(header):
46 | return ':'.join('{:02x}'.format(ord(c)) for c in header)
47 |
48 |
49 | if sys.version_info >= (3, 3):
50 |
51 | import shutil
52 |
53 | def print_over_same_line(text):
54 | terminal_width = shutil.get_terminal_size((80, 20)).columns
55 | empty_space = max(0, terminal_width - len(text))
56 | sys.stdout.write('\r' + text + empty_space * ' ')
57 | sys.stdout.flush()
58 |
59 | else:
60 |
61 | # Workaround for older Python versions.
62 | def print_over_same_line(text):
63 | line_length = max(print_over_same_line.last_line_length, len(text))
64 | empty_space = max(0, line_length - len(text))
65 | sys.stdout.write('\r' + text + empty_space * ' ')
66 | sys.stdout.flush()
67 | print_over_same_line.last_line_length = line_length
68 | print_over_same_line.last_line_length = 0
69 |
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2019 Lei Tai
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
23 | ---------------- LICENSE FOR carla imitation-learning ------------------------
24 |
25 | MIT License
26 |
27 | Copyright (c) 2018 CARLA
28 |
29 | Permission is hereby granted, free of charge, to any person obtaining a copy
30 | of this software and associated documentation files (the "Software"), to deal
31 | in the Software without restriction, including without limitation the rights
32 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
33 | copies of the Software, and to permit persons to whom the Software is
34 | furnished to do so, subject to the following conditions:
35 |
36 | The above copyright notice and this permission notice shall be included in all
37 | copies or substantial portions of the Software.
38 |
39 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
40 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
41 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
42 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
43 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
44 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
45 | SOFTWARE.
46 |
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/Docs/benchmark_start.md:
--------------------------------------------------------------------------------
1 | Driving Benchmarks
2 | ==================
3 |
4 | The *benchmark tools* module is used
5 | to evaluate a driving controller (agent) and obtain
6 | metrics about its performance.
7 |
8 | This module is mainly designed for:
9 |
10 | * Users who develop autonomous driving agents and want
11 | to see how they perform in CARLA.
12 |
13 | In this section you will learn:
14 |
15 | * How to quickly get started and benchmark a trivial agent right away.
16 | * Learn about the general implementation [architecture of the driving
17 | benchmark module](benchmark_structure.md).
18 | * Learn [how to set up your agent and create your
19 | own set of experiments](benchmark_creating.md).
20 | * Learn about the [performance metrics used](benchmark_metrics.md).
21 |
22 |
23 |
24 |
25 | Getting Started
26 | ----------------
27 |
28 | As a way to familiarize yourself with the system, we
29 | provide a trivial agent performing in a small
30 | set of experiments (Basic). To execute it, simply
31 | run:
32 |
33 |
34 | $ ./benchmarks_084.py
35 |
36 |
37 | Keep in mind that, to run the command above, you need a CARLA 0.8.4 simulator
38 | running at localhost and on port 2000.
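For reference, the docker compose file in the sibling `carla_cil_pytorch_eval`
project launches the simulator as follows (the exact path depends on your
CARLA installation):

    $ ./CarlaUE4.sh -carla-server -fps=30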
39 |
40 | This benchmark example can be further configured.
41 | Run the help command to see options available.
42 |
43 | $ ./benchmarks_084.py --help
44 |
45 |
46 | One of the available options is to continue
47 | from a previous benchmark execution. For example,
48 | to continue an experiment in the basic benchmark
49 | with a log name of "driving_benchmark_test", run:
50 |
51 | $ ./benchmarks_084.py --continue-experiment -n driving_benchmark_test
52 |
53 |
54 | !!! note
55 | If the log name already exists and you don't set it to continue, it
56 | will create another log under a different name.
57 |
58 |
59 | Benchmarks Available
60 | -------------------
61 |
62 | ### CoRL 2017
63 |
64 | As explained in the original CARLA paper:
65 | [CoRL
66 | 2017 paper](http://proceedings.mlr.press/v78/dosovitskiy17a/dosovitskiy17a.pdf).
67 | The CoRL 2017 experiment suite can be run with a trivial agent by
68 | running:
69 |
70 | $ ./benchmarks_084.py --corl-2017
71 |
72 | When running the driving benchmark for the basic configuration
73 | you should [expect these results](benchmark_creating/#expected-results).
74 |
75 |
76 | ### CARLA 100
77 |
78 |
79 | The CARLA100 experiment suite can be run with a trivial agent by
80 | running:
81 |
82 | $ ./benchmarks_084.py --carla100
83 |
84 |
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | share/python-wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 |
29 | # PyInstaller
30 | # Usually these files are written by a python script from a template
31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest
33 | *.spec
34 |
35 | # Installer logs
36 | pip-log.txt
37 | pip-delete-this-directory.txt
38 |
39 | # Unit test / coverage reports
40 | htmlcov/
41 | .tox/
42 | .nox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | .hypothesis/
50 | .pytest_cache/
51 |
52 | # Translations
53 | *.mo
54 | *.pot
55 |
56 | # Django stuff:
57 | *.log
58 | local_settings.py
59 | db.sqlite3
60 |
61 | # Flask stuff:
62 | instance/
63 | .webassets-cache
64 |
65 | # Scrapy stuff:
66 | .scrapy
67 |
68 | # Sphinx documentation
69 | docs/_build/
70 |
71 | # PyBuilder
72 | target/
73 |
74 | # Jupyter Notebook
75 | .ipynb_checkpoints
76 |
77 | # IPython
78 | profile_default/
79 | ipython_config.py
80 |
81 | # pyenv
82 | .python-version
83 |
84 | # celery beat schedule file
85 | celerybeat-schedule
86 |
87 | # SageMath parsed files
88 | *.sage.py
89 |
90 | # Environments
91 | .env
92 | .venv
93 | env/
94 | venv/
95 | ENV/
96 | env.bak/
97 | venv.bak/
98 |
99 | # Spyder project settings
100 | .spyderproject
101 | .spyproject
102 |
103 | # Rope project settings
104 | .ropeproject
105 |
106 | # mkdocs documentation
107 | /site
108 |
109 | # mypy
110 | .mypy_cache/
111 | .dmypy.json
112 | dmypy.json
113 |
114 | # Pyre type checker
115 | .pyre/
116 | # Compiled source #
117 | ###################
118 | *.com
119 | *.class
120 | *.dll
121 | *.exe
122 | *.o
123 | *.so
124 |
125 | # Packages #
126 | ############
127 | # it's better to unpack these files and commit the raw source
128 | # git has its own built in compression methods
129 | *.7z
130 | *.dmg
131 | *.gz
132 | *.iso
133 | *.jar
134 | *.rar
135 | *.tar
136 | *.zip
137 |
138 | # Logs and databases #
139 | ######################
140 | *.log
141 | *.sql
142 | *.sqlite
143 |
144 | # OS generated files #
145 | ######################
146 | .DS_Store
147 | .DS_Store?
148 | ._*
149 | .Spotlight-V100
150 | .Trashes
151 | ehthumbs.db
152 | Thumbs.db
153 |
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/Docs/benchmark_structure.md:
--------------------------------------------------------------------------------
1 |
2 | Benchmark Tools Structure
3 | -----------------------------------
4 |
5 | The figure below shows the general structure of the driving
6 | benchmark module.
7 |
8 |
9 |
10 | 
11 | >Figure: The general structure of the agent benchmark module.
12 |
13 |
14 | The *benchmark tools* module is responsible for evaluating a certain
15 | *agent* on an *experiment suite*.
16 |
17 | The *experiment suite* is an abstract module.
18 | Thus, the user must define their own derivation
19 | of the *experiment suite*. We already provide the CoRL2017 suite and a simple
20 | *experiment suite* for testing.
21 |
22 | The *experiment suite* is composed of a set of *experiments*.
23 | Each *experiment* contains a *task* that consists of a set of navigation
24 | episodes, represented by a set of *poses*.
25 | These *poses* are tuples containing the start and end points of an
26 | episode.
27 |
28 | The *experiments* are also associated with a *condition*. A
29 | condition is represented by a [carla settings](carla_settings.md) object.
30 | The conditions specify simulation parameters such as the weather, the sensor suite,
31 | and the number of vehicles and pedestrians.
32 |
33 |
34 | The user should also derive an *agent* class. The *agent* is the active
35 | part that will be evaluated on the driving benchmark; a minimal sketch of both derivations is shown below.
36 |
37 | The driving benchmark also contains two auxiliary modules.
38 | The *recording module* is used to keep track of all measurements and
39 | can be used to pause and continue a driving benchmark.
40 | The [*metrics module*](benchmark_metrics.md) is used to compute the performance metrics
41 | by using the recorded measurements.
42 |
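As a minimal sketch (assuming this repository's `version084` module layout; the class names, pose indices, and control details are illustrative and may differ in your checkout), a custom *experiment suite* and *agent* could look like:

```python
from version084.benchmark_tools.agent import Agent
from version084.benchmark_tools.experiment import Experiment
from version084.benchmark_tools.experiment_suites.experiment_suite import ExperimentSuite
from version084.carla.settings import CarlaSettings


class MySuite(ExperimentSuite):
    """One task, one pose, driven under a single training weather."""

    @property
    def train_weathers(self):
        return [1]  # Clear Noon

    @property
    def test_weathers(self):
        return []

    def build_experiments(self):
        experiments = []
        for weather in self.weathers:
            conditions = CarlaSettings()
            conditions.set(WeatherId=weather)
            experiment = Experiment()
            experiment.set(Conditions=conditions,
                           Poses=[[7, 3]],  # illustrative start/end indices
                           Task=0,
                           Repetitions=1)
            experiments.append(experiment)
        return experiments


class MyAgent(Agent):
    """The active part under evaluation: maps observations to control."""

    def run_step(self, measurements, sensor_data, directions, target):
        # Return a control message (steer/throttle/brake); the exact
        # control class depends on the CARLA client version in use.
        raise NotImplementedError
```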
43 |
44 |
45 |
46 | Example: CoRL 2017
47 | ----------------------
48 |
49 | We already provide the CoRL 2017 experiment suite used to benchmark the
50 | agents for the [CoRL 2017 paper](http://proceedings.mlr.press/v78/dosovitskiy17a/dosovitskiy17a.pdf).
51 |
52 | The CoRL 2017 experiment suite has the following composition:
53 |
54 | * A total of 24 experiments for each CARLA town containing:
55 | * A task for going straight.
56 | * A task for making a single turn.
57 | * A task for going to an arbitrary position.
58 | * A task for going to an arbitrary position with dynamic objects.
59 | * Each task is composed of 25 poses that are repeated in 6 different weathers (Clear Noon, Heavy Rain Noon, Clear Sunset, After Rain Noon, Cloudy After Rain and Soft Rain Sunset).
60 | * The entire experiment set has 600 episodes.
61 | * The CoRL 2017 suite can take up to 24 hours to execute for Town01 and up to 15
62 | hours for Town02, depending on the agent's performance.
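
(That is, 4 tasks × 6 weathers = 24 experiments per town, and 24 experiments × 25 poses = 600 episodes.)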
63 |
64 |
65 |
66 |
67 |
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/version084/benchmark_tools/experiment_suites/basic_experiment_suite.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
2 | # Barcelona (UAB).
3 | #
4 | # This work is licensed under the terms of the MIT license.
5 | # For a copy, see <https://opensource.org/licenses/MIT>.
6 |
7 |
8 | from __future__ import print_function
9 |
10 | from ..experiment import Experiment
11 | from ...carla.sensor import Camera
12 | from ...carla.settings import CarlaSettings
13 |
14 | from .experiment_suite import ExperimentSuite
15 |
16 |
17 | class BasicExperimentSuite(ExperimentSuite):
18 |
19 | @property
20 | def train_weathers(self):
21 | return [1]
22 |
23 | @property
24 | def test_weathers(self):
25 | return [1]
26 |
27 | def build_experiments(self):
28 | """
29 | Creates the whole set of experiment objects.
30 | The experiments created depend on the selected town.
31 |
32 | """
33 |
34 | # We check the town and, based on that, define the town-related parameters.
35 | # The size of the vector equals the number of tasks; each
36 | # task also contains multiple poses (start and end positions)
37 | if self._city_name == 'Town01':
38 | poses_tasks = [[[7, 3]], [[138, 17]], [[140, 134]], [[140, 134]]]
39 | vehicles_tasks = [0, 0, 0, 20]
40 | pedestrians_tasks = [0, 0, 0, 50]
41 | else:
42 | poses_tasks = [[[4, 2]], [[37, 76]], [[19, 66]], [[19, 66]]]
43 | vehicles_tasks = [0, 0, 0, 15]
44 | pedestrians_tasks = [0, 0, 0, 50]
45 |
46 | # We set the camera
47 | # This single RGB camera is used on every experiment
48 |
49 | camera = Camera('CameraRGB')
50 | camera.set(FOV=100)
51 | camera.set_image_size(800, 600)
52 | camera.set_position(2.0, 0.0, 1.4)
53 | camera.set_rotation(-15.0, 0, 0)
54 |
55 | # Based on the parameters, creates a vector with experiment objects.
56 | experiments_vector = []
57 | for weather in self.weathers:
58 |
59 | for iteration in range(len(poses_tasks)):
60 | poses = poses_tasks[iteration]
61 | vehicles = vehicles_tasks[iteration]
62 | pedestrians = pedestrians_tasks[iteration]
63 |
64 | conditions = CarlaSettings()
65 | conditions.set(
66 | SendNonPlayerAgentsInfo=True,
67 | NumberOfVehicles=vehicles,
68 | NumberOfPedestrians=pedestrians,
69 | WeatherId=weather
70 |
71 | )
72 | # Add all the cameras that were set for this experiment
73 | conditions.add_sensor(camera)
74 | experiment = Experiment()
75 | experiment.set(
76 | Conditions=conditions,
77 | Poses=poses,
78 | Task=iteration,
79 | Repetitions=2
80 | )
81 | experiments_vector.append(experiment)
82 |
83 | return experiments_vector
84 |
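85 | # Usage sketch (assumes running from the repository root, since this
86 | # module uses relative imports):
87 | #
88 | #     from version084.benchmark_tools.experiment_suites.basic_experiment_suite \
89 | #         import BasicExperimentSuite
90 | #     suite = BasicExperimentSuite('Town01')
91 | #     len(suite.get_experiments())      # 4 tasks x 1 weather = 4 experiments
92 | #     suite.get_number_of_poses_task()  # 1 pose per task
93 | #     suite.dynamic_tasks               # [3] -- the only task with traffic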
--------------------------------------------------------------------------------
/helper_auxi_v0.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 |
4 | import os
5 | import random
6 | import shutil
7 |
8 | import torch
9 |
10 | # original transformations
11 | # check: https://github.com/carla-simulator/imitation-learning/issues/1
12 |
13 | # from imgaug import augmenters as iaa
14 | # st = lambda aug: iaa.Sometimes(0.4, aug)
15 | # oc = lambda aug: iaa.Sometimes(0.3, aug)
16 | # rl = lambda aug: iaa.Sometimes(0.09, aug)
17 | # seq = iaa.SomeOf((4, None), [
18 | # # blur images with a sigma between 0 and 1.5
19 | # rl(iaa.GaussianBlur((0, 1.5))),
20 | # # add gaussian noise to images
21 | # rl(iaa.AdditiveGaussianNoise(
22 | # loc=0,
23 | # scale=(0.0, 0.05),
24 | # per_channel=0.5)),
25 | # # randomly remove up to X% of the pixels
26 | # oc(iaa.Dropout((0.0, 0.10), per_channel=0.5)),
27 | # # randomly remove up to X% of the pixels
28 | # oc(iaa.CoarseDropout(
29 | # (0.0, 0.10), size_percent=(0.08, 0.2), per_channel=0.5)),
30 | # # change brightness of images (by -X to Y of original value)
31 | # oc(iaa.Add((-40, 40), per_channel=0.5)),
32 | # # change brightness of images (X-Y% of original value)
33 | # st(iaa.Multiply((0.10, 2.5), per_channel=0.2)),
34 | # # improve or worsen the contrast
35 | # rl(iaa.ContrastNormalization((0.5, 1.5), per_channel=0.5)),
36 | # # rl(iaa.Grayscale((0.0, 1))), # put grayscale
37 | # ], random_order=True)
38 |
39 |
40 | class TransWrapper(object):
41 | def __init__(self, seq):
42 | self.seq = seq
43 |
44 | def __call__(self, img):
45 | return self.seq.augment_image(img)
46 |
47 |
48 | class RandomTransWrapper(object):
49 | def __init__(self, seq, p=0.5):
50 | self.seq = seq
51 | self.p = p
52 |
53 | def __call__(self, img):
54 | if self.p < random.random():
55 | return img
56 | return self.seq.augment_image(img)
57 |
58 |
59 | class RandomTransWrapper_seqImg(object):
60 | def __init__(self, seq, p=0.5, rand_prob=0.5):
61 | self.seq = seq
62 | self.p = p
63 | self.rand_prob = rand_prob
64 |
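# NOTE: unlike RandomTransWrapper, __call__ below compares against the
# fixed rand_prob instead of drawing a fresh random number per image;
# presumably rand_prob is sampled once per image sequence so that every
# frame in the sequence gets the same augmentation decision.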
65 | def __call__(self, img):
66 | if self.p < self.rand_prob:
67 | return img
68 | return self.seq.augment_image(img)
69 |
70 |
71 | class AverageMeter(object):
72 | """Computes and stores the average and current value"""
73 |
74 | def __init__(self):
75 | self.reset()
76 |
77 | def reset(self):
78 | self.val = 0
79 | self.avg = 0
80 | self.sum = 0
81 | self.count = 0
82 |
83 | def update(self, val, n=1):
84 | self.val = val
85 | self.sum += val * n
86 | self.count += n
87 | self.avg = self.sum / self.count
88 |
89 |
90 | def save_checkpoint(state, id_, is_best, filename='checkpoint.pth'):
91 | torch.save(state, filename)
92 | if is_best:
93 | shutil.copyfile(
94 | filename,
95 | os.path.join("save_models", "{}_best.pth".format(id_))
96 | )
97 |
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/carla/driving_benchmark/experiment_suites/basic_experiment_suite.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
2 | # Barcelona (UAB).
3 | #
4 | # This work is licensed under the terms of the MIT license.
5 | # For a copy, see <https://opensource.org/licenses/MIT>.
6 |
7 |
8 | from __future__ import print_function
9 |
10 | from carla.driving_benchmark.experiment import Experiment
11 | from carla.sensor import Camera
12 | from carla.settings import CarlaSettings
13 |
14 | from .experiment_suite import ExperimentSuite
15 |
16 |
17 | class BasicExperimentSuite(ExperimentSuite):
18 |
19 | @property
20 | def train_weathers(self):
21 | return [1]
22 |
23 | @property
24 | def test_weathers(self):
25 | return [1]
26 |
27 | def build_experiments(self):
28 | """
29 | Creates the whole set of experiment objects.
30 | The experiments created depend on the selected town.
31 |
32 | """
33 |
34 | # We check the town and, based on that, define the town-related parameters.
35 | # The size of the vector equals the number of tasks; each
36 | # task also contains multiple poses (start and end positions)
37 | if self._city_name == 'Town01':
38 | poses_tasks = [[[7, 3]], [[138, 17]], [[140, 134]], [[140, 134]]]
39 | vehicles_tasks = [0, 0, 0, 20]
40 | pedestrians_tasks = [0, 0, 0, 50]
41 | else:
42 | poses_tasks = [[[4, 2]], [[37, 76]], [[19, 66]], [[19, 66]]]
43 | vehicles_tasks = [0, 0, 0, 15]
44 | pedestrians_tasks = [0, 0, 0, 50]
45 |
46 | # We set the camera
47 | # This single RGB camera is used on every experiment
48 |
49 | camera = Camera('CameraRGB')
50 | camera.set(FOV=100)
51 | camera.set_image_size(800, 600)
52 | camera.set_position(2.0, 0.0, 1.4)
53 | camera.set_rotation(-15.0, 0, 0)
54 |
55 | # Based on the parameters, creates a vector with experiment objects.
56 | experiments_vector = []
57 | for weather in self.weathers:
58 |
59 | for iteration in range(len(poses_tasks)):
60 | poses = poses_tasks[iteration]
61 | vehicles = vehicles_tasks[iteration]
62 | pedestrians = pedestrians_tasks[iteration]
63 |
64 | conditions = CarlaSettings()
65 | conditions.set(
66 | SendNonPlayerAgentsInfo=True,
67 | NumberOfVehicles=vehicles,
68 | NumberOfPedestrians=pedestrians,
69 | WeatherId=weather
70 |
71 | )
72 | # Add all the cameras that were set for this experiment
73 | conditions.add_sensor(camera)
74 | experiment = Experiment()
75 | experiment.set(
76 | Conditions=conditions,
77 | Poses=poses,
78 | Task=iteration,
79 | Repetitions=1
80 | )
81 | experiments_vector.append(experiment)
82 |
83 | return experiments_vector
84 |
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/carla/driving_benchmark/experiment_suites/experiment_suite.py:
--------------------------------------------------------------------------------
1 | # To be redefined on subclasses on how to calculate timeout for an episode
2 | import abc
3 |
4 |
5 | class ExperimentSuite(object):
6 |
7 | def __init__(self, city_name):
8 |
9 | self._city_name = city_name
10 | self._experiments = self.build_experiments()
11 |
12 | def calculate_time_out(self, path_distance):
13 | """
14 | Function to return the timeout, in seconds, calculated
15 | from the distance to the goal: the time needed to cover it
16 | at 10 km/h plus a 10-second margin, as in the CoRL paper.
17 | """
18 | return ((path_distance / 1000.0) / 10.0) * 3600.0 + 10.0
19 |
20 | def get_number_of_poses_task(self):
21 | """
22 | Get the number of poses a task has for this benchmark
23 | """
24 |
25 | # Warning: assumes that all tasks have the same size
26 |
27 | return len(self._experiments[0].poses)
28 |
29 | def get_experiments(self):
30 | """
31 | Getter for the experiment set.
32 | """
33 | return self._experiments
34 |
35 | @property
36 | def dynamic_tasks(self):
37 | """
38 | Returns the tasks that contain dynamic obstacles
39 | """
40 | dynamic_tasks = set()
41 | for exp in self._experiments:
42 | if exp.conditions.NumberOfVehicles > 0 or exp.conditions.NumberOfPedestrians > 0:
43 | dynamic_tasks.add(exp.task)
44 |
45 | return list(dynamic_tasks)
46 |
47 | @property
48 | def metrics_parameters(self):
49 | """
50 | Property to return the parameters for the metric module
51 | Could be redefined depending on the needs of the user.
52 | """
53 | return {
54 |
55 | 'intersection_offroad': {'frames_skip': 10,
56 | 'frames_recount': 20,
57 | 'threshold': 0.3
58 | },
59 | 'intersection_otherlane': {'frames_skip': 10,
60 | 'frames_recount': 20,
61 | 'threshold': 0.4
62 | },
63 | 'collision_other': {'frames_skip': 10,
64 | 'frames_recount': 20,
65 | 'threshold': 400
66 | },
67 | 'collision_vehicles': {'frames_skip': 10,
68 | 'frames_recount': 30,
69 | 'threshold': 400
70 | },
71 | 'collision_pedestrians': {'frames_skip': 5,
72 | 'frames_recount': 100,
73 | 'threshold': 300
74 | },
75 |
76 | }
77 |
78 | @property
79 | def weathers(self):
80 | weathers = set(self.train_weathers)
81 | weathers.update(self.test_weathers)
82 | return weathers
83 |
84 | @abc.abstractmethod
85 | def build_experiments(self):
86 | """
87 | Returns a set of experiments to be evaluated
88 | Must be redefined in an inherited class.
89 |
90 | """
91 |
92 | @abc.abstractproperty
93 | def train_weathers(self):
94 | """
95 | Return the weathers that are considered as training conditions
96 | """
97 |
98 | @abc.abstractproperty
99 | def test_weathers(self):
100 | """
101 | Return the weathers that are considered as testing conditions
102 | """
103 |
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/carla/tcp.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
2 | # Barcelona (UAB).
3 | #
4 | # This work is licensed under the terms of the MIT license.
5 | # For a copy, see <https://opensource.org/licenses/MIT>.
6 |
7 | """Basic TCP client."""
8 |
9 | import logging
10 | import socket
11 | import struct
12 | import time
13 |
14 | class TCPConnectionError(Exception):
15 | pass
16 |
17 |
18 | class TCPClient(object):
19 | """
20 | Basic networking client for TCP connections. Errors that occur during
21 | networking operations are raised as TCPConnectionError.
22 | 
23 | Received messages are expected to be prepended by an int32 defining the
24 | message size. Messages are sent following this convention.
25 | """
26 |
27 | def __init__(self, host, port, timeout):
28 | self._host = host
29 | self._port = port
30 | self._timeout = timeout
31 | self._socket = None
32 | self._logprefix = '(%s:%s) ' % (self._host, self._port)
33 |
34 | def connect(self, connection_attempts=10):
35 | """Try to establish a connection to the given host:port."""
36 | connection_attempts = max(1, connection_attempts)
37 | error = None
38 | for attempt in range(1, connection_attempts + 1):
39 | try:
40 | self._socket = socket.create_connection(address=(self._host, self._port), timeout=self._timeout)
41 | self._socket.settimeout(self._timeout)
42 | logging.debug('%sconnected', self._logprefix)
43 | return
44 | except socket.error as exception:
45 | error = exception
46 | logging.debug('%sconnection attempt %d: %s', self._logprefix, attempt, error)
47 | time.sleep(1)
48 | self._reraise_exception_as_tcp_error('failed to connect', error)
49 |
50 | def disconnect(self):
51 | """Disconnect any active connection."""
52 | if self._socket is not None:
53 | logging.debug('%sdisconnecting', self._logprefix)
54 | self._socket.close()
55 | self._socket = None
56 |
57 | def connected(self):
58 | """Return whether there is an active connection."""
59 | return self._socket is not None
60 |
61 | def write(self, message):
62 | """Send message to the server."""
63 | if self._socket is None:
64 | raise TCPConnectionError(self._logprefix + 'not connected')
65 | header = struct.pack('<L', len(message))
66 | try:
67 | self._socket.sendall(header + message)
68 | except socket.error as exception:
69 | self._reraise_exception_as_tcp_error('failed to write data', exception)
70 | 
71 | def read(self):
72 | """Read a message from the server."""
73 | header = self._read_n(4)
74 | if not header:
75 | raise TCPConnectionError(self._logprefix +
76 | 'connection closed')
77 | length = struct.unpack('<L', header)[0]
78 | return self._read_n(length)
79 | 
80 | def _read_n(self, length):
81 | """Read n bytes from the socket."""
82 | if self._socket is None:
83 | raise TCPConnectionError(self._logprefix + 'not connected')
84 | buf = bytes()
85 | while length > 0:
86 | try:
87 | data = self._socket.recv(length)
88 | except socket.error as exception:
89 | self._reraise_exception_as_tcp_error('failed to read data', exception)
90 | if not data:
91 | raise TCPConnectionError(self._logprefix + 'connection closed')
92 | buf += data
93 | length -= len(data)
94 | return buf
95 |
96 | def _reraise_exception_as_tcp_error(self, message, exception):
97 | raise TCPConnectionError('%s%s: %s' % (self._logprefix, message, exception))
98 |
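99 | # Usage sketch: exchange one length-prefixed message. Assumes a server
100 | # speaking this framing is already listening on localhost:2000.
101 | if __name__ == '__main__':
102 |     client = TCPClient('localhost', 2000, timeout=10)
103 |     client.connect()
104 |     client.write(b'hello')  # framed with a little-endian uint32 length
105 |     print(client.read())    # blocks until one full message arrives
106 |     client.disconnect()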
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/version084/carla/tcp.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
2 | # Barcelona (UAB).
3 | #
4 | # This work is licensed under the terms of the MIT license.
5 | # For a copy, see <https://opensource.org/licenses/MIT>.
6 |
7 | """Basic TCP client."""
8 |
9 | import logging
10 | import socket
11 | import struct
12 | import time
13 |
14 | class TCPConnectionError(Exception):
15 | pass
16 |
17 |
18 | class TCPClient(object):
19 | """
20 | Basic networking client for TCP connections. Errors that occur during
21 | networking operations are raised as TCPConnectionError.
22 | 
23 | Received messages are expected to be prepended by an int32 defining the
24 | message size. Messages are sent following this convention.
25 | """
26 |
27 | def __init__(self, host, port, timeout):
28 | self._host = host
29 | self._port = port
30 | self._timeout = timeout
31 | self._socket = None
32 | self._logprefix = '(%s:%s) ' % (self._host, self._port)
33 |
34 | def connect(self, connection_attempts=10):
35 | """Try to establish a connection to the given host:port."""
36 | connection_attempts = max(1, connection_attempts)
37 | error = None
38 | for attempt in range(1, connection_attempts + 1):
39 | try:
40 | self._socket = socket.create_connection(address=(self._host, self._port), timeout=self._timeout)
41 | self._socket.settimeout(self._timeout)
42 | logging.debug('%sconnected', self._logprefix)
43 | return
44 | except socket.error as exception:
45 | error = exception
46 | logging.debug('%sconnection attempt %d: %s', self._logprefix, attempt, error)
47 | time.sleep(1)
48 | self._reraise_exception_as_tcp_error('failed to connect', error)
49 |
50 | def disconnect(self):
51 | """Disconnect any active connection."""
52 | if self._socket is not None:
53 | logging.debug('%sdisconnecting', self._logprefix)
54 | self._socket.close()
55 | self._socket = None
56 |
57 | def connected(self):
58 | """Return whether there is an active connection."""
59 | return self._socket is not None
60 |
61 | def write(self, message):
62 | """Send message to the server."""
63 | if self._socket is None:
64 | raise TCPConnectionError(self._logprefix + 'not connected')
65 | header = struct.pack('<L', len(message))
66 | try:
67 | self._socket.sendall(header + message)
68 | except socket.error as exception:
69 | self._reraise_exception_as_tcp_error('failed to write data', exception)
70 | 
71 | def read(self):
72 | """Read a message from the server."""
73 | header = self._read_n(4)
74 | if not header:
75 | raise TCPConnectionError(self._logprefix +
76 | 'connection closed')
77 | length = struct.unpack('<L', header)[0]
78 | return self._read_n(length)
79 | 
80 | def _read_n(self, length):
81 | """Read n bytes from the socket."""
82 | if self._socket is None:
83 | raise TCPConnectionError(self._logprefix + 'not connected')
84 | buf = bytes()
85 | while length > 0:
86 | try:
87 | data = self._socket.recv(length)
88 | except socket.error as exception:
89 | self._reraise_exception_as_tcp_error('failed to read data', exception)
90 | if not data:
91 | raise TCPConnectionError(self._logprefix + 'connection closed')
92 | buf += data
93 | length -= len(data)
94 | return buf
95 |
96 | def _reraise_exception_as_tcp_error(self, message, exception):
97 | raise TCPConnectionError('%s%s: %s' % (self._logprefix, message, exception))
98 |
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/benchmarks_084.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
4 | # Barcelona (UAB).
5 | #
6 | # This work is licensed under the terms of the MIT license.
7 | # For a copy, see <https://opensource.org/licenses/MIT>.
8 |
9 | import argparse
10 | import logging
11 |
12 | from version084.benchmark_tools import run_driving_benchmark
13 | from version084.driving_benchmarks import CoRL2017, CARLA100
14 | from version084.benchmark_tools.experiment_suites.basic_experiment_suite import BasicExperimentSuite
15 | from version084.benchmark_tools.agent import ForwardAgent
16 |
17 | if __name__ == '__main__':
18 |
19 | argparser = argparse.ArgumentParser(description=__doc__)
20 | argparser.add_argument(
21 | '-v', '--verbose',
22 | action='store_true',
23 | dest='verbose',
24 | help='print some extra status information')
25 | argparser.add_argument(
26 | '-db', '--debug',
27 | action='store_true',
28 | dest='debug',
29 | help='print debug information')
30 | argparser.add_argument(
31 | '--host',
32 | metavar='H',
33 | # default='localhost',
34 | default='141.223.12.42',
35 | help='IP of the host server (default: localhost)')
36 | argparser.add_argument(
37 | '-p', '--port',
38 | metavar='P',
39 | default=2000,
40 | type=int,
41 | help='TCP port to listen to (default: 2000)')
42 | argparser.add_argument(
43 | '-c', '--city-name',
44 | metavar='C',
45 | default='Town01',
46 | help='The town that is going to be used on the benchmark '
47 | + '(needs to match the active town in the server; options: Town01 or Town02)')
48 | argparser.add_argument(
49 | '-n', '--log_name',
50 | metavar='T',
51 | default='test',
52 | help='The name of the log file to be created by the benchmark'
53 | )
54 | argparser.add_argument(
55 | '--corl-2017',
56 | action='store_true',
57 | help='If you want to benchmark the corl-2017 instead of the Basic one'
58 | )
59 | argparser.add_argument(
60 | '--carla100',
61 | action='store_true',
62 | help='If you want to use the carla100 benchmark instead of the Basic one'
63 | )
64 | argparser.add_argument(
65 | '--continue-experiment',
66 | action='store_true',
67 | help='If you want to continue the experiment with the same name'
68 | )
69 |
70 | args = argparser.parse_args()
71 | if args.debug:
72 | log_level = logging.DEBUG
73 | elif args.verbose:
74 | log_level = logging.INFO
75 | else:
76 | log_level = logging.WARNING
77 |
78 | logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)
79 | logging.info('listening to server %s:%s', args.host, args.port)
80 |
81 | # We instantiate a forward agent, a simple policy that just sets
82 | # acceleration to 0.9 and steering to zero
83 | agent = ForwardAgent()
84 |
85 | # We instantiate an experiment suite. Basically a set of experiments
86 | # that are going to be evaluated on this benchmark.
87 | if args.corl_2017:
88 | experiment_suite = CoRL2017(args.city_name)
89 | elif args.carla100:
90 | experiment_suite = CARLA100(args.city_name)
91 | else:
92 | print(' WARNING: running the basic driving benchmark; to run the CoRL 2017'
93 | ' experiment suite, you should run'
94 | ' python benchmarks_084.py --corl-2017')
95 | experiment_suite = BasicExperimentSuite(args.city_name)
96 |
97 | # Now actually run the driving_benchmark
98 | run_driving_benchmark(agent, experiment_suite, args.city_name,
99 | args.log_name, args.continue_experiment,
100 | args.host, args.port)
101 |
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/Docs/benchmark_metrics.md:
--------------------------------------------------------------------------------
1 |
2 | Driving Benchmark Performance Metrics
3 | ------------------------------
4 |
5 | This page explains the performance metrics module.
6 | This module is used to compute a summary of results based on the actions
7 | performed by the agent during the benchmark.
8 |
9 |
10 | ### Provided performance metrics
11 |
12 | The driving benchmark performance metrics module provides the following performance metrics:
13 |
14 | * **Percentage of Success**: The percentage of episodes (poses from tasks),
15 | that the agent successfully completed.
16 |
17 | * **Average Completion**: The average distance towards the goal that the
18 | agent was able to travel.
19 |
20 | * **Off Road Intersection**: The number of times the agent goes out of the road.
21 | The intersection is only counted if the area of the vehicle outside
22 | of the road is bigger than a *threshold*.
23 |
24 | * **Other Lane Intersection**: The number of times the agent goes to the other
25 | lane. The intersection is only counted if the area of the vehicle on the
26 | other lane is bigger than a *threshold*.
27 |
28 | * **Vehicle Collisions**: The number of collisions with vehicles that had
29 | an impact bigger than a *threshold*.
30 |
31 | * **Pedestrian Collisions**: The number of collisions with pedestrians
32 | that had an impact bigger than a *threshold*.
33 |
34 | * **General Collisions**: The number of collisions with all other
35 | objects with an impact bigger than a *threshold*.
36 |
37 |
38 | ### Executing and Setting Parameters
39 |
40 | The metrics are computed as the final step of the benchmark,
41 | and a summary of the results is stored in a JSON file.
42 | Internally, this is executed as follows:
43 |
44 | ```python
45 | metrics_object = Metrics(metrics_parameters)
46 | summary_dictionary = metrics_object.compute(path_to_execution_log)
47 | ```
48 |
49 | The Metrics compute function
50 | receives the full path to the execution log.
51 | The Metrics class should be instantiated with some parameters.
52 | The parameters are:
53 |
54 | * **Threshold**: The threshold used by the metrics.
55 | * **Frames Recount**: After an infraction starts, the number
56 | of frames the agent must keep committing the infraction for
57 | it to be counted as another infraction.
58 | * **Frames Skip**: The number of frames that are
59 | skipped after a collision or an intersection starts.
60 |
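For example, under the defaults below for off-road intersections (`frames_skip: 10`, `frames_recount: 20`), the 10 frames right after the infraction starts are skipped, and the agent must keep committing the infraction for another 20 frames before it is counted again.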
61 | These parameters are defined as a property of the *Experiment Suite*
62 | base class and can be redefined in your
63 | [custom *Experiment Suite*](benchmark_creating/#defining-the-experiment-suite).
64 |
65 | The default parameters are:
66 |
67 |
68 | @property
69 | def metrics_parameters(self):
70 | """
71 | Property to return the parameters for the metrics module
72 | Could be redefined depending on the needs of the user.
73 | """
74 | return {
75 |
76 | 'intersection_offroad': {'frames_skip': 10,
77 | 'frames_recount': 20,
78 | 'threshold': 0.3
79 | },
80 | 'intersection_otherlane': {'frames_skip': 10,
81 | 'frames_recount': 20,
82 | 'threshold': 0.4
83 | },
84 | 'collision_other': {'frames_skip': 10,
85 | 'frames_recount': 20,
86 | 'threshold': 400
87 | },
88 | 'collision_vehicles': {'frames_skip': 10,
89 | 'frames_recount': 30,
90 | 'threshold': 400
91 | },
92 | 'collision_pedestrians': {'frames_skip': 5,
93 | 'frames_recount': 100,
94 | 'threshold': 300
95 | },
96 |
97 | }
98 |
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/utils/convlstmcell.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import math
3 | import torch.nn as nn
4 | from torch.nn import Parameter
5 | from torch.nn import functional as F
6 | from torch.autograd import Variable
7 | from torch.nn.modules.utils import _pair
8 |
9 | # https://gist.github.com/Kaixhin/57901e91e5c5a8bac3eb0cbbdd3aba81
10 |
11 | class ConvLSTMCell(nn.Module):
12 | def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=1, dilation=1, groups=1, bias=True):
13 | super(ConvLSTMCell, self).__init__()
14 | if in_channels % groups != 0:
15 | raise ValueError('in_channels must be divisible by groups')
16 | if out_channels % groups != 0:
17 | raise ValueError('out_channels must be divisible by groups')
18 | kernel_size = _pair(kernel_size)
19 | stride = _pair(stride)
20 | padding = _pair(padding)
21 | dilation = _pair(dilation)
22 | self.in_channels = in_channels
23 | self.out_channels = out_channels
24 | self.kernel_size = kernel_size
25 | self.stride = stride
26 | self.padding = padding
27 | self.padding_h = tuple(
28 | k // 2 for k, s, p, d in zip(kernel_size, stride, padding, dilation))
29 | self.dilation = dilation
30 | self.groups = groups
31 | self.weight_ih = Parameter(torch.Tensor(
32 | 4 * out_channels, in_channels // groups, *kernel_size))
33 | self.weight_hh = Parameter(torch.Tensor(
34 | 4 * out_channels, out_channels // groups, *kernel_size))
35 | self.weight_ch = Parameter(torch.Tensor(
36 | 3 * out_channels, out_channels // groups, *kernel_size))
37 | if bias:
38 | self.bias_ih = Parameter(torch.Tensor(4 * out_channels))
39 | self.bias_hh = Parameter(torch.Tensor(4 * out_channels))
40 | self.bias_ch = Parameter(torch.Tensor(3 * out_channels))
41 | else:
42 | self.register_parameter('bias_ih', None)
43 | self.register_parameter('bias_hh', None)
44 | self.register_parameter('bias_ch', None)
45 | self.register_buffer('wc_blank', torch.zeros(1, 1, 1, 1))
46 | self.reset_parameters()
47 |
48 | def reset_parameters(self):
49 | n = 4 * self.in_channels
50 | for k in self.kernel_size:
51 | n *= k
52 | stdv = 1. / math.sqrt(n)
53 | self.weight_ih.data.uniform_(-stdv, stdv)
54 | self.weight_hh.data.uniform_(-stdv, stdv)
55 | self.weight_ch.data.uniform_(-stdv, stdv)
56 | if self.bias_ih is not None:
57 | self.bias_ih.data.uniform_(-stdv, stdv)
58 | self.bias_hh.data.uniform_(-stdv, stdv)
59 | self.bias_ch.data.uniform_(-stdv, stdv)
60 |
61 | def forward(self, input, hx):
62 | h_0, c_0 = hx
63 | wx = F.conv2d(input, self.weight_ih, self.bias_ih,
64 | self.stride, self.padding, self.dilation, self.groups)
65 |
66 | wh = F.conv2d(h_0, self.weight_hh, self.bias_hh, self.stride,
67 | self.padding_h, self.dilation, self.groups)
68 |
69 | # Cell uses a Hadamard product instead of a convolution?
70 | wc = F.conv2d(c_0, self.weight_ch, self.bias_ch, self.stride,
71 | self.padding_h, self.dilation, self.groups)
72 |
73 | wxhc = wx + wh + torch.cat((wc[:, :2 * self.out_channels], Variable(self.wc_blank).expand(
74 | wc.size(0), wc.size(1) // 3, wc.size(2), wc.size(3)), wc[:, 2 * self.out_channels:]), 1)
75 |
76 | # i = F.sigmoid(wxhc[:, :self.out_channels])
77 | # f = F.sigmoid(wxhc[:, self.out_channels:2 * self.out_channels])
78 | # g = F.tanh(wxhc[:, 2 * self.out_channels:3 * self.out_channels])
79 | # o = F.sigmoid(wxhc[:, 3 * self.out_channels:])
80 | i = torch.sigmoid(wxhc[:, :self.out_channels])
81 | f = torch.sigmoid(wxhc[:, self.out_channels:2 * self.out_channels])
82 | g = torch.tanh(wxhc[:, 2 * self.out_channels:3 * self.out_channels])
83 | o = torch.sigmoid(wxhc[:, 3 * self.out_channels:])
84 |
85 | c_1 = f * c_0 + i * g
86 | # h_1 = o * F.tanh(c_1)
87 | h_1 = o * torch.tanh(c_1)
88 | return h_1, (h_1, c_1)
89 |
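90 | # A minimal smoke-test sketch: roll the cell over a dummy sequence.
91 | # Shapes assume kernel_size=3, stride=1, padding=1, which preserve the
92 | # spatial resolution.
93 | if __name__ == '__main__':
94 |     batch, in_ch, hidden, height, width, steps = 2, 3, 8, 16, 16, 5
95 |     cell = ConvLSTMCell(in_ch, hidden, kernel_size=3, padding=1)
96 |     x = torch.randn(steps, batch, in_ch, height, width)
97 |     h = torch.zeros(batch, hidden, height, width)
98 |     c = torch.zeros(batch, hidden, height, width)
99 |     for t in range(steps):
100 |         out, (h, c) = cell(x[t], (h, c))
101 |     print(out.shape)  # torch.Size([2, 8, 16, 16])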
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/version084/benchmark_tools/experiment_suites/experiment_suite.py:
--------------------------------------------------------------------------------
1 | # To be redefined on subclasses on how to calculate timeout for an episode
2 | import abc
3 |
4 |
5 | class ExperimentSuite(object):
6 |
7 | def __init__(self, city_name):
8 |
9 | self._city_name = city_name
10 | self._experiments = self.build_experiments()
11 |
12 | def calculate_time_out(self, path_distance):
13 | """
14 | Function to return the timeout, in seconds, calculated
15 | from the distance to the goal: the time needed to cover it
16 | at 10 km/h plus a 10-second margin, as in the CoRL paper.
17 | """
18 | return ((path_distance / 1000.0) / 10.0) * 3600.0 + 10.0
19 |
20 | def get_number_of_poses_task(self):
21 | """
22 | Get the number of poses a task has for this benchmark
23 | """
24 |
25 | # Warning: assumes that all tasks have the same size
26 |
27 | return len(self._experiments[0].poses)
28 |
29 | def get_number_of_reps_poses(self):
30 | """
31 | Get the number of repetitions per pose for this benchmark
32 | """
33 |
34 | # Warning: assumes that all poses have the same number of repetitions
35 |
36 | return self._experiments[0].repetitions
37 |
38 |
39 | def get_experiments(self):
40 | """
41 | Getter for the experiment set.
42 | """
43 | return self._experiments
44 |
45 | @property
46 | def dynamic_tasks(self):
47 | """
48 | Returns the tasks that contain dynamic obstacles
49 | """
50 | dynamic_tasks = set()
51 | for exp in self._experiments:
52 | if exp.conditions.NumberOfVehicles > 0 or exp.conditions.NumberOfPedestrians > 0:
53 | dynamic_tasks.add(exp.task)
54 |
55 | return list(dynamic_tasks)
56 |
57 | @property
58 | def metrics_parameters(self):
59 | """
60 | Property to return the parameters for the metric module
61 | Could be redefined depending on the needs of the user.
62 | """
63 | return {
64 |
65 | 'intersection_offroad': {'frames_skip': 10,
66 | 'frames_recount': 20,
67 | 'threshold': 0.3
68 | },
69 | 'intersection_otherlane': {'frames_skip': 10,
70 | 'frames_recount': 20,
71 | 'threshold': 0.4
72 | },
73 | 'collision_other': {'frames_skip': 10,
74 | 'frames_recount': 20,
75 | 'threshold': 400
76 | },
77 | 'collision_vehicles': {'frames_skip': 10,
78 | 'frames_recount': 30,
79 | 'threshold': 400
80 | },
81 | 'collision_pedestrians': {'frames_skip': 5,
82 | 'frames_recount': 100,
83 | 'threshold': 300
84 | },
85 |
86 | }
87 |
88 | @property
89 | def weathers(self):
90 | weathers = set(self.train_weathers)
91 | weathers.update(self.test_weathers)
92 | return weathers
93 |
94 | @property
95 | def collision_as_failure(self):
96 | return False
97 |
98 | @property
99 | def traffic_light_as_failure(self):
100 | return False
101 |
102 | @abc.abstractmethod
103 | def build_experiments(self):
104 | """
105 | Returns a set of experiments to be evaluated
106 | Must be redefined in an inherited class.
107 |
108 | """
109 |
110 | @abc.abstractproperty
111 | def train_weathers(self):
112 | """
113 | Return the weathers that are considered as training conditions
114 | """
115 |
116 | @abc.abstractproperty
117 | def test_weathers(self):
118 | """
119 | Return the weathers that are considered as testing conditions
120 | """
121 |
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/carla/settings.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
2 | # Barcelona (UAB).
3 | #
4 | # This work is licensed under the terms of the MIT license.
5 | # For a copy, see <https://opensource.org/licenses/MIT>.
6 |
7 | """CARLA Settings"""
8 |
9 | import io
10 | import random
11 | import sys
12 |
13 |
14 | if sys.version_info >= (3, 0):
15 |
16 | from configparser import ConfigParser
17 |
18 | else:
19 |
20 | from ConfigParser import RawConfigParser as ConfigParser
21 |
22 |
23 | from . import sensor as carla_sensor
24 |
25 |
26 | MAX_NUMBER_OF_WEATHER_IDS = 14
27 |
28 |
29 | class CarlaSettings(object):
30 | """
31 | The CarlaSettings object controls the settings of an episode. The __str__
32 | method returns a str with the contents of a CarlaSettings.ini file.
33 | """
34 |
35 | def __init__(self, **kwargs):
36 | # [CARLA/Server]
37 | self.SynchronousMode = True
38 | self.SendNonPlayerAgentsInfo = False
39 | # [CARLA/QualitySettings]
40 | self.QualityLevel = 'Epic'
41 | # [CARLA/LevelSettings]
42 | self.PlayerVehicle = None
43 | self.NumberOfVehicles = 20
44 | self.NumberOfPedestrians = 30
45 | self.WeatherId = 1
46 | self.SeedVehicles = None
47 | self.SeedPedestrians = None
48 | self.set(**kwargs)
49 | self._sensors = []
50 |
51 | def set(self, **kwargs):
52 | for key, value in kwargs.items():
53 | if not hasattr(self, key):
54 | raise ValueError('CarlaSettings: no key named %r' % key)
55 | setattr(self, key, value)
56 |
57 | def randomize_seeds(self):
58 | """
59 | Randomize the seeds of the new episode's pseudo-random number
60 | generators.
61 | """
62 | self.SeedVehicles = random.getrandbits(16)
63 | self.SeedPedestrians = random.getrandbits(16)
64 |
65 | def randomize_weather(self):
66 | """Randomized the WeatherId."""
67 | self.WeatherId = random.randint(0, MAX_NUMBER_OF_WEATHER_IDS)
68 |
69 | def add_sensor(self, sensor):
70 | """Add a sensor to the player vehicle (see sensor.py)."""
71 | if not isinstance(sensor, carla_sensor.Sensor):
72 | raise ValueError('Sensor not supported')
73 | self._sensors.append(sensor)
74 |
75 | def __str__(self):
76 | """Converts this object to an INI formatted string."""
77 | ini = ConfigParser()
78 | ini.optionxform = str
79 | S_SERVER = 'CARLA/Server'
80 | S_QUALITY = 'CARLA/QualitySettings'
81 | S_LEVEL = 'CARLA/LevelSettings'
82 | S_SENSOR = 'CARLA/Sensor'
83 |
84 | def get_attribs(obj):
85 | return [a for a in dir(obj) if not a.startswith('_') and not callable(getattr(obj, a))]
86 |
87 | def add_section(section, obj, keys):
88 | for key in keys:
89 | if hasattr(obj, key) and getattr(obj, key) is not None:
90 | if not ini.has_section(section):
91 | ini.add_section(section)
92 | ini.set(section, key, str(getattr(obj, key)))
93 |
94 | add_section(S_SERVER, self, [
95 | 'SynchronousMode',
96 | 'SendNonPlayerAgentsInfo'])
97 | add_section(S_QUALITY, self, [
98 | 'QualityLevel'])
99 | add_section(S_LEVEL, self, [
100 | 'NumberOfVehicles',
101 | 'NumberOfPedestrians',
102 | 'WeatherId',
103 | 'SeedVehicles',
104 | 'SeedPedestrians'])
105 |
106 | ini.add_section(S_SENSOR)
107 | ini.set(S_SENSOR, 'Sensors', ','.join(s.SensorName for s in self._sensors))
108 |
109 | for sensor_def in self._sensors:
110 | section = S_SENSOR + '/' + sensor_def.SensorName
111 | add_section(section, sensor_def, get_attribs(sensor_def))
112 |
113 | if sys.version_info >= (3, 0):
114 | text = io.StringIO()
115 | else:
116 | text = io.BytesIO()
117 |
118 | ini.write(text)
119 | return text.getvalue().replace(' = ', '=')
120 |
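121 | # Usage sketch (run from the repository root; this module uses relative
122 | # imports, so import it as part of the carla package):
123 | #
124 | #     from carla.settings import CarlaSettings
125 | #     settings = CarlaSettings(NumberOfVehicles=10, WeatherId=1)
126 | #     settings.randomize_seeds()
127 | #     print(settings)  # CarlaSettings.ini text with the CARLA/* sections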
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/agents/imitation/modules/carla_net.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 | '''
4 | Author:Tai Lei
5 | Date:Thu Nov 22 12:09:33 2018
6 | Info:
7 | '''
8 |
9 | import torch
10 | import torch.nn as nn
11 | # from torch.nn import functional as F
12 |
13 |
14 | class CarlaNet(nn.Module):
15 | def __init__(self, iptImgCnt=1, dropout_vec=None):
16 | super(CarlaNet, self).__init__()
17 | self.conv_block = nn.Sequential(
18 | nn.Conv2d(3*iptImgCnt, 32, kernel_size=5, stride=2),
19 | nn.BatchNorm2d(32),
20 | # nn.Dropout(self.dropout_vec[0]),
21 | nn.ReLU(),
22 | nn.Conv2d(32, 32, kernel_size=3, stride=1),
23 | nn.BatchNorm2d(32),
24 | # nn.Dropout(self.dropout_vec[1]),
25 | nn.ReLU(),
26 | nn.Conv2d(32, 64, kernel_size=3, stride=2),
27 | nn.BatchNorm2d(64),
28 | # nn.Dropout(self.dropout_vec[2]),
29 | nn.ReLU(),
30 | nn.Conv2d(64, 64, kernel_size=3, stride=1),
31 | nn.BatchNorm2d(64),
32 | # nn.Dropout(self.dropout_vec[3]),
33 | nn.ReLU(),
34 | nn.Conv2d(64, 128, kernel_size=3, stride=2),
35 | nn.BatchNorm2d(128),
36 | # nn.Dropout(self.dropout_vec[4]),
37 | nn.ReLU(),
38 | nn.Conv2d(128, 128, kernel_size=3, stride=1),
39 | nn.BatchNorm2d(128),
40 | # nn.Dropout(self.dropout_vec[5]),
41 | nn.ReLU(),
42 | nn.Conv2d(128, 256, kernel_size=3, stride=1),
43 | nn.BatchNorm2d(256),
44 | # nn.Dropout(self.dropout_vec[6]),
45 | nn.ReLU(),
46 | nn.Conv2d(256, 256, kernel_size=3, stride=1),
47 | nn.BatchNorm2d(256),
48 | # nn.Dropout(self.dropout_vec[7]),
49 | nn.ReLU(),
50 | )
51 |
52 | self.img_fc = nn.Sequential(
53 | nn.Linear(8192, 512),
54 | nn.Dropout(0.3),
55 | nn.ReLU(),
56 | nn.Linear(512, 512),
57 | nn.Dropout(0.3),
58 | nn.ReLU(),
59 | )
60 |
61 | self.speed_fc = nn.Sequential(
62 | nn.Linear(1, 128),
63 | nn.Dropout(0.5),
64 | nn.ReLU(),
65 | nn.Linear(128, 128),
66 | nn.Dropout(0.5),
67 | nn.ReLU(),
68 | )
69 |
70 | self.emb_fc = nn.Sequential(
71 | nn.Linear(512+128, 512),
72 | nn.Dropout(0.5),
73 | nn.ReLU(),
74 | )
75 |
76 | self.branches = nn.ModuleList([
77 | nn.Sequential(
78 | nn.Linear(512, 256),
79 | nn.Dropout(0.5),
80 | nn.ReLU(),
81 | nn.Linear(256, 256),
82 | # nn.Dropout(self.dropout_vec[i*2+14]),
83 | nn.ReLU(),
84 | nn.Linear(256, 3),
85 | ) for i in range(4)
86 | ])
87 |
88 | self.speed_branch = nn.Sequential(
89 | nn.Linear(512, 256),
90 | nn.Dropout(0.5),
91 | nn.ReLU(),
92 | nn.Linear(256, 256),
93 | # nn.Dropout(self.dropout_vec[1]),
94 | nn.ReLU(),
95 | nn.Linear(256, 1),
96 | )
97 |
98 | for m in self.modules():
99 | if isinstance(m, nn.Conv2d):
100 | nn.init.kaiming_normal_(
101 | m.weight, mode='fan_out', nonlinearity='relu')
102 | elif isinstance(m, nn.BatchNorm2d):
103 | nn.init.constant_(m.weight, 1)
104 | nn.init.constant_(m.bias, 0)
105 |
106 | def forward(self, img, speed):
107 | img = self.conv_block(img)
108 | img = img.view(-1, 8192)
109 | img = self.img_fc(img)
110 |
111 | speed = self.speed_fc(speed)
112 | emb = torch.cat([img, speed], dim=1)
113 | emb = self.emb_fc(emb)
114 |
115 | output = torch.cat([out(emb) for out in self.branches],
116 | dim=1)
117 | pred_speed = self.speed_branch(img)
118 |
119 | return output, pred_speed
120 |
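121 | # Shape-check sketch: the 8192-unit flatten assumes a 3x88x200 input
122 | # (the standard CARLA CIL crop). The high-level command indexes one of
123 | # the four branches; the command value here is illustrative.
124 | if __name__ == '__main__':
125 |     net = CarlaNet().eval()
126 |     img = torch.randn(1, 3, 88, 200)
127 |     speed = torch.randn(1, 1)
128 |     with torch.no_grad():
129 |         branches, pred_speed = net(img, speed)
130 |     command = 2  # in [0, 3]
131 |     steer, acc, brake = branches[0, 3 * command:3 * (command + 1)]
132 |     print(branches.shape, pred_speed.shape)  # (1, 12) and (1, 1)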
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/version084/carla/settings.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
2 | # Barcelona (UAB).
3 | #
4 | # This work is licensed under the terms of the MIT license.
5 | # For a copy, see <https://opensource.org/licenses/MIT>.
6 |
7 | """CARLA Settings"""
8 |
9 | import io
10 | import random
11 | import sys
12 |
13 |
14 | if sys.version_info >= (3, 0):
15 |
16 | from configparser import ConfigParser
17 |
18 | else:
19 |
20 | from ConfigParser import RawConfigParser as ConfigParser
21 |
22 |
23 | from . import sensor as carla_sensor
24 |
25 |
26 | MAX_NUMBER_OF_WEATHER_IDS = 14
27 |
28 |
29 | class CarlaSettings(object):
30 | """
31 | The CarlaSettings object controls the settings of an episode. The __str__
32 | method returns a str with the contents of a CarlaSettings.ini file.
33 | """
34 |
35 | def __init__(self, **kwargs):
36 | # [CARLA/Server]
37 | self.SynchronousMode = True
38 | self.SendNonPlayerAgentsInfo = False
39 | # [CARLA/QualitySettings]
40 | self.QualityLevel = 'Epic'
41 | # [CARLA/LevelSettings]
42 | self.PlayerVehicle = None
43 | self.NumberOfVehicles = 20
44 | self.NumberOfPedestrians = 30
45 | self.WeatherId = 1
46 | self.SeedVehicles = None
47 | self.SeedPedestrians = None
48 | self.DisableTwoWheeledVehicles = False
49 | self.set(**kwargs)
50 | self._sensors = []
51 |
52 | def set(self, **kwargs):
53 | for key, value in kwargs.items():
54 | if not hasattr(self, key):
55 | raise ValueError('CarlaSettings: no key named %r' % key)
56 | setattr(self, key, value)
57 |
58 | def randomize_seeds(self):
59 | """
60 | Randomize the seeds of the new episode's pseudo-random number
61 | generators.
62 | """
63 | self.SeedVehicles = random.getrandbits(16)
64 | self.SeedPedestrians = random.getrandbits(16)
65 |
66 | def randomize_weather(self):
67 | """Randomized the WeatherId."""
68 | self.WeatherId = random.randint(0, MAX_NUMBER_OF_WEATHER_IDS)
69 |
70 | def add_sensor(self, sensor):
71 | """Add a sensor to the player vehicle (see sensor.py)."""
72 | if not isinstance(sensor, carla_sensor.Sensor):
73 | raise ValueError('Sensor not supported')
74 | self._sensors.append(sensor)
75 |
76 | def __str__(self):
77 | """Converts this object to an INI formatted string."""
78 | ini = ConfigParser()
79 | ini.optionxform = str
80 | S_SERVER = 'CARLA/Server'
81 | S_QUALITY = 'CARLA/QualitySettings'
82 | S_LEVEL = 'CARLA/LevelSettings'
83 | S_SENSOR = 'CARLA/Sensor'
84 |
85 | def get_attribs(obj):
86 | return [a for a in dir(obj) if not a.startswith('_') and not callable(getattr(obj, a))]
87 |
88 | def add_section(section, obj, keys):
89 | for key in keys:
90 | if hasattr(obj, key) and getattr(obj, key) is not None:
91 | if not ini.has_section(section):
92 | ini.add_section(section)
93 | ini.set(section, key, str(getattr(obj, key)))
94 |
95 | add_section(S_SERVER, self, [
96 | 'SynchronousMode',
97 | 'SendNonPlayerAgentsInfo'])
98 | add_section(S_QUALITY, self, [
99 | 'QualityLevel'])
100 | add_section(S_LEVEL, self, [
101 | 'NumberOfVehicles',
102 | 'NumberOfPedestrians',
103 | 'WeatherId',
104 | 'SeedVehicles',
105 | 'SeedPedestrians',
106 | 'DisableTwoWheeledVehicles'])
107 |
108 | ini.add_section(S_SENSOR)
109 | ini.set(S_SENSOR, 'Sensors', ','.join(s.SensorName for s in self._sensors))
110 |
111 | for sensor_def in self._sensors:
112 | section = S_SENSOR + '/' + sensor_def.SensorName
113 | add_section(section, sensor_def, get_attribs(sensor_def))
114 |
115 | if sys.version_info >= (3, 0):
116 | text = io.StringIO()
117 | else:
118 | text = io.BytesIO()
119 |
120 | ini.write(text)
121 | return text.getvalue().replace(' = ', '=')
122 |
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/run_CIL.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import logging
3 | import sys
4 | # import os
5 | # os.environ["CUDA_VISIBLE_DEVICES"] = "3"
6 |
7 | try:
8 | sys.path.append("../")
9 | # from carla import carla_server_pb2 as carla_protocol
10 | from carla.driving_benchmark import run_driving_benchmark
11 | # from carla.driving_benchmark.experiment_suites import CoRL2017
12 |
13 | from agents.imitation.imitation_learning_pytorch import ImitationLearning
14 | from benchmarks.vrg_transfer import VrgTransferSuite
15 |
16 |
17 | except ImportError:
18 | raise RuntimeError(
19 | 'cannot import the CARLA modules; check sys.path and, if needed, run the protobuf compiler to generate carla_server_pb2.py')
20 |
21 | if (__name__ == '__main__'):
22 | argparser = argparse.ArgumentParser(description=__doc__)
23 | argparser.add_argument(
24 | '-v', '--verbose',
25 | action='store_true',
26 | dest='debug',
27 | help='print debug information')
28 | argparser.add_argument(
29 | '--host',
30 | metavar='H',
31 | # default='localhost',
32 | default='141.223.12.42',
33 | help='IP of the host server (default: localhost)')
34 | argparser.add_argument(
35 | '-p', '--port',
36 | metavar='P',
37 | default=2000,
38 | type=int,
39 | help='TCP port to listen to (default: 2000)')
40 | argparser.add_argument(
41 | '-c', '--city-name',
42 | metavar='C',
43 | default='Town01_lite',
44 | help='The town that is going to be used on the benchmark '
45 | + '(needs to match the active town in the server; options: Town01 or Town02)')
46 | argparser.add_argument(
47 | '-n', '--log-name',
48 | metavar='T',
49 | default='test',
50 | help='The name of the log file to be created by the scripts'
51 | )
52 |
53 | argparser.add_argument(
54 | '--avoid-stopping',
55 | default=False,
56 | action='store_false',
57 | help=' Uses the speed prediction branch to avoid unwanted agent stops'
58 | )
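# NOTE: with action='store_false' and default=False, passing
# --avoid-stopping cannot enable the behaviour; compare
# run_CIL_add_db.py, where the default is True.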
59 | argparser.add_argument(
60 | '--continue-experiment',
61 | action='store_true',
62 | help='If you want to continue the experiment with the given log name'
63 | )
64 | argparser.add_argument(
65 | '--weathers',
66 | nargs='+',
67 | type=int,
68 | default=[1],
69 | help='weather list 1:clear 3:wet, 6:rain 8:sunset'
70 | )
71 | argparser.add_argument(
72 | '--model-path',
73 | metavar='P',
74 | # default='/home/kimna/PytorchWorkspace/CARLA_Pytorch/carla_cil_pytorch-master/save_models/training/198_training.pth',
75 | default='/home/kimna/PytorchWorkspace/CARLA_Pytorch/carla_cil_pytorch-master/save_models_add_db/training/25_training.pth',
76 | # default='model/training_best_multi_img_ver2.pth',
77 | type=str,
78 | help='torch imitation learning model path (relative in model dir)'
79 | )
80 | argparser.add_argument(
81 | '--visualize',
82 | default=False,
83 | action='store_true',
84 | help='visualize the image and transferred image through tensorflow'
85 | )
86 | argparser.add_argument('--gpu', default=3, type=int,
87 | help='GPU id to use.')
88 |
89 | args = argparser.parse_args()
90 | log_level = logging.DEBUG if args.debug else logging.INFO
91 | logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)
92 |
93 | logging.info('listening to server %s:%s', args.host, args.port)
94 |
95 | agent = ImitationLearning(args.city_name,
96 | args.avoid_stopping,
97 | args.model_path,
98 | args.visualize,
99 | args.log_name,
100 | # args.gpu
101 | )
102 |
103 | ############################################### by Kimna
104 | # args.continue_experiment = True
105 |
106 |
107 | # experiment_suites = CoRL2017(args.city_name)
108 | experiment_suites = VrgTransferSuite(args.city_name, args.weathers)
109 |
110 | # Now actually run the driving_benchmark
111 | run_driving_benchmark(agent, experiment_suites, args.city_name,
112 | args.log_name, args.continue_experiment,
113 | args.host, args.port)
114 |
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/agents/imitation/imitation_learning_pytorch.py:
--------------------------------------------------------------------------------
1 | import os
2 | import scipy
3 | import scipy.misc
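# NOTE: scipy.misc.imresize was removed in SciPy 1.3.0; this module
# therefore assumes an older SciPy (with Pillow installed).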
4 |
5 | import torch
6 | import numpy as np
7 |
8 | from carla.agent import Agent
9 | from carla.carla_server_pb2 import Control
10 | from agents.imitation.modules.carla_net import CarlaNet
11 |
12 |
13 | class ImitationLearning(Agent):
14 |
15 | def __init__(self, city_name,
16 | avoid_stopping=True,
17 | model_path="model/policy.pth",
18 | visualize=False,
19 | log_name="test_log",
20 | image_cut=[115, 510]):
21 |
22 | super(ImitationLearning, self).__init__()
23 | # Agent.__init__(self)
24 |
25 | self._image_size = (88, 200, 3)
26 | self._avoid_stopping = avoid_stopping
27 |
28 | dir_path = os.path.dirname(__file__)
29 | self._models_path = os.path.join(dir_path, model_path)
30 | self.model = CarlaNet()
31 | if torch.cuda.is_available():
32 | self.model.cuda()
33 | self.load_model()
34 | self.model.eval()
35 |
36 | self._image_cut = image_cut
37 |
38 | def load_model(self):
39 | if not os.path.exists(self._models_path):
40 | raise RuntimeError('failed to find the models path: %s'
41 | % self._models_path)
42 | checkpoint = torch.load(self._models_path, map_location='cuda:0')
43 | self.model.load_state_dict(checkpoint['state_dict'])
44 |
45 | def run_step(self, measurements, sensor_data, directions, target):
46 |
47 | control = self._compute_action(
48 | sensor_data['CameraRGB'].data,
49 | measurements.player_measurements.forward_speed,
50 | directions)
51 |
52 | return control
53 |
54 | def _compute_action(self, rgb_image, speed, direction=None):
55 |
56 | rgb_image = rgb_image[self._image_cut[0]:self._image_cut[1], :]
57 |
58 | image_input = scipy.misc.imresize(rgb_image, [self._image_size[0],
59 | self._image_size[1]])
60 |
61 | image_input = image_input.astype(np.float32)
62 | image_input = np.expand_dims(
63 | np.transpose(image_input, (2, 0, 1)),
64 | axis=0)
65 |
66 | image_input = np.multiply(image_input, 1.0 / 255.0)
67 | speed = np.array([[speed]]).astype(np.float32) / 25.0
68 | direction = int(direction-2)
69 |
70 | steer, acc, brake = self._control_function(image_input,
71 | speed,
72 | direction)
73 |
74 | # This is a bit biased, but it avoids fake braking
75 |
76 | if brake < 0.1:
77 | brake = 0.0
78 |
79 | if acc > brake:
80 | brake = 0.0
81 |
82 | # We limit speed to ~35 km/h (10 m/s); speed was normalized by 25 above,
83 | # so the threshold must be compared in normalized units
84 | if speed > 10.0 / 25.0 and brake == 0.0:
85 | acc = 0.0
85 |
86 | if np.abs(steer) > 0.15:
87 | acc = acc * 0.4
88 |
89 | control = Control()
90 | control.steer = steer
91 | control.throttle = acc
92 | control.brake = brake
93 |
94 | control.hand_brake = 0
95 | control.reverse = 0
96 |
97 | return control
98 |
99 | def _control_function(self, image_input, speed, control_input):
100 |
101 | img_ts = torch.from_numpy(image_input).cuda()
102 | speed_ts = torch.from_numpy(speed).cuda()
103 |
104 | with torch.no_grad():
105 | branches, pred_speed = self.model(img_ts, speed_ts)
106 |
107 | pred_result = branches[0][
108 | 3*control_input:3*(control_input+1)].cpu().numpy()
109 |
110 | predicted_steers = (pred_result[0])
111 |
112 | predicted_acc = (pred_result[1])
113 |
114 | predicted_brake = (pred_result[2])
115 |
116 | if self._avoid_stopping:
117 | predicted_speed = pred_speed.squeeze().item()
118 | real_speed = speed * 25.0
119 |
120 | real_predicted = predicted_speed * 25.0
121 | if real_speed < 2.0 and real_predicted > 3.0:
122 |
123 | predicted_acc = 1 * (5.6 / 25.0 - speed) + predicted_acc
124 |
125 | predicted_brake = 0.0
126 |
127 | predicted_acc = predicted_acc
128 |
129 | return predicted_steers, predicted_acc, predicted_brake
130 |
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/run_CIL_add_db.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import logging
3 | import sys
4 | # import os
5 | # os.environ["CUDA_VISIBLE_DEVICES"] = "3"
6 |
7 | try:
8 | sys.path.append("../")
9 | from carla import carla_server_pb2 as carla_protocol
10 | from carla.driving_benchmark import run_driving_benchmark
11 | # from .carla.driving_benchmark.experiment_suites import CoRL2017
12 |
13 | from agents.imitation.imitation_learning_pytorch_add_db import ImitationLearning
14 | from benchmarks.vrg_transfer import VrgTransferSuite
15 |
16 |
17 | except ImportError:
18 | raise RuntimeError(
19 | 'cannot import "carla_server_pb2.py", run the protobuf compiler to generate this file')
20 |
21 | if (__name__ == '__main__'):
22 | argparser = argparse.ArgumentParser(description=__doc__)
23 | argparser.add_argument(
24 | '-v', '--verbose',
25 | action='store_true',
26 | dest='debug',
27 | help='print debug information')
28 | argparser.add_argument(
29 | '--host',
30 | metavar='H',
31 | # default='localhost',
32 | default='141.223.12.42',
33 | help='IP of the host server (default: localhost)')
34 | argparser.add_argument(
35 | '-p', '--port',
36 | metavar='P',
37 | default=2000,
38 | type=int,
39 | help='TCP port to listen to (default: 2000)')
40 | argparser.add_argument(
41 | '-c', '--city-name',
42 | metavar='C',
43 | default='Town01_lite',
44 | # default='Town01',
45 | help='The town that is going to be used on the benchmark '
46 | + '(needs to match the active town in the server; options: Town01 or Town02)')
47 | argparser.add_argument(
48 | '-n', '--log-name',
49 | metavar='T',
50 | default='test',
51 | help='The name of the log file to be created by the scripts'
52 | )
53 |
54 | argparser.add_argument(
55 | '--avoid-stopping',
56 | default=True,
57 | action='store_false',
58 | help=' Uses the speed prediction branch to avoid unwanted agent stops'
59 | )
60 | argparser.add_argument(
61 | '--continue-experiment',
62 | action='store_true',
63 | help='If you want to continue the experiment with the given log name'
64 | )
65 | argparser.add_argument(
66 | '--weathers',
67 | nargs='+',
68 | type=int,
69 | default=[1],
70 | help='weather list 1:clear 3:wet, 6:rain 8:sunset'
71 | )
72 | argparser.add_argument(
73 | '--model-path',
74 | metavar='P',
75 | # default='/home/kimna/PytorchWorkspace/CARLA_Pytorch/carla_cil_pytorch-master/save_models/training/198_training.pth',
76 | default='/home/kimna/PytorchWorkspace/CARLA_Pytorch/carla_cil_pytorch-master/save_models_add_db/training/25_training.pth',
77 | # default='model/training_best_multi_img_ver2.pth',
78 | type=str,
79 | help='torch imitation learning model path (relative in model dir)'
80 | )
81 | argparser.add_argument(
82 | '--visualize',
83 | default=False,
84 | action='store_true',
85 | help='visualize the image and transferred image through tensorflow'
86 | )
87 | argparser.add_argument('--gpu', default=2, type=int,
88 | help='GPU id to use.')
89 |
90 | args = argparser.parse_args()
91 | log_level = logging.DEBUG if args.debug else logging.INFO
92 | logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)
93 |
94 | logging.info('listening to server %s:%s', args.host, args.port)
95 |
96 | agent = ImitationLearning(args.city_name,
97 | args.avoid_stopping,
98 | args.model_path,
99 | args.visualize,
100 | args.log_name,
101 | # args.gpu
102 | )
103 |
104 | ############################################### by Kimna
105 | args.continue_experiment = True
106 |
107 |
108 | # experiment_suites = CoRL2017(args.city_name)
109 | experiment_suites = VrgTransferSuite(args.city_name, args.weathers)
110 |
111 | # Now actually run the driving_benchmark
112 | run_driving_benchmark(agent, experiment_suites, args.city_name,
113 | args.log_name, args.continue_experiment,
114 | args.host, args.port)
115 |
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/carla/planner/grid.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
2 | # Barcelona (UAB).
3 | #
4 | # This work is licensed under the terms of the MIT license.
5 | # For a copy, see <https://opensource.org/licenses/MIT>.
6 |
7 | import copy
8 | import numpy as np
9 |
10 |
11 | def angle_between(v1, v2):
12 | return np.arccos(np.dot(v1, v2) / np.linalg.norm(v1) / np.linalg.norm(v2))
13 |
14 |
15 | class Grid(object):
16 |
17 | def __init__(self, graph):
18 |
19 | self._graph = graph
20 | self._structure = self._make_structure()
21 | self._walls = self._make_walls()
22 |
23 | def search_on_grid(self, x, y):
24 | visit = [[0, 1], [0, -1], [1, 0], [1, 1],
25 | [1, -1], [-1, 0], [-1, 1], [-1, -1]]
26 | c_x, c_y = x, y
27 | scale = 1
28 | while self._structure[c_x, c_y] != 0:
29 | for offset in visit:
30 | c_x, c_y = x + offset[0] * scale, y + offset[1] * scale
31 |
32 |                 if (0 <= c_x < self._graph.get_resolution()[0]
33 |                         and 0 <= c_y < self._graph.get_resolution()[1]):
34 | if self._structure[c_x, c_y] == 0:
35 | break
36 | else:
37 | c_x, c_y = x, y
38 | scale += 1
39 |
40 | return c_x, c_y
41 | def get_walls(self):
42 | return self._walls
43 |
44 | def get_wall_source(self, pos, pos_ori, target):
45 |
46 | free_nodes = self._get_adjacent_free_nodes(pos)
47 | # print self._walls
48 | final_walls = copy.copy(self._walls)
49 | # print final_walls
50 | heading_start = np.array([pos_ori[0], pos_ori[1]])
51 | for adj in free_nodes:
52 |
53 | start_to_goal = np.array([adj[0] - pos[0], adj[1] - pos[1]])
54 | angle = angle_between(heading_start, start_to_goal)
55 | if (angle > 1.6 and adj != target):
56 | final_walls.add((adj[0], adj[1]))
57 |
58 | return final_walls
59 |
60 | def get_wall_target(self, pos, pos_ori, source):
61 |
62 | free_nodes = self._get_adjacent_free_nodes(pos)
63 | final_walls = copy.copy(self._walls)
64 | heading_start = np.array([pos_ori[0], pos_ori[1]])
65 | for adj in free_nodes:
66 |
67 | start_to_goal = np.array([adj[0] - pos[0], adj[1] - pos[1]])
68 | angle = angle_between(heading_start, start_to_goal)
69 |
70 | if (angle < 1.0 and adj != source):
71 | final_walls.add((adj[0], adj[1]))
72 |
73 | return final_walls
74 |
75 | def _draw_line(self, grid, xi, yi, xf, yf):
76 |
77 | if xf < xi:
78 | aux = xi
79 | xi = xf
80 | xf = aux
81 |
82 | if yf < yi:
83 | aux = yi
84 | yi = yf
85 | yf = aux
86 |
87 | for i in range(xi, xf + 1):
88 |
89 | for j in range(yi, yf + 1):
90 | grid[i, j] = 0.0
91 |
92 | return grid
93 |
94 | def _make_structure(self):
95 | structure = np.ones(
96 | (self._graph.get_resolution()[0],
97 | self._graph.get_resolution()[1]))
98 |
99 | for key, connections in self._graph.get_edges().items():
100 |
101 | # draw a line
102 | for con in connections:
103 | # print key[0],key[1],con[0],con[1]
104 | structure = self._draw_line(
105 | structure, key[0], key[1], con[0], con[1])
106 | # print grid
107 | return structure
108 |
109 | def _make_walls(self):
110 | walls = set()
111 |
112 | for i in range(self._structure.shape[0]):
113 |
114 | for j in range(self._structure.shape[1]):
115 | if self._structure[i, j] == 1.0:
116 | walls.add((i, j))
117 |
118 | return walls
119 |
120 | def _get_adjacent_free_nodes(self, pos):
121 | """ Eight nodes in total """
122 | visit = [[0, 1], [0, -1], [1, 0], [1, 1],
123 | [1, -1], [-1, 0], [-1, 1], [-1, -1]]
124 |
125 | adjacent = set()
126 | for offset in visit:
127 | node = (pos[0] + offset[0], pos[1] + offset[1])
128 |
129 | if (node[0] >= 0 and node[0] < self._graph.get_resolution()[0]
130 | and node[1] >= 0 and node[1] < self._graph.get_resolution()[1]):
131 |
132 | if self._structure[node[0], node[1]] == 0.0:
133 | adjacent.add(node)
134 |
135 | return adjacent
136 |
--------------------------------------------------------------------------------
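For intuition, `Grid.search_on_grid` spirals outward from (x, y) in rings of growing `scale` until it lands on a free (0) cell of the structure. A small self-contained sketch using a hypothetical `StubGraph` (not part of the repo) that exposes the two methods `Grid` actually calls:

from carla.planner.grid import Grid  # import path as laid out in this repo

class StubGraph:
    """Minimal stand-in exposing the two methods Grid uses."""
    def get_resolution(self):
        return (5, 5)

    def get_edges(self):
        # One edge; _draw_line carves a free corridor along row 2.
        return {(2, 0): [(2, 4)]}

grid = Grid(StubGraph())
# (0, 0) is a wall cell; the ring search finds the nearest free cell:
print(grid.search_on_grid(0, 0))  # -> (2, 0), reached at scale 2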
/driving-benchmarks-yaw/utils/max_branch_ver0_1_adj.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
4 | def steering_multi_function(input_speed, weight_factor=1.7):
5 | return input_speed * -0.02 + weight_factor
6 |
7 | def saperate_environment(episode_name):
8 |     return episode_name.split('_')  # '<weather>_<exp_id>_<points>' -> 3 fields
9 |
10 |
11 | def action_adjusting(direction, steer, acc, brake, speed, episode_name):
12 |
13 | weather, exp_id, se_point = saperate_environment(episode_name)
14 |
15 | if exp_id == "3":
16 | if brake < 0.1:
17 | brake = 0.0
18 | else:
19 | brake *= 5
20 |
21 | if acc > 0.8:
22 | brake = 0.0
23 |
24 | else:
25 | if brake < 0.2:
26 | brake = 0.0
27 |
28 | if acc > brake:
29 | brake = 0.0
30 |
31 |     # We limit speed to roughly 35 km/h (per-task caps applied below).
32 | # for Town 1
33 | # if speed > 35: # and brake == 0.0:
34 | # acc = 0.0
35 | # for Town 2
36 |
37 | # if self.before_steering != 0 and abs(self.before_steering - steer) > 0.1:
38 | # steer = (self.before_steering + steer) / 2
39 |
40 | if exp_id == "0":
41 | if speed > 37:
42 | acc = 0.0
43 | acc = acc * 3.7
44 | elif exp_id == "3":
45 | if speed > 35:
46 | acc = 0.0
47 | acc = acc * 3.2
48 | else:
49 | if speed > 35:
50 | acc = 0.0
51 | acc = acc * 3.5
52 |
53 |     # If the steering magnitude exceeds a threshold, reduce acceleration.
54 | if np.abs(steer) > 0.15:
55 | acc = acc * 0.5
56 |
57 |     # Set the steering angle that counts as a curve: curve_limit,
58 |     # and the allowed speed through it: curve_limit_speed.
59 |     # Static values for now; if needed, use the steer-vs-curve_limit gap as brake, or scale it proportionally.
60 | curve_limit = 0.05
61 | curve_limit_speed = 15
62 | curve_limit_1_2 = 0.01
63 | curve_limit_speed_1_2 = 17
64 |
65 | if direction == 0:
66 | if np.abs(steer) > curve_limit and speed > curve_limit_speed:
67 | acc = 0
68 | brake = 1
69 |         # When direction == 0, apply the steering weight below only after speed has dropped for the curve.
70 | if speed <= curve_limit_speed:
71 | steer = steering_multi_function(speed) * steer
72 | elif direction == 1:
73 | if speed > curve_limit_speed_1_2:
74 | acc = 0
75 | brake = 1
76 | steer = steering_multi_function(speed, 1.7) * steer
77 | if steer > 0:
78 | steer = 0
79 | elif direction == 2:
80 | if speed > curve_limit_speed_1_2:
81 | acc = 0
82 | brake = 1
83 | steer = steering_multi_function(speed) * steer
84 | if steer < 0:
85 | steer = 0
86 |
87 | return steer, acc, brake
88 |
89 |
90 | def action_adjusting_town02(direction, steer, acc, brake, speed, episode_name):
91 | weather, exp_id, se_point = saperate_environment(episode_name)
92 |
93 | if brake < 0.1:
94 | brake = 0.0
95 |
96 | if acc > brake:
97 | brake = 0.0
98 |
99 |     # We limit speed to roughly 35 km/h (per-task caps applied below).
100 | # for Town 1
101 | # if speed > 35: # and brake == 0.0:
102 | # acc = 0.0
103 | # for Town 2
104 |
105 | if exp_id == "0":
106 | if speed > 37:
107 | acc = 0.0
108 | acc = acc * 1.7
109 | else:
110 | if speed > 35:
111 | acc = 0.0
112 | acc = acc * 1.5
113 |
114 |     # If the steering magnitude exceeds a threshold, reduce acceleration.
115 | if np.abs(steer) > 0.15:
116 | acc = acc * 0.5
117 |
118 | curve_limit = 0.04
119 | curve_limit_speed = 15
120 | curve_limit_1_2 = 0.01
121 | curve_limit_speed_1_2 = 17
122 |
123 | if direction == 0:
124 | if np.abs(steer) > curve_limit and speed > curve_limit_speed:
125 | acc = 0
126 | brake = 1
127 |         # When direction == 0, apply the steering weight below only after speed has dropped for the curve.
128 | if speed <= curve_limit_speed:
129 | steer = steering_multi_function(speed) * steer
130 | elif direction == 1:
131 |
132 | if weather == "14":
133 | if speed > curve_limit_speed_1_2 - 1:
134 | acc = 0
135 | brake = 1
136 | steer = steering_multi_function(speed, 1.55) * steer
137 | else:
138 | if speed > curve_limit_speed_1_2:
139 | acc = 0
140 | brake = 1
141 | steer = steering_multi_function(speed, 1.5) * steer
142 | if steer > 0:
143 | steer = 0
144 | elif direction == 2:
145 | if speed > curve_limit_speed_1_2:
146 | acc = 0
147 | brake = 1
148 | steer = steering_multi_function(speed) * steer
149 | if steer < 0:
150 | steer = 0
151 |
152 | return steer, acc, brake
--------------------------------------------------------------------------------
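`steering_multi_function` above is a linear gain that decays with speed: `-0.02 * speed + weight_factor`. With the default factor of 1.7 the gain is 1.7 at standstill, 1.0 at 35 km/h, and below 1.0 past that, so steering is amplified at low speed and damped at high speed (assuming `speed` is in km/h, which the 35/37 thresholds above suggest). A quick check:

def steering_multi_function(input_speed, weight_factor=1.7):
    return input_speed * -0.02 + weight_factor

for speed in (0, 10, 15, 35):
    print(speed, round(steering_multi_function(speed), 2))
# 0 -> 1.7, 10 -> 1.5, 15 -> 1.4, 35 -> 1.0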
/driving-benchmarks-yaw/version084/carla/planner/grid.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
2 | # Barcelona (UAB).
3 | #
4 | # This work is licensed under the terms of the MIT license.
5 | # For a copy, see <https://opensource.org/licenses/MIT>.
6 |
7 | import copy
8 | import numpy as np
9 |
10 |
11 | def angle_between(v1, v2):
12 | return np.arccos(np.dot(v1, v2) / np.linalg.norm(v1) / np.linalg.norm(v2))
13 |
14 |
15 | class Grid(object):
16 |
17 | def __init__(self, graph):
18 |
19 | self._graph = graph
20 | self._structure = self._make_structure()
21 | self._walls = self._make_walls()
22 |
23 | def search_on_grid(self, x, y):
24 | visit = [[0, 1], [0, -1], [1, 0], [-1, 0],
25 | [1, -1], [1, 1], [-1, 1], [-1, -1]]
26 | c_x, c_y = x, y
27 | scale = 1
28 |
29 |
30 | while self._structure[c_x, c_y] != 0:
31 | for offset in visit:
32 | c_x, c_y = x + offset[0] * scale, y + offset[1] * scale
33 |
34 |                 if (0 <= c_x < self._graph.get_resolution()[0]
35 |                         and 0 <= c_y < self._graph.get_resolution()[1]):
36 | if self._structure[c_x, c_y] == 0:
37 | break
38 | else:
39 | c_x, c_y = x, y
40 | scale += 1
41 |
42 | return c_x, c_y
43 | def get_walls(self):
44 | return self._walls
45 |
46 | def get_wall_source(self, pos, pos_ori, target):
47 |
48 | free_nodes = self.get_adjacent_free_nodes(pos)
49 | # print self._walls
50 | final_walls = copy.copy(self._walls)
51 |
52 | heading_start = np.array([pos_ori[0], pos_ori[1]])
53 | for adj in free_nodes:
54 |
55 | start_to_goal = np.array([adj[0] - pos[0], adj[1] - pos[1]])
56 | angle = angle_between(heading_start, start_to_goal)
57 | if (angle > 1.6 and adj != target):
58 | final_walls.add((adj[0], adj[1]))
59 |
60 | return final_walls
61 |
62 | def get_wall_target(self, pos, pos_ori, source):
63 |
64 | free_nodes = self.get_adjacent_free_nodes(pos)
65 | final_walls = copy.copy(self._walls)
66 | heading_start = np.array([pos_ori[0], pos_ori[1]])
67 | for adj in free_nodes:
68 |
69 | start_to_goal = np.array([adj[0] - pos[0], adj[1] - pos[1]])
70 | angle = angle_between(heading_start, start_to_goal)
71 |
72 | if (angle < 1.0 and adj != source):
73 | #print("added target ", (adj[0], adj[1]))
74 | final_walls.add((adj[0], adj[1]))
75 |
76 | return final_walls
77 |
78 | def _draw_line(self, grid, xi, yi, xf, yf):
79 |
80 | if xf < xi:
81 | aux = xi
82 | xi = xf
83 | xf = aux
84 |
85 | if yf < yi:
86 | aux = yi
87 | yi = yf
88 | yf = aux
89 |
90 | for i in range(xi, xf + 1):
91 |
92 | for j in range(yi, yf + 1):
93 | grid[i, j] = 0.0
94 |
95 | return grid
96 |
97 | def _make_structure(self):
98 | structure = np.ones(
99 | (self._graph.get_resolution()[0],
100 | self._graph.get_resolution()[1]))
101 |
102 | for key, connections in self._graph.get_edges().items():
103 |
104 | # draw a line
105 | for con in connections:
106 | # print key[0],key[1],con[0],con[1]
107 | structure = self._draw_line(
108 | structure, key[0], key[1], con[0], con[1])
109 | # print grid
110 | return structure
111 |
112 | def _make_walls(self):
113 | walls = set()
114 |
115 | for i in range(self._structure.shape[0]):
116 |
117 | for j in range(self._structure.shape[1]):
118 | if self._structure[i, j] == 1.0:
119 | walls.add((i, j))
120 |
121 | return walls
122 |
123 | def get_adjacent_free_nodes(self, pos):
124 | """ Eight nodes in total """
125 | visit = [[0, 1], [0, -1], [1, 0], [1, 1],
126 | [1, -1], [-1, 0], [-1, 1], [-1, -1]]
127 |
128 | adjacent = set()
129 | for offset in visit:
130 | node = (pos[0] + offset[0], pos[1] + offset[1])
131 |
132 | if (node[0] >= 0 and node[0] < self._graph.get_resolution()[0]
133 | and node[1] >= 0 and node[1] < self._graph.get_resolution()[1]):
134 | if self._structure[node[0], node[1]] == 0.0:
135 | adjacent.add(node)
136 |
137 | return adjacent
138 |
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/carla/planner/graph.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
2 | # Barcelona (UAB).
3 | #
4 | # This work is licensed under the terms of the MIT license.
5 | # For a copy, see <https://opensource.org/licenses/MIT>.
6 |
7 | import math
8 | import numpy as np
9 |
10 |
11 | def string_to_node(string):
12 | vec = string.split(',')
13 | return (int(vec[0]), int(vec[1]))
14 |
15 |
16 | def string_to_floats(string):
17 | vec = string.split(',')
18 | return (float(vec[0]), float(vec[1]), float(vec[2]))
19 |
20 |
21 | def sldist(c1, c2):
22 | return math.sqrt((c2[0] - c1[0]) ** 2 + (c2[1] - c1[1]) ** 2)
23 |
24 |
25 | def sldist3(c1, c2):
26 | return math.sqrt((c2[0] - c1[0]) ** 2 + (c2[1] - c1[1])
27 | ** 2 + (c2[2] - c1[2]) ** 2)
28 |
29 |
30 | class Graph(object):
31 | """
32 | A simple directed, weighted graph
33 | """
34 |
35 | def __init__(self, graph_file=None, node_density=50):
36 |
37 | self._nodes = set()
38 | self._angles = {}
39 | self._edges = {}
40 | self._distances = {}
41 | self._node_density = node_density
42 |
43 | if graph_file is not None:
44 | with open(graph_file, 'r') as f:
45 |                 # Skip the first four header lines;
46 |                 lines_after_4 = f.readlines()[4:]
47 |
48 |                 # the first remaining line holds the graph resolution.
49 | linegraphres = lines_after_4[0]
50 | self._resolution = string_to_node(linegraphres)
51 | for line in lines_after_4[1:]:
52 |
53 | from_node, to_node, d = line.split()
54 | from_node = string_to_node(from_node)
55 | to_node = string_to_node(to_node)
56 |
57 | if from_node not in self._nodes:
58 | self.add_node(from_node)
59 | if to_node not in self._nodes:
60 | self.add_node(to_node)
61 |
62 | self._edges.setdefault(from_node, [])
63 | self._edges[from_node].append(to_node)
64 | self._distances[(from_node, to_node)] = float(d)
65 |
66 | def add_node(self, value):
67 | self._nodes.add(value)
68 |
69 | def make_orientations(self, node, heading):
70 |
71 | import collections
72 | distance_dic = {}
73 | for node_iter in self._nodes:
74 | if node_iter != node:
75 | distance_dic[sldist(node, node_iter)] = node_iter
76 |
77 | distance_dic = collections.OrderedDict(
78 | sorted(distance_dic.items()))
79 |
80 | self._angles[node] = heading
81 | for _, v in distance_dic.items():
82 | start_to_goal = np.array([node[0] - v[0], node[1] - v[1]])
83 |
84 |
85 |
86 | self._angles[v] = start_to_goal / np.linalg.norm(start_to_goal)
87 |
88 | def add_edge(self, from_node, to_node, distance):
89 | self._add_edge(from_node, to_node, distance)
90 |
91 | def _add_edge(self, from_node, to_node, distance):
92 | self._edges.setdefault(from_node, [])
93 | self._edges[from_node].append(to_node)
94 | self._distances[(from_node, to_node)] = distance
95 |
96 | def get_resolution(self):
97 | return self._resolution
98 | def get_edges(self):
99 | return self._edges
100 |
101 | def intersection_nodes(self):
102 |
103 | intersect_nodes = []
104 | for node in self._nodes:
105 | if len(self._edges[node]) > 2:
106 | intersect_nodes.append(node)
107 |
108 | return intersect_nodes
109 |
110 | # This contains also the non-intersection turns...
111 |
112 | def turn_nodes(self):
113 |
114 | return self._nodes
115 |
116 | def plot_ori(self, c):
117 | from matplotlib import collections as mc
118 |
119 | import matplotlib.pyplot as plt
120 | line_len = 1
121 |
122 | lines = [[(p[0], p[1]), (p[0] + line_len * self._angles[p][0],
123 | p[1] + line_len * self._angles[p][1])] for p in self._nodes]
124 | lc = mc.LineCollection(lines, linewidth=2, color='green')
125 | _, ax = plt.subplots()
126 | ax.add_collection(lc)
127 |
128 | ax.autoscale()
129 | ax.margins(0.1)
130 |
131 | xs = [p[0] for p in self._nodes]
132 | ys = [p[1] for p in self._nodes]
133 |
134 | plt.scatter(xs, ys, color=c)
135 |
136 | def plot(self, c):
137 | import matplotlib.pyplot as plt
138 | xs = [p[0] for p in self._nodes]
139 | ys = [p[1] for p in self._nodes]
140 |
141 | plt.scatter(xs, ys, color=c)
142 |
--------------------------------------------------------------------------------
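Judging from the `Graph` constructor above, the `TownXX.txt` files it parses have four header lines, then a resolution line `cols,rows`, then one edge per line as `x1,y1 x2,y2 distance`. A sketch with a hypothetical file in that inferred format:

import tempfile

from carla.planner.graph import Graph  # import path as laid out in this repo

sample = """header 1
header 2
header 3
header 4
4,4
0,0 0,1 1.0
0,1 1,1 1.0
"""

with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as f:
    f.write(sample)
    path = f.name

g = Graph(path)
print(g.get_resolution())  # (4, 4)
print(g.get_edges())       # {(0, 0): [(0, 1)], (0, 1): [(1, 1)]}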
/carla_cil_pytorch_eval-pytorch_eval/carla/planner/city_track.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
2 | # Barcelona (UAB).
3 | #
4 | # This work is licensed under the terms of the MIT license.
5 | # For a copy, see <https://opensource.org/licenses/MIT>.
6 |
7 | from carla.planner.graph import sldist
8 |
9 | from carla.planner.astar import AStar
10 | from carla.planner.map import CarlaMap
11 |
12 |
13 | class CityTrack(object):
14 |
15 | def __init__(self, city_name):
16 |
17 | # These values are fixed for every city.
18 | self._node_density = 50.0
19 | self._pixel_density = 0.1643
20 |
21 | self._map = CarlaMap(city_name, self._pixel_density, self._node_density)
22 |
23 | self._astar = AStar()
24 |
25 | # Refers to the start position of the previous route computation
26 | self._previous_node = []
27 |
28 | # The current computed route
29 | self._route = None
30 |
31 | def project_node(self, position):
32 | """
33 |         Project a world position onto the nearest node of the city road graph.
34 | """
35 |
36 | node = self._map.convert_to_node(position)
37 |
38 | # To change the orientation with respect to the map standards
39 |
40 | node = tuple([int(x) for x in node])
41 |
42 | # Set to zero if it is less than zero.
43 |
44 | node = (max(0, node[0]), max(0, node[1]))
45 | node = (min(self._map.get_graph_resolution()[0] - 1, node[0]),
46 | min(self._map.get_graph_resolution()[1] - 1, node[1]))
47 |
48 | node = self._map.search_on_grid(node)
49 |
50 | return node
51 |
52 | def get_intersection_nodes(self):
53 | return self._map.get_intersection_nodes()
54 |
55 | def get_pixel_density(self):
56 | return self._pixel_density
57 |
58 | def get_node_density(self):
59 | return self._node_density
60 |
61 | def is_at_goal(self, source, target):
62 | return source == target
63 |
64 | def is_at_new_node(self, current_node):
65 | return current_node != self._previous_node
66 |
67 | def is_away_from_intersection(self, current_node):
68 | return self._closest_intersection_position(current_node) > 1
69 |
70 | def is_far_away_from_route_intersection(self, current_node):
71 | # CHECK FOR THE EMPTY CASE
72 | if self._route is None:
73 |             raise RuntimeError('Impossible to find route.'
74 |                                + ' The current planner is limited.'
75 |                                + ' Try selecting start points away from intersections.')
76 |
77 | return self._closest_intersection_route_position(current_node,
78 | self._route) > 4
79 |
80 | def compute_route(self, node_source, source_ori, node_target, target_ori):
81 |
82 | self._previous_node = node_source
83 |
84 | a_star = AStar()
85 | a_star.init_grid(self._map.get_graph_resolution()[0],
86 | self._map.get_graph_resolution()[1],
87 | self._map.get_walls_directed(node_source, source_ori,
88 | node_target, target_ori), node_source,
89 | node_target)
90 |
91 | route = a_star.solve()
92 |
93 |         # Just a corner case:
94 |         # TODO: clean this up so this fallback is not needed
95 | if route is None:
96 | a_star = AStar()
97 | a_star.init_grid(self._map.get_graph_resolution()[0],
98 | self._map.get_graph_resolution()[1], self._map.get_walls(),
99 | node_source, node_target)
100 |
101 | route = a_star.solve()
102 |
103 | self._route = route
104 |
105 | return route
106 |
107 | def get_distance_closest_node_route(self, pos, route):
108 | distance = []
109 |
110 | for node_iter in route:
111 |
112 | if node_iter in self._map.get_intersection_nodes():
113 | distance.append(sldist(node_iter, pos))
114 |
115 | if not distance:
116 | return sldist(route[-1], pos)
117 | return sorted(distance)[0]
118 |
119 |
120 | def _closest_intersection_position(self, current_node):
121 |
122 | distance_vector = []
123 | for node_iterator in self._map.get_intersection_nodes():
124 | distance_vector.append(sldist(node_iterator, current_node))
125 |
126 | return sorted(distance_vector)[0]
127 |
128 |
129 | def _closest_intersection_route_position(self, current_node, route):
130 |
131 | distance_vector = []
132 |         for _ in route:  # NOTE: loops once per route node but always measures from current_node
133 | for node_iterator in self._map.get_intersection_nodes():
134 | distance_vector.append(sldist(node_iterator, current_node))
135 |
136 | return sorted(distance_vector)[0]
137 |
138 |
--------------------------------------------------------------------------------
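`project_node` above does three things: convert the world position to a graph node, clamp it into the grid bounds, and snap it to the nearest free cell via `search_on_grid`. The clamping step is just a min/max pair per axis; a standalone sketch (hypothetical `clamp_node` helper, not in the repo):

def clamp_node(node, resolution):
    """Clamp an (x, y) node into [0, resolution - 1] on each axis."""
    x = min(resolution[0] - 1, max(0, int(node[0])))
    y = min(resolution[1] - 1, max(0, int(node[1])))
    return (x, y)

print(clamp_node((-3.2, 7.9), (5, 5)))  # -> (0, 4)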
/driving-benchmarks-yaw/version084/carla/planner/graph.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
2 | # Barcelona (UAB).
3 | #
4 | # This work is licensed under the terms of the MIT license.
5 | # For a copy, see <https://opensource.org/licenses/MIT>.
6 |
7 | import math
8 | import numpy as np
9 |
10 |
11 | def string_to_node(string):
12 | vec = string.split(',')
13 | return (int(vec[0]), int(vec[1]))
14 |
15 |
16 | def string_to_floats(string):
17 | vec = string.split(',')
18 | return (float(vec[0]), float(vec[1]), float(vec[2]))
19 |
20 |
21 | def sldist(c1, c2):
22 | return math.sqrt((c2[0] - c1[0]) ** 2 + (c2[1] - c1[1]) ** 2)
23 |
24 |
25 | def sldist3(c1, c2):
26 | return math.sqrt((c2[0] - c1[0]) ** 2 + (c2[1] - c1[1])
27 | ** 2 + (c2[2] - c1[2]) ** 2)
28 |
29 |
30 | class Graph(object):
31 | """
32 | A simple directed, weighted graph
33 | """
34 |
35 | def __init__(self, graph_file=None, node_density=50):
36 |
37 | self._nodes = set()
38 | self._angles = {}
39 | self._edges = {}
40 | self._distances = {}
41 | self._node_density = node_density
42 |
43 | if graph_file is not None:
44 | with open(graph_file, 'r') as f:
45 |                 # Skip the first four header lines;
46 |                 lines_after_4 = f.readlines()[4:]
47 |
48 |                 # the first remaining line holds the graph resolution.
49 | linegraphres = lines_after_4[0]
50 | self._resolution = string_to_node(linegraphres)
51 | for line in lines_after_4[1:]:
52 |
53 | from_node, to_node, d = line.split()
54 | from_node = string_to_node(from_node)
55 | to_node = string_to_node(to_node)
56 |
57 | if from_node not in self._nodes:
58 | self.add_node(from_node)
59 | if to_node not in self._nodes:
60 | self.add_node(to_node)
61 |
62 | self._edges.setdefault(from_node, [])
63 | self._edges[from_node].append(to_node)
64 | self._distances[(from_node, to_node)] = float(d)
65 |
66 | def add_node(self, value):
67 | self._nodes.add(value)
68 |
69 | def make_orientations(self, node, heading):
70 |
71 | import collections
72 | distance_dic = {}
73 | for node_iter in self._nodes:
74 | if node_iter != node:
75 | distance_dic[sldist(node, node_iter)] = node_iter
76 |
77 | distance_dic = collections.OrderedDict(
78 | sorted(distance_dic.items()))
79 |
80 | self._angles[node] = heading
81 | for _, v in distance_dic.items():
82 | start_to_goal = np.array([node[0] - v[0], node[1] - v[1]])
83 |
84 |
85 | self._angles[v] = start_to_goal / np.linalg.norm(start_to_goal)
86 |
87 | def add_edge(self, from_node, to_node, distance):
88 | self._add_edge(from_node, to_node, distance)
89 |
90 | def _add_edge(self, from_node, to_node, distance):
91 | self._edges.setdefault(from_node, [])
92 | self._edges[from_node].append(to_node)
93 | self._distances[(from_node, to_node)] = distance
94 |
95 | def get_resolution(self):
96 | return self._resolution
97 | def get_edges(self):
98 | return self._edges
99 |
100 | def intersection_nodes(self):
101 |
102 | intersect_nodes = []
103 | for node in self._nodes:
104 | if len(self._edges[node]) > 2:
105 | intersect_nodes.append(node)
106 |
107 | return intersect_nodes
108 |
109 | def curve_nodes(self):
110 |
111 | intersect_nodes = []
112 | for node in self._nodes:
113 | if len(self._edges[node]) > 1:
114 | intersect_nodes.append(node)
115 |
116 | return intersect_nodes
117 |
118 | # This contains also the non-intersection turns...
119 |
120 | def turn_nodes(self):
121 |
122 | return self._nodes
123 |
124 | def plot_ori(self, c):
125 | from matplotlib import collections as mc
126 |
127 | import matplotlib.pyplot as plt
128 | line_len = 1
129 |
130 | lines = [[(p[0], p[1]), (p[0] + line_len * self._angles[p][0],
131 | p[1] + line_len * self._angles[p][1])] for p in self._nodes]
132 | lc = mc.LineCollection(lines, linewidth=2, color='green')
133 | _, ax = plt.subplots()
134 | ax.add_collection(lc)
135 |
136 | ax.autoscale()
137 | ax.margins(0.1)
138 |
139 | xs = [p[0] for p in self._nodes]
140 | ys = [p[1] for p in self._nodes]
141 |
142 | plt.scatter(xs, ys, color=c)
143 |
144 | def plot(self, c):
145 | import matplotlib.pyplot as plt
146 | xs = [p[0] for p in self._nodes]
147 | ys = [p[1] for p in self._nodes]
148 |
149 | plt.scatter(xs, ys, color=c)
150 |
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/carla/driving_benchmark/results_printer.py:
--------------------------------------------------------------------------------
1 | import os
2 | import numpy as np
3 | import json
4 |
5 |
6 | def print_summary(metrics_summary, weathers, path):
7 | """
8 |     We print the summary of the testing for the selected set of weathers.
9 |
10 |     We take the raw data and print it the way it was described in the CoRL 2017 paper.
11 |
12 | """
13 |
14 | # Improve readability by adding a weather dictionary
15 | weather_name_dict = {1: 'Clear Noon', 3: 'After Rain Noon',
16 | 6: 'Heavy Rain Noon', 8: 'Clear Sunset',
17 | 4: 'Cloudy After Rain', 14: 'Soft Rain Sunset'}
18 |
19 | # First we write the entire dictionary on the benchmark folder.
20 | with open(os.path.join(path, 'metrics.json'), 'w') as fo:
21 | fo.write(json.dumps(metrics_summary))
22 |
23 |     # Second, we print the metrics that can be computed directly by averaging
24 |
25 | metrics_to_average = [
26 | 'episodes_fully_completed',
27 | 'episodes_completion'
28 |
29 | ]
30 |     # We infer the number of episodes from the size of the first metric entry
31 | number_of_episodes = len(list(metrics_summary['episodes_fully_completed'].items())[0][1])
32 |
33 | for metric in metrics_to_average:
34 |
35 | if metric == 'episodes_completion':
36 |             print("Average Percentage of Distance to Goal Travelled")
37 |         else:
38 |             print("Percentage of Successful Episodes")
39 |
40 |         print("")
41 | values = metrics_summary[metric]
42 |
43 | metric_sum_values = np.zeros(number_of_episodes)
44 | for weather, tasks in values.items():
45 | if weather in set(weathers):
46 | print(' Weather: ', weather_name_dict[weather])
47 | count = 0
48 | for t in tasks:
49 | # if isinstance(t, np.ndarray) or isinstance(t, list):
50 | if t == []:
51 | print(' Metric Not Computed')
52 | else:
53 | print(' Task:', count, ' -> ', float(sum(t)) / float(len(t)))
54 | metric_sum_values[count] += (float(sum(t)) / float(len(t))) * 1.0 / float(
55 | len(weathers))
56 |
57 | count += 1
58 |
59 |         print(' Average Between Weathers')
60 |         for i in range(len(metric_sum_values)):
61 |             print('    Task ', i, ' -> ', metric_sum_values[i])
62 |         print("")
63 |
64 | infraction_metrics = [
65 | 'collision_pedestrians',
66 | 'collision_vehicles',
67 | 'collision_other',
68 | 'intersection_offroad',
69 | 'intersection_otherlane'
70 |
71 | ]
72 |
73 | # We need to collect the total number of kilometers for each task
74 |
75 | for metric in infraction_metrics:
76 | values_driven = metrics_summary['driven_kilometers']
77 | values = metrics_summary[metric]
78 | metric_sum_values = np.zeros(number_of_episodes)
79 | summed_driven_kilometers = np.zeros(number_of_episodes)
80 |
81 |         if metric == 'collision_pedestrians':
82 |             print('Avg. Kilometers driven before a collision with a PEDESTRIAN')
83 |         elif metric == 'collision_vehicles':
84 |             print('Avg. Kilometers driven before a collision with a VEHICLE')
85 |         elif metric == 'collision_other':
86 |             print('Avg. Kilometers driven before a collision with a STATIC OBSTACLE')
87 |         elif metric == 'intersection_offroad':
88 |             print('Avg. Kilometers driven before going OUTSIDE OF THE ROAD')
89 |         else:
90 |             print('Avg. Kilometers driven before invading the OPPOSITE LANE')
91 |
92 | # print (zip(values.items(), values_driven.items()))
93 | for items_metric, items_driven in zip(values.items(), values_driven.items()):
94 | weather = items_metric[0]
95 | tasks = items_metric[1]
96 | tasks_driven = items_driven[1]
97 |
98 | if weather in set(weathers):
99 | print(' Weather: ', weather_name_dict[weather])
100 | count = 0
101 | for t, t_driven in zip(tasks, tasks_driven):
102 | # if isinstance(t, np.ndarray) or isinstance(t, list):
103 | if t == []:
104 | print('Metric Not Computed')
105 | else:
106 | if sum(t) > 0:
107 | print(' Task ', count, ' -> ', t_driven / float(sum(t)))
108 | else:
109 | print(' Task ', count, ' -> more than', t_driven)
110 |
111 | metric_sum_values[count] += float(sum(t))
112 | summed_driven_kilometers[count] += t_driven
113 |
114 | count += 1
115 |         print(' Average Between Weathers')
116 |         for i in range(len(metric_sum_values)):
117 |             if metric_sum_values[i] == 0:
118 |                 print('    Task ', i, ' -> more than ', summed_driven_kilometers[i])
119 |             else:
120 |                 print('    Task ', i, ' -> ', summed_driven_kilometers[i] / metric_sum_values[i])
121 |         print("")
122 |
123 | print("")
124 | print("")
125 |
--------------------------------------------------------------------------------
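The `metrics_summary` that `print_summary` expects maps each metric name to a `{weather_id: [per-task episode lists]}` dict. A toy summary in that shape (values invented for illustration) makes the first averaging loop concrete:

# metric name -> weather id -> list of tasks -> list of per-episode values
metrics_summary = {
    'episodes_fully_completed': {
        1: [[1, 0, 1]],   # weather 1: one task, three episodes
        8: [[1, 1, 1]],   # weather 8: the same task under another weather
    },
}

weathers = [1, 8]
values = metrics_summary['episodes_fully_completed']
# Same computation as the first loop of print_summary, for task 0:
per_weather = [sum(values[w][0]) / float(len(values[w][0])) for w in weathers]
print(sum(per_weather) / len(weathers))  # 0.8333... (2/3 and 3/3 averaged)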
/driving-benchmarks-yaw/utils/PCF_max_branch_net_ver0_1_adj.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
4 | def steering_multi_function(input_speed, weight_factor=1.7):
5 | return input_speed * -0.02 + weight_factor
6 |
7 | def saperate_environment(episode_name):
8 | return episode_name.split('_')
9 |
10 |
11 | def action_adjusting(direction, steer, acc, brake, speed, episode_name, before_steering):
12 |
13 | weather, exp_id, se_point = saperate_environment(episode_name)
14 |
15 | if brake < 0.1:
16 | brake = 0.0
17 |
18 |     # We limit speed to roughly 35 km/h (per-task caps applied below).
19 | # for Town 1
20 | # if speed > 35: # and brake == 0.0:
21 | # acc = 0.0
22 | # for Town 2
23 |
24 | # if self.before_steering != 0 and abs(self.before_steering - steer) > 0.1:
25 | # steer = (self.before_steering + steer) / 2
26 |
27 | if exp_id == "0":
28 | if speed > 37:
29 | acc = 0.0
30 | acc = acc * 1.7
31 | elif exp_id == "3":
32 | if speed > 30:
33 | acc = acc * 1
34 | else:
35 | acc = acc * 1.5
36 | else:
37 | if speed > 35:
38 | acc = 0.0
39 | acc = acc * 1.5
40 |
41 |     # If the steering magnitude exceeds a threshold, reduce acceleration.
42 | if np.abs(steer) > 0.15:
43 | acc = acc * 0.5
44 |
45 |     # Set the steering angle that counts as a curve: curve_limit,
46 |     # and the allowed speed through it: curve_limit_speed.
47 |     # Static values for now; if needed, use the steer-vs-curve_limit gap as brake, or scale it proportionally.
48 | curve_limit = 0.05
49 | curve_limit_speed = 15
50 | curve_limit_1_2 = 0.01
51 | curve_limit_speed_1_2 = 17
52 |
53 | if direction == 0:
54 | if np.abs(steer) > curve_limit and speed > curve_limit_speed:
55 | acc = 0
56 | brake = 1
57 |         # When direction == 0, apply the steering weight below only after speed has dropped for the curve.
58 | if speed <= curve_limit_speed:
59 | steer = steering_multi_function(speed) * steer
60 | elif direction == 1:
61 | if speed > curve_limit_speed_1_2:
62 | acc = 0
63 | brake = 1
64 | steer = steering_multi_function(speed, 1.6) * steer
65 | if steer > 0:
66 | steer = 0
67 | elif direction == 2:
68 | if speed > curve_limit_speed_1_2:
69 | acc = 0
70 | brake = 1
71 | steer = steering_multi_function(speed) * steer
72 | if steer < 0:
73 | steer = 0
74 |
75 | return steer, acc, brake
76 |
77 |
78 | def action_adjusting_town02(direction, steer, acc, brake, speed, episode_name, before_steering):
79 | weather, exp_id, se_point = saperate_environment(episode_name)
80 |
81 | if brake < 0.1:
82 | brake = 0.0
83 |
84 |     # We limit speed to roughly 35 km/h (per-task caps applied below).
85 | # for Town 1
86 | # if speed > 35: # and brake == 0.0:
87 | # acc = 0.0
88 | # for Town 2
89 |
90 | if exp_id == "0":
91 | if speed > 37:
92 | acc = 0.0
93 | acc = acc * 1.7
94 | elif exp_id == "3":
95 | if speed > 30:
96 | acc = acc * 1
97 | else:
98 | acc = acc * 1.5
99 | else:
100 | if speed > 35:
101 | acc = 0.0
102 | acc = acc * 1.5
103 |
104 |     # If the steering magnitude exceeds a threshold, reduce acceleration.
105 | if np.abs(steer) > 0.15:
106 | acc = acc * 0.5
107 |
108 | curve_limit = 0.04
109 | curve_limit_speed = 15
110 | curve_limit_1_2 = 0.01
111 | curve_limit_speed_1_2 = 17
112 |
113 | if direction == 0:
114 | if np.abs(steer) > curve_limit and speed > curve_limit_speed:
115 | acc = 0
116 | brake = 1
117 |         # When direction == 0, apply the steering weight below only after speed has dropped for the curve.
118 | if speed <= curve_limit_speed:
119 | steer = steering_multi_function(speed) * steer
120 | elif direction == 1:
121 |
122 | if weather == "14":
123 | if speed > curve_limit_speed_1_2 - 1:
124 | acc = 0
125 | brake = 1
126 | steer = steering_multi_function(speed, 1.55) * steer
127 | elif weather == "1":
128 | if speed > curve_limit_speed_1_2:
129 | acc = 0
130 | brake = 1
131 | if exp_id == "3":
132 | steer = steering_multi_function(speed, 1.4) * steer
133 | else:
134 | steer = steering_multi_function(speed, 1.6) * steer
135 | elif weather == "4":
136 | if speed > curve_limit_speed_1_2:
137 | acc = 0
138 | brake = 1
139 | if exp_id == "3":
140 | steer = steering_multi_function(speed, 1.5) * steer
141 | else:
142 | steer = steering_multi_function(speed, 1.6) * steer
143 | else:
144 | if speed > curve_limit_speed_1_2:
145 | acc = 0
146 | brake = 1
147 | if exp_id == "3":
148 | steer = steering_multi_function(speed, 1.42) * steer
149 | else:
150 | steer = steering_multi_function(speed, 1.5) * steer
151 | if steer > 0:
152 | steer = 0
153 | elif direction == 2:
154 | if speed > curve_limit_speed_1_2:
155 | acc = 0
156 | brake = 0.7
157 | steer = steering_multi_function(speed) * steer
158 | if steer < 0:
159 | steer = 0
160 |
161 | return steer, acc, brake
--------------------------------------------------------------------------------
/carla_cil_pytorch_eval-pytorch_eval/carla/transform.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
2 | # Barcelona (UAB), and the INTEL Visual Computing Lab.
3 | #
4 | # This work is licensed under the terms of the MIT license.
5 | # For a copy, see <https://opensource.org/licenses/MIT>.
6 |
7 | import math
8 |
9 | from collections import namedtuple
10 |
11 | try:
12 | import numpy
13 | except ImportError:
14 | raise RuntimeError(
15 | 'cannot import numpy, make sure numpy package is installed.')
16 |
17 | try:
18 | from . import carla_server_pb2 as carla_protocol
19 | except ImportError:
20 | raise RuntimeError('cannot import "carla_server_pb2.py", run '
21 | 'the protobuf compiler to generate this file')
22 |
23 |
24 | Translation = namedtuple('Translation', 'x y z')
25 | Translation.__new__.__defaults__ = (0.0, 0.0, 0.0)
26 |
27 | Rotation = namedtuple('Rotation', 'pitch yaw roll')
28 | Rotation.__new__.__defaults__ = (0.0, 0.0, 0.0)
29 |
30 | Scale = namedtuple('Scale', 'x y z')
31 | Scale.__new__.__defaults__ = (1.0, 1.0, 1.0)
32 |
33 |
34 | class Transform(object):
35 | """A 3D transformation.
36 |
37 | The transformation is applied in the order: scale, rotation, translation.
38 | """
39 |
40 | def __init__(self, *args, **kwargs):
41 | if 'matrix' in kwargs:
42 | self.matrix = kwargs['matrix']
43 | return
44 | if isinstance(args[0], carla_protocol.Transform):
45 | args = [
46 | Translation(
47 | args[0].location.x,
48 | args[0].location.y,
49 | args[0].location.z),
50 | Rotation(
51 | args[0].rotation.pitch,
52 | args[0].rotation.yaw,
53 | args[0].rotation.roll)
54 | ]
55 | self.matrix = numpy.matrix(numpy.identity(4))
56 | self.set(*args, **kwargs)
57 |
58 | def set(self, *args):
59 | """Builds the transform matrix given a Translate, Rotation
60 | and Scale.
61 | """
62 | translation = Translation()
63 | rotation = Rotation()
64 | scale = Scale()
65 |
66 | if len(args) > 3:
67 | raise ValueError("'Transform' accepts 3 values as maximum.")
68 |
69 | def get_single_obj_type(obj_type):
70 | """Returns the unique object contained in the
71 |             argument list that is an instance of 'obj_type'.
72 | """
73 | obj = [x for x in args if isinstance(x, obj_type)]
74 | if len(obj) > 1:
75 |                 raise ValueError("Transform only accepts one instance of " +
76 | str(obj_type) + " as a parameter")
77 | elif not obj:
78 | # Create an instance of the type that is 'obj_type'
79 | return obj_type()
80 | return obj[0]
81 |
82 | translation = get_single_obj_type(Translation)
83 | rotation = get_single_obj_type(Rotation)
84 | scale = get_single_obj_type(Scale)
85 |
86 | for param in args:
87 | if not isinstance(param, Translation) and \
88 | not isinstance(param, Rotation) and \
89 | not isinstance(param, Scale):
90 | raise TypeError(
91 | "'" + str(type(param)) + "' type not match with \
92 | 'Translation', 'Rotation' or 'Scale'")
93 |
94 | # Transformation matrix
95 | cy = math.cos(numpy.radians(rotation.yaw))
96 | sy = math.sin(numpy.radians(rotation.yaw))
97 | cr = math.cos(numpy.radians(rotation.roll))
98 | sr = math.sin(numpy.radians(rotation.roll))
99 | cp = math.cos(numpy.radians(rotation.pitch))
100 | sp = math.sin(numpy.radians(rotation.pitch))
101 | self.matrix[0, 3] = translation.x
102 | self.matrix[1, 3] = translation.y
103 | self.matrix[2, 3] = translation.z
104 | self.matrix[0, 0] = scale.x * (cp * cy)
105 | self.matrix[0, 1] = scale.y * (cy * sp * sr - sy * cr)
106 | self.matrix[0, 2] = -scale.z * (cy * sp * cr + sy * sr)
107 | self.matrix[1, 0] = scale.x * (sy * cp)
108 | self.matrix[1, 1] = scale.y * (sy * sp * sr + cy * cr)
109 | self.matrix[1, 2] = scale.z * (cy * sr - sy * sp * cr)
110 | self.matrix[2, 0] = scale.x * (sp)
111 | self.matrix[2, 1] = -scale.y * (cp * sr)
112 | self.matrix[2, 2] = scale.z * (cp * cr)
113 |
114 | def inverse(self):
115 | """Return the inverse transform."""
116 | return Transform(matrix=numpy.linalg.inv(self.matrix))
117 |
118 | def transform_points(self, points):
119 | """
120 | Given a 4x4 transformation matrix, transform an array of 3D points.
121 |         Expected point format: [[X0,Y0,Z0],..[Xn,Yn,Zn]]
122 |         """
123 |         # Needed format: [[X0,..Xn],[Y0,..Yn],[Z0,..Zn]]. So let's transpose
124 |         # the point matrix.
125 |         points = points.transpose()
126 |         # Add a row of 1s: [[X0..,Xn],[Y0..,Yn],[Z0..,Zn],[1,..1]]
127 | points = numpy.append(points, numpy.ones((1, points.shape[1])), axis=0)
128 | # Point transformation
129 | points = self.matrix * points
130 | # Return all but last row
131 | return points[0:3].transpose()
132 |
133 | def __mul__(self, other):
134 | return Transform(matrix=numpy.dot(self.matrix, other.matrix))
135 |
136 | def __str__(self):
137 | return str(self.matrix)
138 |
--------------------------------------------------------------------------------
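`Transform` cannot be imported standalone (it pulls in `carla_server_pb2`), so here is a self-contained re-implementation of the yaw-only case of `set()` to sanity-check the matrix: with pitch = roll = 0 and unit scale, a 90-degree yaw should map (1, 0, 0) to roughly (0, 1, 0).

import numpy as np

def yaw_transform(yaw_deg, tx=0.0, ty=0.0, tz=0.0):
    """4x4 matrix matching Transform.set() for pitch = roll = 0, unit scale."""
    cy, sy = np.cos(np.radians(yaw_deg)), np.sin(np.radians(yaw_deg))
    m = np.identity(4)
    m[0, 0], m[0, 1] = cy, -sy
    m[1, 0], m[1, 1] = sy, cy
    m[0, 3], m[1, 3], m[2, 3] = tx, ty, tz
    return m

def transform_points(matrix, points):
    # Same steps as Transform.transform_points: transpose, append 1s, multiply.
    pts = np.append(points.T, np.ones((1, points.shape[0])), axis=0)
    return (matrix @ pts)[0:3].T

print(transform_points(yaw_transform(90.0), np.array([[1.0, 0.0, 0.0]])))
# ~[[0., 1., 0.]]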
/driving-benchmarks-yaw/version084/carla/transform.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
2 | # Barcelona (UAB), and the INTEL Visual Computing Lab.
3 | #
4 | # This work is licensed under the terms of the MIT license.
5 | # For a copy, see <https://opensource.org/licenses/MIT>.
6 |
7 | import math
8 |
9 | from collections import namedtuple
10 |
11 | try:
12 | import numpy
13 | except ImportError:
14 | raise RuntimeError(
15 | 'cannot import numpy, make sure numpy package is installed.')
16 |
17 | try:
18 | from . import carla_server_pb2 as carla_protocol
19 | except ImportError:
20 | raise RuntimeError('cannot import "carla_server_pb2.py", run '
21 | 'the protobuf compiler to generate this file')
22 |
23 |
24 | Translation = namedtuple('Translation', 'x y z')
25 | Translation.__new__.__defaults__ = (0.0, 0.0, 0.0)
26 |
27 | Rotation = namedtuple('Rotation', 'pitch yaw roll')
28 | Rotation.__new__.__defaults__ = (0.0, 0.0, 0.0)
29 |
30 | Scale = namedtuple('Scale', 'x y z')
31 | Scale.__new__.__defaults__ = (1.0, 1.0, 1.0)
32 |
33 |
34 | class Transform(object):
35 | """A 3D transformation.
36 |
37 | The transformation is applied in the order: scale, rotation, translation.
38 | """
39 |
40 | def __init__(self, *args, **kwargs):
41 | if 'matrix' in kwargs:
42 | self.matrix = kwargs['matrix']
43 | return
44 | if isinstance(args[0], carla_protocol.Transform):
45 | args = [
46 | Translation(
47 | args[0].location.x,
48 | args[0].location.y,
49 | args[0].location.z),
50 | Rotation(
51 | args[0].rotation.pitch,
52 | args[0].rotation.yaw,
53 | args[0].rotation.roll)
54 | ]
55 | self.matrix = numpy.matrix(numpy.identity(4))
56 | self.set(*args, **kwargs)
57 |
58 | def set(self, *args):
59 | """Builds the transform matrix given a Translate, Rotation
60 | and Scale.
61 | """
62 | translation = Translation()
63 | rotation = Rotation()
64 | scale = Scale()
65 |
66 | if len(args) > 3:
67 | raise ValueError("'Transform' accepts 3 values as maximum.")
68 |
69 | def get_single_obj_type(obj_type):
70 | """Returns the unique object contained in the
71 |             argument list that is an instance of 'obj_type'.
72 | """
73 | obj = [x for x in args if isinstance(x, obj_type)]
74 | if len(obj) > 1:
75 |                 raise ValueError("Transform only accepts one instance of " +
76 | str(obj_type) + " as a parameter")
77 | elif not obj:
78 | # Create an instance of the type that is 'obj_type'
79 | return obj_type()
80 | return obj[0]
81 |
82 | translation = get_single_obj_type(Translation)
83 | rotation = get_single_obj_type(Rotation)
84 | scale = get_single_obj_type(Scale)
85 |
86 | for param in args:
87 | if not isinstance(param, Translation) and \
88 | not isinstance(param, Rotation) and \
89 | not isinstance(param, Scale):
90 | raise TypeError(
91 | "'" + str(type(param)) + "' type not match with \
92 | 'Translation', 'Rotation' or 'Scale'")
93 |
94 | # Transformation matrix
95 | cy = math.cos(numpy.radians(rotation.yaw))
96 | sy = math.sin(numpy.radians(rotation.yaw))
97 | cr = math.cos(numpy.radians(rotation.roll))
98 | sr = math.sin(numpy.radians(rotation.roll))
99 | cp = math.cos(numpy.radians(rotation.pitch))
100 | sp = math.sin(numpy.radians(rotation.pitch))
101 | self.matrix[0, 3] = translation.x
102 | self.matrix[1, 3] = translation.y
103 | self.matrix[2, 3] = translation.z
104 | self.matrix[0, 0] = scale.x * (cp * cy)
105 | self.matrix[0, 1] = scale.y * (cy * sp * sr - sy * cr)
106 | self.matrix[0, 2] = -scale.z * (cy * sp * cr + sy * sr)
107 | self.matrix[1, 0] = scale.x * (sy * cp)
108 | self.matrix[1, 1] = scale.y * (sy * sp * sr + cy * cr)
109 | self.matrix[1, 2] = scale.z * (cy * sr - sy * sp * cr)
110 | self.matrix[2, 0] = scale.x * (sp)
111 | self.matrix[2, 1] = -scale.y * (cp * sr)
112 | self.matrix[2, 2] = scale.z * (cp * cr)
113 |
114 | def inverse(self):
115 | """Return the inverse transform."""
116 | return Transform(matrix=numpy.linalg.inv(self.matrix))
117 |
118 | def transform_points(self, points):
119 | """
120 | Given a 4x4 transformation matrix, transform an array of 3D points.
121 |         Expected point format: [[X0,Y0,Z0],..[Xn,Yn,Zn]]
122 |         """
123 |         # Needed format: [[X0,..Xn],[Y0,..Yn],[Z0,..Zn]]. So let's transpose
124 |         # the point matrix.
125 |         points = points.transpose()
126 |         # Add a row of 1s: [[X0..,Xn],[Y0..,Yn],[Z0..,Zn],[1,..1]]
127 | points = numpy.append(points, numpy.ones((1, points.shape[1])), axis=0)
128 | # Point transformation
129 | points = self.matrix * points
130 | # Return all but last row
131 | return points[0:3].transpose()
132 |
133 | def __mul__(self, other):
134 | return Transform(matrix=numpy.dot(self.matrix, other.matrix))
135 |
136 | def __str__(self):
137 | return str(self.matrix)
138 |
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/utils/auxi_ver2_adj.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | def steering_multi_function(input_speed, weight_factor=1.7):
4 | return input_speed * -0.02 + weight_factor
5 |
6 | def saperate_environment(episode_name):
7 | return episode_name.split('_')
8 |
9 | def action_adjusting(direction, steer, acc, brake, speed, pred_speed, episode_name, loc_x, loc_y, running_cnt):
10 |
11 | weather, exp_id, se_point = saperate_environment(episode_name)
12 |
13 | if exp_id == "3":
14 | if brake < 0.1:
15 | brake = 0.0
16 | else:
17 | brake *= 5
18 |
19 | if acc > 0.8:
20 | brake = 0.0
21 |
22 | else:
23 | if brake < 0.2:
24 | brake = 0.0
25 |
26 | if acc > brake:
27 | brake = 0.0
28 |
29 |
30 | if exp_id == "0":
31 | if speed > 37:
32 | acc = 0.0
33 | acc = acc * 3.7
34 | elif exp_id == "3":
35 | if speed > 35:
36 | acc = 0.0
37 | acc = acc * 3.2
38 | else:
39 | if speed > 35:
40 | acc = 0.0
41 | acc = acc * 3.5
42 |
43 |     # If the steering magnitude exceeds a threshold, reduce acceleration.
44 | if np.abs(steer) > 0.15:
45 | acc = acc * 0.5
46 |
47 |     # Set the steering angle that counts as a curve: curve_limit,
48 |     # and the allowed speed through it: curve_limit_speed.
49 |     # Static values for now; if needed, use the steer-vs-curve_limit gap as brake, or scale it proportionally.
50 | curve_limit = 0.05
51 | curve_limit_speed = 10
52 | curve_limit_1_2 = 0.01
53 | curve_limit_speed_1_2 = 17
54 |
55 | if direction == 0:
56 | if np.abs(steer) > curve_limit and speed > curve_limit_speed:
57 | acc = 0
58 | brake = 1
59 |         # When direction == 0, apply the steering weight below only after speed has dropped for the curve.
60 | # if speed <= curve_limit_speed and pred_speed <= curve_limit_speed:
61 | if speed <= curve_limit_speed:
62 | steer = steering_multi_function(speed) * steer
63 |             # Suppress turning into a curve right at spawn (Town01 [102, 87]).
64 | if int(loc_x) in (379, 380, 381, 382, 383, 384) and int(loc_y) == 330 and steer < 0:
65 | steer = 0
66 |
67 |             # Suppress crashing into the side guardrail while otherwise driving straight.
68 | if int(loc_x) == -2 and int(loc_y) in (14, 15) and steer > 0:
69 | steer = 0
70 | if int(loc_x) == 391 and int(loc_y) in list(range(150, 219)) and steer > 0:
71 | steer = 0
72 | if int(loc_x) in(0, 1) and int(loc_y) in(23, 24):
73 | steer = 0
74 |
75 | elif direction == 1:
76 | if speed > curve_limit_speed_1_2:
77 | acc = 0
78 | brake = 1
79 | steer = steering_multi_function(speed, 1.4) * steer
80 | if steer > 0:
81 | steer = 0
82 | elif direction == 2:
83 | if speed > curve_limit_speed_1_2:
84 | acc = 0
85 | brake = 1
86 | steer = steering_multi_function(speed) * steer
87 | if steer < 0:
88 | steer = 0
89 | elif direction == 3:
90 | # steer = max(-0.002, min(0.002, steer))
91 | steer *= 0.3
92 | if int(loc_x) in list(range(330, 350)) and int(loc_y) in (325, 326):
93 | if steer > 0:
94 | steer = 0
95 | else:
96 | steer *= 4
97 |
98 |     ''' steering adjustment based on the running step count '''
99 | if running_cnt < 30:
100 | steer = 0
101 |
102 | acc = np.fmax(np.fmin(acc, 1.0), 0.0)
103 |
104 | return steer, acc, brake
105 |
106 |
107 | def action_adjusting_town02(direction, steer, acc, brake, speed, pred_speed, episode_name, loc_x, loc_y, running_cnt):
108 | weather, exp_id, se_point = saperate_environment(episode_name)
109 |
110 | if brake < 0.1:
111 | brake = 0.0
112 |
113 | if acc > brake:
114 | brake = 0.0
115 |
116 |     # We limit speed to roughly 35 km/h (per-task caps applied below).
117 | # for Town 1
118 | # if speed > 35: # and brake == 0.0:
119 | # acc = 0.0
120 | # for Town 2
121 |
122 | if exp_id == "0":
123 | if speed > 37:
124 | acc = 0.0
125 | acc = acc * 1.7
126 | else:
127 | if speed > 35:
128 | acc = 0.0
129 | acc = acc * 1.5
130 |
131 |     # If the steering magnitude exceeds a threshold, reduce acceleration.
132 | if np.abs(steer) > 0.15:
133 | acc = acc * 0.5
134 |
135 | curve_limit = 0.04
136 | curve_limit_speed = 15
137 | curve_limit_1_2 = 0.01
138 | curve_limit_speed_1_2 = 17
139 |
140 | if direction == 0:
141 | if np.abs(steer) > curve_limit and speed > curve_limit_speed:
142 | acc = 0
143 | brake = 1
144 |         # When direction == 0, apply the steering weight below only after speed has dropped for the curve.
145 | if speed <= curve_limit_speed:
146 | steer = steering_multi_function(speed) * steer
147 | elif direction == 1:
148 |
149 | if weather == "14":
150 | if speed > curve_limit_speed_1_2 - 1:
151 | acc = 0
152 | brake = 1
153 | steer = steering_multi_function(speed, 1.55) * steer
154 | else:
155 | if speed > curve_limit_speed_1_2:
156 | acc = 0
157 | brake = 1
158 | steer = steering_multi_function(speed, 1.5) * steer
159 | if steer > 0:
160 | steer = 0
161 | elif direction == 2:
162 | if speed > curve_limit_speed_1_2:
163 | acc = 0
164 | brake = 1
165 | steer = steering_multi_function(speed) * steer
166 | if steer < 0:
167 | steer = 0
168 | elif direction == 3:
169 | steer *= 0.3
170 |
171 | return steer, acc, brake
--------------------------------------------------------------------------------
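To see the combined effect of the rules above, a quick call to `action_adjusting` with made-up inputs (a lane-follow step, `direction == 0`, on the navigation task `exp_id == "0"`; the `'weather_expid_points'` episode-name format is inferred from `saperate_environment`, and the import path assumes running from the repo root): at 8 km/h the brake is zeroed, acceleration is boosted by 3.7 and then clamped to 1.0, and the steering gain -0.02 * 8 + 1.7 = 1.54 is applied because speed is under `curve_limit_speed`.

from utils.auxi_ver2_adj import action_adjusting

steer, acc, brake = action_adjusting(
    direction=0, steer=0.1, acc=0.5, brake=0.05, speed=8.0,
    pred_speed=8.0, episode_name='1_0_3.24', loc_x=100, loc_y=50,
    running_cnt=100)
print(steer, acc, brake)  # ~0.154, 1.0, 0.0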
/driving-benchmarks-yaw/utils/auxi_ver3_adj.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | def steering_multi_function(input_speed, weight_factor=1.7):
4 | return input_speed * -0.02 + weight_factor
5 |
6 | def saperate_environment(episode_name):
7 | return episode_name.split('_')
8 |
9 | def action_adjusting(direction, steer, acc, brake, speed, pred_speed, episode_name, loc_x, loc_y, running_cnt):
10 |
11 | weather, exp_id, se_point = saperate_environment(episode_name)
12 |
13 | if exp_id == "3":
14 | if brake < 0.1:
15 | brake = 0.0
16 | else:
17 | brake *= 5
18 |
19 | if acc > 0.8:
20 | brake = 0.0
21 |
22 | else:
23 | if brake < 0.2:
24 | brake = 0.0
25 |
26 | if acc > brake:
27 | brake = 0.0
28 |
29 |
30 | if exp_id == "0":
31 | if speed > 37:
32 | acc = 0.0
33 | acc = acc * 3.7
34 | elif exp_id == "3":
35 | if speed > 35:
36 | acc = 0.0
37 | acc = acc * 3.2
38 | else:
39 | if speed > 35:
40 | acc = 0.0
41 | acc = acc * 3.5
42 |
43 |     # If the steering magnitude exceeds a threshold, reduce acceleration.
44 | if np.abs(steer) > 0.15:
45 | acc = acc * 0.5
46 |
47 |     # Set the steering angle that counts as a curve: curve_limit,
48 |     # and the allowed speed through it: curve_limit_speed.
49 |     # Static values for now; if needed, use the steer-vs-curve_limit gap as brake, or scale it proportionally.
50 | curve_limit = 0.05
51 | curve_limit_speed = 10
52 | curve_limit_1_2 = 0.01
53 | curve_limit_speed_1_2 = 17
54 |
55 | if direction == 0:
56 | if np.abs(steer) > curve_limit and speed > curve_limit_speed:
57 | acc = 0
58 | brake = 1
59 |         # When direction == 0, apply the steering weight below only after speed has dropped for the curve.
60 | # if speed <= curve_limit_speed and pred_speed <= curve_limit_speed:
61 | if speed <= curve_limit_speed:
62 | steer = steering_multi_function(speed) * steer
63 |             # Suppress turning into a curve right at spawn (Town01 [102, 87]).
64 | if int(loc_x) in (379, 380, 381, 382, 383, 384) and int(loc_y) == 330 and steer < 0:
65 | steer = 0
66 |
67 |             # Suppress crashing into the side guardrail while otherwise driving straight.
68 | if int(loc_x) == -2 and int(loc_y) in (14, 15) and steer > 0:
69 | steer = 0
70 | if int(loc_x) == 391 and int(loc_y) in list(range(150, 219)) and steer > 0:
71 | steer = 0
72 | if int(loc_x) in(0, 1) and int(loc_y) in(23, 24):
73 | steer = 0
74 |
75 | elif direction == 1:
76 | if speed > curve_limit_speed_1_2:
77 | acc = 0
78 | brake = 1
79 | steer = steering_multi_function(speed, 1.7) * steer
80 | if steer > 0:
81 | steer = 0
82 | elif direction == 2:
83 | if speed > curve_limit_speed_1_2:
84 | acc = 0
85 | brake = 1
86 | steer = steering_multi_function(speed) * steer
87 | if steer < 0:
88 | steer = 0
89 | elif direction == 3:
90 | # steer = max(-0.002, min(0.002, steer))
91 | steer *= 0.3
92 | if int(loc_x) in list(range(330, 350)) and int(loc_y) in (325, 326):
93 | if steer > 0:
94 | steer = 0
95 | else:
96 | steer *= 4
97 |
98 |     ''' steering adjustment based on the running step count '''
99 | if running_cnt < 30:
100 | steer = 0
101 |
102 | acc = np.fmax(np.fmin(acc, 1.0), 0.0)
103 |
104 | return steer, acc, brake
105 |
106 |
107 | def action_adjusting_town02(direction, steer, acc, brake, speed, pred_speed, episode_name, loc_x, loc_y, running_cnt):
108 | weather, exp_id, se_point = saperate_environment(episode_name)
109 |
110 | if brake < 0.1:
111 | brake = 0.0
112 |
113 | if acc > brake:
114 | brake = 0.0
115 |
116 |     # We limit speed to roughly 35 km/h (per-task caps applied below).
117 | # for Town 1
118 | # if speed > 35: # and brake == 0.0:
119 | # acc = 0.0
120 | # for Town 2
121 |
122 | if exp_id == "0":
123 | if speed > 37:
124 | acc = 0.0
125 | acc = acc * 1.7
126 | else:
127 | if speed > 35:
128 | acc = 0.0
129 | acc = acc * 1.5
130 |
131 |     # If the steering magnitude exceeds a threshold, reduce acceleration.
132 | if np.abs(steer) > 0.15:
133 | acc = acc * 0.5
134 |
135 | curve_limit = 0.04
136 | curve_limit_speed = 15
137 | curve_limit_1_2 = 0.01
138 | curve_limit_speed_1_2 = 17
139 |
140 | if direction == 0:
141 | if np.abs(steer) > curve_limit and speed > curve_limit_speed:
142 | acc = 0
143 | brake = 1
144 |         # When direction == 0, apply the steering weight below only after speed has dropped for the curve.
145 | if speed <= curve_limit_speed:
146 | steer = steering_multi_function(speed) * steer
147 | elif direction == 1:
148 |
149 | if weather == "14":
150 | if speed > curve_limit_speed_1_2 - 1:
151 | acc = 0
152 | brake = 1
153 | steer = steering_multi_function(speed, 1.55) * steer
154 | else:
155 | if speed > curve_limit_speed_1_2:
156 | acc = 0
157 | brake = 1
158 | steer = steering_multi_function(speed, 1.5) * steer
159 | if steer > 0:
160 | steer = 0
161 | elif direction == 2:
162 | if speed > curve_limit_speed_1_2:
163 | acc = 0
164 | brake = 1
165 | steer = steering_multi_function(speed) * steer
166 | if steer < 0:
167 | steer = 0
168 | elif direction == 3:
169 | steer *= 0.3
170 |
171 | return steer, acc, brake
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/version084/driving_benchmarks/carla100/carla100.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
2 | # Barcelona (UAB).
3 | #
4 | # This work is licensed under the terms of the MIT license.
5 | # For a copy, see <https://opensource.org/licenses/MIT>.
6 |
7 | # CARLA 100 experiment set.
8 |
9 | from __future__ import print_function
10 |
11 | from ...benchmark_tools.experiment import Experiment
12 | from ...carla.sensor import Camera
13 | from ...carla.settings import CarlaSettings
14 | from ...benchmark_tools.experiment_suites.experiment_suite import ExperimentSuite
15 |
16 |
17 | class CARLA100(ExperimentSuite):
18 |
19 | @property
20 | def train_weathers(self):
21 | # return [10]
22 | return [1, 3, 6, 8]
23 |
24 | @property
25 | def test_weathers(self):
26 | # return [14]
27 | return [10, 14]
28 |
29 | @property
30 | def collision_as_failure(self):
31 | return True
32 | @property
33 | def traffic_light_as_failure(self):
34 | return False
35 | def calculate_time_out(self, path_distance):
36 | """
37 |         Return the timeout, in seconds, calculated
38 |         from the distance to the goal at a 5 km/h budget.
39 |         The timeout is padded since stops for traffic lights are expected.
40 | """
41 | return ((path_distance / 1000.0) / 5.0) * 3600.0 + 10.0
42 |
43 | def _poses_town01(self):
44 | """
45 |         Each list is a new task. Here every task uses the navigation poses.
46 |
47 | """
48 |
49 | def _poses_navigation():
50 | return [[105, 29], [27, 130], [102, 87], [132, 27], [25, 44],
51 | [4, 64], [34, 67], [54, 30], [140, 134], [105, 9],
52 | [148, 129], [65, 18], [21, 16], [147, 97], [134, 49],
53 | [30, 41], [81, 89], [69, 45], [102, 95], [18, 145],
54 | [111, 64], [79, 45], [84, 69], [73, 31], [37, 81]]
55 |
56 | return [
57 | _poses_navigation(),
58 | _poses_navigation(),
59 | _poses_navigation()
60 | ]
61 |
62 | def _poses_town01_lite(self):
63 | def _poses_navigation():
64 | return [[21, 16]]
65 |
66 | return [_poses_navigation()]
67 |
68 | def _poses_town02(self):
69 |
70 | def _poses_navigation():
71 | return [[19, 66], [79, 14], [19, 57], [39, 53], [60, 26],
72 | [53, 76], [42, 13], [31, 71], [59, 35], [47, 16],
73 | [10, 61], [66, 3], [20, 79], [14, 56], [26, 69],
74 | [79, 19], [2, 29], [16, 14], [5, 57], [77, 68],
75 | [70, 73], [46, 67], [34, 77], [61, 49], [21, 12]]
76 | return [_poses_navigation(),
77 | _poses_navigation(),
78 | _poses_navigation()
79 | ]
80 |
81 | def _poses_town02_lite(self):
82 |
83 | def _poses_navigation():
84 | return [[19, 66]]
85 | return [_poses_navigation()
86 | ]
87 |
88 | def build_experiments(self):
89 | """
90 |         Creates the whole set of experiment objects.
91 |         The experiments created depend on the selected town.
92 |
93 |
94 | """
95 |
96 | # We set the camera
97 | # This single RGB camera is used on every experiment
98 |
99 | camera = Camera('CameraRGB')
100 | camera.set(FOV=100)
101 | camera.set_image_size(800, 600)
102 | camera.set_position(2.0, 0.0, 1.4)
103 | camera.set_rotation(-15.0, 0, 0)
104 |
105 | if self._city_name == 'Town01':
106 | poses_tasks = self._poses_town01()
107 | vehicles_tasks = [0, 20, 100]
108 | pedestrians_tasks = [0, 50, 250]
109 | elif self._city_name == 'Town01_lite':
110 | poses_tasks = self._poses_town01_lite()
111 | vehicles_tasks = [20]
112 | pedestrians_tasks = [50]
113 | elif self._city_name == 'Town02_lite':
114 | poses_tasks = self._poses_town02_lite()
115 | vehicles_tasks = [35]
116 | pedestrians_tasks = [75]
117 | else:
118 | poses_tasks = self._poses_town02()
119 | vehicles_tasks = [0, 15, 70]
120 | pedestrians_tasks = [0, 50, 150]
121 |
122 | experiments_vector = []
123 |
124 | for weather in self.weathers:
125 |
126 | for iteration in range(len(poses_tasks)):
127 | poses = poses_tasks[iteration]
128 | vehicles = vehicles_tasks[iteration]
129 | pedestrians = pedestrians_tasks[iteration]
130 |
131 | conditions = CarlaSettings()
132 | conditions.set(
133 | SendNonPlayerAgentsInfo=True,
134 | NumberOfVehicles=vehicles,
135 | NumberOfPedestrians=pedestrians,
136 | WeatherId=weather
137 | )
138 |
139 | conditions.set(DisableTwoWheeledVehicles=True)
140 |                 # Add all the cameras that were set for these experiments
141 |
142 | conditions.add_sensor(camera)
143 |
144 | experiment = Experiment()
145 | experiment.set(
146 | Conditions=conditions,
147 | Poses=poses,
148 | Task=iteration,
149 | Repetitions=1
150 | )
151 | experiments_vector.append(experiment)
152 |
153 | return experiments_vector
154 |
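155 | # A minimal sanity check of calculate_time_out (an illustrative addition, not
156 | # part of the original suite): path_distance is assumed to be in meters, and
157 | # each pose above is a [start, goal] pair of player-start indices. For a
158 | # 700 m route the timeout is (700 / 1000.0) / 5.0 * 3600.0 + 10.0 == 514.0,
159 | # i.e. the seconds needed at 5 km/h plus a 10-second buffer.
160 | if __name__ == '__main__':
161 |     # ExperimentSuite.__init__ takes the city name, as the run scripts show.
162 |     suite = CARLA100('Town01')
163 |     assert abs(suite.calculate_time_out(700.0) - 514.0) < 1e-6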
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/utils/max_branch_ver0_adj.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
4 | def steering_multi_function(input_speed, weight_factor=1.7):
5 | return input_speed * -0.02 + weight_factor
6 |
7 | def saperate_environment(episode_name):
8 | return episode_name.split('_')
9 |
10 |
11 | def action_adjusting(direction, steer, acc, brake, speed, pred_speed, episode_name, loc_x, loc_y, running_cnt):
12 |
13 | weather, exp_id, se_point = saperate_environment(episode_name)
14 |
15 | if exp_id == "3":
16 | if brake < 0.1:
17 | brake = 0.0
18 | else:
19 | brake *= 5
20 |
21 | if acc > 0.8:
22 | brake = 0.0
23 |
24 | else:
25 | if brake < 0.2:
26 | brake = 0.0
27 |
28 | if acc > brake:
29 | brake = 0.0
30 |
31 |
32 | if exp_id == "0":
33 | if speed > 37:
34 | acc = 0.0
35 | acc = acc * 3.7
36 | elif exp_id == "3":
37 | if speed > 35:
38 | acc = 0.0
39 | acc = acc * 3.2
40 | else:
41 | if speed > 35:
42 | acc = 0.0
43 | acc = acc * 3.5
44 |
45 |     # If the steering magnitude exceeds a threshold, reduce the acceleration.
46 | if np.abs(steer) > 0.15:
47 | acc = acc * 0.5
48 |
49 |     # Set the reference steering angle for a curve: curve_limit
50 |     # Set the speed allowed in a curve: curve_limit_speed
51 |     # Try static values first; if needed, use the gap between steer and curve_limit as the brake, or scale it proportionally.
52 | curve_limit = 0.05
53 | curve_limit_speed = 10
54 | curve_limit_1_2 = 0.01
55 | curve_limit_speed_1_2 = 17
56 |
57 | if direction == 0:
58 | if np.abs(steer) > curve_limit and speed > curve_limit_speed:
59 | acc = 0
60 | brake = 1
61 |         # When direction == 0, apply the steering weight below only when the speed has already dropped for the curve.
62 | # if speed <= curve_limit_speed and pred_speed <= curve_limit_speed:
63 | if speed <= curve_limit_speed:
64 | steer = steering_multi_function(speed) * steer
65 |         # To suppress the case of entering a curve immediately at spawn (Town01 pose [102, 87]).
66 | if int(loc_x) in (379, 380, 381, 382, 383, 384) and int(loc_y) == 330 and steer < 0:
67 | steer = 0
68 |
69 |         # Suppress the case where the car drives straight well, then hits the guardrail at the side.
70 | if int(loc_x) == -2 and int(loc_y) in (14, 15) and steer > 0:
71 | steer = 0
72 | if int(loc_x) == 391 and int(loc_y) in list(range(150, 219)) and steer > 0:
73 | steer = 0
74 |         if int(loc_x) in (0, 1) and int(loc_y) in (23, 24):
75 | steer = 0
76 |
77 | elif direction == 1:
78 | if speed > curve_limit_speed_1_2:
79 | acc = 0
80 | brake = 1
81 | steer = steering_multi_function(speed, 1.4) * steer
82 | if steer > 0:
83 | steer = 0
84 | elif direction == 2:
85 | if speed > curve_limit_speed_1_2:
86 | acc = 0
87 | brake = 1
88 | steer = steering_multi_function(speed) * steer
89 | if steer < 0:
90 | steer = 0
91 | elif direction == 3:
92 | # steer = max(-0.002, min(0.002, steer))
93 | steer *= 0.3
94 | if int(loc_x) in list(range(330, 350)) and int(loc_y) in (325, 326):
95 | if steer > 0:
96 | steer = 0
97 | else:
98 | steer *= 4
99 |
100 |     ''' Steering adjustment using running_cnt: go straight for the first 30 steps '''
101 | if running_cnt < 30:
102 | steer = 0
103 |
104 | acc = np.fmax(np.fmin(acc, 1.0), 0.0)
105 |
106 | return steer, acc, brake
107 |
108 |
109 | def action_adjusting_town02(direction, steer, acc, brake, speed, pred_speed, episode_name, loc_x, loc_y, running_cnt):
110 | weather, exp_id, se_point = saperate_environment(episode_name)
111 |
112 | if brake < 0.1:
113 | brake = 0.0
114 |
115 | if acc > brake:
116 | brake = 0.0
117 |
118 |     # We limit the speed to roughly 35 km/h.
119 |     # For Town 1 (kept for reference, currently disabled):
120 |     # if speed > 35:  # and brake == 0.0:
121 |     #     acc = 0.0
122 |     # For Town 2 the limits below apply.
123 |
124 | if exp_id == "0":
125 | if speed > 37:
126 | acc = 0.0
127 | acc = acc * 1.7
128 | else:
129 | if speed > 35:
130 | acc = 0.0
131 | acc = acc * 1.5
132 |
133 |     # If the steering magnitude exceeds a threshold, reduce the acceleration.
134 | if np.abs(steer) > 0.15:
135 | acc = acc * 0.5
136 |
137 | curve_limit = 0.04
138 | curve_limit_speed = 15
139 | curve_limit_1_2 = 0.01
140 | curve_limit_speed_1_2 = 17
141 |
142 | if direction == 0:
143 | if np.abs(steer) > curve_limit and speed > curve_limit_speed:
144 | acc = 0
145 | brake = 1
146 |         # When direction == 0, apply the steering weight below only when the speed has already dropped for the curve.
147 | if speed <= curve_limit_speed:
148 | steer = steering_multi_function(speed) * steer
149 | elif direction == 1:
150 |
151 | if weather == "14":
152 | if speed > curve_limit_speed_1_2 - 1:
153 | acc = 0
154 | brake = 1
155 | steer = steering_multi_function(speed, 1.55) * steer
156 | else:
157 | if speed > curve_limit_speed_1_2:
158 | acc = 0
159 | brake = 1
160 | steer = steering_multi_function(speed, 1.5) * steer
161 | if steer > 0:
162 | steer = 0
163 | elif direction == 2:
164 | if speed > curve_limit_speed_1_2:
165 | acc = 0
166 | brake = 1
167 | steer = steering_multi_function(speed) * steer
168 | if steer < 0:
169 | steer = 0
170 | elif direction == 3:
171 | steer *= 0.3
172 |
173 | return steer, acc, brake
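174 |
175 |
176 | # A small illustration (added for clarity, not called by the pipeline) of the
177 | # speed-dependent steering gain above: steering_multi_function decays linearly
178 | # from weight_factor by 0.02 per km/h, so with the default weight_factor=1.7
179 | # the gain is 1.7 at standstill, 1.5 at 10 km/h and 1.0 at 35 km/h - low-speed
180 | # corrections are amplified the most.
181 | if __name__ == '__main__':
182 |     for s in (0.0, 10.0, 35.0):
183 |         print('speed %.0f km/h -> steering gain %.2f' % (s, steering_multi_function(s)))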
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/run_auxi_ver3.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
4 | # Barcelona (UAB).
5 | #
6 | # This work is licensed under the terms of the MIT license.
7 | # For a copy, see <https://opensource.org/licenses/MIT>.
8 |
9 | import os
10 | os.environ["CUDA_VISIBLE_DEVICES"] = "0"
11 | using_gpu_num = -1
12 |
13 |
14 |
15 | import argparse
16 | import logging
17 |
18 | from version084.benchmark_tools import run_driving_benchmark
19 | from version084.driving_benchmarks import CoRL2017, CARLA100
20 | from version084.benchmark_tools.experiment_suites.basic_experiment_suite import BasicExperimentSuite
21 | from version084.benchmark_tools.agent import ForwardAgent
22 |
23 | from agents.auxiliary.auxi_ver3 import ImitationLearning
24 |
25 | if __name__ == '__main__':
26 |
27 | argparser = argparse.ArgumentParser(description=__doc__)
28 | argparser.add_argument(
29 | '-v', '--verbose',
30 | action='store_true',
31 | dest='verbose',
32 | help='print some extra status information')
33 | argparser.add_argument(
34 | '-db', '--debug',
35 | action='store_true',
36 | dest='debug',
37 | help='print debug information')
38 | argparser.add_argument(
39 | '--host',
40 | metavar='H',
41 | # default='localhost',
42 | default='141.223.12.42',
43 |         help='IP of the host server (default: 141.223.12.42)')
44 | argparser.add_argument(
45 | '-p', '--port',
46 | metavar='P',
47 | default=2000,
48 | type=int,
49 | help='TCP port to listen to (default: 2000)')
50 | argparser.add_argument(
51 | '-c', '--city-name',
52 | metavar='C',
53 | default='Town01_lite',
54 | # default='Town02',
55 |         help='The town that is going to be used in the benchmark '
56 |              + '(needs to match the active town in the server; options: Town01 or Town02)')
57 | argparser.add_argument(
58 | '-n', '--log_name',
59 | metavar='T',
60 | default='test_auxi_ver3',
61 | help='The name of the log file to be created by the benchmark'
62 | )
63 | argparser.add_argument(
64 | '--corl-2017',
65 | action='store_true',
66 |         help='If you want to run the CoRL 2017 benchmark instead of the basic one'
67 | )
68 | argparser.add_argument(
69 | '--carla100',
70 | action='store_true',
71 | help='If you want to use the carla100 benchmark instead of the Basic one'
72 | )
73 | argparser.add_argument(
74 | '--continue-experiment',
75 | action='store_true',
76 | help='If you want to continue the experiment with the same name'
77 | )
78 |
79 | ### from old eval source code
80 | argparser.add_argument(
81 | '--model-path',
82 | metavar='P',
83 | # default='/home/kimna/PytorchWorkspace/CARLA_Pytorch/carla_cil_pytorch-master/save_models/training/198_training.pth',
84 | default='/home/kimna/PytorchWorkspace/CARLA_Pytorch/carla_yaw/auxi_ver3/save_models/training/4_training.pth',
85 | # default='/home/kimna/PytorchWorkspace/CARLA_Pytorch/carla_yaw/auxi_ver2/save_models/training_best.pth',
86 | # default='model/training_best_multi_img_ver2.pth',
87 | type=str,
88 |         help='path to the torch imitation learning model checkpoint (.pth)'
89 | )
90 | argparser.add_argument(
91 | '--visualize',
92 | default=False,
93 | action='store_true',
94 |         help='visualize the image and the transferred image through tensorflow'
95 | )
96 |
97 | argparser.add_argument(
98 | '--avoid-stopping',
99 | default=True,
100 | action='store_false',
101 |         help='Disable the speed prediction branch used to avoid unwanted agent stops (enabled by default)'
102 | )
103 |
104 |
105 | args = argparser.parse_args()
106 | if args.debug:
107 | log_level = logging.DEBUG
108 | elif args.verbose:
109 | log_level = logging.INFO
110 | else:
111 | log_level = logging.WARNING
112 |
113 | logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)
114 | logging.info('listening to server %s:%s', args.host, args.port)
115 |
116 |     # A ForwardAgent, a trivial policy that just sets acceleration to 0.9 and
117 |     # steering to zero, is kept below for quick testing; here we use the imitation agent.
118 | # agent = ForwardAgent()
119 | agent = ImitationLearning(args.city_name,
120 | args.avoid_stopping,
121 | args.model_path,
122 | args.visualize,
123 | args.log_name,
124 | using_gpu_num
125 | )
126 |
127 | # We instantiate an experiment suite. Basically a set of experiments
128 | # that are going to be evaluated on this benchmark.
129 | if args.corl_2017:
130 | experiment_suite = CoRL2017(args.city_name)
131 | elif args.carla100:
132 | experiment_suite = CARLA100(args.city_name)
133 | else:
134 |         print(' WARNING: running the basic driving benchmark; to run the CoRL 2017'
135 |               ' experiment suite, you should run'
136 |               ' python run_auxi_ver3.py --corl-2017')
137 | experiment_suite = BasicExperimentSuite(args.city_name)
138 |
139 | # Now actually run the driving_benchmark
140 | run_driving_benchmark(agent, experiment_suite, args.city_name,
141 | args.log_name, args.continue_experiment,
142 | args.host, args.port)
143 |
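144 | # Example invocations (illustrative; host, port and model path must match your
145 | # own setup - the defaults above point at one specific machine):
146 | #   python run_auxi_ver3.py --carla100 -c Town01 -n auxi_ver3_town01
147 | #   python run_auxi_ver3.py --corl-2017 --host localhost -p 2000 \
148 | #       --model-path ./save_models/training/4_training.pth --continue-experiment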
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/run_yaw_ver0.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
4 | # Barcelona (UAB).
5 | #
6 | # This work is licensed under the terms of the MIT license.
7 | # For a copy, see <https://opensource.org/licenses/MIT>.
8 |
9 | import os
10 | # os.environ["CUDA_VISIBLE_DEVICES"] = "0"
11 |
12 | import argparse
13 | import logging
14 |
15 | from version084.benchmark_tools import run_driving_benchmark
16 | from version084.driving_benchmarks import CoRL2017, CARLA100
17 | from version084.benchmark_tools.experiment_suites.basic_experiment_suite import BasicExperimentSuite
18 | from version084.benchmark_tools.agent import ForwardAgent
19 |
20 | from agents.yaw.yaw_ver0 import ImitationLearning
21 |
22 | if __name__ == '__main__':
23 |
24 | argparser = argparse.ArgumentParser(description=__doc__)
25 | argparser.add_argument(
26 | '-v', '--verbose',
27 | action='store_true',
28 | dest='verbose',
29 | help='print some extra status information')
30 | argparser.add_argument(
31 | '-db', '--debug',
32 | action='store_true',
33 | dest='debug',
34 | help='print debug information')
35 | argparser.add_argument(
36 | '--host',
37 | metavar='H',
38 | # default='localhost',
39 | default='141.223.12.42',
40 |         help='IP of the host server (default: 141.223.12.42)')
41 | argparser.add_argument(
42 | '-p', '--port',
43 | metavar='P',
44 | default=2000,
45 | type=int,
46 | help='TCP port to listen to (default: 2000)')
47 | argparser.add_argument(
48 | '-c', '--city-name',
49 | metavar='C',
50 | # default='Town01_lite',
51 | default='Town01',
52 |         help='The town that is going to be used in the benchmark '
53 |              + '(needs to match the active town in the server; options: Town01 or Town02)')
54 | argparser.add_argument(
55 | '-n', '--log_name',
56 | metavar='T',
57 | default='test_yaw_ver0',
58 | help='The name of the log file to be created by the benchmark'
59 | )
60 | argparser.add_argument(
61 | '--corl-2017',
62 | action='store_true',
63 |         help='If you want to run the CoRL 2017 benchmark instead of the basic one'
64 | )
65 | argparser.add_argument(
66 | '--carla100',
67 | action='store_true',
68 | help='If you want to use the carla100 benchmark instead of the Basic one'
69 | )
70 | argparser.add_argument(
71 | '--continue-experiment',
72 | action='store_true',
73 | help='If you want to continue the experiment with the same name'
74 | )
75 |
76 | ### from old eval source code
77 | argparser.add_argument(
78 | '--model-path',
79 | metavar='P',
80 | # default='/home/kimna/PytorchWorkspace/CARLA_Pytorch/carla_cil_pytorch-master/save_models/training/198_training.pth',
81 | default='/home/kimna/PytorchWorkspace/CARLA_Pytorch/carla_yaw/ver0/save_models/training/5_training.pth',
82 | # default='D:\\PytorchWorkspace\\CARLA_Pytorch\\carla_max_branch\\ver0\\save_models\\training\\15_training.pth',
83 | # default='model/training_best_multi_img_ver2.pth',
84 | type=str,
85 |         help='path to the torch imitation learning model checkpoint (.pth)'
86 | )
87 | argparser.add_argument(
88 | '--visualize',
89 | default=False,
90 | action='store_true',
91 |         help='visualize the image and the transferred image through tensorflow'
92 | )
93 | argparser.add_argument('--gpu', default=0, type=int,
94 | help='GPU id to use.')
95 | argparser.add_argument(
96 | '--avoid-stopping',
97 | default=True,
98 | action='store_false',
99 |         help='Disable the speed prediction branch used to avoid unwanted agent stops (enabled by default)'
100 | )
101 |
102 |
103 | args = argparser.parse_args()
104 | if args.debug:
105 | log_level = logging.DEBUG
106 | elif args.verbose:
107 | log_level = logging.INFO
108 | else:
109 | log_level = logging.WARNING
110 |
111 | logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)
112 | logging.info('listening to server %s:%s', args.host, args.port)
113 |
114 |     # A ForwardAgent, a trivial policy that just sets acceleration to 0.9 and
115 |     # steering to zero, is kept below for quick testing; here we use the imitation agent.
116 | # agent = ForwardAgent()
117 | agent = ImitationLearning(args.city_name,
118 | args.avoid_stopping,
119 | args.model_path,
120 | args.visualize,
121 | args.log_name
122 | )
123 |
124 | # We instantiate an experiment suite. Basically a set of experiments
125 | # that are going to be evaluated on this benchmark.
126 | if args.corl_2017:
127 | experiment_suite = CoRL2017(args.city_name)
128 | elif args.carla100:
129 | experiment_suite = CARLA100(args.city_name)
130 | else:
131 |         print(' WARNING: running the basic driving benchmark; to run the CoRL 2017'
132 |               ' experiment suite, you should run'
133 |               ' python run_yaw_ver0.py --corl-2017')
134 | experiment_suite = BasicExperimentSuite(args.city_name)
135 |
136 | # Now actually run the driving_benchmark
137 | run_driving_benchmark(agent, experiment_suite, args.city_name,
138 | args.log_name, args.continue_experiment,
139 | args.host, args.port)
140 |
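141 | # The --gpu id parsed above is never forwarded to ImitationLearning (compare
142 | # run_auxi_ver3.py, which passes using_gpu_num). A minimal way to honour it,
143 | # assuming CUDA has not been initialised yet at that point, would be:
144 | #     os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
145 | # placed right after parse_args() and before the agent is constructed.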
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/run_yaw_ver1.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
4 | # Barcelona (UAB).
5 | #
6 | # This work is licensed under the terms of the MIT license.
7 | # For a copy, see <https://opensource.org/licenses/MIT>.
8 |
9 | import os
10 | # os.environ["CUDA_VISIBLE_DEVICES"] = "0"
11 |
12 | import argparse
13 | import logging
14 |
15 | from version084.benchmark_tools import run_driving_benchmark
16 | from version084.driving_benchmarks import CoRL2017, CARLA100
17 | from version084.benchmark_tools.experiment_suites.basic_experiment_suite import BasicExperimentSuite
18 | from version084.benchmark_tools.agent import ForwardAgent
19 |
20 | from agents.yaw.yaw_ver1 import ImitationLearning
21 |
22 | if __name__ == '__main__':
23 |
24 | argparser = argparse.ArgumentParser(description=__doc__)
25 | argparser.add_argument(
26 | '-v', '--verbose',
27 | action='store_true',
28 | dest='verbose',
29 | help='print some extra status information')
30 | argparser.add_argument(
31 | '-db', '--debug',
32 | action='store_true',
33 | dest='debug',
34 | help='print debug information')
35 | argparser.add_argument(
36 | '--host',
37 | metavar='H',
38 | # default='localhost',
39 | default='141.223.12.42',
40 |         help='IP of the host server (default: 141.223.12.42)')
41 | argparser.add_argument(
42 | '-p', '--port',
43 | metavar='P',
44 | default=2000,
45 | type=int,
46 | help='TCP port to listen to (default: 2000)')
47 | argparser.add_argument(
48 | '-c', '--city-name',
49 | metavar='C',
50 | # default='Town01_lite',
51 | default='Town01',
52 |         help='The town that is going to be used in the benchmark '
53 |              + '(needs to match the active town in the server; options: Town01 or Town02)')
54 | argparser.add_argument(
55 | '-n', '--log_name',
56 | metavar='T',
57 | default='test_yaw_ver1',
58 | help='The name of the log file to be created by the benchmark'
59 | )
60 | argparser.add_argument(
61 | '--corl-2017',
62 | action='store_true',
63 |         help='If you want to run the CoRL 2017 benchmark instead of the basic one'
64 | )
65 | argparser.add_argument(
66 | '--carla100',
67 | action='store_true',
68 | help='If you want to use the carla100 benchmark instead of the Basic one'
69 | )
70 | argparser.add_argument(
71 | '--continue-experiment',
72 | action='store_true',
73 | help='If you want to continue the experiment with the same name'
74 | )
75 |
76 | ### from old eval source code
77 | argparser.add_argument(
78 | '--model-path',
79 | metavar='P',
80 | # default='/home/kimna/PytorchWorkspace/CARLA_Pytorch/carla_cil_pytorch-master/save_models/training/198_training.pth',
81 | default='/home/kimna/PytorchWorkspace/CARLA_Pytorch/carla_yaw/ver1/save_models/training/4_training.pth',
82 | # default='D:\\PytorchWorkspace\\CARLA_Pytorch\\carla_max_branch\\ver0\\save_models\\training\\15_training.pth',
83 | # default='model/training_best_multi_img_ver2.pth',
84 | type=str,
85 |         help='path to the torch imitation learning model checkpoint (.pth)'
86 | )
87 | argparser.add_argument(
88 | '--visualize',
89 | default=False,
90 | action='store_true',
91 |         help='visualize the image and the transferred image through tensorflow'
92 | )
93 | argparser.add_argument('--gpu', default=0, type=int,
94 | help='GPU id to use.')
95 | argparser.add_argument(
96 | '--avoid-stopping',
97 | default=True,
98 | action='store_false',
99 |         help='Disable the speed prediction branch used to avoid unwanted agent stops (enabled by default)'
100 | )
101 |
102 |
103 | args = argparser.parse_args()
104 | if args.debug:
105 | log_level = logging.DEBUG
106 | elif args.verbose:
107 | log_level = logging.INFO
108 | else:
109 | log_level = logging.WARNING
110 |
111 | logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)
112 | logging.info('listening to server %s:%s', args.host, args.port)
113 |
114 |     # A ForwardAgent, a trivial policy that just sets acceleration to 0.9 and
115 |     # steering to zero, is kept below for quick testing; here we use the imitation agent.
116 | # agent = ForwardAgent()
117 | agent = ImitationLearning(args.city_name,
118 | args.avoid_stopping,
119 | args.model_path,
120 | args.visualize,
121 | args.log_name
122 | )
123 |
124 | # We instantiate an experiment suite. Basically a set of experiments
125 | # that are going to be evaluated on this benchmark.
126 | if args.corl_2017:
127 | experiment_suite = CoRL2017(args.city_name)
128 | elif args.carla100:
129 | experiment_suite = CARLA100(args.city_name)
130 | else:
131 |         print(' WARNING: running the basic driving benchmark; to run the CoRL 2017'
132 |               ' experiment suite, you should run'
133 |               ' python run_yaw_ver1.py --corl-2017')
134 | experiment_suite = BasicExperimentSuite(args.city_name)
135 |
136 | # Now actually run the driving_benchmark
137 | run_driving_benchmark(agent, experiment_suite, args.city_name,
138 | args.log_name, args.continue_experiment,
139 | args.host, args.port)
140 |
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/run_yaw_ver2.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
4 | # Barcelona (UAB).
5 | #
6 | # This work is licensed under the terms of the MIT license.
7 | # For a copy, see <https://opensource.org/licenses/MIT>.
8 |
9 | import os
10 | # os.environ["CUDA_VISIBLE_DEVICES"] = "0"
11 |
12 | import argparse
13 | import logging
14 |
15 | from version084.benchmark_tools import run_driving_benchmark
16 | from version084.driving_benchmarks import CoRL2017, CARLA100
17 | from version084.benchmark_tools.experiment_suites.basic_experiment_suite import BasicExperimentSuite
18 | from version084.benchmark_tools.agent import ForwardAgent
19 |
20 | from agents.yaw.yaw_ver2 import ImitationLearning
21 |
22 | if __name__ == '__main__':
23 |
24 | argparser = argparse.ArgumentParser(description=__doc__)
25 | argparser.add_argument(
26 | '-v', '--verbose',
27 | action='store_true',
28 | dest='verbose',
29 | help='print some extra status information')
30 | argparser.add_argument(
31 | '-db', '--debug',
32 | action='store_true',
33 | dest='debug',
34 | help='print debug information')
35 | argparser.add_argument(
36 | '--host',
37 | metavar='H',
38 | # default='localhost',
39 | default='141.223.12.42',
40 |         help='IP of the host server (default: 141.223.12.42)')
41 | argparser.add_argument(
42 | '-p', '--port',
43 | metavar='P',
44 | default=2000,
45 | type=int,
46 | help='TCP port to listen to (default: 2000)')
47 | argparser.add_argument(
48 | '-c', '--city-name',
49 | metavar='C',
50 | # default='Town01_lite',
51 | default='Town02',
52 |         help='The town that is going to be used in the benchmark '
53 |              + '(needs to match the active town in the server; options: Town01 or Town02)')
54 | argparser.add_argument(
55 | '-n', '--log_name',
56 | metavar='T',
57 | default='test_yaw_ver2',
58 | help='The name of the log file to be created by the benchmark'
59 | )
60 | argparser.add_argument(
61 | '--corl-2017',
62 | action='store_true',
63 |         help='If you want to run the CoRL 2017 benchmark instead of the basic one'
64 | )
65 | argparser.add_argument(
66 | '--carla100',
67 | action='store_true',
68 | help='If you want to use the carla100 benchmark instead of the Basic one'
69 | )
70 | argparser.add_argument(
71 | '--continue-experiment',
72 | action='store_true',
73 | help='If you want to continue the experiment with the same name'
74 | )
75 |
76 | ### from old eval source code
77 | argparser.add_argument(
78 | '--model-path',
79 | metavar='P',
80 | # default='/home/kimna/PytorchWorkspace/CARLA_Pytorch/carla_cil_pytorch-master/save_models/training/198_training.pth',
81 | default='/home/kimna/PytorchWorkspace/CARLA_Pytorch/carla_yaw/ver2/save_models/training/6_training.pth',
82 | # default='D:\\PytorchWorkspace\\CARLA_Pytorch\\carla_max_branch\\ver0\\save_models\\training\\15_training.pth',
83 | # default='model/training_best_multi_img_ver2.pth',
84 | type=str,
85 |         help='path to the torch imitation learning model checkpoint (.pth)'
86 | )
87 | argparser.add_argument(
88 | '--visualize',
89 | default=False,
90 | action='store_true',
91 |         help='visualize the image and the transferred image through tensorflow'
92 | )
93 | argparser.add_argument('--gpu', default=0, type=int,
94 | help='GPU id to use.')
95 | argparser.add_argument(
96 | '--avoid-stopping',
97 | default=True,
98 | action='store_false',
99 |         help='Disable the speed prediction branch used to avoid unwanted agent stops (enabled by default)'
100 | )
101 |
102 |
103 | args = argparser.parse_args()
104 | if args.debug:
105 | log_level = logging.DEBUG
106 | elif args.verbose:
107 | log_level = logging.INFO
108 | else:
109 | log_level = logging.WARNING
110 |
111 | logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)
112 | logging.info('listening to server %s:%s', args.host, args.port)
113 |
114 |     # A ForwardAgent, a trivial policy that just sets acceleration to 0.9 and
115 |     # steering to zero, is kept below for quick testing; here we use the imitation agent.
116 | # agent = ForwardAgent()
117 | agent = ImitationLearning(args.city_name,
118 | args.avoid_stopping,
119 | args.model_path,
120 | args.visualize,
121 | args.log_name
122 | )
123 |
124 | # We instantiate an experiment suite. Basically a set of experiments
125 | # that are going to be evaluated on this benchmark.
126 | if args.corl_2017:
127 | experiment_suite = CoRL2017(args.city_name)
128 | elif args.carla100:
129 | experiment_suite = CARLA100(args.city_name)
130 | else:
131 |         print(' WARNING: running the basic driving benchmark; to run the CoRL 2017'
132 |               ' experiment suite, you should run'
133 |               ' python run_yaw_ver2.py --corl-2017')
134 | experiment_suite = BasicExperimentSuite(args.city_name)
135 |
136 | # Now actually run the driving_benchmark
137 | run_driving_benchmark(agent, experiment_suite, args.city_name,
138 | args.log_name, args.continue_experiment,
139 | args.host, args.port)
140 |
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/run_auxi_ver0.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
4 | # Barcelona (UAB).
5 | #
6 | # This work is licensed under the terms of the MIT license.
7 | # For a copy, see <https://opensource.org/licenses/MIT>.
8 |
9 | import os
10 | os.environ["CUDA_VISIBLE_DEVICES"] = "0"
11 |
12 | import argparse
13 | import logging
14 |
15 | from version084.benchmark_tools import run_driving_benchmark
16 | from version084.driving_benchmarks import CoRL2017, CARLA100
17 | from version084.benchmark_tools.experiment_suites.basic_experiment_suite import BasicExperimentSuite
18 | from version084.benchmark_tools.agent import ForwardAgent
19 |
20 | from agents.auxiliary.auxi_ver0 import ImitationLearning
21 |
22 | if __name__ == '__main__':
23 |
24 | argparser = argparse.ArgumentParser(description=__doc__)
25 | argparser.add_argument(
26 | '-v', '--verbose',
27 | action='store_true',
28 | dest='verbose',
29 | help='print some extra status information')
30 | argparser.add_argument(
31 | '-db', '--debug',
32 | action='store_true',
33 | dest='debug',
34 | help='print debug information')
35 | argparser.add_argument(
36 | '--host',
37 | metavar='H',
38 | # default='localhost',
39 | default='141.223.12.42',
40 |         help='IP of the host server (default: 141.223.12.42)')
41 | argparser.add_argument(
42 | '-p', '--port',
43 | metavar='P',
44 | default=2000,
45 | type=int,
46 | help='TCP port to listen to (default: 2000)')
47 | argparser.add_argument(
48 | '-c', '--city-name',
49 | metavar='C',
50 | default='Town01_lite',
51 | # default='Town02',
52 |         help='The town that is going to be used in the benchmark '
53 |              + '(needs to match the active town in the server; options: Town01 or Town02)')
54 | argparser.add_argument(
55 | '-n', '--log_name',
56 | metavar='T',
57 | default='test_auxi_ver0',
58 | help='The name of the log file to be created by the benchmark'
59 | )
60 | argparser.add_argument(
61 | '--corl-2017',
62 | action='store_true',
63 |         help='If you want to run the CoRL 2017 benchmark instead of the basic one'
64 | )
65 | argparser.add_argument(
66 | '--carla100',
67 | action='store_true',
68 | help='If you want to use the carla100 benchmark instead of the Basic one'
69 | )
70 | argparser.add_argument(
71 | '--continue-experiment',
72 | action='store_true',
73 | help='If you want to continue the experiment with the same name'
74 | )
75 |
76 | ### from old eval source code
77 | argparser.add_argument(
78 | '--model-path',
79 | metavar='P',
80 | # default='/home/kimna/PytorchWorkspace/CARLA_Pytorch/carla_cil_pytorch-master/save_models/training/198_training.pth',
81 | default='/home/kimna/PytorchWorkspace/CARLA_Pytorch/carla_yaw/auxi_ver0/save_models/training/3_training.pth',
82 | # default='/home/kimna/PytorchWorkspace/CARLA_Pytorch/carla_yaw/auxi_ver2/save_models/training_best.pth',
83 | # default='model/training_best_multi_img_ver2.pth',
84 | type=str,
85 |         help='path to the torch imitation learning model checkpoint (.pth)'
86 | )
87 | argparser.add_argument(
88 | '--visualize',
89 | default=False,
90 | action='store_true',
91 |         help='visualize the image and the transferred image through tensorflow'
92 | )
93 | argparser.add_argument('--gpu', default=0, type=int,
94 | help='GPU id to use.')
95 | argparser.add_argument(
96 | '--avoid-stopping',
97 | default=True,
98 | action='store_false',
99 |         help='Disable the speed prediction branch used to avoid unwanted agent stops (enabled by default)'
100 | )
101 |
102 |
103 | args = argparser.parse_args()
104 | if args.debug:
105 | log_level = logging.DEBUG
106 | elif args.verbose:
107 | log_level = logging.INFO
108 | else:
109 | log_level = logging.WARNING
110 |
111 | logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)
112 | logging.info('listening to server %s:%s', args.host, args.port)
113 |
114 |     # A ForwardAgent, a trivial policy that just sets acceleration to 0.9 and
115 |     # steering to zero, is kept below for quick testing; here we use the imitation agent.
116 | # agent = ForwardAgent()
117 | agent = ImitationLearning(args.city_name,
118 | args.avoid_stopping,
119 | args.model_path,
120 | args.visualize,
121 | args.log_name
122 | )
123 |
124 | # We instantiate an experiment suite. Basically a set of experiments
125 | # that are going to be evaluated on this benchmark.
126 | if args.corl_2017:
127 | experiment_suite = CoRL2017(args.city_name)
128 | elif args.carla100:
129 | experiment_suite = CARLA100(args.city_name)
130 | else:
131 |         print(' WARNING: running the basic driving benchmark; to run the CoRL 2017'
132 |               ' experiment suite, you should run'
133 |               ' python run_auxi_ver0.py --corl-2017')
134 | experiment_suite = BasicExperimentSuite(args.city_name)
135 |
136 | # Now actually run the driving_benchmark
137 | run_driving_benchmark(agent, experiment_suite, args.city_name,
138 | args.log_name, args.continue_experiment,
139 | args.host, args.port)
140 |
--------------------------------------------------------------------------------
/driving-benchmarks-yaw/run_auxi_ver2.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
4 | # Barcelona (UAB).
5 | #
6 | # This work is licensed under the terms of the MIT license.
7 | # For a copy, see <https://opensource.org/licenses/MIT>.
8 |
9 | import os
10 | # os.environ["CUDA_VISIBLE_DEVICES"] = "0"
11 |
12 | import argparse
13 | import logging
14 |
15 | from version084.benchmark_tools import run_driving_benchmark
16 | from version084.driving_benchmarks import CoRL2017, CARLA100
17 | from version084.benchmark_tools.experiment_suites.basic_experiment_suite import BasicExperimentSuite
18 | from version084.benchmark_tools.agent import ForwardAgent
19 |
20 | from agents.auxiliary.auxi_ver2 import ImitationLearning
21 |
22 | if __name__ == '__main__':
23 |
24 | argparser = argparse.ArgumentParser(description=__doc__)
25 | argparser.add_argument(
26 | '-v', '--verbose',
27 | action='store_true',
28 | dest='verbose',
29 | help='print some extra status information')
30 | argparser.add_argument(
31 | '-db', '--debug',
32 | action='store_true',
33 | dest='debug',
34 | help='print debug information')
35 | argparser.add_argument(
36 | '--host',
37 | metavar='H',
38 | # default='localhost',
39 | default='141.223.12.42',
40 |         help='IP of the host server (default: 141.223.12.42)')
41 | argparser.add_argument(
42 | '-p', '--port',
43 | metavar='P',
44 | default=2000,
45 | type=int,
46 | help='TCP port to listen to (default: 2000)')
47 | argparser.add_argument(
48 | '-c', '--city-name',
49 | metavar='C',
50 | default='Town01_lite',
51 | # default='Town02',
52 |         help='The town that is going to be used in the benchmark '
53 |              + '(needs to match the active town in the server; options: Town01 or Town02)')
54 | argparser.add_argument(
55 | '-n', '--log_name',
56 | metavar='T',
57 | default='test_auxi_ver2',
58 | help='The name of the log file to be created by the benchmark'
59 | )
60 | argparser.add_argument(
61 | '--corl-2017',
62 | action='store_true',
63 |         help='If you want to run the CoRL 2017 benchmark instead of the basic one'
64 | )
65 | argparser.add_argument(
66 | '--carla100',
67 | action='store_true',
68 | help='If you want to use the carla100 benchmark instead of the Basic one'
69 | )
70 | argparser.add_argument(
71 | '--continue-experiment',
72 | action='store_true',
73 | help='If you want to continue the experiment with the same name'
74 | )
75 |
76 | ### from old eval source code
77 | argparser.add_argument(
78 | '--model-path',
79 | metavar='P',
80 | # default='/home/kimna/PytorchWorkspace/CARLA_Pytorch/carla_cil_pytorch-master/save_models/training/198_training.pth',
81 | default='/home/kimna/PytorchWorkspace/CARLA_Pytorch/carla_yaw/auxi_ver2/save_models/training/6_training.pth',
82 | # default='/home/kimna/PytorchWorkspace/CARLA_Pytorch/carla_yaw/auxi_ver2/save_models/training_best.pth',
83 | # default='model/training_best_multi_img_ver2.pth',
84 | type=str,
85 |         help='path to the torch imitation learning model checkpoint (.pth)'
86 | )
87 | argparser.add_argument(
88 | '--visualize',
89 | default=False,
90 | action='store_true',
91 |         help='visualize the image and the transferred image through tensorflow'
92 | )
93 | argparser.add_argument('--gpu', default=0, type=int,
94 | help='GPU id to use.')
95 | argparser.add_argument(
96 | '--avoid-stopping',
97 | default=True,
98 | action='store_false',
99 |         help='Disable the speed prediction branch used to avoid unwanted agent stops (enabled by default)'
100 | )
101 |
102 |
103 | args = argparser.parse_args()
104 | if args.debug:
105 | log_level = logging.DEBUG
106 | elif args.verbose:
107 | log_level = logging.INFO
108 | else:
109 | log_level = logging.WARNING
110 |
111 | logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)
112 | logging.info('listening to server %s:%s', args.host, args.port)
113 |
114 |     # A ForwardAgent, a trivial policy that just sets acceleration to 0.9 and
115 |     # steering to zero, is kept below for quick testing; here we use the imitation agent.
116 | # agent = ForwardAgent()
117 | agent = ImitationLearning(args.city_name,
118 | args.avoid_stopping,
119 | args.model_path,
120 | args.visualize,
121 | args.log_name
122 | )
123 |
124 | # We instantiate an experiment suite. Basically a set of experiments
125 | # that are going to be evaluated on this benchmark.
126 | if args.corl_2017:
127 | experiment_suite = CoRL2017(args.city_name)
128 | elif args.carla100:
129 | experiment_suite = CARLA100(args.city_name)
130 | else:
131 |         print(' WARNING: running the basic driving benchmark; to run the CoRL 2017'
132 |               ' experiment suite, you should run'
133 |               ' python run_auxi_ver2.py --corl-2017')
134 | experiment_suite = BasicExperimentSuite(args.city_name)
135 |
136 | # Now actually run the driving_benchmark
137 | run_driving_benchmark(agent, experiment_suite, args.city_name,
138 | args.log_name, args.continue_experiment,
139 | args.host, args.port)
140 |
--------------------------------------------------------------------------------