├── code ├── autodrive │ ├── keras-rl │ │ ├── rl │ │ │ ├── __init__.py │ │ │ ├── common │ │ │ │ ├── __init__.py │ │ │ │ ├── misc_util.py │ │ │ │ ├── cmd_util.py │ │ │ │ ├── tile_images.py │ │ │ │ └── vec_env │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── subproc_env_vec.py │ │ │ ├── agents │ │ │ │ └── __init__.py │ │ │ ├── random.py │ │ │ ├── processors.py │ │ │ └── util.py │ │ ├── tests │ │ │ ├── __init__.py │ │ │ ├── rl │ │ │ │ ├── __init__.py │ │ │ │ ├── agents │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── test_cem.py │ │ │ │ │ └── test_ddpg.py │ │ │ │ ├── util.py │ │ │ │ └── test_util.py │ │ │ └── integration │ │ │ │ ├── test_continuous.py │ │ │ │ └── test_discrete.py │ │ ├── utils │ │ │ ├── __init__.py │ │ │ └── gym │ │ │ │ ├── __init__.py │ │ │ │ ├── spaces │ │ │ │ ├── __init__.py │ │ │ │ └── discrete.py │ │ │ │ ├── envs │ │ │ │ ├── __init__.py │ │ │ │ └── twoRoundDeterministicRewardEnv.py │ │ │ │ └── prng.py │ │ ├── docs │ │ │ ├── sources │ │ │ │ ├── index.md │ │ │ │ ├── agents │ │ │ │ │ ├── naf.md │ │ │ │ │ ├── sarsa.md │ │ │ │ │ ├── ddpg.md │ │ │ │ │ ├── cem.md │ │ │ │ │ └── dqn.md │ │ │ │ ├── processors.md │ │ │ │ └── core.md │ │ │ ├── templates │ │ │ │ ├── index.md │ │ │ │ ├── core.md │ │ │ │ ├── processors.md │ │ │ │ └── agents │ │ │ │ │ ├── naf.md │ │ │ │ │ ├── ddpg.md │ │ │ │ │ ├── sarsa.md │ │ │ │ │ ├── cem.md │ │ │ │ │ ├── dqn.md │ │ │ │ │ └── overview.md │ │ │ └── requirements.txt │ │ ├── setup.cfg │ │ ├── setup.py │ │ ├── mkdocs.yml │ │ ├── ISSUE_TEMPLATE.md │ │ ├── CONTRIBUTING.md │ │ ├── LICENSE │ │ ├── .gitignore │ │ ├── pytest.ini │ │ ├── examples │ │ │ ├── sarsa_cartpole.py │ │ │ ├── visualize_log.py │ │ │ ├── dqn_cartpole.py │ │ │ ├── cem_cartpole.py │ │ │ ├── duel_dqn_cartpole.py │ │ │ ├── ddpg_pendulum.py │ │ │ ├── ddpg_mujoco.py │ │ │ └── naf_pendulum.py │ │ ├── .travis.yml │ │ └── README.md │ ├── carla-client │ │ ├── carla │ │ │ ├── __init__.py │ │ │ ├── planner │ │ │ │ ├── __init__.py │ │ │ │ ├── Town01.png │ │ │ │ ├── Town02.png │ │ │ │ ├── 
Town01Lanes.png │ │ │ │ ├── Town02Big.png │ │ │ │ ├── Town02Lanes.png │ │ │ │ ├── Town01Central.png │ │ │ │ ├── Town02Central.png │ │ │ │ ├── Town02.txt │ │ │ │ ├── Town01.txt │ │ │ │ ├── grid.py │ │ │ │ ├── graph.py │ │ │ │ └── city_track.py │ │ │ ├── driving_benchmark │ │ │ │ ├── __init__.py │ │ │ │ ├── experiment_suites │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── basic_experiment_suite.py │ │ │ │ │ └── experiment_suite.py │ │ │ │ └── experiment.py │ │ │ ├── agent │ │ │ │ ├── __init__.py │ │ │ │ ├── forward_agent.py │ │ │ │ └── agent.py │ │ │ ├── util.py │ │ │ ├── tcp.py │ │ │ └── settings.py │ │ ├── test │ │ │ ├── __init__.py │ │ │ ├── acceptance_tests │ │ │ │ └── __init__.py │ │ │ ├── unit_tests │ │ │ │ ├── test_data │ │ │ │ │ └── testfile_collisions │ │ │ │ │ │ └── summary.csv │ │ │ │ └── test_experiment_suite.py │ │ │ └── suite │ │ │ │ └── __init__.py │ │ ├── carla_client.egg-info │ │ │ ├── top_level.txt │ │ │ ├── dependency_links.txt │ │ │ ├── PKG-INFO │ │ │ └── SOURCES.txt │ │ ├── _benchmarks_results │ │ │ ├── test_BasicExperimentSuite_Town01 │ │ │ │ ├── log_201811301140 │ │ │ │ ├── summary.csv │ │ │ │ └── measurements.csv │ │ │ ├── test_BasicExperimentSuite_Town01-1 │ │ │ │ ├── log_201811301140 │ │ │ │ └── summary.csv │ │ │ └── test_BasicExperimentSuite_Town01-2 │ │ │ │ ├── log_201811301142 │ │ │ │ └── summary.csv │ │ ├── MANIFEST.in │ │ ├── requirements.txt │ │ ├── setup.py │ │ ├── driving_benchmark_example.py │ │ └── view_start_positions.py │ ├── requirements.txt │ ├── imgs │ │ ├── actor.png │ │ ├── model.png │ │ └── critic.png │ ├── __init__.py │ ├── carla_rl │ │ ├── __init__.py │ │ ├── carla_config.py │ │ └── renderer.py │ ├── carla_settings.py │ ├── processor.py │ ├── human_play.py │ ├── segment_tree.py │ ├── mysettings.ini │ └── models.py └── cnn_vae │ ├── notebooks │ ├── cvae.gif │ ├── cvae.gif.png │ ├── image_at_epoch_0000.png │ ├── image_at_epoch_0001.png │ ├── image_at_epoch_0002.png │ ├── image_at_epoch_0003.png │ ├── image_at_epoch_0004.png │ ├── 
image_at_epoch_0005.png │ ├── image_at_epoch_0006.png │ ├── image_at_epoch_0007.png │ ├── image_at_epoch_0008.png │ ├── image_at_epoch_0009.png │ ├── image_at_epoch_0010.png │ ├── image_at_epoch_0011.png │ ├── image_at_epoch_0012.png │ ├── image_at_epoch_0013.png │ ├── image_at_epoch_0014.png │ ├── image_at_epoch_0015.png │ ├── image_at_epoch_0016.png │ ├── image_at_epoch_0017.png │ ├── image_at_epoch_0018.png │ ├── image_at_epoch_0019.png │ ├── image_at_epoch_0020.png │ ├── image_at_epoch_0021.png │ ├── image_at_epoch_0022.png │ ├── image_at_epoch_0023.png │ ├── image_at_epoch_0024.png │ ├── image_at_epoch_0025.png │ ├── image_at_epoch_0026.png │ ├── image_at_epoch_0027.png │ ├── image_at_epoch_0028.png │ ├── image_at_epoch_0029.png │ ├── image_at_epoch_0030.png │ ├── image_at_epoch_0031.png │ ├── image_at_epoch_0032.png │ ├── image_at_epoch_0033.png │ ├── image_at_epoch_0034.png │ ├── image_at_epoch_0035.png │ ├── image_at_epoch_0036.png │ ├── image_at_epoch_0037.png │ ├── image_at_epoch_0038.png │ ├── image_at_epoch_0039.png │ ├── image_at_epoch_0040.png │ ├── image_at_epoch_0041.png │ ├── image_at_epoch_0042.png │ ├── image_at_epoch_0043.png │ ├── image_at_epoch_0044.png │ ├── image_at_epoch_0045.png │ ├── image_at_epoch_0046.png │ ├── image_at_epoch_0047.png │ ├── image_at_epoch_0048.png │ ├── image_at_epoch_0049.png │ ├── image_at_epoch_0050.png │ ├── image_at_epoch_0051.png │ ├── image_at_epoch_0052.png │ ├── image_at_epoch_0053.png │ ├── image_at_epoch_0054.png │ ├── image_at_epoch_0055.png │ ├── image_at_epoch_0056.png │ ├── image_at_epoch_0057.png │ ├── image_at_epoch_0058.png │ ├── image_at_epoch_0059.png │ ├── image_at_epoch_0060.png │ ├── image_at_epoch_0061.png │ ├── image_at_epoch_0062.png │ ├── image_at_epoch_0063.png │ ├── image_at_epoch_0064.png │ ├── image_at_epoch_0065.png │ ├── image_at_epoch_0066.png │ ├── image_at_epoch_0067.png │ ├── image_at_epoch_0068.png │ ├── image_at_epoch_0069.png │ ├── image_at_epoch_0070.png │ ├── 
image_at_epoch_0071.png │ ├── image_at_epoch_0072.png │ ├── image_at_epoch_0073.png │ ├── image_at_epoch_0074.png │ ├── image_at_epoch_0075.png │ ├── image_at_epoch_0076.png │ ├── image_at_epoch_0077.png │ ├── image_at_epoch_0078.png │ ├── image_at_epoch_0079.png │ ├── image_at_epoch_0080.png │ ├── image_at_epoch_0081.png │ ├── image_at_epoch_0082.png │ ├── image_at_epoch_0083.png │ ├── image_at_epoch_0084.png │ ├── image_at_epoch_0085.png │ ├── image_at_epoch_0086.png │ ├── image_at_epoch_0087.png │ ├── image_at_epoch_0088.png │ ├── image_at_epoch_0089.png │ ├── image_at_epoch_0090.png │ ├── image_at_epoch_0091.png │ ├── image_at_epoch_0092.png │ ├── image_at_epoch_0093.png │ ├── image_at_epoch_0094.png │ ├── image_at_epoch_0095.png │ ├── image_at_epoch_0096.png │ ├── image_at_epoch_0097.png │ ├── image_at_epoch_0098.png │ ├── image_at_epoch_0099.png │ ├── image_at_epoch_0100.png │ └── dataset_api.py │ ├── __init__.py │ ├── logs │ ├── events.out.tfevents.1542081438.ankurrc-ubuntu │ ├── events.out.tfevents.1542081544.ankurrc-ubuntu │ └── events.out.tfevents.1542082019.ankurrc-ubuntu │ ├── layers.py │ ├── scripts │ └── dataset_crawler.py │ ├── dataset_api.py │ ├── main.py │ └── vgg_vae.py ├── papers ├── streetlearn.pdf ├── learning_2_drive_in_ a_ day.pdf └── e2e-driving-conditional-imitation-learning.pdf ├── rl_project_report.pdf ├── RL_Project_Proposal.pdf ├── .gitignore └── README.md /code/autodrive/keras-rl/rl/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /code/autodrive/keras-rl/tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /code/autodrive/keras-rl/utils/__init__.py: -------------------------------------------------------------------------------- 1 | 
-------------------------------------------------------------------------------- /code/autodrive/carla-client/carla/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /code/autodrive/carla-client/test/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /code/autodrive/keras-rl/docs/sources/index.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /code/autodrive/keras-rl/docs/templates/index.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /code/autodrive/keras-rl/tests/rl/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /code/autodrive/keras-rl/utils/gym/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /code/autodrive/carla-client/carla/planner/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /code/autodrive/keras-rl/tests/rl/agents/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /code/autodrive/carla-client/test/acceptance_tests/__init__.py: -------------------------------------------------------------------------------- 1 | 
-------------------------------------------------------------------------------- /code/autodrive/keras-rl/docs/templates/core.md: -------------------------------------------------------------------------------- 1 | {{autogenerated}} 2 | -------------------------------------------------------------------------------- /code/autodrive/carla-client/carla_client.egg-info/top_level.txt: -------------------------------------------------------------------------------- 1 | carla 2 | -------------------------------------------------------------------------------- /code/autodrive/carla-client/carla_client.egg-info/dependency_links.txt: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /code/autodrive/keras-rl/docs/templates/processors.md: -------------------------------------------------------------------------------- 1 | {{autogenerated}} 2 | -------------------------------------------------------------------------------- /code/autodrive/keras-rl/rl/common/__init__.py: -------------------------------------------------------------------------------- 1 | from .misc_util import * 2 | -------------------------------------------------------------------------------- /code/autodrive/keras-rl/setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | description-file = README.md 3 | -------------------------------------------------------------------------------- /code/autodrive/keras-rl/utils/gym/spaces/__init__.py: -------------------------------------------------------------------------------- 1 | from .discrete import Discrete 2 | -------------------------------------------------------------------------------- /code/autodrive/keras-rl/docs/requirements.txt: -------------------------------------------------------------------------------- 1 | numpy 2 | mkdocs 3 | python-markdown-math 4 | 
-------------------------------------------------------------------------------- /code/autodrive/carla-client/_benchmarks_results/test_BasicExperimentSuite_Town01/log_201811301140: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /code/autodrive/carla-client/MANIFEST.in: -------------------------------------------------------------------------------- 1 | include carla/planner/*.txt 2 | include carla/planner/*.png 3 | -------------------------------------------------------------------------------- /papers/streetlearn.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/papers/streetlearn.pdf -------------------------------------------------------------------------------- /rl_project_report.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/rl_project_report.pdf -------------------------------------------------------------------------------- /RL_Project_Proposal.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/RL_Project_Proposal.pdf -------------------------------------------------------------------------------- /code/autodrive/requirements.txt: -------------------------------------------------------------------------------- 1 | numpy 2 | protobuf 3 | scipy 4 | pynput 5 | pygame 6 | pypng 7 | Pillow 8 | -------------------------------------------------------------------------------- /code/autodrive/carla-client/requirements.txt: -------------------------------------------------------------------------------- 1 | Pillow 2 | numpy 3 | protobuf 4 | pygame 5 | matplotlib 6 | future 7 | -------------------------------------------------------------------------------- 
/code/autodrive/imgs/actor.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/autodrive/imgs/actor.png -------------------------------------------------------------------------------- /code/autodrive/imgs/model.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/autodrive/imgs/model.png -------------------------------------------------------------------------------- /code/autodrive/carla-client/carla/driving_benchmark/__init__.py: -------------------------------------------------------------------------------- 1 | from .driving_benchmark import run_driving_benchmark 2 | -------------------------------------------------------------------------------- /code/autodrive/imgs/critic.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/autodrive/imgs/critic.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/cvae.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/cvae.gif -------------------------------------------------------------------------------- /code/autodrive/carla-client/carla/agent/__init__.py: -------------------------------------------------------------------------------- 1 | from .forward_agent import ForwardAgent 2 | from .agent import Agent 3 | -------------------------------------------------------------------------------- /code/autodrive/keras-rl/utils/gym/envs/__init__.py: -------------------------------------------------------------------------------- 1 | from .twoRoundDeterministicRewardEnv import TwoRoundDeterministicRewardEnv 2 | 
-------------------------------------------------------------------------------- /code/cnn_vae/notebooks/cvae.gif.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/cvae.gif.png -------------------------------------------------------------------------------- /papers/learning_2_drive_in_ a_ day.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/papers/learning_2_drive_in_ a_ day.pdf -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0000.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0000.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0001.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0001.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0002.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0002.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0003.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0003.png -------------------------------------------------------------------------------- 
/code/cnn_vae/notebooks/image_at_epoch_0004.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0004.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0005.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0005.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0006.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0006.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0007.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0007.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0008.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0008.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0009.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0009.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0010.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0010.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0011.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0011.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0012.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0012.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0013.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0013.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0014.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0014.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0015.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0015.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0016.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0016.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0017.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0017.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0018.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0018.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0019.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0019.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0020.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0020.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0021.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0021.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0022.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0022.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0023.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0023.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0024.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0024.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0025.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0025.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0026.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0026.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0027.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0027.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0028.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0028.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0029.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0029.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0030.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0030.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0031.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0031.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0032.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0032.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0033.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0033.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0034.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0034.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0035.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0035.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0036.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0036.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0037.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0037.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0038.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0038.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0039.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0039.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0040.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0040.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0041.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0041.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0042.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0042.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0043.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0043.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0044.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0044.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0045.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0045.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0046.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0046.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0047.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0047.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0048.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0048.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0049.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0049.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0050.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0050.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0051.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0051.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0052.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0052.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0053.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0053.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0054.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0054.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0055.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0055.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0056.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0056.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0057.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0057.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0058.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0058.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0059.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0059.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0060.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0060.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0061.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0061.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0062.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0062.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0063.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0063.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0064.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0064.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0065.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0065.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0066.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0066.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0067.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0067.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0068.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0068.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0069.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0069.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0070.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0070.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0071.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0071.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0072.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0072.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0073.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0073.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0074.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0074.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0075.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0075.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0076.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0076.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0077.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0077.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0078.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0078.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0079.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0079.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0080.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0080.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0081.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0081.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0082.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0082.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0083.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0083.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0084.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0084.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0085.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0085.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0086.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0086.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0087.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0087.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0088.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0088.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0089.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0089.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0090.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0090.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0091.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0091.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0092.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0092.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0093.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0093.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0094.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0094.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0095.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0095.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0096.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0096.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0097.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0097.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0098.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0098.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0099.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0099.png -------------------------------------------------------------------------------- /code/cnn_vae/notebooks/image_at_epoch_0100.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/notebooks/image_at_epoch_0100.png -------------------------------------------------------------------------------- /code/autodrive/__init__.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Created Date: Friday November 30th 2018 3 | Last Modified: Friday November 30th 2018 7:15:06 pm 4 | Author: ankurrc 5 | ''' 6 | -------------------------------------------------------------------------------- /code/autodrive/carla-client/carla/planner/Town01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/autodrive/carla-client/carla/planner/Town01.png -------------------------------------------------------------------------------- /code/autodrive/carla-client/carla/planner/Town02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/autodrive/carla-client/carla/planner/Town02.png -------------------------------------------------------------------------------- /code/cnn_vae/__init__.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Created Date: Saturday November 3rd 2018 3 | Last Modified: Saturday November 3rd 2018 10:49:51 pm 4 | Author: ankurrc 5 | ''' 6 | -------------------------------------------------------------------------------- /papers/e2e-driving-conditional-imitation-learning.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/papers/e2e-driving-conditional-imitation-learning.pdf -------------------------------------------------------------------------------- /code/autodrive/carla-client/carla/planner/Town01Lanes.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/autodrive/carla-client/carla/planner/Town01Lanes.png -------------------------------------------------------------------------------- /code/autodrive/carla-client/carla/planner/Town02Big.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/autodrive/carla-client/carla/planner/Town02Big.png -------------------------------------------------------------------------------- /code/autodrive/carla-client/carla/planner/Town02Lanes.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/autodrive/carla-client/carla/planner/Town02Lanes.png -------------------------------------------------------------------------------- /code/autodrive/carla_rl/__init__.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Created Date: Friday November 30th 2018 3 | Last Modified: Friday November 30th 2018 7:18:13 pm 4 | Author: ankurrc 5 | ''' 6 | -------------------------------------------------------------------------------- /code/autodrive/carla-client/carla/driving_benchmark/experiment_suites/__init__.py: -------------------------------------------------------------------------------- 1 | from .basic_experiment_suite import BasicExperimentSuite 2 | from .corl_2017 import CoRL2017 3 | -------------------------------------------------------------------------------- /code/autodrive/carla-client/carla/planner/Town01Central.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/autodrive/carla-client/carla/planner/Town01Central.png -------------------------------------------------------------------------------- 
/code/autodrive/carla-client/carla/planner/Town02Central.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/autodrive/carla-client/carla/planner/Town02Central.png -------------------------------------------------------------------------------- /code/cnn_vae/logs/events.out.tfevents.1542081438.ankurrc-ubuntu: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/logs/events.out.tfevents.1542081438.ankurrc-ubuntu -------------------------------------------------------------------------------- /code/cnn_vae/logs/events.out.tfevents.1542081544.ankurrc-ubuntu: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/logs/events.out.tfevents.1542081544.ankurrc-ubuntu -------------------------------------------------------------------------------- /code/cnn_vae/logs/events.out.tfevents.1542082019.ankurrc-ubuntu: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankur-rc/autodrive_ddpg/HEAD/code/cnn_vae/logs/events.out.tfevents.1542082019.ankurrc-ubuntu -------------------------------------------------------------------------------- /code/autodrive/carla-client/_benchmarks_results/test_BasicExperimentSuite_Town01/summary.csv: -------------------------------------------------------------------------------- 1 | exp_id,rep,weather,start_point,end_point,result,initial_distance,final_distance,final_time,time_out 2 | -------------------------------------------------------------------------------- /code/autodrive/carla-client/_benchmarks_results/test_BasicExperimentSuite_Town01-1/log_201811301140: -------------------------------------------------------------------------------- 1 | Start Task 0 2 | Start Poses (7 3 ) 
on weather 1 3 | Start Task 1 4 | Start Poses (138 17 ) on weather 1 5 | -------------------------------------------------------------------------------- /code/autodrive/carla-client/_benchmarks_results/test_BasicExperimentSuite_Town01-2/log_201811301142: -------------------------------------------------------------------------------- 1 | Start Task 0 2 | Start Poses (7 3 ) on weather 1 3 | Start Task 1 4 | Start Poses (138 17 ) on weather 1 5 | -------------------------------------------------------------------------------- /code/autodrive/keras-rl/rl/common/misc_util.py: -------------------------------------------------------------------------------- 1 | # Inspired from OpenAI Baselines 2 | 3 | import gym 4 | import numpy as np 5 | import random 6 | 7 | def set_global_seeds(i): 8 | np.random.seed(i) 9 | random.seed(i) 10 | -------------------------------------------------------------------------------- /code/autodrive/keras-rl/rl/agents/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from .dqn import DQNAgent, NAFAgent, ContinuousDQNAgent 3 | from .ddpg import DDPGAgent 4 | from .cem import CEMAgent 5 | from .sarsa import SarsaAgent, SARSAAgent 6 | -------------------------------------------------------------------------------- /code/autodrive/carla-client/_benchmarks_results/test_BasicExperimentSuite_Town01-1/summary.csv: -------------------------------------------------------------------------------- 1 | exp_id,rep,weather,start_point,end_point,result,initial_distance,final_distance,final_time,time_out 2 | 0,0,1,7,3,1,46.38000488282254,1.652843650117772,6.8,24.787 3 | -------------------------------------------------------------------------------- /code/autodrive/carla-client/_benchmarks_results/test_BasicExperimentSuite_Town01-2/summary.csv: -------------------------------------------------------------------------------- 1 | 
exp_id,rep,weather,start_point,end_point,result,initial_distance,final_distance,final_time,time_out 2 | 0,0,1,7,3,1,46.38000488282254,1.652843650117772,6.8,24.787 3 | -------------------------------------------------------------------------------- /code/autodrive/carla-client/_benchmarks_results/test_BasicExperimentSuite_Town01/measurements.csv: -------------------------------------------------------------------------------- 1 | exp_id,rep,weather,start_point,end_point,collision_other,collision_pedestrians,collision_vehicles,intersection_otherlane,intersection_offroad,pos_x,pos_y,steer,throttle,brake 2 | -------------------------------------------------------------------------------- /code/autodrive/keras-rl/docs/templates/agents/naf.md: -------------------------------------------------------------------------------- 1 | ### Introduction 2 | 3 | --- 4 | 5 | {{autogenerated}} 6 | 7 | --- 8 | 9 | ### References 10 | - [Continuous Deep Q-Learning with Model-based Acceleration](https://arxiv.org/abs/1603.00748), Gu et al., 2016 11 | -------------------------------------------------------------------------------- /code/autodrive/keras-rl/docs/templates/agents/ddpg.md: -------------------------------------------------------------------------------- 1 | ### Introduction 2 | 3 | --- 4 | 5 | {{autogenerated}} 6 | 7 | --- 8 | 9 | ### References 10 | - [Continuous control with deep reinforcement learning](https://arxiv.org/abs/1509.02971), Lillicrap et al., 2015 11 | -------------------------------------------------------------------------------- /code/autodrive/keras-rl/docs/templates/agents/sarsa.md: -------------------------------------------------------------------------------- 1 | ### Introduction 2 | 3 | --- 4 | 5 | {{autogenerated}} 6 | 7 | --- 8 | 9 | ### References 10 | - [Reinforcement learning: An introduction](http://people.inf.elte.hu/lorincz/Files/RL_2006/SuttonBook.pdf), Sutton and Barto, 2011 11 | 
-------------------------------------------------------------------------------- /code/autodrive/carla-client/test/unit_tests/test_data/testfile_collisions/summary.csv: -------------------------------------------------------------------------------- 1 | weather,time_out,result,final_time,end_point,final_distance,exp_id,rep,start_point,initial_distance 2 | 3,335314,0,335314,29,171.3219381575824,3,0,105,280.44944447968976 3 | 3,243.6346,0,243.6346,130,215.56398248559435,3,0,27,174.94691018267446 -------------------------------------------------------------------------------- /code/autodrive/carla-client/carla_client.egg-info/PKG-INFO: -------------------------------------------------------------------------------- 1 | Metadata-Version: 1.0 2 | Name: carla-client 3 | Version: 0.8.2 4 | Summary: Python API for communicating with the CARLA server. 5 | Home-page: https://github.com/carla-simulator/carla 6 | Author: The CARLA team 7 | Author-email: carla.simulator@gmail.com 8 | License: MIT License 9 | Description: UNKNOWN 10 | Platform: UNKNOWN 11 | -------------------------------------------------------------------------------- /code/autodrive/carla-client/test/suite/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 2 | # Barcelona (UAB). 3 | # 4 | # This work is licensed under the terms of the MIT license. 5 | # For a copy, see . 
6 | 7 | 8 | class CarlaServerTest(object): 9 | def __init__(self, args): 10 | self.args = args 11 | -------------------------------------------------------------------------------- /code/autodrive/keras-rl/docs/templates/agents/cem.md: -------------------------------------------------------------------------------- 1 | ### Introduction 2 | 3 | --- 4 | 5 | {{autogenerated}} 6 | 7 | --- 8 | 9 | ### References 10 | - [Learning Tetris Using the Noisy Cross-Entropy Method](http://www.mitpressjournals.org/doi/abs/10.1162/neco.2006.18.12.2936?journalCode=neco), Szita et al., 2006 11 | - [Deep Reinforcement Learning (MLSS lecture notes)](http://learning.mpi-sws.org/mlss2016/slides/2016-MLSS-RL.pdf), Schulman, 2016 12 | -------------------------------------------------------------------------------- /code/autodrive/carla-client/carla/agent/forward_agent.py: -------------------------------------------------------------------------------- 1 | 2 | from carla.agent.agent import Agent 3 | from carla.client import VehicleControl 4 | 5 | 6 | class ForwardAgent(Agent): 7 | """ 8 | Simple derivation of Agent Class, 9 | A trivial agent agent that goes straight 10 | """ 11 | def run_step(self, measurements, sensor_data, directions, target): 12 | control = VehicleControl() 13 | control.throttle = 0.9 14 | 15 | return control 16 | -------------------------------------------------------------------------------- /code/autodrive/keras-rl/docs/sources/agents/naf.md: -------------------------------------------------------------------------------- 1 | ### Introduction 2 | 3 | --- 4 | 5 | [[source]](https://github.com/keras-rl/keras-rl/blob/master/rl/agents/dqn.py#L548) 6 | ### NAFAgent 7 | 8 | ```python 9 | rl.agents.dqn.NAFAgent(V_model, L_model, mu_model, random_process=None, covariance_mode='full') 10 | ``` 11 | 12 | Write me 13 | 14 | 15 | --- 16 | 17 | ### References 18 | - [Continuous Deep Q-Learning with Model-based Acceleration](https://arxiv.org/abs/1603.00748), Gu et al., 2016 19 | 
-------------------------------------------------------------------------------- /code/autodrive/keras-rl/setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | from setuptools import find_packages 3 | 4 | 5 | setup(name='keras-rl', 6 | version='0.4.2', 7 | description='Deep Reinforcement Learning for Keras', 8 | author='Matthias Plappert', 9 | author_email='matthiasplappert@me.com', 10 | url='https://github.com/keras-rl/keras-rl', 11 | license='MIT', 12 | install_requires=['keras>=2.0.7'], 13 | extras_require={ 14 | 'gym': ['gym'], 15 | }, 16 | packages=find_packages()) 17 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | <<<<<<< HEAD 2 | # do not add the datasets 3 | dataset/ 4 | 5 | .DS_Store 6 | out 7 | node_modules 8 | *.pyc 9 | **/.vscode/ 10 | **/testFiles/**/.cache/** 11 | *.noseids 12 | .vscode-test 13 | __pycache__ 14 | npm-debug.log 15 | **/.mypy_cache/** 16 | !yarn.lock 17 | coverage/ 18 | .vscode-test/** 19 | **/.venv*/ 20 | precommit.hook 21 | pythonFiles/experimental/ptvsd/** 22 | debug_coverage*/** 23 | languageServer/** 24 | languageServer.*/** 25 | bin/** 26 | obj/** 27 | .pytest_cache 28 | tmp/** 29 | .python-version 30 | .vs/ 31 | 32 | assets 33 | 34 | logs 35 | -------------------------------------------------------------------------------- /code/autodrive/keras-rl/docs/sources/agents/sarsa.md: -------------------------------------------------------------------------------- 1 | ### Introduction 2 | 3 | --- 4 | 5 | [[source]](https://github.com/keras-rl/keras-rl/blob/master/rl/agents/sarsa.py#L17) 6 | ### SARSAAgent 7 | 8 | ```python 9 | rl.agents.sarsa.SARSAAgent(model, nb_actions, policy=None, test_policy=None, gamma=0.99, nb_steps_warmup=10, train_interval=1, delta_clip=inf) 10 | ``` 11 | 12 | Write me 13 | 14 | 15 | --- 16 | 17 | ### 
References 18 | - [Reinforcement learning: An introduction](http://people.inf.elte.hu/lorincz/Files/RL_2006/SuttonBook.pdf), Sutton and Barto, 2011 19 | -------------------------------------------------------------------------------- /code/autodrive/carla-client/setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | 3 | # @todo Dependencies are missing. 4 | 5 | setup( 6 | name='carla_client', 7 | version='0.8.2', 8 | packages=['carla', 'carla.driving_benchmark', 'carla.agent', 9 | 'carla.driving_benchmark.experiment_suites', 'carla.planner'], 10 | license='MIT License', 11 | description='Python API for communicating with the CARLA server.', 12 | url='https://github.com/carla-simulator/carla', 13 | author='The CARLA team', 14 | author_email='carla.simulator@gmail.com', 15 | include_package_data=True 16 | ) 17 | -------------------------------------------------------------------------------- /code/autodrive/keras-rl/docs/templates/agents/dqn.md: -------------------------------------------------------------------------------- 1 | ### Introduction 2 | 3 | --- 4 | 5 | {{autogenerated}} 6 | 7 | --- 8 | 9 | ### References 10 | - [Playing Atari with Deep Reinforcement Learning](https://arxiv.org/abs/1312.5602), Mnih et al., 2013 11 | - [Human-level control through deep reinforcement learning](http://www.nature.com/nature/journal/v518/n7540/abs/nature14236.html), Mnih et al., 2015 12 | - [Deep Reinforcement Learning with Double Q-learning](http://www0.cs.ucl.ac.uk/staff/d.silver/web/Applications_files/doubledqn.pdf), van Hasselt et al., 2015 13 | - [Dueling Network Architectures for Deep Reinforcement Learning](https://arxiv.org/abs/1511.06581), Wang et al., 2016 14 | -------------------------------------------------------------------------------- /code/autodrive/keras-rl/mkdocs.yml: -------------------------------------------------------------------------------- 1 | site_name: Keras-RL Documentation 2 
| theme: readthedocs 3 | docs_dir: docs/sources 4 | repo_url: https://github.com/keras-rl/keras-rl 5 | site_description: 'Documentation for Keras-RL, a library for Deep Reinforcement Learning with Keras.' 6 | #markdown_extensions: [mdx_math] 7 | #extra_javascript: ['https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS_HTML'] 8 | 9 | dev_addr: '0.0.0.0:8000' 10 | 11 | pages: 12 | - Home: index.md 13 | - Core: core.md 14 | - Agents: 15 | - Overview: agents/overview.md 16 | - DQNAgent: agents/dqn.md 17 | - NAFAgent: agents/naf.md 18 | - DDPGAgent: agents/ddpg.md 19 | - SARSAAgent: agents/sarsa.md 20 | - CEMAgent: agents/cem.md 21 | -------------------------------------------------------------------------------- /code/autodrive/keras-rl/docs/sources/agents/ddpg.md: -------------------------------------------------------------------------------- 1 | ### Introduction 2 | 3 | --- 4 | 5 | [[source]](https://github.com/keras-rl/keras-rl/blob/master/rl/agents/ddpg.py#L22) 6 | ### DDPGAgent 7 | 8 | ```python 9 | rl.agents.ddpg.DDPGAgent(nb_actions, actor, critic, critic_action_input, memory, gamma=0.99, batch_size=32, nb_steps_warmup_critic=1000, nb_steps_warmup_actor=1000, train_interval=1, memory_interval=1, delta_range=None, delta_clip=inf, random_process=None, custom_model_objects={}, target_model_update=0.001) 10 | ``` 11 | 12 | Write me 13 | 14 | 15 | --- 16 | 17 | ### References 18 | - [Continuous control with deep reinforcement learning](https://arxiv.org/abs/1509.02971), Lillicrap et al., 2015 19 | -------------------------------------------------------------------------------- /code/autodrive/keras-rl/tests/rl/util.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import random 3 | 4 | from rl.core import Env 5 | 6 | 7 | class MultiInputTestEnv(Env): 8 | def __init__(self, observation_shape): 9 | self.observation_shape = observation_shape 10 | 11 | def step(self, action): 12 | return 
def make_gym_env(env_id, num_env=2, seed=123, wrapper_kwargs=None, start_index=0):
    """
    Create a wrapped, SubprocVecEnv for Gym Environments.

    :param env_id: gym environment id string passed to ``gym.make``.
    :param num_env: number of parallel environment workers to spawn.
    :param seed: base random seed; worker ``rank`` is seeded with ``seed + rank``.
    :param wrapper_kwargs: NOTE(review): accepted and defaulted but never used
        below -- presumably intended for env wrappers; confirm before relying on it.
    :param start_index: offset added to each worker index when seeding.
    :returns: a ``SubprocVecEnv`` running ``num_env`` copies of the environment.
    """
    if wrapper_kwargs is None: wrapper_kwargs = {}
    def make_env(rank): # pylint: disable=C0111
        # Return a thunk so each subprocess constructs (and seeds) its own env.
        def _thunk():
            env = gym.make(env_id)
            env.seed(seed + rank)
            return env
        return _thunk
    # Seed the parent-process RNGs before the workers are forked.
    set_global_seeds(seed)
    return SubprocVecEnv([make_env(i + start_index) for i in range(num_env)])
common numpy.random.RandomState. 15 | """ 16 | np_random.seed(seed) 17 | 18 | # This numpy.random.RandomState gets used in all spaces for their 19 | # 'sample' method. It's not really expected that people will be using 20 | # these in their algorithms. 21 | seed(0) 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Autonomous Driving using Deep Deterministic Policy Gradients. 2 | Based on [Kendall, et. al. 2018](1). 3 | 4 | [1] Alex Kendall, Jeffrey Hawke, David Janz, Przemyslaw Mazur, Daniele Reda, John-Mark Allen, Vinh-Dieu Lam, Alex Bewley: “Learning to Drive in a Day”, 2018; [http://arxiv.org/abs/1807.00412 arXiv:1807.00412]. 5 | 6 | ## Installation 7 | 1. Navigate to __code/autodrive__ 8 | 2. Run 9 | ``` pip install -r requirements.txt ``` 10 | 3. Navigate to __code/autodrive/keras-rl__ 11 | 4. Run 12 | ```pip install .``` 13 | 5. Navigate to __code/autodrive/carla-client__ 14 | 6. Run ```pip install .``` 15 | 16 | ## Set Carla environment variable 17 | 1.```$ export CARLA_ROOT=path/to/carla/directory``` 18 | 2. Copy __mysettings.ini__ to CARLA_ROOT directory 19 | 20 | ## Run experiment 21 | ``` 22 | python run_experiment.py 23 | ``` 24 | -------------------------------------------------------------------------------- /code/autodrive/carla-client/carla/agent/agent.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 3 | # Barcelona (UAB). 4 | # 5 | # This work is licensed under the terms of the MIT license. 6 | # For a copy, see . 
def tile_images(img_nhwc):
    """Tile a batch of N images into one big grid image.

    The grid has P rows and Q columns chosen to be as close to square as
    possible (P == Q when N is a perfect square); any unfilled cells are
    padded with black images.

    input: img_nhwc, list or array of images, ndim=4 once turned into array
        n = batch index, h = height, w = width, c = channel
    returns: ndarray with ndim=3 and shape (P*h, Q*w, c)
    """
    images = np.asarray(img_nhwc)
    n_images, height, width, channels = images.shape
    rows = int(np.ceil(np.sqrt(n_images)))
    cols = int(np.ceil(float(n_images) / rows))
    # Pad the batch with all-zero (black) images so it exactly fills the grid.
    padding = [np.zeros_like(images[0]) for _ in range(n_images, rows * cols)]
    grid = np.array(list(images) + padding)
    # (rows*cols, h, w, c) -> (rows, cols, h, w, c) -> (rows, h, cols, w, c)
    grid = grid.reshape(rows, cols, height, width, channels)
    grid = grid.transpose(0, 2, 1, 3, 4)
    # Collapse the row/col axes into one big (rows*h, cols*w, c) image.
    return grid.reshape(rows * height, cols * width, channels)
def UpSampling2D_Bilinear(stride, **kwargs):
    """Return a Keras ``Lambda`` layer that upsamples feature maps by
    ``stride`` using bilinear interpolation.

    :param stride: integer upscaling factor applied to height and width.
    :param kwargs: forwarded to ``tf.keras.layers.Lambda`` (e.g. ``name``).
    """
    def layer(x):
        # int_shape is read as (batch, height, width, channels) -- assumes
        # NHWC layout with statically known spatial dims; TODO confirm.
        input_shape = tf.keras.backend.int_shape(x)
        output_shape = (stride * input_shape[1], stride * input_shape[2])
        # NOTE(review): tf.image.resize_bilinear is the TF1-era API,
        # deprecated in TF2 in favour of tf.image.resize(..., method='bilinear').
        return tf.image.resize_bilinear(x, output_shape, align_corners=True)
    return tf.keras.layers.Lambda(layer, **kwargs)
class testExperimentSuite(unittest.TestCase):
    """Smoke tests for ExperimentSuite and every registered subclass."""


    def test_init(self):
        # Constructing the base class and each subclass must not raise;
        # the instances themselves are intentionally unused.

        base_class = ExperimentSuite('Town01')
        subclasses_instanciate = [obj('Town01') for obj in ExperimentSuite.__subclasses__()]


    def test_properties(self):
        # Exercise the suite properties on each subclass; printing (rather
        # than asserting) only checks that the accessors do not raise.

        all_classes = [obj('Town01') for obj in ExperimentSuite.__subclasses__()]
        print (all_classes)
        for exp_suite in all_classes:
            print(exp_suite.__class__)
            print(exp_suite.dynamic_tasks)
            print(exp_suite.weathers)
12 | -------------------------------------------------------------------------------- /code/autodrive/keras-rl/docs/sources/agents/dqn.md: -------------------------------------------------------------------------------- 1 | ### Introduction 2 | 3 | --- 4 | 5 | [[source]](https://github.com/keras-rl/keras-rl/blob/master/rl/agents/dqn.py#L89) 6 | ### DQNAgent 7 | 8 | ```python 9 | rl.agents.dqn.DQNAgent(model, policy=None, test_policy=None, enable_double_dqn=True, enable_dueling_network=False, dueling_type='avg') 10 | ``` 11 | 12 | Write me 13 | 14 | 15 | --- 16 | 17 | ### References 18 | - [Playing Atari with Deep Reinforcement Learning](https://arxiv.org/abs/1312.5602), Mnih et al., 2013 19 | - [Human-level control through deep reinforcement learning](http://www.nature.com/nature/journal/v518/n7540/abs/nature14236.html), Mnih et al., 2015 20 | - [Deep Reinforcement Learning with Double Q-learning](http://www0.cs.ucl.ac.uk/staff/d.silver/web/Applications_files/doubledqn.pdf), van Hasselt et al., 2015 21 | - [Dueling Network Architectures for Deep Reinforcement Learning](https://arxiv.org/abs/1511.06581), Wang et al., 2016 22 | -------------------------------------------------------------------------------- /code/autodrive/keras-rl/utils/gym/spaces/discrete.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | import gym 4 | from .. 
class Discrete(gym.Space):
    """A discrete space of n values: {0, 1, ..., n-1}.

    Samples are drawn from the module-level ``prng.np_random`` generator so
    that space sampling can be seeded independently of the environments.

    Example usage:
        self.observation_space = spaces.Discrete(2)
    """
    def __init__(self, n):
        # Number of discrete values; valid samples are integers in [0, n).
        self.n = n

    def sample(self):
        """Return a uniformly random element of {0, ..., n-1}."""
        return prng.np_random.randint(self.n)

    def contains(self, x):
        """Return True if ``x`` is a Python int or a scalar integer
        ndarray/numpy integer whose value lies in [0, n)."""
        if isinstance(x, int):
            as_int = x
        elif isinstance(x, (np.generic, np.ndarray)) and (x.dtype.kind in np.typecodes['AllInteger'] and x.shape == ()):
            as_int = int(x)
        else:
            return False
        return 0 <= as_int < self.n

    @property
    def shape(self):
        # NOTE(review): modern gym reports shape () for Discrete; (n,) is
        # kept here because existing callers may rely on it.
        return (self.n,)

    def __repr__(self):
        return "Discrete(%d)" % self.n

    def __eq__(self, other):
        # Guard against unrelated types: the previous version raised
        # AttributeError when ``other`` had no ``n`` attribute.
        if not isinstance(other, Discrete):
            return NotImplemented
        return self.n == other.n

    def __hash__(self):
        # Defining __eq__ alone sets __hash__ to None in Python 3, which
        # made Discrete unhashable; restore a hash consistent with __eq__.
        return hash(self.n)
Contributing to Keras-RL 2 | 3 | New contributors are very welcomed! If you're interested, please post a message on the [Gitter](https://gitter.im/keras-rl/Lobby). 4 | 5 | Here is a list of ways you can contribute to this repository: 6 | - Tackle an open issue on [Github](https://github.com/keras-rl/keras-rl/issues) 7 | - Improve documentation 8 | - Improve test coverage 9 | - Add examples 10 | - Implement new algorithms on Keras-RL (please get in touch on Gitter) 11 | - Link to your personal projects built on top of Keras-RL 12 | 13 | 14 | ## How to run the tests 15 | 16 | To run the tests locally, you'll first have to install the following dependencies: 17 | ```bash 18 | pip install pytest pytest-xdist pep8 pytest-pep8 pytest-cov python-coveralls 19 | ``` 20 | You can then run all tests using this command: 21 | ```bash 22 | py.test tests/. 23 | ``` 24 | If you want to check if the files conform to the PEP8 style guidelines, run the following command: 25 | ```bash 26 | py.test --pep8 27 | ``` 28 | -------------------------------------------------------------------------------- /code/autodrive/keras-rl/docs/templates/agents/overview.md: -------------------------------------------------------------------------------- 1 | ## Available Agents 2 | 3 | | Name | Implementation | Observation Space | Action Space | 4 | | ---------------------- |------------------------| -------------------| ---------------| 5 | | [DQN](/agents/dqn) | `rl.agents.DQNAgent` | discrete or continuous | discrete | 6 | | [DDPG](/agents/ddpg) | `rl.agents.DDPGAgent` | discrete or continuous | continuous | 7 | | [NAF](/agents/naf) | `rl.agents.NAFAgent` | discrete or continuous | continuous | 8 | | [CEM](/agents/cem) | `rl.agents.CEMAgent` | discrete or continuous | discrete | 9 | | [SARSA](/agents/sarsa) | `rl.agents.SARSAAgent` | discrete or continuous | discrete | 10 | 11 | --- 12 | 13 | ## Common API 14 | 15 | All agents share a common API. 
This allows you to easily switch between different agents. 16 | That being said, keep in mind that some agents make assumptions regarding the action space, i.e. assume discrete 17 | or continuous actions. 18 | 19 | {{autogenerated}} 20 | -------------------------------------------------------------------------------- /code/autodrive/keras-rl/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2016 Matthias Plappert 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /code/autodrive/keras-rl/.gitignore: -------------------------------------------------------------------------------- 1 | # OS X 2 | .DS_Store 3 | docs/site/* 4 | 5 | # Ubuntu 6 | *~ 7 | 8 | # PyCharm 9 | .idea 10 | 11 | # Byte-compiled / optimized / DLL files 12 | __pycache__/ 13 | *.py[cod] 14 | 15 | # C extensions 16 | *.so 17 | 18 | # Distribution / packaging 19 | .Python 20 | env/ 21 | build/ 22 | download/ 23 | bin/ 24 | develop-eggs/ 25 | dist/ 26 | downloads/ 27 | eggs/ 28 | .eggs/ 29 | lib64/ 30 | parts/ 31 | sdist/ 32 | include/ 33 | lib/ 34 | man/ 35 | local/ 36 | var/ 37 | share/ 38 | pip-selfcheck.json 39 | *.egg-info/ 40 | .installed.cfg 41 | *.egg 42 | 43 | # PyInstaller 44 | # Usually these files are written by a python script from a template 45 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 46 | *.manifest 47 | *.spec 48 | 49 | # Installer logs 50 | pip-log.txt 51 | pip-delete-this-directory.txt 52 | 53 | # Unit test / coverage reports 54 | htmlcov/ 55 | .tox/ 56 | .coverage 57 | .coverage.* 58 | .cache 59 | nosetests.xml 60 | coverage.xml 61 | *,cover 62 | .pytest_cache/ 63 | 64 | # Translations 65 | *.mo 66 | *.pot 67 | 68 | # Django stuff: 69 | *.log 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | -------------------------------------------------------------------------------- /code/autodrive/keras-rl/pytest.ini: -------------------------------------------------------------------------------- 1 | # Configuration of py.test 2 | [pytest] 3 | addopts=-v 4 | -n 2 5 | --durations=10 6 | 7 | # Do not run tests in the build folder or in the virtualenv folder `venv`. 
8 | norecursedirs=build venv 9 | 10 | # PEP-8 The following are ignored: 11 | # E251 unexpected spaces around keyword / parameter equals 12 | # E225 missing whitespace around operator 13 | # E226 missing whitespace around arithmetic operator 14 | # W291 trailing whitespace 15 | # W293 blank line contains whitespace 16 | # E501 line too long (82 > 79 characters) 17 | # E402 module level import not at top of file - temporary measure to coninue adding ros python packaged in sys.path 18 | # E731 do not assign a lambda expression, use a def 19 | # E302 two blank lines between the functions 20 | # E231 missing whitespace after , 21 | # E241 multiple spaces after ',' 22 | # E261 at least two spaces before inline comment 23 | 24 | 25 | pep8ignore=* E251 \ 26 | * E225 \ 27 | * E226 \ 28 | * W291 \ 29 | * W293 \ 30 | * E501 \ 31 | * E402 \ 32 | * E731 \ 33 | * E302 \ 34 | * E231 \ 35 | * E241 \ 36 | * E261 37 | -------------------------------------------------------------------------------- /code/autodrive/carla_rl/carla_config.py: -------------------------------------------------------------------------------- 1 | # Resolutions 2 | 3 | server_height = 360 4 | server_width = 360 5 | render_height = 84 6 | render_width = 84 7 | 8 | # 96 * 8/3 = 256 9 | # 3/8 = 0.375 10 | 11 | scale_factor = 0.375 12 | 13 | ''' 14 | Segmentation Classes: 15 | 0 None 16 | 1 Buildings 17 | 2 Fences 18 | 3 Other 19 | 4 Pedestrians 20 | 5 Poles 21 | 6 RoadLines 22 | 7 Roads 23 | 8 Sidewalks 24 | 9 Vegetation 25 | 10 Vehicles 26 | 11 Walls 27 | 12 TrafficSigns 28 | 29 | Source: https://carla.readthedocs.io/en/latest/cameras_and_sensors/ 30 | ''' 31 | no_of_classes = 13 32 | class_grouping = [4, 5, 12, 6, 7, 8, 10, (0, 1, 2, 3, 9, 11)] 33 | 34 | colors_segment = [[0, 0, 0], # Black None 35 | [100, 100, 255], # X-colored Buildings 36 | [246, 0, 255], # Dark pink Fences 37 | [0, 255, 251], # Cyan Other stuffs 38 | [255, 0, 0], # Red Pedestrians 39 | [255, 255, 255], # White Poles 40 | [255, 142, 255], 
# Pink Roadlines 41 | [119, 120, 118], # Gray Roads 42 | [255, 153, 0], # Orange Sidewalks 43 | [30, 0, 255], # Dark blue Vegetation 44 | [0, 255, 34], # Light Green Vehicles 45 | [0, 149, 255], # Light blue Walls 46 | [255, 251, 0], # Yellow TrafficSigns 47 | ] 48 | 49 | assert(len(colors_segment) == no_of_classes) 50 | -------------------------------------------------------------------------------- /code/autodrive/keras-rl/tests/rl/agents/test_cem.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | from __future__ import absolute_import 3 | 4 | import pytest 5 | import numpy as np 6 | from numpy.testing import assert_allclose 7 | 8 | from keras.models import Model, Sequential 9 | from keras.layers import Input, Dense, Flatten, Concatenate 10 | 11 | from rl.agents.cem import CEMAgent 12 | from rl.memory import EpisodeParameterMemory 13 | from rl.processors import MultiInputProcessor 14 | 15 | from ..util import MultiInputTestEnv 16 | 17 | 18 | def test_single_cem_input(): 19 | model = Sequential() 20 | model.add(Flatten(input_shape=(2, 3))) 21 | model.add(Dense(2)) 22 | 23 | memory = EpisodeParameterMemory(limit=10, window_length=2) 24 | agent = CEMAgent(model, memory=memory, nb_actions=2, nb_steps_warmup=5, batch_size=4, train_interval=50) 25 | agent.compile() 26 | agent.fit(MultiInputTestEnv((3,)), nb_steps=100) 27 | 28 | 29 | def test_multi_cem_input(): 30 | input1 = Input(shape=(2, 3)) 31 | input2 = Input(shape=(2, 4)) 32 | x = Concatenate()([input1, input2]) 33 | x = Flatten()(x) 34 | x = Dense(2)(x) 35 | model = Model(inputs=[input1, input2], outputs=x) 36 | 37 | memory = EpisodeParameterMemory(limit=10, window_length=2) 38 | processor = MultiInputProcessor(nb_inputs=2) 39 | agent = CEMAgent(model, memory=memory, nb_actions=2, nb_steps_warmup=5, batch_size=4, 40 | processor=processor, train_interval=50) 41 | agent.compile() 42 | agent.fit(MultiInputTestEnv([(3,), (4,)]), nb_steps=100) 
class Experiment(object):
    """A driving-benchmark task together with the conditions it runs under.

    A task is associated with a set of poses (start/end pose pairs).
    Conditions are a carla ``CarlaSettings`` describing: number of vehicles,
    number of pedestrians, weather, and the random seed that drives agent
    behaviour.
    """

    def __init__(self):
        # Attribute names are capitalised so ``set`` can map keyword
        # arguments straight onto them with setattr.
        self.Task = 0
        self.Conditions = CarlaSettings()
        self.Poses = [[]]
        self.Repetitions = 1

    def set(self, **kwargs):
        """Override experiment attributes from keyword arguments.

        Raises ValueError on an unknown key and NotImplementedError if more
        than one repetition is requested (repetitions are not supported yet).
        """
        for key, value in kwargs.items():
            if hasattr(self, key):
                setattr(self, key, value)
            else:
                raise ValueError('Experiment: no key named %r' % key)

        if self.Repetitions != 1:
            raise NotImplementedError()

    @property
    def task(self):
        """Task identifier of this experiment."""
        return self.Task

    @property
    def conditions(self):
        """CarlaSettings describing the simulation conditions."""
        return self.Conditions

    @property
    def poses(self):
        """List of [start, end] pose pairs for this task."""
        return self.Poses

    @property
    def repetitions(self):
        """Number of repetitions per pose (currently always 1)."""
        return self.Repetitions
    def _parse_function(self, filename):
        """
        Load one image from disk and convert it to a float32 tensor.

        :param filename type tf.string tensor: path to a JPEG image
        :rtype: rank-3 tensor of shape (self.size[0], self.size[1], channels)

        NOTE(review): ``resize_images`` already yields float32 values in the
        original 0-255 range, and ``convert_image_dtype`` does NOT rescale
        float inputs -- so pixels are NOT normalized to [0, 1] here. Confirm
        downstream consumers expect the raw 0-255 range.
        """
        image_string = tf.read_file(filename)
        image_decoded = tf.image.decode_jpeg(image_string)
        image_resized = tf.image.resize_images(image_decoded, self.size)
        image_resized = tf.image.convert_image_dtype(image_resized, tf.float32)
        return image_resized
| carla/driving_benchmark/experiment_suites/__init__.py 22 | carla/driving_benchmark/experiment_suites/basic_experiment_suite.py 23 | carla/driving_benchmark/experiment_suites/corl_2017.py 24 | carla/driving_benchmark/experiment_suites/experiment_suite.py 25 | carla/planner/Town01.png 26 | carla/planner/Town01.txt 27 | carla/planner/Town01Central.png 28 | carla/planner/Town01Lanes.png 29 | carla/planner/Town02.png 30 | carla/planner/Town02.txt 31 | carla/planner/Town02Big.png 32 | carla/planner/Town02Central.png 33 | carla/planner/Town02Lanes.png 34 | carla/planner/__init__.py 35 | carla/planner/astar.py 36 | carla/planner/city_track.py 37 | carla/planner/converter.py 38 | carla/planner/graph.py 39 | carla/planner/grid.py 40 | carla/planner/map.py 41 | carla/planner/planner.py 42 | carla_client.egg-info/PKG-INFO 43 | carla_client.egg-info/SOURCES.txt 44 | carla_client.egg-info/dependency_links.txt 45 | carla_client.egg-info/top_level.txt 46 | test/test_client.py 47 | test/test_repeatability.py 48 | test/test_suite.py -------------------------------------------------------------------------------- /code/autodrive/keras-rl/examples/sarsa_cartpole.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import gym 3 | 4 | from keras.models import Sequential 5 | from keras.layers import Dense, Activation, Flatten 6 | from keras.optimizers import Adam 7 | 8 | from rl.agents import SARSAAgent 9 | from rl.policy import BoltzmannQPolicy 10 | 11 | 12 | ENV_NAME = 'CartPole-v0' 13 | 14 | # Get the environment and extract the number of actions. 15 | env = gym.make(ENV_NAME) 16 | np.random.seed(123) 17 | env.seed(123) 18 | nb_actions = env.action_space.n 19 | 20 | # Next, we build a very simple model. 
21 | model = Sequential() 22 | model.add(Flatten(input_shape=(1,) + env.observation_space.shape)) 23 | model.add(Dense(16)) 24 | model.add(Activation('relu')) 25 | model.add(Dense(16)) 26 | model.add(Activation('relu')) 27 | model.add(Dense(16)) 28 | model.add(Activation('relu')) 29 | model.add(Dense(nb_actions)) 30 | model.add(Activation('linear')) 31 | print(model.summary()) 32 | 33 | # SARSA does not require a memory. 34 | policy = BoltzmannQPolicy() 35 | sarsa = SARSAAgent(model=model, nb_actions=nb_actions, nb_steps_warmup=10, policy=policy) 36 | sarsa.compile(Adam(lr=1e-3), metrics=['mae']) 37 | 38 | # Okay, now it's time to learn something! We visualize the training here for show, but this 39 | # slows down training quite a lot. You can always safely abort the training prematurely using 40 | # Ctrl + C. 41 | sarsa.fit(env, nb_steps=50000, visualize=False, verbose=2) 42 | 43 | # After training is done, we save the final weights. 44 | sarsa.save_weights('sarsa_{}_weights.h5f'.format(ENV_NAME), overwrite=True) 45 | 46 | # Finally, evaluate our algorithm for 5 episodes. 
47 | sarsa.test(env, nb_episodes=5, visualize=True) 48 | -------------------------------------------------------------------------------- /code/cnn_vae/scripts/dataset_crawler.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Created Date: Saturday November 3rd 2018 3 | Last Modified: Saturday November 3rd 2018 8:53:52 pm 4 | Author: ankurrc 5 | ''' 6 | 7 | import argparse 8 | import os 9 | import logging 10 | 11 | 12 | def main(args): 13 | logging.basicConfig(level=logging.INFO) 14 | logger = logging.getLogger(__name__) 15 | 16 | root = args.root 17 | output = args.output 18 | extensions = args.extensions 19 | op_file = "files.txt" 20 | 21 | root = os.path.abspath(root) 22 | if not os.path.exists(root): 23 | logger.error("Root folder does not exist!") 24 | raise FileNotFoundError 25 | 26 | output = os.path.join(os.path.abspath(output), op_file) 27 | logger.info("Writing to {}..".format(output)) 28 | 29 | with open(output, "w") as op: 30 | for root, _, files in os.walk(root): 31 | logger.info("Accessing.. {}".format(root)) 32 | for fil in files: 33 | if os.path.splitext(fil)[1] in extensions: 34 | op.write(os.path.join(root, fil)+"\n") 35 | 36 | 37 | if __name__ == '__main__': 38 | parser = argparse.ArgumentParser( 39 | description="Crawls the directory structure to accumulate file paths.") 40 | parser.add_argument("root", metavar="ROOT", 41 | help="Root directory of your dataset.") 42 | parser.add_argument("output", metavar="OUTPUT", 43 | help="Output folder directory. 
Save file will be 'files.txt'") 44 | parser.add_argument( 45 | "--extensions", metavar="EXT", nargs="+", help="Filename extensions to be considered (default=.jpg)", default=[".jpg"]) 46 | 47 | args = parser.parse_args() 48 | main(args) 49 | -------------------------------------------------------------------------------- /code/autodrive/keras-rl/examples/visualize_log.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import json 3 | 4 | import matplotlib.pyplot as plt 5 | 6 | 7 | def visualize_log(filename, figsize=None, output=None): 8 | with open(filename, 'r') as f: 9 | data = json.load(f) 10 | if 'episode' not in data: 11 | raise ValueError('Log file "{}" does not contain the "episode" key.'.format(filename)) 12 | episodes = data['episode'] 13 | 14 | # Get value keys. The x axis is shared and is the number of episodes. 15 | keys = sorted(list(set(data.keys()).difference(set(['episode'])))) 16 | 17 | if figsize is None: 18 | figsize = (15., 5. * len(keys)) 19 | f, axarr = plt.subplots(len(keys), sharex=True, figsize=figsize) 20 | for idx, key in enumerate(keys): 21 | axarr[idx].plot(episodes, data[key]) 22 | axarr[idx].set_ylabel(key) 23 | plt.xlabel('episodes') 24 | plt.tight_layout() 25 | if output is None: 26 | plt.show() 27 | else: 28 | plt.savefig(output) 29 | 30 | 31 | parser = argparse.ArgumentParser() 32 | parser.add_argument('filename', type=str, help='The filename of the JSON log generated during training.') 33 | parser.add_argument('--output', type=str, default=None, help='The output file. If not specified, the log will only be displayed.') 34 | parser.add_argument('--figsize', nargs=2, type=float, default=None, help='The size of the figure in `width height` format specified in points.') 35 | args = parser.parse_args() 36 | 37 | # You can use visualize_log to easily view the stats that were recorded during training. 
Simply 38 | # provide the filename of the `FileLogger` that was used in `FileLogger`. 39 | visualize_log(args.filename, output=args.output, figsize=args.figsize) 40 | -------------------------------------------------------------------------------- /code/autodrive/carla_settings.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Created Date: Saturday December 1st 2018 3 | Last Modified: Saturday December 1st 2018 3:10:27 pm 4 | Author: ankurrc 5 | ''' 6 | 7 | import random 8 | 9 | from carla.settings import CarlaSettings 10 | from carla.sensor import Camera 11 | 12 | from carla_rl import carla_config 13 | 14 | 15 | def get_carla_settings(settings_file=None): 16 | 17 | if settings_file is None: 18 | 19 | # Create a CarlaSettings object. This object is a wrapper around 20 | # the CarlaSettings.ini file. Here we set the configuration we 21 | # want for the new episode. 22 | settings = CarlaSettings() 23 | settings.set( 24 | SynchronousMode=True, 25 | SendNonPlayerAgentsInfo=False, 26 | NumberOfVehicles=0, 27 | NumberOfPedestrians=0, 28 | # 8-14 are sunset; we want easy first 29 | WeatherId=random.choice(range(0, 11)), 30 | QualityLevel='Epic' 31 | ) 32 | settings.randomize_seeds() 33 | 34 | # Now we want to add a couple of cameras to the player vehicle. 35 | # We will collect the images produced by these cameras every 36 | # frame. 37 | 38 | # The default camera captures RGB images of the scene. 39 | camera0 = Camera('CameraRGB') 40 | # Set image resolution in pixels. 41 | camera0.set_image_size(carla_config.render_width, 42 | carla_config.render_height) 43 | # Set its position relative to the car in meters. 44 | camera0.set_position(0.30, 0, 1.30) 45 | settings.add_sensor(camera0) 46 | 47 | else: 48 | 49 | # Alternatively, we can load these settings from a file. 
50 | with open(settings_file, 'r') as fp: 51 | settings = fp.read() 52 | 53 | return settings 54 | -------------------------------------------------------------------------------- /code/autodrive/processor.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Created Date: Sunday December 2nd 2018 3 | Last Modified: Sunday December 2nd 2018 6:17:51 pm 4 | Author: ankurrc 5 | ''' 6 | import numpy as np 7 | 8 | from rl.core import Processor 9 | from carla.image_converter import to_rgb_array 10 | 11 | 12 | class MultiInputProcessor(Processor): 13 | 14 | def __init__(self, window_length=None, nb_inputs=None): 15 | self.window_length = window_length 16 | self.nb_inputs = nb_inputs 17 | 18 | # def process_observation(self, observation): 19 | # # convert image to numoy array and normalise 20 | # observation[1] = to_rgb_array(observation[1]) 21 | # observation[1] /= 255. 22 | 23 | # print("obsv. min and max:", np.min( 24 | # observation[1]), np.max(observation[1])) 25 | 26 | # return observation 27 | 28 | def process_state_batch(self, batch): 29 | # creates [[], []]; after 1:- [[o_0_0, o_1_0], [o_0_1, o_1_1]]; after 2:- [[o_0_0, o_1_0, o_2_0], [o_0_1, o_1_1, o_2_1]]... 
30 | input_batches = [[] for x in range(self.nb_inputs)] 31 | 32 | # [1, 4, 2] -> [4, 2] 33 | for state in batch: 34 | # creates [[], []] 35 | processed_state = [[] for x in range(self.nb_inputs)] 36 | # [4, 2] -> [2] 37 | for observation in state: 38 | assert len(observation) == self.nb_inputs 39 | # [(o_t_0, []), (o_t_1, [])] 40 | for o, s in zip(observation, processed_state): 41 | # [(o_t_0, [o_t_0]), (o_t_1, [o_t_1])] 42 | s.append(o) 43 | 44 | for idx, s in enumerate(processed_state): 45 | input_batches[idx].append(s) 46 | 47 | # ob = [ np.array([o_0_0, o_1_0, o_2_0]), np.array([o_0_1, o_1_1, o_2_1]) ] 48 | ob = [np.array(x) for x in input_batches] 49 | 50 | return ob 51 | -------------------------------------------------------------------------------- /code/autodrive/keras-rl/examples/dqn_cartpole.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import gym 3 | 4 | from keras.models import Sequential 5 | from keras.layers import Dense, Activation, Flatten 6 | from keras.optimizers import Adam 7 | 8 | from rl.agents.dqn import DQNAgent 9 | from rl.policy import BoltzmannQPolicy 10 | from rl.memory import SequentialMemory 11 | 12 | 13 | ENV_NAME = 'CartPole-v0' 14 | 15 | 16 | # Get the environment and extract the number of actions. 17 | env = gym.make(ENV_NAME) 18 | np.random.seed(123) 19 | env.seed(123) 20 | nb_actions = env.action_space.n 21 | 22 | # Next, we build a very simple model. 23 | model = Sequential() 24 | model.add(Flatten(input_shape=(1,) + env.observation_space.shape)) 25 | model.add(Dense(16)) 26 | model.add(Activation('relu')) 27 | model.add(Dense(16)) 28 | model.add(Activation('relu')) 29 | model.add(Dense(16)) 30 | model.add(Activation('relu')) 31 | model.add(Dense(nb_actions)) 32 | model.add(Activation('linear')) 33 | print(model.summary()) 34 | 35 | # Finally, we configure and compile our agent. You can use every built-in Keras optimizer and 36 | # even the metrics! 
37 | memory = SequentialMemory(limit=50000, window_length=1) 38 | policy = BoltzmannQPolicy() 39 | dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=10, 40 | target_model_update=1e-2, policy=policy) 41 | dqn.compile(Adam(lr=1e-3), metrics=['mae']) 42 | 43 | # Okay, now it's time to learn something! We visualize the training here for show, but this 44 | # slows down training quite a lot. You can always safely abort the training prematurely using 45 | # Ctrl + C. 46 | dqn.fit(env, nb_steps=50000, visualize=True, verbose=2) 47 | 48 | # After training is done, we save the final weights. 49 | dqn.save_weights('dqn_{}_weights.h5f'.format(ENV_NAME), overwrite=True) 50 | 51 | # Finally, evaluate our algorithm for 5 episodes. 52 | dqn.test(env, nb_episodes=5, visualize=True) 53 | -------------------------------------------------------------------------------- /code/autodrive/keras-rl/docs/sources/processors.md: -------------------------------------------------------------------------------- 1 | [[source]](https://github.com/keras-rl/keras-rl/blob/master/rl/processors.py#L7) 2 | ### MultiInputProcessor 3 | 4 | ```python 5 | rl.processors.MultiInputProcessor(nb_inputs) 6 | ``` 7 | 8 | Converts observations from an environment with multiple observations for use in a neural network 9 | policy. 10 | 11 | In some cases, you have environments that return multiple different observations per timestep 12 | (in a robotics context, for example, a camera may be used to view the scene and a joint encoder may 13 | be used to report the angles for each joint). Usually, this can be handled by a policy that has 14 | multiple inputs, one for each modality. However, observations are returned by the environment 15 | in the form of a tuple `[(modality1_t, modality2_t, ..., modalityn_t) for t in T]` but the neural network 16 | expects them in per-modality batches like so: `[[modality1_1, ..., modality1_T], ..., [[modalityn_1, ..., modalityn_T]]`. 
17 | This processor converts observations appropriate for this use case. 18 | 19 | __Arguments__ 20 | 21 | - __nb_inputs__ (integer): The number of inputs, that is different modalities, to be used. 22 | Your neural network that you use for the policy must have a corresponding number of 23 | inputs. 24 | 25 | ---- 26 | 27 | [[source]](https://github.com/keras-rl/keras-rl/blob/master/rl/processors.py#L40) 28 | ### WhiteningNormalizerProcessor 29 | 30 | ```python 31 | rl.processors.WhiteningNormalizerProcessor() 32 | ``` 33 | 34 | Normalizes the observations to have zero mean and standard deviation of one, 35 | i.e. it applies whitening to the inputs. 36 | 37 | This typically helps significantly with learning, especially if different dimensions are 38 | on different scales. However, it complicates training in the sense that you will have to store 39 | these weights alongside the policy if you intend to load it later. It is the responsibility of 40 | the user to do so. 41 | 42 | -------------------------------------------------------------------------------- /code/cnn_vae/dataset_api.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Created Date: Saturday November 3rd 2018 3 | Last Modified: Saturday November 3rd 2018 9:18:59 pm 4 | Author: ankurrc 5 | ''' 6 | import tensorflow as tf 7 | 8 | 9 | class AutoencoderDataset(object): 10 | 11 | def __init__(self, files, size, batch_size, epochs): 12 | """ 13 | Class Instantiate 14 | :param files type tensor(tf.string): files that are part of the dataset 15 | :param size type 2-tuple: (width, height) in pixels 16 | :param batch_size type int: the batch_size 17 | :param epochs type int: number of epochs 18 | """ 19 | self.size = size 20 | self.dataset = tf.data.Dataset.from_tensor_slices(files) 21 | self.dataset = self.dataset.map( 22 | self._parse_function, num_parallel_calls=4) 23 | self.dataset = self.dataset.batch(batch_size) 24 | self.dataset = self.dataset.shuffle(1000) 25 | 
self.dataset = self.dataset.repeat() 26 | #self.iterator = self.dataset.make_initializable_iterator() 27 | 28 | def _parse_function(self, filename): 29 | """ 30 | Function to load image from file and convert to tensor 31 | :param filename type str: path to image 32 | :rtype rank-3 tensor 33 | """ 34 | image_string = tf.read_file(filename) 35 | image_decoded = tf.image.decode_jpeg(image_string) 36 | image_resized = tf.image.resize_images(image_decoded, self.size) 37 | image_resized = tf.image.convert_image_dtype(image_resized, tf.float32) 38 | return image_resized 39 | 40 | def get_iterator(self): 41 | """ 42 | https://stackoverflow.com/questions/50955798/keras-model-fit-with-tf-dataset-api-validation-data 43 | """ 44 | iterator = self.dataset.make_one_shot_iterator() 45 | next_val = iterator.get_next() 46 | 47 | with tf.keras.backend.get_session().as_default() as sess: 48 | while True: 49 | inputs = sess.run(next_val) 50 | return inputs 51 | -------------------------------------------------------------------------------- /code/autodrive/keras-rl/examples/cem_cartpole.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import gym 3 | 4 | from keras.models import Sequential 5 | from keras.layers import Dense, Activation, Flatten 6 | from keras.optimizers import Adam 7 | 8 | from rl.agents.cem import CEMAgent 9 | from rl.memory import EpisodeParameterMemory 10 | 11 | ENV_NAME = 'CartPole-v0' 12 | 13 | 14 | # Get the environment and extract the number of actions. 
15 | env = gym.make(ENV_NAME) 16 | np.random.seed(123) 17 | env.seed(123) 18 | 19 | nb_actions = env.action_space.n 20 | obs_dim = env.observation_space.shape[0] 21 | 22 | # Option 1 : Simple model 23 | model = Sequential() 24 | model.add(Flatten(input_shape=(1,) + env.observation_space.shape)) 25 | model.add(Dense(nb_actions)) 26 | model.add(Activation('softmax')) 27 | 28 | # Option 2: deep network 29 | # model = Sequential() 30 | # model.add(Flatten(input_shape=(1,) + env.observation_space.shape)) 31 | # model.add(Dense(16)) 32 | # model.add(Activation('relu')) 33 | # model.add(Dense(16)) 34 | # model.add(Activation('relu')) 35 | # model.add(Dense(16)) 36 | # model.add(Activation('relu')) 37 | # model.add(Dense(nb_actions)) 38 | # model.add(Activation('softmax')) 39 | 40 | 41 | print(model.summary()) 42 | 43 | 44 | # Finally, we configure and compile our agent. You can use every built-in Keras optimizer and 45 | # even the metrics! 46 | memory = EpisodeParameterMemory(limit=1000, window_length=1) 47 | 48 | cem = CEMAgent(model=model, nb_actions=nb_actions, memory=memory, 49 | batch_size=50, nb_steps_warmup=2000, train_interval=50, elite_frac=0.05) 50 | cem.compile() 51 | 52 | # Okay, now it's time to learn something! We visualize the training here for show, but this 53 | # slows down training quite a lot. You can always safely abort the training prematurely using 54 | # Ctrl + C. 55 | cem.fit(env, nb_steps=100000, visualize=False, verbose=2) 56 | 57 | # After training is done, we save the best weights. 58 | cem.save_weights('cem_{}_params.h5f'.format(ENV_NAME), overwrite=True) 59 | 60 | # Finally, evaluate our algorithm for 5 episodes. 
61 | cem.test(env, nb_episodes=5, visualize=True) 62 | -------------------------------------------------------------------------------- /code/autodrive/carla-client/carla/util.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 2 | # Barcelona (UAB). 3 | # 4 | # This work is licensed under the terms of the MIT license. 5 | # For a copy, see . 6 | 7 | import datetime 8 | import sys 9 | 10 | from contextlib import contextmanager 11 | 12 | 13 | @contextmanager 14 | def make_connection(client_type, *args, **kwargs): 15 | """Context manager to create and connect a networking client object.""" 16 | client = None 17 | try: 18 | client = client_type(*args, **kwargs) 19 | client.connect() 20 | yield client 21 | finally: 22 | if client is not None: 23 | client.disconnect() 24 | 25 | 26 | class StopWatch(object): 27 | def __init__(self): 28 | self.start = datetime.datetime.now() 29 | self.end = None 30 | 31 | def restart(self): 32 | self.start = datetime.datetime.now() 33 | self.end = None 34 | 35 | def stop(self): 36 | self.end = datetime.datetime.now() 37 | 38 | def seconds(self): 39 | return (self.end - self.start).total_seconds() 40 | 41 | def milliseconds(self): 42 | return 1000.0 * self.seconds() 43 | 44 | 45 | def to_hex_str(header): 46 | return ':'.join('{:02x}'.format(ord(c)) for c in header) 47 | 48 | 49 | if sys.version_info >= (3, 3): 50 | 51 | import shutil 52 | 53 | def print_over_same_line(text): 54 | terminal_width = shutil.get_terminal_size((80, 20)).columns 55 | empty_space = max(0, terminal_width - len(text)) 56 | sys.stdout.write('\r' + text + empty_space * ' ') 57 | sys.stdout.flush() 58 | 59 | else: 60 | 61 | # Workaround for older Python versions. 
62 | def print_over_same_line(text): 63 | line_length = max(print_over_same_line.last_line_length, len(text)) 64 | empty_space = max(0, line_length - len(text)) 65 | sys.stdout.write('\r' + text + empty_space * ' ') 66 | sys.stdout.flush() 67 | print_over_same_line.last_line_length = line_length 68 | print_over_same_line.last_line_length = 0 69 | -------------------------------------------------------------------------------- /code/autodrive/keras-rl/rl/random.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | import numpy as np 3 | 4 | 5 | class RandomProcess(object): 6 | def reset_states(self): 7 | pass 8 | 9 | 10 | class AnnealedGaussianProcess(RandomProcess): 11 | def __init__(self, mu, sigma, sigma_min, n_steps_annealing): 12 | self.mu = mu 13 | self.sigma = sigma 14 | self.n_steps = 0 15 | 16 | if sigma_min is not None: 17 | self.m = -float(sigma - sigma_min) / float(n_steps_annealing) 18 | self.c = sigma 19 | self.sigma_min = sigma_min 20 | else: 21 | self.m = 0. 
22 | self.c = sigma 23 | self.sigma_min = sigma 24 | 25 | @property 26 | def current_sigma(self): 27 | sigma = max(self.sigma_min, self.m * float(self.n_steps) + self.c) 28 | return sigma 29 | 30 | 31 | class GaussianWhiteNoiseProcess(AnnealedGaussianProcess): 32 | def __init__(self, mu=0., sigma=1., sigma_min=None, n_steps_annealing=1000, size=1): 33 | super(GaussianWhiteNoiseProcess, self).__init__(mu=mu, sigma=sigma, sigma_min=sigma_min, n_steps_annealing=n_steps_annealing) 34 | self.size = size 35 | 36 | def sample(self): 37 | sample = np.random.normal(self.mu, self.current_sigma, self.size) 38 | self.n_steps += 1 39 | return sample 40 | 41 | # Based on http://math.stackexchange.com/questions/1287634/implementing-ornstein-uhlenbeck-in-matlab 42 | class OrnsteinUhlenbeckProcess(AnnealedGaussianProcess): 43 | def __init__(self, theta, mu=0., sigma=1., dt=1e-2, size=1, sigma_min=None, n_steps_annealing=1000): 44 | super(OrnsteinUhlenbeckProcess, self).__init__(mu=mu, sigma=sigma, sigma_min=sigma_min, n_steps_annealing=n_steps_annealing) 45 | self.theta = theta 46 | self.mu = mu 47 | self.dt = dt 48 | self.size = size 49 | self.reset_states() 50 | 51 | def sample(self): 52 | x = self.x_prev + self.theta * (self.mu - self.x_prev) * self.dt + self.current_sigma * np.sqrt(self.dt) * np.random.normal(size=self.size) 53 | self.x_prev = x 54 | self.n_steps += 1 55 | return x 56 | 57 | def reset_states(self): 58 | self.x_prev = np.random.normal(self.mu,self.current_sigma,self.size) 59 | -------------------------------------------------------------------------------- /code/autodrive/keras-rl/examples/duel_dqn_cartpole.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import gym 3 | 4 | from keras.models import Sequential 5 | from keras.layers import Dense, Activation, Flatten 6 | from keras.optimizers import Adam 7 | 8 | from rl.agents.dqn import DQNAgent 9 | from rl.policy import BoltzmannQPolicy 10 | from 
rl.memory import SequentialMemory 11 | 12 | 13 | ENV_NAME = 'CartPole-v0' 14 | 15 | 16 | # Get the environment and extract the number of actions. 17 | env = gym.make(ENV_NAME) 18 | np.random.seed(123) 19 | env.seed(123) 20 | nb_actions = env.action_space.n 21 | 22 | # Next, we build a very simple model regardless of the dueling architecture 23 | # if you enable dueling network in DQN , DQN will build a dueling network base on your model automatically 24 | # Also, you can build a dueling network by yourself and turn off the dueling network in DQN. 25 | model = Sequential() 26 | model.add(Flatten(input_shape=(1,) + env.observation_space.shape)) 27 | model.add(Dense(16)) 28 | model.add(Activation('relu')) 29 | model.add(Dense(16)) 30 | model.add(Activation('relu')) 31 | model.add(Dense(16)) 32 | model.add(Activation('relu')) 33 | model.add(Dense(nb_actions, activation='linear')) 34 | print(model.summary()) 35 | 36 | # Finally, we configure and compile our agent. You can use every built-in Keras optimizer and 37 | # even the metrics! 38 | memory = SequentialMemory(limit=50000, window_length=1) 39 | policy = BoltzmannQPolicy() 40 | # enable the dueling network 41 | # you can specify the dueling_type to one of {'avg','max','naive'} 42 | dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=10, 43 | enable_dueling_network=True, dueling_type='avg', target_model_update=1e-2, policy=policy) 44 | dqn.compile(Adam(lr=1e-3), metrics=['mae']) 45 | 46 | # Okay, now it's time to learn something! We visualize the training here for show, but this 47 | # slows down training quite a lot. You can always safely abort the training prematurely using 48 | # Ctrl + C. 49 | dqn.fit(env, nb_steps=50000, visualize=False, verbose=2) 50 | 51 | # After training is done, we save the final weights. 52 | dqn.save_weights('duel_dqn_{}_weights.h5f'.format(ENV_NAME), overwrite=True) 53 | 54 | # Finally, evaluate our algorithm for 5 episodes. 
55 | dqn.test(env, nb_episodes=5, visualize=False) 56 | -------------------------------------------------------------------------------- /code/autodrive/keras-rl/rl/common/vec_env/__init__.py: -------------------------------------------------------------------------------- 1 | # Inspired from VecEnv from OpenAI Baselines 2 | 3 | class VecEnv(object): 4 | """ 5 | An abstract asynchronous, vectorized environment. 6 | """ 7 | def __init__(self, num_envs, observation_space, action_space): 8 | self.num_envs = num_envs 9 | self.observation_space = observation_space 10 | self.action_space = action_space 11 | 12 | def reset(self): 13 | """ 14 | Reset all the environments and return an array of 15 | observations, or a tuple of observation arrays. 16 | If step_async is still doing work, that work will 17 | be cancelled and step_wait() should not be called 18 | until step_async() is invoked again. 19 | """ 20 | pass 21 | 22 | def step_async(self, actions): 23 | """ 24 | Tell all the environments to start taking a step 25 | with the given actions. 26 | Call step_wait() to get the results of the step. 27 | You should not call this if a step_async run is 28 | already pending. 29 | """ 30 | raise NotImplementedError() 31 | 32 | def step_wait(self): 33 | """ 34 | Wait for the step taken with step_async(). 35 | Returns (obs, rews, dones, infos): 36 | - obs: an array of observations, or a tuple of 37 | arrays of observations. 38 | - rews: an array of rewards 39 | - dones: an array of "episode done" booleans 40 | - infos: a sequence of info objects 41 | """ 42 | raise NotImplementedError() 43 | 44 | def close(self): 45 | """ 46 | Clean up the environments' resources. 
47 | """ 48 | raise NotImplementedError() 49 | 50 | def step(self, actions): 51 | self.step_async(actions) 52 | return self.step_wait() 53 | 54 | def render(self, mode='human'): 55 | logger.warn('Render not defined for %s'%self) 56 | 57 | def seed(self, i): 58 | raise NotImplementedError() 59 | 60 | @property 61 | def unwrapped(self): 62 | if isinstance(self, VecEnvWrapper): 63 | return self.venv.unwrapped 64 | else: 65 | return self 66 | 67 | class CloudpickleWrapper(object): 68 | """ 69 | Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle) 70 | """ 71 | def __init__(self, x): 72 | self.x = x 73 | def __getstate__(self): 74 | import cloudpickle 75 | return cloudpickle.dumps(self.x) 76 | def __setstate__(self, ob): 77 | import pickle 78 | self.x = pickle.loads(ob) 79 | -------------------------------------------------------------------------------- /code/autodrive/keras-rl/docs/sources/core.md: -------------------------------------------------------------------------------- 1 | [[source]](https://github.com/keras-rl/keras-rl/blob/master/rl/core.py#L11) 2 | ### Agent 3 | 4 | ```python 5 | rl.core.Agent(processor=None) 6 | ``` 7 | 8 | Abstract base class for all implemented agents. 9 | 10 | Each agent interacts with the environment (as defined by the `Env` class) by first observing the 11 | state of the environment. Based on this observation the agent changes the environment by performing 12 | an action. 13 | 14 | Do not use this abstract base class directly but instead use one of the concrete agents implemented. 15 | Each agent realizes a reinforcement learning algorithm. Since all agents conform to the same 16 | interface, you can use them interchangeably. 
17 | 18 | To implement your own agent, you have to implement the following methods: 19 | 20 | - `forward` 21 | - `backward` 22 | - `compile` 23 | - `load_weights` 24 | - `save_weights` 25 | - `layers` 26 | 27 | __Arguments__ 28 | 29 | - __processor__ (`Processor` instance): See [Processor](#processor) for details. 30 | 31 | ---- 32 | 33 | [[source]](https://github.com/keras-rl/keras-rl/blob/master/rl/core.py#L454) 34 | ### Processor 35 | 36 | ```python 37 | rl.core.Processor() 38 | ``` 39 | 40 | Abstract base class for implementing processors. 41 | 42 | A processor acts as a coupling mechanism between an `Agent` and its `Env`. This can 43 | be necessary if your agent has different requirements with respect to the form of the 44 | observations, actions, and rewards of the environment. By implementing a custom processor, 45 | you can effectively translate between the two without having to change the underlaying 46 | implementation of the agent or environment. 47 | 48 | Do not use this abstract base class directly but instead use one of the concrete implementations 49 | or write your own. 50 | 51 | ---- 52 | 53 | [[source]](https://github.com/keras-rl/keras-rl/blob/master/rl/core.py#L533) 54 | ### Env 55 | 56 | ```python 57 | rl.core.Env() 58 | ``` 59 | 60 | The abstract environment class that is used by all agents. This class has the exact 61 | same API that OpenAI Gym uses so that integrating with it is trivial. In contrast to the 62 | OpenAI Gym implementation, this class only defines the abstract methods without any actual 63 | implementation. 64 | 65 | ---- 66 | 67 | [[source]](https://github.com/keras-rl/keras-rl/blob/master/rl/core.py#L609) 68 | ### Space 69 | 70 | ```python 71 | rl.core.Space() 72 | ``` 73 | 74 | Abstract model for a space that is used for the state and action spaces. This class has the 75 | exact same API that OpenAI Gym uses so that integrating with it is trivial. 
76 | 77 | -------------------------------------------------------------------------------- /code/autodrive/keras-rl/.travis.yml: -------------------------------------------------------------------------------- 1 | sudo: required 2 | dist: xenial 3 | language: python 4 | matrix: 5 | include: 6 | - python: 3.6 7 | env: KERAS_BACKEND=theano 8 | - python: 3.6 9 | env: KERAS_BACKEND=tensorflow 10 | - python: 2.7 11 | env: KERAS_BACKEND=theano 12 | - python: 2.7 13 | env: KERAS_BACKEND=tensorflow 14 | - python: 2.7 15 | env: KERAS_BACKEND=tensorflow TEST_MODE=PEP8 16 | - python: 2.7 17 | env: KERAS_BACKEND=theano TEST_MODE=INTEGRATION 18 | - python: 3.6 19 | env: KERAS_BACKEND=theano TEST_MODE=INTEGRATION 20 | - python: 2.7 21 | env: KERAS_BACKEND=tensorflow TEST_MODE=INTEGRATION 22 | - python: 3.6 23 | env: KERAS_BACKEND=tensorflow TEST_MODE=INTEGRATION 24 | install: 25 | # Adopted from https://github.com/fchollet/keras/blob/master/.travis.yml. 26 | - if [[ "$TRAVIS_PYTHON_VERSION" == "2.7" ]]; then 27 | wget https://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh -O miniconda.sh; 28 | else 29 | wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh; 30 | fi 31 | - bash miniconda.sh -b -p $HOME/miniconda 32 | - export PATH="$HOME/miniconda/bin:$PATH" 33 | - hash -r 34 | - conda config --set always_yes yes --set changeps1 no 35 | - conda update -q conda 36 | # Useful for debugging any issues with conda 37 | - conda info -a 38 | 39 | - conda create -q -n test-environment python=$TRAVIS_PYTHON_VERSION numpy matplotlib pandas pytest h5py 40 | - source activate test-environment 41 | - pip install pytest-xdist 42 | # See https://github.com/pytest-dev/pytest-cov/issues/124 for details 43 | - pip install pep8 pytest-pep8 44 | - pip install tensorflow 45 | # Bleeding-edge: pip install git+https://github.com/Theano/Theano.git 46 | - pip install "theano<1.0" 47 | - pip install gym 48 | - pip install scipy 49 | # Bleeding-edge: pip 
install git+https://github.com/fchollet/keras.git; 50 | - pip install "keras>=2.0.7"; 51 | - python setup.py install 52 | 53 | # command to run tests. 54 | script: 55 | # Run keras backend init to initialize backend config. 56 | - python -c "import keras.backend" 57 | # Set up keras backend 58 | - sed -i -e 's/"backend":[[:space:]]*"[^"]*/"backend":\ "'$KERAS_BACKEND'/g' ~/.keras/keras.json; 59 | - echo -e "Running tests with the following config:\n$(cat ~/.keras/keras.json)" 60 | - if [[ "$TEST_MODE" == "INTEGRATION" ]]; then 61 | PYTHONPATH=$PWD:$PYTHONPATH py.test tests/integration; 62 | elif [[ "$TEST_MODE" == "PEP8" ]]; then 63 | PYTHONPATH=$PWD:$PYTHONPATH py.test --pep8 -m pep8 -n0; 64 | else 65 | PYTHONPATH=$PWD:$PYTHONPATH py.test tests/; 66 | fi 67 | after_success: 68 | - coveralls 69 | -------------------------------------------------------------------------------- /code/autodrive/keras-rl/rl/processors.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from rl.core import Processor 4 | from rl.util import WhiteningNormalizer 5 | 6 | 7 | class MultiInputProcessor(Processor): 8 | """Converts observations from an environment with multiple observations for use in a neural network 9 | policy. 10 | 11 | In some cases, you have environments that return multiple different observations per timestep 12 | (in a robotics context, for example, a camera may be used to view the scene and a joint encoder may 13 | be used to report the angles for each joint). Usually, this can be handled by a policy that has 14 | multiple inputs, one for each modality. However, observations are returned by the environment 15 | in the form of a tuple `[(modality1_t, modality2_t, ..., modalityn_t) for t in T]` but the neural network 16 | expects them in per-modality batches like so: `[[modality1_1, ..., modality1_T], ..., [[modalityn_1, ..., modalityn_T]]`. 
17 | This processor converts observations appropriate for this use case. 18 | 19 | # Arguments 20 | nb_inputs (integer): The number of inputs, that is different modalities, to be used. 21 | Your neural network that you use for the policy must have a corresponding number of 22 | inputs. 23 | """ 24 | def __init__(self, nb_inputs): 25 | self.nb_inputs = nb_inputs 26 | 27 | def process_state_batch(self, state_batch): 28 | input_batches = [[] for x in range(self.nb_inputs)] 29 | for state in state_batch: 30 | processed_state = [[] for x in range(self.nb_inputs)] 31 | for observation in state: 32 | assert len(observation) == self.nb_inputs 33 | for o, s in zip(observation, processed_state): 34 | s.append(o) 35 | for idx, s in enumerate(processed_state): 36 | input_batches[idx].append(s) 37 | return [np.array(x) for x in input_batches] 38 | 39 | 40 | class WhiteningNormalizerProcessor(Processor): 41 | """Normalizes the observations to have zero mean and standard deviation of one, 42 | i.e. it applies whitening to the inputs. 43 | 44 | This typically helps significantly with learning, especially if different dimensions are 45 | on different scales. However, it complicates training in the sense that you will have to store 46 | these weights alongside the policy if you intend to load it later. It is the responsibility of 47 | the user to do so. 
48 | """ 49 | def __init__(self): 50 | self.normalizer = None 51 | 52 | def process_state_batch(self, batch): 53 | if self.normalizer is None: 54 | self.normalizer = WhiteningNormalizer(shape=batch.shape[1:], dtype=batch.dtype) 55 | self.normalizer.update(batch) 56 | return self.normalizer.normalize(batch) 57 | -------------------------------------------------------------------------------- /code/autodrive/keras-rl/tests/rl/agents/test_ddpg.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | from __future__ import absolute_import 3 | 4 | import pytest 5 | import numpy as np 6 | from numpy.testing import assert_allclose 7 | 8 | from keras.models import Model, Sequential 9 | from keras.layers import Input, Dense, Flatten, Concatenate 10 | 11 | from rl.agents.ddpg import DDPGAgent 12 | from rl.memory import SequentialMemory 13 | from rl.processors import MultiInputProcessor 14 | 15 | from ..util import MultiInputTestEnv 16 | 17 | 18 | def test_single_ddpg_input(): 19 | nb_actions = 2 20 | 21 | actor = Sequential() 22 | actor.add(Flatten(input_shape=(2, 3))) 23 | actor.add(Dense(nb_actions)) 24 | 25 | action_input = Input(shape=(nb_actions,), name='action_input') 26 | observation_input = Input(shape=(2, 3), name='observation_input') 27 | x = Concatenate()([action_input, Flatten()(observation_input)]) 28 | x = Dense(1)(x) 29 | critic = Model(inputs=[action_input, observation_input], outputs=x) 30 | 31 | memory = SequentialMemory(limit=10, window_length=2) 32 | agent = DDPGAgent(actor=actor, critic=critic, critic_action_input=action_input, memory=memory, 33 | nb_actions=2, nb_steps_warmup_critic=5, nb_steps_warmup_actor=5, batch_size=4) 34 | agent.compile('sgd') 35 | agent.fit(MultiInputTestEnv((3,)), nb_steps=10) 36 | 37 | 38 | def test_multi_ddpg_input(): 39 | nb_actions = 2 40 | 41 | actor_observation_input1 = Input(shape=(2, 3), name='actor_observation_input1') 42 | actor_observation_input2 = 
Input(shape=(2, 4), name='actor_observation_input2') 43 | actor = Sequential() 44 | x = Concatenate()([actor_observation_input1, actor_observation_input2]) 45 | x = Flatten()(x) 46 | x = Dense(nb_actions)(x) 47 | actor = Model(inputs=[actor_observation_input1, actor_observation_input2], outputs=x) 48 | 49 | action_input = Input(shape=(nb_actions,), name='action_input') 50 | critic_observation_input1 = Input(shape=(2, 3), name='critic_observation_input1') 51 | critic_observation_input2 = Input(shape=(2, 4), name='critic_observation_input2') 52 | x = Concatenate()([critic_observation_input1, critic_observation_input2]) 53 | x = Concatenate()([action_input, Flatten()(x)]) 54 | x = Dense(1)(x) 55 | critic = Model(inputs=[action_input, critic_observation_input1, critic_observation_input2], outputs=x) 56 | 57 | processor = MultiInputProcessor(nb_inputs=2) 58 | memory = SequentialMemory(limit=10, window_length=2) 59 | agent = DDPGAgent(actor=actor, critic=critic, critic_action_input=action_input, memory=memory, 60 | nb_actions=2, nb_steps_warmup_critic=5, nb_steps_warmup_actor=5, batch_size=4, 61 | processor=processor) 62 | agent.compile('sgd') 63 | agent.fit(MultiInputTestEnv([(3,), (4,)]), nb_steps=10) 64 | -------------------------------------------------------------------------------- /code/autodrive/keras-rl/examples/ddpg_pendulum.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import gym 3 | 4 | from keras.models import Sequential, Model 5 | from keras.layers import Dense, Activation, Flatten, Input, Concatenate 6 | from keras.optimizers import Adam 7 | 8 | from rl.agents import DDPGAgent 9 | from rl.memory import SequentialMemory 10 | from rl.random import OrnsteinUhlenbeckProcess 11 | 12 | 13 | ENV_NAME = 'Pendulum-v0' 14 | gym.undo_logger_setup() 15 | 16 | 17 | # Get the environment and extract the number of actions. 
env = gym.make(ENV_NAME)
np.random.seed(123)
env.seed(123)
assert len(env.action_space.shape) == 1
nb_actions = env.action_space.shape[0]

# Actor: flatten the windowed observation, then a small MLP
# (three Dense(16)+ReLU stages) ending in a linear action head.
actor = Sequential()
actor.add(Flatten(input_shape=(1,) + env.observation_space.shape))
for _ in range(3):
    actor.add(Dense(16))
    actor.add(Activation('relu'))
actor.add(Dense(nb_actions))
actor.add(Activation('linear'))
print(actor.summary())

# Critic: concatenates the action with the flattened observation and
# scores the pair with a single linear output.
action_input = Input(shape=(nb_actions,), name='action_input')
observation_input = Input(shape=(1,) + env.observation_space.shape, name='observation_input')
net = Concatenate()([action_input, Flatten()(observation_input)])
for _ in range(3):
    net = Dense(32)(net)
    net = Activation('relu')(net)
net = Dense(1)(net)
net = Activation('linear')(net)
critic = Model(inputs=[action_input, observation_input], outputs=net)
print(critic.summary())

# Configure and compile the DDPG agent; exploration noise comes from an
# Ornstein-Uhlenbeck process added to the actor's output.
memory = SequentialMemory(limit=100000, window_length=1)
random_process = OrnsteinUhlenbeckProcess(size=nb_actions, theta=.15, mu=0., sigma=.3)
agent = DDPGAgent(nb_actions=nb_actions, actor=actor, critic=critic, critic_action_input=action_input,
                  memory=memory, nb_steps_warmup_critic=100, nb_steps_warmup_actor=100,
                  random_process=random_process, gamma=.99, target_model_update=1e-3)
agent.compile(Adam(lr=.001, clipnorm=1.), metrics=['mae'])

# Okay, now it's time to learn something! We visualize the training here for show, but this
# slows down training quite a lot.
You can always safely abort the training prematurely using 63 | # Ctrl + C. 64 | agent.fit(env, nb_steps=50000, visualize=True, verbose=1, nb_max_episode_steps=200) 65 | 66 | # After training is done, we save the final weights. 67 | agent.save_weights('ddpg_{}_weights.h5f'.format(ENV_NAME), overwrite=True) 68 | 69 | # Finally, evaluate our algorithm for 5 episodes. 70 | agent.test(env, nb_episodes=5, visualize=True, nb_max_episode_steps=200) 71 | -------------------------------------------------------------------------------- /code/autodrive/carla-client/carla/driving_benchmark/experiment_suites/basic_experiment_suite.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 2 | # Barcelona (UAB). 3 | # 4 | # This work is licensed under the terms of the MIT license. 5 | # For a copy, see . 6 | 7 | 8 | from __future__ import print_function 9 | 10 | from carla.driving_benchmark.experiment import Experiment 11 | from carla.sensor import Camera 12 | from carla.settings import CarlaSettings 13 | 14 | from .experiment_suite import ExperimentSuite 15 | 16 | 17 | class BasicExperimentSuite(ExperimentSuite): 18 | 19 | @property 20 | def train_weathers(self): 21 | return [1] 22 | 23 | @property 24 | def test_weathers(self): 25 | return [1] 26 | 27 | def build_experiments(self): 28 | """ 29 | Creates the whole set of experiment objects, 30 | The experiments created depends on the selected Town. 
31 | 32 | """ 33 | 34 | # We check the town, based on that we define the town related parameters 35 | # The size of the vector is related to the number of tasks, inside each 36 | # task there is also multiple poses ( start end, positions ) 37 | if self._city_name == 'Town01': 38 | poses_tasks = [[[7, 3]], [[138, 17]], [[140, 134]], [[140, 134]]] 39 | vehicles_tasks = [0, 0, 0, 20] 40 | pedestrians_tasks = [0, 0, 0, 50] 41 | else: 42 | poses_tasks = [[[4, 2]], [[37, 76]], [[19, 66]], [[19, 66]]] 43 | vehicles_tasks = [0, 0, 0, 15] 44 | pedestrians_tasks = [0, 0, 0, 50] 45 | 46 | # We set the camera 47 | # This single RGB camera is used on every experiment 48 | 49 | camera = Camera('CameraRGB') 50 | camera.set(FOV=100) 51 | camera.set_image_size(800, 600) 52 | camera.set_position(2.0, 0.0, 1.4) 53 | camera.set_rotation(-15.0, 0, 0) 54 | 55 | # Based on the parameters, creates a vector with experiment objects. 56 | experiments_vector = [] 57 | for weather in self.weathers: 58 | 59 | for iteration in range(len(poses_tasks)): 60 | poses = poses_tasks[iteration] 61 | vehicles = vehicles_tasks[iteration] 62 | pedestrians = pedestrians_tasks[iteration] 63 | 64 | conditions = CarlaSettings() 65 | conditions.set( 66 | SendNonPlayerAgentsInfo=True, 67 | NumberOfVehicles=vehicles, 68 | NumberOfPedestrians=pedestrians, 69 | WeatherId=weather 70 | 71 | ) 72 | # Add all the cameras that were set for this experiments 73 | conditions.add_sensor(camera) 74 | experiment = Experiment() 75 | experiment.set( 76 | Conditions=conditions, 77 | Poses=poses, 78 | Task=iteration, 79 | Repetitions=1 80 | ) 81 | experiments_vector.append(experiment) 82 | 83 | return experiments_vector 84 | -------------------------------------------------------------------------------- /code/autodrive/carla_rl/renderer.py: -------------------------------------------------------------------------------- 1 | import pygame 2 | from pygame.locals import * 3 | import numpy as np 4 | 5 | 6 | class Renderer(object): 7 
| def __init__(self): 8 | self.size = (1, 1) 9 | self.screen = None 10 | self.clock = pygame.time.Clock() 11 | self.display = pygame.display 12 | self.fps = 30 13 | self.pressed_keys = [] 14 | self.is_open = False 15 | 16 | def create_screen(self, width, height): 17 | """ 18 | Creates a pygame window 19 | :param width: the width of the window 20 | :param height: the height of the window 21 | :return: None 22 | """ 23 | self.size = (width, height) 24 | self.screen = self.display.set_mode(self.size, HWSURFACE | DOUBLEBUF) 25 | self.display.set_caption("Renderer") 26 | self.is_open = True 27 | 28 | def normalize_image(self, image): 29 | """ 30 | Normalize image values to be between 0 and 255 31 | :param image: 2D/3D array containing an image with arbitrary values 32 | :return: the input image with values rescaled to 0-255 33 | """ 34 | image_min, image_max = image.min(), image.max() 35 | return 255.0 * (image - image_min) / (image_max - image_min) 36 | 37 | def render_image(self, image): 38 | """ 39 | Render the given image to the pygame window 40 | :param image: a grayscale or color image in an arbitrary size. 
assumes that the channels are the last axis 41 | :return: None 42 | """ 43 | if self.is_open: 44 | if len(image.shape) == 2: 45 | image = np.stack([image] * 3) 46 | if len(image.shape) == 3: 47 | if image.shape[0] == 3 or image.shape[0] == 1: 48 | image = np.transpose(image, (1, 2, 0)) 49 | surface = pygame.surfarray.make_surface(image.swapaxes(0, 1)) 50 | surface = pygame.transform.scale(surface, self.size) 51 | self.screen.blit(surface, (0, 0)) 52 | self.display.flip() 53 | self.clock.tick() 54 | self.get_events() 55 | 56 | def get_events(self): 57 | """ 58 | Get all the window events in the last tick and reponse accordingly 59 | :return: None 60 | """ 61 | for event in pygame.event.get(): 62 | if event.type == pygame.KEYDOWN: 63 | self.pressed_keys.append(event.key) 64 | # esc pressed 65 | if event.key == pygame.K_ESCAPE: 66 | self.close() 67 | elif event.type == pygame.KEYUP: 68 | if event.key in self.pressed_keys: 69 | self.pressed_keys.remove(event.key) 70 | elif event.type == pygame.QUIT: 71 | self.close() 72 | 73 | def get_key_names(self, key_ids): 74 | """ 75 | Get the key name for each key index in the list 76 | :param key_ids: a list of key id's 77 | :return: a list of key names corresponding to the key id's 78 | """ 79 | return [pygame.key.name(key_id) for key_id in key_ids] 80 | 81 | def close(self): 82 | """ 83 | Close the pygame window 84 | :return: None 85 | """ 86 | self.is_open = False 87 | pygame.quit() 88 | -------------------------------------------------------------------------------- /code/autodrive/keras-rl/tests/rl/test_util.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | import pytest 3 | import numpy as np 4 | from numpy.testing import assert_allclose 5 | 6 | from keras.models import Model, Sequential 7 | from keras.layers import Input, Dense, Concatenate 8 | from keras.optimizers import SGD 9 | import keras.backend as K 10 | 11 | from rl.util import 
clone_optimizer, clone_model, huber_loss, WhiteningNormalizer 12 | 13 | 14 | def test_clone_sequential_model(): 15 | seq = Sequential() 16 | seq.add(Dense(8, input_shape=(3,))) 17 | seq.compile(optimizer='sgd', loss='mse') 18 | 19 | clone = clone_model(seq) 20 | clone.compile(optimizer='sgd', loss='mse') 21 | 22 | ins = np.random.random((4, 3)) 23 | y_pred_seq = seq.predict_on_batch(ins) 24 | y_pred_clone = clone.predict_on_batch(ins) 25 | assert y_pred_seq.shape == y_pred_clone.shape 26 | assert_allclose(y_pred_seq, y_pred_clone) 27 | 28 | 29 | def test_clone_graph_model(): 30 | in1 = Input(shape=(2,)) 31 | in2 = Input(shape=(3,)) 32 | x = Dense(8)(Concatenate()([in1, in2])) 33 | graph = Model([in1, in2], x) 34 | graph.compile(optimizer='sgd', loss='mse') 35 | 36 | clone = clone_model(graph) 37 | clone.compile(optimizer='sgd', loss='mse') 38 | 39 | ins = [np.random.random((4, 2)), np.random.random((4, 3))] 40 | y_pred_graph = graph.predict_on_batch(ins) 41 | y_pred_clone = clone.predict_on_batch(ins) 42 | assert y_pred_graph.shape == y_pred_clone.shape 43 | assert_allclose(y_pred_graph, y_pred_clone) 44 | 45 | 46 | def test_clone_optimizer(): 47 | lr, momentum, clipnorm, clipvalue = np.random.random(size=4) 48 | optimizer = SGD(lr=lr, momentum=momentum, clipnorm=clipnorm, clipvalue=clipvalue) 49 | clone = clone_optimizer(optimizer) 50 | 51 | assert isinstance(clone, SGD) 52 | assert K.get_value(optimizer.lr) == K.get_value(clone.lr) 53 | assert K.get_value(optimizer.momentum) == K.get_value(clone.momentum) 54 | assert optimizer.clipnorm == clone.clipnorm 55 | assert optimizer.clipvalue == clone.clipvalue 56 | 57 | 58 | def test_clone_optimizer_from_string(): 59 | clone = clone_optimizer('sgd') 60 | assert isinstance(clone, SGD) 61 | 62 | 63 | def test_huber_loss(): 64 | a = np.array([1., 1.5, 2., 4.]) 65 | b = np.array([1.5, 1., 4., 2.]) 66 | assert_allclose(K.eval(huber_loss(a, b, 1.)), np.array([.125, .125, 1.5, 1.5])) 67 | assert_allclose(K.eval(huber_loss(a, 
b, 3.)), np.array([.125, .125, 2., 2.])) 68 | assert_allclose(K.eval(huber_loss(a, b, np.inf)), np.array([.125, .125, 2., 2.])) 69 | 70 | 71 | def test_whitening_normalizer(): 72 | x = np.random.normal(loc=.2, scale=2., size=(1000, 5)) 73 | normalizer = WhiteningNormalizer(shape=(5,)) 74 | normalizer.update(x[:500]) 75 | normalizer.update(x[500:]) 76 | 77 | assert_allclose(normalizer.mean, np.mean(x, axis=0)) 78 | assert_allclose(normalizer.std, np.std(x, axis=0)) 79 | 80 | x_norm = normalizer.normalize(x) 81 | assert_allclose(np.mean(x_norm, axis=0), np.zeros(5, dtype=normalizer.dtype), atol=1e-5) 82 | assert_allclose(np.std(x_norm, axis=0), np.ones(5, dtype=normalizer.dtype), atol=1e-5) 83 | 84 | x_denorm = normalizer.denormalize(x_norm) 85 | assert_allclose(x_denorm, x) 86 | 87 | 88 | if __name__ == '__main__': 89 | pytest.main([__file__]) 90 | -------------------------------------------------------------------------------- /code/autodrive/keras-rl/examples/ddpg_mujoco.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | import gym 4 | from gym import wrappers 5 | 6 | from keras.models import Sequential, Model 7 | from keras.layers import Dense, Activation, Flatten, Input, Concatenate 8 | from keras.optimizers import Adam 9 | 10 | from rl.processors import WhiteningNormalizerProcessor 11 | from rl.agents import DDPGAgent 12 | from rl.memory import SequentialMemory 13 | from rl.random import OrnsteinUhlenbeckProcess 14 | 15 | from carla_rl.carla_environment_wrapper import CarlaEnvironmentWrapper as CarlaEnv 16 | from carla_settings import get_carla_settings 17 | 18 | 19 | class MujocoProcessor(WhiteningNormalizerProcessor): 20 | def process_action(self, action): 21 | return np.clip(action, -1., 1.) 22 | 23 | 24 | ENV_NAME = "Carla" 25 | # Get the environment and extract the number of actions. 
# NOTE(review): hard-coded, machine-specific path — presumably this should
# come from a CLI flag or config file; left untouched to preserve behavior.
config_file = "/media/ankurrc/new_volume/689_ece_rl/project/code/autodrive/mysettings.ini"
settings = get_carla_settings()
env = CarlaEnv(is_render_enabled=True, automatic_render=True, num_speedup_steps=10, run_offscreen=False,
               cameras=None, save_screens=False, carla_settings=settings, carla_server_settings=config_file)

np.random.seed(123)
observation_shape = (2,)
nb_actions = 2

# Actor: flatten the windowed observation, two hidden ReLU layers, and a
# tanh head so raw actions stay in [-1, 1] (MujocoProcessor.process_action
# additionally clips to that range).
actor = Sequential()
actor.add(Flatten(input_shape=(1,) + observation_shape))
for units in (400, 300):
    actor.add(Dense(units))
    actor.add(Activation('relu'))
actor.add(Dense(nb_actions))
actor.add(Activation('tanh'))
print(actor.summary())

# Critic: the observation passes through a first hidden layer on its own;
# the action is concatenated in only after that stage.
action_input = Input(shape=(nb_actions,), name='action_input')
observation_input = Input(
    shape=(1,) + observation_shape, name='observation_input')
h = Dense(400)(Flatten()(observation_input))
h = Activation('relu')(h)
h = Concatenate()([h, action_input])
h = Dense(300)(h)
h = Activation('relu')(h)
h = Dense(1)(h)
h = Activation('linear')(h)
critic = Model(inputs=[action_input, observation_input], outputs=h)
print(critic.summary())

# Finally, we configure and compile our agent. You can use every built-in Keras optimizer and
# even the metrics!
62 | memory = SequentialMemory(limit=100000, window_length=1) 63 | random_process = OrnsteinUhlenbeckProcess( 64 | size=nb_actions, theta=.15, mu=0., sigma=.1) 65 | agent = DDPGAgent(nb_actions=nb_actions, actor=actor, critic=critic, critic_action_input=action_input, 66 | memory=memory, nb_steps_warmup_critic=1000, nb_steps_warmup_actor=1000, 67 | random_process=random_process, gamma=.99, target_model_update=1e-3) 68 | 69 | agent.compile([Adam(lr=1e-4), Adam(lr=1e-3)], metrics=['mae']) 70 | 71 | # Okay, now it's time to learn something! We visualize the training here for show, but this 72 | # slows down training quite a lot. You can always safely abort the training prematurely using 73 | # Ctrl + C. 74 | agent.fit(env, nb_steps=1000000, visualize=False, verbose=1) 75 | 76 | # After training is done, we save the final weights. 77 | agent.save_weights('ddpg_{}_weights.h5f'.format(ENV_NAME), overwrite=True) 78 | 79 | # Finally, evaluate our algorithm for 5 episodes. 80 | agent.test(env, nb_episodes=5, visualize=False, nb_max_episode_steps=200) 81 | -------------------------------------------------------------------------------- /code/autodrive/carla-client/driving_benchmark_example.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 4 | # Barcelona (UAB). 5 | # 6 | # This work is licensed under the terms of the MIT license. 7 | # For a copy, see . 
8 | 9 | import argparse 10 | import logging 11 | 12 | from carla.driving_benchmark import run_driving_benchmark 13 | from carla.driving_benchmark.experiment_suites import CoRL2017 14 | from carla.driving_benchmark.experiment_suites import BasicExperimentSuite 15 | from carla.agent import ForwardAgent 16 | 17 | if __name__ == '__main__': 18 | 19 | argparser = argparse.ArgumentParser(description=__doc__) 20 | argparser.add_argument( 21 | '-v', '--verbose', 22 | action='store_true', 23 | dest='verbose', 24 | help='print some extra status information') 25 | argparser.add_argument( 26 | '-db', '--debug', 27 | action='store_true', 28 | dest='debug', 29 | help='print debug information') 30 | argparser.add_argument( 31 | '--host', 32 | metavar='H', 33 | default='localhost', 34 | help='IP of the host server (default: localhost)') 35 | argparser.add_argument( 36 | '-p', '--port', 37 | metavar='P', 38 | default=2000, 39 | type=int, 40 | help='TCP port to listen to (default: 2000)') 41 | argparser.add_argument( 42 | '-c', '--city-name', 43 | metavar='C', 44 | default='Town01', 45 | help='The town that is going to be used on benchmark' 46 | + '(needs to match active town in server, options: Town01 or Town02)') 47 | argparser.add_argument( 48 | '-n', '--log_name', 49 | metavar='T', 50 | default='test', 51 | help='The name of the log file to be created by the benchmark' 52 | ) 53 | argparser.add_argument( 54 | '--corl-2017', 55 | action='store_true', 56 | help='If you want to benchmark the corl-2017 instead of the Basic one' 57 | ) 58 | argparser.add_argument( 59 | '--continue-experiment', 60 | action='store_true', 61 | help='If you want to continue the experiment with the same name' 62 | ) 63 | 64 | args = argparser.parse_args() 65 | if args.debug: 66 | log_level = logging.DEBUG 67 | elif args.verbose: 68 | log_level = logging.INFO 69 | else: 70 | log_level = logging.WARNING 71 | 72 | logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level) 73 | 
logging.info('listening to server %s:%s', args.host, args.port) 74 | 75 | # We instantiate a forward agent, a simple policy that just set 76 | # acceleration as 0.9 and steering as zero 77 | agent = ForwardAgent() 78 | 79 | # We instantiate an experiment suite. Basically a set of experiments 80 | # that are going to be evaluated on this benchmark. 81 | if args.corl_2017: 82 | experiment_suite = CoRL2017(args.city_name) 83 | else: 84 | print (' WARNING: running the basic driving benchmark, to run for CoRL 2017' 85 | ' experiment suites, you should run' 86 | ' python driving_benchmark_example.py --corl-2017') 87 | experiment_suite = BasicExperimentSuite(args.city_name) 88 | 89 | # Now actually run the driving_benchmark 90 | run_driving_benchmark(agent, experiment_suite, args.city_name, 91 | args.log_name, args.continue_experiment, 92 | args.host, args.port) 93 | -------------------------------------------------------------------------------- /code/autodrive/keras-rl/examples/naf_pendulum.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import gym 3 | 4 | from keras.models import Sequential, Model 5 | from keras.layers import Dense, Activation, Flatten, Input, Concatenate 6 | from keras.optimizers import Adam 7 | 8 | from rl.agents import NAFAgent 9 | from rl.memory import SequentialMemory 10 | from rl.random import OrnsteinUhlenbeckProcess 11 | from rl.core import Processor 12 | 13 | class PendulumProcessor(Processor): 14 | def process_reward(self, reward): 15 | # The magnitude of the reward can be important. Since each step yields a relatively 16 | # high reward, we reduce the magnitude by two orders. 17 | return reward / 100. 18 | 19 | 20 | ENV_NAME = 'Pendulum-v0' 21 | gym.undo_logger_setup() 22 | 23 | 24 | # Get the environment and extract the number of actions. 
env = gym.make(ENV_NAME)
np.random.seed(123)
env.seed(123)
assert len(env.action_space.shape) == 1
nb_actions = env.action_space.shape[0]


def _state_mlp(output_units):
    """Build the shared state-network shape used by V and mu:
    Flatten -> 3 x (Dense(16) + ReLU) -> Dense(output_units) + linear."""
    net = Sequential()
    net.add(Flatten(input_shape=(1,) + env.observation_space.shape))
    for _ in range(3):
        net.add(Dense(16))
        net.add(Activation('relu'))
    net.add(Dense(output_units))
    net.add(Activation('linear'))
    return net


# Build all necessary models: V, mu, and L networks.
V_model = _state_mlp(1)
print(V_model.summary())

mu_model = _state_mlp(nb_actions)
print(mu_model.summary())

# L network: consumes both action and observation; its output has
# (nb_actions**2 + nb_actions) // 2 units — presumably the entries of a
# triangular matrix consumed by NAFAgent (TODO confirm against rl.agents).
action_input = Input(shape=(nb_actions,), name='action_input')
observation_input = Input(shape=(1,) + env.observation_space.shape, name='observation_input')
h = Concatenate()([action_input, Flatten()(observation_input)])
for _ in range(3):
    h = Dense(32)(h)
    h = Activation('relu')(h)
h = Dense(((nb_actions * nb_actions + nb_actions) // 2))(h)
h = Activation('linear')(h)
L_model = Model(inputs=[action_input, observation_input], outputs=h)
print(L_model.summary())

# Finally, we configure and compile our agent. You can use every built-in Keras optimizer and
# even the metrics!
72 | processor = PendulumProcessor() 73 | memory = SequentialMemory(limit=100000, window_length=1) 74 | random_process = OrnsteinUhlenbeckProcess(theta=.15, mu=0., sigma=.3, size=nb_actions) 75 | agent = NAFAgent(nb_actions=nb_actions, V_model=V_model, L_model=L_model, mu_model=mu_model, 76 | memory=memory, nb_steps_warmup=100, random_process=random_process, 77 | gamma=.99, target_model_update=1e-3, processor=processor) 78 | agent.compile(Adam(lr=.001, clipnorm=1.), metrics=['mae']) 79 | 80 | # Okay, now it's time to learn something! We visualize the training here for show, but this 81 | # slows down training quite a lot. You can always safely abort the training prematurely using 82 | # Ctrl + C. 83 | agent.fit(env, nb_steps=50000, visualize=True, verbose=1, nb_max_episode_steps=200) 84 | 85 | # After training is done, we save the final weights. 86 | agent.save_weights('cdqn_{}_weights.h5f'.format(ENV_NAME), overwrite=True) 87 | 88 | # Finally, evaluate our algorithm for 5 episodes. 89 | agent.test(env, nb_episodes=10, visualize=True, nb_max_episode_steps=200) 90 | -------------------------------------------------------------------------------- /code/autodrive/keras-rl/rl/common/vec_env/subproc_env_vec.py: -------------------------------------------------------------------------------- 1 | # Inspired from OpenAI Baselines 2 | 3 | import numpy as np 4 | from multiprocessing import Process, Pipe 5 | from rl.common.vec_env import VecEnv, CloudpickleWrapper 6 | from rl.common.tile_images import tile_images 7 | 8 | def worker(remote, parent_remote, env_fn_wrapper): 9 | parent_remote.close() 10 | env = env_fn_wrapper.x() 11 | while True: 12 | cmd, data = remote.recv() 13 | if cmd == 'step': 14 | ob, reward, done, info = env.step(data) 15 | if done: 16 | ob = env.reset() 17 | remote.send((ob, reward, done, info)) 18 | elif cmd == 'reset': 19 | ob = env.reset() 20 | remote.send(ob) 21 | elif cmd == 'render': 22 | remote.send(env.render(mode='rgb_array')) 23 | elif cmd == 
'close': 24 | remote.close() 25 | break 26 | elif cmd == 'get_spaces': 27 | remote.send((env.observation_space, env.action_space)) 28 | elif cmd == 'seed': 29 | val = env.seed(data) 30 | remote.send(val) 31 | else: 32 | raise NotImplementedError 33 | 34 | 35 | class SubprocVecEnv(VecEnv): 36 | def __init__(self, env_fns, spaces=None): 37 | """ 38 | envs: list of gym environments to run in subprocesses 39 | """ 40 | self.waiting = False 41 | self.closed = False 42 | nenvs = len(env_fns) 43 | self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)]) 44 | self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn))) 45 | for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)] 46 | for p in self.ps: 47 | p.daemon = True # if the main process crashes, we should not cause things to hang 48 | p.start() 49 | for remote in self.work_remotes: 50 | remote.close() 51 | 52 | self.remotes[0].send(('get_spaces', None)) 53 | observation_space, action_space = self.remotes[0].recv() 54 | VecEnv.__init__(self, len(env_fns), observation_space, action_space) 55 | 56 | def step_async(self, actions): 57 | for remote, action in zip(self.remotes, actions): 58 | remote.send(('step', action)) 59 | self.waiting = True 60 | 61 | def step_wait(self): 62 | results = [remote.recv() for remote in self.remotes] 63 | self.waiting = False 64 | obs, rews, dones, infos = zip(*results) 65 | return np.stack(obs), np.stack(rews), np.stack(dones), infos 66 | 67 | def reset(self): 68 | for remote in self.remotes: 69 | remote.send(('reset', None)) 70 | return np.stack([remote.recv() for remote in self.remotes]) 71 | 72 | def reset_task(self): 73 | for remote in self.remotes: 74 | remote.send(('reset_task', None)) 75 | return np.stack([remote.recv() for remote in self.remotes]) 76 | 77 | def close(self): 78 | if self.closed: 79 | return 80 | if self.waiting: 81 | for remote in self.remotes: 82 | remote.recv() 83 | for remote in self.remotes: 
84 | remote.send(('close', None)) 85 | for p in self.ps: 86 | p.join() 87 | self.closed = True 88 | 89 | def render(self, mode='human'): 90 | raise NotImplementedError('Render is not implemented for Synchronous Environment') 91 | 92 | def seed(self, i): 93 | rank = i 94 | for remote in self.remotes: 95 | remote.send(('seed', rank)) 96 | rank += 1 97 | -------------------------------------------------------------------------------- /code/autodrive/carla-client/carla/driving_benchmark/experiment_suites/experiment_suite.py: -------------------------------------------------------------------------------- 1 | # To be redefined on subclasses on how to calculate timeout for an episode 2 | import abc 3 | 4 | 5 | class ExperimentSuite(object): 6 | 7 | def __init__(self, city_name): 8 | 9 | self._city_name = city_name 10 | self._experiments = self.build_experiments() 11 | 12 | def calculate_time_out(self, path_distance): 13 | """ 14 | Function to return the timeout ,in milliseconds, 15 | that is calculated based on distance to goal. 16 | This is the same timeout as used on the CoRL paper. 17 | """ 18 | return ((path_distance / 1000.0) / 10.0) * 3600.0 + 10.0 19 | 20 | def get_number_of_poses_task(self): 21 | """ 22 | Get the number of poses a task have for this benchmark 23 | """ 24 | 25 | # Warning: assumes that all tasks have the same size 26 | 27 | return len(self._experiments[0].poses) 28 | 29 | def get_experiments(self): 30 | """ 31 | Getter for the experiment set. 
32 | """ 33 | return self._experiments 34 | 35 | @property 36 | def dynamic_tasks(self): 37 | """ 38 | Returns the episodes that contain dynamic obstacles 39 | """ 40 | dynamic_tasks = set() 41 | for exp in self._experiments: 42 | if exp.conditions.NumberOfVehicles > 0 or exp.conditions.NumberOfPedestrians > 0: 43 | dynamic_tasks.add(exp.task) 44 | 45 | return list(dynamic_tasks) 46 | 47 | @property 48 | def metrics_parameters(self): 49 | """ 50 | Property to return the parameters for the metric module 51 | Could be redefined depending on the needs of the user. 52 | """ 53 | return { 54 | 55 | 'intersection_offroad': {'frames_skip': 10, 56 | 'frames_recount': 20, 57 | 'threshold': 0.3 58 | }, 59 | 'intersection_otherlane': {'frames_skip': 10, 60 | 'frames_recount': 20, 61 | 'threshold': 0.4 62 | }, 63 | 'collision_other': {'frames_skip': 10, 64 | 'frames_recount': 20, 65 | 'threshold': 400 66 | }, 67 | 'collision_vehicles': {'frames_skip': 10, 68 | 'frames_recount': 30, 69 | 'threshold': 400 70 | }, 71 | 'collision_pedestrians': {'frames_skip': 5, 72 | 'frames_recount': 100, 73 | 'threshold': 300 74 | }, 75 | 76 | } 77 | 78 | @property 79 | def weathers(self): 80 | weathers = set(self.train_weathers) 81 | weathers.update(self.test_weathers) 82 | return weathers 83 | 84 | @abc.abstractmethod 85 | def build_experiments(self): 86 | """ 87 | Returns a set of experiments to be evaluated 88 | Must be redefined in an inherited class. 
class TCPConnectionError(Exception):
    """Raised for any failure in a TCPClient networking operation."""
    pass


class TCPClient(object):
    """
    Basic networking client for TCP connections. Errors occurred during
    networking operations are raised as TCPConnectionError.

    Received messages are expected to be prepended by an int32 defining the
    message size. Messages are sent following this convention.
    """

    def __init__(self, host, port, timeout):
        # No socket is opened here; connect() must be called explicitly.
        self._host = host
        self._port = port
        self._timeout = timeout
        self._socket = None
        self._logprefix = '(%s:%s) ' % (self._host, self._port)

    def connect(self, connection_attempts=10):
        """Try to establish a connection to the given host:port."""
        connection_attempts = max(1, connection_attempts)
        last_error = None
        for attempt in range(1, connection_attempts + 1):
            try:
                self._socket = socket.create_connection(
                    address=(self._host, self._port), timeout=self._timeout)
                self._socket.settimeout(self._timeout)
                logging.debug('%sconnected', self._logprefix)
                return
            except socket.error as exception:
                last_error = exception
                logging.debug('%sconnection attempt %d: %s',
                              self._logprefix, attempt, last_error)
                time.sleep(1)
        # All attempts exhausted: surface the last socket error as a TCP error.
        self._reraise_exception_as_tcp_error('failed to connect', last_error)

    def disconnect(self):
        """Disconnect any active connection."""
        if self._socket is not None:
            logging.debug('%sdisconnecting', self._logprefix)
            self._socket.close()
            self._socket = None

    def connected(self):
        """Return whether there is an active connection."""
        return self._socket is not None

    def write(self, message):
        """Send message to the server, prefixed by its length."""
        if self._socket is None:
            raise TCPConnectionError(self._logprefix + 'not connected')
        # NOTE(review): the prefix format string was garbled in this copy of
        # the file; upstream CARLA uses a little-endian uint32 ('<L') — confirm.
        header = struct.pack('<L', len(message))
        try:
            self._socket.sendall(header + message)
        except socket.error as exception:
            self._reraise_exception_as_tcp_error('failed to write data', exception)

    def read(self):
        """Read a length-prefixed message from the server and return its payload."""
        header = self._read_n(4)
        if not header:
            raise TCPConnectionError(self._logprefix + 'connection closed')
        length = struct.unpack('<L', header)[0]
        return self._read_n(length)

    def _read_n(self, length):
        """Read exactly ``length`` bytes from the socket."""
        if self._socket is None:
            raise TCPConnectionError(self._logprefix + 'not connected')
        buf = bytes()
        while length > 0:
            try:
                data = self._socket.recv(length)
            except socket.error as exception:
                self._reraise_exception_as_tcp_error('failed to read data', exception)
            if not data:
                # recv returning empty bytes means the peer closed the socket.
                raise TCPConnectionError(self._logprefix + 'connection closed')
            buf += data
            length -= len(data)
        return buf

    def _reraise_exception_as_tcp_error(self, message, exception):
        # Wrap any low-level socket error into the module's exception type.
        raise TCPConnectionError('%s%s: %s' % (self._logprefix, message, exception))
def main(args):
    """
    Train the convolutional VAE on the image paths listed in ``args.file``.

    Args:
        args: argparse Namespace with ``file`` (path to a text file with one
            image filename per line), ``epochs``, ``batch``, ``size`` (h, w),
            ``lr`` and ``samples`` (train count, validation count).

    Raises:
        FileNotFoundError: if ``args.file`` does not exist.
    """
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)

    epochs = args.epochs
    batch_size = args.batch
    size = tuple(args.size)
    learning_rate = args.lr
    training_samples = args.samples[0]
    val_samples = args.samples[1]

    file_path = os.path.abspath(args.file)
    if not os.path.exists(file_path):
        logger.fatal("File path does not exist: {}".format(file_path))
        # BUG FIX: include the offending path in the exception instead of
        # raising a bare FileNotFoundError with no message.
        raise FileNotFoundError(file_path)

    # BUG FIX: the original used line[:-1], which silently drops the last
    # character of the final line when the file lacks a trailing newline.
    with open(file_path, "r") as ip:
        filenames = [line.rstrip("\n") for line in ip]

    random.shuffle(filenames)

    train_files = tf.constant(filenames[:training_samples])
    val_files = tf.constant(
        filenames[training_samples:(training_samples + val_samples)])

    train_dataset = AutoencoderDataset(train_files, size, batch_size, epochs)
    train_it = train_dataset.get_iterator()

    val_dataset = AutoencoderDataset(val_files, size, batch_size, epochs)
    val_it = val_dataset.get_iterator()

    callbacks = [
        tf.keras.callbacks.EarlyStopping(
            patience=50, monitor='val_loss', min_delta=0.1),
        tf.keras.callbacks.TensorBoard(
            log_dir='./logs', write_graph=True, write_grads=True, write_images=True),
        tf.keras.callbacks.ModelCheckpoint("weights.{epoch:02d}-{val_loss:.2f}.hdf5", monitor='val_loss',
                                           verbose=0,
                                           save_best_only=False, save_weights_only=False, mode='auto', period=5),
        tf.keras.callbacks.ReduceLROnPlateau(
            monitor='val_loss', factor=0.5, patience=25, min_lr=0.001 * learning_rate)
    ]

    optimizer = tf.keras.optimizers.Adam(lr=learning_rate)

    # Build model: encoder -> latent sampling -> decoder, wired by build_vae.
    encoder, dim = build_encoder()
    decoder = build_decoder(dim)
    vae = build_vae(batch_size, encoder, decoder)

    encoder.summary()
    decoder.summary()
    vae.summary()

    # The VAE loss is attached inside build_vae via add_loss, hence no `loss=`.
    vae.compile(optimizer=optimizer)

    # Train and report wall-clock time.
    start = time.time()

    vae.fit(train_it,
            steps_per_epoch=(training_samples // batch_size),
            epochs=epochs,
            callbacks=callbacks,
            # validation_data=val_it,
            # validation_steps=(val_samples//batch_size),
            verbose=2)

    done = time.time()
    elapsed = done - start
    print("Elapsed: ", elapsed)
def test_cdqn():
    # TODO: replace this with a simpler environment where we can actually test if it finds a solution
    env = gym.make('Pendulum-v0')
    np.random.seed(123)
    env.seed(123)
    random.seed(123)
    nb_actions = env.action_space.shape[0]

    # State-value head V(s).
    V_model = Sequential([
        Flatten(input_shape=(1,) + env.observation_space.shape),
        Dense(16),
        Activation('relu'),
        Dense(1),
    ])

    # Greedy-action head mu(s).
    mu_model = Sequential([
        Flatten(input_shape=(1,) + env.observation_space.shape),
        Dense(16),
        Activation('relu'),
        Dense(nb_actions),
    ])

    # Advantage head L(s, a): outputs the entries of a lower-triangular matrix.
    action_input = Input(shape=(nb_actions,), name='action_input')
    observation_input = Input(shape=(1,) + env.observation_space.shape, name='observation_input')
    hidden = Concatenate()([action_input, Flatten()(observation_input)])
    hidden = Dense(16)(hidden)
    hidden = Activation('relu')(hidden)
    hidden = Dense((nb_actions * nb_actions + nb_actions) // 2)(hidden)
    L_model = Model(inputs=[action_input, observation_input], outputs=hidden)

    memory = SequentialMemory(limit=1000, window_length=1)
    random_process = OrnsteinUhlenbeckProcess(theta=.15, mu=0., sigma=.3, size=nb_actions)
    agent = NAFAgent(nb_actions=nb_actions, V_model=V_model, L_model=L_model, mu_model=mu_model,
                     memory=memory, nb_steps_warmup=50, random_process=random_process,
                     gamma=.99, target_model_update=1e-3)
    agent.compile(Adam(lr=1e-3))

    # Short smoke-test run; correctness of the learned policy is not asserted.
    agent.fit(env, nb_steps=400, visualize=False, verbose=0, nb_max_episode_steps=100)
    h = agent.test(env, nb_episodes=2, visualize=False, nb_max_episode_steps=100)
    # TODO: evaluate history
def start_listen():
    # Listen for keypresses to control the game via the terminal.
    # Inspired from: https://pypi.python.org/pypi/pynput
    global action, reset, steering_strength, gas_strength, brake_strength

    def on_press(key):
        global action, reset, steering_strength, gas_strength, brake_strength
        # Channel 1 is throttle/brake, channel 0 is steering.
        if key == keyboard.KeyCode(char="w"):
            action[1] = gas_strength
        elif key == keyboard.KeyCode(char="s"):
            action[1] = brake_strength
        elif key == keyboard.KeyCode(char="a"):
            action[0] = -steering_strength
        elif key == keyboard.KeyCode(char="d"):
            action[0] = steering_strength
        elif key == keyboard.Key.space:
            # Space requests an episode reset; the main loop acts on it.
            reset = True

    def on_release(key):
        global action
        # Releasing a movement key zeroes the corresponding action channel.
        if key in (keyboard.KeyCode(char="w"), keyboard.KeyCode(char="s")):
            action[1] = 0.0
        elif key in (keyboard.KeyCode(char="a"), keyboard.KeyCode(char="d")):
            action[0] = 0.0

    # Collect events until the listener is released; blocks this thread.
    with keyboard.Listener(on_press=on_press,
                           on_release=on_release) as listener:
        listener.join()
# Weather preset ids are 0..14 inclusive.
MAX_NUMBER_OF_WEATHER_IDS = 14


class CarlaSettings(object):
    """
    The CarlaSettings object controls the settings of an episode. The __str__
    method retrieves an str with a CarlaSettings.ini file contents.
    """

    def __init__(self, **kwargs):
        # [CARLA/Server]
        self.SynchronousMode = True
        self.SendNonPlayerAgentsInfo = False
        # [CARLA/QualitySettings]
        self.QualityLevel = 'Epic'
        # [CARLA/LevelSettings]
        self.PlayerVehicle = None
        self.NumberOfVehicles = 20
        self.NumberOfPedestrians = 30
        self.WeatherId = 1
        self.SeedVehicles = None
        self.SeedPedestrians = None
        # Apply overrides before _sensors exists, so set() cannot clobber it.
        self.set(**kwargs)
        self._sensors = []

    def set(self, **kwargs):
        """Override known settings; reject unknown keys loudly."""
        for key, value in kwargs.items():
            if not hasattr(self, key):
                raise ValueError('CarlaSettings: no key named %r' % key)
            setattr(self, key, value)

    def randomize_seeds(self):
        """
        Randomize the seeds of the new episode's pseudo-random number
        generators.
        """
        self.SeedVehicles = random.getrandbits(16)
        self.SeedPedestrians = random.getrandbits(16)

    def randomize_weather(self):
        """Randomized the WeatherId."""
        self.WeatherId = random.randint(0, MAX_NUMBER_OF_WEATHER_IDS)

    def add_sensor(self, sensor):
        """Add a sensor to the player vehicle (see sensor.py)."""
        if not isinstance(sensor, carla_sensor.Sensor):
            raise ValueError('Sensor not supported')
        self._sensors.append(sensor)

    def __str__(self):
        """Converts this object to an INI formatted string."""
        ini = ConfigParser()
        # Keep option names case-sensitive (default lower-cases them).
        ini.optionxform = str

        S_SERVER = 'CARLA/Server'
        S_QUALITY = 'CARLA/QualitySettings'
        S_LEVEL = 'CARLA/LevelSettings'
        S_SENSOR = 'CARLA/Sensor'

        def get_attribs(obj):
            # Public, non-callable attributes of obj.
            return [a for a in dir(obj)
                    if not a.startswith('_') and not callable(getattr(obj, a))]

        def add_section(section, obj, keys):
            # Only emit keys that exist and are not None; sections are
            # created lazily so empty sections never appear.
            for key in keys:
                if hasattr(obj, key) and getattr(obj, key) is not None:
                    if not ini.has_section(section):
                        ini.add_section(section)
                    ini.set(section, key, str(getattr(obj, key)))

        add_section(S_SERVER, self,
                    ['SynchronousMode', 'SendNonPlayerAgentsInfo'])
        add_section(S_QUALITY, self, ['QualityLevel'])
        add_section(S_LEVEL, self,
                    ['NumberOfVehicles', 'NumberOfPedestrians', 'WeatherId',
                     'SeedVehicles', 'SeedPedestrians'])

        ini.add_section(S_SENSOR)
        ini.set(S_SENSOR, 'Sensors',
                ','.join(s.SensorName for s in self._sensors))

        for sensor_def in self._sensors:
            section = S_SENSOR + '/' + sensor_def.SensorName
            add_section(section, sensor_def, get_attribs(sensor_def))

        text = io.StringIO() if sys.version_info >= (3, 0) else io.BytesIO()
        ini.write(text)
        # CARLA expects `key=value` without spaces around '='.
        return text.getvalue().replace(' = ', '=')
def main():
    """Read a min-segment-tree problem from stdin and answer its queries.

    Input format: ``n q`` on the first line, ``n`` values on the second,
    then ``q`` lines of either ``u idx val`` (update) or ``q l r`` (query),
    all 1-indexed.
    """
    n, q = input().strip().split(" ")
    n, q = int(n), int(q)

    values = [int(tok) for tok in input().strip().split(" ")]

    tree = SegmentTree(n, values)

    for _ in range(q):
        op = input().strip()
        if op.startswith("u"):
            _, idx, val = op.split(" ")
            tree.update(int(idx) - 1, int(val))
        elif op.startswith("q"):
            _, lo, hi = op.split(" ")
            print(tree.query(int(lo) - 1, int(hi) - 1))


class SegmentTree(object):
    """Array-backed minimum segment tree over the first ``n`` values of ``a``.

    The leaf array is padded to the next power of two with 1e14, which acts
    as +infinity for min() so padding never wins a query.
    """

    def __init__(self, n, a):
        self.n = n
        self.levels = math.ceil(math.log2(n))

        size = 2 ** self.levels
        self.a = a + [1e14] * (size - len(a))
        self.tree = [0] * (2 * size - 1)

        self._build(0, 0, size - 1)

    def _build(self, node, lo, hi):
        """Recursively fill ``tree`` bottom-up with range minimums."""
        try:
            if lo == hi:
                self.tree[node] = self.a[lo]
            else:
                mid = (lo + hi) // 2
                left, right = 2 * node + 1, 2 * node + 2
                self._build(left, lo, mid)
                self._build(right, mid + 1, hi)
                self.tree[node] = min(self.tree[left], self.tree[right])
        except Exception as e:
            # Diagnostic trap kept from the original implementation.
            print(e, node, lo, hi, self.tree)

    def _update(self, node, lo, hi, idx, val):
        """Set leaf ``idx`` to ``val`` and repair minimums on the way up."""
        if lo == hi:
            self.a[idx] = val
            self.tree[node] = val
        else:
            mid = (lo + hi) // 2
            left, right = 2 * node + 1, 2 * node + 2
            if lo <= idx <= mid:
                self._update(left, lo, mid, idx, val)
            elif mid + 1 <= idx <= hi:
                self._update(right, mid + 1, hi, idx, val)
            else:
                raise Exception("Impossible state for start {}, mid {}, end {} and idx {}".format(
                    lo, mid, hi, idx))

            self.tree[node] = min(self.tree[left], self.tree[right])

    def _query(self, node, lo, hi, l, r):
        """Minimum of ``a[l..r]``; [l, r] must lie within [lo, hi]."""
        assert l <= r

        if lo == hi or (lo == l and hi == r):
            # Leaf, or exact segment match: the stored minimum is the answer.
            return self.tree[node]

        mid = (lo + hi) // 2
        left, right = 2 * node + 1, 2 * node + 2
        if lo <= l and r <= mid:
            return self._query(left, lo, mid, l, r)
        if mid + 1 <= l and r <= hi:
            return self._query(right, mid + 1, hi, l, r)
        if l <= mid and r <= hi:
            # Straddles the midpoint: combine both halves.
            return min(self._query(left, lo, mid, l, mid),
                       self._query(right, mid + 1, hi, mid + 1, r))
        # Disjoint range: unreachable in practice because query() clamps
        # its bounds, kept for parity with the original.
        return 0

    def update(self, idx, val):
        """Public update with a bounds assertion (0-indexed)."""
        assert 0 <= idx <= len(self.a) - 1

        self._update(0, 0, 2 ** self.levels - 1, idx, val)

    def query(self, l, r):
        """Public range-min query; bounds are clamped to the padded array."""
        l = max(0, l)
        r = min(len(self.a) - 1, r)

        return self._query(0, 0, 2 ** self.levels - 1, l, r)

    def __repr__(self):
        return "tree: {}".format(self.tree)


if __name__ == "__main__":
    main()
26 | ;SendNonPlayerAgentsInfo=false 27 | 28 | ;[CARLA/QualitySettings] 29 | ; Quality level of the graphics, a lower level makes the simulation run 30 | ; considerably faster. Available: Low or Epic. 31 | QualityLevel=Low 32 | 33 | ;[CARLA/LevelSettings] 34 | ; Path of the vehicle class to be used for the player. Leave empty for default. 35 | ; Paths follow the pattern "/Game/Blueprints/Vehicles/Mustang/Mustang.Mustang_C" 36 | ;PlayerVehicle= 37 | ; Number of non-player vehicles to be spawned into the level. 38 | ;NumberOfVehicles=0 39 | ; Number of non-player pedestrians to be spawned into the level. 40 | ;NumberOfPedestrians=0 41 | ; Index of the weather/lighting presets to use. If negative, the default presets 42 | ; of the map will be used. 43 | ;WeatherId=1 44 | ; Seeds for the pseudo-random number generators. 45 | ;SeedVehicles=123456789 46 | ;SeedPedestrians=123456789 47 | 48 | ;[CARLA/Sensor] 49 | ; Names of the sensors to be attached to the player, comma-separated, each of 50 | ; them should be defined in its own subsection. 51 | 52 | ; Uncomment next line to add a camera called MyCamera to the vehicle 53 | ; Sensors=CameraRGB 54 | 55 | ; or uncomment next line to add a camera and a Lidar 56 | ; Sensors=MyCamera,MyLidar 57 | 58 | ; or uncomment next line to add a regular camera and a depth camera 59 | ; Sensors=MyCamera,MyCamera/Depth 60 | 61 | ; Now, every camera we added needs to be defined it in its own subsection. 62 | ;[CARLA/Sensor/CameraRGB] 63 | ; Type of the sensor. The available types are: 64 | ; * CAMERA A scene capture camera. 65 | ; * LIDAR_RAY_CAST A Lidar implementation based on ray-casting. 66 | ;SensorType=CAMERA 67 | ; Post-processing effect to be applied to this camera. Valid values: 68 | ; * None No effects applied. 69 | ; * SceneFinal Post-processing present at scene (bloom, fog, etc). 70 | ; * Depth Depth map ground-truth only. 71 | ; * SemanticSegmentation Semantic segmentation ground-truth only. 
72 | ;PostProcessing=SceneFinal 73 | ; Size of the captured image in pixels. 74 | ;ImageSizeX=360 75 | ;ImageSizeY=360 76 | ; Camera (horizontal) field of view in degrees. 77 | ;FOV=90 78 | ; Position of the camera relative to the car in meters. 79 | ;PositionX=0.20 80 | ;PositionY=0 81 | ;PositionZ=1.30 82 | ; Rotation of the camera relative to the car in degrees. 83 | ;RotationPitch=8 84 | ;RotationRoll=0 85 | ;RotationYaw=0 86 | 87 | ;[CARLA/Sensor/MyCamera/Depth] 88 | ; The sensor can be defined in a subsection of MyCamera so it inherits the 89 | ; values in MyCamera. This adds a camera similar to MyCamera but generating 90 | ; depth map images instead. 91 | ;PostProcessing=Depth 92 | 93 | ;[CARLA/Sensor/MyLidar] 94 | ;SensorType=LIDAR_RAY_CAST 95 | ; Number of lasers. 96 | ;Channels=32 97 | ; Measure distance in meters. 98 | ;Range=50.0 99 | ; Points generated by all lasers per second. 100 | ;PointsPerSecond=100000 101 | ; Lidar rotation frequency. 102 | ;RotationFrequency=10 103 | ; Upper and lower laser angles, positive values means above horizontal line. 104 | ;UpperFOVLimit=10 105 | ;LowerFOVLimit=-30 106 | ; Position and rotation relative to the vehicle. 107 | ;PositionX=0 108 | ;PositionY=0 109 | ;PositionZ=1.40 110 | ;RotationPitch=0 111 | ;RotationYaw=0 112 | ;RotationRoll=0 113 | -------------------------------------------------------------------------------- /code/autodrive/carla-client/carla/planner/grid.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de 2 | # Barcelona (UAB). 3 | # 4 | # This work is licensed under the terms of the MIT license. 5 | # For a copy, see . 
def angle_between(v1, v2):
    """Angle in radians between two 2-D vectors."""
    return np.arccos(np.dot(v1, v2) / np.linalg.norm(v1) / np.linalg.norm(v2))


class Grid(object):
    """Occupancy grid derived from a planner graph.

    Cells crossed by a graph edge are free (0.0); every other cell is a
    wall (1.0). The graph must provide get_resolution() and get_edges().
    """

    def __init__(self, graph):
        self._graph = graph
        self._structure = self._make_structure()
        self._walls = self._make_walls()

    def search_on_grid(self, x, y):
        """Spiral outwards from (x, y) and return the first free cell found."""
        offsets = [[0, 1], [0, -1], [1, 0], [1, 1],
                   [1, -1], [-1, 0], [-1, 1], [-1, -1]]
        res = self._graph.get_resolution()
        cx, cy = x, y
        radius = 1
        while self._structure[cx, cy] != 0:
            for dx, dy in offsets:
                cx, cy = x + dx * radius, y + dy * radius

                if 0 <= cx < res[0] and 0 <= cy < res[1]:
                    if self._structure[cx, cy] == 0:
                        break
                else:
                    # Out of bounds: fall back to the origin for this probe.
                    cx, cy = x, y
            radius += 1

        return cx, cy

    def get_walls(self):
        return self._walls

    def get_wall_source(self, pos, pos_ori, target):
        """Walls plus free neighbours of ``pos`` lying behind the heading.

        Free adjacent cells whose direction deviates more than ~1.6 rad from
        ``pos_ori`` are treated as walls, except the target itself.
        """
        final_walls = copy.copy(self._walls)
        heading = np.array([pos_ori[0], pos_ori[1]])
        for adj in self._get_adjacent_free_nodes(pos):
            towards = np.array([adj[0] - pos[0], adj[1] - pos[1]])
            if angle_between(heading, towards) > 1.6 and adj != target:
                final_walls.add((adj[0], adj[1]))
        return final_walls

    def get_wall_target(self, pos, pos_ori, source):
        """Walls plus free neighbours of ``pos`` lying ahead of the heading.

        Mirror of get_wall_source: cells closer than ~1.0 rad to ``pos_ori``
        become walls, except the source itself.
        """
        final_walls = copy.copy(self._walls)
        heading = np.array([pos_ori[0], pos_ori[1]])
        for adj in self._get_adjacent_free_nodes(pos):
            towards = np.array([adj[0] - pos[0], adj[1] - pos[1]])
            if angle_between(heading, towards) < 1.0 and adj != source:
                final_walls.add((adj[0], adj[1]))
        return final_walls

    def _draw_line(self, grid, xi, yi, xf, yf):
        """Zero out the axis-aligned rectangle spanned by the two endpoints."""
        xi, xf = min(xi, xf), max(xi, xf)
        yi, yf = min(yi, yf), max(yi, yf)

        for i in range(xi, xf + 1):
            for j in range(yi, yf + 1):
                grid[i, j] = 0.0

        return grid

    def _make_structure(self):
        """Rasterize every graph edge onto a fresh all-walls grid."""
        resolution = self._graph.get_resolution()
        structure = np.ones((resolution[0], resolution[1]))

        for node, connections in self._graph.get_edges().items():
            for neighbour in connections:
                structure = self._draw_line(
                    structure, node[0], node[1], neighbour[0], neighbour[1])

        return structure

    def _make_walls(self):
        """Collect the coordinates of every remaining wall cell."""
        rows, cols = self._structure.shape
        return {(i, j)
                for i in range(rows)
                for j in range(cols)
                if self._structure[i, j] == 1.0}

    def _get_adjacent_free_nodes(self, pos):
        """ Eight nodes in total """
        offsets = [[0, 1], [0, -1], [1, 0], [1, 1],
                   [1, -1], [-1, 0], [-1, 1], [-1, -1]]
        res = self._graph.get_resolution()

        adjacent = set()
        for dx, dy in offsets:
            node = (pos[0] + dx, pos[1] + dy)
            if (0 <= node[0] < res[0] and 0 <= node[1] < res[1]
                    and self._structure[node[0], node[1]] == 0.0):
                adjacent.add(node)

        return adjacent
def build_encoder():
    """Build the VGG-style convolutional encoder.

    Returns:
        (model, interim_dims): the encoder Model mapping an INPUT_DIMS image
        to [z_mean, z_logvar], and the pre-flatten feature-map shape needed
        by the decoder to invert the flattening.
    """
    input = tf.keras.Input(shape=INPUT_DIMS, name="encoder_input")
    # Pad width 120 -> 128 so five 2x2 poolings divide evenly.
    x = tf.keras.layers.ZeroPadding2D(
        padding=(0, 4), name="padded_input")(input)

    filters = 32
    # Blocks 1-2: two convs each, then pool; filters double per block.
    for i in range(1, 3):
        for j in range(1, 3):
            x = tf.keras.layers.Conv2D(
                filters=filters, kernel_size=(3, 3), padding="same", activation="relu", name="block{}_conv{}".format(i, j))(x)
        x = tf.keras.layers.MaxPool2D(
            pool_size=(2, 2), strides=(2, 2), name="block{}_pool".format(i))(x)
        filters *= 2

    # Blocks 3-5: three convs each, then pool.
    for i in range(3, 6):
        for j in range(1, 4):
            x = tf.keras.layers.Conv2D(
                filters=filters, kernel_size=(3, 3), padding="same", activation="relu", name="block{}_conv{}".format(i, j))(x)
        x = tf.keras.layers.MaxPool2D(
            pool_size=(2, 2), strides=(2, 2), name="block{}_pool".format(i))(x)
        filters *= 2

    interim_dims = x.shape[1:]

    # fc
    x = tf.keras.layers.Flatten()(x)

    # NOTE(review): relu on z_mean/z_logvar constrains both to be
    # non-negative, which is unusual for a Gaussian posterior — confirm
    # whether linear activations were intended.
    z_mean = tf.keras.layers.Dense(
        LATENT_DIMS, activation="relu", name="z_mean")(x)
    z_logvar = tf.keras.layers.Dense(
        LATENT_DIMS, activation="relu", name="z_logvar")(x)

    return tf.keras.Model(inputs=input, outputs=[z_mean, z_logvar], name="encoder"), interim_dims


def build_decoder(interim_dims):
    """Build the decoder: latent vector -> fc -> upsample/conv stack -> image.

    Args:
        interim_dims: feature-map shape produced by the encoder before
            flattening; used to reshape the fc output back to a volume.
    """
    input = tf.keras.layers.Input(shape=(LATENT_DIMS,), name="decoder_input")
    flattened_interim_dims = interim_dims[0]*interim_dims[1]*interim_dims[2]

    # fc back up to the encoder's pre-flatten volume.
    x = tf.keras.layers.Dense(flattened_interim_dims,
                              activation="relu", name="fc2")(input)

    x = tf.keras.layers.Reshape(interim_dims)(x)

    filters = 512
    # Blocks 1-3: upsample then three convs; filters halve per block.
    for i in range(1, 4):
        x = UpSampling2D_NN(stride=2, name="block{}_upsample".format(i))(x)
        for j in range(1, 4):
            x = tf.keras.layers.Conv2D(
                filters=filters, kernel_size=(3, 3), padding="same", activation="relu", name="block{}_conv{}".format(i, j))(x)
        filters //= 2

    # Blocks 4-5: upsample then two convs.
    for i in range(4, 6):
        x = UpSampling2D_NN(stride=2, name="block{}_upsample".format(i))(x)
        for j in range(1, 3):
            x = tf.keras.layers.Conv2D(
                filters=filters, kernel_size=(3, 3), padding="same", activation="relu", name="block{}_conv{}".format(i, j))(x)
        filters //= 2

    x = tf.keras.layers.Conv2D(filters=3, kernel_size=(
        3, 3), activation="relu", padding="same", name="padded_output")(x)
    # Crop width 128 -> 120, inverting the encoder's ZeroPadding2D.
    output = tf.keras.layers.Cropping2D(cropping=(0, 4), name="output")(x)

    return tf.keras.Model(inputs=input, outputs=output, name="decoder")


def sample(args):
    """Reparameterization trick: z = mean + sigma * epsilon.

    ``z_log_var`` is the log of the *variance* (matching the KL term in
    build_vae), so the standard deviation is exp(0.5 * z_log_var).
    """
    z_mean, z_log_var = args
    batch = tf.shape(z_mean)[0]
    dims = tf.shape(z_mean)[1]
    epsilon = tf.random_normal(shape=(batch, dims))

    # BUG FIX: the original used exp(z_log_var), i.e. the variance, as the
    # scale. The KL term treats z_log_var as log-variance, so the sampling
    # scale must be the standard deviation exp(0.5 * z_log_var).
    return z_mean + tf.exp(0.5 * z_log_var) * epsilon


def build_vae(batch_size, encoder, decoder):
    """Wire encoder + sampling + decoder into a VAE with its loss attached.

    The combined reconstruction (MSE, scaled to a per-image sum) and KL
    losses are registered via add_loss, so the model compiles without a
    ``loss=`` argument.
    """
    x_input = tf.keras.Input(batch_shape=(batch_size,) + INPUT_DIMS)

    z_mean, z_log_var = encoder(x_input)
    z = tf.keras.layers.Lambda(sample)([z_mean, z_log_var])
    _output = decoder(z)

    reconstruction_loss = tf.keras.losses.mse(
        tf.keras.backend.flatten(x_input), tf.keras.backend.flatten(_output))
    reconstruction_loss *= INPUT_DIMS[0]*INPUT_DIMS[1]*INPUT_DIMS[2]

    # Standard Gaussian KL divergence, with z_log_var as log-variance.
    kl_loss = 1 + z_log_var - \
        tf.keras.backend.square(z_mean) - tf.keras.backend.exp(z_log_var)
    kl_loss = tf.keras.backend.sum(kl_loss, axis=-1)
    kl_loss *= -0.5

    vae_loss = tf.keras.backend.mean(reconstruction_loss + kl_loss)

    vae = tf.keras.Model(inputs=x_input, outputs=_output, name="vae")
    vae.add_loss(vae_loss)

    return vae


def main():
    """Smoke-test: build the three models and print their summaries."""
    batch_size = 128

    encoder, dim = build_encoder()
    decoder = build_decoder(dim)

    vae = build_vae(batch_size, encoder, decoder)

    encoder.summary()
    decoder.summary()
    vae.summary()


if __name__ == "__main__":
    main()
# Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.

import math

import numpy as np


def string_to_node(string):
    """Parse "x,y" into an (int, int) node tuple."""
    vec = string.split(',')
    return (int(vec[0]), int(vec[1]))


def string_to_floats(string):
    """Parse "x,y,z" into a (float, float, float) tuple."""
    vec = string.split(',')
    return (float(vec[0]), float(vec[1]), float(vec[2]))


def sldist(c1, c2):
    """Straight-line (Euclidean) distance between two 2D points."""
    return math.sqrt((c2[0] - c1[0]) ** 2 + (c2[1] - c1[1]) ** 2)


def sldist3(c1, c2):
    """Straight-line (Euclidean) distance between two 3D points."""
    return math.sqrt((c2[0] - c1[0]) ** 2 + (c2[1] - c1[1]) ** 2
                     + (c2[2] - c1[2]) ** 2)


class Graph(object):
    """
    A simple directed, weighted graph.

    Nodes are (x, y) integer tuples; edges carry a float distance. Can be
    populated programmatically (add_node/add_edge) or from a graph file whose
    first four lines are headers, fifth line is the "x,y" grid resolution,
    and remaining lines are "from to distance" triples.
    """

    def __init__(self, graph_file=None, node_density=50):
        self._nodes = set()
        self._angles = {}
        self._edges = {}
        self._distances = {}
        self._node_density = node_density

        if graph_file is not None:
            with open(graph_file, 'r') as f:
                # Skip the first four header lines.
                lines_after_4 = f.readlines()[4:]

                # The graph resolution.
                linegraphres = lines_after_4[0]
                self._resolution = string_to_node(linegraphres)
                for line in lines_after_4[1:]:
                    from_node, to_node, d = line.split()
                    from_node = string_to_node(from_node)
                    to_node = string_to_node(to_node)

                    if from_node not in self._nodes:
                        self.add_node(from_node)
                    if to_node not in self._nodes:
                        self.add_node(to_node)

                    self._edges.setdefault(from_node, [])
                    self._edges[from_node].append(to_node)
                    self._distances[(from_node, to_node)] = float(d)

    def add_node(self, value):
        self._nodes.add(value)

    def make_orientations(self, node, heading):
        """Assign an orientation to every node: `heading` for `node` itself,
        and for each other node the unit vector pointing from it to `node`,
        processed in order of increasing distance from `node`."""
        import collections
        distance_dic = {}
        for node_iter in self._nodes:
            if node_iter != node:
                distance_dic[sldist(node, node_iter)] = node_iter

        distance_dic = collections.OrderedDict(
            sorted(distance_dic.items()))

        self._angles[node] = heading
        for _, v in distance_dic.items():
            start_to_goal = np.array([node[0] - v[0], node[1] - v[1]])
            # (removed leftover debug print of start_to_goal)
            self._angles[v] = start_to_goal / np.linalg.norm(start_to_goal)

    def add_edge(self, from_node, to_node, distance):
        self._add_edge(from_node, to_node, distance)

    def _add_edge(self, from_node, to_node, distance):
        self._edges.setdefault(from_node, [])
        self._edges[from_node].append(to_node)
        self._distances[(from_node, to_node)] = distance

    def get_resolution(self):
        return self._resolution

    def get_edges(self):
        return self._edges

    def intersection_nodes(self):
        """Return nodes with more than two outgoing edges (intersections).

        BUGFIX: use .get() so nodes without any outgoing edge (possible for
        pure sink nodes added while loading a graph file) no longer raise
        KeyError.
        """
        intersect_nodes = []
        for node in self._nodes:
            if len(self._edges.get(node, [])) > 2:
                intersect_nodes.append(node)

        return intersect_nodes

    # This contains also the non-intersection turns...
    def turn_nodes(self):
        return self._nodes

    def plot_ori(self, c):
        """Scatter-plot the nodes (color `c`) plus their orientation vectors."""
        from matplotlib import collections as mc
        import matplotlib.pyplot as plt
        line_len = 1

        lines = [[(p[0], p[1]), (p[0] + line_len * self._angles[p][0],
                                 p[1] + line_len * self._angles[p][1])] for p in self._nodes]
        lc = mc.LineCollection(lines, linewidth=2, color='green')
        _, ax = plt.subplots()
        ax.add_collection(lc)

        ax.autoscale()
        ax.margins(0.1)

        xs = [p[0] for p in self._nodes]
        ys = [p[1] for p in self._nodes]

        plt.scatter(xs, ys, color=c)

    def plot(self, c):
        """Scatter-plot just the nodes in color `c`."""
        import matplotlib.pyplot as plt
        xs = [p[0] for p in self._nodes]
        ys = [p[1] for p in self._nodes]

        plt.scatter(xs, ys, color=c)
'''
Created Date: Sunday December 2nd 2018
Last Modified: Sunday December 2nd 2018 10:13:25 am
Author: ankurrc
'''

from keras.layers import TimeDistributed, Conv2D, LSTM, Input, BatchNormalization, Flatten, Dense, Concatenate, GRU, AveragePooling2D
from keras.initializers import RandomUniform
from keras.models import Model
from keras.utils import plot_model
from keras.regularizers import l2


class Models(object):
    """Builds DDPG-style actor and critic networks on a shared input head.

    The input head processes a window of camera frames (per-timestep CNN) and
    a window of odometry vectors (per-timestep Dense), aggregates each stream
    with a GRU, and concatenates them into one feature vector that both the
    actor and the critic consume.
    """

    def __init__(self, image_shape=None, odometry_shape=None, window_length=None, nb_actions=None):
        # window_length: number of stacked timesteps fed to the recurrent layers.
        # odometry_shape: shape of one odometry reading, e.g. (4,).
        # image_shape: (H, W, C) of one camera frame.
        # nb_actions: dimensionality of the continuous action space.
        self.window_length = window_length
        self.odometry_shape = odometry_shape
        self.image_shape = image_shape
        self.nb_actions = nb_actions

        # Shared head: (image input, odometry input, fused feature output).
        self.ih_img, self.ih_odo, self.ih_out = self._build_inputhead()
        self.actor = None
        self.critic = None

    def _build_inputhead(self):
        """Build the shared input head.

        Returns:
            (img_ip, odo_ip, op): the image input tensor, the odometry input
            tensor, and the fused, batch-normalized feature tensor.
        """
        layer_prefix = "ih"
        img_ip = Input(shape=(self.window_length,) +
                       self.image_shape, name="{}_image_in".format(layer_prefix))
        odo_ip = Input(shape=(self.window_length,) +
                       self.odometry_shape, name="{}_odometry_in".format(layer_prefix))

        # Per-timestep CNN feature extractor over the image window, then a
        # GRU to aggregate across time.
        x = TimeDistributed(Conv2D(filters=16, kernel_size=(
            5, 5), padding="same", strides=3, activation="relu"))(img_ip)
        x = TimeDistributed(Conv2D(filters=32, kernel_size=(
            3, 3), padding="same", strides=2, activation="relu"))(x)
        x = TimeDistributed(Conv2D(filters=32, kernel_size=(
            3, 3), padding="same", strides=2, activation="relu"))(x)
        x = TimeDistributed(AveragePooling2D(pool_size=2))(x)
        x = TimeDistributed(BatchNormalization())(x)
        x = TimeDistributed(Flatten())(x)
        # x = LSTM(200, recurrent_dropout=0.2, dropout=0.2)(x)
        x = GRU(256, recurrent_dropout=0.2, dropout=0.2)(x)

        # Per-timestep dense encoder over the odometry window, same pattern.
        y = TimeDistributed(Dense(32, activation="relu"))(odo_ip)
        y = TimeDistributed(BatchNormalization())(y)
        # y = LSTM(16, recurrent_dropout=0.2, dropout=0.2)(y)
        y = GRU(16, recurrent_dropout=0.2, dropout=0.2)(y)

        # Fuse both modalities into one normalized feature vector.
        op = Concatenate()([x, y])
        op = BatchNormalization(name="{}_out".format(layer_prefix))(op)

        return img_ip, odo_ip, op

    def build_actor(self):
        """Build the actor: fused features -> tanh-squashed action vector.

        Returns the Keras Model (also stored on self.actor).
        """
        layer_prefix = "actor"

        x = Dense(200, activation="relu", name="{}_dense_1".format(
            layer_prefix))(self.ih_out)
        x = Dense(200, activation="relu",
                  name="{}_dense_2".format(layer_prefix))(x)
        # Small uniform init on the output layer keeps initial actions near 0.
        out = Dense(self.nb_actions, activation="tanh",
                    kernel_initializer=RandomUniform(minval=-3e-4, maxval=3e-4), name="{}_out".format(layer_prefix))(x)

        self.actor = Model(
            inputs=[self.ih_odo, self.ih_img], outputs=out, name="actor")
        print(self.actor.summary())
        # plot_model(self.actor, to_file="imgs/actor.png", show_shapes=True)

        return self.actor

    def build_critic(self):
        """Build the critic: (fused features, action) -> scalar Q-value.

        Also exposes the action input tensor as self.action_input so callers
        can wire the combined actor/critic graph. Returns the Keras Model
        (also stored on self.critic).
        """
        layer_prefix = "critic"

        self.action_input = action_input = Input(shape=(self.nb_actions,),
                                                 name="{}_action_inp".format(layer_prefix))
        x = Concatenate(name="{}_inp".format(layer_prefix))(
            [self.ih_out, action_input])
        x = BatchNormalization()(x)
        x = Dense(200, activation="relu", name="{}_dense_1".format(
            layer_prefix))(x)
        x = Dense(200, activation="relu",
                  name="{}_dense_2".format(layer_prefix))(x)
        # L2 regularization on the Q-output layer; small uniform init as in
        # the actor output.
        out = Dense(1, activation="linear", kernel_initializer=RandomUniform(
            minval=-3e-4, maxval=3e-4), name="{}_out".format(layer_prefix), kernel_regularizer=l2(l=0.01))(x)

        self.critic = Model(
            inputs=[self.ih_odo, self.ih_img, action_input], outputs=out, name="critic")
        print(self.critic.summary())
        # plot_model(self.critic, to_file="imgs/critic.png", show_shapes=True)

        return self.critic
# Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.

from carla.planner.graph import sldist

from carla.planner.astar import AStar
from carla.planner.map import CarlaMap


class CityTrack(object):
    """Wraps the CARLA town map + A* planner to compute and query routes."""

    def __init__(self, city_name):
        # These values are fixed for every city.
        self._node_density = 50.0
        self._pixel_density = 0.1643

        self._map = CarlaMap(city_name, self._pixel_density, self._node_density)

        self._astar = AStar()

        # Refers to the start position of the previous route computation.
        self._previous_node = []

        # The current computed route.
        self._route = None

    def project_node(self, position):
        """
        Project a world position onto the nearest drivable graph node.
        """
        node = self._map.convert_to_node(position)

        # To change the orientation with respect to the map standards.
        node = tuple([int(x) for x in node])

        # Clamp into the graph bounds: floor at zero, cap at resolution - 1.
        node = (max(0, node[0]), max(0, node[1]))
        node = (min(self._map.get_graph_resolution()[0] - 1, node[0]),
                min(self._map.get_graph_resolution()[1] - 1, node[1]))

        node = self._map.search_on_grid(node)

        return node

    def get_intersection_nodes(self):
        return self._map.get_intersection_nodes()

    def get_pixel_density(self):
        return self._pixel_density

    def get_node_density(self):
        return self._node_density

    def is_at_goal(self, source, target):
        return source == target

    def is_at_new_node(self, current_node):
        return current_node != self._previous_node

    def is_away_from_intersection(self, current_node):
        return self._closest_intersection_position(current_node) > 1

    def is_far_away_from_route_intersection(self, current_node):
        # CHECK FOR THE EMPTY CASE
        if self._route is None:
            raise RuntimeError('Impossible to find route'
                               + ' Current planner is limited'
                               + ' Try to select start points away from intersections')

        return self._closest_intersection_route_position(current_node,
                                                         self._route) > 4

    def compute_route(self, node_source, source_ori, node_target, target_ori):
        """Run A* between source and target on the directed wall map; fall
        back to the undirected wall map when the directed search fails."""
        self._previous_node = node_source

        a_star = AStar()
        a_star.init_grid(self._map.get_graph_resolution()[0],
                         self._map.get_graph_resolution()[1],
                         self._map.get_walls_directed(node_source, source_ori,
                                                      node_target, target_ori), node_source,
                         node_target)

        route = a_star.solve()

        # Just a corner case: retry without direction constraints.
        # Clean this to avoid having to use this function.
        if route is None:
            a_star = AStar()
            a_star.init_grid(self._map.get_graph_resolution()[0],
                             self._map.get_graph_resolution()[1], self._map.get_walls(),
                             node_source, node_target)

            route = a_star.solve()

        self._route = route

        return route

    def get_distance_closest_node_route(self, pos, route):
        """Distance from `pos` to the closest intersection node on `route`,
        or to the route's last node when the route has no intersections."""
        distances = [sldist(node_iter, pos)
                     for node_iter in route
                     if node_iter in self._map.get_intersection_nodes()]

        if not distances:
            return sldist(route[-1], pos)
        return min(distances)

    def _closest_intersection_position(self, current_node):
        """Distance from `current_node` to the nearest map intersection."""
        return min(sldist(node_iterator, current_node)
                   for node_iterator in self._map.get_intersection_nodes())

    def _closest_intersection_route_position(self, current_node, route):
        """Distance from `current_node` to the nearest map intersection.

        BUGFIX (performance): the original looped `for _ in route`,
        recomputing the identical distance list len(route) times before
        taking the minimum. `route` never affected the result, so dropping
        the redundant outer loop is behavior-preserving; the parameter is
        kept for interface compatibility.
        """
        return min(sldist(node_iterator, current_node)
                   for node_iterator in self._map.get_intersection_nodes())
#!/usr/bin/env python3

# Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.

"""Connects with a CARLA simulator and displays the available start positions
for the current map."""

from __future__ import print_function

import argparse
import logging
import time

import matplotlib.image as mpimg
import matplotlib.pyplot as plt

from matplotlib.patches import Circle

from carla.client import make_carla_client
from carla.planner.map import CarlaMap
from carla.settings import CarlaSettings
from carla.tcp import TCPConnectionError


def view_start_positions(args):
    """Connect to the CARLA server and plot the requested player start
    positions on the town map image.

    args: parsed argparse namespace with host, port, positions, no_labels.
    Raises RuntimeError if a requested position index is out of range.
    """
    # We assume the CARLA server is already waiting for a client to connect at
    # host:port. The same way as in the client example.
    with make_carla_client(args.host, args.port) as client:
        print('CarlaClient connected')

        # We load the default settings to the client.
        scene = client.load_settings(CarlaSettings())
        print("Received the start positions")

        # We get the number of player starts, in order to detect the city.
        number_of_player_starts = len(scene.player_start_spots)
        if number_of_player_starts > 100:  # WARNING: unsafe way to check for city, see issue #313
            image = mpimg.imread("carla/planner/Town01.png")
            carla_map = CarlaMap('Town01', 0.1653, 50)

        else:

            image = mpimg.imread("carla/planner/Town02.png")
            carla_map = CarlaMap('Town02', 0.1653, 50)

        fig, ax = plt.subplots(1)

        ax.imshow(image)

        # Either every start spot, or only the comma-separated indices the
        # user asked for.
        if args.positions == 'all':
            positions_to_plot = range(len(scene.player_start_spots))
        else:
            positions_to_plot = map(int, args.positions.split(','))

        for position in positions_to_plot:
            # Check if position is valid
            if position >= len(scene.player_start_spots):
                raise RuntimeError('Selected position is invalid')

            # Convert world to pixel coordinates
            pixel = carla_map.convert_to_pixel([scene.player_start_spots[position].location.x,
                                                scene.player_start_spots[position].location.y,
                                                scene.player_start_spots[position].location.z])

            circle = Circle((pixel[0], pixel[1]), 12, color='r', label='A point')
            ax.add_patch(circle)

            if not args.no_labels:
                plt.text(pixel[0], pixel[1], str(position), size='x-small')

        plt.axis('off')
        plt.show()

        # Persist the rendered map for offline inspection.
        fig.savefig('town_positions.pdf', orientation='landscape', bbox_inches='tight')


def main():
    """Parse CLI arguments, then retry the connection until the server is
    reachable (or a fatal RuntimeError / user interrupt occurs)."""
    argparser = argparse.ArgumentParser(description=__doc__)
    argparser.add_argument(
        '-v', '--verbose',
        action='store_true',
        dest='debug',
        help='print debug information')
    argparser.add_argument(
        '--host',
        metavar='H',
        default='localhost',
        help='IP of the host server (default: localhost)')
    argparser.add_argument(
        '-p', '--port',
        metavar='P',
        default=2000,
        type=int,
        help='TCP port to listen to (default: 2000)')
    argparser.add_argument(
        '-pos', '--positions',
        metavar='P',
        default='all',
        help='Indices of the positions that you want to plot on the map. '
             'The indices must be separated by commas (default = all positions)')
    argparser.add_argument(
        '--no-labels',
        action='store_true',
        help='do not display position indices')

    args = argparser.parse_args()

    log_level = logging.DEBUG if args.debug else logging.INFO
    logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)

    logging.info('listening to server %s:%s', args.host, args.port)

    # Connection errors are transient (server still starting): retry after a
    # short sleep. RuntimeError (bad position index) is fatal.
    while True:
        try:

            view_start_positions(args)
            print('Done.')
            return

        except TCPConnectionError as error:
            logging.error(error)
            time.sleep(1)
        except RuntimeError as error:
            logging.error(error)
            break


if __name__ == '__main__':

    try:
        main()
    except KeyboardInterrupt:
        print('\nCancelled by user. Bye!')
import numpy as np

# BUGFIX: model_from_config was imported twice on this line.
from keras.models import model_from_config, Sequential, Model
import keras.optimizers as optimizers
import keras.backend as K


def clone_model(model, custom_objects=None):
    """Return a structural clone of `model` with its weights copied.

    Requires Keras 1.0.7 since get_config has breaking changes.
    """
    # BUGFIX: the default used to be a shared mutable dict ({}); if any
    # caller mutated it, the state would leak into later calls.
    if custom_objects is None:
        custom_objects = {}
    config = {
        'class_name': model.__class__.__name__,
        'config': model.get_config(),
    }
    clone = model_from_config(config, custom_objects=custom_objects)
    clone.set_weights(model.get_weights())
    return clone


def clone_optimizer(optimizer):
    """Return a fresh optimizer configured identically to `optimizer`.

    Accepts either an optimizer instance or its string identifier.
    """
    if type(optimizer) is str:
        return optimizers.get(optimizer)
    # Requires Keras 1.0.7 since get_config has breaking changes.
    params = dict(optimizer.get_config())
    config = {
        'class_name': optimizer.__class__.__name__,
        'config': params,
    }
    if hasattr(optimizers, 'optimizer_from_config'):
        # COMPATIBILITY: Keras < 2.0
        clone = optimizers.optimizer_from_config(config)
    else:
        clone = optimizers.deserialize(config)
    return clone


def get_soft_target_model_updates(target, source, tau):
    """Build (target_w, tau * source_w + (1 - tau) * target_w) update pairs
    for Polyak-averaged target networks."""
    target_weights = target.trainable_weights + sum([l.non_trainable_weights for l in target.layers], [])
    source_weights = source.trainable_weights + sum([l.non_trainable_weights for l in source.layers], [])
    assert len(target_weights) == len(source_weights)

    # Create updates.
    updates = []
    for tw, sw in zip(target_weights, source_weights):
        updates.append((tw, tau * sw + (1. - tau) * tw))
    return updates


def get_object_config(o):
    """Serialize `o` into Keras' {class_name, config} dict; None passes
    through unchanged."""
    if o is None:
        return None

    config = {
        'class_name': o.__class__.__name__,
        'config': o.get_config()
    }
    return config


def huber_loss(y_true, y_pred, clip_value):
    """Huber loss: quadratic within +/- clip_value, linear beyond.

    See https://en.wikipedia.org/wiki/Huber_loss and
    https://medium.com/@karpathy/yes-you-should-understand-backprop-e2f06eab496b
    for details.
    """
    assert clip_value > 0.

    x = y_true - y_pred
    if np.isinf(clip_value):
        # Special case for infinity since Tensorflow does have problems
        # if we compare `K.abs(x) < np.inf`.
        return .5 * K.square(x)

    condition = K.abs(x) < clip_value
    squared_loss = .5 * K.square(x)
    linear_loss = clip_value * (K.abs(x) - .5 * clip_value)
    if K.backend() == 'tensorflow':
        import tensorflow as tf
        if hasattr(tf, 'select'):
            return tf.select(condition, squared_loss, linear_loss)  # condition, true, false
        else:
            return tf.where(condition, squared_loss, linear_loss)  # condition, true, false
    elif K.backend() == 'theano':
        from theano import tensor as T
        return T.switch(condition, squared_loss, linear_loss)
    else:
        raise RuntimeError('Unknown backend "{}".'.format(K.backend()))


class AdditionalUpdatesOptimizer(optimizers.Optimizer):
    """Optimizer wrapper that appends extra update ops (e.g. target-network
    soft updates) to the wrapped optimizer's own updates."""

    def __init__(self, optimizer, additional_updates):
        super(AdditionalUpdatesOptimizer, self).__init__()
        self.optimizer = optimizer
        self.additional_updates = additional_updates

    def get_updates(self, params, loss):
        updates = self.optimizer.get_updates(params=params, loss=loss)
        updates += self.additional_updates
        self.updates = updates
        return self.updates

    def get_config(self):
        return self.optimizer.get_config()


# Based on https://github.com/openai/baselines/blob/master/baselines/common/mpi_running_mean_std.py
class WhiteningNormalizer(object):
    """Online running mean/std whitening: normalize(x) = (x - mean) / std."""

    def __init__(self, shape, eps=1e-2, dtype=np.float64):
        self.eps = eps
        self.shape = shape
        self.dtype = dtype

        self._sum = np.zeros(shape, dtype=dtype)
        self._sumsq = np.zeros(shape, dtype=dtype)
        self._count = 0

        self.mean = np.zeros(shape, dtype=dtype)
        self.std = np.ones(shape, dtype=dtype)

    def normalize(self, x):
        return (x - self.mean) / self.std

    def denormalize(self, x):
        return self.std * x + self.mean

    def update(self, x):
        # Promote a single sample to a batch of one.
        if x.ndim == len(self.shape):
            x = x.reshape(-1, *self.shape)
        assert x.shape[1:] == self.shape

        self._count += x.shape[0]
        self._sum += np.sum(x, axis=0)
        self._sumsq += np.sum(np.square(x), axis=0)

        self.mean = self._sum / float(self._count)
        # eps lower-bounds the std so normalize() never divides by ~0.
        self.std = np.sqrt(np.maximum(np.square(self.eps), self._sumsq / float(self._count) - np.square(self.mean)))
import random

import numpy as np
from numpy.testing import assert_allclose

from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.optimizers import Adam
from rl.agents import DQNAgent, CEMAgent, SARSAAgent
from rl.policy import EpsGreedyQPolicy
from rl.memory import SequentialMemory, EpisodeParameterMemory
from utils.gym.envs import TwoRoundDeterministicRewardEnv


def _make_seeded_env():
    """Create the two-round deterministic env and seed every RNG involved."""
    env = TwoRoundDeterministicRewardEnv()
    np.random.seed(123)
    env.seed(123)
    random.seed(123)
    return env


def _make_mlp(nb_actions, merged_output=False):
    """Tiny MLP shared by every agent in this module.

    merged_output=True folds the linear activation into the last Dense layer
    (the shape the dueling-DQN wrapper and SARSA tests use); otherwise the
    linear activation is a separate layer, as in the plain DQN tests.
    """
    model = Sequential()
    model.add(Dense(16, input_shape=(1,)))
    model.add(Activation('relu'))
    if merged_output:
        model.add(Dense(nb_actions, activation='linear'))
    else:
        model.add(Dense(nb_actions))
        model.add(Activation('linear'))
    return model


def _assert_solved(agent, policy, env):
    """Greedy evaluation: the env's optimal return is 3 per episode."""
    policy.eps = 0.
    history = agent.test(env, nb_episodes=20, visualize=False)
    assert_allclose(np.mean(history.history['episode_reward']), 3.)


def test_dqn():
    env = _make_seeded_env()
    nb_actions = env.action_space.n
    model = _make_mlp(nb_actions)

    memory = SequentialMemory(limit=1000, window_length=1)
    policy = EpsGreedyQPolicy(eps=.1)
    agent = DQNAgent(model=model, nb_actions=nb_actions, memory=memory,
                     nb_steps_warmup=50, target_model_update=1e-1,
                     policy=policy, enable_double_dqn=False)
    agent.compile(Adam(lr=1e-3))

    agent.fit(env, nb_steps=2000, visualize=False, verbose=0)
    _assert_solved(agent, policy, env)


def test_double_dqn():
    env = _make_seeded_env()
    nb_actions = env.action_space.n
    model = _make_mlp(nb_actions)

    memory = SequentialMemory(limit=1000, window_length=1)
    policy = EpsGreedyQPolicy(eps=.1)
    agent = DQNAgent(model=model, nb_actions=nb_actions, memory=memory,
                     nb_steps_warmup=50, target_model_update=1e-1,
                     policy=policy, enable_double_dqn=True)
    agent.compile(Adam(lr=1e-3))

    agent.fit(env, nb_steps=2000, visualize=False, verbose=0)
    _assert_solved(agent, policy, env)


def test_cem():
    env = _make_seeded_env()
    nb_actions = env.action_space.n
    model = _make_mlp(nb_actions)

    memory = EpisodeParameterMemory(limit=1000, window_length=1)
    cem = CEMAgent(model=model, nb_actions=nb_actions, memory=memory)
    cem.compile()

    cem.fit(env, nb_steps=2000, visualize=False, verbose=1)
    history = cem.test(env, nb_episodes=20, visualize=False)
    assert_allclose(np.mean(history.history['episode_reward']), 3.)


def test_duel_dqn():
    env = _make_seeded_env()
    nb_actions = env.action_space.n
    model = _make_mlp(nb_actions, merged_output=True)

    memory = SequentialMemory(limit=1000, window_length=1)
    policy = EpsGreedyQPolicy(eps=.1)
    agent = DQNAgent(model=model, nb_actions=nb_actions, memory=memory,
                     nb_steps_warmup=50, target_model_update=1e-1,
                     policy=policy, enable_double_dqn=False,
                     enable_dueling_network=True)
    agent.compile(Adam(lr=1e-3))

    agent.fit(env, nb_steps=2000, visualize=False, verbose=0)
    _assert_solved(agent, policy, env)


def test_sarsa():
    env = _make_seeded_env()
    nb_actions = env.action_space.n
    model = _make_mlp(nb_actions, merged_output=True)

    policy = EpsGreedyQPolicy(eps=.1)
    sarsa = SARSAAgent(model=model, nb_actions=nb_actions,
                       nb_steps_warmup=50, policy=policy)
    sarsa.compile(Adam(lr=1e-3))

    sarsa.fit(env, nb_steps=20000, visualize=False, verbose=0)
    _assert_solved(sarsa, policy, env)
136 | -------------------------------------------------------------------------------- /code/autodrive/keras-rl/README.md: -------------------------------------------------------------------------------- 1 | # Deep Reinforcement Learning for Keras 2 | [![Build Status](https://api.travis-ci.org/keras-rl/keras-rl.svg?branch=master)](https://travis-ci.org/keras-rl/keras-rl) 3 | [![Documentation](https://readthedocs.org/projects/keras-rl/badge/)](http://keras-rl.readthedocs.io/) 4 | [![License](https://img.shields.io/github/license/mashape/apistatus.svg?maxAge=2592000)](https://github.com/keras-rl/keras-rl/blob/master/LICENSE) 5 | [![Join the chat at https://gitter.im/keras-rl/Lobby](https://badges.gitter.im/keras-rl/Lobby.svg)](https://gitter.im/keras-rl/Lobby) 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 |
15 | 16 | 17 | ## What is it? 18 | 19 | `keras-rl` implements some state-of-the art deep reinforcement learning algorithms in Python and seamlessly integrates with the deep learning library [Keras](http://keras.io). 20 | 21 | Furthermore, `keras-rl` works with [OpenAI Gym](https://gym.openai.com/) out of the box. This means that evaluating and playing around with different algorithms is easy. 22 | 23 | Of course you can extend `keras-rl` according to your own needs. You can use built-in Keras callbacks and metrics or define your own. 24 | Even more so, it is easy to implement your own environments and even algorithms by simply extending some simple abstract classes. Documentation is available [online](http://keras-rl.readthedocs.org). 25 | 26 | 27 | ## What is included? 28 | As of today, the following algorithms have been implemented: 29 | 30 | - [x] Deep Q Learning (DQN) [[1]](http://arxiv.org/abs/1312.5602), [[2]](https://www.nature.com/articles/nature14236) 31 | - [x] Double DQN [[3]](http://arxiv.org/abs/1509.06461) 32 | - [x] Deep Deterministic Policy Gradient (DDPG) [[4]](http://arxiv.org/abs/1509.02971) 33 | - [x] Continuous DQN (CDQN or NAF) [[6]](http://arxiv.org/abs/1603.00748) 34 | - [x] Cross-Entropy Method (CEM) [[7]](http://learning.mpi-sws.org/mlss2016/slides/2016-MLSS-RL.pdf), [[8]](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.81.6579&rep=rep1&type=pdf) 35 | - [x] Dueling network DQN (Dueling DQN) [[9]](https://arxiv.org/abs/1511.06581) 36 | - [x] Deep SARSA [[10]](http://people.inf.elte.hu/lorincz/Files/RL_2006/SuttonBook.pdf) 37 | - [ ] Asynchronous Advantage Actor-Critic (A3C) [[5]](http://arxiv.org/abs/1602.01783) 38 | - [ ] Proximal Policy Optimization Algorithms (PPO) [[11]](https://arxiv.org/abs/1707.06347) 39 | 40 | You can find more information on each agent in the [doc](http://keras-rl.readthedocs.io/en/latest/agents/overview/). 
41 | 42 | 43 | ## Installation 44 | 45 | - Install Keras-RL from Pypi (recommended): 46 | 47 | ``` 48 | pip install keras-rl 49 | ``` 50 | 51 | - Install from Github source: 52 | 53 | ``` 54 | git clone https://github.com/keras-rl/keras-rl.git 55 | cd keras-rl 56 | python setup.py install 57 | ``` 58 | 59 | ## Examples 60 | 61 | If you want to run the examples, you'll also have to install: 62 | - **gym** by OpenAI: [Installation instruction](https://github.com/openai/gym#installation) 63 | - **h5py**: simply run `pip install h5py` 64 | 65 | For atari example you will also need: 66 | - **Pillow**: `pip install Pillow` 67 | - **gym[atari]**: Atari module for gym. Use `pip install gym[atari]` 68 | 69 | Once you have installed everything, you can try out a simple example: 70 | ```bash 71 | python examples/dqn_cartpole.py 72 | ``` 73 | This is a very simple example and it should converge relatively quickly, so it's a great way to get started! 74 | It also visualizes the game during training, so you can watch it learn. How cool is that? 75 | 76 | Some sample weights are available on [keras-rl-weights](https://github.com/matthiasplappert/keras-rl-weights). 77 | 78 | If you have questions or problems, please file an issue or, even better, fix the problem yourself and submit a pull request! 79 | 80 | ## Citing 81 | 82 | If you use `keras-rl` in your research, you can cite it as follows: 83 | ```bibtex 84 | @misc{plappert2016kerasrl, 85 | author = {Matthias Plappert}, 86 | title = {keras-rl}, 87 | year = {2016}, 88 | publisher = {GitHub}, 89 | journal = {GitHub repository}, 90 | howpublished = {\url{https://github.com/keras-rl/keras-rl}}, 91 | } 92 | ``` 93 | 94 | ## References 95 | 96 | 1. *Playing Atari with Deep Reinforcement Learning*, Mnih et al., 2013 97 | 2. *Human-level control through deep reinforcement learning*, Mnih et al., 2015 98 | 3. *Deep Reinforcement Learning with Double Q-learning*, van Hasselt et al., 2015 99 | 4. 
*Continuous control with deep reinforcement learning*, Lillicrap et al., 2015 100 | 5. *Asynchronous Methods for Deep Reinforcement Learning*, Mnih et al., 2016 101 | 6. *Continuous Deep Q-Learning with Model-based Acceleration*, Gu et al., 2016 102 | 7. *Learning Tetris Using the Noisy Cross-Entropy Method*, Szita et al., 2006 103 | 8. *Deep Reinforcement Learning (MLSS lecture notes)*, Schulman, 2016 104 | 9. *Dueling Network Architectures for Deep Reinforcement Learning*, Wang et al., 2016 105 | 10. *Reinforcement learning: An introduction*, Sutton and Barto, 2011 106 | 11. *Proximal Policy Optimization Algorithms*, Schulman et al., 2017 107 | --------------------------------------------------------------------------------