├── CBU-Y junction smart traffic lights-simulation
│   ├── smarttrafficlighst
│   │   ├── FDS traffic control
│   │   │   ├── logfile.txt
│   │   │   ├── seed.npy
│   │   │   ├── intersection
│   │   │   │   ├── STLs.sumocfg
│   │   │   │   ├── STLs.net.xml
│   │   │   │   └── STLs.rou.xml
│   │   │   ├── __test__.py
│   │   │   ├── DQN.py
│   │   │   ├── dataPlotter.py
│   │   │   ├── Traffic_generator_weibu.py
│   │   │   ├── SimulationEnv.py
│   │   │   └── main.py
│   │   └── ML traffic control
│   │       ├── logfile.txt
│   │       ├── seed.npy
│   │       ├── intersection
│   │       │   ├── STLs.sumocfg
│   │       │   └── STLs.net.xml
│   │       ├── __test__.py
│   │       ├── DQN.py
│   │       ├── dataPlotter.py
│   │       ├── Traffic_generator_weibu.py
│   │       ├── SimulationEnv.py
│   │       └── main.py
│   └── README.md
├── README.md
└── CBU-Y junction smart traffic lights-implementation-toy-cars
    └── SSD_toy_car_detction_.ipynb
/CBU-Y junction smart traffic lights-simulation/smarttrafficlighst/FDS traffic control/logfile.txt:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/CBU-Y junction smart traffic lights-simulation/smarttrafficlighst/ML traffic control/logfile.txt:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/CBU-Y junction smart traffic lights-simulation/smarttrafficlighst/FDS traffic control/seed.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/collins-droid/-Smart-Traffic-Light-Control-System/HEAD/CBU-Y junction smart traffic lights-simulation/smarttrafficlighst/FDS traffic control/seed.npy
--------------------------------------------------------------------------------
/CBU-Y junction smart traffic lights-simulation/smarttrafficlighst/ML traffic control/seed.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/collins-droid/-Smart-Traffic-Light-Control-System/HEAD/CBU-Y junction smart traffic lights-simulation/smarttrafficlighst/ML traffic control/seed.npy
--------------------------------------------------------------------------------
/CBU-Y junction smart traffic lights-simulation/smarttrafficlighst/FDS traffic control/intersection/STLs.sumocfg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
--------------------------------------------------------------------------------
/CBU-Y junction smart traffic lights-simulation/smarttrafficlighst/ML traffic control/intersection/STLs.sumocfg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
--------------------------------------------------------------------------------
/CBU-Y junction smart traffic lights-simulation/smarttrafficlighst/ML traffic control/__test__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Mon May 1 14:14:30 2023
4 |
5 | @author: HP
6 | """
7 |
8 | import numpy as np
9 | from DQN import Model
10 | import os
11 | from Traffic_generator_weibu import TrafficGenerator
12 |
13 | from dataPlotter import plot_rewards, plot_intersection_queue_size
14 | def test_dqn():
15 | num_states = 10
16 | num_actions = 5
17 |
18 | model = Model(num_states, num_actions)
19 |
20 | dummy_state = np.random.rand(1, num_states)
21 | dummy_action_values = model.predict(dummy_state)
22 |
23 | assert dummy_action_values.shape == (1, num_actions), "Model prediction shape mismatch"
24 |
25 | print("DQN test passed.")
26 | def test_traffic_generator():
27 | max_steps = 100
28 | seed = 0
29 |
30 | traffic_gen = TrafficGenerator(max_steps)
31 | traffic_gen.generate_routefile(seed)
32 |
33 |     assert os.path.exists("intersection/STLs.rou.xml"), "Route file not generated"  # TrafficGenerator writes STLs.rou.xml
34 | print("Traffic Generator test passed.")
35 | def test_data_plotter():
36 | num_experiments = 5
37 | total_episodes = 50
38 |
39 | dummy_rewards = np.random.rand(num_experiments, total_episodes)
40 | dummy_queue_sizes = np.random.rand(num_experiments, total_episodes)
41 |
42 |     plot_rewards(dummy_rewards)
43 |     plot_intersection_queue_size(dummy_queue_sizes)
44 |
45 | print("Data Plotter test passed (check plots).")
46 |
47 |
48 | if __name__ == "__main__":
49 | test_dqn()
50 | test_traffic_generator()
51 | test_data_plotter()
52 |
53 |
54 |
55 |
56 |
57 |
58 |
59 |
--------------------------------------------------------------------------------
/CBU-Y junction smart traffic lights-simulation/smarttrafficlighst/FDS traffic control/__test__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Mon May 1 14:14:30 2023
4 |
5 | @author: HP
6 | """
7 |
8 | import numpy as np
9 | from DQN import Model
10 | import os
11 | from Traffic_generator_weibu import TrafficGenerator
12 |
13 | from dataPlotter import plot_rewards, plot_intersection_queue_size, show_plots
14 | def test_dqn():
15 | num_states = 10
16 | num_actions = 5
17 |
18 | model = Model(num_states, num_actions)
19 |
20 | dummy_state = np.random.rand(1, num_states)
21 | dummy_action_values = model.predict(dummy_state)
22 |
23 | assert dummy_action_values.shape == (1, num_actions), "Model prediction shape mismatch"
24 |
25 | print("DQN test passed.")
26 | def test_traffic_generator():
27 | max_steps = 100
28 | seed = 0
29 |
30 | traffic_gen = TrafficGenerator(max_steps)
31 | traffic_gen.generate_routefile(seed)
32 |
33 |     assert os.path.exists("intersection/STLs.rou.xml"), "Route file not generated"  # TrafficGenerator writes STLs.rou.xml
34 | print("Traffic Generator test passed.")
35 | def test_data_plotter():
36 | num_experiments = 5
37 | total_episodes = 50
38 |
39 | dummy_rewards = np.random.rand(num_experiments, total_episodes)
40 | dummy_queue_sizes = np.random.rand(num_experiments, total_episodes)
41 |
42 |     plot_rewards(dummy_rewards, "dummy rewards")
43 |     plot_intersection_queue_size(dummy_queue_sizes, "dummy queue sizes")
44 |     show_plots()
45 |     print("Data Plotter test passed (check plots).")
46 |
47 |
48 | if __name__ == "__main__":
49 | test_dqn()
50 | test_traffic_generator()
51 | test_data_plotter()
52 |
53 |
54 |
55 |
56 |
57 |
58 |
59 |
--------------------------------------------------------------------------------
/CBU-Y junction smart traffic lights-simulation/smarttrafficlighst/ML traffic control/DQN.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Mon May  1 11:29:38 2023
4 | Adaptive Traffic Light System based on DQN
5 | Author: collins mtonga
6 | Date: May, 2023
7 |
8 | This implementation is based on:
9 | - The original DQN algorithm by DeepMind (2013)
10 | Source: https://arxiv.org/abs/1312.5602
11 | - The OpenAI Gym library
12 | Source: https://gym.openai.com/
13 | - The Keras library
14 | Source: https://keras.io/
15 | - The SUMO toolkit for traffic simulation
16 | Source: https://www.eclipse.org/sumo/
17 | - RituPande's work on adaptive traffic lights
18 | https://github.com/RituPande/DQL-TSC.git
19 | """
20 | from keras.models import Sequential
21 | from keras.layers import Dense, Dropout
22 | from keras.optimizers import Adam
23 |
24 | class Model:
25 |
26 | def __init__(self, num_states, num_actions):
27 | # Create a Sequential model
28 | model = Sequential()
29 |
30 | # Input layer with 'num_states' input features
31 | model.add(Dense(400, input_dim=num_states, activation='relu'))
32 |
33 | # Dropout layer for regularization
34 | model.add(Dropout(0.2))
35 |
36 | # First hidden layer
37 | model.add(Dense(400, activation='relu'))
38 |
39 | # Dropout layer for regularization
40 | model.add(Dropout(0.2))
41 |
42 | # Second hidden layer
43 | model.add(Dense(200, activation='relu'))
44 |
45 | # Output layer with 'num_actions' output neurons
46 | model.add(Dense(num_actions, activation='linear'))
47 |
48 | # Compile the model with Mean Squared Error loss and Adam optimizer
49 | model.compile(loss='mse', optimizer=Adam())
50 |
51 | self.model = model
52 |
53 | def get_weights(self):
54 | return self.model.get_weights()
55 |
56 | def set_weights(self, w):
57 | self.model.set_weights(w)
58 |
59 | def predict(self, state):
60 | return self.model.predict(state)
61 |
62 |     def fit(self, x_batch, y_batch, batch_size, verbose=0):
63 |         self.model.fit(x_batch, y_batch, batch_size=batch_size, verbose=verbose)
64 |
65 | def save(self, filename):
66 | self.model.save(filename)
67 |
--------------------------------------------------------------------------------
/CBU-Y junction smart traffic lights-simulation/smarttrafficlighst/FDS traffic control/DQN.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 |
4 | """
5 | Adaptive Traffic Light System based on DQN
6 | Author: collins mtonga
7 | Date: May, 2023
8 |
9 | This implementation is based on:
10 | - The original DQN algorithm by DeepMind (2013)
11 | Source: https://arxiv.org/abs/1312.5602
12 | - The OpenAI Gym library
13 | Source: https://gym.openai.com/
14 | - The Keras library
15 | Source: https://keras.io/
16 | - The SUMO toolkit for traffic simulation
17 | Source: https://www.eclipse.org/sumo/
18 | -RituPandes work adaptive traffic lights
19 | https://github.com/RituPande/DQL-TSC.git
20 | """
21 | """
22 | Created on Mon May 1 11:29:38 2023
23 |
24 | @author:Collins
25 | """
26 | from keras.models import Sequential
27 | from keras.layers import Dense, Dropout
28 | from keras.optimizers import Adam
29 |
30 | class Model:
31 |
32 | def __init__(self, num_states, num_actions):
33 | # Create a Sequential model
34 | model = Sequential()
35 |
36 | # Input layer with 'num_states' input features
37 | model.add(Dense(400, input_dim=num_states, activation='relu'))
38 |
39 | # Dropout layer for regularization
40 | model.add(Dropout(0.2))
41 |
42 | # First hidden layer
43 | model.add(Dense(400, activation='relu'))
44 |
45 | # Dropout layer for regularization
46 | model.add(Dropout(0.2))
47 |
48 | # Second hidden layer
49 | model.add(Dense(200, activation='relu'))
50 |
51 | # Output layer with 'num_actions' output neurons
52 | model.add(Dense(num_actions, activation='linear'))
53 |
54 | # Compile the model with Mean Squared Error loss and Adam optimizer
55 | model.compile(loss='mse', optimizer=Adam())
56 |
57 | self.model = model
58 |
59 | def get_weights(self):
60 | return self.model.get_weights()
61 |
62 | def set_weights(self, w):
63 | self.model.set_weights(w)
64 |
65 | def predict(self, state):
66 | return self.model.predict(state)
67 |
68 |     def fit(self, x_batch, y_batch, batch_size, verbose=0):
69 |         self.model.fit(x_batch, y_batch, batch_size=batch_size, verbose=verbose)
70 |
71 | def save(self, filename):
72 | self.model.save(filename)
73 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Smart Traffic Light Control System
4 |
5 | This project aims to optimize traffic light control using a Deep Q-Network (DQN) reinforcement learning model. The model learns from simulated traffic data generated by SUMO (Simulation of Urban Mobility) and is deployed in a real-world environment using cameras, OpenCV, and YOLOv5 for object detection.
6 |
7 | ## Modules
8 |
9 | - DQN.py: Implements the neural network model using Keras and its training methods (predict, fit, save).
10 | - Traffic_generator_weibu.py: Generates traffic scenarios using a Weibull distribution with 1000 cars per episode.
11 | - dataPlotter.py: Plots the performance metrics (rewards, queue size) during training using matplotlib and seaborn.
12 | - Main module (main.py): Integrates all the other modules, manages the training process, and interacts with the SUMO simulation (see the sketch below).
13 |
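A minimal sketch of how these simulation modules fit together is shown below. The SUMO binary name and the episode length are assumptions; the full loop in main.py additionally applies the chosen action to the lights via TraCI, uses epsilon-greedy exploration, and trains the network from a replay buffer.

```python
import numpy as np
from DQN import Model
from Traffic_generator_weibu import TrafficGenerator
from SimulationEnv import SumoEnv

MAX_STEPS = 3600                          # assumed episode length in simulation steps
env = SumoEnv("sumo", MAX_STEPS)          # "sumo-gui" shows the simulation window instead
traffic_gen = TrafficGenerator(MAX_STEPS)
agent = Model(num_states=80, num_actions=4)

traffic_gen.generate_routefile(seed=0)    # writes intersection/STLs.rou.xml
state = env.start()
done = False
while not done:
    q_values = agent.predict(np.reshape(state, [1, 80]))
    action = int(np.argmax(q_values))     # main.py maps this to a green phase via traci.trafficlight.setPhase
    reward, state, done = env.step(10)    # advance the simulation and observe the new state
```
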
14 | ### Real-world implementation modules
15 |
16 | - CameraModule: Captures and preprocesses camera feeds using OpenCV.
17 | - YOLOv5Module: Performs object detection using YOLOv5 model and extracts the required parameters.
18 | - DataProcessing: Processes the detected parameters and converts them into the input format required by qmodel.h5.
19 | - ControlModule: Loads qmodel.h5, provides the processed input features, and executes the control actions based on the model's output.
20 | - Main module: Integrates all the other modules and manages the end-to-end process (a minimal control-step sketch follows below).
21 |
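The control step on the deployment side can be sketched as follows, assuming the trained network was saved as qmodel.h5 and that the detection results have already been reduced to the same 80-element state vector used in simulation. The helper name build_state_from_detections is hypothetical; the camera and YOLOv5 modules themselves are not shown.

```python
import numpy as np
from keras.models import load_model

qmodel = load_model("qmodel.h5")  # network trained in the SUMO simulation

def choose_action(state_vector):
    """Pick one of the four traffic-light actions from an 80-element state vector."""
    q_values = qmodel.predict(np.reshape(state_vector, [1, 80]))
    return int(np.argmax(q_values))

# state = build_state_from_detections(detections)   # hypothetical DataProcessing output
# action = choose_action(state)                     # ControlModule then switches the phase
```
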
22 | ## Installation
23 |
24 | 1. Clone the repository using `git clone https://github.com/collins-droid/Smart-Traffic-Light-Control-System.git`.
25 | 2. Install the dependencies by running `pip install -r requirements.txt`.
26 | 3. Install SUMO (Simulation of Urban Mobility) following the instructions on the official website: https://sumo.dlr.de/docs/Installing/index.html
27 | 4. Run the main module using `python main.py`.
28 |
29 | ## Usage
30 |
31 | The project can be used to train a DQN model on simulated traffic data and deploy the model in a real-world environment. Camera feeds and object detection with YOLOv5 are used to provide the necessary input features for the model, which then decides the traffic light control actions.
32 |
33 | ## Contributing
34 |
35 | Pull requests are welcome. For major changes, please open an issue first to discuss what you would like to change.
36 |
37 |
--------------------------------------------------------------------------------
/CBU-Y junction smart traffic lights-simulation/smarttrafficlighst/FDS traffic control/dataPlotter.py:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | """
5 | Adaptive Traffic Light System based on DQN
6 | Author: collins mtonga
7 | Date: May, 2023
8 |
9 | This implementation is based on:
10 | - The original DQN algorithm by DeepMind (2013)
11 | Source: https://arxiv.org/abs/1312.5602
12 | - The OpenAI Gym library
13 | Source: https://gym.openai.com/
14 | - The Keras library
15 | Source: https://keras.io/
16 | - The SUMO toolkit for traffic simulation
17 | Source: https://www.eclipse.org/sumo/
18 | - RituPande's work on adaptive traffic lights
19 | https://github.com/RituPande/DQL-TSC.git
20 | """
21 |
22 |
23 | import matplotlib.pyplot as plt
24 | import numpy as np
25 | import glob
26 | import seaborn as sns
27 |
28 |
29 | def get_file_names():
30 | qmodel_file_name = glob.glob('./qmodel*')
31 | stats_file_name = glob.glob('./stats*')
32 |
33 | if not qmodel_file_name:
34 | qmodel_file_name = ''
35 | else:
36 | qmodel_file_name = qmodel_file_name[0]
37 |
38 | if not stats_file_name:
39 | stats_file_name = ''
40 | else:
41 | stats_file_name = stats_file_name[0]
42 |
43 | return qmodel_file_name, stats_file_name
44 |
45 |
46 | def get_init_epoch(filename, total_episodes):
47 | if filename:
48 | index = filename.find('_')
49 | exp_start = index + 1
50 | exp_end = int(filename.find('_', exp_start))
51 | exp = int(filename[exp_start:exp_end])
52 | epoch_start = exp_end + 1
53 | epoch_end = int(filename.find('.', epoch_start))
54 | epoch = int(filename[epoch_start:epoch_end])
55 | if epoch < total_episodes - 1:
56 | epoch += 1
57 | else:
58 | epoch = 0
59 | exp += 1
60 |
61 | else:
62 | exp = 0
63 | epoch = 0
64 | return exp, epoch
65 |
66 |
67 | def get_stats(stats_filename, num_experiments, total_episodes, learn=True):
68 | if stats_filename and learn:
69 | stats = np.load(stats_filename, allow_pickle=True)[()]
70 |
71 | else:
72 | reward_store = np.zeros((num_experiments, total_episodes))
73 | intersection_queue_store = np.zeros((num_experiments, total_episodes))
74 | stats = {'rewards': reward_store, 'intersection_queue': intersection_queue_store}
75 |
76 | return stats
77 |
78 |
79 | def plot_sample(sample, title, xlabel, legend_label, show=True, subplot=False):
80 | if not subplot:
81 | plt.figure()
82 |
83 |     ax = sns.histplot(sample, kde=True, label=legend_label)  # histplot replaces the deprecated distplot
84 | ax.set(xlabel=xlabel, title=title)
85 | ax.legend()
86 |
87 | if show:
88 | plt.show()
89 |
90 |
91 |
92 | def plot_rewards(reward_store, label):
93 | x = np.mean(reward_store, axis=0)
94 | plt.plot(x, label=label)
95 | plt.xlabel('Episodes')
96 |     plt.ylabel('Cumulative negative wait times')
97 |     plt.title('Cumulative negative wait times across episodes')
98 | plt.legend()
99 |
100 |
101 | def plot_intersection_queue_size(intersection_queue_store, label):
102 | x = np.mean(intersection_queue_store, axis=0)
103 | plt.plot(x, label=label, color='m')
104 | plt.xlabel('Episodes')
105 |     plt.ylabel('Cumulative intersection queue size')
106 |     plt.title('Cumulative intersection queue size across episodes')
107 | plt.legend()
108 |
109 |
110 | def show_plots():
111 | plt.show()
112 |
--------------------------------------------------------------------------------
/CBU-Y junction smart traffic lights-simulation/smarttrafficlighst/ML traffic control/dataPlotter.py:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | """
5 | Adaptive Traffic Light System based on DQN
6 | Author: collins mtonga
7 | Date: May, 2023
8 |
9 | This implementation is based on:
10 | - The original DQN algorithm by DeepMind (2013)
11 | Source: https://arxiv.org/abs/1312.5602
12 | - The OpenAI Gym library
13 | Source: https://gym.openai.com/
14 | - The Keras library
15 | Source: https://keras.io/
16 | - The SUMO toolkit for traffic simulation
17 | Source: https://www.eclipse.org/sumo/
18 | - RituPande's work on adaptive traffic lights
19 | https://github.com/RituPande/DQL-TSC.git
20 | """
21 |
22 |
23 |
24 | import matplotlib.pyplot as plt
25 | import numpy as np
26 | import glob
27 | import seaborn as sns
28 |
29 | def get_file_names():
30 | qmodel_file_name = glob.glob('./qmodel*')
31 | stats_file_name = glob.glob('./stats*')
32 |
33 | if not qmodel_file_name:
34 | qmodel_file_name = ''
35 | else:
36 | qmodel_file_name = qmodel_file_name[0]
37 |
38 | if not stats_file_name:
39 | stats_file_name = ''
40 | else:
41 | stats_file_name = stats_file_name[0]
42 |
43 | return qmodel_file_name, stats_file_name
44 |
45 | def get_init_epoch( filename,total_episodes ):
46 |
47 | if filename:
48 | index = filename.find('_')
49 | exp_start = index + 1
50 | exp_end = int(filename.find('_', exp_start))
51 | exp = int(filename[exp_start:exp_end])
52 | epoch_start= exp_end + 1
53 | epoch_end = int(filename.find('.', epoch_start))
54 | epoch = int(filename[epoch_start:epoch_end])
55 | if epoch < total_episodes -1:
56 | epoch +=1
57 | else:
58 | epoch = 0
59 | exp +=1
60 |
61 | else:
62 | exp=0
63 | epoch = 0
64 | return exp , epoch
65 |
66 | def get_stats(stats_filename, num_experiments, total_episodes, learn = True):
67 |
68 | if stats_filename and learn:
69 | stats =np.load(stats_filename, allow_pickle = True)[()]
70 |
71 | else:
72 | reward_store = np.zeros((num_experiments,total_episodes))
73 | intersection_queue_store = np.zeros((num_experiments,total_episodes))
74 | stats = {'rewards': reward_store, 'intersection_queue': intersection_queue_store }
75 |
76 | return stats
77 |
78 |
79 |
80 | def plot_sample(sample, title, xlabel, legend_label, show=True):
81 |     ax = sns.histplot(sample, kde=True, label=legend_label)  # histplot replaces the deprecated distplot
82 |     ax.set(xlabel=xlabel, title=title)
83 |     ax.legend()
87 | if show:
88 | plt.show()
89 |
90 |
91 | def plot_rewards(reward_store):
92 |     x = np.mean(reward_store, axis=0)
93 |     plt.plot(x, label="Cumulative negative wait times")
94 |     plt.xlabel('Episodes')
95 |     plt.ylabel('Cumulative negative wait times')
96 |     plt.title('Cumulative negative wait times across episodes')
97 |     plt.legend()
98 |     plt.show()
99 |
100 | def plot_intersection_queue_size(intersection_queue_store):
101 |
102 |     x = np.mean(intersection_queue_store, axis=0)
103 |     plt.plot(x, label="Cumulative intersection queue size", color='m')
104 |     plt.xlabel('Episodes')
105 |     plt.ylabel('Cumulative intersection queue size')
106 |     plt.title('Cumulative intersection queue size across episodes')
107 |     plt.legend()
108 |     plt.show()
109 |
--------------------------------------------------------------------------------
/CBU-Y junction smart traffic lights-simulation/README.md:
--------------------------------------------------------------------------------
1 | # Simulation of Smart Traffic Light System (STLS) in SUMO
2 |
3 | ## Table of Contents
4 | - [Introduction](#introduction)
5 | - [Stages of Simulation Setup](#stages-of-simulation-setup)
6 | - [Requirements Analysis](#requirements-analysis)
7 | - [Software Design](#software-design)
8 | - [Implementation](#implementation)
9 | - [Testing](#testing)
10 | - [Documentation](#documentation)
11 | - [Deployment](#deployment)
12 | - [Maintenance](#maintenance)
13 | - [Description of Main Modules](#description-of-main-modules)
14 | - [1. Deep Q-Network](#1-deep-q-network)
15 | - [Input Parameters](#input-parameters)
16 | - [Output Parameters](#output-parameters)
17 | - [2. Traffic Generator (Weibu)](#2-traffic-generator-weibu)
18 | - [3. SUMO Environment](#3-sumo-environment)
19 | - [State Space](#state-space)
20 | - [Action Space](#action-space)
21 | - [Reward Function](#reward-function)
22 | - [Terminal State](#terminal-state)
23 | - [4. Data Plotter](#4-data-plotter)
24 | - [5. Main Module](#5-main-module)
25 |
26 | ## Introduction
27 |
28 | This README provides an overview of the simulation setup for the Smart Traffic Light System (STLS) implemented in SUMO. The system aims to optimize traffic signal timings using a Deep Q-Network (DQN) in the SUMO simulation environment.
29 |
30 | ## Stages of Simulation Setup
31 |
32 | ### Requirements Analysis
33 |
34 | The goal of the software is to optimize traffic signal timings using a Deep Q-Network (DQN) in the SUMO simulation environment.
35 |
36 | ### Software Design
37 |
38 | The software is designed with five main components: DQN.py, Traffic_generator_weibu.py, dataPlotter.py, SimulationEnv.py, and the main module (main.py).
39 |
40 | ### Implementation
41 |
42 | Modules and their functionalities:
43 | - DQN.py: Implements the neural network model using Keras and its training methods.
44 | - Traffic_generator_weibu.py: Generates traffic scenarios using a Weibull distribution.
45 | - dataPlotter.py: Plots the performance metrics during training using matplotlib and seaborn.
46 | - SimulationEnv.py: Encapsulates the interaction with the SUMO simulation.
47 | - Main module: Integrates all the other modules and manages the training process.
48 |
49 | ### Testing
50 |
51 | Each module was tested independently to ensure proper functionality.
52 |
53 | ### Documentation
54 |
55 | Documentation for each module, covering its functionality, inputs, outputs, and usage, is provided on GitHub.
56 |
57 | ### Deployment
58 |
59 | The software is ready for use and can be distributed along with its dependencies (Python, Keras, SUMO, etc.) and installation instructions.
60 |
61 | ### Maintenance
62 |
63 | Continuous monitoring and updates are performed to implement features, bug fixes, and performance improvements as needed.
64 |
65 | ## Description of Main Modules
66 |
67 | ### 1. Deep Q-Network
68 |
69 | #### Input Parameters
70 |
71 | | Description | Explanation |
72 | |-----------------------|-------------------------------------------------------|
73 | | Vehicle count | Number of vehicles in each lane approaching the intersection |
74 | | Vehicle speed | Average speed of vehicles in each lane |
75 | | Traffic light state | Current state of the traffic lights for each lane |
76 | | Vehicle waiting time | Average waiting time of vehicles in each lane |
77 | | Lane occupancy | Percentage of lane occupancy in each lane |
78 |
79 | #### Output Parameters
80 |
81 | | Description | Explanation |
82 | |---------------------------------|-----------------------------------------------------------|
83 | | Change the traffic light state | Change the traffic light state for a specific lane |
84 | | Extend the green light duration| Extend the green light duration for a specific lane |
85 | | Shorten the green light duration| Shorten the green light duration for a specific lane |
86 |
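For reference, the network that produces these action values is defined in DQN.py: a fully connected Keras model with two 400-unit hidden layers, dropout for regularization, a 200-unit layer, and a linear output per action. In the code itself the input is the 80-element state described under State Space and there are four phase actions:

```python
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import Adam

NUM_STATES, NUM_ACTIONS = 80, 4   # values used in SimulationEnv.py and main.py

model = Sequential([
    Dense(400, input_dim=NUM_STATES, activation='relu'),
    Dropout(0.2),
    Dense(400, activation='relu'),
    Dropout(0.2),
    Dense(200, activation='relu'),
    Dense(NUM_ACTIONS, activation='linear'),  # one Q-value per traffic-light action
])
model.compile(loss='mse', optimizer=Adam())
```
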
87 | ### 2. Traffic Generator (Weibu)
88 |
89 | This module generates traffic scenarios for the SUMO simulation using a Weibull distribution.
90 |
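A condensed, vectorized sketch of the departure-time logic in Traffic_generator_weibu.py is given below; the episode length of 3600 steps is an assumed value, and the real module additionally writes one vehicle entry per departure into intersection/STLs.rou.xml.

```python
import math
import numpy as np

n_cars = 1000          # cars per episode, as in TrafficGenerator
max_steps = 3600       # assumed episode length in simulation steps

timings = np.sort(np.random.weibull(2, n_cars))              # Weibull-shaped arrival pattern
min_old, max_old = math.floor(timings[1]), math.ceil(timings[-1])
car_gen_steps = np.rint(max_steps * (timings - min_old) / (max_old - min_old))
```
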
91 | ### 3. SUMO Environment
92 |
93 | #### State Space
94 |
95 | The state space consists of a discrete representation of the positions of vehicles approaching the traffic light.
96 |
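Concretely, SimulationEnv._encode_env_state returns an 80-element binary vector: each of 8 lane groups is divided into 10 distance cells between the stop line and 500 m upstream, and a cell is set to 1 when it contains a vehicle. A simplified sketch of that mapping is shown below (the lane_group/distance pairs are assumed to come from TraCI queries, as in the real code):

```python
import numpy as np

CELL_EDGES = [7, 14, 21, 28, 40, 60, 100, 200, 350]  # metres from the light; anything further (up to 500 m) falls in the last cell

def encode_state(vehicles):
    """vehicles: iterable of (lane_group, distance_to_light) pairs, lane_group in 0..7."""
    state = np.zeros(80)
    for lane_group, dist in vehicles:
        cell = next((i for i, edge in enumerate(CELL_EDGES) if dist < edge), 9)
        # the real code composes the index as int(str(lane_group) + str(cell)),
        # which equals lane_group * 10 + cell (and just cell for lane group 0)
        state[lane_group * 10 + cell] = 1
    return state
```
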
97 | #### Action Space
98 |
99 | The action space consists of the possible traffic light configurations.
100 |
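In this implementation (main.py) the action space has four discrete actions, each switching traffic light "J6" to one of the green phases defined for the junction; every green phase is followed by a dedicated yellow phase before the next action takes effect. The phase constants, named as in main.py:

```python
PHASE_NS_GREEN  = 0  # action 0: north-south straight traffic
PHASE_NSL_GREEN = 2  # action 1: north-south left turn
PHASE_EW_GREEN  = 4  # action 2: east-west straight traffic
PHASE_EWL_GREEN = 6  # action 3: east-west left turn
```
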
101 | #### Reward Function
102 |
103 | The reward function encourages the agent to minimize the waiting time of vehicles at the intersection.
104 |
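In SimulationEnv.step the reward is computed from the cumulative waiting time of vehicles on the incoming edges (E3, E4, E5) before and after the chosen action, so reducing queueing yields a higher (less negative) reward:

```python
def reward_for_step(previous_waiting_time, new_waiting_time, discount=0.9):
    """Reward used in SimulationEnv.step: drop in cumulative waiting time between actions."""
    return discount * previous_waiting_time - new_waiting_time
```
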
105 | #### Terminal State
106 |
107 | A terminal state is reached when the total number of steps in the simulation reaches the maximum allowed steps.
108 |
109 | ### 4. Data Plotter
110 |
111 | This module plots performance metrics collected during training.
112 |
113 | ### 5. Main Module
114 |
115 | The main module integrates all other modules and oversees the simulation process.
116 |
117 |
--------------------------------------------------------------------------------
/CBU-Y junction smart traffic lights-simulation/smarttrafficlighst/FDS traffic control/Traffic_generator_weibu.py:
--------------------------------------------------------------------------------
1 |
2 |
3 | """
4 | Adaptive Traffic Light System based on DQN
5 | Author: collins mtonga
6 | Date: May, 2023
7 |
8 | This implementation is based on:
9 | - The original DQN algorithm by DeepMind (2013)
10 | Source: https://arxiv.org/abs/1312.5602
11 | - The OpenAI Gym library
12 | Source: https://gym.openai.com/
13 | - The Keras library
14 | Source: https://keras.io/
15 | - The SUMO toolkit for traffic simulation
16 | Source: https://www.eclipse.org/sumo/
17 | - RituPande's work on adaptive traffic lights
18 | https://github.com/RituPande/DQL-TSC.git
19 | """
20 |
21 |
22 |
23 | import numpy as np
24 | import math
25 |
26 | # HANDLE THE GENERATION OF VEHICLES IN ONE EPISODE
27 | class TrafficGenerator:
28 | def __init__(self, max_steps):
29 | self._n_cars_generated = 1000 # how many cars per episode
30 | self._max_steps = max_steps
31 |
32 | # generation of routes of cars
33 | def generate_routefile(self, seed):
34 |
35 |         if seed >= 0:
36 | np.random.seed(seed) # make tests reproducible
37 |
38 | # the generation of cars is distributed according to a weibull distribution
39 | timings = np.random.weibull(2, self._n_cars_generated)
40 | timings = np.sort(timings)
41 |
42 | # reshape the distribution to fit the interval 0:max_steps
43 | car_gen_steps = []
44 | min_old = math.floor(timings[1])
45 | max_old = math.ceil(timings[-1])
46 | min_new = 0
47 | max_new = self._max_steps
48 | for value in timings:
49 | car_gen_steps = np.append(car_gen_steps, ((max_new - min_new) / (max_old - min_old)) * (value - min_old) + min_new)
50 |
51 | car_gen_steps = np.rint(car_gen_steps) # round every value to int -> effective steps when a car will be generated
52 |
53 | # produce the file for cars generation, one car per line
54 | with open("intersection/STLs.rou.xml", "w") as routes:
55 | print("""
56 |
57 |
58 |
59 |
60 |
61 |
62 |
63 |
64 |
65 |
66 |
67 |
68 |
69 |
70 |
71 |
72 |
73 | """, file=routes)
74 |
75 | for car_counter, step in enumerate(car_gen_steps):
76 | straight_or_turn = np.random.uniform()
77 | if straight_or_turn < 0.75: # choose direction: straight or turn - 75% of times the car goes straight
78 | route_straight = np.random.randint(1, 5) # choose a random source & destination
79 | if route_straight == 1:
80 | print(' ' % (car_counter, step), file=routes)
81 | elif route_straight == 2:
82 | print(' ' % (car_counter, step), file=routes)
83 | elif route_straight == 3:
84 | print(' ' % (car_counter, step), file=routes)
85 | else:
86 | print(' ' % (car_counter, step), file=routes)
87 |                 else:  # the remaining 25% of the time the car turns
88 |                     route_turn = np.random.randint(1, 9)  # choose a random source & destination
89 | if route_turn == 1:
90 | print(' ' % (car_counter, step), file=routes)
91 | elif route_turn == 2:
92 | print(' ' % (car_counter, step), file=routes)
93 | elif route_turn == 3:
94 | print(' ' % (car_counter, step), file=routes)
95 | elif route_turn == 4:
96 | print(' ' % (car_counter, step), file=routes)
97 | elif route_turn == 5:
98 | print(' ' % (car_counter, step), file=routes)
99 | elif route_turn == 6:
100 | print(' ' % (car_counter, step), file=routes)
101 | elif route_turn == 7:
102 | print(' ' % (car_counter, step), file=routes)
103 | elif route_turn == 8:
104 | print(' ' % (car_counter, step), file=routes)
105 |
106 | print("", file=routes)
107 |
--------------------------------------------------------------------------------
/CBU-Y junction smart traffic lights-simulation/smarttrafficlighst/ML traffic control/Traffic_generator_weibu.py:
--------------------------------------------------------------------------------
1 |
2 |
3 | """
4 | Adaptive Traffic Light System based on DQN
5 | Author: collins mtonga
6 | Date: May, 2023
7 |
8 | This implementation is based on:
9 | - The original DQN algorithm by DeepMind (2013)
10 | Source: https://arxiv.org/abs/1312.5602
11 | - The OpenAI Gym library
12 | Source: https://gym.openai.com/
13 | - The Keras library
14 | Source: https://keras.io/
15 | - The SUMO toolkit for traffic simulation
16 | Source: https://www.eclipse.org/sumo/
17 | - RituPande's work on adaptive traffic lights
18 | https://github.com/RituPande/DQL-TSC.git
19 | """
20 |
21 |
22 |
23 | import numpy as np
24 | import math
25 |
26 | # HANDLE THE GENERATION OF VEHICLES IN ONE EPISODE
27 | class TrafficGenerator:
28 | def __init__(self, max_steps):
29 | self._n_cars_generated = 1000 # how many cars per episode
30 | self._max_steps = max_steps
31 |
32 | # generation of routes of cars
33 | def generate_routefile(self, seed):
34 |
35 |         if seed >= 0:
36 | np.random.seed(seed) # make tests reproducible
37 |
38 | # the generation of cars is distributed according to a weibull distribution
39 | timings = np.random.weibull(2, self._n_cars_generated)
40 | timings = np.sort(timings)
41 |
42 | # reshape the distribution to fit the interval 0:max_steps
43 | car_gen_steps = []
44 | min_old = math.floor(timings[1])
45 | max_old = math.ceil(timings[-1])
46 | min_new = 0
47 | max_new = self._max_steps
48 | for value in timings:
49 | car_gen_steps = np.append(car_gen_steps, ((max_new - min_new) / (max_old - min_old)) * (value - min_old) + min_new)
50 |
51 | car_gen_steps = np.rint(car_gen_steps) # round every value to int -> effective steps when a car will be generated
52 |
53 | # produce the file for cars generation, one car per line
54 | with open("intersection/STLs.rou.xml", "w") as routes:
55 | print("""
56 |
57 |
58 |
59 |
60 |
61 |
62 |
63 |
64 |
65 |
66 |
67 |
68 |
69 |
70 |
71 |
72 |
73 | """, file=routes)
74 |
75 | for car_counter, step in enumerate(car_gen_steps):
76 | straight_or_turn = np.random.uniform()
77 | if straight_or_turn < 0.75: # choose direction: straight or turn - 75% of times the car goes straight
78 | route_straight = np.random.randint(1, 5) # choose a random source & destination
79 | if route_straight == 1:
80 | print(' ' % (car_counter, step), file=routes)
81 | elif route_straight == 2:
82 | print(' ' % (car_counter, step), file=routes)
83 | elif route_straight == 3:
84 | print(' ' % (car_counter, step), file=routes)
85 | else:
86 | print(' ' % (car_counter, step), file=routes)
87 |                 else:  # the remaining 25% of the time the car turns
88 |                     route_turn = np.random.randint(1, 9)  # choose a random source & destination
89 | if route_turn == 1:
90 | print(' ' % (car_counter, step), file=routes)
91 | elif route_turn == 2:
92 | print(' ' % (car_counter, step), file=routes)
93 | elif route_turn == 3:
94 | print(' ' % (car_counter, step), file=routes)
95 | elif route_turn == 4:
96 | print(' ' % (car_counter, step), file=routes)
97 | elif route_turn == 5:
98 | print(' ' % (car_counter, step), file=routes)
99 | elif route_turn == 6:
100 | print(' ' % (car_counter, step), file=routes)
101 | elif route_turn == 7:
102 | print(' ' % (car_counter, step), file=routes)
103 | elif route_turn == 8:
104 | print(' ' % (car_counter, step), file=routes)
105 |
106 | print("", file=routes)
107 |
--------------------------------------------------------------------------------
/CBU-Y junction smart traffic lights-simulation/smarttrafficlighst/FDS traffic control/SimulationEnv.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Mon May 1 11:29:38 2023
4 |
5 | @author:Collins
6 | """
7 | import traci
8 | import numpy as np
9 |
10 | class SumoEnv:
11 |
12 | def __init__(self, sumoBinary, max_steps):
13 | self.sumoCmd = [sumoBinary, "-c", "intersection/STLs.sumocfg", "--no-step-log", "true", "--waiting-time-memory", str(max_steps), "--log","logfile.txt"]
14 | self.SUMO_INT_LANE_LENGTH = 500
15 | self.num_states = 80 # 0-79 see _encode_env_state function for details
16 | self.max_steps = max_steps
17 |
18 | self._init()
19 |
20 |
21 |
22 | def _init(self):
23 | self.current_state = None
24 | self.curr_wait_time = 0
25 | self.steps = 0
26 |
27 | def get_state(self):
28 | return self.current_state
29 |
30 | def start(self):
31 | traci.start(self.sumoCmd)
32 | self.current_state = self._encode_env_state()
33 | return self.current_state
34 |
35 | def reset(self):
36 | #traci.close()
37 | traci.start(self.sumoCmd)
38 | self._init()
39 | self.current_state = self._encode_env_state()
40 | return self.current_state
41 |
42 | def step(self, num_steps=1):
43 | if self.steps + num_steps > self.max_steps:
44 | num_steps = self.max_steps - self.steps
45 |
46 | for i in range(num_steps):
47 | traci.simulationStep()
48 |
49 | self.steps += num_steps
50 | self.current_state = self._encode_env_state()
51 | new_wait_time = self._get_waiting_time()
52 |
53 | # calculate reward of action taken (change in cumulative waiting time between actions)
54 | reward = 0.9 * self.curr_wait_time - new_wait_time
55 | self.curr_wait_time = new_wait_time
56 |
57 |         # one episode ends when the simulation reaches the maximum number of steps
58 | is_terminal = self.steps >= self.max_steps
59 | return (reward, self.current_state, is_terminal)
60 |
61 | def _get_waiting_time(self):
62 | incoming_roads = ["E3", "E5", "E4"]
63 | total_waiting_time = 0
64 | for veh_id in traci.vehicle.getIDList():
65 | wait_time_car = traci.vehicle.getAccumulatedWaitingTime(veh_id)
66 | road_id = traci.vehicle.getRoadID(veh_id) # get the road id where the car is located
67 | if road_id in incoming_roads: # consider only the waiting times of cars in incoming roads
68 | total_waiting_time += wait_time_car
69 | return total_waiting_time
70 |
71 | def get_intersection_q_per_step(self):
72 | halt_E3 = traci.edge.getLastStepHaltingNumber("E3")
73 | halt_E4 = traci.edge.getLastStepHaltingNumber("E4")
74 | halt_E5 = traci.edge.getLastStepHaltingNumber("E5")
75 |
76 | intersection_queue = halt_E3 + halt_E4 + halt_E5
77 | return intersection_queue
78 |
79 | def _encode_env_state(self):
80 | state = np.zeros(self.num_states)
81 |
82 | for veh_id in traci.vehicle.getIDList():
83 | lane_pos = traci.vehicle.getLanePosition(veh_id)
84 | lane_id = traci.vehicle.getLaneID(veh_id)
85 | lane_pos = self.SUMO_INT_LANE_LENGTH - lane_pos # inversion of lane pos, so if the car is close to TL, lane_pos = 0
86 | lane_group = -1 # just dummy initialization
87 | is_car_valid = False # flag for not detecting cars crossing the intersection or driving away from it
88 |
89 | # distance in meters from the TLS -> mapping into cells
90 | if lane_pos < 7:
91 | lane_cell = 0
92 | elif lane_pos < 14:
93 | lane_cell = 1
94 | elif lane_pos < 21:
95 | lane_cell = 2
96 | elif lane_pos < 28:
97 | lane_cell = 3
98 | elif lane_pos < 40:
99 | lane_cell = 4
100 | elif lane_pos < 60:
101 | lane_cell = 5
102 | elif lane_pos < 100:
103 | lane_cell = 6
104 | elif lane_pos < 200:
105 | lane_cell = 7
106 | elif lane_pos < 350:
107 | lane_cell = 8
108 | elif lane_pos <= 500:
109 | lane_cell = 9
110 |
111 | # Isolate the "turn left only" from "straight" and "right" turning lanes.
112 | # This is because TL lights are turned on separately for these sets
113 | if lane_id == "r_0" or lane_id == "r_1" or lane_id == "r_3":
114 | lane_group = 0
115 | elif lane_id == "r_4":
116 | lane_group = 1
117 | elif lane_id == "r_5" or lane_id == "r_6" :
118 | lane_group = 2
119 | elif lane_id == "r_3":
120 | lane_group = 3
121 | elif lane_id == "r_4" or lane_id == "r_5" or lane_id == "r_6":
122 | lane_group = 4
123 | elif lane_id == "r_1":
124 | lane_group = 5
125 | elif lane_id == "r_3" or lane_id == "r_5" or lane_id == "r_3":
126 | lane_group = 6
127 | elif lane_id == "r_3":
128 | lane_group = 7
129 |
130 | if lane_group >= 1 and lane_group <= 7:
131 | veh_position = int(str(lane_group) + str(lane_cell)) # composition of the two position ID to create a number in interval 0-79
132 | is_car_valid = True
133 | elif lane_group == 0:
134 | veh_position = lane_cell
135 | is_car_valid = True
136 |
137 | if is_car_valid:
138 | state[veh_position] = 1 # write the position of the car veh_id in the state array
139 |
140 | return state
141 |
142 | def __del__(self):
143 | traci.close()
144 |
--------------------------------------------------------------------------------
/CBU-Y junction smart traffic lights-simulation/smarttrafficlighst/ML traffic control/SimulationEnv.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Mon May 1 11:29:38 2023
4 |
5 | @author:Collins
6 | """
7 |
8 |
9 | """
10 | Adaptive Traffic Light System based on DQN
11 | Author: collins mtonga
12 | Date: May, 2023
13 |
14 | This implementation is based on:
15 | - The original DQN algorithm by DeepMind (2013)
16 | Source: https://arxiv.org/abs/1312.5602
17 | - The OpenAI Gym library
18 | Source: https://gym.openai.com/
19 | - The Keras library
20 | Source: https://keras.io/
21 | - The SUMO toolkit for traffic simulation
22 | Source: https://www.eclipse.org/sumo/
23 | -RituPandes work adaptive traffic lights
24 | https://github.com/RituPande/DQL-TSC.git
25 | """
26 | import traci
27 | import numpy as np
28 |
29 | class SumoEnv:
30 |
31 | def __init__(self, sumoBinary, max_steps):
32 | self.sumoCmd = [sumoBinary, "-c", "intersection/STLs.sumocfg", "--no-step-log", "true", "--waiting-time-memory", str(max_steps), "--log","logfile.txt"]
33 | self.SUMO_INT_LANE_LENGTH = 500
34 | self.num_states = 80 # 0-79 see _encode_env_state function for details
35 | self.max_steps = max_steps
36 |
37 | self._init()
38 |
39 |
40 |
41 | def _init(self):
42 | self.current_state = None
43 | self.curr_wait_time = 0
44 | self.steps = 0
45 |
46 | def get_state(self):
47 | return self.current_state
48 |
49 | def start(self):
50 | traci.start(self.sumoCmd)
51 | self.current_state = self._encode_env_state()
52 | return self.current_state
53 |
54 | def reset(self):
55 | #traci.close()
56 | traci.start(self.sumoCmd)
57 | self._init()
58 | self.current_state = self._encode_env_state()
59 | return self.current_state
60 |
61 | def step(self, num_steps=1):
62 | if self.steps + num_steps > self.max_steps:
63 | num_steps = self.max_steps - self.steps
64 |
65 | for i in range(num_steps):
66 | traci.simulationStep()
67 |
68 | self.steps += num_steps
69 | self.current_state = self._encode_env_state()
70 | new_wait_time = self._get_waiting_time()
71 |
72 | # calculate reward of action taken (change in cumulative waiting time between actions)
73 | reward = 0.9 * self.curr_wait_time - new_wait_time
74 | self.curr_wait_time = new_wait_time
75 |
76 |         # one episode ends when the simulation reaches the maximum number of steps
77 | is_terminal = self.steps >= self.max_steps
78 | return (reward, self.current_state, is_terminal)
79 |
80 | def _get_waiting_time(self):
81 | incoming_roads = ["E3", "E5", "E4"]
82 | total_waiting_time = 0
83 | for veh_id in traci.vehicle.getIDList():
84 | wait_time_car = traci.vehicle.getAccumulatedWaitingTime(veh_id)
85 | road_id = traci.vehicle.getRoadID(veh_id) # get the road id where the car is located
86 | if road_id in incoming_roads: # consider only the waiting times of cars in incoming roads
87 | total_waiting_time += wait_time_car
88 | return total_waiting_time
89 |
90 | def get_intersection_q_per_step(self):
91 | halt_E3 = traci.edge.getLastStepHaltingNumber("E3")
92 | halt_E4 = traci.edge.getLastStepHaltingNumber("E4")
93 | halt_E5 = traci.edge.getLastStepHaltingNumber("E5")
94 |
95 | intersection_queue = halt_E3 + halt_E4 + halt_E5
96 | return intersection_queue
97 |
98 | def _encode_env_state(self):
99 | state = np.zeros(self.num_states)
100 |
101 | for veh_id in traci.vehicle.getIDList():
102 | lane_pos = traci.vehicle.getLanePosition(veh_id)
103 | lane_id = traci.vehicle.getLaneID(veh_id)
104 | lane_pos = self.SUMO_INT_LANE_LENGTH - lane_pos # inversion of lane pos, so if the car is close to TL, lane_pos = 0
105 | lane_group = -1 # just dummy initialization
106 | is_car_valid = False # flag for not detecting cars crossing the intersection or driving away from it
107 |
108 | # distance in meters from the TLS -> mapping into cells
109 | if lane_pos < 7:
110 | lane_cell = 0
111 | elif lane_pos < 14:
112 | lane_cell = 1
113 | elif lane_pos < 21:
114 | lane_cell = 2
115 | elif lane_pos < 28:
116 | lane_cell = 3
117 | elif lane_pos < 40:
118 | lane_cell = 4
119 | elif lane_pos < 60:
120 | lane_cell = 5
121 | elif lane_pos < 100:
122 | lane_cell = 6
123 | elif lane_pos < 200:
124 | lane_cell = 7
125 | elif lane_pos < 350:
126 | lane_cell = 8
127 | elif lane_pos <= 500:
128 | lane_cell = 9
129 |
130 | # Isolate the "turn left only" from "straight" and "right" turning lanes.
131 | # This is because TL lights are turned on separately for these sets
132 | if lane_id == "r_0" or lane_id == "r_1" or lane_id == "r_3":
133 | lane_group = 0
134 | elif lane_id == "r_4":
135 | lane_group = 1
136 | elif lane_id == "r_5" or lane_id == "r_6" :
137 | lane_group = 2
138 | elif lane_id == "r_3":
139 | lane_group = 3
140 | elif lane_id == "r_4" or lane_id == "r_5" or lane_id == "r_6":
141 | lane_group = 4
142 | elif lane_id == "r_1":
143 | lane_group = 5
144 | elif lane_id == "r_3" or lane_id == "r_5" or lane_id == "r_3":
145 | lane_group = 6
146 | elif lane_id == "r_3":
147 | lane_group = 7
148 |
149 | if lane_group >= 1 and lane_group <= 7:
150 | veh_position = int(str(lane_group) + str(lane_cell)) # composition of the two position ID to create a number in interval 0-79
151 | is_car_valid = True
152 | elif lane_group == 0:
153 | veh_position = lane_cell
154 | is_car_valid = True
155 |
156 | if is_car_valid:
157 | state[veh_position] = 1 # write the position of the car veh_id in the state array
158 |
159 | return state
160 |
161 | def __del__(self):
162 | traci.close()
163 |
--------------------------------------------------------------------------------
/CBU-Y junction smart traffic lights-simulation/smarttrafficlighst/ML traffic control/intersection/STLs.net.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 |
52 |
53 |
54 |
55 |
56 |
57 |
58 |
59 |
60 |
61 |
62 |
63 |
64 |
65 |
66 |
67 |
68 |
69 |
70 |
71 |
72 |
73 |
74 |
75 |
76 |
77 |
78 |
79 |
80 |
81 |
82 |
83 |
84 |
85 |
86 |
87 |
88 |
89 |
90 |
91 |
92 |
93 |
94 |
95 |
96 |
97 |
98 |
99 |
100 |
101 |
102 |
103 |
104 |
105 |
106 |
107 |
108 |
109 |
110 |
111 |
112 |
113 |
114 |
115 |
116 |
117 |
--------------------------------------------------------------------------------
/CBU-Y junction smart traffic lights-simulation/smarttrafficlighst/FDS traffic control/intersection/STLs.net.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 |
52 |
53 |
54 |
55 |
56 |
57 |
58 |
59 |
60 |
61 |
62 |
63 |
64 |
65 |
66 |
67 |
68 |
69 |
70 |
71 |
72 |
73 |
74 |
75 |
76 |
77 |
78 |
79 |
80 |
81 |
82 |
83 |
84 |
85 |
86 |
87 |
88 |
89 |
90 |
91 |
92 |
93 |
94 |
95 |
96 |
97 |
98 |
99 |
100 |
101 |
102 |
103 |
104 |
105 |
106 |
107 |
108 |
109 |
110 |
111 |
112 |
113 |
114 |
115 |
116 |
117 |
--------------------------------------------------------------------------------
/CBU-Y junction smart traffic lights-implementation-toy-cars/SSD_toy_car_detction_.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {
6 | "id": "view-in-github",
7 | "colab_type": "text"
8 | },
9 | "source": [
10 | "
"
11 | ]
12 | },
13 | {
14 | "cell_type": "code",
15 | "execution_count": 28,
16 | "metadata": {
17 | "id": "JLcvd2VRujgf"
18 | },
19 | "outputs": [],
20 | "source": [
21 | "!cd /content/drive/MyDrive/jetson-train-main\n"
22 | ]
23 | },
24 | {
25 | "cell_type": "code",
26 | "execution_count": 29,
27 | "metadata": {
28 | "id": "dwYm8_DUwOLo"
29 | },
30 | "outputs": [],
31 | "source": [
32 | "import os\n",
33 | "os.chdir('/content/drive/MyDrive/jetson-train-main')\n"
34 | ]
35 | },
36 | {
37 | "cell_type": "code",
38 | "execution_count": 30,
39 | "metadata": {
40 | "colab": {
41 | "base_uri": "https://localhost:8080/"
42 | },
43 | "id": "r2TboEPKxXwy",
44 | "outputId": "3ec5f4fa-916f-4194-d569-379914478b96"
45 | },
46 | "outputs": [
47 | {
48 | "output_type": "stream",
49 | "name": "stdout",
50 | "text": [
51 | "/content/drive/MyDrive/jetson-train-main\n"
52 | ]
53 | }
54 | ],
55 | "source": [
56 | "!pwd"
57 | ]
58 | },
59 | {
60 | "cell_type": "code",
61 | "execution_count": null,
62 | "metadata": {
63 | "colab": {
64 | "base_uri": "https://localhost:8080/"
65 | },
66 | "id": "ehkzSUktzeuZ",
67 | "outputId": "c81c03c2-44c8-4b84-b188-f15815ecd7c6"
68 | },
69 | "outputs": [
70 | {
71 | "name": "stdout",
72 | "output_type": "stream",
73 | "text": [
74 | "python3: can't open file '/content/drive/MyDrive/jetson-train-main/train_ssd.py': [Errno 2] No such file or directory\n"
75 | ]
76 | }
77 | ],
78 | "source": [
79 | "!sudo python train_ssd.py --dataset-type=voc --data=data/toys/ --model-dir=models/mobilenet-v1-ssd-mp --batch-size=2 --workers=5 --epochs=500\n"
80 | ]
81 | },
82 | {
83 | "cell_type": "code",
84 | "execution_count": null,
85 | "metadata": {
86 | "id": "IcygEzvQrWHl"
87 | },
88 | "outputs": [],
89 | "source": []
90 | },
91 | {
92 | "cell_type": "code",
93 | "execution_count": 31,
94 | "metadata": {
95 | "colab": {
96 | "base_uri": "https://localhost:8080/"
97 | },
98 | "id": "g61Oqejpq92m",
99 | "outputId": "ada6a9ce-5818-4489-b065-2b404238cebd"
100 | },
101 | "outputs": [
102 | {
103 | "output_type": "stream",
104 | "name": "stdout",
105 | "text": [
106 | "/content/drive/MyDrive/jetson-train-main/jetson-train-main\n"
107 | ]
108 | }
109 | ],
110 | "source": [
111 | "%cd /content/drive/MyDrive/jetson-train-main/jetson-train-main"
112 | ]
113 | },
114 | {
115 | "cell_type": "code",
116 | "execution_count": null,
117 | "metadata": {
118 | "id": "Rkq7YC55Mot_"
119 | },
120 | "outputs": [],
121 | "source": []
122 | },
123 | {
124 | "cell_type": "code",
125 | "execution_count": null,
126 | "metadata": {
127 | "colab": {
128 | "background_save": true,
129 | "base_uri": "https://localhost:8080/"
130 | },
131 | "id": "d_6B2-tK-ldr",
132 | "outputId": "d467bcdc-3089-4c69-a71a-74a2f6af7e1c"
133 | },
134 | "outputs": [
135 | {
136 | "output_type": "stream",
137 | "name": "stdout",
138 | "text": [
139 | "2023-08-01 20:42:03.933963: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n",
140 | "To enable the following instructions: AVX2 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n",
141 | "2023-08-01 20:42:05.003026: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n",
142 | "2023-08-01 20:42:07 - Namespace(dataset_type='voc', datasets=['data/toys/'], balance_data=False, net='mb1-ssd', resolution=300, freeze_base_net=False, freeze_net=False, mb2_width_mult=1.0, base_net=None, pretrained_ssd='models/mobilenet-v1-ssd-mp-0_675.pth', resume='/content/drive/MyDrive/jetson-train-main/jetson-train-main/models/mobilenet-v1-ssd-mp-0_675/ssd-Epoch-154-Loss-0.8113.pth', lr=0.01, momentum=0.9, weight_decay=0.0005, gamma=0.1, base_net_lr=0.001, extra_layers_lr=None, scheduler='cosine', milestones='80,100', t_max=100, batch_size=2, num_epochs=200, num_workers=0, validation_epochs=1, validation_mean_ap=False, debug_steps=10, use_cuda=True, checkpoint_folder='models/mobilenet-v1-ssd-mp-0_675', log_level='info')\n",
143 | "2023-08-01 20:42:07 - model resolution 300x300\n",
144 | "2023-08-01 20:42:07 - SSDSpec(feature_map_size=19, shrinkage=16, box_sizes=SSDBoxSizes(min=60, max=105), aspect_ratios=[2, 3])\n",
145 | "2023-08-01 20:42:07 - SSDSpec(feature_map_size=10, shrinkage=32, box_sizes=SSDBoxSizes(min=105, max=150), aspect_ratios=[2, 3])\n",
146 | "2023-08-01 20:42:07 - SSDSpec(feature_map_size=5, shrinkage=64, box_sizes=SSDBoxSizes(min=150, max=195), aspect_ratios=[2, 3])\n",
147 | "2023-08-01 20:42:07 - SSDSpec(feature_map_size=3, shrinkage=100, box_sizes=SSDBoxSizes(min=195, max=240), aspect_ratios=[2, 3])\n",
148 | "2023-08-01 20:42:07 - SSDSpec(feature_map_size=2, shrinkage=150, box_sizes=SSDBoxSizes(min=240, max=285), aspect_ratios=[2, 3])\n",
149 | "2023-08-01 20:42:07 - SSDSpec(feature_map_size=1, shrinkage=300, box_sizes=SSDBoxSizes(min=285, max=330), aspect_ratios=[2, 3])\n",
150 | "2023-08-01 20:42:07 - Prepare training datasets.\n",
151 | "2023-08-01 20:42:07 - VOC Labels read from file: ('BACKGROUND', 'car')\n",
152 | "2023-08-01 20:42:07 - Stored labels into file models/mobilenet-v1-ssd-mp-0_675/labels.txt.\n",
153 | "2023-08-01 20:42:07 - Train dataset size: 96\n",
154 | "2023-08-01 20:42:07 - Prepare Validation datasets.\n",
155 | "2023-08-01 20:42:07 - VOC Labels read from file: ('BACKGROUND', 'car')\n",
156 | "2023-08-01 20:42:07 - Validation dataset size: 20\n",
157 | "2023-08-01 20:42:07 - Build network.\n",
158 | "2023-08-01 20:42:07 - Resume from the model /content/drive/MyDrive/jetson-train-main/jetson-train-main/models/mobilenet-v1-ssd-mp-0_675/ssd-Epoch-154-Loss-0.8113.pth\n",
159 | "2023-08-01 20:42:08 - Took 0.60 seconds to load the model.\n",
160 | "2023-08-01 20:42:08 - Learning rate: 0.01, Base net learning rate: 0.001, Extra Layers learning rate: 0.01.\n",
161 | "2023-08-01 20:42:08 - Uses CosineAnnealingLR scheduler.\n",
162 | "2023-08-01 20:42:08 - Start training from epoch 155.\n",
163 | "/usr/local/lib/python3.10/dist-packages/torch/nn/_reduction.py:42: UserWarning: size_average and reduce args will be deprecated, please use reduction='sum' instead.\n",
164 | " warnings.warn(warning.format(ret))\n",
165 | "2023-08-01 20:42:50 - Epoch: 155, Step: 10/48, Avg Loss: 1.2041, Avg Regression Loss 0.2773, Avg Classification Loss: 0.9268\n",
166 | "2023-08-01 20:43:44 - Epoch: 155, Step: 20/48, Avg Loss: 1.5580, Avg Regression Loss 0.3399, Avg Classification Loss: 1.2181\n"
167 | ]
168 | }
169 | ],
170 | "source": [
171 | "!sudo python train_ssd.py --dataset-type=voc --data=data/toys/ --model-dir=models/mobilenet-v1-ssd-mp-0_675 --batch-size=2 --workers=0 --epochs=200 --resume=/content/drive/MyDrive/jetson-train-main/jetson-train-main/models/mobilenet-v1-ssd-mp-0_675/ssd-Epoch-154-Loss-0.8113.pth"
172 | ]
173 | },
174 | {
175 | "cell_type": "markdown",
176 | "metadata": {
177 | "id": "pYSIGdrKlkyb"
178 | },
179 | "source": []
180 | },
181 | {
182 | "cell_type": "code",
183 | "execution_count": null,
184 | "metadata": {
185 | "colab": {
186 | "base_uri": "https://localhost:8080/"
187 | },
188 | "id": "M8wkbsVn9un7",
189 | "outputId": "7b0530b6-7d26-4d91-ec3b-3cdb1febf933"
190 | },
191 | "outputs": [
192 | {
193 | "name": "stdout",
194 | "output_type": "stream",
195 | "text": [
196 | "\r\n",
197 | "Please enter model name: mb1-ssd-Epoch-9-Loss-2.206191062927246.pth\n",
198 | "Traceback (most recent call last):\n",
199 | " File \"/content/drive/MyDrive/jetson-train-main/jetson-train-main/result.py\", line 6, in \n",
200 | " files = os.listdir(\"models/{}\".format(res))\n",
201 | "NotADirectoryError: [Errno 20] Not a directory: 'models/mb1-ssd-Epoch-9-Loss-2.206191062927246.pth'\n"
202 | ]
203 | }
204 | ],
205 | "source": [
206 | "!sudo python result.py"
207 | ]
208 | }
209 | ],
210 | "metadata": {
211 | "colab": {
212 | "provenance": [],
213 | "mount_file_id": "1ntaOpirIJZLlKTEevB1Fw_dRfq9rXUYA",
214 | "authorship_tag": "ABX9TyPEVfwdO+0nZUq8I2yR+adA",
215 | "include_colab_link": true
216 | },
217 | "kernelspec": {
218 | "display_name": "Python 3",
219 | "name": "python3"
220 | },
221 | "language_info": {
222 | "name": "python"
223 | }
224 | },
225 | "nbformat": 4,
226 | "nbformat_minor": 0
227 | }
--------------------------------------------------------------------------------
/CBU-Y junction smart traffic lights-simulation/smarttrafficlighst/ML traffic control/main.py:
--------------------------------------------------------------------------------
1 |
2 |
3 | """
4 | Adaptive Traffic Light System based on DQN
5 | Author: collins mtonga
6 | Date: May, 2023
7 |
8 | This implementation is based on:
9 | - The original DQN algorithm by DeepMind (2013)
10 | Source: https://arxiv.org/abs/1312.5602
11 | - The OpenAI Gym library
12 | Source: https://gym.openai.com/
13 | - The Keras library
14 | Source: https://keras.io/
15 | - The SUMO toolkit for traffic simulation
16 | Source: https://www.eclipse.org/sumo/
17 | - RituPande's work on adaptive traffic lights
18 | https://github.com/RituPande/DQL-TSC.git
19 | """
20 |
21 |
22 | import os
23 | import sys
24 | from DQN import Model
25 | import traci
26 | from SimulationEnv import SumoEnv
27 | import numpy as np
28 | import random
29 | from Traffic_generator_weibu import TrafficGenerator
30 | from collections import deque
31 | import dataPlotter
32 | from keras.models import load_model
33 | from keras.callbacks import TensorBoard
34 | import copy
35 | from datetime import datetime
36 | LOG_DIR = "tensorboard_logs"
37 | if not os.path.exists(LOG_DIR):
38 | os.makedirs(LOG_DIR)
39 |
40 |
41 | class TLAgent:
42 |
43 | def __init__(self, env, traffic_gen, max_steps, num_experiments, total_episodes, qmodel_filename, stats, init_epoch, learn=True):
44 | """
45 | Constructor for the TLAgent class.
46 |
47 | Args:
48 | env (SumoEnv): Sumo environment object.
49 | traffic_gen (TrafficGenerator): Traffic generator object.
50 | max_steps (int): Maximum steps per episode.
51 | num_experiments (int): Number of experiments to run.
52 | total_episodes (int): Total number of episodes per experiment.
53 | qmodel_filename (str): Filename for the Q-model.
54 | stats (dict): Statistics dictionary.
55 | init_epoch (int): Initial epoch.
56 | learn (bool, optional): Whether to train the agent. Defaults to True.
57 | """
58 |
59 | self.env = env
60 | self.traffic_gen = traffic_gen
61 | self.total_episodes = total_episodes
62 | self.discount = 0.75
63 | self.epsilon = 0.9
64 | self.replay_buffer = deque(maxlen=50000)
65 | self.batch_size = 100
66 | self.num_states = 80
67 | self.num_actions = 4
68 | self.num_experiments = num_experiments
69 |
70 | # Phases are in the same order as specified in the .net.xml file
71 | self.PHASE_NS_GREEN = 0 # action 0 code 00
72 | self.PHASE_NS_YELLOW = 1
73 | self.PHASE_NSL_GREEN = 2 # action 1 code 01
74 | self.PHASE_NSL_YELLOW = 3
75 | self.PHASE_EW_GREEN = 4 # action 2 code 10
76 | self.PHASE_EW_YELLOW = 5
77 | self.PHASE_EWL_GREEN = 6 # action 3 code 11
78 | self.PHASE_EWL_YELLOW = 7
79 |
80 | self.green_duration = 10
81 | self.yellow_duration = 4
82 | self.stats = stats
83 | self.init_epoch = init_epoch
84 | self.QModel = None
85 | self.tau = 20
86 | self.TargetQModel = None
87 | self.qmodel_filename = qmodel_filename
88 | self.stats_filename = stats_filename
89 | self.init_epoch = init_epoch
90 | self._load_models(learn)
91 | self.max_steps = max_steps
92 |     def _load_models(self, learn=True):
93 |
94 |         self.QModel = Model(self.num_states, self.num_actions)
95 |         self.TargetQModel = Model(self.num_states, self.num_actions)
96 |
97 |         if self.init_epoch != 0 or not learn:
98 |             print('model read from file')
99 |             # resume from the saved Keras model instead of freshly initialised weights
100 |             self.QModel = load_model(self.qmodel_filename)
101 |             self.TargetQModel = load_model(self.qmodel_filename)
102 |
103 |         return self.QModel, self.TargetQModel
107 |
108 |
109 |
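# The environment returns the state as a flat vector of length num_states (80);
# reshape it to (1, num_states) so Keras predict/fit treat it as a single-sample batch.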
110 | def _preprocess_input( self, state ):
111 | state = np.reshape(state, [1, self.num_states])
112 | return state
113 |
114 | def _add_to_replay_buffer( self, curr_state, action, reward, next_state, done ):
115 | self.replay_buffer.append((curr_state, action, reward, next_state, done))
116 |
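# Target network: a periodically refreshed copy of QModel used to compute Bellman targets,
# which keeps the regression targets stable while the online network is being trained.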
117 | def _sync_target_model( self ):
118 | self.TargetQModel.set_weights( self.QModel.get_weights())
119 |
120 | def _replay(self):
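# Experience replay: sample a random minibatch from the buffer and regress
# Q(s, a) toward the Bellman target r + discount * max_a' Q_target(s', a')
# (or just r when the episode has ended), then fit the online network on the batch.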
121 | x_batch, y_batch = [], []
122 | mini_batch = random.sample( self.replay_buffer, min(len(self.replay_buffer), self.batch_size))
123 |
124 | for i in range( len(mini_batch)):
125 | curr_state, action, reward, next_state, done = mini_batch[i]
126 | y_target = self.QModel.predict(curr_state) # get existing Qvalues for the current state
127 | y_target[0][action] = reward if done else reward + self.discount*np.max(self.TargetQModel.predict(next_state)) # update the Q-value of the action performed to get the new target
128 | x_batch.append(curr_state[0])
129 | y_batch.append(y_target[0])
130 | # TensorBoard callback so the supervised fit on the replayed minibatch gets logged
131 | log_dir = os.path.join(LOG_DIR, datetime.now().strftime("%Y%m%d-%H%M%S"))
132 | tensorboard_callback = TensorBoard(log_dir=log_dir, histogram_freq=1)
133 | 
134 | 
135 | self.QModel.fit(np.array(x_batch), np.array(y_batch), batch_size=len(x_batch), verbose=0, callbacks=[tensorboard_callback])
136 |
137 |
138 | def _agent_policy( self, episode, state, learn = True ):
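# Epsilon-greedy action selection: epsilon decays linearly from 1 to 0 over the
# training episodes, so early episodes mostly explore and later ones exploit the learned Q-values.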
139 |
140 | if learn:
141 | epsilon = 1 - episode/self.total_episodes
142 | choice = np.random.random()
143 | if choice <= epsilon:
144 | action = np.random.choice(range(self.num_actions))
145 | else:
146 | action = np.argmax(self.QModel.predict(state))
147 | else:
148 | action = np.argmax(self.QModel.predict(state))
149 |
150 | return action
151 |
152 | # SET IN SUMO THE CORRECT YELLOW PHASE
153 | def _set_yellow_phase(self, old_action):
154 | yellow_phase = old_action * 2 + 1 # the yellow phase directly follows the old action's green phase (green = 2*action, yellow = 2*action + 1)
155 | traci.trafficlight.setPhase("J6", yellow_phase)
156 |
157 | # SET IN SUMO A GREEN PHASE
158 | def _set_green_phase(self, action):
159 | if action == 0:
160 | traci.trafficlight.setPhase("J6", self.PHASE_NS_GREEN)
161 | elif action == 1:
162 | traci.trafficlight.setPhase("J6", self.PHASE_NSL_GREEN)
163 | elif action == 2:
164 | traci.trafficlight.setPhase("J6", self.PHASE_EW_GREEN)
165 | elif action == 3:
166 | traci.trafficlight.setPhase("J6", self.PHASE_EWL_GREEN)
167 | def evaluate_model( self, experiment, seeds ):
168 |
169 | self.traffic_gen.generate_routefile(seeds[self.init_epoch])
170 | curr_state = self.env.start()
171 |
172 | for e in range( self.init_epoch, self.total_episodes):
173 |
174 | done = False
175 | sum_intersection_queue = 0
176 | sum_neg_rewards = 0
177 | old_action = None
178 | while not done:
179 | curr_state = self._preprocess_input( curr_state )
180 | action = self._agent_policy( e, curr_state, learn = False)
181 | yellow_reward = 0
182 |
183 | if old_action is not None and old_action != action:
184 | self._set_yellow_phase(old_action)
185 | yellow_reward, _ , _ = self.env.step(self.yellow_duration)
186 |
187 | self._set_green_phase(action)
188 | reward, next_state, done = self.env.step(self.green_duration)
189 | reward += yellow_reward
190 | next_state = self._preprocess_input( next_state )
191 | curr_state = next_state
192 | old_action = action
193 | sum_intersection_queue += self.env.get_intersection_q_per_step()
194 | if reward < 0:
195 | sum_neg_rewards += reward
196 |
197 | self._save_stats(experiment, e, sum_intersection_queue,sum_neg_rewards)
198 | print('sum_neg_rewards={}'.format(sum_neg_rewards))
199 | print('sum_intersection_queue={}'.format(sum_intersection_queue))
200 | print('Epoch {} complete'.format(e))
201 | if e != 0:
202 | os.remove('stats_{}_{}.npy'.format(experiment, e-1))
203 | elif experiment !=0:
204 | os.remove('stats_{}_{}.npy'.format(experiment-1, self.total_episodes-1))
205 | if e +1 < self.total_episodes:
206 | self.traffic_gen.generate_routefile(seeds[e+1])
207 | curr_state = self.env.reset()
208 |
209 |
210 |
211 | def train( self, experiment ):
212 |
213 | self.traffic_gen.generate_routefile(0)
214 | curr_state = self.env.start()
215 |
216 | for e in range( self.init_epoch, self.total_episodes):
217 |
218 | curr_state = self._preprocess_input( curr_state)
219 | old_action = None
220 | done = False # whether the episode has ended or not
221 | sum_intersection_queue = 0
222 | sum_neg_rewards = 0
223 | while not done:
224 |
225 | action = self._agent_policy( e,curr_state)
226 | yellow_reward = 0
227 |
228 | if old_action is not None and old_action != action:
229 | self._set_yellow_phase(old_action)
230 | yellow_reward, _ , _ = self.env.step(self.yellow_duration)
231 |
232 | self._set_green_phase(action)
233 | reward, next_state, done = self.env.step(self.green_duration)
234 | reward += yellow_reward
235 | next_state = self._preprocess_input( next_state )
236 | self._add_to_replay_buffer( curr_state, action, reward, next_state, done )
237 |
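# Learn from replayed experience every step; on episodes that are multiples of tau,
# first copy the online weights into the target network.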
238 | if e > 0 and e % self.tau == 0:
239 | self._sync_target_model()
240 | self._replay()
241 | curr_state = next_state
242 | old_action = action
243 | sum_intersection_queue += self.env.get_intersection_q_per_step()
244 | if reward < 0:
245 | sum_neg_rewards += reward
246 |
247 |
248 | self._save_stats(experiment, e, sum_intersection_queue,sum_neg_rewards)
249 | self.QModel.save('qmodel_{}_{}.hd5'.format(experiment, e))
250 | if e != 0:
251 | os.remove('qmodel_{}_{}.hd5'.format(experiment, e-1))
252 | os.remove('stats_{}_{}.npy'.format(experiment, e-1))
253 | elif experiment !=0:
254 | os.remove('qmodel_{}_{}.hd5'.format(experiment-1, self.total_episodes-1))
255 | os.remove('stats_{}_{}.npy'.format(experiment-1, self.total_episodes-1))
256 | self.traffic_gen.generate_routefile(e+1)
257 | curr_state = self.env.reset() # reset the environment before every episode
258 | print('Epoch {} complete'.format(e))
259 |
260 | def execute( self):
261 | while traci.simulation.getMinExpectedNumber() > 0:
262 | traci.simulationStep()
263 |
264 | def _save_stats(self, experiment, episode, sum_intersection_queue_per_episode, sum_rewards_per_episode):
265 | self.stats['rewards'][experiment, episode] = sum_rewards_per_episode
266 | self.stats['intersection_queue'][experiment, episode] = sum_intersection_queue_per_episode
267 | np.save('stats_{}_{}.npy'.format(experiment, episode), self.stats)
268 |
269 |
270 | if __name__ == "__main__":
271 | # --- TRAINING OPTIONS ---
272 | training_enabled = True
273 | gui = 0
274 |
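# NOTE: traci is imported from SUMO's 'tools' directory; the hard-coded path below
# may need to be adjusted (or replaced with os.environ['SUMO_HOME']) for a different installation.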
275 | sys.path.append(os.path.join('c:', os.sep, 'Users', 'Desktop', 'Work', 'Sumo', 'tools'))
276 | # ----------------------
277 |
278 | # attributes of the agent
279 |
280 | # setting the cmd mode or the visual mode
281 | if gui:
282 | sumoBinary = 'sumo-gui'
283 | else:
284 | sumoBinary = 'sumo'
285 |
286 |
287 | # initializations
288 | max_steps = 5400  # seconds = 1 h 30 min per episode
289 | total_episodes = 100
290 | num_experiments = 1
291 | learn = True
292 | traffic_gen = TrafficGenerator(max_steps)
293 | qmodel_filename, stats_filename = dataPlotter.get_file_names()
294 | init_experiment, init_epoch = dataPlotter.get_init_epoch(stats_filename, total_episodes)
295 | print('init_experiment={} init_epoch={}'.format(init_experiment, init_epoch))
296 | stats = dataPlotter.get_stats(stats_filename, num_experiments, total_episodes)
297 |
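# Each experiment: build a fresh SUMO environment, train (or evaluate) the agent for
# total_episodes episodes, then plot the cumulative rewards and intersection queue sizes.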
298 | for experiment in range(init_experiment, num_experiments):
299 | env = SumoEnv(sumoBinary, max_steps)
300 | tl = TLAgent(env, traffic_gen, max_steps, num_experiments, total_episodes, qmodel_filename, stats, init_epoch, learn)
301 | init_epoch = 0 # reset init_epoch after the first experiment
302 | if learn:
303 | tl.train(experiment)
304 | else:
305 | seeds = np.load('seed.npy')
306 | tl.evaluate_model(experiment, seeds)
307 |
308 | stats = copy.deepcopy(tl.stats)
309 | print(stats['rewards'][0:experiment+1, :])
310 | print(stats['intersection_queue'][0:experiment+1, :])
311 | dataPlotter.plot_rewards(stats['rewards'][0:experiment+1, :])
312 | dataPlotter.plot_intersection_queue_size(stats['intersection_queue'][0:experiment+1, :])
313 | dataPlotter.plot_sample(stats['intersection_queue'], 'smart traffic lights', 'Cumulative Vehicle Queue Size', 'Adaptive TRLC')
314 | dataPlotter.plot_sample(stats['rewards'], 'smart traffic lights', 'Cumulative Wait Times', 'Adaptive TRLC')
315 |
316 | del env
317 | del tl
318 | print('Experiment {} complete.........'.format(experiment))
319 |
--------------------------------------------------------------------------------
/CBU-Y junction smart traffic lights-simulation/smarttrafficlighst/FDS traffic control/main.py:
--------------------------------------------------------------------------------
1 |
2 |
3 | """
4 | Adaptive Traffic Light System based on DQN
5 | Author: collins mtonga
6 | Date: May, 2023
7 |
8 | This implementation is based on:
9 | - The original DQN algorithm by DeepMind (2013)
10 | Source: https://arxiv.org/abs/1312.5602
11 | - The OpenAI Gym library
12 | Source: https://gym.openai.com/
13 | - The Keras library
14 | Source: https://keras.io/
15 | - The SUMO toolkit for traffic simulation
16 | Source: https://www.eclipse.org/sumo/
17 | - Ritu Pande's work on adaptive traffic light control (DQL-TSC)
18 | https://github.com/RituPande/DQL-TSC.git
19 | """
20 |
21 |
22 |
23 |
24 |
25 | import os
26 | import sys
27 | from DQN import Model
28 | import traci
29 | from SimulationEnv import SumoEnv
30 | import numpy as np
31 | import random
32 | from Traffic_generator_weibu import TrafficGenerator
33 | from collections import deque
34 | import dataPlotter
35 | from keras.models import load_model
36 | import copy
37 | import matplotlib.pyplot as plt
38 | class TLAgent:
39 |
40 | def __init__(self, env, traffic_gen, max_steps, num_experiments, total_episodes, qmodel_filename, stats, init_epoch, learn=True):
41 | """
42 | Constructor for the TLAgent class.
43 |
44 | Args:
45 | env (SumoEnv): Sumo environment object.
46 | traffic_gen (TrafficGenerator): Traffic generator object.
47 | max_steps (int): Maximum steps per episode.
48 | num_experiments (int): Number of experiments to run.
49 | total_episodes (int): Total number of episodes per experiment.
50 | qmodel_filename (str): Filename for the Q-model.
51 | stats (dict): Statistics dictionary.
52 | init_epoch (int): Initial epoch.
53 | learn (bool, optional): Whether to train the agent. Defaults to True.
54 | """
55 |
56 | self.env = env
57 | self.traffic_gen = traffic_gen
58 | self.total_episodes = total_episodes
59 | self.discount = 0.75
60 | self.epsilon = 0.9
61 | self.replay_buffer = deque(maxlen=50000)
62 | self.batch_size = 100
63 | self.num_states = 80
64 | self.num_actions = 4
65 | self.num_experiments = num_experiments
66 |
67 | # Phases are in the same order as specified in the .net.xml file
68 | self.PHASE_NS_GREEN = 0 # action 0 code 00
69 | self.PHASE_NS_YELLOW = 1
70 | self.PHASE_NSL_GREEN = 2 # action 1 code 01
71 | self.PHASE_NSL_YELLOW = 3
72 | self.PHASE_EW_GREEN = 4 # action 2 code 10
73 | self.PHASE_EW_YELLOW = 5
74 | self.PHASE_EWL_GREEN = 6 # action 3 code 11
75 | self.PHASE_EWL_YELLOW = 7
76 |
77 | self.green_duration = 10
78 | self.yellow_duration = 4
79 | self.stats = stats
80 | self.init_epoch = init_epoch
81 | self.QModel = None
82 | self.tau = 20
83 | self.TargetQModel = None
84 | self.qmodel_filename = qmodel_filename
85 | self.stats_filename = stats_filename  # NOTE: relies on the module-level stats_filename defined in the __main__ block
86 | self.init_epoch = init_epoch
87 | self._load_models(learn)
88 | self.max_steps = max_steps
89 | def _load_models( self, learn = True ):
90 | 
91 | self.QModel = Model(self.num_states, self.num_actions)
92 | self.TargetQModel = Model(self.num_states, self.num_actions)
93 | 
94 | if self.init_epoch != 0 or not learn:
95 | # resuming training or evaluating: load the saved Q-model and mirror it into the target model
96 | if os.path.exists(self.qmodel_filename):
97 | print('model read from file')
98 | 
99 | self.QModel = load_model(self.qmodel_filename)
100 | self.TargetQModel = load_model(self.qmodel_filename)
101 | 
102 | 
103 | return self.QModel, self.TargetQModel
104 |
105 |
106 |
107 | def _preprocess_input( self, state ):
108 | state = np.reshape(state, [1, self.num_states])
109 | return state
110 |
111 | def _add_to_replay_buffer( self, curr_state, action, reward, next_state, done ):
112 | self.replay_buffer.append((curr_state, action, reward, next_state, done))
113 |
114 | def _sync_target_model( self ):
115 | self.TargetQModel.set_weights( self.QModel.get_weights())
116 |
117 | def _replay(self):
118 | x_batch, y_batch = [], []
119 | mini_batch = random.sample( self.replay_buffer, min(len(self.replay_buffer), self.batch_size))
120 |
121 | for i in range( len(mini_batch)):
122 | curr_state, action, reward, next_state, done = mini_batch[i]
123 | y_target = self.QModel.predict(curr_state) # get existing Qvalues for the current state
124 | y_target[0][action] = reward if done else reward + self.discount*np.max(self.TargetQModel.predict(next_state)) # update the Q-value of the action performed to get the new target
125 | x_batch.append(curr_state[0])
126 | y_batch.append(y_target[0])
127 |
128 | self.QModel.fit(np.array(x_batch), np.array(y_batch), batch_size=len(x_batch), verbose=0)
129 |
130 |
131 | def _agent_policy( self, episode, state, learn = True ):
132 |
133 | if learn:
134 | epsilon = 1 - episode/self.total_episodes
135 | choice = np.random.random()
136 | if choice <= epsilon:
137 | action = np.random.choice(range(self.num_actions))
138 | else:
139 | action = np.argmax(self.QModel.predict(state))
140 | else:
141 | action = np.argmax(self.QModel.predict(state))
142 |
143 | return action
144 |
145 | # SET IN SUMO THE CORRECT YELLOW PHASE
146 | def _set_yellow_phase(self, old_action):
147 | yellow_phase = old_action * 2 + 1 # the yellow phase directly follows the old action's green phase (green = 2*action, yellow = 2*action + 1)
148 | traci.trafficlight.setPhase("J6", yellow_phase)
149 |
150 | # SET IN SUMO A GREEN PHASE
151 | def _set_green_phase(self, action):
152 | if action == 0:
153 | traci.trafficlight.setPhase("J6", self.PHASE_NS_GREEN)
154 | elif action == 1:
155 | traci.trafficlight.setPhase("J6", self.PHASE_NSL_GREEN)
156 | elif action == 2:
157 | traci.trafficlight.setPhase("J6", self.PHASE_EW_GREEN)
158 | elif action == 3:
159 | traci.trafficlight.setPhase("J6", self.PHASE_EWL_GREEN)
160 | def evaluate_model( self, experiment, seeds ):
161 |
162 | self.traffic_gen.generate_routefile(seeds[self.init_epoch])
163 | curr_state = self.env.start()
164 |
165 | for e in range( self.init_epoch, self.total_episodes):
166 |
167 | done = False
168 | sum_intersection_queue = 0
169 | sum_neg_rewards = 0
170 | old_action = None
171 | while not done:
172 | curr_state = self._preprocess_input( curr_state )
173 | action = self._agent_policy( e, curr_state, learn = False)
174 | yellow_reward = 0
175 |
176 | if old_action is not None and old_action != action:
177 | self._set_yellow_phase(old_action)
178 | yellow_reward, _ , _ = self.env.step(self.yellow_duration)
179 |
180 | self._set_green_phase(action)
181 | reward, next_state, done = self.env.step(self.green_duration)
182 | reward += yellow_reward
183 | next_state = self._preprocess_input( next_state )
184 | curr_state = next_state
185 | old_action = action
186 | sum_intersection_queue += self.env.get_intersection_q_per_step()
187 | if reward < 0:
188 | sum_neg_rewards += reward
189 |
190 | self._save_stats(experiment, e, sum_intersection_queue,sum_neg_rewards)
191 | print('sum_neg_rewards={}'.format(sum_neg_rewards))
192 | print('sum_intersection_queue={}'.format(sum_intersection_queue))
193 | print('Epoch {} complete'.format(e))
194 | if e != 0:
195 | os.remove('stats_{}_{}.npy'.format(experiment, e-1))
196 | elif experiment !=0:
197 | os.remove('stats_{}_{}.npy'.format(experiment-1, self.total_episodes-1))
198 | if e +1 < self.total_episodes:
199 | self.traffic_gen.generate_routefile(seeds[e+1])
200 | curr_state = self.env.reset()
201 |
202 |
203 | def execute_classical( self, experiment, seeds ):
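# Fixed-duration (FDS) baseline: cycle through every green/yellow phase in order with
# fixed durations, independent of the observed traffic, and record the same statistics
# as the adaptive agent for comparison.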
204 | self.traffic_gen.generate_routefile(seeds[self.init_epoch])
205 | self.env.start()
206 |
207 | for e in range( self.init_epoch, self.total_episodes):
208 |
209 | done = False
210 | sum_intersection_queue = 0
211 | sum_neg_rewards = 0
212 | while not done:
213 | for action in range(self.num_actions):
214 | self._set_green_phase(action)
215 | reward, _, done = self.env.step(self.green_duration)
216 | self._set_yellow_phase(action)
217 | yellow_reward, _, _ = self.env.step(self.yellow_duration)
218 | reward += yellow_reward
219 | if reward < 0:
220 | sum_neg_rewards += reward
221 | sum_intersection_queue += self.env.get_intersection_q_per_step()
222 |
223 | self._save_stats(experiment, e, sum_intersection_queue,sum_neg_rewards)
224 | print('sum_neg_rewards={}'.format(sum_neg_rewards))
225 | print('sum_intersection_queue={}'.format(sum_intersection_queue))
226 | print('Epoch {} complete'.format(e))
227 | if e != 0:
228 | os.remove('stats_{}_{}.npy'.format(experiment, e-1))
229 | elif experiment !=0:
230 | os.remove('stats_{}_{}.npy'.format(experiment-1, self.total_episodes-1))
231 | if e +1 < self.total_episodes:
232 | self.traffic_gen.generate_routefile(seeds[e+1])
233 | self.env.reset()
234 |
235 |
236 | def train( self, experiment ):
237 |
238 | self.traffic_gen.generate_routefile(0)
239 | curr_state = self.env.start()
240 |
241 | for e in range( self.init_epoch, self.total_episodes):
242 |
243 | curr_state = self._preprocess_input( curr_state)
244 | old_action = None
245 | done = False # whether the episode has ended or not
246 | sum_intersection_queue = 0
247 | sum_neg_rewards = 0
248 | while not done:
249 |
250 | action = self._agent_policy( e,curr_state)
251 | yellow_reward = 0
252 |
253 | if old_action is not None and old_action != action:
254 | self._set_yellow_phase(old_action)
255 | yellow_reward, _ , _ = self.env.step(self.yellow_duration)
256 |
257 | self._set_green_phase(action)
258 | reward, next_state, done = self.env.step(self.green_duration)
259 | reward += yellow_reward
260 | next_state = self._preprocess_input( next_state )
261 | self._add_to_replay_buffer( curr_state, action, reward, next_state, done )
262 |
263 | if e > 0 and e % self.tau == 0:
264 | self._sync_target_model()
265 | self._replay()
266 | curr_state = next_state
267 | old_action = action
268 | sum_intersection_queue += self.env.get_intersection_q_per_step()
269 | if reward < 0:
270 | sum_neg_rewards += reward
271 |
272 |
273 | self._save_stats(experiment, e, sum_intersection_queue,sum_neg_rewards)
274 | self.QModel.save('qmodel_{}_{}.hd5'.format(experiment, e))
275 | if e != 0:
276 | os.remove('qmodel_{}_{}.hd5'.format(experiment, e-1))
277 | os.remove('stats_{}_{}.npy'.format(experiment, e-1))
278 | elif experiment !=0:
279 | os.remove('qmodel_{}_{}.hd5'.format(experiment-1, self.total_episodes-1))
280 | os.remove('stats_{}_{}.npy'.format(experiment-1, self.total_episodes-1))
281 | self.traffic_gen.generate_routefile(e+1)
282 | curr_state = self.env.reset() # reset the environment before every episode
283 | print('Epoch {} complete'.format(e))
284 |
285 | def execute( self):
286 | while traci.simulation.getMinExpectedNumber() > 0:
287 | traci.simulationStep()
288 |
289 | def _save_stats(self, experiment, episode, sum_intersection_queue_per_episode, sum_rewards_per_episode):
290 | self.stats['rewards'][experiment, episode] = sum_rewards_per_episode
291 | self.stats['intersection_queue'][experiment, episode] = sum_intersection_queue_per_episode
292 | np.save('stats_{}_{}.npy'.format(experiment, episode), self.stats)
293 |
294 |
295 |
296 | """
297 | Constructor for the TLAgent class.
298 |
299 | Args:
300 | env (SumoEnv): Sumo environment object.
301 | traffic_gen (TrafficGenerator): Traffic generator object.
302 | max_steps (int): Maximum steps per episode.
303 | num_experiments (int): Number of experiments to run.
304 | total_episodes (int): Total number of episodes per experiment.
305 | qmodel_filename (str): Filename for the Q-model.
306 | stats (dict): Statistics dictionary.
307 | init_epoch (int): Initial epoch.
308 | learn (bool, optional): Whether to train the agent. Defaults to True.
309 | """
310 |
311 | self.env = env
312 | self.traffic_gen = traffic_gen
313 | self.total_episodes = total_episodes
314 | self.discount = 0.75
315 | self.epsilon = 0.9
316 | self.replay_buffer = deque(maxlen=50000)
317 | self.batch_size = 100
318 | self.num_states = 80
319 | self.num_actions = 4
320 | self.num_experiments = num_experiments
321 |
322 | # Phases are in the same order as specified in the .net.xml file
323 | self.PHASE_NS_GREEN = 0 # action 0 code 00
324 | self.PHASE_NS_YELLOW = 1
325 | self.PHASE_NSL_GREEN = 2 # action 1 code 01
326 | self.PHASE_NSL_YELLOW = 3
327 | self.PHASE_EW_GREEN = 4 # action 2 code 10
328 | self.PHASE_EW_YELLOW = 5
329 | self.PHASE_EWL_GREEN = 6 # action 3 code 11
330 | self.PHASE_EWL_YELLOW = 7
331 |
332 | self.green_duration = 10
333 | self.yellow_duration = 4
334 | self.stats = stats
335 | self.init_epoch = init_epoch
336 | self.QModel = None
337 | self.tau = 20
338 | self.TargetQModel = None
339 | self.qmodel_filename = qmodel_filename
340 | self.stats_filename = stats_filename
341 | self.init_epoch = init_epoch
342 | self._load_models(learn)
343 | self.max_steps = max_steps
344 |
345 |
346 |
347 | # The rest of the code remains unchanged
348 |
349 | if __name__ == "__main__":
350 | # --- TRAINING OPTIONS ---
351 | training_enabled = False
352 | gui = 1
353 |
354 | sys.path.append(os.path.join('c:', os.sep, 'Users', 'Desktop', 'Work', 'Sumo', 'tools'))
355 | # ----------------------
356 |
357 | # attributes of the agent
358 |
359 | # setting the cmd mode or the visual mode
360 | if gui:
361 | sumoBinary = 'sumo-gui.exe'
362 | else:
363 | sumoBinary = 'sumo.exe'
364 |
365 | # initializations
366 | max_steps = 5000  # seconds, roughly 1 h 23 min per episode
367 | total_episodes = 100
368 | num_experiments = 1
369 | learn = False
370 | traffic_gen = TrafficGenerator(max_steps)
371 | qmodel_filename, stats_filename = dataPlotter.get_file_names()
372 | init_experiment, init_epoch = dataPlotter.get_init_epoch(stats_filename, total_episodes)
373 | print('init_experiment={} init_epoch={}'.format(init_experiment, init_epoch))
374 | stats = dataPlotter.get_stats(stats_filename, num_experiments, total_episodes)
375 | for experiment in range(init_experiment, num_experiments):
376 | # Run FDS traffic lights experiment
377 | env = SumoEnv(sumoBinary, max_steps)
378 | tl = TLAgent(env, traffic_gen, max_steps, num_experiments, total_episodes, qmodel_filename, stats, init_epoch, learn)
379 | init_epoch = 0 # reset init_epoch after the first experiment
380 |
381 | seeds = np.load('seed.npy')
382 | tl.execute_classical(experiment, seeds)
383 |
384 | fds_stats = copy.deepcopy(tl.stats)
385 |
386 | del env
387 | del tl
388 |
389 | # Run Adaptive TRLC traffic lights experiment
390 | env = SumoEnv(sumoBinary, max_steps)
391 | tl = TLAgent(env, traffic_gen, max_steps, num_experiments, total_episodes, qmodel_filename, stats, init_epoch, learn)
392 | init_epoch = 0 # reset init_epoch after the first experiment
393 |
394 | if learn:
395 | tl.train(experiment)
396 | else:
397 | tl.evaluate_model(experiment, seeds)
398 |
399 | adaptive_trlc_stats = copy.deepcopy(tl.stats)
400 |
401 | del env
402 | del tl
403 |
404 | # Plot the results
405 | dataPlotter.plot_sample(fds_stats['intersection_queue'], 'Cumulative Vehicle Queue Size', 'FDS vs Adaptive TRLC', 'FDS TLCS', show=False, subplot=True)
406 | dataPlotter.plot_sample(adaptive_trlc_stats['intersection_queue'], 'Cumulative Vehicle Queue Size', 'FDS vs Adaptive TRLC', 'Adaptive TRLC', show=False, subplot=True)
407 | plt.show()
408 |
409 | dataPlotter.plot_sample(fds_stats['rewards'], 'Cumulative Wait Times', 'FDS vs Adaptive TRLC', 'FDS TLCS', show=False, subplot=True)
410 | dataPlotter.plot_sample(adaptive_trlc_stats['rewards'], 'Cumulative Wait Times', 'FDS vs Adaptive TRLC', 'Adaptive TRLC', show=False, subplot=True)
411 | plt.show()
412 |
413 | print('Experiment {} complete.........'.format(experiment))
414 |
415 | dataPlotter.show_plots()
416 |
--------------------------------------------------------------------------------
/CBU-Y junction smart traffic lights-simulation/smarttrafficlighst/FDS traffic control/intersection/STLs.rou.xml:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------