├── LICENSE ├── README.md ├── schedule8Links.JPG └── src ├── data.json ├── data_10links.json ├── requirements.txt ├── resulting_schedule.txt ├── schedulegenadapt.py ├── schedulegenmodular.py └── schedulegenwithdeadline.py /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Abhilash 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # TrafficScheduling 2 | Traffic Scheduling for Time Sensitive Networks 3 | 4 | Run the program schedulegenwithdeadline.py 5 | 6 | ![Resulting Schedule](schedule8Links.JPG) 7 | 8 | Overview of the approach 9 | 10 | [Traffic_Scheduling-GeneticAlgoApproach.pdf](https://github.com/user-attachments/files/16885374/Traffic_Scheduling-GeneticAlgoApproach.pdf) 11 | 12 | A brief description of code structure 13 | 14 | 15 | Scheduling being an NPHard problem many methods have been applied. 16 | Genetic Algorithms, Metaheuristics all have found its advantages. 17 | I found it easier with Genetic algorithms as finally all you do is 18 | try creating different populations and check the fitness (whether rules or constraints are met) 19 | more relatable. This is the approach in Genetic. 20 | 21 | Inline image 22 | 23 | An initial flow sequence is provided by JSON for 10 Links 24 | ![image](https://github.com/user-attachments/assets/d3a073fb-2120-4026-ae8b-0cbda77eb37f) 25 | 26 | The Traffic Model is here. The flows (4) are described in code here 27 | ![image](https://github.com/user-attachments/assets/40209bdb-b804-4f45-a6ca-2561cb66fd51) 28 | 29 | Main method 30 | ![image](https://github.com/user-attachments/assets/ee561a0a-76f6-4bb7-bb47-996def607bca) 31 | Include calls to prepare initial polpulation 32 | Multiple Iterations involved to find the best 33 | ![image](https://github.com/user-attachments/assets/b708fbb6-0ed8-41dd-a99d-8bfe5950f57b) 34 | Perform mutations (rearrangements) and check fitness 35 | ![image](https://github.com/user-attachments/assets/85ae25f5-dd4d-448a-977c-1df203865d2f) 36 | Identifying the best schedule by checking makespan. 
The Shortest Makespan is considered best 37 | ![image](https://github.com/user-attachments/assets/b5d1b91e-fecb-40ac-bdfe-3fdf63b86562) 38 | 39 | Here is where we check deadline. Main rule here is deadline 40 | ![image](https://github.com/user-attachments/assets/5e96e946-e092-43a1-bef5-e06e668cf839) 41 | 42 | 43 | 44 | 45 | 46 | 47 | -------------------------------------------------------------------------------- /schedule8Links.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Abh4git/TrafficScheduling/038391412d5bb1fac8792ddfbb2691467affc19f/schedule8Links.JPG -------------------------------------------------------------------------------- /src/data.json: -------------------------------------------------------------------------------- 1 | {"Flow Sequence": 2 | {"F1": 3 | {"L1": 1, 4 | "L2": 2, 5 | "L3": 3, 6 | "L4": 4, 7 | "L5": 5, 8 | "L6": 6 9 | }, 10 | "F2": 11 | {"L1": 1, 12 | "L2": 3, 13 | "L3": 5, 14 | "L4": 6, 15 | "L5": 4, 16 | "L6": 2 17 | }, 18 | "F3": 19 | {"L1": 2, 20 | "L2": 1, 21 | "L3": 4, 22 | "L4": 3, 23 | "L5": 6, 24 | "L6": 5 25 | }, 26 | "F4": 27 | {"L1": 2, 28 | "L2": 3, 29 | "L3": 1, 30 | "L4": 5, 31 | "L5": 4, 32 | "L6": 6 33 | }, 34 | "F5": 35 | {"L1": 2, 36 | "L2": 3, 37 | "L3": 1, 38 | "L4": 5, 39 | "L5": 4, 40 | "L6": 6 41 | }, 42 | "F6": 43 | {"L1": 2, 44 | "L2": 3, 45 | "L3": 1, 46 | "L4": 5, 47 | "L5": 4, 48 | "L6": 6 49 | } 50 | }, 51 | "Processing Time": 52 | {"F1": 53 | {"L1": 10, 54 | "L2": 20, 55 | "L3": 10, 56 | "L4": 20, 57 | "L5": 10, 58 | "L6": 20 59 | }, 60 | "F2": 61 | {"L1": 10, 62 | "L2": 20, 63 | "L3": 10, 64 | "L4": 20, 65 | "L5": 10, 66 | "L6": 20 67 | }, 68 | "F3": 69 | {"L1": 10, 70 | "L2": 20, 71 | "L3": 10, 72 | "L4": 20, 73 | "L5": 10, 74 | "L6": 20 75 | }, 76 | "F4": 77 | {"L1": 10, 78 | "L2": 20, 79 | "L3": 10, 80 | "L4": 20, 81 | "L5": 10, 82 | "L6": 20 83 | }, 84 | "F5": 85 | {"L1": 10, 86 | "L2": 20, 87 | "L3": 10, 88 | "L4": 20, 89 | 
"L5": 10, 90 | "L6": 20 91 | }, 92 | "F6": 93 | {"L1": 10, 94 | "L2": 20, 95 | "L3": 10, 96 | "L4": 20, 97 | "L5": 10, 98 | "L6": 20 99 | } 100 | }} 101 | -------------------------------------------------------------------------------- /src/data_10links.json: -------------------------------------------------------------------------------- 1 | {"Flow Sequence": 2 | {"F1": 3 | { 4 | "L1": 1, 5 | "L2": 2, 6 | "L3": 3, 7 | "L4": 4, 8 | "L5": 0, 9 | "L6": 0, 10 | "L7": 0, 11 | "L8": 0, 12 | "L9": 0, 13 | "L10": 0 14 | }, 15 | "F2": 16 | { 17 | "L1": 1, 18 | "L2": 2, 19 | "L3": 3, 20 | "L4": 4, 21 | "L5": 0, 22 | "L6": 0, 23 | "L7": 0, 24 | "L8": 0, 25 | "L9": 0, 26 | "L10": 0 27 | }, 28 | "F3": 29 | { 30 | "L1": 0, 31 | "L2": 0, 32 | "L3": 0, 33 | "L4": 0, 34 | "L5": 5, 35 | "L6": 6, 36 | "L7": 7, 37 | "L8": 8, 38 | "L9": 0, 39 | "L10": 0 40 | }, 41 | "F4": 42 | { 43 | "L1": 0, 44 | "L2": 0, 45 | "L3": 0, 46 | "L4": 0, 47 | "L5": 5, 48 | "L6": 6, 49 | "L7": 7, 50 | "L8": 8, 51 | "L9": 0, 52 | "L10": 0 53 | } 54 | }, 55 | "Processing Time": 56 | {"F1": 57 | { 58 | "L1": 10, 59 | "L2": 20, 60 | "L3": 10, 61 | "L4": 20, 62 | "L5": 10, 63 | "L6": 20, 64 | "L7": 20, 65 | "L8": 20, 66 | "L9": 20, 67 | "L10": 20 68 | }, 69 | "F2": 70 | { 71 | "L1": 10, 72 | "L2": 20, 73 | "L3": 10, 74 | "L4": 20, 75 | "L5": 10, 76 | "L6": 20, 77 | "L7": 20, 78 | "L8": 20, 79 | "L9": 20, 80 | "L10": 20 81 | }, 82 | "F3": 83 | { 84 | "L1": 10, 85 | "L2": 20, 86 | "L3": 10, 87 | "L4": 20, 88 | "L5": 10, 89 | "L6": 20, 90 | "L7": 20, 91 | "L8": 20, 92 | "L9": 20, 93 | "L10": 20 94 | }, 95 | "F4": 96 | { 97 | "L1": 10, 98 | "L2": 20, 99 | "L3": 10, 100 | "L4": 20, 101 | "L5": 10, 102 | "L6": 20, 103 | "L7": 20, 104 | "L8": 20, 105 | "L9": 20, 106 | "L10": 20 107 | } 108 | }} 109 | -------------------------------------------------------------------------------- /src/requirements.txt: -------------------------------------------------------------------------------- 1 | certifi==2023.5.7 2 | 
charset-normalizer==3.1.0 3 | chart-studio==1.1.0 4 | contourpy==1.0.7 5 | cycler==0.11.0 6 | fonttools==4.39.3 7 | idna==3.4 8 | importlib-resources==5.12.0 9 | kiwisolver==1.4.4 10 | matplotlib==3.7.1 11 | numpy==1.24.3 12 | packaging==23.1 13 | pandas==2.0.1 14 | Pillow==9.5.0 15 | plotly==5.14.1 16 | pyparsing==3.0.9 17 | python-dateutil==2.8.2 18 | pytz==2023.3 19 | requests==2.30.0 20 | retrying==1.3.4 21 | six==1.16.0 22 | tenacity==8.2.2 23 | tzdata==2023.3 24 | urllib3==2.0.2 25 | zipp==3.15.0 26 | -------------------------------------------------------------------------------- /src/resulting_schedule.txt: -------------------------------------------------------------------------------- 1 | [{'Task': 'Link 1', 'Start': '2020-02-01 0:00:10', 'Finish': '2020-02-01 0:00:20', 'Resource': 'Flow 1 Deadline 200'}, {'Task': 'Link 1', 'Start': '2020-02-01 0:00:00', 'Finish': '2020-02-01 0:00:10', 'Resource': 'Flow 2 Deadline 300'}, {'Task': 'Link 2', 'Start': '2020-02-01 0:00:30', 'Finish': '2020-02-01 0:00:50', 'Resource': 'Flow 1 Deadline 200'}, {'Task': 'Link 2', 'Start': '2020-02-01 0:00:10', 'Finish': '2020-02-01 0:00:30', 'Resource': 'Flow 2 Deadline 300'}, {'Task': 'Link 3', 'Start': '2020-02-01 0:00:50', 'Finish': '2020-02-01 0:01:00', 'Resource': 'Flow 1 Deadline 200'}, {'Task': 'Link 3', 'Start': '2020-02-01 0:00:30', 'Finish': '2020-02-01 0:00:40', 'Resource': 'Flow 2 Deadline 300'}, {'Task': 'Link 4', 'Start': '2020-02-01 0:01:00', 'Finish': '2020-02-01 0:01:20', 'Resource': 'Flow 1 Deadline 200'}, {'Task': 'Link 4', 'Start': '2020-02-01 0:00:40', 'Finish': '2020-02-01 0:01:00', 'Resource': 'Flow 2 Deadline 300'}, {'Task': 'Link 5', 'Start': '2020-02-01 0:00:00', 'Finish': '2020-02-01 0:00:10', 'Resource': 'Flow 3 Deadline 200'}, {'Task': 'Link 5', 'Start': '2020-02-01 0:00:10', 'Finish': '2020-02-01 0:00:20', 'Resource': 'Flow 4 Deadline 300'}, {'Task': 'Link 6', 'Start': '2020-02-01 0:00:10', 'Finish': '2020-02-01 0:00:30', 'Resource': 'Flow 3 Deadline 
200'}, {'Task': 'Link 6', 'Start': '2020-02-01 0:00:30', 'Finish': '2020-02-01 0:00:50', 'Resource': 'Flow 4 Deadline 300'}, {'Task': 'Link 7', 'Start': '2020-02-01 0:00:30', 'Finish': '2020-02-01 0:00:50', 'Resource': 'Flow 3 Deadline 200'}, {'Task': 'Link 7', 'Start': '2020-02-01 0:00:50', 'Finish': '2020-02-01 0:01:10', 'Resource': 'Flow 4 Deadline 300'}, {'Task': 'Link 8', 'Start': '2020-02-01 0:00:50', 'Finish': '2020-02-01 0:01:10', 'Resource': 'Flow 3 Deadline 200'}, {'Task': 'Link 8', 'Start': '2020-02-01 0:01:10', 'Finish': '2020-02-01 0:01:30', 'Resource': 'Flow 4 Deadline 300'}] 2 | {'start': '2020-02-01T00:00:00', 'end': '2020-02-01T00:01:30', 'data': [{'Task': 'Link 1', 'Start': '2020-02-01T00:00:10', 'Finish': '2020-02-01T00:00:20', 'Resource': 'Flow 1 Deadline 200'}, {'Task': 'Link 1', 'Start': '2020-02-01T00:00:00', 'Finish': '2020-02-01T00:00:10', 'Resource': 'Flow 2 Deadline 300'}, {'Task': 'Link 2', 'Start': '2020-02-01T00:00:30', 'Finish': '2020-02-01T00:00:50', 'Resource': 'Flow 1 Deadline 200'}, {'Task': 'Link 2', 'Start': '2020-02-01T00:00:10', 'Finish': '2020-02-01T00:00:30', 'Resource': 'Flow 2 Deadline 300'}, {'Task': 'Link 3', 'Start': '2020-02-01T00:00:50', 'Finish': '2020-02-01T00:01:00', 'Resource': 'Flow 1 Deadline 200'}, {'Task': 'Link 3', 'Start': '2020-02-01T00:00:30', 'Finish': '2020-02-01T00:00:40', 'Resource': 'Flow 2 Deadline 300'}, {'Task': 'Link 4', 'Start': '2020-02-01T00:01:00', 'Finish': '2020-02-01T00:01:20', 'Resource': 'Flow 1 Deadline 200'}, {'Task': 'Link 4', 'Start': '2020-02-01T00:00:40', 'Finish': '2020-02-01T00:01:00', 'Resource': 'Flow 2 Deadline 300'}, {'Task': 'Link 5', 'Start': '2020-02-01T00:00:00', 'Finish': '2020-02-01T00:00:10', 'Resource': 'Flow 3 Deadline 200'}, {'Task': 'Link 5', 'Start': '2020-02-01T00:00:10', 'Finish': '2020-02-01T00:00:20', 'Resource': 'Flow 4 Deadline 300'}, {'Task': 'Link 6', 'Start': '2020-02-01T00:00:10', 'Finish': '2020-02-01T00:00:30', 'Resource': 'Flow 3 Deadline 200'}, 
{'Task': 'Link 6', 'Start': '2020-02-01T00:00:30', 'Finish': '2020-02-01T00:00:50', 'Resource': 'Flow 4 Deadline 300'}, {'Task': 'Link 7', 'Start': '2020-02-01T00:00:30', 'Finish': '2020-02-01T00:00:50', 'Resource': 'Flow 3 Deadline 200'}, {'Task': 'Link 7', 'Start': '2020-02-01T00:00:50', 'Finish': '2020-02-01T00:01:10', 'Resource': 'Flow 4 Deadline 300'}, {'Task': 'Link 8', 'Start': '2020-02-01T00:00:50', 'Finish': '2020-02-01T00:01:10', 'Resource': 'Flow 3 Deadline 200'}, {'Task': 'Link 8', 'Start': '2020-02-01T00:01:10', 'Finish': '2020-02-01T00:01:30', 'Resource': 'Flow 4 Deadline 300'}]} 3 | Schedule {'start': '2020-02-01T00:00:00', 'end': '2020-02-01T00:01:30', 'data': [{'Task': 'Link 1', 'Start': '2020-02-01T00:00:10', 'Finish': '2020-02-01T00:00:20', 'Resource': 'Flow 1 Deadline 200'}, {'Task': 'Link 1', 'Start': '2020-02-01T00:00:00', 'Finish': '2020-02-01T00:00:10', 'Resource': 'Flow 2 Deadline 300'}, {'Task': 'Link 2', 'Start': '2020-02-01T00:00:30', 'Finish': '2020-02-01T00:00:50', 'Resource': 'Flow 1 Deadline 200'}, {'Task': 'Link 2', 'Start': '2020-02-01T00:00:10', 'Finish': '2020-02-01T00:00:30', 'Resource': 'Flow 2 Deadline 300'}, {'Task': 'Link 3', 'Start': '2020-02-01T00:00:50', 'Finish': '2020-02-01T00:01:00', 'Resource': 'Flow 1 Deadline 200'}, {'Task': 'Link 3', 'Start': '2020-02-01T00:00:30', 'Finish': '2020-02-01T00:00:40', 'Resource': 'Flow 2 Deadline 300'}, {'Task': 'Link 4', 'Start': '2020-02-01T00:01:00', 'Finish': '2020-02-01T00:01:20', 'Resource': 'Flow 1 Deadline 200'}, {'Task': 'Link 4', 'Start': '2020-02-01T00:00:40', 'Finish': '2020-02-01T00:01:00', 'Resource': 'Flow 2 Deadline 300'}, {'Task': 'Link 5', 'Start': '2020-02-01T00:00:00', 'Finish': '2020-02-01T00:00:10', 'Resource': 'Flow 3 Deadline 200'}, {'Task': 'Link 5', 'Start': '2020-02-01T00:00:10', 'Finish': '2020-02-01T00:00:20', 'Resource': 'Flow 4 Deadline 300'}, {'Task': 'Link 6', 'Start': '2020-02-01T00:00:10', 'Finish': '2020-02-01T00:00:30', 'Resource': 'Flow 3 Deadline 
200'}, {'Task': 'Link 6', 'Start': '2020-02-01T00:00:30', 'Finish': '2020-02-01T00:00:50', 'Resource': 'Flow 4 Deadline 300'}, {'Task': 'Link 7', 'Start': '2020-02-01T00:00:30', 'Finish': '2020-02-01T00:00:50', 'Resource': 'Flow 3 Deadline 200'}, {'Task': 'Link 7', 'Start': '2020-02-01T00:00:50', 'Finish': '2020-02-01T00:01:10', 'Resource': 'Flow 4 Deadline 300'}, {'Task': 'Link 8', 'Start': '2020-02-01T00:00:50', 'Finish': '2020-02-01T00:01:10', 'Resource': 'Flow 3 Deadline 200'}, {'Task': 'Link 8', 'Start': '2020-02-01T00:01:10', 'Finish': '2020-02-01T00:01:30', 'Resource': 'Flow 4 Deadline 300'}]} 4 | 5 | -------------------------------------------------------------------------------- /src/schedulegenadapt.py: -------------------------------------------------------------------------------- 1 | """ Defined function for static initial data for Production Planning """ 2 | 3 | #importing libraries 4 | import pandas as pd 5 | import json 6 | 7 | def data_from_json(): 8 | # Opening JSON file 9 | f = open('data.json') 10 | 11 | # returns JSON object as 12 | # a dictionary 13 | data = json.load(f) 14 | 15 | # Iterating through the json 16 | # list 17 | for i in data['Flow Sequence']: 18 | print(i) 19 | 20 | # Closing file 21 | f.close() 22 | return data 23 | def data_excel_json(excel_sheet): 24 | """ convert excel into json """ 25 | data_excel = xl.load_workbook(excel_sheet) 26 | data = {} 27 | sheet_name = data_excel.sheetnames 28 | for sheet in sheet_name: 29 | print("Sheet",sheet) 30 | wb_sheet = data_excel[sheet] 31 | cell_values = wb_sheet.values 32 | print(cell_values) 33 | df = pd.DataFrame(cell_values, columns=next(cell_values)) 34 | print(df) 35 | df.iloc[:, 0] = df.iloc[:, 0].apply(lambda x : x.strip()) 36 | df.index = df.iloc[:, 0] 37 | df.drop(columns = df.columns[0], inplace=True) 38 | data[sheet] = df.T.to_dict() 39 | return data 40 | 41 | def json_to_df(json_data): 42 | """ convert json into excel """ 43 | dict_data = {} 44 | for key in json_data.keys(): 
45 | dict_data[key] = pd.DataFrame(json_data.get(key)).T 46 | return dict_data 47 | 48 | 49 | ''' Solving job shop scheduling problem by gentic algorithm ''' 50 | 51 | # importing required modules 52 | import pandas as pd 53 | import numpy as np 54 | import matplotlib.pyplot as plt 55 | import pandas as pd 56 | #import chart_studio.plotly as py 57 | import plotly.figure_factory as ff 58 | import datetime 59 | import time 60 | import copy 61 | 62 | 63 | def json_to_df(json_data): 64 | """ convert json into excel """ 65 | dict_data = {} 66 | for key in json_data.keys(): 67 | dict_data[key] = pd.DataFrame(json_data.get(key)).T 68 | 69 | return dict_data 70 | 71 | ##################FLOWS################################## 72 | # f1 : ES1->SW1 (Link1), SW1->ES3 (Link2), Period: 1000 ms, Nw Delay 20ms 73 | # f2 : ES1-> SW1(Link1), SW1->SW2 (Link3),SW2->ES3 (Link4) , Period 2000ms, Nw Delay-10ms 74 | # f3 : ES2->SW2(link5), SW2->SW1(Link6), SW1->ES3 (Link2) , Period: 1000ms, 10 75 | # f4 : ES2->SW2 (Link5), SW2->SW1 (Link6), SW1->ES3 (Link2), Period: 2000ms, 20ms 76 | 77 | 78 | def traffic_schedule(data_dict, population_size=30, crossover_rate=0.8, mutation_rate=0.2, mutation_selection_rate=0.2, 79 | num_iteration=2000): 80 | """ initialize genetic algorithm parameters and read data """ 81 | data_json = json_to_df(data_dict) 82 | flow_sequence_tmp = data_json['Flow Sequence'] 83 | process_time_tmp = data_json['Processing Time'] 84 | 85 | df_shape = process_time_tmp.shape 86 | 87 | num_links = df_shape[1] # number of links 88 | num_flows = df_shape[0] # number of flows 89 | print("Num Links,", num_links, "Num of Flows",num_flows) 90 | num_gene = num_links * num_flows # number of genes in a chromosome 91 | num_mutation_jobs = round(num_gene * mutation_selection_rate) 92 | 93 | process_time = [list(map(int, process_time_tmp.iloc[i])) for i in range(num_flows)] 94 | flow_sequence = [list(map(int, flow_sequence_tmp.iloc[i])) for i in range(num_flows)] 95 | 96 | # start_time = 
time.time() 97 | 98 | Tbest = 999999999999999 99 | 100 | best_list, best_obj = [], [] 101 | population_list = [] 102 | makespan_record = [] 103 | 104 | # Initial Population 105 | for i in range(population_size): 106 | nxm_random_num = list(np.random.permutation(num_gene)) # generate a random permutation of 0 to num_job*num_mc-1 107 | population_list.append(nxm_random_num) # add to the population_list 108 | for j in range(num_gene): 109 | population_list[i][j] = population_list[i][ 110 | j] % num_flows # convert to flow number format, every flow appears m times 111 | 112 | #Iterations start here 113 | for iteration in range(num_iteration): 114 | Tbest_now = 99999999999 115 | 116 | """ Two Point Cross-Over """ 117 | parent_list = copy.deepcopy(population_list) 118 | offspring_list = copy.deepcopy( 119 | population_list) # generate a random sequence to select the parent chromosome to crossover 120 | pop_random_size = list(np.random.permutation(population_size)) 121 | 122 | for size in range(int(population_size / 2)): 123 | crossover_prob = np.random.rand() 124 | if crossover_rate >= crossover_prob: 125 | parent_1 = population_list[pop_random_size[2 * size]][:] 126 | parent_2 = population_list[pop_random_size[2 * size + 1]][:] 127 | 128 | child_1 = parent_1[:] 129 | child_2 = parent_2[:] 130 | cutpoint = list(np.random.choice(num_gene, 2, replace=False)) 131 | cutpoint.sort() 132 | 133 | child_1[cutpoint[0]:cutpoint[1]] = parent_2[cutpoint[0]:cutpoint[1]] 134 | child_2[cutpoint[0]:cutpoint[1]] = parent_1[cutpoint[0]:cutpoint[1]] 135 | offspring_list[pop_random_size[2 * size]] = child_1[:] 136 | offspring_list[pop_random_size[2 * size + 1]] = child_2[:] 137 | 138 | for pop in range(population_size): 139 | 140 | """ Repairment """ 141 | job_count = {} 142 | larger, less = [], [] # 'larger' record jobs appear in the chromosome more than pop times, and 'less' records less than pop times. 
143 | for job in range(num_flows): 144 | if job in offspring_list[pop]: 145 | count = offspring_list[pop].count(job) 146 | pos = offspring_list[pop].index(job) 147 | job_count[job] = [count, pos] # store the above two values to the job_count dictionary 148 | else: 149 | count = 0 150 | job_count[job] = [count, 0] 151 | 152 | if count > num_links: 153 | larger.append(job) 154 | elif count < num_links: 155 | less.append(job) 156 | 157 | for large in range(len(larger)): 158 | change_job = larger[large] 159 | while job_count[change_job][0] > num_links: 160 | for les in range(len(less)): 161 | if job_count[less[les]][0] < num_links: 162 | offspring_list[pop][job_count[change_job][1]] = less[les] 163 | job_count[change_job][1] = offspring_list[pop].index(change_job) 164 | job_count[change_job][0] = job_count[change_job][0] - 1 165 | job_count[less[les]][0] = job_count[less[les]][0] + 1 166 | if job_count[change_job][0] == num_links: 167 | break 168 | 169 | for off_spring in range(len(offspring_list)): 170 | 171 | """ Mutations """ 172 | mutation_prob = np.random.rand() 173 | if mutation_rate >= mutation_prob: 174 | m_change = list( 175 | np.random.choice(num_gene, num_mutation_jobs, replace=False)) # chooses the position to mutation 176 | t_value_last = offspring_list[off_spring][ 177 | m_change[0]] # save the value which is on the first mutation position 178 | for i in range(num_mutation_jobs - 1): 179 | offspring_list[off_spring][m_change[i]] = offspring_list[off_spring][ 180 | m_change[i + 1]] # displacement 181 | # move the value of the first mutation position to the last mutation position 182 | offspring_list[off_spring][m_change[num_mutation_jobs - 1]] = t_value_last 183 | 184 | """ fitness value (calculate makespan) """ 185 | total_chromosome = copy.deepcopy(parent_list) + copy.deepcopy( 186 | offspring_list) # parent and offspring chromosomes combination 187 | chrom_fitness, chrom_fit = [], [] 188 | total_fitness = 0 189 | for pop_size in range(population_size * 
2): 190 | j_keys = [j for j in range(num_flows)] 191 | key_count = {key: 0 for key in j_keys} 192 | j_count = {key: 0 for key in j_keys} 193 | m_keys = [j + 1 for j in range(num_links)] 194 | m_count = {key: 0 for key in m_keys} 195 | #print(m_count) 196 | for i in total_chromosome[pop_size]: 197 | gen_t = int(process_time[i][key_count[i]]) 198 | #print("Gen_t",gen_t) 199 | 200 | gen_m = int(flow_sequence[i][key_count[i]]) 201 | #if (gen_m != 0): 202 | j_count[i] = j_count[i] + gen_t 203 | #print("Genm",gen_m) 204 | #print ("I value",i, "Genm value", gen_m) 205 | m_count[gen_m] = m_count[gen_m] + gen_t 206 | 207 | if m_count[gen_m] < j_count[i]: 208 | m_count[gen_m] = j_count[i] 209 | elif m_count[gen_m] > j_count[i]: 210 | j_count[i] = m_count[gen_m] 211 | #else: 212 | # m_count[gen_m]=0 213 | # j_count[i]=0 214 | key_count[i] = key_count[i] + 1 215 | 216 | makespan = max(j_count.values()) 217 | chrom_fitness.append(1 / makespan) 218 | chrom_fit.append(makespan) 219 | total_fitness = total_fitness + chrom_fitness[pop_size] 220 | 221 | """ Selection (roulette wheel approach) """ 222 | pk, qk = [], [] 223 | 224 | for size in range(population_size * 2): 225 | pk.append(chrom_fitness[size] / total_fitness) 226 | for size in range(population_size * 2): 227 | cumulative = 0 228 | 229 | for j in range(0, size + 1): 230 | cumulative = cumulative + pk[j] 231 | qk.append(cumulative) 232 | 233 | selection_rand = [np.random.rand() for i in range(population_size)] 234 | 235 | for pop_size in range(population_size): 236 | if selection_rand[pop_size] <= qk[0]: 237 | population_list[pop_size] = copy.deepcopy(total_chromosome[0]) 238 | else: 239 | for j in range(0, population_size * 2 - 1): 240 | if selection_rand[pop_size] > qk[j] and selection_rand[pop_size] <= qk[j + 1]: 241 | population_list[pop_size] = copy.deepcopy(total_chromosome[j + 1]) 242 | break 243 | 244 | """ comparison """ 245 | for pop_size in range(population_size * 2): 246 | if chrom_fit[pop_size] < Tbest_now: 
247 | Tbest_now = chrom_fit[pop_size] 248 | sequence_now = copy.deepcopy(total_chromosome[pop_size]) 249 | if Tbest_now <= Tbest: 250 | Tbest = Tbest_now 251 | sequence_best = copy.deepcopy(sequence_now) 252 | 253 | makespan_record.append(Tbest) 254 | 255 | """ Results - Makespan """ 256 | 257 | print("optimal sequence", sequence_best) 258 | print("optimal value:%f" % Tbest) 259 | print("\n") 260 | # print('the elapsed time:%s'% (time.time() - start_time)) 261 | 262 | # %matplotlib inline 263 | plt.plot([i for i in range(len(makespan_record))], makespan_record, 'b') 264 | plt.ylabel('makespan', fontsize=15) 265 | plt.xlabel('generation', fontsize=15) 266 | plt.show() 267 | 268 | """ plot gantt chart """ 269 | 270 | m_keys = [j + 1 for j in range(num_links)] 271 | j_keys = [j for j in range(num_flows)] 272 | key_count = {key: 0 for key in j_keys} 273 | j_count = {key: 0 for key in j_keys} 274 | m_count = {key: 0 for key in m_keys} 275 | j_record = {} 276 | for i in sequence_best: 277 | gen_t = int(process_time[i][key_count[i]]) 278 | gen_m = int(flow_sequence[i][key_count[i]]) 279 | #if (gen_m != 0): 280 | j_count[i] = j_count[i] + gen_t 281 | 282 | m_count[gen_m] = m_count[gen_m] + gen_t 283 | 284 | if m_count[gen_m] < j_count[i]: 285 | m_count[gen_m] = j_count[i] 286 | elif m_count[gen_m] > j_count[i]: 287 | j_count[i] = m_count[gen_m] 288 | #else: 289 | #j_count[i] = j_count[i] + gen_t 290 | #m_count[gen_m] = 0 291 | 292 | 293 | 294 | start_time = str(datetime.timedelta( 295 | seconds=j_count[i] - process_time[i][key_count[i]])) # convert seconds to hours, minutes and seconds 296 | end_time = str(datetime.timedelta(seconds=j_count[i])) 297 | 298 | j_record[(i, gen_m)] = [start_time, end_time] 299 | 300 | key_count[i] = key_count[i] + 1 301 | #print("J Record",j_record) 302 | 303 | df = [] 304 | for m in m_keys: 305 | for j in j_keys: 306 | #if ( m!=3 & j!=0): 307 | #print(j_record[j, m]) 308 | df.append(dict(Task='Link %s' % (m), Start='2020-02-01 %s' % 
(str(j_record[(j, m)][0])), \ 309 | Finish='2020-02-01 %s' % (str(j_record[(j, m)][1])), Resource='Flow %s' % (j + 1))) 310 | 311 | df_ = pd.DataFrame(df) 312 | df_.Start = pd.to_datetime(df_['Start']) 313 | df_.Finish = pd.to_datetime(df_['Finish']) 314 | start = df_.Start.min() 315 | end = df_.Finish.max() 316 | 317 | df_.Start = df_.Start.apply(lambda x: x.strftime('%Y-%m-%dT%H:%M:%S')) 318 | df_.Finish = df_.Finish.apply(lambda x: x.strftime('%Y-%m-%dT%H:%M:%S')) 319 | print("Df",df_) 320 | data = df_.to_dict(orient='records') 321 | 322 | final_data = { 323 | 'start': start.strftime('%Y-%m-%dT%H:%M:%S'), 324 | 'end': end.strftime('%Y-%m-%dT%H:%M:%S'), 325 | 'data': data} 326 | 327 | fig = ff.create_gantt(df, index_col='Resource', show_colorbar=True, group_tasks=True, showgrid_x=True, 328 | title='Traffic Schedule') 329 | fig.show() 330 | # iplot(fig, filename='GA_job_shop_scheduling') 331 | print(final_data) 332 | return final_data, df 333 | 334 | 335 | """ Job_Shop_Schedule """ 336 | 337 | #data = data_excel_json('data/JSP_dataset.xlsx') 338 | data = data_from_json() 339 | print("Data",data) 340 | schedule = traffic_schedule(data_dict=data) 341 | print("Scheedule",schedule[0]) 342 | 343 | #import chart_studio.plotly as py 344 | #import plotly.figure_factory as ff 345 | 346 | #df = schedule[1] 347 | #fig = ff.create_gantt(df, index_col='Resource', show_colorbar=True, group_tasks=True, showgrid_x=True, title='Job shop Schedule') 348 | #fig.show() 349 | -------------------------------------------------------------------------------- /src/schedulegenmodular.py: -------------------------------------------------------------------------------- 1 | """ Defined function for static initial data for Production Planning """ 2 | 3 | #importing libraries 4 | import pandas as pd 5 | #import openpyxl as xl 6 | import json 7 | 8 | def data_from_json(): 9 | # Opening JSON file 10 | f = open('data.json') 11 | 12 | # returns JSON object as 13 | # a dictionary 14 | data = 
json.load(f) 15 | 16 | # Iterating through the json 17 | # list 18 | for i in data['Flow Sequence']: 19 | print(i) 20 | 21 | # Closing file 22 | f.close() 23 | return data 24 | 25 | def json_to_df(json_data): 26 | """ convert json into excel """ 27 | dict_data = {} 28 | for key in json_data.keys(): 29 | dict_data[key] = pd.DataFrame(json_data.get(key)).T 30 | return dict_data 31 | 32 | 33 | ''' Solving job shop scheduling problem by gentic algorithm ''' 34 | 35 | # importing required modules 36 | import pandas as pd 37 | import numpy as np 38 | import matplotlib.pyplot as plt 39 | import pandas as pd 40 | import chart_studio.plotly as py 41 | import plotly.figure_factory as ff 42 | import datetime 43 | import time 44 | import copy 45 | 46 | 47 | def json_to_df(json_data): 48 | """ convert json into excel """ 49 | dict_data = {} 50 | for key in json_data.keys(): 51 | dict_data[key] = pd.DataFrame(json_data.get(key)).T 52 | 53 | return dict_data 54 | 55 | class Flow: 56 | def __init__(self, identifier, sender, receiver, size, sequence, deadline): 57 | self.identifier = identifier 58 | self.sender = sender 59 | self.receiver = receiver 60 | self.size = size 61 | self.endToEndDelay = 0 62 | self.injectTime = 0 63 | self.arrival = 0 64 | self.deadline = deadline 65 | self.sequence = sequence 66 | self.periodInterval = 1 #1 Millisecond 67 | self.nodeType = 0 # 0- Switch, 1-Source, 2-Destination 68 | self.previous = None 69 | self.subflows = [] 70 | 71 | def get_identifier(self): 72 | return self.identifier 73 | 74 | def display(self, nodetype): 75 | self.nodeType = nodetype 76 | 77 | def set_previousflow(self,Flow): 78 | self.previous = Flow 79 | def add_SubFlow(self,flow): 80 | self.subflows.append(flow) 81 | 82 | def get_endToEndDelay(self): 83 | return self.endToEndDelay 84 | 85 | 86 | ##################FLOWS################################## 87 | # f1 : ES1->SW1 (Link1), SW1->ES3 (Link2), Period: 1000 ms, Nw Delay 20ms 88 | # f2 : ES1-> SW1(Link1), SW1->SW2 
(Link3),SW2->ES3 (Link4) , Period 2000ms, Nw Delay-10ms 89 | # f3 : ES2->SW2(link5), SW2->SW1(Link6), SW1->ES3 (Link2) , Period: 1000ms, 10 90 | # f4 : ES2->SW2 (Link5), SW2->SW1 (Link6), SW1->ES3 (Link2), Period: 2000ms, 20ms 91 | 92 | 93 | #Flows Defined - Traffic flowing across the network 94 | flow1 = Flow(0, "es1", "es3",69632,1,100) # Id, sender, receiver, size, sequence, deadline 95 | flow2 = Flow(1, "es1", "es3",69632,2,200) 96 | flow3 = Flow(2, "es2", "es3",69632,3,100) 97 | flow4 = Flow(3, "es2", "es3",69632,4,200) 98 | flow5 = Flow(4, "es2", "es3",69632,5,100) 99 | flow6 = Flow(5, "es2", "es3",69632,6,200) 100 | 101 | 102 | flows = [flow1, flow2, flow3, flow4, flow5, flow6] 103 | 104 | def prepare_initial_population(population_size, population_list, num_gene, num_flows): 105 | for i in range(population_size): 106 | nxm_random_num = list(np.random.permutation(num_gene)) # generate a random permutation of 0 to num_job*num_mc-1 107 | population_list.append(nxm_random_num) # add to the population_list 108 | for j in range(num_gene): 109 | population_list[i][j] = population_list[i][ 110 | j] % num_flows # convert to flow number format, every flow appears m times 111 | print("Population List:", population_list) 112 | return population_list 113 | 114 | def two_point_crossover(population_list,population_size,crossover_rate, num_gene): 115 | parent_list = copy.deepcopy(population_list) 116 | offspring_list = copy.deepcopy( 117 | population_list) # generate a random sequence to select the parent chromosome to crossover 118 | pop_random_size = list(np.random.permutation(population_size)) 119 | 120 | for size in range(int(population_size / 2)): 121 | crossover_prob = np.random.rand() 122 | if crossover_rate >= crossover_prob: 123 | parent_1 = population_list[pop_random_size[2 * size]][:] 124 | parent_2 = population_list[pop_random_size[2 * size + 1]][:] 125 | 126 | child_1 = parent_1[:] 127 | child_2 = parent_2[:] 128 | cutpoint = list(np.random.choice(num_gene, 2, 
replace=False)) 129 | cutpoint.sort() 130 | 131 | child_1[cutpoint[0]:cutpoint[1]] = parent_2[cutpoint[0]:cutpoint[1]] 132 | child_2[cutpoint[0]:cutpoint[1]] = parent_1[cutpoint[0]:cutpoint[1]] 133 | offspring_list[pop_random_size[2 * size]] = child_1[:] 134 | offspring_list[pop_random_size[2 * size + 1]] = child_2[:] 135 | return offspring_list,parent_list 136 | 137 | def perform_mutations(offspring_list, mutation_rate, num_gene, num_mutation_jobs): 138 | for off_spring in range(len(offspring_list)): 139 | 140 | """ Mutations """ 141 | mutation_prob = np.random.rand() 142 | if mutation_rate >= mutation_prob: 143 | m_change = list( 144 | np.random.choice(num_gene, num_mutation_jobs, replace=False)) # chooses the position to mutation 145 | t_value_last = offspring_list[off_spring][ 146 | m_change[0]] # save the value which is on the first mutation position 147 | for i in range(num_mutation_jobs - 1): 148 | offspring_list[off_spring][m_change[i]] = offspring_list[off_spring][ 149 | m_change[i + 1]] # displacement 150 | # move the value of the first mutation position to the last mutation position 151 | offspring_list[off_spring][m_change[num_mutation_jobs - 1]] = t_value_last 152 | return offspring_list 153 | 154 | def checkfitness_and_calculate_makespan (parent_list,offspring_list,population_size, num_flows,num_links, process_time, flow_sequence): 155 | """ fitness value (calculate makespan) """ 156 | total_chromosome = copy.deepcopy(parent_list) + copy.deepcopy( 157 | offspring_list) # parent and offspring chromosomes combination 158 | chrom_fitness, chrom_fit = [], [] 159 | total_fitness = 0 160 | for pop_size in range(population_size * 2): 161 | f_keys = [j for j in range(num_flows)] 162 | key_count = {key: 0 for key in f_keys} 163 | f_count = {key: 0 for key in f_keys} 164 | l_keys = [j + 1 for j in range(num_links)] 165 | l_count = {key: 0 for key in l_keys} 166 | for i in total_chromosome[pop_size]: 167 | gen_t = int(process_time[i][key_count[i]]) 168 | gen_l = 
int(flow_sequence[i][key_count[i]]) 169 | # Check for flow and deadline here 170 | # if this flow then check if the total timeline is less than deadline, 171 | # if not do not include it in f_count / something like that 172 | flow_i = flows[i-1] 173 | #flow_i.endToEndDelay = f_count[i] + gen_t 174 | if (f_count[i] + gen_t) <= flow_i.deadline: # for a valid schedule, the end to end delay for one flow should be within deadline 175 | print("Fit case, Flow i", flow_i.identifier,":",f_count[i] + gen_t, ":", flow_i.deadline) 176 | f_count[i] = f_count[i] + gen_t 177 | l_count[gen_l] = l_count[gen_l] + gen_t 178 | if l_count[gen_l] < f_count[i]: # Check if 179 | l_count[gen_l] = f_count[i] 180 | elif (l_count[gen_l] > f_count[i] ): 181 | f_count[i] = l_count[gen_l] 182 | #flow_i.endToEndDelay = f_count[i] 183 | print("Special case: Flow i", flow_i.identifier, ":", f_count[i], "Deadline:", flow_i.deadline) 184 | 185 | 186 | key_count[i] = key_count[i] + 1 187 | #print("End to end delay:",flow_i.endToEndDelay) 188 | #else: # unfit case 189 | # continue 190 | 191 | # print("Flow Count", f_count) 192 | # print("Link count", l_count) 193 | if (max(f_count.values())!=0): 194 | 195 | makespan = max(f_count.values()) 196 | chrom_fitness.append(1 / makespan) 197 | chrom_fit.append(makespan) 198 | total_fitness = total_fitness + chrom_fitness[pop_size] 199 | else: 200 | makespan = 10000 201 | chrom_fitness.append(.0001) 202 | chrom_fit.append(makespan) 203 | total_fitness = total_fitness + chrom_fitness[pop_size] 204 | return total_fitness, chrom_fitness, chrom_fit, total_chromosome , makespan 205 | 206 | 207 | def plot_gantt_chart(num_links,num_flows,sequence_best, process_time,flow_sequence): 208 | l_keys = [j + 1 for j in range(num_links)] 209 | f_keys = [j for j in range(num_flows)] 210 | key_count = {key: 0 for key in f_keys} 211 | f_count = {key: 0 for key in f_keys} 212 | l_count = {key: 0 for key in l_keys} 213 | f_record = {} 214 | for i in sequence_best: 215 | gen_t = 
int(process_time[i][key_count[i]]) 216 | gen_l = int(flow_sequence[i][key_count[i]]) 217 | #if (gen_m != 0): 218 | 219 | flow_i = flows[i-1] 220 | #flow_i.endToEndDelay = f_count[i] + gen_t 221 | if (f_count[i] + gen_t) <= flow_i.deadline: # for a valid schedule, the end to end delay for one flow should be within deadline 222 | print("Fit case, Flow i", flow_i.identifier,":",f_count[i] + gen_t, ":", flow_i.deadline) 223 | f_count[i] = f_count[i] + gen_t 224 | l_count[gen_l] = l_count[gen_l] + gen_t 225 | if l_count[gen_l] < f_count[i]: # Check if 226 | l_count[gen_l] = f_count[i] 227 | elif (l_count[gen_l] > f_count[i] ): 228 | f_count[i] = l_count[gen_l] 229 | #flow_i.endToEndDelay = f_count[i] 230 | 231 | 232 | #if flow_i.endToEndDelay <= flow_i.deadline: 233 | # print("Fit case, Flow i", flow_i.identifier, ":", flow_i.endToEndDelay) 234 | #f_count[i] = f_count[i] + gen_t 235 | 236 | #f_count[i] = f_count[i] + gen_t 237 | 238 | #l_count[gen_l] = l_count[gen_l] + gen_t 239 | 240 | #if l_count[gen_l] < f_count[i]: 241 | # l_count[gen_l] = f_count[i] 242 | #elif l_count[gen_l] > f_count[i]: 243 | # f_count[i] = l_count[gen_l] 244 | #else: 245 | #j_count[i] = j_count[i] + gen_t 246 | #m_count[gen_m] = 0 247 | 248 | 249 | 250 | start_time = str(datetime.timedelta( 251 | seconds=f_count[i] - process_time[i][key_count[i]])) # convert seconds to hours, minutes and seconds 252 | end_time = str(datetime.timedelta(seconds=f_count[i])) 253 | 254 | f_record[(i, gen_l)] = [start_time, end_time] 255 | 256 | key_count[i] = key_count[i] + 1 257 | #print("J Record",j_record) 258 | 259 | df = [] 260 | for m in l_keys: 261 | for j in f_keys: 262 | #if ( m!=3 & j!=0): 263 | #print(j_record[j, m]) 264 | df.append(dict(Task='Link %s' % (m), Start='2020-02-01 %s' % (str(f_record[(j, m)][0])), \ 265 | Finish='2020-02-01 %s' % (str(f_record[(j, m)][1])), Resource='Flow %s' % (j + 1))) 266 | 267 | df_ = pd.DataFrame(df) 268 | df_.Start = pd.to_datetime(df_['Start']) 269 | df_.Finish = 
pd.to_datetime(df_['Finish']) 270 | start = df_.Start.min() 271 | end = df_.Finish.max() 272 | 273 | df_.Start = df_.Start.apply(lambda x: x.strftime('%Y-%m-%dT%H:%M:%S')) 274 | df_.Finish = df_.Finish.apply(lambda x: x.strftime('%Y-%m-%dT%H:%M:%S')) 275 | print("Df",df_) 276 | data = df_.to_dict(orient='records') 277 | 278 | final_data = { 279 | 'start': start.strftime('%Y-%m-%dT%H:%M:%S'), 280 | 'end': end.strftime('%Y-%m-%dT%H:%M:%S'), 281 | 'data': data} 282 | 283 | fig = ff.create_gantt(df, index_col='Resource', show_colorbar=True, group_tasks=True, showgrid_x=True, 284 | title='Traffic Schedule') 285 | fig.show() 286 | return final_data,df 287 | 288 | 289 | 290 | def traffic_schedule(data_dict, population_size=30, crossover_rate=0.8, mutation_rate=0.2, mutation_selection_rate=0.2, 291 | num_iteration=2000): 292 | """ initialize genetic algorithm parameters and read data """ 293 | data_json = json_to_df(data_dict) 294 | flow_sequence_tmp = data_json['Flow Sequence'] 295 | process_time_tmp = data_json['Processing Time'] 296 | 297 | df_shape = process_time_tmp.shape 298 | 299 | num_links = df_shape[1] # number of links 300 | num_flows = df_shape[0] # number of flows 301 | print("Num Links,", num_links, "Num of Flows",num_flows) 302 | num_gene = num_links * num_flows # number of genes in a chromosome 303 | num_mutation_jobs = round(num_gene * mutation_selection_rate) 304 | 305 | process_time = [list(map(int, process_time_tmp.iloc[i])) for i in range(num_flows)] 306 | flow_sequence = [list(map(int, flow_sequence_tmp.iloc[i])) for i in range(num_flows)] 307 | 308 | # start_time = time.time() 309 | 310 | Tbest = 999999999999999 311 | 312 | best_list, best_obj = [], [] 313 | population_list = [] 314 | makespan_record = [] 315 | 316 | # Initial Population 317 | population_list=prepare_initial_population(population_size,population_list,num_gene,num_flows) 318 | print ("Population List:", population_list) 319 | #Iterations start here 320 | for iteration in 
range(num_iteration): 321 | Tbest_now = 99999999999 322 | 323 | """ Two Point Cross-Over """ 324 | offspring_list,parent_list = two_point_crossover(population_list,population_size,crossover_rate,num_gene) 325 | 326 | for pop in range(population_size): 327 | 328 | """ Repairment """ 329 | job_count = {} 330 | larger, less = [], [] # 'larger' record jobs appear in the chromosome more than pop times, and 'less' records less than pop times. 331 | for job in range(num_flows): 332 | if job in offspring_list[pop]: 333 | count = offspring_list[pop].count(job) 334 | pos = offspring_list[pop].index(job) 335 | job_count[job] = [count, pos] # store the above two values to the job_count dictionary 336 | else: 337 | count = 0 338 | job_count[job] = [count, 0] 339 | 340 | if count > num_links: 341 | larger.append(job) 342 | elif count < num_links: 343 | less.append(job) 344 | 345 | for large in range(len(larger)): 346 | change_job = larger[large] 347 | while job_count[change_job][0] > num_links: 348 | for les in range(len(less)): 349 | if job_count[less[les]][0] < num_links: 350 | offspring_list[pop][job_count[change_job][1]] = less[les] 351 | job_count[change_job][1] = offspring_list[pop].index(change_job) 352 | job_count[change_job][0] = job_count[change_job][0] - 1 353 | job_count[less[les]][0] = job_count[less[les]][0] + 1 354 | if job_count[change_job][0] == num_links: 355 | break 356 | 357 | offspring_list = perform_mutations(offspring_list,mutation_rate, num_gene, num_mutation_jobs) 358 | 359 | total_fitness, chrom_fitness, chrom_fit, total_chromosome , makespan = checkfitness_and_calculate_makespan(parent_list,offspring_list,population_size, num_flows,num_links, process_time, flow_sequence) 360 | 361 | 362 | """ Selection (roulette wheel approach) """ 363 | pk, qk = [], [] 364 | 365 | for size in range(population_size * 2): 366 | pk.append(chrom_fitness[size] / total_fitness) 367 | for size in range(population_size * 2): 368 | cumulative = 0 369 | 370 | for j in range(0, 
size + 1): 371 | cumulative = cumulative + pk[j] 372 | qk.append(cumulative) 373 | 374 | selection_rand = [np.random.rand() for i in range(population_size)] 375 | 376 | for pop_size in range(population_size): 377 | if selection_rand[pop_size] <= qk[0]: 378 | population_list[pop_size] = copy.deepcopy(total_chromosome[0]) 379 | else: 380 | for j in range(0, population_size * 2 - 1): 381 | if selection_rand[pop_size] > qk[j] and selection_rand[pop_size] <= qk[j + 1]: 382 | population_list[pop_size] = copy.deepcopy(total_chromosome[j + 1]) 383 | break 384 | 385 | """ comparison """ 386 | for pop_size in range(population_size * 2): 387 | chom_fit_current =chrom_fit[pop_size] 388 | if chom_fit_current < Tbest_now: 389 | Tbest_now = chrom_fit[pop_size] 390 | sequence_now = copy.deepcopy(total_chromosome[pop_size]) 391 | if Tbest_now <= Tbest: 392 | Tbest = Tbest_now 393 | sequence_best = copy.deepcopy(sequence_now) 394 | 395 | makespan_record.append(Tbest) 396 | 397 | """ Results - Makespan """ 398 | 399 | print("optimal sequence", sequence_best) 400 | print("optimal value:%f" % Tbest) 401 | print("\n") 402 | # print('the elapsed time:%s'% (time.time() - start_time)) 403 | 404 | # %matplotlib inline 405 | plt.plot([i for i in range(len(makespan_record))], makespan_record, 'b') 406 | plt.ylabel('makespan', fontsize=15) 407 | plt.xlabel('generation', fontsize=15) 408 | plt.show() 409 | 410 | print(makespan_record) 411 | 412 | """ plot gantt chart """ 413 | final_data,df=plot_gantt_chart( num_links,num_flows,sequence_best, process_time,flow_sequence) 414 | 415 | # iplot(fig, filename='GA_job_shop_scheduling') 416 | print(final_data) 417 | return final_data, df 418 | 419 | 420 | """ Job_Shop_Schedule """ 421 | 422 | #data = data_excel_json('data/JSP_dataset.xlsx') 423 | data = data_from_json() 424 | print("Data",data) 425 | schedule = traffic_schedule(data_dict=data) 426 | print("Schedule",schedule[0]) 427 | 428 | #import chart_studio.plotly as py 429 | #import 
plotly.figure_factory as ff 430 | 431 | #df = schedule[1] 432 | #fig = ff.create_gantt(df, index_col='Resource', show_colorbar=True, group_tasks=True, showgrid_x=True, title='Job shop Schedule') 433 | #fig.show() 434 | -------------------------------------------------------------------------------- /src/schedulegenwithdeadline.py: -------------------------------------------------------------------------------- 1 | """ Defined function for static initial data for Production Planning """ 2 | 3 | # importing libraries 4 | import pandas as pd 5 | # import openpyxl as xl 6 | import json 7 | import time 8 | 9 | #Abhilash 2024 - Old case before 2024- line 13 uncommented, 14 commented, Line 119 uncommented - 121 -commented 10 | #Current Case - Create Schedule for wifi wireless case 11 | def data_from_json(): 12 | # Opening JSON file 13 | #f = open('data.json') # Commented Abhilash 2024 old case 14 | f = open('data_10links.json') # Added Abhilash 2024 new case 10 Links 15 | 16 | # returns JSON object as 17 | # a dictionary 18 | data = json.load(f) 19 | 20 | # Iterating through the json 21 | # list 22 | for i in data['Flow Sequence']: 23 | print(i) 24 | 25 | # Closing file 26 | f.close() 27 | return data 28 | 29 | 30 | def json_to_df(json_data): 31 | """ convert json into excel """ 32 | dict_data = {} 33 | for key in json_data.keys(): 34 | dict_data[key] = pd.DataFrame(json_data.get(key)).T 35 | return dict_data 36 | 37 | 38 | ''' Solving trafficscheduling problem by genetic algorithm ''' 39 | 40 | # importing required modules 41 | import pandas as pd 42 | import numpy as np 43 | import matplotlib.pyplot as plt 44 | import pandas as pd 45 | import chart_studio.plotly as py 46 | import plotly.figure_factory as ff 47 | import datetime 48 | import time 49 | import copy 50 | 51 | 52 | def json_to_df(json_data): 53 | """ convert json into excel """ 54 | dict_data = {} 55 | for key in json_data.keys(): 56 | dict_data[key] = pd.DataFrame(json_data.get(key)).T 57 | 58 | return 
dict_data

"""
Fit chromosomes list
"""

class FitChromosome:
    # Pairs a chromosome (sequence of flow-id genes) with the fitness figures
    # computed for it, so deadline-fit candidates can later be compared by
    # makespan in find_optimal_sequence().
    def __init__(self, chromosome, makespan, total_fitness, chrom_fit):
        self.chromosome = chromosome        # gene sequence (list of flow ids)
        self.makespan = makespan            # schedule length for this chromosome
        self.total_fitness = total_fitness  # running population fitness at creation time
        self.chrom_fit = chrom_fit          # per-chromosome makespan list at creation time


class Flow:
    # One traffic flow crossing the network.  `deadline` is the end-to-end
    # latency bound enforced by the fitness check; the other timing fields
    # are bookkeeping updated while scheduling.
    def __init__(self, identifier, sender, receiver, size, sequence, deadline):
        self.identifier = identifier  # 0-based flow id, used to index the global `flows` list
        self.sender = sender          # source end-station name, e.g. "es1"
        self.receiver = receiver      # destination end-station name
        self.size = size              # frame size in bits
        self.endToEndDelay = 0
        self.injectTime = 0
        self.arrival = 0
        self.deadline = deadline      # end-to-end latency bound for the fitness check
        self.sequence = sequence      # 1-based position in the flow set
        self.periodInterval = 1  # 1 Millisecond
        self.nodeType = 0  # 0- Switch, 1-Source, 2-Destination
        self.previous = None          # optional predecessor Flow in a chain
        self.subflows = []

    def get_identifier(self):
        # Accessor kept for existing callers.
        return self.identifier

    def display(self, nodetype):
        # NOTE(review): despite its name, this is a setter for nodeType,
        # not a printing routine — confirm intent with the author.
        self.nodeType = nodetype

    def set_previousflow(self, Flow):
        # NOTE(review): the parameter shadows the Flow class name; renaming it
        # would be safer but is left untouched here.
        self.previous = Flow

    def add_SubFlow(self, flow):
        self.subflows.append(flow)

    def get_endToEndDelay(self):
        return self.endToEndDelay


##################FLOWS##################################
# f1 : ES1->SW1 (Link1), SW1->ES3 (Link2), Period: 1000 ms, Nw Delay 20ms
# f2 : ES1-> SW1(Link1), SW1->SW2 (Link3),SW2->ES3 (Link4) , Period 2000ms, Nw Delay-10ms
# f3 : ES2->SW2(link5), SW2->SW1(Link6), SW1->ES3 (Link2) , Period: 1000ms, 10
# f4 : ES2->SW2 (Link5), SW2->SW1 (Link6), SW1->ES3 (Link2), Period: 2000ms, 20ms


# Flows Defined - Traffic flowing across the network
flow1 = Flow(0, "es1", "es3", 69632, 1, 200)  # Id, sender, receiver, size, sequence, deadline
flow2 = Flow(1, "es1", "es3", 69632, 2, 300)
flow3 = Flow(2, "es2", "es3", 69632, 3, 200)
flow4 = Flow(3, "es2", "es3", 69632, 4, 300)
flow5 = Flow(4, "es2", "es3", 69632, 5, 200)
flow6 = Flow(5, "es2", "es3", 69632, 
6, 300) 118 | 119 | #flows = [flow1, flow2, flow3, flow4, flow5, flow6] #Case 1 - 4 flows old case commented Abhilash 2024 120 | 121 | flows = [flow1, flow2, flow3, flow4] #Added Abhilash 2024 122 | def prepare_initial_population(population_size, population_list, num_gene, num_flows): 123 | for i in range(population_size): 124 | nxm_random_num = list(np.random.permutation(num_gene)) # generate a random permutation of 0 to num_job*num_mc-1 125 | population_list.append(nxm_random_num) # add to the population_list 126 | for j in range(num_gene): 127 | population_list[i][j] = population_list[i][ 128 | j] % num_flows # convert to flow number format, every flow appears m times 129 | #print("Population List:", population_list) 130 | return population_list 131 | 132 | 133 | def two_point_crossover(population_list, population_size, crossover_rate, num_gene): 134 | parent_list = copy.deepcopy(population_list) 135 | offspring_list = copy.deepcopy( 136 | population_list) # generate a random sequence to select the parent chromosome to crossover 137 | pop_random_size = list(np.random.permutation(population_size)) 138 | 139 | for size in range(int(population_size / 2)): 140 | crossover_prob = np.random.rand() 141 | if crossover_rate >= crossover_prob: 142 | parent_1 = population_list[pop_random_size[2 * size]][:] 143 | parent_2 = population_list[pop_random_size[2 * size + 1]][:] 144 | 145 | child_1 = parent_1[:] 146 | child_2 = parent_2[:] 147 | cutpoint = list(np.random.choice(num_gene, 2, replace=False)) 148 | cutpoint.sort() 149 | 150 | child_1[cutpoint[0]:cutpoint[1]] = parent_2[cutpoint[0]:cutpoint[1]] 151 | child_2[cutpoint[0]:cutpoint[1]] = parent_1[cutpoint[0]:cutpoint[1]] 152 | offspring_list[pop_random_size[2 * size]] = child_1[:] 153 | offspring_list[pop_random_size[2 * size + 1]] = child_2[:] 154 | return offspring_list, parent_list 155 | 156 | 157 | def perform_mutations(offspring_list, mutation_rate, num_gene, num_mutation_jobs): 158 | for off_spring in 
range(len(offspring_list)): 159 | 160 | """ Mutations """ 161 | mutation_prob = np.random.rand() 162 | if mutation_rate >= mutation_prob: 163 | m_change = list( 164 | np.random.choice(num_gene, num_mutation_jobs, replace=False)) # chooses the position to mutation 165 | t_value_last = offspring_list[off_spring][ 166 | m_change[0]] # save the value which is on the first mutation position 167 | for i in range(num_mutation_jobs - 1): 168 | offspring_list[off_spring][m_change[i]] = offspring_list[off_spring][ 169 | m_change[i + 1]] # displacement 170 | # move the value of the first mutation position to the last mutation position 171 | offspring_list[off_spring][m_change[num_mutation_jobs - 1]] = t_value_last 172 | return offspring_list 173 | 174 | 175 | def checkfitness_and_calculate_makespan(parent_list, offspring_list, population_size, num_flows, num_links, 176 | process_time, flow_sequence,fit_chromosomes_list): 177 | """ fitness value (calculate makespan) """ 178 | total_chromosome = copy.deepcopy(parent_list) + copy.deepcopy( 179 | offspring_list) # parent and offspring chromosomes combination 180 | chrom_fitness, chrom_fit = [], [] 181 | total_fitness = 0 182 | population_list_fit = [] 183 | for pop_size in range(population_size * 2): 184 | f_keys = [j for j in range(num_flows)] 185 | key_count = {key: 0 for key in f_keys} 186 | f_count = {key: 0 for key in f_keys} 187 | l_keys = [j + 1 for j in range(num_links)] 188 | l_count = {key: 0 for key in l_keys} 189 | """for i in total_chromosome[pop_size]: 190 | gen_t = int(process_time[i][key_count[i]]) 191 | gen_l = int(flow_sequence[i][key_count[i]]) 192 | # Check for flow and deadline here 193 | # if this flow then check if the total timeline is less than deadline, 194 | # if not do not include it in f_count / something like that 195 | flow_i = flows[i] 196 | # flow_i.endToEndDelay = f_count[i] + gen_t 197 | if ((f_count[i] + gen_t) <= flow_i.deadline) and ((l_count[gen_l] + gen_t) <= flow_i.deadline): # for a valid 
schedule, the end to end delay for one flow should be within deadline 198 | #print("Fit case, Flow i", flow_i.identifier, ":", f_count[i] + gen_t, ":", flow_i.deadline) 199 | population_list_fit.append(1) 200 | f_count[i] = f_count[i] + gen_t 201 | l_count[gen_l] = l_count[gen_l] + gen_t 202 | if l_count[gen_l] < f_count[i]: # Check if 203 | l_count[gen_l] = f_count[i] 204 | elif (l_count[gen_l] > f_count[i]): 205 | f_count[i] = l_count[gen_l] 206 | # flow_i.endToEndDelay = f_count[i] 207 | #print("Special case: Flow i", flow_i.identifier, ":", f_count[i], "Deadline:", flow_i.deadline) 208 | 209 | else: 210 | 211 | population_list_fit.append(0) 212 | 213 | key_count[i] = key_count[i] + 1 214 | # print("End to end delay:",flow_i.endToEndDelay) 215 | # else: # unfit case 216 | # continue 217 | 218 | # print("Flow Count", f_count) 219 | # print("Link count", l_count) 220 | if (max(f_count.values()) != 0): 221 | 222 | makespan = max(f_count.values()) 223 | chrom_fitness.append(1 / makespan) 224 | chrom_fit.append(makespan) 225 | total_fitness = total_fitness + chrom_fitness[pop_size] 226 | else: 227 | makespan = 99999999999 228 | chrom_fitness.append(1/makespan) 229 | chrom_fit.append(makespan) 230 | total_fitness = total_fitness + chrom_fitness[pop_size] 231 | """ 232 | makespan = calculate_fitness_for_chromosome(total_chromosome[pop_size],process_time,key_count,flow_sequence,f_count,l_count,population_list_fit) 233 | chrom_fitness.append(1 / makespan) 234 | chrom_fit.append(makespan) 235 | total_fitness = total_fitness + chrom_fitness[pop_size] 236 | positive_infinity = float('inf') 237 | if (makespan!= positive_infinity): 238 | fit_guy = FitChromosome(total_chromosome[pop_size],makespan,total_fitness,chrom_fit) 239 | fit_chromosomes_list.append(fit_guy) 240 | return total_fitness, chrom_fitness, chrom_fit, total_chromosome, population_list_fit 241 | 242 | def 
calculate_fitness_for_chromosome(current_chromosome,process_time,key_count,flow_sequence,f_count,l_count,population_list_fit): 243 | """ 244 | Seperate Fitness function per chromosome. Then we can 245 | check for fitness and include only the fit ones as part of selection 246 | Fitness here includes minimizing makespan while also meeting deadline for flow 247 | """ 248 | for i in current_chromosome: 249 | gen_t = int(process_time[i][key_count[i]]) 250 | gen_l = int(flow_sequence[i][key_count[i]]) 251 | # Check for flow and deadline here 252 | # if this flow then check if the total timeline is less than deadline, 253 | # if not do not include it in f_count / something like that 254 | flow_i = flows[i] 255 | #if ((f_count[i] + gen_t) <= flow_i.deadline) and ((l_count[gen_l] + gen_t) <=flow_i.deadline): # for a valid schedule, the end to end delay for one flow should be within deadline 256 | # print("Fit case, Flow i", flow_i.identifier, ":", f_count[i] + gen_t, ":", flow_i.deadline) 257 | #population_list_fit.append(1) 258 | f_count[i] = f_count[i] + gen_t 259 | if (gen_l != 0): 260 | l_count[gen_l] = l_count[gen_l] + gen_t 261 | if l_count[gen_l] < f_count[i]: # Check if 262 | l_count[gen_l] = f_count[i] 263 | elif (l_count[gen_l] > f_count[i]): 264 | f_count[i] = l_count[gen_l] 265 | # flow_i.endToEndDelay = f_count[i] 266 | # print("Special case: Flow i", flow_i.identifier, ":", f_count[i], "Deadline:", flow_i.deadline) 267 | 268 | #else: 269 | #f_count[i] = 0 270 | # population_list_fit.append(0) 271 | 272 | key_count[i] = key_count[i] + 1 273 | 274 | # Check if the f_count.values fits as per deadline in flow 275 | flow_i_pos=0 276 | positive_infinity = float('inf') 277 | for i in range(len(f_count)): 278 | flow_i=flows[i] 279 | if (f_count[i] <=flow_i.deadline): 280 | makespan = max(f_count.values()) 281 | else: 282 | makespan = positive_infinity 283 | break 284 | #flow_i_pos = flow_i_pos+1 285 | #if (max(f_count.values()) != 0): 286 | 287 | #else: 288 | # 
makespan = 99999999999 289 | return makespan 290 | 291 | def calculate_fitness_for_chromosomev1(current_chromosome,process_time,key_count,flow_sequence,f_count,l_count,population_list_fit): 292 | """ 293 | Seperate Fitness function per chromosome. Then we can 294 | check for fitness and include only the fit ones as part of selection 295 | Fitness here includes minimizing makespan while also meeting deadline for flow 296 | """ 297 | for i in current_chromosome: 298 | gen_t = int(process_time[i][key_count[i]]) 299 | gen_l = int(flow_sequence[i][key_count[i]]) 300 | f_count[i] = f_count[i] + gen_t 301 | l_count[gen_l] = l_count[gen_l] + gen_t 302 | if l_count[gen_l] < f_count[i]: # Check if 303 | l_count[gen_l] = f_count[i] 304 | elif (l_count[gen_l] > f_count[i]): 305 | f_count[i] = l_count[gen_l] 306 | 307 | key_count[i] = key_count[i] + 1 308 | 309 | makespan = max(f_count.values()) 310 | 311 | return makespan 312 | 313 | def plot_gantt_chart(num_links, num_flows, sequence_best, process_time, flow_sequence): 314 | l_keys = [j + 1 for j in range(num_links)] 315 | f_keys = [j for j in range(num_flows)] 316 | key_count = {key: 0 for key in f_keys} 317 | f_count = {key: 0 for key in f_keys} 318 | l_count = {key: 0 for key in l_keys} 319 | f_record = {} 320 | for i in sequence_best: 321 | gen_t = int(process_time[i][key_count[i]]) 322 | gen_l = int(flow_sequence[i][key_count[i]]) 323 | # if (gen_m != 0): 324 | 325 | #flow_i = flows[i - 1] 326 | # flow_i.endToEndDelay = f_count[i] + gen_t 327 | #if (f_count[i] + gen_t) <= flow_i.deadline: # for a valid schedule, the end to end delay for one flow should be within deadline 328 | # print("Fit case, Flow i", flow_i.identifier, ":", f_count[i] + gen_t, ":", flow_i.deadline) 329 | if (gen_l != 0): # Abh 2024 - Changed to handle the link marked 0. 
330 | f_count[i] = f_count[i] + gen_t 331 | l_count[gen_l] = l_count[gen_l] + gen_t 332 | if l_count[gen_l] < f_count[i]: # Check if 333 | l_count[gen_l] = f_count[i] 334 | elif (l_count[gen_l] > f_count[i]): 335 | f_count[i] = l_count[gen_l] 336 | 337 | # if flow_i.endToEndDelay <= flow_i.deadline: 338 | # print("Fit case, Flow i", flow_i.identifier, ":", flow_i.endToEndDelay) 339 | # f_count[i] = f_count[i] + gen_t 340 | 341 | # f_count[i] = f_count[i] + gen_t 342 | 343 | # l_count[gen_l] = l_count[gen_l] + gen_t 344 | 345 | # if l_count[gen_l] < f_count[i]: 346 | # l_count[gen_l] = f_count[i] 347 | # elif l_count[gen_l] > f_count[i]: 348 | # f_count[i] = l_count[gen_l] 349 | # else: 350 | # j_count[i] = j_count[i] + gen_t 351 | # m_count[gen_m] = 0 352 | 353 | start_time = str(datetime.timedelta( 354 | seconds=f_count[i] - process_time[i][key_count[i]])) # convert seconds to hours, minutes and seconds 355 | end_time = str(datetime.timedelta(seconds=f_count[i])) 356 | if (gen_l!=0): 357 | f_record[(i, gen_l)] = [start_time, end_time] 358 | 359 | key_count[i] = key_count[i] + 1 360 | # print("J Record",j_record) 361 | print ("f_record:", f_record ) 362 | df = [] 363 | for link_key in l_keys: 364 | for flow_key in f_keys: 365 | # if ( m!=3 & j!=0): 366 | # print(j_record[j, m]) 367 | flow=flows[flow_key] 368 | df.append(dict(Task='Link %s' % (link_key), Start='2020-02-01 %s' % (str(f_record[(flow_key, link_key)][0])), \ 369 | Finish='2020-02-01 %s' % (str(f_record[(flow_key, link_key)][1])), Resource='Flow %s Deadline %d' % (flow_key + 1,flow.deadline))) 370 | 371 | df_ = pd.DataFrame(df) 372 | df_.Start = pd.to_datetime(df_['Start']) 373 | df_.Finish = pd.to_datetime(df_['Finish']) 374 | start = df_.Start.min() 375 | end = df_.Finish.max() 376 | 377 | df_.Start = df_.Start.apply(lambda x: x.strftime('%Y-%m-%dT%H:%M:%S')) 378 | df_.Finish = df_.Finish.apply(lambda x: x.strftime('%Y-%m-%dT%H:%M:%S')) 379 | print("Df", df) 380 | data = df_.to_dict(orient='records') 
381 | 382 | final_data = { 383 | 'start': start.strftime('%Y-%m-%dT%H:%M:%S'), 384 | 'end': end.strftime('%Y-%m-%dT%H:%M:%S'), 385 | 'data': data} 386 | 387 | fig = ff.create_gantt(df, index_col='Resource', show_colorbar=True, group_tasks=True, showgrid_x=True, 388 | title='Traffic Schedule') 389 | fig.show() 390 | return final_data, df 391 | 392 | def plot_new_gantt_chart(num_links, num_flows, sequence_best, process_time, flow_sequence): 393 | l_keys = [j + 1 for j in range(num_links)] 394 | f_keys = [j for j in range(num_flows)] 395 | key_count = {key: 0 for key in f_keys} 396 | f_count = {key: 0 for key in f_keys} 397 | l_count = {key: 0 for key in l_keys} 398 | f_record = {} 399 | for i in sequence_best.chromosome: 400 | gen_t = int(process_time[i][key_count[i]]) 401 | gen_l = int(flow_sequence[i][key_count[i]]) 402 | # if (gen_m != 0): 403 | 404 | #flow_i = flows[i - 1] 405 | # flow_i.endToEndDelay = f_count[i] + gen_t 406 | #if (f_count[i] + gen_t) <= flow_i.deadline: # for a valid schedule, the end to end delay for one flow should be within deadline 407 | # print("Fit case, Flow i", flow_i.identifier, ":", f_count[i] + gen_t, ":", flow_i.deadline) 408 | if (gen_l != 0): #Added Abhilash 2024 to handle link not in use 409 | f_count[i] = f_count[i] + gen_t 410 | l_count[gen_l] = l_count[gen_l] + gen_t 411 | if l_count[gen_l] < f_count[i]: # Check if 412 | l_count[gen_l] = f_count[i] 413 | elif (l_count[gen_l] > f_count[i]): 414 | f_count[i] = l_count[gen_l] 415 | 416 | # if flow_i.endToEndDelay <= flow_i.deadline: 417 | # print("Fit case, Flow i", flow_i.identifier, ":", flow_i.endToEndDelay) 418 | # f_count[i] = f_count[i] + gen_t 419 | 420 | # f_count[i] = f_count[i] + gen_t 421 | 422 | # l_count[gen_l] = l_count[gen_l] + gen_t 423 | 424 | # if l_count[gen_l] < f_count[i]: 425 | # l_count[gen_l] = f_count[i] 426 | # elif l_count[gen_l] > f_count[i]: 427 | # f_count[i] = l_count[gen_l] 428 | # else: 429 | # j_count[i] = j_count[i] + gen_t 430 | # 
m_count[gen_m] = 0 431 | 432 | start_time = str(datetime.timedelta( 433 | seconds=f_count[i] - process_time[i][key_count[i]])) # convert seconds to hours, minutes and seconds 434 | end_time = str(datetime.timedelta(seconds=f_count[i])) 435 | 436 | f_record[(i, gen_l)] = [start_time, end_time] 437 | 438 | key_count[i] = key_count[i] + 1 439 | # print("J Record",j_record) 440 | print ("f_record:", f_record ) 441 | df = [] 442 | for link_key in l_keys: 443 | for flow_key in f_keys: 444 | # if ( m!=3 & j!=0): 445 | print("flow key",flow_key) 446 | print("link key", link_key) 447 | #if (flow_key!=0 and link_key!=0): 448 | flow=flows[flow_key] 449 | #if (link_key!=0): 450 | #if (f_record.keys(flow_key, link_key)): 451 | try: 452 | df.append(dict(Task='Link %s' % (link_key), Start='2020-02-01 %s' % (str(f_record[(flow_key, link_key)][0])), \ 453 | Finish='2020-02-01 %s' % (str(f_record[(flow_key, link_key)][1])), Resource='Flow %s Deadline %d' % (flow_key + 1,flow.deadline))) 454 | except KeyError as error: 455 | print("The given key doesn't exist in the dictionary") 456 | continue 457 | df_ = pd.DataFrame(df) 458 | df_.Start = pd.to_datetime(df_['Start']) 459 | df_.Finish = pd.to_datetime(df_['Finish']) 460 | start = df_.Start.min() 461 | end = df_.Finish.max() 462 | 463 | df_.Start = df_.Start.apply(lambda x: x.strftime('%Y-%m-%dT%H:%M:%S')) 464 | df_.Finish = df_.Finish.apply(lambda x: x.strftime('%Y-%m-%dT%H:%M:%S')) 465 | print("Df", df) 466 | data = df_.to_dict(orient='records') 467 | 468 | final_data = { 469 | 'start': start.strftime('%Y-%m-%dT%H:%M:%S'), 470 | 'end': end.strftime('%Y-%m-%dT%H:%M:%S'), 471 | 'data': data} 472 | 473 | fig = ff.create_gantt(df, index_col='Resource', show_colorbar=True, group_tasks=True, showgrid_x=True, 474 | title='Traffic Schedule') 475 | fig.show() 476 | return final_data, df 477 | 478 | def find_optimal_sequence(Tbest, fit_chromosomes ): 479 | 480 | Tbest_now=Tbest 481 | if (len(fit_chromosomes)==0): 482 | print("No good 
schedules to fit the deadline found. Rethink deadlines") 483 | return 484 | for chromosome in fit_chromosomes: 485 | current_best_seq = chromosome 486 | current_chromosome= chromosome 487 | chom_fit_current = current_chromosome.makespan 488 | if chom_fit_current < Tbest_now: 489 | Tbest_now = chom_fit_current 490 | best_sequence_now = copy.deepcopy(current_chromosome) 491 | current_best_seq=best_sequence_now 492 | 493 | if Tbest_now <= Tbest: 494 | Tbest = Tbest_now 495 | copy_sequence_best = copy.deepcopy(current_best_seq) 496 | identified_best_sequence= copy_sequence_best 497 | return Tbest, identified_best_sequence 498 | 499 | def traffic_schedule(data_dict, population_size=30, crossover_rate=0.8, mutation_rate=0.2, mutation_selection_rate=0.2, 500 | num_iteration=3000): 501 | """ initialize genetic algorithm parameters and read data """ 502 | data_json = json_to_df(data_dict) 503 | flow_sequence_tmp = data_json['Flow Sequence'] 504 | process_time_tmp = data_json['Processing Time'] 505 | 506 | df_shape = process_time_tmp.shape 507 | 508 | num_links = df_shape[1] # number of links 509 | num_flows = df_shape[0] # number of flows 510 | print("Num Links,", num_links, "Num of Flows", num_flows) 511 | num_gene = num_links * num_flows # number of genes in a chromosome 512 | num_mutation_jobs = round(num_gene * mutation_selection_rate) 513 | 514 | process_time = [list(map(int, process_time_tmp.iloc[i])) for i in range(num_flows)] 515 | flow_sequence = [list(map(int, flow_sequence_tmp.iloc[i])) for i in range(num_flows)] 516 | 517 | # start_time = time.time() 518 | 519 | Tbest = 999999999999999 520 | 521 | best_list, best_obj = [], [] 522 | population_list = [] 523 | makespan_record = [] 524 | 525 | # Time - Start time 526 | start_time = time.time() 527 | # Initial Population 528 | population_list = prepare_initial_population(population_size, population_list, num_gene, num_flows) 529 | print("Population List:", population_list) 530 | # Iterations start here 531 | for 
iteration in range(num_iteration): 532 | Tbest_now = 99999999999 533 | 534 | """ Two Point Cross-Over """ 535 | offspring_list, parent_list = two_point_crossover(population_list, population_size, crossover_rate, num_gene) 536 | 537 | for pop in range(population_size): 538 | 539 | """ Repairment """ 540 | job_count = {} 541 | larger, less = [], [] # 'larger' record jobs appear in the chromosome more than pop times, and 'less' records less than pop times. 542 | for job in range(num_flows): 543 | if job in offspring_list[pop]: 544 | count = offspring_list[pop].count(job) 545 | pos = offspring_list[pop].index(job) 546 | job_count[job] = [count, pos] # store the above two values to the job_count dictionary 547 | else: 548 | count = 0 549 | job_count[job] = [count, 0] 550 | 551 | if count > num_links: 552 | larger.append(job) 553 | elif count < num_links: 554 | less.append(job) 555 | 556 | for large in range(len(larger)): 557 | change_job = larger[large] 558 | while job_count[change_job][0] > num_links: 559 | for les in range(len(less)): 560 | if job_count[less[les]][0] < num_links: 561 | offspring_list[pop][job_count[change_job][1]] = less[les] 562 | job_count[change_job][1] = offspring_list[pop].index(change_job) 563 | job_count[change_job][0] = job_count[change_job][0] - 1 564 | job_count[less[les]][0] = job_count[less[les]][0] + 1 565 | if job_count[change_job][0] == num_links: 566 | break 567 | 568 | offspring_list = perform_mutations(offspring_list, mutation_rate, num_gene, num_mutation_jobs) 569 | fit_chromosomes_list = [] 570 | total_fitness, chrom_fitness, chrom_fit, total_chromosome, population_list_fit = checkfitness_and_calculate_makespan( 571 | parent_list, offspring_list, population_size, num_flows, num_links, process_time, flow_sequence, fit_chromosomes_list) 572 | for fit in fit_chromosomes_list: 573 | print("Fit Makespan:", fit.makespan) 574 | print("Fit chrom fit:", fit.chrom_fit) 575 | 576 | #print ("Chromosome:",total_chromosome, "Makespan:", makespan 
) 577 | """ Selection (roulette wheel approach) """ 578 | #pk, qk = [], [] 579 | 580 | #for size in range(population_size * 2): 581 | # pk.append(chrom_fitness[size] / total_fitness) 582 | #for size in range(population_size * 2): 583 | # cumulative = 0 584 | 585 | # for j in range(0, size + 1): 586 | # cumulative = cumulative + pk[j] 587 | # qk.append(cumulative) 588 | 589 | #selection_rand = [np.random.rand() for i in range(population_size)] 590 | 591 | #for pop_size in range(population_size): 592 | # if selection_rand[pop_size] <= qk[0]: 593 | # population_list[pop_size] = copy.deepcopy(total_chromosome[0]) 594 | # else: 595 | # for j in range(0, population_size * 2 - 1): 596 | # if selection_rand[pop_size] > qk[j] and selection_rand[pop_size] <= qk[j + 1]: 597 | # population_list[pop_size] = copy.deepcopy(total_chromosome[j + 1]) 598 | # break 599 | #equence_now=None 600 | """ comparison """ 601 | """for pop_size in range(population_size * 2): 602 | #if (population_list_fit [pop_size]==1): 603 | #print("Compare inside -<") 604 | chom_fit_current = chrom_fit[pop_size] 605 | if chom_fit_current < Tbest_now: 606 | Tbest_now = chrom_fit[pop_size] 607 | sequence_now = copy.deepcopy(total_chromosome[pop_size]) 608 | #else: 609 | # print("Compare outside ->") 610 | if (Tbest_now <= Tbest ): 611 | Tbest = Tbest_now 612 | sequence_best = copy.deepcopy(sequence_now) 613 | """ 614 | Tbest, sequence_best = find_optimal_sequence(Tbest,fit_chromosomes_list) 615 | makespan_record.append(Tbest) 616 | end_time = time.time() 617 | time_taken = end_time-start_time 618 | 619 | """ Results - Makespan """ 620 | 621 | print("optimal sequence", sequence_best) 622 | print("optimal value:%f" % Tbest) 623 | print("\n") 624 | print('the elapsed time:%s'% (end_time - start_time)) 625 | 626 | # %matplotlib inline 627 | plt.plot([i for i in range(len(makespan_record))], makespan_record, 'b') 628 | plt.ylabel('makespan', fontsize=15) 629 | plt.xlabel('generation', fontsize=15) 630 | plt.show() 
631 | 632 | print(makespan_record) 633 | 634 | """ plot gantt chart """ 635 | final_data, df = plot_new_gantt_chart(num_links, num_flows, sequence_best, process_time, flow_sequence) 636 | #plot_gantt_chart(num_links, num_flows, sequence_best, process_time, flow_sequence) 637 | 638 | # iplot(fig, filename='GA_job_shop_scheduling') 639 | print(final_data) 640 | return final_data, df 641 | 642 | 643 | """ Job_Shop_Schedule """ 644 | 645 | # data = data_excel_json('data/JSP_dataset.xlsx') 646 | data = data_from_json() 647 | print("Data", data) 648 | schedule = traffic_schedule(data_dict=data) 649 | print("Schedule", schedule[0]) 650 | 651 | # import chart_studio.plotly as py 652 | # import plotly.figure_factory as ff 653 | 654 | # df = schedule[1] 655 | # fig = ff.create_gantt(df, index_col='Resource', show_colorbar=True, group_tasks=True, showgrid_x=True, title='Job shop Schedule') 656 | # fig.show() 657 | --------------------------------------------------------------------------------