├── OSPF_delay └── Proac │ ├── Metrics │ └── Folder_for_storing_metrics_during_experiment │ ├── bw.txt │ ├── bw_r.txt │ ├── clear.sh │ ├── ospf_proac.py │ ├── ospf_proac.pyc │ ├── paths_delay.json │ ├── paths_weight.json │ ├── setting.py │ ├── setting.pyc │ ├── simple_awareness.py │ ├── simple_awareness.pyc │ ├── simple_delay.py │ ├── simple_delay.pyc │ ├── simple_monitor.py │ ├── simple_monitor.pyc │ └── stretch │ └── Folder_for_storing_paths_stretch_during_experiment ├── OSPF_loss └── Proac │ ├── Metrics │ └── Folder_for_storing_metrics_during_experiment │ ├── bw.txt │ ├── bw_r.txt │ ├── clear.sh │ ├── ospf_proac.py │ ├── paths_loss.json │ ├── paths_weight.json │ ├── setting.py │ ├── simple_awareness.py │ ├── simple_delay.py │ ├── simple_monitor.py │ ├── stretch │ └── Folder_for_storing_paths_stretch_during_experiment │ └── times.txt ├── README.md └── SDNapps_proac ├── Metrics └── Folder_for_storing_metrics_during_experiment ├── RL_threading.py ├── RoutingGeant ├── Capacidades.csv ├── Q_routing.py ├── Q_routing.pyc ├── bw_r.csv ├── dict.csv ├── get_R_Q.py ├── get_R_Q.pyc ├── get_all_routes.py ├── get_all_routes.pyc ├── get_dict.py ├── get_dict.pyc ├── get_group.py ├── get_result.py ├── get_result.pyc ├── main.py ├── main.pyc ├── neighbors.csv ├── net_info.csv ├── paths.json ├── paths_weight.json └── stretch │ └── Folder_for_storing_paths_stretch_during_experiment ├── bw_r.txt ├── clear.sh ├── net_info.csv ├── paths.json ├── setting.py ├── simple_awareness.py ├── simple_delay.py ├── simple_monitor.py └── start_net_info.py /OSPF_delay/Proac/Metrics/Folder_for_storing_metrics_during_experiment: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/danielaCasasv/RSIR-Reinforcement-Learning-and-SDN-Intelligent-Routing/e0d1db707103748fdd0d519da324ebc55f2de728/OSPF_delay/Proac/Metrics/Folder_for_storing_metrics_during_experiment -------------------------------------------------------------------------------- /OSPF_delay/Proac/bw.txt: -------------------------------------------------------------------------------- 1 | 10 3 100.0 2 | 22 20 25.0 3 | 15 20 25.0 4 | 3 14 1.55 5 | 7 19 25.0 6 | 10 11 25.0 7 | 5 8 25.0 8 | 7 17 100.0 9 | 3 11 25.0 10 | 13 19 25.0 11 | 8 9 25.0 12 | 3 21 100.0 13 | 14 13 1.55 14 | 2 13 100.0 15 | 6 19 1.55 16 | 12 22 100.0 17 | 16 10 100.0 18 | 1 7 100.0 19 | 3 1 100.0 20 | 2 4 100.0 21 | 17 13 100.0 22 | 1 16 100.0 23 | 9 15 25.0 24 | 20 17 100.0 25 | 2 18 25.0 26 | 17 10 100.0 27 | 6 7 1.55 28 | 16 5 25.0 29 | 4 16 100.0 30 | 2 23 25.0 31 | 7 21 100.0 32 | 12 10 100.0 33 | 7 2 100.0 34 | 17 23 25.0 35 | 21 18 25.0 36 | 16 9 100.0 37 | 2 12 100.0 38 | 3 10 100.0 39 | 20 22 25.0 40 | 20 15 25.0 41 | 14 3 1.55 42 | 19 7 25.0 43 | 11 10 25.0 44 | 8 5 25.0 45 | 17 7 100.0 46 | 11 3 25.0 47 | 19 13 25.0 48 | 9 8 25.0 49 | 21 3 100.0 50 | 13 14 1.55 51 | 13 2 100.0 52 | 19 6 1.55 53 | 22 12 100.0 54 | 10 16 100.0 55 | 7 1 100.0 56 | 1 3 100.0 57 | 4 2 100.0 58 | 13 17 100.0 59 | 16 1 100.0 60 | 15 9 25.0 61 | 17 20 100.0 62 | 18 2 25.0 63 | 10 17 100.0 64 | 7 6 1.55 65 | 5 16 25.0 66 | 16 4 100.0 67 | 23 2 25.0 68 | 21 7 100.0 69 | 10 12 100.0 70 | 2 7 100.0 71 | 23 17 25.0 72 | 18 21 25.0 73 | 9 16 100.0 74 | 12 2 100.0 75 | -------------------------------------------------------------------------------- /OSPF_delay/Proac/bw_r.txt: -------------------------------------------------------------------------------- 1 | 10,3,78,100 2 | 22,20,28,25 3 | 15,20,68,25 4 | 3,14,74,1.55 5 | 7,19,13,25 6 | 10,11,31,25 7 | 
5,8,90,25 8 | 7,17,38,100 9 | 3,11,82,25 10 | 13,19,69,25 11 | 8,9,15,25 12 | 3,21,57,100 13 | 14,13,1,1.55 14 | 2,13,82,100 15 | 6,19,34,1.55 16 | 12,22,43,100 17 | 16,10,66,100 18 | 1,7,32,100 19 | 3,1,17,100 20 | 2,4,26,100 21 | 17,13,46,100 22 | 1,16,57,100 23 | 9,15,83,25 24 | 20,17,26,100 25 | 2,18,44,25 26 | 17,10,90,100 27 | 6,7,8,1.55 28 | 16,5,31,25 29 | 4,16,3,100 30 | 2,23,25,25 31 | 7,21,8,100 32 | 12,10,30,100 33 | 7,2,45,100 34 | 17,23,52,25 35 | 21,18,78,25 36 | 16,9,90,100 37 | 2,12,58,100 38 | 3,10,78,100 39 | 20,22,28,25 40 | 20,15,68,25 41 | 14,3,74,1.55 42 | 19,7,13,25 43 | 11,10,31,25 44 | 8,5,90,25 45 | 17,7,38,100 46 | 11,3,82,25 47 | 19,13,69,25 48 | 9,8,15,25 49 | 21,3,57,100 50 | 13,14,1,1.55 51 | 13,2,82,100 52 | 19,6,34,1.55 53 | 22,12,43,100 54 | 10,16,66,100 55 | 7,1,32,100 56 | 1,3,17,100 57 | 4,2,26,100 58 | 13,17,46,100 59 | 16,1,57,100 60 | 15,9,83,25 61 | 17,20,26,100 62 | 18,2,44,25 63 | 10,17,90,100 64 | 7,6,8,1.55 65 | 5,16,31,25 66 | 16,4,3,100 67 | 23,2,25,25 68 | 21,7,8,100 69 | 10,12,30,100 70 | 2,7,45,100 71 | 23,17,52,25 72 | 18,21,78,25 73 | 9,16,90,100 74 | 12,2,58,100 75 | -------------------------------------------------------------------------------- /OSPF_delay/Proac/clear.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | sudo rm times.txt 3 | cd ~/ryu/ryu/app/OSPF_delay/Proac/Metrics/ 4 | sudo rm *metrics* 5 | cd ~/ryu/ryu/app/OSPF_delay/Proac/stretch/ 6 | sudo rm *stretch* 7 | 8 | 9 | -------------------------------------------------------------------------------- /OSPF_delay/Proac/ospf_proac.py: -------------------------------------------------------------------------------- 1 | from operator import attrgetter 2 | 3 | from ryu.base import app_manager 4 | from ryu.controller import ofp_event 5 | from ryu.controller.handler import MAIN_DISPATCHER, DEAD_DISPATCHER 6 | from ryu.controller.handler import CONFIG_DISPATCHER 7 | from ryu.controller.handler import set_ev_cls 8 | from ryu.topology import event, switches 9 | from ryu.ofproto.ether import ETH_TYPE_IP 10 | from ryu.topology.api import get_switch, get_link 11 | from ryu.ofproto import ofproto_v1_3 12 | from ryu.lib import hub 13 | from ryu.lib.packet import packet 14 | from ryu.lib.packet import arp 15 | 16 | import time 17 | 18 | import simple_awareness 19 | import simple_delay 20 | import simple_monitor 21 | # import requests 22 | import json, ast 23 | import setting 24 | import csv 25 | import time 26 | 27 | class baseline_Dijsktra(app_manager.RyuApp): 28 | ''' 29 | A Ryu app that route traffic based on Dijkstra algorithm when it takes 30 | link delay as link delay. 31 | ''' 32 | 33 | OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION] 34 | _CONTEXTS = {"simple_awareness": simple_awareness.simple_Awareness, 35 | "simple_delay": simple_delay.simple_Delay, 36 | "simple_monitor": simple_monitor.simple_Monitor} 37 | 38 | def __init__(self, *args, **kwargs): 39 | super(baseline_Dijsktra, self).__init__(*args, **kwargs) 40 | self.awareness = kwargs["simple_awareness"] 41 | self.delay = kwargs["simple_delay"] 42 | self.monitor = kwargs["simple_monitor"] 43 | self.datapaths = {} 44 | self.paths = {} 45 | self.monitor_thread = hub.spawn(self.installation_module) 46 | 47 | @set_ev_cls(ofp_event.EventOFPStateChange, 48 | [MAIN_DISPATCHER, DEAD_DISPATCHER]) 49 | def state_change_handler(self, ev): 50 | """ 51 | Record datapath information. 
52 | """ 53 | datapath = ev.datapath 54 | if ev.state == MAIN_DISPATCHER: 55 | if datapath.id not in self.datapaths: 56 | self.logger.debug('Datapath registered: %016x', datapath.id) 57 | print 'Datapath registered:', datapath.id ## 58 | self.datapaths[datapath.id] = datapath 59 | elif ev.state == DEAD_DISPATCHER: 60 | if datapath.id in self.datapaths: 61 | self.logger.debug('Datapath unregistered: %016x', datapath.id) 62 | print 'Datapath unregistered:', datapath.id 63 | print "FUCK" 64 | del self.datapaths[datapath.id] 65 | 66 | def installation_module(self): 67 | """ 68 | Main entry method of monitoring traffic. 69 | """ 70 | while True: 71 | if self.awareness.link_to_port: 72 | self.paths = None 73 | self.flow_install_monitor() 74 | hub.sleep(setting.MONITOR_PERIOD) 75 | 76 | def flow_install_monitor(self): 77 | print("[Flow Installation Ok]") 78 | out_time = time.time() 79 | for dp in self.datapaths.values(): 80 | for dp2 in self.datapaths.values(): 81 | if dp.id != dp2.id: 82 | ip_src = '10.0.0.'+str(dp.id) 83 | ip_dst = '10.0.0.'+str(dp2.id) 84 | self.forwarding(dp.id, ip_src, ip_dst, dp.id, dp2.id) 85 | time.sleep(0.0005) 86 | end_out_time = time.time() 87 | out_total_ = end_out_time - out_time 88 | return 89 | 90 | def forwarding(self, dpid, ip_src, ip_dst, src_sw, dst_sw): 91 | """ 92 | Get paths and install them into datapaths. 93 | """ 94 | path = self.get_path(str(src_sw), str(dst_sw)) #changed to str cuz the json convertion 95 | flow_info = (ip_src, ip_dst) 96 | self.install_flow(self.datapaths, self.awareness.link_to_port, path, flow_info) 97 | 98 | 99 | def install_flow(self, datapaths, link_to_port, path, 100 | flow_info, data=None): 101 | init_time_install = time.time() 102 | ''' 103 | Install flow entires. 104 | path=[dpid1, dpid2...] 
105 | flow_info=(src_ip, dst_ip) 106 | ''' 107 | if path is None or len(path) == 0: 108 | self.logger.info("Path error!") 109 | return 110 | 111 | in_port = 1 112 | first_dp = datapaths[path[0]] 113 | 114 | out_port = first_dp.ofproto.OFPP_LOCAL 115 | back_info = (flow_info[1], flow_info[0]) 116 | 117 | # Flow installing por middle datapaths in path 118 | if len(path) > 2: 119 | for i in range(1, len(path)-1): 120 | port = self.get_port_pair_from_link(link_to_port, 121 | path[i-1], path[i]) 122 | port_next = self.get_port_pair_from_link(link_to_port, 123 | path[i], path[i+1]) 124 | if port and port_next: 125 | src_port, dst_port = port[1], port_next[0] 126 | datapath = datapaths[path[i]] 127 | self.send_flow_mod(datapath, flow_info, src_port, dst_port) 128 | self.send_flow_mod(datapath, back_info, dst_port, src_port) 129 | if len(path) > 1: 130 | # The last flow entry 131 | port_pair = self.get_port_pair_from_link(link_to_port, 132 | path[-2], path[-1]) 133 | if port_pair is None: 134 | self.logger.info("Port is not found") 135 | return 136 | src_port = port_pair[1] 137 | dst_port = 1 #I know that is the host port -- 138 | last_dp = datapaths[path[-1]] 139 | self.send_flow_mod(last_dp, flow_info, src_port, dst_port) 140 | self.send_flow_mod(last_dp, back_info, dst_port, src_port) 141 | 142 | # The first flow entry 143 | port_pair = self.get_port_pair_from_link(link_to_port, path[0], path[1]) 144 | if port_pair is None: 145 | self.logger.info("Port not found in first hop.") 146 | return 147 | out_port = port_pair[0] 148 | self.send_flow_mod(first_dp, flow_info, in_port, out_port) 149 | self.send_flow_mod(first_dp, back_info, out_port, in_port) 150 | 151 | # src and dst on the same datapath 152 | else: 153 | out_port = 1 154 | self.send_flow_mod(first_dp, flow_info, in_port, out_port) 155 | self.send_flow_mod(first_dp, back_info, out_port, in_port) 156 | 157 | end_time_install = time.time() 158 | total_install = end_time_install - init_time_install 159 | #print("Time install", total_install) 160 | 161 | def send_flow_mod(self, datapath, flow_info, src_port, dst_port): 162 | """ 163 | Build flow entry, and send it to datapath. 164 | """ 165 | ofproto = datapath.ofproto 166 | parser = datapath.ofproto_parser 167 | actions = [] 168 | actions.append(parser.OFPActionOutput(dst_port)) 169 | 170 | match = parser.OFPMatch( 171 | eth_type=ETH_TYPE_IP, ipv4_src=flow_info[0], 172 | ipv4_dst=flow_info[1]) 173 | 174 | self.add_flow(datapath, 1, match, actions, 175 | idle_timeout=250, hard_timeout=0) 176 | 177 | 178 | def add_flow(self, dp, priority, match, actions, idle_timeout=0, hard_timeout=0): 179 | """ 180 | Send a flow entry to datapath. 181 | """ 182 | ofproto = dp.ofproto 183 | parser = dp.ofproto_parser 184 | inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, actions)] 185 | mod = parser.OFPFlowMod(datapath=dp, command=dp.ofproto.OFPFC_ADD, priority=priority, 186 | idle_timeout=idle_timeout, 187 | hard_timeout=hard_timeout, 188 | match=match, instructions=inst) 189 | dp.send_msg(mod) 190 | 191 | def build_packet_out(self, datapath, buffer_id, src_port, dst_port, data): 192 | """ 193 | Build packet out object. 
194 | """ 195 | actions = [] 196 | if dst_port: 197 | actions.append(datapath.ofproto_parser.OFPActionOutput(dst_port)) 198 | 199 | msg_data = None 200 | if buffer_id == datapath.ofproto.OFP_NO_BUFFER: 201 | if data is None: 202 | return None 203 | msg_data = data 204 | 205 | out = datapath.ofproto_parser.OFPPacketOut( 206 | datapath=datapath, buffer_id=buffer_id, 207 | data=msg_data, in_port=src_port, actions=actions) 208 | return out 209 | 210 | def arp_forwarding(self, msg, src_ip, dst_ip): 211 | """ 212 | Send ARP packet to the destination host if the dst host record 213 | is existed. 214 | result = (datapath, port) of host 215 | """ 216 | datapath = msg.datapath 217 | ofproto = datapath.ofproto 218 | 219 | result = self.awareness.get_host_location(dst_ip) 220 | if result: 221 | # Host has been recorded in access table. 222 | datapath_dst, out_port = result[0], result[1] 223 | datapath = self.datapaths[datapath_dst] 224 | out = self.build_packet_out(datapath, ofproto.OFP_NO_BUFFER, 225 | ofproto.OFPP_CONTROLLER, 226 | out_port, msg.data) 227 | datapath.send_msg(out) 228 | self.logger.debug("Deliver ARP packet to knew host") 229 | else: 230 | pass 231 | 232 | def get_path(self, src, dst): 233 | if self.paths != None: 234 | path = self.paths.get(src).get(dst) 235 | return path 236 | else: 237 | paths = self.get_dijkstra_paths_() 238 | path = paths.get(src).get(dst) 239 | return path 240 | 241 | def get_dijkstra_paths_(self): 242 | 243 | file = '/home/controlador/ryu/ryu/app/OSPF_delay/Proac/paths_delay.json' 244 | try: 245 | with open(file,'r') as json_file: 246 | paths_dict = json.load(json_file) 247 | paths_dict = ast.literal_eval(json.dumps(paths_dict)) 248 | self.paths = paths_dict 249 | return self.paths 250 | except ValueError as e: #error excpetion when trying to read the json and is still been updated 251 | return 252 | else: 253 | with open(file,'r') as json_file: #try again 254 | paths_dict = json.load(json_file) 255 | paths_dict = ast.literal_eval(json.dumps(paths_dict)) 256 | self.paths = paths_dict 257 | return self.paths 258 | 259 | def get_port_pair_from_link(self, link_to_port, src_dpid, dst_dpid): 260 | """ 261 | Get port pair of link, so that controller can install flow entry. 262 | link_to_port = {(src_dpid,dst_dpid):(src_port,dst_port),} 263 | """ 264 | if (src_dpid, dst_dpid) in link_to_port: 265 | return link_to_port[(src_dpid, dst_dpid)] 266 | else: 267 | self.logger.info("Link from dpid:%s to dpid:%s is not in links" % 268 | (src_dpid, dst_dpid)) 269 | return None 270 | 271 | 272 | @set_ev_cls(ofp_event.EventOFPPortStatus, MAIN_DISPATCHER) 273 | def port_status_handler(self, ev): 274 | """ 275 | Handle the port status changed event. 276 | """ 277 | msg = ev.msg 278 | ofproto = msg.datapath.ofproto 279 | reason = msg.reason 280 | dpid = msg.datapath.id 281 | port_no = msg.desc.port_no 282 | 283 | reason_dict = {ofproto.OFPPR_ADD: "added", 284 | ofproto.OFPPR_DELETE: "deleted", 285 | ofproto.OFPPR_MODIFY: "modified", } 286 | 287 | if reason in reason_dict: 288 | print "switch%d: port %s %s" % (dpid, reason_dict[reason], port_no) 289 | else: 290 | print "switch%d: Illegal port state %s %s" % (dpid, port_no, reason) 291 | 292 | @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER) 293 | def packet_in_handler(self, ev): 294 | ''' 295 | In packet_in handler, we need to learn access_table by ARP and IP packets. 
296 | Therefore, the first packet from UNKOWN host MUST be ARP 297 | ''' 298 | msg = ev.msg 299 | pkt = packet.Packet(msg.data) 300 | arp_pkt = pkt.get_protocol(arp.arp) 301 | if isinstance(arp_pkt, arp.arp): 302 | self.arp_forwarding(msg, arp_pkt.src_ip, arp_pkt.dst_ip) 303 | -------------------------------------------------------------------------------- /OSPF_delay/Proac/ospf_proac.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/danielaCasasv/RSIR-Reinforcement-Learning-and-SDN-Intelligent-Routing/e0d1db707103748fdd0d519da324ebc55f2de728/OSPF_delay/Proac/ospf_proac.pyc -------------------------------------------------------------------------------- /OSPF_delay/Proac/setting.py: -------------------------------------------------------------------------------- 1 | DISCOVERY_PERIOD = 5 # For discovering topology. 2 | 3 | MONITOR_PERIOD = 10 # For monitoring traffic 4 | 5 | DELAY_DETECTING_PERIOD = 7 6 | 7 | TOSHOW = True # For showing information in terminal 8 | -------------------------------------------------------------------------------- /OSPF_delay/Proac/setting.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/danielaCasasv/RSIR-Reinforcement-Learning-and-SDN-Intelligent-Routing/e0d1db707103748fdd0d519da324ebc55f2de728/OSPF_delay/Proac/setting.pyc -------------------------------------------------------------------------------- /OSPF_delay/Proac/simple_awareness.py: -------------------------------------------------------------------------------- 1 | import networkx as nx 2 | import matplotlib.pyplot as plt 3 | import time 4 | 5 | from ryu import cfg 6 | from ryu.base import app_manager 7 | from ryu.controller import ofp_event 8 | from ryu.base.app_manager import lookup_service_brick 9 | from ryu.controller.handler import MAIN_DISPATCHER 10 | from ryu.controller.handler import CONFIG_DISPATCHER 11 | from ryu.controller.handler import DEAD_DISPATCHER 12 | from ryu.controller.handler import set_ev_cls 13 | from ryu.ofproto import ofproto_v1_3 14 | from ryu.lib.packet import packet 15 | from ryu.lib.packet import ethernet 16 | from ryu.lib.packet import ipv4 17 | from ryu.lib.packet import arp 18 | from ryu.lib import hub 19 | from ryu.topology import event 20 | from ryu.topology.api import get_switch, get_link 21 | 22 | import setting 23 | 24 | CONF = cfg.CONF 25 | 26 | 27 | class simple_Awareness(app_manager.RyuApp): 28 | """ 29 | A Ryu app for discovering topology information. 30 | Provides many data services for other Apps, such as 31 | link_to_port, access_table, switch_port_table, access_ports, 32 | interior_ports, and topology graph. 33 | This represent the Topology discovery module of the Control Plane 34 | """ 35 | OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION] 36 | 37 | # List the event list should be listened. 
38 | events = [event.EventSwitchEnter, 39 | event.EventSwitchLeave, event.EventPortAdd, 40 | event.EventPortDelete, event.EventPortModify, 41 | event.EventLinkAdd, event.EventLinkDelete] 42 | 43 | def __init__(self, *args, **kwargs): 44 | super(simple_Awareness, self).__init__(*args, **kwargs) 45 | self.topology_api_app = self 46 | self.name = "awareness" 47 | self.link_to_port = {} # {(src_dpid,dst_dpid):(src_port,dst_port),} 48 | self.access_table = {} # {(sw,port):(ip, mac),} 49 | self.switch_port_table = {} # {dpid:set(port_num,),} 50 | self.access_ports = {} # {dpid:set(port_num,),} 51 | self.interior_ports = {} # {dpid:set(port_num,),} 52 | self.switches = [] # self.switches = [dpid,] 53 | self.shortest_paths = {} # {dpid:{dpid:[[path],],},} 54 | self.pre_link_to_port = {} 55 | self.pre_access_table = {} 56 | 57 | self.graph = nx.DiGraph() 58 | # Get initiation delay. 59 | self.initiation_delay = self.get_initiation_delay(4) 60 | self.start_time = time.time() 61 | 62 | # Start a green thread to discover network resource. 63 | self.discover_thread = hub.spawn(self._discover) 64 | 65 | 66 | def _discover(self): 67 | i = 0 68 | while True: 69 | 70 | self.show_topology() 71 | if i == 1: 72 | self.get_topology(None) 73 | i = 0 74 | hub.sleep(setting.DISCOVERY_PERIOD) 75 | i = i + 1 76 | 77 | def add_flow(self, dp, priority, match, actions, idle_timeout=0, hard_timeout=0): 78 | ofproto = dp.ofproto 79 | parser = dp.ofproto_parser 80 | inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, 81 | actions)] 82 | mod = parser.OFPFlowMod(datapath=dp, priority=priority, 83 | idle_timeout=idle_timeout, 84 | hard_timeout=hard_timeout, 85 | match=match, instructions=inst) 86 | dp.send_msg(mod) 87 | 88 | @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER) 89 | def switch_features_handler(self, ev): 90 | """ 91 | Install table-miss flow entry to datapaths. 92 | """ 93 | 94 | datapath = ev.msg.datapath 95 | ofproto = datapath.ofproto 96 | parser = datapath.ofproto_parser 97 | self.logger.info("switch:%s connected", datapath.id) 98 | 99 | 100 | # Install table-miss flow entry. 101 | match = parser.OFPMatch() 102 | actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER, 103 | ofproto.OFPCML_NO_BUFFER)] 104 | self.add_flow(datapath, 0, match, actions) 105 | 106 | 107 | @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER) 108 | def _packet_in_handler(self, ev): 109 | """ 110 | Handle the packet_in packet, and register the access info. 111 | """ 112 | msg = ev.msg 113 | datapath = msg.datapath 114 | in_port = msg.match['in_port'] 115 | pkt = packet.Packet(msg.data) 116 | eth_type = pkt.get_protocols(ethernet.ethernet)[0].ethertype #delay 117 | arp_pkt = pkt.get_protocol(arp.arp) 118 | ip_pkt = pkt.get_protocol(ipv4.ipv4) 119 | 120 | if arp_pkt: 121 | arp_src_ip = arp_pkt.src_ip 122 | arp_dst_ip = arp_pkt.dst_ip #delay 123 | mac = arp_pkt.src_mac 124 | # Record the access infomation. 125 | self.register_access_info(datapath.id, in_port, arp_src_ip, mac) 126 | 127 | elif ip_pkt: 128 | ip_src_ip = ip_pkt.src 129 | eth = pkt.get_protocols(ethernet.ethernet)[0] 130 | mac = eth.src 131 | # Record the access infomation. 132 | self.register_access_info(datapath.id, in_port, ip_src_ip, mac) 133 | else: 134 | pass 135 | 136 | @set_ev_cls(events) 137 | def get_topology(self, ev): 138 | """ 139 | Get topology info and calculate shortest paths. 140 | Note: In looped network, we should get the topology 141 | 20 or 30 seconds after the network went up. 
142 | """ 143 | present_time = time.time() 144 | if present_time - self.start_time < self.initiation_delay: #Set to 30s 145 | return 146 | 147 | self.logger.info("[Topology Discovery Ok]") 148 | switch_list = get_switch(self.topology_api_app, None) 149 | self.create_port_map(switch_list) 150 | time.sleep(0.5) 151 | self.switches = [sw.dp.id for sw in switch_list] 152 | links = get_link(self.topology_api_app, None) 153 | self.create_interior_links(links) 154 | self.create_access_ports() 155 | self.graph = self.get_graph(self.link_to_port.keys()) 156 | 157 | def get_host_location(self, host_ip): 158 | """ 159 | Get host location info ((datapath, port)) according to the host ip. 160 | self.access_table = {(sw,port):(ip, mac),} 161 | """ 162 | 163 | for key in self.access_table.keys(): 164 | if self.access_table[key][0] == host_ip: 165 | return key 166 | self.logger.info("%s location is not found." % host_ip) 167 | return None 168 | 169 | def get_graph(self, link_list): 170 | """ 171 | Get Adjacency matrix from link_to_port. 172 | """ 173 | _graph = self.graph.copy() 174 | for src in self.switches: 175 | for dst in self.switches: 176 | if src == dst: 177 | _graph.add_edge(src, dst, weight=0) 178 | elif (src, dst) in link_list: 179 | _graph.add_edge(src, dst, weight=1) 180 | else: 181 | pass 182 | return _graph 183 | 184 | def get_initiation_delay(self, fanout): 185 | """ 186 | Get initiation delay. 187 | """ 188 | if fanout == 4: 189 | delay = 10 190 | elif fanout == 8: 191 | delay = 20 192 | else: 193 | delay = 20 194 | return delay 195 | 196 | def create_port_map(self, switch_list): 197 | """ 198 | Create interior_port table and access_port table. 199 | """ 200 | for sw in switch_list: 201 | dpid = sw.dp.id 202 | self.switch_port_table.setdefault(dpid, set()) 203 | # switch_port_table is equal to interior_ports plus access_ports. 204 | self.interior_ports.setdefault(dpid, set()) 205 | self.access_ports.setdefault(dpid, set()) 206 | for port in sw.ports: 207 | # switch_port_table = {dpid:set(port_num,),} 208 | self.switch_port_table[dpid].add(port.port_no) 209 | 210 | def create_interior_links(self, link_list): 211 | """ 212 | Get links' srouce port to dst port from link_list. 213 | link_to_port = {(src_dpid,dst_dpid):(src_port,dst_port),} 214 | """ 215 | for link in link_list: 216 | src = link.src 217 | dst = link.dst 218 | self.link_to_port[(src.dpid, dst.dpid)] = (src.port_no, dst.port_no) 219 | # Find the access ports and interior ports. 220 | if link.src.dpid in self.switches: 221 | self.interior_ports[link.src.dpid].add(link.src.port_no) 222 | if link.dst.dpid in self.switches: 223 | self.interior_ports[link.dst.dpid].add(link.dst.port_no) 224 | 225 | def create_access_ports(self): 226 | """ 227 | Get ports without link into access_ports. 228 | """ 229 | for sw in self.switch_port_table: 230 | all_port_table = self.switch_port_table[sw] 231 | interior_port = self.interior_ports[sw] 232 | # That comes the access port of the switch. 233 | self.access_ports[sw] = all_port_table - interior_port 234 | 235 | def register_access_info(self, dpid, in_port, ip, mac): 236 | """ 237 | Register access host info into access table. 
238 | """ 239 | if in_port in self.access_ports[dpid]: 240 | if (dpid, in_port) in self.access_table: 241 | if self.access_table[(dpid, in_port)] == (ip, mac): 242 | return 243 | else: 244 | self.access_table[(dpid, in_port)] = (ip, mac) 245 | return 246 | else: 247 | self.access_table.setdefault((dpid, in_port), None) 248 | self.access_table[(dpid, in_port)] = (ip, mac) 249 | return 250 | 251 | def show_topology(self): 252 | if self.pre_link_to_port != self.link_to_port:# and setting.TOSHOW: 253 | # It means the link_to_port table has changed. 254 | _graph = self.graph.copy() 255 | print "\n---------------------Link Port---------------------" 256 | print '%6s' % ('switch'), 257 | for node in sorted([node for node in _graph.nodes()], key=lambda node: node): 258 | print '%6d' % node, 259 | print 260 | for node1 in sorted([node for node in _graph.nodes()], key=lambda node: node): 261 | print '%6d' % node1, 262 | for node2 in sorted([node for node in _graph.nodes()], key=lambda node: node): 263 | if (node1, node2) in self.link_to_port.keys(): 264 | print '%6s' % str(self.link_to_port[(node1, node2)]), 265 | else: 266 | print '%6s' % '/', 267 | print 268 | print 269 | self.pre_link_to_port = self.link_to_port.copy() 270 | 271 | if self.pre_access_table != self.access_table:# and setting.TOSHOW: 272 | # It means the access_table has changed. 273 | print "\n----------------Access Host-------------------" 274 | print '%10s' % 'switch', '%10s' % 'port', '%22s' % 'Host' 275 | if not self.access_table.keys(): 276 | print " NO found host" 277 | else: 278 | for sw in sorted(self.access_table.keys()): 279 | print '%10d' % sw[0], '%10d ' % sw[1], self.access_table[sw] 280 | print 281 | self.pre_access_table = self.access_table.copy() 282 | 283 | # nx.draw(self.graph) 284 | # plt.show() 285 | # plt.savefig("/home/controlador/ryu/ryu/app/SDNapps_proac/%d.png" % int(time.time())) -------------------------------------------------------------------------------- /OSPF_delay/Proac/simple_awareness.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/danielaCasasv/RSIR-Reinforcement-Learning-and-SDN-Intelligent-Routing/e0d1db707103748fdd0d519da324ebc55f2de728/OSPF_delay/Proac/simple_awareness.pyc -------------------------------------------------------------------------------- /OSPF_delay/Proac/simple_delay.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | from ryu import cfg 3 | from ryu.base import app_manager 4 | from ryu.base.app_manager import lookup_service_brick 5 | from ryu.controller import ofp_event 6 | from ryu.controller.handler import MAIN_DISPATCHER, DEAD_DISPATCHER 7 | from ryu.controller.handler import set_ev_cls 8 | from ryu.ofproto import ofproto_v1_3 9 | from ryu.lib import hub 10 | from ryu.topology.switches import Switches 11 | from ryu.topology.switches import LLDPPacket 12 | from ryu.app import simple_switch_13 13 | import networkx as nx 14 | import time 15 | import json,ast 16 | import csv 17 | import setting 18 | 19 | import simple_awareness 20 | 21 | CONF = cfg.CONF 22 | 23 | 24 | class simple_Delay(app_manager.RyuApp): 25 | """ 26 | A Ryu app for calculating link delay by using echo replay 27 | messages from the Control Plane to the datapaths in the Data Plane. 
28 | It is part of the Statistics module of the Control Plane 29 | 30 | """ 31 | OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION] 32 | 33 | def __init__(self, *args, **kwargs): 34 | super(simple_Delay, self).__init__(*args, **kwargs) 35 | self.name = "delay" 36 | self.sending_echo_request_interval = 0.1 37 | self.sw_module = lookup_service_brick('switches') 38 | self.awareness = lookup_service_brick('awareness') 39 | self.count = 0 40 | self.datapaths = {} 41 | self.echo_latency = {} 42 | self.link_delay = {} 43 | self.delay_dict = {} 44 | self.measure_thread = hub.spawn(self._detector) 45 | 46 | @set_ev_cls(ofp_event.EventOFPStateChange, 47 | [MAIN_DISPATCHER, DEAD_DISPATCHER]) 48 | def _state_change_handler(self, ev): 49 | datapath = ev.datapath 50 | if ev.state == MAIN_DISPATCHER: 51 | if not datapath.id in self.datapaths: 52 | self.logger.debug('Register datapath: %016x', datapath.id) 53 | self.datapaths[datapath.id] = datapath 54 | elif ev.state == DEAD_DISPATCHER: 55 | if datapath.id in self.datapaths: 56 | self.logger.debug('Unregister datapath: %016x', datapath.id) 57 | del self.datapaths[datapath.id] 58 | 59 | def _detector(self): 60 | """ 61 | Delay detecting functon. 62 | Send echo request and calculate link delay periodically 63 | """ 64 | while True: 65 | self.count += 1 66 | self._send_echo_request() 67 | self.create_link_delay() 68 | try: 69 | self.awareness.shortest_paths = {} 70 | self.logger.debug("Refresh the shortest_paths") 71 | except: 72 | self.awareness = lookup_service_brick('awareness') 73 | if self.awareness is not None: 74 | self.show_delay_statis() 75 | hub.sleep(setting.DELAY_DETECTING_PERIOD) 76 | 77 | def _send_echo_request(self): 78 | """ 79 | Seng echo request msg to datapath. 80 | """ 81 | for datapath in self.datapaths.values(): 82 | parser = datapath.ofproto_parser 83 | echo_req = parser.OFPEchoRequest(datapath, 84 | data="%.12f" % time.time()) 85 | datapath.send_msg(echo_req) 86 | # Important! Don't send echo request together, it will 87 | # generate a lot of echo reply almost in the same time. 88 | # which will generate a lot of delay of waiting in queue 89 | # when processing echo reply in echo_reply_handler. 90 | 91 | hub.sleep(self.sending_echo_request_interval) 92 | 93 | @set_ev_cls(ofp_event.EventOFPEchoReply, MAIN_DISPATCHER) 94 | def echo_reply_handler(self, ev): 95 | """ 96 | Handle the echo reply msg, and get the latency of link. 97 | """ 98 | now_timestamp = time.time() 99 | try: 100 | latency = now_timestamp - eval(ev.msg.data) 101 | self.echo_latency[ev.msg.datapath.id] = latency 102 | except: 103 | return 104 | 105 | def get_delay(self, src, dst): 106 | """ 107 | Get link delay. 
108 | Controller 109 | | | 110 | src echo latency| |dst echo latency 111 | | | 112 | SwitchA-------SwitchB 113 | 114 | fwd_delay---> 115 | <----reply_delay 116 | delay = (forward delay + reply delay - src datapath's echo latency 117 | """ 118 | try: 119 | fwd_delay = self.awareness.graph[src][dst]['lldpdelay'] 120 | re_delay = self.awareness.graph[dst][src]['lldpdelay'] 121 | src_latency = self.echo_latency[src] 122 | dst_latency = self.echo_latency[dst] 123 | delay = (fwd_delay + re_delay - src_latency - dst_latency)/2 124 | return max(delay, 0) 125 | except: 126 | return float('inf') 127 | 128 | def _save_lldp_delay(self, src=0, dst=0, lldpdelay=0): 129 | try: 130 | self.awareness.graph[src][dst]['lldpdelay'] = lldpdelay 131 | except: 132 | if self.awareness is None: 133 | self.awareness = lookup_service_brick('awareness') 134 | return 135 | 136 | def create_link_delay(self): 137 | """ 138 | Create link delay data, and save it into graph object. 139 | """ 140 | try: 141 | for src in self.awareness.graph: 142 | for dst in self.awareness.graph[src]: 143 | if src == dst: 144 | self.awareness.graph[src][dst]['delay'] = 0 145 | continue 146 | delay = self.get_delay(src, dst) 147 | self.awareness.graph[src][dst]['delay'] = delay 148 | if self.awareness is not None: 149 | for dp in self.awareness.graph: 150 | self.delay_dict.setdefault(dp, {}) 151 | self.get_link_delay() 152 | except: 153 | if self.awareness is None: 154 | self.awareness = lookup_service_brick('awareness') 155 | return 156 | 157 | 158 | @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER) 159 | def packet_in_handler(self, ev): 160 | """ 161 | Explore LLDP packet and get the delay of link (fwd and reply). 162 | """ 163 | msg = ev.msg 164 | try: 165 | src_dpid, src_port_no = LLDPPacket.lldp_parse(msg.data) 166 | dpid = msg.datapath.id 167 | if self.sw_module is None: 168 | self.sw_module = lookup_service_brick('switches') 169 | 170 | for port in self.sw_module.ports.keys(): 171 | if src_dpid == port.dpid and src_port_no == port.port_no: 172 | delay = self.sw_module.ports[port].delay 173 | self._save_lldp_delay(src=src_dpid, dst=dpid, 174 | lldpdelay=delay) 175 | except LLDPPacket.LLDPUnknownFormat as e: 176 | return 177 | 178 | def get_link_delay(self): 179 | ''' 180 | Calculates total link dealy and save it in self.link_delay[(node1,node2)]: link_delay 181 | ''' 182 | for src in self.awareness.graph: 183 | for dst in self.awareness.graph[src]: 184 | if src != dst: 185 | delay1 = self.awareness.graph[src][dst]['delay'] 186 | delay2 = self.awareness.graph[dst][src]['delay'] 187 | link_delay = ((delay1 + delay2)*1000.0)/2 #saves in ms 188 | link = (src, dst) 189 | self.link_delay[link] = link_delay 190 | self.delay_dict[src][dst] = link_delay 191 | # print(self.link_delay) 192 | if self.awareness.link_to_port: 193 | self.write_dijkstra_paths() 194 | # self.calc_stretch() 195 | 196 | def write_dijkstra_paths(self): 197 | time_init = time.time() 198 | paths = {} 199 | for dp in self.awareness.switches: 200 | paths.setdefault(dp,{}) 201 | for src in self.awareness.switches: 202 | for dst in self.awareness.switches: 203 | if src != dst: 204 | paths[src][dst] = self.dijkstra(self.delay_dict, src, dst, visited=[], distances={}, predecessors={}) 205 | 206 | with open('/home/controlador/ryu/ryu/app/OSPF_delay/Proac/paths_delay.json','w') as json_file: 207 | json.dump(paths, json_file, indent=2) 208 | 209 | total_time = time.time() - time_init 210 | # print(total_time) 211 | with 
open('/home/controlador/ryu/ryu/app/OSPF_delay/Proac/times.txt','a') as txt_file: 212 | txt_file.write(str(total_time)+'\n') 213 | self.calc_stretch() 214 | 215 | def dijkstra(self, graph, src, dest, visited=[], distances={}, predecessors={}): 216 | """ calculates a shortest path tree routed in src 217 | """ 218 | 219 | # a few sanity checks 220 | if src not in graph: 221 | raise TypeError('The root of the shortest path tree cannot be found') 222 | if dest not in graph: 223 | raise TypeError('The target of the shortest path cannot be found') 224 | # ending condition 225 | if src == dest: 226 | # We build the shortest path and display it 227 | path = [] 228 | pred = dest 229 | while pred != None: 230 | path.append(pred) 231 | pred = predecessors.get(pred, None) 232 | 233 | return list(reversed(path)) 234 | else: 235 | # if it is the initial run, initializes the cost 236 | if not visited: 237 | distances[src] = 0 238 | # visit the neighbors 239 | for neighbor in graph[src]: 240 | if neighbor not in visited: 241 | new_distance = distances[src] + graph[src][neighbor] 242 | if new_distance < distances.get(neighbor, float('inf')): 243 | distances[neighbor] = new_distance 244 | predecessors[neighbor] = src 245 | # mark as visited 246 | 247 | visited.append(src) 248 | # now that all neighbors have been visited: recurse 249 | # select the non visited node with lowest distance 'x' 250 | # run Dijskstra with src='x' 251 | unvisited = {} 252 | for k in graph: 253 | if k not in visited: 254 | unvisited[k] = distances.get(k, float('inf')) #sets the cost of link to the src neighbors with the actual value and inf for the non neighbors 255 | x = min(unvisited, key=unvisited.get) #find w not in N' such that D(w) is a minimum 256 | return self.dijkstra(graph, x, dest, visited, distances, predecessors) 257 | 258 | def get_paths_dijkstra(self): 259 | file_dijkstra = '/home/controlador/ryu/ryu/app/OSPF_delay/Proac/paths_delay.json' 260 | with open(file_dijkstra,'r') as json_file: 261 | paths_dict = json.load(json_file) 262 | paths_dijkstra = ast.literal_eval(json.dumps(paths_dict)) 263 | return paths_dijkstra 264 | 265 | def get_paths_base(self): 266 | file_base = '/home/controlador/ryu/ryu/app/OSPF_delay/Proac/paths_weight.json' 267 | with open(file_base,'r') as json_file: 268 | paths_dict = json.load(json_file) 269 | paths_base = ast.literal_eval(json.dumps(paths_dict)) 270 | return paths_base 271 | 272 | def stretch(self, paths, paths_base, src, dst): 273 | add_stretch = len(paths.get(str(src)).get(str(dst))) - len(paths_base.get(str(src)).get(str(dst))) 274 | mul_stretch = len(paths.get(str(src)).get(str(dst))) / len(paths_base.get(str(src)).get(str(dst))) 275 | return add_stretch, mul_stretch 276 | 277 | def calc_stretch(self): 278 | paths_base = self.get_paths_base() 279 | paths_dijkstra = self.get_paths_dijkstra() 280 | cont_dijkstra = 0 281 | a = time.time() 282 | sw = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23] 283 | 284 | with open('/home/controlador/ryu/ryu/app/OSPF_delay/Proac/stretch/'+str(self.count)+'_stretch.csv','wb') as csvfile: 285 | header = ['src','dst','add_st','mul_st'] 286 | file = csv.writer(csvfile, delimiter=',',quotechar='|', quoting=csv.QUOTE_MINIMAL) 287 | file.writerow(header) 288 | for src in sw: 289 | for dst in sw: 290 | if src != dst: 291 | add_stretch, mul_stretch = self.stretch(paths_dijkstra, paths_base, src, dst) 292 | # print(add_stretch) 293 | # print(mul_stretch) 294 | file.writerow([src, dst, add_stretch, mul_stretch]) 295 | 
total_time = time.time() - a 296 | 297 | def show_delay_statis(self): 298 | if self.awareness is None: 299 | print("Not doing nothing, awareness none") 300 | # else: 301 | # print("Latency ok") 302 | # if setting.TOSHOW and self.awareness is not None: 303 | # self.logger.info("\nsrc dst delay") 304 | # self.logger.info("---------------------------") 305 | # for src in self.awareness.graph: 306 | # for dst in self.awareness.graph[src]: 307 | # delay = self.awareness.graph[src][dst]['delay'] 308 | # self.logger.info("%s <--> %s : %s" % (src, dst, delay)) -------------------------------------------------------------------------------- /OSPF_delay/Proac/simple_delay.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/danielaCasasv/RSIR-Reinforcement-Learning-and-SDN-Intelligent-Routing/e0d1db707103748fdd0d519da324ebc55f2de728/OSPF_delay/Proac/simple_delay.pyc -------------------------------------------------------------------------------- /OSPF_delay/Proac/simple_monitor.py: -------------------------------------------------------------------------------- 1 | from operator import attrgetter 2 | 3 | from ryu.base import app_manager 4 | from ryu.controller import ofp_event 5 | from ryu.base.app_manager import lookup_service_brick 6 | from ryu.controller.handler import MAIN_DISPATCHER, DEAD_DISPATCHER 7 | from ryu.controller.handler import CONFIG_DISPATCHER 8 | from ryu.controller.handler import set_ev_cls 9 | from ryu.topology import event, switches 10 | from ryu.ofproto.ether import ETH_TYPE_IP 11 | from ryu.topology.api import get_switch, get_link 12 | from ryu.ofproto import ofproto_v1_3 13 | from ryu.lib import hub 14 | from ryu.lib.packet import packet 15 | from ryu.lib.packet import arp 16 | 17 | import time 18 | 19 | import simple_awareness 20 | import simple_delay 21 | # import requests 22 | import json, ast 23 | import setting 24 | import csv 25 | import time 26 | 27 | class simple_Monitor(app_manager.RyuApp): 28 | """ 29 | A Ryu app for netowrk monitoring. It retreieves statistics information through openflow 30 | of datapaths at the Data Plane. 31 | This class contains functions belonging to the Statistics module and Flow Installation module 32 | of the Control Plane. 33 | I also contains the functions corresponding to the Process Statistics module of the 34 | Management Plane in order to adventage the monitorin threading for statistics processing. 35 | """ 36 | 37 | OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION] 38 | 39 | def __init__(self, *args, **kwargs): 40 | super(simple_Monitor, self).__init__(*args, **kwargs) 41 | self.name = "monitor" 42 | self.count_monitor = 0 43 | self.topology_api_app = self 44 | self.datapaths = {} 45 | self.port_stats = {} 46 | self.port_speed = {} 47 | self.flow_stats = {} 48 | self.flow_speed = {} 49 | self.flow_loss = {} 50 | self.port_loss = {} 51 | self.link_loss = {} 52 | self.net_info = {} 53 | self.net_metrics= {} 54 | self.link_free_bw = {} 55 | self.link_used_bw = {} 56 | self.stats = {} 57 | self.port_features = {} 58 | self.free_bandwidth = {} 59 | self.paths = {} 60 | self.installed_paths = {} 61 | self.awareness = lookup_service_brick('awareness') 62 | self.delay = lookup_service_brick('delay') 63 | 64 | self.monitor_thread = hub.spawn(self.monitor) 65 | 66 | @set_ev_cls(ofp_event.EventOFPStateChange, 67 | [MAIN_DISPATCHER, DEAD_DISPATCHER]) 68 | def state_change_handler(self, ev): 69 | """ 70 | Record datapath information. 
71 | """ 72 | datapath = ev.datapath 73 | if ev.state == MAIN_DISPATCHER: 74 | if datapath.id not in self.datapaths: 75 | self.logger.debug('Datapath registered: %016x', datapath.id) 76 | print 'Datapath registered:', datapath.id ## 77 | self.datapaths[datapath.id] = datapath 78 | elif ev.state == DEAD_DISPATCHER: 79 | if datapath.id in self.datapaths: 80 | self.logger.debug('Datapath unregistered: %016x', datapath.id) 81 | print 'Datapath unregistered:', datapath.id 82 | print "FUCK" 83 | del self.datapaths[datapath.id] 84 | 85 | def monitor(self): 86 | """ 87 | Main entry method of monitoring traffic. 88 | """ 89 | while True: 90 | self.stats['flow'] = {} 91 | self.stats['port'] = {} 92 | print("[Statistics Module Ok]") 93 | print("[{0}]".format(self.count_monitor)) 94 | if self.delay is None: 95 | print('No monitor') 96 | self.delay = lookup_service_brick('delay') 97 | for dp in self.datapaths.values(): 98 | self.port_features.setdefault(dp.id, {}) #setdefault() returns the value of the item with the specified key 99 | self.paths = None 100 | self.request_stats(dp) 101 | hub.sleep(0.9) 102 | 103 | if self.stats['port']: 104 | self.count_monitor += 1 105 | self.get_port_loss() 106 | self.get_link_free_bw() 107 | self.get_link_used_bw() 108 | self.write_values() 109 | 110 | hub.sleep(setting.MONITOR_PERIOD) 111 | if self.stats['port']: 112 | self.show_stat('link') 113 | hub.sleep(1) 114 | #---------------------CONTROL PLANE FUNCTIONS--------------------------------- 115 | #---------------------STATISTICS MODULE FUNCTIONS ---------------------------- 116 | 117 | def request_stats(self, datapath): #OK 118 | self.logger.debug('send stats request: %016x', datapath.id) 119 | ofproto = datapath.ofproto 120 | parser = datapath.ofproto_parser 121 | 122 | req = parser.OFPPortDescStatsRequest(datapath, 0) #for port description 123 | datapath.send_msg(req) 124 | 125 | req = parser.OFPFlowStatsRequest(datapath) #individual flow statistics 126 | datapath.send_msg(req) 127 | 128 | req = parser.OFPPortStatsRequest(datapath, 0, ofproto.OFPP_ANY) 129 | datapath.send_msg(req) 130 | 131 | def save_stats(self, _dict, key, value, length=5): 132 | if key not in _dict: 133 | _dict[key] = [] 134 | _dict[key].append(value) 135 | if len(_dict[key]) > length: 136 | _dict[key].pop(0) 137 | 138 | def get_speed(self, now, pre, period): #bits/s 139 | if period: 140 | return ((now - pre)*8) / period 141 | else: 142 | return 0 143 | 144 | def get_time(self, sec, nsec): #Total time that the flow was alive in seconds 145 | return sec + nsec / 1000000000.0 146 | 147 | def get_period(self, n_sec, n_nsec, p_sec, p_nsec): # (time las flow, time) 148 | # calculates period of time between flows 149 | return self.get_time(n_sec, n_nsec) - self.get_time(p_sec, p_nsec) 150 | 151 | def get_sw_dst(self, dpid, out_port): 152 | for key in self.awareness.link_to_port: 153 | src_port = self.awareness.link_to_port[key][0] 154 | if key[0] == dpid and src_port == out_port: 155 | dst_sw = key[1] 156 | dst_port = self.awareness.link_to_port[key][1] 157 | # print(dst_sw,dst_port) 158 | return (dst_sw, dst_port) 159 | 160 | def get_link_bw(self, file, src_dpid, dst_dpid): 161 | fin = open(file, "r") 162 | bw_capacity_dict = {} 163 | for line in fin: 164 | a = line.split(',') 165 | if a: 166 | s1 = a[0] 167 | s2 = a[1] 168 | # bwd = a[2] #random capacities 169 | bwd = a[3] #original caps 170 | bw_capacity_dict.setdefault(s1,{}) 171 | bw_capacity_dict[str(a[0])][str(a[1])] = bwd 172 | fin.close() 173 | bw_link = 
bw_capacity_dict[str(src_dpid)][str(dst_dpid)] 174 | return bw_link 175 | 176 | def get_free_bw(self, port_capacity, speed): 177 | # freebw: Kbit/s 178 | return max(port_capacity - (speed/ 1000.0), 0) 179 | 180 | #------------------MANAGEMENT PLANE MODULE --------------------------- 181 | #------------------PROCESS STATISTICS MODULE FUNCTIONS---------------- 182 | 183 | def get_flow_loss(self): 184 | #Get per flow loss 185 | bodies = self.stats['flow'] 186 | for dp in bodies.keys(): 187 | list_flows = sorted([flow for flow in bodies[dp] if flow.priority == 1], 188 | key=lambda flow: (flow.match.get('ipv4_src'),flow.match.get('ipv4_dst'))) 189 | for stat in list_flows: 190 | out_port = stat.instructions[0].actions[0].port 191 | if self.awareness.link_to_port and out_port != 1: #get loss form ports of network 192 | key = (stat.match.get('ipv4_src'), stat.match.get('ipv4_dst')) 193 | tmp1 = self.flow_stats[dp][key] 194 | byte_count_src = tmp1[-1][1] 195 | 196 | result = self.get_sw_dst(dp, out_port) 197 | dst_sw = result[0] 198 | tmp2 = self.flow_stats[dst_sw][key] 199 | byte_count_dst = tmp2[-1][1] 200 | flow_loss = byte_count_src - byte_count_dst 201 | self.save_stats(self.flow_loss[dp], key, flow_loss, 5) 202 | 203 | def get_port_loss(self): 204 | #Get loss_port 205 | bodies = self.stats['port'] 206 | for dp in sorted(bodies.keys()): 207 | for stat in sorted(bodies[dp], key=attrgetter('port_no')): 208 | if self.awareness.link_to_port and stat.port_no != 1 and stat.port_no != ofproto_v1_3.OFPP_LOCAL: #get loss form ports of network 209 | key1 = (dp, stat.port_no) 210 | tmp1 = self.port_stats[key1] 211 | tx_bytes_src = tmp1[-1][0] 212 | tx_pkts_src = tmp1[-1][8] 213 | 214 | key2 = self.get_sw_dst(dp, stat.port_no) 215 | tmp2 = self.port_stats[key2] 216 | rx_bytes_dst = tmp2[-1][1] 217 | rx_pkts_dst = tmp2[-1][9] 218 | loss_port = float(tx_pkts_src - rx_pkts_dst) / tx_pkts_src #loss rate 219 | values = (loss_port, key2) 220 | self.save_stats(self.port_loss[dp], key1, values, 5) 221 | 222 | #Calculates the total link loss and save it in self.link_loss[(node1,node2)]:loss 223 | for dp in self.port_loss.keys(): 224 | for port in self.port_loss[dp]: 225 | key2 = self.port_loss[dp][port][-1][1] 226 | loss_src = self.port_loss[dp][port][-1][0] 227 | # tx_src = self.port_loss[dp][port][-1][1] 228 | loss_dst = self.port_loss[key2[0]][key2][-1][0] 229 | # tx_dst = self.port_loss[key2[0]][key2][-1][1] 230 | loss_l = (abs(loss_src) + abs(loss_dst)) / 2 231 | link = (dp, key2[0]) 232 | self.link_loss[link] = loss_l*100.0 #loss in porcentage 233 | 234 | def get_link_free_bw(self): 235 | #Calculates the total free bw of link and save it in self.link_free_bw[(node1,node2)]:link_free_bw 236 | for dp in self.free_bandwidth.keys(): 237 | for port in self.free_bandwidth[dp]: 238 | free_bw1 = self.free_bandwidth[dp][port] 239 | key2 = self.get_sw_dst(dp, port) #key2 = (dp,port) 240 | free_bw2= self.free_bandwidth[key2[0]][key2[1]] 241 | link_free_bw = (free_bw1 + free_bw2)/2 242 | link = (dp, key2[0]) 243 | self.link_free_bw[link] = link_free_bw 244 | 245 | def get_link_used_bw(self): 246 | #Calculates the total free bw of link and save it in self.link_free_bw[(node1,node2)]:link_free_bw 247 | for key in self.port_speed.keys(): 248 | used_bw1 = self.port_speed[key][-1] 249 | key2 = self.get_sw_dst(key[0], key[1]) #key2 = (dp,port) 250 | used_bw2 = self.port_speed[key2][-1] 251 | link_used_bw = (used_bw1 + used_bw2)/2 252 | link = (key[0], key2[0]) 253 | self.link_used_bw[link] = link_used_bw 254 | 255 | 
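# Illustrative note: the three helpers above (get_port_loss, get_link_free_bw,
# get_link_used_bw) collapse the two per-direction port readings of a link into
# a single per-link value by simple averaging. With hypothetical numbers for
# link (1, 7): loss_src = 0.02 and loss_dst = 0.04 give
# link_loss[(1, 7)] = (0.02 + 0.04) / 2 * 100.0 = 3.0 (percent), while free
# bandwidths of 90000 and 80000 Kb/s give link_free_bw[(1, 7)] = 85000 Kb/s.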
#---------------------CONTROL PLANE FUNCTIONS--------------------------------- 256 | #---------------------STATISTICS MODULE FUNCTIONS ---------------------------- 257 | 258 | @set_ev_cls(ofp_event.EventOFPFlowStatsReply, MAIN_DISPATCHER) #OK 259 | def flow_stats_reply_handler(self, ev): 260 | """ 261 | Save flow stats reply information into self.flow_stats. 262 | Calculate flow speed and Save it. 263 | self.flow_stats = {dpid:{(ipv4_src, ipv4_dst):[(packet_count, byte_count, duration_sec, duration_nsec),],},} 264 | self.flow_speed = {dpid:{(ipv4_src, ipv4_dst):[speed,],},} 265 | self.flow_loss = {dpid:{(ipv4_src, ipv4_dst, dst_sw):[loss,],},} 266 | """ 267 | 268 | body = ev.msg.body 269 | dpid = ev.msg.datapath.id 270 | self.stats['flow'][dpid] = body 271 | self.flow_stats.setdefault(dpid, {}) 272 | self.flow_speed.setdefault(dpid, {}) 273 | self.flow_loss.setdefault(dpid, {}) 274 | 275 | #flows.append('table_id=%s ' 276 | # 'duration_sec=%d duration_nsec=%d ' 277 | # 'priority=%d ' 278 | # 'idle_timeout=%d hard_timeout=%d flags=0x%04x ' 279 | # 'cookie=%d packet_count=%d byte_count=%d ' 280 | # 'match=%s instructions=%s' % 281 | # (stat.table_id, 282 | # stat.duration_sec, stat.duration_nsec, 283 | # stat.priority, 284 | # stat.idle_timeout, stat.hard_timeout, stat.flags, 285 | # stat.cookie, stat.packet_count, stat.byte_count, 286 | # stat.match, stat.instructions) 287 | 288 | for stat in sorted([flow for flow in body if flow.priority == 1], 289 | key=lambda flow: (flow.match.get('ipv4_src'), 290 | flow.match.get('ipv4_dst'))): 291 | key = (stat.match.get('ipv4_src'), stat.match.get('ipv4_dst')) 292 | 293 | value = (stat.packet_count, stat.byte_count, 294 | stat.duration_sec, stat.duration_nsec)#duration_sec: Time flow was alive in seconds 295 | #duration_nsec: Time flow was alive in nanoseconds beyond duration_sec 296 | self.save_stats(self.flow_stats[dpid], key, value, 5) 297 | 298 | # CALCULATE FLOW BYTE RATE 299 | pre = 0 300 | period = setting.MONITOR_PERIOD 301 | tmp = self.flow_stats[dpid][key] 302 | if len(tmp) > 1: 303 | pre = tmp[-2][1] #penultimo flow byte_count 304 | period = self.get_period(tmp[-1][2], tmp[-1][3], #valores (sec,nsec) ultimo flow, penultimo flow) 305 | tmp[-2][2], tmp[-2][3]) 306 | speed = self.get_speed(self.flow_stats[dpid][key][-1][1], #ultimo flow byte_count, penultimo byte_count, periodo 307 | pre, period) 308 | self.save_stats(self.flow_speed[dpid], key, speed, 5) #bits/s 309 | 310 | @set_ev_cls(ofp_event.EventOFPPortStatsReply, MAIN_DISPATCHER) 311 | def port_stats_reply_handler(self, ev): 312 | a = time.time() 313 | body = ev.msg.body 314 | dpid = ev.msg.datapath.id 315 | 316 | self.stats['port'][dpid] = body 317 | self.free_bandwidth.setdefault(dpid, {}) 318 | self.port_loss.setdefault(dpid, {}) 319 | """ 320 | Save port's stats information into self.port_stats. 321 | Calculate port speed and Save it. 322 | self.port_stats = {(dpid, port_no):[(tx_bytes, rx_bytes, rx_errors, duration_sec, duration_nsec),],} 323 | self.port_speed = {(dpid, port_no):[speed,],} 324 | Note: The transmit performance and receive performance are independent of a port. 325 | Calculate the load of a port only using tx_bytes. 
326 | 327 | Replay message content: 328 | (stat.port_no, 329 | stat.rx_packets, stat.tx_packets, 330 | stat.rx_bytes, stat.tx_bytes, 331 | stat.rx_dropped, stat.tx_dropped, 332 | stat.rx_errors, stat.tx_errors, 333 | stat.rx_frame_err, stat.rx_over_err, 334 | stat.rx_crc_err, stat.collisions, 335 | stat.duration_sec, stat.duration_nsec)) 336 | """ 337 | 338 | for stat in sorted(body, key=attrgetter('port_no')): #get the value of port_no form body 339 | port_no = stat.port_no 340 | key = (dpid, port_no) #src_dpid, src_port 341 | value = (stat.tx_bytes, stat.rx_bytes, stat.rx_errors, 342 | stat.duration_sec, stat.duration_nsec, stat.tx_errors, stat.tx_dropped, stat.rx_dropped, stat.tx_packets, stat.rx_packets) 343 | self.save_stats(self.port_stats, key, value, 5) 344 | 345 | if port_no != ofproto_v1_3.OFPP_LOCAL: #si es dif de puerto local del sw donde se lee port 346 | if port_no != 1 and self.awareness.link_to_port : 347 | # Get port speed and Save it. 348 | pre = 0 349 | period = setting.MONITOR_PERIOD 350 | tmp = self.port_stats[key] 351 | if len(tmp) > 1: 352 | # Calculate with the tx_bytes and rx_bytes 353 | pre = tmp[-2][0] + tmp[-2][1] #penultimo port tx_bytes 354 | period = self.get_period(tmp[-1][3], tmp[-1][4], tmp[-2][3], tmp[-2][4]) #periodo entre el ultimo y penultimo total bytes en el puerto 355 | speed = self.get_speed(self.port_stats[key][-1][0] + self.port_stats[key][-1][1], pre, period) #speed in bits/s 356 | self.save_stats(self.port_speed, key, speed, 5) 357 | 358 | #Get links capacities 359 | file = '/home/controlador/ryu/ryu/app/OSPF_delay/Proac/bw_r.txt' 360 | link_to_port = self.awareness.link_to_port 361 | 362 | for k in list(link_to_port.keys()): 363 | if k[0] == dpid: 364 | if link_to_port[k][0] == port_no: 365 | dst_dpid = k[1] 366 | 367 | #FUNCIONA CON LISTA----------------------------- 368 | # list_dst_dpid = [k for k in list(link_to_port.keys()) if k[0] == dpid and link_to_port[k][0] == port_no] 369 | # if len(list_dst_dpid) > 0: 370 | # dst_dpid = list_dst_dpid[0][1] 371 | # ----------------------------------------- 372 | bw_link = float(self.get_link_bw(file, dpid, dst_dpid)) 373 | port_state = self.port_features.get(dpid).get(port_no) 374 | 375 | if port_state: 376 | bw_link_kbps = bw_link * 1000.0 377 | self.port_features[dpid][port_no].append(bw_link_kbps) 378 | free_bw = self.get_free_bw(bw_link_kbps, speed) 379 | self.free_bandwidth[dpid][port_no] = free_bw 380 | 381 | @set_ev_cls(ofp_event.EventOFPPortDescStatsReply, MAIN_DISPATCHER) 382 | def port_desc_stats_reply_handler(self, ev): 383 | """ 384 | Save port description info. 
385 | """ 386 | msg = ev.msg 387 | dpid = msg.datapath.id 388 | ofproto = msg.datapath.ofproto 389 | 390 | config_dict = {ofproto.OFPPC_PORT_DOWN: "Down", 391 | ofproto.OFPPC_NO_RECV: "No Recv", 392 | ofproto.OFPPC_NO_FWD: "No Farward", 393 | ofproto.OFPPC_NO_PACKET_IN: "No Packet-in"} 394 | 395 | state_dict = {ofproto.OFPPS_LINK_DOWN: "Down", 396 | ofproto.OFPPS_BLOCKED: "Blocked", 397 | ofproto.OFPPS_LIVE: "Live"} 398 | 399 | ports = [] 400 | for p in ev.msg.body: 401 | if p.port_no != 1: 402 | 403 | ports.append('port_no=%d hw_addr=%s name=%s config=0x%08x ' 404 | 'state=0x%08x curr=0x%08x advertised=0x%08x ' 405 | 'supported=0x%08x peer=0x%08x curr_speed=%d ' 406 | 'max_speed=%d' % 407 | (p.port_no, p.hw_addr, 408 | p.name, p.config, 409 | p.state, p.curr, p.advertised, 410 | p.supported, p.peer, p.curr_speed, 411 | p.max_speed)) 412 | if p.config in config_dict: 413 | config = config_dict[p.config] 414 | else: 415 | config = "up" 416 | 417 | if p.state in state_dict: 418 | state = state_dict[p.state] 419 | else: 420 | state = "up" 421 | 422 | # Recording data. 423 | port_feature = [config, state] 424 | self.port_features[dpid][p.port_no] = port_feature 425 | 426 | @set_ev_cls(ofp_event.EventOFPPortStatus, MAIN_DISPATCHER) 427 | def port_status_handler(self, ev): 428 | """ 429 | Handle the port status changed event. 430 | """ 431 | msg = ev.msg 432 | ofproto = msg.datapath.ofproto 433 | reason = msg.reason 434 | dpid = msg.datapath.id 435 | port_no = msg.desc.port_no 436 | 437 | reason_dict = {ofproto.OFPPR_ADD: "added", 438 | ofproto.OFPPR_DELETE: "deleted", 439 | ofproto.OFPPR_MODIFY: "modified", } 440 | 441 | if reason in reason_dict: 442 | print "switch%d: port %s %s" % (dpid, reason_dict[reason], port_no) 443 | else: 444 | print "switch%d: Illegal port state %s %s" % (dpid, port_no, reason) 445 | 446 | def write_values(self): 447 | a = time.time() 448 | if self.delay.link_delay: 449 | for link in self.link_free_bw: 450 | self.net_info[link] = [round(self.link_free_bw[link],6) , round(self.delay.link_delay[link],6), round(self.link_loss[link],6)] 451 | self.net_metrics[link] = [round(self.link_free_bw[link],6), round(self.link_used_bw[link],6), round(self.link_loss[link],6), round(self.delay.link_delay[link],6)] 452 | 453 | file_metrics = '/home/controlador/ryu/ryu/app/OSPF_delay/Proac/Metrics/'+str(self.count_monitor)+'_net_metrics.csv' 454 | with open(file_metrics,'wb') as csvfile: 455 | header_ = ['node1','node2','free_bw','used_bw', 'pkloss', 'delay'] 456 | file = csv.writer(csvfile, delimiter=',',quotechar='|', quoting=csv.QUOTE_MINIMAL) 457 | links_in = [] 458 | file.writerow(header_) 459 | for link, values in sorted(self.net_metrics.items()): 460 | links_in.append(link) 461 | tup = (link[1], link[0]) 462 | if tup not in links_in: 463 | file.writerow([link[0],link[1],values[0],values[1],values[2],values[3]]) 464 | b = time.time() 465 | return 466 | 467 | def show_stat(self, _type): 468 | ''' 469 | Show statistics information according to data type. 
470 | _type: 'port' / 'flow' 471 | ''' 472 | if setting.TOSHOW is False: 473 | return 474 | 475 | 476 | if _type == 'flow' and self.awareness.link_to_port: 477 | bodies = self.stats['flow'] 478 | print('datapath '' ip_src ip-dst ' 479 | 'out-port packets bytes flow-speed(b/s)') 480 | print('---------------- '' -------- ----------------- ' 481 | '-------- -------- -------- -----------') 482 | for dpid in bodies.keys(): 483 | for stat in sorted( 484 | [flow for flow in bodies[dpid] if flow.priority == 1], 485 | # key=lambda flow: (flow.match.get('in_port'), 486 | key=lambda flow: (flow.match.get('ipv4_src'), 487 | flow.match.get('ipv4_dst'))): 488 | key = (stat.match.get('ipv4_src'), stat.match.get('ipv4_dst')) 489 | print('{:>016} {:>9} {:>17} {:>8} {:>8} {:>8} {:>8.1f}'.format( 490 | dpid, 491 | stat.match['ipv4_src'], stat.match['ipv4_dst'], #flow match 492 | stat.instructions[0].actions[0].port, #port 493 | stat.packet_count, stat.byte_count, 494 | abs(self.flow_speed[dpid][key][-1])))#, 495 | # abs(self.flow_loss[dpid][ #flow loss 496 | # (stat.match.get('ipv4_src'),stat.match.get('ipv4_dst'))][-1]))) 497 | print() 498 | 499 | if _type == 'port': #and self.awareness.link_to_port: 500 | bodies = self.stats['port'] 501 | print('\ndatapath port ' 502 | ' rx-pkts rx-bytes '' tx-pkts tx-bytes ' 503 | ' port-bw(Kb/s) port-speed(Kb/s) port-freebw(Kb/s) ' 504 | ' port-state link-state') 505 | print('-------- ---- ' 506 | '--------- ----------- ''--------- ----------- ' 507 | '------------- --------------- ----------------- ' 508 | '---------- ----------') 509 | format_ = '{:>8} {:>4} {:>9} {:>11} {:>9} {:>11} {:>13.3f} {:>15.5f} {:>17.5f} {:>10} {:>10} {:>10} {:>10}' 510 | 511 | for dpid in sorted(bodies.keys()): 512 | for stat in sorted(bodies[dpid], key=attrgetter('port_no')): 513 | if stat.port_no != 1: 514 | if stat.port_no != ofproto_v1_3.OFPP_LOCAL: #port 1 is the host output 515 | if self.free_bandwidth[dpid]: 516 | self.logger.info(format_.format( 517 | dpid, stat.port_no, #datapath , num_port 518 | stat.rx_packets, stat.rx_bytes, 519 | stat.tx_packets, stat.tx_bytes, 520 | self.port_features[dpid][stat.port_no][2], #port_bw (kb/s) MAX 521 | abs(self.port_speed[(dpid, stat.port_no)][-1]/1000.0), #port_speed Kbits/s 522 | self.free_bandwidth[dpid][stat.port_no], #port_free bw kb/s 523 | self.port_features[dpid][stat.port_no][0], #port state 524 | self.port_features[dpid][stat.port_no][1], #link state 525 | stat.rx_dropped, stat.tx_dropped)) 526 | print() 527 | 528 | if _type == 'link': 529 | print('\nnode1 node2 used-bw(Kb/s) free-bw(Kb/s) latency loss') 530 | print('----- ----- -------------- -------------- ---------- ---- ') 531 | 532 | format_ = '{:>5} {:>5} {:>14.5f} {:>14.5f} {:>10} {:>12}' 533 | 534 | links_in = [] 535 | for link, values in sorted(self.net_info.items()): 536 | links_in.append(link) 537 | tup = (link[1], link[0]) 538 | if tup not in links_in: 539 | print(format_.format(link[0],link[1], 540 | self.link_used_bw[link]/1000.0, 541 | values[0], values[1],values[2])) -------------------------------------------------------------------------------- /OSPF_delay/Proac/simple_monitor.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/danielaCasasv/RSIR-Reinforcement-Learning-and-SDN-Intelligent-Routing/e0d1db707103748fdd0d519da324ebc55f2de728/OSPF_delay/Proac/simple_monitor.pyc -------------------------------------------------------------------------------- 
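The monitoring app above turns consecutive OFPPortStats samples into a port throughput (get_speed over the period from get_period) and a residual capacity (get_free_bw). The standalone sketch below reproduces only that arithmetic with made-up sample values, so the unit handling is easy to check: speed in bits/s, capacity and free bandwidth in Kbit/s. The function bodies mirror the app's, but the two samples and the 25 Mbit/s link capacity are hypothetical.

# Minimal sketch (illustrative only): how simple_monitor.py derives port speed and
# free bandwidth from two consecutive OFPPortStats samples. Sample values are made up.

def get_time(sec, nsec):
    # Total time the counters have been running, in seconds.
    return sec + nsec / 1000000000.0

def get_period(n_sec, n_nsec, p_sec, p_nsec):
    # Elapsed time between the latest and the previous sample.
    return get_time(n_sec, n_nsec) - get_time(p_sec, p_nsec)

def get_speed(now_bytes, pre_bytes, period):
    # Throughput in bits/s over the sampling period.
    return ((now_bytes - pre_bytes) * 8) / period if period else 0

def get_free_bw(capacity_kbps, speed_bps):
    # Residual capacity in Kbit/s; never negative.
    return max(capacity_kbps - (speed_bps / 1000.0), 0)

# Two hypothetical samples of (tx_bytes + rx_bytes, duration_sec, duration_nsec):
prev = (1250000, 100, 0)
curr = (3750000, 110, 0)

period = get_period(curr[1], curr[2], prev[1], prev[2])  # 10 s between samples
speed = get_speed(curr[0], prev[0], period)              # 2 Mbit/s of traffic
free = get_free_bw(25 * 1000.0, speed)                   # on a 25 Mbit/s link

print(speed, free)  # 2000000.0 23000.0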
/OSPF_delay/Proac/stretch/Folder_for_storing_paths_stretch_during_experiment: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/danielaCasasv/RSIR-Reinforcement-Learning-and-SDN-Intelligent-Routing/e0d1db707103748fdd0d519da324ebc55f2de728/OSPF_delay/Proac/stretch/Folder_for_storing_paths_stretch_during_experiment -------------------------------------------------------------------------------- /OSPF_loss/Proac/Metrics/Folder_for_storing_metrics_during_experiment: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/danielaCasasv/RSIR-Reinforcement-Learning-and-SDN-Intelligent-Routing/e0d1db707103748fdd0d519da324ebc55f2de728/OSPF_loss/Proac/Metrics/Folder_for_storing_metrics_during_experiment -------------------------------------------------------------------------------- /OSPF_loss/Proac/bw.txt: -------------------------------------------------------------------------------- 1 | 10 3 100.0 2 | 22 20 25.0 3 | 15 20 25.0 4 | 3 14 1.55 5 | 7 19 25.0 6 | 10 11 25.0 7 | 5 8 25.0 8 | 7 17 100.0 9 | 3 11 25.0 10 | 13 19 25.0 11 | 8 9 25.0 12 | 3 21 100.0 13 | 14 13 1.55 14 | 2 13 100.0 15 | 6 19 1.55 16 | 12 22 100.0 17 | 16 10 100.0 18 | 1 7 100.0 19 | 3 1 100.0 20 | 2 4 100.0 21 | 17 13 100.0 22 | 1 16 100.0 23 | 9 15 25.0 24 | 20 17 100.0 25 | 2 18 25.0 26 | 17 10 100.0 27 | 6 7 1.55 28 | 16 5 25.0 29 | 4 16 100.0 30 | 2 23 25.0 31 | 7 21 100.0 32 | 12 10 100.0 33 | 7 2 100.0 34 | 17 23 25.0 35 | 21 18 25.0 36 | 16 9 100.0 37 | 2 12 100.0 38 | 3 10 100.0 39 | 20 22 25.0 40 | 20 15 25.0 41 | 14 3 1.55 42 | 19 7 25.0 43 | 11 10 25.0 44 | 8 5 25.0 45 | 17 7 100.0 46 | 11 3 25.0 47 | 19 13 25.0 48 | 9 8 25.0 49 | 21 3 100.0 50 | 13 14 1.55 51 | 13 2 100.0 52 | 19 6 1.55 53 | 22 12 100.0 54 | 10 16 100.0 55 | 7 1 100.0 56 | 1 3 100.0 57 | 4 2 100.0 58 | 13 17 100.0 59 | 16 1 100.0 60 | 15 9 25.0 61 | 17 20 100.0 62 | 18 2 25.0 63 | 10 17 100.0 64 | 7 6 1.55 65 | 5 16 25.0 66 | 16 4 100.0 67 | 23 2 25.0 68 | 21 7 100.0 69 | 10 12 100.0 70 | 2 7 100.0 71 | 23 17 25.0 72 | 18 21 25.0 73 | 9 16 100.0 74 | 12 2 100.0 75 | -------------------------------------------------------------------------------- /OSPF_loss/Proac/bw_r.txt: -------------------------------------------------------------------------------- 1 | 10,3,78,100 2 | 22,20,28,25 3 | 15,20,68,25 4 | 3,14,74,1.55 5 | 7,19,13,25 6 | 10,11,31,25 7 | 5,8,90,25 8 | 7,17,38,100 9 | 3,11,82,25 10 | 13,19,69,25 11 | 8,9,15,25 12 | 3,21,57,100 13 | 14,13,1,1.55 14 | 2,13,82,100 15 | 6,19,34,1.55 16 | 12,22,43,100 17 | 16,10,66,100 18 | 1,7,32,100 19 | 3,1,17,100 20 | 2,4,26,100 21 | 17,13,46,100 22 | 1,16,57,100 23 | 9,15,83,25 24 | 20,17,26,100 25 | 2,18,44,25 26 | 17,10,90,100 27 | 6,7,8,1.55 28 | 16,5,31,25 29 | 4,16,3,100 30 | 2,23,25,25 31 | 7,21,8,100 32 | 12,10,30,100 33 | 7,2,45,100 34 | 17,23,52,25 35 | 21,18,78,25 36 | 16,9,90,100 37 | 2,12,58,100 38 | 3,10,78,100 39 | 20,22,28,25 40 | 20,15,68,25 41 | 14,3,74,1.55 42 | 19,7,13,25 43 | 11,10,31,25 44 | 8,5,90,25 45 | 17,7,38,100 46 | 11,3,82,25 47 | 19,13,69,25 48 | 9,8,15,25 49 | 21,3,57,100 50 | 13,14,1,1.55 51 | 13,2,82,100 52 | 19,6,34,1.55 53 | 22,12,43,100 54 | 10,16,66,100 55 | 7,1,32,100 56 | 1,3,17,100 57 | 4,2,26,100 58 | 13,17,46,100 59 | 16,1,57,100 60 | 15,9,83,25 61 | 17,20,26,100 62 | 18,2,44,25 63 | 10,17,90,100 64 | 7,6,8,1.55 65 | 5,16,31,25 66 | 16,4,3,100 67 | 23,2,25,25 68 | 21,7,8,100 69 | 10,12,30,100 70 | 2,7,45,100 71 | 23,17,52,25 72 | 
18,21,78,25 73 | 9,16,90,100 74 | 12,2,58,100 75 | -------------------------------------------------------------------------------- /OSPF_loss/Proac/clear.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | sudo rm times.txt 3 | cd ~/ryu/ryu/app/OSPF_loss/Proac/Metrics/ 4 | sudo rm *metrics* 5 | cd ~/ryu/ryu/app/OSPF_loss/Proac/stretch/ 6 | sudo rm *stretch* 7 | 8 | 9 | -------------------------------------------------------------------------------- /OSPF_loss/Proac/ospf_proac.py: -------------------------------------------------------------------------------- 1 | from operator import attrgetter 2 | 3 | from ryu.base import app_manager 4 | from ryu.controller import ofp_event 5 | from ryu.controller.handler import MAIN_DISPATCHER, DEAD_DISPATCHER 6 | from ryu.controller.handler import CONFIG_DISPATCHER 7 | from ryu.controller.handler import set_ev_cls 8 | from ryu.topology import event, switches 9 | from ryu.ofproto.ether import ETH_TYPE_IP 10 | from ryu.topology.api import get_switch, get_link 11 | from ryu.ofproto import ofproto_v1_3 12 | from ryu.lib import hub 13 | from ryu.lib.packet import packet 14 | from ryu.lib.packet import arp 15 | 16 | import time 17 | import simple_awareness 18 | import simple_delay 19 | import simple_monitor 20 | # import requests 21 | import json, ast 22 | import setting 23 | import csv 24 | import time 25 | 26 | class baseline_Dijsktra(app_manager.RyuApp): 27 | ''' 28 | A Ryu app that route traffic based on Dijkstra algorithm when it takes 29 | link loss as link cost. 30 | ''' 31 | 32 | OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION] 33 | _CONTEXTS = {"simple_awareness": simple_awareness.simple_Awareness, 34 | "simple_delay": simple_delay.simple_Delay, 35 | "simple_monitor": simple_monitor.simple_Monitor} 36 | 37 | def __init__(self, *args, **kwargs): 38 | super(baseline_Dijsktra, self).__init__(*args, **kwargs) 39 | self.awareness = kwargs["simple_awareness"] 40 | self.delay = kwargs["simple_delay"] 41 | self.monitor = kwargs["simple_monitor"] 42 | self.datapaths = {} 43 | self.paths = {} 44 | self.monitor_thread = hub.spawn(self.installation_module) 45 | 46 | @set_ev_cls(ofp_event.EventOFPStateChange, 47 | [MAIN_DISPATCHER, DEAD_DISPATCHER]) 48 | def state_change_handler(self, ev): 49 | """ 50 | Record datapath information. 51 | """ 52 | datapath = ev.datapath 53 | if ev.state == MAIN_DISPATCHER: 54 | if datapath.id not in self.datapaths: 55 | self.logger.debug('Datapath registered: %016x', datapath.id) 56 | print 'Datapath registered:', datapath.id ## 57 | self.datapaths[datapath.id] = datapath 58 | elif ev.state == DEAD_DISPATCHER: 59 | if datapath.id in self.datapaths: 60 | self.logger.debug('Datapath unregistered: %016x', datapath.id) 61 | print 'Datapath unregistered:', datapath.id 62 | print "FUCK" 63 | del self.datapaths[datapath.id] 64 | 65 | def installation_module(self): 66 | """ 67 | Main entry method of monitoring traffic. 
68 | """ 69 | while True: 70 | if self.awareness.link_to_port: 71 | self.paths = None 72 | self.flow_install_monitor() 73 | hub.sleep(setting.MONITOR_PERIOD) 74 | 75 | def flow_install_monitor(self): 76 | print("[Flow Installation Ok]") 77 | out_time = time.time() 78 | for dp in self.datapaths.values(): 79 | for dp2 in self.datapaths.values(): 80 | if dp.id != dp2.id: 81 | ip_src = '10.0.0.'+str(dp.id) 82 | ip_dst = '10.0.0.'+str(dp2.id) 83 | self.forwarding(dp.id, ip_src, ip_dst, dp.id, dp2.id) 84 | time.sleep(0.0005) 85 | end_out_time = time.time() 86 | out_total_ = end_out_time - out_time 87 | return 88 | 89 | def forwarding(self, dpid, ip_src, ip_dst, src_sw, dst_sw): 90 | """ 91 | Get paths and install them into datapaths. 92 | """ 93 | path = self.get_path(str(src_sw), str(dst_sw)) #changed to str cuz the json convertion 94 | flow_info = (ip_src, ip_dst) 95 | self.install_flow(self.datapaths, self.awareness.link_to_port, path, flow_info) 96 | 97 | 98 | def install_flow(self, datapaths, link_to_port, path, 99 | flow_info, data=None): 100 | init_time_install = time.time() 101 | ''' 102 | Install flow entires. 103 | path=[dpid1, dpid2...] 104 | flow_info=(src_ip, dst_ip) 105 | ''' 106 | if path is None or len(path) == 0: 107 | self.logger.info("Path error!") 108 | return 109 | 110 | in_port = 1 111 | first_dp = datapaths[path[0]] 112 | 113 | out_port = first_dp.ofproto.OFPP_LOCAL 114 | back_info = (flow_info[1], flow_info[0]) 115 | 116 | # Flow installing por middle datapaths in path 117 | if len(path) > 2: 118 | for i in range(1, len(path)-1): 119 | port = self.get_port_pair_from_link(link_to_port, 120 | path[i-1], path[i]) 121 | port_next = self.get_port_pair_from_link(link_to_port, 122 | path[i], path[i+1]) 123 | if port and port_next: 124 | src_port, dst_port = port[1], port_next[0] 125 | datapath = datapaths[path[i]] 126 | self.send_flow_mod(datapath, flow_info, src_port, dst_port) 127 | self.send_flow_mod(datapath, back_info, dst_port, src_port) 128 | if len(path) > 1: 129 | # The last flow entry 130 | port_pair = self.get_port_pair_from_link(link_to_port, 131 | path[-2], path[-1]) 132 | if port_pair is None: 133 | self.logger.info("Port is not found") 134 | return 135 | src_port = port_pair[1] 136 | dst_port = 1 #I know that is the host port -- 137 | last_dp = datapaths[path[-1]] 138 | self.send_flow_mod(last_dp, flow_info, src_port, dst_port) 139 | self.send_flow_mod(last_dp, back_info, dst_port, src_port) 140 | 141 | # The first flow entry 142 | port_pair = self.get_port_pair_from_link(link_to_port, path[0], path[1]) 143 | if port_pair is None: 144 | self.logger.info("Port not found in first hop.") 145 | return 146 | out_port = port_pair[0] 147 | self.send_flow_mod(first_dp, flow_info, in_port, out_port) 148 | self.send_flow_mod(first_dp, back_info, out_port, in_port) 149 | 150 | # src and dst on the same datapath 151 | else: 152 | out_port = 1 153 | self.send_flow_mod(first_dp, flow_info, in_port, out_port) 154 | self.send_flow_mod(first_dp, back_info, out_port, in_port) 155 | 156 | end_time_install = time.time() 157 | total_install = end_time_install - init_time_install 158 | 159 | def send_flow_mod(self, datapath, flow_info, src_port, dst_port): 160 | """ 161 | Build flow entry, and send it to datapath. 
162 | """ 163 | ofproto = datapath.ofproto 164 | parser = datapath.ofproto_parser 165 | actions = [] 166 | actions.append(parser.OFPActionOutput(dst_port)) 167 | 168 | match = parser.OFPMatch( 169 | eth_type=ETH_TYPE_IP, ipv4_src=flow_info[0], 170 | ipv4_dst=flow_info[1]) 171 | 172 | self.add_flow(datapath, 1, match, actions, 173 | idle_timeout=250, hard_timeout=0) 174 | 175 | 176 | def add_flow(self, dp, priority, match, actions, idle_timeout=0, hard_timeout=0): 177 | """ 178 | Send a flow entry to datapath. 179 | """ 180 | ofproto = dp.ofproto 181 | parser = dp.ofproto_parser 182 | inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, actions)] 183 | mod = parser.OFPFlowMod(datapath=dp, command=dp.ofproto.OFPFC_ADD, priority=priority, 184 | idle_timeout=idle_timeout, 185 | hard_timeout=hard_timeout, 186 | match=match, instructions=inst) 187 | dp.send_msg(mod) 188 | 189 | def build_packet_out(self, datapath, buffer_id, src_port, dst_port, data): 190 | """ 191 | Build packet out object. 192 | """ 193 | actions = [] 194 | if dst_port: 195 | actions.append(datapath.ofproto_parser.OFPActionOutput(dst_port)) 196 | 197 | msg_data = None 198 | if buffer_id == datapath.ofproto.OFP_NO_BUFFER: 199 | if data is None: 200 | return None 201 | msg_data = data 202 | 203 | out = datapath.ofproto_parser.OFPPacketOut( 204 | datapath=datapath, buffer_id=buffer_id, 205 | data=msg_data, in_port=src_port, actions=actions) 206 | return out 207 | 208 | def arp_forwarding(self, msg, src_ip, dst_ip): 209 | """ 210 | Send ARP packet to the destination host if the dst host record 211 | is existed. 212 | result = (datapath, port) of host 213 | """ 214 | datapath = msg.datapath 215 | ofproto = datapath.ofproto 216 | 217 | result = self.awareness.get_host_location(dst_ip) 218 | if result: 219 | # Host has been recorded in access table. 220 | datapath_dst, out_port = result[0], result[1] 221 | datapath = self.datapaths[datapath_dst] 222 | out = self.build_packet_out(datapath, ofproto.OFP_NO_BUFFER, 223 | ofproto.OFPP_CONTROLLER, 224 | out_port, msg.data) 225 | datapath.send_msg(out) 226 | self.logger.debug("Deliver ARP packet to knew host") 227 | else: 228 | 229 | pass 230 | 231 | def get_path(self, src, dst): 232 | if self.paths != None: 233 | #print ('PATHS: OK') 234 | path = self.paths.get(src).get(dst) 235 | # print('get_path return:', path) 236 | return path 237 | else: 238 | #print('Getting paths: OK') 239 | paths = self.get_dijkstra_paths_() 240 | path = paths.get(src).get(dst) 241 | return path 242 | 243 | def get_dijkstra_paths_(self): 244 | 245 | file = '/home/controlador/ryu/ryu/app/OSPF_loss/Proac/paths_loss.json' 246 | try: 247 | with open(file,'r') as json_file: 248 | paths_dict = json.load(json_file) 249 | paths_dict = ast.literal_eval(json.dumps(paths_dict)) 250 | self.paths = paths_dict 251 | return self.paths 252 | except ValueError as e: #error excpetion when trying to read the json and is still been updated 253 | return 254 | else: 255 | with open(file,'r') as json_file: #try again 256 | paths_dict = json.load(json_file) 257 | paths_dict = ast.literal_eval(json.dumps(paths_dict)) 258 | self.paths = paths_dict 259 | return self.paths 260 | 261 | def get_port_pair_from_link(self, link_to_port, src_dpid, dst_dpid): 262 | """ 263 | Get port pair of link, so that controller can install flow entry. 
264 | link_to_port = {(src_dpid,dst_dpid):(src_port,dst_port),} 265 | """ 266 | if (src_dpid, dst_dpid) in link_to_port: 267 | return link_to_port[(src_dpid, dst_dpid)] 268 | else: 269 | self.logger.info("Link from dpid:%s to dpid:%s is not in links" % 270 | (src_dpid, dst_dpid)) 271 | return None 272 | 273 | 274 | @set_ev_cls(ofp_event.EventOFPPortStatus, MAIN_DISPATCHER) 275 | def port_status_handler(self, ev): 276 | """ 277 | Handle the port status changed event. 278 | """ 279 | msg = ev.msg 280 | ofproto = msg.datapath.ofproto 281 | reason = msg.reason 282 | dpid = msg.datapath.id 283 | port_no = msg.desc.port_no 284 | 285 | reason_dict = {ofproto.OFPPR_ADD: "added", 286 | ofproto.OFPPR_DELETE: "deleted", 287 | ofproto.OFPPR_MODIFY: "modified", } 288 | 289 | if reason in reason_dict: 290 | print "switch%d: port %s %s" % (dpid, reason_dict[reason], port_no) 291 | else: 292 | print "switch%d: Illegal port state %s %s" % (dpid, port_no, reason) 293 | 294 | @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER) 295 | def packet_in_handler(self, ev): 296 | ''' 297 | In packet_in handler, we need to learn access_table by ARP and IP packets. 298 | Therefore, the first packet from UNKOWN host MUST be ARP 299 | ''' 300 | msg = ev.msg 301 | pkt = packet.Packet(msg.data) 302 | arp_pkt = pkt.get_protocol(arp.arp) 303 | if isinstance(arp_pkt, arp.arp): 304 | self.arp_forwarding(msg, arp_pkt.src_ip, arp_pkt.dst_ip) 305 | -------------------------------------------------------------------------------- /OSPF_loss/Proac/setting.py: -------------------------------------------------------------------------------- 1 | DISCOVERY_PERIOD = 5 # For discovering topology. 2 | 3 | MONITOR_PERIOD = 10 # For monitoring traffic 4 | 5 | DELAY_DETECTING_PERIOD = 7 6 | 7 | TOSHOW = True # For showing information in terminal 8 | -------------------------------------------------------------------------------- /OSPF_loss/Proac/simple_awareness.py: -------------------------------------------------------------------------------- 1 | import networkx as nx 2 | import matplotlib.pyplot as plt 3 | import time 4 | 5 | from ryu import cfg 6 | from ryu.base import app_manager 7 | from ryu.controller import ofp_event 8 | from ryu.base.app_manager import lookup_service_brick 9 | from ryu.controller.handler import MAIN_DISPATCHER 10 | from ryu.controller.handler import CONFIG_DISPATCHER 11 | from ryu.controller.handler import DEAD_DISPATCHER 12 | from ryu.controller.handler import set_ev_cls 13 | from ryu.ofproto import ofproto_v1_3 14 | from ryu.lib.packet import packet 15 | from ryu.lib.packet import ethernet 16 | from ryu.lib.packet import ipv4 17 | from ryu.lib.packet import arp 18 | from ryu.lib import hub 19 | from ryu.topology import event 20 | from ryu.topology.api import get_switch, get_link 21 | 22 | import setting 23 | 24 | CONF = cfg.CONF 25 | 26 | 27 | class simple_Awareness(app_manager.RyuApp): 28 | """ 29 | A Ryu app for discovering topology information. 30 | Provides many data services for other Apps, such as 31 | link_to_port, access_table, switch_port_table, access_ports, 32 | interior_ports, and topology graph. 33 | This represent the Topology discovery module of the Control Plane 34 | """ 35 | OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION] 36 | 37 | # List the event list should be listened. 
38 | events = [event.EventSwitchEnter, 39 | event.EventSwitchLeave, event.EventPortAdd, 40 | event.EventPortDelete, event.EventPortModify, 41 | event.EventLinkAdd, event.EventLinkDelete] 42 | 43 | def __init__(self, *args, **kwargs): 44 | super(simple_Awareness, self).__init__(*args, **kwargs) 45 | self.topology_api_app = self 46 | self.name = "awareness" 47 | self.link_to_port = {} # {(src_dpid,dst_dpid):(src_port,dst_port),} 48 | self.access_table = {} # {(sw,port):(ip, mac),} 49 | self.switch_port_table = {} # {dpid:set(port_num,),} 50 | self.access_ports = {} # {dpid:set(port_num,),} 51 | self.interior_ports = {} # {dpid:set(port_num,),} 52 | self.switches = [] # self.switches = [dpid,] 53 | self.shortest_paths = {} # {dpid:{dpid:[[path],],},} 54 | self.pre_link_to_port = {} 55 | self.pre_access_table = {} 56 | 57 | self.graph = nx.DiGraph() 58 | # Get initiation delay. 59 | self.initiation_delay = self.get_initiation_delay(4) 60 | self.start_time = time.time() 61 | 62 | # Start a green thread to discover network resource. 63 | self.discover_thread = hub.spawn(self._discover) 64 | 65 | 66 | def _discover(self): 67 | i = 0 68 | while True: 69 | 70 | self.show_topology() 71 | if i == 1: 72 | self.get_topology(None) 73 | i = 0 74 | hub.sleep(setting.DISCOVERY_PERIOD) 75 | i = i + 1 76 | 77 | def add_flow(self, dp, priority, match, actions, idle_timeout=0, hard_timeout=0): 78 | ofproto = dp.ofproto 79 | parser = dp.ofproto_parser 80 | inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, 81 | actions)] 82 | mod = parser.OFPFlowMod(datapath=dp, priority=priority, 83 | idle_timeout=idle_timeout, 84 | hard_timeout=hard_timeout, 85 | match=match, instructions=inst) 86 | dp.send_msg(mod) 87 | 88 | @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER) 89 | def switch_features_handler(self, ev): 90 | """ 91 | Install table-miss flow entry to datapaths. 92 | """ 93 | 94 | datapath = ev.msg.datapath 95 | ofproto = datapath.ofproto 96 | parser = datapath.ofproto_parser 97 | self.logger.info("switch:%s connected", datapath.id) 98 | 99 | 100 | # Install table-miss flow entry. 101 | match = parser.OFPMatch() 102 | actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER, 103 | ofproto.OFPCML_NO_BUFFER)] 104 | self.add_flow(datapath, 0, match, actions) 105 | 106 | 107 | @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER) 108 | def _packet_in_handler(self, ev): 109 | """ 110 | Handle the packet_in packet, and register the access info. 111 | """ 112 | msg = ev.msg 113 | datapath = msg.datapath 114 | in_port = msg.match['in_port'] 115 | pkt = packet.Packet(msg.data) 116 | eth_type = pkt.get_protocols(ethernet.ethernet)[0].ethertype #delay 117 | arp_pkt = pkt.get_protocol(arp.arp) 118 | ip_pkt = pkt.get_protocol(ipv4.ipv4) 119 | 120 | if arp_pkt: 121 | arp_src_ip = arp_pkt.src_ip 122 | arp_dst_ip = arp_pkt.dst_ip #delay 123 | mac = arp_pkt.src_mac 124 | # Record the access infomation. 125 | self.register_access_info(datapath.id, in_port, arp_src_ip, mac) 126 | 127 | elif ip_pkt: 128 | ip_src_ip = ip_pkt.src 129 | eth = pkt.get_protocols(ethernet.ethernet)[0] 130 | mac = eth.src 131 | # Record the access infomation. 132 | self.register_access_info(datapath.id, in_port, ip_src_ip, mac) 133 | else: 134 | pass 135 | 136 | @set_ev_cls(events) 137 | def get_topology(self, ev): 138 | """ 139 | Get topology info and calculate shortest paths. 140 | Note: In looped network, we should get the topology 141 | 20 or 30 seconds after the network went up. 
142 | """ 143 | present_time = time.time() 144 | if present_time - self.start_time < self.initiation_delay: #Set to 30s 145 | return 146 | 147 | self.logger.info("[Topology Discovery Ok]") 148 | switch_list = get_switch(self.topology_api_app, None) 149 | self.create_port_map(switch_list) 150 | time.sleep(0.5) 151 | self.switches = [sw.dp.id for sw in switch_list] 152 | links = get_link(self.topology_api_app, None) 153 | self.create_interior_links(links) 154 | self.create_access_ports() 155 | self.graph = self.get_graph(self.link_to_port.keys()) 156 | 157 | def get_host_location(self, host_ip): 158 | """ 159 | Get host location info ((datapath, port)) according to the host ip. 160 | self.access_table = {(sw,port):(ip, mac),} 161 | """ 162 | 163 | for key in self.access_table.keys(): 164 | if self.access_table[key][0] == host_ip: 165 | return key 166 | self.logger.info("%s location is not found." % host_ip) 167 | return None 168 | 169 | def get_graph(self, link_list): 170 | """ 171 | Get Adjacency matrix from link_to_port. 172 | """ 173 | _graph = self.graph.copy() 174 | for src in self.switches: 175 | for dst in self.switches: 176 | if src == dst: 177 | _graph.add_edge(src, dst, weight=0) 178 | elif (src, dst) in link_list: 179 | _graph.add_edge(src, dst, weight=1) 180 | else: 181 | pass 182 | return _graph 183 | 184 | def get_initiation_delay(self, fanout): 185 | """ 186 | Get initiation delay. 187 | """ 188 | if fanout == 4: 189 | delay = 10 190 | elif fanout == 8: 191 | delay = 20 192 | else: 193 | delay = 20 194 | return delay 195 | 196 | def create_port_map(self, switch_list): 197 | """ 198 | Create interior_port table and access_port table. 199 | """ 200 | for sw in switch_list: 201 | dpid = sw.dp.id 202 | self.switch_port_table.setdefault(dpid, set()) 203 | # switch_port_table is equal to interior_ports plus access_ports. 204 | self.interior_ports.setdefault(dpid, set()) 205 | self.access_ports.setdefault(dpid, set()) 206 | for port in sw.ports: 207 | # switch_port_table = {dpid:set(port_num,),} 208 | self.switch_port_table[dpid].add(port.port_no) 209 | 210 | def create_interior_links(self, link_list): 211 | """ 212 | Get links' srouce port to dst port from link_list. 213 | link_to_port = {(src_dpid,dst_dpid):(src_port,dst_port),} 214 | """ 215 | for link in link_list: 216 | src = link.src 217 | dst = link.dst 218 | self.link_to_port[(src.dpid, dst.dpid)] = (src.port_no, dst.port_no) 219 | # Find the access ports and interior ports. 220 | if link.src.dpid in self.switches: 221 | self.interior_ports[link.src.dpid].add(link.src.port_no) 222 | if link.dst.dpid in self.switches: 223 | self.interior_ports[link.dst.dpid].add(link.dst.port_no) 224 | 225 | def create_access_ports(self): 226 | """ 227 | Get ports without link into access_ports. 228 | """ 229 | for sw in self.switch_port_table: 230 | all_port_table = self.switch_port_table[sw] 231 | interior_port = self.interior_ports[sw] 232 | # That comes the access port of the switch. 233 | self.access_ports[sw] = all_port_table - interior_port 234 | 235 | def register_access_info(self, dpid, in_port, ip, mac): 236 | """ 237 | Register access host info into access table. 
238 | """ 239 | if in_port in self.access_ports[dpid]: 240 | if (dpid, in_port) in self.access_table: 241 | if self.access_table[(dpid, in_port)] == (ip, mac): 242 | return 243 | else: 244 | self.access_table[(dpid, in_port)] = (ip, mac) 245 | return 246 | else: 247 | self.access_table.setdefault((dpid, in_port), None) 248 | self.access_table[(dpid, in_port)] = (ip, mac) 249 | return 250 | 251 | def show_topology(self): 252 | if self.pre_link_to_port != self.link_to_port:# and setting.TOSHOW: 253 | # It means the link_to_port table has changed. 254 | _graph = self.graph.copy() 255 | print "\n---------------------Link Port---------------------" 256 | print '%6s' % ('switch'), 257 | for node in sorted([node for node in _graph.nodes()], key=lambda node: node): 258 | print '%6d' % node, 259 | print 260 | for node1 in sorted([node for node in _graph.nodes()], key=lambda node: node): 261 | print '%6d' % node1, 262 | for node2 in sorted([node for node in _graph.nodes()], key=lambda node: node): 263 | if (node1, node2) in self.link_to_port.keys(): 264 | print '%6s' % str(self.link_to_port[(node1, node2)]), 265 | else: 266 | print '%6s' % '/', 267 | print 268 | print 269 | self.pre_link_to_port = self.link_to_port.copy() 270 | 271 | if self.pre_access_table != self.access_table:# and setting.TOSHOW: 272 | # It means the access_table has changed. 273 | print "\n----------------Access Host-------------------" 274 | print '%10s' % 'switch', '%10s' % 'port', '%22s' % 'Host' 275 | if not self.access_table.keys(): 276 | print " NO found host" 277 | else: 278 | for sw in sorted(self.access_table.keys()): 279 | print '%10d' % sw[0], '%10d ' % sw[1], self.access_table[sw] 280 | print 281 | self.pre_access_table = self.access_table.copy() 282 | 283 | # nx.draw(self.graph) 284 | # plt.show() 285 | # plt.savefig("/home/controlador/ryu/ryu/app/SDNapps_proac/%d.png" % int(time.time())) -------------------------------------------------------------------------------- /OSPF_loss/Proac/simple_delay.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | from ryu import cfg 3 | from ryu.base import app_manager 4 | from ryu.base.app_manager import lookup_service_brick 5 | from ryu.controller import ofp_event 6 | from ryu.controller.handler import MAIN_DISPATCHER, DEAD_DISPATCHER 7 | from ryu.controller.handler import set_ev_cls 8 | from ryu.ofproto import ofproto_v1_3 9 | from ryu.lib import hub 10 | from ryu.topology.switches import Switches 11 | from ryu.topology.switches import LLDPPacket 12 | from ryu.app import simple_switch_13 13 | import networkx as nx 14 | import time 15 | import json,ast 16 | import csv 17 | import setting 18 | 19 | import simple_awareness 20 | import simple_monitor 21 | 22 | CONF = cfg.CONF 23 | 24 | 25 | class simple_Delay(app_manager.RyuApp): 26 | """ 27 | A Ryu app for calculating link delay by using echo replay 28 | messages from the Control Plane to the datapaths in the Data Plane. 
29 | It is part of the Statistics module of the Control Plane 30 | 31 | """ 32 | OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION] 33 | 34 | def __init__(self, *args, **kwargs): 35 | super(simple_Delay, self).__init__(*args, **kwargs) 36 | self.name = "delay" 37 | self.sending_echo_request_interval = 0.1 38 | self.sw_module = lookup_service_brick('switches') 39 | self.monitor = lookup_service_brick('monitor') 40 | self.awareness = lookup_service_brick('awareness') 41 | self.count = 0 42 | self.datapaths = {} 43 | self.echo_latency = {} 44 | self.link_delay = {} 45 | self.delay_dict = {} 46 | self.measure_thread = hub.spawn(self._detector) 47 | 48 | @set_ev_cls(ofp_event.EventOFPStateChange, 49 | [MAIN_DISPATCHER, DEAD_DISPATCHER]) 50 | def _state_change_handler(self, ev): 51 | datapath = ev.datapath 52 | if ev.state == MAIN_DISPATCHER: 53 | if not datapath.id in self.datapaths: 54 | self.logger.debug('Register datapath: %016x', datapath.id) 55 | self.datapaths[datapath.id] = datapath 56 | elif ev.state == DEAD_DISPATCHER: 57 | if datapath.id in self.datapaths: 58 | self.logger.debug('Unregister datapath: %016x', datapath.id) 59 | del self.datapaths[datapath.id] 60 | 61 | def _detector(self): 62 | """ 63 | Delay detecting functon. 64 | Send echo request and calculate link delay periodically 65 | """ 66 | while True: 67 | self.count += 1 68 | self._send_echo_request() 69 | self.create_link_delay() 70 | try: 71 | self.awareness.shortest_paths = {} 72 | self.logger.debug("Refresh the shortest_paths") 73 | except: 74 | self.awareness = lookup_service_brick('awareness') 75 | if self.awareness is not None: 76 | self.show_delay_statis() 77 | hub.sleep(setting.DELAY_DETECTING_PERIOD) 78 | 79 | def _send_echo_request(self): 80 | """ 81 | Seng echo request msg to datapath. 82 | """ 83 | for datapath in self.datapaths.values(): 84 | parser = datapath.ofproto_parser 85 | echo_req = parser.OFPEchoRequest(datapath, 86 | data="%.12f" % time.time()) 87 | datapath.send_msg(echo_req) 88 | # Important! Don't send echo request together, it will 89 | # generate a lot of echo reply almost in the same time. 90 | # which will generate a lot of delay of waiting in queue 91 | # when processing echo reply in echo_reply_handler. 92 | 93 | hub.sleep(self.sending_echo_request_interval) 94 | 95 | @set_ev_cls(ofp_event.EventOFPEchoReply, MAIN_DISPATCHER) 96 | def echo_reply_handler(self, ev): 97 | """ 98 | Handle the echo reply msg, and get the latency of link. 99 | """ 100 | now_timestamp = time.time() 101 | try: 102 | latency = now_timestamp - eval(ev.msg.data) 103 | self.echo_latency[ev.msg.datapath.id] = latency 104 | except: 105 | return 106 | 107 | def get_delay(self, src, dst): 108 | """ 109 | Get link delay. 
110 | Controller 111 | | | 112 | src echo latency| |dst echo latency 113 | | | 114 | SwitchA-------SwitchB 115 | 116 | fwd_delay---> 117 | <----reply_delay 118 | delay = (forward delay + reply delay - src datapath's echo latency 119 | """ 120 | try: 121 | fwd_delay = self.awareness.graph[src][dst]['lldpdelay'] 122 | re_delay = self.awareness.graph[dst][src]['lldpdelay'] 123 | src_latency = self.echo_latency[src] 124 | dst_latency = self.echo_latency[dst] 125 | delay = (fwd_delay + re_delay - src_latency - dst_latency)/2 126 | return max(delay, 0) 127 | except: 128 | return float('inf') 129 | 130 | def _save_lldp_delay(self, src=0, dst=0, lldpdelay=0): 131 | try: 132 | self.awareness.graph[src][dst]['lldpdelay'] = lldpdelay 133 | except: 134 | if self.awareness is None: 135 | self.awareness = lookup_service_brick('awareness') 136 | return 137 | 138 | def create_link_delay(self): 139 | """ 140 | Create link delay data, and save it into graph object. 141 | """ 142 | try: 143 | for src in self.awareness.graph: 144 | for dst in self.awareness.graph[src]: 145 | if src == dst: 146 | self.awareness.graph[src][dst]['delay'] = 0 147 | continue 148 | delay = self.get_delay(src, dst) 149 | self.awareness.graph[src][dst]['delay'] = delay 150 | if self.awareness is not None: 151 | for dp in self.awareness.graph: 152 | self.delay_dict.setdefault(dp, {}) 153 | self.get_link_delay() 154 | except: 155 | if self.awareness is None: 156 | self.awareness = lookup_service_brick('awareness') 157 | return 158 | 159 | 160 | @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER) 161 | def packet_in_handler(self, ev): 162 | """ 163 | Explore LLDP packet and get the delay of link (fwd and reply). 164 | """ 165 | msg = ev.msg 166 | try: 167 | src_dpid, src_port_no = LLDPPacket.lldp_parse(msg.data) 168 | dpid = msg.datapath.id 169 | if self.sw_module is None: 170 | self.sw_module = lookup_service_brick('switches') 171 | 172 | for port in self.sw_module.ports.keys(): 173 | if src_dpid == port.dpid and src_port_no == port.port_no: 174 | delay = self.sw_module.ports[port].delay 175 | self._save_lldp_delay(src=src_dpid, dst=dpid, 176 | lldpdelay=delay) 177 | except LLDPPacket.LLDPUnknownFormat as e: 178 | return 179 | 180 | def get_link_delay(self): 181 | ''' 182 | Calculates total link dealy and save it in self.link_delay[(node1,node2)]: link_delay 183 | ''' 184 | for src in self.awareness.graph: 185 | for dst in self.awareness.graph[src]: 186 | if src != dst: 187 | delay1 = self.awareness.graph[src][dst]['delay'] 188 | delay2 = self.awareness.graph[dst][src]['delay'] 189 | link_delay = ((delay1 + delay2)*1000.0)/2 #saves in ms 190 | link = (src, dst) 191 | self.link_delay[link] = link_delay 192 | self.delay_dict[src][dst] = link_delay 193 | 194 | if self.monitor is None: 195 | print('No monitor') 196 | self.monitor = lookup_service_brick('monitor') 197 | 198 | if self.awareness.link_to_port: 199 | self.write_dijkstra_paths() 200 | 201 | def write_dijkstra_paths(self): 202 | # loss_dict_cost = {} #Free bw in value of cost for dijsktra (1/bwd) 203 | loss_dict = {} #dictionary in format for pass to dijkstra 204 | for dp in self.awareness.switches: 205 | loss_dict.setdefault(dp,{}) 206 | 207 | for loss in self.monitor.link_loss: 208 | loss_dict[loss[0]][loss[1]] = self.monitor.link_loss[loss] 209 | 210 | print('writing paths file') 211 | time_init = time.time() 212 | paths = {} 213 | for dp in self.awareness.switches: 214 | paths.setdefault(dp,{}) 215 | for src in self.awareness.switches: 216 | for dst in 
self.awareness.switches: 217 | if src != dst: 218 | paths[src][dst] = self.dijkstra(loss_dict, src, dst, visited=[], distances={}, predecessors={}) 219 | 220 | with open('/home/controlador/ryu/ryu/app/OSPF_loss/Proac/paths_loss.json','w') as json_file: 221 | json.dump(paths, json_file, indent=2) 222 | 223 | total_time = time.time() - time_init 224 | # print(total_time) 225 | with open('/home/controlador/ryu/ryu/app/OSPF_loss/Proac/times.txt','a') as txt_file: 226 | txt_file.write(str(total_time)+'\n') 227 | self.calc_stretch() 228 | 229 | def dijkstra(self, graph, src, dest, visited=[], distances={}, predecessors={}): 230 | """ calculates a shortest path tree routed in src 231 | """ 232 | 233 | # a few sanity checks 234 | if src not in graph: 235 | raise TypeError('The root of the shortest path tree cannot be found') 236 | if dest not in graph: 237 | raise TypeError('The target of the shortest path cannot be found') 238 | # ending condition 239 | if src == dest: 240 | # We build the shortest path and display it 241 | path = [] 242 | pred = dest 243 | while pred != None: 244 | path.append(pred) 245 | pred = predecessors.get(pred, None) 246 | 247 | return list(reversed(path)) 248 | else: 249 | # if it is the initial run, initializes the cost 250 | if not visited: 251 | distances[src] = 0 252 | # visit the neighbors 253 | for neighbor in graph[src]: 254 | if neighbor not in visited: 255 | new_distance = distances[src] + graph[src][neighbor] 256 | if new_distance < distances.get(neighbor, float('inf')): 257 | distances[neighbor] = new_distance 258 | predecessors[neighbor] = src 259 | # mark as visited 260 | 261 | visited.append(src) 262 | # now that all neighbors have been visited: recurse 263 | # select the non visited node with lowest distance 'x' 264 | # run Dijskstra with src='x' 265 | unvisited = {} 266 | for k in graph: 267 | if k not in visited: 268 | unvisited[k] = distances.get(k, float('inf')) #sets the cost of link to the src neighbors with the actual value and inf for the non neighbors 269 | x = min(unvisited, key=unvisited.get) #find w not in N' such that D(w) is a minimum 270 | return self.dijkstra(graph, x, dest, visited, distances, predecessors) 271 | 272 | def get_paths_dijkstra(self): 273 | file_dijkstra = '/home/controlador/ryu/ryu/app/OSPF_loss/Proac/paths_loss.json' 274 | with open(file_dijkstra,'r') as json_file: 275 | paths_dict = json.load(json_file) 276 | paths_dijkstra = ast.literal_eval(json.dumps(paths_dict)) 277 | return paths_dijkstra 278 | 279 | def get_paths_base(self): 280 | file_base = '/home/controlador/ryu/ryu/app/OSPF_loss/Proac/paths_weight.json' 281 | with open(file_base,'r') as json_file: 282 | paths_dict = json.load(json_file) 283 | paths_base = ast.literal_eval(json.dumps(paths_dict)) 284 | return paths_base 285 | 286 | def stretch(self, paths, paths_base, src, dst): 287 | add_stretch = len(paths.get(str(src)).get(str(dst))) - len(paths_base.get(str(src)).get(str(dst))) 288 | mul_stretch = len(paths.get(str(src)).get(str(dst))) / len(paths_base.get(str(src)).get(str(dst))) 289 | return add_stretch, mul_stretch 290 | 291 | def calc_stretch(self): 292 | paths_base = self.get_paths_base() 293 | paths_dijkstra = self.get_paths_dijkstra() 294 | cont_dijkstra = 0 295 | a = time.time() 296 | sw = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23] 297 | 298 | with open('/home/controlador/ryu/ryu/app/OSPF_loss/Proac/stretch/'+str(self.count)+'_stretch.csv','wb') as csvfile: 299 | header = ['src','dst','add_st','mul_st'] 300 | 
file = csv.writer(csvfile, delimiter=',',quotechar='|', quoting=csv.QUOTE_MINIMAL) 301 | file.writerow(header) 302 | for src in sw: 303 | for dst in sw: 304 | if src != dst: 305 | add_stretch, mul_stretch = self.stretch(paths_dijkstra, paths_base, src, dst) 306 | # print(add_stretch) 307 | # print(mul_stretch) 308 | file.writerow([src, dst, add_stretch, mul_stretch]) 309 | total_time = time.time() - a 310 | 311 | def show_delay_statis(self): 312 | if self.awareness is None: 313 | print("Not doing nothing, awareness none") 314 | # else: 315 | # print("Latency ok") 316 | # if setting.TOSHOW and self.awareness is not None: 317 | # self.logger.info("\nsrc dst delay") 318 | # self.logger.info("---------------------------") 319 | # for src in self.awareness.graph: 320 | # for dst in self.awareness.graph[src]: 321 | # delay = self.awareness.graph[src][dst]['delay'] 322 | # self.logger.info("%s <--> %s : %s" % (src, dst, delay)) 323 | 324 | -------------------------------------------------------------------------------- /OSPF_loss/Proac/simple_monitor.py: -------------------------------------------------------------------------------- 1 | from operator import attrgetter 2 | 3 | from ryu.base import app_manager 4 | from ryu.controller import ofp_event 5 | from ryu.base.app_manager import lookup_service_brick 6 | from ryu.controller.handler import MAIN_DISPATCHER, DEAD_DISPATCHER 7 | from ryu.controller.handler import CONFIG_DISPATCHER 8 | from ryu.controller.handler import set_ev_cls 9 | from ryu.topology import event, switches 10 | from ryu.ofproto.ether import ETH_TYPE_IP 11 | from ryu.topology.api import get_switch, get_link 12 | from ryu.ofproto import ofproto_v1_3 13 | from ryu.lib import hub 14 | from ryu.lib.packet import packet 15 | from ryu.lib.packet import arp 16 | 17 | import time 18 | 19 | import simple_awareness 20 | import simple_delay 21 | # import requests 22 | import json, ast 23 | import setting 24 | import csv 25 | import time 26 | 27 | class simple_Monitor(app_manager.RyuApp): 28 | """ 29 | A Ryu app for netowrk monitoring. It retreieves statistics information through openflow 30 | of datapaths at the Data Plane. 31 | This class contains functions belonging to the Statistics module and Flow Installation module 32 | of the Control Plane. 33 | I also contains the functions corresponding to the Process Statistics module of the 34 | Management Plane in order to adventage the monitorin threading for statistics processing. 35 | """ 36 | 37 | OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION] 38 | 39 | def __init__(self, *args, **kwargs): 40 | super(simple_Monitor, self).__init__(*args, **kwargs) 41 | self.name = "monitor" 42 | self.count_monitor = 0 43 | self.topology_api_app = self 44 | self.datapaths = {} 45 | self.port_stats = {} 46 | self.port_speed = {} 47 | self.flow_stats = {} 48 | self.flow_speed = {} 49 | self.flow_loss = {} 50 | self.port_loss = {} 51 | self.link_loss = {} 52 | self.net_info = {} 53 | self.net_metrics= {} 54 | self.link_free_bw = {} 55 | self.link_used_bw = {} 56 | self.stats = {} 57 | self.port_features = {} 58 | self.free_bandwidth = {} 59 | self.paths = {} 60 | self.installed_paths = {} 61 | self.delay = lookup_service_brick('delay') 62 | self.awareness = lookup_service_brick('awareness') 63 | 64 | self.monitor_thread = hub.spawn(self.monitor) 65 | 66 | @set_ev_cls(ofp_event.EventOFPStateChange, 67 | [MAIN_DISPATCHER, DEAD_DISPATCHER]) 68 | def state_change_handler(self, ev): 69 | """ 70 | Record datapath information. 
71 | """ 72 | datapath = ev.datapath 73 | if ev.state == MAIN_DISPATCHER: 74 | if datapath.id not in self.datapaths: 75 | self.logger.debug('Datapath registered: %016x', datapath.id) 76 | print 'Datapath registered:', datapath.id ## 77 | self.datapaths[datapath.id] = datapath 78 | elif ev.state == DEAD_DISPATCHER: 79 | if datapath.id in self.datapaths: 80 | self.logger.debug('Datapath unregistered: %016x', datapath.id) 81 | print 'Datapath unregistered:', datapath.id 82 | del self.datapaths[datapath.id] 83 | 84 | def monitor(self): 85 | """ 86 | Main entry method of monitoring traffic. 87 | """ 88 | while True: 89 | self.stats['flow'] = {} 90 | self.stats['port'] = {} 91 | print("[Statistics Module Ok]") 92 | print("[{0}]".format(self.count_monitor)) 93 | if self.delay is None: 94 | print('No monitor') 95 | self.delay = lookup_service_brick('delay') 96 | for dp in self.datapaths.values(): 97 | self.port_features.setdefault(dp.id, {}) #setdefault() returns the value of the item with the specified key 98 | self.paths = None 99 | self.request_stats(dp) 100 | hub.sleep(1) 101 | 102 | if self.stats['port']: 103 | self.count_monitor += 1 104 | self.get_port_loss() 105 | self.get_link_free_bw() 106 | self.get_link_used_bw() 107 | self.write_values() 108 | 109 | hub.sleep(setting.MONITOR_PERIOD) 110 | if self.stats['port']: 111 | self.show_stat('link') 112 | hub.sleep(1) 113 | 114 | #---------------------CONTROL PLANE FUNCTIONS--------------------------------- 115 | #---------------------STATISTICS MODULE FUNCTIONS ---------------------------- 116 | 117 | def request_stats(self, datapath): 118 | self.logger.debug('send stats request: %016x', datapath.id) 119 | ofproto = datapath.ofproto 120 | parser = datapath.ofproto_parser 121 | 122 | req = parser.OFPPortDescStatsRequest(datapath, 0) #for port description 123 | datapath.send_msg(req) 124 | 125 | req = parser.OFPFlowStatsRequest(datapath) #individual flow statistics 126 | datapath.send_msg(req) 127 | 128 | req = parser.OFPPortStatsRequest(datapath, 0, ofproto.OFPP_ANY) 129 | datapath.send_msg(req) 130 | 131 | def save_stats(self, _dict, key, value, length=5): 132 | if key not in _dict: 133 | _dict[key] = [] 134 | _dict[key].append(value) 135 | if len(_dict[key]) > length: 136 | _dict[key].pop(0) 137 | 138 | def get_speed(self, now, pre, period): #bits/s 139 | if period: 140 | return ((now - pre)*8) / period 141 | else: 142 | return 0 143 | 144 | def get_time(self, sec, nsec): #Total time that the flow was alive in seconds 145 | return sec + nsec / 1000000000.0 146 | 147 | def get_period(self, n_sec, n_nsec, p_sec, p_nsec): # (time las flow, time) 148 | # calculates period of time between flows 149 | return self.get_time(n_sec, n_nsec) - self.get_time(p_sec, p_nsec) 150 | 151 | def get_sw_dst(self, dpid, out_port): 152 | for key in self.awareness.link_to_port: 153 | src_port = self.awareness.link_to_port[key][0] 154 | if key[0] == dpid and src_port == out_port: 155 | dst_sw = key[1] 156 | dst_port = self.awareness.link_to_port[key][1] 157 | return (dst_sw, dst_port) 158 | 159 | def get_link_bw(self, file, src_dpid, dst_dpid): 160 | fin = open(file, "r") 161 | bw_capacity_dict = {} 162 | for line in fin: 163 | a = line.split(',') 164 | if a: 165 | s1 = a[0] 166 | s2 = a[1] 167 | # bwd = a[2] #random caps 168 | bwd = a[3] #original caps 169 | bw_capacity_dict.setdefault(s1,{}) 170 | bw_capacity_dict[str(a[0])][str(a[1])] = bwd 171 | fin.close() 172 | bw_link = bw_capacity_dict[str(src_dpid)][str(dst_dpid)] 173 | return bw_link 174 | 175 | def 
get_free_bw(self, port_capacity, speed): 176 | # freebw: Kbit/s 177 | return max(port_capacity - (speed/ 1000.0), 0) 178 | 179 | #------------------MANAGEMENT PLANE MODULE --------------------------- 180 | #------------------PROCESS STATISTICS MODULE FUNCTIONS---------------- 181 | 182 | def get_flow_loss(self): 183 | #Get per flow loss 184 | bodies = self.stats['flow'] 185 | for dp in bodies.keys(): 186 | list_flows = sorted([flow for flow in bodies[dp] if flow.priority == 1], 187 | key=lambda flow: (flow.match.get('ipv4_src'),flow.match.get('ipv4_dst'))) 188 | for stat in list_flows: 189 | out_port = stat.instructions[0].actions[0].port 190 | if self.awareness.link_to_port and out_port != 1: #get loss form ports of network 191 | key = (stat.match.get('ipv4_src'), stat.match.get('ipv4_dst')) 192 | tmp1 = self.flow_stats[dp][key] 193 | byte_count_src = tmp1[-1][1] 194 | 195 | result = self.get_sw_dst(dp, out_port) 196 | dst_sw = result[0] 197 | tmp2 = self.flow_stats[dst_sw][key] 198 | byte_count_dst = tmp2[-1][1] 199 | flow_loss = byte_count_src - byte_count_dst 200 | self.save_stats(self.flow_loss[dp], key, flow_loss, 5) 201 | 202 | def get_port_loss(self): 203 | #Get loss_port 204 | bodies = self.stats['port'] 205 | for dp in sorted(bodies.keys()): 206 | for stat in sorted(bodies[dp], key=attrgetter('port_no')): 207 | if self.awareness.link_to_port and stat.port_no != 1 and stat.port_no != ofproto_v1_3.OFPP_LOCAL: #get loss form ports of network 208 | key1 = (dp, stat.port_no) 209 | tmp1 = self.port_stats[key1] 210 | tx_bytes_src = tmp1[-1][0] 211 | tx_pkts_src = tmp1[-1][8] 212 | 213 | key2 = self.get_sw_dst(dp, stat.port_no) 214 | tmp2 = self.port_stats[key2] 215 | rx_bytes_dst = tmp2[-1][1] 216 | rx_pkts_dst = tmp2[-1][9] 217 | loss_port = float(tx_pkts_src - rx_pkts_dst) / tx_pkts_src #loss rate 218 | values = (loss_port, key2) 219 | self.save_stats(self.port_loss[dp], key1, values, 5) 220 | 221 | #Calculates the total link loss and save it in self.link_loss[(node1,node2)]:loss 222 | for dp in self.port_loss.keys(): 223 | for port in self.port_loss[dp]: 224 | key2 = self.port_loss[dp][port][-1][1] 225 | loss_src = self.port_loss[dp][port][-1][0] 226 | # tx_src = self.port_loss[dp][port][-1][1] 227 | loss_dst = self.port_loss[key2[0]][key2][-1][0] 228 | # tx_dst = self.port_loss[key2[0]][key2][-1][1] 229 | loss_l = (abs(loss_src) + abs(loss_dst)) / 2 230 | link = (dp, key2[0]) 231 | self.link_loss[link] = loss_l*100.0 232 | 233 | def get_link_free_bw(self): 234 | #Calculates the total free bw of link and save it in self.link_free_bw[(node1,node2)]:link_free_bw 235 | for dp in self.free_bandwidth.keys(): 236 | for port in self.free_bandwidth[dp]: 237 | free_bw1 = self.free_bandwidth[dp][port] 238 | key2 = self.get_sw_dst(dp, port) #key2 = (dp,port) 239 | free_bw2= self.free_bandwidth[key2[0]][key2[1]] 240 | link_free_bw = (free_bw1 + free_bw2)/2 241 | link = (dp, key2[0]) 242 | self.link_free_bw[link] = link_free_bw 243 | 244 | def get_link_used_bw(self): 245 | #Calculates the total free bw of link and save it in self.link_free_bw[(node1,node2)]:link_free_bw 246 | for key in self.port_speed.keys(): 247 | used_bw1 = self.port_speed[key][-1] 248 | key2 = self.get_sw_dst(key[0], key[1]) #key2 = (dp,port) 249 | used_bw2 = self.port_speed[key2][-1] 250 | link_used_bw = (used_bw1 + used_bw2)/2 251 | link = (key[0], key2[0]) 252 | self.link_used_bw[link] = link_used_bw 253 | 254 | #---------------------CONTROL PLANE FUNCTIONS--------------------------------- 255 | 
#---------------------STATISTICS MODULE FUNCTIONS ---------------------------- 256 | 257 | @set_ev_cls(ofp_event.EventOFPFlowStatsReply, MAIN_DISPATCHER) #OK 258 | def flow_stats_reply_handler(self, ev): 259 | """ 260 | Save flow stats reply information into self.flow_stats. 261 | Calculate flow speed and Save it. 262 | self.flow_stats = {dpid:{(ipv4_src, ipv4_dst):[(packet_count, byte_count, duration_sec, duration_nsec),],},} 263 | self.flow_speed = {dpid:{(ipv4_src, ipv4_dst):[speed,],},} 264 | self.flow_loss = {dpid:{(ipv4_src, ipv4_dst, dst_sw):[loss,],},} 265 | """ 266 | 267 | body = ev.msg.body 268 | dpid = ev.msg.datapath.id 269 | self.stats['flow'][dpid] = body 270 | self.flow_stats.setdefault(dpid, {}) 271 | self.flow_speed.setdefault(dpid, {}) 272 | self.flow_loss.setdefault(dpid, {}) 273 | 274 | #flows.append('table_id=%s ' 275 | # 'duration_sec=%d duration_nsec=%d ' 276 | # 'priority=%d ' 277 | # 'idle_timeout=%d hard_timeout=%d flags=0x%04x ' 278 | # 'cookie=%d packet_count=%d byte_count=%d ' 279 | # 'match=%s instructions=%s' % 280 | # (stat.table_id, 281 | # stat.duration_sec, stat.duration_nsec, 282 | # stat.priority, 283 | # stat.idle_timeout, stat.hard_timeout, stat.flags, 284 | # stat.cookie, stat.packet_count, stat.byte_count, 285 | # stat.match, stat.instructions) 286 | 287 | for stat in sorted([flow for flow in body if flow.priority == 1], 288 | key=lambda flow: (flow.match.get('ipv4_src'), 289 | flow.match.get('ipv4_dst'))): 290 | key = (stat.match.get('ipv4_src'), stat.match.get('ipv4_dst')) 291 | 292 | value = (stat.packet_count, stat.byte_count, 293 | stat.duration_sec, stat.duration_nsec)#duration_sec: Time flow was alive in seconds 294 | #duration_nsec: Time flow was alive in nanoseconds beyond duration_sec 295 | self.save_stats(self.flow_stats[dpid], key, value, 5) 296 | 297 | # CALCULATE FLOW BYTE RATE 298 | pre = 0 299 | period = setting.MONITOR_PERIOD 300 | tmp = self.flow_stats[dpid][key] 301 | if len(tmp) > 1: 302 | pre = tmp[-2][1] #penultimo flow byte_count 303 | period = self.get_period(tmp[-1][2], tmp[-1][3], #valores (sec,nsec) ultimo flow, penultimo flow) 304 | tmp[-2][2], tmp[-2][3]) 305 | speed = self.get_speed(self.flow_stats[dpid][key][-1][1], #ultimo flow byte_count, penultimo byte_count, periodo 306 | pre, period) 307 | self.save_stats(self.flow_speed[dpid], key, speed, 5) #bits/s 308 | 309 | @set_ev_cls(ofp_event.EventOFPPortStatsReply, MAIN_DISPATCHER) 310 | def port_stats_reply_handler(self, ev): 311 | a = time.time() 312 | body = ev.msg.body 313 | dpid = ev.msg.datapath.id 314 | 315 | self.stats['port'][dpid] = body 316 | self.free_bandwidth.setdefault(dpid, {}) 317 | self.port_loss.setdefault(dpid, {}) 318 | """ 319 | Save port's stats information into self.port_stats. 320 | Calculate port speed and Save it. 321 | self.port_stats = {(dpid, port_no):[(tx_bytes, rx_bytes, rx_errors, duration_sec, duration_nsec),],} 322 | self.port_speed = {(dpid, port_no):[speed,],} 323 | Note: The transmit performance and receive performance are independent of a port. 324 | Calculate the load of a port only using tx_bytes. 
325 | 326 | Replay message content: 327 | (stat.port_no, 328 | stat.rx_packets, stat.tx_packets, 329 | stat.rx_bytes, stat.tx_bytes, 330 | stat.rx_dropped, stat.tx_dropped, 331 | stat.rx_errors, stat.tx_errors, 332 | stat.rx_frame_err, stat.rx_over_err, 333 | stat.rx_crc_err, stat.collisions, 334 | stat.duration_sec, stat.duration_nsec)) 335 | """ 336 | 337 | for stat in sorted(body, key=attrgetter('port_no')): #get the value of port_no form body 338 | port_no = stat.port_no 339 | key = (dpid, port_no) 340 | value = (stat.tx_bytes, stat.rx_bytes, stat.rx_errors, 341 | stat.duration_sec, stat.duration_nsec, stat.tx_errors, stat.tx_dropped, stat.rx_dropped, stat.tx_packets, stat.rx_packets) 342 | self.save_stats(self.port_stats, key, value, 5) 343 | 344 | if port_no != ofproto_v1_3.OFPP_LOCAL: 345 | if port_no != 1 and self.awareness.link_to_port : 346 | # Get port speed and Save it. 347 | pre = 0 348 | period = setting.MONITOR_PERIOD 349 | tmp = self.port_stats[key] 350 | if len(tmp) > 1: 351 | # Calculate with the tx_bytes and rx_bytes 352 | pre = tmp[-2][0] + tmp[-2][1] #penultimo port tx_bytes 353 | period = self.get_period(tmp[-1][3], tmp[-1][4], tmp[-2][3], tmp[-2][4]) #periodo entre el ultimo y penultimo total bytes en el puerto 354 | speed = self.get_speed(self.port_stats[key][-1][0] + self.port_stats[key][-1][1], pre, period) #speed in bits/s 355 | self.save_stats(self.port_speed, key, speed, 5) 356 | 357 | #Get links capacities 358 | file = '/home/controlador/ryu/ryu/app/OSPF_loss/Proac/bw_r.txt' #random link capacities 359 | link_to_port = self.awareness.link_to_port 360 | 361 | for k in list(link_to_port.keys()): 362 | if k[0] == dpid: 363 | if link_to_port[k][0] == port_no: 364 | dst_dpid = k[1] 365 | 366 | #FUNCIONA CON LISTA----------------------------- 367 | # list_dst_dpid = [k for k in list(link_to_port.keys()) if k[0] == dpid and link_to_port[k][0] == port_no] 368 | # if len(list_dst_dpid) > 0: 369 | # dst_dpid = list_dst_dpid[0][1] 370 | # ----------------------------------------- 371 | bw_link = float(self.get_link_bw(file, dpid, dst_dpid)) 372 | port_state = self.port_features.get(dpid).get(port_no) 373 | 374 | if port_state: 375 | bw_link_kbps = bw_link * 1000.0 376 | self.port_features[dpid][port_no].append(bw_link_kbps) 377 | free_bw = self.get_free_bw(bw_link_kbps, speed) 378 | self.free_bandwidth[dpid][port_no] = free_bw 379 | 380 | @set_ev_cls(ofp_event.EventOFPPortDescStatsReply, MAIN_DISPATCHER) 381 | def port_desc_stats_reply_handler(self, ev): 382 | """ 383 | Save port description info. 
384 | """ 385 | msg = ev.msg 386 | dpid = msg.datapath.id 387 | ofproto = msg.datapath.ofproto 388 | 389 | config_dict = {ofproto.OFPPC_PORT_DOWN: "Down", 390 | ofproto.OFPPC_NO_RECV: "No Recv", 391 | ofproto.OFPPC_NO_FWD: "No Farward", 392 | ofproto.OFPPC_NO_PACKET_IN: "No Packet-in"} 393 | 394 | state_dict = {ofproto.OFPPS_LINK_DOWN: "Down", 395 | ofproto.OFPPS_BLOCKED: "Blocked", 396 | ofproto.OFPPS_LIVE: "Live"} 397 | 398 | ports = [] 399 | for p in ev.msg.body: 400 | if p.port_no != 1: 401 | 402 | ports.append('port_no=%d hw_addr=%s name=%s config=0x%08x ' 403 | 'state=0x%08x curr=0x%08x advertised=0x%08x ' 404 | 'supported=0x%08x peer=0x%08x curr_speed=%d ' 405 | 'max_speed=%d' % 406 | (p.port_no, p.hw_addr, 407 | p.name, p.config, 408 | p.state, p.curr, p.advertised, 409 | p.supported, p.peer, p.curr_speed, 410 | p.max_speed)) 411 | if p.config in config_dict: 412 | config = config_dict[p.config] 413 | else: 414 | config = "up" 415 | 416 | if p.state in state_dict: 417 | state = state_dict[p.state] 418 | else: 419 | state = "up" 420 | 421 | # Recording data. 422 | port_feature = [config, state] 423 | self.port_features[dpid][p.port_no] = port_feature 424 | 425 | @set_ev_cls(ofp_event.EventOFPPortStatus, MAIN_DISPATCHER) 426 | def port_status_handler(self, ev): 427 | """ 428 | Handle the port status changed event. 429 | """ 430 | msg = ev.msg 431 | ofproto = msg.datapath.ofproto 432 | reason = msg.reason 433 | dpid = msg.datapath.id 434 | port_no = msg.desc.port_no 435 | 436 | reason_dict = {ofproto.OFPPR_ADD: "added", 437 | ofproto.OFPPR_DELETE: "deleted", 438 | ofproto.OFPPR_MODIFY: "modified", } 439 | 440 | if reason in reason_dict: 441 | print "switch%d: port %s %s" % (dpid, reason_dict[reason], port_no) 442 | else: 443 | print "switch%d: Illegal port state %s %s" % (dpid, port_no, reason) 444 | 445 | def write_values(self): 446 | a = time.time() 447 | if self.delay.link_delay: 448 | for link in self.link_free_bw: 449 | self.net_info[link] = [round(self.link_free_bw[link],6) , round(self.delay.link_delay[link],6), round(self.link_loss[link],6)] 450 | self.net_metrics[link] = [round(self.link_free_bw[link],6), round(self.link_used_bw[link],6), round(self.link_loss[link],6), round(self.delay.link_delay[link],6)] 451 | 452 | file_metrics = '/home/controlador/ryu/ryu/app/OSPF_loss/Proac/Metrics/'+str(self.count_monitor)+'_net_metrics.csv' 453 | with open(file_metrics,'wb') as csvfile: 454 | header_ = ['node1','node2','free_bw','used_bw', 'pkloss', 'delay'] 455 | file = csv.writer(csvfile, delimiter=',',quotechar='|', quoting=csv.QUOTE_MINIMAL) 456 | links_in = [] 457 | file.writerow(header_) 458 | for link, values in sorted(self.net_metrics.items()): 459 | links_in.append(link) 460 | tup = (link[1], link[0]) 461 | if tup not in links_in: 462 | file.writerow([link[0],link[1],values[0],values[1],values[2],values[3]]) 463 | b = time.time() 464 | return 465 | 466 | def show_stat(self, _type): 467 | ''' 468 | Show statistics information according to data type. 
469 | _type: 'port' / 'flow' 470 | ''' 471 | if setting.TOSHOW is False: 472 | return 473 | 474 | 475 | if _type == 'flow' and self.awareness.link_to_port: 476 | bodies = self.stats['flow'] 477 | print('datapath '' ip_src ip-dst ' 478 | 'out-port packets bytes flow-speed(b/s)') 479 | print('---------------- '' -------- ----------------- ' 480 | '-------- -------- -------- -----------') 481 | for dpid in bodies.keys(): 482 | for stat in sorted( 483 | [flow for flow in bodies[dpid] if flow.priority == 1], 484 | # key=lambda flow: (flow.match.get('in_port'), 485 | key=lambda flow: (flow.match.get('ipv4_src'), 486 | flow.match.get('ipv4_dst'))): 487 | key = (stat.match.get('ipv4_src'), stat.match.get('ipv4_dst')) 488 | print('{:>016} {:>9} {:>17} {:>8} {:>8} {:>8} {:>8.1f}'.format( 489 | dpid, 490 | stat.match['ipv4_src'], stat.match['ipv4_dst'], #flow match 491 | stat.instructions[0].actions[0].port, #port 492 | stat.packet_count, stat.byte_count, 493 | abs(self.flow_speed[dpid][key][-1])))#, 494 | # abs(self.flow_loss[dpid][ #flow loss 495 | # (stat.match.get('ipv4_src'),stat.match.get('ipv4_dst'))][-1]))) 496 | print() 497 | 498 | if _type == 'port': #and self.awareness.link_to_port: 499 | bodies = self.stats['port'] 500 | print('\ndatapath port ' 501 | ' rx-pkts rx-bytes '' tx-pkts tx-bytes ' 502 | ' port-bw(Kb/s) port-speed(Kb/s) port-freebw(Kb/s) ' 503 | ' port-state link-state') 504 | print('-------- ---- ' 505 | '--------- ----------- ''--------- ----------- ' 506 | '------------- --------------- ----------------- ' 507 | '---------- ----------') 508 | format_ = '{:>8} {:>4} {:>9} {:>11} {:>9} {:>11} {:>13.3f} {:>15.5f} {:>17.5f} {:>10} {:>10} {:>10} {:>10}' 509 | 510 | for dpid in sorted(bodies.keys()): 511 | for stat in sorted(bodies[dpid], key=attrgetter('port_no')): 512 | if stat.port_no != 1: 513 | if stat.port_no != ofproto_v1_3.OFPP_LOCAL: #port 1 is the host output 514 | if self.free_bandwidth[dpid]: 515 | self.logger.info(format_.format( 516 | dpid, stat.port_no, #datapath , num_port 517 | stat.rx_packets, stat.rx_bytes, 518 | stat.tx_packets, stat.tx_bytes, 519 | self.port_features[dpid][stat.port_no][2], #port_bw (kb/s) MAX 520 | abs(self.port_speed[(dpid, stat.port_no)][-1]/1000.0), #port_speed Kbits/s 521 | self.free_bandwidth[dpid][stat.port_no], #port_free bw kb/s 522 | self.port_features[dpid][stat.port_no][0], #port state 523 | self.port_features[dpid][stat.port_no][1], #link state 524 | stat.rx_dropped, stat.tx_dropped)) 525 | print() 526 | 527 | if _type == 'link': 528 | print('\nnode1 node2 used-bw(Kb/s) free-bw(Kb/s) latency loss') 529 | print('----- ----- -------------- -------------- ---------- ---- ') 530 | 531 | format_ = '{:>5} {:>5} {:>14.5f} {:>14.5f} {:>10} {:>12}' 532 | 533 | links_in = [] 534 | for link, values in sorted(self.net_info.items()): 535 | links_in.append(link) 536 | tup = (link[1], link[0]) 537 | if tup not in links_in: 538 | print(format_.format(link[0],link[1], 539 | self.link_used_bw[link]/1000.0, 540 | values[0], values[1],values[2])) -------------------------------------------------------------------------------- /OSPF_loss/Proac/stretch/Folder_for_storing_paths_stretch_during_experiment: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/danielaCasasv/RSIR-Reinforcement-Learning-and-SDN-Intelligent-Routing/e0d1db707103748fdd0d519da324ebc55f2de728/OSPF_loss/Proac/stretch/Folder_for_storing_paths_stretch_during_experiment 
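The times.txt log that follows appears to record one elapsed-time sample per iteration (in seconds, roughly 0.06-0.09 s here), written each time the routing paths are recomputed; clear.sh removes it between experiments. A minimal sketch for summarizing such a log, assuming one float per line:

with open('times.txt') as f:
    samples = [float(line) for line in f if line.strip()]
print('n=%d mean=%.4f s max=%.4f s' % (len(samples), sum(samples) / len(samples), max(samples)))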
-------------------------------------------------------------------------------- /OSPF_loss/Proac/times.txt: -------------------------------------------------------------------------------- 1 | 0.0830430984497 2 | 0.066880941391 3 | 0.058366060257 4 | 0.0568671226501 5 | 0.0682199001312 6 | 0.0715610980988 7 | 0.0565469264984 8 | 0.0575139522552 9 | 0.0819461345673 10 | 0.063737154007 11 | 0.0809569358826 12 | 0.0859289169312 13 | 0.0723760128021 14 | 0.0750579833984 15 | 0.068027973175 16 | 0.0592379570007 17 | 0.0948529243469 18 | 0.077085018158 19 | 0.0605728626251 20 | 0.0866601467133 21 | 0.0686230659485 22 | 0.0730109214783 23 | 0.0813121795654 24 | 0.0628399848938 25 | 0.0867059230804 26 | 0.0699310302734 27 | 0.0784349441528 28 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # RSIR-Reinforcement-Learning-and-SDN-Intelligent-Routing 2 | An approach, called RSIR, for Knowledge-Defined Networking (KDN) that adds a Knowledge Plane based on Reinforcement Learning (RL) to SDN. RSIR defines an RL-based routing algorithm that considers link-state metrics to explore, learn, and exploit efficient routing paths even under dynamic traffic changes. The algorithm capitalizes on the interaction with the environment (RL) and the global view of the network (SDN) to proactively install, in advance, routes that meet traffic demands. This proactive approach reduces the latency of computing and installing routes compared with traditional SDN routing. RSIR has been extensively evaluated by emulation using real traffic matrices from the GÉANT network. Results demonstrate that RSIR outperforms Dijkstra-based routing algorithms with dynamic link costs (i.e., delay or loss) when comparing the stretch of routing paths, delay, loss, and used bandwidth in the network. 
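At a high level, the repository couples Ryu monitoring apps (`SDNapps_proac/simple_monitor.py`, `simple_delay.py`, `simple_awareness.py`) that dump per-link free bandwidth, delay and loss into `net_info.csv`, with an RL module (`SDNapps_proac/RoutingGeant/`) that periodically recomputes paths from that snapshot and writes them to `paths.json` for proactive installation. A minimal offline sketch, mirroring the commented test block at the bottom of `RoutingGeant/main.py` (the checkout path below is an assumption; note the module writes `paths.json`/`times.txt`/`neighbors.csv` to hard-coded `/home/controlador/...` locations):

```python
import sys
import pandas as pd

sys.path.insert(0, 'SDNapps_proac/RoutingGeant')  # assumption: run from the repo root
from main import get_all_paths

data = pd.read_csv('SDNapps_proac/net_info.csv')  # columns: node1,node2,bwd,delay,pkloss
paths, elapsed = get_all_paths(data)              # runs Q-routing for every src/dst pair
print(paths[1][10], elapsed)                      # candidate path(s) from switch 1 to switch 10
```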
3 | -------------------------------------------------------------------------------- /SDNapps_proac/Metrics/Folder_for_storing_metrics_during_experiment: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/danielaCasasv/RSIR-Reinforcement-Learning-and-SDN-Intelligent-Routing/e0d1db707103748fdd0d519da324ebc55f2de728/SDNapps_proac/Metrics/Folder_for_storing_metrics_during_experiment -------------------------------------------------------------------------------- /SDNapps_proac/RL_threading.py: -------------------------------------------------------------------------------- 1 | import sys 2 | sys.path.insert(0,'/home/controlador/ryu/ryu/app/SDNapps_proac/RoutingGeant') 3 | from main import get_all_paths 4 | import pandas as pd 5 | import time 6 | import threading 7 | import bot 8 | import json,ast 9 | import csv 10 | # THREADING 11 | 12 | def paths_(): 13 | data = pd.read_csv("/home/controlador/ryu/ryu/app/SDNapps_proac/net_info.csv") 14 | # print(data) 15 | paths, total_time = get_all_paths(data) 16 | threading.Timer(10, paths_).start() 17 | 18 | def call_bot(msg): 19 | bot.sendMessage(msg) 20 | 21 | def get_paths_base(): 22 | file_base = '/home/controlador/ryu/ryu/app/SDNapps_proac/RoutingGeant/paths_weight.json' 23 | with open(file_base,'r') as json_file: 24 | paths_dict = json.load(json_file) 25 | paths_base = ast.literal_eval(json.dumps(paths_dict)) 26 | return paths_base 27 | 28 | def get_paths_RL(): 29 | file_RL = '/home/controlador/ryu/ryu/app/SDNapps_proac/paths.json' 30 | with open(file_RL,'r') as json_file: 31 | paths_dict = json.load(json_file) 32 | paths_RL = ast.literal_eval(json.dumps(paths_dict)) 33 | return paths_RL 34 | 35 | def stretch(paths, paths_base, src, dst): 36 | 37 | if isinstance(paths.get(str(src)).get(str(dst))[0],list): 38 | # print (paths.get(str(src)).get(str(dst))[0],'----', paths_base.get(str(src)).get(str(dst))) 39 | add_stretch = float(len(paths.get(str(src)).get(str(dst))[0])) - float(len(paths_base.get(str(src)).get(str(dst)))) 40 | mul_stretch = float(len(paths.get(str(src)).get(str(dst))[0])) / float(len(paths_base.get(str(src)).get(str(dst)))) 41 | return add_stretch, mul_stretch 42 | else: 43 | # print (paths.get(str(src)).get(str(dst)),'----', paths_base.get(str(src)).get(str(dst))) 44 | add_stretch = float(len(paths.get(str(src)).get(str(dst)))) - float(len(paths_base.get(str(src)).get(str(dst)))) 45 | mul_stretch = float(len(paths.get(str(src)).get(str(dst)))) / float(len(paths_base.get(str(src)).get(str(dst)))) 46 | return add_stretch, mul_stretch 47 | 48 | def calc_all_stretch(cont): 49 | paths_base = get_paths_base() 50 | paths_RL = get_paths_RL() 51 | cont_RL = 0 52 | total_paths = 0 53 | switches = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23] 54 | a = time.time() 55 | with open('/home/controlador/ryu/ryu/app/SDNapps_proac/RoutingGeant/stretch/'+str(cont)+'_stretch.csv','wb') as csvfile: 56 | header = ['src','dst','add_st','mul_st'] 57 | file = csv.writer(csvfile, delimiter=',',quotechar='|', quoting=csv.QUOTE_MINIMAL) 58 | file.writerow(header) 59 | for src in switches: 60 | for dst in switches: 61 | if src != dst: 62 | total_paths += 1 63 | add_stretch_RL, mul_stretch_RL = stretch(paths_RL, paths_base, src, dst) 64 | if add_stretch_RL != 0: 65 | cont_RL += 1 66 | # print('Additive stretch RL: ', add_stretch_RL) 67 | # print('Multi stretch RL: ', mul_stretch_RL) 68 | file.writerow([src,dst,add_stretch_RL,mul_stretch_RL]) 69 | total_time = 
time.time() - a 70 | return total_time 71 | 72 | def RL_thread(): 73 | cont = 0 74 | while cont < 836: 75 | # while cont < 30: 76 | a = time.time() 77 | data = pd.read_csv("/home/controlador/ryu/ryu/app/SDNapps_proac/net_info.csv") 78 | paths, time_RL = get_all_paths(data) 79 | # print('time_RL',time_RL) 80 | time_stretch = calc_all_stretch(cont) 81 | # print('time_stretch' , time_stretch) 82 | if time_RL > 10: 83 | time.sleep(time_RL + time_stretch) 84 | else: 85 | time.sleep(10 - time_RL - time_stretch) 86 | cont = cont + 1 87 | # print(time.time()-a) 88 | RL_thread() 89 | call_bot("RL-thread ended") 90 | -------------------------------------------------------------------------------- /SDNapps_proac/RoutingGeant/Capacidades.csv: -------------------------------------------------------------------------------- 1 | original connected weight capacity 2 | 10 3 0,010 100,000 3 | 22 20 0,040 25,000 4 | 15 20 0,040 25,000 5 | 3 14 0,645 1,550 6 | 7 19 0,040 25,000 7 | 10 11 0,040 25,000 8 | 5 8 0,040 25,000 9 | 7 17 0,010 100,000 10 | 3 11 0,040 25,000 11 | 13 19 0,040 25,000 12 | 8 9 0,040 25,000 13 | 3 21 0,010 100,000 14 | 14 13 0,645 1,550 15 | 2 13 0,010 100,000 16 | 6 19 0,645 1,550 17 | 12 22 0,010 100,000 18 | 16 10 0,010 100,000 19 | 1 7 0,010 100,000 20 | 3 1 0,010 100,000 21 | 2 4 0,010 100,000 22 | 17 13 0,010 100,000 23 | 1 16 0,010 100,000 24 | 9 15 0,040 25,000 25 | 20 17 0,010 100,000 26 | 2 18 0,040 25,000 27 | 17 10 0,010 100,000 28 | 6 7 0,645 1,550 29 | 16 5 0,040 25,000 30 | 4 16 0,010 100,000 31 | 2 23 0,040 25,000 32 | 7 21 0,010 100,000 33 | 12 10 0,010 100,000 34 | 7 2 0,010 100,000 35 | 17 23 0,040 25,000 36 | 21 18 0,040 25,000 37 | 16 9 0,010 100,000 38 | 2 12 0,010 100,000 39 | -------------------------------------------------------------------------------- /SDNapps_proac/RoutingGeant/Q_routing.py: -------------------------------------------------------------------------------- 1 | import random 2 | from get_all_routes import get_best_nodes 3 | import numpy as np 4 | 5 | def update_Q(T,Q,current_state, next_state, alpha): 6 | current_t = T[current_state][next_state] 7 | current_q = Q[current_state][next_state] 8 | 9 | #updating SARSA 10 | # best_next_action_val = min(Q[next_state].values()) 11 | # for action in Q[next_state].keys(): 12 | # if Q[next_state][action] == best_next_action_val: 13 | # best_next_action = action 14 | # # print(best_next_action) 15 | # new_q = current_q + alpha * (current_t + gamma * Q[next_state][best_next_action] - current_q) #for each state, it will choose the minimun furture cost instead of maximum future reward SARSA 16 | 17 | #updating Q-learning 18 | new_q = current_q + alpha * (current_t + min(Q[next_state].values()) - current_q) #for each state, 19 | #it will choose the minimun furture cost instead of maximum future reward. 
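# Worked example of the update above (illustrative numbers): with alpha = 0.9,
# current_q = Q[s][a] = 2.0, link cost current_t = T[s][a] = 1.5 and
# min(Q[a'].values()) = 3.0 at the next hop, the new estimate is
#   new_q = 2.0 + 0.9 * (1.5 + 3.0 - 2.0) = 4.25.
# Because link weights are costs to be minimized, the greedy action is the
# neighbour with the smallest Q value (see get_key_of_min_value below); the
# active update is Q-learning with an implicit discount factor of 1, while the
# commented block above keeps the SARSA-style variant with an explicit gamma.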
20 | Q[current_state][next_state] = new_q 21 | return Q 22 | 23 | def get_key_of_min_value(dic): 24 | min_val = min(dic.values()) 25 | return [k for k, v in dic.items() if v == min_val] 26 | 27 | def Q_routing(T,Q,alpha,epsilon,n_episodes,start,end): #Fill Q table and explore all options 28 | #--------------e-greedy decay--------------------------------- 29 | # min_epsilon = 0.01 30 | # max_epsilon = 0.9 31 | # decay_rate = 0.001 32 | episode_hops = {} 33 | 34 | #T is network info 35 | for e in range(1,n_episodes+1): 36 | # print("Episode {0}:".format(e)) 37 | current_state = start 38 | goal = False 39 | stored_states = [] 40 | 41 | while not goal: 42 | #takes the next hops negihbors for state 43 | valid_moves = list(Q[current_state].keys()) 44 | 45 | if len(valid_moves) <= 1: 46 | next_state = valid_moves[0] 47 | else: 48 | best_action = random.choice(get_key_of_min_value(Q[current_state])) 49 | if random.random() < epsilon: 50 | next_state = best_action 51 | else: 52 | valid_moves.pop(valid_moves.index(best_action)) 53 | next_state = random.choice(valid_moves) 54 | Q = update_Q(T,Q,current_state, next_state, alpha) 55 | current_state = next_state 56 | # print(next_state) 57 | stored_states.append(next_state) 58 | 59 | if next_state in end: 60 | goal = True 61 | # print('Q-table:', Q) 62 | # print('Switches', stored_states) 63 | # episode_hops[e] = stored_states 64 | # print('resume',episode_hops) 65 | # name = '~/ryu/ryu/SDNapps_proac/RoutingGeant/stretch/Graphs_parameters/alpha_'+str(alpha)+'/'+str(it)+'_alpha_'+str(alpha)+'_epsilon_'+str(epsilon)+'_' 66 | 67 | # with open(str(name)+'hops_episodes.json', 'w') as json_file: 68 | # json.dump(episode_hops, json_file, indent=1) 69 | 70 | #--------------e-greedy decay--------------------------------- 71 | # e += 1 72 | # epsilon = min_epsilon + (max_epsilon - min_epsilon)*np.exp(-decay_rate*e) 73 | # print epsilon 74 | return Q, epsilon 75 | -------------------------------------------------------------------------------- /SDNapps_proac/RoutingGeant/Q_routing.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/danielaCasasv/RSIR-Reinforcement-Learning-and-SDN-Intelligent-Routing/e0d1db707103748fdd0d519da324ebc55f2de728/SDNapps_proac/RoutingGeant/Q_routing.pyc -------------------------------------------------------------------------------- /SDNapps_proac/RoutingGeant/bw_r.csv: -------------------------------------------------------------------------------- 1 | node1,node2,capr,capor 2 | 1,3,17,100.0 3 | 1,7,32,100.0 4 | 1,16,57,100.0 5 | 2,4,26,100.0 6 | 2,7,45,100.0 7 | 2,12,58,100.0 8 | 2,13,82,100.0 9 | 2,18,44,25.0 10 | 2,23,25,25.0 11 | 3,10,78,100.0 12 | 3,11,82,25.0 13 | 3,14,74,1.55 14 | 3,21,57,100.0 15 | 4,16,3,100.0 16 | 5,8,90,25.0 17 | 5,16,31,25.0 18 | 6,7,8,1.55 19 | 6,19,34,1.55 20 | 7,17,38,100.0 21 | 7,19,13,25.0 22 | 7,21,8,100.0 23 | 8,9,15,25.0 24 | 9,15,83,25.0 25 | 9,16,90,100.0 26 | 10,11,31,25.0 27 | 10,12,30,100.0 28 | 10,16,66,100.0 29 | 10,17,90,100.0 30 | 12,22,43,100.0 31 | 13,14,1,1.55 32 | 13,17,46,100.0 33 | 13,19,69,25.0 34 | 15,20,68,25.0 35 | 17,20,26,100.0 36 | 17,23,52,25.0 37 | 18,21,78,25.0 38 | 20,22,28,25.0 -------------------------------------------------------------------------------- /SDNapps_proac/RoutingGeant/dict.csv: -------------------------------------------------------------------------------- 1 | 1,"[7, 16, 3]" 2 | 2,"[13, 4, 18, 23, 12, 7]" 3 | 3,"[14, 11, 21, 1, 10]" 4 | 4,"[16, 2]" 5 | 5,"[8, 16]" 6 | 6,"[19, 7]" 7 | 
7,"[19, 17, 21, 2, 1, 6]" 8 | 8,"[9, 5]" 9 | 9,"[15, 8, 16]" 10 | 10,"[3, 11, 16, 17, 12]" 11 | 11,"[10, 3]" 12 | 12,"[22, 10, 2]" 13 | 13,"[19, 14, 2, 17]" 14 | 14,"[13, 3]" 15 | 15,"[20, 9]" 16 | 16,"[10, 5, 9, 1, 4]" 17 | 17,"[13, 10, 23, 7, 20]" 18 | 18,"[2, 21]" 19 | 19,"[7, 13, 6]" 20 | 20,"[17, 22, 15]" 21 | 21,"[18, 3, 7]" 22 | 22,"[20, 12]" 23 | 23,"[2, 17]" 24 | -------------------------------------------------------------------------------- /SDNapps_proac/RoutingGeant/get_R_Q.py: -------------------------------------------------------------------------------- 1 | import copy 2 | import numpy as np 3 | def rand_number(seed): 4 | m = 2^34 5 | c = 251 6 | a = 4*c +1 7 | b = 351 8 | return ((a*seed+b)%m)/m 9 | 10 | def gaussian(x, mu, sig): 11 | return round(np.exp(-np.power(x - mu, 2.) / (2 * np.power(sig, 2.))), 3) 12 | 13 | def initial_R(A,Z,weight,A_Z_dict): 14 | #input net is 15 | R = {} 16 | net = copy.deepcopy(A_Z_dict) 17 | for i in net.keys(): 18 | sub_key = net[i] 19 | sub_dic = {} 20 | for j in sub_key: 21 | sub_dic[j] = 0 22 | R[i] = sub_dic 23 | for i in range(len(A)): 24 | R[A[i]][Z[i]] = weight[i] 25 | return R 26 | 27 | def initial_Q(R): 28 | seed = np.random.randint(0, 100) 29 | Q = copy.deepcopy(R) 30 | for i in Q.keys(): 31 | for j in Q[i].keys(): 32 | # Q[i][j] = rand_number(seed) 33 | Q[i][j] = 0 34 | return Q -------------------------------------------------------------------------------- /SDNapps_proac/RoutingGeant/get_R_Q.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/danielaCasasv/RSIR-Reinforcement-Learning-and-SDN-Intelligent-Routing/e0d1db707103748fdd0d519da324ebc55f2de728/SDNapps_proac/RoutingGeant/get_R_Q.pyc -------------------------------------------------------------------------------- /SDNapps_proac/RoutingGeant/get_all_routes.py: -------------------------------------------------------------------------------- 1 | from collections import Counter 2 | 3 | def get_single_dict(dic): 4 | single_link = {} 5 | min_value = min(dic.values()) 6 | for key in dic.keys(): 7 | if dic[key] == min_value: 8 | single_link[key] = dic[key] 9 | return single_link.keys() 10 | 11 | def get_best_nodes(Q,start,end): 12 | next_level = [start] 13 | node_use = [start] 14 | while list(set(next_level) & set(end)) == []: #while dest is not in next level exploration do: 15 | temp_level = [] 16 | for i in next_level: 17 | temp_level += get_single_dict(Q[i]) 18 | next_level = list(set(temp_level)) 19 | node_use += next_level #get the whole next exploration level... 
all neighbors to explore 20 | return list(set(node_use)) 21 | 22 | def get_best_net(Q,nodes): #build a dict with the best route found 23 | best_net = {} 24 | for i in nodes: 25 | best_net[i] = list(set( get_single_dict(Q[i]) ) & set(nodes)) 26 | return best_net 27 | 28 | def get_all_best_routes(graph,start,end,max_depth): #get all the routes that reach the dest 29 | past_path = [] 30 | # maintain a queue of paths 31 | queue = [] 32 | # push the first path into the queue 33 | queue.append([start]) 34 | while queue: 35 | # get the first path from the queue 36 | path = queue.pop(0) #takes las value from queue and removes it 37 | # get the last node from the path 38 | node = path[-1] 39 | # path found 40 | 41 | # enumerate all adjacent nodes, construct a new path and push it into the queue 42 | for adjacent in graph.get(node, []): 43 | new_path = list(path) 44 | ## end the current loop if we already reach the point 45 | if adjacent in end: 46 | new_path.append(adjacent) 47 | past_path.append(new_path) 48 | continue 49 | 50 | if adjacent in new_path: 51 | continue 52 | 53 | new_path.append(adjacent) 54 | if len(new_path) >= max_depth and new_path[-1] not in end: 55 | break 56 | # print new_path 57 | queue.append(new_path) 58 | past_path.append(new_path) 59 | 60 | best_paths = [] 61 | for l in past_path: 62 | if l[-1] in end: 63 | best_paths.append(l) 64 | return best_paths 65 | 66 | 67 | def get_cost(R,route): #return the total cost of each route found 68 | cost = 0 69 | for i in range(len(route)-1): 70 | cost += R[route[i]][route[i+1]] 71 | return round(cost,3) 72 | 73 | def count_routes(routes): #encuentra cuantas veces se alcanzo el dest 74 | ends_find = [] 75 | all_routes = {} 76 | for i in range(len(routes)): 77 | ends_find.append(routes[i][-1]) 78 | 79 | count = dict(Counter(ends_find)) #says how many times a value is used in the dict 80 | 81 | ends = list(set(ends_find)) 82 | for i in ends: 83 | all_routes[i] = [] 84 | for i in routes: 85 | end = i[-1] 86 | all_routes[end].append(i) 87 | return {"routes_number":count, 88 | "all_routes":all_routes} 89 | 90 | 91 | 92 | def get_route(Q,start,end): 93 | """ input is Q-table is like:{1: {2: 0.5, 3: 3.8}, 94 | 2: {1: 5.9, 5: 10}} """ 95 | single_route = [start] 96 | while single_route[-1] not in end: #get route from start with min q(values) until reach end 97 | next_step = min(Q[single_route[-1]],key=Q[single_route[-1]].get) 98 | single_route.append(next_step) 99 | if len(single_route) > 2 and single_route[-1] in single_route[:-1]: 100 | break 101 | return single_route 102 | 103 | 104 | 105 | 106 | 107 | 108 | 109 | 110 | 111 | 112 | 113 | 114 | 115 | 116 | 117 | 118 | 119 | 120 | 121 | 122 | -------------------------------------------------------------------------------- /SDNapps_proac/RoutingGeant/get_all_routes.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/danielaCasasv/RSIR-Reinforcement-Learning-and-SDN-Intelligent-Routing/e0d1db707103748fdd0d519da324ebc55f2de728/SDNapps_proac/RoutingGeant/get_all_routes.pyc -------------------------------------------------------------------------------- /SDNapps_proac/RoutingGeant/get_dict.py: -------------------------------------------------------------------------------- 1 | import csv 2 | 3 | def normalize(value, minD, maxD, min_val, max_val): 4 | if max_val == min_val: 5 | value_n = (maxD + minD) / 2 6 | else: 7 | value_n = (maxD - minD) * (value - min_val) / (max_val - min_val) + minD 8 | return value_n 9 | 10 | def 
normalize_path_cost(bwd, delay, pkloss): 11 | ''' 12 | Normalize values for reward. 13 | ''' 14 | 15 | bwd_cost = [] #since the RL will minimize reward function, we do 1/bwd for such function 16 | for val in bwd: 17 | if val > 0.005: #ensure minimum bwd available 18 | temp = 1/val 19 | bwd_cost.append(round(temp, 6)) 20 | else: 21 | bwd_cost.append(1/0.005) 22 | 23 | bwd_n = [normalize(bwd_val, 0, 100, min(bwd_cost), max(bwd_cost)) for bwd_val in bwd_cost] 24 | delay_n = [normalize(delay_val, 0, 100, min(delay), max(delay)) for delay_val in delay] 25 | pkloss_n = [normalize(pkloss_val, 0, 100, min(pkloss), max(pkloss)) for pkloss_val in pkloss] 26 | return bwd_n, delay_n, pkloss_n 27 | 28 | def reward(beta1, beta2, beta3, bwd, delay, pkloss, cost_action): 29 | bwd_cost_ = [i* beta1 for i in bwd] #bwd available 30 | delay_cost_ = [j* beta2 for j in delay] #delay 31 | pkloss_cost = [k* beta3 for k in pkloss] #pkloss 32 | 33 | rew = [cost_action+i+j+k for i,j,k in zip(bwd_cost_,delay_cost_,pkloss_cost)] #reward/cost of each link 34 | return rew 35 | 36 | def get_dict(data): 37 | A_0 = data["node1"].values.tolist() #get nodes 38 | Z_0 = data["node2"].values.tolist() #get neighbors 39 | 40 | #the order of cost paths is in the same as 'data' 41 | bwd = data["bwd"].values.tolist() #get cost paths 42 | delay = data["delay"].values.tolist() # get cost paths 43 | pkloss = data["pkloss"].values.tolist() # get cost paths 44 | 45 | bwd = list(map(lambda x: round(float(x),6), bwd)) #with 6 decimals 46 | delay = list(map(lambda x: float(x), delay)) 47 | pkloss = list(map(lambda x: float(x), pkloss)) 48 | 49 | bwd_n, delay_n, pkloss_n = normalize_path_cost(bwd, delay, pkloss) 50 | 51 | #weigths for reward 52 | beta1=1 53 | beta2=1 54 | beta3=1 55 | cost_action = 1 56 | 57 | weight_=reward(beta1,beta2,beta3,bwd_n,delay_n,pkloss_n,cost_action) 58 | A = A_0 + Z_0 59 | Z = Z_0 + A_0 60 | weight_ = weight_ + weight_ 61 | 62 | #turns all values in A and Z integers 63 | A = list(map(lambda x:int(x), A)) #takes values in list, aplies lambda and return the result into A 64 | Z = list(map(lambda x:int(x), Z)) 65 | 66 | A_key = sorted(set(A)) 67 | links={} 68 | 69 | for i in range(len(A_key)): 70 | links[A_key[i]] = [] 71 | 72 | for i in range(len(A)): 73 | if Z[i] not in links[A[i]]: 74 | links[A[i]].append(Z[i]) 75 | 76 | mydict = links 77 | with open('/home/controlador/ryu/ryu/app/SDNapps_proac/neighbors.csv', 'w') as csv_file: 78 | writer = csv.writer(csv_file) 79 | for key, value in mydict.items(): 80 | writer.writerow([key, value]) 81 | 82 | return {"A":A, 83 | "Z":Z, 84 | "weight": weight_, 85 | "links":links} -------------------------------------------------------------------------------- /SDNapps_proac/RoutingGeant/get_dict.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/danielaCasasv/RSIR-Reinforcement-Learning-and-SDN-Intelligent-Routing/e0d1db707103748fdd0d519da324ebc55f2de728/SDNapps_proac/RoutingGeant/get_dict.pyc -------------------------------------------------------------------------------- /SDNapps_proac/RoutingGeant/get_group.py: -------------------------------------------------------------------------------- 1 | 2 | def reach_out(group,net): 3 | ## this reach out for points, one loop 4 | search_key = set(net.keys()) & set(group) 5 | for key in search_key: 6 | if key in net.keys(): 7 | group += net[key] 8 | del net[key] 9 | return {"new_group":group, 10 | "new_net":net} 11 | 12 | def get_group(point,net): 13 | group = 
list([point] + net[point]) 14 | group_len = [0,1] 15 | while group_len[-2] != group_len[-1]: 16 | temp = reach_out(group,net) 17 | group = temp["new_group"] 18 | net = temp["new_net"] 19 | group_len.append(len(group)) 20 | return list(set(group)) 21 | 22 | def get_sub_net(nodes,net): 23 | ## input is a node of a network of list 24 | sub_net = {} 25 | for i in nodes: 26 | sub_net[i] = net[i] 27 | return sub_net -------------------------------------------------------------------------------- /SDNapps_proac/RoutingGeant/get_result.py: -------------------------------------------------------------------------------- 1 | from Q_routing import Q_routing 2 | from get_all_routes import get_best_nodes, get_best_net, get_all_best_routes, get_cost, count_routes, get_route 3 | from collections import Counter 4 | 5 | def get_result(R,Q,alpha,epsilon,n_episodes,start,end): 6 | Q, epsilon = Q_routing(R,Q,alpha,epsilon,n_episodes,start,end) 7 | nodes = get_best_nodes(Q,start,end) #get best nodes to reach dest 8 | graph = get_best_net(Q,nodes) #get dict with the path for the best nodes 9 | route_len = len(get_route(Q,start,end)) #calculate number of nodes in best route 10 | routes = get_all_best_routes(graph,start,end,route_len+1) 11 | result = count_routes(routes) 12 | 13 | ends_find = [] 14 | for i in range(len(routes)): 15 | ends_find.append(routes[i][-1]) 16 | ends_find = list(set(ends_find)) 17 | 18 | cost = [] 19 | for i in routes: 20 | cost.append(get_cost(R,i)) 21 | Counter(cost) 22 | res = {"nodes":nodes, 23 | "graph":graph, 24 | "final_Q": Q, 25 | "ends_find":ends_find, 26 | "cost":dict(Counter(cost)), 27 | "routes_number":result['routes_number'], 28 | "all_routes":result['all_routes']} 29 | return res -------------------------------------------------------------------------------- /SDNapps_proac/RoutingGeant/get_result.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/danielaCasasv/RSIR-Reinforcement-Learning-and-SDN-Intelligent-Routing/e0d1db707103748fdd0d519da324ebc55f2de728/SDNapps_proac/RoutingGeant/get_result.pyc -------------------------------------------------------------------------------- /SDNapps_proac/RoutingGeant/main.py: -------------------------------------------------------------------------------- 1 | from get_dict import get_dict 2 | from get_R_Q import initial_R, initial_Q 3 | from get_result import get_result 4 | import pandas as pd 5 | import time 6 | import json 7 | 8 | def RL_forwarding(data, src, dst): 9 | 10 | graph = get_dict(data) 11 | 12 | A = graph["A"] 13 | Z = graph["Z"] 14 | weight = graph["weight"] 15 | links = graph["links"] 16 | 17 | R = initial_R(A,Z,weight,links) 18 | Q = initial_Q(R) 19 | 20 | alpha = 0.9 # learning rate 21 | epsilon = 0.8 #greedy policy 22 | n_episodes = 300 23 | 24 | return get_result(R,Q,alpha,epsilon,n_episodes,src,dst) 25 | 26 | def get_all_paths(data): 27 | time_init = time.time() 28 | graph = get_dict(data) 29 | links = graph["links"] 30 | sws = list(links.keys()) 31 | 32 | paths = {} 33 | for i in sws: 34 | paths.setdefault(i, {}) 35 | for j in sws: 36 | if i != j: 37 | j = [j] 38 | time0 = time.time() 39 | result = RL_forwarding(data,i,j) 40 | 41 | if j[0] not in paths[i]: 42 | paths[i][j[0]] = result["all_routes"][j[-1]] 43 | with open('/home/controlador/ryu/ryu/app/SDNapps_proac/paths.json','w') as json_file: 44 | json.dump(paths, json_file, indent=2) 45 | time_end = time.time() 46 | total_time = time_end - time_init 47 | 48 | with 
open('/home/controlador/ryu/ryu/app/SDNapps_proac/times.txt','a') as txt_file: 49 | txt_file.write(str(total_time)+'\n') 50 | 51 | # For testing --------------------------------- 52 | print("ok") #aaa 53 | # print("Final dict paths: {0}".format(paths)) 54 | # print("execute",time.ctime()) 55 | # print("total time:" , total_time) 56 | #--------------------------------------------- 57 | return paths, total_time 58 | 59 | #For testing------------------------------- 60 | # file ='/home/controlador/ryu/ryu/app/SDNapps_proac/net_info.csv' 61 | # data = pd.read_csv(file) 62 | # get_all_paths(data) -------------------------------------------------------------------------------- /SDNapps_proac/RoutingGeant/main.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/danielaCasasv/RSIR-Reinforcement-Learning-and-SDN-Intelligent-Routing/e0d1db707103748fdd0d519da324ebc55f2de728/SDNapps_proac/RoutingGeant/main.pyc -------------------------------------------------------------------------------- /SDNapps_proac/RoutingGeant/neighbors.csv: -------------------------------------------------------------------------------- 1 | 1,"[3, 7, 16]" 2 | 2,"[4, 7, 12, 13, 18, 23]" 3 | 3,"[10, 11, 14, 21, 1]" 4 | 4,"[16, 2]" 5 | 5,"[8, 16]" 6 | 6,"[7, 19]" 7 | 7,"[17, 19, 21, 1, 2, 6]" 8 | 8,"[9, 5]" 9 | 9,"[15, 16, 8]" 10 | 10,"[11, 12, 16, 17, 3]" 11 | 11,"[3, 10]" 12 | 12,"[22, 2, 10]" 13 | 13,"[14, 17, 19, 2]" 14 | 14,"[3, 13]" 15 | 15,"[20, 9]" 16 | 16,"[1, 4, 5, 9, 10]" 17 | 17,"[20, 23, 7, 10, 13]" 18 | 18,"[21, 2]" 19 | 19,"[6, 7, 13]" 20 | 20,"[22, 15, 17]" 21 | 21,"[3, 7, 18]" 22 | 22,"[12, 20]" 23 | 23,"[2, 17]" 24 | -------------------------------------------------------------------------------- /SDNapps_proac/RoutingGeant/net_info.csv: -------------------------------------------------------------------------------- 1 | node1,node2,bwd,delay,pkloss 2 | 1,3,99999.82666,7.398963,0.0 3 | 1,7,99999.783139,37.056923,0.0 4 | 1,16,99999.826652,6.010532,0.0 5 | 2,4,99999.783394,15.530467,0.0 6 | 2,7,99999.783207,5.506396,0.0 7 | 2,12,99999.804905,2.807617,3.846154 8 | 2,13,99999.783394,2.590537,0.0 9 | 2,18,24999.783207,6.539464,0.0 10 | 2,23,24999.783207,1.135468,0.0 11 | 3,10,99999.826574,6.569982,0.0 12 | 3,11,24999.783384,8.31449,0.0 13 | 3,14,1549.826574,7.212043,0.0 14 | 3,21,99999.783394,9.758472,0.0 15 | 4,16,99999.826715,6.406069,0.0 16 | 5,8,24999.783384,11.216402,0.0 17 | 5,16,24999.826715,7.904649,0.0 18 | 6,7,1549.783207,6.621718,0.0 19 | 6,19,1549.783217,0.0,0.0 20 | 7,17,99999.826566,1.262069,0.0 21 | 7,19,24999.783031,3.494859,0.0 22 | 7,21,99999.826566,7.397532,0.0 23 | 8,9,24999.783374,3.655434,0.0 24 | 9,15,24999.783374,6.658912,0.0 25 | 9,16,99999.826707,6.157994,0.0 26 | 10,11,24999.783207,5.402565,0.0 27 | 10,12,99999.826433,1.695991,0.0 28 | 10,16,99999.826574,4.613638,0.0 29 | 10,17,99999.826574,1.931429,0.0 30 | 12,22,99999.826574,0.680566,0.0 31 | 13,14,1549.783207,3.759027,0.0 32 | 13,17,99999.826715,2.554417,0.0 33 | 13,19,24999.783217,6.753564,0.0 34 | 15,20,24999.783374,2.078652,0.0 35 | 17,20,99999.826707,1.420498,0.0 36 | 17,23,24999.826566,2.832055,0.0 37 | 18,21,24999.826566,7.03001,0.0 38 | 20,22,24999.783384,2.962112,0.0 39 | -------------------------------------------------------------------------------- /SDNapps_proac/RoutingGeant/stretch/Folder_for_storing_paths_stretch_during_experiment: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/danielaCasasv/RSIR-Reinforcement-Learning-and-SDN-Intelligent-Routing/e0d1db707103748fdd0d519da324ebc55f2de728/SDNapps_proac/RoutingGeant/stretch/Folder_for_storing_paths_stretch_during_experiment -------------------------------------------------------------------------------- /SDNapps_proac/bw_r.txt: -------------------------------------------------------------------------------- 1 | 10,3,78,100 2 | 22,20,28,25 3 | 15,20,68,25 4 | 3,14,74,1.55 5 | 7,19,13,25 6 | 10,11,31,25 7 | 5,8,90,25 8 | 7,17,38,100 9 | 3,11,82,25 10 | 13,19,69,25 11 | 8,9,15,25 12 | 3,21,57,100 13 | 14,13,1,1.55 14 | 2,13,82,100 15 | 6,19,34,1.55 16 | 12,22,43,100 17 | 16,10,66,100 18 | 1,7,32,100 19 | 3,1,17,100 20 | 2,4,26,100 21 | 17,13,46,100 22 | 1,16,57,100 23 | 9,15,83,25 24 | 20,17,26,100 25 | 2,18,44,25 26 | 17,10,90,100 27 | 6,7,8,1.55 28 | 16,5,31,25 29 | 4,16,3,100 30 | 2,23,25,25 31 | 7,21,8,100 32 | 12,10,30,100 33 | 7,2,45,100 34 | 17,23,52,25 35 | 21,18,78,25 36 | 16,9,90,100 37 | 2,12,58,100 38 | 3,10,78,100 39 | 20,22,28,25 40 | 20,15,68,25 41 | 14,3,74,1.55 42 | 19,7,13,25 43 | 11,10,31,25 44 | 8,5,90,25 45 | 17,7,38,100 46 | 11,3,82,25 47 | 19,13,69,25 48 | 9,8,15,25 49 | 21,3,57,100 50 | 13,14,1,1.55 51 | 13,2,82,100 52 | 19,6,34,1.55 53 | 22,12,43,100 54 | 10,16,66,100 55 | 7,1,32,100 56 | 1,3,17,100 57 | 4,2,26,100 58 | 13,17,46,100 59 | 16,1,57,100 60 | 15,9,83,25 61 | 17,20,26,100 62 | 18,2,44,25 63 | 10,17,90,100 64 | 7,6,8,1.55 65 | 5,16,31,25 66 | 16,4,3,100 67 | 23,2,25,25 68 | 21,7,8,100 69 | 10,12,30,100 70 | 2,7,45,100 71 | 23,17,52,25 72 | 18,21,78,25 73 | 9,16,90,100 74 | 12,2,58,100 75 | -------------------------------------------------------------------------------- /SDNapps_proac/clear.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | sudo rm times.txt 3 | cd ~/ryu/ryu/app/SDNapps_proac/Metrics/ 4 | sudo rm *metrics* 5 | cd ~/ryu/ryu/app/SDNapps_proac/RoutingGeant/stretch 6 | sudo rm *stretch* 7 | 8 | 9 | -------------------------------------------------------------------------------- /SDNapps_proac/net_info.csv: -------------------------------------------------------------------------------- 1 | node1,node2,bwd,delay,pkloss 2 | 1,3,99999.820453,6.514549,0.0 3 | 1,7,99999.820453,10.996938,0.0 4 | 1,16,99999.856314,7.30145,0.0 5 | 2,4,99999.820453,11.698842,0.0 6 | 2,7,99999.820453,21.344423,0.0 7 | 2,12,99999.82046,3.547072,0.0 8 | 2,13,99999.820426,31.85451,0.0 9 | 2,18,24999.820386,5.787373,0.0 10 | 2,23,24999.784463,3.225327,0.0 11 | 3,10,99999.820467,4.582047,0.0 12 | 3,11,24999.820346,23.528457,0.0 13 | 3,14,1549.820467,5.331874,0.0 14 | 3,21,99999.82042,4.888535,0.0 15 | 4,16,99999.856314,4.849553,0.0 16 | 5,8,24999.8204,8.31449,0.0 17 | 5,16,24999.856293,4.209876,0.0 18 | 6,7,1549.856363,27.347088,0.0 19 | 6,19,1549.856239,0.0,0.0 20 | 7,17,99999.85632,16.330481,0.0 21 | 7,19,24999.856239,0.0,0.0 22 | 7,21,99999.82042,4.954457,0.0 23 | 8,9,24999.820386,4.140139,0.0 24 | 9,15,24999.820359,13.550043,0.0 25 | 9,16,99999.820359,5.306959,0.0 26 | 10,11,24999.820326,18.01002,0.0 27 | 10,12,99999.820467,5.53906,0.0 28 | 10,16,99999.8204,5.090952,0.0 29 | 10,17,99999.85633,3.937602,0.0 30 | 12,22,99999.820406,2.245665,0.0 31 | 13,14,1549.82044,16.578674,0.0 32 | 13,17,99999.856298,6.365538,0.0 33 | 13,19,24999.784326,0.0,0.0 34 | 15,20,24999.820366,6.773472,0.0 35 | 17,20,99999.856293,15.611529,0.0 36 | 17,23,24999.820339,2.795458,0.0 37 | 18,21,24999.820359,28.990507,0.0 38 | 
20,22,24999.820366,4.46701,0.0 39 | -------------------------------------------------------------------------------- /SDNapps_proac/setting.py: -------------------------------------------------------------------------------- 1 | DISCOVERY_PERIOD = 5 # For discovering topology 2 | 3 | MONITOR_PERIOD = 10 # For monitoring traffic 4 | 5 | DELAY_DETECTING_PERIOD = 8 #For calulating link delay 6 | 7 | TOSHOW = True # For showing information in terminal 8 | -------------------------------------------------------------------------------- /SDNapps_proac/simple_awareness.py: -------------------------------------------------------------------------------- 1 | import networkx as nx 2 | import matplotlib.pyplot as plt 3 | import time 4 | 5 | from ryu import cfg 6 | from ryu.base import app_manager 7 | from ryu.controller import ofp_event 8 | from ryu.controller.handler import MAIN_DISPATCHER 9 | from ryu.controller.handler import CONFIG_DISPATCHER 10 | from ryu.controller.handler import DEAD_DISPATCHER 11 | from ryu.controller.handler import set_ev_cls 12 | from ryu.ofproto import ofproto_v1_3 13 | from ryu.lib.packet import packet 14 | from ryu.lib.packet import ethernet 15 | from ryu.lib.packet import ipv4 16 | from ryu.lib.packet import arp 17 | from ryu.lib import hub 18 | from ryu.topology import event 19 | from ryu.topology.api import get_switch, get_link 20 | 21 | import setting 22 | 23 | 24 | CONF = cfg.CONF 25 | 26 | 27 | class simple_Awareness(app_manager.RyuApp): 28 | """ 29 | A Ryu app for discovering topology information. 30 | Provides many data services for other Apps, such as 31 | link_to_port, access_table, switch_port_table, access_ports, 32 | interior_ports, and topology graph. 33 | This represent the Topology discovery module of the Control Plane 34 | """ 35 | OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION] 36 | 37 | events = [event.EventSwitchEnter, 38 | event.EventSwitchLeave, event.EventPortAdd, 39 | event.EventPortDelete, event.EventPortModify, 40 | event.EventLinkAdd, event.EventLinkDelete] 41 | 42 | def __init__(self, *args, **kwargs): 43 | super(simple_Awareness, self).__init__(*args, **kwargs) 44 | self.topology_api_app = self 45 | self.name = "awareness" 46 | self.link_to_port = {} # {(src_dpid,dst_dpid):(src_port,dst_port),} 47 | self.access_table = {} # {(sw,port):(ip, mac),} 48 | self.switch_port_table = {} # {dpid:set(port_num,),} 49 | self.access_ports = {} # {dpid:set(port_num,),} 50 | self.interior_ports = {} # {dpid:set(port_num,),} 51 | self.switches = [] # self.switches = [dpid,] 52 | self.pre_link_to_port = {} 53 | self.pre_access_table = {} 54 | 55 | self.graph = nx.DiGraph() 56 | # Get initiation delay. 
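# CONF.fanout appears to be a Ryu command-line/config option; get_initiation_delay()
# below (defined further down in this file) maps it to a 10-20 s grace period, and
# get_topology() ignores topology events until that period has elapsed so that LLDP
# discovery can settle before links and paths are computed.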
57 | self.initiation_delay = self.get_initiation_delay(CONF.fanout) 58 | self.start_time = time.time() 59 | 60 | self.discover_thread = hub.spawn(self._discover) 61 | 62 | 63 | def _discover(self): 64 | i = 0 65 | while True: 66 | 67 | self.show_topology() 68 | if i == 1: 69 | self.get_topology(None) 70 | i = 0 71 | hub.sleep(setting.DISCOVERY_PERIOD) 72 | i = i + 1 73 | 74 | def add_flow(self, dp, priority, match, actions, idle_timeout=0, hard_timeout=0): 75 | ofproto = dp.ofproto 76 | parser = dp.ofproto_parser 77 | inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, 78 | actions)] 79 | mod = parser.OFPFlowMod(datapath=dp, priority=priority, 80 | idle_timeout=idle_timeout, 81 | hard_timeout=hard_timeout, 82 | match=match, instructions=inst) 83 | dp.send_msg(mod) 84 | 85 | @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER) 86 | def switch_features_handler(self, ev): 87 | """ 88 | Install table-miss flow entry to datapaths. 89 | """ 90 | 91 | datapath = ev.msg.datapath 92 | ofproto = datapath.ofproto 93 | parser = datapath.ofproto_parser 94 | self.logger.info("switch:%s connected", datapath.id) 95 | 96 | # Install table-miss flow entry. 97 | match = parser.OFPMatch() 98 | actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER, 99 | ofproto.OFPCML_NO_BUFFER)] 100 | self.add_flow(datapath, 0, match, actions) 101 | 102 | 103 | @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER) 104 | def _packet_in_handler(self, ev): 105 | """ 106 | Handle the packet_in packet, and register the access info. 107 | """ 108 | msg = ev.msg 109 | datapath = msg.datapath 110 | in_port = msg.match['in_port'] 111 | pkt = packet.Packet(msg.data) 112 | eth_type = pkt.get_protocols(ethernet.ethernet)[0].ethertype 113 | arp_pkt = pkt.get_protocol(arp.arp) 114 | ip_pkt = pkt.get_protocol(ipv4.ipv4) 115 | 116 | if arp_pkt: 117 | arp_src_ip = arp_pkt.src_ip 118 | arp_dst_ip = arp_pkt.dst_ip 119 | mac = arp_pkt.src_mac 120 | # Record the access infomation. 121 | self.register_access_info(datapath.id, in_port, arp_src_ip, mac) 122 | 123 | elif ip_pkt: 124 | ip_src_ip = ip_pkt.src 125 | eth = pkt.get_protocols(ethernet.ethernet)[0] 126 | mac = eth.src 127 | # Record the access infomation. 128 | self.register_access_info(datapath.id, in_port, ip_src_ip, mac) 129 | else: 130 | pass 131 | 132 | @set_ev_cls(events) 133 | def get_topology(self, ev): 134 | """ 135 | Get topology info and calculate shortest paths. 136 | Note: In looped network, we should get the topology 137 | 20 or 30 seconds after the network went up. 138 | """ 139 | present_time = time.time() 140 | if present_time - self.start_time < self.initiation_delay: #Set to 30s 141 | return 142 | 143 | self.logger.info("[Topology Discovery Ok]") 144 | switch_list = get_switch(self.topology_api_app, None) 145 | self.create_port_map(switch_list) 146 | time.sleep(0.5) 147 | self.switches = [sw.dp.id for sw in switch_list] 148 | links = get_link(self.topology_api_app, None) 149 | self.create_interior_links(links) 150 | self.create_access_ports() 151 | self.graph = self.get_graph(self.link_to_port.keys()) 152 | 153 | def get_host_location(self, host_ip): 154 | """ 155 | Get host location info ((datapath, port)) according to the host ip. 156 | self.access_table = {(sw,port):(ip, mac),} 157 | """ 158 | 159 | for key in self.access_table.keys(): 160 | if self.access_table[key][0] == host_ip: 161 | return key 162 | self.logger.info("%s location is not found." 
% host_ip) 163 | return None 164 | 165 | def get_graph(self, link_list): 166 | """ 167 | Get Adjacency matrix from link_to_port. 168 | """ 169 | _graph = self.graph.copy() 170 | for src in self.switches: 171 | for dst in self.switches: 172 | if src == dst: 173 | _graph.add_edge(src, dst, weight=0) 174 | elif (src, dst) in link_list: 175 | _graph.add_edge(src, dst, weight=1) 176 | else: 177 | pass 178 | return _graph 179 | 180 | def get_initiation_delay(self, fanout): 181 | """ 182 | Get initiation delay. 183 | """ 184 | if fanout == 4: 185 | delay = 10 186 | elif fanout == 8: 187 | delay = 20 188 | else: 189 | delay = 20 190 | return delay 191 | 192 | def create_port_map(self, switch_list): 193 | """ 194 | Create interior_port table and access_port table. 195 | """ 196 | for sw in switch_list: 197 | dpid = sw.dp.id 198 | self.switch_port_table.setdefault(dpid, set()) 199 | # switch_port_table is equal to interior_ports plus access_ports. 200 | self.interior_ports.setdefault(dpid, set()) 201 | self.access_ports.setdefault(dpid, set()) 202 | for port in sw.ports: 203 | self.switch_port_table[dpid].add(port.port_no) 204 | 205 | def create_interior_links(self, link_list): 206 | """ 207 | Get links' srouce port to dst port from link_list. 208 | link_to_port = {(src_dpid,dst_dpid):(src_port,dst_port),} 209 | """ 210 | for link in link_list: 211 | src = link.src 212 | dst = link.dst 213 | self.link_to_port[(src.dpid, dst.dpid)] = (src.port_no, dst.port_no) 214 | # Find the access ports and interior ports. 215 | if link.src.dpid in self.switches: 216 | self.interior_ports[link.src.dpid].add(link.src.port_no) 217 | if link.dst.dpid in self.switches: 218 | self.interior_ports[link.dst.dpid].add(link.dst.port_no) 219 | 220 | def create_access_ports(self): 221 | """ 222 | Get ports without link into access_ports. 223 | """ 224 | for sw in self.switch_port_table: 225 | all_port_table = self.switch_port_table[sw] 226 | interior_port = self.interior_ports[sw] 227 | # That comes the access port of the switch. 228 | self.access_ports[sw] = all_port_table - interior_port 229 | 230 | def register_access_info(self, dpid, in_port, ip, mac): 231 | """ 232 | Register access host info into access table. 233 | """ 234 | if in_port in self.access_ports[dpid]: 235 | if (dpid, in_port) in self.access_table: 236 | if self.access_table[(dpid, in_port)] == (ip, mac): 237 | return 238 | else: 239 | self.access_table[(dpid, in_port)] = (ip, mac) 240 | return 241 | else: 242 | self.access_table.setdefault((dpid, in_port), None) 243 | self.access_table[(dpid, in_port)] = (ip, mac) 244 | return 245 | 246 | def show_topology(self): 247 | if self.pre_link_to_port != self.link_to_port: 248 | # It means the link_to_port table has changed. 249 | _graph = self.graph.copy() 250 | print "\n---------------------Link Port---------------------" 251 | print '%6s' % ('switch'), 252 | for node in sorted([node for node in _graph.nodes()], key=lambda node: node): 253 | print '%6d' % node, 254 | print 255 | for node1 in sorted([node for node in _graph.nodes()], key=lambda node: node): 256 | print '%6d' % node1, 257 | for node2 in sorted([node for node in _graph.nodes()], key=lambda node: node): 258 | if (node1, node2) in self.link_to_port.keys(): 259 | print '%6s' % str(self.link_to_port[(node1, node2)]), 260 | else: 261 | print '%6s' % '/', 262 | print 263 | print 264 | self.pre_link_to_port = self.link_to_port.copy() 265 | 266 | if self.pre_access_table != self.access_table: 267 | # It means the access_table has changed. 
268 | print "\n----------------Access Host-------------------" 269 | print '%10s' % 'switch', '%10s' % 'port', '%22s' % 'Host' 270 | if not self.access_table.keys(): 271 | print " NO found host" 272 | else: 273 | for sw in sorted(self.access_table.keys()): 274 | print '%10d' % sw[0], '%10d ' % sw[1], self.access_table[sw] 275 | print 276 | self.pre_access_table = self.access_table.copy() 277 | 278 | # nx.draw(self.graph) 279 | # plt.show() 280 | # plt.savefig("/home/controlador/ryu/ryu/app/SDNapps_proac/%d.png" % int(time.time())) 281 | 282 | -------------------------------------------------------------------------------- /SDNapps_proac/simple_delay.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | from ryu import cfg 3 | from ryu.base import app_manager 4 | from ryu.base.app_manager import lookup_service_brick 5 | from ryu.controller import ofp_event 6 | from ryu.controller.handler import MAIN_DISPATCHER, DEAD_DISPATCHER 7 | from ryu.controller.handler import set_ev_cls 8 | from ryu.ofproto import ofproto_v1_3 9 | from ryu.lib import hub 10 | from ryu.topology.switches import Switches 11 | from ryu.topology.switches import LLDPPacket 12 | from ryu.app import simple_switch_13 13 | import networkx as nx 14 | import time 15 | import setting 16 | 17 | import simple_awareness 18 | 19 | CONF = cfg.CONF 20 | 21 | 22 | class simple_Delay(app_manager.RyuApp): 23 | """ 24 | A Ryu app for calculating link delay by using echo replay 25 | messages from the Control Plane to the datapaths in the Data Plane. 26 | It is part of the Statistics module of the Control Plane 27 | 28 | """ 29 | 30 | OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION] 31 | 32 | def __init__(self, *args, **kwargs): 33 | super(simple_Delay, self).__init__(*args, **kwargs) 34 | 35 | self.sending_echo_request_interval = 0.3 36 | self.sw_module = lookup_service_brick('switches') 37 | self.awareness = lookup_service_brick('awareness') 38 | self.datapaths = {} 39 | self.echo_latency = {} 40 | self.link_delay = {} 41 | self.measure_thread = hub.spawn(self._detector) 42 | 43 | @set_ev_cls(ofp_event.EventOFPStateChange, 44 | [MAIN_DISPATCHER, DEAD_DISPATCHER]) 45 | def _state_change_handler(self, ev): 46 | datapath = ev.datapath 47 | if ev.state == MAIN_DISPATCHER: 48 | if not datapath.id in self.datapaths: 49 | self.logger.debug('Register datapath: %016x', datapath.id) 50 | self.datapaths[datapath.id] = datapath 51 | elif ev.state == DEAD_DISPATCHER: 52 | if datapath.id in self.datapaths: 53 | self.logger.debug('Unregister datapath: %016x', datapath.id) 54 | del self.datapaths[datapath.id] 55 | 56 | def _detector(self): 57 | """ 58 | Delay detecting functon. 59 | Send echo request and calculate link delay periodically 60 | """ 61 | while True: 62 | self._send_echo_request() 63 | self.create_link_delay() 64 | try: 65 | self.awareness.shortest_paths = {} 66 | self.logger.debug("Refresh the shortest_paths") 67 | except: 68 | self.awareness = lookup_service_brick('awareness') 69 | if self.awareness is not None: 70 | self.show_delay_statis() 71 | hub.sleep(setting.DELAY_DETECTING_PERIOD) 72 | 73 | def _send_echo_request(self): 74 | """ 75 | Seng echo request msg to datapath. 76 | """ 77 | for datapath in self.datapaths.values(): 78 | parser = datapath.ofproto_parser 79 | echo_req = parser.OFPEchoRequest(datapath, 80 | data="%.12f" % time.time()) 81 | datapath.send_msg(echo_req) 82 | # Important! 
Don't send echo request together, it will 83 | # generate a lot of echo reply almost in the same time. 84 | # which will generate a lot of delay of waiting in queue 85 | # when processing echo reply in echo_reply_handler. 86 | 87 | hub.sleep(self.sending_echo_request_interval) 88 | 89 | @set_ev_cls(ofp_event.EventOFPEchoReply, MAIN_DISPATCHER) 90 | def echo_reply_handler(self, ev): 91 | """ 92 | Handle the echo reply msg, and get the latency of link. 93 | """ 94 | now_timestamp = time.time() 95 | try: 96 | latency = now_timestamp - eval(ev.msg.data) 97 | self.echo_latency[ev.msg.datapath.id] = latency 98 | except: 99 | return 100 | 101 | def get_delay(self, src, dst): 102 | """ 103 | Get link delay. 104 | Controller 105 | | | 106 | src echo latency| |dst echo latency 107 | | | 108 | SwitchA-------SwitchB 109 | 110 | fwd_delay---> 111 | <----reply_delay 112 | delay = (forward delay + reply delay - src datapath's echo latency 113 | """ 114 | try: 115 | fwd_delay = self.awareness.graph[src][dst]['lldpdelay'] 116 | re_delay = self.awareness.graph[dst][src]['lldpdelay'] 117 | src_latency = self.echo_latency[src] 118 | dst_latency = self.echo_latency[dst] 119 | delay = (fwd_delay + re_delay - src_latency - dst_latency)/2 120 | return max(delay, 0) 121 | except: 122 | return float('inf') 123 | 124 | def _save_lldp_delay(self, src=0, dst=0, lldpdelay=0): 125 | try: 126 | self.awareness.graph[src][dst]['lldpdelay'] = lldpdelay 127 | except: 128 | if self.awareness is None: 129 | self.awareness = lookup_service_brick('awareness') 130 | return 131 | 132 | def create_link_delay(self): 133 | """ 134 | Create link delay data, and save it into graph object. 135 | """ 136 | try: 137 | for src in self.awareness.graph: 138 | for dst in self.awareness.graph[src]: 139 | if src == dst: 140 | self.awareness.graph[src][dst]['delay'] = 0 141 | continue 142 | delay = self.get_delay(src, dst) 143 | self.awareness.graph[src][dst]['delay'] = delay 144 | if self.awareness is not None: 145 | self.get_link_delay() 146 | except: 147 | if self.awareness is None: 148 | self.awareness = lookup_service_brick('awareness') 149 | return 150 | 151 | 152 | @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER) 153 | def packet_in_handler(self, ev): 154 | """ 155 | Explore LLDP packet and get the delay of link (fwd and reply). 
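# Worked example for get_delay() above (illustrative numbers): with LLDP one-way
# measurements fwd_delay = 6 ms and re_delay = 5 ms, and controller-to-switch echo
# latencies src_latency = 2 ms and dst_latency = 3 ms,
#   delay = (6 + 5 - 2 - 3) / 2 = 3 ms.
# Subtracting the echo latencies removes the controller<->switch segments from the
# LLDP round trip, leaving an estimate of the switch-to-switch link delay; negative
# results are clamped to 0 and measurement failures fall back to float('inf').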
156 | """ 157 | msg = ev.msg 158 | try: 159 | src_dpid, src_port_no = LLDPPacket.lldp_parse(msg.data) 160 | dpid = msg.datapath.id 161 | if self.sw_module is None: 162 | self.sw_module = lookup_service_brick('switches') 163 | 164 | for port in self.sw_module.ports.keys(): 165 | if src_dpid == port.dpid and src_port_no == port.port_no: 166 | delay = self.sw_module.ports[port].delay 167 | self._save_lldp_delay(src=src_dpid, dst=dpid, 168 | lldpdelay=delay) 169 | except LLDPPacket.LLDPUnknownFormat as e: 170 | return 171 | 172 | def get_link_delay(self): 173 | ''' 174 | Calculates total link dealy and save it in self.link_delay[(node1,node2)]: link_delay 175 | ''' 176 | for src in self.awareness.graph: 177 | for dst in self.awareness.graph[src]: 178 | if src != dst: 179 | delay1 = self.awareness.graph[src][dst]['delay'] 180 | delay2 = self.awareness.graph[dst][src]['delay'] 181 | link_delay = ((delay1 + delay2)*1000.0)/2 #saves in ms 182 | link = (src, dst) 183 | self.link_delay[link] = link_delay 184 | 185 | def show_delay_statis(self): 186 | if self.awareness is None: 187 | print("Not doing nothing, awarness none") 188 | # else: 189 | # print("Latency ok") 190 | # if setting.TOSHOW and self.awareness is not None: 191 | # self.logger.info("\nsrc dst delay") 192 | # self.logger.info("---------------------------") 193 | # for src in self.awareness.graph: 194 | # for dst in self.awareness.graph[src]: 195 | # delay = self.awareness.graph[src][dst]['delay'] 196 | # self.logger.info("%s <--> %s : %s" % (src, dst, delay)) 197 | -------------------------------------------------------------------------------- /SDNapps_proac/simple_monitor.py: -------------------------------------------------------------------------------- 1 | from operator import attrgetter 2 | 3 | from ryu.base import app_manager 4 | from ryu.controller import ofp_event 5 | from ryu.base.app_manager import lookup_service_brick 6 | from ryu.controller.handler import MAIN_DISPATCHER, DEAD_DISPATCHER 7 | from ryu.controller.handler import CONFIG_DISPATCHER 8 | from ryu.controller.handler import set_ev_cls 9 | from ryu.topology import event, switches 10 | from ryu.ofproto.ether import ETH_TYPE_IP 11 | from ryu.topology.api import get_switch, get_link 12 | from ryu.ofproto import ofproto_v1_3 13 | from ryu.lib import hub 14 | from ryu.lib.packet import packet 15 | from ryu.lib.packet import arp 16 | 17 | import time 18 | 19 | import simple_awareness 20 | import simple_delay 21 | # import requests 22 | import json, ast 23 | import setting 24 | import csv 25 | import time 26 | 27 | class simple_Monitor(app_manager.RyuApp): 28 | """ 29 | A Ryu app for netowrk monitoring. It retreieves statistics information through openflow 30 | of datapaths at the Data Plane. 31 | This class contains functions belonging to the Statistics module and Flow Installation module 32 | of the Control Plane. 33 | I also contains the functions corresponding to the Process Statistics module of the 34 | Management Plane in order to adventage the monitorin threading for statistics processing. 
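# Note on wiring: the _CONTEXTS declaration below makes Ryu instantiate
# simple_Awareness and simple_Delay once and pass the shared instances to this app
# through kwargs (see __init__), so the monitor reads the topology (link_to_port,
# access_table, graph) from self.awareness and per-link latency from
# self.delay.link_delay without any extra inter-app messaging.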
35 | """ 36 | 37 | OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION] 38 | _CONTEXTS = {"simple_awareness": simple_awareness.simple_Awareness, 39 | "simple_delay": simple_delay.simple_Delay} 40 | 41 | def __init__(self, *args, **kwargs): 42 | super(simple_Monitor, self).__init__(*args, **kwargs) 43 | self.name = "monitor" 44 | self.count_monitor = 0 45 | self.topology_api_app = self 46 | self.datapaths = {} 47 | self.port_stats = {} 48 | self.port_speed = {} 49 | self.flow_stats = {} 50 | self.flow_speed = {} 51 | self.flow_loss = {} 52 | self.port_loss = {} 53 | self.link_loss = {} 54 | self.net_info = {} 55 | self.net_metrics= {} 56 | self.link_free_bw = {} 57 | self.link_used_bw = {} 58 | self.stats = {} 59 | self.port_features = {} 60 | self.free_bandwidth = {} 61 | self.awareness = kwargs["simple_awareness"] 62 | self.delay = kwargs["simple_delay"] 63 | self.paths = {} 64 | self.installed_paths = {} 65 | 66 | self.monitor_thread = hub.spawn(self.monitor) 67 | 68 | @set_ev_cls(ofp_event.EventOFPStateChange, 69 | [MAIN_DISPATCHER, DEAD_DISPATCHER]) 70 | def state_change_handler(self, ev): 71 | """ 72 | Record datapath information. 73 | """ 74 | datapath = ev.datapath 75 | if ev.state == MAIN_DISPATCHER: 76 | if datapath.id not in self.datapaths: 77 | self.logger.debug('Datapath registered: %016x', datapath.id) 78 | print 'Datapath registered:', datapath.id ## 79 | self.datapaths[datapath.id] = datapath 80 | elif ev.state == DEAD_DISPATCHER: 81 | if datapath.id in self.datapaths: 82 | self.logger.debug('Datapath unregistered: %016x', datapath.id) 83 | print 'Datapath unregistered:', datapath.id 84 | del self.datapaths[datapath.id] 85 | 86 | def monitor(self): 87 | """ 88 | Main entry method of monitoring traffic. 89 | """ 90 | while True: 91 | self.count_monitor += 1 92 | self.stats['flow'] = {} 93 | self.stats['port'] = {} 94 | print("[Statistics Module Ok]") 95 | print("[{0}]".format(self.count_monitor)) 96 | for dp in self.datapaths.values(): 97 | self.port_features.setdefault(dp.id, {}) 98 | self.paths = None 99 | self.request_stats(dp) 100 | 101 | if self.awareness.link_to_port: 102 | self.flow_install_monitor() 103 | if self.stats['port']: 104 | self.get_port_loss() 105 | self.get_link_free_bw() 106 | self.get_link_used_bw() 107 | self.write_values() 108 | 109 | hub.sleep(setting.MONITOR_PERIOD) 110 | if self.stats['port']: 111 | self.show_stat('link') 112 | hub.sleep(1) 113 | 114 | #---------------------CONTROL PLANE FUNCTIONS---------------------------------------- 115 | #---------------------FLOW INSTALLATION MODULE FUNCTIONS ---------------------------- 116 | 117 | def flow_install_monitor(self): 118 | print("[Flow Installation Ok]") 119 | out_time= time.time() 120 | for dp in self.datapaths.values(): 121 | for dp2 in self.datapaths.values(): 122 | if dp.id != dp2.id: 123 | ip_src = '10.0.0.'+str(dp.id) 124 | ip_dst = '10.0.0.'+str(dp2.id) 125 | self.forwarding(dp.id, ip_src, ip_dst, dp.id, dp2.id) 126 | time.sleep(0.0005) 127 | end_out_time = time.time() 128 | out_total_ = end_out_time - out_time 129 | # print("FLow installation ends in: {0}s".format(out_total_)) 130 | return 131 | 132 | def forwarding(self, dpid, ip_src, ip_dst, src_sw, dst_sw): 133 | """ 134 | Get paths and install them into datapaths. 
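# Worked example for forwarding()/install_flow() below (illustrative port numbers):
# suppose get_path('1', '10') returns path = [1, 3, 10] and the discovered
# link_to_port contains {(1, 3): (2, 3), (3, 10): (4, 2)}. For the flow
# ('10.0.0.1', '10.0.0.10'), install_flow() then programs:
#   switch 1 : in_port 1 (host port) -> output port 2
#   switch 3 : in_port 3             -> output port 4
#   switch 10: in_port 2             -> output port 1 (host port)
# plus the mirrored entries for the reverse flow ('10.0.0.10', '10.0.0.1').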
135 | """ 136 | 137 | self.installed_paths.setdefault(dpid, {}) 138 | path = self.get_path(str(src_sw), str(dst_sw)) 139 | self.installed_paths[src_sw][dst_sw] = path 140 | flow_info = (ip_src, ip_dst) 141 | self.install_flow(self.datapaths, self.awareness.link_to_port, path, flow_info) 142 | 143 | def request_stats(self, datapath): #OK 144 | self.logger.debug('send stats request: %016x', datapath.id) 145 | ofproto = datapath.ofproto 146 | parser = datapath.ofproto_parser 147 | 148 | req = parser.OFPPortDescStatsRequest(datapath, 0) #for port description 149 | datapath.send_msg(req) 150 | 151 | req = parser.OFPFlowStatsRequest(datapath) #individual flow statistics 152 | datapath.send_msg(req) 153 | 154 | req = parser.OFPPortStatsRequest(datapath, 0, ofproto.OFPP_ANY) 155 | datapath.send_msg(req) 156 | 157 | def install_flow(self, datapaths, link_to_port, path, 158 | flow_info, data=None): 159 | init_time_install = time.time() 160 | ''' 161 | Install flow entires. 162 | path=[dpid1, dpid2...] 163 | flow_info=(src_ip, dst_ip) 164 | ''' 165 | if path is None or len(path) == 0: 166 | self.logger.info("Path error!") 167 | return 168 | 169 | in_port = 1 170 | first_dp = datapaths[path[0]] 171 | 172 | out_port = first_dp.ofproto.OFPP_LOCAL 173 | back_info = (flow_info[1], flow_info[0]) 174 | 175 | # Flow installing por middle datapaths in path 176 | if len(path) > 2: 177 | for i in range(1, len(path)-1): 178 | port = self.get_port_pair_from_link(link_to_port, 179 | path[i-1], path[i]) 180 | port_next = self.get_port_pair_from_link(link_to_port, 181 | path[i], path[i+1]) 182 | if port and port_next: 183 | src_port, dst_port = port[1], port_next[0] 184 | datapath = datapaths[path[i]] 185 | self.send_flow_mod(datapath, flow_info, src_port, dst_port) 186 | self.send_flow_mod(datapath, back_info, dst_port, src_port) 187 | if len(path) > 1: 188 | # The last flow entry 189 | port_pair = self.get_port_pair_from_link(link_to_port, 190 | path[-2], path[-1]) 191 | if port_pair is None: 192 | self.logger.info("Port is not found") 193 | return 194 | src_port = port_pair[1] 195 | dst_port = 1 #I know that is the host port 196 | last_dp = datapaths[path[-1]] 197 | self.send_flow_mod(last_dp, flow_info, src_port, dst_port) 198 | self.send_flow_mod(last_dp, back_info, dst_port, src_port) 199 | 200 | # The first flow entry 201 | port_pair = self.get_port_pair_from_link(link_to_port, path[0], path[1]) 202 | if port_pair is None: 203 | self.logger.info("Port not found in first hop.") 204 | return 205 | out_port = port_pair[0] 206 | self.send_flow_mod(first_dp, flow_info, in_port, out_port) 207 | self.send_flow_mod(first_dp, back_info, out_port, in_port) 208 | 209 | # src and dst on the same datapath 210 | else: 211 | out_port = 1 212 | self.send_flow_mod(first_dp, flow_info, in_port, out_port) 213 | self.send_flow_mod(first_dp, back_info, out_port, in_port) 214 | 215 | end_time_install = time.time() 216 | total_install = end_time_install - init_time_install 217 | 218 | def send_flow_mod(self, datapath, flow_info, src_port, dst_port): 219 | """ 220 | Build flow entry, and send it to datapath. 
221 | """ 222 | ofproto = datapath.ofproto 223 | parser = datapath.ofproto_parser 224 | actions = [] 225 | actions.append(parser.OFPActionOutput(dst_port)) 226 | 227 | match = parser.OFPMatch( 228 | eth_type=ETH_TYPE_IP, ipv4_src=flow_info[0], 229 | ipv4_dst=flow_info[1]) 230 | 231 | self.add_flow(datapath, 1, match, actions, 232 | idle_timeout=250, hard_timeout=0) 233 | 234 | 235 | def add_flow(self, dp, priority, match, actions, idle_timeout=0, hard_timeout=0): 236 | """ 237 | Send a flow entry to datapath. 238 | """ 239 | ofproto = dp.ofproto 240 | parser = dp.ofproto_parser 241 | inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, actions)] 242 | mod = parser.OFPFlowMod(datapath=dp, command=dp.ofproto.OFPFC_ADD, priority=priority, 243 | idle_timeout=idle_timeout, 244 | hard_timeout=hard_timeout, 245 | match=match, instructions=inst) 246 | dp.send_msg(mod) 247 | 248 | def del_flow(self, datapath, dst): 249 | """ 250 | Deletes a flow entry of the datapath. 251 | """ 252 | ofproto = datapath.ofproto 253 | parser = datapath.ofproto_parser 254 | 255 | match = parser.OFPMatch(eth_type=ETH_TYPE_IP, ipv4_src=flow_info[0],ipv4_dst=flow_info[1]) 256 | mod = parser.OFPFlowMod(datapath=datapath, match=match, cookie=0,command=ofproto.OFPFC_DELETE) 257 | datapath.send_msg(mod) 258 | 259 | def build_packet_out(self, datapath, buffer_id, src_port, dst_port, data): 260 | """ 261 | Build packet out object. 262 | """ 263 | actions = [] 264 | if dst_port: 265 | actions.append(datapath.ofproto_parser.OFPActionOutput(dst_port)) 266 | 267 | msg_data = None 268 | if buffer_id == datapath.ofproto.OFP_NO_BUFFER: 269 | if data is None: 270 | return None 271 | msg_data = data 272 | 273 | out = datapath.ofproto_parser.OFPPacketOut( 274 | datapath=datapath, buffer_id=buffer_id, 275 | data=msg_data, in_port=src_port, actions=actions) 276 | return out 277 | 278 | def arp_forwarding(self, msg, src_ip, dst_ip): 279 | """ 280 | Send ARP packet to the destination host if the dst host record 281 | is existed. 282 | result = (datapath, port) of host 283 | """ 284 | datapath = msg.datapath 285 | ofproto = datapath.ofproto 286 | 287 | result = self.awareness.get_host_location(dst_ip) 288 | if result: 289 | # Host has been recorded in access table. 290 | datapath_dst, out_port = result[0], result[1] 291 | datapath = self.datapaths[datapath_dst] 292 | out = self.build_packet_out(datapath, ofproto.OFP_NO_BUFFER, 293 | ofproto.OFPP_CONTROLLER, 294 | out_port, msg.data) 295 | datapath.send_msg(out) 296 | self.logger.debug("Deliver ARP packet to knew host") 297 | else: 298 | # self.flood(msg) 299 | pass 300 | 301 | def get_port_pair_from_link(self, link_to_port, src_dpid, dst_dpid): 302 | """ 303 | Get port pair of link, so that controller can install flow entry. 
304 | link_to_port = {(src_dpid,dst_dpid):(src_port,dst_port),} 305 | """ 306 | if (src_dpid, dst_dpid) in link_to_port: 307 | return link_to_port[(src_dpid, dst_dpid)] 308 | else: 309 | self.logger.info("Link from dpid:%s to dpid:%s is not in links" % 310 | (src_dpid, dst_dpid)) 311 | return None 312 | 313 | def get_path(self, src, dst): 314 | 315 | if self.paths != None: 316 | # print ('PATHS: OK') 317 | path = self.paths.get(src).get(dst)[0] 318 | return path 319 | else: 320 | # print('Getting paths: OK') 321 | paths = self.get_RL_paths() 322 | path = paths.get(src).get(dst)[0] 323 | return path 324 | 325 | def get_RL_paths(self): 326 | 327 | file = '/home/controlador/ryu/ryu/app/SDNapps_proac/paths.json' 328 | try: 329 | with open(file,'r') as json_file: 330 | paths_dict = json.load(json_file) 331 | paths_dict = ast.literal_eval(json.dumps(paths_dict)) 332 | self.paths = paths_dict 333 | return self.paths 334 | except ValueError as e: #error excpetion when trying to read the json and is still been updated 335 | return 336 | else: 337 | with open(file,'r') as json_file: #try again 338 | paths_dict = json.load(json_file) 339 | paths_dict = ast.literal_eval(json.dumps(paths_dict)) 340 | self.paths = paths_dict 341 | return self.paths 342 | 343 | #---------------------CONTROL PLANE ----------------------------------------- 344 | #-----------------------STATISTICS MODULE FUNCTIONS ------------------------- 345 | 346 | def save_stats(self, _dict, key, value, length=5): #Save values in dics (max len 5) 347 | if key not in _dict: 348 | _dict[key] = [] 349 | _dict[key].append(value) 350 | if len(_dict[key]) > length: 351 | _dict[key].pop(0) 352 | 353 | def get_speed(self, now, pre, period): #bits/s 354 | if period: 355 | return ((now - pre)*8) / period 356 | else: 357 | return 0 358 | 359 | def get_time(self, sec, nsec): #Total time that the flow was alive in seconds 360 | return sec + nsec / 1000000000.0 361 | 362 | def get_period(self, n_sec, n_nsec, p_sec, p_nsec): # (time las flow, time) 363 | # calculates period of time between flows 364 | return self.get_time(n_sec, n_nsec) - self.get_time(p_sec, p_nsec) 365 | 366 | def get_sw_dst(self, dpid, out_port): 367 | for key in self.awareness.link_to_port: 368 | src_port = self.awareness.link_to_port[key][0] 369 | if key[0] == dpid and src_port == out_port: 370 | dst_sw = key[1] 371 | dst_port = self.awareness.link_to_port[key][1] 372 | return (dst_sw, dst_port) 373 | 374 | def get_link_bw(self, file, src_dpid, dst_dpid): 375 | fin = open(file, "r") 376 | bw_capacity_dict = {} 377 | for line in fin: 378 | a = line.split(',') 379 | if a: 380 | s1 = a[0] 381 | s2 = a[1] 382 | # bwd = a[2] #random capacities 383 | bwd = a[3] #original capacities 384 | bw_capacity_dict.setdefault(s1,{}) 385 | bw_capacity_dict[str(a[0])][str(a[1])] = bwd 386 | fin.close() 387 | bw_link = bw_capacity_dict[str(src_dpid)][str(dst_dpid)] 388 | return bw_link 389 | 390 | def get_free_bw(self, port_capacity, speed): 391 | # freebw: Kbit/s 392 | return max(port_capacity - (speed/ 1000.0), 0) 393 | 394 | #------------------MANAGEMENT PLANE MODULE --------------------------- 395 | #------------------PROCESS STATISTICS MODULE FUNCTIONS---------------- 396 | 397 | def get_flow_loss(self): 398 | #Get per flow loss 399 | bodies = self.stats['flow'] 400 | for dp in bodies.keys(): 401 | list_flows = sorted([flow for flow in bodies[dp] if flow.priority == 1], 402 | key=lambda flow: (flow.match.get('ipv4_src'),flow.match.get('ipv4_dst'))) 403 | for stat in list_flows: 404 | out_port = 
stat.instructions[0].actions[0].port 405 | if self.awareness.link_to_port and out_port != 1: #get loss from ports of network 406 | key = (stat.match.get('ipv4_src'), stat.match.get('ipv4_dst')) 407 | tmp1 = self.flow_stats[dp][key] 408 | byte_count_src = tmp1[-1][1] 409 | 410 | result = self.get_sw_dst(dp, out_port) 411 | dst_sw = result[0] 412 | tmp2 = self.flow_stats[dst_sw][key] 413 | byte_count_dst = tmp2[-1][1] 414 | flow_loss = byte_count_src - byte_count_dst 415 | self.save_stats(self.flow_loss[dp], key, flow_loss, 5) 416 | 417 | def get_port_loss(self): 418 | #Get loss_port 419 | bodies = self.stats['port'] 420 | for dp in sorted(bodies.keys()): 421 | for stat in sorted(bodies[dp], key=attrgetter('port_no')): 422 | if self.awareness.link_to_port and stat.port_no != 1 and stat.port_no != ofproto_v1_3.OFPP_LOCAL: #get loss form ports of network 423 | key1 = (dp, stat.port_no) 424 | tmp1 = self.port_stats[key1] 425 | tx_bytes_src = tmp1[-1][0] 426 | tx_pkts_src = tmp1[-1][8] 427 | 428 | key2 = self.get_sw_dst(dp, stat.port_no) 429 | tmp2 = self.port_stats[key2] 430 | rx_bytes_dst = tmp2[-1][1] 431 | rx_pkts_dst = tmp2[-1][9] 432 | loss_port = float(tx_pkts_src - rx_pkts_dst) / tx_pkts_src #loss rate 433 | values = (loss_port, key2) 434 | self.save_stats(self.port_loss[dp], key1, values, 5) 435 | 436 | #Calculates the total link loss and save it in self.link_loss[(node1,node2)]:loss 437 | for dp in self.port_loss.keys(): 438 | for port in self.port_loss[dp]: 439 | key2 = self.port_loss[dp][port][-1][1] 440 | loss_src = self.port_loss[dp][port][-1][0] 441 | # tx_src = self.port_loss[dp][port][-1][1] 442 | loss_dst = self.port_loss[key2[0]][key2][-1][0] 443 | # tx_dst = self.port_loss[key2[0]][key2][-1][1] 444 | loss_l = (abs(loss_src) + abs(loss_dst)) / 2 445 | link = (dp, key2[0]) 446 | self.link_loss[link] = loss_l*100.0 447 | 448 | def get_link_free_bw(self): 449 | #Calculates the total free bw of link and save it in self.link_free_bw[(node1,node2)]:link_free_bw 450 | for dp in self.free_bandwidth.keys(): 451 | for port in self.free_bandwidth[dp]: 452 | free_bw1 = self.free_bandwidth[dp][port] 453 | key2 = self.get_sw_dst(dp, port) #key2 = (dp,port) 454 | free_bw2= self.free_bandwidth[key2[0]][key2[1]] 455 | link_free_bw = (free_bw1 + free_bw2)/2 456 | link = (dp, key2[0]) 457 | self.link_free_bw[link] = link_free_bw 458 | 459 | def get_link_used_bw(self): 460 | #Calculates the total free bw of link and save it in self.link_free_bw[(node1,node2)]:link_free_bw 461 | for key in self.port_speed.keys(): 462 | used_bw1 = self.port_speed[key][-1] 463 | key2 = self.get_sw_dst(key[0], key[1]) #key2 = (dp,port) 464 | used_bw2 = self.port_speed[key2][-1] 465 | link_used_bw = (used_bw1 + used_bw2)/2 466 | link = (key[0], key2[0]) 467 | self.link_used_bw[link] = link_used_bw 468 | 469 | def write_values(self): 470 | a = time.time() 471 | if self.delay.link_delay: 472 | for link in self.link_free_bw: 473 | self.net_info[link] = [round(self.link_free_bw[link],6) , round(self.delay.link_delay[link],6), round(self.link_loss[link],6)] 474 | self.net_metrics[link] = [round(self.link_free_bw[link],6), round(self.link_used_bw[link],6), round(self.delay.link_delay[link],6), round(self.link_loss[link],6)] 475 | 476 | with open('/home/controlador/ryu/ryu/app/SDNapps_proac/net_info.csv','wb') as csvfile: 477 | header_names = ['node1','node2','bwd','delay', 'pkloss'] 478 | file = csv.writer(csvfile, delimiter=',',quotechar='|', quoting=csv.QUOTE_MINIMAL) 479 | links_in = [] 480 | file.writerow(header_names) 
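Note: get_port_loss above turns raw port counters into a per-link loss rate — each direction's loss is (tx packets at the sender minus rx packets at the receiver) over the sender's tx packets, and the link value is the mean of the two directions expressed as a percentage. A standalone sketch with made-up counters:

    # Sketch: per-link packet loss from directional tx/rx counters (illustrative numbers).
    tx_pkts = {(1, 2): 1000, (2, 1): 800}   # packets sent toward the neighbour
    rx_pkts = {(1, 2): 990,  (2, 1): 796}   # packets received at the other end

    def link_loss_percent(a, b):
        loss_ab = float(tx_pkts[(a, b)] - rx_pkts[(a, b)]) / tx_pkts[(a, b)]
        loss_ba = float(tx_pkts[(b, a)] - rx_pkts[(b, a)]) / tx_pkts[(b, a)]
        return (abs(loss_ab) + abs(loss_ba)) / 2 * 100.0

    print(link_loss_percent(1, 2))  # 0.75 (%): mean of 1.0% and 0.5%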
481 | for link, values in sorted(self.net_info.items()): 482 | links_in.append(link) 483 | tup = (link[1], link[0]) 484 | if tup not in links_in: 485 | file.writerow([link[0],link[1], values[0],values[1],values[2]]) 486 | 487 | file_metrics = '/home/controlador/ryu/ryu/app/SDNapps_proac/Metrics/'+str(self.count_monitor)+'_net_metrics.csv' 488 | with open(file_metrics,'wb') as csvfile: 489 | header_ = ['node1','node2','free_bw','used_bw','delay','pkloss'] 490 | file = csv.writer(csvfile, delimiter=',',quotechar='|', quoting=csv.QUOTE_MINIMAL) 491 | links_in = [] 492 | file.writerow(header_) 493 | for link, values in sorted(self.net_metrics.items()): 494 | links_in.append(link) 495 | tup = (link[1], link[0]) 496 | if tup not in links_in: 497 | file.writerow([link[0],link[1],values[0],values[1],values[2],values[3]]) 498 | b = time.time() 499 | return 500 | 501 | #---------------------CONTROL PLANE FUNCTIONS--------------------------------- 502 | #---------------------STATISTICS MODULE FUNCTIONS ---------------------------- 503 | 504 | @set_ev_cls(ofp_event.EventOFPFlowStatsReply, MAIN_DISPATCHER) 505 | def flow_stats_reply_handler(self, ev): 506 | """ 507 | Save flow stats reply information into self.flow_stats. 508 | Calculate flow speed and Save it. 509 | self.flow_stats = {dpid:{(ipv4_src, ipv4_dst):[(packet_count, byte_count, duration_sec, duration_nsec),],},} 510 | self.flow_speed = {dpid:{(ipv4_src, ipv4_dst):[speed,],},} 511 | self.flow_loss = {dpid:{(ipv4_src, ipv4_dst, dst_sw):[loss,],},} 512 | """ 513 | 514 | body = ev.msg.body 515 | dpid = ev.msg.datapath.id 516 | self.stats['flow'][dpid] = body 517 | self.flow_stats.setdefault(dpid, {}) 518 | self.flow_speed.setdefault(dpid, {}) 519 | self.flow_loss.setdefault(dpid, {}) 520 | 521 | #flows.append('table_id=%s ' 522 | # 'duration_sec=%d duration_nsec=%d ' 523 | # 'priority=%d ' 524 | # 'idle_timeout=%d hard_timeout=%d flags=0x%04x ' 525 | # 'cookie=%d packet_count=%d byte_count=%d ' 526 | # 'match=%s instructions=%s' % 527 | # (stat.table_id, 528 | # stat.duration_sec, stat.duration_nsec, 529 | # stat.priority, 530 | # stat.idle_timeout, stat.hard_timeout, stat.flags, 531 | # stat.cookie, stat.packet_count, stat.byte_count, 532 | # stat.match, stat.instructions) 533 | 534 | for stat in sorted([flow for flow in body if flow.priority == 1], 535 | key=lambda flow: (flow.match.get('ipv4_src'), 536 | flow.match.get('ipv4_dst'))): 537 | key = (stat.match.get('ipv4_src'), stat.match.get('ipv4_dst')) 538 | 539 | value = (stat.packet_count, stat.byte_count, 540 | stat.duration_sec, stat.duration_nsec)#duration_sec: Time flow was alive in seconds 541 | #duration_nsec: Time flow was alive in nanoseconds beyond duration_sec 542 | self.save_stats(self.flow_stats[dpid], key, value, 5) 543 | 544 | # CALCULATE FLOW BYTE RATE 545 | pre = 0 546 | period = setting.MONITOR_PERIOD 547 | tmp = self.flow_stats[dpid][key] 548 | if len(tmp) > 1: 549 | pre = tmp[-2][1] #penultimo flow byte_count 550 | period = self.get_period(tmp[-1][2], tmp[-1][3], #valores (sec,nsec) ultimo flow, penultimo flow) 551 | tmp[-2][2], tmp[-2][3]) 552 | speed = self.get_speed(self.flow_stats[dpid][key][-1][1], #ultimo flow byte_count, penultimo byte_count, periodo 553 | pre, period) 554 | self.save_stats(self.flow_speed[dpid], key, speed, 5) #bits/s 555 | 556 | @set_ev_cls(ofp_event.EventOFPPortStatsReply, MAIN_DISPATCHER) 557 | def port_stats_reply_handler(self, ev): 558 | a = time.time() 559 | body = ev.msg.body 560 | dpid = ev.msg.datapath.id 561 | 562 | 
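Note: the stats handlers above derive rates from cumulative byte counters as speed = (now - previous) * 8 / period, where the period comes from the duration_sec/duration_nsec pair of two consecutive samples. A standalone worked sketch with illustrative numbers:

    # Sketch: bits/s from two cumulative byte-count samples.
    def get_time(sec, nsec):
        return sec + nsec / 1000000000.0

    def get_speed(now_bytes, pre_bytes, period):
        return ((now_bytes - pre_bytes) * 8) / period if period else 0

    pre = (125000, 10, 0)   # (byte_count, duration_sec, duration_nsec)
    now = (375000, 20, 0)
    period = get_time(now[1], now[2]) - get_time(pre[1], pre[2])  # 10.0 s
    print(get_speed(now[0], pre[0], period))                      # 200000.0 bits/s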
self.stats['port'][dpid] = body 563 | self.free_bandwidth.setdefault(dpid, {}) 564 | self.port_loss.setdefault(dpid, {}) 565 | """ 566 | Save port's stats information into self.port_stats. 567 | Calculate port speed and Save it. 568 | self.port_stats = {(dpid, port_no):[(tx_bytes, rx_bytes, rx_errors, duration_sec, duration_nsec),],} 569 | self.port_speed = {(dpid, port_no):[speed,],} 570 | 571 | Replay message content: 572 | (stat.port_no, 573 | stat.rx_packets, stat.tx_packets, 574 | stat.rx_bytes, stat.tx_bytes, 575 | stat.rx_dropped, stat.tx_dropped, 576 | stat.rx_errors, stat.tx_errors, 577 | stat.rx_frame_err, stat.rx_over_err, 578 | stat.rx_crc_err, stat.collisions, 579 | stat.duration_sec, stat.duration_nsec)) 580 | """ 581 | 582 | for stat in sorted(body, key=attrgetter('port_no')): 583 | port_no = stat.port_no 584 | key = (dpid, port_no) 585 | value = (stat.tx_bytes, stat.rx_bytes, stat.rx_errors, 586 | stat.duration_sec, stat.duration_nsec, stat.tx_errors, stat.tx_dropped, stat.rx_dropped, stat.tx_packets, stat.rx_packets) 587 | self.save_stats(self.port_stats, key, value, 5) 588 | 589 | if port_no != ofproto_v1_3.OFPP_LOCAL: 590 | if port_no != 1 and self.awareness.link_to_port : 591 | # Get port speed and Save it. 592 | pre = 0 593 | period = setting.MONITOR_PERIOD 594 | tmp = self.port_stats[key] 595 | if len(tmp) > 1: 596 | # Calculate with the tx_bytes and rx_bytes 597 | pre = tmp[-2][0] + tmp[-2][1] #penultimo port tx_bytes 598 | period = self.get_period(tmp[-1][3], tmp[-1][4], tmp[-2][3], tmp[-2][4]) #periodo entre el ultimo y penultimo total bytes en el puerto 599 | speed = self.get_speed(self.port_stats[key][-1][0] + self.port_stats[key][-1][1], pre, period) #speed in bits/s 600 | self.save_stats(self.port_speed, key, speed, 5) 601 | 602 | #Get links capacities 603 | 604 | file = '/home/controlador/ryu/ryu/app/SDNapps_proac/bw_r.txt' # link capacities 605 | link_to_port = self.awareness.link_to_port 606 | 607 | for k in list(link_to_port.keys()): 608 | if k[0] == dpid: 609 | if link_to_port[k][0] == port_no: 610 | dst_dpid = k[1] 611 | 612 | #FUNCIONA CON LISTA----------------------------- 613 | # list_dst_dpid = [k for k in list(link_to_port.keys()) if k[0] == dpid and link_to_port[k][0] == port_no] 614 | # if len(list_dst_dpid) > 0: 615 | # dst_dpid = list_dst_dpid[0][1] 616 | # ----------------------------------------- 617 | bw_link = float(self.get_link_bw(file, dpid, dst_dpid)) 618 | port_state = self.port_features.get(dpid).get(port_no) 619 | 620 | if port_state: 621 | bw_link_kbps = bw_link * 1000.0 622 | self.port_features[dpid][port_no].append(bw_link_kbps) 623 | free_bw = self.get_free_bw(bw_link_kbps, speed) 624 | self.free_bandwidth[dpid][port_no] = free_bw 625 | 626 | @set_ev_cls(ofp_event.EventOFPPortDescStatsReply, MAIN_DISPATCHER) 627 | def port_desc_stats_reply_handler(self, ev): 628 | """ 629 | Save port description info. 
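Note: port_stats_reply_handler above computes a port's free bandwidth as the configured capacity (the Mbit/s value from bw_r.txt, converted to Kbit/s) minus the measured port speed (bits/s, converted to Kbit/s), floored at zero. A standalone worked sketch with illustrative values:

    # Sketch: free bandwidth in Kbit/s from a capacity in Mbit/s and a speed in bits/s.
    def get_free_bw(port_capacity_kbps, speed_bps):
        return max(port_capacity_kbps - (speed_bps / 1000.0), 0)

    bw_link_mbps = 25.0                   # e.g. the capacity column of a bw_r.txt row
    bw_link_kbps = bw_link_mbps * 1000.0  # 25000.0 Kbit/s
    speed_bps = 4000000.0                 # measured port speed: 4 Mbit/s
    print(get_free_bw(bw_link_kbps, speed_bps))  # 21000.0 Kbit/s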
630 | """ 631 | msg = ev.msg 632 | dpid = msg.datapath.id 633 | ofproto = msg.datapath.ofproto 634 | 635 | config_dict = {ofproto.OFPPC_PORT_DOWN: "Down", 636 | ofproto.OFPPC_NO_RECV: "No Recv", 637 | ofproto.OFPPC_NO_FWD: "No Farward", 638 | ofproto.OFPPC_NO_PACKET_IN: "No Packet-in"} 639 | 640 | state_dict = {ofproto.OFPPS_LINK_DOWN: "Down", 641 | ofproto.OFPPS_BLOCKED: "Blocked", 642 | ofproto.OFPPS_LIVE: "Live"} 643 | 644 | ports = [] 645 | for p in ev.msg.body: 646 | if p.port_no != 1: 647 | 648 | ports.append('port_no=%d hw_addr=%s name=%s config=0x%08x ' 649 | 'state=0x%08x curr=0x%08x advertised=0x%08x ' 650 | 'supported=0x%08x peer=0x%08x curr_speed=%d ' 651 | 'max_speed=%d' % 652 | (p.port_no, p.hw_addr, 653 | p.name, p.config, 654 | p.state, p.curr, p.advertised, 655 | p.supported, p.peer, p.curr_speed, 656 | p.max_speed)) 657 | if p.config in config_dict: 658 | config = config_dict[p.config] 659 | else: 660 | config = "up" 661 | 662 | if p.state in state_dict: 663 | state = state_dict[p.state] 664 | else: 665 | state = "up" 666 | 667 | # Recording data. 668 | port_feature = [config, state] 669 | self.port_features[dpid][p.port_no] = port_feature 670 | 671 | @set_ev_cls(ofp_event.EventOFPPortStatus, MAIN_DISPATCHER) 672 | def port_status_handler(self, ev): 673 | """ 674 | Handle the port status changed event. 675 | """ 676 | msg = ev.msg 677 | ofproto = msg.datapath.ofproto 678 | reason = msg.reason 679 | dpid = msg.datapath.id 680 | port_no = msg.desc.port_no 681 | 682 | reason_dict = {ofproto.OFPPR_ADD: "added", 683 | ofproto.OFPPR_DELETE: "deleted", 684 | ofproto.OFPPR_MODIFY: "modified", } 685 | 686 | if reason in reason_dict: 687 | print "switch%d: port %s %s" % (dpid, reason_dict[reason], port_no) 688 | else: 689 | print "switch%d: Illegal port state %s %s" % (dpid, port_no, reason) 690 | 691 | @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER) 692 | def packet_in_handler(self, ev): 693 | ''' 694 | In packet_in handler, we need to learn access_table by ARP and IP packets. 695 | Therefore, the first packet from UNKOWN host MUST be ARP 696 | ''' 697 | msg = ev.msg 698 | pkt = packet.Packet(msg.data) 699 | arp_pkt = pkt.get_protocol(arp.arp) 700 | if isinstance(arp_pkt, arp.arp): 701 | self.arp_forwarding(msg, arp_pkt.src_ip, arp_pkt.dst_ip) 702 | 703 | def show_stat(self, _type): 704 | ''' 705 | Show statistics information according to data type. 
706 | _type: 'port' / 'flow' 707 | ''' 708 | if setting.TOSHOW is False: 709 | return 710 | 711 | 712 | if _type == 'flow' and self.awareness.link_to_port: 713 | bodies = self.stats['flow'] 714 | print('datapath '' ip_src ip-dst ' 715 | 'out-port packets bytes flow-speed(b/s)') 716 | print('---------------- '' -------- ----------------- ' 717 | '-------- -------- -------- -----------') 718 | for dpid in bodies.keys(): 719 | for stat in sorted( 720 | [flow for flow in bodies[dpid] if flow.priority == 1], 721 | key=lambda flow: (flow.match.get('ipv4_src'), 722 | flow.match.get('ipv4_dst'))): 723 | key = (stat.match.get('ipv4_src'), stat.match.get('ipv4_dst')) 724 | print('{:>016} {:>9} {:>17} {:>8} {:>8} {:>8} {:>8.1f}'.format( 725 | dpid, 726 | stat.match['ipv4_src'], stat.match['ipv4_dst'], #flow match 727 | stat.instructions[0].actions[0].port, #port 728 | stat.packet_count, stat.byte_count, 729 | abs(self.flow_speed[dpid][key][-1]))) 730 | print() 731 | 732 | if _type == 'port': #and self.awareness.link_to_port: 733 | bodies = self.stats['port'] 734 | print('\ndatapath port ' 735 | ' rx-pkts rx-bytes '' tx-pkts tx-bytes ' 736 | ' port-bw(Kb/s) port-speed(Kb/s) port-freebw(Kb/s) ' 737 | ' port-state link-state') 738 | print('-------- ---- ' 739 | '--------- ----------- ''--------- ----------- ' 740 | '------------- --------------- ----------------- ' 741 | '---------- ----------') 742 | format_ = '{:>8} {:>4} {:>9} {:>11} {:>9} {:>11} {:>13.3f} {:>15.5f} {:>17.5f} {:>10} {:>10} {:>10} {:>10}' 743 | 744 | 745 | for dpid in sorted(bodies.keys()): 746 | for stat in sorted(bodies[dpid], key=attrgetter('port_no')): 747 | if stat.port_no != 1: 748 | if stat.port_no != ofproto_v1_3.OFPP_LOCAL: #port 1 is the host output 749 | if self.free_bandwidth[dpid]: 750 | self.logger.info(format_.format( 751 | dpid, stat.port_no, #datapath , num_port 752 | stat.rx_packets, stat.rx_bytes, 753 | stat.tx_packets, stat.tx_bytes, 754 | self.port_features[dpid][stat.port_no][2], #port_bw (kb/s) MAX 755 | abs(self.port_speed[(dpid, stat.port_no)][-1]/1000.0), #port_speed Kbits/s 756 | self.free_bandwidth[dpid][stat.port_no], #port_free bw kb/s 757 | self.port_features[dpid][stat.port_no][0], #port state 758 | self.port_features[dpid][stat.port_no][1], #link state 759 | stat.rx_dropped, stat.tx_dropped)) 760 | print() 761 | 762 | if _type == 'link': 763 | print('\nnode1 node2 used-bw(Kb/s) free-bw(Kb/s) latency(ms) loss') 764 | print('----- ----- -------------- -------------- ----------- ---- ') 765 | 766 | format_ = '{:>5} {:>5} {:>14.5f} {:>14.5f} {:>12} {:>12}' 767 | 768 | links_in = [] 769 | for link, values in sorted(self.net_info.items()): 770 | links_in.append(link) 771 | tup = (link[1], link[0]) 772 | if tup not in links_in: 773 | print(format_.format(link[0],link[1], 774 | self.link_used_bw[link]/1000.0, 775 | values[0], values[1], values[2])) -------------------------------------------------------------------------------- /SDNapps_proac/start_net_info.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | file = '/home/controlador/ryu/ryu/app/SDNapps_proac/net_info.csv' 3 | df = pd.read_csv(file) 4 | df.delay = 0 5 | df.pkloss = 0 6 | 7 | print df --------------------------------------------------------------------------------
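Note: start_net_info.py above loads the controller's net_info.csv with pandas and zeroes the delay and pkloss columns, but as written it only prints the modified DataFrame. A minimal Python 3 sketch of the same step is shown below; persisting the reset values back to the CSV is an assumption, since the original script does not write the file.

    # Sketch (Python 3): reset delay/pkloss in net_info.csv.
    import pandas as pd

    file = '/home/controlador/ryu/ryu/app/SDNapps_proac/net_info.csv'
    df = pd.read_csv(file)
    df['delay'] = 0
    df['pkloss'] = 0
    print(df)
    # df.to_csv(file, index=False)  # assumption: uncomment to persist the reset values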