├── src ├── results.txt ├── results.txt.small ├── network.xml ├── network.xml.small ├── SPIDER_ctrl.py ├── results.txt.polska ├── results.txt.fat_tree └── network.xml.fat_tree ├── results ├── table2 │ ├── README │ ├── latex_plot.py │ ├── latex_table.py │ ├── dummy.py │ ├── fc_lib.py │ └── table2.py ├── fig8 │ ├── results.txt │ ├── README │ ├── decr_nping.py │ ├── network.xml │ ├── fig8.py │ └── fig8_ryu_app.py ├── fig9 │ ├── README │ ├── fig9.py │ ├── fig9_SPIDER_ryu_app.py │ └── fig9_OF_ryu_app.py └── fig7 │ ├── README │ ├── results.txt │ ├── network.xml │ ├── fig7.py │ └── fig7_ryu_app.py ├── README.md └── LICENSE /src/results.txt: -------------------------------------------------------------------------------- 1 | set PrimaryPath[1] := 1 2 3 4 6; 2 | set PrimaryPath[2] := 6 4 5 2 1; 3 | 4 | param DetectNode[3,4,1]:= 3; 5 | 6 | set DetourPath[3,4,1] := 2 5 4; 7 | 8 | -------------------------------------------------------------------------------- /src/results.txt.small: -------------------------------------------------------------------------------- 1 | set PrimaryPath[1] := 1 2 3 4 6; 2 | set PrimaryPath[2] := 6 4 5 2 1; 3 | 4 | param DetectNode[3,4,1]:= 3; 5 | 6 | set DetourPath[3,4,1] := 2 5 4; 7 | 8 | -------------------------------------------------------------------------------- /results/table2/README: -------------------------------------------------------------------------------- 1 | $ sudo python table2.py 2 | 3 | This script: 4 | -prints data on screen 5 | -stores data in tmp/{net}{E2E PP/greedy}.txt 6 | 7 | Simulations parameters can be tuned in table2.py. 
8 | -------------------------------------------------------------------------------- /results/fig8/results.txt: -------------------------------------------------------------------------------- 1 | set PrimaryPath[1] := 3 4 6; 2 | set PrimaryPath[2] := 6 4 5 2 7 3; 3 | set PrimaryPath[3] := 7 2 5 4; 4 | set PrimaryPath[4] := 4 3 7; 5 | 6 | param DetectNode[3,4,1]:= 3; 7 | 8 | set DetourPath[3,4,1] := 3 7 2 5 4; -------------------------------------------------------------------------------- /results/fig8/README: -------------------------------------------------------------------------------- 1 | $ sudo python fig8.py 2 | 3 | This script: 4 | -prints coordinates data for LateX 5 | -creates many PNG files /home/mininet/spider/results/fig8/fig8_HB_rate_{HB_rate}.png 6 | 7 | Simulations parameters can be tuned in fig8.py. 8 | -------------------------------------------------------------------------------- /results/fig9/README: -------------------------------------------------------------------------------- 1 | $ sudo python fig9.py 2 | 3 | This script: 4 | -prints data on screen 5 | -prints LateX data on screen 6 | -stores data in ~/total_lost_packets_OF.txt and ~/total_lost_packets_SPIDER 7 | 8 | Simulations parameters can be tuned in fig9.py. -------------------------------------------------------------------------------- /results/fig7/README: -------------------------------------------------------------------------------- 1 | $ sudo python fig7.py 2 | 3 | This script: 4 | -prints data on screen 5 | -stores data in SPIDER_results_final.txt 6 | -creates many PNG files /home/mininet/SPIDER_losses_rate_{interarrival}.png 7 | 8 | Simulations parameters can be tuned in fig7.py. 
9 | -------------------------------------------------------------------------------- /results/fig7/results.txt: -------------------------------------------------------------------------------- 1 | set PrimaryPath[1] := 1 2 7 3 4 6; 2 | set PrimaryPath[2] := 6 4 5 2 1; 3 | set PrimaryPath[3] := 7 3 4; 4 | set PrimaryPath[4] := 4 3 7; 5 | 6 | param DetectNode[2,7,1]:= 2; 7 | param DetectNode[3,4,1]:= 3; 8 | param DetectNode[4,3,4]:= 4; 9 | param DetectNode[3,4,3]:= 3; 10 | 11 | set DetourPath[2,7,1] := 2 5 4; 12 | set DetourPath[3,4,1] := 2 5 4; 13 | set DetourPath[4,3,4] := 4 5 2 7; 14 | set DetourPath[3,4,3] := 7 2 5 4; 15 | -------------------------------------------------------------------------------- /results/table2/latex_plot.py: -------------------------------------------------------------------------------- 1 | import commands 2 | 3 | for filename in ['E2E','greedy']: 4 | print '['+filename+']\n' 5 | s = commands.getstatusoutput('cat tmp/*x*'+filename+'*')[1] 6 | d = {} 7 | for i in s.split("\n"): 8 | d[eval(i)[0]]=eval(i)[1] 9 | #print d 10 | print 'coordinates{' 11 | for plot_name in d[ d.keys()[0] ]: 12 | for point in sorted(d, key=lambda d: int(d[:d.index('x')])): 13 | print ' ('+str(point[:point.index('x')])+','+str(int(d[point][plot_name]))+')' 14 | print ' };' 15 | print '\\addlegendentry{'+plot_name+'}' 16 | print '' 17 | print '##################################################' -------------------------------------------------------------------------------- /results/table2/latex_table.py: -------------------------------------------------------------------------------- 1 | import commands 2 | 3 | N=[] 4 | diz = {} 5 | for filename in ['E2E','greedy']: 6 | s = commands.getstatusoutput('cat tmp/*x*'+filename+'*')[1] 7 | for i in s.split("\n"): 8 | diz[eval(i)[0]]=eval(i)[1] 9 | if int(eval(i)[0].split("x")[0]) not in N: 10 | N.append(int(eval(i)[0].split("x")[0])) 11 | N=sorted(N) 12 | print """\\begin{table*}[] 13 | \\centering 14 | \\caption{Number 
of flow entries per node.} 15 | \\label{table:BigO} 16 | \\begin{tabular}{llllllllllll} 17 | \\toprule 18 | \\textbf{Net} & \\textbf{D} & \\textbf{E} & \\textbf{C} & \\textbf{min} & \\textbf{avg} & \\textbf{max} & $E^2\\times N$ \\\\ \\midrule""" 19 | for n in N: 20 | e = (4*n)-4 21 | c = (n-2)*(n-2) 22 | d = e*(e-1) 23 | print ("{} & {} & {} & {} & {} & {} & {} & {} \\\\".format(str(n)+"x"+str(n), d, e, c, diz[str(n)+"x"+str(n)+" E2E PP"]["min"],int(round(diz[str(n)+"x"+str(n)+" E2E PP"]["avg"])),diz[str(n)+"x"+str(n)+" E2E PP"]["max"], e*e*(e+c))), 24 | if n != N[-1]: 25 | print("\\hline") 26 | print """ 27 | \\bottomrule 28 | \\end{tabular} 29 | \\end{table*} 30 | """ -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Readme 2 | 3 | 1) Follow the instruction at http://openstate-sdn.org/ to install OpenState on a Mininet 2.2.1 VM 4 | 5 | 2) Configure your VM Manager to forward VM's TCP port 8080 to localhost's TCP port 8080 6 | 7 | 3) SSH in your VM with X11 forwarding enabled: 8 | 9 | 10 | $ ssh -X mininet@VM_IP 11 | 12 | 13 | 4) Inside Mininet, clone this GitHub repository 14 | 15 | 16 | $ git clone http://github.com/OpenState-SDN/spider 17 | 18 | 19 | 5) Launch SPIDER: 20 | 21 | 22 | $ cd ~/spider/src 23 | $ sudo ryu-manager SPIDER_ctrl_WEBAPP.py 24 | 25 | 26 | 6) From a browser in your host machine open the following URL: http://localhost:8080/SPIDER 27 | 28 | The default topology is a small example network. 
Is it possible to select other preconfigured topology instances (polska, fat_tree or norway) by renaming them: 29 | 30 | 31 | $ cp results.txt.[topo_name] results.txt 32 | $ cp network.xml.[topo_name] network.xml 33 | 34 | 35 | ## Authors 36 | 37 | * Luca Pollini () 38 | * Davide Sanvito () 39 | * Carmelo Cascone () 40 | -------------------------------------------------------------------------------- /results/fig8/decr_nping.py: -------------------------------------------------------------------------------- 1 | # execute it with $sudo eval `python decr_ping.py` 2 | 3 | import random 4 | import os,sys 5 | 6 | if len(sys.argv)!=4: 7 | print("You need to specify [destination IP] [peak rate] [step]!") 8 | sys.exit() 9 | 10 | ping_host = sys.argv[1] 11 | 12 | 13 | peak_rate = int(sys.argv[2]) #pkt/s - Start from this rate... 14 | time_step = 1 #seconds - ...every time_step seconds... 15 | rate_step = int(sys.argv[3]) #pkt/s - ...decrease the rate of rate_step... 16 | peak_time = 5 #seconds - allow peak_time seconds of peak rate at the beginning... 17 | 18 | num_flows = peak_rate / rate_step 19 | sleep_int = 1.0 / peak_rate 20 | flow_pkt_interarrival = peak_rate 21 | tot_duration = num_flows * time_step 22 | welcome_msg = "Starting experiment... 
ETA {} seconds".format(tot_duration) 23 | 24 | flows = [] 25 | for i in range(1,num_flows+1): 26 | # sleep between pings so to (hopefully) have a equally spaced packets 27 | flows.append("sleep 1; nping --rate {} --count {} --icmp-type 0 {} --quiet &".format( 28 | flow_pkt_interarrival, flow_pkt_interarrival, ping_host)) 29 | flow_pkt_interarrival -= rate_step 30 | #Uncomment if you want to generate traffic at increasing rate 31 | ''' 32 | flows.append("sleep 10;") 33 | for i in range(1,num_flows+1): 34 | flow_pkt_interarrival += rate_step 35 | # sleep between pings so to (hopefully) have a equally spaced packets 36 | flows.append("sleep 1; nping --rate {} --count {} --icmp-type 0 {} &".format( 37 | flow_pkt_interarrival, flow_pkt_interarrival, ping_host)) 38 | ''' 39 | # start flows in random order to minimize bursty behavior 40 | command = " ".join(flows) 41 | print command 42 | os.system(command) -------------------------------------------------------------------------------- /src/network.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 100 8 | 200 9 | 10 | 11 | 12 | 13 | 200 14 | 200 15 | 16 | 17 | 18 | 19 | 300 20 | 200 21 | 22 | 23 | 24 | 25 | 400 26 | 200 27 | 28 | 29 | 30 | 31 | 300 32 | 250 33 | 34 | 35 | 36 | 37 | 500 38 | 200 39 | 40 | 41 | 42 | 43 | 44 | N1 45 | N2 46 | 47 | 48 | N2 49 | N3 50 | 51 | 52 | N3 53 | N4 54 | 55 | 56 | N4 57 | N6 58 | 59 | 60 | N2 61 | N5 62 | 63 | 64 | N4 65 | N5 66 | 67 | 68 | 69 | 70 | 71 | 72 | N1 73 | N6 74 | 10 75 | 76 | 77 | N6 78 | N1 79 | 1.0 80 | 81 | 82 | 83 | -------------------------------------------------------------------------------- /src/network.xml.small: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 100 8 | 200 9 | 10 | 11 | 12 | 13 | 200 14 | 200 15 | 16 | 17 | 18 | 19 | 300 20 | 200 21 | 22 | 23 | 24 | 25 | 400 26 | 200 27 | 28 | 29 | 30 | 31 | 300 32 | 250 33 | 34 | 35 | 36 | 
37 | 500 38 | 200 39 | 40 | 41 | 42 | 43 | 44 | N1 45 | N2 46 | 47 | 48 | N2 49 | N3 50 | 51 | 52 | N3 53 | N4 54 | 55 | 56 | N4 57 | N6 58 | 59 | 60 | N2 61 | N5 62 | 63 | 64 | N4 65 | N5 66 | 67 | 68 | 69 | 70 | 71 | 72 | N1 73 | N6 74 | 10 75 | 76 | 77 | N6 78 | N1 79 | 1.0 80 | 81 | 82 | 83 | -------------------------------------------------------------------------------- /results/fig7/network.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 100 8 | 200 9 | 10 | 11 | 12 | 13 | 200 14 | 200 15 | 16 | 17 | 18 | 19 | 300 20 | 200 21 | 22 | 23 | 24 | 25 | 400 26 | 200 27 | 28 | 29 | 30 | 31 | 500 32 | 200 33 | 34 | 35 | 36 | 37 | 350 38 | 250 39 | 40 | 41 | 42 | 43 | 600 44 | 200 45 | 46 | 47 | 48 | 49 | 50 | N1 51 | N2 52 | 53 | 54 | N2 55 | N7 56 | 57 | 58 | N7 59 | N3 60 | 61 | 62 | N3 63 | N4 64 | 65 | 66 | N4 67 | N6 68 | 69 | 70 | N2 71 | N5 72 | 73 | 74 | N4 75 | N5 76 | 77 | 78 | 79 | 80 | 81 | 82 | N1 83 | N6 84 | 10 85 | 86 | 87 | N6 88 | N1 89 | 1.0 90 | 91 | 92 | N7 93 | N4 94 | 10 95 | 96 | 97 | N4 98 | N7 99 | 1.0 100 | 101 | 102 | -------------------------------------------------------------------------------- /results/fig8/network.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 100 8 | 200 9 | 10 | 11 | 12 | 13 | 200 14 | 200 15 | 16 | 17 | 18 | 19 | 300 20 | 200 21 | 22 | 23 | 24 | 25 | 400 26 | 200 27 | 28 | 29 | 30 | 31 | 500 32 | 200 33 | 34 | 35 | 36 | 37 | 350 38 | 250 39 | 40 | 41 | 42 | 43 | 600 44 | 200 45 | 46 | 47 | 48 | 49 | 50 | N1 51 | N2 52 | 53 | 54 | N2 55 | N7 56 | 57 | 58 | N7 59 | N3 60 | 61 | 62 | N3 63 | N4 64 | 65 | 66 | N4 67 | N6 68 | 69 | 70 | N2 71 | N5 72 | 73 | 74 | N4 75 | N5 76 | 77 | 78 | 79 | 80 | 81 | 82 | N3 83 | N6 84 | 10 85 | 86 | 87 | N6 88 | N3 89 | 1.0 90 | 91 | 92 | N7 93 | N4 94 | 10 95 | 96 | 97 | N4 98 | N7 99 | 1.0 100 | 101 | 102 | 
-------------------------------------------------------------------------------- /results/table2/dummy.py: -------------------------------------------------------------------------------- 1 | import networkx as nx 2 | from xml.dom import minidom 3 | import time 4 | import fc_lib 5 | import sys 6 | import os.path 7 | from numpy import mean, std 8 | 9 | def get_parsed_vals(vals): 10 | return min(vals), max(vals), mean(vals), std(vals) 11 | 12 | def print_statistic(G, fc, demands, allocated_demands, link_util_cap=1): 13 | #evaluation number of request not allocated 14 | percentage_dem = float(len(fc.pps))/len(demands) 15 | 16 | if len(allocated_demands) == 0: 17 | return 18 | 19 | #evaluation backup path length 20 | ratios = list() 21 | total_bp = 0.0 22 | for (s,t) in fc.pps: 23 | b = len(fc.pps[s,t]) - 1.0 24 | if (s,t) in fc.bps: 25 | for (n,m) in fc.bps[s,t]: 26 | ratios.append(((len(fc.bps[s,t][n,m]) - 1.0)/b) - 1) 27 | total_bp += len(fc.pps[s,t])-1 28 | 29 | print("{:<23} {:.0f}/{:.0f} ({:.0%})".format("PPs allocated:", len(allocated_demands), len(demands), percentage_dem)) 30 | print("{:<23} {:.0f}/{:.0f} ({:.0%})".format("BPs allocated:", len(ratios), total_bp, len(ratios)/total_bp)) 31 | 32 | # primary path length evaluation 33 | pp_lenghts = list() 34 | for p in fc.pps.values(): 35 | pp_lenghts.append(len(p)-1) 36 | 37 | bp_lenghts = list() 38 | for (s,t) in fc.bps: 39 | for (n,m) in fc.bps[s,t]: 40 | bp_lenghts.append(len(fc.bps[s,t][n,m])-1) 41 | 42 | print "{:<23} min {:5.1f} | max {:5.1f} | avg {:5.1f} (std {:5.1f})".format("PP length:", *get_parsed_vals(pp_lenghts)) 43 | print "{:<23} min {:5.1f} | max {:5.1f} | avg {:5.1f} (std {:5.1f})".format("BP length:", *get_parsed_vals(bp_lenghts)) 44 | print "{:<23} min {:5.0%} | max {:5.0%} | avg {:5.0%} (std {:5.0%})".format("PP/BP length ratio:", *get_parsed_vals(ratios)) 45 | 46 | #evaluation link congestion 47 | link_usages = list() 48 | for (i,j) in G.edges(): 49 | b = min([fc.pp_res_caps[i,j]] + 
fc.lf_res_caps[i,j].values() + fc.nf_res_caps[i,j].values()) 50 | link_usages.append(link_util_cap - (b / float(G.edge[i][j]['capacity']))) 51 | print "{:<23} min {:5.0%} | max {:5.0%} | avg {:5.0%} (std {:5.0%})".format("Link utilization:", *get_parsed_vals(link_usages)) 52 | 53 | #evaluation reverse path length 54 | reverse_paths = list() 55 | for (s,t) in fc.pps: 56 | if (s,t) in fc.bps: 57 | for (n,m) in fc.bps[s,t]: 58 | assert fc.bps[s,t][n,m][0] == fc.pps[s,t][0] 59 | assert fc.bps[s,t][n,m][-1] == fc.pps[s,t][-1] 60 | 61 | #print "Demand {}->{} {}x{}".format(s,t,n,m) 62 | if n != s: 63 | d_pos = fc.pps[s,t].index(n) 64 | for i in range(max(len(fc.bps[s,t][n,m]), len(fc.pps[s,t]))): 65 | if(fc.bps[s,t][n,m][i] == fc.pps[s,t][i]): 66 | r_pos = i 67 | else: 68 | break; 69 | #print "PP", fc.pps[s,t] 70 | #print "BP", fc.bps[s,t][n,m] 71 | #print "d_pos={}, r_pos={}".format(d_pos, r_pos) 72 | reverse_paths.append(float(d_pos-r_pos)/d_pos) 73 | 74 | #print r_pos,d_pos,reverse_paths[-1] 75 | print "{:<23} min {:5.0%} | max {:5.0%} | avg {:5.0%} (std {:5.0%})".format("Reverse path length:", *get_parsed_vals(reverse_paths)) 76 | 77 | def execute_instance(G, demands, link_util_cap=1, cost_func=None, given_pps=[], bp_node_disj=False): 78 | 79 | fc = fc_lib.Crankbacker(G, link_util_cap) 80 | t0 = time.clock() 81 | allocated_demands = fc.allocate_demands(demands, cost_func, given_pps, bp_node_disj) 82 | t1 = time.clock() 83 | 84 | print "{:<23} {} nodes, {} links, {} demands".format("Network", G.number_of_nodes(), G.number_of_edges(), len(demands)) 85 | print "{:<23} {:.1f}ms, ~{:.2f}ms per demand".format("Execution time:", (t1-t0)*1000, (float(t1-t0)/len(demands))*1000) 86 | 87 | print_statistic(G, fc, demands, allocated_demands) 88 | 89 | return fc -------------------------------------------------------------------------------- /results/fig7/fig7.py: -------------------------------------------------------------------------------- 1 | import os,glob 2 | import 
datetime,time 3 | from pprint import pprint 4 | import matplotlib 5 | # Force matplotlib to not use any Xwindows backend. 6 | matplotlib.use('Agg') 7 | import matplotlib.pyplot as plt 8 | import matplotlib.patches as mpatches 9 | import itertools 10 | from matplotlib.ticker import FixedLocator 11 | import subprocess 12 | import distutils.spawn 13 | 14 | ################################################################################################################################ 15 | # CONFIGURATION # 16 | ################################################################################################################################ 17 | 18 | # Number of realizations 19 | REALIZATIONS_NUM = 10 20 | 21 | # Traffic generation (actually INTERARRIVAL is INTERDEPARTURE from the host) 22 | # It is possible to configure multiple interarrival, each one with its value for number of generated ping, link up/down time 23 | INTERARRIVAL_VALUES = [0.001] # [1 , 0.1 , 0.01 , 0.001] 24 | PING_NUM_VALUES = [20000] # [20 , 200 , 2000 , 20000] 25 | LINK_DOWN = [10] # [10 , 10 , 10 , 10] 26 | LINK_UP = [10] # [10 , 10 , 10 , 10] 27 | 28 | # Range of values for Heartbeat requests generation timeout (X axis) 29 | # This creates DELTA_6_VALUES=[1.0, 0.5, 0.25, 0.125, 0.063, 0.032, 0.016, 0.008, 0.004, 0.002, 0.001] 30 | DELTA_6_VALUES = [1.0] 31 | for i in range(10): 32 | DELTA_6_VALUES.append(round(DELTA_6_VALUES[-1]/2,3)) 33 | 34 | # Range of values for Heartbeat reply timeout 35 | DELTA_7_VALUES = [0.1 , 0.05 , 0.025 , 0.01] 36 | 37 | # Value of Probe generation timeout 38 | delta_5 = 20 39 | 40 | ################################################################################################################################ 41 | ################################################################################################################################ 42 | 43 | # Check root privileges 44 | if os.geteuid() != 0: 45 | exit("You need to have root privileges to run this script") 46 | 
47 | # Check if hping3 is installed 48 | def is_tool(name): 49 | return distutils.spawn.find_executable(name) is not None 50 | 51 | if not is_tool('hping3'): 52 | subprocess.call("sudo apt-get -q -y install hping3".split()) 53 | 54 | # Close mininet/Ryu instances 55 | os.system("kill -9 $(pidof -x ryu-manager) 2> /dev/null") 56 | os.system('sudo mn -c 2> /dev/null') 57 | 58 | # These values are independent from the specific realization 59 | os.environ['DELTA_6_VALUES'] = str(DELTA_6_VALUES) 60 | os.environ['delta_5'] = str(delta_5) 61 | 62 | # results = { interarrival_1 : { delta_7 : [{ delta_6_a : losses_a , delta_6_b : losses_b } , { delta_6_a : losses_a , delta_6_b : losses_b }] } , ... } 63 | results = {} 64 | # results_avg = { interarrival_1 : { delta_7 : { delta_6_a : avg(losses_a) , delta_6_b : avg(losses_b) } } , ... } 65 | results_avg = {} 66 | # results_avg_positive_only is the same as results_avg, but without considering realizations without losses in the averaging 67 | results_avg_positive_only = {} 68 | 69 | timestamp = datetime.datetime.fromtimestamp( time.time() ).strftime('%Y-%m-%d %H:%M:%S') 70 | with open("SPIDER_results_final.txt", "a+") as out_file: 71 | out_file.write("Simulation started "+str(timestamp)+"\n") 72 | 73 | # NB: one realization produces one curve of fig7. 74 | # After fixing delta_7 and delta_5, fig7_ryu_app tests failure recovery swiping all the values of delta_6. 75 | # Each realization is repeated REALIZATIONS_NUM times. 76 | # NB2: if more than one value is provided in INTERARRIVAL_VALUES, the script will create mulltiple PNG files. 
77 | # total number of realizations 78 | tot_sim=len(INTERARRIVAL_VALUES)*len(DELTA_7_VALUES)*REALIZATIONS_NUM 79 | 80 | i=0 # index of current realization 81 | for idx,interarrival in enumerate(INTERARRIVAL_VALUES): 82 | results[interarrival] = {} 83 | results_avg[interarrival] = {} 84 | results_avg_positive_only[interarrival] = {} 85 | 86 | # These values are independent from the current data point of a realization (a specific delta_6 value), 87 | # but depends on the specific realization (specific values for interarrival and delta_7) 88 | os.environ['INTERARRIVAL'] = str(interarrival) 89 | os.environ['PING_NUM'] = str(PING_NUM_VALUES[idx]) 90 | os.environ['LINK_DOWN'] = str(LINK_DOWN[idx]) 91 | os.environ['LINK_UP'] = str(LINK_UP[idx]) 92 | 93 | for delta_7 in DELTA_7_VALUES: 94 | os.environ['delta_7'] = str(delta_7) 95 | 96 | results[interarrival][delta_7] = [] 97 | results_avg[interarrival][delta_7] = {} 98 | results_avg_positive_only[interarrival][delta_7] = {} 99 | 100 | # Realizations execution 101 | for realiz_num in range(REALIZATIONS_NUM): 102 | i+=1 103 | print('\n\x1B[31mSTARTING REALIZATION #'+str(i)+" of "+str(tot_sim)+" - [interarrival: "+str(interarrival)+" - delta_7: "+str(delta_7)+" ("+str(realiz_num+1)+"/"+str(REALIZATIONS_NUM)+") ] - "+str(100*i/tot_sim)+'%\x1B[0m\n') 104 | os.system("> /var/log/syslog") 105 | os.system("rm -f ~/ping.*txt") 106 | os.system('ryu-manager fig7_ryu_app.py') 107 | 108 | in_file = open("SPIDER_results.txt","r") 109 | results[interarrival][delta_7].append(eval(in_file.read())) 110 | in_file.close() 111 | 112 | # Results parsing 113 | for delta_6 in DELTA_6_VALUES: 114 | # ex: if results[interarrival][delta_7] = [ {0.001: 1, 0.01: 9, 0.1: 5, 1: 5} , {0.001: 1000, 0.01: 9, 0.1: 6, 1: 5} ] 115 | # and delta_6=0.1 and we want values = [5,6] 116 | values = [ results[interarrival][delta_7][x][delta_6] for x in range(REALIZATIONS_NUM)] 117 | values_positive_only = [x for x in values if x > 0] 118 | 
results_avg[interarrival][delta_7][delta_6] = sum(values)/float(len(values)) 119 | results_avg_positive_only[interarrival][delta_7][delta_6] = sum(values_positive_only)/float(len(values_positive_only)) 120 | 121 | # Store results in SPIDER_results_final.txt 122 | timestamp = datetime.datetime.fromtimestamp( time.time() ).strftime('%Y-%m-%d %H:%M:%S') 123 | with open("SPIDER_results_final.txt", "a+") as out_file: 124 | out_file.write("results="+str(results)+"\n") 125 | out_file.write("results_avg="+str(results_avg)+"\n") 126 | out_file.write("results_avg_positive_only="+str(results_avg_positive_only)+"\n") 127 | out_file.write("REALIZATIONS_NUM = "+str(REALIZATIONS_NUM)+"\n") 128 | out_file.write("INTERARRIVAL_VALUES = "+str(INTERARRIVAL_VALUES)+"\n") 129 | out_file.write("PING_NUM_VALUES = "+str(PING_NUM_VALUES)+"\n") 130 | out_file.write("LINK_DOWN = "+str(LINK_DOWN)+"\n") 131 | out_file.write("LINK_UP = "+str(LINK_UP)+"\n") 132 | out_file.write("DELTA_6_VALUES = "+str(DELTA_6_VALUES)+"\n") 133 | out_file.write("DELTA_7_VALUES = "+str(DELTA_7_VALUES)+"\n") 134 | out_file.write("Simulation finished "+str(timestamp)+"\n\n") 135 | 136 | os.system("chown mininet:mininet SPIDER_results_final.txt") 137 | os.system("chown mininet:mininet SPIDER_results.txt") 138 | 139 | print 140 | print("REALIZATIONS_NUM = "+str(REALIZATIONS_NUM)) 141 | print("INTERARRIVAL_VALUES = "+str(INTERARRIVAL_VALUES)) 142 | print("PING_NUM_VALUES = "+str(PING_NUM_VALUES)) 143 | print("LINK_DOWN = "+str(LINK_DOWN)) 144 | print("LINK_UP = "+str(LINK_UP)) 145 | print("DELTA_6_VALUES = "+str(DELTA_6_VALUES)) 146 | print("DELTA_7_VALUES = "+str(DELTA_7_VALUES)) 147 | 148 | print 149 | print("results_avg = { interarrival_a : { delta_7_a : { delta_6_a : losses_a , delta_6_b : losses_b } , ... } , ... 
}") 150 | print 151 | print("[results_avg]") 152 | for interarrival in results_avg: 153 | print('interarrival = '+str(interarrival)) 154 | pprint(results_avg[interarrival]) 155 | print 156 | 157 | print("[results_avg_positive_only]") 158 | for interarrival in results_avg_positive_only: 159 | print('interarrival = '+str(interarrival)) 160 | pprint(results_avg_positive_only[interarrival]) 161 | 162 | # Creates many PNG file /home/mininet/SPIDER_losses_rate_{interarrival}.png 163 | for interarrival in results_avg_positive_only: 164 | f,ax = plt.subplots() 165 | f.set_size_inches(19,12) 166 | x = sorted(results_avg_positive_only[interarrival].values()[0].keys(),reverse=True) 167 | # x is not uniform, but we'd like equally-spaced points 168 | fake_x = range(len(x)) 169 | marker = itertools.cycle(('D','^','s','o')) 170 | 171 | for delta_7 in sorted(results_avg_positive_only[interarrival],reverse=True): 172 | y = [results_avg_positive_only[interarrival][delta_7][delta_6] for delta_6 in x] 173 | ax.plot(fake_x,y,marker=marker.next(),color='black',label=str(delta_7)+' s',markersize=15,linewidth=3) 174 | 175 | plt.legend(loc=1,prop={'size':36}) 176 | ax.set_ylabel('Losses', fontsize=32) 177 | ax.set_xlabel('delta_6 [sec]', fontsize=32) 178 | ax.xaxis.set_major_locator(FixedLocator(fake_x)) 179 | ax.set_xticklabels(x) 180 | plt.savefig("/home/mininet/SPIDER_losses_rate_"+str(int(1/interarrival))+".png",dpi=50) 181 | plt.clf() 182 | -------------------------------------------------------------------------------- /results/fig8/fig8.py: -------------------------------------------------------------------------------- 1 | import pcapy, glob, os 2 | from pcapy import open_offline 3 | import impacket 4 | from impacket.ImpactDecoder import EthDecoder, LinuxSLLDecoder 5 | from pprint import pprint 6 | import matplotlib 7 | # Force matplotlib to not use any Xwindows backend. 
8 | #matplotlib.use('Agg') 9 | import matplotlib.pyplot as plt 10 | import re 11 | import subprocess 12 | import distutils.spawn 13 | 14 | ################################################################################################################################ 15 | # CONFIGURATION # 16 | ################################################################################################################################ 17 | 18 | # Number of realizations 19 | REALIZATIONS_NUM = 1 20 | 21 | # Incoming traffic initial rate 22 | PEAK_RATE = 200 # pkt/s 23 | 24 | # Decreasing step for incoming traffic 25 | STEP = 2 # pkt/sec 26 | 27 | # Range of Heartbeat requests rates 28 | HB_RATE_VALUES = [10,40,70,100] 29 | 30 | # Outgoing traffic rate 31 | TRAFFIC_RATE = 1000 32 | 33 | ################################################################################################################################ 34 | ################################################################################################################################ 35 | 36 | # TOPOLOGY 37 | 38 | # ----s5------ 39 | # / \ 40 | # s1 -- s2 -- s7 -- s3 -- s4 -- s6 41 | # | | | | 42 | # [H7] [H3] [H4] [H6] 43 | 44 | # demand H3->H6 is forwarded on path s3-s4-s6 45 | # demand H4->H7 is forwarded on path s4-s3-s7 46 | # demand H6->H3 is forwarded on path s6-s4-s5-s2 s7-s3 47 | # demand H7->H4 is forwarded on path s7-s2-s5-4s 48 | 49 | # The analized link is (S3-S4) 50 | # H3 generates traffic towards H6 at constant rate (TRAFFIC_RATE pkt/sec) 51 | # In absence of traffic coming from the opposite direction, this is used to trigger HB messages 52 | # H4 generates traffic towards H7 adecreasing rate (from PEAK_RATE to 0 pkt/sec) 53 | 54 | if os.geteuid() != 0: 55 | exit("You need to have root privileges to run this script") 56 | 57 | # Check if nmap is installed 58 | def is_tool(name): 59 | return distutils.spawn.find_executable(name) is not None 60 | 61 | if not is_tool('nmap'): 62 | subprocess.call("sudo 
apt-get -q -y install nmap".split()) 63 | 64 | 65 | numbers = re.compile(r'(\d+)') 66 | def numericalSort(value): 67 | parts = numbers.split(value) 68 | parts[1::2] = map(int, parts[1::2]) 69 | return parts 70 | 71 | # These two classes are needed to parse pcap files 72 | class Connection: 73 | """ This class can be used as a key in a dictionary to select a connection """ 74 | 75 | def __init__(self, p1): 76 | """ This constructor takes one tuple. The 1st element is the IP address as a string, and the 2nd is the port as an integer. """ 77 | self.p1 = p1 78 | 79 | def __cmp__(self, other): 80 | if (self.p1 == other.p1): 81 | return 0 82 | else: 83 | return -1 84 | 85 | def __hash__(self): 86 | return (hash(self.p1[0]) ^ hash(self.p1[1])) 87 | 88 | class Decoder: 89 | def __init__(self, pcapObj): 90 | # Query the type of the link and instantiate a decoder accordingly. 91 | datalink = pcapObj.datalink() 92 | if pcapy.DLT_EN10MB == datalink: 93 | self.decoder = EthDecoder() 94 | elif pcapy.DLT_LINUX_SLL == datalink: 95 | self.decoder = LinuxSLLDecoder() 96 | else: 97 | raise Exception("Datalink type not supported: " % datalink) 98 | 99 | self.pcap = pcapObj 100 | self.individual_counters = 0 101 | 102 | 103 | def start(self): 104 | # Sniff ad infinitum. PacketHandler shall be invoked by pcap for every packet. 105 | self.pcap.loop(0, self.packetHandler) 106 | return self.individual_counters 107 | 108 | def packetHandler(self, hdr, data): 109 | self.individual_counters += 1 110 | 111 | FIG8_BASE_DIR = "/home/mininet/spider/results/fig8" 112 | 113 | # Range of values for Heartbeat requests generation timeout 114 | DELTA_6_VALUES = ['%.6f' % (1/float(i)) for i in HB_RATE_VALUES] 115 | 116 | delete_all_folders=True 117 | if len(glob.glob(FIG8_BASE_DIR+"/HB_req_TO_*"))>0: 118 | msg="Some SIMULATIONS folders have been found! Do you want to delete them?" 
119 | delete_all_folders = True if raw_input("%s (y/N) " % msg).lower() == 'y' else False 120 | 121 | if (delete_all_folders): 122 | os.system("rm -fr "+FIG8_BASE_DIR+"/HB_req_TO_*") 123 | 124 | os.environ['TRAFFIC_RATE'] = str(TRAFFIC_RATE) 125 | os.environ['PEAK_RATE'] = str(PEAK_RATE) 126 | os.environ['STEP'] = str(STEP) 127 | 128 | # NB: one realization produces one instance of a plot of fig8. 129 | # After fixing hb_rate, each realization is repeated REALIZATIONS_NUM times by calling fig8_ryu_app. 130 | # Finally all the instances are vertically averaged. 131 | tot_sim=len(HB_RATE_VALUES)*REALIZATIONS_NUM 132 | curr_sim = 0 # index of current simulation 133 | for delta_6 in DELTA_6_VALUES: 134 | for realiz_num in range(REALIZATIONS_NUM): 135 | curr_sim+=1 136 | 137 | # Close mininet/Ryu instances 138 | os.system("sudo kill -9 $(pidof -x ryu-manager) 2> /dev/null") 139 | os.system("sudo mn -c 2> /dev/null") 140 | os.system("cd /var/log; > syslog;") 141 | 142 | print "\n\x1B[31mSTARTING SIMULATION #"+str(curr_sim)+" of "+str(tot_sim)+" - [delta_6: "+str(delta_6)+"] realization #"+str(realiz_num+1)+" of "+str(REALIZATIONS_NUM)+" - "+str(100*curr_sim/tot_sim)+"%\x1B[0m\n" 143 | 144 | os.environ['realiz_num'] = str(realiz_num+1) 145 | os.environ['delta_6'] = str(delta_6) 146 | os.system('ryu-manager fig8_ryu_app.py') 147 | 148 | # for each PCAP file we extract rates of data and probe 149 | individual_counters={} # contains the number of data/prove packets for each '1-second' slot, for each HB_rate, for each realization 150 | average_counters={} # it's the same as individual_counters but vertically averaged over the realizations 151 | 152 | for delta_6_idx,delta_6 in enumerate(DELTA_6_VALUES): 153 | individual_counters[delta_6] = {} 154 | for realiz_num in range(REALIZATIONS_NUM): 155 | individual_counters[delta_6][realiz_num] = {} 156 | # there's only one pcap in each folder, so this 'for' has just one iteration 157 | for pcap in 
glob.glob(FIG8_BASE_DIR+"/HB_req_TO_"+str(delta_6)+"/realiz_"+str(realiz_num+1)+"/*pcap"): 158 | print 'Parsing '+pcap 159 | 160 | os.system("rm -rf "+os.path.dirname(pcap)+"/split; mkdir "+os.path.dirname(pcap)+"/split ") 161 | # pcap file is splitted in many files of 1 second: split_{second}_{date_hour}.pcap 162 | os.system("editcap -i 1 "+pcap+" "+os.path.dirname(pcap)+"/split/split.pcap") 163 | for splitted_pcap in glob.glob(os.path.dirname(pcap)+"/split/*pcap"): 164 | sec = int(splitted_pcap.split('_')[-2]) 165 | individual_counters[delta_6][realiz_num][sec] = {} 166 | 167 | # Open file 168 | p = open_offline(splitted_pcap) 169 | p.setfilter(r'(mpls 16 && dst host 10.0.0.7)') # BPF syntax 170 | # Start decoding process. 171 | individual_counters[delta_6][realiz_num][sec]['data']=Decoder(p,).start() 172 | 173 | # Open file 174 | p = open_offline(splitted_pcap) 175 | p.setfilter(r'(mpls 21 && dst host 10.0.0.6)') # BPF syntax 176 | # Start decoding process. 177 | individual_counters[delta_6][realiz_num][sec]['probe']=Decoder(p,).start() 178 | 179 | average_counters[delta_6] = {} 180 | for realiz_num in range(REALIZATIONS_NUM): 181 | for pcap in glob.glob(FIG8_BASE_DIR+"/HB_req_TO_"+str(delta_6)+"/realiz_"+str(realiz_num+1)+"/*pcap"): 182 | for sec in individual_counters[delta_6][realiz_num]: 183 | if sec not in average_counters[delta_6]: 184 | average_counters[delta_6][sec] = {'data': 0, 'probe': 0} 185 | average_counters[delta_6][sec]['data'] += individual_counters[delta_6][realiz_num][sec]['data'] 186 | average_counters[delta_6][sec]['probe'] += individual_counters[delta_6][realiz_num][sec]['probe'] 187 | 188 | for sec in average_counters[delta_6]: 189 | average_counters[delta_6][sec]['data'] = average_counters[delta_6][sec]['data']/REALIZATIONS_NUM 190 | average_counters[delta_6][sec]['probe'] = average_counters[delta_6][sec]['probe']/REALIZATIONS_NUM 191 | 192 | # each plot has 3 lines (data, probe, data+probe) 193 | data = [] 194 | for idx,i in 
enumerate(average_counters[delta_6]): 195 | data.append(average_counters[delta_6][idx]['data']) 196 | probe = [] 197 | for idx,i in enumerate(average_counters[delta_6]): 198 | probe.append(average_counters[delta_6][idx]['probe']) 199 | tot = [] 200 | for idx,i in enumerate(average_counters[delta_6]): 201 | tot.append(average_counters[delta_6][idx]['probe']+average_counters[delta_6][idx]['data']) 202 | x = range(len(average_counters[delta_6])) 203 | 204 | print 'DATA coordinates for LateX' 205 | for i in x: 206 | print((i,data[i])) 207 | print 208 | print 'PROBE coordinates for LateX' 209 | for i in x: 210 | print((i,data[i])) 211 | print 212 | print 'PROBE+DATA coordinates for LateX' 213 | for i in x: 214 | print((i,data[i])) 215 | print 216 | 217 | fig = plt.figure() 218 | ax1 = fig.add_subplot(111) 219 | ax1.set_xlim([0,len(tot)-2]) 220 | ax1.set_ylim([0,max(data)+100]) 221 | ax1.set_ylabel('[pkt/sec]') 222 | ax1.set_xlabel('[sec]') 223 | ax1.set_position([0.05,0.05,0.9,0.9]) 224 | 225 | ax1.plot(x,data,'--',color='black',label='Data: from '+str(PEAK_RATE)+' to 0 [pkt/sec]') 226 | ax1.plot(x,probe,':',color='black',label='HB_reply:' + str(HB_RATE_VALUES[delta_6_idx])+' [pkt/sec]') 227 | ax1.plot(x,tot,color='black',label='Total') 228 | plt.legend(loc="best", bbox_to_anchor=[0.99,0.99], 229 | ncol=1, shadow=True, title="HB Overhead", fancybox=True) 230 | plt.savefig(FIG8_BASE_DIR+'/fig8_HB_rate_'+str(HB_RATE_VALUES[delta_6_idx])+'.png',dpi=400) 231 | 232 | print 'Close the plot to continue...' 233 | plt.show() 234 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 
8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. 
For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
debug = False

def log_debug(msg):
    """Print *msg* with a "[DEBUG] " prefix when the module-level debug flag is on."""
    if debug:
        print("[DEBUG] %s" % msg)

def edgify(nodelist):
    """Yield the consecutive (u, v) edge pairs along a node path.

    An empty or single-node path yields nothing.
    """
    for u, v in zip(nodelist, nodelist[1:]):
        yield (u, v)

def cost_func_ca(link_cap, res_cap, b, coeff=0.75):
    """Capacity-aware link cost for a demand of bandwidth *b*.

    Requires b <= res_cap <= link_cap (asserted). Returns sys.maxint when the
    demand would saturate the residual capacity, otherwise a cost that grows
    as the post-allocation spare capacity shrinks.
    """
    assert b <= res_cap <= link_cap
    spare = res_cap - b
    if spare == 0:
        return sys.maxint
    return coeff * (float(link_cap) / spare - 1)

def cost_func_inv(link_cap, res_cap, b, coeff=0.75):
    """Inverse residual-capacity cost; sys.maxint when the link is full.

    link_cap, b and coeff are accepted for signature compatibility with
    cost_func_ca but do not influence the result.
    """
    if res_cap == 0:
        return sys.maxint
    return 1.0 / res_cap
def get_backup_path(J, pp, n, weight=None):
    """Compute a backup (detour) path on the residual graph *J* for the
    primary path *pp* when node *n* is the one detecting the failure.

    If n is the ingress node (position 0 of pp) the detour is a plain
    end-to-end shortest path. Otherwise crankback is attempted: for every
    redirect node pp[x] with x <= position(n), a candidate detour made of the
    pp prefix plus a shortest path from pp[x] to the egress is evaluated, and
    the shortest candidate wins.

    Raises Crankbacker.NoBackupPathException when no detour exists.
    """
    bp = []

    # z is the position of the detection node n along the primary path,
    # 0 if n is the first (ingress) node.
    z = pp.index(n)
    if z == 0:
        try:
            bp = nx.shortest_path(J, pp[0], pp[-1], weight=weight)
        except nx.NetworkXNoPath:
            raise Crankbacker.NoBackupPathException()
    else:
        min_len = sys.maxint
        for x in range(z+1):
            # The portion of backup path upstream of the redirect node must
            # itself be present in the residual graph, i.e. have enough
            # capacity; otherwise crankback cannot reach pp[x].
            if x > 0 and (pp[x-1], pp[x]) not in J.edges():
                break
            try:
                # BUG FIX: pass weight= through here as well. The original
                # dropped it, so weighted detour computation silently fell
                # back to hop count in the crankback branch (while the z == 0
                # branch above did honour it).
                temp_bps = pp[0:x] + nx.shortest_path(J, pp[x], pp[-1], weight=weight)
                temp_len = x + len(temp_bps)
            except nx.NetworkXNoPath:
                # Try redirecting from the following node x+1
                continue
            else:
                if temp_len <= min_len:
                    min_len = temp_len
                    bp = temp_bps

    if len(bp) > 0:
        return bp
    else:
        raise Crankbacker.NoBackupPathException()
    def allocate_demands(self, new_dems, cost_func=None, given_pps=[], bp_node_disj=False):
        """Allocate, resize or remove traffic demands and (re)compute their
        primary and backup paths on the residual-capacity graph.

        new_dems -- dict mapping (s, t) to requested bandwidth b; b == 0
                    removes a previously allocated demand.
        cost_func -- optional link-cost function (link_cap, min_res_cap, b);
                    when given, shortest paths are weighted by its output.
        given_pps -- mapping (s, t) -> precomputed primary path; those demands
                    skip primary-path computation.
                    NOTE(review): mutable default argument ([]); harmless here
                    because it is only read, never mutated, but a dict is the
                    type actually expected when non-empty.
        bp_node_disj -- if True, backup paths avoid every intermediate node of
                    the primary path, not just the failed next-hop.

        Returns a dict (s, t) -> b of the demands newly allocated in this call.
        """
        # Dictionary of the demands that still need to be allocated
        dems_to_allocate = dict()

        if cost_func is not None:
            weighted = True
            w_arg = 'w'
        else:
            weighted = False
            w_arg = None

        for (s,t),b in new_dems.items():

            assert b >= 0, "bandwith must be greater or equal to zero for demand {},{}".format(s,t)
            assert s in self.G.nodes(), "s node {} not found in G".format(s)
            assert t in self.G.nodes(), "t node {} not found in G".format(t)

            free_cap = 0 # capacity to de-allocate

            # the demand has never been allocated before
            if (s,t) not in self.dems:
                if b > 0:
                    # remember to process this demand
                    dems_to_allocate[s,t] = b
                else:
                    # b = 0 for a demand we never allocated: nothing to do
                    continue
            # the demand was previously allocated, but the requested bandwidth
            # is larger than the one already allocated
            elif b > self.dems[s,t]:
                # de-allocate, remove, and remember to re-compute
                free_cap = self.dems[s,t]
                dems_to_allocate[s,t] = b
            # the demand was previously allocated, but the requested bandwidth
            # is 0, or less than or equal to the one already allocated
            else:
                # de-allocate the difference
                free_cap = self.dems[s,t] - b

            # if there is capacity to de-allocate, proceed
            if free_cap > 0:
                # for every link / possible failure on the primary path
                for (n,m) in edgify(self.pps[s,t]):
                    self.pp_res_caps[n,m] += free_cap
                    # If a backup path exists for (s,t) with failure n,m
                    if (s,t) in self.bps and (n,m) in self.bps[s,t]:
                        # give back the subtracted capacity to every link of
                        # the backup path
                        for (i,j) in edgify(self.bps[s,t][n,m]):
                            self.lf_res_caps[i,j][n,m] += free_cap
                            self.lf_res_caps[i,j][m,n] += free_cap
                            self.nf_res_caps[i,j][m] += free_cap

                # if a full removal was requested, remove the demand
                if free_cap == self.dems[s,t]:
                    del self.pps[s,t]
                    del self.bps[s,t]
                    del self.dems[s,t]
        """
        Mi ritrovo adesso con dems_to_allocate per cui devo calcolare il prymary parh
        """
        # (i.e.: dems_to_allocate now holds the demands whose primary path
        # must be computed)

        log_debug("OK, i've got {} dems to allocate".format(len(dems_to_allocate)))

        #############################################################
        #################  PRIMARY EVALUATION #######################
        #############################################################
        allocated_pps = dict()

        # Process the demands in decreasing order of bandwidth b
        for ((s,t),b) in sorted(dems_to_allocate.items(), key=operator.itemgetter(1), reverse=True):

            # sanity: the demand must not be currently allocated
            assert (s,t) not in self.pps, "a primary path already exists for demand ({},{}) >> {}".format(s,t,self.pps[s,t])
            assert (s,t) not in self.dems, "dems already contains demand ({},{}) >> {}".format(s,t, self.dems[s,t])

            if (s,t) in given_pps:
                # The PP was given by the caller, no need to compute it
                pp = given_pps[s,t]
                log_debug("PP for demand ({},{}) already given: {}".format(s,t,pp))

            else:
                log_debug("Processing PP for demand ({},{}): {}".format(s,t,b))

                # links temporarily removed from the working graph
                removed_edges = []

                # remove from the graph every link that a priori cannot host
                # the demand (not enough capacity); otherwise compute its cost
                for (i,j) in self.G.edges():
                    # worst-case residual capacity of (i,j) over the no-failure,
                    # link-failure and node-failure scenarios
                    min_res_cap = min([self.pp_res_caps[i,j]]
                        + self.lf_res_caps[i,j].values()
                        + self.nf_res_caps[i,j].values())
                    if b > min_res_cap:
                        removed_edges.append((i,j))
                    elif weighted:
                        self.G[i][j][w_arg] = cost_func(self.link_caps[i,j], min_res_cap, b)

                self.G.remove_edges_from(removed_edges)
                log_debug("Removed edges %s"%removed_edges)

                # compute the shortest path
                try:
                    pp = nx.shortest_path(self.G, source=s, target=t, weight=w_arg)
                except nx.NetworkXNoPath:
                    if self.print_alerts:
                        print "PP skipped for d=({},{}) (No shortest path between s and t)".format(s,t)
                    continue;
                finally:
                    # always restore the removed links
                    self.G.add_edges_from(removed_edges)
                    log_debug("Re-added edges %s"%(removed_edges))

            # print "pps[{},{}] = {}".format(s,t,self.pps[s,t])
            # OK shortest path found
            self.pps[s,t] = pp
            self.dems[s,t] = b
            allocated_pps[s,t] = b

            pp_edges = [e for e in edgify(self.pps[s,t])]
            for (i,j) in pp_edges:
                # update the remaining capacity of links used by the evaluated
                # primary path
                self.pp_res_caps[i,j] -= b
                assert self.pp_res_caps[i,j] >= 0, "BUG? pp_res_caps[{},{}] should be greater or equal to 0, instead its {}. Processing pp for demand {},{}".format(i,j,self.pp_res_caps[i,j],s,t)
                for (n,m) in self.lf_res_caps[i,j]:
                    # NOTE(review): with `or` this guard is true for every
                    # failure pair (n,m) unless the link lies on the pp in
                    # BOTH directions, which a simple path never does — so it
                    # never skips anything. `and` ("neither direction is on
                    # the pp") looks like the intended condition; confirm
                    # before changing.
                    if (n,m) not in pp_edges or (m,n) not in pp_edges:
                        self.lf_res_caps[i,j][n,m] -= b
                        assert self.lf_res_caps[i,j][n,m] >= 0, "BUG? lf_caps[{},{}][{},{}] should be greater or equal to 0, instead its {}. Processing pp for demand {},{}".format(i,j,n,m,self.lf_res_caps[i,j][n,m],s,t)
                for m in self.nf_res_caps[i,j]:
                    if m not in self.pps[s,t]:
                        self.nf_res_caps[i,j][m] -= b
                        assert self.nf_res_caps[i,j][m] >= 0, "BUG? nf_caps[{},{}][{}] should be greater or equal to 0, instead its {}. Processing pp for demand {},{}".format(i,j,m,self.nf_res_caps[i,j][m],s,t)

        #############################################################
        #################  BACKUP EVALUATION  #######################
        #############################################################

        for ((s,t),b) in sorted(allocated_pps.items(), key=operator.itemgetter(1), reverse=True):

            log_debug("Processing BP for demand ({},{}): {}".format(s,t,b))

            # for each demand and every link in its primary path we evaluate a
            # new path in case that link is not available anymore

            assert (s,t) not in self.bps, "backup paths already exist for demand ({},{}) >> {}".format(s,t, self.bps[s,t])

            # for every link / possible failure, compute the detour
            for (n,m) in edgify(self.pps[s,t]):

                if (s,t) in self.bps:
                    assert (n,m) not in self.bps[s,t], "backup paths alread exist for demand ({},{}) for failure ({},{}) >> {}".format(s,t,n,m,self.bps[s,t][n,m])

                removed_edges = []
                if len(self.pps[s,t]) == 2:
                    removed_edges.extend([(s,t), (t,s)])
                # remove every intermediate node (its in/out links) of the pp
                # if a node-disjoint bp was requested, otherwise only m
                if bp_node_disj:
                    nodes_to_remove = self.pps[s,t][1:-1]
                else:
                    nodes_to_remove = [m]
                for x in nodes_to_remove:
                    if (x == t):
                        # if x (i.e. m) is the terminal node, remove only the
                        # link (n,m) and not the whole node
                        removed_edges.extend([(n,m), (m,n)])
                    else:
                        # remove all outgoing links
                        removed_edges.extend(self.G.edges(x))
                        # ...and the links entering x (a bit tricky)
                        for u in self.G.successors(x):
                            if self.G.has_edge(u,x):
                                removed_edges.append((u,x))

                # remove every link without enough capacity, both under link
                # failure and under node failure
                for (i,j) in self.G.edges():
                    min_res_cap = min(self.lf_res_caps[i,j][n,m], self.lf_res_caps[i,j][m,n], self.nf_res_caps[i,j][m])
                    if b > min_res_cap:
                        removed_edges.append((i,j))
                    elif weighted:
                        self.G[i][j][w_arg] = cost_func(self.link_caps[i,j], min_res_cap, b)

                self.G.remove_edges_from(removed_edges)
                log_debug("Removed edges %s"%removed_edges)

                try:
                    bp = get_backup_path(self.G, pp=self.pps[s,t], n=n, weight=w_arg)
                except self.NoBackupPathException:
                    if self.print_alerts:
                        print "BP skipped for d=({},{}), f=({},{}) (No path between s and t)".format(s,t,n,m)
                    # Next failure
                    continue;
                else:
                    if (s,t) not in self.bps:
                        self.bps[s,t] = dict()
                    self.bps[s,t][n,m] = bp
                    # Update residual capacity
                    for (i,j) in edgify(self.bps[s,t][n,m]):
                        self.lf_res_caps[i,j][n,m] -= b
                        self.lf_res_caps[i,j][m,n] -= b
                        self.nf_res_caps[i,j][m] -= b
                        assert self.lf_res_caps[i,j][n,m] >= 0, "BUG? lf_caps[{},{}][{},{}] should be greater or equal to 0, instead its {}. Processing bp for demand {},{}".format(i,j,n,m,self.lf_res_caps[i,j][n,m],s,t)
                        assert self.lf_res_caps[i,j][m,n] >= 0, "BUG? lf_caps[{},{}][{},{}] should be greater or equal to 0, instead its {}. Processing bp for demand {},{}".format(i,j,m,n,self.lf_res_caps[i,j][m,n],s,t)
                        assert self.nf_res_caps[i,j][m] >= 0, "BUG? nf_caps[{},{}][{}] should be greater or equal to 0, instead its {}. Processing bp for demand {},{}".format(i,j,m,self.nf_res_caps[i,j][m],s,t)
                finally:
                    # always restore the removed links
                    self.G.add_edges_from(removed_edges)
                    log_debug("Re-added edges %s"%removed_edges)

        ##############################################
        #################  DONE  #####################
        ##############################################

        return allocated_pps
class SPIDER(app_manager.RyuApp):
    """Ryu application implementing the SPIDER fault-tolerant pipeline.

    On startup it parses the AMPL results and network.xml, builds and launches
    the Mininet topology, then installs the pre-computed OpenState flow
    entries on every switch that completes the OpenFlow handshake.
    """
    OFP_VERSIONS = [ofproto.OFP_VERSION]

    def __init__(self, *args, **kwargs):
        super(SPIDER, self).__init__(*args, **kwargs)

        # Invalidate cached figures when the results file changed since the
        # last run.
        results_hash = SPIDER_parser.md5sum_results()
        if SPIDER_parser.network_has_changed(results_hash):
            SPIDER_parser.erase_figs_folder()

        (self.requests,self.faults) = SPIDER_parser.parse_ampl_results_if_not_cached()

        print len(self.requests), 'requests loaded'
        print len(self.faults), 'faults loaded'

        print "Building network graph from network.xml..."
        # G is a NetworkX Graph object
        (self.G, self.pos, self.hosts, self.switches, self.mapping) = SPIDER_parser.parse_network_xml()
        print 'Network has', len(self.switches), 'switches,', self.G.number_of_edges()-len(self.hosts), 'links and', len(self.hosts), 'hosts'

        print "NetworkX to Mininet topology conversion..."
        # mn_topo is a Mininet Topo object
        self.mn_topo = SPIDER_parser.networkx_to_mininet_topo(self.G, self.hosts, self.switches, self.mapping)
        # mn_net is a Mininet object
        self.mn_net = SPIDER_parser.create_mininet_net(self.mn_topo)

        SPIDER_parser.launch_mininet(self.mn_net)

        self.ports_dict = SPIDER_parser.adapt_mn_topo_ports_to_old_API(self.mn_topo.ports)

        SPIDER_parser.mn_setup_MAC_and_IP(self.mn_net)

        SPIDER_parser.mn_setup_static_ARP_entries(self.mn_net)

        SPIDER_parser.draw_network_topology(self.G,self.pos,self.ports_dict,self.hosts)

        # Pre-compute every flow entry, including the variants parameterized
        # by detection timeout and flowlet (burst) timeout, before any switch
        # connects.
        (self.fault_ID, self.flow_entries_dict, self.flow_entries_with_detection_timeouts_dict, self.flow_entries_with_flowlet_timeouts_dict) = SPIDER_parser.generate_flow_entries_dict(self.requests,self.faults,self.ports_dict,match_flow=SPIDER_parser.get_mac_match_mininet,check_cache=True,dpctl_script=True)

        #SPIDER_parser.print_flow_stats(SPIDER_parser.get_flow_stats_dict(self.flow_entries_dict))

        # Associates dp_id to datapath object
        self.dp_dictionary=dict()
        # Associates dp_id to a dict associating port<->MAC address
        self.ports_mac_dict=dict()

        # Needed by SPIDER_ctrl_REST
        self.SPIDER_parser = SPIDER_parser

    @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
    def switch_features_handler(self, ev):
        """Configure a switch as soon as it completes the OF handshake:
        query features/ports, set up the stateful stages and install flows."""
        msg = ev.msg
        datapath = msg.datapath

        self.ports_mac_dict[datapath.id] = dict()
        self.send_features_request(datapath)
        self.send_port_desc_stats_request(datapath)

        self.configure_stateful_stages(datapath)
        self.install_flows(datapath)

        # remember the datapath so later timeout/stats updates can reach it
        self.dp_dictionary[datapath.id] = datapath

    def install_flows(self,datapath):
        """Push every pre-computed flow entry for this switch via OFPFlowMod."""
        print("Configuring flow table for switch %d" % datapath.id)

        if datapath.id in self.flow_entries_dict.keys():
            for table_id in self.flow_entries_dict[datapath.id]:
                for match in self.flow_entries_dict[datapath.id][table_id]:
                    mod = ofparser.OFPFlowMod(
                        datapath=datapath, cookie=0, cookie_mask=0, table_id=table_id,
                        command=ofproto.OFPFC_ADD, idle_timeout=0, hard_timeout=0,
                        priority=self.flow_entries_dict[datapath.id][table_id][match]['priority'], buffer_id=ofproto.OFP_NO_BUFFER,
                        out_port=ofproto.OFPP_ANY,
                        out_group=ofproto.OFPG_ANY,
                        flags=0, match=match, instructions=self.flow_entries_dict[datapath.id][table_id][match]['inst'])
                    datapath.send_msg(mod)

    def send_features_request(self, datapath):
        """Ask the switch for its OpenFlow features."""
        req = ofparser.OFPFeaturesRequest(datapath)
        datapath.send_msg(req)

    def configure_stateful_stages(self, datapath):
        """Make tables 2 and 3 stateful and set their OpenState lookup/update
        key extractors (MAC pair for table 2, metadata for table 3)."""
        # NOTE(review): node_dict appears unused in this method — confirm
        # whether the call has a needed side effect before removing it.
        node_dict = SPIDER_parser.create_node_dict(self.ports_dict,self.requests)

        self.send_table_mod(datapath, table_id=2)
        self.send_key_lookup(datapath, table_id=2, fields=[ofproto.OXM_OF_ETH_SRC,ofproto.OXM_OF_ETH_DST])
        self.send_key_update(datapath, table_id=2, fields=[ofproto.OXM_OF_ETH_SRC,ofproto.OXM_OF_ETH_DST])

        self.send_table_mod(datapath, table_id=3)
        self.send_key_lookup(datapath, table_id=3, fields=[ofproto.OXM_OF_METADATA])
        self.send_key_update(datapath, table_id=3, fields=[ofproto.OXM_OF_METADATA])

    def configure_global_states(self, datapath):
        """Set one global-state bit per physical port (bit index = port-1)."""
        for port in self.ports_mac_dict[datapath.id]:
            if port!=ofproto.OFPP_LOCAL:
                (global_state, global_state_mask) = osparser.masked_global_state_from_str("1",port-1)
                msg = osparser.OFPExpSetGlobalState(datapath=datapath, global_state=global_state, global_state_mask=global_state_mask)
                datapath.send_msg(msg)

    def send_table_mod(self, datapath, table_id, stateful=1):
        """Mark a table as stateful (OpenState experimenter message)."""
        req = osparser.OFPExpMsgConfigureStatefulTable(datapath=datapath, table_id=table_id, stateful=stateful)
        datapath.send_msg(req)

    def send_key_lookup(self, datapath, table_id, fields):
        """Configure the OpenState lookup-scope extractor for a table."""
        key_lookup_extractor = osparser.OFPExpMsgKeyExtract(datapath=datapath, command=osproto.OFPSC_EXP_SET_L_EXTRACTOR, fields=fields, table_id=table_id)
        datapath.send_msg(key_lookup_extractor)

    def send_key_update(self, datapath, table_id, fields):
        """Configure the OpenState update-scope extractor for a table."""
        key_update_extractor = osparser.OFPExpMsgKeyExtract(datapath=datapath, command=osproto.OFPSC_EXP_SET_U_EXTRACTOR, fields=fields, table_id=table_id)
        datapath.send_msg(key_update_extractor)

    def set_link_down(self,node1,node2):
        """Emulate a link failure by shutting both interface ends of the
        (node1, node2) link via ifconfig. Node order is normalized."""
        if(node1 > node2):
            node1,node2 = node2,node1

        os.system('sudo ifconfig s'+str(node1)+'-eth'+str(self.ports_dict['s'+str(node1)]['s'+str(node2)])+' down')
        os.system('sudo ifconfig s'+str(node2)+'-eth'+str(self.ports_dict['s'+str(node2)]['s'+str(node1)])+' down')

    def set_link_up(self,node1,node2):
        """Restore a previously failed link by bringing both interface ends
        back up via ifconfig. Node order is normalized."""
        if(node1 > node2):
            node1,node2 = node2,node1

        os.system('sudo ifconfig s'+str(node1)+'-eth'+str(self.ports_dict['s'+str(node1)]['s'+str(node2)])+' up')
        os.system('sudo ifconfig s'+str(node2)+'-eth'+str(self.ports_dict['s'+str(node2)]['s'+str(node1)])+' up')


    def send_port_desc_stats_request(self, datapath):
        """Ask the switch for its port descriptions (to learn port MACs)."""
        req = ofparser.OFPPortDescStatsRequest(datapath, 0)
        datapath.send_msg(req)


    @set_ev_cls(ofp_event.EventOFPPortDescStatsReply, MAIN_DISPATCHER)
    def port_desc_stats_reply_handler(self, ev):
        """Learn port<->MAC associations, then program the global states."""
        # store the association port<->MAC address
        for p in ev.msg.body:
            self.ports_mac_dict[ev.msg.datapath.id][p.port_no]=p.hw_addr

        self.configure_global_states(ev.msg.datapath)

    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
    def _packet_in_handler(self, ev):
        """Log packets punted to the controller; per the message below these
        are expected only when a second fault occurs."""
        msg = ev.msg
        datapath = msg.datapath

        pkt = packet.Packet(msg.data)
        header_list = dict((p.protocol_name, p) for p in pkt.protocols if type(p) != str)

        #discard IPv6 multicast packets
        if not header_list['ethernet'].dst.startswith('33:33:'):
            print("\nSecond fault detected: packet received by the CTRL")
            print(pkt)

    @set_ev_cls(ofp_event.EventOFPExperimenterStatsReply, MAIN_DISPATCHER)
    def state_stats_reply_handler(self, ev):
        """Reset switch state on experimenter stats replies: per-flow states
        back to 0 (exp_type 0) or all global states (exp_type 1)."""
        msg = ev.msg
        dp = msg.datapath

        if ev.msg.body.exp_type==0:
            # EXP_STATE_STATS: zero every non-default flow state entry
            stats = osparser.OFPStateStats.parser(ev.msg.body.data, offset=0)
            for stat in stats:
                if stat.entry.key != []:
                    msg = osparser.OFPExpMsgSetFlowState(
                        datapath=dp, state=0, keys=stat.entry.key, table_id=stat.table_id)
                    dp.send_msg(msg)
        elif ev.msg.body.exp_type==1:
            # EXP_GLOBAL_STATE_STATS: reset and re-program the global states
            stat = osparser.OFPGlobalStateStats.parser(ev.msg.body.data, offset=0)
            msg = osparser.OFPExpResetGlobalState(datapath=dp)
            dp.send_msg(msg)
            self.configure_global_states(dp)


    def timeout_probe(self,timeout):
        """Switch every datapath to the flow-entry variant pre-computed for
        the given detection timeout (OFPFC_MODIFY on the affected entries)."""
        SPIDER_parser.selected_detection_timeouts = timeout

        for datapath_id in self.flow_entries_with_detection_timeouts_dict[timeout]:
            for table_id in self.flow_entries_with_detection_timeouts_dict[timeout][datapath_id]:
                for match in self.flow_entries_with_detection_timeouts_dict[timeout][datapath_id][table_id]:
                    mod = ofparser.OFPFlowMod(
                        datapath=self.dp_dictionary[datapath_id], cookie=0, cookie_mask=0, table_id=table_id,
                        command=ofproto.OFPFC_MODIFY, idle_timeout=0, hard_timeout=0,
                        priority=self.flow_entries_with_detection_timeouts_dict[timeout][datapath_id][table_id][match]['priority'], buffer_id=ofproto.OFP_NO_BUFFER,
                        out_port=ofproto.OFPP_ANY,
                        out_group=ofproto.OFPG_ANY,
                        flags=0, match=match, instructions=self.flow_entries_with_detection_timeouts_dict[timeout][datapath_id][table_id][match]['inst'])
                    self.dp_dictionary[datapath_id].send_msg(mod)

    def timeout_burst(self,burst):
        """Switch every datapath to the flow-entry variant pre-computed for
        the given flowlet (burst) timeout (OFPFC_MODIFY on the entries)."""
        SPIDER_parser.selected_flowlet_timeouts = burst

        for datapath_id in self.flow_entries_with_flowlet_timeouts_dict[burst]:
            for table_id in self.flow_entries_with_flowlet_timeouts_dict[burst][datapath_id]:
                for match in self.flow_entries_with_flowlet_timeouts_dict[burst][datapath_id][table_id]:
                    mod = ofparser.OFPFlowMod(
                        datapath=self.dp_dictionary[datapath_id], cookie=0, cookie_mask=0, table_id=table_id,
                        command=ofproto.OFPFC_MODIFY, idle_timeout=0, hard_timeout=0,
                        priority=self.flow_entries_with_flowlet_timeouts_dict[burst][datapath_id][table_id][match]['priority'], buffer_id=ofproto.OFP_NO_BUFFER,
                        out_port=ofproto.OFPP_ANY,
                        out_group=ofproto.OFPG_ANY,
                        flags=0, match=match, instructions=self.flow_entries_with_flowlet_timeouts_dict[burst][datapath_id][table_id][match]['inst'])
                    self.dp_dictionary[datapath_id].send_msg(mod)

    def send_state_stats_request(self):
        """Request per-flow state stats from every known datapath."""
        for datapath_id in self.dp_dictionary:
            req = osparser.OFPExpStateStatsMultipartRequest(datapath=self.dp_dictionary[datapath_id])
            self.dp_dictionary[datapath_id].send_msg(req)

    def send_global_state_stats_request(self):
        """Request global-state stats from every known datapath."""
        for datapath_id in self.dp_dictionary:
            req = osparser.OFPExpGlobalStateStatsMultipartRequest(datapath=self.dp_dictionary[datapath_id])
            self.dp_dictionary[datapath_id].send_msg(req)
5 | matplotlib.use('Agg') 6 | import matplotlib.pyplot as plt 7 | import matplotlib.patches as mpatches 8 | import math 9 | from datetime import datetime 10 | import numpy as np 11 | import time 12 | import operator 13 | from pprint import pprint 14 | import itertools 15 | 16 | ################################################################################################################################ 17 | # CONFIGURATION # 18 | ################################################################################################################################ 19 | 20 | # Number of realizations 21 | REALIZATIONS_NUM = 20 22 | 23 | INDIVIDUAL_TRAFFIC_RATE = 100 # [pkt/sec] 24 | REQUESTS_RANGE = [5,10,15,20,25,30,35] 25 | 26 | LINK_DOWN = 5 # [sec] 27 | LINK_UP = 5 # [sec] 28 | ENABLE_FAULT = 'yes' 29 | 30 | # additional delay switch-controller, in ms (OF only) 31 | RTT_DELAY_LIST = [0,3,6,12] # [ms] 32 | 33 | # detection timeouts (SPIDER only) 34 | delta_6 = 0.002 # sec] 35 | delta_7 = 0.001 # [sec] 36 | delta_5 = 20 # [sec] 37 | 38 | ################################################################################################################################ 39 | ################################################################################################################################ 40 | 41 | if os.geteuid() != 0: 42 | exit("You need to have root privileges to run this script") 43 | 44 | os.system('sudo tc qdisc add dev lo root netem delay 0ms') 45 | os.system('sudo tc qdisc change dev lo root netem delay 0ms') 46 | 47 | delete_all_files=True 48 | if len(glob.glob("/home/mininet/ping*.txt"))>0: 49 | msg="Some ping TXT files have been found! Do you want to delete them?" 50 | delete_all_files = True if raw_input("%s (y/N) " % msg).lower() == 'y' else False 51 | 52 | if delete_all_files==False: 53 | exit() 54 | 55 | if len(glob.glob("/home/mininet/ping*.bak"))>0: 56 | print("Some ping BAK files have been found! 
Save them or remove them :)\nrm -f ~/*.bak") 57 | exit() 58 | 59 | # Remove old data 60 | os.system('rm /home/mininet/ping*.txt') 61 | 62 | # Close mininet/Ryu instances 63 | os.system("kill -9 $(pidof -x ryu-manager) 2> /dev/null") 64 | os.system("sudo mn -c 2> /dev/null") 65 | 66 | # Environment variables 67 | os.environ['interarrival'] = str(1/float(INDIVIDUAL_TRAFFIC_RATE)) 68 | os.environ['LINK_DOWN'] = str(LINK_DOWN) 69 | os.environ['LINK_UP'] = str(LINK_UP) 70 | os.environ['ENABLE_FAULT'] = ENABLE_FAULT 71 | os.environ['RTT_DELAY_LIST'] = str(RTT_DELAY_LIST) 72 | os.environ['REALIZATIONS_NUM'] = str(REALIZATIONS_NUM) 73 | os.environ['delta_6'] = str(delta_6) 74 | os.environ['delta_7'] = str(delta_7) 75 | os.environ['delta_5'] = str(delta_5) 76 | 77 | # packets lost because no reply packet has been received (extracted from ping output) 78 | tot_lost_ping_OF={} # {N1: {RTT1: [tot_losses_1,tot_losses_2,..] , RTT2: [tot_losses_1,tot_losses_2,..] , ...} , N2: {...} , ...} 79 | tot_lost_ping_SPIDER={} # {N1: [tot_losses_1,tot_losses_2,..] , N2: [tot_losses_1,tot_losses_2,..] , ...} 80 | 81 | # NB: one realization produces one point of of each curve of fig7. 82 | # Each realization is repeated REALIZATIONS_NUM times. 
83 | # total number of realizations 84 | tot_sim=len(REQUESTS_RANGE)*2 85 | 86 | i=1 # index of current realization 87 | for N in REQUESTS_RANGE: 88 | # number of requests generating traffic 89 | # NB: even if greater than the # of requests passing from the wrost link, there's no problem 90 | os.environ['N'] = str(N) 91 | 92 | # create results.txt 93 | f = open('results.txt','w') 94 | for r in range(1,N+1): 95 | f.write('set PrimaryPath['+str(r)+'] := 1 2 3 '+str(4+r)+';\n') 96 | f.write('set PrimaryPath['+str(r+N)+'] := '+str(4+r)+' 3 4 1;\n') 97 | f.write('\n') 98 | 99 | for r in range(1,N+1): 100 | f.write('param DetectNode[2,3,'+str(r)+']:= 2;\n') 101 | f.write('\n') 102 | 103 | for r in range(1,N+1): 104 | f.write('set DetourPath[2,3,'+str(r)+'] := 1 4 3;\n') 105 | f.write('\n') 106 | 107 | f.close() 108 | 109 | # TOPOLOGY 110 | 111 | # -- s4 -- -- s(4+1) -- [H 4+1] 112 | # / \ / 113 | # [H1] -- s1 -- s2 -- s3 -- s(4+2) -- [H 4+2] 114 | # \ 115 | # -- s(4+N) -- [H 4+N] 116 | # 117 | # The analized fault is (S2-S3) 118 | # H1 generates traffic towards H5,H6,...H(4+N) 119 | # Primary path from H1 to Hx is s1-s2-s3-s(4+x) 120 | # Backup path from H1 to Hx is s1-s4-s3-s(4+x) 121 | # Reverse path from Hx to H1 is s(4+x)-s3-s4-sq 122 | 123 | # create network.xml 124 | f = open('network.xml','w') 125 | network_xml_string=""" 126 | 127 | 128 | 129 | 130 | 131 | 0 132 | 100 133 | 134 | 135 | 136 | 137 | 100 138 | 100 139 | 140 | 141 | 142 | 143 | 200 144 | 100 145 | 146 | 147 | 148 | 149 | 100 150 | 0 151 | 152 | 153 | """ 154 | f.write(network_xml_string) 155 | for r in range(1,N+1): 156 | f.write(' \n \n 300\n '+str(r)+'\n \n \n') 157 | 158 | network_xml_string=""" 159 | 160 | 161 | N1 162 | N2 163 | 164 | 165 | N2 166 | N3 167 | 168 | 169 | N1 170 | N4 171 | 172 | 173 | N3 174 | N4 175 | 176 | """ 177 | f.write(network_xml_string) 178 | for r in range(1,N+1): 179 | f.write(' \n N3\n N'+str(4+r)+'\n \n') 180 | 181 | network_xml_string=""" 182 | 183 | 184 | """ 185 | 
f.write(network_xml_string) 186 | for r in range(1,N+1): 187 | f.write(' \n N1\n N'+str(4+r)+'\n 10\n \n') 188 | for r in range(1,N+1): 189 | f.write(' \n N'+str(4+r)+'\n N1\n 10\n \n') 190 | network_xml_string=""" 191 | 192 | 193 | """ 194 | f.write(network_xml_string) 195 | f.close() 196 | 197 | # launch controller 198 | print('\n\x1B[31mSTARTING OF SIMULATION #'+str(i)+" of "+str(tot_sim)+" (#REQ:"+str(N)+") - "+str(100*i/tot_sim)+'%\n\x1B[0m') 199 | os.system("> /var/log/syslog") 200 | os.system('ryu-manager fig9_OF_ryu_app.py') 201 | os.system("kill -9 $(pidof -x ryu-manager) 2> /dev/null") 202 | os.system("sudo mn -c 2> /dev/null") 203 | for delay in RTT_DELAY_LIST: 204 | for sim_num in range(REALIZATIONS_NUM): 205 | lost=[] # list of lost packets for each request H1->H(4+x) 206 | for txt in glob.glob("/home/mininet/ping_OF*."+str(delay)+"rtt.sim"+str(sim_num)+".txt"): 207 | rx=os.popen("cat "+txt+" | grep transmitted | awk '{print $4}'").read() # received packets 208 | tx=os.popen("cat "+txt+" | grep transmitted | awk '{print $1}'").read() # transmitted packets 209 | lost.append(int(tx)-int(rx)) 210 | if N not in tot_lost_ping_OF: 211 | tot_lost_ping_OF[N] = {} 212 | if not delay in tot_lost_ping_OF[N]: 213 | tot_lost_ping_OF[N][delay] = [] 214 | tot_lost_ping_OF[N][delay].append(sum(lost)) # total number of packets with no reply 215 | #os.system('for file in /home/mininet/ping_OF*.'+str(delay)+'rtt.sim'+str(sim_num)+'.txt; do mv "$file" "${file%.txt}'+'.OF.simnum'+str(sim_num)+'.bak"; done') 216 | os.system('for file in /home/mininet/ping_OF*.'+str(delay)+'rtt.sim'+str(sim_num)+'.txt; do rm "$file"; done') 217 | i+=1 218 | 219 | # launch controller 220 | print('\n\x1B[31mSTARTING SPIDER SIMULATION #'+str(i)+" of "+str(tot_sim)+" (#REQ:"+str(N)+") - "+str(100*i/tot_sim)+'%\n\x1B[0m') 221 | os.system("> /var/log/syslog") 222 | os.system('ryu-manager fig9_SPIDER_ryu_app.py') 223 | os.system("kill -9 $(pidof -x ryu-manager) 2> /dev/null") 224 | os.system("sudo 
mn -c 2> /dev/null") 225 | os.system('sudo tc qdisc change dev lo root netem delay 0ms') 226 | for sim_num in range(REALIZATIONS_NUM): 227 | lost=[] # list of lost packets for each request H1->H(4+x) 228 | for txt in glob.glob("/home/mininet/ping_SPIDER*.sim"+str(sim_num)+".txt"): 229 | rx=os.popen("cat "+txt+" | grep transmitted | awk '{print $4}'").read() # received packets 230 | tx=os.popen("cat "+txt+" | grep transmitted | awk '{print $1}'").read() # transmitted packets 231 | lost.append(int(tx)-int(rx)) 232 | if N not in tot_lost_ping_SPIDER: 233 | tot_lost_ping_SPIDER[N] = [] 234 | tot_lost_ping_SPIDER[N].append(sum(lost)) # total number of packets with no reply 235 | #os.system('for file in /home/mininet/ping_SPIDER*.sim'+str(sim_num)+'.txt; do mv "$file" "${file%.txt}'+'.SPIDER.simnum'+str(sim_num)+'.bak"; done') 236 | os.system('for file in /home/mininet/ping_SPIDER*.sim'+str(sim_num)+'.txt; do rm "$file"; done') 237 | i+=1 238 | 239 | pprint(tot_lost_ping_OF) 240 | pprint(tot_lost_ping_SPIDER) 241 | 242 | # tot_lost_ping_OF = {N1: {RTT1: [tot_losses_1,tot_losses_2,..] , RTT2: [tot_losses_1,tot_losses_2,..] , ...} , N2: {...} , ...} 243 | # tot_lost_ping_SPIDER = {N1: [tot_losses_1,tot_losses_2,..] , N2: [tot_losses_1,tot_losses_2,..] 
, ...} 244 | 245 | tot_lost_ping_OF_average={} # {N1: {RTT1: tot_losses_avg , RTT2: tot_losses_avg] , ...} , N2: {...} , ...} 246 | for N in tot_lost_ping_OF: 247 | tot_lost_ping_OF_average[N]={} 248 | for delay in tot_lost_ping_OF[N]: 249 | tot_lost_ping_OF_average[N][delay]=sum(tot_lost_ping_OF[N][delay])/len(tot_lost_ping_OF[N][delay]) 250 | 251 | tot_lost_ping_SPIDER_average={} # {N1: tot_losses_avg , N2: tot_losses_avg , ...} 252 | for N in tot_lost_ping_SPIDER: 253 | tot_lost_ping_SPIDER_average[N]=sum(tot_lost_ping_SPIDER[N])/len(tot_lost_ping_SPIDER[N]) 254 | 255 | print("\ntot_lost_ping_OF_average = {N1: {RTT1: tot_losses , RTT2: tot_losses , ...} , N2: {...} , ...}\n") 256 | print ("tot_lost_ping_OF_average=") 257 | pprint(tot_lost_ping_OF_average) 258 | 259 | with open("/home/mininet/total_lost_packets_OF.txt", "a+") as out_file: 260 | out_file.write("REALIZATIONS_NUM="+str(REALIZATIONS_NUM)+"\nINDIVIDUAL_TRAFFIC_RATE="+str(INDIVIDUAL_TRAFFIC_RATE)+"\nLINK_DOWN="+str(LINK_DOWN)+"\nLINK_UP"+str(LINK_UP)) 261 | out_file.write("\nREQUESTS_RANGE="+str(REQUESTS_RANGE)+"\nRTT_DELAY_LIST="+str(RTT_DELAY_LIST)+"\n") 262 | out_file.write("tot_lost_ping_OF_average = {N1: {RTT1: tot_losses , RTT2: tot_losses , ...} , N2: {...} , ...}\n") 263 | out_file.write("tot_lost_ping_OF = "+str(tot_lost_ping_OF)+"\n") 264 | out_file.write("tot_lost_ping_OF_average = "+str(tot_lost_ping_OF_average)+"\n\n\n") 265 | 266 | print("\ntot_lost_ping_SPIDER_average = {N1: tot_losses , N2: ... 
, ...}\n") 267 | print ("tot_lost_ping_SPIDER_average=") 268 | pprint(tot_lost_ping_SPIDER_average) 269 | 270 | with open("/home/mininet/total_lost_packets_SPIDER.txt", "a+") as out_file: 271 | out_file.write("REALIZATIONS_NUM="+str(REALIZATIONS_NUM)+"\nINDIVIDUAL_TRAFFIC_RATE="+str(INDIVIDUAL_TRAFFIC_RATE)+"\nLINK_DOWN="+str(LINK_DOWN)+"\nLINK_UP"+str(LINK_UP)) 272 | out_file.write("\nREQUESTS_RANGE="+str(REQUESTS_RANGE)+"\n") 273 | out_file.write("\ndelta_6="+str(delta_6)+"\ndelta_7="+str(delta_7)+"\ndelta_5="+str(delta_5)+"\n") 274 | out_file.write("tot_lost_ping_SPIDER_average = {N1: tot_losses , N2: ... , ...}\n") 275 | out_file.write("tot_lost_ping_SPIDER = "+str(tot_lost_ping_SPIDER)+"\n") 276 | out_file.write("tot_lost_ping_SPIDER_average = "+str(tot_lost_ping_SPIDER_average)+"\n\n\n") 277 | 278 | os.system('sudo tc qdisc change dev lo root netem delay 0ms') 279 | 280 | # Generate LateX data 281 | # tot_lost_ping_SPIDER_average = {N1: tot_losses_avg , N2: tot_losses_avg , ...} 282 | print('coordinates{') 283 | for N in sorted(tot_lost_ping_SPIDER_average): 284 | print(' ('+str(N)+','+str(tot_lost_ping_SPIDER_average[N])+')') 285 | 286 | print(' };') 287 | print('\\addlegendentry{SPIDER $\delta_7$=1ms}') 288 | print('') 289 | print('##################################################') 290 | 291 | # tot_lost_ping_OF_average = {N1: {RTT1: tot_losses_avg , RTT2: tot_losses_avg] , ...} , N2: {...} , ...} 292 | for rtt in RTT_DELAY_LIST: 293 | print('coordinates{') 294 | for N in sorted(tot_lost_ping_OF_average): 295 | print(' ('+str(N)+','+str(tot_lost_ping_OF_average[N][rtt])+')') 296 | 297 | print(' };') 298 | print('\\addlegendentry{OF FF ('+str(rtt)+'ms)}') 299 | print('') 300 | 301 | print('##################################################') 302 | 303 | -------------------------------------------------------------------------------- /results/fig8/fig8_ryu_app.py: -------------------------------------------------------------------------------- 1 | from 
ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER, CONFIG_DISPATCHER, HANDSHAKE_DISPATCHER
from ryu.controller.handler import set_ev_cls
import ryu.ofproto.ofproto_v1_3 as ofproto
import ryu.ofproto.ofproto_v1_3_parser as ofparser
import ryu.ofproto.openstate_v1_0 as osproto
import ryu.ofproto.openstate_v1_0_parser as osparser
from ryu.lib.packet import packet
from ryu.topology import event
import logging
from sets import Set
import time
import os

import sys
sys.path.append(os.path.abspath("/home/mininet/spider/src"))
import SPIDER_parser as f_t_parser
from ryu.lib import hub
import subprocess

# Experiment parameters exported by fig8.py through the environment.
realiz_num = os.environ['realiz_num']
delta_6 = os.environ['delta_6']
TRAFFIC_RATE = eval(os.environ['TRAFFIC_RATE'])
PEAK_RATE = os.environ['PEAK_RATE']
STEP = os.environ['STEP']

class OpenStateFaultTolerance(app_manager.RyuApp):
    # Ryu app for the fig8 experiment: builds the Mininet topology from
    # network.xml, installs the SPIDER flow entries on every switch and then
    # drives the heartbeat-rate measurement traffic from _monitor().
    OFP_VERSIONS = [ofproto.OFP_VERSION]

    def __init__(self, *args, **kwargs):
        super(OpenStateFaultTolerance, self).__init__(*args, **kwargs)

        # single detection-timeout tuple derived from the delta_6 env var
        f_t_parser.detection_timeouts_list=[(eval(delta_6),0.1,10)]

        results_hash = f_t_parser.md5sum_results()
        if f_t_parser.network_has_changed(results_hash):
            f_t_parser.erase_figs_folder()

        (self.requests,self.faults) = f_t_parser.parse_ampl_results_if_not_cached()

        print len(self.requests), 'requests loaded'
        print len(self.faults), 'faults loaded'

        print "Building network graph from network.xml..."
        # G is a NetworkX Graph object
        (self.G, self.pos, self.hosts, self.switches, self.mapping) = f_t_parser.parse_network_xml()
        print 'Network has', len(self.switches), 'switches,', self.G.number_of_edges()-len(self.hosts), 'links and', len(self.hosts), 'hosts'

        print "NetworkX to Mininet topology conversion..."
        # mn_topo is a Mininet Topo object
        self.mn_topo = f_t_parser.networkx_to_mininet_topo(self.G, self.hosts, self.switches, self.mapping)
        # mn_net is a Mininet object
        self.mn_net = f_t_parser.create_mininet_net(self.mn_topo)

        f_t_parser.launch_mininet(self.mn_net)

        self.ports_dict = f_t_parser.adapt_mn_topo_ports_to_old_API(self.mn_topo.ports)

        f_t_parser.mn_setup_MAC_and_IP(self.mn_net)

        f_t_parser.mn_setup_static_ARP_entries(self.mn_net)

        f_t_parser.draw_network_topology(self.G,self.pos,self.ports_dict,self.hosts)

        (self.fault_ID, self.flow_entries_dict, self.flow_entries_with_timeout_dict, self.flow_entries_with_burst_dict) = f_t_parser.generate_flow_entries_dict(self.requests,self.faults,self.ports_dict,match_flow=f_t_parser.get_mac_match_mininet,check_cache=False,dpctl_script=False)

        # Associates dp_id to datapath object
        self.dp_dictionary=dict()
        # Associates dp_id to a dict associating port<->MAC address
        self.ports_mac_dict=dict()

        # Needed by fault_tolerance_rest --> will this still be needed if all the variables are stored here?
        self.f_t_parser = f_t_parser

        # switch counter
        self.switch_count = 0

    @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
    def switch_features_handler(self, ev):
        # Per-switch bring-up: request features/port descriptions, make the
        # relevant tables stateful and push this switch's flow entries.
        msg = ev.msg
        datapath = msg.datapath

        self.ports_mac_dict[datapath.id] = dict()
        self.send_features_request(datapath)
        self.send_port_desc_stats_request(datapath)

        self.configure_stateful_stages(datapath)
        self.install_flows(datapath)

        self.dp_dictionary[datapath.id] = datapath

    def install_flows(self,datapath):
        # Install every pre-computed SPIDER flow entry for this switch; once
        # all switches are configured, start the measurement thread.
        print("Configuring flow table for switch %d" % datapath.id)

        if datapath.id in self.flow_entries_dict.keys():
            for table_id in self.flow_entries_dict[datapath.id]:
                for match in self.flow_entries_dict[datapath.id][table_id]:
                    mod = ofparser.OFPFlowMod(
                        datapath=datapath, cookie=0, cookie_mask=0, table_id=table_id,
                        command=ofproto.OFPFC_ADD, idle_timeout=0, hard_timeout=0,
                        priority=self.flow_entries_dict[datapath.id][table_id][match]['priority'], buffer_id=ofproto.OFP_NO_BUFFER,
                        out_port=ofproto.OFPP_ANY,
                        out_group=ofproto.OFPG_ANY,
                        flags=0, match=match, instructions=self.flow_entries_dict[datapath.id][table_id][match]['inst'])
                    datapath.send_msg(mod)

        self.switch_count += 1
        if self.switch_count == self.G.number_of_nodes():
            self.monitor_thread = hub.spawn(self._monitor,datapath)

    def send_features_request(self, datapath):
        req = ofparser.OFPFeaturesRequest(datapath)
        datapath.send_msg(req)

    def configure_stateful_stages(self, datapath):
        # Make tables 2 and 3 stateful and configure their OpenState
        # lookup/update key extractors.
        # NOTE(review): node_dict is computed here but never used below.
        node_dict = f_t_parser.create_node_dict(self.ports_dict,self.requests)

        self.send_table_mod(datapath, table_id=2)
        self.send_key_lookup(datapath, table_id=2, fields=[ofproto.OXM_OF_ETH_SRC,ofproto.OXM_OF_ETH_DST])
        self.send_key_update(datapath, table_id=2, fields=[ofproto.OXM_OF_ETH_SRC,ofproto.OXM_OF_ETH_DST])

        self.send_table_mod(datapath, table_id=3)
        self.send_key_lookup(datapath, table_id=3, fields=[ofproto.OXM_OF_METADATA])
        self.send_key_update(datapath, table_id=3, fields=[ofproto.OXM_OF_METADATA])

    def configure_global_states(self, datapath):
        # Mark every physical port as "up" (bit port-1 set to 1) in the
        # switch's global state.
        for port in self.ports_mac_dict[datapath.id]:
            if port!=ofproto.OFPP_LOCAL:
                (global_state, global_state_mask) = osparser.masked_global_state_from_str("1",port-1)
                msg = osparser.OFPExpSetGlobalState(datapath=datapath, global_state=global_state, global_state_mask=global_state_mask)
                datapath.send_msg(msg)

    def send_table_mod(self, datapath, table_id, stateful=1):
        # Flag a table as stateful (OpenState).
        req = osparser.OFPExpMsgConfigureStatefulTable(datapath=datapath, table_id=table_id, stateful=stateful)
        datapath.send_msg(req)

    def send_key_lookup(self, datapath, table_id, fields):
        # Configure the lookup-scope key extractor of a stateful table.
        key_lookup_extractor = osparser.OFPExpMsgKeyExtract(datapath=datapath, command=osproto.OFPSC_EXP_SET_L_EXTRACTOR, fields=fields, table_id=table_id)
        datapath.send_msg(key_lookup_extractor)

    def send_key_update(self, datapath, table_id, fields):
        # Configure the update-scope key extractor of a stateful table.
        key_update_extractor = osparser.OFPExpMsgKeyExtract(datapath=datapath, command=osproto.OFPSC_EXP_SET_U_EXTRACTOR, fields=fields, table_id=table_id)
        datapath.send_msg(key_update_extractor)

    def set_link_down(self,node1,node2):
        # Emulate a link failure by shutting both interfaces of the link;
        # the pair is normalized so node1 < node2.
        if(node1 > node2):
            node1,node2 = node2,node1

        os.system('sudo ifconfig s'+str(node1)+'-eth'+str(self.ports_dict['s'+str(node1)]['s'+str(node2)])+' down')
        os.system('sudo ifconfig s'+str(node2)+'-eth'+str(self.ports_dict['s'+str(node2)]['s'+str(node1)])+' down')

    def set_link_up(self,node1,node2):
        # Restore a previously failed link (inverse of set_link_down).
        if(node1 > node2):
            node1,node2 = node2,node1

        os.system('sudo ifconfig s'+str(node1)+'-eth'+str(self.ports_dict['s'+str(node1)]['s'+str(node2)])+' up')
        os.system('sudo ifconfig s'+str(node2)+'-eth'+str(self.ports_dict['s'+str(node2)]['s'+str(node1)])+' up')

    def send_port_desc_stats_request(self, datapath):
        req = ofparser.OFPPortDescStatsRequest(datapath, 0)
        datapath.send_msg(req)

    @set_ev_cls(ofp_event.EventOFPPortDescStatsReply, MAIN_DISPATCHER)
    def port_desc_stats_reply_handler(self, ev):
        # store the association port<->MAC address
        for p in ev.msg.body:
            self.ports_mac_dict[ev.msg.datapath.id][p.port_no]=p.hw_addr

        self.configure_global_states(ev.msg.datapath)

    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
    def _packet_in_handler(self, ev):
        # SPIDER handles recovery in the data plane, so a packet-in (other
        # than IPv6 multicast noise) is interpreted as a second fault.
        msg = ev.msg
        datapath = msg.datapath

        pkt = packet.Packet(msg.data)
        header_list = dict((p.protocol_name, p) for p in pkt.protocols if type(p) != str)

        #discard IPv6 multicast packets
        if not header_list['ethernet'].dst.startswith('33:33:'):
            print("\nSecond fault detected: packet received by the CTRL")
            print(pkt)

    @set_ev_cls(ofp_event.EventOFPExperimenterStatsReply, MAIN_DISPATCHER)
    def state_stats_reply_handler(self, ev):
        # State reset handler: exp_type==0 resets every non-empty per-flow
        # state entry to 0; exp_type==1 resets and re-initializes the
        # global states.
        msg = ev.msg
        dp = msg.datapath

        if ev.msg.body.exp_type==0:
            # EXP_STATE_STATS
            stats = osparser.OFPStateStats.parser(ev.msg.body.data, offset=0)
            for stat in stats:
                if stat.entry.key != []:
                    msg = osparser.OFPExpMsgSetFlowState(
                        datapath=dp, state=0, keys=stat.entry.key, table_id=stat.table_id)
                    dp.send_msg(msg)
        elif ev.msg.body.exp_type==1:
            stat = osparser.OFPGlobalStateStats.parser(ev.msg.body.data, offset=0)
            msg = osparser.OFPExpResetGlobalState(datapath=dp)
            dp.send_msg(msg)
            self.configure_global_states(dp)


    def timeout_probe(self,timeout):
        # Re-install (OFPFC_MODIFY) every entry that depends on the selected
        # heartbeat/detection timeout.
        f_t_parser.selected_timeout = timeout

        for datapath_id in self.flow_entries_with_timeout_dict[timeout]:
            for table_id in self.flow_entries_with_timeout_dict[timeout][datapath_id]:
                for match in self.flow_entries_with_timeout_dict[timeout][datapath_id][table_id]:
                    mod = ofparser.OFPFlowMod(
                        datapath=self.dp_dictionary[datapath_id], cookie=0, cookie_mask=0, table_id=table_id,
                        command=ofproto.OFPFC_MODIFY, idle_timeout=0, hard_timeout=0,
                        priority=self.flow_entries_with_timeout_dict[timeout][datapath_id][table_id][match]['priority'], buffer_id=ofproto.OFP_NO_BUFFER,
                        out_port=ofproto.OFPP_ANY,
                        out_group=ofproto.OFPG_ANY,
                        flags=0, match=match, instructions=self.flow_entries_with_timeout_dict[timeout][datapath_id][table_id][match]['inst'])
                    self.dp_dictionary[datapath_id].send_msg(mod)

    def timeout_burst(self,burst):
        # Same as timeout_probe, but for entries that depend on the selected
        # flowlet (burst) timeout.
        f_t_parser.selected_burst = burst

        for datapath_id in self.flow_entries_with_burst_dict[burst]:
            for table_id in self.flow_entries_with_burst_dict[burst][datapath_id]:
                for match in self.flow_entries_with_burst_dict[burst][datapath_id][table_id]:
                    mod = ofparser.OFPFlowMod(
                        datapath=self.dp_dictionary[datapath_id], cookie=0, cookie_mask=0, table_id=table_id,
                        command=ofproto.OFPFC_MODIFY, idle_timeout=0, hard_timeout=0,
                        priority=self.flow_entries_with_burst_dict[burst][datapath_id][table_id][match]['priority'], buffer_id=ofproto.OFP_NO_BUFFER,
                        out_port=ofproto.OFPP_ANY,
                        out_group=ofproto.OFPG_ANY,
                        flags=0, match=match, instructions=self.flow_entries_with_burst_dict[burst][datapath_id][table_id][match]['inst'])
                    self.dp_dictionary[datapath_id].send_msg(mod)

    def send_state_stats_request(self):
        # Query the per-flow state tables of every connected switch.
        for datapath_id in self.dp_dictionary:
            req = osparser.OFPExpStateStatsMultipartRequest(datapath=self.dp_dictionary[datapath_id])
            self.dp_dictionary[datapath_id].send_msg(req)

    def send_global_state_stats_request(self):
        # Query the global states of every connected switch.
        for datapath_id in self.dp_dictionary:
            req = osparser.OFPExpGlobalStateStatsMultipartRequest(datapath=self.dp_dictionary[datapath_id])
            self.dp_dictionary[datapath_id].send_msg(req)

    def _monitor(self,datapath):
        # Measurement thread: starts the main nping traffic (heartbeat
        # requests/replies), records s3-eth3 with tshark, then starts the
        # decreasing-rate reverse traffic and finally tears everything down.
        hub.sleep(5)
        print("Network is ready")

        # This is the main traffic, used to generate HB requests/reply
        print("\nStarting traffic from h3 to h6...")
        cmd = 'sudo nice --20 nping --rate '+str(TRAFFIC_RATE)+' --count 0 --icmp-type 0 --quiet '+self.mn_net['h'+str(6)].IP()+'&'
        print('h3# '+cmd)
        self.mn_net['h3'].cmd(cmd)
        hub.sleep(1)

        pcap_dir = "/home/mininet/spider/results/fig8/HB_req_TO_"+delta_6+"/realiz_"+realiz_num
        if not os.path.exists(pcap_dir):
            os.makedirs(pcap_dir)

        print("\nStarting tshark...")
        os.system("touch "+pcap_dir+"/ping.pcap")
        os.system("sudo tshark -i s3-eth3 -n -w "+pcap_dir+"/ping.pcap 2> /dev/null &")
        hub.sleep(1)

        # This is the reverse traffic preventing the generation of HB packets
        print("\nStarting traffic from h4 to h7...")
        cmd = 'sudo nice --20 python decr_nping.py '+self.mn_net['h'+str(7)].IP()+' '+PEAK_RATE+' '+STEP+'&'
        print('h4# '+cmd)
        self.mn_net['h4'].cmd(cmd)

        print 'Waiting',5 + (int(PEAK_RATE)/int(STEP)) + 5,'seconds...'
        hub.sleep( 5 + (int(PEAK_RATE)/int(STEP)) + 5 )

        os.system("sudo kill -9 $(pidof tshark) 2> /dev/null")
        hub.sleep(1)

        os.system("sudo kill -9 $(pidof -x ryu-manager) 2> /dev/null")
        os.system("sudo kill -9 $(pidof -x ofdatapath) 2> /dev/null")
        os.system("sudo mn -c 2> /dev/null")
--------------------------------------------------------------------------------
/results/table2/table2.py:
--------------------------------------------------------------------------------
import networkx as nx
from dummy import execute_instance
import sys
from fc_lib import cost_func_inv
import multiprocessing as mp
import os
import cProfile
from ryu.base import app_manager

topology_dir="/home/mininet/spider/src"
sys.path.append(os.path.abspath("/home/mininet/spider/src"))
import SPIDER_parser as f_t_parser

# Check root privileges
if os.geteuid() != 0:
    exit("You need to have root privileges to run this script")

def create_square_network(N, link_capacity=10, demand_volume=1):
    '''
    Function: create_square_network
    Summary: Create a NxN square grid network along with traffic demands between each pair of edge nodes
    Examples: With N=3, demands generated only between edge nodes (0,1,2,3,5,6,7,8)
    0--1--2
    |  |  |
    3--4--5
    |  |  |
    6--7--8
    Attributes:
        @param (N): Number of nodes per row/column
        @param (link_capacity) default=10: link capacity value to assign to each link
        @param (demand_volume) default=1: traffic volume for each generated demand
    Returns: The networkx graph G, a demands dictionary indexed like "(src_node, dst_node) = volume"
    '''
    G = nx.Graph()
    G.add_nodes_from(range(N*N))
    ebunch = [(i,i-1) for i in range(N*N) if i%N > 0] + [(i,i-N) for i in range(N,N*N)]
    G.add_edges_from(ebunch, capacity=link_capacity)
    st_nodes = [i for i in range(N*N) if i < N or i > (N*(N-1)-1) or i % 
N in (0,N-1)] 39 | demands = {(i,j): demand_volume for i in st_nodes for j in st_nodes if i != j} 40 | return G.to_directed(), demands 41 | 42 | def create_requests_faults_dict(pps, bps): 43 | ''' 44 | Given 45 | 46 | pps = {(x,y): [x, ... , y], ...} 47 | bps = {(x,y): {(a,b): [x, ... , y], (b,c): [x, ... , y]}, ... } 48 | 49 | it returns 50 | 51 | requests = {(x,y): {'primary_path': [...], 'faults': {(a,b): {'detour_path': [...], 'redirect_node': 7, 'fw_back_path': [...], 'detect_node': 3}}, 'pp_edge': (7, 4)}, ... } 52 | faults = {(a,b): {'requests': {(x,y): {'primary_path': [...], 'detour_path': [...], 'redirect_node': 2, 'fw_back_path': None, 'detect_node': 2}}}, } 53 | ''' 54 | 55 | requests=dict() 56 | for req in pps: 57 | requests[req] = {} 58 | requests[req]['primary_path'] = pps[req] 59 | requests[req]['faults'] = {} 60 | if req in bps: 61 | for flt in bps[req]: 62 | # NB bps dict is indexed by 'ordered' couple of nodes (a,b), so (a,b) is different from (b,a) 63 | # Instead faults dict is indexed by couple of nodes (a,b) where a is always < b, so keys (a,b) with bflt[1]: 66 | f_key=(flt[1],flt[0]) 67 | requests[req]['faults'][f_key] = {} 68 | requests[req]['faults'][f_key]['detect_node'] = flt[0] 69 | requests[req]['faults'][f_key]['fw_back_path'] = None 70 | 71 | detour=list(bps[req][flt]) 72 | # NB detour is a list of nodes including the redirect and the last detour node, while bps is the entire E2E backup path. 73 | # We need to remove the beginning part common to both primary and backup path and the common part starting from the end.... 
74 | for node in bps[req][flt]: 75 | if node in pps[req]: 76 | detour.pop(0) 77 | else: 78 | break 79 | 80 | for node in reversed(bps[req][flt]): 81 | if node in pps[req]: 82 | detour.pop(-1) 83 | else: 84 | break 85 | # ...but we need also to add the redirect node and the last detour node 86 | requests[req]['faults'][f_key]['redirect_node'] = bps[req][flt][ bps[req][flt].index(detour[0])-1 ] 87 | requests[req]['faults'][f_key]['detour_path'] = detour 88 | requests[req]['faults'][f_key]['detour_path'].insert(0,requests[req]['faults'][f_key]['redirect_node']) 89 | requests[req]['faults'][f_key]['detour_path'].append(bps[req][flt][ bps[req][flt].index(detour[-1])+1 ]) 90 | 91 | detect_idx = pps[req].index(requests[req]['faults'][f_key]['detect_node']) 92 | redirect_idx = pps[req].index(requests[req]['faults'][f_key]['redirect_node']) 93 | if detect_idx - redirect_idx > 1: 94 | requests[req]['faults'][f_key]['fw_back_path'] = pps[req][redirect_idx+1:detect_idx] 95 | 96 | faults=dict() 97 | for r in requests: 98 | for f in requests[r]['faults']: 99 | if f not in faults: 100 | faults[f] = {} 101 | faults[f]['requests'] = {} 102 | faults[f]['requests'][r] = {} 103 | faults[f]['requests'][r]['primary_path'] = requests[r]['primary_path'] 104 | faults[f]['requests'][r]['detour_path'] = requests[r]['faults'][f]['detour_path'] 105 | faults[f]['requests'][r]['redirect_node'] = requests[r]['faults'][f]['redirect_node'] 106 | faults[f]['requests'][r]['fw_back_path'] = requests[r]['faults'][f]['fw_back_path'] 107 | faults[f]['requests'][r]['detect_node'] = requests[r]['faults'][f]['detect_node'] 108 | 109 | return requests, faults 110 | 111 | def create_ports_dict(G, demands): 112 | ''' 113 | Creates a dictionary 114 | ports_dict = {'sx': {'sy': 1, 'sz': 2}, 'sy': {...}, 'hx': {...}, ...} 115 | ''' 116 | edge_nodes = set([i for i,j in demands]+[j for i,j in demands]) 117 | 118 | ports_dict = {} 119 | for x in G.nodes(): 120 | port_no = 1 121 | ports_dict['s'+str(x)] = {} 122 | 
for y in G.neighbors(x): 123 | ports_dict['s'+str(x)]['s'+str(y)] = port_no 124 | port_no += 1 125 | if x in edge_nodes: 126 | ports_dict['s'+str(x)]['h'+str(x)] = port_no 127 | ports_dict['h'+str(x)] = {'s'+str(x) : 0} 128 | 129 | return ports_dict 130 | 131 | ########################################################################################## 132 | def process_NxN_E2E_PP(N,out_q): 133 | G, demands = create_square_network(N, link_capacity=N*N*10, demand_volume=1) 134 | print "\n# Dumb instance "+str(N)+"x"+str(N)+" with end-to-end path protection (bp_node_disj=True...)" 135 | fc = execute_instance(G, demands, bp_node_disj=True) 136 | ports_dict = create_ports_dict(G, demands) 137 | (requests,faults) = create_requests_faults_dict(fc.pps,fc.bps) 138 | # fictitious filename, just to caching purpose 139 | filename=str(N)+'X'+str(N)+'E2E.txt' 140 | (fault_ID, flow_entries_dict, flow_entries_with_timeout_dict, flow_entries_with_burst_dict) = f_t_parser.generate_flow_entries_dict(requests,faults,ports_dict,match_flow=f_t_parser.get_mac_match_mininet,check_cache=False,filename=filename,confirm_cache_loading=False,dpctl_script=False) 141 | 142 | flow_stats_dict = f_t_parser.get_flow_stats_dict(flow_entries_dict) 143 | tot_flows = [flow_stats_dict[node]['tot_flows'] for node in flow_stats_dict.keys() if node!='global'] 144 | '''print 'min',min(tot_flows) 145 | print 'avg',sum(tot_flows)/float(len((tot_flows))) 146 | print 'max',max(tot_flows)''' 147 | stats = [str(N)+"x"+str(N)+" E2E PP",{'min' : min(tot_flows) ,'avg' : sum(tot_flows)/float(len((tot_flows))) , 'max' : max(tot_flows)}] 148 | out_q.put(stats) 149 | with open("tmp/"+str(N)+"x"+str(N)+" E2E PP.txt", "a+") as out_file: 150 | out_file.write(str(stats)+"\n") 151 | return stats 152 | 153 | def process_NxN_greedy(N,out_q): 154 | G, demands = create_square_network(N, link_capacity=N*N*10, demand_volume=1) 155 | print "\n# Smart instance "+str(N)+"x"+str(N)+" with link cost function and bp_node_disj=False..." 
156 | fc = execute_instance(G, demands, cost_func=cost_func_inv) 157 | ports_dict = create_ports_dict(G, demands) 158 | (requests,faults) = create_requests_faults_dict(fc.pps,fc.bps) 159 | # fictitious filename, just to caching purpose 160 | filename=str(N)+'X'+str(N)+'greedy.txt' 161 | (fault_ID, flow_entries_dict, flow_entries_with_timeout_dict, flow_entries_with_burst_dict) = f_t_parser.generate_flow_entries_dict(requests,faults,ports_dict,match_flow=f_t_parser.get_mac_match_mininet,check_cache=False,filename=filename,confirm_cache_loading=False,dpctl_script=False) 162 | 163 | flow_stats_dict = f_t_parser.get_flow_stats_dict(flow_entries_dict) 164 | tot_flows = [flow_stats_dict[node]['tot_flows'] for node in flow_stats_dict.keys() if node!='global'] 165 | '''print 'min',min(tot_flows) 166 | print 'avg',sum(tot_flows)/float(len((tot_flows))) 167 | print 'max',max(tot_flows)''' 168 | 169 | D = len(demands) 170 | F = len(G.edges())/2 if isinstance(G,nx.DiGraph) else len(G.edges()) 171 | print 'O(D*F) = %d*%d = %d'%(D,F,D*F) 172 | stats = [str(N)+"x"+str(N)+" greedy",{'min' : min(tot_flows) ,'avg' : sum(tot_flows)/float(len((tot_flows))) , 'max' : max(tot_flows)}] 173 | out_q.put(stats) 174 | with open("tmp/"+str(N)+"x"+str(N)+" greedy.txt", "a+") as out_file: 175 | out_file.write(str(stats)+"\n") 176 | return stats 177 | 178 | ########################################################################################## 179 | 180 | def process_network_E2E_PP(net_name,out_q): 181 | filename_res=topology_dir+"/results.txt."+net_name 182 | filename_net=topology_dir+"/network.xml."+net_name 183 | (G, pos, hosts, switches, mapping) = f_t_parser.parse_network_xml(filename=filename_net) 184 | (requests,faults) = f_t_parser.parse_ampl_results(filename=filename_res) 185 | print len(requests), 'requests loaded' 186 | print len(faults), 'faults loaded' 187 | print 'Network has', len(switches), 'switches,', G.number_of_edges()-len(hosts), 'links and', len(hosts), 'hosts' 188 | 
mn_topo = f_t_parser.networkx_to_mininet_topo(G, hosts, switches, mapping) 189 | ports_dict = f_t_parser.adapt_mn_topo_ports_to_old_API(mn_topo.ports) 190 | 191 | print "\n# Dumb instance "+net_name+" with end-to-end path protection (bp_node_disj=True...)" 192 | # we take requests just for its keys, but primary/backup paths are calculated by execute_instance() 193 | demands = {dem : 1 for dem in requests.keys() } 194 | N = G.number_of_edges()-len(hosts) 195 | G_dir = G.to_directed() 196 | for e in G_dir.edges(): 197 | G_dir.edge[e[0]][e[1]] = {'capacity': N*N*10} 198 | fc = execute_instance(G_dir, demands, bp_node_disj=True) 199 | (requests_E2E,faults_E2E) = create_requests_faults_dict(fc.pps,fc.bps) 200 | (fault_ID, flow_entries_dict, flow_entries_with_timeout_dict, flow_entries_with_burst_dict) = f_t_parser.generate_flow_entries_dict(requests_E2E,faults_E2E,ports_dict,match_flow=f_t_parser.get_mac_match_mininet,check_cache=False,filename=filename_res+"E2E",confirm_cache_loading=False,dpctl_script=False) 201 | 202 | flow_stats_dict = f_t_parser.get_flow_stats_dict(flow_entries_dict) 203 | tot_flows = [flow_stats_dict[node]['tot_flows'] for node in flow_stats_dict.keys() if node!='global'] 204 | '''print 'min',min(tot_flows) 205 | print 'avg',sum(tot_flows)/float(len((tot_flows))) 206 | print 'max',max(tot_flows)''' 207 | stats = [net_name+" E2E PP",{'min' : min(tot_flows) ,'avg' : sum(tot_flows)/float(len((tot_flows))) , 'max' : max(tot_flows)}] 208 | out_q.put(stats) 209 | with open("tmp/"+str(net_name)+" E2E PP.txt", "a+") as out_file: 210 | out_file.write(str(stats)+"\n") 211 | return stats 212 | 213 | def process_network_AMPL_model(net_name,out_q): 214 | filename_res=topology_dir+"/results.txt."+net_name 215 | filename_net=topology_dir+"/network.xml."+net_name 216 | (G, pos, hosts, switches, mapping) = f_t_parser.parse_network_xml(filename=filename_net) 217 | (requests,faults) = f_t_parser.parse_ampl_results(filename=filename_res) 218 | print len(requests), 
'requests loaded' 219 | print len(faults), 'faults loaded' 220 | print 'Network has', len(switches), 'switches,', G.number_of_edges()-len(hosts), 'links and', len(hosts), 'hosts' 221 | mn_topo = f_t_parser.networkx_to_mininet_topo(G, hosts, switches, mapping) 222 | ports_dict = f_t_parser.adapt_mn_topo_ports_to_old_API(mn_topo.ports) 223 | 224 | print "\n# Smart instance "+net_name+" with results from AMPL model..." 225 | (fault_ID, flow_entries_dict, flow_entries_with_timeout_dict, flow_entries_with_burst_dict) = f_t_parser.generate_flow_entries_dict(requests,faults,ports_dict,match_flow=f_t_parser.get_mac_match_mininet,check_cache=False,filename=filename_res,confirm_cache_loading=False,dpctl_script=False) 226 | 227 | flow_stats_dict = f_t_parser.get_flow_stats_dict(flow_entries_dict) 228 | tot_flows = [flow_stats_dict[node]['tot_flows'] for node in flow_stats_dict.keys() if node!='global'] 229 | '''print 'min',min(tot_flows) 230 | print 'avg',sum(tot_flows)/float(len((tot_flows))) 231 | print 'max',max(tot_flows)''' 232 | 233 | D = len(requests) 234 | F = len(faults) 235 | print 'O(D*F) = %d*%d = %d'%(D,F,D*F) 236 | stats = [net_name+" AMPL model",{'min' : min(tot_flows) ,'avg' : sum(tot_flows)/float(len((tot_flows))) , 'max' : max(tot_flows)}] 237 | out_q.put(stats) 238 | with open("tmp/"+str(net_name)+" AMPL model.txt", "a+") as out_file: 239 | out_file.write(str(stats)+"\n") 240 | return stats 241 | 242 | if __name__ == '__main__': 243 | 244 | MULTIPROCESSING = False 245 | NXN_range = [5,6,7] # range(5,16) needs a lot of RAM! 
246 | networks_list = [] #['polska','norway','fat_tree'] 247 | out_q = mp.Queue() 248 | flow_entries_statistics = {} 249 | 250 | if MULTIPROCESSING: 251 | # Multiprcessing execution 252 | processes = [] 253 | for N in NXN_range: 254 | p = mp.Process(target=process_NxN_E2E_PP,args=(N,out_q)) 255 | processes.append(p) 256 | p.start() 257 | '''p = mp.Process(target=process_NxN_greedy,args=(N,out_q)) 258 | processes.append(p) 259 | p.start()''' 260 | for net_name in networks_list: 261 | p = mp.Process(target=process_network_E2E_PP,args=(net_name,out_q)) 262 | processes.append(p) 263 | p.start() 264 | '''p = mp.Process(target=process_network_AMPL_model,args=(net_name,out_q)) 265 | processes.append(p) 266 | p.start()''' 267 | 268 | # Wait for all worker processes to finish 269 | for p in processes: 270 | p.join() 271 | 272 | # Collect all results into a single result dict. We know how many dicts 273 | # with results to expect. 274 | for p in processes: 275 | stats = out_q.get() 276 | flow_entries_statistics[stats[0]] = stats[1] 277 | else: 278 | # Sequential execution 279 | for N in NXN_range: 280 | stats = process_NxN_E2E_PP(N,out_q) 281 | flow_entries_statistics[stats[0]] = stats[1] 282 | '''stats = process_NxN_greedy(N,out_q) 283 | flow_entries_statistics[stats[0]] = stats[1]''' 284 | for net_name in networks_list: 285 | stats = process_network_E2E_PP(net_name,out_q) 286 | flow_entries_statistics[stats[0]] = stats[1] 287 | '''stats = process_network_AMPL_model(net_name,out_q) 288 | flow_entries_statistics[stats[0]] = stats[1]''' 289 | 290 | print 291 | print 292 | print flow_entries_statistics 293 | os.system('sudo mn -c 2> /dev/null') -------------------------------------------------------------------------------- /results/fig9/fig9_SPIDER_ryu_app.py: -------------------------------------------------------------------------------- 1 | from ryu.base import app_manager 2 | from ryu.controller import ofp_event 3 | from ryu.controller.handler import MAIN_DISPATCHER, 
CONFIG_DISPATCHER, HANDSHAKE_DISPATCHER 4 | from ryu.controller.handler import set_ev_cls 5 | import ryu.ofproto.ofproto_v1_3 as ofproto 6 | import ryu.ofproto.ofproto_v1_3_parser as ofparser 7 | import ryu.ofproto.openstate_v1_0 as osproto 8 | import ryu.ofproto.openstate_v1_0_parser as osparser 9 | from ryu.lib.packet import packet 10 | from ryu.topology import event 11 | from pprint import pprint 12 | import logging 13 | from sets import Set 14 | import time 15 | import sys,os 16 | sys.path.append(os.path.abspath("/home/mininet/spider/src")) 17 | import SPIDER_parser as f_t_parser 18 | from ryu.lib import hub 19 | from datetime import datetime 20 | from time import sleep 21 | import random 22 | 23 | class OpenStateFaultTolerance(app_manager.RyuApp): 24 | OFP_VERSIONS = [ofproto.OFP_VERSION] 25 | 26 | def __init__(self, *args, **kwargs): 27 | super(OpenStateFaultTolerance, self).__init__(*args, **kwargs) 28 | 29 | delta_6 = float(os.environ['delta_6']) 30 | delta_7 = float(os.environ['delta_7']) 31 | delta_5 = float(os.environ['delta_5']) 32 | f_t_parser.detection_timeouts_list = [(delta_6,delta_7,delta_5)] 33 | 34 | self.REALIZATIONS_NUM = int(os.environ['REALIZATIONS_NUM']) 35 | 36 | results_hash = f_t_parser.md5sum_results() 37 | if f_t_parser.network_has_changed(results_hash): 38 | f_t_parser.erase_figs_folder() 39 | 40 | (self.requests,self.faults) = f_t_parser.parse_ampl_results_if_not_cached() 41 | 42 | print len(self.requests), 'requests loaded' 43 | print len(self.faults), 'faults loaded' 44 | 45 | print "Building network graph from network.xml..." 46 | # G is a NetworkX Graph object 47 | (self.G, self.pos, self.hosts, self.switches, self.mapping) = f_t_parser.parse_network_xml() 48 | print 'Network has', len(self.switches), 'switches,', self.G.number_of_edges()-len(self.hosts), 'links and', len(self.hosts), 'hosts' 49 | 50 | print "NetworkX to Mininet topology conversion..." 
51 | # mn_topo is a Mininet Topo object 52 | self.mn_topo = f_t_parser.networkx_to_mininet_topo(self.G, self.hosts, self.switches, self.mapping) 53 | # mn_net is a Mininet object 54 | self.mn_net = f_t_parser.create_mininet_net(self.mn_topo) 55 | 56 | f_t_parser.launch_mininet(self.mn_net) 57 | 58 | self.ports_dict = f_t_parser.adapt_mn_topo_ports_to_old_API(self.mn_topo.ports) 59 | 60 | f_t_parser.mn_setup_MAC_and_IP(self.mn_net) 61 | 62 | f_t_parser.mn_setup_static_ARP_entries(self.mn_net) 63 | 64 | f_t_parser.draw_network_topology(self.G,self.pos,self.ports_dict,self.hosts) 65 | 66 | (self.fault_ID, self.flow_entries_dict, self.flow_entries_with_timeout_dict, self.flow_entries_with_burst_dict) = f_t_parser.generate_flow_entries_dict(self.requests,self.faults,self.ports_dict,match_flow=f_t_parser.get_mac_match_mininet,check_cache=False) 67 | 68 | # Associates dp_id to datapath object 69 | self.dp_dictionary=dict() 70 | # Associates dp_id to a dict associating port<->MAC address 71 | self.ports_mac_dict=dict() 72 | 73 | # Needed by fault_tolerance_rest 74 | self.f_t_parser = f_t_parser 75 | 76 | # switch counter 77 | self.switch_count = 0 78 | 79 | @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER) 80 | def switch_features_handler(self, ev): 81 | msg = ev.msg 82 | datapath = msg.datapath 83 | 84 | self.ports_mac_dict[datapath.id] = dict() 85 | self.send_features_request(datapath) 86 | self.send_port_desc_stats_request(datapath) 87 | 88 | self.configure_stateful_stages(datapath) 89 | self.install_flows(datapath) 90 | 91 | self.dp_dictionary[datapath.id] = datapath 92 | 93 | def install_flows(self,datapath): 94 | print("Configuring flow table for switch %d" % datapath.id) 95 | 96 | if datapath.id in self.flow_entries_dict.keys(): 97 | for table_id in self.flow_entries_dict[datapath.id]: 98 | for match in self.flow_entries_dict[datapath.id][table_id]: 99 | mod = ofparser.OFPFlowMod( 100 | datapath=datapath, cookie=0, cookie_mask=0, table_id=table_id, 
101 | command=ofproto.OFPFC_ADD, idle_timeout=0, hard_timeout=0, 102 | priority=self.flow_entries_dict[datapath.id][table_id][match]['priority'], buffer_id=ofproto.OFP_NO_BUFFER, 103 | out_port=ofproto.OFPP_ANY, 104 | out_group=ofproto.OFPG_ANY, 105 | flags=0, match=match, instructions=self.flow_entries_dict[datapath.id][table_id][match]['inst']) 106 | datapath.send_msg(mod) 107 | 108 | self.switch_count += 1 109 | if self.switch_count == self.G.number_of_nodes(): 110 | self.monitor_thread = hub.spawn(self._monitor,datapath) 111 | 112 | def send_features_request(self, datapath): 113 | req = ofparser.OFPFeaturesRequest(datapath) 114 | datapath.send_msg(req) 115 | 116 | def configure_stateful_stages(self, datapath): 117 | node_dict = f_t_parser.create_node_dict(self.ports_dict,self.requests) 118 | 119 | self.send_table_mod(datapath, table_id=2) 120 | self.send_key_lookup(datapath, table_id=2, fields=[ofproto.OXM_OF_ETH_SRC,ofproto.OXM_OF_ETH_DST]) 121 | self.send_key_update(datapath, table_id=2, fields=[ofproto.OXM_OF_ETH_SRC,ofproto.OXM_OF_ETH_DST]) 122 | 123 | self.send_table_mod(datapath, table_id=3) 124 | self.send_key_lookup(datapath, table_id=3, fields=[ofproto.OXM_OF_METADATA]) 125 | self.send_key_update(datapath, table_id=3, fields=[ofproto.OXM_OF_METADATA]) 126 | 127 | def configure_global_states(self, datapath): 128 | for port in self.ports_mac_dict[datapath.id]: 129 | if port!=ofproto.OFPP_LOCAL: 130 | (global_state, global_state_mask) = osparser.masked_global_state_from_str("1",port-1) 131 | msg = osparser.OFPExpSetGlobalState(datapath=datapath, global_state=global_state, global_state_mask=global_state_mask) 132 | datapath.send_msg(msg) 133 | 134 | def send_table_mod(self, datapath, table_id, stateful=1): 135 | req = osparser.OFPExpMsgConfigureStatefulTable(datapath=datapath, table_id=table_id, stateful=stateful) 136 | datapath.send_msg(req) 137 | 138 | def send_key_lookup(self, datapath, table_id, fields): 139 | key_lookup_extractor = 
osparser.OFPExpMsgKeyExtract(datapath=datapath, command=osproto.OFPSC_EXP_SET_L_EXTRACTOR, fields=fields, table_id=table_id) 140 | datapath.send_msg(key_lookup_extractor) 141 | 142 | def send_key_update(self, datapath, table_id, fields): 143 | key_update_extractor = osparser.OFPExpMsgKeyExtract(datapath=datapath, command=osproto.OFPSC_EXP_SET_U_EXTRACTOR, fields=fields, table_id=table_id) 144 | datapath.send_msg(key_update_extractor) 145 | 146 | def set_link_down(self,node1,node2): 147 | if(node1 > node2): 148 | node1,node2 = node2,node1 149 | 150 | os.system('sudo ifconfig s'+str(node1)+'-eth'+str(self.ports_dict['s'+str(node1)]['s'+str(node2)])+' down') 151 | os.system('sudo ifconfig s'+str(node2)+'-eth'+str(self.ports_dict['s'+str(node2)]['s'+str(node1)])+' down') 152 | 153 | def set_link_up(self,node1,node2): 154 | if(node1 > node2): 155 | node1,node2 = node2,node1 156 | 157 | os.system('sudo ifconfig s'+str(node1)+'-eth'+str(self.ports_dict['s'+str(node1)]['s'+str(node2)])+' up') 158 | os.system('sudo ifconfig s'+str(node2)+'-eth'+str(self.ports_dict['s'+str(node2)]['s'+str(node1)])+' up') 159 | 160 | def send_port_desc_stats_request(self, datapath): 161 | req = ofparser.OFPPortDescStatsRequest(datapath, 0) 162 | datapath.send_msg(req) 163 | 164 | @set_ev_cls(ofp_event.EventOFPPortDescStatsReply, MAIN_DISPATCHER) 165 | def port_desc_stats_reply_handler(self, ev): 166 | # store the association port<->MAC address 167 | for p in ev.msg.body: 168 | self.ports_mac_dict[ev.msg.datapath.id][p.port_no]=p.hw_addr 169 | 170 | self.configure_global_states(ev.msg.datapath) 171 | 172 | @set_ev_cls(ofp_event.EventOFPExperimenterStatsReply, MAIN_DISPATCHER) 173 | def state_stats_reply_handler(self, ev): 174 | msg = ev.msg 175 | dp = msg.datapath 176 | 177 | if ev.msg.body.exp_type==0: 178 | # EXP_STATE_STATS 179 | stats = osparser.OFPStateStats.parser(ev.msg.body.data, offset=0) 180 | for stat in stats: 181 | if stat.entry.key != []: 182 | msg = 
osparser.OFPExpMsgSetFlowState( 183 | datapath=dp, state=0, keys=stat.entry.key, table_id=stat.table_id) 184 | dp.send_msg(msg) 185 | elif ev.msg.body.exp_type==1: 186 | stat = osparser.OFPGlobalStateStats.parser(ev.msg.body.data, offset=0) 187 | msg = osparser.OFPExpResetGlobalState(datapath=dp) 188 | dp.send_msg(msg) 189 | self.configure_global_states(dp) 190 | 191 | def timeout_probe(self,timeout): 192 | f_t_parser.selected_timeout = timeout 193 | 194 | for datapath_id in self.flow_entries_with_timeout_dict[timeout]: 195 | for table_id in self.flow_entries_with_timeout_dict[timeout][datapath_id]: 196 | for match in self.flow_entries_with_timeout_dict[timeout][datapath_id][table_id]: 197 | mod = ofparser.OFPFlowMod( 198 | datapath=self.dp_dictionary[datapath_id], cookie=0, cookie_mask=0, table_id=table_id, 199 | command=ofproto.OFPFC_MODIFY, idle_timeout=0, hard_timeout=0, 200 | priority=self.flow_entries_with_timeout_dict[timeout][datapath_id][table_id][match]['priority'], buffer_id=ofproto.OFP_NO_BUFFER, 201 | out_port=ofproto.OFPP_ANY, 202 | out_group=ofproto.OFPG_ANY, 203 | flags=0, match=match, instructions=self.flow_entries_with_timeout_dict[timeout][datapath_id][table_id][match]['inst']) 204 | self.dp_dictionary[datapath_id].send_msg(mod) 205 | 206 | def timeout_burst(self,burst): 207 | f_t_parser.selected_burst = burst 208 | 209 | for datapath_id in self.flow_entries_with_burst_dict[burst]: 210 | for table_id in self.flow_entries_with_burst_dict[burst][datapath_id]: 211 | for match in self.flow_entries_with_burst_dict[burst][datapath_id][table_id]: 212 | mod = ofparser.OFPFlowMod( 213 | datapath=self.dp_dictionary[datapath_id], cookie=0, cookie_mask=0, table_id=table_id, 214 | command=ofproto.OFPFC_MODIFY, idle_timeout=0, hard_timeout=0, 215 | priority=self.flow_entries_with_burst_dict[burst][datapath_id][table_id][match]['priority'], buffer_id=ofproto.OFP_NO_BUFFER, 216 | out_port=ofproto.OFPP_ANY, 217 | out_group=ofproto.OFPG_ANY, 218 | flags=0, 
match=match, instructions=self.flow_entries_with_burst_dict[burst][datapath_id][table_id][match]['inst']) 219 | self.dp_dictionary[datapath_id].send_msg(mod) 220 | 221 | def send_state_stats_request(self): 222 | for datapath_id in self.dp_dictionary: 223 | req = osparser.OFPExpStateStatsMultipartRequest(datapath=self.dp_dictionary[datapath_id]) 224 | self.dp_dictionary[datapath_id].send_msg(req) 225 | 226 | def send_global_state_stats_request(self): 227 | for datapath_id in self.dp_dictionary: 228 | req = osparser.OFPExpGlobalStateStatsMultipartRequest(datapath=self.dp_dictionary[datapath_id]) 229 | self.dp_dictionary[datapath_id].send_msg(req) 230 | 231 | def _monitor(self,datapath): 232 | hub.sleep(5) 233 | print("Network is ready") 234 | 235 | hub.sleep(5) 236 | req_per_fault = {} 237 | for f in self.faults: 238 | req_per_fault[f]=len(self.faults[f]['requests']) 239 | worst_fault=max(req_per_fault.iterkeys(), key=lambda k: req_per_fault[k]) 240 | #worst_fault=(7,8) 241 | 242 | fw_back_path_len_per_req = {} 243 | for r in self.faults[worst_fault]['requests']: 244 | if self.faults[worst_fault]['requests'][r]['fw_back_path']!=None: 245 | fw_back_path_len_per_req[r]=len(self.faults[worst_fault]['requests'][r]['fw_back_path']) 246 | else: 247 | fw_back_path_len_per_req[r]=0 248 | 249 | # requests passing from worst_link sorted by fw_back_path_len in decreasing order 250 | sorted_req=sorted(fw_back_path_len_per_req,key=fw_back_path_len_per_req.__getitem__,reverse=True) 251 | 252 | i=0 253 | for sim_num in range(self.REALIZATIONS_NUM): 254 | print('\n\x1B[32mSTARTING REALIZATION '+str(i+1)+"/"+str(self.REALIZATIONS_NUM)+'\n\x1B[0m') 255 | count=0 256 | for req in sorted_req: 257 | count+=1 258 | print('h'+str(req[0])+'# ping -i '+str(os.environ['interarrival'])+' '+self.mn_net['h'+str(req[1])].IP()+'&') 259 | self.mn_net['h'+str(req[0])].cmd('ping -i '+str(os.environ['interarrival'])+' '+self.mn_net['h'+str(req[1])].IP()+'> 
~/ping_SPIDER.'+str(req[0])+'.'+str(req[1])+'.sim'+str(i)+'.txt &') 260 | if count==int(os.environ['N']): 261 | break 262 | 263 | if os.environ['ENABLE_FAULT']=='yes': 264 | hub.sleep(int(os.environ['LINK_DOWN'])) 265 | print("LINK DOWN "+str(worst_fault)) 266 | self.set_link_down(worst_fault[0],worst_fault[1]) 267 | hub.sleep(int(os.environ['LINK_UP'])) 268 | print("LINK UP "+str(worst_fault)) 269 | os.system("sudo kill -SIGINT `pidof ping`") 270 | self.set_link_up(worst_fault[0],worst_fault[1]) 271 | self.send_state_stats_request() 272 | self.send_global_state_stats_request() 273 | hub.sleep(int(os.environ['LINK_UP'])) 274 | i+=1 275 | 276 | os.system("chown mininet:mininet ~/ping_SPIDER.*") 277 | os.system("kill -9 $(pidof -x ryu-manager) 2> /dev/null") 278 | -------------------------------------------------------------------------------- /results/fig9/fig9_OF_ryu_app.py: -------------------------------------------------------------------------------- 1 | from ryu.base import app_manager 2 | from ryu.controller import ofp_event 3 | from ryu.controller.handler import MAIN_DISPATCHER, CONFIG_DISPATCHER, HANDSHAKE_DISPATCHER 4 | from ryu.controller.handler import set_ev_cls 5 | import ryu.ofproto.ofproto_v1_3 as ofproto 6 | import ryu.ofproto.ofproto_v1_3_parser as ofparser 7 | import ryu.ofproto.openstate_v1_0 as osproto 8 | import ryu.ofproto.openstate_v1_0_parser as osparser 9 | from ryu.lib.packet import packet 10 | from ryu.topology import event 11 | import logging 12 | from sets import Set 13 | import time 14 | import sys,os 15 | import f_t_parser_ctrl_drop as f_t_parser 16 | from ryu.lib import hub 17 | from datetime import datetime 18 | from time import sleep 19 | import random 20 | 21 | class OpenStateFaultTolerance(app_manager.RyuApp): 22 | OFP_VERSIONS = [ofproto.OFP_VERSION] 23 | 24 | def __init__(self, *args, **kwargs): 25 | super(OpenStateFaultTolerance, self).__init__(*args, **kwargs) 26 | f_t_parser.generate_flow_entries_dict(GUI=True) 27 | 28 | 
self.REALIZATIONS_NUM = int(os.environ['REALIZATIONS_NUM']) 29 | 30 | # Associates dp_id to datapath object 31 | self.dp_dictionary=dict() 32 | self.ports_mac_dict=dict() 33 | 34 | # Detect nodes need group entries installation 35 | self.detect_nodes=Set([]) 36 | for request in f_t_parser.requests: 37 | for y in range(len(f_t_parser.requests[request]['faults'])): 38 | self.detect_nodes.add(f_t_parser.requests[request]['faults'].items()[y][1]['detect_node']) 39 | 40 | # Primary path nodes match against "state=0" => they need to have a stateful stage 0 41 | self.stateful_nodes=Set([]) 42 | for request in f_t_parser.requests: 43 | for y in range(len(f_t_parser.requests[request]['primary_path'])): 44 | self.stateful_nodes.add(f_t_parser.requests[request]['primary_path'][y]) 45 | 46 | # Needed by fault_tolerance_rest 47 | self.f_t_parser = f_t_parser 48 | 49 | # switch counter 50 | self.switch_count = 0 51 | 52 | def save_datapath(self,dp_dictionary,dp_id,dp): 53 | dp_dictionary = dict(dp_dictionary.items() + [(dp_id, dp)]) 54 | return dp_dictionary 55 | 56 | @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER) 57 | def _packet_in_handler(self, ev): 58 | msg = ev.msg 59 | datapath = msg.datapath 60 | pkt = packet.Packet(msg.data) 61 | 62 | @set_ev_cls(ofp_event.EventOFPPortStatus, MAIN_DISPATCHER) 63 | def _port_status_handler(self, ev): 64 | msg = ev.msg 65 | name = msg.desc.name.rsplit('-')[0] 66 | port_no = msg.desc.port_no 67 | 68 | other = f_t_parser.mn_topo_ports[name] 69 | for key in other: 70 | if other[key]==port_no: 71 | fault = (int(key[1:]),int(name[1:])) 72 | if(fault[0] > fault[1]): 73 | fault=(fault[1],fault[0]) 74 | 75 | if msg.desc.config == 1: 76 | for request in f_t_parser.faults[fault]['requests']: 77 | redirect = f_t_parser.requests[request]['faults'][fault]['redirect_node'] 78 | detect = f_t_parser.requests[request]['faults'][fault]['detect_node'] 79 | if redirect!=detect: 80 | print("Installing redirect rules in node %d for request %s with 
fault %s" %(redirect,str(request),str(fault))) 81 | for flow_entry in f_t_parser.redirect_detour_dict[(redirect,request,fault)]: 82 | mod = ofparser.OFPFlowMod( 83 | datapath=self.dp_dictionary[redirect], cookie=0, cookie_mask=0, table_id=flow_entry['table_id'], 84 | command=ofproto.OFPFC_MODIFY, idle_timeout=0, hard_timeout=0, 85 | priority=32768, buffer_id=ofproto.OFP_NO_BUFFER, 86 | out_port=ofproto.OFPP_ANY, 87 | out_group=ofproto.OFPG_ANY, 88 | flags=0, match=flow_entry['match'], instructions=flow_entry['inst']) 89 | self.dp_dictionary[redirect].send_msg(mod) 90 | 91 | @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER) 92 | def switch_features_handler(self, ev): 93 | msg = ev.msg 94 | datapath = msg.datapath 95 | 96 | self.ports_mac_dict[datapath.id]={} 97 | self.send_features_request(datapath) 98 | self.send_port_desc_stats_request(datapath) 99 | self.install_flows(datapath,datapath.id in self.detect_nodes,datapath.id in self.stateful_nodes) 100 | 101 | self.dp_dictionary = self.save_datapath(self.dp_dictionary,datapath.id,datapath) 102 | 103 | @set_ev_cls(ofp_event.EventOFPPortDescStatsReply, MAIN_DISPATCHER) 104 | def port_desc_stats_reply_handler(self, ev): 105 | for p in ev.msg.body: 106 | self.ports_mac_dict[ev.msg.datapath.id][p.port_no]=p.hw_addr 107 | 108 | def install_flows(self,datapath,has_group,stateful): 109 | print("Configuring flow table for switch %d" % datapath.id) 110 | if stateful: 111 | self.send_table_mod(datapath) 112 | self.send_key_lookup(datapath) 113 | self.send_key_update(datapath) 114 | 115 | # group entries installation 116 | if has_group: 117 | self.install_group_entries(datapath) 118 | 119 | # flow entries installation 120 | if datapath.id in f_t_parser.flow_entries_dict.keys(): 121 | for flow_entry in f_t_parser.flow_entries_dict[datapath.id]: 122 | mod = ofparser.OFPFlowMod( 123 | datapath=datapath, cookie=0, cookie_mask=0, table_id=flow_entry['table_id'], 124 | command=ofproto.OFPFC_ADD, idle_timeout=0, 
hard_timeout=0, 125 | priority=32768, buffer_id=ofproto.OFP_NO_BUFFER, 126 | out_port=ofproto.OFPP_ANY, 127 | out_group=ofproto.OFPG_ANY, 128 | flags=0, match=flow_entry['match'], instructions=flow_entry['inst']) 129 | datapath.send_msg(mod) 130 | 131 | for primary_entry_key in f_t_parser.redirect_primary_dict.keys(): 132 | if primary_entry_key[0]==datapath.id: 133 | for flow_entry in f_t_parser.redirect_primary_dict[primary_entry_key]: 134 | mod = ofparser.OFPFlowMod( 135 | datapath=datapath, cookie=0, cookie_mask=0, table_id=flow_entry['table_id'], 136 | command=ofproto.OFPFC_ADD, idle_timeout=0, hard_timeout=0, 137 | priority=32768, buffer_id=ofproto.OFP_NO_BUFFER, 138 | out_port=ofproto.OFPP_ANY, 139 | out_group=ofproto.OFPG_ANY, 140 | flags=0, match=flow_entry['match'], instructions=flow_entry['inst']) 141 | datapath.send_msg(mod) 142 | 143 | self.switch_count += 1 144 | if self.switch_count == f_t_parser.G.number_of_nodes(): 145 | self.monitor_thread = hub.spawn(self._monitor,datapath) 146 | 147 | def send_table_mod(self, datapath): 148 | req = osparser.OFPExpMsgConfigureStatefulTable(datapath=datapath, table_id=0, stateful=1) 149 | datapath.send_msg(req) 150 | 151 | def send_features_request(self, datapath): 152 | req = ofparser.OFPFeaturesRequest(datapath) 153 | datapath.send_msg(req) 154 | 155 | def send_key_lookup(self, datapath): 156 | key_lookup_extractor = osparser.OFPExpMsgKeyExtract(datapath, osproto.OFPSC_EXP_SET_L_EXTRACTOR, [ofproto.OXM_OF_ETH_SRC,ofproto.OXM_OF_ETH_DST], table_id=0) 157 | datapath.send_msg(key_lookup_extractor) 158 | 159 | def send_key_update(self, datapath): 160 | key_update_extractor = osparser.OFPExpMsgKeyExtract(datapath, osproto.OFPSC_EXP_SET_U_EXTRACTOR, [ofproto.OXM_OF_ETH_SRC,ofproto.OXM_OF_ETH_DST], table_id=0) 161 | datapath.send_msg(key_update_extractor) 162 | 163 | def set_link_down(self,node1,node2): 164 | if(node1 > node2): 165 | node1,node2 = node2,node1 166 | 167 | hw_addr1 = 
self.ports_mac_dict[self.dp_dictionary[node1].id][f_t_parser.mn_topo_ports['s'+str(node1)]['s'+str(node2)]] 168 | hw_addr2 = self.ports_mac_dict[self.dp_dictionary[node2].id][f_t_parser.mn_topo_ports['s'+str(node2)]['s'+str(node1)]] 169 | config = 1 170 | mask = (ofproto.OFPPC_PORT_DOWN) 171 | advertise = (ofproto.OFPPF_10MB_HD | ofproto.OFPPF_100MB_FD | ofproto.OFPPF_1GB_FD | ofproto.OFPPF_COPPER | 172 | ofproto.OFPPF_AUTONEG | ofproto.OFPPF_PAUSE | ofproto.OFPPF_PAUSE_ASYM) 173 | req1 = ofparser.OFPPortMod(self.dp_dictionary[node1], f_t_parser.mn_topo_ports['s'+str(node1)]['s'+str(node2)], hw_addr1, config, mask, advertise) 174 | self.dp_dictionary[node1].send_msg(req1) 175 | req2 = ofparser.OFPPortMod(self.dp_dictionary[node2], f_t_parser.mn_topo_ports['s'+str(node2)]['s'+str(node1)], hw_addr2, config, mask, advertise) 176 | self.dp_dictionary[node2].send_msg(req2) 177 | 178 | def set_link_up(self,node1,node2): 179 | if(node1 > node2): 180 | node1,node2 = node2,node1 181 | 182 | hw_addr1 = self.ports_mac_dict[self.dp_dictionary[node1].id][f_t_parser.mn_topo_ports['s'+str(node1)]['s'+str(node2)]] 183 | hw_addr2 = self.ports_mac_dict[self.dp_dictionary[node2].id][f_t_parser.mn_topo_ports['s'+str(node2)]['s'+str(node1)]] 184 | config = 0 185 | mask = (ofproto.OFPPC_PORT_DOWN) 186 | advertise = (ofproto.OFPPF_10MB_HD | ofproto.OFPPF_100MB_FD | ofproto.OFPPF_1GB_FD | ofproto.OFPPF_COPPER | 187 | ofproto.OFPPF_AUTONEG | ofproto.OFPPF_PAUSE | ofproto.OFPPF_PAUSE_ASYM) 188 | req1 = ofparser.OFPPortMod(self.dp_dictionary[node1], f_t_parser.mn_topo_ports['s'+str(node1)]['s'+str(node2)], hw_addr1, config, mask, advertise) 189 | self.dp_dictionary[node1].send_msg(req1) 190 | req2 = ofparser.OFPPortMod(self.dp_dictionary[node2], f_t_parser.mn_topo_ports['s'+str(node2)]['s'+str(node1)], hw_addr2, config, mask, advertise) 191 | self.dp_dictionary[node2].send_msg(req2) 192 | 193 | # "Primary path rules" installation in Redirect only nodes of all the requests involved in fault 
194 | fault=(node1,node2) 195 | for request in f_t_parser.faults[(fault)]['requests']: 196 | redirect = f_t_parser.requests[request]['faults'][fault]['redirect_node'] 197 | #print("Installing primary rules in node %d for request %s with fault %s" %(redirect,str(request),str(fault))) 198 | for flow_entry in f_t_parser.redirect_primary_dict[(redirect,request)]: 199 | mod = ofparser.OFPFlowMod( 200 | datapath=self.dp_dictionary[redirect], cookie=0, cookie_mask=0, table_id=flow_entry['table_id'], 201 | command=ofproto.OFPFC_MODIFY, idle_timeout=0, hard_timeout=0, 202 | priority=32768, buffer_id=ofproto.OFP_NO_BUFFER, 203 | out_port=ofproto.OFPP_ANY, 204 | out_group=ofproto.OFPG_ANY, 205 | flags=0, match=flow_entry['match'], instructions=flow_entry['inst']) 206 | self.dp_dictionary[redirect].send_msg(mod) 207 | 208 | def install_group_entries(self,datapath): 209 | for group_entry in f_t_parser.group_entries_dict[datapath.id]: 210 | buckets = f_t_parser.group_entries_dict[datapath.id][group_entry] 211 | req = ofparser.OFPGroupMod(datapath, ofproto.OFPGC_ADD,ofproto.OFPGT_FF, group_entry, buckets) 212 | datapath.send_msg(req) 213 | 214 | def send_port_desc_stats_request(self, datapath): 215 | req = ofparser.OFPPortDescStatsRequest(datapath, 0) 216 | datapath.send_msg(req) 217 | 218 | def _monitor(self,datapath): 219 | hub.sleep(5) 220 | print("Network is ready") 221 | 222 | hub.sleep(5) 223 | req_per_fault = {} 224 | for f in self.f_t_parser.faults: 225 | req_per_fault[f]=len(self.f_t_parser.faults[f]['requests']) 226 | worst_fault=max(req_per_fault.iterkeys(), key=lambda k: req_per_fault[k]) 227 | #worst_fault=(7,8) 228 | 229 | fw_back_path_len_per_req = {} 230 | for r in self.f_t_parser.faults[worst_fault]['requests']: 231 | if self.f_t_parser.faults[worst_fault]['requests'][r]['fw_back_path']!=None: 232 | fw_back_path_len_per_req[r]=len(self.f_t_parser.faults[worst_fault]['requests'][r]['fw_back_path']) 233 | else: 234 | fw_back_path_len_per_req[r]=0 235 | 236 | # 
requests passing from worst_link sorted by fw_back_path_len in fw_back_path_len_per_req order 237 | sorted_req=sorted(fw_back_path_len_per_req,key=fw_back_path_len_per_req.__getitem__,reverse=True) 238 | 239 | RTT_DELAY_LIST = eval(os.environ['RTT_DELAY_LIST']) 240 | for delay in RTT_DELAY_LIST: 241 | print("\n\x1B[32mSetting delay switch-CTRL: "+str(delay)+"ms\x1B[0m") 242 | os.system('sudo tc qdisc change dev lo root netem delay '+str(delay)+'ms') 243 | i=0 244 | for sim_num in range(self.REALIZATIONS_NUM): 245 | print('\n\x1B[32mSTARTING REALIZATION '+str(i+1)+"/"+str(self.REALIZATIONS_NUM)+'\n\x1B[0m') 246 | count=0 247 | for req in sorted_req: 248 | count+=1 249 | print('h'+str(req[0])+'# ping -i '+str(os.environ['interarrival'])+' '+self.f_t_parser.net['h'+str(req[1])].IP()+'&') 250 | self.f_t_parser.net['h'+str(req[0])].cmd('ping -i '+str(os.environ['interarrival'])+' '+self.f_t_parser.net['h'+str(req[1])].IP()+'> ~/ping_OF.'+str(req[0])+'.'+str(req[1])+'.'+str(delay)+'rtt.sim'+str(i)+'.txt &') 251 | if count==int(os.environ['N']): 252 | break 253 | 254 | if os.environ['ENABLE_FAULT']=='yes': 255 | hub.sleep(int(os.environ['LINK_DOWN'])) 256 | print("LINK DOWN "+str(worst_fault)) 257 | self.set_link_down(worst_fault[0],worst_fault[1]) 258 | hub.sleep(int(os.environ['LINK_UP'])) 259 | print("LINK UP "+str(worst_fault)) 260 | self.set_link_up(worst_fault[0],worst_fault[1]) 261 | os.system("sudo kill -SIGINT `pidof ping`") 262 | # wait for primary rules 263 | hub.sleep(int(os.environ['LINK_UP'])) 264 | i+=1 265 | 266 | os.system("chown mininet:mininet ~/ping_OF*") 267 | os.system("kill -9 $(pidof -x ryu-manager) 2> /dev/null") 268 | -------------------------------------------------------------------------------- /src/results.txt.polska: -------------------------------------------------------------------------------- 1 | set PrimaryPath[1] := 1 11 7 12; 2 | set PrimaryPath[2] := 1 3; 3 | set PrimaryPath[3] := 1 11 7 4; 4 | set PrimaryPath[4] := 1 11 5; 5 | set 
PrimaryPath[5] := 1 6; 6 | set PrimaryPath[6] := 1 3 10; 7 | set PrimaryPath[7] := 1 11 2 8; 8 | set PrimaryPath[8] := 1 6 9; 9 | set PrimaryPath[9] := 3 10 8 12; 10 | set PrimaryPath[10] := 3 1; 11 | set PrimaryPath[11] := 3 10 8 12 4; 12 | set PrimaryPath[12] := 3 2 11 5; 13 | set PrimaryPath[13] := 3 1 6; 14 | set PrimaryPath[14] := 3 10; 15 | set PrimaryPath[15] := 3 2 8; 16 | set PrimaryPath[16] := 3 1 6 9; 17 | set PrimaryPath[17] := 4 12; 18 | set PrimaryPath[18] := 4 7 11 1; 19 | set PrimaryPath[19] := 4 12 8 10 3; 20 | set PrimaryPath[20] := 4 5; 21 | set PrimaryPath[21] := 4 5 11 6; 22 | set PrimaryPath[22] := 4 12 8 10; 23 | set PrimaryPath[23] := 4 12 8; 24 | set PrimaryPath[24] := 4 5 9; 25 | set PrimaryPath[25] := 5 4 12; 26 | set PrimaryPath[26] := 5 11 1; 27 | set PrimaryPath[27] := 5 4; 28 | set PrimaryPath[28] := 5 11 2 3; 29 | set PrimaryPath[29] := 5 11 6; 30 | set PrimaryPath[30] := 5 4 12 8 10; 31 | set PrimaryPath[31] := 5 4 12 8; 32 | set PrimaryPath[32] := 5 9; 33 | set PrimaryPath[33] := 6 11 7 12; 34 | set PrimaryPath[34] := 6 1; 35 | set PrimaryPath[35] := 6 11 5 4; 36 | set PrimaryPath[36] := 6 11 5; 37 | set PrimaryPath[37] := 6 1 3; 38 | set PrimaryPath[38] := 6 1 3 10; 39 | set PrimaryPath[39] := 6 11 2 8; 40 | set PrimaryPath[40] := 6 9; 41 | set PrimaryPath[41] := 8 12; 42 | set PrimaryPath[42] := 8 2 11 1; 43 | set PrimaryPath[43] := 8 12 4; 44 | set PrimaryPath[44] := 8 12 4 5; 45 | set PrimaryPath[45] := 8 2 11 6; 46 | set PrimaryPath[46] := 8 10; 47 | set PrimaryPath[47] := 8 2 3; 48 | set PrimaryPath[48] := 8 2 11 6 9; 49 | set PrimaryPath[49] := 10 8 12; 50 | set PrimaryPath[50] := 10 3 1; 51 | set PrimaryPath[51] := 10 8 12 4; 52 | set PrimaryPath[52] := 10 8 12 4 5; 53 | set PrimaryPath[53] := 10 3 1 6; 54 | set PrimaryPath[54] := 10 3; 55 | set PrimaryPath[55] := 10 8; 56 | set PrimaryPath[56] := 10 3 1 6 9; 57 | set PrimaryPath[57] := 9 5 4 12; 58 | set PrimaryPath[58] := 9 6 1; 59 | set PrimaryPath[59] := 9 5 4; 60 | set 
PrimaryPath[60] := 9 5; 61 | set PrimaryPath[61] := 9 6; 62 | set PrimaryPath[62] := 9 6 1 3 10; 63 | set PrimaryPath[63] := 9 6 11 2 8; 64 | set PrimaryPath[64] := 9 6 1 3; 65 | set PrimaryPath[65] := 12 8 10 3; 66 | set PrimaryPath[66] := 12 7 11 1; 67 | set PrimaryPath[67] := 12 4; 68 | set PrimaryPath[68] := 12 4 5; 69 | set PrimaryPath[69] := 12 7 11 6; 70 | set PrimaryPath[70] := 12 8 10; 71 | set PrimaryPath[71] := 12 8; 72 | set PrimaryPath[72] := 12 4 5 9; 73 | 74 | param DetectNode[1,3,2]:= 1; 75 | param DetectNode[1,3,6]:= 1; 76 | param DetectNode[1,3,37]:= 1; 77 | param DetectNode[1,3,38]:= 1; 78 | param DetectNode[1,3,62]:= 1; 79 | param DetectNode[1,3,64]:= 1; 80 | param DetectNode[1,6,5]:= 1; 81 | param DetectNode[1,6,8]:= 1; 82 | param DetectNode[1,6,13]:= 1; 83 | param DetectNode[1,6,16]:= 1; 84 | param DetectNode[1,6,53]:= 1; 85 | param DetectNode[1,6,56]:= 1; 86 | param DetectNode[1,11,1]:= 1; 87 | param DetectNode[1,11,3]:= 1; 88 | param DetectNode[1,11,4]:= 1; 89 | param DetectNode[1,11,7]:= 1; 90 | param DetectNode[2,3,28]:= 2; 91 | param DetectNode[2,3,47]:= 2; 92 | param DetectNode[2,8,7]:= 2; 93 | param DetectNode[2,8,15]:= 2; 94 | param DetectNode[2,8,39]:= 2; 95 | param DetectNode[2,8,63]:= 2; 96 | param DetectNode[2,11,12]:= 2; 97 | param DetectNode[2,11,42]:= 2; 98 | param DetectNode[2,11,45]:= 2; 99 | param DetectNode[2,11,48]:= 2; 100 | param DetectNode[3,1,10]:= 3; 101 | param DetectNode[3,1,13]:= 3; 102 | param DetectNode[3,1,16]:= 3; 103 | param DetectNode[3,1,50]:= 3; 104 | param DetectNode[3,1,53]:= 3; 105 | param DetectNode[3,1,56]:= 3; 106 | param DetectNode[3,2,12]:= 3; 107 | param DetectNode[3,2,15]:= 3; 108 | param DetectNode[3,10,6]:= 3; 109 | param DetectNode[3,10,9]:= 3; 110 | param DetectNode[3,10,11]:= 3; 111 | param DetectNode[3,10,14]:= 3; 112 | param DetectNode[3,10,38]:= 3; 113 | param DetectNode[3,10,62]:= 3; 114 | param DetectNode[4,5,20]:= 4; 115 | param DetectNode[4,5,21]:= 4; 116 | param DetectNode[4,5,24]:= 4; 
117 | param DetectNode[4,5,44]:= 4; 118 | param DetectNode[4,5,52]:= 4; 119 | param DetectNode[4,5,68]:= 4; 120 | param DetectNode[4,5,72]:= 4; 121 | param DetectNode[4,7,18]:= 4; 122 | param DetectNode[4,12,17]:= 4; 123 | param DetectNode[4,12,19]:= 4; 124 | param DetectNode[4,12,22]:= 4; 125 | param DetectNode[4,12,23]:= 4; 126 | param DetectNode[4,12,25]:= 4; 127 | param DetectNode[4,12,30]:= 4; 128 | param DetectNode[4,12,31]:= 4; 129 | param DetectNode[4,12,57]:= 4; 130 | param DetectNode[5,4,25]:= 5; 131 | param DetectNode[5,4,27]:= 5; 132 | param DetectNode[5,4,30]:= 5; 133 | param DetectNode[5,4,31]:= 5; 134 | param DetectNode[5,4,35]:= 5; 135 | param DetectNode[5,4,57]:= 5; 136 | param DetectNode[5,4,59]:= 5; 137 | param DetectNode[5,9,24]:= 5; 138 | param DetectNode[5,9,32]:= 5; 139 | param DetectNode[5,9,72]:= 5; 140 | param DetectNode[5,11,21]:= 5; 141 | param DetectNode[5,11,26]:= 5; 142 | param DetectNode[5,11,28]:= 5; 143 | param DetectNode[5,11,29]:= 5; 144 | param DetectNode[6,1,34]:= 6; 145 | param DetectNode[6,1,37]:= 6; 146 | param DetectNode[6,1,38]:= 6; 147 | param DetectNode[6,1,58]:= 6; 148 | param DetectNode[6,1,62]:= 6; 149 | param DetectNode[6,1,64]:= 6; 150 | param DetectNode[6,9,8]:= 6; 151 | param DetectNode[6,9,16]:= 6; 152 | param DetectNode[6,9,40]:= 6; 153 | param DetectNode[6,9,48]:= 6; 154 | param DetectNode[6,9,56]:= 6; 155 | param DetectNode[6,11,33]:= 6; 156 | param DetectNode[6,11,35]:= 6; 157 | param DetectNode[6,11,36]:= 6; 158 | param DetectNode[6,11,39]:= 6; 159 | param DetectNode[6,11,63]:= 6; 160 | param DetectNode[7,4,3]:= 7; 161 | param DetectNode[7,11,18]:= 7; 162 | param DetectNode[7,11,66]:= 7; 163 | param DetectNode[7,11,69]:= 7; 164 | param DetectNode[7,12,1]:= 7; 165 | param DetectNode[7,12,33]:= 7; 166 | param DetectNode[8,2,42]:= 8; 167 | param DetectNode[8,2,45]:= 8; 168 | param DetectNode[8,2,47]:= 8; 169 | param DetectNode[8,2,48]:= 8; 170 | param DetectNode[8,10,19]:= 8; 171 | param DetectNode[8,10,22]:= 
8; 172 | param DetectNode[8,10,30]:= 8; 173 | param DetectNode[8,10,46]:= 8; 174 | param DetectNode[8,10,65]:= 8; 175 | param DetectNode[8,10,70]:= 8; 176 | param DetectNode[8,12,9]:= 8; 177 | param DetectNode[8,12,11]:= 8; 178 | param DetectNode[8,12,41]:= 8; 179 | param DetectNode[8,12,43]:= 8; 180 | param DetectNode[8,12,44]:= 8; 181 | param DetectNode[8,12,49]:= 8; 182 | param DetectNode[8,12,51]:= 8; 183 | param DetectNode[8,12,52]:= 8; 184 | param DetectNode[9,5,57]:= 9; 185 | param DetectNode[9,5,59]:= 9; 186 | param DetectNode[9,5,60]:= 9; 187 | param DetectNode[9,6,58]:= 9; 188 | param DetectNode[9,6,61]:= 9; 189 | param DetectNode[9,6,62]:= 9; 190 | param DetectNode[9,6,63]:= 9; 191 | param DetectNode[9,6,64]:= 9; 192 | param DetectNode[10,3,19]:= 10; 193 | param DetectNode[10,3,50]:= 10; 194 | param DetectNode[10,3,53]:= 10; 195 | param DetectNode[10,3,54]:= 10; 196 | param DetectNode[10,3,56]:= 10; 197 | param DetectNode[10,3,65]:= 10; 198 | param DetectNode[10,8,9]:= 10; 199 | param DetectNode[10,8,11]:= 10; 200 | param DetectNode[10,8,49]:= 10; 201 | param DetectNode[10,8,51]:= 10; 202 | param DetectNode[10,8,52]:= 10; 203 | param DetectNode[10,8,55]:= 10; 204 | param DetectNode[11,1,18]:= 11; 205 | param DetectNode[11,1,26]:= 11; 206 | param DetectNode[11,1,42]:= 11; 207 | param DetectNode[11,1,66]:= 11; 208 | param DetectNode[11,2,7]:= 11; 209 | param DetectNode[11,2,28]:= 11; 210 | param DetectNode[11,2,39]:= 11; 211 | param DetectNode[11,2,63]:= 11; 212 | param DetectNode[11,5,4]:= 11; 213 | param DetectNode[11,5,12]:= 11; 214 | param DetectNode[11,5,35]:= 11; 215 | param DetectNode[11,5,36]:= 11; 216 | param DetectNode[11,6,21]:= 11; 217 | param DetectNode[11,6,29]:= 11; 218 | param DetectNode[11,6,45]:= 11; 219 | param DetectNode[11,6,48]:= 11; 220 | param DetectNode[11,6,69]:= 11; 221 | param DetectNode[11,7,1]:= 11; 222 | param DetectNode[11,7,3]:= 11; 223 | param DetectNode[11,7,33]:= 11; 224 | param DetectNode[12,4,11]:= 12; 225 | param 
DetectNode[12,4,43]:= 12; 226 | param DetectNode[12,4,44]:= 12; 227 | param DetectNode[12,4,51]:= 12; 228 | param DetectNode[12,4,52]:= 12; 229 | param DetectNode[12,4,67]:= 12; 230 | param DetectNode[12,4,68]:= 12; 231 | param DetectNode[12,4,72]:= 12; 232 | param DetectNode[12,7,66]:= 12; 233 | param DetectNode[12,7,69]:= 12; 234 | param DetectNode[12,8,19]:= 12; 235 | param DetectNode[12,8,22]:= 12; 236 | param DetectNode[12,8,23]:= 12; 237 | param DetectNode[12,8,30]:= 12; 238 | param DetectNode[12,8,31]:= 12; 239 | param DetectNode[12,8,65]:= 12; 240 | param DetectNode[12,8,70]:= 12; 241 | param DetectNode[12,8,71]:= 12; 242 | set DetourPath[1,3,2] := 1 11 2 3; 243 | set DetourPath[1,3,6] := 1 11 2 8 10; 244 | set DetourPath[1,3,37] := 1 11 2 3; 245 | set DetourPath[1,3,38] := 1 11 2 8 10; 246 | set DetourPath[1,3,62] := 1 11 2 8 10; 247 | set DetourPath[1,3,64] := 1 11 2 3; 248 | set DetourPath[1,6,5] := 1 11 6; 249 | set DetourPath[1,6,8] := 1 11 5 9; 250 | set DetourPath[1,6,13] := 1 11 6; 251 | set DetourPath[1,6,16] := 1 11 5 9; 252 | set DetourPath[1,6,53] := 1 11 6; 253 | set DetourPath[1,6,56] := 1 11 5 9; 254 | set DetourPath[1,11,1] := 1 3 2 8 12; 255 | set DetourPath[1,11,3] := 1 6 9 5 4; 256 | set DetourPath[1,11,4] := 1 6 9 5; 257 | set DetourPath[1,11,7] := 1 3 2; 258 | set DetourPath[2,3,28] := 11 1 3; 259 | set DetourPath[2,3,47] := 8 10 3; 260 | set DetourPath[2,8,7] := 2 3 10 8; 261 | set DetourPath[2,8,15] := 3 10 8; 262 | set DetourPath[2,8,39] := 11 7 12 8; 263 | set DetourPath[2,8,63] := 11 7 12 8; 264 | set DetourPath[2,11,12] := 3 1 6 9 5; 265 | set DetourPath[2,11,42] := 2 3 1; 266 | set DetourPath[2,11,45] := 2 3 1 6; 267 | set DetourPath[2,11,48] := 2 3 1 6; 268 | set DetourPath[3,1,10] := 3 2 11 1; 269 | set DetourPath[3,1,13] := 3 2 11 6; 270 | set DetourPath[3,1,16] := 3 2 11 6; 271 | set DetourPath[3,1,50] := 3 2 11 1; 272 | set DetourPath[3,1,53] := 3 2 11 6; 273 | set DetourPath[3,1,56] := 3 2 11 6; 274 | set DetourPath[3,2,12] 
:= 3 1 11; 275 | set DetourPath[3,2,15] := 3 10 8; 276 | set DetourPath[3,10,6] := 3 2 8 10; 277 | set DetourPath[3,10,9] := 3 2 8; 278 | set DetourPath[3,10,11] := 3 2 8; 279 | set DetourPath[3,10,14] := 3 2 8 10; 280 | set DetourPath[3,10,38] := 3 2 8 10; 281 | set DetourPath[3,10,62] := 3 2 8 10; 282 | set DetourPath[4,5,20] := 4 7 11 5; 283 | set DetourPath[4,5,21] := 4 7 11; 284 | set DetourPath[4,5,24] := 4 7 11 6 9; 285 | set DetourPath[4,5,44] := 4 7 11 5; 286 | set DetourPath[4,5,52] := 4 7 11 5; 287 | set DetourPath[4,5,68] := 4 7 11 5; 288 | set DetourPath[4,5,72] := 4 7 11 6 9; 289 | set DetourPath[4,7,18] := 4 5 11; 290 | set DetourPath[4,12,17] := 4 7 12; 291 | set DetourPath[4,12,19] := 4 5 11 2 3; 292 | set DetourPath[4,12,22] := 4 5 11 2 8; 293 | set DetourPath[4,12,23] := 4 5 11 2 8; 294 | set DetourPath[4,12,25] := 4 7 12; 295 | set DetourPath[4,12,30] := 5 11 2 8; 296 | set DetourPath[4,12,31] := 5 11 2 8; 297 | set DetourPath[4,12,57] := 4 7 12; 298 | set DetourPath[5,4,25] := 5 11 7 12; 299 | set DetourPath[5,4,27] := 5 11 7 4; 300 | set DetourPath[5,4,30] := 5 11 2 8; 301 | set DetourPath[5,4,31] := 5 11 2 8; 302 | set DetourPath[5,4,35] := 11 7 4; 303 | set DetourPath[5,4,57] := 5 11 7 12; 304 | set DetourPath[5,4,59] := 5 11 7 4; 305 | set DetourPath[5,9,24] := 5 11 6 9; 306 | set DetourPath[5,9,32] := 5 11 6 9; 307 | set DetourPath[5,9,72] := 5 11 6 9; 308 | set DetourPath[5,11,21] := 5 9 6; 309 | set DetourPath[5,11,26] := 5 9 6 1; 310 | set DetourPath[5,11,28] := 5 9 6 1 3; 311 | set DetourPath[5,11,29] := 5 9 6; 312 | set DetourPath[6,1,34] := 6 11 1; 313 | set DetourPath[6,1,37] := 6 11 2 3; 314 | set DetourPath[6,1,38] := 6 11 2 3; 315 | set DetourPath[6,1,58] := 6 11 1; 316 | set DetourPath[6,1,62] := 6 11 2 3; 317 | set DetourPath[6,1,64] := 6 11 2 3; 318 | set DetourPath[6,9,8] := 6 11 5 9; 319 | set DetourPath[6,9,16] := 6 11 5 9; 320 | set DetourPath[6,9,40] := 6 11 5 9; 321 | set DetourPath[6,9,48] := 11 5 9; 322 | set 
DetourPath[6,9,56] := 6 11 5 9; 323 | set DetourPath[6,11,33] := 6 9 5 4 12; 324 | set DetourPath[6,11,35] := 6 9 5; 325 | set DetourPath[6,11,36] := 6 9 5; 326 | set DetourPath[6,11,39] := 6 1 3 2; 327 | set DetourPath[6,11,63] := 6 1 3 2; 328 | set DetourPath[7,4,3] := 11 5 4; 329 | set DetourPath[7,11,18] := 4 5 9 6 1; 330 | set DetourPath[7,11,66] := 12 8 2 3 1; 331 | set DetourPath[7,11,69] := 7 4 5 9 6; 332 | set DetourPath[7,12,1] := 7 4 12; 333 | set DetourPath[7,12,33] := 7 4 12; 334 | set DetourPath[8,2,42] := 8 10 3 1; 335 | set DetourPath[8,2,45] := 8 12 7 11; 336 | set DetourPath[8,2,47] := 8 10 3; 337 | set DetourPath[8,2,48] := 8 12 7 11; 338 | set DetourPath[8,10,19] := 8 2 3; 339 | set DetourPath[8,10,22] := 8 2 3 10; 340 | set DetourPath[8,10,30] := 8 2 3 10; 341 | set DetourPath[8,10,46] := 8 2 3 10; 342 | set DetourPath[8,10,65] := 8 2 3; 343 | set DetourPath[8,10,70] := 8 2 3 10; 344 | set DetourPath[8,12,9] := 8 2 11 7 12; 345 | set DetourPath[8,12,11] := 8 2 11 7 4; 346 | set DetourPath[8,12,41] := 8 2 11 7 12; 347 | set DetourPath[8,12,43] := 8 2 11 7 4; 348 | set DetourPath[8,12,44] := 8 2 11 5; 349 | set DetourPath[8,12,49] := 8 2 11 7 12; 350 | set DetourPath[8,12,51] := 8 2 11 7 4; 351 | set DetourPath[8,12,52] := 8 2 11 5; 352 | set DetourPath[9,5,57] := 9 6 11 7 12; 353 | set DetourPath[9,5,59] := 9 6 11 7 4; 354 | set DetourPath[9,5,60] := 9 6 11 5; 355 | set DetourPath[9,6,58] := 9 5 11 1; 356 | set DetourPath[9,6,61] := 9 5 11 6; 357 | set DetourPath[9,6,62] := 9 5 11 1; 358 | set DetourPath[9,6,63] := 9 5 11; 359 | set DetourPath[9,6,64] := 9 5 11 1; 360 | set DetourPath[10,3,19] := 8 2 3; 361 | set DetourPath[10,3,50] := 10 8 2 11 1; 362 | set DetourPath[10,3,53] := 10 8 2 11 6; 363 | set DetourPath[10,3,54] := 10 8 2 3; 364 | set DetourPath[10,3,56] := 10 8 2 11 6; 365 | set DetourPath[10,3,65] := 8 2 3; 366 | set DetourPath[10,8,9] := 3 2 11 7 12; 367 | set DetourPath[10,8,11] := 3 2 11 7 4; 368 | set DetourPath[10,8,49] := 10 3 
2 11 7 12; 369 | set DetourPath[10,8,51] := 10 3 2 11 7 4; 370 | set DetourPath[10,8,52] := 10 3 2 11 5; 371 | set DetourPath[10,8,55] := 10 3 2 8; 372 | set DetourPath[11,1,18] := 11 6 1; 373 | set DetourPath[11,1,26] := 11 6 1; 374 | set DetourPath[11,1,42] := 2 3 1; 375 | set DetourPath[11,1,66] := 11 6 1; 376 | set DetourPath[11,2,7] := 1 3 10 8; 377 | set DetourPath[11,2,28] := 11 1 3; 378 | set DetourPath[11,2,39] := 11 7 12 8; 379 | set DetourPath[11,2,63] := 11 7 12 8; 380 | set DetourPath[11,5,4] := 11 6 9 5; 381 | set DetourPath[11,5,12] := 11 6 9 5; 382 | set DetourPath[11,5,35] := 11 7 4; 383 | set DetourPath[11,5,36] := 6 9 5; 384 | set DetourPath[11,6,21] := 5 9 6; 385 | set DetourPath[11,6,29] := 5 9 6; 386 | set DetourPath[11,6,45] := 11 1 6; 387 | set DetourPath[11,6,48] := 11 5 9; 388 | set DetourPath[11,6,69] := 11 1 6; 389 | set DetourPath[11,7,1] := 11 2 8 12; 390 | set DetourPath[11,7,3] := 11 5 4; 391 | set DetourPath[11,7,33] := 11 5 4 12; 392 | set DetourPath[12,4,11] := 12 7 4; 393 | set DetourPath[12,4,43] := 12 7 4; 394 | set DetourPath[12,4,44] := 8 2 11 5; 395 | set DetourPath[12,4,51] := 12 7 4; 396 | set DetourPath[12,4,52] := 8 2 11 5; 397 | set DetourPath[12,4,67] := 12 7 4; 398 | set DetourPath[12,4,68] := 12 7 11 5; 399 | set DetourPath[12,4,72] := 12 7 11 5; 400 | set DetourPath[12,7,66] := 12 8 2 11; 401 | set DetourPath[12,7,69] := 12 4 5 11; 402 | set DetourPath[12,8,19] := 4 5 11 2 3; 403 | set DetourPath[12,8,22] := 4 5 11 2 3 10; 404 | set DetourPath[12,8,23] := 4 5 11 2 8; 405 | set DetourPath[12,8,30] := 5 11 2 3 10; 406 | set DetourPath[12,8,31] := 5 11 2 8; 407 | set DetourPath[12,8,65] := 12 7 11 2 3; 408 | set DetourPath[12,8,70] := 12 7 11 2 3 10; 409 | set DetourPath[12,8,71] := 12 7 11 2 8; 410 | -------------------------------------------------------------------------------- /results/fig7/fig7_ryu_app.py: -------------------------------------------------------------------------------- 1 | from ryu.base import 
app_manager 2 | from ryu.controller import ofp_event 3 | from ryu.controller.handler import MAIN_DISPATCHER, CONFIG_DISPATCHER, HANDSHAKE_DISPATCHER 4 | from ryu.controller.handler import set_ev_cls 5 | import ryu.ofproto.ofproto_v1_3 as ofproto 6 | import ryu.ofproto.ofproto_v1_3_parser as ofparser 7 | import ryu.ofproto.openstate_v1_0 as osproto 8 | import ryu.ofproto.openstate_v1_0_parser as osparser 9 | from ryu.lib.packet import packet 10 | from ryu.topology import event 11 | from pprint import pprint 12 | import logging 13 | from sets import Set 14 | import time 15 | import sys,os 16 | sys.path.append(os.path.abspath("/home/mininet/spider/src")) 17 | import SPIDER_parser as f_t_parser 18 | from ryu.lib import hub 19 | from datetime import datetime 20 | from time import sleep 21 | import random 22 | 23 | def hping3_inter_str(interarrival): 24 | if interarrival<1: 25 | return 'u'+str(int(interarrival*1000000)) 26 | else: 27 | return str(int(interarrival)) 28 | 29 | class OpenStateFaultTolerance(app_manager.RyuApp): 30 | OFP_VERSIONS = [ofproto.OFP_VERSION] 31 | 32 | def __init__(self, *args, **kwargs): 33 | super(OpenStateFaultTolerance, self).__init__(*args, **kwargs) 34 | 35 | DELTA_6_VALUES = eval(os.environ['DELTA_6_VALUES']) # ugly and dangerous! 36 | delta_7 = float(os.environ['delta_7']) 37 | delta_5 = float(os.environ['delta_5']) 38 | f_t_parser.detection_timeouts_list = [(x,delta_7,delta_5) for x in DELTA_6_VALUES] 39 | 40 | results_hash = f_t_parser.md5sum_results() 41 | if f_t_parser.network_has_changed(results_hash): 42 | f_t_parser.erase_figs_folder() 43 | 44 | (self.requests,self.faults) = f_t_parser.parse_ampl_results_if_not_cached() 45 | 46 | print len(self.requests), 'requests loaded' 47 | print len(self.faults), 'faults loaded' 48 | 49 | print "Building network graph from network.xml..." 
50 | # G is a NetworkX Graph object 51 | (self.G, self.pos, self.hosts, self.switches, self.mapping) = f_t_parser.parse_network_xml() 52 | print 'Network has', len(self.switches), 'switches,', self.G.number_of_edges()-len(self.hosts), 'links and', len(self.hosts), 'hosts' 53 | 54 | print "NetworkX to Mininet topology conversion..." 55 | # mn_topo is a Mininet Topo object 56 | self.mn_topo = f_t_parser.networkx_to_mininet_topo(self.G, self.hosts, self.switches, self.mapping) 57 | # mn_net is a Mininet object 58 | self.mn_net = f_t_parser.create_mininet_net(self.mn_topo) 59 | 60 | f_t_parser.launch_mininet(self.mn_net) 61 | 62 | self.ports_dict = f_t_parser.adapt_mn_topo_ports_to_old_API(self.mn_topo.ports) 63 | 64 | f_t_parser.mn_setup_MAC_and_IP(self.mn_net) 65 | 66 | f_t_parser.mn_setup_static_ARP_entries(self.mn_net) 67 | 68 | f_t_parser.draw_network_topology(self.G,self.pos,self.ports_dict,self.hosts) 69 | 70 | (self.fault_ID, self.flow_entries_dict, self.flow_entries_with_timeout_dict, self.flow_entries_with_burst_dict) = f_t_parser.generate_flow_entries_dict(self.requests,self.faults,self.ports_dict,match_flow=f_t_parser.get_mac_match_mininet,check_cache=False) 71 | 72 | # Associates dp_id to datapath object 73 | self.dp_dictionary=dict() 74 | # Associates dp_id to a dict associating port<->MAC address 75 | self.ports_mac_dict=dict() 76 | 77 | # Needed by fault_tolerance_rest 78 | self.f_t_parser = f_t_parser 79 | 80 | # switch counter 81 | self.switch_count = 0 82 | 83 | @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER) 84 | def switch_features_handler(self, ev): 85 | msg = ev.msg 86 | datapath = msg.datapath 87 | 88 | self.ports_mac_dict[datapath.id] = dict() 89 | self.send_features_request(datapath) 90 | self.send_port_desc_stats_request(datapath) 91 | 92 | self.configure_stateful_stages(datapath) 93 | self.install_flows(datapath) 94 | 95 | self.dp_dictionary[datapath.id] = datapath 96 | 97 | def install_flows(self,datapath): 98 | 
print("Configuring flow table for switch %d" % datapath.id) 99 | 100 | if datapath.id in self.flow_entries_dict.keys(): 101 | for table_id in self.flow_entries_dict[datapath.id]: 102 | for match in self.flow_entries_dict[datapath.id][table_id]: 103 | mod = ofparser.OFPFlowMod( 104 | datapath=datapath, cookie=0, cookie_mask=0, table_id=table_id, 105 | command=ofproto.OFPFC_ADD, idle_timeout=0, hard_timeout=0, 106 | priority=self.flow_entries_dict[datapath.id][table_id][match]['priority'], buffer_id=ofproto.OFP_NO_BUFFER, 107 | out_port=ofproto.OFPP_ANY, 108 | out_group=ofproto.OFPG_ANY, 109 | flags=0, match=match, instructions=self.flow_entries_dict[datapath.id][table_id][match]['inst']) 110 | datapath.send_msg(mod) 111 | 112 | self.switch_count += 1 113 | if self.switch_count == self.G.number_of_nodes(): 114 | self.monitor_thread = hub.spawn(self._monitor,datapath) 115 | 116 | def send_features_request(self, datapath): 117 | req = ofparser.OFPFeaturesRequest(datapath) 118 | datapath.send_msg(req) 119 | 120 | def configure_stateful_stages(self, datapath): 121 | node_dict = f_t_parser.create_node_dict(self.ports_dict,self.requests) 122 | 123 | self.send_table_mod(datapath, table_id=2) 124 | self.send_key_lookup(datapath, table_id=2, fields=[ofproto.OXM_OF_ETH_SRC,ofproto.OXM_OF_ETH_DST]) 125 | self.send_key_update(datapath, table_id=2, fields=[ofproto.OXM_OF_ETH_SRC,ofproto.OXM_OF_ETH_DST]) 126 | 127 | self.send_table_mod(datapath, table_id=3) 128 | self.send_key_lookup(datapath, table_id=3, fields=[ofproto.OXM_OF_METADATA]) 129 | self.send_key_update(datapath, table_id=3, fields=[ofproto.OXM_OF_METADATA]) 130 | 131 | def configure_global_states(self, datapath): 132 | for port in self.ports_mac_dict[datapath.id]: 133 | if port!=ofproto.OFPP_LOCAL: 134 | (global_state, global_state_mask) = osparser.masked_global_state_from_str("1",port-1) 135 | msg = osparser.OFPExpSetGlobalState(datapath=datapath, global_state=global_state, global_state_mask=global_state_mask) 136 | 
datapath.send_msg(msg) 137 | 138 | def send_table_mod(self, datapath, table_id, stateful=1): 139 | req = osparser.OFPExpMsgConfigureStatefulTable(datapath=datapath, table_id=table_id, stateful=stateful) 140 | datapath.send_msg(req) 141 | 142 | def send_key_lookup(self, datapath, table_id, fields): 143 | key_lookup_extractor = osparser.OFPExpMsgKeyExtract(datapath=datapath, command=osproto.OFPSC_EXP_SET_L_EXTRACTOR, fields=fields, table_id=table_id) 144 | datapath.send_msg(key_lookup_extractor) 145 | 146 | def send_key_update(self, datapath, table_id, fields): 147 | key_update_extractor = osparser.OFPExpMsgKeyExtract(datapath=datapath, command=osproto.OFPSC_EXP_SET_U_EXTRACTOR, fields=fields, table_id=table_id) 148 | datapath.send_msg(key_update_extractor) 149 | 150 | def set_link_down(self,node1,node2): 151 | if(node1 > node2): 152 | node1,node2 = node2,node1 153 | 154 | os.system('sudo ifconfig s'+str(node1)+'-eth'+str(self.ports_dict['s'+str(node1)]['s'+str(node2)])+' down') 155 | os.system('sudo ifconfig s'+str(node2)+'-eth'+str(self.ports_dict['s'+str(node2)]['s'+str(node1)])+' down') 156 | 157 | def set_link_up(self,node1,node2): 158 | if(node1 > node2): 159 | node1,node2 = node2,node1 160 | 161 | os.system('sudo ifconfig s'+str(node1)+'-eth'+str(self.ports_dict['s'+str(node1)]['s'+str(node2)])+' up') 162 | os.system('sudo ifconfig s'+str(node2)+'-eth'+str(self.ports_dict['s'+str(node2)]['s'+str(node1)])+' up') 163 | 164 | def send_port_desc_stats_request(self, datapath): 165 | req = ofparser.OFPPortDescStatsRequest(datapath, 0) 166 | datapath.send_msg(req) 167 | 168 | @set_ev_cls(ofp_event.EventOFPPortDescStatsReply, MAIN_DISPATCHER) 169 | def port_desc_stats_reply_handler(self, ev): 170 | # store the association port<->MAC address 171 | for p in ev.msg.body: 172 | self.ports_mac_dict[ev.msg.datapath.id][p.port_no]=p.hw_addr 173 | 174 | self.configure_global_states(ev.msg.datapath) 175 | 176 | @set_ev_cls(ofp_event.EventOFPExperimenterStatsReply, 
MAIN_DISPATCHER) 177 | def state_stats_reply_handler(self, ev): 178 | msg = ev.msg 179 | dp = msg.datapath 180 | 181 | if ev.msg.body.exp_type==0: 182 | # EXP_STATE_STATS 183 | stats = osparser.OFPStateStats.parser(ev.msg.body.data, offset=0) 184 | for stat in stats: 185 | if stat.entry.key != []: 186 | msg = osparser.OFPExpMsgSetFlowState( 187 | datapath=dp, state=0, keys=stat.entry.key, table_id=stat.table_id) 188 | dp.send_msg(msg) 189 | elif ev.msg.body.exp_type==1: 190 | stat = osparser.OFPGlobalStateStats.parser(ev.msg.body.data, offset=0) 191 | msg = osparser.OFPExpResetGlobalState(datapath=dp) 192 | dp.send_msg(msg) 193 | self.configure_global_states(dp) 194 | 195 | def timeout_probe(self,timeout): 196 | f_t_parser.selected_timeout = timeout 197 | 198 | for datapath_id in self.flow_entries_with_timeout_dict[timeout]: 199 | for table_id in self.flow_entries_with_timeout_dict[timeout][datapath_id]: 200 | for match in self.flow_entries_with_timeout_dict[timeout][datapath_id][table_id]: 201 | mod = ofparser.OFPFlowMod( 202 | datapath=self.dp_dictionary[datapath_id], cookie=0, cookie_mask=0, table_id=table_id, 203 | command=ofproto.OFPFC_MODIFY, idle_timeout=0, hard_timeout=0, 204 | priority=self.flow_entries_with_timeout_dict[timeout][datapath_id][table_id][match]['priority'], buffer_id=ofproto.OFP_NO_BUFFER, 205 | out_port=ofproto.OFPP_ANY, 206 | out_group=ofproto.OFPG_ANY, 207 | flags=0, match=match, instructions=self.flow_entries_with_timeout_dict[timeout][datapath_id][table_id][match]['inst']) 208 | self.dp_dictionary[datapath_id].send_msg(mod) 209 | 210 | def timeout_burst(self,burst): 211 | f_t_parser.selected_burst = burst 212 | 213 | for datapath_id in self.flow_entries_with_burst_dict[burst]: 214 | for table_id in self.flow_entries_with_burst_dict[burst][datapath_id]: 215 | for match in self.flow_entries_with_burst_dict[burst][datapath_id][table_id]: 216 | mod = ofparser.OFPFlowMod( 217 | datapath=self.dp_dictionary[datapath_id], cookie=0, 
cookie_mask=0, table_id=table_id, 218 | command=ofproto.OFPFC_MODIFY, idle_timeout=0, hard_timeout=0, 219 | priority=self.flow_entries_with_burst_dict[burst][datapath_id][table_id][match]['priority'], buffer_id=ofproto.OFP_NO_BUFFER, 220 | out_port=ofproto.OFPP_ANY, 221 | out_group=ofproto.OFPG_ANY, 222 | flags=0, match=match, instructions=self.flow_entries_with_burst_dict[burst][datapath_id][table_id][match]['inst']) 223 | self.dp_dictionary[datapath_id].send_msg(mod) 224 | 225 | def send_state_stats_request(self): 226 | for datapath_id in self.dp_dictionary: 227 | req = osparser.OFPExpStateStatsMultipartRequest(datapath=self.dp_dictionary[datapath_id]) 228 | self.dp_dictionary[datapath_id].send_msg(req) 229 | 230 | def send_global_state_stats_request(self): 231 | for datapath_id in self.dp_dictionary: 232 | req = osparser.OFPExpGlobalStateStatsMultipartRequest(datapath=self.dp_dictionary[datapath_id]) 233 | self.dp_dictionary[datapath_id].send_msg(req) 234 | 235 | def _monitor(self,datapath): 236 | hub.sleep(5) 237 | print("Network is ready") 238 | 239 | # Losses for the current realization as delta_6 varies 240 | # losses = {delta_6_a: losses_1 , delta_6_b: losses_2 , ...} 241 | losses = {} 242 | 243 | LINK_FAULT = (3,4) 244 | REQUEST = (1,6) 245 | 246 | # Gets realization parameters from environment variables 247 | INTERARRIVAL = float(os.environ['INTERARRIVAL']) 248 | LINK_DOWN = int(os.environ['LINK_DOWN']) 249 | LINK_UP = int(os.environ['LINK_UP']) 250 | PING_NUM = int(os.environ['PING_NUM']) 251 | 252 | print "\nTIMEOUTS LIST = "+str(f_t_parser.detection_timeouts_list)+"\n" 253 | 254 | for idx,detection_timeouts in enumerate(f_t_parser.detection_timeouts_list): 255 | # Reset flow states and global states 256 | self.send_state_stats_request() 257 | self.send_global_state_stats_request() 258 | 259 | # Configures detection timeouts 260 | print "\n"+str(100*(idx+1)/len(f_t_parser.detection_timeouts_list))+"% - Detection timeouts (d6,d7,d5) = 
"+str(detection_timeouts)+"\n" 261 | self.timeout_probe(detection_timeouts) 262 | hub.sleep(5) 263 | 264 | # To avoid syncronization we add a random sleep in [0,delta_6] 265 | # problem: hub.sleep() takes seconds, but we need to sleep in ms 266 | sleep_interval = random.uniform(0,detection_timeouts[0]) 267 | sleep_string = 'sleep %.3f' % round(sleep_interval,3) 268 | ping_file_name = "~/ping."+str(REQUEST[0])+"."+str(REQUEST[1])+".probe."+str(detection_timeouts[0])+".txt" 269 | cmd = 'sudo sh -c "'+sleep_string+';hping3 -1 -c '+str(PING_NUM)+' -i '+hping3_inter_str(INTERARRIVAL)+' '+self.mn_net['h'+str(REQUEST[1])].IP()+' > '+ping_file_name+' 2>&1 "' 270 | print('h'+str(REQUEST[0])+'# '+cmd) 271 | self.mn_net['h'+str(REQUEST[0])].cmd(cmd+' &') 272 | 273 | hub.sleep(LINK_DOWN) 274 | print("LINK DOWN "+str(LINK_FAULT)) 275 | self.set_link_down(LINK_FAULT[0],LINK_FAULT[1]) 276 | 277 | hub.sleep(LINK_UP) 278 | print("LINK UP "+str(LINK_FAULT)) 279 | self.set_link_up(LINK_FAULT[0],LINK_FAULT[1]) 280 | 281 | t=datetime.now().time() 282 | print('START: '+str(t.hour)+':'+str(t.minute)+':'+str(t.second)) 283 | # wait for ping stop 284 | while(1): 285 | hub.sleep(5) 286 | t=datetime.now().time() 287 | print(str(t.hour)+':'+str(t.minute)+':'+str(t.second)) 288 | if len(os.popen("pidof hping3").read()) == 0: 289 | break 290 | hub.sleep(1) 291 | 292 | rx=os.popen("cat "+ping_file_name+" | grep transmitted | awk '{print $4}'").read() # received packets 293 | tx=os.popen("cat "+ping_file_name+" | grep transmitted | awk '{print $1}'").read() # transmitted packets 294 | 295 | try: 296 | losses[detection_timeouts[0]] = int(tx)-int(rx) 297 | except ValueError: 298 | losses[detection_timeouts[0]] = 0 299 | print 'losses =',losses 300 | 301 | print 302 | pprint(losses) 303 | out_file = open("SPIDER_results.txt","w+") 304 | out_file.write(str(losses)) 305 | out_file.close() 306 | 307 | os.system("kill -9 $(pidof -x ryu-manager) 2> /dev/null") 308 | os.system('sudo mn -c 2> /dev/null') 
309 | -------------------------------------------------------------------------------- /src/results.txt.fat_tree: -------------------------------------------------------------------------------- 1 | set PrimaryPath[1] := 6 4 7; 2 | set PrimaryPath[2] := 6 4 20 8 10; 3 | set PrimaryPath[3] := 6 4 20 8 11; 4 | set PrimaryPath[4] := 6 5 3 13 14; 5 | set PrimaryPath[5] := 6 4 20 12 15; 6 | set PrimaryPath[6] := 6 4 20 16 18; 7 | set PrimaryPath[7] := 6 4 20 16 19; 8 | set PrimaryPath[8] := 7 5 6; 9 | set PrimaryPath[9] := 7 5 3 9 10; 10 | set PrimaryPath[10] := 7 4 20 8 11; 11 | set PrimaryPath[11] := 7 5 3 13 14; 12 | set PrimaryPath[12] := 7 5 2 13 15; 13 | set PrimaryPath[13] := 7 5 2 17 18; 14 | set PrimaryPath[14] := 7 5 3 17 19; 15 | set PrimaryPath[15] := 10 8 1 4 6; 16 | set PrimaryPath[16] := 10 9 2 5 7; 17 | set PrimaryPath[17] := 10 9 11; 18 | set PrimaryPath[18] := 10 9 3 13 14; 19 | set PrimaryPath[19] := 10 8 1 12 15; 20 | set PrimaryPath[20] := 10 9 2 17 18; 21 | set PrimaryPath[21] := 10 8 1 16 19; 22 | set PrimaryPath[22] := 11 8 1 4 6; 23 | set PrimaryPath[23] := 11 9 2 5 7; 24 | set PrimaryPath[24] := 11 9 10; 25 | set PrimaryPath[25] := 11 8 20 12 14; 26 | set PrimaryPath[26] := 11 8 20 12 15; 27 | set PrimaryPath[27] := 11 9 3 17 18; 28 | set PrimaryPath[28] := 11 8 1 16 19; 29 | set PrimaryPath[29] := 14 12 20 4 6; 30 | set PrimaryPath[30] := 14 13 3 5 7; 31 | set PrimaryPath[31] := 14 12 20 8 10; 32 | set PrimaryPath[32] := 14 12 20 8 11; 33 | set PrimaryPath[33] := 14 13 15; 34 | set PrimaryPath[34] := 14 13 3 17 18; 35 | set PrimaryPath[35] := 14 12 1 16 19; 36 | set PrimaryPath[36] := 15 12 20 4 6; 37 | set PrimaryPath[37] := 15 12 20 4 7; 38 | set PrimaryPath[38] := 15 12 20 8 10; 39 | set PrimaryPath[39] := 15 12 20 8 11; 40 | set PrimaryPath[40] := 15 13 14; 41 | set PrimaryPath[41] := 15 12 20 16 18; 42 | set PrimaryPath[42] := 15 12 1 16 19; 43 | set PrimaryPath[43] := 18 16 20 4 6; 44 | set PrimaryPath[44] := 18 17 2 5 7; 45 | set 
PrimaryPath[45] := 18 17 3 9 10; 46 | set PrimaryPath[46] := 18 16 20 8 11; 47 | set PrimaryPath[47] := 18 16 20 12 14; 48 | set PrimaryPath[48] := 18 16 20 12 15; 49 | set PrimaryPath[49] := 18 16 19; 50 | set PrimaryPath[50] := 19 17 3 5 6; 51 | set PrimaryPath[51] := 19 17 3 5 7; 52 | set PrimaryPath[52] := 19 16 1 8 10; 53 | set PrimaryPath[53] := 19 17 3 9 11; 54 | set PrimaryPath[54] := 19 16 1 12 14; 55 | set PrimaryPath[55] := 19 16 1 12 15; 56 | set PrimaryPath[56] := 19 16 18; 57 | 58 | param DetectNode[1,4,15]:= 1; 59 | param DetectNode[1,4,22]:= 1; 60 | param DetectNode[1,8,52]:= 1; 61 | param DetectNode[1,12,19]:= 1; 62 | param DetectNode[1,12,54]:= 1; 63 | param DetectNode[1,12,55]:= 1; 64 | param DetectNode[1,16,21]:= 1; 65 | param DetectNode[1,16,28]:= 1; 66 | param DetectNode[1,16,35]:= 1; 67 | param DetectNode[1,16,42]:= 1; 68 | param DetectNode[2,5,16]:= 2; 69 | param DetectNode[2,5,23]:= 2; 70 | param DetectNode[2,5,44]:= 2; 71 | param DetectNode[2,13,12]:= 2; 72 | param DetectNode[2,17,13]:= 2; 73 | param DetectNode[2,17,20]:= 2; 74 | param DetectNode[3,5,30]:= 3; 75 | param DetectNode[3,5,50]:= 3; 76 | param DetectNode[3,5,51]:= 3; 77 | param DetectNode[3,9,9]:= 3; 78 | param DetectNode[3,9,45]:= 3; 79 | param DetectNode[3,9,53]:= 3; 80 | param DetectNode[3,13,4]:= 3; 81 | param DetectNode[3,13,11]:= 3; 82 | param DetectNode[3,13,18]:= 3; 83 | param DetectNode[3,17,14]:= 3; 84 | param DetectNode[3,17,27]:= 3; 85 | param DetectNode[3,17,34]:= 3; 86 | param DetectNode[4,6,15]:= 4; 87 | param DetectNode[4,6,22]:= 4; 88 | param DetectNode[4,6,29]:= 4; 89 | param DetectNode[4,6,36]:= 4; 90 | param DetectNode[4,6,43]:= 4; 91 | param DetectNode[4,7,1]:= 4; 92 | param DetectNode[4,7,37]:= 4; 93 | param DetectNode[4,20,2]:= 4; 94 | param DetectNode[4,20,3]:= 4; 95 | param DetectNode[4,20,5]:= 4; 96 | param DetectNode[4,20,6]:= 4; 97 | param DetectNode[4,20,7]:= 4; 98 | param DetectNode[4,20,10]:= 4; 99 | param DetectNode[5,2,12]:= 5; 100 | param 
DetectNode[5,2,13]:= 5; 101 | param DetectNode[5,3,4]:= 5; 102 | param DetectNode[5,3,9]:= 5; 103 | param DetectNode[5,3,11]:= 5; 104 | param DetectNode[5,3,14]:= 5; 105 | param DetectNode[5,6,8]:= 5; 106 | param DetectNode[5,6,50]:= 5; 107 | param DetectNode[5,7,16]:= 5; 108 | param DetectNode[5,7,23]:= 5; 109 | param DetectNode[5,7,30]:= 5; 110 | param DetectNode[5,7,44]:= 5; 111 | param DetectNode[5,7,51]:= 5; 112 | param DetectNode[6,4,1]:= 6; 113 | param DetectNode[6,4,2]:= 6; 114 | param DetectNode[6,4,3]:= 6; 115 | param DetectNode[6,4,5]:= 6; 116 | param DetectNode[6,4,6]:= 6; 117 | param DetectNode[6,4,7]:= 6; 118 | param DetectNode[6,5,4]:= 6; 119 | param DetectNode[7,4,10]:= 7; 120 | param DetectNode[7,5,8]:= 7; 121 | param DetectNode[7,5,9]:= 7; 122 | param DetectNode[7,5,11]:= 7; 123 | param DetectNode[7,5,12]:= 7; 124 | param DetectNode[7,5,13]:= 7; 125 | param DetectNode[7,5,14]:= 7; 126 | param DetectNode[8,1,15]:= 8; 127 | param DetectNode[8,1,19]:= 8; 128 | param DetectNode[8,1,21]:= 8; 129 | param DetectNode[8,1,22]:= 8; 130 | param DetectNode[8,1,28]:= 8; 131 | param DetectNode[8,10,2]:= 8; 132 | param DetectNode[8,10,31]:= 8; 133 | param DetectNode[8,10,38]:= 8; 134 | param DetectNode[8,10,52]:= 8; 135 | param DetectNode[8,11,3]:= 8; 136 | param DetectNode[8,11,10]:= 8; 137 | param DetectNode[8,11,32]:= 8; 138 | param DetectNode[8,11,39]:= 8; 139 | param DetectNode[8,11,46]:= 8; 140 | param DetectNode[8,20,25]:= 8; 141 | param DetectNode[8,20,26]:= 8; 142 | param DetectNode[9,2,16]:= 9; 143 | param DetectNode[9,2,20]:= 9; 144 | param DetectNode[9,2,23]:= 9; 145 | param DetectNode[9,3,18]:= 9; 146 | param DetectNode[9,3,27]:= 9; 147 | param DetectNode[9,10,9]:= 9; 148 | param DetectNode[9,10,24]:= 9; 149 | param DetectNode[9,10,45]:= 9; 150 | param DetectNode[9,11,17]:= 9; 151 | param DetectNode[9,11,53]:= 9; 152 | param DetectNode[10,8,15]:= 10; 153 | param DetectNode[10,8,19]:= 10; 154 | param DetectNode[10,8,21]:= 10; 155 | param 
DetectNode[10,9,16]:= 10; 156 | param DetectNode[10,9,17]:= 10; 157 | param DetectNode[10,9,18]:= 10; 158 | param DetectNode[10,9,20]:= 10; 159 | param DetectNode[11,8,22]:= 11; 160 | param DetectNode[11,8,25]:= 11; 161 | param DetectNode[11,8,26]:= 11; 162 | param DetectNode[11,8,28]:= 11; 163 | param DetectNode[11,9,23]:= 11; 164 | param DetectNode[11,9,24]:= 11; 165 | param DetectNode[11,9,27]:= 11; 166 | param DetectNode[12,1,35]:= 12; 167 | param DetectNode[12,1,42]:= 12; 168 | param DetectNode[12,14,25]:= 12; 169 | param DetectNode[12,14,47]:= 12; 170 | param DetectNode[12,14,54]:= 12; 171 | param DetectNode[12,15,5]:= 12; 172 | param DetectNode[12,15,19]:= 12; 173 | param DetectNode[12,15,26]:= 12; 174 | param DetectNode[12,15,48]:= 12; 175 | param DetectNode[12,15,55]:= 12; 176 | param DetectNode[12,20,29]:= 12; 177 | param DetectNode[12,20,31]:= 12; 178 | param DetectNode[12,20,32]:= 12; 179 | param DetectNode[12,20,36]:= 12; 180 | param DetectNode[12,20,37]:= 12; 181 | param DetectNode[12,20,38]:= 12; 182 | param DetectNode[12,20,39]:= 12; 183 | param DetectNode[12,20,41]:= 12; 184 | param DetectNode[13,3,30]:= 13; 185 | param DetectNode[13,3,34]:= 13; 186 | param DetectNode[13,14,4]:= 13; 187 | param DetectNode[13,14,11]:= 13; 188 | param DetectNode[13,14,18]:= 13; 189 | param DetectNode[13,14,40]:= 13; 190 | param DetectNode[13,15,12]:= 13; 191 | param DetectNode[13,15,33]:= 13; 192 | param DetectNode[14,12,29]:= 14; 193 | param DetectNode[14,12,31]:= 14; 194 | param DetectNode[14,12,32]:= 14; 195 | param DetectNode[14,12,35]:= 14; 196 | param DetectNode[14,13,30]:= 14; 197 | param DetectNode[14,13,33]:= 14; 198 | param DetectNode[14,13,34]:= 14; 199 | param DetectNode[15,12,36]:= 15; 200 | param DetectNode[15,12,37]:= 15; 201 | param DetectNode[15,12,38]:= 15; 202 | param DetectNode[15,12,39]:= 15; 203 | param DetectNode[15,12,41]:= 15; 204 | param DetectNode[15,12,42]:= 15; 205 | param DetectNode[15,13,40]:= 15; 206 | param DetectNode[16,1,52]:= 16; 
207 | param DetectNode[16,1,54]:= 16; 208 | param DetectNode[16,1,55]:= 16; 209 | param DetectNode[16,18,6]:= 16; 210 | param DetectNode[16,18,41]:= 16; 211 | param DetectNode[16,18,56]:= 16; 212 | param DetectNode[16,19,7]:= 16; 213 | param DetectNode[16,19,21]:= 16; 214 | param DetectNode[16,19,28]:= 16; 215 | param DetectNode[16,19,35]:= 16; 216 | param DetectNode[16,19,42]:= 16; 217 | param DetectNode[16,19,49]:= 16; 218 | param DetectNode[16,20,43]:= 16; 219 | param DetectNode[16,20,46]:= 16; 220 | param DetectNode[16,20,47]:= 16; 221 | param DetectNode[16,20,48]:= 16; 222 | param DetectNode[17,2,44]:= 17; 223 | param DetectNode[17,3,45]:= 17; 224 | param DetectNode[17,3,50]:= 17; 225 | param DetectNode[17,3,51]:= 17; 226 | param DetectNode[17,3,53]:= 17; 227 | param DetectNode[17,18,13]:= 17; 228 | param DetectNode[17,18,20]:= 17; 229 | param DetectNode[17,18,27]:= 17; 230 | param DetectNode[17,18,34]:= 17; 231 | param DetectNode[17,19,14]:= 17; 232 | param DetectNode[18,16,43]:= 18; 233 | param DetectNode[18,16,46]:= 18; 234 | param DetectNode[18,16,47]:= 18; 235 | param DetectNode[18,16,48]:= 18; 236 | param DetectNode[18,16,49]:= 18; 237 | param DetectNode[18,17,44]:= 18; 238 | param DetectNode[18,17,45]:= 18; 239 | param DetectNode[19,16,52]:= 19; 240 | param DetectNode[19,16,54]:= 19; 241 | param DetectNode[19,16,55]:= 19; 242 | param DetectNode[19,16,56]:= 19; 243 | param DetectNode[19,17,50]:= 19; 244 | param DetectNode[19,17,51]:= 19; 245 | param DetectNode[19,17,53]:= 19; 246 | param DetectNode[20,4,29]:= 20; 247 | param DetectNode[20,4,36]:= 20; 248 | param DetectNode[20,4,37]:= 20; 249 | param DetectNode[20,4,43]:= 20; 250 | param DetectNode[20,8,2]:= 20; 251 | param DetectNode[20,8,3]:= 20; 252 | param DetectNode[20,8,10]:= 20; 253 | param DetectNode[20,8,31]:= 20; 254 | param DetectNode[20,8,32]:= 20; 255 | param DetectNode[20,8,38]:= 20; 256 | param DetectNode[20,8,39]:= 20; 257 | param DetectNode[20,8,46]:= 20; 258 | param DetectNode[20,12,5]:= 
20; 259 | param DetectNode[20,12,25]:= 20; 260 | param DetectNode[20,12,26]:= 20; 261 | param DetectNode[20,12,47]:= 20; 262 | param DetectNode[20,12,48]:= 20; 263 | param DetectNode[20,16,6]:= 20; 264 | param DetectNode[20,16,7]:= 20; 265 | param DetectNode[20,16,41]:= 20; 266 | set DetourPath[1,4,15] := 10 9 2 5 6; 267 | set DetourPath[1,4,22] := 11 9 2 5 6; 268 | set DetourPath[1,8,52] := 19 17 3 9 10; 269 | set DetourPath[1,12,19] := 10 9 3 13 15; 270 | set DetourPath[1,12,54] := 19 17 2 13 14; 271 | set DetourPath[1,12,55] := 19 17 3 13 15; 272 | set DetourPath[1,16,21] := 10 9 3 17 19; 273 | set DetourPath[1,16,28] := 11 9 2 17 19; 274 | set DetourPath[1,16,35] := 14 13 3 17 19; 275 | set DetourPath[1,16,42] := 15 13 3 17 19; 276 | set DetourPath[2,5,16] := 10 8 1 4 7; 277 | set DetourPath[2,5,23] := 11 8 20 4 7; 278 | set DetourPath[2,5,44] := 18 16 1 4 7; 279 | set DetourPath[2,13,12] := 7 4 20 12 15; 280 | set DetourPath[2,17,13] := 7 4 20 16 18; 281 | set DetourPath[2,17,20] := 10 8 1 16 18; 282 | set DetourPath[3,5,30] := 14 12 1 4 7; 283 | set DetourPath[3,5,50] := 19 16 1 4 6; 284 | set DetourPath[3,5,51] := 19 16 1 4 7; 285 | set DetourPath[3,9,9] := 7 4 1 8 10; 286 | set DetourPath[3,9,45] := 18 16 20 8 10; 287 | set DetourPath[3,9,53] := 19 16 20 8 11; 288 | set DetourPath[3,13,4] := 6 4 1 12 14; 289 | set DetourPath[3,13,11] := 7 4 20 12 14; 290 | set DetourPath[3,13,18] := 10 8 1 12 14; 291 | set DetourPath[3,17,14] := 7 4 20 16 19; 292 | set DetourPath[3,17,27] := 11 8 1 16 18; 293 | set DetourPath[3,17,34] := 14 12 1 16 18; 294 | set DetourPath[4,6,15] := 10 9 2 5 6; 295 | set DetourPath[4,6,22] := 11 9 2 5 6; 296 | set DetourPath[4,6,29] := 14 13 2 5 6; 297 | set DetourPath[4,6,36] := 15 13 2 5 6; 298 | set DetourPath[4,6,43] := 18 17 2 5 6; 299 | set DetourPath[4,7,1] := 6 5 7; 300 | set DetourPath[4,7,37] := 15 13 3 5 7; 301 | set DetourPath[4,20,2] := 6 5 2 9 10; 302 | set DetourPath[4,20,3] := 6 5 2 9 11; 303 | set DetourPath[4,20,5] := 6 5 
2 13 15; 304 | set DetourPath[4,20,6] := 6 5 2 17 18; 305 | set DetourPath[4,20,7] := 6 5 3 17 19; 306 | set DetourPath[4,20,10] := 7 5 2 9 11; 307 | set DetourPath[5,2,12] := 7 4 20 12 15; 308 | set DetourPath[5,2,13] := 7 4 20 16 18; 309 | set DetourPath[5,3,4] := 6 4 1 12 14; 310 | set DetourPath[5,3,9] := 7 4 1 8 10; 311 | set DetourPath[5,3,11] := 7 4 20 12 14; 312 | set DetourPath[5,3,14] := 7 4 20 16 19; 313 | set DetourPath[5,6,8] := 7 4 6; 314 | set DetourPath[5,6,50] := 19 16 1 4 6; 315 | set DetourPath[5,7,16] := 10 8 1 4 7; 316 | set DetourPath[5,7,23] := 11 8 20 4 7; 317 | set DetourPath[5,7,30] := 14 12 1 4 7; 318 | set DetourPath[5,7,44] := 18 16 1 4 7; 319 | set DetourPath[5,7,51] := 19 16 1 4 7; 320 | set DetourPath[6,4,1] := 6 5 7; 321 | set DetourPath[6,4,2] := 6 5 2 9 10; 322 | set DetourPath[6,4,3] := 6 5 2 9 11; 323 | set DetourPath[6,4,5] := 6 5 2 13 15; 324 | set DetourPath[6,4,6] := 6 5 2 17 18; 325 | set DetourPath[6,4,7] := 6 5 3 17 19; 326 | set DetourPath[6,5,4] := 6 4 1 12 14; 327 | set DetourPath[7,4,10] := 7 5 2 9 11; 328 | set DetourPath[7,5,8] := 7 4 6; 329 | set DetourPath[7,5,9] := 7 4 1 8 10; 330 | set DetourPath[7,5,11] := 7 4 20 12 14; 331 | set DetourPath[7,5,12] := 7 4 20 12 15; 332 | set DetourPath[7,5,13] := 7 4 20 16 18; 333 | set DetourPath[7,5,14] := 7 4 20 16 19; 334 | set DetourPath[8,1,15] := 10 9 2 5 6; 335 | set DetourPath[8,1,19] := 10 9 3 13 15; 336 | set DetourPath[8,1,21] := 10 9 3 17 19; 337 | set DetourPath[8,1,22] := 11 9 2 5 6; 338 | set DetourPath[8,1,28] := 11 9 2 17 19; 339 | set DetourPath[8,10,2] := 6 5 2 9 10; 340 | set DetourPath[8,10,31] := 14 13 2 9 10; 341 | set DetourPath[8,10,38] := 15 13 2 9 10; 342 | set DetourPath[8,10,52] := 19 17 3 9 10; 343 | set DetourPath[8,11,3] := 6 5 2 9 11; 344 | set DetourPath[8,11,10] := 7 5 2 9 11; 345 | set DetourPath[8,11,32] := 14 13 2 9 11; 346 | set DetourPath[8,11,39] := 15 13 3 9 11; 347 | set DetourPath[8,11,46] := 18 17 3 9 11; 348 | set 
DetourPath[8,20,25] := 11 9 2 13 14; 349 | set DetourPath[8,20,26] := 11 9 2 13 15; 350 | set DetourPath[9,2,16] := 10 8 1 4 7; 351 | set DetourPath[9,2,20] := 10 8 1 16 18; 352 | set DetourPath[9,2,23] := 11 8 20 4 7; 353 | set DetourPath[9,3,18] := 10 8 1 12 14; 354 | set DetourPath[9,3,27] := 11 8 1 16 18; 355 | set DetourPath[9,10,9] := 7 4 1 8 10; 356 | set DetourPath[9,10,24] := 11 8 10; 357 | set DetourPath[9,10,45] := 18 16 20 8 10; 358 | set DetourPath[9,11,17] := 10 8 11; 359 | set DetourPath[9,11,53] := 19 16 20 8 11; 360 | set DetourPath[10,8,15] := 10 9 2 5 6; 361 | set DetourPath[10,8,19] := 10 9 3 13 15; 362 | set DetourPath[10,8,21] := 10 9 3 17 19; 363 | set DetourPath[10,9,16] := 10 8 1 4 7; 364 | set DetourPath[10,9,17] := 10 8 11; 365 | set DetourPath[10,9,18] := 10 8 1 12 14; 366 | set DetourPath[10,9,20] := 10 8 1 16 18; 367 | set DetourPath[11,8,22] := 11 9 2 5 6; 368 | set DetourPath[11,8,25] := 11 9 2 13 14; 369 | set DetourPath[11,8,26] := 11 9 2 13 15; 370 | set DetourPath[11,8,28] := 11 9 2 17 19; 371 | set DetourPath[11,9,23] := 11 8 20 4 7; 372 | set DetourPath[11,9,24] := 11 8 10; 373 | set DetourPath[11,9,27] := 11 8 1 16 18; 374 | set DetourPath[12,1,35] := 14 13 3 17 19; 375 | set DetourPath[12,1,42] := 15 13 3 17 19; 376 | set DetourPath[12,14,25] := 11 9 2 13 14; 377 | set DetourPath[12,14,47] := 18 17 2 13 14; 378 | set DetourPath[12,14,54] := 19 17 2 13 14; 379 | set DetourPath[12,15,5] := 6 5 2 13 15; 380 | set DetourPath[12,15,19] := 10 9 3 13 15; 381 | set DetourPath[12,15,26] := 11 9 2 13 15; 382 | set DetourPath[12,15,48] := 18 17 2 13 15; 383 | set DetourPath[12,15,55] := 19 17 3 13 15; 384 | set DetourPath[12,20,29] := 14 13 2 5 6; 385 | set DetourPath[12,20,31] := 14 13 2 9 10; 386 | set DetourPath[12,20,32] := 14 13 2 9 11; 387 | set DetourPath[12,20,36] := 15 13 2 5 6; 388 | set DetourPath[12,20,37] := 15 13 3 5 7; 389 | set DetourPath[12,20,38] := 15 13 2 9 10; 390 | set DetourPath[12,20,39] := 15 13 3 9 11; 391 | 
set DetourPath[12,20,41] := 15 13 3 17 18; 392 | set DetourPath[13,3,30] := 14 12 1 4 7; 393 | set DetourPath[13,3,34] := 14 12 1 16 18; 394 | set DetourPath[13,14,4] := 6 4 1 12 14; 395 | set DetourPath[13,14,11] := 7 4 20 12 14; 396 | set DetourPath[13,14,18] := 10 8 1 12 14; 397 | set DetourPath[13,14,40] := 15 12 14; 398 | set DetourPath[13,15,12] := 7 4 20 12 15; 399 | set DetourPath[13,15,33] := 14 12 15; 400 | set DetourPath[14,12,29] := 14 13 2 5 6; 401 | set DetourPath[14,12,31] := 14 13 2 9 10; 402 | set DetourPath[14,12,32] := 14 13 2 9 11; 403 | set DetourPath[14,12,35] := 14 13 3 17 19; 404 | set DetourPath[14,13,30] := 14 12 1 4 7; 405 | set DetourPath[14,13,33] := 14 12 15; 406 | set DetourPath[14,13,34] := 14 12 1 16 18; 407 | set DetourPath[15,12,36] := 15 13 2 5 6; 408 | set DetourPath[15,12,37] := 15 13 3 5 7; 409 | set DetourPath[15,12,38] := 15 13 2 9 10; 410 | set DetourPath[15,12,39] := 15 13 3 9 11; 411 | set DetourPath[15,12,41] := 15 13 3 17 18; 412 | set DetourPath[15,12,42] := 15 13 3 17 19; 413 | set DetourPath[15,13,40] := 15 12 14; 414 | set DetourPath[16,1,52] := 19 17 3 9 10; 415 | set DetourPath[16,1,54] := 19 17 2 13 14; 416 | set DetourPath[16,1,55] := 19 17 3 13 15; 417 | set DetourPath[16,18,6] := 6 5 2 17 18; 418 | set DetourPath[16,18,41] := 15 13 3 17 18; 419 | set DetourPath[16,18,56] := 19 17 18; 420 | set DetourPath[16,19,7] := 6 5 3 17 19; 421 | set DetourPath[16,19,21] := 10 9 3 17 19; 422 | set DetourPath[16,19,28] := 11 9 2 17 19; 423 | set DetourPath[16,19,35] := 14 13 3 17 19; 424 | set DetourPath[16,19,42] := 15 13 3 17 19; 425 | set DetourPath[16,19,49] := 18 17 19; 426 | set DetourPath[16,20,43] := 18 17 2 5 6; 427 | set DetourPath[16,20,46] := 18 17 3 9 11; 428 | set DetourPath[16,20,47] := 18 17 2 13 14; 429 | set DetourPath[16,20,48] := 18 17 2 13 15; 430 | set DetourPath[17,2,44] := 18 16 1 4 7; 431 | set DetourPath[17,3,45] := 18 16 20 8 10; 432 | set DetourPath[17,3,50] := 19 16 1 4 6; 433 | set 
DetourPath[17,3,51] := 19 16 1 4 7; 434 | set DetourPath[17,3,53] := 19 16 20 8 11; 435 | set DetourPath[17,18,13] := 7 4 20 16 18; 436 | set DetourPath[17,18,20] := 10 8 1 16 18; 437 | set DetourPath[17,18,27] := 11 8 1 16 18; 438 | set DetourPath[17,18,34] := 14 12 1 16 18; 439 | set DetourPath[17,19,14] := 7 4 20 16 19; 440 | set DetourPath[18,16,43] := 18 17 2 5 6; 441 | set DetourPath[18,16,46] := 18 17 3 9 11; 442 | set DetourPath[18,16,47] := 18 17 2 13 14; 443 | set DetourPath[18,16,48] := 18 17 2 13 15; 444 | set DetourPath[18,16,49] := 18 17 19; 445 | set DetourPath[18,17,44] := 18 16 1 4 7; 446 | set DetourPath[18,17,45] := 18 16 20 8 10; 447 | set DetourPath[19,16,52] := 19 17 3 9 10; 448 | set DetourPath[19,16,54] := 19 17 2 13 14; 449 | set DetourPath[19,16,55] := 19 17 3 13 15; 450 | set DetourPath[19,16,56] := 19 17 18; 451 | set DetourPath[19,17,50] := 19 16 1 4 6; 452 | set DetourPath[19,17,51] := 19 16 1 4 7; 453 | set DetourPath[19,17,53] := 19 16 20 8 11; 454 | set DetourPath[20,4,29] := 14 13 2 5 6; 455 | set DetourPath[20,4,36] := 15 13 2 5 6; 456 | set DetourPath[20,4,37] := 15 13 3 5 7; 457 | set DetourPath[20,4,43] := 18 17 2 5 6; 458 | set DetourPath[20,8,2] := 6 5 2 9 10; 459 | set DetourPath[20,8,3] := 6 5 2 9 11; 460 | set DetourPath[20,8,10] := 7 5 2 9 11; 461 | set DetourPath[20,8,31] := 14 13 2 9 10; 462 | set DetourPath[20,8,32] := 14 13 2 9 11; 463 | set DetourPath[20,8,38] := 15 13 2 9 10; 464 | set DetourPath[20,8,39] := 15 13 3 9 11; 465 | set DetourPath[20,8,46] := 18 17 3 9 11; 466 | set DetourPath[20,12,5] := 6 5 2 13 15; 467 | set DetourPath[20,12,25] := 11 9 2 13 14; 468 | set DetourPath[20,12,26] := 11 9 2 13 15; 469 | set DetourPath[20,12,47] := 18 17 2 13 14; 470 | set DetourPath[20,12,48] := 18 17 2 13 15; 471 | set DetourPath[20,16,6] := 6 5 2 17 18; 472 | set DetourPath[20,16,7] := 6 5 3 17 19; 473 | set DetourPath[20,16,41] := 15 13 3 17 18; 474 | 
-------------------------------------------------------------------------------- /src/network.xml.fat_tree: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 437.5 8 | 750 9 | 10 | 11 | 12 | 13 | 687.5 14 | 750 15 | 16 | 17 | 18 | 19 | 937.5 20 | 750 21 | 22 | 23 | 24 | 25 | 125 26 | 250 27 | 28 | 29 | 30 | 31 | 250 32 | 250 33 | 34 | 35 | 36 | 37 | 125 38 | 125 39 | 40 | 41 | 42 | 43 | 250 44 | 125 45 | 46 | 47 | 48 | 49 | 375 50 | 250 51 | 52 | 53 | 54 | 55 | 500 56 | 250 57 | 58 | 59 | 60 | 61 | 375 62 | 125 63 | 64 | 65 | 66 | 67 | 500 68 | 125 69 | 70 | 71 | 72 | 73 | 625 74 | 250 75 | 76 | 77 | 78 | 79 | 750 80 | 250 81 | 82 | 83 | 84 | 85 | 625 86 | 125 87 | 88 | 89 | 90 | 91 | 750 92 | 125 93 | 94 | 95 | 96 | 97 | 875 98 | 250 99 | 100 | 101 | 102 | 103 | 1000 104 | 250 105 | 106 | 107 | 108 | 109 | 875 110 | 125 111 | 112 | 113 | 114 | 115 | 1000 116 | 125 117 | 118 | 119 | 120 | 121 | 187.5 122 | 750 123 | 124 | 125 | 126 | 127 | 128 | N4 129 | N7 130 | 156.0 131 | 132 | 133 | 155.0 134 | 156.0 135 | 136 | 137 | 622.0 138 | 468.0 139 | 140 | 141 | 142 | 143 | N5 144 | N6 145 | 272.0 146 | 147 | 148 | 155.0 149 | 272.0 150 | 151 | 152 | 622.0 153 | 816.0 154 | 155 | 156 | 157 | 158 | N20 159 | N16 160 | 156.0 161 | 162 | 163 | 155.0 164 | 156.0 165 | 166 | 167 | 622.0 168 | 468.0 169 | 170 | 171 | 172 | 173 | N2 174 | N5 175 | 186.0 176 | 177 | 178 | 155.0 179 | 186.0 180 | 181 | 182 | 622.0 183 | 558.0 184 | 185 | 186 | 187 | 188 | N3 189 | N17 190 | 272.0 191 | 192 | 193 | 155.0 194 | 272.0 195 | 196 | 197 | 622.0 198 | 816.0 199 | 200 | 201 | 202 | 203 | N2 204 | N9 205 | 227.0 206 | 207 | 208 | 155.0 209 | 227.0 210 | 211 | 212 | 622.0 213 | 711.0 214 | 215 | 216 | 217 | 218 | N1 219 | N16 220 | 208.0 221 | 222 | 223 | 155.0 224 | 208.0 225 | 226 | 227 | 622.0 228 | 624.0 229 | 230 | 231 | 232 | 233 | N8 234 | N10 235 | 181.0 236 | 237 | 238 | 155.0 239 | 181.0 240 | 241 | 242 | 622.0 243 | 
542.0 244 | 245 | 246 | 247 | 248 | N2 249 | N13 250 | 208.0 251 | 252 | 253 | 155.0 254 | 208.0 255 | 256 | 257 | 622.0 258 | 624.0 259 | 260 | 261 | 262 | 263 | N11 264 | N9 265 | 250.0 266 | 267 | 268 | 155.0 269 | 250.0 270 | 271 | 272 | 622.0 273 | 750.0 274 | 275 | 276 | 277 | 278 | N20 279 | N4 280 | 224.0 281 | 282 | 283 | 155.0 284 | 224.0 285 | 286 | 287 | 622.0 288 | 972.0 289 | 290 | 291 | 292 | 293 | N16 294 | N18 295 | 224.0 296 | 297 | 298 | 155.0 299 | 224.0 300 | 301 | 302 | 622.0 303 | 972.0 304 | 305 | 306 | 307 | 308 | N17 309 | N19 310 | 250.0 311 | 312 | 313 | 155.0 314 | 250.0 315 | 316 | 317 | 622.0 318 | 750.0 319 | 320 | 321 | 322 | 323 | N12 324 | N14 325 | 165.0 326 | 327 | 328 | 155.0 329 | 165.0 330 | 331 | 332 | 622.0 333 | 495.0 334 | 335 | 336 | 337 | 338 | N3 339 | N13 340 | 205.0 341 | 342 | 343 | 155.0 344 | 205.0 345 | 346 | 347 | 622.0 348 | 915.0 349 | 350 | 351 | 352 | 353 | N8 354 | N11 355 | 142.0 356 | 357 | 358 | 155.0 359 | 142.0 360 | 361 | 362 | 622.0 363 | 426.0 364 | 365 | 366 | 367 | 368 | N13 369 | N15 370 | 195.0 371 | 372 | 373 | 155.0 374 | 195.0 375 | 376 | 377 | 622.0 378 | 585.0 379 | 380 | 381 | 382 | 383 | N1 384 | N4 385 | 294.0 386 | 387 | 388 | 155.0 389 | 294.0 390 | 391 | 392 | 622.0 393 | 882.0 394 | 395 | 396 | 397 | 398 | N20 399 | N12 400 | 294.0 401 | 402 | 403 | 155.0 404 | 294.0 405 | 406 | 407 | 622.0 408 | 882.0 409 | 410 | 411 | 412 | 413 | 414 | 415 | N9 416 | N10 417 | 294.0 418 | 419 | 420 | 155.0 421 | 294.0 422 | 423 | 424 | 622.0 425 | 882.0 426 | 427 | 428 | 429 | 430 | N1 431 | N8 432 | 294.0 433 | 434 | 435 | 155.0 436 | 294.0 437 | 438 | 439 | 622.0 440 | 882.0 441 | 442 | 443 | 444 | 445 | N3 446 | N9 447 | 294.0 448 | 449 | 450 | 155.0 451 | 294.0 452 | 453 | 454 | 622.0 455 | 882.0 456 | 457 | 458 | 459 | 460 | N16 461 | N19 462 | 294.0 463 | 464 | 465 | 155.0 466 | 294.0 467 | 468 | 469 | 622.0 470 | 882.0 471 | 472 | 473 | 474 | 475 | N20 476 | N8 477 | 294.0 478 | 479 | 480 | 
155.0 481 | 294.0 482 | 483 | 484 | 622.0 485 | 882.0 486 | 487 | 488 | 489 | 490 | N2 491 | N17 492 | 294.0 493 | 494 | 495 | 155.0 496 | 294.0 497 | 498 | 499 | 622.0 500 | 882.0 501 | 502 | 503 | 504 | 505 | N3 506 | N5 507 | 294.0 508 | 509 | 510 | 155.0 511 | 294.0 512 | 513 | 514 | 622.0 515 | 882.0 516 | 517 | 518 | 519 | 520 | N17 521 | N18 522 | 294.0 523 | 524 | 525 | 155.0 526 | 294.0 527 | 528 | 529 | 622.0 530 | 882.0 531 | 532 | 533 | 534 | 535 | N12 536 | N15 537 | 294.0 538 | 539 | 540 | 155.0 541 | 294.0 542 | 543 | 544 | 622.0 545 | 882.0 546 | 547 | 548 | 549 | 550 | N4 551 | N6 552 | 294.0 553 | 554 | 555 | 155.0 556 | 294.0 557 | 558 | 559 | 622.0 560 | 882.0 561 | 562 | 563 | 564 | 565 | N12 566 | N1 567 | 294.0 568 | 569 | 570 | 155.0 571 | 294.0 572 | 573 | 574 | 622.0 575 | 882.0 576 | 577 | 578 | 579 | 580 | N13 581 | N14 582 | 294.0 583 | 584 | 585 | 155.0 586 | 294.0 587 | 588 | 589 | 622.0 590 | 882.0 591 | 592 | 593 | 594 | 595 | N5 596 | N7 597 | 294.0 598 | 599 | 600 | 155.0 601 | 294.0 602 | 603 | 604 | 622.0 605 | 882.0 606 | 607 | 608 | 609 | 610 | 611 | N6 612 | N7 613 | 195.0 614 | 615 | 616 | N6 617 | N10 618 | 195.0 619 | 620 | 621 | N6 622 | N11 623 | 195.0 624 | 625 | 626 | N6 627 | N14 628 | 195.0 629 | 630 | 631 | N6 632 | N15 633 | 195.0 634 | 635 | 636 | N6 637 | N18 638 | 195.0 639 | 640 | 641 | N6 642 | N19 643 | 195.0 644 | 645 | 646 | N7 647 | N6 648 | 195.0 649 | 650 | 651 | N7 652 | N10 653 | 195.0 654 | 655 | 656 | N7 657 | N11 658 | 195.0 659 | 660 | 661 | N7 662 | N14 663 | 195.0 664 | 665 | 666 | N7 667 | N15 668 | 195.0 669 | 670 | 671 | N7 672 | N18 673 | 195.0 674 | 675 | 676 | N7 677 | N19 678 | 195.0 679 | 680 | 681 | 682 | N10 683 | N6 684 | 195.0 685 | 686 | 687 | N10 688 | N7 689 | 195.0 690 | 691 | 692 | N10 693 | N11 694 | 195.0 695 | 696 | 697 | N10 698 | N14 699 | 195.0 700 | 701 | 702 | N10 703 | N15 704 | 195.0 705 | 706 | 707 | N10 708 | N18 709 | 195.0 710 | 711 | 712 | N10 713 | N19 714 | 195.0 
715 | 716 | 717 | 718 | N11 719 | N6 720 | 195.0 721 | 722 | 723 | N11 724 | N7 725 | 195.0 726 | 727 | 728 | N11 729 | N10 730 | 195.0 731 | 732 | 733 | N11 734 | N14 735 | 195.0 736 | 737 | 738 | N11 739 | N15 740 | 195.0 741 | 742 | 743 | N11 744 | N18 745 | 195.0 746 | 747 | 748 | N11 749 | N19 750 | 195.0 751 | 752 | 753 | 754 | 755 | N14 756 | N6 757 | 195.0 758 | 759 | 760 | N14 761 | N7 762 | 195.0 763 | 764 | 765 | N14 766 | N10 767 | 195.0 768 | 769 | 770 | N14 771 | N11 772 | 195.0 773 | 774 | 775 | N14 776 | N15 777 | 195.0 778 | 779 | 780 | N14 781 | N18 782 | 195.0 783 | 784 | 785 | N14 786 | N19 787 | 195.0 788 | 789 | 790 | 791 | N15 792 | N6 793 | 195.0 794 | 795 | 796 | N15 797 | N7 798 | 195.0 799 | 800 | 801 | N15 802 | N10 803 | 195.0 804 | 805 | 806 | N15 807 | N11 808 | 195.0 809 | 810 | 811 | N15 812 | N14 813 | 195.0 814 | 815 | 816 | N15 817 | N18 818 | 195.0 819 | 820 | 821 | N15 822 | N19 823 | 195.0 824 | 825 | 826 | N18 827 | N6 828 | 195.0 829 | 830 | 831 | N18 832 | N7 833 | 195.0 834 | 835 | 836 | N18 837 | N10 838 | 195.0 839 | 840 | 841 | N18 842 | N11 843 | 195.0 844 | 845 | 846 | N18 847 | N14 848 | 195.0 849 | 850 | 851 | N18 852 | N15 853 | 195.0 854 | 855 | 856 | N18 857 | N19 858 | 195.0 859 | 860 | 861 | N19 862 | N6 863 | 195.0 864 | 865 | 866 | N19 867 | N7 868 | 195.0 869 | 870 | 871 | N19 872 | N10 873 | 195.0 874 | 875 | 876 | N19 877 | N11 878 | 195.0 879 | 880 | 881 | N19 882 | N14 883 | 195.0 884 | 885 | 886 | N19 887 | N15 888 | 195.0 889 | 890 | 891 | N19 892 | N18 893 | 195.0 894 | 895 | 896 | 897 | 898 | --------------------------------------------------------------------------------