├── .gitignore ├── README.md ├── benchmarks ├── benchmark_consts.py ├── compute_demand_stats.py ├── demand_tracking.py ├── fib_entries.py ├── fleischer.py ├── ncflow.py ├── ncflow_leader_election.py ├── num_partitions_sweep.py ├── path_form.py ├── smore.py └── teavar_star.sh ├── download.sh ├── environment.yml ├── ext ├── fleischer │ ├── .gitignore │ ├── Makefile │ ├── README │ ├── fleischer.cpp │ ├── pqueue.cpp │ ├── pqueue.h │ ├── test.cpp │ └── test_pqueue.cpp ├── modularity │ ├── .gitignore │ └── FastCommunity_w_GPL_v1.0.1 │ │ ├── .gitignore │ │ ├── Makefile │ │ ├── fastcommunity_w_mh.cc │ │ ├── maxheap.h │ │ ├── test1-fc_test1.wpairs │ │ ├── test1.wpairs │ │ ├── test2-fc_t2.wpairs │ │ ├── test2.wpairs │ │ └── vektor.h └── teavar │ ├── Algorithms │ ├── FFC.jl │ ├── MaxMin.jl │ ├── SMORE.jl │ ├── TEAVAR.jl │ └── TEAVAR_Star.jl │ ├── availability.jl │ ├── cutoff_error.jl │ ├── data │ ├── AttMpls.graphml │ │ └── paths │ │ │ ├── EDInvCap4 │ │ │ ├── SMORE4 │ │ │ └── SMORE8 │ ├── Uninett2010.graphml │ │ └── paths │ │ │ ├── EDInvCap4 │ │ │ ├── EDInvCap8 │ │ │ ├── SMORE4 │ │ │ └── SMORE8 │ └── b4-teavar.json │ │ └── paths │ │ ├── EDInvCap4 │ │ ├── EDInvCap8 │ │ └── SMORE4 │ ├── dependencies.txt │ ├── draw.jl │ ├── find_beta.jl │ ├── main.jl │ ├── network_utilization.jl │ ├── parsers.jl │ ├── path_selection.jl │ ├── probability_noise.jl │ ├── run_teavar.jl │ ├── run_teavar_star.jl │ ├── scenario_coverage.jl │ ├── server.jl │ ├── simulation.jl │ ├── throughput.jl │ ├── throughput_guarantee.jl │ ├── timer.jl │ └── util.jl ├── lib ├── __init__.py ├── algorithms │ ├── __init__.py │ ├── abstract_formulation.py │ ├── edge_formulation.py │ ├── min_max_flow_on_edge.py │ ├── ncflow │ │ ├── __init__.py │ │ ├── counter.py │ │ ├── ncflow_abstract.py │ │ ├── ncflow_edge_per_iter.py │ │ └── ncflow_single_iter.py │ ├── path_formulation.py │ └── smore.py ├── config.py ├── graph_utils.py ├── lp_solver.py ├── partitioning │ ├── __init__.py │ ├── abstract_partitioning_method.py │ ├── fm_partitioning.py │ ├── hard_coded_partitioning.py │ ├── leader_election.py │ ├── networkx_partitioning.py │ ├── spectral_clustering.py │ └── utils.py ├── path_utils.py ├── problem.py ├── problems.py ├── tests │ ├── __init__.py │ ├── abstract_test.py │ ├── feasibility_test.py │ ├── flow_path_construction_test.py │ ├── optgap4_test.py │ ├── optgapc1_test.py │ ├── optgapc2_test.py │ ├── optgapc3_test.py │ ├── recon3_test.py │ ├── reconciliation_problem_2_test.py │ ├── reconciliation_problem_test.py │ ├── single_edge_b.py │ ├── test_runner.py │ ├── toy_problem_test.py │ └── we_need_to_fix_this_test.py ├── traffic_matrix.py ├── utils.py └── vis.py ├── scripts ├── find_demand_scale_factor.py ├── generate_full_tms_for_fib_entries.py ├── generate_inputs_for_teavar.py ├── generate_traffic_matrices.py ├── grid_search.py ├── networks.py ├── pre_solve_path.py ├── run_yates_raeke.py ├── serialize_all_fleischer.py └── serialize_all_yates.py └── topologies ├── .gitignore ├── b4-teavar.json ├── bottleneck-dumbell.json ├── bottleneck.json ├── dumbell-bottleneck.json ├── feasible1.json ├── topology-zoo ├── Cogentco.graphml ├── Colt.graphml ├── Deltacom.graphml ├── DialtelecomCz.graphml ├── GtsCe.graphml ├── Interoute.graphml ├── Ion.graphml ├── Kdl.graphml ├── TataNld.graphml ├── Uninett2010.graphml └── UsCarrier.graphml ├── toy-network-2.json ├── toy-network-3.json ├── toy-network.json └── two-srcs.json /.gitignore: -------------------------------------------------------------------------------- 1 | *.sw* 2 | *.pyc 3 | *.log 4 | *.txt 5 | 
*.ipynb_checkpoints 6 | *.sol 7 | *.lp 8 | fm_rundir/ 9 | traffic-matrices/ 10 | traffic-matrices.zip 11 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # NCFlow 2 | 3 | Anonymous code repository for NCFlow. 4 | 5 | Setup validated on Ubuntu 16.04. 6 | 7 | Run `download.sh` to fetch the traffic matrices and pre-computed paths used in 8 | our evaluation. (For confidentiality reasons, we only share TMs and paths for 9 | topologies from the Internet Topology Zoo.) 10 | 11 | ## Dependencies 12 | - Python 3.6 (Anaconda installation recommended) 13 | - See `environment.yml` for a list of Python library dependencies 14 | - Julia 1.0.5 (to run TEAVAR\*) 15 | - See `../ext/teavar/dependencies.txt` for a list of Julia library dependencies 16 | - Gurobi 8.1.1 (Requires a Gurobi license) 17 | 18 | -------------------------------------------------------------------------------- /benchmarks/benchmark_consts.py: -------------------------------------------------------------------------------- 1 | from collections import defaultdict 2 | from glob import iglob 3 | 4 | import argparse 5 | import os 6 | 7 | import sys 8 | sys.path.append('..') 9 | 10 | from lib.partitioning import FMPartitioning, SpectralClustering 11 | 12 | PROBLEM_NAMES = [ 13 | 'GtsCe.graphml', 'UsCarrier.graphml', 'Cogentco.graphml', 'Colt.graphml', 14 | 'TataNld.graphml', 'Deltacom.graphml', 'DialtelecomCz.graphml', 15 | 'Uninett2010.graphml', 'Interoute.graphml', 'Ion.graphml', 'Kdl.graphml' 16 | ] 17 | 18 | PATH_FORM_HYPERPARAMS = (4, True, 'inv-cap') 19 | NCFLOW_HYPERPARAMS = { 20 | 'GtsCe.graphml': (4, True, 'inv-cap', FMPartitioning, 3), 21 | 'UsCarrier.graphml': (4, True, 'inv-cap', FMPartitioning, 3), 22 | 'Cogentco.graphml': (4, True, 'inv-cap', FMPartitioning, 3), 23 | 'Colt.graphml': (4, True, 'inv-cap', FMPartitioning, 3), 24 | 'TataNld.graphml': (4, True, 'inv-cap', FMPartitioning, 3), 25 | 'Deltacom.graphml': (4, True, 'inv-cap', FMPartitioning, 3), 26 | 'DialtelecomCz.graphml': (4, True, 'inv-cap', FMPartitioning, 3), 27 | 'Uninett2010.graphml': (4, True, 'inv-cap', FMPartitioning, 3), 28 | 'Interoute.graphml': (4, True, 'inv-cap', SpectralClustering, 2), 29 | 'Ion.graphml': (4, True, 'inv-cap', FMPartitioning, 3), 30 | 'Kdl.graphml': (4, True, 'inv-cap', FMPartitioning, 3), 31 | } 32 | 33 | TM_MODELS = [ 34 | 'uniform', 'gravity', 'bimodal', 'poisson-high-intra', 'poisson-high-inter' 35 | ] 36 | PROBLEM_NAMES_AND_TM_MODELS = [(prob_name, tm_model) 37 | for prob_name in PROBLEM_NAMES 38 | for tm_model in TM_MODELS] 39 | 40 | PROBLEMS = [] 41 | GROUPED_BY_PROBLEMS = defaultdict(list) 42 | HOLDOUT_PROBLEMS = [] 43 | GROUPED_BY_HOLDOUT_PROBLEMS = defaultdict(list) 44 | 45 | for problem_name in PROBLEM_NAMES: 46 | if problem_name.endswith('.graphml'): 47 | topo_fname = os.path.join('..', 'topologies', 'topology-zoo', 48 | problem_name) 49 | else: 50 | topo_fname = os.path.join('..', 'topologies', problem_name) 51 | for model in TM_MODELS: 52 | for tm_fname in iglob( 53 | '../traffic-matrices/{}/{}*_traffic-matrix.pkl'.format( 54 | model, problem_name)): 55 | vals = os.path.basename(tm_fname)[:-4].split('_') 56 | _, traffic_seed, scale_factor = vals[1], int(vals[2]), float( 57 | vals[3]) 58 | GROUPED_BY_PROBLEMS[(problem_name, model, scale_factor)].append( 59 | (topo_fname, tm_fname)) 60 | PROBLEMS.append((problem_name, topo_fname, tm_fname)) 61 | for tm_fname in iglob( 62 | 
'../traffic-matrices/holdout/{}/{}*_traffic-matrix.pkl'.format( 63 | model, problem_name)): 64 | vals = os.path.basename(tm_fname)[:-4].split('_') 65 | _, traffic_seed, scale_factor = vals[1], int(vals[2]), float( 66 | vals[3]) 67 | GROUPED_BY_HOLDOUT_PROBLEMS[(problem_name, model, 68 | scale_factor)].append( 69 | (topo_fname, tm_fname)) 70 | HOLDOUT_PROBLEMS.append((problem_name, topo_fname, tm_fname)) 71 | 72 | GROUPED_BY_PROBLEMS = dict(GROUPED_BY_PROBLEMS) 73 | for key, vals in GROUPED_BY_PROBLEMS.items(): 74 | GROUPED_BY_PROBLEMS[key] = sorted(vals) 75 | 76 | GROUPED_BY_HOLDOUT_PROBLEMS = dict(GROUPED_BY_HOLDOUT_PROBLEMS) 77 | for key, vals in GROUPED_BY_HOLDOUT_PROBLEMS.items(): 78 | GROUPED_BY_HOLDOUT_PROBLEMS[key] = sorted(vals) 79 | 80 | 81 | def get_problems(args): 82 | problems = [] 83 | for ( 84 | problem_name, 85 | _, 86 | _, 87 | ), topo_and_tm_fnames in GROUPED_BY_PROBLEMS.items(): 88 | for slice in args.slices: 89 | topo_fname, tm_fname = topo_and_tm_fnames[slice] 90 | problems.append((problem_name, topo_fname, tm_fname)) 91 | return problems 92 | 93 | 94 | def get_args_and_problems(): 95 | parser = argparse.ArgumentParser() 96 | parser.add_argument('--dry-run', 97 | dest='dry_run', 98 | action='store_true', 99 | default=False) 100 | parser.add_argument('--slices', 101 | type=int, 102 | choices=range(5), 103 | nargs='+', 104 | required=True) 105 | args = parser.parse_args() 106 | return args, get_problems(args) 107 | 108 | 109 | def print_(*args, file=None): 110 | if file is None: 111 | file = sys.stdout 112 | print(*args, file=file) 113 | file.flush() 114 | -------------------------------------------------------------------------------- /benchmarks/compute_demand_stats.py: -------------------------------------------------------------------------------- 1 | #! 
/usr/bin/env python 2 | 3 | from benchmark_consts import print_, PROBLEM_NAMES, TM_MODELS, NCFLOW_HYPERPARAMS 4 | import numpy as np 5 | import os 6 | from glob import iglob 7 | 8 | import sys 9 | sys.path.append('..') 10 | from lib.problem import Problem 11 | 12 | OUTPUT_CSV = 'demand-stats.csv' 13 | HEADERS = [ 14 | 'problem', 'num_nodes', 'num_edges', 'traffic_seed', 'tm_model', 15 | 'scale_factor', 'num_commodities', 'total_demand', 'clustering_algo', 16 | 'num_partitions', 'size_of_largest_partition', 'partition_runtime', 17 | 'intra_demand', 'inter_demand' 18 | ] 19 | PLACEHOLDER = ','.join('{}' for _ in HEADERS) 20 | PARTITIONER_DICT = {} 21 | 22 | if __name__ == '__main__': 23 | with open(OUTPUT_CSV, 'a') as w: 24 | print_(','.join(HEADERS), file=w) 25 | for problem_name in PROBLEM_NAMES: 26 | 27 | if problem_name.endswith('.graphml'): 28 | topo_fname = os.path.join('..', 'topologies', 'topology-zoo', 29 | problem_name) 30 | else: 31 | topo_fname = os.path.join('..', 'topologies', problem_name) 32 | _, _, _, partition_cls, num_parts_scale_factor = NCFLOW_HYPERPARAMS[ 33 | problem_name] 34 | 35 | for model in TM_MODELS: 36 | for tm_fname in iglob( 37 | '../traffic-matrices/{}/{}*_traffic-matrix.pkl'.format( 38 | model, problem_name)): 39 | print(tm_fname) 40 | vals = os.path.basename(tm_fname)[:-4].split('_') 41 | _, traffic_seed, scale_factor = vals[1], int( 42 | vals[2]), float(vals[3]) 43 | problem = Problem.from_file(topo_fname, tm_fname) 44 | 45 | if problem_name not in PARTITIONER_DICT: 46 | num_partitions_to_set = num_parts_scale_factor * int( 47 | np.sqrt(len(problem.G.nodes))) 48 | partitioner = partition_cls(num_partitions_to_set) 49 | PARTITIONER_DICT[problem_name] = partitioner 50 | else: 51 | partitioner = PARTITIONER_DICT[problem_name] 52 | partition_algo = partitioner.name 53 | 54 | intra_demand, inter_demand = problem.intra_and_inter_demands( 55 | partitioner) 56 | result_line = PLACEHOLDER.format( 57 | problem.name, len(problem.G.nodes), 58 | len(problem.G.edges), traffic_seed, 59 | problem.traffic_matrix.model, 60 | problem.traffic_matrix.scale_factor, 61 | len(problem.commodity_list), problem.total_demand, 62 | partition_algo, partitioner.num_partitions, 63 | partitioner.size_of_largest_partition, 64 | partitioner.runtime, intra_demand, inter_demand) 65 | print_(result_line, file=w) 66 | -------------------------------------------------------------------------------- /benchmarks/fleischer.py: -------------------------------------------------------------------------------- 1 | #! 
/usr/bin/env python 2 | 3 | import os 4 | import subprocess 5 | from glob import iglob 6 | from pathos import multiprocessing 7 | 8 | import sys 9 | sys.path.append('..') 10 | 11 | from lib.config import TL_DIR 12 | 13 | INPUT_DIR_PATHS = os.path.abspath( 14 | os.path.join(TL_DIR, 'traffic-matrices', 'fleischer-with-paths-format')) 15 | INPUT_DIR_EDGE = os.path.abspath( 16 | os.path.join(TL_DIR, 'traffic-matrices', 'fleischer-edge-format')) 17 | OUTPUT_DIR_PATHS = os.path.abspath( 18 | os.path.join(TL_DIR, 'benchmarks', 'fleischer-runs', 'with-paths')) 19 | OUTPUT_DIR_EDGE = os.path.abspath( 20 | os.path.join(TL_DIR, 'benchmarks', 'fleischer-runs', 'edge')) 21 | FLEISCHER_RUN_DIR = os.path.abspath( 22 | os.path.join(TL_DIR, 'ext', 'fleischer')) 23 | 24 | 25 | def run_fleischer(args): 26 | input_fname, epsilon, paths, output_fname = args 27 | if paths: 28 | flag = '-22p' 29 | else: 30 | flag = '-f' 31 | cmd = ['./fl', flag, input_fname, str(epsilon)] 32 | print(cmd + [output_fname]) 33 | 34 | with open(output_fname, 'w') as w: 35 | subprocess.call(cmd, stdout=w) 36 | 37 | 38 | if __name__ == '__main__': 39 | if sys.argv[1] == 'path': 40 | paths = True 41 | elif sys.argv[1] == 'edge': 42 | paths = False 43 | else: 44 | raise Exception('invalid arg {}'.format(sys.argv[1])) 45 | 46 | if sys.argv[2] != '--slice': 47 | raise Exception('missing --slice') 48 | slice = int(sys.argv[3]) 49 | 50 | dry_run = sys.argv[-1] == '--dry-run' 51 | 52 | if paths: 53 | input_dir = os.path.join(INPUT_DIR_PATHS, 'slice-{}'.format(slice)) 54 | output_dir = os.path.join(OUTPUT_DIR_PATHS, 'slice-{}'.format(slice)) 55 | else: 56 | input_dir = os.path.join(INPUT_DIR_EDGE, 'slice-{}'.format(slice)) 57 | output_dir = os.path.join(OUTPUT_DIR_EDGE, 'slice-{}'.format(slice)) 58 | 59 | if not os.path.exists(input_dir): 60 | print('{} does not exist; cannot run benchmark without input files. Run scripts/serialize_all_fleischer.py'. 61 | format(input_dir)) 62 | exit(1) 63 | 64 | if not os.path.exists(output_dir): 65 | os.makedirs(output_dir) 66 | 67 | os.chdir(FLEISCHER_RUN_DIR) 68 | subprocess.call(['make']) 69 | 70 | run_args = [] 71 | for input_fname in iglob(input_dir + '/*'): 72 | for epsilon in [0.5]: 73 | output_fname = os.path.join( 74 | output_dir, 75 | os.path.basename(input_fname).replace( 76 | '.txt', '_epsilon-{}.output'.format(epsilon))) 77 | if os.path.exists(output_fname): 78 | continue 79 | run_args.append((input_fname, epsilon, paths, output_fname)) 80 | if dry_run: 81 | print('Problems to run:') 82 | for run_arg in run_args: 83 | print(run_arg) 84 | else: 85 | # Only run 4 jobs at once 86 | pool = multiprocessing.ProcessPool(4) 87 | pool.map(run_fleischer, run_args) 88 | -------------------------------------------------------------------------------- /benchmarks/ncflow.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python 2 | 3 | from benchmark_consts import get_args_and_problems, print_, NCFLOW_HYPERPARAMS 4 | 5 | import os 6 | import pickle 7 | import traceback 8 | import numpy as np 9 | 10 | import sys 11 | sys.path.append('..') 12 | 13 | from lib.algorithms import NcfEpi 14 | from lib.problem import Problem 15 | 16 | TOP_DIR = 'ncflow-logs' 17 | OUTPUT_CSV = 'ncflow.csv' 18 | 19 | # Sweep topos and traffic matrices for that topo. 
For each combo, record the 20 | # runtime and total flow for each algorithm 21 | HEADERS = [ 22 | 'problem', 'num_nodes', 'num_edges', 'traffic_seed', 'tm_model', 23 | 'scale_factor', 'num_commodities', 'total_demand', 'algo', 24 | 'clustering_algo', 'num_partitions', 'size_of_largest_partition', 25 | 'partition_runtime', 'num_paths', 'edge_disjoint', 'dist_metric', 26 | 'iteration', 'total_flow', 'runtime', 'r1_runtime', 'r2_runtime', 27 | 'recon_runtime', 'r3_runtime', 'kirchoffs_runtime' 28 | ] 29 | PLACEHOLDER = ','.join('{}' for _ in HEADERS) 30 | 31 | 32 | def benchmark(problems): 33 | 34 | with open(OUTPUT_CSV, 'a') as results: 35 | print_(','.join(HEADERS), file=results) 36 | for problem_name, topo_fname, tm_fname in problems: 37 | problem = Problem.from_file(topo_fname, tm_fname) 38 | print_(problem.name, tm_fname) 39 | traffic_seed = problem.traffic_matrix.seed 40 | total_demand = problem.total_demand 41 | print_('traffic seed: {}'.format(traffic_seed)) 42 | print_('traffic matrix model: {}'.format( 43 | problem.traffic_matrix.model)) 44 | print_('traffic matrix scale factor: {}'.format( 45 | problem.traffic_matrix.scale_factor)) 46 | print_('total demand: {}'.format(total_demand)) 47 | 48 | num_paths, edge_disjoint, dist_metric, partition_cls, num_parts_scale_factor = NCFLOW_HYPERPARAMS[ 49 | problem_name] 50 | num_partitions_to_set = num_parts_scale_factor * int( 51 | np.sqrt(len(problem.G.nodes))) 52 | partitioner = partition_cls(num_partitions_to_set) 53 | partition_algo = partitioner.name 54 | 55 | run_dir = os.path.join( 56 | TOP_DIR, problem.name, 57 | '{}-{}'.format(traffic_seed, problem.traffic_matrix.model)) 58 | if not os.path.exists(run_dir): 59 | os.makedirs(run_dir) 60 | 61 | try: 62 | print_( 63 | '\nNCFlow, {} partitioner, {} partitions, {} paths, edge disjoint {}, dist metric {}' 64 | .format(partition_algo, num_partitions_to_set, num_paths, 65 | edge_disjoint, dist_metric)) 66 | run_nc_dir = os.path.join( 67 | run_dir, 'ncflow', partition_algo, 68 | '{}-partitions'.format(num_partitions_to_set), 69 | '{}-paths'.format(num_paths), 70 | 'edge_disjoint-{}'.format(edge_disjoint), 71 | 'dist_metric-{}'.format(dist_metric)) 72 | if not os.path.exists(run_nc_dir): 73 | os.makedirs(run_nc_dir) 74 | with open( 75 | os.path.join( 76 | run_nc_dir, 77 | '{}-ncflow-partitioner_{}-{}_partitions-{}_paths-edge_disjoint_{}-dist_metric_{}.txt' 78 | .format(problem.name, partition_algo, 79 | num_partitions_to_set, num_paths, 80 | edge_disjoint, dist_metric)), 'w') as log: 81 | ncflow = NcfEpi.new_max_flow(num_paths, 82 | edge_disjoint=edge_disjoint, 83 | dist_metric=dist_metric, 84 | out=log) 85 | ncflow.solve(problem, partitioner) 86 | 87 | for i, nc in enumerate(ncflow._ncflows): 88 | with open( 89 | log.name.replace( 90 | '.txt', 91 | '-runtime-dict-iter-{}.pkl'.format(i)), 92 | 'wb') as w: 93 | pickle.dump(nc.runtime_dict, w) 94 | with open( 95 | log.name.replace( 96 | '.txt', '-sol-dict-iter-{}.pkl'.format(i)), 97 | 'wb') as w: 98 | pickle.dump(nc.sol_dict, w) 99 | num_partitions = len(np.unique(ncflow._partition_vector)) 100 | 101 | for iter in range(ncflow.num_iters): 102 | nc = ncflow._ncflows[iter] 103 | 104 | r1_runtime, r2_runtime, recon_runtime, \ 105 | r3_runtime, kirchoffs_runtime = nc.runtime_est(14, breakdown = True) 106 | runtime = r1_runtime + r2_runtime + recon_runtime + r3_runtime + kirchoffs_runtime 107 | total_flow = nc.obj_val 108 | 109 | result_line = PLACEHOLDER.format( 110 | problem.name, len(problem.G.nodes), 111 | len(problem.G.edges), traffic_seed, 112 
| problem.traffic_matrix.model, 113 | problem.traffic_matrix.scale_factor, 114 | len(problem.commodity_list), total_demand, 115 | 'ncflow_edge_per_iter', partition_algo, 116 | num_partitions, 117 | partitioner.size_of_largest_partition, 118 | partitioner.runtime, num_paths, edge_disjoint, 119 | dist_metric, iter, total_flow, runtime, r1_runtime, 120 | r2_runtime, recon_runtime, r3_runtime, 121 | kirchoffs_runtime) 122 | print_(result_line, file=results) 123 | except: 124 | print_( 125 | 'NCFlow partitioner {}, {} paths, Problem {}, traffic seed {}, traffic model {} failed' 126 | .format(partition_algo, num_paths, problem.name, 127 | traffic_seed, problem.traffic_matrix.model)) 128 | traceback.print_exc(file=sys.stdout) 129 | 130 | 131 | if __name__ == '__main__': 132 | if not os.path.exists(TOP_DIR): 133 | os.makedirs(TOP_DIR) 134 | 135 | args, problems = get_args_and_problems() 136 | 137 | if args.dry_run: 138 | print('Problems to run:') 139 | for problem in problems: 140 | print(problem) 141 | else: 142 | benchmark(problems) 143 | -------------------------------------------------------------------------------- /benchmarks/ncflow_leader_election.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python 2 | 3 | from benchmark_consts import get_args_and_problems, print_, NCFLOW_HYPERPARAMS 4 | 5 | import os 6 | import pickle 7 | import traceback 8 | import numpy as np 9 | 10 | import sys 11 | sys.path.append('..') 12 | 13 | from lib.algorithms import NcfEpi 14 | from lib.partitioning.leader_election import LeaderElection 15 | from lib.problem import Problem 16 | 17 | TOP_DIR = 'ncflow-leader-election-logs' 18 | OUTPUT_CSV = 'ncflow-leader-election.csv' 19 | 20 | # Sweep topos and traffic matrices for that topo. 
For each combo, record the 21 | # runtime and total flow for each algorithm 22 | HEADERS = [ 23 | 'problem', 'num_nodes', 'num_edges', 'traffic_seed', 'tm_model', 24 | 'scale_factor', 'num_commodities', 'total_demand', 'algo', 25 | 'clustering_algo', 'num_partitions', 'size_of_largest_partition', 26 | 'partition_runtime', 'num_paths', 'edge_disjoint', 'dist_metric', 27 | 'iteration', 'total_flow', 'runtime', 'r1_runtime', 'r2_runtime', 28 | 'recon_runtime', 'r3_runtime', 'kirchoffs_runtime' 29 | ] 30 | PLACEHOLDER = ','.join('{}' for _ in HEADERS) 31 | 32 | 33 | def benchmark(problems): 34 | 35 | with open(OUTPUT_CSV, 'a') as results: 36 | print_(','.join(HEADERS), file=results) 37 | for problem_name, topo_fname, tm_fname in problems: 38 | problem = Problem.from_file(topo_fname, tm_fname) 39 | print_(problem.name, tm_fname) 40 | traffic_seed = problem.traffic_matrix.seed 41 | total_demand = problem.total_demand 42 | print_('traffic seed: {}'.format(traffic_seed)) 43 | print_('traffic matrix model: {}'.format( 44 | problem.traffic_matrix.model)) 45 | print_('traffic matrix scale factor: {}'.format( 46 | problem.traffic_matrix.scale_factor)) 47 | print_('total demand: {}'.format(total_demand)) 48 | 49 | num_paths, edge_disjoint, dist_metric, _, num_parts_scale_factor = NCFLOW_HYPERPARAMS[ 50 | problem_name] 51 | num_partitions_to_set = num_parts_scale_factor * int( 52 | np.sqrt(len(problem.G.nodes))) 53 | partition_cls = LeaderElection 54 | partitioner = partition_cls(num_partitions_to_set) 55 | partition_algo = partitioner.name 56 | 57 | run_dir = os.path.join( 58 | TOP_DIR, problem.name, 59 | '{}-{}'.format(traffic_seed, problem.traffic_matrix.model)) 60 | if not os.path.exists(run_dir): 61 | os.makedirs(run_dir) 62 | 63 | try: 64 | print_( 65 | '\nNCFlow, {} partitioner, {} partitions, {} paths, edge disjoint {}, dist metric {}' 66 | .format(partition_algo, num_partitions_to_set, num_paths, 67 | edge_disjoint, dist_metric)) 68 | run_nc_dir = os.path.join( 69 | run_dir, 'ncflow', partition_algo, 70 | '{}-partitions'.format(num_partitions_to_set), 71 | '{}-paths'.format(num_paths), 72 | 'edge_disjoint-{}'.format(edge_disjoint), 73 | 'dist_metric-{}'.format(dist_metric)) 74 | if not os.path.exists(run_nc_dir): 75 | os.makedirs(run_nc_dir) 76 | with open( 77 | os.path.join( 78 | run_nc_dir, 79 | '{}-ncflow-partitioner_{}-{}_partitions-{}_paths-edge_disjoint_{}-dist_metric_{}.txt' 80 | .format(problem.name, partition_algo, 81 | num_partitions_to_set, num_paths, 82 | edge_disjoint, dist_metric)), 'w') as log: 83 | ncflow = NcfEpi.new_max_flow(num_paths, 84 | edge_disjoint=edge_disjoint, 85 | dist_metric=dist_metric, 86 | out=log) 87 | ncflow.solve(problem, partitioner) 88 | 89 | for i, nc in enumerate(ncflow._ncflows): 90 | with open( 91 | log.name.replace( 92 | '.txt', 93 | '-runtime-dict-iter-{}.pkl'.format(i)), 94 | 'wb') as w: 95 | pickle.dump(nc.runtime_dict, w) 96 | with open( 97 | log.name.replace( 98 | '.txt', '-sol-dict-iter-{}.pkl'.format(i)), 99 | 'wb') as w: 100 | pickle.dump(nc.sol_dict, w) 101 | num_partitions = len(np.unique(ncflow._partition_vector)) 102 | 103 | for iter in range(ncflow.num_iters): 104 | nc = ncflow._ncflows[iter] 105 | 106 | r1_runtime, r2_runtime, recon_runtime, \ 107 | r3_runtime, kirchoffs_runtime = nc.runtime_est(14, breakdown = True) 108 | runtime = r1_runtime + r2_runtime + recon_runtime + r3_runtime + kirchoffs_runtime 109 | total_flow = nc.obj_val 110 | 111 | result_line = PLACEHOLDER.format( 112 | problem.name, len(problem.G.nodes), 113 | 
len(problem.G.edges), traffic_seed, 114 | problem.traffic_matrix.model, 115 | problem.traffic_matrix.scale_factor, 116 | len(problem.commodity_list), total_demand, 117 | 'ncflow_edge_per_iter', partition_algo, 118 | num_partitions, 119 | partitioner.size_of_largest_partition, 120 | partitioner.runtime, num_paths, edge_disjoint, 121 | dist_metric, iter, total_flow, runtime, r1_runtime, 122 | r2_runtime, recon_runtime, r3_runtime, 123 | kirchoffs_runtime) 124 | print_(result_line, file=results) 125 | except: 126 | print_( 127 | 'NCFlow partitioner {}, {} paths, Problem {}, traffic seed {}, traffic model {} failed' 128 | .format(partition_algo, num_paths, problem.name, 129 | traffic_seed, problem.traffic_matrix.model)) 130 | traceback.print_exc(file=sys.stdout) 131 | 132 | 133 | if __name__ == '__main__': 134 | if not os.path.exists(TOP_DIR): 135 | os.makedirs(TOP_DIR) 136 | 137 | args, problems = get_args_and_problems() 138 | 139 | if args.dry_run: 140 | print('Problems to run:') 141 | for problem in problems: 142 | print(problem) 143 | else: 144 | benchmark(problems) 145 | -------------------------------------------------------------------------------- /benchmarks/path_form.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python 2 | 3 | from benchmark_consts import get_args_and_problems, print_, PATH_FORM_HYPERPARAMS 4 | 5 | import os 6 | import pickle 7 | import traceback 8 | 9 | import sys 10 | sys.path.append('..') 11 | 12 | from lib.algorithms import PathFormulation 13 | from lib.problem import Problem 14 | 15 | TOP_DIR = 'path-form-logs' 16 | HEADERS = [ 17 | 'problem', 'num_nodes', 'num_edges', 'traffic_seed', 'scale_factor', 18 | 'tm_model', 'num_commodities', 'total_demand', 'algo', 'num_paths', 19 | 'edge_disjoint', 'dist_metric', 'total_flow', 'runtime' 20 | ] 21 | PLACEHOLDER = ','.join('{}' for _ in HEADERS) 22 | 23 | 24 | # Sweep topos and traffic matrices for that topo. 
For each combo, record the 25 | # runtime and total flow for each algorithm 26 | def benchmark(problems): 27 | num_paths, edge_disjoint, dist_metric = PATH_FORM_HYPERPARAMS 28 | with open('path-form.csv', 'a') as results: 29 | print_(','.join(HEADERS), file=results) 30 | for problem_name, topo_fname, tm_fname in problems: 31 | problem = Problem.from_file(topo_fname, tm_fname) 32 | print_(problem.name, tm_fname) 33 | traffic_seed = problem.traffic_matrix.seed 34 | total_demand = problem.total_demand 35 | print_('traffic seed: {}'.format(traffic_seed)) 36 | print_('traffic scale factor: {}'.format( 37 | problem.traffic_matrix.scale_factor)) 38 | print_('traffic matrix model: {}'.format( 39 | problem.traffic_matrix.model)) 40 | print_('total demand: {}'.format(total_demand)) 41 | 42 | run_dir = os.path.join( 43 | TOP_DIR, problem.name, 44 | '{}-{}'.format(traffic_seed, problem.traffic_matrix.model)) 45 | if not os.path.exists(run_dir): 46 | os.makedirs(run_dir) 47 | 48 | try: 49 | print_( 50 | '\nPath formulation, {} paths, edge disjoint {}, dist metric {}' 51 | .format(num_paths, edge_disjoint, dist_metric)) 52 | with open( 53 | os.path.join( 54 | run_dir, 55 | '{}-path-formulation_{}-paths_edge-disjoint-{}_dist-metric-{}.txt' 56 | .format(problem.name, num_paths, edge_disjoint, 57 | dist_metric)), 'w') as log: 58 | pf = PathFormulation.new_max_flow( 59 | num_paths, 60 | edge_disjoint=edge_disjoint, 61 | dist_metric=dist_metric, 62 | out=log) 63 | pf.solve(problem) 64 | pf_sol_dict = pf.extract_sol_as_dict() 65 | with open( 66 | os.path.join( 67 | run_dir, 68 | '{}-path-formulation_{}-paths_edge-disjoint-{}_dist-metric-{}_sol-dict.pkl' 69 | .format(problem.name, num_paths, edge_disjoint, 70 | dist_metric)), 'wb') as w: 71 | pickle.dump(pf_sol_dict, w) 72 | 73 | result_line = PLACEHOLDER.format( 74 | problem.name, 75 | len(problem.G.nodes), 76 | len(problem.G.edges), 77 | traffic_seed, 78 | problem.traffic_matrix.scale_factor, 79 | problem.traffic_matrix.model, 80 | len(problem.commodity_list), 81 | total_demand, 82 | 'path_formulation', 83 | num_paths, 84 | edge_disjoint, 85 | dist_metric, 86 | pf.obj_val, 87 | pf.runtime, 88 | ) 89 | print_(result_line, file=results) 90 | 91 | except Exception: 92 | print_( 93 | 'Path formulation {} paths, edge disjoint {}, dist metric {}, Problem {}, traffic seed {}, traffic model {} failed' 94 | .format(num_paths, edge_disjoint, dist_metric, 95 | problem.name, traffic_seed, 96 | problem.traffic_matrix.model)) 97 | traceback.print_exc(file=sys.stdout) 98 | 99 | 100 | if __name__ == '__main__': 101 | if not os.path.exists(TOP_DIR): 102 | os.makedirs(TOP_DIR) 103 | 104 | args, problems = get_args_and_problems() 105 | 106 | if args.dry_run: 107 | print('Problems to run:') 108 | for problem in problems: 109 | print(problem) 110 | else: 111 | benchmark(problems) 112 | -------------------------------------------------------------------------------- /benchmarks/smore.py: -------------------------------------------------------------------------------- 1 | #! 
/usr/bin/env python 2 | 3 | from benchmark_consts import get_args_and_problems, print_ 4 | 5 | import os 6 | import pickle 7 | import traceback 8 | import numpy as np 9 | 10 | import sys 11 | sys.path.append('..') 12 | 13 | from lib.algorithms import SMORE 14 | from lib.problem import Problem 15 | 16 | TOP_DIR = 'smore-logs' 17 | HEADERS = [ 18 | 'problem', 'num_nodes', 'num_edges', 'traffic_seed', 'tm_model', 19 | 'scale_factor', 'num_commodities', 'total_demand', 'algo', 'num_paths', 20 | 'total_flow', 'runtime' 21 | ] 22 | PLACEHOLDER = ','.join('{}' for _ in HEADERS) 23 | 24 | 25 | def benchmark(problems): 26 | with open('smore.csv', 'a') as w: 27 | print_(','.join(HEADERS), file=w) 28 | for problem_name, topo_fname, tm_fname in problems: 29 | 30 | problem = Problem.from_file(topo_fname, tm_fname) 31 | print_(problem.name, tm_fname) 32 | 33 | traffic_seed = problem.traffic_matrix.seed 34 | total_demand = np.sum(problem.traffic_matrix.tm) 35 | print_('traffic seed: {}'.format(traffic_seed)) 36 | print_('traffic matrix model: {}'.format( 37 | problem.traffic_matrix.model)) 38 | print_('total demand: {}'.format(total_demand)) 39 | 40 | run_dir = os.path.join( 41 | TOP_DIR, problem.name, 42 | '{}-{}'.format(traffic_seed, problem.traffic_matrix.model)) 43 | if not os.path.exists(run_dir): 44 | os.makedirs(run_dir) 45 | 46 | try: 47 | with open( 48 | os.path.join(run_dir, 49 | '{}-smore.txt'.format(problem.name)), 50 | 'w') as log: 51 | smore = SMORE.new_max_flow(num_paths=4, out=log) 52 | smore.solve(problem) 53 | smore_sol_dict = smore.extract_sol_as_dict() 54 | pickle.dump( 55 | smore_sol_dict, 56 | open( 57 | os.path.join( 58 | run_dir, 59 | '{}-smore-sol-dict.pkl'.format(problem.name)), 60 | 'wb')) 61 | 62 | result_line = PLACEHOLDER.format( 63 | problem.name, len(problem.G.nodes), 64 | len(problem.G.edges), traffic_seed, 65 | problem.traffic_matrix.model, 66 | problem.traffic_matrix.scale_factor, 67 | len(problem.commodity_list), total_demand, 'smore', 4, 68 | smore.total_flow, smore.runtime) 69 | print_(result_line, file=w) 70 | 71 | except Exception: 72 | print_( 73 | 'SMORE, Problem {}, traffic seed {}, traffic model {} failed' 74 | .format(problem.name, traffic_seed, 75 | problem.traffic_matrix.model)) 76 | traceback.print_exc(file=sys.stdout) 77 | 78 | 79 | if __name__ == '__main__': 80 | if not os.path.exists(TOP_DIR): 81 | os.makedirs(TOP_DIR) 82 | 83 | args, problems = get_args_and_problems() 84 | 85 | if args.dry_run: 86 | print('Problems to run:') 87 | for problem in problems: 88 | print(problem) 89 | else: 90 | benchmark(problems) 91 | -------------------------------------------------------------------------------- /benchmarks/teavar_star.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | set -e 4 | set -x 5 | 6 | julia ../ext/teavar/run_teavar_star.jl b4-teavar.json 2 0.95 x EDInvCap4 topo_n2.txt 2 1 > teavar_star.txt 7 | -------------------------------------------------------------------------------- /download.sh: -------------------------------------------------------------------------------- 1 | #! 
/usr/bin/env bash 2 | 3 | set -e 4 | set -x 5 | 6 | export paths_file_id=1kmWab5GUHKLwTIbefKVThsMSdwh953bh 7 | export paths_filename=paths.zip 8 | 9 | export traffic_matrices_file_id=1cX9pzQmUXjArFU0q3SbwRN6WT5tb9B4I 10 | export traffic_matrices_filename=traffic-matrices.zip 11 | 12 | # Borrowed from https://www.matthuisman.nz/2019/01/download-google-drive-files-wget-curl.html 13 | curl -L -c cookies.txt 'https://docs.google.com/uc?export=download&id='$paths_file_id \ 14 | | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1/p' > confirm.txt 15 | curl -L -b cookies.txt -o $paths_filename \ 16 | 'https://docs.google.com/uc?export=download&id='$paths_file_id'&confirm='$(<confirm.txt) 17 | 20 | curl -L -c cookies.txt 'https://docs.google.com/uc?export=download&id='$traffic_matrices_file_id \ 21 | | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1/p' > confirm.txt 22 | curl -L -b cookies.txt -o $traffic_matrices_filename \ 23 | 'https://docs.google.com/uc?export=download&id='$traffic_matrices_file_id'&confirm='$(<confirm.txt) -------------------------------------------------------------------------------- /ext/fleischer/pqueue.cpp: -------------------------------------------------------------------------------- 2 | #include 3 | #include 4 | // #define NDEBUG // uncomment to disable asserts 5 | #include <assert.h> 6 | #include "pqueue.h" 7 | 8 | using namespace std; 9 | 10 | #define assertm(exp, msg) assert(((void)msg, exp)) 11 | 12 | //////////////////////////////////////////////////////////////////////////////// 13 | // 14 | // PQUEUE 15 | // _d[1] is the datum with the smallest _dist 16 | // guarantee: _d[i/2] has smaller _dist than _d[i]; integer division; that is _d[1] <= _d[2], _d[1] <= _d[3] 17 | // guarantee: _positions[_d[i].node()] == i; that is, _positions is a valid backpointer 18 | // note: _d[0] is empty to make integer math simple 19 | // note: node ids are assumed to go from 0 to __size -1 20 | // note: datum points are stored at _d[1] ... _d[size] 21 | // 22 | //////////////////////////////////////////////////////////////////////////////// 23 | 24 | bool PQUEUE::pqinit( int n) 25 | { 26 | assertm(n > 0, "positive number of entries?"); 27 | assertm(_d == nullptr, "init with non-null data"); 28 | 29 | // initialize the queue; 30 | _d = new PQDATUM[n+1]; // note, we never use d[0] 31 | 32 | _positions = new int[n]; // position values will be 1 ...
n 33 | for (int nid = 0; nid < n; nid++) 34 | _positions[nid] = -1; 35 | 36 | assertm(_d != nullptr, "unable to allocate space"); 37 | 38 | _avail = n; 39 | _size = 0; 40 | 41 | return _d != nullptr && _positions != nullptr; 42 | } 43 | 44 | 45 | bool PQUEUE::pqinsert( PQDATUM const datum) 46 | { 47 | PQDATUM *tmp; 48 | int newpos; 49 | 50 | assertm(_d != nullptr, "call pqinit first"); 51 | assertm(_size + 1 <= _avail, "not enough memory"); 52 | assertm(datum.node() >= 0 && datum.node() <= _avail, "datum node id is not legit"); 53 | 54 | newpos = ++_size; 55 | while (newpos > 1 && _d[newpos / 2].dist() > datum.dist()) { 56 | _d[newpos] = _d[newpos / 2]; 57 | _positions[ _d[newpos].node()] = newpos; 58 | newpos /= 2; 59 | } 60 | 61 | // deep copy 62 | *(&(_d[newpos])) = datum; 63 | _positions[ _d[newpos].node()] = newpos; 64 | return true; 65 | } 66 | 67 | // removes datum with smallest _distance 68 | bool PQUEUE::pqremove( PQDATUM *answer) 69 | { 70 | PQDATUM tmp; 71 | int curr = 1, next; 72 | 73 | assertm(_size > 0 && _d != nullptr, "no elements or not pqinit?"); 74 | 75 | *answer = _d[curr]; 76 | _positions[_d[curr].node()] = -1; // reset 77 | _size--; 78 | 79 | if (_size == 0) return true; 80 | 81 | // bubble up the next smallest into _d[1] and remove the old element at _d[_size + 1] 82 | tmp = _d[ _size+1]; 83 | while (curr <= _size / 2) { 84 | next = 2 * curr; 85 | if ( next < _size && _d[next].dist() > _d[next + 1].dist()) { 86 | next++; 87 | } 88 | if ( _d[next].dist() >= tmp.dist()) { 89 | break; 90 | } 91 | _d[curr] = _d[next]; 92 | _positions[ _d[curr].node()] = curr; 93 | curr = next; 94 | } 95 | 96 | // deep copy 97 | *(&(_d[curr])) = tmp; 98 | _positions[ _d[curr].node()] = curr; 99 | return true; 100 | } 101 | 102 | bool PQUEUE::pqdecrease( int node, double new_distance) 103 | { 104 | assertm(_size > 0 && _d != nullptr, "no elements or not init?"); 105 | assertm(node >= 0 && node < _avail, "node id not in positions"); 106 | assertm(_d[_positions[node]].node() == node, "node at position is not the same"); 107 | assertm(_d[_positions[node]].dist() > new_distance, "distance not decreasing"); 108 | 109 | int curr = _positions[node]; 110 | _d[curr].set_dist(new_distance); 111 | 112 | PQDATUM tmp = _d[curr]; 113 | 114 | // since distance has decreased, this node can only move up in the priority queue 115 | while ( curr > 1 && _d[curr / 2].dist() > new_distance) { 116 | _d[curr] = _d[curr / 2]; 117 | _positions[ _d[curr].node()] = curr; 118 | curr /= 2; 119 | } 120 | *(&(_d[curr])) = tmp; 121 | _positions[ _d[curr].node()] = curr; 122 | return true; 123 | } 124 | 125 | bool PQUEUE::pqpeek( PQDATUM *answer) 126 | { 127 | assertm(_size > 0 && _d != nullptr, "no elements or not init?"); 128 | assertm(answer != nullptr, "answer can't be null"); 129 | 130 | *answer = _d[1]; 131 | return true; 132 | } 133 | 134 | double PQUEUE::pqpeeklength(int nodeid) 135 | { 136 | assertm(nodeid >= 0 && nodeid < _avail, "nodeid outside scope"); 137 | int pos = _positions[nodeid]; 138 | assertm(pos >= 1 && pos <= _size, "pos outside scope"); 139 | return _d[pos].dist(); 140 | } -------------------------------------------------------------------------------- /ext/fleischer/pqueue.h: -------------------------------------------------------------------------------- 1 | #ifndef _PQUEUE_H_ 2 | #define _PQUEUE_H_ 3 | 4 | #include 5 | 6 | using namespace std; 7 | 8 | //////////////////////////////////////////////////////////////////////////////// 9 | // 10 | // PQUEUE 11 | // 12 | 
//////////////////////////////////////////////////////////////////////////////// 13 | 14 | class PQDATUM 15 | { 16 | private: 17 | int _node; 18 | double _dist; 19 | public: 20 | PQDATUM() { _node = -1; _dist = -1; } 21 | ~PQDATUM() {} 22 | 23 | int node() const { return _node; } 24 | double dist() const { return _dist; } 25 | void set_node(int node) { _node = node; } 26 | void set_dist(double dist) { _dist = dist; } 27 | }; 28 | 29 | class PQUEUE 30 | { 31 | private: 32 | int _size, _avail, _step; 33 | PQDATUM *_d; 34 | int* _positions; 35 | public: 36 | PQUEUE() : _size(0), _avail(-1), _d(nullptr), _positions(nullptr) {} 37 | PQUEUE(int n) : PQUEUE() { pqinit(n); } 38 | ~PQUEUE() { 39 | cout << "here" << endl; 40 | if (_d != nullptr) { delete[] _positions; delete[] _d; } 41 | cout << "and here?" << endl; 42 | } 43 | 44 | int size() { return _size; } 45 | int avail() { return _avail; } 46 | int step() { return _step; } 47 | // cannot change contents outside of class 48 | const PQDATUM *d() { return _d; } 49 | const int* positions() { return _positions; } 50 | 51 | // init 52 | bool pqinit(int n); 53 | 54 | // main 55 | bool pqinsert( const PQDATUM datum); // pos is a back-pointer from node_id to position in PQ 56 | bool pqremove( PQDATUM *answer); 57 | bool pqdecrease( int node, double new_distance); 58 | bool pqpeek( PQDATUM *answer); 59 | double pqpeeklength(int nodeid); 60 | 61 | 62 | // convenience methods 63 | double get_key( PQDATUM d) { return d.dist(); } 64 | bool pqempty() { return ( _size == 0); } 65 | }; 66 | 67 | #endif 68 | -------------------------------------------------------------------------------- /ext/fleischer/test.cpp: -------------------------------------------------------------------------------- 1 | #include <iostream> 2 | #include <fstream> 3 | using namespace std; 4 | 5 | int main() 6 | { 7 | char data[100]; 8 | 9 | ifstream input("y.txt"); 10 | input.getline(data, 100); 11 | 12 | // cin.getline(data, 100); 13 | cout << data << endl; 14 | } 15 | -------------------------------------------------------------------------------- /ext/fleischer/test_pqueue.cpp: -------------------------------------------------------------------------------- 1 | #include "pqueue.h" 2 | #include <iostream> 3 | #include <map> 4 | #include 5 | 6 | using namespace std; 7 | 8 | 9 | int main() 10 | { 11 | cout << "testing pqueue impl" << endl; 12 | 13 | int *positions = new int[10]; // assuming there are ten entries 14 | map<int, double> items = { 15 | {0, 1.0}, {1, 0.1}, {2, 0.2}, {3, 0.4}, {4, 10}, {5, 6}, {6, 0.01}, {7, 0.8}, {8, .2}, {9, 100} 16 | }; 17 | 18 | PQUEUE pq(10); 19 | 20 | PQDATUM item; 21 | map<int, double>::iterator midi; 22 | for(midi = items.begin(); midi != items.end(); midi++) 23 | { 24 | item.set_node((*midi).first); 25 | item.set_dist((*midi).second); 26 | 27 | pq.pqinsert(item); 28 | } 29 | 30 | 31 | cout << pq.size() << " " << pq.avail() << endl; 32 | 33 | 34 | while(pq.size() > 0) 35 | { 36 | pq.pqremove(&item); 37 | cout << "minel = " << item.node() << " " << item.dist() << endl; 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /ext/modularity/.gitignore: -------------------------------------------------------------------------------- 1 | rundir/ 2 | -------------------------------------------------------------------------------- /ext/modularity/FastCommunity_w_GPL_v1.0.1/.gitignore: -------------------------------------------------------------------------------- 1 | FastCommunity_wMH 2 | --------------------------------------------------------------------------------
/ext/modularity/FastCommunity_w_GPL_v1.0.1/Makefile: -------------------------------------------------------------------------------- 1 | # Aaron Clauset 2 | # Makefile Oct2003 3 | # feel free to hack this to pieces 4 | 5 | #### local macros 6 | # remove without fussing about it 7 | RM = /bin/rm -f 8 | 9 | # compiler name and flags 10 | CCC = g++ 11 | CCFLAGS = -O3 -fomit-frame-pointer -funroll-loops -fforce-addr -fexpensive-optimizations -Wno-deprecated 12 | 13 | # loader flags 14 | LDFLAGS = 15 | 16 | ### local program information 17 | EXEC=FastCommunity_wMH 18 | SOURCES= fastcommunity_w_mh.cc 19 | 20 | ### intermediate objects 21 | OBJECTS = $(SOURCES: .cc=.o) 22 | 23 | ### includes 24 | INCLUDES = 25 | 26 | ### headers 27 | HEADERS = maxheap.h vektor.h 28 | 29 | ### targets, dependencies and actions 30 | $(EXEC): $(OBJECTS) Makefile 31 | $(LINK.cc) $(CCFLAGS) -o $(EXEC) $(OBJECTS) 32 | 33 | ### sort out dependencies 34 | depend: 35 | makedepend $(INCLUDES) $(HEADERS) $(SOURCES) 36 | 37 | ### housekeeping 38 | 39 | clean: 40 | $(RM) *.o *~ $(EXEC) 41 | 42 | -------------------------------------------------------------------------------- /ext/modularity/FastCommunity_w_GPL_v1.0.1/test1-fc_test1.wpairs: -------------------------------------------------------------------------------- 1 | 2 5 -0.392562 2 | 5 2 -0.392562 3 | -------------------------------------------------------------------------------- /ext/modularity/FastCommunity_w_GPL_v1.0.1/test1.wpairs: -------------------------------------------------------------------------------- 1 | 0 1 1 2 | 0 3 1 3 | 0 4 1 4 | 1 2 1 5 | 1 4 1 6 | 2 3 1 7 | 2 5 1 8 | 5 6 1 9 | 6 7 1 10 | 5 8 1 11 | 6 7 1 12 | 7 8 1 13 | -------------------------------------------------------------------------------- /ext/modularity/FastCommunity_w_GPL_v1.0.1/test2-fc_t2.wpairs: -------------------------------------------------------------------------------- 1 | 0 2 -0.311111 2 | 2 0 -0.311111 3 | -------------------------------------------------------------------------------- /ext/modularity/FastCommunity_w_GPL_v1.0.1/test2.wpairs: -------------------------------------------------------------------------------- 1 | 0 1 1 2 | 0 3 1 3 | 0 4 1 4 | 1 2 1 5 | 1 4 1 6 | 2 3 1 7 | 2 5 5 8 | 5 6 1 9 | 6 7 1 10 | 5 8 1 11 | 6 7 1 12 | 7 8 1 13 | -------------------------------------------------------------------------------- /ext/teavar/Algorithms/FFC.jl: -------------------------------------------------------------------------------- 1 | using JuMP, Combinatorics, Gurobi 2 | 3 | function printResults(o, a, b, edges, T, Tf) 4 | println("Objective value: ", o) 5 | 6 | for i in 1:size(b,1) 7 | println("bw flow ", i, " = ", getvalue(b[i])) 8 | end 9 | print("\n") 10 | println("-----------------------------------------") 11 | 12 | for i in 1:size(a,1) 13 | for j in 1:size(a,2) 14 | println("flow ",i, ", tunnel ", j, " = ", getvalue(a[i,j])) 15 | print("edges in use: ") 16 | for e in T[Tf[i][j]] 17 | print(edges[e]) 18 | end 19 | println("\n") 20 | end 21 | end 22 | end 23 | 24 | 25 | 26 | function FFC(env, edges, capacity, flows, demand, k, T, Tf; minb=0) 27 | 28 | nedges = size(edges,1) 29 | nflows = size(flows,1) 30 | ntunnels = size(T,1) 31 | 32 | # CREATE ALL SCENARIOS 33 | scenarios = [] 34 | for bits in collect(combinations(1:nedges,k)) 35 | s = ones(nedges) 36 | for bit in bits 37 | s[bit] = 0 38 | end 39 | push!(scenarios, s) 40 | end 41 | 42 | #CREATE RESIDUAL TUNNELS BY SCENARIO BY FLOW (References Tf) 43 | Tsf = [] 44 | for s in 1:size(scenarios,1) 45 | sft = [] 46 
| for f in 1:size(Tf,1) 47 | ft = [] 48 | for t in 1:size(Tf[f],1) 49 | up = true 50 | if (length(T[Tf[f][t]]) == 0) 51 | up = false 52 | end 53 | for e in T[Tf[f][t]] 54 | if scenarios[s][e] == 0 55 | up = false 56 | end 57 | end 58 | if up 59 | push!(ft, t) 60 | end 61 | end 62 | push!(sft,ft) 63 | end 64 | push!(Tsf,sft) 65 | end 66 | 67 | #CREATE TUNNEL EDGE MATRIX 68 | L = zeros(ntunnels, nedges) 69 | for t in 1:ntunnels 70 | for e in 1:nedges 71 | if in(e, T[t]) 72 | L[t,e] = 1 73 | end 74 | end 75 | end 76 | 77 | model = Model(solver=GurobiSolver(env, OutputFlag=0)) 78 | @variable(model, b[1:nflows] >= 0, basename="b") 79 | @variable(model, a[1:nflows,1:size(Tf[1],1)] >= 0, basename="a") 80 | @variable(model, u >= 0, basename="u") 81 | 82 | 83 | for f in 1:nflows 84 | @constraint(model, sum(a[f,t] for t in 1:size(Tf[f],1)) >= b[f]) #the sum of all allocated bandwidths on every flow must be >= the total bandwidth for that flow 85 | end 86 | 87 | for e in 1:nedges 88 | @constraint(model, sum(a[f,t] * L[Tf[f][t],e] for f in 1:nflows for t in 1:size(Tf[f],1)) <= capacity[e]) #overlapping flows cannot add up to the capacity of that link 89 | end 90 | 91 | for f in 1:nflows 92 | for s in 1:size(scenarios,1) 93 | @constraint(model, sum(a[f,t] for t in Tsf[s][f]) >= b[f]) #residual tunnels must be able to carry bandwidth 94 | end 95 | end 96 | 97 | for f in 1:nflows 98 | @constraint(model, b[f] >= minb) 99 | end 100 | 101 | for f in 1:nflows 102 | @constraint(model, b[f] <= demand[f]) #all allocated bandwidths must be less than the demand for that flow 103 | for t in 1:size(Tf[f],1) 104 | @constraint(model, a[f,t] >= 0) #each allocated bandwidth on for flow f on tunnel t >= 0 105 | end 106 | end 107 | 108 | for f in 1:nflows 109 | # @constraint(model, u <= b[f]/demand[f]) 110 | end 111 | #   @objective(model, Max, u) 112 | 113 |   @objective(model, Max, sum((b[i] for i in 1:size(b,1)))) 114 | solve(model) 115 | # printResults(getobjectivevalue(model), a, b, edges, T, Tf) 116 | return getvalue(a), getvalue(b) 117 | end 118 | -------------------------------------------------------------------------------- /ext/teavar/Algorithms/MaxMin.jl: -------------------------------------------------------------------------------- 1 | using JuMP, Gurobi 2 | 3 | function printAllocations(a, edges, T, Tf) 4 | println("------------------ Allocations ----------------------\n") 5 | for i in 1:size(a,1) 6 | for j in 1:size(a,2) 7 | println("Flow ",i, ", tunnel ", j, " allocated : ", a[i,j]) 8 | print("Edges in use: ") 9 | for e in T[Tf[i][j]] 10 | print(edges[e]) 11 | end 12 | println("\n") 13 | end 14 | end 15 | end 16 | 17 | function MaxMin(env, edges, capacity, flows, demand, k, T, Tf, explain=false) 18 | nedges = size(edges,1) 19 | nflows = size(flows,1) 20 | ntunnels = size(T,1) 21 | 22 | #CREATE TUNNEL EDGE MATRIX 23 | L = zeros(ntunnels, nedges) 24 | for t in 1:ntunnels 25 | for e in 1:nedges 26 | if in(e, T[t]) 27 | L[t,e] = 1 28 | end 29 | end 30 | end 31 | a = zeros(nflows, k) 32 | b = zeros(nflows) .- 1 33 | U = collect(1:nflows) 34 | i = 0 35 | while length(U) != 0 36 | a, amin = MaxMinLP(env, edges, capacity, flows, U, b, demand, k, T, Tf) 37 | Z = [] 38 | for u in 1:length(U) 39 | if amin >= demand[U[u]] 40 | push!(Z, U[u]) 41 | b[U[u]] = sum(a[U[u],:]) 42 | end 43 | end 44 | U = filter(u -> u ∉ Z, U) 45 | i += 1 46 | end 47 | if explain 48 | printAllocations(a, edges, T, Tf) 49 | end 50 | return a 51 | end 52 | 53 | 54 | 55 | function MaxMinLP(env, 56 | edges, 57 | capacity, 58 | flows, 59 | U, 
60 | b_fixed, 61 | demand, 62 | k, 63 | T, 64 | Tf) 65 | 66 | nedges = size(edges,1) 67 | nflows = size(flows,1) 68 | ntunnels = size(T,1) 69 | 70 | #CREATE TUNNEL EDGE MATRIX 71 | L = zeros(ntunnels, nedges) 72 | for t in 1:ntunnels 73 | for e in 1:nedges 74 | if in(e, T[t]) 75 | L[t,e] = 1 76 | end 77 | end 78 | end 79 | 80 | model = Model(solver=GurobiSolver(env, OutputFlag=0)) 81 | @variable(model, a[1:nflows, 1:k] >= 0, basename="a", category=:SemiCont) 82 | @variable(model, amin >= 0, basename="amin", category=:SemiCont) 83 | 84 | for e in 1:nedges 85 | @constraint(model, sum(a[U[u],t] * L[Tf[U[u]][t],e] for u in 1:length(U), t in 1:size(Tf[U[u]],1)) <= capacity[e]) 86 | end 87 | 88 | 89 | for u in 1:length(U) 90 | @constraint(model, sum(a[U[u],t] for t in 1:size(Tf[U[u]],1)) <= demand[U[u]]) 91 | end 92 | 93 | for f in 1:nflows 94 | if b_fixed[f] != -1 95 | @constraint(model, sum(a[f,t] for t in 1:size(Tf[f],1)) == b_fixed[f]) 96 | end 97 | end 98 | 99 | for u in 1:length(U) 100 | @constraint(model, amin <= sum(a[U[u],t] for t in 1:size(Tf[U[u]],1))) 101 | end 102 | 103 | @objective(model, Max, amin) 104 | 105 |     solve(model) 106 | return (getvalue(a), getvalue(amin)) 107 | end 108 | -------------------------------------------------------------------------------- /ext/teavar/Algorithms/SMORE.jl: -------------------------------------------------------------------------------- 1 | using JuMP, Gurobi 2 | 3 | 4 | function SMORE(env, edges, capacity, flows, demand, T, Tf) 5 | 6 | nedges = size(edges,1) 7 | nflows = size(flows,1) 8 | ntunnels = size(T,1) 9 | 10 | 11 | #CREATE TUNNEL EDGE MATRIX 12 | L = zeros(ntunnels, nedges) 13 | for t in 1:ntunnels 14 | for e in 1:nedges 15 | if in(e, T[t]) 16 | L[t,e] = 1 17 | end 18 | end 19 | end 20 | 21 | model = Model(solver=GurobiSolver(env, OutputFlag=0)) 22 | @variable(model, Z >= 0, basename="Z") 23 | @variable(model, a[1:nflows,1:size(Tf[1],1)] >= 0, basename="a") 24 | 25 | 26 | for f in 1:nflows 27 | for t in 1:size(Tf[f],1) 28 | @constraint(model, sum(a[f,t] for t in 1:size(Tf[f],1)) == 1) #the sum of all allocated bandwidths on every flow must be >= the total bandwidth for that flow 29 | end 30 | 31 | end 32 | 33 | @expression(model, U[e=1:nedges], sum(a[f,t] * L[Tf[f][t],e] for f in 1:nflows, t in 1:size(Tf[f],1)) / capacity[e]) 34 | for e in 1:nedges 35 | @constraint(model, U[e] <= Z) #overlapping flows cannot add up to the capacity of that link 36 | end 37 | 38 | 39 |   @objective(model, Min, Z) 40 | solve(model) 41 | return getvalue(a) 42 | end 43 | -------------------------------------------------------------------------------- /ext/teavar/Algorithms/TEAVAR.jl: -------------------------------------------------------------------------------- 1 | include("../util.jl") 2 | 3 | using JuMP, Gurobi 4 | 5 | function TEAVAR(env, 6 | edges, 7 | capacity, 8 | flows, 9 | demand, 10 | beta, 11 | k, 12 | T, 13 | Tf, 14 | scenarios, 15 | scenario_probs, 16 | outputfilename; 17 | explain=false, 18 | verbose=false, 19 | utilization=false, 20 | average=false) 21 | 22 | nedges = length(edges) 23 | nflows = length(flows) 24 | ntunnels = length(T) 25 | nscenarios = length(scenarios) 26 | p = scenario_probs 27 | 28 | println(Dates.format(now(), "HH:MM:SS"), ": #edges ", nedges, " #flows ", nflows, " #tunnels ", ntunnels, " #scenarios ", nscenarios) 29 | 30 | #CREATE TUNNEL SCENARIO MATRIX 31 | X = ones(nscenarios,ntunnels) 32 | for s in 1:nscenarios 33 | for t in 1:ntunnels 34 | if size(T[t],1) == 0 35 | X[s,t] = 0 36 | else 37 | for e in 1:nedges 38 | 
if scenarios[s][e] == 0 39 | back_edge = findfirst(x -> x == (edges[e][2],edges[e][1]), edges) 40 | if in(e, T[t]) || in(back_edge, T[t]) 41 | # if in(e, T[t]) 42 | X[s,t] = 0 43 | end 44 | end 45 | end 46 | end 47 | end 48 | end 49 | 50 | println(Dates.format(now(), "HH:MM:SS"), ": created tunnel scenario matrix") 51 | 52 | #CREATE TUNNEL EDGE MATRIX 53 | L = zeros(ntunnels, nedges) 54 | for t in 1:ntunnels 55 | for e in 1:nedges 56 | if in(e, T[t]) 57 | L[t,e] = 1 58 | end 59 | end 60 | end 61 | 62 | println(Dates.format(now(), "HH:MM:SS"), ": created tunnel edge matrix") 63 | 64 | model = Model(solver=GurobiSolver(env, OutputFlag=1)) 65 | # flow per commodity per path variables 66 | @variable(model, a[1:nflows, 1:k] >= 0, basename="a", category=:SemiCont) 67 | # alpha variable 68 | @variable(model, alpha >= 0, basename="alpha", category=:SemiCont) 69 | # maximum flow lost in that scenario 70 | @variable(model, umax[1:nscenarios] >= 0, basename="umax") 71 | # flow lost per commod per scenario 72 | @variable(model, u[1:nscenarios, 1:nflows] >= 0, basename="u") 73 | 74 | # capacity constraints for final flow assigned to "a" variables 75 | for e in 1:nedges 76 | @constraint(model, sum(a[f,t] * L[Tf[f][t],e] for f in 1:nflows, t in 1:size(Tf[f],1)) <= capacity[e]) 77 | end 78 | 79 | # FLOW LEVEL LOSS 80 | @expression(model, satisfied[s=1:nscenarios, f=1:nflows], sum(a[f,t] * X[s,Tf[f][t]] for t in 1:size(Tf[f],1)) / demand[f]) 81 | 82 | for s in 1:nscenarios 83 | for f in 1:nflows 84 | # @constraint(model, (demand[f] - sum(a[f,t] * X[s,Tf[f][t]] for t in 1:size(Tf[f],1))) / demand[f] <= u[s,f]) 85 | @constraint(model, u[s,f] >= 1 - satisfied[s,f]) 86 | end 87 | end 88 | 89 | for s in 1:nscenarios 90 | if average 91 | @constraint(model, umax[s] + alpha >= (sum(u[s,f] for f in 1:nflows)) / nflows) 92 | # @constraint(model, umax[s] + alpha >= avg_loss[s]) 93 | else 94 | for f in 1:nflows 95 | @constraint(model, umax[s] + alpha >= u[s,f]) 96 | end 97 | end 98 | end 99 | @objective(model, Min, alpha + (1 / (1 - beta)) * sum((p[s] * umax[s] for s in 1:nscenarios))) 100 | 101 | println(Dates.format(now(), "HH:MM:SS"), ": ready to solve") 102 | 103 |     solve(model) 104 | 105 | println(Dates.format(now(), "HH:MM:SS"), ": solver finished; explaining") 106 | 107 | if (explain) 108 | println("Runtime: ", getsolvetime(model)) 109 | println("beta: ", beta) 110 | println("#flows: ", nflows) 111 | println("#edges: ", nedges) 112 | println("#tunnels: ", ntunnels) 113 | println("#demands: ", nflows, " total demand=", sum(demand[f] for f in 1:nflows)) 114 | println("#scenarios: ", nscenarios, " total_prob= ", sum(p[s] for s in 1:nscenarios)) 115 | println("total_scenario_prob= ", sum(p[s] for s in 1:nscenarios)) 116 | 117 | result_u = getvalue(u) 118 | result_umax = getvalue(umax) 119 | result_satisfied = getvalue(satisfied) 120 | result_alpha = getvalue(alpha) 121 | result_a = getvalue(a) 122 | 123 | # compute allocations and weights 124 | flow_allocation_1 = Array{Float64}(undef, nflows) 125 | numtunnel_perflow = 0 126 | for f in 1:nflows 127 | numtunnel_perflow = max(numtunnel_perflow, size(Tf[f], 1)) 128 | end 129 | flow_tunnel_weights = Matrix{Float64}(undef, nflows, numtunnel_perflow) 130 | all_flow_alloc = 0 131 | 132 | for f in 1:nflows 133 | flow_totalalloc = 0 134 | for t in 1:size(Tf[f], 1) 135 | flow_totalalloc += result_a[f,t] 136 | end 137 | 138 | all_flow_alloc += flow_totalalloc 139 | flow_allocation_1[f] = (1 - result_alpha) * min(flow_totalalloc, demand[f]) 140 | for t in 1:size(Tf[f], 1) 
141 | weight = 0 142 | if flow_totalalloc > 0 143 | weight = result_a[f, t]/ flow_totalalloc 144 | end 145 | flow_tunnel_weights[f, t] = weight 146 | end 147 | end 148 | flow_allocation_1[flow_allocation_1 .< 0] .= 0 149 | 150 | println("totalalloc= ", all_flow_alloc, " netalloc1= ", sum(flow_allocation_1[f] for f in 1:nflows)) 151 | 152 | output = open(outputfilename, "w") 153 | write(output, "---flow allocation with alpha---\n") 154 | writedlm(output, flow_allocation_1, ',') 155 | write(output, "---weights per flow tunnel---\n") 156 | writedlm(output, flow_tunnel_weights, ',') 157 | flush(output) 158 | close(output) 159 | 160 | printResults(getobjectivevalue(model), result_alpha, result_a, result_u, result_umax, edges, scenarios, T, Tf, L, capacity, p, demand, verbose=verbose, utilization=utilization) 161 | end 162 | 163 | return (result_alpha, getobjectivevalue(model), result_a, result_umax, getsolvetime(model)) 164 | end 165 | 166 | -------------------------------------------------------------------------------- /ext/teavar/availability.jl: -------------------------------------------------------------------------------- 1 | using DelimitedFiles, ProgressMeter, PyPlot, Gurobi 2 | 3 | include("./util.jl") 4 | include("./parsers.jl") 5 | include("./Algorithms/TEAVAR.jl") 6 | include("./Algorithms/MaxMin.jl") 7 | include("./Algorithms/SMORE.jl") 8 | include("./Algorithms/FFC.jl") 9 | include("./simulation.jl") 10 | 11 | function availabilityPlot(algorithmns, 12 | topologies, 13 | demand_downscales, 14 | num_demands, 15 | iterations, 16 | cutoff, 17 | start, 18 | step, 19 | finish; 20 | k=12, 21 | target=0, 22 | xliml=.95, 23 | xlimr=1.0001, 24 | paths="KSP", 25 | weibull_scale=.0001, 26 | plot=true, 27 | dirname="./data/raw/availability/") 28 | env = Gurobi.Env() 29 | availability_vals = [[] for i in 1:length(algorithmns)] 30 | 31 | dir = nextRun(dirname) 32 | for algorithmn in algorithmns 33 | mkdir("$(dir)/$(algorithmn)") 34 | end 35 | 36 | scenarios_all = [] 37 | scenario_probs_all = [] 38 | for t in 1:length(topologies) 39 | topology = topologies[t] 40 | links, capacity, link_probs, nodes = readTopology(topology) 41 | scenarios_all_top = [] 42 | scenario_probs_top = [] 43 | for i in 1:iterations 44 | link_probs = weibullProbs(length(links), shape=.8, scale=weibull_scale) 45 | scenarios, probs = subScenarios(link_probs, cutoff, first=true, last=false) 46 | push!(scenarios_all_top, scenarios) 47 | push!(scenario_probs_top, probs) 48 | println(probs) 49 | end 50 | push!(scenarios_all, scenarios_all_top) 51 | push!(scenario_probs_all, scenario_probs_top) 52 | end 53 | 54 | scales = collect(start:step:finish) 55 | progress = ProgressMeter.Progress(length(scales)*length(topologies)*num_demands*iterations*length(algorithmns), .1, "Computing Availability...", 50) 56 | confidence = zeros(length(scales), 3 * length(algorithmns) + 1) 57 | confidence[:,1] = scales 58 | for s in 1:length(scales) 59 | availabilities = [[] for i in 1:length(algorithmns)] 60 | for t in 1:length(topologies) 61 | links, capacity, link_probs, nodes = readTopology(topologies[t]) 62 | for d in 1:num_demands 63 | demand, flows = readDemand("$(topologies[t])/demand", length(nodes), d, scale=scales[s], downscale=demand_downscales[t]) 64 | if paths != "KSP" 65 | T, Tf, k = parsePaths("$(topologies[t])/paths/$(paths)", links, flows) 66 | else 67 | T, Tf, g = getTunnels(nodes, links, capacity, flows, k) 68 | end 69 | for i in 1:iterations 70 | for alg in 1:length(algorithmns) 71 | if algorithmns[alg] == "TEAVAR" 72 | var, cvar, 
a = TEAVAR(env, links, capacity, flows, demand, scenario_probs_all[t][i][1] - .01, k, T, Tf, scenarios_all[t][i], scenario_probs_all[t][i], average=true) 73 | elseif algorithmns[alg] == "ECMP" 74 | a = ones(size(Tf,1),k) 75 | elseif algorithmns[alg] == "MaxMin" 76 | a = MaxMin(env, links, capacity, flows, demand, k, T, Tf) 77 | elseif algorithmns[alg] == "FFC-1" 78 | a, _ = FFC(env, links, capacity, flows, demand, 1, T, Tf) 79 | elseif algorithmns[alg] == "FFC-2" 80 | a, _ = FFC(env, links, capacity, flows, demand, 2, T, Tf) 81 | elseif algorithmns[alg] == "SMORE" 82 | a = SMORE(env, links, capacity, flows, demand, T, Tf) 83 | else 84 | T, Tf, k = parsePaths("$(topologies[t])/paths/$(algorithmns[alg])", links, flows) 85 | a = parseYatesSplittingRatios("$(topologies[t])/paths/$(algorithmns[alg])", k, flows) 86 | end 87 | losses = calculateLossReallocation(links, capacity, demand, flows, T, Tf, k, a, scenarios_all[t][i], scenario_probs_all[t][i]) 88 | println(losses) 89 | open("$(dir)/$(algorithmns[alg])/$(algorithmns[alg])_losses.txt", "a") do io 90 | writedlm(io, transpose(hcat(scenario_probs_all[t][i], losses))) 91 | end 92 | availability = PDF(losses, scenario_probs_all[t][i], target) 93 | println(availability) 94 | push!(availabilities[alg], availability) 95 | ProgressMeter.next!(progress, showvalues = [(:topology,topologies[t]), (:scale,scales[s]), (:demand,"$(d)/$(num_demands)"), (:iteration, "$(i)/$(iterations)"), (:algorithmn,algorithmns[alg]), (:availability, availability)]) 96 | end 97 | end 98 | end 99 | end 100 | 101 | for alg in 1:length(algorithmns) 102 | confidence[s, (alg-1)*3 + 2] = sum(availabilities[alg]) / (num_demands * iterations * length(topologies)) 103 | confidence[s, (alg-1)*3 + 3] = minimum(availabilities[alg]) 104 | confidence[s, (alg-1)*3 + 4] = maximum(availabilities[alg]) 105 | open("$(dir)/$(algorithmns[alg])/$(algorithmns[alg])_availabilities.txt", "a") do io writedlm(io, transpose(availabilities[alg])) end 106 | push!(availability_vals[alg], sum(availabilities[alg]) / (num_demands * iterations * length(topologies))) 107 | end 108 | end 109 | 110 | # write to output directory 111 | writedlm("$(dir)/availabilities", availability_vals) 112 | writedlm("$(dir)/scales", scales) 113 | if paths == nothing 114 | paths = "" 115 | end 116 | writedlm("$(dir)/params", [["algorithmns", "topologies", "demand_downscales", "num_demands", "iterations", "cutoff", "scales", "k", "target", "paths", "weibull_scale"], [algorithmns, topologies, demand_downscales, num_demands, iterations, cutoff, scales, k, target, paths, weibull_scale]]) 117 | writedlm("$(dir)/confidence", confidence) 118 | 119 | if plot 120 | PyPlot.clf() 121 | for i in 1:size(availability_vals, 1) 122 | PyPlot.plot(availability_vals[i], scales) 123 | end 124 | PyPlot.xlabel("Availability", fontweight="bold") 125 | PyPlot.ylabel("Demand Scale", fontweight="bold") 126 | PyPlot.xlim(left=xliml, right=xlimr) 127 | PyPlot.legend(algorithmns, loc="upper right") 128 | PyPlot.savefig("$(dir)/plot.png") 129 | PyPlot.show() 130 | end 131 | end 132 | 133 | -------------------------------------------------------------------------------- /ext/teavar/cutoff_error.jl: -------------------------------------------------------------------------------- 1 | using DelimitedFiles, ProgressMeter, PyPlot 2 | 3 | include("./util.jl") 4 | include("./parsers.jl") 5 | include("./Algorithms/TEAVAR.jl") 6 | include("./simulation.jl") 7 | 8 | 9 | function topologyCutoffError(topology, 10 | num_demands, 11 | iterations, 12 | cutoffs; 
paths="SMORE", 13 | weibull_scale=.0001, 14 | demand_downscale=5000, 15 | plot=true, 16 | dirname="./data/raw/cutoff_error/") 17 | env = Gurobi.Env() 18 | y_vals = [] 19 | scenarios = [] 20 | probs = [] 21 | links, capacity, link_probs, nodes = readTopology("$(topology)") 22 | demand, flows = readDemand("$(topology)/demand", length(nodes), 1, scale=1, downscale=demand_downscale) 23 | T, Tf, k = parsePaths("$(topology)/paths/$(paths)", links, flows) 24 | 25 | dir = nextRun(dirname) 26 | for j in 1:length(cutoffs) 27 | writedlm("$(dir)/cutoff_$(cutoffs[j])_error", [["cvar_o", "cvar", "error_cvar"]]) 28 | end 29 | 30 | progress = ProgressMeter.Progress(num_demands*iterations*length(cutoffs), .1, "Computing cutoff error...", 50) 31 | cutoff_errors = zeros(length(cutoffs)) 32 | for d in 1:num_demands 33 | demand, flows = readDemand("$(topology)/demand", length(nodes), d, scale=1, downscale=demand_downscale) 34 | for i in 1:iterations 35 | weibull_probs = weibullProbs(length(link_probs), shape=.8, scale=weibull_scale) 36 | optimal_scenarios, optimal_probs = subScenarios(weibull_probs, (sum(weibull_probs)/length(weibull_probs))^2, first=true, last=true) 37 | beta = optimal_probs[1] 38 | var_o, cvar_o, a_o, max_u_o = TEAVAR(env, links, capacity, flows, demand, beta, k, T, Tf, optimal_scenarios, optimal_probs) 39 | for j in 1:length(cutoffs) 40 | scenarios, scenario_probs = subScenarios(weibull_probs, cutoffs[j], first=true, last=true) 41 | var, cvar, a, max_u = TEAVAR(env, links, capacity, flows, demand, beta, k, T, Tf, scenarios, scenario_probs) 42 | cvar_o = sum((1 .- max_u_o) .* optimal_probs) 43 | cvar = sum((1 .- max_u) .* scenario_probs) 44 | err = abs((cvar_o - cvar)/cvar_o) 45 | cutoff_errors[j] += err 46 | open("$(dir)/cutoff_$(cutoffs[j])_error", "a") do io 47 | writedlm(io, transpose([cvar_o, cvar, err])) 48 | end 49 | ProgressMeter.next!(progress, showvalues = [(:topology,topology), (:demand,"$(d)/$(num_demands)"), (:iterations,"$(i)/$(iterations)"), (:cutoff, cutoffs[j]), (:error, err)]) 50 | end 51 | end 52 | end 53 | y_vals = cutoff_errors ./ (num_demands * iterations) 54 | writedlm("$(dir)/cutoffs", cutoffs) 55 | writedlm("$(dir)/y_vals", y_vals) 56 | writedlm("$(dir)/params", [["topology", "num_demands", "iterations", "cutoffs", "paths", "weibull_scale", "demand_downscale"], [topology, num_demands, iterations, cutoffs, paths, weibull_scale, demand_downscale]]) 57 | 58 | if plot 59 | PyPlot.clf() 60 | nbars = length(cutoffs) 61 | barWidth = 1/(nbars) 62 | for i in 1:length(y_vals) 63 | PyPlot.bar(barWidth * i, y_vals[i], alpha=0.8, width=barWidth) 64 | end 65 | PyPlot.ylabel("Percent Error", fontweight="bold") 66 | PyPlot.legend(cutoffs, loc="upper right") 67 | PyPlot.savefig("$(dir)/plot.png") 68 | PyPlot.show() 69 | end 70 | end 71 | -------------------------------------------------------------------------------- /ext/teavar/dependencies.txt: -------------------------------------------------------------------------------- 1 | ArgParse 2 | Cairo 3 | Combinatorics 4 | Compose 5 | DelimitedFiles 6 | Fontconfig 7 | GraphPlot 8 | Gurobi 9 | HTTP 10 | JSON2 11 | JuMP (v0.18.5) 12 | LightGraphs 13 | ProgressMeter 14 | PyPlot 15 | Sockets 16 | MathOptFormat 17 | -------------------------------------------------------------------------------- /ext/teavar/draw.jl: -------------------------------------------------------------------------------- 1 | using LightGraphs, Compose, GraphPlot, Cairo, Fontconfig 2 | 3 | include("./parsers.jl") 4 | 
#################################################################################### 5 | #################### Draw graph with edges and allocations ####################### 6 | #################################################################################### 7 | 8 | function drawGraph(a, L, Tf, edges, num_nodes) 9 | edgelabels = [] 10 | graph = LightGraphs.DiGraph(num_nodes) 11 | for e in 1:size(edges,1) 12 | LightGraphs.add_edge!(graph, edges[e][1], edges[e][2]) 13 | s = 0 14 | for f in 1:size(a,1) 15 | for t in 1:size(a,2) 16 | s += a[f,t] * L[Tf[f][t],e] 17 | end 18 | end 19 | push!(edgelabels, s) 20 | end 21 | nodelabel = collect(1:num_nodes) 22 | Compose.draw(PNG("./graph.png", 1000, 1000), gplot(graph, edgelabelc="white", EDGELABELSIZE = 15.0, NODELABELSIZE=20.0, nodelabel=nodelabel, edgelabeldisty=0.5, edgelabel=edgelabels, EDGELINEWIDTH=3.0, arrowlengthfrac=.04)) 23 | end 24 | 25 | 26 | function drawGraph(topology; outdir="./") 27 | edges, capacity, probabilities, nodes = readTopology(topology) 28 | graph = LightGraphs.DiGraph(length(nodes)) 29 | for e in 1:length(edges) 30 | LightGraphs.add_edge!(graph, edges[e][1], edges[e][2]) 31 | end 32 | nodelabel = collect(1:length(nodes)) 33 | Compose.draw(PNG("$(outdir)/graph.png", 1000, 1000), gplot(graph, edgelabelc="white", EDGELABELSIZE = 15.0, NODELABELSIZE=20.0, nodelabel=nodelabel, edgelabeldisty=0.5, EDGELINEWIDTH=3.0, arrowlengthfrac=.04)) 34 | end 35 | 36 | -------------------------------------------------------------------------------- /ext/teavar/find_beta.jl: -------------------------------------------------------------------------------- 1 | include("./Algorithms/TEAVAR.jl") 2 | include("./simulation.jl") 3 | include("./util.jl") 4 | include("./parsers.jl") 5 | 6 | function findBeta(alpha, sigfigs, links, capacity, flows, demand, cutoff, T, Tf, k, scenarios, scenario_probs; cvar=false, allocations=nothing) 7 | 8 | step_size = 10.0^(-1 * sigfigs) 9 | low = 0 10 | high = 1 - step_size 11 | index = cvar ? 2 : 1 12 | 13 | while low <= high 14 | middle = round((low + high)/2, digits=sigfigs) 15 | val = allocations != nothing ? 16 | TEAVAR(links, capacity, flows, demand, middle, k, T, Tf, allocations=a)[index] : 17 | TEAVAR(links, capacity, flows, demand, middle, k, T, Tf, scenarios, scenario_probs)[index] 18 | if val <= alpha 19 | low = middle + step_size 20 | if (low == 1) break end 21 | val = allocations != nothing ? 22 | TEAVAR(links, capacity, flows, demand, low, k, T, Tf, allocations=a)[index] : 23 | TEAVAR(links, capacity, flows, demand, low, k, T, Tf, scenarios, scenario_probs)[index] 24 | if val > alpha 25 | return middle 26 | end 27 | else 28 | high = middle - step_size 29 | val = allocations != nothing ? 30 | TEAVAR(links, capacity, flows, demand, high, k, T, Tf, allocations=a)[index] : 31 | TEAVAR(links, capacity, flows, demand, high, k, T, Tf, scenarios, scenario_probs)[index] 32 | if val <= alpha 33 | return high 34 | end 35 | end 36 | end 37 | return high < .5 ? 
0 : 1 38 | end 39 | -------------------------------------------------------------------------------- /ext/teavar/path_selection.jl: -------------------------------------------------------------------------------- 1 | using DelimitedFiles, ProgressMeter, PyPlot 2 | 3 | include("./util.jl") 4 | include("./parsers.jl") 5 | include("./Algorithms/TEAVAR.jl") 6 | include("./simulation.jl") 7 | 8 | function pathSelection(topologies, 9 | demand_downscales, 10 | paths, 11 | num_demands, 12 | bars, 13 | cutoff; 14 | scale=1.0, 15 | ksp=[], 16 | weibull=true, 17 | plot=true, 18 | dirname="./data/raw/path_selection") 19 | 20 | env = Gurobi.Env() 21 | x_vals = bars 22 | y_vals = [] 23 | labels = [] 24 | scenarios = [] 25 | probs = [] 26 | 27 | # COMPUTE SCENARIOS 28 | for i in 1:length(topologies) 29 | links, capacity, link_probs, nodes = readTopology(topologies[i]) 30 | if weibull == true 31 | weibull_probs = weibullProbs(length(link_probs), shape=.8, scale=.0001) 32 | temp_s, temp_p = subScenarios(weibull_probs, (sum(weibull_probs)/length(weibull_probs))^2, first=true, last=false) 33 | push!(scenarios, temp_s) 34 | push!(probs, temp_p) 35 | else 36 | scenario, prob = subScenarios(link_probs, cutoff, first=false, last=false) 37 | push!(scenarios, scenario) 38 | push!(probs, prob) 39 | end 40 | end 41 | 42 | 43 | for p in 1:length(paths) 44 | all_var_vals = [[] for i=1:length(x_vals)] 45 | algorithmn_vals = [] 46 | progress = ProgressMeter.Progress(length(topologies)*num_demands*length(x_vals), .1, "Computing TEAVAR_$(paths[p])...", 50) 47 | for i in 1:length(topologies) 48 | links, capacity, link_probs, nodes = readTopology(topologies[i]) 49 | for d in 1:num_demands 50 | demand, flows = readDemand("$(topologies[i])/demand", length(nodes), d, scale=scale, downscale=demand_downscales[i]) 51 | T, Tf, k = parsePaths("$(topologies[i])/paths/$(paths[p])", links, flows) 52 | vals = map(b -> (ProgressMeter.next!(progress, showvalues = [(:topology,topologies[i]), (:demand,"$(d)/$(num_demands)"), (:paths,paths[p])]); 53 | TEAVAR(env, links, capacity, flows, demand, b, k, T, Tf, scenarios[i], probs[i])), x_vals) 54 | for j in 1:length(vals) 55 | push!(all_var_vals[j], vals[j][2]) 56 | end 57 | end 58 | end 59 | for j in 1:length(all_var_vals) 60 | push!(algorithmn_vals, sum(all_var_vals[j])/length(all_var_vals[j])) 61 | end 62 | push!(y_vals, algorithmn_vals) 63 | push!(labels, "Teavar_$(paths[p])") 64 | end 65 | 66 | for p in 1:length(ksp) 67 | all_var_vals = [[] for i=1:length(x_vals)] 68 | algorithmn_vals = [] 69 | progress = ProgressMeter.Progress(length(topologies)*num_demands*length(x_vals), .1, "Computing TEAVAR_ksp$(ksp[p])...", 50) 70 | for i in 1:length(topologies) 71 | links, capacity, link_probs, nodes = readTopology(topologies[i]) 72 | for d in 1:num_demands 73 | demand, flows = readDemand("$(topologies[i])/demand", length(nodes), d, scale=scale, downscale=demand_downscales[i]) 74 | T, Tf, g = getTunnels(nodes, links, capacity, flows, ksp[p]) 75 | vals = map(b -> (ProgressMeter.next!(progress, showvalues = [(:topology,topologies[i]), (:demand,"$(d)/$(num_demands)"), (:paths,"KSP-$p")]); 76 | TEAVAR(env, links, capacity, flows, demand, b, ksp[p], T, Tf, scenarios[i],probs[i])), x_vals) 77 | for j in 1:length(vals) 78 | push!(all_var_vals[j], vals[j][2]) 79 | end 80 | end 81 | end 82 | for j in 1:length(all_var_vals) 83 | push!(algorithmn_vals, sum(all_var_vals[j])/length(all_var_vals[j])) 84 | end 85 | push!(y_vals, algorithmn_vals) 86 | push!(labels, "Teavar_ksp$(ksp[p])") 87 | end 88 | 89 | 90 | # 
LOG OUTPUTS 91 | dir = nextRun(dirname) 92 | z = zeros(length(x_vals), length(y_vals) + 1) 93 | z[:,1] = x_vals 94 | for i in 1:length(y_vals) 95 | z[:,i+1] = y_vals[i] 96 | end 97 | writedlm("$dir/vals", z) 98 | 99 | # PLOT 100 | if plot 101 | PyPlot.clf() 102 | for i in 1:size(y_vals, 1) 103 | PyPlot.plot(x_vals, y_vals[i]) 104 | end 105 | PyPlot.xlabel("Availability", fontweight="bold") 106 | PyPlot.ylabel("Beta Tail Loss", fontweight="bold") 107 | PyPlot.legend(labels, loc="upper right") 108 | PyPlot.savefig("$(dir)/plot.png") 109 | PyPlot.show() 110 | end 111 | end 112 | 113 | # pathSelection(["./data/B4", "./data/IBM"], 114 | # [4000, 4000], 115 | # ["SMORE", "FFC"], 116 | # 6, 117 | # [.9, .92, .94, .96, .98, .99], 118 | # .0001, 119 | # ksp=[4,6], 120 | # weibull=true) 121 | -------------------------------------------------------------------------------- /ext/teavar/probability_noise.jl: -------------------------------------------------------------------------------- 1 | using DelimitedFiles, ProgressMeter, PyPlot 2 | 3 | include("./util.jl") 4 | include("./parsers.jl") 5 | include("./Algorithms/TEAVAR.jl") 6 | include("./simulation.jl") 7 | 8 | 9 | function probabilityNoise(topologies, 10 | demand_downscales, 11 | num_demands, 12 | iterations, 13 | cutoff, 14 | noise; paths="SMORE", 15 | weibull_scale=.0001, 16 | add_noise="SCENARIOS", 17 | plot=true, 18 | dirname="./data/raw/probability_noise") 19 | env = Gurobi.Env() 20 | 21 | error_vals = [[] for i in 1:length(noise)] 22 | 23 | dir = nextRun(dirname) 24 | for j in 1:length(noise) 25 | writedlm("$(dir)/noise_$(noise[j])_error", [["val_optimal", "val", "error_val"]]) 26 | end 27 | 28 | progress = ProgressMeter.Progress(length(topologies)*num_demands*iterations*length(noise), .1, "Computing Probability Noise Error...", 50) 29 | noise_error = zeros(length(noise)) 30 | for t in 1:length(topologies) 31 | links, capacity, link_probs, nodes = readTopology(topologies[t]) 32 | for d in 1:num_demands 33 | demand, flows = readDemand("$(topologies[t])/demand", length(nodes), d, downscale=demand_downscales[t]) 34 | # T, Tf, k = parsePaths("$(topologies[t])/paths/$(paths)", links, flows) 35 | k = 10 36 | T, Tf, g = getTunnels(nodes, links, capacity, flows, k) 37 | 38 | for i in 1:iterations 39 | weibull_probs = weibullProbs(length(link_probs), shape=.8, scale=weibull_scale) 40 | scenarios, scenario_probs = subScenarios(weibull_probs, cutoff, first=true, last=false) 41 | # scenarios_w_noise, scenario_probs_w_noise = subScenarios(weibull_probs_noise, cutoff, first=true, last=false) 42 | var, cvar, a, max_u = TEAVAR(env, links, capacity, flows, demand, 0, k, T, Tf, scenarios, scenario_probs) 43 | losses = calculateLossReallocation(links, capacity, demand, flows, T, Tf, k, a, scenarios, scenario_probs) 44 | val_optimal = sum(losses .* scenario_probs) 45 | # val_optimal = 1-cvar 46 | # val_optimal = sum((1 .- max_u) .* scenario_probs) 47 | for j in 1:length(noise) 48 | scenario_probs_noise = [] 49 | if add_noise == "EVENTS" 50 | weibull_probs_noise = map(p -> p + (p * noise[j] * rand(Uniform(-1, 1))), weibull_probs) 51 | scenario_probs_noise = getProbabilities(scenarios, weibull_probs_noise) 52 | scenario_probs_noise = scenario_probs_noise ./ sum(scenario_probs_noise) 53 | else 54 | scenario_probs_noise = map(p -> p + (p * noise[j] * rand(Uniform(-1, 1))), scenario_probs) 55 | scenario_probs_noise = scenario_probs_noise ./ sum(scenario_probs_noise) 56 | end 57 | println(scenario_probs_noise) 58 | # var, cvar, a, max_u = TEAVAR(env, links, 
capacity, flows, demand, 0, k, T, Tf, scenarios, scenario_probs_noise) 59 | losses = calculateLossReallocation(links, capacity, demand, flows, T, Tf, k, a, scenarios, scenario_probs_noise) 60 | val = sum(losses .* scenario_probs_noise) 61 | # val = 1-cvar 62 | # val = sum((1 .- max_u) .* scenario_probs_noise) 63 | err = abs((val_optimal - val)/val_optimal) 64 | noise_error[j] += err 65 | open("$(dir)/noise_$(noise[j])_error", "a") do io 66 | writedlm(io, transpose([val_optimal, val, err])) 67 | end 68 | ProgressMeter.next!(progress, showvalues = [(:topology,topologies[t]), (:demand,"$(d)/$(num_demands)"), (:iterations,"$(i)/$(iterations)"), (:noise, noise[j]), (:error, err)]) 69 | end 70 | end 71 | end 72 | end 73 | y_vals = noise_error ./ (num_demands * iterations * length(topologies)) 74 | 75 | # LOG RESULTS 76 | writedlm("$(dir)/noises", noise) 77 | writedlm("$(dir)/y_vals", y_vals) 78 | writedlm("$(dir)/params", [["topologies", "demand_downscales", "num_demands", "iterations", "cutoff", "noise", "paths", "weibull_scale"], [topologies, demand_downscales, num_demands, iterations, cutoff, noise, paths, weibull_scale]]) 79 | 80 | # PLOT 81 | if plot 82 | PyPlot.clf() 83 | nbars = length(noise) 84 | barWidth = 1/(nbars) 85 | for i in 1:length(y_vals) 86 | PyPlot.bar(barWidth .* i, y_vals[i], alpha=0.8, width=barWidth) 87 | end 88 | PyPlot.ylabel("Percent Error", fontweight="bold") 89 | PyPlot.legend(noise, loc="upper right") 90 | PyPlot.savefig("$(dir)/plot.png") 91 | PyPlot.show() 92 | end 93 | end 94 | 95 | -------------------------------------------------------------------------------- /ext/teavar/run_teavar.jl: -------------------------------------------------------------------------------- 1 | include("./util.jl") 2 | include("./parsers.jl") 3 | include("./Algorithms/TEAVAR.jl") 4 | 5 | env = Gurobi.Env() 6 | 7 | topology = "B4" 8 | weibull = true 9 | shape = 0.8 10 | scale = 0.01 11 | paths = "SMORE" 12 | demand_num = 1 13 | beta=0.90 14 | 15 | 16 | links, capacity, link_probs, nodes = readTopology(topology) 17 | demand, flows = readDemand("$(topology)/demand", length(nodes), demand_num, matrix=true) 18 | T, Tf, k = parsePaths("$(topology)/paths/$(paths)", links, flows) 19 | 20 | if weibull 21 | probabilities = weibullProbs(length(links), shape=shape, scale=scale) 22 | else 23 | probabilities = link_probs 24 | end 25 | cutoff = (sum(probabilities)/length(probabilities))^2 26 | scenarios, scenario_probs = subScenariosRecursion(probabilities, cutoff) 27 | TEAVAR(env, links, capacity, flows, demand, beta, k, 28 | T, Tf, scenarios, scenario_probs, explain=true, verbose=true, 29 | utilization=true) 30 | 31 | 32 | -------------------------------------------------------------------------------- /ext/teavar/run_teavar_star.jl: -------------------------------------------------------------------------------- 1 | include("./util.jl") 2 | include("./parsers.jl") 3 | include("./Algorithms/TEAVAR_Star.jl") 4 | using Dates; 5 | 6 | env = Gurobi.Env() 7 | setparam!(env, "Method", 2) # choose only barrier; since crossover is needed for concurrent and that takes too long 8 | setparam!(env, "Crossover", 0) # disable barrier crossover 9 | 10 | print("method= ", getparam(env, "Method"), "; crossover= ", getparam(env, "Crossover")) 11 | 12 | topology = ARGS[1] 13 | # failure probabilities must always come from topology 14 | weibull = false 15 | shape = 0.8 16 | scale = 0.01 17 | 18 | # we will change demand numerals and beta 19 | demand_num = parse(Int, ARGS[2]) # used to be 1 20 | beta=parse(Float64, 
ARGS[3]) # used to be 0.99 21 | max_cf=ARGS[4] == "mcf" 22 | paths = ARGS[5] # used to be "SMORE" should be "SMORE4" or "SMORE8" 23 | what_to_read = ARGS[6] 24 | cutoff_downscale = parse(Float64, ARGS[7]) # used to 10 25 | 26 | # output file for result analysis and debugging 27 | outputfile=string(topology, "_d", demand_num, "_beta", beta, "_mcf", max_cf, "_paths", paths, "_topo", what_to_read, "_cutoffDS", cutoff_downscale) 28 | 29 | 30 | links, capacity, link_probs, nodes = readTopology(topology, what_to_read=what_to_read) 31 | demand, flows = readDemand("$(topology)/demand", length(nodes), demand_num, matrix=true) 32 | T, Tf, k = parsePaths("$(topology)/paths/$(paths)", links, flows) 33 | 34 | if weibull 35 | probabilities = weibullProbs(length(links), shape=shape, scale=scale) 36 | else 37 | probabilities = link_probs 38 | end 39 | println("FailureProbs= ", probabilities) 40 | 41 | beginning_cutoff = parse(Float64, ARGS[8]) 42 | 43 | let cutoff = beginning_cutoff 44 | while true 45 | println(Dates.format(now(), "HH:MM:SS"), " going to subScenariosRecursion with cutoff=", cutoff) 46 | flush(stdout) 47 | 48 | task_cutoff_finder = @task global scenarios, scenario_probs = subScenariosRecursion(probabilities, cutoff) 49 | t = Timer(60) 50 | # run for 60s or until task cutoff finder finishes 51 | schedule(task_cutoff_finder) 52 | while (!@isdefined(scenarios) || length(scenarios) == 0 || isopen(t)) && !istaskdone(task_cutoff_finder) 53 | # println(Dates.format(now(), "HH::MM::SS"), ": testing") 54 | # flush(stdout) 55 | yield() 56 | end 57 | 58 | nscenarios = length(scenarios) 59 | total_scenario_prob = sum(scenario_probs) 60 | 61 | println(Dates.format(now(), "HH:MM:SS"), " cutoff =", cutoff, " #scenarios=", nscenarios, " total_scenar_prob=", total_scenario_prob) 62 | flush(stdout) 63 | 64 | if total_scenario_prob >= 1 - (1-beta)/ cutoff_downscale 65 | break 66 | end 67 | 68 | if !isopen(t) 69 | println(Dates.format(now(), "HH:MM:SS"), ": timed out!") 70 | flush(stdout) 71 | break 72 | end 73 | 74 | cutoff = cutoff / cutoff_downscale 75 | end 76 | end 77 | 78 | TEAVAR_Star(env, links, capacity, flows, demand, beta, k, 79 | T, Tf, scenarios, scenario_probs, outputfile, explain=true, verbose=true, 80 | utilization=true, max_concurrent_flow=max_cf) 81 | 82 | 83 | -------------------------------------------------------------------------------- /ext/teavar/scenario_coverage.jl: -------------------------------------------------------------------------------- 1 | using DelimitedFiles, ProgressMeter, PyPlot 2 | 3 | include("./util.jl") 4 | include("./parsers.jl") 5 | 6 | function scenarioCoverage(topology, 7 | iterations, 8 | cutoffs; shape=.8, 9 | scale=.0001, 10 | weibull=true, 11 | plot=true, 12 | dirname="./data/raw/scenario_coverage") 13 | y_vals = [] 14 | weibull_probs = [] 15 | labels = [] 16 | optimal_vals = [] 17 | 18 | # COMUTE SCENARIOS 19 | links, capacity, link_probs, nodes = readTopology(topology) 20 | for j in 1:iterations 21 | if weibull 22 | probs = weibullProbs(length(link_probs), shape=shape, scale=scale) 23 | else 24 | probs = link_probs 25 | end 26 | push!(weibull_probs, probs) 27 | scenarios, probabilities = subScenariosRecursion(probs, sum(probs)/length(probs)^2) 28 | push!(optimal_vals, sum(probabilities)) 29 | end 30 | 31 | # COMPUTE COVERAGE 32 | progress = ProgressMeter.Progress(length(cutoffs)*iterations, .1, "Computing Scenario Coverage...", 50) 33 | for i in 1:length(cutoffs) 34 | vals = [] 35 | for j in 1:iterations 36 | scenarios, probabilities = 
subScenariosRecursion(weibull_probs[j], cutoffs[i]) 37 | push!(vals, sum(probabilities)) 38 | ProgressMeter.next!(progress, showvalues = [(:cutoff, cutoffs[i]), (:iteration, "$(j)/$(iterations)"), (:coverage, sum(probabilities))]) 39 | end 40 | push!(y_vals, sum(vals)/length(vals)) 41 | push!(labels, cutoffs[i]) 42 | end 43 | push!(labels, "optimal") 44 | push!(y_vals, sum(optimal_vals)/length(optimal_vals)) 45 | 46 | 47 | # LOG RESULTS 48 | dir = nextRun(dirname) 49 | writedlm("$(dir)/weibull_probs", weibull_probs) 50 | writedlm("$(dir)/coverage", y_vals) 51 | writedlm("$(dir)/cutoffs", labels) 52 | writedlm("$(dir)/params", [["iterations"], [iterations]]) 53 | 54 | # PLOT 55 | if plot 56 | PyPlot.clf() 57 | nbars = length(labels) 58 | barWidth = 1/(nbars) 59 | for i in 1:length(y_vals) 60 | PyPlot.bar((barWidth .* i), y_vals[i], alpha=0.8, width=barWidth) 61 | end 62 | PyPlot.ylabel("Total Coverage", fontweight="bold") 63 | PyPlot.legend(labels, loc="lower right") 64 | PyPlot.savefig("$(dir)/plot.png") 65 | PyPlot.show() 66 | end 67 | end 68 | 69 | # scenarioCoverage("./Data/IBM", 100, [.001,.0001,.00001,.000001,.0000001]; shape=.8, scale=.0001, weibull=true) 70 | 71 | 72 | -------------------------------------------------------------------------------- /ext/teavar/server.jl: -------------------------------------------------------------------------------- 1 | import Pkg 2 | # Pkg.add("Conda") 3 | # Pkg.add("PyPlot") 4 | # Pkg.add("ProgressMeter") 5 | # Pkg.add("Gurobi") 6 | # Pkg.add("DataFrames") 7 | # Pkg.add("JuMP") 8 | # Pkg.add("Combinatorics") 9 | # Pkg.add("LightGraphs") 10 | # Pkg.add("DelimitedFiles") 11 | # Pkg.add("MathProgBase") 12 | # Pkg.add("GraphPlot") 13 | # Pkg.add("Compose") 14 | # Pkg.add("Cairo") 15 | # Pkg.add("Fontconfig") 16 | # Pkg.add("Distributions") 17 | # Pkg.add("HTTP") 18 | # Pkg.add("Sockets") 19 | # Pkg.add("JSON2") 20 | # Pkg.add("JSON") 21 | # using Conda; Conda.add("pyqt") 22 | 23 | using HTTP, Sockets, JSON2 24 | 25 | include("./util.jl") 26 | include("./parsers.jl") 27 | include("./Algorithms/TEAVAR.jl") 28 | 29 | mutable struct Request 30 | topology::String 31 | demand::String 32 | path::String 33 | beta::String 34 | cutoff::String 35 | k::String 36 | downscale_demand::String 37 | end 38 | 39 | mutable struct Response 40 | var::Float64 41 | cvar::Float64 42 | allocation::Array 43 | num_nodes::Int64 44 | capacity::Array 45 | failure_probabilities::Array 46 | flows::Array 47 | demand::Array 48 | T::Array 49 | Tf::Array 50 | links::Array 51 | scenarios::Array 52 | probabilities::Array 53 | X::Array 54 | end 55 | 56 | ROUTER = HTTP.Router() 57 | 58 | function JSONHandler(req::HTTP.Request) 59 | if req.method == "OPTIONS" 60 | res = HTTP.Response(200) 61 | res.headers = [Pair("Access-Control-Allow-Origin", "*"), 62 | Pair("Vary", "Origin"), 63 | Pair("Vary", "Access-Control-Request-Method"), 64 | Pair("Vary", "Access-Control-Request-Headers"), 65 | Pair("Access-Control-Allow-Headers", "Content-Type, Origin, Accept, token"), 66 | Pair("Access-Control-Allow-Methods", "GET, POST,OPTIONS")] 67 | return res 68 | response_body = HTTP.handle(ROUTER, req) 69 | else 70 | response_body = HTTP.handle(ROUTER, req) 71 | end 72 | res = HTTP.Response(200, JSON2.write(response_body)) 73 | res.headers = [Pair("Access-Control-Allow-Origin", "*")] 74 | return res 75 | end 76 | 77 | 78 | function teavar(req::HTTP.Request) 79 | json = JSON2.read(IOBuffer(HTTP.payload(req)), Request) 80 | println(json) 81 | 82 | topology = json.topology 83 | demand_num = parse(Int64, 
json.demand) 84 | beta = parse(Float64, json.beta) 85 | cutoff = parse(Float64, json.cutoff) 86 | k = parse(Int64, json.k) 87 | downscale_demand = parse(Int64, json.downscale_demand) 88 | links, capacity, link_probs, nodes = readTopology(topology, downscale=1) 89 | 90 | 91 | if topology == "B4" 92 | weibull_probs = [0.00100046, 0.000419485, 0.000612366, 0.00237276, 0.00612306, 0.000365313, 0.00247675, 0.00166383, 0.000588749, 0.00174909, 0.000456167, 0.000834483, 0.00142059, 0.00243267, 0.00488089, 0.000947656, 4.65804e-5, 0.00280921, 0.00112321, 0.000168123, 0.000485535, 0.000119134, 0.000138946, 9.39895e-5, 0.000366816, 0.000420305, 0.000272837, 0.00109307, 0.00200512, 0.000152399, 0.000880158, 0.00019616, 0.00175013, 0.00140933, 0.00150634, 0.000185742, 0.000741036, 0.00261969] 93 | else 94 | weibull_probs = weibullProbs(length(links), shape=.8, scale=.001) 95 | end 96 | scenarios, scenario_probs = subScenarios(weibull_probs, cutoff, first=true, last=false) 97 | # w_scenarios, w_probs = subScenarios(weibull_probs, cutoff, first=true, last=false) 98 | demand, flows = readDemand("$(topology)/demand", length(nodes), demand_num, scale=1.0, downscale=downscale_demand) 99 | 100 | T, Tf = [], [] 101 | try 102 | T, Tf, k = parsePaths("$(topology)/paths/$(json.path)", links, flows) 103 | catch 104 | if json.path == "ED" 105 | T, Tf, k, g = getTunnels(nodes, links, capacity, flows, 30, edge_disjoint=true) 106 | else 107 | T, Tf, k, g = getTunnels(nodes, links, capacity, flows, k) 108 | end 109 | end 110 | # a = parseYatesSplittingRatios("$(topology)/paths/$(algorithm)", k, flows, zeroindex=zeroindex) 111 | # a = parseYatesAllocations("$(topology)/paths/$(algorithm)", k, demand, flows, zeroindex=zeroindex) 112 | 113 | env = Gurobi.Env() 114 | var, cvar, a = TEAVAR(env, links, capacity, flows, demand, beta, k, T, Tf, scenarios, scenario_probs) 115 | println(var) 116 | println(cvar) 117 | println(a) 118 | 119 | nscenarios = length(scenarios) 120 | ntunnels = length(T) 121 | X = ones(nscenarios,ntunnels) 122 | for s in 1:nscenarios 123 | for t in 1:ntunnels 124 | if size(T[t],1) == 0 125 | X[s,t] = 0 126 | else 127 | for e in 1:length(links) 128 | if scenarios[s][e] == 0 129 | back_edge = findfirst(x -> x == (links[e][2],links[e][1]), links) 130 | if in(e, T[t]) || in(back_edge, T[t]) 131 | X[s,t] = 0 132 | end 133 | end 134 | end 135 | end 136 | end 137 | end 138 | http_res = Response(var, cvar, [a[i, :] for i in 1:size(a, 1)], length(nodes), capacity, weibull_probs, flows, demand, T, Tf, links, scenarios, scenario_probs, [X[i, :] for i in 1:size(X, 1)]) 139 | return http_res 140 | end 141 | 142 | 143 | function main() 144 | println("Listening on localhost:8080....") 145 | HTTP.@register(ROUTER, "POST", "/api/teavar", teavar) 146 | # HTTP.serve(JSONHandler, Sockets.localhost, 8080) 147 | HTTP.serve(JSONHandler, "128.30.92.156", 8080) 148 | end 149 | 150 | main() 151 | -------------------------------------------------------------------------------- /ext/teavar/simulation.jl: -------------------------------------------------------------------------------- 1 | 2 | function calculateLossReallocation(edges, capacity, demand, flows, T, Tf, k, splittingratios, scenarios, probabilities; progress=false) 3 | nedges = length(edges) 4 | nflows = length(flows) 5 | ntunnels = length(T) 6 | nscenarios = length(scenarios) 7 | 8 | #CREATE TUNNEL SCENARIO MATRIX 9 | X = ones(nscenarios,ntunnels) 10 | for s in 1:nscenarios 11 | for t in 1:ntunnels 12 | if size(T[t],1) == 0 13 | X[s,t] = 0 14 | else 15 | for e in 
1:nedges 16 | if scenarios[s][e] == 0 17 | back_edge = findfirst(x -> x == (edges[e][2],edges[e][1]), edges) 18 | if in(e, T[t]) || in(back_edge, T[t]) 19 | X[s,t] = 0 20 | end 21 | end 22 | end 23 | end 24 | end 25 | end 26 | 27 | 28 | #CREATE TUNNEL EDGE MATRIX 29 | L = zeros(ntunnels, nedges) 30 | for t in 1:ntunnels 31 | for e in 1:nedges 32 | if in(e, T[t]) 33 | L[t,e] = 1 34 | end 35 | end 36 | end 37 | 38 | as = zeros(nflows,k) 39 | routed = zeros(nscenarios, nflows, k) 40 | u = zeros(nscenarios, nflows) 41 | t = zeros(nscenarios) 42 | 43 | # SCENARIO LOSS PER FLOW 44 | for s in 1:nscenarios 45 | for f in 1:nflows 46 | totalup = 0 47 | for t in 1:size(Tf[f],1) 48 | totalup += splittingratios[f,t] * X[s,Tf[f][t]] 49 | end 50 | if totalup == 0 51 | splittingratios[f,:] = splittingratios[f,:] .+ .2 52 | for t in 1:size(Tf[f],1) 53 | totalup += splittingratios[f,t] * X[s,Tf[f][t]] 54 | end 55 | end 56 | 57 | for t in 1:size(Tf[f],1) 58 | # as[f,t] = max(as[f,t], splittingratios[f,t] / totalup * demand[f] * X[s,Tf[f][t]]) 59 | if totalup != 0 60 | routed[s,f,t] = splittingratios[f,t] / totalup * demand[f] * X[s,Tf[f][t]] 61 | end 62 | end 63 | end 64 | 65 | 66 | t[s] = sum(routed[s,:,:]) 67 | 68 | congestion_loss = 0 69 | for e in 1:nedges 70 | edge_utilization = 0 71 | for f in 1:nflows 72 | edge_utilization += sum(routed[s,f,t] * L[Tf[f][t],e] * X[s,Tf[f][t]] for t in 1:size(Tf[f],1)) 73 | for t in enumerate(Tf[f]) 74 | # edge_utilization += sum(as[f,t[1]] * L[t[2],e] * X[s,t[2]]) 75 | end 76 | end 77 | # println("Edge: ", edges[e]) 78 | # println(max(0, edge_utilization - capacity[e])) 79 | congestion_loss += max(0, round((edge_utilization - capacity[e])*1000)/1000) 80 | end 81 | t[s] -= congestion_loss 82 | end 83 | 84 | umax = map(x -> round((1 - x/sum(demand))*100000)/100000, t) 85 | return umax 86 | end 87 | 88 | 89 | function PDF(losses, probabilities, sla) 90 | usorted = [] 91 | psorted = [] 92 | umodified = losses 93 | pmodified = probabilities 94 | while length(umodified) > 0 95 | s = argmin(umodified) 96 | push!(usorted, umodified[s]) 97 | push!(psorted, pmodified[s]) 98 | umodified = umodified[1:end .!= s] 99 | pmodified = pmodified[1:end .!= s] 100 | end 101 | 102 | total = 0 103 | loss = 0 104 | for s in 1:length(usorted) 105 | loss = usorted[s] 106 | if loss > sla 107 | break 108 | end 109 | total += psorted[s] 110 | 111 | end 112 | return total 113 | end 114 | 115 | function PDF(losses, sla) 116 | c = 0 117 | for s in 1:length(losses) 118 | if (1-losses[s]) >= sla 119 | c += 1 120 | end 121 | end 122 | return c / length(losses) 123 | end 124 | 125 | function VarUniform(losses, beta) 126 | usorted = sort(losses) 127 | probabilities = zeros(length(losses)) .+ 1/length(losses) 128 | 129 | total = 0 130 | loss = 0 131 | varindex = 0 132 | for s in 1:size(usorted, 1) 133 | total += probabilities[s] 134 | loss = usorted[s] 135 | if total >= beta 136 | break 137 | end 138 | end 139 | return loss 140 | end 141 | 142 | function VAR(losses, probabilities, beta) 143 | usorted = [] 144 | psorted = [] 145 | umodified = losses 146 | pmodified = probabilities 147 | while length(umodified) > 0 148 | s = argmin(umodified) 149 | push!(usorted, umodified[s]) 150 | push!(psorted, pmodified[s]) 151 | umodified = umodified[1:end .!= s] 152 | pmodified = pmodified[1:end .!= s] 153 | end 154 | 155 | total = 0 156 | loss = 0 157 | varindex = 0 158 | for s in 1:size(usorted, 1) 159 | total += psorted[s] 160 | loss = usorted[s] 161 | if total >= beta 162 | break 163 | end 164 | end 165 | return 
loss 166 | end 167 | 168 | function CVAR(losses, probabilities, beta) 169 | usorted = [] 170 | psorted = [] 171 | umodified = losses 172 | pmodified = probabilities 173 | while length(umodified) > 0 174 | s = argmin(umodified) 175 | push!(usorted, umodified[s]) 176 | push!(psorted, pmodified[s]) 177 | umodified = umodified[1:end .!= s] 178 | pmodified = pmodified[1:end .!= s] 179 | end 180 | 181 | total = 0 182 | loss = 0 183 | prob_total = 0 184 | for s in 1:length(usorted) 185 | total += psorted[s] 186 | if total >= beta 187 | prob_total += psorted[s] 188 | loss += usorted[s]*psorted[s] 189 | end 190 | end 191 | return loss / prob_total 192 | end 193 | 194 | 195 | function simulateUtilizationNoFailures(edges, capacity, demand, flows, T, Tf, k, a, bandwidth_allowed) 196 | nedges = length(edges) 197 | nflows = length(flows) 198 | ntunnels = length(T) 199 | 200 | #CREATE TUNNEL EDGE MATRIX 201 | L = zeros(ntunnels, nedges) 202 | for t in 1:ntunnels 203 | for e in 1:nedges 204 | if in(e, T[t]) 205 | L[t,e] = 1 206 | end 207 | end 208 | end 209 | 210 | 211 | routed = zeros(nflows, k) 212 | for f in 1:nflows 213 | totalup = 0 214 | for t in 1:size(Tf[f],1) 215 | totalup += a[f,t] 216 | end 217 | if totalup == 0 218 | a[f,:] = a[f,:] .+ .2 219 | for t in 1:size(Tf[f],1) 220 | totalup += a[f,t] 221 | end 222 | end 223 | 224 | for t in 1:size(Tf[f],1) 225 | routed[f,t] = a[f,t] / totalup * bandwidth_allowed[f] * demand[f] 226 | end 227 | end 228 | 229 | edge_utilization = zeros(nedges) 230 | edge_utilization_percentage = zeros(nedges) 231 | 232 | for e in 1:nedges 233 | for f in 1:nflows 234 | edge_utilization[e] += sum(routed[f,t] * L[Tf[f][t],e] for t in 1:size(Tf[f],1)) 235 | end 236 | edge_utilization_percentage = edge_utilization[e]/capacity[e] 237 | end 238 | 239 | return edge_utilization, routed 240 | end -------------------------------------------------------------------------------- /ext/teavar/throughput.jl: -------------------------------------------------------------------------------- 1 | using DelimitedFiles,ProgressMeter, PyPlot 2 | 3 | include("./util.jl") 4 | include("./parsers.jl") 5 | include("./Algorithms/TEAVAR.jl") 6 | include("./Algorithms/MaxMin.jl") 7 | include("./Algorithms/SMORE.jl") 8 | include("./Algorithms/FFC.jl") 9 | include("./simulation.jl") 10 | 11 | function getThroughputGraphs(algorithms, 12 | topologies, 13 | demand_downscales, 14 | num_demands, 15 | iterations, 16 | bars, 17 | cutoff; 18 | k=12, 19 | teavar_paths="KSP", 20 | weibull_scale=.0001, 21 | plot=true, 22 | dirname="./data/raw/throughput_data/") 23 | env = Gurobi.Env() 24 | x_vals = bars 25 | y_vals_vars = [] 26 | y_vals_cvars = [] 27 | labels = [] 28 | 29 | 30 | ## COMPUTE SCENARIOS 31 | scenarios_all = [] 32 | scenario_probs_all = [] 33 | for t in 1:length(topologies) 34 | topology = topologies[t] 35 | links, capacity, link_probs, nodes = readTopology(topology, downscale=demand_downscales[t]) 36 | scenarios_all_top = [] 37 | scenario_probs_top = [] 38 | for i in 1:iterations 39 | link_probs = weibullProbs(length(links), shape=.8, scale=weibull_scale) 40 | scenarios, probs = subScenarios(link_probs, cutoff, first=true, last=false) 41 | push!(scenarios_all_top, scenarios) 42 | push!(scenario_probs_top, probs) 43 | end 44 | push!(scenarios_all, scenarios_all_top) 45 | push!(scenario_probs_all, scenario_probs_top) 46 | end 47 | 48 | progress = ProgressMeter.Progress(length(algorithms)*length(topologies)*num_demands*iterations*length(x_vals), .1, "Computing Throughput...", 50) 49 | for alg in 
1:length(algorithms) 50 | beta_cvar_totals = zeros(length(x_vals)) 51 | beta_var_totals = zeros(length(x_vals)) 52 | for t in 1:length(topologies) 53 | links, capacity, link_probs, nodes = readTopology(topologies[t]) 54 | for d in 1:num_demands 55 | demand, flows = readDemand("$(topologies[t])/demand", length(nodes), d, downscale=demand_downscales[t]) 56 | for i in 1:iterations 57 | if algorithms[alg] == "TEAVAR" 58 | if teavar_paths != "KSP" 59 | T, Tf, k = parsePaths("$(topologies[t])/paths/$(teavar_paths)", links, flows) 60 | else 61 | T, Tf, g = getTunnels(nodes, links, capacity, flows, k) 62 | end 63 | for b in 1:length(x_vals) 64 | beta = x_vals[b] 65 | var, cvar, a = TEAVAR(env, links, capacity, flows, demand, beta, k, T, Tf, scenarios_all[t][i], scenario_probs_all[t][i], average=true) 66 | losses = calculateLossReallocation(links, capacity, demand, flows, T, Tf, k, a, scenarios_all[t][i], scenario_probs_all[t][i]) 67 | cvar = CVAR(losses, scenario_probs_all[t][i], beta) 68 | var = VAR(losses, scenario_probs_all[t][i], beta) 69 | beta_cvar_totals[b] += cvar 70 | beta_var_totals[b] += var 71 | ProgressMeter.next!(progress, showvalues = [(:algorithmn,algorithms[alg]), (:topology,topologies[t]), (:demand,"$(d)/$(num_demands)"), (:iteration, "$(i)/$(iterations)"), (:beta, x_vals[b]), (:cvar, cvar), (:var, var)]) 72 | end 73 | else 74 | T, Tf, k = parsePaths("$(topologies[t])/paths/$(algorithms[alg])", links, flows) 75 | a = parseYatesSplittingRatios("$(topologies[t])/paths/$(algorithms[alg])", k, flows) 76 | losses = calculateLossReallocation(links, capacity, demand, flows, T, Tf, k, a, scenarios_all[t][i], scenario_probs_all[t][i]) 77 | for b in 1:length(x_vals) 78 | beta = x_vals[b] 79 | cvar = CVAR(losses, scenario_probs_all[t][i], beta) 80 | var = VAR(losses, scenario_probs_all[t][i], beta) 81 | beta_cvar_totals[b] += cvar 82 | beta_var_totals[b] += var 83 | ProgressMeter.next!(progress, showvalues = [(:algorithmn,algorithms[alg]), (:topology,topologies[t]), (:demand,"$(d)/$(num_demands)"), (:iteration, "$(i)/$(iterations)"), (:beta, x_vals[b]), (:cvar, cvar), (:var, var)]) 84 | end 85 | end 86 | end 87 | end 88 | end 89 | beta_avg_vars = beta_var_totals ./ (num_demands * length(topologies) * iterations * length(x_vals)) 90 | beta_avg_cvars = beta_cvar_totals ./ (num_demands * length(topologies) * iterations * length(x_vals)) 91 | push!(y_vals_vars, 1 .- beta_avg_vars) 92 | push!(y_vals_cvars, 1 .- beta_avg_cvars) 93 | push!(labels, algorithms[alg]) 94 | end 95 | 96 | 97 | # LOG OUTPUTS 98 | dir = nextRun(dirname) 99 | writedlm("$(dir)/x_vals", x_vals) 100 | writedlm("$(dir)/y_vals_cvars", y_vals_cvars) 101 | writedlm("$(dir)/y_vals_vars", y_vals_vars) 102 | writedlm("$(dir)/params", [["algorithms", "topologies", "demand_downscales", "num_demands", "iterations", "bars", "cutoff", "k", "tevar_paths", "weibull_scale"], [algorithms, topologies, demand_downscales, num_demands, iterations, bars, cutoff, k, teavar_paths, weibull_scale]]) 103 | 104 | 105 | if plot 106 | PyPlot.clf() 107 | nbars = length(labels) 108 | ngroups = length(x_vals) 109 | barWidth = 1/(nbars + 1) 110 | for bar in 1:nbars 111 | group_ys = map(tup -> y_vals_vars[bar][tup[1]], enumerate(x_vals)) 112 | PyPlot.bar(collect(1:ngroups) .- 1 .+ (barWidth .* bar), group_ys, alpha=0.8, width=barWidth) 113 | end 114 | PyPlot.xlabel("Probability", fontweight="bold") 115 | PyPlot.ylabel("P(T > X)", fontweight="bold") 116 | PyPlot.legend(labels, loc="lower right") 117 | PyPlot.xticks(collect(1:ngroups) .- 1 .+ (barWidth * 
(nbars+1)/2), bars) 118 | PyPlot.savefig("$(dir)/plot.png") 119 | PyPlot.show() 120 | end 121 | end 122 | 123 | # getBarGraphsMultiple(["SMORE", "FFC", "ECMP"], 124 | # ["./data/B4", "./data/IBM"], 125 | # [10000, 5000], 126 | # 10, 127 | # 10, 128 | # [.9, .95, .99, .999, .9999], 129 | # .0005, 130 | # tevar_paths="SMORE", 131 | # weibull_scale=.01) 132 | -------------------------------------------------------------------------------- /ext/teavar/timer.jl: -------------------------------------------------------------------------------- 1 | using DelimitedFiles, ProgressMeter, PyPlot, Gurobi 2 | 3 | include("./util.jl") 4 | include("./parsers.jl") 5 | include("./Algorithms/TEAVAR.jl") 6 | 7 | function timeScenarios(cutoffs, iterations, weibull=true, shape=.8, scale=.0001) 8 | x_vals = [18, 22, 26, 30, 34, 38, 42, 46, 50, 54, 58, 62, 66, 70, 74, 78, 82, 86, 90, 94, 100, 104, 108, 112] 9 | y_vals = [[] for i=1:length(cutoffs) + 1] 10 | p = Progress(length(x_vals)*iterations*(length(cutoffs)+1), .1, "Scenario Times...", 50) 11 | for k in 1:length(x_vals) 12 | vals = zeros(length(cutoffs) + 1) 13 | for i in 1:iterations 14 | if weibull 15 | probabilities = weibullProbs(x_vals[k], shape=.8, scale=.0001) 16 | else 17 | probabilities = map(n -> rand(n) .* 2 ./ 10, iterations) 18 | end 19 | cutoff = (sum(probabilities)/length(probabilities))^3 20 | next = @elapsed subScenariosRecursion(probabilities, cutoff) 21 | next!(p, showvalues = [(:x_vals,"$(k)/$(length(x_vals))"), (:iterations,"$(i)/$(iterations)"), (:cutoffs,cutoff), (:last, next)]) 22 | vals[1] += next 23 | for j in 1:length(cutoffs) 24 | next = @elapsed subScenariosRecursion(probabilities, cutoffs[j]) 25 | next!(p, showvalues = [(:x_vals,"$(k)/$(length(x_vals))"), (:iterations,"$(i)/$(iterations)"), (:cutoff,cutoffs[j]), (:last, next)]) 26 | vals[j+1] += next 27 | end 28 | end 29 | vals ./ iterations 30 | for v in 1:length(vals) 31 | push!(y_vals[v], vals[v]) 32 | end 33 | end 34 | 35 | z = zeros(length(x_vals), length(y_vals) + 1) 36 | z[:,1] = x_vals 37 | for i in 1:length(y_vals) 38 | z[:,i+1] = y_vals[i] 39 | end 40 | writedlm("./data/raw/time_scenarios/x_vals", x_vals) 41 | writedlm("./data/raw/time_scenarios/y_vals", y_vals) 42 | writedlm("./data/raw/time_scenarios/z", z) 43 | end 44 | 45 | function timeOptimizer(topologies, 46 | cutoffs, 47 | iterations; 48 | weibull=true, 49 | shape=.8, 50 | scale=.0001, 51 | paths="SMORE", 52 | plot=true, 53 | beta=.9, 54 | demand_num=1, 55 | dirname="./data/raw/time_optimizer") 56 | 57 | env = Gurobi.Env() 58 | 59 | y_vals = [[] for i=1:length(cutoffs) + 1] 60 | x_vals = [] 61 | 62 | p = Progress(length(topologies)*iterations*(length(cutoffs)+1), .1, "Scenario Times...", 50) 63 | for t in 1:length(topologies) 64 | vals = zeros(length(cutoffs) + 1) 65 | links, capacity, link_probs, nodes = readTopology(topologies[t]) 66 | demand, flows = readDemand("$(topologies[t])/demand", length(nodes), demand_num, matrix=true) 67 | T, Tf, k = parsePaths("$(topologies[t])/paths/$(paths)", links, flows) 68 | push!(x_vals, length(links)) 69 | 70 | for i in 1:iterations 71 | if weibull 72 | probabilities = weibullProbs(length(links), shape=.8, scale=.0001) 73 | else 74 | probabilities = map(n -> rand(n) .* 2 ./ 10, iterations) 75 | end 76 | 77 | ## OPTIMAL 78 | cutoff = (sum(probabilities)/length(probabilities))^2 79 | scenarios, scenario_probs = subScenariosRecursion(probabilities, cutoff) 80 | next = @elapsed TEAVAR(env, links, capacity, flows, demand, beta, k, T, Tf, scenarios, scenario_probs) 81 | next!(p, 
showvalues = [(:topologies,"$(t)/$(length(topologies))"), (:iterations,"$(i)/$(iterations)"), (:cutoffs,cutoff), (:last, next)]) 82 | vals[1] += next 83 | 84 | ## OTHERS 85 | for j in 1:length(cutoffs) 86 | scenarios, scenario_probs = subScenariosRecursion(probabilities, cutoffs[j]) 87 | next = @elapsed TEAVAR(env, links, capacity, flows, demand, beta, k, T, Tf, scenarios, scenario_probs) 88 | next!(p, showvalues = [(:topologies,"$(t)/$(length(topologies))"), (:iterations,"$(i)/$(iterations)"), (:cutoff,cutoffs[j]), (:last, next)]) 89 | vals[j+1] += next 90 | end 91 | end 92 | vals ./ iterations 93 | for v in 1:length(vals) 94 | push!(y_vals[v], vals[v]) 95 | end 96 | end 97 | 98 | # LOG RESULTS 99 | dir = nextRun(dirname) 100 | z = zeros(length(x_vals), length(y_vals) + 1) 101 | z[:,1] = x_vals 102 | for i in 1:length(y_vals) 103 | z[:,i+1] = y_vals[i] 104 | end 105 | writedlm("$(dir)/x_vals", x_vals) 106 | writedlm("$(dir)/y_vals", y_vals) 107 | writedlm("$(dir)/z", z) 108 | 109 | # PLOT 110 | if plot 111 | PyPlot.clf() 112 | for i in 1:length(y_vals) 113 | PyPlot.plot(x_vals, y_vals[i]) 114 | end 115 | PyPlot.xlabel("Number of Edges", fontweight="bold") 116 | PyPlot.ylabel("Time (s)", fontweight="bold") 117 | PyPlot.legend(pushfirst!(map(elt -> string(elt), cutoffs), "near optimal"), loc="lower right") 118 | PyPlot.show() 119 | end 120 | end 121 | 122 | # timeOptimizer(["./data/B4", "./data/IBM"], [.001, .0001, .00001, .000001, .0000001], 2) 123 | # timeScenarios([.001, .0001, .00001, .000001, .0000001], 200) 124 | -------------------------------------------------------------------------------- /lib/__init__.py: -------------------------------------------------------------------------------- 1 | from .graph_utils import * 2 | from .problems import * 3 | from .vis import * 4 | from .config import * 5 | from .algorithms import * 6 | from .partitioning import * 7 | -------------------------------------------------------------------------------- /lib/algorithms/__init__.py: -------------------------------------------------------------------------------- 1 | from .abstract_formulation import Objective 2 | from .path_formulation import PathFormulation 3 | from .edge_formulation import EdgeFormulation 4 | from .min_max_flow_on_edge import MinMaxFlowOnEdgeOverCap 5 | from .smore import SMORE 6 | from .ncflow import * 7 | -------------------------------------------------------------------------------- /lib/algorithms/abstract_formulation.py: -------------------------------------------------------------------------------- 1 | from enum import Enum, unique 2 | import pickle 3 | import re 4 | import sys 5 | 6 | 7 | @unique 8 | class Objective(Enum): 9 | MAX_FLOW = 0 10 | MIN_MAX_UTIL = 1 11 | MAX_CONCURRENT_FLOW = 2 12 | MIN_MAX_LINK_UTIL = 3 13 | COMPUTE_DEMAND_SCALE_FACTOR = 4 14 | 15 | 16 | class AbstractFormulation(object): 17 | def __init__(self, objective, DEBUG=False, VERBOSE=False, out=None): 18 | if out is None: 19 | out = sys.stdout 20 | self._objective = objective 21 | self._warm_start_mode = False 22 | self.DEBUG = DEBUG 23 | self.VERBOSE = VERBOSE 24 | self.out = out 25 | 26 | def solve(self, problem, fixed_total_flows=[], **args): 27 | self._problem = problem 28 | self._solver = self._construct_lp(fixed_total_flows) 29 | return self._solver.solve_lp(**args) 30 | 31 | def solve_warm_start(self, problem): 32 | assert self._warm_start_mode 33 | for k, (_, _, d_k) in problem.sparse_commodity_list: 34 | constr = self._demand_constrs[k] 35 | constr.rhs = d_k 36 | self._solver.solve_lp() 37 
| 38 | @property 39 | def problem(self): 40 | return self._problem 41 | 42 | @property 43 | def model(self): 44 | return self._solver.model 45 | 46 | def extract_sol_as_dict(self): 47 | raise NotImplementedError( 48 | 'extract_sol_as_dict needs to be implemented in the subclass: {}'. 49 | format(self.__class__)) 50 | 51 | def extract_sol_as_mat(self): 52 | raise NotImplementedError( 53 | 'extract_sol_as_mat needs to be implemented in the subclass: {}'. 54 | format(self.__class__)) 55 | ########################## 56 | # Private helper methods # 57 | ########################## 58 | def _print(self, *args): 59 | print(*args, file=self.out) 60 | 61 | def _extract_inds_from_var_name(self, varName, var_group_name='f'): 62 | match = re.match(r'{}\[(\d+),(\d+)\]'.format(var_group_name), varName) 63 | return int(match.group(1)), int(match.group(2)) 64 | 65 | def _create_sol_dict(self, sol_dict_def, commodity_list): 66 | # Set zero-flow commodities to be empty lists 67 | sol_dict = {} 68 | sol_dict_no_def = dict(sol_dict_def) 69 | 70 | for commod_key in commodity_list: 71 | if commod_key in sol_dict_no_def: 72 | sol_dict[commod_key] = sol_dict_no_def[commod_key] 73 | else: 74 | sol_dict[commod_key] = [] 75 | 76 | return sol_dict 77 | 78 | def _construct_lp(self, fixed_total_flows=[]): 79 | raise NotImplementedError( 80 | '_construct_lp needs to be implemented in the subclass: {}'.format( 81 | self.__class__)) 82 | 83 | def _save_pkl(self, obj, fname): 84 | if fname.endswith('.pkl'): 85 | with open(fname, 'wb') as w: 86 | pickle.dump(obj, w) 87 | 88 | def _save_txt(self, obj, fname): 89 | if fname.endswith('.txt'): 90 | with open(fname, 'w') as w: 91 | print(obj, file=w) 92 | 93 | @property 94 | def runtime(self): 95 | raise NotImplementedError( 96 | 'runtime needs to be implemented in the subclass: {}'.format( 97 | self.__class__)) 98 | 99 | @property 100 | def obj_val(self): 101 | raise NotImplementedError( 102 | 'obj_val needs to be implemented in the subclass: {}'.format( 103 | self.__class__)) 104 | -------------------------------------------------------------------------------- /lib/algorithms/min_max_flow_on_edge.py: -------------------------------------------------------------------------------- 1 | from .abstract_formulation import Objective 2 | from .edge_formulation import EdgeFormulation 3 | from ..lp_solver import LpSolver 4 | from gurobipy import GRB, Model, quicksum 5 | from collections import defaultdict 6 | 7 | 8 | class MinMaxFlowOnEdgeOverCap(EdgeFormulation): 9 | 10 | def __init__(self, *, out, DEBUG=False, VERBOSE=False, GAMMA=1e-3): 11 | super().__init__(objective=Objective.MIN_MAX_UTIL, 12 | DEBUG=DEBUG, VERBOSE=VERBOSE, out=out) 13 | self.GAMMA = GAMMA 14 | 15 | def _construct_lp(self, fixed_total_flows=[]): 16 | 17 | m = Model("min max flow on edge over cap") 18 | 19 | # Create variables 20 | M = len(self.problem.G.edges) # number of edges 21 | K = len(self.problem.commodity_list) # number of commodity flows 22 | 23 | vars = m.addVars(M, K, vtype=GRB.CONTINUOUS, lb=0.0, name='f') 24 | max_per_edge_flow = m.addVar(vtype=GRB.CONTINUOUS, lb=0.0, name='z') 25 | m.update() 26 | self.edges_list = list(self.problem.G.edges.data('capacity')) 27 | 28 | if self.DEBUG: 29 | from functools import partial 30 | 31 | def _debug_fn(e_l, c_l, var): 32 | e, k = self._extract_inds_from_var_name(var.varName) 33 | u, v, _ = e_l[e] 34 | k, (s_k, t_k, d_k) = c_l[k] 35 | return u, v, k, s_k, t_k, d_k 36 | 37 | self.debug_fn = partial( 38 | _debug_fn, self.edges_list, self.problem.commodity_list) 39 
| else: 40 | self.debug_fn = None 41 | 42 | # Set objective 43 | # minimize the maximum flow on edge / capacity 44 | obj = max_per_edge_flow 45 | m.setObjective(obj, GRB.MINIMIZE) 46 | 47 | # Max Constraints 48 | for e, (_, _, c_e) in enumerate(self.problem.G.edges.data('capacity')): 49 | m.addConstr(vars.sum(e, '*') / c_e <= max_per_edge_flow) 50 | 51 | # Demand constraints at src/target, flow conservation constraints 52 | for k, (_, (src, target, d_k)) in enumerate(self.problem.commodity_list): 53 | flow_out = defaultdict(list) 54 | flow_in = defaultdict(list) 55 | for e, edge in enumerate(self.problem.G.edges()): 56 | flow_out[edge[0]].append(vars[e, k]) 57 | flow_in[edge[1]].append(vars[e, k]) 58 | 59 | m.addConstr(quicksum(flow_out[src]) == d_k) 60 | m.addConstr(quicksum(flow_out[src]) - 61 | quicksum(flow_in[target]) == 0) 62 | # Src should have nothing flowing in, target should have nothing flowing out 63 | m.addConstr(quicksum(flow_in[src]) + 64 | quicksum(flow_out[target]) == 0) 65 | 66 | for n in self.problem.G.nodes(): 67 | if n != src and n != target: 68 | m.addConstr( 69 | quicksum(flow_out[n]) - quicksum(flow_in[n]) == 0) 70 | 71 | edge_idx = {edge: e for e, edge in enumerate(self.problem.G.edges)} 72 | for edge, total_flow in fixed_total_flows: 73 | m.addConstr(vars.sum(edge_idx[edge], '*') == total_flow) 74 | 75 | return LpSolver(m, self.debug_fn, self.DEBUG, self.VERBOSE, self.out) 76 | -------------------------------------------------------------------------------- /lib/algorithms/ncflow/__init__.py: -------------------------------------------------------------------------------- 1 | from .ncflow_single_iter import NCFlowSingleIter as NcfSi 2 | from .ncflow_edge_per_iter import NCFlowEdgePerIter as NcfEpi 3 | -------------------------------------------------------------------------------- /lib/algorithms/ncflow/counter.py: -------------------------------------------------------------------------------- 1 | # For computing fib entries 2 | from itertools import count 3 | 4 | class Counter(object): 5 | def __init__(self): 6 | self.counter = count() 7 | self.paths_dict = {} 8 | 9 | def __getitem__(self, path): 10 | if not isinstance(path, tuple): 11 | path = tuple(path) 12 | if path not in self.paths_dict: 13 | self.paths_dict[path] = next(self.counter) 14 | 15 | return self.paths_dict[path] 16 | 17 | -------------------------------------------------------------------------------- /lib/algorithms/ncflow/ncflow_abstract.py: -------------------------------------------------------------------------------- 1 | from ..abstract_formulation import AbstractFormulation 2 | 3 | class NCFlowAbstract(AbstractFormulation): 4 | 5 | @property 6 | def runtime(self): 7 | return self.runtime_est(14) 8 | 9 | def runtime_est(self, num_threads, breakdown=False): 10 | 11 | from heapq import heappush, heappop 12 | 13 | def heapsched_rt(lrts, k): 14 | h = [] 15 | for rt in lrts[:k]: 16 | heappush(h, rt) 17 | 18 | curr_rt = 0 19 | for rt in lrts[k:]: 20 | curr_rt = heappop(h) 21 | heappush(h, rt + curr_rt) 22 | 23 | while len(h) > 0: 24 | curr_rt = heappop(h) 25 | 26 | return curr_rt 27 | 28 | def parallelized_rt(lrts, k): 29 | if len(lrts) == 0: 30 | return 0.0 31 | inorder_rt = heapsched_rt(lrts, k) 32 | cp_bound = max(lrts) 33 | area_bound = sum(lrts) / k 34 | lrts.sort(reverse=True) 35 | two_approx = heapsched_rt(lrts, k) 36 | 37 | if self.VERBOSE: 38 | self._print("-- in incoming order, schedule= ", inorder_rt) 39 | self._print("-- bounds cp= ", cp_bound, "; area= ", area_bound) 40 | 
self._print("-- sorted rts: ", lrts) 41 | self._print("-- in sorted order, schedule ", two_approx) 42 | 43 | return two_approx 44 | 45 | rts = self._runtime_dict 46 | r2_time = parallelized_rt(list(rts['r2'].values()), num_threads) 47 | reconciliation_time = parallelized_rt( 48 | list(rts['reconciliation'].values()), num_threads) 49 | 50 | if 'kirchoffs' in rts: 51 | kirchoffs_time = parallelized_rt( 52 | list(rts['kirchoffs'].values()), num_threads) 53 | else: 54 | kirchoffs_time = 0 55 | 56 | print('Runtime breakdown: R1 {} R2// {} Recon// {} R3 {} Kirchoffs// {} #threads {}'.format( 57 | rts['r1'], r2_time, reconciliation_time, rts['r3'], kirchoffs_time, num_threads)) 58 | if breakdown: 59 | return rts['r1'], r2_time, reconciliation_time, rts['r3'], kirchoffs_time 60 | 61 | total_time = rts['r1'] + r2_time + reconciliation_time + rts['r3'] + kirchoffs_time 62 | 63 | return total_time 64 | -------------------------------------------------------------------------------- /lib/config.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | TL_DIR = os.path.realpath(os.path.join(os.path.dirname(__file__), '..')) 4 | TOPOLOGIES_DIR = os.path.join(TL_DIR, 'topologies') 5 | TM_DIR = os.path.join(TL_DIR, 'traffic-matrices') 6 | TEAVAR_DATA_DIR = os.path.join(TL_DIR, 'ext', 'teavar', 'code', 'data') 7 | TEAVAR_RUNLOGS_DIR = os.path.join(TL_DIR, 'ext', 'teavar', 'code', 'runlogs') 8 | TEAVAR_BASELINE_RESULTS_DIR = os.path.join(TL_DIR, 'ext', 'teavar', 'code', 'teavar_star_plots', 'data') 9 | -------------------------------------------------------------------------------- /lib/lp_solver.py: -------------------------------------------------------------------------------- 1 | from gurobipy import GurobiError 2 | from enum import Enum, unique 3 | import sys 4 | 5 | 6 | @unique 7 | class Method(Enum): 8 | PRIMAL_SIMPLEX = 0 9 | DUAL_SIMPLEX = 1 10 | BARRIER = 2 11 | CONCURRENT = 3 12 | PRIMAL_AND_DUAL = 5 13 | 14 | 15 | class LpSolver(object): 16 | def __init__(self, 17 | model, 18 | debug_fn=None, 19 | DEBUG=False, 20 | VERBOSE=False, 21 | out=None, 22 | gurobi_out=''): 23 | if out is None: 24 | out = sys.stdout 25 | self._model = model 26 | self._debug_fn = debug_fn 27 | self.DEBUG = DEBUG 28 | self.VERBOSE = VERBOSE 29 | self.out = out 30 | self._gurobi_out = gurobi_out 31 | 32 | def _print(self, *args): 33 | print(*args, file=self.out) 34 | 35 | @property 36 | def gurobi_out(self): 37 | return self._gurobi_out 38 | 39 | @gurobi_out.setter 40 | def gurobi_out(self, gurobi_out): 41 | if gurobi_out == 'stdout' or gurobi_out == '': 42 | self._gurobi_out = 'gurobi.log' 43 | else: 44 | self._gurobi_out = gurobi_out 45 | 46 | # Note: this is not idempotent: the `model` parameter will be changed after invoking 47 | # this function 48 | def solve_lp(self, method=Method.CONCURRENT, bar_tol=None, err_tol=None, numeric_focus=False): 49 | model = self._model 50 | if numeric_focus: 51 | model.setParam('NumericFocus', 1) 52 | model.setParam('Method', method.value) 53 | model.setParam('LogFile', self.gurobi_out) 54 | try: 55 | if bar_tol: 56 | model.Params.BarConvTol = bar_tol 57 | if err_tol: 58 | model.Params.OptimalityTol = err_tol 59 | model.Params.FeasibilityTol = err_tol 60 | 61 | #if self.VERBOSE: 62 | self._print('\nSolving LP') 63 | model.optimize() 64 | 65 | if self.DEBUG or self.VERBOSE: 66 | for var in model.getVars(): 67 | if var.x != 0: 68 | if self.DEBUG and self._debug_fn: 69 | if not var.varName.startswith('f['): 70 | continue 71 | u, v, k, s_k, 
t_k, d_k = self._debug_fn(var) 72 | if self.VERBOSE: 73 | self._print( 74 | 'edge ({}, {}), demand ({}, ({}, {}, {})), flow: {}' 75 | .format(u, v, k, s_k, t_k, d_k, var.x)) 76 | elif self.VERBOSE: 77 | self._print('{} {}'.format(var.varName, var.x)) 78 | self._print('Obj: %g' % model.objVal) 79 | return model.objVal 80 | except GurobiError as e: 81 | self._print('Error code ' + str(e.errno) + ': ' + str(e)) 82 | except AttributeError as e: 83 | self._print(str(e)) 84 | self._print('Encountered an attribute error') 85 | 86 | @property 87 | def model(self): 88 | return self._model 89 | 90 | @property 91 | def obj_val(self): 92 | return self._model.objVal 93 | -------------------------------------------------------------------------------- /lib/partitioning/__init__.py: -------------------------------------------------------------------------------- 1 | from .utils import * 2 | from .hard_coded_partitioning import * 3 | from .leader_election import * 4 | from .networkx_partitioning import * 5 | from .spectral_clustering import * 6 | from .fm_partitioning import * 7 | -------------------------------------------------------------------------------- /lib/partitioning/abstract_partitioning_method.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | class AbstractPartitioningMethod(object): 5 | 6 | def __init__(self, *, num_partitions=None, weighted=True): 7 | if isinstance(num_partitions, int): 8 | self._num_partitions = num_partitions 9 | 10 | self._use_cache = True 11 | self._weighted = weighted 12 | 13 | self._best_partitions = { 14 | } 15 | 16 | @property 17 | def use_cache(self): 18 | return self._use_cache 19 | 20 | @use_cache.setter 21 | def use_cache(self, use_cache): 22 | self._use_cache = use_cache 23 | 24 | @property 25 | def G(self): 26 | return self._G 27 | 28 | @property 29 | def partition_vector(self): 30 | return self._partition_vector 31 | 32 | @property 33 | def size_of_largest_partition(self): 34 | counts = np.bincount(self._partition_vector) 35 | return counts[np.argmax(counts)] 36 | 37 | @property 38 | def largest_partition_index(self): 39 | counts = np.bincount(self._partition_vector) 40 | return np.argmax(counts) 41 | 42 | @property 43 | def num_partitions(self): 44 | if not hasattr(self, '_num_partitions'): 45 | return -1 46 | return self._num_partitions 47 | 48 | @property 49 | def weighted(self): 50 | return self._weighted 51 | 52 | # Private method # 53 | def _default_num_partitions(self, G): 54 | return int(np.sqrt(len(G.nodes))) 55 | 56 | def partition(self, problem, override_cache=False): 57 | if not override_cache and self._use_cache and problem.name in self._best_partitions: 58 | return self._best_partitions[problem.name] 59 | 60 | self._partition_vector = self._partition_impl(problem) 61 | self._best_partitions[problem.name] = self._partition_vector 62 | return self._best_partitions[problem.name] 63 | 64 | ################# 65 | # Public method # 66 | ################# 67 | @property 68 | def name(self): 69 | raise NotImplementedError( 70 | 'name needs to be implemented in the subclass: {}'.format( 71 | self.__class__)) 72 | 73 | def _partition_impl(self, problem): 74 | raise NotImplementedError( 75 | '_partition_impl needs to be implemented in the subclass: {}'.format( 76 | self.__class__)) 77 | -------------------------------------------------------------------------------- /lib/partitioning/fm_partitioning.py: -------------------------------------------------------------------------------- 1 | 
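# FMPartitioning below follows the AbstractPartitioningMethod contract defined above: it
# supplies `name` and `_partition_impl(problem)` and inherits `partition(problem)`, which
# caches the resulting partition vector per problem name. A minimal, purely illustrative
# subclass (hypothetical, not part of this repo) would look like:
#
#   class EveryNodeAlone(AbstractPartitioningMethod):
#       @property
#       def name(self):
#           return 'every_node_alone'
#
#       def _partition_impl(self, problem):
#           # place each node in its own partition
#           return np.arange(len(problem.G.nodes), dtype=np.int32)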
from .abstract_partitioning_method import AbstractPartitioningMethod 2 | from ..config import TL_DIR 3 | import hashlib 4 | import numpy as np 5 | import os 6 | import re 7 | import subprocess 8 | 9 | 10 | class FMPartitioning(AbstractPartitioningMethod): 11 | 12 | # assumes this file to be in $ROOT/py/partitioning; then, the rundir is at $ROOT/fm_rundir 13 | run_folder = os.path.join(TL_DIR, 'ext', 'modularity', 'rundir') 14 | if not os.path.exists(run_folder): 15 | os.makedirs(run_folder) 16 | exe_folder = os.path.join(TL_DIR, 17 | 'ext', 'modularity', 'FastCommunity_w_GPL_v1.0.1') 18 | fm_exe = os.path.join(exe_folder, 'FastCommunity_wMH') 19 | if not os.path.exists(fm_exe): 20 | curr_dir = os.getcwd() 21 | os.chdir(exe_folder) 22 | subprocess.call(['make']) 23 | os.chdir(curr_dir) 24 | 25 | opt_num_partitions = { 26 | } 27 | 28 | def __init__(self, num_partitions=None): 29 | super().__init__(num_partitions=num_partitions, weighted=False) 30 | 31 | @property 32 | def name(self): 33 | return 'fm_partitioning' 34 | 35 | def _partition_impl(self, problem, all_tm_files=[]): 36 | G = problem.G 37 | topo = problem.name 38 | 39 | # write weighted pairs to file 40 | wpfile = os.path.join(self.run_folder, topo + '.wpairs') 41 | with open(wpfile, "w") as outF: 42 | seen = set() 43 | for (u, v, c) in G.edges.data('capacity'): 44 | seen.add((u, v)) 45 | wt = c 46 | outF.write("%d\t%d\t%d\n" % (u, v, wt)) 47 | 48 | if not hasattr(self, '_num_partitions'): 49 | # Run without num partitions argument to determine optimal number 50 | # of partitions based on modularity 51 | cmd = self.fm_exe + ' -f ' + wpfile + \ 52 | ' | grep ^Q | sort -g -r -k3 | head -n 1 | sed -e "s/\[/ /g" | sed -e "s/\]/ /g" | awk \'{print $2}\'' 53 | print('cmd=[{}]'.format(cmd)) 54 | 55 | self._num_partitions = len(G.nodes) - int(os.popen(cmd).read()) 56 | print('opt #partitions= ', self._num_partitions) 57 | 58 | # Run with num partitions argument, save to temporary output file 59 | fm_param = len(G.nodes) - self._num_partitions 60 | cmd = self.fm_exe + ' -f ' + wpfile + ' -c ' + str(fm_param) 61 | print('cmd=[{}]'.format(cmd)) 62 | 63 | temp_fname = hashlib.md5(np.int64(np.random.randint(2**31 - 1))).hexdigest() 64 | with open(temp_fname, 'w') as w: 65 | subprocess.call(cmd, shell=True, stdout=w) 66 | 67 | # Extract time 68 | output = os.popen('grep "Total Time:" {}'.format(temp_fname)).read() 69 | match = re.match('Total Time: (\d+(\.\d+)?) 
seconds', output) 70 | self.runtime = float(match.group(1)) 71 | 72 | # Extract modularity score 73 | output = os.popen('grep "^Q\['+ str(fm_param) + '\]" ' + temp_fname + ' | sed -e "s/\[/ /g" | sed -e "s/\]/ /g" | awk \'{print $4}\'').read() 74 | self.modularity = float(output) 75 | print('Modularity:', self.modularity) 76 | 77 | # Remove temporary output file 78 | os.remove(temp_fname) 79 | 80 | # Read partition vector from output file generated by FM 81 | partition_vector = np.ones(len(G.nodes), dtype=np.int32) * -1 82 | currgroup = -1 83 | with open(os.path.join(self.run_folder,topo + '-fc_a.groups'), "r") as groups: 84 | for line in groups: 85 | if line.startswith("GROUP"): 86 | currgroup += 1 87 | else: 88 | partition_vector[int(line)] = currgroup 89 | 90 | assert np.sum(partition_vector == -1) == 0 91 | return partition_vector -------------------------------------------------------------------------------- /lib/partitioning/hard_coded_partitioning.py: -------------------------------------------------------------------------------- 1 | from .abstract_partitioning_method import AbstractPartitioningMethod 2 | import numpy as np 3 | 4 | 5 | class HardCodedPartitioning(AbstractPartitioningMethod): 6 | def __init__(self, partition_vector): 7 | if not isinstance(partition_vector, np.ndarray): 8 | _partition_vector = np.array(partition_vector) 9 | else: 10 | _partition_vector = partition_vector 11 | 12 | super().__init__(num_partitions=max(partition_vector) + 1, 13 | weighted=False) 14 | self._use_cache = False 15 | self._partition_vector = _partition_vector 16 | 17 | def _partition_impl(self, problem): 18 | assert len(self.partition_vector) == len(problem.G.nodes) 19 | return self.partition_vector 20 | -------------------------------------------------------------------------------- /lib/partitioning/leader_election.py: -------------------------------------------------------------------------------- 1 | from .abstract_partitioning_method import AbstractPartitioningMethod 2 | import numpy as np 3 | import networkx as nx 4 | import time 5 | 6 | 7 | # Randomly partitions the graph, but ensures that each subgraph is contiguous 8 | class LeaderElection(AbstractPartitioningMethod): 9 | def __init__(self, num_partitions=None, seed=0): 10 | super().__init__(num_partitions=num_partitions, weighted=False) 11 | self.seed = seed 12 | 13 | @property 14 | def name(self): 15 | return 'leader_election' 16 | 17 | def _partition_impl(self, problem): 18 | G = problem.G 19 | if not hasattr(self, '_num_partitions'): 20 | self._num_partitions = self._default_num_partitions(G) 21 | 22 | np.random.seed(self.seed) 23 | # First, select the "seed nodes" for our partitioning. Each seed node 24 | # represents a single partition. The remaining nodes will be assigned to 25 | # one of the seed nodes until every node is assigned 26 | start = time.time() 27 | seed_nodes = np.random.choice(G.nodes, 28 | self.num_partitions, 29 | replace=False) 30 | partition_vector = np.ones(len(G.nodes), dtype=np.int32) * -1 31 | partition_vector[seed_nodes] = np.arange(self.num_partitions) 32 | 33 | # while there are still unassigned nodes 34 | while np.sum(partition_vector == -1) != 0: 35 | # Select a node that has been unassigned 36 | new_node = np.random.choice( 37 | np.argwhere(partition_vector == -1).flatten()) 38 | 39 | # From this node, collect all of the partitions that it neighbors 40 | # in the graph. 
If all of its neighbors have been unassigned, pick 41 | # a new node 42 | neighboring_partitions = np.unique([ 43 | partition_vector[x] for x in nx.all_neighbors(G, new_node) 44 | if partition_vector[x] != -1 45 | ]) 46 | 47 | already_tried = [] 48 | while len(neighboring_partitions) == 0: 49 | already_tried.append(new_node) 50 | new_node = np.random.choice( 51 | np.setdiff1d(np.argwhere(partition_vector == -1).flatten(), already_tried)) 52 | 53 | neighboring_partitions = np.unique([ 54 | partition_vector[x] for x in nx.all_neighbors(G, new_node) 55 | if partition_vector[x] != -1 56 | ]) 57 | 58 | # Assign the selected node to one of the partitions it neighbors 59 | partition_assignment = np.random.choice(neighboring_partitions) 60 | partition_vector[new_node] = partition_assignment 61 | self.runtime = time.time() - start 62 | 63 | assert np.sum(partition_vector == -1) == 0 64 | return partition_vector 65 | -------------------------------------------------------------------------------- /lib/partitioning/networkx_partitioning.py: -------------------------------------------------------------------------------- 1 | 2 | from .abstract_partitioning_method import AbstractPartitioningMethod 3 | import numpy as np 4 | from networkx.algorithms import community 5 | 6 | 7 | # Partition based on community-finding algorithms implemented in NetworkX: 8 | # https://networkx.github.io/documentation/stable/reference/algorithms/community.html 9 | class NetworkXPartitioning(AbstractPartitioningMethod): 10 | def __init__(self, part_fn_str, num_partitions=None, seed=0): 11 | super().__init__(num_partitions=num_partitions, weighted=False) 12 | self.set_partition_fn(part_fn_str) 13 | self.seed=seed 14 | 15 | def asyn_lpa(self, prob): 16 | return community.asyn_lpa_communities(prob.G, weight='capacity', seed=self.seed) 17 | 18 | def set_partition_fn(self, part_fn_str): 19 | if part_fn_str == 'label_propagation': 20 | self._part_fn = self.asyn_lpa 21 | else: 22 | raise Exception( 23 | '{} not a valid NetworkX partition function'.format(part_fn_str)) 24 | 25 | def _partition_impl(self, problem): 26 | G = problem.G 27 | if not hasattr(self, '_num_partitions'): 28 | self._num_partitions = self._default_num_partitions(G) 29 | 30 | p_v = np.zeros(len(problem.G.nodes), dtype=np.int32) 31 | for part_id, part in enumerate(self._part_fn(problem)): 32 | p_v[list(part)] = part_id 33 | self._num_partitions = len(np.unique(p_v)) 34 | return p_v 35 | 36 | -------------------------------------------------------------------------------- /lib/partitioning/spectral_clustering.py: -------------------------------------------------------------------------------- 1 | from .abstract_partitioning_method import AbstractPartitioningMethod 2 | from sklearn.cluster import KMeans 3 | from .utils import all_partitions_contiguous 4 | import numpy as np 5 | import networkx as nx 6 | import time 7 | 8 | 9 | # Run NJW spectral clustering, use eigengap heuristic to select the number of partitions 10 | class SpectralClustering(AbstractPartitioningMethod): 11 | 12 | def __init__(self, num_partitions=None, weighted=True, seed=0): 13 | super().__init__(num_partitions=num_partitions, weighted=weighted) 14 | if weighted: 15 | self._adj_mat = lambda G: np.asarray( 16 | nx.adjacency_matrix(G, weight='capacity').todense(), dtype=np.float64) 17 | else: 18 | self._adj_mat = lambda G: np.asarray( 19 | nx.adjacency_matrix(G, weight='').todense(), dtype=np.float64) 20 | self.seed = seed 21 | 22 | @property 23 | def name(self): 24 | return 'spectral_clustering' 
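    # Usage sketch (assumes `prob` is a Problem instance loaded elsewhere, e.g. via
    # Problem.from_file as in scripts/grid_search.py). Leaving num_partitions=None makes
    # _partition_impl below pick the number of partitions with the eigengap heuristic:
    #
    #   sc = SpectralClustering(num_partitions=None, weighted=True, seed=0)
    #   p_v = sc.partition(prob)  # p_v[i] is the partition id assigned to node i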
25 | 26 | def run_k_means_on_eigenvectors(self, eigvecs, num_nodes): 27 | start = time.time() 28 | V = eigvecs[:, :self._num_partitions] 29 | U = V / np.linalg.norm(V, axis=1).reshape(num_nodes, 1) 30 | 31 | k_means = KMeans(self._num_partitions, n_init=100, random_state=self.seed).fit(U) 32 | self.runtime = time.time() - start 33 | return k_means.labels_ 34 | 35 | # Normalized spectral clustering according to Ng, Jordan, and Weiss (2002) 36 | def _partition_impl(self, problem): 37 | 38 | def is_symmetric(a, rtol=1e-05, atol=1e-08): 39 | return np.allclose(a, a.T, rtol=rtol, atol=atol) 40 | 41 | def is_pos_semi_def(x): 42 | return np.all(np.linalg.eigvals(x) >= -1e-5) 43 | 44 | G = problem.G.copy() 45 | num_nodes = len(G.nodes) 46 | W = self._adj_mat(G.to_undirected()) 47 | 48 | # 1) Build the normalized Laplacian matrix L of the graph: 49 | # L = I - D^(-1/2) W D^(-1/2), where D^(-1/2) is the diagonal matrix with (D^(-1/2))_ii = (D_ii)^(-1/2) 50 | D = np.diag(np.sum(W, axis=1)) 51 | D_norm = np.power(D, -0.5) 52 | D_norm[D_norm == np.inf] = 0.0 53 | L = np.identity(W.shape[0]) - D_norm.dot(W).dot(D_norm) 54 | assert is_symmetric(L) 55 | assert is_pos_semi_def(L) 56 | 57 | # 2) Find eigenvalues and eigenvectors of L 58 | eigvals, eigvecs = np.linalg.eig(L) 59 | eigvals, eigvecs = eigvals.astype(np.float32), eigvecs.astype(np.float32) 60 | eigvecs = eigvecs[:, np.argsort(eigvals)] 61 | eigvals = eigvals[np.argsort(eigvals)] 62 | self.eigenvals = eigvals 63 | 64 | # 3) If number of partitions was not set, find largest eigengap between eigenvalues. If resulting 65 | # partition is not contiguous, try the 2nd-largest eigengap, and so on... 66 | if not hasattr(self, '_num_partitions'): 67 | max_num_parts = int(num_nodes / 4) 68 | print('Using eigengap heuristic to select number of partitions, max: {}'.format(max_num_parts)) 69 | self.eigengaps = np.array([eigvals[i+1] - eigvals[i] for i in range(len(eigvals[:max_num_parts]) - 1)]) 70 | 71 | k = 0 72 | indices = self.eigengaps.argsort()[::-1] 73 | 74 | while k < len(indices): 75 | self._num_partitions = indices[k] 76 | print('Trying {} partitions'.format(self._num_partitions)) 77 | p_v = self.run_k_means_on_eigenvectors(eigvecs, num_nodes) 78 | if all_partitions_contiguous(problem, p_v): 79 | break 80 | k += 1 81 | if k == len(indices): 82 | raise Exception('could not find valid partitioning') 83 | 84 | print('Eigengap heuristic selected {} partitions'.format(self._num_partitions)) 85 | return p_v 86 | 87 | else: 88 | return self.run_k_means_on_eigenvectors(eigvecs, num_nodes) 89 | 90 | 91 | -------------------------------------------------------------------------------- /lib/path_utils.py: -------------------------------------------------------------------------------- 1 | from .graph_utils import path_to_edge_list 2 | from itertools import islice 3 | import networkx as nx 4 | from sys import maxsize 5 | 6 | 7 | # Remove cycles from path 8 | def remove_cycles(path): 9 | stack = [] 10 | visited = set() 11 | for node in path: 12 | if node in visited: 13 | # remove elements from this cycle 14 | while stack[-1] != node: 15 | visited.remove(stack[-1]) 16 | stack = stack[:-1] 17 | else: 18 | stack.append(node) 19 | visited.add(node) 20 | return stack 21 | 22 | 23 | def graph_copy_with_edge_weights(_G, dist_metric): 24 | G = _G.copy() 25 | 26 | if dist_metric == 'inv-cap': 27 | for u, v, cap in G.edges.data('capacity'): 28 | if cap < 0.0: 29 | cap = 0.0 30 | try: 31 | G[u][v]['weight'] = 1.0 / cap 32 | except ZeroDivisionError: 33 | G[u][v]['weight'] = maxsize 34 | elif dist_metric ==
'min-hop': 35 | for u, v, cap in G.edges.data('capacity'): 36 | if cap <= 0.0: 37 | G[u][v]['weight'] = maxsize 38 | else: 39 | G[u][v]['weight'] = 1.0 40 | else: 41 | raise Exception('invalid dist_metric: {}'.format(dist_metric)) 42 | 43 | return G 44 | 45 | 46 | def find_paths(G, s_k, t_k, num_paths, disjoint=True, include_weight=False): 47 | def compute_weight(G, path): 48 | return sum(G[u][v]['weight'] for u, v in path_to_edge_list(path)) 49 | 50 | def k_shortest_paths(G, source, target, k, weight='weight'): 51 | try: 52 | # Yen's shortest path algorithm 53 | return list( 54 | islice( 55 | nx.shortest_simple_paths( 56 | G, source, target, weight=weight), 57 | k)) 58 | except nx.NetworkXNoPath: 59 | return [] 60 | 61 | def k_shortest_edge_disjoint_paths(G, source, target, k, weight='weight'): 62 | def compute_distance(path): 63 | return sum(G[u][v][weight] for u, v in path_to_edge_list(path)) 64 | 65 | return [remove_cycles(path) for path in sorted(nx.edge_disjoint_paths(G, s_k, t_k), key=lambda path: compute_distance(path))[:k]] 66 | 67 | if disjoint: 68 | if include_weight: 69 | return [(path, compute_weight(path)) for path in k_shortest_edge_disjoint_paths(G, s_k, t_k, num_paths, weight='weight')] 70 | else: 71 | return k_shortest_edge_disjoint_paths(G, s_k, t_k, num_paths, weight='weight') 72 | else: 73 | if include_weight: 74 | return [(path, compute_weight(path)) for path in k_shortest_paths(G, s_k, t_k, num_paths, weight='weight')] 75 | else: 76 | return k_shortest_paths(G, s_k, t_k, num_paths, weight='weight') 77 | 78 | -------------------------------------------------------------------------------- /lib/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/netcontract/ncflow/9879bb1c36637acea2692c81c628f6c81a375d7c/lib/tests/__init__.py -------------------------------------------------------------------------------- /lib/tests/abstract_test.py: -------------------------------------------------------------------------------- 1 | class bcolors: 2 | HEADER = '\033[95m' 3 | OKBLUE = '\033[94m' 4 | OKGREEN = '\033[92m' 5 | WARNING = '\033[93m' 6 | ERROR = '\033[91m' 7 | ENDC = '\033[0m' 8 | BOLD = '\033[1m' 9 | UNDERLINE = '\033[4m' 10 | 11 | class AbstractTest(object): 12 | 13 | def __init__(self): 14 | self.has_error = False 15 | 16 | @property 17 | def name(self): 18 | raise NotImplementedError( 19 | 'name needs to be implemented in the subclass: {}'.format( 20 | self.__class__)) 21 | 22 | def assert_feasibility(self, ncflow): 23 | try: 24 | ncflow.check_feasibility() 25 | except AssertionError: 26 | self.has_error = True 27 | print(bcolors.ERROR + '[ERROR] NCFlow did not find feasible flow' + bcolors.ENDC) 28 | 29 | def assert_eq_epsilon(self, actual_val, correct_val, epsilon=1e-5): 30 | try: 31 | assert abs(correct_val - actual_val) < epsilon 32 | except AssertionError: 33 | self.has_error = True 34 | print(bcolors.ERROR + '[ERROR] Correct value: {}, actual value: {}'.format( 35 | correct_val, actual_val) + bcolors.ENDC) 36 | 37 | def assert_geq_epsilon(self, actual_val, lower_val, epsilon=1e-5): 38 | try: 39 | assert actual_val >= lower_val - epsilon 40 | except AssertionError: 41 | self.has_error = True 42 | print(bcolors.ERROR + 43 | '[ERROR] Looking for >= {}, actual value: {}'.format(lower_val, actual_val) + 44 | bcolors.ENDC) 45 | 46 | def assert_leq_epsilon(self, actual_val, upper_val, epsilon=1e-5): 47 | try: 48 | assert actual_val <= upper_val + epsilon 49 | except AssertionError: 50 | 
self.has_error = True 51 | print(bcolors.ERROR + 52 | '[ERROR] Looking for <= {}, actual value: {}'.format(upper_val, actual_val) + 53 | bcolors.ENDC) 54 | 55 | def run(self): 56 | raise NotImplementedError( 57 | 'run needs to be implemented in the subclass: {}'.format( 58 | self.__class__)) 59 | -------------------------------------------------------------------------------- /lib/tests/feasibility_test.py: -------------------------------------------------------------------------------- 1 | from .abstract_test import AbstractTest 2 | from ..problems import FeasibilityProblem1 3 | from ..partitioning.hard_coded_partitioning import HardCodedPartitioning 4 | from ..algorithms.ncflow.ncflow_edge_per_iter import NCFlowEdgePerIter as NcfEpi 5 | 6 | class FeasibilityTest(AbstractTest): 7 | 8 | 9 | def __init__(self): 10 | super().__init__() 11 | self.problem = FeasibilityProblem1() 12 | 13 | @property 14 | def name(self): 15 | return 'feas' 16 | 17 | def run(self): 18 | ncf = NcfEpi.new_max_flow(4) 19 | hc = HardCodedPartitioning(partition_vector=[0, 0, 0, 1, 1, 2, 2, 2]) 20 | ncf.solve(self.problem, hc) 21 | 22 | self.assert_eq_epsilon(ncf.obj_val, 2.0) -------------------------------------------------------------------------------- /lib/tests/flow_path_construction_test.py: -------------------------------------------------------------------------------- 1 | from .abstract_test import AbstractTest 2 | from ..problems import FlowPathConstruction 3 | from ..partitioning.hard_coded_partitioning import HardCodedPartitioning 4 | from ..algorithms.ncflow.ncflow_edge_per_iter import NCFlowEdgePerIter as NcfEpi 5 | 6 | # This test case is useful for testing how we construct 7 | # flows per path per commod using NCFlow 8 | 9 | class FlowPathConstructionTest(AbstractTest): 10 | 11 | def __init__(self): 12 | super().__init__() 13 | self.problem = FlowPathConstruction() 14 | 15 | @property 16 | def name(self): 17 | return 'flow-path-construction' 18 | 19 | def run(self): 20 | ncf = NcfEpi.new_max_flow(4) 21 | hc = HardCodedPartitioning(partition_vector=[0, 0, 1, 1, 2, 2]) 22 | ncf.solve(self.problem, hc) 23 | 24 | self.assert_feasibility(ncf) 25 | 26 | -------------------------------------------------------------------------------- /lib/tests/optgap4_test.py: -------------------------------------------------------------------------------- 1 | from .abstract_test import AbstractTest 2 | from ..problems import OptGap4 3 | from ..partitioning.hard_coded_partitioning import HardCodedPartitioning 4 | from ..algorithms.ncflow.ncflow_edge_per_iter import NCFlowEdgePerIter as NcfEpi 5 | 6 | # This test case illustrates the need for reconciliation to allow traffic to "criss-cross" 7 | # between nodes that are in neighboring meta-nodes. 
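# (Each of these optimality-gap tests can be run on its own through the test runner, e.g.
#  from the repository root: `python -m lib.tests.test_runner --tests optgap4`; omitting
#  --tests runs the full suite. The test name comes from each class's `name` property.)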
8 | 9 | class OptGap4Test(AbstractTest): 10 | 11 | def __init__(self): 12 | super().__init__() 13 | self.problem = OptGap4() 14 | 15 | @property 16 | def name(self): 17 | return 'optgap4' 18 | 19 | def run(self): 20 | ncf = NcfEpi.new_max_flow(4) 21 | hc = HardCodedPartitioning(partition_vector=[0, 0, 0, 1, 1, 1]) 22 | ncf.solve(self.problem, hc) 23 | 24 | self.assert_feasibility(ncf) 25 | 26 | # this is a shame; the optimal solution here should be 8; we get 1.0 27 | self.assert_geq_epsilon(ncf.obj_val, 2.0) 28 | self.assert_leq_epsilon(ncf.obj_val, 2.0) -------------------------------------------------------------------------------- /lib/tests/optgapc1_test.py: -------------------------------------------------------------------------------- 1 | from .abstract_test import AbstractTest 2 | from ..problems import OptGapC1 3 | from ..partitioning.hard_coded_partitioning import HardCodedPartitioning 4 | from ..algorithms.ncflow.ncflow_edge_per_iter import NCFlowEdgePerIter as NcfEpi 5 | 6 | # This test illustrates the optimality gap from relaxing condition C1 7 | # That is, that all the demands be satisfiable in order for optimality to hold. 8 | # Here, a flow has multiple bottlenecks in different meta-nodes and 9 | # flows that share only one of those bottlenecks lose flow; leading to optimality gap. 10 | 11 | class OptGapC1Test(AbstractTest): 12 | 13 | def __init__(self): 14 | super().__init__() 15 | self.problem = OptGapC1() 16 | 17 | @property 18 | def name(self): 19 | return 'optgapc1' 20 | 21 | def run(self): 22 | ncf = NcfEpi.new_max_flow(4) 23 | hc = HardCodedPartitioning(partition_vector=[0, 0, 0, 1, 1, 1]) 24 | ncf.solve(self.problem, hc) 25 | 26 | self.assert_feasibility(ncf) 27 | 28 | # this is a shame; the optimal solution here should be 8; we get 1.0 29 | self.assert_geq_epsilon(ncf.obj_val, 5.0) 30 | self.assert_leq_epsilon(ncf.obj_val, 7.0) -------------------------------------------------------------------------------- /lib/tests/optgapc2_test.py: -------------------------------------------------------------------------------- 1 | from .abstract_test import AbstractTest 2 | from ..problems import OptGapC2 3 | from ..partitioning.hard_coded_partitioning import HardCodedPartitioning 4 | from ..algorithms.ncflow.ncflow_edge_per_iter import NCFlowEdgePerIter as NcfEpi 5 | 6 | # This test illustrates the optimality gap from relaxing condition C2 7 | # That is, that there is no undirected cycle among meta-nodes 8 | # Here, a flow has two paths but R1 picks one and only in R2 is the 9 | # bottleneck discovered leading to lost flow 10 | 11 | class OptGapC2Test(AbstractTest): 12 | 13 | def __init__(self): 14 | super().__init__() 15 | self.problem = OptGapC2() 16 | 17 | @property 18 | def name(self): 19 | return 'optgapc2' 20 | 21 | def run(self): 22 | ncf = NcfEpi.new_max_flow(4) 23 | hc = HardCodedPartitioning(partition_vector=[0, 0, 1, 1, 2, 2, 3, 3]) 24 | ncf.solve(self.problem, hc) 25 | 26 | self.assert_feasibility(ncf) 27 | 28 | # this is a shame; the optimal solution here should be 8; we get 1.0 29 | self.assert_geq_epsilon(ncf.obj_val, 3.0) 30 | self.assert_leq_epsilon(ncf.obj_val, 8.0) -------------------------------------------------------------------------------- /lib/tests/optgapc3_test.py: -------------------------------------------------------------------------------- 1 | from .abstract_test import AbstractTest 2 | from ..problems import OptGapC3 3 | from ..partitioning.hard_coded_partitioning import HardCodedPartitioning 4 | from ..algorithms.ncflow.ncflow_edge_per_iter 
import NCFlowEdgePerIter as NcfEpi 5 | 6 | # This test illustrates the optimality gap from relaxing condition C3 7 | # That is, that all the demands be satisfiable in order for optimality to hold. 8 | # Here, a flow has multiple bottlenecks in different meta-nodes and 9 | # flows that share only one of those bottlenecks lose flow; leading to optimality gap. 10 | 11 | class OptGapC3Test(AbstractTest): 12 | 13 | def __init__(self): 14 | super().__init__() 15 | self.problem = OptGapC3() 16 | 17 | @property 18 | def name(self): 19 | return 'optgapc3' 20 | 21 | def run(self): 22 | ncf = NcfEpi.new_max_flow(4) 23 | hc = HardCodedPartitioning(partition_vector=[0, 0, 1, 2, 2, 3, 4]) 24 | ncf.solve(self.problem, hc) 25 | 26 | self.assert_feasibility(ncf) 27 | 28 | # this is a shame; the optimal solution here should be 8; we get 1.0 29 | self.assert_geq_epsilon(ncf.obj_val, 1.0) 30 | self.assert_leq_epsilon(ncf.obj_val, 8.0) -------------------------------------------------------------------------------- /lib/tests/recon3_test.py: -------------------------------------------------------------------------------- 1 | from .abstract_test import AbstractTest 2 | from ..problems import Recon3 3 | from ..partitioning.hard_coded_partitioning import HardCodedPartitioning 4 | from ..algorithms.ncflow.ncflow_edge_per_iter import NCFlowEdgePerIter as NcfEpi 5 | 6 | class Recon3Test(AbstractTest): 7 | 8 | def __init__(self): 9 | super().__init__() 10 | self.problem = Recon3() 11 | 12 | @property 13 | def name(self): 14 | return 'recon3' 15 | 16 | def run(self): 17 | ncf = NcfEpi.new_max_flow(4) 18 | hc = HardCodedPartitioning(partition_vector=[0, 0, 0, 1, 1, 1]) 19 | ncf.solve(self.problem, hc) 20 | 21 | self.assert_feasibility(ncf) 22 | 23 | self.assert_eq_epsilon(ncf.obj_val, 5.0) -------------------------------------------------------------------------------- /lib/tests/reconciliation_problem_2_test.py: -------------------------------------------------------------------------------- 1 | from .abstract_test import AbstractTest 2 | from ..problems import ReconciliationProblem2 3 | from ..partitioning.hard_coded_partitioning import HardCodedPartitioning 4 | from ..algorithms.ncflow.ncflow_edge_per_iter import NCFlowEdgePerIter as NcfEpi 5 | 6 | class ReconciliationProblem2Test(AbstractTest): 7 | 8 | def __init__(self): 9 | super().__init__() 10 | self.problem = ReconciliationProblem2() 11 | 12 | @property 13 | def name(self): 14 | return 'recon2' 15 | 16 | def run(self): 17 | ncf = NcfEpi.new_max_flow(4) 18 | hc = HardCodedPartitioning(partition_vector=[0, 0, 1, 1]) 19 | ncf.solve(self.problem, hc) 20 | 21 | self.assert_feasibility(ncf) 22 | 23 | self.assert_eq_epsilon(ncf.r1_obj_val, 40.0) 24 | self.assert_eq_epsilon(ncf.intra_obj_vals[0], 0.0) 25 | self.assert_eq_epsilon(ncf.intra_obj_vals[1], 0.0) 26 | self.assert_eq_epsilon(ncf.r3_obj_val, 10.0) 27 | self.assert_eq_epsilon(ncf.obj_val, 10.0) 28 | -------------------------------------------------------------------------------- /lib/tests/reconciliation_problem_test.py: -------------------------------------------------------------------------------- 1 | from .abstract_test import AbstractTest 2 | from ..problems import ReconciliationProblem 3 | from ..partitioning.hard_coded_partitioning import HardCodedPartitioning 4 | from ..algorithms.ncflow.ncflow_edge_per_iter import NCFlowEdgePerIter as NcfEpi 5 | 6 | class ReconciliationProblemTest(AbstractTest): 7 | 8 | def __init__(self): 9 | super().__init__() 10 | self.problem = ReconciliationProblem() 11 | 12 | 
@property 13 | def name(self): 14 | return 'recon' 15 | 16 | def run(self): 17 | ncf = NcfEpi.new_max_flow(4) 18 | hc = HardCodedPartitioning(partition_vector=[0, 0, 1, 1]) 19 | ncf.solve(self.problem, hc) 20 | 21 | self.assert_feasibility(ncf) 22 | 23 | self.assert_eq_epsilon(ncf.r1_obj_val, 10.0) 24 | self.assert_eq_epsilon(ncf.intra_obj_vals[0], 0.0) 25 | self.assert_eq_epsilon(ncf.intra_obj_vals[1], 0.0) 26 | self.assert_eq_epsilon(ncf.r3_obj_val, 0.0) 27 | self.assert_eq_epsilon(ncf.obj_val, 0.0) 28 | -------------------------------------------------------------------------------- /lib/tests/single_edge_b.py: -------------------------------------------------------------------------------- 1 | from .abstract_test import AbstractTest 2 | from ..problems import SingleEdgeB 3 | from ..partitioning.hard_coded_partitioning import HardCodedPartitioning 4 | from ..algorithms.ncflow.ncflow_edge_per_iter import NCFlowEdgePerIter as NcfEpi 5 | 6 | 7 | class SingleEdgeBTest(AbstractTest): 8 | 9 | def __init__(self): 10 | super().__init__() 11 | self.problem = SingleEdgeB() 12 | 13 | @property 14 | def name(self): 15 | return 'SingleEdgeB' 16 | 17 | def run(self): 18 | ncf = NcfEpi.new_max_flow(4, verbose=True) 19 | hc = HardCodedPartitioning(partition_vector=[0, 0, 1, 1, 1, 1, 2, 2]) 20 | ncf.solve(self.problem, hc) 21 | print(ncf.obj_val) 22 | 23 | self.assert_feasibility(ncf) 24 | -------------------------------------------------------------------------------- /lib/tests/test_runner.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python 2 | 3 | 4 | from .toy_problem_test import ToyProblemTest 5 | from .reconciliation_problem_test import ReconciliationProblemTest 6 | from .reconciliation_problem_2_test import ReconciliationProblem2Test 7 | from .recon3_test import Recon3Test 8 | from .optgapc1_test import OptGapC1Test 9 | from .optgapc2_test import OptGapC2Test 10 | from .optgapc3_test import OptGapC3Test 11 | from .optgap4_test import OptGap4Test 12 | from .single_edge_b import SingleEdgeBTest 13 | from .feasibility_test import FeasibilityTest 14 | from .flow_path_construction_test import FlowPathConstructionTest 15 | from .we_need_to_fix_this_test import WeNeedToFixThisTest 16 | from .abstract_test import bcolors 17 | 18 | 19 | import argparse 20 | 21 | 22 | ALL_TESTS = [ToyProblemTest(), ReconciliationProblemTest(), 23 | ReconciliationProblem2Test(), Recon3Test(), OptGapC1Test(), 24 | OptGapC2Test(), OptGapC3Test(), FeasibilityTest(), 25 | OptGap4Test(), FlowPathConstructionTest(), WeNeedToFixThisTest(), 26 | SingleEdgeBTest()] 27 | TEST_NAME_DICT = {test.name: test for test in ALL_TESTS} 28 | 29 | 30 | def run_tests(tests_to_run): 31 | tests_that_failed = [] 32 | for test in tests_to_run: 33 | print('\n\n---{} TEST---\n\n'.format(test.name.upper())) 34 | test.run() 35 | if test.has_error: 36 | tests_that_failed.append(test) 37 | for test in tests_that_failed: 38 | print() 39 | print(bcolors.ERROR + '\n\n---{} TEST failed---\n\n'.format(test.name.upper()) + bcolors.ENDC) 40 | if len(tests_that_failed) == 0: 41 | print(bcolors.OKGREEN + 'All tests passed!' 
+ bcolors.ENDC) 42 | 43 | 44 | if __name__ == '__main__': 45 | parser = argparse.ArgumentParser() 46 | parser.add_argument('--tests', nargs='+', required=False) 47 | args = parser.parse_args() 48 | 49 | if args.tests is not None: 50 | tests_to_run = [TEST_NAME_DICT[name] for name in args.tests] 51 | else: 52 | tests_to_run = ALL_TESTS 53 | 54 | print('RUNNING THE FOLLOWING TESTS: {}'.format( 55 | [test.name for test in tests_to_run])) 56 | run_tests(tests_to_run) 57 | 58 | -------------------------------------------------------------------------------- /lib/tests/toy_problem_test.py: -------------------------------------------------------------------------------- 1 | from .abstract_test import AbstractTest 2 | from ..problems import ToyProblem 3 | from ..partitioning.hard_coded_partitioning import HardCodedPartitioning 4 | from ..algorithms.ncflow.ncflow_edge_per_iter import NCFlowEdgePerIter as NcfEpi 5 | 6 | class ToyProblemTest(AbstractTest): 7 | 8 | def __init__(self): 9 | super().__init__() 10 | self.problem = ToyProblem() 11 | 12 | @property 13 | def name(self): 14 | return 'toy' 15 | 16 | def run(self): 17 | ncf = NcfEpi.new_max_flow(4) 18 | hc = HardCodedPartitioning(partition_vector=[0, 0, 0, 1, 1, 1]) 19 | ncf.solve(self.problem, hc) 20 | 21 | self.assert_feasibility(ncf) 22 | 23 | self.assert_eq_epsilon(ncf.r1_obj_val, 46.0) 24 | self.assert_eq_epsilon(ncf.intra_obj_vals[0], 5.0) 25 | self.assert_eq_epsilon(ncf.intra_obj_vals[1], 7.0) 26 | 27 | self.assert_geq_epsilon(ncf.r3_obj_val, 45) 28 | self.assert_geq_epsilon(ncf.obj_val, 57) 29 | 30 | -------------------------------------------------------------------------------- /lib/tests/we_need_to_fix_this_test.py: -------------------------------------------------------------------------------- 1 | from .abstract_test import AbstractTest 2 | from ..problems import WeNeedToFixThis 3 | from ..partitioning.hard_coded_partitioning import HardCodedPartitioning 4 | from ..algorithms.ncflow.ncflow_edge_per_iter import NCFlowEdgePerIter as NcfEpi 5 | 6 | 7 | # Testing WeNeedToFixThis: correct path to target 8 | # isn't visible in source meta-node 9 | class WeNeedToFixThisTest(AbstractTest): 10 | 11 | def __init__(self): 12 | super().__init__() 13 | self.problem = WeNeedToFixThis() 14 | 15 | @property 16 | def name(self): 17 | return 'we-need-to-fix-this' 18 | 19 | def run(self): 20 | ncf = NcfEpi.new_max_flow(4) 21 | hc = HardCodedPartitioning(partition_vector=[0, 0, 1, 1, 1, 1, 2, 2]) 22 | ncf.solve(self.problem, hc) 23 | 24 | self.assert_feasibility(ncf) 25 | 26 | -------------------------------------------------------------------------------- /lib/utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from collections import defaultdict 3 | 4 | # sort commods from lowest demand to highest demand 5 | # flow_remaining = flow_val 6 | # demand_remaining = [d_k for commod] 7 | # while len(demand_remaining) > 0: 8 | # peek at the first commod (the commod with the lowest demand) 9 | # flow_to_assign = min(flow_val / # commods, demand_remaining) 10 | # for every commod: 11 | # demand_remaining[commod] -= flow_to_assign 12 | # if demand_remaining[commod] == 0.0: 13 | # remove commod from demand_remaining 14 | # remove commod from sorted commods 15 | # flow_remaining -= flow_to_assign for every commod 16 | # if flow_remaining == 0.0: 17 | # break 18 | # return demand - demand_remaining for k 19 | def waterfall_memoized(): 20 | # Memoize results in demand_satisfied 21 | demand_satisfied 
= {} 22 | 23 | def fn(flow_val, k, commods): 24 | if k in demand_satisfied: 25 | return demand_satisfied[k] 26 | 27 | EPS = 1e-6 28 | demand_remaining = {commod[0]: commod[-1][-1] for commod in commods} 29 | flow_remaining = flow_val 30 | sorted_commods = [commod[0] for commod in sorted(commods, key=lambda x: x[-1][-1])] 31 | while len(demand_remaining) > 0: 32 | k_smallest = sorted_commods[0] 33 | flow_to_assign = min(flow_remaining / len(commods), demand_remaining[k_smallest]) 34 | for commod_id, (_, _, orig_demand) in commods: 35 | if commod_id not in demand_remaining: 36 | continue 37 | demand_remaining[commod_id] -= flow_to_assign 38 | if abs(demand_remaining[commod_id] - 0.0) < EPS: 39 | demand_satisfied[commod_id] = orig_demand 40 | del demand_remaining[commod_id] 41 | sorted_commods.remove(commod_id) 42 | flow_remaining -= flow_to_assign 43 | if abs(flow_remaining - 0.0) < EPS: 44 | break 45 | for commod_id, (_, _, orig_demand) in commods: 46 | if commod_id in demand_remaining: 47 | demand_satisfied[commod_id] = orig_demand - demand_remaining[commod_id] 48 | 49 | return demand_satisfied[k] 50 | 51 | return fn 52 | 53 | 54 | # Convert nested defaultdict to dict 55 | def nested_ddict_to_dict(ddict): 56 | for k, v in ddict.items(): 57 | ddict[k] = dict(v) 58 | return dict(ddict) 59 | 60 | # Converts {k1: [v1,... vn]} to {v1: k1,... vn: k1} 61 | def reverse_dict_value_list(dict_of_list): 62 | return {v: k for k, vals in dict_of_list.items() for v in vals} 63 | 64 | 65 | # Uniform random variable [low, high) 66 | def uni_rand(low=-1, high=1): 67 | return (high - low) * np.random.rand() + low 68 | 69 | 70 | def compute_max_link_util(G, sol_dict): 71 | total_flow_on_edge = defaultdict(float) 72 | for commod, flow_list in sol_dict.items(): 73 | for (u, v), l in flow_list: 74 | total_flow_on_edge[(u, v)] += l 75 | max_edge = None 76 | max_util = 0.0 77 | for (u, v), total_flow in total_flow_on_edge.items(): 78 | edge_util = total_flow / G[u][v]['capacity'] 79 | if edge_util > max_util: 80 | max_util = edge_util 81 | max_edge = (u, v) 82 | 83 | return max_edge, max_util 84 | 85 | 86 | def link_util_stats(G, sol_dict): 87 | edge_flows = defaultdict(float) 88 | 89 | for flow_list in sol_dict.values(): 90 | for (u, v), l in flow_list: 91 | edge_flows[(u, v)] += l 92 | 93 | edge_utils = {} 94 | for u, v, c_e in G.edges.data('capacity'): 95 | if c_e == 0.0: 96 | assert edge_flows[(u, v)] == 0.0 97 | edge_utils[(u, v)] = 0.0 98 | else: 99 | edge_utils[(u, v)] = edge_flows[(u, v)] / c_e 100 | 101 | values = list(edge_utils.values()) 102 | 103 | return np.min(values), np.median(values), np.mean(values), np.max(values) 104 | -------------------------------------------------------------------------------- /lib/vis.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import networkx as nx 3 | import numpy as np 4 | from .utils import uni_rand 5 | 6 | 7 | def vis_graph(G, node_label='label', edge_label='capacity'): 8 | def get_node_attrs_or_default(G, attr, default_val): 9 | attr_dict = nx.get_node_attributes(G, attr) 10 | if not attr_dict: 11 | attr_dict = {} 12 | for node in G.nodes: 13 | if node not in attr_dict: 14 | if hasattr(default_val, '__call__'): 15 | attr_dict[node] = default_val(node) 16 | else: 17 | attr_dict[node] = default_val 18 | return attr_dict 19 | 20 | def random_pos(node): 21 | return (uni_rand(-3, 3), uni_rand(-3, 3)) 22 | 23 | plt.figure(figsize=(14, 8)) 24 | pos = get_node_attrs_or_default(G, 'pos', 
random_pos) 25 | colors = get_node_attrs_or_default(G, 'color', 'yellow') 26 | colors = [colors[node] for node in G.nodes] 27 | nx.draw(G, pos, node_size=1000, node_color=colors) 28 | node_labels = get_node_attrs_or_default(G, node_label, str) 29 | nx.draw_networkx_labels(G, pos, labels=node_labels) 30 | 31 | edge_labels = nx.get_edge_attributes(G, edge_label) 32 | nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels, 33 | label_pos=0.8) 34 | plt.show() 35 | 36 | 37 | def vis_partitions(G, partition_vector): 38 | # AT MOST 6 partitions 39 | COLORS = ['yellow', 'green', 'blue', 'red', 'orange', 'purple'] 40 | assert np.max(partition_vector) <= len(COLORS) 41 | color_dict = { 42 | part_id: COLORS[part_id] 43 | for part_id in np.unique(partition_vector) 44 | } 45 | for node in G.nodes: 46 | G.nodes[node]['color'] = color_dict[partition_vector[node]] 47 | vis_graph(G) 48 | -------------------------------------------------------------------------------- /scripts/find_demand_scale_factor.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | import sys 4 | 5 | sys.path.append('..') 6 | 7 | from lib.algorithms.path_formulation import PathFormulation as PF 8 | from lib.problem import Problem 9 | from lib.traffic_matrix import GenericTrafficMatrix 10 | from lib.config import TOPOLOGIES_DIR 11 | from benchmarks.benchmark_consts import NCFLOW_HYPERPARAMS 12 | from lib.algorithms import NcfEpi 13 | from lib.partitioning import FMPartitioning, SpectralClustering 14 | 15 | import datetime 16 | 17 | t_arg = sys.argv[1] 18 | 19 | # read the topology 20 | if t_arg.endswith('.graphml'): 21 | topo_fname = os.path.join(TOPOLOGIES_DIR, 'topology-zoo/' + t_arg) 22 | else: 23 | topo_fname = os.path.join(TOPOLOGIES_DIR, t_arg) 24 | 25 | if t_arg.endswith('.json'): 26 | G = Problem._read_graph_json(topo_fname) 27 | elif t_arg.endswith('.graphml'): 28 | G = Problem._read_graph_graphml(topo_fname) 29 | num_nodes = len(G.nodes) 30 | print("#nodes={}".format(num_nodes)) 31 | 32 | # process each traffic matrix 33 | TEAVAR_DEMANDS_DIR = '../code/teavar/code/data' 34 | d_fname = os.path.join(TEAVAR_DEMANDS_DIR, t_arg, 'demand.txt') 35 | 36 | line_num = 0 37 | with open(d_fname, 'r') as input_file: 38 | for line in input_file: 39 | line_num = line_num + 1 40 | # if line_num != 7: 41 | # continue 42 | print("==================Demand {}==================".format(line_num)) 43 | tm = GenericTrafficMatrix(problem=None, 44 | tm=np.fromstring(line, np.float32, 45 | sep=' ').reshape( 46 | num_nodes, num_nodes)) 47 | 48 | # if line_num == 7: 49 | # print("tm=[{}]".format(tm.tm)) 50 | 51 | p = Problem(G, tm) 52 | p.name = t_arg 53 | 54 | # compute demand scale factor 55 | pf_cdsf = PF.compute_demand_scale_factor(4, edge_disjoint=True) 56 | pf_cdsf.solve(p) 57 | z = pf_cdsf.obj_val 58 | 59 | # compute pf solution and runtime 60 | pf = PF.new_max_flow(4, edge_disjoint=True) 61 | pf.solve(p) 62 | pf_flow = pf.obj_val 63 | pf_runtime = pf.runtime 64 | 65 | # compute nc solution and runtime 66 | # print("---> p.name = {}".format(p.name)) 67 | if p.name in NCFLOW_HYPERPARAMS: 68 | num_paths, edge_disjoint, dist_metric, partition_algo, sf = NCFLOW_HYPERPARAMS[ 69 | p.name] 70 | num_partitions = sf * int(np.sqrt(len(p.G.nodes))) 71 | 72 | # print("---> partition_algo = {}".format(partition_algo)) 73 | if False: 74 | if partition_algo.contains('spectral_clustering'): 75 | partition_cls = SpectralClustering 76 | elif partition_algo.contains('fm_partitioning'): 77 
| partition_cls = FMPartitioning 78 | else: 79 | print("WARN un-parseable partition_algo = {}".format( 80 | partition_algo)) 81 | 82 | partitioner = partition_algo(num_partitions) 83 | 84 | ncflow = NcfEpi.new_max_flow(num_paths, 85 | edge_disjoint=True, 86 | dist_metric='inv-cap') 87 | begin = datetime.datetime.now() 88 | ncflow.solve(p, partitioner) 89 | end = datetime.datetime.now() 90 | 91 | nc_flow = ncflow.obj_val 92 | nc_runtime = ncflow.runtime_est(14) 93 | nc_wallclocktime = (end - begin).seconds 94 | else: 95 | nc_flow = pf_flow 96 | nc_runtime = pf_runtime 97 | nc_wallclocktime = -1 98 | 99 | print( 100 | "RESULT D {0} (paths=edinvcap4) z {1:1.3f} PF flow/runtime {2:1.3f} {3:1.3f} NCFlow flow/runtime/wc {4:1.3f} {5:1.3f} {6:1.3f}\n" 101 | .format(line_num, z, pf_flow, pf_runtime, nc_flow, nc_runtime, 102 | nc_wallclocktime)) 103 | 104 | # quit() 105 | -------------------------------------------------------------------------------- /scripts/generate_full_tms_for_fib_entries.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python 2 | 3 | # Use this script to generate a full TM so we can 4 | # compute the fib entries for a given topology 5 | import sys 6 | sys.path.append('..') 7 | 8 | from lib.problems import get_problem 9 | from pathos import multiprocessing 10 | import os 11 | 12 | TM_DIR = '../traffic-matrices/full-tms' 13 | 14 | 15 | def generate_traffic_matrix(prob_short_name): 16 | problem = get_problem(prob_short_name, 'gravity', scale=1.0, random=False) 17 | assert problem.traffic_matrix.is_full 18 | print(problem.name) 19 | problem.print_stats() 20 | 21 | try: 22 | problem.traffic_matrix.serialize(TM_DIR) 23 | except Exception: 24 | print('{} failed'.format(problem.name)) 25 | import traceback 26 | traceback.print_exc() 27 | 28 | 29 | if __name__ == '__main__': 30 | PROBLEM_SHORT_NAMES = [ 31 | 'gtsce', 32 | 'delta', 33 | 'us-carrier', 34 | 'tata', 35 | 'cogentco', 36 | 'dial', 37 | 'colt', 38 | 'interoute', 39 | 'ion', 40 | 'uninett', 41 | 'kdl', 42 | ] 43 | if not os.path.exists(TM_DIR): 44 | os.makedirs(TM_DIR) 45 | 46 | pool = multiprocessing.ProcessPool(len(PROBLEM_SHORT_NAMES)) 47 | pool.map(generate_traffic_matrix, PROBLEM_SHORT_NAMES) 48 | -------------------------------------------------------------------------------- /scripts/generate_traffic_matrices.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python 2 | 3 | from pathos import multiprocessing 4 | from itertools import product 5 | import numpy as np 6 | import traceback 7 | import os 8 | 9 | import sys 10 | sys.path.append('..') 11 | 12 | from lib.problems import get_problem 13 | 14 | TM_DIR = '../traffic-matrices' 15 | SCALE_FACTORS = [1., 2., 4., 8., 16., 32., 64., 128.]
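# Sweep size: the __main__ block below enumerates 11 topologies x 5 traffic models x
# 8 scale factors, and generate_traffic_matrix() draws NUM_SAMPLES = 5 matrices per
# combination, i.e. 2,200 serialized traffic matrices in a full run.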
16 | MODELS = [ 17 | 'gravity', 'uniform', 'poisson-high-intra', 'poisson-high-inter', 'bimodal' 18 | ] 19 | NUM_SAMPLES = 5 20 | 21 | 22 | def generate_traffic_matrix(args): 23 | prob_short_name, model, scale_factor = args 24 | tm_model_dir = os.path.join(TM_DIR, model) 25 | 26 | for _ in range(NUM_SAMPLES): 27 | print(prob_short_name, model, scale_factor) 28 | problem = get_problem(prob_short_name, 29 | model, 30 | scale_factor=scale_factor, 31 | seed=np.random.randint(2**31 - 1)) 32 | problem.print_stats() 33 | 34 | try: 35 | problem.traffic_matrix.serialize(tm_model_dir) 36 | except Exception: 37 | print('{}, model {}, scale factor {} failed'.format( 38 | problem.name, model, scale_factor)) 39 | traceback.print_exc() 40 | 41 | 42 | if __name__ == '__main__': 43 | PROBLEM_SHORT_NAMES = [ 44 | 'gtsce', 45 | 'delta', 46 | 'us-carrier', 47 | 'tata', 48 | 'cogentco', 49 | 'dial', 50 | 'colt', 51 | 'interoute', 52 | 'ion', 53 | 'uninett', 54 | 'kdl', 55 | ] 56 | if len(sys.argv) == 2 and sys.argv[1] == '--holdout': 57 | TM_DIR += '/holdout' 58 | 59 | if not os.path.exists(TM_DIR): 60 | os.makedirs(TM_DIR) 61 | for model in MODELS: 62 | tm_model_dir = os.path.join(TM_DIR, model) 63 | if not os.path.exists(tm_model_dir): 64 | os.makedirs(tm_model_dir) 65 | 66 | pool = multiprocessing.ProcessPool(14) 67 | pool.map(generate_traffic_matrix, 68 | product(PROBLEM_SHORT_NAMES, MODELS, SCALE_FACTORS)) 69 | -------------------------------------------------------------------------------- /scripts/grid_search.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python 2 | 3 | from itertools import product 4 | import numpy as np 5 | import traceback 6 | import os 7 | 8 | import sys 9 | sys.path.append('..') 10 | 11 | from lib.problem import Problem 12 | from lib.partitioning import FMPartitioning, SpectralClustering 13 | from lib.partitioning.utils import all_partitions_contiguous 14 | from lib.algorithms import NcfEpi 15 | from benchmarks.benchmark_consts import HOLDOUT_PROBLEMS 16 | 17 | OUTPUT_CSV = 'grid-search.csv' 18 | LOG_DIR = 'grid-search-logs' 19 | 20 | 21 | def print_(*args, file=None): 22 | if file is None: 23 | file = sys.stdout 24 | print(*args, file=file) 25 | file.flush() 26 | 27 | 28 | def grid_search( 29 | problem_name, 30 | topo_fname, 31 | tm_fname, 32 | num_paths_to_sweep=[4], 33 | edge_disjoint_to_sweep=[True, False], 34 | dist_metrics_to_sweep=['inv-cap'], 35 | partition_algos_to_sweep=['fm_partitioning', 'spectral_clustering'], 36 | num_parts_scale_factors_to_sweep=[1, 2, 3, 4]): 37 | 38 | problem = Problem.from_file(topo_fname, tm_fname) 39 | assert problem_name == problem.name 40 | print_(problem.name, tm_fname) 41 | traffic_seed = problem.traffic_matrix.seed 42 | total_demand = np.sum(problem.traffic_matrix.tm) 43 | print_('traffic seed: {}'.format(traffic_seed)) 44 | print_('traffic matrix model: {}'.format(problem.traffic_matrix.model)) 45 | print_('traffic scale factor: {}'.format( 46 | problem.traffic_matrix.scale_factor)) 47 | print_('total demand: {}'.format(total_demand)) 48 | 49 | num_parts_to_sweep = [ 50 | sf * int(np.sqrt(len(problem.G.nodes))) 51 | for sf in num_parts_scale_factors_to_sweep 52 | ] 53 | 54 | for partition_algo, num_partitions_to_set, num_paths, edge_disjoint, dist_metric in product( 55 | partition_algos_to_sweep, num_parts_to_sweep, num_paths_to_sweep, 56 | edge_disjoint_to_sweep, dist_metrics_to_sweep): 57 | if partition_algo == 'fm_partitioning': 58 | partitioner =
FMPartitioning(num_partitions_to_set) 59 | elif partition_algo == 'spectral_clustering': 60 | partitioner = SpectralClustering(num_partitions_to_set) 61 | 62 | print_( 63 | '\nNCFlow, {} partitioner, {} partitions, {} paths, edge disjoint {}, dist metric {}' 64 | .format(partition_algo, num_partitions_to_set, num_paths, 65 | edge_disjoint, dist_metric)) 66 | run_nc_dir = os.path.join( 67 | LOG_DIR, 'ncflow', partition_algo, 68 | '{}-partitions'.format(num_partitions_to_set), 69 | '{}-paths'.format(num_paths), 70 | 'edge-disjoint-{}'.format(edge_disjoint), 71 | '{}-dist-metric'.format(dist_metric)) 72 | if not os.path.exists(run_nc_dir): 73 | os.makedirs(run_nc_dir) 74 | 75 | with open( 76 | os.path.join( 77 | run_nc_dir, 78 | '{}-ncflow-{}_partitioner-{}_partitions-{}_paths-{}_edge_disjoint-{}_dist_metric.txt' 79 | .format(problem.name, partition_algo, 80 | num_partitions_to_set, num_paths, edge_disjoint, 81 | dist_metric)), 'w') as log: 82 | partition_vector = partitioner.partition(problem) 83 | if not all_partitions_contiguous(problem, partition_vector): 84 | print_( 85 | 'Topology {}, partitioner {}, num_partitions_to_set {} did not find a valid partition' 86 | .format(topo_fname, partition_algo, num_partitions_to_set)) 87 | continue 88 | 89 | try: 90 | ncflow = NcfEpi.new_max_flow(num_paths, 91 | edge_disjoint=edge_disjoint, 92 | dist_metric=dist_metric, 93 | out=log) 94 | ncflow.solve(problem, partitioner) 95 | 96 | num_partitions = len(np.unique(ncflow._partition_vector)) 97 | size_of_largest_partition = partitioner.size_of_largest_partition 98 | runtime = ncflow.runtime_est(14) 99 | total_flow = ncflow.obj_val 100 | with open(OUTPUT_CSV, 'a') as w: 101 | print_('{},{},{},{},{},{},{},{},{},{}'.format( 102 | problem.name, os.path.basename(tm_fname), 103 | partition_algo, num_partitions, 104 | size_of_largest_partition, num_paths, edge_disjoint, 105 | dist_metric, total_flow, runtime), 106 | file=w) 107 | except: 108 | print_( 109 | 'TM {}, {} partitioner, {} partitions, {} paths, edge disjoint {}, dist metric {} failed' 110 | .format(tm_fname, partition_algo, num_partitions_to_set, 111 | num_paths, edge_disjoint, dist_metric)) 112 | traceback.print_exc(file=sys.stdout) 113 | 114 | 115 | if __name__ == '__main__': 116 | with open(OUTPUT_CSV, 'a') as w: 117 | print_( 118 | 'problem,tm_fname,partition_algo,num_partitions,size_of_largest_partition,num_paths,edge_disjoint,dist_metric,total_flow,runtime', 119 | file=w) 120 | 121 | for problem_name, topo_fname, tm_fname in HOLDOUT_PROBLEMS: 122 | grid_search(problem_name, topo_fname, tm_fname) 123 | -------------------------------------------------------------------------------- /scripts/networks.py: -------------------------------------------------------------------------------- 1 | #! 
/usr/bin/env python 2 | 3 | import networkx as nx 4 | import numpy as np 5 | from networkx.readwrite import json_graph 6 | import json 7 | import sys 8 | import os 9 | 10 | OUTPUT_DIR = '../topologies' 11 | 12 | 13 | def uni_rand(low=-1, high=1): 14 | return (high - low) * np.random.rand() + low 15 | 16 | 17 | def read_graph_json(fname): 18 | assert fname.endswith('.json') 19 | with open(fname) as f: 20 | data = json.load(f) 21 | return json_graph.node_link_graph(data) 22 | 23 | 24 | def write_graph_json(fname, G): 25 | with open(fname, 'w') as f: 26 | json.dump(json_graph.node_link_data(G), f) 27 | 28 | 29 | def add_bi_edge(G, src, dest, capacity=None): 30 | G.add_edge(src, dest) 31 | G.add_edge(dest, src) 32 | if capacity: 33 | G[src][dest]['capacity'] = capacity 34 | G[dest][src]['capacity'] = capacity 35 | 36 | 37 | ################ 38 | # Toy Networks # 39 | ################ 40 | def two_srcs_from_meta_node(): 41 | G = nx.DiGraph() 42 | G.add_node(0, label='0', pos=(-2, 2)) 43 | G.add_node(1, label='1', pos=(-2, 1)) 44 | G.add_node(2, label='2', pos=(0, 2)) 45 | G.add_node(3, label='3', pos=(-1, 0)) 46 | G.add_node(4, label='4', pos=(1, 0)) 47 | 48 | add_bi_edge(G, 0, 2) 49 | add_bi_edge(G, 0, 1) 50 | add_bi_edge(G, 1, 3) 51 | add_bi_edge(G, 2, 3) 52 | add_bi_edge(G, 2, 4) 53 | add_bi_edge(G, 3, 4) 54 | 55 | return G 56 | 57 | 58 | def dumbell_bottleneck_network(): 59 | G = nx.DiGraph() 60 | G.add_node(0, label='0', pos=(-2, 2)) 61 | G.add_node(1, label='1', pos=(-2, 1.5)) 62 | 63 | G.add_node(2, label='2', pos=(0, 2)) 64 | G.add_node(3, label='3', pos=(0, 1.5)) 65 | 66 | G.add_node(4, label='4', pos=(-1, 1)) 67 | G.add_node(5, label='5', pos=(-1, 0.5)) 68 | 69 | G.add_node(6, label='6', pos=(1, 0)) 70 | G.add_node(7, label='7', pos=(1, -0.5)) 71 | 72 | # intra 73 | add_bi_edge(G, 0, 1) 74 | add_bi_edge(G, 2, 3) 75 | add_bi_edge(G, 4, 5) 76 | add_bi_edge(G, 6, 7) 77 | 78 | # inter 79 | add_bi_edge(G, 0, 2) 80 | add_bi_edge(G, 1, 4) 81 | add_bi_edge(G, 3, 4) 82 | add_bi_edge(G, 5, 6) 83 | add_bi_edge(G, 2, 6) 84 | 85 | return G 86 | 87 | 88 | def toy_network_1(): 89 | G = nx.DiGraph() 90 | G.add_node(0, label='0', pos=(-2, 2)) 91 | G.add_node(1, label='1', pos=(-1, 0)) 92 | G.add_node(2, label='2', pos=(-2, -2)) 93 | G.add_node(3, label='3', pos=(2, 2)) 94 | G.add_node(4, label='4', pos=(1, 0)) 95 | G.add_node(5, label='5', pos=(2, -2)) 96 | 97 | add_bi_edge(G, 0, 3) 98 | add_bi_edge(G, 0, 1) 99 | add_bi_edge(G, 1, 4) 100 | add_bi_edge(G, 1, 2) 101 | add_bi_edge(G, 2, 5) 102 | add_bi_edge(G, 3, 4) 103 | add_bi_edge(G, 4, 5) 104 | 105 | return G 106 | 107 | 108 | def toy_network_2(): 109 | G = nx.DiGraph() 110 | G.add_node(0, label='0', pos=(-2, 2)) 111 | G.add_node(1, label='1', pos=(-1, 0)) 112 | G.add_node(2, label='2', pos=(-2, -2)) 113 | G.add_node(3, label='3', pos=(2, 2)) 114 | G.add_node(4, label='4', pos=(1, 0)) 115 | G.add_node(5, label='5', pos=(2, -2)) 116 | 117 | G.add_node(6, label='6', pos=(-2, -4)) 118 | G.add_node(7, label='7', pos=(-1, -5)) 119 | G.add_node(8, label='8', pos=(-2, -6)) 120 | G.add_node(9, label='9', pos=(2, -4)) 121 | G.add_node(10, label='10', pos=(1, -5)) 122 | G.add_node(11, label='11', pos=(2, -6)) 123 | 124 | add_bi_edge(G, 0, 1) 125 | add_bi_edge(G, 0, 3) 126 | add_bi_edge(G, 1, 2) 127 | add_bi_edge(G, 1, 4) 128 | add_bi_edge(G, 2, 5) 129 | add_bi_edge(G, 3, 4) 130 | add_bi_edge(G, 4, 5) 131 | 132 | add_bi_edge(G, 6, 7) 133 | add_bi_edge(G, 6, 9) 134 | add_bi_edge(G, 7, 8) 135 | add_bi_edge(G, 7, 10) 136 | add_bi_edge(G, 8, 11) 137 | 
add_bi_edge(G, 9, 10) 138 | add_bi_edge(G, 10, 11) 139 | 140 | add_bi_edge(G, 2, 6) 141 | add_bi_edge(G, 1, 7) 142 | 143 | add_bi_edge(G, 4, 10) 144 | add_bi_edge(G, 5, 9) 145 | 146 | return G 147 | 148 | 149 | def toy_network_3(): 150 | G = nx.DiGraph() 151 | G.add_node(0, label='0', pos=(-2, 2)) 152 | G.add_node(1, label='1', pos=(-1, 0)) 153 | G.add_node(2, label='2', pos=(-2, -2)) 154 | G.add_node(3, label='3', pos=(2, 2)) 155 | G.add_node(4, label='4', pos=(1, 0)) 156 | G.add_node(5, label='5', pos=(2, -2)) 157 | 158 | add_bi_edge(G, 0, 1) 159 | add_bi_edge(G, 0, 3) 160 | add_bi_edge(G, 1, 2) 161 | add_bi_edge(G, 1, 4) 162 | add_bi_edge(G, 3, 4) 163 | add_bi_edge(G, 4, 5) 164 | return G 165 | 166 | 167 | def bottleneck_network(cap=10.0, epsilon=1e-3): 168 | G = nx.DiGraph() 169 | G.add_node(0, label='0', pos=(-2, 2)) 170 | G.add_node(1, label='1', pos=(-2, -2)) 171 | G.add_node(2, label='2', pos=(2, 2)) 172 | G.add_node(3, label='3', pos=(2, -2)) 173 | 174 | add_bi_edge(G, 0, 1, capacity=epsilon) 175 | add_bi_edge(G, 0, 2, capacity=cap) 176 | add_bi_edge(G, 1, 3, capacity=cap) 177 | add_bi_edge(G, 2, 3, capacity=epsilon) 178 | return G 179 | 180 | 181 | ################# 182 | # Real Networks # 183 | ################# 184 | def b4_teavar(): 185 | G = nx.DiGraph() 186 | with open('../topologies/b4-teavar-topology.txt') as f: 187 | f.readline() # skip header 188 | for line in f: 189 | vals = line.strip().split() 190 | # nodes are 1-indexed, capacity in Kbps 191 | from_node, to_node, cap = int(vals[0]) - 1, int( 192 | vals[1]) - 1, float(vals[2]) / 1000.0 193 | G.add_edge(from_node, to_node, capacity=cap) 194 | return G 195 | 196 | 197 | if __name__ == '__main__': 198 | arg = sys.argv[1] 199 | if arg == '1': 200 | G = toy_network_1() 201 | fname = 'toy-network.json' 202 | 203 | elif arg == '2': 204 | G = toy_network_2() 205 | fname = 'toy-network-2.json' 206 | 207 | elif arg == '3': 208 | G = toy_network_3() 209 | fname = 'toy-network-3.json' 210 | 211 | elif arg == 'b4-teavar': 212 | G = b4_teavar() 213 | fname = 'b4-teavar.json' 214 | 215 | elif arg == 'bottleneck': 216 | G = bottleneck_network() 217 | fname = 'bottleneck.json' 218 | 219 | elif arg == 'two-srcs': 220 | G = two_srcs_from_meta_node() 221 | fname = 'two-srcs.json' 222 | 223 | elif arg == 'dumbell-bottleneck': 224 | G = dumbell_bottleneck_network() 225 | fname = 'dumbell-bottleneck.json' 226 | 227 | data = json_graph.node_link_data(G) 228 | write_graph_json(os.path.join(OUTPUT_DIR, fname), G) 229 | -------------------------------------------------------------------------------- /scripts/pre_solve_path.py: -------------------------------------------------------------------------------- 1 | #!
/usr/bin/env python 2 | 3 | import os 4 | import pickle 5 | from pathos import multiprocessing 6 | 7 | import sys 8 | sys.path.append('..') 9 | 10 | from lib.problems import get_problem 11 | from lib.algorithms.path_formulation import PathFormulation, PATHS_DIR 12 | from lib.path_utils import graph_copy_with_edge_weights, find_paths 13 | 14 | global G 15 | global num_paths 16 | global edge_disjoint 17 | global LOAD_FROM_DISK 18 | LOAD_FROM_DISK = True 19 | 20 | 21 | def find_paths_wrapper(commod): 22 | k, (s_k, t_k, d_k) = commod 23 | if LOAD_FROM_DISK: 24 | if (s_k, t_k) not in paths_dict: 25 | paths = find_paths(G, s_k, t_k, num_paths, edge_disjoint) 26 | return ((s_k, t_k), paths) 27 | else: 28 | paths = find_paths(G, s_k, t_k, num_paths, edge_disjoint) 29 | return ((s_k, t_k), paths) 30 | 31 | 32 | if __name__ == '__main__': 33 | problem = get_problem(sys.argv[1], model='gravity', random=False) 34 | assert problem.traffic_matrix.is_full 35 | 36 | global num_paths 37 | num_paths = int(sys.argv[2]) 38 | 39 | dist_metric = sys.argv[3] 40 | 41 | global edge_disjoint 42 | if sys.argv[4] == 'True': 43 | edge_disjoint = True 44 | elif sys.argv[4] == 'False': 45 | edge_disjoint = False 46 | else: 47 | raise Exception('invalid argument for edge_disjoint: {}'.format( 48 | sys.argv[4])) 49 | 50 | if not os.path.exists(PATHS_DIR): 51 | os.makedirs(PATHS_DIR) 52 | 53 | paths_fname = PathFormulation.paths_full_fname(problem, num_paths, 54 | edge_disjoint, dist_metric) 55 | 56 | if LOAD_FROM_DISK: 57 | print('Loading paths from pickle file', paths_fname) 58 | try: 59 | with open(paths_fname, 'rb') as f: 60 | paths_dict = pickle.load(f) 61 | print('paths_dict: ', len(paths_dict)) 62 | except FileNotFoundError: 63 | print('Unable to find {}'.format(paths_fname)) 64 | paths_dict = {} 65 | 66 | global G 67 | G = graph_copy_with_edge_weights(problem.G, dist_metric) 68 | 69 | pool = multiprocessing.ProcessPool(28) 70 | new_paths_dict = pool.map(find_paths_wrapper, problem.commodity_list) 71 | for ret_val in new_paths_dict: 72 | if ret_val is not None: 73 | k, v = ret_val 74 | paths_dict[k] = v 75 | 76 | print('paths_dict: ', len(paths_dict)) 77 | print('Saving paths to pickle file') 78 | with open(paths_fname, 'wb') as w: 79 | pickle.dump(paths_dict, w) 80 | -------------------------------------------------------------------------------- /scripts/run_yates_raeke.py: -------------------------------------------------------------------------------- 1 | #! 
/usr/bin/env python 2 | 3 | import os 4 | import subprocess 5 | from glob import iglob 6 | from pathos import multiprocessing 7 | 8 | import sys 9 | sys.path.append('..') 10 | 11 | from lib.config import TL_DIR 12 | from benchmarks.benchmark_consts import PROBLEM_NAMES 13 | 14 | YATES_HOME_DIR = os.path.join(os.getenv('HOME'), 'src', 'yates') 15 | YATES_TOPO_DIR = os.path.abspath( 16 | os.path.join(TL_DIR, 'topologies', 'yates-format')) 17 | YATES_TM_DIR = os.path.abspath( 18 | os.path.join(TL_DIR, 'traffic-matrices', 'yates-format')) 19 | YATES_HOSTS_DIR = os.path.join(YATES_TM_DIR, 'hosts') 20 | YATES_RESULTS_DIR = os.path.join('data', 'results') 21 | OUTPUT_DIR = os.path.join(TL_DIR, 'topologies', 'paths', 'raeke') 22 | 23 | # yates ${TL_DIR}/topologies/yates-format/GtsCe.dot \ 24 | # ${TL_DIR}/traffic-matrices/yates-format/GtsCe.graphml_traffic-matrix.txt \ 25 | # ${TL_DIR}/traffic-matrices/yates-format/GtsCe.graphml_traffic-matrix.txt \ 26 | # ${TL_DIR}/topologies/yates-format/GtsCe.hosts -raeke -budget 4 27 | def run_yates(args): 28 | os.chdir(YATES_HOME_DIR) 29 | problem_name, tm_fname, num_paths = args 30 | problem_name_dot = problem_name.replace('.graphml', 31 | '.dot').replace('.json', '.dot') 32 | cmd = [ 33 | 'yates', 34 | os.path.join(YATES_TOPO_DIR, problem_name_dot), 35 | os.path.join(YATES_TM_DIR, tm_fname), 36 | os.path.join(YATES_TM_DIR, tm_fname), 37 | os.path.join(YATES_HOSTS_DIR, 38 | problem_name_dot.replace('.dot', '.hosts')), '-raeke', 39 | '-budget', 40 | str(num_paths) 41 | ] 42 | print(' '.join(cmd)) 43 | print() 44 | subprocess.call(cmd) 45 | 46 | os.chdir( 47 | os.path.join(YATES_RESULTS_DIR, problem_name_dot.replace('.dot', ''), 48 | 'paths')) 49 | os.rename( 50 | 'raeke_0', 51 | os.path.join(OUTPUT_DIR, 52 | '{}-{}-paths-rrt.txt'.format(problem_name, num_paths))) 53 | 54 | 55 | # NOTE: run eval `opam config env` before executing 56 | if __name__ == '__main__': 57 | num_paths = int(sys.argv[1]) 58 | run_args = [] 59 | if not os.path.exists(OUTPUT_DIR): 60 | os.makedirs(OUTPUT_DIR) 61 | 62 | for problem_name in PROBLEM_NAMES: 63 | print(problem_name) 64 | for tm_fname in iglob( 65 | os.path.join(YATES_TM_DIR, 66 | '{}_traffic-matrix.txt'.format(problem_name))): 67 | run_args.append((problem_name, tm_fname, num_paths)) 68 | 69 | pool = multiprocessing.ProcessPool(7) 70 | pool.map(run_yates, run_args) 71 | -------------------------------------------------------------------------------- /scripts/serialize_all_fleischer.py: -------------------------------------------------------------------------------- 1 | #! 
/usr/bin/env python 2 | 3 | from pathos import multiprocessing 4 | import argparse 5 | import os 6 | import pickle 7 | 8 | import sys 9 | sys.path.append('..') 10 | 11 | from lib.config import TL_DIR 12 | from benchmarks.benchmark_consts import get_problems 13 | from lib.problem import Problem 14 | from lib.algorithms.path_formulation import PathFormulation 15 | 16 | OUTPUT_DIR_ROOT = os.path.join(TL_DIR, 'traffic-matrices') 17 | 18 | 19 | def serialize_problem(prob, fname): 20 | with open(fname, 'w') as w: 21 | print(len(prob.G.nodes), file=w) 22 | for node in prob.G.nodes: 23 | print(node, 0, 0, file=w) 24 | 25 | print(len(prob.G.edges), file=w) 26 | for e, (u, v, c_e) in enumerate(prob.G.edges.data('capacity')): 27 | print(e, u, v, c_e, 1.0, file=w) 28 | 29 | print(len(prob.commodity_list), file=w) 30 | for k, (s_k, t_k, d_k) in prob.commodity_list: 31 | print(k, s_k, t_k, d_k, file=w) 32 | 33 | 34 | def serialize_paths(fname, paths_dict_fname): 35 | with open(paths_dict_fname, 'rb') as f: 36 | paths_dict = pickle.load(f) 37 | 38 | with open(fname, 'a') as w: 39 | for (src, target), paths in paths_dict.items(): 40 | print('{} -> {}'.format(src, target), file=w) 41 | for path in paths: 42 | print('[' + ','.join(str(x) for x in path) + ']', file=w) 43 | print(file=w) 44 | 45 | 46 | def serialize(run_args): 47 | prob_name, topo_fname, tm_fname, output_fname, cmd_args = run_args 48 | print(prob_name, topo_fname, tm_fname) 49 | if cmd_args.paths: 50 | num_paths = cmd_args.num_paths 51 | edge_disjoint = cmd_args.edge_disjoint 52 | dist_metric = cmd_args.dist_metric 53 | 54 | prob = Problem.from_file(topo_fname, tm_fname) 55 | serialize_problem(prob, output_fname) 56 | 57 | if cmd_args.paths: 58 | paths_dict_fname = PathFormulation.paths_full_fname( 59 | prob, num_paths, edge_disjoint, dist_metric) 60 | serialize_paths(output_fname, paths_dict_fname) 61 | 62 | 63 | if __name__ == '__main__': 64 | parser = argparse.ArgumentParser() 65 | subparsers = parser.add_subparsers() 66 | 67 | # create the parser for the "path" command 68 | parser_path = subparsers.add_parser('path') 69 | parser_path.set_defaults(paths=True) 70 | parser_path.add_argument('--num-paths', type=int, required=True) 71 | parser_path.add_argument('--edge-disjoint', type=lambda x: x == 'True', required=True)  # argparse's type=bool would treat 'False' as True 72 | parser_path.add_argument('--dist-metric', 73 | type=str, 74 | choices=('inv-cap', 'min-hop'), 75 | required=True) 76 | 77 | # create the parser for the "edge" command 78 | parser_edge = subparsers.add_parser('edge') 79 | parser_edge.set_defaults(paths=False) 80 | 81 | args = parser.parse_args() 82 | if args.paths: 83 | output_dir_tl = os.path.join(OUTPUT_DIR_ROOT, 84 | 'fleischer-with-paths-format') 85 | fname_placeholder = '{}_' + '{}-paths_edge_disjoint-{}_dist_metric-{}.txt'.format( 86 | args.num_paths, args.edge_disjoint, args.dist_metric) 87 | else: 88 | output_dir_tl = os.path.join(OUTPUT_DIR_ROOT, 'fleischer-edge-format') 89 | fname_placeholder = '{}.txt' 90 | 91 | run_args = [] 92 | for slice in range(5): 93 | args.slices = [slice] 94 | problems = get_problems(args) 95 | output_dir = os.path.join(output_dir_tl, 'slice-{}'.format(slice)) 96 | if not os.path.exists(output_dir): 97 | os.makedirs(output_dir) 98 | 99 | for prob_name, topo_fname, tm_fname in problems: 100 | output_fname = os.path.join( 101 | output_dir, 102 | fname_placeholder.format(os.path.basename(tm_fname))) 103 | run_args.append( 104 | (prob_name, topo_fname, tm_fname, output_fname, args)) 105 | 106 | pool = multiprocessing.ProcessPool(14) 107 | pool.map(serialize, run_args) 108 
| -------------------------------------------------------------------------------- /scripts/serialize_all_yates.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python 2 | 3 | import networkx as nx 4 | import numpy as np 5 | from glob import iglob 6 | import pickle 7 | import os 8 | 9 | import sys 10 | sys.path.append('..') 11 | 12 | from benchmarks.benchmark_consts import PROBLEM_NAMES 13 | from lib.problem import Problem 14 | 15 | OUTPUT_DIR = '../traffic-matrices/yates-format' 16 | TOPOLOGIES_OUTPUT_DIR = '../topologies/yates-format' 17 | HOSTS_OUTPUT_DIR = os.path.join(OUTPUT_DIR, 'hosts') 18 | 19 | if __name__ == '__main__': 20 | if not os.path.exists(OUTPUT_DIR): 21 | os.makedirs(OUTPUT_DIR) 22 | 23 | if not os.path.exists(HOSTS_OUTPUT_DIR): 24 | os.makedirs(HOSTS_OUTPUT_DIR) 25 | 26 | if not os.path.exists(TOPOLOGIES_OUTPUT_DIR): 27 | os.makedirs(TOPOLOGIES_OUTPUT_DIR) 28 | 29 | for prob_name in PROBLEM_NAMES: 30 | print(prob_name) 31 | 32 | full_tm = None 33 | for tm_fname in iglob( 34 | '../traffic-matrices/full-tms/{}*_traffic-matrix.pkl'.format( 35 | prob_name)): 36 | with open(tm_fname, 'rb') as f: 37 | full_tm = pickle.load(f) 38 | 39 | if prob_name.endswith('.graphml'): 40 | problem = Problem.from_file( 41 | '../topologies/topology-zoo/{}'.format(prob_name), tm_fname) 42 | else: 43 | problem = Problem.from_file('../topologies/{}'.format(prob_name), 44 | tm_fname) 45 | 46 | for node, data in problem.G.nodes.data(True): 47 | keys = list(data.keys()) 48 | for key in keys: 49 | if key not in ['type', 'ip', 'mac']: 50 | del problem.G.nodes[node][key] 51 | problem.G.nodes[node]['type'] = 'host' 52 | problem.G.nodes[node]['ip'] = '111.0.{}.{}'.format(node, node) 53 | problem.G.nodes[node]['mac'] = '00:00:00:00:{}:{}'.format( 54 | node, node) 55 | 56 | edges_to_delete = [] 57 | for u, v, cap in problem.G.edges.data('capacity'): 58 | if cap == 0.0: 59 | edges_to_delete.append((u, v)) 60 | else: 61 | problem.G[u][v]['capacity'] = '{}Gbps'.format( 62 | np.around(cap / 1e3)) 63 | problem.G.remove_edges_from(edges_to_delete) 64 | 65 | nx.drawing.nx_agraph.write_dot( 66 | problem.G, 67 | os.path.join( 68 | TOPOLOGIES_OUTPUT_DIR, 69 | prob_name.replace('.graphml', '.dot').replace('.json', 70 | '.dot'))) 71 | 72 | with open( 73 | os.path.join( 74 | HOSTS_OUTPUT_DIR, 75 | prob_name.replace('.graphml', 76 | '.hosts').replace('.json', '.hosts')), 77 | 'w') as w: 78 | for node in problem.G.nodes: 79 | print(node, file=w) 80 | 81 | new_basename = '{}_traffic-matrix.txt'.format(prob_name) 82 | with open(os.path.join(OUTPUT_DIR, new_basename), 'w') as w: 83 | for row in full_tm[:-1, :]: 84 | print(' '.join(str(x) for x in row), end=' ', file=w) 85 | print(' '.join(str(x) for x in full_tm[-1]), end='', file=w) 86 | -------------------------------------------------------------------------------- /topologies/.gitignore: -------------------------------------------------------------------------------- 1 | paths/ 2 | -------------------------------------------------------------------------------- /topologies/b4-teavar.json: -------------------------------------------------------------------------------- 1 | {"directed": true, "multigraph": false, "graph": {}, "nodes": [{"id": 0}, {"id": 1}, {"id": 2}, {"id": 4}, {"id": 3}, {"id": 5}, {"id": 6}, {"id": 7}, {"id": 10}, {"id": 9}, {"id": 8}, {"id": 11}], "links": [{"capacity": 5000.0, "source": 0, "target": 1}, {"capacity": 5000.0, "source": 0, "target": 2}, {"capacity": 5000.0, "source": 1, "target": 
0}, {"capacity": 5000.0, "source": 1, "target": 4}, {"capacity": 5000.0, "source": 2, "target": 0}, {"capacity": 5000.0, "source": 2, "target": 3}, {"capacity": 5000.0, "source": 2, "target": 5}, {"capacity": 5000.0, "source": 4, "target": 1}, {"capacity": 5000.0, "source": 4, "target": 3}, {"capacity": 5000.0, "source": 4, "target": 5}, {"capacity": 5000.0, "source": 3, "target": 2}, {"capacity": 5000.0, "source": 3, "target": 4}, {"capacity": 5000.0, "source": 3, "target": 6}, {"capacity": 5000.0, "source": 3, "target": 7}, {"capacity": 5000.0, "source": 5, "target": 2}, {"capacity": 5000.0, "source": 5, "target": 4}, {"capacity": 5000.0, "source": 5, "target": 6}, {"capacity": 5000.0, "source": 5, "target": 7}, {"capacity": 5000.0, "source": 6, "target": 3}, {"capacity": 5000.0, "source": 6, "target": 5}, {"capacity": 5000.0, "source": 6, "target": 7}, {"capacity": 5000.0, "source": 6, "target": 10}, {"capacity": 5000.0, "source": 7, "target": 3}, {"capacity": 5000.0, "source": 7, "target": 5}, {"capacity": 5000.0, "source": 7, "target": 6}, {"capacity": 5000.0, "source": 7, "target": 9}, {"capacity": 5000.0, "source": 10, "target": 6}, {"capacity": 5000.0, "source": 10, "target": 8}, {"capacity": 5000.0, "source": 10, "target": 9}, {"capacity": 5000.0, "source": 10, "target": 11}, {"capacity": 5000.0, "source": 9, "target": 7}, {"capacity": 5000.0, "source": 9, "target": 8}, {"capacity": 5000.0, "source": 9, "target": 10}, {"capacity": 5000.0, "source": 9, "target": 11}, {"capacity": 5000.0, "source": 8, "target": 9}, {"capacity": 5000.0, "source": 8, "target": 10}, {"capacity": 5000.0, "source": 11, "target": 9}, {"capacity": 5000.0, "source": 11, "target": 10}]} -------------------------------------------------------------------------------- /topologies/bottleneck-dumbell.json: -------------------------------------------------------------------------------- 1 | {"directed": true, "multigraph": false, "graph": {}, "nodes": [{"label": "0", "pos": [-2, 2], "id": 0}, {"label": "1", "pos": [-2, 1.5], "id": 1}, {"label": "2", "pos": [0, 2], "id": 2}, {"label": "3", "pos": [0, 1.5], "id": 3}, {"label": "4", "pos": [-1, 0], "id": 4}, {"label": "5", "pos": [-1, -0.5], "id": 5}, {"label": "6", "pos": [1, 0], "id": 6}, {"label": "7", "pos": [1, -0.5], "id": 7}], "links": [{"source": 0, "target": 1}, {"source": 0, "target": 2}, {"source": 1, "target": 0}, {"source": 2, "target": 3}, {"source": 2, "target": 0}, {"source": 2, "target": 7}, {"source": 3, "target": 2}, {"source": 3, "target": 4}, {"source": 4, "target": 5}, {"source": 4, "target": 3}, {"source": 5, "target": 4}, {"source": 5, "target": 6}, {"source": 6, "target": 7}, {"source": 6, "target": 5}, {"source": 7, "target": 6}, {"source": 7, "target": 2}]} -------------------------------------------------------------------------------- /topologies/bottleneck.json: -------------------------------------------------------------------------------- 1 | {"directed": true, "multigraph": false, "graph": {}, "nodes": [{"label": "0", "pos": [-2, 2], "id": 0}, {"label": "1", "pos": [-2, -2], "id": 1}, {"label": "2", "pos": [2, 2], "id": 2}, {"label": "3", "pos": [2, -2], "id": 3}], "links": [{"capacity": 0.001, "source": 0, "target": 1}, {"capacity": 10.0, "source": 0, "target": 2}, {"capacity": 0.001, "source": 1, "target": 0}, {"capacity": 10.0, "source": 1, "target": 3}, {"capacity": 10.0, "source": 2, "target": 0}, {"capacity": 0.001, "source": 2, "target": 3}, {"capacity": 10.0, "source": 3, "target": 1}, {"capacity": 0.001, "source": 3, 
"target": 2}]} -------------------------------------------------------------------------------- /topologies/dumbell-bottleneck.json: -------------------------------------------------------------------------------- 1 | {"directed": true, "multigraph": false, "graph": {}, "nodes": [{"label": "0", "pos": [-2, 2], "id": 0}, {"label": "1", "pos": [-2, 1.5], "id": 1}, {"label": "2", "pos": [0, 2], "id": 2}, {"label": "3", "pos": [0, 1.5], "id": 3}, {"label": "4", "pos": [-1, 1], "id": 4}, {"label": "5", "pos": [-1, 0.5], "id": 5}, {"label": "6", "pos": [1, 0], "id": 6}, {"label": "7", "pos": [1, -0.5], "id": 7}], "links": [{"source": 0, "target": 1}, {"source": 0, "target": 2}, {"source": 1, "target": 0}, {"source": 1, "target": 4}, {"source": 2, "target": 3}, {"source": 2, "target": 0}, {"source": 2, "target": 6}, {"source": 3, "target": 2}, {"source": 3, "target": 4}, {"source": 4, "target": 5}, {"source": 4, "target": 1}, {"source": 4, "target": 3}, {"source": 5, "target": 4}, {"source": 5, "target": 6}, {"source": 6, "target": 7}, {"source": 6, "target": 5}, {"source": 6, "target": 2}, {"source": 7, "target": 6}]} -------------------------------------------------------------------------------- /topologies/feasible1.json: -------------------------------------------------------------------------------- 1 | { 2 | "directed": true, 3 | "multigraph": false, 4 | "graph": {}, 5 | "nodes": [ 6 | {"label": "0", "pos": [-2, 2], "id": 0}, 7 | {"label": "1", "pos": [-2, -2], "id": 1}, 8 | {"label": "2", "pos": [-1, 0], "id": 2}, 9 | {"label": "3", "pos": [0, 0], "id": 3}, 10 | {"label": "4", "pos": [1, 0], "id": 4}, 11 | {"label": "5", "pos": [2, 0], "id": 5}, 12 | {"label": "6", "pos": [3, 2], "id": 6}, 13 | {"label": "7", "pos": [3, -2], "id": 7} 14 | ], 15 | "links": [ 16 | {"source": 0, "target": 2, "capacity": 1}, 17 | {"source": 1, "target": 2, "capacity": 5}, 18 | {"source": 2, "target": 3}, 19 | {"source": 3, "target": 4}, 20 | {"source": 4, "target": 5}, 21 | {"source": 5, "target": 6, "capacity": 5}, 22 | {"source": 5, "target": 7, "capacity": 1} 23 | ] 24 | } 25 | -------------------------------------------------------------------------------- /topologies/toy-network-2.json: -------------------------------------------------------------------------------- 1 | {"directed": true, "multigraph": false, "graph": {}, "nodes": [{"label": "0", "pos": [-2, 2], "id": 0}, {"label": "1", "pos": [-1, 0], "id": 1}, {"label": "2", "pos": [-2, -2], "id": 2}, {"label": "3", "pos": [2, 2], "id": 3}, {"label": "4", "pos": [1, 0], "id": 4}, {"label": "5", "pos": [2, -2], "id": 5}, {"label": "6", "pos": [-2, -4], "id": 6}, {"label": "7", "pos": [-1, -5], "id": 7}, {"label": "8", "pos": [-2, -6], "id": 8}, {"label": "9", "pos": [2, -4], "id": 9}, {"label": "10", "pos": [1, -5], "id": 10}, {"label": "11", "pos": [2, -6], "id": 11}], "links": [{"source": 0, "target": 1}, {"source": 0, "target": 3}, {"source": 1, "target": 0}, {"source": 1, "target": 2}, {"source": 1, "target": 4}, {"source": 1, "target": 7}, {"source": 2, "target": 1}, {"source": 2, "target": 5}, {"source": 2, "target": 6}, {"source": 3, "target": 0}, {"source": 3, "target": 4}, {"source": 4, "target": 1}, {"source": 4, "target": 3}, {"source": 4, "target": 5}, {"source": 4, "target": 10}, {"source": 5, "target": 2}, {"source": 5, "target": 4}, {"source": 5, "target": 9}, {"source": 6, "target": 7}, {"source": 6, "target": 9}, {"source": 6, "target": 2}, {"source": 7, "target": 6}, {"source": 7, "target": 8}, {"source": 7, "target": 10}, {"source": 
7, "target": 1}, {"source": 8, "target": 7}, {"source": 8, "target": 11}, {"source": 9, "target": 6}, {"source": 9, "target": 10}, {"source": 9, "target": 5}, {"source": 10, "target": 7}, {"source": 10, "target": 9}, {"source": 10, "target": 11}, {"source": 10, "target": 4}, {"source": 11, "target": 8}, {"source": 11, "target": 10}]} -------------------------------------------------------------------------------- /topologies/toy-network-3.json: -------------------------------------------------------------------------------- 1 | {"directed": true, "multigraph": false, "graph": {}, "nodes": [{"label": "0", "pos": [-2, 2], "id": 0}, {"label": "1", "pos": [-1, 0], "id": 1}, {"label": "2", "pos": [-2, -2], "id": 2}, {"label": "3", "pos": [2, 2], "id": 3}, {"label": "4", "pos": [1, 0], "id": 4}, {"label": "5", "pos": [2, -2], "id": 5}, {"label": "6", "pos": [-2, -4], "id": 6}, {"label": "7", "pos": [-1, -5], "id": 7}, {"label": "8", "pos": [-2, -6], "id": 8}, {"label": "9", "pos": [2, -4], "id": 9}, {"label": "10", "pos": [1, -5], "id": 10}, {"label": "11", "pos": [2, -6], "id": 11}], "links": [{"source": 0, "target": 1}, {"source": 0, "target": 3}, {"source": 1, "target": 0}, {"source": 1, "target": 2}, {"source": 1, "target": 4}, {"source": 1, "target": 7}, {"source": 2, "target": 1}, {"source": 2, "target": 5}, {"source": 2, "target": 6}, {"source": 3, "target": 0}, {"source": 3, "target": 4}, {"source": 4, "target": 1}, {"source": 4, "target": 3}, {"source": 4, "target": 5}, {"source": 4, "target": 10}, {"source": 5, "target": 2}, {"source": 5, "target": 4}, {"source": 5, "target": 9}, {"source": 6, "target": 7}, {"source": 6, "target": 9}, {"source": 6, "target": 2}, {"source": 7, "target": 6}, {"source": 7, "target": 8}, {"source": 7, "target": 10}, {"source": 7, "target": 1}, {"source": 8, "target": 7}, {"source": 8, "target": 11}, {"source": 9, "target": 6}, {"source": 9, "target": 10}, {"source": 9, "target": 5}, {"source": 10, "target": 7}, {"source": 10, "target": 9}, {"source": 10, "target": 11}, {"source": 10, "target": 4}, {"source": 11, "target": 8}, {"source": 11, "target": 10}]} -------------------------------------------------------------------------------- /topologies/toy-network.json: -------------------------------------------------------------------------------- 1 | {"directed": true, "multigraph": false, "graph": {}, "nodes": [{"label": "0", "pos": [-2, 2], "id": 0}, {"label": "1", "pos": [-1, 0], "id": 1}, {"label": "2", "pos": [-2, -2], "id": 2}, {"label": "3", "pos": [2, 2], "id": 3}, {"label": "4", "pos": [1, 0], "id": 4}, {"label": "5", "pos": [2, -2], "id": 5}], "links": [{"source": 0, "target": 3}, {"source": 0, "target": 1}, {"source": 1, "target": 0}, {"source": 1, "target": 4}, {"source": 1, "target": 2}, {"source": 2, "target": 1}, {"source": 2, "target": 5}, {"source": 3, "target": 0}, {"source": 3, "target": 4}, {"source": 4, "target": 1}, {"source": 4, "target": 3}, {"source": 4, "target": 5}, {"source": 5, "target": 2}, {"source": 5, "target": 4}]} -------------------------------------------------------------------------------- /topologies/two-srcs.json: -------------------------------------------------------------------------------- 1 | {"directed": true, "multigraph": false, "graph": {}, "nodes": [{"label": "0", "pos": [-2, 2], "id": 0}, {"label": "1", "pos": [-2, 1], "id": 1}, {"label": "2", "pos": [0, 2], "id": 2}, {"label": "3", "pos": [-1, 0], "id": 3}, {"label": "4", "pos": [1, 0], "id": 4}], "links": [{"source": 0, "target": 2}, {"source": 0, 
"target": 1}, {"source": 1, "target": 0}, {"source": 1, "target": 3}, {"source": 2, "target": 0}, {"source": 2, "target": 3}, {"source": 2, "target": 4}, {"source": 3, "target": 1}, {"source": 3, "target": 2}, {"source": 3, "target": 4}, {"source": 4, "target": 2}, {"source": 4, "target": 3}]} --------------------------------------------------------------------------------