├── .gitattributes
├── MODULE.bazel
├── traffic
│   ├── data
│   │   ├── AliStorage.txt
│   │   ├── MsftWebSearch.txt
│   │   ├── FbHadoop.txt
│   │   └── GoogleRPC.txt
│   ├── BUILD
│   ├── traffic.py
│   └── tmgen.py
├── common
│   ├── common.py
│   ├── BUILD
│   └── flags.py
├── tests
│   ├── data
│   │   ├── toy2_traffic.textproto
│   │   ├── toy2_te_sol.textproto
│   │   ├── toy4_te_sol.textproto
│   │   └── toy2_topo.textproto
│   ├── BUILD
│   ├── global_TE_test.py
│   ├── wcmp_alloc_test.py
│   └── load_toy_test.py
├── globalTE
│   ├── BUILD
│   └── global_te.py
├── proto
│   ├── BUILD
│   ├── traffic.proto
│   ├── te_solution.proto
│   └── topology.proto
├── localTE
│   ├── BUILD
│   └── test_driver.py
├── e2e
│   ├── BUILD
│   ├── run.py
│   └── tracegen.py
├── scripts
│   ├── time_extract.py
│   ├── parse.py
│   └── test_dep.py
├── topology
│   ├── BUILD
│   ├── graph_db.py
│   └── striping_plan.py
├── LICENSE
├── FabricEval-logo.svg
├── README.md
└── .gitignore

--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | 

--------------------------------------------------------------------------------
/MODULE.bazel:
--------------------------------------------------------------------------------
1 | bazel_dep(name = "protobuf", version = "3.19.6")
2 | 

--------------------------------------------------------------------------------
/traffic/data/AliStorage.txt:
--------------------------------------------------------------------------------
1 | 0 0
2 | 4000 22.93
3 | 8000 69.21
4 | 16000 80.61
5 | 32000 90.47
6 | 64000 93.53
7 | 128000 96.77
8 | 256000 97.53
9 | 2000000 100
10 | 

--------------------------------------------------------------------------------
/traffic/data/MsftWebSearch.txt:
--------------------------------------------------------------------------------
1 | 0 0
2 | 10000 15
3 | 20000 20
4 | 30000 30
5 | 50000 40
6 | 80000 53
7 | 200000 60
8 | 1000000 70
9 | 2000000 80
10 | 5000000 90
11 | 10000000 97
12 | 30000000 100
13 | 

--------------------------------------------------------------------------------
/common/common.py:
--------------------------------------------------------------------------------
1 | import common.flags as FLAG
2 | 
3 | 
4 | def PRINTV(verbose, logstr):
5 |     '''
6 |     Print helper with verbosity control.
7 |     '''
8 |     if FLAG.VERBOSE >= verbose:
9 |         print(logstr, flush=True)
10 | 

--------------------------------------------------------------------------------
/traffic/data/FbHadoop.txt:
--------------------------------------------------------------------------------
1 | 0 0
2 | 100 1
3 | 200 2
4 | 300 5
5 | 350 15
6 | 400 20
7 | 500 30
8 | 600 40
9 | 700 50
10 | 1000 60
11 | 2000 67
12 | 7000 70
13 | 30000 72
14 | 50000 82
15 | 80000 87
16 | 120000 90
17 | 300000 95
18 | 1000000 97.5
19 | 2000000 99
20 | 10000000 100
21 | 

--------------------------------------------------------------------------------
/common/BUILD:
--------------------------------------------------------------------------------
1 | package(default_visibility = ["//visibility:public"])
2 | 
3 | py_library(
4 |     name = 'common',
5 |     srcs = ['common.py'],
6 |     deps = [
7 |     ],
8 | )
9 | 
10 | py_library(
11 |     name = 'flags',
12 |     srcs = ['flags.py'],
13 |     deps = [
14 |     ],
15 | )
16 | 

--------------------------------------------------------------------------------
/tests/data/toy2_traffic.textproto:
--------------------------------------------------------------------------------
1 | # Traffic demand for network toy2.
2 | type: LEVEL_AGGR_BLOCK 3 | demands { 4 | src: "toy2-c1-ab1" 5 | dst: "toy2-c3-ab1" 6 | volume_mbps: 300000 7 | } 8 | demands { 9 | src: "toy2-c3-ab1" 10 | dst: "toy2-c1-ab1" 11 | volume_mbps: 100000 12 | } 13 | -------------------------------------------------------------------------------- /globalTE/BUILD: -------------------------------------------------------------------------------- 1 | package(default_visibility = ["//visibility:public"]) 2 | 3 | py_library( 4 | name = 'global_te', 5 | srcs = ['global_te.py'], 6 | deps = [ 7 | "//proto:topology_proto", 8 | "//proto:traffic_proto", 9 | "//proto:te_solution_proto", 10 | '//topology:topology', 11 | '//traffic:traffic', 12 | '//common:common', 13 | '//common:flags', 14 | ], 15 | ) 16 | -------------------------------------------------------------------------------- /traffic/BUILD: -------------------------------------------------------------------------------- 1 | package(default_visibility = ["//visibility:public"]) 2 | 3 | filegroup( 4 | name = "tracedata", 5 | srcs = glob([ 6 | "data/*.txt", 7 | ]), 8 | ) 9 | 10 | py_library( 11 | name = 'traffic', 12 | srcs = ['traffic.py'], 13 | deps = [ 14 | "//proto:traffic_proto", 15 | ], 16 | ) 17 | 18 | py_library( 19 | name = 'tmgen', 20 | srcs = ['tmgen.py'], 21 | deps = [ 22 | "//proto:traffic_proto", 23 | "//common:flags", 24 | ], 25 | ) 26 | -------------------------------------------------------------------------------- /proto/BUILD: -------------------------------------------------------------------------------- 1 | # Credits: https://thethoughtfulkoala.com/posts/2020/05/08/py-protobuf-bazel.html 2 | package(default_visibility = ["//visibility:public"]) 3 | 4 | load("@protobuf//:protobuf.bzl", "py_proto_library") 5 | 6 | py_proto_library( 7 | name = "topology_proto", 8 | srcs = ["topology.proto"], 9 | ) 10 | 11 | py_proto_library( 12 | name = "traffic_proto", 13 | srcs = ["traffic.proto"], 14 | ) 15 | 16 | py_proto_library( 17 | name = "te_solution_proto", 18 | srcs = ["te_solution.proto"], 19 | ) 20 | -------------------------------------------------------------------------------- /localTE/BUILD: -------------------------------------------------------------------------------- 1 | package(default_visibility = ["//visibility:public"]) 2 | 3 | py_library( 4 | name = 'wcmp_alloc', 5 | srcs = ['wcmp_alloc.py'], 6 | deps = [ 7 | "//proto:te_solution_proto", 8 | "//common:common", 9 | ], 10 | ) 11 | 12 | py_library( 13 | name = 'group_reduction', 14 | srcs = ['group_reduction.py'], 15 | deps = [ 16 | "//common:common", 17 | "//common:flags", 18 | "//proto:te_solution_proto", 19 | ], 20 | ) 21 | 22 | py_binary( 23 | name = 'test_driver', 24 | srcs = ['test_driver.py'], 25 | deps = [ 26 | "//common:flags", 27 | '//localTE:group_reduction', 28 | ], 29 | python_version = 'PY3', 30 | ) 31 | -------------------------------------------------------------------------------- /e2e/BUILD: -------------------------------------------------------------------------------- 1 | package(default_visibility = ["//visibility:public"]) 2 | 3 | py_binary( 4 | name = 'run', 5 | srcs = ['run.py'], 6 | deps = [ 7 | '//topology:topology', 8 | '//topology:topogen', 9 | '//traffic:traffic', 10 | '//traffic:tmgen', 11 | '//globalTE:global_te', 12 | '//localTE:wcmp_alloc', 13 | '//localTE:group_reduction', 14 | ], 15 | data = [ 16 | "//tests:testdata", 17 | ], 18 | python_version = 'PY3', 19 | ) 20 | 21 | py_binary( 22 | name = 'tracegen', 23 | srcs = ['tracegen.py'], 24 | deps = [ 25 | '//traffic:tmgen', 26 | 
'//traffic:traffic',
27 |         "//common:flags",
28 |     ],
29 |     data = [
30 |         "//traffic:tracedata",
31 |         "//tests:testdata",
32 |     ],
33 |     python_version = 'PY3',
34 | )
35 | 

--------------------------------------------------------------------------------
/scripts/time_extract.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | # A simple script that parses the FabricEval log and extracts group reduction
4 | # time into a csv. To use it, execute:
5 | # $ python3 time_extract.py <logfile> <csvfile>
6 | 
7 | import csv
8 | import re
9 | import sys
10 | 
11 | if __name__ == "__main__":
12 |     logfile, csvfile = sys.argv[1], sys.argv[2]
13 |     time_pts = []
14 |     p = re.compile(r".*\[reduceGroups\].*in (.*) sec")
15 |     with open(logfile, 'r') as f:
16 |         for line in f:
17 |             result = p.search(line)
18 |             if result:
19 |                 time_pts.append(result.group(1))
20 | 
21 |     with open(csvfile, 'w') as f:
22 |         writer = csv.writer(f)
23 |         writer.writerow(['time (sec)'])
24 |         for time in time_pts:
25 |             writer.writerow([time])
26 | 

--------------------------------------------------------------------------------
/topology/BUILD:
--------------------------------------------------------------------------------
1 | package(default_visibility = ["//visibility:public"])
2 | 
3 | py_library(
4 |     name = 'topology',
5 |     srcs = ['topology.py'],
6 |     deps = [
7 |         "//proto:topology_proto",
8 |         "//proto:te_solution_proto",
9 |         "//topology:graph_db",
10 |     ],
11 | )
12 | 
13 | py_library(
14 |     name = 'topogen',
15 |     srcs = ['topogen.py'],
16 |     deps = [
17 |         "//proto:topology_proto",
18 |         "//common:flags",
19 |         "//topology:striping_plan",
20 |     ],
21 | )
22 | 
23 | py_library(
24 |     name = 'striping_plan',
25 |     srcs = ['striping_plan.py'],
26 |     deps = [
27 |         "//common:common",
28 |         "//common:flags",
29 |     ],
30 | )
31 | 
32 | py_library(
33 |     name = 'graph_db',
34 |     srcs = ['graph_db.py'],
35 |     deps = [
36 |         "//common:common",
37 |         "//common:flags",
38 |     ],
39 | )
40 | 

--------------------------------------------------------------------------------
/proto/traffic.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto3";
2 | 
3 | package traffic;
4 | 
5 | message DemandEntry {
6 |   // Unique name identifier of source (ToR or aggregation block).
7 |   string src = 1;
8 |   // Unique name identifier of destination (ToR or aggregation block).
9 |   string dst = 2;
10 |   // Traffic volume in Mbits/sec.
11 |   uint64 volume_mbps = 3;
12 | }
13 | 
14 | message TrafficDemand {
15 |   enum DemandType {
16 |     LEVEL_UNKNOWN = 0; // Unknown as the default forces explicitly set type.
17 |     LEVEL_TOR = 1; // ToR-level demand.
18 |     LEVEL_AGGR_BLOCK = 2; // Aggregation-block-level demand.
19 |   }
20 |   // Specifies demand as one of the enums.
21 |   DemandType type = 1;
22 |   // A traffic demand can either be a ToR-to-ToR demand or an aggregated one.
23 |   // But all repeated entries must be consistent.
24 |   repeated DemandEntry demands = 2;
25 | }
26 | 

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 | 
3 | Copyright (c) 2021 Shawn Chen
4 | 
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | 

--------------------------------------------------------------------------------
/scripts/parse.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | # A simple script that parses the FabricEval log and extracts group weight ratio
4 | # into a csv. To use it, execute:
5 | # $ python3 parse.py <logfile> <csvfile>
6 | 
7 | import csv
8 | import re
9 | import sys
10 | 
11 | if __name__ == "__main__":
12 |     logfile, csvfile = sys.argv[1], sys.argv[2]
13 |     rows = []
14 |     p = re.compile(r"toy3-c(.*)-ab1.*orig max ratio (.*)")
15 |     with open(logfile, 'r') as f:
16 |         for line in f:
17 |             result = p.search(line)
18 |             if result:
19 |                 rows.append([int(result.group(1)), float(result.group(2))])
20 | 
21 |     rows.sort(key=lambda x: x[0])
22 |     print(f'avg ratio gen1: {sum(n for _, n in rows[:22*8-1]) / 22}')
23 |     print(f'avg ratio gen2: {sum(n for _, n in rows[22*8-1:44*8]) / 22}')
24 |     print(f'avg ratio gen3: {sum(n for _, n in rows[44*8:]) / 21}')
25 |     print(f'max ratio gen1: {max(n for _, n in rows[:22*8-1])}')
26 |     print(f'max ratio gen2: {max(n for _, n in rows[22*8-1:44*8])}')
27 |     print(f'max ratio gen3: {max(n for _, n in rows[44*8:])}')
28 |     with open(csvfile, 'w') as f:
29 |         writer = csv.writer(f)
30 |         writer.writerow(['cluster', 'ratio'])
31 |         for row in rows:
32 |             writer.writerow(row)
33 | 

--------------------------------------------------------------------------------
/tests/data/toy2_te_sol.textproto:
--------------------------------------------------------------------------------
1 | type: LEVEL_AGGR_BLOCK
2 | te_intents {
3 |   target_block: "toy2-c1-ab1"
4 |   prefix_intents {
5 |     dst_name: "toy2-c3-ab1"
6 |     type: SRC
7 |     nexthop_entries {
8 |       nexthop_port: "toy2-c1-ab1-s3i1-p4"
9 |       weight: 75000.0
10 |     }
11 |     nexthop_entries {
12 |       nexthop_port: "toy2-c1-ab1-s3i2-p4"
13 |       weight: 75000.0
14 |     }
15 |     nexthop_entries {
16 |       nexthop_port: "toy2-c1-ab1-s3i1-p3"
17 |       weight: 75000.0
18 |     }
19 |     nexthop_entries {
20 |       nexthop_port: "toy2-c1-ab1-s3i2-p3"
21 |       weight: 75000.0
22 |     }
23 |   }
24 | }
25 | te_intents {
26 |   target_block: "toy2-c2-ab1"
27 |   prefix_intents {
dst_name: "toy2-c3-ab1" 29 | type: TRANSIT 30 | nexthop_entries { 31 | nexthop_port: "toy2-c2-ab1-s3i1-p4" 32 | weight: 75000.0 33 | } 34 | nexthop_entries { 35 | nexthop_port: "toy2-c2-ab1-s3i2-p4" 36 | weight: 75000.0 37 | } 38 | } 39 | } 40 | te_intents { 41 | target_block: "toy2-c3-ab1" 42 | prefix_intents { 43 | dst_name: "toy2-c1-ab1" 44 | type: SRC 45 | nexthop_entries { 46 | nexthop_port: "toy2-c3-ab1-s3i1-p3" 47 | weight: 50000.0 48 | } 49 | nexthop_entries { 50 | nexthop_port: "toy2-c3-ab1-s3i2-p3" 51 | weight: 50000.0 52 | } 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /tests/BUILD: -------------------------------------------------------------------------------- 1 | package(default_visibility = ["//visibility:public"]) 2 | 3 | filegroup( 4 | name = "testdata", 5 | srcs = glob([ 6 | "data/*.textproto", 7 | ]), 8 | ) 9 | 10 | py_test( 11 | name = 'load_toy_test', 12 | srcs = ['load_toy_test.py'], 13 | deps = [ 14 | '//topology:topology', 15 | '//topology:topogen', 16 | '//traffic:traffic', 17 | '//traffic:tmgen', 18 | '//common:flags', 19 | ], 20 | data = glob(['data/*.textproto']), 21 | python_version = 'PY3', 22 | size = 'medium' 23 | ) 24 | 25 | py_test( 26 | name = 'wcmp_alloc_test', 27 | srcs = ['wcmp_alloc_test.py'], 28 | deps = [ 29 | '//topology:topology', 30 | '//topology:topogen', 31 | '//traffic:traffic', 32 | '//localTE:wcmp_alloc', 33 | '//localTE:group_reduction', 34 | ], 35 | data = glob(['data/*.textproto']), 36 | python_version = 'PY3', 37 | size = 'small' 38 | ) 39 | 40 | py_test( 41 | name = 'global_TE_test', 42 | srcs = ['global_TE_test.py'], 43 | deps = [ 44 | '//topology:topology', 45 | '//topology:topogen', 46 | '//traffic:traffic', 47 | '//traffic:tmgen', 48 | '//globalTE:global_te', 49 | ], 50 | data = glob(['data/*.textproto']), 51 | python_version = 'PY3', 52 | size = 'small' 53 | ) 54 | -------------------------------------------------------------------------------- /proto/te_solution.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package te_solution; 4 | 5 | message PrefixIntent { 6 | enum PrefixType { 7 | UNKNOWN = 0; // Unknown as the default forces explicitly set type. 8 | SRC = 1; // The PrefixIntent is programmed at the source node. 9 | TRANSIT = 2; // The PrefixIntent is programmed at the transit node. 10 | } 11 | 12 | message NexthopEntry { 13 | // Unique name identifier for nexthop (physical) port. 14 | string nexthop_port = 1; 15 | // Weight fraction of traffic to be distributed on this port. 16 | double weight = 2; 17 | } 18 | 19 | // An ipv4 prefix of the destination. 20 | string dst_prefix = 1; 21 | // Net mask in slash notation. 22 | uint32 mask = 2; 23 | // (optional) FQDN of dst entity. If PrefixIntent is for aggregation blocks, 24 | // there is only one PrefixIntent per dst AggrBlock, and the dst_prefix may 25 | // not be set, in which case, dst_name should be set to the name of the dst 26 | // AggrBlock. For ToR-level PrefixIntent, dst_name may not be set. 27 | string dst_name = 3; 28 | // Type of this PrefixIntent. 29 | PrefixType type = 4; 30 | // Nexthop entries used to reach the destination prefix. 31 | // Note: all weights must normalize and sum to 1. 32 | repeated NexthopEntry nexthop_entries = 5; 33 | } 34 | 35 | message TEIntent { 36 | // Unique name identifier for the originating aggregation block. 37 | string target_block = 1; 38 | // TE traffic distribution intents for each destination prefix. 
39 |   repeated PrefixIntent prefix_intents = 2;
40 | }
41 | 
42 | message TESolution {
43 |   enum SolutionType {
44 |     LEVEL_UNKNOWN = 0; // Unknown as the default forces explicitly set type.
45 |     LEVEL_TOR = 1; // ToR-level solution.
46 |     LEVEL_AGGR_BLOCK = 2; // Aggregation-block-level solution.
47 |   }
48 |   // TE solution type could be ToR-level or AggrBlock-level, depending on the
49 |   // corresponding traffic demand type.
50 |   SolutionType type = 1;
51 |   // Traffic engineering intents grouped by target nodes.
52 |   repeated TEIntent te_intents = 2;
53 | }
54 | 

--------------------------------------------------------------------------------
/scripts/test_dep.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | # Tests whether all dependencies are met.
4 | # $ python3 test_dep.py
5 | 
6 | import re
7 | import subprocess
8 | import sys
9 | 
10 | import gurobipy as gp
11 | import neo4j as n4j
12 | import numpy as np
13 | import scipy as sp
14 | 
15 | 
16 | def str2tup(string):
17 |     return tuple(map(int, string.split('.')))
18 | 
19 | 
20 | def test_dep():
21 |     if sys.version_info < (3, 8):
22 |         print("ERROR: python version below 3.8")
23 |         return
24 |     if str2tup(np.__version__) < (1, 18, 0):
25 |         print("ERROR: numpy version below 1.18.0")
26 |         return
27 |     if str2tup(sp.__version__) < (1, 8, 0):
28 |         print("ERROR: scipy version below 1.8.0")
29 |         return
30 |     if str2tup(n4j.__version__) < (5, 0, 0):
31 |         print("ERROR: Neo4j version below 5.0.0")
32 |         return
33 |     if gp.gurobi.version() < (9, 5, 0):
34 |         print("ERROR: GurobiPy version below 9.5.0")
35 |         return
36 |     gurobi_ver = str(
37 |         subprocess.run(['gurobi_cl', '--version'], capture_output=True).stdout)
38 |     m = re.search(r"version (\d+)\.(\d+)\.(\d+) build", gurobi_ver)
39 |     if (int(m[1]), int(m[2]), int(m[3])) < (9, 5, 0):
40 |         print("ERROR: Gurobi version below 9.5.0")
41 |         return
42 |     gurobi_license = str(
43 |         subprocess.run(['gurobi_cl', '--license'], capture_output=True).stdout)
44 |     if 'Error' in gurobi_license:
45 |         print("ERROR: no valid Gurobi license")
46 |         return
47 |     git_ver = str(
48 |         subprocess.run(['git', '--version'], capture_output=True).stdout)
49 |     if 'git version' not in git_ver:
50 |         print("ERROR: could not find git")
51 |         return
52 |     bazel_ver = str(
53 |         subprocess.run(['bazel', '--version'], capture_output=True).stdout)
54 |     g = re.search(r"bazel (\d+)\.(\d+)\.(\d+)", bazel_ver)
55 |     if (int(g[1]), int(g[2]), int(g[3])) < (7, 0, 0):
56 |         print("ERROR: bazel version below 7.0.0")
57 |         return
58 |     print("All dependencies are met.")
59 | 
60 | 
61 | if __name__ == "__main__":
62 |     test_dep()
63 | 

--------------------------------------------------------------------------------
/common/flags.py:
--------------------------------------------------------------------------------
1 | import os
2 | 
3 | # VERBOSE=0: no Gurobi log. No informational prints.
4 | # VERBOSE=1: Gurobi final log only. Informational prints.
5 | # VERBOSE=2: full Gurobi log.
6 | VERBOSE = 1
7 | 
8 | # Probability of a link failure in the topology. N.B., setting it too high might
9 | # cause a network partition.
10 | P_LINK_FAILURE = 0.0
11 | 
12 | # True means the block total ingress should equal its total egress.
13 | EQUAL_INGRESS_EGRESS = False
14 | 
15 | # Fraction of blocks with 0 demand.
16 | P_SPARSE = 0.15
17 | 
18 | # Flag to control whether to enable hedging.
19 | ENABLE_HEDGING = True
20 | 
21 | # Spread in (0, 1] used by the hedging constraint.
22 | S = 0.5
23 | 
24 | # If True, feeds GroupReduction solver with scaled up integer groups.
25 | USE_INT_INPUT_GROUPS = False
26 | 
27 | # Infinite ECMP table size, overrides the `TABLE_LIMIT` and `MAX_GROUP_SIZE` flags.
28 | INFINITE_ECMP_TABLE = False
29 | 
30 | # Example (Broadcom Tomahawk 2) ECMP table limit. To simulate unlimited table
31 | # size, simply set the limit to the worst case consumption:
32 | # max port weight (200000 Mbps) * # ports/group (64) * [# src groups (64) +
33 | # # transit groups (65*65)] = 54899200000.
34 | TABLE_LIMIT = 16 * 1024 if not INFINITE_ECMP_TABLE else 54899200000
35 | 
36 | # Max ECMP entries a group is allowed to use. To simulate unlimited table
37 | # size, simply set the limit to the worst case consumption:
38 | # max port weight (200000 Mbps) * # ports/group (64) = 12800000
39 | MAX_GROUP_SIZE = 256 if not INFINITE_ECMP_TABLE else 12800000
40 | 
41 | # True to enable a set of improved heuristics in group reduction.
42 | # (1) pruning policy. (2) max group size. (3) table limit used. (4) group
43 | # admission policy.
44 | IMPROVED_HEURISTIC = False
45 | 
46 | # True to enable modified EuroSys algorithm, i.e., perform pruning.
47 | EUROSYS_MOD = False
48 | 
49 | # Number of parallel group reductions allowed to run.
50 | PARALLELISM = os.cpu_count()
51 | 
52 | # Timeout in seconds for a single Gurobi invocation.
53 | GUROBI_TIMEOUT = 120
54 | 
55 | # The algorithm to use for group reduction.
56 | # Must be one of eurosys[_mod]/google/igr/dmir/gurobi.
57 | GR_ALGO = 'dmir'
58 | 
59 | # True to dump the original groups to a csv file.
60 | DUMP_GROUPS = False
61 | 
62 | # True to enable a backend graph database and construct the topology graph.
63 | ENABLE_GRAPHDB = False
64 | 
65 | # URI of the backend graph database.
66 | GRAPHDB_URI = 'bolt://localhost:7687'
67 | 

--------------------------------------------------------------------------------
/FabricEval-logo.svg:
--------------------------------------------------------------------------------
1 | 

--------------------------------------------------------------------------------
/proto/topology.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto3";
2 | 
3 | package topo;
4 | 
5 | message Port {
6 |   // Unique name identifier.
7 |   string name = 1;
8 |   // Port speed in Mbits/sec.
9 |   int64 port_speed_mbps = 2;
10 |   // True if this port is facing the data-center network (DCN).
11 |   bool dcn_facing = 3;
12 |   // True if this port is facing the hosts.
13 |   bool host_facing = 4;
14 |   // Unique index among the ports of the same node.
15 |   int32 index = 5;
16 | }
17 | 
18 | message Link {
19 |   // Unique name identifier.
20 |   string name = 1;
21 |   // Unique name identifier of the source port.
22 |   string src_port_id = 2;
23 |   // Unique name identifier of the destination port.
24 |   string dst_port_id = 3;
25 |   // Link speed in Mbits/sec.
26 |   int64 link_speed_mbps = 4;
27 | }
28 | 
29 | message Node {
30 |   // Unique name identifier.
31 |   string name = 1;
32 |   // Stage of the node, 1 for ToR, 2/3 for AggregationBlock.
33 |   int32 stage = 2;
34 |   // Unique index among the nodes of the same stage.
35 |   int32 index = 3;
36 |   // Number of LPM entries the flow table can hold.
37 |   int64 flow_limit = 4;
38 |   // Number of ECMP entries the ECMP table can hold.
39 |   int64 ecmp_limit = 5;
40 |   // Max number of ECMP entries each group can use.
41 |   int64 group_limit = 6;
42 |   // Member ports on the node.
43 |   repeated Port ports = 7;
44 |   // Aggregated IPv4 prefix of all hosts in the rack (only a ToR can have the
45 |   // prefix fields set).
46 |   string host_prefix = 8;
47 |   // Netmask of the host_prefix in slash notation.
48 |   uint32 host_mask = 9;
49 |   // Assigned IPv4 management prefix for a ToR (used for out-of-band
50 |   // management connection in an SDN network).
51 |   string mgmt_prefix = 10;
52 |   // Netmask of the mgmt_prefix in slash notation.
53 |   uint32 mgmt_mask = 11;
54 | }
55 | 
56 | message AggregationBlock {
57 |   // Unique name identifier.
58 |   string name = 1;
59 |   // Member nodes in the aggregation block.
60 |   repeated Node nodes = 2;
61 | }
62 | 
63 | message Path {
64 |   // Unique name identifier.
65 |   string name = 1;
66 |   // Unique name identifier of the source aggregation block.
67 |   string src_aggr_block = 2;
68 |   // Unique name identifier of the destination aggregation block.
69 |   string dst_aggr_block = 3;
70 |   // Path capacity in Mbits/sec, should match the sum of member link capacity.
71 |   int64 capacity_mbps = 4;
72 | }
73 | 
74 | message Cluster {
75 |   // Unique name identifier.
76 |   string name = 1;
77 |   // Member aggregation blocks in the cluster.
78 |   repeated AggregationBlock aggr_blocks = 2;
79 |   // Member nodes directly belonging to the cluster (e.g., ToRs).
80 |   repeated Node nodes = 3;
81 | }
82 | 
83 | message Network {
84 |   // Unique name identifier.
85 |   string name = 1;
86 |   // Member clusters in the network.
87 |   repeated Cluster clusters = 2;
88 |   // Member paths in the network.
89 |   repeated Path paths = 3;
90 |   // Member links in the network.
91 |   repeated Link links = 4;
92 | }
93 | 

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # FabricEval
2 | FabricEval is a modular evaluation framework built for studying traffic
3 | engineering (TE) schemes. FabricEval works as follows:
4 | First, it constructs a data center network topology, overlaid with
5 | a traffic demand matrix for the topology. Then a snapshot of the network is taken
6 | and fed to a TE solver. The output of the TE solver is distributed to each switch
7 | and translated to switch rules in the form of Weighted-Cost Multi-Path (WCMP) groups.
8 | Finally, the data plane implementation is compared to the desired state derived
9 | from the original TE solution. A set of log files is generated to record the precision loss.
10 | 
11 | FabricEval by default generates production-like data center network topologies/traffic
12 | demand matrices from Google's [Jupiter fabrics](https://research.google/pubs/pub51587/),
13 | and uses the same TE algorithm powering the
14 | [Orion SDN controller](https://research.google/pubs/pub50245/). It also models
15 | switches with specs, e.g., port speed and table space, from commodity switch vendors
16 | like Broadcom. All of these are flexible modules; users can easily replace them with
17 | their own configurations and TE algorithms.
18 | 
19 | A main component of the TE implementation is the group reduction algorithm, which
20 | reduces the original large groups to smaller ones that fit into the switch
21 | table limits. FabricEval includes a selection of such group reduction algorithms,
22 | including a re-implementation of WCMP TableFitting \[EuroSys'14\], our own DMIR
23 | and IGR algorithms, as well as a few other variants. The group reduction algorithm
24 | is also modular, and can easily be replaced with the user's own choice.
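
A minimal sketch of the core scaling step shared by these algorithms (an
illustration only, not the code in `localTE/`; `reduce_group` and its names are
made up for this example):

```python
def reduce_group(weights, max_size):
    """Scales a WCMP group down to at most `max_size` ECMP table entries.

    Largest-remainder rounding: ports whose weight rounds to zero are pruned,
    which is one source of the precision loss FabricEval measures.
    """
    total = sum(weights)
    if total <= max_size:
        return list(weights)
    scaled = [w * max_size / total for w in weights]
    reduced = [int(s) for s in scaled]      # floor every port's weight
    leftover = max_size - sum(reduced)      # entries freed up by flooring
    # Hand the leftover entries to the ports with the largest remainders.
    by_remainder = sorted(range(len(weights)),
                          key=lambda i: scaled[i] - reduced[i], reverse=True)
    for i in by_remainder[:leftover]:
        reduced[i] += 1
    return reduced

# A 4-port group with equal weights still splits evenly after reduction:
print(reduce_group([75000.0, 75000.0, 75000.0, 75000.0], 16))  # [4, 4, 4, 4]
```

The algorithms shipped in `localTE/` differ in how they trade rounding error
against table space across many groups at once, under the per-switch limits
modeled in `common/flags.py`.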
25 | 26 | ## Structure 27 | * **common/**: common helper functions and flags. 28 | * **e2e/**: pipeline implementation of the entire framework. 29 | * **globalTE/**: traffic engineering solver component, generates TE solution. 30 | * **localTE/**: handles TE solution to TE implementation mapping + group reduction. 31 | * **proto/**: protobuf definitions for topology, traffic demands, TE solution and so on. 32 | * **scripts/**: log parsing scripts. 33 | * **tests/**: unit tests. 34 | * **tests/data/**: protobuf format production-like network configs. 35 | * **topology/**: the topology component that represents a network in memory. 36 | * **traffic/**: the traffic component that represents traffic demands. 37 | 38 | ## Prerequisites 39 | * [Bazel 7.0.0+](https://docs.bazel.build/install.html). 40 | * [Python 3.8+](https://www.python.org/downloads/). 41 | * [Gurobi](https://www.gurobi.com/). Everything is only tested on Gurobi 9.5.0+. 42 | * [NumPy](https://numpy.org/) and [SciPy](https://scipy.org/). 43 | 44 | Make sure the above dependencies are all installed. 45 | 46 | ## Usage 47 | Run the following command to run all unit tests. 48 | ```bash 49 | bazel test //tests:all 50 | ``` 51 | Run the following command to invoke the e2e pipeline, and dump generated traffic matrix, 52 | TE solution, and link utilization stats to `igr/` in the current path: 53 | ```bash 54 | bazel run //e2e:run -- $(pwd) 55 | ``` 56 | 57 | More details can be found [here](https://shuoshuc.github.io/FabricEval/). 58 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 98 | __pypackages__/ 99 | 100 | # Celery stuff 101 | celerybeat-schedule 102 | celerybeat.pid 103 | 104 | # SageMath parsed files 105 | *.sage.py 106 | 107 | # Environments 108 | .env 109 | .venv 110 | env/ 111 | venv/ 112 | ENV/ 113 | env.bak/ 114 | venv.bak/ 115 | 116 | # Spyder project settings 117 | .spyderproject 118 | .spyproject 119 | 120 | # Rope project settings 121 | .ropeproject 122 | 123 | # mkdocs documentation 124 | /site 125 | 126 | # mypy 127 | .mypy_cache/ 128 | .dmypy.json 129 | dmypy.json 130 | 131 | # Pyre type checker 132 | .pyre/ 133 | 134 | # pytype static type analyzer 135 | .pytype/ 136 | 137 | # Cython debug symbols 138 | cython_debug/ 139 | 140 | # Protobuf products 141 | *_pb2.py 142 | 143 | # Ignore all bazel-* symlinks. There is no full list since this can change 144 | # based on the name of the directory bazel is cloned into. 145 | /bazel-* 146 | # Ignore outputs generated during Bazel bootstrapping. 147 | /output/ 148 | # Bazelisk version file 149 | .bazelversion 150 | # User-specific .bazelrc 151 | user.bazelrc 152 | # csv output files. 153 | *.csv 154 | # protobuf dumps. 155 | *.textproto 156 | # Bzlmod lock. 157 | MODULE.bazel.lock 158 | -------------------------------------------------------------------------------- /localTE/test_driver.py: -------------------------------------------------------------------------------- 1 | import csv 2 | import sys 3 | import time 4 | 5 | from group_reduction import GroupReduction, input_groups_gen 6 | 7 | import common.flags as FLAG 8 | 9 | if __name__ == "__main__": 10 | num_runs = 20 11 | # lower bound, upper bound, fraction precision, table size 12 | lb, ub, frac_digits, C = 0, 200000, 6, 16*1024 13 | 14 | with open(f'{sys.argv[1]}/grspeed.csv', 'w') as f: 15 | writer = csv.writer(f) 16 | writer.writerow(['run', 'algo', 'num_groups', 'num_ports', 17 | 'solve_time (msec)']) 18 | 19 | # Single group, sweep num ports. Repeat 20 times for each run. 20 | print(f'[Single group port sweep]') 21 | for run in range(num_runs): 22 | print(f'===== Run {run} starts =====') 23 | for p in [16, 32, 64]: 24 | runtime = [] 25 | orig_groups = input_groups_gen(1, p, lb, ub, frac_digits) 26 | print(f'Input: {orig_groups}') 27 | 28 | for algo in ['eurosys', 'igr', 'dmir']: 29 | # Initializes global flags before running the pipeline. 30 | if algo == 'eurosys': 31 | FLAG.EUROSYS_MOD = False 32 | elif algo == 'igr': 33 | FLAG.IMPROVED_HEURISTIC = True 34 | elif algo == 'dmir': 35 | FLAG.IMPROVED_HEURISTIC = True 36 | start = time.time_ns() 37 | group_reduction = GroupReduction(orig_groups, 1, C) 38 | reduced_groups = group_reduction.solve(algo) 39 | end = time.time_ns() 40 | print(f'Output [{algo}]: {reduced_groups}') 41 | solving_time = (end - start)/10**6 42 | print('Solving time (msec):', solving_time) 43 | writer.writerow([run, algo, 1, p, solving_time]) 44 | f.flush() 45 | print(f'===== Run {run} ends =====') 46 | 47 | # Fixed 64 ports, sweep num groups. Repeat 20 times for each run. 
48 | print(f'[Fixed port group sweep]') 49 | for run in range(num_runs): 50 | print(f'===== Run {run} starts =====') 51 | for g in [16, 32, 64]: 52 | runtime = [] 53 | orig_groups = input_groups_gen(g, 64, lb, ub, frac_digits) 54 | print(f'Input: {orig_groups}') 55 | 56 | for algo in ['eurosys', 'igr', 'dmir']: 57 | # Initializes global flags before running the pipeline. 58 | if algo == 'eurosys': 59 | FLAG.EUROSYS_MOD = False 60 | elif algo == 'igr': 61 | FLAG.IMPROVED_HEURISTIC = True 62 | elif algo == 'dmir': 63 | FLAG.IMPROVED_HEURISTIC = True 64 | start = time.time_ns() 65 | group_reduction = GroupReduction(orig_groups, 1, C) 66 | reduced_groups = group_reduction.solve(algo) 67 | end = time.time_ns() 68 | print(f'Output [{algo}]: {reduced_groups}') 69 | solving_time = (end - start)/10**6 70 | print('Solving time (msec):', solving_time) 71 | writer.writerow([run, algo, g, 64, solving_time]) 72 | f.flush() 73 | print(f'===== Run {run} ends =====') 74 | -------------------------------------------------------------------------------- /tests/global_TE_test.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import numpy as np 4 | import proto.te_solution_pb2 as TESolution 5 | from google.protobuf import text_format 6 | 7 | import common.flags as FLAG 8 | from globalTE.global_te import GlobalTE 9 | from topology.topogen import generateToy3, generateToy4 10 | from topology.topology import Topology, filterPathSetWithSeg 11 | from traffic.tmgen import tmgen 12 | from traffic.traffic import Traffic 13 | 14 | TOY2_PATH = 'tests/data/toy2_topo.textproto' 15 | TOY2_TRAFFIC_PATH = 'tests/data/toy2_traffic.textproto' 16 | 17 | C1AB1 = 'toy2-c1-ab1' 18 | C2AB1 = 'toy2-c2-ab1' 19 | C3AB1 = 'toy2-c3-ab1' 20 | # toy4 entities. 
21 | TOY4_C1 = 'toy4-c1-ab1' 22 | TOY4_C2 = 'toy4-c2-ab1' 23 | TOY4_C3 = 'toy4-c3-ab1' 24 | TOY4_C4 = 'toy4-c4-ab1' 25 | TOY4_C5 = 'toy4-c5-ab1' 26 | 27 | class TestGlobalTESolution(unittest.TestCase): 28 | def test_te_sol_toy2(self): 29 | FLAG.ENABLE_HEDGING = False 30 | toy2 = Topology(TOY2_PATH) 31 | toy2_traffic = Traffic(toy2, TOY2_TRAFFIC_PATH) 32 | global_te = GlobalTE(toy2, toy2_traffic) 33 | sol = global_te.solve() 34 | self.assertEqual(TESolution.TESolution.SolutionType.LEVEL_AGGR_BLOCK, 35 | sol.type) 36 | self.assertEqual(3, len(sol.te_intents)) 37 | for te_intent in sol.te_intents: 38 | if te_intent.target_block == C1AB1: 39 | self.assertEqual(1, len(te_intent.prefix_intents)) 40 | self.assertEqual(TESolution.PrefixIntent.PrefixType.SRC, 41 | te_intent.prefix_intents[0].type) 42 | self.assertEqual(4, 43 | len(te_intent.prefix_intents[0].nexthop_entries)) 44 | for nexthop_entry in te_intent.prefix_intents[0].nexthop_entries: 45 | self.assertEqual(75000.0, nexthop_entry.weight) 46 | if te_intent.target_block == C2AB1: 47 | self.assertEqual(1, len(te_intent.prefix_intents)) 48 | self.assertEqual(TESolution.PrefixIntent.PrefixType.TRANSIT, 49 | te_intent.prefix_intents[0].type) 50 | self.assertEqual(2, 51 | len(te_intent.prefix_intents[0].nexthop_entries)) 52 | for nexthop_entry in te_intent.prefix_intents[0].nexthop_entries: 53 | self.assertEqual(75000.0, nexthop_entry.weight) 54 | if te_intent.target_block == C3AB1: 55 | self.assertEqual(1, len(te_intent.prefix_intents)) 56 | self.assertEqual(TESolution.PrefixIntent.PrefixType.SRC, 57 | te_intent.prefix_intents[0].type) 58 | self.assertEqual(2, 59 | len(te_intent.prefix_intents[0].nexthop_entries)) 60 | for nexthop_entry in te_intent.prefix_intents[0].nexthop_entries: 61 | self.assertEqual(50000.0, nexthop_entry.weight) 62 | 63 | def test_te_sol_toy4(self): 64 | toy4 = Topology('', input_proto=generateToy4()) 65 | traffic_proto = tmgen(tor_level=False, 66 | cluster_vector=np.array([1]*5), 67 | num_nodes=4, 68 | model='single', 69 | dist='', 70 | netname='toy4') 71 | toy4_traffic = Traffic(toy4, '', traffic_proto) 72 | global_te = GlobalTE(toy4, toy4_traffic) 73 | sol = global_te.solve() 74 | self.assertEqual(TESolution.TESolution.SolutionType.LEVEL_AGGR_BLOCK, 75 | sol.type) 76 | self.assertEqual(4, len(sol.te_intents)) 77 | for te_intent in sol.te_intents: 78 | # There should not exist an intent for c5. 79 | self.assertNotEqual(TOY4_C5, te_intent.target_block) 80 | if te_intent.target_block == TOY4_C1: 81 | self.assertEqual(1, len(te_intent.prefix_intents)) 82 | self.assertEqual(TESolution.PrefixIntent.PrefixType.SRC, 83 | te_intent.prefix_intents[0].type) 84 | # c1->c5 uses VLB-like routing, all paths are used. 85 | self.assertEqual(16, 86 | len(te_intent.prefix_intents[0].nexthop_entries)) 87 | for nexthop_entry in te_intent.prefix_intents[0].nexthop_entries: 88 | self.assertEqual(5000.0, nexthop_entry.weight) 89 | if te_intent.target_block in [TOY4_C2, TOY4_C3, TOY4_C4]: 90 | self.assertEqual(1, len(te_intent.prefix_intents)) 91 | self.assertEqual(TESolution.PrefixIntent.PrefixType.TRANSIT, 92 | te_intent.prefix_intents[0].type) 93 | # Only 4 links in the direct connection to c5 are used. 
94 |                 self.assertEqual(4,
95 |                                  len(te_intent.prefix_intents[0].nexthop_entries))
96 |                 for nexthop_entry in te_intent.prefix_intents[0].nexthop_entries:
97 |                     self.assertEqual(5000.0, nexthop_entry.weight)
98 | 
99 | if __name__ == "__main__":
100 |     unittest.main()
101 | 

--------------------------------------------------------------------------------
/traffic/traffic.py:
--------------------------------------------------------------------------------
1 | from dataclasses import dataclass, field
2 | from typing import Dict, Tuple
3 | 
4 | import proto.traffic_pb2 as traffic
5 | from google.protobuf import text_format
6 | 
7 | 
8 | def loadTraffic(filepath):
9 |     if not filepath:
10 |         return None
11 |     demand = traffic.TrafficDemand()
12 |     with open(filepath, 'r', encoding='utf-8') as f:
13 |         text_format.Parse(f.read(), demand)
14 |     return demand
15 | 
16 | @dataclass
17 | class BlockDemands:
18 |     '''
19 |     A data structure storing all ToR-level demands related to an AggrBlock.
20 |     '''
21 |     # A map of demands where only src belongs to the block.
22 |     src_only: Dict[Tuple[str, str], int]
23 |     # A map of demands where only dst belongs to the block.
24 |     dst_only: Dict[Tuple[str, str], int]
25 |     # A map of demands where both src and dst belong to the block.
26 |     src_dst: Dict[Tuple[str, str], int]
27 | 
28 | class Traffic:
29 |     '''
30 |     Traffic class that represents the demand matrix of a network. It contains
31 |     ToR-level demand and/or aggregation-block-level demand.
32 |     '''
33 |     def __init__(self, topo_obj, input_path, input_proto=None):
34 |         '''
35 |         topo_obj: a topology object matching the input traffic demand.
36 |         input_path: path to the textproto of the traffic demand. Ignored when `input_proto` is given.
37 |         input_proto (optional): raw proto of the traffic demand.
38 |         '''
39 |         self.topo = topo_obj
40 |         # A map from (s, t) to demand.
41 |         self.demand = {}
42 |         # A map from AggrBlock name to BlockDemands dataclass. Only populated
43 |         # when demand is ToR-level.
44 |         self.demand_by_block = {}
45 |         # parse input traffic and construct in-mem representation (this class).
46 |         # If a raw proto is given, ignore `input_path`.
47 |         proto_traffic = input_proto if input_proto else loadTraffic(input_path)
48 |         self.demand_type = proto_traffic.type
49 |         is_tor = self.demand_type == traffic.TrafficDemand.DemandType.LEVEL_TOR
50 |         for demand_entry in proto_traffic.demands:
51 |             src, dst = demand_entry.src, demand_entry.dst
52 |             # Sanity check: src and dst cannot be the same.
53 |             if src == dst:
54 |                 print(f'[ERROR] Traffic parsing: src {src} and dst {dst} cannot'
55 |                       f' be the same!')
56 |                 return
57 |             # Sanity check: only positive demand allowed.
58 |             vol = demand_entry.volume_mbps
59 |             if vol <= 0:
60 |                 print(f'[ERROR] Traffic parsing: encountered non-positive demand: '
61 |                       f'{vol} on {src} => {dst}.')
62 |                 return
63 |             # Sanity check: only expects one entry for each src-dst pair.
64 |             if (src, dst) in self.demand:
65 |                 print(f'[ERROR] Traffic parsing: found more than 1 entry for '
66 |                       f'{src} => {dst}: {self.demand[(src, dst)]} and {vol}')
67 |                 return
68 |             # If ToR demand matrix, finds the parent AggrBlocks so that we can
69 |             # construct a block-level matrix out of it. If 2 ToRs are in the
70 |             # same AggrBlock, the demand is *not* counted towards inter-block
71 |             # demand.
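            # E.g. (illustrative names): ToR demands (c1-t1 -> c2-t1, 100 Mbps)
            # and (c1-t2 -> c2-t2, 50 Mbps) roll up into a single block-level
            # entry demand[(c1-ab1, c2-ab1)] = 150, while an intra-block demand
            # (c1-t1 -> c1-t2) is only recorded in src_dst below.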
72 | if is_tor: 73 | src_aggr_block = self.topo.findAggrBlockOfToR(src).name 74 | dst_aggr_block = self.topo.findAggrBlockOfToR(dst).name 75 | src_demands = self.demand_by_block.setdefault(src_aggr_block, 76 | BlockDemands({}, 77 | {}, 78 | {})) 79 | dst_demands = self.demand_by_block.setdefault(dst_aggr_block, 80 | BlockDemands({}, 81 | {}, 82 | {})) 83 | # Sanity check: only expects one entry for each src-dst pair. 84 | if (src, dst) in src_demands.src_only \ 85 | or (src, dst) in src_demands.src_dst: 86 | print(f'[ERROR] Traffic parsing: found more than 1 entry ' 87 | f'for pair {src} => {dst}: {vol}.') 88 | return 89 | 90 | if src_aggr_block != dst_aggr_block: 91 | tot = self.demand.setdefault((src_aggr_block, 92 | dst_aggr_block), 0) 93 | self.demand[(src_aggr_block, dst_aggr_block)] = tot + vol 94 | src_demands.src_only[(src, dst)] = vol 95 | dst_demands.dst_only[(src, dst)] = vol 96 | else: 97 | # If src and dst are in the same block, src_demands and 98 | # dst_demands point to the same map. 99 | src_demands.src_dst[(src, dst)] = vol 100 | else: 101 | self.demand[(src, dst)] = vol 102 | 103 | def getAllDemands(self): 104 | ''' 105 | Returns the whole network traffic demand. 106 | ''' 107 | return self.demand 108 | 109 | def getDemand(self, src, dst): 110 | ''' 111 | Returns a single demand for (src, dst). 112 | ''' 113 | return self.demand[(src, dst)] 114 | 115 | def getDemandType(self): 116 | ''' 117 | Returns the demand type. 118 | ''' 119 | return self.demand_type 120 | 121 | def getBlockDemands(self, block_name): 122 | ''' 123 | Returns the BlockDemands dataclass for the given block_name. Returns 124 | None if a block has no demand, which is valid. 125 | N.B.: should only be called when ToR-level demands exist. If there is 126 | only AggrBlock-level demands, the entire self.demand_by_block is 127 | empty. 128 | ''' 129 | if block_name not in self.demand_by_block: 130 | return None 131 | return self.demand_by_block[block_name] 132 | -------------------------------------------------------------------------------- /e2e/run.py: -------------------------------------------------------------------------------- 1 | import csv 2 | import sys 3 | from datetime import datetime 4 | from pathlib import Path 5 | 6 | import numpy as np 7 | from google.protobuf import text_format 8 | 9 | import common.flags as FLAG 10 | from globalTE.global_te import GlobalTE 11 | from localTE.wcmp_alloc import WCMPAllocation 12 | from topology.topogen import generateFabric 13 | from topology.topology import Topology, filterPathSetWithSeg, loadTopo 14 | from traffic.tmgen import tmgen 15 | from traffic.traffic import Traffic 16 | 17 | NETWORK = 'f1' 18 | 19 | TOY_TOPO = f'tests/data/{NETWORK}_topo.textproto' 20 | TOY_TM = f'tests/data/{NETWORK}_traffic.textproto' 21 | TOY_SOL = f'tests/data/{NETWORK}_te_sol.textproto' 22 | # True to load topo from the above file. 23 | LOAD_TOPO = False 24 | # True to load TM from the above file. 25 | LOAD_TM = False 26 | # True to load TE solution from the above file. 27 | LOAD_SOL = False 28 | 29 | if __name__ == "__main__": 30 | # Initializes global flags before running the pipeline. 31 | if FLAG.GR_ALGO == 'eurosys': 32 | FLAG.EUROSYS_MOD = False 33 | elif FLAG.GR_ALGO == 'eurosys_mod': 34 | FLAG.EUROSYS_MOD = True 35 | elif FLAG.GR_ALGO == 'google': 36 | FLAG.IMPROVED_HEURISTIC = False 37 | elif FLAG.GR_ALGO == 'igr': 38 | FLAG.IMPROVED_HEURISTIC = True 39 | elif FLAG.GR_ALGO == 'dmir': 40 | FLAG.IMPROVED_HEURISTIC = True 41 | # Use single process if invoking Gurobi. 
Gurobi already uses all CPU
42 |         # cores on its own, so multi-processing only adds extra overhead.
43 |         FLAG.PARALLELISM = 1
44 |     elif FLAG.GR_ALGO == 'gurobi':
45 |         FLAG.IMPROVED_HEURISTIC = True
46 |         FLAG.PARALLELISM = 1
47 |     else:
48 |         print(f'[ERROR] unknown group reduction algorithm {FLAG.GR_ALGO}.')
49 | 
50 |     logpath = Path(sys.argv[1] + f'/{FLAG.GR_ALGO}')
51 |     logpath.mkdir(parents=True, exist_ok=True)
52 | 
53 |     # Generates topology.
54 |     net_proto = None
55 |     if not LOAD_TOPO:
56 |         net_proto = generateFabric(NETWORK)
57 |         with (logpath / 'topo.textproto').open('w') as topo:
58 |             topo.write(text_format.MessageToString(net_proto))
59 |     toy_topo = Topology(TOY_TOPO, net_proto)
60 |     print(f'{datetime.now()} [Step 1] topology generated.', flush=True)
61 | 
62 |     # Generates TM.
63 |     traffic_proto = None
64 |     if not LOAD_TM:
65 |         traffic_proto = tmgen(tor_level=True,
66 |                               cluster_vector=np.array([1]*11 + [2.5]*11 + [5]*11),
67 |                               num_nodes=16,
68 |                               model='gravity',
69 |                               dist='exp',
70 |                               netname=NETWORK)
71 |         with (logpath / 'TM.textproto').open('w') as tm:
72 |             tm.write(text_format.MessageToString(traffic_proto))
73 |     toy_traffic = Traffic(toy_topo, TOY_TM, traffic_proto)
74 |     print(f'{datetime.now()} [Step 2] traffic demand generated.', flush=True)
75 | 
76 |     # Runs global TE.
77 |     sol = None
78 |     if not LOAD_SOL:
79 |         global_te = GlobalTE(toy_topo, toy_traffic)
80 |         sol = global_te.solve()
81 |         with (logpath / 'te_sol.textproto').open('w') as te_sol:
82 |             te_sol.write(text_format.MessageToString(sol))
83 |     print(f'{datetime.now()} [Step 3] global TE solution generated.', flush=True)
84 |     #print(text_format.MessageToString(sol))
85 | 
86 |     # Runs local TE.
87 |     wcmp_alloc = WCMPAllocation(toy_topo, toy_traffic, TOY_SOL, sol)
88 |     wcmp_alloc.run()
89 |     print(f'{datetime.now()} [Step 4] local TE solution generated.', flush=True)
90 | 
91 |     # Dumps stats.
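    # delta = real - ideal per-link utilization; a positive delta means the
    # reduced groups push more traffic onto a link than the TE solution intended.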
92 | real_LUs = toy_topo.dumpRealLinkUtil() 93 | ideal_LUs = toy_topo.dumpIdealLinkUtil() 94 | delta_LUs = {} 95 | for k, (u, dcn) in real_LUs.items(): 96 | delta_LUs[k] = (u - ideal_LUs[k][0], dcn) 97 | 98 | with (logpath / 'LU.csv').open('w') as LU: 99 | writer = csv.writer(LU) 100 | writer.writerow(["link name", "dcn facing", "ideal LU", "real LU", "delta"]) 101 | for k, (v, dcn) in dict(sorted(delta_LUs.items(), key=lambda x: x[1][0], 102 | reverse=True)).items(): 103 | writer.writerow([k, f'{dcn}', f'{ideal_LUs[k][0]}', 104 | f'{real_LUs[k][0]}', f'{v}']) 105 | print(f'{datetime.now()} [Step 5] dump link util to LU.csv', flush=True) 106 | 107 | ecmp_util = toy_topo.dumpECMPUtil() 108 | with (logpath / 'node_ecmp.csv').open('w') as ecmp: 109 | writer = csv.writer(ecmp) 110 | writer.writerow(["node name", "ECMP util", "# groups"]) 111 | for k, (util, num_g) in ecmp_util.items(): 112 | writer.writerow([k, f'{util}', f'{num_g}']) 113 | print(f'{datetime.now()} [Step 6] dump node table util to node_ecmp.csv', 114 | flush=True) 115 | 116 | demand_admit = toy_topo.dumpDemandAdmission() 117 | with (logpath / 'node_demand.csv').open('w') as demand: 118 | writer = csv.writer(demand) 119 | writer.writerow(["node name", "total demand", "total admit", "ratio"]) 120 | for node, (tot_demand, tot_admit, f) in demand_admit.items(): 121 | writer.writerow([node, tot_demand, tot_admit, f]) 122 | print(f'{datetime.now()} [Step 7] dump node demand to node_demand.csv', 123 | flush=True) 124 | 125 | path_div = wcmp_alloc.dumpPathDiversityStats() 126 | with (logpath / 'path_diversity.csv').open('w') as path_diversity: 127 | writer = csv.writer(path_diversity) 128 | writer.writerow(["node", "gid", "total volume (Mbps)", "orig paths", 129 | "reduced paths", "pruned volume (Mbps)"]) 130 | for (node, gid, vol), [orig_p, reduced_p, pruned_vol] in path_div.items(): 131 | writer.writerow([node, gid, vol, orig_p, reduced_p, pruned_vol]) 132 | print(f'{datetime.now()} [Step 8] dump path diversity to path_diversity.csv', 133 | flush=True) 134 | 135 | if FLAG.DUMP_GROUPS: 136 | groups_by_node = wcmp_alloc.dumpOrigS3SrcGroups() 137 | for node, groups in groups_by_node.items(): 138 | newpath = logpath / 'group_dump' 139 | newpath.mkdir(parents=True, exist_ok=True) 140 | with (newpath / f'orig_groups_{node}.csv').open('w') as gdump: 141 | writer = csv.writer(gdump) 142 | for G in groups: 143 | writer.writerow(G) 144 | print(f'{datetime.now()} [Step 9] dump groups to group_dump/orig_groups_*.csv', 145 | flush=True) 146 | 147 | reduced_groups = wcmp_alloc.dumpReducedGroups() 148 | with (logpath / 'reduced_groups.csv').open('w') as g_dump: 149 | writer = csv.writer(g_dump) 150 | writer.writerow(["#group type", "src", "dst", "group"]) 151 | for g_type, src, dst, reduced_w in reduced_groups: 152 | writer.writerow([g_type, src, dst] + reduced_w) 153 | print(f'{datetime.now()} [Step 10] dump reduced groups to reduced_groups.csv', 154 | flush=True) 155 | -------------------------------------------------------------------------------- /topology/graph_db.py: -------------------------------------------------------------------------------- 1 | from neo4j import GraphDatabase 2 | 3 | import common.flags as FLAG 4 | from common.common import PRINTV 5 | 6 | 7 | class GraphDB: 8 | ''' 9 | GraphDB wraps the driver of backend database and translates requests from 10 | the Topology class. 11 | ''' 12 | def __init__(self, uri, user, password): 13 | ''' 14 | uri: the URI of the backend database. 15 | user: username 16 | password: password. 
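
        Example (a sketch; every call is a no-op unless FLAG.ENABLE_GRAPHDB
        is True):
            db = GraphDB(FLAG.GRAPHDB_URI, 'neo4j', 'password')
            db.addCluster('toy2-c1')
            db.close()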
17 |         '''
18 |         # Do nothing if the flag is turned off.
19 |         self._noop = not FLAG.ENABLE_GRAPHDB
20 |         PRINTV(1, f'Enable GraphDB is {FLAG.ENABLE_GRAPHDB}')
21 |         if self._noop:
22 |             return
23 |         self.driver = GraphDatabase.driver(uri, auth=(user, password))
24 | 
25 |     def close(self):
26 |         if self._noop:
27 |             return
28 |         self.driver.close()
29 | 
30 |     def nuke(self):
31 |         '''
32 |         Deletes everything from the neo4j database.
33 |         '''
34 |         if self._noop:
35 |             return
36 |         with self.driver.session(database="neo4j") as session:
37 |             session.execute_write(self._run_trans, "MATCH (n) DETACH DELETE n")
38 | 
39 |     def addCluster(self, name):
40 |         '''
41 |         Adds a cluster.
42 |         '''
43 |         if self._noop:
44 |             return
45 |         with self.driver.session(database="neo4j") as session:
46 |             session.execute_write(self._run_trans,
47 |                 f"CREATE (:Cluster:Abstract {{name: '{name}'}})")
48 | 
49 |     def addAggrBlock(self, name):
50 |         '''
51 |         Adds an aggregation block.
52 |         '''
53 |         if self._noop:
54 |             return
55 |         with self.driver.session(database="neo4j") as session:
56 |             session.execute_write(self._run_trans,
57 |                 f"CREATE (:AggrBlock:Aggr {{name: '{name}'}})")
58 | 
59 |     def addSwitch(self, name, stage, index, ecmp_limit):
60 |         '''
61 |         Adds a switch.
62 |         '''
63 |         if self._noop:
64 |             return
65 |         with self.driver.session(database="neo4j") as session:
66 |             session.execute_write(self._run_trans,
67 |                 f"CREATE (:Switch:Phy {{name: '{name}', "
68 |                 f"stage: '{stage}', index: '{index}', "
69 |                 f"table: '{ecmp_limit}'}})")
70 | 
71 |     def addPort(self, name, index, speed, dcn_facing, host_facing):
72 |         '''
73 |         Adds a port.
74 |         '''
75 |         if self._noop:
76 |             return
77 |         with self.driver.session(database="neo4j") as session:
78 |             session.execute_write(self._run_trans,
79 |                 f"CREATE (:Port:Phy {{name: '{name}', "
80 |                 f"index: '{index}', speed: '{speed}', "
81 |                 f"dcn_facing: '{dcn_facing}', "
82 |                 f"host_facing: '{host_facing}'}})")
83 | 
84 |     def addLink(self, name, speed, dcn):
85 |         '''
86 |         Adds a link.
87 |         '''
88 |         if self._noop:
89 |             return
90 |         with self.driver.session(database="neo4j") as session:
91 |             session.execute_write(self._run_trans,
92 |                 f"CREATE (:Link:Phy {{name: '{name}', "
93 |                 f"speed: '{speed}', dcn_link: '{dcn}'}})")
94 | 
95 |     def addPath(self, name, capacity):
96 |         '''
97 |         Adds a path.
98 |         '''
99 |         if self._noop:
100 |             return
101 |         with self.driver.session(database="neo4j") as session:
102 |             session.execute_write(self._run_trans,
103 |                 f"CREATE (:Path:Aggr {{name: '{name}', "
104 |                 f"capacity: '{capacity}'}})")
105 | 
106 |     def connectAggrBlockToCluster(self, aggrblock, cluster):
107 |         '''
108 |         Connects an aggregation block to its parent cluster.
109 |         '''
110 |         if self._noop:
111 |             return
112 |         with self.driver.session(database="neo4j") as session:
113 |             long_query = (f"MATCH (ab:AggrBlock {{name: '{aggrblock}'}}), "
114 |                           f"(c:Cluster {{name: '{cluster}'}}) "
115 |                           f"CREATE (ab)-[:MEMBER_OF]->(c)-[:PARENT_OF]->(ab)")
116 |             session.execute_write(self._run_trans, long_query)
117 | 
118 |     def connectSwitchToAggrBlock(self, switch, aggrblock):
119 |         '''
120 |         Connects a switch to its parent aggregation block.
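        Like the other connect* helpers, this creates both a MEMBER_OF and a
        PARENT_OF relationship, so the graph can be traversed in either
        direction.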
121 | ''' 122 | if self._noop: 123 | return 124 | with self.driver.session(database="neo4j") as session: 125 | long_query = (f"MATCH (ab:AggrBlock {{name: '{aggrblock}'}}), " 126 | f"(s:Switch {{name: '{switch}'}}) " 127 | f"CREATE (s)-[:MEMBER_OF]->(ab)-[:PARENT_OF]->(s)") 128 | session.execute_write(self._run_trans, long_query) 129 | 130 | def connectPortToSwitch(self, port, switch): 131 | ''' 132 | Connects a port to its parent switch. 133 | ''' 134 | if self._noop: 135 | return 136 | with self.driver.session(database="neo4j") as session: 137 | long_query = (f"MATCH (p:Port {{name: '{port}'}}), " 138 | f"(s:Switch {{name: '{switch}'}}) " 139 | f"CREATE (p)-[:MEMBER_OF]->(s)-[:PARENT_OF]->(p)") 140 | session.execute_write(self._run_trans, long_query) 141 | 142 | def connectToRToCluster(self, switch, cluster): 143 | ''' 144 | Connects a ToR (S1) switch to its parent cluster. 145 | ''' 146 | if self._noop: 147 | return 148 | with self.driver.session(database="neo4j") as session: 149 | long_query = (f"MATCH (t:Switch {{name: '{switch}'}}), " 150 | f"(c:Cluster {{name: '{cluster}'}}) " 151 | f"CREATE (t)-[:MEMBER_OF]->(c)-[:PARENT_OF]->(t)") 152 | session.execute_write(self._run_trans, long_query) 153 | 154 | def connectLinkToPorts(self, link, src, dst): 155 | ''' 156 | Connects a link to a src and dst port. 157 | ''' 158 | if self._noop: 159 | return 160 | with self.driver.session(database="neo4j") as session: 161 | long_query = (f"MATCH (l:Link {{name: '{link}'}}), " 162 | f"(src:Port {{name: '{src}'}}), " 163 | f"(dst:Port {{name: '{dst}'}}) " 164 | f"CREATE (src)-[:SRC_OF]->(l)<-[:DST_OF]-(dst), " 165 | f"(src)<-[:ORIGINATE_FROM]-(l)-[:TERMINATE_AT]->(dst)") 166 | session.execute_write(self._run_trans, long_query) 167 | 168 | def connectPathToAggrBlocks(self, path, src, dst): 169 | ''' 170 | Connects a path to a src and dst AggrBlock. 171 | ''' 172 | if self._noop: 173 | return 174 | with self.driver.session(database="neo4j") as session: 175 | long_query = (f"MATCH (path:Path {{name: '{path}'}}), " 176 | f"(src:AggrBlock {{name: '{src}'}}), " 177 | f"(dst:AggrBlock {{name: '{dst}'}}) " 178 | f"CREATE (src)-[:SRC_OF]->(path)<-[:DST_OF]-(dst), " 179 | f"(src)<-[:ORIGINATE_FROM]-(path)-[:TERMINATE_AT]->(dst)") 180 | session.execute_write(self._run_trans, long_query) 181 | 182 | @staticmethod 183 | def _run_trans(tx, cmd): 184 | tx.run(cmd) 185 | -------------------------------------------------------------------------------- /e2e/tracegen.py: -------------------------------------------------------------------------------- 1 | import csv 2 | import random 3 | import re 4 | import sys 5 | from pathlib import Path 6 | 7 | import numpy as np 8 | 9 | import common.flags as FLAG 10 | from traffic.tmgen import tmgen 11 | from traffic.traffic import loadTraffic 12 | 13 | NETWORK = 'toy3' 14 | TM_PATH = f'tests/data/{NETWORK}_traffic_gravity.textproto' 15 | 16 | MSFT_WEBSEARCH = 'MsftWebSearch.txt' 17 | ALI_STORAGE = 'AliStorage.txt' 18 | GOOGLE_RPC = 'GoogleRPC.txt' 19 | FB_HADOOP = 'FbHadoop.txt' 20 | 21 | def loadCDF(filename): 22 | ''' 23 | Loads the given csv file and parses it into a CDF. 24 | ''' 25 | cdf = [] 26 | with open(filename, 'r', encoding='utf-8') as f: 27 | # saves the data points as [[x_i, p_i] ...] 28 | for line in f.readlines(): 29 | x, p = map(float, line.strip().split(' ')) 30 | cdf.append([x, p]) 31 | return cdf 32 | 33 | class CustomRand: 34 | ''' 35 | A custom random variable class that fits the given CDF. It is able to 36 | generate data points following the given CDF. 
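
    Example (a sketch; sampled values depend on the loaded CDF):
        rv = CustomRand()
        if rv.setCdf(loadCDF('traffic/data/MsftWebSearch.txt')):
            flow_size = rv.rand()  # one flow size drawn from the CDF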
37 | ''' 38 | def testCdf(self, cdf): 39 | if cdf[0][1] != 0: 40 | return False 41 | if cdf[-1][1] != 100: 42 | return False 43 | for i in range(1, len(cdf)): 44 | if cdf[i][1] <= cdf[i-1][1] or cdf[i][0] <= cdf[i-1][0]: 45 | return False 46 | return True 47 | 48 | def setCdf(self, cdf): 49 | if not self.testCdf(cdf): 50 | return False 51 | self.cdf = cdf 52 | return True 53 | 54 | def getAvg(self): 55 | s = 0 56 | last_x, last_y = self.cdf[0] 57 | for c in self.cdf[1:]: 58 | x, y = c 59 | s += (x + last_x)/2.0 * (y - last_y) 60 | last_x = x 61 | last_y = y 62 | return s/100 63 | 64 | def rand(self): 65 | r = random.random() * 100 66 | return self.getValueFromPercentile(r) 67 | 68 | def getPercentileFromValue(self, x): 69 | if x < 0 or x > self.cdf[-1][0]: 70 | return -1 71 | for i in range(1, len(self.cdf)): 72 | if x <= self.cdf[i][0]: 73 | x0, y0 = self.cdf[i-1] 74 | x1, y1 = self.cdf[i] 75 | return y0 + (y1-y0)/(x1-x0)*(x-x0) 76 | 77 | def getValueFromPercentile(self, y): 78 | for i in range(1, len(self.cdf)): 79 | if y <= self.cdf[i][1]: 80 | x0,y0 = self.cdf[i-1] 81 | x1,y1 = self.cdf[i] 82 | return x0 + (x1-x0)/(y1-y0)*(y-y0) 83 | 84 | def getIntegralY(self, y): 85 | s = 0 86 | for i in range(1, len(self.cdf)): 87 | x0, y0 = self.cdf[i-1] 88 | x1, y1 = self.cdf[i] 89 | if y <= self.cdf[i][1]: 90 | s += 0.5 * (x0 + x0+(x1-x0)/(y1-y0)*(y-y0))*(y-y0) / 100. 91 | break 92 | else: 93 | s += 0.5 * (x1 + x0) * (y1 - y0) / 100. 94 | return s 95 | 96 | def tracegen(TM, cluster_vector, rv, duration, load): 97 | ''' 98 | Generates workload traces that match the input demand traffic matrix while 99 | conforming to the flow size distribution defined in `rv`. 100 | 101 | TM: traffic matrix of format 102 | [[src node, src cluster id, dst node, dst cluster id, demand], ...] 103 | cluster_vector: a vector of cluster speed ratios, base speed is 40Gbps. 104 | rv: random variable that models the workload flow size distribution. 105 | duration: time duration (in nsec) the TM is measured on. 106 | load: link load between 0 and 1. 107 | 108 | Returns a trace of format [src, src cluster id, dst, dst cluster id, flow size (Bytes), start time (nsec)]. 109 | ''' 110 | # Base speed is 40Gbps in bps. 111 | BASE_BW = 40 * 1000 * 1000 * 1000 112 | 113 | trace = [] 114 | # Start time of the last flow in the entire trace. 115 | t_last_flow = 0 116 | for src, sidx, dst, didx, demand in TM: 117 | # Speed auto-negotiation. 118 | BW = BASE_BW * min(cluster_vector[int(sidx) - 1], 119 | cluster_vector[int(didx) - 1]) 120 | # Get target flow size between two nodes. demand is in Mbps, duration 121 | # is in nsec. target_size is in bytes. 122 | target_size = (int(demand) / 8. * 1000000) * (duration / 1000000000) 123 | 124 | tot_size = 0 125 | prev_time = 0 126 | avg_inter_arrival_nsec = 1 / (BW * load / 8. / rv.getAvg()) * 1000000000 127 | while tot_size < target_size: 128 | flow_size = int(rv.rand()) 129 | iat_ns = int(np.random.exponential(avg_inter_arrival_nsec)) 130 | prev_time += iat_ns 131 | trace.append([src, sidx, dst, didx, flow_size, prev_time]) 132 | tot_size += flow_size 133 | if prev_time > duration: 134 | print(f'[WARN] trace {src} => {dst}: fitting {target_size} bytes ' 135 | f'exceeds duration: {prev_time} > {duration} nsec.') 136 | t_last_flow = max(t_last_flow, prev_time) 137 | print(f'Last flow start time {t_last_flow} nsec.') 138 | return trace 139 | 140 | if __name__ == "__main__": 141 | # Selected workload type. 142 | WORKLOAD = MSFT_WEBSEARCH 143 | # Trace duration 20 msec. 144 | DURATION = 20 * 1000 * 1000 145 | # Link load 40%.
146 | LOAD = 0.4 147 | 148 | # Loads all workload CDFs. 149 | CDF = { 150 | MSFT_WEBSEARCH: None, 151 | ALI_STORAGE: None, 152 | GOOGLE_RPC: None, 153 | FB_HADOOP: None 154 | } 155 | for workload in CDF.keys(): 156 | CDF[workload] = CustomRand() 157 | if not CDF[workload].setCdf(loadCDF(f'traffic/data/{workload}')): 158 | print(f"[ERROR] Invalid CDF, workload: {workload}") 159 | continue 160 | 161 | # Each demand entry looks like: 162 | # [src node, src cluster id, dst node, dst cluster id, demand (Mbps)]. 163 | rawTM = [] 164 | proto_traffic = loadTraffic(TM_PATH) 165 | 166 | pattern = re.compile("(.*)-c([0-9]+)-ab1-s1i([0-9]+)") 167 | for demand_entry in proto_traffic.demands: 168 | src, dst = demand_entry.src, demand_entry.dst 169 | # Sanity check: src and dst cannot be the same. 170 | if src == dst: 171 | print(f'[ERROR] Traffic parsing: src {src} and dst {dst} cannot' 172 | f' be the same!') 173 | break 174 | # Sanity check: only positive demand allowed. 175 | vol = demand_entry.volume_mbps 176 | if vol <= 0: 177 | print(f'[ERROR] Traffic parsing: encountered non-positive demand: ' 178 | f'{vol} on {src} => {dst}.') 179 | break 180 | 181 | match_src, match_dst = pattern.search(src), pattern.search(dst) 182 | if not match_src or not match_dst: 183 | continue 184 | netname = match_src.group(1) 185 | src_cid, dst_cid = match_src.group(2), match_dst.group(2) 186 | src_tid, dst_tid = match_src.group(3), match_dst.group(3) 187 | # Skip cluster local demands if we only focus on DCN traffic. 188 | if src_cid == dst_cid: 189 | continue 190 | rawTM.append([f'{netname}-c{src_cid}-t{src_tid}', f'{src_cid}', 191 | f'{netname}-c{dst_cid}-t{dst_tid}', f'{dst_cid}', vol]) 192 | 193 | # Generates trace. 194 | speed_vec = np.array([1]*22 + [2.5]*22 + [5]*21) 195 | trace = tracegen(rawTM, speed_vec, CDF[WORKLOAD], DURATION, LOAD) 196 | 197 | # Writes trace to filesystem as a csv. 198 | logpath = Path(sys.argv[1]) 199 | logpath.mkdir(parents=True, exist_ok=True) 200 | with (logpath / f'{NETWORK}-trace.csv').open('w') as f: 201 | writer = csv.writer(f) 202 | writer.writerows(trace) 203 | -------------------------------------------------------------------------------- /traffic/tmgen.py: -------------------------------------------------------------------------------- 1 | import itertools 2 | from math import floor 3 | 4 | import numpy as np 5 | import proto.traffic_pb2 as traffic 6 | from numpy.random import default_rng 7 | from scipy.stats import bernoulli, truncexpon, uniform 8 | 9 | import common.flags as FLAG 10 | 11 | 12 | def tmgen(tor_level, cluster_vector, num_nodes, model, dist='exp', netname='', 13 | raw_tm=False): 14 | ''' 15 | Generates a traffic demand matrix according to `model`. 16 | 17 | Returns the populated traffic proto. 18 | 19 | tor_level: Boolean. False means AggrBlock-level demand. 20 | cluster_vector: NumPy vector of the scale factor (aka, relative speed). For 21 | example, a total of 2 40G clusters and 2 100G clusters would 22 | look like: array([1, 1, 2.5, 2.5]). 23 | num_nodes: number of S1 nodes per cluster. Only used when tor_level=True. 24 | model: the type of TM to use, can be flat/uniform/single/gravity. 25 | dist: what distribution to use for sampling ingress/egress total demand, can 26 | be exp/uniform (pareto is not yet implemented in genTotalDemand). 27 | netname: name of the fabric. 28 | raw_tm: If True, returns the raw TM in Python list format instead of 29 | protobuf format.
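    A hypothetical invocation, reusing the cluster_vector example above:
        tmgen(tor_level=False, cluster_vector=np.array([1, 1, 2.5, 2.5]),
              num_nodes=32, model='gravity', dist='exp', netname='toy3')
    returns a LEVEL_AGGR_BLOCK TrafficDemand proto with one entry per
    non-empty AggrBlock pair.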
30 | ''' 31 | rng = default_rng() 32 | num_clusters = cluster_vector.size 33 | size = num_clusters * num_nodes if tor_level else num_clusters 34 | tm = np.zeros(shape=(size, size)) 35 | if model == 'flat': 36 | # Generates a flat traffic demand matrix. If tor_level=True, each 37 | # src-dst pair sees 153 Mbps demand. If false, each src-dst pair sees 38 | # 80000 Mbps demand. 39 | tm[tm >= 0] = 30000 * 8 / (num_nodes * num_clusters - 1) if tor_level \ 40 | else 20000 * 256 / (num_clusters - 1) 41 | elif model == 'uniform': 42 | # Generates a uniform random traffic demand matrix. Each src-dst pair 43 | # will not exceed the value of a same entry in the flat TM. 44 | upper_bound = 40000 * 8 / (num_nodes * num_clusters - 1) if tor_level \ 45 | else 40000 * 256 / (num_clusters - 1) 46 | for r, c in np.ndindex(tm.shape): 47 | tm[r, c] = rng.uniform(low=0, high=upper_bound) 48 | elif model == 'single': 49 | # Send from the first node to the last node at a low load. There must 50 | # be at least 2 nodes in the TM. 51 | # Note: only generates 1 commodity, this is for debugging. 52 | tm[0, -1] = 40000 * 4 * 0.5 53 | elif model == 'gravity': 54 | # Generates a traffic demand matrix following the gravity model. Each 55 | # src-dst pair has a demand proportional to the product of their egress 56 | # and ingress demands. The block total ingress/egress volume is sampled 57 | # from a uniform random/exponential/Pareto distribution, as specified by 58 | # `dist`. 59 | egress, ingress = np.array([]), np.array([]) 60 | egress = genTotalDemand(tor_level, cluster_vector, num_nodes, dist) 61 | # Set block total ingress to be the same as egress if flag is true. 62 | # Otherwise, sample another set of values from the same distribution. 63 | if FLAG.EQUAL_INGRESS_EGRESS: 64 | ingress = egress 65 | else: 66 | ingress = genTotalDemand(tor_level, cluster_vector, num_nodes, dist) 67 | # Rescale the ingress vector so that its sum equals egress. (Total 68 | # egress and ingress must match). 69 | ingress *= egress.sum() / ingress.sum() 70 | # Finds the number of latest generation blocks. We only set the latest 71 | # generation blocks to empty. 72 | num_latest = np.unique(cluster_vector, return_counts=True)[1][-1] 73 | # Designates P_SPARSE blocks to be empty. Stores their block indices. 74 | # Note that if a block is empty, its egress and ingress demands are both 75 | # set to 0, regardless of EQUAL_INGRESS_EGRESS. 76 | empty_blocks = np.nonzero(bernoulli.rvs(1 - FLAG.P_SPARSE, 77 | size=num_latest) == 0)[0] \ 78 | + (len(cluster_vector) - num_latest) 79 | # `r` is row vector for src, `c` is column vector for dst. 80 | for r, c in np.ndindex(tm.shape): 81 | block_r = r // num_nodes if tor_level else r 82 | block_c = c // num_nodes if tor_level else c 83 | # If a block is designated empty, keep its row/column vector 0. 84 | if block_r in empty_blocks or block_c in empty_blocks: 85 | continue 86 | # Gravity model. 87 | tm[r, c] = egress[r] * ingress[c] / ingress.sum() 88 | 89 | return genProto(tor_level, num_clusters, num_nodes, tm, netname, raw_tm) 90 | 91 | def genTotalDemand(tor_level, cluster_vector, num_nodes, dist, p_spike=0.1): 92 | ''' 93 | Generates total ingress/egress demand for all end points. 94 | Returns a 1-D NumPy array. 95 | 96 | p_spike: probability to generate a spike (spike = 80% max capacity). 97 | ''' 98 | rng = default_rng() 99 | # Step 1: Generates AggrBlock-level total demand. 
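    # For illustration (hypothetical values): with cluster_vector = [1, 1, 2.5]
    # and no spike, cluster 1 gets upper_bound = 40000*4*min(1,1) +
    # 40000*4*min(1,2.5) = 320000 Mbps, scaled by 0.4 to 128000 Mbps; its
    # total egress is then a single draw from truncexpon(b=2, loc=0,
    # scale=64000).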
100 | block_demand = np.array([]) 101 | for i, f in enumerate(cluster_vector): 102 | upper_bound = 0 103 | for j, g in enumerate(cluster_vector): 104 | if i == j: 105 | continue 106 | upper_bound += 40000 * 4 * min(f, g) 107 | # With `p_spike` probability, generates a spike. 108 | upper_bound *= 0.8 if rng.uniform(low=0, high=1) < p_spike else 0.4 109 | scale = upper_bound / 2 110 | if dist == 'exp': 111 | X = truncexpon(b=upper_bound/scale, loc=0, scale=scale) 112 | elif dist == 'uniform': 113 | X = uniform(loc=0, scale=upper_bound) 114 | block_demand = np.concatenate((block_demand, X.rvs(1))) 115 | 116 | # Only needs block demand, job done. 117 | if not tor_level: 118 | return block_demand 119 | 120 | # Step 2: Generates ToR-level total demand. 121 | tor_demand = np.array([]) 122 | for i in range(len(cluster_vector)): 123 | upper_bound = block_demand[i] 124 | scale = upper_bound / 2 125 | if dist == 'exp': 126 | X = truncexpon(b=upper_bound/scale, loc=0, scale=scale) 127 | elif dist == 'uniform': 128 | X = uniform(loc=0, scale=upper_bound) 129 | tors_in_block = X.rvs(num_nodes) 130 | # Rescales the tor vector so that it sums to upper_bound. 131 | tor_demand = np.concatenate((tor_demand, 132 | tors_in_block / tors_in_block.sum() * upper_bound)) 133 | return tor_demand 134 | 135 | def genProto(tor_level, num_clusters, num_nodes, TM, netname, raw_tm): 136 | ''' 137 | Returns a traffic proto using the given traffic matrix `TM`. 138 | 139 | raw_tm: If True, constructs the return value in raw list format instead of 140 | protobuf format. Note that S1 nodes follow a slightly different 141 | naming scheme for convenience. 142 | ''' 143 | tm_proto = traffic.TrafficDemand() 144 | tm_proto.type = traffic.TrafficDemand.DemandType.LEVEL_TOR if tor_level \ 145 | else traffic.TrafficDemand.DemandType.LEVEL_AGGR_BLOCK 146 | 147 | # Each demand entry looks like: 148 | # [src node, src cluster id, dst node, dst cluster id, demand (Mbps)]. 149 | rawTM = [] 150 | 151 | for i, j in itertools.product(range(1, num_clusters + 1), 152 | range(1, num_clusters + 1)): 153 | if tor_level: 154 | # Populate ToR-level demand matrix. 155 | for u, v in itertools.product(range(1, num_nodes + 1), 156 | range(1, num_nodes + 1)): 157 | # A ToR cannot send traffic to itself. 158 | if i == j and u == v: 159 | continue 160 | # Skip zero entries for proto efficiency. 161 | if floor(TM[(i - 1) * num_nodes + u - 1, 162 | (j - 1) * num_nodes + v - 1]) <= 0: 163 | continue 164 | demand = tm_proto.demands.add() 165 | demand.src = f'{netname}-c{i}-ab1-s1i{u}' 166 | demand.dst = f'{netname}-c{j}-ab1-s1i{v}' 167 | demand.volume_mbps = floor(TM[(i - 1) * num_nodes + u - 1, 168 | (j - 1) * num_nodes + v - 1]) 169 | if raw_tm: 170 | rawTM.append([f'{netname}-c{i}-t{u}', f'{i}', 171 | f'{netname}-c{j}-t{v}', f'{j}', 172 | demand.volume_mbps]) 173 | else: 174 | # Populate AggrBlock-level demand matrix. 175 | if i == j: 176 | continue 177 | # Skip zero entries for proto efficiency. 
178 | if floor(TM[i-1, j-1]) <= 0: 179 | continue 180 | demand = tm_proto.demands.add() 181 | demand.src = f'{netname}-c{i}-ab1' 182 | demand.dst = f'{netname}-c{j}-ab1' 183 | demand.volume_mbps = floor(TM[i-1, j-1]) 184 | if raw_tm: 185 | rawTM.append([demand.src, f'{i}', demand.dst, f'{j}', 186 | demand.volume_mbps]) 187 | 188 | return tm_proto if not raw_tm else rawTM 189 | -------------------------------------------------------------------------------- /tests/wcmp_alloc_test.py: -------------------------------------------------------------------------------- 1 | import ipaddress 2 | import unittest 3 | 4 | import proto.te_solution_pb2 as te_sol 5 | 6 | import common.flags as FLAG 7 | from localTE.group_reduction import GroupReduction 8 | from localTE.wcmp_alloc import WCMPAllocation, loadTESolution 9 | from topology.topogen import generateToy4 10 | from topology.topology import Topology, loadTopo 11 | from traffic.traffic import Traffic 12 | 13 | TOY2_TOPO_PATH = 'tests/data/toy2_topo.textproto' 14 | TOY2_SOL_PATH = 'tests/data/toy2_te_sol.textproto' 15 | C1AB1 = 'toy2-c1-ab1' 16 | C2AB1 = 'toy2-c2-ab1' 17 | C3AB1 = 'toy2-c3-ab1' 18 | # Toy4 19 | TOY4_TM_PATH = 'tests/data/toy4_traffic.textproto' 20 | TOY4_SOL_PATH = 'tests/data/toy4_te_sol.textproto' 21 | TOY4_C1 = 'toy4-c1-ab1' 22 | TOY4_LINK1 = 'toy4-c4-ab1-s3i1-p1:toy4-c1-ab1-s3i1-p5' 23 | TOY4_NODE1 = 'toy4-c1-ab1-s3i1' 24 | 25 | class TestWCMPAlloc(unittest.TestCase): 26 | def test_load_invalid_te_solution(self): 27 | self.assertEqual(None, loadTESolution('')) 28 | 29 | def test_load_valid_te_solution(self): 30 | sol = loadTESolution(TOY2_SOL_PATH) 31 | self.assertNotEqual(None, sol) 32 | 33 | def test_toy2_sol_entries(self): 34 | sol = loadTESolution(TOY2_SOL_PATH) 35 | # expects 3 TEIntents 36 | self.assertEqual(3, len(sol.te_intents)) 37 | aggr_block_set = set() 38 | for te_intent in sol.te_intents: 39 | aggr_block_set.add(te_intent.target_block) 40 | self.assertEqual(set({C1AB1, C2AB1, C3AB1}), aggr_block_set) 41 | # expects 1 prefix for c1-ab1, and 1 prefix for c3-ab1. 42 | for te_intent in sol.te_intents: 43 | if te_intent.target_block == C1AB1: 44 | self.assertEqual(1, len(te_intent.prefix_intents)) 45 | if te_intent.target_block == C3AB1: 46 | self.assertEqual(1, len(te_intent.prefix_intents)) 47 | 48 | def test_toy4_generated_groups(self): 49 | FLAG.GR_ALGO = 'eurosys' 50 | toy4 = Topology('', input_proto=generateToy4()) 51 | toy4_traffic = Traffic(toy4, TOY4_TM_PATH) 52 | wcmp_alloc = WCMPAllocation(toy4, toy4_traffic, TOY4_SOL_PATH) 53 | wcmp_alloc.run() 54 | c1_worker = wcmp_alloc._worker_map[TOY4_C1] 55 | self.assertEqual(TOY4_C1, c1_worker._target_block) 56 | self.assertEqual(TOY4_C1, c1_worker._te_intent.target_block) 57 | # Verify there exist 8 nodes * (SRC and TRANSIT) = 16 sets of groups. 58 | self.assertEqual(16, len(c1_worker.groups.values())) 59 | for node, _, _ in c1_worker.groups.keys(): 60 | # Verify node has non-zero ECMP utilization. 61 | self.assertTrue(toy4.getNodeByName(node).getECMPUtil() > 0) 62 | self.assertTrue(toy4.getNodeByName(node).getNumGroups() > 0) 63 | link_util = toy4.dumpRealLinkUtil() 64 | # Verify real link utilization. 65 | self.assertTrue(link_util[TOY4_LINK1][1]) 66 | self.assertTrue(link_util[TOY4_LINK1][0] > 0.16) 67 | self.assertTrue(link_util[TOY4_LINK1][0] < 0.17) 68 | ecmp_util = toy4.dumpECMPUtil() 69 | # Verify node ECMP utilization.
70 | self.assertTrue(ecmp_util[TOY4_NODE1][0] > 0.03) 71 | self.assertTrue(ecmp_util[TOY4_NODE1][0] < 0.04) 72 | self.assertEqual(7, ecmp_util[TOY4_NODE1][1]) 73 | demand = toy4.dumpDemandAdmission() 74 | # Verify node admits all demands. 75 | self.assertEqual(1.0, demand[TOY4_NODE1][2]) 76 | 77 | class TestGroupReduction(unittest.TestCase): 78 | def test_single_switch_single_group_1(self): 79 | gr = GroupReduction([[1, 2, 3, 4]], te_sol.PrefixIntent.PrefixType.SRC, 80 | 16*1024) 81 | self.assertEqual([[1, 2, 3, 4]], gr.sanitize([gr.solve_sssg()])) 82 | gr.reset() 83 | self.assertEqual([[1, 2, 3, 4]], gr.table_fitting_sssg()) 84 | 85 | def test_single_switch_single_group_2(self): 86 | gr = GroupReduction([[20, 40, 60, 80]], 87 | te_sol.PrefixIntent.PrefixType.SRC, 88 | 16*1024) 89 | self.assertEqual([[1, 2, 3, 4]], gr.sanitize([gr.solve_sssg()])) 90 | gr.reset() 91 | # EuroSys heuristic does not perform lossless reduction if groups fit. 92 | self.assertEqual([[20, 40, 60, 80]], gr.table_fitting_sssg()) 93 | 94 | def test_single_switch_single_group_3(self): 95 | gr = GroupReduction([[10.5, 20.1, 31.0, 39.7]], 96 | te_sol.PrefixIntent.PrefixType.SRC, 97 | 10) 98 | self.assertEqual([[1, 2, 3, 4]], gr.sanitize([gr.solve_sssg()])) 99 | gr.reset() 100 | self.assertEqual([[1, 2, 3, 4]], gr.table_fitting_sssg()) 101 | 102 | def test_single_switch_single_group_4(self): 103 | gr = GroupReduction([[i + 0.1 for i in range(1, 17)]], 104 | te_sol.PrefixIntent.PrefixType.SRC, 105 | 16*1024) 106 | self.assertEqual([[(i + 0.1) * 10 for i in range(1, 17)]], 107 | gr.sanitize([gr.solve_sssg()])) 108 | gr.reset() 109 | self.assertEqual([list(range(1, 17))], gr.table_fitting_sssg()) 110 | 111 | def test_single_switch_single_group_5(self): 112 | gr = GroupReduction([[2000.01, 0, 0, 0]], 113 | te_sol.PrefixIntent.PrefixType.SRC, 114 | 10) 115 | self.assertEqual([[1, 0, 0, 0]], gr.sanitize([gr.solve_sssg()])) 116 | gr.reset() 117 | self.assertEqual([[1, 0, 0, 0]], gr.table_fitting_sssg()) 118 | 119 | def test_single_switch_multi_group_1(self): 120 | group_reduction = GroupReduction([[1, 2], [3, 4]], 121 | te_sol.PrefixIntent.PrefixType.SRC, 122 | 16*1024) 123 | self.assertEqual([[1, 2], [3, 4]], group_reduction.solve_ssmg()) 124 | group_reduction.reset() 125 | self.assertEqual([[1, 2], [3, 4]], group_reduction.table_fitting_ssmg()) 126 | group_reduction.reset() 127 | self.assertEqual([[1, 2], [3, 4]], group_reduction.google_ssmg()) 128 | 129 | def test_single_switch_multi_group_2(self): 130 | group_reduction = GroupReduction([[1.1, 2.1], [3.1, 4.1]], 131 | te_sol.PrefixIntent.PrefixType.SRC, 132 | 16*1024) 133 | self.assertEqual([[1, 2], [3, 4]], group_reduction.table_fitting_ssmg()) 134 | group_reduction.reset() 135 | self.assertEqual([[1, 2], [3, 4]], group_reduction.google_ssmg()) 136 | 137 | def test_single_switch_multi_group_3(self): 138 | group_reduction = GroupReduction([[1, 0, 0], [0, 2, 4]], 139 | te_sol.PrefixIntent.PrefixType.SRC, 140 | 5) 141 | # Verify that zeroes are correctly stripped and unstripped. 
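        # (Expectation sketch: [0, 2, 4] reduces losslessly to [0, 1, 2] by
        # dividing through by the GCD of 2, leaving 4 total entries within
        # the table limit of 5; zero members are stripped before reduction
        # and restored afterwards.)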
142 | self.assertEqual([[1, 0, 0], [0, 1, 2]], 143 | group_reduction.solve_ssmg()) 144 | group_reduction.reset() 145 | self.assertEqual([[1, 0, 0], [0, 1, 2]], 146 | group_reduction.table_fitting_ssmg()) 147 | group_reduction.reset() 148 | self.assertEqual([[1, 0, 0], [0, 1, 2]], 149 | group_reduction.google_ssmg()) 150 | 151 | def test_single_switch_multi_group_4(self): 152 | group_reduction = GroupReduction([[6, 0, 0], [0, 2, 4], [2, 0, 0]], 153 | te_sol.PrefixIntent.PrefixType.TRANSIT, 154 | 5) 155 | # Google SSMG reduces transit groups to ECMP, does not de-duplicate. 156 | self.assertEqual([[1, 0, 0], [0, 1, 1], [1, 0, 0]], 157 | group_reduction.google_ssmg()) 158 | 159 | def test_single_switch_multi_group_5(self): 160 | FLAG.IMPROVED_HEURISTIC = False 161 | FLAG.EUROSYS_MOD = False 162 | group_reduction = GroupReduction([[100, 1, 1], [0, 2, 4]], 163 | te_sol.PrefixIntent.PrefixType.SRC, 164 | 4) 165 | # EuroSys SSMG will reduce to ECMP and give up. 166 | self.assertEqual([[1, 1, 1], [0, 1, 1]], 167 | group_reduction.table_fitting_ssmg()) 168 | group_reduction.reset() 169 | # Google SSMG prunes the first member of the largest group when simple 170 | # reduction cannot fit the groups. 171 | self.assertEqual([[0, 1, 1], [0, 1, 1]], 172 | group_reduction.google_ssmg()) 173 | 174 | def test_single_switch_multi_group_6(self): 175 | FLAG.IMPROVED_HEURISTIC = False 176 | FLAG.EUROSYS_MOD = True 177 | group_reduction = GroupReduction([[1, 3, 1], [0, 0, 4]], 178 | te_sol.PrefixIntent.PrefixType.SRC, 179 | 3) 180 | # Modified EuroSys SSMG (w/ pruning) will reduce to ECMP and prune the 181 | # first port of the largest group [1, 1, 1]. 182 | self.assertEqual([[0, 1, 1], [0, 0, 1]], 183 | group_reduction.table_fitting_ssmg()) 184 | 185 | def test_single_switch_multi_group_7(self): 186 | FLAG.IMPROVED_HEURISTIC = True 187 | group_reduction = GroupReduction([[0.01, 0.01, 0.01], [997, 1, 1]], 188 | te_sol.PrefixIntent.PrefixType.SRC, 189 | 1000) 190 | # First group gets 0 entry by simple table carving. But it will be 191 | # rebalanced to 1 entry, so there is at least one member in the final 192 | # output. 193 | groups = group_reduction.table_carving_ssmg() 194 | self.assertTrue(sum(groups[0]) > 1) 195 | self.assertTrue(1 in groups[0]) 196 | 197 | if __name__ == "__main__": 198 | unittest.main() 199 | -------------------------------------------------------------------------------- /globalTE/global_te.py: -------------------------------------------------------------------------------- 1 | import math 2 | 3 | import gurobipy as gp 4 | import numpy as np 5 | import proto.te_solution_pb2 as TESolution 6 | from google.protobuf import text_format 7 | from gurobipy import GRB 8 | 9 | import common.flags as FLAG 10 | from common.common import PRINTV 11 | from topology.topology import Topology, filterPathSetWithSeg 12 | from traffic.traffic import Traffic 13 | 14 | 15 | def prettyPrint(verbose, te_sol): 16 | ''' 17 | Pretty prints the TE solution. 18 | ''' 19 | if FLAG.VERBOSE >= verbose: 20 | print('\n===== TE solution starts =====') 21 | for c, sol in te_sol.items(): 22 | # Raw solution has flows in Tbps, converts back to Mbps. 23 | print(f'Demand: [{c[0]}] => [{c[1]}], {c[2] * 1000000} Mbps') 24 | for path_name, flow in sol.items(): 25 | print(f' {flow} Mbps on {path_name}') 26 | print('===== TE solution ends =====\n') 27 | 28 | class GlobalTE: 29 | ''' 30 | Global TE solves a multi-commodity flow (MCF) load balancing problem on 31 | a network defined by input topology. 
The commodities are defined in the 32 | input traffic information. It generates the optimal TE solution as protobuf. 33 | ''' 34 | def __init__(self, topo_obj, traffic_obj): 35 | self._topo = topo_obj 36 | self._traffic = traffic_obj 37 | # commodity path set map: integer index of commodity to its path set. 38 | self.commodity_path_sets = {} 39 | # Map from the integer index of a commodity to its (src, dst, demand). 40 | self.commodity_idx_std = {} 41 | for idx, ((s, t), d) in enumerate(traffic_obj.getAllDemands().items()): 42 | # Stores demand in Tbps instead of Mbps. 43 | self.commodity_idx_std[idx] = (s, t, d / 1000000) 44 | path_set = topo_obj.findPathSetOfAggrBlockPair(s, t) 45 | self.commodity_path_sets[idx] = path_set 46 | 47 | def solve(self): 48 | ''' 49 | Constructs and then solves the MCF optimization. 50 | ''' 51 | try: 52 | # Initialize a new model 53 | m = gp.Model("global_mcf") 54 | m.setParam("LogToConsole", 1 if FLAG.VERBOSE >= 2 else 0) 55 | #m.setParam("FeasibilityTol", 1e-9) 56 | #m.setParam("NodefileStart", 0.5) 57 | #m.setParam("NodefileDir", "/tmp") 58 | m.setParam("Threads", 0) 59 | #m.setParam("TimeLimit", 120) 60 | #m.setParam("LogFile", "gurobi.log") 61 | 62 | # Step 1: create decision variables. 63 | # umax for maximum link utilization. 64 | umax = m.addVar(vtype=GRB.CONTINUOUS, lb=0, ub=1, name="umax") 65 | # A map from link's (x, y) to link utilization, u(x, y). Note that 66 | # the term 'link' in this class means abstract level path. 67 | u = {} 68 | # A map from link's (x, y) to link capacity, c(x, y). 69 | c = {} 70 | # Iterate over all paths in topo. Again, we call path 'link' here. 71 | for link_name, link in self._topo.getAllPaths().items(): 72 | s, t = link.src_aggr_block.name, link.dst_aggr_block.name 73 | u[(s, t)] = m.addVar(vtype=GRB.CONTINUOUS, lb=0, ub=1, 74 | name="u_" + link_name) 75 | # Converts capacity from Mbps to Tbps. 76 | c[(s, t)] = link.capacity / 1000000 77 | # fip is the amount of flow in commodity i assigned on path p. 78 | # f is a map of a map: {commodity index: {(s, m, t): fip}} 79 | f = {} 80 | for i, path_set in self.commodity_path_sets.items(): 81 | for path in path_set.keys(): 82 | f.setdefault(i, {})[path] = m.addVar(vtype=GRB.CONTINUOUS, 83 | lb=0, name=f"f_{i}_{':'.join(path)}") 84 | 85 | # Step 2: set objective. 86 | m.setObjective(umax, GRB.MINIMIZE) 87 | 88 | # Step 3: add constraints. 89 | for link, u_link in u.items(): 90 | # Definition of max link utilization. 91 | # For each link, u(x, y) <= umax. 92 | m.addConstr(u_link <= umax) 93 | 94 | # Definition of link utilization. fip_link contains all fip that 95 | # traverse `link`. 96 | fip_link = [] 97 | for idx, path_set in self.commodity_path_sets.items(): 98 | # For each commodity, get the paths that contain `link`. 99 | filtered_path_set = filterPathSetWithSeg(path_set, link) 100 | # fip of paths that contain `link` will be summed up to 101 | # compute u(x, y). 102 | for path in filtered_path_set.keys(): 103 | fip_link.append(f[idx][path]) 104 | # For each link, u(x, y) == sum_i(sum_Pi[x,y](fip)) / c(x, y), 105 | # which is equivalent to u(x, y) == sum(fip_link) / c(x, y) 106 | m.addConstr(u_link == gp.quicksum(fip_link) / c[link]) 107 | 108 | # Link capacity constraint. 109 | # For each link, sum(fip_link) <= c(x, y). 110 | m.addConstr(gp.quicksum(fip_link) <= c[link]) 111 | 112 | # Step 3 continued: flow conservation constraint for each commodity.
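            # (Sketch of the full LP, in the notation of the comments above:
            #   minimize umax, subject to
            #   sum_p(fip) == demand_i                       for every commodity i,
            #   u(x, y) == sum(fip_link) / c(x, y) <= umax   for every link,
            #   sum(fip_link) <= c(x, y)                     for every link,
            # plus, when FLAG.ENABLE_HEDGING is set, the hedging constraint
            #   fip <= demand_i * c_p / (sum_q(c_q) * S)     added below.)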
113 | for idx, path_set in self.commodity_path_sets.items(): 114 | _, _, demand = self.commodity_idx_std[idx] 115 | # For each commodity i, sum_p(fip) == demand_i. 116 | m.addConstr(gp.quicksum(list(f[idx].values())) == demand) 117 | # Hedging constraint below. 118 | if not FLAG.ENABLE_HEDGING: 119 | continue 120 | # Spread S of 0 is illegal, should not add hedging constraint. 121 | if math.isclose(FLAG.S, 0.0, abs_tol=1e-10): 122 | print(f'[ERROR] hedging spread {FLAG.S} cannot be 0!') 123 | continue 124 | # Obtains the capacity of every path in the path_set. 125 | cp_list = [self._topo.findCapacityOfPathTuple(p) \ 126 | for p in path_set.keys()] 127 | # For each path in a commodity, the flow assigned to the path 128 | # cannot exceed a fraction S of the total bisection capacity. 129 | for k, p in enumerate(path_set.keys()): 130 | m.addConstr(f[idx][p] <= demand * 131 | cp_list[k] / (sum(cp_list) * FLAG.S)) 132 | 133 | # Optimize model 134 | m.optimize() 135 | 136 | # Extracts and organizes final solution. 137 | te_sol_by_commodity = {} 138 | te_sol_by_src = {} 139 | for f in m.getVars(): 140 | if 'f_' in f.VarName: 141 | # Skips empty flows. 142 | if f.X == 0.0: 143 | continue 144 | splits = f.VarName.split('_') 145 | # Extracts commodity and path from variable name. 146 | # Also converts flow from Tbps back to Mbps. 147 | i, path, flow = int(splits[1]), splits[2], f.X * 1000000 148 | te_sol_by_commodity.setdefault(self.commodity_idx_std[i], 149 | {})[path] = flow 150 | PRINTV(1, f'Solver obj MLU: {m.ObjVal}') 151 | prettyPrint(1, te_sol_by_commodity) 152 | 153 | for (s, t, _), path_map in te_sol_by_commodity.items(): 154 | # Allocates a new TEIntent for source node s. 155 | te_intent = te_sol_by_src.setdefault(s, TESolution.TEIntent()) 156 | te_intent.target_block = s 157 | # Allocates a new PrefixIntent for destination node t. 158 | prefix_intent = te_intent.prefix_intents.add() 159 | prefix_intent.dst_name = t 160 | # PrefixType of this entry is always SRC. But there might be 161 | # TRANSIT type entries generated along the parsing, which will 162 | # be appended to other TEIntent. 163 | prefix_intent.type = TESolution.PrefixIntent.PrefixType.SRC 164 | # Converts flows on paths to flows on links. 165 | flow_dist = self._topo.distributeFlows(path_map) 166 | for (u, v), port_weight_map in flow_dist.items(): 167 | if u == s: 168 | # Merge all source flows across single-segment paths 169 | # into this prefix_intent. 170 | for port, weight in port_weight_map.items(): 171 | nexthop_entry = prefix_intent.nexthop_entries.add() 172 | nexthop_entry.nexthop_port = port 173 | nexthop_entry.weight = weight 174 | else: 175 | # u != s means that we are dealing with transit flows. 176 | # Find the corresponding TEIntent for source AggrBlock 177 | # u and add them accordingly. 178 | transit_te_intent = te_sol_by_src.setdefault(u, 179 | TESolution.TEIntent()) 180 | transit_te_intent.target_block = u 181 | # Allocates a new PrefixIntent for destination node v. 182 | # Note that another PrefixIntent might exist, but that 183 | # is for SRC, not TRANSIT. 184 | prefix_intent_v = transit_te_intent.prefix_intents.add() 185 | prefix_intent_v.dst_name = v 186 | prefix_intent_v.type = \ 187 | TESolution.PrefixIntent.PrefixType.TRANSIT 188 | # Populates `prefix_intent_v` with transit flows.
189 | for port, weight in port_weight_map.items(): 190 | nexthop_entry = prefix_intent_v.nexthop_entries.add() 191 | nexthop_entry.nexthop_port = port 192 | nexthop_entry.weight = weight 193 | 194 | # Packs per-src TEIntent into a TESolution. 195 | sol = TESolution.TESolution() 196 | sol.type = self._traffic.getDemandType() 197 | for src, te_intent in te_sol_by_src.items(): 198 | sol.te_intents.append(te_intent) 199 | return sol 200 | 201 | except gp.GurobiError as e: 202 | print('Error code ' + str(e.errno) + ': ' + str(e)) 203 | return [] 204 | except AttributeError: 205 | print('Encountered an attribute error') 206 | return [] 207 | -------------------------------------------------------------------------------- /topology/striping_plan.py: -------------------------------------------------------------------------------- 1 | import itertools 2 | import math 3 | import random 4 | 5 | import gurobipy as gp 6 | import numpy as np 7 | from gurobipy import GRB 8 | 9 | import common.flags as FLAG 10 | from common.common import PRINTV 11 | 12 | 13 | def matrixIndexToSwitchIndex(mi): 14 | ''' 15 | Converts the index in the striping matrix to the corresponding cluster index 16 | and S3 switch index. 17 | ''' 18 | return mi // NUM_S3, mi % NUM_S3 19 | 20 | def switchIndexToMatrixIndex(ci, si): 21 | ''' 22 | Converts the cluster index and S3 switch index to the corresponding index 23 | in the striping matrix. 24 | ''' 25 | return ci * NUM_S3 + si 26 | 27 | class StripingPlan: 28 | ''' 29 | StripingPlan solves the problem of interconnecting clusters to form a 30 | network fabric. It assumes a full-mesh topology and tries to balance the 31 | links/paths assigned between any cluster pair as evenly as possible (unless 32 | explicitly requested otherwise). 33 | ''' 34 | def __init__(self, net_name, num_clusters, cluster_radices, num_s3, 35 | getClusterGenByIndex, genlist, port_speeds, no_connect_pairs): 36 | ''' 37 | net_name: name of the network, used for constructing the physical striping 38 | plan. 39 | 40 | num_clusters: the total number of clusters to be connected. 41 | 42 | cluster_radices: a map from cluster id to the radix/degree (of egress 43 | links). Note that links are bidirectional here, and 44 | that cluster id is 1-indexed. 45 | 46 | num_s3: number of S3 switches per cluster. 47 | 48 | getClusterGenByIndex: a function object that returns the generation of 49 | a given cluster index. It takes two arguments: 50 | 1. cluster index; 2. a list of the number of 51 | clusters for each generation. 52 | 53 | genlist: a list of the number of clusters for each generation. This is 54 | the second argument to getClusterGenByIndex(). 55 | 56 | port_speeds: a map from Gen. ID to the port speed. 57 | 58 | no_connect_pairs: a set of cluster-index pairs that must not be 59 | connected. Each tuple of cluster indices is 60 | always sorted in ascending order and 61 | deduplicated. 62 | ''' 63 | self.net_name = net_name 64 | self.num_clusters = num_clusters 65 | self.cluster_radices = cluster_radices 66 | global NUM_S3 67 | NUM_S3 = num_s3 68 | self.clusterGen = getClusterGenByIndex 69 | self.genlist = genlist 70 | self.port_speeds = port_speeds 71 | self.no_connect_pairs = no_connect_pairs 72 | # A map from globally unique S3 index to its radix. 73 | self.s3_radices = {} 74 | # Distribute the cluster radix to each S3 switch. If the radix is 75 | # not a multiple of NUM_S3, the residual links will be round-robin 76 | # assigned to the S3 switches of the cluster.
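        # For illustration (hypothetical numbers): a cluster radix of 10
        # spread over NUM_S3 = 4 switches gives 10 // 4 = 2 links per switch,
        # and the 10 - 2*4 = 2 residual links bump the first two S3 switches
        # of that cluster to 3.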
77 | for ci in range(self.num_clusters): 78 | for si in range(NUM_S3): 79 | mi = switchIndexToMatrixIndex(ci, si) 80 | self.s3_radices[mi] = self.cluster_radices[ci+1] // NUM_S3 81 | for si in range(self.cluster_radices[ci+1] \ 82 | - self.cluster_radices[ci+1] // NUM_S3 * NUM_S3): 83 | mi = switchIndexToMatrixIndex(ci, si) 84 | self.s3_radices[mi] += 1 85 | 86 | def _max_assignment(self, model): 87 | ''' 88 | Builds an ILP formulation to maximize the number of links assigned 89 | between S3 switches subject to radix and balance constraints. 90 | 91 | model: pre-built empty model, needs decision vars and constraints. 92 | ''' 93 | min_radix = min(self.s3_radices.values()) 94 | 95 | # Create variables: x[i][j] is the number of links assigned between 96 | # S3 switch i and j. x is a matrix of connectivity assignment. 97 | x = [] 98 | for i in range(self.num_clusters * NUM_S3): 99 | x_row = [] 100 | for j in range(self.num_clusters * NUM_S3): 101 | x_row.append(model.addVar(vtype=GRB.INTEGER, lb=0, 102 | ub=self.s3_radices[i], 103 | name="x_{}_{}".format(i+1, j+1))) 104 | x.append(x_row) 105 | model.update() 106 | 107 | # Set objective: maximize total number of links assigned. 108 | model.setObjective(0.5 * gp.quicksum(model.getVars()), GRB.MAXIMIZE) 109 | 110 | # Add constraints. 111 | for i in range(self.num_clusters * NUM_S3): 112 | # Add constraint: S3 radix bound. 113 | model.addConstr(gp.quicksum(x[i]) <= self.s3_radices[i], 114 | "s3_radix_bound_{}".format(i+1)) 115 | for j in range(self.num_clusters * NUM_S3): 116 | if i == j: 117 | # Add constraint: no self loops. 118 | model.addConstr(x[i][i] == 0, "no_self_loop_{}".format(i+1)) 119 | continue 120 | ci, si = matrixIndexToSwitchIndex(i) 121 | cj, sj = matrixIndexToSwitchIndex(j) 122 | if tuple(sorted([ci + 1, cj + 1])) in self.no_connect_pairs: 123 | # Add constraint: no connection between this particular 124 | # pair of clusters. 125 | model.addConstr(x[i][j] == 0, 126 | "disconnect_{}_{}".format(i+1, j+1)) 127 | continue 128 | if si != sj: 129 | # Add constraint: no cross connect between S3 switches of 130 | # different locations (intra-cluster indices). 131 | model.addConstr(x[i][j] == 0, 132 | "no_x_connect_{}_{}".format(i+1, j+1)) 133 | continue 134 | # Add constraint: equal spread of links to peers. 135 | # Note: lower bound should be the S3 switch of min radix, since 136 | # we allow ports to be idle. 137 | model.addConstr(x[i][j] >= math.floor(min_radix \ 138 | / (self.num_clusters - 1)), 139 | "even_spread_lb_{}_{}".format(i+1, j+1)) 140 | model.addConstr(x[i][j] <= math.ceil(self.s3_radices[i] \ 141 | / (self.num_clusters - 1)), 142 | "even_spread_ub_{}_{}".format(i+1, j+1)) 143 | # Matrix x is symmetric, we can save half of the constraints. 144 | if i < j: 145 | # Add constraint: bidi connections - x_ij == x_ji. 146 | model.addConstr(x[i][j] == x[j][i], 147 | "bidi_connection_{}_{}".format(i+1, j+1)) 148 | 149 | return model 150 | 151 | def solve(self): 152 | ''' 153 | Solves the striping problem by modeling it as a max assignment 154 | optimization. 155 | Returns a list of tuples (port pairs) that should be connected, and a 156 | map of paths to connect. Port names are FQDNs. 
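        A hypothetical return value for two 40G clusters joined by a single
        link (net_name='net', one S3 switch per cluster):
            paths == {(1, 2): 40000}
            port_pairs == [('net-c1-ab1-s3i1-p1', 'net-c2-ab1-s3i1-p1', 40000)]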
157 | ''' 158 | try: 159 | # Initialize a new model 160 | m = gp.Model("striping") 161 | m.setParam("LogToConsole", 1 if FLAG.VERBOSE >= 2 else 0) 162 | m.setParam("FeasibilityTol", 1e-7) 163 | m.setParam("IntFeasTol", 1e-8) 164 | m.setParam("MIPGap", 1e-4) 165 | #m.setParam("NodefileStart", 0.5) 166 | m.setParam("NodefileDir", "/tmp") 167 | m.setParam("Threads", 0) 168 | #m.setParam("TimeLimit", FLAG.GUROBI_TIMEOUT) 169 | #m.setParam("LogFile", "gurobi.log") 170 | 171 | # Construct model 172 | m.setParam("NonConvex", 2) 173 | m.setParam("MIPFocus", 2) 174 | m = self._max_assignment(m) 175 | 176 | # Optimize model 177 | m.optimize() 178 | 179 | PRINTV(2, f'[Obj] max links assigned in DCN: {m.ObjVal}') 180 | 181 | # A striping matrix for all S3 switches. 182 | mat_s3 = np.zeros(shape=(self.num_clusters * NUM_S3, 183 | self.num_clusters * NUM_S3)) 184 | for v in m.getVars(): 185 | tot_links = int(v.X) 186 | if not tot_links: 187 | continue 188 | split = v.VarName.split('_') 189 | # Extract the two S3 switch indices. 190 | i, j = int(split[1]) - 1, int(split[2]) - 1 191 | mat_s3[i][j] = tot_links 192 | 193 | # A map from globally unique S3 switch id to a list of ports 194 | # available for establishing connections. 195 | available_ports = {} 196 | # A map from paths to path capacity. The key is a tuple of clusters. 197 | paths = {} 198 | # A list of tuples, each tuple consists of 2 ports that can form a 199 | # bidi link and the link speed. 200 | port_pairs = [] 201 | # Iterate through the S3 radix map and populate a list of available 202 | # ports for the connections. 203 | for mi, radix in self.s3_radices.items(): 204 | ci, si = matrixIndexToSwitchIndex(mi) 205 | for pi in range(radix): 206 | available_ports.setdefault(mi, []).append( 207 | f'{self.net_name}-c{ci+1}-ab1-s3i{si+1}-p{2*pi+1}') 208 | 209 | # Iterate through the striping matrix to construct port pairs and 210 | # paths. 211 | for mi, row in enumerate(mat_s3): 212 | ci, si = matrixIndexToSwitchIndex(mi) 213 | for mj, tot_links in enumerate(row): 214 | # The striping matrix is symmetric, we only need to process 215 | # half of it. 216 | if not tot_links or mi >= mj: 217 | continue 218 | cj, sj = matrixIndexToSwitchIndex(mj) 219 | i_gen = self.clusterGen(ci+1, self.genlist) 220 | j_gen = self.clusterGen(cj+1, self.genlist) 221 | link_speed = min(self.port_speeds[i_gen], 222 | self.port_speeds[j_gen]) 223 | # Add up all the link capacity in the same path. 
224 | if ci != cj: 225 | paths.setdefault((ci+1, cj+1), 0) 226 | paths[(ci+1, cj+1)] += tot_links * link_speed 227 | for _ in range(int(tot_links)): 228 | pu = available_ports[mi].pop(0) 229 | pv = available_ports[mj].pop(0) 230 | port_pairs.append((pu, pv, link_speed)) 231 | 232 | return (paths, port_pairs) 233 | 234 | except gp.GurobiError as e: 235 | print('Error code ' + str(e.errno) + ': ' + str(e)) 236 | return [] 237 | except AttributeError: 238 | print('Encountered an attribute error') 239 | return [] 240 | -------------------------------------------------------------------------------- /tests/data/toy4_te_sol.textproto: -------------------------------------------------------------------------------- 1 | type: LEVEL_TOR 2 | te_intents { 3 | target_block: "toy4-c1-ab1" 4 | prefix_intents { 5 | dst_name: "toy4-c2-ab1" 6 | type: SRC 7 | nexthop_entries { 8 | nexthop_port: "toy4-c1-ab1-s3i1-p1" 9 | weight: 2615.812499999999 10 | } 11 | nexthop_entries { 12 | nexthop_port: "toy4-c1-ab1-s3i2-p1" 13 | weight: 2615.812499999999 14 | } 15 | nexthop_entries { 16 | nexthop_port: "toy4-c1-ab1-s3i3-p1" 17 | weight: 2615.812499999999 18 | } 19 | nexthop_entries { 20 | nexthop_port: "toy4-c1-ab1-s3i4-p1" 21 | weight: 2615.812499999999 22 | } 23 | nexthop_entries { 24 | nexthop_port: "toy4-c1-ab1-s3i1-p3" 25 | weight: 887.9375000000007 26 | } 27 | nexthop_entries { 28 | nexthop_port: "toy4-c1-ab1-s3i2-p3" 29 | weight: 887.9375000000007 30 | } 31 | nexthop_entries { 32 | nexthop_port: "toy4-c1-ab1-s3i3-p3" 33 | weight: 887.9375000000007 34 | } 35 | nexthop_entries { 36 | nexthop_port: "toy4-c1-ab1-s3i4-p3" 37 | weight: 887.9375000000007 38 | } 39 | } 40 | prefix_intents { 41 | dst_name: "toy4-c3-ab1" 42 | type: SRC 43 | nexthop_entries { 44 | nexthop_port: "toy4-c1-ab1-s3i1-p3" 45 | weight: 1771.5 46 | } 47 | nexthop_entries { 48 | nexthop_port: "toy4-c1-ab1-s3i2-p3" 49 | weight: 1771.5 50 | } 51 | nexthop_entries { 52 | nexthop_port: "toy4-c1-ab1-s3i3-p3" 53 | weight: 1771.5 54 | } 55 | nexthop_entries { 56 | nexthop_port: "toy4-c1-ab1-s3i4-p3" 57 | weight: 1771.5 58 | } 59 | } 60 | prefix_intents { 61 | dst_name: "toy4-c4-ab1" 62 | type: SRC 63 | nexthop_entries { 64 | nexthop_port: "toy4-c1-ab1-s3i1-p5" 65 | weight: 1570.5 66 | } 67 | nexthop_entries { 68 | nexthop_port: "toy4-c1-ab1-s3i2-p5" 69 | weight: 1570.5 70 | } 71 | nexthop_entries { 72 | nexthop_port: "toy4-c1-ab1-s3i3-p5" 73 | weight: 1570.5 74 | } 75 | nexthop_entries { 76 | nexthop_port: "toy4-c1-ab1-s3i4-p5" 77 | weight: 1570.5 78 | } 79 | } 80 | prefix_intents { 81 | dst_name: "toy4-c5-ab1" 82 | type: SRC 83 | nexthop_entries { 84 | nexthop_port: "toy4-c1-ab1-s3i1-p7" 85 | weight: 2182.0 86 | } 87 | nexthop_entries { 88 | nexthop_port: "toy4-c1-ab1-s3i2-p7" 89 | weight: 2182.0 90 | } 91 | nexthop_entries { 92 | nexthop_port: "toy4-c1-ab1-s3i3-p7" 93 | weight: 2182.0 94 | } 95 | nexthop_entries { 96 | nexthop_port: "toy4-c1-ab1-s3i4-p7" 97 | weight: 2182.0 98 | } 99 | } 100 | prefix_intents { 101 | dst_name: "toy4-c2-ab1" 102 | type: TRANSIT 103 | nexthop_entries { 104 | nexthop_port: "toy4-c1-ab1-s3i1-p1" 105 | weight: 3932.375000000001 106 | } 107 | nexthop_entries { 108 | nexthop_port: "toy4-c1-ab1-s3i2-p1" 109 | weight: 3932.375000000001 110 | } 111 | nexthop_entries { 112 | nexthop_port: "toy4-c1-ab1-s3i3-p1" 113 | weight: 3932.375000000001 114 | } 115 | nexthop_entries { 116 | nexthop_port: "toy4-c1-ab1-s3i4-p1" 117 | weight: 3932.375000000001 118 | } 119 | } 120 | prefix_intents { 121 | dst_name: "toy4-c5-ab1" 122 | type: 
TRANSIT 123 | nexthop_entries { 124 | nexthop_port: "toy4-c1-ab1-s3i1-p7" 125 | weight: 495.8124999999997 126 | } 127 | nexthop_entries { 128 | nexthop_port: "toy4-c1-ab1-s3i2-p7" 129 | weight: 495.8124999999997 130 | } 131 | nexthop_entries { 132 | nexthop_port: "toy4-c1-ab1-s3i3-p7" 133 | weight: 495.8124999999997 134 | } 135 | nexthop_entries { 136 | nexthop_port: "toy4-c1-ab1-s3i4-p7" 137 | weight: 495.8124999999997 138 | } 139 | } 140 | } 141 | te_intents { 142 | target_block: "toy4-c3-ab1" 143 | prefix_intents { 144 | dst_name: "toy4-c2-ab1" 145 | type: TRANSIT 146 | nexthop_entries { 147 | nexthop_port: "toy4-c3-ab1-s3i1-p3" 148 | weight: 887.9375000000007 149 | } 150 | nexthop_entries { 151 | nexthop_port: "toy4-c3-ab1-s3i2-p3" 152 | weight: 887.9375000000007 153 | } 154 | nexthop_entries { 155 | nexthop_port: "toy4-c3-ab1-s3i3-p3" 156 | weight: 887.9375000000007 157 | } 158 | nexthop_entries { 159 | nexthop_port: "toy4-c3-ab1-s3i4-p3" 160 | weight: 887.9375000000007 161 | } 162 | } 163 | prefix_intents { 164 | dst_name: "toy4-c1-ab1" 165 | type: SRC 166 | nexthop_entries { 167 | nexthop_port: "toy4-c3-ab1-s3i1-p1" 168 | weight: 843.75 169 | } 170 | nexthop_entries { 171 | nexthop_port: "toy4-c3-ab1-s3i2-p1" 172 | weight: 843.75 173 | } 174 | nexthop_entries { 175 | nexthop_port: "toy4-c3-ab1-s3i3-p1" 176 | weight: 843.75 177 | } 178 | nexthop_entries { 179 | nexthop_port: "toy4-c3-ab1-s3i4-p1" 180 | weight: 843.75 181 | } 182 | } 183 | prefix_intents { 184 | dst_name: "toy4-c2-ab1" 185 | type: SRC 186 | nexthop_entries { 187 | nexthop_port: "toy4-c3-ab1-s3i1-p3" 188 | weight: 4506.75 189 | } 190 | nexthop_entries { 191 | nexthop_port: "toy4-c3-ab1-s3i2-p3" 192 | weight: 4506.75 193 | } 194 | nexthop_entries { 195 | nexthop_port: "toy4-c3-ab1-s3i3-p3" 196 | weight: 4506.75 197 | } 198 | nexthop_entries { 199 | nexthop_port: "toy4-c3-ab1-s3i4-p3" 200 | weight: 4506.75 201 | } 202 | } 203 | prefix_intents { 204 | dst_name: "toy4-c4-ab1" 205 | type: SRC 206 | nexthop_entries { 207 | nexthop_port: "toy4-c3-ab1-s3i1-p5" 208 | weight: 2020.0 209 | } 210 | nexthop_entries { 211 | nexthop_port: "toy4-c3-ab1-s3i2-p5" 212 | weight: 2020.0 213 | } 214 | nexthop_entries { 215 | nexthop_port: "toy4-c3-ab1-s3i3-p5" 216 | weight: 2020.0 217 | } 218 | nexthop_entries { 219 | nexthop_port: "toy4-c3-ab1-s3i4-p5" 220 | weight: 2020.0 221 | } 222 | } 223 | prefix_intents { 224 | dst_name: "toy4-c5-ab1" 225 | type: SRC 226 | nexthop_entries { 227 | nexthop_port: "toy4-c3-ab1-s3i1-p7" 228 | weight: 2807.25 229 | } 230 | nexthop_entries { 231 | nexthop_port: "toy4-c3-ab1-s3i2-p7" 232 | weight: 2807.25 233 | } 234 | nexthop_entries { 235 | nexthop_port: "toy4-c3-ab1-s3i3-p7" 236 | weight: 2807.25 237 | } 238 | nexthop_entries { 239 | nexthop_port: "toy4-c3-ab1-s3i4-p7" 240 | weight: 2807.25 241 | } 242 | } 243 | prefix_intents { 244 | dst_name: "toy4-c2-ab1" 245 | type: TRANSIT 246 | nexthop_entries { 247 | nexthop_port: "toy4-c3-ab1-s3i1-p3" 248 | weight: 827.6875000000006 249 | } 250 | nexthop_entries { 251 | nexthop_port: "toy4-c3-ab1-s3i2-p3" 252 | weight: 827.6875000000006 253 | } 254 | nexthop_entries { 255 | nexthop_port: "toy4-c3-ab1-s3i3-p3" 256 | weight: 827.6875000000006 257 | } 258 | nexthop_entries { 259 | nexthop_port: "toy4-c3-ab1-s3i4-p3" 260 | weight: 827.6875000000006 261 | } 262 | } 263 | } 264 | te_intents { 265 | target_block: "toy4-c2-ab1" 266 | prefix_intents { 267 | dst_name: "toy4-c1-ab1" 268 | type: SRC 269 | nexthop_entries { 270 | nexthop_port: "toy4-c2-ab1-s3i1-p1" 271 | 
weight: 756.75 272 | } 273 | nexthop_entries { 274 | nexthop_port: "toy4-c2-ab1-s3i2-p1" 275 | weight: 756.75 276 | } 277 | nexthop_entries { 278 | nexthop_port: "toy4-c2-ab1-s3i3-p1" 279 | weight: 756.75 280 | } 281 | nexthop_entries { 282 | nexthop_port: "toy4-c2-ab1-s3i4-p1" 283 | weight: 756.75 284 | } 285 | } 286 | prefix_intents { 287 | dst_name: "toy4-c3-ab1" 288 | type: SRC 289 | nexthop_entries { 290 | nexthop_port: "toy4-c2-ab1-s3i1-p3" 291 | weight: 2044.4999999999998 292 | } 293 | nexthop_entries { 294 | nexthop_port: "toy4-c2-ab1-s3i2-p3" 295 | weight: 2044.4999999999998 296 | } 297 | nexthop_entries { 298 | nexthop_port: "toy4-c2-ab1-s3i3-p3" 299 | weight: 2044.4999999999998 300 | } 301 | nexthop_entries { 302 | nexthop_port: "toy4-c2-ab1-s3i4-p3" 303 | weight: 2044.4999999999998 304 | } 305 | } 306 | prefix_intents { 307 | dst_name: "toy4-c4-ab1" 308 | type: SRC 309 | nexthop_entries { 310 | nexthop_port: "toy4-c2-ab1-s3i1-p5" 311 | weight: 1812.25 312 | } 313 | nexthop_entries { 314 | nexthop_port: "toy4-c2-ab1-s3i2-p5" 315 | weight: 1812.25 316 | } 317 | nexthop_entries { 318 | nexthop_port: "toy4-c2-ab1-s3i3-p5" 319 | weight: 1812.25 320 | } 321 | nexthop_entries { 322 | nexthop_port: "toy4-c2-ab1-s3i4-p5" 323 | weight: 1812.25 324 | } 325 | } 326 | prefix_intents { 327 | dst_name: "toy4-c5-ab1" 328 | type: SRC 329 | nexthop_entries { 330 | nexthop_port: "toy4-c2-ab1-s3i1-p7" 331 | weight: 2518.0 332 | } 333 | nexthop_entries { 334 | nexthop_port: "toy4-c2-ab1-s3i2-p7" 335 | weight: 2518.0 336 | } 337 | nexthop_entries { 338 | nexthop_port: "toy4-c2-ab1-s3i3-p7" 339 | weight: 2518.0 340 | } 341 | nexthop_entries { 342 | nexthop_port: "toy4-c2-ab1-s3i4-p7" 343 | weight: 2518.0 344 | } 345 | } 346 | } 347 | te_intents { 348 | target_block: "toy4-c4-ab1" 349 | prefix_intents { 350 | dst_name: "toy4-c1-ab1" 351 | type: SRC 352 | nexthop_entries { 353 | nexthop_port: "toy4-c4-ab1-s3i1-p1" 354 | weight: 2120.0 355 | } 356 | nexthop_entries { 357 | nexthop_port: "toy4-c4-ab1-s3i2-p1" 358 | weight: 2120.0 359 | } 360 | nexthop_entries { 361 | nexthop_port: "toy4-c4-ab1-s3i3-p1" 362 | weight: 2120.0 363 | } 364 | nexthop_entries { 365 | nexthop_port: "toy4-c4-ab1-s3i4-p1" 366 | weight: 2120.0 367 | } 368 | } 369 | prefix_intents { 370 | dst_name: "toy4-c2-ab1" 371 | type: SRC 372 | nexthop_entries { 373 | nexthop_port: "toy4-c4-ab1-s3i1-p3" 374 | weight: 6548.1875 375 | } 376 | nexthop_entries { 377 | nexthop_port: "toy4-c4-ab1-s3i2-p3" 378 | weight: 6548.1875 379 | } 380 | nexthop_entries { 381 | nexthop_port: "toy4-c4-ab1-s3i3-p3" 382 | weight: 6548.1875 383 | } 384 | nexthop_entries { 385 | nexthop_port: "toy4-c4-ab1-s3i4-p3" 386 | weight: 6548.1875 387 | } 388 | nexthop_entries { 389 | nexthop_port: "toy4-c4-ab1-s3i1-p1" 390 | weight: 3932.375000000001 391 | } 392 | nexthop_entries { 393 | nexthop_port: "toy4-c4-ab1-s3i2-p1" 394 | weight: 3932.375000000001 395 | } 396 | nexthop_entries { 397 | nexthop_port: "toy4-c4-ab1-s3i3-p1" 398 | weight: 3932.375000000001 399 | } 400 | nexthop_entries { 401 | nexthop_port: "toy4-c4-ab1-s3i4-p1" 402 | weight: 3932.375000000001 403 | } 404 | nexthop_entries { 405 | nexthop_port: "toy4-c4-ab1-s3i1-p5" 406 | weight: 827.6875000000006 407 | } 408 | nexthop_entries { 409 | nexthop_port: "toy4-c4-ab1-s3i2-p5" 410 | weight: 827.6875000000006 411 | } 412 | nexthop_entries { 413 | nexthop_port: "toy4-c4-ab1-s3i3-p5" 414 | weight: 827.6875000000006 415 | } 416 | nexthop_entries { 417 | nexthop_port: "toy4-c4-ab1-s3i4-p5" 418 | weight: 
827.6875000000006 419 | } 420 | } 421 | prefix_intents { 422 | dst_name: "toy4-c3-ab1" 423 | type: SRC 424 | nexthop_entries { 425 | nexthop_port: "toy4-c4-ab1-s3i1-p5" 426 | weight: 5720.5 427 | } 428 | nexthop_entries { 429 | nexthop_port: "toy4-c4-ab1-s3i2-p5" 430 | weight: 5720.5 431 | } 432 | nexthop_entries { 433 | nexthop_port: "toy4-c4-ab1-s3i3-p5" 434 | weight: 5720.5 435 | } 436 | nexthop_entries { 437 | nexthop_port: "toy4-c4-ab1-s3i4-p5" 438 | weight: 5720.5 439 | } 440 | } 441 | prefix_intents { 442 | dst_name: "toy4-c5-ab1" 443 | type: SRC 444 | nexthop_entries { 445 | nexthop_port: "toy4-c4-ab1-s3i1-p7" 446 | weight: 6548.1875 447 | } 448 | nexthop_entries { 449 | nexthop_port: "toy4-c4-ab1-s3i2-p7" 450 | weight: 6548.1875 451 | } 452 | nexthop_entries { 453 | nexthop_port: "toy4-c4-ab1-s3i3-p7" 454 | weight: 6548.1875 455 | } 456 | nexthop_entries { 457 | nexthop_port: "toy4-c4-ab1-s3i4-p7" 458 | weight: 6548.1875 459 | } 460 | nexthop_entries { 461 | nexthop_port: "toy4-c4-ab1-s3i1-p1" 462 | weight: 495.8124999999997 463 | } 464 | nexthop_entries { 465 | nexthop_port: "toy4-c4-ab1-s3i2-p1" 466 | weight: 495.8124999999997 467 | } 468 | nexthop_entries { 469 | nexthop_port: "toy4-c4-ab1-s3i3-p1" 470 | weight: 495.8124999999997 471 | } 472 | nexthop_entries { 473 | nexthop_port: "toy4-c4-ab1-s3i4-p1" 474 | weight: 495.8124999999997 475 | } 476 | } 477 | } 478 | te_intents { 479 | target_block: "toy4-c5-ab1" 480 | prefix_intents { 481 | dst_name: "toy4-c1-ab1" 482 | type: SRC 483 | nexthop_entries { 484 | nexthop_port: "toy4-c5-ab1-s3i1-p1" 485 | weight: 996.25 486 | } 487 | nexthop_entries { 488 | nexthop_port: "toy4-c5-ab1-s3i2-p1" 489 | weight: 996.25 490 | } 491 | nexthop_entries { 492 | nexthop_port: "toy4-c5-ab1-s3i3-p1" 493 | weight: 996.25 494 | } 495 | nexthop_entries { 496 | nexthop_port: "toy4-c5-ab1-s3i4-p1" 497 | weight: 996.25 498 | } 499 | } 500 | prefix_intents { 501 | dst_name: "toy4-c2-ab1" 502 | type: SRC 503 | nexthop_entries { 504 | nexthop_port: "toy4-c5-ab1-s3i1-p3" 505 | weight: 5321.0 506 | } 507 | nexthop_entries { 508 | nexthop_port: "toy4-c5-ab1-s3i2-p3" 509 | weight: 5321.0 510 | } 511 | nexthop_entries { 512 | nexthop_port: "toy4-c5-ab1-s3i3-p3" 513 | weight: 5321.0 514 | } 515 | nexthop_entries { 516 | nexthop_port: "toy4-c5-ab1-s3i4-p3" 517 | weight: 5321.0 518 | } 519 | } 520 | prefix_intents { 521 | dst_name: "toy4-c3-ab1" 522 | type: SRC 523 | nexthop_entries { 524 | nexthop_port: "toy4-c5-ab1-s3i1-p5" 525 | weight: 2691.0 526 | } 527 | nexthop_entries { 528 | nexthop_port: "toy4-c5-ab1-s3i2-p5" 529 | weight: 2691.0 530 | } 531 | nexthop_entries { 532 | nexthop_port: "toy4-c5-ab1-s3i3-p5" 533 | weight: 2691.0 534 | } 535 | nexthop_entries { 536 | nexthop_port: "toy4-c5-ab1-s3i4-p5" 537 | weight: 2691.0 538 | } 539 | } 540 | prefix_intents { 541 | dst_name: "toy4-c4-ab1" 542 | type: SRC 543 | nexthop_entries { 544 | nexthop_port: "toy4-c5-ab1-s3i1-p7" 545 | weight: 2384.75 546 | } 547 | nexthop_entries { 548 | nexthop_port: "toy4-c5-ab1-s3i2-p7" 549 | weight: 2384.75 550 | } 551 | nexthop_entries { 552 | nexthop_port: "toy4-c5-ab1-s3i3-p7" 553 | weight: 2384.75 554 | } 555 | nexthop_entries { 556 | nexthop_port: "toy4-c5-ab1-s3i4-p7" 557 | weight: 2384.75 558 | } 559 | } 560 | } 561 | -------------------------------------------------------------------------------- /traffic/data/GoogleRPC.txt: -------------------------------------------------------------------------------- 1 | 0 0 2 | 3 6.48826 3 | 32 9.73239 4 | 36 10.8913 5 | 40 12.0503 6 | 
46 13.2092 7 | 53 14.3681 8 | 64 15.5271 9 | 70 18.7988 10 | 77 22.0705 11 | 85 25.3422 12 | 96 28.6139 13 | 110 31.8856 14 | 128 35.1573 15 | 137 36.9864 16 | 146 38.8155 17 | 158 40.6446 18 | 171 42.4737 19 | 186 44.3028 20 | 205 46.1319 21 | 228 47.961 22 | 256 49.7901 23 | 268 52.3994 24 | 282 55.0086 25 | 296 57.6179 26 | 313 60.2272 27 | 331 62.8365 28 | 352 65.4457 29 | 375 68.055 30 | 402 70.6643 31 | 433 73.2736 32 | 469 75.8828 33 | 512 78.4921 34 | 531 79.0513 35 | 551 79.6104 36 | 573 80.1695 37 | 597 80.7286 38 | 623 81.2878 39 | 652 81.8469 40 | 683 82.406 41 | 717 82.9652 42 | 755 83.5243 43 | 796 84.0834 44 | 843 84.6426 45 | 896 85.2017 46 | 956 85.7608 47 | 1024 86.32 48 | 1053 86.5736 49 | 1084 86.8273 50 | 1117 87.081 51 | 1152 87.3347 52 | 1189 87.5884 53 | 1229 87.842 54 | 1271 88.0957 55 | 1317 88.3494 56 | 1365 88.6031 57 | 1418 88.8568 58 | 1475 89.1104 59 | 1536 89.3641 60 | 1603 89.6178 61 | 1676 89.8715 62 | 1755 90.1252 63 | 1843 90.3788 64 | 1940 90.6325 65 | 2048 90.8862 66 | 2092 90.9949 67 | 2137 91.1036 68 | 2185 91.2124 69 | 2234 91.3211 70 | 2286 91.4298 71 | 2341 91.5385 72 | 2398 91.6472 73 | 2458 91.7559 74 | 2521 91.8647 75 | 2587 91.9734 76 | 2657 92.0821 77 | 2731 92.1908 78 | 2809 92.2995 79 | 2891 92.4083 80 | 2979 92.517 81 | 3072 92.6257 82 | 3171 92.7344 83 | 3277 92.8431 84 | 3390 92.9519 85 | 3511 93.0606 86 | 3641 93.1693 87 | 3781 93.278 88 | 3932 93.3867 89 | 4096 93.4955 90 | 4163 93.5902 91 | 4233 93.6848 92 | 4304 93.7795 93 | 4378 93.8742 94 | 4455 93.9689 95 | 4535 94.0636 96 | 4617 94.1583 97 | 4703 94.253 98 | 4792 94.3477 99 | 4884 94.4424 100 | 4979 94.5371 101 | 5079 94.6318 102 | 5183 94.7265 103 | 5291 94.8211 104 | 5403 94.9158 105 | 5521 95.0105 106 | 5643 95.1052 107 | 5772 95.1999 108 | 5906 95.2946 109 | 6046 95.3893 110 | 6194 95.484 111 | 6349 95.5787 112 | 6512 95.6734 113 | 6683 95.7681 114 | 6864 95.8628 115 | 7054 95.9574 116 | 7256 96.0521 117 | 7469 96.1468 118 | 7696 96.2415 119 | 7936 96.3362 120 | 8192 96.4309 121 | 8293 96.4806 122 | 8397 96.5303 123 | 8503 96.5801 124 | 8612 96.6298 125 | 8724 96.6795 126 | 8839 96.7292 127 | 8957 96.7789 128 | 9078 96.8287 129 | 9202 96.8784 130 | 9330 96.9281 131 | 9461 96.9778 132 | 9596 97.0275 133 | 9735 97.0773 134 | 9879 97.127 135 | 10026 97.1767 136 | 10178 97.2264 137 | 10335 97.2761 138 | 10496 97.3259 139 | 10663 97.3756 140 | 10835 97.4253 141 | 11012 97.475 142 | 11196 97.5247 143 | 11385 97.5745 144 | 11582 97.6242 145 | 11785 97.6739 146 | 11995 97.7236 147 | 12214 97.7733 148 | 12440 97.8231 149 | 12674 97.8728 150 | 12918 97.9225 151 | 13171 97.9722 152 | 13435 98.0219 153 | 13709 98.0716 154 | 13995 98.1214 155 | 14292 98.1711 156 | 14603 98.2208 157 | 14928 98.2705 158 | 15267 98.3202 159 | 15622 98.37 160 | 15994 98.4197 161 | 16384 98.4694 162 | 16540 98.4802 163 | 16699 98.4909 164 | 16861 98.5017 165 | 17027 98.5125 166 | 17195 98.5233 167 | 17367 98.534 168 | 17542 98.5448 169 | 17721 98.5556 170 | 17904 98.5663 171 | 18091 98.5771 172 | 18281 98.5879 173 | 18476 98.5986 174 | 18674 98.6094 175 | 18877 98.6202 176 | 19085 98.6309 177 | 19297 98.6417 178 | 19514 98.6525 179 | 19735 98.6633 180 | 19962 98.674 181 | 20194 98.6848 182 | 20432 98.6956 183 | 20675 98.7063 184 | 20924 98.7171 185 | 21179 98.7279 186 | 21441 98.7386 187 | 21709 98.7494 188 | 21984 98.7602 189 | 22265 98.7709 190 | 22555 98.7817 191 | 22851 98.7925 192 | 23156 98.8033 193 | 23469 98.814 194 | 23790 98.8248 195 | 24121 98.8356 196 | 24461 98.8463 197 | 24810 98.8571 198 | 25170 
98.8679 199 | 25540 98.8786 200 | 25921 98.8894 201 | 26314 98.9002 202 | 26719 98.911 203 | 27136 98.9217 204 | 27567 98.9325 205 | 28011 98.9433 206 | 28471 98.954 207 | 28945 98.9648 208 | 29436 98.9756 209 | 29943 98.9863 210 | 30468 98.9971 211 | 31013 99.0079 212 | 31576 99.0186 213 | 32161 99.0294 214 | 32768 99.0402 215 | 33007 99.0443 216 | 33250 99.0485 217 | 33496 99.0526 218 | 33746 99.0567 219 | 34000 99.0609 220 | 34257 99.065 221 | 34519 99.0691 222 | 34784 99.0733 223 | 35054 99.0774 224 | 35328 99.0815 225 | 35606 99.0857 226 | 35889 99.0898 227 | 36176 99.094 228 | 36468 99.0981 229 | 36764 99.1022 230 | 37065 99.1064 231 | 37372 99.1105 232 | 37683 99.1146 233 | 38000 99.1188 234 | 38322 99.1229 235 | 38649 99.127 236 | 38983 99.1312 237 | 39322 99.1353 238 | 39667 99.1394 239 | 40018 99.1436 240 | 40375 99.1477 241 | 40739 99.1519 242 | 41109 99.156 243 | 41486 99.1601 244 | 41870 99.1643 245 | 42262 99.1684 246 | 42660 99.1725 247 | 43067 99.1767 248 | 43481 99.1808 249 | 43903 99.1849 250 | 44333 99.1891 251 | 44772 99.1932 252 | 45220 99.1974 253 | 45677 99.2015 254 | 46143 99.2056 255 | 46618 99.2098 256 | 47104 99.2139 257 | 47600 99.218 258 | 48106 99.2222 259 | 48623 99.2263 260 | 49152 99.2304 261 | 49692 99.2346 262 | 50244 99.2387 263 | 50809 99.2429 264 | 51386 99.247 265 | 51977 99.2511 266 | 52581 99.2553 267 | 53200 99.2594 268 | 53833 99.2635 269 | 54482 99.2677 270 | 55146 99.2718 271 | 55827 99.2759 272 | 56525 99.2801 273 | 57240 99.2842 274 | 57974 99.2883 275 | 58727 99.2925 276 | 59500 99.2966 277 | 60293 99.3008 278 | 61108 99.3049 279 | 61945 99.309 280 | 62805 99.3132 281 | 63690 99.3173 282 | 64600 99.3214 283 | 65536 99.3256 284 | 65902 99.3299 285 | 66272 99.3342 286 | 66647 99.3385 287 | 67025 99.3428 288 | 67408 99.3471 289 | 67796 99.3514 290 | 68188 99.3557 291 | 68584 99.36 292 | 68985 99.3643 293 | 69391 99.3686 294 | 69802 99.3729 295 | 70217 99.3772 296 | 70638 99.3815 297 | 71063 99.3858 298 | 71494 99.3901 299 | 71930 99.3944 300 | 72371 99.3987 301 | 72818 99.403 302 | 73270 99.4073 303 | 73728 99.4116 304 | 74192 99.4159 305 | 74661 99.4202 306 | 75137 99.4246 307 | 75618 99.4289 308 | 76106 99.4332 309 | 76601 99.4375 310 | 77101 99.4418 311 | 77608 99.4461 312 | 78122 99.4504 313 | 78643 99.4547 314 | 79171 99.459 315 | 79706 99.4633 316 | 80248 99.4676 317 | 80798 99.4719 318 | 81355 99.4762 319 | 81920 99.4805 320 | 82493 99.4848 321 | 83074 99.4891 322 | 83663 99.4934 323 | 84261 99.4977 324 | 84867 99.502 325 | 85482 99.5063 326 | 86106 99.5106 327 | 86739 99.5149 328 | 87381 99.5192 329 | 88033 99.5235 330 | 88695 99.5278 331 | 89367 99.5321 332 | 90049 99.5364 333 | 90742 99.5407 334 | 91446 99.5451 335 | 92160 99.5494 336 | 92886 99.5537 337 | 93623 99.558 338 | 94372 99.5623 339 | 95133 99.5666 340 | 95906 99.5709 341 | 96692 99.5752 342 | 97492 99.5795 343 | 98304 99.5838 344 | 99130 99.5881 345 | 99970 99.5924 346 | 100825 99.5967 347 | 101694 99.601 348 | 102578 99.6053 349 | 103478 99.6096 350 | 104394 99.6139 351 | 105326 99.6182 352 | 106275 99.6225 353 | 107241 99.6268 354 | 108225 99.6311 355 | 109227 99.6354 356 | 110247 99.6397 357 | 111288 99.644 358 | 112347 99.6483 359 | 113428 99.6526 360 | 114529 99.6569 361 | 115652 99.6612 362 | 116797 99.6655 363 | 117965 99.6699 364 | 119156 99.6742 365 | 120372 99.6785 366 | 121613 99.6828 367 | 122880 99.6871 368 | 124173 99.6914 369 | 125494 99.6957 370 | 126844 99.7 371 | 128223 99.7043 372 | 129632 99.7086 373 | 131072 99.7129 374 | 131630 99.714 375 | 132192 
99.7151 376 | 132760 99.7163 377 | 133332 99.7174 378 | 133909 99.7185 379 | 134491 99.7196 380 | 135079 99.7207 381 | 135671 99.7219 382 | 136269 99.723 383 | 136872 99.7241 384 | 137480 99.7252 385 | 138094 99.7264 386 | 138713 99.7275 387 | 139338 99.7286 388 | 139968 99.7297 389 | 140605 99.7308 390 | 141247 99.732 391 | 141894 99.7331 392 | 142548 99.7342 393 | 143208 99.7353 394 | 143874 99.7365 395 | 144547 99.7376 396 | 145225 99.7387 397 | 145910 99.7398 398 | 146602 99.7409 399 | 147300 99.7421 400 | 148005 99.7432 401 | 148716 99.7443 402 | 149435 99.7454 403 | 150160 99.7466 404 | 150893 99.7477 405 | 151632 99.7488 406 | 152379 99.7499 407 | 153134 99.751 408 | 153895 99.7522 409 | 154665 99.7533 410 | 155442 99.7544 411 | 156227 99.7555 412 | 157020 99.7567 413 | 157821 99.7578 414 | 158631 99.7589 415 | 159448 99.76 416 | 160275 99.7611 417 | 161109 99.7623 418 | 161953 99.7634 419 | 162805 99.7645 420 | 163667 99.7656 421 | 164537 99.7668 422 | 165417 99.7679 423 | 166306 99.769 424 | 167205 99.7701 425 | 168114 99.7712 426 | 169033 99.7724 427 | 169961 99.7735 428 | 170901 99.7746 429 | 171850 99.7757 430 | 172810 99.7769 431 | 173781 99.778 432 | 174763 99.7791 433 | 175756 99.7802 434 | 176760 99.7813 435 | 177776 99.7825 436 | 178803 99.7836 437 | 179843 99.7847 438 | 180895 99.7858 439 | 181959 99.787 440 | 183035 99.7881 441 | 184125 99.7892 442 | 185227 99.7903 443 | 186343 99.7914 444 | 187473 99.7926 445 | 188616 99.7937 446 | 189773 99.7948 447 | 190944 99.7959 448 | 192130 99.7971 449 | 193331 99.7982 450 | 194547 99.7993 451 | 195778 99.8004 452 | 197025 99.8015 453 | 198288 99.8027 454 | 199568 99.8038 455 | 200864 99.8049 456 | 202176 99.806 457 | 203507 99.8072 458 | 204854 99.8083 459 | 206220 99.8094 460 | 207604 99.8105 461 | 209007 99.8116 462 | 210429 99.8128 463 | 211870 99.8139 464 | 213331 99.815 465 | 214812 99.8161 466 | 216315 99.8173 467 | 217838 99.8184 468 | 219383 99.8195 469 | 220950 99.8206 470 | 222540 99.8217 471 | 224152 99.8229 472 | 225788 99.824 473 | 227448 99.8251 474 | 229133 99.8262 475 | 230843 99.8274 476 | 232579 99.8285 477 | 234341 99.8296 478 | 236130 99.8307 479 | 237946 99.8318 480 | 239791 99.833 481 | 241664 99.8341 482 | 243567 99.8352 483 | 245500 99.8363 484 | 247464 99.8375 485 | 249460 99.8386 486 | 251488 99.8397 487 | 253549 99.8408 488 | 255645 99.8419 489 | 257775 99.8431 490 | 259941 99.8442 491 | 262144 99.8453 492 | 263003 99.846 493 | 263869 99.8467 494 | 264739 99.8474 495 | 265616 99.848 496 | 266499 99.8487 497 | 267387 99.8494 498 | 268281 99.8501 499 | 269181 99.8508 500 | 270088 99.8515 501 | 271000 99.8521 502 | 271919 99.8528 503 | 272844 99.8535 504 | 273775 99.8542 505 | 274713 99.8549 506 | 275657 99.8556 507 | 276607 99.8562 508 | 277564 99.8569 509 | 278528 99.8576 510 | 279498 99.8583 511 | 280476 99.859 512 | 281460 99.8597 513 | 282451 99.8603 514 | 283449 99.861 515 | 284454 99.8617 516 | 285466 99.8624 517 | 286486 99.8631 518 | 287513 99.8638 519 | 288547 99.8644 520 | 289589 99.8651 521 | 290638 99.8658 522 | 291695 99.8665 523 | 292759 99.8672 524 | 293832 99.8679 525 | 294912 99.8685 526 | 296000 99.8692 527 | 297097 99.8699 528 | 298201 99.8706 529 | 299314 99.8713 530 | 300435 99.872 531 | 301564 99.8726 532 | 302702 99.8733 533 | 303849 99.874 534 | 305004 99.8747 535 | 306168 99.8754 536 | 307341 99.876 537 | 308523 99.8767 538 | 309715 99.8774 539 | 310915 99.8781 540 | 312125 99.8788 541 | 313344 99.8795 542 | 314573 99.8801 543 | 315811 99.8808 544 | 317060 99.8815 545 | 318318 
99.8822 546 | 319586 99.8829 547 | 320864 99.8836 548 | 322153 99.8842 549 | 323452 99.8849 550 | 324761 99.8856 551 | 326082 99.8863 552 | 327413 99.887 553 | 328754 99.8877 554 | 330107 99.8883 555 | 331471 99.889 556 | 332847 99.8897 557 | 334234 99.8904 558 | 335632 99.8911 559 | 337042 99.8918 560 | 338464 99.8924 561 | 339899 99.8931 562 | 341345 99.8938 563 | 342804 99.8945 564 | 344275 99.8952 565 | 345759 99.8959 566 | 347256 99.8965 567 | 348765 99.8972 568 | 350288 99.8979 569 | 351825 99.8986 570 | 353375 99.8993 571 | 354938 99.9 572 | 356516 99.9006 573 | 358107 99.9013 574 | 359713 99.902 575 | 361334 99.9027 576 | 362969 99.9034 577 | 364618 99.9041 578 | 366283 99.9047 579 | 367964 99.9054 580 | 369659 99.9061 581 | 371371 99.9068 582 | 373098 99.9075 583 | 374841 99.9082 584 | 376601 99.9088 585 | 378378 99.9095 586 | 380171 99.9102 587 | 381981 99.9109 588 | 383809 99.9116 589 | 385654 99.9123 590 | 387517 99.9129 591 | 389398 99.9136 592 | 391298 99.9143 593 | 393216 99.915 594 | 395153 99.9157 595 | 397109 99.9164 596 | 399085 99.917 597 | 401080 99.9177 598 | 403096 99.9184 599 | 405132 99.9191 600 | 407188 99.9198 601 | 409266 99.9205 602 | 411364 99.9211 603 | 413485 99.9218 604 | 415627 99.9225 605 | 417792 99.9232 606 | 419979 99.9239 607 | 422190 99.9245 608 | 424424 99.9252 609 | 426681 99.9259 610 | 428963 99.9266 611 | 431269 99.9273 612 | 433600 99.928 613 | 435957 99.9286 614 | 438339 99.9293 615 | 440748 99.93 616 | 443183 99.9307 617 | 445645 99.9314 618 | 448134 99.9321 619 | 450652 99.9327 620 | 453198 99.9334 621 | 455773 99.9341 622 | 458378 99.9348 623 | 461012 99.9355 624 | 463677 99.9362 625 | 466372 99.9368 626 | 469100 99.9375 627 | 471859 99.9382 628 | 474651 99.9389 629 | 477477 99.9396 630 | 480336 99.9403 631 | 483229 99.9409 632 | 486158 99.9416 633 | 489122 99.9423 634 | 492123 99.943 635 | 495161 99.9437 636 | 498236 99.9444 637 | 501350 99.945 638 | 504504 99.9457 639 | 507697 99.9464 640 | 510930 99.9471 641 | 514206 99.9478 642 | 517523 99.9485 643 | 520884 99.9491 644 | 524288 99.9498 645 | 526844 99.9501 646 | 529425 99.9503 647 | 532031 99.9506 648 | 534663 99.9508 649 | 537322 99.9511 650 | 540006 99.9513 651 | 542718 99.9516 652 | 545458 99.9518 653 | 548225 99.9521 654 | 551020 99.9523 655 | 553844 99.9526 656 | 556697 99.9528 657 | 559579 99.9531 658 | 562492 99.9534 659 | 565435 99.9536 660 | 568408 99.9539 661 | 571414 99.9541 662 | 574451 99.9544 663 | 577521 99.9546 664 | 580624 99.9549 665 | 583760 99.9551 666 | 586931 99.9554 667 | 590136 99.9556 668 | 593376 99.9559 669 | 596652 99.9561 670 | 599964 99.9564 671 | 603313 99.9566 672 | 606700 99.9569 673 | 610125 99.9571 674 | 613590 99.9574 675 | 617093 99.9576 676 | 620637 99.9579 677 | 624222 99.9581 678 | 627848 99.9584 679 | 631517 99.9586 680 | 635229 99.9589 681 | 638985 99.9592 682 | 642786 99.9594 683 | 646632 99.9597 684 | 650524 99.9599 685 | 654463 99.9602 686 | 658451 99.9604 687 | 662487 99.9607 688 | 666573 99.9609 689 | 670710 99.9612 690 | 674899 99.9614 691 | 679140 99.9617 692 | 683435 99.9619 693 | 687784 99.9622 694 | 692190 99.9624 695 | 696652 99.9627 696 | 701172 99.9629 697 | 705750 99.9632 698 | 710390 99.9634 699 | 715090 99.9637 700 | 719853 99.9639 701 | 724680 99.9642 702 | 729573 99.9644 703 | 734531 99.9647 704 | 739558 99.965 705 | 744654 99.9652 706 | 749820 99.9655 707 | 755059 99.9657 708 | 760371 99.966 709 | 765759 99.9662 710 | 771224 99.9665 711 | 776767 99.9667 712 | 782390 99.967 713 | 788096 99.9672 714 | 793885 99.9675 715 | 799760 
99.9677 716 | 805723 99.968 717 | 811775 99.9682 718 | 817919 99.9685 719 | 824156 99.9687 720 | 830490 99.969 721 | 836921 99.9692 722 | 843453 99.9695 723 | 850088 99.9697 724 | 856827 99.97 725 | 863675 99.9702 726 | 870633 99.9705 727 | 877704 99.9708 728 | 884890 99.971 729 | 892196 99.9713 730 | 899623 99.9715 731 | 907174 99.9718 732 | 914854 99.972 733 | 922664 99.9723 734 | 930609 99.9725 735 | 938693 99.9728 736 | 946917 99.973 737 | 955288 99.9733 738 | 963807 99.9735 739 | 972480 99.9738 740 | 981310 99.974 741 | 990302 99.9743 742 | 999461 99.9745 743 | 1008790 99.9748 744 | 1018296 99.975 745 | 1027982 99.9753 746 | 1037854 99.9755 747 | 1047917 99.9758 748 | 1058178 99.976 749 | 1068642 99.9763 750 | 1079314 99.9766 751 | 1090202 99.9768 752 | 1101312 99.9771 753 | 1112651 99.9773 754 | 1124225 99.9776 755 | 1136043 99.9778 756 | 1148112 99.9781 757 | 1160440 99.9783 758 | 1173036 99.9786 759 | 1185908 99.9788 760 | 1199066 99.9791 761 | 1212519 99.9793 762 | 1226277 99.9796 763 | 1240351 99.9798 764 | 1254752 99.9801 765 | 1269492 99.9803 766 | 1284581 99.9806 767 | 1300034 99.9808 768 | 1315863 99.9811 769 | 1332082 99.9813 770 | 1348706 99.9816 771 | 1365751 99.9818 772 | 1383231 99.9821 773 | 1401165 99.9823 774 | 1419570 99.9826 775 | 1438465 99.9829 776 | 1457870 99.9831 777 | 1477805 99.9834 778 | 1498294 99.9836 779 | 1519358 99.9839 780 | 1541023 99.9841 781 | 1563315 99.9844 782 | 1586261 99.9846 783 | 1609891 99.9849 784 | 1634235 99.9851 785 | 1659327 99.9854 786 | 1685202 99.9856 787 | 1711896 99.9859 788 | 1739450 99.9861 789 | 1767905 99.9864 790 | 1797307 99.9866 791 | 1827703 99.9869 792 | 1859145 99.9871 793 | 1891687 99.9874 794 | 1925389 99.9876 795 | 1960314 99.9879 796 | 1996529 99.9881 797 | 2034108 99.9884 798 | 2073128 99.9887 799 | 2113675 99.9889 800 | 2155839 99.9892 801 | 2199720 99.9894 802 | 2245424 99.9897 803 | 2293067 99.9899 804 | 2342777 99.9902 805 | 2394689 99.9904 806 | 2448954 99.9907 807 | 2505735 99.9909 808 | 2565212 99.9912 809 | 2627581 99.9914 810 | 2693059 99.9917 811 | 2761883 99.9919 812 | 2834317 99.9922 813 | 2910653 99.9924 814 | 2991214 99.9927 815 | 3076362 99.9929 816 | 3166500 99.9932 817 | 3262080 99.9934 818 | 3363608 99.9937 819 | 3471660 99.9939 820 | 3586885 99.9942 821 | 3710020 99.9945 822 | 3841911 99.9947 823 | 3983524 99.995 824 | 4135977 99.9952 825 | 4300563 99.9955 826 | 4478791 99.9957 827 | 4672430 99.996 828 | 4883570 99.9962 829 | 5114695 99.9965 830 | 5368784 99.9967 831 | 5649438 99.997 832 | 5961053 99.9972 833 | 6309051 99.9975 834 | 6700199 99.9977 835 | 7143054 99.998 836 | 7648594 99.9982 837 | 8231141 99.9985 838 | 8909743 99.9987 839 | 9710291 99.999 840 | 10668901 99.9992 841 | 11837511 99.9995 842 | 13293619 99.9997 843 | 15158197 100 844 | -------------------------------------------------------------------------------- /tests/data/toy2_topo.textproto: -------------------------------------------------------------------------------- 1 | # A toy network with 3 clusters: c1 + ToRs, c2 (no ToR), and c3 + ToRs. 
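# Conventions used in this file: stage-1 nodes are ToRs (they carry host_prefix/host_mask), stage-2 and stage-3 nodes form the AggrBlock, and stage-3 ports marked dcn_facing connect to other clusters. Every bidirectional link is modeled as a pair of unidirectional links entries, and each paths entry records the aggregate AggrBlock-to-AggrBlock DCN capacity (2 x 100G links per pair, hence capacity_mbps: 200000).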
2 | # ┌─────────────────────┐ ┌────────────────────────┐ 3 | # │ │ │ │ 4 | # ┌─────┼───────────┬───┐ ┌───┼─┼───┬──────────────┐ │ 5 | # │ │ ├───┼─┼───┼─┼───┤ │ │ 6 | # │ │ │ │ │ │ │ │ C2 │ │ 7 | # │ │ │ ┌┴─┴┐ ┌┴─┴┐ │ │ │ 8 | # │ │ │ │ │ │ │ │ │ │ 9 | # │ │ │ └───┘ └───┘ │ │ │ 10 | # │ │ │ │ │ │ 11 | # │ │ │ ┌───┐ ┌───┐ │ │ │ 12 | # │ │ │ │ │ │ │ │ │ │ 13 | # │ │ │ └───┘ └───┘ │ │ │ 14 | # │ │ │ │ │ │ 15 | # │ │ └───────────────┘ │ │ 16 | # │ │ │ │ 17 | # │ │ ┌────────────────────────────────────────┼───┐ │ 18 | # │ │ │ │ │ │ 19 | # │ ┌───┼─┼──────────────────────────────────────┐ │ │ │ 20 | # │ │ │ │ │ │ │ │ 21 | # │ │ │ │ ┌───┼─┼───┼─┼───┐ 22 | # ┌───┼─┼───┼─┼───┐ │ │ │ │ │ │ C3 23 | # │ │ │ │ │ │ C1 │ ┌┴─┴┐ ┌┴─┴┐ │ 24 | # │ ┌┴─┴┐ ┌┴─┴┐ │ │ │ │ │ │ │ 25 | # │ │ │ │ │ │ │ └───┘ └───┘ │ 26 | # │ └───┘ └───┘ │ │ │ 27 | # │ │ │ ┌───┐ ┌───┐ │ 28 | # │ ┌───┐ ┌───┐ │ │ │ │ │ │ │ 29 | # │ │ │ │ │ │ ┌┼──┴─┬─┘ └─┬─┴──┼┐ 30 | # ┌┼──┴─┬─┘ └─┬─┴──┼┐ ││ │ │ ││ 31 | # ││ │ │ ││ ├┴────┼─────┼────┴┤ 32 | # ├┴────┼─────┼────┴┤ │ │ │ │ 33 | # │ │ │ │ ┌─┴─┐ ┌─┴─┐ ┌─┴─┐ ┌─┴─┐ 34 | # ┌─┴─┐ ┌─┴─┐ ┌─┴─┐ ┌─┴─┐ │ │ │ │ │ │ │ │ 35 | # │ │ │ │ │ │ │ │ └───┘ └───┘ └───┘ └───┘ 36 | # └───┘ └───┘ └───┘ └───┘ 37 | name: "toy2" 38 | clusters { 39 | name: "toy2-c1" 40 | aggr_blocks { 41 | name: "toy2-c1-ab1" 42 | nodes { 43 | name: "toy2-c1-ab1-s2i1" 44 | stage: 2 45 | index: 1 46 | flow_limit: 4000 47 | ecmp_limit: 4000 48 | group_limit: 128 49 | ports { 50 | name: "toy2-c1-ab1-s2i1-p1" 51 | port_speed_mbps: 100000 52 | dcn_facing: false 53 | } 54 | ports { 55 | name: "toy2-c1-ab1-s2i1-p2" 56 | port_speed_mbps: 100000 57 | dcn_facing: false 58 | } 59 | ports { 60 | name: "toy2-c1-ab1-s2i1-p3" 61 | port_speed_mbps: 100000 62 | dcn_facing: false 63 | } 64 | ports { 65 | name: "toy2-c1-ab1-s2i1-p4" 66 | port_speed_mbps: 100000 67 | dcn_facing: false 68 | } 69 | } 70 | nodes { 71 | name: "toy2-c1-ab1-s3i1" 72 | stage: 3 73 | index: 1 74 | flow_limit: 4000 75 | ecmp_limit: 4000 76 | group_limit: 128 77 | ports { 78 | name: "toy2-c1-ab1-s3i1-p1" 79 | port_speed_mbps: 100000 80 | dcn_facing: false 81 | } 82 | ports { 83 | name: "toy2-c1-ab1-s3i1-p2" 84 | port_speed_mbps: 100000 85 | dcn_facing: false 86 | } 87 | ports { 88 | name: "toy2-c1-ab1-s3i1-p3" 89 | port_speed_mbps: 100000 90 | dcn_facing: true 91 | } 92 | ports { 93 | name: "toy2-c1-ab1-s3i1-p4" 94 | port_speed_mbps: 100000 95 | dcn_facing: true 96 | } 97 | } 98 | nodes { 99 | name: "toy2-c1-ab1-s2i2" 100 | stage: 2 101 | index: 2 102 | flow_limit: 4000 103 | ecmp_limit: 4000 104 | group_limit: 128 105 | ports { 106 | name: "toy2-c1-ab1-s2i2-p1" 107 | port_speed_mbps: 100000 108 | dcn_facing: false 109 | } 110 | ports { 111 | name: "toy2-c1-ab1-s2i2-p2" 112 | port_speed_mbps: 100000 113 | dcn_facing: false 114 | } 115 | ports { 116 | name: "toy2-c1-ab1-s2i2-p3" 117 | port_speed_mbps: 100000 118 | dcn_facing: false 119 | } 120 | ports { 121 | name: "toy2-c1-ab1-s2i2-p4" 122 | port_speed_mbps: 100000 123 | dcn_facing: false 124 | } 125 | } 126 | nodes { 127 | name: "toy2-c1-ab1-s3i2" 128 | stage: 3 129 | index: 2 130 | flow_limit: 4000 131 | ecmp_limit: 4000 132 | group_limit: 128 133 | ports { 134 | name: "toy2-c1-ab1-s3i2-p1" 135 | port_speed_mbps: 100000 136 | dcn_facing: false 137 | } 138 | ports { 139 | name: "toy2-c1-ab1-s3i2-p2" 140 | port_speed_mbps: 100000 141 | dcn_facing: false 142 | } 143 | ports { 144 | name: "toy2-c1-ab1-s3i2-p3" 145 | port_speed_mbps: 100000 146 | dcn_facing: true 147 | } 148 | ports { 149 | name: "toy2-c1-ab1-s3i2-p4" 150 | 
port_speed_mbps: 100000 151 | dcn_facing: true 152 | } 153 | } 154 | } 155 | nodes { 156 | name: "toy2-c1-ab1-s1i1" 157 | stage: 1 158 | index: 1 159 | flow_limit: 4000 160 | ecmp_limit: 4000 161 | group_limit: 128 162 | ports { 163 | name: "toy2-c1-ab1-s1i1-p1" 164 | port_speed_mbps: 100000 165 | dcn_facing: false 166 | } 167 | host_prefix: "172.16.0.0" 168 | host_mask: 27 169 | } 170 | nodes { 171 | name: "toy2-c1-ab1-s1i2" 172 | stage: 1 173 | index: 2 174 | flow_limit: 4000 175 | ecmp_limit: 4000 176 | group_limit: 128 177 | ports { 178 | name: "toy2-c1-ab1-s1i2-p1" 179 | port_speed_mbps: 100000 180 | dcn_facing: false 181 | } 182 | host_prefix: "172.16.0.32" 183 | host_mask: 27 184 | } 185 | nodes { 186 | name: "toy2-c1-ab1-s1i3" 187 | stage: 1 188 | index: 3 189 | flow_limit: 4000 190 | ecmp_limit: 4000 191 | group_limit: 128 192 | ports { 193 | name: "toy2-c1-ab1-s1i3-p1" 194 | port_speed_mbps: 100000 195 | dcn_facing: false 196 | } 197 | host_prefix: "172.16.0.64" 198 | host_mask: 27 199 | } 200 | nodes { 201 | name: "toy2-c1-ab1-s1i4" 202 | stage: 1 203 | index: 4 204 | flow_limit: 4000 205 | ecmp_limit: 4000 206 | group_limit: 128 207 | ports { 208 | name: "toy2-c1-ab1-s1i4-p1" 209 | port_speed_mbps: 100000 210 | dcn_facing: false 211 | } 212 | host_prefix: "172.16.0.96" 213 | host_mask: 27 214 | } 215 | } 216 | clusters { 217 | name: "toy2-c2" 218 | aggr_blocks { 219 | name: "toy2-c2-ab1" 220 | nodes { 221 | name: "toy2-c2-ab1-s2i1" 222 | stage: 2 223 | index: 1 224 | flow_limit: 4000 225 | ecmp_limit: 4000 226 | group_limit: 128 227 | ports { 228 | name: "toy2-c2-ab1-s2i1-p1" 229 | port_speed_mbps: 100000 230 | dcn_facing: false 231 | } 232 | ports { 233 | name: "toy2-c2-ab1-s2i1-p2" 234 | port_speed_mbps: 100000 235 | dcn_facing: false 236 | } 237 | ports { 238 | name: "toy2-c2-ab1-s2i1-p3" 239 | port_speed_mbps: 100000 240 | dcn_facing: false 241 | } 242 | ports { 243 | name: "toy2-c2-ab1-s2i1-p4" 244 | port_speed_mbps: 100000 245 | dcn_facing: false 246 | } 247 | } 248 | nodes { 249 | name: "toy2-c2-ab1-s3i1" 250 | stage: 3 251 | index: 1 252 | flow_limit: 4000 253 | ecmp_limit: 4000 254 | group_limit: 128 255 | ports { 256 | name: "toy2-c2-ab1-s3i1-p1" 257 | port_speed_mbps: 100000 258 | dcn_facing: false 259 | } 260 | ports { 261 | name: "toy2-c2-ab1-s3i1-p2" 262 | port_speed_mbps: 100000 263 | dcn_facing: false 264 | } 265 | ports { 266 | name: "toy2-c2-ab1-s3i1-p3" 267 | port_speed_mbps: 100000 268 | dcn_facing: true 269 | } 270 | ports { 271 | name: "toy2-c2-ab1-s3i1-p4" 272 | port_speed_mbps: 100000 273 | dcn_facing: true 274 | } 275 | } 276 | nodes { 277 | name: "toy2-c2-ab1-s2i2" 278 | stage: 2 279 | index: 2 280 | flow_limit: 4000 281 | ecmp_limit: 4000 282 | group_limit: 128 283 | ports { 284 | name: "toy2-c2-ab1-s2i2-p1" 285 | port_speed_mbps: 100000 286 | dcn_facing: false 287 | } 288 | ports { 289 | name: "toy2-c2-ab1-s2i2-p2" 290 | port_speed_mbps: 100000 291 | dcn_facing: false 292 | } 293 | ports { 294 | name: "toy2-c2-ab1-s2i2-p3" 295 | port_speed_mbps: 100000 296 | dcn_facing: false 297 | } 298 | ports { 299 | name: "toy2-c2-ab1-s2i2-p4" 300 | port_speed_mbps: 100000 301 | dcn_facing: false 302 | } 303 | } 304 | nodes { 305 | name: "toy2-c2-ab1-s3i2" 306 | stage: 3 307 | index: 2 308 | flow_limit: 4000 309 | ecmp_limit: 4000 310 | group_limit: 128 311 | ports { 312 | name: "toy2-c2-ab1-s3i2-p1" 313 | port_speed_mbps: 100000 314 | dcn_facing: false 315 | } 316 | ports { 317 | name: "toy2-c2-ab1-s3i2-p2" 318 | port_speed_mbps: 100000 319 | dcn_facing: false 
320 | } 321 | ports { 322 | name: "toy2-c2-ab1-s3i2-p3" 323 | port_speed_mbps: 100000 324 | dcn_facing: true 325 | } 326 | ports { 327 | name: "toy2-c2-ab1-s3i2-p4" 328 | port_speed_mbps: 100000 329 | dcn_facing: true 330 | } 331 | } 332 | } 333 | } 334 | clusters { 335 | name: "toy2-c3" 336 | aggr_blocks { 337 | name: "toy2-c3-ab1" 338 | nodes { 339 | name: "toy2-c3-ab1-s2i1" 340 | stage: 2 341 | index: 1 342 | flow_limit: 4000 343 | ecmp_limit: 4000 344 | group_limit: 128 345 | ports { 346 | name: "toy2-c3-ab1-s2i1-p1" 347 | port_speed_mbps: 100000 348 | dcn_facing: false 349 | } 350 | ports { 351 | name: "toy2-c3-ab1-s2i1-p2" 352 | port_speed_mbps: 100000 353 | dcn_facing: false 354 | } 355 | ports { 356 | name: "toy2-c3-ab1-s2i1-p3" 357 | port_speed_mbps: 100000 358 | dcn_facing: false 359 | } 360 | ports { 361 | name: "toy2-c3-ab1-s2i1-p4" 362 | port_speed_mbps: 100000 363 | dcn_facing: false 364 | } 365 | } 366 | nodes { 367 | name: "toy2-c3-ab1-s3i1" 368 | stage: 3 369 | index: 1 370 | flow_limit: 4000 371 | ecmp_limit: 4000 372 | group_limit: 128 373 | ports { 374 | name: "toy2-c3-ab1-s3i1-p1" 375 | port_speed_mbps: 100000 376 | dcn_facing: false 377 | } 378 | ports { 379 | name: "toy2-c3-ab1-s3i1-p2" 380 | port_speed_mbps: 100000 381 | dcn_facing: false 382 | } 383 | ports { 384 | name: "toy2-c3-ab1-s3i1-p3" 385 | port_speed_mbps: 100000 386 | dcn_facing: true 387 | } 388 | ports { 389 | name: "toy2-c3-ab1-s3i1-p4" 390 | port_speed_mbps: 100000 391 | dcn_facing: true 392 | } 393 | } 394 | nodes { 395 | name: "toy2-c3-ab1-s2i2" 396 | stage: 2 397 | index: 2 398 | flow_limit: 4000 399 | ecmp_limit: 4000 400 | group_limit: 128 401 | ports { 402 | name: "toy2-c3-ab1-s2i2-p1" 403 | port_speed_mbps: 100000 404 | dcn_facing: false 405 | } 406 | ports { 407 | name: "toy2-c3-ab1-s2i2-p2" 408 | port_speed_mbps: 100000 409 | dcn_facing: false 410 | } 411 | ports { 412 | name: "toy2-c3-ab1-s2i2-p3" 413 | port_speed_mbps: 100000 414 | dcn_facing: false 415 | } 416 | ports { 417 | name: "toy2-c3-ab1-s2i2-p4" 418 | port_speed_mbps: 100000 419 | dcn_facing: false 420 | } 421 | } 422 | nodes { 423 | name: "toy2-c3-ab1-s3i2" 424 | stage: 3 425 | index: 2 426 | flow_limit: 4000 427 | ecmp_limit: 4000 428 | group_limit: 128 429 | ports { 430 | name: "toy2-c3-ab1-s3i2-p1" 431 | port_speed_mbps: 100000 432 | dcn_facing: false 433 | } 434 | ports { 435 | name: "toy2-c3-ab1-s3i2-p2" 436 | port_speed_mbps: 100000 437 | dcn_facing: false 438 | } 439 | ports { 440 | name: "toy2-c3-ab1-s3i2-p3" 441 | port_speed_mbps: 100000 442 | dcn_facing: true 443 | } 444 | ports { 445 | name: "toy2-c3-ab1-s3i2-p4" 446 | port_speed_mbps: 100000 447 | dcn_facing: true 448 | } 449 | } 450 | } 451 | nodes { 452 | name: "toy2-c3-ab1-s1i1" 453 | stage: 1 454 | index: 1 455 | flow_limit: 4000 456 | ecmp_limit: 4000 457 | group_limit: 128 458 | ports { 459 | name: "toy2-c3-ab1-s1i1-p1" 460 | port_speed_mbps: 100000 461 | dcn_facing: false 462 | } 463 | host_prefix: "172.16.1.0" 464 | host_mask: 27 465 | } 466 | nodes { 467 | name: "toy2-c3-ab1-s1i2" 468 | stage: 1 469 | index: 2 470 | flow_limit: 4000 471 | ecmp_limit: 4000 472 | group_limit: 128 473 | ports { 474 | name: "toy2-c3-ab1-s1i2-p1" 475 | port_speed_mbps: 100000 476 | dcn_facing: false 477 | } 478 | host_prefix: "172.16.1.32" 479 | host_mask: 27 480 | } 481 | nodes { 482 | name: "toy2-c3-ab1-s1i3" 483 | stage: 1 484 | index: 3 485 | flow_limit: 4000 486 | ecmp_limit: 4000 487 | group_limit: 128 488 | ports { 489 | name: "toy2-c3-ab1-s1i3-p1" 490 | port_speed_mbps: 
100000 491 | dcn_facing: false 492 | } 493 | host_prefix: "172.16.1.64" 494 | host_mask: 27 495 | } 496 | nodes { 497 | name: "toy2-c3-ab1-s1i4" 498 | stage: 1 499 | index: 4 500 | flow_limit: 4000 501 | ecmp_limit: 4000 502 | group_limit: 128 503 | ports { 504 | name: "toy2-c3-ab1-s1i4-p1" 505 | port_speed_mbps: 100000 506 | dcn_facing: false 507 | } 508 | host_prefix: "172.16.1.96" 509 | host_mask: 27 510 | } 511 | } 512 | # Cluster 1 internal links 513 | links { 514 | name: "toy2-c1-ab1-s2i1-p3:toy2-c1-ab1-s3i1-p1" 515 | src_port_id: "toy2-c1-ab1-s2i1-p3" 516 | dst_port_id: "toy2-c1-ab1-s3i1-p1" 517 | link_speed_mbps: 100000 518 | } 519 | links { 520 | name: "toy2-c1-ab1-s3i1-p1:toy2-c1-ab1-s2i1-p3" 521 | src_port_id: "toy2-c1-ab1-s3i1-p1" 522 | dst_port_id: "toy2-c1-ab1-s2i1-p3" 523 | link_speed_mbps: 100000 524 | } 525 | links { 526 | name: "toy2-c1-ab1-s2i1-p4:toy2-c1-ab1-s3i2-p1" 527 | src_port_id: "toy2-c1-ab1-s2i1-p4" 528 | dst_port_id: "toy2-c1-ab1-s3i2-p1" 529 | link_speed_mbps: 100000 530 | } 531 | links { 532 | name: "toy2-c1-ab1-s3i2-p1:toy2-c1-ab1-s2i1-p4" 533 | src_port_id: "toy2-c1-ab1-s3i2-p1" 534 | dst_port_id: "toy2-c1-ab1-s2i1-p4" 535 | link_speed_mbps: 100000 536 | } 537 | links { 538 | name: "toy2-c1-ab1-s2i2-p3:toy2-c1-ab1-s3i1-p2" 539 | src_port_id: "toy2-c1-ab1-s2i2-p3" 540 | dst_port_id: "toy2-c1-ab1-s3i1-p2" 541 | link_speed_mbps: 100000 542 | } 543 | links { 544 | name: "toy2-c1-ab1-s3i1-p2:toy2-c1-ab1-s2i2-p3" 545 | src_port_id: "toy2-c1-ab1-s3i1-p2" 546 | dst_port_id: "toy2-c1-ab1-s2i2-p3" 547 | link_speed_mbps: 100000 548 | } 549 | links { 550 | name: "toy2-c1-ab1-s2i2-p4:toy2-c1-ab1-s3i2-p2" 551 | src_port_id: "toy2-c1-ab1-s2i2-p4" 552 | dst_port_id: "toy2-c1-ab1-s3i2-p2" 553 | link_speed_mbps: 100000 554 | } 555 | links { 556 | name: "toy2-c1-ab1-s3i2-p2:toy2-c1-ab1-s2i2-p4" 557 | src_port_id: "toy2-c1-ab1-s3i2-p2" 558 | dst_port_id: "toy2-c1-ab1-s2i2-p4" 559 | link_speed_mbps: 100000 560 | } 561 | links { 562 | name: "toy2-c1-ab1-s1i1-p1:toy2-c1-ab1-s2i1-p1" 563 | src_port_id: "toy2-c1-ab1-s1i1-p1" 564 | dst_port_id: "toy2-c1-ab1-s2i1-p1" 565 | link_speed_mbps: 100000 566 | } 567 | links { 568 | name: "toy2-c1-ab1-s2i1-p1:toy2-c1-ab1-s1i1-p1" 569 | src_port_id: "toy2-c1-ab1-s2i1-p1" 570 | dst_port_id: "toy2-c1-ab1-s1i1-p1" 571 | link_speed_mbps: 100000 572 | } 573 | links { 574 | name: "toy2-c1-ab1-s1i2-p1:toy2-c1-ab1-s2i1-p2" 575 | src_port_id: "toy2-c1-ab1-s1i2-p1" 576 | dst_port_id: "toy2-c1-ab1-s2i1-p2" 577 | link_speed_mbps: 100000 578 | } 579 | links { 580 | name: "toy2-c1-ab1-s2i1-p2:toy2-c1-ab1-s1i2-p1" 581 | src_port_id: "toy2-c1-ab1-s2i1-p2" 582 | dst_port_id: "toy2-c1-ab1-s1i2-p1" 583 | link_speed_mbps: 100000 584 | } 585 | links { 586 | name: "toy2-c1-ab1-s1i3-p1:toy2-c1-ab1-s2i2-p1" 587 | src_port_id: "toy2-c1-ab1-s1i3-p1" 588 | dst_port_id: "toy2-c1-ab1-s2i2-p1" 589 | link_speed_mbps: 100000 590 | } 591 | links { 592 | name: "toy2-c1-ab1-s2i2-p1:toy2-c1-ab1-s1i3-p1" 593 | src_port_id: "toy2-c1-ab1-s2i2-p1" 594 | dst_port_id: "toy2-c1-ab1-s1i3-p1" 595 | link_speed_mbps: 100000 596 | } 597 | links { 598 | name: "toy2-c1-ab1-s1i4-p1:toy2-c1-ab1-s2i2-p2" 599 | src_port_id: "toy2-c1-ab1-s1i4-p1" 600 | dst_port_id: "toy2-c1-ab1-s2i2-p2" 601 | link_speed_mbps: 100000 602 | } 603 | links { 604 | name: "toy2-c1-ab1-s2i2-p2:toy2-c1-ab1-s1i4-p1" 605 | src_port_id: "toy2-c1-ab1-s2i2-p2" 606 | dst_port_id: "toy2-c1-ab1-s1i4-p1" 607 | link_speed_mbps: 100000 608 | } 609 | # Cluster 2 internal links 610 | links { 611 | name: 
"toy2-c2-ab1-s2i1-p3:toy2-c2-ab1-s3i1-p1" 612 | src_port_id: "toy2-c2-ab1-s2i1-p3" 613 | dst_port_id: "toy2-c2-ab1-s3i1-p1" 614 | link_speed_mbps: 100000 615 | } 616 | links { 617 | name: "toy2-c2-ab1-s3i1-p1:toy2-c2-ab1-s2i1-p3" 618 | src_port_id: "toy2-c2-ab1-s3i1-p1" 619 | dst_port_id: "toy2-c2-ab1-s2i1-p3" 620 | link_speed_mbps: 100000 621 | } 622 | links { 623 | name: "toy2-c2-ab1-s2i1-p4:toy2-c2-ab1-s3i2-p1" 624 | src_port_id: "toy2-c2-ab1-s2i1-p4" 625 | dst_port_id: "toy2-c2-ab1-s3i2-p1" 626 | link_speed_mbps: 100000 627 | } 628 | links { 629 | name: "toy2-c2-ab1-s3i2-p1:toy2-c2-ab1-s2i1-p4" 630 | src_port_id: "toy2-c2-ab1-s3i2-p1" 631 | dst_port_id: "toy2-c2-ab1-s2i1-p4" 632 | link_speed_mbps: 100000 633 | } 634 | links { 635 | name: "toy2-c2-ab1-s2i2-p3:toy2-c2-ab1-s3i1-p2" 636 | src_port_id: "toy2-c2-ab1-s2i2-p3" 637 | dst_port_id: "toy2-c2-ab1-s3i1-p2" 638 | link_speed_mbps: 100000 639 | } 640 | links { 641 | name: "toy2-c2-ab1-s3i1-p2:toy2-c2-ab1-s2i2-p3" 642 | src_port_id: "toy2-c2-ab1-s3i1-p2" 643 | dst_port_id: "toy2-c2-ab1-s2i2-p3" 644 | link_speed_mbps: 100000 645 | } 646 | links { 647 | name: "toy2-c2-ab1-s2i2-p4:toy2-c2-ab1-s3i2-p2" 648 | src_port_id: "toy2-c2-ab1-s2i2-p4" 649 | dst_port_id: "toy2-c2-ab1-s3i2-p2" 650 | link_speed_mbps: 100000 651 | } 652 | links { 653 | name: "toy2-c2-ab1-s3i2-p2:toy2-c2-ab1-s2i2-p4" 654 | src_port_id: "toy2-c2-ab1-s3i2-p2" 655 | dst_port_id: "toy2-c2-ab1-s2i2-p4" 656 | link_speed_mbps: 100000 657 | } 658 | # Cluster 3 internal links 659 | links { 660 | name: "toy2-c3-ab1-s2i1-p3:toy2-c3-ab1-s3i1-p1" 661 | src_port_id: "toy2-c3-ab1-s2i1-p3" 662 | dst_port_id: "toy2-c3-ab1-s3i1-p1" 663 | link_speed_mbps: 100000 664 | } 665 | links { 666 | name: "toy2-c3-ab1-s3i1-p1:toy2-c3-ab1-s2i1-p3" 667 | src_port_id: "toy2-c3-ab1-s3i1-p1" 668 | dst_port_id: "toy2-c3-ab1-s2i1-p3" 669 | link_speed_mbps: 100000 670 | } 671 | links { 672 | name: "toy2-c3-ab1-s2i1-p4:toy2-c3-ab1-s3i2-p1" 673 | src_port_id: "toy2-c3-ab1-s2i1-p4" 674 | dst_port_id: "toy2-c3-ab1-s3i2-p1" 675 | link_speed_mbps: 100000 676 | } 677 | links { 678 | name: "toy2-c3-ab1-s3i2-p1:toy2-c3-ab1-s2i1-p4" 679 | src_port_id: "toy2-c3-ab1-s3i2-p1" 680 | dst_port_id: "toy2-c3-ab1-s2i1-p4" 681 | link_speed_mbps: 100000 682 | } 683 | links { 684 | name: "toy2-c3-ab1-s2i2-p3:toy2-c3-ab1-s3i1-p2" 685 | src_port_id: "toy2-c3-ab1-s2i2-p3" 686 | dst_port_id: "toy2-c3-ab1-s3i1-p2" 687 | link_speed_mbps: 100000 688 | } 689 | links { 690 | name: "toy2-c3-ab1-s3i1-p2:toy2-c3-ab1-s2i2-p3" 691 | src_port_id: "toy2-c3-ab1-s3i1-p2" 692 | dst_port_id: "toy2-c3-ab1-s2i2-p3" 693 | link_speed_mbps: 100000 694 | } 695 | links { 696 | name: "toy2-c3-ab1-s2i2-p4:toy2-c3-ab1-s3i2-p2" 697 | src_port_id: "toy2-c3-ab1-s2i2-p4" 698 | dst_port_id: "toy2-c3-ab1-s3i2-p2" 699 | link_speed_mbps: 100000 700 | } 701 | links { 702 | name: "toy2-c3-ab1-s3i2-p2:toy2-c3-ab1-s2i2-p4" 703 | src_port_id: "toy2-c3-ab1-s3i2-p2" 704 | dst_port_id: "toy2-c3-ab1-s2i2-p4" 705 | link_speed_mbps: 100000 706 | } 707 | links { 708 | name: "toy2-c3-ab1-s1i1-p1:toy2-c3-ab1-s2i1-p1" 709 | src_port_id: "toy2-c3-ab1-s1i1-p1" 710 | dst_port_id: "toy2-c3-ab1-s2i1-p1" 711 | link_speed_mbps: 100000 712 | } 713 | links { 714 | name: "toy2-c3-ab1-s2i1-p1:toy2-c3-ab1-s1i1-p1" 715 | src_port_id: "toy2-c3-ab1-s2i1-p1" 716 | dst_port_id: "toy2-c3-ab1-s1i1-p1" 717 | link_speed_mbps: 100000 718 | } 719 | links { 720 | name: "toy2-c3-ab1-s1i2-p1:toy2-c3-ab1-s2i1-p2" 721 | src_port_id: "toy2-c3-ab1-s1i2-p1" 722 | dst_port_id: "toy2-c3-ab1-s2i1-p2" 723 | 
link_speed_mbps: 100000 724 | } 725 | links { 726 | name: "toy2-c3-ab1-s2i1-p2:toy2-c3-ab1-s1i2-p1" 727 | src_port_id: "toy2-c3-ab1-s2i1-p2" 728 | dst_port_id: "toy2-c3-ab1-s1i2-p1" 729 | link_speed_mbps: 100000 730 | } 731 | links { 732 | name: "toy2-c3-ab1-s1i3-p1:toy2-c3-ab1-s2i2-p1" 733 | src_port_id: "toy2-c3-ab1-s1i3-p1" 734 | dst_port_id: "toy2-c3-ab1-s2i2-p1" 735 | link_speed_mbps: 100000 736 | } 737 | links { 738 | name: "toy2-c3-ab1-s2i2-p1:toy2-c3-ab1-s1i3-p1" 739 | src_port_id: "toy2-c3-ab1-s2i2-p1" 740 | dst_port_id: "toy2-c3-ab1-s1i3-p1" 741 | link_speed_mbps: 100000 742 | } 743 | links { 744 | name: "toy2-c3-ab1-s1i4-p1:toy2-c3-ab1-s2i2-p2" 745 | src_port_id: "toy2-c3-ab1-s1i4-p1" 746 | dst_port_id: "toy2-c3-ab1-s2i2-p2" 747 | link_speed_mbps: 100000 748 | } 749 | links { 750 | name: "toy2-c3-ab1-s2i2-p2:toy2-c3-ab1-s1i4-p1" 751 | src_port_id: "toy2-c3-ab1-s2i2-p2" 752 | dst_port_id: "toy2-c3-ab1-s1i4-p1" 753 | link_speed_mbps: 100000 754 | } 755 | # DCN links 756 | links { 757 | name: "toy2-c1-ab1-s3i1-p3:toy2-c2-ab1-s3i1-p3" 758 | src_port_id: "toy2-c1-ab1-s3i1-p3" 759 | dst_port_id: "toy2-c2-ab1-s3i1-p3" 760 | link_speed_mbps: 100000 761 | } 762 | links { 763 | name: "toy2-c2-ab1-s3i1-p3:toy2-c1-ab1-s3i1-p3" 764 | src_port_id: "toy2-c2-ab1-s3i1-p3" 765 | dst_port_id: "toy2-c1-ab1-s3i1-p3" 766 | link_speed_mbps: 100000 767 | } 768 | links { 769 | name: "toy2-c1-ab1-s3i1-p4:toy2-c3-ab1-s3i1-p3" 770 | src_port_id: "toy2-c1-ab1-s3i1-p4" 771 | dst_port_id: "toy2-c3-ab1-s3i1-p3" 772 | link_speed_mbps: 100000 773 | } 774 | links { 775 | name: "toy2-c3-ab1-s3i1-p3:toy2-c1-ab1-s3i1-p4" 776 | src_port_id: "toy2-c3-ab1-s3i1-p3" 777 | dst_port_id: "toy2-c1-ab1-s3i1-p4" 778 | link_speed_mbps: 100000 779 | } 780 | links { 781 | name: "toy2-c1-ab1-s3i2-p3:toy2-c2-ab1-s3i2-p3" 782 | src_port_id: "toy2-c1-ab1-s3i2-p3" 783 | dst_port_id: "toy2-c2-ab1-s3i2-p3" 784 | link_speed_mbps: 100000 785 | } 786 | links { 787 | name: "toy2-c2-ab1-s3i2-p3:toy2-c1-ab1-s3i2-p3" 788 | src_port_id: "toy2-c2-ab1-s3i2-p3" 789 | dst_port_id: "toy2-c1-ab1-s3i2-p3" 790 | link_speed_mbps: 100000 791 | } 792 | links { 793 | name: "toy2-c1-ab1-s3i2-p4:toy2-c3-ab1-s3i2-p3" 794 | src_port_id: "toy2-c1-ab1-s3i2-p4" 795 | dst_port_id: "toy2-c3-ab1-s3i2-p3" 796 | link_speed_mbps: 100000 797 | } 798 | links { 799 | name: "toy2-c3-ab1-s3i2-p3:toy2-c1-ab1-s3i2-p4" 800 | src_port_id: "toy2-c3-ab1-s3i2-p3" 801 | dst_port_id: "toy2-c1-ab1-s3i2-p4" 802 | link_speed_mbps: 100000 803 | } 804 | links { 805 | name: "toy2-c2-ab1-s3i1-p4:toy2-c3-ab1-s3i1-p4" 806 | src_port_id: "toy2-c2-ab1-s3i1-p4" 807 | dst_port_id: "toy2-c3-ab1-s3i1-p4" 808 | link_speed_mbps: 100000 809 | } 810 | links { 811 | name: "toy2-c3-ab1-s3i1-p4:toy2-c2-ab1-s3i1-p4" 812 | src_port_id: "toy2-c3-ab1-s3i1-p4" 813 | dst_port_id: "toy2-c2-ab1-s3i1-p4" 814 | link_speed_mbps: 100000 815 | } 816 | links { 817 | name: "toy2-c2-ab1-s3i2-p4:toy2-c3-ab1-s3i2-p4" 818 | src_port_id: "toy2-c2-ab1-s3i2-p4" 819 | dst_port_id: "toy2-c3-ab1-s3i2-p4" 820 | link_speed_mbps: 100000 821 | } 822 | links { 823 | name: "toy2-c3-ab1-s3i2-p4:toy2-c2-ab1-s3i2-p4" 824 | src_port_id: "toy2-c3-ab1-s3i2-p4" 825 | dst_port_id: "toy2-c2-ab1-s3i2-p4" 826 | link_speed_mbps: 100000 827 | } 828 | paths { 829 | name: "toy2-c1-ab1:toy2-c2-ab1" 830 | src_aggr_block: "toy2-c1-ab1" 831 | dst_aggr_block: "toy2-c2-ab1" 832 | capacity_mbps: 200000 833 | } 834 | paths { 835 | name: "toy2-c2-ab1:toy2-c1-ab1" 836 | src_aggr_block: "toy2-c2-ab1" 837 | dst_aggr_block: "toy2-c1-ab1" 838 | capacity_mbps: 
200000 839 | } 840 | paths { 841 | name: "toy2-c1-ab1:toy2-c3-ab1" 842 | src_aggr_block: "toy2-c1-ab1" 843 | dst_aggr_block: "toy2-c3-ab1" 844 | capacity_mbps: 200000 845 | } 846 | paths { 847 | name: "toy2-c3-ab1:toy2-c1-ab1" 848 | src_aggr_block: "toy2-c3-ab1" 849 | dst_aggr_block: "toy2-c1-ab1" 850 | capacity_mbps: 200000 851 | } 852 | paths { 853 | name: "toy2-c2-ab1:toy2-c3-ab1" 854 | src_aggr_block: "toy2-c2-ab1" 855 | dst_aggr_block: "toy2-c3-ab1" 856 | capacity_mbps: 200000 857 | } 858 | paths { 859 | name: "toy2-c3-ab1:toy2-c2-ab1" 860 | src_aggr_block: "toy2-c3-ab1" 861 | dst_aggr_block: "toy2-c2-ab1" 862 | capacity_mbps: 200000 863 | } 864 | -------------------------------------------------------------------------------- /tests/load_toy_test.py: -------------------------------------------------------------------------------- 1 | import ipaddress 2 | import unittest 3 | 4 | import numpy as np 5 | import proto.traffic_pb2 as traffic_pb2 6 | 7 | import common.flags as FLAG 8 | from topology.topogen import (generateF1, generateF2, generateToy3, 9 | generateToy4, generateToy5) 10 | from topology.topology import Topology, filterPathSetWithSeg, loadTopo 11 | from traffic.tmgen import tmgen 12 | from traffic.traffic import Traffic, loadTraffic 13 | 14 | P9 = 'toy2-c3-ab1-s1i1-p1' 15 | P10 = 'toy2-c3-ab1-s2i1-p1' 16 | P11 = 'toy2-c1-ab1-s3i2-p4' 17 | P12 = 'toy2-c3-ab1-s3i2-p3' 18 | PATH1 = 'toy2-c1-ab1:toy2-c2-ab1' 19 | PATH2 = 'toy2-c2-ab1:toy2-c1-ab1' 20 | LINK1 = 'toy2-c1-ab1-s3i1-p3:toy2-c2-ab1-s3i1-p3' 21 | LINK2 = 'toy2-c1-ab1-s3i2-p3:toy2-c2-ab1-s3i2-p3' 22 | C1AB1 = 'toy2-c1-ab1' 23 | C2AB1 = 'toy2-c2-ab1' 24 | C3AB1 = 'toy2-c3-ab1' 25 | TOY2_PATH = 'tests/data/toy2_topo.textproto' 26 | TOY2_TRAFFIC_PATH = 'tests/data/toy2_traffic.textproto' 27 | TOR1 = 'toy2-c1-ab1-s1i1' 28 | TOR2 = 'toy2-c3-ab1-s1i4' 29 | # Toy3 entities. 30 | TOY3_PATH1 = 'toy3-c1-ab1:toy3-c2-ab1' 31 | TOY3_PATH2 = 'toy3-c1-ab1:toy3-c65-ab1' 32 | TOY3_PATH3 = 'toy3-c64-ab1:toy3-c65-ab1' 33 | TOY3_LINK1 = 'toy3-c1-ab1-s3i1-p1:toy3-c2-ab1-s3i1-p1' 34 | TOY3_LINK2 = 'toy3-c1-ab1-s3i4-p1:toy3-c2-ab1-s3i4-p1' 35 | TOY3_PORT1 = 'toy3-c65-ab1-s3i2-p1' 36 | TOY3_PEER_PORT1 = 'toy3-c1-ab1-s3i2-p127' 37 | TOY3_PORT2 = 'toy3-c1-ab1-s3i1-p2' 38 | TOY3_PEER_PORT2 = 'toy3-c1-ab1-s2i1-p1' 39 | TOY3_PORT3 = 'toy3-c2-ab1-s2i1-p2' 40 | TOY3_PEER_PORT3 = 'toy3-c2-ab1-s1i1-p1' 41 | TOY3_AGGR_BLOCK1 = 'toy3-c65-ab1' 42 | TOY3_AGGR_BLOCK2 = 'toy3-c1-ab1' 43 | TOY3_TOR1 = 'toy3-c65-ab1-s1i1' 44 | TOY3_TOR2 = 'toy3-c1-ab1-s1i1' 45 | # Toy4 entities. 46 | TOY4_PATH1 = 'toy4-c1-ab1:toy4-c2-ab1' 47 | TOY4_LINK1 = 'toy4-c1-ab1-s3i1-p1:toy4-c2-ab1-s3i1-p1' 48 | TOY4_PORT1 = 'toy4-c1-ab1-s3i1-p1' 49 | TOY4_PEER_PORT1 = 'toy4-c2-ab1-s3i1-p1' 50 | TOY4_PORT2 = 'toy4-c1-ab1-s3i1-p2' 51 | TOY4_PEER_PORT2 = 'toy4-c1-ab1-s2i1-p1' 52 | TOY4_PORT3 = 'toy4-c1-ab1-s2i1-p2' 53 | TOY4_PEER_PORT3 = 'toy4-c1-ab1-s1i1-p1' 54 | TOY4_AGGR_BLOCK1 = 'toy4-c1-ab1' 55 | TOY4_AGGR_BLOCK2 = 'toy4-c5-ab1' 56 | TOY4_TOR1 = 'toy4-c1-ab1-s1i1' 57 | # Toy5 entities. 
58 | TOY5_PATH1 = 'toy5-c1-ab1:toy5-c2-ab1' 59 | TOY5_PATH2 = 'toy5-c1-ab1:toy5-c33-ab1' 60 | TOY5_PATH3 = 'toy5-c32-ab1:toy5-c33-ab1' 61 | TOY5_LINK1 = 'toy5-c1-ab1-s3i1-p1:toy5-c2-ab1-s3i1-p1' 62 | TOY5_PORT1 = 'toy5-c1-ab1-s3i1-p1' 63 | TOY5_PEER_PORT1 = 'toy5-c2-ab1-s3i1-p1' 64 | TOY5_PORT2 = 'toy5-c1-ab1-s3i1-p2' 65 | TOY5_PEER_PORT2 = 'toy5-c1-ab1-s2i1-p1' 66 | TOY5_PORT3 = 'toy5-c1-ab1-s2i1-p2' 67 | TOY5_PEER_PORT3 = 'toy5-c1-ab1-s1i1-p1' 68 | TOY5_AGGR_BLOCK1 = 'toy5-c1-ab1' 69 | TOY5_AGGR_BLOCK2 = 'toy5-c2-ab1' 70 | TOY5_TOR1 = 'toy5-c1-ab1-s1i1' 71 | # F1 entities. 72 | F1_PATH1 = 'f1-c1-ab1:f1-c2-ab1' 73 | F1_PATH2 = 'f1-c1-ab1:f1-c33-ab1' 74 | F1_PATH3 = 'f1-c32-ab1:f1-c33-ab1' 75 | F1_LINK1 = 'f1-c1-ab1-s3i1-p1:f1-c2-ab1-s3i1-p1' 76 | F1_PORT1 = 'f1-c1-ab1-s3i1-p1' 77 | F1_PEER_PORT1 = 'f1-c2-ab1-s3i1-p1' 78 | F1_PORT2 = 'f1-c1-ab1-s3i1-p2' 79 | F1_PEER_PORT2 = 'f1-c1-ab1-s2i1-p1' 80 | F1_PORT3 = 'f1-c1-ab1-s2i1-p2' 81 | F1_PEER_PORT3 = 'f1-c1-ab1-s1i1-p1' 82 | F1_AGGR_BLOCK1 = 'f1-c1-ab1' 83 | F1_AGGR_BLOCK2 = 'f1-c2-ab1' 84 | F1_TOR1 = 'f1-c1-ab1-s1i1' 85 | # F2 entities. 86 | F2_PATH1 = 'f2-c1-ab1:f2-c2-ab1' 87 | F2_PATH2 = 'f2-c1-ab1:f2-c5-ab1' 88 | F2_PATH3 = 'f2-c4-ab1:f2-c5-ab1' 89 | F2_LINK1 = 'f2-c1-ab1-s3i1-p1:f2-c2-ab1-s3i1-p1' 90 | F2_PORT1 = 'f2-c1-ab1-s3i1-p1' 91 | F2_PEER_PORT1 = 'f2-c2-ab1-s3i1-p1' 92 | F2_PORT2 = 'f2-c1-ab1-s3i1-p2' 93 | F2_PEER_PORT2 = 'f2-c1-ab1-s2i1-p1' 94 | F2_PORT3 = 'f2-c1-ab1-s2i1-p2' 95 | F2_PEER_PORT3 = 'f2-c1-ab1-s1i1-p1' 96 | F2_AGGR_BLOCK1 = 'f2-c1-ab1' 97 | F2_AGGR_BLOCK2 = 'f2-c2-ab1' 98 | F2_TOR1 = 'f2-c1-ab1-s1i1' 99 | 100 | 101 | class TestLoadToyNet(unittest.TestCase): 102 | def test_load_invalid_topo(self): 103 | self.assertEqual(None, loadTopo('')) 104 | 105 | def test_load_invalid_traffic(self): 106 | self.assertEqual(None, loadTraffic('')) 107 | 108 | def test_load_valid_toynet(self): 109 | toy1 = loadTopo(TOY2_PATH) 110 | self.assertNotEqual(None, toy1) 111 | 112 | def test_toy2_topology_construction(self): 113 | toy2 = Topology(TOY2_PATH) 114 | self.assertEqual(3, toy2.numClusters()) 115 | self.assertEqual(20, toy2.numNodes()) 116 | self.assertEqual(56, toy2.numPorts()) 117 | self.assertEqual(52, toy2.numLinks()) 118 | self.assertEqual(P10, toy2.findPeerPortOfPort(P9).name) 119 | self.assertFalse(toy2.findPeerPortOfPort(P9).dcn_facing) 120 | self.assertEqual(P12, toy2.findPeerPortOfPort(P11).name) 121 | self.assertTrue(toy2.findPeerPortOfPort(P11).dcn_facing) 122 | self.assertEqual(-1, toy2.findCapacityOfPath('non-existent-path')) 123 | self.assertEqual(toy2.findCapacityOfPath(PATH1), 124 | toy2.findCapacityOfPath(PATH2)) 125 | self.assertEqual(200000, toy2.findCapacityOfPath(PATH1)) 126 | # verify IP prefix assignment 127 | ip_aggregate_1 = ipaddress.ip_network('172.16.0.0/24') 128 | ip_aggregate_2 = ipaddress.ip_network('172.16.1.0/24') 129 | ip_prefix1 = toy2.findHostPrefixOfToR(TOR1) 130 | self.assertTrue(ip_prefix1.subnet_of(ip_aggregate_1)) 131 | self.assertFalse(ip_prefix1.subnet_of(ip_aggregate_2)) 132 | ip_prefix2 = toy2.findHostPrefixOfToR(TOR2) 133 | self.assertTrue(ip_prefix2.subnet_of(ip_aggregate_2)) 134 | self.assertFalse(ip_prefix2.subnet_of(ip_aggregate_1)) 135 | # Verify topology query results. 
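# findPathSetOfAggrBlockPair() maps each node-level path (a tuple of AggrBlocks) to its list of single-hop segments; toy2 offers a direct c1->c3 path plus a 2-hop transit path via c2, as the expected set below shows.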
136 | path_set = toy2.findPathSetOfAggrBlockPair(C1AB1, C3AB1) 137 | expected_path_set = { 138 | (C1AB1, C3AB1): [(C1AB1, C3AB1)], 139 | (C1AB1, C2AB1, C3AB1): [(C1AB1, C2AB1), (C2AB1, C3AB1)] 140 | } 141 | self.assertEqual(expected_path_set, path_set) 142 | expected_filtered_set = { 143 | (C1AB1, C2AB1, C3AB1): [(C1AB1, C2AB1), (C2AB1, C3AB1)] 144 | } 145 | filtered_path_set = filterPathSetWithSeg(path_set, (C2AB1, C3AB1)) 146 | self.assertEqual(expected_filtered_set, filtered_path_set) 147 | # Verify findLinksOfPath() 148 | self.assertEqual(None, toy2.findLinksOfPath('non-existent-path')) 149 | links = toy2.findLinksOfPath(PATH1) 150 | self.assertEqual(2, len(links)) 151 | self.assertEqual([LINK1, LINK2], [link.name for link in links]) 152 | 153 | def test_toy2_traffic_demand(self): 154 | toy2_traffic = loadTraffic(TOY2_TRAFFIC_PATH) 155 | self.assertNotEqual(None, toy2_traffic) 156 | self.assertEqual(traffic_pb2.TrafficDemand.DemandType.LEVEL_AGGR_BLOCK, 157 | toy2_traffic.type) 158 | self.assertEqual(2, len(toy2_traffic.demands)) 159 | self.assertEqual('toy2-c1-ab1', toy2_traffic.demands[0].src) 160 | self.assertEqual('toy2-c3-ab1', toy2_traffic.demands[0].dst) 161 | self.assertEqual(300000, toy2_traffic.demands[0].volume_mbps) 162 | self.assertEqual('toy2-c3-ab1', toy2_traffic.demands[1].src) 163 | self.assertEqual('toy2-c1-ab1', toy2_traffic.demands[1].dst) 164 | self.assertEqual(100000, toy2_traffic.demands[1].volume_mbps) 165 | 166 | def test_toy2_traffic_construction(self): 167 | toy2 = Topology(TOY2_PATH) 168 | toy2_traffic = Traffic(toy2, TOY2_TRAFFIC_PATH) 169 | self.assertEqual(2, len(toy2_traffic.getAllDemands())) 170 | self.assertEqual( 171 | { 172 | ('toy2-c1-ab1', 'toy2-c3-ab1'): 300000, 173 | ('toy2-c3-ab1', 'toy2-c1-ab1'): 100000 174 | }, toy2_traffic.getAllDemands()) 175 | 176 | def test_toy2_topology_serialization(self): 177 | toy2 = Topology(TOY2_PATH) 178 | toy2_proto = toy2.serialize() 179 | # Check network-level fields. 180 | self.assertEqual('toy2', toy2_proto.name) 181 | self.assertEqual(3, len(toy2_proto.clusters)) 182 | self.assertEqual(6, len(toy2_proto.paths)) 183 | 184 | 185 | class TestLoadToy3Net(unittest.TestCase): 186 | def test_toy3_topology_construction(self): 187 | FLAG.P_LINK_FAILURE = 0.0 188 | toy3 = Topology('', input_proto=generateToy3()) 189 | self.assertEqual(65, toy3.numClusters()) 190 | # 8 + 32 nodes per cluster 191 | self.assertEqual(65 * 40, toy3.numNodes()) 192 | # 8 * 32 * 2 * 65 S1-S2 links in total, 64 * 4 * 2 * 65 S2-S3 links, 193 | # 64 * 4 * 65 S3-S3 links. 194 | self.assertEqual(8 * 32 * 2 * 65 + 64 * 4 * 2 * 65 + 64 * 4 * 65, 195 | toy3.numLinks()) 196 | self.assertEqual(65 * 64, len(toy3.getAllPaths())) 197 | # Path between two 40G clusters: 4 * 40 198 | self.assertEqual(160000, toy3.findCapacityOfPath(TOY3_PATH1)) 199 | # Path between a 40G cluster and a 200G cluster: 4 * 40 200 | self.assertEqual(160000, toy3.findCapacityOfPath(TOY3_PATH2)) 201 | # Path between two 200G clusters: 4 * 200 202 | # (TOY3_PATH3 connects toy3-c64-ab1 and toy3-c65-ab1, both 200G blocks.) 203 | self.assertEqual(800000, toy3.findCapacityOfPath(TOY3_PATH3)) 204 | links = [l.name for l in toy3.findLinksOfPath(TOY3_PATH1)] 205 | self.assertTrue(TOY3_LINK1 in links) 206 | self.assertTrue(TOY3_LINK2 in links) 207 | # Verify S3-S3 port and peer. 208 | self.assertEqual(TOY3_PEER_PORT1, 209 | toy3.findPeerPortOfPort(TOY3_PORT1).name) 210 | # Verify that all DCN ports have odd port indices.
211 | p1 = toy3.getPortByName(TOY3_PORT1) 212 | self.assertTrue(p1.dcn_facing) 213 | self.assertEqual(1, p1.index % 2) 214 | pp1 = toy3.getPortByName(TOY3_PEER_PORT1) 215 | self.assertTrue(pp1.dcn_facing) 216 | self.assertEqual(1, pp1.index % 2) 217 | # Verify S2-S3 port and peer. 218 | self.assertEqual(TOY3_PEER_PORT2, 219 | toy3.findPeerPortOfPort(TOY3_PORT2).name) 220 | # Verify that S2-facing S3 ports have even indices. 221 | p2 = toy3.getPortByName(TOY3_PORT2) 222 | self.assertFalse(p2.dcn_facing) 223 | self.assertEqual(0, p2.index % 2) 224 | # Verify that S3-facing S2 ports have odd indices. 225 | pp2 = toy3.getPortByName(TOY3_PEER_PORT2) 226 | self.assertFalse(pp2.dcn_facing) 227 | self.assertEqual(1, pp2.index % 2) 228 | # Verify S1-S2 port and peer. 229 | self.assertEqual(TOY3_PEER_PORT3, 230 | toy3.findPeerPortOfPort(TOY3_PORT3).name) 231 | # Verify that S1-facing S2 ports have even indices. 232 | p3 = toy3.getPortByName(TOY3_PORT3) 233 | self.assertFalse(p3.dcn_facing) 234 | self.assertEqual(0, p3.index % 2) 235 | # Verify that S2-facing S1 ports have odd indices. 236 | pp3 = toy3.getPortByName(TOY3_PEER_PORT3) 237 | self.assertFalse(pp3.dcn_facing) 238 | self.assertEqual(1, pp3.index % 2) 239 | # Verify that port and AggrBlock have the correct child-parent relationship. 240 | self.assertEqual(TOY3_AGGR_BLOCK1, 241 | toy3.findAggrBlockOfPort(TOY3_PORT1).name) 242 | self.assertTrue(toy3.hasAggrBlock(TOY3_AGGR_BLOCK1)) 243 | # Verify the 'virtual' parent of ToRs. 244 | self.assertEqual(TOY3_AGGR_BLOCK1, 245 | toy3.findAggrBlockOfToR(TOY3_TOR1).name) 246 | self.assertEqual(TOY3_AGGR_BLOCK2, 247 | toy3.findAggrBlockOfToR(TOY3_TOR2).name) 248 | # Verify the stage and index of ToR1. 249 | self.assertEqual(1, toy3.getNodeByName(TOY3_TOR1).stage) 250 | self.assertEqual(1, toy3.getNodeByName(TOY3_TOR1).index) 251 | 252 | def test_toy3_traffic_construction1(self): 253 | toy3 = Topology('', input_proto=generateToy3()) 254 | traffic_proto = tmgen(tor_level=False, 255 | cluster_vector=np.array([1] * 22 + [2.5] * 22 + 256 | [5] * 21), 257 | num_nodes=32, 258 | model='flat', 259 | dist='', 260 | netname='toy3') 261 | toy3_traffic = Traffic(toy3, '', traffic_proto) 262 | self.assertEqual(traffic_pb2.TrafficDemand.DemandType.LEVEL_AGGR_BLOCK, 263 | toy3_traffic.getDemandType()) 264 | self.assertEqual(64 * 65, len(toy3_traffic.getAllDemands())) 265 | # Flat demand matrix has the same volume in both directions. 266 | self.assertEqual( 267 | 80000, toy3_traffic.getDemand(TOY3_AGGR_BLOCK1, TOY3_AGGR_BLOCK2)) 268 | self.assertEqual( 269 | 80000, toy3_traffic.getDemand(TOY3_AGGR_BLOCK2, TOY3_AGGR_BLOCK1)) 270 | 271 | def test_toy3_traffic_construction2(self): 272 | FLAG.P_SPARSE = 0.1 273 | toy3 = Topology('', input_proto=generateToy3()) 274 | traffic_proto = tmgen(tor_level=False, 275 | cluster_vector=np.array([1] * 22 + [2.5] * 22 + 276 | [5] * 21), 277 | num_nodes=32, 278 | model='gravity', 279 | dist='exp', 280 | netname='toy3') 281 | toy3_traffic = Traffic(toy3, '', traffic_proto) 282 | self.assertEqual(traffic_pb2.TrafficDemand.DemandType.LEVEL_AGGR_BLOCK, 283 | toy3_traffic.getDemandType()) 284 | self.assertTrue(64 * 65 >= len(toy3_traffic.getAllDemands())) 285 | # Bernoulli sampling may not produce exactly the requested number of 286 | # non-empty blocks, so we conservatively under-estimate by 20%.
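# With P_SPARSE = 0.1 and 65 blocks, the expected number of non-empty blocks is (1 - 0.1) * 65 = 58.5; the 0.8 factor below relaxes the bound to round(46.8) = 47 blocks so the check tolerates sampling noise.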
287 | non_empty_blocks = round((1 - FLAG.P_SPARSE) * 65 * 0.8) 288 | num_demands = (non_empty_blocks - 1) * non_empty_blocks 289 | self.assertTrue(num_demands <= len(toy3_traffic.getAllDemands())) 290 | 291 | def test_toy3_traffic_construction3(self): 292 | FLAG.P_SPARSE = 0.0 293 | toy3 = Topology('', input_proto=generateToy3()) 294 | traffic_proto = tmgen(tor_level=False, 295 | cluster_vector=np.array([1] * 22 + [2.5] * 22 + 296 | [5] * 21), 297 | num_nodes=32, 298 | model='gravity', 299 | dist='exp', 300 | netname='toy3') 301 | toy3_traffic = Traffic(toy3, '', traffic_proto) 302 | self.assertEqual(traffic_pb2.TrafficDemand.DemandType.LEVEL_AGGR_BLOCK, 303 | toy3_traffic.getDemandType()) 304 | self.assertEqual(64 * 65, len(toy3_traffic.getAllDemands())) 305 | # Verify the sum of all demands from AggrBlock2 does not exceed its 306 | # total capacity. 307 | tot_traffic = 0 308 | for i in range(2, 66): 309 | dst_block_name = f'toy3-c{i}-ab1' 310 | tot_traffic += toy3_traffic.getDemand(TOY3_AGGR_BLOCK2, 311 | dst_block_name) 312 | self.assertTrue(tot_traffic < 40000 * 256) 313 | # Verify the sum of all demands from AggrBlock1 does not exceed its 314 | # total capacity. The effective total capacity is not 200G * 256, but 315 | # a function of the peer block speed due to speed auto-negotiation. 316 | tot_traffic = 0 317 | for i in range(1, 65): 318 | dst_block_name = f'toy3-c{i}-ab1' 319 | tot_traffic += toy3_traffic.getDemand(TOY3_AGGR_BLOCK1, 320 | dst_block_name) 321 | self.assertTrue( 322 | tot_traffic < 40000 * 22 * 4 + 100000 * 22 * 4 + 200000 * 20 * 4) 323 | 324 | def test_toy3_traffic_construction4(self): 325 | toy3 = Topology('', input_proto=generateToy3()) 326 | traffic_proto = tmgen(tor_level=True, 327 | cluster_vector=np.array([1] * 22 + [2.5] * 22 + 328 | [5] * 21), 329 | num_nodes=32, 330 | model='flat', 331 | dist='', 332 | netname='toy3') 333 | toy3_traffic = Traffic(toy3, '', traffic_proto) 334 | self.assertEqual(traffic_pb2.TrafficDemand.DemandType.LEVEL_TOR, 335 | toy3_traffic.getDemandType()) 336 | self.assertEqual(64 * 65, len(toy3_traffic.getAllDemands())) 337 | # Flat demand matrix has the same volume in both directions. 338 | # The inter-block demand should be 115 Mbps (per-ToR) * 32 peer ToRs * 339 | # 32 sister ToRs. 340 | self.assertEqual( 341 | 115 * 32 * 32, 342 | toy3_traffic.getDemand(TOY3_AGGR_BLOCK1, TOY3_AGGR_BLOCK2)) 343 | self.assertEqual( 344 | 115 * 32 * 32, 345 | toy3_traffic.getDemand(TOY3_AGGR_BLOCK2, TOY3_AGGR_BLOCK1)) 346 | 347 | 348 | class TestLoadToy4Net(unittest.TestCase): 349 | def test_toy4_topology_construction(self): 350 | toy4 = Topology('', input_proto=generateToy4()) 351 | self.assertEqual(5, toy4.numClusters()) 352 | # 8 + 4 nodes per cluster 353 | self.assertEqual(5 * 12, toy4.numNodes()) 354 | self.assertEqual(5 * 4, len(toy4.getAllPaths())) 355 | # All paths in Toy4 have 160G capacity. 356 | for path in toy4.getAllPaths().values(): 357 | self.assertEqual(160000, path.capacity) 358 | links = [l.name for l in toy4.findLinksOfPath(TOY4_PATH1)] 359 | self.assertTrue(TOY4_LINK1 in links) 360 | # Verify S3-S3 port and peer. 361 | self.assertEqual(TOY4_PEER_PORT1, 362 | toy4.findPeerPortOfPort(TOY4_PORT1).name) 363 | # Verify that all DCN ports have odd port indices.
364 | p1 = toy4.getPortByName(TOY4_PORT1) 365 | self.assertEqual(TOY4_PORT1, p1.name) 366 | self.assertTrue(p1.dcn_facing) 367 | self.assertEqual(1, p1.index % 2) 368 | pp1 = toy4.getPortByName(TOY4_PEER_PORT1) 369 | self.assertTrue(pp1.dcn_facing) 370 | self.assertEqual(1, pp1.index % 2) 371 | # Verify S2-S3 port and peer. 372 | self.assertEqual(TOY4_PEER_PORT2, 373 | toy4.findPeerPortOfPort(TOY4_PORT2).name) 374 | # Verify that S2-facing S3 ports have even indices. 375 | p2 = toy4.getPortByName(TOY4_PORT2) 376 | self.assertFalse(p2.dcn_facing) 377 | self.assertEqual(0, p2.index % 2) 378 | # Verify that S3-facing S2 ports have odd indices. 379 | pp2 = toy4.getPortByName(TOY4_PEER_PORT2) 380 | self.assertFalse(pp2.dcn_facing) 381 | self.assertEqual(1, pp2.index % 2) 382 | # Verify S1-S2 port and peer. 383 | self.assertEqual(TOY4_PEER_PORT3, 384 | toy4.findPeerPortOfPort(TOY4_PORT3).name) 385 | # Verify that S1-facing S2 ports have even indices. 386 | p3 = toy4.getPortByName(TOY4_PORT3) 387 | self.assertFalse(p3.dcn_facing) 388 | self.assertEqual(0, p3.index % 2) 389 | # Verify that S2-facing S1 ports have odd indices. 390 | pp3 = toy4.getPortByName(TOY4_PEER_PORT3) 391 | self.assertFalse(pp3.dcn_facing) 392 | self.assertEqual(1, pp3.index % 2) 393 | # Verify the 'virtual' parent of ToRs. 394 | self.assertEqual(TOY4_AGGR_BLOCK1, 395 | toy4.findAggrBlockOfToR(TOY4_TOR1).name) 396 | # Verify the stage and index of ToR1. 397 | self.assertEqual(1, toy4.getNodeByName(TOY4_TOR1).stage) 398 | 399 | def test_toy4_traffic_construction(self): 400 | toy4 = Topology('', input_proto=generateToy4()) 401 | traffic_proto = tmgen(tor_level=False, 402 | cluster_vector=np.array([1] * 5), 403 | num_nodes=4, 404 | model='single', 405 | dist='', 406 | netname='toy4') 407 | toy4_traffic = Traffic(toy4, '', traffic_proto) 408 | self.assertEqual(traffic_pb2.TrafficDemand.DemandType.LEVEL_AGGR_BLOCK, 409 | toy4_traffic.getDemandType()) 410 | # There should be only 1 demand. 411 | self.assertEqual(1, len(toy4_traffic.getAllDemands())) 412 | self.assertTrue((TOY4_AGGR_BLOCK1, TOY4_AGGR_BLOCK2) in \ 413 | toy4_traffic.getAllDemands()) 414 | self.assertEqual( 415 | 40000 * 4 * 0.5, 416 | toy4_traffic.getAllDemands()[(TOY4_AGGR_BLOCK1, TOY4_AGGR_BLOCK2)]) 417 | 418 | 419 | class TestLoadToy5Net(unittest.TestCase): 420 | def test_toy5_topology_construction(self): 421 | toy5 = Topology('', input_proto=generateToy5()) 422 | self.assertEqual(33, toy5.numClusters()) 423 | # 8 + 16 nodes per cluster 424 | self.assertEqual(33 * 24, toy5.numNodes()) 425 | self.assertEqual(33 * 32, len(toy5.getAllPaths())) 426 | # Path between two 40G clusters: 4 * 40 427 | self.assertEqual(160000, toy5.findCapacityOfPath(TOY5_PATH1)) 428 | # Path between a 40G cluster and a 200G cluster: 4 * 40 429 | self.assertEqual(160000, toy5.findCapacityOfPath(TOY5_PATH2)) 430 | # Path between two 200G clusters: 4 * 200 431 | self.assertEqual(800000, toy5.findCapacityOfPath(TOY5_PATH3)) 432 | links = [l.name for l in toy5.findLinksOfPath(TOY5_PATH1)] 433 | self.assertTrue(TOY5_LINK1 in links) 434 | # Verify S3-S3 port and peer. 435 | self.assertEqual(TOY5_PEER_PORT1, 436 | toy5.findPeerPortOfPort(TOY5_PORT1).name) 437 | # Verify that DCN ports have odd port indices.
438 | p1 = toy5.getPortByName(TOY5_PORT1) 439 | self.assertEqual(TOY5_PORT1, p1.name) 440 | self.assertTrue(p1.dcn_facing) 441 | self.assertEqual(1, p1.index % 2) 442 | pp1 = toy5.getPortByName(TOY5_PEER_PORT1) 443 | self.assertTrue(pp1.dcn_facing) 444 | self.assertEqual(1, pp1.index % 2) 445 | # Verify S2-S3 port and peer. 446 | self.assertEqual(TOY5_PEER_PORT2, 447 | toy5.findPeerPortOfPort(TOY5_PORT2).name) 448 | # Verify that S2-facing S3 ports have even indices. 449 | p2 = toy5.getPortByName(TOY5_PORT2) 450 | self.assertFalse(p2.dcn_facing) 451 | self.assertEqual(0, p2.index % 2) 452 | # Verify that S3-facing S2 ports have odd indices. 453 | pp2 = toy5.getPortByName(TOY5_PEER_PORT2) 454 | self.assertFalse(pp2.dcn_facing) 455 | self.assertEqual(1, pp2.index % 2) 456 | # Verify S1-S2 port and peer. 457 | self.assertEqual(TOY5_PEER_PORT3, 458 | toy5.findPeerPortOfPort(TOY5_PORT3).name) 459 | # Verify that S1-facing S2 ports have even indices. 460 | p3 = toy5.getPortByName(TOY5_PORT3) 461 | self.assertFalse(p3.dcn_facing) 462 | self.assertEqual(0, p3.index % 2) 463 | # Verify that S2-facing S1 ports have odd indices. 464 | pp3 = toy5.getPortByName(TOY5_PEER_PORT3) 465 | self.assertFalse(pp3.dcn_facing) 466 | self.assertEqual(1, pp3.index % 2) 467 | # Verify the 'virtual' parent of ToRs. 468 | self.assertEqual(TOY5_AGGR_BLOCK1, 469 | toy5.findAggrBlockOfToR(TOY5_TOR1).name) 470 | # Verify the stage and index of ToR1. 471 | self.assertEqual(1, toy5.getNodeByName(TOY5_TOR1).stage) 472 | 473 | def test_toy5_traffic_construction1(self): 474 | toy5 = Topology('', input_proto=generateToy5()) 475 | traffic_proto = tmgen(tor_level=False, 476 | cluster_vector=np.array([1] * 11 + [2.5] * 11 + 477 | [5] * 11), 478 | num_nodes=16, 479 | model='flat', 480 | dist='', 481 | netname='toy5') 482 | toy5_traffic = Traffic(toy5, '', traffic_proto) 483 | self.assertEqual(traffic_pb2.TrafficDemand.DemandType.LEVEL_AGGR_BLOCK, 484 | toy5_traffic.getDemandType()) 485 | self.assertEqual(33 * 32, len(toy5_traffic.getAllDemands())) 486 | # Flat demand matrix has the same volume in both directions. 487 | # The inter-block demand should be 160000 Mbps when tor_level=False. 488 | self.assertEqual( 489 | 160000, toy5_traffic.getDemand(TOY5_AGGR_BLOCK1, TOY5_AGGR_BLOCK2)) 490 | self.assertEqual( 491 | 160000, toy5_traffic.getDemand(TOY5_AGGR_BLOCK2, TOY5_AGGR_BLOCK1)) 492 | 493 | def test_toy5_traffic_construction2(self): 494 | toy5 = Topology('', input_proto=generateToy5()) 495 | traffic_proto = tmgen(tor_level=False, 496 | cluster_vector=np.array([1] * 11 + [2.5] * 11 + 497 | [5] * 11), 498 | num_nodes=16, 499 | model='gravity', 500 | dist='exp', 501 | netname='toy5') 502 | toy5_traffic = Traffic(toy5, '', traffic_proto) 503 | self.assertEqual(traffic_pb2.TrafficDemand.DemandType.LEVEL_AGGR_BLOCK, 504 | toy5_traffic.getDemandType()) 505 | self.assertEqual(33 * 32, len(toy5_traffic.getAllDemands())) 506 | # Gravity demand matrix should not set the 40G blocks to empty.
507 |         self.assertNotEqual(
508 |             0, toy5_traffic.getDemand(TOY5_AGGR_BLOCK1, TOY5_AGGR_BLOCK2))
509 |         self.assertNotEqual(
510 |             0, toy5_traffic.getDemand(TOY5_AGGR_BLOCK2, TOY5_AGGR_BLOCK1))
511 | 
512 | 
513 | class TestLoadF1Net(unittest.TestCase):
514 |     def test_f1_topology_construction(self):
515 |         f1 = Topology('', input_proto=generateF1())
516 |         self.assertEqual(33, f1.numClusters())
517 |         # 8 + 16 = 24 nodes per cluster.
518 |         self.assertEqual(33 * 24, f1.numNodes())
519 |         self.assertEqual(33 * 32, len(f1.getAllPaths()))
520 |         # Path between two 40G clusters: 4 * 40 Gbps = 160000 Mbps.
521 |         self.assertEqual(160000, f1.findCapacityOfPath(F1_PATH1))
522 |         # Path between a 40G cluster and a 200G cluster: 4 * 40 Gbps.
523 |         self.assertEqual(160000, f1.findCapacityOfPath(F1_PATH2))
524 |         # Path between two 200G clusters: 4 * 200 Gbps = 800000 Mbps.
525 |         self.assertEqual(800000, f1.findCapacityOfPath(F1_PATH3))
526 |         links = [l.name for l in f1.findLinksOfPath(F1_PATH1)]
527 |         self.assertIn(F1_LINK1, links)
528 |         # Verify S3-S3 port and peer.
529 |         self.assertEqual(F1_PEER_PORT1, f1.findPeerPortOfPort(F1_PORT1).name)
530 |         # Verify that DCN ports have odd port indices.
531 |         p1 = f1.getPortByName(F1_PORT1)
532 |         self.assertEqual(F1_PORT1, p1.name)
533 |         self.assertTrue(p1.dcn_facing)
534 |         self.assertEqual(1, p1.index % 2)
535 |         pp1 = f1.getPortByName(F1_PEER_PORT1)
536 |         self.assertTrue(pp1.dcn_facing)
537 |         self.assertEqual(1, pp1.index % 2)
538 |         # Verify S2-S3 port and peer.
539 |         self.assertEqual(F1_PEER_PORT2, f1.findPeerPortOfPort(F1_PORT2).name)
540 |         # Verify that S2-facing S3 ports have even indices.
541 |         p2 = f1.getPortByName(F1_PORT2)
542 |         self.assertFalse(p2.dcn_facing)
543 |         self.assertEqual(0, p2.index % 2)
544 |         # Verify that S3-facing S2 ports have odd indices.
545 |         pp2 = f1.getPortByName(F1_PEER_PORT2)
546 |         self.assertFalse(pp2.dcn_facing)
547 |         self.assertEqual(1, pp2.index % 2)
548 |         # Verify S1-S2 port and peer.
549 |         self.assertEqual(F1_PEER_PORT3, f1.findPeerPortOfPort(F1_PORT3).name)
550 |         # Verify that S1-facing S2 ports have even indices.
551 |         p3 = f1.getPortByName(F1_PORT3)
552 |         self.assertFalse(p3.dcn_facing)
553 |         self.assertEqual(0, p3.index % 2)
554 |         # Verify that S2-facing S1 ports have odd indices.
555 |         pp3 = f1.getPortByName(F1_PEER_PORT3)
556 |         self.assertFalse(pp3.dcn_facing)
557 |         self.assertEqual(1, pp3.index % 2)
558 |         # Verify the 'virtual' parent of ToRs.
559 |         self.assertEqual(F1_AGGR_BLOCK1, f1.findAggrBlockOfToR(F1_TOR1).name)
560 |         # Verify the stage and index of ToR1.
561 |         self.assertEqual(1, f1.getNodeByName(F1_TOR1).stage)
562 | 
563 | 
564 | class TestLoadF2Net(unittest.TestCase):
565 |     def test_f2_topology_construction(self):
566 |         f2 = Topology('', input_proto=generateF2())
567 |         self.assertEqual(5, f2.numClusters())
568 |         # 8 + 16 = 24 nodes per cluster.
569 |         self.assertEqual(5 * 24, f2.numNodes())
570 |         self.assertEqual(5 * 4, len(f2.getAllPaths()))
571 |         # Path between a 40G cluster and a 100G cluster: 32 * 40 Gbps.
572 |         self.assertEqual(1280000, f2.findCapacityOfPath(F2_PATH1))
573 |         # Path between a 40G cluster and a 200G cluster: 32 * 40 Gbps.
574 |         self.assertEqual(1280000, f2.findCapacityOfPath(F2_PATH2))
575 |         # Path between two 200G clusters: 32 * 200 Gbps = 6400000 Mbps.
576 |         self.assertEqual(6400000, f2.findCapacityOfPath(F2_PATH3))
577 |         links = [l.name for l in f2.findLinksOfPath(F2_PATH1)]
578 |         self.assertIn(F2_LINK1, links)
579 |         # Verify S3-S3 port and peer.
580 |         self.assertEqual(F2_PEER_PORT1, f2.findPeerPortOfPort(F2_PORT1).name)
581 |         # Verify that DCN ports have odd port indices.
582 |         p1 = f2.getPortByName(F2_PORT1)
583 |         self.assertEqual(F2_PORT1, p1.name)
584 |         self.assertTrue(p1.dcn_facing)
585 |         self.assertEqual(1, p1.index % 2)
586 |         pp1 = f2.getPortByName(F2_PEER_PORT1)
587 |         self.assertTrue(pp1.dcn_facing)
588 |         self.assertEqual(1, pp1.index % 2)
589 |         # Verify S2-S3 port and peer.
590 |         self.assertEqual(F2_PEER_PORT2, f2.findPeerPortOfPort(F2_PORT2).name)
591 |         # Verify that S2-facing S3 ports have even indices.
592 |         p2 = f2.getPortByName(F2_PORT2)
593 |         self.assertFalse(p2.dcn_facing)
594 |         self.assertEqual(0, p2.index % 2)
595 |         # Verify that S3-facing S2 ports have odd indices.
596 |         pp2 = f2.getPortByName(F2_PEER_PORT2)
597 |         self.assertFalse(pp2.dcn_facing)
598 |         self.assertEqual(1, pp2.index % 2)
599 |         # Verify S1-S2 port and peer.
600 |         self.assertEqual(F2_PEER_PORT3, f2.findPeerPortOfPort(F2_PORT3).name)
601 |         # Verify that S1-facing S2 ports have even indices.
602 |         p3 = f2.getPortByName(F2_PORT3)
603 |         self.assertFalse(p3.dcn_facing)
604 |         self.assertEqual(0, p3.index % 2)
605 |         # Verify that S2-facing S1 ports have odd indices.
606 |         pp3 = f2.getPortByName(F2_PEER_PORT3)
607 |         self.assertFalse(pp3.dcn_facing)
608 |         self.assertEqual(1, pp3.index % 2)
609 |         # Verify the 'virtual' parent of ToRs.
610 |         self.assertEqual(F2_AGGR_BLOCK1, f2.findAggrBlockOfToR(F2_TOR1).name)
611 |         # Verify the stage and index of ToR1.
612 |         self.assertEqual(1, f2.getNodeByName(F2_TOR1).stage)
613 | 
614 | 
615 | if __name__ == "__main__":
616 |     unittest.main()
617 | 
--------------------------------------------------------------------------------