├── .gitignore ├── Makefile ├── README.md ├── bolt.p4 ├── entrygen ├── __init__.py ├── bucket_allocation │ ├── __init__.py │ ├── jsonio.py │ ├── rules_operate.py │ └── sat.py ├── bucket_pattern2rule_table.py ├── config.py ├── controller.py ├── entries_generator_shadow.py ├── entries_generator_simple.py └── shadow_code_gen.py ├── recv.py ├── s1-runtime-v1.json ├── s1-runtime-v2.json ├── s1-runtime.json ├── send.py └── topology.json /.gitignore: -------------------------------------------------------------------------------- 1 | # Python byte code 2 | *.pyc 3 | *.pyo 4 | 5 | # Emacs 6 | *~ 7 | 8 | # Compiled JSON 9 | *.json 10 | !*p4app.json 11 | !topology.json 12 | !*-runtime-*.json 13 | !*-runtime.json 14 | *.pcap 15 | 16 | # Extracted solutions 17 | solution*/ 18 | !solution/ 19 | 20 | # Build folders 21 | build*/ 22 | 23 | # Logs folders 24 | logs/ 25 | *.txt 26 | 27 | # Vagrant 28 | .vagrant/ 29 | 30 | # Mac OS 31 | *.DS_Store 32 | 33 | # IntelliJ / PyCharm 34 | .idea/ 35 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | BMV2_SWITCH_EXE = simple_switch_grpc 2 | 3 | include ../../utils/Makefile 4 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # BOLT 2 | 3 | ## Requirements 4 | 5 | ### Controller 6 | 7 | The controller (Table entries generation module) requires Python2.7. The latest P4 behavior model version requires Python3.x, so we recommend conda to manage environments. We feel sorry for such inconvenience and will migrate to full python3.X as soon as possible. 8 | 9 | In Python 2.7, here are the prerequisites. 
10 | 11 | ```bash 12 | conda create -n python27 python=2.7 13 | pip install pyahocorasick 14 | ``` 15 | 16 | ### Data plane 17 | 18 | Build the [P4 tutorial VM]("https://github.com/p4lang/tutorials"), and move the dir `BOLT` into `tutorials/exercise/`. 19 | 20 | ## Running 21 | 22 | ### Generating table entries 23 | 24 | In python2.7 environment, run `controller.py`: 25 | 26 | ```bash 27 | (python27) python controller.py 28 | ``` 29 | 30 | It will generate the p4 spec file in `*-runtime.json`, which contains the table entries about the rule set. 31 | 32 | ### Sending packet 33 | 34 | 1. Run the switch: 35 | 36 | In BOLT dir: 37 | 38 | ```bash 39 | make run 40 | ``` 41 | 42 | 2. In the Mininet shell, invoke the xterm console for simulated host `h1` and `h2`: 43 | 44 | ```bash 45 | mininet> xterm h1 h2 46 | ``` 47 | 48 | Sniff the packets in `h2`: 49 | 50 | ```bash 51 | python recv.py 52 | ``` 53 | 54 | Send packets containing crafted payload in `h1`: 55 | 56 | ```bash 57 | python send.py 10.0.2.2 U "ashe" 58 | ``` 59 | 60 | `h2` displays a packet with srcport set as `2010`, which means a rule is hit. 61 | 62 | ```bash 63 | python send.py 10.0.2.2 U "asheahis" 64 | ``` 65 | 66 | `h2` displays a packet with srcport set as `1024`, which means a bucket gets overwritten. 67 | 68 | ### The files 69 | 70 | `s1-runtime-v1.json` can perform normal forwarding (to differentiate from `send_to_backend_server`, we modify the srcport into `2010`) for payload containing "she" and "his" simultaneously, as `rule1` specified. 71 | 72 | `s1-runtime-v2.json` can send back to alert (to differentiate from `normal forwarding`, we modify the srcport into `1024`) for payload containing "she" and "his" simultaneously, which indicates a bucket overwrite. 73 | 74 | ### TODO 75 | 76 | * Refactor code for better code style. 77 | * Fix possible bugs. 
78 | -------------------------------------------------------------------------------- /bolt.p4: -------------------------------------------------------------------------------- 1 | /* -*- P4_16 -*- */ 2 | #include 3 | #include 4 | 5 | const bit<16> TYPE_IPV4 = 0x800; 6 | const bit<8> TABLE_NUM = 1; 7 | 8 | const bit<16> ETHER_HEADER_LENGTH = 14; 9 | const bit<16> IPV4_HEADER_LENGTH = 20; 10 | const bit<16> ICMP_HEADER_LENGTH = 8; 11 | const bit<16> TCP_HEADER_LENGTH = 20; 12 | const bit<16> UDP_HEADER_LENGTH = 8; 13 | 14 | #define MAX_HOPS 29 15 | #define IP_PROTOCOLS_ICMP 1 16 | #define IP_PROTOCOLS_TCP 6 17 | #define IP_PROTOCOLS_UDP 17 18 | #define MAX_STRIDE 3 19 | #define MARK_RECIR 1 20 | #define MARK_DROP 2 21 | #define MARK_FORWARD 3 22 | #define MARK_SEND_BAK 4 23 | /************************************************************************* 24 | *********************** H E A D E R S *********************************** 25 | *************************************************************************/ 26 | 27 | typedef bit<8> patrn_state_t; 28 | typedef bit<9> egressSpec_t; 29 | typedef bit<48> macAddr_t; 30 | typedef bit<32> ip4Addr_t; 31 | typedef bit<8> string_t; 32 | typedef bit<16> state_t; 33 | typedef bit<8> bucket_t; 34 | typedef bit<8> bucket_counter_t; 35 | header ethernet_t { 36 | macAddr_t dstAddr; 37 | macAddr_t srcAddr; 38 | bit<16> etherType; 39 | } 40 | 41 | header ipv4_t { 42 | bit<4> version; 43 | bit<4> ihl; 44 | bit<8> diffserv; 45 | bit<16> totalLen; 46 | bit<16> identification; 47 | bit<3> flags; 48 | bit<13> fragOffset; 49 | bit<8> ttl; 50 | bit<8> protocol; 51 | bit<16> hdrChecksum; 52 | ip4Addr_t srcAddr; 53 | ip4Addr_t dstAddr; 54 | } 55 | 56 | header icmp_t { 57 | bit<8> type; 58 | bit<8> code; 59 | bit<16> icmpHdrChecksum; 60 | bit<16> id; 61 | bit<16> seq; 62 | } 63 | 64 | header tcp_t { 65 | bit<16> srcPort; 66 | bit<16> dstPort; 67 | bit<32> seqNo; 68 | bit<32> ackNo; 69 | bit<4> dataOffset; 70 | bit<3> res; 71 | bit<3> ecn; 72 | 
bit<6> ctrl; 73 | bit<16> window; 74 | bit<16> checksum; 75 | bit<16> urgentPtr; 76 | } 77 | 78 | header udp_t { 79 | bit<16> srcPort; 80 | bit<16> dstPort; 81 | bit<16> length_; 82 | bit<16> checksum; 83 | } 84 | 85 | header patrn_t { 86 | bit<8> pattern; 87 | } 88 | 89 | header bucket_array_t { 90 | bucket_t b1; 91 | bucket_t b2; 92 | bucket_t b3; 93 | bucket_counter_t bc1; 94 | bucket_counter_t bc2; 95 | bucket_counter_t bc3; 96 | } 97 | 98 | 99 | 100 | 101 | 102 | struct metadata { 103 | state_t state; 104 | bit<8> pattern_num; 105 | bit<16> payload_length; 106 | bit<16> non_payload_length; 107 | bit<8> flags;// 1 recir 2 drop 3 accept 4 send to backend 108 | bit<8> one_pass_pattern_num; 109 | bit<8> stride; 110 | bit<1> non_first_pass; 111 | 112 | // patrn_state_t pattern_state; //for multi-pattern logic 113 | bucket_t b1; 114 | bucket_t b2; 115 | bucket_t b3; 116 | bucket_counter_t bc1; 117 | bucket_counter_t bc2; 118 | bucket_counter_t bc3; 119 | } 120 | 121 | struct headers { 122 | @name("ethernet") 123 | ethernet_t ethernet; 124 | @name("ipv4") 125 | ipv4_t ipv4; 126 | @name("icmp") 127 | icmp_t icmp; 128 | @name("tcp") 129 | tcp_t tcp; 130 | @name("udp") 131 | udp_t udp; 132 | patrn_t[MAX_HOPS] patrns; 133 | } 134 | 135 | /************************************************************************* 136 | *********************** P A R S E R *********************************** 137 | *************************************************************************/ 138 | 139 | parser ParserImpl(packet_in packet, 140 | out headers hdr, 141 | inout metadata meta, 142 | inout standard_metadata_t standard_metadata) { 143 | 144 | state start { 145 | transition parse_ethernet; 146 | } 147 | 148 | state parse_ethernet { 149 | packet.extract(hdr.ethernet); 150 | // meta.non_first_pass = 1; 151 | meta.non_payload_length = ETHER_HEADER_LENGTH; 152 | transition select(hdr.ethernet.etherType) { 153 | TYPE_IPV4: parse_ipv4; 154 | default: accept; 155 | } 156 | } 157 | 158 | state 
parse_ipv4 { 159 | packet.extract(hdr.ipv4); 160 | meta.non_payload_length = meta.non_payload_length + IPV4_HEADER_LENGTH;//34 161 | 162 | transition select(hdr.ipv4.protocol){ 163 | IP_PROTOCOLS_ICMP: parse_icmp; 164 | IP_PROTOCOLS_TCP: parse_tcp; 165 | IP_PROTOCOLS_UDP: parse_udp; 166 | default: accept; 167 | } 168 | } 169 | 170 | state parse_icmp { 171 | packet.extract(hdr.icmp); 172 | meta.non_payload_length = meta.non_payload_length + ICMP_HEADER_LENGTH; 173 | meta.pattern_num = 0; 174 | 175 | meta.payload_length = hdr.ipv4.totalLen + 14 - meta.non_payload_length; 176 | transition prepare_parse_pattern; 177 | } 178 | 179 | state parse_tcp { 180 | packet.extract(hdr.tcp); 181 | meta.non_payload_length = meta.non_payload_length + TCP_HEADER_LENGTH; 182 | meta.pattern_num = 0; 183 | meta.payload_length = hdr.ipv4.totalLen + 14 - meta.non_payload_length; 184 | transition prepare_parse_pattern; 185 | } 186 | 187 | state parse_udp { 188 | packet.extract(hdr.udp); 189 | meta.non_payload_length = meta.non_payload_length + UDP_HEADER_LENGTH; 190 | meta.pattern_num = 0; 191 | meta.payload_length = hdr.ipv4.totalLen + 14 - meta.non_payload_length; 192 | transition prepare_parse_pattern; 193 | } 194 | 195 | state prepare_parse_pattern { 196 | transition select(meta.payload_length) { 197 | 0: accept; 198 | default: parse_pattern; 199 | } 200 | } 201 | 202 | state parse_pattern{ 203 | packet.extract(hdr.patrns.next); 204 | meta.pattern_num = meta.pattern_num + 1; 205 | meta.payload_length = meta.payload_length - 1; 206 | transition select(meta.payload_length) { 207 | 0: accept; 208 | default: parse_pattern; 209 | } 210 | } 211 | } 212 | 213 | 214 | /************************************************************************* 215 | ************ C H E C K S U M V E R I F I C A T I O N ************* 216 | *************************************************************************/ 217 | 218 | control verifyChecksum(inout headers hdr, inout metadata meta) { 219 | apply { } 220 | } 
221 | 222 | 223 | /************************************************************************* 224 | ************** I N G R E S S P R O C E S S I N G ******************* 225 | *************************************************************************/ 226 | 227 | control MyIngress(inout headers hdr, 228 | inout metadata meta, 229 | inout standard_metadata_t standard_metadata) { 230 | 231 | 232 | //********** write the root state ID into metadate from table entries *************** 233 | action a_get_root_state(state_t root_state){ 234 | meta.state = root_state; 235 | meta.bc1 = 0; 236 | meta.bc2 = 0; 237 | meta.bc3 = 0; 238 | 239 | } 240 | 241 | table t_get_root_state{ 242 | key = {} 243 | actions ={ 244 | a_get_root_state; 245 | } 246 | } 247 | //***** k-stride DFA table **************************************************** 248 | action a_drop() { 249 | mark_to_drop(standard_metadata); 250 | } 251 | 252 | action a_nop() {} 253 | 254 | // action a_set_state_1(state_t _state, patrn_state_t modifier){ 255 | // meta.state = _state; 256 | // hdr.patrns.pop_front(1); 257 | // hdr.ipv4.totalLen = hdr.ipv4.totalLen - 1; 258 | // meta.pattern_num = meta.pattern_num - 1; 259 | // meta.pattern_state = meta.pattern_state | modifier; 260 | // } 261 | 262 | // action a_set_state_2(state_t _state, patrn_state_t modifier){ 263 | // meta.state = _state; 264 | // hdr.patrns.pop_front(2); 265 | // hdr.ipv4.totalLen = hdr.ipv4.totalLen - 2; 266 | // meta.pattern_num = meta.pattern_num - 2; 267 | // meta.pattern_state = meta.pattern_state | modifier; 268 | 269 | // } 270 | 271 | // action a_set_state_3(state_t _state, patrn_state_t modifier){ 272 | // meta.state = _state; 273 | // hdr.patrns.pop_front(3); 274 | // hdr.ipv4.totalLen = hdr.ipv4.totalLen - 3; 275 | // meta.pattern_num = meta.pattern_num - 3; 276 | // meta.pattern_state = meta.pattern_state | modifier; 277 | 278 | // } 279 | 280 | 281 | 282 | 283 | // table t_DFA_match_0 { 284 | // key = { 285 | // hdr.patrns[0].pattern: 
ternary; 286 | // hdr.patrns[1].pattern: ternary; 287 | 288 | // // hdr.patrns[2].pattern: ternary; 289 | // meta.state: ternary; 290 | // } 291 | // actions = { 292 | // a_set_state_1; 293 | // a_set_state_2; 294 | // a_set_state_3; 295 | // a_drop; 296 | // } 297 | // size = 1024; 298 | // } 299 | action a_set_state_1(state_t _state) 300 | { 301 | meta.state = _state; 302 | hdr.patrns.pop_front(1); 303 | hdr.ipv4.totalLen = hdr.ipv4.totalLen - 1; 304 | meta.pattern_num = meta.pattern_num - 1; 305 | } 306 | action a_set_state_2(state_t _state) 307 | { 308 | meta.state = _state; 309 | hdr.patrns.pop_front(2); 310 | hdr.ipv4.totalLen = hdr.ipv4.totalLen - 2; 311 | meta.pattern_num = meta.pattern_num - 2; 312 | } 313 | action a_set_state_3(state_t _state) 314 | { 315 | meta.state = _state; 316 | hdr.patrns.pop_front(3); 317 | hdr.ipv4.totalLen = hdr.ipv4.totalLen - 3; 318 | meta.pattern_num = meta.pattern_num - 3; 319 | } 320 | action a_set_state_1_b1(state_t _state, bucket_t pattern_code) 321 | { 322 | meta.state = _state; 323 | hdr.patrns.pop_front(1); 324 | hdr.ipv4.totalLen = hdr.ipv4.totalLen - 1; 325 | meta.pattern_num = meta.pattern_num - 1; 326 | meta.b1 = pattern_code; 327 | meta.bc1 = meta.bc1 + 1; 328 | } 329 | action a_set_state_1_b2(state_t _state, bucket_t pattern_code) 330 | { 331 | meta.state = _state; 332 | hdr.patrns.pop_front(1); 333 | hdr.ipv4.totalLen = hdr.ipv4.totalLen - 1; 334 | meta.pattern_num = meta.pattern_num - 1; 335 | meta.b2 = pattern_code; 336 | meta.bc2 = meta.bc2 + 1; 337 | } 338 | action a_set_state_1_b3(state_t _state, bucket_t pattern_code) 339 | { 340 | meta.state = _state; 341 | hdr.patrns.pop_front(1); 342 | hdr.ipv4.totalLen = hdr.ipv4.totalLen - 1; 343 | meta.pattern_num = meta.pattern_num - 1; 344 | meta.b3 = pattern_code; 345 | meta.bc3 = meta.bc3 + 1; 346 | } 347 | action a_set_state_2_b1(state_t _state, bucket_t pattern_code) 348 | { 349 | meta.state = _state; 350 | hdr.patrns.pop_front(2); 351 | hdr.ipv4.totalLen = 
hdr.ipv4.totalLen - 2; 352 | meta.pattern_num = meta.pattern_num - 2; 353 | meta.b1 = pattern_code; 354 | meta.bc1 = meta.bc1 + 1; 355 | } 356 | action a_set_state_2_b2(state_t _state, bucket_t pattern_code) 357 | { 358 | meta.state = _state; 359 | hdr.patrns.pop_front(1); 360 | hdr.ipv4.totalLen = hdr.ipv4.totalLen - 2; 361 | meta.pattern_num = meta.pattern_num - 2; 362 | meta.b2 = pattern_code; 363 | meta.bc2 = meta.bc2 + 1; 364 | } 365 | action a_set_state_2_b3(state_t _state, bucket_t pattern_code) 366 | { 367 | meta.state = _state; 368 | hdr.patrns.pop_front(2); 369 | hdr.ipv4.totalLen = hdr.ipv4.totalLen - 2; 370 | meta.pattern_num = meta.pattern_num - 2; 371 | meta.b3 = pattern_code; 372 | meta.bc3 = meta.bc3 + 1; 373 | } 374 | action a_set_state_3_b1(state_t _state, bucket_t pattern_code) 375 | { 376 | meta.state = _state; 377 | hdr.patrns.pop_front(3); 378 | hdr.ipv4.totalLen = hdr.ipv4.totalLen - 3; 379 | meta.pattern_num = meta.pattern_num - 3; 380 | meta.b1 = pattern_code; 381 | meta.bc1 = meta.bc1 + 1; 382 | } 383 | action a_set_state_3_b2(state_t _state, bucket_t pattern_code) 384 | { 385 | meta.state = _state; 386 | hdr.patrns.pop_front(3); 387 | hdr.ipv4.totalLen = hdr.ipv4.totalLen - 3; 388 | meta.pattern_num = meta.pattern_num - 3; 389 | meta.b2 = pattern_code; 390 | meta.bc2 = meta.bc2 + 1; 391 | } 392 | action a_set_state_3_b3(state_t _state, bucket_t pattern_code) 393 | { 394 | meta.state = _state; 395 | hdr.patrns.pop_front(3); 396 | hdr.ipv4.totalLen = hdr.ipv4.totalLen - 3; 397 | meta.pattern_num = meta.pattern_num - 3; 398 | meta.b3 = pattern_code; 399 | meta.bc3 = meta.bc3+ 1; 400 | } 401 | table t_DFA_match_0 { 402 | key = { 403 | hdr.patrns[0].pattern: ternary; 404 | hdr.patrns[1].pattern: ternary; 405 | 406 | // hdr.patrns[2].pattern: ternary; 407 | meta.state: ternary; 408 | } 409 | actions = { 410 | a_set_state_1_b1; 411 | a_set_state_1_b2; 412 | a_set_state_1_b3; 413 | a_set_state_2_b1; 414 | a_set_state_2_b2; 415 | a_set_state_2_b3; 
416 | // a_set_state_3; 417 | a_set_state_1; 418 | a_set_state_2; 419 | a_set_state_3; 420 | a_drop; 421 | a_nop; 422 | } 423 | size = 1024; 424 | } 425 | table t_DFA_match_1 { 426 | key = { 427 | hdr.patrns[0].pattern: ternary; 428 | hdr.patrns[1].pattern: ternary; 429 | 430 | // hdr.patrns[2].pattern: ternary; 431 | meta.state: ternary; 432 | } 433 | actions = { 434 | a_set_state_1_b1; 435 | a_set_state_1_b2; 436 | a_set_state_1_b3; 437 | a_set_state_2_b1; 438 | a_set_state_2_b2; 439 | a_set_state_2_b3; 440 | // a_set_state_3; 441 | a_set_state_1; 442 | a_set_state_2; 443 | a_set_state_3; 444 | a_drop; 445 | a_nop; 446 | } 447 | size = 1024; 448 | } 449 | table t_DFA_match_2 { 450 | key = { 451 | hdr.patrns[0].pattern: ternary; 452 | hdr.patrns[1].pattern: ternary; 453 | 454 | // hdr.patrns[2].pattern: ternary; 455 | meta.state: ternary; 456 | } 457 | actions = { 458 | a_set_state_1_b1; 459 | a_set_state_1_b2; 460 | a_set_state_1_b3; 461 | a_set_state_2_b1; 462 | a_set_state_2_b2; 463 | a_set_state_2_b3; 464 | // a_set_state_3; 465 | a_set_state_1; 466 | a_set_state_2; 467 | a_set_state_3; 468 | a_drop; 469 | a_nop; 470 | } 471 | size = 1024; 472 | } 473 | 474 | //******Rule Table depending on meta.bucket array 475 | action a_mark_as_to_recirculate(){ 476 | meta.flags = MARK_RECIR; 477 | meta.non_first_pass = 1; 478 | } 479 | 480 | action a_mark_as_to_drop(){ 481 | meta.flags = MARK_DROP; 482 | } 483 | // action a_set_recir 484 | action a_mark_as_to_forward() { 485 | meta.flags = MARK_FORWARD; 486 | } 487 | action a_mark_as_to_send_backend() { 488 | meta.flags = MARK_SEND_BAK; 489 | } 490 | 491 | table t_pattern2rule { 492 | key = { 493 | meta.b1: ternary; 494 | meta.b2: ternary; 495 | // meta.b3: ternary; 496 | } 497 | actions = { 498 | a_mark_as_to_drop; 499 | a_mark_as_to_forward; 500 | a_mark_as_to_send_backend; 501 | } 502 | default_action = a_mark_as_to_send_backend; 503 | } 504 | // //***** Policy Table depending on meta.Pattern_state 
*********************** 505 | 506 | // action a_set_lpm(){ 507 | // meta.flags = 3; 508 | // } 509 | // table t_policy { 510 | // key = { 511 | // meta.pattern_state: ternary; 512 | // } 513 | // actions = { 514 | // a_drop; 515 | // a_set_lpm; 516 | // } 517 | // size = 1024; 518 | // } 519 | // ***************** send back 520 | action a_send_back() 521 | { 522 | standard_metadata.egress_spec = 2; 523 | hdr.udp.srcPort = 1024; 524 | // hdr.ethernet.srcAddr = hdr.ethernet.dstAddr; 525 | // hdr.ethernet.dstAddr = dstAddr; 526 | 527 | } 528 | table t_send_back { 529 | actions = { 530 | a_send_back; 531 | } 532 | default_action = a_send_back; 533 | } 534 | // ***************** send to h2 535 | action a_send_h2() 536 | { 537 | standard_metadata.egress_spec = 2; 538 | hdr.udp.srcPort = 2010; 539 | // hdr.ethernet.srcAddr = hdr.ethernet.dstAddr; 540 | // hdr.ethernet.dstAddr = dstAddr; 541 | 542 | } 543 | table t_send_h2 { 544 | actions = { 545 | a_send_h2; 546 | } 547 | default_action = a_send_h2; 548 | } 549 | //***** ipv4_lpm table **************************************************** 550 | action a_ipv4_forward(macAddr_t dstAddr, egressSpec_t port) { 551 | standard_metadata.egress_spec = port; 552 | hdr.ethernet.srcAddr = hdr.ethernet.dstAddr; 553 | hdr.ethernet.dstAddr = dstAddr; 554 | hdr.ipv4.ttl = hdr.ipv4.ttl - 1; 555 | } 556 | 557 | table t_ipv4_lpm { 558 | key = { 559 | hdr.ipv4.dstAddr: lpm; 560 | } 561 | actions = { 562 | a_ipv4_forward; 563 | a_drop; 564 | } 565 | size = 1024; 566 | } 567 | 568 | 569 | //***************************************************************************** 570 | apply { 571 | if (hdr.patrns[0].isValid()) 572 | { 573 | 574 | if(meta.non_first_pass == 0) 575 | { 576 | t_get_root_state.apply(); 577 | } 578 | 579 | if (meta.bc1>1 || meta.bc2>1 || meta.bc3 > 1) 580 | { 581 | a_mark_as_to_send_backend(); 582 | } 583 | else { 584 | if (meta.pattern_num > 0) 585 | { 586 | t_DFA_match_0.apply(); 587 | } 588 | 589 | } 590 | 591 | if 
(meta.bc1>1 || meta.bc2>1 || meta.bc3 > 1) 592 | { 593 | a_mark_as_to_send_backend(); 594 | } 595 | else { 596 | if (meta.pattern_num > 0) 597 | { 598 | t_DFA_match_1.apply(); 599 | } 600 | } 601 | 602 | if (meta.bc1>1 || meta.bc2>1 || meta.bc3 > 1) 603 | { 604 | a_mark_as_to_send_backend(); 605 | } 606 | else if (meta.flags != MARK_SEND_BAK) 607 | { 608 | if (meta.pattern_num > 0) 609 | { 610 | a_mark_as_to_recirculate(); 611 | } 612 | else { 613 | t_pattern2rule.apply(); 614 | } 615 | } 616 | else{ 617 | a_mark_as_to_send_backend(); 618 | } 619 | // else{ 620 | 621 | // } 622 | 623 | 624 | } 625 | 626 | if (meta.flags == MARK_DROP) 627 | { 628 | a_drop(); 629 | } 630 | if (meta.flags == MARK_FORWARD) 631 | { 632 | // t_send.apply() 633 | t_send_h2.apply(); 634 | } 635 | if ( meta.flags == MARK_SEND_BAK) 636 | { 637 | t_send_back.apply(); 638 | } 639 | 640 | // if (meta.flags == 3) 641 | // { 642 | // t_ipv4_lpm.apply(); 643 | // } 644 | } 645 | } 646 | 647 | /************************************************************************* 648 | **************** E G R E S S P R O C E S S I N G ******************* 649 | *************************************************************************/ 650 | 651 | control MyEgress(inout headers hdr, 652 | inout metadata meta, 653 | inout standard_metadata_t standard_metadata) { 654 | 655 | apply { 656 | if (hdr.ipv4.isValid()) 657 | { 658 | if (meta.flags == MARK_RECIR ) 659 | { 660 | recirculate(meta); 661 | } 662 | } 663 | } 664 | } 665 | 666 | /************************************************************************* 667 | ************* C H E C K S U M C O M P U T A T I O N ************** 668 | *************************************************************************/ 669 | 670 | control computeChecksum(inout headers hdr, inout metadata meta) { 671 | apply { 672 | update_checksum( 673 | hdr.ipv4.isValid(), 674 | { 675 | hdr.ipv4.version, 676 | hdr.ipv4.ihl, 677 | hdr.ipv4.diffserv, 678 | hdr.ipv4.totalLen, 679 | 
hdr.ipv4.identification, 680 | hdr.ipv4.flags, 681 | hdr.ipv4.fragOffset, 682 | hdr.ipv4.ttl, 683 | hdr.ipv4.protocol, 684 | hdr.ipv4.srcAddr, 685 | hdr.ipv4.dstAddr 686 | }, 687 | hdr.ipv4.hdrChecksum, 688 | HashAlgorithm.csum16); 689 | } 690 | } 691 | 692 | /************************************************************************* 693 | *********************** D E P A R S E R ******************************* 694 | *************************************************************************/ 695 | 696 | control DeparserImpl(packet_out packet, in headers hdr) { 697 | apply { 698 | packet.emit(hdr); 699 | } 700 | } 701 | 702 | /************************************************************************* 703 | *********************** S W I T C H ******************************* 704 | *************************************************************************/ 705 | V1Switch(ParserImpl(), verifyChecksum(), MyIngress(), MyEgress(), computeChecksum(), DeparserImpl()) main; 706 | -------------------------------------------------------------------------------- /entrygen/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wangshicheng1225/BOLT-v2/d31b64f7a515b9911b764a60e2c0381e85a034f1/entrygen/__init__.py -------------------------------------------------------------------------------- /entrygen/bucket_allocation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wangshicheng1225/BOLT-v2/d31b64f7a515b9911b764a60e2c0381e85a034f1/entrygen/bucket_allocation/__init__.py -------------------------------------------------------------------------------- /entrygen/bucket_allocation/jsonio.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | import json 3 | # save data to json file 4 | def store(data,filename = 'data.json'): 5 | with open(filename, 'w') as fw: 6 | # json_str = 
json.dumps(data) 7 | # fw.write(json_str) 8 | # 9 | json.dump(data,fw) 10 | # load json data from file 11 | def load(filename = 'data.json'): 12 | with open(filename,'r') as f: 13 | data = json.load(f) 14 | return data 15 | def test(): 16 | args = sys.argv 17 | if len(args)==1: 18 | print('Hello, world!') 19 | elif len(args)==2: 20 | print('Hello, %s!' % args[1]) 21 | else: 22 | print('Too many arguments!') 23 | 24 | if __name__ == '__main__': 25 | a = {"a":1} 26 | store(a) -------------------------------------------------------------------------------- /entrygen/bucket_allocation/rules_operate.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # please run in python3 3 | from scapy.all import * 4 | import jsonio as jsio 5 | import sat as sat 6 | 7 | 8 | 9 | def process_ruleset(file_name = "snort297_ascii_content_ruleset.json"): 10 | 11 | # trim out the empty rule only 12 | 13 | ruleset = jsio.load(file_name) 14 | # preprocess the ruleset to remove rules with no pattern 15 | 16 | non_empty_ruleset = {key:pattern_lst for key, pattern_lst in ruleset.items() if (len(pattern_lst) > 0 )} 17 | processed_ruleset = non_empty_ruleset 18 | 19 | print(processed_ruleset) 20 | pattern_to_bucket_dict, idx_to_pattern_dict, pattern_to_idx_dict, bucket_num = sat.allocate_bucket(processed_ruleset) 21 | bucket_num_dict = {} 22 | bucket_num_dict["bktnum"] = bucket_num 23 | jsio.store(processed_ruleset, "non_empty_snort297_ascii_content_ruleset.json") 24 | jsio.store(pattern_to_bucket_dict,filename="non_empty_pattern_to_bucket_dict.json") 25 | jsio.store(idx_to_pattern_dict, "non_empty_idx_to_pattern_dict.json") 26 | jsio.store(pattern_to_idx_dict, "non_empty_pattern_to_idx_dict.json") 27 | jsio.store(bucket_num_dict, "non_empty_bucket_num_dict.json") 28 | return 0 -------------------------------------------------------------------------------- /entrygen/bucket_allocation/sat.py: 
-------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # please run in python3 3 | from math import log2 4 | from z3 import * 5 | import numpy as np 6 | import jsonio as jsio 7 | 8 | # TODO: the ruledata in json has a pattern lst may containing duplicated patterns, 9 | # such as rule 1156 10 | # the rules should be re-processed 11 | 12 | def pre_process_ruleset(ruleset, bucket_num_limit = 8): 13 | _rule_set = {} 14 | drop_counter = 0 15 | i = 0 16 | for _sid, _pattern_lst in ruleset.items(): 17 | # _rule_set[_sid] = list(set(_pattern_lst)) 18 | _lst = list(set(_pattern_lst)) 19 | if (len(_lst) > bucket_num_limit): 20 | drop_counter += 1 21 | continue 22 | _rule_set[i] = _lst 23 | i += 1 24 | print("drop overlimited rules: ", drop_counter) 25 | return _rule_set 26 | 27 | def aggreate_pattern_set(ruleset): 28 | pattern_list = [] 29 | for _sid, _pattern_lst in ruleset.items(): 30 | # print(_sid, _pattern_lst) 31 | for pattern in _pattern_lst: 32 | if not (pattern in pattern_list): 33 | pattern_list.append(pattern) 34 | 35 | 36 | idx_to_pattern_dict = {} 37 | 38 | pattern_to_idx_dict = {} 39 | i = 0 40 | for pattern in pattern_list: 41 | idx_to_pattern_dict[i] = pattern 42 | pattern_to_idx_dict[pattern] = i 43 | i += 1 44 | return (idx_to_pattern_dict, pattern_to_idx_dict) 45 | 46 | def parse_rule(ruleset): 47 | # return the lower limit of bucket_size 48 | bucket_num = 0 49 | for _sid, _pattern_lst in ruleset.items(): 50 | if len(_pattern_lst) > bucket_num: 51 | bucket_num = len(_pattern_lst) 52 | return bucket_num 53 | 54 | 55 | def generate_r0(pattern_set, bucket_num): 56 | clauses = "" 57 | return clauses 58 | 59 | def generate_ri(pattern_set, bucket_num, ruleset): 60 | clauses = "" 61 | 62 | return clauses 63 | 64 | def generate_solution_set(solution_matrix, pattern_set_size, bucket_num, _model): 65 | solution_set = set() 66 | for i in range(pattern_set_size): 67 | for j in range(bucket_num): 68 | # mat_ele = 
"x"+str(i)+str(j) 69 | if is_true(_model[solution_matrix[i][j]]) == True: 70 | solution_set.add(solution_matrix[i][j] == True) 71 | elif is_false(_model[solution_matrix[i][j]]): 72 | solution_set.add(solution_matrix[i][j] == False) 73 | else: 74 | pass 75 | 76 | return solution_set 77 | 78 | def check_buckets_alloc(solution_matrix, pattern_set_size, bucket_num, 79 | _model, idx_to_pattern_dict): 80 | print("check buckets_alloc ") 81 | for i in range(pattern_set_size): 82 | non_zero_counter = 0 83 | bucket_lst = [] 84 | for j in range(bucket_num): 85 | if is_true(_model[solution_matrix[i][j]]) == True: 86 | non_zero_counter += 1 87 | bucket_lst.append(j) 88 | 89 | assert (non_zero_counter != 0), str(idx_to_pattern_dict[i]) + " in no bucket!" 90 | assert (non_zero_counter == 1), str(idx_to_pattern_dict[i]) + " in multiple bucket!" + str(bucket_lst) 91 | return True 92 | 93 | def check_rules(solution_matrix, bucket_num, 94 | ruleset, _model, pattern_to_idx_dict): 95 | print("check rules ") 96 | for _sid, _pattern_lst in ruleset.items(): 97 | # print(_sid, _pattern_lst) 98 | bucket_dict = {} 99 | for pattern in _pattern_lst: 100 | i = pattern_to_idx_dict[pattern] 101 | for j in range(bucket_num): 102 | if is_true(_model[solution_matrix[i][j]]) == True: 103 | assert not (j in bucket_dict.keys()), "rule " + str(_sid) + ": bucket " + str(j) +\ 104 | " has multiple patterns: "+ str(bucket_dict[j]) + " " + pattern 105 | bucket_dict[j] = [] 106 | bucket_dict[j].append(pattern) 107 | return True 108 | def check_clauses(solution_matrix, pattern_set_size, bucket_num, ruleset, patternset,_model): 109 | # check if there is a pattern in no bucekt 110 | for i in range(pattern_set_size): 111 | non_zero_counter = 0 112 | for j in range(bucket_num): 113 | if is_true(_model[solution_matrix[i][j]]) == True: 114 | non_zero_counter += 1 115 | 116 | # assert (non_zero_counter != 0), str(idx_to_pattern_dict[i]) + " in no bucket!" 
117 | # assert (non_zero_counter == 1), str(idx_to_pattern_dict[i]) + " in multiple bucket!" 118 | 119 | 120 | # check if there is a pattern in multiple bucket 121 | 122 | # check if there is a rule violated 123 | 124 | return False 125 | 126 | def generate_mat_table_entries(solution_matrix, _model, pattern_to_idx_dict, bucket_num, ruleset): 127 | 128 | pattern_set_size = len(pattern_to_idx_dict) 129 | 130 | 131 | # generate pattern_to_binstr map 132 | # a = f'{s:0{l}b}' 133 | pattern_to_binstr_dict = {} 134 | bit_width = math.ceil(log2(pattern_set_size)) 135 | for pattern, idx in pattern_to_idx_dict.items(): 136 | binstr = f'{idx:0{bit_width}b}' 137 | pattern_to_binstr_dict[pattern] = binstr 138 | 139 | # generate pattern_to_bucket map 140 | pattern_to_bucket_dict = {} 141 | for pattern, idx in pattern_to_idx_dict.items(): 142 | non_zero_counter = 0 143 | for j in range(bucket_num): 144 | if is_true(_model[solution_matrix[idx][j]]) == True: 145 | pattern_to_bucket_dict[pattern] = j 146 | 147 | print("pattern_to_binstr_dict: ", pattern_to_binstr_dict) 148 | print("pattern_to_bucket_dict: ", pattern_to_bucket_dict) 149 | mat_entry_lst = [] 150 | # ( ['110','010','000'], 'sid') 151 | for _sid, pattern_lst in ruleset.items(): 152 | bucket_dict = {} 153 | for pattern in pattern_lst: 154 | pattern_bin_str = pattern_to_binstr_dict[pattern] 155 | bucket_id = pattern_to_bucket_dict[pattern] 156 | bucket_dict[bucket_id] = pattern_bin_str 157 | 158 | bucket_lst = [] 159 | for i in range(bucket_num): 160 | if i in bucket_dict.keys(): 161 | bucket_lst.append(bucket_dict[i]) 162 | else: 163 | # bucket_lst.append(f'{0:0{bit_width}b}') # TODO: all zero or all * ?? 
164 | bucket_lst.append("*"*bit_width) 165 | entry = (bucket_lst, _sid) 166 | mat_entry_lst.append(entry) 167 | return mat_entry_lst 168 | 169 | 170 | def allocate_bucket(rule_data, para_bucket_num = -1): 171 | # return pattern_to_bucket_dict 172 | 173 | idx_to_pattern_dict, pattern_to_idx_dict = aggreate_pattern_set(rule_data) 174 | print(idx_to_pattern_dict) 175 | print("*****************************") 176 | 177 | if para_bucket_num == -1: 178 | bucket_num = parse_rule(rule_data) 179 | else: 180 | bucket_num = para_bucket_num 181 | pattern_set_size = len(idx_to_pattern_dict) 182 | 183 | assert len(idx_to_pattern_dict) == len(pattern_to_idx_dict), \ 184 | "not len(idx_to_pattern_dict) == len(pattern_to_idx_dict)" 185 | matx = [[0 for j in range(bucket_num)] for i in range(pattern_set_size)] 186 | print("pattern_set_size: ", pattern_set_size) 187 | print("bucket_num: ", bucket_num) 188 | # print(matx[3][1]) 189 | s = Solver() 190 | for i in range(pattern_set_size): 191 | for j in range(bucket_num): 192 | matx[i][j] = Bool("x"+str(i)+'_'+str(j)) 193 | # r0 194 | for i in range(pattern_set_size): 195 | clause_idx = Or([matx[i][ii] for ii in range(bucket_num)]) 196 | s.add(clause_idx) 197 | for j in range(bucket_num): 198 | for k in range(bucket_num): 199 | # clause_idx=Or() 200 | if k > j: 201 | clause_idx = Or([Not(matx[i][j]), Not(matx[i][k])]) 202 | s.add(clause_idx) 203 | # print(s.check()) 204 | # print(s.model()) 205 | 206 | 207 | # ri 208 | 209 | for _sid, _pattern_lst in rule_data.items(): 210 | pattern_idx_lst = [ pattern_to_idx_dict[pattern] for pattern in _pattern_lst ] 211 | for pattern_idx in pattern_idx_lst: 212 | for bucket_jdx in range(bucket_num): 213 | for pattern_kdx in pattern_idx_lst: 214 | if pattern_kdx > pattern_idx: 215 | clause_idx = Or( [ Not(matx[pattern_idx][bucket_jdx]), Not(matx[pattern_kdx][bucket_jdx])]) 216 | # print(clause_idx) 217 | s.add(clause_idx) 218 | assert(s.check() == sat) 219 | print(s.check()) 220 | print(s.model()) 221 | 
222 | check_buckets_alloc(solution_matrix = matx, pattern_set_size = pattern_set_size, bucket_num = bucket_num, 223 | _model= s.model(), idx_to_pattern_dict = idx_to_pattern_dict) 224 | check_rules(solution_matrix = matx, bucket_num = bucket_num, 225 | ruleset = rule_data, _model= s.model(), pattern_to_idx_dict = pattern_to_idx_dict) 226 | 227 | # generate pattern_to_bucket map 228 | pattern_to_bucket_dict = {} 229 | for pattern, idx in pattern_to_idx_dict.items(): 230 | non_zero_counter = 0 231 | for j in range(bucket_num): 232 | if is_true(s.model()[matx[idx][j]]) == True: 233 | pattern_to_bucket_dict[pattern] = j 234 | 235 | # print("pattern_to_binstr_dict: ", pattern_to_binstr_dict) 236 | print("pattern_to_bucket_dict: ", pattern_to_bucket_dict) 237 | return pattern_to_bucket_dict, idx_to_pattern_dict, pattern_to_idx_dict, bucket_num 238 | 239 | if __name__=='__main__': 240 | 241 | # rule_data = { 242 | # 1: ["p1","p2"], 243 | # 2: ["p3","p2"], 244 | # 3: ["p4","p5"] 245 | # } 246 | # rule_data = { 247 | # 1: ["p0","p1","p2"], 248 | # 2: ["p3","p4"], 249 | # # 2: ["p3","p2"], 250 | # # 3: ["p4","p5"] 251 | # } 252 | # rule_data = { 253 | # "1": [], "105": ["2|00 00 00 06 00 00 00|Drives|24 00|"], "108": ["qazwsx.hsq"], 254 | # } 255 | 256 | # rule_data = { 257 | # 1: ["p1","p2"], 258 | # 2: ["p3","p2"], 259 | # # 3: ["p1","p3"] 260 | # } 261 | raw_rule_data = jsio.load("snort297_ruleset.json") 262 | # raw_rule_data = jsio.load("suricata500_ruleset.json") 263 | 264 | rule_data = pre_process_ruleset(raw_rule_data, bucket_num_limit=100) 265 | # exit(0) 266 | print(rule_data) 267 | # exit(0) 268 | idx_to_pattern_dict, pattern_to_idx_dict = aggreate_pattern_set(rule_data) 269 | print(idx_to_pattern_dict) 270 | print("*****************************") 271 | 272 | bucket_num = parse_rule(rule_data) 273 | pattern_set_size = len(idx_to_pattern_dict) 274 | 275 | assert len(idx_to_pattern_dict) == len(pattern_to_idx_dict), \ 276 | "not len(idx_to_pattern_dict) == 
len(pattern_to_idx_dict)" 277 | matx = [[0 for j in range(bucket_num)] for i in range(pattern_set_size)] 278 | print("pattern_set_size: ", pattern_set_size) 279 | print("bucket_num: ", bucket_num) 280 | # print(matx[3][1]) 281 | s = Solver() 282 | for i in range(pattern_set_size): 283 | for j in range(bucket_num): 284 | matx[i][j] = Bool("x"+str(i)+'_'+str(j)) 285 | # r0 286 | for i in range(pattern_set_size): 287 | clause_idx = Or([matx[i][ii] for ii in range(bucket_num)]) 288 | s.add(clause_idx) 289 | for j in range(bucket_num): 290 | for k in range(bucket_num): 291 | # clause_idx=Or() 292 | if k > j: 293 | clause_idx = Or([Not(matx[i][j]), Not(matx[i][k])]) 294 | s.add(clause_idx) 295 | # print(s.check()) 296 | # print(s.model()) 297 | 298 | 299 | # ri 300 | 301 | for _sid, _pattern_lst in rule_data.items(): 302 | pattern_idx_lst = [ pattern_to_idx_dict[pattern] for pattern in _pattern_lst ] 303 | for pattern_idx in pattern_idx_lst: 304 | for bucket_jdx in range(bucket_num): 305 | for pattern_kdx in pattern_idx_lst: 306 | if pattern_kdx > pattern_idx: 307 | clause_idx = Or( [ Not(matx[pattern_idx][bucket_jdx]), Not(matx[pattern_kdx][bucket_jdx])]) 308 | # print(clause_idx) 309 | s.add(clause_idx) 310 | assert(s.check() == sat) 311 | print(s.check()) 312 | print(s.model()) 313 | 314 | check_buckets_alloc(solution_matrix = matx, pattern_set_size = pattern_set_size, bucket_num = bucket_num, 315 | _model= s.model(), idx_to_pattern_dict = idx_to_pattern_dict) 316 | check_rules(solution_matrix = matx, bucket_num = bucket_num, 317 | ruleset = rule_data, _model= s.model(), pattern_to_idx_dict = pattern_to_idx_dict) 318 | 319 | mat_entries_lst = generate_mat_table_entries(solution_matrix = matx, _model= s.model(), 320 | pattern_to_idx_dict = pattern_to_idx_dict, bucket_num = bucket_num, 321 | ruleset = rule_data) 322 | 323 | print(mat_entries_lst) -------------------------------------------------------------------------------- /entrygen/bucket_pattern2rule_table.py: 
# ------------------------------------------------------------------------------

NULL_PATTERN_CODE = "NAN"
NULL_BUCEKT_ID = -1  # (sic) name kept for compatibility with existing callers
from config import SWITCH_BUCKET_CONFIG, SWITCH_CONFIG


class BucketPattern2ruleTable(object):
    '''
    Annotates shadow-coded NFA transition entries with bucket information
    and produces the runtime (P4Runtime-style) table entries.

    ******SC_ID********
    ['***', '11*', '000', '011', '00*', '10*', '111']
    ['010', '110', '000', '011', '001', '100', '111']
    ******dump_var_stride_shadow_mat_lst********
    (('11*', 'he'), 'goto', ('011', 1))
    (('000', 'e'), 'goto', ('011', 1))
    (('00*', 'is'), 'goto', ('111', 2))
    (('10*', 's'), 'goto', ('111', 2))
    (('***', 'hi'), 'goto', ('100', 0))
    (('***', 'sh'), 'goto', ('000', 0))
    (('***', '\xffh'), 'goto', ('001', 0))
    (('***', '\xffs'), 'goto', ('110', 0))

    return
    (('11*', 'he'), 'goto', ('011', (1, '01')))
    (('000', 'e'), 'goto', ('011', (1, '01')))
    (('00*', 'is'), 'goto', ('111', (2, '10')))
    (('10*', 's'), 'goto', ('111', (2, '10')))
    (('***', 'hi'), 'goto', ('100', (-1, 'NAN')))
    (('***', 'sh'), 'goto', ('000', (-1, 'NAN')))
    (('***', '\xffh'), 'goto', ('001', (-1, 'NAN')))
    (('***', '\xffs'), 'goto', ('110', (-1, 'NAN')))
    '''

    def __init__(self, var_stride_shadow_mat_lst, pattern_list, SC_ID_tuple,
                 switch_config, cover_code_length,
                 ruleset=None, bucket_num=None,
                 pattern2bucekt_dict=None, pattern2code_dict=None, nfa=None):
        self.var_stride_shadow_mat_lst = var_stride_shadow_mat_lst
        self.nfa = nfa
        print("INIT BUCKET RULE TABLE")
        print(self.var_stride_shadow_mat_lst)
        print(pattern_list)
        print(SC_ID_tuple)
        print("END FOR BUCKET RULE TABLE PARAMETERS")

        # Strawman defaults so the class can be validated without a real
        # pattern/bucket allocation (cf. r1 "she" "his" / r2 "cat").
        if pattern2bucekt_dict is None or pattern2code_dict is None:
            self.pattern2bucekt_dict = {"she": 1, "his": 1, "cat": 2}
            self.pattern2code_dict = {"she": "01", "his": "10", "cat": "11"}
        else:
            self.pattern2bucekt_dict = pattern2bucekt_dict
            self.pattern2code_dict = pattern2code_dict

        if pattern_list is None:
            self.pattern_list = ["she", "his", "cat"]
        else:
            self.pattern_list = pattern_list

        if ruleset is None:
            self.ruleset = {"r1": ["she"], "r2": ["his"]}
            self.bucket_num = 2
        else:
            self.ruleset = ruleset
            # BUG FIX: was `self.bucket_num - bucket_num` (a no-op
            # subtraction), which discarded the caller-supplied bucket
            # count and left the attribute unset.
            self.bucket_num = bucket_num

        self.SC_ID_tuple = SC_ID_tuple
        self.SC_lst = self.SC_ID_tuple[0]
        self.ID_lst = self.SC_ID_tuple[1]
        self.cover_code_length = cover_code_length
        # Bit width of a state code, taken from the first entry's
        # match-state string (e.g. '11*' -> 3).
        self.state_width = len(var_stride_shadow_mat_lst[0][0][0])

        self.bucket_var_stride_shadow_mat_lst = []
        self.bucket_pattern2rule_mat_lst = []

        self.switch_config = switch_config
        self.runtime_nfa_shadow_mat_entries = []
        self.runtime_default_mat_entries = []

    def gen_var_stride_shadow_mat_bucket_lst(self):
        """Annotate each shadow-MAT entry with its (bucket id, pattern code).

        A positive accept index selects pattern_list[idx - 1]; index 0
        means "no pattern accepted" and maps to
        (NULL_BUCEKT_ID, NULL_PATTERN_CODE).
        """
        for entry in self.var_stride_shadow_mat_lst:
            match_state, match_chars = entry[0]
            dst_state, acc_pattern_idx = entry[2]
            if acc_pattern_idx > 0:
                acc_pattern = self.pattern_list[acc_pattern_idx - 1]
                bucket_idx = self.pattern2bucekt_dict[acc_pattern]
                pattern_code = self.pattern2code_dict[acc_pattern]
            else:
                bucket_idx = NULL_BUCEKT_ID
                pattern_code = NULL_PATTERN_CODE
            new_entry = ((match_state, match_chars), entry[1],
                         (dst_state, (bucket_idx, pattern_code)))
            self.bucket_var_stride_shadow_mat_lst.append(new_entry)

    def gen_bucket_pattern2rule_mat_lst(self):
        """Build one ternary entry per rule over the bucket vector,
        e.g. (("01", "10"), "forward")."""
        for sid, pattern_lst in self.ruleset.items():
            bucket_match_field = ["*"] * self.bucket_num
            for pattern in pattern_lst:
                bucket_idx = self.pattern2bucekt_dict[pattern]
                pattern_code = self.pattern2code_dict[pattern]
                # Bucket ids in pattern2bucekt_dict are 1-based.
                bucket_match_field[bucket_idx - 1] = pattern_code
            self.bucket_pattern2rule_mat_lst.append(
                (tuple(bucket_match_field), "forward"))

    def gen_runtime_var_stride_shadow_mat_lst(self, table_id_list):
        """Convert bucket_var_stride_shadow_mat_lst into runtime MAT
        entries, one copy per DFA table id in `table_id_list`.

        A source char of 0xff acts as a wildcard and is emitted with mask
        0 ([0, 0]); entries shorter than max_stride are padded with
        wildcard char fields.
        """
        MAX_STRIDE = self.switch_config['max_stride']
        self.runtime_nfa_shadow_mat_entries = []
        for table_id in table_id_list:
            for (match, action, action_params) in self.bucket_var_stride_shadow_mat_lst:
                runtime_mat_entry = {}
                runtime_mat_entry["table"] = \
                    self.switch_config["dfa_mat_name"] % table_id

                state, mask = self.__generate_state_value_mask(
                    match[0], int(self.state_width))
                runtime_mat_entry["match"] = {
                    SWITCH_BUCKET_CONFIG["current_state"]: [state, mask]
                }

                cur_entry_stride = len(match[1])
                if cur_entry_stride > MAX_STRIDE:
                    print("MAX_STRIDE EXCEED ERROR")
                    exit(1)
                for idx in range(MAX_STRIDE):
                    field_name = SWITCH_BUCKET_CONFIG["received_char"] % idx
                    if idx < cur_entry_stride:
                        received_char = match[1][idx]
                        # py2 str yields 1-char strings, py3 bytes yields ints.
                        if type(received_char) != int:
                            received_char = ord(match[1][idx])
                        if received_char != 0xff:
                            runtime_mat_entry["match"][field_name] = [received_char, 255]
                        else:
                            # 0xff is the wildcard char: match anything.
                            runtime_mat_entry["match"][field_name] = [0, 0]
                    else:
                        # Pad short entries up to MAX_STRIDE with wildcards.
                        runtime_mat_entry["match"][field_name] = [0, 0]

                dst_state, (bucket_id, pattern_code) = action_params
                if bucket_id == NULL_BUCEKT_ID:
                    runtime_mat_entry["action_name"] = \
                        SWITCH_BUCKET_CONFIG["goto_action_name"] % cur_entry_stride
                    runtime_mat_entry["action_params"] = {
                        SWITCH_BUCKET_CONFIG["next_state_para"]:
                            self.__generate_state_value_mask(
                                dst_state, int(self.state_width))[0],
                    }
                else:
                    runtime_mat_entry["action_name"] = \
                        SWITCH_BUCKET_CONFIG["accept_action_name"] % (cur_entry_stride,
                                                                      bucket_id)
                    runtime_mat_entry["action_params"] = {
                        SWITCH_BUCKET_CONFIG["next_state_para"]:
                            self.__generate_state_value_mask(
                                dst_state, int(self.state_width))[0],
                        SWITCH_BUCKET_CONFIG["pattern_code_para"]:
                            self.__generate_state_value_mask(
                                pattern_code, len(pattern_code))[0],
                    }
                self.runtime_nfa_shadow_mat_entries.append(runtime_mat_entry)

        return self.runtime_nfa_shadow_mat_entries

    def gen_runtime_default_entries(self, table_id_lst, **kwargs):
        """Default entries: the root-state table plus, per DFA table, a
        fallback that sends the automaton back to the root state.

        kwargs must supply "root_state" and "max_stride", e.g.::

            {
                "table": "MyIngress.t_get_root_state",
                "default_action": true,
                "action_name": "MyIngress.a_get_root_state",
                "action_params": {"root_state": 2}
            }
        """
        self.runtime_default_mat_entries = []
        root_state = kwargs["root_state"]
        max_stride = kwargs["max_stride"]

        t_get_state_default_entry = {}
        t_get_state_default_entry["table"] = SWITCH_BUCKET_CONFIG["root_state_mat_name"]
        t_get_state_default_entry["action_name"] = SWITCH_BUCKET_CONFIG["root_state_action_name"]
        t_get_state_default_entry["action_params"] = {"root_state": root_state}
        t_get_state_default_entry["default_action"] = True
        self.runtime_default_mat_entries.append(t_get_state_default_entry)

        for table_id in table_id_lst:
            t_dfa_match_default_entry = {}
            t_dfa_match_default_entry["table"] = SWITCH_BUCKET_CONFIG["dfa_mat_name"] % table_id
            t_dfa_match_default_entry["action_name"] = SWITCH_BUCKET_CONFIG["goto_action_name"] % max_stride
            t_dfa_match_default_entry["action_params"] = {"_state": root_state}
            t_dfa_match_default_entry["default_action"] = True
            self.runtime_default_mat_entries.append(t_dfa_match_default_entry)

        return self.runtime_default_mat_entries

    def gen_runtime_bucket_pattern2rule_mat_lst(self):
        """TODO: emit runtime entries for bucket_pattern2rule_mat_lst,
        e.g. (('01', '10'), 'send')."""
        pass

    def __generate_state_value_mask(self, match_state, state_width):
        """'11*' -> (value, mask) ternary pair; '*' marks don't-care bits.

        NOTE(review): the source dump is truncated mid-expression in the
        original; the mask reconstruction below (clear the `star_num`
        low-order bits) follows how callers consume the [value, mask]
        pair -- TODO confirm against the original file.
        """
        max_num_of_bits = (1 << int(state_width)) - 1
        star_num = match_state.count('*')
        exact_state_str = match_state.replace('*', '0')
        mask = max_num_of_bits - ((1 << star_num) - 1)
        return int(exact_state_str, 2), mask

# NOTE(review): the dump is garbled past this point; the stray text below
# belonged to the head of /entrygen/controller.py's module docstring:
# - table_add => [priority]
# - table_delete
# NOTE(review): the dump lost the head of /entrygen/controller.py; the lines
# below continue its module docstring (examples of simple_switch CLI entries):
# - table_add t_ipv4_lpm a_ipv4_forward 10.0.0.10/32 => 00:04:00:00:00:00 1
# - table_add t_ipv4_lpm a_ipv4_forward 10.0.1.10/32 => 00:04:00:00:00:01 2


def writeRulesToConfig_from_mat_lst(ruleset, stride, table_id_list, filename=''):
    """Generate simple_switch CLI commands (table_add / table_set_default)
    for the shadow-coded NFA and write them to `filename`, or print them
    when `filename` is empty.
    """
    x = NFAMatchEntriesShadowGenerator(pattern_expression=ruleset, stride=stride,
                                       table_id_list=table_id_list)
    MAX_STRIDE = SWITCH_CONFIG['max_stride']
    # The root state ID is the first initial code, a binary string.
    root_state_ID = int(x.SC_ID_tuple[1][0], 2)

    runtime_nfa_shadow_mat_entries = x.get_runtime_nfa_shadow_mat_entries()
    runtime_policy_mat_entries = x.get_runtime_policy_mat_entries()
    runtime_mat_default_entries = x.get_runtime_mat_default_entries()

    # Longer/earlier entries must win: priorities count down from the max.
    cur_priority = len(x.runtime_nfa_shadow_mat_entries) + 1
    entry_lst = []
    for entry_idx in runtime_nfa_shadow_mat_entries:
        if entry_idx["table_name"][0:-1] == "t_DFA_match_" \
                and entry_idx["action_name"][0:-1] == "a_set_state_":
            entry_str = ('table_add ' + entry_idx["table_name"] + ' '
                         + entry_idx["action_name"] + ' ')
            for idx in range(MAX_STRIDE):
                field_name = SWITCH_CONFIG["received_char"] % idx
                value, mask = entry_idx["match"][field_name]
                entry_str += str(value) + '&&&' + str(mask) + ' '
            state_value, state_mask = entry_idx["match"]["meta.state"]
            entry_str += (str(state_value) + '&&&' + str(state_mask) + ' => '
                          + str(entry_idx["action_params"]["_state"]) + ' '
                          + str(entry_idx["action_params"]["modifier"]) + ' '
                          + str(cur_priority))
            cur_priority -= 1
            print(entry_str)
            entry_lst.append(entry_str)

    normal_priority = 1
    for entry_idx in runtime_policy_mat_entries:
        if entry_idx["table_name"] == "t_policy":
            entry_str = ('table_add ' + entry_idx["table_name"] + ' '
                         + entry_idx["action_name"] + ' ')
            value, mask = entry_idx["match"]["meta.pattern_state"]
            entry_str += (str(value) + '&&&' + str(mask)
                          + ' => ' + str(normal_priority))
            print(entry_str)
            entry_lst.append(entry_str)

    for entry_idx in runtime_mat_default_entries:
        # BUG FIX: the original used `if / if / elif` and unconditionally
        # appended entry_str, so an entry matching none of the tables
        # re-appended the previous iteration's command.  Chain the branches
        # and only emit when one matched.
        entry_str = None
        if entry_idx["table_name"] == "t_get_stride":
            entry_str = ('table_set_default ' + entry_idx["table_name"] + ' '
                         + entry_idx["action_name"] + ' '
                         + str(entry_idx["action_params"]["_stride"]))
        elif entry_idx["table_name"] == "t_policy":
            entry_str = ('table_set_default ' + entry_idx["table_name"] + ' '
                         + entry_idx["action_name"])
        elif entry_idx["table_name"][0:-1] == "t_DFA_match_":
            entry_str = ('table_set_default ' + entry_idx["table_name"] + ' '
                         + entry_idx["action_name"] + ' '
                         + str(entry_idx["action_params"]["_state"]) + ' '
                         + str(entry_idx["action_params"]["modifier"]))
        if entry_str is not None:
            print(entry_str)
            entry_lst.append(entry_str)

    # Fixed bootstrap entries: root state plus the two demo-host routes.
    entry_lst.extend([
        "table_set_default t_get_root_state a_get_root_state " + str(root_state_ID),
        "table_add t_ipv4_lpm a_ipv4_forward 10.0.0.10/32 => 00:04:00:00:00:00 1",
        "table_add t_ipv4_lpm a_ipv4_forward 10.0.1.10/32 => 00:04:00:00:00:01 2",
    ])

    if filename == '':
        for line in entry_lst:
            print(line)
    else:
        with open(filename, 'w') as f:
            for line in entry_lst:
                f.write(line + '\n')


def writeBucketRulesToConfig_from_mat_lst(ruleset, stride, table_id_list, filename=''):
    """Generate the P4Runtime JSON spec (s1-runtime style) for the bucketed
    NFA: shadow-MAT entries, default entries and one strawman rule entry.

    TO REFACTOR: the controller should read the ruleset and the
    pattern-to-bucket mapping instead of using the hard-coded rule entry
    below.
    """
    x = NFAMatchEntriesShadowGenerator(pattern_expression=ruleset, stride=stride,
                                       table_id_list=table_id_list)
    root_state_ID = int(x.SC_ID_tuple[1][0], 2)

    print(x.nfa)
    print(x.nfa_mat_entries)
    print(x.nfa_shadow_mat_entries)

    runtime_nfa_shadow_mat_entries = \
        x.pattern2rule_table.gen_runtime_var_stride_shadow_mat_lst(table_id_list)
    runtime_default_entries = x.pattern2rule_table.gen_runtime_default_entries(
        table_id_list, root_state=root_state_ID, max_stride=stride)

    pipeline_name = "MyIngress"
    s1_runtime_config = {}
    s1_runtime_config["target"] = "bmv2"
    s1_runtime_config["p4info"] = "build/bolt.p4.p4info.txt"
    s1_runtime_config["bmv2_json"] = "build/bolt.json"
    s1_runtime_config["table_entries"] = []

    cur_priority = len(runtime_nfa_shadow_mat_entries) + 1

    print("runtime nfa shadow mat entries")
    for entry_idx in runtime_nfa_shadow_mat_entries:
        print(entry_idx)
        entry = {}
        entry["table"] = pipeline_name + "." + entry_idx["table"]
        # Drop fully-wildcarded ternary fields (mask == 0).
        entry["match"] = {key: value for key, value in entry_idx["match"].items()
                          if value[1] != 0}
        entry["action_name"] = pipeline_name + "." + entry_idx["action_name"]
        entry["action_params"] = entry_idx["action_params"]
        entry["priority"] = cur_priority
        cur_priority -= 1
        s1_runtime_config["table_entries"].append(entry)

    print("default entries")
    for entry_idx in runtime_default_entries:
        print(entry_idx)
        entry = {}
        entry["table"] = pipeline_name + "." + entry_idx["table"]
        entry["default_action"] = entry_idx["default_action"]
        entry["action_name"] = pipeline_name + "." + entry_idx["action_name"]
        entry["action_params"] = entry_idx["action_params"]
        s1_runtime_config["table_entries"].append(entry)

    # Strawman rule entry: hit when bucket b1 holds code 1 and b2 holds
    # code 2 (cf. the README "she"/"his" example).
    rule_table_entry = {}
    rule_table_entry["table"] = pipeline_name + "." + SWITCH_BUCKET_CONFIG["rule_mat_name"]
    rule_table_entry["match"] = {
        "meta.b1": [1, 255],
        "meta.b2": [2, 255]
    }
    rule_table_entry["action_name"] = pipeline_name + "." + "a_mark_as_to_forward"
    rule_table_entry["action_params"] = {}
    rule_table_entry["priority"] = 2
    s1_runtime_config["table_entries"].append(rule_table_entry)

    jsio.store(s1_runtime_config)

    # BUG FIX: mode was 'a'; appending a second JSON document on the next
    # run produces an unparsable file, so overwrite instead.
    with open("test-runtime.json", 'w') as outfile:
        json.dump(s1_runtime_config, outfile, indent=2)
    # NOTE(review): ~80 lines of unreachable CLI-generation code that
    # duplicated writeRulesToConfig_from_mat_lst sat after an early
    # `return` here; removed.


if __name__ == '__main__':
    ruleset = "she | his"
    # writeRulesToConfig_from_mat_lst(ruleset, 2, [0, 1], filename='test.txt')
    writeBucketRulesToConfig_from_mat_lst(ruleset, 2, [0, 1], filename='test.txt')
# ------------------------------------------------------------------------------
# /entrygen/entries_generator_shadow.py:
# ------------------------------------------------------------------------------
# coding=utf-8
# Generate an NFA with shadow-coding.
import ahocorasick
import json
import math
import copy
import sys
import string
import random
import Queue
from config import SWITCH_BUCKET_CONFIG, SWITCH_CONFIG, PATTERN_MAX_NUM
from shadow_code_gen import getShadowCodeWithNFA
from shadow_code_gen import getSCIDWithNFA
from bucket_pattern2rule_table import BucketPattern2ruleTable

# Generate an NFA with shadow encoding.
18 | 19 | 20 | def get_tree(root,tree_son): 21 | tree = [] 22 | tree.append(root) 23 | if len(tree_son[root]) == 0: 24 | return tree 25 | for child in tree_son[root]: 26 | tree.append(get_tree(child,tree_son)) 27 | return tree 28 | 29 | def postOrderTranverseSort(treeLst,stateLst): 30 | root = treeLst[0] 31 | if len(treeLst)== 1: 32 | # print "postTran End in 2", treeLst, stateLst 33 | stateLst.append(root) 34 | return 35 | else: 36 | for index in range(1,len(treeLst)): 37 | postOrderTranverseSort(treeLst[index],stateLst) 38 | stateLst.append(root) 39 | # print "bianli End in 4", treeLst, treeLst, parentLst 40 | return 41 | 42 | def reorderTableEntries(raw_mat_lst,defer_tree_lst): 43 | item = raw_mat_lst[0] 44 | stateID_bit_length = len(item[0][0]) 45 | 46 | stateID_lst = [] 47 | # bianli(8,defer_treee,lst) 48 | # lst.append(defer_treee[0]) 49 | postOrderTranverseSort(defer_tree_lst,stateID_lst) 50 | reorder_mat_lst = [] 51 | for stateID in stateID_lst: 52 | stateID_bin_str = bin(stateID).replace('0b','') 53 | stateID_bin_str = stateID_bin_str.zfill(int(stateID_bit_length)) 54 | for item in raw_mat_lst: 55 | if item[0][0] == stateID_bin_str: 56 | reorder_mat_lst.append(item) 57 | 58 | return reorder_mat_lst 59 | 60 | def dim(treeLst): 61 | # treelst = [0, [1], [2, [4]], [3, [5], [6]]] 62 | root = treeLst[0] 63 | if len(treeLst) == 1: 64 | return 0 65 | else: 66 | temp=1 67 | for i in range(1,len(treeLst)): 68 | temp += (1 << int(dim(treeLst[i]))) 69 | return math.ceil(math.log(temp,2)) 70 | pass 71 | 72 | def bianli(stateID, treeLst,parentLst): 73 | # print "bianli start", stateID, treeLst,parentLst 74 | root = treeLst[0] 75 | if root == stateID: 76 | # parentLst.append(root) 77 | # print "parentLst appendRoots", root 78 | # print "bianli End in 1", stateID, treeLst, parentLst 79 | return True 80 | if len(treeLst)== 1: 81 | # print "bianli End in 2", stateID, treeLst, parentLst 82 | return False 83 | else: 84 | for index in range(1,len(treeLst)): 85 | if 
bianli(stateID,treeLst[index],parentLst): 86 | parentLst.append(treeLst[index][0]) 87 | # print "parentLst append", index 88 | # print "bianli End in 3", stateID, treeLst, parentLst 89 | return True 90 | # print "bianli End in 4", stateID, treeLst, parentLst 91 | return False 92 | # print "bianli End in 5", stateID, treeLst, parentLst 93 | return False 94 | def generate_state_value_mask( match_state, state_width): 95 | #'11*' 96 | 97 | max_num_of_bits = (1<< int(state_width)) - 1 98 | star_num = match_state.count('*') 99 | exact_state_str = match_state.replace('*','0') 100 | mask = max_num_of_bits - ((1<stride_max: 256 | temp_entry_kp1 = ((entry[0][0],input_str+suffix[0]),'goto',suffix[1])# suffix[1]==(dst_state_str1,action_mask_1)) 257 | sub_lst_si_kp1.append(temp_entry_kp1) 258 | else: 259 | pass 260 | # sub_lst_si_kp1.append(entry) 261 | 262 | return sub_lst_si_kp1 + sub_lst_si_k,no_larging_flag 263 | 264 | 265 | def increment_stride(self, sub_lst_si_k, defer_tree, nfa, Si, stride_max): 266 | 267 | ''' 268 | For (strs, dst) in T(sp, k-1): # the k-1 stride table 269 | for sj in defer_path( dst ) – [s0]: #[s2, s4, s0 ]- [s0] 270 | for (char, sd) in T(sj,1): #the Trie 271 | if sd in ACC: 272 | ACTION = ACTION || ACTION(Sd) 273 | add( (sp, strs+char, k/sd) ) 274 | 275 | 276 | si should be the initial code, NOT THE SHADOWCODE !!! 
277 | (('001', 'h'), 'goto', ('010', 0)) 278 | (('010', 'e'), 'goto', ('011', 1)) 279 | (('100', 'i'), 'goto', ('101', 0)) 280 | (('101', 's'), 'goto', ('110', 2)) 281 | (('000', 'h'), 'goto', ('100', 0)) 282 | (('000', 's'), 'goto', ('001', 0)) 283 | [0, [1, [6]], [3], [4, [2]], [5]] 284 | ''' 285 | # si = sub_lst_si[0][0][0] #'0000' 286 | sub_lst_si_kp1 = [] 287 | 288 | # out_state_lst = [] 289 | no_larging_flag = 1 290 | for entry in sub_lst_si_k: 291 | input_str = entry[0][1] 292 | dst_state = entry[2][0] 293 | action_mask = entry[2][1] 294 | dst_state_code = int(dst_state,2) 295 | state_code_itr = dst_state_code 296 | suffix_lst = [] #('a', sj, mask) 297 | while(state_code_itr!= 0): 298 | goto_dict = self.goto_transition_table[state_code_itr] #{'h': 4, 's': 1} 299 | for key in goto_dict.keys(): 300 | char = key 301 | dst_state_1 = goto_dict[key] 302 | action_mask_1 = self.state_table[dst_state_1] 303 | action_mask_1 = action_mask | action_mask_1 304 | # (('0000', 'sh'), 'goto', ('1000', 0)) 305 | # (('0000', 'she'), 'goto', ('1001', 2)) 306 | dst_state_str1 = bin(dst_state_1).replace('0b','') 307 | dst_state_str1 = dst_state_str1.zfill(self.state_width) 308 | temp_suffix_kp1 = (char,(dst_state_str1,action_mask_1)) 309 | suffix_lst.append(temp_suffix_kp1) 310 | state_code_itr = self.failure_transition_table[state_code_itr] 311 | if len(suffix_lst)!=0: 312 | no_larging_flag = 0 313 | for suffix in suffix_lst: 314 | temp_entry_kp1 = ((entry[0][0],input_str+suffix[0]),'goto',suffix[1])# suffix[1]==(dst_state_str1,action_mask_1)) 315 | sub_lst_si_kp1.append(temp_entry_kp1) 316 | else: 317 | sub_lst_si_kp1.append(entry) 318 | 319 | return sub_lst_si_kp1,no_larging_flag 320 | 321 | 322 | def test_increment_stride(self): 323 | 324 | pass 325 | 326 | def increase_root_stride(self,root_lst, var_root_stride=2): 327 | var_root_lst = [] 328 | root_stride_max = 1 329 | # for i in range(3): 330 | var_root_lst = root_lst 331 | for i in range(1,var_root_stride): 332 | 
var_root_lst,no_larging_flag = self.increase_stride(var_root_lst,self.defer_tree_lst,self.nfa,0,i) 333 | 334 | # print var_root_lst 335 | # print no_larging_flag 336 | 337 | return var_root_lst 338 | 339 | def simple_increment_table(self, sub_lst_si_k, defer_tree, nfa, Si, K): 340 | encounter_acc = False 341 | sub_lst_si_kp1 = [] 342 | 343 | # no_larging_flag = 1 344 | stride = 1 345 | for k in range(1,K): 346 | 347 | if encounter_acc: 348 | break 349 | else : 350 | sub_lst_si_kp1=[] 351 | for entry in sub_lst_si_k: 352 | # (('0000', 'sh'), 'goto', ('1000', 0)) 353 | # (('0000', 'she'), 'goto', ('1001', 2)) 354 | input_str = entry[0][1] 355 | dst_state_str = entry[2][0] 356 | action_mask = entry[2][1] # shoul 357 | if action_mask !=0 : 358 | encounter_acc=True 359 | return sub_lst_si_k,stride 360 | dst_state= int(dst_state_str,2) 361 | 362 | suffix_lst = [] #('a', sj, mask) 363 | goto_dict = self.goto_transition_table[dst_state] 364 | for key in goto_dict.keys(): 365 | char = key 366 | dst_state_1 = goto_dict[key] 367 | action_mask_1 = self.state_table[dst_state_1] 368 | # action_mask_1 = action_mask | action_mask_1 369 | # (('0000', 'sh'), 'goto', ('1000', 0)) 370 | # (('0000', 'she'), 'goto', ('1001', 2)) 371 | dst_state_str1 = bin(dst_state_1).replace('0b','') 372 | dst_state_str1 = dst_state_str1.zfill(self.state_width) 373 | temp_suffix_kp1 = (char,(dst_state_str1,action_mask_1)) 374 | suffix_lst.append(temp_suffix_kp1) 375 | if action_mask_1 != 0:#ACC ENCOUNTER 376 | encounter_acc = True 377 | if len(suffix_lst)!=0: 378 | no_larging_flag = 0 379 | for suffix in suffix_lst: 380 | temp_entry_kp1 = ((entry[0][0],input_str+suffix[0]),'goto',suffix[1])# suffix[1]==(dst_state_str1,action_mask_1)) 381 | sub_lst_si_kp1.append(temp_entry_kp1) 382 | 383 | sub_lst_si_k = copy.deepcopy(sub_lst_si_kp1) 384 | stride = k + 1 385 | return sub_lst_si_k,stride 386 | 387 | def simple_increase_root_table(self, root_lst, defer_tree, nfa, Si, K): 388 | 389 | encounter_acc = False 
390 | 391 | ''' 392 | For (strs, dst) in T(sp, k-1): # the k-1 stride table 393 | for sj in defer_path( dst ) – [s0]: #[s2, s4, s0 ]- [s0] 394 | for (char, sd) in T(sj,1): #the Trie 395 | if sd in ACC: 396 | ACTION = ACTION || ACTION(Sd) 397 | add( (sp, strs+char, k/sd) ) 398 | ''' 399 | # si = sub_lst_si[0][0][0] #'0000' 400 | 401 | 402 | root_lst_k = copy.deepcopy(root_lst) 403 | var_root_lst = copy.deepcopy(root_lst) 404 | # out_state_lst = [] 405 | no_larging_flag = 1 406 | stride = 1 407 | for k in range(1,K): 408 | # print "k", k 409 | # print "stride",stride 410 | if encounter_acc == True: 411 | break 412 | else: 413 | root_lst_kp1 = [] 414 | for entry in root_lst_k: 415 | input_str = entry[0][1] 416 | dst_state_str = entry[2][0] 417 | action_mask = entry[2][1] 418 | if action_mask !=0 : 419 | encounter_acc=True 420 | return var_root_lst,stride 421 | dst_state = int(dst_state_str,2) 422 | # state_code_itr = dst_state 423 | suffix_lst = [] #('a', sj, mask) 424 | 425 | goto_dict = self.goto_transition_table[dst_state] #{'h': 4, 's': 1} 426 | for key in goto_dict.keys(): 427 | char = key 428 | dst_state_1 = goto_dict[key] 429 | action_mask_1 = self.state_table[dst_state_1] 430 | if action_mask_1 != 0: 431 | encounter_acc = True 432 | # action_mask_1 = action_mask | action_mask_1 433 | # (('0000', 'sh'), 'goto', ('1000', 0)) 434 | # (('0000', 'she'), 'goto', ('1001', 2)) 435 | dst_state_str1 = bin(dst_state_1).replace('0b','') 436 | dst_state_str1 = dst_state_str1.zfill(self.state_width) 437 | temp_suffix_kp1 = (char,(dst_state_str1,action_mask_1)) 438 | suffix_lst.append(temp_suffix_kp1) 439 | if len(suffix_lst)!=0: 440 | no_larging_flag = 0 441 | for suffix in suffix_lst: 442 | temp_entry_kp1 = ((entry[0][0],input_str+suffix[0]),'goto',suffix[1])# suffix[1]==(dst_state_str1,action_mask_1)) 443 | root_lst_kp1.append(temp_entry_kp1) 444 | 445 | var_root_lst = root_lst_kp1 + var_root_lst 446 | root_lst_k = root_lst_kp1 447 | stride = k+1 448 | # 
sub_lst_si_kp1.append(entry) 449 | 450 | 451 | return var_root_lst,stride 452 | 453 | 454 | def var_striding_simple_main_procedure(self ,K=3): 455 | stateLst = [] 456 | postOrderTranverseSort(self.defer_tree_lst, stateLst) 457 | print stateLst 458 | root_lst = self.sub_table_dict[0] 459 | vroot_lst = [] 460 | #1. vroot_lst = self_looping_unrolling(K) 461 | ''' 462 | (('0000', 'h*'), 'goto', ('1001', 2)) 463 | (('0000', 's*'), 'goto', ('1001', 2)) 464 | (('0000', '*s*'), 'goto', ('1000', 0)) 465 | (('0000', '*h*'), 'goto', ('1000', 0)) 466 | (('0000', '**h'), 'goto', ('1000', 0)) 467 | (('0000', '**s'), 'goto', ('1000', 0)) 468 | (('0000', '***'), 'goto', ('1000', 0)) 469 | ''' 470 | #2. vroot_lst = simple_increase(K) 471 | vroot_lst,k = self.simple_increase_root_table(root_lst=root_lst,defer_tree=self.defer_tree_lst,\ 472 | nfa=self.nfa,Si=0,K=K) 473 | 474 | ''' 475 | (('0000', 'his'), 'goto', ('1001', 2)) 476 | (('0000', 'she'), 'goto', ('1001', 2)) 477 | (('0000', '*sh'), 'goto', ('1000', 0)) 478 | (('0000', '*hi'), 'goto', ('1000', 0)) 479 | (('0000', '**h'), 'goto', ('1000', 0)) 480 | (('0000', '**s'), 'goto', ('1000', 0)) 481 | (('0000', '***'), 'goto', ('1000', 0)) 482 | ''' 483 | v_self_unloop_root_lst = self.self_var_root_looping_unrolling(vroot_lst,K=K) 484 | self.sub_table_dict[0] = v_self_unloop_root_lst 485 | # print "vroot_lst" 486 | # for entry in vroot_lst: 487 | # print entry 488 | # print k 489 | # print "v_self_unloop_root_lst" 490 | # for entry in v_self_unloop_root_lst: 491 | # print entry 492 | # print k 493 | for key in self.sub_table_dict.keys(): 494 | if key == 0: 495 | continue 496 | # print "KEY ",key 497 | 498 | var_K_state_lst,k_state = self.simple_increment_table(sub_lst_si_k= self.sub_table_dict[key],defer_tree=self.defer_tree_lst,\ 499 | nfa=self.nfa,Si=key,K=K) 500 | # print var_K_state_lst,k_state 501 | 502 | self.sub_table_dict[key] = var_K_state_lst 503 | #get non-root 504 | for i in stateLst: 505 | if 
self.sub_table_dict.has_key(i): 506 | # var_mat_lst_si = sub_table_dict[i] 507 | self.var_stride_mat_lst += self.sub_table_dict[i] 508 | 509 | 510 | def var_striding_main_procedure(self, var_root_stride = 1, N=30,K=3): 511 | 512 | entries_num = len(self.entries_list) 513 | stateLst = [] 514 | postOrderTranverseSort(self.defer_tree_lst, stateLst) 515 | print stateLst 516 | root_lst = self.sub_table_dict[0] 517 | var_root_lst=self.increase_root_stride(root_lst,var_root_stride) 518 | # var_root_lst,no_larging_flag = self.increase_stride(var_root_lst,self.defer_tree_lst,self.nfa,0,2) 519 | # print var_root_lst 520 | # print no_larging_flag 521 | # if no_larging_flag ==1:# no enlarging 522 | # pass 523 | # else:# no enlarging 524 | # root_stride_max +=1 525 | 526 | # var_root_lst,no_larging_flag = self.increase_stride(var_root_lst,self.defer_tree_lst,self.nfa,0,3) 527 | 528 | # print var_root_lst 529 | # print no_larging_flag 530 | # if no_larging_flag ==1:# no enlarging 531 | # pass 532 | # else:# no enlarging 533 | # root_stride_max +=1 534 | 535 | # print "VAR ROOT LST" 536 | # print root_stride_max 537 | # for i in var_root_lst: 538 | # print i 539 | # print "***********" 540 | 541 | k_root_lst = self._selfLoopUnrolling(var_root_lst,K) 542 | entries_num = entries_num - len(root_lst) + len(k_root_lst) 543 | self.sub_table_dict[0] = k_root_lst 544 | 545 | pque = Queue.PriorityQueue() 546 | for state in self.sub_table_dict.keys(): 547 | if state == 0: 548 | continue #live state_0 alone 549 | pque.put(VarStrideState(state = state,\ 550 | stride = 1,depth = self.depth_dict[state],\ 551 | entries_num=len(self.sub_table_dict[state]))) 552 | 553 | # while not pque.empty(): 554 | # print pque.get() 555 | # print entries_num 556 | 557 | 558 | while(entries_num < N and not pque.empty()): 559 | state_tuple = pque.get() # state , stride , depth, entries_num 560 | si = state_tuple.state 561 | ki = state_tuple.stride 562 | 563 | depthi = state_tuple.depth 564 | # si_entries_num = 
state_tuple.entries_num 565 | # print state_tuple 566 | current_table_size = len(self.sub_table_dict[si]) 567 | sub_lst_si_kp1,no_larging_flag = self.increment_stride(self.sub_table_dict[si], self.defer_tree_lst,self.nfa, si,ki) 568 | ''' 569 | if return_table no enlarging: 570 | cannot enlarge without defering to s0 571 | do nothing 572 | else: 573 | if table num exceeds N: 574 | not apply incrementing this subtable 575 | not put into pque 576 | else 577 | apply incrementing this subtable 578 | if k+1 == K: 579 | put into pque 580 | ''' 581 | if no_larging_flag ==1:# no enlarging 582 | pass 583 | else:# no enlarging 584 | temp_num = entries_num - current_table_size + len(sub_lst_si_kp1) 585 | if temp_num <= N: 586 | self.sub_table_dict[si] = sub_lst_si_kp1 587 | entries_num = temp_num 588 | if ki+1 < K: 589 | pque.put(VarStrideState(state = si,\ 590 | stride = ki+1,depth = depthi,\ 591 | entries_num=len(self.sub_table_dict[si]))) 592 | 593 | # self.var_stride_mat_lst =[] 594 | # self.var_stride_shadow_mat_lst =[] 595 | for i in stateLst: 596 | if self.sub_table_dict.has_key(i): 597 | # var_mat_lst_si = sub_table_dict[i] 598 | self.var_stride_mat_lst += self.sub_table_dict[i] 599 | 600 | 601 | def self_var_root_looping_unrolling(self,var_root_lst, K): 602 | ''' 603 | (('000', 'his'), 'goto', ('110', 2)) 604 | (('000', 'she'), 'goto', ('011', 1)) 605 | (('000', 'hi'), 'goto', ('101', 0)) 606 | (('000', 'sh'), 'goto', ('010', 0)) 607 | (('000', 'h'), 'goto', ('100', 0)) 608 | (('000', 's'), 'goto', ('001', 0)) 609 | ''' 610 | # stateLst = [] 611 | # temp_lst=[] 612 | # postOrderTranverseSort(self.defer_tree_lst, stateLst) 613 | # root_lst = self.sub_table_dict[0] 614 | v_self_unloop_root_lst=[] 615 | for entry in var_root_lst: 616 | input_str = entry[0][1] 617 | self_unloop_input_str = '\xff'*(K-len(input_str))+input_str 618 | tempentry = ((entry[0][0],self_unloop_input_str),entry[1],entry[2]) 619 | v_self_unloop_root_lst.append(tempentry) 620 | return 
# (orphan continuation of the previous dump line: "v_self_unloop_root_lst",
# the value returned by self_var_root_looping_unrolling.)

# NOTE(review): the defs below carried `self` in the original; they belong to
# the variable-stride enlarger class whose header was lost in the dump's
# omitted region (original file lines ~101-255).  Kept exactly as recovered.


def self_looping_unrolling(self, K=3):
    """Unroll the root's self-loop to width K, then rebuild
    var_stride_mat_lst by concatenating the sub-tables in post-order of the
    defer tree."""
    stateLst = []
    postOrderTranverseSort(self.defer_tree_lst, stateLst)
    root_lst = self.sub_table_dict[0]
    k_root_lst = self._selfLoopUnrolling(root_lst, K)
    self.sub_table_dict[0] = k_root_lst
    for i in stateLst:
        if i in self.sub_table_dict:
            self.var_stride_mat_lst += self.sub_table_dict[i]


def allocate_shadowcode(self):
    """Re-code var_stride_mat_lst: source states get their shadow code
    (SC_lst), destinations their exact ID code (ID_lst); both lists are
    indexed by the state's initial binary code.  Appends to and returns
    var_stride_shadow_mat_lst."""
    for item in self.var_stride_mat_lst:
        shadow_state_ID = self.SC_lst[int(item[0][0], 2)]
        ID_state_ID = self.ID_lst[int(item[2][0], 2)]
        temp_item = ((shadow_state_ID, item[0][1]), item[1], (ID_state_ID, item[2][1]))
        self.var_stride_shadow_mat_lst.append(temp_item)
    return self.var_stride_shadow_mat_lst


def dump_sub_table(self):
    """Debug print: every sub-table entry in post-order of the defer tree."""
    stateLst = []
    postOrderTranverseSort(self.defer_tree_lst, stateLst)
    print(stateLst)
    temp_lst = []
    for i in stateLst:
        if i in self.sub_table_dict:
            temp_lst += self.sub_table_dict[i]
    for i in temp_lst:
        print(i)


def dump_var_stride_mat(self):
    """Debug print the variable-stride match table."""
    print("******dump_var_stride_mat********")
    for i in self.var_stride_mat_lst:
        print(i)
    print("**************")


def dump_var_stride_shadow_mat(self):
    """Debug print the SC/ID code lists and the shadow-coded table."""
    print("******SC_ID********")
    print(self.SC_lst)
    print(self.ID_lst)
    print("******dump_var_stride_shadow_mat_lst********")
    for i in self.var_stride_shadow_mat_lst:
        print(i)
    print("**************")


class VarStrideMatchTable(object):
    """Ternary match table built from variable-stride shadow entries.

    Each input entry looks like ((state_bits_possibly_with_stars, chars),
    'goto', (dst_bits, mask)); it is stored as
    ((state_value, state_mask, chars), (dst_bits, chars_len, mask)) plus a
    catch-all default entry falling back to the root state.
    """

    def __init__(self, entries_list=None, max_stride=1, ID_lst=[], defer_tree_lst=[]):
        # NOTE(review): mutable default arguments kept from the original;
        # they are never mutated here, so behavior is unchanged.
        self._entries_list = []
        self.default_entry = None
        self.mat_stride = 0
        self.max_stride = max_stride  # int
        self.state_width = len(entries_list[0][0][0])
        self.defer_tree_lst = defer_tree_lst
        if entries_list is not None:
            for entry in entries_list:
                state_value, state_mask = self.generate_state_value_mask(
                    entry[0][0], self.state_width)
                match_char = entry[0][1]
                match_char_len = len(match_char)
                self._entries_list.append(
                    ((state_value, state_mask, match_char),
                     (entry[2][0], match_char_len, entry[2][1])))
            self.root_ID = ID_lst[0]
            # NOTE(review): match_char_len here is whatever the last loop
            # iteration left behind -- looks suspicious for the default
            # entry; verify against the upstream file.
            self.default_entry = ((0, 0, '\xff' * self.max_stride),
                                  (self.root_ID, match_char_len, 0))
            self._entries_list.append(
                ((0, 0, '\xff' * self.max_stride),
                 (self.root_ID, match_char_len, 0)))

    # NOTE(review): the dump is truncated here (original file lines ~715-782
    # missing).  Visible fragments preserved verbatim:
    # def generate_state_value_mask(self, match_state, state_width):
    #     #'11*'
    #     max_num_of_bits = (1<      ...[truncated in dump]...
    #     ... ", entry)

    def get_len(self):
        """Number of stored entries (including the appended default entry)."""
        return len(self._entries_list)


class NFAMatchEntriesShadowGenerator():
    """End-to-end generator: pattern expression -> Aho-Corasick NFA ->
    shadow-coded variable-stride match-action table entries.
    (The rest of __init__ and the other methods continue on the following
    dump lines.)"""

    def __init__(self, pattern_expression, stride=1, table_id_list=[0]):
        # Init and configure the automaton.
        self.stride = stride
        self.table_id_list = table_id_list
        self.pattern_list, self.policies = \
            self.parse_pattern_expression(pattern_expression)
798 | self.automaton = self.generate_automaton(self.pattern_list) 799 | # Gegerate dfa descriptor according to the automaton 800 | 801 | self.nfa = self.generate_nfa(self.automaton.dump()) 802 | self.defer_tree = self.generate_defer_tree(self.nfa) 803 | 804 | # self.shadow_code = getShadowCodeWithNFA(self.nfa) 805 | # self.dfa = self.generate_dfa(self.automaton.dump()) 806 | # self.fulldfa = self.generate_fulldfa(self.automaton.dump()) 807 | # self.shadow_code = getShadowCodeWithDFA(self.dfa) 808 | # self.ID_code = 809 | print("PRINT PATTERNLEST") 810 | print(self.pattern_list) 811 | print("PRINT POLICIES") 812 | print( self.policies) 813 | print("END FOR POLICIES") 814 | self.SC_ID_tuple = getSCIDWithNFA(self.nfa, self.defer_tree) 815 | self.cover_code_length = self.get_cover_code_length(self.nfa, self.defer_tree) 816 | self.nfa_mat_entries = self.generate_nfa_mat_entreis(self.nfa, self.defer_tree) 817 | self.nfa_shadow_mat_entries = self.generate_nfa_shadow_mat_entries(self.nfa_mat_entries, self.defer_tree, self.SC_ID_tuple) 818 | self.nfa_shadow_default_entry = self.generate_default_shadow_mat_entry(self.nfa_mat_entries, self.defer_tree, self.SC_ID_tuple) 819 | # # self.dfa = self.recode_dfa(self.dfa, self.shadow_code) 820 | # # stride should always be 1, msdfa no use now 821 | # self.msdfa = self.generate_multi_stride_dfa(self.dfa, self.stride) 822 | # self.dfa_mat_entries = self.generate_dfa_mat_entries(self.msdfa) 823 | 824 | 825 | 826 | 827 | ## TODO & doing now integrate vstride_entreis_generator in this class, implementing k-stride mat entries 828 | self.vstride_table = VarStrideEnlarger(entries_list= self.nfa_mat_entries,nfa=self.nfa,defer_tree_lst=self.defer_tree, SC_ID_tuple=self.SC_ID_tuple) 829 | 830 | # var_stride_mat_lst = vtable.var_strding_enlarge() 831 | # var_stride_shadow_mat_lst = vtable.allocate_shadowcode() 832 | # for i in var_stride_shadow_mat_lst: 833 | # print i 834 | # var_strding_enlarge\ 835 | self.vstride_table.dump_sub_table() 836 | 
# print "************" 837 | 838 | self.vstride_table.var_striding_simple_main_procedure(K=stride) 839 | 840 | self.vstride_table.allocate_shadowcode() 841 | self.vstride_table.dump_var_stride_mat() 842 | self.vstride_table.dump_var_stride_shadow_mat() 843 | 844 | self.vstride_nfa_mat_entries = self.vstride_table.var_stride_mat_lst 845 | self.vstride_nfa_shadow_mat_entries = self.vstride_table.var_stride_shadow_mat_lst 846 | 847 | self.runtime_nfa_shadow_mat_entries = self.generate_runtime_nfa_shadow_mat_entries( 848 | self.vstride_nfa_shadow_mat_entries, self.table_id_list 849 | ) 850 | self.runtime_policy_mat_entries = \ 851 | self.generate_runtime_policy_mat_entries(self.policies) 852 | self.runtime_mat_default_entries = \ 853 | self.generate_runtime_mat_default_entries(self.nfa_shadow_default_entry,\ 854 | self.table_id_list) 855 | 856 | self.runtime_mat_entries = \ 857 | self.runtime_nfa_shadow_mat_entries + self.runtime_policy_mat_entries + \ 858 | self.runtime_mat_default_entries 859 | 860 | 861 | self.pattern2rule_table = \ 862 | BucketPattern2ruleTable(var_stride_shadow_mat_lst= self.vstride_nfa_shadow_mat_entries,\ 863 | pattern_list= self.pattern_list, SC_ID_tuple= self.SC_ID_tuple,\ 864 | switch_config= SWITCH_BUCKET_CONFIG, cover_code_length = self.cover_code_length) 865 | 866 | self.pattern2rule_table.gen_var_stride_shadow_mat_bucket_lst() 867 | self.pattern2rule_table.gen_bucket_pattern2rule_mat_lst() 868 | 869 | print("bucket_var_stride_shadow_mat_lst") 870 | for i in self.pattern2rule_table.bucket_var_stride_shadow_mat_lst: 871 | print(i) 872 | print( "..............................") 873 | print("bucket_var_stride_shadow_mat_lst") 874 | for i in self.pattern2rule_table.bucket_pattern2rule_mat_lst: 875 | print(i) 876 | print( "..............................") 877 | # self.bucket_runtime_nfa_shadow_mat_entries = \ 878 | 879 | # self.pattern2rule_table.gen_runtime_var_stride_shadow_mat_lst() 880 | # self.bucket_runtime_rule_mat_entries = \ 881 | # 
# (orphan continuation of a commented-out line from the previous dump line:)
#     self.pattern2rule_table.gen_runtime_bucket_pattern2rule_mat_lst()

# NOTE(review): the defs below carried `self` in the original; they are
# methods of NFAMatchEntriesShadowGenerator (header on an earlier dump line).


def get_cover_code_length(self, nfa, defer_tree):
    """Number of cover-code bits for this defer tree -- delegates to dim().

    defer_tree example shape: [0, [1], [2, [4]], [3, [5], [6]]].
    """
    return dim(defer_tree)


def parse_pattern_expression(self, patrn_expr_str):
    """Split a pattern expression into (pattern_list, policies).

    The expression is a '|'-separated list of policies, each of which is a
    '*'-separated list of patterns; a leading '~' negates a pattern.
    Escaped separators (backslash-| and backslash-*) are protected by
    temporarily swapping them through the unseen control char chr(8); a
    backslash-dash encodes a space.  Each policy becomes a ternary mask
    string of width PATTERN_MAX_NUM with, per pattern index, '1' (must
    match), '0' (must not match) or '*' (don't care).

    NOTE(review): return_pattern_lst (spaces restored) is built but never
    returned -- the raw pattern_list is returned instead; verify which one
    callers expect.
    NOTE(review): an empty pattern (e.g. '**' or a trailing '*') would crash
    on pattern[0].
    """
    pattern_num = 0
    pattern_list = []
    policies = []
    # Remove spaces, then protect escaped '|' with an unseen ASCII char.
    patrn_expr_str = patrn_expr_str.replace(' ', '')
    patrn_expr_str = patrn_expr_str.replace('\\|', '\b')
    for policy in patrn_expr_str.split('|'):
        policy = policy.replace('\b', '|')    # restore literal '|'
        policy = policy.replace('\\*', '\b')  # protect escaped '*'
        policy_mask = list('*' * PATTERN_MAX_NUM)
        for pattern in policy.split('*'):
            pattern = pattern.replace('\b', '*')  # restore literal '*'
            reverse_flag = False
            if pattern[0] == '~':
                pattern = pattern[1:]
                reverse_flag = True
            if pattern in pattern_list:
                pattern_idx = pattern_list.index(pattern)
            else:
                pattern_idx = pattern_num
                pattern_list.append(pattern)
                pattern_num += 1
            if reverse_flag:
                policy_mask[pattern_idx] = '0'
            else:
                policy_mask[pattern_idx] = '1'
        policies.append(''.join(policy_mask))

    # Patterns were preprocessed upstream with ' ' encoded as backslash-dash;
    # build the restored list (currently unused -- see NOTE above).
    return_pattern_lst = []
    for pattern in pattern_list:
        return_pattern_lst.append(pattern.replace('\\-', ' '))
    return pattern_list, policies


def generate_automaton(self, pattern_list):
    """Build and finalize a pyahocorasick automaton over the patterns."""
    automaton = ahocorasick.Automaton(ahocorasick.STORE_LENGTH)
    for pattern in pattern_list:
        automaton.add_word(pattern)
    automaton.make_automaton()
    return automaton


# NOTE(review): generate_nfa starts here but the dump line ends inside its
# docstring; head preserved as a fragment (body continues on the next dump
# line):
# def generate_nfa(self, automaton_graph_descriptor):
#     '''
#     return (
#         nfa_nodes, nfa_goto_function, nfa_failure_function, \
#         nfa_next_nodes, default_code
#     )  [docstring truncated by the dump]
nfa_nodes, nfa_goto_function, nfa_failure_function, \ 943 | nfa_next_nodes, default_code 944 | ) 945 | ''' 946 | nodes = automaton_graph_descriptor[0] 947 | goto_transitions = automaton_graph_descriptor[1] 948 | failure_transitions = automaton_graph_descriptor[2] 949 | converse_dict = {} 950 | 951 | nfa_nodes = {} 952 | nfa_goto_function = {} 953 | nfa_failure_function = {} 954 | default_code = [] 955 | pattern_idx = 0 956 | for node_id in range(len(nodes)): 957 | origin_node_id = nodes[node_id][0] 958 | converse_dict[origin_node_id] = node_id 959 | accept_flag = nodes[node_id][1] 960 | if accept_flag == 1: 961 | pattern_idx += 1 962 | accept_flag = pattern_idx 963 | nfa_nodes[node_id] = accept_flag 964 | # nfa_next_nodes[node_id] = [] 965 | nfa_goto_function[node_id] = {} 966 | nfa_failure_function[node_id] = {} 967 | 968 | for edge in goto_transitions: 969 | start_node_id = converse_dict[edge[0]] 970 | transfer_char = edge[1] 971 | end_node_id = converse_dict[edge[2]] 972 | nfa_goto_function[start_node_id][transfer_char] = end_node_id 973 | 974 | for failure_link in failure_transitions: 975 | start_node_id = converse_dict[failure_link[0]] 976 | intermediate_node_id = converse_dict[failure_link[1]] 977 | # dfa_failure_links.append((start_node_id, intermediate_node_id)) 978 | nfa_failure_function[start_node_id] = intermediate_node_id 979 | 980 | 981 | 982 | 983 | bit_width = math.ceil(math.log(len(nfa_nodes), 2)) 984 | for nfa_node_id in nfa_nodes: 985 | str1 = bin(nfa_node_id).replace('0b','') 986 | str1 = str1.zfill(int(bit_width)) 987 | # print str1 988 | default_code.append(str1) 989 | 990 | 991 | nfa_next_nodes = [] 992 | return ( 993 | nfa_nodes, nfa_goto_function, nfa_failure_function, \ 994 | nfa_next_nodes, default_code 995 | ) 996 | 997 | def generate_dfa(self, automaton_graph_descriptor): 998 | nodes = automaton_graph_descriptor[0] 999 | edges = automaton_graph_descriptor[1] 1000 | failure_links = automaton_graph_descriptor[2] 1001 | converse_dict = {} 
1002 | dfa_nodes = {} 1003 | dfa_edges = [] 1004 | dfa_failure_links = [] 1005 | dfa_next_nodes = {} 1006 | default_code = [] 1007 | pattern_idx = 0 1008 | dfa_goto_function = {} 1009 | dfa_failure_function = {} 1010 | for node_id in range(len(nodes)): 1011 | origin_node_id = nodes[node_id][0] 1012 | converse_dict[origin_node_id] = node_id 1013 | accept_flag = nodes[node_id][1] 1014 | if accept_flag == 1: 1015 | pattern_idx += 1 1016 | accept_flag = pattern_idx 1017 | dfa_nodes[node_id] = accept_flag 1018 | dfa_next_nodes[node_id] = [] 1019 | dfa_goto_function[node_id] = {} 1020 | for edge in edges: 1021 | start_node_id = converse_dict[edge[0]] 1022 | transfer_char = edge[1] 1023 | end_node_id = converse_dict[edge[2]] 1024 | # dfa_edges.append( 1025 | # (start_node_id, transfer_char, end_node_id, 1) 1026 | # ) 1027 | dfa_next_nodes[start_node_id].append( 1028 | (transfer_char, end_node_id) 1029 | ) 1030 | dfa_goto_function[start_node_id][transfer_char] = end_node_id 1031 | for failure_link in failure_links: 1032 | start_node_id = converse_dict[failure_link[0]] 1033 | intermediate_node_id = converse_dict[failure_link[1]] 1034 | dfa_failure_links.append((start_node_id, intermediate_node_id)) 1035 | dfa_failure_function[start_node_id] = intermediate_node_id 1036 | for node in dfa_nodes: 1037 | for i in range(256): 1038 | c = chr(i) 1039 | if c in dfa_goto_function[node].keys(): 1040 | dfa_edges.append( 1041 | (node, c, dfa_goto_function[node][c], 1) 1042 | ) 1043 | else: 1044 | if node == 0: 1045 | continue 1046 | else: 1047 | current_node = node 1048 | while True: 1049 | next_node = dfa_failure_function[current_node] 1050 | if c in dfa_goto_function[next_node].keys(): 1051 | dfa_edges.append( 1052 | (node, c, dfa_goto_function[next_node][c], 0) 1053 | ) 1054 | break 1055 | elif next_node == 0: 1056 | break 1057 | current_node = next_node 1058 | bit_width = math.ceil(math.log(len(dfa_nodes), 2)) 1059 | for dfa_node_id in dfa_nodes: 1060 | str1 = 
bin(dfa_node_id).replace('0b','') 1061 | str1 = str1.zfill(int(bit_width)) 1062 | # print str1 1063 | default_code.append(str1) 1064 | return ( 1065 | dfa_nodes, dfa_edges, dfa_failure_links, \ 1066 | dfa_next_nodes, default_code 1067 | ) 1068 | ''' 1069 | dfa_nodes {0: 0, 1: 0, 2: 1, 3: 0, 4: 2} key: nodeID, value: accepting_flag 1070 | ''' 1071 | 1072 | def generate_defer_tree(self, nfa): 1073 | ''' 1074 | {0: 0, 1: 0, 2: 0, 3: 1, 4: 0, 5: 0, 6: 2} 1075 | {0: {'h': 4, 's': 1}, 1: {'h': 2}, 2: {'e': 3}, 3: {}, 4: {'i': 5}, 5: {'s': 6}, 6: {}} 1076 | {0: {}, 1: 0, 2: 4, 3: 0, 4: 0, 5: 0, 6: 1} 1077 | ''' 1078 | failure_transitions = nfa[2] 1079 | 1080 | tree_son = {} 1081 | for i in failure_transitions.keys(): 1082 | tree_son[i] = [] 1083 | for j in failure_transitions.keys(): 1084 | if j == 0: 1085 | continue 1086 | tree_son[failure_transitions[j]].append(j) 1087 | 1088 | root = 0 1089 | # print failure_transitions 1090 | # print "Tree son" 1091 | # print tree_son 1092 | tree = [] 1093 | tree = get_tree(root,tree_son) 1094 | return tree 1095 | 1096 | # tree = [0] 1097 | # q = Queue.Queue() 1098 | # q.put(0) 1099 | 1100 | # current_tree = tree 1101 | # while not q.empty(): 1102 | # sub_root = q.get() 1103 | # lst = tree_son[sub_root] 1104 | # for i in lst: 1105 | # tree.append([i]) 1106 | 1107 | 1108 | 1109 | def generate_fulldfa(self, automaton_graph_descriptor): 1110 | nodes = automaton_graph_descriptor[0] 1111 | edges = automaton_graph_descriptor[1] 1112 | failure_links = automaton_graph_descriptor[2] 1113 | converse_dict = {} 1114 | dfa_nodes = {} 1115 | dfa_edges = [] 1116 | dfa_failure_links = [] 1117 | dfa_next_nodes = {} 1118 | default_code = [] 1119 | pattern_idx = 0 1120 | dfa_goto_function = {} 1121 | dfa_failure_function = {} 1122 | for node_id in range(len(nodes)): 1123 | origin_node_id = nodes[node_id][0] 1124 | converse_dict[origin_node_id] = node_id 1125 | accept_flag = nodes[node_id][1] 1126 | if accept_flag == 1: 1127 | pattern_idx += 1 1128 | 
accept_flag = pattern_idx 1129 | dfa_nodes[node_id] = accept_flag 1130 | dfa_next_nodes[node_id] = [] 1131 | dfa_goto_function[node_id] = {} 1132 | for edge in edges: 1133 | start_node_id = converse_dict[edge[0]] 1134 | transfer_char = edge[1] 1135 | end_node_id = converse_dict[edge[2]] 1136 | # dfa_edges.append( 1137 | # (start_node_id, transfer_char, end_node_id, 1) 1138 | # ) 1139 | dfa_next_nodes[start_node_id].append( 1140 | (transfer_char, end_node_id) 1141 | ) 1142 | dfa_goto_function[start_node_id][transfer_char] = end_node_id 1143 | for failure_link in failure_links: 1144 | start_node_id = converse_dict[failure_link[0]] 1145 | intermediate_node_id = converse_dict[failure_link[1]] 1146 | dfa_failure_links.append((start_node_id, intermediate_node_id)) 1147 | dfa_failure_function[start_node_id] = intermediate_node_id 1148 | for node in dfa_nodes: 1149 | for i in range(256): 1150 | c = chr(i) 1151 | if c in dfa_goto_function[node].keys(): 1152 | dfa_edges.append( 1153 | (node, c, dfa_goto_function[node][c], 1) 1154 | ) 1155 | else: 1156 | if node == 0: 1157 | dfa_edges.append( 1158 | (node, c, node, 0) 1159 | ) 1160 | dfa_goto_function[node][c] = node 1161 | else: 1162 | current_node = node 1163 | while True: 1164 | next_node = dfa_failure_function[current_node] 1165 | if c in dfa_goto_function[next_node].keys(): 1166 | dfa_edges.append( 1167 | (node, c, dfa_goto_function[next_node][c], 0) 1168 | ) 1169 | break 1170 | elif next_node == 0: 1171 | break 1172 | current_node = next_node 1173 | bit_width = math.ceil(math.log(len(dfa_nodes), 2)) 1174 | for dfa_node_id in dfa_nodes: 1175 | str1 = bin(dfa_node_id).replace('0b','') 1176 | str1 = str1.zfill(int(bit_width)) 1177 | # print str1 1178 | default_code.append(str1) 1179 | return ( 1180 | dfa_nodes, dfa_edges, dfa_failure_links, \ 1181 | dfa_next_nodes, default_code 1182 | ) 1183 | ''' 1184 | dfa_nodes {0: 0, 1: 0, 2: 1, 3: 0, 4: 2} key: nodeID, value: accepting_flag 1185 | ''' 1186 | def recode_dfa(self, 
dfa_descriptor, shadow_code): 1187 | return ( 1188 | dfa_descriptor[0], dfa_descriptor[1], \ 1189 | dfa_descriptor[2], dfa_descriptor[3], shadow_code 1190 | ) 1191 | 1192 | def generate_multi_stride_dfa(self, dfa_descriptor, stride): 1193 | dfa_nodes = dfa_descriptor[0] 1194 | dfa_edges = dfa_descriptor[1] 1195 | dfa_failure_links = dfa_descriptor[2] 1196 | dfa_next_nodes = dfa_descriptor[3] 1197 | shadow_code = dfa_descriptor[4] 1198 | dfa_next_nodes_extend = {} 1199 | msdfa_nodes = dfa_nodes 1200 | msdfa_edges = [] 1201 | msdfa_next_nodes = {} 1202 | for dfa_node_id in dfa_nodes: 1203 | dfa_next_nodes_extend[dfa_node_id] = dfa_next_nodes[dfa_node_id][:] 1204 | msdfa_next_nodes[dfa_node_id] = [] 1205 | # Extend single stride DFA first 1206 | for (start_node_id, transfer_char, end_node_id, type) in dfa_edges: 1207 | if start_node_id == 0 and type == 1: 1208 | for star_num in range(1, stride): 1209 | transfer_chars = b'\xff' * star_num + transfer_char 1210 | dfa_next_nodes_extend[start_node_id].append( 1211 | (transfer_chars, end_node_id) 1212 | ) 1213 | if dfa_nodes[end_node_id] != 0 and type == 1: 1214 | for star_num in range(1, stride): 1215 | transfer_chars = transfer_char + b'\xff' * star_num 1216 | dfa_next_nodes_extend[start_node_id].append( 1217 | (transfer_chars, end_node_id) 1218 | ) 1219 | # Get all transistion edges of multi-stride DFA 1220 | for dfa_node in dfa_nodes: 1221 | start_node_id = dfa_node 1222 | self.find_multi_stride_edges( 1223 | msdfa_edges, msdfa_next_nodes, dfa_next_nodes_extend, \ 1224 | start_node_id, b'', start_node_id, stride 1225 | ) 1226 | # Process failure links finally 1227 | for failure_link in dfa_failure_links: 1228 | start_node_id = failure_link[0] 1229 | # # Below condition statements indicate what we care about is 1230 | # # the input whether hit one of the patterns, not all patterns 1231 | # if msdfa_next_nodes[start_node_id] != 0: 1232 | # continue 1233 | intermediate_node_id = failure_link[1] 1234 | for next_node in 
msdfa_next_nodes[intermediate_node_id]: 1235 | transfer_chars = next_node[0] 1236 | end_node_id = next_node[1] 1237 | cover_flag = False 1238 | # Check whether this failure link endge is valid 1239 | for origin_next_node in msdfa_next_nodes[start_node_id]: 1240 | existing_path = origin_next_node[0] 1241 | cover_flag = True 1242 | for idx in range(stride): 1243 | if transfer_chars[idx] != existing_path[idx] \ 1244 | and ord(b'\xff') != existing_path[idx]: 1245 | cover_flag = False 1246 | break 1247 | if not cover_flag: 1248 | msdfa_edges.append( 1249 | (start_node_id, transfer_chars, end_node_id, 0) 1250 | ) 1251 | return (msdfa_nodes, msdfa_edges, shadow_code) 1252 | 1253 | 1254 | def find_multi_stride_edges(self, msdfa_edges, msdfa_next_nodes, \ 1255 | dfa_next_nodes, start_node_id, \ 1256 | current_path, current_node_id, stride): 1257 | for next_node in dfa_next_nodes[current_node_id]: 1258 | next_path = current_path + next_node[0] 1259 | next_node_id = next_node[1] 1260 | if len(next_path) < stride: 1261 | self.find_multi_stride_edges( 1262 | msdfa_edges, msdfa_next_nodes, dfa_next_nodes, \ 1263 | start_node_id, next_path, next_node_id, stride 1264 | ) 1265 | elif len(next_path) == stride: 1266 | transfer_chars = next_path 1267 | end_node_id = next_node_id 1268 | msdfa_edges.append( 1269 | (start_node_id, transfer_chars, end_node_id, 1) 1270 | ) 1271 | msdfa_next_nodes[start_node_id].append( 1272 | (transfer_chars, end_node_id) 1273 | ) 1274 | else: 1275 | continue 1276 | 1277 | def generate_nfa_mat_entreis(self, nfa, defer_tree): 1278 | ''' 1279 | nfa: 1280 | {0: 0, 1: 0, 2: 0, 3: 1, 4: 0, 5: 0, 6: 2} 1281 | {0: {'h': 4, 's': 1}, 1: {'h': 2}, 2: {'e': 3}, 3: {}, 4: {'i': 5}, 5: {'s': 6}, 6: {}} 1282 | {0: {}, 1: 0, 2: 4, 3: 0, 4: 0, 5: 0, 6: 1} 1283 | ''' 1284 | #reorder entries 1285 | nfa_nodes = nfa[0] 1286 | goto_transitions= nfa[1] 1287 | failure_transitions = nfa[2] 1288 | default_code = nfa[4] 1289 | mat_entries = [] 1290 | stateLst = [] 1291 | 
postOrderTranverseSort(defer_tree, stateLst) 1292 | # print "default_code" 1293 | # print default_code 1294 | # print 'stateLst' 1295 | # print stateLst 1296 | for current_state in stateLst: 1297 | for received_chars in goto_transitions[current_state].keys(): 1298 | match = (default_code[current_state], received_chars) 1299 | next_state = goto_transitions[current_state][received_chars] 1300 | action = 'goto' 1301 | modifier = 0 1302 | if nfa_nodes[next_state] != 0: 1303 | modifier = 1 << (nfa_nodes[next_state] - 1) 1304 | action_params = (default_code[next_state], modifier) 1305 | mat_entries.append((match, action, action_params)) 1306 | 1307 | return mat_entries 1308 | def generate_nfa_shadow_mat_entries(self, mat_entries_lst, defer_tree,SC_ID_tuple): 1309 | ''' 1310 | (('001', 'h'), 'goto', ('010', 0)) 1311 | 1312 | (('010', 'e'), 'goto', ('011', 1)) 1313 | (('100', 'i'), 'goto', ('101', 0)) 1314 | (('101', 's'), 'goto', ('110', 2)) 1315 | (('000', 'h'), 'goto', ('100', 0)) 1316 | (('000', 's'), 'goto', ('001', 0)) 1317 | ''' 1318 | shadow_code = SC_ID_tuple[0] 1319 | ID_code = SC_ID_tuple[1] 1320 | shadow_coded_lst = [] 1321 | for item in mat_entries_lst:#: 1322 | shadow_state_ID = shadow_code[int(item[0][0],2)] 1323 | ID_state_ID = ID_code[int(item[2][0],2)] 1324 | # print "reorder compress lst item", item, item[0][0], "SC",int(item[0][0],2), shadow_state_ID, "ID", item[2][0], int(item[2][0],2),ID_state_ID 1325 | temp_item = ((shadow_state_ID,item[0][1]),item[1],(ID_state_ID,item[2][1])) 1326 | shadow_coded_lst.append(temp_item) 1327 | return shadow_coded_lst 1328 | def generate_default_shadow_mat_entry(self, mat_entries_lst, defer_tree,SC_ID_tuple): 1329 | """ 1330 | return a default shadow mat entry 1331 | (('***', '*'), 'goto', ('ROOT_EXACT_CODE', 0)) 1332 | """ 1333 | shadow_code = SC_ID_tuple[0] 1334 | ID_code = SC_ID_tuple[1] 1335 | temp_item = mat_entries_lst[-1] 1336 | default_entry = ((shadow_code[0],'*'),temp_item[1],(ID_code[0],0)) 1337 | return 
default_entry 1338 | def generate_dfa_mat_entries(self, msdfa_descriptor): 1339 | msdfa_nodes = msdfa_descriptor[0] 1340 | msdfa_edges = msdfa_descriptor[1] 1341 | shadow_code = msdfa_descriptor[2] 1342 | mat_entries = [] 1343 | for (current_state, received_chars, next_state, type) in msdfa_edges: 1344 | match = (shadow_code[current_state], received_chars) 1345 | # if msdfa_nodes[next_state] != 0: 1346 | # action = 'accept' 1347 | # else: 1348 | # action = 'goto' 1349 | action = 'goto' 1350 | modifier = 0 1351 | if msdfa_nodes[next_state] != 0: 1352 | modifier = 1 << (msdfa_nodes[next_state] - 1) 1353 | action_params = (shadow_code[next_state], modifier) 1354 | mat_entries.append((match, action, action_params)) 1355 | return mat_entries 1356 | 1357 | def generate_runtime_mat_default_entries(self, default_entry, table_id_list): 1358 | mat_default_entries = [] 1359 | # stride_mat_default_entry = {} 1360 | # stride_mat_default_entry["table_name"] = \ 1361 | # SWITCH_CONFIG["stride_mat_name"] 1362 | # stride_mat_default_entry["default_action"]= True 1363 | # stride_mat_default_entry["action_name"]= \ 1364 | # SWITCH_CONFIG["stride_action_name"] 1365 | # stride_mat_default_entry["action_params"]= { 1366 | # SWITCH_CONFIG["stride_param"]: 1, 1367 | # } 1368 | # mat_default_entries.append(stride_mat_default_entry) 1369 | for table_id in table_id_list: 1370 | dfa_mat_default_entry = {} 1371 | dfa_mat_default_entry["table_name"] = \ 1372 | SWITCH_CONFIG["dfa_mat_name"] % table_id 1373 | dfa_mat_default_entry["default_action"]= True 1374 | dfa_mat_default_entry["action_name"]= \ 1375 | SWITCH_CONFIG["goto_action_name"] % self.stride 1376 | dfa_mat_default_entry["action_params"]= { 1377 | SWITCH_CONFIG["next_state"]: \ 1378 | generate_state_value_mask(default_entry[2][0], int(self.cover_code_length))[0], 1379 | SWITCH_CONFIG["modifier"]: 0, 1380 | } 1381 | mat_default_entries.append(dfa_mat_default_entry) 1382 | policy_mat_default_entry = {} 1383 | 
policy_mat_default_entry["table_name"] = \ 1384 | SWITCH_CONFIG["policy_mat_name"] 1385 | policy_mat_default_entry["default_action"]= True 1386 | policy_mat_default_entry["action_name"]= \ 1387 | SWITCH_CONFIG["drop_action_name"] 1388 | policy_mat_default_entry["action_params"]= { 1389 | } 1390 | mat_default_entries.append(policy_mat_default_entry) 1391 | return mat_default_entries 1392 | 1393 | def generate_runtime_dfa_mat_entries(self, mat_entries, table_id_list): 1394 | runtime_mat_entries = [] 1395 | for table_id in table_id_list: 1396 | for (match, action, action_params) in mat_entries: 1397 | runtime_mat_entry = {} 1398 | runtime_mat_entry["table_name"] = \ 1399 | SWITCH_CONFIG["dfa_mat_name"] % table_id 1400 | runtime_mat_entry["match"] = { 1401 | SWITCH_CONFIG["current_state"]: match[0] 1402 | } 1403 | for idx in range(len(match[1])): 1404 | received_char = match[1][idx] 1405 | # Follow systement is for difference 1406 | # between python2 and python3 1407 | if type(received_char) != int: 1408 | received_char = ord(match[1][idx]) 1409 | if ord(b'\xff') != received_char: 1410 | field_name = SWITCH_CONFIG["received_char"] % idx 1411 | runtime_mat_entry["match"][field_name] \ 1412 | = [received_char, 255] 1413 | if action == "goto": 1414 | runtime_mat_entry["action_name"] = \ 1415 | SWITCH_CONFIG["goto_action_name"] % self.stride 1416 | runtime_mat_entry["action_params"] = { 1417 | SWITCH_CONFIG["next_state"]: action_params[0], 1418 | SWITCH_CONFIG["modifier"]: action_params[1], 1419 | } 1420 | # elif action == "accept": 1421 | # runtime_mat_entry["action_name"] = \ 1422 | # SWITCH_CONFIG["accept_action_name"] 1423 | # runtime_mat_entry["action_params"] = { 1424 | # SWITCH_CONFIG["next_state"]: action_params[0], 1425 | # } 1426 | runtime_mat_entries.append(runtime_mat_entry) 1427 | return runtime_mat_entries 1428 | 1429 | def generate_runtime_nfa_shadow_mat_entries(self, mat_entries, table_id_list): 1430 | ''' 1431 | Generate runtime nfa mat entries (default 
entry included) 1432 | 1433 | (('011', 'h'), 'goto', ('110', 0)) 1434 | (('110', 'e'), 'goto', ('000', 1)) 1435 | (('11*', 'e'), 'goto', ('001', 2)) 1436 | (('00*', 'r'), 'goto', ('100', 4)) 1437 | (('***', 'h'), 'goto', ('111', 0)) 1438 | (('***', 's'), 'goto', ('011', 0)) 1439 | (('***', '*'), 'goto', ('010', 0)) 1440 | ''' 1441 | MAX_STRIDE = SWITCH_CONFIG['max_stride'] 1442 | runtime_nfa_shadow_mat_entries = [] 1443 | for table_id in table_id_list: 1444 | for (match, action, action_params) in mat_entries: 1445 | runtime_mat_entry = {} 1446 | runtime_mat_entry["table_name"] = \ 1447 | SWITCH_CONFIG["dfa_mat_name"] % table_id 1448 | 1449 | state, mask = generate_state_value_mask(match[0], int(self.cover_code_length)) 1450 | 1451 | runtime_mat_entry["match"] = { 1452 | SWITCH_CONFIG["current_state"]: [state, mask] 1453 | 1454 | } 1455 | 1456 | if len(match[1]) > MAX_STRIDE: 1457 | print "MAX_STRIDE EXCEED ERROR" 1458 | exit(1) 1459 | for idx in range(len(match[1])): 1460 | received_char = match[1][idx] 1461 | # Follow systement is for difference 1462 | # between python2 and python3 1463 | if type(received_char) != int: 1464 | received_char = ord(match[1][idx]) 1465 | if ord(b'\xff') != received_char: 1466 | field_name = SWITCH_CONFIG["received_char"] % idx 1467 | runtime_mat_entry["match"][field_name] \ 1468 | = [received_char, 255] 1469 | else: 1470 | field_name = SWITCH_CONFIG["received_char"] % idx 1471 | runtime_mat_entry["match"][field_name] \ 1472 | = [0, 0] 1473 | if len(match[1]) < MAX_STRIDE:# padding the match str field 1474 | for idx in range(len(match[1]),MAX_STRIDE): 1475 | field_name = SWITCH_CONFIG["received_char"] % idx 1476 | runtime_mat_entry["match"][field_name] \ 1477 | = [0, 0] 1478 | if action == "goto": 1479 | runtime_mat_entry["action_name"] = \ 1480 | SWITCH_CONFIG["goto_action_name"] % self.stride 1481 | runtime_mat_entry["action_params"] = { 1482 | SWITCH_CONFIG["next_state"]: generate_state_value_mask(action_params[0], 
int(self.cover_code_length))[0], 1483 | SWITCH_CONFIG["modifier"]: action_params[1], 1484 | } 1485 | # elif action == "accept": 1486 | # runtime_mat_entry["action_name"] = \ 1487 | # SWITCH_CONFIG["accept_action_name"] 1488 | # runtime_mat_entry["action_params"] = { 1489 | # SWITCH_CONFIG["next_state"]: action_params[0], 1490 | # } 1491 | runtime_nfa_shadow_mat_entries.append(runtime_mat_entry) 1492 | 1493 | 1494 | 1495 | return runtime_nfa_shadow_mat_entries 1496 | 1497 | def generate_runtime_policy_mat_entries(self, policies): 1498 | runtime_mat_entries = [] 1499 | for policy_mask in policies: 1500 | match_field = 0 1501 | match_mask = 0 1502 | for pattern_bit in reversed(policy_mask): 1503 | if pattern_bit == '0': 1504 | match_field = (match_field << 1) + 0 1505 | match_mask = (match_mask << 1) + 1 1506 | elif pattern_bit == '1': 1507 | match_field = (match_field << 1) + 1 1508 | match_mask = (match_mask << 1) + 1 1509 | else: 1510 | match_field = (match_field << 1) + 0 1511 | match_mask = (match_mask << 1) + 0 1512 | runtime_mat_entry = {} 1513 | runtime_mat_entry["table_name"] = SWITCH_CONFIG["policy_mat_name"] 1514 | runtime_mat_entry["match"] = { 1515 | SWITCH_CONFIG["pattern_state"]: [match_field, match_mask] 1516 | } 1517 | runtime_mat_entry["action_name"] = SWITCH_CONFIG["policy_action_name"] 1518 | runtime_mat_entry["action_params"] = {} 1519 | runtime_mat_entries.append(runtime_mat_entry) 1520 | return runtime_mat_entries 1521 | 1522 | def get_pattern_list(self): 1523 | return self.pattern_list 1524 | 1525 | def get_policies(self): 1526 | return self.policies 1527 | 1528 | def get_automaton(self): 1529 | return self.automaton 1530 | 1531 | def get_dfa(self): 1532 | return self.dfa 1533 | 1534 | def get_multi_stride_dfa(self): 1535 | return self.msdfa 1536 | 1537 | def get_dfa_mat_entries(self): 1538 | return self.dfa_mat_entries 1539 | 1540 | def get_runtime_dfa_mat_entries(self): 1541 | return self.runtime_dfa_mat_entries 1542 | 1543 | def 
get_runtime_nfa_shadow_mat_entries(self): 1544 | return self.runtime_nfa_shadow_mat_entries 1545 | def get_runtime_policy_mat_entries(self): 1546 | return self.runtime_policy_mat_entries 1547 | 1548 | def get_runtime_mat_default_entries(self): 1549 | return self.runtime_mat_default_entries 1550 | 1551 | def get_runtime_mat_entries(self): 1552 | return self.runtime_mat_entries 1553 | 1554 | 1555 | 1556 | def get_runtime_mat_entries_json(self): 1557 | return json.dumps( 1558 | self.runtime_mat_entries, indent=4, separators=(',', ': ') 1559 | ) 1560 | 1561 | def get_runtime_mat_default_entries_json(self): 1562 | return json.dumps( 1563 | self.runtime_mat_default_entries, indent=4, separators=(',', ': ') 1564 | ) 1565 | 1566 | 1567 | if __name__ == '__main__': 1568 | 1569 | 1570 | # get_tree 1571 | x = NFAMatchEntriesShadowGenerator("she | his", stride=2,table_id_list=[0,1,2]) 1572 | 1573 | print("XXXXXXXXXXXXXXXXXXXXXXXXXXXx") 1574 | print x.nfa[0] 1575 | print x.nfa[1] 1576 | print x.nfa[2] 1577 | print x.defer_tree 1578 | #[0, [1, [6]], [3], [4, [2]], [5]] 1579 | print x.SC_ID_tuple[0] 1580 | print x.SC_ID_tuple[1] 1581 | print x.cover_code_length 1582 | print len(x.nfa_mat_entries) 1583 | for i in x.nfa_mat_entries: 1584 | print i 1585 | print("***************************************") 1586 | for i in x.nfa_shadow_mat_entries: 1587 | print i 1588 | print x.nfa_shadow_default_entry 1589 | 1590 | print("***************************************") 1591 | mat_lst = x.nfa_shadow_mat_entries 1592 | 1593 | vtable0 = VarStrideEnlarger(entries_list= x.nfa_mat_entries,nfa=x.nfa,defer_tree_lst=x.defer_tree, SC_ID_tuple=x.SC_ID_tuple) 1594 | 1595 | 1596 | vtable0.dump_sub_table() 1597 | print "************" 1598 | 1599 | vtable0.var_striding_simple_main_procedure(K=4) 1600 | 1601 | vtable0.allocate_shadowcode() 1602 | vtable0.dump_var_stride_mat() 1603 | vtable0.dump_var_stride_shadow_mat() 1604 | 1605 | print "************" 1606 | 1607 | 
# -------------------------------------------------------------------------
# /entrygen/entries_generator_simple.py
# -------------------------------------------------------------------------
# coding=utf-8
# Generating a DFA, without Shadowencoding.
import ahocorasick
import json
import math
from config import *


class DFAMatchEntriesSimpleGenerator():
    """Generate P4 match-action-table entries for a plain (non-shadow-coded)
    Aho-Corasick DFA built from a pattern expression such as
    "\\|oo\\-aa\\| | ~his"."""

    def __init__(self, pattern_expression, stride=1, table_id_list=None):
        # Init and configure the automaton.
        # BUGFIX: avoid the mutable default argument; None now stands in for
        # the historical default [0].
        self.stride = stride
        self.table_id_list = [0] if table_id_list is None else table_id_list
        self.pattern_list, self.policies = \
            self.parse_pattern_expression(pattern_expression)
        self.automaton = self.generate_automaton(self.pattern_list)
        # Generate DFA descriptors from the automaton dump.
        self.dfa = self.generate_dfa(self.automaton.dump())
        self.fulldfa = self.generate_fulldfa(self.automaton.dump())
        self.default_code = self.dfa[4]
        # stride should always be 1; msdfa is unused for now.
        self.msdfa = self.generate_multi_stride_dfa(self.dfa, self.stride)
        self.dfa_mat_entries = self.generate_dfa_mat_entries(self.msdfa)
        self.runtime_dfa_mat_entries = self.generate_runtime_dfa_mat_entries(
            self.dfa_mat_entries, self.table_id_list
        )
        self.runtime_policy_mat_entries = \
            self.generate_runtime_policy_mat_entries(self.policies)
        self.runtime_mat_entries = \
            self.runtime_dfa_mat_entries + self.runtime_policy_mat_entries
        self.runtime_mat_default_entries = \
            self.generate_runtime_mat_default_entries(self.table_id_list)

    def parse_pattern_expression(self, patrn_expr_str):
        """Parse a pattern expression into (pattern_list, policy_masks).

        '|' separates policies, '*' separates patterns inside a policy and a
        leading '~' negates a pattern; '\\|', '\\*' and '\\-' escape the
        literal characters.  Each policy becomes a PATTERN_MAX_NUM-wide mask
        of '0'/'1'/'*' characters.
        """
        pattern_num = 0
        pattern_list = []
        policies = []
        # Remove space in the pattern expression string.
        patrn_expr_str = patrn_expr_str.replace(' ', '')
        # Hide escaped '|' behind an unseen ASCII char while splitting.
        patrn_expr_str = patrn_expr_str.replace('\|', '\b')
        for policy in patrn_expr_str.split('|'):
            # Restore '\|' to '|'.
            policy = policy.replace('\b', '|')
            # Hide escaped '*' the same way.
            policy = policy.replace('\*', '\b')
            policy_mask = list('*' * PATTERN_MAX_NUM)
            for pattern in policy.split('*'):
                # Restore '\*' to '*'.
                pattern = pattern.replace('\b', '*')
                # BUGFIX: an empty segment (stray '*' or '|') used to crash
                # on pattern[0]; skip it instead.
                if not pattern:
                    continue
                reverse_flag = False
                if pattern[0] == '~':
                    pattern = pattern[1:]
                    reverse_flag = True
                if pattern in pattern_list:
                    pattern_idx = pattern_list.index(pattern)
                else:
                    pattern_idx = pattern_num
                    pattern_list.append(pattern)
                    pattern_num += 1
                policy_mask[pattern_idx] = '0' if reverse_flag else '1'
            policies.append(''.join(policy_mask))
        # Patterns were preprocessed with ' ' escaped as '\-'; restore it.
        return [p.replace('\-', ' ') for p in pattern_list], policies

    def generate_automaton(self, pattern_list):
        """Build the Aho-Corasick automaton over *pattern_list*."""
        automaton = ahocorasick.Automaton(ahocorasick.STORE_LENGTH)
        for pattern in pattern_list:
            automaton.add_word(pattern)
        automaton.make_automaton()
        return automaton

    def _build_dfa(self, automaton_graph_descriptor, root_self_loops):
        """Shared body of generate_dfa()/generate_fulldfa() (was duplicated).

        Returns (dfa_nodes, dfa_edges, dfa_failure_links, dfa_next_nodes,
        default_code).  dfa_nodes maps nodeID -> accepting pattern index
        (0 = non-accepting), e.g. {0: 0, 1: 0, 2: 1, 3: 0, 4: 2}.
        When *root_self_loops* is true, explicit root self-loop edges are
        emitted (and recorded in the goto table) for every byte without a
        goto transition, which also makes every other state fall through to
        the root explicitly.
        """
        nodes = automaton_graph_descriptor[0]
        edges = automaton_graph_descriptor[1]
        failure_links = automaton_graph_descriptor[2]
        converse_dict = {}
        dfa_nodes = {}
        dfa_edges = []
        dfa_failure_links = []
        dfa_next_nodes = {}
        default_code = []
        pattern_idx = 0
        dfa_goto_function = {}
        dfa_failure_function = {}
        for node_id in range(len(nodes)):
            origin_node_id = nodes[node_id][0]
            converse_dict[origin_node_id] = node_id
            accept_flag = nodes[node_id][1]
            if accept_flag == 1:
                # Number accepting states 1..N in dump order.
                pattern_idx += 1
                accept_flag = pattern_idx
            dfa_nodes[node_id] = accept_flag
            dfa_next_nodes[node_id] = []
            dfa_goto_function[node_id] = {}
        # Goto edges reported by the automaton (type flag 1).
        for edge in edges:
            start_node_id = converse_dict[edge[0]]
            transfer_char = edge[1]
            end_node_id = converse_dict[edge[2]]
            dfa_next_nodes[start_node_id].append(
                (transfer_char, end_node_id)
            )
            dfa_goto_function[start_node_id][transfer_char] = end_node_id
        for failure_link in failure_links:
            start_node_id = converse_dict[failure_link[0]]
            intermediate_node_id = converse_dict[failure_link[1]]
            dfa_failure_links.append((start_node_id, intermediate_node_id))
            dfa_failure_function[start_node_id] = intermediate_node_id
        # Complete the transition relation over all 256 byte values by
        # chasing failure links (type flag 0 marks derived edges).
        for node in dfa_nodes:
            for i in range(256):
                c = chr(i)
                if c in dfa_goto_function[node]:
                    dfa_edges.append(
                        (node, c, dfa_goto_function[node][c], 1)
                    )
                elif node == 0:
                    if root_self_loops:
                        dfa_edges.append((node, c, node, 0))
                        dfa_goto_function[node][c] = node
                else:
                    current_node = node
                    while True:
                        next_node = dfa_failure_function[current_node]
                        if c in dfa_goto_function[next_node]:
                            dfa_edges.append(
                                (node, c, dfa_goto_function[next_node][c], 0)
                            )
                            break
                        elif next_node == 0:
                            break
                        current_node = next_node
        # Default (binary) state encoding: zero-padded node ids.
        bit_width = math.ceil(math.log(len(dfa_nodes), 2))
        for dfa_node_id in dfa_nodes:
            default_code.append(
                bin(dfa_node_id).replace('0b', '').zfill(int(bit_width))
            )
        return (
            dfa_nodes, dfa_edges, dfa_failure_links,
            dfa_next_nodes, default_code
        )

    def generate_dfa(self, automaton_graph_descriptor):
        """DFA without explicit root self-loops (root misses stay implicit)."""
        return self._build_dfa(automaton_graph_descriptor, False)

    def generate_fulldfa(self, automaton_graph_descriptor):
        """DFA with an explicit root self-loop for every unmatched byte."""
        return self._build_dfa(automaton_graph_descriptor, True)

    def recode_dfa(self, dfa_descriptor, shadow_code):
        """Return the descriptor with its state coding replaced by *shadow_code*."""
        return (
            dfa_descriptor[0], dfa_descriptor[1],
            dfa_descriptor[2], dfa_descriptor[3], shadow_code
        )

    def generate_multi_stride_dfa(self, dfa_descriptor, stride):
        """Expand the 1-stride DFA so one lookup consumes *stride* bytes.

        A b'\xff' byte inside a transfer string acts as a wildcard position.
        Returns (msdfa_nodes, msdfa_edges, shadow_code).
        """
        dfa_nodes = dfa_descriptor[0]
        dfa_edges = dfa_descriptor[1]
        dfa_failure_links = dfa_descriptor[2]
        dfa_next_nodes = dfa_descriptor[3]
        shadow_code = dfa_descriptor[4]
        dfa_next_nodes_extend = {}
        msdfa_nodes = dfa_nodes
        msdfa_edges = []
        msdfa_next_nodes = {}
        for dfa_node_id in dfa_nodes:
            dfa_next_nodes_extend[dfa_node_id] = dfa_next_nodes[dfa_node_id][:]
            msdfa_next_nodes[dfa_node_id] = []
        # Extend the single-stride DFA first: pad root edges on the left and
        # accepting edges on the right with wildcard bytes.
        for (start_node_id, transfer_char, end_node_id, edge_type) in dfa_edges:
            if start_node_id == 0 and edge_type == 1:
                for star_num in range(1, stride):
                    transfer_chars = b'\xff' * star_num + transfer_char
                    dfa_next_nodes_extend[start_node_id].append(
                        (transfer_chars, end_node_id)
                    )
            if dfa_nodes[end_node_id] != 0 and edge_type == 1:
                for star_num in range(1, stride):
                    transfer_chars = transfer_char + b'\xff' * star_num
                    dfa_next_nodes_extend[start_node_id].append(
                        (transfer_chars, end_node_id)
                    )
        # Collect every multi-stride goto edge.
        for dfa_node in dfa_nodes:
            start_node_id = dfa_node
            self.find_multi_stride_edges(
                msdfa_edges, msdfa_next_nodes, dfa_next_nodes_extend,
                start_node_id, b'', start_node_id, stride
            )
        # Process failure links last: inherit the target's edges unless an
        # existing edge of the source already covers the same input.
        for failure_link in dfa_failure_links:
            start_node_id = failure_link[0]
            intermediate_node_id = failure_link[1]
            for next_node in msdfa_next_nodes[intermediate_node_id]:
                transfer_chars = next_node[0]
                end_node_id = next_node[1]
                cover_flag = False
                for origin_next_node in msdfa_next_nodes[start_node_id]:
                    existing_path = origin_next_node[0]
                    cover_flag = True
                    for idx in range(stride):
                        # Normalise to byte values: indexing a byte string
                        # yields a 1-char str on Py2 but an int on Py3.
                        # BUGFIX: the old code compared ord(b'\xff') (an int)
                        # against the raw item, so wildcard positions never
                        # matched under Py2 and duplicate edges leaked in.
                        existing_byte = existing_path[idx]
                        if type(existing_byte) != int:
                            existing_byte = ord(existing_byte)
                        new_byte = transfer_chars[idx]
                        if type(new_byte) != int:
                            new_byte = ord(new_byte)
                        if new_byte != existing_byte and existing_byte != 0xff:
                            cover_flag = False
                            break
                    if cover_flag:
                        # BUGFIX: stop at the first covering edge; previously
                        # only the last existing edge decided coverage.
                        break
                if not cover_flag:
                    msdfa_edges.append(
                        (start_node_id, transfer_chars, end_node_id, 0)
                    )
        return (msdfa_nodes, msdfa_edges, shadow_code)

    def find_multi_stride_edges(self, msdfa_edges, msdfa_next_nodes,
                                dfa_next_nodes, start_node_id,
                                current_path, current_node_id, stride):
        """DFS from *start_node_id* collecting paths of exactly *stride* bytes."""
        for next_node in dfa_next_nodes[current_node_id]:
            next_path = current_path + next_node[0]
            next_node_id = next_node[1]
            if len(next_path) < stride:
                self.find_multi_stride_edges(
                    msdfa_edges, msdfa_next_nodes, dfa_next_nodes,
                    start_node_id, next_path, next_node_id, stride
                )
            elif len(next_path) == stride:
                msdfa_edges.append(
                    (start_node_id, next_path, next_node_id, 1)
                )
                msdfa_next_nodes[start_node_id].append(
                    (next_path, next_node_id)
                )
            # Paths longer than the stride are simply dropped.

    def generate_dfa_mat_entries(self, msdfa_descriptor):
        """Turn multi-stride DFA edges into abstract (match, action, params) tuples."""
        msdfa_nodes = msdfa_descriptor[0]
        msdfa_edges = msdfa_descriptor[1]
        shadow_code = msdfa_descriptor[2]
        mat_entries = []
        for (current_state, received_chars, next_state, edge_type) in msdfa_edges:
            match = (shadow_code[current_state], received_chars)
            modifier = 0
            if msdfa_nodes[next_state] != 0:
                # One-hot bit of the pattern accepted at next_state.
                modifier = 1 << (msdfa_nodes[next_state] - 1)
            action_params = (shadow_code[next_state], modifier)
            mat_entries.append((match, 'goto', action_params))
        return mat_entries

    def generate_runtime_mat_default_entries(self, table_id_list):
        """Default entries: stride setter, one per-DFA-table miss, policy drop."""
        mat_default_entries = []
        mat_default_entries.append({
            "table_name": SWITCH_CONFIG["stride_mat_name"],
            "default_action": True,
            "action_name": SWITCH_CONFIG["stride_action_name"],
            "action_params": {
                SWITCH_CONFIG["stride_param"]: 1,
            },
        })
        for table_id in table_id_list:
            mat_default_entries.append({
                "table_name": SWITCH_CONFIG["dfa_mat_name"] % table_id,
                "default_action": True,
                "action_name": SWITCH_CONFIG["goto_action_name"] % self.stride,
                "action_params": {
                    # Fall back to state 0 (the root) on a table miss.
                    SWITCH_CONFIG["next_state"]: 0,
                    SWITCH_CONFIG["modifier"]: 0,
                },
            })
        mat_default_entries.append({
            "table_name": SWITCH_CONFIG["policy_mat_name"],
            "default_action": True,
            "action_name": SWITCH_CONFIG["drop_action_name"],
            "action_params": {},
        })
        return mat_default_entries

    def generate_runtime_dfa_mat_entries(self, mat_entries, table_id_list):
        """Materialise abstract entries as runtime dicts, once per table id."""
        runtime_mat_entries = []
        for table_id in table_id_list:
            for (match, action, action_params) in mat_entries:
                runtime_mat_entry = {}
                runtime_mat_entry["table_name"] = \
                    SWITCH_CONFIG["dfa_mat_name"] % table_id
                runtime_mat_entry["match"] = {
                    SWITCH_CONFIG["current_state"]: match[0]
                }
                for idx in range(len(match[1])):
                    received_char = match[1][idx]
                    # Indexing a byte string yields str on Py2, int on Py3.
                    if type(received_char) != int:
                        received_char = ord(received_char)
                    if received_char != 0xff:
                        # 0xff is the wildcard byte: omit the field entirely.
                        field_name = SWITCH_CONFIG["received_char"] % idx
                        runtime_mat_entry["match"][field_name] = \
                            [received_char, 255]
                if action == "goto":
                    runtime_mat_entry["action_name"] = \
                        SWITCH_CONFIG["goto_action_name"] % self.stride
                    runtime_mat_entry["action_params"] = {
                        SWITCH_CONFIG["next_state"]: action_params[0],
                        SWITCH_CONFIG["modifier"]: action_params[1],
                    }
                runtime_mat_entries.append(runtime_mat_entry)
        return runtime_mat_entries

    def generate_runtime_policy_mat_entries(self, policies):
        """Translate policy masks ('0'/'1'/'*' per pattern) into ternary entries."""
        runtime_mat_entries = []
        for policy_mask in policies:
            match_field = 0
            match_mask = 0
            # Bit i of the field corresponds to pattern i, so walk the mask
            # from its last character back to its first.
            for pattern_bit in reversed(policy_mask):
                match_field <<= 1
                match_mask <<= 1
                if pattern_bit == '1':
                    match_field += 1
                if pattern_bit in ('0', '1'):
                    match_mask += 1
            runtime_mat_entries.append({
                "table_name": SWITCH_CONFIG["policy_mat_name"],
                "match": {
                    SWITCH_CONFIG["pattern_state"]: [match_field, match_mask]
                },
                "action_name": SWITCH_CONFIG["policy_action_name"],
                "action_params": {},
            })
        return runtime_mat_entries

    # ---- plain accessors -------------------------------------------------

    def get_pattern_list(self):
        return self.pattern_list

    def get_policies(self):
        return self.policies

    def get_automaton(self):
        return self.automaton

    def get_dfa(self):
        return self.dfa

    def get_multi_stride_dfa(self):
        return self.msdfa

    def get_dfa_mat_entries(self):
        return self.dfa_mat_entries

    def get_runtime_dfa_mat_entries(self):
        return self.runtime_dfa_mat_entries

    def get_runtime_policy_mat_entries(self):
        return self.runtime_policy_mat_entries

    def get_runtime_mat_entries(self):
        return self.runtime_mat_entries

    def get_runtime_mat_default_entries(self):
        return self.runtime_mat_default_entries

    def get_runtime_mat_entries_json(self):
        return json.dumps(
            self.runtime_mat_entries, indent=4, separators=(',', ': ')
        )

    def get_runtime_mat_default_entries_json(self):
        return json.dumps(
            self.runtime_mat_default_entries, indent=4, separators=(',', ': ')
        )


if __name__ == '__main__':
    x = DFAMatchEntriesSimpleGenerator("\|oo\-aa\| | ~his", 2)
    print("Full DFA")
    print("***************************************")
    print(len(x.fulldfa[1]))
    print("***************************************")
    print(x.pattern_list)
print("***************************************") 494 | print x.policies 495 | exit(0) 496 | # x = DFAMatchEntriesSimpleGenerator("her | hers", 1) 497 | dfa = x.dfa 498 | msdfa = x.msdfa 499 | 500 | lst = [] 501 | # for i in range(1, 4): 502 | # x = DFAMatchEntriesSimpleGenerator("dog", i) 503 | # lst.append(x.msdfa) 504 | # for i in lst: 505 | # for j in i: 506 | # print j 507 | # print "*************" 508 | for i in dfa: 509 | print i 510 | print("*******************") 511 | for i in msdfa: 512 | print i 513 | # print("*******************") 514 | print len(x.get_dfa_mat_entries()) 515 | for i in x.get_dfa_mat_entries(): 516 | print(i) 517 | print("*******************") 518 | # for i in x.get_runtime_mat_entries(): 519 | # print(i) 520 | # print("*******************") 521 | # for i in x.get_runtime_mat_default_entries(): 522 | # print(i) 523 | -------------------------------------------------------------------------------- /entrygen/shadow_code_gen.py: -------------------------------------------------------------------------------- 1 | 2 | def generateDefermentForest(DFA, stateNum): 3 | 4 | DF = [] 5 | 6 | for i in range(stateNum): 7 | DF.append([]) 8 | for j in range(i + 1, stateNum): 9 | sameTrans = 0 10 | for k in range(256): 11 | if DFA[i][k] == DFA[j][k]: 12 | sameTrans = sameTrans + 1 13 | DF[i].append(sameTrans) 14 | 15 | return DF 16 | 17 | def generateDFA(stateNum, transitions): 18 | 19 | DFA = [] 20 | 21 | for i in range(stateNum): 22 | DFA.append([]) 23 | for j in range(256): 24 | DFA[i].append(0) 25 | 26 | for transition in transitions: 27 | if isinstance(transition[1], int) == True: 28 | if transition > 255: 29 | print("integer is bigger than 255") 30 | break 31 | 32 | DFA[transition[0]][transition[1]] = transition[2] 33 | elif isinstance(transition[1], str) == True: 34 | for ch in transition[1]: 35 | DFA[transition[0]][ord(ch)] = transition[2] 36 | elif isinstance(transition[1], tuple) == True: 37 | if len(transition[1]) != 2 or 
isinstance(transition[1][0], int) == False or isinstance(transition[1][1], int) == False or transition[1][0] > transition[1][1]: 38 | print("a tuple with 2 integers is needed") 39 | break 40 | 41 | for index in range(transition[1][0], transition[1][1]+1): 42 | DFA[transition[0]][index] = transition[2] 43 | 44 | 45 | return DFA 46 | 47 | def getRoot(DFA, stateNum): 48 | maxSelfTransition = -1 49 | root = 0 50 | for i in range(stateNum): 51 | selfTransiton = 0 52 | for j in range(256): 53 | if DFA[i][j] == i: 54 | selfTransiton = selfTransiton + 1 55 | if selfTransiton > maxSelfTransition: 56 | maxSelfTransition = selfTransiton 57 | root = i 58 | 59 | return root 60 | 61 | def getMaximumSpanningTree(stateNum, DF, root): 62 | edges = [] 63 | fuset = [] 64 | graph = [] 65 | visited = [] 66 | 67 | for i in range(stateNum): 68 | visited.append(0) 69 | 70 | for i in range(stateNum): 71 | graph.append([]) 72 | for j in range(stateNum): 73 | graph[i].append(0) 74 | 75 | for i in range(stateNum): 76 | fuset.append(i) 77 | for j in range(stateNum-i-1): 78 | edges.append([-DF[i][j], i, j+i+1]) 79 | edges.sort() 80 | 81 | 'print edges' 82 | 83 | for edge in edges: 84 | # print getAncestor(fuset, edge[1]) 85 | # print getAncestor(fuset, edge[2]) 86 | # print '*****' 87 | if getAncestor(fuset, edge[1]) != getAncestor(fuset, edge[2]): 88 | graph[edge[1]][edge[2]] = graph[edge[2]][edge[1]] = 1 89 | if getAncestor(fuset, edge[1]) < getAncestor(fuset, edge[2]): 90 | fuset[getAncestor(fuset, edge[2])] = getAncestor(fuset, edge[1]) 91 | else: 92 | fuset[getAncestor(fuset, edge[1])] = getAncestor(fuset, edge[2]) 93 | 94 | 'print graph' 95 | 96 | visited[root] = 1 97 | tree = getTreeWithRoot(stateNum, graph, root, visited) 98 | 99 | return tree 100 | 101 | 102 | def getTreeWithRoot(nodeNum, graph, root, visited): 103 | tree = [] 104 | children = [] 105 | tree.append(root) 106 | for i in range(nodeNum): 107 | if graph[root][i] == 1 and visited[i] == 0: 108 | children.append(i) 109 | 
visited[i] = 1 110 | 111 | if len(children) == 0: 112 | return tree 113 | 114 | for child in children: 115 | tree.append(getTreeWithRoot(nodeNum, graph, child, visited)) 116 | 117 | return tree 118 | 119 | def getAncestor(fuset, index): 120 | while fuset[fuset[index]] != fuset[index]: 121 | fuset[index] = fuset[fuset[index]] 122 | 123 | return fuset[index] 124 | 125 | def generateShadowCode(defermentTree, stateInfo): 126 | stateList = [] 127 | 128 | if len(defermentTree) == 1: 129 | return 130 | 131 | stateList.append([stateInfo[defermentTree[0]]['weight'], defermentTree[0]]) 132 | for i in range(1, len(defermentTree)): 133 | generateShadowCode(defermentTree[i], stateInfo) 134 | stateList.append([stateInfo[defermentTree[i][0]]['weight'], defermentTree[i][0]]) 135 | stateList.sort() 136 | while len(stateList) != 1: 137 | lnode = stateList.pop(0) 138 | rnode = stateList.pop(0) 139 | stateList.append([max(lnode[0], rnode[0]) + 1, [ lnode[1], rnode[1] ] ]) 140 | stateList.sort() 141 | 142 | stateInfo[defermentTree[0]]['weight'] = stateList[0][0] 143 | 144 | # print '*****' 145 | # print stateList[0][1] 146 | # print '*****' 147 | updateInfoWithHufTree(stateInfo, stateList[0][1], defermentTree[0], '', defermentTree) 148 | 149 | return 150 | 151 | def updateInfoWithHufTree(stateInfo, hufTree, fatherNode, prefix, defermentTree): 152 | if isinstance(hufTree, int): 153 | if hufTree == fatherNode: 154 | stateInfo[fatherNode]['localCode'] = prefix 155 | else: 156 | subtree = findSubTreeByRoot(defermentTree, hufTree) 157 | iteratedUpdate(subtree, prefix, stateInfo) 158 | return 159 | else: 160 | for i in range(2): 161 | updateInfoWithHufTree(stateInfo, hufTree[i], fatherNode, prefix+'%d'%i, defermentTree) 162 | return 163 | 164 | '''if isinstance(hufTree[1], int): 165 | if hufTree[1] == fatherNode: 166 | stateInfo[fatherNode]['localCode'] = prefix 167 | else: 168 | iteratedUpdate(defermentTree, hufTree[1]]) 169 | else: 170 | updateInfoWithHufTree(stateInfo, hufTree[1], 
fatherNode, '1'+prefix)''' 171 | 172 | def findSubTreeByRoot(tree, root): 173 | if tree[0] == root: 174 | return tree 175 | elif len(tree) == 1: 176 | return None 177 | else: 178 | for i in range(1, len(tree)): 179 | res = findSubTreeByRoot(tree[i], root) 180 | if res != None: 181 | return res 182 | return None 183 | 184 | def iteratedUpdate(tree, prefix, stateInfo): 185 | stateInfo[tree[0]]['globalCode'] = prefix + stateInfo[tree[0]]['globalCode'] 186 | 187 | if len(tree) == 1: 188 | return 189 | else: 190 | for i in range(1, len(tree)): 191 | iteratedUpdate(tree[i], prefix, stateInfo) 192 | 193 | 194 | def getSpanningDeferTree(DFA): 195 | stateNum = len(DFA[0]) 196 | transitions = [] 197 | for i in range(len(DFA[1])): 198 | transitions.append([DFA[1][i][0], DFA[1][i][1], DFA[1][i][2]]) 199 | 200 | newDFA = generateDFA(stateNum, transitions) 201 | 202 | DF = generateDefermentForest(newDFA, stateNum) 203 | print("SRG") 204 | print(DF) 205 | 206 | root = getRoot(newDFA, stateNum) 207 | # print root 208 | # print "root: ", root 209 | tree = getMaximumSpanningTree(stateNum, DF, root) 210 | 211 | return tree 212 | 213 | def getSCIDWithDFA(DFA): 214 | stateNum = len(DFA[0]) 215 | 216 | transitions = [] 217 | for i in range(len(DFA[1])): 218 | transitions.append([DFA[1][i][0], DFA[1][i][1], DFA[1][i][2]]) 219 | 220 | newDFA = generateDFA(stateNum, transitions) 221 | 222 | DF = generateDefermentForest(newDFA, stateNum) 223 | # print DF 224 | 225 | root = getRoot(newDFA, stateNum) 226 | # print root 227 | 228 | tree = getMaximumSpanningTree(stateNum, DF, root) 229 | #print tree 230 | 231 | stateInfo = [{"globalCode" : '', 'localCode' : '', 'weight' : 0} for i in range(stateNum)] 232 | generateShadowCode(tree, stateInfo) 233 | # print stateInfo 234 | 235 | shadowCode = [] 236 | shadowCodeLen = stateInfo[tree[0]]["weight"] 237 | for i in range(stateNum): 238 | shadowCode.append(stateInfo[i]["globalCode"] + (shadowCodeLen - len(stateInfo[i]["globalCode"])) * '*') 239 | # 
print shadowCode 240 | IDCode = [] 241 | for i in range(stateNum): 242 | IDCode.append(stateInfo[i]["globalCode"] + stateInfo[i]["localCode"] + (shadowCodeLen - len(stateInfo[i]["globalCode"]) - len(stateInfo[i]["localCode"]))*'0') 243 | 244 | # templst = [] 245 | # templst.append(shadowCode) 246 | # templst.append(IDCode) 247 | return (shadowCode,IDCode) 248 | 249 | def getSCIDWithNFA(NFA, defer_tree): 250 | stateNum = len(NFA[0]) 251 | 252 | # transitions = [] 253 | # for i in range(len(DFA[1])): 254 | # transitions.append([DFA[1][i][0], DFA[1][i][1], DFA[1][i][2]]) 255 | 256 | # newDFA = generateDFA(stateNum, transitions) 257 | 258 | # DF = generateDefermentForest(newDFA, stateNum) 259 | # # print DF 260 | 261 | # root = getRoot(newDFA, stateNum) 262 | # # print root 263 | 264 | # tree = getMaximumSpanningTree(stateNum, DF, root) 265 | #print tree 266 | 267 | stateInfo = [{"globalCode" : '', 'localCode' : '', 'weight' : 0} for i in range(stateNum)] 268 | generateShadowCode(defer_tree, stateInfo) 269 | # print stateInfo 270 | 271 | shadowCode = [] 272 | shadowCodeLen = stateInfo[defer_tree[0]]["weight"] 273 | for i in range(stateNum): 274 | shadowCode.append(stateInfo[i]["globalCode"] + (shadowCodeLen - len(stateInfo[i]["globalCode"])) * '*') 275 | # print shadowCode 276 | IDCode = [] 277 | for i in range(stateNum): 278 | IDCode.append(stateInfo[i]["globalCode"] + stateInfo[i]["localCode"] + (shadowCodeLen - len(stateInfo[i]["globalCode"]) - len(stateInfo[i]["localCode"]))*'0') 279 | 280 | # templst = [] 281 | # templst.append(shadowCode) 282 | # templst.append(IDCode) 283 | return (shadowCode,IDCode) 284 | 285 | def getShadowCodeWithNFA(NFA,defer_tree): 286 | stateNum = len(DFA[0]) 287 | 288 | # transitions = [] 289 | # for i in range(len(DFA[1])): 290 | # transitions.append([DFA[1][i][0], DFA[1][i][1], DFA[1][i][2]]) 291 | 292 | # newDFA = generateDFA(stateNum, transitions) 293 | 294 | # DF = generateDefermentForest(newDFA, stateNum) 295 | # # print DF 296 | 
297 | # root = getRoot(newDFA, stateNum) 298 | # # print root 299 | 300 | # tree = getMaximumSpanningTree(stateNum, DF, root) 301 | # #print tree 302 | 303 | stateInfo = [{"globalCode" : '', 'localCode' : '', 'weight' : 0} for i in range(stateNum)] 304 | generateShadowCode(defer_tree, stateInfo) 305 | # print stateInfo 306 | 307 | shadowCode = [] 308 | shadowCodeLen = stateInfo[defer_tree[0]]["weight"] 309 | for i in range(stateNum): 310 | shadowCode.append(stateInfo[i]["globalCode"] + (shadowCodeLen - len(stateInfo[i]["globalCode"])) * '*') 311 | # print shadowCode 312 | 313 | return shadowCode 314 | 315 | def getShadowCodeWithDFA(DFA): 316 | stateNum = len(DFA[0]) 317 | 318 | transitions = [] 319 | for i in range(len(DFA[1])): 320 | transitions.append([DFA[1][i][0], DFA[1][i][1], DFA[1][i][2]]) 321 | 322 | newDFA = generateDFA(stateNum, transitions) 323 | 324 | DF = generateDefermentForest(newDFA, stateNum) 325 | # print DF 326 | 327 | root = getRoot(newDFA, stateNum) 328 | # print root 329 | 330 | tree = getMaximumSpanningTree(stateNum, DF, root) 331 | #print tree 332 | 333 | stateInfo = [{"globalCode" : '', 'localCode' : '', 'weight' : 0} for i in range(stateNum)] 334 | generateShadowCode(tree, stateInfo) 335 | # print stateInfo 336 | 337 | shadowCode = [] 338 | shadowCodeLen = stateInfo[tree[0]]["weight"] 339 | for i in range(stateNum): 340 | shadowCode.append(stateInfo[i]["globalCode"] + (shadowCodeLen - len(stateInfo[i]["globalCode"])) * '*') 341 | # print shadowCode 342 | 343 | return shadowCode 344 | 345 | if __name__ == "__main__": 346 | transitions = [[0, 'abcdefghijklmno', 1], [1, 'b', 1], [1, 'acdefghijklmno', 2], [2, 'bc', 1], [2, 'adefghijklmno', 2]] 347 | 348 | DFA = generateDFA(3, transitions) 349 | print(DFA[0]) 350 | print(DFA[1]) 351 | print(DFA[2]) 352 | 353 | 354 | DF = generateDefermentForest(DFA, 3) 355 | print("Deferment Forest ") 356 | print(DF) 357 | print("Deferment Forest End") 358 | 359 | root = getRoot(DFA, 3) 360 | print(root) 361 | 
362 | tree = getMaximumSpanningTree(3, DF, root) 363 | print("TREE") 364 | print(tree) 365 | 366 | stateInfo = [{"globalCode" : '', 'localCode' : '', 'weight' : 0} for i in range(7)] 367 | tree = [0, [1], [2, [4]], [3, [5], [6]]] 368 | generateShadowCode(tree, stateInfo) 369 | print(stateInfo) 370 | -------------------------------------------------------------------------------- /recv.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import sys 3 | import struct 4 | import os 5 | 6 | from scapy.all import sniff, sendp, hexdump, get_if_list, get_if_hwaddr 7 | from scapy.all import Packet, IPOption 8 | from scapy.all import ShortField, IntField, LongField, BitField, FieldListField, FieldLenField 9 | from scapy.all import IP, TCP, UDP, Raw 10 | from scapy.layers.inet import _IPOption_HDR 11 | 12 | def get_if(): 13 | ifs=get_if_list() 14 | iface=None 15 | for i in get_if_list(): 16 | if "eth0" in i: 17 | iface=i 18 | break; 19 | if not iface: 20 | print("Cannot find eth0 interface") 21 | exit(1) 22 | return iface 23 | 24 | # class IPOption_MRI(IPOption): 25 | # name = "MRI" 26 | # option = 31 27 | # fields_desc = [ _IPOption_HDR, 28 | # FieldLenField("length", None, fmt="B", 29 | # length_of="swids", 30 | # adjust=lambda pkt,l:l+4), 31 | # ShortField("count", 0), 32 | # FieldListField("swids", 33 | # [], 34 | # IntField("", 0), 35 | # length_from=lambda pkt:pkt.count*4) ] 36 | # def handle_pkt(pkt): 37 | # if TCP in pkt and pkt[TCP].dport == 6666: 38 | # print("got a packet") 39 | # pkt.show2() 40 | # # hexdump(pkt) 41 | # sys.stdout.flush() 42 | def handle_pkt(pkt): 43 | if IP in pkt: 44 | print("got a packet") 45 | pkt.show2() 46 | # hexdump(pkt) 47 | sys.stdout.flush() 48 | 49 | def main(): 50 | ifaces = [i for i in os.listdir('/sys/class/net/') if 'eth' in i] 51 | iface = ifaces[0] 52 | print(("sniffing on %s" % iface)) 53 | sys.stdout.flush() 54 | sniff(iface = iface, 55 | prn = lambda x: 
handle_pkt(x)) 56 | 57 | if __name__ == '__main__': 58 | main() -------------------------------------------------------------------------------- /s1-runtime-v1.json: -------------------------------------------------------------------------------- 1 | { 2 | "table_entries": [ 3 | { 4 | "priority": 17, 5 | "table": "MyIngress.t_DFA_match_0", 6 | "action_params": { 7 | "_state": 3, 8 | "pattern_code": 1 9 | }, 10 | "match": { 11 | "hdr.patrns[0].pattern": [ 12 | 104, 13 | 255 14 | ], 15 | "meta.state": [ 16 | 6, 17 | 6 18 | ], 19 | "hdr.patrns[1].pattern": [ 20 | 101, 21 | 255 22 | ] 23 | }, 24 | "action_name": "MyIngress.a_set_state_2_b1" 25 | }, 26 | { 27 | "priority": 16, 28 | "table": "MyIngress.t_DFA_match_0", 29 | "action_params": { 30 | "_state": 3, 31 | "pattern_code": 1 32 | }, 33 | "match": { 34 | "hdr.patrns[0].pattern": [ 35 | 101, 36 | 255 37 | ], 38 | "meta.state": [ 39 | 0, 40 | 7 41 | ] 42 | }, 43 | "action_name": "MyIngress.a_set_state_1_b1" 44 | }, 45 | { 46 | "priority": 15, 47 | "table": "MyIngress.t_DFA_match_0", 48 | "action_params": { 49 | "_state": 7, 50 | "pattern_code": 2 51 | }, 52 | "match": { 53 | "hdr.patrns[0].pattern": [ 54 | 105, 55 | 255 56 | ], 57 | "meta.state": [ 58 | 0, 59 | 6 60 | ], 61 | "hdr.patrns[1].pattern": [ 62 | 115, 63 | 255 64 | ] 65 | }, 66 | "action_name": "MyIngress.a_set_state_2_b2" 67 | }, 68 | { 69 | "priority": 14, 70 | "table": "MyIngress.t_DFA_match_0", 71 | "action_params": { 72 | "_state": 7, 73 | "pattern_code": 2 74 | }, 75 | "match": { 76 | "hdr.patrns[0].pattern": [ 77 | 115, 78 | 255 79 | ], 80 | "meta.state": [ 81 | 4, 82 | 6 83 | ] 84 | }, 85 | "action_name": "MyIngress.a_set_state_1_b2" 86 | }, 87 | { 88 | "priority": 13, 89 | "table": "MyIngress.t_DFA_match_0", 90 | "action_params": { 91 | "_state": 4 92 | }, 93 | "match": { 94 | "hdr.patrns[0].pattern": [ 95 | 104, 96 | 255 97 | ], 98 | "hdr.patrns[1].pattern": [ 99 | 105, 100 | 255 101 | ] 102 | }, 103 | "action_name": "MyIngress.a_set_state_2" 104 
| }, 105 | { 106 | "priority": 12, 107 | "table": "MyIngress.t_DFA_match_0", 108 | "action_params": { 109 | "_state": 0 110 | }, 111 | "match": { 112 | "hdr.patrns[0].pattern": [ 113 | 115, 114 | 255 115 | ], 116 | "hdr.patrns[1].pattern": [ 117 | 104, 118 | 255 119 | ] 120 | }, 121 | "action_name": "MyIngress.a_set_state_2" 122 | }, 123 | { 124 | "priority": 11, 125 | "table": "MyIngress.t_DFA_match_0", 126 | "action_params": { 127 | "_state": 1 128 | }, 129 | "match": { 130 | "hdr.patrns[1].pattern": [ 131 | 104, 132 | 255 133 | ] 134 | }, 135 | "action_name": "MyIngress.a_set_state_2" 136 | }, 137 | { 138 | "priority": 10, 139 | "table": "MyIngress.t_DFA_match_0", 140 | "action_params": { 141 | "_state": 6 142 | }, 143 | "match": { 144 | "hdr.patrns[1].pattern": [ 145 | 115, 146 | 255 147 | ] 148 | }, 149 | "action_name": "MyIngress.a_set_state_2" 150 | }, 151 | { 152 | "priority": 9, 153 | "table": "MyIngress.t_DFA_match_1", 154 | "action_params": { 155 | "_state": 3, 156 | "pattern_code": 1 157 | }, 158 | "match": { 159 | "hdr.patrns[0].pattern": [ 160 | 104, 161 | 255 162 | ], 163 | "meta.state": [ 164 | 6, 165 | 6 166 | ], 167 | "hdr.patrns[1].pattern": [ 168 | 101, 169 | 255 170 | ] 171 | }, 172 | "action_name": "MyIngress.a_set_state_2_b1" 173 | }, 174 | { 175 | "priority": 8, 176 | "table": "MyIngress.t_DFA_match_1", 177 | "action_params": { 178 | "_state": 3, 179 | "pattern_code": 1 180 | }, 181 | "match": { 182 | "hdr.patrns[0].pattern": [ 183 | 101, 184 | 255 185 | ], 186 | "meta.state": [ 187 | 0, 188 | 7 189 | ] 190 | }, 191 | "action_name": "MyIngress.a_set_state_1_b1" 192 | }, 193 | { 194 | "priority": 7, 195 | "table": "MyIngress.t_DFA_match_1", 196 | "action_params": { 197 | "_state": 7, 198 | "pattern_code": 2 199 | }, 200 | "match": { 201 | "hdr.patrns[0].pattern": [ 202 | 105, 203 | 255 204 | ], 205 | "meta.state": [ 206 | 0, 207 | 6 208 | ], 209 | "hdr.patrns[1].pattern": [ 210 | 115, 211 | 255 212 | ] 213 | }, 214 | "action_name": 
"MyIngress.a_set_state_2_b2" 215 | }, 216 | { 217 | "priority": 6, 218 | "table": "MyIngress.t_DFA_match_1", 219 | "action_params": { 220 | "_state": 7, 221 | "pattern_code": 2 222 | }, 223 | "match": { 224 | "hdr.patrns[0].pattern": [ 225 | 115, 226 | 255 227 | ], 228 | "meta.state": [ 229 | 4, 230 | 6 231 | ] 232 | }, 233 | "action_name": "MyIngress.a_set_state_1_b2" 234 | }, 235 | { 236 | "priority": 5, 237 | "table": "MyIngress.t_DFA_match_1", 238 | "action_params": { 239 | "_state": 4 240 | }, 241 | "match": { 242 | "hdr.patrns[0].pattern": [ 243 | 104, 244 | 255 245 | ], 246 | "hdr.patrns[1].pattern": [ 247 | 105, 248 | 255 249 | ] 250 | }, 251 | "action_name": "MyIngress.a_set_state_2" 252 | }, 253 | { 254 | "priority": 4, 255 | "table": "MyIngress.t_DFA_match_1", 256 | "action_params": { 257 | "_state": 0 258 | }, 259 | "match": { 260 | "hdr.patrns[0].pattern": [ 261 | 115, 262 | 255 263 | ], 264 | "hdr.patrns[1].pattern": [ 265 | 104, 266 | 255 267 | ] 268 | }, 269 | "action_name": "MyIngress.a_set_state_2" 270 | }, 271 | { 272 | "priority": 3, 273 | "table": "MyIngress.t_DFA_match_1", 274 | "action_params": { 275 | "_state": 1 276 | }, 277 | "match": { 278 | "hdr.patrns[1].pattern": [ 279 | 104, 280 | 255 281 | ] 282 | }, 283 | "action_name": "MyIngress.a_set_state_2" 284 | }, 285 | { 286 | "priority": 2, 287 | "table": "MyIngress.t_DFA_match_1", 288 | "action_params": { 289 | "_state": 6 290 | }, 291 | "match": { 292 | "hdr.patrns[1].pattern": [ 293 | 115, 294 | 255 295 | ] 296 | }, 297 | "action_name": "MyIngress.a_set_state_2" 298 | }, 299 | { 300 | "table": "MyIngress.t_get_root_state", 301 | "default_action": true, 302 | "action_params": { 303 | "root_state": 2 304 | }, 305 | "action_name": "MyIngress.a_get_root_state" 306 | }, 307 | { 308 | "table": "MyIngress.t_DFA_match_0", 309 | "default_action": true, 310 | "action_params": { 311 | "_state": 2 312 | }, 313 | "action_name": "MyIngress.a_set_state_2" 314 | }, 315 | { 316 | "table": 
"MyIngress.t_DFA_match_1", 317 | "default_action": true, 318 | "action_params": { 319 | "_state": 2 320 | }, 321 | "action_name": "MyIngress.a_set_state_2" 322 | }, 323 | { 324 | "priority": 2, 325 | "table": "MyIngress.t_pattern2rule", 326 | "action_params": {}, 327 | "match": { 328 | "meta.b2": [ 329 | 2, 330 | 255 331 | ], 332 | "meta.b1": [ 333 | 1, 334 | 255 335 | ] 336 | }, 337 | "action_name": "MyIngress.a_mark_as_to_forward" 338 | } 339 | ], 340 | "bmv2_json": "build/bolt.json", 341 | "target": "bmv2", 342 | "p4info": "build/bolt.p4.p4info.txt" 343 | } -------------------------------------------------------------------------------- /s1-runtime-v2.json: -------------------------------------------------------------------------------- 1 | { 2 | "table_entries": [ 3 | { 4 | "priority": 17, 5 | "table": "MyIngress.t_DFA_match_0", 6 | "action_params": { 7 | "_state": 3, 8 | "pattern_code": 1 9 | }, 10 | "match": { 11 | "hdr.patrns[0].pattern": [ 12 | 104, 13 | 255 14 | ], 15 | "meta.state": [ 16 | 6, 17 | 6 18 | ], 19 | "hdr.patrns[1].pattern": [ 20 | 101, 21 | 255 22 | ] 23 | }, 24 | "action_name": "MyIngress.a_set_state_2_b1" 25 | }, 26 | { 27 | "priority": 16, 28 | "table": "MyIngress.t_DFA_match_0", 29 | "action_params": { 30 | "_state": 3, 31 | "pattern_code": 1 32 | }, 33 | "match": { 34 | "hdr.patrns[0].pattern": [ 35 | 101, 36 | 255 37 | ], 38 | "meta.state": [ 39 | 0, 40 | 7 41 | ] 42 | }, 43 | "action_name": "MyIngress.a_set_state_1_b1" 44 | }, 45 | { 46 | "priority": 15, 47 | "table": "MyIngress.t_DFA_match_0", 48 | "action_params": { 49 | "_state": 7, 50 | "pattern_code": 2 51 | }, 52 | "match": { 53 | "hdr.patrns[0].pattern": [ 54 | 105, 55 | 255 56 | ], 57 | "meta.state": [ 58 | 0, 59 | 6 60 | ], 61 | "hdr.patrns[1].pattern": [ 62 | 115, 63 | 255 64 | ] 65 | }, 66 | "action_name": "MyIngress.a_set_state_2_b1" 67 | }, 68 | { 69 | "priority": 14, 70 | "table": "MyIngress.t_DFA_match_0", 71 | "action_params": { 72 | "_state": 7, 73 | "pattern_code": 2 
74 | }, 75 | "match": { 76 | "hdr.patrns[0].pattern": [ 77 | 115, 78 | 255 79 | ], 80 | "meta.state": [ 81 | 4, 82 | 6 83 | ] 84 | }, 85 | "action_name": "MyIngress.a_set_state_1_b1" 86 | }, 87 | { 88 | "priority": 13, 89 | "table": "MyIngress.t_DFA_match_0", 90 | "action_params": { 91 | "_state": 4 92 | }, 93 | "match": { 94 | "hdr.patrns[0].pattern": [ 95 | 104, 96 | 255 97 | ], 98 | "hdr.patrns[1].pattern": [ 99 | 105, 100 | 255 101 | ] 102 | }, 103 | "action_name": "MyIngress.a_set_state_2" 104 | }, 105 | { 106 | "priority": 12, 107 | "table": "MyIngress.t_DFA_match_0", 108 | "action_params": { 109 | "_state": 0 110 | }, 111 | "match": { 112 | "hdr.patrns[0].pattern": [ 113 | 115, 114 | 255 115 | ], 116 | "hdr.patrns[1].pattern": [ 117 | 104, 118 | 255 119 | ] 120 | }, 121 | "action_name": "MyIngress.a_set_state_2" 122 | }, 123 | { 124 | "priority": 11, 125 | "table": "MyIngress.t_DFA_match_0", 126 | "action_params": { 127 | "_state": 1 128 | }, 129 | "match": { 130 | "hdr.patrns[1].pattern": [ 131 | 104, 132 | 255 133 | ] 134 | }, 135 | "action_name": "MyIngress.a_set_state_2" 136 | }, 137 | { 138 | "priority": 10, 139 | "table": "MyIngress.t_DFA_match_0", 140 | "action_params": { 141 | "_state": 6 142 | }, 143 | "match": { 144 | "hdr.patrns[1].pattern": [ 145 | 115, 146 | 255 147 | ] 148 | }, 149 | "action_name": "MyIngress.a_set_state_2" 150 | }, 151 | { 152 | "priority": 9, 153 | "table": "MyIngress.t_DFA_match_1", 154 | "action_params": { 155 | "_state": 3, 156 | "pattern_code": 1 157 | }, 158 | "match": { 159 | "hdr.patrns[0].pattern": [ 160 | 104, 161 | 255 162 | ], 163 | "meta.state": [ 164 | 6, 165 | 6 166 | ], 167 | "hdr.patrns[1].pattern": [ 168 | 101, 169 | 255 170 | ] 171 | }, 172 | "action_name": "MyIngress.a_set_state_2_b1" 173 | }, 174 | { 175 | "priority": 8, 176 | "table": "MyIngress.t_DFA_match_1", 177 | "action_params": { 178 | "_state": 3, 179 | "pattern_code": 1 180 | }, 181 | "match": { 182 | "hdr.patrns[0].pattern": [ 183 | 101, 184 | 
255 185 | ], 186 | "meta.state": [ 187 | 0, 188 | 7 189 | ] 190 | }, 191 | "action_name": "MyIngress.a_set_state_1_b1" 192 | }, 193 | { 194 | "priority": 7, 195 | "table": "MyIngress.t_DFA_match_1", 196 | "action_params": { 197 | "_state": 7, 198 | "pattern_code": 2 199 | }, 200 | "match": { 201 | "hdr.patrns[0].pattern": [ 202 | 105, 203 | 255 204 | ], 205 | "meta.state": [ 206 | 0, 207 | 6 208 | ], 209 | "hdr.patrns[1].pattern": [ 210 | 115, 211 | 255 212 | ] 213 | }, 214 | "action_name": "MyIngress.a_set_state_2_b1" 215 | }, 216 | { 217 | "priority": 6, 218 | "table": "MyIngress.t_DFA_match_1", 219 | "action_params": { 220 | "_state": 7, 221 | "pattern_code": 2 222 | }, 223 | "match": { 224 | "hdr.patrns[0].pattern": [ 225 | 115, 226 | 255 227 | ], 228 | "meta.state": [ 229 | 4, 230 | 6 231 | ] 232 | }, 233 | "action_name": "MyIngress.a_set_state_1_b1" 234 | }, 235 | { 236 | "priority": 5, 237 | "table": "MyIngress.t_DFA_match_1", 238 | "action_params": { 239 | "_state": 4 240 | }, 241 | "match": { 242 | "hdr.patrns[0].pattern": [ 243 | 104, 244 | 255 245 | ], 246 | "hdr.patrns[1].pattern": [ 247 | 105, 248 | 255 249 | ] 250 | }, 251 | "action_name": "MyIngress.a_set_state_2" 252 | }, 253 | { 254 | "priority": 4, 255 | "table": "MyIngress.t_DFA_match_1", 256 | "action_params": { 257 | "_state": 0 258 | }, 259 | "match": { 260 | "hdr.patrns[0].pattern": [ 261 | 115, 262 | 255 263 | ], 264 | "hdr.patrns[1].pattern": [ 265 | 104, 266 | 255 267 | ] 268 | }, 269 | "action_name": "MyIngress.a_set_state_2" 270 | }, 271 | { 272 | "priority": 3, 273 | "table": "MyIngress.t_DFA_match_1", 274 | "action_params": { 275 | "_state": 1 276 | }, 277 | "match": { 278 | "hdr.patrns[1].pattern": [ 279 | 104, 280 | 255 281 | ] 282 | }, 283 | "action_name": "MyIngress.a_set_state_2" 284 | }, 285 | { 286 | "priority": 2, 287 | "table": "MyIngress.t_DFA_match_1", 288 | "action_params": { 289 | "_state": 6 290 | }, 291 | "match": { 292 | "hdr.patrns[1].pattern": [ 293 | 115, 294 | 255 
295 | ] 296 | }, 297 | "action_name": "MyIngress.a_set_state_2" 298 | }, 299 | { 300 | "table": "MyIngress.t_get_root_state", 301 | "default_action": true, 302 | "action_params": { 303 | "root_state": 2 304 | }, 305 | "action_name": "MyIngress.a_get_root_state" 306 | }, 307 | { 308 | "table": "MyIngress.t_DFA_match_0", 309 | "default_action": true, 310 | "action_params": { 311 | "_state": 2 312 | }, 313 | "action_name": "MyIngress.a_set_state_2" 314 | }, 315 | { 316 | "table": "MyIngress.t_DFA_match_1", 317 | "default_action": true, 318 | "action_params": { 319 | "_state": 2 320 | }, 321 | "action_name": "MyIngress.a_set_state_2" 322 | }, 323 | { 324 | "priority": 2, 325 | "table": "MyIngress.t_pattern2rule", 326 | "action_params": {}, 327 | "match": { 328 | "meta.b1": [ 329 | 1, 330 | 255 331 | ] 332 | }, 333 | "action_name": "MyIngress.a_mark_as_to_forward" 334 | }, 335 | { 336 | "priority": 3, 337 | "table": "MyIngress.t_pattern2rule", 338 | "action_params": {}, 339 | "match": { 340 | 341 | "meta.b1": [ 342 | 2, 343 | 255 344 | ] 345 | }, 346 | "action_name": "MyIngress.a_mark_as_to_forward" 347 | } 348 | ], 349 | "bmv2_json": "build/bolt.json", 350 | "target": "bmv2", 351 | "p4info": "build/bolt.p4.p4info.txt" 352 | } -------------------------------------------------------------------------------- /s1-runtime.json: -------------------------------------------------------------------------------- 1 | { 2 | "table_entries": [ 3 | { 4 | "priority": 17, 5 | "table": "MyIngress.t_DFA_match_0", 6 | "action_params": { 7 | "_state": 3, 8 | "pattern_code": 1 9 | }, 10 | "match": { 11 | "hdr.patrns[0].pattern": [ 12 | 104, 13 | 255 14 | ], 15 | "meta.state": [ 16 | 6, 17 | 6 18 | ], 19 | "hdr.patrns[1].pattern": [ 20 | 101, 21 | 255 22 | ] 23 | }, 24 | "action_name": "MyIngress.a_set_state_2_b1" 25 | }, 26 | { 27 | "priority": 16, 28 | "table": "MyIngress.t_DFA_match_0", 29 | "action_params": { 30 | "_state": 3, 31 | "pattern_code": 1 32 | }, 33 | "match": { 34 | 
"hdr.patrns[0].pattern": [ 35 | 101, 36 | 255 37 | ], 38 | "meta.state": [ 39 | 0, 40 | 7 41 | ] 42 | }, 43 | "action_name": "MyIngress.a_set_state_1_b1" 44 | }, 45 | { 46 | "priority": 15, 47 | "table": "MyIngress.t_DFA_match_0", 48 | "action_params": { 49 | "_state": 7, 50 | "pattern_code": 2 51 | }, 52 | "match": { 53 | "hdr.patrns[0].pattern": [ 54 | 105, 55 | 255 56 | ], 57 | "meta.state": [ 58 | 0, 59 | 6 60 | ], 61 | "hdr.patrns[1].pattern": [ 62 | 115, 63 | 255 64 | ] 65 | }, 66 | "action_name": "MyIngress.a_set_state_2_b1" 67 | }, 68 | { 69 | "priority": 14, 70 | "table": "MyIngress.t_DFA_match_0", 71 | "action_params": { 72 | "_state": 7, 73 | "pattern_code": 2 74 | }, 75 | "match": { 76 | "hdr.patrns[0].pattern": [ 77 | 115, 78 | 255 79 | ], 80 | "meta.state": [ 81 | 4, 82 | 6 83 | ] 84 | }, 85 | "action_name": "MyIngress.a_set_state_1_b1" 86 | }, 87 | { 88 | "priority": 13, 89 | "table": "MyIngress.t_DFA_match_0", 90 | "action_params": { 91 | "_state": 4 92 | }, 93 | "match": { 94 | "hdr.patrns[0].pattern": [ 95 | 104, 96 | 255 97 | ], 98 | "hdr.patrns[1].pattern": [ 99 | 105, 100 | 255 101 | ] 102 | }, 103 | "action_name": "MyIngress.a_set_state_2" 104 | }, 105 | { 106 | "priority": 12, 107 | "table": "MyIngress.t_DFA_match_0", 108 | "action_params": { 109 | "_state": 0 110 | }, 111 | "match": { 112 | "hdr.patrns[0].pattern": [ 113 | 115, 114 | 255 115 | ], 116 | "hdr.patrns[1].pattern": [ 117 | 104, 118 | 255 119 | ] 120 | }, 121 | "action_name": "MyIngress.a_set_state_2" 122 | }, 123 | { 124 | "priority": 11, 125 | "table": "MyIngress.t_DFA_match_0", 126 | "action_params": { 127 | "_state": 1 128 | }, 129 | "match": { 130 | "hdr.patrns[1].pattern": [ 131 | 104, 132 | 255 133 | ] 134 | }, 135 | "action_name": "MyIngress.a_set_state_2" 136 | }, 137 | { 138 | "priority": 10, 139 | "table": "MyIngress.t_DFA_match_0", 140 | "action_params": { 141 | "_state": 6 142 | }, 143 | "match": { 144 | "hdr.patrns[1].pattern": [ 145 | 115, 146 | 255 147 | ] 148 | }, 
149 | "action_name": "MyIngress.a_set_state_2" 150 | }, 151 | { 152 | "priority": 9, 153 | "table": "MyIngress.t_DFA_match_1", 154 | "action_params": { 155 | "_state": 3, 156 | "pattern_code": 1 157 | }, 158 | "match": { 159 | "hdr.patrns[0].pattern": [ 160 | 104, 161 | 255 162 | ], 163 | "meta.state": [ 164 | 6, 165 | 6 166 | ], 167 | "hdr.patrns[1].pattern": [ 168 | 101, 169 | 255 170 | ] 171 | }, 172 | "action_name": "MyIngress.a_set_state_2_b1" 173 | }, 174 | { 175 | "priority": 8, 176 | "table": "MyIngress.t_DFA_match_1", 177 | "action_params": { 178 | "_state": 3, 179 | "pattern_code": 1 180 | }, 181 | "match": { 182 | "hdr.patrns[0].pattern": [ 183 | 101, 184 | 255 185 | ], 186 | "meta.state": [ 187 | 0, 188 | 7 189 | ] 190 | }, 191 | "action_name": "MyIngress.a_set_state_1_b1" 192 | }, 193 | { 194 | "priority": 7, 195 | "table": "MyIngress.t_DFA_match_1", 196 | "action_params": { 197 | "_state": 7, 198 | "pattern_code": 2 199 | }, 200 | "match": { 201 | "hdr.patrns[0].pattern": [ 202 | 105, 203 | 255 204 | ], 205 | "meta.state": [ 206 | 0, 207 | 6 208 | ], 209 | "hdr.patrns[1].pattern": [ 210 | 115, 211 | 255 212 | ] 213 | }, 214 | "action_name": "MyIngress.a_set_state_2_b1" 215 | }, 216 | { 217 | "priority": 6, 218 | "table": "MyIngress.t_DFA_match_1", 219 | "action_params": { 220 | "_state": 7, 221 | "pattern_code": 2 222 | }, 223 | "match": { 224 | "hdr.patrns[0].pattern": [ 225 | 115, 226 | 255 227 | ], 228 | "meta.state": [ 229 | 4, 230 | 6 231 | ] 232 | }, 233 | "action_name": "MyIngress.a_set_state_1_b1" 234 | }, 235 | { 236 | "priority": 5, 237 | "table": "MyIngress.t_DFA_match_1", 238 | "action_params": { 239 | "_state": 4 240 | }, 241 | "match": { 242 | "hdr.patrns[0].pattern": [ 243 | 104, 244 | 255 245 | ], 246 | "hdr.patrns[1].pattern": [ 247 | 105, 248 | 255 249 | ] 250 | }, 251 | "action_name": "MyIngress.a_set_state_2" 252 | }, 253 | { 254 | "priority": 4, 255 | "table": "MyIngress.t_DFA_match_1", 256 | "action_params": { 257 | "_state": 0 
258 | }, 259 | "match": { 260 | "hdr.patrns[0].pattern": [ 261 | 115, 262 | 255 263 | ], 264 | "hdr.patrns[1].pattern": [ 265 | 104, 266 | 255 267 | ] 268 | }, 269 | "action_name": "MyIngress.a_set_state_2" 270 | }, 271 | { 272 | "priority": 3, 273 | "table": "MyIngress.t_DFA_match_1", 274 | "action_params": { 275 | "_state": 1 276 | }, 277 | "match": { 278 | "hdr.patrns[1].pattern": [ 279 | 104, 280 | 255 281 | ] 282 | }, 283 | "action_name": "MyIngress.a_set_state_2" 284 | }, 285 | { 286 | "priority": 2, 287 | "table": "MyIngress.t_DFA_match_1", 288 | "action_params": { 289 | "_state": 6 290 | }, 291 | "match": { 292 | "hdr.patrns[1].pattern": [ 293 | 115, 294 | 255 295 | ] 296 | }, 297 | "action_name": "MyIngress.a_set_state_2" 298 | }, 299 | { 300 | "table": "MyIngress.t_get_root_state", 301 | "default_action": true, 302 | "action_params": { 303 | "root_state": 2 304 | }, 305 | "action_name": "MyIngress.a_get_root_state" 306 | }, 307 | { 308 | "table": "MyIngress.t_DFA_match_0", 309 | "default_action": true, 310 | "action_params": { 311 | "_state": 2 312 | }, 313 | "action_name": "MyIngress.a_set_state_2" 314 | }, 315 | { 316 | "table": "MyIngress.t_DFA_match_1", 317 | "default_action": true, 318 | "action_params": { 319 | "_state": 2 320 | }, 321 | "action_name": "MyIngress.a_set_state_2" 322 | }, 323 | { 324 | "priority": 2, 325 | "table": "MyIngress.t_pattern2rule", 326 | "action_params": {}, 327 | "match": { 328 | "meta.b1": [ 329 | 1, 330 | 255 331 | ] 332 | }, 333 | "action_name": "MyIngress.a_mark_as_to_forward" 334 | }, 335 | { 336 | "priority": 3, 337 | "table": "MyIngress.t_pattern2rule", 338 | "action_params": {}, 339 | "match": { 340 | 341 | "meta.b1": [ 342 | 2, 343 | 255 344 | ] 345 | }, 346 | "action_name": "MyIngress.a_mark_as_to_forward" 347 | } 348 | ], 349 | "bmv2_json": "build/bolt.json", 350 | "target": "bmv2", 351 | "p4info": "build/bolt.p4.p4info.txt" 352 | } -------------------------------------------------------------------------------- 
/send.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import argparse 3 | import sys 4 | import socket 5 | import random 6 | import struct 7 | 8 | from scapy.all import sendp, send, get_if_list, get_if_hwaddr 9 | from scapy.all import Packet 10 | from scapy.all import Ether, IP, UDP, TCP, ICMP 11 | 12 | def get_if(): 13 | ifs=get_if_list() 14 | iface=None # "h1-eth0" 15 | for i in get_if_list(): 16 | if "eth0" in i: 17 | iface=i 18 | break 19 | if not iface: 20 | print("Cannot find eth0 interface") 21 | exit(1) 22 | return iface 23 | 24 | def main(): 25 | 26 | if len(sys.argv)<4: 27 | print('pass 3 arguments: ') 28 | exit(1) 29 | 30 | addr = socket.gethostbyname(sys.argv[1]) 31 | iface = get_if() 32 | 33 | print("sending on interface %s to %s" % (iface, str(addr))) 34 | # pkt = Ether(src=get_if_hwaddr(iface), dst='ff:ff:ff:ff:ff:ff') 35 | #pkt = pkt /IP(dst=addr) / TCP(dport=1234, sport=random.randint(49152,65535)) / sys.argv[2] 36 | # pkt = pkt /IP(dst=addr, proto=244) / sys.argv[2] 37 | 38 | pkt = Ether(src=get_if_hwaddr(iface), dst='ff:ff:ff:ff:ff:ff')/IP(dst=addr) 39 | if sys.argv[2] == 'T': 40 | pkt = pkt / TCP(sport = 1234, dport = 6666) 41 | elif sys.argv[2] == 'I': 42 | pkt = pkt / ICMP(code = 6) 43 | elif sys.argv[2] == 'U': 44 | pkt = pkt / UDP(sport = 1234, dport = 6666) 45 | else: 46 | pass 47 | 48 | pkt = pkt/ sys.argv[3] 49 | pkt.show2() 50 | sendp(pkt, iface=iface, verbose=False) 51 | 52 | 53 | if __name__ == '__main__': 54 | main() -------------------------------------------------------------------------------- /topology.json: -------------------------------------------------------------------------------- 1 | { 2 | "hosts": { 3 | "h1": {"ip": "10.0.1.1/24", "mac": "08:00:00:00:01:11"}, 4 | "h2": {"ip": "10.0.2.2/24", "mac": "08:00:00:00:02:22"} 5 | }, 6 | "switches": { 7 | "s1": { "runtime_json" : "s1-runtime.json" } 8 | }, 9 | "links": [ 10 | ["h1", "s1-p1"], ["h2", "s1-p2"] 11 | 
] 12 | } 13 | --------------------------------------------------------------------------------