├── p4utils ├── __init__.py ├── utils │ ├── __init__.py │ ├── p4runtime_API │ │ ├── __init__.py │ │ ├── README.md │ │ ├── utils.py │ │ ├── bytes_utils.py │ │ ├── context.py │ │ └── p4runtime.py │ ├── monitor.py │ ├── traffic_utils.py │ ├── client.py │ ├── sswitch_thrift_API.py │ ├── compiler.py │ └── helper.py └── mininetlib │ ├── __init__.py │ ├── log.py │ ├── net.py │ └── cli.py ├── .gitignore ├── rules └── default │ ├── s2-commands.txt │ ├── s1-commands.txt │ └── s3-commands.txt ├── grafana └── provisioning │ ├── dashboards │ ├── dashboard.yaml │ └── InfluxDB INT Dashboard.json │ └── datasources │ └── datasource.yaml ├── Makefile ├── report_collector ├── collector_exporter.py ├── collector_influxdb.py ├── collector_graphite.py ├── colllector.py └── report_rx.py ├── p4src ├── include │ ├── headers.p4 │ ├── defines.p4 │ ├── checksum.p4 │ ├── forward.p4 │ ├── parser.p4 │ ├── int_source.p4 │ ├── int_sink.p4 │ ├── int_headers.p4 │ └── int_transit.p4 └── int_md.p4 ├── receive.py ├── network.py ├── send.py ├── README.md └── receive_report.py /p4utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /p4utils/utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /p4utils/mininetlib/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /p4utils/utils/p4runtime_API/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /p4utils/utils/p4runtime_API/README.md: -------------------------------------------------------------------------------- 1 | # P4Runtime Client 2 | This is 
a pure API and multi-switch version of the [p4runtime-shell](https://github.com/p4lang/p4runtime-shell) repository. 3 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | # vim swap files 3 | .*.sw? 4 | .sw? 5 | # vscode settings directory 6 | .vscode 7 | #OS X specific files. 8 | .DS_store 9 | *.p4i 10 | *.log 11 | *.pcap 12 | pcap 13 | log 14 | topology.json 15 | int_md.json -------------------------------------------------------------------------------- /rules/default/s2-commands.txt: -------------------------------------------------------------------------------- 1 | //set up ipv4_lpm table 2 | table_add l3_forward.ipv4_lpm ipv4_forward 10.0.1.1/32 => 00:00:00:00:00:00 1 3 | table_add l3_forward.ipv4_lpm ipv4_forward 10.0.3.2/32 => 00:00:00:00:00:00 2 4 | 5 | //set up switch ID 6 | table_set_default process_int_transit.tb_int_insert init_metadata 2 -------------------------------------------------------------------------------- /grafana/provisioning/dashboards/dashboard.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | 3 | providers: 4 | - name: 'int' 5 | # name of the dashboard folder. 
PCAP_DIR = pcap
LOG_DIR = log
P4SRC_DIR = p4src
# Directory holding the generated rule files cleaned by `make clean`.
# BUG FIX: RULE_DIR was never defined, so `rm -rf $(RULE_DIR)/rule*`
# expanded to `rm -rf /rule*` (a glob at the filesystem root).
RULE_DIR = rules

# Allow overriding the P4 program on the command line:
#   make run P4SRC_FILE=path/to/prog.p4
ifndef P4SRC_FILE
P4SRC_FILE = p4src/int_md.p4
endif

all: run

# start network
run:
	sudo python3 network.py --p4 ${P4SRC_FILE}

# tear down any leftover Mininet state
stop:
	sudo mn -c

# remove pcap dumps, logs and generated rules
clean: stop
	rm -f *.pcap
	rm -rf $(PCAP_DIR) $(LOG_DIR) $(RULE_DIR)/rule*
#!/usr/bin/env python3
"""Sniff INT telemetry reports on the s3 CPU port and print them."""

import sys
from scapy.all import sniff
# NOTE(review): module name 'colllector' has a triple-l typo but matches the
# shipped filename, so it is kept for compatibility.
from colllector import *


def handle_pkt(pkt, c):
    """Parse and display a packet if it carries an INT report (INTREP layer).

    Args:
        pkt: sniffed scapy packet.
        c: Collector instance used to parse the INT stack.
    """
    if INTREP in pkt:
        # Typo fixed: 'Telemtry' -> 'Telemetry'.
        print("\n\n********* Receiving Telemetry Report ********")
        flow_info = c.parser_int_pkt(pkt)
        flow_info.show()


def main():
    """Sniff indefinitely on the switch CPU interface."""
    iface = 's3-cpu-eth1'
    print("sniffing on %s" % iface)
    sys.stdout.flush()

    c = Collector()
    # Parenthesized: BPF 'and' binds tighter than 'or', so the original
    # 'inbound and tcp or udp' matched (inbound and tcp) OR (any udp).
    sniff(iface=iface, filter='inbound and (tcp or udp)',
          prn=lambda x: handle_pkt(x, c))


if __name__ == '__main__':
    main()
#!/usr/bin/env python3
"""Sniff INT telemetry reports and export the parsed metrics to InfluxDB."""

import sys
from scapy.all import sniff
from influxdb import InfluxDBClient
# NOTE(review): module name 'colllector' has a triple-l typo but matches the
# shipped filename, so it is kept for compatibility.
from colllector import *

# InfluxDB connection settings (server assumed local, database 'int').
INFLUX_HOST = 'localhost'
INFLUX_DB = 'int'


def handle_pkt(pkt, c):
    """Parse a packet carrying an INT report and push it to InfluxDB.

    Args:
        pkt: sniffed scapy packet.
        c: Collector instance holding the InfluxDB client.
    """
    if INTREP in pkt:
        # Typo fixed: 'Telemtry' -> 'Telemetry'.
        print("\n\n********* Receiving Telemetry Report ********")
        flow_info = c.parser_int_pkt(pkt)
        flow_info.show()
        c.export_influxdb(flow_info)


def main():
    """Sniff indefinitely on the switch CPU interface, exporting each report."""
    iface = 's3-cpu-eth1'
    print("sniffing on %s" % iface)
    sys.stdout.flush()

    influx_client = InfluxDBClient(host=INFLUX_HOST, database=INFLUX_DB)
    c = Collector(influx_client)
    # Parenthesized: BPF 'and' binds tighter than 'or', so the original
    # 'inbound and tcp or udp' matched (inbound and tcp) OR (any udp).
    sniff(iface=iface, filter='inbound and (tcp or udp)',
          prn=lambda x: handle_pkt(x, c))


if __name__ == '__main__':
    main()
class InvalidP4InfoError(Exception):
    """Raised when a P4Info protobuf message fails validation.

    The raw detail string is kept on ``info`` so callers can inspect it.
    """

    def __init__(self, info=""):
        self.info = info

    def __str__(self):
        return f"Invalid P4Info message: {self.info}"

    def _render_traceback_(self):
        # IPython hook: display just the message instead of a full traceback.
        return [str(self)]
def get_if():
    """Return the name of the first interface containing 'eth0'.

    Exits the process with status 1 if no such interface exists.
    (Fix: the original assigned ``ifs = get_if_list()`` and then ignored it,
    calling ``get_if_list()`` a second time inside the loop.)
    """
    for iface in get_if_list():
        if "eth0" in iface:
            return iface
    print("Cannot find eth0 interface")
    exit(1)
def get_args():
    """Parse command-line arguments.

    Returns:
        argparse.Namespace with a single ``p4`` attribute: path to the P4
        source file to compile and load on every switch.
    """
    parser = argparse.ArgumentParser()
    # Default fixed: the repository ships p4src/int_md.p4 (see the directory
    # tree and the Makefile's P4SRC_FILE); 'p4src/int_mri.p4' does not exist,
    # so a plain `python3 network.py` previously pointed at a missing file.
    parser.add_argument('--p4', help='p4 src file.',
                        type=str, required=False, default='p4src/int_md.p4')

    return parser.parse_args()
#include "headers.p4" 2 | #include "int_headers.p4" 3 | 4 | control MyVerifyChecksum(inout headers hdr, inout local_metadata_t local_metadata) { 5 | apply { } 6 | } 7 | 8 | control MyComputeChecksum(inout headers hdr, inout local_metadata_t local_metadata){ 9 | apply{ 10 | update_checksum( 11 | hdr.ipv4.isValid(), 12 | { hdr.ipv4.version, 13 | hdr.ipv4.ihl, 14 | hdr.ipv4.dscp, 15 | hdr.ipv4.ecn, 16 | hdr.ipv4.len, 17 | hdr.ipv4.identification, 18 | hdr.ipv4.flags, 19 | hdr.ipv4.frag_offset, 20 | hdr.ipv4.ttl, 21 | hdr.ipv4.protocol, 22 | hdr.ipv4.src_addr, 23 | hdr.ipv4.dst_addr }, 24 | hdr.ipv4.hdr_checksum, 25 | HashAlgorithm.csum16 26 | ); 27 | 28 | 29 | // #ifdef __INT_HEADERS__ 30 | // update_checksum(hdr.report_ipv4.isValid(), 31 | // { 32 | // hdr.report_ipv4.version, 33 | // hdr.report_ipv4.ihl, 34 | // hdr.report_ipv4.dscp, 35 | // hdr.report_ipv4.ecn, 36 | // hdr.report_ipv4.len, 37 | // hdr.report_ipv4.identification, 38 | // hdr.report_ipv4.flags, 39 | // hdr.report_ipv4.frag_offset, 40 | // hdr.report_ipv4.ttl, 41 | // hdr.report_ipv4.protocol, 42 | // hdr.report_ipv4.src_addr, 43 | // hdr.report_ipv4.dst_addr 44 | // }, 45 | // hdr.report_ipv4.hdr_checksum, 46 | // HashAlgorithm.csum16 47 | // ); 48 | // #endif // __INT_HEADERS__ 49 | } 50 | } -------------------------------------------------------------------------------- /p4src/include/forward.p4: -------------------------------------------------------------------------------- 1 | #include "defines.p4" 2 | #include "headers.p4" 3 | 4 | control l3_forward(inout headers hdr, 5 | inout local_metadata_t local_metadata, 6 | inout standard_metadata_t standard_metadata) { 7 | 8 | action drop(){ 9 | mark_to_drop(standard_metadata); 10 | } 11 | 12 | action ipv4_forward(mac_t dstAddr, port_t port) { 13 | standard_metadata.egress_spec = port; 14 | standard_metadata.egress_port = port; 15 | hdr.ethernet.src_addr = hdr.ethernet.dst_addr; 16 | hdr.ethernet.dst_addr = dstAddr; 17 | hdr.ipv4.ttl = hdr.ipv4.ttl - 1; 
// Port-based forwarding: maps the destination IPv4 prefix directly to an
// egress port, a CPU punt, or a drop. Alternative to l3_forward (no MAC
// rewrite, no TTL decrement).
control port_forward(inout headers hdr,
                     inout local_metadata_t local_metadata,
                     inout standard_metadata_t standard_metadata) {

    // Punt the packet to the controller (CPU_PORT = 255, see defines.p4).
    action send_to_cpu() {
        standard_metadata.egress_port = CPU_PORT;
        standard_metadata.egress_spec = CPU_PORT;
    }

    // Forward out of a control-plane-chosen switch port.
    // NOTE(review): on v1model, standard_metadata.egress_port is normally
    // read-only at ingress (egress_spec is the authoritative field) — the
    // extra write looks redundant; confirm against the target's behavior.
    action set_egress_port(port_t port) {
        standard_metadata.egress_port = port;
        standard_metadata.egress_spec = port;
    }

    action drop(){
        mark_to_drop(standard_metadata);
    }

    // LPM on destination IPv4 address; unmatched traffic is dropped.
    table tb_port_forward {
        key = {
            hdr.ipv4.dst_addr: lpm;
        }
        actions = {
            set_egress_port;
            send_to_cpu;
            drop;
        }
        const default_action = drop();
    }

    apply {
        tb_port_forward.apply();
    }
}
32 | if(args.l4 == 'tcp'): 33 | pkt = pkt /IP(dst=addr) / TCP(dport=args.port, sport=random.randint(49152,65535)) / args.m 34 | if(args.l4 == 'udp'): 35 | pkt = pkt /IP(dst=addr) / UDP(dport=int(args.port), sport=random.randint(49152,65535)) / args.m 36 | pkt.show2() 37 | for i in range(args.c): 38 | sendp(pkt, iface=iface, verbose=False) 39 | sleep(1) 40 | 41 | if __name__ == '__main__': 42 | parser = argparse.ArgumentParser(description='receiver parser') 43 | parser.add_argument('--c', help='number of probe packets', 44 | type=int, action="store", required=False, 45 | default=1) 46 | parser.add_argument('--ip', help='dst ip', 47 | type=str, action="store", required=True) 48 | parser.add_argument('--port', help="dest port", type=int, 49 | action="store", required=True) 50 | parser.add_argument('--l4', help="layer 4 proto (tcp or udp)", 51 | type=str, action="store", required=True) 52 | parser.add_argument('--m', help="message", type=str, 53 | action='store', required=False, default="") 54 | args = parser.parse_args() 55 | main(args) -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # P4-INT 2 | Implementation In­Band Network Telemetry in P4 3 | 4 | 5 | # Getting Started 6 | This is an example of how you may give instructions on setting up your project locally. To get a local copy up and running follow these simple example steps. 7 | 8 | ## Prerequisites 9 | ### environment 10 | * win10 11 | * vm-ubuntu20.04 12 | 13 | ## Installation 14 | 1. install Mininet 15 | ``` 16 | git clone https://github.com/mininet/mininet 17 | cd mininet 18 | 19 | sudo PYTHON=python3 mininet/util/install.sh -n 20 | ``` 21 | 2. install P4 22 | 23 | For Ubuntu 20.04 and Ubuntu 21.04 it can be installed as follows: 24 | ``` 25 | . 
/etc/os-release 26 | echo "deb http://download.opensuse.org/repositories/home:/p4lang/xUbuntu_${VERSION_ID}/ /" | sudo tee /etc/apt/sources.list.d/home:p4lang.list 27 | curl -L "http://download.opensuse.org/repositories/home:/p4lang/xUbuntu_${VERSION_ID}/Release.key" | sudo apt-key add - 28 | sudo apt-get update 29 | sudo apt install p4lang-p4c 30 | ``` 31 | 3. python3 Dependency package 32 | ``` 33 | sudo pip3 install psutil networkx 34 | ``` 35 | 4. Influxdb 36 | ```sh 37 | sudo apt-get install influxdb 38 | sudo service influxdb start 39 | sudo pip3 install influxdb 40 | ``` 41 | 42 | 43 | ## Usage 44 | 45 | 1. Clone the repo 46 | ```sh 47 | git clone github.com/laofan13/P4-INT.git 48 | ``` 49 | 2. run 50 | ```sh 51 | make run 52 | ``` 53 | 3. in mininet terminal 54 | ```sh 55 | xterm h1 h2 56 | ``` 57 | 4. open other desktop terminal,start collector 58 | ``` 59 | sudo python3 ./receive_report.py 60 | ``` 61 | 5. in xterm h2 62 | ```sh 63 | python3 ./receive.py 64 | ``` 65 | 6. in xterm h1 66 | ```sh 67 | python3 ./send.py --ip 10.0.1.1 --l4 udp --port 8080 --m "hello world !" --c 1 68 | ``` 69 | 70 | # influxdb operation 71 | ```sh 72 | INSERT flow_latency,src_ip="10.0.1.1",dst_ip="10.0.3.2",src_port=1234,dst_port=1234,protocol=17 value=0.64 73 | INSERT switch_latency,switch_id=1 value=0.64 74 | INSERT queue_occupancy,switch_id=1,queue_id=1 value=0.1 75 | INSERT link_latency,ingress_switch_id=2,ingress_port_id=1,egress_switch_id=1,egress_port_id=2 value= 76 | 77 | SELECT * FROM flow_latency 78 | drop measurement flow_latency 79 | ``` 80 | -------------------------------------------------------------------------------- /p4utils/mininetlib/log.py: -------------------------------------------------------------------------------- 1 | """__ https://github.com/mininet/mininet/blob/master/mininet/log.py 2 | 3 | This module is an extension of `mininet.log`__ that implements colored logs. 
class ColoredFormatter(logging.Formatter):
    """Formatter that wraps each record in ANSI codes chosen by log level."""

    def format(self, record):
        text = super().format(record)
        style = LOG_FORMAT.get(record.levelno)
        if style is None:
            # Unknown level: leave the message untouched.
            return text
        text = style + text
        # Critical records get their own line.
        if record.levelno == LEVELS['critical']:
            text += '\n'
        # Reset styling before a trailing newline, or at the very end.
        if text.endswith('\n'):
            return text[:-1] + ShellStyles.reset + '\n'
        return text + ShellStyles.reset
class Monitor:
    """Monitor the interface bandwidth and dump a .csv file with
    the rates in Mbps.

    Args:
        csv_file (string): path to the output file
        i (string) : name of the interface to monitor
        t (float) : interval between data points
        d (float) : monitoring duration
    """

    def __init__(self, csv_file, i, t=0.5, d=60):

        current_time = time.time()
        start_time = current_time
        stop_time = current_time + d
        # The sysfs paths are loop-invariant: format them once.
        tx_path = '/sys/class/net/{}/statistics/tx_bytes'.format(i)
        rx_path = '/sys/class/net/{}/statistics/rx_bytes'.format(i)
        old_tx = None
        old_rx = None
        data = []

        while current_time < stop_time:
            current_time = time.time()
            # Fix: the original shadowed the open file handles ('tx', 'rx')
            # with the integer counters read from them. Counters are
            # cumulative byte counts; multiply by 8 to get bits.
            with open(tx_path, 'r') as tx_f, open(rx_path, 'r') as rx_f:
                tx = int(tx_f.read()) * 8
                rx = int(rx_f.read()) * 8
            if old_tx is not None and old_rx is not None:
                # Rate over the nominal interval t, converted to Mbps.
                # NOTE(review): uses the nominal interval rather than the
                # measured elapsed time, so values drift slightly if the
                # sleep is imprecise — confirm whether that is acceptable.
                data.append({
                    'time': current_time - start_time,
                    'tx_rate': ((tx - old_tx) / t) / 10**6,
                    'rx_rate': ((rx - old_rx) / t) / 10**6,
                })
            old_tx = tx
            old_rx = rx
            # Sleep the remainder of the interval (never a negative amount).
            time.sleep(max(current_time + t - time.time(), 0))

        with open(csv_file, 'w', newline='') as f:
            writer = csv.DictWriter(f, fieldnames=['time', 'tx_rate', 'rx_rate'])
            writer.writeheader()
            writer.writerows(data)
DSCP_MASK: parse_intl4_shim; 36 | default: accept; 37 | } 38 | } 39 | 40 | state parse_udp { 41 | packet.extract(hdr.udp); 42 | local_metadata.l4_src_port = hdr.udp.src_port; 43 | local_metadata.l4_dst_port = hdr.udp.dst_port; 44 | transition select(hdr.ipv4.dscp) { 45 | DSCP_INT &&& DSCP_MASK: parse_intl4_shim; 46 | default: accept; 47 | } 48 | } 49 | 50 | state parse_intl4_shim { 51 | packet.extract(hdr.intl4_shim); 52 | local_metadata.int_meta.intl4_shim_len = hdr.intl4_shim.len; 53 | transition parse_int_header; 54 | } 55 | 56 | state parse_int_header { 57 | packet.extract(hdr.int_header); 58 | transition parse_int_data; 59 | } 60 | 61 | state parse_int_data { 62 | // Parse INT metadata stack 63 | packet.extract(hdr.int_data, ((bit<32>) (local_metadata.int_meta.intl4_shim_len - INT_HEADER_WORD)) << 5); 64 | transition accept; 65 | } 66 | } 67 | 68 | control MyDeparser(packet_out packet, in headers hdr) { 69 | apply { 70 | // raport headers 71 | packet.emit(hdr.report_ethernet); 72 | packet.emit(hdr.report_ipv4); 73 | packet.emit(hdr.report_udp); 74 | packet.emit(hdr.report_group_header); 75 | packet.emit(hdr.report_individual_header); 76 | 77 | // original headers 78 | packet.emit(hdr.ethernet); 79 | packet.emit(hdr.ipv4); 80 | packet.emit(hdr.udp); 81 | packet.emit(hdr.tcp); 82 | 83 | // int header 84 | packet.emit(hdr.intl4_shim); 85 | packet.emit(hdr.int_header); 86 | // hop metadata 87 | packet.emit(hdr.int_switch_id); 88 | packet.emit(hdr.int_level1_port_ids); 89 | packet.emit(hdr.int_hop_latency); 90 | packet.emit(hdr.int_q_occupancy); 91 | packet.emit(hdr.int_ingress_tstamp); 92 | packet.emit(hdr.int_egress_tstamp); 93 | packet.emit(hdr.int_level2_port_ids); 94 | packet.emit(hdr.int_egress_tx_util); 95 | 96 | // int data 97 | packet.emit(hdr.int_data); 98 | 99 | } 100 | } -------------------------------------------------------------------------------- /p4utils/utils/p4runtime_API/bytes_utils.py: 
-------------------------------------------------------------------------------- 1 | # Copyright 2019 Barefoot Networks, Inc. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # 15 | 16 | from ipaddr import IPv4Address, IPv6Address, AddressValueError 17 | 18 | from p4utils.utils.p4runtime_API.utils import UserError 19 | 20 | 21 | class UserBadIPv4Error(UserError): 22 | def __init__(self, addr): 23 | self.addr = addr 24 | 25 | def __str__(self): 26 | return "'{}' is not a valid IPv4 address".format(self.addr) 27 | 28 | def _render_traceback_(self): 29 | return [str(self)] 30 | 31 | 32 | class UserBadIPv6Error(UserError): 33 | def __init__(self, addr): 34 | self.addr = addr 35 | 36 | def __str__(self): 37 | return "'{}' is not a valid IPv6 address".format(self.addr) 38 | 39 | def _render_traceback_(self): 40 | return [str(self)] 41 | 42 | 43 | class UserBadMacError(UserError): 44 | def __init__(self, addr): 45 | self.addr = addr 46 | 47 | def __str__(self): 48 | return "'{}' is not a valid MAC address".format(self.addr) 49 | 50 | def _render_traceback_(self): 51 | return [str(self)] 52 | 53 | 54 | class UserBadValueError(UserError): 55 | def __init__(self, info=""): 56 | self.info = info 57 | 58 | def __str__(self): 59 | return self.info 60 | 61 | def _render_traceback_(self): 62 | return [str(self)] 63 | 64 | 65 | def ipv4Addr_to_bytes(addr): 66 | try: 67 | ip = IPv4Address(addr) 68 | except AddressValueError: 69 | raise 
def ipv4Addr_to_bytes(addr):
    """Return the 4-byte big-endian encoding of a dotted-quad IPv4 string.

    Raises:
        UserBadIPv4Error: if *addr* is not a valid IPv4 address.
    """
    try:
        ip = IPv4Address(addr)
    except AddressValueError:
        raise UserBadIPv4Error(addr)
    return ip.packed


def ipv6Addr_to_bytes(addr):
    """Return the 16-byte encoding of an IPv6 address string.

    Raises:
        UserBadIPv6Error: if *addr* is not a valid IPv6 address.
    """
    try:
        ip = IPv6Address(addr)
    except AddressValueError:
        raise UserBadIPv6Error(addr)
    return ip.packed


def macAddr_to_bytes(addr):
    """Return the 6-byte encoding of a colon-separated MAC address string.

    Raises:
        UserBadMacError: if *addr* is not six ':'-separated hex bytes in
            the range 0x00-0xff. (The previous version leaked a bare
            ValueError for non-hex or out-of-range groups instead of the
            user-facing error used by the other converters.)
    """
    parts = addr.split(':')
    if len(parts) != 6:
        raise UserBadMacError(addr)
    try:
        # bytes() also rejects values outside 0-255 with ValueError.
        return bytes(int(p, 16) for p in parts)
    except ValueError:
        raise UserBadMacError(addr)


def parse_value(value_str, bitwidth, base=0):
    """Parse *value_str* into its big-endian byte encoding for *bitwidth* bits.

    Heuristics: 32-bit values containing '.' are parsed as IPv4 addresses,
    48-bit values containing ':' as MAC addresses, 128-bit values
    containing ':' as IPv6 addresses; anything else is parsed with
    int(value_str, base).

    Raises:
        UserBadValueError: if the integer parse fails or the value does
            not fit in the target width.
    """
    if bitwidth == 32 and '.' in value_str:
        return ipv4Addr_to_bytes(value_str)
    elif bitwidth == 48 and ':' in value_str:
        return macAddr_to_bytes(value_str)
    elif bitwidth == 128 and ':' in value_str:
        return ipv6Addr_to_bytes(value_str)
    try:
        value = int(value_str, base)
    except ValueError:
        raise UserBadValueError(
            "Invalid value '{}': could not cast to integer, try in hex with 0x prefix".format(
                value_str))
    nbytes = (bitwidth + 7) // 8
    try:
        return value.to_bytes(nbytes, byteorder='big')
    except OverflowError:
        raise UserBadValueError(
            "Invalid value '{}': cannot be represented with '{}' bytes".format(
                value_str, nbytes))


# --- p4utils/utils/traffic_utils.py (module header) ---
import math
import time
import socket


def setSizeToInt(size):
    """Convert a size given in string notation (e.g. '10M') to an integer.

    Accepted magnitudes: B, K, M and G, now case-insensitive; a plain
    number without a magnitude is also accepted (the previous
    implementation returned 0 for both). ints and floats pass through.

    Returns:
        int: the converted value, or 0 if *size* cannot be parsed
        (printing "Conversion Fail", kept for backward compatibility).
    """
    if isinstance(size, int):
        return size
    if isinstance(size, float):
        return int(size)
    conversions = {'B': 1, 'K': 1e3, 'M': 1e6, 'G': 1e9}
    try:
        text = size.strip()
        unit = text[-1].upper()
        if unit in conversions:
            return int(float(text[:-1]) * conversions[unit])
        # No magnitude suffix: treat the whole string as a number.
        return int(float(text))
    except (AttributeError, IndexError, ValueError):
        # Narrowed from a bare except; anything unparsable still yields 0.
        print("Conversion Fail")
        return 0
def send_udp_flow(dst="10.0.0.2", sport=5000, dport=5001, tos=0, rate='10M', duration=0,
                  packet_size=1400, batch_size=1, **kwargs):
    """UDP sender that keeps a constant rate until *duration* expires.

    Args:
        dst (str, optional): destination IP. Defaults to "10.0.0.2".
        sport (int, optional): source port. Defaults to 5000.
        dport (int, optional): destination port. Defaults to 5001.
        tos (int, optional): type of service. Defaults to 0.
        rate (str, optional): flow rate, e.g. '10M'. Defaults to '10M'.
        duration (int, optional): flow duration in seconds. Defaults to 0,
            i.e. no time limit.
        packet_size (int, optional): packet size in bytes. Defaults to 1400.
        batch_size (int, optional): packets sent back-to-back per wakeup.
            Defaults to 1.

    Note:
        The original docstring described sport as the destination port and
        dport as the source port; the code does the opposite, which is
        what is documented here.
    """
    sport = int(sport)
    dport = int(dport)
    packet_size = int(packet_size)
    batch_size = int(batch_size)  # was not coerced; now consistent with the others
    tos = int(tos)

    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.setsockopt(socket.SOL_IP, socket.IP_TOS, tos)
    s.bind(('', sport))

    # Convert the human-readable rate (bits) to bytes per second.
    rate = int(setSizeToInt(rate) / 8)
    totalTime = float(duration)

    # 17 bytes of headroom to roughly compensate the header overhead
    # (4 of which are used by the sequence number prefix below).
    payload = b"A" * int(packet_size - 17)
    seq = 0

    try:
        startTime = time.time()
        while True:
            # Stop once a finite duration has elapsed.
            if totalTime > 0 and time.time() - startTime >= totalTime:
                break

            packets_to_send = rate / packet_size
            times = math.ceil((float(rate) / packet_size) / batch_size)
            time_step = 1 / times
            start = time.time()
            i = 0
            packets_sent = 0
            # Send one second's worth of traffic in evenly spaced batches.
            while packets_sent < packets_to_send:
                for _ in range(batch_size):
                    # Each datagram starts with a 4-byte big-endian sequence number.
                    s.sendto(seq.to_bytes(4, byteorder='big') + payload, (dst, dport))
                    packets_sent += 1
                    seq += 1

                i += 1
                next_send_time = start + (i * time_step)
                time.sleep(max(0, next_send_time - time.time()))
            # Absorb any leftover of the 1-second budget.
            time.sleep(max(0, 1 - (time.time() - start)))

    finally:
        s.close()


def recv_udp_flow(dport):
    """Receive and discard UDP datagrams on *dport* until interrupted.

    Args:
        dport (int): port to listen on.
    """
    dport = int(dport)
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.bind(("", dport))
    try:
        while True:
            s.recvfrom(2048)
    except KeyboardInterrupt:
        # Ctrl-C is the expected way to stop the receiver.
        pass
    finally:
        # The original bare `except` swallowed every error; real failures
        # now propagate after the socket is closed.
        s.close()
/*************************************************************************
 ****************  I N G R E S S   P R O C E S S I N G  ******************
 *************************************************************************/

control MyIngress(inout headers hdr,
                  inout local_metadata_t local_metadata,
                  inout standard_metadata_t standard_metadata) {

    apply {
        if (hdr.ipv4.isValid()) {
            l3_forward.apply(hdr, local_metadata, standard_metadata);

            // Only UDP traffic is classified for INT source/sink roles here.
            if (hdr.udp.isValid()) {
                process_int_source_sink.apply(hdr, local_metadata, standard_metadata);
            }

            if (local_metadata.int_meta.source == true) {
                process_int_source.apply(hdr, local_metadata);
            }

            if (local_metadata.int_meta.sink == true && hdr.int_header.isValid()) {
                // Save the intrinsic metadata the egress report path reads,
                // then clone the packet (ingress-to-egress) so the clone can
                // be turned into a Telemetry Report.
                local_metadata.perserv_meta.ingress_port = standard_metadata.ingress_port;
                local_metadata.perserv_meta.egress_port = standard_metadata.egress_port;
                local_metadata.perserv_meta.deq_qdepth = standard_metadata.deq_qdepth;
                local_metadata.perserv_meta.ingress_global_timestamp = standard_metadata.ingress_global_timestamp;
                clone_preserving_field_list(CloneType.I2E, REPORT_MIRROR_SESSION_ID, CLONE_FL_1);
            }
        }
    }
}

/*************************************************************************
 ****************  E G R E S S   P R O C E S S I N G  ********************
 *************************************************************************/

control MyEgress(inout headers hdr,
                 inout local_metadata_t local_metadata,
                 inout standard_metadata_t standard_metadata) {

    apply {
        if (hdr.int_header.isValid()) {
            if (standard_metadata.instance_type == PKT_INSTANCE_TYPE_INGRESS_CLONE) {
                // Restore the metadata preserved across the clone operation.
                standard_metadata.ingress_port = local_metadata.perserv_meta.ingress_port;
                standard_metadata.egress_port = local_metadata.perserv_meta.egress_port;
                standard_metadata.deq_qdepth = local_metadata.perserv_meta.deq_qdepth;
                standard_metadata.ingress_global_timestamp = local_metadata.perserv_meta.ingress_global_timestamp;
            }

            // Every INT hop appends its own metadata to the stack.
            process_int_transit.apply(hdr, local_metadata, standard_metadata);

            if (standard_metadata.instance_type == PKT_INSTANCE_TYPE_INGRESS_CLONE) {
                // The clone is encapsulated into a Telemetry Report.
                process_int_report.apply(hdr, local_metadata, standard_metadata);
            }

            if (local_metadata.int_meta.sink == true && standard_metadata.instance_type != PKT_INSTANCE_TYPE_INGRESS_CLONE) {
                // The original packet leaves the INT domain with INT state stripped.
                process_int_sink.apply(hdr, local_metadata, standard_metadata);
            }
        }
    }
}

/*************************************************************************
 ***********************  S W I T C H  ***********************************
 *************************************************************************/

V1Switch(
    MyParser(),
    MyVerifyChecksum(),
    MyIngress(),
    MyEgress(),
    MyComputeChecksum(),
    MyDeparser()
) main;
/* -*- P4_16 -*- */

// Classify the packet's role in the INT domain from its ports:
// source (first INT hop) and/or sink (last INT hop).
control process_int_source_sink (
    inout headers hdr,
    inout local_metadata_t local_metadata,
    inout standard_metadata_t standard_metadata) {

    action int_set_source () {
        local_metadata.int_meta.source = true;
    }

    action int_set_sink () {
        local_metadata.int_meta.sink = true;
    }

    // Packets entering on a configured ingress port are INT sources.
    table tb_set_source {
        key = {
            standard_metadata.ingress_port: exact;
        }
        actions = {
            int_set_source;
            NoAction();
        }
        const default_action = NoAction();
        size = MAX_PORTS;
    }

    // Packets leaving on a configured egress port are INT sinks.
    table tb_set_sink {
        key = {
            standard_metadata.egress_port: exact;
        }
        actions = {
            int_set_sink;
            NoAction();
        }
        const default_action = NoAction();
        size = MAX_PORTS;
    }

    apply {
        tb_set_source.apply();
        tb_set_sink.apply();
    }
}

// Insert the INT shim + INT-MD headers on packets of monitored flows.
control process_int_source (
    inout headers hdr,
    inout local_metadata_t local_metadata) {

    action int_source(bit<5> hop_metadata_len, bit<8> remaining_hop_cnt,
                      bit<4> ins_mask0003, bit<4> ins_mask0407) {
        // Insert the INT shim header.
        hdr.intl4_shim.setValid();
        hdr.intl4_shim.int_type = 1;                 // hop-by-hop (1); destination (2); MX (3)
        hdr.intl4_shim.npt = 0;                      // next protocol type
        hdr.intl4_shim.len = INT_HEADER_WORD;        // INT-MD header length in 4-byte words
        hdr.intl4_shim.udp_ip_dscp = hdr.ipv4.dscp;  // stash the original DSCP; the sink restores it
        hdr.intl4_shim.udp_ip = 0;

        // Insert the fixed INT-MD metadata header.
        hdr.int_header.setValid();
        hdr.int_header.ver = 2;
        hdr.int_header.d = 0;
        hdr.int_header.e = 0;
        hdr.int_header.m = 0;
        hdr.int_header.rsvd = 0;
        hdr.int_header.hop_metadata_len = hop_metadata_len;
        hdr.int_header.remaining_hop_cnt = remaining_hop_cnt;
        hdr.int_header.instruction_mask_0003 = ins_mask0003;
        hdr.int_header.instruction_mask_0407 = ins_mask0407;
        hdr.int_header.instruction_mask_0811 = 0;    // not supported
        hdr.int_header.instruction_mask_1215 = 0;    // not supported

        hdr.int_header.domain_specific_id = 0;       // unique INT domain ID
        hdr.int_header.ds_instruction = 0;           // domain-specific instruction bitmap
        hdr.int_header.ds_flags = 0;                 // domain-specific flags

        // Grow the L3/L4 length fields by the headers just inserted.
        hdr.ipv4.len = hdr.ipv4.len + INT_TOTAL_HEADER_SIZE;
        if (hdr.udp.isValid()) {
            hdr.udp.length_ = hdr.udp.length_ + INT_TOTAL_HEADER_SIZE;
        }

        // Mark the packet as INT-carrying for downstream parsers.
        hdr.ipv4.dscp = DSCP_INT;
    }

    // One entry per monitored flow, identified by the ternary 4-tuple.
    table tb_int_source {
        key = {
            hdr.ipv4.src_addr: ternary;
            hdr.ipv4.dst_addr: ternary;
            local_metadata.l4_src_port: ternary;
            local_metadata.l4_dst_port: ternary;
        }
        actions = {
            int_source;
            NoAction;
        }
        const default_action = NoAction();
    }

    apply {
        tb_int_source.apply();
    }
}
class ThriftClient:
    """Controller that configures a thrift switch from a command file.

    Reads CLI commands from a text configuration file and pipes them to
    the thrift CLI binary connected to the switch.

    Args:
        thrift_port (int): thrift server port number.
        sw_name (str): name of the switch to configure.
        cli_bin (str): client binary file path.
        cli_input (str): path of the configuration text file.
        log_enabled (bool): whether to enable logs.
        log_dir (str): directory to store logs.
    """
    # Class-wide default CLI binary; overridable via set_binary().
    cli_bin = 'simple_switch_CLI'

    @classmethod
    def set_binary(cls, cli_bin):
        """Set the class default CLI binary."""
        # Fix: the classmethod previously named its first parameter `self`.
        ThriftClient.cli_bin = cli_bin

    def __init__(self, thrift_port,
                 sw_name,
                 cli_bin=None,
                 cli_input=None,
                 log_enabled=True,
                 log_dir='/tmp',
                 **kwargs):

        self.set_conf(cli_input)
        self.sw_name = sw_name
        self.thrift_port = thrift_port
        self.log_enabled = log_enabled
        self.log_dir = log_dir

        if self.log_enabled:
            # Make sure that the provided log path is not pointing to a file
            # and, if necessary, create an empty log dir.
            if not os.path.isdir(self.log_dir):
                if os.path.exists(self.log_dir):
                    raise NotADirectoryError("'{}' exists and is not a directory.".format(self.log_dir))
                else:
                    os.mkdir(self.log_dir)

        if cli_bin is not None:
            self.set_binary(cli_bin)

    def get_conf(self):
        """Return the configuration file path (may be None)."""
        return self.cli_input

    def set_conf(self, cli_input):
        """Set the configuration file path (normalized to a real path)."""
        if cli_input is not None:
            self.cli_input = os.path.realpath(cli_input)
        else:
            self.cli_input = None

    def configure(self):
        """Configure the switch with the provided file.

        Raises:
            FileNotFoundError: if no configuration file is set or it is missing.
            ConnectionRefusedError: if nothing listens on the thrift port.
        """
        if self.cli_input is None:
            raise FileNotFoundError('could not find file {} for switch {}.'.format(self.cli_input, self.sw_name))
        if not os.path.isfile(self.cli_input):
            raise FileNotFoundError('could not find file {} for switch {}.'.format(self.cli_input, self.sw_name))
        if not check_listening_on_port(self.thrift_port):
            raise ConnectionRefusedError('could not connect to switch {} on port {}.'.format(self.sw_name, self.thrift_port))

        # Fix: build the log path with os.path.join instead of string concat.
        log_path = os.path.join(self.log_dir, '{}_cli_output.log'.format(self.sw_name))
        with open(self.cli_input, 'r') as fin:
            entries = [x.strip() for x in fin.readlines() if x.strip() != '']
        # Remove comments.
        entries = [x for x in entries if (not x.startswith('//') and not x.startswith('#'))]
        # Join commands.
        entries = '\n'.join(entries)
        # Execute commands.
        debug(self.cli_bin + ' --thrift-port ' + str(self.thrift_port) + '\n')
        p = subprocess.Popen([self.cli_bin, '--thrift-port', str(self.thrift_port)],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
        stdout, _ = p.communicate(input=entries.encode())
        stdout = stdout.decode(errors='backslashreplace')
        # Save logs.
        if self.log_enabled:
            with open(log_path, 'w') as log_file:
                log_file.write(stdout)
        success = True
        # The thrift CLI reports command errors on stdout, not via returncode.
        if 'Invalid' in stdout or 'Error' in stdout:
            error('Switch {}: error in file {}, '
                  'check {} for details.\n'.format(self.sw_name,
                                                   self.cli_input,
                                                   log_path))
            success = False
        # Check returncode.
        if p.returncode != 0:
            error('Switch {}: thrift client exited with error, '
                  'check {} for details.\n'.format(self.sw_name,
                                                   log_path))
            success = False
        if success:
            info('Switch {}: successfully configured with file {}.\n'.format(self.sw_name,
                                                                             self.cli_input))
#!/usr/bin/env python3
"""Scapy layer definitions for INT Telemetry Report packets (v2.x)."""

import sys
import io

from scapy.all import sniff
from scapy.all import Packet
from scapy.all import ShortField, BitField
from scapy.layers.inet import Ether, IP, TCP, UDP, bind_layers


class INTREP(Packet):
    """Telemetry Report Group Header (spec v2.0)."""
    name = "INT Report Header v2.0"
    fields_desc = [
        BitField("version", 0, 4),
        BitField("hw_id", 0, 6),
        BitField("seq_number", 0, 22),
        BitField("node_id", 0, 32),
    ]


class INTIndiviREP(Packet):
    """Telemetry Report Individual Header (spec v2.0)."""
    name = "INT Report Individual Header v2.0"
    fields_desc = [
        BitField("rep_type", 0, 4),
        BitField("in_type", 0, 4),
        BitField("rep_len", 0, 8),
        BitField("md_len", 0, 8),
        BitField("flag", 0, 4),
        BitField("rsvd", 0, 4),
        ShortField("RepMdBits", 0),
        ShortField("DomainID", 0),
        ShortField("DSMdBits", 0),
        ShortField("DSMdstatus", 0),
    ]


class INTShim(Packet):
    """INT shim header for TCP/UDP encapsulation (spec v2.1)."""
    name = "INT Shim header v2.1"
    fields_desc = [
        BitField("type", 0, 4),
        BitField("next_protocol", 0, 2),
        BitField("rsvd", 0, 2),
        BitField("int_length", 0, 8),
        # NOTE(review): a field name containing spaces cannot be accessed
        # as an attribute; kept as-is to preserve the wire format name.
        ShortField("NPT Dependent Field", 0),
    ]


class INTMD(Packet):
    """Fixed INT-MD metadata header (spec v2.1)."""
    name = "INT-MD Header v2.1"
    fields_desc = [
        BitField("version", 0, 4),
        BitField("flags", 0, 3),
        BitField("reserved", 0, 12),
        BitField("HopMetaLength", 0, 5),
        BitField("RemainingHopCount", 0, 8),
        BitField("instruction_mask_0003", 0, 4),
        BitField("instruction_mask_0407", 0, 4),
        BitField("instruction_mask_0811", 0, 4),
        BitField("instruction_mask_1215", 0, 4),
        ShortField("DomainID", 0),
        ShortField("DomainInstructions", 0),
        ShortField("DomainFlags", 0),
    ]


# Reports arrive on UDP/1234 (matches the switch's report_udp.src_port);
# an in_type of 3 means the inner packet starts at its Ethernet header.
bind_layers(UDP, INTREP, dport=1234)
bind_layers(INTREP, INTIndiviREP)
bind_layers(INTIndiviREP, Ether, in_type=3)
bind_layers(INTShim, INTMD, type=1)
# Bit positions of INT instructions 0-7 inside the 8-bit instruction map.
SWITCH_ID_BIT = 0b10000000
L1_PORT_IDS_BIT = 0b01000000
HOP_LATENCY_BIT = 0b00100000
QUEUE_BIT = 0b00010000
INGRESS_TSTAMP_BIT = 0b00001000
EGRESS_TSTAMP_BIT = 0b00000100
L2_PORT_IDS_BIT = 0b00000010
EGRESS_PORT_TX_UTIL_BIT = 0b00000001


class HopMetadata():
    """Per-hop INT metadata decoded from one slice of the metadata stack.

    Fields stay None when the corresponding instruction bit is unset.
    """

    def __init__(self):
        self.switch_id = None
        self.l1_ingress_port_id = None
        self.l1_egress_port_id = None
        self.hop_latency = None
        self.q_id = None
        self.q_occupancy = None
        self.ingress_tstamp = None
        self.egress_tstamp = None
        self.l2_ingress_port_id = None
        self.l2_egress_port_id = None
        self.egress_port_tx_util = None

    @staticmethod
    def from_bytes(data, ins_map):
        """Decode one hop's metadata bytes according to *ins_map*.

        Args:
            data (bytes): raw metadata for a single hop.
            ins_map (int): 8-bit instruction bitmap (bits 0-7 above).

        Returns:
            HopMetadata: the decoded record.
        """
        hop = HopMetadata()
        stream = io.BytesIO(data)

        def read_uint(num_bytes):
            # Fields are laid out big-endian, back to back.
            return int.from_bytes(stream.read(num_bytes), byteorder='big')

        if ins_map & SWITCH_ID_BIT:
            hop.switch_id = read_uint(4)
        if ins_map & L1_PORT_IDS_BIT:
            hop.l1_ingress_port_id = read_uint(2)
            hop.l1_egress_port_id = read_uint(2)
        if ins_map & HOP_LATENCY_BIT:
            hop.hop_latency = read_uint(4)
        if ins_map & QUEUE_BIT:
            hop.q_id = read_uint(1)
            hop.q_occupancy = read_uint(3)
        if ins_map & INGRESS_TSTAMP_BIT:
            hop.ingress_tstamp = read_uint(8)
        if ins_map & EGRESS_TSTAMP_BIT:
            hop.egress_tstamp = read_uint(8)
        if ins_map & L2_PORT_IDS_BIT:
            hop.l2_ingress_port_id = read_uint(4)
            hop.l2_egress_port_id = read_uint(4)
        if ins_map & EGRESS_PORT_TX_UTIL_BIT:
            hop.egress_port_tx_util = read_uint(4)
        return hop

    def __str__(self):
        return str(vars(self))
def parse_metadata(int_pkt):
    """Split the INT metadata stack of *int_pkt* into per-hop records.

    Args:
        int_pkt: scapy packet starting at the INT shim (INTShim/INTMD).

    Returns:
        list[HopMetadata]: one decoded record per hop, outermost first.
    """
    int_pkt.show()

    # Only instruction bits 0-7 are produced by the switches.
    instructions = (int_pkt[INTMD].instruction_mask_0003 << 4) + int_pkt[INTMD].instruction_mask_0407
    # Shim length counts the INT-MD header (3 words) too; remove it to
    # keep only the metadata stack length (in 4-byte words).
    int_len = int_pkt.int_length - 3
    hop_meta_len = int_pkt[INTMD].HopMetaLength
    int_metadata = int_pkt.load[:int_len << 2]  # words -> bytes

    hop_count = int(int_len / hop_meta_len)
    hop_metadata = []
    for i in range(hop_count):
        metadata_source = int_metadata[i * hop_meta_len << 2:(i + 1) * hop_meta_len << 2]
        meta = HopMetadata.from_bytes(metadata_source, instructions)
        print(meta)
        hop_metadata.append(meta)

    return hop_metadata


def handle_pkt(pkt):
    """Print one sniffed telemetry report and decode its metadata stack."""
    if IP in pkt:
        print("\n\n********* Receiving Telemtry Report ********")
        pkt[INTREP].show()
        # The inner INT headers travel as the UDP payload of the report.
        parse_metadata(INTShim(pkt.load))


def main():
    """Sniff telemetry reports on an interface.

    The interface may now be overridden with argv[1]; the previous
    hard-coded 's3-cpu-eth1' remains the default.
    """
    iface = sys.argv[1] if len(sys.argv) > 1 else 's3-cpu-eth1'
    print("sniffing on %s" % iface)
    sys.stdout.flush()
    sniff(iface=iface, filter='inbound and tcp or udp',
          prn=lambda x: handle_pkt(x))


if __name__ == '__main__':
    main()
/* -*- P4_16 -*- */

// Strip all INT state from the packet and restore the original headers
// before delivering it to the end host.
control process_int_sink (
    inout headers hdr,
    inout local_metadata_t local_metadata,
    inout standard_metadata_t standard_metadata) {

    action int_sink() {
        // Restore the DSCP that the INT source stashed in the shim.
        hdr.ipv4.dscp = hdr.intl4_shim.udp_ip_dscp;
        // Shrink the IPv4/UDP length fields by the stripped INT bytes.
        bit<16> len_bytes = (((bit<16>)hdr.intl4_shim.len) << 2) + INT_SHIM_HEADER_SIZE;
        hdr.ipv4.len = hdr.ipv4.len - len_bytes;
        if (hdr.udp.isValid()) {
            hdr.udp.length_ = hdr.udp.length_ - len_bytes;
        }
        // Invalidate every INT header so the deparser omits them.
        hdr.intl4_shim.setInvalid();
        hdr.int_header.setInvalid();
        hdr.int_switch_id.setInvalid();
        hdr.int_level1_port_ids.setInvalid();
        hdr.int_hop_latency.setInvalid();
        hdr.int_q_occupancy.setInvalid();
        hdr.int_ingress_tstamp.setInvalid();
        hdr.int_egress_tstamp.setInvalid();
        hdr.int_level2_port_ids.setInvalid();
        hdr.int_egress_tx_util.setInvalid();
        hdr.int_data.setInvalid();
    }

    table tb_int_sink {
        actions = {
            int_sink;
        }
        default_action = int_sink();
    }

    apply {
        tb_int_sink.apply();
    }
}

// Encapsulate the cloned packet into a Telemetry Report addressed to
// the monitoring host.
control process_int_report (
    inout headers hdr,
    inout local_metadata_t local_metadata,
    inout standard_metadata_t standard_metadata) {

    // Report sequence number, shared by all reports of this switch.
    register<bit<22>>(1) seq_number;

    /********************** A C T I O N S **********************/

    action increment_counter() {
        bit<22> tmp;
        seq_number.read(tmp, 0);
        tmp = tmp + 1;
        seq_number.write(0, tmp);
    }

    action do_report_encapsulation(mac_t src_mac,
                                   mac_t mon_mac,
                                   ip_address_t src_ip,
                                   ip_address_t mon_ip,
                                   l4_port_t mon_port) {
        // Report layout:
        // [Eth][IP][UDP][group hdr][individual hdr][inner Eth][IP][UDP/TCP][INT hdrs][INT stack]

        // Outer Ethernet.
        hdr.report_ethernet.setValid();
        hdr.report_ethernet.dst_addr = mon_mac;
        hdr.report_ethernet.src_addr = src_mac;
        hdr.report_ethernet.ether_type = ETH_TYPE_IPV4;

        // Outer IPv4.
        hdr.report_ipv4.setValid();
        hdr.report_ipv4.version = IP_VERSION_4;
        hdr.report_ipv4.ihl = IPV4_IHL_MIN;
        hdr.report_ipv4.dscp = 6w0;
        hdr.report_ipv4.ecn = 2w0;
        // Total length = outer IPv4/UDP + report headers + inner Eth/IPv4/UDP
        // + shim + INT headers and metadata stack (shim len is in words).
        hdr.report_ipv4.len = (bit<16>) IPV4_MIN_HEAD_LEN +
                              (bit<16>) UDP_HEADER_LEN +
                              (bit<16>) REPORT_GROUP_HEADER_LEN +
                              (bit<16>) REPORT_INDIVIDUAL_HEADER_LEN +
                              (bit<16>) ETH_HEADER_LEN +
                              (bit<16>) IPV4_MIN_HEAD_LEN +
                              (bit<16>) UDP_HEADER_LEN +
                              INT_SHIM_HEADER_SIZE + (((bit<16>) hdr.intl4_shim.len) << 2);

        hdr.report_ipv4.identification = 0;
        hdr.report_ipv4.flags = 0;
        hdr.report_ipv4.frag_offset = 0;
        hdr.report_ipv4.ttl = REPORT_HDR_TTL;
        hdr.report_ipv4.protocol = IP_PROTO_UDP;
        hdr.report_ipv4.src_addr = src_ip;
        hdr.report_ipv4.dst_addr = mon_ip;

        // Outer UDP; the collector binds to src_port 1234 (see receive_report.py).
        hdr.report_udp.setValid();
        hdr.report_udp.src_port = 1234;
        hdr.report_udp.dst_port = mon_port;
        hdr.report_udp.length_ = (bit<16>) UDP_HEADER_LEN +
                                 (bit<16>) REPORT_GROUP_HEADER_LEN +
                                 (bit<16>) REPORT_INDIVIDUAL_HEADER_LEN +
                                 (bit<16>) ETH_HEADER_LEN +
                                 (bit<16>) IPV4_MIN_HEAD_LEN +
                                 (bit<16>) UDP_HEADER_LEN +
                                 INT_SHIM_HEADER_SIZE + (((bit<16>) hdr.intl4_shim.len) << 2);

        // Telemetry Report Group Header.
        hdr.report_group_header.setValid();
        hdr.report_group_header.ver = 2;
        hdr.report_group_header.hw_id = HW_ID;
        seq_number.read(hdr.report_group_header.seq_no, 0);
        increment_counter();
        hdr.report_group_header.node_id = local_metadata.int_meta.switch_id;

        // Telemetry Report Individual Header.
        hdr.report_individual_header.setValid();
        hdr.report_individual_header.rep_type = 1;  // INT report
        hdr.report_individual_header.in_type = 3;   // inner packet starts at Ethernet
        hdr.report_individual_header.rep_len = 0;
        hdr.report_individual_header.md_len = 0;
        hdr.report_individual_header.d = 0;
        hdr.report_individual_header.q = 0;
        hdr.report_individual_header.f = 1;
        hdr.report_individual_header.i = 0;
        hdr.report_individual_header.rsvd = 0;

        // Individual report inner contents.
        hdr.report_individual_header.rep_md_bits = 0;
        hdr.report_individual_header.domain_specific_id = 0;
        hdr.report_individual_header.domain_specific_md_bits = 0;
        hdr.report_individual_header.domain_specific_md_status = 0;

        // Drop everything beyond the report payload.
        truncate((bit<32>)hdr.report_ipv4.len + (bit<32>) ETH_HEADER_LEN);
    }

    table tb_generate_report {
        actions = {
            do_report_encapsulation;
            NoAction();
        }
        default_action = NoAction();
    }

    apply {
        tb_generate_report.apply();
    }
}
#include "defines.p4"
#include "headers.p4"

#ifndef __INT_HEADERS__
#define __INT_HEADERS__

// INT shim header for TCP/UDP encapsulation.
header intl4_shim_t {
    bit<4>  int_type;    // type of INT header following the shim
    bit<2>  npt;         // next protocol type
    bit<2>  rsvd;
    bit<8>  len;         // INT headers + stack length in 4-byte words (shim excluded)
    bit<6>  udp_ip_dscp; // npt-dependent: original DSCP, IP protocol, or UDP dst port
    bit<10> udp_ip;      // remainder of the npt-dependent field
}

const bit<16> INT_SHIM_HEADER_SIZE = 4;

// Fixed INT-MD metadata header.
header int_header_t {
    bit<4>  ver;
    bit<1>  d;
    bit<1>  e;
    bit<1>  m;
    bit<12> rsvd;
    bit<5>  hop_metadata_len;
    bit<8>  remaining_hop_cnt;
    bit<4>  instruction_mask_0003;  // instruction bitmap split in nibbles for lookup
    bit<4>  instruction_mask_0407;
    bit<4>  instruction_mask_0811;
    bit<4>  instruction_mask_1215;
    bit<16> domain_specific_id;     // unique INT domain ID
    bit<16> ds_instruction;         // domain-specific instruction bitmap
    bit<16> ds_flags;               // domain-specific flags
}

const bit<16> INT_HEADER_SIZE = 12;

const bit<16> INT_TOTAL_HEADER_SIZE = INT_HEADER_SIZE + INT_SHIM_HEADER_SIZE;


// INT meta-value headers: one header type per metadata kind.
header int_switch_id_t {
    bit<32> switch_id;
}
header int_level1_port_ids_t {
    bit<16> ingress_port_id;
    bit<16> egress_port_id;
}
header int_hop_latency_t {
    bit<32> hop_latency;
}
header int_q_occupancy_t {
    bit<8>  q_id;
    bit<24> q_occupancy;
}
header int_ingress_tstamp_t {
    bit<64> ingress_tstamp;
}
header int_egress_tstamp_t {
    bit<64> egress_tstamp;
}
header int_level2_port_ids_t {
    bit<32> ingress_port_id;
    bit<32> egress_port_id;
}

// These two are not implemented yet.
header int_egress_port_tx_util_t {
    bit<32> egress_port_tx_util;
}
header int_buffer_t {
    bit<8>  buffer_id;
    bit<24> buffer_occupancy;
}

header int_data_t {
    // Maximum INT metadata stack size in bits:
    // (0x3F - 3) * 4 * 8 (excluding INT shim header and INT header).
    varbit<1920> data;
}


// Telemetry report headers.
header report_group_header_t {
    bit<4>  ver;
    bit<6>  hw_id;
    bit<22> seq_no;
    bit<32> node_id;
}

const bit<8> REPORT_GROUP_HEADER_LEN = 8;

header report_individual_header_t {
    bit<4>  rep_type;
    bit<4>  in_type;
    bit<8>  rep_len;
    bit<8>  md_len;
    bit<1>  d;
    bit<1>  q;
    bit<1>  f;
    bit<1>  i;
    bit<4>  rsvd;
    // Individual report inner contents for rep_type 1 = INT.
    bit<16> rep_md_bits;
    bit<16> domain_specific_id;
    bit<16> domain_specific_md_bits;
    bit<16> domain_specific_md_status;
}
const bit<8> REPORT_INDIVIDUAL_HEADER_LEN = 12;

// Telemetry drop report header.
header drop_report_header_t {
    bit<32> switch_id;
    bit<16> ingress_port_id;
    bit<16> egress_port_id;
    bit<8>  queue_id;
    bit<8>  drop_reason;
    bit<16> pad;
}
const bit<8> DROP_REPORT_HEADER_LEN = 12;

struct headers {

    // Original packet headers.
    ethernet_t ethernet;
    ipv4_t ipv4;
    udp_t udp;
    tcp_t tcp;

    // INT report encapsulation.
    ethernet_t report_ethernet;
    ipv4_t report_ipv4;
    udp_t report_udp;

    // INT headers.
    intl4_shim_t intl4_shim;
    int_header_t int_header;

    // INT metadata.
    int_switch_id_t int_switch_id;
    int_level1_port_ids_t int_level1_port_ids;
    int_hop_latency_t int_hop_latency;
    int_q_occupancy_t int_q_occupancy;
    int_ingress_tstamp_t int_ingress_tstamp;
    int_egress_tstamp_t int_egress_tstamp;
    int_level2_port_ids_t int_level2_port_ids;
    int_egress_port_tx_util_t int_egress_tx_util;
    int_data_t int_data;

    // INT report headers.
    report_group_header_t report_group_header;
    report_individual_header_t report_individual_header;
    drop_report_header_t drop_report_header;
}

const bit<8> CLONE_FL_1 = 1;

// Standard metadata mirrored into user metadata; only the @field_list
// annotated fields survive clone_preserving_field_list(CLONE_FL_1).
struct preserving_metadata_t {
    @field_list(CLONE_FL_1)
    bit<9>  ingress_port;
    bit<9>  egress_spec;
    @field_list(CLONE_FL_1)
    bit<9>  egress_port;
    bit<32> clone_spec;
    bit<32> instance_type;
    bit<1>  drop;
    bit<16> recirculate_port;
    bit<32> packet_length;
    bit<32> enq_timestamp;
    bit<19> enq_qdepth;
    bit<32> deq_timedelta;
    @field_list(CLONE_FL_1)
    bit<19> deq_qdepth;
    @field_list(CLONE_FL_1)
    bit<48> ingress_global_timestamp;
    bit<48> egress_global_timestamp;
    bit<32> lf_field_list;
    bit<16> mcast_grp;
    bit<32> resubmit_flag;
    bit<16> egress_rid;
    bit<1>  checksum_error;
    bit<32> recirculate_flag;
}

struct int_metadata_t {
    switch_id_t switch_id;
    bit<16> new_bytes;
    bit<8>  new_words;
    bool    source;
    bool    sink;
    bool    transit;
    bit<8>  intl4_shim_len;
    bit<16> int_shim_len;
}

struct local_metadata_t {
    bit<16> l4_src_port;
    bit<16> l4_dst_port;
    int_metadata_t int_meta;
    preserving_metadata_t perserv_meta;
}

#endif
-------------------------------------------------------------------------------- 1 | # Copyright 2019 Barefoot Networks, Inc. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # 15 | 16 | from collections import Counter 17 | import enum 18 | from functools import partialmethod 19 | 20 | 21 | @enum.unique 22 | class P4Type(enum.Enum): 23 | table = 1 24 | action = 2 25 | action_profile = 3 26 | counter = 4 27 | direct_counter = 5 28 | meter = 6 29 | direct_meter = 7 30 | digest = 8 31 | 32 | 33 | P4Type.table.p4info_name = "tables" 34 | P4Type.action.p4info_name = "actions" 35 | P4Type.action_profile.p4info_name = "action_profiles" 36 | P4Type.counter.p4info_name = "counters" 37 | P4Type.direct_counter.p4info_name = "direct_counters" 38 | P4Type.meter.p4info_name = "meters" 39 | P4Type.direct_meter.p4info_name = "direct_meters" 40 | P4Type.digest.p4info_name = "digests" 41 | 42 | 43 | for obj_type in P4Type: 44 | obj_type.pretty_name = obj_type.name.replace('_', ' ') 45 | obj_type.pretty_names = obj_type.pretty_name + 's' 46 | 47 | 48 | @enum.unique 49 | class P4RuntimeEntity(enum.Enum): 50 | table_entry = 1 51 | action_profile_member = 2 52 | action_profile_group = 3 53 | meter_entry = 4 54 | direct_meter_entry = 5 55 | counter_entry = 6 56 | direct_counter_entry = 7 57 | packet_replication_engine_entry = 8 58 | digest_entry = 9 59 | 60 | 61 | class Context: 62 | def __init__(self): 63 | self.p4info = None 64 | 65 | def 
set_p4info(self, p4info): 66 | self.p4info = p4info 67 | self.p4info_obj_map = {} 68 | self.p4info_obj_map_by_id = {} 69 | self.p4info_objs_by_type = {} 70 | self._import_p4info_names() 71 | 72 | def get_obj(self, obj_type, name): 73 | key = (obj_type, name) 74 | return self.p4info_obj_map.get(key, None) 75 | 76 | def get_obj_id(self, obj_type, name): 77 | obj = self.get_obj(obj_type, name) 78 | if obj is None: 79 | return None 80 | return obj.preamble.id 81 | 82 | def get_param(self, action_name, name): 83 | a = self.get_obj(P4Type.action, action_name) 84 | if a is None: 85 | return None 86 | for p in a.params: 87 | if p.name == name: 88 | return p 89 | 90 | def get_param_len(self, action_name): 91 | a = self.get_obj(P4Type.action, action_name) 92 | if a is None: 93 | return None 94 | return len(a.params) 95 | 96 | def get_mf(self, table_name, name): 97 | t = self.get_obj(P4Type.table, table_name) 98 | if t is None: 99 | return None 100 | for mf in t.match_fields: 101 | if mf.name == name: 102 | return mf 103 | 104 | def get_mf_len(self, table_name): 105 | t = self.get_obj(P4Type.table, table_name) 106 | if t is None: 107 | return None 108 | return len(t.match_fields) 109 | 110 | def get_param_id(self, action_name, name): 111 | p = self.get_param(action_name, name) 112 | return None if p is None else p.id 113 | 114 | def get_mf_id(self, table_name, name): 115 | mf = self.get_mf(table_name, name) 116 | return None if mf is None else mf.id 117 | 118 | def get_param_name(self, action_name, id_): 119 | a = self.get_obj(P4Type.action, action_name) 120 | if a is None: 121 | return None 122 | for p in a.params: 123 | if p.id == id_: 124 | return p.name 125 | 126 | def get_mf_name(self, table_name, id_): 127 | t = self.get_obj(P4Type.table, table_name) 128 | if t is None: 129 | return None 130 | for mf in t.match_fields: 131 | if mf.id == id_: 132 | return mf.name 133 | 134 | def get_objs(self, obj_type): 135 | m = self.p4info_objs_by_type[obj_type] 136 | for name, obj in 
m.items(): 137 | yield name, obj 138 | 139 | def get_name_from_id(self, id_): 140 | return self.p4info_obj_map_by_id[id_].preamble.name 141 | 142 | def get_obj_by_id(self, id_): 143 | return self.p4info_obj_map_by_id[id_] 144 | 145 | # In order to make the CLI easier to use, we accept any suffix that 146 | # uniquely identifies the object among p4info objects of the same type. 147 | def _import_p4info_names(self): 148 | suffix_count = Counter() 149 | for obj_type in P4Type: 150 | self.p4info_objs_by_type[obj_type] = {} 151 | for obj in getattr(self.p4info, obj_type.p4info_name): 152 | pre = obj.preamble 153 | self.p4info_obj_map_by_id[pre.id] = obj 154 | self.p4info_objs_by_type[obj_type][pre.name] = obj 155 | suffix = None 156 | for s in reversed(pre.name.split(".")): 157 | suffix = s if suffix is None else s + "." + suffix 158 | key = (obj_type, suffix) 159 | self.p4info_obj_map[key] = obj 160 | suffix_count[key] += 1 161 | for key, c in suffix_count.items(): 162 | if c > 1: 163 | del self.p4info_obj_map[key] 164 | 165 | 166 | # Add p4info object and object id "getters" for each object type; these are just 167 | # wrappers around Context.get_obj and Context.get_obj_id. 
168 | # For example: get_table(x) and get_table_id(x) respectively call 169 | # get_obj(P4Type.table, x) and get_obj_id(P4Type.table, x) 170 | for obj_type in P4Type: 171 | name = "_".join(["get", obj_type.name]) 172 | setattr(Context, name, partialmethod( 173 | Context.get_obj, obj_type)) 174 | name = "_".join(["get", obj_type.name, "id"]) 175 | setattr(Context, name, partialmethod( 176 | Context.get_obj_id, obj_type)) 177 | 178 | for obj_type in P4Type: 179 | name = "_".join(["get", obj_type.p4info_name]) 180 | setattr(Context, name, partialmethod(Context.get_objs, obj_type)) 181 | -------------------------------------------------------------------------------- /report_collector/collector_graphite.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import argparse 3 | import ctypes 4 | import pickle 5 | import time 6 | import struct 7 | import socket 8 | from ipaddress import IPv4Address 9 | from bcc import BPF 10 | 11 | MAX_INT_HOP = 4 12 | INT_DST_PORT = 9555 13 | FLOW_LATENCY_THRESHOLD = 50 14 | HOP_LATENCY_THRESHOLD = 5 15 | LINK_LATENCY_THRESHOLD = 5 16 | QUEUE_OCCUPANCY_THRESHOLD = 1 17 | 18 | GRAPHITE_HOST = '10.0.128.1' 19 | GRAPHITE_PORT = 2004 20 | 21 | class Event(ctypes.Structure): 22 | _fields_ = [ 23 | ("src_ip", ctypes.c_uint32), 24 | ("dst_ip", ctypes.c_uint32), 25 | ("src_port", ctypes.c_ushort), 26 | ("dst_port", ctypes.c_ushort), 27 | ("ip_proto", ctypes.c_ushort), 28 | 29 | ("hop_cnt", ctypes.c_ubyte), 30 | 31 | ("flow_latency", ctypes.c_uint32), 32 | ("switch_ids", ctypes.c_uint32 * MAX_INT_HOP), 33 | ("ingress_ports", ctypes.c_uint16 * MAX_INT_HOP), 34 | ("egress_ports", ctypes.c_uint16 * MAX_INT_HOP), 35 | ("hop_latencies", ctypes.c_uint32 * MAX_INT_HOP), 36 | ("queue_ids", ctypes.c_uint32 * MAX_INT_HOP), 37 | ("queue_occups", ctypes.c_uint32 * MAX_INT_HOP), 38 | ("ingress_tstamps", ctypes.c_uint32 * MAX_INT_HOP), 39 | ("egress_tstamps", ctypes.c_uint32 * MAX_INT_HOP), 40 | 
("egress_tx_util", ctypes.c_uint32 * MAX_INT_HOP), 41 | 42 | ("e_new_flow", ctypes.c_ubyte), 43 | ("e_flow_latency", ctypes.c_ubyte), 44 | ("e_sw_latency", ctypes.c_ubyte), 45 | ("e_link_latency", ctypes.c_ubyte), 46 | ("e_q_occupancy", ctypes.c_ubyte) 47 | ] 48 | 49 | class Collector: 50 | 51 | def __init__(self): 52 | self.xdp_collector = BPF(src_file="xdp_report_collector.c", debug=0, 53 | cflags=[ 54 | "-w", 55 | "-D_MAX_INT_HOP=%s" % MAX_INT_HOP, 56 | "-D_INT_DST_PORT=%s" % INT_DST_PORT, 57 | "-D_FLOW_LATENCY_THRESHOLD=%s" % FLOW_LATENCY_THRESHOLD, 58 | "-D_HOP_LATENCY_THRESHOLD=%s" % HOP_LATENCY_THRESHOLD, 59 | "-D_LINK_LATENCY_THRESHOLD=%s" % LINK_LATENCY_THRESHOLD, 60 | "-D_QUEUE_OCCUPANCY_THRESHOLD=%s" % QUEUE_OCCUPANCY_THRESHOLD, 61 | ]) 62 | self.collector_fn = self.xdp_collector.load_func("report_collector", BPF.XDP) 63 | 64 | self.ifaces = [] 65 | 66 | self.tb_flow = self.xdp_collector.get_table("tb_flow") 67 | self.tb_switch = self.xdp_collector.get_table("tb_switch") 68 | self.tb_link = self.xdp_collector.get_table("tb_link") 69 | self.tb_queue = self.xdp_collector.get_table("tb_queue") 70 | 71 | def graphite_send(self, metrics): 72 | payload = pickle.dumps(metrics, protocol=2) 73 | header = struct.pack("!L", len(payload)) 74 | message = header + payload 75 | try: 76 | s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 77 | s.connect((GRAPHITE_HOST, GRAPHITE_PORT)) 78 | s.sendall(message) 79 | except Exception as e: 80 | print(e) 81 | else: 82 | s.close() 83 | 84 | def attach_iface(self, iface): 85 | self.ifaces.append(iface) 86 | self.xdp_collector.attach_xdp(iface, self.collector_fn) 87 | 88 | def detach_all_ifaces(self): 89 | for iface in self.ifaces: 90 | self.xdp_collector.remove_xdp(iface, 0) 91 | self.ifaces = [] 92 | 93 | def open_events(self): 94 | def _process_event(ctx, data, size): 95 | event = ctypes.cast(data, ctypes.POINTER(Event)).contents 96 | print("Received packet") 97 | print(event.e_new_flow, event.e_sw_latency, 98 | 
event.e_q_occupancy, event.e_link_latency) 99 | print(event.src_ip, event.dst_ip, event.src_port, 100 | event.dst_port, event.ip_proto) 101 | 102 | metric_timestamp = int(time.time()) 103 | metrics = [] 104 | if event.e_new_flow: 105 | metrics.append(( 106 | 'int.flow_latency;src_ip={};dst_ip={};src_port={};dst_port={};protocol={}'.format( 107 | str(IPv4Address(event.src_ip)), str(IPv4Address(event.dst_ip)), 108 | event.src_port, event.dst_port, event.ip_proto 109 | ), (metric_timestamp, event.flow_latency) 110 | )) 111 | 112 | 113 | if event.e_flow_latency: 114 | metrics.append(( 115 | 'int.flow_latency;src_ip={};dst_ip={};src_port={};dst_port={};protocol={}'.format( 116 | event.src_ip, event.dst_ip, event.src_port, event.dst_port, event.ip_proto 117 | ), (metric_timestamp, event.flow_latency) 118 | )) 119 | 120 | if event.e_sw_latency: 121 | for i in range(event.hop_cnt): 122 | metrics.append(( 123 | 'int.switch_latency;switch_id={}'.format(event.switch_ids[i]), 124 | (metric_timestamp, event.hop_latencies[i]) 125 | )) 126 | if event.e_q_occupancy: 127 | for i in range(event.hop_cnt): 128 | metrics.append(( 129 | 'int.queue_occupancy;switch_id={};queue_id={}'.format( 130 | event.switch_ids[i], event.queue_ids[i] 131 | ), (metric_timestamp, event.queue_occups[i]) 132 | )) 133 | 134 | if event.e_link_latency: 135 | for i in range(event.hop_cnt - 1): 136 | metrics.append(( 137 | 'int.link_latency;egress_switch_id={};egress_port_id={};ingress_switch_id={};ingress_port_id={}'.format( 138 | event.switch_ids[i+1], event.egress_ports[i+1], event.switch_ids[i], event.ingress_ports[i] 139 | ), (metric_timestamp, abs(event.egress_tstamps[i+1] - event.ingress_tstamps[i])) 140 | )) 141 | 142 | self.graphite_send(metrics) 143 | 144 | self.xdp_collector["events"].open_perf_buffer(_process_event, page_cnt=512) 145 | 146 | def poll_events(self): 147 | self.xdp_collector.perf_buffer_poll() 148 | 149 | ######## 150 | 151 | if __name__ == "__main__": 152 | # handle arguments 153 | 
parser = argparse.ArgumentParser(description='INT collector.') 154 | parser.add_argument("iface") 155 | args = parser.parse_args() 156 | 157 | collector = Collector() 158 | 159 | print("Attaching interface") 160 | collector.attach_iface(sys.argv[1]) 161 | collector.open_events() 162 | print("eBPF loaded") 163 | try: 164 | while True: 165 | collector.poll_events() 166 | except KeyboardInterrupt: 167 | pass 168 | 169 | finally: 170 | collector.detach_all_ifaces() 171 | print("Detaching interfaces") 172 | 173 | print("Exitting...") 174 | -------------------------------------------------------------------------------- /p4utils/mininetlib/net.py: -------------------------------------------------------------------------------- 1 | """__ https://github.com/mininet/mininet/blob/master/mininet/net.py 2 | 3 | This module is an extension of `mininet.net`__ that allows also router nodes. 4 | """ 5 | 6 | import os 7 | from itertools import groupby 8 | from mininet.net import Mininet 9 | from mininet.node import Controller 10 | 11 | from p4utils.mininetlib.node import FRRouter 12 | from p4utils.mininetlib.log import debug, info, output, warning, error 13 | 14 | 15 | class P4Mininet(Mininet): 16 | """P4Mininet is the Mininet Class extended with P4 switches.""" 17 | 18 | def __init__(self, *args, router=FRRouter, **kwargs): 19 | """Adds p4switches.""" 20 | self.router = router 21 | self.p4switches = [] 22 | self.tofinos = [] 23 | self.routers = [] 24 | super().__init__(*args, **kwargs) 25 | 26 | def build(self): 27 | """Build P4Mininet.""" 28 | super().build() 29 | 30 | for switch in self.switches: 31 | name = switch.name 32 | if self.topo.isP4Switch(name): 33 | self.p4switches.append(switch) 34 | if self.topo.isTofino(name): 35 | self.tofinos.append(switch) 36 | 37 | info( '*** Configuring Tofinos switches\n' ) 38 | self.configTofinos() 39 | # Increase number of HugePages according to number of Tofinos 40 | info( '\n' ) 41 | debug( os.popen('sudo sysctl -w 
vm.nr_hugepages={}'.format(128*len(self.tofinos)) + '\n' ).read()) 42 | 43 | def addRouter(self, name, cls=None, **params): 44 | """Adds a router to the network. 45 | 46 | Arguments: 47 | name (string): name of the router to add 48 | cls (type) : custom router class/constructor (optional) 49 | 50 | Returns: 51 | added router 52 | """ 53 | defaults = {} # Default parameters to set (maybe in the future) 54 | defaults.update(params) 55 | if not cls: 56 | cls = self.router 57 | r = cls(name, **defaults) 58 | self.routers.append(r) 59 | self.nameToNode[name] = r 60 | return r 61 | 62 | def configTofinos( self ): 63 | """Configure a set of Tofinos.""" 64 | for tofino in self.tofinos: 65 | info( tofino.name + ' ' ) 66 | # Configure loopback interface 67 | tofino.configDefault( mac=None, 68 | ip=None, 69 | defaultRoute=None, 70 | lo='up' ) 71 | info( '\n' ) 72 | 73 | def buildFromTopo( self, topo=None ): 74 | """ 75 | Build mininet from a topology object. At the end of this 76 | function, everything should be connected and up. 
77 | """ 78 | 79 | # Possibly we should clean up here and/or validate 80 | # the topo 81 | if self.cleanup: 82 | pass 83 | 84 | info( '*** Creating network\n' ) 85 | 86 | if not self.controllers and self.controller: 87 | # Add a default controller 88 | info( '*** Adding controller\n' ) 89 | classes = self.controller 90 | if not isinstance( classes, list ): 91 | classes = [ classes ] 92 | for i, cls in enumerate( classes ): 93 | # Allow Controller objects because nobody understands partial() 94 | if isinstance( cls, Controller ): 95 | self.addController( cls ) 96 | else: 97 | self.addController( 'c%d' % i, cls ) 98 | 99 | info( '*** Adding hosts:\n' ) 100 | for hostName in topo.hosts(): 101 | self.addHost( hostName, **topo.nodeInfo( hostName ) ) 102 | info( hostName + ' ' ) 103 | 104 | info( '\n*** Adding switches:\n' ) 105 | for switchName in topo.switches(): 106 | # A bit ugly: add batch parameter if appropriate 107 | params = topo.nodeInfo( switchName) 108 | cls = params.get( 'cls', self.switch ) 109 | if hasattr( cls, 'batchStartup' ): 110 | params.setdefault( 'batch', True ) 111 | self.addSwitch( switchName, **params ) 112 | info( switchName + ' ' ) 113 | 114 | info( '\n*** Adding routers:\n' ) 115 | for routerName in topo.routers(): 116 | self.addRouter( routerName, **topo.nodeInfo( routerName )) 117 | info( routerName + ' ') 118 | 119 | info( '\n*** Adding links:\n' ) 120 | for srcName, dstName, params in topo.links( 121 | sort=True, withInfo=True ): 122 | self.addLink( **params ) 123 | info( '(%s, %s) ' % ( srcName, dstName ) ) 124 | 125 | info( '\n' ) 126 | 127 | def start(self): 128 | super().start() 129 | 130 | # start routers 131 | info( '*** Starting %s routers\n' % len( self.routers ) ) 132 | for router in self.routers: 133 | info( router.name + ' ') 134 | router.start() 135 | info( '\n' ) 136 | 137 | hosts_mtu = 9500 138 | # Trick to allow switches to add headers 139 | # when packets have the max MTU 140 | switches_mtu = 9520 141 | 142 | #remove Ipv6 
for all the interfaces 143 | for link in self.links: 144 | cmd1 = "/sbin/ethtool -k {0} rx off tx off sg off" 145 | cmd2 = "sysctl net.ipv6.conf.{0}.disable_ipv6=1" 146 | cmd3 = "ip link set {} mtu {}" 147 | 148 | #execute the ethtool command to remove some offloads 149 | link.intf1.cmd(cmd1.format(link.intf1.name)) 150 | link.intf2.cmd(cmd1.format(link.intf2.name)) 151 | 152 | #remove ipv6 153 | link.intf1.cmd(cmd2.format(link.intf1.name)) 154 | link.intf2.cmd(cmd2.format(link.intf2.name)) 155 | 156 | #increase mtu to 9500 (jumbo frames) for switches we do it special 157 | node1_is_host = link.intf1.node in self.hosts 158 | node2_is_host = link.intf2.node in self.hosts 159 | 160 | if node1_is_host or node2_is_host: 161 | mtu = hosts_mtu 162 | else: 163 | mtu = switches_mtu 164 | 165 | link.intf1.cmd(cmd3.format(link.intf1.name, mtu)) 166 | link.intf2.cmd(cmd3.format(link.intf2.name, mtu)) 167 | 168 | def stop( self ): 169 | """Stop the controller(s), switches and hosts""" 170 | info( '*** Stopping %i controllers\n' % len( self.controllers ) ) 171 | for controller in self.controllers: 172 | info( controller.name + ' ' ) 173 | controller.stop() 174 | info( '\n' ) 175 | if self.terms: 176 | info( '*** Stopping %i terms\n' % len( self.terms ) ) 177 | self.stopXterms() 178 | info( '*** Stopping %i links\n' % len( self.links ) ) 179 | for link in self.links: 180 | info( '.' 
) 181 | link.stop() 182 | info( '\n' ) 183 | info( '*** Stopping %i routers\n' % len( self.routers ) ) 184 | for router in self.routers: 185 | info( router.name + ' ' ) 186 | router.stop() 187 | router.terminate() 188 | info('\n') 189 | info( '*** Stopping %i switches\n' % len( self.switches ) ) 190 | stopped = {} 191 | for swclass, switches in groupby( 192 | sorted( self.switches, 193 | key=lambda s: str( type( s ) ) ), type ): 194 | switches = tuple( switches ) 195 | if hasattr( swclass, 'batchShutdown' ): 196 | success = swclass.batchShutdown( switches ) 197 | stopped.update( { s: s for s in success } ) 198 | for switch in self.switches: 199 | info( switch.name + ' ' ) 200 | if switch not in stopped: 201 | switch.stop() 202 | switch.terminate() 203 | 204 | # Set number of HugePages to 0 205 | info( '\n' ) 206 | debug( os.popen('sudo sysctl -w vm.nr_hugepages=0').read() + '\n' ) 207 | 208 | info( '\n' ) 209 | info( '*** Stopping %i hosts\n' % len( self.hosts ) ) 210 | for host in self.hosts: 211 | info( host.name + ' ' ) 212 | host.terminate() 213 | info( '\n*** Done\n' ) -------------------------------------------------------------------------------- /p4utils/utils/sswitch_thrift_API.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # Copyright 2013-present Barefoot Networks, Inc. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | # 16 | 17 | # 18 | # Antonin Bas (antonin@barefootnetworks.com) 19 | # 20 | # Modified version of the sswitch_CLI.py from behavioural model 21 | # Edgar Costa (cedgar@ethz.ch) 22 | 23 | """__ p4utils.utils.thrift_API.html 24 | 25 | __ https://github.com/p4lang/behavioral-model/blob/main/targets/simple_switch/sswitch_CLI.py 26 | 27 | This module provides the *Simple Switch Thrift API*. It builds 28 | on the generic `Thrift API`__. It is a modified version of 29 | `sswitch_CLI.py`__ from behavioral model. 30 | """ 31 | 32 | from functools import wraps 33 | 34 | from sswitch_runtime import SimpleSwitch 35 | from sswitch_runtime.ttypes import * 36 | 37 | import p4utils.utils.thrift_API as thrift_API 38 | 39 | 40 | def handle_bad_input(f): 41 | """Handles bad input. 42 | 43 | Args: 44 | f (types.FunctionType): function or method to handle 45 | """ 46 | @wraps(f) 47 | @thrift_API.handle_bad_input 48 | def handle(*args, **kwargs): 49 | try: 50 | return f(*args, **kwargs) 51 | except InvalidMirroringOperation as e: 52 | error = MirroringOperationErrorCode._VALUES_TO_NAMES[e.code] 53 | print("Invalid mirroring operation ({})".format(error)) 54 | return handle 55 | 56 | 57 | class SimpleSwitchThriftAPI(thrift_API.ThriftAPI): 58 | """ Simple Switch *Thrift* control plane API. 
59 | 60 | Args: 61 | thrift_port (int): port to connect to 62 | thrift_ip (str) : IP the *Thrift* server is listening on 63 | json_path (str) : optional JSON compiled P4 file to push to the switch 64 | 65 | Attributes: 66 | sswitch_client: *Thrift* client instance to communicate with the switch 67 | """ 68 | 69 | @staticmethod 70 | def get_thrift_services(): 71 | """Get available *Thrift* services.""" 72 | return [("simple_switch", SimpleSwitch.Client)] 73 | 74 | def __init__(self, thrift_port, 75 | thrift_ip='localhost', 76 | json_path=None): 77 | 78 | pre_type = thrift_API.PreType.SimplePreLAG 79 | 80 | super().__init__(thrift_port, 81 | thrift_ip, 82 | pre_type, 83 | json_path) 84 | 85 | self.sswitch_client = thrift_API.thrift_connect(thrift_ip, 86 | thrift_port, 87 | SimpleSwitchThriftAPI.get_thrift_services())[0] 88 | 89 | def parse_int(self, arg, name): 90 | """Tries to convert the argument to :py:class:`int`. 91 | 92 | Args: 93 | arg : argument that can be converted to :py:class:`int` 94 | name (str): name of the argument 95 | 96 | Returns: 97 | int: integer value of the argument 98 | 99 | Raises: 100 | p4utils.utils.thrift_API.UIn_Error: if the argument cannot be transformed in 101 | an integer. 102 | """ 103 | try: 104 | return int(arg) 105 | except: 106 | raise thrift_API.UIn_Error("Bad format for {}, expected integer".format(name)) 107 | 108 | @handle_bad_input 109 | def set_queue_depth(self, queue_depth, egress_port=None): 110 | """Sets depth of one / all egress queue(s). 
111 | 112 | Args: 113 | queue_depth (int): number of packets 114 | egress_port (int): optional *egress port*, otherwise all ports 115 | are considered 116 | """ 117 | 118 | depth = self.parse_int(queue_depth, "queue_depth") 119 | if egress_port: 120 | egress_port = self.parse_int(egress_port, "egress_port") 121 | self.sswitch_client.set_egress_queue_depth(egress_port, depth) 122 | else: 123 | self.sswitch_client.set_all_egress_queue_depths(depth) 124 | 125 | @handle_bad_input 126 | def set_queue_rate(self, rate, egress_port=None): 127 | """Sets rate of one / all egress queue(s). 128 | 129 | Args: 130 | rate (int) : rate (packets per seconds) 131 | egress_port (int): optional *egress port*, otherwise all ports 132 | are considered 133 | """ 134 | 135 | rate = self.parse_int(rate, "rate_pps") 136 | if egress_port: 137 | egress_port = self.parse_int(egress_port, "egress_port") 138 | self.sswitch_client.set_egress_queue_rate(egress_port, rate) 139 | else: 140 | self.sswitch_client.set_all_egress_queue_rates(rate) 141 | 142 | @handle_bad_input 143 | def mirroring_add(self, mirror_id, egress_port): 144 | """Adds mirroring mapping. 145 | 146 | Args: 147 | mirror_id (int) : *mirror id* to use 148 | egress_port (int): *egress port* to associate with the mirror 149 | """ 150 | mirror_id, egress_port = self.parse_int(mirror_id, "mirror_id"), self.parse_int(egress_port, "egress_port") 151 | config = MirroringSessionConfig(port=egress_port) 152 | self.sswitch_client.mirroring_session_add(mirror_id, config) 153 | 154 | @handle_bad_input 155 | def mirroring_add_mc(self, mirror_id, mgrp): 156 | """Adds mirroring session to multicast group. 
157 | 158 | Args: 159 | mirror_id (int): *mirror id* to associate 160 | mgrp (int) : *multicast group* 161 | """ 162 | mirror_id, mgrp = self.parse_int(mirror_id, "mirror_id"), self.parse_int(mgrp, "mgrp") 163 | config = MirroringSessionConfig(mgid=mgrp) 164 | self.sswitch_client.mirroring_session_add(mirror_id, config) 165 | 166 | @handle_bad_input 167 | def mirroring_add_port_and_mgrp(self, mirror_id, egress_port, mgrp): 168 | """Adds mirroring session to multicast group. 169 | 170 | Args: 171 | mirror_id (int) : *mirror id* to use 172 | egress_port (int): *egress port* to associate with the mirror 173 | mgrp (int) : *multicast group* 174 | """ 175 | mirror_id, egress_port, mgrp = self.parse_int(mirror_id, "mirror_id"), self.parse_int(egress_port, "egress_port"), self.parse_int(mgrp, "mgrp") 176 | config = MirroringSessionConfig(mgid=mgrp, port=egress_port) 177 | self.sswitch_client.mirroring_session_add(mirror_id, config) 178 | 179 | @handle_bad_input 180 | def mirroring_delete(self, mirror_id): 181 | """Deletes mirroring mapping. 
182 | 183 | Args: 184 | mirror_id (int): *id* of the mirror to delete 185 | """ 186 | mirror_id = self.parse_int(mirror_id, "mirror_id") 187 | self.sswitch_client.mirroring_session_delete(mirror_id) 188 | 189 | @handle_bad_input 190 | def mirroring_get(self, mirror_id): 191 | """Prints mirroring session information 192 | 193 | Args: 194 | mirror_id (int): *id* of the mirror to display 195 | """ 196 | mirror_id = self.parse_int(mirror_id, "mirror_id") 197 | config = self.sswitch_client.mirroring_session_get(mirror_id) 198 | print(config) 199 | 200 | @handle_bad_input 201 | def get_time_elapsed(self): 202 | """Prints time elapsed (in microseconds) since the switch started.""" 203 | print(self.sswitch_client.get_time_elapsed_us()) 204 | 205 | @handle_bad_input 206 | def get_time_since_epoch(self): 207 | """Prints time elapsed (in microseconds) since the switch clock's epoch.""" 208 | print(self.sswitch_client.get_time_since_epoch_us()) 209 | 210 | 211 | if __name__ == "__main__": 212 | controller = SimpleSwitchAPI(9090) 213 | -------------------------------------------------------------------------------- /p4utils/utils/compiler.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shlex 3 | import hashlib 4 | import subprocess 5 | 6 | from p4utils.utils.helper import * 7 | from p4utils.mininetlib.log import debug, info, output, warning, error, critical 8 | 9 | 10 | class CompilationError(Exception): 11 | pass 12 | 13 | 14 | class NotCompiledError(Exception): 15 | pass 16 | 17 | 18 | class P4InfoDisabled(Exception): 19 | pass 20 | 21 | 22 | class P4C: 23 | """This compiler reads the P4 program and generates 24 | the configuration files used by switches. 25 | 26 | Args: 27 | p4_src (str) : path of the source P4 file to compile 28 | p4c_bin (str): path to the compiler binary 29 | outdir (str) : directory containing all the output files. 
If set to **None**, 30 | then every output is stored in the directory of ``p4_src`` 31 | opts (str) : ``p4c`` compilation options 32 | p4rt (bool) : generate the P4Info file used to establish P4Runtime connection 33 | to ``simple_switch_grpc`` 34 | """ 35 | p4c_bin = 'p4c' 36 | 37 | @classmethod 38 | def set_binary(self, p4c_bin): 39 | """Sets class default binary.""" 40 | P4C.p4c_bin = p4c_bin 41 | 42 | def __init__(self, p4_src, 43 | p4c_bin=None, 44 | outdir=None, 45 | opts='--target bmv2 --arch v1model --std p4-16', 46 | p4rt=False, 47 | **kwargs): 48 | 49 | if p4c_bin is not None: 50 | self.set_binary(p4c_bin) 51 | 52 | # Check whether the p4file is valid 53 | if p4_src is not None: 54 | if os.path.isfile(p4_src): 55 | self.p4_src = os.path.realpath(p4_src) 56 | else: 57 | raise FileNotFoundError('could not find file {}.'.format(p4_src)) 58 | else: 59 | raise FileNotFoundError('no source file provided.'.format(p4_src)) 60 | 61 | if outdir is None: 62 | self.outdir = os.path.dirname(self.p4_src) 63 | else: 64 | # Make sure that the provided outdir path is not pointing to a file 65 | # and, if necessary, create an empty outdir 66 | if not os.path.isdir(outdir): 67 | if os.path.exists(outdir): 68 | raise NotADirectoryError("'{}' exists and is not a directory.".format(self.outdir)) 69 | else: 70 | os.mkdir(outdir) 71 | self.outdir = outdir 72 | 73 | self.opts = opts 74 | self.p4rt = p4rt 75 | self.compiled = False 76 | 77 | p4_basename = os.path.basename(self.p4_src) 78 | p4rt_out_basename = p4_basename.replace('.p4', '') + '_p4rt.txt' 79 | json_out_basename = p4_basename.replace('.p4', '') + '.json' 80 | 81 | self.p4rt_out = self.outdir + '/' + p4rt_out_basename 82 | self.json_out = self.outdir + '/' + json_out_basename 83 | 84 | def compile(self): 85 | """Compiles the P4 file and generates the configuration files.""" 86 | # Compute checksum of P4 file. This allows to recognize modified files. 
87 | self.cksum = cksum(self.p4_src) 88 | debug('source: {}\tcksum: {}\n'.format(self.p4_src, self.cksum)) 89 | 90 | # Compiler command to execute 91 | cmd = self.p4c_bin + ' ' 92 | cmd += '"{}" '.format(self.p4_src) 93 | cmd += self.opts + ' ' 94 | cmd += '-o "{}" '.format(self.outdir) 95 | 96 | if self.p4rt: 97 | cmd += '--p4runtime-files "{}" '.format(self.p4rt_out) 98 | 99 | debug(cmd + '\n') 100 | 101 | # Execute command 102 | p = subprocess.Popen(shlex.split(cmd), 103 | stdin=subprocess.DEVNULL, 104 | stdout=subprocess.PIPE, 105 | stderr=subprocess.PIPE) 106 | stdout, stderr = p.communicate() 107 | 108 | if p.returncode != 0: 109 | info(stdout.decode(errors='backslashreplace')) 110 | error(stderr.decode(errors='backslashreplace')) 111 | raise CompilationError 112 | else: 113 | if len(stderr) == 0: 114 | info('{} compiled successfully.\n'.format(self.p4_src)) 115 | info(stdout.decode(errors='backslashreplace')) 116 | else: 117 | info('{} compiled with warnings.\n'.format(self.p4_src)) 118 | info(stdout.decode(errors='backslashreplace')) 119 | warning(stderr.decode(errors='backslashreplace')) 120 | self.compiled = True 121 | 122 | def get_json_out(self): 123 | """Returns the JSON configuration filepath.""" 124 | if self.compiled: 125 | return self.json_out 126 | else: 127 | raise NotCompiledError 128 | 129 | def get_p4rt_out(self): 130 | """Returns the P4Info configuration filepath.""" 131 | if self.compiled: 132 | if self.p4rt: 133 | return self.p4rt_out 134 | else: 135 | raise P4InfoDisabled 136 | else: 137 | raise NotCompiledError 138 | 139 | def clean(self): 140 | """Removes output files and set compiler as uncompiled.""" 141 | os.remove(self.p4rt_out) 142 | os.remove(self.json_out) 143 | self.compiled = False 144 | 145 | def new_source(self): 146 | """Checks whether a new source was provided to the 147 | compiler. 148 | 149 | Returns: 150 | bool: **True** if the source P4 file has changed since 151 | the last time it was compiled, **False** otherwise. 
152 | """ 153 | return cksum(self.p4_src) != self.cksum 154 | 155 | 156 | class BF_P4C: 157 | """This compiler reads the P4 program and generates 158 | the configuration files used by Tofinos. 159 | 160 | Args: 161 | p4_src (str) : path of the source P4 file to compile 162 | build_dir (str) : directory where the Tofino's configuration is built 163 | build_script (str) : directory where the Tofino's build script can be found 164 | sde (str) : Tofino SDE path ($SDE) 165 | sde_install (str): Tofino SDE install path ($SDE_INSTALL) 166 | """ 167 | 168 | def __init__(self, p4_src, 169 | sde, 170 | sde_install, 171 | build_dir=None, 172 | build_script="~/tools/p4_build.sh", 173 | **kwargs): 174 | 175 | self.sde = os.path.realpath(sde) 176 | self.sde_install = os.path.realpath(sde_install) 177 | 178 | # check if the build script exists if not falls back to 179 | # cmake 180 | self.build_script = None 181 | if build_script: 182 | home = os.path.expanduser("~") 183 | build_script = build_script.replace("~", home) 184 | if os.path.isfile(build_script): 185 | self.build_script = build_script 186 | 187 | # Check whether the p4file is valid 188 | if p4_src is not None: 189 | if os.path.isfile(p4_src): 190 | self.p4_src = os.path.realpath(p4_src) 191 | else: 192 | raise FileNotFoundError('could not find file {}.'.format(p4_src)) 193 | else: 194 | raise FileNotFoundError('no source file provided.'.format(p4_src)) 195 | 196 | if build_dir is None: 197 | self.build_dir = os.path.join(os.path.dirname(self.p4_src), 'build') 198 | else: 199 | self.build_dir = os.path.realpath(build_dir) 200 | 201 | # Make sure that the provided outdir path is not pointing to a file 202 | # and, if necessary, create an empty build_dir 203 | if not os.path.isdir(self.build_dir): 204 | if os.path.exists(self.build_dir): 205 | raise NotADirectoryError("'{}' exists and is not a directory.".format(self.build_dir)) 206 | else: 207 | os.mkdir(self.build_dir) 208 | 209 | # Remove existent files and 
subdirectories 210 | os.system('rm -rf {}'.format(os.path.join(self.build_dir, '*'))) 211 | 212 | self.p4_name, _ = os.path.splitext(os.path.basename(self.p4_src)) 213 | self.compiled = False 214 | 215 | def compile(self): 216 | """Compiles the P4 file and generates the configuration files.""" 217 | # Compute checksum of P4 file. This allows to recognize modified files. 218 | self.cksum = cksum(self.p4_src) 219 | debug('source: {}\tcksum: {}\n'.format(self.p4_src, self.cksum)) 220 | 221 | # Set environmental variables 222 | cmd = 'export SDE={} && '.format(self.sde) 223 | cmd += 'export SDE_INSTALL={} && '.format(self.sde_install) 224 | 225 | # manual cmake 226 | if not self.build_script: 227 | cmd += 'cd {}; '.format(self.build_dir) 228 | cmd += 'cmake $SDE/p4studio/ -DCMAKE_INSTALL_PREFIX=$SDE/install ' + \ 229 | '-DCMAKE_MODULE_PATH=$SDE/cmake -DP4_NAME={} '.format(self.p4_name) + \ 230 | '-DP4_PATH={} && '.format(self.p4_src) 231 | cmd += 'make {} && make install'.format(self.p4_name) 232 | # if we use the p4_build script 233 | else: 234 | cmd += '{} --with-tofino {}'.format(self.build_script, self.p4_src) 235 | 236 | debug(cmd + '\n') 237 | 238 | # Execute command 239 | p = subprocess.Popen(cmd, 240 | shell=True, 241 | stdin=subprocess.DEVNULL, 242 | stdout=subprocess.PIPE, 243 | stderr=subprocess.PIPE) 244 | 245 | stdout, stderr = p.communicate() 246 | 247 | if p.returncode != 0: 248 | info(stdout.decode(errors='backslashreplace')) 249 | error(stderr.decode(errors='backslashreplace')) 250 | raise CompilationError 251 | else: 252 | if len(stderr) == 0: 253 | info('{} compiled successfully.\n'.format(self.p4_src)) 254 | info(stdout.decode(errors='backslashreplace')) 255 | else: 256 | info('{} compiled with warnings.\n'.format(self.p4_src)) 257 | info(stdout.decode(errors='backslashreplace')) 258 | warning(stderr.decode(errors='backslashreplace')) 259 | self.compiled = True 260 | 261 | def get_p4name(self): 262 | """Returns the JSON configuration filepath.""" 
263 | if self.compiled: 264 | return self.p4_name 265 | else: 266 | raise NotCompiledError 267 | 268 | def clean(self): 269 | """Removes output files and set compiler as uncompiled.""" 270 | os.remove(self.build_dir) 271 | self.compiled = False 272 | 273 | def new_source(self): 274 | """Checks whether a new source was provided to the 275 | compiler. 276 | 277 | Returns: 278 | bool: **True** if the source P4 file has changed since 279 | the last time it was compiled, **False** otherwise. 280 | """ 281 | return cksum(self.p4_src) != self.cksum -------------------------------------------------------------------------------- /report_collector/colllector.py: -------------------------------------------------------------------------------- 1 | 2 | import sys 3 | import io 4 | import time 5 | 6 | from scapy.all import Packet 7 | from scapy.all import BitField,ShortField 8 | from scapy.layers.inet import Ether,IP, TCP, UDP, bind_layers 9 | 10 | class INTREP(Packet): 11 | name = "INT Report Header v2.0" 12 | fields_desc = [ 13 | BitField("version", 0, 4), 14 | BitField("hw_id", 0, 6), 15 | BitField("seq_number", 0, 22), 16 | BitField("node_id", 0, 32)] 17 | 18 | class INTIndiviREP(Packet): 19 | name = "INT Report Individual Header v2.0" 20 | 21 | fields_desc = [ 22 | BitField("rep_type", 0, 4), 23 | BitField("in_type", 0, 4), 24 | BitField("rep_len", 0, 8), 25 | BitField("md_len", 0, 8), 26 | BitField("flag", 0, 4), 27 | BitField("rsvd", 0, 4), 28 | ShortField("RepMdBits", 0), 29 | ShortField("DomainID", 0), 30 | ShortField("DSMdBits", 0), 31 | ShortField("DSMdstatus", 0)] 32 | 33 | class INTShim(Packet): 34 | name = "INT Shim header v2.1" 35 | fields_desc = [ 36 | BitField("type", 0, 4), 37 | BitField("next_protocol", 0, 2), 38 | BitField("rsvd", 0, 2), 39 | BitField("int_length", 0, 8), 40 | ShortField("NPT Dependent Field", 0)] 41 | 42 | class INTMD(Packet): 43 | name = "INT-MD Header v2.1" 44 | fields_desc = [ 45 | BitField("version", 0, 4), 46 | BitField("flags", 0, 3), 
47 | BitField("reserved", 0, 12), 48 | BitField("HopMetaLength", 0, 5), 49 | BitField("RemainingHopCount", 0, 8), 50 | BitField("instruction_mask_0003", 0, 4), 51 | BitField("instruction_mask_0407", 0, 4), 52 | BitField("instruction_mask_0811", 0, 4), 53 | BitField("instruction_mask_1215", 0, 4), 54 | ShortField("DomainID", 0), 55 | ShortField("DomainInstructions", 0), 56 | ShortField("DomainFlags", 0)] 57 | 58 | bind_layers(UDP,INTREP,dport=1234) 59 | bind_layers(INTREP,INTIndiviREP) 60 | bind_layers(INTIndiviREP,Ether,in_type=3) 61 | bind_layers(INTShim,INTMD,type = 1) 62 | 63 | SWITCH_ID_BIT = 0b10000000 64 | L1_PORT_IDS_BIT = 0b01000000 65 | HOP_LATENCY_BIT = 0b00100000 66 | QUEUE_BIT = 0b00010000 67 | INGRESS_TSTAMP_BIT = 0b00001000 68 | EGRESS_TSTAMP_BIT = 0b00000100 69 | L2_PORT_IDS_BIT = 0b00000010 70 | EGRESS_PORT_TX_UTIL_BIT = 0b00000001 71 | 72 | class FlowInfo(): 73 | def __init__(self): 74 | # flow information 75 | self.src_ip = None 76 | self.dst_ip = None 77 | self.src_port = None 78 | self.dst_port = None 79 | self.ip_proto = None 80 | 81 | # flow hop count and flow total latency 82 | self.hop_cnt = 0 83 | self.flow_latency = 0 84 | 85 | # flow telemetry metadata 86 | self.switch_ids = [] 87 | self.l1_ingress_ports = [] 88 | self.l1_egress_ports = [] 89 | self.hop_latencies = [] 90 | self.queue_ids = [] 91 | self.queue_occups = [] 92 | self.ingress_tstamps = [] 93 | self.egress_tstamps = [] 94 | self.l2_ingress_ports = [] 95 | self.l2_egress_ports = [] 96 | self.egress_tx_utils = [] 97 | 98 | self.e_new_flow = None 99 | self.e_flow_latency = None 100 | self.e_sw_latency = None 101 | self.e_link_latency = None 102 | self.e_q_occupancy = None 103 | 104 | def show(self): 105 | print("src_ip %s" % (self.src_ip)) 106 | print("dst_ip %s" % (self.dst_ip)) 107 | print("src_port %s" % (self.src_port)) 108 | print("dst_port %s" % (self.dst_port)) 109 | print("ip_proto %s" % (self.ip_proto)) 110 | 111 | print("hop_cnt %s" % (self.hop_cnt)) 112 | 
print("flow_latency %s" % (self.flow_latency)) 113 | 114 | #switch_ids 115 | if len(self.switch_ids) > 0: 116 | print("switch_ids %s" % (self.switch_ids)) 117 | # l1_ingress_ports and l1_egress_ports 118 | if len(self.l1_ingress_ports) > 0: 119 | print("l1_ingress_ports %s" % (self.l1_ingress_ports)) 120 | print("l1_egress_ports %s" % (self.l1_egress_ports)) 121 | # hop_latencies 122 | if len(self.hop_latencies) > 0: 123 | print("hop_latencies %s" % (self.hop_latencies)) 124 | # queue_ids and queue_occups 125 | if len(self.queue_ids) > 0: 126 | print("queue_ids %s" % (self.queue_ids)) 127 | print("queue_occups %s" % (self.queue_occups)) 128 | # ingress_tstamps and egress_tstamps 129 | if len(self.ingress_tstamps) > 0: 130 | print("ingress_tstamps %s" % (self.ingress_tstamps)) 131 | print("egress_tstamps %s" % (self.egress_tstamps)) 132 | # l2_ingress_ports and l2_egress_ports 133 | if len(self.l2_ingress_ports) > 0: 134 | print("l2_ingress_ports %s" % (self.l2_ingress_ports)) 135 | print("l2_egress_ports %s" % (self.l2_egress_ports)) 136 | # egress_tx_utils 137 | if len(self.egress_tx_utils) > 0: 138 | print("egress_tx_utils %s" % (self.egress_tx_utils)) 139 | 140 | def __str__(self) -> str: 141 | pass 142 | 143 | 144 | class Collector(): 145 | def __init__(self,influx_client) -> None: 146 | self.influx_client = influx_client 147 | 148 | def parse_flow_info(self,flow_info,ip_pkt): 149 | flow_info.src_ip = ip_pkt.src 150 | flow_info.dst_ip = ip_pkt.dst 151 | flow_info.ip_proto = ip_pkt.proto 152 | 153 | if UDP in ip_pkt: 154 | flow_info.src_port = ip_pkt[UDP].sport 155 | flow_info.dst_port = ip_pkt[UDP].dport 156 | elif TCP in ip_pkt: 157 | flow_info.src_port = ip_pkt[TCP].sport 158 | flow_info.dst_port = ip_pkt[TCP].dport 159 | 160 | def parse_int_metadata(self,flow_info,int_pkt): 161 | if INTShim not in int_pkt: 162 | return 163 | # telemetry instructions 164 | ins_map = (int_pkt[INTMD].instruction_mask_0003 << 4) + int_pkt[INTMD].instruction_mask_0407 165 | # 
class Collector():
    """Parses INT telemetry report packets and exports the extracted
    metrics to InfluxDB."""

    def __init__(self, influx_client) -> None:
        # InfluxDB client used by export_influxdb().
        self.influx_client = influx_client

    def parse_flow_info(self, flow_info, ip_pkt):
        """Fills flow_info's five-tuple from a scapy IP packet (ports are
        taken from the UDP or TCP layer when present)."""
        flow_info.src_ip = ip_pkt.src
        flow_info.dst_ip = ip_pkt.dst
        flow_info.ip_proto = ip_pkt.proto

        if UDP in ip_pkt:
            flow_info.src_port = ip_pkt[UDP].sport
            flow_info.dst_port = ip_pkt[UDP].dport
        elif TCP in ip_pkt:
            flow_info.src_port = ip_pkt[TCP].sport
            flow_info.dst_port = ip_pkt[TCP].dport

    def parse_int_metadata(self, flow_info, int_pkt):
        """Decodes the per-hop metadata stack carried after the INT shim
        and appends each selected field to the flow_info lists."""
        if INTShim not in int_pkt:
            return
        # telemetry instructions: 8-bit bitmap built from the two 4-bit masks
        ins_map = (int_pkt[INTMD].instruction_mask_0003 << 4) + int_pkt[INTMD].instruction_mask_0407
        # telemetry metadata length in 4-byte words (shim length minus the
        # 3 words of shim + INT-MD headers)
        int_len = int_pkt.int_length - 3
        # per-hop telemetry metadata length in 4-byte words
        hop_meta_len = int_pkt[INTMD].HopMetaLength
        # Robustness fix: malformed reports previously caused
        # ZeroDivisionError (hop_meta_len == 0) or nonsense slices.
        if hop_meta_len <= 0 or int_len <= 0:
            return
        # raw telemetry metadata bytes
        int_metadata = int_pkt.load[:int_len << 2]
        # hop count (fix: dead `hop_metadata = []` before the loop removed)
        hop_count = int_len // hop_meta_len
        flow_info.hop_cnt = hop_count

        for i in range(hop_count):
            # byte offset of hop i's metadata (words * 4)
            index = (i * hop_meta_len) << 2
            hop_metadata = io.BytesIO(int_metadata[index:index + (hop_meta_len << 2)])
            # switch_ids
            if ins_map & SWITCH_ID_BIT:
                flow_info.switch_ids.append(int.from_bytes(hop_metadata.read(4), byteorder='big'))
            # ingress_ports and egress_ports
            if ins_map & L1_PORT_IDS_BIT:
                flow_info.l1_ingress_ports.append(int.from_bytes(hop_metadata.read(2), byteorder='big'))
                flow_info.l1_egress_ports.append(int.from_bytes(hop_metadata.read(2), byteorder='big'))
            # hop_latencies
            if ins_map & HOP_LATENCY_BIT:
                flow_info.hop_latencies.append(int.from_bytes(hop_metadata.read(4), byteorder='big'))
                flow_info.flow_latency += flow_info.hop_latencies[i]
            # queue_ids and queue_occups
            if ins_map & QUEUE_BIT:
                flow_info.queue_ids.append(int.from_bytes(hop_metadata.read(1), byteorder='big'))
                flow_info.queue_occups.append(int.from_bytes(hop_metadata.read(3), byteorder='big'))
            # ingress_tstamps
            if ins_map & INGRESS_TSTAMP_BIT:
                flow_info.ingress_tstamps.append(int.from_bytes(hop_metadata.read(8), byteorder='big'))
            # egress_tstamps
            if ins_map & EGRESS_TSTAMP_BIT:
                flow_info.egress_tstamps.append(int.from_bytes(hop_metadata.read(8), byteorder='big'))
            # l2_ingress_ports and l2_egress_ports
            if ins_map & L2_PORT_IDS_BIT:
                flow_info.l2_ingress_ports.append(int.from_bytes(hop_metadata.read(4), byteorder='big'))
                flow_info.l2_egress_ports.append(int.from_bytes(hop_metadata.read(4), byteorder='big'))
            # egress_tx_utils
            if ins_map & EGRESS_PORT_TX_UTIL_BIT:
                flow_info.egress_tx_utils.append(int.from_bytes(hop_metadata.read(4), byteorder='big'))

    def parser_int_pkt(self, pkt):
        """Parses a full INT report packet into a FlowInfo, or returns
        None when the packet carries no INT report header."""
        if INTREP not in pkt:
            return
        int_rep_pkt = pkt[INTREP]
        # int_rep_pkt.show()

        flow_info = FlowInfo()
        # parse five tuple (src_ip,dst_ip,src_port,dst_port,ip_proto)
        self.parse_flow_info(flow_info, int_rep_pkt[IP])
        # int metadata: the shim follows the individual report payload
        int_shim_pkt = INTShim(int_rep_pkt.load)
        self.parse_int_metadata(flow_info, int_shim_pkt)
        sys.stdout.flush()

        return flow_info

    def export_influxdb(self, flow_info):
        """Converts flow_info into InfluxDB points (flow/switch/link
        latency, queue occupancy) and writes them in one batch."""
        if self.influx_client is None:
            print("collector.influx_client is Uninitialized")
            # Bug fix: exiting with status 0 signalled success on a fatal
            # misconfiguration; use a non-zero status.
            sys.exit(1)

        if not flow_info:
            return

        # InfluxDB expects nanosecond timestamps.
        metric_timestamp = int(time.time() * 1000000000)
        metrics = []
        if flow_info.flow_latency:
            metrics.append({
                'measurement': 'flow_latency',
                'tags': {
                    'src_ip': str(flow_info.src_ip),
                    'dst_ip': str(flow_info.dst_ip),
                    'src_port': flow_info.src_port,
                    'dst_port': flow_info.dst_port,
                    'protocol': flow_info.ip_proto
                },
                'time': metric_timestamp,
                'fields': {
                    'value': flow_info.flow_latency
                }
            })

        if len(flow_info.switch_ids) > 0 and len(flow_info.egress_tstamps) > 0 and len(flow_info.hop_latencies) > 0:
            for i in range(flow_info.hop_cnt):
                metrics.append({
                    'measurement': 'switch_latency',
                    'tags': {
                        'switch_id': flow_info.switch_ids[i]
                    },
                    'time': flow_info.egress_tstamps[i] * 1000,
                    'fields': {
                        'value': flow_info.hop_latencies[i]
                    }
                })

        # Bug fix: egress_tstamps is indexed below but was missing from the
        # original guard, causing IndexError when the INT egress-timestamp
        # bit was not requested.
        if len(flow_info.switch_ids) > 0 and len(flow_info.queue_ids) > 0 and len(flow_info.egress_tstamps) > 0:
            for i in range(flow_info.hop_cnt):
                metrics.append({
                    'measurement': 'queue_occupancy',
                    'tags': {
                        'switch_id': flow_info.switch_ids[i],
                        'queue_id': flow_info.queue_ids[i]
                    },
                    'time': flow_info.egress_tstamps[i] * 1000,
                    'fields': {
                        'value': flow_info.queue_occups[i]
                    }
                })

        # Bug fix: both timestamp lists are indexed below but were missing
        # from the original guard.
        if (len(flow_info.switch_ids) > 0
                and len(flow_info.l1_egress_ports) > 0
                and len(flow_info.l1_ingress_ports) > 0
                and len(flow_info.egress_tstamps) > 0
                and len(flow_info.ingress_tstamps) > 0):
            for i in range(flow_info.hop_cnt - 1):
                metrics.append({
                    'measurement': 'link_latency',
                    'tags': {
                        'egress_switch_id': flow_info.switch_ids[i + 1],
                        'egress_port_id': flow_info.l1_egress_ports[i + 1],
                        'ingress_switch_id': flow_info.switch_ids[i],
                        'ingress_port_id': flow_info.l1_ingress_ports[i]
                    },
                    'time': metric_timestamp,
                    'fields': {
                        'value': abs(flow_info.egress_tstamps[i + 1] - flow_info.ingress_tstamps[i])
                    }
                })

        self.influx_client.write_points(points=metrics, protocol="json")
"type": "tag" 54 | }, 55 | { 56 | "params": [ 57 | "dst_port" 58 | ], 59 | "type": "tag" 60 | }, 61 | { 62 | "params": [ 63 | "src_port" 64 | ], 65 | "type": "tag" 66 | }, 67 | { 68 | "params": [ 69 | "src_ip" 70 | ], 71 | "type": "tag" 72 | }, 73 | { 74 | "params": [ 75 | "protocol" 76 | ], 77 | "type": "tag" 78 | } 79 | ], 80 | "measurement": "flow_latency", 81 | "orderByTime": "ASC", 82 | "policy": "default", 83 | "query": "SELECT \"value\" FROM \"flow_latency\" WHERE $timeFilter GROUP BY \"dst_ip\", \"dst_port\", \"src_port\", \"src_ip\", \"protocol\"", 84 | "rawQuery": true, 85 | "refId": "A", 86 | "resultFormat": "time_series", 87 | "select": [ 88 | [ 89 | { 90 | "params": [ 91 | "value" 92 | ], 93 | "type": "field" 94 | } 95 | ] 96 | ], 97 | "tags": [] 98 | } 99 | ], 100 | "thresholds": [], 101 | "timeFrom": null, 102 | "timeRegions": [], 103 | "timeShift": null, 104 | "title": "Flow Latency", 105 | "tooltip": { 106 | "shared": true, 107 | "sort": 0, 108 | "value_type": "individual" 109 | }, 110 | "type": "graph", 111 | "xaxis": { 112 | "buckets": null, 113 | "mode": "time", 114 | "name": null, 115 | "show": true, 116 | "values": [] 117 | }, 118 | "yaxes": [ 119 | { 120 | "format": "short", 121 | "label": null, 122 | "logBase": 1, 123 | "max": null, 124 | "min": null, 125 | "show": true 126 | }, 127 | { 128 | "format": "short", 129 | "label": null, 130 | "logBase": 1, 131 | "max": null, 132 | "min": null, 133 | "show": true 134 | } 135 | ], 136 | "yaxis": { 137 | "align": false, 138 | "alignLevel": null 139 | } 140 | }, 141 | { 142 | "aliasColors": {}, 143 | "bars": false, 144 | "dashLength": 10, 145 | "dashes": false, 146 | "datasource": "InfluxDB", 147 | "fill": 1, 148 | "fillGradient": 0, 149 | "gridPos": { 150 | "h": 9, 151 | "w": 12, 152 | "x": 12, 153 | "y": 0 154 | }, 155 | "id": 4, 156 | "legend": { 157 | "avg": false, 158 | "current": false, 159 | "max": false, 160 | "min": false, 161 | "show": true, 162 | "total": false, 163 | "values": false 164 | 
}, 165 | "lines": true, 166 | "linewidth": 1, 167 | "nullPointMode": "null", 168 | "options": { 169 | "dataLinks": [] 170 | }, 171 | "percentage": false, 172 | "pointradius": 2, 173 | "points": false, 174 | "renderer": "flot", 175 | "seriesOverrides": [], 176 | "spaceLength": 10, 177 | "stack": false, 178 | "steppedLine": false, 179 | "targets": [ 180 | { 181 | "groupBy": [ 182 | { 183 | "params": [ 184 | "switch_id" 185 | ], 186 | "type": "tag" 187 | } 188 | ], 189 | "measurement": "switch_latency", 190 | "orderByTime": "ASC", 191 | "policy": "default", 192 | "refId": "A", 193 | "resultFormat": "time_series", 194 | "select": [ 195 | [ 196 | { 197 | "params": [ 198 | "value" 199 | ], 200 | "type": "field" 201 | } 202 | ] 203 | ], 204 | "tags": [] 205 | } 206 | ], 207 | "thresholds": [], 208 | "timeFrom": null, 209 | "timeRegions": [], 210 | "timeShift": null, 211 | "title": "Switch Latency", 212 | "tooltip": { 213 | "shared": true, 214 | "sort": 0, 215 | "value_type": "individual" 216 | }, 217 | "type": "graph", 218 | "xaxis": { 219 | "buckets": null, 220 | "mode": "time", 221 | "name": null, 222 | "show": true, 223 | "values": [] 224 | }, 225 | "yaxes": [ 226 | { 227 | "format": "short", 228 | "label": null, 229 | "logBase": 1, 230 | "max": null, 231 | "min": null, 232 | "show": true 233 | }, 234 | { 235 | "format": "short", 236 | "label": null, 237 | "logBase": 1, 238 | "max": null, 239 | "min": null, 240 | "show": true 241 | } 242 | ], 243 | "yaxis": { 244 | "align": false, 245 | "alignLevel": null 246 | } 247 | }, 248 | { 249 | "aliasColors": {}, 250 | "bars": false, 251 | "dashLength": 10, 252 | "dashes": false, 253 | "datasource": "InfluxDB", 254 | "fill": 1, 255 | "fillGradient": 0, 256 | "gridPos": { 257 | "h": 9, 258 | "w": 12, 259 | "x": 0, 260 | "y": 9 261 | }, 262 | "id": 6, 263 | "legend": { 264 | "avg": false, 265 | "current": false, 266 | "max": false, 267 | "min": false, 268 | "show": true, 269 | "total": false, 270 | "values": false 271 | }, 272 | 
"lines": true, 273 | "linewidth": 1, 274 | "nullPointMode": "null", 275 | "options": { 276 | "dataLinks": [] 277 | }, 278 | "percentage": false, 279 | "pointradius": 2, 280 | "points": false, 281 | "renderer": "flot", 282 | "seriesOverrides": [], 283 | "spaceLength": 10, 284 | "stack": false, 285 | "steppedLine": false, 286 | "targets": [ 287 | { 288 | "groupBy": [ 289 | { 290 | "params": [ 291 | "egress_port_id" 292 | ], 293 | "type": "tag" 294 | }, 295 | { 296 | "params": [ 297 | "egress_switch_id" 298 | ], 299 | "type": "tag" 300 | }, 301 | { 302 | "params": [ 303 | "ingress_port_id" 304 | ], 305 | "type": "tag" 306 | }, 307 | { 308 | "params": [ 309 | "ingress_switch_id" 310 | ], 311 | "type": "tag" 312 | } 313 | ], 314 | "measurement": "link_latency", 315 | "orderByTime": "ASC", 316 | "policy": "default", 317 | "refId": "A", 318 | "resultFormat": "time_series", 319 | "select": [ 320 | [ 321 | { 322 | "params": [ 323 | "value" 324 | ], 325 | "type": "field" 326 | } 327 | ] 328 | ], 329 | "tags": [] 330 | } 331 | ], 332 | "thresholds": [], 333 | "timeFrom": null, 334 | "timeRegions": [], 335 | "timeShift": null, 336 | "title": "Link Latency", 337 | "tooltip": { 338 | "shared": true, 339 | "sort": 0, 340 | "value_type": "individual" 341 | }, 342 | "type": "graph", 343 | "xaxis": { 344 | "buckets": null, 345 | "mode": "time", 346 | "name": null, 347 | "show": true, 348 | "values": [] 349 | }, 350 | "yaxes": [ 351 | { 352 | "format": "short", 353 | "label": null, 354 | "logBase": 1, 355 | "max": null, 356 | "min": null, 357 | "show": true 358 | }, 359 | { 360 | "format": "short", 361 | "label": null, 362 | "logBase": 1, 363 | "max": null, 364 | "min": null, 365 | "show": true 366 | } 367 | ], 368 | "yaxis": { 369 | "align": false, 370 | "alignLevel": null 371 | } 372 | }, 373 | { 374 | "aliasColors": {}, 375 | "bars": false, 376 | "dashLength": 10, 377 | "dashes": false, 378 | "datasource": "InfluxDB", 379 | "fill": 1, 380 | "fillGradient": 0, 381 | "gridPos": { 
382 | "h": 9, 383 | "w": 12, 384 | "x": 12, 385 | "y": 9 386 | }, 387 | "id": 8, 388 | "legend": { 389 | "avg": false, 390 | "current": false, 391 | "max": false, 392 | "min": false, 393 | "show": true, 394 | "total": false, 395 | "values": false 396 | }, 397 | "lines": true, 398 | "linewidth": 1, 399 | "nullPointMode": "null", 400 | "options": { 401 | "dataLinks": [] 402 | }, 403 | "percentage": false, 404 | "pointradius": 2, 405 | "points": false, 406 | "renderer": "flot", 407 | "seriesOverrides": [], 408 | "spaceLength": 10, 409 | "stack": false, 410 | "steppedLine": false, 411 | "targets": [ 412 | { 413 | "groupBy": [ 414 | { 415 | "params": [ 416 | "queue_id" 417 | ], 418 | "type": "tag" 419 | }, 420 | { 421 | "params": [ 422 | "switch_id" 423 | ], 424 | "type": "tag" 425 | } 426 | ], 427 | "measurement": "queue_occupancy", 428 | "orderByTime": "ASC", 429 | "policy": "default", 430 | "refId": "A", 431 | "resultFormat": "time_series", 432 | "select": [ 433 | [ 434 | { 435 | "params": [ 436 | "value" 437 | ], 438 | "type": "field" 439 | } 440 | ] 441 | ], 442 | "tags": [] 443 | } 444 | ], 445 | "thresholds": [], 446 | "timeFrom": null, 447 | "timeRegions": [], 448 | "timeShift": null, 449 | "title": "Queue Occupancy", 450 | "tooltip": { 451 | "shared": true, 452 | "sort": 0, 453 | "value_type": "individual" 454 | }, 455 | "type": "graph", 456 | "xaxis": { 457 | "buckets": null, 458 | "mode": "time", 459 | "name": null, 460 | "show": true, 461 | "values": [] 462 | }, 463 | "yaxes": [ 464 | { 465 | "format": "short", 466 | "label": null, 467 | "logBase": 1, 468 | "max": null, 469 | "min": null, 470 | "show": true 471 | }, 472 | { 473 | "format": "short", 474 | "label": null, 475 | "logBase": 1, 476 | "max": null, 477 | "min": null, 478 | "show": true 479 | } 480 | ], 481 | "yaxis": { 482 | "align": false, 483 | "alignLevel": null 484 | } 485 | } 486 | ], 487 | "refresh": false, 488 | "schemaVersion": 19, 489 | "style": "dark", 490 | "tags": [], 491 | "templating": 
{ 492 | "list": [] 493 | }, 494 | "time": { 495 | "from": "2020-01-25T17:52:01.436Z", 496 | "to": "2020-01-25T17:53:24.439Z" 497 | }, 498 | "timepicker": { 499 | "refresh_intervals": [ 500 | "5s", 501 | "10s", 502 | "30s", 503 | "1m", 504 | "5m", 505 | "15m", 506 | "30m", 507 | "1h", 508 | "2h", 509 | "1d" 510 | ] 511 | }, 512 | "timezone": "", 513 | "title": "InfluxDB INT Dashboard", 514 | "uid": "Gwx6YJyZz", 515 | "version": 3 516 | } 517 | -------------------------------------------------------------------------------- /p4src/include/int_transit.p4: -------------------------------------------------------------------------------- 1 | control process_int_transit ( 2 | inout headers hdr, 3 | inout local_metadata_t local_metadata, 4 | inout standard_metadata_t standard_metadata) { 5 | 6 | action init_metadata(switch_id_t switch_id) { 7 | local_metadata.int_meta.transit = true; 8 | local_metadata.int_meta.switch_id = switch_id; 9 | } 10 | 11 | action int_set_header_0() { //switch_id 12 | hdr.int_switch_id.setValid(); 13 | hdr.int_switch_id.switch_id = local_metadata.int_meta.switch_id; 14 | } 15 | 16 | action int_set_header_1() { //level1_port_id 17 | hdr.int_level1_port_ids.setValid(); 18 | hdr.int_level1_port_ids.ingress_port_id = (bit<16>) standard_metadata.ingress_port; 19 | hdr.int_level1_port_ids.egress_port_id = (bit<16>) standard_metadata.egress_port; 20 | } 21 | 22 | action int_set_header_2() { //hop_latency 23 | hdr.int_hop_latency.setValid(); 24 | hdr.int_hop_latency.hop_latency = (bit<32>) standard_metadata.egress_global_timestamp - (bit<32>) standard_metadata.ingress_global_timestamp; 25 | } 26 | 27 | action int_set_header_3() { //q_occupancy 28 | // TODO: Support egress queue ID 29 | hdr.int_q_occupancy.setValid(); 30 | hdr.int_q_occupancy.q_id =0; 31 | // (bit<8>) standard_metadata.egress_qid; 32 | hdr.int_q_occupancy.q_occupancy = (bit<24>) standard_metadata.deq_qdepth; 33 | } 34 | 35 | action int_set_header_4() { //ingress_tstamp 36 | 
        hdr.int_ingress_tstamp.setValid();
        // 48-bit ingress timestamp widened to the 64-bit INT field.
        hdr.int_ingress_tstamp.ingress_tstamp = (bit<64>)standard_metadata.ingress_global_timestamp;
    }

    action int_set_header_5() { //egress_timestamp
        hdr.int_egress_tstamp.setValid();
        // 48-bit egress timestamp widened to the 64-bit INT field.
        hdr.int_egress_tstamp.egress_tstamp = (bit<64>)standard_metadata.egress_global_timestamp;
    }

    action int_set_header_6() { //level2_port_id
        hdr.int_level2_port_ids.setValid();
        // level2_port_id indicates Logical port ID
        hdr.int_level2_port_ids.ingress_port_id = (bit<32>) standard_metadata.ingress_port;
        hdr.int_level2_port_ids.egress_port_id = (bit<32>) standard_metadata.egress_port;
    }

    action int_set_header_7() { //egress_port_tx_utilization
        // TODO: implement tx utilization support in BMv2
        hdr.int_egress_tx_util.setValid();
        hdr.int_egress_tx_util.egress_port_tx_util =
        // (bit<32>) queueing_metadata.tx_utilization;
        0;
    }

    // Actions to keep track of the new metadata added.
    // add_N bumps the running count of metadata appended by this hop by
    // N 4-byte words / 4*N bytes. N must equal the total number of words
    // written by the int_set_header_* actions invoked alongside it; the
    // totals are later used to patch the IPv4/UDP/shim lengths.

    action add_1() {
        local_metadata.int_meta.new_words = local_metadata.int_meta.new_words + 1;
        local_metadata.int_meta.new_bytes = local_metadata.int_meta.new_bytes + 4;
    }

    action add_2() {
        local_metadata.int_meta.new_words = local_metadata.int_meta.new_words + 2;
        local_metadata.int_meta.new_bytes = local_metadata.int_meta.new_bytes + 8;
    }

    action add_3() {
        local_metadata.int_meta.new_words = local_metadata.int_meta.new_words + 3;
        local_metadata.int_meta.new_bytes = local_metadata.int_meta.new_bytes + 12;
    }

    action add_4() {
        local_metadata.int_meta.new_words = local_metadata.int_meta.new_words + 4;
        local_metadata.int_meta.new_bytes = local_metadata.int_meta.new_bytes + 16;
    }

    action add_5() {
        local_metadata.int_meta.new_words = local_metadata.int_meta.new_words + 5;
        local_metadata.int_meta.new_bytes = local_metadata.int_meta.new_bytes + 20;
    }

    action add_6() {
        local_metadata.int_meta.new_words = local_metadata.int_meta.new_words + 6;
        local_metadata.int_meta.new_bytes = local_metadata.int_meta.new_bytes + 24;
    }

    action add_7() {
        local_metadata.int_meta.new_words = local_metadata.int_meta.new_words + 7;
        local_metadata.int_meta.new_bytes = local_metadata.int_meta.new_bytes + 28;
    }
    /* action function for bits 0-3 combinations, 0 is msb, 3 is lsb */
    /* Each bit set indicates that corresponding INT header should be added */
    // Headers 0-3 (switch id, level-1 ports, hop latency, queue) are each
    // one 4-byte word, so each combination calls add_<number of set bits>.

    action int_set_header_0003_i0() {
    }

    action int_set_header_0003_i1() {
        int_set_header_3();
        add_1();
    }

    action int_set_header_0003_i2() {
        int_set_header_2();
        add_1();
    }

    action int_set_header_0003_i3() {
        int_set_header_3();
        int_set_header_2();
        add_2();
    }

    action int_set_header_0003_i4() {
        int_set_header_1();
        add_1();
    }
    // Remaining bits-0..3 combinations (i5-i15): same pattern, one word
    // per selected header.

    action int_set_header_0003_i5() {
        int_set_header_3();
        int_set_header_1();
        add_2();
    }

    action int_set_header_0003_i6() {
        int_set_header_2();
        int_set_header_1();
        add_2();
    }

    action int_set_header_0003_i7() {
        int_set_header_3();
        int_set_header_2();
        int_set_header_1();
        add_3();
    }

    action int_set_header_0003_i8() {
        int_set_header_0();
        add_1();
    }

    action int_set_header_0003_i9() {
        int_set_header_3();
        int_set_header_0();
        add_2();
    }

    action int_set_header_0003_i10() {
        int_set_header_2();
        int_set_header_0();
        add_2();
    }

    action int_set_header_0003_i11() {
        int_set_header_3();
        int_set_header_2();
        int_set_header_0();
        add_3();
    }

    action int_set_header_0003_i12() {
        int_set_header_1();
        int_set_header_0();
        add_2();
    }

    action int_set_header_0003_i13() {
        int_set_header_3();
        int_set_header_1();
        int_set_header_0();
        add_3();
    }

    action int_set_header_0003_i14() {
        int_set_header_2();
        int_set_header_1();
        int_set_header_0();
        add_3();
    }

    action int_set_header_0003_i15() {
        int_set_header_3();
        int_set_header_2();
        int_set_header_1();
        int_set_header_0();
        add_4();
    }

    /* action function for bits 4-7 combinations, 4 is msb, 7 is lsb */
    // Word counts differ here: the two timestamps (headers 4, 5) and the
    // level-2 port pair (header 6) are two words each, tx-util (header 7)
    // is one word; each add_N is the sum for the selected combination.
    action int_set_header_0407_i0() {
    }

    action int_set_header_0407_i1() {
        int_set_header_7();
        add_1();
    }

    action int_set_header_0407_i2() {
        int_set_header_6();
        add_2();
    }

    action int_set_header_0407_i3() {
        int_set_header_7();
        int_set_header_6();
        add_3();
    }

    action int_set_header_0407_i4() {
        int_set_header_5();
        add_2();
    }

    action int_set_header_0407_i5() {
        int_set_header_7();
        int_set_header_5();
        add_3();
    }

    action int_set_header_0407_i6() {
        int_set_header_6();
        int_set_header_5();
        add_4();
    }

    action int_set_header_0407_i7() {
        int_set_header_7();
        int_set_header_6();
        int_set_header_5();
        add_5();
    }

    action int_set_header_0407_i8() {
        int_set_header_4();
        add_2();
    }

    action int_set_header_0407_i9() {
        int_set_header_7();
        int_set_header_4();
        add_3();
    }

    action int_set_header_0407_i10() {
        int_set_header_6();
        int_set_header_4();
        add_4();
    }

    action int_set_header_0407_i11() {
        int_set_header_7();
        int_set_header_6();
        int_set_header_4();
        add_5();
    }

    action int_set_header_0407_i12() {
        int_set_header_5();
        int_set_header_4();
        add_4();
    }

    action int_set_header_0407_i13() {
        int_set_header_7();
        int_set_header_5();
        int_set_header_4();
        add_5();
    }

    action int_set_header_0407_i14() {
        int_set_header_6();
        int_set_header_5();
        int_set_header_4();
        add_6();
    }

    action int_set_header_0407_i15() {
        int_set_header_7();
        int_set_header_6();
        int_set_header_5();
        int_set_header_4();
        add_7();
    }

    // Default action used to set switch ID.
    // Single-entry table: the control plane installs init_metadata with
    // this switch's ID to enable transit processing; the NoAction default
    // leaves int_meta.transit false, skipping INT insertion entirely.
    table tb_int_insert {
        actions = {
            init_metadata;
            NoAction;
        }
        default_action = NoAction();
        size = 1;
    }

    /* Table to process instruction bits 0-3 */
    // Static dispatch: the 4-bit mask value N selects action ..._iN, which
    // appends exactly the headers whose bits are set in N.
    table tb_int_inst_0003 {
        key = {
            hdr.int_header.instruction_mask_0003 : exact;
        }
        actions = {
            int_set_header_0003_i0;
            int_set_header_0003_i1;
            int_set_header_0003_i2;
            int_set_header_0003_i3;
            int_set_header_0003_i4;
            int_set_header_0003_i5;
            int_set_header_0003_i6;
            int_set_header_0003_i7;
            int_set_header_0003_i8;
            int_set_header_0003_i9;
            int_set_header_0003_i10;
            int_set_header_0003_i11;
            int_set_header_0003_i12;
            int_set_header_0003_i13;
            int_set_header_0003_i14;
            int_set_header_0003_i15;
        }

        const entries = {
            (0x0) : int_set_header_0003_i0();
            (0x1) : int_set_header_0003_i1();
            (0x2) : int_set_header_0003_i2();
            (0x3) : int_set_header_0003_i3();
            (0x4) : int_set_header_0003_i4();
            (0x5) : int_set_header_0003_i5();
            (0x6) : int_set_header_0003_i6();
            (0x7) : int_set_header_0003_i7();
            (0x8) : int_set_header_0003_i8();
            (0x9) : int_set_header_0003_i9();
            (0xA) : int_set_header_0003_i10();
            (0xB) : int_set_header_0003_i11();
            (0xC) : int_set_header_0003_i12();
            (0xD) : int_set_header_0003_i13();
            (0xE) : int_set_header_0003_i14();
            (0xF) : int_set_header_0003_i15();
        }
    }

    /* Table to process instruction bits 4-7 */
    table tb_int_inst_0407 {
        key = {
            hdr.int_header.instruction_mask_0407 : exact;
        }
        actions = {
            int_set_header_0407_i0;
            int_set_header_0407_i1;
            int_set_header_0407_i2;
            int_set_header_0407_i3;
            int_set_header_0407_i4;
            int_set_header_0407_i5;
            int_set_header_0407_i6;
            int_set_header_0407_i7;
            int_set_header_0407_i8;
            int_set_header_0407_i9;
            int_set_header_0407_i10;
            int_set_header_0407_i11;
            int_set_header_0407_i12;
            int_set_header_0407_i13;
            int_set_header_0407_i14;
            int_set_header_0407_i15;
        }

        const entries = {
            (0x0) : int_set_header_0407_i0();
            (0x1) : int_set_header_0407_i1();
            (0x2) : int_set_header_0407_i2();
            (0x3) : int_set_header_0407_i3();
            (0x4) : int_set_header_0407_i4();
            (0x5) : int_set_header_0407_i5();
            (0x6) : int_set_header_0407_i6();
            (0x7) : int_set_header_0407_i7();
            (0x8) : int_set_header_0407_i8();
            (0x9) : int_set_header_0407_i9();
            (0xA) : int_set_header_0407_i10();
            (0xB) : int_set_header_0407_i11();
            (0xC) : int_set_header_0407_i12();
            (0xD) : int_set_header_0407_i13();
            (0xE) : int_set_header_0407_i14();
            (0xF) : int_set_header_0407_i15();
        }
    }

    apply {
        tb_int_insert.apply();
        // Not a transit node (no init_metadata entry installed): leave the
        // packet untouched.
        if (local_metadata.int_meta.transit == false) {
            return;
        }
        tb_int_inst_0003.apply();
        tb_int_inst_0407.apply();

        // Decrement remaining hop cnt
        // NOTE(review): decremented without checking remaining_hop_cnt > 0
        // first -- presumably upstream logic prevents insertion when the
        // budget is exhausted; confirm underflow handling against the INT
        // spec's M/E-bit semantics.
        hdr.int_header.remaining_hop_cnt = hdr.int_header.remaining_hop_cnt - 1;

        // Update headers lengths with the bytes/words appended by this hop.
        if (hdr.ipv4.isValid()) {
            hdr.ipv4.len = hdr.ipv4.len + local_metadata.int_meta.new_bytes;
        }
        if (hdr.udp.isValid()) {
            hdr.udp.length_ = hdr.udp.length_ + local_metadata.int_meta.new_bytes;
        }
        if (hdr.intl4_shim.isValid()) {
            hdr.intl4_shim.len = hdr.intl4_shim.len + local_metadata.int_meta.new_words;
        }
    }
}
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from functools import wraps
import google.protobuf.text_format
from google.rpc import status_pb2, code_pb2
import grpc
import logging
import queue
import sys
import threading
import time

from p4.v1 import p4runtime_pb2
from p4.v1 import p4runtime_pb2_grpc


class P4RuntimeErrorFormatException(Exception):
    """Raised when a gRPC error Status does not carry well-formed
    P4Runtime binary error details."""

    def __init__(self, message):
        super().__init__(message)


# Used to iterate over the p4.Error messages in a gRPC error Status object
class P4RuntimeErrorIterator:
    """Iterator yielding ``(index, p4.Error)`` tuples for every failed
    operation encoded in the trailing metadata of a gRPC UNKNOWN error.

    Raises:
        P4RuntimeErrorFormatException: if the gRPC error carries no (or
            malformed) binary details.
    """

    def __init__(self, grpc_error):
        assert(grpc_error.code() == grpc.StatusCode.UNKNOWN)
        self.grpc_error = grpc_error

        error = None
        # The gRPC Python package does not have a convenient way to access the
        # binary details for the error: they are treated as trailing metadata.
        for meta in self.grpc_error.trailing_metadata():
            if meta[0] == "grpc-status-details-bin":
                error = status_pb2.Status()
                error.ParseFromString(meta[1])
                break
        if error is None:
            raise P4RuntimeErrorFormatException("No binary details field")

        if len(error.details) == 0:
            raise P4RuntimeErrorFormatException(
                "Binary details field has empty Any details repeated field")
        self.errors = error.details
        self.idx = 0

    def __iter__(self):
        return self

    def __next__(self):
        while self.idx < len(self.errors):
            p4_error = p4runtime_pb2.Error()
            one_error_any = self.errors[self.idx]
            if not one_error_any.Unpack(p4_error):
                raise P4RuntimeErrorFormatException(
                    "Cannot convert Any message to p4.Error")
            if p4_error.canonical_code == code_pb2.OK:
                # BUG FIX: advance the cursor before skipping successful
                # operations. The original code executed `continue` without
                # incrementing self.idx, so any batch that contained an OK
                # entry made this loop spin forever.
                self.idx += 1
                continue
            v = self.idx, p4_error
            self.idx += 1
            return v
        raise StopIteration


# P4Runtime uses a 3-level message in case of an error during the processing of
# a write batch. This means that if we do not wrap the grpc.RpcError inside a
# custom exception, we can end-up with a non-helpful exception message in case
# of failure as only the first level will be printed. In this custom exception
# class, we extract the nested error message (one for each operation included in
# the batch) in order to print error code + user-facing message. See P4 Runtime
# documentation for more details on error-reporting.
class P4RuntimeWriteException(Exception):
    """Wraps a gRPC UNKNOWN error raised by a Write RPC, exposing every
    nested per-operation p4.Error as ``(index, p4.Error)`` in ``self.errors``."""

    def __init__(self, grpc_error):
        assert(grpc_error.code() == grpc.StatusCode.UNKNOWN)
        super().__init__()
        self.errors = []
        try:
            error_iterator = P4RuntimeErrorIterator(grpc_error)
            for error_tuple in error_iterator:
                self.errors.append(error_tuple)
        except P4RuntimeErrorFormatException:
            raise  # just propagate exception for now

    def __str__(self):
        # One line per failed operation: batch index, canonical code name,
        # and the user-facing message from the server.
        message = "Error(s) during Write:\n"
        for idx, p4_error in self.errors:
            code_name = code_pb2._CODE.values_by_number[
                p4_error.canonical_code].name
            message += "\t* At index {}: {}, '{}'\n".format(
                idx, code_name, p4_error.message)
        return message


class P4RuntimeException(Exception):
    """Generic wrapper for a grpc.RpcError raised by any non-Write RPC."""

    def __init__(self, grpc_error):
        super().__init__()
        self.grpc_error = grpc_error

    def __str__(self):
        message = "P4Runtime RPC error ({}): {}".format(
            self.grpc_error.code().name, self.grpc_error.details())
        return message


def parse_p4runtime_write_error(f):
    """Decorator: convert gRPC UNKNOWN errors raised by ``f`` into
    P4RuntimeWriteException; any other status code is re-raised as-is."""
    @wraps(f)
    def handle(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except grpc.RpcError as e:
            if e.code() != grpc.StatusCode.UNKNOWN:
                raise e
            raise P4RuntimeWriteException(e) from None
    return handle


def parse_p4runtime_error(f):
    """Decorator: convert any grpc.RpcError raised by ``f`` into
    P4RuntimeException."""
    @wraps(f)
    def handle(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except grpc.RpcError as e:
            raise P4RuntimeException(e) from None
    return handle


class P4RuntimeClient:
    """Thin client around the P4Runtime gRPC service: opens the bidirectional
    StreamChannel, performs mastership arbitration, and exposes pipeline
    config, Write/Read and digest helpers."""

    def __init__(self, device_id, grpc_addr, election_id):
        # election_id is a (high, low) pair of 64-bit halves.
        self.device_id = device_id
        self.election_id = election_id
        logging.debug("Connecting to device {} at {}".format(device_id, grpc_addr))
        try:
            self.channel = grpc.insecure_channel(grpc_addr)
        except Exception:
            logging.critical("Failed to connect to P4Runtime server")
            sys.exit(1)
        self.stub = p4runtime_pb2_grpc.P4RuntimeStub(self.channel)
        self.set_up_stream()

    def set_up_stream(self):
        """Start the StreamChannel: an outbound queue drained by a request
        iterator, a receive thread filling an inbound queue, then the
        arbitration handshake."""
        self.stream_out_q = queue.Queue()
        self.stream_in_q = queue.Queue()

        def stream_req_iterator():
            # Blocks on the outbound queue; a None sentinel (see tear_down)
            # ends the stream.
            while True:
                p = self.stream_out_q.get()
                if p is None:
                    break
                yield p

        def stream_recv_wrapper(stream):
            # stream_recv() blocks until the server closes the stream or an
            # RPC error occurs; errors are surfaced as a None sentinel on the
            # inbound queue so readers wake up.
            @parse_p4runtime_error
            def stream_recv():
                for p in stream:
                    self.stream_in_q.put(p)
            try:
                stream_recv()
            except P4RuntimeException as e:
                logging.critical("StreamChannel error, closing stream")
                logging.critical(e)
                self.stream_in_q.put(None)

        self.stream = self.stub.StreamChannel(stream_req_iterator())
        self.stream_recv_thread = threading.Thread(daemon=True,
                                                   target=stream_recv_wrapper,
                                                   args=(self.stream,))
        self.stream_recv_thread.start()

        self.handshake()

    def handshake(self):
        """Send the MasterArbitrationUpdate and wait (2s) for the reply;
        exits the process if the server does not answer."""
        req = p4runtime_pb2.StreamMessageRequest()
        arbitration = req.arbitration
        arbitration.device_id = self.device_id
        election_id = arbitration.election_id
        election_id.high = self.election_id[0]
        election_id.low = self.election_id[1]
        self.stream_out_q.put(req)

        rep = self.get_stream_packet("arbitration", timeout=2)
        if rep is None:
            logging.critical("Failed to establish session with server")
            sys.exit(1)
        is_primary = (rep.arbitration.status.code == code_pb2.OK)
        logging.debug("Session established, client is '{}'".format(
            'primary' if is_primary else 'backup'))
        if not is_primary:
            print("You are not the primary client, you only have read access to the server")

    def get_stream_packet(self, type_, timeout=1):
        """
        Retrieve packet from the StreamChannel.

        Args:
            type_ (string) : name of a field present in the packet
            timeout (int or None): time to wait for packet, if set to None,
                                   the function will wait indefinitely

        Return:
            packet (protobuf message)
        """
        start = time.time()
        try:
            while True:
                if timeout is not None:
                    remaining = timeout - (time.time() - start)
                    if remaining < 0:
                        break
                else:
                    remaining = None
                msg = self.stream_in_q.get(timeout=remaining)
                if msg is None:
                    # Sentinel from the receive thread: stream is dead.
                    return None
                if not msg.HasField(type_):
                    # Not the message type we are waiting for; keep draining.
                    continue
                return msg
        except queue.Empty:  # timeout expired
            pass
        return None

    def get_digest_list(self, timeout=None):
        """
        Retrieve DigestList and send back acknowledgment.

        Args:
            timeout (int or None): time to wait for packet, if set to None,
                                   the function will wait indefinitely

        Return:
            DigestList packet (protobuf message) or None if the timeout has
            expired and no packet has been received.

        Notice:
            See https://github.com/p4lang/p4runtime/blob/45d1c7ce2aad5dae819e8bba2cd72640af189cfe/proto/p4/v1/p4runtime.proto#L543
            for further details.
        """
        # Listen for StreamMessageResponse DigestLists
        rep = self.get_stream_packet("digest", timeout)
        # Initialize digList to None
        dig_list = None

        if rep is not None:
            # Retrieve digList
            dig_list = rep.digest
            # Retrieve fields
            digest_id = dig_list.digest_id
            list_id = dig_list.list_id

            # Generate acknowledgment
            req = p4runtime_pb2.StreamMessageRequest()
            ack = req.digest_ack
            ack.digest_id = digest_id
            ack.list_id = list_id
            # Send acknowledgment
            self.stream_out_q.put(req)

        # Return DigestList
        return dig_list

    @parse_p4runtime_error
    def get_p4info(self):
        """Fetch the P4Info of the pipeline currently installed on the device."""
        logging.debug("Retrieving P4Info file")
        req = p4runtime_pb2.GetForwardingPipelineConfigRequest()
        req.device_id = self.device_id
        req.response_type = p4runtime_pb2.GetForwardingPipelineConfigRequest.P4INFO_AND_COOKIE
        rep = self.stub.GetForwardingPipelineConfig(req)
        return rep.config.p4info

    @parse_p4runtime_error
    def set_fwd_pipe_config(self, p4info_path, bin_path):
        """Push a new pipeline (text P4Info + binary device config) with
        VERIFY_AND_COMMIT semantics."""
        logging.debug("Setting forwarding pipeline config")
        req = p4runtime_pb2.SetForwardingPipelineConfigRequest()
        req.device_id = self.device_id
        election_id = req.election_id
        election_id.high = self.election_id[0]
        election_id.low = self.election_id[1]
        req.action = p4runtime_pb2.SetForwardingPipelineConfigRequest.VERIFY_AND_COMMIT
        with open(p4info_path, 'r') as f1:
            with open(bin_path, 'rb') as f2:
                try:
                    google.protobuf.text_format.Merge(f1.read(), req.config.p4info)
                except google.protobuf.text_format.ParseError:
                    logging.error("Error when parsing P4Info")
                    raise
                req.config.p4_device_config = f2.read()
        return self.stub.SetForwardingPipelineConfig(req)

    def tear_down(self):
        """Close the StreamChannel (None sentinel + join receive thread) and
        the gRPC channel."""
        if self.stream_out_q:
            logging.debug("Cleaning up stream")
            self.stream_out_q.put(None)
            self.stream_recv_thread.join()
        self.channel.close()
        del self.channel  # avoid a race condition if channel deleted when process terminates

    @parse_p4runtime_write_error
    def write(self, req):
        """Send a pre-built WriteRequest, stamping device and election ids."""
        req.device_id = self.device_id
        election_id = req.election_id
        election_id.high = self.election_id[0]
        election_id.low = self.election_id[1]
        return self.stub.Write(req)

    @parse_p4runtime_write_error
    def write_update(self, update):
        """Wrap a single Update in a WriteRequest and send it."""
        req = p4runtime_pb2.WriteRequest()
        req.device_id = self.device_id
        election_id = req.election_id
        election_id.high = self.election_id[0]
        election_id.low = self.election_id[1]
        req.updates.extend([update])
        return self.stub.Write(req)

    # Decorator is useless here: in case of server error, the exception is raised during the
    # iteration (when next() is called).
    @parse_p4runtime_error
    def read_one(self, entity):
        """Read a single entity; returns the server's response stream."""
        req = p4runtime_pb2.ReadRequest()
        req.device_id = self.device_id
        req.entities.extend([entity])
        return self.stub.Read(req)

    @parse_p4runtime_error
    def api_version(self):
        """Return the P4Runtime API version supported by the server."""
        req = p4runtime_pb2.CapabilitiesRequest()
        rep = self.stub.Capabilities(req)
        return rep.p4runtime_api_version
--------------------------------------------------------------------------------
/p4utils/mininetlib/cli.py:
--------------------------------------------------------------------------------
"""__ https://github.com/mininet/mininet/blob/master/mininet/cli.py

This module is an extension of `mininet.cli`__. It provides a CLI interface that the user can enable
using the :py:class:`~p4utils.mininetlib.network_API.NetworkAPI` or the JSON network configuration file.
If enabled, the CLI starts right after the network boot and provides useful commands.
6 | """ 7 | 8 | import os 9 | import sys 10 | import traceback as tbk 11 | from functools import wraps 12 | from mininet.cli import CLI 13 | 14 | from p4utils.utils.helper import * 15 | from p4utils.mininetlib.log import debug, info, output, warning, error, critical 16 | 17 | 18 | def exception_handler(f): 19 | """Prevents exceptions from terminating the client, but still 20 | prints them. 21 | """ 22 | @wraps(f) 23 | def handle(*args, **kwargs): 24 | try: 25 | return f(*args, **kwargs) 26 | except: 27 | error(*tbk.format_exception(*sys.exc_info())) 28 | return False 29 | return handle 30 | 31 | 32 | class P4CLI(CLI): 33 | """Client class to interact with the network once it has been created. 34 | 35 | Attributes: 36 | network_api (:py:class:`~p4utils.mininetlib.network_API.NetworkAPI`): instance of the network orchestrator. 37 | """ 38 | 39 | def __init__(self, network_api, *args, **kwargs): 40 | self.net_api = network_api 41 | super().__init__(network_api.net, *args, **kwargs) 42 | # self.mn stores the Mininet network object according to the parent object 43 | 44 | def getNode(self, node_name): 45 | """Retrieves the requested node. 46 | 47 | Args: 48 | node_name (str): node name 49 | 50 | Returns: 51 | mininet.node.Node: requested node or **None** if no such object was found. 52 | """ 53 | # Check if the node is in Mininet 54 | if node_name not in self.mn: 55 | error('Node {} not found in the network.\n'.format(node_name)) 56 | return None 57 | node = self.mn[node_name] 58 | return node 59 | 60 | def getP4Switch(self, node_name): 61 | """Retrieves the requested P4 Switch. 62 | 63 | Args: 64 | node_name (string): P4 switch name 65 | 66 | Returns: 67 | mininet.node.Node: requested node or **None** if no such object was found. 
68 | """ 69 | node = self.getNode(node_name) 70 | 71 | if node is None: 72 | return None 73 | else: 74 | isP4Switch = get_node_attr(node, 'isP4Switch', False) 75 | if not isP4Switch: 76 | error('P4 Switch {} not found in the network\n'.format(node_name)) 77 | return None 78 | else: 79 | return node 80 | 81 | @exception_handler 82 | def do_p4switch_stop(self, line=""): 83 | """Stops execution of the specified P4 switch. 84 | 85 | **Usage**:: 86 | 87 | mininet> p4switch_stop 88 | """ 89 | switch_name = parse_line(line) 90 | 91 | # Check args validity 92 | if not switch_name or len(switch_name) > 1: 93 | error('Wrong syntax.\n') 94 | error('usage: p4switch_stop \n') 95 | return False 96 | 97 | switch_name = switch_name[0] 98 | p4switch = self.getP4Switch(switch_name) 99 | 100 | if p4switch is None: 101 | error('usage: p4switch_stop \n') 102 | return False 103 | 104 | # Check if switch is running 105 | if not p4switch.switch_running(): 106 | error('P4 Switch already stopped, start it first: p4switch_start {} \n'.format(switch_name)) 107 | return False 108 | 109 | p4switch.stop(deleteIntfs=False) 110 | 111 | @exception_handler 112 | def do_p4switch_start(self, line=""): 113 | """Starts a P4 switch. 114 | 115 | **Usage**:: 116 | 117 | mininet> p4switch_start [--p4src ] [--cmds ] 118 | 119 | Note: 120 | This command also allows to specify new configuration files for the switch: 121 | 122 | - ``--p4src`` provides a new P4 source, 123 | - ``--cmds`` provides a new command file. 
124 | """ 125 | args = parse_line(line) 126 | 127 | # Check args validity 128 | if len(args) > 5: 129 | error('Wrong syntax.\n') 130 | error( 131 | 'usage: p4switch_start [--p4src ] [--cmds ]\n') 132 | return False 133 | 134 | switch_name = args[0] 135 | 136 | p4switch = self.getP4Switch(switch_name) 137 | 138 | if p4switch is None: 139 | error('usage: p4switch_start \n') 140 | return False 141 | 142 | # Check if switch is running 143 | if p4switch.switch_running(): 144 | error('P4 Switch already running, stop it first: p4switch_stop {} \n'.format(switch_name)) 145 | return False 146 | 147 | # Check if new P4 source file has been provided 148 | p4_src = get_node_attr(p4switch, 'p4_src') 149 | if '--p4src' in args: 150 | p4_src = args[args.index('--p4src') + 1] 151 | # Check if file exists 152 | if not os.path.exists(p4_src): 153 | error('File Error: P4 source {} does not exist\n'.format(p4_src)) 154 | return False 155 | # Check if its not a file 156 | if not os.path.isfile(p4_src): 157 | error('File Error: p4source {} is not a file\n'.format(p4_src)) 158 | return False 159 | if p4_src is not None: 160 | compiler = get_by_attr( 161 | 'p4_src', os.path.realpath(p4_src), 162 | self.net_api.compilers) 163 | # If a compiler for the same p4_src has been found 164 | if compiler is not None: 165 | # If new file has been provided 166 | if compiler.new_source(): 167 | debug('New p4 source file detected!\n') 168 | compiler.compile() 169 | else: 170 | debug('P4 source already compiled!\n') 171 | # If this file is compiled for the first time 172 | elif self.net_api.modules['comp'] is not None: 173 | debug('New p4 source file detected!\n') 174 | compiler = self.net_api.modules['comp']['class']( 175 | p4_src=p4_src, **self.net_api.modules['comp']['kwargs']) 176 | compiler.compile() 177 | self.net_api.compilers.append(compiler) 178 | else: 179 | error('No compiler module provided!\n') 180 | return False 181 | 182 | # Start switch 183 | p4switch.start() 184 | 185 | cmd_path = None 
186 | # Check if new cmd file has been provided 187 | if '--cmds' in args: 188 | cmd_path = args[args.index('--cmds') + 1] 189 | # Check if file exists 190 | if not os.path.exists(cmd_path): 191 | error( 192 | 'File Error: command file {} does not exist\n'.format( 193 | cmd_path)) 194 | return False 195 | # Check if its not a file 196 | if not os.path.isfile(cmd_path): 197 | error( 198 | 'File Error: command file {} is not a file\n'.format( 199 | cmd_path)) 200 | return False 201 | if cmd_path is not None: 202 | client = get_by_attr('sw_name', switch_name, 203 | self.net_api.sw_clients) 204 | # If a client is present 205 | if client is not None: 206 | client.set_conf(cmd_path) 207 | client.configure() 208 | # If the switch has no client yet 209 | elif self.net_api.modules['sw_cli'] is not None: 210 | thrift_port = get_node_attr(p4switch, 'thrift_port') 211 | if thrift_port is not None: 212 | client = self.net_api.modules['sw_cli']['class'](sw_name=switch_name, 213 | thrift_port=thrift_port, 214 | **self.net_api.modules['sw_cli']['kwargs']) 215 | client.set_conf(cmd_path) 216 | client.configure() 217 | self.net_api.sw_clients.append(client) 218 | else: 219 | error( 220 | 'Switch {} has not thrift server enabled.\n'.format( 221 | switch_name)) 222 | return False 223 | else: 224 | error('No client module provided!\n') 225 | return False 226 | 227 | @exception_handler 228 | def do_p4switch_reboot(self, line=""): 229 | """Reboots a P4 switch. 230 | 231 | **Usage**:: 232 | 233 | mininet> p4switch_reboot [--p4src ] [--cmds ] 234 | 235 | Note: 236 | This command also allows to specify new configuration files for the switch: 237 | 238 | - ``--p4src`` provides a new P4 source, 239 | - ``--cmds`` provides a new command file. 
240 | """ 241 | if not line or len(parse_line(line)) > 5: 242 | error( 243 | 'usage: p4switch_reboot [--p4src ] [--cmds ]\n') 244 | return False 245 | else: 246 | switch_name = parse_line(line)[0] 247 | self.do_p4switch_stop(line=switch_name) 248 | self.do_p4switch_start(line=line) 249 | 250 | @exception_handler 251 | def do_p4switches_reboot(self, line=""): 252 | """Reboots all P4 switches with new program. 253 | 254 | **Usage**:: 255 | 256 | mininet> p4switches_reboot [--p4src ] [--cmds ] 257 | 258 | Note: 259 | This command also allows to specify the same 260 | new configuration files for all the switches: 261 | 262 | - ``--p4src`` provides a new P4 source, 263 | - ``--cmds`` provides a new command file. 264 | """ 265 | if len(parse_line(line)) > 4: 266 | error( 267 | 'usage: p4switches_reboot [--p4src ] [--cmds ]\n') 268 | return False 269 | else: 270 | for sw in self.mn.p4switches: 271 | switch_name = sw.name 272 | self.do_p4switch_stop(line=switch_name) 273 | 274 | tmp_line = switch_name + " " + line 275 | self.do_p4switch_start(line=tmp_line) 276 | 277 | # Run scripts 278 | if isinstance(self.net_api.scripts, list): 279 | for script in self.net_api.scripts: 280 | if script['reboot_run']: 281 | info('Exec Script: {}\n'.format(script['cmd'])) 282 | run_command(script['cmd'], script['out_file']) 283 | 284 | @exception_handler 285 | def do_test_p4(self, line=""): 286 | """Tests start stop functionalities. 287 | 288 | **Usage**:: 289 | 290 | mininet> test_p4 291 | """ 292 | self.do_p4switch_stop('s1') 293 | self.do_p4switch_start('s1') 294 | self.do_p4switch_reboot('s1') 295 | self.do_p4switches_reboot() 296 | 297 | @exception_handler 298 | def do_printSwitches(self, line=""): 299 | """Prints the names of all switches. 300 | 301 | **Usage**:: 302 | 303 | mininet> printSwitches 304 | """ 305 | for sw in self.mn.p4switches: 306 | output(sw.name+'\n') 307 | 308 | @exception_handler 309 | def do_pingset(self, line=""): 310 | """Pings between the hosts in the set. 
311 | 312 | **Usage**:: 313 | 314 | mininet> pingset ... 315 | """ 316 | hosts_names = line.strip().split() 317 | hosts = [x for x in self.mn.hosts if x.name in hosts_names] 318 | self.mn.ping(hosts=hosts, timeout=1) 319 | 320 | @exception_handler 321 | def do_task(self, line=""): 322 | """Executes a task on the given host. 323 | 324 | **Usage**:: 325 | 326 | mininet> task [] ... [] [--mod ] [-- ] ... [-- ] 327 | 328 | Note: 329 | The starting delay (specified with ````) is taken with 330 | respect to the current time. The deafult module in which functions 331 | are looked up is :py:mod:`p4utils.utils.traffic_utils`. A different 332 | module can be specified in the command with ``--mod ``. 333 | """ 334 | args, kwargs = parse_task_line(line) 335 | node = args[0] 336 | if self.getNode(node) is not None: 337 | if not self.net_api.hasScheduler(node): 338 | self.net_api.enableScheduler(node) 339 | self.net_api.start_scheduler(node) 340 | self.net_api.addTask(*args, enableScheduler=False, **kwargs) 341 | self.net_api.distribute_tasks() 342 | else: 343 | error('Node {} does not exist!\n'.format(node)) 344 | return False 345 | 346 | @exception_handler 347 | def do_enable_scheduler(self, line=""): 348 | """Enables the :py:class:`~p4utils.utils.task_scheduler.TaskServer` on a node. 349 | 350 | **Usage**:: 351 | 352 | mininet> enable_scheduler [] [--path ] 353 | 354 | Note: 355 | The directory where the socket file will be placed can be specified 356 | using ``--path ``. 
357 | """ 358 | args = parse_line(line) 359 | node = args[0] 360 | if self.getNode(node) is not None: 361 | if len(args) > 2: 362 | error('usage: enable_scheduler [] [--path ]\n') 363 | return False 364 | else: 365 | if not self.net_api.hasScheduler(node): 366 | if len(args) == 2: 367 | try: 368 | self.net_api.enableScheduler(node, path=args[1]) 369 | self.net_api.start_scheduler(node) 370 | except Exception as e: 371 | error(e+'\n') 372 | return False 373 | else: 374 | try: 375 | self.net_api.enableScheduler(node) 376 | self.net_api.start_scheduler(node) 377 | except Exception as e: 378 | error(e+'\n') 379 | return False 380 | else: 381 | error( 382 | 'Node {} has already a task scheduler running.\n'.format(node)) 383 | return False 384 | else: 385 | error('Node {} does not exist!\n'.format(node)) 386 | return False 387 | -------------------------------------------------------------------------------- /report_collector/report_rx.py: -------------------------------------------------------------------------------- 1 | import io, sys, signal, time, socket 2 | 3 | from prometheus_client import start_http_server, Gauge 4 | 5 | # ethernet(14B) + IP(20B) + UDP(8B) 6 | UDP_OFFSET = 14 + 20 + 8 7 | # ethernet(14B) + IP(20B) + TCP(20B) 8 | TCP_OFFSET = 14 + 20 + 20 9 | 10 | SWITCH_ID_BIT = 0b10000000 11 | L1_PORT_IDS_BIT = 0b01000000 12 | HOP_LATENCY_BIT = 0b00100000 13 | QUEUE_BIT = 0b00010000 14 | INGRESS_TSTAMP_BIT = 0b00001000 15 | EGRESS_TSTAMP_BIT = 0b00000100 16 | L2_PORT_IDS_BIT = 0b00000010 17 | EGRESS_PORT_TX_UTIL_BIT = 0b00000001 18 | 19 | # host,port for UDP report receiver 20 | HOST = '' 21 | PORT = 9555 22 | 23 | HOP_METADATA = ( 24 | 'switch_id', 25 | 'l1_ingress_port_id', 26 | 'l1_egress_port_id', 27 | 'hop_latency', 28 | 'q_id', 29 | 'q_occupancy', 30 | 'ingress_tstamp', 31 | 'egress_tstamp', 32 | 'l2_ingress_port_id', 33 | 'l2_egress_port_id', 34 | 'egress_port_tx_util' 35 | ) 36 | 37 | # prometheus metric 38 | FLOW_METRICS = Gauge( 39 | "flow_info", "Flow 
metrics", 40 | ['src_ip','dst_ip','src_port','dst_port','protocol','switch_id','metadata'] 41 | ) 42 | 43 | DEBUG = False 44 | TIMER = False 45 | 46 | ###### CLASSESS ############################################################### 47 | 48 | class HopMetadata(): 49 | def __init__(self): 50 | self.switch_id = None 51 | self.l1_ingress_port_id = None 52 | self.l1_egress_port_id = None 53 | self.hop_latency = None 54 | self.q_id = None 55 | self.q_occupancy = None 56 | self.ingress_tstamp = None 57 | self.egress_tstamp = None 58 | self.l2_ingress_port_id = None 59 | self.l2_egress_port_id = None 60 | self.egress_port_tx_util = None 61 | 62 | @staticmethod 63 | def from_bytes(data, ins_map): 64 | hop = HopMetadata() 65 | d = io.BytesIO(data) 66 | print('received hop metadata:', data) 67 | if ins_map & SWITCH_ID_BIT: 68 | hop.switch_id = int.from_bytes(d.read(4), byteorder='big') 69 | if ins_map & L1_PORT_IDS_BIT: 70 | hop.l1_ingress_port_id = int.from_bytes(d.read(2), byteorder='big') 71 | hop.l1_egress_port_id = int.from_bytes(d.read(2), byteorder='big') 72 | if ins_map & HOP_LATENCY_BIT: 73 | hop.hop_latency = int.from_bytes(d.read(4), byteorder='big') 74 | if ins_map & QUEUE_BIT: 75 | hop.q_id = int.from_bytes(d.read(1), byteorder='big') 76 | hop.q_occupancy = int.from_bytes(d.read(3), byteorder='big') 77 | if ins_map & INGRESS_TSTAMP_BIT: 78 | hop.ingress_tstamp = int.from_bytes(d.read(4), byteorder='big') 79 | if ins_map & EGRESS_TSTAMP_BIT: 80 | hop.egress_tstamp = int.from_bytes(d.read(4), byteorder='big') 81 | if ins_map & L2_PORT_IDS_BIT: 82 | hop.l2_ingress_port_id = int.from_bytes(d.read(4), byteorder='big') 83 | hop.l2_egress_port_id = int.from_bytes(d.read(4), byteorder='big') 84 | if ins_map & EGRESS_PORT_TX_UTIL_BIT: 85 | hop.egress_port_tx_util = int.from_bytes(d.read(4), byteorder='big') 86 | return hop 87 | 88 | def __str__(self): 89 | return str(vars(self)) 90 | 91 | class Report(): 92 | def __init__(self, ): 93 | # report header 94 | hdr = data[:16] 
95 | self.ver = hdr[0] >> 4 96 | self.len = hdr[0] & 0x0f 97 | self.nprot = hdr[1] >> 5 98 | self.rep_md_bits = (hdr[1] & 0x1f) + (hdr[2] >> 7) 99 | self.d = hdr[2] & 0x01 100 | self.q = hdr[3] >> 7 101 | self.f = (hdr[3] >> 6) & 0x01 102 | self.hw_id = hdr[3] & 0x3f 103 | self.switch_id = int.from_bytes(hdr[4:8], byteorder='big') 104 | self.seq_num = int.from_bytes(hdr[8:12], byteorder='big') 105 | self.ingress_tstamp = int.from_bytes(hdr[12:16], byteorder='big') 106 | 107 | # flow id 108 | ip_hdr = data[30:50] 109 | udp_hdr = data[50:58] 110 | protocol = ip_hdr[9] 111 | self.flow_id = ( 112 | ip_hdr[12:16], # src_ip 113 | ip_hdr[16:20], # dst_ip 114 | udp_hdr[:2], # src_port 115 | udp_hdr[2:4], # dst_port 116 | ip_hdr[9] # protocol 117 | ) 118 | 119 | # check next protocol 120 | # offset: udp/tcp + report header(16B) 121 | offset = 16 122 | if protocol == 17: 123 | offset = offset + UDP_OFFSET 124 | if protocol == 6: 125 | offset = offset + TCP_OFFSET 126 | 127 | # int shim 128 | self.int_shim = data[offset:offset + 4] 129 | self.int_data_len = int(self.int_shim[2]) - 3 130 | 131 | # int header 132 | self.int_hdr = data[offset + 4:offset + 12] 133 | self.hop_data_len = int(self.int_hdr[2] & 0x1f) 134 | self.ins_map = int.from_bytes(self.int_hdr[4:6], byteorder='big') 135 | self.hop_count = int(self.int_data_len / self.hop_data_len) 136 | 137 | # int metadata 138 | self.int_meta = data[offset + 12:] 139 | print(self.int_meta) 140 | self.hop_metadata = [] 141 | for i in range(self.hop_count): 142 | metadata_source = self.int_meta[i*self.hop_data_len*4:(i+1)*self.hop_data_len*4] 143 | print(metadata_source) 144 | self.hop_metadata.append(HopMetadata.from_bytes(metadata_source, self.ins_map)) 145 | 146 | def __str__(self): 147 | hop_info = '' 148 | for hop in self.hop_metadata: 149 | hop_info += str(hop) + '\n' 150 | return "sw: {} seq: {} tstamp: {} ins_map: {} \n {}".format( 151 | self.switch_id, 152 | self.seq_num, 153 | self.ingress_tstamp, 154 | self.ins_map, 
155 | hop_info 156 | ) 157 | 158 | class FlowInfo(): 159 | 160 | def __init__(self): 161 | # flow id - 5 tuple: (src_ip, dst_ip, src_port, dst_port, ip_proto) 162 | self.flow_id = None 163 | self.hop_cnt = 0 164 | 165 | self.switch_ids = [] 166 | self.l1_ingress_port_ids = [] 167 | self.l1_egress_port_ids = [] 168 | self.hop_latencies = [] 169 | self.q_ids = [] 170 | self.q_occups = [] 171 | self.ingress_tstamps = [] 172 | self.egress_tstamps = [] 173 | self.l2_ingress_port_ids = [] 174 | self.l2_egress_port_ids = [] 175 | self.egress_port_tx_utils = [] 176 | 177 | @staticmethod 178 | def from_report(report: Report): 179 | flow = FlowInfo() 180 | flow.flow_id = report.flow_id 181 | flow.hop_cnt = len(report.hop_metadata) 182 | 183 | for hop in report.hop_metadata: 184 | if hop.switch_id is not None: 185 | flow.switch_ids.append(hop.switch_id) 186 | if hop.l1_ingress_port_id is not None: 187 | flow.l1_ingress_port_ids.append(hop.l1_ingress_port_id) 188 | if hop.l1_egress_port_id is not None: 189 | flow.l1_egress_port_ids.append(hop.l1_egress_port_id) 190 | if hop.hop_latency is not None: 191 | flow.hop_latencies.append(hop.hop_latency) 192 | if hop.q_id is not None: 193 | flow.q_ids.append(hop.q_id) 194 | flow.q_occups.append(hop.q_occupancy) 195 | if hop.ingress_tstamp is not None: 196 | flow.ingress_tstamps.append(hop.ingress_tstamp) 197 | if hop.egress_tstamp is not None: 198 | flow.egress_tstamps.append(hop.egress_tstamp) 199 | if hop.l2_ingress_port_id is not None: 200 | flow.l2_ingress_port_ids.append(hop.l2_ingress_port_id) 201 | flow.l2_egress_port_ids.append(hop.l2_egress_port_id) 202 | if hop.egress_port_tx_util is not None: 203 | flow.egress_port_tx_utils.append(hop.egress_port_tx_util) 204 | 205 | return flow 206 | 207 | def __str__(self): 208 | return str(vars(self)) 209 | 210 | class Collector(): 211 | 212 | def __init__(self): 213 | self.flow_table = {} 214 | 215 | class GracefulKiller: 216 | kill_now = False 217 | def __init__(self): 218 | 
class GracefulKiller:
    """Sets ``kill_now`` when SIGINT/SIGTERM arrives so loops can exit cleanly."""
    kill_now = False

    def __init__(self):
        signal.signal(signal.SIGINT, self.exit_gracefully)
        signal.signal(signal.SIGTERM, self.exit_gracefully)

    def exit_gracefully(self, signum, frame):
        self.kill_now = True
        print("Got signal, quitting")


###### FUNCTIONS ##############################################################

def ip2str(ip):
    """Render 4 raw bytes as a dotted-quad IPv4 string."""
    return "{}.{}.{}.{}".format(ip[0], ip[1], ip[2], ip[3])


# (per-hop list attribute on FlowInfo, value of the 'metadata' label).
# Factored out of the original ten copy-pasted export stanzas.
_HOP_METRICS = (
    ('l1_ingress_port_ids', 'l1_ingress_port_id'),
    ('l1_egress_port_ids', 'l1_egress_port_id'),
    ('hop_latencies', 'hop_latency'),
    ('q_ids', 'q_id'),
    ('q_occups', 'q_occupancy'),
    ('ingress_tstamps', 'ingress_tstamp'),
    ('egress_tstamps', 'egress_tstamp'),
    ('l2_ingress_port_ids', 'l2_ingress_port_id'),
    ('l2_egress_port_ids', 'l2_egress_port_id'),
    # label value kept exactly as in the original code (plural form)
    ('egress_port_tx_utils', 'egress_port_tx_utils'),
)


def _flow_labels(flow):
    """Prometheus label values shared by every metric of a flow."""
    return dict(
        src_ip=ip2str(flow.flow_id[0]),
        dst_ip=ip2str(flow.flow_id[1]),
        src_port=str(int.from_bytes(flow.flow_id[2], byteorder='big')),
        dst_port=str(int.from_bytes(flow.flow_id[3], byteorder='big')),
        protocol=str(int(flow.flow_id[4])),
    )


def _export_flow(flow):
    """Push every per-hop metric of *flow* into FLOW_METRICS."""
    labels = _flow_labels(flow)
    for hop in range(flow.hop_cnt):
        if flow.switch_ids:
            FLOW_METRICS.labels(switch_id=flow.switch_ids[hop],
                                metadata='hop_num', **labels).set(str(hop))
        for attr, metadata in _HOP_METRICS:
            values = getattr(flow, attr)
            if values:
                # NOTE(review): indexes switch_ids[hop] even when exporting the
                # other lists, exactly as the original code did; assumes every
                # present list has one entry per hop — confirm with the P4 side.
                FLOW_METRICS.labels(switch_id=flow.switch_ids[hop],
                                    metadata=metadata,
                                    **labels).set(values[hop])


def receiver():
    """Receive INT reports over UDP, parse them and export Prometheus metrics.

    Runs until SIGINT/SIGTERM flips the GracefulKiller flag or Ctrl-C is hit.
    """
    collector = Collector()
    killer = GracefulKiller()
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
        s.bind((HOST, PORT))
        try:
            while not killer.kill_now:
                data, addr = s.recvfrom(512)
                if TIMER:
                    t1 = time.time()
                rep = Report(data)
                if DEBUG:
                    print("-- Received report from {} --------".format(addr))
                    print(rep)

                new_flow = FlowInfo.from_report(rep)
                collector.flow_table[new_flow.flow_id] = new_flow
                if DEBUG:
                    print(new_flow)

                _export_flow(new_flow)

                if TIMER:
                    t2 = time.time()
                    print("\rReports per second: {}".format(1/(t2-t1)), end='')

        except KeyboardInterrupt:
            s.close()


if __name__ == "__main__":
    # Accept the flags in any position (the original only inspected argv[1],
    # so '--debug --time' silently dropped the second flag).
    for arg in sys.argv[1:]:
        if arg == '--debug':
            DEBUG = True
        if arg == '--time':
            TIMER = True

    start_http_server(8000)
    receiver()
def wait_condition(func, value, args=[], kwargs={}, timeout=0):
    """Waits for the function to return the specified value.

    Args:
        func (types.FunctionType): function to check
        value : condition to meet
        args (list) : positional arguments of the function
        kwargs (dict) : key-word arguments of the function
        timeout (float) : time to wait for condition in seconds

    Returns:
        bool: **True** if the condition is met before the timeout
        expires, **False** otherwise.

    Note:
        If ``timeout`` is set to ``0``, this function will wait forever.
        The mutable defaults are only read, never modified, so sharing
        them between calls is harmless.
    """
    start_time = time.time()
    # Single polling loop replaces the original duplicated while/else pair:
    # with timeout == 0 the guard is never true, so we spin until the
    # condition holds, exactly as before.
    while func(*args, **kwargs) != value:
        if timeout > 0 and time.time() - start_time >= timeout:
            return False
    return True


def merge_dict(dst, src):
    """Merges source dictionary fields and subfields into destination dictionary.

    Nested dicts are merged key by key (iteratively, so deeply nested input
    cannot hit the recursion limit); any other value in ``src`` overwrites
    the corresponding entry in ``dst``.

    Args:
        dst (dict): destination dictionary (modified in place)
        src (dict): source dictionary
    """
    stack = [(dst, src)]
    while stack:
        cur_dst, cur_src = stack.pop()
        for key, src_val in cur_src.items():
            dst_val = cur_dst.get(key)
            if isinstance(src_val, dict) and isinstance(dst_val, dict):
                stack.append((dst_val, src_val))
            else:
                cur_dst[key] = src_val
def next_element(elems, minimum=None, maximum=None):
    """Given a list of integers, return the lowest number not already
    present in the set, starting from minimum and ending in maximum.

    Args:
        elems (list) : list of integers
        minimum (int): minimum value allowed for elements
        maximum (int): maximum value allowed for elements

    Returns:
        int: the lowest number not already present in the set.
    """
    elements = set(elems)
    if len(elements) != len(elems):
        raise Exception('the list contains duplicates.')
    if not elements:
        return minimum

    if maximum is None:
        maximum = max(elements)
    if minimum is None:
        minimum = min(elements)
    else:
        # Discard everything below the allowed minimum.
        elements = {e for e in elements if e >= minimum}
        # Keep the range well-formed.
        maximum = max(maximum, minimum)

    span = (maximum - minimum) + 1
    if len(elements) == span:
        # Range is full: hand out the next value above it.
        return maximum + 1
    if len(elements) < span:
        # There is at least one gap in [minimum, maximum]; return the lowest.
        return next(e for e in range(minimum, maximum + 1) if e not in elements)
    raise Exception('too many elements in the list.')


def rand_mac():
    """Generate a random, non-multicast (locally administered) MAC address.

    Returns:
        str: MAC address.
    """
    # Clear the multicast bit and set the locally-administered bit.
    value = random.randint(1, 2**48-1) & 0xfeffffffffff | 0x020000000000
    hex_str = '{:012x}'.format(value)
    return ':'.join(hex_str[i:i + 2] for i in range(0, 12, 2))


def dpidToStr(id):
    """Compute a string **dpid** from an integer **id**.

    Args:
        id (int): integer device id

    Returns:
        str: device dpid, zero-padded to 16 hex digits.
    """
    # zfill never truncates, so ids longer than 16 digits pass through intact.
    return hex(id)[2:].zfill(16)
def check_listening_on_port(port):
    """Checks if the given port is listening in the main namespace.

    Args:
        port (int): port number

    Returns:
        bool: **True** if the port is listening, **False** otherwise.
    """
    for c in psutil.net_connections(kind='inet'):
        if c.status == 'LISTEN' and c.laddr[1] == port:
            return True
    return False


def cksum(filename):
    """Returns the md5 checksum of a file.

    Args:
        filename (str): path to the file

    Returns:
        str: md5 checksum of the file.
    """
    # Fix: use a context manager so the file handle is always closed
    # (the original leaked the handle returned by open()).
    with open(filename, 'rb') as f:
        return hashlib.md5(f.read()).hexdigest()


def get_node_attr(node, attr_name, default=None):
    """Finds the value of the specified attribute of a *Mininet* node
    by looking also inside its unparsed parameters.

    Args:
        node (object) : *Mininet* node object
        attr_name (string): attribute to look for
        default : value returned when the attribute is absent

    Returns:
        the value of the requested attribute, or ``default``.
    """
    try:
        return getattr(node, attr_name)
    except AttributeError:
        try:
            # Fall back to the node's unparsed parameter dict.
            params = getattr(node, 'params')
            if attr_name in params.keys():
                return params[attr_name]
            else:
                return default
        except AttributeError:
            return default


def get_by_attr(attr_name, attr_value, obj_list):
    """Return the first object in the list that has an attribute matching with
    the attribute name and value provided.

    Args:
        attr_name (string): attribute name
        attr_value : attribute value
        obj_list (list) : list of objects

    Returns:
        object: the requested object or **None**.
    """
    for obj in obj_list:
        if attr_value == getattr(obj, attr_name):
            return obj
    return None
def ip_address_to_mac(ip):
    """Generate a MAC-address string from an IPv4 address.

    Args:
        ip (str): IPv4 address (an optional ``/prefix`` suffix is stripped)

    Returns:
        str: string of the form ``'00:%02x:<a>:<b>:<c>:<d>'`` where the four
        trailing octets come from the IP.

    Note:
        NOTE(review): because ``%`` binds tighter than ``+``, only the last
        four placeholders are formatted here and the result keeps one literal
        ``'%02x'`` slot. This looks intentional (callers appear to fill it
        with ``result % n``) — confirm against the callers before changing it.
    """
    if "/" in ip:
        ip = ip.split('/')[0]
    octets = tuple(int(part) for part in ip.split('.'))
    return '00:%02x' + ':%02x:%02x:%02x:%02x' % octets


def is_compiled(p4_src, compilers):
    """Check if a file has been already compiled by at least one compiler in the list.

    Arguments:
        p4_src (string) : P4 file path
        compilers (list): list of P4 compiler objects (see compiler.py)

    Returns:
        bool: **True** if the file has been already compiled, **False** otherwise.
    """
    for compiler in compilers:
        if getattr(compiler, 'compiled') and getattr(compiler, 'p4_src') == p4_src:
            return True
    return False


def load_conf(conf_file):
    """Load JSON application configuration file.

    Args:
        conf_file (str): path to the JSON network configuration file

    Returns:
        dict: network configuration dictionary.
    """
    with open(conf_file, 'r') as f:
        return json.load(f)


def load_topo(json_path):
    """Load the topology from the path provided.

    Args:
        json_path (string): path of the JSON file to load

    Returns:
        p4utils.utils.topology.NetworkGraph: the topology graph.
    """
    with open(json_path, 'r') as f:
        graph_dict = json.load(f)
    return NetworkGraph(node_link_graph(graph_dict))
def load_custom_object(obj):
    """Loads object from module.

    Args:
        obj (dict): JSON object to load

    Returns:
        object: Python object retrieved from the module.

    Example:
        This function takes as input a module JSON object::

            {
                "file_path": (string) (*),
                "module_name": (string),
                "object_name": (string),
            }

    Note:
        None of the fields marked with ``(*)`` is mandatory. The ``file_path``
        field is optional and has to be used if the module is not present
        in ``sys.path``.
        NOTE(review): the path is prepended to ``sys.path`` and never removed,
        so repeated calls keep growing ``sys.path``.
    """
    file_path = obj.get('file_path', '.')
    sys.path.insert(0, file_path)

    module = importlib.import_module(obj['module_name'])
    return getattr(module, obj['object_name'])


def old_run_command(command):
    """Execute command in the main namespace.

    Args:
        command (str): command to execute

    Returns:
        int: the command's exit status.
    """
    debug(command+'\n')
    return os.WEXITSTATUS(os.system(command))


def run_command(command, out_file=None):
    """Execute command in the main namespace without waiting for it.

    Args:
        command (str) : command to execute
        out_file (str): where to redirect *stdout* and *stderr*

    Returns:
        int: the PID of the spawned process.
    """
    if isinstance(command, str):
        debug(command+'\n')
        command = command.split()
    else:
        debug(' '.join(command) + '\n')

    if not out_file:
        proc = subprocess.Popen(command, stdout=subprocess.DEVNULL,
                                stderr=subprocess.DEVNULL)
    else:
        # Fix: close the parent's copy of the log file once the child has
        # inherited it (the original leaked one file descriptor per call).
        with open(out_file, 'w') as of:
            proc = subprocess.Popen(command, stdout=of, stderr=of)
    return proc.pid
def parse_line(line):
    """Parse text line returning a list of substrings.

    Double-quoted spans are kept as single tokens; everything else is split
    on whitespace.

    Args:
        line (str): line to parse

    Returns:
        list: list of args obtained from the parsing.

    Example:
        As an example, consider the following string::

            'ahjdjf djdfkfo1 --jdke hdjejeek --dfjfj "vneovn rijvtg"'

        The function will parse it and give as output the following list::

            ["ahjdjf", "djdfkfo1", "--jdke", "hdjejeek", "--dfjfj", "vneovn rijvtg"]
    """
    tokens = []
    # After splitting on '"', pieces at even indices are outside quotes and
    # pieces at odd indices are the quoted spans themselves.
    for idx, piece in enumerate(line.split('"')):
        if idx % 2:
            tokens.append(piece)
        else:
            tokens.extend(piece.split())
    return tokens
def parse_task_line(line, def_mod='p4utils.utils.traffic_utils'):
    """Parse text line and return all the parameters needed
    to create a task with :py:func:`p4utils.mininetlib.network_API.NetworkAPI.addTask()`.

    Args:
        line (str) : string containing all the task information
        def_mod (str): default module where to look for exe functions

    Returns:
        tuple: a tuple (**args**, **kwargs**) where **args** is a list of
        arguments and **kwargs** is a dictionary of key-word pairs.

    Note:
        A non-default module can be specified in the command with ``--mod <module>``.
        NOTE(review): when fewer than 4 tokens are given, only a usage message
        is printed and parsing still proceeds, as in the original code.
    """
    args = []
    kwargs = {}
    mod = importlib.import_module(def_mod)
    tokens = parse_line(line)
    if len(tokens) < 4:
        error(
            'usage: [] ... [] [--mod ] [-- ] ... [-- ]\n')

    # Positional layout: tokens[0]=node, [1]=start, [2]=duration, [3]=exe;
    # the rest are free args or '--key value' pairs.
    i = 0
    while i < len(tokens):
        token = tokens[i]
        if i == 0:
            args.append(token)
        elif i == 1:
            kwargs['start'] = float(token)
        elif i == 2:
            kwargs['duration'] = float(token)
        elif i == 3:
            args.append(token)
        elif len(token) > 2 and token[:2] == '--':
            if token == '--mod':
                mod = importlib.import_module(tokens[i+1])
            else:
                kwargs.setdefault('kwargs', {})
                kwargs['kwargs'][token[2:]] = tokens[i+1]
            # The option's value has been consumed too.
            i += 1
        else:
            kwargs.setdefault('args', [])
            kwargs['args'].append(token)
        i += 1

    try:
        # Replace the exe name with the function from the module, if present.
        args[1] = getattr(mod, args[1])
    except AttributeError:
        # Otherwise keep it as a plain command string.
        pass

    return args, kwargs
def kill_proc_tree(pid, sig=signal.SIGKILL, include_parent=True,
                   timeout=None, on_terminate=None):
    """Kills a process tree (including children).

    Args:
        pid (int) : PID of the parent process
        sig (int) : signal used to kill the tree
        include_parent (bool) : whether to kill the parent process or not
        timeout (int or float) : time to wait for a process to terminate
        on_terminate (types.FunctionType): callback function executed as soon
            as a child terminates.

    Returns:
        tuple: ``(gone, still_alive)``.
    """
    assert pid != os.getpid(), "won't kill myself"
    root = psutil.Process(pid)
    victims = root.children(recursive=True)
    if include_parent:
        victims.append(root)
    for proc in victims:
        try:
            proc.send_signal(sig)
        except psutil.NoSuchProcess:
            # Already gone; nothing to do.
            pass
    return psutil.wait_procs(victims, timeout=timeout, callback=on_terminate)


class WrapFunc:
    """Wraps a function in such a way that it can be executed
    across different Python interpreters in the same system.

    Args:
        func (types.FunctionType): function to wrap
    """

    def __init__(self, func):
        # Sanity check
        assert isinstance(func, types.FunctionType)

        self.f_name = func.__name__

        # Resolve the importable module name; scripts executed as __main__
        # use their file's basename instead.
        if func.__module__ == '__main__':
            self.m_name = os.path.splitext(
                os.path.basename(sys.modules[func.__module__].__file__))[0]
        else:
            self.m_name = func.__module__

        # The module's dotted name as a relative filesystem path.
        rel_path = str.replace(self.m_name, '.', '/')
        # The module's absolute path without extension.
        abs_path = os.path.splitext(
            os.path.realpath(sys.modules[func.__module__].__file__))[0]
        # The package root is the absolute path minus the relative suffix.
        if abs_path[-len(rel_path):] == rel_path:
            self.p_path = abs_path[:len(abs_path)-len(rel_path)]
        else:
            raise Exception('module name does not match its path!')

    def __repr__(self):
        return 'function {}.{}'.format(self.m_name, self.f_name)

    def unwrap(self):
        """Unwraps function and returns it."""
        # Ensure the package root is importable: if no entry of sys.path is a
        # prefix (by real path) of the package root, append it.
        if not any(os.path.commonpath(
                       [os.path.realpath(path), self.p_path]
                   ) == os.path.realpath(path)
                   for path in sys.path):
            sys.path.append(self.p_path)
        # Import the module and hand back the named function.
        module = importlib.import_module(self.m_name)
        return getattr(module, self.f_name)