├── doc
│   ├── 5g_arch.jpg
│   ├── n4_pfcp.jpg
│   ├── components.jpg
│   ├── gtpu_header.jpg
│   ├── process_flow.jpg
│   └── gtpu_pdu_session.jpg
├── src
│   ├── control
│   │   ├── go.mod
│   │   └── main.go
│   └── datapath
│       └── upf.p4
├── test_script
│   ├── send_udp_downlink.py
│   ├── send_gtp_uplink.py
│   └── pfcp_request.py
├── Makefile
└── README.md
/doc/5g_arch.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/801room/upf_p4_poc/HEAD/doc/5g_arch.jpg
--------------------------------------------------------------------------------
/doc/n4_pfcp.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/801room/upf_p4_poc/HEAD/doc/n4_pfcp.jpg
--------------------------------------------------------------------------------
/doc/components.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/801room/upf_p4_poc/HEAD/doc/components.jpg
--------------------------------------------------------------------------------
/doc/gtpu_header.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/801room/upf_p4_poc/HEAD/doc/gtpu_header.jpg
--------------------------------------------------------------------------------
/doc/process_flow.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/801room/upf_p4_poc/HEAD/doc/process_flow.jpg
--------------------------------------------------------------------------------
/doc/gtpu_pdu_session.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/801room/upf_p4_poc/HEAD/doc/gtpu_pdu_session.jpg
--------------------------------------------------------------------------------
/src/control/go.mod:
--------------------------------------------------------------------------------
1 | module 801room/upf-p4
2 |
3 | go 1.14
4 |
5 | require (
6 | 	github.com/antoninbas/p4runtime-go-client v0.0.0-20201006021624-9212bb6aa549
7 | 	github.com/sirupsen/logrus v1.7.0
8 | 	github.com/wmnsk/go-pfcp v0.0.6
9 | 	google.golang.org/grpc v1.33.0
10 | )
11 |
--------------------------------------------------------------------------------
/test_script/send_udp_downlink.py:
--------------------------------------------------------------------------------
#!/usr/bin/python

# Send downlink UDP packets (DN -> UE) so the UPF downlink PDR/FAR can be
# exercised; the switch is expected to GTP-U encapsulate these towards the gNB.

from scapy.layers.inet import IP, UDP
from scapy.sendrecv import send

gNB_ADDR = '193.168.1.2'
N3_ADDR = '193.168.1.3'
UE_ADDR = '10.10.10.2'
DN_ADDR = '10.10.10.3'

RATE = 5  # packets per second
PAYLOAD = ' '.join(['P4 is great!'] * 50)

# print() works under both Python 2 and Python 3; the original used the
# Python-2-only print statement and crashed under Python 3.
print("Sending %d UDP packets per second to %s..." % (RATE, UE_ADDR))

pkt = IP(src=DN_ADDR, dst=UE_ADDR) / UDP(sport=10053, dport=10053) / PAYLOAD
send(pkt, iface='h1b-eth0', inter=1.0 / RATE, loop=True, verbose=True)
--------------------------------------------------------------------------------
/test_script/send_gtp_uplink.py:
--------------------------------------------------------------------------------
#!/usr/bin/python

# Send uplink GTP-U packets (gNB -> N3) carrying an inner UE -> DN UDP flow,
# so the UPF uplink PDR (TEID match + decap) can be exercised.

from scapy.layers.inet import IP, UDP
from scapy.sendrecv import send
from scapy.contrib.gtp import GTPHeader

gNB_ADDR = '193.168.1.2'
N3_ADDR = '193.168.1.3'
UE_ADDR = '10.10.10.2'
DN_ADDR = '10.10.10.3'

TEID = 1111  # must match the F-TEID signalled in the PFCP session
RATE = 5  # packets per second
PAYLOAD = ' '.join(['P4 is great!'] * 50)

# Leave 'length' unset so scapy computes it from the encapsulated payload.
# The original hard-coded length=677, which was only valid for this exact
# payload and silently produced a corrupt header for any other size.
GTP = GTPHeader(version=1, teid=TEID, gtp_type=0xff)

# print() works under both Python 2 and Python 3; the original used the
# Python-2-only print statement and crashed under Python 3.
print("Sending %d UDP packets per second to %s..." % (RATE, UE_ADDR))

pkt = (IP(src=gNB_ADDR, dst=N3_ADDR) / UDP(sport=2152, dport=2152) / GTP /
       IP(src=UE_ADDR, dst=DN_ADDR) / UDP(sport=10053, dport=10053) / PAYLOAD)
send(pkt, iface='h1a-eth0', inter=1.0 / RATE, loop=True, verbose=True)
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
base_path := $(shell pwd)

# All targets are actions, not files; declare them phony so a stray file
# named e.g. "clean" or "build" cannot shadow them.
.PHONY: default deps-env build clean

default:
	$(error Please specify a make target (see README.md ; QuickStart:make deps-env && make build))

# One-time environment setup: clone the ngsdn-tutorial repo and pull its
# docker images.
deps-env:
	test -d ngsdn-tutorial || git clone -b master https://github.com/opennetworkinglab/ngsdn-tutorial
	cd ngsdn-tutorial && make pull-deps

# Compile the P4 program via the tutorial toolchain, build the Go control
# plane, and stage everything under ./build plus the mininet test scripts.
build: clean
	@mkdir build
	cp src/datapath/upf.p4 ngsdn-tutorial/p4src/main.p4
	cd ${base_path}/ngsdn-tutorial && make p4-build
	cp ngsdn-tutorial/p4src/build/bmv2.json build/
	cp ngsdn-tutorial/p4src/build/p4info.txt build/
	cd src/control/ && go build -o cp-upf main.go && mv cp-upf ${base_path}/build
	cp test_script/send_gtp_uplink.py ./ngsdn-tutorial/mininet/
	cp test_script/send_udp_downlink.py ./ngsdn-tutorial/mininet/

clean:
	@rm -rf build
	@rm -f ./ngsdn-tutorial/mininet/send_gtp_uplink.py
	@rm -f ./ngsdn-tutorial/mininet/send_udp_downlink.py
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # upf_p4_poc
2 | This project is a proof of concept for 5g upf based on p4.
3 | The project uses open source project to verify key technologies, such as pfcp, p4runtime RPC and programmable dataplane pipeline.
4 |
5 | ## Related open source projects
6 | 1) **go-pfcp** https://github.com/wmnsk/go-pfcp
7 | PFCP implementation in Golang. This project uses go-pfcp to encode and decode PFCP messages.
8 | 2) **p4runtime-go-client** https://github.com/antoninbas/p4runtime-go-client
9 | Go client for P4Runtime. This project uses p4runtime-go-client to connect to stratum_bmv2 (as the switch OS and pipeline instance), configure the pipeline and insert table entries.
10 | 3) **scapy** https://github.com/secdev/scapy
11 | Scapy is a powerful Python-based interactive packet manipulation program and library.
12 | This project uses it to construct and send PFCP request messages. It is also used to send user-plane uplink and downlink data packets for functional testing.
13 | 4) **ngsdn-tutorial** https://github.com/opennetworkinglab/ngsdn-tutorial
14 | Tutorial for Next-generation SDN (NG-SDN).It contains a set of containers, such as p4c, mininet(include stratum-bmv2), onos. Use it as a p4 compilation and test runtime environment.
15 |
16 | ## What is UPF?
17 | The User Plane Function (UPF) is a fundamental component of a 3GPP 5G core infrastructure system architecture.
18 | It acts as a gateway between the base station and the data network.The UPF identifies user plane traffic flow and action based on information received from control plane network function(as SMF in 5g).
19 | 
20 |
21 | PFCP(Packet Forwarding Control Protocol) used in CP NF to control UPF,it defined in 3GPP TS29.244.
22 | It is similar to OpenFlow. Maybe consider it as the southbound interface of the dataplane in the mobile networks.
23 | 
24 | Packet processing flow in the UP function(ps:I think this processing flow is just an ideal definition, and there are multiple implementations ^_^)
25 | 
26 |
27 | The GTP (GPRS Tunnelling Protocol) protocol provides protocol channels between mobile access network and core network. All UE access DN should be encapsulated by the GTP protocol.
28 | GTP-U header.
29 | 
30 | User plane protocol stack for a PDU session
31 | 
32 |
33 | ## Requirements
34 | OS:Ubuntu 18.04
35 | Go (>= 1.14)
36 | ps:maybe need to set proxy getting package,like me
37 | ```
38 | export GO111MODULE=on
39 | export GOPROXY=https://goproxy.cn
40 | ```
41 | docker docker-compose git python2.7 make ...
42 |
43 | ## Getting Started
44 | 1.Download and Prepare the environment.(one time)
45 | Note:It will take some time to download docker images.
46 | ```
47 | git clone
48 | make deps-env
49 | ```
50 | 2.Build
51 | The result in "build" folder.
52 | ```
53 | make build
54 | ```
55 | 3. Running
56 | The components of poc
57 | 
58 | Start the stratum environment, including mininet. See also [ngsdn-tutorial](https://github.com/opennetworkinglab/ngsdn-tutorial) .
59 | ```
60 | cd ./ngsdn-tutorial
61 | make start
62 | ```
63 |
Start cp-upf
64 |
Note:replace param as your environment.
65 | ```
66 | cd ./build
67 | ./cp-upf --addr ${STRATUMIP}:${STRATUMPORT} -bin bmv2.json -p4info p4info.txt -device-id 1 -n4addr {N4IP}:{N4PORT}
68 | ```
69 |
70 | 4.Test
71 | Mininet config ip and downlink route by hands.
72 | ```
73 | cd ./ngsdn-tutorial
74 | make mn-cli
75 | mininet> h1a ip addr add 193.168.1.2/24 dev h1a-eth0
76 | mininet> h1b ip addr add 193.168.1.3/24 dev h1b-eth0
77 | mininet> h1b ip route add 10.10.10.2/32 via 193.168.1.2
78 | ```
79 | Send PFCP request to create pdu session.The cp-upf receive pfcp messages will translate to some p4table operations.
80 | Note:need to modify ip in the scripts based on run environment.
81 | ```
82 | ./test_script/pfcp_request.py
83 | ```
84 | Send gtpu uplink
85 | ```
86 | cd ./ngsdn-tutorial
87 | util/mn-cmd h1a python /mininet/send_gtp_uplink.py
88 | util/mn-cmd h1a tcpdump -i h1a-eth0 -w /tmp/uplink-ue.pcap
89 | util/mn-cmd h1b tcpdump -i h1b-eth0 -w /tmp/uplink-dn.pcap
90 | ```
91 | Send gtpu downlink
92 | ```
93 | util/mn-cmd h1b python /mininet/send_udp_downlink.py
94 | util/mn-cmd h1a tcpdump -i h1a-eth0 -w /tmp/downlink-ue.pcap
95 | util/mn-cmd h1b tcpdump -i h1b-eth0 -w /tmp/downlink-dn.pcap
96 | ```
97 | ## Planning
98 | Add more counter and connect to onos.
99 |
Add packout to cpu and dpi.
100 |
Also I have a plan to coding upf poc based on ebpf(xdp).that will be another story,but all of them will be one story.
101 |
102 |
103 |
104 |
105 |
106 |
107 |
108 |
109 |
--------------------------------------------------------------------------------
/test_script/pfcp_request.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime
2 | import uuid
3 | from random import getrandbits
4 | from scapy.contrib.pfcp import CauseValues, IE_ApplyAction, IE_Cause, \
5 | IE_CreateFAR, IE_CreatePDR, IE_CreateURR, IE_DestinationInterface, \
6 | IE_DurationMeasurement, IE_EndTime, IE_EnterpriseSpecific, IE_FAR_Id, \
7 | IE_ForwardingParameters, IE_FSEID, IE_MeasurementMethod,IE_OuterHeaderCreation, \
8 | IE_NetworkInstance, IE_NodeId, IE_PDI, IE_PDR_Id, IE_Precedence, \
9 | IE_QueryURR, IE_RecoveryTimeStamp, IE_RedirectInformation, IE_ReportType, \
10 | IE_ReportingTriggers, IE_SDF_Filter, IE_SourceInterface, IE_StartTime, \
11 | IE_TimeQuota, IE_UE_IP_Address, IE_URR_Id, IE_UR_SEQN, IE_OuterHeaderRemoval,\
12 | IE_UsageReportTrigger, IE_VolumeMeasurement, IE_ApplicationId, PFCP,IE_FTEID, \
13 | PFCPAssociationSetupRequest, PFCPAssociationSetupResponse, \
14 | PFCPHeartbeatRequest, PFCPHeartbeatResponse, PFCPSessionDeletionRequest, \
15 | PFCPSessionDeletionResponse, PFCPSessionEstablishmentRequest, \
16 | PFCPSessionEstablishmentResponse, PFCPSessionModificationRequest, \
17 | PFCPSessionModificationResponse, PFCPSessionReportRequest
18 | from scapy.contrib.pfcp import PFCPAssociationReleaseRequest
19 | from scapy.layers.l2 import Ether
20 | from scapy.layers.inet import IP, UDP, TCP
21 | from scapy.layers.inet6 import IPv6
22 | from scapy.packet import Raw
23 | from scapy.all import send,sniff
24 | import logging
25 | import threading
26 | import signal,os
27 |
28 | PFCP_CP_IP_V4 = "192.168.0.115"
29 | PFCP_UP_IP_V4 = "192.168.0.118"
30 | N3_IP_V4 = "193.168.1.3"
31 | GNB_IP_V4 = "193.168.1.2"
32 | UE_IP_V4 = "10.10.10.2"
33 |
34 |
def seid():
    # Draw a random 64-bit Session Endpoint Identifier from a v4 UUID
    # (modulo 2**64 keeps only the low 64 bits, same as masking).
    return uuid.uuid4().int % (1 << 64)
37 |
class PfcpSkeleton(object):
    """Minimal PFCP control-plane peer used to exercise the UPF PoC.

    Builds PFCP messages (association, heartbeat, session establishment)
    with scapy and sends them over UDP port 8805 from pfcp_cp_ip to
    pfcp_up_ip.
    """

    def __init__(self, pfcp_cp_ip, pfcp_up_ip):
        self.pfcp_cp_ip = pfcp_cp_ip
        self.pfcp_up_ip = pfcp_up_ip
        self.ue_ip = UE_IP_V4
        # Recovery timestamp: seconds since 1900-01-01 (NTP-style epoch).
        self.ts = int((datetime.now() - datetime(1900, 1, 1)).total_seconds())
        self.seq = 1
        self.nodeId = IE_NodeId(id_type=2, id="ergw")
        logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        self.logger = logging.getLogger(__name__)

    def ie_ue_ip_address(self, SD=0):
        # SD=0 -> UE address used as source (uplink); SD=1 -> destination
        # (downlink).
        return IE_UE_IP_Address(ipv4=self.ue_ip, V4=1, SD=SD)

    def ie_fseid(self):
        # F-SEID identifying this CP session; cur_seid is assigned by
        # establish_session_request() before this is called.
        return IE_FSEID(ipv4=self.pfcp_cp_ip, v4=1, seid=self.cur_seid)

    def associate(self):
        """Send a PFCP Association Setup Request."""
        self.chat(PFCPAssociationSetupRequest(IE_list=[
            IE_RecoveryTimeStamp(timestamp=self.ts),
            self.nodeId
        ]))

    def heartbeatRep(self):
        """Wait for one PFCP packet and answer it if it is a Heartbeat Request."""
        # NOTE(review): interface "veth0" is hard-coded — adjust to the run
        # environment before use.
        pkt = sniff(iface="veth0", filter="udp port 8805", count=1)
        resp = pkt[0][PFCP]
        self.logger.info("REQ: %r" % resp.message_type)
        if resp.message_type == 1:  # message type 1 == Heartbeat Request
            heartReq = resp[PFCPHeartbeatRequest]
            heartRep = PFCPHeartbeatResponse(IE_list=[
                IE_RecoveryTimeStamp(timestamp=self.ts)])
            # Echo the request's sequence number back in the response.
            self.chat(heartRep, resp.seq)

    def heartbeat(self):
        """Send a PFCP Heartbeat Request."""
        resp = self.chat(PFCPHeartbeatRequest(IE_list=[
            IE_RecoveryTimeStamp(timestamp=self.ts)
        ]))

    def establish_session_request(self):
        """Send a Session Establishment Request carrying one uplink and one
        downlink PDR/FAR pair (uplink TEID 1111, downlink encap TEID 2222)."""
        self.cur_seid = seid()
        resp = self.chat(PFCPSessionEstablishmentRequest(IE_list=[
            # UPLINK FAR=1, PDR=1: match GTP-U traffic on N3 and decap it.
            IE_CreateFAR(IE_list=[
                IE_ApplyAction(FORW=1),
                IE_FAR_Id(id=1)
            ]),
            IE_CreatePDR(IE_list=[
                IE_FAR_Id(id=1),
                IE_PDI(IE_list=[
                    IE_NetworkInstance(instance="access"),
                    IE_SDF_Filter(
                        FD=1,
                        flow_description="permit out ip from any to assigned"),
                    IE_SourceInterface(interface="Access"),
                    self.ie_ue_ip_address(SD=0),
                    IE_FTEID(V4=1, TEID=1111, ipv4=N3_IP_V4)
                ]),
                IE_PDR_Id(id=1),
                IE_Precedence(precedence=200),
                IE_OuterHeaderRemoval(),
            ]),

            # DOWNLINK FAR=2, PDR=2: match on UE IP and GTP-U encap towards
            # the gNB with TEID 2222.
            IE_CreateFAR(IE_list=[
                IE_ApplyAction(FORW=1),
                IE_FAR_Id(id=2),
                IE_ForwardingParameters(IE_list=[
                    IE_DestinationInterface(interface="Access"),
                    IE_NetworkInstance(instance="n6"),
                    IE_OuterHeaderCreation(GTPUUDPIPV4=1, TEID=2222, ipv4=GNB_IP_V4)
                ])
            ]),
            IE_CreatePDR(IE_list=[
                IE_FAR_Id(id=2),
                IE_PDI(IE_list=[
                    IE_NetworkInstance(instance="n6"),
                    IE_SourceInterface(interface="Core"),
                    self.ie_ue_ip_address(SD=1)
                ]),
                IE_PDR_Id(id=2),
                IE_Precedence(precedence=200),
                IE_OuterHeaderRemoval(),
            ]),
            self.ie_fseid(),
            IE_NodeId(id_type=2, id="cp")
        ]), seid=self.cur_seid)

    def chat(self, pkt, seq=None, seid=None):
        """Wrap pkt in IP/UDP/PFCP and send it.

        seq: sequence-number override (used when replying to a request);
        when None the internal counter is used and then incremented.
        seid: when given, the PFCP header carries S=1 and this SEID.
        """
        self.logger.info("REQ: %r" % pkt)
        send(
            IP(src=self.pfcp_cp_ip, dst=self.pfcp_up_ip) /
            UDP(sport=8805, dport=8805) /
            PFCP(
                version=1,
                S=0 if seid is None else 1,
                seid=0 if seid is None else seid,
                seq=self.seq if seq is None else seq) /
            pkt)
        if seq is None:
            self.seq += 1

    def signal_fun(self, signum, frame):
        """Signal handler: release the PFCP association, then exit."""
        self.chat(PFCPAssociationReleaseRequest(IE_list=[
            self.nodeId
        ]))
        os._exit(0)
143 |
class HeartBeatThread(threading.Thread):
    """Background worker that answers a fixed number of PFCP heartbeats.

    'ass' is any object exposing heartbeatRep(); 'counter' is the number of
    heartbeat requests to serve before the thread finishes.
    """

    def __init__(self, threadID, name, counter, ass):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name = name
        self.counter = counter
        self.ass = ass

    def run(self):
        # Serve heartbeat requests until the budget reaches zero.
        while self.counter:
            self.ass.heartbeatRep()
            self.counter -= 1
155 |
156 |
if __name__ == "__main__":
    # Drive a minimal CP exchange: one heartbeat, then session establishment.
    client = PfcpSkeleton(PFCP_CP_IP_V4, PFCP_UP_IP_V4)
    client.heartbeat()
    client.establish_session_request()
--------------------------------------------------------------------------------
/src/control/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "flag"
6 | "time"
7 | "fmt"
8 | "net"
9 |
10 | log "github.com/sirupsen/logrus"
11 | "google.golang.org/grpc"
12 |
13 | p4_v1 "github.com/p4lang/p4runtime/go/p4/v1"
14 |
15 | "github.com/antoninbas/p4runtime-go-client/pkg/client"
16 | "github.com/antoninbas/p4runtime-go-client/pkg/signals"
17 | "github.com/antoninbas/p4runtime-go-client/pkg/util/conversion"
18 | "github.com/wmnsk/go-pfcp/ie"
19 | "github.com/wmnsk/go-pfcp/message"
20 | )
21 |
// Defaults for the command-line flags, plus PDR direction constants.
const (
	defaultAddr     = "127.0.0.1:50051" // P4Runtime (stratum) gRPC endpoint
	defaultN4Addr   = "127.0.0.1:8805"  // PFCP N4 UDP listen address
	defaultN3Addr   = "193.168.1.3"     // fallback N3 (GTP-U) address
	defaultDeviceID = 0
	UPLINK          = 1 // access -> core traffic
	DOWNLINK        = 2 // core -> access traffic
)

// Pdr holds the subset of a PFCP Create PDR IE that the datapath needs.
type Pdr struct {
	pdrId               uint16 // rule id from IE_PDR_Id
	fteid_teid          uint32 // local F-TEID TEID (uplink match key)
	fteid_ip            net.IP // local F-TEID IPv4 (the UPF's N3 address)
	ueip                net.IP // UE IPv4 (downlink match key)
	farId               uint32 // id of the associated FAR
	outer_header_remove uint8  // outer-header-removal description; 255 when absent
	direction           int32  // UPLINK or DOWNLINK
}

// Far holds the subset of a PFCP Create FAR IE used for forwarding actions.
type Far struct {
	farId   uint32
	bFoward bool                          // FORW flag from the Apply Action IE
	fwdParm *ie.OuterHeaderCreationFields // non-nil => GTP-U encap parameters
}

// Session state shared between PFCP handling and table translation.
// NOTE(review): unsynchronized; assumes a single N4 handler goroutine — confirm.
var g_farMap map[uint32]Far
var g_n3Ip net.IP // N3 address used as the GTP-U encap source
49 |
50 | func handleStreamMessages(p4RtC *client.Client, messageCh <-chan *p4_v1.StreamMessageResponse) {
51 | for message := range messageCh {
52 | switch m := message.Update.(type) {
53 | case *p4_v1.StreamMessageResponse_Packet:
54 | log.Debugf("Received PacketIn")
55 | case *p4_v1.StreamMessageResponse_IdleTimeoutNotification:
56 | log.Debugf("Received IdleTimeoutNotification")
57 | case *p4_v1.StreamMessageResponse_Error:
58 | log.Errorf("Received StreamError")
59 | default:
60 | log.Errorf("Received unknown stream message %t",m)
61 | }
62 | }
63 | }
64 |
65 | func initialize(p4RtC *client.Client) error {
66 | /*step1: set unic l2 foward table */
67 | /*h1a mac:00:00:00:00:00:1A port 3*/
68 | h1a_mac := "00:00:00:00:00:1A"
69 | h1aMac_p4, _:= conversion.MacToBinary(h1a_mac)
70 | egressPort, _ := conversion.UInt32ToBinary(3, 2)
71 | dmacEntry := p4RtC.NewTableEntry("IngressPipeImpl.l2_exact_table", "IngressPipeImpl.set_egress_port",
72 | []client.MatchInterface{&client.ExactMatch{h1aMac_p4}}, [][]byte{egressPort}, nil)
73 | if err := p4RtC.InsertTableEntry(dmacEntry); err != nil {
74 | return fmt.Errorf("Cannot insert default action for 'dmac': %v", err)
75 | }
76 | /*h1b mac:00:00:00:00:00:1B port 4*/
77 | h1b_mac := "00:00:00:00:00:1B"
78 | h1bMac_p4, _:= conversion.MacToBinary(h1b_mac)
79 | egressPort, _ = conversion.UInt32ToBinary(4, 2)
80 | dmacEntry = p4RtC.NewTableEntry("IngressPipeImpl.l2_exact_table", "IngressPipeImpl.set_egress_port",
81 | []client.MatchInterface{&client.ExactMatch{h1bMac_p4}}, [][]byte{egressPort}, nil)
82 | if err := p4RtC.InsertTableEntry(dmacEntry); err != nil {
83 | return fmt.Errorf("Cannot insert default action for 'dmac': %v", err)
84 | }
85 |
86 | /*step2:set broadcast l2 foward for bypass arp */
87 | /*h1a arp request fwd to h1b(port:4)*/
88 | brdcastMac := "FF:FF:FF:FF:FF:FF"
89 | brdMac_p4,_ := conversion.MacToBinary(brdcastMac)
90 | egressPort, _ = conversion.UInt32ToBinary(4, 2)
91 | dmacEntry = p4RtC.NewTableEntry("IngressPipeImpl.l2_forward_bypass_table", "IngressPipeImpl.set_egress_port",
92 | []client.MatchInterface{&client.ExactMatch{h1aMac_p4},&client.ExactMatch{brdMac_p4}}, [][]byte{egressPort}, nil)
93 | if err := p4RtC.InsertTableEntry(dmacEntry); err != nil {
94 | return fmt.Errorf("Cannot insert default action for 'dmac': %v", err)
95 | }
96 |
97 | /*h1b arp request fwd to h1a(port:3)*/
98 | egressPort, _ = conversion.UInt32ToBinary(3, 2)
99 | dmacEntry = p4RtC.NewTableEntry("IngressPipeImpl.l2_forward_bypass_table", "IngressPipeImpl.set_egress_port",
100 | []client.MatchInterface{&client.ExactMatch{h1bMac_p4},&client.ExactMatch{brdMac_p4}}, [][]byte{egressPort}, nil)
101 | if err := p4RtC.InsertTableEntry(dmacEntry); err != nil {
102 | return fmt.Errorf("Cannot insert default action for 'dmac': %v", err)
103 | }
104 | return nil
105 | }
106 |
107 | func pfcp_tran_uplink(p4RtC *client.Client,pdr *Pdr) error{
108 | log.Debug("pdr id:",pdr.pdrId,"pdr fteid-ip:",pdr.fteid_ip," teid ",pdr.fteid_teid)
109 | dst_addr_p4, err:= conversion.IpToBinary(pdr.fteid_ip.String())
110 | if err != nil{
111 | log.Info("conversion dst_addr_p4 error:",err)
112 | }
113 | teid_p4, err:= conversion.UInt32ToBinary(pdr.fteid_teid, 0)
114 | if err != nil{
115 | log.Info("conversion teid_p4 error:",err)
116 | }
117 |
118 | pdrId_p4, err:= conversion.UInt32ToBinary(uint32(pdr.pdrId),2)
119 | if err != nil{
120 | log.Info("conversion pdrId_p4 error:",err)
121 | }
122 | teidEntry := p4RtC.NewTableEntry("IngressPipeImpl.upf_f_teid_ueip_filter_table", "IngressPipeImpl.set_pdr_id",
123 | []client.MatchInterface{&client.ExactMatch{dst_addr_p4},
124 | &client.ExactMatch{teid_p4}}, [][]byte{pdrId_p4}, nil)
125 | teidEntry.Priority = 1
126 | if err := p4RtC.InsertTableEntry(teidEntry); err != nil {
127 | return fmt.Errorf("Cannot insert 'teidEntry': %v", err)
128 | }
129 | log.Info("pfcp_tran_uplink teidEntry ok!")
130 |
131 | if 0==pdr.outer_header_remove {
132 | pdr_hdrm_entry := p4RtC.NewTableEntry("IngressPipeImpl.upf_pdr_header_rm_table", "IngressPipeImpl.gtpu_decap",
133 | []client.MatchInterface{&client.ExactMatch{pdrId_p4}}, nil, nil)
134 | if err := p4RtC.InsertTableEntry(pdr_hdrm_entry); err != nil {
135 | return fmt.Errorf("Cannot insert 'pdr_hdrm_entry': %v", err)
136 | }
137 | }
138 | log.Info("pfcp_tran_uplink pdr_hdrm_entry ok!!")
139 |
140 | farId_p4, err:= conversion.UInt32ToBinary(pdr.farId,0)
141 | if err != nil{
142 | log.Info("conversion pdrId_p4 error:",err)
143 | }
144 |
145 | pdr_getfar_entry := p4RtC.NewTableEntry("IngressPipeImpl.upf_pdr_getfar_table", "IngressPipeImpl.set_far_id",
146 | []client.MatchInterface{&client.ExactMatch{pdrId_p4}}, [][]byte{farId_p4}, nil)
147 | if err := p4RtC.InsertTableEntry(pdr_getfar_entry); err != nil {
148 | return fmt.Errorf("Cannot insert 'pdr_getfar_entry': %v", err)
149 | }
150 | log.Info("pfcp_tran_uplink pdr_getfar_entry ok!!!")
151 | far, ok:= g_farMap[pdr.farId]
152 | if ok && far.bFoward {
153 | if far.fwdParm == nil {
154 | far_action_entry := p4RtC.NewTableEntry("IngressPipeImpl.upf_far_action_table", "IngressPipeImpl.nop",
155 | []client.MatchInterface{&client.ExactMatch{farId_p4}}, nil, nil)
156 | if err := p4RtC.InsertTableEntry(far_action_entry); err != nil {
157 | return fmt.Errorf("Cannot insert 'far_action_entry': %v", err)
158 | }
159 | log.Info("pfcp_tran_uplink far_action_entry ok!!!!")
160 | }
161 | }
162 | return nil
163 | }
164 |
165 | func pfcp_tran_downlink(p4RtC *client.Client,pdr *Pdr) error {
166 | log.Debug("pdr id:",pdr.pdrId,"pdr ueip:",pdr.ueip)
167 |
168 | pdrId_p4, err:= conversion.UInt32ToBinary(uint32(pdr.pdrId),2)
169 | if err != nil{
170 | log.Info("conversion pdrId_p4 error:",err)
171 | }
172 | dst_addr_p4, err:= conversion.IpToBinary(pdr.ueip.String())
173 | if err != nil{
174 | log.Info("conversion dst_addr_p4 error:",err)
175 | }
176 |
177 | ueEntry := p4RtC.NewTableEntry("IngressPipeImpl.upf_ue_filter_table", "IngressPipeImpl.set_pdr_id",
178 | []client.MatchInterface{&client.ExactMatch{dst_addr_p4}}, [][]byte{pdrId_p4}, nil)
179 | if err := p4RtC.InsertTableEntry(ueEntry); err != nil {
180 | return fmt.Errorf("Cannot insert 'ueEntry': %v", err)
181 | }
182 | log.Info("pfcp_tran_downlink ueEntry ok!")
183 |
184 | farId_p4, err:= conversion.UInt32ToBinary(pdr.farId,0)
185 | if err != nil{
186 | log.Info("conversion pdrId_p4 error:",err)
187 | }
188 | pdr_getfar_entry := p4RtC.NewTableEntry("IngressPipeImpl.upf_pdr_getfar_table", "IngressPipeImpl.set_far_id",
189 | []client.MatchInterface{&client.ExactMatch{pdrId_p4}}, [][]byte{farId_p4}, nil)
190 | if err := p4RtC.InsertTableEntry(pdr_getfar_entry); err != nil {
191 | return fmt.Errorf("Cannot insert 'pdr_getfar_entry': %v", err)
192 | }
193 | log.Info("pfcp_tran_downlink pdr_getfar_entry ok!!")
194 |
195 | /*far table*/
196 | far, ok := g_farMap[pdr.farId]
197 | log.Debug("farid:", pdr.farId,"map find:",ok,"bFoward:",far.bFoward,"fwdParam:",far.fwdParm)
198 | if ok && far.bFoward && far.fwdParm != nil{
199 | teid_p4, _:= conversion.UInt32ToBinary(far.fwdParm.TEID,0)
200 | n3ip_p4, _:= conversion.IpToBinary(g_n3Ip.String())
201 | gnbIp_p4, _:= conversion.IpToBinary(far.fwdParm.IPv4Address.String())
202 | far_action_entry := p4RtC.NewTableEntry("IngressPipeImpl.upf_far_action_table", "IngressPipeImpl.gtpu_encap",
203 | []client.MatchInterface{&client.ExactMatch{farId_p4}}, [][]byte{teid_p4,n3ip_p4,gnbIp_p4}, nil)
204 | if err := p4RtC.InsertTableEntry(far_action_entry); err != nil {
205 | return fmt.Errorf("Cannot insert 'far_action_entry': %v", err)
206 | }
207 | log.Info("pfcp_tran_downlink far_action_entry ok!!!")
208 | }
209 | return nil
210 | }
211 | func pfcp_rule_tran_p4table(p4RtC *client.Client,pdr *Pdr) error{
212 | if pdr.direction == UPLINK{
213 | return pfcp_tran_uplink(p4RtC,pdr)
214 | }else{
215 | return pfcp_tran_downlink(p4RtC,pdr)
216 | }
217 | }
218 |
219 |
220 | func pfcp_HeartBeat_handle(msg message.Message,addr net.Addr){
221 | hbreq, ok := msg.(*message.HeartbeatRequest)
222 | if !ok {
223 | log.Info("got unexpected message: %s, from: %s", msg.MessageTypeName(),addr)
224 | }
225 |
226 | ts, err := hbreq.RecoveryTimeStamp.RecoveryTimeStamp()
227 | if err != nil {
228 | log.Info("got Heartbeat Request with invalid TS: %s, from: %s", err, addr)
229 | } else {
230 | log.Info("got Heartbeat Request with TS:", ts," from:" ,addr)
231 | }
232 | }
233 |
234 | func pfcp_SessionEstablish_handle(msg message.Message,addr net.Addr,p4RtC *client.Client){
235 | if g_farMap == nil{
236 | g_farMap = make(map[uint32]Far, 10)
237 | }
238 | pdrs := make([]Pdr, 0)
239 | req, ok := msg.(*message.SessionEstablishmentRequest)
240 | if !ok {
241 | log.Info("got unexpected message: %s, from: %s", msg.MessageTypeName(),addr)
242 | }
243 | /**session decode
244 | * f-seid
245 | */
246 | g_n3Ip = net.ParseIP(defaultN3Addr)
247 |
248 | fseid,err := req.CPFSEID.FSEID()
249 | if err != nil{
250 | log.Info("SessionEstablish Request with invalid fseid err:",err," from:", addr)
251 | } else{
252 | log.Info("cp fseid-seid:", fseid.SEID, " addr:",fseid.IPv4Address)
253 | }
254 |
255 | /*pdr decode
256 | * pdrId,pdi->sourceInterface,pdi->f-teid,pdi->ueip
257 | */
258 | for _,crtPdrItem := range req.CreatePDR{
259 | var pdr Pdr
260 | pdrId,err:=crtPdrItem.PDRID()
261 | if err != nil{
262 | log.Info("SessionEstablish Request with invalid pdrId err:",err," from:", addr)
263 | } else{
264 | log.Info("pdrId:", pdrId, " addr:",addr)
265 | pdr.pdrId = pdrId
266 | }
267 |
268 | farId,err:=crtPdrItem.FARID()
269 | if err == nil{
270 | pdr.farId = farId
271 | }
272 |
273 | sourceInt,err := crtPdrItem.SourceInterface()
274 | if err == nil{
275 | log.Info("pdi source interface:", sourceInt)
276 | }
277 | //"Access" interface value is 0
278 | if 0 == sourceInt{
279 | pdr.direction = UPLINK
280 | }else{
281 | pdr.direction = DOWNLINK
282 | }
283 |
284 | /*go-pfcp FTEID() has a bug ,need enumerate to find PDI*/
285 | crtIEs,_ := crtPdrItem.CreatePDR()
286 | if crtIEs != nil{
287 | for _,item := range crtIEs{
288 | if item.Type == ie.PDI{
289 | pdiIEs ,_ := item.PDI()
290 | for _,pdiIe := range pdiIEs{
291 | if pdiIe.Type == ie.FTEID{
292 | fteid, _ := pdiIe.FTEID()
293 | if fteid != nil{
294 | log.Info("fteid teid:",fteid.TEID," fteid addr:",fteid.IPv4Address)
295 | pdr.fteid_teid = fteid.TEID
296 | pdr.fteid_ip = fteid.IPv4Address
297 | g_n3Ip = pdr.fteid_ip
298 | }
299 | }
300 | }
301 | }
302 | }
303 | }
304 |
305 | ueip,err := crtPdrItem.UEIPAddress()
306 | if err == nil{
307 | log.Info("ueip:",ueip.IPv4Address)
308 | pdr.ueip = ueip.IPv4Address
309 | }
310 |
311 | outerRm,err:=crtPdrItem.OuterHeaderRemovalDescription()
312 | if err == nil{
313 | log.Info("outerRm:",outerRm)
314 | pdr.outer_header_remove = outerRm
315 | }else{
316 | pdr.outer_header_remove = 255
317 | }
318 | pdrs = append(pdrs, pdr)
319 | }
320 | /*far decode*/
321 | for _,crtFarItem := range req.CreateFAR{
322 | var far Far
323 | farId,err := crtFarItem.FARID()
324 | if err == nil{
325 | far.farId = farId
326 | }else{
327 | log.Error("CreateFAR decode farid error ",err)
328 | }
329 | bForw := crtFarItem.HasFORW()
330 | if err == nil{
331 | far.bFoward = bForw
332 | }
333 | frIEs,err := crtFarItem.ForwardingParameters()
334 | for _,frIe := range frIEs{
335 | if frIe.Type == ie.OuterHeaderCreation{
336 | outerHeaderField,_:= frIe.OuterHeaderCreation()
337 | if outerHeaderField != nil{
338 | far.fwdParm = outerHeaderField
339 | log.Info("far.fwdParm.TEID",outerHeaderField.TEID)
340 | log.Info("outerHeaderField",outerHeaderField)
341 | }
342 | }
343 | }
344 | log.Debug("g_farMap[far.farId]:",far.farId)
345 | g_farMap[far.farId] = far
346 | }
347 |
348 | log.Debug("pdrs length:",len(pdrs))
349 | for _, pdr_item:=range pdrs{
350 | err = pfcp_rule_tran_p4table(p4RtC,&pdr_item)
351 | if err != nil{
352 | log.Error("pfcp_rule_tran_p4table error:",err)
353 | }
354 | }
355 | }
356 |
357 | func n4Server(listen *string,p4RtC *client.Client){
358 | laddr, err := net.ResolveUDPAddr("udp", *listen)
359 | if err != nil {
360 | log.Fatalf("Cannot resolve n4 addr: %v", err)
361 | }
362 | conn, err := net.ListenUDP("udp", laddr)
363 | if err != nil {
364 | log.Fatalf("Cannot start n4 socket: %v",err)
365 | }
366 |
367 | buf := make([]byte, 1500)
368 | for{
369 | log.Info("input")
370 | n, addr, err := conn.ReadFrom(buf)
371 | if err != nil {
372 | log.Fatal(err)
373 | }
374 | log.Info("message len:%d",n)
375 | msg, err := message.Parse(buf[:n])
376 | if err != nil {
377 | log.Info("ignored undecodable message: %x, error: %s msg:%s", buf[:n], err,msg)
378 | continue
379 | }
380 | switch(msg.MessageTypeName()){
381 | case "Heartbeat Request":
382 | log.Info("message.HeartbeatRequest")
383 | pfcp_HeartBeat_handle(msg,addr)
384 | case "Session Establishment Request":
385 | log.Info("message.SessionEstablishmentRequest")
386 | pfcp_SessionEstablish_handle(msg,addr,p4RtC)
387 | default:
388 | log.Info("unknow pfcp message")
389 | }
390 | }
391 | }
// main parses flags, connects to the P4Runtime server, waits for mastership,
// pushes the forwarding pipeline, installs the static L2 entries and starts
// the N4 PFCP server. It blocks until a termination signal arrives.
func main() {
	var addr string
	flag.StringVar(&addr, "addr", defaultAddr, "P4Runtime server socket")
	var deviceID uint64
	flag.Uint64Var(&deviceID, "device-id", defaultDeviceID, "Device id")
	var verbose bool
	flag.BoolVar(&verbose, "verbose", false, "Enable verbose mode with debug log messages")
	var binPath string
	flag.StringVar(&binPath, "bin", "", "Path to P4 bin (not needed for bmv2 simple_switch_grpc)")
	var p4infoPath string
	flag.StringVar(&p4infoPath, "p4info", "", "Path to P4Info (not needed for bmv2 simple_switch_grpc)")
	var n4Addr string
	flag.StringVar(&n4Addr, "n4addr", defaultN4Addr, "N4 server socket")
	flag.Parse()

	if verbose {
		log.SetLevel(log.DebugLevel)
	}

	if binPath == "" || p4infoPath == "" {
		log.Fatalf("Missing .bin or P4Info")
	}

	log.Infof("Connecting to server at %s", addr)
	// NOTE(review): insecure gRPC channel; acceptable for this PoC only.
	conn, err := grpc.Dial(addr, grpc.WithInsecure())
	if err != nil {
		log.Fatalf("Cannot connect to server: %v", err)
	}
	defer conn.Close()

	c := p4_v1.NewP4RuntimeClient(conn)
	/*
		resp, err := c.Capabilities(context.Background(), &p4_v1.CapabilitiesRequest{})
		if err != nil {
			log.Fatalf("Error in Capabilities RPC: %v", err)
		}
		log.Infof("P4Runtime server version is %s", resp.P4RuntimeApiVersion)
	*/
	stopCh := signals.RegisterSignalHandlers()

	// Arbitrary non-zero election id for the mastership arbitration.
	electionID := p4_v1.Uint128{High: 0, Low: 2}

	p4RtC := client.NewClient(c, deviceID, electionID)
	mastershipCh := make(chan bool)
	messageCh := make(chan *p4_v1.StreamMessageResponse, 1000)
	defer close(messageCh)
	go p4RtC.Run(stopCh, mastershipCh, messageCh)

	// waitCh is signalled exactly once, on first mastership acquisition.
	waitCh := make(chan struct{})

	go func() {
		sent := false
		for isMaster := range mastershipCh {
			if isMaster {
				log.Infof("We are master!")
				if !sent {
					waitCh <- struct{}{}
					sent = true
				}
			} else {
				log.Infof("We are not master!")
			}
		}
	}()

	// it would also be safe to spawn multiple goroutines to handle messages from the channel
	go handleStreamMessages(p4RtC, messageCh)

	// Give mastership 5 seconds to arrive before aborting.
	timeout := 5 * time.Second
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	select {
	case <-ctx.Done():
		log.Fatalf("Could not acquire mastership within %v", timeout)
	case <-waitCh:
	}

	log.Info("Setting forwarding pipe")
	if err := p4RtC.SetFwdPipe(binPath, p4infoPath); err != nil {
		log.Fatalf("Error when setting forwarding pipe: %v", err)
	}

	if err := initialize(p4RtC); err != nil {
		log.Fatalf("Error when initializing defaults: %v", err)
	}

	/* N4 server start */
	go n4Server(&n4Addr, p4RtC)

	log.Info("Do Ctrl-C to quit")
	<-stopCh
	log.Info("Stopping client")
}
485 |
486 |
--------------------------------------------------------------------------------
/src/datapath/upf.p4:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2019-present Open Networking Foundation
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | // Any P4 program usually starts by including the P4 core library and the
18 | // architecture definition, v1model in this case.
19 | // https://github.com/p4lang/p4c/blob/master/p4include/core.p4
20 | // https://github.com/p4lang/p4c/blob/master/p4include/v1model.p4
#include <core.p4>
#include <v1model.p4>
23 |
24 | #ifndef _BOOL
25 | #define _BOOL bool
26 | #endif
27 | // *** V1MODEL
28 | //
29 | // V1Model is a P4_16 architecture that defines 7 processing blocks.
30 | //
31 | // +------+ +------+ +-------+ +-------+ +------+ +------+ +--------+
32 | // ->|PARSER|->|VERIFY|->|INGRESS|->|TRAFFIC|->|EGRESS|->|UPDATE|->+DEPARSER|->
33 | // | | |CKSUM | |PIPE | |MANAGER| |PIPE | |CKSUM | | |
// +------+  +------+  +-------+  +-------+  +------+  +------+  +--------+
35 | //
36 | // All blocks are P4 programmable, except for the Traffic Manager, which is
37 | // fixed-function. In the rest of this P4 program, we provide an implementation
38 | // for each one of the 6 programmable blocks.
39 |
40 | //------------------------------------------------------------------------------
41 | // PRE-PROCESSOR constants
42 | // Can be defined at compile time.
43 | //------------------------------------------------------------------------------
44 |
45 | // CPU_PORT specifies the P4 port number associated to packet-in and packet-out.
46 | // All packets forwarded via this port will be delivered to the controller as
47 | // PacketIn messages. Similarly, PacketOut messages from the controller will be
48 | // seen by the P4 pipeline as coming from the CPU_PORT.
49 | #define CPU_PORT 255
50 |
51 | // CPU_CLONE_SESSION_ID specifies the mirroring session for packets to be cloned
52 | // to the CPU port. Packets associated with this session ID will be cloned to
53 | // the CPU_PORT as well as being transmitted via their egress port as set by the
54 | // bridging/routing/acl table. For cloning to work, the P4Runtime controller
55 | // needs first to insert a CloneSessionEntry that maps this session ID to the
56 | // CPU_PORT.
57 | #define CPU_CLONE_SESSION_ID 99
58 |
59 | #define UDP_PORT_GTPU 2152
60 | #define GTP_GPDU 0xff
61 | #define GTPU_VERSION 0x01
62 | #define GTP_PROTOCOL_TYPE_GTP 0x01
63 | #define ETH_HDR_SIZE 14
64 | #define IPV4_HDR_SIZE 20
65 | #define UDP_HDR_SIZE 8
66 | #define GTP_HDR_SIZE 8
67 | #define IP_VERSION_4 4
68 | //------------------------------------------------------------------------------
69 | // TYPEDEF DECLARATIONS
70 | // To favor readability.
71 | //------------------------------------------------------------------------------
// Port number type used by v1model intrinsic metadata (9-bit).
typedef bit<9> port_num_t;
typedef bit<48> mac_addr_t;
typedef bit<16> mcast_group_id_t;
typedef bit<32> ipv4_addr_t;
typedef bit<128> ipv6_addr_t;
typedef bit<16> l4_port_t;
// Traffic direction through the UPF (see UPF_DIR_* constants below).
typedef bit<2> direction_t;
// FAR = Forwarding Action Rule identifier (PFCP / N4 session rules).
typedef bit<32> far_id_t;
// PDR = Packet Detection Rule identifier (PFCP / N4 session rules).
typedef bit<16> pdr_id_t;
// TEID = GTP-U Tunnel Endpoint Identifier.
typedef bit<32> teid_t;
typedef bit<16> far_action_id;

//------------------------------------------------------------------------------
// CONSTANT VALUES
//------------------------------------------------------------------------------
const bit<16> ETHERTYPE_IPV4 = 0x0800;
const bit<16> ETHERTYPE_IPV6 = 0x86dd;

const bit<8> IP_PROTO_ICMP = 1;
const bit<8> IP_PROTO_TCP = 6;
const bit<8> IP_PROTO_UDP = 17;
const bit<8> IP_PROTO_ICMPV6 = 58;

// IPv6 all-nodes multicast MAC prefix form (33:33:00:00:00:01, RFC 2464).
const mac_addr_t IPV6_MCAST_01 = 0x33_33_00_00_00_01;

const bit<8> ICMP6_TYPE_NS = 135;
const bit<8> ICMP6_TYPE_NA = 136;
const bit<8> NDP_OPT_TARGET_LL_ADDR = 2;
const bit<32> NDP_FLAG_ROUTER = 0x80000000;
const bit<32> NDP_FLAG_SOLICITED = 0x40000000;
const bit<32> NDP_FLAG_OVERRIDE = 0x20000000;
// FAR "Apply Action" encodings installed by the control plane.
const bit<16> FAR_ACTION_FORW = 0x2;
const bit<16> FAR_ACTION_DROP = 0x1;

// Direction of a packet relative to the mobile network:
// uplink = UE -> data network, downlink = data network -> UE.
const direction_t UPF_DIR_UPLINK = 2w1;
const direction_t UPF_DIR_DOWNLINK = 2w2;

const bit<8> DEFAULT_IPV4_TTL = 64;
const bit<4> IPV4_MIN_IHL = 5;
// NOTE(review): duplicates IP_PROTO_UDP above — presumably kept for readability
// in the encap action; consider consolidating.
const bit<8> PROTO_UDP = 17;
112 | //------------------------------------------------------------------------------
113 | // HEADER DEFINITIONS
114 | //------------------------------------------------------------------------------
// Ethernet II header.
header ethernet_t {
    mac_addr_t dst_addr;
    mac_addr_t src_addr;
    bit<16> ether_type;
}
120 |
// IPv4 header (RFC 791), with the ToS byte split into DSCP + ECN.
header ipv4_t {
    bit<4> version;
    bit<4> ihl;
    bit<6> dscp;
    bit<2> ecn;
    bit<16> total_len;
    bit<16> identification;
    bit<3> flags;
    bit<13> frag_offset;
    bit<8> ttl;
    bit<8> protocol;
    bit<16> hdr_checksum;
    bit<32> src_addr;
    bit<32> dst_addr;
}
136 |
// IPv6 header (RFC 8200).
header ipv6_t {
    bit<4> version;
    bit<8> traffic_class;
    bit<20> flow_label;
    bit<16> payload_len;
    bit<8> next_hdr;
    bit<8> hop_limit;
    bit<128> src_addr;
    bit<128> dst_addr;
}
147 |
// TCP header (without options).
header tcp_t {
    bit<16> src_port;
    bit<16> dst_port;
    bit<32> seq_no;
    bit<32> ack_no;
    bit<4> data_offset;
    bit<3> res;
    bit<3> ecn;
    bit<6> ctrl;
    bit<16> window;
    bit<16> checksum;
    bit<16> urgent_ptr;
}
161 |
// UDP header. Reused for both the outer (GTP-U tunnel) and inner
// (user payload) UDP layers — see parsed_headers_t.
header udp_t {
    bit<16> src_port;
    bit<16> dst_port;
    bit<16> len;
    bit<16> checksum;
}
168 |
169 | // GTPU v1
// GTP-U v1 mandatory 8-byte header (no optional/extension fields parsed).
header gtpu_t {
    bit<3> version;     /* GTP protocol version (1 for GTP-U) */
    bit<1> pt;          /* protocol type (1 = GTP) */
    bit<1> spare;       /* reserved */
    bit<1> ex_flag;     /* next extension hdr present? */
    bit<1> seq_flag;    /* sequence no. present? */
    bit<1> npdu_flag;   /* N-PDU number present? */
    bit<8> msgtype;     /* message type (0xff = G-PDU, i.e. user data) */
    bit<16> msglen;     /* length of payload after this header */
    bit<32> teid;       /* tunnel endpoint id */
}
181 |
// Per-packet UPF processing state, carried in local metadata between the
// PDR/FAR tables of the ingress pipeline.
struct upf_meta_t {
    direction_t direction;      // UPF_DIR_UPLINK or UPF_DIR_DOWNLINK
    bit<16> ipv4_len;
    teid_t teid;
    bit<16> tunnel_src_port;
    bit<32> tunnel_src_addr;
    bit<32> tunnel_dst_addr;
    pdr_id_t pdr_id;            // matched Packet Detection Rule
    far_id_t far_id;            // Forwarding Action Rule bound to the PDR
    _BOOL pdr_hit;
    _BOOL far_dropped;
    _BOOL needs_gtpu_encap;
    _BOOL needs_gtpu_decap;
}
196 |
// ICMP echo-style header.
// NOTE(review): the 64-bit timestamp field assumes echo payloads carry a
// timestamp (as e.g. ping does) — confirm against the traffic generators.
header icmp_t {
    bit<8> type;
    bit<8> icmp_code;
    bit<16> checksum;
    bit<16> identifier;
    bit<16> sequence_number;
    bit<64> timestamp;
}
205 |
// ICMPv6 common header (RFC 4443).
header icmpv6_t {
    bit<8> type;
    bit<8> code;
    bit<16> checksum;
}
211 |
// NDP NS/NA body (RFC 4861) with a single target-link-layer-address option.
header ndp_t {
    bit<32> flags;
    ipv6_addr_t target_ipv6_addr;
    // NDP option.
    bit<8> type;
    bit<8> length;
    bit<48> target_mac_addr;
}
220 |
221 | // Packet-in header. Prepended to packets sent to the CPU_PORT and used by the
222 | // P4Runtime server (Stratum) to populate the PacketIn message metadata fields.
223 | // Here we use it to carry the original ingress port where the packet was
224 | // received.
// Packet-in header. Prepended to packets sent to the CPU_PORT and used by the
// P4Runtime server to populate the PacketIn message metadata fields.
// Carries the original ingress port where the packet was received.
@controller_header("packet_in")
header packet_in_t {
    port_num_t ingress_port;
    bit<7> _pad;   // pad to a byte boundary (9 + 7 = 16 bits)
}
230 |
231 | // Packet-out header. Prepended to packets received from the CPU_PORT. Fields of
232 | // this header are populated by the P4Runtime server based on the P4Runtime
233 | // PacketOut metadata fields. Here we use it to inform the P4 pipeline on which
234 | // port this packet-out should be transmitted.
// Packet-out header. Prepended to packets received from the CPU_PORT; fields
// are populated by the P4Runtime server from PacketOut metadata. Tells the
// pipeline on which port this packet-out should be transmitted.
@controller_header("packet_out")
header packet_out_t {
    port_num_t egress_port;
    bit<7> _pad;   // pad to a byte boundary (9 + 7 = 16 bits)
}
240 |
241 | // We collect all headers under the same data structure, associated with each
242 | // packet. The goal of the parser is to populate the fields of this struct.
// All headers associated with a packet; the parser's job is to populate this.
// The gtpu_ipv4/gtpu_udp/outer_gtpu slots are only made valid by the encap
// action, while gtpu/inner_ipv4/inner_udp are filled when parsing a received
// GTP-U tunnel packet.
struct parsed_headers_t {
    packet_out_t packet_out;
    packet_in_t packet_in;
    ethernet_t ethernet;
    ipv4_t ipv4;
    ipv6_t ipv6;
    tcp_t tcp;
    udp_t udp;
    icmpv6_t icmpv6;
    ndp_t ndp;
    icmp_t icmp;

    /*gtpu*/
    ipv4_t gtpu_ipv4;     // outer IPv4 added on encap
    udp_t gtpu_udp;       // outer UDP (port 2152) added on encap
    gtpu_t outer_gtpu;    // GTP-U header added on encap
    gtpu_t gtpu;          // GTP-U header parsed from incoming tunnel packets
    ipv4_t inner_ipv4;    // user IPv4 inside the tunnel
    udp_t inner_udp;      // user UDP inside the tunnel
}
263 |
264 |
265 | //------------------------------------------------------------------------------
266 | // USER-DEFINED METADATA
267 | // User-defined data structures associated with each packet.
268 | //------------------------------------------------------------------------------
// User-defined per-packet metadata.
struct local_metadata_t {
    l4_port_t l4_src_port;   // L4 ports copied from TCP/UDP (outer or inner)
    l4_port_t l4_dst_port;
    bool is_multicast;
    bit<16> ip_eth_type;
    bit<8> ip_proto;
    upf_meta_t upf;          // UPF PDR/FAR processing state
}
277 |
278 | // *** INTRINSIC METADATA
279 | //
280 | // The v1model architecture also defines an intrinsic metadata structure, which
281 | // fields are automatically populated by the target before feeding the
282 | // packet to the parser. For convenience, we provide here its definition:
283 | /*
284 | struct standard_metadata_t {
285 | bit<9> ingress_port;
286 | bit<9> egress_spec; // Set by the ingress pipeline
287 | bit<9> egress_port; // Read-only, available in the egress pipeline
288 | bit<32> instance_type;
289 | bit<32> packet_length;
290 | bit<48> ingress_global_timestamp;
291 | bit<48> egress_global_timestamp;
292 | bit<16> mcast_grp; // ID for the mcast replication table
293 | bit<1> checksum_error; // 1 indicates that verify_checksum() method failed
294 |
295 | // Etc... See v1model.p4 for the complete definition.
296 | }
297 | */
298 |
299 |
300 | //------------------------------------------------------------------------------
301 | // 1. PARSER IMPLEMENTATION
302 | //
303 | // Described as a state machine with one "start" state and two final states,
304 | // "accept" (indicating successful parsing) and "reject" (indicating a parsing
305 | // failure, not used here). Each intermediate state can specify the next state
306 | // by using a select statement over the header fields extracted, or other
307 | // values.
308 | //------------------------------------------------------------------------------
parser ParserImpl (packet_in packet,
                   out parsed_headers_t hdr,
                   inout local_metadata_t local_metadata,
                   inout standard_metadata_t standard_metadata)
{
    // We assume the first header will always be the Ethernet one, unless the
    // packet is a packet-out coming from the CPU_PORT.
    state start {
        transition select(standard_metadata.ingress_port) {
            CPU_PORT: parse_packet_out;
            default: parse_ethernet;
        }
    }

    // Strip the controller's packet-out header before normal parsing.
    state parse_packet_out {
        packet.extract(hdr.packet_out);
        transition parse_ethernet;
    }

    state parse_ethernet {
        packet.extract(hdr.ethernet);
        transition select(hdr.ethernet.ether_type){
            ETHERTYPE_IPV4: parse_ipv4;
            ETHERTYPE_IPV6: parse_ipv6;
            default: accept;
        }
    }

    state parse_ipv4 {
        packet.extract(hdr.ipv4);
        local_metadata.ip_proto = hdr.ipv4.protocol;
        local_metadata.ip_eth_type = ETHERTYPE_IPV4;
        // NOTE(review): no IHL/TTL verification here — header verification,
        // if needed, would go in the VerifyChecksum block.
        transition select(hdr.ipv4.protocol) {
            IP_PROTO_TCP: parse_tcp;
            IP_PROTO_UDP: parse_udp;
            IP_PROTO_ICMP: parse_icmp;
            default: accept;
        }
    }

    state parse_ipv6 {
        packet.extract(hdr.ipv6);
        transition select(hdr.ipv6.next_hdr) {
            IP_PROTO_TCP: parse_tcp;
            IP_PROTO_UDP: parse_udp;
            IP_PROTO_ICMPV6: parse_icmpv6;
            default: accept;
        }
    }

    state parse_tcp {
        packet.extract(hdr.tcp);
        // For convenience, we copy the port numbers on generic metadata fields
        // that are independent of the protocol type (TCP or UDP). This makes it
        // easier to specify the ECMP hash inputs, or when defining match fields
        // for the ACL table.
        local_metadata.l4_src_port = hdr.tcp.src_port;
        local_metadata.l4_dst_port = hdr.tcp.dst_port;
        transition accept;
    }

    state parse_udp {
        packet.extract(hdr.udp);
        // Same here...
        local_metadata.l4_src_port = hdr.udp.src_port;
        local_metadata.l4_dst_port = hdr.udp.dst_port;

        // UDP destination port 2152 marks a GTP-U tunnel packet.
        transition select(hdr.udp.dst_port){
            UDP_PORT_GTPU: parse_gtpu;
            default: accept;
        }
    }

    state parse_gtpu {
        // transition select(hdr.ipv4.dst_addr[31:32-S1U_SGW_PREFIX_LEN]) {
        // Avoid parsing GTP and inner headers if we know this GTP packet
        // is not to be processed by this switch.
        // FIXME: use parser value sets when support is ready in ONOS.
        // To set the S1U_SGW_PREFIX value at runtime.
        //S1U_SGW_PREFIX[31:32-S1U_SGW_PREFIX_LEN]: do_parse_gtpu;
        // }
        transition do_parse_gtpu;
    }

    state do_parse_gtpu {
        packet.extract(hdr.gtpu);
        transition parse_inner_ipv4;
    }

    // Parse the user (inner) IPv4 packet carried inside the GTP-U tunnel.
    // NOTE(review): inner TCP/ICMP reuse the outer hdr.tcp/hdr.icmp slots;
    // only UDP has a dedicated inner header — confirm the deparser emits
    // these in the intended order.
    state parse_inner_ipv4 {
        packet.extract(hdr.inner_ipv4);
        transition select(hdr.inner_ipv4.protocol) {
            IP_PROTO_TCP: parse_tcp;
            IP_PROTO_UDP: parse_inner_udp;
            IP_PROTO_ICMP: parse_icmp;
            default: accept;
        }
    }

    state parse_inner_udp {
        packet.extract(hdr.inner_udp);
        // Inner ports overwrite the outer (GTP-U) ports in metadata, so ACL
        // and hashing see the user flow, not the tunnel.
        local_metadata.l4_src_port = hdr.inner_udp.src_port;
        local_metadata.l4_dst_port = hdr.inner_udp.dst_port;
        transition accept;
    }

    state parse_icmp {
        packet.extract(hdr.icmp);
        transition accept;
    }

    state parse_icmpv6 {
        packet.extract(hdr.icmpv6);
        transition select(hdr.icmpv6.type) {
            ICMP6_TYPE_NS: parse_ndp;
            ICMP6_TYPE_NA: parse_ndp;
            default: accept;
        }
    }

    state parse_ndp {
        packet.extract(hdr.ndp);
        transition accept;
    }
}
435 |
436 | //------------------------------------------------------------------------------
437 | // 2. CHECKSUM VERIFICATION
438 | //
439 | // Used to verify the checksum of incoming packets.
440 | //------------------------------------------------------------------------------
control VerifyChecksumImpl(inout parsed_headers_t hdr,
                           inout local_metadata_t meta)
{
    // Intentionally a no-op: incoming checksums are not verified here.
    // Packets with a bad checksum are forwarded unchanged and the end
    // hosts are left to detect the error.
    apply { }
}
448 |
449 |
450 | //------------------------------------------------------------------------------
451 | // 3. INGRESS PIPELINE IMPLEMENTATION
452 | //
453 | // All packets will be processed by this pipeline right after the parser block.
454 | // It provides the logic for forwarding behaviors such as:
455 | // - L2 bridging
456 | // - L3 routing
457 | // - ACL
458 | // - NDP handling
459 | //
460 | // The first part of the block defines the match-action tables needed for the
461 | // different behaviors, while the implementation is concluded with the *apply*
462 | // statement, where we specify the order of tables in the pipeline.
463 | //
464 | // This block operates on the parsed headers (hdr), the user-defined metadata
465 | // (local_metadata), and the architecture-specific instrinsic metadata
466 | // (standard_metadata).
467 | //------------------------------------------------------------------------------
control IngressPipeImpl (inout parsed_headers_t hdr,
                         inout local_metadata_t local_metadata,
                         inout standard_metadata_t standard_metadata) {

    // Drop action definition, shared by many tables. Hence we define it on top.
    action drop() {
        // Sets an architecture-specific metadata field to signal that the
        // packet should be dropped at the end of this pipeline.
        mark_to_drop(standard_metadata);
    }

    // Explicit "do nothing" action, used as a visible default for several
    // UPF tables below.
    action nop() {
        NoAction();
    }
    // *** L2 BRIDGING
    //
    // Here we define tables to forward packets based on their Ethernet
    // destination address. There are two types of L2 entries that we
    // need to support:
    //
    // 1. Unicast entries: which will be filled in by the control plane when the
    //    location (port) of new hosts is learned.
    // 2. Broadcast/multicast entries: used to replicate NDP Neighbor
    //    Solicitation (NS) messages to all host-facing ports;
    //
    // For (2), unlike ARP messages in IPv4 which are broadcasted to Ethernet
    // destination address FF:FF:FF:FF:FF:FF, NDP messages are sent to special
    // Ethernet addresses specified by RFC2464. These addresses are prefixed
    // with 33:33 and the last four octets are the last four octets of the IPv6
    // destination multicast address. The most straightforward way of matching
    // on such IPv6 broadcast/multicast packets, without digging in the details
    // of RFC2464, is to use a ternary match on 33:33:**:**:**:**, where * means
    // "don't care".
    //
    // For this reason, we define two tables. One that matches in an exact
    // fashion (easier to scale on switch ASIC memory) and one that uses ternary
    // matching (which requires more expensive TCAM memories, usually much
    // smaller).

    // --- l2_exact_table (for unicast entries) --------------------------------

    action set_egress_port(port_num_t port_num) {
        standard_metadata.egress_spec = port_num;
    }

    table l2_exact_table {
        key = {
            hdr.ethernet.dst_addr: exact;
        }
        actions = {
            set_egress_port;
            @defaultonly drop;
        }
        const default_action = drop;
        // The @name annotation is used here to provide a name to this table
        // counter, as it will be needed by the compiler to generate the
        // corresponding P4Info entity.
        @name("l2_exact_table_counter")
        counters = direct_counter(CounterType.packets_and_bytes);
    }

    // --- l2_ternary_table (for broadcast/multicast entries) ------------------

    action set_multicast_group(mcast_group_id_t gid) {
        // gid will be used by the Packet Replication Engine (PRE) in the
        // Traffic Manager--located right after the ingress pipeline, to
        // replicate a packet to multiple egress ports, specified by the control
        // plane by means of P4Runtime MulticastGroupEntry messages.
        standard_metadata.mcast_grp = gid;
        local_metadata.is_multicast = true;
    }

    table l2_ternary_table {
        key = {
            hdr.ethernet.dst_addr: ternary;
        }
        actions = {
            set_multicast_group;
            @defaultonly drop;
        }
        const default_action = drop;
        @name("l2_ternary_table_counter")
        counters = direct_counter(CounterType.packets_and_bytes);
    }

    // *** L3 ROUTING
    //
    // Here we define tables to route packets based on their IPv6 destination
    // address. We assume the following:
    //
    // * Not all packets need to be routed, but only those that have destination
    //   MAC address the "router MAC" address, which we call "my_station" MAC.
    //   Such address is defined at runtime by the control plane.
    // * If a packet matches a routing entry, it should be forwarded to a
    //   given next hop and the packet's Ethernet addresses should be modified
    //   accordingly (source set to my_station MAC and destination to the next
    //   hop one);
    // * When routing packets to a different leaf across the spines, leaf
    //   switches should be able to use ECMP to distribute traffic via multiple
    //   links.

    // --- my_station_table ----------------------------------------------------

    // Matches on all possible my_station MAC addresses associated with this
    // switch. This table defines only one action that does nothing to the
    // packet. Later in the apply block, we define logic such that packets are
    // routed if and only if this table is "hit", i.e. a matching entry is found
    // for the given packet.

    table my_station_table {
        key = {
            hdr.ethernet.dst_addr: exact;
        }
        actions = { NoAction; }
        @name("my_station_table_counter")
        counters = direct_counter(CounterType.packets_and_bytes);
    }

    // --- routing_v6_table ----------------------------------------------------

    // To implement ECMP, we use Action Selectors, a v1model-specific construct.
    // A P4Runtime controller, can use action selectors to associate a group of
    // actions to one table entry. The specific action in the group will be
    // selected by performing a hash function over a pre-determined set of
    // header fields. Here we instantiate an action selector named
    // "ecmp_selector" that uses crc16 as the hash function, can hold up to 1024
    // entries (distinct action specifications), and produces a selector key of
    // size 16 bits.

    action_selector(HashAlgorithm.crc16, 32w1024, 32w16) ecmp_selector;

    action set_next_hop(mac_addr_t dmac) {
        hdr.ethernet.src_addr = hdr.ethernet.dst_addr;
        hdr.ethernet.dst_addr = dmac;
        // Decrement TTL
        hdr.ipv6.hop_limit = hdr.ipv6.hop_limit - 1;
    }

    // Look for the "implementation" property in the table definition.
    table routing_v6_table {
      key = {
          hdr.ipv6.dst_addr: lpm;
          // The following fields are not used for matching, but as input to the
          // ecmp_selector hash function.
          hdr.ipv6.dst_addr: selector;
          hdr.ipv6.src_addr: selector;
          hdr.ipv6.flow_label: selector;
          hdr.ipv6.next_hdr: selector;
          local_metadata.l4_src_port: selector;
          local_metadata.l4_dst_port: selector;
      }
      actions = {
          set_next_hop;
      }
      implementation = ecmp_selector;
      @name("routing_v6_table_counter")
      counters = direct_counter(CounterType.packets_and_bytes);
    }

    // *** ACL
    //
    // Provides ways to override a previous forwarding decision, for example
    // requiring that a packet is cloned/sent to the CPU, or dropped.
    //
    // We use this table to clone all NDP packets to the control plane, so to
    // enable host discovery. When the location of a new host is discovered, the
    // controller is expected to update the L2 and L3 tables with the
    // corresponding bridging and routing entries.

    // --- acl_table -----------------------------------------------------------

    action send_to_cpu() {
        standard_metadata.egress_spec = CPU_PORT;
    }

    action clone_to_cpu() {
        // Cloning is achieved by using a v1model-specific primitive. Here we
        // set the type of clone operation (ingress-to-egress pipeline), the
        // clone session ID (the CPU one), and the metadata fields we want to
        // preserve for the cloned packet replica.
        clone3(CloneType.I2E, CPU_CLONE_SESSION_ID, { standard_metadata.ingress_port });
    }

    table acl_table {
        key = {
            standard_metadata.ingress_port: ternary;
            hdr.ethernet.dst_addr: ternary;
            hdr.ethernet.src_addr: ternary;
            hdr.ethernet.ether_type: ternary;
            hdr.ipv6.next_hdr: ternary;
            hdr.icmpv6.type: ternary;
            local_metadata.l4_src_port: ternary;
            local_metadata.l4_dst_port: ternary;
        }
        actions = {
            send_to_cpu;
            clone_to_cpu;
            set_egress_port;
            drop;
        }
        @name("acl_table_counter")
        counters = direct_counter(CounterType.packets_and_bytes);
    }

    // *** NDP HANDLING
    //
    // Action that transforms an NDP NS packet into an NDP NA one for the given
    // target MAC address. The action also sets the egress port to the ingress
    // one where the NDP NS packet was received, so the reply goes straight
    // back to the requester.

    action ndp_ns_to_na(mac_addr_t target_mac) {
        hdr.ethernet.src_addr = target_mac;
        hdr.ethernet.dst_addr = IPV6_MCAST_01;
        ipv6_addr_t host_ipv6_tmp = hdr.ipv6.src_addr;
        hdr.ipv6.src_addr = hdr.ndp.target_ipv6_addr;
        hdr.ipv6.dst_addr = host_ipv6_tmp;
        hdr.ipv6.next_hdr = IP_PROTO_ICMPV6;
        hdr.icmpv6.type = ICMP6_TYPE_NA;
        hdr.ndp.flags = NDP_FLAG_ROUTER | NDP_FLAG_OVERRIDE;
        hdr.ndp.type = NDP_OPT_TARGET_LL_ADDR;
        hdr.ndp.length = 1;
        hdr.ndp.target_mac_addr = target_mac;
        standard_metadata.egress_spec = standard_metadata.ingress_port;
    }

    // Maps an NDP target IPv6 address to the MAC address the switch should
    // answer with on behalf of that host.
    table ndp_reply_table {
        key = {
            hdr.ndp.target_ipv6_addr: exact;
        }
        actions = {
            ndp_ns_to_na;
        }
        @name("ndp_reply_table_counter")
        counters = direct_counter(CounterType.packets_and_bytes);
    }

    // ---- END SOLUTION ----


    // *** UPF GTP-U HANDLING

    // Removes the outer IPv4/UDP/GTP-U headers of a tunneled (uplink) packet.
    // NOTE(review): only the outer headers are invalidated here; the deparser
    // (not shown in this chunk) is presumably responsible for emitting the
    // inner_ipv4/inner_udp headers so the user packet survives — confirm.
    action gtpu_decap(){
        hdr.ipv4.setInvalid();
        hdr.udp.setInvalid();
        hdr.gtpu.setInvalid();
    }

    // Adds an outer IPv4/UDP/GTP-U encapsulation for a downlink packet, with
    // tunnel endpoints (up_ip = this UPF's N3 address, gnb_ip = the gNB) and
    // TEID supplied by the control plane via the FAR.
    action gtpu_encap(teid_t teid,bit<32> up_ip,bit<32> gnb_ip){
        hdr.gtpu_ipv4.setValid();
        hdr.gtpu_ipv4.version = IP_VERSION_4;
        hdr.gtpu_ipv4.ihl = IPV4_MIN_IHL;
        hdr.gtpu_ipv4.dscp = 0;
        hdr.gtpu_ipv4.ecn = 0;
        // Outer length = inner IPv4 length + the three added headers.
        hdr.gtpu_ipv4.total_len = hdr.ipv4.total_len
                + (IPV4_HDR_SIZE + UDP_HDR_SIZE + GTP_HDR_SIZE);
        hdr.gtpu_ipv4.identification = 0x1513;
        hdr.gtpu_ipv4.flags = 0;
        hdr.gtpu_ipv4.frag_offset = 0;
        hdr.gtpu_ipv4.ttl = DEFAULT_IPV4_TTL;
        hdr.gtpu_ipv4.protocol = PROTO_UDP;
        hdr.gtpu_ipv4.src_addr = up_ip;
        hdr.gtpu_ipv4.dst_addr = gnb_ip;
        // Checksum recomputed in the UpdateChecksum block (zeroed here).
        hdr.gtpu_ipv4.hdr_checksum = 0;
        hdr.gtpu_udp.setValid();
        hdr.gtpu_udp.src_port = UDP_PORT_GTPU;
        hdr.gtpu_udp.dst_port = UDP_PORT_GTPU;
        hdr.gtpu_udp.len = hdr.ipv4.total_len
                + (UDP_HDR_SIZE + GTP_HDR_SIZE);
        // UDP checksum 0 = "not computed", legal for IPv4 (RFC 768).
        hdr.gtpu_udp.checksum = 0;
        hdr.outer_gtpu.setValid();
        hdr.outer_gtpu.version = GTPU_VERSION;
        hdr.outer_gtpu.pt = GTP_PROTOCOL_TYPE_GTP;
        hdr.outer_gtpu.spare = 0;
        hdr.outer_gtpu.ex_flag = 0;
        hdr.outer_gtpu.seq_flag = 0;
        hdr.outer_gtpu.npdu_flag = 0;
        hdr.outer_gtpu.msgtype = GTP_GPDU;
        hdr.outer_gtpu.msglen = hdr.ipv4.total_len;
        hdr.outer_gtpu.teid = teid;
    }

    // Records which Packet Detection Rule matched this packet.
    action set_pdr_id(pdr_id_t id){
        local_metadata.upf.pdr_id = id;
    }

    // Records the Forwarding Action Rule bound to the matched PDR.
    action set_far_id(far_id_t id){
        local_metadata.upf.far_id = id;
    }

    // Per-PDR outer-header-removal: decap GTP-U when the control plane
    // installed an entry for this PDR; otherwise leave the packet as-is.
    table upf_pdr_header_rm_table{
        key = {
            local_metadata.upf.pdr_id: exact;
        }
        actions = {
            gtpu_decap;
            nop;
        }
        const default_action = nop;
    }

    // PDR -> FAR binding. A packet whose PDR has no FAR is dropped.
    table upf_pdr_getfar_table{
        key = {
            local_metadata.upf.pdr_id: exact;
        }
        actions = {
            set_far_id;
            drop;
        }
        const default_action = drop;
    }

    // Applies the FAR: forward unchanged (nop), drop, or GTP-U encap
    // (downlink). Unknown FARs are dropped.
    table upf_far_action_table{
        key = {
            local_metadata.upf.far_id: exact;
        }
        actions = {
            nop;
            drop;
            gtpu_encap;
        }
        const default_action = drop;
    }

    // Uplink PDR lookup: classify a received GTP-U packet by F-TEID (outer
    // dst address + TEID), optionally filtered by the inner UE source IP.
    table upf_f_teid_ueip_filter_table{
        key = {
            hdr.ipv4.dst_addr: exact;
            hdr.gtpu.teid: exact;
            hdr.inner_ipv4.src_addr: ternary;
        }
        actions = {
            set_pdr_id;
            drop;
        }
        const default_action = drop;
    }

    // Downlink PDR lookup: classify a plain IPv4 packet by UE destination
    // address. A miss (nop) means the packet is not UPF traffic.
    table upf_ue_filter_table{
        key = {
            hdr.ipv4.dst_addr:exact;
        }
        actions ={
            nop;
            set_pdr_id;
        }
        const default_action = nop;
    }

    // Optional L2 bypass: lets the control plane pin an egress port for a
    // given (src MAC, dst MAC) pair, applied after the ACL table.
    table l2_forward_bypass_table{
        key = {
            hdr.ethernet.src_addr: exact;
            hdr.ethernet.dst_addr: exact;
        }
        actions ={
            nop;
            set_egress_port;
        }
        const default_action = nop;
    }

    // *** APPLY BLOCK STATEMENT
    //
    // Defines which tables a packet should traverse and in which order.
    apply {

        // If this is a packet-out from the controller...
        if (hdr.packet_out.isValid()) {
            // Set the egress port to that found in the packet-out metadata...
            standard_metadata.egress_spec = hdr.packet_out.egress_port;
            // Remove the packet-out header...
            hdr.packet_out.setInvalid();
            // Exit the pipeline here, no need to go through other tables.
            exit;
        }

        bool do_l3_l2 = true;

        // If this is an NDP NS packet, attempt to generate a reply using the
        // ndp_reply_table. If a matching entry is found, unset the "do_l3_l2"
        // flag to skip the L3 and L2 tables, as the "ndp_ns_to_na" action
        // already set an egress port.

        if (hdr.icmpv6.isValid() && hdr.icmpv6.type == ICMP6_TYPE_NS) {
            if (ndp_reply_table.apply().hit) {
                do_l3_l2 = false;
            }
        }

        if (do_l3_l2) {

            // Apply the L3 routing table to IPv6 packets, only if the
            // destination MAC is found in the my_station_table.
            if (hdr.ipv6.isValid() && my_station_table.apply().hit) {
                routing_v6_table.apply();
                // Check TTL, drop packet if necessary to avoid loops.
                if(hdr.ipv6.hop_limit == 0) { drop(); }
            }

            // UPF processing: uplink (GTP-U) packets are classified by F-TEID,
            // decapped per PDR, then forwarded per FAR; downlink (plain IPv4)
            // packets that match a UE address go through the same PDR->FAR
            // chain (where the FAR typically performs gtpu_encap).
            if(hdr.gtpu.isValid()){
                upf_f_teid_ueip_filter_table.apply();
                local_metadata.upf.direction = UPF_DIR_UPLINK;
                upf_pdr_header_rm_table.apply();
                upf_pdr_getfar_table.apply();
                upf_far_action_table.apply();
            }else{
                if(upf_ue_filter_table.apply().hit){
                    upf_pdr_getfar_table.apply();
                    upf_far_action_table.apply();
                }
            }

            // L2 bridging. Apply the exact table first (for unicast entries)..
            if (!l2_exact_table.apply().hit) {
                // If an entry is NOT found, apply the ternary one in case this
                // is a multicast/broadcast NDP NS packet for another host
                // attached to this switch.
                l2_ternary_table.apply();
            }
        }

        // Lastly, apply the ACL table, then the L2 bypass override.
        acl_table.apply();
        l2_forward_bypass_table.apply();
    }
}
904 |
905 | //------------------------------------------------------------------------------
906 | // 4. EGRESS PIPELINE
907 | //
908 | // In the v1model architecture, after the ingress pipeline, packets are
909 | // processed by the Traffic Manager, which provides capabilities such as
910 | // replication (for multicast or clone sessions), queuing, and scheduling.
911 | //
912 | // After the Traffic Manager, packets are processed by a so-called egress
913 | // pipeline. Differently from the ingress one, egress tables can match on the
914 | // egress_port intrinsic metadata as set by the Traffic Manager. If the Traffic
915 | // Manager is configured to replicate the packet to multiple ports, the egress
916 | // pipeline will see all replicas, each one with its own egress_port value.
917 | //
918 | // +---------------------+ +-------------+ +----------------------+
919 | // | INGRESS PIPE | | TM | | EGRESS PIPE |
920 | // | ------------------- | pkt | ----------- | pkt(s) | -------------------- |
921 | // | Set egress_spec, |---->| Replication |------->| Match on egress port |
922 | // | mcast_grp, or clone | | Queues | | |
923 | // | sess | | Scheduler | | |
924 | // +---------------------+ +-------------+ +----------------------+
925 | //
926 | // Similarly to the ingress pipeline, the egress one operates on the parsed
927 | // headers (hdr), the user-defined metadata (local_metadata), and the
928 | // architecture-specific intrinsic one (standard_metadata) which now
929 | // defines a read-only "egress_port" field.
930 | //------------------------------------------------------------------------------
control EgressPipeImpl (inout parsed_headers_t hdr,
                        inout local_metadata_t local_metadata,
                        inout standard_metadata_t standard_metadata) {
    apply {
        // Controller packet-in: packets whose egress port is the CPU port
        // (set in ingress, e.g. by the ACL send/clone_to_cpu actions) get
        // the packet_in header prepended, carrying the original switch
        // ingress port, and skip the rest of the egress logic.
        if (standard_metadata.egress_port == CPU_PORT) {
            hdr.packet_in.setValid();
            hdr.packet_in.ingress_port = standard_metadata.ingress_port;
            exit;
        }

        // Multicast pruning: for replicas produced by the Traffic Manager
        // (flag set by l2_ternary_table in ingress), discard the copy that
        // would go back out of the port the packet arrived on, so e.g.
        // broadcast NDP requests are not reflected to their sender.
        if (local_metadata.is_multicast) {
            if (standard_metadata.ingress_port == standard_metadata.egress_port) {
                mark_to_drop(standard_metadata);
            }
        }
    }
}
956 |
957 | //------------------------------------------------------------------------------
958 | // 5. CHECKSUM UPDATE
959 | //
960 | // Provide logic to update the checksum of outgoing packets.
961 | //------------------------------------------------------------------------------
control ComputeChecksumImpl(inout parsed_headers_t hdr,
                            inout local_metadata_t local_metadata)
{
    apply {
        // Recompute the ICMPv6 checksum of the NDP Neighbor Advertisement
        // packets generated by the ndp_reply_table in the ingress pipeline.
        // update_checksum only runs when its first argument is true, i.e.
        // only for packets carrying a valid NDP header; all other packets
        // leave egress with their checksum untouched.
        //
        // The field list is the ICMPv6 checksum input: the IPv6
        // pseudo-header (source/destination address, upper-layer length,
        // a zero pad byte, and the next-header value), followed by the
        // ICMPv6 and NDP message fields. The order of fields below is
        // significant and must match the wire layout.
        update_checksum(hdr.ndp.isValid(),
            {
                hdr.ipv6.src_addr,          // pseudo-header: source address
                hdr.ipv6.dst_addr,          // pseudo-header: destination address
                hdr.ipv6.payload_len,       // pseudo-header: upper-layer length
                8w0,                        // pseudo-header: zero padding
                hdr.ipv6.next_hdr,          // pseudo-header: next header (58 = ICMPv6)
                hdr.icmpv6.type,
                hdr.icmpv6.code,
                hdr.ndp.flags,
                hdr.ndp.target_ipv6_addr,
                hdr.ndp.type,
                hdr.ndp.length,
                hdr.ndp.target_mac_addr
            },
            hdr.icmpv6.checksum,            // destination field being updated
            HashAlgorithm.csum16            // 16-bit ones'-complement sum
        );
    }
}
989 |
990 |
991 | //------------------------------------------------------------------------------
992 | // 6. DEPARSER
993 | //
994 | // This is the last block of the V1Model architecture. The deparser specifies in
995 | // which order headers should be serialized on the wire. When calling the emit
996 | // primitive, only headers that are marked as "valid" are serialized, otherwise,
997 | // they are ignored.
998 | //------------------------------------------------------------------------------
control DeparserImpl(packet_out packet, in parsed_headers_t hdr) {
    apply {
        // Headers are serialized in wire order; emit() silently skips any
        // header that is not valid, so each packet carries only the subset
        // of headers marked valid by the parser or by pipeline actions.
        packet.emit(hdr.packet_in);  // packet-in metadata (CPU-bound packets only)
        packet.emit(hdr.ethernet);
        // IPv6 family. FIX: these emits were missing, so routed IPv6
        // traffic and the NDP NA replies generated in ingress (whose
        // checksum is recomputed in ComputeChecksumImpl) were serialized
        // without their L3/ICMPv6/NDP headers. IPv6 and the (outer) IPv4
        // headers below are presumably mutually exclusive for a given
        // packet, so the relative order between the two groups is moot.
        packet.emit(hdr.ipv6);
        packet.emit(hdr.icmpv6);
        packet.emit(hdr.ndp);
        // Outer GTP-U encapsulation (tunneled packets only):
        packet.emit(hdr.gtpu_ipv4);
        packet.emit(hdr.gtpu_udp);
        packet.emit(hdr.outer_gtpu);
        // Regular (or outer, when tunneled) IPv4 L3/L4 headers:
        packet.emit(hdr.ipv4);
        packet.emit(hdr.icmp);
        packet.emit(hdr.tcp);
        packet.emit(hdr.udp);
        // Inner headers of a GTP-U encapsulated payload:
        packet.emit(hdr.inner_ipv4);
        packet.emit(hdr.inner_udp);
    }
}
1014 |
1015 | //------------------------------------------------------------------------------
1016 | // V1MODEL SWITCH INSTANTIATION
1017 | //
1018 | // Finally, we instantiate a v1model switch with all the control block
1019 | // instances defined so far.
1020 | //------------------------------------------------------------------------------
V1Switch(
    ParserImpl(),           // 1. parser: extracts headers into parsed_headers_t
    VerifyChecksumImpl(),   // 2. checksum verification (before ingress)
    IngressPipeImpl(),      // 3. ingress match-action pipeline
    EgressPipeImpl(),       // 4. egress match-action pipeline (after Traffic Manager)
    ComputeChecksumImpl(),  // 5. checksum update (before deparsing)
    DeparserImpl()          // 6. deparser: serializes valid headers to the wire
) main;
1029 |
--------------------------------------------------------------------------------