├── bpf
├── xdp.elf
└── xdp.c
├── README.md
├── go.mod
├── main.go
├── go.sum
└── headers
└── bpf_helpers.h
/bpf/xdp.elf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xenbyte/xdp-firewall-tutorial/HEAD/bpf/xdp.elf
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # XDP Firewall with Go tutorial
2 |
This is a simple XDP firewall made with Go.
4 | For a full guide on how to make this firewall, refer to [this blog post](https://dev.to/xenbytes/simple-xdp-firewall-with-golang-1da3).
5 |
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module xdp-firewall
2 |
3 | go 1.19
4 |
5 | require (
	github.com/dropbox/goebpf v0.0.0-20220926213112-398a646c8fc1
7 | github.com/vishvananda/netlink v1.2.1-beta.2 // indirect
8 | github.com/vishvananda/netns v0.0.1 // indirect
9 | golang.org/x/sys v0.2.0 // indirect
10 | )
11 |
--------------------------------------------------------------------------------
/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "log"
6 | "os"
7 | "os/signal"
8 |
9 | "github.com/dropbox/goebpf"
10 | )
11 |
12 | func main() {
13 |
14 | // Specify Interface Name
15 | interfaceName := "lo"
16 | // IP BlockList
17 | // Add the IPs you want to be blocked
18 | ipList := []string{
19 | "12.12.11.32",
20 | }
21 |
22 | // Load XDP Into App
23 | bpf := goebpf.NewDefaultEbpfSystem()
24 | err := bpf.LoadElf("bpf/xdp.elf")
25 | if err != nil {
26 | log.Fatalf("LoadELF() failed: %s", err)
27 | }
28 | blacklist := bpf.GetMapByName("blacklist")
29 | if blacklist == nil {
30 | log.Fatalf("eBPF map 'blacklist' not found\n")
31 | }
32 | xdp := bpf.GetProgramByName("firewall")
33 | if xdp == nil {
34 | log.Fatalln("Program 'firewall' not found in Program")
35 | }
36 | err = xdp.Load()
37 | if err != nil {
38 | fmt.Printf("xdp.Attach(): %v", err)
39 | }
40 | err = xdp.Attach(interfaceName)
41 | if err != nil {
42 | log.Fatalf("Error attaching to Interface: %s", err)
43 | }
44 |
45 | BlockIPAddress(ipList, blacklist)
46 |
47 | defer xdp.Detach()
48 | ctrlC := make(chan os.Signal, 1)
49 | signal.Notify(ctrlC, os.Interrupt)
50 | log.Println("XDP Program Loaded successfuly into the Kernel.")
51 | log.Println("Press CTRL+C to stop.")
52 | <-ctrlC
53 |
54 | }
55 |
56 | // The Function That adds the IPs to the blacklist map
57 | func BlockIPAddress(ipAddreses []string, blacklist goebpf.Map) error {
58 | for index, ip := range ipAddreses {
59 | err := blacklist.Insert(goebpf.CreateLPMtrieKey(ip), index)
60 | if err != nil {
61 | return err
62 | }
63 | }
64 | return nil
65 | }
66 |
--------------------------------------------------------------------------------
/bpf/xdp.c:
--------------------------------------------------------------------------------
1 | #include "bpf_helpers.h"
2 |
3 |
4 |
// Ethernet header (defined locally instead of including <linux/if_ether.h>).
struct ethhdr {
  __u8 h_dest[6];   // destination MAC address
  __u8 h_source[6]; // source MAC address
  __u16 h_proto;    // EtherType, stored in network byte order
} __attribute__((packed));
11 |
// IPv4 header (defined locally instead of including <linux/ip.h>).
// NOTE(review): the ihl/version bitfield order assumes a little-endian
// target, which is what clang's BPF backend uses here - confirm if porting.
struct iphdr {
  __u8 ihl : 4;     // header length in 32-bit words
  __u8 version : 4; // always 4 for IPv4
  __u8 tos;         // type of service / DSCP
  __u16 tot_len;    // total packet length (network byte order)
  __u16 id;         // identification (used for fragment reassembly)
  __u16 frag_off;   // fragment offset plus flags
  __u8 ttl;         // time to live
  __u8 protocol;    // L4 protocol number (TCP=6, UDP=17, ...)
  __u16 check;      // header checksum
  __u32 saddr;      // source address (network byte order)
  __u32 daddr;      // destination address (network byte order)
} __attribute__((packed));
26 |
27 |
28 |
// Blacklist of source IPv4 addresses, looked up by name from user space.
// LPM (longest-prefix-match) trie keyed by {__u32 prefixlen, __u32 addr},
// which is 8 bytes - hence key_size = sizeof(__u64).
// The value is the __u32 rule index stored by the Go loader.
BPF_MAP_DEF(blacklist) = {
    .map_type = BPF_MAP_TYPE_LPM_TRIE,
    .key_size = sizeof(__u64),
    .value_size = sizeof(__u32),
    .max_entries = 16,
};
BPF_MAP_ADD(blacklist);
36 |
37 | // XDP program //
38 | SEC("xdp")
39 | int firewall(struct xdp_md *ctx) {
40 | void *data_end = (void *)(long)ctx->data_end;
41 | void *data = (void *)(long)ctx->data;
42 |
43 | // Only IPv4 supported for this example
44 | struct ethhdr *ether = data;
45 | if (data + sizeof(*ether) > data_end) {
46 | // Malformed Ethernet header
47 | return XDP_ABORTED;
48 | }
49 |
50 | if (ether->h_proto != 0x08U) { // htons(ETH_P_IP) -> 0x08U
51 | // Non IPv4 traffic
52 | return XDP_PASS;
53 | }
54 |
55 | data += sizeof(*ether);
56 | struct iphdr *ip = data;
57 | if (data + sizeof(*ip) > data_end) {
58 | // Malformed IPv4 header
59 | return XDP_ABORTED;
60 | }
61 |
62 | struct {
63 | __u32 prefixlen;
64 | __u32 saddr;
65 | } key;
66 |
67 | key.prefixlen = 32;
68 | key.saddr = ip->saddr;
69 |
70 | // Lookup SRC IP in blacklisted IPs
71 | __u64 *rule_idx = bpf_map_lookup_elem(&blacklist, &key);
72 | if (rule_idx) {
73 | // Matched, increase match counter for matched "rule"
74 | __u32 index = *(__u32*)rule_idx; // make verifier happy
75 | return XDP_DROP;
76 | }
77 |
78 | return XDP_PASS;
79 | }
80 |
81 |
--------------------------------------------------------------------------------
/go.sum:
--------------------------------------------------------------------------------
1 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
2 | github.com/dropbox/goebpf v0.0.0-20220926213112-398a646c8fc1 h1:AkG9rAb6prUNHC5p+mqlSYrx+FAgS1Hhjx4v5qPt5I4=
3 | github.com/dropbox/goebpf v0.0.0-20220926213112-398a646c8fc1/go.mod h1:U44fqiGgxszGKp6WDm/xtCXJqx4h2hDYnCpu4/m0uQk=
4 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
5 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
6 | github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
7 | github.com/vishvananda/netlink v1.1.1-0.20200218174631-5f2fc868c2d0/go.mod h1:FSQhuTO7eHT34mPzX+B04SUAjiqLxtXs1et0S6l9k4k=
8 | github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs=
9 | github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
10 | github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
11 | github.com/vishvananda/netns v0.0.0-20200520041808-52d707b772fe/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
12 | github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
13 | github.com/vishvananda/netns v0.0.1 h1:JDkWS7Axy5ziNM3svylLhpSgqjPDb+BgVUbXoDo+iPw=
14 | github.com/vishvananda/netns v0.0.1/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
15 | golang.org/x/sys v0.0.0-20200121082415-34d275377bf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
16 | golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
17 | golang.org/x/sys v0.0.0-20200722175500-76b94024e4b6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
18 | golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
19 | golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A=
20 | golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
21 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
22 | gopkg.in/check.v1 v1.0.0-20160105164936-4f90aeace3a2/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
23 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
24 |
--------------------------------------------------------------------------------
/headers/bpf_helpers.h:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2019 Dropbox, Inc.
2 | // Full license can be found in the LICENSE file.
3 |
4 | // BPF helpers
5 | // Set of defines / prototypes to use from eBPF programs as well as from regular
6 | // linux/mac "cross" compilation.
7 |
8 | #ifndef __BPF_HELPERS_H
9 | #define __BPF_HELPERS_H
10 |
// Standard types.
// Due to tons of dependencies in standard linux kernel headers,
// define the types explicitly instead of including <linux/types.h>.
typedef unsigned short __u16; // NOLINT
typedef unsigned char __u8;
typedef unsigned int __u32;
typedef unsigned long long __u64;
typedef int __s32;
typedef unsigned long size_t; // NOTE(review): assumes an LP64 target - confirm
typedef __u32 __be32;         // 32-bit value in big-endian (network) byte order
typedef __u16 __be16;         // 16-bit value in big-endian (network) byte order
22 |
// BPF map types
// NOTE: values mirror the kernel's enum bpf_map_type (uapi/linux/bpf.h);
// the relative order must not change.
enum bpf_map_type {
  BPF_MAP_TYPE_UNSPEC = 0,
  BPF_MAP_TYPE_HASH,
  BPF_MAP_TYPE_ARRAY,
  BPF_MAP_TYPE_PROG_ARRAY,
  BPF_MAP_TYPE_PERF_EVENT_ARRAY,
  BPF_MAP_TYPE_PERCPU_HASH,
  BPF_MAP_TYPE_PERCPU_ARRAY,
  BPF_MAP_TYPE_STACK_TRACE,
  BPF_MAP_TYPE_CGROUP_ARRAY,
  BPF_MAP_TYPE_LRU_HASH,
  BPF_MAP_TYPE_LRU_PERCPU_HASH,
  BPF_MAP_TYPE_LPM_TRIE,
  BPF_MAP_TYPE_ARRAY_OF_MAPS,
  BPF_MAP_TYPE_HASH_OF_MAPS,
  BPF_MAP_TYPE_DEVMAP,
  BPF_MAP_TYPE_SOCKMAP,
  BPF_MAP_TYPE_CPUMAP,
  BPF_MAP_TYPE_XSKMAP,
  BPF_MAP_TYPE_SOCKHASH,
  BPF_MAP_TYPE_CGROUP_STORAGE,
  BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
  BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
  BPF_MAP_TYPE_QUEUE,
  BPF_MAP_TYPE_STACK,
  BPF_MAP_TYPE_SK_STORAGE,
};
51 |
/* Helper-function flag values below mirror uapi/linux/bpf.h. */

/* BPF_FUNC_skb_store_bytes flags. */
enum {
  BPF_F_RECOMPUTE_CSUM = (1ULL << 0),
  BPF_F_INVALIDATE_HASH = (1ULL << 1),
};

/* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags.
 * First 4 bits are for passing the header field size.
 */
enum {
  BPF_F_HDR_FIELD_MASK = 0xfULL,
};

/* BPF_FUNC_l4_csum_replace flags. */
enum {
  BPF_F_PSEUDO_HDR = (1ULL << 4),
  BPF_F_MARK_MANGLED_0 = (1ULL << 5),
  BPF_F_MARK_ENFORCE = (1ULL << 6),
};

/* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */
enum {
  BPF_F_INGRESS = (1ULL << 0),
};

/* BPF_FUNC_skb_set_tunnel_key flags. */
enum {
  BPF_F_ZERO_CSUM_TX = (1ULL << 1),
  BPF_F_DONT_FRAGMENT = (1ULL << 2),
  BPF_F_SEQ_NUMBER = (1ULL << 3),
};

/* BPF_FUNC_skb_adjust_room flags. */
enum {
  BPF_F_ADJ_ROOM_FIXED_GSO = (1ULL << 0),
  BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = (1ULL << 1),
  BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = (1ULL << 2),
  BPF_F_ADJ_ROOM_ENCAP_L4_GRE = (1ULL << 3),
  BPF_F_ADJ_ROOM_ENCAP_L4_UDP = (1ULL << 4),
  BPF_F_ADJ_ROOM_NO_CSUM_RESET = (1ULL << 5),
  BPF_F_ADJ_ROOM_ENCAP_L2_ETH = (1ULL << 6),
};

/* flags for BPF_MAP_UPDATE_ELEM command */
#define BPF_ANY 0 /* create new element or update existing */
#define BPF_NOEXIST 1 /* create new element if it didn't exist */
#define BPF_EXIST 2 /* update existing element */
#define BPF_F_LOCK 4 /* spin_lock-ed map_lookup/map_update */

/* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and
 * BPF_FUNC_perf_event_read_value flags.
 */
#define BPF_F_INDEX_MASK 0xffffffffULL
#define BPF_F_CURRENT_CPU BPF_F_INDEX_MASK
106 |
// A helper structure used by eBPF C program
// to describe map attributes to BPF program loader
struct bpf_map_def {
  __u32 map_type;    // one of enum bpf_map_type
  __u32 key_size;    // key size in bytes
  __u32 value_size;  // value size in bytes
  __u32 max_entries; // maximum number of entries
  __u32 map_flags;   // map creation flags
  // Array/Hash of maps use case: pointer to inner map template
  void *inner_map_def;
  // Define this to make map system wide ("object pinning")
  // path could be anything, like '/sys/fs/bpf/foo'
  // WARN: You must have BPF filesystem mounted on provided location
  const char *persistent_path;
};

// Sizes/offsets used by the Go-side ELF loader to parse map definitions.
#define BPF_MAP_DEF_SIZE sizeof(struct bpf_map_def)
#define BPF_MAP_OFFSET_PERSISTENT offsetof(struct bpf_map_def, persistent_path)
#define BPF_MAP_OFFSET_INNER_MAP offsetof(struct bpf_map_def, inner_map_def)
126 |
/* Generic BPF return codes which all BPF program types may support.
 * The values are binary compatible with their TC_ACT_* counter-part to
 * provide backwards compatibility with existing SCHED_CLS and SCHED_ACT
 * programs.
 *
 * XDP is handled separately, see XDP_*.
 */
enum bpf_ret_code {
  BPF_OK = 0,
  /* 1 reserved */
  BPF_DROP = 2,
  /* 3-6 reserved */
  BPF_REDIRECT = 7,
  /* >127 are reserved for prog type specific return codes.
   *
   * BPF_LWT_REROUTE: used by BPF_PROG_TYPE_LWT_IN and
   * BPF_PROG_TYPE_LWT_XMIT to indicate that skb had been
   * changed and should be routed based on its new L3 header.
   * (This is an L3 redirect, as opposed to L2 redirect
   * represented by BPF_REDIRECT above).
   */
  BPF_LWT_REROUTE = 128,
};
150 |
// XDP related constants
// Return codes for XDP programs; values mirror uapi/linux/bpf.h.
enum xdp_action {
  XDP_ABORTED = 0, // program error: drop packet and raise a trace event
  XDP_DROP,        // silently drop the packet
  XDP_PASS,        // pass the packet up the normal network stack
  XDP_TX,          // transmit back out of the receiving interface
  XDP_REDIRECT,    // redirect to another interface / CPU / AF_XDP socket
};

// Socket Filter programs return code
enum socket_filter_action {
  SOCKET_FILTER_DENY = 0,
  SOCKET_FILTER_ALLOW,
};
165 |
// Kprobe required constants / structs
// (arch/x86/include/asm/ptrace.h)
// NOTE(review): these register mappings are x86-64 specific - confirm
// before using kprobes on another architecture.
#define PT_REGS_PARM1(x) ((x)->di)
#define PT_REGS_PARM2(x) ((x)->si)
#define PT_REGS_PARM3(x) ((x)->dx)
#define PT_REGS_PARM4(x) ((x)->r10)
#define PT_REGS_PARM5(x) ((x)->r8)
#define PT_REGS_PARM6(x) ((x)->r9)
#define PT_REGS_RET(x) ((x)->sp)
#define PT_REGS_FP(x) ((x)->bp)
#define PT_REGS_RC(x) ((x)->ax)
#define PT_REGS_SP(x) ((x)->sp)
#define PT_REGS_IP(x) ((x)->ip)

// Register file layout matching the kernel's struct pt_regs for x86-64.
struct pt_regs {
  unsigned long r15;
  unsigned long r14;
  unsigned long r13;
  unsigned long r12;
  unsigned long bp;
  unsigned long bx;
  unsigned long r11;
  unsigned long r10;
  unsigned long r9;
  unsigned long r8;
  unsigned long ax;
  unsigned long cx;
  unsigned long dx;
  unsigned long si;
  unsigned long di;
  unsigned long orig_ax;
  unsigned long ip;
  unsigned long cs;
  unsigned long flags;
  unsigned long sp;
  unsigned long ss;
};
203 |
// Branch-prediction hints and a marker for intentionally unused symbols.
#define bpf_likely(X) __builtin_expect(!!(X), 1)
#define bpf_unlikely(X) __builtin_expect(!!(X), 0)
#define UNUSED __attribute__((unused))
207 |
208 | // In order to cross compile BPF program for BPF / Linux / Mac
209 | // we need to define platform specific things like:
210 | // 1. Custom (non kernel) implementation for bpf_map_* functions
211 | // 2. For BPF we need to put programs into special sections, but, for
212 | // regular linux target (mostly for tests) we don't.
213 | // 3. BPF does not support function calls, so __always_inline__ is must have.
214 | // However, for testing it doesn't make sense.
215 | // 4. Debug prints - for BPF it is done by calling helper, for linux just
216 | // regular printf()
217 | #ifdef __BPF__
218 |
219 | // Clang for eBPF missed static_assert declaration because programs are C, not
220 | // CPP
221 | #define static_assert _Static_assert
222 |
223 | // Helper macro to place programs, maps, license in
224 | // different sections in ELF file.
225 | #define SEC(NAME) __attribute__((section(NAME), used))
226 |
227 | // eBPF does not support functions (yet), so, all functions MUST be inlined.
228 | // Starting from kernel 4.16 it is not required to always inline functions
229 | // since support has been added
230 | #define INLINE __attribute__((__always_inline__))
231 |
232 | // XDP metadata - basically data packet
233 | // P.S. for some reason XDP programs uses 32bit pointers
234 | struct xdp_md {
235 | __u32 data;
236 | __u32 data_end;
237 | __u32 data_meta;
238 | /* Below access go through struct xdp_rxq_info */
239 | __u32 ingress_ifindex; /* rxq->dev->ifindex */
240 | __u32 rx_queue_index; /* rxq->queue_index */
241 |
242 | __u32 egress_ifindex; /* txq->dev->ifindex */
243 | };
244 |
245 |
246 | /* user accessible mirror of in-kernel sk_buff.
247 | * new fields can only be added to the end of this structure
248 | */
249 | struct __sk_buff {
250 | __u32 len;
251 | __u32 pkt_type;
252 | __u32 mark;
253 | __u32 queue_mapping;
254 | __u32 protocol;
255 | __u32 vlan_present;
256 | __u32 vlan_tci;
257 | __u32 vlan_proto;
258 | __u32 priority;
259 | __u32 ingress_ifindex;
260 | __u32 ifindex;
261 | __u32 tc_index;
262 | __u32 cb[5];
263 | __u32 hash;
264 | __u32 tc_classid;
265 | __u32 data;
266 | __u32 data_end;
267 | __u32 napi_id;
268 |
269 | /* Accessed by BPF_PROG_TYPE_sk_skb types from here to ... */
270 | __u32 family;
271 | __u32 remote_ip4; /* Stored in network byte order */
272 | __u32 local_ip4; /* Stored in network byte order */
273 | __u32 remote_ip6[4]; /* Stored in network byte order */
274 | __u32 local_ip6[4]; /* Stored in network byte order */
275 | __u32 remote_port; /* Stored in network byte order */
276 | __u32 local_port; /* stored in host byte order */
277 | /* ... here. */
278 |
279 | __u32 data_meta;
280 | };
281 |
282 | struct bpf_sock_tuple {
283 | union {
284 | struct {
285 | __be32 saddr;
286 | __be32 daddr;
287 | __be16 sport;
288 | __be16 dport;
289 | } ipv4;
290 | struct {
291 | __be32 saddr[4];
292 | __be32 daddr[4];
293 | __be16 sport;
294 | __be16 dport;
295 | } ipv6;
296 | };
297 | };
298 |
299 | struct bpf_spin_lock {
300 | __u32 val;
301 | };
302 |
303 | struct bpf_sysctl {
304 | __u32 write; /* Sysctl is being read (= 0) or written (= 1).
305 | * Allows 1,2,4-byte read, but no write.
306 | */
307 | __u32 file_pos; /* Sysctl file position to read from, write to.
308 | * Allows 1,2,4-byte read an 4-byte write.
309 | */
310 | };
311 |
312 | // BPF helper functions supported on linux kernel 5.2+
313 | // clang-format off
314 | #define __BPF_FUNC_MAPPER(FN) \
315 | FN(unspec), \
316 | FN(map_lookup_elem), \
317 | FN(map_update_elem), \
318 | FN(map_delete_elem), \
319 | FN(probe_read), \
320 | FN(ktime_get_ns), \
321 | FN(trace_printk), \
322 | FN(get_prandom_u32), \
323 | FN(get_smp_processor_id), \
324 | FN(skb_store_bytes), \
325 | FN(l3_csum_replace), \
326 | FN(l4_csum_replace), \
327 | FN(tail_call), \
328 | FN(clone_redirect), \
329 | FN(get_current_pid_tgid), \
330 | FN(get_current_uid_gid), \
331 | FN(get_current_comm), \
332 | FN(get_cgroup_classid), \
333 | FN(skb_vlan_push), \
334 | FN(skb_vlan_pop), \
335 | FN(skb_get_tunnel_key), \
336 | FN(skb_set_tunnel_key), \
337 | FN(perf_event_read), \
338 | FN(redirect), \
339 | FN(get_route_realm), \
340 | FN(perf_event_output), \
341 | FN(skb_load_bytes), \
342 | FN(get_stackid), \
343 | FN(csum_diff), \
344 | FN(skb_get_tunnel_opt), \
345 | FN(skb_set_tunnel_opt), \
346 | FN(skb_change_proto), \
347 | FN(skb_change_type), \
348 | FN(skb_under_cgroup), \
349 | FN(get_hash_recalc), \
350 | FN(get_current_task), \
351 | FN(probe_write_user), \
352 | FN(current_task_under_cgroup), \
353 | FN(skb_change_tail), \
354 | FN(skb_pull_data), \
355 | FN(csum_update), \
356 | FN(set_hash_invalid), \
357 | FN(get_numa_node_id), \
358 | FN(skb_change_head), \
359 | FN(xdp_adjust_head), \
360 | FN(probe_read_str), \
361 | FN(get_socket_cookie), \
362 | FN(get_socket_uid), \
363 | FN(set_hash), \
364 | FN(setsockopt), \
365 | FN(skb_adjust_room), \
366 | FN(redirect_map), \
367 | FN(sk_redirect_map), \
368 | FN(sock_map_update), \
369 | FN(xdp_adjust_meta), \
370 | FN(perf_event_read_value), \
371 | FN(perf_prog_read_value), \
372 | FN(getsockopt), \
373 | FN(override_return), \
374 | FN(sock_ops_cb_flags_set), \
375 | FN(msg_redirect_map), \
376 | FN(msg_apply_bytes), \
377 | FN(msg_cork_bytes), \
378 | FN(msg_pull_data), \
379 | FN(bind), \
380 | FN(xdp_adjust_tail), \
381 | FN(skb_get_xfrm_state), \
382 | FN(get_stack), \
383 | FN(skb_load_bytes_relative), \
384 | FN(fib_lookup), \
385 | FN(sock_hash_update), \
386 | FN(msg_redirect_hash), \
387 | FN(sk_redirect_hash), \
388 | FN(lwt_push_encap), \
389 | FN(lwt_seg6_store_bytes), \
390 | FN(lwt_seg6_adjust_srh), \
391 | FN(lwt_seg6_action), \
392 | FN(rc_repeat), \
393 | FN(rc_keydown), \
394 | FN(skb_cgroup_id), \
395 | FN(get_current_cgroup_id), \
396 | FN(get_local_storage), \
397 | FN(sk_select_reuseport), \
398 | FN(skb_ancestor_cgroup_id), \
399 | FN(sk_lookup_tcp), \
400 | FN(sk_lookup_udp), \
401 | FN(sk_release), \
402 | FN(map_push_elem), \
403 | FN(map_pop_elem), \
404 | FN(map_peek_elem), \
405 | FN(msg_push_data), \
406 | FN(msg_pop_data), \
407 | FN(rc_pointer_rel), \
408 | FN(spin_lock), \
409 | FN(spin_unlock), \
410 | FN(sk_fullsock), \
411 | FN(tcp_sock), \
412 | FN(skb_ecn_set_ce), \
413 | FN(get_listener_sock), \
414 | FN(skc_lookup_tcp), \
415 | FN(tcp_check_syncookie), \
416 | FN(sysctl_get_name), \
417 | FN(sysctl_get_current_value), \
418 | FN(sysctl_get_new_value), \
419 | FN(sysctl_set_new_value), \
420 | FN(strtol), \
421 | FN(strtoul), \
422 | FN(sk_storage_get), \
423 | FN(sk_storage_delete), \
424 | FN(send_signal),
425 |
426 | #define __BPF_ENUM_FN(x) BPF_FUNC_ ## x
427 | enum bpf_func_id {
428 | __BPF_FUNC_MAPPER(__BPF_ENUM_FN)
429 | __BPF_FUNC_MAX_ID,
430 | };
431 | #undef __BPF_ENUM_FN
432 | // clang-format on
433 |
434 | // BPF helper functions - this construction looks complicated, but actually
435 | // it explained to just:
436 | // static void* bpf_map_lookup_elem(void *map, void *key) = 1
437 | // In other words bpf_map_lookup_elem points to memory address 0x1 - which is
438 | // BPF function number 1.
439 | // More details about helper functions at: http://docs.cilium.io/en/v1.1/bpf/
440 | // Search for "Helper Functions"
441 | // clang-format off
442 |
443 | // Lookup bpf map element by key.
444 | // Return: Map value or NULL
445 | static void *(*bpf_map_lookup_elem)(const void *map, const void *key) = (void *) // NOLINT
446 | BPF_FUNC_map_lookup_elem;
447 |
448 | // Update bpf map element by key to value
449 | // Return: 0 on success or negative error
450 | static int (*bpf_map_update_elem)(const void *map, const void *key,
451 | const void *value, __u64 flags) = (void *) // NOLINT
452 | BPF_FUNC_map_update_elem;
453 |
454 | // Delete element. Actually applicable on HASH maps
455 | // Return: 0 on success or negative error
456 | static int (*bpf_map_delete_elem)(const void *map, void *key) = (void *) // NOLINT
457 | BPF_FUNC_map_delete_elem;
458 |
459 | static int (*bpf_probe_read)(void *dst, __u64 size, const void *unsafe_ptr) = (void *) // NOLINT
460 | BPF_FUNC_probe_read;
461 |
462 | static __u64 (*bpf_ktime_get_ns)(void) = (void *) // NOLINT
463 | BPF_FUNC_ktime_get_ns;
464 |
465 | static __u32 (*bpf_get_prandom_u32)(void) = (void *) // NOLINT
466 | BPF_FUNC_get_prandom_u32;
467 |
468 | // Like printf() for BPF
469 | // Return: length of buffer written or negative error
470 | static int (*bpf_trace_printk)(const char *fmt, int fmt_size, ...) = (void *) // NOLINT
471 | BPF_FUNC_trace_printk;
472 |
473 | static int (*bpf_probe_read_str)(void *dst, __u64 size, const void *unsafe_ptr) = (void *) // NOLINT
474 | BPF_FUNC_probe_read_str;
475 |
476 | // Jump into another BPF program
477 | // prog_array_map: pointer to map which type is BPF_MAP_TYPE_PROG_ARRAY
478 | // index: 32-bit index inside array that selects specific program to run
479 | // Return: 0 on success or negative error
480 | static void (*bpf_tail_call)(const void *ctx, void *map, int index) = (void *) // NOLINT
481 | BPF_FUNC_tail_call;
482 |
483 | static int (*bpf_clone_redirect)(void *ctx, int ifindex, __u32 flags) = (void*) // NOLINT
484 | BPF_FUNC_clone_redirect;
485 |
486 | static __u64 (*bpf_get_smp_processor_id)(void) = (void*) // NOLINT
487 | BPF_FUNC_get_smp_processor_id;
488 |
489 | static __u64 (*bpf_get_current_pid_tgid)(void) = (void*) // NOLINT
490 | BPF_FUNC_get_current_pid_tgid;
491 |
492 | static __u64 (*bpf_get_current_uid_gid)(void) = (void*) // NOLINT
493 | BPF_FUNC_get_current_uid_gid;
494 |
495 | static int (*bpf_get_current_comm)(void *buf, int buf_size) = (void*) // NOLINT
496 | BPF_FUNC_get_current_comm;
497 |
498 | static __u64 (*bpf_get_cgroup_classid)(void *ctx) = (void*) // NOLINT
499 | BPF_FUNC_get_cgroup_classid;
500 |
501 | static __u64 (*bpf_skb_vlan_push)(void *ctx, __u16 proto, __u16 vlan_tci) = (void*) // NOLINT
502 | BPF_FUNC_skb_vlan_push;
503 |
504 | static __u64 (*bpf_skb_vlan_pop)(void *ctx) = (void*) // NOLINT
505 | BPF_FUNC_skb_vlan_pop;
506 |
507 | static int (*bpf_skb_get_tunnel_key)(void *ctx, void *to, __u32 size, __u64 flags) = (void*) // NOLINT
508 | BPF_FUNC_skb_get_tunnel_key;
509 |
510 | static int (*bpf_skb_set_tunnel_key)(void *ctx, void *from, __u32 size, __u64 flags) = (void*) // NOLINT
511 | BPF_FUNC_skb_set_tunnel_key;
512 |
513 | static __u64 (*bpf_perf_event_read)(void *map, __u64 flags) = (void*) // NOLINT
514 | BPF_FUNC_perf_event_read;
515 |
516 | static int (*bpf_redirect)(int ifindex, __u32 flags) = (void*) // NOLINT
517 | BPF_FUNC_redirect;
518 |
519 | static __u32 (*bpf_get_route_realm)(void *ctx) = (void*) // NOLINT
520 | BPF_FUNC_get_route_realm;
521 |
522 | static int (*bpf_perf_event_output)(void *ctx, void *map, __u64 index, void *data, __u32 size) = (void*) // NOLINT
523 | BPF_FUNC_perf_event_output;
524 |
525 | static int (*bpf_l3_csum_replace)(void *ctx, int offset, __u64 from, __u64 to, __u64 size) = (void *) // NOLINT
526 | BPF_FUNC_l3_csum_replace;
527 |
528 | static int (*bpf_l4_csum_replace)(void *ctx, int offset, __u64 from, __u64 to, __u64 flags) = (void *) // NOLINT
529 | BPF_FUNC_l4_csum_replace;
530 |
531 | static int (*bpf_skb_load_bytes)(void *ctx, int offset, void *to, __u32 len) = (void*) // NOLINT
532 | BPF_FUNC_skb_load_bytes;
533 |
534 | static int (*bpf_skb_store_bytes)(void *ctx, int offset, const void *from, __u32 len, __u64 flags) = (void *) // NOLINT
535 | BPF_FUNC_skb_store_bytes;
536 |
537 | static int (*bpf_perf_event_read_value)(void *map, __u64 flags, void *buf, __u32 buf_size) = (void*) // NOLINT
538 | BPF_FUNC_perf_event_read_value;
539 |
540 | static int (*bpf_perf_prog_read_value)(void *ctx, void *buf, __u32 buf_size) = (void*) // NOLINT
541 | BPF_FUNC_perf_prog_read_value;
542 |
543 | static int (*bpf_current_task_under_cgroup)(void *map, int index) = (void*) // NOLINT
544 | BPF_FUNC_current_task_under_cgroup;
545 |
546 | static __u32 (*bpf_get_socket_cookie)(void *ctx) = (void*) // NOLINT
547 | BPF_FUNC_get_socket_cookie;
548 |
549 | static __u64 (*bpf_get_socket_uid)(void *ctx) = (void*) // NOLINT
550 | BPF_FUNC_get_socket_uid;
551 |
552 | static int (*bpf_getsockopt)(void *ctx, int level, int optname, void *optval, int optlen) = (void*) // NOLINT
553 | BPF_FUNC_getsockopt;
554 |
555 | static int (*bpf_redirect_map)(void *map, __u32 key, __u64 flags) = (void*) // NOLINT
556 | BPF_FUNC_redirect_map;
557 |
558 | static int (*bpf_set_hash)(void *ctx, __u32 hash) = (void*) // NOLINT
559 | BPF_FUNC_set_hash;
560 |
561 | static int (*bpf_setsockopt)(void *ctx, int level, int optname, void *optval, int optlen) = (void*) // NOLINT
562 | BPF_FUNC_setsockopt;
563 |
564 | static int (*bpf_skb_adjust_room)(void *ctx, int len_diff, __u32 mode, __u64 flags) = (void*) // NOLINT
565 | BPF_FUNC_skb_adjust_room;
566 |
567 | static int (*bpf_skb_under_cgroup)(void *ctx, void *map, int index) = (void*) // NOLINT
568 | BPF_FUNC_skb_under_cgroup;
569 |
570 | static struct bpf_sock *(*bpf_skc_lookup_tcp)(void *ctx, struct bpf_sock_tuple *tuple, int size,
571 | unsigned long long netns_id,
572 | unsigned long long flags) = (void*) // NOLINT
573 | BPF_FUNC_skc_lookup_tcp;
574 |
575 | static int (*bpf_sk_redirect_map)(void *ctx, void *map, int key, int flags) = (void*) // NOLINT
576 | BPF_FUNC_sk_redirect_map;
577 |
578 | static int (*bpf_sock_map_update)(void *map, void *key, void *value, unsigned long long flags) = (void*) // NOLINT
579 | BPF_FUNC_sock_map_update;
580 |
581 | static int (*bpf_strtol)(const char *buf, size_t buf_len, __u64 flags, long *res) = (void*) // NOLINT
582 | BPF_FUNC_strtol;
583 |
584 | static int (*bpf_strtoul)(const char *buf, size_t buf_len, __u64 flags, unsigned long *res) = (void*) // NOLINT
585 | BPF_FUNC_strtoul;
586 |
587 | static int (*bpf_sysctl_get_current_value)(struct bpf_sysctl *ctx, char *buf, size_t buf_len) = (void*) // NOLINT
588 | BPF_FUNC_sysctl_get_current_value;
589 |
590 | static int (*bpf_sysctl_get_name)(struct bpf_sysctl *ctx, char *buf, size_t buf_len, __u64 flags) = (void*) // NOLINT
591 | BPF_FUNC_sysctl_get_name;
592 |
593 | static int (*bpf_sysctl_get_new_value)(struct bpf_sysctl *ctx, char *buf, size_t buf_len) = (void*) // NOLINT
594 | BPF_FUNC_sysctl_get_new_value;
595 |
596 | static int (*bpf_sysctl_set_new_value)(struct bpf_sysctl *ctx, const char *buf, size_t buf_len) = (void*) // NOLINT
597 | BPF_FUNC_sysctl_set_new_value;
598 |
599 | static int (*bpf_tcp_check_syncookie)(struct bpf_sock *sk, void *ip, int ip_len, void *tcp,
600 | int tcp_len) = (void*) // NOLINT
601 | BPF_FUNC_tcp_check_syncookie;
602 |
603 | // Adjust the xdp_md.data_meta by delta
604 | // ctx: pointer to xdp_md
605 | // delta: An positive/negative integer to be added to ctx.data_meta
606 | // Return: 0 on success or negative on error
607 | static int (*bpf_xdp_adjust_meta)(void *ctx, int offset) = (void*) // NOLINT
608 | BPF_FUNC_xdp_adjust_meta;
609 |
610 | static int (*bpf_get_stackid)(void *ctx, void *map, __u64 flags) = (void*) // NOLINT
611 | BPF_FUNC_get_stackid;
612 |
613 | static int (*bpf_csum_diff)(void *from, __u64 from_size, void *to, __u64 to_size, __u64 seed) = (void*) // NOLINT
614 | BPF_FUNC_csum_diff;
615 |
616 | static int (*bpf_skb_get_tunnel_opt)(void *ctx, void *md, __u32 size) = (void*) // NOLINT
617 | BPF_FUNC_skb_get_tunnel_opt;
618 |
619 | static int (*bpf_skb_set_tunnel_opt)(void *ctx, void *md, __u32 size) = (void*) // NOLINT
620 | BPF_FUNC_skb_set_tunnel_opt;
621 |
622 | static int (*bpf_skb_change_proto)(void *ctx, __u16 proto, __u64 flags) = (void*) // NOLINT
623 | BPF_FUNC_skb_change_proto;
624 |
625 | static int (*bpf_skb_change_type)(void *ctx, __u32 type) = (void*) // NOLINT
626 | BPF_FUNC_skb_change_type;
627 |
628 | static __u32 (*bpf_get_hash_recalc)(void *ctx) = (void*) // NOLINT
629 | BPF_FUNC_get_hash_recalc;
630 |
// Kernel BPF helper "function pointers". The pointer value is the helper's
// numeric ID (BPF_FUNC_*); the in-kernel loader/verifier rewrites each call
// site to the real helper, so these are never dereferenced in userspace.
static __u64 (*bpf_get_current_task)(void) = (void*) // NOLINT
    BPF_FUNC_get_current_task;

static int (*bpf_probe_write_user)(void *dst, void *src, __u32 size) = (void*) // NOLINT
    BPF_FUNC_probe_write_user;

static int (*bpf_skb_change_tail)(void *ctx, __u32 new_len, __u64 flags) = (void*) // NOLINT
    BPF_FUNC_skb_change_tail;

static int (*bpf_skb_pull_data)(void *ctx, __u32 len) = (void*) // NOLINT
    BPF_FUNC_skb_pull_data;

static int (*bpf_csum_update)(void *ctx, __u16 csum) = (void*) // NOLINT
    BPF_FUNC_csum_update;

static int (*bpf_set_hash_invalid)(void *ctx) = (void*) // NOLINT
    BPF_FUNC_set_hash_invalid;

static int (*bpf_get_numa_node_id)(void) = (void*) // NOLINT
    BPF_FUNC_get_numa_node_id;

static int (*bpf_skb_change_head)(void *ctx, __u32 len, __u64 flags) = (void*) // NOLINT
    BPF_FUNC_skb_change_head;

static int (*bpf_override_return)(void *pt_regs, unsigned long rc) = (void*) // NOLINT
    BPF_FUNC_override_return;

static int (*bpf_sock_ops_cb_flags_set)(void *skops, int flags) = (void*) // NOLINT
    BPF_FUNC_sock_ops_cb_flags_set;

// sockmap / sk_msg redirection and flow-control helpers.
static int (*bpf_msg_redirect_map)(void *msg, void *map, __u32 key, __u64 flags) = (void*) // NOLINT
    BPF_FUNC_msg_redirect_map;

static int (*bpf_msg_apply_bytes)(void *msg, __u32 bytes) = (void*) // NOLINT
    BPF_FUNC_msg_apply_bytes;

static int (*bpf_msg_cork_bytes)(void *msg, __u32 bytes) = (void*) // NOLINT
    BPF_FUNC_msg_cork_bytes;

static int (*bpf_msg_pull_data)(void *msg, __u32 start, __u32 end, __u64 flags) = (void*) // NOLINT
    BPF_FUNC_msg_pull_data;

static int (*bpf_bind)(void *ctx, void *addr, int addr_len) = (void*) // NOLINT
    BPF_FUNC_bind;

static int (*bpf_xdp_adjust_tail)(void *ctx, int offset) = (void*) // NOLINT
    BPF_FUNC_xdp_adjust_tail;

static int (*bpf_skb_get_xfrm_state)(void *ctx, __u32 index, void *xfrm_state, __u32 size, __u64 flags) = (void*) // NOLINT
    BPF_FUNC_skb_get_xfrm_state;

static int (*bpf_get_stack)(void *ctx, void *buf, __u32 size, __u64 flags) = (void*) // NOLINT
    BPF_FUNC_get_stack;

static int (*bpf_skb_load_bytes_relative)(void *ctx, __u32 offset, void *to, __u32 len, __u32 start_header) = (void*) // NOLINT
    BPF_FUNC_skb_load_bytes_relative;

static int (*bpf_fib_lookup)(void *ctx, void *params, int plen, __u32 flags) = (void*) // NOLINT
    BPF_FUNC_fib_lookup;

static int (*bpf_sock_hash_update)(void *ctx, void *map, void *key, __u64 flags) = (void*) // NOLINT
    BPF_FUNC_sock_hash_update;

static int (*bpf_msg_redirect_hash)(void *ctx, void *map, void *key, __u64 flags) = (void*) // NOLINT
    BPF_FUNC_msg_redirect_hash;
697 |
static int (*bpf_sk_redirect_hash)(void *ctx, void *map, void *key, __u64 flags) = (void*) // NOLINT
    BPF_FUNC_sk_redirect_hash;

// Lightweight-tunnel (LWT) / SRv6 segment-routing helpers.
static int (*bpf_lwt_push_encap)(void *skb, __u32 type, void *hdr, __u32 len) = (void*) // NOLINT
    BPF_FUNC_lwt_push_encap;

static int (*bpf_lwt_seg6_store_bytes)(void *ctx, __u32 offset, const void *from, __u32 len) = (void*) // NOLINT
    BPF_FUNC_lwt_seg6_store_bytes;

static int (*bpf_lwt_seg6_adjust_srh)(void *ctx, __u32 offset, __s32 delta) = (void*) // NOLINT
    BPF_FUNC_lwt_seg6_adjust_srh;

static int (*bpf_lwt_seg6_action)(void *ctx, __u32 action, void *param, __u32 param_len) = (void*) // NOLINT
    BPF_FUNC_lwt_seg6_action;

// Infrared remote-control (LIRC) decoder helpers.
static int (*bpf_rc_keydown)(void *ctx, __u32 protocol, __u64 scancode, __u32 toggle) = (void*) // NOLINT
    BPF_FUNC_rc_keydown;

static int (*bpf_rc_repeat)(void *ctx) = (void*) // NOLINT
    BPF_FUNC_rc_repeat;

// cgroup identity helpers.
static __u64 (*bpf_skb_cgroup_id)(void *skb) = (void*) // NOLINT
    BPF_FUNC_skb_cgroup_id;

static __u64 (*bpf_get_current_cgroup_id)(void) = (void*) // NOLINT
    BPF_FUNC_get_current_cgroup_id;

static __u64 (*bpf_skb_ancestor_cgroup_id)(void *skb, int ancestor_level) = (void*) // NOLINT
    BPF_FUNC_skb_ancestor_cgroup_id;

static void * (*bpf_get_local_storage)(void *map, __u64 flags) = (void*) // NOLINT
    BPF_FUNC_get_local_storage;

static int (*bpf_sk_select_reuseport)(void *reuse, void *map, void *key, __u64 flags) = (void*) // NOLINT
    BPF_FUNC_sk_select_reuseport;

// Socket lookup helpers (paired with bpf_sk_release below).
static struct bpf_sock *(*bpf_sk_lookup_tcp)(void *ctx,
                                             struct bpf_sock_tuple *tuple,
                                             int size, unsigned int netns_id,
                                             unsigned long long flags) = (void*) // NOLINT
    BPF_FUNC_sk_lookup_tcp;

static struct bpf_sock *(*bpf_sk_lookup_udp)(void *ctx,
                                             struct bpf_sock_tuple *tuple,
                                             int size, unsigned int netns_id,
                                             unsigned long long flags) = (void*) // NOLINT
    BPF_FUNC_sk_lookup_udp;

static int (*bpf_sk_release)(struct bpf_sock *sk) = (void*) // NOLINT
    BPF_FUNC_sk_release;

// Queue/stack map operations.
static int (*bpf_map_push_elem)(void *map, const void *value, __u64 flags) = (void*) // NOLINT
    BPF_FUNC_map_push_elem;

static int (*bpf_map_pop_elem)(void *map, void *value) = (void*) // NOLINT
    BPF_FUNC_map_pop_elem;

static int (*bpf_map_peek_elem)(void *map, void *value) = (void*) // NOLINT
    BPF_FUNC_map_peek_elem;
757 |
758 | static int (*bpf_msg_push_data)(void *skb, __u32 start, __u32 len, __u64 flags) = (void*) // NOLINT
759 | BPF_FUNC_msg_push_data;
760 |
761 | static int (*bpf_msg_pop_data)(void *msg, __u32 start, __u32 pop, __u64 flags) = (void*) // NOLINT
762 | BPF_FUNC_msg_pop_data;
763 |
764 | static int (*bpf_rc_pointer_rel)(void *ctx, __s32 rel_x, __s32 rel_y) = (void*) // NOLINT
765 | BPF_FUNC_rc_pointer_rel;
766 |
767 | static void (*bpf_spin_lock)(struct bpf_spin_lock *lock) = (void*) // NOLINT
768 | BPF_FUNC_spin_lock;
769 |
770 | static void (*bpf_spin_unlock)(struct bpf_spin_lock *lock) = (void*) // NOLINT
771 | BPF_FUNC_spin_unlock;
772 |
773 | static struct bpf_sock *(*bpf_sk_fullsock)(struct bpf_sock *sk) = (void*) // NOLINT
774 | BPF_FUNC_sk_fullsock;
775 |
776 | static struct bpf_tcp_sock *(*bpf_tcp_sock)(struct bpf_sock *sk) = (void*) // NOLINT
777 | BPF_FUNC_tcp_sock;
778 |
779 | static int (*bpf_skb_ecn_set_ce)(void *ctx) = (void*) // NOLINT
780 | BPF_FUNC_skb_ecn_set_ce;
781 |
782 | static struct bpf_sock *(*bpf_get_listener_sock)(struct bpf_sock *sk) = (void*) // NOLINT
783 | BPF_FUNC_get_listener_sock;
784 |
785 | static void *(*bpf_sk_storage_get)(void *map, struct bpf_sock *sk,
786 | void *value, __u64 flags) = (void*) // NOLINT
787 | BPF_FUNC_sk_storage_get;
788 |
789 | static int (*bpf_sk_storage_delete)(void *map, struct bpf_sock *sk) = (void*) // NOLINT
790 | BPF_FUNC_sk_storage_delete;
791 |
792 | static int (*bpf_send_signal)(unsigned sig) = (void *) // NOLINT
793 | BPF_FUNC_send_signal;
794 |
795 | // Adjust the xdp_md.data by delta
796 | // ctx: pointer to xdp_md
797 | // delta: An positive/negative integer to be added to ctx.data
798 | // Return: 0 on success or negative on error
799 | static int (*bpf_xdp_adjust_head)(const void *ctx, int delta) = (void *) // NOLINT
800 | BPF_FUNC_xdp_adjust_head;
801 |
802 | // clang-format on
803 |
// printk() - kernel trace mechanism, like printf()
// To get trace (debug) messages:
// - Add #define DEBUG into your eBPF program before includes
// - $ sudo cat /sys/kernel/debug/tracing/trace
// bpf_trace_printk() needs the format string on the BPF stack, hence the
// local ____fmt copy. Without DEBUG the macro expands to nothing, so trace
// calls have zero cost in production builds.
#ifdef DEBUG
#define bpf_printk(fmt, ...)                                   \
  ({                                                           \
    char ____fmt[] = fmt;                                      \
    bpf_trace_printk(____fmt, sizeof(____fmt), ##__VA_ARGS__); \
  })
#else
#define bpf_printk(fmt, ...)
#endif
817 |
// Since BPF programs cannot perform any function calls other than
// those to BPF helpers, common library code needs to be implemented
// as inline functions. In addition, also LLVM provides some built-ins
// that can be used for constant sizes.
// Mapping mem* onto compiler built-ins lets constant-size fills/copies
// compile down to inline BPF instructions instead of libc calls.
#define memset(dest, chr, n) __builtin_memset((dest), (chr), (n))
#define memcpy(dest, src, n) __builtin_memcpy((dest), (src), (n))
#define memmove(dest, src, n) __builtin_memmove((dest), (src), (n))

// Do not allow use printf()
// Any printf() call becomes a reference to this undeclared identifier,
// turning misuse into a compile error that names the right alternative.
#define printf(fmt, ...) do_not_use_printf_use_bpf_printk
828 |
// Macro to define BPF Map
// BPF_MAP_DEF places the definition into the ELF "maps" section where the
// loader discovers it; BPF_MAP_ADD is only meaningful in the mock
// (non-BPF) build, so here it expands to nothing.
#define BPF_MAP_DEF(name) struct bpf_map_def SEC("maps") name
#define BPF_MAP_ADD(x)
832 |
/* https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/bpf.h#L4283 */
/* Flag bits for the 'flags' argument of bpf_fib_lookup().
 * DIRECT: Skip the FIB rules and go to FIB table associated with device
 * OUTPUT: Do lookup from egress perspective; default is ingress
 */
enum {
  BPF_FIB_LOOKUP_DIRECT = (1U << 0),
  BPF_FIB_LOOKUP_OUTPUT = (1U << 1),
};
841 |
/* Return codes of bpf_fib_lookup(); values start at 0 (SUCCESS). */
enum {
  BPF_FIB_LKUP_RET_SUCCESS,      /* lookup successful */
  BPF_FIB_LKUP_RET_BLACKHOLE,    /* dest is blackholed; can be dropped */
  BPF_FIB_LKUP_RET_UNREACHABLE,  /* dest is unreachable; can be dropped */
  BPF_FIB_LKUP_RET_PROHIBIT,     /* dest not allowed; can be dropped */
  BPF_FIB_LKUP_RET_NOT_FWDED,    /* packet is not forwarded */
  BPF_FIB_LKUP_RET_FWD_DISABLED, /* fwding is not enabled on ingress */
  BPF_FIB_LKUP_RET_UNSUPP_LWT,   /* fwd requires encapsulation */
  BPF_FIB_LKUP_RET_NO_NEIGH,     /* no neighbor entry for nh */
  BPF_FIB_LKUP_RET_FRAG_NEEDED,  /* fragmentation required to fwd */
};
853 |
/* Input/output argument for the bpf_fib_lookup() helper declared above;
 * mirrors the kernel UAPI struct of the same name. */
struct bpf_fib_lookup {
  /* input: network family for lookup (AF_INET, AF_INET6)
   * output: network family of egress nexthop
   */
  __u8 family;

  /* set if lookup is to consider L4 data - e.g., FIB rules */
  __u8 l4_protocol;
  __be16 sport;
  __be16 dport;

  /* total length of packet from network header - used for MTU check */
  __u16 tot_len;

  /* input: L3 device index for lookup
   * output: device index from FIB lookup
   */
  __u32 ifindex;

  union {
    /* inputs to lookup */
    __u8 tos;        /* AF_INET */
    __be32 flowinfo; /* AF_INET6, flow_label + priority */

    /* output: metric of fib result (IPv4/IPv6 only) */
    __u32 rt_metric;
  };

  union {
    __be32 ipv4_src;
    __u32 ipv6_src[4]; /* in6_addr; network order */
  };

  /* input to bpf_fib_lookup, ipv{4,6}_dst is destination address in
   * network header. output: bpf_fib_lookup sets to gateway address
   * if FIB lookup returns gateway route
   */
  union {
    __be32 ipv4_dst;
    __u32 ipv6_dst[4]; /* in6_addr; network order */
  };

  /* output */
  __be16 h_vlan_proto;
  __be16 h_vlan_TCI;
  __u8 smac[6]; /* ETH_ALEN */
  __u8 dmac[6]; /* ETH_ALEN */
};
902 |
// offsetof gets the byte offset of a struct member.
// Uses the compiler built-in (available in both clang and GCC, the only
// compilers this header supports - it already relies on __builtin_mem* and
// __attribute__). Unlike the classic ((size_t)&((TYPE *)0)->MEMBER) trick,
// the built-in has no undefined behavior and is always a constant expression.
#ifndef offsetof
#define offsetof(TYPE, MEMBER) __builtin_offsetof(TYPE, MEMBER)
#endif
907 |
908 | ///// end of __BPF__ /////
909 |
910 | #else
911 |
912 | //// All other platforms ////
913 |
// SEC() is useless for non eBPF - so just dummy (no ELF sections needed).
#define SEC(NAME)
// Functions must be inlined only for eBPF, so don't enforce it for *nix/mac.
// Also disable "unused function" warning -
// since eBPF programs define functions mostly in headers.
#define INLINE static __attribute__((unused))

// Disable warnings for "pragma unroll(all)" (a clang/BPF-only pragma).
#pragma GCC diagnostic ignored "-Wunknown-pragmas"
923 |
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/queue.h>
928 |
// XDP metadata - defined twice because real eBPF uses 32 bit pointers,
// which are not acceptable for cross platform compilation.
// This is the host-pointer variant used by the mock/unit-test build.
struct xdp_md {
  void *data;
  void *data_end;
  void *data_meta;
  /* Below access go through struct xdp_rxq_info */
  __u32 ingress_ifindex; /* rxq->dev->ifindex */
  __u32 rx_queue_index;  /* rxq->queue_index */

  __u32 egress_ifindex; /* txq->dev->ifindex */
};
941 |
// Mock BPF map support:
// In order to automatically find all defined BPF maps from GO program we
// need to maintain a linked list of maps (to be able to iterate and create
// them all). This could be easily and nicely done using
// __attribute__ ((constructor)), which is logically close to func init()
// in GO.
struct __create_map_def {
  const char *name;       // map name, stringized from the variable name
  void *map_data; // Mock version only: holds head to single linked list of map
                  // items
  struct bpf_map_def *map_def;
  SLIST_ENTRY(__create_map_def) next; // sys/queue.h singly-linked list link
};

// Declaration only. Definition held in mock_map package.
SLIST_HEAD(__maps_head_def, __create_map_def);
extern struct __maps_head_def *__maps_head;
959 |
// Mock variants of the map macros: BPF_MAP_DEF declares a plain static
// struct; BPF_MAP_ADD registers the map into the global __maps_head list
// before main() runs, via a compiler-generated constructor function, so
// host-side code can enumerate every defined map.
#define BPF_MAP_DEF(x) static struct bpf_map_def x

#define BPF_MAP_ADD(x)                                              \
  static __attribute__((constructor)) void __bpf_map_##x() {        \
    static struct __create_map_def __bpf_map_entry_##x;             \
    __bpf_map_entry_##x.name = #x;                                  \
    __bpf_map_entry_##x.map_data = NULL;                            \
    __bpf_map_entry_##x.map_def = &x;                               \
    SLIST_INSERT_HEAD(__maps_head, &__bpf_map_entry_##x, next);     \
  }
970 |
// BPF helper prototypes - definition is up to mac/linux host program
// (prototypes only; the test harness links in real implementations).
void *bpf_map_lookup_elem(const void *map, const void *key);
int bpf_map_update_elem(const void *map, const void *key, const void *value,
                        __u64 flags);
int bpf_map_delete_elem(const void *map, const void *key);
976 |
// bpf_printk() is just printf() followed by fflush() so trace output shows
// up immediately. Wrapped in do { ... } while (0) so the macro expands to a
// single statement: the previous two-statement expansion ran fflush()
// unconditionally when used inside an unbraced if/else body.
#define bpf_printk(fmt, ...)     \
  do {                           \
    printf(fmt, ##__VA_ARGS__);  \
    fflush(stdout);              \
  } while (0)
981 |
// bpf_tail_call() is nothing: only relevant for BPF arch
// (expands to nothing so programs using tail calls still compile here).
#define bpf_tail_call(ctx, map, index)
984 |
// adjust_meta / adjust_head are simple functions that move a pointer
986 |
987 | UNUSED static int bpf_xdp_adjust_meta(struct xdp_md *ctx, int offset) {
988 | // For unittests only - function returns error if data_meta points to data_end
989 | // which never the case in real world
990 | if (ctx->data_meta == ctx->data_end) {
991 | return 1;
992 | }
993 | ctx->data_meta = (__u8 *)ctx->data_meta + offset; // NOLINT
994 |
995 | return 0;
996 | }
997 |
998 | UNUSED static int bpf_xdp_adjust_head(struct xdp_md *ctx, int offset) {
999 | ctx->data = (__u8 *)ctx->data + offset; // NOLINT
1000 |
1001 | return 0;
1002 | }
1003 |
// Mock bpf_perf_event_output(): ignores all arguments and reports success.
// Real perf-event submission only exists on the BPF side.
UNUSED static int bpf_perf_event_output(void *ctx, void *map, __u64 index,
                                        void *data, __u32 size) {
  return 0;
}
1008 |
1009 | #endif // of other than __BPF__
1010 |
// Finally make sure that all types have expected size regardless of platform,
// so map key/value layouts match between the BPF and host builds.
static_assert(sizeof(__u8) == 1, "wrong_u8_size");
static_assert(sizeof(__u16) == 2, "wrong_u16_size");
static_assert(sizeof(__u32) == 4, "wrong_u32_size");
static_assert(sizeof(__u64) == 8, "wrong_u64_size");
1016 |
1017 | #endif
1018 |
--------------------------------------------------------------------------------