├── .gitignore
├── LICENSE
├── Makefile
├── README.md
├── bpf
│   ├── headers
│   │   ├── bpf_endian.h
│   │   ├── bpf_helper_defs.h
│   │   ├── bpf_helpers.h
│   │   └── vmlinux.h
│   └── iptables-bpf.c
├── go.mod
├── go.sum
└── main.go

/.gitignore:
--------------------------------------------------------------------------------
1 | # Binaries for programs and plugins
2 | *.exe
3 | *.exe~
4 | *.dll
5 | *.so
6 | *.dylib
7 | 
8 | # Test binary, built with `go test -c`
9 | *.test
10 | 
11 | # Output of the go coverage tool, specifically when used with LiteIDE
12 | *.out
13 | 
14 | # Dependency directories (remove the comment below to include it)
15 | # vendor/
16 | 
17 | iptables-bpf
18 | iptables-bpf.elf
19 | *.log
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 | 
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 | 
7 | 1. Definitions.
8 | 
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 | 
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 | 
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 | 
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 | 
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 | 
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 | 
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 | 
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 | 
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner.
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | 
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
CLANG := clang
CLANG_INCLUDE := -I./bpf/headers

EBPF_SOURCE := ./bpf/iptables-bpf.c
EBPF_BINARY := iptables-bpf.elf
EBPF_PINNED := /sys/fs/bpf/iptbpf

GO := go
GOBUILD := $(GO) build -v

GO_SOURCE := main.go
GO_BINARY := iptables-bpf

.PHONY: build clean rebuild setup mapid

build: $(EBPF_BINARY) $(GO_BINARY)

$(EBPF_BINARY): $(EBPF_SOURCE)
	$(CLANG) $(CLANG_INCLUDE) -O2 -g -target bpf -c $^ -o $@

$(GO_BINARY): $(GO_SOURCE)
	$(GOBUILD) -o $(GO_BINARY) $(GO_SOURCE)

clean:
	rm -f $(EBPF_BINARY)
	rm -f $(GO_BINARY)
	-iptables -D OUTPUT -m bpf --object-pinned $(EBPF_PINNED) -j DROP
	rm -f $(EBPF_PINNED)

rebuild: clean build

setup:
	bpftool prog load $(EBPF_BINARY) $(EBPF_PINNED)
	iptables -I OUTPUT -m bpf --object-pinned $(EBPF_PINNED) -j DROP

mapid:
	@bpftool map list | grep filter_daddrs | awk -F: '{print $$1}'
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# This is how you play with iptables-bpf

While reading the [iptables-nfqueue](https://asphaltt.github.io/post/iptables-nfqueue/) source code, I noticed that iptables has a bpf feature, so I looked up what material there is on `iptables-bpf`:

- [Using iptables' bpf match to optimize rule sets: an approach beyond HiPAC/ipset/the n+1 model](https://blog.csdn.net/dog250/article/details/77790504)
- [HOW WE USED EBPF TO BUILD PROGRAMMABLE PACKET FILTERING IN MAGIC FIREWALL](https://noise.getoto.net/2021/12/06/how-we-used-ebpf-to-build-programmable-packet-filtering-in-magic-firewall/)
- [iptables-extensions](https://ipset.netfilter.org/iptables-extensions.man.html)

Material on `iptables-bpf` is scarce; for bpf in general, see [ebpf.io](https://ebpf.io) and [ebpf.top](https://ebpf.top). After seeing how dog250 does it, I felt for the man for a second or two: hand-writing raw bpf bytecode is out of my league.

```txt
bpf

Match using Linux Socket Filter. Expects a path to an eBPF object or a cBPF program in decimal format.

--object-pinned path
Pass a path to a pinned eBPF object.

Applications load eBPF programs into the kernel with the bpf() system call and BPF_PROG_LOAD command and can pin them in a virtual filesystem with BPF_OBJ_PIN. To use a pinned object in iptables, mount the bpf filesystem using

mount -t bpf bpffs ${BPF_MOUNT}

then insert the filter in iptables by path:

iptables -A OUTPUT -m bpf --object-pinned ${BPF_MOUNT}/{PINNED_PATH} -j ACCEPT
```

Now this is what I wanted. Writing a `socket` bpf program is something I can actually do.

> Source code: [github.com/Asphaltt/iptables-bpf](https://github.com/Asphaltt/iptables-bpf)

## A bpf program is nothing without a bpf map

Hard-coding IP addresses into the bpf program is not an option, so the IP addresses go into a bpf map instead. The source code:

```c
struct {
    __uint(type, BPF_MAP_TYPE_HASH);
    __uint(max_entries, 16);
    __type(key, u32);  /* destination IPv4 address, network byte order */
    __type(value, u8); /* 1 = drop packets to this address */
} filter_daddrs SEC(".maps");

SEC("socket")
int filter_iptables(void *skb) {
    struct iphdr iph;
    u8 *filtered;

    /* Copy the IP header out of the packet, starting at the network header. */
    if (bpf_skb_load_bytes_relative(skb, 0, &iph, sizeof(iph), BPF_HDR_START_NET) < 0)
        return BPF_OK;

    /* For the iptables bpf match, a non-zero return value means "this packet
     * matches the rule"; the rule's -j DROP target does the actual dropping. */
    filtered = bpf_map_lookup_elem(&filter_daddrs, &iph.daddr);
    if (filtered != NULL && *filtered == 1)
        return BPF_DROP;

    return BPF_OK;
}
```

The rule `iptables -I OUTPUT -m bpf --object-pinned $(EBPF_PINNED) -j DROP` runs this bpf program and drops every packet whose destination address is found in the bpf map. Here it is in action:

```bash
# ping -c4 223.5.5.5
PING 223.5.5.5 (223.5.5.5) 56(84) bytes of data.
64 bytes from 223.5.5.5: icmp_seq=1 ttl=63 time=167 ms
64 bytes from 223.5.5.5: icmp_seq=2 ttl=63 time=159 ms
64 bytes from 223.5.5.5: icmp_seq=3 ttl=63 time=1047 ms

--- 223.5.5.5 ping statistics ---
4 packets transmitted, 3 received, 25% packet loss, time 3081ms
rtt min/avg/max/mdev = 158.744/457.539/1047.019/416.838 ms, pipe 2

# make
clang -I./bpf/headers -O2 -g -target bpf -c bpf/iptables-bpf.c -o iptables-bpf.elf
go build -v -o iptables-bpf main.go

# make setup
bpftool prog load iptables-bpf.elf /sys/fs/bpf/iptbpf
iptables -I OUTPUT -m bpf --object-pinned /sys/fs/bpf/iptbpf -j DROP

# make mapid
986

# ./iptables-bpf -m 986 -d 223.5.5.5
2021/12/16 15:18:49 223.5.5.5 can't be pinged

# ping -c4 223.5.5.5
PING 223.5.5.5 (223.5.5.5) 56(84) bytes of data.

--- 223.5.5.5 ping statistics ---
4 packets transmitted, 0 received, 100% packet loss, time 3065ms

# make clean
rm -f iptables-bpf.elf
rm -f iptables-bpf
iptables -D OUTPUT -m bpf --object-pinned /sys/fs/bpf/iptbpf -j DROP
rm -f /sys/fs/bpf/iptbpf

# ping -c4 223.5.5.5
PING 223.5.5.5 (223.5.5.5) 56(84) bytes of data.
64 bytes from 223.5.5.5: icmp_seq=1 ttl=63 time=139 ms
64 bytes from 223.5.5.5: icmp_seq=2 ttl=63 time=157 ms
64 bytes from 223.5.5.5: icmp_seq=3 ttl=63 time=169 ms
64 bytes from 223.5.5.5: icmp_seq=4 ttl=63 time=121 ms

--- 223.5.5.5 ping statistics ---
4 packets transmitted, 4 received, 0% packet loss, time 3087ms
rtt min/avg/max/mdev = 120.720/146.495/168.600/18.163 ms
```

## Managing the bpf map

The big hill has been climbed; a small one remains.

Manage the bpf map by hand with bpftool? No way. Let's manage it with Go.

```go
package main

import (
	"encoding/binary"
	"flag"
	"log"
	"strings"

	"github.com/cilium/ebpf"
	"inet.af/netaddr"
)

func main() {

	var daddr string
	var bpfMap int
	flag.StringVar(&daddr, "d", "", "ip addresses to drop, separated by ','")
	flag.IntVar(&bpfMap, "m", 0, "the id of the bpf map(filter_daddrs)")
	flag.Parse()

	var ips []netaddr.IP
	addrs := strings.FieldsFunc(daddr, func(r rune) bool { return r == ',' })
	for _, addr := range addrs {
		ip, err := netaddr.ParseIP(addr)
		if err != nil || !ip.Is4() {
			log.Fatalf("%s is not a valid IPv4 address", addr)
		}

		ips = append(ips, ip)
	}
	if len(ips) == 0 {
		log.Fatalf("no ip address(es) to be dropped")
	}

	m, err := ebpf.NewMapFromID(ebpf.MapID(bpfMap))
	if err != nil {
		log.Fatalf("bpf map(%d) not found, err: %v", bpfMap, err)
	}

	val := uint8(1)
	for _, ip := range ips {
		_ip := ip.As4()
		// Reinterpret the four network-order bytes as a native uint32 so
		// that, on a little-endian machine, the key's in-memory layout
		// matches iph.daddr as the bpf program sees it.
		ipval := binary.LittleEndian.Uint32(_ip[:])
		if err := m.Update(ipval, val, ebpf.UpdateAny); err != nil {
			log.Fatalf("failed to upsert data to bpf map(%d), err: %v", bpfMap, err)
		}
	}

	log.Printf("%s can't be pinged", daddr)
}
```

Managing the bpf map from Go lifts the usability of `iptables-bpf` a big step up. Compared with plain iptables rules, `iptables-bpf` brings iptables a level of programmability that ordinary rules simply don't have.
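As a quick way to verify what the Go tool did, bpftool can show the pinned program and dump the map contents directly. A small sketch, reusing the map id 986 from the demo above (the id will differ on your machine):

```bash
# confirm the pinned program is loaded
bpftool prog show pinned /sys/fs/bpf/iptbpf

# dump the filter map; keys are shown as raw hex bytes in network byte
# order, so 223.5.5.5 appears as df 05 05 05
bpftool map dump id 986

# delete an entry again to let traffic to that address through
bpftool map delete id 986 key 0xdf 0x05 0x05 0x05
```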
## Test environment

```bash
# lsb_release -a
No LSB modules are available.
Distributor ID: Ubuntu
Description: Ubuntu 21.04
Release: 21.04
Codename: hirsute

# uname -a
Linux pagani 5.11.0-31-generic #33-Ubuntu SMP Wed Aug 11 13:19:04 UTC 2021 x86_64 x86_64 x86_64 GNU/Linux
```

### iptables

Some iptables builds ship with the bpf feature enabled and some do not. If yours does not, running `iptables -I OUTPUT -m bpf --object-pinned /sys/fs/bpf/iptbpf -j DROP` fails with `iptables v1.6.1: No bpf header, kernel headers too old?`, and you need to rebuild iptables:

```bash
git clone git://git.netfilter.org/iptables.git
cd iptables
bash autogen.sh
apt install -y libpcap-dev # the bpf compiler depends on libpcap
./configure --enable-bpf-compiler --disable-nftables # disabling nftables is the quickest way to get a bpf-capable iptables installed
# watch the configure summary for this:
Iptables Configuration:
...
BPF utils support: yes

make -j4
make install
# iptables now has the bpf feature enabled
```

## Summary

bpf gives iptables a lot of room for imagination; it just has not been unleashed yet.

If you wanted to implement iptables' matching capabilities on top of `iptables-bpf`, how would you build a high-performance matching algorithm inside the bpf program?
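One direction worth exploring, sketched here as an illustration rather than as this repo's code (`struct lpm_key` and `filter_dnets` are made-up names): on kernels with `BPF_MAP_TYPE_LPM_TRIE` (Linux 4.11+), a longest-prefix-match trie lets a single entry cover a whole CIDR block instead of one exact address.

```c
/* Key layout required by LPM trie maps: a 32-bit prefix length
 * followed by the data the prefix applies to. */
struct lpm_key {
    u32 prefixlen; /* number of significant bits in daddr */
    u32 daddr;     /* IPv4 address, network byte order */
};

struct {
    __uint(type, BPF_MAP_TYPE_LPM_TRIE);
    __uint(max_entries, 1024);
    __uint(map_flags, BPF_F_NO_PREALLOC); /* mandatory for LPM tries */
    __type(key, struct lpm_key);
    __type(value, u8);
} filter_dnets SEC(".maps");

SEC("socket")
int filter_iptables_lpm(void *skb) {
    struct iphdr iph;
    struct lpm_key key;

    if (bpf_skb_load_bytes_relative(skb, 0, &iph, sizeof(iph), BPF_HDR_START_NET) < 0)
        return BPF_OK;

    key.prefixlen = 32; /* look up the full destination address */
    key.daddr = iph.daddr;

    /* The longest matching prefix wins: a single entry for 10.0.0.0/8
     * matches every destination inside that subnet with one lookup. */
    if (bpf_map_lookup_elem(&filter_dnets, &key) != NULL)
        return BPF_DROP;

    return BPF_OK;
}
```

Userspace then inserts `{prefixlen, network}` pairs, e.g. `{8, 10.0.0.0}`, instead of one entry per address.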
--------------------------------------------------------------------------------
/bpf/headers/bpf_endian.h:
--------------------------------------------------------------------------------
1 | /* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
2 | #ifndef __BPF_ENDIAN__
3 | #define __BPF_ENDIAN__
4 | 
5 | /*
6 |  * Isolate byte #n and put it into byte #m, for __u##b type.
7 | * E.g., moving byte #6 (nnnnnnnn) into byte #1 (mmmmmmmm) for __u64: 8 | * 1) xxxxxxxx nnnnnnnn xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx mmmmmmmm xxxxxxxx 9 | * 2) nnnnnnnn xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx mmmmmmmm xxxxxxxx 00000000 10 | * 3) 00000000 00000000 00000000 00000000 00000000 00000000 00000000 nnnnnnnn 11 | * 4) 00000000 00000000 00000000 00000000 00000000 00000000 nnnnnnnn 00000000 12 | */ 13 | #define ___bpf_mvb(x, b, n, m) ((__u##b)(x) << (b-(n+1)*8) >> (b-8) << (m*8)) 14 | 15 | #define ___bpf_swab16(x) ((__u16)( \ 16 | ___bpf_mvb(x, 16, 0, 1) | \ 17 | ___bpf_mvb(x, 16, 1, 0))) 18 | 19 | #define ___bpf_swab32(x) ((__u32)( \ 20 | ___bpf_mvb(x, 32, 0, 3) | \ 21 | ___bpf_mvb(x, 32, 1, 2) | \ 22 | ___bpf_mvb(x, 32, 2, 1) | \ 23 | ___bpf_mvb(x, 32, 3, 0))) 24 | 25 | #define ___bpf_swab64(x) ((__u64)( \ 26 | ___bpf_mvb(x, 64, 0, 7) | \ 27 | ___bpf_mvb(x, 64, 1, 6) | \ 28 | ___bpf_mvb(x, 64, 2, 5) | \ 29 | ___bpf_mvb(x, 64, 3, 4) | \ 30 | ___bpf_mvb(x, 64, 4, 3) | \ 31 | ___bpf_mvb(x, 64, 5, 2) | \ 32 | ___bpf_mvb(x, 64, 6, 1) | \ 33 | ___bpf_mvb(x, 64, 7, 0))) 34 | 35 | /* LLVM's BPF target selects the endianness of the CPU 36 | * it compiles on, or the user specifies (bpfel/bpfeb), 37 | * respectively. The used __BYTE_ORDER__ is defined by 38 | * the compiler, we cannot rely on __BYTE_ORDER from 39 | * libc headers, since it doesn't reflect the actual 40 | * requested byte order. 41 | * 42 | * Note, LLVM's BPF target has different __builtin_bswapX() 43 | * semantics. It does map to BPF_ALU | BPF_END | BPF_TO_BE 44 | * in bpfel and bpfeb case, which means below, that we map 45 | * to cpu_to_be16(). We could use it unconditionally in BPF 46 | * case, but better not rely on it, so that this header here 47 | * can be used from application and BPF program side, which 48 | * use different targets. 49 | */ 50 | #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ 51 | # define __bpf_ntohs(x) __builtin_bswap16(x) 52 | # define __bpf_htons(x) __builtin_bswap16(x) 53 | # define __bpf_constant_ntohs(x) ___bpf_swab16(x) 54 | # define __bpf_constant_htons(x) ___bpf_swab16(x) 55 | # define __bpf_ntohl(x) __builtin_bswap32(x) 56 | # define __bpf_htonl(x) __builtin_bswap32(x) 57 | # define __bpf_constant_ntohl(x) ___bpf_swab32(x) 58 | # define __bpf_constant_htonl(x) ___bpf_swab32(x) 59 | # define __bpf_be64_to_cpu(x) __builtin_bswap64(x) 60 | # define __bpf_cpu_to_be64(x) __builtin_bswap64(x) 61 | # define __bpf_constant_be64_to_cpu(x) ___bpf_swab64(x) 62 | # define __bpf_constant_cpu_to_be64(x) ___bpf_swab64(x) 63 | #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ 64 | # define __bpf_ntohs(x) (x) 65 | # define __bpf_htons(x) (x) 66 | # define __bpf_constant_ntohs(x) (x) 67 | # define __bpf_constant_htons(x) (x) 68 | # define __bpf_ntohl(x) (x) 69 | # define __bpf_htonl(x) (x) 70 | # define __bpf_constant_ntohl(x) (x) 71 | # define __bpf_constant_htonl(x) (x) 72 | # define __bpf_be64_to_cpu(x) (x) 73 | # define __bpf_cpu_to_be64(x) (x) 74 | # define __bpf_constant_be64_to_cpu(x) (x) 75 | # define __bpf_constant_cpu_to_be64(x) (x) 76 | #else 77 | # error "Fix your compiler's __BYTE_ORDER__?!" 78 | #endif 79 | 80 | #define bpf_htons(x) \ 81 | (__builtin_constant_p(x) ? \ 82 | __bpf_constant_htons(x) : __bpf_htons(x)) 83 | #define bpf_ntohs(x) \ 84 | (__builtin_constant_p(x) ? \ 85 | __bpf_constant_ntohs(x) : __bpf_ntohs(x)) 86 | #define bpf_htonl(x) \ 87 | (__builtin_constant_p(x) ? \ 88 | __bpf_constant_htonl(x) : __bpf_htonl(x)) 89 | #define bpf_ntohl(x) \ 90 | (__builtin_constant_p(x) ? 
\ 91 | __bpf_constant_ntohl(x) : __bpf_ntohl(x)) 92 | #define bpf_cpu_to_be64(x) \ 93 | (__builtin_constant_p(x) ? \ 94 | __bpf_constant_cpu_to_be64(x) : __bpf_cpu_to_be64(x)) 95 | #define bpf_be64_to_cpu(x) \ 96 | (__builtin_constant_p(x) ? \ 97 | __bpf_constant_be64_to_cpu(x) : __bpf_be64_to_cpu(x)) 98 | 99 | #endif /* __BPF_ENDIAN__ */ 100 | -------------------------------------------------------------------------------- /bpf/headers/bpf_helper_defs.h: -------------------------------------------------------------------------------- 1 | /* This is auto-generated file. See bpf_doc.py for details. */ 2 | 3 | /* Forward declarations of BPF structs */ 4 | struct bpf_fib_lookup; 5 | struct bpf_sk_lookup; 6 | struct bpf_perf_event_data; 7 | struct bpf_perf_event_value; 8 | struct bpf_pidns_info; 9 | struct bpf_redir_neigh; 10 | struct bpf_sock; 11 | struct bpf_sock_addr; 12 | struct bpf_sock_ops; 13 | struct bpf_sock_tuple; 14 | struct bpf_spin_lock; 15 | struct bpf_sysctl; 16 | struct bpf_tcp_sock; 17 | struct bpf_tunnel_key; 18 | struct bpf_xfrm_state; 19 | struct linux_binprm; 20 | struct pt_regs; 21 | struct sk_reuseport_md; 22 | struct sockaddr; 23 | struct tcphdr; 24 | struct seq_file; 25 | struct tcp6_sock; 26 | struct tcp_sock; 27 | struct tcp_timewait_sock; 28 | struct tcp_request_sock; 29 | struct udp6_sock; 30 | struct task_struct; 31 | struct __sk_buff; 32 | struct sk_msg_md; 33 | struct xdp_md; 34 | struct path; 35 | struct btf_ptr; 36 | struct inode; 37 | struct socket; 38 | struct file; 39 | 40 | /* 41 | * bpf_map_lookup_elem 42 | * 43 | * Perform a lookup in *map* for an entry associated to *key*. 44 | * 45 | * Returns 46 | * Map value associated to *key*, or **NULL** if no entry was 47 | * found. 48 | */ 49 | static void *(*bpf_map_lookup_elem)(void *map, const void *key) = (void *) 1; 50 | 51 | /* 52 | * bpf_map_update_elem 53 | * 54 | * Add or update the value of the entry associated to *key* in 55 | * *map* with *value*. *flags* is one of: 56 | * 57 | * **BPF_NOEXIST** 58 | * The entry for *key* must not exist in the map. 59 | * **BPF_EXIST** 60 | * The entry for *key* must already exist in the map. 61 | * **BPF_ANY** 62 | * No condition on the existence of the entry for *key*. 63 | * 64 | * Flag value **BPF_NOEXIST** cannot be used for maps of types 65 | * **BPF_MAP_TYPE_ARRAY** or **BPF_MAP_TYPE_PERCPU_ARRAY** (all 66 | * elements always exist), the helper would return an error. 67 | * 68 | * Returns 69 | * 0 on success, or a negative error in case of failure. 70 | */ 71 | static long (*bpf_map_update_elem)(void *map, const void *key, const void *value, __u64 flags) = (void *) 2; 72 | 73 | /* 74 | * bpf_map_delete_elem 75 | * 76 | * Delete entry with *key* from *map*. 77 | * 78 | * Returns 79 | * 0 on success, or a negative error in case of failure. 80 | */ 81 | static long (*bpf_map_delete_elem)(void *map, const void *key) = (void *) 3; 82 | 83 | /* 84 | * bpf_probe_read 85 | * 86 | * For tracing programs, safely attempt to read *size* bytes from 87 | * kernel space address *unsafe_ptr* and store the data in *dst*. 88 | * 89 | * Generally, use **bpf_probe_read_user**\ () or 90 | * **bpf_probe_read_kernel**\ () instead. 91 | * 92 | * Returns 93 | * 0 on success, or a negative error in case of failure. 94 | */ 95 | static long (*bpf_probe_read)(void *dst, __u32 size, const void *unsafe_ptr) = (void *) 4; 96 | 97 | /* 98 | * bpf_ktime_get_ns 99 | * 100 | * Return the time elapsed since system boot, in nanoseconds. 101 | * Does not include time the system was suspended. 
102 | * See: **clock_gettime**\ (**CLOCK_MONOTONIC**) 103 | * 104 | * Returns 105 | * Current *ktime*. 106 | */ 107 | static __u64 (*bpf_ktime_get_ns)(void) = (void *) 5; 108 | 109 | /* 110 | * bpf_trace_printk 111 | * 112 | * This helper is a "printk()-like" facility for debugging. It 113 | * prints a message defined by format *fmt* (of size *fmt_size*) 114 | * to file *\/sys/kernel/debug/tracing/trace* from DebugFS, if 115 | * available. It can take up to three additional **u64** 116 | * arguments (as an eBPF helpers, the total number of arguments is 117 | * limited to five). 118 | * 119 | * Each time the helper is called, it appends a line to the trace. 120 | * Lines are discarded while *\/sys/kernel/debug/tracing/trace* is 121 | * open, use *\/sys/kernel/debug/tracing/trace_pipe* to avoid this. 122 | * The format of the trace is customizable, and the exact output 123 | * one will get depends on the options set in 124 | * *\/sys/kernel/debug/tracing/trace_options* (see also the 125 | * *README* file under the same directory). However, it usually 126 | * defaults to something like: 127 | * 128 | * :: 129 | * 130 | * telnet-470 [001] .N.. 419421.045894: 0x00000001: 131 | * 132 | * In the above: 133 | * 134 | * * ``telnet`` is the name of the current task. 135 | * * ``470`` is the PID of the current task. 136 | * * ``001`` is the CPU number on which the task is 137 | * running. 138 | * * In ``.N..``, each character refers to a set of 139 | * options (whether irqs are enabled, scheduling 140 | * options, whether hard/softirqs are running, level of 141 | * preempt_disabled respectively). **N** means that 142 | * **TIF_NEED_RESCHED** and **PREEMPT_NEED_RESCHED** 143 | * are set. 144 | * * ``419421.045894`` is a timestamp. 145 | * * ``0x00000001`` is a fake value used by BPF for the 146 | * instruction pointer register. 147 | * * ```` is the message formatted with 148 | * *fmt*. 149 | * 150 | * The conversion specifiers supported by *fmt* are similar, but 151 | * more limited than for printk(). They are **%d**, **%i**, 152 | * **%u**, **%x**, **%ld**, **%li**, **%lu**, **%lx**, **%lld**, 153 | * **%lli**, **%llu**, **%llx**, **%p**, **%s**. No modifier (size 154 | * of field, padding with zeroes, etc.) is available, and the 155 | * helper will return **-EINVAL** (but print nothing) if it 156 | * encounters an unknown specifier. 157 | * 158 | * Also, note that **bpf_trace_printk**\ () is slow, and should 159 | * only be used for debugging purposes. For this reason, a notice 160 | * block (spanning several lines) is printed to kernel logs and 161 | * states that the helper should not be used "for production use" 162 | * the first time this helper is used (or more precisely, when 163 | * **trace_printk**\ () buffers are allocated). For passing values 164 | * to user space, perf events should be preferred. 165 | * 166 | * Returns 167 | * The number of bytes written to the buffer, or a negative error 168 | * in case of failure. 169 | */ 170 | static long (*bpf_trace_printk)(const char *fmt, __u32 fmt_size, ...) = (void *) 6; 171 | 172 | /* 173 | * bpf_get_prandom_u32 174 | * 175 | * Get a pseudo-random number. 176 | * 177 | * From a security point of view, this helper uses its own 178 | * pseudo-random internal state, and cannot be used to infer the 179 | * seed of other random functions in the kernel. However, it is 180 | * essential to note that the generator used by the helper is not 181 | * cryptographically secure. 182 | * 183 | * Returns 184 | * A random 32-bit unsigned value. 
185 | */ 186 | static __u32 (*bpf_get_prandom_u32)(void) = (void *) 7; 187 | 188 | /* 189 | * bpf_get_smp_processor_id 190 | * 191 | * Get the SMP (symmetric multiprocessing) processor id. Note that 192 | * all programs run with preemption disabled, which means that the 193 | * SMP processor id is stable during all the execution of the 194 | * program. 195 | * 196 | * Returns 197 | * The SMP id of the processor running the program. 198 | */ 199 | static __u32 (*bpf_get_smp_processor_id)(void) = (void *) 8; 200 | 201 | /* 202 | * bpf_skb_store_bytes 203 | * 204 | * Store *len* bytes from address *from* into the packet 205 | * associated to *skb*, at *offset*. *flags* are a combination of 206 | * **BPF_F_RECOMPUTE_CSUM** (automatically recompute the 207 | * checksum for the packet after storing the bytes) and 208 | * **BPF_F_INVALIDATE_HASH** (set *skb*\ **->hash**, *skb*\ 209 | * **->swhash** and *skb*\ **->l4hash** to 0). 210 | * 211 | * A call to this helper is susceptible to change the underlying 212 | * packet buffer. Therefore, at load time, all checks on pointers 213 | * previously done by the verifier are invalidated and must be 214 | * performed again, if the helper is used in combination with 215 | * direct packet access. 216 | * 217 | * Returns 218 | * 0 on success, or a negative error in case of failure. 219 | */ 220 | static long (*bpf_skb_store_bytes)(struct __sk_buff *skb, __u32 offset, const void *from, __u32 len, __u64 flags) = (void *) 9; 221 | 222 | /* 223 | * bpf_l3_csum_replace 224 | * 225 | * Recompute the layer 3 (e.g. IP) checksum for the packet 226 | * associated to *skb*. Computation is incremental, so the helper 227 | * must know the former value of the header field that was 228 | * modified (*from*), the new value of this field (*to*), and the 229 | * number of bytes (2 or 4) for this field, stored in *size*. 230 | * Alternatively, it is possible to store the difference between 231 | * the previous and the new values of the header field in *to*, by 232 | * setting *from* and *size* to 0. For both methods, *offset* 233 | * indicates the location of the IP checksum within the packet. 234 | * 235 | * This helper works in combination with **bpf_csum_diff**\ (), 236 | * which does not update the checksum in-place, but offers more 237 | * flexibility and can handle sizes larger than 2 or 4 for the 238 | * checksum to update. 239 | * 240 | * A call to this helper is susceptible to change the underlying 241 | * packet buffer. Therefore, at load time, all checks on pointers 242 | * previously done by the verifier are invalidated and must be 243 | * performed again, if the helper is used in combination with 244 | * direct packet access. 245 | * 246 | * Returns 247 | * 0 on success, or a negative error in case of failure. 248 | */ 249 | static long (*bpf_l3_csum_replace)(struct __sk_buff *skb, __u32 offset, __u64 from, __u64 to, __u64 size) = (void *) 10; 250 | 251 | /* 252 | * bpf_l4_csum_replace 253 | * 254 | * Recompute the layer 4 (e.g. TCP, UDP or ICMP) checksum for the 255 | * packet associated to *skb*. Computation is incremental, so the 256 | * helper must know the former value of the header field that was 257 | * modified (*from*), the new value of this field (*to*), and the 258 | * number of bytes (2 or 4) for this field, stored on the lowest 259 | * four bits of *flags*. 
Alternatively, it is possible to store 260 | * the difference between the previous and the new values of the 261 | * header field in *to*, by setting *from* and the four lowest 262 | * bits of *flags* to 0. For both methods, *offset* indicates the 263 | * location of the IP checksum within the packet. In addition to 264 | * the size of the field, *flags* can be added (bitwise OR) actual 265 | * flags. With **BPF_F_MARK_MANGLED_0**, a null checksum is left 266 | * untouched (unless **BPF_F_MARK_ENFORCE** is added as well), and 267 | * for updates resulting in a null checksum the value is set to 268 | * **CSUM_MANGLED_0** instead. Flag **BPF_F_PSEUDO_HDR** indicates 269 | * the checksum is to be computed against a pseudo-header. 270 | * 271 | * This helper works in combination with **bpf_csum_diff**\ (), 272 | * which does not update the checksum in-place, but offers more 273 | * flexibility and can handle sizes larger than 2 or 4 for the 274 | * checksum to update. 275 | * 276 | * A call to this helper is susceptible to change the underlying 277 | * packet buffer. Therefore, at load time, all checks on pointers 278 | * previously done by the verifier are invalidated and must be 279 | * performed again, if the helper is used in combination with 280 | * direct packet access. 281 | * 282 | * Returns 283 | * 0 on success, or a negative error in case of failure. 284 | */ 285 | static long (*bpf_l4_csum_replace)(struct __sk_buff *skb, __u32 offset, __u64 from, __u64 to, __u64 flags) = (void *) 11; 286 | 287 | /* 288 | * bpf_tail_call 289 | * 290 | * This special helper is used to trigger a "tail call", or in 291 | * other words, to jump into another eBPF program. The same stack 292 | * frame is used (but values on stack and in registers for the 293 | * caller are not accessible to the callee). This mechanism allows 294 | * for program chaining, either for raising the maximum number of 295 | * available eBPF instructions, or to execute given programs in 296 | * conditional blocks. For security reasons, there is an upper 297 | * limit to the number of successive tail calls that can be 298 | * performed. 299 | * 300 | * Upon call of this helper, the program attempts to jump into a 301 | * program referenced at index *index* in *prog_array_map*, a 302 | * special map of type **BPF_MAP_TYPE_PROG_ARRAY**, and passes 303 | * *ctx*, a pointer to the context. 304 | * 305 | * If the call succeeds, the kernel immediately runs the first 306 | * instruction of the new program. This is not a function call, 307 | * and it never returns to the previous program. If the call 308 | * fails, then the helper has no effect, and the caller continues 309 | * to run its subsequent instructions. A call can fail if the 310 | * destination program for the jump does not exist (i.e. *index* 311 | * is superior to the number of entries in *prog_array_map*), or 312 | * if the maximum number of tail calls has been reached for this 313 | * chain of programs. This limit is defined in the kernel by the 314 | * macro **MAX_TAIL_CALL_CNT** (not accessible to user space), 315 | * which is currently set to 32. 316 | * 317 | * Returns 318 | * 0 on success, or a negative error in case of failure. 319 | */ 320 | static long (*bpf_tail_call)(void *ctx, void *prog_array_map, __u32 index) = (void *) 12; 321 | 322 | /* 323 | * bpf_clone_redirect 324 | * 325 | * Clone and redirect the packet associated to *skb* to another 326 | * net device of index *ifindex*. Both ingress and egress 327 | * interfaces can be used for redirection. 
The **BPF_F_INGRESS** 328 | * value in *flags* is used to make the distinction (ingress path 329 | * is selected if the flag is present, egress path otherwise). 330 | * This is the only flag supported for now. 331 | * 332 | * In comparison with **bpf_redirect**\ () helper, 333 | * **bpf_clone_redirect**\ () has the associated cost of 334 | * duplicating the packet buffer, but this can be executed out of 335 | * the eBPF program. Conversely, **bpf_redirect**\ () is more 336 | * efficient, but it is handled through an action code where the 337 | * redirection happens only after the eBPF program has returned. 338 | * 339 | * A call to this helper is susceptible to change the underlying 340 | * packet buffer. Therefore, at load time, all checks on pointers 341 | * previously done by the verifier are invalidated and must be 342 | * performed again, if the helper is used in combination with 343 | * direct packet access. 344 | * 345 | * Returns 346 | * 0 on success, or a negative error in case of failure. 347 | */ 348 | static long (*bpf_clone_redirect)(struct __sk_buff *skb, __u32 ifindex, __u64 flags) = (void *) 13; 349 | 350 | /* 351 | * bpf_get_current_pid_tgid 352 | * 353 | * 354 | * Returns 355 | * A 64-bit integer containing the current tgid and pid, and 356 | * created as such: 357 | * *current_task*\ **->tgid << 32 \|** 358 | * *current_task*\ **->pid**. 359 | */ 360 | static __u64 (*bpf_get_current_pid_tgid)(void) = (void *) 14; 361 | 362 | /* 363 | * bpf_get_current_uid_gid 364 | * 365 | * 366 | * Returns 367 | * A 64-bit integer containing the current GID and UID, and 368 | * created as such: *current_gid* **<< 32 \|** *current_uid*. 369 | */ 370 | static __u64 (*bpf_get_current_uid_gid)(void) = (void *) 15; 371 | 372 | /* 373 | * bpf_get_current_comm 374 | * 375 | * Copy the **comm** attribute of the current task into *buf* of 376 | * *size_of_buf*. The **comm** attribute contains the name of 377 | * the executable (excluding the path) for the current task. The 378 | * *size_of_buf* must be strictly positive. On success, the 379 | * helper makes sure that the *buf* is NUL-terminated. On failure, 380 | * it is filled with zeroes. 381 | * 382 | * Returns 383 | * 0 on success, or a negative error in case of failure. 384 | */ 385 | static long (*bpf_get_current_comm)(void *buf, __u32 size_of_buf) = (void *) 16; 386 | 387 | /* 388 | * bpf_get_cgroup_classid 389 | * 390 | * Retrieve the classid for the current task, i.e. for the net_cls 391 | * cgroup to which *skb* belongs. 392 | * 393 | * This helper can be used on TC egress path, but not on ingress. 394 | * 395 | * The net_cls cgroup provides an interface to tag network packets 396 | * based on a user-provided identifier for all traffic coming from 397 | * the tasks belonging to the related cgroup. See also the related 398 | * kernel documentation, available from the Linux sources in file 399 | * *Documentation/admin-guide/cgroup-v1/net_cls.rst*. 400 | * 401 | * The Linux kernel has two versions for cgroups: there are 402 | * cgroups v1 and cgroups v2. Both are available to users, who can 403 | * use a mixture of them, but note that the net_cls cgroup is for 404 | * cgroup v1 only. This makes it incompatible with BPF programs 405 | * run on cgroups, which is a cgroup-v2-only feature (a socket can 406 | * only hold data for one version of cgroups at a time). 407 | * 408 | * This helper is only available is the kernel was compiled with 409 | * the **CONFIG_CGROUP_NET_CLASSID** configuration option set to 410 | * "**y**" or to "**m**". 
411 | * 412 | * Returns 413 | * The classid, or 0 for the default unconfigured classid. 414 | */ 415 | static __u32 (*bpf_get_cgroup_classid)(struct __sk_buff *skb) = (void *) 17; 416 | 417 | /* 418 | * bpf_skb_vlan_push 419 | * 420 | * Push a *vlan_tci* (VLAN tag control information) of protocol 421 | * *vlan_proto* to the packet associated to *skb*, then update 422 | * the checksum. Note that if *vlan_proto* is different from 423 | * **ETH_P_8021Q** and **ETH_P_8021AD**, it is considered to 424 | * be **ETH_P_8021Q**. 425 | * 426 | * A call to this helper is susceptible to change the underlying 427 | * packet buffer. Therefore, at load time, all checks on pointers 428 | * previously done by the verifier are invalidated and must be 429 | * performed again, if the helper is used in combination with 430 | * direct packet access. 431 | * 432 | * Returns 433 | * 0 on success, or a negative error in case of failure. 434 | */ 435 | static long (*bpf_skb_vlan_push)(struct __sk_buff *skb, __be16 vlan_proto, __u16 vlan_tci) = (void *) 18; 436 | 437 | /* 438 | * bpf_skb_vlan_pop 439 | * 440 | * Pop a VLAN header from the packet associated to *skb*. 441 | * 442 | * A call to this helper is susceptible to change the underlying 443 | * packet buffer. Therefore, at load time, all checks on pointers 444 | * previously done by the verifier are invalidated and must be 445 | * performed again, if the helper is used in combination with 446 | * direct packet access. 447 | * 448 | * Returns 449 | * 0 on success, or a negative error in case of failure. 450 | */ 451 | static long (*bpf_skb_vlan_pop)(struct __sk_buff *skb) = (void *) 19; 452 | 453 | /* 454 | * bpf_skb_get_tunnel_key 455 | * 456 | * Get tunnel metadata. This helper takes a pointer *key* to an 457 | * empty **struct bpf_tunnel_key** of **size**, that will be 458 | * filled with tunnel metadata for the packet associated to *skb*. 459 | * The *flags* can be set to **BPF_F_TUNINFO_IPV6**, which 460 | * indicates that the tunnel is based on IPv6 protocol instead of 461 | * IPv4. 462 | * 463 | * The **struct bpf_tunnel_key** is an object that generalizes the 464 | * principal parameters used by various tunneling protocols into a 465 | * single struct. This way, it can be used to easily make a 466 | * decision based on the contents of the encapsulation header, 467 | * "summarized" in this struct. In particular, it holds the IP 468 | * address of the remote end (IPv4 or IPv6, depending on the case) 469 | * in *key*\ **->remote_ipv4** or *key*\ **->remote_ipv6**. Also, 470 | * this struct exposes the *key*\ **->tunnel_id**, which is 471 | * generally mapped to a VNI (Virtual Network Identifier), making 472 | * it programmable together with the **bpf_skb_set_tunnel_key**\ 473 | * () helper. 
474 | * 475 | * Let's imagine that the following code is part of a program 476 | * attached to the TC ingress interface, on one end of a GRE 477 | * tunnel, and is supposed to filter out all messages coming from 478 | * remote ends with IPv4 address other than 10.0.0.1: 479 | * 480 | * :: 481 | * 482 | * int ret; 483 | * struct bpf_tunnel_key key = {}; 484 | * 485 | * ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0); 486 | * if (ret < 0) 487 | * return TC_ACT_SHOT; // drop packet 488 | * 489 | * if (key.remote_ipv4 != 0x0a000001) 490 | * return TC_ACT_SHOT; // drop packet 491 | * 492 | * return TC_ACT_OK; // accept packet 493 | * 494 | * This interface can also be used with all encapsulation devices 495 | * that can operate in "collect metadata" mode: instead of having 496 | * one network device per specific configuration, the "collect 497 | * metadata" mode only requires a single device where the 498 | * configuration can be extracted from this helper. 499 | * 500 | * This can be used together with various tunnels such as VXLan, 501 | * Geneve, GRE or IP in IP (IPIP). 502 | * 503 | * Returns 504 | * 0 on success, or a negative error in case of failure. 505 | */ 506 | static long (*bpf_skb_get_tunnel_key)(struct __sk_buff *skb, struct bpf_tunnel_key *key, __u32 size, __u64 flags) = (void *) 20; 507 | 508 | /* 509 | * bpf_skb_set_tunnel_key 510 | * 511 | * Populate tunnel metadata for packet associated to *skb.* The 512 | * tunnel metadata is set to the contents of *key*, of *size*. The 513 | * *flags* can be set to a combination of the following values: 514 | * 515 | * **BPF_F_TUNINFO_IPV6** 516 | * Indicate that the tunnel is based on IPv6 protocol 517 | * instead of IPv4. 518 | * **BPF_F_ZERO_CSUM_TX** 519 | * For IPv4 packets, add a flag to tunnel metadata 520 | * indicating that checksum computation should be skipped 521 | * and checksum set to zeroes. 522 | * **BPF_F_DONT_FRAGMENT** 523 | * Add a flag to tunnel metadata indicating that the 524 | * packet should not be fragmented. 525 | * **BPF_F_SEQ_NUMBER** 526 | * Add a flag to tunnel metadata indicating that a 527 | * sequence number should be added to tunnel header before 528 | * sending the packet. This flag was added for GRE 529 | * encapsulation, but might be used with other protocols 530 | * as well in the future. 531 | * 532 | * Here is a typical usage on the transmit path: 533 | * 534 | * :: 535 | * 536 | * struct bpf_tunnel_key key; 537 | * populate key ... 538 | * bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0); 539 | * bpf_clone_redirect(skb, vxlan_dev_ifindex, 0); 540 | * 541 | * See also the description of the **bpf_skb_get_tunnel_key**\ () 542 | * helper for additional information. 543 | * 544 | * Returns 545 | * 0 on success, or a negative error in case of failure. 546 | */ 547 | static long (*bpf_skb_set_tunnel_key)(struct __sk_buff *skb, struct bpf_tunnel_key *key, __u32 size, __u64 flags) = (void *) 21; 548 | 549 | /* 550 | * bpf_perf_event_read 551 | * 552 | * Read the value of a perf event counter. This helper relies on a 553 | * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of 554 | * the perf event counter is selected when *map* is updated with 555 | * perf event file descriptors. The *map* is an array whose size 556 | * is the number of available CPUs, and each cell contains a value 557 | * relative to one CPU. The value to retrieve is indicated by 558 | * *flags*, that contains the index of the CPU to look up, masked 559 | * with **BPF_F_INDEX_MASK**. 
Alternatively, *flags* can be set to 560 | * **BPF_F_CURRENT_CPU** to indicate that the value for the 561 | * current CPU should be retrieved. 562 | * 563 | * Note that before Linux 4.13, only hardware perf event can be 564 | * retrieved. 565 | * 566 | * Also, be aware that the newer helper 567 | * **bpf_perf_event_read_value**\ () is recommended over 568 | * **bpf_perf_event_read**\ () in general. The latter has some ABI 569 | * quirks where error and counter value are used as a return code 570 | * (which is wrong to do since ranges may overlap). This issue is 571 | * fixed with **bpf_perf_event_read_value**\ (), which at the same 572 | * time provides more features over the **bpf_perf_event_read**\ 573 | * () interface. Please refer to the description of 574 | * **bpf_perf_event_read_value**\ () for details. 575 | * 576 | * Returns 577 | * The value of the perf event counter read from the map, or a 578 | * negative error code in case of failure. 579 | */ 580 | static __u64 (*bpf_perf_event_read)(void *map, __u64 flags) = (void *) 22; 581 | 582 | /* 583 | * bpf_redirect 584 | * 585 | * Redirect the packet to another net device of index *ifindex*. 586 | * This helper is somewhat similar to **bpf_clone_redirect**\ 587 | * (), except that the packet is not cloned, which provides 588 | * increased performance. 589 | * 590 | * Except for XDP, both ingress and egress interfaces can be used 591 | * for redirection. The **BPF_F_INGRESS** value in *flags* is used 592 | * to make the distinction (ingress path is selected if the flag 593 | * is present, egress path otherwise). Currently, XDP only 594 | * supports redirection to the egress interface, and accepts no 595 | * flag at all. 596 | * 597 | * The same effect can also be attained with the more generic 598 | * **bpf_redirect_map**\ (), which uses a BPF map to store the 599 | * redirect target instead of providing it directly to the helper. 600 | * 601 | * Returns 602 | * For XDP, the helper returns **XDP_REDIRECT** on success or 603 | * **XDP_ABORTED** on error. For other program types, the values 604 | * are **TC_ACT_REDIRECT** on success or **TC_ACT_SHOT** on 605 | * error. 606 | */ 607 | static long (*bpf_redirect)(__u32 ifindex, __u64 flags) = (void *) 23; 608 | 609 | /* 610 | * bpf_get_route_realm 611 | * 612 | * Retrieve the realm or the route, that is to say the 613 | * **tclassid** field of the destination for the *skb*. The 614 | * identifier retrieved is a user-provided tag, similar to the 615 | * one used with the net_cls cgroup (see description for 616 | * **bpf_get_cgroup_classid**\ () helper), but here this tag is 617 | * held by a route (a destination entry), not by a task. 618 | * 619 | * Retrieving this identifier works with the clsact TC egress hook 620 | * (see also **tc-bpf(8)**), or alternatively on conventional 621 | * classful egress qdiscs, but not on TC ingress path. In case of 622 | * clsact TC egress hook, this has the advantage that, internally, 623 | * the destination entry has not been dropped yet in the transmit 624 | * path. Therefore, the destination entry does not need to be 625 | * artificially held via **netif_keep_dst**\ () for a classful 626 | * qdisc until the *skb* is freed. 627 | * 628 | * This helper is available only if the kernel was compiled with 629 | * **CONFIG_IP_ROUTE_CLASSID** configuration option. 630 | * 631 | * Returns 632 | * The realm of the route for the packet associated to *skb*, or 0 633 | * if none was found. 
634 | */ 635 | static __u32 (*bpf_get_route_realm)(struct __sk_buff *skb) = (void *) 24; 636 | 637 | /* 638 | * bpf_perf_event_output 639 | * 640 | * Write raw *data* blob into a special BPF perf event held by 641 | * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf 642 | * event must have the following attributes: **PERF_SAMPLE_RAW** 643 | * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and 644 | * **PERF_COUNT_SW_BPF_OUTPUT** as **config**. 645 | * 646 | * The *flags* are used to indicate the index in *map* for which 647 | * the value must be put, masked with **BPF_F_INDEX_MASK**. 648 | * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU** 649 | * to indicate that the index of the current CPU core should be 650 | * used. 651 | * 652 | * The value to write, of *size*, is passed through eBPF stack and 653 | * pointed by *data*. 654 | * 655 | * The context of the program *ctx* needs also be passed to the 656 | * helper. 657 | * 658 | * On user space, a program willing to read the values needs to 659 | * call **perf_event_open**\ () on the perf event (either for 660 | * one or for all CPUs) and to store the file descriptor into the 661 | * *map*. This must be done before the eBPF program can send data 662 | * into it. An example is available in file 663 | * *samples/bpf/trace_output_user.c* in the Linux kernel source 664 | * tree (the eBPF program counterpart is in 665 | * *samples/bpf/trace_output_kern.c*). 666 | * 667 | * **bpf_perf_event_output**\ () achieves better performance 668 | * than **bpf_trace_printk**\ () for sharing data with user 669 | * space, and is much better suitable for streaming data from eBPF 670 | * programs. 671 | * 672 | * Note that this helper is not restricted to tracing use cases 673 | * and can be used with programs attached to TC or XDP as well, 674 | * where it allows for passing data to user space listeners. Data 675 | * can be: 676 | * 677 | * * Only custom structs, 678 | * * Only the packet payload, or 679 | * * A combination of both. 680 | * 681 | * Returns 682 | * 0 on success, or a negative error in case of failure. 683 | */ 684 | static long (*bpf_perf_event_output)(void *ctx, void *map, __u64 flags, void *data, __u64 size) = (void *) 25; 685 | 686 | /* 687 | * bpf_skb_load_bytes 688 | * 689 | * This helper was provided as an easy way to load data from a 690 | * packet. It can be used to load *len* bytes from *offset* from 691 | * the packet associated to *skb*, into the buffer pointed by 692 | * *to*. 693 | * 694 | * Since Linux 4.7, usage of this helper has mostly been replaced 695 | * by "direct packet access", enabling packet data to be 696 | * manipulated with *skb*\ **->data** and *skb*\ **->data_end** 697 | * pointing respectively to the first byte of packet data and to 698 | * the byte after the last byte of packet data. However, it 699 | * remains useful if one wishes to read large quantities of data 700 | * at once from a packet into the eBPF stack. 701 | * 702 | * Returns 703 | * 0 on success, or a negative error in case of failure. 704 | */ 705 | static long (*bpf_skb_load_bytes)(const void *skb, __u32 offset, void *to, __u32 len) = (void *) 26; 706 | 707 | /* 708 | * bpf_get_stackid 709 | * 710 | * Walk a user or a kernel stack and return its id. To achieve 711 | * this, the helper needs *ctx*, which is a pointer to the context 712 | * on which the tracing program is executed, and a pointer to a 713 | * *map* of type **BPF_MAP_TYPE_STACK_TRACE**. 
714 | * 715 | * The last argument, *flags*, holds the number of stack frames to 716 | * skip (from 0 to 255), masked with 717 | * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set 718 | * a combination of the following flags: 719 | * 720 | * **BPF_F_USER_STACK** 721 | * Collect a user space stack instead of a kernel stack. 722 | * **BPF_F_FAST_STACK_CMP** 723 | * Compare stacks by hash only. 724 | * **BPF_F_REUSE_STACKID** 725 | * If two different stacks hash into the same *stackid*, 726 | * discard the old one. 727 | * 728 | * The stack id retrieved is a 32 bit long integer handle which 729 | * can be further combined with other data (including other stack 730 | * ids) and used as a key into maps. This can be useful for 731 | * generating a variety of graphs (such as flame graphs or off-cpu 732 | * graphs). 733 | * 734 | * For walking a stack, this helper is an improvement over 735 | * **bpf_probe_read**\ (), which can be used with unrolled loops 736 | * but is not efficient and consumes a lot of eBPF instructions. 737 | * Instead, **bpf_get_stackid**\ () can collect up to 738 | * **PERF_MAX_STACK_DEPTH** both kernel and user frames. Note that 739 | * this limit can be controlled with the **sysctl** program, and 740 | * that it should be manually increased in order to profile long 741 | * user stacks (such as stacks for Java programs). To do so, use: 742 | * 743 | * :: 744 | * 745 | * # sysctl kernel.perf_event_max_stack= 746 | * 747 | * Returns 748 | * The positive or null stack id on success, or a negative error 749 | * in case of failure. 750 | */ 751 | static long (*bpf_get_stackid)(void *ctx, void *map, __u64 flags) = (void *) 27; 752 | 753 | /* 754 | * bpf_csum_diff 755 | * 756 | * Compute a checksum difference, from the raw buffer pointed by 757 | * *from*, of length *from_size* (that must be a multiple of 4), 758 | * towards the raw buffer pointed by *to*, of size *to_size* 759 | * (same remark). An optional *seed* can be added to the value 760 | * (this can be cascaded, the seed may come from a previous call 761 | * to the helper). 762 | * 763 | * This is flexible enough to be used in several ways: 764 | * 765 | * * With *from_size* == 0, *to_size* > 0 and *seed* set to 766 | * checksum, it can be used when pushing new data. 767 | * * With *from_size* > 0, *to_size* == 0 and *seed* set to 768 | * checksum, it can be used when removing data from a packet. 769 | * * With *from_size* > 0, *to_size* > 0 and *seed* set to 0, it 770 | * can be used to compute a diff. Note that *from_size* and 771 | * *to_size* do not need to be equal. 772 | * 773 | * This helper can be used in combination with 774 | * **bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\ (), to 775 | * which one can feed in the difference computed with 776 | * **bpf_csum_diff**\ (). 777 | * 778 | * Returns 779 | * The checksum result, or a negative error code in case of 780 | * failure. 781 | */ 782 | static __s64 (*bpf_csum_diff)(__be32 *from, __u32 from_size, __be32 *to, __u32 to_size, __wsum seed) = (void *) 28; 783 | 784 | /* 785 | * bpf_skb_get_tunnel_opt 786 | * 787 | * Retrieve tunnel options metadata for the packet associated to 788 | * *skb*, and store the raw tunnel option data to the buffer *opt* 789 | * of *size*. 790 | * 791 | * This helper can be used with encapsulation devices that can 792 | * operate in "collect metadata" mode (please refer to the related 793 | * note in the description of **bpf_skb_get_tunnel_key**\ () for 794 | * more details). 
A particular example where this can be used is
795 | * in combination with the Geneve encapsulation protocol, where it
796 | * allows for pushing (with the **bpf_skb_set_tunnel_opt**\ () helper)
797 | * and retrieving arbitrary TLVs (Type-Length-Value headers) from
798 | * the eBPF program. This allows for full customization of these
799 | * headers.
800 | *
801 | * Returns
802 | * The size of the option data retrieved.
803 | */
804 | static long (*bpf_skb_get_tunnel_opt)(struct __sk_buff *skb, void *opt, __u32 size) = (void *) 29;
805 | 
806 | /*
807 | * bpf_skb_set_tunnel_opt
808 | *
809 | * Set tunnel options metadata for the packet associated to *skb*
810 | * to the option data contained in the raw buffer *opt* of *size*.
811 | *
812 | * See also the description of the **bpf_skb_get_tunnel_opt**\ ()
813 | * helper for additional information.
814 | *
815 | * Returns
816 | * 0 on success, or a negative error in case of failure.
817 | */
818 | static long (*bpf_skb_set_tunnel_opt)(struct __sk_buff *skb, void *opt, __u32 size) = (void *) 30;
819 | 
820 | /*
821 | * bpf_skb_change_proto
822 | *
823 | * Change the protocol of the *skb* to *proto*. Currently
824 | * supported are transitions from IPv4 to IPv6 and from IPv6 to
825 | * IPv4. The helper takes care of the groundwork for the
826 | * transition, including resizing the socket buffer. The eBPF
827 | * program is expected to fill the new headers, if any, via
828 | * **bpf_skb_store_bytes**\ () and to recompute the checksums with
829 | * **bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\
830 | * (). The main case for this helper is to perform NAT64
831 | * operations out of an eBPF program.
832 | *
833 | * Internally, the GSO type is marked as dodgy so that headers are
834 | * checked and segments are recalculated by the GSO/GRO engine.
835 | * The size for GSO target is adapted as well.
836 | *
837 | * All values for *flags* are reserved for future usage, and must
838 | * be left at zero.
839 | *
840 | * A call to this helper is susceptible to change the underlying
841 | * packet buffer. Therefore, at load time, all checks on pointers
842 | * previously done by the verifier are invalidated and must be
843 | * performed again, if the helper is used in combination with
844 | * direct packet access.
845 | *
846 | * Returns
847 | * 0 on success, or a negative error in case of failure.
848 | */
849 | static long (*bpf_skb_change_proto)(struct __sk_buff *skb, __be16 proto, __u64 flags) = (void *) 31;
850 | 
851 | /*
852 | * bpf_skb_change_type
853 | *
854 | * Change the packet type for the packet associated to *skb*. This
855 | * comes down to setting *skb*\ **->pkt_type** to *type*, except
856 | * the eBPF program does not have write access to *skb*\
857 | * **->pkt_type** besides this helper. Using a helper here allows
858 | * for graceful handling of errors.
859 | *
860 | * The major use case is to change incoming *skb*s to
861 | * **PACKET_HOST** in a programmatic way instead of having to
862 | * recirculate via **redirect**\ (..., **BPF_F_INGRESS**), for
863 | * example.
864 | *
865 | * Note that *type* only allows certain values. At this time, they
866 | * are:
867 | *
868 | * **PACKET_HOST**
869 | * Packet is for us.
870 | * **PACKET_BROADCAST**
871 | * Send packet to all.
872 | * **PACKET_MULTICAST**
873 | * Send packet to group.
874 | * **PACKET_OTHERHOST**
875 | * Send packet to someone else.
876 | *
877 | * Returns
878 | * 0 on success, or a negative error in case of failure.
879 | */
880 | static long (*bpf_skb_change_type)(struct __sk_buff *skb, __u32 type) = (void *) 32;
881 | 
882 | /*
883 | * bpf_skb_under_cgroup
884 | *
885 | * Check whether *skb* is a descendant of the cgroup2 held by
886 | * *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*.
887 | *
888 | * Returns
889 | * The return value depends on the result of the test, and can be:
890 | *
891 | * * 0, if the *skb* failed the cgroup2 descendant test.
892 | * * 1, if the *skb* succeeded the cgroup2 descendant test.
893 | * * A negative error code, if an error occurred.
894 | */
895 | static long (*bpf_skb_under_cgroup)(struct __sk_buff *skb, void *map, __u32 index) = (void *) 33;
896 | 
897 | /*
898 | * bpf_get_hash_recalc
899 | *
900 | * Retrieve the hash of the packet, *skb*\ **->hash**. If it is
901 | * not set, in particular if the hash was cleared due to mangling,
902 | * recompute this hash. Later accesses to the hash can be done
903 | * directly with *skb*\ **->hash**.
904 | *
905 | * Calling **bpf_set_hash_invalid**\ (), changing a packet
906 | * protocol with **bpf_skb_change_proto**\ (), or calling
907 | * **bpf_skb_store_bytes**\ () with the
908 | * **BPF_F_INVALIDATE_HASH** flag are actions susceptible to clear
909 | * the hash and to trigger a new computation for the next call to
910 | * **bpf_get_hash_recalc**\ ().
911 | *
912 | * Returns
913 | * The 32-bit hash.
914 | */
915 | static __u32 (*bpf_get_hash_recalc)(struct __sk_buff *skb) = (void *) 34;
916 | 
917 | /*
918 | * bpf_get_current_task
919 | *
920 | *
921 | * Returns
922 | * A pointer to the current task struct.
923 | */
924 | static __u64 (*bpf_get_current_task)(void) = (void *) 35;
925 | 
926 | /*
927 | * bpf_probe_write_user
928 | *
929 | * Attempt in a safe way to write *len* bytes from the buffer
930 | * *src* to *dst* in memory. It only works for threads that are in
931 | * user context, and *dst* must be a valid user space address.
932 | *
933 | * This helper should not be used to implement any kind of
934 | * security mechanism because of TOC-TOU attacks, but rather to
935 | * debug, divert, and manipulate execution of semi-cooperative
936 | * processes.
937 | *
938 | * Keep in mind that this feature is meant for experiments, and it
939 | * has a risk of crashing the system and running programs.
940 | * Therefore, when an eBPF program using this helper is attached,
941 | * a warning including PID and process name is printed to kernel
942 | * logs.
943 | *
944 | * Returns
945 | * 0 on success, or a negative error in case of failure.
946 | */
947 | static long (*bpf_probe_write_user)(void *dst, const void *src, __u32 len) = (void *) 36;
948 | 
949 | /*
950 | * bpf_current_task_under_cgroup
951 | *
952 | * Check whether the probe is being run in the context of a given
953 | * subset of the cgroup2 hierarchy. The cgroup2 to test is held by
954 | * *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*.
955 | *
956 | * Returns
957 | * The return value depends on the result of the test, and can be:
958 | *
959 | * * 1, if current task belongs to the cgroup2.
960 | * * 0, if current task does not belong to the cgroup2.
961 | * * A negative error code, if an error occurred.
962 | */
963 | static long (*bpf_current_task_under_cgroup)(void *map, __u32 index) = (void *) 37;
964 | 
965 | /*
966 | * bpf_skb_change_tail
967 | *
968 | * Resize (trim or grow) the packet associated to *skb* to the
969 | * new *len*. The *flags* are reserved for future usage, and must
970 | * be left at zero.
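 *
 * (For instance, a TC program trimming the packet to a hypothetical
 * new_len could do the following; TC_ACT_SHOT comes from the kernel's
 * pkt_cls.h and is used here only for illustration.)
 *
 * ::
 *
 *     if (bpf_skb_change_tail(skb, new_len, 0))
 *         return TC_ACT_SHOT;
 *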
971 | *
972 | * The basic idea is that the helper performs the needed work to
973 | * change the size of the packet, then the eBPF program rewrites
974 | * the rest via helpers like **bpf_skb_store_bytes**\ (),
975 | * **bpf_l3_csum_replace**\ (), **bpf_l4_csum_replace**\ ()
976 | * and others. This helper is a slow path utility intended for
977 | * replies with control messages. Because it is targeted at the
978 | * slow path, the helper itself can afford to be slow: it
979 | * implicitly linearizes, unclones and drops offloads from the
980 | * *skb*.
981 | *
982 | * A call to this helper is susceptible to change the underlying
983 | * packet buffer. Therefore, at load time, all checks on pointers
984 | * previously done by the verifier are invalidated and must be
985 | * performed again, if the helper is used in combination with
986 | * direct packet access.
987 | *
988 | * Returns
989 | * 0 on success, or a negative error in case of failure.
990 | */
991 | static long (*bpf_skb_change_tail)(struct __sk_buff *skb, __u32 len, __u64 flags) = (void *) 38;
992 | 
993 | /*
994 | * bpf_skb_pull_data
995 | *
996 | * Pull in non-linear data in case the *skb* is non-linear and not
997 | * all of *len* is part of the linear section. Make *len* bytes
998 | * from *skb* readable and writable. If a zero value is passed for
999 | * *len*, then the whole length of the *skb* is pulled.
1000 | *
1001 | * This helper is only needed for reading and writing with direct
1002 | * packet access.
1003 | *
1004 | * For direct packet access, testing that offsets to access
1005 | * are within packet boundaries (test on *skb*\ **->data_end**) is
1006 | * susceptible to fail if offsets are invalid, or if the requested
1007 | * data is in non-linear parts of the *skb*. On failure the
1008 | * program can just bail out, or in the case of a non-linear
1009 | * buffer, use a helper to make the data available. The
1010 | * **bpf_skb_load_bytes**\ () helper is a first solution to access
1011 | * the data. Another one consists in using **bpf_skb_pull_data**
1012 | * to pull in the non-linear parts once, then retesting and
1013 | * eventually accessing the data.
1014 | *
1015 | * At the same time, this also makes sure the *skb* is uncloned,
1016 | * which is a necessary condition for direct write. As this needs
1017 | * to be an invariant for the write part only, the verifier
1018 | * detects writes and adds a prologue that is calling
1019 | * **bpf_skb_pull_data()** to effectively unclone the *skb* from
1020 | * the very beginning in case it is indeed cloned.
1021 | *
1022 | * A call to this helper is susceptible to change the underlying
1023 | * packet buffer. Therefore, at load time, all checks on pointers
1024 | * previously done by the verifier are invalidated and must be
1025 | * performed again, if the helper is used in combination with
1026 | * direct packet access.
1027 | *
1028 | * Returns
1029 | * 0 on success, or a negative error in case of failure.
1030 | */
1031 | static long (*bpf_skb_pull_data)(struct __sk_buff *skb, __u32 len) = (void *) 39;
1032 | 
1033 | /*
1034 | * bpf_csum_update
1035 | *
1036 | * Add the checksum *csum* into *skb*\ **->csum** in case the
1037 | * driver has supplied a checksum for the entire packet into that
1038 | * field. Return an error otherwise. This helper is intended to be
1039 | * used in combination with **bpf_csum_diff**\ (), in particular
1040 | * when the checksum needs to be updated after data has been
1041 | * written into the packet through direct packet access.
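 *
 * (A sketch of that combination, assuming a 4-byte word of the packet
 * was rewritten in place, with old_word and new_word holding the
 * values before and after the rewrite.)
 *
 * ::
 *
 *     __s64 diff = bpf_csum_diff(&old_word, 4, &new_word, 4, 0);
 *
 *     if (diff >= 0)
 *         bpf_csum_update(skb, (__wsum)diff);
 *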
1042 | * 1043 | * Returns 1044 | * The checksum on success, or a negative error code in case of 1045 | * failure. 1046 | */ 1047 | static __s64 (*bpf_csum_update)(struct __sk_buff *skb, __wsum csum) = (void *) 40; 1048 | 1049 | /* 1050 | * bpf_set_hash_invalid 1051 | * 1052 | * Invalidate the current *skb*\ **->hash**. It can be used after 1053 | * mangling on headers through direct packet access, in order to 1054 | * indicate that the hash is outdated and to trigger a 1055 | * recalculation the next time the kernel tries to access this 1056 | * hash or when the **bpf_get_hash_recalc**\ () helper is called. 1057 | * 1058 | */ 1059 | static void (*bpf_set_hash_invalid)(struct __sk_buff *skb) = (void *) 41; 1060 | 1061 | /* 1062 | * bpf_get_numa_node_id 1063 | * 1064 | * Return the id of the current NUMA node. The primary use case 1065 | * for this helper is the selection of sockets for the local NUMA 1066 | * node, when the program is attached to sockets using the 1067 | * **SO_ATTACH_REUSEPORT_EBPF** option (see also **socket(7)**), 1068 | * but the helper is also available to other eBPF program types, 1069 | * similarly to **bpf_get_smp_processor_id**\ (). 1070 | * 1071 | * Returns 1072 | * The id of current NUMA node. 1073 | */ 1074 | static long (*bpf_get_numa_node_id)(void) = (void *) 42; 1075 | 1076 | /* 1077 | * bpf_skb_change_head 1078 | * 1079 | * Grows headroom of packet associated to *skb* and adjusts the 1080 | * offset of the MAC header accordingly, adding *len* bytes of 1081 | * space. It automatically extends and reallocates memory as 1082 | * required. 1083 | * 1084 | * This helper can be used on a layer 3 *skb* to push a MAC header 1085 | * for redirection into a layer 2 device. 1086 | * 1087 | * All values for *flags* are reserved for future usage, and must 1088 | * be left at zero. 1089 | * 1090 | * A call to this helper is susceptible to change the underlying 1091 | * packet buffer. Therefore, at load time, all checks on pointers 1092 | * previously done by the verifier are invalidated and must be 1093 | * performed again, if the helper is used in combination with 1094 | * direct packet access. 1095 | * 1096 | * Returns 1097 | * 0 on success, or a negative error in case of failure. 1098 | */ 1099 | static long (*bpf_skb_change_head)(struct __sk_buff *skb, __u32 len, __u64 flags) = (void *) 43; 1100 | 1101 | /* 1102 | * bpf_xdp_adjust_head 1103 | * 1104 | * Adjust (move) *xdp_md*\ **->data** by *delta* bytes. Note that 1105 | * it is possible to use a negative value for *delta*. This helper 1106 | * can be used to prepare the packet for pushing or popping 1107 | * headers. 1108 | * 1109 | * A call to this helper is susceptible to change the underlying 1110 | * packet buffer. Therefore, at load time, all checks on pointers 1111 | * previously done by the verifier are invalidated and must be 1112 | * performed again, if the helper is used in combination with 1113 | * direct packet access. 1114 | * 1115 | * Returns 1116 | * 0 on success, or a negative error in case of failure. 1117 | */ 1118 | static long (*bpf_xdp_adjust_head)(struct xdp_md *xdp_md, int delta) = (void *) 44; 1119 | 1120 | /* 1121 | * bpf_probe_read_str 1122 | * 1123 | * Copy a NUL terminated string from an unsafe kernel address 1124 | * *unsafe_ptr* to *dst*. See **bpf_probe_read_kernel_str**\ () for 1125 | * more details. 1126 | * 1127 | * Generally, use **bpf_probe_read_user_str**\ () or 1128 | * **bpf_probe_read_kernel_str**\ () instead. 
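 *
 * (For example, reading a NUL terminated kernel string into a local
 * buffer with the preferred variant; ptr stands for some unsafe
 * kernel address.)
 *
 * ::
 *
 *     char str[16];
 *     long n = bpf_probe_read_kernel_str(str, sizeof(str), ptr);
 *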
1129 | *
1130 | * Returns
1131 | * On success, the strictly positive length of the string,
1132 | * including the trailing NUL character. On error, a negative
1133 | * value.
1134 | */
1135 | static long (*bpf_probe_read_str)(void *dst, __u32 size, const void *unsafe_ptr) = (void *) 45;
1136 | 
1137 | /*
1138 | * bpf_get_socket_cookie
1139 | *
1140 | * If the **struct sk_buff** pointed by *skb* has a known socket,
1141 | * retrieve the cookie (generated by the kernel) of this socket.
1142 | * If no cookie has been set yet, generate a new cookie. Once
1143 | * generated, the socket cookie remains stable for the life of the
1144 | * socket. This helper can be useful for monitoring per socket
1145 | * networking traffic statistics as it provides a global socket
1146 | * identifier that can be assumed unique.
1147 | *
1148 | * Returns
1149 | * An 8-byte long unique number on success, or 0 if the socket
1150 | * field is missing inside *skb*.
1151 | */
1152 | static __u64 (*bpf_get_socket_cookie)(void *ctx) = (void *) 46;
1153 | 
1154 | /*
1155 | * bpf_get_socket_uid
1156 | *
1157 | *
1158 | * Returns
1159 | * The owner UID of the socket associated to *skb*. If the socket
1160 | * is **NULL**, or if it is not a full socket (i.e. if it is a
1161 | * time-wait or a request socket instead), **overflowuid** value
1162 | * is returned (note that **overflowuid** might also be the actual
1163 | * UID value for the socket).
1164 | */
1165 | static __u32 (*bpf_get_socket_uid)(struct __sk_buff *skb) = (void *) 47;
1166 | 
1167 | /*
1168 | * bpf_set_hash
1169 | *
1170 | * Set the full hash for *skb* (set the field *skb*\ **->hash**)
1171 | * to value *hash*.
1172 | *
1173 | * Returns
1174 | * 0
1175 | */
1176 | static long (*bpf_set_hash)(struct __sk_buff *skb, __u32 hash) = (void *) 48;
1177 | 
1178 | /*
1179 | * bpf_setsockopt
1180 | *
1181 | * Emulate a call to **setsockopt()** on the socket associated to
1182 | * *bpf_socket*, which must be a full socket. The *level* at
1183 | * which the option resides and the name *optname* of the option
1184 | * must be specified, see **setsockopt(2)** for more information.
1185 | * The option value of length *optlen* is pointed by *optval*.
1186 | *
1187 | * *bpf_socket* should be one of the following:
1188 | *
1189 | * * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**.
1190 | * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**
1191 | * and **BPF_CGROUP_INET6_CONNECT**.
1192 | *
1193 | * This helper actually implements a subset of **setsockopt()**.
1194 | * It supports the following *level*\ s:
1195 | *
1196 | * * **SOL_SOCKET**, which supports the following *optname*\ s:
1197 | * **SO_RCVBUF**, **SO_SNDBUF**, **SO_MAX_PACING_RATE**,
1198 | * **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**,
1199 | * **SO_BINDTODEVICE**, **SO_KEEPALIVE**.
1200 | * * **IPPROTO_TCP**, which supports the following *optname*\ s:
1201 | * **TCP_CONGESTION**, **TCP_BPF_IW**,
1202 | * **TCP_BPF_SNDCWND_CLAMP**, **TCP_SAVE_SYN**,
1203 | * **TCP_KEEPIDLE**, **TCP_KEEPINTVL**, **TCP_KEEPCNT**,
1204 | * **TCP_SYNCNT**, **TCP_USER_TIMEOUT**, **TCP_NOTSENT_LOWAT**.
1205 | * * **IPPROTO_IP**, which supports *optname* **IP_TOS**.
1206 | * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**.
1207 | *
1208 | * Returns
1209 | * 0 on success, or a negative error in case of failure.
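 *
 * (Usage sketch: switching the congestion control algorithm from a
 * **BPF_PROG_TYPE_SOCK_OPS** program; skops is the program's context
 * and "cubic" is just an example value.)
 *
 * ::
 *
 *     char cc[] = "cubic";
 *
 *     bpf_setsockopt(skops, IPPROTO_TCP, TCP_CONGESTION, cc, sizeof(cc));
 *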
1210 | */
1211 | static long (*bpf_setsockopt)(void *bpf_socket, int level, int optname, void *optval, int optlen) = (void *) 49;
1212 | 
1213 | /*
1214 | * bpf_skb_adjust_room
1215 | *
1216 | * Grow or shrink the room for data in the packet associated to
1217 | * *skb* by *len_diff*, and according to the selected *mode*.
1218 | *
1219 | * By default, the helper will reset any offloaded checksum
1220 | * indicator of the skb to CHECKSUM_NONE. This can be avoided
1221 | * by the following flag:
1222 | *
1223 | * * **BPF_F_ADJ_ROOM_NO_CSUM_RESET**: Do not reset offloaded
1224 | * checksum data of the skb to CHECKSUM_NONE.
1225 | *
1226 | * There are two supported modes at this time:
1227 | *
1228 | * * **BPF_ADJ_ROOM_MAC**: Adjust room at the mac layer
1229 | * (room space is added or removed below the layer 2 header).
1230 | *
1231 | * * **BPF_ADJ_ROOM_NET**: Adjust room at the network layer
1232 | * (room space is added or removed below the layer 3 header).
1233 | *
1234 | * The following flags are supported at this time:
1235 | *
1236 | * * **BPF_F_ADJ_ROOM_FIXED_GSO**: Do not adjust gso_size.
1237 | * Adjusting mss in this way is not allowed for datagrams.
1238 | *
1239 | * * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV4**,
1240 | * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV6**:
1241 | * Any new space is reserved to hold a tunnel header.
1242 | * Configure skb offsets and other fields accordingly.
1243 | *
1244 | * * **BPF_F_ADJ_ROOM_ENCAP_L4_GRE**,
1245 | * **BPF_F_ADJ_ROOM_ENCAP_L4_UDP**:
1246 | * Use with ENCAP_L3 flags to further specify the tunnel type.
1247 | *
1248 | * * **BPF_F_ADJ_ROOM_ENCAP_L2**\ (*len*):
1249 | * Use with ENCAP_L3/L4 flags to further specify the tunnel
1250 | * type; *len* is the length of the inner MAC header.
1251 | *
1252 | * * **BPF_F_ADJ_ROOM_ENCAP_L2_ETH**:
1253 | * Use with BPF_F_ADJ_ROOM_ENCAP_L2 flag to further specify the
1254 | * L2 type as Ethernet.
1255 | *
1256 | * A call to this helper is susceptible to change the underlying
1257 | * packet buffer. Therefore, at load time, all checks on pointers
1258 | * previously done by the verifier are invalidated and must be
1259 | * performed again, if the helper is used in combination with
1260 | * direct packet access.
1261 | *
1262 | * Returns
1263 | * 0 on success, or a negative error in case of failure.
1264 | */
1265 | static long (*bpf_skb_adjust_room)(struct __sk_buff *skb, __s32 len_diff, __u32 mode, __u64 flags) = (void *) 50;
1266 | 
1267 | /*
1268 | * bpf_redirect_map
1269 | *
1270 | * Redirect the packet to the endpoint referenced by *map* at
1271 | * index *key*. Depending on its type, this *map* can contain
1272 | * references to net devices (for forwarding packets through other
1273 | * ports), or to CPUs (for redirecting XDP frames to another CPU;
1274 | * but this is only implemented for native XDP (with driver
1275 | * support) as of this writing).
1276 | *
1277 | * The lower two bits of *flags* are used as the return code if
1278 | * the map lookup fails. This is so that the return value can be
1279 | * one of the XDP program return codes up to **XDP_TX**, as chosen
1280 | * by the caller. The higher bits of *flags* can be set to
1281 | * BPF_F_BROADCAST or BPF_F_EXCLUDE_INGRESS as defined below.
1282 | *
1283 | * With BPF_F_BROADCAST the packet will be broadcast to all the
1284 | * interfaces in the map, with BPF_F_EXCLUDE_INGRESS the ingress
1285 | * interface will be excluded from the broadcast.
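 *
 * (A minimal XDP sketch, assuming a **BPF_MAP_TYPE_DEVMAP** named
 * tx_ports; **XDP_PASS** fits in the two lower bits and serves as the
 * fallback return value if the lookup fails.)
 *
 * ::
 *
 *     return bpf_redirect_map(&tx_ports, key, XDP_PASS);
 *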
1286 | * 1287 | * See also **bpf_redirect**\ (), which only supports redirecting 1288 | * to an ifindex, but doesn't require a map to do so. 1289 | * 1290 | * Returns 1291 | * **XDP_REDIRECT** on success, or the value of the two lower bits 1292 | * of the *flags* argument on error. 1293 | */ 1294 | static long (*bpf_redirect_map)(void *map, __u32 key, __u64 flags) = (void *) 51; 1295 | 1296 | /* 1297 | * bpf_sk_redirect_map 1298 | * 1299 | * Redirect the packet to the socket referenced by *map* (of type 1300 | * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and 1301 | * egress interfaces can be used for redirection. The 1302 | * **BPF_F_INGRESS** value in *flags* is used to make the 1303 | * distinction (ingress path is selected if the flag is present, 1304 | * egress path otherwise). This is the only flag supported for now. 1305 | * 1306 | * Returns 1307 | * **SK_PASS** on success, or **SK_DROP** on error. 1308 | */ 1309 | static long (*bpf_sk_redirect_map)(struct __sk_buff *skb, void *map, __u32 key, __u64 flags) = (void *) 52; 1310 | 1311 | /* 1312 | * bpf_sock_map_update 1313 | * 1314 | * Add an entry to, or update a *map* referencing sockets. The 1315 | * *skops* is used as a new value for the entry associated to 1316 | * *key*. *flags* is one of: 1317 | * 1318 | * **BPF_NOEXIST** 1319 | * The entry for *key* must not exist in the map. 1320 | * **BPF_EXIST** 1321 | * The entry for *key* must already exist in the map. 1322 | * **BPF_ANY** 1323 | * No condition on the existence of the entry for *key*. 1324 | * 1325 | * If the *map* has eBPF programs (parser and verdict), those will 1326 | * be inherited by the socket being added. If the socket is 1327 | * already attached to eBPF programs, this results in an error. 1328 | * 1329 | * Returns 1330 | * 0 on success, or a negative error in case of failure. 1331 | */ 1332 | static long (*bpf_sock_map_update)(struct bpf_sock_ops *skops, void *map, void *key, __u64 flags) = (void *) 53; 1333 | 1334 | /* 1335 | * bpf_xdp_adjust_meta 1336 | * 1337 | * Adjust the address pointed by *xdp_md*\ **->data_meta** by 1338 | * *delta* (which can be positive or negative). Note that this 1339 | * operation modifies the address stored in *xdp_md*\ **->data**, 1340 | * so the latter must be loaded only after the helper has been 1341 | * called. 1342 | * 1343 | * The use of *xdp_md*\ **->data_meta** is optional and programs 1344 | * are not required to use it. The rationale is that when the 1345 | * packet is processed with XDP (e.g. as DoS filter), it is 1346 | * possible to push further meta data along with it before passing 1347 | * to the stack, and to give the guarantee that an ingress eBPF 1348 | * program attached as a TC classifier on the same device can pick 1349 | * this up for further post-processing. Since TC works with socket 1350 | * buffers, it remains possible to set from XDP the **mark** or 1351 | * **priority** pointers, or other pointers for the socket buffer. 1352 | * Having this scratch space generic and programmable allows for 1353 | * more flexibility as the user is free to store whatever meta 1354 | * data they need. 1355 | * 1356 | * A call to this helper is susceptible to change the underlying 1357 | * packet buffer. Therefore, at load time, all checks on pointers 1358 | * previously done by the verifier are invalidated and must be 1359 | * performed again, if the helper is used in combination with 1360 | * direct packet access. 1361 | * 1362 | * Returns 1363 | * 0 on success, or a negative error in case of failure. 
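 *
 * (Sketch: reserving four bytes of metadata in front of the packet
 * data and storing a value there; the bounds check against
 * *xdp_md*\ **->data** is what the verifier expects.)
 *
 * ::
 *
 *     if (bpf_xdp_adjust_meta(xdp_md, -(int)sizeof(__u32)))
 *         return XDP_PASS;
 *     data_meta = (void *)(long)xdp_md->data_meta;
 *     data = (void *)(long)xdp_md->data;
 *     if (data_meta + sizeof(__u32) > data)
 *         return XDP_PASS;
 *     *(__u32 *)data_meta = 42;
 *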
1364 | */
1365 | static long (*bpf_xdp_adjust_meta)(struct xdp_md *xdp_md, int delta) = (void *) 54;
1366 | 
1367 | /*
1368 | * bpf_perf_event_read_value
1369 | *
1370 | * Read the value of a perf event counter, and store it into *buf*
1371 | * of size *buf_size*. This helper relies on a *map* of type
1372 | * **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of the perf event
1373 | * counter is selected when *map* is updated with perf event file
1374 | * descriptors. The *map* is an array whose size is the number of
1375 | * available CPUs, and each cell contains a value relative to one
1376 | * CPU. The value to retrieve is indicated by *flags*, that
1377 | * contains the index of the CPU to look up, masked with
1378 | * **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to
1379 | * **BPF_F_CURRENT_CPU** to indicate that the value for the
1380 | * current CPU should be retrieved.
1381 | *
1382 | * This helper behaves in a way close to the
1383 | * **bpf_perf_event_read**\ () helper, save that instead of
1384 | * just returning the value observed, it fills the *buf*
1385 | * structure. This allows for additional data to be retrieved: in
1386 | * particular, the enabled and running times (in *buf*\
1387 | * **->enabled** and *buf*\ **->running**, respectively) are
1388 | * copied. In general, **bpf_perf_event_read_value**\ () is
1389 | * recommended over **bpf_perf_event_read**\ (), which has some
1390 | * ABI issues and provides fewer functionalities.
1391 | *
1392 | * These values are interesting, because hardware PMU (Performance
1393 | * Monitoring Unit) counters are limited resources. When there are
1394 | * more PMU based perf events opened than available counters, the
1395 | * kernel will multiplex these events so each event gets a certain
1396 | * percentage (but not all) of the PMU time. When multiplexing
1397 | * happens, the number of samples or the counter value will not
1398 | * match what they would be if no multiplexing occurred. This
1399 | * makes comparison between different runs difficult.
1400 | * Typically, the counter value should be normalized before
1401 | * comparing to other experiments. The usual normalization is done
1402 | * as follows.
1403 | *
1404 | * ::
1405 | *
1406 | * normalized_counter = counter * t_enabled / t_running
1407 | *
1408 | * Where *t_enabled* is the time enabled for the event and
1409 | * *t_running* is the time running for the event since the last
1410 | * normalization. The enabled and running times are accumulated
1411 | * since the perf event open. To compute the scaling factor between
1412 | * two invocations of an eBPF program, users can use the CPU id as
1413 | * the key (which is typical for the perf array usage model) to
1414 | * remember the previous value and do the calculation inside the eBPF program.
1415 | *
1416 | * Returns
1417 | * 0 on success, or a negative error in case of failure.
1418 | */
1419 | static long (*bpf_perf_event_read_value)(void *map, __u64 flags, struct bpf_perf_event_value *buf, __u32 buf_size) = (void *) 55;
1420 | 
1421 | /*
1422 | * bpf_perf_prog_read_value
1423 | *
1424 | * For an eBPF program attached to a perf event, retrieve the
1425 | * value of the event counter associated to *ctx* and store it in
1426 | * the structure pointed by *buf* and of size *buf_size*. Enabled
1427 | * and running times are also stored in the structure (see
1428 | * description of helper **bpf_perf_event_read_value**\ () for
1429 | * more details).
1430 | *
1431 | * Returns
1432 | * 0 on success, or a negative error in case of failure.
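 *
 * (Sketch, from within a perf event program; use() stands for
 * whatever consumes the values.)
 *
 * ::
 *
 *     struct bpf_perf_event_value value = {};
 *
 *     if (!bpf_perf_prog_read_value(ctx, &value, sizeof(value)))
 *         use(value.counter, value.enabled, value.running);
 *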
1433 | */
1434 | static long (*bpf_perf_prog_read_value)(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, __u32 buf_size) = (void *) 56;
1435 | 
1436 | /*
1437 | * bpf_getsockopt
1438 | *
1439 | * Emulate a call to **getsockopt()** on the socket associated to
1440 | * *bpf_socket*, which must be a full socket. The *level* at
1441 | * which the option resides and the name *optname* of the option
1442 | * must be specified, see **getsockopt(2)** for more information.
1443 | * The retrieved value is stored in the structure pointed by
1444 | * *optval* and of length *optlen*.
1445 | *
1446 | * *bpf_socket* should be one of the following:
1447 | *
1448 | * * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**.
1449 | * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**
1450 | * and **BPF_CGROUP_INET6_CONNECT**.
1451 | *
1452 | * This helper actually implements a subset of **getsockopt()**.
1453 | * It supports the following *level*\ s:
1454 | *
1455 | * * **IPPROTO_TCP**, which supports *optname*
1456 | * **TCP_CONGESTION**.
1457 | * * **IPPROTO_IP**, which supports *optname* **IP_TOS**.
1458 | * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**.
1459 | *
1460 | * Returns
1461 | * 0 on success, or a negative error in case of failure.
1462 | */
1463 | static long (*bpf_getsockopt)(void *bpf_socket, int level, int optname, void *optval, int optlen) = (void *) 57;
1464 | 
1465 | /*
1466 | * bpf_override_return
1467 | *
1468 | * Used for error injection, this helper uses kprobes to override
1469 | * the return value of the probed function, and to set it to *rc*.
1470 | * The first argument is the context *regs* on which the kprobe
1471 | * works.
1472 | *
1473 | * This helper works by setting the PC (program counter)
1474 | * to an override function which is run in place of the original
1475 | * probed function. This means the probed function is not run at
1476 | * all. The replacement function just returns with the required
1477 | * value.
1478 | *
1479 | * This helper has security implications, and thus is subject to
1480 | * restrictions. It is only available if the kernel was compiled
1481 | * with the **CONFIG_BPF_KPROBE_OVERRIDE** configuration
1482 | * option, and in this case it only works on functions tagged with
1483 | * **ALLOW_ERROR_INJECTION** in the kernel code.
1484 | *
1485 | * Also, the helper is only available for the architectures having
1486 | * the CONFIG_FUNCTION_ERROR_INJECTION option. As of this writing,
1487 | * x86 architecture is the only one to support this feature.
1488 | *
1489 | * Returns
1490 | * 0
1491 | */
1492 | static long (*bpf_override_return)(struct pt_regs *regs, __u64 rc) = (void *) 58;
1493 | 
1494 | /*
1495 | * bpf_sock_ops_cb_flags_set
1496 | *
1497 | * Attempt to set the value of the **bpf_sock_ops_cb_flags** field
1498 | * for the full TCP socket associated to *bpf_sock_ops* to
1499 | * *argval*.
1500 | *
1501 | * The primary use of this field is to determine if there should
1502 | * be calls to eBPF programs of type
1503 | * **BPF_PROG_TYPE_SOCK_OPS** at various points in the TCP
1504 | * code. A program of the same type can change its value, per
1505 | * connection and as necessary, when the connection is
1506 | * established. This field is directly accessible for reading, but
1507 | * this helper must be used for updates in order to return an
1508 | * error if an eBPF program tries to set a callback that is not
1509 | * supported in the current kernel.
1510 | * 1511 | * *argval* is a flag array which can combine these flags: 1512 | * 1513 | * * **BPF_SOCK_OPS_RTO_CB_FLAG** (retransmission time out) 1514 | * * **BPF_SOCK_OPS_RETRANS_CB_FLAG** (retransmission) 1515 | * * **BPF_SOCK_OPS_STATE_CB_FLAG** (TCP state change) 1516 | * * **BPF_SOCK_OPS_RTT_CB_FLAG** (every RTT) 1517 | * 1518 | * Therefore, this function can be used to clear a callback flag by 1519 | * setting the appropriate bit to zero. e.g. to disable the RTO 1520 | * callback: 1521 | * 1522 | * **bpf_sock_ops_cb_flags_set(bpf_sock,** 1523 | * **bpf_sock->bpf_sock_ops_cb_flags & ~BPF_SOCK_OPS_RTO_CB_FLAG)** 1524 | * 1525 | * Here are some examples of where one could call such eBPF 1526 | * program: 1527 | * 1528 | * * When RTO fires. 1529 | * * When a packet is retransmitted. 1530 | * * When the connection terminates. 1531 | * * When a packet is sent. 1532 | * * When a packet is received. 1533 | * 1534 | * Returns 1535 | * Code **-EINVAL** if the socket is not a full TCP socket; 1536 | * otherwise, a positive number containing the bits that could not 1537 | * be set is returned (which comes down to 0 if all bits were set 1538 | * as required). 1539 | */ 1540 | static long (*bpf_sock_ops_cb_flags_set)(struct bpf_sock_ops *bpf_sock, int argval) = (void *) 59; 1541 | 1542 | /* 1543 | * bpf_msg_redirect_map 1544 | * 1545 | * This helper is used in programs implementing policies at the 1546 | * socket level. If the message *msg* is allowed to pass (i.e. if 1547 | * the verdict eBPF program returns **SK_PASS**), redirect it to 1548 | * the socket referenced by *map* (of type 1549 | * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and 1550 | * egress interfaces can be used for redirection. The 1551 | * **BPF_F_INGRESS** value in *flags* is used to make the 1552 | * distinction (ingress path is selected if the flag is present, 1553 | * egress path otherwise). This is the only flag supported for now. 1554 | * 1555 | * Returns 1556 | * **SK_PASS** on success, or **SK_DROP** on error. 1557 | */ 1558 | static long (*bpf_msg_redirect_map)(struct sk_msg_md *msg, void *map, __u32 key, __u64 flags) = (void *) 60; 1559 | 1560 | /* 1561 | * bpf_msg_apply_bytes 1562 | * 1563 | * For socket policies, apply the verdict of the eBPF program to 1564 | * the next *bytes* (number of bytes) of message *msg*. 1565 | * 1566 | * For example, this helper can be used in the following cases: 1567 | * 1568 | * * A single **sendmsg**\ () or **sendfile**\ () system call 1569 | * contains multiple logical messages that the eBPF program is 1570 | * supposed to read and for which it should apply a verdict. 1571 | * * An eBPF program only cares to read the first *bytes* of a 1572 | * *msg*. If the message has a large payload, then setting up 1573 | * and calling the eBPF program repeatedly for all bytes, even 1574 | * though the verdict is already known, would create unnecessary 1575 | * overhead. 1576 | * 1577 | * When called from within an eBPF program, the helper sets a 1578 | * counter internal to the BPF infrastructure, that is used to 1579 | * apply the last verdict to the next *bytes*. If *bytes* is 1580 | * smaller than the current data being processed from a 1581 | * **sendmsg**\ () or **sendfile**\ () system call, the first 1582 | * *bytes* will be sent and the eBPF program will be re-run with 1583 | * the pointer for start of data pointing to byte number *bytes* 1584 | * **+ 1**. 
If *bytes* is larger than the current data being 1585 | * processed, then the eBPF verdict will be applied to multiple 1586 | * **sendmsg**\ () or **sendfile**\ () calls until *bytes* are 1587 | * consumed. 1588 | * 1589 | * Note that if a socket closes with the internal counter holding 1590 | * a non-zero value, this is not a problem because data is not 1591 | * being buffered for *bytes* and is sent as it is received. 1592 | * 1593 | * Returns 1594 | * 0 1595 | */ 1596 | static long (*bpf_msg_apply_bytes)(struct sk_msg_md *msg, __u32 bytes) = (void *) 61; 1597 | 1598 | /* 1599 | * bpf_msg_cork_bytes 1600 | * 1601 | * For socket policies, prevent the execution of the verdict eBPF 1602 | * program for message *msg* until *bytes* (byte number) have been 1603 | * accumulated. 1604 | * 1605 | * This can be used when one needs a specific number of bytes 1606 | * before a verdict can be assigned, even if the data spans 1607 | * multiple **sendmsg**\ () or **sendfile**\ () calls. The extreme 1608 | * case would be a user calling **sendmsg**\ () repeatedly with 1609 | * 1-byte long message segments. Obviously, this is bad for 1610 | * performance, but it is still valid. If the eBPF program needs 1611 | * *bytes* bytes to validate a header, this helper can be used to 1612 | * prevent the eBPF program to be called again until *bytes* have 1613 | * been accumulated. 1614 | * 1615 | * Returns 1616 | * 0 1617 | */ 1618 | static long (*bpf_msg_cork_bytes)(struct sk_msg_md *msg, __u32 bytes) = (void *) 62; 1619 | 1620 | /* 1621 | * bpf_msg_pull_data 1622 | * 1623 | * For socket policies, pull in non-linear data from user space 1624 | * for *msg* and set pointers *msg*\ **->data** and *msg*\ 1625 | * **->data_end** to *start* and *end* bytes offsets into *msg*, 1626 | * respectively. 1627 | * 1628 | * If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a 1629 | * *msg* it can only parse data that the (**data**, **data_end**) 1630 | * pointers have already consumed. For **sendmsg**\ () hooks this 1631 | * is likely the first scatterlist element. But for calls relying 1632 | * on the **sendpage** handler (e.g. **sendfile**\ ()) this will 1633 | * be the range (**0**, **0**) because the data is shared with 1634 | * user space and by default the objective is to avoid allowing 1635 | * user space to modify data while (or after) eBPF verdict is 1636 | * being decided. This helper can be used to pull in data and to 1637 | * set the start and end pointer to given values. Data will be 1638 | * copied if necessary (i.e. if data was not linear and if start 1639 | * and end pointers do not point to the same chunk). 1640 | * 1641 | * A call to this helper is susceptible to change the underlying 1642 | * packet buffer. Therefore, at load time, all checks on pointers 1643 | * previously done by the verifier are invalidated and must be 1644 | * performed again, if the helper is used in combination with 1645 | * direct packet access. 1646 | * 1647 | * All values for *flags* are reserved for future usage, and must 1648 | * be left at zero. 1649 | * 1650 | * Returns 1651 | * 0 on success, or a negative error in case of failure. 1652 | */ 1653 | static long (*bpf_msg_pull_data)(struct sk_msg_md *msg, __u32 start, __u32 end, __u64 flags) = (void *) 63; 1654 | 1655 | /* 1656 | * bpf_bind 1657 | * 1658 | * Bind the socket associated to *ctx* to the address pointed by 1659 | * *addr*, of length *addr_len*. 
This allows for making an outgoing
1660 | * connection from the desired IP address, which can be useful for
1661 | * example when all processes inside a cgroup should use one
1662 | * single IP address on a host that has multiple IP addresses configured.
1663 | *
1664 | * This helper works for IPv4 and IPv6, TCP and UDP sockets. The
1665 | * domain (*addr*\ **->sa_family**) must be **AF_INET** (or
1666 | * **AF_INET6**). It's advised to pass zero port (**sin_port**
1667 | * or **sin6_port**) which triggers IP_BIND_ADDRESS_NO_PORT-like
1668 | * behavior and lets the kernel efficiently pick up an unused
1669 | * port as long as the 4-tuple is unique. Passing a non-zero port
1670 | * might lead to degraded performance.
1671 | *
1672 | * Returns
1673 | * 0 on success, or a negative error in case of failure.
1674 | */
1675 | static long (*bpf_bind)(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len) = (void *) 64;
1676 | 
1677 | /*
1678 | * bpf_xdp_adjust_tail
1679 | *
1680 | * Adjust (move) *xdp_md*\ **->data_end** by *delta* bytes. It is
1681 | * possible to both shrink and grow the packet tail.
1682 | * Shrinking is done by passing a negative *delta*.
1683 | *
1684 | * A call to this helper is susceptible to change the underlying
1685 | * packet buffer. Therefore, at load time, all checks on pointers
1686 | * previously done by the verifier are invalidated and must be
1687 | * performed again, if the helper is used in combination with
1688 | * direct packet access.
1689 | *
1690 | * Returns
1691 | * 0 on success, or a negative error in case of failure.
1692 | */
1693 | static long (*bpf_xdp_adjust_tail)(struct xdp_md *xdp_md, int delta) = (void *) 65;
1694 | 
1695 | /*
1696 | * bpf_skb_get_xfrm_state
1697 | *
1698 | * Retrieve the XFRM state (IP transform framework, see also
1699 | * **ip-xfrm(8)**) at *index* in XFRM "security path" for *skb*.
1700 | *
1701 | * The retrieved value is stored in the **struct bpf_xfrm_state**
1702 | * pointed by *xfrm_state* and of length *size*.
1703 | *
1704 | * All values for *flags* are reserved for future usage, and must
1705 | * be left at zero.
1706 | *
1707 | * This helper is available only if the kernel was compiled with
1708 | * the **CONFIG_XFRM** configuration option.
1709 | *
1710 | * Returns
1711 | * 0 on success, or a negative error in case of failure.
1712 | */
1713 | static long (*bpf_skb_get_xfrm_state)(struct __sk_buff *skb, __u32 index, struct bpf_xfrm_state *xfrm_state, __u32 size, __u64 flags) = (void *) 66;
1714 | 
1715 | /*
1716 | * bpf_get_stack
1717 | *
1718 | * Return a user or a kernel stack in a buffer provided by the bpf
1719 | * program. To achieve this, the helper needs *ctx*, which is a
1720 | * pointer to the context on which the tracing program is executed.
1721 | * To store the stacktrace, the bpf program provides *buf* with
1722 | * a nonnegative *size*.
1723 | *
1724 | * The last argument, *flags*, holds the number of stack frames to
1725 | * skip (from 0 to 255), masked with
1726 | * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set
1727 | * the following flags:
1728 | *
1729 | * **BPF_F_USER_STACK**
1730 | * Collect a user space stack instead of a kernel stack.
1731 | * **BPF_F_USER_BUILD_ID**
1732 | * Collect buildid+offset instead of ips for user stack,
1733 | * only valid if **BPF_F_USER_STACK** is also specified.
1734 | *
1735 | * **bpf_get_stack**\ () can collect up to
1736 | * **PERF_MAX_STACK_DEPTH** both kernel and user frames, subject
1737 | * to a sufficiently large buffer size.
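 *
 * (For instance, collecting a user-space stack into a fixed buffer:)
 *
 * ::
 *
 *     __u64 stack[64];
 *     long n = bpf_get_stack(ctx, stack, sizeof(stack),
 *                            BPF_F_USER_STACK);
 *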
Note that
1738 | * this limit can be controlled with the **sysctl** program, and
1739 | * that it should be manually increased in order to profile long
1740 | * user stacks (such as stacks for Java programs). To do so, use:
1741 | *
1742 | * ::
1743 | *
1744 | * # sysctl kernel.perf_event_max_stack=
1745 | *
1746 | * Returns
1747 | * A non-negative value equal to or less than *size* on success,
1748 | * or a negative error in case of failure.
1749 | */
1750 | static long (*bpf_get_stack)(void *ctx, void *buf, __u32 size, __u64 flags) = (void *) 67;
1751 | 
1752 | /*
1753 | * bpf_skb_load_bytes_relative
1754 | *
1755 | * This helper is similar to **bpf_skb_load_bytes**\ () in that
1756 | * it provides an easy way to load *len* bytes from *offset*
1757 | * from the packet associated to *skb*, into the buffer pointed
1758 | * by *to*. The difference to **bpf_skb_load_bytes**\ () is that
1759 | * a fifth argument *start_header* exists in order to select a
1760 | * base offset to start from. *start_header* can be one of:
1761 | *
1762 | * **BPF_HDR_START_MAC**
1763 | * Base offset to load data from is *skb*'s mac header.
1764 | * **BPF_HDR_START_NET**
1765 | * Base offset to load data from is *skb*'s network header.
1766 | *
1767 | * In general, "direct packet access" is the preferred method to
1768 | * access packet data, however, this helper is particularly useful
1769 | * in socket filters where *skb*\ **->data** does not always point
1770 | * to the start of the mac header and where "direct packet access"
1771 | * is not available.
1772 | *
1773 | * Returns
1774 | * 0 on success, or a negative error in case of failure.
1775 | */
1776 | static long (*bpf_skb_load_bytes_relative)(const void *skb, __u32 offset, void *to, __u32 len, __u32 start_header) = (void *) 68;
1777 | 
1778 | /*
1779 | * bpf_fib_lookup
1780 | *
1781 | * Do FIB lookup in kernel tables using parameters in *params*.
1782 | * If lookup is successful and result shows packet is to be
1783 | * forwarded, the neighbor tables are searched for the nexthop.
1784 | * If successful (i.e., FIB lookup shows forwarding and nexthop
1785 | * is resolved), the nexthop address is returned in ipv4_dst
1786 | * or ipv6_dst based on family, smac is set to mac address of
1787 | * egress device, dmac is set to nexthop mac address, rt_metric
1788 | * is set to metric from route (IPv4/IPv6 only), and ifindex
1789 | * is set to the device index of the nexthop from the FIB lookup.
1790 | *
1791 | * *plen* argument is the size of the passed in struct.
1792 | * *flags* argument can be a combination of one or more of the
1793 | * following values:
1794 | *
1795 | * **BPF_FIB_LOOKUP_DIRECT**
1796 | * Do a direct table lookup vs full lookup using FIB
1797 | * rules.
1798 | * **BPF_FIB_LOOKUP_OUTPUT**
1799 | * Perform lookup from an egress perspective (default is
1800 | * ingress).
1801 | *
1802 | * *ctx* is either **struct xdp_md** for XDP programs or
1803 | * **struct sk_buff** for tc cls_act programs.
1804 | *
1805 | * Returns
1806 | * * < 0 if any input argument is invalid
1807 | * * 0 on success (packet is forwarded, nexthop neighbor exists)
1808 | * * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the
1809 | * packet is not forwarded or needs assist from full stack
1810 | *
1811 | * If lookup fails with BPF_FIB_LKUP_RET_FRAG_NEEDED, then the MTU
1812 | * was exceeded and output params->mtu_result contains the MTU.
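 *
 * (An XDP-side sketch for IPv4; filling the addresses from the parsed
 * packet is elided, and forward() stands for the caller's redirect
 * logic.)
 *
 * ::
 *
 *     struct bpf_fib_lookup params = {};
 *
 *     params.family = AF_INET;
 *     params.ifindex = xdp_md->ingress_ifindex;
 *     rc = bpf_fib_lookup(xdp_md, &params, sizeof(params), 0);
 *     if (rc == BPF_FIB_LKUP_RET_SUCCESS)
 *         forward(params.ifindex, params.smac, params.dmac);
 *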
1813 | */ 1814 | static long (*bpf_fib_lookup)(void *ctx, struct bpf_fib_lookup *params, int plen, __u32 flags) = (void *) 69; 1815 | 1816 | /* 1817 | * bpf_sock_hash_update 1818 | * 1819 | * Add an entry to, or update a sockhash *map* referencing sockets. 1820 | * The *skops* is used as a new value for the entry associated to 1821 | * *key*. *flags* is one of: 1822 | * 1823 | * **BPF_NOEXIST** 1824 | * The entry for *key* must not exist in the map. 1825 | * **BPF_EXIST** 1826 | * The entry for *key* must already exist in the map. 1827 | * **BPF_ANY** 1828 | * No condition on the existence of the entry for *key*. 1829 | * 1830 | * If the *map* has eBPF programs (parser and verdict), those will 1831 | * be inherited by the socket being added. If the socket is 1832 | * already attached to eBPF programs, this results in an error. 1833 | * 1834 | * Returns 1835 | * 0 on success, or a negative error in case of failure. 1836 | */ 1837 | static long (*bpf_sock_hash_update)(struct bpf_sock_ops *skops, void *map, void *key, __u64 flags) = (void *) 70; 1838 | 1839 | /* 1840 | * bpf_msg_redirect_hash 1841 | * 1842 | * This helper is used in programs implementing policies at the 1843 | * socket level. If the message *msg* is allowed to pass (i.e. if 1844 | * the verdict eBPF program returns **SK_PASS**), redirect it to 1845 | * the socket referenced by *map* (of type 1846 | * **BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and 1847 | * egress interfaces can be used for redirection. The 1848 | * **BPF_F_INGRESS** value in *flags* is used to make the 1849 | * distinction (ingress path is selected if the flag is present, 1850 | * egress path otherwise). This is the only flag supported for now. 1851 | * 1852 | * Returns 1853 | * **SK_PASS** on success, or **SK_DROP** on error. 1854 | */ 1855 | static long (*bpf_msg_redirect_hash)(struct sk_msg_md *msg, void *map, void *key, __u64 flags) = (void *) 71; 1856 | 1857 | /* 1858 | * bpf_sk_redirect_hash 1859 | * 1860 | * This helper is used in programs implementing policies at the 1861 | * skb socket level. If the sk_buff *skb* is allowed to pass (i.e. 1862 | * if the verdict eBPF program returns **SK_PASS**), redirect it 1863 | * to the socket referenced by *map* (of type 1864 | * **BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and 1865 | * egress interfaces can be used for redirection. The 1866 | * **BPF_F_INGRESS** value in *flags* is used to make the 1867 | * distinction (ingress path is selected if the flag is present, 1868 | * egress otherwise). This is the only flag supported for now. 1869 | * 1870 | * Returns 1871 | * **SK_PASS** on success, or **SK_DROP** on error. 1872 | */ 1873 | static long (*bpf_sk_redirect_hash)(struct __sk_buff *skb, void *map, void *key, __u64 flags) = (void *) 72; 1874 | 1875 | /* 1876 | * bpf_lwt_push_encap 1877 | * 1878 | * Encapsulate the packet associated to *skb* within a Layer 3 1879 | * protocol header. This header is provided in the buffer at 1880 | * address *hdr*, with *len* its size in bytes. *type* indicates 1881 | * the protocol of the header and can be one of: 1882 | * 1883 | * **BPF_LWT_ENCAP_SEG6** 1884 | * IPv6 encapsulation with Segment Routing Header 1885 | * (**struct ipv6_sr_hdr**). *hdr* only contains the SRH, 1886 | * the IPv6 header is computed by the kernel. 1887 | * **BPF_LWT_ENCAP_SEG6_INLINE** 1888 | * Only works if *skb* contains an IPv6 packet. Insert a 1889 | * Segment Routing Header (**struct ipv6_sr_hdr**) inside 1890 | * the IPv6 header. 
1891 | * **BPF_LWT_ENCAP_IP** 1892 | * IP encapsulation (GRE/GUE/IPIP/etc). The outer header 1893 | * must be IPv4 or IPv6, followed by zero or more 1894 | * additional headers, up to **LWT_BPF_MAX_HEADROOM** 1895 | * total bytes in all prepended headers. Please note that 1896 | * if **skb_is_gso**\ (*skb*) is true, no more than two 1897 | * headers can be prepended, and the inner header, if 1898 | * present, should be either GRE or UDP/GUE. 1899 | * 1900 | * **BPF_LWT_ENCAP_SEG6**\ \* types can be called by BPF programs 1901 | * of type **BPF_PROG_TYPE_LWT_IN**; **BPF_LWT_ENCAP_IP** type can 1902 | * be called by bpf programs of types **BPF_PROG_TYPE_LWT_IN** and 1903 | * **BPF_PROG_TYPE_LWT_XMIT**. 1904 | * 1905 | * A call to this helper is susceptible to change the underlying 1906 | * packet buffer. Therefore, at load time, all checks on pointers 1907 | * previously done by the verifier are invalidated and must be 1908 | * performed again, if the helper is used in combination with 1909 | * direct packet access. 1910 | * 1911 | * Returns 1912 | * 0 on success, or a negative error in case of failure. 1913 | */ 1914 | static long (*bpf_lwt_push_encap)(struct __sk_buff *skb, __u32 type, void *hdr, __u32 len) = (void *) 73; 1915 | 1916 | /* 1917 | * bpf_lwt_seg6_store_bytes 1918 | * 1919 | * Store *len* bytes from address *from* into the packet 1920 | * associated to *skb*, at *offset*. Only the flags, tag and TLVs 1921 | * inside the outermost IPv6 Segment Routing Header can be 1922 | * modified through this helper. 1923 | * 1924 | * A call to this helper is susceptible to change the underlying 1925 | * packet buffer. Therefore, at load time, all checks on pointers 1926 | * previously done by the verifier are invalidated and must be 1927 | * performed again, if the helper is used in combination with 1928 | * direct packet access. 1929 | * 1930 | * Returns 1931 | * 0 on success, or a negative error in case of failure. 1932 | */ 1933 | static long (*bpf_lwt_seg6_store_bytes)(struct __sk_buff *skb, __u32 offset, const void *from, __u32 len) = (void *) 74; 1934 | 1935 | /* 1936 | * bpf_lwt_seg6_adjust_srh 1937 | * 1938 | * Adjust the size allocated to TLVs in the outermost IPv6 1939 | * Segment Routing Header contained in the packet associated to 1940 | * *skb*, at position *offset* by *delta* bytes. Only offsets 1941 | * after the segments are accepted. *delta* can be as well 1942 | * positive (growing) as negative (shrinking). 1943 | * 1944 | * A call to this helper is susceptible to change the underlying 1945 | * packet buffer. Therefore, at load time, all checks on pointers 1946 | * previously done by the verifier are invalidated and must be 1947 | * performed again, if the helper is used in combination with 1948 | * direct packet access. 1949 | * 1950 | * Returns 1951 | * 0 on success, or a negative error in case of failure. 1952 | */ 1953 | static long (*bpf_lwt_seg6_adjust_srh)(struct __sk_buff *skb, __u32 offset, __s32 delta) = (void *) 75; 1954 | 1955 | /* 1956 | * bpf_lwt_seg6_action 1957 | * 1958 | * Apply an IPv6 Segment Routing action of type *action* to the 1959 | * packet associated to *skb*. Each action takes a parameter 1960 | * contained at address *param*, and of length *param_len* bytes. 1961 | * *action* can be one of: 1962 | * 1963 | * **SEG6_LOCAL_ACTION_END_X** 1964 | * End.X action: Endpoint with Layer-3 cross-connect. 1965 | * Type of *param*: **struct in6_addr**. 1966 | * **SEG6_LOCAL_ACTION_END_T** 1967 | * End.T action: Endpoint with specific IPv6 table lookup. 
1968 | * Type of *param*: **int**.
1969 | * **SEG6_LOCAL_ACTION_END_B6**
1970 | * End.B6 action: Endpoint bound to an SRv6 policy.
1971 | * Type of *param*: **struct ipv6_sr_hdr**.
1972 | * **SEG6_LOCAL_ACTION_END_B6_ENCAP**
1973 | * End.B6.Encap action: Endpoint bound to an SRv6
1974 | * encapsulation policy.
1975 | * Type of *param*: **struct ipv6_sr_hdr**.
1976 | *
1977 | * A call to this helper is susceptible to change the underlying
1978 | * packet buffer. Therefore, at load time, all checks on pointers
1979 | * previously done by the verifier are invalidated and must be
1980 | * performed again, if the helper is used in combination with
1981 | * direct packet access.
1982 | *
1983 | * Returns
1984 | * 0 on success, or a negative error in case of failure.
1985 | */
1986 | static long (*bpf_lwt_seg6_action)(struct __sk_buff *skb, __u32 action, void *param, __u32 param_len) = (void *) 76;
1987 | 
1988 | /*
1989 | * bpf_rc_repeat
1990 | *
1991 | * This helper is used in programs implementing IR decoding, to
1992 | * report a successfully decoded repeat key message. This delays
1993 | * the generation of a key up event for previously generated
1994 | * key down event.
1995 | *
1996 | * Some IR protocols like NEC have a special IR message for
1997 | * repeating the last button, for when a button is held down.
1998 | *
1999 | * The *ctx* should point to the lirc sample as passed into
2000 | * the program.
2001 | *
2002 | * This helper is only available if the kernel was compiled with
2003 | * the **CONFIG_BPF_LIRC_MODE2** configuration option set to
2004 | * "**y**".
2005 | *
2006 | * Returns
2007 | * 0
2008 | */
2009 | static long (*bpf_rc_repeat)(void *ctx) = (void *) 77;
2010 | 
2011 | /*
2012 | * bpf_rc_keydown
2013 | *
2014 | * This helper is used in programs implementing IR decoding, to
2015 | * report a successfully decoded key press with *scancode*,
2016 | * *toggle* value in the given *protocol*. The scancode will be
2017 | * translated to a keycode using the rc keymap, and reported as
2018 | * an input key down event. After a period a key up event is
2019 | * generated. This period can be extended by calling either
2020 | * **bpf_rc_keydown**\ () again with the same values, or calling
2021 | * **bpf_rc_repeat**\ ().
2022 | *
2023 | * Some protocols include a toggle bit, in case the button was
2024 | * released and pressed again between consecutive scancodes.
2025 | *
2026 | * The *ctx* should point to the lirc sample as passed into
2027 | * the program.
2028 | *
2029 | * The *protocol* is the decoded protocol number (see
2030 | * **enum rc_proto** for some predefined values).
2031 | *
2032 | * This helper is only available if the kernel was compiled with
2033 | * the **CONFIG_BPF_LIRC_MODE2** configuration option set to
2034 | * "**y**".
2035 | *
2036 | * Returns
2037 | * 0
2038 | */
2039 | static long (*bpf_rc_keydown)(void *ctx, __u32 protocol, __u64 scancode, __u32 toggle) = (void *) 78;
2040 | 
2041 | /*
2042 | * bpf_skb_cgroup_id
2043 | *
2044 | * Return the cgroup v2 id of the socket associated with the *skb*.
2045 | * This is roughly similar to the **bpf_get_cgroup_classid**\ ()
2046 | * helper for cgroup v1 by providing a tag (or identifier) that
2047 | * can be matched on or used for map lookups, e.g. to implement
2048 | * policy. The cgroup v2 id of a given path in the hierarchy is
2049 | * exposed in user space through the f_handle API in order to get
2050 | * to the same 64-bit id.
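 *
 * (E.g. the id can serve directly as a key, assuming a hash map named
 * cgroup_policy keyed by __u64:)
 *
 * ::
 *
 *     __u64 cgid = bpf_skb_cgroup_id(skb);
 *     policy = bpf_map_lookup_elem(&cgroup_policy, &cgid);
 *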
2051 | *
2052 | * This helper can be used on TC egress path, but not on ingress,
2053 | * and is available only if the kernel was compiled with the
2054 | * **CONFIG_SOCK_CGROUP_DATA** configuration option.
2055 | *
2056 | * Returns
2057 | * The id is returned or 0 in case the id could not be retrieved.
2058 | */
2059 | static __u64 (*bpf_skb_cgroup_id)(struct __sk_buff *skb) = (void *) 79;
2060 | 
2061 | /*
2062 | * bpf_get_current_cgroup_id
2063 | *
2064 | *
2065 | * Returns
2066 | * A 64-bit integer containing the current cgroup id based
2067 | * on the cgroup within which the current task is running.
2068 | */
2069 | static __u64 (*bpf_get_current_cgroup_id)(void) = (void *) 80;
2070 | 
2071 | /*
2072 | * bpf_get_local_storage
2073 | *
2074 | * Get the pointer to the local storage area.
2075 | * The type and the size of the local storage is defined
2076 | * by the *map* argument.
2077 | * The *flags* meaning is specific for each map type,
2078 | * and has to be 0 for cgroup local storage.
2079 | *
2080 | * Depending on the BPF program type, a local storage area
2081 | * can be shared between multiple instances of the BPF program,
2082 | * running simultaneously.
2083 | *
2084 | * Users should take care of the synchronization themselves,
2085 | * for example by using the **BPF_ATOMIC** instructions to alter
2086 | * the shared data.
2087 | *
2088 | * Returns
2089 | * A pointer to the local storage area.
2090 | */
2091 | static void *(*bpf_get_local_storage)(void *map, __u64 flags) = (void *) 81;
2092 | 
2093 | /*
2094 | * bpf_sk_select_reuseport
2095 | *
2096 | * Select a **SO_REUSEPORT** socket from a
2097 | * **BPF_MAP_TYPE_REUSEPORT_ARRAY** *map*.
2098 | * It checks that the selected socket matches the incoming
2099 | * request in the socket buffer.
2100 | *
2101 | * Returns
2102 | * 0 on success, or a negative error in case of failure.
2103 | */
2104 | static long (*bpf_sk_select_reuseport)(struct sk_reuseport_md *reuse, void *map, void *key, __u64 flags) = (void *) 82;
2105 | 
2106 | /*
2107 | * bpf_skb_ancestor_cgroup_id
2108 | *
2109 | * Return id of cgroup v2 that is ancestor of cgroup associated
2110 | * with the *skb* at the *ancestor_level*. The root cgroup is at
2111 | * *ancestor_level* zero and each step down the hierarchy
2112 | * increments the level. If *ancestor_level* == level of cgroup
2113 | * associated with *skb*, then return value will be same as that
2114 | * of **bpf_skb_cgroup_id**\ ().
2115 | *
2116 | * The helper is useful for implementing policies based on cgroups
2117 | * that are higher in the hierarchy than the immediate cgroup
2118 | * associated with *skb*.
2119 | *
2120 | * The format of the returned id and the helper limitations are
2121 | * the same as for **bpf_skb_cgroup_id**\ ().
2122 | *
2123 | * Returns
2124 | * The id is returned or 0 in case the id could not be retrieved.
2125 | */
2126 | static __u64 (*bpf_skb_ancestor_cgroup_id)(struct __sk_buff *skb, int ancestor_level) = (void *) 83;
2127 | 
2128 | /*
2129 | * bpf_sk_lookup_tcp
2130 | *
2131 | * Look for TCP socket matching *tuple*, optionally in a child
2132 | * network namespace *netns*. The return value must be checked,
2133 | * and if non-**NULL**, released via **bpf_sk_release**\ ().
2134 | *
2135 | * The *ctx* should point to the context of the program, such as
2136 | * the skb or socket (depending on the hook in use). This is used
2137 | * to determine the base network namespace for the lookup.
2138 | * 2139 | * *tuple_size* must be one of: 2140 | * 2141 | * **sizeof**\ (*tuple*\ **->ipv4**) 2142 | * Look for an IPv4 socket. 2143 | * **sizeof**\ (*tuple*\ **->ipv6**) 2144 | * Look for an IPv6 socket. 2145 | * 2146 | * If the *netns* is a negative signed 32-bit integer, then the 2147 | * socket lookup table in the netns associated with the *ctx* 2148 | * will be used. For the TC hooks, this is the netns of the device 2149 | * in the skb. For socket hooks, this is the netns of the socket. 2150 | * If *netns* is any other signed 32-bit value greater than or 2151 | * equal to zero then it specifies the ID of the netns relative to 2152 | * the netns associated with the *ctx*. *netns* values beyond the 2153 | * range of 32-bit integers are reserved for future use. 2154 | * 2155 | * All values for *flags* are reserved for future usage, and must 2156 | * be left at zero. 2157 | * 2158 | * This helper is available only if the kernel was compiled with 2159 | * **CONFIG_NET** configuration option. 2160 | * 2161 | * Returns 2162 | * Pointer to **struct bpf_sock**, or **NULL** in case of failure. 2163 | * For sockets with reuseport option, the **struct bpf_sock** 2164 | * result is from *reuse*\ **->socks**\ [] using the hash of the 2165 | * tuple. 2166 | */ 2167 | static struct bpf_sock *(*bpf_sk_lookup_tcp)(void *ctx, struct bpf_sock_tuple *tuple, __u32 tuple_size, __u64 netns, __u64 flags) = (void *) 84; 2168 | 2169 | /* 2170 | * bpf_sk_lookup_udp 2171 | * 2172 | * Look for UDP socket matching *tuple*, optionally in a child 2173 | * network namespace *netns*. The return value must be checked, 2174 | * and if non-**NULL**, released via **bpf_sk_release**\ (). 2175 | * 2176 | * The *ctx* should point to the context of the program, such as 2177 | * the skb or socket (depending on the hook in use). This is used 2178 | * to determine the base network namespace for the lookup. 2179 | * 2180 | * *tuple_size* must be one of: 2181 | * 2182 | * **sizeof**\ (*tuple*\ **->ipv4**) 2183 | * Look for an IPv4 socket. 2184 | * **sizeof**\ (*tuple*\ **->ipv6**) 2185 | * Look for an IPv6 socket. 2186 | * 2187 | * If the *netns* is a negative signed 32-bit integer, then the 2188 | * socket lookup table in the netns associated with the *ctx* 2189 | * will be used. For the TC hooks, this is the netns of the device 2190 | * in the skb. For socket hooks, this is the netns of the socket. 2191 | * If *netns* is any other signed 32-bit value greater than or 2192 | * equal to zero then it specifies the ID of the netns relative to 2193 | * the netns associated with the *ctx*. *netns* values beyond the 2194 | * range of 32-bit integers are reserved for future use. 2195 | * 2196 | * All values for *flags* are reserved for future usage, and must 2197 | * be left at zero. 2198 | * 2199 | * This helper is available only if the kernel was compiled with 2200 | * **CONFIG_NET** configuration option. 2201 | * 2202 | * Returns 2203 | * Pointer to **struct bpf_sock**, or **NULL** in case of failure. 2204 | * For sockets with reuseport option, the **struct bpf_sock** 2205 | * result is from *reuse*\ **->socks**\ [] using the hash of the 2206 | * tuple. 2207 | */ 2208 | static struct bpf_sock *(*bpf_sk_lookup_udp)(void *ctx, struct bpf_sock_tuple *tuple, __u32 tuple_size, __u64 netns, __u64 flags) = (void *) 85; 2209 | 2210 | /* 2211 | * bpf_sk_release 2212 | * 2213 | * Release the reference held by *sock*. *sock* must be a 2214 | * non-**NULL** pointer that was returned from 2215 | * **bpf_sk_lookup_xxx**\ (). 
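 *
 *    For illustration, a minimal sketch of the usual lookup/release
 *    pairing in a tc program (the program body and TC_ACT_* constants
 *    are assumptions; SEC() comes from bpf_helpers.h):
 *
 *    ::
 *
 *        SEC("tc")
 *        int has_listener(struct __sk_buff *skb)
 *        {
 *            struct bpf_sock_tuple tuple = {};
 *            struct bpf_sock *sk;
 *
 *            // tuple would normally be filled from the packet headers
 *            sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
 *                                   -1, 0); // -1: netns of the skb
 *            if (!sk)
 *                return TC_ACT_SHOT;
 *            bpf_sk_release(sk); // mandatory for non-NULL results
 *            return TC_ACT_OK;
 *        }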
2216 | * 2217 | * Returns 2218 | * 0 on success, or a negative error in case of failure. 2219 | */ 2220 | static long (*bpf_sk_release)(void *sock) = (void *) 86; 2221 | 2222 | /* 2223 | * bpf_map_push_elem 2224 | * 2225 | * Push an element *value* into *map*. *flags* is one of: 2226 | * 2227 | * **BPF_EXIST** 2228 | * If the queue/stack is full, the oldest element is 2229 | * removed to make room for it. 2230 | * 2231 | * Returns 2232 | * 0 on success, or a negative error in case of failure. 2233 | */ 2234 | static long (*bpf_map_push_elem)(void *map, const void *value, __u64 flags) = (void *) 87; 2235 | 2236 | /* 2237 | * bpf_map_pop_elem 2238 | * 2239 | * Pop an element from *map*. 2240 | * 2241 | * Returns 2242 | * 0 on success, or a negative error in case of failure. 2243 | */ 2244 | static long (*bpf_map_pop_elem)(void *map, void *value) = (void *) 88; 2245 | 2246 | /* 2247 | * bpf_map_peek_elem 2248 | * 2249 | * Get an element from *map* without removing it. 2250 | * 2251 | * Returns 2252 | * 0 on success, or a negative error in case of failure. 2253 | */ 2254 | static long (*bpf_map_peek_elem)(void *map, void *value) = (void *) 89; 2255 | 2256 | /* 2257 | * bpf_msg_push_data 2258 | * 2259 | * For socket policies, insert *len* bytes into *msg* at offset 2260 | * *start*. 2261 | * 2262 | * If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a 2263 | * *msg* it may want to insert metadata or options into the *msg*. 2264 | * This can later be read and used by any of the lower layer BPF 2265 | * hooks. 2266 | * 2267 | * This helper may fail under memory pressure (if a malloc 2268 | * fails); in these cases the BPF program will get an appropriate 2269 | * error and will need to handle it. 2270 | * 2271 | * Returns 2272 | * 0 on success, or a negative error in case of failure. 2273 | */ 2274 | static long (*bpf_msg_push_data)(struct sk_msg_md *msg, __u32 start, __u32 len, __u64 flags) = (void *) 90; 2275 | 2276 | /* 2277 | * bpf_msg_pop_data 2278 | * 2279 | * Remove *len* bytes from a *msg* starting at byte *start*. 2280 | * This may result in **ENOMEM** errors in certain situations if 2281 | * an allocation and copy are required due to a full ring buffer. 2282 | * However, the helper will try to avoid doing the allocation 2283 | * if possible. Other errors can occur if the input parameters are 2284 | * invalid, either because the *start* byte is not a valid part of the *msg* 2285 | * payload and/or because the *pop* value is too large. 2286 | * 2287 | * Returns 2288 | * 0 on success, or a negative error in case of failure. 2289 | */ 2290 | static long (*bpf_msg_pop_data)(struct sk_msg_md *msg, __u32 start, __u32 len, __u64 flags) = (void *) 91; 2291 | 2292 | /* 2293 | * bpf_rc_pointer_rel 2294 | * 2295 | * This helper is used in programs implementing IR decoding, to 2296 | * report a successfully decoded pointer movement. 2297 | * 2298 | * The *ctx* should point to the lirc sample as passed into 2299 | * the program. 2300 | * 2301 | * This helper is only available if the kernel was compiled with 2302 | * the **CONFIG_BPF_LIRC_MODE2** configuration option set to 2303 | * "**y**". 2304 | * 2305 | * Returns 2306 | * 0 2307 | */ 2308 | static long (*bpf_rc_pointer_rel)(void *ctx, __s32 rel_x, __s32 rel_y) = (void *) 92; 2309 | 2310 | /* 2311 | * bpf_spin_lock 2312 | * 2313 | * Acquire a spinlock represented by the pointer *lock*, which is 2314 | * stored as part of a value of a map. Taking the lock makes it 2315 | * safe to update the rest of the fields in that value.
The 2316 | * spinlock can (and must) later be released with a call to 2317 | * **bpf_spin_unlock**\ (\ *lock*\ ). 2318 | * 2319 | * Spinlocks in BPF programs come with a number of restrictions 2320 | * and constraints: 2321 | * 2322 | * * **bpf_spin_lock** objects are only allowed inside maps of 2323 | * types **BPF_MAP_TYPE_HASH** and **BPF_MAP_TYPE_ARRAY** (this 2324 | * list could be extended in the future). 2325 | * * A BTF description of the map is mandatory. 2326 | * * The BPF program can take ONE lock at a time, since taking two 2327 | * or more could cause deadlocks. 2328 | * * Only one **struct bpf_spin_lock** is allowed per map element. 2329 | * * When the lock is taken, calls (either BPF to BPF or helpers) 2330 | * are not allowed. 2331 | * * The **BPF_LD_ABS** and **BPF_LD_IND** instructions are not 2332 | * allowed inside a spinlock-ed region. 2333 | * * The BPF program MUST call **bpf_spin_unlock**\ () to release 2334 | * the lock, on all execution paths, before it returns. 2335 | * * The BPF program can access **struct bpf_spin_lock** only via 2336 | * the **bpf_spin_lock**\ () and **bpf_spin_unlock**\ () 2337 | * helpers. Loading or storing data into the **struct 2338 | * bpf_spin_lock** *lock*\ **;** field of a map is not allowed. 2339 | * * To use the **bpf_spin_lock**\ () helper, the BTF description 2340 | * of the map value must be a struct and have a **struct 2341 | * bpf_spin_lock** *anyname*\ **;** field at the top level. 2342 | * A lock nested inside another struct is not allowed. 2343 | * * The **struct bpf_spin_lock** *lock* field in a map value must 2344 | * be aligned on a multiple of 4 bytes in that value. 2345 | * * A syscall with command **BPF_MAP_LOOKUP_ELEM** does not copy 2346 | * the **bpf_spin_lock** field to user space. 2347 | * * A syscall with command **BPF_MAP_UPDATE_ELEM**, or an update from 2348 | * a BPF program, does not update the **bpf_spin_lock** field. 2349 | * * **bpf_spin_lock** cannot be on the stack or inside a 2350 | * networking packet (it can only be inside a map value). 2351 | * * **bpf_spin_lock** is available to root only. 2352 | * * Tracing programs and socket filter programs cannot use 2353 | * **bpf_spin_lock**\ () due to insufficient preemption checks 2354 | * (but this may change in the future). 2355 | * * **bpf_spin_lock** is not allowed in inner maps of map-in-map. 2356 | * 2357 | * Returns 2358 | * 0 2359 | */ 2360 | static long (*bpf_spin_lock)(struct bpf_spin_lock *lock) = (void *) 93; 2361 | 2362 | /* 2363 | * bpf_spin_unlock 2364 | * 2365 | * Release the *lock* previously locked by a call to 2366 | * **bpf_spin_lock**\ (\ *lock*\ ). 2367 | * 2368 | * Returns 2369 | * 0 2370 | */ 2371 | static long (*bpf_spin_unlock)(struct bpf_spin_lock *lock) = (void *) 94; 2372 | 2373 | /* 2374 | * bpf_sk_fullsock 2375 | * 2376 | * This helper gets a **struct bpf_sock** pointer such 2377 | * that all the fields in this **bpf_sock** can be accessed. 2378 | * 2379 | * Returns 2380 | * A **struct bpf_sock** pointer on success, or **NULL** in 2381 | * case of failure. 2382 | */ 2383 | static struct bpf_sock *(*bpf_sk_fullsock)(struct bpf_sock *sk) = (void *) 95; 2384 | 2385 | /* 2386 | * bpf_tcp_sock 2387 | * 2388 | * This helper gets a **struct bpf_tcp_sock** pointer from a 2389 | * **struct bpf_sock** pointer. 2390 | * 2391 | * Returns 2392 | * A **struct bpf_tcp_sock** pointer on success, or **NULL** in 2393 | * case of failure.
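 *
 *    A minimal illustrative sketch (assumes a context where a
 *    **struct bpf_sock** pointer is available, e.g. a cgroup/skb
 *    program, and bpf_printk() from bpf_helpers.h):
 *
 *    ::
 *
 *        struct bpf_sock *sk = skb->sk; // e.g. in a cgroup/skb program
 *        struct bpf_tcp_sock *tp;
 *
 *        if (sk) {
 *            tp = bpf_tcp_sock(sk);
 *            if (tp)
 *                // read-only TCP state, e.g. the congestion window
 *                bpf_printk("cwnd=%u", tp->snd_cwnd);
 *        }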
2394 | */ 2395 | static struct bpf_tcp_sock *(*bpf_tcp_sock)(struct bpf_sock *sk) = (void *) 96; 2396 | 2397 | /* 2398 | * bpf_skb_ecn_set_ce 2399 | * 2400 | * Set the ECN (Explicit Congestion Notification) field of the IP header 2401 | * to **CE** (Congestion Encountered) if the current value is **ECT** 2402 | * (ECN Capable Transport). Otherwise, do nothing. Works with IPv6 2403 | * and IPv4. 2404 | * 2405 | * Returns 2406 | * 1 if the **CE** flag is set (either by the current helper call 2407 | * or because it was already present), 0 if it is not set. 2408 | */ 2409 | static long (*bpf_skb_ecn_set_ce)(struct __sk_buff *skb) = (void *) 97; 2410 | 2411 | /* 2412 | * bpf_get_listener_sock 2413 | * 2414 | * Return a **struct bpf_sock** pointer in **TCP_LISTEN** state. 2415 | * **bpf_sk_release**\ () is unnecessary and not allowed. 2416 | * 2417 | * Returns 2418 | * A **struct bpf_sock** pointer on success, or **NULL** in 2419 | * case of failure. 2420 | */ 2421 | static struct bpf_sock *(*bpf_get_listener_sock)(struct bpf_sock *sk) = (void *) 98; 2422 | 2423 | /* 2424 | * bpf_skc_lookup_tcp 2425 | * 2426 | * Look for a TCP socket matching *tuple*, optionally in a child 2427 | * network namespace *netns*. The return value must be checked, 2428 | * and if non-**NULL**, released via **bpf_sk_release**\ (). 2429 | * 2430 | * This function is identical to **bpf_sk_lookup_tcp**\ (), except 2431 | * that it also returns timewait or request sockets. Use 2432 | * **bpf_sk_fullsock**\ () or **bpf_tcp_sock**\ () to access the 2433 | * full structure. 2434 | * 2435 | * This helper is available only if the kernel was compiled with 2436 | * the **CONFIG_NET** configuration option. 2437 | * 2438 | * Returns 2439 | * Pointer to **struct bpf_sock**, or **NULL** in case of failure. 2440 | * For sockets with reuseport option, the **struct bpf_sock** 2441 | * result is from *reuse*\ **->socks**\ [] using the hash of the 2442 | * tuple. 2443 | */ 2444 | static struct bpf_sock *(*bpf_skc_lookup_tcp)(void *ctx, struct bpf_sock_tuple *tuple, __u32 tuple_size, __u64 netns, __u64 flags) = (void *) 99; 2445 | 2446 | /* 2447 | * bpf_tcp_check_syncookie 2448 | * 2449 | * Check whether *iph* and *th* contain a valid SYN cookie ACK for 2450 | * the listening socket in *sk*. 2451 | * 2452 | * *iph* points to the start of the IPv4 or IPv6 header, while 2453 | * *iph_len* contains **sizeof**\ (**struct iphdr**) or 2454 | * **sizeof**\ (**struct ip6hdr**). 2455 | * 2456 | * *th* points to the start of the TCP header, while *th_len* 2457 | * contains **sizeof**\ (**struct tcphdr**). 2458 | * 2459 | * Returns 2460 | * 0 if *iph* and *th* are a valid SYN cookie ACK, or a negative 2461 | * error otherwise. 2462 | */ 2463 | static long (*bpf_tcp_check_syncookie)(void *sk, void *iph, __u32 iph_len, struct tcphdr *th, __u32 th_len) = (void *) 100; 2464 | 2465 | /* 2466 | * bpf_sysctl_get_name 2467 | * 2468 | * Get the name of the sysctl in /proc/sys/ and copy it into the 2469 | * buffer *buf* of size *buf_len* provided by the program. 2470 | * 2471 | * The buffer is always NUL terminated, unless it's zero-sized. 2472 | * 2473 | * If *flags* is zero, the full name (e.g. "net/ipv4/tcp_mem") is 2474 | * copied. Use the **BPF_F_SYSCTL_BASE_NAME** flag to copy the base name 2475 | * only (e.g. "tcp_mem"). 2476 | * 2477 | * Returns 2478 | * Number of characters copied (not including the trailing NUL). 2479 | * 2480 | * **-E2BIG** if the buffer wasn't big enough (*buf* will contain 2481 | * the truncated name in this case).
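 *
 *    An illustrative sketch (a hypothetical allow-all guard; assumes
 *    a **BPF_PROG_TYPE_CGROUP_SYSCTL** program and SEC() from
 *    bpf_helpers.h):
 *
 *    ::
 *
 *        SEC("cgroup/sysctl")
 *        int sysctl_guard(struct bpf_sysctl *ctx)
 *        {
 *            char name[64];
 *
 *            if (bpf_sysctl_get_name(ctx, name, sizeof(name), 0) < 0)
 *                return 0; // reject on error
 *            // e.g. compare name against an allowlist here
 *            return 1; // allow the access
 *        }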
2482 | */ 2483 | static long (*bpf_sysctl_get_name)(struct bpf_sysctl *ctx, char *buf, unsigned long buf_len, __u64 flags) = (void *) 101; 2484 | 2485 | /* 2486 | * bpf_sysctl_get_current_value 2487 | * 2488 | * Get the current value of the sysctl as it is presented in /proc/sys 2489 | * (incl. newline, etc), and copy it as a string into the 2490 | * buffer *buf* of size *buf_len* provided by the program. 2491 | * 2492 | * The whole value is copied, no matter what file position user 2493 | * space issued e.g. sys_read at. 2494 | * 2495 | * The buffer is always NUL terminated, unless it's zero-sized. 2496 | * 2497 | * Returns 2498 | * Number of characters copied (not including the trailing NUL). 2499 | * 2500 | * **-E2BIG** if the buffer wasn't big enough (*buf* will contain 2501 | * the truncated value in this case). 2502 | * 2503 | * **-EINVAL** if the current value was unavailable, e.g. because 2504 | * the sysctl is uninitialized and read returns -EIO for it. 2505 | */ 2506 | static long (*bpf_sysctl_get_current_value)(struct bpf_sysctl *ctx, char *buf, unsigned long buf_len) = (void *) 102; 2507 | 2508 | /* 2509 | * bpf_sysctl_get_new_value 2510 | * 2511 | * Get the new value being written by user space to the sysctl (before 2512 | * the actual write happens) and copy it as a string into the 2513 | * buffer *buf* of size *buf_len* provided by the program. 2514 | * 2515 | * User space may write the new value at a file position > 0. 2516 | * 2517 | * The buffer is always NUL terminated, unless it's zero-sized. 2518 | * 2519 | * Returns 2520 | * Number of characters copied (not including the trailing NUL). 2521 | * 2522 | * **-E2BIG** if the buffer wasn't big enough (*buf* will contain 2523 | * the truncated value in this case). 2524 | * 2525 | * **-EINVAL** if the sysctl is being read. 2526 | */ 2527 | static long (*bpf_sysctl_get_new_value)(struct bpf_sysctl *ctx, char *buf, unsigned long buf_len) = (void *) 103; 2528 | 2529 | /* 2530 | * bpf_sysctl_set_new_value 2531 | * 2532 | * Override the new value being written by user space to the sysctl with 2533 | * the value provided by the program in buffer *buf* of size *buf_len*. 2534 | * 2535 | * *buf* should contain a string in the same form as provided by user 2536 | * space on a sysctl write. 2537 | * 2538 | * User space may write the new value at a file position > 0. To override 2539 | * the whole sysctl value, the file position should be set to zero. 2540 | * 2541 | * Returns 2542 | * 0 on success. 2543 | * 2544 | * **-E2BIG** if the *buf_len* is too big. 2545 | * 2546 | * **-EINVAL** if the sysctl is being read. 2547 | */ 2548 | static long (*bpf_sysctl_set_new_value)(struct bpf_sysctl *ctx, const char *buf, unsigned long buf_len) = (void *) 104; 2549 | 2550 | /* 2551 | * bpf_strtol 2552 | * 2553 | * Convert the initial part of the string from buffer *buf* of 2554 | * size *buf_len* to a long integer according to the given base 2555 | * and save the result in *res*. 2556 | * 2557 | * The string may begin with an arbitrary amount of white space 2558 | * (as determined by **isspace**\ (3)) followed by a single 2559 | * optional '**-**' sign. 2560 | * 2561 | * The five least significant bits of *flags* encode the base; other bits 2562 | * are currently unused. 2563 | * 2564 | * The base must be either 8, 10, 16 or 0 to detect it automatically, 2565 | * similar to user space **strtol**\ (3). 2566 | * 2567 | * Returns 2568 | * Number of characters consumed on success. Must be positive but 2569 | * no more than *buf_len*. 2570 | * 2571 | * **-EINVAL** if no valid digits were found or an unsupported base 2572 | * was provided.
2573 | * 2574 | * **-ERANGE** if resulting value was out of range. 2575 | */ 2576 | static long (*bpf_strtol)(const char *buf, unsigned long buf_len, __u64 flags, long *res) = (void *) 105; 2577 | 2578 | /* 2579 | * bpf_strtoul 2580 | * 2581 | * Convert the initial part of the string from buffer *buf* of 2582 | * size *buf_len* to an unsigned long integer according to the 2583 | * given base and save the result in *res*. 2584 | * 2585 | * The string may begin with an arbitrary amount of white space 2586 | * (as determined by **isspace**\ (3)). 2587 | * 2588 | * Five least significant bits of *flags* encode base, other bits 2589 | * are currently unused. 2590 | * 2591 | * Base must be either 8, 10, 16 or 0 to detect it automatically 2592 | * similar to user space **strtoul**\ (3). 2593 | * 2594 | * Returns 2595 | * Number of characters consumed on success. Must be positive but 2596 | * no more than *buf_len*. 2597 | * 2598 | * **-EINVAL** if no valid digits were found or unsupported base 2599 | * was provided. 2600 | * 2601 | * **-ERANGE** if resulting value was out of range. 2602 | */ 2603 | static long (*bpf_strtoul)(const char *buf, unsigned long buf_len, __u64 flags, unsigned long *res) = (void *) 106; 2604 | 2605 | /* 2606 | * bpf_sk_storage_get 2607 | * 2608 | * Get a bpf-local-storage from a *sk*. 2609 | * 2610 | * Logically, it could be thought of getting the value from 2611 | * a *map* with *sk* as the **key**. From this 2612 | * perspective, the usage is not much different from 2613 | * **bpf_map_lookup_elem**\ (*map*, **&**\ *sk*) except this 2614 | * helper enforces the key must be a full socket and the map must 2615 | * be a **BPF_MAP_TYPE_SK_STORAGE** also. 2616 | * 2617 | * Underneath, the value is stored locally at *sk* instead of 2618 | * the *map*. The *map* is used as the bpf-local-storage 2619 | * "type". The bpf-local-storage "type" (i.e. the *map*) is 2620 | * searched against all bpf-local-storages residing at *sk*. 2621 | * 2622 | * *sk* is a kernel **struct sock** pointer for LSM program. 2623 | * *sk* is a **struct bpf_sock** pointer for other program types. 2624 | * 2625 | * An optional *flags* (**BPF_SK_STORAGE_GET_F_CREATE**) can be 2626 | * used such that a new bpf-local-storage will be 2627 | * created if one does not exist. *value* can be used 2628 | * together with **BPF_SK_STORAGE_GET_F_CREATE** to specify 2629 | * the initial value of a bpf-local-storage. If *value* is 2630 | * **NULL**, the new bpf-local-storage will be zero initialized. 2631 | * 2632 | * Returns 2633 | * A bpf-local-storage pointer is returned on success. 2634 | * 2635 | * **NULL** if not found or there was an error in adding 2636 | * a new bpf-local-storage. 2637 | */ 2638 | static void *(*bpf_sk_storage_get)(void *map, void *sk, void *value, __u64 flags) = (void *) 107; 2639 | 2640 | /* 2641 | * bpf_sk_storage_delete 2642 | * 2643 | * Delete a bpf-local-storage from a *sk*. 2644 | * 2645 | * Returns 2646 | * 0 on success. 2647 | * 2648 | * **-ENOENT** if the bpf-local-storage cannot be found. 2649 | * **-EINVAL** if sk is not a fullsock (e.g. a request_sock). 2650 | */ 2651 | static long (*bpf_sk_storage_delete)(void *map, void *sk) = (void *) 108; 2652 | 2653 | /* 2654 | * bpf_send_signal 2655 | * 2656 | * Send signal *sig* to the process of the current task. 2657 | * The signal may be delivered to any of this process's threads. 2658 | * 2659 | * Returns 2660 | * 0 on success or successfully queued. 2661 | * 2662 | * **-EBUSY** if work queue under nmi is full. 
2663 | * 2664 | * **-EINVAL** if *sig* is invalid. 2665 | * 2666 | * **-EPERM** if no permission to send the *sig*. 2667 | * 2668 | * **-EAGAIN** if the bpf program can try again. 2669 | */ 2670 | static long (*bpf_send_signal)(__u32 sig) = (void *) 109; 2671 | 2672 | /* 2673 | * bpf_tcp_gen_syncookie 2674 | * 2675 | * Try to issue a SYN cookie for the packet with corresponding 2676 | * IP/TCP headers, *iph* and *th*, on the listening socket in *sk*. 2677 | * 2678 | * *iph* points to the start of the IPv4 or IPv6 header, while 2679 | * *iph_len* contains **sizeof**\ (**struct iphdr**) or 2680 | * **sizeof**\ (**struct ip6hdr**). 2681 | * 2682 | * *th* points to the start of the TCP header, while *th_len* 2683 | * contains the length of the TCP header. 2684 | * 2685 | * Returns 2686 | * On success, the lower 32 bits hold the generated SYN cookie, 2687 | * followed by 16 bits which hold the MSS value for that cookie, 2688 | * and the top 16 bits are unused. 2689 | * 2690 | * On failure, the returned value is one of the following: 2691 | * 2692 | * **-EINVAL** SYN cookie cannot be issued due to error 2693 | * 2694 | * **-ENOENT** SYN cookie should not be issued (no SYN flood) 2695 | * 2696 | * **-EOPNOTSUPP** kernel configuration does not enable SYN cookies 2697 | * 2698 | * **-EPROTONOSUPPORT** IP packet version is not 4 or 6 2699 | */ 2700 | static __s64 (*bpf_tcp_gen_syncookie)(void *sk, void *iph, __u32 iph_len, struct tcphdr *th, __u32 th_len) = (void *) 110; 2701 | 2702 | /* 2703 | * bpf_skb_output 2704 | * 2705 | * Write raw *data* blob into a special BPF perf event held by 2706 | * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf 2707 | * event must have the following attributes: **PERF_SAMPLE_RAW** 2708 | * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and 2709 | * **PERF_COUNT_SW_BPF_OUTPUT** as **config**. 2710 | * 2711 | * The *flags* are used to indicate the index in *map* for which 2712 | * the value must be put, masked with **BPF_F_INDEX_MASK**. 2713 | * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU** 2714 | * to indicate that the index of the current CPU core should be 2715 | * used. 2716 | * 2717 | * The value to write, of *size*, is passed through the eBPF stack and 2718 | * pointed to by *data*. 2719 | * 2720 | * *ctx* is a pointer to the in-kernel struct sk_buff. 2721 | * 2722 | * This helper is similar to **bpf_perf_event_output**\ () but 2723 | * restricted to raw_tracepoint bpf programs. 2724 | * 2725 | * Returns 2726 | * 0 on success, or a negative error in case of failure. 2727 | */ 2728 | static long (*bpf_skb_output)(void *ctx, void *map, __u64 flags, void *data, __u64 size) = (void *) 111; 2729 | 2730 | /* 2731 | * bpf_probe_read_user 2732 | * 2733 | * Safely attempt to read *size* bytes from user space address 2734 | * *unsafe_ptr* and store the data in *dst*. 2735 | * 2736 | * Returns 2737 | * 0 on success, or a negative error in case of failure. 2738 | */ 2739 | static long (*bpf_probe_read_user)(void *dst, __u32 size, const void *unsafe_ptr) = (void *) 112; 2740 | 2741 | /* 2742 | * bpf_probe_read_kernel 2743 | * 2744 | * Safely attempt to read *size* bytes from kernel space address 2745 | * *unsafe_ptr* and store the data in *dst*. 2746 | * 2747 | * Returns 2748 | * 0 on success, or a negative error in case of failure.
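 *
 *    An illustrative kprobe sketch (the attach point and field are
 *    examples only; PT_REGS_PARM1() is assumed from libbpf's
 *    bpf_tracing.h, and struct sock from vmlinux.h):
 *
 *    ::
 *
 *        SEC("kprobe/tcp_retransmit_skb")
 *        int on_retx(struct pt_regs *ctx)
 *        {
 *            struct sock *sk = (struct sock *)PT_REGS_PARM1(ctx);
 *            __u16 family = 0;
 *
 *            // copy one field out through the unsafe pointer
 *            bpf_probe_read_kernel(&family, sizeof(family),
 *                                  &sk->__sk_common.skc_family);
 *            return 0;
 *        }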
2749 | */ 2750 | static long (*bpf_probe_read_kernel)(void *dst, __u32 size, const void *unsafe_ptr) = (void *) 113; 2751 | 2752 | /* 2753 | * bpf_probe_read_user_str 2754 | * 2755 | * Copy a NUL terminated string from an unsafe user address 2756 | * *unsafe_ptr* to *dst*. The *size* should include the 2757 | * terminating NUL byte. In case the string length is smaller than 2758 | * *size*, the target is not padded with further NUL bytes. If the 2759 | * string length is larger than *size*, just *size*-1 bytes are 2760 | * copied and the last byte is set to NUL. 2761 | * 2762 | * On success, returns the number of bytes that were written, 2763 | * including the terminating NUL. This makes this helper useful in 2764 | * tracing programs for reading strings, and more importantly for 2765 | * getting their length at runtime. See the following snippet: 2766 | * 2767 | * :: 2768 | * 2769 | * SEC("kprobe/sys_open") 2770 | * void bpf_sys_open(struct pt_regs *ctx) 2771 | * { 2772 | * char buf[PATHLEN]; // PATHLEN is defined to 256 2773 | * int res = bpf_probe_read_user_str(buf, sizeof(buf), 2774 | * ctx->di); 2775 | * 2776 | * // Consume buf, for example push it to 2777 | * // userspace via bpf_perf_event_output(); we 2778 | * // can use res (the string length) as event 2779 | * // size, after checking its boundaries. 2780 | * } 2781 | * 2782 | * In comparison, using the **bpf_probe_read_user**\ () helper here 2783 | * instead to read the string would require estimating the length 2784 | * at compile time, and would often result in copying more memory 2785 | * than necessary. 2786 | * 2787 | * Another use case is parsing individual process 2788 | * arguments or individual environment variables by navigating 2789 | * *current*\ **->mm->arg_start** and *current*\ 2790 | * **->mm->env_start**: using this helper and the return value, 2791 | * one can quickly iterate at the right offset of the memory area. 2792 | * 2793 | * Returns 2794 | * On success, the strictly positive length of the output string, 2795 | * including the trailing NUL character. On error, a negative 2796 | * value. 2797 | */ 2798 | static long (*bpf_probe_read_user_str)(void *dst, __u32 size, const void *unsafe_ptr) = (void *) 114; 2799 | 2800 | /* 2801 | * bpf_probe_read_kernel_str 2802 | * 2803 | * Copy a NUL terminated string from an unsafe kernel address *unsafe_ptr* 2804 | * to *dst*. The same semantics as with **bpf_probe_read_user_str**\ () apply. 2805 | * 2806 | * Returns 2807 | * On success, the strictly positive length of the string, including 2808 | * the trailing NUL character. On error, a negative value. 2809 | */ 2810 | static long (*bpf_probe_read_kernel_str)(void *dst, __u32 size, const void *unsafe_ptr) = (void *) 115; 2811 | 2812 | /* 2813 | * bpf_tcp_send_ack 2814 | * 2815 | * Send out a tcp-ack. *tp* is the in-kernel struct **tcp_sock**. 2816 | * *rcv_nxt* is the ack_seq to be sent out. 2817 | * 2818 | * Returns 2819 | * 0 on success, or a negative error in case of failure. 2820 | */ 2821 | static long (*bpf_tcp_send_ack)(void *tp, __u32 rcv_nxt) = (void *) 116; 2822 | 2823 | /* 2824 | * bpf_send_signal_thread 2825 | * 2826 | * Send signal *sig* to the thread corresponding to the current task. 2827 | * 2828 | * Returns 2829 | * 0 on success or successfully queued. 2830 | * 2831 | * **-EBUSY** if the work queue under nmi is full. 2832 | * 2833 | * **-EINVAL** if *sig* is invalid. 2834 | * 2835 | * **-EPERM** if no permission to send the *sig*. 2836 | * 2837 | * **-EAGAIN** if the bpf program can try again.
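 *
 *    An illustrative sketch (the attach point and signal number are
 *    examples only; 10 is SIGUSR1 on x86-64):
 *
 *    ::
 *
 *        SEC("kprobe/some_traced_function") // hypothetical
 *        int notify_current_thread(struct pt_regs *ctx)
 *        {
 *            // deliver SIGUSR1 to the current thread; a negative
 *            // result (e.g. -EAGAIN) may be retried on a later run
 *            bpf_send_signal_thread(10);
 *            return 0;
 *        }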
2838 | */ 2839 | static long (*bpf_send_signal_thread)(__u32 sig) = (void *) 117; 2840 | 2841 | /* 2842 | * bpf_jiffies64 2843 | * 2844 | * Obtain the 64-bit jiffies value. 2845 | * 2846 | * Returns 2847 | * The 64-bit jiffies value. 2848 | */ 2849 | static __u64 (*bpf_jiffies64)(void) = (void *) 118; 2850 | 2851 | /* 2852 | * bpf_read_branch_records 2853 | * 2854 | * For an eBPF program attached to a perf event, retrieve the 2855 | * branch records (**struct perf_branch_entry**) associated with *ctx* 2856 | * and store them in the buffer pointed to by *buf*, up to 2857 | * *size* bytes. 2858 | * 2859 | * Returns 2860 | * On success, number of bytes written to *buf*. On error, a 2861 | * negative value. 2862 | * 2863 | * The *flags* can be set to **BPF_F_GET_BRANCH_RECORDS_SIZE** to 2864 | * instead return the number of bytes required to store all the 2865 | * branch entries. If this flag is set, *buf* may be NULL. 2866 | * 2867 | * **-EINVAL** if arguments are invalid or **size** is not a multiple 2868 | * of **sizeof**\ (**struct perf_branch_entry**\ ). 2869 | * 2870 | * **-ENOENT** if the architecture does not support branch records. 2871 | */ 2872 | static long (*bpf_read_branch_records)(struct bpf_perf_event_data *ctx, void *buf, __u32 size, __u64 flags) = (void *) 119; 2873 | 2874 | /* 2875 | * bpf_get_ns_current_pid_tgid 2876 | * 2877 | * Get the values for *pid* and *tgid* as seen from the current 2878 | * *namespace*; on success they are returned in *nsdata*. 2879 | * 2880 | * Returns 2881 | * 0 on success, or one of the following in case of failure: 2882 | * 2883 | * **-EINVAL** if the supplied dev and inum don't match the dev_t and inode number 2884 | * of the nsfs of the current task, or if the dev conversion to dev_t lost high bits. 2885 | * 2886 | * **-ENOENT** if the pidns does not exist for the current task. 2887 | */ 2888 | static long (*bpf_get_ns_current_pid_tgid)(__u64 dev, __u64 ino, struct bpf_pidns_info *nsdata, __u32 size) = (void *) 120; 2889 | 2890 | /* 2891 | * bpf_xdp_output 2892 | * 2893 | * Write raw *data* blob into a special BPF perf event held by 2894 | * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf 2895 | * event must have the following attributes: **PERF_SAMPLE_RAW** 2896 | * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and 2897 | * **PERF_COUNT_SW_BPF_OUTPUT** as **config**. 2898 | * 2899 | * The *flags* are used to indicate the index in *map* for which 2900 | * the value must be put, masked with **BPF_F_INDEX_MASK**. 2901 | * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU** 2902 | * to indicate that the index of the current CPU core should be 2903 | * used. 2904 | * 2905 | * The value to write, of *size*, is passed through the eBPF stack and 2906 | * pointed to by *data*. 2907 | * 2908 | * *ctx* is a pointer to the in-kernel struct xdp_buff. 2909 | * 2910 | * This helper is similar to **bpf_perf_event_output**\ () but 2911 | * restricted to raw_tracepoint bpf programs. 2912 | * 2913 | * Returns 2914 | * 0 on success, or a negative error in case of failure. 2915 | */ 2916 | static long (*bpf_xdp_output)(void *ctx, void *map, __u64 flags, void *data, __u64 size) = (void *) 121; 2917 | 2918 | /* 2919 | * bpf_get_netns_cookie 2920 | * 2921 | * Retrieve the cookie (generated by the kernel) of the network 2922 | * namespace the input *ctx* is associated with. The network 2923 | * namespace cookie remains stable for its lifetime and provides 2924 | * a global identifier that can be assumed unique.
If *ctx* is 2925 | * NULL, then the helper returns the cookie for the initial 2926 | * network namespace. The cookie itself is very similar to that 2927 | * of the **bpf_get_socket_cookie**\ () helper, but for network 2928 | * namespaces instead of sockets. 2929 | * 2930 | * Returns 2931 | * An 8-byte long opaque number. 2932 | */ 2933 | static __u64 (*bpf_get_netns_cookie)(void *ctx) = (void *) 122; 2934 | 2935 | /* 2936 | * bpf_get_current_ancestor_cgroup_id 2937 | * 2938 | * Return the id of the cgroup v2 that is an ancestor of the cgroup associated 2939 | * with the current task at the *ancestor_level*. The root cgroup 2940 | * is at *ancestor_level* zero and each step down the hierarchy 2941 | * increments the level. If *ancestor_level* == level of cgroup 2942 | * associated with the current task, then the return value will be the 2943 | * same as that of **bpf_get_current_cgroup_id**\ (). 2944 | * 2945 | * The helper is useful to implement policies based on cgroups 2946 | * that are higher in the hierarchy than the immediate cgroup associated 2947 | * with the current task. 2948 | * 2949 | * The format of the returned id and the helper limitations are the same as in 2950 | * **bpf_get_current_cgroup_id**\ (). 2951 | * 2952 | * Returns 2953 | * The id is returned or 0 in case the id could not be retrieved. 2954 | */ 2955 | static __u64 (*bpf_get_current_ancestor_cgroup_id)(int ancestor_level) = (void *) 123; 2956 | 2957 | /* 2958 | * bpf_sk_assign 2959 | * 2960 | * This helper is overloaded depending on the BPF program type. This 2961 | * description applies to **BPF_PROG_TYPE_SCHED_CLS** and 2962 | * **BPF_PROG_TYPE_SCHED_ACT** programs. 2963 | * 2964 | * Assign the *sk* to the *skb*. When combined with appropriate 2965 | * routing configuration to receive the packet towards the socket, 2966 | * this will cause *skb* to be delivered to the specified socket. 2967 | * Subsequent redirection of *skb* via **bpf_redirect**\ (), 2968 | * **bpf_clone_redirect**\ () or other methods outside of BPF may 2969 | * interfere with successful delivery to the socket. 2970 | * 2971 | * This operation is only valid from the TC ingress path. 2972 | * 2973 | * The *flags* argument must be zero. 2974 | * 2975 | * Returns 2976 | * 0 on success, or a negative error in case of failure: 2977 | * 2978 | * **-EINVAL** if the specified *flags* are not supported. 2979 | * 2980 | * **-ENOENT** if the socket is unavailable for assignment. 2981 | * 2982 | * **-ENETUNREACH** if the socket is unreachable (wrong netns). 2983 | * 2984 | * **-EOPNOTSUPP** if the operation is not supported, for example 2985 | * a call from outside of TC ingress. 2986 | * 2987 | * **-ESOCKTNOSUPPORT** if the socket type is not supported 2988 | * (reuseport). 2989 | */ 2990 | static long (*bpf_sk_assign)(void *ctx, void *sk, __u64 flags) = (void *) 124; 2991 | 2992 | /* 2993 | * bpf_ktime_get_boot_ns 2994 | * 2995 | * Return the time elapsed since system boot, in nanoseconds. 2996 | * This does include the time the system was suspended. 2997 | * See: **clock_gettime**\ (**CLOCK_BOOTTIME**) 2998 | * 2999 | * Returns 3000 | * Current *ktime*. 3001 | */ 3002 | static __u64 (*bpf_ktime_get_boot_ns)(void) = (void *) 125; 3003 | 3004 | /* 3005 | * bpf_seq_printf 3006 | * 3007 | * **bpf_seq_printf**\ () uses seq_file **seq_printf**\ () to print 3008 | * out the format string. 3009 | * The *m* represents the seq_file. The *fmt* and *fmt_size* are for 3010 | * the format string itself. The *data* and *data_len* are format string 3011 | * arguments.
The *data* is a **u64** array and the corresponding format string 3012 | * values are stored in the array. For strings and pointers where pointees 3013 | * are accessed, only the pointer values are stored in the *data* array. 3014 | * The *data_len* is the size of *data* in bytes. 3015 | * 3016 | * The formats **%s** and **%p{i,I}{4,6}** require reading kernel memory. 3017 | * Reading kernel memory may fail due to either an invalid address or a 3018 | * valid address that requires a major memory fault. If reading kernel memory 3019 | * fails, the string for **%s** will be an empty string, and the ip 3020 | * address for **%p{i,I}{4,6}** will be 0. Not returning an error to the 3021 | * bpf program is consistent with what **bpf_trace_printk**\ () does for now. 3022 | * 3023 | * Returns 3024 | * 0 on success, or a negative error in case of failure: 3025 | * 3026 | * **-EBUSY** if the per-CPU memory copy buffer is busy; the bpf program 3027 | * can try again by returning 1. 3028 | * 3029 | * **-EINVAL** if arguments are invalid, or if *fmt* is invalid/unsupported. 3030 | * 3031 | * **-E2BIG** if *fmt* contains too many format specifiers. 3032 | * 3033 | * **-EOVERFLOW** if an overflow happened: The same object will be tried again. 3034 | */ 3035 | static long (*bpf_seq_printf)(struct seq_file *m, const char *fmt, __u32 fmt_size, const void *data, __u32 data_len) = (void *) 126; 3036 | 3037 | /* 3038 | * bpf_seq_write 3039 | * 3040 | * **bpf_seq_write**\ () uses seq_file **seq_write**\ () to write the data. 3041 | * The *m* represents the seq_file. The *data* and *len* represent the 3042 | * data to write and its size in bytes. 3043 | * 3044 | * Returns 3045 | * 0 on success, or a negative error in case of failure: 3046 | * 3047 | * **-EOVERFLOW** if an overflow happened: The same object will be tried again. 3048 | */ 3049 | static long (*bpf_seq_write)(struct seq_file *m, const void *data, __u32 len) = (void *) 127; 3050 | 3051 | /* 3052 | * bpf_sk_cgroup_id 3053 | * 3054 | * Return the cgroup v2 id of the socket *sk*. 3055 | * 3056 | * *sk* must be a non-**NULL** pointer to a socket, e.g. one 3057 | * returned from **bpf_sk_lookup_xxx**\ (), 3058 | * **bpf_sk_fullsock**\ (), etc. The format of the returned id is the 3059 | * same as in **bpf_skb_cgroup_id**\ (). 3060 | * 3061 | * This helper is available only if the kernel was compiled with 3062 | * the **CONFIG_SOCK_CGROUP_DATA** configuration option. 3063 | * 3064 | * Returns 3065 | * The id is returned or 0 in case the id could not be retrieved. 3066 | */ 3067 | static __u64 (*bpf_sk_cgroup_id)(void *sk) = (void *) 128; 3068 | 3069 | /* 3070 | * bpf_sk_ancestor_cgroup_id 3071 | * 3072 | * Return the id of the cgroup v2 that is an ancestor of the cgroup associated 3073 | * with the *sk* at the *ancestor_level*. The root cgroup is at 3074 | * *ancestor_level* zero and each step down the hierarchy 3075 | * increments the level. If *ancestor_level* == level of cgroup 3076 | * associated with *sk*, then the return value will be the same as that 3077 | * of **bpf_sk_cgroup_id**\ (). 3078 | * 3079 | * The helper is useful to implement policies based on cgroups 3080 | * that are higher in the hierarchy than the immediate cgroup associated 3081 | * with *sk*. 3082 | * 3083 | * The format of the returned id and the helper limitations are the same as in 3084 | * **bpf_sk_cgroup_id**\ (). 3085 | * 3086 | * Returns 3087 | * The id is returned or 0 in case the id could not be retrieved.
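 *
 *    An illustrative sketch (assumes a cgroup/connect4 program; the
 *    ancestor level and expected id are hypothetical values that
 *    would normally come from configuration):
 *
 *    ::
 *
 *        SEC("cgroup/connect4")
 *        int connect4_policy(struct bpf_sock_addr *ctx)
 *        {
 *            const __u64 expected_id = 0x100; // hypothetical
 *            __u64 id;
 *
 *            if (!ctx->sk)
 *                return 1; // allow
 *            id = bpf_sk_ancestor_cgroup_id(ctx->sk, 2);
 *            return id == expected_id ? 1 : 0;
 *        }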
3088 | */ 3089 | static __u64 (*bpf_sk_ancestor_cgroup_id)(void *sk, int ancestor_level) = (void *) 129; 3090 | 3091 | /* 3092 | * bpf_ringbuf_output 3093 | * 3094 | * Copy *size* bytes from *data* into a ring buffer *ringbuf*. 3095 | * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification 3096 | * of new data availability is sent. 3097 | * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification 3098 | * of new data availability is sent unconditionally. 3099 | * If **0** is specified in *flags*, an adaptive notification 3100 | * of new data availability is sent. 3101 | * 3102 | * An adaptive notification is a notification sent whenever the user-space 3103 | * process has caught up and consumed all available payloads. If the user-space 3104 | * process is still processing a previous payload, no notification is needed, 3105 | * as it will process the newly added payload automatically. 3106 | * 3107 | * Returns 3108 | * 0 on success, or a negative error in case of failure. 3109 | */ 3110 | static long (*bpf_ringbuf_output)(void *ringbuf, void *data, __u64 size, __u64 flags) = (void *) 130; 3111 | 3112 | /* 3113 | * bpf_ringbuf_reserve 3114 | * 3115 | * Reserve *size* bytes of payload in a ring buffer *ringbuf*. 3116 | * *flags* must be 0. 3117 | * 3118 | * Returns 3119 | * A valid pointer with *size* bytes of memory available; NULL 3120 | * otherwise. 3121 | */ 3122 | static void *(*bpf_ringbuf_reserve)(void *ringbuf, __u64 size, __u64 flags) = (void *) 131; 3123 | 3124 | /* 3125 | * bpf_ringbuf_submit 3126 | * 3127 | * Submit a reserved ring buffer sample, pointed to by *data*. 3128 | * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification 3129 | * of new data availability is sent. 3130 | * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification 3131 | * of new data availability is sent unconditionally. 3132 | * If **0** is specified in *flags*, an adaptive notification 3133 | * of new data availability is sent. 3134 | * 3135 | * See 'bpf_ringbuf_output()' for the definition of adaptive notification. 3136 | * 3137 | * Returns 3138 | * Nothing. Always succeeds. 3139 | */ 3140 | static void (*bpf_ringbuf_submit)(void *data, __u64 flags) = (void *) 132; 3141 | 3142 | /* 3143 | * bpf_ringbuf_discard 3144 | * 3145 | * Discard a reserved ring buffer sample, pointed to by *data*. 3146 | * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification 3147 | * of new data availability is sent. 3148 | * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification 3149 | * of new data availability is sent unconditionally. 3150 | * If **0** is specified in *flags*, an adaptive notification 3151 | * of new data availability is sent. 3152 | * 3153 | * See 'bpf_ringbuf_output()' for the definition of adaptive notification. 3154 | * 3155 | * Returns 3156 | * Nothing. Always succeeds. 3157 | */ 3158 | static void (*bpf_ringbuf_discard)(void *data, __u64 flags) = (void *) 133; 3159 | 3160 | /* 3161 | * bpf_ringbuf_query 3162 | * 3163 | * Query various characteristics of the provided ring buffer. What 3164 | * exactly is queried is determined by *flags*: 3165 | * 3166 | * * **BPF_RB_AVAIL_DATA**: Amount of data not yet consumed. 3167 | * * **BPF_RB_RING_SIZE**: The size of the ring buffer. 3168 | * * **BPF_RB_CONS_POS**: Consumer position (can wrap around). 3169 | * * **BPF_RB_PROD_POS**: Producer(s) position (can wrap around).
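 *
 *    An illustrative sketch tying the ring buffer helpers together
 *    (the map name, event layout, attach point and load-shedding
 *    threshold are hypothetical; the map macros and SEC() come from
 *    bpf_helpers.h):
 *
 *    ::
 *
 *        struct {
 *            __uint(type, BPF_MAP_TYPE_RINGBUF);
 *            __uint(max_entries, 256 * 1024);
 *        } events SEC(".maps");
 *
 *        struct event { __u32 pid; };
 *
 *        SEC("tracepoint/syscalls/sys_enter_execve")
 *        int on_execve(void *ctx)
 *        {
 *            struct event *e;
 *
 *            // heuristic back-pressure: skip if the consumer is
 *            // too far behind
 *            if (bpf_ringbuf_query(&events, BPF_RB_AVAIL_DATA) >
 *                128 * 1024)
 *                return 0;
 *            e = bpf_ringbuf_reserve(&events, sizeof(*e), 0);
 *            if (!e)
 *                return 0;
 *            e->pid = bpf_get_current_pid_tgid() >> 32;
 *            bpf_ringbuf_submit(e, 0);
 *            return 0;
 *        }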
3170 | * 3171 | * The data returned is just a momentary snapshot of the actual values 3172 | * and could be inaccurate, so this facility should be used to 3173 | * power heuristics and for reporting, not to make 100% correct 3174 | * calculations. 3175 | * 3176 | * Returns 3177 | * The requested value, or 0 if *flags* are not recognized. 3178 | */ 3179 | static __u64 (*bpf_ringbuf_query)(void *ringbuf, __u64 flags) = (void *) 134; 3180 | 3181 | /* 3182 | * bpf_csum_level 3183 | * 3184 | * Change the skb's checksum level by one layer up or down, or 3185 | * reset it entirely to none in order to have the stack perform 3186 | * checksum validation. The level is applicable to the following 3187 | * protocols: TCP, UDP, GRE, SCTP, FCOE. For example, a decap of 3188 | * | ETH | IP | UDP | GUE | IP | TCP | into | ETH | IP | TCP | 3189 | * through the **bpf_skb_adjust_room**\ () helper, passing in the 3190 | * **BPF_F_ADJ_ROOM_NO_CSUM_RESET** flag, would require one call 3191 | * to **bpf_csum_level**\ () with **BPF_CSUM_LEVEL_DEC** since 3192 | * the UDP header is removed. Similarly, an encap of the latter 3193 | * into the former could be accompanied by a helper call to 3194 | * **bpf_csum_level**\ () with **BPF_CSUM_LEVEL_INC** if the 3195 | * skb is still intended to be processed in higher layers of the 3196 | * stack instead of just egressing at tc. 3197 | * 3198 | * The following level operations are supported at this time: 3199 | * 3200 | * * **BPF_CSUM_LEVEL_INC**: Increases skb->csum_level for skbs 3201 | * with CHECKSUM_UNNECESSARY. 3202 | * * **BPF_CSUM_LEVEL_DEC**: Decreases skb->csum_level for skbs 3203 | * with CHECKSUM_UNNECESSARY. 3204 | * * **BPF_CSUM_LEVEL_RESET**: Resets skb->csum_level to 0 and 3205 | * sets CHECKSUM_NONE to force checksum validation by the stack. 3206 | * * **BPF_CSUM_LEVEL_QUERY**: No-op, returns the current 3207 | * skb->csum_level. 3208 | * 3209 | * Returns 3210 | * 0 on success, or a negative error in case of failure. In the 3211 | * case of **BPF_CSUM_LEVEL_QUERY**, the current skb->csum_level 3212 | * is returned or the error code -EACCES in case the skb is not 3213 | * subject to CHECKSUM_UNNECESSARY. 3214 | */ 3215 | static long (*bpf_csum_level)(struct __sk_buff *skb, __u64 level) = (void *) 135; 3216 | 3217 | /* 3218 | * bpf_skc_to_tcp6_sock 3219 | * 3220 | * Dynamically cast a *sk* pointer to a *tcp6_sock* pointer. 3221 | * 3222 | * Returns 3223 | * *sk* if casting is valid, or **NULL** otherwise. 3224 | */ 3225 | static struct tcp6_sock *(*bpf_skc_to_tcp6_sock)(void *sk) = (void *) 136; 3226 | 3227 | /* 3228 | * bpf_skc_to_tcp_sock 3229 | * 3230 | * Dynamically cast a *sk* pointer to a *tcp_sock* pointer. 3231 | * 3232 | * Returns 3233 | * *sk* if casting is valid, or **NULL** otherwise. 3234 | */ 3235 | static struct tcp_sock *(*bpf_skc_to_tcp_sock)(void *sk) = (void *) 137; 3236 | 3237 | /* 3238 | * bpf_skc_to_tcp_timewait_sock 3239 | * 3240 | * Dynamically cast a *sk* pointer to a *tcp_timewait_sock* pointer. 3241 | * 3242 | * Returns 3243 | * *sk* if casting is valid, or **NULL** otherwise. 3244 | */ 3245 | static struct tcp_timewait_sock *(*bpf_skc_to_tcp_timewait_sock)(void *sk) = (void *) 138; 3246 | 3247 | /* 3248 | * bpf_skc_to_tcp_request_sock 3249 | * 3250 | * Dynamically cast a *sk* pointer to a *tcp_request_sock* pointer. 3251 | * 3252 | * Returns 3253 | * *sk* if casting is valid, or **NULL** otherwise.
3254 | */ 3255 | static struct tcp_request_sock *(*bpf_skc_to_tcp_request_sock)(void *sk) = (void *) 139; 3256 | 3257 | /* 3258 | * bpf_skc_to_udp6_sock 3259 | * 3260 | * Dynamically cast a *sk* pointer to a *udp6_sock* pointer. 3261 | * 3262 | * Returns 3263 | * *sk* if casting is valid, or **NULL** otherwise. 3264 | */ 3265 | static struct udp6_sock *(*bpf_skc_to_udp6_sock)(void *sk) = (void *) 140; 3266 | 3267 | /* 3268 | * bpf_get_task_stack 3269 | * 3270 | * Return a user or a kernel stack in the buffer provided by the bpf program. 3271 | * To achieve this, the helper needs *task*, which is a valid 3272 | * pointer to **struct task_struct**. To store the stacktrace, the 3273 | * bpf program provides *buf* with a nonnegative *size*. 3274 | * 3275 | * The last argument, *flags*, holds the number of stack frames to 3276 | * skip (from 0 to 255), masked with 3277 | * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set 3278 | * the following flags: 3279 | * 3280 | * **BPF_F_USER_STACK** 3281 | * Collect a user space stack instead of a kernel stack. 3282 | * **BPF_F_USER_BUILD_ID** 3283 | * Collect buildid+offset instead of ips for user stack, 3284 | * only valid if **BPF_F_USER_STACK** is also specified. 3285 | * 3286 | * **bpf_get_task_stack**\ () can collect up to 3287 | * **PERF_MAX_STACK_DEPTH** both kernel and user frames, subject 3288 | * to a sufficiently large buffer size. Note that 3289 | * this limit can be controlled with the **sysctl** program, and 3290 | * that it should be manually increased in order to profile long 3291 | * user stacks (such as stacks for Java programs). To do so, use: 3292 | * 3293 | * :: 3294 | * 3295 | * # sysctl kernel.perf_event_max_stack= 3296 | * 3297 | * Returns 3298 | * A non-negative value equal to or less than *size* on success, 3299 | * or a negative error in case of failure. 3300 | */ 3301 | static long (*bpf_get_task_stack)(struct task_struct *task, void *buf, __u32 size, __u64 flags) = (void *) 141; 3302 | 3303 | /* 3304 | * bpf_load_hdr_opt 3305 | * 3306 | * Load a header option: support reading a particular TCP header 3307 | * option in a bpf program (**BPF_PROG_TYPE_SOCK_OPS**). 3308 | * 3309 | * If *flags* is 0, it will search the option from the 3310 | * *skops*\ **->skb_data**. The comment in **struct bpf_sock_ops** 3311 | * has details on what skb_data contains under different 3312 | * *skops*\ **->op**. 3313 | * 3314 | * The first byte of the *searchby_res* specifies the 3315 | * kind to search for. 3316 | * 3317 | * If the kind being searched for is an experimental kind 3318 | * (i.e. 253 or 254 according to RFC6994), the "magic", which is 3319 | * either 2 bytes or 4 bytes, also needs to be specified. The 3320 | * size of the magic is specified in the 2nd byte, which is the 3321 | * "kind-length" of a TCP header option; as with a normal 3322 | * TCP header option, the "kind-length" also 3323 | * includes the first 2 bytes, "kind" and "kind-length" 3324 | * itself. 3325 | * 3326 | * 3327 | * For example, to search experimental kind 254 with a 3328 | * 2 byte magic of 0xeB9F, the searchby_res should be 3329 | * [ 254, 4, 0xeB, 0x9F, 0, 0, .... 0 ]. 3330 | * 3331 | * To search for the standard window scale option (3), 3332 | * the *searchby_res* should be [ 3, 0, 0, .... 0 ]. 3333 | * Note, the kind-length must be 0 for a regular option. 3334 | * 3335 | * Searching for No-Op (0) and End-of-Option-List (1) is 3336 | * not supported. 3337 | * 3338 | * *len* must be at least 2 bytes, which is the minimal size 3339 | * of a header option.
3340 | * 3341 | * Supported flags: 3342 | * 3343 | * * **BPF_LOAD_HDR_OPT_TCP_SYN** to search from the 3344 | * saved_syn packet or the just-received syn packet. 3345 | * 3346 | * 3347 | * Returns 3348 | * > 0 when found, the header option is copied to *searchby_res*. 3349 | * The return value is the total length copied. On failure, a 3350 | * negative error code is returned: 3351 | * 3352 | * **-EINVAL** if a parameter is invalid. 3353 | * 3354 | * **-ENOMSG** if the option is not found. 3355 | * 3356 | * **-ENOENT** if no syn packet is available when 3357 | * **BPF_LOAD_HDR_OPT_TCP_SYN** is used. 3358 | * 3359 | * **-ENOSPC** if there is not enough space. Only *len* 3360 | * bytes are copied. 3361 | * 3362 | * **-EFAULT** on failure to parse the header options in the 3363 | * packet. 3364 | * 3365 | * **-EPERM** if the helper cannot be used under the current 3366 | * *skops*\ **->op**. 3367 | */ 3368 | static long (*bpf_load_hdr_opt)(struct bpf_sock_ops *skops, void *searchby_res, __u32 len, __u64 flags) = (void *) 142; 3369 | 3370 | /* 3371 | * bpf_store_hdr_opt 3372 | * 3373 | * Store a header option. The data will be copied 3374 | * from buffer *from* with length *len* to the TCP header. 3375 | * 3376 | * The buffer *from* should have the whole option that 3377 | * includes the kind, kind-length, and the actual 3378 | * option data. The *len* must be at least kind-length 3379 | * long. The kind-length does not have to be 4 byte 3380 | * aligned. The kernel will take care of the padding 3381 | * and setting the 4-byte aligned value to th->doff. 3382 | * 3383 | * This helper will check for a duplicated option 3384 | * by searching for the same option in the outgoing skb. 3385 | * 3386 | * This helper can only be called during 3387 | * **BPF_SOCK_OPS_WRITE_HDR_OPT_CB**. 3388 | * 3389 | * 3390 | * Returns 3391 | * 0 on success, or a negative error in case of failure: 3392 | * 3393 | * **-EINVAL** if a parameter is invalid. 3394 | * 3395 | * **-ENOSPC** if there is not enough space in the header. 3396 | * Nothing has been written. 3397 | * 3398 | * **-EEXIST** if the option already exists. 3399 | * 3400 | * **-EFAULT** on failure to parse the existing header options. 3401 | * 3402 | * **-EPERM** if the helper cannot be used under the current 3403 | * *skops*\ **->op**. 3404 | */ 3405 | static long (*bpf_store_hdr_opt)(struct bpf_sock_ops *skops, const void *from, __u32 len, __u64 flags) = (void *) 143; 3406 | 3407 | /* 3408 | * bpf_reserve_hdr_opt 3409 | * 3410 | * Reserve *len* bytes for the bpf header option. The 3411 | * space will be used by **bpf_store_hdr_opt**\ () later in 3412 | * **BPF_SOCK_OPS_WRITE_HDR_OPT_CB**. 3413 | * 3414 | * If **bpf_reserve_hdr_opt**\ () is called multiple times, 3415 | * the total number of bytes will be reserved. 3416 | * 3417 | * This helper can only be called during 3418 | * **BPF_SOCK_OPS_HDR_OPT_LEN_CB**. 3419 | * 3420 | * 3421 | * Returns 3422 | * 0 on success, or a negative error in case of failure: 3423 | * 3424 | * **-EINVAL** if a parameter is invalid. 3425 | * 3426 | * **-ENOSPC** if there is not enough space in the header. 3427 | * 3428 | * **-EPERM** if the helper cannot be used under the current 3429 | * *skops*\ **->op**. 3430 | */ 3431 | static long (*bpf_reserve_hdr_opt)(struct bpf_sock_ops *skops, __u32 len, __u64 flags) = (void *) 144; 3432 | 3433 | /* 3434 | * bpf_inode_storage_get 3435 | * 3436 | * Get a bpf_local_storage from an *inode*.
3437 | * 3438 | * Logically, it could be thought of as getting the value from 3439 | * a *map* with *inode* as the **key**. From this 3440 | * perspective, the usage is not much different from 3441 | * **bpf_map_lookup_elem**\ (*map*, **&**\ *inode*) except this 3442 | * helper enforces the key must be an inode and the map must also 3443 | * be a **BPF_MAP_TYPE_INODE_STORAGE**. 3444 | * 3445 | * Underneath, the value is stored locally at *inode* instead of 3446 | * the *map*. The *map* is used as the bpf-local-storage 3447 | * "type". The bpf-local-storage "type" (i.e. the *map*) is 3448 | * searched against all bpf_local_storage residing at *inode*. 3449 | * 3450 | * An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be 3451 | * used such that a new bpf_local_storage will be 3452 | * created if one does not exist. *value* can be used 3453 | * together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify 3454 | * the initial value of a bpf_local_storage. If *value* is 3455 | * **NULL**, the new bpf_local_storage will be zero initialized. 3456 | * 3457 | * Returns 3458 | * A bpf_local_storage pointer is returned on success. 3459 | * 3460 | * **NULL** if not found or there was an error in adding 3461 | * a new bpf_local_storage. 3462 | */ 3463 | static void *(*bpf_inode_storage_get)(void *map, void *inode, void *value, __u64 flags) = (void *) 145; 3464 | 3465 | /* 3466 | * bpf_inode_storage_delete 3467 | * 3468 | * Delete a bpf_local_storage from an *inode*. 3469 | * 3470 | * Returns 3471 | * 0 on success. 3472 | * 3473 | * **-ENOENT** if the bpf_local_storage cannot be found. 3474 | */ 3475 | static int (*bpf_inode_storage_delete)(void *map, void *inode) = (void *) 146; 3476 | 3477 | /* 3478 | * bpf_d_path 3479 | * 3480 | * Return full path for given **struct path** object, which 3481 | * needs to be the kernel BTF *path* object. The path is 3482 | * returned in the provided buffer *buf* of size *sz* and 3483 | * is zero terminated. 3484 | * 3485 | * 3486 | * Returns 3487 | * On success, the strictly positive length of the string, 3488 | * including the trailing NUL character. On error, a negative 3489 | * value. 3490 | */ 3491 | static long (*bpf_d_path)(struct path *path, char *buf, __u32 sz) = (void *) 147; 3492 | 3493 | /* 3494 | * bpf_copy_from_user 3495 | * 3496 | * Read *size* bytes from user space address *user_ptr* and store 3497 | * the data in *dst*. This is a wrapper of **copy_from_user**\ (). 3498 | * 3499 | * Returns 3500 | * 0 on success, or a negative error in case of failure. 3501 | */ 3502 | static long (*bpf_copy_from_user)(void *dst, __u32 size, const void *user_ptr) = (void *) 148; 3503 | 3504 | /* 3505 | * bpf_snprintf_btf 3506 | * 3507 | * Use BTF to store a string representation of *ptr*->ptr in *str*, 3508 | * using *ptr*->type_id. This value should specify the type 3509 | * that *ptr*->ptr points to. LLVM __builtin_btf_type_id(type, 1) 3510 | * can be used to look up vmlinux BTF type ids. Traversing the 3511 | * data structure using BTF, the type information and values are 3512 | * stored in the first *str_size* - 1 bytes of *str*. Safe copy of 3513 | * the pointer data is carried out to avoid kernel crashes during 3514 | * operation. Smaller types can use string space on the stack; 3515 | * larger programs can use map data to store the string 3516 | * representation. 3517 | * 3518 | * The string can be subsequently shared with userspace via 3519 | * bpf_perf_event_output() or ring buffer interfaces. 
3520 | * bpf_trace_printk() is to be avoided as it places too small 3521 | * a limit on string size to be useful. 3522 | * 3523 | * *flags* is a combination of 3524 | * 3525 | * **BTF_F_COMPACT** 3526 | * no formatting around type information 3527 | * **BTF_F_NONAME** 3528 | * no struct/union member names/types 3529 | * **BTF_F_PTR_RAW** 3530 | * show raw (unobfuscated) pointer values; 3531 | * equivalent to printk specifier %px. 3532 | * **BTF_F_ZERO** 3533 | * show zero-valued struct/union members; they 3534 | * are not displayed by default 3535 | * 3536 | * 3537 | * Returns 3538 | * The number of bytes that were written (or would have been 3539 | * written if output had to be truncated due to string size), 3540 | * or a negative error in cases of failure. 3541 | */ 3542 | static long (*bpf_snprintf_btf)(char *str, __u32 str_size, struct btf_ptr *ptr, __u32 btf_ptr_size, __u64 flags) = (void *) 149; 3543 | 3544 | /* 3545 | * bpf_seq_printf_btf 3546 | * 3547 | * Use BTF to write to seq_write a string representation of 3548 | * *ptr*->ptr, using *ptr*->type_id as per bpf_snprintf_btf(). 3549 | * *flags* are identical to those used for bpf_snprintf_btf. 3550 | * 3551 | * Returns 3552 | * 0 on success or a negative error in case of failure. 3553 | */ 3554 | static long (*bpf_seq_printf_btf)(struct seq_file *m, struct btf_ptr *ptr, __u32 ptr_size, __u64 flags) = (void *) 150; 3555 | 3556 | /* 3557 | * bpf_skb_cgroup_classid 3558 | * 3559 | * See **bpf_get_cgroup_classid**\ () for the main description. 3560 | * This helper differs from **bpf_get_cgroup_classid**\ () in that 3561 | * the cgroup v1 net_cls class is retrieved only from the *skb*'s 3562 | * associated socket instead of the current process. 3563 | * 3564 | * Returns 3565 | * The id is returned or 0 in case the id could not be retrieved. 3566 | */ 3567 | static __u64 (*bpf_skb_cgroup_classid)(struct __sk_buff *skb) = (void *) 151; 3568 | 3569 | /* 3570 | * bpf_redirect_neigh 3571 | * 3572 | * Redirect the packet to another net device of index *ifindex* 3573 | * and fill in L2 addresses from neighboring subsystem. This helper 3574 | * is somewhat similar to **bpf_redirect**\ (), except that it 3575 | * populates L2 addresses as well, meaning, internally, the helper 3576 | * relies on the neighbor lookup for the L2 address of the nexthop. 3577 | * 3578 | * The helper will perform a FIB lookup based on the skb's 3579 | * networking header to get the address of the next hop, unless 3580 | * this is supplied by the caller in the *params* argument. The 3581 | * *plen* argument indicates the len of *params* and should be set 3582 | * to 0 if *params* is NULL. 3583 | * 3584 | * The *flags* argument is reserved and must be 0. The helper is 3585 | * currently only supported for tc BPF program types, and enabled 3586 | * for IPv4 and IPv6 protocols. 3587 | * 3588 | * Returns 3589 | * The helper returns **TC_ACT_REDIRECT** on success or 3590 | * **TC_ACT_SHOT** on error. 3591 | */ 3592 | static long (*bpf_redirect_neigh)(__u32 ifindex, struct bpf_redir_neigh *params, int plen, __u64 flags) = (void *) 152; 3593 | 3594 | /* 3595 | * bpf_per_cpu_ptr 3596 | * 3597 | * Take a pointer to a percpu ksym, *percpu_ptr*, and return a 3598 | * pointer to the percpu kernel variable on *cpu*. A ksym is an 3599 | * extern variable decorated with '__ksym'. For ksym, there is a 3600 | * global var (either static or global) defined of the same name 3601 | * in the kernel. The ksym is percpu if the global var is percpu. 
3602 |  * The returned pointer points to the global percpu var on *cpu*.
3603 |  *
3604 |  * bpf_per_cpu_ptr() has the same semantics as per_cpu_ptr() in the
3605 |  * kernel, except that bpf_per_cpu_ptr() may return NULL. This
3606 |  * happens if *cpu* is larger than nr_cpu_ids. The caller of
3607 |  * bpf_per_cpu_ptr() must check the returned value.
3608 |  *
3609 |  * Returns
3610 |  * A pointer pointing to the kernel percpu variable on *cpu*, or
3611 |  * NULL, if *cpu* is invalid.
3612 |  */
3613 | static void *(*bpf_per_cpu_ptr)(const void *percpu_ptr, __u32 cpu) = (void *) 153;
3614 | 
3615 | /*
3616 |  * bpf_this_cpu_ptr
3617 |  *
3618 |  * Take a pointer to a percpu ksym, *percpu_ptr*, and return a
3619 |  * pointer to the percpu kernel variable on this cpu. See the
3620 |  * description of 'ksym' in **bpf_per_cpu_ptr**\ ().
3621 |  *
3622 |  * bpf_this_cpu_ptr() has the same semantics as this_cpu_ptr() in
3623 |  * the kernel. Unlike **bpf_per_cpu_ptr**\ (), it never
3624 |  * returns NULL.
3625 |  *
3626 |  * Returns
3627 |  * A pointer pointing to the kernel percpu variable on this cpu.
3628 |  */
3629 | static void *(*bpf_this_cpu_ptr)(const void *percpu_ptr) = (void *) 154;
3630 | 
3631 | /*
3632 |  * bpf_redirect_peer
3633 |  *
3634 |  * Redirect the packet to another net device of index *ifindex*.
3635 |  * This helper is somewhat similar to **bpf_redirect**\ (), except
3636 |  * that the redirection happens to the *ifindex*' peer device and
3637 |  * the netns switch takes place from ingress to ingress without
3638 |  * going through the CPU's backlog queue.
3639 |  *
3640 |  * The *flags* argument is reserved and must be 0. The helper is
3641 |  * currently only supported for tc BPF program types at the ingress
3642 |  * hook and for veth device types. The peer device must reside in a
3643 |  * different network namespace.
3644 |  *
3645 |  * Returns
3646 |  * The helper returns **TC_ACT_REDIRECT** on success or
3647 |  * **TC_ACT_SHOT** on error.
3648 |  */
3649 | static long (*bpf_redirect_peer)(__u32 ifindex, __u64 flags) = (void *) 155;
3650 | 
3651 | /*
3652 |  * bpf_task_storage_get
3653 |  *
3654 |  * Get a bpf_local_storage from the *task*.
3655 |  *
3656 |  * Logically, it could be thought of as getting the value from
3657 |  * a *map* with *task* as the **key**. From this
3658 |  * perspective, the usage is not much different from
3659 |  * **bpf_map_lookup_elem**\ (*map*, **&**\ *task*) except that this
3660 |  * helper enforces that the key must be a task_struct and the map
3661 |  * must be a **BPF_MAP_TYPE_TASK_STORAGE**.
3662 |  *
3663 |  * Underneath, the value is stored locally at *task* instead of
3664 |  * the *map*. The *map* is used as the bpf-local-storage
3665 |  * "type". The bpf-local-storage "type" (i.e. the *map*) is
3666 |  * searched against all bpf_local_storage residing at *task*.
3667 |  *
3668 |  * An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be
3669 |  * used such that a new bpf_local_storage will be
3670 |  * created if one does not exist. *value* can be used
3671 |  * together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify
3672 |  * the initial value of a bpf_local_storage. If *value* is
3673 |  * **NULL**, the new bpf_local_storage will be zero initialized.
3674 |  *
3675 |  * Returns
3676 |  * A bpf_local_storage pointer is returned on success.
3677 |  *
3678 |  * **NULL** if not found or there was an error in adding
3679 |  * a new bpf_local_storage.
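 *
 * A minimal usage sketch (an illustration, not code from this
 * repository), assuming a BPF_MAP_TYPE_TASK_STORAGE map named
 * task_map is declared elsewhere and the program type allows task
 * storage (e.g. an LSM or tracing program):
 *
 *	struct task_struct *t = bpf_get_current_task_btf();
 *	__u64 *cnt;
 *
 *	cnt = bpf_task_storage_get(&task_map, t, 0,
 *				   BPF_LOCAL_STORAGE_GET_F_CREATE);
 *	if (cnt)
 *		__sync_fetch_and_add(cnt, 1);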
3680 |  */
3681 | static void *(*bpf_task_storage_get)(void *map, struct task_struct *task, void *value, __u64 flags) = (void *) 156;
3682 | 
3683 | /*
3684 |  * bpf_task_storage_delete
3685 |  *
3686 |  * Delete a bpf_local_storage from a *task*.
3687 |  *
3688 |  * Returns
3689 |  * 0 on success.
3690 |  *
3691 |  * **-ENOENT** if the bpf_local_storage cannot be found.
3692 |  */
3693 | static long (*bpf_task_storage_delete)(void *map, struct task_struct *task) = (void *) 157;
3694 | 
3695 | /*
3696 |  * bpf_get_current_task_btf
3697 |  *
3698 |  * Return a BTF pointer to the "current" task.
3699 |  * This pointer can also be used in helpers that accept an
3700 |  * *ARG_PTR_TO_BTF_ID* of type *task_struct*.
3701 |  *
3702 |  * Returns
3703 |  * Pointer to the current task.
3704 |  */
3705 | static struct task_struct *(*bpf_get_current_task_btf)(void) = (void *) 158;
3706 | 
3707 | /*
3708 |  * bpf_bprm_opts_set
3709 |  *
3710 |  * Set or clear certain options on *bprm*:
3711 |  *
3712 |  * **BPF_F_BPRM_SECUREEXEC** Set the secureexec bit
3713 |  * which sets the **AT_SECURE** auxv for glibc. The bit
3714 |  * is cleared if the flag is not specified.
3715 |  *
3716 |  * Returns
3717 |  * **-EINVAL** if invalid *flags* are passed, zero otherwise.
3718 |  */
3719 | static long (*bpf_bprm_opts_set)(struct linux_binprm *bprm, __u64 flags) = (void *) 159;
3720 | 
3721 | /*
3722 |  * bpf_ktime_get_coarse_ns
3723 |  *
3724 |  * Return a coarse-grained version of the time elapsed since
3725 |  * system boot, in nanoseconds. Does not include time the system
3726 |  * was suspended.
3727 |  *
3728 |  * See: **clock_gettime**\ (**CLOCK_MONOTONIC_COARSE**)
3729 |  *
3730 |  * Returns
3731 |  * Current *ktime*.
3732 |  */
3733 | static __u64 (*bpf_ktime_get_coarse_ns)(void) = (void *) 160;
3734 | 
3735 | /*
3736 |  * bpf_ima_inode_hash
3737 |  *
3738 |  * Returns the stored IMA hash of the *inode* (if it's available).
3739 |  * If the hash is larger than *size*, then only *size*
3740 |  * bytes will be copied to *dst*.
3741 |  *
3742 |  * Returns
3743 |  * The **hash_algo** is returned on success,
3744 |  * **-EOPNOTSUPP** if IMA is disabled or **-EINVAL** if
3745 |  * invalid arguments are passed.
3746 |  */
3747 | static long (*bpf_ima_inode_hash)(struct inode *inode, void *dst, __u32 size) = (void *) 161;
3748 | 
3749 | /*
3750 |  * bpf_sock_from_file
3751 |  *
3752 |  * If the given file represents a socket, returns the associated
3753 |  * socket.
3754 |  *
3755 |  * Returns
3756 |  * A pointer to a struct socket on success or NULL if the file is
3757 |  * not a socket.
3758 |  */
3759 | static struct socket *(*bpf_sock_from_file)(struct file *file) = (void *) 162;
3760 | 
3761 | /*
3762 |  * bpf_check_mtu
3763 |  *
3764 |  * Check the packet size against the MTU of a net device (based
3765 |  * on *ifindex*). This helper will likely be used in combination
3766 |  * with helpers that adjust/change the packet size.
3767 |  *
3768 |  * The argument *len_diff* can be used to query with a planned
3769 |  * size change. This allows checking the MTU prior to changing the
3770 |  * packet ctx. Providing a *len_diff* adjustment that is larger
3771 |  * than the actual packet size (resulting in a negative packet
3772 |  * size) will in principle not exceed the MTU, which is why it is
3773 |  * not considered a failure. Other BPF helpers are needed to
3774 |  * perform the planned size change, which is why the responsibility
3775 |  * for catching a negative packet size belongs to those helpers.
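 *
 * For example (a sketch, not code from this repository), a tc
 * program that plans to grow the packet by 100 bytes could verify
 * the new size first, with *ifindex* 0 selecting the current net
 * device as described below, and drop on a violation or error:
 *
 *	__u32 mtu_len = 0;
 *
 *	if (bpf_check_mtu(skb, 0, &mtu_len, 100, 0))
 *		return TC_ACT_SHOT;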
3776 |  *
3777 |  * Specifying an *ifindex* of zero means the MTU check is performed
3778 |  * against the current net device. This is practical if the helper
3779 |  * isn't used prior to a redirect.
3780 |  *
3781 |  * On input, *mtu_len* must be a valid pointer, else the verifier
3782 |  * will reject the BPF program. If the value *mtu_len* is initialized
3783 |  * to zero then the ctx packet size is used. When the value *mtu_len*
3784 |  * is provided as input, it specifies the L3 length that the MTU
3785 |  * check is done against. Remember that XDP and TC lengths operate
3786 |  * at L2, but this value is L3, as it correlates to the MTU and
3787 |  * IP-header tot_len values, which are L3 (similar behavior to bpf_fib_lookup).
3788 |  *
3789 |  * The Linux kernel route table can configure MTUs on a more
3790 |  * specific per-route level, which is not provided by this helper.
3791 |  * For route-level MTU checks use the **bpf_fib_lookup**\ ()
3792 |  * helper.
3793 |  *
3794 |  * *ctx* is either **struct xdp_md** for XDP programs or
3795 |  * **struct sk_buff** for tc cls_act programs.
3796 |  *
3797 |  * The *flags* argument can be a combination of one or more of the
3798 |  * following values:
3799 |  *
3800 |  * **BPF_MTU_CHK_SEGS**
3801 |  * This flag only works for *ctx* **struct sk_buff**.
3802 |  * If the packet context contains extra packet segment buffers
3803 |  * (often known as a GSO skb), then the MTU is harder to
3804 |  * check at this point, because in the transmit path it is
3805 |  * possible for the skb packet to get re-segmented
3806 |  * (depending on net device features). This could still be
3807 |  * an MTU violation, so this flag enables performing the MTU
3808 |  * check against segments, with a different violation
3809 |  * return code to tell them apart. The check cannot use len_diff.
3810 |  *
3811 |  * On return, the *mtu_len* pointer contains the MTU value of the net
3812 |  * device. Remember that the net device's configured MTU is the L3 size,
3813 |  * which is what is returned here, while XDP and TC lengths operate at L2.
3814 |  * The helper takes this into account for you, but remember it when using
3815 |  * the MTU value in your BPF code.
3816 |  *
3817 |  *
3818 |  * Returns
3819 |  * * 0 on success; the MTU value is populated in the *mtu_len* pointer.
3820 |  *
3821 |  * * < 0 if any input argument is invalid (*mtu_len* not updated)
3822 |  *
3823 |  * MTU violations return positive values, but also populate the MTU
3824 |  * value in the *mtu_len* pointer, as this can be needed for
3825 |  * implementing PMTU handling:
3826 |  *
3827 |  * * **BPF_MTU_CHK_RET_FRAG_NEEDED**
3828 |  * * **BPF_MTU_CHK_RET_SEGS_TOOBIG**
3829 |  */
3830 | static long (*bpf_check_mtu)(void *ctx, __u32 ifindex, __u32 *mtu_len, __s32 len_diff, __u64 flags) = (void *) 163;
3831 | 
3832 | /*
3833 |  * bpf_for_each_map_elem
3834 |  *
3835 |  * For each element in **map**, call the **callback_fn** function with
3836 |  * **map**, **callback_ctx** and other map-specific parameters.
3837 |  * The **callback_fn** should be a static function and
3838 |  * the **callback_ctx** should be a pointer to the stack.
3839 |  * **flags** is used to control certain aspects of the helper.
3840 |  * Currently, **flags** must be 0.
3841 |  *
3842 |  * The following is a list of supported map types and their
3843 |  * respective expected callback signatures:
3844 |  *
3845 |  * BPF_MAP_TYPE_HASH, BPF_MAP_TYPE_PERCPU_HASH,
3846 |  * BPF_MAP_TYPE_LRU_HASH, BPF_MAP_TYPE_LRU_PERCPU_HASH,
3847 |  * BPF_MAP_TYPE_ARRAY, BPF_MAP_TYPE_PERCPU_ARRAY
3848 |  *
3849 |  * long (\*callback_fn)(struct bpf_map \*map, const void \*key, void \*value, void \*ctx);
3850 |  *
3851 |  * For per_cpu maps, the map_value is the value on the cpu where the
3852 |  * bpf_prog is running.
3853 |  *
3854 |  * If **callback_fn** returns 0, the helper will continue to the next
3855 |  * element. If the return value is 1, the helper will skip the rest of
3856 |  * the elements and return. Other return values are not used for now.
3857 |  *
3858 |  *
3859 |  * Returns
3860 |  * The number of traversed map elements on success, **-EINVAL** for
3861 |  * invalid **flags**.
3862 |  */
3863 | static long (*bpf_for_each_map_elem)(void *map, void *callback_fn, void *callback_ctx, __u64 flags) = (void *) 164;
3864 | 
3865 | /*
3866 |  * bpf_snprintf
3867 |  *
3868 |  * Outputs a string into the **str** buffer of size **str_size**
3869 |  * based on a format string stored in a read-only map pointed to by
3870 |  * **fmt**.
3871 |  *
3872 |  * Each format specifier in **fmt** corresponds to one u64 element
3873 |  * in the **data** array. For strings and pointers where pointees
3874 |  * are accessed, only the pointer values are stored in the *data*
3875 |  * array. The *data_len* is the size of *data* in bytes.
3876 |  *
3877 |  * Formats **%s** and **%p{i,I}{4,6}** require reading kernel
3878 |  * memory. Reading kernel memory may fail due to either an invalid
3879 |  * address or a valid address that requires a major memory fault. If
3880 |  * reading kernel memory fails, the string for **%s** will be an
3881 |  * empty string, and the ip address for **%p{i,I}{4,6}** will be 0.
3882 |  * Not returning an error to the bpf program is consistent with what
3883 |  * **bpf_trace_printk**\ () does for now.
3884 |  *
3885 |  *
3886 |  * Returns
3887 |  * The strictly positive length of the formatted string, including
3888 |  * the trailing zero character. If the return value is greater than
3889 |  * **str_size**, **str** contains a truncated string, guaranteed to
3890 |  * be zero-terminated except when **str_size** is 0.
3891 |  *
3892 |  * Or **-EBUSY** if the per-CPU memory copy buffer is busy.
3893 |  */
3894 | static long (*bpf_snprintf)(char *str, __u32 str_size, const char *fmt, __u64 *data, __u32 data_len) = (void *) 165;
3895 | 
3896 | /*
3897 |  * bpf_sys_bpf
3898 |  *
3899 |  * Execute the bpf syscall with the given arguments.
3900 |  *
3901 |  * Returns
3902 |  * A syscall result.
3903 |  */
3904 | static long (*bpf_sys_bpf)(__u32 cmd, void *attr, __u32 attr_size) = (void *) 166;
3905 | 
3906 | /*
3907 |  * bpf_btf_find_by_name_kind
3908 |  *
3909 |  * Find a BTF type with the given name and kind in vmlinux BTF or in a module's BTFs.
3910 |  *
3911 |  * Returns
3912 |  * Returns btf_id and btf_obj_fd in the lower and upper 32 bits.
3913 |  */
3914 | static long (*bpf_btf_find_by_name_kind)(char *name, int name_sz, __u32 kind, int flags) = (void *) 167;
3915 | 
3916 | /*
3917 |  * bpf_sys_close
3918 |  *
3919 |  * Execute the close syscall for the given FD.
3920 |  *
3921 |  * Returns
3922 |  * A syscall result.
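 *
 * A hedged sketch (not code from this repository): in a
 * BPF_PROG_TYPE_SYSCALL program, an fd handed back by
 * bpf_sys_bpf() can be released with this helper, assuming a
 * union bpf_attr named attr has been prepared elsewhere:
 *
 *	int fd = bpf_sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
 *
 *	if (fd >= 0)
 *		bpf_sys_close(fd);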
3923 | */ 3924 | static long (*bpf_sys_close)(__u32 fd) = (void *) 168; 3925 | 3926 | 3927 | -------------------------------------------------------------------------------- /bpf/headers/bpf_helpers.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ 2 | #ifndef __BPF_HELPERS__ 3 | #define __BPF_HELPERS__ 4 | 5 | /* 6 | * Note that bpf programs need to include either 7 | * vmlinux.h (auto-generated from BTF) or linux/types.h 8 | * in advance since bpf_helper_defs.h uses such types 9 | * as __u64. 10 | */ 11 | #include "bpf_helper_defs.h" 12 | 13 | #define __uint(name, val) int (*name)[val] 14 | #define __type(name, val) typeof(val) *name 15 | #define __array(name, val) typeof(val) *name[] 16 | 17 | /* Helper macro to print out debug messages */ 18 | #define bpf_printk(fmt, ...) \ 19 | ({ \ 20 | char ____fmt[] = fmt; \ 21 | bpf_trace_printk(____fmt, sizeof(____fmt), \ 22 | ##__VA_ARGS__); \ 23 | }) 24 | 25 | /* 26 | * Helper macro to place programs, maps, license in 27 | * different sections in elf_bpf file. Section names 28 | * are interpreted by libbpf depending on the context (BPF programs, BPF maps, 29 | * extern variables, etc). 30 | * To allow use of SEC() with externs (e.g., for extern .maps declarations), 31 | * make sure __attribute__((unused)) doesn't trigger compilation warning. 32 | */ 33 | #define SEC(name) \ 34 | _Pragma("GCC diagnostic push") \ 35 | _Pragma("GCC diagnostic ignored \"-Wignored-attributes\"") \ 36 | __attribute__((section(name), used)) \ 37 | _Pragma("GCC diagnostic pop") \ 38 | 39 | /* Avoid 'linux/stddef.h' definition of '__always_inline'. */ 40 | #undef __always_inline 41 | #define __always_inline inline __attribute__((always_inline)) 42 | 43 | #ifndef __noinline 44 | #define __noinline __attribute__((noinline)) 45 | #endif 46 | #ifndef __weak 47 | #define __weak __attribute__((weak)) 48 | #endif 49 | 50 | /* 51 | * Use __hidden attribute to mark a non-static BPF subprogram effectively 52 | * static for BPF verifier's verification algorithm purposes, allowing more 53 | * extensive and permissive BPF verification process, taking into account 54 | * subprogram's caller context. 55 | */ 56 | #define __hidden __attribute__((visibility("hidden"))) 57 | 58 | /* When utilizing vmlinux.h with BPF CO-RE, user BPF programs can't include 59 | * any system-level headers (such as stddef.h, linux/version.h, etc), and 60 | * commonly-used macros like NULL and KERNEL_VERSION aren't available through 61 | * vmlinux.h. This just adds unnecessary hurdles and forces users to re-define 62 | * them on their own. So as a convenience, provide such definitions here. 63 | */ 64 | #ifndef NULL 65 | #define NULL ((void *)0) 66 | #endif 67 | 68 | #ifndef KERNEL_VERSION 69 | #define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + ((c) > 255 ? 255 : (c))) 70 | #endif 71 | 72 | /* 73 | * Helper macros to manipulate data structures 74 | */ 75 | #ifndef offsetof 76 | #define offsetof(TYPE, MEMBER) ((unsigned long)&((TYPE *)0)->MEMBER) 77 | #endif 78 | #ifndef container_of 79 | #define container_of(ptr, type, member) \ 80 | ({ \ 81 | void *__mptr = (void *)(ptr); \ 82 | ((type *)(__mptr - offsetof(type, member))); \ 83 | }) 84 | #endif 85 | 86 | /* 87 | * Helper macro to throw a compilation error if __bpf_unreachable() gets 88 | * built into the resulting code. This works given BPF back end does not 89 | * implement __builtin_trap(). 
This is useful to assert that certain paths 90 | * of the program code are never used and hence eliminated by the compiler. 91 | * 92 | * For example, consider a switch statement that covers known cases used by 93 | * the program. __bpf_unreachable() can then reside in the default case. If 94 | * the program gets extended such that a case is not covered in the switch 95 | * statement, then it will throw a build error due to the default case not 96 | * being compiled out. 97 | */ 98 | #ifndef __bpf_unreachable 99 | # define __bpf_unreachable() __builtin_trap() 100 | #endif 101 | 102 | /* 103 | * Helper function to perform a tail call with a constant/immediate map slot. 104 | */ 105 | #if __clang_major__ >= 8 && defined(__bpf__) 106 | static __always_inline void 107 | bpf_tail_call_static(void *ctx, const void *map, const __u32 slot) 108 | { 109 | if (!__builtin_constant_p(slot)) 110 | __bpf_unreachable(); 111 | 112 | /* 113 | * Provide a hard guarantee that LLVM won't optimize setting r2 (map 114 | * pointer) and r3 (constant map index) from _different paths_ ending 115 | * up at the _same_ call insn as otherwise we won't be able to use the 116 | * jmpq/nopl retpoline-free patching by the x86-64 JIT in the kernel 117 | * given they mismatch. See also d2e4c1e6c294 ("bpf: Constant map key 118 | * tracking for prog array pokes") for details on verifier tracking. 119 | * 120 | * Note on clobber list: we need to stay in-line with BPF calling 121 | * convention, so even if we don't end up using r0, r4, r5, we need 122 | * to mark them as clobber so that LLVM doesn't end up using them 123 | * before / after the call. 124 | */ 125 | asm volatile("r1 = %[ctx]\n\t" 126 | "r2 = %[map]\n\t" 127 | "r3 = %[slot]\n\t" 128 | "call 12" 129 | :: [ctx]"r"(ctx), [map]"r"(map), [slot]"i"(slot) 130 | : "r0", "r1", "r2", "r3", "r4", "r5"); 131 | } 132 | #endif 133 | 134 | /* 135 | * Helper structure used by eBPF C program 136 | * to describe BPF map attributes to libbpf loader 137 | */ 138 | struct bpf_map_def { 139 | unsigned int type; 140 | unsigned int key_size; 141 | unsigned int value_size; 142 | unsigned int max_entries; 143 | unsigned int map_flags; 144 | }; 145 | 146 | enum libbpf_pin_type { 147 | LIBBPF_PIN_NONE, 148 | /* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */ 149 | LIBBPF_PIN_BY_NAME, 150 | }; 151 | 152 | enum libbpf_tristate { 153 | TRI_NO = 0, 154 | TRI_YES = 1, 155 | TRI_MODULE = 2, 156 | }; 157 | 158 | #define __kconfig __attribute__((section(".kconfig"))) 159 | #define __ksym __attribute__((section(".ksyms"))) 160 | 161 | #ifndef ___bpf_concat 162 | #define ___bpf_concat(a, b) a ## b 163 | #endif 164 | #ifndef ___bpf_apply 165 | #define ___bpf_apply(fn, n) ___bpf_concat(fn, n) 166 | #endif 167 | #ifndef ___bpf_nth 168 | #define ___bpf_nth(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _a, _b, _c, N, ...) N 169 | #endif 170 | #ifndef ___bpf_narg 171 | #define ___bpf_narg(...) \ 172 | ___bpf_nth(_, ##__VA_ARGS__, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0) 173 | #endif 174 | 175 | #define ___bpf_fill0(arr, p, x) do {} while (0) 176 | #define ___bpf_fill1(arr, p, x) arr[p] = x 177 | #define ___bpf_fill2(arr, p, x, args...) arr[p] = x; ___bpf_fill1(arr, p + 1, args) 178 | #define ___bpf_fill3(arr, p, x, args...) arr[p] = x; ___bpf_fill2(arr, p + 1, args) 179 | #define ___bpf_fill4(arr, p, x, args...) arr[p] = x; ___bpf_fill3(arr, p + 1, args) 180 | #define ___bpf_fill5(arr, p, x, args...) arr[p] = x; ___bpf_fill4(arr, p + 1, args) 181 | #define ___bpf_fill6(arr, p, x, args...) 
arr[p] = x; ___bpf_fill5(arr, p + 1, args)
182 | #define ___bpf_fill7(arr, p, x, args...) arr[p] = x; ___bpf_fill6(arr, p + 1, args)
183 | #define ___bpf_fill8(arr, p, x, args...) arr[p] = x; ___bpf_fill7(arr, p + 1, args)
184 | #define ___bpf_fill9(arr, p, x, args...) arr[p] = x; ___bpf_fill8(arr, p + 1, args)
185 | #define ___bpf_fill10(arr, p, x, args...) arr[p] = x; ___bpf_fill9(arr, p + 1, args)
186 | #define ___bpf_fill11(arr, p, x, args...) arr[p] = x; ___bpf_fill10(arr, p + 1, args)
187 | #define ___bpf_fill12(arr, p, x, args...) arr[p] = x; ___bpf_fill11(arr, p + 1, args)
188 | #define ___bpf_fill(arr, args...) \
189 | 	___bpf_apply(___bpf_fill, ___bpf_narg(args))(arr, 0, args)
190 | 
191 | /*
192 |  * BPF_SEQ_PRINTF to wrap bpf_seq_printf to-be-printed values
193 |  * in a structure.
194 |  */
195 | #define BPF_SEQ_PRINTF(seq, fmt, args...) \
196 | ({ \
197 | 	static const char ___fmt[] = fmt; \
198 | 	unsigned long long ___param[___bpf_narg(args)]; \
199 | 	\
200 | 	_Pragma("GCC diagnostic push") \
201 | 	_Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
202 | 	___bpf_fill(___param, args); \
203 | 	_Pragma("GCC diagnostic pop") \
204 | 	\
205 | 	bpf_seq_printf(seq, ___fmt, sizeof(___fmt), \
206 | 		       ___param, sizeof(___param)); \
207 | })
208 | 
209 | /*
210 |  * BPF_SNPRINTF wraps the bpf_snprintf helper with variadic arguments instead of
211 |  * an array of u64.
212 |  */
213 | #define BPF_SNPRINTF(out, out_size, fmt, args...) \
214 | ({ \
215 | 	static const char ___fmt[] = fmt; \
216 | 	unsigned long long ___param[___bpf_narg(args)]; \
217 | 	\
218 | 	_Pragma("GCC diagnostic push") \
219 | 	_Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
220 | 	___bpf_fill(___param, args); \
221 | 	_Pragma("GCC diagnostic pop") \
222 | 	\
223 | 	bpf_snprintf(out, out_size, ___fmt, \
224 | 		     ___param, sizeof(___param)); \
225 | })
226 | 
227 | #endif
228 | 
--------------------------------------------------------------------------------
/bpf/iptables-bpf.c:
--------------------------------------------------------------------------------
1 | #include "vmlinux.h"
2 | #include "bpf_endian.h"
3 | #include "bpf_helpers.h"
4 | 
5 | char _license[] SEC("license") = "GPL";
6 | 
7 | /* IPv4 destination addresses to drop. The key is the raw iph.daddr
8 |  * (network byte order); a value of 1 marks the address as filtered.
9 |  * Userspace (main.go) populates this map by its id. */
10 | struct {
11 | 	__uint(type, BPF_MAP_TYPE_HASH);
12 | 	__uint(max_entries, 16);
13 | 	__type(key, u32);
14 | 	__type(value, u8);
15 | } filter_daddrs SEC(".maps");
16 | 
17 | /* Meant to be used with iptables' bpf match (xt_bpf), where a nonzero
18 |  * return value means the packet matches the rule and zero means no
19 |  * match; BPF_DROP (2) and BPF_OK (0) fit that convention. */
20 | SEC("socket")
21 | int filter_iptables(void *skb) {
22 | 	struct iphdr iph;
23 | 	u8 *filtered;
24 | 
25 | 	/* Read the IPv4 header starting at the network layer; if it
26 | 	 * cannot be read, let the packet through. */
27 | 	if (bpf_skb_load_bytes_relative(skb, 0, &iph, sizeof(iph), BPF_HDR_START_NET) < 0)
28 | 		return BPF_OK;
29 | 
30 | 	/* Match (and thus drop) packets whose destination is in the map. */
31 | 	filtered = bpf_map_lookup_elem(&filter_daddrs, &iph.daddr);
32 | 	if (filtered != NULL && *filtered == 1)
33 | 		return BPF_DROP;
34 | 
35 | 	return BPF_OK;
36 | }
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module iptables-bpf
2 | 
3 | go 1.17
4 | 
5 | require (
6 | 	github.com/cilium/ebpf v0.7.0
7 | 	inet.af/netaddr v0.0.0-20211027220019-c74959edd3b6
8 | )
9 | 
10 | require (
11 | 	go4.org/intern v0.0.0-20211027215823-ae77deb06f29 // indirect
12 | 	go4.org/unsafe/assume-no-moving-gc v0.0.0-20211027215541-db492cf91b37 // indirect
13 | 	golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34 // indirect
14 | )
15 | 
--------------------------------------------------------------------------------
/go.sum:
--------------------------------------------------------------------------------
1 | github.com/cilium/ebpf v0.7.0 
h1:1k/q3ATgxSXRdrmPfH8d7YK0GfqVsEKZAX9dQZvs56k= 2 | github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= 3 | github.com/dvyukov/go-fuzz v0.0.0-20210103155950-6a8e9d1f2415/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw= 4 | github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY= 5 | github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= 6 | github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M= 7 | github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 8 | github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= 9 | github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= 10 | github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= 11 | github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= 12 | github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= 13 | github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 14 | go4.org/intern v0.0.0-20211027215823-ae77deb06f29 h1:UXLjNohABv4S58tHmeuIZDO6e3mHpW2Dx33gaNt03LE= 15 | go4.org/intern v0.0.0-20211027215823-ae77deb06f29/go.mod h1:cS2ma+47FKrLPdXFpr7CuxiTW3eyJbWew4qx0qtQWDA= 16 | go4.org/unsafe/assume-no-moving-gc v0.0.0-20211027215541-db492cf91b37 h1:Tx9kY6yUkLge/pFG7IEMwDZy6CS2ajFc9TvQdPCW0uA= 17 | go4.org/unsafe/assume-no-moving-gc v0.0.0-20211027215541-db492cf91b37/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= 18 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 19 | golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 20 | golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= 21 | golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 22 | golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 23 | golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 24 | golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= 25 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 26 | golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 27 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 28 | golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 29 | golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 30 | golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 31 | golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34 h1:GkvMjFtXUmahfDtashnc1mnrCtuBVcwse5QV2lUk/tI= 32 | golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 33 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 34 | golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 35 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 36 | 
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
37 | golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
38 | golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
39 | golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
40 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
41 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
42 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
43 | inet.af/netaddr v0.0.0-20211027220019-c74959edd3b6 h1:acCzuUSQ79tGsM/O50VRFySfMm19IoMKL+sZztZkCxw=
44 | inet.af/netaddr v0.0.0-20211027220019-c74959edd3b6/go.mod h1:y3MGhcFMlh0KZPMuXXow8mpjxxAk3yoDNsp4cQz54i8=
45 | 
--------------------------------------------------------------------------------
/main.go:
--------------------------------------------------------------------------------
1 | package main
2 | 
3 | import (
4 | 	"encoding/binary"
5 | 	"flag"
6 | 	"log"
7 | 	"strings"
8 | 
9 | 	"github.com/cilium/ebpf"
10 | 	"inet.af/netaddr"
11 | )
12 | 
13 | func main() {
14 | 
15 | 	var daddr string
16 | 	var bpfMap int
17 | 	flag.StringVar(&daddr, "d", "", "ip addresses to drop, separated by ','")
18 | 	flag.IntVar(&bpfMap, "m", 0, "the id of the bpf map (filter_daddrs)")
19 | 	flag.Parse()
20 | 
21 | 	// Parse the comma-separated list; the BPF map key is a u32, so only
22 | 	// IPv4 addresses are accepted (ip.As4 would panic on IPv6).
23 | 	var ips []netaddr.IP
24 | 	addrs := strings.FieldsFunc(daddr, func(r rune) bool { return r == ',' })
25 | 	for _, addr := range addrs {
26 | 		ip, err := netaddr.ParseIP(addr)
27 | 		if err != nil || !ip.Is4() {
28 | 			log.Fatalf("%s is not a valid IPv4 address", addr)
29 | 		}
30 | 
31 | 		ips = append(ips, ip)
32 | 	}
33 | 	if len(ips) == 0 {
34 | 		log.Fatalf("no ip address(es) to be dropped")
35 | 	}
36 | 
37 | 	m, err := ebpf.NewMapFromID(ebpf.MapID(bpfMap))
38 | 	if err != nil {
39 | 		log.Fatalf("bpf map(%d) not found, err: %v", bpfMap, err)
40 | 	}
41 | 
42 | 	// The BPF program matches the raw bytes of iph.daddr, which are in
43 | 	// network byte order. Decoding the 4 bytes as little-endian and letting
44 | 	// the library re-encode the uint32 in native order preserves that byte
45 | 	// sequence on little-endian hosts.
46 | 	val := uint8(1)
47 | 	for _, ip := range ips {
48 | 		_ip := ip.As4()
49 | 		ipval := binary.LittleEndian.Uint32(_ip[:])
50 | 		if err := m.Update(ipval, val, ebpf.UpdateAny); err != nil {
51 | 			log.Fatalf("failed to upsert data to bpf map(%d), err: %v", bpfMap, err)
52 | 		}
53 | 	}
54 | 
55 | 	log.Printf("%s can't be pinged", daddr)
56 | }
57 | 
--------------------------------------------------------------------------------