├── libbpf-rs ├── LICENSE ├── README.md ├── LICENSE.LPGL-2.1 ├── LICENSE.BSD-2-Clause ├── .cargo │ └── config ├── tests │ ├── bin │ │ ├── ringbuf.bpf.o │ │ ├── taskiter.bpf.o │ │ ├── percpu_map.bpf.o │ │ ├── runqslower.bpf.o │ │ └── src │ │ │ ├── percpu_map.bpf.c │ │ │ ├── taskiter.bpf.c │ │ │ ├── ringbuf.bpf.c │ │ │ └── runqslower.bpf.c │ ├── README.md │ ├── test_print.rs │ └── test.rs ├── src │ ├── error.rs │ ├── iter.rs │ ├── util.rs │ ├── link.rs │ ├── lib.rs │ ├── print.rs │ ├── ringbuf.rs │ ├── perf_buffer.rs │ ├── skeleton.rs │ ├── query.rs │ ├── program.rs │ ├── object.rs │ └── map.rs └── Cargo.toml ├── libbpf-cargo ├── LICENSE ├── README.md ├── LICENSE.LPGL-2.1 ├── LICENSE.BSD-2-Clause ├── src │ ├── btf │ │ ├── mod.rs │ │ ├── c_types.rs │ │ └── types.rs │ ├── make.rs │ ├── main.rs │ ├── metadata.rs │ ├── lib.rs │ └── build.rs └── Cargo.toml ├── LICENSE ├── examples ├── runqslower │ ├── .gitignore │ ├── src │ │ ├── bpf │ │ │ ├── vmlinux.h │ │ │ ├── runqslower.h │ │ │ └── runqslower.bpf.c │ │ └── main.rs │ ├── Cargo.toml │ ├── build.rs │ └── README.md ├── capable │ ├── .gitignore │ ├── src │ │ ├── bpf │ │ │ ├── vmlinux.h │ │ │ ├── capable.h │ │ │ └── capable.bpf.c │ │ └── main.rs │ ├── Cargo.toml │ ├── build.rs │ └── README.md └── query │ ├── Cargo.toml │ └── src │ └── main.rs ├── .gitattributes ├── Cargo.toml ├── .gitignore ├── .github └── workflows │ └── rust.yml ├── README.md └── LICENSE.BSD-2-Clause /libbpf-rs/LICENSE: -------------------------------------------------------------------------------- 1 | ../LICENSE -------------------------------------------------------------------------------- /libbpf-cargo/LICENSE: -------------------------------------------------------------------------------- 1 | ../LICENSE -------------------------------------------------------------------------------- /libbpf-rs/README.md: -------------------------------------------------------------------------------- 1 | ../README.md 
-------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | LGPL-2.1 OR BSD-2-Clause 2 | -------------------------------------------------------------------------------- /libbpf-cargo/README.md: -------------------------------------------------------------------------------- 1 | ../README.md -------------------------------------------------------------------------------- /libbpf-cargo/LICENSE.LPGL-2.1: -------------------------------------------------------------------------------- 1 | ../LICENSE.LPGL-2.1 -------------------------------------------------------------------------------- /libbpf-rs/LICENSE.LPGL-2.1: -------------------------------------------------------------------------------- 1 | ../LICENSE.LPGL-2.1 -------------------------------------------------------------------------------- /examples/runqslower/.gitignore: -------------------------------------------------------------------------------- 1 | src/bpf/.output 2 | -------------------------------------------------------------------------------- /libbpf-rs/LICENSE.BSD-2-Clause: -------------------------------------------------------------------------------- 1 | ../LICENSE.BSD-2-Clause -------------------------------------------------------------------------------- /examples/capable/.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | src/bpf/.output 3 | -------------------------------------------------------------------------------- /examples/capable/src/bpf/vmlinux.h: -------------------------------------------------------------------------------- 1 | ../../../vmlinux_515.h -------------------------------------------------------------------------------- /libbpf-cargo/LICENSE.BSD-2-Clause: -------------------------------------------------------------------------------- 1 | ../LICENSE.BSD-2-Clause 
-------------------------------------------------------------------------------- /examples/runqslower/src/bpf/vmlinux.h: -------------------------------------------------------------------------------- 1 | ../../../vmlinux_515.h -------------------------------------------------------------------------------- /libbpf-rs/.cargo/config: -------------------------------------------------------------------------------- 1 | [target.x86_64-unknown-linux-gnu] 2 | runner = "sudo -E" 3 | -------------------------------------------------------------------------------- /libbpf-rs/tests/bin/ringbuf.bpf.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oyyd/libbpf-rs/master/libbpf-rs/tests/bin/ringbuf.bpf.o -------------------------------------------------------------------------------- /libbpf-rs/tests/bin/taskiter.bpf.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oyyd/libbpf-rs/master/libbpf-rs/tests/bin/taskiter.bpf.o -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | vmlinux*.h linguist-generated 2 | bpf_helper_defs.h linguist-generated 3 | bpf_helpers.h linguist-generated 4 | -------------------------------------------------------------------------------- /libbpf-rs/tests/bin/percpu_map.bpf.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oyyd/libbpf-rs/master/libbpf-rs/tests/bin/percpu_map.bpf.o -------------------------------------------------------------------------------- /libbpf-rs/tests/bin/runqslower.bpf.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oyyd/libbpf-rs/master/libbpf-rs/tests/bin/runqslower.bpf.o 
-------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = [ 3 | "libbpf-cargo", 4 | "libbpf-rs", 5 | "examples/runqslower", 6 | "examples/query", 7 | "examples/capable", 8 | ] 9 | -------------------------------------------------------------------------------- /libbpf-cargo/src/btf/mod.rs: -------------------------------------------------------------------------------- 1 | #[allow(clippy::module_inception)] 2 | pub mod btf; 3 | pub mod c_types; 4 | pub mod types; 5 | 6 | pub use btf::*; 7 | pub use types::*; 8 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | target 4 | 5 | # These are backup files generated by rustfmt 6 | **/*.rs.bk 7 | 8 | #IDE files 9 | .idea -------------------------------------------------------------------------------- /examples/query/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "query" 3 | version = "0.1.0" 4 | authors = ["Daniel Xu "] 5 | edition = "2018" 6 | 7 | [dependencies] 8 | libbpf-rs = { path = "../../libbpf-rs" } 9 | nix = "0.17" 10 | structopt = "0.3" 11 | -------------------------------------------------------------------------------- /examples/runqslower/src/bpf/runqslower.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ 2 | #ifndef __RUNQSLOWER_H 3 | #define __RUNQSLOWER_H 4 | 5 | #define TASK_COMM_LEN 16 6 | 7 | struct event { 8 | u8 task[TASK_COMM_LEN]; 9 | __u64 delta_us; 10 | pid_t pid; 11 | }; 12 | 13 | #endif /* __RUNQSLOWER_H */ 14 | 
-------------------------------------------------------------------------------- /libbpf-rs/tests/README.md: -------------------------------------------------------------------------------- 1 | # libbpf-rs tests 2 | 3 | libbpf-rs tests are designed to be independent of libbpf-cargo and underlying 4 | compiler versions. To that end, we check in pre-compiled bpf object files in 5 | `libbpf-rs/tests/bin`. To help with writing new tests, the original source 6 | code for the pre-compiled objects are placed in `libbpf-rs/tests/bin/src`. 7 | -------------------------------------------------------------------------------- /libbpf-rs/tests/bin/src/percpu_map.bpf.c: -------------------------------------------------------------------------------- 1 | #include "vmlinux.h" 2 | 3 | #include 4 | #include 5 | 6 | struct { 7 | __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); 8 | __uint(key_size, sizeof(u32)); 9 | __uint(value_size, sizeof(u32)); 10 | __uint(max_entries, 1); 11 | } percpu_map SEC(".maps"); 12 | 13 | char _license[] SEC("license") = "GPL"; 14 | -------------------------------------------------------------------------------- /examples/runqslower/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "runqslower" 3 | version = "0.1.0" 4 | authors = ["Daniel Xu "] 5 | edition = "2018" 6 | 7 | [dependencies] 8 | anyhow = "1.0" 9 | chrono = "0.4" 10 | libbpf-rs = { path = "../../libbpf-rs" } 11 | libc = "0.2" 12 | plain = "0.2" 13 | structopt = "0.3" 14 | 15 | [build-dependencies] 16 | libbpf-cargo = { path = "../../libbpf-cargo" } 17 | -------------------------------------------------------------------------------- /libbpf-rs/src/error.rs: -------------------------------------------------------------------------------- 1 | use std::result; 2 | 3 | use thiserror::Error; 4 | 5 | /// Canonical error type for this crate. 
6 | #[derive(Error, Debug)] 7 | pub enum Error { 8 | #[error("System error, errno: {0}")] 9 | System(i32), 10 | #[error("Input input: {0}")] 11 | InvalidInput(String), 12 | #[error("Internal error: {0}")] 13 | Internal(String), 14 | } 15 | 16 | pub type Result = result::Result; 17 | -------------------------------------------------------------------------------- /examples/capable/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "capable" 3 | version = "0.1.0" 4 | authors = ["Devasia Thomas "] 5 | edition = "2018" 6 | 7 | [dependencies] 8 | anyhow = "1.0" 9 | chrono = "0.4" 10 | libbpf-rs = { path = "../../libbpf-rs" } 11 | libc = "0.2" 12 | phf = { version = "0.10", features = ["macros"] } 13 | plain = "0.2" 14 | structopt = "0.3" 15 | 16 | [build-dependencies] 17 | libbpf-cargo = { path = "../../libbpf-cargo" } 18 | -------------------------------------------------------------------------------- /examples/capable/src/bpf/capable.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ 2 | 3 | #ifndef CAPABLE_CAPABLE_H 4 | #define CAPABLE_CAPABLE_H 5 | 6 | #define TASK_COMM_LEN 16 7 | #define BPF_MAX_STACK_DEPTH 127 8 | 9 | struct event { 10 | gid_t tgid; 11 | pid_t pid; 12 | uid_t uid; 13 | int cap; 14 | int audit; 15 | int insetid; 16 | u8 comm[TASK_COMM_LEN]; 17 | int kernel_stack_id; 18 | int user_stack_id; 19 | }; 20 | 21 | enum uniqueness { 22 | UNQ_OFF, UNQ_PID, UNQ_CGROUP 23 | }; 24 | 25 | #endif //CAPABLE_CAPABLE_H 26 | -------------------------------------------------------------------------------- /libbpf-rs/tests/bin/src/taskiter.bpf.c: -------------------------------------------------------------------------------- 1 | #include "vmlinux.h" 2 | 3 | #include 4 | #include 5 | 6 | struct index_pid_pair { 7 | uint32_t i; 8 | pid_t pid; 9 | }; 10 | 11 | static uint32_t i = 0; 12 | 13 | SEC("iter/task") 
14 | int dump_pid(struct bpf_iter__task *ctx) 15 | { 16 | struct seq_file *seq = ctx->meta->seq; 17 | struct task_struct *task = ctx->task; 18 | struct index_pid_pair p; 19 | 20 | if (!task) 21 | return 0; 22 | 23 | p.i = i++; 24 | p.pid = task->tgid; 25 | 26 | bpf_seq_write(seq, &p, sizeof(p)); 27 | return 0; 28 | } 29 | 30 | char _license[] SEC("license") = "GPL"; 31 | 32 | -------------------------------------------------------------------------------- /.github/workflows/rust.yml: -------------------------------------------------------------------------------- 1 | name: Rust 2 | 3 | on: 4 | push: 5 | branches: [ master ] 6 | pull_request: 7 | branches: [ master ] 8 | 9 | jobs: 10 | build: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - uses: actions/checkout@v2 14 | - name: Install deps 15 | run: sudo apt-get install -y clang-10 libelf-dev 16 | - name: Symlink clang 17 | run: sudo rm -f /bin/clang && sudo ln -s /usr/bin/clang-10 /bin/clang 18 | - name: Build 19 | run: cargo build --verbose --workspace --exclude runqslower 20 | - name: Run tests 21 | # Skip tests that require BTF built into kernel 22 | run: cargo test --verbose --workspace --exclude runqslower -- --skip test_object 23 | - name: Run rustfmt 24 | run: cargo fmt --package libbpf-cargo libbpf-rs -- --check 25 | -------------------------------------------------------------------------------- /examples/capable/build.rs: -------------------------------------------------------------------------------- 1 | use std::fs::create_dir_all; 2 | use std::path::Path; 3 | 4 | use libbpf_cargo::SkeletonBuilder; 5 | 6 | const SRC: &str = "./src/bpf/capable.bpf.c"; 7 | 8 | fn main() { 9 | // It's unfortunate we cannot use `OUT_DIR` to store the generated skeleton. 10 | // Reasons are because the generated skeleton contains compiler attributes 11 | // that cannot be `include!()`ed via macro. 
And we cannot use the `#[path = "..."]` 12 | // trick either because you cannot yet `concat!(env!("OUT_DIR"), "/skel.rs")` inside 13 | // the path attribute either (see https://github.com/rust-lang/rust/pull/83366). 14 | // 15 | // However, there is hope! When the above feature stabilizes we can clean this 16 | // all up. 17 | create_dir_all("./src/bpf/.output").unwrap(); 18 | let skel = Path::new("./src/bpf/.output/capable.skel.rs"); 19 | SkeletonBuilder::new(SRC).generate(&skel).unwrap(); 20 | println!("cargo:rerun-if-changed={}", SRC); 21 | } 22 | -------------------------------------------------------------------------------- /libbpf-rs/tests/bin/src/ringbuf.bpf.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | // Copyright (c) 2021 William Findlay 3 | #include "vmlinux.h" 4 | #include 5 | 6 | struct { 7 | __uint(type, BPF_MAP_TYPE_RINGBUF); 8 | __uint(max_entries, 4096 /* one page */); 9 | } ringbuf1 SEC(".maps"); 10 | 11 | struct { 12 | __uint(type, BPF_MAP_TYPE_RINGBUF); 13 | __uint(max_entries, 4096 /* one page */); 14 | } ringbuf2 SEC(".maps"); 15 | 16 | SEC("tp/syscalls/sys_enter_getpid") 17 | int handle__sys_enter_getpid(void *ctx) 18 | { 19 | int *value; 20 | 21 | value = bpf_ringbuf_reserve(&ringbuf1, sizeof(int), 0); 22 | if (value) { 23 | *value = 1; 24 | bpf_ringbuf_submit(value, 0); 25 | } 26 | 27 | value = bpf_ringbuf_reserve(&ringbuf2, sizeof(int), 0); 28 | if (value) { 29 | *value = 2; 30 | bpf_ringbuf_submit(value, 0); 31 | } 32 | 33 | return 0; 34 | } 35 | 36 | char LICENSE[] SEC("license") = "GPL"; 37 | -------------------------------------------------------------------------------- /examples/runqslower/build.rs: -------------------------------------------------------------------------------- 1 | use std::fs::create_dir_all; 2 | use std::path::Path; 3 | 4 | use libbpf_cargo::SkeletonBuilder; 5 | 6 | const SRC: &str = "./src/bpf/runqslower.bpf.c"; 7 | 8 | fn main() { 
9 | // It's unfortunate we cannot use `OUT_DIR` to store the generated skeleton. 10 | // Reasons are because the generated skeleton contains compiler attributes 11 | // that cannot be `include!()`ed via macro. And we cannot use the `#[path = "..."]` 12 | // trick either because you cannot yet `concat!(env!("OUT_DIR"), "/skel.rs")` inside 13 | // the path attribute either (see https://github.com/rust-lang/rust/pull/83366). 14 | // 15 | // However, there is hope! When the above feature stabilizes we can clean this 16 | // all up. 17 | create_dir_all("./src/bpf/.output").unwrap(); 18 | let skel = Path::new("./src/bpf/.output/runqslower.skel.rs"); 19 | SkeletonBuilder::new(SRC).generate(&skel).unwrap(); 20 | println!("cargo:rerun-if-changed={}", SRC); 21 | } 22 | -------------------------------------------------------------------------------- /libbpf-rs/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "libbpf-rs" 3 | description = "libbpf-rs is a safe, idiomatic, and opinionated wrapper around libbpf-sys" 4 | repository = "https://github.com/libbpf/libbpf-rs" 5 | homepage = "https://github.com/libbpf/libbpf-rs" 6 | readme = "../README.md" 7 | version = "0.15.0" 8 | authors = ["Daniel Xu "] 9 | edition = "2018" 10 | license = "LGPL-2.1 OR BSD-2-Clause" 11 | keywords = ["bpf", "ebpf", "libbpf"] 12 | 13 | [badges] 14 | maintenance = { status = "actively-developed" } 15 | 16 | [features] 17 | # When turned on, link against system-installed libbpf instead of building 18 | # and linking against vendored libbpf sources 19 | novendor = ["libbpf-sys/novendor"] 20 | 21 | [dependencies] 22 | bitflags = "1.3" 23 | lazy_static = "1.4" 24 | libbpf-sys = { version = "0.6.0-1" } 25 | nix = "0.22" 26 | num_enum = "0.5" 27 | strum_macros = "0.21" 28 | thiserror = "1.0" 29 | vsprintf = "2.0" 30 | 31 | [dev-dependencies] 32 | libc = "0.2" 33 | log = "0.4" 34 | plain = "0.2.3" 35 | scopeguard = "1.1" 36 | serial_test = 
"0.5" 37 | -------------------------------------------------------------------------------- /examples/runqslower/README.md: -------------------------------------------------------------------------------- 1 | # runqslower-rs 2 | 3 | `runqslower-rs` provides a canonical example on how to use `libbpf-cargo` and `libbpf-rs` 4 | effectively. 5 | 6 | --- 7 | 8 | To build the project: 9 | ```shell 10 | $ cd examples/runqslower 11 | $ cargo build 12 | $ sudo ../../target/debug/runqslower 1000 13 | Tracing run queue latency higher than 1000 us 14 | TIME COMM TID LAT(us) 15 | 13:40:58 WebExtensions 961211 1287 16 | 13:40:58 WebExtensions 961211 1516 17 | 13:40:58 Timer 961076 2255 18 | 13:40:58 AudioIPC0 1111261 2375 19 | 13:40:58 Gecko_IOThread 961074 2252 20 | 13:40:58 WebExtensions 961211 1030 21 | ^C 22 | ``` 23 | 24 | --- 25 | 26 | To generate an updated `vmlinux.h`: 27 | ```shell 28 | $ bpftool btf dump file /sys/kernel/btf/vmlinux format c > ./vmlinux.h 29 | ``` 30 | 31 | BTF might also be found at `/boot/vmlinux-$(uname -r)`, depending on which 32 | linux distribution you run. 33 | 34 | You can see if your kernel is compiled with BTF by running: 35 | ```shell 36 | $ zgrep CONFIG_DEBUG_INFO_BTF /proc/config.gz 37 | ``` 38 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ![Rust](https://github.com/libbpf/libbpf-rs/workflows/Rust/badge.svg?branch=master) 2 | 3 | WARNING: The API is not stable and is subject to breakage. Any breakage will 4 | include a minor version bump pre-1.0 and a major version bump post-1.0. 5 | [Semantic versioning](https://semver.org/) will be followed post-1.0 release. 
6 | 7 | # libbpf-rs 8 | 9 | [![crates.io badge](https://img.shields.io/crates/v/libbpf-rs.svg)](https://crates.io/crates/libbpf-rs) 10 | 11 | Idiomatic rust wrapper around 12 | [libbpf](https://github.com/libbpf/libbpf) 13 | 14 | To use in your project, add into your `Cargo.toml`: 15 | 16 | ```toml 17 | [dependencies] 18 | libbpf-rs = "0.14" 19 | ``` 20 | 21 | See [full documentation here](https://docs.rs/libbpf-rs). 22 | 23 | # libbpf-cargo 24 | 25 | [![crates.io badge](https://img.shields.io/crates/v/libbpf-cargo.svg)](https://crates.io/crates/libbpf-cargo) 26 | 27 | Helps you build and develop eBPF programs with standard rust tooling 28 | 29 | To use in your project, add into your `Cargo.toml`: 30 | 31 | ```toml 32 | [build-dependencies] 33 | libbpf-cargo = "0.9" 34 | ``` 35 | 36 | See [full documentation here](https://docs.rs/libbpf-cargo). 37 | -------------------------------------------------------------------------------- /libbpf-rs/src/iter.rs: -------------------------------------------------------------------------------- 1 | use nix::{errno, libc, unistd}; 2 | use std::io; 3 | 4 | use crate::*; 5 | 6 | /// Represents a bpf iterator for reading kernel data structures. This requires 7 | /// Linux 5.8. 8 | /// 9 | /// This implements [`std::io::Read`] for reading bytes from the iterator. 10 | /// Methods require working with raw bytes. You may find libraries such as 11 | /// [`plain`](https://crates.io/crates/plain) helpful. 
12 | pub struct Iter { 13 | fd: i32, 14 | } 15 | 16 | impl Iter { 17 | pub fn new(link: &Link) -> Result { 18 | let link_fd = link.get_fd(); 19 | let fd = unsafe { libbpf_sys::bpf_iter_create(link_fd) }; 20 | if fd < 0 { 21 | return Err(Error::System(errno::errno())); 22 | } 23 | Ok(Self { fd }) 24 | } 25 | } 26 | 27 | impl io::Read for Iter { 28 | fn read(&mut self, buf: &mut [u8]) -> std::result::Result { 29 | let bytes_read = unsafe { libc::read(self.fd, buf.as_mut_ptr() as *mut _, buf.len()) }; 30 | if bytes_read < 0 { 31 | return Err(std::io::Error::last_os_error()); 32 | } 33 | Ok(bytes_read as usize) 34 | } 35 | } 36 | 37 | impl Drop for Iter { 38 | fn drop(&mut self) { 39 | let _ = unistd::close(self.fd); 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /libbpf-cargo/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "libbpf-cargo" 3 | description = "Cargo plugin to build bpf programs" 4 | repository = "https://github.com/libbpf/libbpf-rs" 5 | homepage = "https://github.com/libbpf/libbpf-rs" 6 | documentation = "https://docs.rs/crate/libbpf-cargo" 7 | readme = "../README.md" 8 | version = "0.9.4" 9 | authors = ["Daniel Xu "] 10 | edition = "2018" 11 | license = "LGPL-2.1 OR BSD-2-Clause" 12 | keywords = ["bpf", "ebpf", "libbpf"] 13 | 14 | [badges] 15 | maintenance = { status = "actively-developed" } 16 | 17 | # Crate is named libbpf-cargo to be consistent with libbpf-rs. 18 | # Binary must be named cargo-${SUBCOMMAND} to interop with cargo. 
19 | [[bin]] 20 | name = "cargo-libbpf" 21 | path = "src/main.rs" 22 | 23 | [lib] 24 | path = "src/lib.rs" 25 | 26 | [features] 27 | # When turned on, link against system-installed libbpf instead of building 28 | # and linking against vendored libbpf sources 29 | novendor = ["libbpf-sys/novendor"] 30 | 31 | [dependencies] 32 | anyhow = "1.0" 33 | cargo_metadata = "0.12" 34 | libbpf-sys = { version = "0.6.0-1" } 35 | memmap2 = "0.3" 36 | num_enum = "0.5" 37 | regex = "1.5" 38 | scroll = "0.10" 39 | scroll_derive = "0.10" 40 | semver = "1.0" 41 | serde = { version = "1.0", features = ["derive"] } 42 | serde_json = "1.0" 43 | structopt = "0.3" 44 | tempfile = "3.2" 45 | thiserror = "1.0" 46 | 47 | [dev-dependencies] 48 | goblin = "0.2" 49 | -------------------------------------------------------------------------------- /libbpf-cargo/src/make.rs: -------------------------------------------------------------------------------- 1 | use std::path::PathBuf; 2 | use std::process::Command; 3 | 4 | use anyhow::{bail, Context, Result}; 5 | 6 | use crate::{build, gen}; 7 | 8 | pub fn make( 9 | debug: bool, 10 | manifest_path: Option<&PathBuf>, 11 | clang: Option<&PathBuf>, 12 | skip_clang_version_checks: bool, 13 | quiet: bool, 14 | cargo_build_args: Vec, 15 | rustfmt_path: Option<&PathBuf>, 16 | ) -> Result<()> { 17 | if !quiet { 18 | println!("Compiling BPF objects"); 19 | } 20 | build::build(debug, manifest_path, clang, skip_clang_version_checks) 21 | .context("Failed to compile BPF objects")?; 22 | 23 | if !quiet { 24 | println!("Generating skeletons"); 25 | } 26 | gen::gen(debug, manifest_path, None, rustfmt_path).context("Failed to generate skeletons")?; 27 | 28 | let mut cmd = Command::new("cargo"); 29 | cmd.arg("build"); 30 | if quiet { 31 | cmd.arg("--quiet"); 32 | } 33 | for arg in cargo_build_args { 34 | cmd.arg(arg); 35 | } 36 | 37 | let status = cmd.status().context("Failed to spawn child")?; 38 | if !status.success() { 39 | let reason = match status.code() { 40 | 
Some(rc) => format!("exit code {}", rc), 41 | None => "killed by signal".to_string(), 42 | }; 43 | 44 | bail!("Failed to `cargo build`: {}", reason); 45 | } 46 | 47 | Ok(()) 48 | } 49 | -------------------------------------------------------------------------------- /libbpf-cargo/src/btf/c_types.rs: -------------------------------------------------------------------------------- 1 | use scroll_derive::{IOread, Pread as DerivePread, Pwrite, SizeWith}; 2 | 3 | pub const BTF_MAGIC: u16 = 0xEB9F; 4 | pub const BTF_VERSION: u8 = 1; 5 | 6 | /// All offsets are in bytes relative to the end of this header 7 | #[repr(C)] 8 | #[derive(Debug, Clone, DerivePread, Pwrite, IOread, SizeWith)] 9 | pub struct btf_header { 10 | pub magic: u16, 11 | pub version: u8, 12 | pub flags: u8, 13 | pub hdr_len: u32, 14 | pub type_off: u32, 15 | pub type_len: u32, 16 | pub str_off: u32, 17 | pub str_len: u32, 18 | } 19 | 20 | #[repr(C)] 21 | #[derive(Debug, Clone, DerivePread, Pwrite, IOread, SizeWith)] 22 | pub struct btf_type { 23 | pub name_off: u32, 24 | pub info: u32, 25 | pub type_id: u32, 26 | } 27 | 28 | #[repr(C)] 29 | #[derive(Debug, Clone, DerivePread, Pwrite, IOread, SizeWith)] 30 | pub struct btf_enum { 31 | pub name_off: u32, 32 | pub val: i32, 33 | } 34 | 35 | #[repr(C)] 36 | #[derive(Debug, Clone, DerivePread, Pwrite, IOread, SizeWith)] 37 | pub struct btf_array { 38 | pub val_type_id: u32, 39 | pub idx_type_id: u32, 40 | pub nelems: u32, 41 | } 42 | 43 | #[repr(C)] 44 | #[derive(Debug, Clone, DerivePread, Pwrite, IOread, SizeWith)] 45 | pub struct btf_member { 46 | pub name_off: u32, 47 | pub type_id: u32, 48 | pub offset: u32, 49 | } 50 | 51 | #[repr(C)] 52 | #[derive(Debug, Clone, DerivePread, Pwrite, IOread, SizeWith)] 53 | pub struct btf_param { 54 | pub name_off: u32, 55 | pub type_id: u32, 56 | } 57 | 58 | #[repr(C)] 59 | #[derive(Debug, Clone, DerivePread, Pwrite, IOread, SizeWith)] 60 | pub struct btf_datasec_var { 61 | pub type_id: u32, 62 | pub offset: u32, 63 | pub 
size: u32, 64 | } 65 | -------------------------------------------------------------------------------- /LICENSE.BSD-2-Clause: -------------------------------------------------------------------------------- 1 | Valid-License-Identifier: BSD-2-Clause 2 | SPDX-URL: https://spdx.org/licenses/BSD-2-Clause.html 3 | Usage-Guide: 4 | To use the BSD 2-clause "Simplified" License put the following SPDX 5 | tag/value pair into a comment according to the placement guidelines in 6 | the licensing rules documentation: 7 | SPDX-License-Identifier: BSD-2-Clause 8 | License-Text: 9 | 10 | Copyright (c) . All rights reserved. 11 | 12 | Redistribution and use in source and binary forms, with or without 13 | modification, are permitted provided that the following conditions are met: 14 | 15 | 1. Redistributions of source code must retain the above copyright notice, 16 | this list of conditions and the following disclaimer. 17 | 18 | 2. Redistributions in binary form must reproduce the above copyright 19 | notice, this list of conditions and the following disclaimer in the 20 | documentation and/or other materials provided with the distribution. 21 | 22 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 23 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 | ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE 26 | LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 | SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 | INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 | CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 | ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 32 | POSSIBILITY OF SUCH DAMAGE. 
33 | -------------------------------------------------------------------------------- /libbpf-rs/src/util.rs: -------------------------------------------------------------------------------- 1 | use std::ffi::{CStr, CString}; 2 | use std::os::raw::c_char; 3 | use std::path::Path; 4 | 5 | use crate::*; 6 | 7 | pub fn str_to_cstring(s: &str) -> Result { 8 | CString::new(s).map_err(|e| Error::InvalidInput(e.to_string())) 9 | } 10 | 11 | pub fn path_to_cstring>(path: P) -> Result { 12 | let path_str = path.as_ref().to_str().ok_or_else(|| { 13 | Error::InvalidInput(format!("{} is not valid unicode", path.as_ref().display())) 14 | })?; 15 | 16 | str_to_cstring(path_str) 17 | } 18 | 19 | pub fn c_ptr_to_string(p: *const c_char) -> Result { 20 | if p.is_null() { 21 | return Err(Error::Internal("Null string".to_owned())); 22 | } 23 | 24 | let c_str = unsafe { CStr::from_ptr(p) }; 25 | Ok(c_str 26 | .to_str() 27 | .map_err(|e| Error::Internal(e.to_string()))? 28 | .to_owned()) 29 | } 30 | 31 | /// Round up a number to the next multiple of `r` 32 | pub fn roundup(num: usize, r: usize) -> usize { 33 | ((num + (r - 1)) / r) * r 34 | } 35 | 36 | /// Get the number of CPUs in the system, e.g., to interact with per-cpu maps. 
37 | pub fn num_possible_cpus() -> Result { 38 | let ret = unsafe { libbpf_sys::libbpf_num_possible_cpus() }; 39 | if ret < 0 { 40 | // Error code is returned negative, flip to positive to match errno 41 | Err(Error::System(-ret)) 42 | } else { 43 | Ok(ret as usize) 44 | } 45 | } 46 | 47 | #[cfg(test)] 48 | mod tests { 49 | use super::*; 50 | 51 | #[test] 52 | fn test_roundup() { 53 | for i in 1..=256 { 54 | let up = roundup(i, 8); 55 | assert!(up % 8 == 0); 56 | assert!(i <= up); 57 | assert!(up - i < 8); 58 | } 59 | } 60 | 61 | #[test] 62 | fn test_roundup_multiples() { 63 | for i in (8..=256).step_by(8) { 64 | assert_eq!(roundup(i, 8), i); 65 | } 66 | } 67 | 68 | #[test] 69 | fn test_num_possible_cpus() { 70 | let num = num_possible_cpus().unwrap(); 71 | assert!(num > 0); 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /examples/query/src/main.rs: -------------------------------------------------------------------------------- 1 | use std::process::exit; 2 | 3 | use libbpf_rs::query; 4 | use nix::unistd::Uid; 5 | use structopt::StructOpt; 6 | 7 | /// Query the system about BPF-related information 8 | #[derive(Debug, StructOpt)] 9 | enum Command { 10 | /// Display information about progs 11 | Prog, 12 | /// Display information about maps 13 | Map, 14 | /// Display information about BTF 15 | Btf, 16 | /// Display information about links 17 | Link, 18 | } 19 | 20 | fn prog() { 21 | for prog in query::ProgInfoIter::default() { 22 | println!( 23 | "name={:<16} type={:<15} run_count={:<2} runtime_ns={}", 24 | prog.name, 25 | prog.ty.to_string(), 26 | prog.run_cnt, 27 | prog.run_time_ns 28 | ); 29 | } 30 | } 31 | 32 | fn map() { 33 | for map in query::MapInfoIter::default() { 34 | println!("name={:<16} type={}", map.name, map.ty.to_string(),); 35 | } 36 | } 37 | 38 | fn btf() { 39 | for btf in query::BtfInfoIter::default() { 40 | println!("id={:4} size={}", btf.id, btf.btf_size); 41 | } 42 | } 43 | 44 | fn link() { 45 
| for link in query::LinkInfoIter::default() { 46 | let link_type_str = match link.info { 47 | query::LinkTypeInfo::RawTracepoint(_) => "raw_tracepoint", 48 | query::LinkTypeInfo::Tracing(_) => "tracing", 49 | query::LinkTypeInfo::Cgroup(_) => "cgroup", 50 | query::LinkTypeInfo::Iter => "iter", 51 | query::LinkTypeInfo::NetNs(_) => "netns", 52 | query::LinkTypeInfo::Unknown => "unknown", 53 | }; 54 | 55 | println!( 56 | "id={:4} prog_id={:4} type={}", 57 | link.id, link.prog_id, link_type_str 58 | ); 59 | } 60 | } 61 | 62 | fn main() { 63 | if !Uid::effective().is_root() { 64 | eprintln!("Must run as root"); 65 | exit(1); 66 | } 67 | 68 | let opts = Command::from_args(); 69 | 70 | match opts { 71 | Command::Prog => prog(), 72 | Command::Map => map(), 73 | Command::Btf => btf(), 74 | Command::Link => link(), 75 | }; 76 | } 77 | -------------------------------------------------------------------------------- /libbpf-rs/tests/test_print.rs: -------------------------------------------------------------------------------- 1 | //! This test is in its own file because the underlying libbpf_set_print function used by 2 | //! set_print() and ObjectBuilder::debug() sets global state. The default is to run multiple tests 3 | //! in different threads, so this test will always race with the others unless its isolated to a 4 | //! different process. 5 | //! 6 | //! For the same reason, all tests here must run serially. 
7 | 8 | use libbpf_rs::{get_print, set_print, ObjectBuilder, PrintCallback, PrintLevel}; 9 | use serial_test::serial; 10 | use std::sync::atomic::{AtomicBool, Ordering}; 11 | 12 | #[test] 13 | #[serial] 14 | fn test_set_print() { 15 | static CORRECT_LEVEL: AtomicBool = AtomicBool::new(false); 16 | static CORRECT_MESSAGE: AtomicBool = AtomicBool::new(false); 17 | 18 | fn callback(level: PrintLevel, msg: String) { 19 | if level == PrintLevel::Warn { 20 | CORRECT_LEVEL.store(true, Ordering::Relaxed); 21 | } 22 | 23 | if msg.starts_with("libbpf: ") { 24 | CORRECT_MESSAGE.store(true, Ordering::Relaxed); 25 | } 26 | } 27 | 28 | set_print(Some((PrintLevel::Debug, callback))); 29 | // expect_err requires that OpenObject implement Debug, which it does not. 30 | let obj = ObjectBuilder::default().open_file("/dev/null"); 31 | assert!(obj.is_err(), "Successfully loaded /dev/null?"); 32 | 33 | let correct_level = CORRECT_LEVEL.load(Ordering::Relaxed); 34 | let correct_message = CORRECT_MESSAGE.load(Ordering::Relaxed); 35 | assert!(correct_level, "Did not capture a warning"); 36 | assert!(correct_message, "Did not capture the correct message"); 37 | } 38 | 39 | #[test] 40 | #[serial] 41 | fn test_set_restore_print() { 42 | fn callback1(_: PrintLevel, _: String) { 43 | println!("one"); 44 | } 45 | fn callback2(_: PrintLevel, _: String) { 46 | println!("two"); 47 | } 48 | 49 | set_print(Some((PrintLevel::Warn, callback1))); 50 | let prev = get_print(); 51 | assert_eq!(prev, Some((PrintLevel::Warn, callback1 as PrintCallback))); 52 | 53 | set_print(Some((PrintLevel::Debug, callback2))); 54 | let prev = get_print(); 55 | assert_eq!(prev, Some((PrintLevel::Debug, callback2 as PrintCallback))); 56 | } 57 | 58 | #[test] 59 | #[serial] 60 | fn test_set_and_save_print() { 61 | fn callback1(_: PrintLevel, _: String) { 62 | println!("one"); 63 | } 64 | fn callback2(_: PrintLevel, _: String) { 65 | println!("two"); 66 | } 67 | 68 | set_print(Some((PrintLevel::Warn, callback1))); 69 | let 
prev = set_print(Some((PrintLevel::Debug, callback2))); 70 | assert_eq!(prev, Some((PrintLevel::Warn, callback1 as PrintCallback))); 71 | 72 | let prev = set_print(None); 73 | assert_eq!(prev, Some((PrintLevel::Debug, callback2 as PrintCallback))); 74 | } 75 | -------------------------------------------------------------------------------- /libbpf-rs/tests/bin/src/runqslower.bpf.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | // Copyright (c) 2019 Facebook 3 | #include "vmlinux.h" 4 | #include 5 | #include "runqslower.h" 6 | 7 | #define TASK_RUNNING 0 8 | 9 | const volatile __u64 min_us = 0; 10 | const volatile pid_t targ_pid = 0; 11 | const volatile pid_t targ_tgid = 0; 12 | 13 | struct { 14 | __uint(type, BPF_MAP_TYPE_HASH); 15 | __uint(max_entries, 10240); 16 | __type(key, u32); 17 | __type(value, u64); 18 | } start SEC(".maps"); 19 | 20 | struct { 21 | __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY); 22 | __uint(key_size, sizeof(u32)); 23 | __uint(value_size, sizeof(u32)); 24 | } events SEC(".maps"); 25 | 26 | /* record enqueue timestamp */ 27 | static __always_inline 28 | int trace_enqueue(u32 tgid, u32 pid) 29 | { 30 | u64 ts; 31 | 32 | if (!pid) 33 | return 0; 34 | if (targ_tgid && targ_tgid != tgid) 35 | return 0; 36 | if (targ_pid && targ_pid != pid) 37 | return 0; 38 | 39 | ts = bpf_ktime_get_ns(); 40 | bpf_map_update_elem(&start, &pid, &ts, 0); 41 | return 0; 42 | } 43 | 44 | SEC("tp_btf/sched_wakeup") 45 | int handle__sched_wakeup(u64 *ctx) 46 | { 47 | /* TP_PROTO(struct task_struct *p) */ 48 | struct task_struct *p = (void *)ctx[0]; 49 | 50 | return trace_enqueue(p->tgid, p->pid); 51 | } 52 | 53 | SEC("tp_btf/sched_wakeup_new") 54 | int handle__sched_wakeup_new(u64 *ctx) 55 | { 56 | /* TP_PROTO(struct task_struct *p) */ 57 | struct task_struct *p = (void *)ctx[0]; 58 | 59 | return trace_enqueue(p->tgid, p->pid); 60 | } 61 | 62 | SEC("tp_btf/sched_switch") 63 | int 
handle__sched_switch(u64 *ctx) 64 | { 65 | /* TP_PROTO(bool preempt, struct task_struct *prev, 66 | * struct task_struct *next) 67 | */ 68 | struct task_struct *prev = (struct task_struct *)ctx[1]; 69 | struct task_struct *next = (struct task_struct *)ctx[2]; 70 | struct event event = {}; 71 | u64 *tsp, delta_us; 72 | long state; 73 | u32 pid; 74 | 75 | /* ivcsw: treat like an enqueue event and store timestamp */ 76 | if (prev->state == TASK_RUNNING) 77 | trace_enqueue(prev->tgid, prev->pid); 78 | 79 | pid = next->pid; 80 | 81 | /* fetch timestamp and calculate delta */ 82 | tsp = bpf_map_lookup_elem(&start, &pid); 83 | if (!tsp) 84 | return 0; /* missed enqueue */ 85 | 86 | delta_us = (bpf_ktime_get_ns() - *tsp) / 1000; 87 | if (min_us && delta_us <= min_us) 88 | return 0; 89 | 90 | event.pid = pid; 91 | event.delta_us = delta_us; 92 | bpf_probe_read_str(&event.task, sizeof(event.task), next->comm); 93 | 94 | /* output */ 95 | bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, 96 | &event, sizeof(event)); 97 | 98 | bpf_map_delete_elem(&start, &pid); 99 | return 0; 100 | } 101 | 102 | char LICENSE[] SEC("license") = "GPL"; 103 | -------------------------------------------------------------------------------- /examples/runqslower/src/main.rs: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) 2 | 3 | use core::time::Duration; 4 | 5 | use anyhow::{bail, Result}; 6 | use chrono::Local; 7 | use libbpf_rs::PerfBufferBuilder; 8 | use plain::Plain; 9 | use structopt::StructOpt; 10 | 11 | #[path = "bpf/.output/runqslower.skel.rs"] 12 | mod runqslower; 13 | use runqslower::*; 14 | 15 | /// Trace high run queue latency 16 | #[derive(Debug, StructOpt)] 17 | struct Command { 18 | /// Trace latency higher than this value 19 | #[structopt(default_value = "10000")] 20 | latency: u64, 21 | /// Process PID to trace 22 | #[structopt(default_value = "0")] 23 | pid: i32, 24 | /// Thread TID to 
trace 25 | #[structopt(default_value = "0")] 26 | tid: i32, 27 | /// Verbose debug output 28 | #[structopt(short, long)] 29 | verbose: bool, 30 | } 31 | 32 | unsafe impl Plain for runqslower_bss_types::event {} 33 | 34 | fn bump_memlock_rlimit() -> Result<()> { 35 | let rlimit = libc::rlimit { 36 | rlim_cur: 128 << 20, 37 | rlim_max: 128 << 20, 38 | }; 39 | 40 | if unsafe { libc::setrlimit(libc::RLIMIT_MEMLOCK, &rlimit) } != 0 { 41 | bail!("Failed to increase rlimit"); 42 | } 43 | 44 | Ok(()) 45 | } 46 | 47 | fn handle_event(_cpu: i32, data: &[u8]) { 48 | let mut event = runqslower_bss_types::event::default(); 49 | plain::copy_from_bytes(&mut event, data).expect("Data buffer was too short"); 50 | 51 | let now = Local::now(); 52 | let task = std::str::from_utf8(&event.task).unwrap(); 53 | 54 | println!( 55 | "{:8} {:16} {:<7} {:<14}", 56 | now.format("%H:%M:%S"), 57 | task.trim_end_matches(char::from(0)), 58 | event.pid, 59 | event.delta_us 60 | ); 61 | } 62 | 63 | fn handle_lost_events(cpu: i32, count: u64) { 64 | eprintln!("Lost {} events on CPU {}", count, cpu); 65 | } 66 | 67 | fn main() -> Result<()> { 68 | let opts = Command::from_args(); 69 | 70 | let mut skel_builder = RunqslowerSkelBuilder::default(); 71 | if opts.verbose { 72 | skel_builder.obj_builder.debug(true); 73 | } 74 | 75 | bump_memlock_rlimit()?; 76 | let mut open_skel = skel_builder.open()?; 77 | 78 | // Write arguments into prog 79 | open_skel.rodata().min_us = opts.latency; 80 | open_skel.rodata().targ_pid = opts.pid; 81 | open_skel.rodata().targ_tgid = opts.tid; 82 | 83 | // Begin tracing 84 | let mut skel = open_skel.load()?; 85 | skel.attach()?; 86 | println!("Tracing run queue latency higher than {} us", opts.latency); 87 | println!("{:8} {:16} {:7} {:14}", "TIME", "COMM", "TID", "LAT(us)"); 88 | 89 | let perf = PerfBufferBuilder::new(skel.maps_mut().events()) 90 | .sample_cb(handle_event) 91 | .lost_cb(handle_lost_events) 92 | .build()?; 93 | 94 | loop { 95 | 
perf.poll(Duration::from_millis(100))?; 96 | } 97 | } 98 | -------------------------------------------------------------------------------- /examples/capable/src/bpf/capable.bpf.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | // Copyright (c) 2021 BMC Software, Inc. 3 | // Author Devasia Thomas 4 | 5 | #include "vmlinux.h" 6 | #include "capable.h" 7 | #include 8 | #include 9 | #include 10 | 11 | extern int LINUX_KERNEL_VERSION __kconfig; 12 | 13 | const volatile struct { 14 | gid_t tgid; //PID to filter 15 | bool verbose; // Include non audit logs 16 | enum uniqueness unique_type; // Only unique info traces for same pid or cgroup 17 | 18 | } tool_config = {}; 19 | 20 | 21 | struct event _event = {}; //Dummy instance for skeleton to generate definition 22 | 23 | struct unique_key { 24 | int cap; 25 | u32 tgid; 26 | u64 cgroupid; 27 | }; 28 | 29 | struct { 30 | __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY); 31 | __uint(key_size, sizeof(u32)); 32 | __uint(value_size, sizeof(u32)); 33 | } events 34 | SEC(".maps"); 35 | 36 | struct { 37 | __uint(type, BPF_MAP_TYPE_HASH); 38 | __uint(max_entries, 10240); 39 | __type(key, 40 | struct unique_key); 41 | __type(value, u64); 42 | } seen 43 | SEC(".maps"); 44 | 45 | static __always_inline int record_cap(void *ctx, const struct cred *cred, 46 | struct user_namespace *targ_ns, int cap, int cap_opt) { 47 | u64 __pid_tgid = bpf_get_current_pid_tgid(); 48 | gid_t tgid = __pid_tgid >> 32; 49 | pid_t pid = __pid_tgid; 50 | int audit; 51 | int insetid; 52 | 53 | if (LINUX_KERNEL_VERSION >= KERNEL_VERSION(5, 1, 0)) { 54 | // Field changed in v5.1.0 55 | audit = (cap_opt & 0b10) == 0; 56 | insetid = (cap_opt & 0b100) != 0; 57 | } else { 58 | audit = cap_opt; 59 | insetid = -1; 60 | } 61 | 62 | if (tool_config.tgid && tgid != tool_config.tgid) { 63 | return 0; 64 | } 65 | 66 | if (!tool_config.verbose && audit == 0) { 67 | return 0; 68 | } 69 | 70 | uid_t uid = 
bpf_get_current_uid_gid(); 71 | 72 | struct event event = { 73 | .tgid = tgid, 74 | .pid = pid, 75 | .uid = uid, 76 | .cap = cap, 77 | .audit = audit, 78 | .insetid = insetid}; 79 | 80 | if (tool_config.unique_type) { 81 | struct unique_key key = {.cap = cap}; 82 | if (tool_config.unique_type == UNQ_CGROUP) { 83 | key.cgroupid = bpf_get_current_cgroup_id(); 84 | } else { 85 | key.tgid = tgid; 86 | } 87 | 88 | if (bpf_map_lookup_elem(&seen, &key) != NULL) { 89 | return 0; 90 | } 91 | u64 zero = 0; 92 | bpf_map_update_elem(&seen, &key, &zero, 0); 93 | } 94 | 95 | bpf_get_current_comm(&event.comm, sizeof(event.comm)); 96 | bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &event, sizeof(event)); 97 | return 0; 98 | } 99 | 100 | SEC("kprobe/cap_capable") 101 | 102 | int BPF_KPROBE(kprobe__cap_capable, const struct cred *cred, 103 | struct user_namespace *targ_ns, int cap, int cap_opt) { 104 | return record_cap(ctx, cred, targ_ns, cap, cap_opt); 105 | 106 | } 107 | 108 | char LICENSE[] SEC("license") = "GPL"; -------------------------------------------------------------------------------- /examples/runqslower/src/bpf/runqslower.bpf.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | // Copyright (c) 2019 Facebook 3 | #include "vmlinux.h" 4 | #include 5 | #include 6 | #include "runqslower.h" 7 | 8 | #define TASK_RUNNING 0 9 | 10 | const volatile __u64 min_us = 0; 11 | const volatile pid_t targ_pid = 0; 12 | const volatile pid_t targ_tgid = 0; 13 | 14 | // Dummy instance to get skeleton to generate definition for `struct event` 15 | struct event _event = {0}; 16 | 17 | // Kernel 5.14 changed the state field to __state 18 | struct task_struct___pre_5_14 { 19 | long int state; 20 | }; 21 | 22 | struct { 23 | __uint(type, BPF_MAP_TYPE_HASH); 24 | __uint(max_entries, 10240); 25 | __type(key, u32); 26 | __type(value, u64); 27 | } start SEC(".maps"); 28 | 29 | struct { 30 | __uint(type, 
BPF_MAP_TYPE_PERF_EVENT_ARRAY); 31 | __uint(key_size, sizeof(u32)); 32 | __uint(value_size, sizeof(u32)); 33 | } events SEC(".maps"); 34 | 35 | /* record enqueue timestamp */ 36 | static __always_inline 37 | int trace_enqueue(u32 tgid, u32 pid) 38 | { 39 | u64 ts; 40 | 41 | if (!pid) 42 | return 0; 43 | if (targ_tgid && targ_tgid != tgid) 44 | return 0; 45 | if (targ_pid && targ_pid != pid) 46 | return 0; 47 | 48 | ts = bpf_ktime_get_ns(); 49 | bpf_map_update_elem(&start, &pid, &ts, 0); 50 | return 0; 51 | } 52 | 53 | SEC("tp_btf/sched_wakeup") 54 | int handle__sched_wakeup(u64 *ctx) 55 | { 56 | /* TP_PROTO(struct task_struct *p) */ 57 | struct task_struct *p = (void *)ctx[0]; 58 | 59 | return trace_enqueue(p->tgid, p->pid); 60 | } 61 | 62 | SEC("tp_btf/sched_wakeup_new") 63 | int handle__sched_wakeup_new(u64 *ctx) 64 | { 65 | /* TP_PROTO(struct task_struct *p) */ 66 | struct task_struct *p = (void *)ctx[0]; 67 | 68 | return trace_enqueue(p->tgid, p->pid); 69 | } 70 | 71 | static inline long get_task_state(struct task_struct *t) 72 | { 73 | if (bpf_core_field_exists(t->__state)) 74 | return t->__state; 75 | 76 | return ((struct task_struct___pre_5_14*)t)->state; 77 | } 78 | 79 | SEC("tp_btf/sched_switch") 80 | int handle__sched_switch(u64 *ctx) 81 | { 82 | /* TP_PROTO(bool preempt, struct task_struct *prev, 83 | * struct task_struct *next) 84 | */ 85 | struct task_struct *prev = (struct task_struct *)ctx[1]; 86 | struct task_struct *next = (struct task_struct *)ctx[2]; 87 | struct event event = {}; 88 | u64 *tsp, delta_us; 89 | long state = get_task_state(prev); 90 | u32 pid; 91 | 92 | /* ivcsw: treat like an enqueue event and store timestamp */ 93 | if (state == TASK_RUNNING) 94 | trace_enqueue(prev->tgid, prev->pid); 95 | 96 | pid = next->pid; 97 | 98 | /* fetch timestamp and calculate delta */ 99 | tsp = bpf_map_lookup_elem(&start, &pid); 100 | if (!tsp) 101 | return 0; /* missed enqueue */ 102 | 103 | delta_us = (bpf_ktime_get_ns() - *tsp) / 1000; 104 | if 
(min_us && delta_us <= min_us) 105 | return 0; 106 | 107 | event.pid = pid; 108 | event.delta_us = delta_us; 109 | bpf_probe_read_kernel_str(&event.task, sizeof(event.task), next->comm); 110 | 111 | /* output */ 112 | bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, 113 | &event, sizeof(event)); 114 | 115 | bpf_map_delete_elem(&start, &pid); 116 | return 0; 117 | } 118 | 119 | char LICENSE[] SEC("license") = "GPL"; 120 | -------------------------------------------------------------------------------- /libbpf-rs/src/link.rs: -------------------------------------------------------------------------------- 1 | use nix::errno; 2 | use std::path::Path; 3 | 4 | use crate::*; 5 | 6 | /// Represents an attached [`Program`]. 7 | /// 8 | /// This struct is used to model ownership. The underlying program will be detached 9 | /// when this object is dropped if nothing else is holding a reference count. 10 | pub struct Link { 11 | ptr: *mut libbpf_sys::bpf_link, 12 | } 13 | 14 | impl Link { 15 | pub(crate) fn new(ptr: *mut libbpf_sys::bpf_link) -> Self { 16 | Link { ptr } 17 | } 18 | 19 | /// Takes ownership from pointer. 20 | /// 21 | /// # Safety 22 | /// 23 | /// It is not safe to manipulate `ptr` after this operation. 24 | pub unsafe fn from_ptr(ptr: *mut libbpf_sys::bpf_link) -> Self { 25 | Self::new(ptr) 26 | } 27 | 28 | /// Replace the underlying prog with `prog`. 29 | pub fn update_prog(&mut self, prog: Program) -> Result<()> { 30 | let ret = unsafe { libbpf_sys::bpf_link__update_program(self.ptr, prog.ptr) }; 31 | if ret != 0 { 32 | Err(Error::System(errno::errno())) 33 | } else { 34 | Ok(()) 35 | } 36 | } 37 | 38 | /// Release "ownership" of underlying BPF resource (typically, a BPF program 39 | /// attached to some BPF hook, e.g., tracepoint, kprobe, etc). Disconnected 40 | /// links, when destructed through bpf_link__destroy() call won't attempt to 41 | /// detach/unregisted that BPF resource. 
This is useful in situations where, 42 | /// say, attached BPF program has to outlive userspace program that attached it 43 | /// in the system. Depending on type of BPF program, though, there might be 44 | /// additional steps (like pinning BPF program in BPF FS) necessary to ensure 45 | /// exit of userspace program doesn't trigger automatic detachment and clean up 46 | /// inside the kernel. 47 | pub fn disconnect(&mut self) { 48 | unsafe { libbpf_sys::bpf_link__disconnect(self.ptr) } 49 | } 50 | 51 | /// [Pin](https://facebookmicrosites.github.io/bpf/blog/2018/08/31/object-lifetime.html#bpffs) 52 | /// this link to bpffs. 53 | pub fn pin>(&mut self, path: P) -> Result<()> { 54 | let path_c = util::path_to_cstring(path)?; 55 | let path_ptr = path_c.as_ptr(); 56 | 57 | let ret = unsafe { libbpf_sys::bpf_link__pin(self.ptr, path_ptr) }; 58 | if ret != 0 { 59 | // Error code is returned negative, flip to positive to match errno 60 | Err(Error::System(-ret)) 61 | } else { 62 | Ok(()) 63 | } 64 | } 65 | 66 | /// [Unpin](https://facebookmicrosites.github.io/bpf/blog/2018/08/31/object-lifetime.html#bpffs) 67 | /// from bpffs 68 | pub fn unpin(&mut self) -> Result<()> { 69 | let ret = unsafe { libbpf_sys::bpf_link__unpin(self.ptr) }; 70 | if ret != 0 { 71 | // Error code is returned negative, flip to positive to match errno 72 | Err(Error::System(-ret)) 73 | } else { 74 | Ok(()) 75 | } 76 | } 77 | 78 | /// Returns the file descriptor of the link. 79 | pub fn get_fd(&self) -> i32 { 80 | unsafe { libbpf_sys::bpf_link__fd(self.ptr) } 81 | } 82 | } 83 | 84 | impl Drop for Link { 85 | fn drop(&mut self) { 86 | let _ = unsafe { libbpf_sys::bpf_link__destroy(self.ptr) }; 87 | } 88 | } 89 | -------------------------------------------------------------------------------- /libbpf-rs/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! # libbpf-rs 2 | //! 3 | //! 
`libbpf-rs` is a safe, idiomatic, and opinionated wrapper around 4 | //! [libbpf](https://github.com/libbpf/libbpf/). 5 | //! 6 | //! libbpf-rs, together with `libbpf-cargo` (libbpf cargo plugin) allow you 7 | //! to write Compile-Once-Run-Everywhere (CO-RE) eBPF programs. Note this document 8 | //! uses "eBPF" and "BPF" interchangeably. 9 | //! 10 | //! More information about CO-RE is [available 11 | //! here](https://facebookmicrosites.github.io/bpf/blog/2020/02/19/bpf-portability-and-co-re.html). 12 | //! 13 | //! ## High level workflow 14 | //! 15 | //! 1. Create new rust project (via `cargo new` or similar) at path `$PROJ_PATH` 16 | //! 1. Create directory `$PROJ_PATH/src/bpf` 17 | //! 1. Write CO-RE bpf code in `$PROJ_PATH/src/bpf/${MYFILE}.bpf.c`, where `$MYFILE` may be any 18 | //! valid filename. Note the `.bpf.c` extension is required. 19 | //! 1. Create a [build script](https://doc.rust-lang.org/cargo/reference/build-scripts.html) 20 | //! that builds and generates a skeleton module using `libbpf_cargo::SkeletonBuilder` 21 | //! 1. Write your userspace code by importing and using the generated module. Import the 22 | //! module by using the [path 23 | //! attribute](https://doc.rust-lang.org/reference/items/modules.html#the-path-attribute). 24 | //! Your userspace code goes in `$PROJ_PATH/src/` as it would in a normal rust project. 25 | //! 1. Continue regular rust workflow (ie `cargo build`, `cargo run`, etc) 26 | //! 27 | //! ## Alternate workflow 28 | //! 29 | //! While using the skeleton is recommended, it is also possible to directly use libbpf-rs. 30 | //! 31 | //! 1. Follow steps 1-3 of "High level workflow" 32 | //! 1. Generate a BPF object file. Options include manually invoking `clang`, creating a build 33 | //! script to invoke `clang`, or using `libbpf-cargo` cargo plugins. 34 | //! 1. Write your userspace code in `$PROJ_PATH/src/` as you would a normal rust project and point 35 | //! libbpf-rs at your BPF object file 36 | //! 1. 
Continue regular rust workflow (ie `cargo build`, `cargo run`, etc) 37 | //! 38 | //! ## Design 39 | //! 40 | //! libbpf-rs models various "phases": 41 | //! ```text 42 | //! from_*() load() 43 | //! | | 44 | //! v v 45 | //! ObjectBuilder -> OpenObject -> Object 46 | //! ^ ^ 47 | //! | | 48 | //! | 49 | //! | 50 | //! 51 | //! ``` 52 | //! 53 | //! The entry point into libbpf-rs is [`ObjectBuilder`]. `ObjectBuilder` helps open the BPF object 54 | //! file. After the object file is opened, you are returned an [`OpenObject`] where you can 55 | //! perform all your pre-load operations. Pre-load means before any BPF maps are created or BPF 56 | //! programs are loaded and verified by the kernel. Finally, after the BPF object is loaded, you 57 | //! are returned an [`Object`] instance where you can read/write to BPF maps, attach BPF programs 58 | //! to hooks, etc. 59 | //! 60 | //! You _must_ keep the [`Object`] alive the entire duration you interact with anything inside the 61 | //! BPF object it represents. This is further documented in [`Object`] documentation. 62 | //! 63 | //! ## Example 64 | //! 65 | //! This is probably the best way to understand how libbpf-rs and libbpf-cargo work together. 66 | //! 67 | //! [See example here](https://github.com/libbpf/libbpf-rs/tree/master/examples/runqslower). 
68 | 69 | mod error; 70 | mod iter; 71 | mod link; 72 | mod map; 73 | mod object; 74 | mod perf_buffer; 75 | mod print; 76 | mod program; 77 | pub mod query; 78 | mod ringbuf; 79 | /// Used for skeleton -- an end user may not consider this API stable 80 | #[doc(hidden)] 81 | pub mod skeleton; 82 | mod util; 83 | 84 | pub use libbpf_sys; 85 | 86 | pub use crate::error::{Error, Result}; 87 | pub use crate::iter::Iter; 88 | pub use crate::link::Link; 89 | pub use crate::map::{Map, MapFlags, MapType, OpenMap}; 90 | pub use crate::object::{Object, ObjectBuilder, OpenObject}; 91 | pub use crate::perf_buffer::{PerfBuffer, PerfBufferBuilder}; 92 | pub use crate::print::{get_print, set_print, PrintCallback, PrintLevel}; 93 | pub use crate::program::{OpenProgram, Program, ProgramAttachType, ProgramType}; 94 | pub use crate::ringbuf::{RingBuffer, RingBufferBuilder}; 95 | pub use crate::util::num_possible_cpus; 96 | -------------------------------------------------------------------------------- /libbpf-cargo/src/main.rs: -------------------------------------------------------------------------------- 1 | use std::path::PathBuf; 2 | 3 | use anyhow::Result; 4 | use structopt::StructOpt; 5 | 6 | mod btf; 7 | #[doc(hidden)] 8 | mod build; 9 | mod gen; 10 | mod make; 11 | mod metadata; 12 | 13 | #[doc(hidden)] 14 | #[derive(Debug, StructOpt)] 15 | struct Opt { 16 | #[structopt(subcommand)] 17 | wrapper: Wrapper, 18 | } 19 | 20 | // cargo invokes subcommands with the first argument as 21 | // the subcommand name. ie. 22 | // 23 | // cargo ${command} --help 24 | // 25 | // into 26 | // 27 | // cargo-${command} ${command} --help 28 | // 29 | // so we must have a dummy subcommand here to eat the arg. 
30 | #[doc(hidden)] 31 | #[derive(Debug, StructOpt)] 32 | enum Wrapper { 33 | Libbpf(Command), 34 | } 35 | 36 | #[doc(hidden)] 37 | #[derive(Debug, StructOpt)] 38 | #[structopt(verbatim_doc_comment)] 39 | /// 40 | /// cargo-libbpf is a cargo subcommand that helps develop and build eBPF (BPF) programs. 41 | enum Command { 42 | /// Build bpf programs 43 | Build { 44 | #[structopt(short, long)] 45 | debug: bool, 46 | #[structopt(long, parse(from_os_str))] 47 | /// Path to top level Cargo.toml 48 | manifest_path: Option, 49 | #[structopt(long, parse(from_os_str))] 50 | /// Path to clang binary 51 | clang_path: Option, 52 | #[structopt(long)] 53 | /// Skip clang version checks 54 | skip_clang_version_checks: bool, 55 | }, 56 | /// Generate skeleton files 57 | Gen { 58 | #[structopt(short, long)] 59 | debug: bool, 60 | #[structopt(long, parse(from_os_str))] 61 | /// Path to top level Cargo.toml 62 | manifest_path: Option, 63 | #[structopt(long, parse(from_os_str))] 64 | /// Path to rustfmt binary 65 | rustfmt_path: Option, 66 | #[structopt(long, parse(from_os_str))] 67 | /// Generate skeleton for the specified object file and print results to stdout 68 | /// 69 | /// When specified, skeletons for the rest of the project will not be generated 70 | object: Option, 71 | }, 72 | /// Build project 73 | Make { 74 | #[structopt(short, long)] 75 | debug: bool, 76 | #[structopt(long, parse(from_os_str))] 77 | /// Path to top level Cargo.toml 78 | manifest_path: Option, 79 | #[structopt(long, parse(from_os_str))] 80 | /// Path to clang binary 81 | clang_path: Option, 82 | #[structopt(long)] 83 | /// Skip clang version checks 84 | skip_clang_version_checks: bool, 85 | #[structopt(short, long)] 86 | /// Quiet output 87 | quiet: bool, 88 | /// Arguments to pass to `cargo build` 89 | /// 90 | /// Example: cargo libbpf build -- --package mypackage 91 | cargo_build_args: Vec, 92 | #[structopt(long, parse(from_os_str))] 93 | /// Path to rustfmt binary 94 | rustfmt_path: Option, 95 | }, 
96 | } 97 | 98 | #[doc(hidden)] 99 | fn main() -> Result<()> { 100 | let opts = Opt::from_args(); 101 | 102 | match opts.wrapper { 103 | Wrapper::Libbpf(cmd) => match cmd { 104 | Command::Build { 105 | debug, 106 | manifest_path, 107 | clang_path, 108 | skip_clang_version_checks, 109 | } => build::build( 110 | debug, 111 | manifest_path.as_ref(), 112 | clang_path.as_ref(), 113 | skip_clang_version_checks, 114 | ), 115 | Command::Gen { 116 | debug, 117 | manifest_path, 118 | rustfmt_path, 119 | object, 120 | } => gen::gen( 121 | debug, 122 | manifest_path.as_ref(), 123 | rustfmt_path.as_ref(), 124 | object.as_ref(), 125 | ), 126 | Command::Make { 127 | debug, 128 | manifest_path, 129 | clang_path, 130 | skip_clang_version_checks, 131 | quiet, 132 | cargo_build_args, 133 | rustfmt_path, 134 | } => make::make( 135 | debug, 136 | manifest_path.as_ref(), 137 | clang_path.as_ref(), 138 | skip_clang_version_checks, 139 | quiet, 140 | cargo_build_args, 141 | rustfmt_path.as_ref(), 142 | ), 143 | }, 144 | } 145 | } 146 | -------------------------------------------------------------------------------- /libbpf-rs/src/print.rs: -------------------------------------------------------------------------------- 1 | use crate::*; 2 | use lazy_static::lazy_static; 3 | use std::io::{self, Write}; 4 | use std::os::raw::c_char; 5 | use std::sync::Mutex; 6 | 7 | #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy)] 8 | #[repr(u32)] 9 | pub enum PrintLevel { 10 | Warn = libbpf_sys::LIBBPF_WARN, 11 | Info = libbpf_sys::LIBBPF_INFO, 12 | Debug = libbpf_sys::LIBBPF_DEBUG, 13 | } 14 | 15 | impl From for PrintLevel { 16 | fn from(level: libbpf_sys::libbpf_print_level) -> Self { 17 | match level { 18 | libbpf_sys::LIBBPF_WARN => Self::Warn, 19 | libbpf_sys::LIBBPF_INFO => Self::Info, 20 | libbpf_sys::LIBBPF_DEBUG => Self::Debug, 21 | // shouldn't happen, but anything unknown becomes the highest level 22 | _ => Self::Warn, 23 | } 24 | } 25 | } 26 | 27 | pub type PrintCallback = 
fn(PrintLevel, String);

/// Mimic the default print functionality of libbpf. This way if the user calls `get_print` when no
/// previous callback had been set, with the intention of restoring it, everything will behave as
/// expected.
fn default_callback(_lvl: PrintLevel, msg: String) {
    let _ = io::stderr().write(msg.as_bytes());
}

// While we can't say that set_print is thread-safe, because we shouldn't assume that of
// libbpf_set_print, we should still make sure that things are sane on the rust side of things.
// Therefore we are using a lock to keep the log level and the callback in sync.
//
// We don't do anything that can panic with the lock held, so we'll unconditionally unwrap() when
// locking the mutex.
//
// Note that default print behavior ignores debug messages.
lazy_static! {
    static ref PRINT_CB: Mutex<Option<(PrintLevel, PrintCallback)>> =
        Mutex::new(Some((PrintLevel::Info, default_callback)));
}

// C-ABI shim installed into libbpf; formats the message and forwards it to the
// currently registered Rust callback when the level is at or above `min_level`.
extern "C" fn outer_print_cb(
    level: libbpf_sys::libbpf_print_level,
    fmtstr: *const c_char,
    va_list: *mut libbpf_sys::__va_list_tag,
) -> i32 {
    let level = level.into();
    // Copy the callback out inside a temporary scope so the lock is not held
    // while the user callback runs.
    if let Some((min_level, func)) = { *PRINT_CB.lock().unwrap() } {
        if level <= min_level {
            let msg = match unsafe { vsprintf::vsprintf(fmtstr, va_list) } {
                Ok(s) => s,
                Err(e) => format!("Failed to parse libbpf output: {}", e),
            };
            func(level, msg);
        }
    }
    0 // return value is ignored by libbpf
}

/// Set a callback to receive log messages from libbpf, instead of printing them to stderr.
///
/// # Arguments
///
/// * `callback` - Either a tuple `(min_level, function)` where `min_level` is the lowest priority
///   log message to handle, or `None` to disable all printing.
///
/// This overrides (and is overridden by) [`ObjectBuilder::debug`]
///
/// # Examples
///
/// To pass all messages to the `log` crate:
///
/// ```
/// use log;
/// use libbpf_rs::{PrintLevel, set_print};
///
/// fn print_to_log(level: PrintLevel, msg: String) {
///     match level {
///         PrintLevel::Debug => log::debug!("{}", msg),
///         PrintLevel::Info => log::info!("{}", msg),
///         PrintLevel::Warn => log::warn!("{}", msg),
///     }
/// }
///
/// set_print(Some((PrintLevel::Debug, print_to_log)));
/// ```
///
/// To disable printing completely:
///
/// ```
/// use libbpf_rs::set_print;
/// set_print(None);
/// ```
///
/// To temporarily suppress output:
///
/// ```
/// use libbpf_rs::set_print;
///
/// let prev = set_print(None);
/// // do things quietly
/// set_print(prev);
/// ```
pub fn set_print(
    mut callback: Option<(PrintLevel, PrintCallback)>,
) -> Option<(PrintLevel, PrintCallback)> {
    // Install the C shim only when a Rust callback is present; passing `None`
    // to libbpf_set_print restores libbpf's built-in stderr printing.
    let real_cb: libbpf_sys::libbpf_print_fn_t = callback.as_ref().and(Some(outer_print_cb));
    // Swap the new callback into the global slot and return the previous one.
    std::mem::swap(&mut callback, &mut *PRINT_CB.lock().unwrap());
    unsafe { libbpf_sys::libbpf_set_print(real_cb) };
    callback
}

/// Return the current print callback and level.
122 | /// 123 | /// # Examples 124 | /// 125 | /// To temporarliy suppress output: 126 | /// 127 | /// ``` 128 | /// use libbpf_rs::{get_print, set_print}; 129 | /// 130 | /// let prev = get_print(); 131 | /// set_print(None); 132 | /// // do things quietly 133 | /// set_print(prev); 134 | /// ``` 135 | pub fn get_print() -> Option<(PrintLevel, PrintCallback)> { 136 | *PRINT_CB.lock().unwrap() 137 | } 138 | -------------------------------------------------------------------------------- /libbpf-cargo/src/metadata.rs: -------------------------------------------------------------------------------- 1 | use std::fs; 2 | use std::path::Path; 3 | use std::path::PathBuf; 4 | 5 | use anyhow::{bail, Result}; 6 | use cargo_metadata::{MetadataCommand, Package}; 7 | use serde::Deserialize; 8 | use serde_json::value::Value; 9 | 10 | #[derive(Default, Deserialize)] 11 | struct LibbpfPackageMetadata { 12 | prog_dir: Option, 13 | target_dir: Option, 14 | } 15 | 16 | #[derive(Deserialize)] 17 | #[serde(rename_all = "lowercase")] 18 | enum PackageMetadata { 19 | Libbpf(LibbpfPackageMetadata), 20 | } 21 | 22 | #[derive(Debug, Clone)] 23 | pub struct UnprocessedObj { 24 | /// Package the object belongs to 25 | pub package: String, 26 | /// Path to .c 27 | pub path: PathBuf, 28 | /// Where to place compiled object 29 | pub out: PathBuf, 30 | /// Object name (eg: `runqslower.bpf.c` -> `runqslower`) 31 | pub name: String, 32 | } 33 | 34 | fn get_package( 35 | debug: bool, 36 | package: &Package, 37 | workspace_target_dir: &Path, 38 | ) -> Result> { 39 | if debug { 40 | println!("Metadata for package={}", package.name); 41 | println!("\t{}", package.metadata); 42 | } 43 | 44 | let package_metadata = if package.metadata != Value::Null { 45 | let PackageMetadata::Libbpf(lpm) = serde_json::from_value(package.metadata.clone())?; 46 | lpm 47 | } else { 48 | LibbpfPackageMetadata::default() 49 | }; 50 | 51 | // Respect custom target directories specified by package 52 | let mut package_root = 
package.manifest_path.clone(); 53 | // Remove "Cargo.toml" 54 | package_root.pop(); 55 | let in_dir = if let Some(d) = package_metadata.prog_dir { 56 | if debug { 57 | println!("Custom prog_dir={}", d.to_string_lossy()); 58 | } 59 | // Add requested path 60 | package_root.push(d); 61 | package_root 62 | } else { 63 | // Add default path 64 | package_root.push("src/bpf"); 65 | package_root 66 | }; 67 | 68 | // Respect custom target directories specified by package 69 | let mut target_dir = workspace_target_dir.to_path_buf(); 70 | let out_dir = if let Some(d) = package_metadata.target_dir { 71 | if debug { 72 | println!("Custom target_dir={}", d.to_string_lossy()); 73 | } 74 | 75 | // Add requested path 76 | target_dir.push(d); 77 | target_dir 78 | } else { 79 | // Add default path 80 | target_dir.push("bpf"); 81 | target_dir 82 | }; 83 | 84 | // Get an iterator to the input directory. If directory is missing, 85 | // skip the current project 86 | let dir_iter = match fs::read_dir(&in_dir) { 87 | Ok(d) => d, 88 | Err(e) => { 89 | if let Some(ec) = e.raw_os_error() { 90 | // ENOENT == 2 91 | if ec == 2 { 92 | return Ok(vec![]); 93 | } else { 94 | bail!("Invalid directory: {}: {}", in_dir.to_string_lossy(), e); 95 | } 96 | } else { 97 | bail!(e); 98 | } 99 | } 100 | }; 101 | 102 | Ok(dir_iter 103 | .filter_map(|file| { 104 | let file_path = match file { 105 | Ok(f) => f.path(), 106 | Err(_) => return None, 107 | }; 108 | 109 | if !file_path.is_file() { 110 | return None; 111 | } 112 | 113 | // Only take files with extension ".bpf.c" 114 | if let Some(file_name) = file_path.as_path().file_name() { 115 | if file_name.to_string_lossy().ends_with(".bpf.c") { 116 | return Some(UnprocessedObj { 117 | package: package.name.clone(), 118 | name: file_path 119 | .as_path() 120 | .file_stem() // Remove `.c` suffix 121 | .unwrap() // We already know it's a file 122 | .to_string_lossy() 123 | .rsplitn(2, '.') // Remove `.bpf` suffix 124 | .nth(1) 125 | .unwrap() // Already know it 
has enough `.`s 126 | .to_string(), 127 | out: out_dir.clone(), 128 | path: file_path, 129 | }); 130 | } 131 | } 132 | 133 | None 134 | }) 135 | .collect()) 136 | } 137 | 138 | pub fn get(debug: bool, manifest_path: Option<&PathBuf>) -> Result> { 139 | let mut cmd = MetadataCommand::new(); 140 | 141 | if let Some(path) = manifest_path { 142 | cmd.manifest_path(path); 143 | } 144 | 145 | let metadata = match cmd.exec() { 146 | Ok(m) => m, 147 | Err(e) => bail!("Failed to get cargo metadata: {}", e), 148 | }; 149 | 150 | if metadata.workspace_members.is_empty() { 151 | bail!("Failed to find targets") 152 | } 153 | 154 | let mut v: Vec = Vec::new(); 155 | for id in &metadata.workspace_members { 156 | for package in &metadata.packages { 157 | if id == &package.id { 158 | match &mut get_package(debug, &package, &metadata.target_directory) { 159 | Ok(vv) => v.append(vv), 160 | Err(e) => bail!("Failed to process package={}, error={}", package.name, e), 161 | } 162 | } 163 | } 164 | } 165 | 166 | Ok(v) 167 | } 168 | -------------------------------------------------------------------------------- /libbpf-rs/src/ringbuf.rs: -------------------------------------------------------------------------------- 1 | use core::ffi::c_void; 2 | use std::boxed::Box; 3 | use std::os::raw::c_ulong; 4 | use std::ptr; 5 | use std::slice; 6 | use std::time::Duration; 7 | 8 | use crate::*; 9 | 10 | struct RingBufferCallback { 11 | cb: Box i32>, 12 | } 13 | 14 | impl RingBufferCallback { 15 | fn new(cb: F) -> Self 16 | where 17 | F: FnMut(&[u8]) -> i32 + 'static, 18 | { 19 | RingBufferCallback { cb: Box::new(cb) } 20 | } 21 | } 22 | 23 | /// Builds [`RingBuffer`] instances. 24 | /// 25 | /// `ringbuf`s are a special kind of [`Map`], used to transfer data between 26 | /// [`Program`]s and userspace. As of Linux 5.8, the `ringbuf` map is now 27 | /// preferred over the `perf buffer`. 
28 | #[derive(Default)] 29 | pub struct RingBufferBuilder { 30 | fd_callbacks: Vec<(i32, RingBufferCallback)>, 31 | } 32 | 33 | impl RingBufferBuilder { 34 | pub fn new() -> Self { 35 | RingBufferBuilder { 36 | fd_callbacks: vec![], 37 | } 38 | } 39 | 40 | /// Add a new ringbuf `map` and associated `callback` to this ring buffer 41 | /// manager. The callback should take one argument, a slice of raw bytes, 42 | /// and return an i32. 43 | /// 44 | /// Non-zero return values in the callback will stop ring buffer consumption early. 45 | /// 46 | /// The callback provides a raw byte slice. You may find libraries such as 47 | /// [`plain`](https://crates.io/crates/plain) helpful. 48 | pub fn add(&mut self, map: &Map, callback: NewF) -> Result<&mut Self> 49 | where 50 | NewF: FnMut(&[u8]) -> i32 + 'static, 51 | { 52 | if map.map_type() != MapType::RingBuf { 53 | return Err(Error::InvalidInput("Must use a RingBuf map".into())); 54 | } 55 | self.fd_callbacks 56 | .push((map.fd(), RingBufferCallback::new(callback))); 57 | Ok(self) 58 | } 59 | 60 | /// Build a new [`RingBuffer`]. Must have added at least one ringbuf. 
61 | pub fn build(self) -> Result { 62 | let mut cbs = vec![]; 63 | let mut ptr: *mut libbpf_sys::ring_buffer = ptr::null_mut(); 64 | let c_sample_cb: libbpf_sys::ring_buffer_sample_fn = Some(Self::call_sample_cb); 65 | 66 | for (fd, callback) in self.fd_callbacks { 67 | let sample_cb_ptr = Box::into_raw(Box::new(callback)); 68 | if ptr.is_null() { 69 | // Allocate a new ringbuf manager and add a ringbuf to it 70 | ptr = unsafe { 71 | libbpf_sys::ring_buffer__new( 72 | fd, 73 | c_sample_cb, 74 | sample_cb_ptr as *mut _, 75 | std::ptr::null_mut(), 76 | ) 77 | }; 78 | 79 | // Handle errors 80 | let err = unsafe { libbpf_sys::libbpf_get_error(ptr as *const _) }; 81 | if err != 0 { 82 | return Err(Error::System(err as i32)); 83 | } 84 | } else { 85 | // Add a ringbuf to the existing ringbuf manager 86 | let err = unsafe { 87 | libbpf_sys::ring_buffer__add(ptr, fd, c_sample_cb, sample_cb_ptr as *mut _) 88 | }; 89 | 90 | // Handle errors 91 | if err != 0 { 92 | return Err(Error::System(err as i32)); 93 | } 94 | } 95 | 96 | unsafe { cbs.push(Box::from_raw(sample_cb_ptr)) }; 97 | } 98 | 99 | if ptr.is_null() { 100 | return Err(Error::InvalidInput( 101 | "You must add at least one ring buffer map and callback before building".into(), 102 | )); 103 | } 104 | 105 | Ok(RingBuffer { ptr, _cbs: cbs }) 106 | } 107 | 108 | unsafe extern "C" fn call_sample_cb(ctx: *mut c_void, data: *mut c_void, size: c_ulong) -> i32 { 109 | let callback_struct = ctx as *mut RingBufferCallback; 110 | let callback = (*callback_struct).cb.as_mut(); 111 | 112 | callback(slice::from_raw_parts(data as *const u8, size as usize)) 113 | } 114 | } 115 | 116 | /// The canonical interface for managing a collection of `ringbuf` maps. 117 | /// 118 | /// `ringbuf`s are a special kind of [`Map`], used to transfer data between 119 | /// [`Program`]s and userspace. As of Linux 5.8, the `ringbuf` map is now 120 | /// preferred over the `perf buffer`. 
121 | pub struct RingBuffer { 122 | ptr: *mut libbpf_sys::ring_buffer, 123 | #[allow(clippy::vec_box)] 124 | _cbs: Vec>, 125 | } 126 | 127 | impl RingBuffer { 128 | /// Poll from all open ring buffers, calling the registered callback for 129 | /// each one. Polls continually until we either run out of events to consume 130 | /// or `timeout` is reached. 131 | pub fn poll(&self, timeout: Duration) -> Result<()> { 132 | assert!(!self.ptr.is_null()); 133 | 134 | let ret = unsafe { libbpf_sys::ring_buffer__poll(self.ptr, timeout.as_millis() as i32) }; 135 | 136 | if ret < 0 { 137 | Err(Error::System(-ret)) 138 | } else { 139 | Ok(()) 140 | } 141 | } 142 | 143 | /// Greedily consume from all open ring buffers, calling the registered 144 | /// callback for each one. Consumes continually until we run out of events 145 | /// to consume or one of the callbacks returns a non-zero integer. 146 | pub fn consume(&self) -> Result<()> { 147 | assert!(!self.ptr.is_null()); 148 | 149 | let ret = unsafe { libbpf_sys::ring_buffer__consume(self.ptr) }; 150 | 151 | if ret < 0 { 152 | Err(Error::System(-ret)) 153 | } else { 154 | Ok(()) 155 | } 156 | } 157 | } 158 | 159 | impl Drop for RingBuffer { 160 | fn drop(&mut self) { 161 | unsafe { 162 | if !self.ptr.is_null() { 163 | libbpf_sys::ring_buffer__free(self.ptr); 164 | } 165 | } 166 | } 167 | } 168 | -------------------------------------------------------------------------------- /libbpf-cargo/src/btf/types.rs: -------------------------------------------------------------------------------- 1 | use std::fmt; 2 | 3 | use num_enum::TryFromPrimitive; 4 | 5 | #[derive(Debug, Copy, Clone, TryFromPrimitive, PartialEq)] 6 | #[repr(u32)] 7 | pub enum BtfKind { 8 | Void = 0, 9 | Int = 1, 10 | Ptr = 2, 11 | Array = 3, 12 | Struct = 4, 13 | Union = 5, 14 | Enum = 6, 15 | Fwd = 7, 16 | Typedef = 8, 17 | Volatile = 9, 18 | Const = 10, 19 | Restrict = 11, 20 | Func = 12, 21 | FuncProto = 13, 22 | Var = 14, 23 | Datasec = 15, 24 | } 25 | 26 | 
#[derive(Debug, Copy, Clone, TryFromPrimitive, PartialEq)] 27 | #[repr(u8)] 28 | pub enum BtfIntEncoding { 29 | None = 0, 30 | Signed = 1 << 0, 31 | Char = 1 << 1, 32 | Bool = 1 << 2, 33 | } 34 | 35 | #[derive(Debug)] 36 | pub struct BtfInt<'a> { 37 | pub name: &'a str, 38 | pub bits: u8, 39 | pub offset: u8, 40 | pub encoding: BtfIntEncoding, 41 | } 42 | 43 | #[derive(Debug)] 44 | pub struct BtfPtr { 45 | pub pointee_type: u32, 46 | } 47 | 48 | #[derive(Debug)] 49 | pub struct BtfArray { 50 | pub nelems: u32, 51 | pub index_type_id: u32, 52 | pub val_type_id: u32, 53 | } 54 | 55 | #[derive(Debug)] 56 | pub struct BtfMember<'a> { 57 | pub name: &'a str, 58 | pub type_id: u32, 59 | pub bit_offset: u32, 60 | pub bit_size: u8, 61 | } 62 | 63 | #[derive(Debug)] 64 | pub struct BtfComposite<'a> { 65 | pub name: String, 66 | pub is_struct: bool, 67 | pub size: u32, 68 | pub members: Vec>, 69 | } 70 | 71 | #[derive(Debug)] 72 | pub struct BtfEnumValue<'a> { 73 | pub name: &'a str, 74 | pub value: i32, 75 | } 76 | 77 | #[derive(Debug)] 78 | pub struct BtfEnum<'a> { 79 | pub name: String, 80 | pub size: u32, 81 | pub values: Vec>, 82 | } 83 | 84 | #[derive(Debug, Copy, Clone, PartialEq)] 85 | pub enum BtfFwdKind { 86 | Struct, 87 | Union, 88 | } 89 | 90 | #[derive(Debug)] 91 | pub struct BtfFwd<'a> { 92 | pub name: &'a str, 93 | pub kind: BtfFwdKind, 94 | } 95 | 96 | #[derive(Debug)] 97 | pub struct BtfTypedef<'a> { 98 | pub name: &'a str, 99 | pub type_id: u32, 100 | } 101 | 102 | #[derive(Debug)] 103 | pub struct BtfVolatile { 104 | pub type_id: u32, 105 | } 106 | 107 | #[derive(Debug)] 108 | pub struct BtfConst { 109 | pub type_id: u32, 110 | } 111 | 112 | #[derive(Debug)] 113 | pub struct BtfRestrict { 114 | pub type_id: u32, 115 | } 116 | 117 | #[derive(Debug)] 118 | pub struct BtfFunc<'a> { 119 | pub name: &'a str, 120 | pub type_id: u32, 121 | } 122 | 123 | #[derive(Debug)] 124 | pub struct BtfFuncParam<'a> { 125 | pub name: &'a str, 126 | pub type_id: u32, 127 | } 
128 | 129 | #[derive(Debug)] 130 | pub struct BtfFuncProto<'a> { 131 | pub ret_type_id: u32, 132 | pub params: Vec>, 133 | } 134 | 135 | #[derive(Debug, Copy, Clone, TryFromPrimitive, PartialEq)] 136 | #[repr(u32)] 137 | pub enum BtfVarLinkage { 138 | Static = 0, 139 | GlobalAlloc = 1, 140 | GlobalExtern = 2, 141 | } 142 | 143 | #[derive(Debug)] 144 | pub struct BtfVar<'a> { 145 | pub name: &'a str, 146 | pub type_id: u32, 147 | pub linkage: BtfVarLinkage, 148 | } 149 | 150 | #[derive(Debug)] 151 | pub struct BtfDatasecVar { 152 | pub type_id: u32, 153 | pub offset: u32, 154 | pub size: u32, 155 | } 156 | 157 | #[derive(Debug)] 158 | pub struct BtfDatasec<'a> { 159 | pub name: &'a str, 160 | pub size: u32, 161 | pub vars: Vec, 162 | } 163 | 164 | pub enum BtfType<'a> { 165 | Void, 166 | Int(BtfInt<'a>), 167 | Ptr(BtfPtr), 168 | Array(BtfArray), 169 | Struct(BtfComposite<'a>), 170 | Union(BtfComposite<'a>), 171 | Enum(BtfEnum<'a>), 172 | Fwd(BtfFwd<'a>), 173 | Typedef(BtfTypedef<'a>), 174 | Volatile(BtfVolatile), 175 | Const(BtfConst), 176 | Restrict(BtfRestrict), 177 | Func(BtfFunc<'a>), 178 | FuncProto(BtfFuncProto<'a>), 179 | Var(BtfVar<'a>), 180 | Datasec(BtfDatasec<'a>), 181 | } 182 | 183 | impl<'a> BtfType<'a> { 184 | pub fn kind(&self) -> BtfKind { 185 | match self { 186 | BtfType::Void => BtfKind::Void, 187 | BtfType::Ptr(_) => BtfKind::Ptr, 188 | BtfType::Fwd(_) => BtfKind::Fwd, 189 | BtfType::Typedef(_) => BtfKind::Typedef, 190 | BtfType::Volatile(_) => BtfKind::Volatile, 191 | BtfType::Const(_) => BtfKind::Const, 192 | BtfType::Restrict(_) => BtfKind::Restrict, 193 | BtfType::Func(_) => BtfKind::Func, 194 | BtfType::Int(_) => BtfKind::Int, 195 | BtfType::Var(_) => BtfKind::Var, 196 | BtfType::Array(_) => BtfKind::Array, 197 | BtfType::Struct(_) => BtfKind::Struct, 198 | BtfType::Union(_) => BtfKind::Union, 199 | BtfType::Enum(_) => BtfKind::Enum, 200 | BtfType::FuncProto(_) => BtfKind::FuncProto, 201 | BtfType::Datasec(_) => BtfKind::Datasec, 202 | } 203 
| } 204 | } 205 | 206 | impl<'a> fmt::Display for BtfType<'a> { 207 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 208 | match self { 209 | BtfType::Void => write!(f, "void"), 210 | BtfType::Ptr(_) => write!(f, "ptr"), 211 | BtfType::Fwd(_) => write!(f, "fwd"), 212 | BtfType::Typedef(_) => write!(f, "typedef"), 213 | BtfType::Volatile(_) => write!(f, "volatile"), 214 | BtfType::Const(_) => write!(f, "const"), 215 | BtfType::Restrict(_) => write!(f, "restrict"), 216 | BtfType::Func(_) => write!(f, "func"), 217 | BtfType::Int(_) => write!(f, "int"), 218 | BtfType::Var(_) => write!(f, "var"), 219 | BtfType::Array(_) => write!(f, "array"), 220 | BtfType::Struct(_) => write!(f, "struct"), 221 | BtfType::Union(_) => write!(f, "union"), 222 | BtfType::Enum(_) => write!(f, "enum"), 223 | BtfType::FuncProto(_) => write!(f, "funcproto"), 224 | BtfType::Datasec(_) => write!(f, "datasec"), 225 | } 226 | } 227 | } 228 | -------------------------------------------------------------------------------- /libbpf-rs/src/perf_buffer.rs: -------------------------------------------------------------------------------- 1 | use core::ffi::c_void; 2 | use std::boxed::Box; 3 | use std::slice; 4 | use std::time::Duration; 5 | 6 | use crate::*; 7 | 8 | fn is_power_of_two(i: usize) -> bool { 9 | i > 0 && (i & (i - 1)) == 0 10 | } 11 | 12 | // Workaround for `trait_alias` 13 | // (https://doc.rust-lang.org/unstable-book/language-features/trait-alias.html) 14 | // not being available yet. This is just a custom trait plus a blanket implementation. 15 | pub trait SampleCb: FnMut(i32, &[u8]) {} 16 | impl SampleCb for T where T: FnMut(i32, &[u8]) {} 17 | 18 | pub trait LostCb: FnMut(i32, u64) {} 19 | impl LostCb for T where T: FnMut(i32, u64) {} 20 | 21 | struct CbStruct<'b> { 22 | sample_cb: Option>, 23 | lost_cb: Option>, 24 | } 25 | 26 | /// Builds [`PerfBuffer`] instances. 
27 | pub struct PerfBufferBuilder<'a, 'b> { 28 | map: &'a Map, 29 | pages: usize, 30 | sample_cb: Option>, 31 | lost_cb: Option>, 32 | } 33 | 34 | impl<'a, 'b> PerfBufferBuilder<'a, 'b> { 35 | pub fn new(map: &'a Map) -> Self { 36 | Self { 37 | map, 38 | pages: 64, 39 | sample_cb: None, 40 | lost_cb: None, 41 | } 42 | } 43 | } 44 | 45 | impl<'a, 'b> PerfBufferBuilder<'a, 'b> { 46 | /// Callback to run when a sample is received. 47 | /// 48 | /// This callback provides a raw byte slice. You may find libraries such as 49 | /// [`plain`](https://crates.io/crates/plain) helpful. 50 | /// 51 | /// Callback arguments are: `(cpu, data)`. 52 | pub fn sample_cb(self, cb: NewCb) -> PerfBufferBuilder<'a, 'b> { 53 | PerfBufferBuilder { 54 | map: self.map, 55 | pages: self.pages, 56 | sample_cb: Some(Box::new(cb)), 57 | lost_cb: self.lost_cb, 58 | } 59 | } 60 | 61 | /// Callback to run when a sample is received. 62 | /// 63 | /// Callback arguments are: `(cpu, lost_count)`. 64 | pub fn lost_cb(self, cb: NewCb) -> PerfBufferBuilder<'a, 'b> { 65 | PerfBufferBuilder { 66 | map: self.map, 67 | pages: self.pages, 68 | sample_cb: self.sample_cb, 69 | lost_cb: Some(Box::new(cb)), 70 | } 71 | } 72 | 73 | /// The number of pages to size the ring buffer. 
74 | pub fn pages(&mut self, pages: usize) -> &mut Self { 75 | self.pages = pages; 76 | self 77 | } 78 | 79 | pub fn build(self) -> Result> { 80 | if self.map.map_type() != MapType::PerfEventArray { 81 | return Err(Error::InvalidInput( 82 | "Must use a PerfEventArray map".to_string(), 83 | )); 84 | } 85 | 86 | if !is_power_of_two(self.pages) { 87 | return Err(Error::InvalidInput( 88 | "Page count must be power of two".to_string(), 89 | )); 90 | } 91 | 92 | let c_sample_cb: libbpf_sys::perf_buffer_sample_fn = if self.sample_cb.is_some() { 93 | Some(Self::call_sample_cb) 94 | } else { 95 | None 96 | }; 97 | 98 | let c_lost_cb: libbpf_sys::perf_buffer_lost_fn = if self.lost_cb.is_some() { 99 | Some(Self::call_lost_cb) 100 | } else { 101 | None 102 | }; 103 | 104 | let callback_struct_ptr = Box::into_raw(Box::new(CbStruct { 105 | sample_cb: self.sample_cb, 106 | lost_cb: self.lost_cb, 107 | })); 108 | 109 | let ptr = unsafe { 110 | libbpf_sys::perf_buffer__new( 111 | self.map.fd(), 112 | self.pages as libbpf_sys::size_t, 113 | c_sample_cb, 114 | c_lost_cb, 115 | callback_struct_ptr as *mut _, 116 | std::ptr::null(), 117 | ) 118 | }; 119 | let err = unsafe { libbpf_sys::libbpf_get_error(ptr as *const _) }; 120 | if err != 0 { 121 | Err(Error::System(err as i32)) 122 | } else { 123 | Ok(PerfBuffer { 124 | ptr, 125 | _cb_struct: unsafe { Box::from_raw(callback_struct_ptr) }, 126 | }) 127 | } 128 | } 129 | 130 | unsafe extern "C" fn call_sample_cb(ctx: *mut c_void, cpu: i32, data: *mut c_void, size: u32) { 131 | let callback_struct = ctx as *mut CbStruct; 132 | 133 | if let Some(cb) = &mut (*callback_struct).sample_cb { 134 | cb(cpu, slice::from_raw_parts(data as *const u8, size as usize)); 135 | } 136 | } 137 | 138 | unsafe extern "C" fn call_lost_cb(ctx: *mut c_void, cpu: i32, count: u64) { 139 | let callback_struct = ctx as *mut CbStruct; 140 | 141 | if let Some(cb) = &mut (*callback_struct).lost_cb { 142 | cb(cpu, count); 143 | } 144 | } 145 | } 146 | 147 | /// 
Represents a special kind of [`Map`]. Typically used to transfer data between 148 | /// [`Program`]s and userspace. 149 | pub struct PerfBuffer<'b> { 150 | ptr: *mut libbpf_sys::perf_buffer, 151 | // Hold onto the box so it'll get dropped when PerfBuffer is dropped 152 | _cb_struct: Box>, 153 | } 154 | 155 | impl<'b> PerfBuffer<'b> { 156 | pub fn poll(&self, timeout: Duration) -> Result<()> { 157 | let ret = unsafe { libbpf_sys::perf_buffer__poll(self.ptr, timeout.as_millis() as i32) }; 158 | if ret < 0 { 159 | Err(Error::System(-ret)) 160 | } else { 161 | Ok(()) 162 | } 163 | } 164 | } 165 | 166 | impl<'b> Drop for PerfBuffer<'b> { 167 | fn drop(&mut self) { 168 | unsafe { 169 | libbpf_sys::perf_buffer__free(self.ptr); 170 | } 171 | } 172 | } 173 | 174 | #[cfg(test)] 175 | mod tests { 176 | use super::*; 177 | 178 | fn is_power_of_two_slow(i: usize) -> bool { 179 | if i == 0 { 180 | return false; 181 | } 182 | 183 | let mut n = i; 184 | while n > 1 { 185 | if n & 0x01 as usize == 1 { 186 | return false; 187 | } 188 | n >>= 1; 189 | } 190 | true 191 | } 192 | 193 | #[test] 194 | fn test_is_power_of_two() { 195 | for i in 0..=256 { 196 | assert_eq!(is_power_of_two(i), is_power_of_two_slow(i)); 197 | } 198 | } 199 | } 200 | -------------------------------------------------------------------------------- /examples/capable/README.md: -------------------------------------------------------------------------------- 1 | Demonstrations of capable, the Linux eBPF/libpf-rs version. 2 | 3 | To build this project: 4 | ``` 5 | $ cd examples/capable 6 | $ cargo build 7 | $ cd ../../target/debug 8 | ``` 9 | capable traces calls to the kernel cap_capable() function, which does security 10 | capability checks, and prints details for each call. 
For example: 11 | ``` 12 | $ ./capable 13 | TIME UID PID COMM CAP NAME AUDIT 14 | 22:11:23 114 2676 snmpd 12 CAP_NET_ADMIN 1 15 | 22:11:23 0 6990 run 24 CAP_SYS_RESOURCE 1 16 | 22:11:23 0 7003 chmod 3 CAP_FOWNER 1 17 | 22:11:23 0 7003 chmod 4 CAP_FSETID 1 18 | 22:11:23 0 7005 chmod 4 CAP_FSETID 1 19 | 22:11:23 0 7005 chmod 4 CAP_FSETID 1 20 | 22:11:23 0 7006 chown 4 CAP_FSETID 1 21 | 22:11:23 0 7006 chown 4 CAP_FSETID 1 22 | 22:11:23 0 6990 setuidgid 6 CAP_SETGID 1 23 | 22:11:23 0 6990 setuidgid 6 CAP_SETGID 1 24 | 22:11:23 0 6990 setuidgid 7 CAP_SETUID 1 25 | 22:11:24 0 7013 run 24 CAP_SYS_RESOURCE 1 26 | 22:11:24 0 7026 chmod 3 CAP_FOWNER 1 27 | 22:11:24 0 7026 chmod 4 CAP_FSETID 1 28 | 22:11:24 0 7028 chmod 4 CAP_FSETID 1 29 | 22:11:24 0 7028 chmod 4 CAP_FSETID 1 30 | 22:11:24 0 7029 chown 4 CAP_FSETID 1 31 | 22:11:24 0 7029 chown 4 CAP_FSETID 1 32 | 22:11:24 0 7013 setuidgid 6 CAP_SETGID 1 33 | 22:11:24 0 7013 setuidgid 6 CAP_SETGID 1 34 | 22:11:24 0 7013 setuidgid 7 CAP_SETUID 1 35 | 22:11:25 0 7036 run 24 CAP_SYS_RESOURCE 1 36 | 22:11:25 0 7049 chmod 3 CAP_FOWNER 1 37 | 22:11:25 0 7049 chmod 4 CAP_FSETID 1 38 | 22:11:25 0 7051 chmod 4 CAP_FSETID 1 39 | 22:11:25 0 7051 chmod 4 CAP_FSETID 1 40 | ``` 41 | 42 | Checks where ``AUDIT`` is ``0`` are ignored by default, which can be changed 43 | with ``-v`` but is more verbose. 44 | 45 | We can show the ``TID`` and ``INSETID`` columns with ``-x``. 46 | Since only a recent kernel version >= 5.1 reports the ``INSETID`` bit to cap_capable(), 47 | the fallback value "N/A" will be displayed on older kernels. 
48 | ``` 49 | $ ./capable -x 50 | TIME UID PID TID COMM CAP NAME AUDIT INSETID 51 | 08:22:36 0 12869 12869 chown 0 CAP_CHOWN 1 0 52 | 08:22:36 0 12869 12869 chown 0 CAP_CHOWN 1 0 53 | 08:22:36 0 12869 12869 chown 0 CAP_CHOWN 1 0 54 | 08:23:02 0 13036 13036 setuidgid 6 CAP_SETGID 1 0 55 | 08:23:02 0 13036 13036 setuidgid 6 CAP_SETGID 1 0 56 | 08:23:02 0 13036 13036 setuidgid 7 CAP_SETUID 1 1 57 | 08:23:13 0 13085 13085 chmod 3 CAP_FOWNER 1 0 58 | 08:23:13 0 13085 13085 chmod 4 CAP_FSETID 1 0 59 | 08:23:13 0 13085 13085 chmod 3 CAP_FOWNER 1 0 60 | 08:23:13 0 13085 13085 chmod 4 CAP_FSETID 1 0 61 | 08:23:13 0 13085 13085 chmod 4 CAP_FSETID 1 0 62 | 08:24:27 0 13522 13522 ping 13 CAP_NET_RAW 1 0 63 | [...] 64 | ``` 65 | 66 | This can be useful for general debugging, and also security enforcement: 67 | determining a whitelist of capabilities an application needs. 68 | 69 | The output above includes various capability checks: ``snmpd`` checking 70 | ``CAP_NET_ADMIN``, run checking ``CAP_SYS_RESOURCE``, then some short-lived processes 71 | checking ``CAP_FOWNER``, ``CAP_FSETID``, etc. 72 | 98 | Some processes can do a lot of security capability checks, generating a lot of 99 | output.
In this case, the --unique option is useful to only print once the same 100 | set of capability, pid(1) or cgroup (2) 102 | ``` 103 | # ./capable --unique 1 104 | ``` 105 | 113 | 114 | ### USAGE: 115 | 116 | ``` 117 | sudo capable -h 118 | examples 0.1.0 119 | Usage instructions 120 | 121 | USAGE: 122 | capable [FLAGS] [OPTIONS] 123 | 124 | FLAGS: 125 | --debug debug output for libbpf-rs 126 | -x, --extra extra fields: Show TID and INSETID columns 127 | -h, --help Prints help information 128 | -V, --version Prints version information 129 | -v, --verbose verbose: include non-audit checks 130 | 131 | OPTIONS: 132 | -p, --pid only trace [default: 0] 133 | --unique don't repeat stacks for the same pid<1> or cgroup<2> [default: 0] 134 | ``` -------------------------------------------------------------------------------- /examples/capable/src/main.rs: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) 2 | // Copyright (c) 2021 BMC Software, Inc. 3 | // Author Devasia Thomas 4 | // 5 | // Based on capable(8) by Brendan Gregg 6 | use core::time::Duration; 7 | use std::str::FromStr; 8 | 9 | use anyhow::{bail, Result}; 10 | use chrono::Local; 11 | use libbpf_rs::PerfBufferBuilder; 12 | use phf::phf_map; 13 | use plain::Plain; 14 | use structopt::StructOpt; 15 | 16 | #[path = "bpf/.output/capable.skel.rs"] 17 | mod capable; 18 | 19 | use capable::capable_rodata_types::uniqueness; 20 | use capable::*; 21 | 22 | static CAPS: phf::Map = phf_map! 
{ 23 | 0i32 => "CAP_CHOWN", 24 | 1i32 => "CAP_DAC_OVERRIDE", 25 | 2i32 => "CAP_DAC_READ_SEARCH", 26 | 3i32 => "CAP_FOWNER", 27 | 4i32 => "CAP_FSETID", 28 | 5i32 => "CAP_KILL", 29 | 6i32 => "CAP_SETGID", 30 | 7i32 => "CAP_SETUID", 31 | 8i32 => "CAP_SETPCAP", 32 | 9i32 => "CAP_LINUX_IMMUTABLE", 33 | 10i32 => "CAP_NET_BIND_SERVICE", 34 | 11i32 => "CAP_NET_BROADCAST", 35 | 12i32 => "CAP_NET_ADMIN", 36 | 13i32 => "CAP_NET_RAW", 37 | 14i32 => "CAP_IPC_LOCK", 38 | 15i32 => "CAP_IPC_OWNER", 39 | 16i32 => "CAP_SYS_MODULE", 40 | 17i32 => "CAP_SYS_RAWIO", 41 | 18i32 => "CAP_SYS_CHROOT", 42 | 19i32 => "CAP_SYS_PTRACE", 43 | 20i32 => "CAP_SYS_PACCT", 44 | 21i32 => "CAP_SYS_ADMIN", 45 | 22i32 => "CAP_SYS_BOOT", 46 | 23i32 => "CAP_SYS_NICE", 47 | 24i32 => "CAP_SYS_RESOURCE", 48 | 25i32 => "CAP_SYS_TIME", 49 | 26i32 => "CAP_SYS_TTY_CONFIG", 50 | 27i32 => "CAP_MKNOD", 51 | 28i32 => "CAP_LEASE", 52 | 29i32 => "CAP_AUDIT_WRITE", 53 | 30i32 => "CAP_AUDIT_CONTROL", 54 | 31i32 => "CAP_SETFCAP", 55 | 32i32 => "CAP_MAC_OVERRIDE", 56 | 33i32 => "CAP_MAC_ADMIN", 57 | 34i32 => "CAP_SYSLOG", 58 | 35i32 => "CAP_WAKE_ALARM", 59 | 36i32 => "CAP_BLOCK_SUSPEND", 60 | 37i32 => "CAP_AUDIT_READ", 61 | 38i32 => "CAP_PERFMON", 62 | 39i32 => "CAP_BPF", 63 | 40i32 => "CAP_CHECKPOINT_RESTORE", 64 | }; 65 | 66 | impl FromStr for uniqueness { 67 | type Err = &'static str; 68 | fn from_str(unq_type: &str) -> Result { 69 | let unq_type_lower: &str = &unq_type.to_lowercase(); 70 | match unq_type_lower { 71 | "off" => Ok(uniqueness::UNQ_OFF), 72 | "pid" => Ok(uniqueness::UNQ_PID), 73 | "cgroup" => Ok(uniqueness::UNQ_CGROUP), 74 | _ => Err("Use 1 for pid (default), 2 for cgroups"), 75 | } 76 | } 77 | } 78 | 79 | /// Trace capabilities 80 | #[derive(Debug, Copy, Clone, StructOpt)] 81 | #[structopt(name = "examples", about = "Usage instructions")] 82 | struct Command { 83 | /// verbose: include non-audit checks 84 | #[structopt(short, long)] 85 | verbose: bool, 86 | /// only trace 87 | #[structopt(short, long, 
default_value = "0")] 88 | pid: u32, 89 | /// extra fields: Show TID and INSETID columns 90 | #[structopt(short = "x", long = "extra")] 91 | extra_fields: bool, 92 | /// don't repeat same info for the same or 93 | #[structopt(long = "unique", default_value = "off")] 94 | unique_type: uniqueness, 95 | /// debug output for libbpf-rs 96 | #[structopt(long)] 97 | debug: bool, 98 | } 99 | 100 | unsafe impl Plain for capable_bss_types::event {} 101 | 102 | fn bump_memlock_rlimit() -> Result<()> { 103 | let rlimit = libc::rlimit { 104 | rlim_cur: 128 << 20, 105 | rlim_max: 128 << 20, 106 | }; 107 | 108 | if unsafe { libc::setrlimit(libc::RLIMIT_MEMLOCK, &rlimit) } != 0 { 109 | bail!("Failed to increase rlimit"); 110 | } 111 | 112 | Ok(()) 113 | } 114 | 115 | fn print_banner(extra_fields: bool) { 116 | if extra_fields { 117 | println!( 118 | "{:9} {:6} {:6} {:6} {:16} {:4} {:20} {:6} {}", 119 | "TIME", "UID", "PID", "TID", "COMM", "CAP", "NAME", "AUDIT", "INSETID" 120 | ); 121 | } else { 122 | println!( 123 | "{:9} {:6} {:6} {:16} {:4} {:20} {:6}", 124 | "TIME", "UID", "PID", "COMM", "CAP", "NAME", "AUDIT" 125 | ); 126 | } 127 | } 128 | 129 | fn _handle_event(opts: Command, event: capable_bss_types::event) { 130 | let now = Local::now().format("%H:%M:%S"); 131 | let comm_str = std::str::from_utf8(&event.comm) 132 | .unwrap() 133 | .trim_end_matches(char::from(0)); 134 | let cap_name = match CAPS.get(&event.cap) { 135 | Some(&x) => x, 136 | None => "?", 137 | }; 138 | if opts.extra_fields { 139 | println!( 140 | "{:9} {:6} {:<6} {:<6} {:<16} {:<4} {:<20} {:<6} {}", 141 | now, 142 | event.uid, 143 | event.tgid, 144 | event.pid, 145 | comm_str, 146 | event.cap, 147 | cap_name, 148 | event.audit, 149 | event.insetid 150 | ); 151 | } else { 152 | println!( 153 | "{:9} {:6} {:<6} {:<16} {:<4} {:<20} {:<6}", 154 | now, event.uid, event.tgid, comm_str, event.cap, cap_name, event.audit 155 | ); 156 | } 157 | } 158 | 159 | fn handle_lost_events(cpu: i32, count: u64) { 160 | 
eprintln!("Lost {} events on CPU {}", count, cpu); 161 | } 162 | 163 | fn main() -> Result<()> { 164 | let opts = Command::from_args(); 165 | 166 | let mut skel_builder = CapableSkelBuilder::default(); 167 | if opts.debug { 168 | skel_builder.obj_builder.debug(true); 169 | } 170 | 171 | bump_memlock_rlimit()?; 172 | 173 | let mut open_skel = skel_builder.open()?; 174 | //Pass configuration to BPF 175 | open_skel.rodata().tool_config.tgid = opts.pid; //tgid in kernel is pid in userland 176 | open_skel.rodata().tool_config.verbose = opts.verbose; 177 | open_skel.rodata().tool_config.unique_type = opts.unique_type; 178 | 179 | let mut skel = open_skel.load()?; 180 | skel.attach()?; 181 | 182 | print_banner(opts.extra_fields); 183 | let handle_event = move |_cpu: i32, data: &[u8]| { 184 | let mut event = capable_bss_types::event::default(); 185 | plain::copy_from_bytes(&mut event, data).expect("Data buffer was too short"); 186 | _handle_event(opts, event); 187 | }; 188 | let perf = PerfBufferBuilder::new(skel.maps_mut().events()) 189 | .sample_cb(handle_event) 190 | .lost_cb(handle_lost_events) 191 | .build()?; 192 | 193 | loop { 194 | perf.poll(Duration::from_millis(100))?; 195 | } 196 | } 197 | -------------------------------------------------------------------------------- /libbpf-cargo/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! libbpf-cargo helps you develop and build eBPF (BPF) programs with standard rust tooling. 2 | //! 3 | //! libbpf-cargo supports two interfaces: 4 | //! * [`SkeletonBuilder`] API, for use with [build scripts](https://doc.rust-lang.org/cargo/reference/build-scripts.html) 5 | //! * `cargo-libbpf` cargo subcommand, for use with `cargo` 6 | //! 7 | //! The **build script interface is recommended** over the cargo subcommand interface because: 8 | //! * once set up, you cannot forget to update the generated skeletons if your source changes 9 | //! 
* build scripts are standard practice for projects that include codegen 10 | //! * newcomers to your project can `cargo build` and it will "just work" 11 | //! 12 | //! The following sections in this document describe the `cargo-libbpf` plugin. See the API 13 | //! reference for documentation on the build script interface. 14 | //! 15 | //! # Configuration 16 | //! 17 | //! cargo-libbpf consumes the following Cargo.toml configuration options: 18 | //! 19 | //! ```text 20 | //! [package.metadata.libbpf] 21 | //! prog_dir = "src/other_bpf_dir" # default: /src/bpf 22 | //! target_dir = "other_target_dir" # default: /bpf 23 | //! ``` 24 | //! 25 | //! * `prog_dir`: path relative to package Cargo.toml to search for bpf progs 26 | //! * `target_dir`: path relative to workspace target directory to place compiled bpf progs 27 | //! 28 | //! # Subcommands 29 | //! 30 | //! ## build 31 | //! 32 | //! `cargo libbpf build` compiles `.bpf.c` C files into corresponding `.bpf.o` ELF 33 | //! object files. Each object file may contain one or more BPF programs, maps, and associated 34 | //! metadata. The object file may then be handed over to `libbpf-rs` for loading and interaction. 35 | //! 36 | //! cargo-libbpf-build enforces a few conventions: 37 | //! 38 | //! * source file names must be in the `.bpf.c` format 39 | //! * object file names will be generated in `.bpf.o` format 40 | //! * there may not be any two identical `.bpf.c` file names in any two projects in a 41 | //! cargo workspace 42 | //! 43 | //! ## gen 44 | //! 45 | //! `cargo libbpf gen` generates a skeleton module for each BPF object file in the project. Each 46 | //! `.bpf.o` object file will have its own module. One `mod.rs` file is also generated. All 47 | //! output files are placed into `package.metadata.libbpf.prog_dir`. 48 | //! 49 | //! Be careful to run cargo-libbpf-build before running cargo-libbpf-gen. cargo-libbpf-gen reads 50 | //! object files from `package.metadata.libbpf.target_dir`. 51 | //! 
use std::path::{Path, PathBuf};
use std::result;

use tempfile::tempdir;
use thiserror::Error;

// libbpf-cargo binary is the primary consumer of the following modules. As such,
// we do not use all the symbols. Silence any unused code warnings.
#[allow(dead_code)]
mod btf;
#[allow(dead_code)]
mod build;
#[allow(dead_code)]
mod gen;
#[allow(dead_code)]
mod make;
#[allow(dead_code)]
mod metadata;

#[cfg(test)]
mod test;

/// Canonical error type for this crate.
#[derive(Error, Debug)]
pub enum Error {
    /// Compiling the BPF object file failed.
    #[error("Error building BPF object file: {0}")]
    Build(String),
    /// Generating the Rust skeleton failed.
    #[error("Error generating skeleton: {0}")]
    Generate(String),
}

pub type Result<T> = result::Result<T, Error>;

/// `SkeletonBuilder` builds and generates a single skeleton.
///
/// This interface is meant to be used in build scripts.
///
/// # Examples
///
/// ```no_run
/// use libbpf_cargo::SkeletonBuilder;
///
/// SkeletonBuilder::new("myobject.bpf.c")
///     .debug(true)
///     .clang("/opt/clang/clang")
///     .generate("/output/path")
///     .unwrap();
/// ```
pub struct SkeletonBuilder {
    debug: bool,
    source: PathBuf,
    clang: Option<PathBuf>,
    clang_args: String,
    skip_clang_version_check: bool,
    rustfmt: PathBuf,
}

impl SkeletonBuilder {
    /// Create a new builder instance, where `source` is the path to the BPF object source
    /// (typically suffixed by `.bpf.c`)
    pub fn new<P: AsRef<Path>>(source: P) -> Self {
        SkeletonBuilder {
            debug: false,
            source: source.as_ref().to_path_buf(),
            clang: None,
            clang_args: String::new(),
            skip_clang_version_check: false,
            rustfmt: "rustfmt".into(),
        }
    }

    /// Turn debug output on or off
    ///
    /// Default is off
    pub fn debug(&mut self, debug: bool) -> &mut SkeletonBuilder {
        self.debug = debug;
        self
    }

    /// Specify which `clang` binary to use
    ///
    /// Default searches `$PATH` for `clang`
    pub fn clang<P: AsRef<Path>>(&mut self, clang: P) -> &mut SkeletonBuilder {
        self.clang = Some(clang.as_ref().to_path_buf());
        self
    }

    /// Pass additional arguments to `clang` when building BPF object file
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use libbpf_cargo::SkeletonBuilder;
    ///
    /// SkeletonBuilder::new("myobject.bpf.c")
    ///     .clang_args("-DMACRO=value -I/some/include/dir")
    ///     .generate("/output/path")
    ///     .unwrap();
    /// ```
    pub fn clang_args<S: AsRef<str>>(&mut self, opts: S) -> &mut SkeletonBuilder {
        self.clang_args = opts.as_ref().to_string();
        self
    }

    /// Specify whether or not to skip clang version check
    ///
    /// Default is `false`
    pub fn skip_clang_version_check(&mut self, skip: bool) -> &mut SkeletonBuilder {
        self.skip_clang_version_check = skip;
        self
    }

    /// Specify which `rustfmt` binary to use
    ///
    /// Default searches `$PATH` for `rustfmt`
    pub fn rustfmt<P: AsRef<Path>>(&mut self, rustfmt: P) -> &mut SkeletonBuilder {
        self.rustfmt = rustfmt.as_ref().to_path_buf();
        self
    }

    /// Generate the skeleton at path `output`
    ///
    /// Compiles the source into a temporary object file, then emits the
    /// skeleton module to `output`.
    pub fn generate<P: AsRef<Path>>(&self, output: P) -> Result<()> {
        let filename = self
            .source
            .file_name()
            .ok_or_else(|| Error::Build("Missing file name".into()))?
            .to_str()
            .ok_or_else(|| Error::Build("Invalid unicode in file name".into()))?;
        if !filename.ends_with(".bpf.c") {
            return Err(Error::Build(format!(
                "Source file={} does not have .bpf.c suffix",
                self.source.display()
            )));
        }

        // Safe to unwrap b/c we already checked suffix
        // NOTE: this takes everything before the *first* '.', so a name like
        // "my.object.bpf.c" yields "my".
        let name = filename.split('.').next().unwrap();
        let dir = tempdir().map_err(|e| Error::Build(e.to_string()))?;
        let objfile = dir.path().join(format!("{}.o", name));

        build::build_single(
            self.debug,
            &self.source,
            &objfile,
            self.clang.as_ref(),
            self.skip_clang_version_check,
            &self.clang_args,
        )
        .map_err(|e| Error::Build(e.to_string()))?;

        gen::gen_single(
            self.debug,
            &objfile,
            gen::OutputDest::File(output.as_ref()),
            Some(&self.rustfmt),
        )
        .map_err(|e| Error::Generate(e.to_string()))?;

        Ok(())
    }
}
/// Ensure no two progs would compile to the same destination path.
///
/// Two identically named `.bpf.c` files anywhere in the workspace would
/// silently overwrite each other's output, so this is a hard error.
fn check_progs(objs: &[UnprocessedObj]) -> Result<()> {
    let mut set = HashSet::with_capacity(objs.len());
    for obj in objs {
        // OK to unwrap() file_name() b/c we already checked earlier that this is a valid file
        let dest = obj
            .out
            .as_path()
            .join(obj.path.as_path().file_name().unwrap());
        if !set.insert(dest) {
            bail!(
                "Duplicate obj={} detected",
                obj.path.as_path().file_name().unwrap().to_string_lossy()
            );
        }
    }

    Ok(())
}

/// Pull the `x.y.z` version string out of `clang --version` output.
///
/// Returns an error when the output does not contain
/// `clang version <x.y.z>` anywhere.
fn extract_version(output: &str) -> Result<&str> {
    let re = Regex::new(r"clang\s+version\s+(?P<version_str>\d+\.\d+\.\d+)")?;
    let captures = re
        .captures(output)
        .ok_or_else(|| anyhow!("Failed to run regex on version string"))?;

    captures.name("version_str").map_or_else(
        || Err(anyhow!("Failed to find version capture group")),
        |v| Ok(v.as_str()),
    )
}
48 | #[cfg(not(feature = "novendor"))] 49 | fn extract_libbpf_headers_to_disk() -> Result> { 50 | use std::fs::OpenOptions; 51 | use std::io::Write; 52 | 53 | let tempdir = TempDir::new()?; 54 | let dir = tempdir.path().join("bpf"); 55 | fs::create_dir_all(&dir)?; 56 | for (filename, contents) in libbpf_sys::API_HEADERS.iter() { 57 | let path = dir.as_path().join(filename); 58 | let mut file = OpenOptions::new().write(true).create(true).open(path)?; 59 | file.write_all(contents.as_bytes())?; 60 | } 61 | 62 | Ok(Some(tempdir)) 63 | } 64 | 65 | #[cfg(feature = "novendor")] 66 | fn extract_libbpf_headers_to_disk() -> Result> { 67 | return Ok(None); 68 | } 69 | 70 | fn check_clang(debug: bool, clang: &Path, skip_version_checks: bool) -> Result<()> { 71 | let output = Command::new(clang.as_os_str()) 72 | .arg("--version") 73 | .output() 74 | .context("Failed to execute clang")?; 75 | 76 | if !output.status.success() { 77 | bail!("Failed to execute clang binary"); 78 | } 79 | 80 | if skip_version_checks { 81 | return Ok(()); 82 | } 83 | 84 | // Example output: 85 | // 86 | // clang version 10.0.0 87 | // Target: x86_64-pc-linux-gnu 88 | // Thread model: posix 89 | // InstalledDir: /bin 90 | // 91 | let output = String::from_utf8_lossy(&output.stdout); 92 | let version_str = extract_version(&output)?; 93 | let version = Version::parse(version_str)?; 94 | if debug { 95 | println!("{} is version {}", clang.display(), version); 96 | } 97 | 98 | if version < Version::parse("10.0.0").unwrap() { 99 | bail!( 100 | "version {} is too old. Use --skip-clang-version-checks to skip version check", 101 | version 102 | ); 103 | } 104 | 105 | Ok(()) 106 | } 107 | 108 | /// We're essentially going to run: 109 | /// 110 | /// clang -g -O2 -target bpf -c -D__TARGET_ARCH_$(ARCH) runqslower.bpf.c -o runqslower.bpf.o 111 | /// 112 | /// for each prog. 
113 | fn compile_one(debug: bool, source: &Path, out: &Path, clang: &Path, options: &str) -> Result<()> { 114 | let arch = match std::env::consts::ARCH { 115 | "x86_64" => "x86", 116 | "aarch64" => "arm64", 117 | _ => std::env::consts::ARCH, 118 | }; 119 | 120 | if debug { 121 | println!("Building {}", source.display()); 122 | } 123 | 124 | let mut cmd = Command::new(clang.as_os_str()); 125 | if !options.is_empty() { 126 | cmd.args(options.split_whitespace()); 127 | } 128 | cmd.arg("-g") 129 | .arg("-O2") 130 | .arg("-target") 131 | .arg("bpf") 132 | .arg("-c") 133 | .arg(format!("-D__TARGET_ARCH_{}", arch)) 134 | .arg(source.as_os_str()) 135 | .arg("-o") 136 | .arg(out); 137 | 138 | let output = cmd.output().context("Failed to execute clang")?; 139 | if !output.status.success() { 140 | bail!( 141 | "Failed to compile obj={} with status={}\n \ 142 | stdout=\n \ 143 | {}\n \ 144 | stderr=\n \ 145 | {}\n", 146 | out.to_string_lossy(), 147 | output.status, 148 | String::from_utf8(output.stdout).unwrap(), 149 | String::from_utf8(output.stderr).unwrap() 150 | ); 151 | } 152 | 153 | Ok(()) 154 | } 155 | 156 | fn compile(debug: bool, objs: &[UnprocessedObj], clang: &Path) -> Result<()> { 157 | let header_dir = extract_libbpf_headers_to_disk()?; 158 | let compiler_options = if let Some(dir) = &header_dir { 159 | format!("-I{}", dir.path().to_str().unwrap()) 160 | } else { 161 | "".to_string() 162 | }; 163 | 164 | for obj in objs { 165 | let dest_name = if let Some(f) = obj.path.file_stem() { 166 | let mut stem = f.to_os_string(); 167 | stem.push(".o"); 168 | stem 169 | } else { 170 | bail!( 171 | "Could not calculate destination name for obj={}", 172 | obj.path.display() 173 | ); 174 | }; 175 | let mut dest_path = obj.out.to_path_buf(); 176 | dest_path.push(&dest_name); 177 | fs::create_dir_all(&obj.out)?; 178 | compile_one(debug, &obj.path, &dest_path, clang, &compiler_options)?; 179 | } 180 | 181 | Ok(()) 182 | } 183 | 184 | fn extract_clang_or_default(clang: 
/// Return the user-supplied clang path, or fall back to `"clang"`.
fn extract_clang_or_default(clang: Option<&PathBuf>) -> PathBuf {
    match clang {
        Some(c) => c.into(),
        // Searches $PATH
        None => "clang".into(),
    }
}

/// Compile every BPF prog discovered via the package metadata.
///
/// Fails when no progs are found, when destination names collide, or when
/// clang is missing or fails the version check.
pub fn build(
    debug: bool,
    manifest_path: Option<&PathBuf>,
    clang: Option<&PathBuf>,
    skip_clang_version_checks: bool,
) -> Result<()> {
    let to_compile = metadata::get(debug, manifest_path)?;

    if debug && !to_compile.is_empty() {
        println!("Found bpf progs to compile:");
        for obj in &to_compile {
            println!("\t{:?}", obj);
        }
    } else if to_compile.is_empty() {
        bail!("Did not find any bpf progs to compile");
    }

    check_progs(&to_compile)?;

    let clang = extract_clang_or_default(clang);
    if let Err(e) = check_clang(debug, &clang, skip_clang_version_checks) {
        bail!("{} is invalid: {}", clang.display(), e);
    }

    if let Err(e) = compile(debug, &to_compile, &clang) {
        bail!("Failed to compile progs: {}", e);
    }

    Ok(())
}

/// Compile a single BPF source file to `out`.
///
/// Used by the `SkeletonBuilder` build-script API; `options` is an extra
/// whitespace-separated set of clang arguments.
// Only used in libbpf-cargo library
#[allow(dead_code)]
pub fn build_single(
    debug: bool,
    source: &Path,
    out: &Path,
    clang: Option<&PathBuf>,
    skip_clang_version_checks: bool,
    options: &str,
) -> Result<()> {
    let clang = extract_clang_or_default(clang);
    check_clang(debug, &clang, skip_clang_version_checks)?;
    // Prepend the caller's options, then the vendored header include path.
    let header_dir = extract_libbpf_headers_to_disk()?;
    let compiler_options = if let Some(dir) = &header_dir {
        format!("{} -I{}", options, dir.path().to_str().unwrap())
    } else {
        options.to_string()
    };
    compile_one(debug, source, out, &clang, &compiler_options)?;

    Ok(())
}
/// Per-map bookkeeping used while assembling the skeleton config.
struct MapSkelConfig {
    // Map name as it appears in the BPF object.
    name: String,
    // Slot libbpf fills in with the `bpf_map` pointer.
    p: Box<*mut bpf_map>,
    // Present only for maps that are mmap'able to userspace.
    mmaped: Option<Box<*mut c_void>>,
}

/// Per-program bookkeeping used while assembling the skeleton config.
struct ProgSkelConfig {
    name: String,
    // Slot libbpf fills in with the `bpf_program` pointer.
    p: Box<*mut bpf_program>,
    // Slot libbpf fills in with the attach link pointer.
    link: Box<*mut bpf_link>,
}

/// Builder for [`ObjectSkeletonConfig`].
pub struct ObjectSkeletonConfigBuilder<'a> {
    data: &'a [u8],
    p: Box<*mut bpf_object>,
    name: Option<String>,
    maps: Vec<MapSkelConfig>,
    progs: Vec<ProgSkelConfig>,
}

/// Convert `s` to a C string and stash it in `pool` so the returned pointer
/// stays valid as long as the pool lives.
///
/// The pointer targets the CString's heap buffer, which does not move when
/// the Vec itself reallocates.
fn str_to_cstring_and_pool(s: &str, pool: &mut Vec<CString>) -> Result<*const c_char> {
    let cname = util::str_to_cstring(s)?;
    let p = cname.as_ptr();
    pool.push(cname);

    Ok(p)
}
    /// Set the skeleton/object name recorded in the config.
    pub fn name<T: AsRef<str>>(&mut self, name: T) -> &mut Self {
        self.name = Some(name.as_ref().to_string());
        self
    }

    /// Adds a map to the config
    ///
    /// Set `mmaped` to `true` if the map is mmap'able to userspace
    pub fn map<T: AsRef<str>>(&mut self, name: T, mmaped: bool) -> &mut Self {
        // Allocate a slot for the mmap'ed pointer only when requested;
        // otherwise the skeleton entry gets a null `mmaped` field.
        let m = if mmaped {
            Some(Box::new(ptr::null_mut()))
        } else {
            None
        };

        self.maps.push(MapSkelConfig {
            name: name.as_ref().to_string(),
            p: Box::new(ptr::null_mut()),
            mmaped: m,
        });

        self
    }

    /// Adds a prog to the config
    pub fn prog<T: AsRef<str>>(&mut self, name: T) -> &mut Self {
        self.progs.push(ProgSkelConfig {
            name: name.as_ref().to_string(),
            p: Box::new(ptr::null_mut()),
            link: Box::new(ptr::null_mut()),
        });

        self
    }
    /// Fill in the progs portion of the raw skeleton.
    ///
    /// Returns the allocation `Layout` so `Drop` can `dealloc` the array
    /// later, or `None` when there are no progs (and thus no allocation).
    fn build_progs(
        &mut self,
        s: &mut bpf_object_skeleton,
        string_pool: &mut Vec<CString>,
    ) -> Option<Layout> {
        if self.progs.is_empty() {
            return None;
        }

        s.prog_cnt = self.progs.len() as i32;
        s.prog_skel_sz = size_of::<bpf_prog_skeleton>() as i32;

        let layout = Layout::array::<bpf_prog_skeleton>(self.progs.len())
            .expect("Failed to allocate memory for progs skeleton");

        unsafe {
            s.progs = alloc_zeroed(layout) as *mut bpf_prog_skeleton;
            for (i, prog) in self.progs.iter_mut().enumerate() {
                let current_prog = s.progs.add(i);

                // See above for `expect()` rationale
                (*current_prog).name = str_to_cstring_and_pool(&prog.name, string_pool)
                    .expect("Invalid unicode in prog name");
                (*current_prog).prog = &mut *prog.p;
                (*current_prog).link = &mut *prog.link;
            }
        }

        Some(layout)
    }
/// Helper struct that wraps a `libbpf_sys::bpf_object_skeleton`.
///
/// This struct will:
/// * ensure lifetimes are valid for dependencies (pointers, data buffer)
/// * free any allocated memory on drop
///
/// This struct can be moved around at will. Upon drop, all allocated resources will be freed
pub struct ObjectSkeletonConfig<'a> {
    inner: bpf_object_skeleton,
    obj: Box<*mut bpf_object>,
    maps: Vec<MapSkelConfig>,
    progs: Vec<ProgSkelConfig>,
    /// Layout necessary to `dealloc` memory
    maps_layout: Option<Layout>,
    /// Same as above
    progs_layout: Option<Layout>,
    /// Hold this reference so that compiler guarantees buffer lives as long as us
    _data: &'a [u8],
    /// Hold strings alive so pointers to them stay valid
    _string_pool: Vec<CString>,
}
237 | pub fn map_mmap_ptr(&mut self, index: usize) -> Result<*mut c_void> { 238 | if index >= self.maps.len() { 239 | return Err(Error::Internal(format!("Invalid map index: {}", index))); 240 | } 241 | 242 | self.maps[index].mmaped.as_ref().map_or_else( 243 | || Err(Error::Internal("Map does not have mmaped ptr".to_string())), 244 | |p| Ok(**p), 245 | ) 246 | } 247 | 248 | /// Returns the link pointer for a prog at the specified `index`. 249 | /// 250 | /// The index is determined by the order in which the prog was passed to 251 | /// `ObjectSkeletonConfigBuilder::prog`. Index starts at 0. 252 | /// 253 | /// Warning: the returned pointer is only valid while the `ObjectSkeletonConfig` is alive. 254 | pub fn prog_link_ptr(&mut self, index: usize) -> Result<*mut bpf_link> { 255 | if index >= self.progs.len() { 256 | return Err(Error::Internal(format!("Invalid prog index: {}", index))); 257 | } 258 | 259 | Ok(*self.progs[index].link) 260 | } 261 | } 262 | 263 | impl<'a> Drop for ObjectSkeletonConfig<'a> { 264 | // Note we do *not* run `libbpf_sys::bpf_object__destroy_skeleton` here. 265 | // 266 | // Couple reasons: 267 | // 268 | // 1) We did not allocate `libbpf_sys::bpf_object_skeleton` on the heap and 269 | // `libbpf_sys::bpf_object__destroy_skeleton` will try to free from heap 270 | // 271 | // 2) `libbpf_object_skeleton` assumes it "owns" the object and everything inside it. 272 | // libbpf-cargo's generated skeleton instead gives ownership of the object to 273 | // libbpf-rs::*Object. The destructors in libbpf-rs::*Object will know when and how to do 274 | // cleanup. 
    fn drop(&mut self) {
        // A layout is recorded exactly when the corresponding array was
        // allocated in build_maps()/build_progs(); if these ever disagree we
        // would either leak or free an unallocated pointer.
        assert_eq!(self.maps_layout.is_none(), self.inner.maps.is_null());
        assert_eq!(self.progs_layout.is_none(), self.inner.progs.is_null());

        if let Some(layout) = self.maps_layout {
            unsafe {
                // SAFETY: `inner.maps` was allocated with `alloc_zeroed`
                // using this exact layout.
                dealloc(self.inner.maps as _, layout);
            }
        }

        if let Some(layout) = self.progs_layout {
            unsafe {
                // SAFETY: `inner.progs` was allocated with `alloc_zeroed`
                // using this exact layout.
                dealloc(self.inner.progs as _, layout);
            }
        }
    }
gen_info_impl { 25 | // This magic here allows us to embed doc comments into macro expansions 26 | ($(#[$attr:meta])* 27 | $name:ident, $info_ty:ty, $uapi_info_ty:ty, $next_id:expr, $fd_by_id:expr) => { 28 | $(#[$attr])* 29 | #[derive(Default)] 30 | pub struct $name { 31 | cur_id: u32, 32 | } 33 | 34 | impl $name { 35 | // Returns Some(next_valid_fd), None on none left 36 | fn get_next_valid_fd(&mut self) -> Option { 37 | loop { 38 | if unsafe { $next_id(self.cur_id, &mut self.cur_id) } != 0 { 39 | return None; 40 | } 41 | 42 | let fd = unsafe { $fd_by_id(self.cur_id) }; 43 | if fd < 0 { 44 | if errno::errno() == errno::Errno::ENOENT as i32 { 45 | continue; 46 | } 47 | 48 | return None; 49 | } 50 | 51 | return Some(fd); 52 | } 53 | } 54 | } 55 | 56 | impl Iterator for $name { 57 | type Item = $info_ty; 58 | 59 | fn next(&mut self) -> Option { 60 | let fd = match self.get_next_valid_fd() { 61 | Some(fd) => fd, 62 | None => return None, 63 | }; 64 | 65 | // We need to use std::mem::zeroed() instead of just using 66 | // ::default() because padding bytes need to be zero as well. 67 | // Old kernels which know about fewer fields than we do will 68 | // check to make sure every byte past what they know is zero 69 | // and will return E2BIG otherwise. 
/// Convert a NUL-terminated `c_char` array into a `String`.
///
/// Falls back to `default` when the name is empty or not valid UTF-8.
fn name_arr_to_string(a: &[c_char], default: &str) -> String {
    // Treat the array as NUL-terminated: everything before the first 0 is
    // the name; a missing terminator means the whole array is the name.
    let len = a.iter().position(|&c| c == 0).unwrap_or(a.len());
    if len == 0 {
        return default.to_string();
    }

    let bytes: Vec<u8> = a[..len].iter().map(|&c| c as u8).collect();
    String::from_utf8(bytes).unwrap_or_else(|_| default.to_string())
}
libbpf_sys::bpf_prog_info) -> Option { 142 | let name = name_arr_to_string(&s.name, "(?)"); 143 | let ty = match ProgramType::try_from(s.type_) { 144 | Ok(ty) => ty, 145 | Err(_) => ProgramType::Unknown, 146 | }; 147 | 148 | Some(ProgramInfo { 149 | name, 150 | ty, 151 | tag: s.tag, 152 | id: s.id, 153 | jited_prog_len: s.jited_prog_len, 154 | xlated_prog_len: s.xlated_prog_len, 155 | jited_prog_insns: s.jited_prog_insns, 156 | xlated_prog_insns: s.xlated_prog_insns, 157 | load_time: Duration::from_nanos(s.load_time), 158 | created_by_uid: s.created_by_uid, 159 | nr_map_ids: s.nr_map_ids, 160 | map_ids: s.map_ids, 161 | ifindex: s.ifindex, 162 | gpl_compatible: s._bitfield_1.get_bit(0), 163 | netns_dev: s.netns_dev, 164 | netns_ino: s.netns_ino, 165 | nr_jited_ksyms: s.nr_jited_ksyms, 166 | nr_jited_func_lens: s.nr_jited_func_lens, 167 | jited_ksyms: s.jited_ksyms, 168 | jited_func_lens: s.jited_func_lens, 169 | btf_id: s.btf_id, 170 | func_info_rec_size: s.func_info_rec_size, 171 | func_info: s.func_info, 172 | nr_func_info: s.nr_func_info, 173 | nr_line_info: s.nr_line_info, 174 | line_info: s.line_info, 175 | jited_line_info: s.jited_line_info, 176 | nr_jited_line_info: s.nr_jited_line_info, 177 | line_info_rec_size: s.line_info_rec_size, 178 | jited_line_info_rec_size: s.jited_line_info_rec_size, 179 | nr_prog_tags: s.nr_prog_tags, 180 | prog_tags: s.prog_tags, 181 | run_time_ns: s.run_time_ns, 182 | run_cnt: s.run_cnt, 183 | }) 184 | } 185 | } 186 | 187 | gen_info_impl!( 188 | /// Iterator that returns [`ProgramInfo`]s. 
/// Information about a BPF map
pub struct MapInfo {
    pub name: String,
    pub ty: MapType,
    pub id: u32,
    pub key_size: u32,
    pub value_size: u32,
    pub max_entries: u32,
    pub map_flags: u32,
    pub ifindex: u32,
    pub btf_vmlinux_value_type_id: u32,
    pub netns_dev: u64,
    pub netns_ino: u64,
    pub btf_id: u32,
    pub btf_key_type_id: u32,
    pub btf_value_type_id: u32,
}

impl MapInfo {
    /// Build a `MapInfo` from the raw uapi struct; the fd is unused here.
    fn from_uapi(_fd: i32, s: libbpf_sys::bpf_map_info) -> Option<Self> {
        let name = name_arr_to_string(&s.name, "(?)");
        // Unrecognized map types become `MapType::Unknown` instead of
        // failing the whole conversion.
        let ty = match MapType::try_from(s.type_) {
            Ok(ty) => ty,
            Err(_) => MapType::Unknown,
        };

        Some(Self {
            name,
            ty,
            id: s.id,
            key_size: s.key_size,
            value_size: s.value_size,
            max_entries: s.max_entries,
            map_flags: s.map_flags,
            ifindex: s.ifindex,
            btf_vmlinux_value_type_id: s.btf_vmlinux_value_type_id,
            netns_dev: s.netns_dev,
            netns_ino: s.netns_ino,
            btf_id: s.btf_id,
            btf_key_type_id: s.btf_key_type_id,
            btf_value_type_id: s.btf_value_type_id,
        })
    }
}

gen_info_impl!(
    /// Iterator that returns [`MapInfo`]s.
    MapInfoIter,
    MapInfo,
    libbpf_sys::bpf_map_info,
    libbpf_sys::bpf_map_get_next_id,
    libbpf_sys::bpf_map_get_fd_by_id
);

/// Information about BPF type format
pub struct BtfInfo {
    pub btf: u64,
    pub btf_size: u32,
    pub id: u32,
}

impl BtfInfo {
    /// Build a `BtfInfo` from the raw uapi struct; the fd is unused here.
    fn from_uapi(_fd: i32, s: libbpf_sys::bpf_btf_info) -> Option<Self> {
        Some(Self {
            btf: s.btf,
            btf_size: s.btf_size,
            id: s.id,
        })
    }
}

gen_info_impl!(
    /// Iterator that returns [`BtfInfo`]s.
    BtfInfoIter,
    BtfInfo,
    libbpf_sys::bpf_btf_info,
    libbpf_sys::bpf_btf_get_next_id,
    libbpf_sys::bpf_btf_get_fd_by_id
);

/// Type-specific info for raw tracepoint links.
pub struct RawTracepointLinkInfo {
    pub name: String,
}

/// Type-specific info for tracing links.
pub struct TracingLinkInfo {
    pub attach_type: ProgramAttachType,
}

/// Type-specific info for cgroup links.
pub struct CgroupLinkInfo {
    pub cgroup_id: u64,
    pub attach_type: ProgramAttachType,
}

/// Type-specific info for network-namespace links.
pub struct NetNsLinkInfo {
    pub ino: u32,
    pub attach_type: ProgramAttachType,
}

/// Per-link-type payload of [`LinkInfo`].
pub enum LinkTypeInfo {
    RawTracepoint(RawTracepointLinkInfo),
    Tracing(TracingLinkInfo),
    Cgroup(CgroupLinkInfo),
    Iter,
    NetNs(NetNsLinkInfo),
    Unknown,
}

/// Information about a BPF link
pub struct LinkInfo {
    pub info: LinkTypeInfo,
    pub id: u32,
    pub prog_id: u32,
}
libbpf_sys::bpf_obj_get_info_by_fd(fd, item_ptr as *mut c_void, &mut len) 322 | }; 323 | if ret != 0 { 324 | return None; 325 | } 326 | 327 | LinkTypeInfo::RawTracepoint(RawTracepointLinkInfo { 328 | name: util::c_ptr_to_string( 329 | unsafe { s.__bindgen_anon_1.raw_tracepoint.tp_name } as *const c_char, 330 | ) 331 | .unwrap_or_else(|_| "?".to_string()), 332 | }) 333 | } 334 | libbpf_sys::BPF_LINK_TYPE_TRACING => LinkTypeInfo::Tracing(TracingLinkInfo { 335 | attach_type: ProgramAttachType::try_from(unsafe { 336 | s.__bindgen_anon_1.tracing.attach_type 337 | }) 338 | .unwrap_or(ProgramAttachType::Unknown), 339 | }), 340 | libbpf_sys::BPF_LINK_TYPE_CGROUP => LinkTypeInfo::Cgroup(CgroupLinkInfo { 341 | cgroup_id: unsafe { s.__bindgen_anon_1.cgroup.cgroup_id }, 342 | attach_type: ProgramAttachType::try_from(unsafe { 343 | s.__bindgen_anon_1.cgroup.attach_type 344 | }) 345 | .unwrap_or(ProgramAttachType::Unknown), 346 | }), 347 | libbpf_sys::BPF_LINK_TYPE_ITER => LinkTypeInfo::Iter, 348 | libbpf_sys::BPF_LINK_TYPE_NETNS => LinkTypeInfo::NetNs(NetNsLinkInfo { 349 | ino: unsafe { s.__bindgen_anon_1.netns.netns_ino }, 350 | attach_type: ProgramAttachType::try_from(unsafe { 351 | s.__bindgen_anon_1.netns.attach_type 352 | }) 353 | .unwrap_or(ProgramAttachType::Unknown), 354 | }), 355 | _ => LinkTypeInfo::Unknown, 356 | }; 357 | 358 | Some(Self { 359 | info: type_info, 360 | id: s.id, 361 | prog_id: s.prog_id, 362 | }) 363 | } 364 | } 365 | 366 | gen_info_impl!( 367 | /// Iterator that returns [`LinkInfo`]s. 
368 | LinkInfoIter, 369 | LinkInfo, 370 | libbpf_sys::bpf_link_info, 371 | libbpf_sys::bpf_link_get_next_id, 372 | libbpf_sys::bpf_link_get_fd_by_id 373 | ); 374 | -------------------------------------------------------------------------------- /libbpf-rs/src/program.rs: -------------------------------------------------------------------------------- 1 | use std::convert::TryFrom; 2 | use std::path::Path; 3 | 4 | use nix::errno; 5 | use num_enum::TryFromPrimitive; 6 | use strum_macros::Display; 7 | 8 | use crate::*; 9 | 10 | /// Represents a parsed but not yet loaded BPF program. 11 | /// 12 | /// This object exposes operations that need to happen before the program is loaded. 13 | pub struct OpenProgram { 14 | ptr: *mut libbpf_sys::bpf_program, 15 | } 16 | 17 | impl OpenProgram { 18 | pub(crate) fn new(ptr: *mut libbpf_sys::bpf_program) -> Self { 19 | OpenProgram { ptr } 20 | } 21 | 22 | pub fn set_prog_type(&mut self, prog_type: ProgramType) { 23 | unsafe { 24 | libbpf_sys::bpf_program__set_type(self.ptr, prog_type as u32); 25 | } 26 | } 27 | 28 | pub fn set_attach_type(&mut self, attach_type: ProgramAttachType) { 29 | unsafe { 30 | libbpf_sys::bpf_program__set_expected_attach_type(self.ptr, attach_type as u32); 31 | } 32 | } 33 | 34 | pub fn set_ifindex(&mut self, idx: u32) { 35 | unsafe { 36 | libbpf_sys::bpf_program__set_ifindex(self.ptr, idx); 37 | } 38 | } 39 | } 40 | 41 | /// Type of a [`Program`]. Maps to `enum bpf_prog_type` in kernel uapi. 
42 | #[non_exhaustive] 43 | #[repr(u32)] 44 | #[derive(Clone, TryFromPrimitive, Display)] 45 | pub enum ProgramType { 46 | Unspec = 0, 47 | SocketFilter, 48 | Kprobe, 49 | SchedCls, 50 | SchedAct, 51 | Tracepoint, 52 | Xdp, 53 | PerfEvent, 54 | CgroupSkb, 55 | CgroupSock, 56 | LwtIn, 57 | LwtOut, 58 | LwtXmit, 59 | SockOps, 60 | SkSkb, 61 | CgroupDevice, 62 | SkMsg, 63 | RawTracepoint, 64 | CgroupSockAddr, 65 | LwtSeg6local, 66 | LircMode2, 67 | SkReuseport, 68 | FlowDissector, 69 | CgroupSysctl, 70 | RawTracepointWritable, 71 | CgroupSockopt, 72 | Tracing, 73 | StructOps, 74 | Ext, 75 | Lsm, 76 | SkLookup, 77 | Syscall, 78 | /// See [`MapType::Unknown`] 79 | Unknown = u32::MAX, 80 | } 81 | 82 | /// Attach type of a [`Program`]. Maps to `enum bpf_attach_type` in kernel uapi. 83 | #[non_exhaustive] 84 | #[repr(u32)] 85 | #[derive(Clone, TryFromPrimitive, Display)] 86 | pub enum ProgramAttachType { 87 | CgroupInetIngress, 88 | CgroupInetEgress, 89 | CgroupInetSockCreate, 90 | CgroupSockOps, 91 | SkSkbStreamParser, 92 | SkSkbStreamVerdict, 93 | CgroupDevice, 94 | SkMsgVerdict, 95 | CgroupInet4Bind, 96 | CgroupInet6Bind, 97 | CgroupInet4Connect, 98 | CgroupInet6Connect, 99 | CgroupInet4PostBind, 100 | CgroupInet6PostBind, 101 | CgroupUdp4Sendmsg, 102 | CgroupUdp6Sendmsg, 103 | LircMode2, 104 | FlowDissector, 105 | CgroupSysctl, 106 | CgroupUdp4Recvmsg, 107 | CgroupUdp6Recvmsg, 108 | CgroupGetsockopt, 109 | CgroupSetsockopt, 110 | TraceRawTp, 111 | TraceFentry, 112 | TraceFexit, 113 | ModifyReturn, 114 | LsmMac, 115 | TraceIter, 116 | CgroupInet4Getpeername, 117 | CgroupInet6Getpeername, 118 | CgroupInet4Getsockname, 119 | CgroupInet6Getsockname, 120 | XdpDevmap, 121 | CgroupInetSockRelease, 122 | XdpCpumap, 123 | SkLookup, 124 | Xdp, 125 | SkSkbVerdict, 126 | SkReuseportSelect, 127 | SkReuseportSelectOrMigrate, 128 | PerfEvent, 129 | /// See [`MapType::Unknown`] 130 | Unknown = u32::MAX, 131 | } 132 | 133 | /// Represents a loaded [`Program`]. 
134 | /// 135 | /// This struct is not safe to clone because the underlying libbpf resource cannot currently 136 | /// be protected from data races. 137 | /// 138 | /// If you attempt to attach a `Program` with the wrong attach method, the `attach_*` 139 | /// method will fail with the appropriate error. 140 | pub struct Program { 141 | pub(crate) ptr: *mut libbpf_sys::bpf_program, 142 | name: String, 143 | section: String, 144 | } 145 | 146 | impl Program { 147 | pub(crate) fn new(ptr: *mut libbpf_sys::bpf_program, name: String, section: String) -> Self { 148 | Program { ptr, name, section } 149 | } 150 | 151 | pub fn name(&self) -> &str { 152 | &self.name 153 | } 154 | 155 | /// Name of the section this `Program` belongs to. 156 | pub fn section(&self) -> &str { 157 | &self.section 158 | } 159 | 160 | pub fn prog_type(&self) -> ProgramType { 161 | match ProgramType::try_from(unsafe { libbpf_sys::bpf_program__get_type(self.ptr) }) { 162 | Ok(ty) => ty, 163 | Err(_) => ProgramType::Unknown, 164 | } 165 | } 166 | 167 | /// Returns a file descriptor to the underlying program. 168 | pub fn fd(&self) -> i32 { 169 | unsafe { libbpf_sys::bpf_program__fd(self.ptr) } 170 | } 171 | 172 | pub fn attach_type(&self) -> ProgramAttachType { 173 | match ProgramAttachType::try_from(unsafe { 174 | libbpf_sys::bpf_program__get_expected_attach_type(self.ptr) 175 | }) { 176 | Ok(ty) => ty, 177 | Err(_) => ProgramAttachType::Unknown, 178 | } 179 | } 180 | 181 | /// [Pin](https://facebookmicrosites.github.io/bpf/blog/2018/08/31/object-lifetime.html#bpffs) 182 | /// this program to bpffs. 
183 | pub fn pin>(&mut self, path: P) -> Result<()> { 184 | let path_c = util::path_to_cstring(path)?; 185 | let path_ptr = path_c.as_ptr(); 186 | 187 | let ret = unsafe { libbpf_sys::bpf_program__pin(self.ptr, path_ptr) }; 188 | if ret != 0 { 189 | // Error code is returned negative, flip to positive to match errno 190 | Err(Error::System(-ret)) 191 | } else { 192 | Ok(()) 193 | } 194 | } 195 | 196 | /// [Unpin](https://facebookmicrosites.github.io/bpf/blog/2018/08/31/object-lifetime.html#bpffs) 197 | /// this program from bpffs 198 | pub fn unpin>(&mut self, path: P) -> Result<()> { 199 | let path_c = util::path_to_cstring(path)?; 200 | let path_ptr = path_c.as_ptr(); 201 | 202 | let ret = unsafe { libbpf_sys::bpf_program__unpin(self.ptr, path_ptr) }; 203 | if ret != 0 { 204 | // Error code is returned negative, flip to positive to match errno 205 | Err(Error::System(-ret)) 206 | } else { 207 | Ok(()) 208 | } 209 | } 210 | 211 | /// Auto-attach based on prog section 212 | pub fn attach(&mut self) -> Result { 213 | let ptr = unsafe { libbpf_sys::bpf_program__attach(self.ptr) }; 214 | let err = unsafe { libbpf_sys::libbpf_get_error(ptr as *const _) }; 215 | if err != 0 { 216 | Err(Error::System(err as i32)) 217 | } else { 218 | Ok(Link::new(ptr)) 219 | } 220 | } 221 | 222 | /// Attach this program to a 223 | /// [cgroup](https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html). 224 | pub fn attach_cgroup(&mut self, cgroup_fd: i32) -> Result { 225 | let ptr = unsafe { libbpf_sys::bpf_program__attach_cgroup(self.ptr, cgroup_fd) }; 226 | let err = unsafe { libbpf_sys::libbpf_get_error(ptr as *const _) }; 227 | if err != 0 { 228 | Err(Error::System(err as i32)) 229 | } else { 230 | Ok(Link::new(ptr)) 231 | } 232 | } 233 | 234 | /// Attach this program to a [perf event](https://linux.die.net/man/2/perf_event_open). 
235 | pub fn attach_perf_event(&mut self, pfd: i32) -> Result { 236 | let ptr = unsafe { libbpf_sys::bpf_program__attach_perf_event(self.ptr, pfd) }; 237 | let err = unsafe { libbpf_sys::libbpf_get_error(ptr as *const _) }; 238 | if err != 0 { 239 | Err(Error::System(err as i32)) 240 | } else { 241 | Ok(Link::new(ptr)) 242 | } 243 | } 244 | 245 | /// Attach this program to a [userspace 246 | /// probe](https://www.kernel.org/doc/html/latest/trace/uprobetracer.html). 247 | pub fn attach_uprobe>( 248 | &mut self, 249 | retprobe: bool, 250 | pid: i32, 251 | binary_path: T, 252 | func_offset: usize, 253 | ) -> Result { 254 | let path = util::path_to_cstring(binary_path.as_ref())?; 255 | let path_ptr = path.as_ptr(); 256 | let ptr = unsafe { 257 | libbpf_sys::bpf_program__attach_uprobe( 258 | self.ptr, 259 | retprobe, 260 | pid, 261 | path_ptr, 262 | func_offset as libbpf_sys::size_t, 263 | ) 264 | }; 265 | let err = unsafe { libbpf_sys::libbpf_get_error(ptr as *const _) }; 266 | if err != 0 { 267 | Err(Error::System(err as i32)) 268 | } else { 269 | Ok(Link::new(ptr)) 270 | } 271 | } 272 | 273 | /// Attach this program to a [kernel 274 | /// probe](https://www.kernel.org/doc/html/latest/trace/kprobetrace.html). 275 | pub fn attach_kprobe>(&mut self, retprobe: bool, func_name: T) -> Result { 276 | let func_name = util::str_to_cstring(func_name.as_ref())?; 277 | let func_name_ptr = func_name.as_ptr(); 278 | let ptr = 279 | unsafe { libbpf_sys::bpf_program__attach_kprobe(self.ptr, retprobe, func_name_ptr) }; 280 | let err = unsafe { libbpf_sys::libbpf_get_error(ptr as *const _) }; 281 | if err != 0 { 282 | Err(Error::System(err as i32)) 283 | } else { 284 | Ok(Link::new(ptr)) 285 | } 286 | } 287 | 288 | /// Attach this program to a [kernel 289 | /// tracepoint](https://www.kernel.org/doc/html/latest/trace/tracepoints.html). 
290 | pub fn attach_tracepoint>(&mut self, tp_category: T, tp_name: T) -> Result { 291 | let tp_category = util::str_to_cstring(tp_category.as_ref())?; 292 | let tp_category_ptr = tp_category.as_ptr(); 293 | let tp_name = util::str_to_cstring(tp_name.as_ref())?; 294 | let tp_name_ptr = tp_name.as_ptr(); 295 | let ptr = unsafe { 296 | libbpf_sys::bpf_program__attach_tracepoint(self.ptr, tp_category_ptr, tp_name_ptr) 297 | }; 298 | let err = unsafe { libbpf_sys::libbpf_get_error(ptr as *const _) }; 299 | if err != 0 { 300 | Err(Error::System(err as i32)) 301 | } else { 302 | Ok(Link::new(ptr)) 303 | } 304 | } 305 | 306 | /// Attach this program to a [raw kernel 307 | /// tracepoint](https://lwn.net/Articles/748352/). 308 | pub fn attach_raw_tracepoint>(&mut self, tp_name: T) -> Result { 309 | let tp_name = util::str_to_cstring(tp_name.as_ref())?; 310 | let tp_name_ptr = tp_name.as_ptr(); 311 | let ptr = unsafe { libbpf_sys::bpf_program__attach_raw_tracepoint(self.ptr, tp_name_ptr) }; 312 | let err = unsafe { libbpf_sys::libbpf_get_error(ptr as *const _) }; 313 | if err != 0 { 314 | Err(Error::System(err as i32)) 315 | } else { 316 | Ok(Link::new(ptr)) 317 | } 318 | } 319 | 320 | /// Attach to an [LSM](https://en.wikipedia.org/wiki/Linux_Security_Modules) hook 321 | pub fn attach_lsm(&mut self) -> Result { 322 | let ptr = unsafe { libbpf_sys::bpf_program__attach_lsm(self.ptr) }; 323 | let err = unsafe { libbpf_sys::libbpf_get_error(ptr as *const _) }; 324 | if err != 0 { 325 | Err(Error::System(err as i32)) 326 | } else { 327 | Ok(Link::new(ptr)) 328 | } 329 | } 330 | 331 | /// Attach to a [fentry/fexit kernel probe](https://lwn.net/Articles/801479/) 332 | pub fn attach_trace(&mut self) -> Result { 333 | let ptr = unsafe { libbpf_sys::bpf_program__attach_trace(self.ptr) }; 334 | let err = unsafe { libbpf_sys::libbpf_get_error(ptr as *const _) }; 335 | if err != 0 { 336 | Err(Error::System(err as i32)) 337 | } else { 338 | Ok(Link::new(ptr)) 339 | } 340 | } 341 | 342 | 
/// Attach a verdict/parser to a [sockmap/sockhash](https://lwn.net/Articles/731133/) 343 | pub fn attach_sockmap(&self, map_fd: i32) -> Result<()> { 344 | let err = 345 | unsafe { libbpf_sys::bpf_prog_attach(self.fd(), map_fd, self.attach_type() as u32, 0) }; 346 | if err != 0 { 347 | Err(Error::System(errno::errno())) 348 | } else { 349 | Ok(()) 350 | } 351 | } 352 | 353 | /// Attach this program to [XDP](https://lwn.net/Articles/825998/) 354 | pub fn attach_xdp(&mut self, ifindex: i32) -> Result { 355 | let ptr = unsafe { libbpf_sys::bpf_program__attach_xdp(self.ptr, ifindex) }; 356 | let err = unsafe { libbpf_sys::libbpf_get_error(ptr as *const _) }; 357 | if err != 0 { 358 | Err(Error::System(err as i32)) 359 | } else { 360 | Ok(Link::new(ptr)) 361 | } 362 | } 363 | 364 | /// Attach this program to [netns-based programs](https://lwn.net/Articles/819618/) 365 | pub fn attach_netns(&mut self, netns_fd: i32) -> Result { 366 | let ptr = unsafe { libbpf_sys::bpf_program__attach_netns(self.ptr, netns_fd) }; 367 | let err = unsafe { libbpf_sys::libbpf_get_error(ptr as *const _) }; 368 | if err != 0 { 369 | Err(Error::System(err as i32)) 370 | } else { 371 | Ok(Link::new(ptr)) 372 | } 373 | } 374 | } 375 | -------------------------------------------------------------------------------- /libbpf-rs/src/object.rs: -------------------------------------------------------------------------------- 1 | use core::ffi::c_void; 2 | use std::collections::HashMap; 3 | use std::ffi::CStr; 4 | use std::mem; 5 | use std::os::raw::c_char; 6 | use std::path::Path; 7 | use std::ptr; 8 | 9 | use crate::util; 10 | use crate::*; 11 | 12 | /// Builder for creating an [`OpenObject`]. Typically the entry point into libbpf-rs. 13 | pub struct ObjectBuilder { 14 | name: String, 15 | relaxed_maps: bool, 16 | } 17 | 18 | impl ObjectBuilder { 19 | /// Override the generated name that would have been inferred from the constructor. 
20 | pub fn name>(&mut self, name: T) -> &mut Self { 21 | self.name = name.as_ref().to_string(); 22 | self 23 | } 24 | 25 | /// Option to parse map definitions non-strictly, allowing extra attributes/data 26 | pub fn relaxed_maps(&mut self, relaxed_maps: bool) -> &mut Self { 27 | self.relaxed_maps = relaxed_maps; 28 | self 29 | } 30 | 31 | /// Option to print debug output to stderr. 32 | /// 33 | /// Note: This function uses [`set_print`] internally and will overwrite any callbacks 34 | /// currently in use. 35 | pub fn debug(&mut self, dbg: bool) -> &mut Self { 36 | if dbg { 37 | set_print(Some((PrintLevel::Debug, |_, s| print!("{}", s)))); 38 | } else { 39 | set_print(None); 40 | } 41 | self 42 | } 43 | 44 | /// Used for skeleton -- an end user may not consider this API stable 45 | #[doc(hidden)] 46 | pub fn opts(&mut self, name: *const c_char) -> libbpf_sys::bpf_object_open_opts { 47 | libbpf_sys::bpf_object_open_opts { 48 | sz: mem::size_of::() as libbpf_sys::size_t, 49 | object_name: name, 50 | relaxed_maps: self.relaxed_maps, 51 | relaxed_core_relocs: false, 52 | pin_root_path: ptr::null(), 53 | attach_prog_fd: 0, 54 | kconfig: ptr::null(), 55 | btf_custom_path: ptr::null(), 56 | __bindgen_padding_0: <[u8; 6]>::default(), 57 | __bindgen_padding_1: <[u8; 4]>::default(), 58 | } 59 | } 60 | 61 | pub fn open_file>(&mut self, path: P) -> Result { 62 | // Convert path to a C style pointer 63 | let path_str = path.as_ref().to_str().ok_or_else(|| { 64 | Error::InvalidInput(format!("{} is not valid unicode", path.as_ref().display())) 65 | })?; 66 | let path_c = util::str_to_cstring(path_str)?; 67 | let path_ptr = path_c.as_ptr(); 68 | 69 | // Convert name to a C style pointer 70 | // 71 | // NB: we must hold onto a CString otherwise our pointer dangles 72 | let name = util::str_to_cstring(&self.name)?; 73 | let name_ptr = if !self.name.is_empty() { 74 | name.as_ptr() 75 | } else { 76 | ptr::null() 77 | }; 78 | 79 | let opts = self.opts(name_ptr); 80 | 81 | let obj = 
unsafe { libbpf_sys::bpf_object__open_file(path_ptr, &opts) }; 82 | let err = unsafe { libbpf_sys::libbpf_get_error(obj as *const _) }; 83 | if err != 0 { 84 | return Err(Error::System(err as i32)); 85 | } 86 | 87 | OpenObject::new(obj) 88 | } 89 | 90 | pub fn open_memory>(&mut self, name: T, mem: &[u8]) -> Result { 91 | // Convert name to a C style pointer 92 | // 93 | // NB: we must hold onto a CString otherwise our pointer dangles 94 | let name = util::str_to_cstring(name.as_ref())?; 95 | let name_ptr = if !name.to_bytes().is_empty() { 96 | name.as_ptr() 97 | } else { 98 | ptr::null() 99 | }; 100 | 101 | let opts = self.opts(name_ptr); 102 | 103 | let obj = unsafe { 104 | libbpf_sys::bpf_object__open_mem( 105 | mem.as_ptr() as *const c_void, 106 | mem.len() as libbpf_sys::size_t, 107 | &opts, 108 | ) 109 | }; 110 | let err = unsafe { libbpf_sys::libbpf_get_error(obj as *const _) }; 111 | if err != 0 { 112 | return Err(Error::System(err as i32)); 113 | } 114 | 115 | OpenObject::new(obj) 116 | } 117 | } 118 | 119 | impl Default for ObjectBuilder { 120 | fn default() -> Self { 121 | ObjectBuilder { 122 | name: String::new(), 123 | relaxed_maps: false, 124 | } 125 | } 126 | } 127 | 128 | /// Represents an opened (but not yet loaded) BPF object file. 129 | /// 130 | /// Use this object to access [`OpenMap`]s and [`OpenProgram`]s. 
131 | pub struct OpenObject { 132 | ptr: *mut libbpf_sys::bpf_object, 133 | maps: HashMap, 134 | progs: HashMap, 135 | } 136 | 137 | impl OpenObject { 138 | fn new(ptr: *mut libbpf_sys::bpf_object) -> Result { 139 | let mut obj = OpenObject { 140 | ptr, 141 | maps: HashMap::new(), 142 | progs: HashMap::new(), 143 | }; 144 | 145 | // Populate obj.maps 146 | let mut map: *mut libbpf_sys::bpf_map = std::ptr::null_mut(); 147 | loop { 148 | // Get the pointer to the next BPF map 149 | let next_ptr = unsafe { libbpf_sys::bpf_map__next(map, obj.ptr) }; 150 | if next_ptr.is_null() { 151 | break; 152 | } 153 | 154 | // Get the map name 155 | // bpf_map__name can return null but only if it's passed a null. 156 | // We already know next_ptr is not null. 157 | let name = unsafe { libbpf_sys::bpf_map__name(next_ptr) }; 158 | let name = util::c_ptr_to_string(name)?; 159 | 160 | // Add the map to the hashmap 161 | obj.maps.insert(name, OpenMap::new(next_ptr)); 162 | map = next_ptr; 163 | } 164 | 165 | // Populate obj.progs 166 | let mut prog: *mut libbpf_sys::bpf_program = std::ptr::null_mut(); 167 | loop { 168 | // Get the pointer to the next BPF program 169 | let next_ptr = unsafe { libbpf_sys::bpf_program__next(prog, obj.ptr) }; 170 | if next_ptr.is_null() { 171 | break; 172 | } 173 | 174 | // Get the program name. 175 | // bpf_program__name never returns NULL, so no need to check the pointer. 176 | let name = unsafe { libbpf_sys::bpf_program__name(next_ptr) }; 177 | let name = util::c_ptr_to_string(name)?; 178 | 179 | // Add the program to the hashmap 180 | obj.progs.insert(name, OpenProgram::new(next_ptr)); 181 | prog = next_ptr; 182 | } 183 | 184 | Ok(obj) 185 | } 186 | 187 | /// Takes ownership from pointer. 188 | /// 189 | /// # Safety 190 | /// 191 | /// If `ptr` is unopen or already loaded then further operations on the returned object are 192 | /// undefined. 193 | /// 194 | /// It is not safe to manipulate `ptr` after this operation. 
195 | pub unsafe fn from_ptr(ptr: *mut libbpf_sys::bpf_object) -> Result { 196 | Self::new(ptr) 197 | } 198 | 199 | /// Takes underlying `libbpf_sys::bpf_object` pointer. 200 | pub fn take_ptr(mut self) -> *mut libbpf_sys::bpf_object { 201 | let ptr = self.ptr; 202 | self.ptr = ptr::null_mut(); 203 | ptr 204 | } 205 | 206 | pub fn name(&self) -> Result<&str> { 207 | unsafe { 208 | let ptr = libbpf_sys::bpf_object__name(self.ptr); 209 | let err = libbpf_sys::libbpf_get_error(ptr as *const _); 210 | if err != 0 { 211 | return Err(Error::System(err as i32)); 212 | } 213 | 214 | CStr::from_ptr(ptr) 215 | .to_str() 216 | .map_err(|e| Error::Internal(e.to_string())) 217 | } 218 | } 219 | 220 | /// Get a reference to `OpenMap` with the name `name`, if one exists. 221 | pub fn map>(&self, name: T) -> Option<&OpenMap> { 222 | self.maps.get(name.as_ref()) 223 | } 224 | 225 | /// Get a mutable reference to `OpenMap` with the name `name`, if one exists. 226 | pub fn map_mut>(&mut self, name: T) -> Option<&mut OpenMap> { 227 | self.maps.get_mut(name.as_ref()) 228 | } 229 | 230 | /// Get an iterator over references to all `OpenMap`s. 231 | /// Note that this will include automatically generated .data, .rodata, .bss, and 232 | /// .kconfig maps. 233 | pub fn maps_iter(&self) -> impl Iterator { 234 | self.maps.values() 235 | } 236 | 237 | /// Get an iterator over mutable references to all `OpenMap`s. 238 | /// Note that this will include automatically generated .data, .rodata, .bss, and 239 | /// .kconfig maps. 240 | pub fn maps_iter_mut(&mut self) -> impl Iterator { 241 | self.maps.values_mut() 242 | } 243 | 244 | /// Get a reference to `OpenProgram` with the name `name`, if one exists. 245 | pub fn prog>(&self, name: T) -> Option<&OpenProgram> { 246 | self.progs.get(name.as_ref()) 247 | } 248 | 249 | /// Get a mutable reference to `OpenProgram` with the name `name`, if one exists. 
250 | pub fn prog_mut>(&mut self, name: T) -> Option<&mut OpenProgram> { 251 | self.progs.get_mut(name.as_ref()) 252 | } 253 | 254 | /// Get an iterator over references to all `OpenProgram`s. 255 | pub fn progs_iter(&self) -> impl Iterator { 256 | self.progs.values() 257 | } 258 | 259 | /// Get an iterator over mutable references to all `OpenProgram`s. 260 | pub fn progs_iter_mut(&mut self) -> impl Iterator { 261 | self.progs.values_mut() 262 | } 263 | 264 | /// Load the maps and programs contained in this BPF object into the system. 265 | pub fn load(mut self) -> Result { 266 | let ret = unsafe { libbpf_sys::bpf_object__load(self.ptr) }; 267 | if ret != 0 { 268 | // bpf_object__load() returns errno as negative, so flip 269 | return Err(Error::System(-ret)); 270 | } 271 | 272 | let obj = Object::new(self.ptr)?; 273 | 274 | // Prevent object from being closed once `self` is dropped 275 | self.ptr = ptr::null_mut(); 276 | 277 | Ok(obj) 278 | } 279 | } 280 | 281 | impl Drop for OpenObject { 282 | fn drop(&mut self) { 283 | // `self.ptr` may be null if `load()` was called. This is ok: libbpf noops 284 | unsafe { 285 | libbpf_sys::bpf_object__close(self.ptr); 286 | } 287 | } 288 | } 289 | 290 | /// Represents a loaded BPF object file. 291 | /// 292 | /// An `Object` is logically in charge of all the contained [`Program`]s and [`Map`]s as well as 293 | /// the associated metadata and runtime state that underpins the userspace portions of BPF program 294 | /// execution. As a libbpf-rs user, you must keep the `Object` alive during the entire lifetime 295 | /// of your interaction with anything inside the `Object`. 296 | /// 297 | /// Note that this is an explanation of the motivation -- Rust's lifetime system should already be 298 | /// enforcing this invariant. 
299 | pub struct Object { 300 | ptr: *mut libbpf_sys::bpf_object, 301 | maps: HashMap, 302 | progs: HashMap, 303 | } 304 | 305 | impl Object { 306 | fn new(ptr: *mut libbpf_sys::bpf_object) -> Result { 307 | let mut obj = Object { 308 | ptr, 309 | maps: HashMap::new(), 310 | progs: HashMap::new(), 311 | }; 312 | 313 | // Populate obj.maps 314 | let mut map: *mut libbpf_sys::bpf_map = std::ptr::null_mut(); 315 | loop { 316 | // Get the pointer to the next BPF map 317 | let next_ptr = unsafe { libbpf_sys::bpf_map__next(map, obj.ptr) }; 318 | if next_ptr.is_null() { 319 | break; 320 | } 321 | 322 | // Get the map name 323 | // bpf_map__name can return null but only if it's passed a null. 324 | // We already know next_ptr is not null. 325 | let name = unsafe { libbpf_sys::bpf_map__name(next_ptr) }; 326 | let name = util::c_ptr_to_string(name)?; 327 | 328 | // Get the map def 329 | // bpf_map__def can return null but only if it's passed a null. 330 | // We already know next_ptr is not null. 331 | let def = unsafe { ptr::read(libbpf_sys::bpf_map__def(next_ptr)) }; 332 | 333 | // Get the map fd 334 | let fd = unsafe { libbpf_sys::bpf_map__fd(next_ptr) }; 335 | if fd < 0 { 336 | return Err(Error::System(-fd)); 337 | } 338 | 339 | // Add the map to the hashmap 340 | obj.maps.insert( 341 | name.clone(), 342 | Map::new(fd, name, def.type_, def.key_size, def.value_size, next_ptr), 343 | ); 344 | map = next_ptr; 345 | } 346 | 347 | // Populate obj.progs 348 | let mut prog: *mut libbpf_sys::bpf_program = std::ptr::null_mut(); 349 | loop { 350 | // Get the pointer to the next BPF program 351 | let next_ptr = unsafe { libbpf_sys::bpf_program__next(prog, obj.ptr) }; 352 | if next_ptr.is_null() { 353 | break; 354 | } 355 | 356 | // Get the program name 357 | // bpf_program__name never returns NULL, so no need to check the pointer. 
358 | let name = unsafe { libbpf_sys::bpf_program__name(next_ptr) }; 359 | let name = util::c_ptr_to_string(name)?; 360 | 361 | // Get the program section 362 | // bpf_program__section_name never returns NULL, so no need to check the pointer. 363 | let section = unsafe { libbpf_sys::bpf_program__section_name(next_ptr) }; 364 | let section = util::c_ptr_to_string(section)?; 365 | 366 | // Get the program fd 367 | let fd = unsafe { libbpf_sys::bpf_program__fd(next_ptr) }; 368 | if fd < 0 { 369 | return Err(Error::System(-fd)); 370 | } 371 | 372 | // Add the program to the hashmap 373 | obj.progs 374 | .insert(name.clone(), Program::new(next_ptr, name, section)); 375 | prog = next_ptr; 376 | } 377 | 378 | Ok(obj) 379 | } 380 | 381 | /// Takes ownership from pointer. 382 | /// 383 | /// # Safety 384 | /// 385 | /// If `ptr` is not already loaded then further operations on the returned object are 386 | /// undefined. 387 | /// 388 | /// It is not safe to manipulate `ptr` after this operation. 389 | pub unsafe fn from_ptr(ptr: *mut libbpf_sys::bpf_object) -> Result { 390 | Self::new(ptr) 391 | } 392 | 393 | /// Get a reference to `Map` with the name `name`, if one exists. 394 | pub fn map>(&self, name: T) -> Option<&Map> { 395 | self.maps.get(name.as_ref()) 396 | } 397 | 398 | /// Get a mutable reference to `Map` with the name `name`, if one exists. 399 | pub fn map_mut>(&mut self, name: T) -> Option<&mut Map> { 400 | self.maps.get_mut(name.as_ref()) 401 | } 402 | 403 | /// Get an iterator over references to all `Map`s. 404 | /// Note that this will include automatically generated .data, .rodata, .bss, and 405 | /// .kconfig maps. You may wish to filter this. 406 | pub fn maps_iter(&self) -> impl Iterator { 407 | self.maps.values() 408 | } 409 | 410 | /// Get an iterator over mutable references to all `Map`s. 411 | /// Note that this will include automatically generated .data, .rodata, .bss, and 412 | /// .kconfig maps. You may wish to filter this. 
413 | pub fn maps_iter_mut(&mut self) -> impl Iterator { 414 | self.maps.values_mut() 415 | } 416 | 417 | /// Get a reference to `Program` with the name `name`, if one exists. 418 | pub fn prog>(&self, name: T) -> Option<&Program> { 419 | self.progs.get(name.as_ref()) 420 | } 421 | 422 | /// Get a mutable reference to `Program` with the name `name`, if one exists. 423 | pub fn prog_mut>(&mut self, name: T) -> Option<&mut Program> { 424 | self.progs.get_mut(name.as_ref()) 425 | } 426 | 427 | /// Get an iterator over references to all `Program`s. 428 | pub fn progs_iter(&self) -> impl Iterator { 429 | self.progs.values() 430 | } 431 | 432 | /// Get an iterator over mutable references to all `Program`s. 433 | pub fn progs_iter_mut(&mut self) -> impl Iterator { 434 | self.progs.values_mut() 435 | } 436 | } 437 | 438 | impl Drop for Object { 439 | fn drop(&mut self) { 440 | unsafe { 441 | libbpf_sys::bpf_object__close(self.ptr); 442 | } 443 | } 444 | } 445 | -------------------------------------------------------------------------------- /libbpf-rs/src/map.rs: -------------------------------------------------------------------------------- 1 | use core::ffi::c_void; 2 | use std::convert::TryFrom; 3 | use std::path::Path; 4 | use std::ptr; 5 | 6 | use bitflags::bitflags; 7 | use nix::{errno, unistd}; 8 | use num_enum::TryFromPrimitive; 9 | use strum_macros::Display; 10 | 11 | use crate::*; 12 | 13 | /// Represents a parsed but not yet loaded BPF map. 14 | /// 15 | /// This object exposes operations that need to happen before the map is created. 16 | /// 17 | /// Some methods require working with raw bytes. You may find libraries such as 18 | /// [`plain`](https://crates.io/crates/plain) helpful. 
19 | pub struct OpenMap { 20 | ptr: *mut libbpf_sys::bpf_map, 21 | } 22 | 23 | impl OpenMap { 24 | pub(crate) fn new(ptr: *mut libbpf_sys::bpf_map) -> Self { 25 | OpenMap { ptr } 26 | } 27 | 28 | pub fn set_map_ifindex(&mut self, idx: u32) { 29 | unsafe { libbpf_sys::bpf_map__set_ifindex(self.ptr, idx) }; 30 | } 31 | 32 | pub fn set_initial_value(&mut self, data: &[u8]) -> Result<()> { 33 | let ret = unsafe { 34 | libbpf_sys::bpf_map__set_initial_value( 35 | self.ptr, 36 | data.as_ptr() as *const std::ffi::c_void, 37 | data.len() as libbpf_sys::size_t, 38 | ) 39 | }; 40 | 41 | if ret != 0 { 42 | // Error code is returned negative, flip to positive to match errno 43 | return Err(Error::System(-ret)); 44 | } 45 | 46 | Ok(()) 47 | } 48 | 49 | pub fn set_max_entries(&mut self, count: u32) -> Result<()> { 50 | let ret = unsafe { libbpf_sys::bpf_map__set_max_entries(self.ptr, count) }; 51 | 52 | if ret != 0 { 53 | // Error code is returned negative, flip to positive to match errno 54 | return Err(Error::System(-ret)); 55 | } 56 | 57 | Ok(()) 58 | } 59 | 60 | pub fn set_inner_map_fd(&mut self, inner: &Map) { 61 | unsafe { libbpf_sys::bpf_map__set_inner_map_fd(self.ptr, inner.fd()) }; 62 | } 63 | 64 | /// Reuse an already-pinned map for `self`. 65 | pub fn reuse_pinned_map>(&mut self, path: P) -> Result<()> { 66 | let cstring = util::path_to_cstring(path)?; 67 | 68 | let fd = unsafe { libbpf_sys::bpf_obj_get(cstring.as_ptr()) }; 69 | if fd < 0 { 70 | return Err(Error::System(errno::errno())); 71 | } 72 | 73 | let ret = unsafe { libbpf_sys::bpf_map__reuse_fd(self.ptr, fd) }; 74 | 75 | // Always close `fd` regardless of if `bpf_map__reuse_fd` succeeded or failed 76 | // 77 | // Ignore errors b/c can't really recover from failure 78 | let _ = unistd::close(fd); 79 | 80 | if ret != 0 { 81 | return Err(Error::System(-ret)); 82 | } 83 | 84 | Ok(()) 85 | } 86 | } 87 | 88 | /// Represents a created map. 89 | /// 90 | /// Some methods require working with raw bytes. 
You may find libraries such as 91 | /// [`plain`](https://crates.io/crates/plain) helpful. 92 | pub struct Map { 93 | fd: i32, 94 | name: String, 95 | ty: libbpf_sys::bpf_map_type, 96 | key_size: u32, 97 | value_size: u32, 98 | ptr: *mut libbpf_sys::bpf_map, 99 | } 100 | 101 | impl Map { 102 | pub(crate) fn new( 103 | fd: i32, 104 | name: String, 105 | ty: libbpf_sys::bpf_map_type, 106 | key_size: u32, 107 | value_size: u32, 108 | ptr: *mut libbpf_sys::bpf_map, 109 | ) -> Self { 110 | Map { 111 | fd, 112 | name, 113 | ty, 114 | key_size, 115 | value_size, 116 | ptr, 117 | } 118 | } 119 | 120 | pub fn name(&self) -> &str { 121 | &self.name 122 | } 123 | 124 | /// Returns a file descriptor to the underlying map. 125 | pub fn fd(&self) -> i32 { 126 | self.fd 127 | } 128 | 129 | pub fn map_type(&self) -> MapType { 130 | match MapType::try_from(self.ty) { 131 | Ok(t) => t, 132 | Err(_) => MapType::Unknown, 133 | } 134 | } 135 | 136 | /// Key size in bytes 137 | pub fn key_size(&self) -> u32 { 138 | self.key_size 139 | } 140 | 141 | /// Value size in bytes 142 | pub fn value_size(&self) -> u32 { 143 | self.value_size 144 | } 145 | 146 | /// Return the size of one value including padding for interacting with per-cpu 147 | /// maps. The values are aligned to 8 bytes. 148 | fn percpu_aligned_value_size(&self) -> usize { 149 | let val_size = self.value_size() as usize; 150 | return util::roundup(val_size, 8); 151 | } 152 | 153 | /// Returns the size of the buffer needed for a lookup/update of a per-cpu map. 154 | fn percpu_buffer_size(&self) -> Result { 155 | let aligned_val_size = self.percpu_aligned_value_size(); 156 | let ncpu = util::num_possible_cpus()?; 157 | return Ok(ncpu * aligned_val_size); 158 | } 159 | 160 | /// [Pin](https://facebookmicrosites.github.io/bpf/blog/2018/08/31/object-lifetime.html#bpffs) 161 | /// this map to bpffs. 
162 | pub fn pin>(&mut self, path: P) -> Result<()> { 163 | let path_c = util::path_to_cstring(path)?; 164 | let path_ptr = path_c.as_ptr(); 165 | 166 | let ret = unsafe { libbpf_sys::bpf_map__pin(self.ptr, path_ptr) }; 167 | if ret != 0 { 168 | // Error code is returned negative, flip to positive to match errno 169 | Err(Error::System(-ret)) 170 | } else { 171 | Ok(()) 172 | } 173 | } 174 | 175 | /// [Unpin](https://facebookmicrosites.github.io/bpf/blog/2018/08/31/object-lifetime.html#bpffs) 176 | /// from bpffs 177 | pub fn unpin>(&mut self, path: P) -> Result<()> { 178 | let path_c = util::path_to_cstring(path)?; 179 | let path_ptr = path_c.as_ptr(); 180 | 181 | let ret = unsafe { libbpf_sys::bpf_map__unpin(self.ptr, path_ptr) }; 182 | if ret != 0 { 183 | // Error code is returned negative, flip to positive to match errno 184 | Err(Error::System(-ret)) 185 | } else { 186 | Ok(()) 187 | } 188 | } 189 | 190 | /// Returns map value as `Vec` of `u8`. 191 | /// 192 | /// `key` must have exactly [`Map::key_size()`] elements. 193 | /// 194 | /// If the map is one of the per-cpu data structures, the function [`Map::lookup_percpu()`] 195 | /// must be used. 196 | pub fn lookup(&self, key: &[u8], flags: MapFlags) -> Result>> { 197 | if self.map_type().is_percpu() { 198 | return Err(Error::InvalidInput(format!( 199 | "lookup_percpu() must be used for per-cpu maps (type of the map is {})", 200 | self.map_type(), 201 | ))); 202 | } 203 | 204 | let out_size = self.value_size() as usize; 205 | self.lookup_raw(key, flags, out_size) 206 | } 207 | 208 | /// Returns one value per cpu as `Vec` of `Vec` of `u8` for per per-cpu maps. 209 | /// 210 | /// For normal maps, [`Map::lookup()`] must be used. 
211 | pub fn lookup_percpu(&self, key: &[u8], flags: MapFlags) -> Result>>> { 212 | if !self.map_type().is_percpu() && self.map_type() != MapType::Unknown { 213 | return Err(Error::InvalidInput(format!( 214 | "lookup() must be used for maps that are not per-cpu (type of the map is {})", 215 | self.map_type(), 216 | ))); 217 | } 218 | 219 | let val_size = self.value_size() as usize; 220 | let aligned_val_size = self.percpu_aligned_value_size(); 221 | let out_size = self.percpu_buffer_size()?; 222 | 223 | let raw_res = self.lookup_raw(key, flags, out_size)?; 224 | if let Some(raw_vals) = raw_res { 225 | let mut out = Vec::new(); 226 | for chunk in raw_vals.chunks_exact(aligned_val_size) { 227 | out.push(chunk[..val_size].to_vec()); 228 | } 229 | return Ok(Some(out)); 230 | } else { 231 | return Ok(None); 232 | } 233 | } 234 | 235 | /// Internal function to return a value from a map into a buffer of the given size. 236 | fn lookup_raw(&self, key: &[u8], flags: MapFlags, out_size: usize) -> Result>> { 237 | if key.len() != self.key_size() as usize { 238 | return Err(Error::InvalidInput(format!( 239 | "key_size {} != {}", 240 | key.len(), 241 | self.key_size() 242 | ))); 243 | }; 244 | 245 | let mut out: Vec = Vec::with_capacity(out_size); 246 | 247 | let ret = unsafe { 248 | libbpf_sys::bpf_map_lookup_elem_flags( 249 | self.fd as i32, 250 | key.as_ptr() as *const c_void, 251 | out.as_mut_ptr() as *mut c_void, 252 | flags.bits, 253 | ) 254 | }; 255 | 256 | if ret == 0 { 257 | unsafe { 258 | out.set_len(out_size); 259 | } 260 | Ok(Some(out)) 261 | } else { 262 | let errno = errno::errno(); 263 | if errno::Errno::from_i32(errno) == errno::Errno::ENOENT { 264 | Ok(None) 265 | } else { 266 | Err(Error::System(errno)) 267 | } 268 | } 269 | } 270 | 271 | /// Deletes an element from the map. 272 | /// 273 | /// `key` must have exactly [`Map::key_size()`] elements. 
274 | pub fn delete(&mut self, key: &[u8]) -> Result<()> { 275 | if key.len() != self.key_size() as usize { 276 | return Err(Error::InvalidInput(format!( 277 | "key_size {} != {}", 278 | key.len(), 279 | self.key_size() 280 | ))); 281 | }; 282 | 283 | let ret = unsafe { 284 | libbpf_sys::bpf_map_delete_elem(self.fd as i32, key.as_ptr() as *const c_void) 285 | }; 286 | 287 | if ret == 0 { 288 | Ok(()) 289 | } else { 290 | Err(Error::System(errno::errno())) 291 | } 292 | } 293 | 294 | /// Same as [`Map::lookup()`] except this also deletes the key from the map. 295 | /// 296 | /// Note that this operation is currently only implemented in the kernel for [`MapType::Queue`] 297 | /// and [`MapType::Stack`]. 298 | /// 299 | /// `key` must have exactly [`Map::key_size()`] elements. 300 | pub fn lookup_and_delete(&mut self, key: &[u8]) -> Result>> { 301 | if key.len() != self.key_size() as usize { 302 | return Err(Error::InvalidInput(format!( 303 | "key_size {} != {}", 304 | key.len(), 305 | self.key_size() 306 | ))); 307 | }; 308 | 309 | let mut out: Vec = Vec::with_capacity(self.value_size() as usize); 310 | 311 | let ret = unsafe { 312 | libbpf_sys::bpf_map_lookup_and_delete_elem( 313 | self.fd as i32, 314 | key.as_ptr() as *const c_void, 315 | out.as_mut_ptr() as *mut c_void, 316 | ) 317 | }; 318 | 319 | if ret == 0 { 320 | unsafe { 321 | out.set_len(self.value_size() as usize); 322 | } 323 | Ok(Some(out)) 324 | } else { 325 | let errno = errno::errno(); 326 | if errno::Errno::from_i32(errno) == errno::Errno::ENOENT { 327 | Ok(None) 328 | } else { 329 | Err(Error::System(errno)) 330 | } 331 | } 332 | } 333 | 334 | /// Update an element. 335 | /// 336 | /// `key` must have exactly [`Map::key_size()`] elements. `value` must have exactly 337 | /// [`Map::value_size()`] elements. 338 | /// 339 | /// For per-cpu maps, [`Map::update_percpu()`] must be used. 
340 | pub fn update(&mut self, key: &[u8], value: &[u8], flags: MapFlags) -> Result<()> { 341 | if self.map_type().is_percpu() { 342 | return Err(Error::InvalidInput(format!( 343 | "update_percpu() must be used for per-cpu maps (type of the map is {})", 344 | self.map_type(), 345 | ))); 346 | } 347 | 348 | if value.len() != self.value_size() as usize { 349 | return Err(Error::InvalidInput(format!( 350 | "value_size {} != {}", 351 | value.len(), 352 | self.value_size() 353 | ))); 354 | }; 355 | 356 | self.update_raw(key, value, flags) 357 | } 358 | 359 | /// Update an element in an per-cpu map with one value per cpu. 360 | /// 361 | /// `key` must have exactly [`Map::key_size()`] elements. `value` must have one 362 | /// element per cpu (see [`num_possible_cpus()`]) with exactly [`Map::value_size()`] 363 | /// elements each. 364 | /// 365 | /// For per-cpu maps, [`Map::update_percpu()`] must be used. 366 | pub fn update_percpu( 367 | &mut self, 368 | key: &[u8], 369 | values: &Vec>, 370 | flags: MapFlags, 371 | ) -> Result<()> { 372 | if !self.map_type().is_percpu() && self.map_type() != MapType::Unknown { 373 | return Err(Error::InvalidInput(format!( 374 | "update() must be used for maps that are not per-cpu (type of the map is {})", 375 | self.map_type(), 376 | ))); 377 | } 378 | 379 | if values.len() != num_possible_cpus()? { 380 | return Err(Error::InvalidInput(format!( 381 | "number of values {} != number of cpus {}", 382 | values.len(), 383 | num_possible_cpus()? 
384 | ))); 385 | }; 386 | 387 | let val_size = self.value_size() as usize; 388 | let aligned_val_size = self.percpu_aligned_value_size(); 389 | let buf_size = self.percpu_buffer_size()?; 390 | 391 | let mut value_buf = Vec::new(); 392 | value_buf.resize(buf_size, 0); 393 | 394 | for (i, val) in values.iter().enumerate() { 395 | if val.len() != val_size { 396 | return Err(Error::InvalidInput(format!( 397 | "value size for cpu {} is {} != {}", 398 | i, 399 | val.len(), 400 | val_size 401 | ))); 402 | } 403 | 404 | value_buf[(i * aligned_val_size)..(i * aligned_val_size + val_size)] 405 | .copy_from_slice(val); 406 | } 407 | 408 | self.update_raw(key, &value_buf, flags) 409 | } 410 | 411 | /// Internal function to update a map. This does not check the length of the 412 | /// supplied value. 413 | fn update_raw(&mut self, key: &[u8], value: &[u8], flags: MapFlags) -> Result<()> { 414 | if key.len() != self.key_size() as usize { 415 | return Err(Error::InvalidInput(format!( 416 | "key_size {} != {}", 417 | key.len(), 418 | self.key_size() 419 | ))); 420 | }; 421 | 422 | let ret = unsafe { 423 | libbpf_sys::bpf_map_update_elem( 424 | self.fd as i32, 425 | key.as_ptr() as *const c_void, 426 | value.as_ptr() as *const c_void, 427 | flags.bits, 428 | ) 429 | }; 430 | 431 | if ret == 0 { 432 | Ok(()) 433 | } else { 434 | Err(Error::System(errno::errno())) 435 | } 436 | } 437 | 438 | /// Returns an iterator over keys in this map 439 | /// 440 | /// Note that if the map is not stable (stable meaning no updates or deletes) during iteration, 441 | /// iteration can skip keys, restart from the beginning, or duplicate keys. In other words, 442 | /// iteration becomes unpredictable. 443 | pub fn keys(&self) -> MapKeyIter { 444 | MapKeyIter::new(self, self.key_size()) 445 | } 446 | } 447 | 448 | #[rustfmt::skip] 449 | bitflags! { 450 | /// Flags to configure [`Map`] operations. 
451 | pub struct MapFlags: u64 { 452 | const ANY = 0; 453 | const NO_EXIST = 1; 454 | const EXIST = 1 << 1; 455 | const LOCK = 1 << 2; 456 | } 457 | } 458 | 459 | /// Type of a [`Map`]. Maps to `enum bpf_map_type` in kernel uapi. 460 | // If you add a new per-cpu map, also update `is_percpu`. 461 | #[non_exhaustive] 462 | #[repr(u32)] 463 | #[derive(Clone, TryFromPrimitive, PartialEq, Display)] 464 | pub enum MapType { 465 | Unspec = 0, 466 | Hash, 467 | Array, 468 | ProgArray, 469 | PerfEventArray, 470 | PercpuHash, 471 | PercpuArray, 472 | StackTrace, 473 | CgroupArray, 474 | LruHash, 475 | LruPercpuHash, 476 | LpmTrie, 477 | ArrayOfMaps, 478 | HashOfMaps, 479 | Devmap, 480 | Sockmap, 481 | Cpumap, 482 | Xskmap, 483 | Sockhash, 484 | CgroupStorage, 485 | ReuseportSockarray, 486 | PercpuCgroupStorage, 487 | Queue, 488 | Stack, 489 | SkStorage, 490 | DevmapHash, 491 | StructOps, 492 | RingBuf, 493 | /// We choose to specify our own "unknown" type here b/c it's really up to the kernel 494 | /// to decide if it wants to reject the map. If it accepts it, it just means whoever 495 | /// using this library is a bit out of date. 496 | Unknown = u32::MAX, 497 | } 498 | 499 | impl MapType { 500 | /// Returns if the map is of one of the per-cpu types. 
501 | pub fn is_percpu(&self) -> bool { 502 | match self { 503 | MapType::PercpuArray 504 | | MapType::PercpuHash 505 | | MapType::LruPercpuHash 506 | | MapType::PercpuCgroupStorage => true, 507 | _ => false, 508 | } 509 | } 510 | } 511 | 512 | pub struct MapKeyIter<'a> { 513 | map: &'a Map, 514 | prev: Option>, 515 | next: Vec, 516 | } 517 | 518 | impl<'a> MapKeyIter<'a> { 519 | fn new(map: &'a Map, key_size: u32) -> Self { 520 | Self { 521 | map, 522 | prev: None, 523 | next: vec![0; key_size as usize], 524 | } 525 | } 526 | } 527 | 528 | impl<'a> Iterator for MapKeyIter<'a> { 529 | type Item = Vec; 530 | 531 | fn next(&mut self) -> Option { 532 | let prev = self.prev.as_ref().map_or(ptr::null(), |p| p.as_ptr()); 533 | 534 | let ret = unsafe { 535 | libbpf_sys::bpf_map_get_next_key(self.map.fd(), prev as _, self.next.as_mut_ptr() as _) 536 | }; 537 | if ret != 0 { 538 | None 539 | } else { 540 | self.prev = Some(self.next.clone()); 541 | Some(self.next.clone()) 542 | } 543 | } 544 | } 545 | -------------------------------------------------------------------------------- /libbpf-rs/tests/test.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashSet; 2 | use std::fs; 3 | use std::io::Read; 4 | use std::path::{Path, PathBuf}; 5 | use std::sync::mpsc::channel; 6 | use std::time::Duration; 7 | 8 | use nix::errno; 9 | use plain::Plain; 10 | use scopeguard::defer; 11 | 12 | use libbpf_rs::{num_possible_cpus, Iter, MapFlags, Object, ObjectBuilder}; 13 | 14 | fn get_test_object_path(filename: &str) -> PathBuf { 15 | let mut path = PathBuf::new(); 16 | // env!() macro fails at compile time if var not found 17 | path.push(env!("CARGO_MANIFEST_DIR")); 18 | path.push("tests/bin"); 19 | path.push(filename); 20 | path 21 | } 22 | 23 | fn get_test_object(filename: &str) -> Object { 24 | let obj_path = get_test_object_path(filename); 25 | let mut builder = ObjectBuilder::default(); 26 | // Invoke cargo with: 27 | // 28 
| // cargo test -- --nocapture 29 | // 30 | // To get all the output 31 | builder.debug(true); 32 | builder 33 | .open_file(obj_path) 34 | .expect("failed to open object") 35 | .load() 36 | .expect("failed to load object") 37 | } 38 | 39 | fn bump_rlimit_mlock() { 40 | let rlimit = libc::rlimit { 41 | rlim_cur: 128 << 20, 42 | rlim_max: 128 << 20, 43 | }; 44 | 45 | let ret = unsafe { libc::setrlimit(libc::RLIMIT_MEMLOCK, &rlimit) }; 46 | assert_eq!( 47 | ret, 48 | 0, 49 | "Setting RLIMIT_MEMLOCK failed with errno: {}", 50 | errno::errno() 51 | ); 52 | } 53 | 54 | #[test] 55 | fn test_object_build_and_load() { 56 | bump_rlimit_mlock(); 57 | 58 | get_test_object("runqslower.bpf.o"); 59 | } 60 | 61 | #[test] 62 | fn test_object_build_from_memory() { 63 | let obj_path = get_test_object_path("runqslower.bpf.o"); 64 | let contents = fs::read(obj_path).expect("failed to read object file"); 65 | let mut builder = ObjectBuilder::default(); 66 | let obj = builder 67 | .open_memory("memory name", &contents) 68 | .expect("failed to build object"); 69 | let name = obj.name().expect("failed to get object name"); 70 | assert!(name == "memory name"); 71 | } 72 | 73 | #[test] 74 | fn test_object_name() { 75 | let obj_path = get_test_object_path("runqslower.bpf.o"); 76 | let mut builder = ObjectBuilder::default(); 77 | builder.name("test name"); 78 | let obj = builder.open_file(obj_path).expect("failed to build object"); 79 | let obj_name = obj.name().expect("failed to get object name"); 80 | assert!(obj_name == "test name"); 81 | } 82 | 83 | #[test] 84 | fn test_object_maps() { 85 | bump_rlimit_mlock(); 86 | 87 | let obj = get_test_object("runqslower.bpf.o"); 88 | obj.map("start").expect("failed to find map"); 89 | obj.map("events").expect("failed to find map"); 90 | assert!(obj.map("asdf").is_none()); 91 | } 92 | 93 | #[test] 94 | fn test_object_maps_iter() { 95 | bump_rlimit_mlock(); 96 | 97 | let obj = get_test_object("runqslower.bpf.o"); 98 | for map in obj.maps_iter() { 99 | 
eprintln!("{}", map.name()); 100 | } 101 | // This will include .rodata, so our expected count is 3, not 2 102 | assert!(obj.maps_iter().count() == 3); 103 | } 104 | 105 | #[test] 106 | fn test_object_map_key_value_size() { 107 | bump_rlimit_mlock(); 108 | 109 | let mut obj = get_test_object("runqslower.bpf.o"); 110 | let start = obj.map_mut("start").expect("failed to find map"); 111 | 112 | assert!(start.lookup(&[1, 2, 3, 4, 5], MapFlags::empty()).is_err()); 113 | assert!(start.delete(&[1]).is_err()); 114 | assert!(start.lookup_and_delete(&[1, 2, 3, 4, 5]).is_err()); 115 | assert!(start 116 | .update(&[1, 2, 3, 4, 5], &[1], MapFlags::empty()) 117 | .is_err()); 118 | } 119 | 120 | #[test] 121 | fn test_object_percpu_lookup() { 122 | bump_rlimit_mlock(); 123 | 124 | let mut obj = get_test_object("percpu_map.bpf.o"); 125 | let map = obj.map_mut("percpu_map").expect("failed to find map"); 126 | 127 | let res = map 128 | .lookup_percpu(&(0 as u32).to_ne_bytes(), MapFlags::ANY) 129 | .expect("failed to lookup") 130 | .expect("failed to find value for key"); 131 | 132 | assert_eq!( 133 | res.len(), 134 | num_possible_cpus().expect("must be one value per cpu") 135 | ); 136 | assert_eq!(res[0].len(), std::mem::size_of::()); 137 | } 138 | 139 | #[test] 140 | fn test_object_percpu_invalid_lookup_fn() { 141 | bump_rlimit_mlock(); 142 | 143 | let mut obj = get_test_object("percpu_map.bpf.o"); 144 | let map = obj.map_mut("percpu_map").expect("failed to find map"); 145 | 146 | assert!(map 147 | .lookup(&(0 as u32).to_ne_bytes(), MapFlags::ANY) 148 | .is_err()); 149 | } 150 | 151 | #[test] 152 | fn test_object_percpu_update() { 153 | bump_rlimit_mlock(); 154 | 155 | let mut obj = get_test_object("percpu_map.bpf.o"); 156 | let map = obj.map_mut("percpu_map").expect("failed to find map"); 157 | let key = (0 as u32).to_ne_bytes(); 158 | 159 | let mut vals: Vec> = Vec::new(); 160 | for i in 0..num_possible_cpus().unwrap() { 161 | vals.push((i as u32).to_ne_bytes().to_vec()); 162 | } 
163 | 164 | map.update_percpu(&key, &vals, MapFlags::ANY) 165 | .expect("failed to update map"); 166 | 167 | let res = map 168 | .lookup_percpu(&key, MapFlags::ANY) 169 | .expect("failed to lookup") 170 | .expect("failed to find value for key"); 171 | 172 | assert_eq!(vals, res); 173 | } 174 | 175 | #[test] 176 | fn test_object_percpu_invalid_update_fn() { 177 | bump_rlimit_mlock(); 178 | 179 | let mut obj = get_test_object("percpu_map.bpf.o"); 180 | let map = obj.map_mut("percpu_map").expect("failed to find map"); 181 | let key = (0 as u32).to_ne_bytes(); 182 | 183 | let val = (1 as u32).to_ne_bytes().to_vec(); 184 | 185 | assert!(map.update(&key, &val, MapFlags::ANY).is_err()); 186 | } 187 | 188 | #[test] 189 | fn test_object_percpu_lookup_update() { 190 | bump_rlimit_mlock(); 191 | 192 | let mut obj = get_test_object("percpu_map.bpf.o"); 193 | let map = obj.map_mut("percpu_map").expect("failed to find map"); 194 | let key = (0 as u32).to_ne_bytes(); 195 | 196 | let mut res = map 197 | .lookup_percpu(&key, MapFlags::ANY) 198 | .expect("failed to lookup") 199 | .expect("failed to find value for key"); 200 | 201 | for e in res.iter_mut() { 202 | e[0] &= 0xf0; 203 | } 204 | 205 | map.update_percpu(&key, &res, MapFlags::ANY) 206 | .expect("failed to update after first lookup"); 207 | 208 | let res2 = map 209 | .lookup_percpu(&key, MapFlags::ANY) 210 | .expect("failed to lookup") 211 | .expect("failed to find value for key"); 212 | 213 | assert_eq!(res, res2); 214 | } 215 | 216 | #[test] 217 | fn test_object_map_empty_lookup() { 218 | bump_rlimit_mlock(); 219 | 220 | let obj = get_test_object("runqslower.bpf.o"); 221 | let start = obj.map("start").expect("failed to find map"); 222 | 223 | assert!(start 224 | .lookup(&[1, 2, 3, 4], MapFlags::empty()) 225 | .expect("err in map lookup") 226 | .is_none()); 227 | } 228 | 229 | #[test] 230 | fn test_object_map_mutation() { 231 | bump_rlimit_mlock(); 232 | 233 | let mut obj = get_test_object("runqslower.bpf.o"); 234 | let 
start = obj.map_mut("start").expect("failed to find map"); 235 | 236 | start 237 | .update(&[1, 2, 3, 4], &[1, 2, 3, 4, 5, 6, 7, 8], MapFlags::empty()) 238 | .expect("failed to write"); 239 | let val = start 240 | .lookup(&[1, 2, 3, 4], MapFlags::empty()) 241 | .expect("failed to read map") 242 | .expect("failed to find key"); 243 | assert_eq!(val.len(), 8); 244 | assert_eq!(val, &[1, 2, 3, 4, 5, 6, 7, 8]); 245 | 246 | start.delete(&[1, 2, 3, 4]).expect("failed to delete key"); 247 | 248 | assert!(start 249 | .lookup(&[1, 2, 3, 4], MapFlags::empty()) 250 | .expect("failed to read map") 251 | .is_none()); 252 | } 253 | 254 | #[test] 255 | fn test_object_map_lookup_flags() { 256 | bump_rlimit_mlock(); 257 | 258 | let mut obj = get_test_object("runqslower.bpf.o"); 259 | let start = obj.map_mut("start").expect("failed to find map"); 260 | 261 | start 262 | .update(&[1, 2, 3, 4], &[1, 2, 3, 4, 5, 6, 7, 8], MapFlags::NO_EXIST) 263 | .expect("failed to write"); 264 | assert!(start 265 | .update(&[1, 2, 3, 4], &[1, 2, 3, 4, 5, 6, 7, 8], MapFlags::NO_EXIST) 266 | .is_err()); 267 | } 268 | 269 | #[test] 270 | fn test_object_map_key_iter() { 271 | bump_rlimit_mlock(); 272 | 273 | let mut obj = get_test_object("runqslower.bpf.o"); 274 | 275 | let start = obj.map_mut("start").expect("failed to find map"); 276 | 277 | let key1 = vec![1, 2, 3, 4]; 278 | let key2 = vec![1, 2, 3, 5]; 279 | let key3 = vec![1, 2, 3, 6]; 280 | 281 | start 282 | .update(&key1, &[1, 2, 3, 4, 5, 6, 7, 8], MapFlags::empty()) 283 | .expect("failed to write"); 284 | start 285 | .update(&key2, &[1, 2, 3, 4, 5, 6, 7, 8], MapFlags::empty()) 286 | .expect("failed to write"); 287 | start 288 | .update(&key3, &[1, 2, 3, 4, 5, 6, 7, 8], MapFlags::empty()) 289 | .expect("failed to write"); 290 | 291 | let mut keys = HashSet::new(); 292 | for key in start.keys() { 293 | keys.insert(key); 294 | } 295 | assert_eq!(keys.len(), 3); 296 | assert!(keys.contains(&key1)); 297 | assert!(keys.contains(&key2)); 298 | 
assert!(keys.contains(&key3)); 299 | } 300 | 301 | #[test] 302 | fn test_object_map_key_iter_empty() { 303 | bump_rlimit_mlock(); 304 | 305 | let obj = get_test_object("runqslower.bpf.o"); 306 | let start = obj.map("start").expect("failed to find map"); 307 | 308 | let mut count = 0; 309 | for _ in start.keys() { 310 | count += 1; 311 | } 312 | assert_eq!(count, 0); 313 | } 314 | 315 | #[test] 316 | fn test_object_map_pin() { 317 | bump_rlimit_mlock(); 318 | 319 | let mut obj = get_test_object("runqslower.bpf.o"); 320 | let map = obj.map_mut("start").expect("failed to find map"); 321 | 322 | let path = "/sys/fs/bpf/mymap"; 323 | 324 | // Unpinning a unpinned map should be an error 325 | assert!(map.unpin(path).is_err()); 326 | assert!(!Path::new(path).exists()); 327 | 328 | // Pin and unpin should be successful 329 | map.pin(path).expect("failed to pin map"); 330 | assert!(Path::new(path).exists()); 331 | map.unpin(path).expect("failed to unpin map"); 332 | assert!(!Path::new(path).exists()); 333 | } 334 | 335 | #[test] 336 | fn test_object_programs() { 337 | bump_rlimit_mlock(); 338 | 339 | let obj = get_test_object("runqslower.bpf.o"); 340 | obj.prog("handle__sched_wakeup") 341 | .expect("failed to find program"); 342 | obj.prog("handle__sched_wakeup_new") 343 | .expect("failed to find program"); 344 | obj.prog("handle__sched_switch") 345 | .expect("failed to find program"); 346 | assert!(obj.prog("asdf").is_none()); 347 | } 348 | 349 | #[test] 350 | fn test_object_programs_iter_mut() { 351 | bump_rlimit_mlock(); 352 | 353 | let obj = get_test_object("runqslower.bpf.o"); 354 | assert!(obj.progs_iter().count() == 3); 355 | } 356 | 357 | #[test] 358 | fn test_object_program_pin() { 359 | bump_rlimit_mlock(); 360 | 361 | let mut obj = get_test_object("runqslower.bpf.o"); 362 | let prog = obj 363 | .prog_mut("handle__sched_wakeup") 364 | .expect("failed to find program"); 365 | 366 | let path = "/sys/fs/bpf/myprog"; 367 | 368 | // Unpinning a unpinned prog should be 
an error 369 | assert!(prog.unpin(path).is_err()); 370 | assert!(!Path::new(path).exists()); 371 | 372 | // Pin should be successful 373 | prog.pin(path).expect("failed to pin prog"); 374 | assert!(Path::new(path).exists()); 375 | 376 | // Backup cleanup method in case test errors 377 | defer! { 378 | let _ = fs::remove_file(path); 379 | } 380 | 381 | // Unpin should be successful 382 | prog.unpin(path).expect("failed to unpin prog"); 383 | assert!(!Path::new(path).exists()); 384 | } 385 | 386 | #[test] 387 | fn test_object_link_pin() { 388 | bump_rlimit_mlock(); 389 | 390 | let mut obj = get_test_object("runqslower.bpf.o"); 391 | let prog = obj 392 | .prog_mut("handle__sched_wakeup") 393 | .expect("failed to find program"); 394 | let mut link = prog.attach().expect("failed to attach prog"); 395 | 396 | let path = "/sys/fs/bpf/mylink"; 397 | 398 | // Unpinning a unpinned prog should be an error 399 | assert!(link.unpin().is_err()); 400 | assert!(!Path::new(path).exists()); 401 | 402 | // Pin should be successful 403 | link.pin(path).expect("failed to pin prog"); 404 | assert!(Path::new(path).exists()); 405 | 406 | // Backup cleanup method in case test errors 407 | defer! 
{ 408 | let _ = fs::remove_file(path); 409 | } 410 | 411 | // Unpin should be successful 412 | link.unpin().expect("failed to unpin prog"); 413 | assert!(!Path::new(path).exists()); 414 | } 415 | 416 | #[test] 417 | fn test_object_reuse_pined_map() { 418 | bump_rlimit_mlock(); 419 | 420 | let path = "/sys/fs/bpf/mymap"; 421 | let key = vec![1, 2, 3, 4]; 422 | let val = vec![1, 2, 3, 4, 5, 6, 7, 8]; 423 | 424 | // Pin a map 425 | { 426 | let mut obj = get_test_object("runqslower.bpf.o"); 427 | let map = obj.map_mut("start").expect("failed to find map"); 428 | 429 | map.update(&key, &val, MapFlags::empty()) 430 | .expect("failed to write"); 431 | 432 | // Pin map 433 | map.pin(path).expect("failed to pin map"); 434 | assert!(Path::new(path).exists()); 435 | } 436 | 437 | // Backup cleanup method in case test errors somewhere 438 | defer! { 439 | let _ = fs::remove_file(path); 440 | } 441 | 442 | // Reuse the pinned map 443 | let obj_path = get_test_object_path("runqslower.bpf.o"); 444 | let mut builder = ObjectBuilder::default(); 445 | builder.debug(true); 446 | let mut open_obj = builder.open_file(obj_path).expect("failed to open object"); 447 | 448 | let start = open_obj.map_mut("start").expect("failed to find map"); 449 | assert!(start.reuse_pinned_map("/asdf").is_err()); 450 | start.reuse_pinned_map(path).expect("failed to reuse map"); 451 | 452 | let mut obj = open_obj.load().expect("Failed to load object"); 453 | let reused_map = obj.map_mut("start").expect("failed to find map"); 454 | 455 | let found_val = reused_map 456 | .lookup(&key, MapFlags::empty()) 457 | .expect("failed to read map") 458 | .expect("failed to find key"); 459 | assert_eq!(&found_val, &val); 460 | 461 | // Cleanup 462 | reused_map.unpin(path).expect("failed to unpin map"); 463 | assert!(!Path::new(path).exists()); 464 | } 465 | 466 | #[test] 467 | fn test_object_ringbuf() { 468 | bump_rlimit_mlock(); 469 | 470 | let mut obj = get_test_object("ringbuf.bpf.o"); 471 | let prog = obj 472 | 
.prog_mut("handle__sys_enter_getpid") 473 | .expect("failed to find program"); 474 | let _link = prog.attach().expect("failed to attach prog"); 475 | 476 | static mut V1: i32 = 0; 477 | static mut V2: i32 = 0; 478 | 479 | fn callback1(data: &[u8]) -> i32 { 480 | let mut value: i32 = 0; 481 | plain::copy_from_bytes(&mut value, data).expect("Wrong size"); 482 | 483 | unsafe { 484 | V1 = value; 485 | } 486 | 487 | 0 488 | } 489 | 490 | fn callback2(data: &[u8]) -> i32 { 491 | let mut value: i32 = 0; 492 | plain::copy_from_bytes(&mut value, data).expect("Wrong size"); 493 | 494 | unsafe { 495 | V2 = value; 496 | } 497 | 498 | 0 499 | } 500 | 501 | // Test trying to build without adding any ringbufs 502 | // Can't use expect_err here since RingBuffer does not implement Debug 503 | let builder = libbpf_rs::RingBufferBuilder::new(); 504 | assert!( 505 | builder.build().is_err(), 506 | "Should not be able to build without adding at least one ringbuf" 507 | ); 508 | 509 | // Test building with multiple map objects 510 | let mut builder = libbpf_rs::RingBufferBuilder::new(); 511 | 512 | // Add a first map and callback 513 | let map1 = obj.map("ringbuf1").expect("Failed to get ringbuf1 map"); 514 | 515 | builder.add(map1, callback1).expect("Failed to add ringbuf"); 516 | 517 | // Add a second map and callback 518 | let map2 = obj.map("ringbuf2").expect("Failed to get ringbuf2 map"); 519 | 520 | builder.add(map2, callback2).expect("Failed to add ringbuf"); 521 | 522 | let mgr = builder.build().expect("Failed to build"); 523 | 524 | // Call getpid to ensure the BPF program runs 525 | unsafe { libc::getpid() }; 526 | 527 | // This should result in both callbacks being called 528 | mgr.consume().expect("Failed to consume ringbuf"); 529 | 530 | // Our values should both reflect that the callbacks have been called 531 | unsafe { assert_eq!(V1, 1) }; 532 | unsafe { assert_eq!(V2, 2) }; 533 | 534 | // Reset both values 535 | unsafe { V1 = 0 }; 536 | unsafe { V2 = 0 }; 537 | 538 | // 
Call getpid to ensure the BPF program runs 539 | unsafe { libc::getpid() }; 540 | 541 | // This should result in both callbacks being called 542 | mgr.poll(Duration::from_millis(100)) 543 | .expect("Failed to poll ringbuf"); 544 | 545 | // Our values should both reflect that the callbacks have been called 546 | unsafe { assert_eq!(V1, 1) }; 547 | unsafe { assert_eq!(V2, 2) }; 548 | } 549 | 550 | #[test] 551 | fn test_object_ringbuf_closure() { 552 | bump_rlimit_mlock(); 553 | 554 | let mut obj = get_test_object("ringbuf.bpf.o"); 555 | let prog = obj 556 | .prog_mut("handle__sys_enter_getpid") 557 | .expect("failed to find program"); 558 | let _link = prog.attach().expect("failed to attach prog"); 559 | 560 | let (sender1, receiver1) = channel(); 561 | let callback1 = move |data: &[u8]| -> i32 { 562 | let mut value: i32 = 0; 563 | plain::copy_from_bytes(&mut value, data).expect("Wrong size"); 564 | 565 | sender1.send(value).expect("Failed to send value"); 566 | 567 | 0 568 | }; 569 | 570 | let (sender2, receiver2) = channel(); 571 | let callback2 = move |data: &[u8]| -> i32 { 572 | let mut value: i32 = 0; 573 | plain::copy_from_bytes(&mut value, data).expect("Wrong size"); 574 | 575 | sender2.send(value).expect("Failed to send value"); 576 | 577 | 0 578 | }; 579 | 580 | // Test trying to build without adding any ringbufs 581 | // Can't use expect_err here since RingBuffer does not implement Debug 582 | let builder = libbpf_rs::RingBufferBuilder::new(); 583 | assert!( 584 | builder.build().is_err(), 585 | "Should not be able to build without adding at least one ringbuf" 586 | ); 587 | 588 | // Test building with multiple map objects 589 | let mut builder = libbpf_rs::RingBufferBuilder::new(); 590 | 591 | // Add a first map and callback 592 | let map1 = obj.map("ringbuf1").expect("Failed to get ringbuf1 map"); 593 | 594 | builder.add(map1, callback1).expect("Failed to add ringbuf"); 595 | 596 | // Add a second map and callback 597 | let map2 = 
obj.map("ringbuf2").expect("Failed to get ringbuf2 map"); 598 | 599 | builder.add(map2, callback2).expect("Failed to add ringbuf"); 600 | 601 | let mgr = builder.build().expect("Failed to build"); 602 | 603 | // Call getpid to ensure the BPF program runs 604 | unsafe { libc::getpid() }; 605 | 606 | // This should result in both callbacks being called 607 | mgr.consume().expect("Failed to consume ringbuf"); 608 | 609 | let v1 = receiver1.recv().expect("Failed to receive value"); 610 | let v2 = receiver2.recv().expect("Failed to receive value"); 611 | 612 | assert_eq!(v1, 1); 613 | assert_eq!(v2, 2); 614 | } 615 | 616 | #[test] 617 | fn test_object_task_iter() { 618 | bump_rlimit_mlock(); 619 | 620 | let mut obj = get_test_object("taskiter.bpf.o"); 621 | let prog = obj.prog_mut("dump_pid").expect("Failed to find program"); 622 | let link = prog.attach().expect("Failed to attach prog"); 623 | let mut iter = Iter::new(&link).expect("Failed to create iterator"); 624 | 625 | #[repr(C)] 626 | #[derive(Clone, Copy)] 627 | struct IndexPidPair { 628 | i: u32, 629 | pid: i32, 630 | } 631 | 632 | unsafe impl Plain for IndexPidPair {} 633 | 634 | let mut buf = Vec::new(); 635 | let bytes_read = iter 636 | .read_to_end(&mut buf) 637 | .expect("Failed to read from iterator"); 638 | 639 | assert!(bytes_read > 0); 640 | assert_eq!(bytes_read % std::mem::size_of::(), 0); 641 | let items: &[IndexPidPair] = 642 | plain::slice_from_bytes(buf.as_slice()).expect("Input slice cannot satisfy length"); 643 | 644 | assert!(!items.is_empty()); 645 | assert_eq!(items[0].i, 0); 646 | assert!(items.windows(2).all(|w| w[0].i + 1 == w[1].i)); 647 | // Check for init 648 | assert!(items.iter().any(|&item| item.pid == 1)); 649 | } 650 | --------------------------------------------------------------------------------