├── .github └── workflows │ └── ci.yml ├── .gitignore ├── CHANGELOG.md ├── Cargo.toml ├── LICENSE ├── README.md ├── bench ├── .gitignore ├── Cargo.toml ├── README.md ├── benches │ └── min.rs └── src │ └── lib.rs ├── examples ├── dev1_to_dev2.rs ├── hello_xdp.rs ├── setup │ ├── mod.rs │ ├── util.rs │ └── veth_setup.rs └── shared_umem.rs ├── run_all_tests.sh ├── src ├── config │ ├── mod.rs │ ├── socket.rs │ └── umem.rs ├── lib.rs ├── ring.rs ├── socket │ ├── fd.rs │ ├── mod.rs │ ├── rx_queue.rs │ └── tx_queue.rs ├── umem │ ├── comp_queue.rs │ ├── fill_queue.rs │ ├── frame │ │ ├── cursor.rs │ │ └── mod.rs │ ├── mem │ │ ├── mmap.rs │ │ └── mod.rs │ └── mod.rs └── util.rs └── tests ├── comp_queue_tests.rs ├── fill_queue_tests.rs ├── rx_queue_tests.rs ├── setup ├── mod.rs ├── util.rs └── veth_setup.rs ├── tx_queue_tests.rs └── umem_tests.rs /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | # Based on https://github.com/actions-rs/meta/blob/master/recipes/quickstart.md 2 | name: CI 3 | 4 | on: 5 | push: 6 | branches: [ master ] 7 | pull_request: 8 | branches: [ master ] 9 | 10 | env: 11 | CARGO_TERM_COLOR: always 12 | 13 | jobs: 14 | check: 15 | name: Check 16 | runs-on: ubuntu-latest 17 | steps: 18 | - uses: actions/checkout@v2 19 | - run: | 20 | sudo apt update 21 | sudo apt install clang llvm gcc-multilib libelf-dev libpcap-dev build-essential 22 | - uses: actions-rs/toolchain@v1 23 | with: 24 | profile: minimal 25 | toolchain: stable 26 | override: true 27 | - uses: actions-rs/cargo@v1 28 | with: 29 | command: check 30 | 31 | test: 32 | name: Test 33 | runs-on: ubuntu-latest 34 | steps: 35 | - uses: actions/checkout@v2 36 | - run: | 37 | sudo apt update 38 | sudo apt install clang llvm gcc-multilib libelf-dev libpcap-dev build-essential 39 | - uses: actions-rs/toolchain@v1 40 | with: 41 | profile: minimal 42 | toolchain: stable 43 | override: true 44 | - uses: actions-rs/cargo@v1 45 | with: 46 | command: build 47 | args: --tests 48 | - run: sudo ./run_all_tests.sh 49 | 50 | miri: 51 | name: Miri 52 | runs-on: ubuntu-latest 53 | steps: 54 | - uses: actions/checkout@v2 55 | - run: | 56 | sudo apt update 57 | sudo apt install clang llvm gcc-multilib libelf-dev libpcap-dev build-essential 58 | - uses: actions-rs/toolchain@v1 59 | with: 60 | profile: minimal 61 | toolchain: nightly 62 | override: true 63 | - run: | 64 | MIRI_NIGHTLY=nightly-$(curl -s https://rust-lang.github.io/rustup-components-history/x86_64-unknown-linux-gnu/miri) 65 | rustup set profile minimal 66 | rustup override set "$MIRI_NIGHTLY" 67 | rustup component add miri 68 | - uses: actions-rs/cargo@v1 69 | with: 70 | command: miri 71 | args: test --lib 72 | 73 | fmt: 74 | name: Rustfmt 75 | runs-on: ubuntu-latest 76 | steps: 77 | - uses: actions/checkout@v2 78 | - uses: actions-rs/toolchain@v1 79 | with: 80 | profile: minimal 81 | toolchain: stable 82 | override: true 83 | components: rustfmt 84 | - uses: actions-rs/cargo@v1 85 | with: 86 | command: fmt 87 | args: --all -- --check 88 | 89 | clippy: 90 | name: Clippy 91 | runs-on: ubuntu-latest 92 | steps: 93 | - uses: actions/checkout@v2 94 | - run: | 95 | sudo apt update 96 | sudo apt install clang llvm gcc-multilib libelf-dev libpcap-dev build-essential 97 | - uses: actions-rs/toolchain@v1 98 | with: 99 | profile: minimal 100 | toolchain: stable 101 | override: true 102 | components: clippy 103 | - uses: actions-rs/cargo@v1 104 | with: 105 | command: clippy 106 | args: -- -D warnings 107 | 
-------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | Cargo.lock 3 | perf* 4 | *.svg 5 | .idea -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | ## Unreleased 4 | 5 | ## [0.7.0] - 2025-04-11 6 | 7 | ## Fixed 8 | - add missing lifetime to `umem::frame::Data::contents` 9 | - in the `dev1_to_dev2` example, use the sender completion queue size 10 | to calculate sender frame count 11 | 12 | ## Changed 13 | - bump dependencies 14 | 15 | ## [0.6.1] - 2024-05-19 16 | 17 | ## Changed 18 | - updated example in readme 19 | 20 | ## [0.6.0] - 2024-05-19 21 | 22 | ## Changed 23 | - use `libxdp-sys` instead of `libbpf-sys` 24 | 25 | ## [0.5.0] - 2022-10-18 26 | 27 | ## Changed 28 | - bump `libbpf-sys` version 29 | 30 | ## [0.4.1] - 2022-03-10 31 | 32 | ## Added 33 | - provide `FrameDesc` with a `Default` impl to make generating empty 34 | descs for rx simpler 35 | 36 | ## Fixed 37 | - negate error codes when calling `io::Error::from_raw_os_error` 38 | - some `libc` calls just return `-1` on error, not an informative 39 | error code so in these cases call `io::Error::last_os_error()` 40 | instead of `io::Error::from_raw_os_error(err)`, where `err` is 41 | always equal to `-1`... 42 | 43 | ## [0.4.0] - 2022-02-09 44 | 45 | ## Added 46 | - add `contents_mut` to `{Data, Headroom}Mut`, along with other 47 | convenience traits (`{As, Borrow, Deref}{Mut}`) 48 | 49 | ## Changed 50 | - update `{Data, Headroom}Mut::cursor` docs to clarify when `{Data, 51 | Headroom}Mut::contents_mut` might be more appropriate 52 | - more colour to safety section of `Umem::frame` and `Umem::frame_mut` 53 | indicating why using the frame desc of another UMEM might be 54 | problematic 55 | 56 | ## [0.3.0] - 2022-01-17 57 | 58 | ## Added 59 | - support shared UMEM 60 | - support retrieving XDP statistics 61 | - new frame level structs to allow more granular UMEM access along 62 | with clearer separation between headroom and packet data. Includes a 63 | cursor for convenient writing 64 | - config builders and add extra types to enforce restrictions on 65 | certain values / sizes (e.g queue sizes) 66 | 67 | ## Changed 68 | - bump libs, e.g. `libbpf-sys` to 0.6.0-1 69 | 70 | ## Removed 71 | - got rid of lifetimes by packaging the various queues with an `Arc`'d 72 | UMEM or socket where needed to ensure they don't outlive what they 73 | depend on. Shouldn't cause any slowdown in the single threaded case 74 | since the `Arc`s aren't dereferenced in the fast path 75 | 76 | ## [0.2.4] - 2021-07-10 77 | 78 | ## Changes 79 | - expose the socket file descriptor on the `Fd` struct to make it 80 | possible to register the socket manually 81 | - bump libbpf-sys to version 0.4 82 | 83 | ## [0.2.3] - 2021-06-09 84 | 85 | ## Changed 86 | - added CI, fixed docs 87 | 88 | ## [0.2.2] - 2020-05-25 89 | 90 | ## Changed 91 | - bumped lib versions, libbpf-sys specifically 92 | 93 | ## [0.2.1] - 2020-01-29 94 | 95 | ### Changed 96 | - bumped libbpf-sys version to 0.3 97 | - fixed docs, wasn't showing some stuff since the structs/enums 98 | weren't exposed 99 | 100 | ## [0.2.0] - 2021-01-17 101 | Breaking change 102 | 103 | ### Changed 104 | - Changed the APIs for the UMEM and socket to be `unsafe` where 105 | required. 
It's possible in a number of locations to get into a race 106 | with the kernel for a bit of shared memory, so tried to make those 107 | areas clearer. 108 | - Can now set the `addr` on `FrameDesc` manually, previously had to go 109 | through the library. 110 | - Cleared up examples and hopefully made them a bit more illustrative. 111 | 112 | ### Added 113 | - A `bench` sub-project, work on which is ongoing. 114 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "xsk-rs" 3 | version = "0.7.0" 4 | authors = ["Douglas Gray "] 5 | edition = "2018" 6 | description = "Rust bindings for Linux AF_XDP sockets" 7 | license = "MIT" 8 | repository = "https://github.com/DouglasGray/xsk-rs" 9 | readme = "README.md" 10 | keywords = ["AF_XDP", "XSK", "eBPF", "XDP"] 11 | 12 | [dependencies] 13 | bitflags = "2.9.0" 14 | cfg-if = "1.0.0" 15 | libc = "0.2.171" 16 | libxdp-sys = "0.2.1" 17 | log = "0.4.27" 18 | 19 | [dev-dependencies] 20 | anyhow = "1.0.97" 21 | crossbeam-channel = "0.5.15" 22 | ctrlc = "3.4.6" 23 | env_logger = "0.11.8" 24 | etherparse = "0.17.0" 25 | futures = "0.3.31" 26 | rand = "0.9.0" 27 | rtnetlink = "0.14.1" 28 | serial_test = "3.2.0" 29 | structopt = "0.3.26" 30 | 31 | [dev-dependencies.tokio] 32 | version = "1.44.2" 33 | default-features = false 34 | features = ["rt-multi-thread", "macros", "sync", "signal", "time"] 35 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2020 Douglas Gray 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 4 | 5 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 6 | 7 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # xsk-rs 2 | 3 | A Rust interface for Linux AF_XDP sockets using libxdp. 4 | 5 | [API documentation](https://docs.rs/xsk-rs). 6 | 7 | For more information please see the [networking docs](https://www.kernel.org/doc/html/latest/networking/af_xdp.html) 8 | or a more [detailed overview](http://vger.kernel.org/lpc_net2018_talks/lpc18_paper_af_xdp_perf-v2.pdf). 9 | 10 | An overview of XDP [setup 11 | dependencies](https://github.com/xdp-project/xdp-tutorial/blob/main/setup_dependencies.org) 12 | may also come in handy. 
13 | 14 | Initially inspired by Jesse DuMond's [OCaml implementation](https://github.com/suttonshire/ocaml-xsk). 15 | 16 | ### Examples 17 | 18 | A few may be found in the `examples` directory. A simple example of 19 | moving bytes between two sockets via a veth pair can be found in 20 | `examples/hello_xdp.rs`, while a slightly more complex example of 21 | sending and receiving eth frames (also via a veth pair) is in 22 | `examples/dev1_to_dev2.rs`, which includes a single-threaded and 23 | multi-threaded implementation. Note that neither example will be 24 | indicative of actual performance, since binding the sockets to the 25 | veth pair means that packets will pass through the kernel network 26 | stack. 27 | 28 | An example with shared UMEM is in `examples/shared_umem.rs`. 29 | 30 | ### Running tests / examples 31 | 32 | Root permissions may be required to run the tests or examples, since 33 | they require a veth pair to be set up. However, to avoid running cargo 34 | under `root`, it's best to first build the tests/examples and run the 35 | binaries directly. 36 | 37 | ``` 38 | # tests 39 | cargo build --tests 40 | sudo ./run_all_tests.sh 41 | 42 | # examples 43 | cargo build --examples --release 44 | sudo target/release/examples/hello_xdp 45 | sudo target/release/examples/dev1_to_dev2 -- [FLAGS] [OPTIONS] 46 | ``` 47 | 48 | ### Compatibility 49 | 50 | Tested on a 64-bit machine running Linux kernel version 6.5.0. 51 | 52 | ### Safety 53 | 54 | There is a fair amount of unsafe involved in using this library, and 55 | with it the potential for disaster. However, if you keep the following 56 | in mind, there should be few avenues for catastrophe: 57 | - When a frame / address has been submitted to the fill queue or tx 58 | ring, do not use it again until you have consumed it from either the 59 | completion queue or rx ring. 60 | - Do not use one UMEM's frame descriptors to access frames of another, 61 | different UMEM. 62 | 63 | ### Usage 64 | 65 | The example below sends a packet from one interface to another. 66 | 67 | ```rust 68 | use std::{convert::TryInto, io::Write}; 69 | use xsk_rs::{ 70 | config::{SocketConfig, UmemConfig}, 71 | socket::Socket, 72 | umem::Umem, 73 | }; 74 | 75 | const ETHERNET_PACKET: [u8; 42] = [ 76 | 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf6, 0xe0, 0xf6, 0xc9, 0x60, 0x0a, 0x08, 0x06, 0x00, 0x01, 77 | 0x08, 0x00, 0x06, 0x04, 0x00, 0x01, 0xf6, 0xe0, 0xf6, 0xc9, 0x60, 0x0a, 0xc0, 0xa8, 0x45, 0x01, 78 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0xa8, 0x45, 0xfe, 79 | ]; 80 | 81 | fn main() { 82 | // Create a UMEM for dev1 with 32 frames, whose sizes are 83 | // specified via the `UmemConfig` instance. 84 | let (dev1_umem, mut dev1_descs) = 85 | Umem::new(UmemConfig::default(), 32.try_into().unwrap(), false) 86 | .expect("failed to create UMEM"); 87 | 88 | // Bind an AF_XDP socket to the interface named `xsk_dev1`, on 89 | // queue 0. 90 | let (mut dev1_tx_q, _dev1_rx_q, _dev1_fq_and_cq) = unsafe { 91 | Socket::new( 92 | SocketConfig::default(), 93 | &dev1_umem, 94 | &"xsk_dev1".parse().unwrap(), 95 | 0, 96 | ) 97 | } 98 | .expect("failed to create dev1 socket"); 99 | 100 | // Create a UMEM for dev2. Another option is to use the same UMEM 101 | // as dev1 - to do that we'd just pass `dev1_umem` to the 102 | // `Socket::new` call. In this case the UMEM would be shared, and 103 | // so `dev1_descs` could be used in either context, but each 104 | // socket would have its own completion queue and fill queue.
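// For illustration only (mirroring `examples/shared_umem.rs`): to share
// dev1's UMEM instead, skip the `Umem::new` call below and bind the
// second socket against the same UMEM, e.g.
//
// let (_dev2_tx_q, mut dev2_rx_q, dev2_fq_and_cq) = unsafe {
//     Socket::new(
//         SocketConfig::default(),
//         &dev1_umem,
//         &"xsk_dev2".parse().unwrap(),
//         0,
//     )
// }
// .expect("failed to create dev2 socket");
//
// with `dev1_descs` then split between the two sockets.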
105 | let (dev2_umem, mut dev2_descs) = 106 | Umem::new(UmemConfig::default(), 32.try_into().unwrap(), false) 107 | .expect("failed to create UMEM"); 108 | 109 | // Bind an AF_XDP socket to the interface named `xsk_dev2`, on 110 | // queue 0. 111 | let (_dev2_tx_q, mut dev2_rx_q, dev2_fq_and_cq) = unsafe { 112 | Socket::new( 113 | SocketConfig::default(), 114 | &dev2_umem, 115 | &"xsk_dev2".parse().unwrap(), 116 | 0, 117 | ) 118 | } 119 | .expect("failed to create dev2 socket"); 120 | 121 | let (mut dev2_fq, _dev2_cq) = dev2_fq_and_cq.expect("missing dev2 fill queue and comp queue"); 122 | 123 | // 1. Add frames to dev2's fill queue so we are ready to receive 124 | // some packets. 125 | unsafe { 126 | dev2_fq.produce(&dev2_descs); 127 | } 128 | 129 | // 2. Write to dev1's UMEM. 130 | let pkt = "Hello, world!".as_bytes(); 131 | 132 | unsafe { 133 | dev1_umem 134 | .data_mut(&mut dev1_descs[0]) 135 | .cursor() 136 | .write_all(pkt) 137 | .expect("failed writing packet to frame") 138 | } 139 | 140 | // 3. Submit the frame to the kernel for transmission. 141 | println!("sending packet"); 142 | 143 | unsafe { 144 | dev1_tx_q.produce_and_wakeup(&dev1_descs[..1]).unwrap(); 145 | } 146 | 147 | // 4. Read on dev2. 148 | let pkts_recvd = unsafe { dev2_rx_q.poll_and_consume(&mut dev2_descs, 100).unwrap() }; 149 | 150 | // 5. Confirm that one of the packets we received matches what we expect. 151 | for recv_desc in dev2_descs.iter().take(pkts_recvd) { 152 | let data = unsafe { dev2_umem.data(recv_desc) }; 153 | 154 | if data.contents() == ÐERNET_PACKET { 155 | println!("received packet!"); 156 | return; 157 | } 158 | } 159 | 160 | panic!("no matching packets received") 161 | } 162 | ``` 163 | -------------------------------------------------------------------------------- /bench/.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | Cargo.lock -------------------------------------------------------------------------------- /bench/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "bench" 3 | version = "0.1.0" 4 | authors = ["Douglas Gray "] 5 | edition = "2018" 6 | 7 | [[bench]] 8 | name = "min" 9 | harness = false 10 | 11 | [dev-dependencies] 12 | criterion = "0.3" 13 | rand = "0.8" -------------------------------------------------------------------------------- /bench/README.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DouglasGray/xsk-rs/02c05b38acc7c1d1391d45da46cff4d649428755/bench/README.md -------------------------------------------------------------------------------- /bench/benches/min.rs: -------------------------------------------------------------------------------- 1 | use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; 2 | use std::cmp; 3 | 4 | fn min_cmp(fst: usize, snd: usize) -> usize { 5 | cmp::min(fst, snd) 6 | } 7 | 8 | fn min_if_else(fst: usize, snd: usize) -> usize { 9 | if fst < snd { 10 | fst 11 | } else { 12 | snd 13 | } 14 | } 15 | 16 | fn bench_min(c: &mut Criterion) { 17 | let mut group = c.benchmark_group("min"); 18 | 19 | for vals in [(1, 0), (0, 1), (1, 1)] { 20 | let p = format!("({}, {})", vals.0, vals.1); 21 | 22 | group.bench_with_input(BenchmarkId::new("cmp", p.clone()), &vals, |b, vals| { 23 | b.iter(|| min_cmp(vals.0, vals.1)); 24 | }); 25 | 26 | group.bench_with_input(BenchmarkId::new("if_else", p), &vals, |b, vals| { 27 | b.iter(|| 
min_if_else(vals.0, vals.1)); 28 | }); 29 | } 30 | 31 | group.finish(); 32 | } 33 | 34 | criterion_group!(benches, bench_min); 35 | criterion_main!(benches); 36 | -------------------------------------------------------------------------------- /bench/src/lib.rs: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /examples/dev1_to_dev2.rs: -------------------------------------------------------------------------------- 1 | use crossbeam_channel::{self, Receiver, Sender}; 2 | use std::{ 3 | cmp, 4 | convert::TryInto, 5 | fmt::Debug, 6 | io::Write, 7 | iter, 8 | net::Ipv4Addr, 9 | num::NonZeroU32, 10 | sync::atomic::{AtomicBool, Ordering}, 11 | thread, 12 | time::Instant, 13 | }; 14 | use structopt::StructOpt; 15 | use tokio::runtime::Runtime; 16 | use xsk_rs::{ 17 | config::{BindFlags, FrameSize, Interface, QueueSize, SocketConfig, UmemConfig}, 18 | CompQueue, FillQueue, FrameDesc, RxQueue, Socket, TxQueue, Umem, 19 | }; 20 | 21 | mod setup; 22 | use setup::{util, veth_setup, LinkIpAddr, PacketGenerator, VethDevConfig}; 23 | 24 | // Reqd for the multithreaded case to signal when all packets have 25 | // been sent 26 | static SENDER_DONE: AtomicBool = AtomicBool::new(false); 27 | 28 | pub struct Xsk { 29 | pub umem: Umem, 30 | pub fq: FillQueue, 31 | pub cq: CompQueue, 32 | pub tx_q: TxQueue, 33 | pub rx_q: RxQueue, 34 | pub descs: Vec, 35 | } 36 | 37 | #[derive(Debug, Clone, Copy)] 38 | struct XskConfig { 39 | tx_q_size: QueueSize, 40 | rx_q_size: QueueSize, 41 | cq_size: QueueSize, 42 | fq_size: QueueSize, 43 | frame_size: FrameSize, 44 | frame_count: u32, 45 | } 46 | 47 | #[derive(Debug, Clone, Copy)] 48 | struct Config { 49 | multithreaded: bool, 50 | poll_ms_timeout: i32, 51 | payload_size: usize, 52 | max_batch_size: usize, 53 | num_packets_to_send: usize, 54 | sender: XskConfig, 55 | receiver: XskConfig, 56 | } 57 | 58 | impl From for Config { 59 | fn from(opt: Opt) -> Self { 60 | let sender = XskConfig { 61 | tx_q_size: opt.tx_q_size_sender.try_into().unwrap(), 62 | rx_q_size: opt.rx_q_size_sender.try_into().unwrap(), 63 | cq_size: opt.cq_size_sender.try_into().unwrap(), 64 | fq_size: opt.fq_size_sender.try_into().unwrap(), 65 | frame_count: opt.fq_size_sender + opt.cq_size_sender, 66 | frame_size: opt.frame_size_sender.try_into().unwrap(), 67 | }; 68 | 69 | let receiver = XskConfig { 70 | tx_q_size: opt.tx_q_size_receiver.try_into().unwrap(), 71 | rx_q_size: opt.rx_q_size_receiver.try_into().unwrap(), 72 | cq_size: opt.cq_size_receiver.try_into().unwrap(), 73 | fq_size: opt.fq_size_receiver.try_into().unwrap(), 74 | frame_count: opt.fq_size_receiver + opt.cq_size_receiver, 75 | frame_size: opt.frame_size_receiver.try_into().unwrap(), 76 | }; 77 | 78 | Config { 79 | multithreaded: opt.multithreaded, 80 | poll_ms_timeout: opt.poll_ms_timeout, 81 | payload_size: opt.payload_size, 82 | max_batch_size: opt.max_batch_size, 83 | num_packets_to_send: opt.num_packets_to_send, 84 | sender, 85 | receiver, 86 | } 87 | } 88 | } 89 | 90 | #[derive(Debug, StructOpt)] 91 | #[structopt(name = "dev1_to_dev2")] 92 | struct Opt { 93 | /// Run sender and receiver in separate threads 94 | #[structopt(short, long)] 95 | multithreaded: bool, 96 | 97 | /// Sender fill queue size 98 | #[structopt(default_value = "8192")] 99 | fq_size_sender: u32, 100 | 101 | /// Sender comp queue size 102 | #[structopt(default_value = "4096")] 103 | cq_size_sender: u32, 104 | 105 | /// Sender tx queue size 
106 | #[structopt(default_value = "4096")] 107 | tx_q_size_sender: u32, 108 | 109 | /// Sender rx queue size 110 | #[structopt(default_value = "4096")] 111 | rx_q_size_sender: u32, 112 | 113 | /// Sender frame size 114 | #[structopt(default_value = "2048")] 115 | frame_size_sender: u32, 116 | 117 | /// Receiver fill queue size 118 | #[structopt(default_value = "8192")] 119 | fq_size_receiver: u32, 120 | 121 | /// Receiver comp queue size 122 | #[structopt(default_value = "4096")] 123 | cq_size_receiver: u32, 124 | 125 | /// Receiver tx queue size 126 | #[structopt(default_value = "4096")] 127 | tx_q_size_receiver: u32, 128 | 129 | /// Receiver rx queue size 130 | #[structopt(default_value = "4096")] 131 | rx_q_size_receiver: u32, 132 | 133 | /// Receiver frame size 134 | #[structopt(default_value = "2048")] 135 | frame_size_receiver: u32, 136 | 137 | /// Socket poll timeout in milliseconds 138 | #[structopt(default_value = "100")] 139 | poll_ms_timeout: i32, 140 | 141 | /// Packet payload size 142 | #[structopt(default_value = "32")] 143 | payload_size: usize, 144 | 145 | /// Max number of packets to send at once 146 | #[structopt(default_value = "64")] 147 | max_batch_size: usize, 148 | 149 | /// Total number of packets to send 150 | #[structopt(default_value = "5000000")] 151 | num_packets_to_send: usize, 152 | } 153 | 154 | fn dev1_to_dev2_single_thread( 155 | config: Config, 156 | tx: (Xsk, PacketGenerator), 157 | rx: (Xsk, PacketGenerator), 158 | ) { 159 | let (mut xsk_tx, pkt_gen) = tx; 160 | let (mut xsk_rx, _) = rx; 161 | 162 | let rx_cfg = config.receiver; 163 | 164 | let tx_umem = &xsk_tx.umem; 165 | 166 | let tx_descs = &mut xsk_tx.descs; 167 | let rx_descs = &mut xsk_rx.descs; 168 | 169 | let start = Instant::now(); 170 | 171 | // Packets to write 172 | let mut pkts = iter::repeat_with(|| { 173 | pkt_gen 174 | .generate_packet(1234, 1234, config.payload_size) 175 | .unwrap() 176 | }); 177 | 178 | // Populate receiver fill queue 179 | let frames_filled = unsafe { 180 | xsk_rx 181 | .fq 182 | .produce(&rx_descs[..rx_cfg.fq_size.get() as usize]) 183 | }; 184 | 185 | assert_eq!(frames_filled, rx_cfg.fq_size.get() as usize); 186 | 187 | log::debug!("frames added to receiver fill queue: {}", frames_filled); 188 | 189 | // Write packets to UMEM and populate sender tx queue 190 | tx_descs[0..config.max_batch_size] 191 | .iter_mut() 192 | .for_each(|desc| { 193 | let pkt = pkts.next().unwrap(); 194 | 195 | unsafe { 196 | tx_umem.data_mut(desc).cursor().write_all(&pkt).unwrap(); 197 | } 198 | }); 199 | 200 | let mut total_frames_sent = unsafe { xsk_tx.tx_q.produce(&tx_descs[..config.max_batch_size]) }; 201 | 202 | assert_eq!(total_frames_sent, config.max_batch_size); 203 | 204 | log::debug!("frames added to sender tx queue: {}", total_frames_sent); 205 | 206 | let mut total_frames_rcvd = 0; 207 | let mut total_frames_consumed = 0; 208 | 209 | while total_frames_consumed < config.num_packets_to_send 210 | || total_frames_rcvd < config.num_packets_to_send 211 | { 212 | while total_frames_rcvd < total_frames_sent { 213 | // In copy mode tx is driven by a syscall, so we need to 214 | // wakeup the kernel with a call to either sendto() or 215 | // poll() (wakeup() below uses sendto()).
216 | if xsk_tx.tx_q.needs_wakeup() { 217 | log::debug!("waking up sender tx queue"); 218 | xsk_tx.tx_q.wakeup().unwrap(); 219 | } 220 | 221 | // Handle rx 222 | match unsafe { 223 | xsk_rx 224 | .rx_q 225 | .poll_and_consume(&mut tx_descs[..], config.poll_ms_timeout) 226 | .unwrap() 227 | } { 228 | 0 => { 229 | // No frames consumed, wake up fill queue if required 230 | log::debug!("receiver rx queue consumed 0 frames"); 231 | 232 | if xsk_rx.fq.needs_wakeup() { 233 | log::debug!("waking up receiver fill queue"); 234 | let fd = xsk_rx.rx_q.fd_mut(); 235 | xsk_rx.fq.wakeup(fd, config.poll_ms_timeout).unwrap(); 236 | } 237 | } 238 | frames_rcvd => { 239 | log::debug!("receiver rx queue consumed {} frames", frames_rcvd); 240 | 241 | // Add frames back to fill queue 242 | while unsafe { 243 | let fd = xsk_rx.rx_q.fd_mut(); 244 | xsk_rx 245 | .fq 246 | .produce_and_wakeup( 247 | &rx_descs[..frames_rcvd], 248 | fd, 249 | config.poll_ms_timeout, 250 | ) 251 | .unwrap() 252 | } != frames_rcvd 253 | { 254 | // Loop until frames added to the fill ring. 255 | log::debug!("receiver fill queue failed to allocate"); 256 | } 257 | 258 | log::debug!("submitted {} frames to receiver fill queue", frames_rcvd); 259 | 260 | total_frames_rcvd += frames_rcvd; 261 | 262 | log::debug!("total frames received: {}", total_frames_rcvd); 263 | } 264 | } 265 | } 266 | 267 | if total_frames_sent < config.num_packets_to_send 268 | || total_frames_consumed < config.num_packets_to_send 269 | { 270 | // Handle tx 271 | match unsafe { xsk_tx.cq.consume(&mut tx_descs[..]) } { 272 | 0 => { 273 | log::debug!("sender comp queue consumed 0 frames"); 274 | 275 | if xsk_tx.tx_q.needs_wakeup() { 276 | log::debug!("waking up sender tx queue"); 277 | xsk_tx.tx_q.wakeup().unwrap(); 278 | } 279 | } 280 | frames_rcvd => { 281 | log::debug!("sender comp queue consumed {} frames", frames_rcvd); 282 | 283 | total_frames_consumed += frames_rcvd; 284 | 285 | if total_frames_sent < config.num_packets_to_send { 286 | // Write new data 287 | tx_descs[..frames_rcvd].iter_mut().for_each(|desc| { 288 | let pkt = pkts.next().unwrap(); 289 | 290 | unsafe { 291 | tx_umem.data_mut(desc).cursor().write_all(&pkt).unwrap(); 292 | } 293 | }); 294 | 295 | // Wait until we're ok to write 296 | while !xsk_tx.tx_q.poll(config.poll_ms_timeout).unwrap() { 297 | log::debug!("sender socket not ready to write"); 298 | continue; 299 | } 300 | 301 | let frames_to_send = cmp::min( 302 | frames_rcvd, 303 | cmp::min( 304 | config.max_batch_size, 305 | config.num_packets_to_send - total_frames_sent, 306 | ), 307 | ); 308 | 309 | // Add consumed frames back to the tx queue 310 | while unsafe { 311 | xsk_tx 312 | .tx_q 313 | .produce_and_wakeup(&tx_descs[..frames_to_send]) 314 | .unwrap() 315 | } != frames_to_send 316 | { 317 | // Loop until frames added to the tx ring. 
318 | log::debug!("sender tx queue failed to allocate"); 319 | } 320 | log::debug!("submitted {} frames to sender tx queue", frames_to_send); 321 | 322 | total_frames_sent += frames_to_send; 323 | } 324 | 325 | log::debug!("total frames consumed: {}", total_frames_consumed); 326 | log::debug!("total frames sent: {}", total_frames_sent); 327 | } 328 | } 329 | } 330 | } 331 | 332 | let elapsed_secs = start.elapsed().as_secs_f64(); 333 | 334 | // Bytes sent per second is (number_of_packets * packet_size) / seconds_elapsed 335 | let pkt_len = pkts.next().unwrap().len(); 336 | 337 | let bytes_sent_per_sec: f64 = (total_frames_sent as f64) * (pkt_len as f64) / elapsed_secs; 338 | let bytes_rcvd_per_sec: f64 = (total_frames_rcvd as f64) * (pkt_len as f64) / elapsed_secs; 339 | 340 | // 1 bit/second = 1e-9 Gbps 341 | // gbps_sent = (bytes_sent_per_sec * 8) / 1e9 = bytes_sent_per_sec / 0.125e9 342 | let gbps_sent = bytes_sent_per_sec / 0.125e9; 343 | let gbps_rcvd = bytes_rcvd_per_sec / 0.125e9; 344 | 345 | // Note that this is being 346 | println!( 347 | "time taken to send {} {}-byte eth frames: {:.3} secs", 348 | config.num_packets_to_send, pkt_len, elapsed_secs 349 | ); 350 | println!( 351 | "send throughput: {:.3} Gbps (eth frames sent: {})", 352 | gbps_sent, total_frames_sent 353 | ); 354 | println!( 355 | "recv throughout: {:.3} Gbps (eth frames rcvd: {})", 356 | gbps_rcvd, total_frames_rcvd 357 | ); 358 | println!( 359 | "note that these numbers are not reflective of actual AF_XDP socket performance, 360 | since packets are being sent over a VETH pair, and so pass through the kernel" 361 | ); 362 | } 363 | 364 | fn dev1_to_dev2_multithreaded( 365 | config: Config, 366 | tx: (Xsk, PacketGenerator), 367 | rx: (Xsk, PacketGenerator), 368 | ) { 369 | let rx_cfg = config.receiver; 370 | 371 | let payload_size = config.payload_size; 372 | let max_batch_size = config.max_batch_size; 373 | let num_frames_to_send = config.num_packets_to_send; 374 | let poll_ms_timeout = config.poll_ms_timeout; 375 | 376 | let (begin_send_tx, begin_send_rx): (Sender<()>, Receiver<()>) = crossbeam_channel::bounded(1); 377 | 378 | let start = Instant::now(); 379 | 380 | let (mut xsk_tx, pkt_gen) = tx; 381 | 382 | // Packets to write 383 | let mut pkts = 384 | iter::repeat_with(move || pkt_gen.generate_packet(1234, 1234, payload_size).unwrap()); 385 | 386 | let pkt_len = pkts.next().unwrap().len(); 387 | 388 | let rx_handle = thread::spawn(move || { 389 | let (mut xsk_rx, _) = rx; 390 | 391 | let rx_frames = &mut xsk_rx.descs; 392 | 393 | // Populate receiver fill queue 394 | let frames_filled = unsafe { 395 | xsk_rx 396 | .fq 397 | .produce(&rx_frames[..rx_cfg.fq_size.get() as usize]) 398 | }; 399 | 400 | assert_eq!(frames_filled, rx_cfg.fq_size.get() as usize); 401 | 402 | log::debug!("frames added to receiver fill queue: {}", frames_filled); 403 | 404 | if let Err(_) = begin_send_tx.send(()) { 405 | println!("sender thread has gone away"); 406 | return 0; 407 | } 408 | 409 | let mut total_frames_rcvd = 0; 410 | 411 | while total_frames_rcvd < num_frames_to_send { 412 | // Handle rx 413 | match unsafe { 414 | xsk_rx 415 | .rx_q 416 | .poll_and_consume(&mut rx_frames[..], poll_ms_timeout) 417 | .unwrap() 418 | } { 419 | 0 => { 420 | // No frames consumed, wake up fill queue if required 421 | log::debug!("receiver rx queue consumed 0 frames"); 422 | 423 | if xsk_rx.fq.needs_wakeup() { 424 | log::debug!("waking up receiver fill queue"); 425 | let fd = xsk_rx.rx_q.fd_mut(); 426 | xsk_rx.fq.wakeup(fd, 
poll_ms_timeout).unwrap(); 427 | } 428 | 429 | // Or it might be that there are no packets left to receive 430 | if SENDER_DONE.load(Ordering::Relaxed) { 431 | break; 432 | } 433 | } 434 | frames_rcvd => { 435 | log::debug!("receiver rx queue consumed {} frames", frames_rcvd); 436 | 437 | // Add frames back to fill queue 438 | while unsafe { 439 | let fd = xsk_rx.rx_q.fd_mut(); 440 | xsk_rx 441 | .fq 442 | .produce_and_wakeup(&rx_frames[..frames_rcvd], fd, poll_ms_timeout) 443 | .unwrap() 444 | } != frames_rcvd 445 | { 446 | // Loop until frames added to the fill ring. 447 | log::debug!("receiver fill queue failed to allocate"); 448 | } 449 | 450 | log::debug!("submitted {} frames to receiver fill queue", frames_rcvd); 451 | 452 | total_frames_rcvd += frames_rcvd; 453 | 454 | log::debug!("total frames received: {}", total_frames_rcvd); 455 | } 456 | } 457 | } 458 | 459 | log::debug!("receiver complete"); 460 | 461 | total_frames_rcvd 462 | }); 463 | 464 | let tx_handle = thread::spawn(move || { 465 | let tx_umem = &xsk_tx.umem; 466 | let tx_descs = &mut xsk_tx.descs; 467 | 468 | tx_descs[0..max_batch_size].iter_mut().for_each(|frame| { 469 | let pkt = pkts.next().unwrap(); 470 | 471 | unsafe { 472 | tx_umem.data_mut(frame).cursor().write_all(&pkt).unwrap(); 473 | } 474 | }); 475 | 476 | let mut total_frames_consumed = 0; 477 | 478 | let mut total_frames_sent = unsafe { xsk_tx.tx_q.produce(&tx_descs[..max_batch_size]) }; 479 | 480 | assert_eq!(total_frames_sent, max_batch_size); 481 | 482 | log::debug!("frames added to sender tx queue: {}", total_frames_sent); 483 | 484 | // Let the receiver populate its fill queue first and wait for the go-ahead. 485 | if let Err(_) = begin_send_rx.recv() { 486 | println!("receiver thread has gone away"); 487 | return 0; 488 | } 489 | 490 | while total_frames_consumed < num_frames_to_send { 491 | match unsafe { xsk_tx.cq.consume(&mut tx_descs[..]) } { 492 | 0 => { 493 | log::debug!("sender comp queue consumed 0 frames"); 494 | 495 | if xsk_tx.tx_q.needs_wakeup() { 496 | log::debug!("waking up sender tx queue"); 497 | xsk_tx.tx_q.wakeup().unwrap(); 498 | } 499 | } 500 | frames_rcvd => { 501 | log::debug!("sender comp queue consumed {} frames", frames_rcvd); 502 | 503 | total_frames_consumed += frames_rcvd; 504 | 505 | if total_frames_sent < num_frames_to_send { 506 | // Write new data 507 | tx_descs[..frames_rcvd].iter_mut().for_each(|desc| { 508 | let pkt = pkts.next().unwrap(); 509 | 510 | unsafe { 511 | tx_umem.data_mut(desc).cursor().write_all(&pkt).unwrap(); 512 | } 513 | }); 514 | 515 | // Wait until we're ok to write 516 | while !xsk_tx.tx_q.poll(poll_ms_timeout).unwrap() { 517 | log::debug!("sender socket not ready to write"); 518 | continue; 519 | } 520 | 521 | let frames_to_send = cmp::min( 522 | frames_rcvd, 523 | cmp::min(max_batch_size, num_frames_to_send - total_frames_sent), 524 | ); 525 | 526 | // Add consumed frames back to the tx queue 527 | while unsafe { 528 | xsk_tx 529 | .tx_q 530 | .produce_and_wakeup(&tx_descs[..frames_to_send]) 531 | .unwrap() 532 | } != frames_to_send 533 | { 534 | // Loop until frames added to the tx ring. 
535 | log::debug!("sender tx queue failed to allocate"); 536 | } 537 | log::debug!("submitted {} frames to sender tx queue", frames_to_send); 538 | 539 | total_frames_sent += frames_to_send; 540 | } 541 | 542 | log::debug!("total frames consumed: {}", total_frames_consumed); 543 | log::debug!("total frames sent: {}", total_frames_sent); 544 | } 545 | } 546 | } 547 | 548 | log::debug!("sender complete"); 549 | 550 | // Mark sender as done so receiver knows when to return 551 | SENDER_DONE.store(true, Ordering::Relaxed); 552 | 553 | total_frames_consumed 554 | }); 555 | 556 | let tx_res = tx_handle.join(); 557 | let rx_res = rx_handle.join(); 558 | 559 | if let (Ok(pkts_sent), Ok(pkts_rcvd)) = (&tx_res, &rx_res) { 560 | let elapsed_secs = start.elapsed().as_secs_f64(); 561 | 562 | // Bytes sent per second is (number_of_packets * packet_size) / seconds_elapsed 563 | let bytes_sent_per_sec: f64 = (*pkts_sent as f64) * (pkt_len as f64) / elapsed_secs; 564 | let bytes_rcvd_per_sec: f64 = (*pkts_rcvd as f64) * (pkt_len as f64) / elapsed_secs; 565 | 566 | // 1 bit/second = 1e-9 Gbps 567 | // gbps_sent = (bytes_sent_per_sec * 8) / 1e9 = bytes_sent_per_sec / 0.125e9 568 | let gbps_sent = bytes_sent_per_sec / 0.125e9; 569 | let gbps_rcvd = bytes_rcvd_per_sec / 0.125e9; 570 | 571 | println!( 572 | "time taken to send {} {}-byte eth frames: {:.3} secs", 573 | config.num_packets_to_send, pkt_len, elapsed_secs 574 | ); 575 | println!( 576 | "send throughput: {:.3} Gbps (eth frames sent: {})", 577 | gbps_sent, pkts_sent 578 | ); 579 | println!( 580 | "recv throughout: {:.3} Gbps (eth frames rcvd: {})", 581 | gbps_rcvd, pkts_rcvd 582 | ); 583 | println!( 584 | "note that these numbers are not reflective of actual AF_XDP socket performance, 585 | since packets are being sent over a VETH pair, and so pass through the kernel" 586 | ); 587 | } else { 588 | println!("error (tx_res: {:?}) (rx_res: {:?})", tx_res, rx_res); 589 | } 590 | } 591 | 592 | pub fn build_socket_and_umem( 593 | umem_config: UmemConfig, 594 | socket_config: SocketConfig, 595 | frame_count: NonZeroU32, 596 | if_name: &Interface, 597 | queue_id: u32, 598 | ) -> Xsk { 599 | let (umem, frames) = Umem::new(umem_config, frame_count, false).expect("failed to build umem"); 600 | 601 | let (tx_q, rx_q, fq_and_cq) = unsafe { 602 | Socket::new(socket_config, &umem, if_name, queue_id).expect("failed to build socket") 603 | }; 604 | 605 | let (fq, cq) = fq_and_cq.expect(&format!( 606 | "missing fill and comp queue - interface {:?} may already be bound to", 607 | if_name 608 | )); 609 | 610 | Xsk { 611 | umem, 612 | fq, 613 | cq, 614 | tx_q, 615 | rx_q, 616 | descs: frames, 617 | } 618 | } 619 | 620 | fn build_umem_and_socket_config(config: &XskConfig) -> (UmemConfig, SocketConfig) { 621 | let umem_config = UmemConfig::builder() 622 | .frame_size(config.frame_size) 623 | .fill_queue_size(config.fq_size) 624 | .comp_queue_size(config.cq_size) 625 | .build() 626 | .unwrap(); 627 | 628 | let socket_config = SocketConfig::builder() 629 | .rx_queue_size(config.rx_q_size) 630 | .tx_queue_size(config.tx_q_size) 631 | .bind_flags(BindFlags::XDP_USE_NEED_WAKEUP) 632 | .build(); 633 | 634 | (umem_config, socket_config) 635 | } 636 | 637 | fn run_example( 638 | config: Config, 639 | dev_tx: (VethDevConfig, PacketGenerator), 640 | dev_rx: (VethDevConfig, PacketGenerator), 641 | ) { 642 | let (umem_config_tx, socket_config_tx) = build_umem_and_socket_config(&config.sender); 643 | let (umem_config_rx, socket_config_rx) = 
build_umem_and_socket_config(&config.receiver); 644 | 645 | let xsk_tx = build_socket_and_umem( 646 | umem_config_tx.clone(), 647 | socket_config_tx.clone(), 648 | config.sender.frame_count.try_into().unwrap(), 649 | &dev_tx.0.if_name().parse().unwrap(), 650 | 0, 651 | ); 652 | 653 | let xsk_rx = build_socket_and_umem( 654 | umem_config_rx.clone(), 655 | socket_config_rx.clone(), 656 | config.receiver.frame_count.try_into().unwrap(), 657 | &dev_rx.0.if_name().parse().unwrap(), 658 | 0, 659 | ); 660 | 661 | if config.multithreaded { 662 | println!( 663 | "sending {} eth frames w/ {}-byte payload (total msg size: {} bytes) (multi-threaded)", 664 | config.num_packets_to_send, 665 | config.payload_size, 666 | &config.sender.frame_size.get() 667 | ); 668 | dev1_to_dev2_multithreaded(config, (xsk_tx, dev_tx.1), (xsk_rx, dev_rx.1)); 669 | } else { 670 | println!( 671 | "sending {} eth frames w/ {}-byte payload (total msg size: {} bytes) (single-threaded)", 672 | config.num_packets_to_send, 673 | config.payload_size, 674 | &config.sender.frame_size.get() 675 | ); 676 | dev1_to_dev2_single_thread(config, (xsk_tx, dev_tx.1), (xsk_rx, dev_rx.1)); 677 | } 678 | } 679 | 680 | fn main() { 681 | env_logger::init(); 682 | 683 | let config = Opt::from_args().into(); 684 | 685 | let dev1_config = VethDevConfig { 686 | if_name: "xsk_test_dev1".into(), 687 | addr: [0xf6, 0xe0, 0xf6, 0xc9, 0x60, 0x0a], 688 | ip_addr: LinkIpAddr::new(Ipv4Addr::new(192, 168, 69, 1), 24), 689 | }; 690 | 691 | let dev2_config = VethDevConfig { 692 | if_name: "xsk_test_dev2".into(), 693 | addr: [0x4a, 0xf1, 0x30, 0xeb, 0x0d, 0x31], 694 | ip_addr: LinkIpAddr::new(Ipv4Addr::new(192, 168, 69, 1), 24), 695 | }; 696 | 697 | // We'll keep track of ctrl+c events but not let them kill the process 698 | // immediately as we may need to clean up the veth pair. 699 | let ctrl_c_events = util::ctrl_channel().unwrap(); 700 | 701 | let (complete_tx, complete_rx) = crossbeam_channel::bounded(1); 702 | 703 | let runtime = Runtime::new().unwrap(); 704 | 705 | let example_handle = thread::spawn(move || { 706 | let res = runtime.block_on(veth_setup::run_with_veth_pair( 707 | dev1_config, 708 | dev2_config, 709 | move |dev1, dev2| run_example(config, dev1, dev2), 710 | )); 711 | 712 | let _ = complete_tx.send(()); 713 | 714 | res 715 | }); 716 | 717 | // Wait for either the example to finish or for a ctrl+c event to occur 718 | crossbeam_channel::select! { 719 | recv(complete_rx) -> _ => { 720 | }, 721 | recv(ctrl_c_events) -> _ => { 722 | println!("SIGINT received"); 723 | } 724 | } 725 | 726 | example_handle.join().unwrap().unwrap(); 727 | } 728 | -------------------------------------------------------------------------------- /examples/hello_xdp.rs: -------------------------------------------------------------------------------- 1 | use std::{convert::TryInto, io::Write, net::Ipv4Addr, thread}; 2 | use tokio::runtime::Runtime; 3 | use xsk_rs::{ 4 | config::{SocketConfig, UmemConfig}, 5 | Socket, Umem, 6 | }; 7 | 8 | #[allow(dead_code)] 9 | mod setup; 10 | use setup::{util, veth_setup, LinkIpAddr, PacketGenerator, VethDevConfig, ETHERNET_PACKET}; 11 | 12 | fn hello_xdp(dev1: (VethDevConfig, PacketGenerator), dev2: (VethDevConfig, PacketGenerator)) { 13 | // Create a UMEM for dev1. 14 | let (dev1_umem, mut dev1_descs) = 15 | Umem::new(UmemConfig::default(), 32.try_into().unwrap(), false) 16 | .expect("failed to create UMEM"); 17 | 18 | // Bind an AF_XDP socket to the interface named `xsk_dev1`, on 19 | // queue 0. 
20 | let (mut dev1_tx_q, _dev1_rx_q, _dev1_fq_and_cq) = unsafe { 21 | Socket::new( 22 | SocketConfig::default(), 23 | &dev1_umem, 24 | &dev1.0.if_name().parse().unwrap(), 25 | 0, 26 | ) 27 | } 28 | .expect("failed to create dev1 socket"); 29 | 30 | // Create a UMEM for dev2. 31 | let (dev2_umem, mut dev2_descs) = 32 | Umem::new(UmemConfig::default(), 32.try_into().unwrap(), false) 33 | .expect("failed to create UMEM"); 34 | 35 | // Bind an AF_XDP socket to the interface named `xsk_dev2`, on 36 | // queue 0. 37 | let (_dev2_tx_q, mut dev2_rx_q, dev2_fq_and_cq) = unsafe { 38 | Socket::new( 39 | SocketConfig::default(), 40 | &dev2_umem, 41 | &dev2.0.if_name().parse().unwrap(), 42 | 0, 43 | ) 44 | } 45 | .expect("failed to create dev2 socket"); 46 | 47 | let (mut dev2_fq, _dev2_cq) = dev2_fq_and_cq.expect("missing dev2 fill queue and comp queue"); 48 | 49 | // 1. Add frames to dev2's fill queue so we are ready to receive 50 | // some packets. 51 | unsafe { 52 | dev2_fq.produce(&dev2_descs); 53 | } 54 | 55 | // 2. Write to dev1's UMEM. 56 | unsafe { 57 | dev1_umem 58 | .data_mut(&mut dev1_descs[0]) 59 | .cursor() 60 | .write_all(ÐERNET_PACKET) 61 | .expect("failed writing packet to frame") 62 | } 63 | 64 | // 3. Submit the frame to the kernel for transmission. 65 | println!("sending packet"); 66 | 67 | unsafe { 68 | dev1_tx_q.produce_and_wakeup(&dev1_descs[..1]).unwrap(); 69 | } 70 | 71 | // 4. Read on dev2. 72 | let pkts_recvd = unsafe { dev2_rx_q.poll_and_consume(&mut dev2_descs, 100).unwrap() }; 73 | 74 | // 5. Confirm that one of the packets we received matches what we expect. 75 | for recv_desc in dev2_descs.iter().take(pkts_recvd) { 76 | let data = unsafe { dev2_umem.data(recv_desc) }; 77 | 78 | if data.contents() == ÐERNET_PACKET { 79 | println!("received packet!"); 80 | return; 81 | } 82 | } 83 | 84 | panic!("no matching packets received") 85 | } 86 | 87 | fn main() { 88 | let dev1_config = VethDevConfig { 89 | if_name: "xsk_test_dev1".into(), 90 | addr: [0xf6, 0xe0, 0xf6, 0xc9, 0x60, 0x0a], 91 | ip_addr: LinkIpAddr::new(Ipv4Addr::new(192, 168, 69, 1), 24), 92 | }; 93 | 94 | let dev2_config = VethDevConfig { 95 | if_name: "xsk_test_dev2".into(), 96 | addr: [0x4a, 0xf1, 0x30, 0xeb, 0x0d, 0x31], 97 | ip_addr: LinkIpAddr::new(Ipv4Addr::new(192, 168, 69, 1), 24), 98 | }; 99 | 100 | // We'll keep track of ctrl+c events but not let them kill the process 101 | // immediately as we may need to clean up the veth pair. 102 | let ctrl_c_events = util::ctrl_channel().unwrap(); 103 | 104 | let (complete_tx, complete_rx) = crossbeam_channel::bounded(1); 105 | 106 | let runtime = Runtime::new().unwrap(); 107 | 108 | let example_handle = thread::spawn(move || { 109 | let res = runtime.block_on(veth_setup::run_with_veth_pair( 110 | dev1_config, 111 | dev2_config, 112 | hello_xdp, 113 | )); 114 | 115 | let _ = complete_tx.send(()); 116 | 117 | res 118 | }); 119 | 120 | // Wait for either the example to finish or for a ctrl+c event to occur. 121 | crossbeam_channel::select! 
{ 122 | recv(complete_rx) -> _ => { 123 | }, 124 | recv(ctrl_c_events) -> _ => { 125 | println!("SIGINT received"); 126 | } 127 | } 128 | 129 | example_handle.join().unwrap().unwrap(); 130 | } 131 | -------------------------------------------------------------------------------- /examples/setup/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod util; 2 | pub use util::PacketGenerator; 3 | 4 | pub mod veth_setup; 5 | pub use veth_setup::{LinkIpAddr, VethDevConfig}; 6 | 7 | #[allow(dead_code)] 8 | pub const ETHERNET_PACKET: [u8; 42] = [ 9 | 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf6, 0xe0, 0xf6, 0xc9, 0x60, 0x0a, 0x08, 0x06, 0x00, 0x01, 10 | 0x08, 0x00, 0x06, 0x04, 0x00, 0x01, 0xf6, 0xe0, 0xf6, 0xc9, 0x60, 0x0a, 0xc0, 0xa8, 0x45, 0x01, 11 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0xa8, 0x45, 0xfe, 12 | ]; 13 | -------------------------------------------------------------------------------- /examples/setup/util.rs: -------------------------------------------------------------------------------- 1 | use crossbeam_channel::{self, Receiver}; 2 | use etherparse::{err::packet::BuildWriteError, PacketBuilder}; 3 | 4 | use super::veth_setup::VethDevConfig; 5 | 6 | pub fn ctrl_channel() -> Result, ctrlc::Error> { 7 | let (tx, rx) = crossbeam_channel::bounded(1); 8 | ctrlc::set_handler(move || { 9 | let _ = tx.send(()); 10 | })?; 11 | 12 | Ok(rx) 13 | } 14 | 15 | #[derive(Debug, Clone)] 16 | pub struct PacketGenerator { 17 | src: VethDevConfig, 18 | dst: VethDevConfig, 19 | } 20 | 21 | impl PacketGenerator { 22 | pub fn new(src: VethDevConfig, dst: VethDevConfig) -> Self { 23 | Self { src, dst } 24 | } 25 | 26 | /// Generate an ETH frame w/ UDP as transport layer and payload size `payload_len` 27 | pub fn generate_packet( 28 | &self, 29 | src_port: u16, 30 | dst_port: u16, 31 | payload_len: usize, 32 | ) -> Result, BuildWriteError> { 33 | let builder = PacketBuilder::ethernet2( 34 | self.src.addr(), // src mac 35 | self.dst.addr(), // dst mac 36 | ) 37 | .ipv4( 38 | self.src.ip_addr().octets(), // src ip 39 | self.dst.ip_addr().octets(), // dst ip 40 | 20, // time to live 41 | ) 42 | .udp(src_port, dst_port); 43 | 44 | let payload = generate_random_bytes(payload_len); 45 | 46 | let mut result = Vec::with_capacity(builder.size(payload.len())); 47 | 48 | builder.write(&mut result, &payload)?; 49 | 50 | Ok(result) 51 | } 52 | 53 | /// Packet generator with `src` and `dst` swapped. 
54 | pub fn into_swapped(self) -> Self { 55 | Self { 56 | src: self.dst.clone(), 57 | dst: self.src.clone(), 58 | } 59 | } 60 | } 61 | 62 | fn generate_random_bytes(len: usize) -> Vec { 63 | (0..len).map(|_| rand::random::()).collect() 64 | } 65 | -------------------------------------------------------------------------------- /examples/setup/veth_setup.rs: -------------------------------------------------------------------------------- 1 | use futures::stream::TryStreamExt; 2 | use rtnetlink::Handle; 3 | use std::net::{IpAddr, Ipv4Addr}; 4 | use tokio::{runtime, task}; 5 | 6 | use super::util::PacketGenerator; 7 | 8 | #[derive(Debug, Clone, Copy)] 9 | enum LinkStatus { 10 | Up, 11 | Down, 12 | } 13 | 14 | struct VethDev { 15 | handle: Handle, 16 | index: u32, 17 | if_name: String, 18 | } 19 | 20 | impl VethDev { 21 | async fn set_status(&self, status: LinkStatus) -> anyhow::Result<()> { 22 | Ok(match status { 23 | LinkStatus::Up => { 24 | self.handle.link().set(self.index).up().execute().await?; 25 | } 26 | LinkStatus::Down => { 27 | self.handle.link().set(self.index).down().execute().await?; 28 | } 29 | }) 30 | } 31 | 32 | async fn set_addr(&self, addr: Vec) -> anyhow::Result<()> { 33 | self.handle 34 | .link() 35 | .set(self.index) 36 | .address(addr) 37 | .execute() 38 | .await?; 39 | 40 | Ok(()) 41 | } 42 | 43 | async fn set_ip_addr(&self, ip_addr: LinkIpAddr) -> anyhow::Result<()> { 44 | self.handle 45 | .address() 46 | .add( 47 | self.index, 48 | IpAddr::V4(ip_addr.addr.clone()), 49 | ip_addr.prefix_len, 50 | ) 51 | .execute() 52 | .await?; 53 | 54 | Ok(()) 55 | } 56 | } 57 | 58 | struct VethPair { 59 | dev1: VethDev, 60 | dev2: VethDev, 61 | } 62 | 63 | impl VethPair { 64 | async fn set_status(&self, status: LinkStatus) -> anyhow::Result<()> { 65 | for dev in [&self.dev1, &self.dev2] { 66 | dev.set_status(status).await?; 67 | } 68 | Ok(()) 69 | } 70 | } 71 | 72 | impl Drop for VethPair { 73 | fn drop(&mut self) { 74 | let (handle, index, if_name) = (&self.dev1.handle, self.dev1.index, &self.dev1.if_name); 75 | 76 | let res = task::block_in_place(move || { 77 | runtime::Handle::current() 78 | .block_on(async move { handle.link().del(index).execute().await }) 79 | }); 80 | 81 | if let Err(e) = res { 82 | eprintln!("failed to delete link: {:?} (you may need to delete it manually with 'sudo ip link del {}')", e, if_name); 83 | } 84 | } 85 | } 86 | 87 | #[derive(Debug, Clone, Copy)] 88 | pub struct LinkIpAddr { 89 | addr: Ipv4Addr, 90 | prefix_len: u8, 91 | } 92 | 93 | impl LinkIpAddr { 94 | pub fn new(addr: Ipv4Addr, prefix_len: u8) -> Self { 95 | LinkIpAddr { addr, prefix_len } 96 | } 97 | 98 | pub fn octets(&self) -> [u8; 4] { 99 | self.addr.octets() 100 | } 101 | } 102 | 103 | #[derive(Clone, Debug)] 104 | pub struct VethDevConfig { 105 | pub if_name: String, 106 | pub addr: [u8; 6], 107 | pub ip_addr: LinkIpAddr, 108 | } 109 | 110 | impl VethDevConfig { 111 | pub fn if_name(&self) -> &str { 112 | &self.if_name 113 | } 114 | 115 | pub fn addr(&self) -> [u8; 6] { 116 | self.addr 117 | } 118 | 119 | pub fn ip_addr(&self) -> LinkIpAddr { 120 | self.ip_addr 121 | } 122 | } 123 | 124 | async fn get_link_index(handle: &Handle, name: &str) -> anyhow::Result { 125 | Ok(handle 126 | .link() 127 | .get() 128 | .match_name(name.into()) 129 | .execute() 130 | .try_next() 131 | .await? 
132 | .expect(format!("no link with name {} found", name).as_str()) 133 | .header 134 | .index) 135 | } 136 | 137 | async fn build_veth_pair(dev1_if_name: &str, dev2_if_name: &str) -> anyhow::Result { 138 | let (connection, handle, _) = rtnetlink::new_connection().unwrap(); 139 | 140 | tokio::spawn(connection); 141 | 142 | handle 143 | .link() 144 | .add() 145 | .veth(dev1_if_name.into(), dev2_if_name.into()) 146 | .execute() 147 | .await?; 148 | 149 | let dev1_index = get_link_index(&handle, dev1_if_name).await.expect( 150 | format!( 151 | "failed to retrieve index for dev1, delete link manually: 'sudo ip link del {}'", 152 | dev1_if_name 153 | ) 154 | .as_str(), 155 | ); 156 | 157 | let dev2_index = get_link_index(&handle, dev2_if_name).await.expect( 158 | format!( 159 | "failed to retrieve index for dev2, delete link manually: 'sudo ip link del {}'", 160 | dev1_if_name 161 | ) 162 | .as_str(), 163 | ); 164 | 165 | Ok(VethPair { 166 | dev1: VethDev { 167 | handle: handle.clone(), 168 | index: dev1_index, 169 | if_name: dev1_if_name.into(), 170 | }, 171 | dev2: VethDev { 172 | handle: handle.clone(), 173 | index: dev2_index, 174 | if_name: dev2_if_name.into(), 175 | }, 176 | }) 177 | } 178 | 179 | pub async fn run_with_veth_pair( 180 | dev1_config: VethDevConfig, 181 | dev2_config: VethDevConfig, 182 | f: F, 183 | ) -> anyhow::Result 184 | where 185 | F: FnOnce((VethDevConfig, PacketGenerator), (VethDevConfig, PacketGenerator)) -> T 186 | + Send 187 | + 'static, 188 | T: Send + 'static, 189 | { 190 | let veth_pair = build_veth_pair(&dev1_config.if_name(), &dev2_config.if_name()) 191 | .await 192 | .unwrap(); 193 | 194 | veth_pair.set_status(LinkStatus::Up).await?; 195 | 196 | veth_pair.dev1.set_addr(dev1_config.addr.into()).await?; 197 | veth_pair.dev2.set_addr(dev2_config.addr.into()).await?; 198 | 199 | veth_pair.dev1.set_ip_addr(dev1_config.ip_addr).await?; 200 | veth_pair.dev2.set_ip_addr(dev2_config.ip_addr).await?; 201 | 202 | let dev1_pkt_gen = PacketGenerator::new(dev1_config.clone(), dev2_config.clone()); 203 | let dev2_pkt_gen = dev1_pkt_gen.clone().into_swapped(); 204 | 205 | let res = 206 | task::spawn_blocking(move || f((dev1_config, dev1_pkt_gen), (dev2_config, dev2_pkt_gen))) 207 | .await; 208 | 209 | veth_pair.set_status(LinkStatus::Down).await?; 210 | 211 | Ok(res?) 212 | } 213 | -------------------------------------------------------------------------------- /examples/shared_umem.rs: -------------------------------------------------------------------------------- 1 | use std::{convert::TryInto, io::Write, net::Ipv4Addr, thread}; 2 | use tokio::runtime::Runtime; 3 | use xsk_rs::{ 4 | config::{SocketConfig, UmemConfig}, 5 | Socket, Umem, 6 | }; 7 | 8 | #[allow(dead_code)] 9 | mod setup; 10 | use setup::{util, veth_setup, LinkIpAddr, PacketGenerator, VethDevConfig, ETHERNET_PACKET}; 11 | 12 | fn hello_xdp(dev1: (VethDevConfig, PacketGenerator), dev2: (VethDevConfig, PacketGenerator)) { 13 | // This UMEM will be shared between both sockets. 14 | let (umem, mut descs) = Umem::new(UmemConfig::default(), 32.try_into().unwrap(), false) 15 | .expect("failed to create UMEM"); 16 | 17 | // Bind an AF_XDP socket to the interface named `xsk_dev1`, on 18 | // queue 0. 
19 | let (mut dev1_tx_q, _dev1_rx_q, _dev1_fq_and_cq) = unsafe { 20 | Socket::new( 21 | SocketConfig::default(), 22 | &umem, 23 | &dev1.0.if_name().parse().unwrap(), 24 | 0, 25 | ) 26 | } 27 | .expect("failed to create dev1 socket"); 28 | 29 | // Bind an AF_XDP socket to the interface named `xsk_dev2`, on 30 | // queue 0. Also uses the UMEM above. 31 | let (_dev2_tx_q, mut dev2_rx_q, dev2_fq_and_cq) = unsafe { 32 | Socket::new( 33 | SocketConfig::default(), 34 | &umem, 35 | &dev2.0.if_name().parse().unwrap(), 36 | 0, 37 | ) 38 | } 39 | .expect("failed to create dev2 socket"); 40 | 41 | let (mut dev2_fq, _dev2_cq) = dev2_fq_and_cq.expect("missing dev2 fill queue and comp queue"); 42 | 43 | // Just split the UMEM frames between the two sockets for 44 | // convenience. 45 | let (dev1_descs, mut dev2_descs) = descs.split_at_mut(16); 46 | 47 | // 1. Add frames to dev2's fill queue so we are ready to receive 48 | // some packets. 49 | unsafe { 50 | dev2_fq.produce(&dev2_descs); 51 | } 52 | 53 | // 2. Write to the UMEM. 54 | unsafe { 55 | umem.data_mut(&mut dev1_descs[0]) 56 | .cursor() 57 | .write_all(ÐERNET_PACKET) 58 | .expect("failed writing packet to frame") 59 | } 60 | 61 | // 3. Submit the frame to the kernel for transmission. 62 | println!("sending packet"); 63 | 64 | unsafe { 65 | dev1_tx_q.produce_and_wakeup(&dev1_descs[..1]).unwrap(); 66 | } 67 | 68 | // 4. Read on dev2. 69 | let pkts_recvd = unsafe { dev2_rx_q.poll_and_consume(&mut dev2_descs, 100).unwrap() }; 70 | 71 | // 5. Confirm that one of the packets we received matches what we expect. 72 | for recv_desc in dev2_descs.iter().take(pkts_recvd) { 73 | let data = unsafe { umem.data(recv_desc) }; 74 | 75 | if data.contents() == ÐERNET_PACKET { 76 | println!("received packet!"); 77 | return; 78 | } 79 | } 80 | 81 | panic!("no matching packets received") 82 | } 83 | 84 | fn main() { 85 | let dev1_config = VethDevConfig { 86 | if_name: "xsk_test_dev1".into(), 87 | addr: [0xf6, 0xe0, 0xf6, 0xc9, 0x60, 0x0a], 88 | ip_addr: LinkIpAddr::new(Ipv4Addr::new(192, 168, 69, 1), 24), 89 | }; 90 | 91 | let dev2_config = VethDevConfig { 92 | if_name: "xsk_test_dev2".into(), 93 | addr: [0x4a, 0xf1, 0x30, 0xeb, 0x0d, 0x31], 94 | ip_addr: LinkIpAddr::new(Ipv4Addr::new(192, 168, 69, 1), 24), 95 | }; 96 | 97 | // We'll keep track of ctrl+c events but not let them kill the process 98 | // immediately as we may need to clean up the veth pair. 99 | let ctrl_c_events = util::ctrl_channel().unwrap(); 100 | 101 | let (complete_tx, complete_rx) = crossbeam_channel::bounded(1); 102 | 103 | let runtime = Runtime::new().unwrap(); 104 | 105 | let example_handle = thread::spawn(move || { 106 | let res = runtime.block_on(veth_setup::run_with_veth_pair( 107 | dev1_config, 108 | dev2_config, 109 | hello_xdp, 110 | )); 111 | 112 | let _ = complete_tx.send(()); 113 | 114 | res 115 | }); 116 | 117 | // Wait for either the example to finish or for a ctrl+c event to occur. 118 | crossbeam_channel::select! 
{ 119 | recv(complete_rx) -> _ => { 120 | }, 121 | recv(ctrl_c_events) -> _ => { 122 | println!("SIGINT received"); 123 | } 124 | } 125 | 126 | example_handle.join().unwrap().unwrap(); 127 | } 128 | -------------------------------------------------------------------------------- /run_all_tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | find ./target/debug/deps/ -maxdepth 1 -perm -111 -type f -regextype egrep -regex "(.*tests.*|.*xsk_rs.*)" | xargs -0 -n1 bash -c 4 | -------------------------------------------------------------------------------- /src/config/mod.rs: -------------------------------------------------------------------------------- 1 | //! [`Umem`](crate::umem::Umem) and [`Socket`](crate::socket::Socket) 2 | //! configuration. 3 | 4 | mod socket; 5 | pub use socket::{ 6 | BindFlags, Config as SocketConfig, ConfigBuilder as SocketConfigBuilder, Interface, 7 | LibxdpFlags, XdpFlags, 8 | }; 9 | 10 | mod umem; 11 | pub use umem::{ 12 | Config as UmemConfig, ConfigBuildError as UmemConfigBuilderError, 13 | ConfigBuilder as UmemConfigBuilder, 14 | }; 15 | 16 | use std::{convert::TryFrom, error, fmt}; 17 | 18 | use crate::util; 19 | 20 | /// The minimum [`Umem`](crate::Umem) frame size. 21 | /// 22 | /// Matches the constant of the same name defined in the linux source 23 | /// at `net/xdp/xdp_umem.c` 24 | pub const XDP_UMEM_MIN_CHUNK_SIZE: u32 = 2048; 25 | 26 | /// A ring's buffer size. Must be a power of two. 27 | #[derive(Debug, Clone, Copy)] 28 | pub struct QueueSize(u32); 29 | 30 | impl QueueSize { 31 | /// Create a new `QueueSize` instance. Fails if `size` is not a 32 | /// power of two. 33 | pub fn new(size: u32) -> Result { 34 | if !util::is_pow_of_two(size) { 35 | Err(QueueSizeError(size)) 36 | } else { 37 | Ok(Self(size)) 38 | } 39 | } 40 | 41 | /// The queue size. 42 | pub fn get(&self) -> u32 { 43 | self.0 44 | } 45 | } 46 | 47 | impl TryFrom for QueueSize { 48 | type Error = QueueSizeError; 49 | 50 | fn try_from(size: u32) -> Result { 51 | QueueSize::new(size) 52 | } 53 | } 54 | 55 | /// Error signifying incorrect queue size. 56 | #[derive(Debug)] 57 | pub struct QueueSizeError(u32); 58 | 59 | impl fmt::Display for QueueSizeError { 60 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 61 | write!(f, "expected a power of two as queue size, got {}", self.0) 62 | } 63 | } 64 | 65 | impl error::Error for QueueSizeError {} 66 | 67 | /// The size of a [`Umem`](crate::umem::Umem) frame. Cannot be smaller 68 | /// than [`XDP_UMEM_MIN_CHUNK_SIZE`]. 69 | #[derive(Debug, Clone, Copy)] 70 | pub struct FrameSize(u32); 71 | 72 | impl FrameSize { 73 | /// Create a new `FrameSize` instance. Fails if `size` is smaller 74 | /// than [`XDP_UMEM_MIN_CHUNK_SIZE`]. 75 | pub fn new(size: u32) -> Result { 76 | if size < XDP_UMEM_MIN_CHUNK_SIZE { 77 | Err(FrameSizeError(size)) 78 | } else { 79 | Ok(Self(size)) 80 | } 81 | } 82 | 83 | /// The frame size. 84 | pub fn get(&self) -> u32 { 85 | self.0 86 | } 87 | } 88 | 89 | impl TryFrom for FrameSize { 90 | type Error = FrameSizeError; 91 | 92 | fn try_from(size: u32) -> Result { 93 | FrameSize::new(size) 94 | } 95 | } 96 | 97 | /// Error signifying incorrect frame size. 
98 | #[derive(Debug)] 99 | pub struct FrameSizeError(u32); 100 | 101 | impl fmt::Display for FrameSizeError { 102 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 103 | write!( 104 | f, 105 | "expected frame size >= {}, got {}", 106 | XDP_UMEM_MIN_CHUNK_SIZE, self.0 107 | ) 108 | } 109 | } 110 | 111 | impl error::Error for FrameSizeError {} 112 | 113 | #[cfg(test)] 114 | mod tests { 115 | use super::*; 116 | 117 | #[test] 118 | fn queue_size_should_accept_only_non_zero_powers_of_two() { 119 | assert!(QueueSize::new(0).is_err()); 120 | assert!(QueueSize::new(1).is_ok()); 121 | assert!(QueueSize::new(2).is_ok()); 122 | assert!(QueueSize::new(3).is_err()); 123 | assert!(QueueSize::new(4).is_ok()); 124 | } 125 | 126 | #[test] 127 | fn frame_size_should_reject_values_below_2048() { 128 | assert!(FrameSize::new(0).is_err()); 129 | assert!(FrameSize::new(XDP_UMEM_MIN_CHUNK_SIZE - 1).is_err()); 130 | assert!(FrameSize::new(XDP_UMEM_MIN_CHUNK_SIZE).is_ok()); 131 | assert!(FrameSize::new(XDP_UMEM_MIN_CHUNK_SIZE + 1).is_ok()) 132 | } 133 | } 134 | -------------------------------------------------------------------------------- /src/config/socket.rs: -------------------------------------------------------------------------------- 1 | use bitflags::bitflags; 2 | use libxdp_sys::{ 3 | xsk_socket_config, xsk_socket_config__bindgen_ty_1, XSK_RING_CONS__DEFAULT_NUM_DESCS, 4 | XSK_RING_PROD__DEFAULT_NUM_DESCS, 5 | }; 6 | use std::{ 7 | convert::{TryFrom, TryInto}, 8 | ffi::{CStr, CString, NulError}, 9 | str::FromStr, 10 | }; 11 | 12 | use super::QueueSize; 13 | 14 | bitflags! { 15 | /// Libbpf flags. 16 | #[derive(Debug, Clone, Copy)] 17 | pub struct LibxdpFlags: u32 { 18 | /// Set to avoid loading of default XDP program on socket 19 | /// creation. 20 | const XSK_LIBXDP_FLAGS_INHIBIT_PROG_LOAD = 1; 21 | } 22 | } 23 | 24 | bitflags! { 25 | /// XDP flags. 26 | /// 27 | /// Some may not be applicable if an XDP program is already loaded 28 | /// on the target interface. 29 | #[derive(Debug, Clone, Copy)] 30 | pub struct XdpFlags: u32 { 31 | /// Fail if an XDP program is already loaded on the target 32 | /// interface. 33 | const XDP_FLAGS_UPDATE_IF_NOEXIST = 1; 34 | /// Force generic/SKB mode. 35 | const XDP_FLAGS_SKB_MODE = 2; 36 | /// Force driver mode. The driver must support XDP. 37 | const XDP_FLAGS_DRV_MODE = 4; 38 | /// Offload to hardware. The NIC must support XDP. 39 | const XDP_FLAGS_HW_MODE = 8; 40 | } 41 | } 42 | 43 | bitflags! { 44 | /// Bind flags. 45 | #[derive(Debug, Clone, Copy)] 46 | pub struct BindFlags: u16 { 47 | /// Forces copy-mode. 48 | const XDP_COPY = 2; 49 | /// Forces zero-copy mode. Socket creation will fail if not 50 | /// available. 51 | const XDP_ZEROCOPY = 4; 52 | /// If set, the driver may go to sleep, meaning the 53 | /// [`FillQueue`](crate::FillQueue) and/or 54 | /// [`TxQueue`](crate::TxQueue) will need waking up (using the 55 | /// `*_wakeup` or `poll` functions available on either 56 | /// struct). It is recommended to enable this flag as it often 57 | /// leads to better performance but especially if the driver 58 | /// and application are running on the same core. More details 59 | /// in the 60 | /// [docs](https://www.kernel.org/doc/html/latest/networking/af_xdp.html#xdp-use-need-wakeup-bind-flag). 61 | const XDP_USE_NEED_WAKEUP = 8; 62 | } 63 | } 64 | 65 | /// A device interface name. 66 | #[derive(Debug, Clone)] 67 | pub struct Interface(CString); 68 | 69 | impl Interface { 70 | /// Creates a new `Interface` instance. 
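    ///
    /// A small sketch of the ways an `Interface` can be built (the
    /// interface name below is only an example):
    ///
    /// ```no_run
    /// use std::{convert::TryInto, ffi::CString};
    /// use xsk_rs::config::Interface;
    ///
    /// // Directly from a `CString`.
    /// let if_a = Interface::new(CString::new("eth0").unwrap());
    ///
    /// // Or via the `FromStr` and `TryFrom<&[u8]>` impls.
    /// let if_b: Interface = "eth0".parse().unwrap();
    /// let if_c: Interface = "eth0".as_bytes().try_into().unwrap();
    /// ```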
71 | pub fn new(name: CString) -> Self { 72 | Self(name) 73 | } 74 | 75 | pub(crate) fn as_cstr(&self) -> &CStr { 76 | &self.0 77 | } 78 | } 79 | 80 | impl FromStr for Interface { 81 | type Err = NulError; 82 | 83 | fn from_str(s: &str) -> Result { 84 | s.as_bytes().try_into() 85 | } 86 | } 87 | 88 | impl TryFrom<&[u8]> for Interface { 89 | type Error = NulError; 90 | 91 | fn try_from(bytes: &[u8]) -> Result { 92 | CString::new(bytes).map(Self) 93 | } 94 | } 95 | 96 | impl TryFrom> for Interface { 97 | type Error = NulError; 98 | 99 | fn try_from(bytes: Vec) -> Result { 100 | CString::new(bytes).map(Self) 101 | } 102 | } 103 | 104 | /// Builder for a [`SocketConfig`](Config). 105 | #[derive(Debug, Default, Clone, Copy)] 106 | pub struct ConfigBuilder { 107 | config: Config, 108 | } 109 | 110 | impl ConfigBuilder { 111 | /// Creates a new [`SocketConfigBuilder`](ConfigBuilder) instance 112 | /// with no flags set and with queue sizes as per the `libbpf` 113 | /// defaults. 114 | pub fn new() -> Self { 115 | Self::default() 116 | } 117 | 118 | /// Set the [`RxQueue`](crate::RxQueue) size. Default is 119 | /// [`XSK_RING_CONS__DEFAULT_NUM_DESCS`]. 120 | pub fn rx_queue_size(&mut self, size: QueueSize) -> &mut Self { 121 | self.config.rx_queue_size = size; 122 | self 123 | } 124 | 125 | /// Set the [`TxQueue`](crate::RxQueue) size. Default is 126 | /// [`XSK_RING_PROD__DEFAULT_NUM_DESCS`]. 127 | pub fn tx_queue_size(&mut self, size: QueueSize) -> &mut Self { 128 | self.config.tx_queue_size = size; 129 | self 130 | } 131 | 132 | /// Set the [`LibxdpFlags`]. Default is no flags set. 133 | pub fn libxdp_flags(&mut self, flags: LibxdpFlags) -> &mut Self { 134 | self.config.libxdp_flags = flags; 135 | self 136 | } 137 | 138 | /// Set the [`XdpFlags`]. Default is no flags set. 139 | pub fn xdp_flags(&mut self, flags: XdpFlags) -> &mut Self { 140 | self.config.xdp_flags = flags; 141 | self 142 | } 143 | 144 | /// Set the socket [`BindFlags`]. Default is no flags set. 145 | pub fn bind_flags(&mut self, flags: BindFlags) -> &mut Self { 146 | self.config.bind_flags = flags; 147 | self 148 | } 149 | 150 | /// Build a [`SocketConfig`](Config) instance using the values set 151 | /// in this builder. 152 | pub fn build(&self) -> Config { 153 | self.config 154 | } 155 | } 156 | 157 | /// Config for an AF_XDP [`Socket`](crate::Socket) instance. 158 | #[derive(Debug, Clone, Copy)] 159 | pub struct Config { 160 | rx_queue_size: QueueSize, 161 | tx_queue_size: QueueSize, 162 | libxdp_flags: LibxdpFlags, 163 | xdp_flags: XdpFlags, 164 | bind_flags: BindFlags, 165 | } 166 | 167 | impl Config { 168 | /// Creates a [`SocketConfigBuilder`](ConfigBuilder) instance. 169 | pub fn builder() -> ConfigBuilder { 170 | ConfigBuilder::new() 171 | } 172 | 173 | /// The socket's [`RxQueue`](crate::RxQueue) size. 174 | pub fn rx_queue_size(&self) -> QueueSize { 175 | self.rx_queue_size 176 | } 177 | 178 | /// The socket's [`TxQueue`](crate::TxQueue) size. 179 | pub fn tx_queue_size(&self) -> QueueSize { 180 | self.tx_queue_size 181 | } 182 | 183 | /// The [`LibxdpFlags`] set. 184 | pub fn libxdp_flags(&self) -> &LibxdpFlags { 185 | &self.libxdp_flags 186 | } 187 | 188 | /// The [`XdpFlags`] set. 189 | pub fn xdp_flags(&self) -> &XdpFlags { 190 | &self.xdp_flags 191 | } 192 | 193 | /// The [`BindFlags`] set. 
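    ///
    /// For example, reading back flags previously set via the builder
    /// (the flag below is chosen only for illustration):
    ///
    /// ```no_run
    /// use xsk_rs::config::{BindFlags, SocketConfig};
    ///
    /// let config = SocketConfig::builder()
    ///     .bind_flags(BindFlags::XDP_USE_NEED_WAKEUP)
    ///     .build();
    ///
    /// assert!(config.bind_flags().contains(BindFlags::XDP_USE_NEED_WAKEUP));
    /// ```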
194 | pub fn bind_flags(&self) -> &BindFlags { 195 | &self.bind_flags 196 | } 197 | } 198 | 199 | impl Default for Config { 200 | fn default() -> Self { 201 | Self { 202 | rx_queue_size: QueueSize(XSK_RING_CONS__DEFAULT_NUM_DESCS), 203 | tx_queue_size: QueueSize(XSK_RING_PROD__DEFAULT_NUM_DESCS), 204 | libxdp_flags: LibxdpFlags::empty(), 205 | xdp_flags: XdpFlags::empty(), 206 | bind_flags: BindFlags::empty(), 207 | } 208 | } 209 | } 210 | 211 | impl From for xsk_socket_config { 212 | fn from(c: Config) -> Self { 213 | let xsk_socket_config = xsk_socket_config__bindgen_ty_1 { 214 | libxdp_flags: c.libxdp_flags.bits(), 215 | }; 216 | 217 | xsk_socket_config { 218 | rx_size: c.rx_queue_size.get(), 219 | tx_size: c.tx_queue_size.get(), 220 | xdp_flags: c.xdp_flags.bits(), 221 | bind_flags: c.bind_flags.bits(), 222 | __bindgen_anon_1: xsk_socket_config, 223 | } 224 | } 225 | } 226 | -------------------------------------------------------------------------------- /src/config/umem.rs: -------------------------------------------------------------------------------- 1 | use libxdp_sys::{ 2 | xsk_umem_config, XDP_PACKET_HEADROOM, XSK_RING_CONS__DEFAULT_NUM_DESCS, 3 | XSK_RING_PROD__DEFAULT_NUM_DESCS, XSK_UMEM__DEFAULT_FRAME_HEADROOM, 4 | XSK_UMEM__DEFAULT_FRAME_SIZE, 5 | }; 6 | use std::{error, fmt}; 7 | 8 | use super::{FrameSize, QueueSize}; 9 | 10 | /// Builder for a [`UmemConfig`](Config). 11 | #[derive(Debug, Default, Clone, Copy)] 12 | pub struct ConfigBuilder { 13 | config: Config, 14 | } 15 | 16 | impl ConfigBuilder { 17 | /// Creates a new [`UmemConfigBuilder`](ConfigBuilder) instance. 18 | pub fn new() -> Self { 19 | Self::default() 20 | } 21 | 22 | /// Set the frame size. Default is 23 | /// [`XSK_UMEM__DEFAULT_FRAME_SIZE`]. 24 | pub fn frame_size(&mut self, size: FrameSize) -> &mut Self { 25 | self.config.frame_size = size; 26 | self 27 | } 28 | 29 | /// Set the [`FillQueue`](crate::FillQueue) size. Default is 30 | /// [`XSK_RING_PROD__DEFAULT_NUM_DESCS`]. 31 | pub fn fill_queue_size(&mut self, size: QueueSize) -> &mut Self { 32 | self.config.fill_queue_size = size; 33 | self 34 | } 35 | 36 | /// Set the [`CompQueue`](crate::CompQueue) size. Default is 37 | /// [`XSK_RING_CONS__DEFAULT_NUM_DESCS`]. 38 | pub fn comp_queue_size(&mut self, size: QueueSize) -> &mut Self { 39 | self.config.comp_queue_size = size; 40 | self 41 | } 42 | 43 | /// Set the frame headroom available to the user. Default size is 44 | /// [`XSK_UMEM__DEFAULT_FRAME_HEADROOM`]. 45 | /// 46 | /// Not to be confused with [`XDP_PACKET_HEADROOM`] which is the 47 | /// amount of headroom reserved by XDP. 48 | pub fn frame_headroom(&mut self, headroom: u32) -> &mut Self { 49 | self.config.frame_headroom = headroom; 50 | self 51 | } 52 | 53 | /// Build a [`UmemConfig`](Config) instance using the values set 54 | /// in this builder. 55 | /// 56 | /// May fail if some of the values are incompatible. For example, 57 | /// if the requested frame headroom exceeds the frame size. 58 | pub fn build(&self) -> Result { 59 | let frame_size = self.config.frame_size.get(); 60 | let total_headroom = XDP_PACKET_HEADROOM + self.config.frame_headroom; 61 | 62 | if total_headroom > frame_size { 63 | Err(ConfigBuildError { 64 | frame_size, 65 | total_headroom, 66 | }) 67 | } else { 68 | Ok(self.config) 69 | } 70 | } 71 | } 72 | 73 | /// Config for a [`Umem`](crate::umem::Umem) instance. 
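///
/// A config with non-default values can be put together via the
/// builder. A minimal sketch (the sizes below are only illustrative):
///
/// ```no_run
/// use std::convert::TryInto;
/// use xsk_rs::config::UmemConfig;
///
/// let config = UmemConfig::builder()
///     .frame_size(4096u32.try_into().unwrap())
///     .fill_queue_size(1024u32.try_into().unwrap())
///     .comp_queue_size(1024u32.try_into().unwrap())
///     .build()
///     .expect("invalid UMEM config");
/// ```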
74 | /// 75 | /// It's worth noting that the specified `frame_size` is not 76 | /// necessarily the buffer size that will be available to write data 77 | /// into. Some of this will be eaten up by XDP program headroom 78 | /// ([`XDP_PACKET_HEADROOM`]) and any non-zero `frame_headroom`. Use 79 | /// the [`mtu`](Config::mtu) function to determine whether the frame 80 | /// is large enough to hold the data you wish to transmit. 81 | #[derive(Debug, Clone, Copy)] 82 | pub struct Config { 83 | frame_size: FrameSize, 84 | fill_queue_size: QueueSize, 85 | comp_queue_size: QueueSize, 86 | frame_headroom: u32, 87 | } 88 | 89 | impl Config { 90 | /// Creates a new [`UmemConfigBuilder`](ConfigBuilder) instance 91 | /// with with sizes as per the `libbpf` defaults. 92 | pub fn builder() -> ConfigBuilder { 93 | ConfigBuilder::new() 94 | } 95 | 96 | /// The size of each frame in the [`Umem`](crate::Umem). 97 | pub fn frame_size(&self) -> FrameSize { 98 | self.frame_size 99 | } 100 | 101 | /// The [`FillQueue`](crate::FillQueue) size. 102 | pub fn fill_queue_size(&self) -> QueueSize { 103 | self.fill_queue_size 104 | } 105 | 106 | /// The [`CompQueue`](crate::CompQueue) size. 107 | pub fn comp_queue_size(&self) -> QueueSize { 108 | self.comp_queue_size 109 | } 110 | 111 | /// The frame headroom reserved for the XDP program. 112 | pub fn xdp_headroom(&self) -> u32 { 113 | XDP_PACKET_HEADROOM 114 | } 115 | 116 | /// The frame headroom available to the user. 117 | pub fn frame_headroom(&self) -> u32 { 118 | self.frame_headroom 119 | } 120 | 121 | /// The maximum transmission unit, or the length of the packet 122 | /// data segment of the frame. 123 | /// 124 | /// Is defined as the frame size minus both the XDP headroom and 125 | /// user headroom. 126 | pub fn mtu(&self) -> u32 { 127 | self.frame_size.get() - (self.xdp_headroom() + self.frame_headroom) 128 | } 129 | } 130 | 131 | impl Default for Config { 132 | fn default() -> Self { 133 | Self { 134 | frame_size: FrameSize(XSK_UMEM__DEFAULT_FRAME_SIZE), 135 | fill_queue_size: QueueSize(XSK_RING_PROD__DEFAULT_NUM_DESCS), 136 | comp_queue_size: QueueSize(XSK_RING_CONS__DEFAULT_NUM_DESCS), 137 | frame_headroom: XSK_UMEM__DEFAULT_FRAME_HEADROOM, 138 | } 139 | } 140 | } 141 | 142 | impl From for xsk_umem_config { 143 | fn from(c: Config) -> Self { 144 | xsk_umem_config { 145 | fill_size: c.fill_queue_size.get(), 146 | comp_size: c.comp_queue_size.get(), 147 | frame_size: c.frame_size.get(), 148 | frame_headroom: c.frame_headroom, 149 | flags: 0, 150 | } 151 | } 152 | } 153 | 154 | /// Error detailing why [`UmemConfig`](Config) creation failed. 
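///
/// A short sketch of how this error can arise (values are only
/// illustrative):
///
/// ```no_run
/// use std::convert::TryInto;
/// use xsk_rs::config::UmemConfig;
///
/// // Requesting more user headroom than fits in a minimum-sized
/// // frame fails at build time.
/// let res = UmemConfig::builder()
///     .frame_size(2048u32.try_into().unwrap())
///     .frame_headroom(4096)
///     .build();
///
/// assert!(res.is_err());
/// ```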
155 | #[derive(Debug)] 156 | pub struct ConfigBuildError { 157 | frame_size: u32, 158 | total_headroom: u32, 159 | } 160 | 161 | impl fmt::Display for ConfigBuildError { 162 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 163 | write!( 164 | f, 165 | "total headroom {} cannot be greater than frame size {}", 166 | self.total_headroom, self.frame_size 167 | ) 168 | } 169 | } 170 | 171 | impl error::Error for ConfigBuildError {} 172 | 173 | #[cfg(test)] 174 | mod tests { 175 | use std::convert::TryInto; 176 | 177 | use crate::config::XDP_UMEM_MIN_CHUNK_SIZE; 178 | 179 | use super::*; 180 | 181 | #[test] 182 | fn frame_size_must_be_greater_than_total_headroom() { 183 | assert!(ConfigBuilder::new() 184 | .frame_headroom(XDP_UMEM_MIN_CHUNK_SIZE - XDP_PACKET_HEADROOM) 185 | .frame_size(XDP_UMEM_MIN_CHUNK_SIZE.try_into().unwrap()) 186 | .build() 187 | .is_ok()); 188 | 189 | assert!(ConfigBuilder::new() 190 | .frame_headroom(XDP_UMEM_MIN_CHUNK_SIZE - (XDP_PACKET_HEADROOM - 1)) 191 | .frame_size(XDP_UMEM_MIN_CHUNK_SIZE.try_into().unwrap()) 192 | .build() 193 | .is_err()); 194 | } 195 | 196 | #[test] 197 | fn frame_mtu_has_expected_value() { 198 | let frame_headroom = 1024; 199 | 200 | let config = ConfigBuilder::new() 201 | .frame_headroom(frame_headroom) 202 | .frame_size(XDP_UMEM_MIN_CHUNK_SIZE.try_into().unwrap()) 203 | .build() 204 | .unwrap(); 205 | 206 | assert_eq!( 207 | config.mtu(), 208 | XDP_UMEM_MIN_CHUNK_SIZE - (frame_headroom + XDP_PACKET_HEADROOM) 209 | ); 210 | } 211 | } 212 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! # xsk-rs 2 | //! 3 | //! A rust interface for AF_XDP sockets using libbpf. 4 | //! 5 | //! For more information please see the [networking 6 | //! docs](https://www.kernel.org/doc/html/latest/networking/af_xdp.html) 7 | //! or a more [detailed 8 | //! overview](http://vger.kernel.org/lpc_net2018_talks/lpc18_paper_af_xdp_perf-v2.pdf). 9 | //! 10 | //! An overview of XDP [setup 11 | //! dependencies](https://github.com/xdp-project/xdp-tutorial/blob/main/setup_dependencies.org) 12 | //! may also come in handy. 13 | //! 14 | //! Some simple examples may be found in the [GitHub 15 | //! repo](https://github.com/DouglasGray/xsk-rs/tree/master/examples), 16 | //! including an example of use in a multithreaded context and another 17 | //! creating a socket with a shared [`Umem`]. 18 | //! 19 | //! ### Safety 20 | //! 21 | //! There is a fair amount of unsafe involved when using this library, and 22 | //! so the potential for disaster, however if you keep in mind the 23 | //! following then there should hopefully be few avenues for catastrophe: 24 | //! - When a frame / address has been submitted to the [`FillQueue`] 25 | //! or [`TxQueue`], do not use it again until you have consumed it 26 | //! from either the [`CompQueue`] or [`RxQueue`]. 27 | //! - Do not use one [`Umem`]'s frame descriptors to access frames of 28 | //! another, different [`Umem`]. For example, via [`Umem::frame`]. 29 | //! 30 | //! ### Usage 31 | //! 32 | //! The below example sends a packet from one interface to another. 33 | //! 34 | //! ```no_run 35 | //! use std::{convert::TryInto, io::Write, str}; 36 | //! use xsk_rs::{ 37 | //! config::{SocketConfig, UmemConfig}, 38 | //! socket::Socket, 39 | //! umem::Umem, 40 | //! }; 41 | //! 42 | //! // Create a UMEM for dev1 with 32 frames, whose sizes are 43 | //! // specified via the `UmemConfig` instance. 
44 | //! let (dev1_umem, mut dev1_descs) = 45 | //! Umem::new(UmemConfig::default(), 32.try_into().unwrap(), false) 46 | //! .expect("failed to create UMEM"); 47 | //! 48 | //! // Bind an AF_XDP socket to the interface named `xsk_dev1`, on 49 | //! // queue 0. 50 | //! let (mut dev1_tx_q, _dev1_rx_q, _dev1_fq_and_cq) = Socket::new( 51 | //! SocketConfig::default(), 52 | //! &dev1_umem, 53 | //! &"xsk_dev1".parse().unwrap(), 54 | //! 0, 55 | //! ) 56 | //! .expect("failed to create dev1 socket"); 57 | //! 58 | //! // Create a UMEM for dev2. Another option is to use the same UMEM 59 | //! // as dev1 - to do that we'd just pass `dev1_umem` to the 60 | //! // `Socket::new` call. In this case the UMEM would be shared, and 61 | //! // so `dev1_descs` could be used in either context, but each 62 | //! // socket would have its own completion queue and fill queue. 63 | //! let (dev2_umem, mut dev2_descs) = 64 | //! Umem::new(UmemConfig::default(), 32.try_into().unwrap(), false) 65 | //! .expect("failed to create UMEM"); 66 | //! 67 | //! // Bind an AF_XDP socket to the interface named `xsk_dev2`, on 68 | //! // queue 0. 69 | //! let (_dev2_tx_q, mut dev2_rx_q, dev2_fq_and_cq) = Socket::new( 70 | //! SocketConfig::default(), 71 | //! &dev2_umem, 72 | //! &"xsk_dev2".parse().unwrap(), 73 | //! 0, 74 | //! ) 75 | //! .expect("failed to create dev2 socket"); 76 | //! 77 | //! let (mut dev2_fq, _dev2_cq) = dev2_fq_and_cq.expect("missing dev2 fill queue and comp queue"); 78 | //! 79 | //! // 1. Add frames to dev2's fill queue so we are ready to receive 80 | //! // some packets. 81 | //! unsafe { 82 | //! dev2_fq.produce(&dev2_descs); 83 | //! } 84 | //! 85 | //! // 2. Write to dev1's UMEM. 86 | //! let pkt = "Hello, world!".as_bytes(); 87 | //! 88 | //! unsafe { 89 | //! dev1_umem 90 | //! .data_mut(&mut dev1_descs[0]) 91 | //! .cursor() 92 | //! .write_all(pkt) 93 | //! .expect("failed writing packet to frame") 94 | //! } 95 | //! 96 | //! // 3. Submit the frame to the kernel for transmission. 97 | //! println!("sending: {:?}", str::from_utf8(&pkt).unwrap()); 98 | //! 99 | //! unsafe { 100 | //! dev1_tx_q.produce_and_wakeup(&dev1_descs[..1]).unwrap(); 101 | //! } 102 | //! 103 | //! // 4. Read on dev2. 104 | //! let pkts_recvd = unsafe { dev2_rx_q.poll_and_consume(&mut dev2_descs, 100).unwrap() }; 105 | //! 106 | //! // 5. Confirm that one of the packets we received matches what we expect. 107 | //! for recv_desc in dev2_descs.iter().take(pkts_recvd) { 108 | //! let data = unsafe { dev2_umem.data(recv_desc) }; 109 | //! 110 | //! if data.contents() == &pkt[..] { 111 | //! println!("received: {:?}", str::from_utf8(data.contents()).unwrap()); 112 | //! return; 113 | //! } 114 | //! } 115 | //! 116 | //! panic!("no matching packets received") 117 | //! ``` 118 | #![deny(missing_docs)] 119 | #![deny(missing_debug_implementations)] 120 | #![deny(unsafe_op_in_unsafe_fn)] 121 | #![allow(clippy::doc_lazy_continuation)] 122 | 123 | use cfg_if::cfg_if; 124 | 125 | cfg_if! 
{ 126 | if #[cfg(all(target_pointer_width = "64", target_family = "unix"))] { 127 | pub mod umem; 128 | pub use umem::{frame::FrameDesc, CompQueue, FillQueue, Umem}; 129 | 130 | pub mod socket; 131 | pub use socket::{RxQueue, Socket, TxQueue}; 132 | 133 | pub mod config; 134 | 135 | mod ring; 136 | mod util; 137 | 138 | #[cfg(test)] 139 | mod tests { 140 | use std::mem; 141 | 142 | #[test] 143 | fn ensure_usize_and_u64_are_same_size() { 144 | assert_eq!(mem::size_of::(), mem::size_of::()); 145 | } 146 | } 147 | } 148 | } 149 | -------------------------------------------------------------------------------- /src/ring.rs: -------------------------------------------------------------------------------- 1 | use std::ptr; 2 | 3 | use libxdp_sys::{xsk_ring_cons, xsk_ring_prod}; 4 | 5 | #[derive(Debug)] 6 | pub struct XskRingCons(xsk_ring_cons); 7 | 8 | impl XskRingCons { 9 | pub fn as_mut(&mut self) -> &mut xsk_ring_cons { 10 | &mut self.0 11 | } 12 | 13 | pub fn as_ref(&self) -> &xsk_ring_cons { 14 | &self.0 15 | } 16 | 17 | pub fn is_ring_null(&self) -> bool { 18 | self.0.ring.is_null() 19 | } 20 | } 21 | 22 | impl Default for XskRingCons { 23 | fn default() -> Self { 24 | Self(xsk_ring_cons { 25 | cached_prod: 0, 26 | cached_cons: 0, 27 | mask: 0, 28 | size: 0, 29 | producer: ptr::null_mut(), 30 | consumer: ptr::null_mut(), 31 | ring: ptr::null_mut(), 32 | flags: ptr::null_mut(), 33 | }) 34 | } 35 | } 36 | 37 | unsafe impl Send for XskRingCons {} 38 | 39 | #[derive(Debug)] 40 | pub struct XskRingProd(xsk_ring_prod); 41 | 42 | impl XskRingProd { 43 | pub fn as_mut(&mut self) -> &mut xsk_ring_prod { 44 | &mut self.0 45 | } 46 | 47 | pub fn as_ref(&self) -> &xsk_ring_prod { 48 | &self.0 49 | } 50 | 51 | pub fn is_ring_null(&self) -> bool { 52 | self.0.ring.is_null() 53 | } 54 | } 55 | 56 | impl Default for XskRingProd { 57 | fn default() -> Self { 58 | Self(xsk_ring_prod { 59 | cached_prod: 0, 60 | cached_cons: 0, 61 | mask: 0, 62 | size: 0, 63 | producer: ptr::null_mut(), 64 | consumer: ptr::null_mut(), 65 | ring: ptr::null_mut(), 66 | flags: ptr::null_mut(), 67 | }) 68 | } 69 | } 70 | 71 | unsafe impl Send for XskRingProd {} 72 | -------------------------------------------------------------------------------- /src/socket/fd.rs: -------------------------------------------------------------------------------- 1 | //! File descriptor utilities. 2 | 3 | use libc::{EINTR, POLLIN, POLLOUT, SOL_XDP}; 4 | use libxdp_sys::{xdp_statistics, XDP_STATISTICS}; 5 | use std::{ 6 | fmt, 7 | io::{self, ErrorKind}, 8 | mem, 9 | os::unix::prelude::{AsRawFd, RawFd}, 10 | }; 11 | 12 | use crate::util; 13 | 14 | const XDP_STATISTICS_SIZEOF: u32 = mem::size_of::() as u32; 15 | 16 | #[derive(Clone, Copy)] 17 | struct PollFd(libc::pollfd); 18 | 19 | impl PollFd { 20 | #[inline] 21 | fn poll(&mut self, timeout_ms: i32) -> io::Result { 22 | let ret = unsafe { libc::poll(&mut self.0, 1, timeout_ms) }; 23 | 24 | if ret < 0 { 25 | if util::get_errno() != EINTR { 26 | return Err(io::Error::last_os_error()); 27 | } else { 28 | return Ok(false); 29 | } 30 | } 31 | 32 | if ret == 0 { 33 | Ok(false) 34 | } else { 35 | Ok(true) 36 | } 37 | } 38 | } 39 | 40 | /// A pollable AF_XDP [`Socket`](crate::Socket) file descriptor. 
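///
/// A minimal sketch of typical use, here reading socket statistics
/// (assuming `rx_q` is an [`RxQueue`](crate::RxQueue) belonging to an
/// already-bound socket):
///
/// ```ignore
/// let stats = rx_q.fd().xdp_statistics()?;
///
/// if stats.rx_dropped() > 0 {
///     eprintln!("{} packets dropped", stats.rx_dropped());
/// }
/// ```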
41 | pub struct Fd { 42 | id: i32, 43 | pollfd_read: PollFd, 44 | pollfd_write: PollFd, 45 | } 46 | 47 | impl Fd { 48 | pub(super) fn new(id: i32) -> Self { 49 | let pollfd_read = PollFd(libc::pollfd { 50 | fd: id, 51 | events: POLLIN, 52 | revents: 0, 53 | }); 54 | 55 | let pollfd_write = PollFd(libc::pollfd { 56 | fd: id, 57 | events: POLLOUT, 58 | revents: 0, 59 | }); 60 | 61 | Fd { 62 | id, 63 | pollfd_read, 64 | pollfd_write, 65 | } 66 | } 67 | 68 | pub(super) fn clone(&self) -> Self { 69 | Self { 70 | id: self.id, 71 | pollfd_read: self.pollfd_read, 72 | pollfd_write: self.pollfd_write, 73 | } 74 | } 75 | 76 | #[inline] 77 | pub(crate) fn poll_read(&mut self, timeout_ms: i32) -> io::Result { 78 | self.pollfd_read.poll(timeout_ms) 79 | } 80 | 81 | #[inline] 82 | pub(crate) fn poll_write(&mut self, timeout_ms: i32) -> io::Result { 83 | self.pollfd_write.poll(timeout_ms) 84 | } 85 | 86 | /// Returns [`Socket`](crate::Socket) statistics. 87 | #[inline] 88 | pub fn xdp_statistics(&self) -> io::Result { 89 | let mut stats = XdpStatistics::default(); 90 | 91 | let mut optlen = XDP_STATISTICS_SIZEOF; 92 | 93 | let err = unsafe { 94 | libc::getsockopt( 95 | self.as_raw_fd(), 96 | SOL_XDP, 97 | XDP_STATISTICS as i32, 98 | &mut stats.0 as *mut _ as *mut libc::c_void, 99 | &mut optlen, 100 | ) 101 | }; 102 | 103 | if err != 0 { 104 | return Err(io::Error::last_os_error()); 105 | } 106 | 107 | if optlen == XDP_STATISTICS_SIZEOF { 108 | Ok(stats) 109 | } else { 110 | Err(io::Error::new( 111 | ErrorKind::Other, 112 | "`optlen` returned from `getsockopt` does not match `xdp_statistics` struct size", 113 | )) 114 | } 115 | } 116 | } 117 | 118 | impl fmt::Debug for Fd { 119 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 120 | f.debug_struct("Fd").field("id", &self.id).finish() 121 | } 122 | } 123 | 124 | impl AsRawFd for Fd { 125 | /// The inner file descriptor. 126 | /// 127 | /// May be required, for example, in the case where the default 128 | /// libbpf program has not been loaded (using the 129 | /// [`XSK_LIBXDP_FLAGS_INHIBIT_PROG_LOAD`] flag) and the socket's 130 | /// file descriptor must be available to register it in the 131 | /// `XSKMAP`. 132 | /// 133 | /// [`XSK_LIBXDP_FLAGS_INHIBIT_PROG_LOAD`]: crate::config::LibxdpFlags::XSK_LIBXDP_FLAGS_INHIBIT_PROG_LOAD 134 | #[inline] 135 | fn as_raw_fd(&self) -> RawFd { 136 | self.id 137 | } 138 | } 139 | 140 | /// AF_XDP [`Socket`](crate::Socket) statistics. 141 | /// 142 | /// Can be retrieved by calling [`xdp_statistics`](Fd::xdp_statistics). 143 | #[derive(Debug, Clone, Copy)] 144 | pub struct XdpStatistics(xdp_statistics); 145 | 146 | impl Default for XdpStatistics { 147 | fn default() -> Self { 148 | Self(xdp_statistics { 149 | rx_dropped: 0, 150 | rx_invalid_descs: 0, 151 | tx_invalid_descs: 0, 152 | rx_ring_full: 0, 153 | rx_fill_ring_empty_descs: 0, 154 | tx_ring_empty_descs: 0, 155 | }) 156 | } 157 | } 158 | 159 | impl XdpStatistics { 160 | /// Received packets dropped due to an invalid descriptor. 161 | #[inline] 162 | pub fn rx_invalid_descs(&self) -> u64 { 163 | self.0.rx_invalid_descs 164 | } 165 | 166 | /// Received packets dropped due to rx ring being full. 167 | #[inline] 168 | pub fn rx_ring_full(&self) -> u64 { 169 | self.0.rx_ring_full 170 | } 171 | 172 | /// Received packets dropped for other reasons. 173 | #[inline] 174 | pub fn rx_dropped(&self) -> u64 { 175 | self.0.rx_dropped 176 | } 177 | 178 | /// Packets to be sent but dropped due to an invalid desccriptor. 
179 | #[inline] 180 | pub fn tx_invalid_descs(&self) -> u64 { 181 | self.0.tx_invalid_descs 182 | } 183 | 184 | /// Items failed to be retrieved from fill ring. 185 | #[inline] 186 | pub fn rx_fill_ring_empty_descs(&self) -> u64 { 187 | self.0.rx_fill_ring_empty_descs 188 | } 189 | 190 | /// Items failed to be retrieved from tx ring. 191 | #[inline] 192 | pub fn tx_ring_empty_descs(&self) -> u64 { 193 | self.0.tx_ring_empty_descs 194 | } 195 | } 196 | -------------------------------------------------------------------------------- /src/socket/mod.rs: -------------------------------------------------------------------------------- 1 | //! Types for creating and using an AF_XDP [`Socket`]. 2 | 3 | mod fd; 4 | pub use fd::{Fd, XdpStatistics}; 5 | 6 | mod rx_queue; 7 | pub use rx_queue::RxQueue; 8 | 9 | mod tx_queue; 10 | pub use tx_queue::TxQueue; 11 | 12 | use libxdp_sys::xsk_socket; 13 | use std::{ 14 | borrow::Borrow, 15 | error::Error, 16 | fmt, io, 17 | ptr::{self, NonNull}, 18 | sync::{Arc, Mutex}, 19 | }; 20 | 21 | use crate::{ 22 | config::{Interface, SocketConfig}, 23 | ring::{XskRingCons, XskRingProd}, 24 | umem::{CompQueue, FillQueue, Umem}, 25 | }; 26 | 27 | /// Wrapper around a pointer to some AF_XDP socket. 28 | #[derive(Debug)] 29 | struct XskSocket(NonNull); 30 | 31 | impl XskSocket { 32 | /// # Safety 33 | /// 34 | /// Only one instance of this struct may exist since it deletes 35 | /// the socket as part of its [`Drop`] impl. If there are copies or 36 | /// clones of `ptr` then care must be taken to ensure they aren't 37 | /// used once this struct goes out of scope, and that they don't 38 | /// delete the socket themselves. 39 | unsafe fn new(ptr: NonNull) -> Self { 40 | Self(ptr) 41 | } 42 | } 43 | 44 | impl Drop for XskSocket { 45 | fn drop(&mut self) { 46 | // SAFETY: unsafe constructor contract guarantees that the 47 | // socket has not been deleted already. 48 | unsafe { 49 | libxdp_sys::xsk_socket__delete(self.0.as_mut()); 50 | } 51 | } 52 | } 53 | 54 | unsafe impl Send for XskSocket {} 55 | 56 | #[derive(Debug)] 57 | struct SocketInner { 58 | // `ptr` must appear before `umem` to ensure correct drop order. 59 | _ptr: XskSocket, 60 | _umem: Umem, 61 | } 62 | 63 | impl SocketInner { 64 | fn new(ptr: XskSocket, umem: Umem) -> Self { 65 | Self { 66 | _ptr: ptr, 67 | _umem: umem, 68 | } 69 | } 70 | } 71 | 72 | /// An AF_XDP socket. 73 | /// 74 | /// More details can be found in the 75 | /// [docs](https://www.kernel.org/doc/html/latest/networking/af_xdp.html) 76 | #[derive(Debug)] 77 | pub struct Socket { 78 | fd: Fd, 79 | _inner: Arc>, 80 | } 81 | 82 | impl Socket { 83 | /// Create and bind a new AF_XDP socket to a given interface and 84 | /// queue id using the underlying UMEM. 85 | /// 86 | /// May require root permissions to create successfully. 87 | /// 88 | /// Whether you can expect the returned `Option<(FillQueue, 89 | /// CompQueue)>` to be [`Some`] or [`None`] depends on a couple of 90 | /// things: 91 | /// 92 | /// 1. If the [`Umem`] is currently shared (i.e. being used for 93 | /// >=1 AF_XDP sockets elsewhere): 94 | /// 95 | /// - If the `(if_name, queue_id)` pair is not bound to, expect 96 | /// [`Some`]. 97 | /// 98 | /// - If the `(if_name, queue_id)` pair is bound to, expect 99 | /// [`None`] and use the [`FillQueue`] and [`CompQueue`] 100 | /// originally returned for this pair. 101 | /// 102 | /// 2. If the [`Umem`] is not currently shared, expect [`Some`]. 
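    ///
    /// A rough sketch of a single, non-shared bind, in which case the
    /// fill and completion queues can be unwrapped (assuming `umem`
    /// was created beforehand; the interface name is only an example):
    ///
    /// ```ignore
    /// let (tx_q, rx_q, fq_and_cq) = unsafe {
    ///     Socket::new(SocketConfig::default(), &umem, &"eth0".parse().unwrap(), 0)?
    /// };
    ///
    /// let (fq, cq) = fq_and_cq.expect("UMEM not shared, so queues are returned");
    /// ```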
103 | /// 104 | /// For further details on using a shared [`Umem`] please see the 105 | /// [docs](https://www.kernel.org/doc/html/latest/networking/af_xdp.html#xdp-shared-umem-bind-flag). 106 | /// 107 | /// # Safety 108 | /// 109 | /// If sharing the [`Umem`] and the `(if_name, queue_id)` pair is 110 | /// already bound to, then the 111 | /// [`XSK_LIBXDP_FLAGS_INHIBIT_PROG_LOAD`] flag must be 112 | /// set. Otherwise, a double-free may occur when dropping sockets 113 | /// if the program has already been detached. 114 | /// 115 | /// [`XSK_LIBXDP_FLAGS_INHIBIT_PROG_LOAD`]: crate::config::LibxdpFlags::XSK_LIBXDP_FLAGS_INHIBIT_PROG_LOAD 116 | #[allow(clippy::new_ret_no_self)] 117 | #[allow(clippy::type_complexity)] 118 | pub unsafe fn new( 119 | config: SocketConfig, 120 | umem: &Umem, 121 | if_name: &Interface, 122 | queue_id: u32, 123 | ) -> Result<(TxQueue, RxQueue, Option<(FillQueue, CompQueue)>), SocketCreateError> { 124 | let mut socket_ptr = ptr::null_mut(); 125 | let mut tx_q = XskRingProd::default(); 126 | let mut rx_q = XskRingCons::default(); 127 | 128 | let (err, fq, cq) = unsafe { 129 | umem.with_ptr_and_saved_queues(|xsk_umem, saved_fq_and_cq| { 130 | let (mut fq, mut cq) = saved_fq_and_cq 131 | .take() 132 | .unwrap_or_else(|| (Box::default(), Box::default())); 133 | 134 | let err = libxdp_sys::xsk_socket__create_shared( 135 | &mut socket_ptr, 136 | if_name.as_cstr().as_ptr(), 137 | queue_id, 138 | xsk_umem, 139 | rx_q.as_mut(), 140 | tx_q.as_mut(), 141 | fq.as_mut().as_mut(), // double deref due to Box 142 | cq.as_mut().as_mut(), 143 | &config.into(), 144 | ); 145 | 146 | (err, fq, cq) 147 | }) 148 | }; 149 | 150 | if err != 0 { 151 | return Err(SocketCreateError { 152 | reason: "non-zero error code returned when creating AF_XDP socket", 153 | err: io::Error::from_raw_os_error(-err), 154 | }); 155 | } 156 | 157 | let socket_ptr = match NonNull::new(socket_ptr) { 158 | Some(init_xsk) => { 159 | // SAFETY: this is the only `XskSocket` instance for 160 | // this pointer, and no other pointers to the socket 161 | // exist. 
162 | unsafe { XskSocket::new(init_xsk) } 163 | } 164 | None => { 165 | return Err(SocketCreateError { 166 | reason: "returned socket pointer was null", 167 | err: io::Error::from_raw_os_error(-err), 168 | }); 169 | } 170 | }; 171 | 172 | let fd = unsafe { libxdp_sys::xsk_socket__fd(socket_ptr.0.as_ref()) }; 173 | 174 | if fd < 0 { 175 | return Err(SocketCreateError { 176 | reason: "failed to retrieve AF_XDP socket file descriptor", 177 | err: io::Error::from_raw_os_error(-fd), 178 | }); 179 | } 180 | 181 | let socket = Socket { 182 | fd: Fd::new(fd), 183 | _inner: Arc::new(Mutex::new(SocketInner::new(socket_ptr, umem.clone()))), 184 | }; 185 | 186 | let tx_q = if tx_q.is_ring_null() { 187 | return Err(SocketCreateError { 188 | reason: "returned tx queue ring is null", 189 | err: io::Error::from_raw_os_error(-err), 190 | }); 191 | } else { 192 | TxQueue::new(tx_q, socket.clone()) 193 | }; 194 | 195 | let rx_q = if rx_q.is_ring_null() { 196 | return Err(SocketCreateError { 197 | reason: "returned rx queue ring is null", 198 | err: io::Error::from_raw_os_error(-err), 199 | }); 200 | } else { 201 | RxQueue::new(rx_q, socket) 202 | }; 203 | 204 | let fq_and_cq = match (fq.is_ring_null(), cq.is_ring_null()) { 205 | (true, true) => None, 206 | (false, false) => { 207 | let fq = FillQueue::new(*fq, umem.clone()); 208 | let cq = CompQueue::new(*cq, umem.clone()); 209 | 210 | Some((fq, cq)) 211 | } 212 | _ => { 213 | return Err(SocketCreateError { 214 | reason: "fill queue xor comp queue ring is null, either both or neither should be non-null", 215 | err: io::Error::from_raw_os_error(-err), 216 | }); 217 | } 218 | }; 219 | 220 | Ok((tx_q, rx_q, fq_and_cq)) 221 | } 222 | } 223 | 224 | impl Clone for Socket { 225 | fn clone(&self) -> Self { 226 | Self { 227 | fd: self.fd.clone(), 228 | _inner: self._inner.clone(), 229 | } 230 | } 231 | } 232 | 233 | /// Error detailing why [`Socket`] creation failed. 234 | #[derive(Debug)] 235 | pub struct SocketCreateError { 236 | reason: &'static str, 237 | err: io::Error, 238 | } 239 | 240 | impl fmt::Display for SocketCreateError { 241 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 242 | write!(f, "{}", self.reason) 243 | } 244 | } 245 | 246 | impl Error for SocketCreateError { 247 | fn source(&self) -> Option<&(dyn Error + 'static)> { 248 | Some(self.err.borrow()) 249 | } 250 | } 251 | -------------------------------------------------------------------------------- /src/socket/rx_queue.rs: -------------------------------------------------------------------------------- 1 | use std::io; 2 | 3 | use crate::{ring::XskRingCons, umem::frame::FrameDesc}; 4 | 5 | use super::{fd::Fd, Socket}; 6 | 7 | /// The receiving side of an AF_XDP [`Socket`]. 8 | /// 9 | /// More details can be found in the 10 | /// [docs](https://www.kernel.org/doc/html/latest/networking/af_xdp.html#rx-ring). 11 | #[derive(Debug)] 12 | pub struct RxQueue { 13 | ring: XskRingCons, 14 | socket: Socket, 15 | } 16 | 17 | impl RxQueue { 18 | pub(super) fn new(ring: XskRingCons, socket: Socket) -> Self { 19 | Self { ring, socket } 20 | } 21 | 22 | /// Update `descs` with information on which [`Umem`] frames have 23 | /// received packets. Returns the number of elements of `descs` 24 | /// which have been updated. 25 | /// 26 | /// The number of entries updated will be less than or equal to 27 | /// the length of `descs`. Entries will be updated sequentially 28 | /// from the start of `descs` until the end. 
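    ///
    /// A rough sketch of a receive loop built on this method (assuming
    /// `umem`, `rx_q` and `descs` were set up as in the crate-level
    /// example):
    ///
    /// ```ignore
    /// loop {
    ///     if rx_q.poll(100)? {
    ///         // SAFETY: `descs` belong to the same UMEM as `rx_q`.
    ///         let n = unsafe { rx_q.consume(&mut descs) };
    ///
    ///         for desc in descs.iter().take(n) {
    ///             // SAFETY: `desc` belongs to the same UMEM as well.
    ///             let data = unsafe { umem.data(desc) };
    ///             println!("received {} bytes", data.contents().len());
    ///         }
    ///     }
    /// }
    /// ```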
29 | /// 30 | /// Once the contents of the consumed frames have been dealt with 31 | /// and are no longer required, the frames should eventually be 32 | /// added back on to either the [`FillQueue`] or the [`TxQueue`]. 33 | /// 34 | /// # Safety 35 | /// 36 | /// The frames passed to this queue must belong to the same 37 | /// [`Umem`] that this `RxQueue` instance is tied to. 38 | /// 39 | /// [`Umem`]: crate::Umem 40 | /// [`FillQueue`]: crate::FillQueue 41 | /// [`TxQueue`]: crate::TxQueue 42 | #[inline] 43 | pub unsafe fn consume(&mut self, descs: &mut [FrameDesc]) -> usize { 44 | let nb = descs.len() as u32; 45 | 46 | if nb == 0 { 47 | return 0; 48 | } 49 | 50 | let mut idx = 0; 51 | 52 | let cnt = unsafe { libxdp_sys::xsk_ring_cons__peek(self.ring.as_mut(), nb, &mut idx) }; 53 | 54 | if cnt > 0 { 55 | for desc in descs.iter_mut().take(cnt as usize) { 56 | let recv_pkt_desc = 57 | unsafe { libxdp_sys::xsk_ring_cons__rx_desc(self.ring.as_ref(), idx) }; 58 | 59 | unsafe { 60 | desc.addr = (*recv_pkt_desc).addr as usize; 61 | desc.lengths.data = (*recv_pkt_desc).len as usize; 62 | desc.lengths.headroom = 0; 63 | desc.options = (*recv_pkt_desc).options; 64 | } 65 | 66 | idx += 1; 67 | } 68 | 69 | unsafe { libxdp_sys::xsk_ring_cons__release(self.ring.as_mut(), cnt) }; 70 | } 71 | 72 | cnt as usize 73 | } 74 | 75 | /// Same as [`consume`] but for a single frame descriptor. 76 | /// 77 | /// # Safety 78 | /// 79 | /// See [`consume`]. 80 | /// 81 | /// [`consume`]: Self::consume 82 | #[inline] 83 | pub unsafe fn consume_one(&mut self, desc: &mut FrameDesc) -> usize { 84 | let mut idx = 0; 85 | 86 | let cnt = unsafe { libxdp_sys::xsk_ring_cons__peek(self.ring.as_mut(), 1, &mut idx) }; 87 | 88 | if cnt > 0 { 89 | let recv_pkt_desc = 90 | unsafe { libxdp_sys::xsk_ring_cons__rx_desc(self.ring.as_ref(), idx) }; 91 | 92 | unsafe { 93 | desc.addr = (*recv_pkt_desc).addr as usize; 94 | desc.lengths.data = (*recv_pkt_desc).len as usize; 95 | desc.lengths.headroom = 0; 96 | desc.options = (*recv_pkt_desc).options; 97 | } 98 | 99 | unsafe { libxdp_sys::xsk_ring_cons__release(self.ring.as_mut(), cnt) }; 100 | } 101 | 102 | cnt as usize 103 | } 104 | 105 | /// Same as [`consume`] but poll first to check if there is 106 | /// anything to read beforehand. 107 | /// 108 | /// # Safety 109 | /// 110 | /// See [`consume`]. 111 | /// 112 | /// [`consume`]: RxQueue::consume 113 | #[inline] 114 | pub unsafe fn poll_and_consume( 115 | &mut self, 116 | descs: &mut [FrameDesc], 117 | poll_timeout: i32, 118 | ) -> io::Result { 119 | match self.poll(poll_timeout)? { 120 | true => Ok(unsafe { self.consume(descs) }), 121 | false => Ok(0), 122 | } 123 | } 124 | 125 | /// Same as [`poll_and_consume`] but for a single frame descriptor. 126 | /// 127 | /// # Safety 128 | /// 129 | /// See [`consume`]. 130 | /// 131 | /// [`poll_and_consume`]: Self::poll_and_consume 132 | /// [`consume`]: Self::consume 133 | #[inline] 134 | pub unsafe fn poll_and_consume_one( 135 | &mut self, 136 | desc: &mut FrameDesc, 137 | poll_timeout: i32, 138 | ) -> io::Result { 139 | match self.poll(poll_timeout)? { 140 | true => Ok(unsafe { self.consume_one(desc) }), 141 | false => Ok(0), 142 | } 143 | } 144 | 145 | /// Polls the socket, returning `true` if there is data to read. 146 | #[inline] 147 | pub fn poll(&mut self, poll_timeout: i32) -> io::Result { 148 | self.socket.fd.poll_read(poll_timeout) 149 | } 150 | 151 | /// A reference to the underlying [`Socket`]'s file descriptor. 
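    ///
    /// One common use of this handle is waking the kernel on behalf of
    /// the [`FillQueue`](crate::FillQueue), whose `produce_and_wakeup`
    /// and `wakeup` methods take the receiving socket's file
    /// descriptor (via [`fd_mut`](Self::fd_mut), as they need a
    /// mutable handle). A rough sketch, assuming `fq`, `rx_q` and
    /// `descs` already exist:
    ///
    /// ```ignore
    /// // SAFETY: `descs` belong to the same UMEM as `fq`.
    /// unsafe {
    ///     fq.produce_and_wakeup(&descs, rx_q.fd_mut(), 100)?;
    /// }
    /// ```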
152 | #[inline] 153 | pub fn fd(&self) -> &Fd { 154 | &self.socket.fd 155 | } 156 | 157 | /// A mutable reference to the underlying [`Socket`]'s file descriptor. 158 | #[inline] 159 | pub fn fd_mut(&mut self) -> &mut Fd { 160 | &mut self.socket.fd 161 | } 162 | } 163 | -------------------------------------------------------------------------------- /src/socket/tx_queue.rs: -------------------------------------------------------------------------------- 1 | use libc::{EAGAIN, EBUSY, ENETDOWN, ENOBUFS, MSG_DONTWAIT}; 2 | use std::{io, os::unix::prelude::AsRawFd, ptr}; 3 | 4 | use crate::{ring::XskRingProd, umem::frame::FrameDesc, util}; 5 | 6 | use super::{fd::Fd, Socket}; 7 | 8 | /// The transmitting side of an AF_XDP [`Socket`]. 9 | /// 10 | /// More details can be found in the 11 | /// [docs](https://www.kernel.org/doc/html/latest/networking/af_xdp.html#tx-ring). 12 | #[derive(Debug)] 13 | pub struct TxQueue { 14 | ring: XskRingProd, 15 | socket: Socket, 16 | } 17 | 18 | impl TxQueue { 19 | pub(super) fn new(ring: XskRingProd, socket: Socket) -> Self { 20 | Self { ring, socket } 21 | } 22 | 23 | /// Let the kernel know that the frames described by `descs` are 24 | /// ready to be transmitted. Returns the number of frames 25 | /// submitted to the kernel. 26 | /// 27 | /// Note that if the length of `descs` is greater than the number 28 | /// of available spaces on the underlying ring buffer then no 29 | /// frames at all will be submitted for transmission. 30 | /// 31 | /// Once the frames have been submitted to this queue they should 32 | /// not be used again until consumed via the [`CompQueue`]. 33 | /// 34 | /// # Safety 35 | /// 36 | /// This function is unsafe as it is possible to cause a data race 37 | /// if used improperly. For example, by simultaneously submitting 38 | /// the same frame to this `TxQueue` and the [`FillQueue`]. 39 | /// 40 | /// Furthermore, the frames passed to this queue must belong to 41 | /// the same [`Umem`] that this `TxQueue` instance is tied to. 42 | /// 43 | /// [`FillQueue`]: crate::FillQueue 44 | /// [`CompQueue`]: crate::CompQueue 45 | /// [`Umem`]: crate::Umem 46 | #[inline] 47 | pub unsafe fn produce(&mut self, descs: &[FrameDesc]) -> usize { 48 | let nb = descs.len() as u32; 49 | 50 | if nb == 0 { 51 | return 0; 52 | } 53 | 54 | let mut idx = 0; 55 | 56 | let cnt = unsafe { libxdp_sys::xsk_ring_prod__reserve(self.ring.as_mut(), nb, &mut idx) }; 57 | 58 | if cnt > 0 { 59 | for desc in descs.iter().take(cnt as usize) { 60 | let send_pkt_desc = 61 | unsafe { libxdp_sys::xsk_ring_prod__tx_desc(self.ring.as_mut(), idx) }; 62 | 63 | // SAFETY: unsafe contract of this function guarantees 64 | // `desc` describes a frame belonging to the same UMEM as 65 | // this queue. 66 | unsafe { desc.write_xdp_desc(&mut *send_pkt_desc) }; 67 | 68 | idx += 1; 69 | } 70 | 71 | unsafe { libxdp_sys::xsk_ring_prod__submit(self.ring.as_mut(), cnt) }; 72 | } 73 | 74 | cnt as usize 75 | } 76 | 77 | /// Same as [`produce`] but for a single frame descriptor. 78 | /// 79 | /// # Safety 80 | /// 81 | /// See [`produce`]. 
82 | /// 83 | /// [`produce`]: Self::produce 84 | #[inline] 85 | pub unsafe fn produce_one(&mut self, desc: &FrameDesc) -> usize { 86 | let mut idx = 0; 87 | 88 | let cnt = unsafe { libxdp_sys::xsk_ring_prod__reserve(self.ring.as_mut(), 1, &mut idx) }; 89 | 90 | if cnt > 0 { 91 | let send_pkt_desc = 92 | unsafe { libxdp_sys::xsk_ring_prod__tx_desc(self.ring.as_mut(), idx) }; 93 | 94 | // SAFETY: unsafe contract of this function guarantees 95 | // `desc` describes a frame belonging to the same UMEM as 96 | // this queue. 97 | unsafe { desc.write_xdp_desc(&mut *send_pkt_desc) }; 98 | 99 | unsafe { libxdp_sys::xsk_ring_prod__submit(self.ring.as_mut(), cnt) }; 100 | } 101 | 102 | cnt as usize 103 | } 104 | 105 | /// Same as [`produce`] but wake up the kernel to continue 106 | /// processing produced frames (if required). 107 | /// 108 | /// For more details see the 109 | /// [docs](https://www.kernel.org/doc/html/latest/networking/af_xdp.html#xdp-use-need-wakeup-bind-flag). 110 | /// 111 | /// # Safety 112 | /// 113 | /// See [`produce`]. 114 | /// 115 | /// [`produce`]: Self::produce 116 | #[inline] 117 | pub unsafe fn produce_and_wakeup(&mut self, descs: &[FrameDesc]) -> io::Result { 118 | let cnt = unsafe { self.produce(descs) }; 119 | 120 | if self.needs_wakeup() { 121 | self.wakeup()?; 122 | } 123 | 124 | Ok(cnt) 125 | } 126 | 127 | /// Same as [`produce_and_wakeup`] but for a single frame 128 | /// descriptor. 129 | /// 130 | /// # Safety 131 | /// 132 | /// See [`produce`]. 133 | /// 134 | /// [`produce_and_wakeup`]: Self::produce_and_wakeup 135 | /// [`produce`]: Self::produce 136 | #[inline] 137 | pub unsafe fn produce_one_and_wakeup(&mut self, desc: &FrameDesc) -> io::Result { 138 | let cnt = unsafe { self.produce_one(desc) }; 139 | 140 | if self.needs_wakeup() { 141 | self.wakeup()?; 142 | } 143 | 144 | Ok(cnt) 145 | } 146 | 147 | /// Wake up the kernel to continue processing produced frames. 148 | /// 149 | /// See [`produce_and_wakeup`] for a link to docs with further 150 | /// explanation. 151 | /// 152 | /// [`produce_and_wakeup`]: Self::produce_and_wakeup 153 | #[inline] 154 | pub fn wakeup(&self) -> io::Result<()> { 155 | let ret = unsafe { 156 | libc::sendto( 157 | self.socket.fd.as_raw_fd(), 158 | ptr::null(), 159 | 0, 160 | MSG_DONTWAIT, 161 | ptr::null(), 162 | 0, 163 | ) 164 | }; 165 | 166 | if ret < 0 { 167 | match util::get_errno() { 168 | ENOBUFS | EAGAIN | EBUSY | ENETDOWN => (), 169 | _ => return Err(io::Error::last_os_error()), 170 | } 171 | } 172 | 173 | Ok(()) 174 | } 175 | 176 | /// Check if the [`XDP_USE_NEED_WAKEUP`] flag is set on the tx 177 | /// ring. If so then this means a call to [`wakeup`] will be 178 | /// required to continue processing produced frames. 179 | /// 180 | /// See [`produce_and_wakeup`] for link to docs with further 181 | /// explanation. 182 | /// 183 | /// [`XDP_USE_NEED_WAKEUP`]: libxdp_sys::XDP_USE_NEED_WAKEUP 184 | /// [`wakeup`]: Self::wakeup 185 | /// [`produce_and_wakeup`]: Self::produce_and_wakeup 186 | #[inline] 187 | pub fn needs_wakeup(&self) -> bool { 188 | unsafe { libxdp_sys::xsk_ring_prod__needs_wakeup(self.ring.as_ref()) != 0 } 189 | } 190 | 191 | /// Polls the socket, returning `true` if it is ready to write. 192 | #[inline] 193 | pub fn poll(&mut self, poll_timeout: i32) -> io::Result { 194 | self.socket.fd.poll_write(poll_timeout) 195 | } 196 | 197 | /// A reference to the underlying [`Socket`]'s file descriptor. 
198 | #[inline] 199 | pub fn fd(&self) -> &Fd { 200 | &self.socket.fd 201 | } 202 | 203 | /// A mutable reference to the underlying [`Socket`]'s file descriptor. 204 | #[inline] 205 | pub fn fd_mut(&mut self) -> &mut Fd { 206 | &mut self.socket.fd 207 | } 208 | } 209 | -------------------------------------------------------------------------------- /src/umem/comp_queue.rs: -------------------------------------------------------------------------------- 1 | use crate::ring::XskRingCons; 2 | 3 | use super::{frame::FrameDesc, Umem}; 4 | 5 | /// Used to transfer ownership of [`Umem`](super::Umem) frames from 6 | /// kernel-space to user-space. 7 | /// 8 | /// Frames received in this queue are those that have been sent via 9 | /// the [`TxQueue`](crate::socket::TxQueue). 10 | /// 11 | /// For more information see the 12 | /// [docs](https://www.kernel.org/doc/html/latest/networking/af_xdp.html#umem-completion-ring). 13 | #[derive(Debug)] 14 | pub struct CompQueue { 15 | ring: XskRingCons, 16 | _umem: Umem, 17 | } 18 | 19 | impl CompQueue { 20 | pub(crate) fn new(ring: XskRingCons, umem: Umem) -> Self { 21 | Self { ring, _umem: umem } 22 | } 23 | 24 | /// Update `descs` with details of frames whose contents have been 25 | /// sent (after submission via the [`TxQueue`]) and may now be 26 | /// used again. Returns the number of elements of `descs` which 27 | /// have been updated. 28 | /// 29 | /// The number of entries updated will be less than or equal to 30 | /// the length of `descs`. Entries will be updated sequentially 31 | /// from the start of `descs` until the end. 32 | /// 33 | /// Free frames should eventually be added back on to either the 34 | /// [`FillQueue`] or the [`TxQueue`]. 35 | /// 36 | /// # Safety 37 | /// 38 | /// The frames passed to this queue must belong to the same 39 | /// [`Umem`] that this `CompQueue` instance is tied to. 40 | /// 41 | /// [`TxQueue`]: crate::socket::TxQueue 42 | /// [`FillQueue`]: crate::FillQueue 43 | #[inline] 44 | pub unsafe fn consume(&mut self, descs: &mut [FrameDesc]) -> usize { 45 | let nb = descs.len() as u32; 46 | 47 | if nb == 0 { 48 | return 0; 49 | } 50 | 51 | let mut idx = 0; 52 | 53 | let cnt = unsafe { libxdp_sys::xsk_ring_cons__peek(self.ring.as_mut(), nb, &mut idx) }; 54 | 55 | if cnt > 0 { 56 | for desc in descs.iter_mut().take(cnt as usize) { 57 | let addr = 58 | unsafe { *libxdp_sys::xsk_ring_cons__comp_addr(self.ring.as_ref(), idx) }; 59 | 60 | desc.addr = addr as usize; 61 | desc.lengths.data = 0; 62 | desc.lengths.headroom = 0; 63 | desc.options = 0; 64 | 65 | idx += 1; 66 | } 67 | 68 | unsafe { libxdp_sys::xsk_ring_cons__release(self.ring.as_mut(), cnt) }; 69 | } 70 | 71 | cnt as usize 72 | } 73 | 74 | /// Same as [`consume`] but for a single frame descriptor. 75 | /// 76 | /// # Safety 77 | /// 78 | /// See [`consume`]. 
79 | /// 80 | /// [`consume`]: Self::consume 81 | #[inline] 82 | pub unsafe fn consume_one(&mut self, desc: &mut FrameDesc) -> usize { 83 | let mut idx = 0; 84 | 85 | let cnt = unsafe { libxdp_sys::xsk_ring_cons__peek(self.ring.as_mut(), 1, &mut idx) }; 86 | 87 | if cnt > 0 { 88 | let addr = unsafe { *libxdp_sys::xsk_ring_cons__comp_addr(self.ring.as_ref(), idx) }; 89 | 90 | desc.addr = addr as usize; 91 | desc.lengths.data = 0; 92 | desc.lengths.headroom = 0; 93 | desc.options = 0; 94 | 95 | unsafe { libxdp_sys::xsk_ring_cons__release(self.ring.as_mut(), cnt) }; 96 | } 97 | 98 | cnt as usize 99 | } 100 | } 101 | -------------------------------------------------------------------------------- /src/umem/fill_queue.rs: -------------------------------------------------------------------------------- 1 | use std::io; 2 | 3 | use crate::{ring::XskRingProd, socket::Fd}; 4 | 5 | use super::{frame::FrameDesc, Umem}; 6 | 7 | /// Used to transfer ownership of [`Umem`](super::Umem) frames from 8 | /// user-space to kernel-space. 9 | /// 10 | /// These frames will be used to receive packets, and will eventually 11 | /// be returned via the [`RxQueue`](crate::socket::RxQueue). 12 | /// 13 | /// For more information see the 14 | /// [docs](https://www.kernel.org/doc/html/latest/networking/af_xdp.html#umem-fill-ring). 15 | #[derive(Debug)] 16 | pub struct FillQueue { 17 | ring: XskRingProd, 18 | _umem: Umem, 19 | } 20 | 21 | impl FillQueue { 22 | pub(crate) fn new(ring: XskRingProd, umem: Umem) -> Self { 23 | Self { ring, _umem: umem } 24 | } 25 | 26 | /// Let the kernel know that the [`Umem`] frames described by 27 | /// `descs` may be used to receive data. Returns the number of 28 | /// frames submitted to the kernel. 29 | /// 30 | /// Note that if the length of `descs` is greater than the number 31 | /// of available spaces on the underlying ring buffer then no 32 | /// frames at all will be handed over to the kernel. 33 | /// 34 | /// Once the frames have been submitted to this queue they should 35 | /// not be used again until consumed via the [`RxQueue`]. 36 | /// 37 | /// # Safety 38 | /// 39 | /// This function is unsafe as it is possible to cause a data race 40 | /// if used improperly. For example, by simultaneously submitting 41 | /// the same frame descriptor to this `FillQueue` and the 42 | /// [`TxQueue`]. 43 | /// 44 | /// Furthermore, the frames passed to this queue must belong to 45 | /// the same [`Umem`] that this `FillQueue` instance is tied to. 46 | /// 47 | /// [`TxQueue`]: crate::TxQueue 48 | /// [`RxQueue`]: crate::RxQueue 49 | #[inline] 50 | pub unsafe fn produce(&mut self, descs: &[FrameDesc]) -> usize { 51 | let nb = descs.len() as u32; 52 | 53 | if nb == 0 { 54 | return 0; 55 | } 56 | 57 | let mut idx = 0; 58 | 59 | let cnt = unsafe { libxdp_sys::xsk_ring_prod__reserve(self.ring.as_mut(), nb, &mut idx) }; 60 | 61 | if cnt > 0 { 62 | for desc in descs.iter().take(cnt as usize) { 63 | unsafe { 64 | *libxdp_sys::xsk_ring_prod__fill_addr(self.ring.as_mut(), idx) = 65 | desc.addr as u64 66 | }; 67 | 68 | idx += 1; 69 | } 70 | 71 | unsafe { libxdp_sys::xsk_ring_prod__submit(self.ring.as_mut(), cnt) }; 72 | } 73 | 74 | cnt as usize 75 | } 76 | 77 | /// Same as [`produce`] but for a single frame descriptor. 78 | /// 79 | /// # Safety 80 | /// 81 | /// See [`produce`]. 
82 | /// 83 | /// [`produce`]: Self::produce 84 | #[inline] 85 | pub unsafe fn produce_one(&mut self, desc: &FrameDesc) -> usize { 86 | let mut idx = 0; 87 | 88 | let cnt = unsafe { libxdp_sys::xsk_ring_prod__reserve(self.ring.as_mut(), 1, &mut idx) }; 89 | 90 | if cnt > 0 { 91 | unsafe { 92 | *libxdp_sys::xsk_ring_prod__fill_addr(self.ring.as_mut(), idx) = desc.addr as u64 93 | }; 94 | 95 | unsafe { libxdp_sys::xsk_ring_prod__submit(self.ring.as_mut(), cnt) }; 96 | } 97 | 98 | cnt as usize 99 | } 100 | 101 | /// Same as [`produce`] but wake up the kernel if required to let 102 | /// it know there are frames available that may be used to receive 103 | /// data. 104 | /// 105 | /// For more details see the 106 | /// [docs](https://www.kernel.org/doc/html/latest/networking/af_xdp.html#xdp-use-need-wakeup-bind-flag). 107 | /// 108 | /// # Safety 109 | /// 110 | /// See [`produce`]. 111 | /// 112 | /// [`produce`]: Self::produce 113 | #[inline] 114 | pub unsafe fn produce_and_wakeup( 115 | &mut self, 116 | descs: &[FrameDesc], 117 | socket_fd: &mut Fd, 118 | poll_timeout: i32, 119 | ) -> io::Result { 120 | let cnt = unsafe { self.produce(descs) }; 121 | 122 | if cnt > 0 && self.needs_wakeup() { 123 | self.wakeup(socket_fd, poll_timeout)?; 124 | } 125 | 126 | Ok(cnt) 127 | } 128 | 129 | /// Same as [`produce_and_wakeup`] but for a single frame 130 | /// descriptor. 131 | /// 132 | /// # Safety 133 | /// 134 | /// See [`produce`]. 135 | /// 136 | /// [`produce_and_wakeup`]: Self::produce_and_wakeup 137 | /// [`produce`]: Self::produce 138 | #[inline] 139 | pub unsafe fn produce_one_and_wakeup( 140 | &mut self, 141 | desc: &FrameDesc, 142 | socket_fd: &mut Fd, 143 | poll_timeout: i32, 144 | ) -> io::Result { 145 | let cnt = unsafe { self.produce_one(desc) }; 146 | 147 | if cnt > 0 && self.needs_wakeup() { 148 | self.wakeup(socket_fd, poll_timeout)?; 149 | } 150 | 151 | Ok(cnt) 152 | } 153 | 154 | /// Wake up the kernel to let it know it can continue using the 155 | /// fill ring to process received data. 156 | /// 157 | /// See [`produce_and_wakeup`] for link to docs with further 158 | /// explanation. 159 | /// 160 | /// [`produce_and_wakeup`]: Self::produce_and_wakeup 161 | #[inline] 162 | pub fn wakeup(&self, fd: &mut Fd, poll_timeout: i32) -> io::Result<()> { 163 | fd.poll_read(poll_timeout)?; 164 | Ok(()) 165 | } 166 | 167 | /// Check if the [`XDP_USE_NEED_WAKEUP`] flag is set on the fill 168 | /// ring. If so then this means a call to [`wakeup`] will be 169 | /// required to continue processing received data. 170 | /// 171 | /// See [`produce_and_wakeup`] for a link to docs with further 172 | /// explanation. 173 | /// 174 | /// [`produce_and_wakeup`]: Self::produce_and_wakeup 175 | /// [`XDP_USE_NEED_WAKEUP`]: libxdp_sys::XDP_USE_NEED_WAKEUP 176 | /// [`wakeup`]: Self::wakeup 177 | #[inline] 178 | pub fn needs_wakeup(&self) -> bool { 179 | unsafe { libxdp_sys::xsk_ring_prod__needs_wakeup(self.ring.as_ref()) != 0 } 180 | } 181 | } 182 | -------------------------------------------------------------------------------- /src/umem/frame/cursor.rs: -------------------------------------------------------------------------------- 1 | //! A wrapper for convenient writing to a [`Umem`](crate::umem::Umem) frame. 2 | 3 | use std::io::{self, IoSlice, Write}; 4 | 5 | use crate::util; 6 | 7 | /// Wraps a buffer and a value denoting its current write position and 8 | /// provides a convenient [`Write`] implementation. 
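///
/// A short sketch of typical use (assuming `umem` and `desc` belong to
/// the same, already-created [`Umem`](crate::umem::Umem)):
///
/// ```ignore
/// use std::io::Write;
///
/// unsafe {
///     umem.data_mut(&mut desc)
///         .cursor()
///         .write_all(b"hello")
///         .expect("failed writing to frame");
/// }
///
/// // The descriptor's data length now reflects the bytes written.
/// assert_eq!(desc.lengths().data(), 5);
/// ```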
9 | /// 10 | /// Practically it allows us to write to a [`Umem`](crate::umem::Umem) frame 11 | /// and update its descriptor's length at the same time, avoiding some 12 | /// potentially error prone logic. 13 | #[derive(Debug)] 14 | pub struct Cursor<'a> { 15 | pos: &'a mut usize, 16 | buf: &'a mut [u8], 17 | } 18 | 19 | impl<'a> Cursor<'a> { 20 | #[inline] 21 | pub(super) fn new(pos: &'a mut usize, buf: &'a mut [u8]) -> Self { 22 | Self { pos, buf } 23 | } 24 | 25 | /// The cursor's current write position in the buffer. 26 | #[inline] 27 | pub fn pos(&self) -> usize { 28 | *self.pos 29 | } 30 | 31 | /// Sets the cursor's write position. 32 | #[inline] 33 | pub fn set_pos(&mut self, pos: usize) { 34 | *self.pos = util::min_usize(pos, self.buf.len()); 35 | } 36 | 37 | /// The length of the underlying buffer. 38 | #[inline] 39 | pub fn buf_len(&mut self) -> usize { 40 | self.buf.len() 41 | } 42 | 43 | /// Fills the buffer with zeroes and sets the cursor's write 44 | /// position to the start of the buffer. 45 | #[inline] 46 | pub fn zero_out(&mut self) { 47 | self.buf.fill(0); 48 | self.set_pos(0); 49 | } 50 | } 51 | 52 | // Taken almost verbatim from 53 | // [`std::io::Cursor`](https://doc.rust-lang.org/src/std/io/cursor.rs.html#437) 54 | impl Write for Cursor<'_> { 55 | #[inline] 56 | fn write(&mut self, buf: &[u8]) -> io::Result { 57 | let pos = util::min_usize(*self.pos, self.buf.len()); 58 | let amt = (&mut self.buf[pos..]).write(buf)?; 59 | 60 | *self.pos += amt; 61 | 62 | Ok(amt) 63 | } 64 | 65 | #[inline] 66 | fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result { 67 | let mut nwritten = 0; 68 | for buf in bufs { 69 | let n = self.write(buf)?; 70 | nwritten += n; 71 | if n < buf.len() { 72 | break; 73 | } 74 | } 75 | Ok(nwritten) 76 | } 77 | 78 | #[inline] 79 | fn flush(&mut self) -> io::Result<()> { 80 | Ok(()) 81 | } 82 | } 83 | 84 | #[cfg(test)] 85 | mod tests { 86 | use super::*; 87 | 88 | #[test] 89 | fn write_works() { 90 | let mut pos = 0; 91 | let mut buf = [0; 32]; 92 | 93 | { 94 | let mut cursor = Cursor::new(&mut pos, &mut buf[..]); 95 | 96 | cursor.write_all(b"hello").unwrap(); 97 | } 98 | 99 | assert_eq!(pos, 5); 100 | assert_eq!(&buf[..pos], b"hello"); 101 | 102 | { 103 | let mut cursor = Cursor::new(&mut pos, &mut buf[..]); 104 | 105 | cursor.write_all(b", world").unwrap(); 106 | } 107 | 108 | assert_eq!(pos, 12); 109 | assert_eq!(&buf[..pos], b"hello, world"); 110 | } 111 | 112 | #[test] 113 | fn zero_out_works() { 114 | let mut pos = 0; 115 | let mut buf = [0; 32]; 116 | 117 | { 118 | let mut cursor = Cursor::new(&mut pos, &mut buf[..]); 119 | 120 | cursor.write_all(b"hello").unwrap(); 121 | } 122 | 123 | assert_eq!(pos, 5); 124 | assert_eq!(&buf[..pos], b"hello"); 125 | 126 | { 127 | let mut cursor = Cursor::new(&mut pos, &mut buf[..]); 128 | 129 | cursor.zero_out(); 130 | } 131 | 132 | assert_eq!(pos, 0); 133 | assert_eq!(&buf, &[0; 32]); 134 | } 135 | 136 | #[test] 137 | fn set_pos_cannot_exceed_buf_len() { 138 | let mut pos = 0; 139 | let mut buf = [0; 32]; 140 | 141 | let mut cursor = Cursor::new(&mut pos, &mut buf[..]); 142 | 143 | cursor.set_pos(1); 144 | assert_eq!(cursor.pos(), 1); 145 | 146 | cursor.set_pos(32); 147 | assert_eq!(cursor.pos(), 32); 148 | 149 | cursor.set_pos(33); 150 | assert_eq!(cursor.pos(), 32); 151 | } 152 | } 153 | -------------------------------------------------------------------------------- /src/umem/frame/mod.rs: -------------------------------------------------------------------------------- 1 | //! 
Types for representing and working with a [`Umem`](super::Umem) 2 | //! frame. 3 | 4 | mod cursor; 5 | pub use cursor::Cursor; 6 | 7 | use std::{ 8 | borrow::{Borrow, BorrowMut}, 9 | ops::{Deref, DerefMut}, 10 | }; 11 | 12 | /// The length (in bytes) of data in a frame's packet data and 13 | /// headroom segments. 14 | /// 15 | /// Not to be confused with the [`frame_headroom`] and [`mtu`], the 16 | /// lengths here describe the amount of data that has been written to 17 | /// either segment, either by the kernel or by the user. Hence they 18 | /// vary as frames are used to send and receive data. 19 | /// 20 | /// The two sets of values are related however, in that `headroom` 21 | /// will always be less than or equal to [`frame_headroom`], and 22 | /// `data` less than or equal to [`mtu`]. 23 | /// 24 | /// [`frame_headroom`]: crate::config::UmemConfig::frame_headroom 25 | /// [`mtu`]: crate::config::UmemConfig::mtu 26 | #[derive(Debug, Default, Clone, Copy)] 27 | pub struct SegmentLengths { 28 | pub(crate) headroom: usize, 29 | pub(crate) data: usize, 30 | } 31 | 32 | impl SegmentLengths { 33 | /// Current length of the headroom segment. 34 | #[inline] 35 | pub fn headroom(&self) -> usize { 36 | self.headroom 37 | } 38 | 39 | /// Current length of the packet data segment. 40 | #[inline] 41 | pub fn data(&self) -> usize { 42 | self.data 43 | } 44 | } 45 | 46 | /// A [`Umem`](super::Umem) frame descriptor. 47 | /// 48 | /// Used to pass frame information between the kernel and 49 | /// userspace. `addr` is an offset in bytes from the start of the 50 | /// [`Umem`](super::Umem) and corresponds to the starting address of 51 | /// the packet data segment of some frame. `lengths` describes the 52 | /// length (in bytes) of any data stored in the frame's headroom or 53 | /// data segments. 54 | #[derive(Debug, Clone, Copy)] 55 | pub struct FrameDesc { 56 | pub(crate) addr: usize, 57 | pub(crate) options: u32, 58 | pub(crate) lengths: SegmentLengths, 59 | } 60 | 61 | impl FrameDesc { 62 | /// Creates a new frame descriptor. 63 | /// 64 | /// `addr` must be the starting address of the packet data segment 65 | /// of some [`Umem`](super::Umem) frame. 66 | pub(super) fn new(addr: usize) -> Self { 67 | Self { 68 | addr, 69 | options: 0, 70 | lengths: SegmentLengths::default(), 71 | } 72 | } 73 | 74 | /// The starting address of the packet data segment of the frame 75 | /// pointed at by this descriptor. 76 | #[inline] 77 | pub fn addr(&self) -> usize { 78 | self.addr 79 | } 80 | 81 | /// Current headroom and packet data lengths for the frame pointed 82 | /// at by this descriptor. 83 | #[inline] 84 | pub fn lengths(&self) -> &SegmentLengths { 85 | &self.lengths 86 | } 87 | 88 | /// Frame options. 89 | #[inline] 90 | pub fn options(&self) -> u32 { 91 | self.options 92 | } 93 | 94 | /// Set the frame options. 95 | #[inline] 96 | pub fn set_options(&mut self, options: u32) { 97 | self.options = options 98 | } 99 | 100 | #[inline] 101 | pub(crate) fn write_xdp_desc(&self, desc: &mut libxdp_sys::xdp_desc) { 102 | desc.addr = self.addr as u64; 103 | desc.options = self.options; 104 | desc.len = self.lengths.data as u32; 105 | } 106 | } 107 | 108 | impl Default for FrameDesc { 109 | /// Creates an empty frame descriptor with an address of zero and 110 | /// segment lengths also set to zero. 
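A hedged sketch of the pattern this `Default` impl enables when receiving (see also the note that follows about initialising the descriptors first). `rx_q` and `umem` are assumed to come from socket and UMEM creation, with the fill ring already topped up with free frames.

```rust
use xsk_rs::umem::frame::FrameDesc;

// A batch of empty descriptors; their addresses are all zero until the
// kernel fills them in.
let mut batch = vec![FrameDesc::default(); 64];

// The kernel overwrites each descriptor it hands back to us.
let received = unsafe { rx_q.consume(&mut batch) };

for desc in &batch[..received] {
    // Only now does `desc` point at a real frame, so reading it is sound.
    let data = unsafe { umem.data(desc) };
    println!("received {} bytes", data.contents().len());
}
```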
111 | /// 112 | /// Since the address of any descriptors created this way is 113 | /// always zero, before using them to write to the [`Umem`] they 114 | /// should first be 'initialised' by passing them to either the 115 | /// [`RxQueue`] or the [`CompQueue`], so they can be populated 116 | /// with the details of a free frame. 117 | /// 118 | /// [`Umem`]: crate::Umem 119 | /// [`RxQueue`]: crate::RxQueue 120 | /// [`CompQueue`]: crate::CompQueue 121 | fn default() -> Self { 122 | Self { 123 | addr: 0, 124 | options: 0, 125 | lengths: Default::default(), 126 | } 127 | } 128 | } 129 | 130 | /// Headroom segment of a [`Umem`](crate::umem::Umem) frame. 131 | #[derive(Debug)] 132 | pub struct Headroom<'umem> { 133 | contents: &'umem [u8], 134 | } 135 | 136 | impl<'umem> Headroom<'umem> { 137 | pub(super) fn new(contents: &'umem [u8]) -> Self { 138 | Self { contents } 139 | } 140 | 141 | /// Returns this segment's contents, up to its current length. 142 | #[inline] 143 | pub fn contents(&self) -> &[u8] { 144 | self.contents 145 | } 146 | } 147 | 148 | impl AsRef<[u8]> for Headroom<'_> { 149 | #[inline] 150 | fn as_ref(&self) -> &[u8] { 151 | self.contents 152 | } 153 | } 154 | 155 | impl Borrow<[u8]> for Headroom<'_> { 156 | #[inline] 157 | fn borrow(&self) -> &[u8] { 158 | self.contents 159 | } 160 | } 161 | 162 | impl Deref for Headroom<'_> { 163 | type Target = [u8]; 164 | 165 | #[inline] 166 | fn deref(&self) -> &Self::Target { 167 | self.contents 168 | } 169 | } 170 | 171 | /// Mutable headroom segment of a [`Umem`](crate::umem::Umem) frame. 172 | #[derive(Debug)] 173 | pub struct HeadroomMut<'umem> { 174 | len: &'umem mut usize, 175 | buf: &'umem mut [u8], 176 | } 177 | 178 | impl<'umem> HeadroomMut<'umem> { 179 | pub(super) fn new(len: &'umem mut usize, buf: &'umem mut [u8]) -> Self { 180 | Self { len, buf } 181 | } 182 | 183 | /// Returns this segment's contents, up to its current length. 184 | #[inline] 185 | pub fn contents(&self) -> &[u8] { 186 | &self.buf[..*self.len] 187 | } 188 | 189 | /// Returns a mutable view of this segment's contents, up to its 190 | /// current length. 191 | #[inline] 192 | pub fn contents_mut(&mut self) -> &mut [u8] { 193 | &mut self.buf[..*self.len] 194 | } 195 | 196 | /// A cursor for writing to this segment. 197 | /// 198 | /// Modifications via the cursor will change the length of the 199 | /// segment, i.e. the headroom length of the frame descriptor. If 200 | /// in-place modifications just need to be made then 201 | /// [`contents_mut`] may be sufficient. 
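For example, a hedged sketch of the difference between the two, assuming `umem` is a `Umem`, `desc` is a descriptor belonging to it, and the usual safety requirements are upheld:

```rust
use std::io::Write;

// Writing through the cursor records how many headroom bytes are in use...
unsafe { umem.headroom_mut(&mut desc) }
    .cursor()
    .write_all(b"meta")
    .unwrap();
assert_eq!(desc.lengths().headroom(), 4);

// ...whereas editing the existing bytes in place via `contents_mut`
// leaves the recorded length untouched.
unsafe { umem.headroom_mut(&mut desc) }.contents_mut()[0] = 0xff;
assert_eq!(desc.lengths().headroom(), 4);
```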
202 | /// 203 | /// [`contents_mut`]: Self::contents_mut 204 | #[inline] 205 | pub fn cursor(&mut self) -> Cursor<'_> { 206 | Cursor::new(self.len, self.buf) 207 | } 208 | } 209 | 210 | impl AsRef<[u8]> for HeadroomMut<'_> { 211 | #[inline] 212 | fn as_ref(&self) -> &[u8] { 213 | self.contents() 214 | } 215 | } 216 | 217 | impl AsMut<[u8]> for HeadroomMut<'_> { 218 | #[inline] 219 | fn as_mut(&mut self) -> &mut [u8] { 220 | self.contents_mut() 221 | } 222 | } 223 | 224 | impl Borrow<[u8]> for HeadroomMut<'_> { 225 | #[inline] 226 | fn borrow(&self) -> &[u8] { 227 | self.contents() 228 | } 229 | } 230 | 231 | impl BorrowMut<[u8]> for HeadroomMut<'_> { 232 | #[inline] 233 | fn borrow_mut(&mut self) -> &mut [u8] { 234 | self.contents_mut() 235 | } 236 | } 237 | 238 | impl Deref for HeadroomMut<'_> { 239 | type Target = [u8]; 240 | 241 | #[inline] 242 | fn deref(&self) -> &Self::Target { 243 | self.contents() 244 | } 245 | } 246 | 247 | impl DerefMut for HeadroomMut<'_> { 248 | #[inline] 249 | fn deref_mut(&mut self) -> &mut Self::Target { 250 | self.contents_mut() 251 | } 252 | } 253 | 254 | /// Packet data segment of a [`Umem`](crate::umem::Umem) frame. 255 | #[derive(Debug)] 256 | pub struct Data<'umem> { 257 | contents: &'umem [u8], 258 | } 259 | 260 | impl<'umem> Data<'umem> { 261 | pub(super) fn new(contents: &'umem [u8]) -> Self { 262 | Self { contents } 263 | } 264 | 265 | /// Returns this segment's contents, up to its current length. 266 | /// 267 | /// Will change as packets are sent or received using this frame. 268 | #[inline] 269 | pub fn contents(&self) -> &'umem [u8] { 270 | self.contents 271 | } 272 | } 273 | 274 | impl AsRef<[u8]> for Data<'_> { 275 | #[inline] 276 | fn as_ref(&self) -> &[u8] { 277 | self.contents 278 | } 279 | } 280 | 281 | impl Borrow<[u8]> for Data<'_> { 282 | #[inline] 283 | fn borrow(&self) -> &[u8] { 284 | self.contents 285 | } 286 | } 287 | 288 | impl Deref for Data<'_> { 289 | type Target = [u8]; 290 | 291 | #[inline] 292 | fn deref(&self) -> &Self::Target { 293 | self.contents 294 | } 295 | } 296 | 297 | /// Mutable packet data segment of a [`Umem`](crate::umem::Umem) 298 | /// frame. 299 | #[derive(Debug)] 300 | pub struct DataMut<'umem> { 301 | len: &'umem mut usize, 302 | buf: &'umem mut [u8], 303 | } 304 | 305 | impl<'umem> DataMut<'umem> { 306 | pub(super) fn new(len: &'umem mut usize, buf: &'umem mut [u8]) -> Self { 307 | Self { len, buf } 308 | } 309 | 310 | /// Returns this segment's contents, up to its current length. 311 | /// 312 | /// Will change as packets are sent or received using this frame. 313 | #[inline] 314 | pub fn contents(&self) -> &[u8] { 315 | &self.buf[..*self.len] 316 | } 317 | 318 | /// Returns a mutable view of this segment's contents, up to its 319 | /// current length. 320 | /// 321 | /// Will change as packets are sent or received using this frame. 322 | #[inline] 323 | pub fn contents_mut(&mut self) -> &mut [u8] { 324 | &mut self.buf[..*self.len] 325 | } 326 | 327 | /// A cursor for writing to this segment. 328 | /// 329 | /// Modifications via the cursor will change the length of the 330 | /// segment, i.e. the data length of the frame descriptor, and in 331 | /// this case the size of the packet that will be submitted. If 332 | /// in-place modifications just need to be made then 333 | /// [`contents_mut`] may be sufficient. 
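A hedged sketch of the send path this feeds into, mirroring the pattern used in the tests further down. `umem`, `desc` and `tx_q` are assumed to come from UMEM and socket creation, with the usual safety requirements upheld.

```rust
use std::io::Write;

let packet_bytes: &[u8] = b"an already built packet";

// Writing the packet through the cursor sets the descriptor's data length,
// which is exactly the packet size handed to the kernel on submission.
unsafe { umem.data_mut(&mut desc) }
    .cursor()
    .write_all(packet_bytes)
    .unwrap();
assert_eq!(desc.lengths().data(), packet_bytes.len());

// Submit the frame; the kernel reads `packet_bytes.len()` bytes from it.
let sent = unsafe { tx_q.produce_and_wakeup(&[desc]) }.unwrap();
assert_eq!(sent, 1);
```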
334 | /// 335 | /// [`contents_mut`]: Self::contents_mut 336 | #[inline] 337 | pub fn cursor(&mut self) -> Cursor<'_> { 338 | Cursor::new(self.len, self.buf) 339 | } 340 | } 341 | 342 | impl AsRef<[u8]> for DataMut<'_> { 343 | #[inline] 344 | fn as_ref(&self) -> &[u8] { 345 | self.contents() 346 | } 347 | } 348 | 349 | impl AsMut<[u8]> for DataMut<'_> { 350 | #[inline] 351 | fn as_mut(&mut self) -> &mut [u8] { 352 | self.contents_mut() 353 | } 354 | } 355 | 356 | impl Borrow<[u8]> for DataMut<'_> { 357 | #[inline] 358 | fn borrow(&self) -> &[u8] { 359 | self.contents() 360 | } 361 | } 362 | 363 | impl BorrowMut<[u8]> for DataMut<'_> { 364 | #[inline] 365 | fn borrow_mut(&mut self) -> &mut [u8] { 366 | self.contents_mut() 367 | } 368 | } 369 | 370 | impl Deref for DataMut<'_> { 371 | type Target = [u8]; 372 | 373 | #[inline] 374 | fn deref(&self) -> &Self::Target { 375 | self.contents() 376 | } 377 | } 378 | 379 | impl DerefMut for DataMut<'_> { 380 | #[inline] 381 | fn deref_mut(&mut self) -> &mut Self::Target { 382 | self.contents_mut() 383 | } 384 | } 385 | 386 | #[cfg(test)] 387 | mod tests { 388 | use core::slice; 389 | use std::{ 390 | convert::TryInto, 391 | io::{self, Write}, 392 | }; 393 | 394 | use libxdp_sys::xdp_desc; 395 | 396 | use crate::umem::{FrameDesc, FrameLayout, UmemRegion}; 397 | 398 | #[test] 399 | fn writes_persist() { 400 | let layout = FrameLayout { 401 | xdp_headroom: 0, 402 | frame_headroom: 512, 403 | mtu: 2048, 404 | }; 405 | 406 | let frame_count = 16.try_into().unwrap(); 407 | let frame_size = layout.frame_size(); 408 | 409 | let umem_region = UmemRegion::new(frame_count, layout, false).unwrap(); 410 | 411 | let mut desc_0 = FrameDesc::new(0 * frame_size + layout.frame_headroom); 412 | 413 | let mut desc_1 = FrameDesc::new(1 * frame_size + layout.frame_headroom); 414 | 415 | let mut xdp_desc = xdp_desc { 416 | addr: 0, 417 | len: 0, 418 | options: 0, 419 | }; 420 | 421 | unsafe { umem_region.data_mut(&mut desc_0) } 422 | .cursor() 423 | .write_all(b"hello") 424 | .unwrap(); 425 | 426 | desc_0.write_xdp_desc(&mut xdp_desc); 427 | 428 | assert_eq!( 429 | xdp_desc.addr, 430 | (0 * frame_size + layout.frame_headroom) as u64 431 | ); 432 | assert_eq!(xdp_desc.len, 5); 433 | assert_eq!(xdp_desc.options, 0); 434 | 435 | unsafe { umem_region.data_mut(&mut desc_1) } 436 | .cursor() 437 | .write_all(b"world!") 438 | .unwrap(); 439 | 440 | desc_1.write_xdp_desc(&mut xdp_desc); 441 | 442 | assert_eq!( 443 | xdp_desc.addr, 444 | (1 * frame_size + layout.frame_headroom) as u64 445 | ); 446 | assert_eq!(xdp_desc.len, 6); 447 | assert_eq!(xdp_desc.options, 0); 448 | 449 | assert_eq!( 450 | unsafe { 451 | slice::from_raw_parts( 452 | umem_region 453 | .as_ptr() 454 | .add(0 * frame_size + layout.frame_headroom) 455 | as *const u8, 456 | 5, 457 | ) 458 | }, 459 | b"hello" 460 | ); 461 | 462 | assert_eq!( 463 | unsafe { 464 | slice::from_raw_parts( 465 | umem_region 466 | .as_ptr() 467 | .add(1 * frame_size + layout.frame_headroom) 468 | as *const u8, 469 | 6, 470 | ) 471 | }, 472 | b"world!" 
473 | ); 474 | } 475 | 476 | #[test] 477 | fn writes_are_contiguous() { 478 | let layout = FrameLayout { 479 | xdp_headroom: 4, 480 | frame_headroom: 8, 481 | mtu: 12, 482 | }; 483 | 484 | let frame_count = 4.try_into().unwrap(); 485 | let umem_region = UmemRegion::new(frame_count, layout, false).unwrap(); 486 | 487 | // An arbitrary layout 488 | let xdp_headroom_segment = [0, 0, 0, 0]; 489 | let frame_headroom_segment = [1, 1, 1, 1, 1, 1, 1, 1]; 490 | let data_segment = [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]; 491 | 492 | let mut cursor = io::Cursor::new(Vec::new()); 493 | 494 | cursor.write_all(&xdp_headroom_segment).unwrap(); 495 | cursor.write_all(&frame_headroom_segment).unwrap(); 496 | cursor.write_all(&data_segment).unwrap(); 497 | 498 | let base_layout: Vec = cursor.into_inner(); 499 | 500 | let expected_layout: Vec = (0..frame_count.get() as u8) 501 | .into_iter() 502 | .map(|i| { 503 | base_layout 504 | .iter() 505 | .map(|el| el * (i + 1)) 506 | .collect::>() 507 | }) 508 | .flatten() 509 | .collect(); 510 | 511 | (0..frame_count.get() as usize).into_iter().for_each(|i| { 512 | let mut desc = FrameDesc::new( 513 | (i * layout.frame_size()) + layout.xdp_headroom + layout.frame_headroom, 514 | ); 515 | 516 | let (mut headroom, mut data) = unsafe { umem_region.frame_mut(&mut desc) }; 517 | 518 | headroom 519 | .cursor() 520 | .write_all( 521 | &frame_headroom_segment 522 | .iter() 523 | .map(|el| el * (i as u8 + 1)) 524 | .collect::>(), 525 | ) 526 | .unwrap(); 527 | 528 | data.cursor() 529 | .write_all( 530 | &data_segment 531 | .iter() 532 | .map(|el| el * (i as u8 + 1)) 533 | .collect::>(), 534 | ) 535 | .unwrap(); 536 | }); 537 | 538 | // Check they match 539 | let mmap_region = 540 | unsafe { slice::from_raw_parts(umem_region.as_ptr() as *const u8, umem_region.len()) }; 541 | 542 | assert_eq!(mmap_region, expected_layout) 543 | } 544 | } 545 | -------------------------------------------------------------------------------- /src/umem/mem/mmap.rs: -------------------------------------------------------------------------------- 1 | pub use inner::Mmap; 2 | 3 | use std::{io, ptr::NonNull}; 4 | 5 | #[cfg(not(test))] 6 | mod inner { 7 | use libc::{ 8 | MAP_ANONYMOUS, MAP_FAILED, MAP_HUGETLB, MAP_POPULATE, MAP_SHARED, PROT_READ, PROT_WRITE, 9 | }; 10 | use log::error; 11 | use std::ptr; 12 | 13 | use super::*; 14 | 15 | /// An anonymous memory mapped region. 16 | #[derive(Debug)] 17 | pub struct Mmap { 18 | addr: NonNull, 19 | len: usize, 20 | } 21 | 22 | unsafe impl Send for Mmap {} 23 | 24 | impl Mmap { 25 | pub fn new(len: usize, use_huge_pages: bool) -> io::Result { 26 | // MAP_ANONYMOUS: mapping not backed by a file. 27 | // MAP_SHARED: shares this mapping, so changes are visible 28 | // to other processes mapping the same file. 29 | // MAP_POPULATE: pre-populate page tables, reduces 30 | // blocking on page faults later. 31 | let mut flags = MAP_ANONYMOUS | MAP_SHARED | MAP_POPULATE; 32 | 33 | if use_huge_pages { 34 | flags |= MAP_HUGETLB; 35 | } 36 | 37 | let addr = unsafe { 38 | libc::mmap( 39 | ptr::null_mut(), 40 | len, 41 | PROT_READ | PROT_WRITE, // prot 42 | flags, 43 | -1, // file 44 | 0, // offset 45 | ) 46 | }; 47 | 48 | if addr == MAP_FAILED { 49 | Err(io::Error::last_os_error()) 50 | } else { 51 | let addr = 52 | NonNull::new(addr).expect("ptr non-null since we confirmed `mmap()` succeeded"); 53 | 54 | Ok(Mmap { addr, len }) 55 | } 56 | } 57 | 58 | /// Returns a pointer to the start of the mmap'd region. 
59 | #[inline] 60 | pub fn addr(&self) -> NonNull { 61 | self.addr 62 | } 63 | } 64 | 65 | impl Drop for Mmap { 66 | fn drop(&mut self) { 67 | let err = unsafe { libc::munmap(self.addr.as_ptr(), self.len) }; 68 | 69 | if err != 0 { 70 | error!( 71 | "`munmap()` failed with error: {}", 72 | io::Error::last_os_error() 73 | ); 74 | } 75 | } 76 | } 77 | } 78 | 79 | #[cfg(test)] 80 | mod inner { 81 | use std::mem::ManuallyDrop; 82 | 83 | use super::*; 84 | 85 | #[derive(Debug)] 86 | struct VecParts { 87 | ptr: NonNull, 88 | len: usize, 89 | capacity: usize, 90 | } 91 | 92 | unsafe impl Send for VecParts {} 93 | 94 | impl VecParts { 95 | fn new(v: Vec) -> Self { 96 | let mut v = ManuallyDrop::new(v); 97 | 98 | Self { 99 | ptr: NonNull::new(v.as_mut_ptr()).expect("obtained pointer from Vec"), 100 | len: v.len(), 101 | capacity: v.capacity(), 102 | } 103 | } 104 | } 105 | 106 | impl Drop for VecParts { 107 | fn drop(&mut self) { 108 | unsafe { Vec::from_raw_parts(self.ptr.as_ptr(), self.len, self.capacity) }; 109 | } 110 | } 111 | 112 | /// A mocked [`Mmap`] that uses the heap for memory. 113 | #[derive(Debug)] 114 | pub struct Mmap(VecParts); 115 | 116 | impl Mmap { 117 | pub fn new(len: usize, _use_huge_pages: bool) -> io::Result { 118 | Ok(Self(VecParts::new(vec![0; len]))) 119 | } 120 | 121 | /// Returns a pointer to the start of the mmap'd region. 122 | #[inline] 123 | pub fn addr(&self) -> NonNull { 124 | NonNull::new(self.0.ptr.as_ptr() as *mut libc::c_void).unwrap() 125 | } 126 | } 127 | } 128 | 129 | #[cfg(test)] 130 | mod tests { 131 | #[test] 132 | fn confirm_pointer_offset_is_a_single_byte() { 133 | assert_eq!(std::mem::size_of::(), 1); 134 | } 135 | } 136 | -------------------------------------------------------------------------------- /src/umem/mem/mod.rs: -------------------------------------------------------------------------------- 1 | mod mmap; 2 | use mmap::Mmap; 3 | 4 | use std::{ 5 | io, 6 | num::NonZeroU32, 7 | ptr::NonNull, 8 | slice, 9 | sync::{Arc, Mutex}, 10 | }; 11 | 12 | use super::{ 13 | frame::{Data, DataMut, FrameDesc, Headroom, HeadroomMut}, 14 | FrameLayout, 15 | }; 16 | 17 | /// A framed, memory mapped region which functions as the working 18 | /// memory for some UMEM. 19 | #[derive(Clone, Debug)] 20 | pub struct UmemRegion { 21 | layout: FrameLayout, 22 | // Keep a copy of the pointer to the mmap region to avoid a double 23 | // deref, through for example an `Arc`. We know this won't 24 | // dangle since this struct holds an `Arc`d copy of the mmap 25 | // region. 26 | addr: NonNull, 27 | len: usize, 28 | _mmap: Arc>, 29 | } 30 | 31 | unsafe impl Send for UmemRegion {} 32 | 33 | // SAFETY: this impl is only safe in the context of this library and 34 | // assuming the various unsafe requirements are upheld. Mutations to 35 | // the memory region may occur concurrently but always in disjoint 36 | // sections by either the user space process xor the kernel. 37 | unsafe impl Sync for UmemRegion {} 38 | 39 | impl UmemRegion { 40 | pub(super) fn new( 41 | frame_count: NonZeroU32, 42 | frame_layout: FrameLayout, 43 | use_huge_pages: bool, 44 | ) -> io::Result { 45 | let len = (frame_count.get() as usize) * frame_layout.frame_size(); 46 | 47 | let mmap = Mmap::new(len, use_huge_pages)?; 48 | 49 | Ok(Self { 50 | layout: frame_layout, 51 | addr: mmap.addr(), 52 | len, 53 | _mmap: Arc::new(Mutex::new(mmap)), 54 | }) 55 | } 56 | 57 | /// The size of the underlying memory region. 
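A worked example of the sizing above. The numbers are made up; only the arithmetic mirrors `FrameLayout::frame_size` and the `len` calculation in `UmemRegion::new`.

```rust
// A hypothetical layout: 256 B XDP headroom, 512 B user headroom and a
// 3328 B data segment, i.e. 4096 B per frame.
let xdp_headroom = 256usize;
let frame_headroom = 512usize;
let mtu = 3328usize;

let frame_size = xdp_headroom + frame_headroom + mtu; // 4096
let frame_count = 16usize;

// Size of the mmap'd region backing the UMEM.
assert_eq!(frame_count * frame_size, 65_536);

// A descriptor's `addr` points at the start of a frame's data segment,
// so frame `i` carries its packet data starting at:
let addr = |i: usize| i * frame_size + xdp_headroom + frame_headroom;
assert_eq!(addr(0), 768);
assert_eq!(addr(1), 4_864);
```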
58 | #[inline] 59 | pub fn len(&self) -> usize { 60 | self.len 61 | } 62 | 63 | /// Get a pointer to the start of the memory region. 64 | #[inline] 65 | pub fn as_ptr(&self) -> *mut libc::c_void { 66 | self.addr.as_ptr() 67 | } 68 | 69 | /// A pointer to the headroom segment of the frame described by 70 | /// `desc`. 71 | /// 72 | /// # Safety 73 | /// 74 | /// `desc` must describe a frame belonging to this [`UmemRegion`]. 75 | #[inline] 76 | unsafe fn headroom_ptr(&self, desc: &FrameDesc) -> *mut u8 { 77 | let addr = desc.addr - self.layout.frame_headroom; 78 | unsafe { self.as_ptr().add(addr) as *mut u8 } 79 | } 80 | 81 | /// A pointer to the headroom segment of the frame described to by 82 | /// `desc`. 83 | /// 84 | /// # Safety 85 | /// 86 | /// `desc` must describe a frame belonging to this [`UmemRegion`]. 87 | #[inline] 88 | unsafe fn data_ptr(&self, desc: &FrameDesc) -> *mut u8 { 89 | unsafe { self.as_ptr().add(desc.addr) as *mut u8 } 90 | } 91 | 92 | /// See docs for [`super::Umem::frame`]. 93 | #[inline] 94 | pub unsafe fn frame(&self, desc: &FrameDesc) -> (Headroom, Data) { 95 | // SAFETY: see `super::Umem::frame` 96 | unsafe { (self.headroom(desc), self.data(desc)) } 97 | } 98 | 99 | /// See docs for [`super::Umem::headroom`]. 100 | #[inline] 101 | pub unsafe fn headroom(&self, desc: &FrameDesc) -> Headroom { 102 | // SAFETY: see `frame`. 103 | let headroom_ptr = unsafe { self.headroom_ptr(desc) }; 104 | 105 | Headroom::new(unsafe { slice::from_raw_parts(headroom_ptr, desc.lengths.headroom) }) 106 | } 107 | 108 | /// See docs for [`super::Umem::data`]. 109 | #[inline] 110 | pub unsafe fn data(&self, desc: &FrameDesc) -> Data { 111 | // SAFETY: see `frame`. 112 | let data_ptr = unsafe { self.data_ptr(desc) }; 113 | 114 | Data::new(unsafe { slice::from_raw_parts(data_ptr, desc.lengths.data) }) 115 | } 116 | 117 | /// See docs for [`super::Umem::frame_mut`]. 118 | #[inline] 119 | pub unsafe fn frame_mut<'a>( 120 | &'a self, 121 | desc: &'a mut FrameDesc, 122 | ) -> (HeadroomMut<'a>, DataMut<'a>) { 123 | // SAFETY: see `super::Umem::frame_mut` 124 | let headroom_ptr = unsafe { self.headroom_ptr(desc) }; 125 | let data_ptr = unsafe { self.data_ptr(desc) }; 126 | 127 | let headroom = 128 | unsafe { slice::from_raw_parts_mut(headroom_ptr, self.layout.frame_headroom) }; 129 | 130 | let data = unsafe { slice::from_raw_parts_mut(data_ptr, self.layout.mtu) }; 131 | 132 | ( 133 | HeadroomMut::new(&mut desc.lengths.headroom, headroom), 134 | DataMut::new(&mut desc.lengths.data, data), 135 | ) 136 | } 137 | 138 | /// See docs for [`super::Umem::headroom_mut`]. 139 | #[inline] 140 | pub unsafe fn headroom_mut<'a>(&'a self, desc: &'a mut FrameDesc) -> HeadroomMut<'a> { 141 | // SAFETY: see `frame_mut`. 142 | let headroom_ptr = unsafe { self.headroom_ptr(desc) }; 143 | 144 | let headroom = 145 | unsafe { slice::from_raw_parts_mut(headroom_ptr, self.layout.frame_headroom) }; 146 | 147 | HeadroomMut::new(&mut desc.lengths.headroom, headroom) 148 | } 149 | 150 | /// See docs for [`super::Umem::data_mut`]. 151 | #[inline] 152 | pub unsafe fn data_mut<'a>(&'a self, desc: &'a mut FrameDesc) -> DataMut<'a> { 153 | // SAFETY: see `frame_mut`. 
154 | let data_ptr = unsafe { self.data_ptr(desc) }; 155 | 156 | let data = unsafe { slice::from_raw_parts_mut(data_ptr, self.layout.mtu) }; 157 | 158 | DataMut::new(&mut desc.lengths.data, data) 159 | } 160 | } 161 | -------------------------------------------------------------------------------- /src/umem/mod.rs: -------------------------------------------------------------------------------- 1 | //! Types for interacting with and creating a [`Umem`]. 2 | 3 | mod mem; 4 | use mem::UmemRegion; 5 | 6 | pub mod frame; 7 | use frame::{Data, DataMut, FrameDesc, Headroom, HeadroomMut}; 8 | 9 | mod fill_queue; 10 | pub use fill_queue::FillQueue; 11 | 12 | mod comp_queue; 13 | pub use comp_queue::CompQueue; 14 | 15 | use libxdp_sys::xsk_umem; 16 | use log::error; 17 | use std::{ 18 | borrow::Borrow, 19 | error::Error, 20 | fmt, io, 21 | num::NonZeroU32, 22 | ptr::{self, NonNull}, 23 | sync::{Arc, Mutex}, 24 | }; 25 | 26 | use crate::{ 27 | config::UmemConfig, 28 | ring::{XskRingCons, XskRingProd}, 29 | }; 30 | 31 | /// Wrapper around a pointer to some [`Umem`]. 32 | #[derive(Debug)] 33 | struct XskUmem(NonNull); 34 | 35 | unsafe impl Send for XskUmem {} 36 | 37 | impl XskUmem { 38 | /// # Safety 39 | /// 40 | /// Only one instance of this struct may exist since it deletes 41 | /// the UMEM as part of its [`Drop`] impl. If there are copies or 42 | /// clones of `ptr` then care must be taken to ensure they aren't 43 | /// used once this struct goes out of scope, and that they don't 44 | /// delete the UMEM themselves. 45 | unsafe fn new(ptr: NonNull) -> Self { 46 | Self(ptr) 47 | } 48 | 49 | fn as_mut_ptr(&self) -> *mut xsk_umem { 50 | self.0.as_ptr() 51 | } 52 | } 53 | 54 | impl Drop for XskUmem { 55 | fn drop(&mut self) { 56 | // SAFETY: unsafe constructor contract guarantees that the 57 | // UMEM has not been deleted already. 58 | let err = unsafe { libxdp_sys::xsk_umem__delete(self.0.as_ptr()) }; 59 | 60 | if err != 0 { 61 | error!( 62 | "failed to delete UMEM with error: {}", 63 | io::Error::from_raw_os_error(-err) 64 | ); 65 | } 66 | } 67 | } 68 | 69 | /// Wraps the [`Umem`] pointer and any saved fill queue or comp queue 70 | /// rings. These are required for creation of the socket. 71 | /// 72 | /// When we create the [`Umem`] we pass it pointers to two rings - a 73 | /// producer and consumer, representing the [`FillQueue`] and 74 | /// [`CompQueue`] respectively. The `xsk_umem` C struct also keeps a 75 | /// pair of pointers to these two queues and pops them when creating a 76 | /// socket for the first time with this [`Umem`]. Hence we store them 77 | /// here so we don't prematurely clear up the rings' memory between 78 | /// creating the [`Umem`] and creating the socket. 79 | #[derive(Debug)] 80 | struct UmemInner { 81 | ptr: XskUmem, 82 | saved_fq_and_cq: Option<(Box, Box)>, 83 | } 84 | 85 | impl UmemInner { 86 | fn new(ptr: XskUmem, saved_fq_and_cq: Option<(Box, Box)>) -> Self { 87 | Self { 88 | ptr, 89 | saved_fq_and_cq, 90 | } 91 | } 92 | } 93 | 94 | /// A region of virtual contiguous memory divided into equal-sized 95 | /// frames. It provides the underlying working memory for an AF_XDP 96 | /// [`Socket`](crate::socket::Socket). 97 | #[derive(Debug, Clone)] 98 | pub struct Umem { 99 | // `inner` must appear before `mem` to ensure correct drop order. 100 | inner: Arc>, 101 | mem: UmemRegion, 102 | } 103 | 104 | impl Umem { 105 | /// Create a new `Umem` instance backed by an anonymous memory 106 | /// mapped region. 
107 | /// 108 | /// Setting `use_huge_pages` to `true` will instructed `mmap()` to 109 | /// allocate the underlying memory using huge pages. If you are 110 | /// getting errors as a result of this, check that the 111 | /// `HugePages_Total` setting is non-zero when you run `cat 112 | /// /proc/meminfo`. 113 | pub fn new( 114 | config: UmemConfig, 115 | frame_count: NonZeroU32, 116 | use_huge_pages: bool, 117 | ) -> Result<(Self, Vec), UmemCreateError> { 118 | let frame_layout = config.into(); 119 | 120 | let mem = UmemRegion::new(frame_count, frame_layout, use_huge_pages).map_err(|e| { 121 | UmemCreateError { 122 | reason: "failed to create mmap'd UMEM region", 123 | err: e, 124 | } 125 | })?; 126 | 127 | let mut umem_ptr = ptr::null_mut(); 128 | let mut fq: Box = Box::default(); 129 | let mut cq: Box = Box::default(); 130 | 131 | let err = unsafe { 132 | libxdp_sys::xsk_umem__create( 133 | &mut umem_ptr, 134 | mem.as_ptr(), 135 | mem.len() as u64, 136 | fq.as_mut().as_mut(), // double deref due to to Box 137 | cq.as_mut().as_mut(), 138 | &config.into(), 139 | ) 140 | }; 141 | 142 | if err != 0 { 143 | return Err(UmemCreateError { 144 | reason: "non-zero error code returned when creating UMEM", 145 | err: io::Error::from_raw_os_error(-err), 146 | }); 147 | } 148 | 149 | let umem_ptr = match NonNull::new(umem_ptr) { 150 | Some(umem_ptr) => { 151 | // SAFETY: this is the only `XskUmem` instance for 152 | // this pointer, and no other pointers to the UMEM 153 | // exist. 154 | unsafe { XskUmem::new(umem_ptr) } 155 | } 156 | None => { 157 | return Err(UmemCreateError { 158 | reason: "UMEM is null", 159 | err: io::Error::from_raw_os_error(-err), 160 | }); 161 | } 162 | }; 163 | 164 | if fq.is_ring_null() { 165 | return Err(UmemCreateError { 166 | reason: "fill queue ring is null", 167 | err: io::Error::from_raw_os_error(-err), 168 | }); 169 | }; 170 | 171 | if cq.is_ring_null() { 172 | return Err(UmemCreateError { 173 | reason: "comp queue ring is null", 174 | err: io::Error::from_raw_os_error(-err), 175 | }); 176 | } 177 | 178 | let inner = UmemInner::new(umem_ptr, Some((fq, cq))); 179 | 180 | let frame_count = frame_count.get() as usize; 181 | 182 | let mut frame_descs: Vec = Vec::with_capacity(frame_count); 183 | 184 | for i in 0..frame_count { 185 | let addr = (i * frame_layout.frame_size()) 186 | + frame_layout.xdp_headroom 187 | + frame_layout.frame_headroom; 188 | 189 | frame_descs.push(FrameDesc::new(addr)); 190 | } 191 | 192 | let umem = Umem { 193 | inner: Arc::new(Mutex::new(inner)), 194 | mem, 195 | }; 196 | 197 | Ok((umem, frame_descs)) 198 | } 199 | 200 | /// The headroom and packet data segments of the `Umem` frame 201 | /// pointed at by `desc`. Contents are read-only. 202 | /// 203 | /// # Safety 204 | /// 205 | /// `desc` must correspond to a frame belonging to this 206 | /// `Umem`. Passing the descriptor of another `Umem` is very 207 | /// likely to result in incorrect memory access, by either 208 | /// straddling frames or accessing memory outside the underlying 209 | /// `Umem` area. 210 | /// 211 | /// Furthermore, the memory region accessed must not be mutably 212 | /// accessed anywhere else at the same time, either in userspace 213 | /// or by the kernel. To ensure this, care should be taken not to 214 | /// use the frame after submission to either the [`TxQueue`] or 215 | /// [`FillQueue`] until received over the [`CompQueue`] or 216 | /// [`RxQueue`] respectively. 
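A hedged sketch of the lifecycle this implies on the transmit side. `umem`, `descs`, `tx_q` and `cq` are assumed to come from UMEM and socket creation; the busy-wait is only for illustration.

```rust
use std::io::Write;

// 1. While we own the frame, mutable access is sound.
unsafe { umem.data_mut(&mut descs[0]) }
    .cursor()
    .write_all(b"some packet")
    .unwrap();

// 2. Hand the frame to the kernel. From this point on we must not touch
//    the frame's memory, even though we still hold its descriptor.
let sent = unsafe { tx_q.produce_and_wakeup(&descs[..1]) }.unwrap();
assert_eq!(sent, 1);

// 3. Once the kernel returns the frame via the completion queue, access
//    (and reuse for the next packet) is sound again.
while unsafe { cq.consume_one(&mut descs[0]) } == 0 {
    // spin / poll; a real application would do something smarter here
}
let _sent_contents = unsafe { umem.data(&descs[0]) };
```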
217 | /// 218 | /// [`TxQueue`]: crate::TxQueue 219 | /// [`RxQueue`]: crate::RxQueue 220 | #[inline] 221 | pub unsafe fn frame(&self, desc: &FrameDesc) -> (Headroom, Data) { 222 | // SAFETY: We know from the unsafe contract of this function that: 223 | // a. Accessing the headroom and data segment identified by 224 | // `desc` is valid, since it describes a frame in this UMEM. 225 | // b. This access is sound since there are no mutable 226 | // references to the headroom and data segments. 227 | unsafe { self.mem.frame(desc) } 228 | } 229 | 230 | /// The headroom segment of the `Umem` frame pointed at by 231 | /// `desc`. Contents are read-only. 232 | /// 233 | /// # Safety 234 | /// 235 | /// See [`frame`](Self::frame). 236 | #[inline] 237 | pub unsafe fn headroom(&self, desc: &FrameDesc) -> Headroom { 238 | // SAFETY: see `frame`. 239 | unsafe { self.mem.headroom(desc) } 240 | } 241 | 242 | /// The data segment of the `Umem` frame pointed at by 243 | /// `desc`. Contents are read-only. 244 | /// 245 | /// # Safety 246 | /// 247 | /// See [`frame`](Self::frame). 248 | #[inline] 249 | pub unsafe fn data(&self, desc: &FrameDesc) -> Data { 250 | // SAFETY: see `frame`. 251 | unsafe { self.mem.data(desc) } 252 | } 253 | 254 | /// The headroom and packet data segments of the `Umem` frame 255 | /// pointed at by `desc`. Contents are writeable. 256 | /// 257 | /// # Safety 258 | /// 259 | /// `desc` must correspond to a frame belonging to this 260 | /// `Umem`. Passing the descriptor of another `Umem` is very 261 | /// likely to result in incorrect memory access, by either 262 | /// straddling frames or accessing memory outside the underlying 263 | /// `Umem` area. 264 | /// 265 | /// Furthermore, the memory region accessed must not be mutably or 266 | /// immutably accessed anywhere else at the same time, either in 267 | /// userspace or by the kernel. To ensure this, care should be 268 | /// taken not to use the frame after submission to either the 269 | /// [`TxQueue`] or [`FillQueue`] until received over the 270 | /// [`CompQueue`] or [`RxQueue`] respectively. 271 | /// 272 | /// [`TxQueue`]: crate::TxQueue 273 | /// [`RxQueue`]: crate::RxQueue 274 | #[inline] 275 | pub unsafe fn frame_mut<'a>( 276 | &'a self, 277 | desc: &'a mut FrameDesc, 278 | ) -> (HeadroomMut<'a>, DataMut<'a>) { 279 | // SAFETY: We know from the unsafe contract of this function that: 280 | // a. Accessing the headroom and data segment identified by 281 | // `desc` is valid, since it describes a frame in this UMEM. 282 | // b. This access is sound since there are no other mutable or 283 | // immutable references to the headroom and data segments. 284 | unsafe { self.mem.frame_mut(desc) } 285 | } 286 | 287 | /// The headroom segment of the `Umem` frame pointed at by 288 | /// `desc`. Contents are writeable. 289 | /// 290 | /// # Safety 291 | /// 292 | /// See [`frame_mut`](Self::frame_mut). 293 | #[inline] 294 | pub unsafe fn headroom_mut<'a>(&'a self, desc: &'a mut FrameDesc) -> HeadroomMut<'a> { 295 | // SAFETY: see `frame_mut`. 296 | unsafe { self.mem.headroom_mut(desc) } 297 | } 298 | 299 | /// The data segment of the `Umem` frame pointed at by 300 | /// `desc`. Contents are writeable. 301 | /// 302 | /// # Safety 303 | /// 304 | /// See [`frame_mut`](Self::frame_mut). 305 | #[inline] 306 | pub unsafe fn data_mut<'a>(&'a self, desc: &'a mut FrameDesc) -> DataMut<'a> { 307 | // SAFETY: see `frame_mut`. 
308 | unsafe { self.mem.data_mut(desc) } 309 | } 310 | 311 | /// Intended to be called on socket creation, this passes the 312 | /// create function a pointer to the UMEM and any saved fill queue 313 | /// or completion queue. 314 | /// 315 | /// Regarding the saved queues, this is a byproduct of how the 316 | /// UMEM is created in the C code and we save them here to avoid 317 | /// leaking memory. 318 | #[inline] 319 | pub(crate) fn with_ptr_and_saved_queues(&self, mut f: F) -> T 320 | where 321 | F: FnMut(*mut xsk_umem, &mut Option<(Box, Box)>) -> T, 322 | { 323 | let mut inner = self.inner.lock().unwrap(); 324 | 325 | f(inner.ptr.as_mut_ptr(), &mut inner.saved_fq_and_cq) 326 | } 327 | } 328 | 329 | /// Error detailing why [`Umem`] creation failed. 330 | #[derive(Debug)] 331 | pub struct UmemCreateError { 332 | reason: &'static str, 333 | err: io::Error, 334 | } 335 | 336 | impl fmt::Display for UmemCreateError { 337 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 338 | write!(f, "{}", self.reason) 339 | } 340 | } 341 | 342 | impl Error for UmemCreateError { 343 | fn source(&self) -> Option<&(dyn Error + 'static)> { 344 | Some(self.err.borrow()) 345 | } 346 | } 347 | 348 | /// Dimensions of a [`Umem`] frame. 349 | #[derive(Debug, Clone, Copy)] 350 | struct FrameLayout { 351 | xdp_headroom: usize, 352 | frame_headroom: usize, 353 | mtu: usize, 354 | } 355 | 356 | impl FrameLayout { 357 | fn frame_size(&self) -> usize { 358 | self.xdp_headroom + self.frame_headroom + self.mtu 359 | } 360 | } 361 | 362 | impl From for FrameLayout { 363 | fn from(c: UmemConfig) -> Self { 364 | Self { 365 | xdp_headroom: c.xdp_headroom() as usize, 366 | frame_headroom: c.frame_headroom() as usize, 367 | mtu: c.mtu() as usize, 368 | } 369 | } 370 | } 371 | 372 | #[cfg(test)] 373 | mod tests { 374 | use std::convert::TryInto; 375 | 376 | use crate::config::{UmemConfigBuilder, XDP_UMEM_MIN_CHUNK_SIZE}; 377 | 378 | use super::*; 379 | 380 | #[test] 381 | fn config_frame_size_equals_layout_frame_size() { 382 | let config = UmemConfigBuilder::new() 383 | .frame_headroom(512) 384 | .frame_size(XDP_UMEM_MIN_CHUNK_SIZE.try_into().unwrap()) 385 | .build() 386 | .unwrap(); 387 | 388 | let layout: FrameLayout = config.into(); 389 | 390 | assert_eq!(config.frame_size().get() as usize, layout.frame_size()) 391 | } 392 | } 393 | -------------------------------------------------------------------------------- /src/util.rs: -------------------------------------------------------------------------------- 1 | #[inline] 2 | pub fn get_errno() -> i32 { 3 | unsafe { *libc::__errno_location() } 4 | } 5 | 6 | #[inline] 7 | pub fn is_pow_of_two(val: u32) -> bool { 8 | if val == 0 { 9 | return false; 10 | } 11 | (val & (val - 1)) == 0 12 | } 13 | 14 | /// A handrolled `min` calc for usizes that appears to be ~20% faster 15 | /// than using [`cmp::min`](std::cmp::min) - though the difference is 16 | /// still only ~50-60 picoseconds when tested on a CPU with max clock 17 | /// speed of 4.9 GHz (see bench sub-crate for code). Decided it would 18 | /// be worth it since the need for `min` appears a fair bit in normal 19 | /// control flow. 
20 | #[inline] 21 | pub fn min_usize(fst: usize, snd: usize) -> usize { 22 | if fst < snd { 23 | fst 24 | } else { 25 | snd 26 | } 27 | } 28 | 29 | #[cfg(test)] 30 | mod tests { 31 | use super::*; 32 | 33 | #[test] 34 | fn check_powers_of_two() { 35 | assert_eq!(is_pow_of_two(0), false); 36 | assert_eq!(is_pow_of_two(1), true); 37 | assert_eq!(is_pow_of_two(2), true); 38 | assert_eq!(is_pow_of_two(13), false); 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /tests/comp_queue_tests.rs: -------------------------------------------------------------------------------- 1 | #[allow(dead_code)] 2 | mod setup; 3 | use setup::{PacketGenerator, Xsk, XskConfig, ETHERNET_PACKET}; 4 | 5 | use serial_test::serial; 6 | use std::{convert::TryInto, io::Write, thread, time::Duration}; 7 | use xsk_rs::config::{QueueSize, SocketConfig, UmemConfig}; 8 | use xsk_rs::umem::frame::FrameDesc; 9 | 10 | const CQ_SIZE: u32 = 16; 11 | const TX_Q_SIZE: u32 = 16; 12 | const FRAME_COUNT: u32 = 32; 13 | 14 | fn build_configs() -> (UmemConfig, SocketConfig) { 15 | let umem_config = UmemConfig::builder() 16 | .comp_queue_size(QueueSize::new(CQ_SIZE).unwrap()) 17 | .build() 18 | .unwrap(); 19 | 20 | let socket_config = SocketConfig::builder() 21 | .tx_queue_size(QueueSize::new(TX_Q_SIZE).unwrap()) 22 | .build(); 23 | 24 | (umem_config, socket_config) 25 | } 26 | 27 | #[tokio::test(flavor = "multi_thread", worker_threads = 2)] 28 | #[serial] 29 | async fn comp_queue_consumes_nothing_if_tx_q_unused() { 30 | fn test(dev1: (Xsk, PacketGenerator), _dev2: (Xsk, PacketGenerator)) { 31 | let mut xsk1 = dev1.0; 32 | 33 | unsafe { 34 | assert_eq!(xsk1.cq.consume(&mut xsk1.descs), 0); 35 | } 36 | 37 | unsafe { 38 | assert_eq!(xsk1.cq.consume_one(&mut xsk1.descs[0]), 0); 39 | } 40 | } 41 | 42 | build_configs_and_run_test(test).await 43 | } 44 | 45 | #[tokio::test(flavor = "multi_thread", worker_threads = 2)] 46 | #[serial] 47 | async fn num_frames_consumed_match_those_produced() { 48 | fn test(dev1: (Xsk, PacketGenerator), _dev2: (Xsk, PacketGenerator)) { 49 | let mut xsk1 = dev1.0; 50 | 51 | for i in 0..2 { 52 | unsafe { 53 | xsk1.umem 54 | .data_mut(&mut xsk1.descs[i]) 55 | .cursor() 56 | .write_all(ÐERNET_PACKET[..]) 57 | .unwrap(); 58 | } 59 | } 60 | 61 | assert_eq!( 62 | unsafe { xsk1.tx_q.produce_and_wakeup(&xsk1.descs[..2]).unwrap() }, 63 | 2 64 | ); 65 | 66 | // Wait briefly so we don't try to consume too early 67 | thread::sleep(Duration::from_millis(5)); 68 | 69 | assert_eq!(unsafe { xsk1.cq.consume(&mut xsk1.descs) }, 2); 70 | } 71 | 72 | build_configs_and_run_test(test).await 73 | } 74 | 75 | #[tokio::test(flavor = "multi_thread", worker_threads = 2)] 76 | #[serial] 77 | async fn consume_one_should_consume_a_single_frame_even_if_multiple_produced() { 78 | fn test(dev1: (Xsk, PacketGenerator), _dev2: (Xsk, PacketGenerator)) { 79 | let mut xsk1 = dev1.0; 80 | 81 | for i in 0..2 { 82 | unsafe { 83 | xsk1.umem 84 | .data_mut(&mut xsk1.descs[i]) 85 | .cursor() 86 | .write_all(ÐERNET_PACKET[..]) 87 | .unwrap(); 88 | } 89 | } 90 | assert_eq!( 91 | unsafe { xsk1.tx_q.produce_and_wakeup(&xsk1.descs[..2]).unwrap() }, 92 | 2 93 | ); 94 | 95 | // Wait briefly so we don't try to consume too early 96 | thread::sleep(Duration::from_millis(5)); 97 | 98 | assert_eq!(unsafe { xsk1.cq.consume_one(&mut xsk1.descs[0]) }, 1); 99 | } 100 | 101 | build_configs_and_run_test(test).await 102 | } 103 | 104 | #[tokio::test(flavor = "multi_thread", worker_threads = 2)] 105 | #[serial] 106 | async fn 
addr_of_frames_consumed_match_addr_of_those_produced() { 107 | fn test(dev1: (Xsk, PacketGenerator), _dev2: (Xsk, PacketGenerator)) { 108 | let mut xsk1 = dev1.0; 109 | let nb = (FRAME_COUNT / 2) as usize; 110 | 111 | assert!(nb > 0); 112 | 113 | let (tx_frames, rx_frames) = xsk1.descs.split_at_mut(nb); 114 | 115 | for i in 0..nb { 116 | unsafe { 117 | xsk1.umem 118 | .data_mut(&mut tx_frames[i]) 119 | .cursor() 120 | .write_all(ÐERNET_PACKET[..]) 121 | .unwrap(); 122 | } 123 | } 124 | assert_eq!( 125 | unsafe { xsk1.tx_q.produce_and_wakeup(&tx_frames).unwrap() }, 126 | nb 127 | ); 128 | 129 | // Wait briefly so we don't try to consume too early 130 | thread::sleep(Duration::from_millis(5)); 131 | 132 | assert_eq!(unsafe { xsk1.cq.consume(&mut rx_frames[..nb]) }, nb); 133 | 134 | let mut txd_addrs = tx_frames 135 | .iter() 136 | .map(FrameDesc::addr) 137 | .collect::>(); 138 | 139 | let mut rxd_addrs = rx_frames[..nb] 140 | .iter() 141 | .map(FrameDesc::addr) 142 | .collect::>(); 143 | 144 | txd_addrs.sort(); 145 | rxd_addrs.sort(); 146 | 147 | assert_eq!(txd_addrs, rxd_addrs); 148 | } 149 | 150 | build_configs_and_run_test(test).await 151 | } 152 | 153 | #[tokio::test(flavor = "multi_thread", worker_threads = 2)] 154 | #[serial] 155 | async fn frame_consumed_with_consume_one_should_match_addr_of_one_produced() { 156 | fn test(dev1: (Xsk, PacketGenerator), _dev2: (Xsk, PacketGenerator)) { 157 | let mut xsk1 = dev1.0; 158 | let nb = (FRAME_COUNT / 2) as usize; 159 | 160 | assert!(nb > 0); 161 | 162 | let (tx_frames, rx_frames) = xsk1.descs.split_at_mut(nb); 163 | 164 | unsafe { 165 | xsk1.umem 166 | .data_mut(&mut tx_frames[0]) 167 | .cursor() 168 | .write_all(ÐERNET_PACKET[..]) 169 | .unwrap(); 170 | } 171 | 172 | assert_eq!( 173 | unsafe { xsk1.tx_q.produce_and_wakeup(&tx_frames).unwrap() }, 174 | nb 175 | ); 176 | 177 | // Wait briefly so we don't try to consume too early 178 | thread::sleep(Duration::from_millis(5)); 179 | 180 | assert_eq!(unsafe { xsk1.cq.consume_one(&mut rx_frames[0]) }, 1); 181 | 182 | assert!(tx_frames.iter().any(|f| rx_frames[0].addr() == f.addr())); 183 | } 184 | 185 | build_configs_and_run_test(test).await 186 | } 187 | 188 | async fn build_configs_and_run_test(test: F) 189 | where 190 | F: Fn((Xsk, PacketGenerator), (Xsk, PacketGenerator)) + Send + 'static, 191 | { 192 | let (dev1_umem_config, dev1_socket_config) = build_configs(); 193 | let (dev2_umem_config, dev2_socket_config) = build_configs(); 194 | 195 | setup::run_test( 196 | XskConfig { 197 | frame_count: FRAME_COUNT.try_into().unwrap(), 198 | umem_config: dev1_umem_config, 199 | socket_config: dev1_socket_config, 200 | }, 201 | XskConfig { 202 | frame_count: FRAME_COUNT.try_into().unwrap(), 203 | umem_config: dev2_umem_config, 204 | socket_config: dev2_socket_config, 205 | }, 206 | test, 207 | ) 208 | .await; 209 | } 210 | -------------------------------------------------------------------------------- /tests/fill_queue_tests.rs: -------------------------------------------------------------------------------- 1 | #[allow(dead_code)] 2 | mod setup; 3 | use std::convert::TryInto; 4 | 5 | use setup::{PacketGenerator, Xsk, XskConfig}; 6 | 7 | use serial_test::serial; 8 | use xsk_rs::config::{QueueSize, SocketConfig, UmemConfig}; 9 | 10 | const FQ_SIZE: u32 = 4; 11 | const FRAME_COUNT: u32 = 32; 12 | 13 | fn build_configs() -> (UmemConfig, SocketConfig) { 14 | let umem_config = UmemConfig::builder() 15 | .fill_queue_size(QueueSize::new(FQ_SIZE).unwrap()) 16 | .build() 17 | .unwrap(); 18 | 19 | let 
socket_config = SocketConfig::default(); 20 | 21 | (umem_config, socket_config) 22 | } 23 | 24 | #[tokio::test(flavor = "multi_thread", worker_threads = 2)] 25 | #[serial] 26 | async fn producing_fq_size_frames_is_ok() { 27 | fn test(dev1: (Xsk, PacketGenerator), _dev2: (Xsk, PacketGenerator)) { 28 | let mut xsk1 = dev1.0; 29 | 30 | assert_eq!(unsafe { xsk1.fq.produce(&xsk1.descs[..4]) }, 4); 31 | } 32 | 33 | build_configs_and_run_test(test).await 34 | } 35 | 36 | #[tokio::test(flavor = "multi_thread", worker_threads = 2)] 37 | #[serial] 38 | async fn producing_more_than_fq_size_frames_fails() { 39 | fn test(dev1: (Xsk, PacketGenerator), _dev2: (Xsk, PacketGenerator)) { 40 | let mut xsk1 = dev1.0; 41 | 42 | assert_eq!(unsafe { xsk1.fq.produce(&xsk1.descs[..5]) }, 0); 43 | } 44 | 45 | build_configs_and_run_test(test).await 46 | } 47 | 48 | #[tokio::test(flavor = "multi_thread", worker_threads = 2)] 49 | #[serial] 50 | async fn produce_frames_until_full() { 51 | fn test(dev1: (Xsk, PacketGenerator), _dev2: (Xsk, PacketGenerator)) { 52 | let mut xsk1 = dev1.0; 53 | 54 | assert_eq!(unsafe { xsk1.fq.produce(&xsk1.descs[..2]) }, 2); 55 | assert_eq!(unsafe { xsk1.fq.produce(&xsk1.descs[2..3]) }, 1); 56 | assert_eq!(unsafe { xsk1.fq.produce(&xsk1.descs[3..8]) }, 0); 57 | assert_eq!(unsafe { xsk1.fq.produce(&xsk1.descs[3..4]) }, 1); 58 | } 59 | 60 | build_configs_and_run_test(test).await 61 | } 62 | 63 | #[tokio::test(flavor = "multi_thread", worker_threads = 2)] 64 | #[serial] 65 | async fn produce_one_is_ok() { 66 | fn test(dev1: (Xsk, PacketGenerator), _dev2: (Xsk, PacketGenerator)) { 67 | let mut xsk1 = dev1.0; 68 | 69 | assert_eq!(unsafe { xsk1.fq.produce_one(&xsk1.descs[0]) }, 1); 70 | } 71 | 72 | build_configs_and_run_test(test).await 73 | } 74 | 75 | async fn build_configs_and_run_test(test: F) 76 | where 77 | F: Fn((Xsk, PacketGenerator), (Xsk, PacketGenerator)) + Send + 'static, 78 | { 79 | let (dev1_umem_config, dev1_socket_config) = build_configs(); 80 | let (dev2_umem_config, dev2_socket_config) = build_configs(); 81 | 82 | setup::run_test( 83 | XskConfig { 84 | frame_count: FRAME_COUNT.try_into().unwrap(), 85 | umem_config: dev1_umem_config, 86 | socket_config: dev1_socket_config, 87 | }, 88 | XskConfig { 89 | frame_count: FRAME_COUNT.try_into().unwrap(), 90 | umem_config: dev2_umem_config, 91 | socket_config: dev2_socket_config, 92 | }, 93 | test, 94 | ) 95 | .await; 96 | } 97 | -------------------------------------------------------------------------------- /tests/rx_queue_tests.rs: -------------------------------------------------------------------------------- 1 | #[allow(dead_code)] 2 | mod setup; 3 | use setup::{PacketGenerator, Xsk, XskConfig, ETHERNET_PACKET}; 4 | 5 | use libxdp_sys::XDP_PACKET_HEADROOM; 6 | use serial_test::serial; 7 | use std::{convert::TryInto, io::Write}; 8 | use xsk_rs::config::{FrameSize, QueueSize, SocketConfig, UmemConfig, XDP_UMEM_MIN_CHUNK_SIZE}; 9 | 10 | const CQ_SIZE: u32 = 4; 11 | const FQ_SIZE: u32 = 4; 12 | const TX_Q_SIZE: u32 = 4; 13 | const RX_Q_SIZE: u32 = 4; 14 | const FRAME_SIZE: u32 = XDP_UMEM_MIN_CHUNK_SIZE; 15 | const FRAME_COUNT: u32 = 8; 16 | const FRAME_HEADROOM: u32 = 512; 17 | 18 | fn build_configs() -> (UmemConfig, SocketConfig) { 19 | let umem_config = UmemConfig::builder() 20 | .comp_queue_size(QueueSize::new(CQ_SIZE).unwrap()) 21 | .fill_queue_size(QueueSize::new(FQ_SIZE).unwrap()) 22 | .frame_size(FrameSize::new(FRAME_SIZE).unwrap()) 23 | .frame_headroom(FRAME_HEADROOM) 24 | .build() 25 | .unwrap(); 26 | 27 | let socket_config 
= SocketConfig::builder() 28 | .tx_queue_size(QueueSize::new(TX_Q_SIZE).unwrap()) 29 | .rx_queue_size(QueueSize::new(RX_Q_SIZE).unwrap()) 30 | .build(); 31 | 32 | (umem_config, socket_config) 33 | } 34 | 35 | #[tokio::test(flavor = "multi_thread", worker_threads = 2)] 36 | #[serial] 37 | async fn nothing_is_consumed_when_no_tx_sent_and_fill_q_empty() { 38 | fn test(dev1: (Xsk, PacketGenerator), _dev2: (Xsk, PacketGenerator)) { 39 | let mut xsk1 = dev1.0; 40 | 41 | unsafe { 42 | assert_eq!(xsk1.rx_q.consume(&mut xsk1.descs[..2]), 0); 43 | 44 | assert_eq!( 45 | xsk1.rx_q 46 | .poll_and_consume(&mut xsk1.descs[..2], 100) 47 | .unwrap(), 48 | 0 49 | ); 50 | 51 | assert_eq!(xsk1.rx_q.consume_one(&mut xsk1.descs[0]), 0); 52 | 53 | assert_eq!( 54 | xsk1.rx_q 55 | .poll_and_consume_one(&mut xsk1.descs[0], 100) 56 | .unwrap(), 57 | 0 58 | ); 59 | } 60 | } 61 | 62 | build_configs_and_run_test(test).await 63 | } 64 | 65 | #[tokio::test(flavor = "multi_thread", worker_threads = 2)] 66 | #[serial] 67 | async fn nothing_is_consumed_when_tx_sent_but_fill_q_empty() { 68 | fn test(dev1: (Xsk, PacketGenerator), dev2: (Xsk, PacketGenerator)) { 69 | let mut xsk1 = dev1.0; 70 | let mut xsk2 = dev2.0; 71 | 72 | unsafe { 73 | assert_eq!(xsk2.tx_q.produce_and_wakeup(&xsk2.descs[..4]).unwrap(), 4); 74 | 75 | assert_eq!(xsk1.rx_q.consume(&mut xsk1.descs[..4]), 0); 76 | 77 | assert_eq!( 78 | xsk1.rx_q 79 | .poll_and_consume(&mut xsk1.descs[..4], 100) 80 | .unwrap(), 81 | 0 82 | ); 83 | 84 | assert_eq!(xsk1.rx_q.consume_one(&mut xsk1.descs[0]), 0); 85 | 86 | assert_eq!( 87 | xsk1.rx_q 88 | .poll_and_consume_one(&mut xsk1.descs[0], 100) 89 | .unwrap(), 90 | 0 91 | ); 92 | } 93 | } 94 | 95 | build_configs_and_run_test(test).await 96 | } 97 | 98 | #[tokio::test(flavor = "multi_thread", worker_threads = 2)] 99 | #[serial] 100 | async fn consumed_frame_data_matches_what_was_sent() { 101 | fn test(dev1: (Xsk, PacketGenerator), dev2: (Xsk, PacketGenerator)) { 102 | let mut xsk1 = dev1.0; 103 | let mut xsk2 = dev2.0; 104 | 105 | unsafe { 106 | // Add a frame in the dev2 fill queue ready to receive 107 | assert_eq!(xsk2.fq.produce(&xsk2.descs[0..1]), 1); 108 | 109 | xsk1.umem 110 | .data_mut(&mut xsk1.descs[0]) 111 | .cursor() 112 | .write_all(ÐERNET_PACKET[..]) 113 | .unwrap(); 114 | 115 | assert_eq!(xsk1.descs[0].lengths().data(), ETHERNET_PACKET.len()); 116 | 117 | // Send data 118 | assert_eq!(xsk1.tx_q.produce_and_wakeup(&xsk1.descs[..1]).unwrap(), 1); 119 | 120 | // Read on dev2 121 | assert_eq!(xsk2.rx_q.poll_and_consume(&mut xsk2.descs, 100).unwrap(), 1); 122 | 123 | assert_eq!(xsk2.descs[0].lengths().data(), ETHERNET_PACKET.len()); 124 | 125 | // Check that the data is correct 126 | assert_eq!(xsk2.umem.data(&xsk2.descs[0]).contents(), ETHERNET_PACKET); 127 | assert_eq!( 128 | xsk2.umem.data_mut(&mut xsk2.descs[0]).contents(), 129 | ETHERNET_PACKET 130 | ); 131 | } 132 | } 133 | 134 | build_configs_and_run_test(test).await 135 | } 136 | 137 | #[tokio::test(flavor = "multi_thread", worker_threads = 2)] 138 | #[serial] 139 | async fn consume_one_frame_data_matches_what_was_sent() { 140 | fn test(dev1: (Xsk, PacketGenerator), dev2: (Xsk, PacketGenerator)) { 141 | let mut xsk1 = dev1.0; 142 | let mut xsk2 = dev2.0; 143 | 144 | unsafe { 145 | // Add a frame in the dev2 fill queue ready to receive 146 | assert_eq!(xsk2.fq.produce(&xsk2.descs[0..1]), 1); 147 | 148 | xsk1.umem 149 | .data_mut(&mut xsk1.descs[0]) 150 | .cursor() 151 | .write_all(ÐERNET_PACKET[..]) 152 | .unwrap(); 153 | 154 | 
assert_eq!(xsk1.descs[0].lengths().data(), ETHERNET_PACKET.len()); 155 | 156 | // Send data 157 | assert_eq!(xsk1.tx_q.produce_and_wakeup(&xsk1.descs[..1]).unwrap(), 1); 158 | 159 | // Read on dev2 160 | assert_eq!( 161 | xsk2.rx_q 162 | .poll_and_consume_one(&mut xsk2.descs[0], 100) 163 | .unwrap(), 164 | 1 165 | ); 166 | 167 | assert_eq!(xsk2.descs[0].lengths().data(), ETHERNET_PACKET.len()); 168 | 169 | // Check that the data is correct 170 | assert_eq!(xsk2.umem.data(&xsk2.descs[0]).contents(), ETHERNET_PACKET); 171 | assert_eq!( 172 | xsk2.umem.data_mut(&mut xsk2.descs[0]).contents(), 173 | ETHERNET_PACKET 174 | ); 175 | } 176 | } 177 | 178 | build_configs_and_run_test(test).await 179 | } 180 | 181 | #[tokio::test(flavor = "multi_thread", worker_threads = 2)] 182 | #[serial] 183 | async fn consumed_frame_addresses_include_xdp_and_frame_headroom() { 184 | fn test(dev1: (Xsk, PacketGenerator), dev2: (Xsk, PacketGenerator)) { 185 | unsafe { 186 | let mut xsk1 = dev1.0; 187 | let mut xsk2 = dev2.0; 188 | 189 | // Add a frame in the dev2 fill queue ready to receive 190 | assert_eq!(xsk2.fq.produce(&xsk2.descs[0..1]), 1); 191 | 192 | xsk1.umem 193 | .data_mut(&mut xsk1.descs[0]) 194 | .cursor() 195 | .write_all(ÐERNET_PACKET[..]) 196 | .unwrap(); 197 | 198 | assert_eq!(xsk1.descs[0].lengths().data(), ETHERNET_PACKET.len()); 199 | 200 | // Transmit data 201 | assert_eq!(xsk1.tx_q.produce_and_wakeup(&xsk1.descs[..1]).unwrap(), 1); 202 | 203 | // Read on dev2 204 | assert_eq!(xsk2.rx_q.poll_and_consume(&mut xsk2.descs, 100).unwrap(), 1); 205 | 206 | assert_eq!(xsk2.descs[0].lengths().data(), ETHERNET_PACKET.len()); 207 | 208 | // Check that the data is correct 209 | assert_eq!(xsk2.umem.data(&xsk2.descs[0]).contents(), ETHERNET_PACKET); 210 | assert_eq!( 211 | xsk2.umem.data_mut(&mut xsk2.descs[0]).contents(), 212 | ETHERNET_PACKET 213 | ); 214 | 215 | // Check addr starts where we expect 216 | assert_eq!( 217 | xsk2.descs[0].addr(), 218 | (XDP_PACKET_HEADROOM + FRAME_HEADROOM) as usize 219 | ); 220 | } 221 | } 222 | 223 | build_configs_and_run_test(test).await 224 | } 225 | 226 | #[tokio::test(flavor = "multi_thread", worker_threads = 2)] 227 | #[serial] 228 | async fn consume_one_frame_address_includes_xdp_and_frame_headroom() { 229 | fn test(dev1: (Xsk, PacketGenerator), dev2: (Xsk, PacketGenerator)) { 230 | unsafe { 231 | let mut xsk1 = dev1.0; 232 | let mut xsk2 = dev2.0; 233 | 234 | // Add a frame in the dev2 fill queue ready to receive 235 | assert_eq!(xsk2.fq.produce(&xsk2.descs[0..1]), 1); 236 | 237 | xsk1.umem 238 | .data_mut(&mut xsk1.descs[0]) 239 | .cursor() 240 | .write_all(ÐERNET_PACKET[..]) 241 | .unwrap(); 242 | 243 | assert_eq!(xsk1.descs[0].lengths().data(), ETHERNET_PACKET.len()); 244 | 245 | // Transmit data 246 | assert_eq!(xsk1.tx_q.produce_and_wakeup(&xsk1.descs[..1]).unwrap(), 1); 247 | 248 | // Read on dev2 249 | assert_eq!( 250 | xsk2.rx_q 251 | .poll_and_consume_one(&mut xsk2.descs[0], 100) 252 | .unwrap(), 253 | 1 254 | ); 255 | 256 | assert_eq!(xsk2.descs[0].lengths().data(), ETHERNET_PACKET.len()); 257 | 258 | // Check that the data is correct 259 | assert_eq!(xsk2.umem.data(&xsk2.descs[0]).contents(), ETHERNET_PACKET); 260 | assert_eq!( 261 | xsk2.umem.data_mut(&mut xsk2.descs[0]).contents(), 262 | ETHERNET_PACKET 263 | ); 264 | 265 | // Check addr starts where we expect 266 | assert_eq!( 267 | xsk2.descs[0].addr(), 268 | (XDP_PACKET_HEADROOM + FRAME_HEADROOM) as usize 269 | ); 270 | } 271 | } 272 | 273 | build_configs_and_run_test(test).await 274 | } 275 | 276 
| #[tokio::test(flavor = "multi_thread", worker_threads = 2)] 277 | #[serial] 278 | async fn headroom_len_reset_after_receive() { 279 | fn test(dev1: (Xsk, PacketGenerator), dev2: (Xsk, PacketGenerator)) { 280 | unsafe { 281 | let mut xsk1 = dev1.0; 282 | let mut xsk2 = dev2.0; 283 | 284 | // Write to dev2 frame headroom and put in fill queue 285 | xsk2.umem 286 | .headroom_mut(&mut xsk2.descs[0]) 287 | .cursor() 288 | .write_all(ÐERNET_PACKET[..]) 289 | .unwrap(); 290 | 291 | assert_eq!(xsk2.descs[0].lengths().data(), 0); 292 | assert_eq!(xsk2.descs[0].lengths().headroom(), ETHERNET_PACKET.len()); 293 | 294 | assert_eq!(xsk2.fq.produce(&xsk2.descs[0..1]), 1); 295 | 296 | // Send from dev1 297 | xsk1.umem 298 | .data_mut(&mut xsk1.descs[0]) 299 | .cursor() 300 | .write_all(ÐERNET_PACKET[..]) 301 | .unwrap(); 302 | 303 | assert_eq!(xsk1.tx_q.produce_and_wakeup(&xsk1.descs[..1]).unwrap(), 1); 304 | 305 | // Read on dev2 306 | assert_eq!(xsk2.rx_q.poll_and_consume(&mut xsk2.descs, 100).unwrap(), 1); 307 | 308 | assert_eq!(xsk2.descs[0].lengths().data(), ETHERNET_PACKET.len()); 309 | assert_eq!(xsk2.descs[0].lengths().headroom(), 0); 310 | 311 | // Length reset to zero but data should still be there 312 | xsk2.umem 313 | .headroom_mut(&mut xsk2.descs[0]) 314 | .cursor() 315 | .set_pos(ETHERNET_PACKET.len()); 316 | 317 | assert_eq!( 318 | xsk2.umem.headroom(&xsk2.descs[0]).contents(), 319 | ÐERNET_PACKET[..] 320 | ); 321 | assert_eq!( 322 | xsk2.umem.headroom_mut(&mut xsk2.descs[0]).contents(), 323 | ÐERNET_PACKET[..] 324 | ); 325 | } 326 | } 327 | 328 | build_configs_and_run_test(test).await 329 | } 330 | 331 | #[tokio::test(flavor = "multi_thread", worker_threads = 2)] 332 | #[serial] 333 | async fn consume_one_headroom_len_reset_after_receive() { 334 | fn test(dev1: (Xsk, PacketGenerator), dev2: (Xsk, PacketGenerator)) { 335 | unsafe { 336 | let mut xsk1 = dev1.0; 337 | let mut xsk2 = dev2.0; 338 | 339 | // Write to dev2 frame headroom and put in fill queue 340 | xsk2.umem 341 | .headroom_mut(&mut xsk2.descs[0]) 342 | .cursor() 343 | .write_all(ÐERNET_PACKET[..]) 344 | .unwrap(); 345 | 346 | assert_eq!(xsk2.descs[0].lengths().data(), 0); 347 | assert_eq!(xsk2.descs[0].lengths().headroom(), ETHERNET_PACKET.len()); 348 | 349 | assert_eq!(xsk2.fq.produce(&xsk2.descs[0..1]), 1); 350 | 351 | // Send from dev1 352 | xsk1.umem 353 | .data_mut(&mut xsk1.descs[0]) 354 | .cursor() 355 | .write_all(ÐERNET_PACKET[..]) 356 | .unwrap(); 357 | 358 | assert_eq!(xsk1.tx_q.produce_and_wakeup(&xsk1.descs[..1]).unwrap(), 1); 359 | 360 | // Read on dev2 361 | assert_eq!( 362 | xsk2.rx_q 363 | .poll_and_consume_one(&mut xsk2.descs[0], 100) 364 | .unwrap(), 365 | 1 366 | ); 367 | 368 | assert_eq!(xsk2.descs[0].lengths().data(), ETHERNET_PACKET.len()); 369 | assert_eq!(xsk2.descs[0].lengths().headroom(), 0); 370 | 371 | // Length reset to zero but data should still be there 372 | xsk2.umem 373 | .headroom_mut(&mut xsk2.descs[0]) 374 | .cursor() 375 | .set_pos(ETHERNET_PACKET.len()); 376 | 377 | assert_eq!( 378 | xsk2.umem.headroom(&xsk2.descs[0]).contents(), 379 | ÐERNET_PACKET[..] 380 | ); 381 | assert_eq!( 382 | xsk2.umem.headroom_mut(&mut xsk2.descs[0]).contents(), 383 | ÐERNET_PACKET[..] 
384 |             );
385 |         }
386 |     }
387 | 
388 |     build_configs_and_run_test(test).await
389 | }
390 | 
391 | #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
392 | #[serial]
393 | async fn xdp_statistics_report_dropped_packet() {
394 |     fn test(dev1: (Xsk, PacketGenerator), dev2: (Xsk, PacketGenerator)) {
395 |         unsafe {
396 |             let mut xsk1 = dev1.0;
397 |             let mut xsk2 = dev2.0;
398 | 
399 |             // Don't add frames to dev2's fill queue, just send from
400 |             // dev1
401 |             xsk1.umem
402 |                 .data_mut(&mut xsk1.descs[0])
403 |                 .cursor()
404 |                 .write_all(&ETHERNET_PACKET[..])
405 |                 .unwrap();
406 | 
407 |             assert_eq!(xsk1.tx_q.produce_and_wakeup(&xsk1.descs[..1]).unwrap(), 1);
408 | 
409 |             // Try read - no frames in fill queue so should be zero
410 |             assert_eq!(xsk2.rx_q.poll_and_consume(&mut xsk2.descs, 100).unwrap(), 0);
411 | 
412 |             let stats = xsk2.rx_q.fd().xdp_statistics().unwrap();
413 | 
414 |             assert!(stats.rx_dropped() > 0);
415 |         }
416 |     }
417 | 
418 |     build_configs_and_run_test(test).await
419 | }
420 | 
421 | async fn build_configs_and_run_test<F>(test: F)
422 | where
423 |     F: Fn((Xsk, PacketGenerator), (Xsk, PacketGenerator)) + Send + 'static,
424 | {
425 |     let (dev1_umem_config, dev1_socket_config) = build_configs();
426 |     let (dev2_umem_config, dev2_socket_config) = build_configs();
427 | 
428 |     setup::run_test(
429 |         XskConfig {
430 |             frame_count: FRAME_COUNT.try_into().unwrap(),
431 |             umem_config: dev1_umem_config,
432 |             socket_config: dev1_socket_config,
433 |         },
434 |         XskConfig {
435 |             frame_count: FRAME_COUNT.try_into().unwrap(),
436 |             umem_config: dev2_umem_config,
437 |             socket_config: dev2_socket_config,
438 |         },
439 |         test,
440 |     )
441 |     .await;
442 | }
443 | 
--------------------------------------------------------------------------------
/tests/setup/mod.rs:
--------------------------------------------------------------------------------
1 | mod util;
2 | pub use util::PacketGenerator;
3 | 
4 | pub mod veth_setup;
5 | pub use veth_setup::{LinkIpAddr, VethDevConfig};
6 | 
7 | use std::{net::Ipv4Addr, num::NonZeroU32};
8 | use xsk_rs::{
9 |     config::{Interface, SocketConfig, UmemConfig},
10 |     socket::{RxQueue, Socket, TxQueue},
11 |     umem::{frame::FrameDesc, CompQueue, FillQueue, Umem},
12 | };
13 | 
14 | pub const ETHERNET_PACKET: [u8; 42] = [
15 |     0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf6, 0xe0, 0xf6, 0xc9, 0x60, 0x0a, 0x08, 0x06, 0x00, 0x01,
16 |     0x08, 0x00, 0x06, 0x04, 0x00, 0x01, 0xf6, 0xe0, 0xf6, 0xc9, 0x60, 0x0a, 0xc0, 0xa8, 0x45, 0x01,
17 |     0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0xa8, 0x45, 0xfe,
18 | ];
19 | 
20 | pub struct Xsk {
21 |     pub umem: Umem,
22 |     pub fq: FillQueue,
23 |     pub cq: CompQueue,
24 |     pub tx_q: TxQueue,
25 |     pub rx_q: RxQueue,
26 |     pub descs: Vec<FrameDesc>,
27 | }
28 | 
29 | #[derive(Debug, Clone)]
30 | pub struct XskConfig {
31 |     pub frame_count: NonZeroU32,
32 |     pub umem_config: UmemConfig,
33 |     pub socket_config: SocketConfig,
34 | }
35 | 
36 | pub fn default_veth_dev_configs() -> (VethDevConfig, VethDevConfig) {
37 |     let dev1_config = VethDevConfig::new(
38 |         "xsk_test_dev1".into(),
39 |         Some([0xf6, 0xe0, 0xf6, 0xc9, 0x60, 0x0a]),
40 |         Some(LinkIpAddr::new(Ipv4Addr::new(192, 168, 69, 1), 24)),
41 |     );
42 | 
43 |     let dev2_config = VethDevConfig::new(
44 |         "xsk_test_dev2".into(),
45 |         Some([0x4a, 0xf1, 0x30, 0xeb, 0x0d, 0x31]),
46 |         Some(LinkIpAddr::new(Ipv4Addr::new(192, 168, 69, 2), 24)),
47 |     );
48 | 
49 |     (dev1_config, dev2_config)
50 | }
51 | 
52 | pub fn build_socket_and_umem(
53 |     umem_config: UmemConfig,
54 |     socket_config: SocketConfig,
55 |     frame_count: NonZeroU32,
56 |     if_name: &Interface,
57 |     queue_id: u32,
58 | ) -> Xsk {
59 |     let (umem, descs) = Umem::new(umem_config, frame_count, false).expect("failed to build umem");
60 | 
61 |     let (tx_q, rx_q, fq_and_cq) = unsafe {
62 |         Socket::new(socket_config, &umem, if_name, queue_id).expect("failed to build socket")
63 |     };
64 | 
65 |     let (fq, cq) = fq_and_cq.expect(&format!(
66 |         "missing fill and comp queue - interface {:?} may already be bound to",
67 |         if_name
68 |     ));
69 | 
70 |     Xsk {
71 |         umem,
72 |         fq,
73 |         cq,
74 |         tx_q,
75 |         rx_q,
76 |         descs,
77 |     }
78 | }
79 | 
80 | pub async fn run_test<F>(xsk1_config: XskConfig, xsk2_config: XskConfig, test: F)
81 | where
82 |     F: Fn((Xsk, PacketGenerator), (Xsk, PacketGenerator)) + Send + 'static,
83 | {
84 |     let (dev1_config, dev2_config) = default_veth_dev_configs();
85 | 
86 |     let inner = move |dev1_config: VethDevConfig, dev2_config: VethDevConfig| {
87 |         let xsk1 = build_socket_and_umem(
88 |             xsk1_config.umem_config,
89 |             xsk1_config.socket_config,
90 |             xsk1_config.frame_count,
91 |             &dev1_config
92 |                 .if_name()
93 |                 .parse()
94 |                 .expect("failed to parse interface name"),
95 |             0,
96 |         );
97 | 
98 |         let xsk2 = build_socket_and_umem(
99 |             xsk2_config.umem_config,
100 |             xsk2_config.socket_config,
101 |             xsk2_config.frame_count,
102 |             &dev2_config
103 |                 .if_name()
104 |                 .parse()
105 |                 .expect("failed to parse interface name"),
106 |             0,
107 |         );
108 | 
109 |         let dev1_pkt_gen = PacketGenerator::new(dev1_config, dev2_config);
110 |         let dev2_pkt_gen = dev1_pkt_gen.clone().into_swapped();
111 | 
112 |         test((xsk1, dev1_pkt_gen), (xsk2, dev2_pkt_gen))
113 |     };
114 | 
115 |     veth_setup::run_with_veth_pair(inner, dev1_config, dev2_config)
116 |         .await
117 |         .unwrap();
118 | }
119 | 
120 | pub async fn run_test_with_dev_configs<F>(
121 |     xsk1_configs: (XskConfig, VethDevConfig),
122 |     xsk2_configs: (XskConfig, VethDevConfig),
123 |     test: F,
124 | ) where
125 |     F: Fn((Xsk, PacketGenerator), (Xsk, PacketGenerator)) + Send + 'static,
126 | {
127 |     let (xsk1_config, dev1_config) = xsk1_configs;
128 |     let (xsk2_config, dev2_config) = xsk2_configs;
129 | 
130 |     let inner = move |dev1_config: VethDevConfig, dev2_config: VethDevConfig| {
131 |         let xsk1 = build_socket_and_umem(
132 |             xsk1_config.umem_config,
133 |             xsk1_config.socket_config,
134 |             xsk1_config.frame_count,
135 |             &dev1_config
136 |                 .if_name()
137 |                 .parse()
138 |                 .expect("failed to parse interface name"),
139 |             0,
140 |         );
141 | 
142 |         let xsk2 = build_socket_and_umem(
143 |             xsk2_config.umem_config,
144 |             xsk2_config.socket_config,
145 |             xsk2_config.frame_count,
146 |             &dev2_config
147 |                 .if_name()
148 |                 .parse()
149 |                 .expect("failed to parse interface name"),
150 |             0,
151 |         );
152 | 
153 |         let dev1_pkt_gen = PacketGenerator::new(dev1_config, dev2_config);
154 |         let dev2_pkt_gen = dev1_pkt_gen.clone().into_swapped();
155 | 
156 |         test((xsk1, dev1_pkt_gen), (xsk2, dev2_pkt_gen))
157 |     };
158 | 
159 |     veth_setup::run_with_veth_pair(inner, dev1_config, dev2_config)
160 |         .await
161 |         .unwrap();
162 | }
163 | 
--------------------------------------------------------------------------------
/tests/setup/util.rs:
--------------------------------------------------------------------------------
1 | use etherparse::{err::packet::BuildWriteError, PacketBuilder};
2 | 
3 | use super::veth_setup::VethDevConfig;
4 | 
5 | #[derive(Debug, Clone)]
6 | pub struct PacketGenerator {
7 |     src: VethDevConfig,
8 |     dst: VethDevConfig,
9 | }
10 | 
11 | impl PacketGenerator {
12 |     pub fn new(src: VethDevConfig, dst: VethDevConfig) -> Self {
13 |         Self { src, dst }
14 |     }
15 | 
16 |     /// Generate an ETH frame w/ UDP as transport layer and payload size `payload_len`
17 |     pub fn generate_packet(
18 |         &self,
19 |         src_port: u16,
20 |         dst_port: u16,
21 |         payload_len: usize,
22 |     ) -> Result<Vec<u8>, BuildWriteError> {
23 |         let builder = PacketBuilder::ethernet2(
24 |             self.src.addr().unwrap(), // src mac
25 |             self.dst.addr().unwrap(), // dst mac
26 |         )
27 |         .ipv4(
28 |             self.src.ip_addr().unwrap().octets(), // src ip
29 |             self.dst.ip_addr().unwrap().octets(), // dst ip
30 |             20, // time to live
31 |         )
32 |         .udp(src_port, dst_port);
33 | 
34 |         let payload = generate_random_bytes(payload_len);
35 | 
36 |         let mut result = Vec::with_capacity(builder.size(payload.len()));
37 | 
38 |         builder.write(&mut result, &payload)?;
39 | 
40 |         Ok(result)
41 |     }
42 | 
43 |     /// Packet generator with `src` and `dst` swapped.
44 |     pub fn into_swapped(self) -> Self {
45 |         Self {
46 |             src: self.dst.clone(),
47 |             dst: self.src.clone(),
48 |         }
49 |     }
50 | }
51 | 
52 | fn generate_random_bytes(len: usize) -> Vec<u8> {
53 |     (0..len).map(|_| rand::random::<u8>()).collect()
54 | }
55 | 
--------------------------------------------------------------------------------
/tests/setup/veth_setup.rs:
--------------------------------------------------------------------------------
1 | use futures::stream::TryStreamExt;
2 | use rtnetlink::Handle;
3 | use std::net::{IpAddr, Ipv4Addr};
4 | use tokio::{runtime, task};
5 | 
6 | #[derive(Debug, Clone, Copy)]
7 | pub enum LinkStatus {
8 |     Up,
9 |     Down,
10 | }
11 | 
12 | pub struct VethDev {
13 |     handle: Handle,
14 |     index: u32,
15 |     if_name: String,
16 | }
17 | 
18 | impl VethDev {
19 |     pub fn if_name(&self) -> &str {
20 |         &self.if_name
21 |     }
22 | 
23 |     async fn set_status(&self, status: LinkStatus) -> anyhow::Result<()> {
24 |         Ok(match status {
25 |             LinkStatus::Up => {
26 |                 self.handle.link().set(self.index).up().execute().await?;
27 |             }
28 |             LinkStatus::Down => {
29 |                 self.handle.link().set(self.index).down().execute().await?;
30 |             }
31 |         })
32 |     }
33 | 
34 |     async fn set_addr(&self, addr: Vec<u8>) -> anyhow::Result<()> {
35 |         self.handle
36 |             .link()
37 |             .set(self.index)
38 |             .address(addr)
39 |             .execute()
40 |             .await?;
41 | 
42 |         Ok(())
43 |     }
44 | 
45 |     async fn set_ip_addr(&self, ip_addr: LinkIpAddr) -> anyhow::Result<()> {
46 |         self.handle
47 |             .address()
48 |             .add(
49 |                 self.index,
50 |                 IpAddr::V4(ip_addr.addr.clone()),
51 |                 ip_addr.prefix_len,
52 |             )
53 |             .execute()
54 |             .await?;
55 | 
56 |         Ok(())
57 |     }
58 | }
59 | 
60 | pub struct VethPair {
61 |     dev1: VethDev,
62 |     dev2: VethDev,
63 | }
64 | 
65 | impl VethPair {
66 |     pub async fn set_status(&self, status: LinkStatus) -> anyhow::Result<()> {
67 |         for dev in [&self.dev1, &self.dev2] {
68 |             dev.set_status(status).await?;
69 |         }
70 |         Ok(())
71 |     }
72 | 
73 |     pub fn dev1(&self) -> &VethDev {
74 |         &self.dev1
75 |     }
76 | 
77 |     pub fn dev2(&self) -> &VethDev {
78 |         &self.dev2
79 |     }
80 | }
81 | 
82 | impl Drop for VethPair {
83 |     fn drop(&mut self) {
84 |         let (handle, index, if_name) = (&self.dev1.handle, self.dev1.index, &self.dev1.if_name);
85 | 
86 |         let res = task::block_in_place(move || {
87 |             runtime::Handle::current()
88 |                 .block_on(async move { handle.link().del(index).execute().await })
89 |         });
90 | 
91 |         if let Err(e) = res {
92 |             eprintln!("failed to delete link: {:?} (you may need to delete it manually with 'sudo ip link del {}')", e, if_name);
93 |         }
94 |     }
95 | }
96 | 
97 | #[derive(Debug, Clone, Copy)]
98 | pub struct LinkIpAddr {
99 |     addr: Ipv4Addr,
100 |     prefix_len: u8,
101 | }
102 | 
103 | impl LinkIpAddr {
104 |     pub fn new(addr: Ipv4Addr, prefix_len: u8) -> Self {
105 |         LinkIpAddr { addr, prefix_len }
106 |     }
107 | 
108 |     pub fn octets(&self) -> [u8; 4] {
109 |         self.addr.octets()
110 |     }
111 | }
112 | 
113 | #[derive(Clone, Debug)]
114 | pub struct VethDevConfig {
115 |     if_name: String,
116 |     addr: Option<[u8; 6]>,
117 |     ip_addr: Option<LinkIpAddr>,
118 | }
119 | 
120 | impl VethDevConfig {
121 |     pub fn new(if_name: String, addr: Option<[u8; 6]>, ip_addr: Option<LinkIpAddr>) -> Self {
122 |         Self {
123 |             if_name,
124 |             addr,
125 |             ip_addr,
126 |         }
127 |     }
128 | 
129 |     pub fn if_name(&self) -> &str {
130 |         &self.if_name
131 |     }
132 | 
133 |     pub fn addr(&self) -> Option<[u8; 6]> {
134 |         self.addr
135 |     }
136 | 
137 |     pub fn ip_addr(&self) -> Option<LinkIpAddr> {
138 |         self.ip_addr
139 |     }
140 | }
141 | 
142 | async fn get_link_index(handle: &Handle, name: &str) -> anyhow::Result<u32> {
143 |     Ok(handle
144 |         .link()
145 |         .get()
146 |         .match_name(name.into())
147 |         .execute()
148 |         .try_next()
149 |         .await?
150 |         .expect(format!("no link with name {} found", name).as_str())
151 |         .header
152 |         .index)
153 | }
154 | 
155 | pub async fn build_veth_pair(
156 |     dev1_config: &VethDevConfig,
157 |     dev2_config: &VethDevConfig,
158 | ) -> anyhow::Result<VethPair> {
159 |     let (connection, handle, _) = rtnetlink::new_connection().unwrap();
160 | 
161 |     tokio::spawn(connection);
162 | 
163 |     handle
164 |         .link()
165 |         .add()
166 |         .veth(dev1_config.if_name.clone(), dev2_config.if_name.clone())
167 |         .execute()
168 |         .await?;
169 | 
170 |     let dev1_index = get_link_index(&handle, &dev1_config.if_name).await.expect(
171 |         format!(
172 |             "failed to retrieve index for dev1, delete link manually: 'sudo ip link del {}'",
173 |             dev1_config.if_name
174 |         )
175 |         .as_str(),
176 |     );
177 | 
178 |     let dev2_index = get_link_index(&handle, &dev2_config.if_name).await.expect(
179 |         format!(
180 |             "failed to retrieve index for dev2, delete link manually: 'sudo ip link del {}'",
181 |             dev1_config.if_name
182 |         )
183 |         .as_str(),
184 |     );
185 | 
186 |     let veth_pair = VethPair {
187 |         dev1: VethDev {
188 |             handle: handle.clone(),
189 |             index: dev1_index,
190 |             if_name: dev1_config.if_name.clone(),
191 |         },
192 |         dev2: VethDev {
193 |             handle: handle.clone(),
194 |             index: dev2_index,
195 |             if_name: dev2_config.if_name.clone(),
196 |         },
197 |     };
198 | 
199 |     for (d, c) in [
200 |         (&veth_pair.dev1, dev1_config),
201 |         (&veth_pair.dev2, dev2_config),
202 |     ] {
203 |         if let Some(addr) = c.addr {
204 |             d.set_addr(addr.into()).await?;
205 |         }
206 |         if let Some(ip_addr) = c.ip_addr {
207 |             d.set_ip_addr(ip_addr).await?;
208 |         }
209 |     }
210 | 
211 |     Ok(veth_pair)
212 | }
213 | 
214 | pub async fn run_with_veth_pair<F>(
215 |     f: F,
216 |     dev1_config: VethDevConfig,
217 |     dev2_config: VethDevConfig,
218 | ) -> anyhow::Result<()>
219 | where
220 |     F: FnOnce(VethDevConfig, VethDevConfig) + Send + 'static,
221 | {
222 |     let veth_pair = build_veth_pair(&dev1_config, &dev2_config).await.unwrap();
223 | 
224 |     veth_pair.set_status(LinkStatus::Up).await?;
225 | 
226 |     let res = task::spawn_blocking(move || f(dev1_config, dev2_config)).await;
227 | 
228 |     veth_pair.set_status(LinkStatus::Down).await?;
229 | 
230 |     Ok(res?)
231 | }
232 | 
--------------------------------------------------------------------------------
/tests/tx_queue_tests.rs:
--------------------------------------------------------------------------------
1 | #[allow(dead_code)]
2 | mod setup;
3 | use std::convert::TryInto;
4 | 
5 | use setup::Xsk;
6 | 
7 | use serial_test::serial;
8 | use xsk_rs::config::{QueueSize, SocketConfig, UmemConfig};
9 | 
10 | use crate::setup::{PacketGenerator, XskConfig};
11 | 
12 | const TX_Q_SIZE: u32 = 4;
13 | const FRAME_COUNT: u32 = 8;
14 | 
15 | fn build_configs() -> (UmemConfig, SocketConfig) {
16 |     let umem_config = UmemConfig::default();
17 | 
18 |     let socket_config = SocketConfig::builder()
19 |         .tx_queue_size(QueueSize::new(TX_Q_SIZE).unwrap())
20 |         .build();
21 | 
22 |     (umem_config, socket_config)
23 | }
24 | 
25 | #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
26 | #[serial]
27 | async fn producing_tx_size_frames_is_ok() {
28 |     fn test(dev1: (Xsk, PacketGenerator), _dev2: (Xsk, PacketGenerator)) {
29 |         let mut xsk1 = dev1.0;
30 | 
31 |         assert_eq!(unsafe { xsk1.tx_q.produce(&xsk1.descs[..4]) }, 4);
32 |     }
33 | 
34 |     build_configs_and_run_test(test).await
35 | }
36 | 
37 | #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
38 | #[serial]
39 | async fn produce_greater_than_tx_size_frames_fails() {
40 |     fn test(dev1: (Xsk, PacketGenerator), _dev2: (Xsk, PacketGenerator)) {
41 |         let mut xsk1 = dev1.0;
42 | 
43 |         assert_eq!(unsafe { xsk1.tx_q.produce(&xsk1.descs[..5]) }, 0);
44 |     }
45 | 
46 |     build_configs_and_run_test(test).await
47 | }
48 | 
49 | #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
50 | #[serial]
51 | async fn produce_frames_until_full() {
52 |     fn test(dev1: (Xsk, PacketGenerator), _dev2: (Xsk, PacketGenerator)) {
53 |         let mut xsk1 = dev1.0;
54 | 
55 |         unsafe {
56 |             assert_eq!(xsk1.tx_q.produce(&xsk1.descs[..2]), 2);
57 |             assert_eq!(xsk1.tx_q.produce(&xsk1.descs[2..3]), 1);
58 |             assert_eq!(xsk1.tx_q.produce(&xsk1.descs[3..8]), 0);
59 |             assert_eq!(xsk1.tx_q.produce(&xsk1.descs[3..4]), 1);
60 |         }
61 |     }
62 | 
63 |     build_configs_and_run_test(test).await
64 | }
65 | 
66 | #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
67 | #[serial]
68 | async fn produce_one_is_ok() {
69 |     fn test(dev1: (Xsk, PacketGenerator), _dev2: (Xsk, PacketGenerator)) {
70 |         let mut xsk1 = dev1.0;
71 | 
72 |         assert_eq!(unsafe { xsk1.tx_q.produce_one(&xsk1.descs[0]) }, 1);
73 |     }
74 | 
75 |     build_configs_and_run_test(test).await
76 | }
77 | 
78 | async fn build_configs_and_run_test<F>(test: F)
79 | where
80 |     F: Fn((Xsk, PacketGenerator), (Xsk, PacketGenerator)) + Send + 'static,
81 | {
82 |     let (dev1_umem_config, dev1_socket_config) = build_configs();
83 |     let (dev2_umem_config, dev2_socket_config) = build_configs();
84 | 
85 |     setup::run_test(
86 |         XskConfig {
87 |             frame_count: FRAME_COUNT.try_into().unwrap(),
88 |             umem_config: dev1_umem_config,
89 |             socket_config: dev1_socket_config,
90 |         },
91 |         XskConfig {
92 |             frame_count: FRAME_COUNT.try_into().unwrap(),
93 |             umem_config: dev2_umem_config,
94 |             socket_config: dev2_socket_config,
95 |         },
96 |         test,
97 |     )
98 |     .await;
99 | }
100 | 
--------------------------------------------------------------------------------
/tests/umem_tests.rs:
--------------------------------------------------------------------------------
1 | #[allow(dead_code)]
2 | mod setup;
3 | use setup::{veth_setup, VethDevConfig, Xsk, ETHERNET_PACKET};
4 | 
5 | use serial_test::serial;
6 | use std::{convert::TryInto, io::Write};
7 | use xsk_rs::{
8 |     config::{LibxdpFlags, SocketConfig, UmemConfig},
9 |     Socket, Umem,
10 | };
11 | 
12 | #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
13 | #[serial]
14 | async fn shared_umem_returns_new_fq_and_cq_when_sockets_are_bound_to_different_devices() {
15 |     let inner = move |dev1_config: VethDevConfig, dev2_config: VethDevConfig| {
16 |         let frame_count = 64;
17 | 
18 |         let (umem, descs) = Umem::new(
19 |             UmemConfig::default(),
20 |             frame_count.try_into().unwrap(),
21 |             false,
22 |         )
23 |         .unwrap();
24 | 
25 |         let mut sender_descs = descs;
26 |         let receiver_descs = sender_descs.drain((frame_count / 2) as usize..).collect();
27 | 
28 |         let (sender_tx_q, sender_rx_q, sender_fq_and_cq) = unsafe {
29 |             Socket::new(
30 |                 SocketConfig::default(),
31 |                 &umem,
32 |                 &dev1_config.if_name().parse().unwrap(),
33 |                 0,
34 |             )
35 |         }
36 |         .unwrap();
37 | 
38 |         let (sender_fq, sender_cq) = sender_fq_and_cq.unwrap();
39 | 
40 |         let mut sender = Xsk {
41 |             umem: umem.clone(),
42 |             fq: sender_fq,
43 |             cq: sender_cq,
44 |             tx_q: sender_tx_q,
45 |             rx_q: sender_rx_q,
46 |             descs: sender_descs,
47 |         };
48 | 
49 |         let (receiver_tx_q, receiver_rx_q, receiver_fq_and_cq) = unsafe {
50 |             Socket::new(
51 |                 SocketConfig::default(),
52 |                 &umem,
53 |                 &dev2_config.if_name().parse().unwrap(),
54 |                 0,
55 |             )
56 |         }
57 |         .unwrap();
58 | 
59 |         let (receiver_fq, receiver_cq) = receiver_fq_and_cq.unwrap();
60 | 
61 |         let mut receiver = Xsk {
62 |             umem,
63 |             fq: receiver_fq,
64 |             cq: receiver_cq,
65 |             tx_q: receiver_tx_q,
66 |             rx_q: receiver_rx_q,
67 |             descs: receiver_descs,
68 |         };
69 | 
70 |         send_and_receive_pkt(&mut sender, &mut receiver, &ETHERNET_PACKET[..]);
71 |     };
72 | 
73 |     let (dev1_config, dev2_config) = setup::default_veth_dev_configs();
74 | 
75 |     veth_setup::run_with_veth_pair(inner, dev1_config, dev2_config)
76 |         .await
77 |         .unwrap();
78 | }
79 | 
80 | #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
81 | #[serial]
82 | async fn shared_umem_does_not_return_new_fq_and_cq_when_sockets_are_bound_to_same_device() {
83 |     let inner = move |dev1_config: VethDevConfig, _dev2_config: VethDevConfig| {
84 |         let (umem, _frames) =
85 |             Umem::new(UmemConfig::default(), 64.try_into().unwrap(), false).unwrap();
86 | 
87 |         let (_sender_tx_q, _sender_rx_q, sender_fq_and_cq) = unsafe {
88 |             Socket::new(
89 |                 SocketConfig::builder()
90 |                     .libxdp_flags(LibxdpFlags::XSK_LIBXDP_FLAGS_INHIBIT_PROG_LOAD)
91 |                     .build(),
92 |                 &umem,
93 |                 &dev1_config.if_name().parse().unwrap(),
94 |                 0,
95 |             )
96 |         }
97 |         .unwrap();
98 | 
99 |         assert!(sender_fq_and_cq.is_some());
100 | 
101 |         let (_receiver_tx_q, _receiver_rx_q, receiver_fq_and_cq) = unsafe {
102 |             Socket::new(
103 |                 SocketConfig::builder()
104 |                     .libxdp_flags(LibxdpFlags::XSK_LIBXDP_FLAGS_INHIBIT_PROG_LOAD)
105 |                     .build(),
106 |                 &umem,
107 |                 &dev1_config.if_name().parse().unwrap(),
108 |                 0,
109 |             )
110 |         }
111 |         .unwrap();
112 | 
113 |         assert!(receiver_fq_and_cq.is_none());
114 |     };
115 | 
116 |     let (dev1_config, dev2_config) = setup::default_veth_dev_configs();
117 | 
118 |     veth_setup::run_with_veth_pair(inner, dev1_config, dev2_config)
119 |         .await
120 |         .unwrap();
121 | }
122 | 
123 | #[tokio::test]
124 | #[serial]
125 | async fn writing_to_frame_and_reading_works_as_expected() {
126 |     let (umem, mut descs) = Umem::new(
127 |         UmemConfig::builder().frame_headroom(32).build().unwrap(),
128 |         64.try_into().unwrap(),
129 |         false,
130 |     )
131 |     .unwrap();
132 | 
133 |     unsafe {
134 |         let (mut h, mut d) = umem.frame_mut(&mut descs[0]);
135 | 
136 |         h.cursor().write_all(b"hello").unwrap();
137 |         d.cursor().write_all(b"world").unwrap();
138 | 
139 |         assert_eq!(umem.headroom(&descs[0]).contents(), b"hello");
140 |         assert_eq!(umem.headroom_mut(&mut descs[0]).contents(), b"hello");
141 | 
142 |         assert_eq!(umem.data(&descs[0]).contents(), b"world");
143 |         assert_eq!(umem.data_mut(&mut descs[0]).contents(), b"world");
144 |     }
145 | }
146 | 
147 | fn send_and_receive_pkt(sender: &mut Xsk, receiver: &mut Xsk, pkt: &[u8]) {
148 |     unsafe {
149 |         assert_eq!(
150 |             receiver
151 |                 .fq
152 |                 .produce_and_wakeup(&receiver.descs[0..1], receiver.rx_q.fd_mut(), 100)
153 |                 .unwrap(),
154 |             1
155 |         );
156 | 
157 |         sender
158 |             .umem
159 |             .data_mut(&mut sender.descs[0])
160 |             .cursor()
161 |             .write_all(pkt)
162 |             .unwrap();
163 | 
164 |         loop {
165 |             if sender.tx_q.produce_and_wakeup(&sender.descs[..1]).unwrap() == 1 {
166 |                 break;
167 |             }
168 |         }
169 | 
170 |         loop {
171 |             if receiver
172 |                 .rx_q
173 |                 .poll_and_consume(&mut receiver.descs[1..2], 100)
174 |                 .unwrap()
175 |                 == 1
176 |             {
177 |                 break;
178 |             }
179 |         }
180 | 
181 |         assert_eq!(sender.cq.consume(&mut sender.descs[1..2]), 1);
182 | 
183 |         // Check that:
184 |         // 1. Data received matches
185 |         // 2. Address consumed in rx queue is address of frame added to fill queue
186 |         // 3. Address consumed in comp queue is address of frame written to
187 | 
188 |         assert_eq!(receiver.umem.data(&receiver.descs[1]).contents(), pkt);
189 |         assert_eq!(receiver.descs[1].addr(), receiver.descs[0].addr());
190 |         assert_eq!(sender.descs[1].addr(), sender.descs[0].addr());
191 |     }
192 | }
193 | 
--------------------------------------------------------------------------------