├── .gitignore ├── Cargo.toml ├── README.md ├── rte-build ├── Cargo.toml ├── patches │ └── librte_bbdev.patch └── src │ ├── build.rs │ ├── cargo.rs │ ├── cpu.rs │ ├── gcc.rs │ ├── lib.rs │ └── rte.rs ├── rte-sys ├── Cargo.toml ├── build.rs └── src │ ├── config.rs │ ├── lib.rs │ ├── raw.rs │ ├── rte.h │ ├── stub.c │ └── stub.h ├── rte ├── Cargo.toml ├── build.rs ├── examples │ ├── bond │ │ └── main.rs │ ├── cmdline │ │ └── main.rs │ ├── ethtool │ │ ├── ethapp.rs │ │ ├── ethtool.rs │ │ └── main.rs │ ├── helloworld │ │ └── main.rs │ ├── kni │ │ ├── kni_core.c │ │ └── main.rs │ └── l2fwd │ │ ├── l2fwd_core.c │ │ └── main.rs ├── runtest.sh └── src │ ├── arp.rs │ ├── bond.rs │ ├── cmdline.rs │ ├── common │ ├── bitmap.rs │ ├── byteorder.rs │ ├── config.rs │ ├── cycles.rs │ ├── debug.rs │ ├── dev.rs │ ├── devargs.rs │ ├── eal.rs │ ├── keepalive.rs │ ├── launch.rs │ ├── lcore.rs │ ├── log.rs │ ├── malloc.rs │ ├── memory.rs │ ├── memzone.rs │ ├── mod.rs │ ├── rand.rs │ ├── spinlock.rs │ └── version.rs │ ├── errors.rs │ ├── ethdev.rs │ ├── ether.rs │ ├── ffi.rs │ ├── ip.rs │ ├── kni.rs │ ├── lib.rs │ ├── macros.rs │ ├── mbuf.rs │ ├── mempool.rs │ ├── pci.rs │ ├── ring.rs │ ├── tests.rs │ └── utils.rs └── rustfmt.toml /.gitignore: -------------------------------------------------------------------------------- 1 | target 2 | Cargo.lock 3 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = ["rte-build", "rte-sys", "rte"] 3 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # rust-dpdk 2 | 3 | Rust-Dpdk is an experimental prototype to wrap [DPDK](http://dpdk.org/) API with Rust language. 
4 | 5 | ## Build 6 | 7 | First, please follow [the official document](http://dpdk.org/doc/guides/linux_gsg/build_dpdk.html) to set up a DPDK development environment. 8 | 9 | ``` 10 | $ CONFIG_RTE_BUILD_COMBINE_LIBS=y EXTRA_CFLAGS="-fPIC -O0 -g -ggdb" make install T=x86_64-native-linuxapp-gcc -j 4 11 | ``` 12 | 13 | And build `rust-dpdk` with `RTE_SDK` environment variable: 14 | 15 | ``` 16 | $ RTE_SDK=<path-to-dpdk-sdk> cargo build 17 | ``` 18 | 19 | ## Examples 20 | 21 | ```rust 22 | extern crate rte; 23 | 24 | use std::env; 25 | use std::ptr; 26 | use std::os::raw::c_void; 27 | 28 | use rte::*; 29 | 30 | extern "C" fn lcore_hello(_: *const c_void) -> i32 { 31 | println!("hello from core {}", lcore::id().unwrap()); 32 | 33 | 0 34 | } 35 | 36 | fn main() { 37 | let args: Vec<String> = env::args().collect(); 38 | 39 | eal::init(&args).expect("Cannot init EAL"); 40 | 41 | // call lcore_hello() on every slave lcore 42 | lcore::foreach_slave(|lcore_id| { 43 | launch::remote_launch(lcore_hello, None, lcore_id).expect("Cannot launch task"); 44 | }); 45 | 46 | // call it on master lcore too 47 | lcore_hello(ptr::null()); 48 | 49 | launch::mp_wait_lcore(); 50 | } 51 | ``` 52 | 53 | Please check [l2fwd](rte/examples/l2fwd/main.rs) example for details. 
54 | 55 | ``` 56 | $ sudo RTE_SDK= cargo run --example l2fwd -- --log-level 8 -v -c f -- -p f 57 | ``` 58 | -------------------------------------------------------------------------------- /rte-build/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "rte-build" 3 | version = "18.11.0" 4 | authors = ["Flier Lu "] 5 | description = "FFI bindings to DPDK" 6 | 7 | [dependencies] 8 | log = "0.4" 9 | lazy_static = "1.2" 10 | raw-cpuid = "6.1" 11 | itertools = "0.8" 12 | num_cpus = "1.0" 13 | cc = "1.0" 14 | -------------------------------------------------------------------------------- /rte-build/patches/librte_bbdev.patch: -------------------------------------------------------------------------------- 1 | --- lib/librte_bbdev/rte_bbdev_version.map 2019-04-08 23:28:53.000000000 +0800 2 | +++ lib/librte_bbdev/rte_bbdev_version.map.patched 2019-05-30 12:49:13.000000000 +0800 3 | @@ -6,11 +6,11 @@ 4 | rte_bbdev_callback_unregister; 5 | rte_bbdev_close; 6 | rte_bbdev_count; 7 | - rte_bbdev_dequeue_dec_ops; 8 | - rte_bbdev_dequeue_enc_ops; 9 | + #rte_bbdev_dequeue_dec_ops; 10 | + #rte_bbdev_dequeue_enc_ops; 11 | rte_bbdev_devices; 12 | - rte_bbdev_enqueue_dec_ops; 13 | - rte_bbdev_enqueue_enc_ops; 14 | + #rte_bbdev_enqueue_dec_ops; 15 | + #rte_bbdev_enqueue_enc_ops; 16 | rte_bbdev_find_next; 17 | rte_bbdev_get_named_dev; 18 | rte_bbdev_info_get; 19 | -------------------------------------------------------------------------------- /rte-build/src/build.rs: -------------------------------------------------------------------------------- 1 | use std::env; 2 | use std::path::Path; 3 | use std::process::Command; 4 | 5 | use num_cpus; 6 | 7 | pub fn build_dpdk(rte_sdk: &Path, rte_target: &str) { 8 | let debug_mode = env::var("DEBUG") 9 | .map(|s| s.parse().unwrap_or_default()) 10 | .unwrap_or_default(); 11 | 12 | info!( 13 | "building {} mode DPDK {} @ {:?}", 14 | if debug_mode { "debug" } else { "release" }, 15 | 
rte_target, 16 | rte_sdk 17 | ); 18 | 19 | Command::new("make") 20 | .arg("install") 21 | .arg(format!("T={}", rte_target)) 22 | .args(&["-j", &num_cpus::get().to_string()]) 23 | .env("CONFIG_RTE_BUILD_COMBINE_LIBS", "y") 24 | .env( 25 | "EXTRA_CFLAGS", 26 | if debug_mode { 27 | "-fPIC -fkeep-inline-functions -O0 -g -ggdb" 28 | } else { 29 | "-fPIC -fkeep-inline-functions -O" 30 | }, 31 | ) 32 | .current_dir(rte_sdk) 33 | .status() 34 | .unwrap_or_else(|e| panic!("failed to build DPDK: {}", e)); 35 | } 36 | -------------------------------------------------------------------------------- /rte-build/src/cargo.rs: -------------------------------------------------------------------------------- 1 | use std::env; 2 | use std::path::PathBuf; 3 | 4 | lazy_static! { 5 | pub static ref OUT_DIR: PathBuf = env::var("OUT_DIR").unwrap().into(); 6 | } 7 | 8 | pub fn gen_cargo_config<S: AsRef<str>>(rte_sdk_dir: &PathBuf, libs: impl Iterator<Item = S>) { 9 | for lib in libs { 10 | println!("cargo:rustc-link-lib=static={}", lib.as_ref()); 11 | } 12 | 13 | println!( 14 | "cargo:rustc-link-search=native={}", 15 | rte_sdk_dir.join("lib").to_str().unwrap() 16 | ); 17 | println!("cargo:include={}", rte_sdk_dir.join("include").to_str().unwrap()); 18 | } 19 | -------------------------------------------------------------------------------- /rte-build/src/cpu.rs: -------------------------------------------------------------------------------- 1 | use std::iter; 2 | 3 | use raw_cpuid; 4 | 5 | pub fn gen_cpu_features() -> impl Iterator<Item = (&'static str, Option<String>)> { 6 | let mut cflags = vec![]; 7 | let mut compile_time_cpuflags = vec![]; 8 | 9 | let cpuid = raw_cpuid::CpuId::new(); 10 | 11 | if let Some(features) = cpuid.get_feature_info() { 12 | if features.has_sse() { 13 | cflags.push("RTE_MACHINE_CPUFLAG_SSE"); 14 | compile_time_cpuflags.push("RTE_CPUFLAG_SSE"); 15 | } 16 | if features.has_sse2() { 17 | cflags.push("RTE_MACHINE_CPUFLAG_SSE2"); 18 | compile_time_cpuflags.push("RTE_CPUFLAG_SSE2"); 19 | } 20 | if features.has_sse3() { 21 | 
cflags.push("RTE_MACHINE_CPUFLAG_SSE3"); 22 | compile_time_cpuflags.push("RTE_CPUFLAG_SSE3"); 23 | } 24 | if features.has_ssse3() { 25 | cflags.push("RTE_MACHINE_CPUFLAG_SSSE3"); 26 | compile_time_cpuflags.push("RTE_CPUFLAG_SSSE3"); 27 | } 28 | if features.has_sse41() { 29 | cflags.push("RTE_MACHINE_CPUFLAG_SSE4_1"); 30 | compile_time_cpuflags.push("RTE_CPUFLAG_SSE4_1"); 31 | } 32 | if features.has_sse42() { 33 | cflags.push("RTE_MACHINE_CPUFLAG_SSE4_2"); 34 | compile_time_cpuflags.push("RTE_CPUFLAG_SSE4_2"); 35 | } 36 | if features.has_aesni() { 37 | cflags.push("RTE_MACHINE_CPUFLAG_AES"); 38 | compile_time_cpuflags.push("RTE_CPUFLAG_AES"); 39 | } 40 | if features.has_pclmulqdq() { 41 | cflags.push("RTE_MACHINE_CPUFLAG_PCLMULQDQ"); 42 | compile_time_cpuflags.push("RTE_CPUFLAG_PCLMULQDQ"); 43 | } 44 | if features.has_avx() { 45 | cflags.push("RTE_MACHINE_CPUFLAG_AVX"); 46 | compile_time_cpuflags.push("RTE_CPUFLAG_AVX"); 47 | } 48 | if features.has_rdrand() { 49 | cflags.push("RTE_MACHINE_CPUFLAG_RDRAND"); 50 | } 51 | if features.has_f16c() { 52 | cflags.push("RTE_MACHINE_CPUFLAG_F16C"); 53 | } 54 | } 55 | 56 | if let Some(features) = cpuid.get_extended_feature_info() { 57 | if features.has_fsgsbase() { 58 | cflags.push("RTE_MACHINE_CPUFLAG_FSGSBASE"); 59 | } 60 | if features.has_avx2() { 61 | cflags.push("RTE_MACHINE_CPUFLAG_AVX2"); 62 | compile_time_cpuflags.push("RTE_CPUFLAG_AVX2"); 63 | } 64 | if features.has_avx512f() { 65 | cflags.push("RTE_MACHINE_CPUFLAG_AVX512F"); 66 | compile_time_cpuflags.push("RTE_CPUFLAG_AVX512F"); 67 | } 68 | } 69 | 70 | cflags.into_iter().map(|s| (s, None)).chain(iter::once(( 71 | "RTE_COMPILE_TIME_CPUFLAGS", 72 | Some(itertools::join(compile_time_cpuflags, ",")), 73 | ))) 74 | } 75 | -------------------------------------------------------------------------------- /rte-build/src/gcc.rs: -------------------------------------------------------------------------------- 1 | use std::path::Path; 2 | 3 | use cc; 4 | 5 | use 
crate::gen_cpu_features; 6 | 7 | pub fn gcc_rte_config(rte_sdk_dir: &Path) -> cc::Build { 8 | let mut build = cc::Build::new(); 9 | 10 | build 11 | .include(rte_sdk_dir.join("include")) 12 | .flag("-march=native") 13 | .cargo_metadata(true); 14 | 15 | for (name, value) in gen_cpu_features() { 16 | let define = if let Some(value) = value { 17 | format!("-D{}={}", name, value) 18 | } else { 19 | format!("-D{}", name) 20 | }; 21 | 22 | build.flag(&define); 23 | } 24 | 25 | build 26 | } 27 | -------------------------------------------------------------------------------- /rte-build/src/lib.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | extern crate log; 3 | #[macro_use] 4 | extern crate lazy_static; 5 | extern crate cc; 6 | extern crate itertools; 7 | extern crate num_cpus; 8 | extern crate raw_cpuid; 9 | 10 | mod build; 11 | mod cargo; 12 | mod cpu; 13 | mod gcc; 14 | mod rte; 15 | 16 | pub use crate::build::build_dpdk; 17 | pub use crate::cargo::{gen_cargo_config, OUT_DIR}; 18 | pub use crate::cpu::gen_cpu_features; 19 | pub use crate::gcc::gcc_rte_config; 20 | pub use crate::rte::*; 21 | -------------------------------------------------------------------------------- /rte-build/src/rte.rs: -------------------------------------------------------------------------------- 1 | use std::env; 2 | use std::env::consts::*; 3 | use std::fs::File; 4 | use std::io::{BufRead, BufReader, Write}; 5 | use std::path::{Path, PathBuf}; 6 | use std::process::{Command, Stdio}; 7 | use std::str::FromStr; 8 | 9 | pub const MACHINE: &str = "native"; 10 | pub const TOOLCHAIN: &str = "gcc"; 11 | 12 | lazy_static! 
{ 13 | pub static ref RTE_SDK: PathBuf = env::var("RTE_SDK") 14 | .expect("RTE_SDK - Points to the DPDK installation directory.") 15 | .into(); 16 | pub static ref RTE_ARCH: String = env::var("RTE_ARCH").unwrap_or_else(|_| ARCH.to_owned()); 17 | pub static ref RTE_MACHINE: String = env::var("RTE_MACHINE").unwrap_or_else(|_| MACHINE.to_owned()); 18 | pub static ref RTE_OS: String = env::var("RTE_OS").unwrap_or_else(|_| OS.to_owned()); 19 | pub static ref RTE_TOOLCHAIN: String = env::var("RTE_TOOLCHAIN").unwrap_or_else(|_| TOOLCHAIN.to_owned()); 20 | pub static ref RTE_TARGET: String = env::var("RTE_TARGET") 21 | .unwrap_or_else(|_| format!("{}-{}-{}app-{}", *RTE_ARCH, *RTE_MACHINE, *RTE_OS, *RTE_TOOLCHAIN,)); 22 | pub static ref RTE_CORE_LIBS: Vec<&'static str> = vec![ 23 | "rte_acl", 24 | "rte_bbdev", 25 | "rte_bitratestats", 26 | "rte_bpf", 27 | "rte_bus_dpaa", 28 | "rte_bus_fslmc", 29 | "rte_bus_ifpga", 30 | "rte_bus_pci", 31 | "rte_bus_vdev", 32 | "rte_bus_vmbus", 33 | "rte_cfgfile", 34 | "rte_cmdline", 35 | "rte_common_cpt", 36 | "rte_common_dpaax", 37 | "rte_common_octeontx", 38 | "rte_compressdev", 39 | "rte_cryptodev", 40 | "rte_distributor", 41 | "rte_eal", 42 | "rte_efd", 43 | "rte_ethdev", 44 | "rte_eventdev", 45 | "rte_flow_classify", 46 | "rte_gro", 47 | "rte_gso", 48 | "rte_hash", 49 | "rte_ip_frag", 50 | "rte_jobstats", 51 | "rte_kni", 52 | "rte_kvargs", 53 | "rte_latencystats", 54 | "rte_lpm", 55 | "rte_mbuf", 56 | "rte_member", 57 | "rte_mempool", 58 | "rte_mempool_bucket", 59 | "rte_mempool_dpaa", 60 | "rte_mempool_dpaa2", 61 | "rte_mempool_octeontx", 62 | "rte_mempool_ring", 63 | "rte_mempool_stack", 64 | "rte_meter", 65 | "rte_metrics", 66 | "rte_net", 67 | "rte_pci", 68 | "rte_pdump", 69 | "rte_pipeline", 70 | "rte_port", 71 | "rte_power", 72 | "rte_rawdev", 73 | "rte_reorder", 74 | "rte_ring", 75 | "rte_sched", 76 | "rte_security", 77 | "rte_table", 78 | "rte_timer", 79 | "rte_vhost", 80 | ]; 81 | pub static ref RTE_PMD_LIBS: Vec<&'static str> 
= vec![ 82 | "rte_pmd_af_packet", 83 | "rte_pmd_ark", 84 | "rte_pmd_atlantic", 85 | "rte_pmd_avf", 86 | "rte_pmd_avp", 87 | "rte_pmd_axgbe", 88 | "rte_pmd_bbdev_null", 89 | "rte_pmd_bnxt", 90 | "rte_pmd_bond", 91 | "rte_pmd_caam_jr", 92 | "rte_pmd_crypto_scheduler", 93 | "rte_pmd_cxgbe", 94 | "rte_pmd_dpaa", 95 | "rte_pmd_dpaa2", 96 | "rte_pmd_dpaa2_cmdif", 97 | "rte_pmd_dpaa2_event", 98 | "rte_pmd_dpaa2_qdma", 99 | "rte_pmd_dpaa2_sec", 100 | "rte_pmd_dpaa_event", 101 | "rte_pmd_dpaa_sec", 102 | "rte_pmd_dsw_event", 103 | "rte_pmd_e1000", 104 | "rte_pmd_ena", 105 | "rte_pmd_enetc", 106 | "rte_pmd_enic", 107 | "rte_pmd_failsafe", 108 | "rte_pmd_fm10k", 109 | "rte_pmd_i40e", 110 | "rte_pmd_ifc", 111 | "rte_pmd_ifpga_rawdev", 112 | "rte_pmd_ixgbe", 113 | "rte_pmd_kni", 114 | "rte_pmd_lio", 115 | "rte_pmd_netvsc", 116 | "rte_pmd_nfp", 117 | "rte_pmd_null", 118 | "rte_pmd_null_crypto", 119 | "rte_pmd_octeontx", 120 | "rte_pmd_octeontx_crypto", 121 | "rte_pmd_octeontx_ssovf", 122 | "rte_pmd_octeontx_zip", 123 | "rte_pmd_opdl_event", 124 | "rte_pmd_qat", 125 | "rte_pmd_qede", 126 | "rte_pmd_ring", 127 | "rte_pmd_sfc_efx", 128 | "rte_pmd_skeleton_event", 129 | "rte_pmd_skeleton_rawdev", 130 | "rte_pmd_softnic", 131 | "rte_pmd_sw_event", 132 | "rte_pmd_tap", 133 | "rte_pmd_thunderx_nicvf", 134 | "rte_pmd_vdev_netvsc", 135 | "rte_pmd_vhost", 136 | "rte_pmd_virtio", 137 | "rte_pmd_virtio_crypto", 138 | "rte_pmd_vmxnet3_uio", 139 | ]; 140 | pub static ref RTE_DEPS_LIBS: Vec<&'static str> = vec!["numa"]; 141 | } 142 | 143 | pub fn gen_rte_config(rte_sdk_dir: &Path, dest_path: &Path) { 144 | let config_file = rte_sdk_dir.join(".config"); 145 | 146 | info!("generating DPDK config base on {:?}", config_file); 147 | 148 | let mut f = File::create(&dest_path).unwrap(); 149 | 150 | writeln!( 151 | &mut f, 152 | "/* automatically generated by {} v{}, DON'T EDIT IT */\n", 153 | env::var("CARGO_PKG_NAME").unwrap(), 154 | env::var("CARGO_PKG_VERSION").unwrap(), 155 | ) 156 | .unwrap(); 
157 | 158 | let r = BufReader::new(File::open(&config_file).expect("RTE config file")); 159 | 160 | for line in r.lines().flat_map(|line| line) { 161 | if line.starts_with('#') { 162 | writeln!(&mut f, "///{}", &line[1..]).unwrap(); 163 | } else { 164 | let mut i = line.splitn(2, '='); 165 | let key = i.next().expect("key"); 166 | let value = i.next().expect("value"); 167 | 168 | match value { 169 | "" => { 170 | writeln!(&mut f, "pub const {}: () = ();", key).unwrap(); 171 | } 172 | "y" => { 173 | writeln!(&mut f, "pub const {}: bool = true;", key).unwrap(); 174 | } 175 | "n" => { 176 | writeln!(&mut f, "pub const {}: bool = false;", key).unwrap(); 177 | } 178 | s if s.starts_with('"') && s.ends_with('"') => { 179 | writeln!(&mut f, "pub const {}: &str = {};", key, value).unwrap(); 180 | } 181 | _ => { 182 | if let Ok(n) = u32::from_str(value) { 183 | writeln!(&mut f, "pub const {}: u32 = {};", key, n).unwrap(); 184 | } else { 185 | writeln!(&mut f, "// pub const {}: _ = {};", key, value).unwrap(); 186 | } 187 | } 188 | } 189 | } 190 | } 191 | } 192 | 193 | pub fn apply_patches(rte_sdk_dir: &Path) { 194 | let mut patch = Command::new("patch") 195 | .stdin(Stdio::piped()) 196 | .current_dir(rte_sdk_dir) 197 | .spawn() 198 | .expect("failed to apply patches"); 199 | { 200 | patch 201 | .stdin 202 | .as_mut() 203 | .expect("Failed to open stdin") 204 | .write_all(include_bytes!("../patches/librte_bbdev.patch")) 205 | .expect("Failed to send patch"); 206 | } 207 | let _ = patch.wait_with_output().expect("Failed to read stdout"); 208 | } 209 | -------------------------------------------------------------------------------- /rte-sys/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "rte-sys" 3 | version = "18.11.0" 4 | authors = ["Flier Lu "] 5 | description = "FFI bindings to DPDK" 6 | links = "dpdk" 7 | build = "build.rs" 8 | 9 | [features] 10 | default = [] 11 | gen = ["bindgen"] 12 | 13 | [lib] 14 | 
name = "rte_sys" 15 | 16 | [dependencies] 17 | log = "0.4" 18 | cfg-if = "0.1" 19 | 20 | [build-dependencies] 21 | log = "0.4" 22 | pretty_env_logger = "0.3" 23 | 24 | rte-build = { version = "18.11", path = "../rte-build" } 25 | 26 | bindgen = { version = "0.49", optional = true } 27 | -------------------------------------------------------------------------------- /rte-sys/build.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | extern crate log; 3 | extern crate pretty_env_logger; 4 | 5 | #[cfg(feature = "gen")] 6 | extern crate bindgen; 7 | 8 | extern crate rte_build; 9 | 10 | use std::path::Path; 11 | 12 | use rte_build::*; 13 | 14 | #[cfg(feature = "gen")] 15 | fn gen_rte_binding(rte_sdk_dir: &Path, dest_path: &Path) { 16 | let rte_header = "src/rte.h"; 17 | let stub_header = "src/stub.h"; 18 | 19 | info!("generating RTE binding file base on \"{}\"", rte_header); 20 | 21 | let rte_sdk_inc_dir = rte_sdk_dir.join("include"); 22 | let cflags = vec!["-march=native", "-I", rte_sdk_inc_dir.to_str().unwrap()]; 23 | 24 | bindgen::Builder::default() 25 | .header(rte_header) 26 | .header(stub_header) 27 | .generate_comments(true) 28 | .generate_inline_functions(true) 29 | .whitelist_type(r"(rte|cmdline|ether|eth|arp|vlan|vxlan)_.*") 30 | .whitelist_function(r"(_rte|rte|cmdline|lcore|ether|eth|arp|is)_.*") 31 | .whitelist_var( 32 | r"(RTE|CMDLINE|ETHER|ARP|VXLAN|BONDING|LCORE|MEMPOOL|ARP|PKT|EXT_ATTACHED|IND_ATTACHED|lcore|rte|cmdline|per_lcore)_.*", 33 | ) 34 | .derive_copy(true) 35 | .derive_debug(true) 36 | .derive_default(true) 37 | .derive_partialeq(true) 38 | .default_enum_style(bindgen::EnumVariation::ModuleConsts) 39 | .clang_arg("-fkeep-inline-functions") 40 | .clang_args( 41 | cflags 42 | .into_iter() 43 | .map(|s| s.to_owned()) 44 | .chain(gen_cpu_features().map(|(name, value)| { 45 | if let Some(value) = value { 46 | format!("-D{}={}", name, value) 47 | } else { 48 | format!("-D{}", name) 49 | } 50 | 
})), 51 | ) 52 | .rustfmt_bindings(true) 53 | .time_phases(true) 54 | .generate() 55 | .expect("Unable to generate bindings") 56 | .write_to_file(dest_path) 57 | .expect("Couldn't write bindings!"); 58 | } 59 | 60 | #[cfg(not(feature = "gen"))] 61 | fn gen_rte_binding(_rte_sdk_dir: &Path, dest_path: &Path) { 62 | use std::fs; 63 | 64 | info!("coping RTE binding file"); 65 | 66 | fs::copy("src/raw.rs", dest_path).expect("copy binding file"); 67 | } 68 | 69 | fn main() { 70 | pretty_env_logger::init(); 71 | 72 | let rte_sdk_dir = RTE_SDK.join(RTE_TARGET.as_str()); 73 | 74 | info!("using DPDK @ {:?}", rte_sdk_dir); 75 | 76 | if !rte_sdk_dir.exists() || !rte_sdk_dir.join("lib/libdpdk.a").exists() { 77 | apply_patches(RTE_SDK.as_path()); 78 | 79 | build_dpdk(RTE_SDK.as_path(), RTE_TARGET.as_str()); 80 | } 81 | 82 | if cfg!(feature = "gen") { 83 | gen_rte_config(&rte_sdk_dir, &OUT_DIR.join("config.rs")); 84 | 85 | let binding_file = OUT_DIR.join("raw.rs"); 86 | 87 | gen_rte_binding(&rte_sdk_dir, &binding_file); 88 | } 89 | 90 | gcc_rte_config(&rte_sdk_dir) 91 | .file("src/stub.c") 92 | .include("src") 93 | .compile("rte_stub"); 94 | 95 | gen_cargo_config( 96 | &rte_sdk_dir, 97 | RTE_CORE_LIBS 98 | .iter() 99 | .chain(RTE_PMD_LIBS.iter()) 100 | .chain(RTE_DEPS_LIBS.iter()), 101 | ); 102 | 103 | if cfg!(target_os = "linux") { 104 | println!("cargo:rustc-link-search=native=/usr/lib/x86_64-linux-gnu"); 105 | } 106 | } 107 | -------------------------------------------------------------------------------- /rte-sys/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![allow(non_upper_case_globals, non_camel_case_types, non_snake_case)] 2 | 3 | #[macro_use] 4 | extern crate cfg_if; 5 | 6 | cfg_if! 
{ 7 | if #[cfg(feature = "gen")] { 8 | include!(concat!(env!("OUT_DIR"), "/config.rs")); 9 | include!(concat!(env!("OUT_DIR"), "/raw.rs")); 10 | } else { 11 | include!("config.rs"); 12 | include!("raw.rs"); 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /rte-sys/src/rte.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | // Build Configuration 4 | #include 5 | 6 | // Common Components 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include 24 | 25 | // Core Components 26 | #include 27 | #include 28 | #include 29 | 30 | #include 31 | #include 32 | #include 33 | 34 | #include 35 | #include 36 | 37 | #include 38 | #include 39 | #include 40 | #include 41 | #include 42 | 43 | #include 44 | #include 45 | #include 46 | #include 47 | #include 48 | #include 49 | #include 50 | 51 | #include 52 | #include 53 | #include 54 | #include 55 | #include 56 | #include 57 | #include 58 | #include 59 | #include 60 | 61 | -------------------------------------------------------------------------------- /rte-sys/src/stub.c: -------------------------------------------------------------------------------- 1 | #include "rte.h" 2 | 3 | void 4 | _rte_srand(uint64_t seedval) { 5 | rte_srand(seedval); 6 | } 7 | 8 | uint64_t 9 | _rte_rand(void) { 10 | return rte_rand(); 11 | } 12 | 13 | struct rte_bitmap * 14 | _rte_bitmap_init(uint32_t n_bits, uint8_t *mem, uint32_t mem_size) { 15 | return rte_bitmap_init(n_bits, mem, mem_size); 16 | } 17 | 18 | int 19 | _rte_bitmap_free(struct rte_bitmap *bmp) { 20 | return rte_bitmap_free(bmp); 21 | } 22 | 23 | void 24 | _rte_bitmap_reset(struct rte_bitmap *bmp) { 25 | rte_bitmap_reset(bmp); 26 | } 27 | 28 | void 29 | _rte_bitmap_prefetch0(struct rte_bitmap *bmp, uint32_t pos) 
{ 30 | rte_bitmap_prefetch0(bmp, pos); 31 | } 32 | 33 | uint64_t 34 | _rte_bitmap_get(struct rte_bitmap *bmp, uint32_t pos) { 35 | return rte_bitmap_get(bmp, pos); 36 | } 37 | 38 | void 39 | _rte_bitmap_set(struct rte_bitmap *bmp, uint32_t pos) { 40 | rte_bitmap_set(bmp, pos); 41 | } 42 | 43 | void 44 | _rte_bitmap_set_slab(struct rte_bitmap *bmp, uint32_t pos, uint64_t slab) { 45 | return rte_bitmap_set_slab(bmp, pos, slab); 46 | } 47 | 48 | void 49 | _rte_bitmap_clear(struct rte_bitmap *bmp, uint32_t pos) { 50 | return rte_bitmap_clear(bmp, pos); 51 | } 52 | 53 | int 54 | _rte_bitmap_scan(struct rte_bitmap *bmp, uint32_t *pos, uint64_t *slab) { 55 | return rte_bitmap_scan(bmp, pos, slab); 56 | } 57 | 58 | uint32_t 59 | _rte_bitmap_get_memory_footprint(uint32_t n_bits) { 60 | return rte_bitmap_get_memory_footprint(n_bits); 61 | } 62 | 63 | void 64 | _rte_spinlock_init(rte_spinlock_t *sl) { 65 | rte_spinlock_init(sl); 66 | } 67 | 68 | void 69 | _rte_spinlock_lock(rte_spinlock_t *sl) { 70 | rte_spinlock_lock(sl); 71 | } 72 | 73 | void 74 | _rte_spinlock_unlock(rte_spinlock_t *sl) { 75 | rte_spinlock_unlock(sl); 76 | } 77 | 78 | int 79 | _rte_spinlock_trylock(rte_spinlock_t *sl) { 80 | return rte_spinlock_trylock(sl); 81 | } 82 | 83 | int 84 | _rte_tm_supported(void) { 85 | return rte_tm_supported(); 86 | } 87 | 88 | void 89 | _rte_spinlock_lock_tm(rte_spinlock_t *sl) { 90 | rte_spinlock_lock_tm(sl); 91 | } 92 | 93 | int 94 | _rte_spinlock_trylock_tm(rte_spinlock_t *sl) { 95 | return rte_spinlock_trylock_tm(sl); 96 | } 97 | 98 | void 99 | _rte_spinlock_unlock_tm(rte_spinlock_t *sl) { 100 | rte_spinlock_unlock_tm(sl); 101 | } 102 | 103 | void 104 | _rte_spinlock_recursive_init(rte_spinlock_recursive_t *slr) { 105 | rte_spinlock_recursive_init(slr); 106 | } 107 | 108 | void 109 | _rte_spinlock_recursive_lock(rte_spinlock_recursive_t *slr) { 110 | rte_spinlock_recursive_lock(slr); 111 | } 112 | 113 | void 114 | _rte_spinlock_recursive_unlock(rte_spinlock_recursive_t 
*slr) { 115 | rte_spinlock_recursive_unlock(slr); 116 | } 117 | 118 | int 119 | _rte_spinlock_recursive_trylock(rte_spinlock_recursive_t *slr) { 120 | return rte_spinlock_recursive_trylock(slr); 121 | } 122 | 123 | void 124 | _rte_spinlock_recursive_lock_tm(rte_spinlock_recursive_t *slr) { 125 | rte_spinlock_recursive_lock_tm(slr); 126 | } 127 | 128 | void 129 | _rte_spinlock_recursive_unlock_tm(rte_spinlock_recursive_t *slr) { 130 | rte_spinlock_recursive_unlock_tm(slr); 131 | } 132 | 133 | int 134 | _rte_spinlock_recursive_trylock_tm(rte_spinlock_recursive_t *slr) { 135 | return rte_spinlock_recursive_trylock_tm(slr); 136 | } 137 | 138 | unsigned 139 | _rte_lcore_id(void) { 140 | return rte_lcore_id(); 141 | } 142 | 143 | int 144 | _rte_errno(void) { 145 | return rte_errno; 146 | } 147 | 148 | uint64_t 149 | _rte_get_tsc_cycles(void) { 150 | return rte_get_tsc_cycles(); 151 | } 152 | 153 | uint64_t 154 | _rte_get_timer_cycles(void) { 155 | return rte_get_timer_cycles(); 156 | } 157 | 158 | uint64_t 159 | _rte_get_timer_hz(void) { 160 | return rte_get_timer_hz(); 161 | } 162 | 163 | void 164 | _rte_delay_ms(unsigned ms) { 165 | rte_delay_ms(ms); 166 | } 167 | 168 | uint64_t 169 | _rte_rdtsc(void) { 170 | return rte_rdtsc(); 171 | } 172 | 173 | uint64_t 174 | _rte_rdtsc_precise(void) { 175 | return rte_rdtsc_precise(); 176 | } 177 | 178 | struct rte_mempool * 179 | _rte_mempool_from_obj(void *obj) { 180 | return rte_mempool_from_obj(obj); 181 | } 182 | 183 | rte_iova_t 184 | _rte_mempool_virt2iova(const void *elt) { 185 | return rte_mempool_virt2iova(elt); 186 | } 187 | 188 | void * 189 | _rte_mempool_get_priv(struct rte_mempool *mp) { 190 | return rte_mempool_get_priv(mp); 191 | } 192 | 193 | void 194 | _rte_mempool_cache_flush(struct rte_mempool_cache *cache, struct rte_mempool *mp) { 195 | rte_mempool_cache_flush(cache, mp); 196 | } 197 | 198 | struct rte_mempool_cache * 199 | _rte_mempool_default_cache(struct rte_mempool *mp, unsigned lcore_id) { 200 | return 
rte_mempool_default_cache(mp, lcore_id); 201 | } 202 | 203 | void 204 | _rte_mempool_generic_put(struct rte_mempool *mp, void * const *obj_table, 205 | unsigned int n, struct rte_mempool_cache *cache) { 206 | rte_mempool_generic_put(mp, obj_table, n, cache); 207 | } 208 | 209 | void 210 | _rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table, unsigned int n) { 211 | rte_mempool_put_bulk(mp, obj_table, n); 212 | } 213 | 214 | void 215 | _rte_mempool_put(struct rte_mempool *mp, void *obj) { 216 | rte_mempool_put(mp, obj); 217 | } 218 | 219 | int 220 | _rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table, 221 | unsigned int n, struct rte_mempool_cache *cache) { 222 | return rte_mempool_generic_get(mp, obj_table, n, cache); 223 | } 224 | 225 | int 226 | _rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned int n) { 227 | return rte_mempool_get_bulk(mp, obj_table, n); 228 | } 229 | 230 | int 231 | _rte_mempool_get(struct rte_mempool *mp, void **obj_p) { 232 | return rte_mempool_get(mp, obj_p); 233 | } 234 | 235 | int __rte_experimental 236 | _rte_mempool_get_contig_blocks(struct rte_mempool *mp, void **first_obj_table, unsigned int n) { 237 | return rte_mempool_get_contig_blocks(mp, first_obj_table, n); 238 | } 239 | 240 | void 241 | _rte_mbuf_prefetch_part1(struct rte_mbuf *m) { 242 | rte_mbuf_prefetch_part1(m); 243 | } 244 | 245 | void 246 | _rte_mbuf_prefetch_part2(struct rte_mbuf *m) { 247 | rte_mbuf_prefetch_part2(m); 248 | } 249 | 250 | rte_iova_t 251 | _rte_mbuf_data_iova(const struct rte_mbuf *mb) { 252 | return rte_mbuf_data_iova(mb); 253 | } 254 | 255 | rte_iova_t 256 | _rte_mbuf_data_iova_default(const struct rte_mbuf *mb) { 257 | return rte_mbuf_data_iova_default(mb); 258 | } 259 | 260 | struct rte_mbuf * 261 | _rte_mbuf_from_indirect(struct rte_mbuf *mi) { 262 | return rte_mbuf_from_indirect(mi); 263 | } 264 | 265 | char * 266 | _rte_mbuf_to_baddr(struct rte_mbuf *md) { 267 | return rte_mbuf_to_baddr(md); 268 
| } 269 | 270 | void * 271 | _rte_mbuf_to_priv(struct rte_mbuf *m) { 272 | return rte_mbuf_to_priv(m); 273 | } 274 | 275 | uint16_t 276 | _rte_mbuf_refcnt_read(const struct rte_mbuf *m) { 277 | return rte_mbuf_refcnt_read(m); 278 | } 279 | 280 | void 281 | _rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value) { 282 | rte_mbuf_refcnt_set(m, new_value); 283 | } 284 | 285 | uint16_t 286 | _rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value) { 287 | return rte_mbuf_refcnt_update(m, value); 288 | } 289 | 290 | uint16_t 291 | _rte_mbuf_ext_refcnt_read(const struct rte_mbuf_ext_shared_info *shinfo) { 292 | return rte_mbuf_ext_refcnt_read(shinfo); 293 | } 294 | 295 | void 296 | _rte_mbuf_ext_refcnt_set(struct rte_mbuf_ext_shared_info *shinfo, uint16_t new_value) { 297 | rte_mbuf_ext_refcnt_set(shinfo, new_value); 298 | } 299 | 300 | uint16_t 301 | _rte_mbuf_ext_refcnt_update(struct rte_mbuf_ext_shared_info *shinfo, int16_t value) { 302 | return rte_mbuf_ext_refcnt_update(shinfo, value); 303 | } 304 | 305 | struct rte_mbuf * 306 | _rte_mbuf_raw_alloc(struct rte_mempool *mp) { 307 | return rte_mbuf_raw_alloc(mp); 308 | } 309 | 310 | void 311 | _rte_mbuf_raw_free(struct rte_mbuf *m) { 312 | rte_mbuf_raw_free(m); 313 | } 314 | 315 | uint16_t 316 | _rte_pktmbuf_data_room_size(struct rte_mempool *mp) { 317 | return rte_pktmbuf_data_room_size(mp); 318 | } 319 | 320 | uint16_t 321 | _rte_pktmbuf_priv_size(struct rte_mempool *mp) { 322 | return rte_pktmbuf_priv_size(mp); 323 | } 324 | 325 | void 326 | _rte_pktmbuf_reset_headroom(struct rte_mbuf *m) { 327 | rte_pktmbuf_reset_headroom(m); 328 | } 329 | 330 | void 331 | _rte_pktmbuf_reset(struct rte_mbuf *m) { 332 | rte_pktmbuf_reset(m); 333 | } 334 | 335 | struct rte_mbuf * 336 | _rte_pktmbuf_alloc(struct rte_mempool *mp) { 337 | return rte_pktmbuf_alloc(mp); 338 | } 339 | 340 | int 341 | _rte_pktmbuf_alloc_bulk(struct rte_mempool *pool, struct rte_mbuf **mbufs, unsigned count) { 342 | return rte_pktmbuf_alloc_bulk(pool, 
mbufs, count); 343 | } 344 | 345 | struct rte_mbuf_ext_shared_info * 346 | _rte_pktmbuf_ext_shinfo_init_helper(void *buf_addr, uint16_t *buf_len, 347 | rte_mbuf_extbuf_free_callback_t free_cb, void *fcb_opaque) { 348 | return rte_pktmbuf_ext_shinfo_init_helper(buf_addr, buf_len, free_cb, fcb_opaque); 349 | } 350 | 351 | void 352 | _rte_pktmbuf_attach_extbuf(struct rte_mbuf *m, void *buf_addr, 353 | rte_iova_t buf_iova, uint16_t buf_len, 354 | struct rte_mbuf_ext_shared_info *shinfo) { 355 | rte_pktmbuf_attach_extbuf(m, buf_addr, buf_iova, buf_len, shinfo); 356 | } 357 | 358 | void 359 | _rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m) { 360 | rte_pktmbuf_attach(mi, m); 361 | } 362 | 363 | void 364 | _rte_pktmbuf_detach(struct rte_mbuf *m) { 365 | rte_pktmbuf_detach(m); 366 | } 367 | 368 | struct rte_mbuf * 369 | _rte_pktmbuf_prefree_seg(struct rte_mbuf *m) { 370 | return rte_pktmbuf_prefree_seg(m); 371 | } 372 | 373 | void 374 | _rte_pktmbuf_free_seg(struct rte_mbuf *m) { 375 | rte_pktmbuf_free_seg(m); 376 | } 377 | 378 | void 379 | _rte_pktmbuf_free(struct rte_mbuf *m) { 380 | rte_pktmbuf_free(m); 381 | } 382 | 383 | struct rte_mbuf * 384 | _rte_pktmbuf_clone(struct rte_mbuf *md, struct rte_mempool *mp) { 385 | return rte_pktmbuf_clone(md, mp); 386 | } 387 | 388 | void 389 | _rte_pktmbuf_refcnt_update(struct rte_mbuf *m, int16_t v) { 390 | rte_pktmbuf_refcnt_update(m, v); 391 | } 392 | 393 | uint16_t 394 | _rte_pktmbuf_headroom(const struct rte_mbuf *m) { 395 | return rte_pktmbuf_headroom(m); 396 | } 397 | 398 | uint16_t 399 | _rte_pktmbuf_tailroom(const struct rte_mbuf *m) { 400 | return rte_pktmbuf_tailroom(m); 401 | } 402 | 403 | struct rte_mbuf * 404 | _rte_pktmbuf_lastseg(struct rte_mbuf *m) { 405 | return rte_pktmbuf_lastseg(m); 406 | } 407 | 408 | char * 409 | _rte_pktmbuf_prepend(struct rte_mbuf *m, uint16_t len) { 410 | return rte_pktmbuf_prepend(m, len); 411 | } 412 | 413 | char * 414 | _rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len) { 
415 | return rte_pktmbuf_append(m, len); 416 | } 417 | 418 | char * 419 | _rte_pktmbuf_adj(struct rte_mbuf *m, uint16_t len) { 420 | return rte_pktmbuf_adj(m, len); 421 | } 422 | 423 | int 424 | _rte_pktmbuf_trim(struct rte_mbuf *m, uint16_t len) { 425 | return rte_pktmbuf_trim(m, len); 426 | } 427 | 428 | int 429 | _rte_pktmbuf_is_contiguous(const struct rte_mbuf *m) { 430 | return rte_pktmbuf_is_contiguous(m); 431 | } 432 | 433 | const void * 434 | _rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off, uint32_t len, void *buf) { 435 | return rte_pktmbuf_read(m, off, len, buf); 436 | } 437 | 438 | int 439 | _rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail) { 440 | return rte_pktmbuf_chain(head, tail); 441 | } 442 | 443 | int 444 | _rte_validate_tx_offload(const struct rte_mbuf *m) { 445 | return rte_validate_tx_offload(m); 446 | } 447 | 448 | int 449 | _rte_pktmbuf_linearize(struct rte_mbuf *mbuf) { 450 | return rte_pktmbuf_linearize(mbuf); 451 | } 452 | 453 | uint16_t 454 | _rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id, 455 | struct rte_mbuf **rx_pkts, const uint16_t nb_pkts) { 456 | return rte_eth_rx_burst(port_id, queue_id, rx_pkts, nb_pkts); 457 | } 458 | 459 | int 460 | _rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id) { 461 | return rte_eth_rx_queue_count(port_id, queue_id); 462 | } 463 | 464 | int 465 | _rte_eth_rx_descriptor_done(uint16_t port_id, uint16_t queue_id, uint16_t offset) { 466 | return rte_eth_rx_descriptor_done(port_id, queue_id, offset); 467 | } 468 | 469 | int 470 | _rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id, uint16_t offset) { 471 | return rte_eth_rx_descriptor_status(port_id, queue_id, offset); 472 | } 473 | 474 | int 475 | _rte_eth_tx_descriptor_status(uint16_t port_id, uint16_t queue_id, uint16_t offset) { 476 | return rte_eth_tx_descriptor_status(port_id, queue_id, offset); 477 | } 478 | 479 | uint16_t 480 | _rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id, 481 | struct 
rte_mbuf **tx_pkts, uint16_t nb_pkts) { 482 | return rte_eth_tx_burst(port_id, queue_id, tx_pkts, nb_pkts); 483 | } 484 | 485 | uint16_t 486 | _rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id, 487 | struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { 488 | return rte_eth_tx_prepare(port_id, queue_id, tx_pkts, nb_pkts); 489 | } 490 | 491 | uint16_t 492 | _rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id, struct rte_eth_dev_tx_buffer *buffer) { 493 | return rte_eth_tx_buffer_flush(port_id, queue_id, buffer); 494 | } 495 | 496 | uint16_t 497 | _rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id, 498 | struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt) { 499 | return rte_eth_tx_buffer(port_id, queue_id, buffer, tx_pkt); 500 | } 501 | 502 | int 503 | _rte_vlan_strip(struct rte_mbuf *m) { 504 | return rte_vlan_strip(m); 505 | } 506 | 507 | int 508 | _rte_vlan_insert(struct rte_mbuf **m) { 509 | return rte_vlan_insert(m); 510 | } 511 | -------------------------------------------------------------------------------- /rte/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "rte" 3 | version = "18.11.0" 4 | authors = ["Flier Lu "] 5 | description = "Rust bindings to DPDK" 6 | build = "build.rs" 7 | 8 | [features] 9 | default = [] 10 | gen = ["rte-sys/gen"] 11 | 12 | [dependencies] 13 | log = "0.4" 14 | bitflags = "1.0" 15 | failure = "0.1" 16 | lazy_static = "1.2" 17 | libc = "0.2" 18 | time = "0.1" 19 | cfile = "0.4" 20 | rand = "0.6" 21 | errno = "0.2" 22 | num-traits = "0.2" 23 | num-derive = "0.2" 24 | itertools = "0.8" 25 | 26 | rte-sys = { version = "18.11", path = "../rte-sys" } 27 | 28 | [build-dependencies] 29 | log = "0.4" 30 | pretty_env_logger = "0.3" 31 | 32 | rte-build = { version = "18.11", path = "../rte-build" } 33 | 34 | [dev-dependencies] 35 | pretty_env_logger = "0.3" 36 | num_cpus = "1.0" 37 | getopts = "0.2" 38 | nix = "0.14" 39 | 40 | [lib] 41 | name = "rte" 
42 | 43 | [[example]] 44 | name = "helloworld" 45 | path = "examples/helloworld/main.rs" 46 | 47 | [[example]] 48 | name = "l2fwd" 49 | path = "examples/l2fwd/main.rs" 50 | 51 | [[example]] 52 | name = "kni" 53 | path = "examples/kni/main.rs" 54 | 55 | [[example]] 56 | name = "bond" 57 | path = "examples/bond/main.rs" 58 | 59 | [[example]] 60 | name = "cmdline" 61 | path = "examples/cmdline/main.rs" 62 | 63 | [[example]] 64 | name = "ethtool" 65 | path = "examples/ethtool/main.rs" 66 | -------------------------------------------------------------------------------- /rte/build.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | extern crate log; 3 | 4 | extern crate rte_build; 5 | 6 | use rte_build::*; 7 | 8 | fn main() { 9 | pretty_env_logger::init(); 10 | 11 | let rte_sdk_dir = RTE_SDK.join(RTE_TARGET.as_str()); 12 | 13 | info!("using DPDK @ {:?}", rte_sdk_dir); 14 | 15 | gcc_rte_config(&rte_sdk_dir) 16 | .file("examples/l2fwd/l2fwd_core.c") 17 | .compile("libl2fwd_core.a"); 18 | gcc_rte_config(&rte_sdk_dir) 19 | .file("examples/kni/kni_core.c") 20 | .compile("libkni_core.a"); 21 | 22 | gen_cargo_config( 23 | &rte_sdk_dir, 24 | RTE_CORE_LIBS 25 | .iter() 26 | .chain(RTE_PMD_LIBS.iter()) 27 | .chain(RTE_DEPS_LIBS.iter()), 28 | ); 29 | 30 | if cfg!(target_os = "linux") { 31 | println!("cargo:rustc-link-search=native=/usr/lib/x86_64-linux-gnu"); 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /rte/examples/cmdline/main.rs: -------------------------------------------------------------------------------- 1 | extern crate libc; 2 | extern crate pretty_env_logger; 3 | extern crate rte; 4 | 5 | use std::cell::RefCell; 6 | use std::collections::HashMap; 7 | use std::env; 8 | use std::ffi::CString; 9 | use std::marker::PhantomData; 10 | use std::mem; 11 | use std::net::IpAddr; 12 | use std::os::raw::c_void; 13 | use std::rc::Rc; 14 | use std::slice; 15 | use std::str; 
16 | 17 | use rte::cmdline::*; 18 | use rte::*; 19 | 20 | struct Object { 21 | name: String, 22 | ip: IpAddr, 23 | } 24 | 25 | type ObjectMap = HashMap; 26 | 27 | struct TokenObjectListData { 28 | objs: Rc>, 29 | } 30 | 31 | struct TokenObjectList { 32 | hdr: cmdline::RawTokenHeader, 33 | obj_list_data: TokenObjectListData, 34 | } 35 | 36 | unsafe extern "C" fn parse_obj_list( 37 | token: *mut RawParseTokenHeader, 38 | srcbuf: *const i8, 39 | res: *mut libc::c_void, 40 | ressize: u32, 41 | ) -> i32 { 42 | if srcbuf.is_null() { 43 | return -1; 44 | } 45 | 46 | if !res.is_null() && (ressize as usize) < mem::size_of::<*const Object>() { 47 | return -1; 48 | } 49 | 50 | let mut p = srcbuf as *const u8; 51 | let mut token_len = 0; 52 | 53 | while !cmdline::is_end_of_token(*p) { 54 | p = p.offset(1); 55 | token_len += 1; 56 | } 57 | 58 | let name = str::from_utf8(slice::from_raw_parts(srcbuf as *const u8, token_len)).unwrap(); 59 | 60 | if let Some(obj) = (token as *mut TokenObjectList) 61 | .as_ref() 62 | .unwrap() 63 | .obj_list_data 64 | .objs 65 | .borrow() 66 | .get(name) 67 | { 68 | if !res.is_null() { 69 | (res as *mut *const Object).write(obj); 70 | } 71 | 72 | token_len as i32 73 | } else { 74 | -1 75 | } 76 | } 77 | 78 | unsafe extern "C" fn complete_get_nb_obj_list(token: *mut RawTokenHeader) -> i32 { 79 | (token as *mut TokenObjectList) 80 | .as_ref() 81 | .unwrap() 82 | .obj_list_data 83 | .objs 84 | .borrow() 85 | .len() as i32 86 | } 87 | 88 | unsafe extern "C" fn complete_get_elt_obj_list( 89 | token: *mut cmdline::RawTokenHeader, 90 | idx: i32, 91 | dstbuf: *mut i8, 92 | size: u32, 93 | ) -> i32 { 94 | if let Some((name, _)) = (token as *mut TokenObjectList) 95 | .as_ref() 96 | .unwrap() 97 | .obj_list_data 98 | .objs 99 | .borrow() 100 | .iter() 101 | .nth(idx as usize) 102 | { 103 | if (name.len() + 1) < size as usize { 104 | let buf = slice::from_raw_parts_mut(dstbuf as *mut u8, size as usize); 105 | 106 | 
buf[..name.len()].clone_from_slice(name.as_bytes()); 107 | buf[name.len()] = 0; 108 | 109 | return 0; 110 | } 111 | } 112 | 113 | -1 114 | } 115 | 116 | unsafe extern "C" fn get_help_obj_list(_: *mut cmdline::RawTokenHeader, dstbuf: *mut i8, size: u32) -> i32 { 117 | let dbuf = slice::from_raw_parts_mut(dstbuf as *mut u8, size as usize); 118 | let s = CString::new("Obj-List").unwrap(); 119 | let sbuf = s.as_bytes_with_nul(); 120 | 121 | if sbuf.len() < size as usize { 122 | dbuf[0..sbuf.len()].clone_from_slice(sbuf); 123 | 124 | 0 125 | } else { 126 | -1 127 | } 128 | } 129 | 130 | struct CmdDelShowResult<'a> { 131 | action: cmdline::FixedStr, 132 | obj: &'a Object, 133 | } 134 | 135 | impl<'a> CmdDelShowResult<'a> { 136 | fn parsed(&mut self, cl: &cmdline::CmdLine, objs: Option<&RefCell>) { 137 | let action = self.action.to_str(); 138 | 139 | match action { 140 | "show" => { 141 | cl.print(format!("Object {}, ip={}\n", self.obj.name, self.obj.ip)) 142 | .unwrap(); 143 | } 144 | "del" => { 145 | if let Some(ref obj) = objs.unwrap().borrow_mut().remove(&self.obj.name) { 146 | cl.print(format!("Object {} removed, ip={}\n", obj.name, obj.ip)) 147 | .unwrap(); 148 | } 149 | } 150 | _ => { 151 | cl.print(format!("Unknown action, {}", action)).unwrap(); 152 | } 153 | } 154 | } 155 | } 156 | 157 | struct CmdObjAddResult { 158 | action: cmdline::FixedStr, 159 | name: cmdline::FixedStr, 160 | ip: cmdline::IpNetAddr, 161 | } 162 | 163 | impl CmdObjAddResult { 164 | fn parsed(&mut self, cl: &cmdline::CmdLine, objs: Option<&RefCell>) { 165 | let name = self.name.to_str(); 166 | 167 | if objs.unwrap().borrow().contains_key(name) { 168 | cl.print(format!("Object {} already exist\n", name)).unwrap(); 169 | 170 | return; 171 | } 172 | 173 | let obj = Object { 174 | name: String::from(name), 175 | ip: self.ip.to_ipaddr(), 176 | }; 177 | 178 | cl.print(format!("Object {} added, ip={}\n", name, obj.ip)).unwrap(); 179 | 180 | let _ = 
objs.unwrap().borrow_mut().insert(String::from(name), obj); 181 | } 182 | } 183 | 184 | struct CmdHelpResult { 185 | help: cmdline::FixedStr, 186 | } 187 | 188 | impl CmdHelpResult { 189 | fn parsed(&mut self, cl: &cmdline::CmdLine, _: Option<&c_void>) { 190 | cl.print( 191 | r#"Demo example of command line interface in RTE 192 | 193 | 194 | This is a readline-like interface that can be used to 195 | debug your RTE application. It supports some features 196 | of GNU readline like completion, cut/paste, and some 197 | other special bindings. 198 | 199 | This demo shows how rte_cmdline library can be 200 | extended to handle a list of objects. There are 201 | 3 commands: 202 | - add obj_name IP 203 | - del obj_name 204 | - show obj_name 205 | "#, 206 | ) 207 | .unwrap(); 208 | } 209 | } 210 | 211 | struct CmdQuitResult { 212 | quit: cmdline::FixedStr, 213 | } 214 | 215 | impl CmdQuitResult { 216 | fn parsed(&mut self, cl: &cmdline::CmdLine, _: Option<&c_void>) { 217 | cl.quit(); 218 | } 219 | } 220 | 221 | fn main() { 222 | pretty_env_logger::init(); 223 | 224 | let args: Vec = env::args().collect(); 225 | 226 | eal::init(&args).expect("Cannot init EAL"); 227 | 228 | let objects = Rc::new(RefCell::new(ObjectMap::new())); 229 | 230 | let cmd_obj_action = TOKEN_STRING_INITIALIZER!(CmdDelShowResult, action, "show#del"); 231 | 232 | let mut token_obj_list_ops = cmdline::RawTokenOps { 233 | parse: Some(parse_obj_list), 234 | complete_get_nb: Some(complete_get_nb_obj_list), 235 | complete_get_elt: Some(complete_get_elt_obj_list), 236 | get_help: Some(get_help_obj_list), 237 | }; 238 | 239 | let token_obj_list = TokenObjectList { 240 | hdr: cmdline::RawTokenHeader { 241 | ops: &mut token_obj_list_ops, 242 | offset: offset_of!(CmdDelShowResult, obj) as u32, 243 | }, 244 | obj_list_data: TokenObjectListData { objs: objects.clone() }, 245 | }; 246 | 247 | let cmd_obj_obj = cmdline::Token::Raw(&token_obj_list.hdr, PhantomData); 248 | 249 | let cmd_obj_del_show = cmdline::inst( 
250 | CmdDelShowResult::parsed, 251 | Some(&objects), 252 | "Show/del an object", 253 | &[&cmd_obj_action, &cmd_obj_obj], 254 | ); 255 | 256 | let cmd_obj_action_add = TOKEN_STRING_INITIALIZER!(CmdObjAddResult, action, "add"); 257 | let cmd_obj_name = TOKEN_STRING_INITIALIZER!(CmdObjAddResult, name); 258 | let cmd_obj_ip = TOKEN_IPADDR_INITIALIZER!(CmdObjAddResult, ip); 259 | 260 | let cmd_obj_add = cmdline::inst( 261 | CmdObjAddResult::parsed, 262 | Some(&objects), 263 | "Add an object (name, val)", 264 | &[&cmd_obj_action_add, &cmd_obj_name, &cmd_obj_ip], 265 | ); 266 | 267 | let cmd_help_help = TOKEN_STRING_INITIALIZER!(CmdHelpResult, help, "help"); 268 | 269 | let cmd_help = cmdline::inst(CmdHelpResult::parsed, None, "show help", &[&cmd_help_help]); 270 | 271 | let cmd_quit_quit = TOKEN_STRING_INITIALIZER!(CmdQuitResult, quit, "quit"); 272 | 273 | let cmd_quit = cmdline::inst(CmdQuitResult::parsed, None, "quit", &[&cmd_quit_quit]); 274 | 275 | let cmds = &[&cmd_obj_del_show, &cmd_obj_add, &cmd_help, &cmd_quit]; 276 | 277 | cmdline::new(cmds) 278 | .open_stdin("example> ") 279 | .expect("fail to open stdin") 280 | .interact(); 281 | } 282 | -------------------------------------------------------------------------------- /rte/examples/ethtool/ethapp.rs: -------------------------------------------------------------------------------- 1 | use std::os::raw::c_void; 2 | 3 | use rte::cmdline::*; 4 | use rte::ethdev::{EthDevice, EthDeviceInfo}; 5 | use rte::{self, *}; 6 | 7 | use ethtool::*; 8 | 9 | struct CmdGetParams { 10 | cmd: FixedStr, 11 | } 12 | 13 | impl CmdGetParams { 14 | fn quit(&mut self, cl: &CmdLine, _: Option<&c_void>) { 15 | debug!("execute `{}` command", self.cmd); 16 | 17 | cl.quit(); 18 | } 19 | 20 | fn drvinfo(&mut self, cl: &CmdLine, _: Option<&c_void>) { 21 | debug!("execute `{}` command", self.cmd); 22 | 23 | for dev in ethdev::devices() { 24 | let info = dev.info(); 25 | 26 | cl.println(format!( 27 | "Port {} driver: {} (ver: {})", 28 | 
dev.portid(), 29 | info.driver_name(), 30 | rte::version() 31 | )) 32 | .unwrap(); 33 | } 34 | } 35 | 36 | fn link(&mut self, cl: &CmdLine, _: Option<&c_void>) { 37 | debug!("execute `{}` command", self.cmd); 38 | 39 | for dev in ethdev::devices().filter(|dev| dev.is_valid()) { 40 | let link = dev.link(); 41 | 42 | if link.up { 43 | cl.println(format!( 44 | "Port {} Link Up (speed {} Mbps, {})", 45 | dev.portid(), 46 | link.speed, 47 | if link.duplex { "full-duplex" } else { "half-duplex" } 48 | )) 49 | .unwrap(); 50 | } else { 51 | cl.println(format!("Port {} Link Down", dev.portid())).unwrap(); 52 | } 53 | } 54 | } 55 | } 56 | 57 | struct CmdIntParams { 58 | cmd: FixedStr, 59 | port: u16, 60 | } 61 | 62 | impl CmdIntParams { 63 | fn dev(&self) -> ethdev::PortId { 64 | self.port as ethdev::PortId 65 | } 66 | 67 | fn open(&mut self, cl: &CmdLine, app_cfg: Option<&AppConfig>) { 68 | debug!("execute `{}` command for port {}", self.cmd, self.port); 69 | 70 | let res = app_cfg.unwrap().lock_port(self.dev(), |app_port, dev| { 71 | dev.stop(); 72 | 73 | if let Err(err) = dev.start() { 74 | Err(format!("Error: failed to start port {}, {}", self.port, err)) 75 | } else { 76 | app_port.port_active = true; 77 | 78 | Ok(format!("port {} started", self.port)) 79 | } 80 | }); 81 | 82 | cl.println(res.unwrap_or_else(|err| format!("Error: {}", err))).unwrap(); 83 | } 84 | 85 | fn stop(&mut self, cl: &CmdLine, app_cfg: Option<&AppConfig>) { 86 | debug!("execute `{}` command for port {}", self.cmd, self.port); 87 | 88 | let res = app_cfg.unwrap().lock_port(self.dev(), |app_port, dev| { 89 | if !dev.is_up() { 90 | Err(format!("Port {} already stopped", self.port)) 91 | } else { 92 | dev.stop(); 93 | 94 | app_port.port_active = false; 95 | 96 | Ok(format!("port {} stopped", self.port)) 97 | } 98 | }); 99 | 100 | cl.println(res.unwrap_or_else(|err| format!("Error: {}", err))).unwrap(); 101 | } 102 | 103 | fn rxmode(&mut self, cl: &CmdLine, _: Option<&c_void>) { 104 | debug!("execute 
`{}` command for port {}", self.cmd, self.port); 105 | 106 | let dev = self.dev(); 107 | 108 | if !dev.is_valid() { 109 | cl.println(format!("Error: port {} is invalid", self.port)).unwrap(); 110 | } else { 111 | // // Set VF vf_rx_mode, VF unsupport status is discard 112 | // for vf in 0..dev.info().max_vfs { 113 | // if let Err(err) = dev.set_vf_rxmode(vf, ethdev::ETH_VMDQ_ACCEPT_UNTAG, false) { 114 | // cl.println(format!( 115 | // "Error: failed to set VF rx mode for port {}, {}", 116 | // self.port, err 117 | // )).unwrap(); 118 | // } 119 | // } 120 | 121 | // Enable Rx vlan filter, VF unspport status is discard 122 | if let Err(err) = dev.set_vlan_offload(ethdev::EthVlanOffloadMode::ETH_VLAN_FILTER_MASK) { 123 | cl.println(format!( 124 | "Error: failed to set VLAN offload mode for port {}, {}", 125 | self.port, err 126 | )) 127 | .unwrap(); 128 | } 129 | } 130 | } 131 | 132 | fn portstats(&mut self, cl: &CmdLine, _: Option<&c_void>) { 133 | debug!("execute `{}` command for port {}", self.cmd, self.port); 134 | 135 | let dev = self.dev(); 136 | 137 | cl.println(if !dev.is_valid() { 138 | format!("Error: port {} is invalid", self.port) 139 | } else { 140 | match dev.stats() { 141 | Ok(stats) => format!( 142 | "Port {} stats\n In: {} ({} bytes)\n Out: {} ({} bytes)\n \ 143 | Err: {}", 144 | self.port, 145 | stats.ipackets, 146 | stats.ibytes, 147 | stats.opackets, 148 | stats.obytes, 149 | stats.ierrors + stats.oerrors 150 | ), 151 | Err(err) => format!("Error: port {} fail to fetch statistics, {}", self.port, err), 152 | } 153 | }) 154 | .unwrap(); 155 | } 156 | } 157 | 158 | struct CmdIntMtuParams { 159 | cmd: FixedStr, 160 | port: u16, 161 | mtu: u16, 162 | } 163 | 164 | impl CmdIntMtuParams { 165 | fn dev(&self) -> ethdev::PortId { 166 | self.port as ethdev::PortId 167 | } 168 | 169 | fn mtu_list(&mut self, cl: &CmdLine, app_cfg: Option<&AppConfig>) { 170 | debug!("execute list `{}` command for port {}", self.cmd, self.port); 171 | 172 | for portid in 
0..app_cfg.unwrap().ports.len() { 173 | let dev = portid as ethdev::PortId; 174 | 175 | cl.println(format!("Port {} MTU: {}", portid, dev.mtu().unwrap())) 176 | .unwrap(); 177 | } 178 | } 179 | 180 | fn mtu_get(&mut self, cl: &CmdLine, _: Option<&c_void>) { 181 | debug!("execute get `{}` command for port {}", self.cmd, self.port); 182 | 183 | let dev = self.dev(); 184 | 185 | cl.println(if !dev.is_valid() { 186 | format!("Error: port {} is invalid", self.port) 187 | } else { 188 | format!("Port {} MTU: {}", self.port, dev.mtu().unwrap()) 189 | }) 190 | .unwrap(); 191 | } 192 | 193 | fn mtu_set(&mut self, cl: &CmdLine, _: Option<&c_void>) { 194 | debug!("execute set `{}` command for port {}", self.cmd, self.port); 195 | 196 | let dev = self.dev(); 197 | 198 | cl.println(if let Err(err) = dev.set_mtu(self.mtu) { 199 | format!("Error: Fail to change mac address of port {}, {}", self.port, err) 200 | } else { 201 | format!("Port {} MTU was changed to {}", self.port, self.mtu) 202 | }) 203 | .unwrap(); 204 | } 205 | } 206 | 207 | struct CmdIntMacParams { 208 | cmd: FixedStr, 209 | port: u16, 210 | mac: EtherAddr, 211 | } 212 | 213 | impl CmdIntMacParams { 214 | fn dev(&self) -> ethdev::PortId { 215 | self.port as ethdev::PortId 216 | } 217 | 218 | fn list(&mut self, cl: &CmdLine, app_cfg: Option<&AppConfig>) { 219 | debug!("execute list `{}` command for port {}", self.cmd, self.port); 220 | 221 | for portid in 0..app_cfg.unwrap().ports.len() { 222 | let dev = portid as ethdev::PortId; 223 | 224 | cl.println(format!("Port {} MAC Address: {}", portid, dev.mac_addr())) 225 | .unwrap(); 226 | } 227 | } 228 | 229 | fn get(&mut self, cl: &CmdLine, _: Option<&c_void>) { 230 | debug!("execute get `{}` command for port {}", self.cmd, self.port); 231 | 232 | let dev = self.dev(); 233 | 234 | cl.println(if !dev.is_valid() { 235 | format!("Error: port {} is invalid", self.port) 236 | } else { 237 | format!("Port {} MAC Address: {}", self.port, dev.mac_addr()) 238 | }) 239 | 
.unwrap(); 240 | } 241 | 242 | fn set(&mut self, cl: &CmdLine, app_cfg: Option<&AppConfig>) { 243 | debug!("execute set `{}` command for port {}", self.cmd, self.port); 244 | 245 | let res = app_cfg.unwrap().lock_port(self.dev(), |app_port, dev| { 246 | if let Err(err) = dev.set_mac_addr(&self.mac) { 247 | Err(format!("Fail to change mac address of port {}, {}", self.port, err)) 248 | } else { 249 | app_port.port_dirty = true; 250 | 251 | Ok(format!("Port {} mac address was changed to {}", self.port, self.mac)) 252 | } 253 | }); 254 | 255 | cl.println(res.unwrap_or_else(|err| format!("Error: {}", err))).unwrap(); 256 | } 257 | 258 | fn validate(&mut self, cl: &CmdLine, _: Option<&c_void>) { 259 | debug!("execute `{}` command for port {}", self.cmd, self.port); 260 | 261 | cl.println(format!( 262 | "MAC address {} is {}", 263 | self.mac, 264 | if self.mac.is_valid() { "unicast" } else { "not unicast" } 265 | )) 266 | .unwrap(); 267 | } 268 | } 269 | 270 | struct CmdVlanParams { 271 | cmd: FixedStr, 272 | port: u16, 273 | mode: FixedStr, 274 | vlan_id: u16, 275 | } 276 | 277 | impl CmdVlanParams { 278 | fn dev(&self) -> ethdev::PortId { 279 | self.port as ethdev::PortId 280 | } 281 | 282 | fn change(&mut self, cl: &CmdLine, _: Option<&c_void>) { 283 | debug!("execute `{}` command for port {}", self.cmd, self.port); 284 | 285 | let dev = self.dev(); 286 | 287 | cl.println(if !dev.is_valid() { 288 | format!("Error: port {} is invalid", self.port) 289 | } else { 290 | match self.mode.to_str() { 291 | "add" => match dev.set_vlan_filter(self.vlan_id, true) { 292 | Ok(_) => format!("VLAN vid {} added to port {}", self.vlan_id, self.port), 293 | Err(err) => format!( 294 | "Error: fail to add VLAN vid {} to port {}, {}", 295 | self.vlan_id, self.port, err 296 | ), 297 | }, 298 | "del" => match dev.set_vlan_filter(self.vlan_id, false) { 299 | Ok(_) => format!("VLAN vid {} removed from port {}", self.vlan_id, self.port), 300 | Err(err) => format!( 301 | "Error: fail to remove 
VLAN vid {} to port {}, {}", 302 | self.vlan_id, self.port, err 303 | ), 304 | }, 305 | mode @ _ => format!("Error: Bad mode {}", mode), 306 | } 307 | }) 308 | .unwrap(); 309 | } 310 | } 311 | 312 | pub fn main(app_cfg: &mut AppConfig) { 313 | // Parameter-less commands 314 | let pcmd_quit_token_cmd = TOKEN_STRING_INITIALIZER!(CmdGetParams, cmd, "quit"); 315 | let pcmd_drvinfo_token_cmd = TOKEN_STRING_INITIALIZER!(CmdGetParams, cmd, "drvinfo"); 316 | let pcmd_link_token_cmd = TOKEN_STRING_INITIALIZER!(CmdGetParams, cmd, "link"); 317 | 318 | // Commands taking just port id 319 | let pcmd_open_token_cmd = TOKEN_STRING_INITIALIZER!(CmdIntParams, cmd, "open"); 320 | let pcmd_stop_token_cmd = TOKEN_STRING_INITIALIZER!(CmdIntParams, cmd, "stop"); 321 | let pcmd_rxmode_token_cmd = TOKEN_STRING_INITIALIZER!(CmdIntParams, cmd, "rxmode"); 322 | let pcmd_portstats_token_cmd = TOKEN_STRING_INITIALIZER!(CmdIntParams, cmd, "portstats"); 323 | 324 | let pcmd_int_token_port = TOKEN_NUM_INITIALIZER!(CmdIntParams, port, u16); 325 | 326 | // Commands taking port id and string 327 | let pcmd_mtu_token_cmd = TOKEN_STRING_INITIALIZER!(CmdIntMtuParams, cmd, "mtu"); 328 | let pcmd_intmtu_token_port = TOKEN_NUM_INITIALIZER!(CmdIntMtuParams, port, u16); 329 | let pcmd_intmtu_token_opt = TOKEN_NUM_INITIALIZER!(CmdIntMtuParams, mtu, u16); 330 | 331 | // Commands taking port id and a MAC address string 332 | let pcmd_macaddr_token_cmd = TOKEN_STRING_INITIALIZER!(CmdIntMacParams, cmd, "macaddr"); 333 | let pcmd_intmac_token_port = TOKEN_NUM_INITIALIZER!(CmdIntMacParams, port, u16); 334 | let pcmd_intmac_token_mac = TOKEN_ETHERADDR_INITIALIZER!(CmdIntMacParams, mac); 335 | 336 | // Command taking just a MAC address 337 | let pcmd_validate_token_cmd = TOKEN_STRING_INITIALIZER!(CmdIntMacParams, cmd, "validate"); 338 | 339 | // /* VLAN commands */ 340 | let pcmd_vlan_token_cmd = TOKEN_STRING_INITIALIZER!(CmdVlanParams, cmd, "vlan"); 341 | let pcmd_vlan_token_port = 
TOKEN_NUM_INITIALIZER!(CmdVlanParams, port, u16); 342 | let pcmd_vlan_token_mode = TOKEN_STRING_INITIALIZER!(CmdVlanParams, mode, "add#del"); 343 | let pcmd_vlan_token_vlan_id = TOKEN_NUM_INITIALIZER!(CmdVlanParams, vlan_id, u16); 344 | 345 | let pcmd_quit = inst( 346 | CmdGetParams::quit, 347 | None, 348 | "quit\n Exit program", 349 | &[&pcmd_quit_token_cmd], 350 | ); 351 | 352 | let pcmd_drvinfo = inst( 353 | CmdGetParams::drvinfo, 354 | None, 355 | "drvinfo\n Print driver info", 356 | &[&pcmd_drvinfo_token_cmd], 357 | ); 358 | 359 | let pcmd_link = inst( 360 | CmdGetParams::link, 361 | None, 362 | "link\n Print port link states", 363 | &[&pcmd_link_token_cmd], 364 | ); 365 | 366 | let pcmd_open = inst( 367 | CmdIntParams::open, 368 | Some(app_cfg), 369 | "open \n Open port", 370 | &[&pcmd_open_token_cmd, &pcmd_int_token_port], 371 | ); 372 | 373 | let pcmd_stop = inst( 374 | CmdIntParams::stop, 375 | Some(app_cfg), 376 | "stop \n Stop port", 377 | &[&pcmd_stop_token_cmd, &pcmd_int_token_port], 378 | ); 379 | 380 | let pcmd_rxmode = inst( 381 | CmdIntParams::rxmode, 382 | None, 383 | "rxmode \n Toggle port Rx mode", 384 | &[&pcmd_rxmode_token_cmd, &pcmd_int_token_port], 385 | ); 386 | 387 | let pcmd_portstats = inst( 388 | CmdIntParams::portstats, 389 | None, 390 | "portstats \n Print port eth statistics", 391 | &[&pcmd_portstats_token_cmd, &pcmd_int_token_port], 392 | ); 393 | 394 | let pcmd_mtu_list = inst( 395 | CmdIntMtuParams::mtu_list, 396 | Some(app_cfg), 397 | "mtu\n List MTU", 398 | &[&pcmd_mtu_token_cmd], 399 | ); 400 | 401 | let pcmd_mtu_get = inst( 402 | CmdIntMtuParams::mtu_get, 403 | None, 404 | "mtu \n Show MTU", 405 | &[&pcmd_mtu_token_cmd, &pcmd_intmtu_token_port], 406 | ); 407 | 408 | let pcmd_mtu_set = inst( 409 | CmdIntMtuParams::mtu_set, 410 | None, 411 | "mtu \n Change MTU", 412 | &[&pcmd_mtu_token_cmd, &pcmd_intmtu_token_port, &pcmd_intmtu_token_opt], 413 | ); 414 | 415 | let pcmd_macaddr_list = inst( 416 | CmdIntMacParams::list, 417 | 
Some(app_cfg), 418 | "macaddr\n List port MAC address", 419 | &[&pcmd_macaddr_token_cmd], 420 | ); 421 | 422 | let pcmd_macaddr_get = inst( 423 | CmdIntMacParams::get, 424 | None, 425 | "macaddr \n Get MAC address", 426 | &[&pcmd_macaddr_token_cmd, &pcmd_intmac_token_port], 427 | ); 428 | 429 | let pcmd_macaddr_set = inst( 430 | CmdIntMacParams::set, 431 | Some(app_cfg), 432 | "macaddr \n Set MAC address", 433 | &[&pcmd_macaddr_token_cmd, &pcmd_intmac_token_port, &pcmd_intmac_token_mac], 434 | ); 435 | 436 | let pcmd_macaddr_validate = inst( 437 | CmdIntMacParams::validate, 438 | None, 439 | "validate \n Check that MAC address \ 440 | is valid unicast address", 441 | &[&pcmd_validate_token_cmd, &pcmd_intmac_token_mac], 442 | ); 443 | 444 | let pcmd_vlan = inst( 445 | CmdVlanParams::change, 446 | None, 447 | "vlan \n Add/remove VLAN id", 448 | &[ 449 | &pcmd_vlan_token_cmd, 450 | &pcmd_vlan_token_port, 451 | &pcmd_vlan_token_mode, 452 | &pcmd_vlan_token_vlan_id, 453 | ], 454 | ); 455 | 456 | let cmds = &[ 457 | &pcmd_quit, 458 | &pcmd_drvinfo, 459 | &pcmd_link, 460 | &pcmd_open, 461 | &pcmd_stop, 462 | &pcmd_rxmode, 463 | &pcmd_portstats, 464 | &pcmd_mtu_list, 465 | &pcmd_mtu_get, 466 | &pcmd_mtu_set, 467 | &pcmd_macaddr_list, 468 | &pcmd_macaddr_get, 469 | &pcmd_macaddr_set, 470 | &pcmd_macaddr_validate, 471 | &pcmd_vlan, 472 | ]; 473 | 474 | new(cmds).open_stdin("EthApp> ").expect("fail to open stdin").interact(); 475 | } 476 | -------------------------------------------------------------------------------- /rte/examples/ethtool/ethtool.rs: -------------------------------------------------------------------------------- 1 | use std::mem; 2 | use std::result; 3 | use std::sync::Mutex; 4 | 5 | use rte::ethdev::EthDevice; 6 | use rte::ffi::RTE_MAX_ETHPORTS; 7 | use rte::*; 8 | 9 | pub const MAX_PORTS: u16 = RTE_MAX_ETHPORTS as u16; 10 | 11 | pub const MAX_BURST_LENGTH: usize = 32; 12 | 13 | pub struct TxQueuePort { 14 | pub cnt_unsent: usize, 15 | pub buf_frames: 
[Option; MAX_BURST_LENGTH], 16 | } 17 | 18 | pub struct AppPort { 19 | pub mac_addr: ether::EtherAddr, 20 | pub txq: TxQueuePort, 21 | pub port_id: u8, 22 | pub port_active: bool, 23 | pub port_dirty: bool, 24 | pub pkt_pool: mempool::MemoryPool, 25 | } 26 | 27 | impl Default for AppPort { 28 | fn default() -> Self { 29 | unsafe { mem::zeroed() } 30 | } 31 | } 32 | 33 | pub struct AppConfig { 34 | pub ports: Vec>, 35 | pub exit_now: bool, 36 | } 37 | 38 | impl Default for AppConfig { 39 | fn default() -> Self { 40 | unsafe { mem::zeroed() } 41 | } 42 | } 43 | 44 | impl AppConfig { 45 | pub fn new(ports: u32) -> AppConfig { 46 | AppConfig { 47 | ports: (0..ports).map(|_| Mutex::new(AppPort::default())).collect(), 48 | exit_now: false, 49 | } 50 | } 51 | 52 | pub fn lock_port(&self, port: ethdev::PortId, callback: F) -> result::Result 53 | where 54 | F: Fn(&mut AppPort, ethdev::PortId) -> result::Result, 55 | { 56 | match self.ports.iter().nth(port as usize) { 57 | Some(mutex) => { 58 | if !port.is_valid() { 59 | Err(format!("port {} is invalid", port)) 60 | } else { 61 | match mutex.lock() { 62 | Ok(mut guard) => { 63 | let app_port = &mut *guard; 64 | 65 | callback(app_port, port) 66 | } 67 | Err(err) => Err(format!("fail to lock port {}, {}", port, err)), 68 | } 69 | } 70 | } 71 | _ => Err(format!("port number {} is invalid", port)), 72 | } 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /rte/examples/ethtool/main.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | extern crate log; 3 | extern crate libc; 4 | extern crate pretty_env_logger; 5 | extern crate rte; 6 | 7 | mod ethapp; 8 | mod ethtool; 9 | 10 | use std::env; 11 | 12 | use rte::ethdev::EthDevice; 13 | use rte::*; 14 | 15 | use ethtool::*; 16 | 17 | const PORT_RX_QUEUE_SIZE: u16 = 128; 18 | const PORT_TX_QUEUE_SIZE: u16 = 256; 19 | 20 | const PKTPOOL_EXTRA_SIZE: u16 = 512; 21 | const PKTPOOL_CACHE: 
u32 = 32; 22 | 23 | const EXIT_FAILURE: i32 = -1; 24 | 25 | fn setup_ports(app_cfg: &mut AppConfig) { 26 | let port_conf = ethdev::EthConf::default(); 27 | 28 | for (portid, mutex) in app_cfg.ports.iter().enumerate() { 29 | if let Ok(mut guard) = mutex.lock() { 30 | let app_port: &mut AppPort = &mut *guard; 31 | 32 | let dev = portid as ethdev::PortId; 33 | let dev_info = dev.info(); 34 | 35 | let size_pktpool = dev_info.rx_desc_lim.nb_max + dev_info.tx_desc_lim.nb_max + PKTPOOL_EXTRA_SIZE; 36 | 37 | app_port.pkt_pool = mbuf::pool_create( 38 | &format!("pkt_pool_{}", portid), 39 | size_pktpool as u32, 40 | PKTPOOL_CACHE, 41 | 0, 42 | mbuf::RTE_MBUF_DEFAULT_BUF_SIZE as u16, 43 | rte::socket_id() as i32, 44 | ) 45 | .expect("create mbuf pool failed"); 46 | 47 | println!("Init port {}..\n", portid); 48 | 49 | app_port.mac_addr = dev.mac_addr(); 50 | app_port.port_active = true; 51 | app_port.port_id = portid as u8; 52 | 53 | dev.configure(1, 1, &port_conf) 54 | .expect(&format!("fail to configure device: port={}", portid)); 55 | 56 | // init one RX queue 57 | dev.rx_queue_setup(0, PORT_RX_QUEUE_SIZE, None, &mut app_port.pkt_pool) 58 | .expect(&format!("fail to setup device rx queue: port={}", portid)); 59 | 60 | // init one TX queue on each port 61 | dev.tx_queue_setup(0, PORT_TX_QUEUE_SIZE, None) 62 | .expect(&format!("fail to setup device tx queue: port={}", portid)); 63 | 64 | // Start device 65 | dev.start().expect(&format!("fail to start device: port={}", portid)); 66 | 67 | dev.promiscuous_enable(); 68 | } 69 | } 70 | } 71 | 72 | fn process_frame(mac_addr: ðer::EtherAddr, frame: &mbuf::MBuf) { 73 | let mut ether_hdr = frame.mtod::(); 74 | let ether_hdr = unsafe { ether_hdr.as_mut() }; 75 | ether::EtherAddr::copy(ðer_hdr.s_addr.addr_bytes, &mut ether_hdr.d_addr.addr_bytes); 76 | ether::EtherAddr::copy(mac_addr, &mut ether_hdr.s_addr.addr_bytes); 77 | } 78 | 79 | fn slave_main(app_cfg: Option<&mut AppConfig>) -> i32 { 80 | let app_cfg = app_cfg.unwrap(); 81 | 82 | 
while !app_cfg.exit_now { 83 | for (portid, mutex) in app_cfg.ports.iter().enumerate() { 84 | // Check that port is active and unlocked 85 | if let Ok(mut guard) = mutex.try_lock() { 86 | let app_port: &mut AppPort = &mut *guard; 87 | 88 | if !app_port.port_active { 89 | continue; 90 | } 91 | 92 | let dev = portid as ethdev::PortId; 93 | 94 | // MAC address was updated 95 | if app_port.port_dirty { 96 | app_port.mac_addr = dev.mac_addr(); 97 | app_port.port_dirty = false; 98 | } 99 | 100 | let txq = &mut app_port.txq; 101 | 102 | // Incoming frames 103 | let cnt_recv_frames = dev.rx_burst(0, &mut txq.buf_frames[txq.cnt_unsent..]); 104 | 105 | if cnt_recv_frames > 0 { 106 | let frames = &txq.buf_frames[txq.cnt_unsent..txq.cnt_unsent + cnt_recv_frames]; 107 | for frame in frames { 108 | process_frame(&app_port.mac_addr, frame.as_ref().unwrap()); 109 | } 110 | 111 | txq.cnt_unsent += cnt_recv_frames 112 | } 113 | 114 | // Outgoing frames 115 | if txq.cnt_unsent > 0 { 116 | let cnt_sent = dev.tx_burst(0, &mut txq.buf_frames[..txq.cnt_unsent]); 117 | 118 | for i in cnt_sent..txq.cnt_unsent { 119 | txq.buf_frames[i - cnt_sent] = txq.buf_frames[i].take(); 120 | } 121 | } 122 | } 123 | } 124 | } 125 | 126 | 0 127 | } 128 | 129 | fn main() { 130 | pretty_env_logger::init(); 131 | 132 | let args: Vec = env::args().collect(); 133 | 134 | // Init runtime enviornment 135 | eal::init(&args).expect("Cannot init EAL"); 136 | 137 | let cnt_ports = match ethdev::count() { 138 | 0 => { 139 | eal::exit(EXIT_FAILURE, "No available NIC ports!\n"); 140 | 141 | 0 142 | } 143 | ports @ 1...MAX_PORTS => ports, 144 | ports @ _ => { 145 | println!("Using only {} of {} ports", MAX_PORTS, ports); 146 | 147 | MAX_PORTS 148 | } 149 | } as u32; 150 | 151 | println!("Number of NICs: {}", cnt_ports); 152 | 153 | let mut app_cfg = AppConfig::new(cnt_ports); 154 | 155 | if lcore::count() < 2 { 156 | eal::exit(EXIT_FAILURE, "No available slave core!\n"); 157 | } 158 | 159 | setup_ports(&mut app_cfg); 
160 | 161 | // Assume there is an available slave.. 162 | let lcore_id = lcore::current().unwrap().next().unwrap(); 163 | 164 | launch::remote_launch(slave_main, Some(&mut app_cfg), lcore_id).unwrap(); 165 | 166 | ethapp::main(&mut app_cfg); 167 | 168 | app_cfg.exit_now = true; 169 | 170 | launch::mp_wait_lcore(); 171 | } 172 | -------------------------------------------------------------------------------- /rte/examples/helloworld/main.rs: -------------------------------------------------------------------------------- 1 | extern crate rte; 2 | 3 | use std::env; 4 | use std::os::raw::c_void; 5 | 6 | use rte::*; 7 | 8 | fn lcore_hello(_: Option) -> i32 { 9 | println!("hello from core {}", lcore::current().unwrap()); 10 | 11 | 0 12 | } 13 | 14 | fn main() { 15 | let args: Vec = env::args().collect(); 16 | 17 | eal::init(&args).expect("Cannot init EAL"); 18 | 19 | // call lcore_hello() on every slave lcore 20 | lcore::foreach_slave(|lcore_id| { 21 | launch::remote_launch(lcore_hello, None, lcore_id).expect("Cannot launch task"); 22 | }); 23 | 24 | // call it on master lcore too 25 | lcore_hello(None); 26 | 27 | launch::mp_wait_lcore(); 28 | } 29 | -------------------------------------------------------------------------------- /rte/examples/kni/kni_core.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | 15 | /* Macros for printing using RTE_LOG */ 16 | #define RTE_LOGTYPE_APP RTE_LOGTYPE_USER1 17 | 18 | /* How many packets to attempt to read from NIC in one go */ 19 | #define PKT_BURST_SZ 32 20 | 21 | /* How many objects (mbufs) to keep in per-lcore mempool cache */ 22 | #define MEMPOOL_CACHE_SZ PKT_BURST_SZ 23 | 24 | #define KNI_MAX_KTHREAD 32 25 | 26 | /* 27 | * Structure of port parameters 28 | */ 29 | struct kni_port_params 30 | { 31 | uint8_t port_id; /* Port ID 
*/ 32 | unsigned lcore_rx; /* lcore ID for RX */ 33 | unsigned lcore_tx; /* lcore ID for TX */ 34 | uint32_t nb_lcore_k; /* Number of lcores for KNI multi kernel threads */ 35 | uint32_t nb_kni; /* Number of KNI devices to be created */ 36 | unsigned lcore_k[KNI_MAX_KTHREAD]; /* lcore ID list for kthreads */ 37 | struct rte_kni *kni[KNI_MAX_KTHREAD]; /* KNI context pointers */ 38 | } __rte_cache_aligned; 39 | 40 | struct kni_port_params **kni_port_params_array; 41 | 42 | /* Structure type for recording kni interface specific stats */ 43 | struct kni_interface_stats 44 | { 45 | /* number of pkts received from NIC, and sent to KNI */ 46 | uint64_t rx_packets; 47 | 48 | /* number of pkts received from NIC, but failed to send to KNI */ 49 | uint64_t rx_dropped; 50 | 51 | /* number of pkts received from KNI, and sent to NIC */ 52 | uint64_t tx_packets; 53 | 54 | /* number of pkts received from KNI, but failed to send to NIC */ 55 | uint64_t tx_dropped; 56 | }; 57 | 58 | /* kni device statistics array */ 59 | struct kni_interface_stats kni_stats[RTE_MAX_ETHPORTS]; 60 | 61 | int kni_stop = 0; 62 | 63 | /* Print out statistics on packets handled */ 64 | void kni_print_stats(void) 65 | { 66 | uint8_t i; 67 | 68 | printf("\n**KNI example application statistics**\n" 69 | "====== ============== ============ ============ ============ ============\n" 70 | " Port Lcore(RX/TX) rx_packets rx_dropped tx_packets tx_dropped\n" 71 | "------ -------------- ------------ ------------ ------------ ------------\n"); 72 | for (i = 0; i < RTE_MAX_ETHPORTS; i++) 73 | { 74 | if (!kni_port_params_array[i]) 75 | continue; 76 | 77 | printf("%7d %10u/%2u %13" PRIu64 " %13" PRIu64 " %13" PRIu64 " " 78 | "%13" PRIu64 "\n", 79 | i, 80 | kni_port_params_array[i]->lcore_rx, 81 | kni_port_params_array[i]->lcore_tx, 82 | kni_stats[i].rx_packets, 83 | kni_stats[i].rx_dropped, 84 | kni_stats[i].tx_packets, 85 | kni_stats[i].tx_dropped); 86 | } 87 | printf("====== ============== ============ ============ 
============ ============\n"); 88 | } 89 | 90 | static void 91 | kni_burst_free_mbufs(struct rte_mbuf **pkts, unsigned num) 92 | { 93 | unsigned i; 94 | 95 | if (pkts == NULL) 96 | return; 97 | 98 | for (i = 0; i < num; i++) 99 | { 100 | rte_pktmbuf_free(pkts[i]); 101 | pkts[i] = NULL; 102 | } 103 | } 104 | 105 | /** 106 | * Interface to burst rx and enqueue mbufs into rx_q 107 | */ 108 | int kni_ingress(struct kni_port_params *p) 109 | { 110 | uint8_t i, port_id; 111 | unsigned nb_rx, num; 112 | uint32_t nb_kni; 113 | struct rte_mbuf *pkts_burst[PKT_BURST_SZ]; 114 | 115 | if (p == NULL) 116 | return 0; 117 | 118 | nb_kni = p->nb_kni; 119 | port_id = p->port_id; 120 | 121 | while (!kni_stop) 122 | { 123 | for (i = 0; i < nb_kni; i++) 124 | { 125 | /* Burst rx from eth */ 126 | nb_rx = rte_eth_rx_burst(port_id, 0, pkts_burst, PKT_BURST_SZ); 127 | if (unlikely(nb_rx > PKT_BURST_SZ)) 128 | { 129 | RTE_LOG(ERR, APP, "Error receiving from eth\n"); 130 | return -1; 131 | } 132 | /* Burst tx to kni */ 133 | num = rte_kni_tx_burst(p->kni[i], pkts_burst, nb_rx); 134 | kni_stats[port_id].rx_packets += num; 135 | 136 | rte_kni_handle_request(p->kni[i]); 137 | if (unlikely(num < nb_rx)) 138 | { 139 | /* Free mbufs not tx to kni interface */ 140 | kni_burst_free_mbufs(&pkts_burst[num], nb_rx - num); 141 | kni_stats[port_id].rx_dropped += nb_rx - num; 142 | } 143 | } 144 | } 145 | 146 | return 0; 147 | } 148 | 149 | /** 150 | * Interface to dequeue mbufs from tx_q and burst tx 151 | */ 152 | int kni_egress(struct kni_port_params *p) 153 | { 154 | uint8_t i, port_id; 155 | unsigned nb_tx, num; 156 | uint32_t nb_kni; 157 | struct rte_mbuf *pkts_burst[PKT_BURST_SZ]; 158 | 159 | if (p == NULL) 160 | return -1; 161 | 162 | nb_kni = p->nb_kni; 163 | port_id = p->port_id; 164 | 165 | while (!kni_stop) 166 | { 167 | for (i = 0; i < nb_kni; i++) 168 | { 169 | /* Burst rx from kni */ 170 | num = rte_kni_rx_burst(p->kni[i], pkts_burst, PKT_BURST_SZ); 171 | if (unlikely(num > PKT_BURST_SZ)) 
172 | { 173 | RTE_LOG(ERR, APP, "Error receiving from KNI\n"); 174 | return -1; 175 | } 176 | /* Burst tx to eth */ 177 | nb_tx = rte_eth_tx_burst(port_id, 0, pkts_burst, (uint16_t)num); 178 | kni_stats[port_id].tx_packets += nb_tx; 179 | if (unlikely(nb_tx < num)) 180 | { 181 | /* Free mbufs not tx to NIC */ 182 | kni_burst_free_mbufs(&pkts_burst[nb_tx], num - nb_tx); 183 | kni_stats[port_id].tx_dropped += num - nb_tx; 184 | } 185 | } 186 | } 187 | 188 | return 0; 189 | } 190 | -------------------------------------------------------------------------------- /rte/examples/l2fwd/l2fwd_core.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | 13 | #define MAX_PKT_BURST 32 14 | #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */ 15 | 16 | int l2fwd_force_quit = 0; 17 | 18 | /* mask of enabled ports */ 19 | uint32_t l2fwd_enabled_port_mask = 0; 20 | 21 | /* ethernet addresses of ports */ 22 | struct ether_addr l2fwd_ports_eth_addr[RTE_MAX_ETHPORTS]; 23 | 24 | /* list of enabled ports */ 25 | uint32_t l2fwd_dst_ports[RTE_MAX_ETHPORTS]; 26 | 27 | struct rte_eth_dev_tx_buffer *l2fwd_tx_buffers[RTE_MAX_ETHPORTS]; 28 | 29 | /* Per-port statistics struct */ 30 | struct l2fwd_port_statistics 31 | { 32 | uint64_t tx; 33 | uint64_t rx; 34 | uint64_t dropped; 35 | } __rte_cache_aligned; 36 | 37 | struct l2fwd_port_statistics port_statistics[RTE_MAX_ETHPORTS]; 38 | 39 | int64_t l2fwd_timer_period; /* default period is 10 seconds */ 40 | 41 | /* Print out statistics on packets dropped */ 42 | static void 43 | print_stats(void) 44 | { 45 | uint64_t total_packets_dropped, total_packets_tx, total_packets_rx; 46 | unsigned portid; 47 | 48 | total_packets_dropped = 0; 49 | total_packets_tx = 0; 50 | total_packets_rx = 0; 51 | 52 | const char clr[] = {27, '[', '2', 'J', '\0'}; 53 | const char topLeft[] = {27, 
'[', '1', ';', '1', 'H', '\0'}; 54 | 55 | /* Clear screen and move to top left */ 56 | printf("%s%s", clr, topLeft); 57 | 58 | printf("\nPort statistics ===================================="); 59 | 60 | for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) 61 | { 62 | /* skip disabled ports */ 63 | if ((l2fwd_enabled_port_mask & (1 << portid)) == 0) 64 | continue; 65 | 66 | printf("\nStatistics for port %u ------------------------------" 67 | "\nPackets sent: %24" PRIu64 68 | "\nPackets received: %20" PRIu64 69 | "\nPackets dropped: %21" PRIu64, 70 | portid, 71 | port_statistics[portid].tx, 72 | port_statistics[portid].rx, 73 | port_statistics[portid].dropped); 74 | 75 | total_packets_dropped += port_statistics[portid].dropped; 76 | total_packets_tx += port_statistics[portid].tx; 77 | total_packets_rx += port_statistics[portid].rx; 78 | } 79 | printf("\nAggregate statistics ===============================" 80 | "\nTotal packets sent: %18" PRIu64 81 | "\nTotal packets received: %14" PRIu64 82 | "\nTotal packets dropped: %15" PRIu64, 83 | total_packets_tx, 84 | total_packets_rx, 85 | total_packets_dropped); 86 | printf("\n====================================================\n"); 87 | } 88 | 89 | static void 90 | l2fwd_simple_forward(struct rte_mbuf *m, unsigned portid) 91 | { 92 | struct ether_hdr *eth; 93 | void *tmp; 94 | unsigned dst_port; 95 | int sent; 96 | struct rte_eth_dev_tx_buffer *buffer; 97 | 98 | dst_port = l2fwd_dst_ports[portid]; 99 | eth = rte_pktmbuf_mtod(m, struct ether_hdr *); 100 | 101 | /* 02:00:00:00:00:xx */ 102 | tmp = ð->d_addr.addr_bytes[0]; 103 | *((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dst_port << 40); 104 | 105 | /* src addr */ 106 | ether_addr_copy(&l2fwd_ports_eth_addr[dst_port], ð->s_addr); 107 | 108 | buffer = l2fwd_tx_buffers[dst_port]; 109 | sent = rte_eth_tx_buffer(dst_port, 0, buffer, m); 110 | if (sent) 111 | port_statistics[dst_port].tx += sent; 112 | } 113 | 114 | int l2fwd_main_loop(uint32_t *rx_port_list, unsigned 
n_rx_port) 115 | { 116 | unsigned lcore_id = rte_lcore_id(); 117 | uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0; 118 | const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US; 119 | unsigned portid, nb_rx; 120 | struct rte_eth_dev_tx_buffer *buffer; 121 | struct rte_mbuf *pkts_burst[MAX_PKT_BURST], *m; 122 | int sent, i, j; 123 | 124 | while (!l2fwd_force_quit) 125 | { 126 | cur_tsc = rte_rdtsc(); 127 | 128 | /* 129 | * TX burst queue drain 130 | */ 131 | diff_tsc = cur_tsc - prev_tsc; 132 | 133 | if (unlikely(diff_tsc > drain_tsc)) 134 | { 135 | for (i = 0; i < (int)n_rx_port; i++) 136 | { 137 | 138 | portid = l2fwd_dst_ports[rx_port_list[i]]; 139 | buffer = l2fwd_tx_buffers[portid]; 140 | 141 | sent = rte_eth_tx_buffer_flush(portid, 0, buffer); 142 | if (sent) 143 | port_statistics[portid].tx += sent; 144 | } 145 | 146 | /* if timer is enabled */ 147 | if (l2fwd_timer_period > 0) 148 | { 149 | 150 | /* advance the timer */ 151 | timer_tsc += diff_tsc; 152 | 153 | /* if timer has reached its timeout */ 154 | if (unlikely(timer_tsc >= (uint64_t)l2fwd_timer_period)) 155 | { 156 | 157 | /* do this only on master core */ 158 | if (lcore_id == rte_get_master_lcore()) 159 | { 160 | print_stats(); 161 | /* reset the timer */ 162 | timer_tsc = 0; 163 | } 164 | } 165 | } 166 | 167 | prev_tsc = cur_tsc; 168 | } 169 | 170 | /* 171 | * Read packet from RX queues 172 | */ 173 | for (i = 0; i < (int)n_rx_port; i++) 174 | { 175 | 176 | portid = rx_port_list[i]; 177 | nb_rx = rte_eth_rx_burst((uint8_t)portid, 0, pkts_burst, MAX_PKT_BURST); 178 | 179 | port_statistics[portid].rx += nb_rx; 180 | 181 | for (j = 0; j < (int)nb_rx; j++) 182 | { 183 | m = pkts_burst[j]; 184 | rte_prefetch0(rte_pktmbuf_mtod(m, void *)); 185 | l2fwd_simple_forward(m, portid); 186 | } 187 | } 188 | } 189 | 190 | return 0; 191 | } 192 | -------------------------------------------------------------------------------- /rte/examples/l2fwd/main.rs: 
-------------------------------------------------------------------------------- 1 | #[macro_use] 2 | extern crate log; 3 | extern crate getopts; 4 | extern crate libc; 5 | extern crate nix; 6 | extern crate pretty_env_logger; 7 | extern crate rte; 8 | 9 | use std::clone::Clone; 10 | use std::env; 11 | use std::io; 12 | use std::io::prelude::*; 13 | use std::mem; 14 | use std::path::Path; 15 | use std::process; 16 | use std::str::FromStr; 17 | 18 | use nix::sys::signal; 19 | 20 | use rte::ethdev::{EthDevice, EthDeviceInfo, TxBuffer}; 21 | use rte::ffi::RTE_MAX_ETHPORTS; 22 | use rte::lcore::RTE_MAX_LCORE; 23 | use rte::memory::AsMutRef; 24 | use rte::*; 25 | 26 | const EXIT_FAILURE: i32 = -1; 27 | 28 | const MAX_PKT_BURST: usize = 32; 29 | 30 | const MAX_RX_QUEUE_PER_LCORE: u32 = 16; 31 | 32 | // A tsc-based timer responsible for triggering statistics printout 33 | const TIMER_MILLISECOND: i64 = 2000000; /* around 1ms at 2 Ghz */ 34 | const MAX_TIMER_PERIOD: u32 = 86400; /* 1 day max */ 35 | 36 | const NB_MBUF: u32 = 2048; 37 | 38 | // Configurable number of RX/TX ring descriptors 39 | 40 | const RTE_TEST_RX_DESC_DEFAULT: u16 = 128; 41 | const RTE_TEST_TX_DESC_DEFAULT: u16 = 512; 42 | 43 | struct LcoreQueueConf { 44 | n_rx_port: u32, 45 | rx_port_list: [u32; MAX_RX_QUEUE_PER_LCORE as usize], 46 | } 47 | 48 | struct Conf { 49 | nb_rxd: u16, 50 | nb_txd: u16, 51 | 52 | queue_conf: [LcoreQueueConf; RTE_MAX_LCORE as usize], 53 | } 54 | 55 | impl Default for Conf { 56 | fn default() -> Self { 57 | let mut conf: Self = unsafe { mem::zeroed() }; 58 | 59 | conf.nb_rxd = RTE_TEST_RX_DESC_DEFAULT; 60 | conf.nb_txd = RTE_TEST_TX_DESC_DEFAULT; 61 | 62 | return conf; 63 | } 64 | } 65 | 66 | // display usage 67 | fn print_usage(program: &String, opts: getopts::Options) -> ! 
{ 68 | let brief = format!("Usage: {} [EAL options] -- [options]", program); 69 | 70 | print!("{}", opts.usage(&brief)); 71 | 72 | process::exit(-1); 73 | } 74 | 75 | // Parse the argument given in the command line of the application 76 | fn parse_args(args: &Vec) -> (u32, u32, u32) { 77 | let mut opts = getopts::Options::new(); 78 | let program = args[0].clone(); 79 | 80 | opts.optopt("p", "", "hexadecimal bitmask of ports to configure", "PORTMASK"); 81 | opts.optopt("q", "", "number of queue (=ports) per lcore (default is 1)", "NQ"); 82 | opts.optopt( 83 | "T", 84 | "", 85 | "statistics will be refreshed each PERIOD seconds (0 to disable, 10 default, \ 86 | 86400 maximum)", 87 | "PERIOD", 88 | ); 89 | opts.optflag("h", "help", "print this help menu"); 90 | 91 | let matches = match opts.parse(&args[1..]) { 92 | Ok(m) => m, 93 | Err(err) => { 94 | println!("Invalid L2FWD arguments, {}", err); 95 | 96 | print_usage(&program, opts); 97 | } 98 | }; 99 | 100 | if matches.opt_present("h") { 101 | print_usage(&program, opts); 102 | } 103 | 104 | let mut enabled_port_mask: u32 = 0; // mask of enabled ports 105 | let mut rx_queue_per_lcore: u32 = 1; 106 | let mut timer_period_seconds: u32 = 10; // default period is 10 seconds 107 | 108 | if let Some(arg) = matches.opt_str("p") { 109 | match u32::from_str_radix(arg.as_str(), 16) { 110 | Ok(mask) if mask != 0 => enabled_port_mask = mask, 111 | _ => { 112 | println!("invalid portmask, {}", arg); 113 | 114 | print_usage(&program, opts); 115 | } 116 | } 117 | } 118 | 119 | if let Some(arg) = matches.opt_str("q") { 120 | match u32::from_str(arg.as_str()) { 121 | Ok(n) if 0 < n && n < MAX_RX_QUEUE_PER_LCORE => rx_queue_per_lcore = n, 122 | _ => { 123 | println!("invalid queue number, {}", arg); 124 | 125 | print_usage(&program, opts); 126 | } 127 | } 128 | } 129 | 130 | if let Some(arg) = matches.opt_str("T") { 131 | match u32::from_str(arg.as_str()) { 132 | Ok(t) if 0 < t && t < MAX_TIMER_PERIOD => timer_period_seconds = t, 133 
| _ => { 134 | println!("invalid timer period, {}", arg); 135 | 136 | print_usage(&program, opts); 137 | } 138 | } 139 | } 140 | 141 | (enabled_port_mask, rx_queue_per_lcore, timer_period_seconds) 142 | } 143 | 144 | // Check the link status of all ports in up to 9s, and print them finally 145 | fn check_all_ports_link_status(enabled_devices: &Vec) { 146 | print!("Checking link status"); 147 | 148 | const CHECK_INTERVAL: u32 = 100; 149 | const MAX_CHECK_TIME: usize = 90; 150 | 151 | for _ in 0..MAX_CHECK_TIME { 152 | if unsafe { l2fwd_force_quit != 0 } { 153 | break; 154 | } 155 | 156 | if enabled_devices.iter().all(|dev| dev.link_nowait().up) { 157 | break; 158 | } 159 | 160 | delay_ms(CHECK_INTERVAL); 161 | 162 | print!("."); 163 | 164 | io::stdout().flush().unwrap(); 165 | } 166 | 167 | println!("Done:"); 168 | 169 | for dev in enabled_devices { 170 | let link = dev.link(); 171 | 172 | if link.up { 173 | println!( 174 | " Port {} Link Up - speed {} Mbps - {}", 175 | dev.portid(), 176 | link.speed, 177 | if link.duplex { "full-duplex" } else { "half-duplex" } 178 | ) 179 | } else { 180 | println!(" Port {} Link Down", dev.portid()); 181 | } 182 | } 183 | } 184 | 185 | #[link(name = "l2fwd_core")] 186 | extern "C" { 187 | static mut l2fwd_force_quit: libc::c_int; 188 | 189 | static mut l2fwd_enabled_port_mask: libc::uint32_t; 190 | 191 | static mut l2fwd_ports_eth_addr: [[libc::uint8_t; 6usize]; RTE_MAX_ETHPORTS as usize]; 192 | 193 | static mut l2fwd_dst_ports: [libc::uint32_t; RTE_MAX_ETHPORTS as usize]; 194 | 195 | static mut l2fwd_tx_buffers: [*mut rte::ffi::rte_eth_dev_tx_buffer; RTE_MAX_ETHPORTS as usize]; 196 | 197 | static mut l2fwd_timer_period: libc::int64_t; 198 | 199 | fn l2fwd_main_loop(rx_port_list: *const libc::uint32_t, n_rx_port: libc::c_uint) -> libc::c_int; 200 | } 201 | 202 | fn l2fwd_launch_one_lcore(conf: Option<&Conf>) -> i32 { 203 | let lcore_id = lcore::current().unwrap(); 204 | let qconf = &conf.unwrap().queue_conf[*lcore_id as usize]; 
205 | 206 | if qconf.n_rx_port == 0 { 207 | info!("lcore {} has nothing to do", lcore_id); 208 | 209 | return -1; 210 | } 211 | 212 | info!("entering main loop on lcore {}", lcore_id); 213 | 214 | for portid in &qconf.rx_port_list[..qconf.n_rx_port as usize] { 215 | info!(" -- lcoreid={} portid={}", lcore_id, portid); 216 | } 217 | 218 | unsafe { l2fwd_main_loop(qconf.rx_port_list.as_ptr(), qconf.n_rx_port) } 219 | } 220 | 221 | extern "C" fn handle_sigint(sig: libc::c_int) { 222 | match signal::Signal::from_c_int(sig).unwrap() { 223 | signal::SIGINT | signal::SIGTERM => unsafe { 224 | println!("Signal {} received, preparing to exit...", sig); 225 | 226 | l2fwd_force_quit = 1; 227 | }, 228 | _ => info!("unexpect signo: {}", sig), 229 | } 230 | } 231 | 232 | fn handle_signals() -> nix::Result<()> { 233 | let sig_action = signal::SigAction::new( 234 | signal::SigHandler::Handler(handle_sigint), 235 | signal::SaFlags::empty(), 236 | signal::SigSet::empty(), 237 | ); 238 | unsafe { 239 | try!(signal::sigaction(signal::SIGINT, &sig_action)); 240 | try!(signal::sigaction(signal::SIGTERM, &sig_action)); 241 | } 242 | 243 | Ok(()) 244 | } 245 | 246 | fn prepare_args(args: &mut Vec) -> (Vec, Vec) { 247 | let program = String::from(Path::new(&args[0]).file_name().unwrap().to_str().unwrap()); 248 | 249 | if let Some(pos) = args.iter().position(|arg| arg == "--") { 250 | let (eal_args, opt_args) = args.split_at_mut(pos); 251 | 252 | opt_args[0] = program; 253 | 254 | (eal_args.to_vec(), opt_args.to_vec()) 255 | } else { 256 | (args[..1].to_vec(), args.clone()) 257 | } 258 | } 259 | 260 | fn main() { 261 | pretty_env_logger::init(); 262 | 263 | handle_signals().expect("fail to handle signals"); 264 | 265 | let mut args: Vec = env::args().collect(); 266 | 267 | let (eal_args, opt_args) = prepare_args(&mut args); 268 | 269 | debug!("eal args: {:?}, l2fwd args: {:?}", eal_args, opt_args); 270 | 271 | let (enabled_port_mask, rx_queue_per_lcore, timer_period_seconds) = 
parse_args(&opt_args); 272 | 273 | unsafe { 274 | l2fwd_enabled_port_mask = enabled_port_mask; 275 | l2fwd_timer_period = timer_period_seconds as i64 * TIMER_MILLISECOND * 1000; 276 | } 277 | 278 | // init EAL 279 | eal::init(&eal_args).expect("fail to initial EAL"); 280 | 281 | // create the mbuf pool 282 | let mut l2fwd_pktmbuf_pool = mbuf::pool_create( 283 | "mbuf_pool", 284 | NB_MBUF, 285 | 32, 286 | 0, 287 | mbuf::RTE_MBUF_DEFAULT_BUF_SIZE as u16, 288 | rte::socket_id() as i32, 289 | ) 290 | .unwrap(); 291 | 292 | let enabled_devices: Vec = ethdev::devices() 293 | .filter(|dev| ((1 << dev.portid()) & enabled_port_mask) != 0) 294 | .collect(); 295 | 296 | if enabled_devices.is_empty() { 297 | eal::exit(EXIT_FAILURE, "All available ports are disabled. Please set portmask.\n"); 298 | } 299 | 300 | let mut last_port = 0; 301 | let mut nb_ports_in_mask = 0; 302 | 303 | // Each logical core is assigned a dedicated TX queue on each port. 304 | for dev in &enabled_devices { 305 | let portid = dev.portid(); 306 | 307 | if (nb_ports_in_mask % 2) != 0 { 308 | unsafe { 309 | l2fwd_dst_ports[portid as usize] = last_port as u32; 310 | l2fwd_dst_ports[last_port as usize] = portid as u32; 311 | } 312 | } else { 313 | last_port = portid; 314 | } 315 | 316 | nb_ports_in_mask += 1; 317 | 318 | let info = dev.info(); 319 | 320 | debug!("found port #{} with `{}` drive", portid, info.driver_name()); 321 | } 322 | 323 | if (nb_ports_in_mask % 2) != 0 { 324 | println!("Notice: odd number of ports in portmask."); 325 | 326 | unsafe { 327 | l2fwd_dst_ports[last_port as usize] = last_port as u32; 328 | } 329 | } 330 | 331 | let mut conf = Conf::default(); 332 | 333 | let mut rx_lcore_id = lcore::id(0); 334 | 335 | // Initialize the port/queue configuration of each logical core 336 | for dev in &enabled_devices { 337 | let portid = dev.portid(); 338 | 339 | loop { 340 | if let Some(id) = rx_lcore_id.next() { 341 | if conf.queue_conf[*rx_lcore_id as usize].n_rx_port == rx_queue_per_lcore 
{ 342 | rx_lcore_id = id 343 | } 344 | } 345 | 346 | break; 347 | } 348 | 349 | // Assigned a new logical core in the loop above. 350 | let qconf = &mut conf.queue_conf[*rx_lcore_id as usize]; 351 | 352 | qconf.rx_port_list[qconf.n_rx_port as usize] = portid as u32; 353 | qconf.n_rx_port += 1; 354 | 355 | println!("Lcore {}: RX port {}", rx_lcore_id, portid); 356 | } 357 | 358 | let port_conf = ethdev::EthConf::default(); 359 | 360 | // Initialise each port 361 | for dev in &enabled_devices { 362 | let portid = dev.portid() as usize; 363 | 364 | // init port 365 | print!("Initializing port {}... ", portid); 366 | 367 | dev.configure(1, 1, &port_conf) 368 | .expect(&format!("fail to configure device: port={}", portid)); 369 | 370 | let mac_addr = dev.mac_addr(); 371 | 372 | unsafe { 373 | l2fwd_ports_eth_addr[portid] = *mac_addr.octets(); 374 | } 375 | 376 | // init one RX queue 377 | dev.rx_queue_setup(0, conf.nb_rxd, None, &mut l2fwd_pktmbuf_pool) 378 | .expect(&format!("fail to setup device rx queue: port={}", portid)); 379 | 380 | // init one TX queue on each port 381 | dev.tx_queue_setup(0, conf.nb_txd, None) 382 | .expect(&format!("fail to setup device tx queue: port={}", portid)); 383 | 384 | // Initialize TX buffers 385 | let buf = ethdev::alloc_buffer(MAX_PKT_BURST, dev.socket_id()) 386 | .as_mut_ref() 387 | .expect(&format!("fail to allocate buffer for tx: port={}", portid)); 388 | 389 | buf.count_err_packets() 390 | .expect(&format!("failt to set error callback for tx buffer: port={}", portid)); 391 | 392 | unsafe { 393 | l2fwd_tx_buffers[portid] = buf; 394 | } 395 | 396 | // Start device 397 | dev.start().expect(&format!("fail to start device: port={}", portid)); 398 | 399 | println!("Done: "); 400 | 401 | dev.promiscuous_enable(); 402 | 403 | println!( 404 | " Port {}, MAC address: {} (promiscuous {})", 405 | portid, 406 | mac_addr, 407 | dev.is_promiscuous_enabled() 408 | .map(|enabled| if enabled { "enabled" } else { "disabled" }) 409 | 
.expect(&format!("fail to enable promiscuous mode for device: port={}", portid)) 410 | ); 411 | } 412 | 413 | check_all_ports_link_status(&enabled_devices); 414 | 415 | // launch per-lcore init on every lcore 416 | launch::mp_remote_launch(l2fwd_launch_one_lcore, Some(&conf), false).unwrap(); 417 | 418 | launch::mp_wait_lcore(); 419 | 420 | for dev in &enabled_devices { 421 | print!("Closing port {}...", dev.portid()); 422 | dev.stop(); 423 | dev.close(); 424 | println!(" Done"); 425 | 426 | if let Some(buf) = (unsafe { l2fwd_tx_buffers[dev.portid() as usize] }).as_mut_ref() { 427 | buf.free(); 428 | } 429 | } 430 | 431 | println!("Bye..."); 432 | } 433 | -------------------------------------------------------------------------------- /rte/runtest.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -e 4 | 5 | sudo RUST_BACKTRACE=1 RUST_LOG=debug,rustc=warn,cargo=warn RTE_SDK=/home/flier/dpdk-16.04 cargo test 6 | -------------------------------------------------------------------------------- /rte/src/arp.rs: -------------------------------------------------------------------------------- 1 | use ffi; 2 | 3 | pub use ffi::{ 4 | ARP_HRD_ETHER, ARP_OP_INVREPLY, ARP_OP_INVREQUEST, ARP_OP_REPLY, ARP_OP_REQUEST, ARP_OP_REVREPLY, ARP_OP_REVREQUEST, 5 | }; 6 | 7 | /// ARP header IPv4 payload. 8 | pub type ArpIpv4 = ffi::arp_ipv4; 9 | 10 | /// ARP header. 11 | pub type ArpHdr = ffi::arp_hdr; 12 | -------------------------------------------------------------------------------- /rte/src/bond.rs: -------------------------------------------------------------------------------- 1 | use std::mem; 2 | 3 | use ffi; 4 | 5 | use errors::Result; 6 | use ethdev; 7 | use ether; 8 | use memory::SocketId; 9 | 10 | /// Supported modes of operation of link bonding library 11 | #[repr(u8)] 12 | #[derive(Copy, Clone, Eq, PartialEq, FromPrimitive, ToPrimitive)] 13 | pub enum BondMode { 14 | /// Round Robin (Mode 0). 
15 | /// 16 | /// In this mode all transmitted packets will be balanced equally across all 17 | /// active slaves of the bonded in a round robin fashion. 18 | /// 19 | RouncRobin = ffi::BONDING_MODE_ROUND_ROBIN as u8, 20 | 21 | /// Active Backup (Mode 1). 22 | /// 23 | /// In this mode all packets transmitted will be transmitted on the primary 24 | /// slave until such point as the primary slave is no longer available and then 25 | /// transmitted packets will be sent on the next available slaves. The primary 26 | /// slave can be defined by the user but defaults to the first active slave 27 | /// available if not specified. 28 | /// 29 | ActiveBackup = ffi::BONDING_MODE_ACTIVE_BACKUP as u8, 30 | 31 | /// Balance (Mode 2). 32 | /// 33 | /// In this mode all packets transmitted will be balanced across the available 34 | /// slaves using one of three available transmit policies - l2, l2+3 or l3+4. 35 | /// See BALANCE_XMIT_POLICY macros definitions for further details on transmit 36 | /// policies. 37 | /// 38 | Balance = ffi::BONDING_MODE_BALANCE as u8, 39 | 40 | /// Broadcast (Mode 3). 41 | /// 42 | /// In this mode all transmitted packets will be transmitted on all available 43 | /// active slaves of the bonded. 44 | /// 45 | Broadcast = ffi::BONDING_MODE_BROADCAST as u8, 46 | 47 | /// 802.3AD (Mode 4). 48 | /// 49 | /// This mode provides auto negotiation/configuration 50 | /// of peers and well as link status changes monitoring using out of band 51 | /// LACP (link aggregation control protocol) messages. For further details of 52 | /// LACP specification see the IEEE 802.3ad/802.1AX standards. It is also 53 | /// described here 54 | /// https://www.kernel.org/doc/Documentation/networking/bonding.txt. 
55 | /// 56 | /// Important Usage Notes: 57 | /// - for LACP mode to work the rx/tx burst functions must be invoked 58 | /// at least once every 100ms, otherwise the out-of-band LACP messages will not 59 | /// be handled with the expected latency and this may cause the link status to be 60 | /// incorrectly marked as down or failure to correctly negotiate with peers. 61 | /// - For optimal performance during initial handshaking the array of mbufs provided 62 | /// to rx_burst should be at least 2 times the slave count size. 63 | /// 64 | AutoNeg = ffi::BONDING_MODE_8023AD as u8, 65 | 66 | /// Adaptive TLB (Mode 5) 67 | /// 68 | /// This mode provides an adaptive transmit load balancing. It dynamically 69 | /// changes the transmitting slave, according to the computed load. Statistics 70 | /// are collected in 100ms intervals and scheduled every 10ms 71 | /// 72 | AdaptiveTLB = ffi::BONDING_MODE_TLB as u8, 73 | 74 | /// Adaptive Load Balancing (Mode 6) 75 | /// 76 | /// This mode includes adaptive TLB and receive load balancing (RLB). In RLB the 77 | /// bonding driver intercepts ARP replies send by local system and overwrites its 78 | /// source MAC address, so that different peers send data to the server on 79 | /// different slave interfaces. When local system sends ARP request, it saves IP 80 | /// information from it. When ARP reply from that peer is received, its MAC is 81 | /// stored, one of slave MACs assigned and ARP reply send to that peer. 
82 | /// 83 | AdaptiveLB = ffi::BONDING_MODE_ALB as u8, 84 | } 85 | 86 | impl From for BondMode { 87 | fn from(v: u8) -> Self { 88 | unsafe { mem::transmute(v) } 89 | } 90 | } 91 | 92 | /// Balance Mode Transmit Policies 93 | #[repr(u8)] 94 | #[derive(Copy, Clone, Eq, PartialEq)] 95 | pub enum TransmitPolicy { 96 | /// Layer 2 (Ethernet MAC) 97 | Layer2, 98 | /// Layer 2+3 (Ethernet MAC + IP Addresses) transmit load balancing 99 | Layer23, 100 | /// Layer 3+4 (IP Addresses + UDP Ports) transmit load balancing 101 | Layer34, 102 | } 103 | 104 | impl From for TransmitPolicy { 105 | fn from(v: u8) -> Self { 106 | unsafe { mem::transmute(v) } 107 | } 108 | } 109 | 110 | /// Create a bonded rte_eth_dev device 111 | pub fn create(name: &str, mode: BondMode, socket_id: SocketId) -> Result { 112 | let port_id = unsafe { ffi::rte_eth_bond_create(try!(to_cptr!(name)), mode as u8, socket_id as u8) }; 113 | 114 | rte_check!(port_id; ok => { port_id as ethdev::PortId }) 115 | } 116 | 117 | /// Free a bonded rte_eth_dev device 118 | pub fn free(name: &str) -> Result<()> { 119 | rte_check!(unsafe { ffi::rte_eth_bond_free(try!(to_cptr!(name))) }) 120 | } 121 | 122 | pub trait BondedDevice { 123 | /// Add a rte_eth_dev device as a slave to the bonded device 124 | fn add_slave(&self, slave: ethdev::PortId) -> Result<&Self>; 125 | 126 | /// Remove a slave rte_eth_dev device from the bonded device 127 | fn remove_slave(&self, slave: ethdev::PortId) -> Result<&Self>; 128 | 129 | /// Get link bonding mode of bonded device 130 | fn mode(&self) -> Result; 131 | 132 | /// Set link bonding mode of bonded device 133 | fn set_mode(&self, mode: BondMode) -> Result<&Self>; 134 | 135 | /// Get primary slave of bonded device 136 | fn primary(&self) -> Result; 137 | 138 | /// Set slave rte_eth_dev as primary slave of bonded device 139 | fn set_primary(&self, dev: ethdev::PortId) -> Result<&Self>; 140 | 141 | /// Populate an array with list of the slaves port id's of the bonded device 142 | fn 
slaves(&self) -> Result>; 143 | 144 | /// Populate an array with list of the active slaves port id's of the bonded device. 145 | fn active_slaves(&self) -> Result>; 146 | 147 | /// Set explicit MAC address to use on bonded device and it's slaves. 148 | fn set_mac_addr(&self, mac_addr: ðer::EtherAddr) -> Result<&Self>; 149 | 150 | /// Reset bonded device to use MAC from primary slave on bonded device and it's slaves. 151 | fn reset_mac_addr(&self) -> Result<&Self>; 152 | 153 | /// Get the transmit policy set on bonded device for balance mode operation 154 | fn xmit_policy(&self) -> Result; 155 | 156 | /// Set the transmit policy for bonded device to use when it is operating in balance mode, 157 | /// this parameter is otherwise ignored in other modes of operation. 158 | fn set_xmit_policy(&self, policy: TransmitPolicy) -> Result<&Self>; 159 | } 160 | 161 | impl BondedDevice for ethdev::PortId { 162 | fn add_slave(&self, slave: ethdev::PortId) -> Result<&Self> { 163 | rte_check!(unsafe { 164 | ffi::rte_eth_bond_slave_add(*self, slave) 165 | }; ok => { self }) 166 | } 167 | 168 | fn remove_slave(&self, slave: ethdev::PortId) -> Result<&Self> { 169 | rte_check!(unsafe { 170 | ffi::rte_eth_bond_slave_remove(*self, slave) 171 | }; ok => { self }) 172 | } 173 | 174 | fn mode(&self) -> Result { 175 | let mode = unsafe { ffi::rte_eth_bond_mode_get(*self) }; 176 | 177 | rte_check!(mode; ok => { BondMode::from(mode as u8) }) 178 | } 179 | 180 | fn set_mode(&self, mode: BondMode) -> Result<&Self> { 181 | rte_check!(unsafe { 182 | ffi::rte_eth_bond_mode_set(*self, mode as u8) 183 | }; ok => { self }) 184 | } 185 | 186 | fn primary(&self) -> Result { 187 | let portid = unsafe { ffi::rte_eth_bond_primary_get(*self) }; 188 | 189 | rte_check!(portid; ok => { portid as ethdev::PortId }) 190 | } 191 | 192 | fn set_primary(&self, dev: ethdev::PortId) -> Result<&Self> { 193 | rte_check!(unsafe { 194 | ffi::rte_eth_bond_primary_set(*self, dev) 195 | }; ok => { self }) 196 | } 197 | 198 
| fn slaves(&self) -> Result> { 199 | let mut slaves = [0u16; ffi::RTE_MAX_ETHPORTS as usize]; 200 | 201 | let num = unsafe { ffi::rte_eth_bond_slaves_get(*self, slaves.as_mut_ptr(), slaves.len() as u16) }; 202 | 203 | rte_check!(num; ok => { 204 | Vec::from(&slaves[..num as usize]) 205 | }) 206 | } 207 | 208 | fn active_slaves(&self) -> Result> { 209 | let mut slaves = [0u16; ffi::RTE_MAX_ETHPORTS as usize]; 210 | 211 | let num = unsafe { ffi::rte_eth_bond_slaves_get(*self, slaves.as_mut_ptr(), slaves.len() as u16) }; 212 | 213 | rte_check!(num; ok => { 214 | Vec::from(&slaves[..num as usize]) 215 | }) 216 | } 217 | 218 | fn set_mac_addr(&self, mac_addr: ðer::EtherAddr) -> Result<&Self> { 219 | rte_check!(unsafe { 220 | ffi::rte_eth_bond_mac_address_set(*self, mac_addr.octets().as_ptr() as * mut _) 221 | }; ok => { self }) 222 | } 223 | 224 | fn reset_mac_addr(&self) -> Result<&Self> { 225 | rte_check!(unsafe { 226 | ffi::rte_eth_bond_mac_address_reset(*self) 227 | }; ok => { self }) 228 | } 229 | 230 | fn xmit_policy(&self) -> Result { 231 | let policy = unsafe { ffi::rte_eth_bond_xmit_policy_get(*self) }; 232 | 233 | rte_check!(policy; ok => { TransmitPolicy::from(policy as u8) }) 234 | } 235 | 236 | fn set_xmit_policy(&self, policy: TransmitPolicy) -> Result<&Self> { 237 | rte_check!(unsafe { 238 | ffi::rte_eth_bond_xmit_policy_set(*self, policy as u8) 239 | }; ok => { self }) 240 | } 241 | } 242 | -------------------------------------------------------------------------------- /rte/src/common/bitmap.rs: -------------------------------------------------------------------------------- 1 | use std::ptr::NonNull; 2 | 3 | use errors::{AsResult, Result}; 4 | use ffi; 5 | use utils::AsRaw; 6 | 7 | pub type Position = u32; 8 | pub type Slab = u64; 9 | 10 | pub type RawBitmap = ffi::rte_bitmap; 11 | pub type RawBitmapPtr = *mut ffi::rte_bitmap; 12 | 13 | /// RTE Bitmap 14 | /// 15 | /// The bitmap component provides a mechanism to manage large arrays of bits 16 | /// 
through bit get/set/clear and bit array scan operations. 17 | /// 18 | /// The bitmap scan operation is optimized for 64-bit CPUs using 64/128 byte cache 19 | /// lines. The bitmap is hierarchically organized using two arrays (array1 and 20 | /// array2), with each bit in array1 being associated with a full cache line 21 | /// (512/1024 bits) of bitmap bits, which are stored in array2: the bit in array1 22 | /// is set only when there is at least one bit set within its associated array2 23 | /// bits, otherwise the bit in array1 is cleared. The read and write operations 24 | /// for array1 and array2 are always done in slabs of 64 bits. 25 | /// 26 | /// This bitmap is not thread safe. For lock free operation on a specific bitmap 27 | /// instance, a single writer thread performing bit set/clear operations is 28 | /// allowed, only the writer thread can do bitmap scan operations, while there 29 | /// can be several reader threads performing bit get operations in parallel with 30 | /// the writer thread. When the use of locking primitives is acceptable, the 31 | /// serialization of the bit set/clear and bitmap scan operations needs to be 32 | /// enforced by the caller, while the bit get operation does not require locking 33 | /// the bitmap. 
34 | #[repr(transparent)] 35 | #[derive(Debug)] 36 | pub struct Bitmap(NonNull); 37 | 38 | impl Drop for Bitmap { 39 | fn drop(&mut self) { 40 | unsafe { 41 | ffi::_rte_bitmap_free(self.as_raw()); 42 | } 43 | } 44 | } 45 | 46 | impl AsRaw for Bitmap { 47 | type Raw = RawBitmap; 48 | 49 | fn as_raw(&self) -> *mut Self::Raw { 50 | self.0.as_ptr() 51 | } 52 | } 53 | 54 | impl Bitmap { 55 | /// Bitmap memory footprint calculation 56 | pub fn memory_footprint(bits: u32) -> u32 { 57 | unsafe { ffi::_rte_bitmap_get_memory_footprint(bits) } 58 | } 59 | 60 | /// Bitmap initialization 61 | pub fn init(bits: u32, mem: *mut u8, mem_size: u32) -> Result { 62 | unsafe { ffi::_rte_bitmap_init(bits, mem, mem_size) } 63 | .as_result() 64 | .map(Bitmap) 65 | } 66 | 67 | /// Bitmap reset 68 | pub fn reset(&mut self) { 69 | unsafe { ffi::_rte_bitmap_reset(self.as_raw()) } 70 | } 71 | 72 | /// Bitmap location prefetch into CPU L1 cache 73 | pub fn prefetch0(&self, pos: Position) { 74 | unsafe { ffi::_rte_bitmap_prefetch0(self.as_raw(), pos) } 75 | } 76 | 77 | /// Bitmap bit get 78 | pub fn get(&self, pos: Position) -> bool { 79 | unsafe { ffi::_rte_bitmap_get(self.as_raw(), pos) != 0 } 80 | } 81 | 82 | /// Bitmap bit set 83 | pub fn set(&mut self, pos: Position) { 84 | unsafe { ffi::_rte_bitmap_set(self.as_raw(), pos) } 85 | } 86 | 87 | /// Bitmap slab set 88 | pub fn set_slab(&mut self, pos: Position, slab: Slab) { 89 | unsafe { ffi::_rte_bitmap_set_slab(self.as_raw(), pos, slab) } 90 | } 91 | 92 | /// Bitmap bit clear 93 | pub fn clear(&mut self, pos: Position) { 94 | unsafe { ffi::_rte_bitmap_clear(self.as_raw(), pos) } 95 | } 96 | 97 | /// Bitmap scan (with automatic wrap-around) 98 | pub fn scan(&self) -> Option<(Position, Slab)> { 99 | let mut pos = 0; 100 | let mut slab = 0; 101 | 102 | if unsafe { ffi::_rte_bitmap_scan(self.as_raw(), &mut pos, &mut slab) } == 0 { 103 | None 104 | } else { 105 | Some((pos, slab)) 106 | } 107 | } 108 | } 109 | 
-------------------------------------------------------------------------------- /rte/src/common/byteorder.rs: -------------------------------------------------------------------------------- 1 | #[macro_export] 2 | macro_rules! rte_cpu_to_be_16 { 3 | ($n:expr) => { 4 | (($n >> 8) & 0xFF) | (($n & 0xFF) << 8) 5 | }; 6 | } 7 | -------------------------------------------------------------------------------- /rte/src/common/config.rs: -------------------------------------------------------------------------------- 1 | use std::mem; 2 | use std::ops::Deref; 3 | 4 | use ffi; 5 | 6 | use eal::ProcType; 7 | use lcore; 8 | use memzone; 9 | 10 | pub type RawMemConfig = ffi::rte_mem_config; 11 | pub type RawMemConfigPtr = *mut ffi::rte_mem_config; 12 | 13 | /// the structure for the memory configuration for the RTE. 14 | pub struct MemoryConfig(RawMemConfigPtr); 15 | 16 | impl From for MemoryConfig { 17 | fn from(p: RawMemConfigPtr) -> Self { 18 | MemoryConfig(p) 19 | } 20 | } 21 | 22 | impl Deref for MemoryConfig { 23 | type Target = RawMemConfig; 24 | 25 | fn deref(&self) -> &Self::Target { 26 | unsafe { &*self.0 } 27 | } 28 | } 29 | 30 | impl MemoryConfig { 31 | /// Number of channels (0 if unknown). 32 | pub fn nchannel(&self) -> u32 { 33 | self.nchannel 34 | } 35 | 36 | /// Number of ranks (0 if unknown). 37 | pub fn nrank(&self) -> u32 { 38 | self.nrank 39 | } 40 | 41 | /// Memzone descriptors. 42 | pub fn memzones(&self) -> Vec { 43 | (0..self.memzones.len) 44 | .map(|idx| unsafe { ffi::rte_fbarray_get(&self.memzones, idx) as *const _ }) 45 | .map(memzone::from_raw) 46 | .collect() 47 | } 48 | } 49 | 50 | pub type RawConfig = ffi::rte_config; 51 | pub type RawConfigPtr = *mut ffi::rte_config; 52 | 53 | /// The global RTE configuration structure. 
54 | pub struct Config(RawConfigPtr); 55 | 56 | impl From for Config { 57 | fn from(p: RawConfigPtr) -> Self { 58 | Config(p) 59 | } 60 | } 61 | 62 | impl Deref for Config { 63 | type Target = RawConfig; 64 | 65 | fn deref(&self) -> &Self::Target { 66 | unsafe { &*self.0 } 67 | } 68 | } 69 | 70 | impl Config { 71 | /// Id of the master lcore 72 | pub fn master_lcore(&self) -> lcore::Id { 73 | self.master_lcore.into() 74 | } 75 | 76 | /// Number of available logical cores. 77 | pub fn lcore_count(&self) -> usize { 78 | self.lcore_count as usize 79 | } 80 | 81 | /// Primary or secondary configuration 82 | pub fn process_type(&self) -> ProcType { 83 | unsafe { mem::transmute(self.process_type) } 84 | } 85 | 86 | /// State of cores. 87 | pub fn lcore_roles(&self) -> &'static [lcore::Role] { 88 | unsafe { &*(&self.lcore_role[..self.lcore_count as usize] as *const _ as *const [lcore::Role]) } 89 | } 90 | 91 | /// State of core. 92 | pub fn lcore_role(&self, lcore_id: lcore::Id) -> lcore::Role { 93 | self.lcore_role[usize::from(lcore_id)].into() 94 | } 95 | 96 | /// Memory configuration, which may be shared across multiple DPDK instances 97 | pub fn memory_config(&self) -> MemoryConfig { 98 | self.mem_config.into() 99 | } 100 | } 101 | 102 | /// Get the global configuration structure. 103 | pub fn config() -> Config { 104 | unsafe { ffi::rte_eal_get_configuration().into() } 105 | } 106 | -------------------------------------------------------------------------------- /rte/src/common/cycles.rs: -------------------------------------------------------------------------------- 1 | use std::time::Duration; 2 | 3 | use ffi; 4 | 5 | /// Get the measured frequency of the RDTSC counter 6 | #[inline] 7 | pub fn get_tsc_hz() -> u64 { 8 | unsafe { ffi::rte_get_tsc_hz() } 9 | } 10 | 11 | /// Wait at least us microseconds. 12 | #[inline] 13 | pub fn delay_us(us: u32) { 14 | unsafe { ffi::rte_delay_us.unwrap()(us) } 15 | } 16 | 17 | /// Wait at least ms milliseconds. 
18 | #[inline] 19 | pub fn delay_ms(ms: u32) { 20 | delay_us(ms * 1000) 21 | } 22 | 23 | #[inline] 24 | pub fn delay(d: Duration) { 25 | delay_us(d.as_secs() as u32 * 1_000_000 + d.subsec_micros()) 26 | } 27 | 28 | #[inline] 29 | pub fn rdtsc() -> u64 { 30 | unsafe { ffi::_rte_rdtsc() } 31 | } 32 | 33 | #[inline] 34 | pub fn rdtsc_precise() -> u64 { 35 | unsafe { ffi::_rte_rdtsc_precise() } 36 | } 37 | -------------------------------------------------------------------------------- /rte/src/common/debug.rs: -------------------------------------------------------------------------------- 1 | use ffi; 2 | 3 | /// Dump the stack of the calling core to the console. 4 | pub fn dump_stack() { 5 | unsafe { ffi::rte_dump_stack() } 6 | } 7 | 8 | /// Dump the registers of the calling core to the console. 9 | pub fn dump_registers() { 10 | unsafe { ffi::rte_dump_registers() } 11 | } 12 | 13 | /// Provide notification of a critical non-recoverable error and stop. 14 | #[macro_export] 15 | macro_rules! rte_panic { 16 | ($fmt:expr, $($args:tt)*) => ( 17 | unsafe { ffi::__rte_panic(concat!(file!(), ":", line!())), format_args!($fmt, $($args)*) } 18 | ) 19 | } 20 | -------------------------------------------------------------------------------- /rte/src/common/dev.rs: -------------------------------------------------------------------------------- 1 | //! RTE PMD Driver Registration Interface 2 | //! 3 | //! This file manages the list of device drivers. 4 | //! 5 | use std::ffi::CStr; 6 | use std::mem; 7 | use std::os::raw::{c_char, c_void}; 8 | 9 | use errors::{AsResult, Result}; 10 | use ffi::{self, rte_dev_event_type::*}; 11 | use utils::AsCString; 12 | 13 | /// The device event type. 
14 | #[repr(u32)] 15 | #[derive(Clone, Copy, Debug, PartialEq, Eq, FromPrimitive, ToPrimitive)] 16 | pub enum Event { 17 | /// device being added 18 | Add = RTE_DEV_EVENT_ADD, 19 | /// device being removed 20 | Remove = RTE_DEV_EVENT_REMOVE, 21 | /// max value of this enum 22 | Max = RTE_DEV_EVENT_MAX, 23 | } 24 | 25 | pub type RawDevice = ffi::rte_device; 26 | pub type RawDevicePtr = *mut ffi::rte_device; 27 | 28 | #[repr(transparent)] 29 | #[derive(Debug)] 30 | pub struct Device(RawDevicePtr); 31 | 32 | impl From for Device { 33 | fn from(p: RawDevicePtr) -> Self { 34 | Device(p) 35 | } 36 | } 37 | 38 | impl Device { 39 | /// Query status of a device. 40 | pub fn is_probed(&self) -> bool { 41 | unsafe { ffi::rte_dev_is_probed(self.0) != 0 } 42 | } 43 | 44 | /// Remove one device. 45 | /// 46 | /// In multi-process, it will request other processes to remove the same device. 47 | /// A failure, in any process, will rollback the action 48 | pub fn remove(&self) -> Result<()> { 49 | unsafe { ffi::rte_dev_remove(self.0) }.as_result().map(|_| ()) 50 | } 51 | } 52 | 53 | /// Hotplug add a given device to a specific bus. 54 | /// 55 | /// In multi-process, it will request other processes to add the same device. 56 | /// A failure, in any process, will rollback the action 57 | pub fn hotplug_add(busname: &str, devname: &str, drvargs: &str) -> Result<()> { 58 | let busname = busname.as_cstring(); 59 | let devname = devname.as_cstring(); 60 | let drvargs = drvargs.as_cstring(); 61 | 62 | unsafe { ffi::rte_eal_hotplug_add(busname.as_ptr(), devname.as_ptr(), drvargs.as_ptr()) } 63 | .as_result() 64 | .map(|_| ()) 65 | } 66 | 67 | /// Hotplug remove a given device from a specific bus. 68 | /// 69 | /// In multi-process, it will request other processes to remove the same device. 
70 | /// A failure, in any process, will rollback the action 71 | pub fn hotplug_remove(busname: &str, devname: &str) -> Result<()> { 72 | let busname = busname.as_cstring(); 73 | let devname = devname.as_cstring(); 74 | 75 | unsafe { ffi::rte_eal_hotplug_remove(busname.as_ptr(), devname.as_ptr()) } 76 | .as_result() 77 | .map(|_| ()) 78 | } 79 | 80 | pub type EventCallback = fn(devname: &str, Event, Option); 81 | 82 | struct EventContext { 83 | callback: EventCallback, 84 | arg: Option, 85 | } 86 | 87 | unsafe extern "C" fn event_stub(devname: *const c_char, event: ffi::rte_dev_event_type::Type, arg: *mut c_void) { 88 | let devname = CStr::from_ptr(devname); 89 | let ctxt = Box::from_raw(arg as *mut EventContext); 90 | 91 | (ctxt.callback)(devname.to_str().unwrap(), mem::transmute(event), ctxt.arg) 92 | } 93 | 94 | /// It registers the callback for the specific device. 95 | /// Multiple callbacks cal be registered at the same time. 96 | pub fn event_callback_register(devname: &str, callback: EventCallback, arg: Option) -> Result<()> { 97 | let devname = devname.as_cstring(); 98 | let ctxt = Box::into_raw(Box::new(EventContext:: { callback, arg })); 99 | 100 | unsafe { ffi::rte_dev_event_callback_register(devname.as_ptr(), Some(event_stub::), ctxt as *mut _) } 101 | .as_result() 102 | .map(|_| ()) 103 | } 104 | -------------------------------------------------------------------------------- /rte/src/common/devargs.rs: -------------------------------------------------------------------------------- 1 | use std::os::unix::io::AsRawFd; 2 | 3 | use cfile; 4 | 5 | use ffi; 6 | 7 | use errors::Result; 8 | 9 | /// Type of generic device 10 | #[repr(u32)] 11 | #[derive(Clone, Copy, Debug, PartialEq, FromPrimitive, ToPrimitive)] 12 | pub enum DevType { 13 | WhiteListed = ffi::rte_devtype::RTE_DEVTYPE_WHITELISTED_PCI, 14 | BlackListed = ffi::rte_devtype::RTE_DEVTYPE_BLACKLISTED_PCI, 15 | Virtual = ffi::rte_devtype::RTE_DEVTYPE_VIRTUAL, 16 | } 17 | 18 | /// Add a device to 
the user device list 19 | pub fn add(devtype: DevType, devargs: &str) -> Result<()> { 20 | rte_check!(unsafe { ffi::rte_devargs_add(devtype as u32, try!(to_cptr!(devargs))) }) 21 | } 22 | 23 | /// Count the number of user devices of a specified type 24 | pub fn type_count(devtype: DevType) -> usize { 25 | unsafe { ffi::rte_devargs_type_count(devtype as u32) as usize } 26 | } 27 | 28 | /// This function dumps the list of user device and their arguments. 29 | pub fn dump(s: &S) { 30 | if let Ok(f) = cfile::fdopen(s, "w") { 31 | unsafe { 32 | ffi::rte_devargs_dump(f.stream() as *mut ffi::FILE); 33 | } 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /rte/src/common/eal.rs: -------------------------------------------------------------------------------- 1 | use std::ffi::CStr; 2 | use std::fmt; 3 | use std::mem; 4 | use std::os::raw::c_char; 5 | use std::path::PathBuf; 6 | use std::ptr; 7 | 8 | use ffi::{self, rte_proc_type_t::*}; 9 | 10 | use errors::{AsResult, Result}; 11 | use utils::AsCString; 12 | 13 | pub use common::config; 14 | pub use launch::{mp_remote_launch, mp_wait_lcore, remote_launch}; 15 | 16 | #[repr(i32)] 17 | #[derive(Clone, Copy, Debug, PartialEq, FromPrimitive, ToPrimitive)] 18 | pub enum ProcType { 19 | Auto = RTE_PROC_AUTO, 20 | Primary = RTE_PROC_PRIMARY, 21 | Secondary = RTE_PROC_SECONDARY, 22 | Invalid = RTE_PROC_INVALID, 23 | } 24 | 25 | extern "C" { 26 | // fn vdrvinitfn_pmd_af_packet_drv(); 27 | // fn vdrvinitfn_bbdev_null_pmd_drv(); 28 | // fn vdrvinitfn_pmd_bond_drv(); 29 | // fn vdrvinitfn_cryptodev_caam_jr_drv(); 30 | // fn vdrvinitfn_cryptodev_scheduler_pmd_drv(); 31 | // fn vdrvinitfn_dpaa2_cmdif_drv(); 32 | // fn vdrvinitfn_vdev_eventdev_dpaa2_pmd(); 33 | // fn vdrvinitfn_vdev_eventdev_dpaa_pmd(); 34 | // fn vdrvinitfn_evdev_dsw_pmd_drv(); 35 | // fn vdrvinitfn_failsafe_drv(); 36 | // fn vdrvinitfn_ifpga_cfg_driver(); 37 | // fn vdrvinitfn_eth_kni_drv(); 38 | // fn 
vdrvinitfn_pmd_null_drv(); 39 | // fn vdrvinitfn_cryptodev_null_pmd_drv(); 40 | // fn vdrvinitfn_octeontx_pmd_drv(); 41 | // fn vdrvinitfn_vdev_ssovf_pmd(); 42 | // fn vdrvinitfn_evdev_opdl_pmd_drv(); 43 | // fn vdrvinitfn_pmd_ring_drv(); 44 | // fn vdrvinitfn_vdev_eventdev_skeleton_pmd(); 45 | // fn vdrvinitfn_skeleton_pmd_drv(); 46 | // fn vdrvinitfn_pmd_softnic_drv(); 47 | // fn vdrvinitfn_evdev_sw_pmd_drv(); 48 | // fn vdrvinitfn_pmd_tap_drv(); 49 | // fn vdrvinitfn_pmd_tun_drv(); 50 | // fn vdrvinitfn_vdev_netvsc_vdev(); 51 | // fn vdrvinitfn_pmd_vhost_drv(); 52 | // fn vdrvinitfn_virtio_user_driver(); 53 | } 54 | 55 | unsafe fn init_pmd_drivers() { 56 | // vdrvinitfn_pmd_af_packet_drv(); 57 | // vdrvinitfn_bbdev_null_pmd_drv(); 58 | // vdrvinitfn_pmd_bond_drv(); 59 | // vdrvinitfn_cryptodev_caam_jr_drv(); 60 | // vdrvinitfn_cryptodev_scheduler_pmd_drv(); 61 | // vdrvinitfn_dpaa2_cmdif_drv(); 62 | // vdrvinitfn_vdev_eventdev_dpaa2_pmd(); 63 | // vdrvinitfn_vdev_eventdev_dpaa_pmd(); 64 | // vdrvinitfn_evdev_dsw_pmd_drv(); 65 | // vdrvinitfn_failsafe_drv(); 66 | // vdrvinitfn_ifpga_cfg_driver(); 67 | // vdrvinitfn_eth_kni_drv(); 68 | // vdrvinitfn_pmd_null_drv(); 69 | // vdrvinitfn_cryptodev_null_pmd_drv(); 70 | // vdrvinitfn_octeontx_pmd_drv(); 71 | // vdrvinitfn_vdev_ssovf_pmd(); 72 | // vdrvinitfn_evdev_opdl_pmd_drv(); 73 | // vdrvinitfn_pmd_ring_drv(); 74 | // vdrvinitfn_vdev_eventdev_skeleton_pmd(); 75 | // vdrvinitfn_skeleton_pmd_drv(); 76 | // vdrvinitfn_pmd_softnic_drv(); 77 | // vdrvinitfn_evdev_sw_pmd_drv(); 78 | // vdrvinitfn_pmd_tap_drv(); 79 | // vdrvinitfn_pmd_tun_drv(); 80 | // vdrvinitfn_vdev_netvsc_vdev(); 81 | // vdrvinitfn_pmd_vhost_drv(); 82 | // vdrvinitfn_virtio_user_driver(); 83 | } 84 | 85 | /// Request iopl privilege for all RPL. 86 | pub fn iopl_init() -> Result<()> { 87 | unsafe { ffi::rte_eal_iopl_init() }.as_result().map(|_| ()) 88 | } 89 | 90 | /// Initialize the Environment Abstraction Layer (EAL). 
91 | /// 92 | /// This function is to be executed on the MASTER lcore only, 93 | /// as soon as possible in the application's main() function. 94 | /// 95 | /// The function finishes the initialization process before main() is called. 96 | /// It puts the SLAVE lcores in the WAIT state. 97 | /// 98 | pub fn init>(args: &[S]) -> Result { 99 | debug!("initial EAL with {} args: {:?}", args.len(), args); 100 | 101 | // rust doesn't support __attribute__((constructor)), we need to invoke those static initializer 102 | unsafe { 103 | init_pmd_drivers(); 104 | } 105 | 106 | let parsed = if args.is_empty() { 107 | unsafe { ffi::rte_eal_init(0, ptr::null_mut()) } 108 | } else { 109 | let args: Vec<_> = args.iter().map(|s| s.as_cstring()).collect(); 110 | let mut cptrs: Vec<_> = args.iter().map(|s| s.as_ptr() as *mut c_char).collect(); 111 | 112 | unsafe { ffi::rte_eal_init(cptrs.len() as i32, cptrs.as_mut_ptr()) } 113 | }; 114 | 115 | debug!("EAL parsed {} arguments", parsed); 116 | 117 | parsed.as_result().map(|_| parsed) 118 | } 119 | 120 | /// Clean up the Environment Abstraction Layer (EAL) 121 | pub fn cleanup() -> Result<()> { 122 | unsafe { ffi::rte_eal_cleanup() }.as_result().map(|_| ()) 123 | } 124 | 125 | /// Function to terminate the application immediately, 126 | /// printing an error message and returning the exit_code back to the shell. 127 | pub fn exit(code: i32, msg: &str) { 128 | unsafe { 129 | ffi::rte_exit(code, to_cptr!(msg).unwrap()); 130 | } 131 | } 132 | 133 | /// Get the process type in a multi-process setup 134 | pub fn process_type() -> ProcType { 135 | unsafe { mem::transmute(ffi::rte_eal_process_type()) } 136 | } 137 | 138 | /// Check if a primary process is currently alive 139 | pub fn primary_proc_alive() -> bool { 140 | unsafe { ffi::rte_eal_primary_proc_alive(ptr::null()) != 0 } 141 | } 142 | 143 | /// Whether EAL is using huge pages (disabled by --no-huge option). 
144 | pub fn has_hugepages() -> bool { 145 | unsafe { ffi::rte_eal_has_hugepages() != 0 } 146 | } 147 | 148 | /// Whether EAL is using PCI bus. 149 | pub fn has_pci() -> bool { 150 | unsafe { ffi::rte_eal_has_pci() != 0 } 151 | } 152 | 153 | /// Whether the EAL was asked to create UIO device. 154 | pub fn create_uio_dev() -> bool { 155 | unsafe { ffi::rte_eal_create_uio_dev() != 0 } 156 | } 157 | 158 | /// Get the runtime directory of DPDK 159 | pub fn runtime_dir() -> PathBuf { 160 | PathBuf::from(unsafe { 161 | CStr::from_ptr(ffi::rte_eal_get_runtime_dir()) 162 | .to_string_lossy() 163 | .into_owned() 164 | }) 165 | } 166 | -------------------------------------------------------------------------------- /rte/src/common/keepalive.rs: -------------------------------------------------------------------------------- 1 | use std::mem; 2 | use std::os::raw::{c_int, c_void}; 3 | use std::ptr::{self, NonNull}; 4 | 5 | use errors::{AsResult, Result}; 6 | use ffi::{ 7 | self, 8 | rte_keepalive_state::{self, *}, 9 | }; 10 | use lcore; 11 | use utils::AsRaw; 12 | 13 | #[repr(u32)] 14 | #[derive(Clone, Copy, Debug, PartialEq, Eq, FromPrimitive, ToPrimitive)] 15 | pub enum State { 16 | Unused = RTE_KA_STATE_UNUSED, 17 | Alive = RTE_KA_STATE_ALIVE, 18 | Missing = RTE_KA_STATE_MISSING, 19 | Dead = RTE_KA_STATE_DEAD, 20 | Gone = RTE_KA_STATE_GONE, 21 | Dozing = RTE_KA_STATE_DOZING, 22 | Sleep = RTE_KA_STATE_SLEEP, 23 | } 24 | 25 | impl From for State { 26 | fn from(t: rte_keepalive_state::Type) -> Self { 27 | unsafe { mem::transmute(t) } 28 | } 29 | } 30 | 31 | /// Keepalive failure callback. 32 | /// 33 | /// Receives a data pointer passed to rte_keepalive_create() and the id of the 34 | /// failed core. 35 | pub type FailureCallback = fn(Option, lcore::Id); 36 | 37 | /// Keepalive relay callback. 
38 | /// 39 | /// Receives a data pointer passed to rte_keepalive_register_relay_callback(), 40 | /// the id of the core for which state is to be forwarded, and details of the 41 | /// current core state. 42 | pub type RelayCallback = fn(Option, lcore::Id, State, u64); 43 | 44 | pub type RawKeepalive = ffi::rte_keepalive; 45 | pub type RawKeepalivePtr = *mut ffi::rte_keepalive; 46 | 47 | #[repr(transparent)] 48 | #[derive(Debug)] 49 | pub struct Keepalive(NonNull); 50 | 51 | impl AsRaw for Keepalive { 52 | type Raw = RawKeepalive; 53 | 54 | fn as_raw(&self) -> *mut Self::Raw { 55 | self.0.as_ptr() 56 | } 57 | } 58 | 59 | pub fn create(callback: FailureCallback, arg: Option) -> Result { 60 | Keepalive::new(callback, arg) 61 | } 62 | 63 | impl Keepalive { 64 | pub fn new(callback: FailureCallback, arg: Option) -> Result { 65 | let ctxt = Box::into_raw(Box::new(FailureContext { callback, arg })); 66 | 67 | unsafe { ffi::rte_keepalive_create(Some(failure_stub::), ctxt as *mut _) } 68 | .as_result() 69 | .map(Keepalive) 70 | } 71 | 72 | /// Checks & handles keepalive state of monitored cores. 73 | pub fn dispatch_pings(&self) { 74 | unsafe { ffi::rte_keepalive_dispatch_pings(ptr::null_mut(), self.as_raw() as *mut _) } 75 | } 76 | 77 | /// Registers a core for keepalive checks. 78 | pub fn register_core(&self, core_id: lcore::Id) { 79 | unsafe { ffi::rte_keepalive_register_core(self.as_raw(), *core_id as i32) } 80 | } 81 | 82 | /// Per-core keepalive check. 83 | /// 84 | /// This function needs to be called from within the main process loop of the LCore to be checked. 85 | pub fn mark_alive(&self) { 86 | unsafe { ffi::rte_keepalive_mark_alive(self.as_raw()) } 87 | } 88 | 89 | /// Per-core sleep-time indication. 90 | /// 91 | /// If CPU idling is enabled, this function needs to be called from within 92 | /// the main process loop of the LCore going to sleep, 93 | /// in order to avoid the LCore being mis-detected as dead. 
94 | pub fn mark_sleep(&self) { 95 | unsafe { ffi::rte_keepalive_mark_sleep(self.as_raw()) } 96 | } 97 | 98 | /// Registers a 'live core' callback. 99 | /// 100 | /// The complement of the 'dead core' callback. This is called when a 101 | /// core is known to be alive, and is intended for cases when an app 102 | /// needs to know 'liveness' beyond just knowing when a core has died. 103 | pub fn register_relay_callback(&self, callback: RelayCallback, arg: Option) { 104 | let ctxt = Box::into_raw(Box::new(RelayContext { callback, arg })); 105 | 106 | unsafe { ffi::rte_keepalive_register_relay_callback(self.as_raw(), Some(relay_stub::), ctxt as *mut _) } 107 | } 108 | } 109 | 110 | struct FailureContext { 111 | callback: FailureCallback, 112 | arg: Option, 113 | } 114 | 115 | unsafe extern "C" fn failure_stub(data: *mut c_void, id_core: c_int) { 116 | let ctxt = Box::from_raw(data as *mut FailureContext); 117 | 118 | (ctxt.callback)(ctxt.arg, lcore::id(id_core as u32)) 119 | } 120 | 121 | struct RelayContext { 122 | callback: RelayCallback, 123 | arg: Option, 124 | } 125 | 126 | unsafe extern "C" fn relay_stub( 127 | data: *mut c_void, 128 | id_core: c_int, 129 | core_state: rte_keepalive_state::Type, 130 | last_seen: u64, 131 | ) { 132 | let ctxt = Box::from_raw(data as *mut RelayContext); 133 | 134 | (ctxt.callback)(ctxt.arg, lcore::id(id_core as u32), core_state.into(), last_seen) 135 | } 136 | -------------------------------------------------------------------------------- /rte/src/common/launch.rs: -------------------------------------------------------------------------------- 1 | //! Launch tasks on other lcores 2 | //! 3 | use std::os::raw::{c_int, c_void}; 4 | 5 | use ffi; 6 | use num_traits::FromPrimitive; 7 | 8 | use errors::{AsResult, Result}; 9 | use lcore; 10 | 11 | /// State of an lcore. 
12 | #[repr(u32)] 13 | #[derive(Clone, Copy, Debug, PartialEq, Eq, FromPrimitive, ToPrimitive)] 14 | pub enum State { 15 | Wait = ffi::rte_lcore_state_t::WAIT, 16 | Running = ffi::rte_lcore_state_t::RUNNING, 17 | Finished = ffi::rte_lcore_state_t::FINISHED, 18 | } 19 | 20 | impl From for State { 21 | fn from(s: ffi::rte_lcore_state_t::Type) -> Self { 22 | State::from_u32(s).unwrap() 23 | } 24 | } 25 | 26 | // Definition of a remote launch function. 27 | pub type LcoreFunc = fn(Option) -> i32; 28 | 29 | struct LcoreContext { 30 | callback: LcoreFunc, 31 | arg: Option, 32 | } 33 | 34 | unsafe extern "C" fn lcore_stub(arg: *mut c_void) -> c_int { 35 | let ctxt = Box::from_raw(arg as *mut LcoreContext); 36 | 37 | (ctxt.callback)(ctxt.arg) 38 | } 39 | 40 | /// Launch a function on another lcore. 41 | /// 42 | /// To be executed on the MASTER lcore only. 43 | pub fn remote_launch(callback: LcoreFunc, arg: Option, slave_id: lcore::Id) -> Result<()> { 44 | let ctxt = Box::into_raw(Box::new(LcoreContext:: { callback, arg })) as *mut c_void; 45 | 46 | unsafe { ffi::rte_eal_remote_launch(Some(lcore_stub::), ctxt, *slave_id) } 47 | .as_result() 48 | .map(|_| ()) 49 | } 50 | 51 | /// Launch a function on all lcores. 52 | pub fn mp_remote_launch(callback: LcoreFunc, arg: Option, skip_master: bool) -> Result<()> { 53 | let ctxt = Box::into_raw(Box::new(LcoreContext:: { callback, arg })) as *mut c_void; 54 | let call_master = if skip_master { 55 | ffi::rte_rmt_call_master_t::SKIP_MASTER 56 | } else { 57 | ffi::rte_rmt_call_master_t::CALL_MASTER 58 | }; 59 | 60 | unsafe { ffi::rte_eal_mp_remote_launch(Some(lcore_stub::), ctxt, call_master) } 61 | .as_result() 62 | .map(|_| ()) 63 | } 64 | 65 | impl lcore::Id { 66 | /// Get the state of the lcore identified by lcore_id. 67 | pub fn state(self) -> State { 68 | unsafe { ffi::rte_eal_get_lcore_state(*self) }.into() 69 | } 70 | 71 | /// Wait until an lcore finishes its job. 72 | /// 73 | /// To be executed on the MASTER lcore only. 
74 | /// 75 | /// If the slave lcore identified by the slave_id is in a FINISHED state, 76 | /// switch to the WAIT state. If the lcore is in RUNNING state, wait until 77 | /// the lcore finishes its job and moves to the FINISHED state. 78 | /// 79 | pub fn wait(self) -> JobState { 80 | let s = unsafe { ffi::rte_eal_wait_lcore(*self) }; 81 | 82 | if s == 0 { 83 | JobState::Wait 84 | } else { 85 | JobState::Finished(s) 86 | } 87 | } 88 | } 89 | 90 | #[derive(Clone, Copy, Debug, PartialEq)] 91 | pub enum JobState { 92 | Wait, 93 | Finished(i32), 94 | } 95 | 96 | /// Wait until all lcores finish their jobs. 97 | /// 98 | /// To be executed on the MASTER lcore only. 99 | /// Issue an rte_eal_wait_lcore() for every lcore. 100 | /// The return values are ignored. 101 | pub fn mp_wait_lcore() { 102 | unsafe { ffi::rte_eal_mp_wait_lcore() } 103 | } 104 | -------------------------------------------------------------------------------- /rte/src/common/lcore.rs: -------------------------------------------------------------------------------- 1 | //! API for lcore and socket manipulation 2 | //! 
3 | use std::cmp::Ordering; 4 | use std::fmt; 5 | use std::mem; 6 | use std::ops::Deref; 7 | 8 | use ffi; 9 | 10 | use common::config; 11 | use errors::{rte_error, Result}; 12 | use memory::SocketId; 13 | 14 | pub use ffi::LCORE_ID_ANY; 15 | pub use ffi::RTE_MAX_LCORE; 16 | 17 | #[repr(transparent)] 18 | #[derive(Clone, Copy, Debug, PartialEq, PartialOrd)] 19 | pub struct Id(u32); 20 | 21 | impl fmt::Display for Id { 22 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 23 | write!(f, "{}", self.0) 24 | } 25 | } 26 | 27 | pub fn id(id: u32) -> Id { 28 | Id(id) 29 | } 30 | 31 | impl> From for Id { 32 | fn from(id: T) -> Self { 33 | Id(id.into()) 34 | } 35 | } 36 | 37 | impl Deref for Id { 38 | type Target = u32; 39 | 40 | fn deref(&self) -> &Self::Target { 41 | &self.0 42 | } 43 | } 44 | 45 | impl From for usize { 46 | fn from(id: Id) -> Self { 47 | id.0 as usize 48 | } 49 | } 50 | 51 | impl PartialEq for Id { 52 | fn eq(&self, other: &u32) -> bool { 53 | self.0 == *other 54 | } 55 | } 56 | 57 | impl PartialOrd for Id { 58 | fn partial_cmp(&self, other: &u32) -> Option { 59 | if *other == ffi::LCORE_ID_ANY { 60 | None 61 | } else { 62 | Some(self.0.cmp(other)) 63 | } 64 | } 65 | } 66 | 67 | impl Id { 68 | /// Any lcore. 69 | pub fn any() -> Id { 70 | Id(ffi::LCORE_ID_ANY) 71 | } 72 | 73 | pub fn max() -> Id { 74 | Id(ffi::RTE_MAX_LCORE) 75 | } 76 | 77 | /// Get the ID of the physical socket of the specified lcore 78 | pub fn socket_id(self) -> SocketId { 79 | unsafe { ffi::lcore_config[self.0 as usize].socket_id as SocketId } 80 | } 81 | 82 | /// Test if an lcore is enabled. 83 | pub fn is_enabled(self) -> bool { 84 | config().lcore_role(self) == Role::Rte 85 | } 86 | 87 | pub fn is_master(self) -> bool { 88 | self.0 == config().master_lcore().0 89 | } 90 | 91 | /// Get the next enabled lcore ID. 92 | pub fn next(self) -> Option { 93 | next_id(self.0, false, true).map(Id) 94 | } 95 | 96 | /// Return the index of the lcore starting from zero. 
97 | pub fn index(self) -> usize { 98 | unsafe { ffi::lcore_config[self.0 as usize].core_index as usize } 99 | } 100 | 101 | /// Test if the core supplied has a specific role 102 | pub fn has_role(self, role: Role) -> bool { 103 | unsafe { ffi::rte_lcore_has_role(self.0, role as u32) == 0 } 104 | } 105 | 106 | /// Get a lcore's role. 107 | pub fn role(self) -> Role { 108 | config().lcore_role(self) 109 | } 110 | } 111 | 112 | #[repr(u32)] 113 | #[derive(Clone, Copy, Debug, PartialEq, FromPrimitive, ToPrimitive)] 114 | pub enum Role { 115 | Rte = ffi::rte_lcore_role_t::ROLE_RTE, 116 | Off = ffi::rte_lcore_role_t::ROLE_OFF, 117 | Service = ffi::rte_lcore_role_t::ROLE_SERVICE, 118 | } 119 | 120 | impl From for Role { 121 | fn from(role: u32) -> Self { 122 | unsafe { mem::transmute(role) } 123 | } 124 | } 125 | 126 | /// Return the ID of the execution unit we are running on. 127 | pub fn current() -> Option { 128 | match unsafe { ffi::_rte_lcore_id() } { 129 | ffi::LCORE_ID_ANY => None, 130 | id => Some(id.into()), 131 | } 132 | } 133 | 134 | /// All the enabled lcores. 135 | pub fn enabled() -> Vec { 136 | foreach_lcores(false).collect() 137 | } 138 | 139 | /// Get the id of the master lcore 140 | pub fn master() -> Id { 141 | config().master_lcore() 142 | } 143 | 144 | /// Return the number of execution units (lcores) on the system. 145 | pub fn count() -> usize { 146 | config().lcore_count() 147 | } 148 | 149 | /// Return the index of the lcore starting from zero. 150 | pub fn index(lcore_id: u32) -> Option { 151 | let id = if lcore_id == ffi::LCORE_ID_ANY { 152 | current().map(|id| id.0) 153 | } else if lcore_id < ffi::RTE_MAX_LCORE { 154 | Some(lcore_id) 155 | } else { 156 | None 157 | }; 158 | 159 | id.map(|id| unsafe { ffi::lcore_config[id as usize].core_index as usize }) 160 | } 161 | 162 | /// Get the next enabled lcore ID. 
163 | pub fn next_id(lcore_id: u32, skip_master: bool, wrap: bool) -> Option { 164 | let mut next_id = lcore_id; 165 | 166 | loop { 167 | next_id += 1; 168 | 169 | if wrap { 170 | next_id %= ffi::RTE_MAX_LCORE; 171 | } else if next_id >= ffi::RTE_MAX_LCORE || next_id == lcore_id { 172 | return None; 173 | } 174 | 175 | if !Id(next_id).is_enabled() { 176 | continue; 177 | } 178 | 179 | if skip_master && Id(next_id).is_master() { 180 | continue; 181 | } 182 | 183 | break; 184 | } 185 | 186 | Some(next_id) 187 | } 188 | 189 | /// Return the ID of the physical socket of the logical core we are running on. 190 | pub fn socket_id() -> u32 { 191 | unsafe { ffi::rte_socket_id() } 192 | } 193 | 194 | /// Return number of physical sockets detected on the system. 195 | /// 196 | /// Note that number of nodes may not be correspondent to their physical id's: 197 | /// for example, a system may report two socket id's, but the actual socket id's 198 | /// may be 0 and 8. 199 | pub fn socket_count() -> u32 { 200 | unsafe { ffi::rte_socket_count() } 201 | } 202 | 203 | /// Return socket id with a particular index. 204 | /// 205 | /// This will return socket id at a particular position in list of all detected 206 | /// physical socket id's. For example, on a machine with sockets [0, 8], passing 207 | /// 1 as a parameter will return 8. 208 | pub fn socket_id_by_idx(idx: u32) -> Result { 209 | let id = unsafe { ffi::rte_socket_id_by_idx(idx) }; 210 | 211 | if id < 0 { 212 | Err(rte_error()) 213 | } else { 214 | Ok(id) 215 | } 216 | } 217 | 218 | /// Browse all running lcores. 219 | pub fn foreach(f: F) { 220 | foreach_lcores(false).for_each(f) 221 | } 222 | 223 | /// Browse all running lcores except the master lcore. 
224 | pub fn foreach_slave(f: F) { 225 | foreach_lcores(true).for_each(f) 226 | } 227 | 228 | fn foreach_lcores(skip_master: bool) -> impl Iterator { 229 | (0..ffi::RTE_MAX_LCORE) 230 | .map(Id) 231 | .filter(|lcore_id| lcore_id.is_enabled()) 232 | .filter(move |lcore_id| !skip_master || !lcore_id.is_master()) 233 | } 234 | -------------------------------------------------------------------------------- /rte/src/common/log.rs: -------------------------------------------------------------------------------- 1 | use std::mem; 2 | use std::os::unix::io::AsRawFd; 3 | 4 | use cfile; 5 | 6 | use errors::{AsResult, ErrorKind::*, Result}; 7 | use ffi; 8 | use utils::AsCString; 9 | 10 | /// SDK log type 11 | #[repr(u32)] 12 | #[derive(Clone, Copy, Debug, PartialEq, FromPrimitive, ToPrimitive)] 13 | pub enum Type { 14 | /// Log related to eal. 15 | Eal = ffi::RTE_LOGTYPE_EAL, 16 | /// Log related to malloc. 17 | Malloc = ffi::RTE_LOGTYPE_MALLOC, 18 | /// Log related to ring. 19 | Ring = ffi::RTE_LOGTYPE_RING, 20 | /// Log related to mempool. 21 | MemPool = ffi::RTE_LOGTYPE_MEMPOOL, 22 | /// Log related to timers. 23 | Timer = ffi::RTE_LOGTYPE_TIMER, 24 | /// Log related to poll mode driver. 25 | PMD = ffi::RTE_LOGTYPE_PMD, 26 | /// Log related to hash table. 27 | Hash = ffi::RTE_LOGTYPE_HASH, 28 | /// Log related to LPM. 29 | LPM = ffi::RTE_LOGTYPE_LPM, 30 | /// Log related to KNI. 31 | KNI = ffi::RTE_LOGTYPE_KNI, 32 | /// Log related to ACL. 33 | ACL = ffi::RTE_LOGTYPE_ACL, 34 | /// Log related to power. 35 | Power = ffi::RTE_LOGTYPE_POWER, 36 | /// Log related to QoS meter. 37 | Meter = ffi::RTE_LOGTYPE_METER, 38 | /// Log related to QoS port scheduler. 39 | PortScheduler = ffi::RTE_LOGTYPE_SCHED, 40 | /// Log related to port. 41 | Port = ffi::RTE_LOGTYPE_PORT, 42 | /// Log related to table. 43 | Table = ffi::RTE_LOGTYPE_TABLE, 44 | /// Log related to pipeline. 45 | Pipeline = ffi::RTE_LOGTYPE_PIPELINE, 46 | /// Log related to mbuf. 
47 | MBuf = ffi::RTE_LOGTYPE_MBUF, 48 | /// Log related to cryptodev. 49 | CryptoDev = ffi::RTE_LOGTYPE_CRYPTODEV, 50 | /// Log related to EFD. 51 | EFD = ffi::RTE_LOGTYPE_EFD, 52 | /// Log related to eventdev. 53 | EventDev = ffi::RTE_LOGTYPE_EVENTDEV, 54 | /// Log related to GSO. 55 | GSO = ffi::RTE_LOGTYPE_GSO, 56 | /// User-defined log type 1. 57 | User1 = ffi::RTE_LOGTYPE_USER1, 58 | /// User-defined log type 2. 59 | User2 = ffi::RTE_LOGTYPE_USER2, 60 | /// User-defined log type 3. 61 | User3 = ffi::RTE_LOGTYPE_USER3, 62 | /// User-defined log type 4. 63 | User4 = ffi::RTE_LOGTYPE_USER4, 64 | /// User-defined log type 5. 65 | User5 = ffi::RTE_LOGTYPE_USER5, 66 | /// User-defined log type 6. 67 | User6 = ffi::RTE_LOGTYPE_USER6, 68 | /// User-defined log type 7. 69 | User7 = ffi::RTE_LOGTYPE_USER7, 70 | /// User-defined log type 8. 71 | User8 = ffi::RTE_LOGTYPE_USER8, 72 | 73 | /// First identifier for extended logs 74 | FirstExt = ffi::RTE_LOGTYPE_FIRST_EXT_ID, 75 | } 76 | 77 | #[repr(u32)] 78 | #[derive(Clone, Copy, Debug, PartialEq, FromPrimitive, ToPrimitive)] 79 | pub enum Level { 80 | /// System is unusable. 81 | Emerge = ffi::RTE_LOG_EMERG, 82 | /// Action must be taken immediately. 83 | Alert = ffi::RTE_LOG_ALERT, 84 | /// Critical conditions. 85 | Critical = ffi::RTE_LOG_CRIT, 86 | /// Error conditions. 87 | Error = ffi::RTE_LOG_ERR, 88 | /// Warning conditions. 89 | Warn = ffi::RTE_LOG_WARNING, 90 | /// Normal but significant condition. 91 | Notice = ffi::RTE_LOG_NOTICE, 92 | /// Informational. 93 | Info = ffi::RTE_LOG_INFO, 94 | /// Debug-level messages. 95 | Debug = ffi::RTE_LOG_DEBUG, 96 | } 97 | 98 | /// Change the stream that will be used by the logging system. 99 | /// 100 | /// This can be done at any time. The f argument represents the stream 101 | /// to be used to send the logs. If f is NULL, the default output is 102 | /// used (stderr). 
103 | pub fn openlog_stream(s: &S) -> Result { 104 | let f = cfile::fdopen(s, "w")?; 105 | 106 | unsafe { ffi::rte_openlog_stream(f.stream() as *mut ffi::FILE) } 107 | .as_result() 108 | .map(|_| f) 109 | } 110 | 111 | /// Set the global log level. 112 | /// 113 | /// After this call, logs with a level lower or equal than the level 114 | /// passed as argument will be displayed. 115 | pub fn set_global_level(level: Level) { 116 | unsafe { ffi::rte_log_set_global_level(level as u32) } 117 | } 118 | 119 | /// Get the global log level. 120 | pub fn get_global_level() -> Level { 121 | unsafe { mem::transmute(ffi::rte_log_get_global_level()) } 122 | } 123 | 124 | /// Get the log level for a given type. 125 | pub fn get_level(ty: Type) -> Result { 126 | let level = unsafe { ffi::rte_log_get_level(ty as u32) }; 127 | 128 | level 129 | .ok_or(InvalidLogType(ty as u32)) 130 | .map(|_| unsafe { mem::transmute(level) }) 131 | } 132 | 133 | /// Set the log level for a given type. 134 | pub fn set_level(ty: Type, level: Level) -> Result<()> { 135 | unsafe { ffi::rte_log_set_level(ty as u32, level as u32) } 136 | .ok_or(InvalidLogLevel(level as u32)) 137 | .map(|_| ()) 138 | } 139 | 140 | /// Get the current loglevel for the message being processed. 141 | /// 142 | /// Before calling the user-defined stream for logging, the log 143 | /// subsystem sets a per-lcore variable containing the loglevel and the 144 | /// logtype of the message being processed. This information can be 145 | /// accessed by the user-defined log output function through this function. 146 | pub fn cur_msg_loglevel() -> Level { 147 | unsafe { mem::transmute(ffi::rte_log_cur_msg_loglevel()) } 148 | } 149 | 150 | /// Get the current logtype for the message being processed. 151 | /// 152 | /// Before calling the user-defined stream for logging, the log 153 | /// subsystem sets a per-lcore variable containing the loglevel and the 154 | /// logtype of the message being processed. 
This information can be 155 | /// accessed by the user-defined log output function through this function. 156 | pub fn cur_msg_logtype() -> Type { 157 | unsafe { mem::transmute(ffi::rte_log_cur_msg_logtype()) } 158 | } 159 | 160 | /// Register a dynamic log type 161 | /// 162 | /// If a log is already registered with the same type, the returned value 163 | /// is the same than the previous one. 164 | pub fn register>(name: S) -> Result<()> { 165 | let name = name.as_cstring(); 166 | 167 | unsafe { ffi::rte_log_register(name.as_ptr()) }.as_result().map(|_| ()) 168 | } 169 | 170 | /// Dump log information. 171 | /// 172 | /// Dump the global level and the registered log types. 173 | pub fn dump(s: &S) -> Result<()> { 174 | let f = cfile::fdopen(s, "w")?; 175 | 176 | unsafe { ffi::rte_log_dump(f.stream() as *mut ffi::FILE) }; 177 | 178 | Ok(()) 179 | } 180 | 181 | /// Generates a log message. 182 | /// 183 | /// The message will be sent in the stream defined by the previous call 184 | /// to rte_openlog_stream(). 185 | /// 186 | /// The level argument determines if the log should be displayed or 187 | /// not, depending on the global rte_logs variable. 188 | /// 189 | /// The preferred alternative is the RTE_LOG() because it adds the 190 | /// level and type in the logged string. 191 | pub fn log(level: Level, ty: Type, msg: &str) -> Result<()> { 192 | let msg = msg.as_cstring(); 193 | 194 | unsafe { ffi::rte_log(level as u32, ty as u32, msg.as_ptr()) } 195 | .as_result() 196 | .map(|_| ()) 197 | } 198 | -------------------------------------------------------------------------------- /rte/src/common/malloc.rs: -------------------------------------------------------------------------------- 1 | use std::mem; 2 | use std::os::raw::c_void; 3 | use std::os::unix::io::AsRawFd; 4 | use std::ptr; 5 | 6 | use cfile; 7 | 8 | use ffi; 9 | 10 | #[macro_export] 11 | macro_rules! 
rte_new { 12 | ($t:ty) => { 13 | unsafe { 14 | ::std::mem::transmute($crate::malloc::zmalloc( 15 | stringify!($t), 16 | ::std::mem::size_of::<$t>(), 17 | $crate::RTE_CACHE_LINE_SIZE, 18 | ) as *mut $t) 19 | } 20 | }; 21 | } 22 | 23 | #[macro_export] 24 | macro_rules! rte_new_array { 25 | ($t:ty; $num:expr) => { 26 | unsafe { 27 | ::std::mem::transmute(::std::slice::from_raw_parts_mut( 28 | $crate::malloc::calloc( 29 | stringify!($t), 30 | $num, 31 | ::std::mem::size_of::<$t>(), 32 | $crate::RTE_CACHE_LINE_SIZE, 33 | ) as *mut $t, 34 | $num, 35 | )) 36 | } 37 | }; 38 | } 39 | 40 | #[macro_export] 41 | macro_rules! rte_free { 42 | ($p:expr) => { 43 | $crate::malloc::free($p as *mut ::std::os::raw::c_void) 44 | }; 45 | } 46 | 47 | /// This function allocates memory from the huge-page area of memory. 48 | /// 49 | /// The memory is not cleared. In NUMA systems, the memory allocated 50 | /// resides on the same NUMA socket as the core that calls this function. 51 | /// 52 | pub fn malloc(tag: &'static str, size: usize, align: u32) -> *mut c_void { 53 | unsafe { ffi::rte_malloc(tag.as_ptr() as *const i8, size, align) } 54 | } 55 | 56 | /// Allocate zero'ed memory from the heap. 57 | /// 58 | /// Equivalent to rte_malloc() except that the memory zone is initialised with zeros. 59 | /// In NUMA systems, the memory allocated resides on the same NUMA socket 60 | /// as the core that calls this function. 61 | /// 62 | pub fn zmalloc(tag: &'static str, size: usize, align: u32) -> *mut c_void { 63 | unsafe { ffi::rte_zmalloc(tag.as_ptr() as *const i8, size, align) } 64 | } 65 | 66 | /// Replacement function for calloc(), using huge-page memory. 67 | /// 68 | /// Memory area is initialised with zeros. In NUMA systems, 69 | /// the memory allocated resides on the same NUMA socket as the core that calls this function. 
70 | /// 71 | pub fn calloc(tag: &'static str, num: usize, size: usize, align: u32) -> *mut c_void { 72 | unsafe { ffi::rte_calloc(tag.as_ptr() as *const i8, num, size, align) } 73 | } 74 | 75 | /// Replacement function for realloc(), using huge-page memory. 76 | /// 77 | /// Reserved area memory is resized, preserving contents. 78 | /// In NUMA systems, the new area resides on the same NUMA socket as the old area. 79 | /// 80 | pub fn realloc(ptr: *mut c_void, size: usize, align: u32) -> *mut c_void { 81 | unsafe { ffi::rte_realloc(ptr, size, align) } 82 | } 83 | 84 | /// This function allocates memory from the huge-page area of memory. 85 | /// 86 | /// The memory is not cleared. 87 | /// 88 | pub fn malloc_socket(tag: &'static str, size: usize, align: u32, socket_id: i32) -> *mut c_void { 89 | unsafe { ffi::rte_malloc_socket(tag.as_ptr() as *const i8, size, align, socket_id) } 90 | } 91 | 92 | /// Allocate zero'ed memory from the heap. 93 | /// 94 | /// Equivalent to rte_malloc() except that the memory zone is initialised with zeros. 95 | /// 96 | pub fn zmalloc_socket(tag: &'static str, size: usize, align: u32, socket_id: i32) -> *mut c_void { 97 | unsafe { ffi::rte_zmalloc_socket(tag.as_ptr() as *const i8, size, align, socket_id) } 98 | } 99 | 100 | /// Replacement function for calloc(), using huge-page memory. 101 | /// 102 | /// Memory area is initialised with zeros. 103 | /// 104 | pub fn calloc_socket(tag: &'static str, num: usize, size: usize, align: u32, socket_id: i32) -> *mut c_void { 105 | unsafe { ffi::rte_calloc_socket(tag.as_ptr() as *const i8, num, size, align, socket_id) } 106 | } 107 | 108 | /// Frees the memory space pointed to by the provided pointer. 109 | pub fn free(ptr: *mut c_void) { 110 | unsafe { ffi::rte_free(ptr as *mut c_void) } 111 | } 112 | 113 | /// Get heap statistics for the specified heap. 
114 | pub fn get_socket_stats(socket_id: i32) -> Option { 115 | unsafe { 116 | let mut stats: ffi::rte_malloc_socket_stats = mem::zeroed(); 117 | 118 | if ffi::rte_malloc_get_socket_stats(socket_id, &mut stats) == 0 { 119 | Some(stats) 120 | } else { 121 | None 122 | } 123 | } 124 | } 125 | 126 | /// Dump statistics. 127 | pub fn dump_stats(s: &S, tag: Option<&str>) { 128 | if let Ok(mut f) = cfile::fdopen(s, "w") { 129 | unsafe { 130 | ffi::rte_malloc_dump_stats( 131 | &mut **f as *mut _ as *mut _, 132 | tag.map_or_else(ptr::null, |s| s.as_ptr() as *const i8), 133 | ); 134 | } 135 | } 136 | } 137 | -------------------------------------------------------------------------------- /rte/src/common/memory.rs: -------------------------------------------------------------------------------- 1 | pub type SocketId = i32; 2 | 3 | pub const SOCKET_ID_ANY: SocketId = -1; 4 | 5 | pub trait AsRef<'a, T: 'a> { 6 | fn as_ref(self) -> Option<&'a T>; 7 | } 8 | 9 | pub trait AsMutRef<'a, T: 'a> { 10 | fn as_mut_ref(self) -> Option<&'a mut T>; 11 | } 12 | 13 | impl<'a, T: 'a> AsRef<'a, T> for *const T { 14 | fn as_ref(self) -> Option<&'a T> { 15 | if self.is_null() { 16 | None 17 | } else { 18 | Some(unsafe { &*self }) 19 | } 20 | } 21 | } 22 | 23 | impl<'a, T: 'a> AsMutRef<'a, T> for *mut T { 24 | fn as_mut_ref(self) -> Option<&'a mut T> { 25 | if self.is_null() { 26 | None 27 | } else { 28 | Some(unsafe { &mut *self }) 29 | } 30 | } 31 | } 32 | 33 | impl<'a, T: 'a> AsRef<'a, T> for Option<*const T> { 34 | fn as_ref(self) -> Option<&'a T> { 35 | self.map(|p| unsafe { &*p }) 36 | } 37 | } 38 | 39 | impl<'a, T: 'a> AsMutRef<'a, T> for Option<*mut T> { 40 | fn as_mut_ref(self) -> Option<&'a mut T> { 41 | self.map(|p| unsafe { &mut *p }) 42 | } 43 | } 44 | 45 | impl<'a, T: 'a, E> AsRef<'a, T> for Result<*const T, E> { 46 | fn as_ref(self) -> Option<&'a T> { 47 | self.ok().map(|p| unsafe { &*p }) 48 | } 49 | } 50 | 51 | impl<'a, T: 'a, E> AsMutRef<'a, T> for Result<*mut T, E> { 52 | fn 
as_mut_ref(self) -> Option<&'a mut T> { 53 | self.ok().map(|p| unsafe { &mut *p }) 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /rte/src/common/memzone.rs: -------------------------------------------------------------------------------- 1 | use ffi::rte_memzone; 2 | 3 | /// RTE Memzone 4 | /// 5 | /// The goal of the memzone allocator is to reserve contiguous portions of physical memory. 6 | /// These zones are identified by a name. 7 | /// 8 | /// The memzone descriptors are shared by all partitions 9 | /// and are located in a known place of physical memory. 10 | /// This zone is accessed using rte_eal_get_configuration(). 11 | /// The lookup (by name) of a memory zone can be done 12 | // in any partition and returns the same physical address. 13 | /// 14 | /// A reserved memory zone cannot be unreserved. 15 | /// The reservation shall be done at initialization time only. 16 | /// 17 | pub struct MemoryZone(*const rte_memzone); 18 | 19 | pub fn from_raw(zone: *const rte_memzone) -> MemoryZone { 20 | MemoryZone(zone) 21 | } 22 | -------------------------------------------------------------------------------- /rte/src/common/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod bitmap; 2 | mod config; 3 | pub mod eal; 4 | pub mod keepalive; 5 | pub mod launch; 6 | pub mod lcore; 7 | pub mod log; 8 | mod rand; 9 | mod version; 10 | #[macro_use] 11 | pub mod malloc; 12 | pub mod dev; 13 | pub mod devargs; 14 | #[macro_use] 15 | pub mod debug; 16 | pub mod spinlock; 17 | #[macro_use] 18 | pub mod byteorder; 19 | mod cycles; 20 | pub mod memory; 21 | pub mod memzone; 22 | 23 | pub use self::config::{config, Config, MemoryConfig}; 24 | pub use self::cycles::*; 25 | pub use self::lcore::{socket_count, socket_id}; 26 | pub use self::rand::{rand, srand}; 27 | pub use self::version::version; 28 | 
-------------------------------------------------------------------------------- /rte/src/common/rand.rs: -------------------------------------------------------------------------------- 1 | //! Pseudo-random Generators in RTE 2 | 3 | use ffi; 4 | 5 | /// Seed the pseudo-random generator. 6 | /// 7 | /// The generator is automatically seeded by the EAL init with a timer 8 | /// value. It may need to be re-seeded by the user with a real random value. 9 | pub fn srand(seed: u64) { 10 | unsafe { ffi::_rte_srand(seed) } 11 | } 12 | 13 | /// Get a pseudo-random value. 14 | /// 15 | /// This function generates pseudo-random numbers using the linear 16 | /// congruential algorithm and 48-bit integer arithmetic, called twice 17 | /// to generate a 64-bit value. 18 | pub fn rand() -> u64 { 19 | unsafe { ffi::_rte_rand() } 20 | } 21 | -------------------------------------------------------------------------------- /rte/src/common/spinlock.rs: -------------------------------------------------------------------------------- 1 | use std::ops::{Deref, DerefMut}; 2 | 3 | use libc; 4 | 5 | use ffi; 6 | 7 | pub type RawSpinLock = ffi::rte_spinlock_t; 8 | pub type RawSpinLockPtr = *mut ffi::rte_spinlock_t; 9 | 10 | pub type RawRecursiveSpinLock = ffi::rte_spinlock_recursive_t; 11 | pub type RawRecursiveSpinLockPtr = *mut ffi::rte_spinlock_recursive_t; 12 | 13 | pub trait LockImpl { 14 | type RawLock: ?Sized; 15 | 16 | fn init(p: *mut Self::RawLock); 17 | 18 | fn lock(p: *mut Self::RawLock); 19 | 20 | fn unlock(p: *mut Self::RawLock); 21 | 22 | fn trylock(p: *mut Self::RawLock) -> libc::c_int; 23 | 24 | fn is_locked(p: *mut Self::RawLock) -> libc::c_int; 25 | } 26 | 27 | pub struct Lock(T::RawLock); 28 | 29 | pub struct LockGuard<'a, T: LockImpl + 'a>(&'a mut Lock); 30 | 31 | impl<'a, T: LockImpl> Drop for LockGuard<'a, T> { 32 | fn drop(&mut self) { 33 | self.0.unlock(); 34 | } 35 | } 36 | 37 | impl Deref for Lock { 38 | type Target = T::RawLock; 39 | 40 | fn deref(&self) -> 
&Self::Target { 41 | &self.0 42 | } 43 | } 44 | 45 | impl DerefMut for Lock { 46 | fn deref_mut(&mut self) -> &mut Self::Target { 47 | &mut self.0 48 | } 49 | } 50 | 51 | /// Take the spinlock. 52 | impl<'a, T: LockImpl> Lock { 53 | #[inline] 54 | pub fn as_raw(&mut self) -> *mut T::RawLock { 55 | &mut self.0 56 | } 57 | 58 | /// Initialize the spinlock to an unlocked state. 59 | #[inline] 60 | pub fn init(&mut self) -> &Self { 61 | T::init(&mut self.0); 62 | 63 | self 64 | } 65 | 66 | /// Test if the lock is taken. 67 | #[inline] 68 | pub fn is_locked(&self) -> bool { 69 | T::is_locked(&self.0 as *const _ as *mut _) != 0 70 | } 71 | 72 | /// Take the spinlock. 73 | #[inline] 74 | pub fn lock(&'a mut self) -> LockGuard<'a, T> { 75 | T::lock(&mut self.0); 76 | 77 | LockGuard(self) 78 | } 79 | 80 | /// Try to take the lock. 81 | #[inline] 82 | pub fn trylock(&'a mut self) -> Option> { 83 | if T::trylock(&mut self.0) == 0 { 84 | None 85 | } else { 86 | Some(LockGuard(self)) 87 | } 88 | } 89 | 90 | /// Release the spinlock. 
91 | #[inline] 92 | pub fn unlock(&mut self) -> &Self { 93 | T::unlock(&mut self.0); 94 | 95 | self 96 | } 97 | } 98 | 99 | pub enum SpinLockImpl {} 100 | 101 | pub type SpinLock = Lock; 102 | 103 | impl LockImpl for SpinLockImpl { 104 | type RawLock = RawSpinLock; 105 | 106 | #[inline] 107 | fn init(p: *mut Self::RawLock) { 108 | unsafe { 109 | (*p).locked = 0; 110 | } 111 | } 112 | 113 | #[inline] 114 | fn is_locked(p: *mut Self::RawLock) -> libc::c_int { 115 | unsafe { (*p).locked } 116 | } 117 | 118 | #[inline] 119 | fn lock(p: *mut Self::RawLock) { 120 | unsafe { ffi::_rte_spinlock_lock(p) } 121 | } 122 | 123 | #[inline] 124 | fn unlock(p: *mut Self::RawLock) { 125 | unsafe { ffi::_rte_spinlock_unlock(p) } 126 | } 127 | 128 | #[inline] 129 | fn trylock(p: *mut Self::RawLock) -> libc::c_int { 130 | unsafe { ffi::_rte_spinlock_trylock(p) } 131 | } 132 | } 133 | 134 | pub enum TmSpinLockImpl {} 135 | 136 | pub type TmSpinLock = Lock; 137 | 138 | impl LockImpl for TmSpinLockImpl { 139 | type RawLock = RawSpinLock; 140 | 141 | #[inline] 142 | fn init(p: *mut Self::RawLock) { 143 | unsafe { 144 | (*p).locked = 0; 145 | } 146 | } 147 | 148 | #[inline] 149 | fn is_locked(p: *mut Self::RawLock) -> libc::c_int { 150 | unsafe { (*p).locked } 151 | } 152 | 153 | #[inline] 154 | fn lock(p: *mut Self::RawLock) { 155 | unsafe { ffi::_rte_spinlock_lock_tm(p) } 156 | } 157 | 158 | #[inline] 159 | fn unlock(p: *mut Self::RawLock) { 160 | unsafe { ffi::_rte_spinlock_unlock_tm(p) } 161 | } 162 | 163 | #[inline] 164 | fn trylock(p: *mut Self::RawLock) -> libc::c_int { 165 | unsafe { ffi::_rte_spinlock_trylock_tm(p) } 166 | } 167 | } 168 | 169 | pub fn tm_supported() -> bool { 170 | unsafe { ffi::_rte_tm_supported() != 0 } 171 | } 172 | 173 | pub enum RecursiveSpinLockImpl {} 174 | 175 | pub type RecursiveSpinLock = Lock; 176 | 177 | impl LockImpl for RecursiveSpinLockImpl { 178 | type RawLock = RawRecursiveSpinLock; 179 | 180 | #[inline] 181 | fn init(p: *mut Self::RawLock) { 182 
| unsafe { 183 | (*p).sl.locked = 0; 184 | (*p).user = -1; 185 | (*p).count = 0; 186 | } 187 | } 188 | 189 | #[inline] 190 | fn is_locked(p: *mut Self::RawLock) -> libc::c_int { 191 | unsafe { (*p).sl.locked } 192 | } 193 | 194 | #[inline] 195 | fn lock(p: *mut Self::RawLock) { 196 | unsafe { ffi::_rte_spinlock_recursive_lock(p) } 197 | } 198 | 199 | #[inline] 200 | fn unlock(p: *mut Self::RawLock) { 201 | unsafe { ffi::_rte_spinlock_recursive_unlock(p) } 202 | } 203 | 204 | #[inline] 205 | fn trylock(p: *mut Self::RawLock) -> libc::c_int { 206 | unsafe { ffi::_rte_spinlock_recursive_trylock(p) } 207 | } 208 | } 209 | 210 | pub enum RecursiveTmSpinLockImpl {} 211 | 212 | pub type RecursiveTmSpinLock = Lock; 213 | 214 | impl LockImpl for RecursiveTmSpinLockImpl { 215 | type RawLock = RawRecursiveSpinLock; 216 | 217 | #[inline] 218 | fn init(p: *mut Self::RawLock) { 219 | unsafe { 220 | (*p).sl.locked = 0; 221 | (*p).user = -1; 222 | (*p).count = 0; 223 | } 224 | } 225 | 226 | #[inline] 227 | fn is_locked(p: *mut Self::RawLock) -> libc::c_int { 228 | unsafe { (*p).sl.locked } 229 | } 230 | 231 | #[inline] 232 | fn lock(p: *mut Self::RawLock) { 233 | unsafe { ffi::_rte_spinlock_recursive_lock_tm(p) } 234 | } 235 | 236 | #[inline] 237 | fn unlock(p: *mut Self::RawLock) { 238 | unsafe { ffi::_rte_spinlock_recursive_unlock_tm(p) } 239 | } 240 | 241 | #[inline] 242 | fn trylock(p: *mut Self::RawLock) -> libc::c_int { 243 | unsafe { ffi::_rte_spinlock_recursive_trylock_tm(p) } 244 | } 245 | } 246 | -------------------------------------------------------------------------------- /rte/src/common/version.rs: -------------------------------------------------------------------------------- 1 | use std::ffi::CStr; 2 | 3 | use ffi; 4 | 5 | /// Patch level number i.e. the z in yy.mm.z 6 | pub use ffi::RTE_VER_MINOR; 7 | /// Minor version/month number i.e. 
the mm in yy.mm.z 8 | pub use ffi::RTE_VER_MONTH; 9 | /// Patch release number 10 | pub use ffi::RTE_VER_RELEASE; 11 | /// Major version/year number i.e. the yy in yy.mm.z 12 | pub use ffi::RTE_VER_YEAR; 13 | 14 | /// Macro to compute a version number usable for comparisons 15 | macro_rules! RTE_VERSION_NUM { 16 | ($a:expr, $b:expr, $c:expr, $d:expr) => { 17 | (($a) << 24 | ($b) << 16 | ($c) << 8 | ($d)) 18 | }; 19 | } 20 | 21 | lazy_static! { 22 | /// String that appears before the version number 23 | pub static ref RTE_VER_PREFIX: &'static str = unsafe { CStr::from_bytes_with_nul_unchecked(ffi::RTE_VER_PREFIX).to_str().unwrap() }; 24 | /// Extra string to be appended to version number 25 | pub static ref RTE_VER_SUFFIX: &'static str = unsafe { CStr::from_bytes_with_nul_unchecked(ffi::RTE_VER_SUFFIX).to_str().unwrap() }; 26 | 27 | /// All version numbers in one to compare with RTE_VERSION_NUM() 28 | pub static ref RTE_VERSION: u32 = 29 | RTE_VERSION_NUM!(RTE_VER_YEAR, RTE_VER_MONTH, RTE_VER_MINOR, RTE_VER_RELEASE); 30 | 31 | pub static ref RTE_VERSION_STR: String = version(); 32 | } 33 | 34 | /// Function returning version string 35 | pub fn version() -> String { 36 | if ffi::RTE_VER_SUFFIX.is_empty() { 37 | format!( 38 | "{} {}.{:02}.{}", 39 | *RTE_VER_PREFIX, RTE_VER_YEAR, RTE_VER_MONTH, RTE_VER_MINOR 40 | ) 41 | } else { 42 | format!( 43 | "{} {}.{:02}.{}{}{}", 44 | *RTE_VER_PREFIX, 45 | RTE_VER_YEAR, 46 | RTE_VER_MONTH, 47 | RTE_VER_MINOR, 48 | *RTE_VER_SUFFIX, 49 | if RTE_VER_RELEASE < 16 { 50 | RTE_VER_RELEASE 51 | } else { 52 | RTE_VER_RELEASE - 16 53 | } 54 | ) 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /rte/src/errors.rs: -------------------------------------------------------------------------------- 1 | use std::ffi::CStr; 2 | use std::fmt; 3 | use std::os::raw::c_int; 4 | use std::ptr::NonNull; 5 | use std::result; 6 | 7 | use errno::errno; 8 | use failure::{Error, Fail}; 9 | 10 | use ffi; 11 | 
12 | pub type Result = result::Result; 13 | 14 | pub trait AsResult { 15 | type Result; 16 | 17 | fn as_result(self) -> Result; 18 | 19 | fn ok_or(self, err: E) -> Result; 20 | 21 | fn ok_or_else E>(self, err: F) -> Result; 22 | } 23 | 24 | impl AsResult for *mut T { 25 | type Result = NonNull; 26 | 27 | fn as_result(self) -> Result { 28 | NonNull::new(self).ok_or_else(rte_error) 29 | } 30 | 31 | fn ok_or(self, err: E) -> Result { 32 | NonNull::new(self).ok_or_else(|| err.into()) 33 | } 34 | 35 | fn ok_or_else E>(self, err: F) -> Result { 36 | NonNull::new(self).ok_or_else(|| err().into()) 37 | } 38 | } 39 | 40 | impl AsResult for c_int { 41 | type Result = c_int; 42 | 43 | fn as_result(self) -> Result { 44 | if self == -1 { 45 | Err(RteError(self).into()) 46 | } else { 47 | Ok(self) 48 | } 49 | } 50 | 51 | fn ok_or(self, err: E) -> Result { 52 | if self == -1 { 53 | Err(err.into()) 54 | } else { 55 | Ok(self) 56 | } 57 | } 58 | 59 | fn ok_or_else E>(self, err: F) -> Result { 60 | if self == -1 { 61 | Err(err().into()) 62 | } else { 63 | Ok(self) 64 | } 65 | } 66 | } 67 | 68 | macro_rules! 
rte_check { 69 | ( $ret:expr ) => { 70 | rte_check!($ret; ok => {()}; err => {$crate::errors::RteError($ret).into()}) 71 | }; 72 | ( $ret:expr; ok => $ok:block) => { 73 | rte_check!($ret; ok => $ok; err => {$crate::errors::RteError($ret).into()}) 74 | }; 75 | ( $ret:expr; err => $err:block) => { 76 | rte_check!($ret; ok => {()}; err => $err) 77 | }; 78 | ( $ret:expr; ok => $ok:block; err => $err:block ) => {{ 79 | if $ret == 0 { 80 | Ok($ok) 81 | } else { 82 | Err($err) 83 | } 84 | }}; 85 | 86 | ( $ret:expr, NonNull ) => { 87 | rte_check!($ret, NonNull; ok => {$ret}; err => {$crate::errors::rte_error()}) 88 | }; 89 | ( $ret:expr, NonNull; ok => $ok:block) => { 90 | rte_check!($ret, NonNull; ok => $ok; err => {$crate::errors::rte_error()}) 91 | }; 92 | ( $ret:expr, NonNull; err => $err:block) => { 93 | rte_check!($ret, NonNull; ok => {$ret}; err => $err) 94 | }; 95 | ( $ret:expr, NonNull; ok => $ok:block; err => $err:block ) => {{ 96 | if !$ret.is_null() { 97 | Ok($ok) 98 | } else { 99 | Err($err) 100 | } 101 | }}; 102 | } 103 | 104 | #[derive(Debug, Fail)] 105 | pub struct RteError(pub i32); 106 | 107 | impl fmt::Display for RteError { 108 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 109 | write!( 110 | f, 111 | "RTE error, {} ({})", 112 | unsafe { CStr::from_ptr(ffi::rte_strerror(self.0)).to_string_lossy() }, 113 | self.0 114 | ) 115 | } 116 | } 117 | 118 | #[derive(Debug, Fail)] 119 | pub enum ErrorKind { 120 | #[fail(display = "invalid log type, {}", _0)] 121 | InvalidLogType(u32), 122 | #[fail(display = "invalid log level, {}", _0)] 123 | InvalidLogLevel(u32), 124 | #[fail(display = "cmdline parse error, {}", _0)] 125 | CmdLineParseError(i32), 126 | #[fail(display = "{}", _0)] 127 | OsError(i32), 128 | } 129 | 130 | pub fn rte_error() -> Error { 131 | RteError(unsafe { ffi::rte_errno() }).into() 132 | } 133 | 134 | pub fn os_error() -> Error { 135 | ErrorKind::OsError(errno().0 as i32).into() 136 | } 137 | 
-------------------------------------------------------------------------------- /rte/src/ether.rs: -------------------------------------------------------------------------------- 1 | use std::error; 2 | use std::fmt; 3 | use std::mem; 4 | use std::ops::{Deref, DerefMut}; 5 | use std::ptr; 6 | use std::result; 7 | use std::str; 8 | 9 | use rand::{thread_rng, Rng}; 10 | 11 | use ffi; 12 | 13 | use errors::Result; 14 | use mbuf; 15 | use utils::AsRaw; 16 | 17 | pub use ffi::{ 18 | ETHER_TYPE_IPv4, ETHER_TYPE_IPv6, ETHER_ADDR_FMT_SIZE, ETHER_CRC_LEN, ETHER_GROUP_ADDR, ETHER_HDR_LEN, 19 | ETHER_LOCAL_ADMIN_ADDR, ETHER_MAX_JUMBO_FRAME_LEN, ETHER_MAX_LEN, ETHER_MAX_VLAN_FRAME_LEN, ETHER_MAX_VLAN_ID, 20 | ETHER_MIN_LEN, ETHER_MIN_MTU, ETHER_MTU, ETHER_TYPE_1588, ETHER_TYPE_ARP, ETHER_TYPE_ETAG, ETHER_TYPE_LEN, 21 | ETHER_TYPE_LLDP, ETHER_TYPE_MPLS, ETHER_TYPE_MPLSM, ETHER_TYPE_QINQ, ETHER_TYPE_RARP, ETHER_TYPE_SLOW, 22 | ETHER_TYPE_TEB, ETHER_TYPE_VLAN, 23 | }; 24 | 25 | pub const ETHER_ADDR_LEN: usize = ffi::ETHER_ADDR_LEN as usize; 26 | 27 | #[derive(Debug, Clone, PartialEq)] 28 | pub struct AddrParseError(()); 29 | 30 | impl fmt::Display for AddrParseError { 31 | fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { 32 | fmt.write_str(error::Error::description(self)) 33 | } 34 | } 35 | 36 | impl error::Error for AddrParseError { 37 | fn description(&self) -> &str { 38 | "invalid MAC address syntax" 39 | } 40 | } 41 | 42 | pub type RawEtherAddr = ffi::ether_addr; 43 | 44 | /// A 48-bit (6 byte) buffer containing the MAC address 45 | #[derive(Debug, Copy, Clone, Default, PartialEq, Eq, Hash)] 46 | pub struct EtherAddr([u8; ETHER_ADDR_LEN]); 47 | 48 | impl Deref for EtherAddr { 49 | type Target = [u8; ETHER_ADDR_LEN]; 50 | 51 | fn deref(&self) -> &Self::Target { 52 | &self.0 53 | } 54 | } 55 | 56 | impl DerefMut for EtherAddr { 57 | fn deref_mut(&mut self) -> &mut Self::Target { 58 | &mut self.0 59 | } 60 | } 61 | 62 | impl EtherAddr { 63 | /// Creates a new MAC 
address from six eight-bit octets. 64 | /// 65 | /// The result will represent the MAC address a:b:c:d:e:f. 66 | #[inline] 67 | pub fn new(a: u8, b: u8, c: u8, d: u8, e: u8, f: u8) -> EtherAddr { 68 | EtherAddr([a, b, c, d, e, f]) 69 | } 70 | 71 | /// Fast copy an Ethernet address. 72 | #[inline] 73 | pub fn copy(from: &[u8; ETHER_ADDR_LEN], to: &mut [u8; ETHER_ADDR_LEN]) { 74 | unsafe { ptr::copy_nonoverlapping(from.as_ptr(), to.as_mut_ptr(), ETHER_ADDR_LEN) } 75 | } 76 | 77 | /// Returns the six eight-bit integers that make up this address. 78 | #[inline] 79 | pub fn octets(&self) -> &[u8; ETHER_ADDR_LEN] { 80 | &self.0 81 | } 82 | 83 | pub fn into_bytes(self) -> [u8; ETHER_ADDR_LEN] { 84 | self.0 85 | } 86 | 87 | pub fn from_bytes(b: &[u8]) -> result::Result { 88 | if b.len() != ETHER_ADDR_LEN { 89 | return Err(AddrParseError(())); 90 | } 91 | 92 | let mut addr = [0; ETHER_ADDR_LEN]; 93 | 94 | unsafe { 95 | ptr::copy(b.as_ptr(), addr.as_mut().as_mut_ptr(), b.len()); 96 | } 97 | 98 | Ok(EtherAddr(addr)) 99 | } 100 | 101 | pub fn zeroed() -> Self { 102 | unsafe { mem::zeroed() } 103 | } 104 | 105 | pub fn broadcast() -> Self { 106 | EtherAddr([0xffu8; ETHER_ADDR_LEN]) 107 | } 108 | 109 | /// Generate a random Ethernet address that is locally administered and not multicast. 110 | pub fn random() -> Self { 111 | let mut addr = [0u8; ETHER_ADDR_LEN]; 112 | 113 | thread_rng().fill(&mut addr); 114 | 115 | addr[0] &= !ffi::ETHER_GROUP_ADDR as u8; // clear multicast bit 116 | addr[0] |= ffi::ETHER_LOCAL_ADMIN_ADDR as u8; // set local assignment bit 117 | 118 | EtherAddr(addr) 119 | } 120 | 121 | /// Check if an Ethernet address is filled with zeros. 122 | #[inline] 123 | pub fn is_zero(&self) -> bool { 124 | self.0 == Self::zeroed().0 125 | } 126 | 127 | /// Check if an Ethernet address is a unicast address. 
128 | #[inline] 129 | pub fn is_unicast(&self) -> bool { 130 | (self.0[0] & ffi::ETHER_GROUP_ADDR as u8) == 0 131 | } 132 | 133 | /// Check if an Ethernet address is a multicast address. 134 | #[inline] 135 | pub fn is_multicast(&self) -> bool { 136 | (self.0[0] & ffi::ETHER_GROUP_ADDR as u8) != 0 137 | } 138 | 139 | /// Check if an Ethernet address is a broadcast address. 140 | #[inline] 141 | pub fn is_broadcast(&self) -> bool { 142 | self.0 == Self::broadcast().0 143 | } 144 | 145 | /// Check if an Ethernet address is a universally assigned address. 146 | #[inline] 147 | pub fn is_universal(&self) -> bool { 148 | (self.0[0] & ffi::ETHER_LOCAL_ADMIN_ADDR as u8) == 0 149 | } 150 | 151 | /// Check if an Ethernet address is a locally assigned address. 152 | #[inline] 153 | pub fn is_local_admin(&self) -> bool { 154 | (self.0[0] & ffi::ETHER_LOCAL_ADMIN_ADDR as u8) != 0 155 | } 156 | 157 | /// Check if an Ethernet address is a valid address. 158 | /// 159 | /// Checks that the address is a unicast address and is not filled with zeros. 
160 | #[inline] 161 | pub fn is_valid(&self) -> bool { 162 | self.is_unicast() && !self.is_zero() 163 | } 164 | } 165 | 166 | impl fmt::Display for EtherAddr { 167 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 168 | write!( 169 | f, 170 | "{:02x}:{:02x}:{:02x}:{:02x}:{:02x}:{:02x}", 171 | self.0[0], self.0[1], self.0[2], self.0[3], self.0[4], self.0[5] 172 | ) 173 | } 174 | } 175 | 176 | impl From<[u8; 6]> for EtherAddr { 177 | fn from(addr: [u8; 6]) -> EtherAddr { 178 | EtherAddr(addr) 179 | } 180 | } 181 | 182 | impl From<*const u8> for EtherAddr { 183 | fn from(p: *const u8) -> EtherAddr { 184 | let mut mac = [0u8; ETHER_ADDR_LEN]; 185 | 186 | unsafe { 187 | ptr::copy_nonoverlapping(p, (&mut mac[..]).as_mut_ptr(), ETHER_ADDR_LEN); 188 | } 189 | 190 | EtherAddr(mac) 191 | } 192 | } 193 | 194 | impl From<*mut u8> for EtherAddr { 195 | fn from(p: *mut u8) -> EtherAddr { 196 | let mut mac = [0u8; ETHER_ADDR_LEN]; 197 | 198 | unsafe { 199 | ptr::copy_nonoverlapping(p, (&mut mac[..]).as_mut_ptr(), ETHER_ADDR_LEN); 200 | } 201 | 202 | EtherAddr(mac) 203 | } 204 | } 205 | 206 | impl From for EtherAddr { 207 | fn from(addr: RawEtherAddr) -> EtherAddr { 208 | EtherAddr(addr.addr_bytes) 209 | } 210 | } 211 | 212 | impl str::FromStr for EtherAddr { 213 | type Err = AddrParseError; 214 | 215 | fn from_str(s: &str) -> result::Result { 216 | let addr: Vec = s 217 | .split(':') 218 | .filter_map(|part| u8::from_str_radix(part, 16).ok()) 219 | .collect(); 220 | 221 | EtherAddr::from_bytes(addr.as_slice()) 222 | } 223 | } 224 | 225 | // Ethernet frame types 226 | 227 | /// IPv4 Protocol. 228 | pub const ETHER_TYPE_IPV4_BE: u16 = rte_cpu_to_be_16!(ffi::ETHER_TYPE_IPv4 as u16); 229 | /// IPv6 Protocol. 230 | pub const ETHER_TYPE_IPV6_BE: u16 = rte_cpu_to_be_16!(ffi::ETHER_TYPE_IPv6 as u16); 231 | /// Arp Protocol. 232 | pub const ETHER_TYPE_ARP_BE: u16 = rte_cpu_to_be_16!(ffi::ETHER_TYPE_ARP as u16); 233 | /// Reverse Arp Protocol. 
234 | pub const ETHER_TYPE_RARP_BE: u16 = rte_cpu_to_be_16!(ffi::ETHER_TYPE_RARP as u16); 235 | /// IEEE 802.1Q VLAN tagging. 236 | pub const ETHER_TYPE_VLAN_BE: u16 = rte_cpu_to_be_16!(ffi::ETHER_TYPE_VLAN as u16); 237 | /// IEEE 802.1AS 1588 Precise Time Protocol. 238 | pub const ETHER_TYPE_1588_BE: u16 = rte_cpu_to_be_16!(ffi::ETHER_TYPE_1588 as u16); 239 | /// Slow protocols (LACP and Marker). 240 | pub const ETHER_TYPE_SLOW_BE: u16 = rte_cpu_to_be_16!(ffi::ETHER_TYPE_SLOW as u16); 241 | /// Transparent Ethernet Bridging. 242 | pub const ETHER_TYPE_TEB_BE: u16 = rte_cpu_to_be_16!(ffi::ETHER_TYPE_TEB as u16); 243 | 244 | /// Ethernet header: Contains the destination address, source address and frame type. 245 | pub type EtherHdr = ffi::ether_hdr; 246 | 247 | /// Ethernet VLAN Header. 248 | pub type VlanHdr = ffi::vlan_hdr; 249 | 250 | /// VXLAN protocol header. 251 | pub type VxlanHdr = ffi::vxlan_hdr; 252 | 253 | pub trait VlanExt { 254 | /// Extract VLAN tag information into mbuf 255 | fn vlan_strip(&mut self) -> Result<()>; 256 | } 257 | 258 | impl VlanExt for mbuf::MBuf { 259 | fn vlan_strip(&mut self) -> Result<()> { 260 | rte_check!(unsafe { ffi::_rte_vlan_strip(self.as_raw()) }) 261 | } 262 | } 263 | 264 | /// Insert VLAN tag into mbuf. 
265 | pub fn vlan_insert(m: &mut mbuf::RawMBufPtr) -> Result<()> { 266 | rte_check!(unsafe { ffi::_rte_vlan_insert(m) }) 267 | } 268 | 269 | #[cfg(test)] 270 | mod tests { 271 | use std::str::FromStr; 272 | 273 | use super::*; 274 | 275 | #[test] 276 | fn test_macaddr() { 277 | let addr = EtherAddr::new(0x18, 0x2b, 0x3c, 0x4d, 0x5e, 0x6f); 278 | 279 | assert_eq!(addr.octets(), &[0x18, 0x2b, 0x3c, 0x4d, 0x5e, 0x6f]); 280 | assert_eq!(addr.to_string(), "18:2b:3c:4d:5e:6f"); 281 | 282 | assert_eq!(addr, EtherAddr::from([0x18, 0x2b, 0x3c, 0x4d, 0x5e, 0x6f])); 283 | assert_eq!(addr, EtherAddr::from_str("18:2b:3c:4d:5e:6f").unwrap()); 284 | 285 | assert!(!addr.is_zero()); 286 | assert!(EtherAddr::zeroed().is_zero()); 287 | 288 | assert!(addr.is_unicast()); 289 | 290 | let local_addr = EtherAddr::new(0x13, 0x2b, 0x3c, 0x4d, 0x5e, 0x6f); 291 | 292 | assert!(!addr.is_multicast()); 293 | assert!(local_addr.is_multicast()); 294 | 295 | assert!(!addr.is_broadcast()); 296 | assert!(EtherAddr::broadcast().is_broadcast()); 297 | 298 | assert!(addr.is_universal()); 299 | assert!(!local_addr.is_universal()); 300 | 301 | assert!(!addr.is_local_admin()); 302 | assert!(local_addr.is_local_admin()); 303 | 304 | let rand_addr = EtherAddr::random(); 305 | 306 | assert!(rand_addr.is_unicast()); 307 | assert!(!rand_addr.is_multicast()); 308 | assert!(!rand_addr.is_broadcast()); 309 | assert!(!rand_addr.is_universal()); 310 | assert!(rand_addr.is_local_admin()); 311 | assert!(rand_addr.is_valid()); 312 | } 313 | } 314 | -------------------------------------------------------------------------------- /rte/src/ffi.rs: -------------------------------------------------------------------------------- 1 | use std::os::raw::{c_int, c_uchar, c_uint}; 2 | 3 | use libc::uint16_t; 4 | 5 | pub use rte_sys::*; 6 | 7 | /// Error number value, stored per-thread, which can be queried after 8 | /// calls to certain functions to determine why those functions failed. 
9 | pub fn rte_errno() -> i32 { 10 | unsafe { rte_sys::_rte_errno() } 11 | } 12 | -------------------------------------------------------------------------------- /rte/src/ip.rs: -------------------------------------------------------------------------------- 1 | use ffi; 2 | 3 | /// IPv4 Header 4 | pub type Ipv4Hdr = ffi::ipv4_hdr; 5 | 6 | /// IPv6 Header 7 | pub type Ipv6Hdr = ffi::ipv6_hdr; 8 | -------------------------------------------------------------------------------- /rte/src/kni.rs: -------------------------------------------------------------------------------- 1 | use std::cmp; 2 | use std::ffi::CStr; 3 | use std::mem; 4 | use std::ops::{Deref, DerefMut}; 5 | use std::ptr; 6 | 7 | use libc; 8 | 9 | use ffi; 10 | 11 | use errors::{rte_error, Result}; 12 | use ether; 13 | use mbuf; 14 | use mempool; 15 | use pci; 16 | 17 | /// Initialize and preallocate KNI subsystem 18 | pub fn init(max_kni_ifaces: usize) -> Result<()> { 19 | if unsafe { ffi::rte_kni_init(max_kni_ifaces as u32) } == 0 { 20 | Ok(()) 21 | } else { 22 | Err(rte_error()) 23 | } 24 | } 25 | 26 | /// Close KNI device. 27 | pub fn close() { 28 | unsafe { ffi::rte_kni_close() } 29 | } 30 | 31 | /// Allocate KNI interface according to the port id, mbuf size, mbuf pool, 32 | /// configurations and callbacks for kernel requests. 33 | /// 34 | /// The KNI interface created in the kernel space is the net interface 35 | /// the traditional Linux application talking to. 
36 | /// 37 | pub fn alloc( 38 | pktmbuf_pool: &mut mempool::RawMemoryPool, 39 | conf: &KniDeviceConf, 40 | opts: Option<&KniDeviceOps>, 41 | ) -> Result { 42 | unsafe { 43 | let mut kni_conf = ffi::rte_kni_conf { 44 | name: mem::zeroed(), 45 | core_id: conf.core_id, 46 | group_id: conf.group_id, 47 | mbuf_size: conf.mbuf_size, 48 | addr: conf.pci_addr, 49 | id: conf.pci_id, 50 | _bitfield_1: ffi::rte_kni_conf::new_bitfield_1(conf.flags.bits), 51 | mac_addr: mem::transmute(conf.mac_addr.into_bytes()), 52 | mtu: conf.mtu, 53 | }; 54 | 55 | ptr::copy( 56 | conf.name.as_ptr(), 57 | kni_conf.name.as_mut_ptr() as *mut u8, 58 | cmp::min(conf.name.len(), kni_conf.name.len() - 1), 59 | ); 60 | 61 | let p = ffi::rte_kni_alloc(pktmbuf_pool, &kni_conf, mem::transmute(opts)); 62 | 63 | rte_check!(p, NonNull; ok => { KniDevice(p)}) 64 | } 65 | } 66 | 67 | bitflags! { 68 | pub struct KniFlag: u8 { 69 | const FORCE_BIND = 1; 70 | } 71 | } 72 | 73 | /// Structure for configuring KNI device. 74 | pub struct KniDeviceConf<'a> { 75 | /// KNI name which will be used in relevant network device. 76 | /// Let the name as short as possible, as it will be part of memzone name. 
77 | pub name: &'a str, 78 | /// Core ID to bind kernel thread on 79 | pub core_id: u32, 80 | /// Group ID 81 | pub group_id: u16, 82 | /// mbuf size 83 | pub mbuf_size: u32, 84 | 85 | pub pci_addr: pci::Addr, 86 | pub pci_id: pci::Id, 87 | 88 | /// Flag to bind kernel thread 89 | pub flags: KniFlag, 90 | 91 | pub mac_addr: ether::EtherAddr, 92 | pub mtu: u16, 93 | } 94 | 95 | impl<'a> Default for KniDeviceConf<'a> { 96 | fn default() -> Self { 97 | unsafe { mem::zeroed() } 98 | } 99 | } 100 | 101 | /// Pointer to function of changing MTU 102 | pub type ChangeMtuCallback = fn(port_id: u8, new_mut: libc::c_uint) -> libc::c_int; 103 | 104 | /// Pointer to function of configuring network interface 105 | pub type ConfigNetworkInterfaceCallback = fn(port_id: u8, if_up: u8) -> libc::c_int; 106 | 107 | pub type KniDeviceOps = ffi::rte_kni_ops; 108 | 109 | pub type RawKniDevice = ffi::rte_kni; 110 | pub type RawKniDevicePtr = *mut ffi::rte_kni; 111 | 112 | pub struct KniDevice(RawKniDevicePtr); 113 | 114 | impl Drop for KniDevice { 115 | fn drop(&mut self) { 116 | self.release().expect("fail to release KNI device") 117 | } 118 | } 119 | 120 | impl Deref for KniDevice { 121 | type Target = RawKniDevice; 122 | 123 | fn deref(&self) -> &Self::Target { 124 | unsafe { &*self.0 } 125 | } 126 | } 127 | 128 | impl DerefMut for KniDevice { 129 | fn deref_mut(&mut self) -> &mut Self::Target { 130 | unsafe { &mut *self.0 } 131 | } 132 | } 133 | 134 | impl KniDevice { 135 | pub fn from_raw(p: RawKniDevicePtr) -> Self { 136 | KniDevice(p) 137 | } 138 | 139 | /// Extract the raw pointer from an underlying object. 140 | pub fn as_raw(&self) -> RawKniDevicePtr { 141 | self.0 142 | } 143 | 144 | /// Consume the KniDevice, returning the raw pointer from an underlying object. 
145 | pub fn into_raw(self) -> RawKniDevicePtr { 146 | self.0 147 | } 148 | 149 | pub fn release(&mut self) -> Result<()> { 150 | if self.0.is_null() { 151 | Ok(()) 152 | } else { 153 | rte_check!(unsafe { 154 | ffi::rte_kni_release(self.0) 155 | }; ok => { 156 | self.0 = ptr::null_mut(); 157 | }) 158 | } 159 | } 160 | 161 | /// Get the KNI context of its name. 162 | pub fn get(name: &str) -> Result { 163 | let p = unsafe { ffi::rte_kni_get(try!(to_cptr!(name))) }; 164 | 165 | rte_check!(p, NonNull; ok => { KniDevice(p) }) 166 | } 167 | 168 | /// Get the name given to a KNI device 169 | pub fn name(&self) -> &str { 170 | unsafe { CStr::from_ptr(ffi::rte_kni_get_name(self.0)).to_str().unwrap() } 171 | } 172 | 173 | /// It is used to handle the request mbufs sent from kernel space. 174 | /// 175 | /// Then analyzes it and calls the specific actions for the specific requests. 176 | /// Finally constructs the response mbuf and puts it back to the resp_q. 177 | /// 178 | pub fn handle_requests(&self) -> Result<&Self> { 179 | rte_check!(unsafe { ffi::rte_kni_handle_request(self.0) }; ok => { self }) 180 | } 181 | 182 | /// Retrieve a burst of packets from a KNI interface. 183 | /// 184 | /// The retrieved packets are stored in rte_mbuf structures 185 | /// whose pointers are supplied in the array of mbufs, 186 | /// and the maximum number is indicated by num. 187 | /// It handles the freeing of the mbufs in the free queue of KNI interface. 188 | /// 189 | pub fn rx_burst(&self, mbufs: &mut [mbuf::RawMBufPtr]) -> usize { 190 | unsafe { ffi::rte_kni_rx_burst(self.0, mbufs.as_mut_ptr(), mbufs.len() as u32) as usize } 191 | } 192 | 193 | /// Send a burst of packets to a KNI interface. 194 | /// 195 | /// The packets to be sent out are stored in rte_mbuf structures 196 | /// whose pointers are supplied in the array of mbufs, 197 | /// and the maximum number is indicated by num. 198 | /// It handles allocating the mbufs for KNI interface alloc queue. 
199 | /// 200 | pub fn tx_burst(&self, mbufs: &mut [mbuf::RawMBufPtr]) -> usize { 201 | unsafe { ffi::rte_kni_rx_burst(self.0, mbufs.as_mut_ptr(), mbufs.len() as u32) as usize } 202 | } 203 | 204 | /// Register KNI request handling for a specified port, 205 | /// and it can be called by master process or slave process. 206 | pub fn register_handlers(&self, opts: Option<&KniDeviceOps>) -> Result<&Self> { 207 | rte_check!(unsafe { 208 | ffi::rte_kni_register_handlers(self.0, mem::transmute(opts)) 209 | }; ok => { self }) 210 | } 211 | 212 | /// Unregister KNI request handling for a specified port. 213 | pub fn unregister_handlers(&self) -> Result<&Self> { 214 | rte_check!(unsafe { ffi::rte_kni_unregister_handlers(self.0) }; ok => { self }) 215 | } 216 | } 217 | -------------------------------------------------------------------------------- /rte/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![allow( 2 | deprecated, 3 | unused, 4 | clippy::useless_attribute, 5 | clippy::not_unsafe_ptr_arg_deref, 6 | clippy::trivially_copy_pass_by_ref, 7 | clippy::many_single_char_names 8 | )] 9 | 10 | #[macro_use] 11 | extern crate log; 12 | #[macro_use] 13 | extern crate bitflags; 14 | #[macro_use] 15 | extern crate failure; 16 | #[macro_use] 17 | extern crate lazy_static; 18 | extern crate cfile; 19 | extern crate errno; 20 | extern crate itertools; 21 | extern crate libc; 22 | extern crate rand; 23 | extern crate time; 24 | #[macro_use] 25 | extern crate num_derive; 26 | extern crate num_traits; 27 | 28 | extern crate rte_sys; 29 | 30 | pub mod ffi; 31 | 32 | #[macro_use] 33 | pub mod errors; 34 | #[macro_use] 35 | pub mod macros; 36 | #[macro_use] 37 | mod common; 38 | #[macro_use] 39 | pub mod utils; 40 | 41 | pub mod mbuf; 42 | pub mod mempool; 43 | pub mod ring; 44 | 45 | pub mod bond; 46 | pub mod ethdev; 47 | pub mod kni; 48 | pub mod pci; 49 | 50 | pub mod arp; 51 | pub mod ether; 52 | pub mod ip; 53 | 54 | #[macro_use] 55 
| pub mod cmdline; 56 | 57 | pub use self::common::*; 58 | pub use self::errors::{ErrorKind, Result, RteError}; 59 | pub use self::ethdev::PortId; 60 | pub use self::ethdev::QueueId; 61 | 62 | #[cfg(test)] 63 | mod tests; 64 | -------------------------------------------------------------------------------- /rte/src/macros.rs: -------------------------------------------------------------------------------- 1 | pub const BOOL_TRUE: u8 = 1; 2 | pub const BOOL_FALSE: u8 = 0; 3 | 4 | #[macro_export] 5 | macro_rules! bool_value { 6 | ($b:expr) => { 7 | if $b { 8 | $crate::macros::BOOL_TRUE 9 | } else { 10 | $crate::macros::BOOL_FALSE 11 | } 12 | }; 13 | } 14 | 15 | #[macro_export] 16 | macro_rules! to_cptr { 17 | ($s:expr) => { 18 | ::std::ffi::CString::new($s).map(|s| s.as_ptr() as *const i8) 19 | }; 20 | } 21 | 22 | /// Macro to get the offset of a struct field in bytes from the address of the 23 | /// struct. 24 | /// 25 | /// This macro is identical to `offset_of!` but doesn't give a warning about 26 | /// unnecessary unsafe blocks when invoked from unsafe code. 27 | #[macro_export] 28 | macro_rules! offset_of_unsafe { 29 | ($container:path, $field:ident) => {{ 30 | // Make sure the field exists, otherwise this could result in UB if the 31 | // field is accessed through Deref. This will cause a null dereference 32 | // at runtime since the offset can't be reduced to a constant. 33 | let $container { $field: _, .. }; 34 | 35 | // Yes, this is technically derefencing a null pointer. However, Rust 36 | // currently accepts this and reduces it to a constant, even in debug 37 | // builds! 38 | &(*(0 as *const $container)).$field as *const _ as isize 39 | }}; 40 | } 41 | 42 | /// Macro to get the offset of a struct field in bytes from the address of the 43 | /// struct. 44 | /// 45 | /// This macro will cause a warning if it is invoked in an unsafe block. Use the 46 | /// `offset_of_unsafe` macro instead to avoid this warning. 47 | #[macro_export] 48 | macro_rules! 
offset_of { 49 | ($container:path, $field:ident) => { 50 | unsafe { offset_of_unsafe!($container, $field) } 51 | }; 52 | } 53 | -------------------------------------------------------------------------------- /rte/src/mempool.rs: -------------------------------------------------------------------------------- 1 | //! 2 | //! RTE Mempool. 3 | //! 4 | //! A memory pool is an allocator of fixed-size object. It is 5 | //! identified by its name, and uses a ring to store free objects. It 6 | //! provides some other optional services, like a per-core object 7 | //! cache, and an alignment helper to ensure that objects are padded 8 | //! to spread them equally on all RAM channels, ranks, and so on. 9 | //! 10 | //! Objects owned by a mempool should never be added in another 11 | //! mempool. When an object is freed using rte_mempool_put() or 12 | //! equivalent, the object data is not modified; the user can save some 13 | //! meta-data in the object data and retrieve them when allocating a 14 | //! new object. 15 | //! 16 | //! Note: the mempool implementation is not preemptible. An lcore must not be 17 | //! interrupted by another task that uses the same mempool (because it uses a 18 | //! ring which is not preemptible). Also, usual mempool functions like 19 | //! rte_mempool_get() or rte_mempool_put() are designed to be called from an EAL 20 | //! thread due to the internal per-lcore cache. Due to the lack of caching, 21 | //! rte_mempool_get() or rte_mempool_put() performance will suffer when called 22 | //! by non-EAL threads. Instead, non-EAL threads should call 23 | //! rte_mempool_generic_get() or rte_mempool_generic_put() with a user cache 24 | //! created with rte_mempool_cache_create(). 25 | //! 
26 | use std::ffi::CStr; 27 | use std::mem; 28 | use std::os::raw::{c_uint, c_void}; 29 | use std::os::unix::io::AsRawFd; 30 | use std::ptr::{self, NonNull}; 31 | 32 | use cfile; 33 | use ffi; 34 | use libc; 35 | 36 | use errors::{AsResult, Result}; 37 | use lcore; 38 | use memory::SocketId; 39 | use ring; 40 | use utils::{AsCString, AsRaw, CallbackContext, FromRaw, IntoRaw, Raw}; 41 | 42 | pub use ffi::{ 43 | MEMPOOL_PG_NUM_DEFAULT, RTE_MEMPOOL_ALIGN, RTE_MEMPOOL_ALIGN_MASK, RTE_MEMPOOL_HEADER_COOKIE1, 44 | RTE_MEMPOOL_HEADER_COOKIE2, RTE_MEMPOOL_MZ_FORMAT, RTE_MEMPOOL_MZ_PREFIX, RTE_MEMPOOL_TRAILER_COOKIE, 45 | }; 46 | 47 | lazy_static! { 48 | pub static ref RTE_MEMPOOL_NAMESIZE: usize = *ring::RTE_RING_NAMESIZE - RTE_MEMPOOL_MZ_PREFIX.len() + 1; 49 | } 50 | 51 | bitflags! { 52 | pub struct MemoryPoolFlags: u32 { 53 | /// Do not spread in memory. 54 | const MEMPOOL_F_NO_SPREAD = ffi::MEMPOOL_F_NO_SPREAD; 55 | /// Do not align objs on cache lines. 56 | const MEMPOOL_F_NO_CACHE_ALIGN = ffi::MEMPOOL_F_NO_CACHE_ALIGN; 57 | /// Default put is "single-producer". 58 | const MEMPOOL_F_SP_PUT = ffi::MEMPOOL_F_SP_PUT; 59 | /// Default get is "single-consumer". 60 | const MEMPOOL_F_SC_GET = ffi::MEMPOOL_F_SC_GET; 61 | /// Internal: pool is created. 62 | const MEMPOOL_F_POOL_CREATED = ffi::MEMPOOL_F_POOL_CREATED; 63 | /// Don't need IOVA contiguous objs. 64 | const MEMPOOL_F_NO_IOVA_CONTIG = ffi::MEMPOOL_F_NO_IOVA_CONTIG; 65 | } 66 | } 67 | 68 | pub trait Pooled: Raw { 69 | /// Return a pointer to the mempool owning this object. 70 | fn pool(&self) -> MemoryPool { 71 | unsafe { ffi::_rte_mempool_from_obj(self.as_raw() as *mut _) }.into() 72 | } 73 | 74 | /// Return the IO address of elt, which is an element of the pool mp. 75 | fn virt2iova(&self) -> ffi::rte_iova_t { 76 | unsafe { ffi::_rte_mempool_virt2iova(self.as_raw() as *mut _ as *const _) } 77 | } 78 | } 79 | 80 | /// A mempool constructor callback function. 
81 | pub type Constructor = fn(pool: &MemoryPool, arg: Option); 82 | 83 | /// A mempool walk callback function. 84 | pub type PoolWalkCallback = fn(pool: &MemoryPool, arg: Option); 85 | 86 | /// A mempool object iterator callback function. 87 | pub type ObjectCallback = fn(pool: &MemoryPool, arg: Option, obj: &mut O, idx: usize); 88 | 89 | pub type MemoryChunkCallback = fn(pool: &MemoryPool, arg: Option, mem: &ffi::rte_mempool_memhdr, idx: usize); 90 | 91 | pub type RawMemoryPool = ffi::rte_mempool; 92 | pub type RawMemoryPoolPtr = *mut ffi::rte_mempool; 93 | 94 | /// The RTE mempool structure. 95 | raw!(pub MemoryPool(RawMemoryPool)); 96 | 97 | impl MemoryPool { 98 | /// Search a mempool from its name 99 | pub fn lookup>(name: S) -> Result { 100 | let name = name.as_cstring(); 101 | 102 | unsafe { ffi::rte_mempool_lookup(name.as_ptr()) } 103 | .as_result() 104 | .map(MemoryPool) 105 | } 106 | 107 | /// Name of mempool. 108 | pub fn name(&self) -> &str { 109 | unsafe { CStr::from_ptr((&self.name[..]).as_ptr()).to_str().unwrap() } 110 | } 111 | 112 | /// Free a mempool 113 | /// 114 | /// Unlink the mempool from global list, free the memory chunks, and all 115 | /// memory referenced by the mempool. The objects must not be used by 116 | /// other cores as they will be freed. 117 | fn free(&mut self) { 118 | unsafe { ffi::rte_mempool_free(self.as_raw()) } 119 | } 120 | 121 | /// Return the number of entries in the mempool. 122 | /// 123 | /// When cache is enabled, this function has to browse the length of 124 | /// all lcores, so it should not be used in a data path, but only for 125 | /// debug purposes. User-owned mempool caches are not accounted for. 
126 | pub fn avail_count(&self) -> usize { 127 | unsafe { ffi::rte_mempool_avail_count(self.as_raw()) as usize } 128 | } 129 | 130 | /// Return the number of elements which have been allocated from the mempool 131 | /// 132 | /// When cache is enabled, this function has to browse the length of 133 | /// all lcores, so it should not be used in a data path, but only for 134 | /// debug purposes. 135 | pub fn in_use_count(&self) -> usize { 136 | unsafe { ffi::rte_mempool_in_use_count(self.as_raw()) as usize } 137 | } 138 | 139 | /// Test if the mempool is full. 140 | pub fn is_full(&self) -> bool { 141 | self.avail_count() == self.size as usize 142 | } 143 | 144 | /// Test if the mempool is empty. 145 | pub fn is_empty(&self) -> bool { 146 | self.avail_count() == 0 147 | } 148 | 149 | /// Check the consistency of mempool objects. 150 | /// 151 | /// Verify the coherency of fields in the mempool structure. 152 | /// Also check that the cookies of mempool objects (even the ones that are not present in pool) 153 | /// have a correct value. If not, a panic will occur. 154 | /// 155 | pub fn audit(&self) { 156 | unsafe { ffi::rte_mempool_audit(self.as_raw()) } 157 | } 158 | 159 | /// Return a pointer to the private data in an mempool structure. 160 | pub fn get_priv(&self) -> *const T { 161 | unsafe { ffi::_rte_mempool_get_priv(self.as_raw()) as *const _ } 162 | } 163 | 164 | /// Dump the status of the mempool to the console. 
165 | pub fn dump(&self, s: &S) -> Result<()> { 166 | let mut f = cfile::fdopen(s, "w")?; 167 | 168 | unsafe { ffi::rte_mempool_dump(&mut **f as *mut _ as *mut _, self.as_raw()) }; 169 | 170 | Ok(()) 171 | } 172 | 173 | /// Dump the status of all mempools on the console 174 | pub fn list_dump(s: &S) -> Result<()> { 175 | let mut f = cfile::fdopen(s, "w")?; 176 | 177 | unsafe { ffi::rte_mempool_list_dump(&mut **f as *mut _ as *mut _) }; 178 | 179 | Ok(()) 180 | } 181 | 182 | /// Call a function for each mempool object in a memory chunk 183 | /// 184 | /// Iterate across objects of the given size and alignment in the provided chunk of memory. 185 | /// The given memory buffer can consist of disjointed physical pages. 186 | /// 187 | /// For each object, call the provided callback (if any). 188 | /// This function is used to populate a mempool, or walk through all the elements of a mempool, 189 | /// or estimate how many elements of the given size could be created in the given memory buffer. 190 | /// 191 | pub fn walk(&mut self, callback: ObjectCallback, arg: Option) -> usize { 192 | unsafe { 193 | ffi::rte_mempool_obj_iter( 194 | self.as_raw(), 195 | Some(obj_cb_stub::), 196 | ObjectContext::new(callback, arg).into_raw(), 197 | ) as usize 198 | } 199 | } 200 | } 201 | 202 | /// Create a new mempool named name in memory. 203 | /// 204 | /// This function uses memzone_reserve() to allocate memory. 205 | /// The pool contains n elements of elt_size. Its size is set to n. 206 | /// All elements of the mempool are allocated together with the mempool header, 207 | /// in one physically continuous chunk of memory. 
208 | /// 209 | pub fn create( 210 | name: S, 211 | n: u32, 212 | cache_size: u32, 213 | private_data_size: u32, 214 | mp_init: Option>, 215 | mp_init_arg: Option, 216 | obj_init: Option>, 217 | obj_init_arg: Option, 218 | socket_id: SocketId, 219 | flags: MemoryPoolFlags, 220 | ) -> Result 221 | where 222 | S: AsRef, 223 | { 224 | let name = name.as_cstring(); 225 | 226 | let mp_init_ctx = if let Some(callback) = mp_init { 227 | ConstructorContext::new(callback, mp_init_arg).into_raw() 228 | } else { 229 | ptr::null_mut() 230 | }; 231 | let obj_init_ctx = if let Some(callback) = obj_init { 232 | ObjectContext::new(callback, obj_init_arg).into_raw() 233 | } else { 234 | ptr::null_mut() 235 | }; 236 | 237 | unsafe { 238 | ffi::rte_mempool_create( 239 | name.as_ptr(), 240 | n, 241 | mem::size_of::() as u32, 242 | cache_size, 243 | private_data_size, 244 | if mp_init.is_none() { 245 | None 246 | } else { 247 | Some(mp_init_stub::) 248 | }, 249 | mp_init_ctx, 250 | if obj_init.is_none() { 251 | None 252 | } else { 253 | Some(obj_cb_stub::) 254 | }, 255 | obj_init_ctx, 256 | socket_id, 257 | flags.bits, 258 | ) 259 | } 260 | .as_result() 261 | .map(MemoryPool) 262 | } 263 | 264 | /// Create an empty mempool 265 | /// 266 | /// The mempool is allocated and initialized, but it is not populated: 267 | /// no memory is allocated for the mempool elements. 268 | /// The user has to call rte_mempool_populate_*() to add memory chunks to the pool. 269 | /// Once populated, the user may also want to initialize each object with rte_mempool_obj_iter(). 
270 | pub fn create_empty( 271 | name: S, 272 | n: u32, 273 | cache_size: u32, 274 | private_data_size: u32, 275 | socket_id: SocketId, 276 | flags: MemoryPoolFlags, 277 | ) -> Result 278 | where 279 | S: AsRef, 280 | { 281 | let name = name.as_cstring(); 282 | 283 | unsafe { 284 | ffi::rte_mempool_create_empty( 285 | name.as_ptr(), 286 | n, 287 | mem::size_of::() as u32, 288 | cache_size, 289 | private_data_size, 290 | socket_id, 291 | flags.bits, 292 | ) 293 | } 294 | .as_result() 295 | .map(MemoryPool) 296 | } 297 | 298 | type ConstructorContext = CallbackContext, Option>; 299 | 300 | unsafe extern "C" fn mp_init_stub(mp: *mut ffi::rte_mempool, arg: *mut c_void) { 301 | let mp = MemoryPool::from(mp); 302 | let ctx = ConstructorContext::::from_raw(arg); 303 | 304 | (ctx.callback)(&mp, ctx.arg); 305 | 306 | mem::forget(mp); 307 | } 308 | 309 | type ObjectContext = CallbackContext, Option>; 310 | 311 | unsafe extern "C" fn obj_cb_stub(mp: *mut ffi::rte_mempool, arg: *mut c_void, obj: *mut c_void, obj_idx: c_uint) { 312 | let mp = MemoryPool::from(mp); 313 | let ctx = ObjectContext::::from_raw(arg); 314 | 315 | (ctx.callback)(&mp, ctx.arg, (obj as *mut O).as_mut().unwrap(), obj_idx as usize); 316 | 317 | mem::forget(mp); 318 | } 319 | 320 | type MemoryChunkContext = CallbackContext, Option>; 321 | 322 | unsafe extern "C" fn mem_cb_stub( 323 | mp: *mut ffi::rte_mempool, 324 | arg: *mut c_void, 325 | memhdr: *mut ffi::rte_mempool_memhdr, 326 | mem_idx: c_uint, 327 | ) { 328 | let mp = MemoryPool::from(mp); 329 | let ctx = MemoryChunkContext::::from_raw(arg); 330 | 331 | (ctx.callback)(&mp, ctx.arg, &*memhdr, mem_idx as usize); 332 | 333 | mem::forget(mp); 334 | } 335 | 336 | pub fn lookup(name: &str) -> Result { 337 | let p = unsafe { ffi::rte_mempool_lookup(try!(to_cptr!(name))) }; 338 | 339 | rte_check!(p, NonNull) 340 | } 341 | 342 | /// Dump the status of all mempools on the console 343 | pub fn list_dump(s: &S) { 344 | if let Ok(mut f) = cfile::fdopen(s, "w") { 
345 | unsafe { 346 | ffi::rte_mempool_list_dump(&mut **f as *mut _ as *mut _); 347 | } 348 | } 349 | } 350 | 351 | /// Walk list of all memory pools 352 | pub fn walk(callback: PoolWalkCallback, arg: Option) { 353 | unsafe { 354 | ffi::rte_mempool_walk( 355 | Some(pool_walk_stub::), 356 | PoolWalkContext::new(callback, arg).into_raw(), 357 | ); 358 | } 359 | } 360 | 361 | type PoolWalkContext = CallbackContext, Option>; 362 | 363 | unsafe extern "C" fn pool_walk_stub(mp: *mut ffi::rte_mempool, arg: *mut libc::c_void) { 364 | let mp = MemoryPool::from(mp); 365 | let ctxt = PoolWalkContext::::from_raw(arg); 366 | 367 | (ctxt.callback)(&mp, ctxt.arg); 368 | 369 | mem::forget(mp) 370 | } 371 | 372 | pub type RawCache = ffi::rte_mempool_cache; 373 | pub type RawCachePtr = *mut ffi::rte_mempool_cache; 374 | 375 | raw!(pub Cache(RawCache)); 376 | 377 | impl Cache { 378 | /// Create a user-owned mempool cache. 379 | /// 380 | /// This can be used by non-EAL threads to enable caching 381 | /// when they interact with a mempool. 382 | pub fn create(size: usize, socket_id: SocketId) -> Self { 383 | unsafe { ffi::rte_mempool_cache_create(size as u32, socket_id) }.into() 384 | } 385 | 386 | /// Free a user-owned mempool cache. 387 | fn free(self) { 388 | unsafe { ffi::rte_mempool_cache_free(self.as_raw()) } 389 | } 390 | } 391 | 392 | impl MemoryPool { 393 | /// Flush a user-owned mempool cache to the specified mempool. 394 | pub fn flush(&self, cache: &Cache) { 395 | unsafe { ffi::_rte_mempool_cache_flush(cache.as_raw(), self.as_raw()) } 396 | } 397 | 398 | /// Get a pointer to the per-lcore default mempool cache. 399 | pub fn default_cache(&self) -> Option { 400 | lcore::current().and_then(|lcore_id| { 401 | NonNull::new(unsafe { ffi::_rte_mempool_default_cache(self.as_raw(), *lcore_id) }).map(Cache) 402 | }) 403 | } 404 | 405 | /// Put several objects back in the mempool. 
406 | pub fn generic_put, R>(&mut self, objs: &[T], cache: Option) { 407 | unsafe { 408 | ffi::_rte_mempool_generic_put( 409 | self.as_raw(), 410 | objs.as_ptr() as *const _, 411 | objs.len() as u32, 412 | cache.map(|cache| cache.into_raw()).unwrap_or(ptr::null_mut()), 413 | ) 414 | } 415 | } 416 | 417 | /// Put several objects back in the mempool. 418 | /// 419 | /// This function calls the multi-producer or the single-producer 420 | /// version depending on the default behavior that was specified at 421 | /// mempool creation time (see flags). 422 | pub fn put_bulk, R>(&mut self, objs: &[T]) { 423 | unsafe { ffi::_rte_mempool_put_bulk(self.as_raw(), objs.as_ptr() as *const _, objs.len() as u32) } 424 | } 425 | 426 | /// Put several objects back in the mempool. 427 | /// 428 | /// This function calls the multi-producer or the single-producer 429 | /// version depending on the default behavior that was specified at 430 | /// mempool creation time (see flags). 431 | pub fn put, R>(&mut self, obj: T) { 432 | unsafe { ffi::_rte_mempool_put(self.as_raw(), obj.as_raw() as *mut _) } 433 | } 434 | 435 | /// Get several objects from the mempool. 436 | /// 437 | /// If cache is enabled, objects will be retrieved first from cache, 438 | /// subsequently from the common pool. Note that it can return -ENOENT when 439 | /// the local cache and common pool are empty, even if cache from other 440 | /// lcores are full. 441 | pub fn generic_get, R>(&mut self, objs: &mut [T], cache: Option) -> Result<()> { 442 | unsafe { 443 | ffi::_rte_mempool_generic_get( 444 | self.as_raw(), 445 | objs.as_mut_ptr() as *mut _, 446 | objs.len() as u32, 447 | cache.map(|cache| cache.into_raw()).unwrap_or(ptr::null_mut()), 448 | ) 449 | } 450 | .as_result() 451 | .map(|_| ()) 452 | } 453 | 454 | /// Get several objects from the mempool. 
455 | /// 456 | /// This function calls the multi-consumers or the single-consumer 457 | /// version, depending on the default behaviour that was specified at 458 | /// mempool creation time (see flags). 459 | /// 460 | /// If cache is enabled, objects will be retrieved first from cache, 461 | /// subsequently from the common pool. Note that it can return -ENOENT when 462 | /// the local cache and common pool are empty, even if cache from other 463 | /// lcores are full. 464 | pub fn get_bulk, R>(&mut self, objs: &mut [T]) -> Result<()> { 465 | unsafe { ffi::_rte_mempool_get_bulk(self.as_raw(), objs.as_mut_ptr() as *mut _, objs.len() as u32) } 466 | .as_result() 467 | .map(|_| ()) 468 | } 469 | 470 | /// Get several objects from the mempool. 471 | /// 472 | /// This function calls the multi-consumers or the single-consumer 473 | /// version, depending on the default behaviour that was specified at 474 | /// mempool creation time (see flags). 475 | /// 476 | /// If cache is enabled, objects will be retrieved first from cache, 477 | /// subsequently from the common pool. Note that it can return -ENOENT when 478 | /// the local cache and common pool are empty, even if cache from other 479 | /// lcores are full. 480 | pub fn get, R>(&mut self) -> Result { 481 | let mut obj = ptr::null_mut(); 482 | 483 | unsafe { ffi::_rte_mempool_get(self.as_raw(), &mut obj) } 484 | .as_result() 485 | .map(|_| (obj as *mut T::Raw).into()) 486 | } 487 | 488 | /// Get a contiguous blocks of objects from the mempool. 489 | /// 490 | /// If cache is enabled, consider to flush it first, to reuse objects 491 | /// as soon as possible. 492 | /// 493 | /// The application should check that the driver supports the operation 494 | /// by calling rte_mempool_ops_get_info() and checking that `contig_block_size` 495 | /// is not zero. 
496 | pub fn get_contig_blocks, R>(&mut self, objs: &mut [T]) -> Result<()> { 497 | unsafe { ffi::_rte_mempool_get_contig_blocks(self.as_raw(), objs.as_mut_ptr() as *mut _, objs.len() as u32) } 498 | .as_result() 499 | .map(|_| ()) 500 | } 501 | } 502 | -------------------------------------------------------------------------------- /rte/src/pci.rs: -------------------------------------------------------------------------------- 1 | use ffi; 2 | 3 | pub type Addr = ffi::rte_pci_addr; 4 | pub type Id = ffi::rte_pci_id; 5 | -------------------------------------------------------------------------------- /rte/src/ring.rs: -------------------------------------------------------------------------------- 1 | use ffi; 2 | 3 | lazy_static! { 4 | pub static ref RTE_RING_NAMESIZE: usize = ffi::RTE_MEMZONE_NAMESIZE as usize - ffi::RTE_RING_MZ_PREFIX.len() + 1; 5 | } 6 | -------------------------------------------------------------------------------- /rte/src/tests.rs: -------------------------------------------------------------------------------- 1 | extern crate num_cpus; 2 | extern crate pretty_env_logger; 3 | 4 | use std::os::raw::c_void; 5 | use std::sync::{Arc, Mutex}; 6 | 7 | use cfile; 8 | use log::Level::Debug; 9 | 10 | use ffi; 11 | 12 | use common::memory::SOCKET_ID_ANY; 13 | use eal::{self, ProcType}; 14 | use launch; 15 | use lcore; 16 | use mbuf; 17 | use memory::AsMutRef; 18 | use mempool::{self, MemoryPool, MemoryPoolFlags}; 19 | use utils::AsRaw; 20 | 21 | #[test] 22 | fn test_eal() { 23 | let _ = pretty_env_logger::try_init_timed(); 24 | 25 | assert_eq!( 26 | eal::init(&vec![ 27 | String::from("test"), 28 | String::from("-c"), 29 | format!("{:x}", (1 << num_cpus::get()) - 1), 30 | String::from("--log-level"), 31 | String::from("8") 32 | ]) 33 | .unwrap(), 34 | 4 35 | ); 36 | 37 | assert_eq!(eal::process_type(), ProcType::Primary); 38 | assert!(!eal::primary_proc_alive()); 39 | assert!(eal::has_hugepages()); 40 | assert_eq!(lcore::socket_id(), 0); 41 | 42 | 
test_config(); 43 | 44 | test_lcore(); 45 | 46 | test_launch(); 47 | 48 | test_mempool(); 49 | 50 | test_mbuf(); 51 | } 52 | 53 | fn test_config() { 54 | let eal_cfg = eal::config(); 55 | 56 | assert_eq!(eal_cfg.master_lcore(), 0); 57 | assert_eq!(eal_cfg.lcore_count(), num_cpus::get()); 58 | assert_eq!(eal_cfg.process_type(), ProcType::Primary); 59 | assert_eq!( 60 | eal_cfg.lcore_roles(), 61 | &[lcore::Role::Rte, lcore::Role::Rte, lcore::Role::Rte, lcore::Role::Rte] 62 | ); 63 | 64 | let mem_cfg = eal_cfg.memory_config(); 65 | 66 | assert_eq!(mem_cfg.nchannel(), 0); 67 | assert_eq!(mem_cfg.nrank(), 0); 68 | 69 | let memzones = mem_cfg.memzones(); 70 | 71 | assert!(memzones.len() > 0); 72 | } 73 | 74 | fn test_lcore() { 75 | assert_eq!(lcore::current().unwrap(), 0); 76 | 77 | let lcore_id = lcore::current().unwrap(); 78 | 79 | assert_eq!(lcore_id.role(), lcore::Role::Rte); 80 | assert_eq!(lcore_id.socket_id(), 0); 81 | assert!(lcore_id.is_enabled()); 82 | 83 | assert_eq!(lcore::master(), 0); 84 | assert_eq!(lcore::count(), num_cpus::get()); 85 | assert_eq!(lcore::enabled().len(), num_cpus::get()); 86 | 87 | assert_eq!(lcore::index(256), None); 88 | assert_eq!(lcore::Id::any().index(), 0); 89 | assert_eq!(lcore::id(0).index(), 0); 90 | } 91 | 92 | fn test_launch() { 93 | fn slave_main(mutex: Option>>) -> i32 { 94 | debug!("lcore {} is running", lcore::current().unwrap()); 95 | 96 | let mutex = mutex.unwrap(); 97 | let mut data = mutex.lock().unwrap(); 98 | 99 | *data += 1; 100 | 101 | debug!("lcore {} finished, data={}", lcore::current().unwrap(), *data); 102 | 103 | 0 104 | } 105 | 106 | let mutex = Arc::new(Mutex::new(0)); 107 | let slave_id = lcore::id(1); 108 | 109 | assert_eq!(slave_id.state(), launch::State::Wait); 110 | 111 | { 112 | let data = mutex.lock().unwrap(); 113 | 114 | assert_eq!(*data, 0); 115 | 116 | debug!("remote launch lcore {}", slave_id); 117 | 118 | launch::remote_launch(slave_main, Some(mutex.clone()), slave_id).unwrap(); 119 | 120 | 
assert_eq!(slave_id.state(), launch::State::Running); 121 | } 122 | 123 | debug!("waiting lcore {} ...", slave_id); 124 | 125 | assert_eq!(slave_id.wait(), launch::JobState::Wait); 126 | 127 | { 128 | let data = mutex.lock().unwrap(); 129 | 130 | assert_eq!(*data, 1); 131 | 132 | debug!("remote lcore {} finished", slave_id); 133 | 134 | assert_eq!(slave_id.state(), launch::State::Wait); 135 | } 136 | 137 | { 138 | let _ = mutex.lock().unwrap(); 139 | 140 | debug!("remote launch lcores"); 141 | 142 | launch::mp_remote_launch(slave_main, Some(mutex.clone()), true).unwrap(); 143 | } 144 | 145 | launch::mp_wait_lcore(); 146 | 147 | { 148 | let data = mutex.lock().unwrap(); 149 | 150 | debug!("remote lcores finished"); 151 | 152 | assert_eq!(*data, num_cpus::get()); 153 | } 154 | } 155 | 156 | fn test_mempool() { 157 | let mut p = mempool::create_empty::<_, ()>( 158 | "test", 159 | 16, 160 | 128, 161 | 0, 162 | SOCKET_ID_ANY, 163 | MemoryPoolFlags::MEMPOOL_F_SP_PUT | MemoryPoolFlags::MEMPOOL_F_SC_GET, 164 | ) 165 | .unwrap(); 166 | 167 | assert_eq!(p.name(), "test"); 168 | assert_eq!(p.size, 16); 169 | assert_eq!(p.cache_size, 0); 170 | assert_eq!(p.elt_size, 128); 171 | assert_eq!(p.header_size, 64); 172 | assert_eq!(p.trailer_size, 0); 173 | assert_eq!(p.private_data_size, 64); 174 | 175 | assert_eq!(p.avail_count(), 16); 176 | assert_eq!(p.in_use_count(), 0); 177 | assert!(p.is_full()); 178 | assert!(!p.is_empty()); 179 | 180 | p.audit(); 181 | 182 | if log_enabled!(Debug) { 183 | let stdout = cfile::tmpfile().unwrap(); 184 | 185 | p.dump(&*stdout); 186 | } 187 | 188 | let mut elements: Vec<(usize, *mut ())> = Vec::new(); 189 | 190 | fn walk_element( 191 | _pool: &mempool::MemoryPool, 192 | elements: Option<&mut Vec<(usize, *mut ())>>, 193 | obj: &mut (), 194 | idx: usize, 195 | ) { 196 | elements.unwrap().push((idx, obj as *mut _)); 197 | } 198 | 199 | assert_eq!(p.walk(walk_element, Some(&mut elements)), 4); 200 | 201 | assert_eq!(elements.len(), 4); 202 | 203 | 
let raw_ptr = p.as_raw(); 204 | 205 | assert_eq!(raw_ptr, mempool::lookup("test").unwrap()); 206 | 207 | let mut pools: Vec = Vec::new(); 208 | 209 | fn walk_mempool(pool: &mempool::MemoryPool, pools: Option<&mut Vec>) { 210 | pools.unwrap().push(pool.as_raw()); 211 | } 212 | 213 | mempool::walk(walk_mempool, Some(&mut pools)); 214 | 215 | assert!(pools.contains(&raw_ptr)); 216 | 217 | if log_enabled!(Debug) { 218 | let stdout = cfile::tmpfile().unwrap(); 219 | 220 | mempool::list_dump(&*stdout); 221 | } 222 | } 223 | 224 | fn test_mbuf() { 225 | const NB_MBUF: u32 = 1024; 226 | const CACHE_SIZE: u32 = 32; 227 | const PRIV_SIZE: u32 = 0; 228 | const MBUF_SIZE: u32 = 128; 229 | 230 | let p = mbuf::pool_create( 231 | "mbuf_pool", 232 | NB_MBUF, 233 | CACHE_SIZE, 234 | PRIV_SIZE as u16, 235 | mbuf::RTE_MBUF_DEFAULT_BUF_SIZE as u16, 236 | lcore::socket_id() as i32, 237 | ) 238 | .unwrap(); 239 | 240 | assert_eq!(p.name(), "mbuf_pool"); 241 | assert_eq!(p.size, NB_MBUF); 242 | assert_eq!(p.cache_size, CACHE_SIZE); 243 | assert_eq!(p.elt_size, mbuf::RTE_MBUF_DEFAULT_BUF_SIZE + PRIV_SIZE + MBUF_SIZE); 244 | assert_eq!(p.header_size, 64); 245 | assert_eq!(p.trailer_size, 0); 246 | assert_eq!(p.private_data_size, 64); 247 | 248 | assert_eq!(p.avail_count(), NB_MBUF as usize); 249 | assert_eq!(p.in_use_count(), 0); 250 | assert!(p.is_full()); 251 | assert!(!p.is_empty()); 252 | 253 | p.audit(); 254 | } 255 | -------------------------------------------------------------------------------- /rte/src/utils.rs: -------------------------------------------------------------------------------- 1 | use std::borrow::Borrow; 2 | use std::ffi::CString; 3 | use std::ops::{Deref, DerefMut}; 4 | use std::ptr; 5 | 6 | pub trait Raw: Deref + DerefMut + AsRaw + IntoRaw + FromRaw + From<*mut T> {} 7 | 8 | pub trait AsRaw { 9 | type Raw; 10 | 11 | fn as_raw(&self) -> *mut Self::Raw; 12 | } 13 | 14 | impl AsRaw for &T { 15 | type Raw = T::Raw; 16 | 17 | fn as_raw(&self) -> *mut Self::Raw { 18 | 
(*self).as_raw() 19 | } 20 | } 21 | 22 | impl AsRaw for Option { 23 | type Raw = T::Raw; 24 | 25 | fn as_raw(&self) -> *mut Self::Raw { 26 | self.as_ref().map(|p| p.as_raw()).unwrap_or(ptr::null_mut()) 27 | } 28 | } 29 | 30 | pub trait IntoRaw: AsRaw { 31 | fn into_raw(self) -> *mut Self::Raw; 32 | } 33 | 34 | pub trait FromRaw: AsRaw 35 | where 36 | Self: Sized, 37 | { 38 | fn from_raw(raw: *mut Self::Raw) -> Option; 39 | } 40 | 41 | macro_rules! raw { 42 | (pub $wrapper:ident ( $raw_ty:ty ) ) => { 43 | #[repr(transparent)] 44 | #[derive(Debug)] 45 | pub struct $wrapper(::std::ptr::NonNull<$raw_ty>); 46 | 47 | impl $crate::utils::Raw<$raw_ty> for $wrapper {} 48 | 49 | impl ::std::ops::Deref for $wrapper { 50 | type Target = $raw_ty; 51 | 52 | fn deref(&self) -> &Self::Target { 53 | unsafe { self.0.as_ref() } 54 | } 55 | } 56 | 57 | impl ::std::ops::DerefMut for $wrapper { 58 | fn deref_mut(&mut self) -> &mut Self::Target { 59 | unsafe { self.0.as_mut() } 60 | } 61 | } 62 | 63 | impl $crate::utils::AsRaw for $wrapper { 64 | type Raw = $raw_ty; 65 | 66 | fn as_raw(&self) -> *mut Self::Raw { 67 | self.0.as_ptr() 68 | } 69 | } 70 | 71 | impl $crate::utils::IntoRaw for $wrapper { 72 | fn into_raw(self) -> *mut Self::Raw { 73 | self.0.as_ptr() 74 | } 75 | } 76 | 77 | impl $crate::utils::FromRaw for $wrapper { 78 | fn from_raw(raw: *mut Self::Raw) -> Option { 79 | ::std::ptr::NonNull::new(raw).map($wrapper) 80 | } 81 | } 82 | 83 | impl From<*mut $raw_ty> for $wrapper { 84 | fn from(p: *mut $raw_ty) -> Self { 85 | use $crate::utils::FromRaw; 86 | 87 | Self::from_raw(p).unwrap() 88 | } 89 | } 90 | }; 91 | } 92 | 93 | pub trait AsCString { 94 | fn as_cstring(&self) -> CString; 95 | } 96 | 97 | impl AsCString for T 98 | where 99 | T: AsRef, 100 | { 101 | fn as_cstring(&self) -> CString { 102 | let mut v = self.as_ref().as_bytes().to_owned(); 103 | v.push(0); 104 | unsafe { CString::from_vec_unchecked(v) } 105 | } 106 | } 107 | 108 | pub struct CallbackContext { 109 | pub 
callback: F, 110 | pub arg: T, 111 | } 112 | 113 | impl CallbackContext { 114 | pub fn new(callback: F, arg: T) -> Self { 115 | CallbackContext { callback, arg } 116 | } 117 | 118 | pub fn into_raw(self) -> *mut R { 119 | Box::into_raw(Box::new(self)) as *mut _ 120 | } 121 | 122 | pub fn from_raw(raw: *mut V) -> Box { 123 | unsafe { Box::from_raw(raw as *mut _) } 124 | } 125 | } 126 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | max_width = 120 2 | --------------------------------------------------------------------------------