├── .github └── workflows │ └── ci.yml ├── .gitignore ├── .gitmodules ├── Cargo.toml ├── LICENSE ├── README.md ├── build.rs ├── examples ├── client.rs └── server.rs ├── scripts └── run.sh └── src ├── bindings.h ├── lib.rs ├── opcode.rs ├── types.rs └── verbs.rs /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: RDMA-SYS-CI 2 | on: 3 | pull_request: 4 | branches: [master] 5 | push: 6 | branches: [master] 7 | schedule: [cron: "0 */24 * * *"] 8 | 9 | env: 10 | CI_RUST_TOOLCHAIN: 1.61.0 11 | 12 | jobs: 13 | build: 14 | name: Build 15 | runs-on: ubuntu-latest 16 | steps: 17 | - uses: actions/checkout@v2 18 | with: 19 | submodules: true 20 | - name: Git Sumbodule Update 21 | run: | 22 | git submodule update --remote --recursive 23 | - name: Install dependencies 24 | run: | 25 | # Install libibverbs-dev as dependency automatically 26 | sudo apt-get install -y librdmacm-dev 27 | - uses: actions-rs/toolchain@v1 28 | with: 29 | profile: minimal 30 | toolchain: ${{ env.CI_RUST_TOOLCHAIN }} 31 | override: true 32 | - name: Test 33 | run: | 34 | ./rdma-env-setup/scripts/setup.sh 35 | ./scripts/run.sh 36 | - name: Setup tmate session 37 | if: ${{ failure() }} 38 | uses: mxschmitt/action-tmate@v3 39 | 40 | fmt: 41 | name: Fmt 42 | runs-on: ubuntu-latest 43 | steps: 44 | - uses: actions/checkout@v2 45 | - uses: actions-rs/toolchain@v1 46 | with: 47 | profile: minimal 48 | toolchain: ${{ env.CI_RUST_TOOLCHAIN }} 49 | override: true 50 | - run: rustup component add rustfmt 51 | - uses: actions-rs/cargo@v1 52 | with: 53 | command: fmt 54 | args: --all -- --check 55 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | /target/ 4 | 5 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 6 | # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html 7 | Cargo.lock 8 | 9 | # These are backup files generated by rustfmt 10 | **/*.rs.bk 11 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "rdma-env-setup"] 2 | path = rdma-env-setup 3 | url = https://github.com/datenlord/rdma-env-setup.git 4 | branch = main -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "rdma-sys" 3 | version = "0.3.0" 4 | authors = ["Pu Wang "] 5 | edition = "2021" 6 | description = "Rdma ibverbs lib Rust binding" 7 | license-file = "LICENSE" 8 | repository = "https://github.com/datenlord/rdma-sys" 9 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 10 | 11 | [dependencies] 12 | libc = "0.2" 13 | memoffset = "0.6" 14 | paste = "1.0" 15 | # intrusive-collections = "0.9" 16 | 17 | [build-dependencies] 18 | bindgen = "0.59.2" 19 | pkg-config = "0.3" 20 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 datenlord 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and 
associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Rdma ibverbs lib Rust binding 2 | 3 | This lib is the ibverbs low-level Rust binding. As inline function and nested structure are not handled properly in the automatic bind generator, we deal with them case by case manually. 4 | -------------------------------------------------------------------------------- /build.rs: -------------------------------------------------------------------------------- 1 | use std::env; 2 | use std::path::Path; 3 | 4 | fn link_rdma_core(lib_name: &str, pkg_name: &str, version: &str, include_paths: &mut Vec) { 5 | let result: _ = pkg_config::Config::new() 6 | .atleast_version(version) 7 | .statik(false) 8 | .probe(lib_name); 9 | 10 | let lib = result.unwrap_or_else(|_| panic!("please install {pkg_name} {version})")); 11 | println!("found {pkg_name} {}", lib.version); 12 | 13 | for path in lib.include_paths { 14 | let path = path.to_str().expect("non-utf8 path"); 15 | include_paths.push(path.to_owned()); 16 | } 17 | } 18 | 19 | fn main() { 20 | let mut include_paths: Vec = Vec::new(); 21 | 22 | { 23 | let lib_name = "libibverbs"; 24 | let pkg_name = "libibverbs-dev"; 25 | let version = "1.8.28"; 26 | link_rdma_core(lib_name, pkg_name, version, &mut include_paths); 27 | } 28 | 29 | { 30 | let lib_name = "librdmacm"; 31 | let pkg_name = "librdmacm-dev"; 32 | let version = "1.2.28"; 33 | link_rdma_core(lib_name, pkg_name, version, &mut include_paths); 34 | } 35 | 36 | { 37 | include_paths.sort_unstable(); 38 | include_paths.dedup_by(|x, first| x == first); 39 | include_paths.push("/usr/include".into()); 40 | println!("include paths: {:?}", include_paths); 41 | } 42 | 43 | let include_args = include_paths.iter().map(|p| format!("-I{}", p)); 44 | 45 | let bindings = bindgen::Builder::default() 46 | .clang_args(include_args) 47 | .header("src/bindings.h") 48 | .allowlist_function("ibv_.*") 49 | .allowlist_type("ibv_.*") 50 | .allowlist_function("rdma_.*") 51 | .allowlist_type("rdma_.*") 52 | .allowlist_type("verbs_.*") 53 | .allowlist_type("ib_uverbs_access_flags") 54 | //.allowlist_type("verbs_devices_ops") 55 | //.allowlist_var("verbs_provider_.*") 56 | .blocklist_type("in6_addr") 57 | .opaque_type("pthread_.*") 58 | .blocklist_type("sockaddr.*") 59 | .blocklist_type("timespec") 60 | .blocklist_type("ibv_ah_attr") 61 | .blocklist_type("ibv_async_event") 62 | .blocklist_type("ibv_flow_spec") 63 | 
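// These ibv_*/rdma_* types involve unions that bindgen does not handle well,
// so they are defined manually in src/types.rs (see the doc comment there).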
.blocklist_type("ibv_gid") 64 | .blocklist_type("ibv_global_route") 65 | .blocklist_type("ibv_mw_bind_info") 66 | .blocklist_type("ibv_ops_wr") 67 | .blocklist_type("ibv_send_wr") 68 | .blocklist_type("ibv_wc") 69 | .blocklist_type("rdma_addr") 70 | .blocklist_type("rdma_cm_event") 71 | .blocklist_type("rdma_ib_addr") 72 | .blocklist_type("rdma_ud_param") 73 | // Following ENUM will used with bitwise-or 74 | // including flags, mask, caps, bits, fields, size 75 | .bitfield_enum("ibv_device_cap_flags") 76 | .bitfield_enum("ibv_odp_transport_cap_bits") 77 | .bitfield_enum("ibv_odp_general_caps") 78 | .bitfield_enum("ibv_rx_hash_function_flags") 79 | .bitfield_enum("ibv_rx_hash_fields") 80 | .bitfield_enum("ibv_raw_packet_caps") 81 | .bitfield_enum("ibv_tm_cap_flags") 82 | .bitfield_enum("ibv_pci_atomic_op_size") 83 | .bitfield_enum("ibv_port_cap_flags") 84 | .bitfield_enum("ibv_port_cap_flags2") 85 | .bitfield_enum("ibv_create_cq_wc_flags") 86 | .bitfield_enum("ibv_wc_flags") 87 | .bitfield_enum("ibv_access_flags") 88 | .bitfield_enum("ibv_xrcd_init_attr_mask") 89 | .bitfield_enum("ibv_rereg_mr_flags") 90 | .bitfield_enum("ibv_srq_attr_mask") 91 | .bitfield_enum("ibv_srq_init_attr_mask") // TODO: need to be bitfield? 92 | .bitfield_enum("ibv_wq_init_attr_mask") 93 | .bitfield_enum("ibv_wq_flags") 94 | .bitfield_enum("ibv_wq_attr_mask") 95 | .bitfield_enum("ibv_ind_table_init_attr_mask") 96 | .bitfield_enum("ibv_qp_init_attr_mask") // TODO: need to be bitfield? 97 | .bitfield_enum("ibv_qp_create_flags") 98 | .bitfield_enum("ibv_qp_create_send_ops_flags") 99 | .bitfield_enum("ibv_qp_open_attr_mask") 100 | .bitfield_enum("ibv_qp_attr_mask") 101 | .bitfield_enum("ibv_send_flags") 102 | .bitfield_enum("ibv_ops_flags") 103 | .bitfield_enum("ibv_cq_attr_mask") 104 | .bitfield_enum("ibv_flow_flags") 105 | .bitfield_enum("ibv_flow_action_esp_mask") 106 | .bitfield_enum("ibv_cq_init_attr_mask") 107 | .bitfield_enum("ibv_create_cq_attr_flags") 108 | .bitfield_enum("ibv_parent_domain_init_attr_mask") 109 | .bitfield_enum("ibv_read_counters_flags") 110 | .bitfield_enum("ibv_values_mask") 111 | .bitfield_enum("ib_uverbs_access_flags") 112 | .bitfield_enum("rdma_cm_join_mc_attr_mask") 113 | .bitfield_enum("rdma_cm_mc_join_flags") 114 | // Following ENUM will be const in a sub-mod 115 | .constified_enum_module("ibv_node_type") 116 | .constified_enum_module("ibv_transport_type") 117 | .constified_enum_module("ibv_atomic_cap") 118 | .constified_enum_module("ibv_mtu") 119 | .constified_enum_module("ibv_port_state") 120 | .constified_enum_module("ibv_wc_status") 121 | .constified_enum_module("ibv_wc_opcode") 122 | .constified_enum_module("ibv_mw_type") 123 | .constified_enum_module("ibv_rate") 124 | .constified_enum_module("ibv_srq_type") 125 | .constified_enum_module("ibv_wq_type") 126 | .constified_enum_module("ibv_wq_state") 127 | .constified_enum_module("ibv_qp_type") 128 | .constified_enum_module("ibv_qp_state") 129 | .constified_enum_module("ibv_mig_state") 130 | .constified_enum_module("ibv_wr_opcode") 131 | .constified_enum_module("ibv_ops_wr_opcode") 132 | .constified_enum_module("ibv_flow_attr_type") 133 | .constified_enum_module("ibv_flow_spec_type") 134 | .constified_enum_module("ibv_counter_description") 135 | .constified_enum_module("ibv_rereg_mr_err_code") 136 | .constified_enum_module("ib_uverbs_advise_mr_advice") 137 | .constified_enum_module("rdma_cm_event_type") 138 | .constified_enum_module("rdma_driver_id") 139 | .constified_enum_module("rdma_port_space") 140 | 
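// ibv_event_type holds discrete event codes, so it is generated as a real
// Rust enum rather than a module of constants.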
.rustified_enum("ibv_event_type") 141 | // unions with non-`Copy` fields other than `ManuallyDrop` are unstable 142 | // for example: `pub eth: ibv_flow_spec_eth` 143 | // note: see issue #55149 for more information 144 | .derive_copy(true) 145 | .derive_debug(false) 146 | .derive_default(false) 147 | .generate_comments(false) 148 | //.generate_inline_functions(true) 149 | //.default_macro_constant_type(bindgen::MacroTypeVariation::Unsigned) 150 | .prepend_enum_name(false) 151 | .rustfmt_bindings(true) 152 | .size_t_is_usize(true) 153 | .disable_untagged_union() 154 | .generate() 155 | .expect("Unable to generate bindings"); 156 | 157 | let out_dir = env::var_os("OUT_DIR").unwrap(); 158 | let dest_path = Path::new(&out_dir).join("bindings.rs"); 159 | 160 | bindings 161 | .write_to_file(dest_path) 162 | .expect("Could not write bindings"); 163 | } 164 | -------------------------------------------------------------------------------- /examples/client.rs: -------------------------------------------------------------------------------- 1 | //! This demo shows how to establish a connection between server and client 2 | //! and send msg to the other end. 3 | //! 4 | //! You can try this example by running: 5 | //! 6 | //! cargo run --example server 7 | //! 8 | //! And then start client in another terminal by running: 9 | //! 10 | //! cargo run --example client 11 | //! 12 | //! The default port is 7471. 13 | 14 | use rdma_sys::*; 15 | use std::{env, process::exit, ptr::null_mut}; 16 | 17 | fn run(ip: &str, port: &str) -> i32 { 18 | let mut send_msg = vec![1_u8; 16]; 19 | let mut recv_msg = vec![0_u8; 16]; 20 | let mut hints = unsafe { std::mem::zeroed::() }; 21 | let mut res: *mut rdma_addrinfo = null_mut(); 22 | 23 | hints.ai_port_space = rdma_port_space::RDMA_PS_TCP as i32; 24 | let mut ret = 25 | unsafe { rdma_getaddrinfo(ip.as_ptr().cast(), port.as_ptr().cast(), &hints, &mut res) }; 26 | 27 | if ret != 0 { 28 | println!("rdma_getaddrinfo"); 29 | return ret; 30 | } 31 | 32 | let mut attr = unsafe { std::mem::zeroed::() }; 33 | let mut id: *mut rdma_cm_id = null_mut(); 34 | attr.cap.max_send_wr = 1; 35 | attr.cap.max_recv_wr = 1; 36 | attr.cap.max_send_sge = 1; 37 | attr.cap.max_recv_sge = 1; 38 | attr.cap.max_inline_data = 16; 39 | attr.qp_context = id.cast(); 40 | attr.sq_sig_all = 1; 41 | ret = unsafe { rdma_create_ep(&mut id, res, null_mut(), &mut attr) }; 42 | // Check to see if we got inline data allowed or not 43 | let mut send_flags = 0_u32; 44 | if attr.cap.max_inline_data >= 16 { 45 | send_flags = ibv_send_flags::IBV_SEND_INLINE.0; 46 | } else { 47 | println!("rdma_client: device doesn't support IBV_SEND_INLINE, using sge sends"); 48 | } 49 | 50 | if ret != 0 { 51 | println!("rdma_create_ep"); 52 | unsafe { 53 | rdma_freeaddrinfo(res); 54 | } 55 | return ret; 56 | } 57 | 58 | let mr = unsafe { rdma_reg_msgs(id, recv_msg.as_mut_ptr().cast(), 16) }; 59 | if mr.is_null() { 60 | println!("rdma_reg_msgs for recv_msg"); 61 | unsafe { 62 | rdma_destroy_ep(id); 63 | } 64 | return -1; 65 | } 66 | 67 | let mut send_mr = null_mut(); 68 | if (send_flags & ibv_send_flags::IBV_SEND_INLINE.0) as u32 == 0 { 69 | println!("flags {:?}", send_flags); 70 | send_mr = unsafe { rdma_reg_msgs(id, send_msg.as_mut_ptr().cast(), 16) }; 71 | if send_mr.is_null() { 72 | println!("rdma_reg_msgs for send_msg"); 73 | unsafe { 74 | rdma_dereg_mr(mr); 75 | } 76 | return -1; 77 | } 78 | } 79 | 80 | ret = unsafe { rdma_post_recv(id, null_mut(), recv_msg.as_mut_ptr().cast(), 16, mr) }; 81 | if ret != 0 { 82 | 
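// rdma_post_recv failed: report it, release the send MR if one was
// registered, and bail out.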
println!("rdma_post_recv"); 83 | if (send_flags & ibv_send_flags::IBV_SEND_INLINE.0) as u32 == 0 { 84 | unsafe { rdma_dereg_mr(send_mr) }; 85 | } 86 | return ret; 87 | } 88 | 89 | ret = unsafe { rdma_connect(id, null_mut()) }; 90 | if ret != 0 { 91 | println!("rdma_connect"); 92 | unsafe { 93 | rdma_disconnect(id); 94 | } 95 | return ret; 96 | } 97 | 98 | ret = unsafe { 99 | rdma_post_send( 100 | id, 101 | null_mut(), 102 | send_msg.as_mut_ptr().cast(), 103 | 16, 104 | send_mr, 105 | send_flags.try_into().unwrap(), 106 | ) 107 | }; 108 | if ret != 0 { 109 | println!("rdma_post_send"); 110 | unsafe { 111 | rdma_disconnect(id); 112 | } 113 | return ret; 114 | } 115 | 116 | let mut wc = unsafe { std::mem::zeroed::() }; 117 | while ret == 0 { 118 | ret = unsafe { rdma_get_send_comp(id, &mut wc) }; 119 | } 120 | if ret < 0 { 121 | println!("rdma_get_send_comp"); 122 | unsafe { 123 | rdma_disconnect(id); 124 | } 125 | return ret; 126 | } 127 | 128 | ret = 0; 129 | while ret == 0 { 130 | ret = unsafe { rdma_get_recv_comp(id, &mut wc) }; 131 | } 132 | println!("rdma_client: recv msg : {:?}", recv_msg); 133 | if ret < 0 { 134 | println!("rdma_get_recv_comp"); 135 | } else { 136 | ret = 0; 137 | } 138 | 139 | ret 140 | } 141 | 142 | fn main() { 143 | println!("rdma_client: start"); 144 | let args: Vec = env::args().collect(); 145 | if args.len() != 3 { 146 | println!("usage : cargo run --example client "); 147 | println!("input : {:?}", args); 148 | exit(-1); 149 | } 150 | let ip = args.get(1).unwrap().as_str(); 151 | let port = args.get(2).unwrap().as_str(); 152 | 153 | let ret = run(ip, port); 154 | 155 | if ret != 0 { 156 | println!( 157 | "rdma_client: ret error {:?}", 158 | std::io::Error::from_raw_os_error(-ret) 159 | ); 160 | if ret == -1 { 161 | println!( 162 | "rdma_client: last os error {:?}", 163 | std::io::Error::last_os_error() 164 | ); 165 | } 166 | } 167 | println!("rdma_client: end"); 168 | } 169 | -------------------------------------------------------------------------------- /examples/server.rs: -------------------------------------------------------------------------------- 1 | //! This demo shows how to establish a connection between server and client 2 | //! and send msg to the other end. 3 | //! 4 | //! You can try this example by running: 5 | //! 6 | //! cargo run --example server 7 | //! 8 | //! And then start client in another terminal by running: 9 | //! 10 | //! cargo run --example client 11 | //! 12 | //! The default port is 7471. 
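//!
//! The server binds to 0.0.0.0:7471, accepts a single connection, receives a
//! 16-byte message from the client and sends a 16-byte reply back.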
13 | 14 | use rdma_sys::*; 15 | use std::ptr::null_mut; 16 | 17 | static SERVER: &str = "0.0.0.0\0"; 18 | static PORT: &str = "7471\0"; 19 | 20 | fn run() -> i32 { 21 | let mut send_msg = vec![1_u8; 16]; 22 | let mut recv_msg = vec![0_u8; 16]; 23 | let mut hints = unsafe { std::mem::zeroed::() }; 24 | let mut res: *mut rdma_addrinfo = null_mut(); 25 | hints.ai_flags = RAI_PASSIVE.try_into().unwrap(); 26 | hints.ai_port_space = rdma_port_space::RDMA_PS_TCP.try_into().unwrap(); 27 | let mut ret = unsafe { 28 | rdma_getaddrinfo( 29 | SERVER.as_ptr().cast(), 30 | PORT.as_ptr().cast(), 31 | &hints, 32 | &mut res, 33 | ) 34 | }; 35 | 36 | if ret != 0 { 37 | println!("rdma_getaddrinfo"); 38 | return ret; 39 | } 40 | 41 | let mut listen_id = null_mut(); 42 | let mut id = null_mut(); 43 | 44 | let mut init_attr = unsafe { std::mem::zeroed::() }; 45 | init_attr.cap.max_send_wr = 1; 46 | init_attr.cap.max_recv_wr = 1; 47 | init_attr.cap.max_send_sge = 1; 48 | init_attr.cap.max_recv_sge = 1; 49 | init_attr.cap.max_inline_data = 16; 50 | init_attr.sq_sig_all = 1; 51 | ret = unsafe { rdma_create_ep(&mut listen_id, res, null_mut(), &mut init_attr) }; 52 | // Check to see if we got inline data allowed or not 53 | if ret != 0 { 54 | println!("rdma_create_ep"); 55 | unsafe { 56 | rdma_freeaddrinfo(res); 57 | } 58 | return ret; 59 | } 60 | ret = unsafe { rdma_listen(listen_id, 0) }; 61 | if ret != 0 { 62 | println!("rdma_listen"); 63 | unsafe { 64 | rdma_destroy_ep(listen_id); 65 | } 66 | return ret; 67 | } 68 | 69 | ret = unsafe { rdma_get_request(listen_id, &mut id) }; 70 | if ret != 0 { 71 | println!("rdma_get_request"); 72 | unsafe { 73 | rdma_destroy_ep(listen_id); 74 | } 75 | return ret; 76 | } 77 | 78 | let mut qp_attr = unsafe { std::mem::zeroed::() }; 79 | ret = unsafe { 80 | ibv_query_qp( 81 | (*id).qp, 82 | &mut qp_attr, 83 | ibv_qp_attr_mask::IBV_QP_CAP.0.try_into().unwrap(), 84 | &mut init_attr, 85 | ) 86 | }; 87 | 88 | if ret != 0 { 89 | println!("ibv_query_qp"); 90 | unsafe { 91 | rdma_destroy_ep(id); 92 | } 93 | return ret; 94 | } 95 | 96 | let mut send_flags = 0_u32; 97 | if init_attr.cap.max_inline_data >= 16 { 98 | send_flags = ibv_send_flags::IBV_SEND_INLINE.0; 99 | } else { 100 | println!("rdma_server: device doesn't support IBV_SEND_INLINE, using sge sends"); 101 | } 102 | 103 | let recv_mr = unsafe { rdma_reg_msgs(id, recv_msg.as_mut_ptr().cast(), 16) }; 104 | if recv_mr.is_null() { 105 | ret = -1; 106 | println!("rdma_reg_msgs for recv_msg"); 107 | unsafe { 108 | rdma_dereg_mr(recv_mr); 109 | } 110 | return ret; 111 | } 112 | 113 | let mut send_mr = null_mut(); 114 | if (send_flags & ibv_send_flags::IBV_SEND_INLINE.0) == 0 { 115 | send_mr = unsafe { rdma_reg_msgs(id, send_msg.as_mut_ptr().cast(), 16) }; 116 | if send_mr.is_null() { 117 | ret = -1; 118 | println!("rdma_reg_msgs for send_msg"); 119 | unsafe { 120 | rdma_dereg_mr(recv_mr); 121 | } 122 | return ret; 123 | } 124 | } 125 | ret = unsafe { rdma_post_recv(id, null_mut(), recv_msg.as_mut_ptr().cast(), 16, recv_mr) }; 126 | 127 | if ret != 0 { 128 | println!("rdma_post_recv"); 129 | unsafe { 130 | rdma_dereg_mr(recv_mr); 131 | } 132 | return ret; 133 | } 134 | 135 | ret = unsafe { rdma_accept(id, null_mut()) }; 136 | if ret != 0 { 137 | println!("rdma_accept"); 138 | if (send_flags & ibv_send_flags::IBV_SEND_INLINE.0) == 0 { 139 | unsafe { rdma_dereg_mr(send_mr) }; 140 | } 141 | return ret; 142 | } 143 | 144 | let mut wc = unsafe { std::mem::zeroed::() }; 145 | while ret == 0 { 146 | ret = unsafe { rdma_get_recv_comp(id, &mut 
wc) }; 147 | } 148 | if ret < 0 { 149 | println!("rdma_get_recv_comp"); 150 | unsafe { 151 | rdma_disconnect(id); 152 | } 153 | return ret; 154 | } 155 | println!("rdma_server: recv msg : {:?}", recv_msg); 156 | ret = unsafe { 157 | rdma_post_send( 158 | id, 159 | null_mut(), 160 | send_msg.as_mut_ptr().cast(), 161 | 16, 162 | send_mr, 163 | send_flags.try_into().unwrap(), 164 | ) 165 | }; 166 | if ret != 0 { 167 | println!("rdma_post_send"); 168 | unsafe { 169 | rdma_disconnect(id); 170 | } 171 | return ret; 172 | } 173 | 174 | while ret == 0 { 175 | ret = unsafe { rdma_get_send_comp(id, &mut wc) }; 176 | } 177 | if ret < 0 { 178 | println!("rdma_get_send_comp"); 179 | } else { 180 | ret = 0; 181 | } 182 | ret 183 | } 184 | 185 | fn main() { 186 | println!("rdma_server: start"); 187 | let ret = run(); 188 | if ret != 0 { 189 | println!( 190 | "rdma_server: ret error {:?}", 191 | std::io::Error::from_raw_os_error(-ret) 192 | ); 193 | if ret == -1 { 194 | println!( 195 | "rdma_server: last os error {:?}", 196 | std::io::Error::last_os_error() 197 | ); 198 | } 199 | } 200 | println!("rdma_server: end"); 201 | } 202 | -------------------------------------------------------------------------------- /scripts/run.sh: -------------------------------------------------------------------------------- 1 | #! /bin/sh 2 | 3 | RXE_DEV=rxe_eth0 4 | 5 | # Remove existing devices if any 6 | sudo rdma link delete $RXE_DEV 7 | 8 | set -o errexit 9 | set -o nounset 10 | set -o xtrace 11 | 12 | if [ `ifconfig -s | grep -c '^e'` -eq 0 ]; then 13 | echo "no eth device" 14 | exit 1 15 | elif [ `ifconfig -s | grep -c '^e'` -gt 1 ]; then 16 | echo "multiple eth devices, select the first one" 17 | ifconfig -s | grep '^e' 18 | fi 19 | 20 | ETH_DEV=`ifconfig -s | grep '^e' | cut -d ' ' -f 1 | head -n 1` 21 | ETH_IP=`ifconfig $ETH_DEV | grep inet | grep -v inet6 | awk '{print $2}' | tr -d "addr:"` 22 | CM_PORT=7471 23 | # Setup soft-roce device 24 | sudo rdma link add $RXE_DEV type rxe netdev $ETH_DEV 25 | rdma link | grep $RXE_DEV 26 | 27 | cargo test --all 28 | cargo run --example server & 29 | sleep 1 && 30 | cargo run --example client $ETH_IP $CM_PORT -------------------------------------------------------------------------------- /src/bindings.h: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | #![deny(warnings)] 2 | #![allow(non_snake_case, non_camel_case_types, non_upper_case_globals)] 3 | #![allow(deref_nullptr)] // TODO(fxbug.dev/74605): Remove once bindgen is fixed. 4 | #![allow(clippy::missing_safety_doc, clippy::too_many_arguments)] 5 | 6 | use libc::*; 7 | 8 | include!(concat!(env!("OUT_DIR"), "/bindings.rs")); 9 | 10 | mod opcode; 11 | mod types; 12 | mod verbs; 13 | 14 | pub use self::opcode::*; 15 | pub use self::types::*; 16 | pub use self::verbs::*; 17 | -------------------------------------------------------------------------------- /src/opcode.rs: -------------------------------------------------------------------------------- 1 | pub mod ibv_opcode { 2 | use paste::paste; 3 | pub type Type = std::os::raw::c_int; 4 | macro_rules! concat_ibv_opcode { 5 | ($transport: expr, $op : expr ) => { 6 | paste! 
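// paste! glues the identifiers together, e.g. concat_ibv_opcode!(RC, SEND_FIRST)
// defines IBV_OPCODE_RC_SEND_FIRST as IBV_OPCODE_RC + IBV_OPCODE_SEND_FIRST.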
{ 7 | pub const []: Type = [] + [] ; 8 | } 9 | }; 10 | } 11 | 12 | /* transport types -- just used to define real constants */ 13 | pub const IBV_OPCODE_RC: Type = 0x00; 14 | pub const IBV_OPCODE_UC: Type = 0x20; 15 | pub const IBV_OPCODE_RD: Type = 0x40; 16 | pub const IBV_OPCODE_UD: Type = 0x60; 17 | /* operations -- just used to define real constants */ 18 | pub const IBV_OPCODE_SEND_FIRST: Type = 0x00; 19 | pub const IBV_OPCODE_SEND_MIDDLE: Type = 0x01; 20 | pub const IBV_OPCODE_SEND_LAST: Type = 0x02; 21 | pub const IBV_OPCODE_SEND_LAST_WITH_IMMEDIATE: Type = 0x03; 22 | pub const IBV_OPCODE_SEND_ONLY: Type = 0x04; 23 | pub const IBV_OPCODE_SEND_ONLY_WITH_IMMEDIATE: Type = 0x05; 24 | pub const IBV_OPCODE_RDMA_WRITE_FIRST: Type = 0x06; 25 | pub const IBV_OPCODE_RDMA_WRITE_MIDDLE: Type = 0x07; 26 | pub const IBV_OPCODE_RDMA_WRITE_LAST: Type = 0x08; 27 | pub const IBV_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE: Type = 0x09; 28 | pub const IBV_OPCODE_RDMA_WRITE_ONLY: Type = 0x0a; 29 | pub const IBV_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE: Type = 0x0b; 30 | pub const IBV_OPCODE_RDMA_READ_REQUEST: Type = 0x0c; 31 | pub const IBV_OPCODE_RDMA_READ_RESPONSE_FIRST: Type = 0x0d; 32 | pub const IBV_OPCODE_RDMA_READ_RESPONSE_MIDDLE: Type = 0x0e; 33 | pub const IBV_OPCODE_RDMA_READ_RESPONSE_LAST: Type = 0x0f; 34 | pub const IBV_OPCODE_RDMA_READ_RESPONSE_ONLY: Type = 0x10; 35 | pub const IBV_OPCODE_ACKNOWLEDGE: Type = 0x11; 36 | pub const IBV_OPCODE_ATOMIC_ACKNOWLEDGE: Type = 0x12; 37 | pub const IBV_OPCODE_COMPARE_SWAP: Type = 0x13; 38 | pub const IBV_OPCODE_FETCH_ADD: Type = 0x14; 39 | 40 | /* RC */ 41 | concat_ibv_opcode!(RC, SEND_FIRST); 42 | concat_ibv_opcode!(RC, SEND_MIDDLE); 43 | concat_ibv_opcode!(RC, SEND_LAST); 44 | concat_ibv_opcode!(RC, SEND_LAST_WITH_IMMEDIATE); 45 | concat_ibv_opcode!(RC, SEND_ONLY); 46 | concat_ibv_opcode!(RC, SEND_ONLY_WITH_IMMEDIATE); 47 | concat_ibv_opcode!(RC, RDMA_WRITE_FIRST); 48 | concat_ibv_opcode!(RC, RDMA_WRITE_MIDDLE); 49 | concat_ibv_opcode!(RC, RDMA_WRITE_LAST); 50 | concat_ibv_opcode!(RC, RDMA_WRITE_LAST_WITH_IMMEDIATE); 51 | concat_ibv_opcode!(RC, RDMA_WRITE_ONLY); 52 | concat_ibv_opcode!(RC, RDMA_WRITE_ONLY_WITH_IMMEDIATE); 53 | concat_ibv_opcode!(RC, RDMA_READ_REQUEST); 54 | concat_ibv_opcode!(RC, RDMA_READ_RESPONSE_FIRST); 55 | concat_ibv_opcode!(RC, RDMA_READ_RESPONSE_MIDDLE); 56 | concat_ibv_opcode!(RC, RDMA_READ_RESPONSE_LAST); 57 | concat_ibv_opcode!(RC, RDMA_READ_RESPONSE_ONLY); 58 | concat_ibv_opcode!(RC, ACKNOWLEDGE); 59 | concat_ibv_opcode!(RC, ATOMIC_ACKNOWLEDGE); 60 | concat_ibv_opcode!(RC, COMPARE_SWAP); 61 | concat_ibv_opcode!(RC, FETCH_ADD); 62 | 63 | /* UC */ 64 | concat_ibv_opcode!(UC, SEND_FIRST); 65 | concat_ibv_opcode!(UC, SEND_MIDDLE); 66 | concat_ibv_opcode!(UC, SEND_LAST); 67 | concat_ibv_opcode!(UC, SEND_LAST_WITH_IMMEDIATE); 68 | concat_ibv_opcode!(UC, SEND_ONLY); 69 | concat_ibv_opcode!(UC, SEND_ONLY_WITH_IMMEDIATE); 70 | concat_ibv_opcode!(UC, RDMA_WRITE_FIRST); 71 | concat_ibv_opcode!(UC, RDMA_WRITE_MIDDLE); 72 | concat_ibv_opcode!(UC, RDMA_WRITE_LAST); 73 | concat_ibv_opcode!(UC, RDMA_WRITE_LAST_WITH_IMMEDIATE); 74 | concat_ibv_opcode!(UC, RDMA_WRITE_ONLY); 75 | concat_ibv_opcode!(UC, RDMA_WRITE_ONLY_WITH_IMMEDIATE); 76 | 77 | /* RD */ 78 | concat_ibv_opcode!(RD, SEND_FIRST); 79 | concat_ibv_opcode!(RD, SEND_MIDDLE); 80 | concat_ibv_opcode!(RD, SEND_LAST); 81 | concat_ibv_opcode!(RD, SEND_LAST_WITH_IMMEDIATE); 82 | concat_ibv_opcode!(RD, SEND_ONLY); 83 | concat_ibv_opcode!(RD, SEND_ONLY_WITH_IMMEDIATE); 84 | concat_ibv_opcode!(RD, 
RDMA_WRITE_FIRST); 85 | concat_ibv_opcode!(RD, RDMA_WRITE_MIDDLE); 86 | concat_ibv_opcode!(RD, RDMA_WRITE_LAST); 87 | concat_ibv_opcode!(RD, RDMA_WRITE_LAST_WITH_IMMEDIATE); 88 | concat_ibv_opcode!(RD, RDMA_WRITE_ONLY); 89 | concat_ibv_opcode!(RD, RDMA_WRITE_ONLY_WITH_IMMEDIATE); 90 | concat_ibv_opcode!(RD, RDMA_READ_REQUEST); 91 | concat_ibv_opcode!(RD, RDMA_READ_RESPONSE_FIRST); 92 | concat_ibv_opcode!(RD, RDMA_READ_RESPONSE_MIDDLE); 93 | concat_ibv_opcode!(RD, RDMA_READ_RESPONSE_LAST); 94 | concat_ibv_opcode!(RD, RDMA_READ_RESPONSE_ONLY); 95 | concat_ibv_opcode!(RD, ACKNOWLEDGE); 96 | concat_ibv_opcode!(RD, ATOMIC_ACKNOWLEDGE); 97 | concat_ibv_opcode!(RD, COMPARE_SWAP); 98 | concat_ibv_opcode!(RD, FETCH_ADD); 99 | 100 | /* UD */ 101 | concat_ibv_opcode!(UD, SEND_ONLY); 102 | concat_ibv_opcode!(UD, SEND_ONLY_WITH_IMMEDIATE); 103 | } 104 | -------------------------------------------------------------------------------- /src/types.rs: -------------------------------------------------------------------------------- 1 | use crate::*; 2 | 3 | /// This file defines the types directly or indirectly involving union, 4 | /// in that BindGen cannot handle union very well, so mannually define them. 5 | 6 | /// Struct types involve union in 7 | 8 | // ibv_gid related union and struct types 9 | #[repr(C)] 10 | #[derive(Clone, Copy)] 11 | pub struct ibv_gid_global_t { 12 | pub subnet_prefix: __be64, 13 | pub interface_id: __be64, 14 | } 15 | 16 | #[repr(C)] 17 | #[derive(Clone, Copy)] 18 | pub union ibv_gid { 19 | pub raw: [u8; 16], 20 | pub global: ibv_gid_global_t, 21 | } 22 | 23 | // ibv_async_event related union and struct type 24 | #[repr(C)] 25 | pub union ibv_async_event_element_t { 26 | pub cq: *mut ibv_cq, 27 | pub qp: *mut ibv_qp, 28 | pub srq: *mut ibv_srq, 29 | pub wq: *mut ibv_wq, 30 | pub port_num: c_int, 31 | } 32 | 33 | #[repr(C)] 34 | pub struct ibv_async_event { 35 | pub element: ibv_async_event_element_t, 36 | pub event_type: ibv_event_type, 37 | } 38 | 39 | // ibv_wc related union and struct types 40 | #[repr(C)] 41 | pub union imm_data_invalidated_rkey_union_t { 42 | /// When (wc_flags & IBV_WC_WITH_IMM): Immediate data in network byte order. 43 | pub imm_data: __be32, 44 | /// When (wc_flags & IBV_WC_WITH_INV): Stores the invalidated rkey. 
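/// Only one of the two members is meaningful for any given completion.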
45 | pub invalidated_rkey: u32, 46 | } 47 | 48 | #[repr(C)] 49 | pub struct ibv_wc { 50 | pub wr_id: u64, 51 | pub status: ibv_wc_status::Type, 52 | pub opcode: ibv_wc_opcode::Type, 53 | pub vendor_err: u32, 54 | pub byte_len: u32, 55 | pub imm_data_invalidated_rkey_union: imm_data_invalidated_rkey_union_t, 56 | pub qp_num: u32, 57 | pub src_qp: u32, 58 | pub wc_flags: c_uint, 59 | pub pkey_index: u16, 60 | pub slid: u16, 61 | pub sl: u8, 62 | pub dlid_path_bits: u8, 63 | } 64 | 65 | #[repr(C)] 66 | #[derive(Clone, Copy)] 67 | pub struct ibv_global_route { 68 | pub dgid: ibv_gid, 69 | pub flow_label: u32, 70 | pub sgid_index: u8, 71 | pub hop_limit: u8, 72 | pub traffic_class: u8, 73 | } 74 | 75 | // ibv_send_wr related union and struct types 76 | #[repr(C)] 77 | #[derive(Copy, Clone)] 78 | pub struct ibv_mw_bind_info { 79 | pub mr: *mut ibv_mr, 80 | pub addr: u64, 81 | pub length: u64, 82 | pub mw_access_flags: ::std::os::raw::c_uint, 83 | } 84 | 85 | #[repr(C)] 86 | #[derive(Clone, Copy)] 87 | pub struct rdma_t { 88 | pub remote_addr: u64, 89 | pub rkey: u32, 90 | } 91 | 92 | #[repr(C)] 93 | #[derive(Clone, Copy)] 94 | pub struct atomic_t { 95 | pub remote_addr: u64, 96 | pub compare_add: u64, 97 | pub swap: u64, 98 | pub rkey: u32, 99 | } 100 | 101 | #[repr(C)] 102 | #[derive(Clone, Copy)] 103 | pub struct ud_t { 104 | pub ah: *mut ibv_ah, 105 | pub remote_qpn: u32, 106 | pub remote_qkey: u32, 107 | } 108 | 109 | #[repr(C)] 110 | pub union wr_t { 111 | pub rdma: rdma_t, 112 | pub atomic: atomic_t, 113 | pub ud: ud_t, 114 | } 115 | 116 | #[repr(C)] 117 | #[derive(Clone, Copy)] 118 | pub struct xrc_t { 119 | pub remote_srqn: u32, 120 | } 121 | 122 | #[repr(C)] 123 | pub union qp_type_t { 124 | pub xrc: xrc_t, 125 | } 126 | 127 | #[repr(C)] 128 | #[derive(Clone, Copy)] 129 | pub struct bind_mw_t { 130 | pub mw: *mut ibv_mw, 131 | pub rkey: u32, 132 | pub bind_info: ibv_mw_bind_info, 133 | } 134 | 135 | #[repr(C)] 136 | #[derive(Clone, Copy)] 137 | pub struct tso_t { 138 | pub hdr: *mut c_void, 139 | pub hdr_sz: u16, 140 | pub mss: u16, 141 | } 142 | 143 | #[repr(C)] 144 | pub union bind_mw_tso_union_t { 145 | pub bind_mw: bind_mw_t, 146 | pub tso: tso_t, 147 | } 148 | 149 | #[repr(C)] 150 | pub struct ibv_send_wr { 151 | pub wr_id: u64, 152 | pub next: *mut Self, 153 | pub sg_list: *mut ibv_sge, 154 | pub num_sge: c_int, 155 | pub opcode: ibv_wr_opcode::Type, 156 | pub send_flags: c_uint, 157 | /// When opcode is *_WITH_IMM: Immediate data in network byte order. 
158 | /// When opcode is *_INV: Stores the rkey to invalidate 159 | pub imm_data_invalidated_rkey_union: imm_data_invalidated_rkey_union_t, 160 | pub wr: wr_t, 161 | pub qp_type: qp_type_t, 162 | pub bind_mw_tso_union: bind_mw_tso_union_t, 163 | } 164 | 165 | #[repr(C)] 166 | #[derive(Copy, Clone)] 167 | pub struct add_t { 168 | pub recv_wr_id: u64, 169 | pub sg_list: *mut ibv_sge, 170 | pub num_sge: c_int, 171 | pub tag: u64, 172 | pub mask: u64, 173 | } 174 | 175 | #[repr(C)] 176 | #[derive(Copy, Clone)] 177 | pub struct tm_t { 178 | pub unexpected_cnt: u32, 179 | pub handle: u32, 180 | pub add: add_t, 181 | } 182 | 183 | #[repr(C)] 184 | pub struct ibv_ops_wr { 185 | wr_id: u64, 186 | next: *mut Self, 187 | opcode: ibv_ops_wr_opcode::Type, 188 | flags: c_int, 189 | tm: tm_t, 190 | } 191 | 192 | // ibv_flow_spec related union and struct types 193 | #[repr(C)] 194 | #[derive(Clone, Copy)] 195 | pub struct hdr_t { 196 | pub type_: ibv_flow_spec_type::Type, 197 | pub size: u16, 198 | } 199 | 200 | #[repr(C)] 201 | pub union ibv_flow_spec_union_t { 202 | pub hdr: hdr_t, 203 | pub eth: ibv_flow_spec_eth, 204 | pub ipv4: ibv_flow_spec_ipv4, 205 | pub tcp_udp: ibv_flow_spec_tcp_udp, 206 | pub ipv4_ext: ibv_flow_spec_ipv4_ext, 207 | pub ipv6: ibv_flow_spec_ipv6, 208 | pub esp: ibv_flow_spec_esp, 209 | pub tunnel: ibv_flow_spec_tunnel, 210 | pub gre: ibv_flow_spec_gre, 211 | pub mpls: ibv_flow_spec_mpls, 212 | pub flow_tag: ibv_flow_spec_action_tag, 213 | pub drop: ibv_flow_spec_action_drop, 214 | pub handle: ibv_flow_spec_action_handle, 215 | pub flow_count: ibv_flow_spec_counter_action, 216 | } 217 | 218 | #[repr(C)] 219 | pub struct ibv_flow_spec { 220 | pub ibv_flow_spec_union: ibv_flow_spec_union_t, 221 | } 222 | 223 | /// Struct types involve union in 224 | 225 | // rdma_addr related union and struct types 226 | #[repr(C)] 227 | #[derive(Clone, Copy)] 228 | pub struct rdma_ib_addr { 229 | pub sgid: ibv_gid, 230 | pub dgid: ibv_gid, 231 | pub pkey: __be16, 232 | } 233 | 234 | #[repr(C)] 235 | pub union src_addr_union_t { 236 | pub src_addr: libc::sockaddr, 237 | pub src_sin: libc::sockaddr_in, 238 | pub src_sin6: libc::sockaddr_in6, 239 | pub src_storage: libc::sockaddr_storage, 240 | } 241 | 242 | #[repr(C)] 243 | pub union dst_addr_union_t { 244 | pub dst_addr: libc::sockaddr, 245 | pub dst_sin: libc::sockaddr_in, 246 | pub dst_sin6: libc::sockaddr_in6, 247 | pub dst_storage: libc::sockaddr_storage, 248 | } 249 | 250 | #[repr(C)] 251 | pub union addr_union_t { 252 | pub ibaddr: rdma_ib_addr, 253 | } 254 | 255 | #[repr(C)] 256 | pub struct rdma_addr { 257 | pub src_addr_union: src_addr_union_t, 258 | pub dst_addr_union: dst_addr_union_t, 259 | pub addr: addr_union_t, 260 | } 261 | 262 | /// rdma_cm_event related union and struct types 263 | 264 | #[repr(C)] 265 | #[derive(Clone, Copy)] 266 | pub struct ibv_ah_attr { 267 | pub grh: ibv_global_route, 268 | pub dlid: u16, 269 | pub sl: u8, 270 | pub src_path_bits: u8, 271 | pub static_rate: u8, 272 | pub is_global: u8, 273 | pub port_num: u8, 274 | } 275 | 276 | #[repr(C)] 277 | #[derive(Clone, Copy)] 278 | pub struct rdma_ud_param { 279 | pub private_data: *const ::std::os::raw::c_void, 280 | pub private_data_len: u8, 281 | pub ah_attr: ibv_ah_attr, 282 | pub qp_num: u32, 283 | pub qkey: u32, 284 | } 285 | 286 | #[repr(C)] 287 | pub union param_t { 288 | pub conn: rdma_conn_param, 289 | pub ud: rdma_ud_param, 290 | } 291 | 292 | #[repr(C)] 293 | pub struct rdma_cm_event { 294 | pub id: *mut rdma_cm_id, 295 | pub listen_id: *mut rdma_cm_id, 
296 | pub event: rdma_cm_event_type::Type, 297 | pub status: c_int, 298 | pub param: param_t, 299 | } 300 | -------------------------------------------------------------------------------- /src/verbs.rs: -------------------------------------------------------------------------------- 1 | use crate::*; 2 | 3 | use std::mem; 4 | use std::ptr; 5 | 6 | /// Inline functions from 7 | 8 | pub type ibv_advise_mr_advice = ib_uverbs_advise_mr_advice::Type; 9 | 10 | // ibv_qp_ex related inline functions 11 | #[inline] 12 | pub unsafe fn ibv_wr_atomic_cmp_swp( 13 | qp: *mut ibv_qp_ex, 14 | rkey: u32, 15 | remote_addr: u64, 16 | compare: u64, 17 | swap: u64, 18 | ) { 19 | (*qp).wr_atomic_cmp_swp.unwrap()(qp, rkey, remote_addr, compare, swap); 20 | } 21 | 22 | #[inline] 23 | pub unsafe fn ibv_wr_atomic_fetch_add(qp: *mut ibv_qp_ex, rkey: u32, remote_addr: u64, add: u64) { 24 | (*qp).wr_atomic_fetch_add.unwrap()(qp, rkey, remote_addr, add); 25 | } 26 | 27 | #[inline] 28 | pub unsafe fn ibv_wr_bind_mw( 29 | qp: *mut ibv_qp_ex, 30 | mw: *mut ibv_mw, 31 | rkey: u32, 32 | bind_info: *const ibv_mw_bind_info, 33 | ) { 34 | (*qp).wr_bind_mw.unwrap()(qp, mw, rkey, bind_info); 35 | } 36 | 37 | #[inline] 38 | pub unsafe fn ibv_wr_local_inv(qp: *mut ibv_qp_ex, invalidate_rkey: u32) { 39 | (*qp).wr_local_inv.unwrap()(qp, invalidate_rkey); 40 | } 41 | 42 | #[inline] 43 | pub unsafe fn ibv_wr_rdma_read(qp: *mut ibv_qp_ex, rkey: u32, remote_addr: u64) { 44 | (*qp).wr_rdma_read.unwrap()(qp, rkey, remote_addr); 45 | } 46 | 47 | #[inline] 48 | pub unsafe fn ibv_wr_rdma_write(qp: *mut ibv_qp_ex, rkey: u32, remote_addr: u64) { 49 | (*qp).wr_rdma_write.unwrap()(qp, rkey, remote_addr); 50 | } 51 | 52 | #[inline] 53 | pub unsafe fn ibv_wr_rdma_write_imm( 54 | qp: *mut ibv_qp_ex, 55 | rkey: u32, 56 | remote_addr: u64, 57 | imm_data: __be32, 58 | ) { 59 | (*qp).wr_rdma_write_imm.unwrap()(qp, rkey, remote_addr, imm_data); 60 | } 61 | 62 | #[inline] 63 | pub unsafe fn ibv_wr_send(qp: *mut ibv_qp_ex) { 64 | (*qp).wr_send.unwrap()(qp); 65 | } 66 | 67 | #[inline] 68 | pub unsafe fn ibv_wr_send_imm(qp: *mut ibv_qp_ex, imm_data: __be32) { 69 | (*qp).wr_send_imm.unwrap()(qp, imm_data); 70 | } 71 | 72 | #[inline] 73 | pub unsafe fn ibv_wr_send_inv(qp: *mut ibv_qp_ex, invalidate_rkey: u32) { 74 | (*qp).wr_send_inv.unwrap()(qp, invalidate_rkey); 75 | } 76 | 77 | #[inline] 78 | pub unsafe fn ibv_wr_send_tso(qp: *mut ibv_qp_ex, hdr: *mut c_void, hdr_sz: u16, mss: u16) { 79 | (*qp).wr_send_tso.unwrap()(qp, hdr, hdr_sz, mss); 80 | } 81 | 82 | #[inline] 83 | pub unsafe fn ibv_wr_set_ud_addr( 84 | qp: *mut ibv_qp_ex, 85 | ah: *mut ibv_ah, 86 | remote_qpn: u32, 87 | remote_qkey: u32, 88 | ) { 89 | (*qp).wr_set_ud_addr.unwrap()(qp, ah, remote_qpn, remote_qkey); 90 | } 91 | 92 | #[inline] 93 | pub unsafe fn ibv_wr_set_xrc_srqn(qp: *mut ibv_qp_ex, remote_srqn: u32) { 94 | (*qp).wr_set_xrc_srqn.unwrap()(qp, remote_srqn); 95 | } 96 | 97 | #[inline] 98 | pub unsafe fn ibv_wr_set_inline_data(qp: *mut ibv_qp_ex, addr: *mut c_void, length: usize) { 99 | (*qp).wr_set_inline_data.unwrap()(qp, addr, length); 100 | } 101 | 102 | #[inline] 103 | pub unsafe fn ibv_wr_set_inline_data_list( 104 | qp: *mut ibv_qp_ex, 105 | num_buf: usize, 106 | buf_list: *const ibv_data_buf, 107 | ) { 108 | (*qp).wr_set_inline_data_list.unwrap()(qp, num_buf, buf_list); 109 | } 110 | 111 | #[inline] 112 | pub unsafe fn ibv_wr_set_sge(qp: *mut ibv_qp_ex, lkey: u32, addr: u64, length: u32) { 113 | (*qp).wr_set_sge.unwrap()(qp, lkey, addr, length); 114 | } 115 | 116 | #[inline] 117 | 
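// Attach a scatter/gather list to the work request currently being built on
// the extended QP.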
pub unsafe fn ibv_wr_set_sge_list(qp: *mut ibv_qp_ex, num_sge: usize, sg_list: *const ibv_sge) { 118 | (*qp).wr_set_sge_list.unwrap()(qp, num_sge, sg_list); 119 | } 120 | 121 | #[inline] 122 | pub unsafe fn ibv_wr_start(qp: *mut ibv_qp_ex) { 123 | (*qp).wr_start.unwrap()(qp); 124 | } 125 | 126 | #[inline] 127 | pub unsafe fn ibv_wr_complete(qp: *mut ibv_qp_ex) -> c_int { 128 | (*qp).wr_complete.unwrap()(qp) 129 | } 130 | 131 | #[inline] 132 | pub unsafe fn ibv_wr_abort(qp: *mut ibv_qp_ex) { 133 | (*qp).wr_abort.unwrap()(qp) 134 | } 135 | 136 | // ibv_cq_ex related inline functions 137 | #[inline] 138 | pub unsafe fn ibv_cq_ex_to_cq(cq: *mut ibv_cq_ex) -> *mut ibv_cq { 139 | cq as *mut ibv_cq_ex as *mut ibv_cq 140 | } 141 | 142 | #[inline] 143 | pub unsafe fn ibv_start_poll(cq: *mut ibv_cq_ex, attr: *mut ibv_poll_cq_attr) -> c_int { 144 | (*cq).start_poll.unwrap()(cq, attr) 145 | } 146 | 147 | #[inline] 148 | pub unsafe fn ibv_next_poll(cq: *mut ibv_cq_ex) -> c_int { 149 | (*cq).next_poll.unwrap()(cq) 150 | } 151 | 152 | #[inline] 153 | pub unsafe fn ibv_end_poll(cq: *mut ibv_cq_ex) { 154 | (*cq).end_poll.unwrap()(cq) 155 | } 156 | 157 | #[inline] 158 | pub unsafe fn ibv_wc_read_opcode(cq: *mut ibv_cq_ex) -> ibv_wc_opcode::Type { 159 | (*cq).read_opcode.unwrap()(cq) 160 | } 161 | 162 | #[inline] 163 | pub unsafe fn ibv_wc_read_vendor_err(cq: *mut ibv_cq_ex) -> u32 { 164 | (*cq).read_vendor_err.unwrap()(cq) 165 | } 166 | 167 | #[inline] 168 | pub unsafe fn ibv_wc_read_byte_len(cq: *mut ibv_cq_ex) -> u32 { 169 | (*cq).read_byte_len.unwrap()(cq) 170 | } 171 | 172 | #[inline] 173 | pub unsafe fn ibv_wc_read_imm_data(cq: *mut ibv_cq_ex) -> __be32 { 174 | (*cq).read_imm_data.unwrap()(cq) 175 | } 176 | 177 | #[inline] 178 | pub unsafe fn ibv_wc_read_invalidated_rkey(cq: *mut ibv_cq_ex) -> u32 { 179 | // #ifdef __CHECKER__ 180 | // return (__attribute__((force)) uint32_t)cq->read_imm_data(cq); 181 | // #else 182 | // return cq->read_imm_data(cq); 183 | // #endif 184 | (*cq).read_imm_data.unwrap()(cq) 185 | } 186 | 187 | #[inline] 188 | pub unsafe fn ibv_wc_read_qp_num(cq: *mut ibv_cq_ex) -> u32 { 189 | (*cq).read_qp_num.unwrap()(cq) 190 | } 191 | 192 | #[inline] 193 | pub unsafe fn ibv_wc_read_src_qp(cq: *mut ibv_cq_ex) -> u32 { 194 | (*cq).read_src_qp.unwrap()(cq) 195 | } 196 | 197 | #[inline] 198 | pub unsafe fn ibv_wc_read_wc_flags(cq: *mut ibv_cq_ex) -> c_uint { 199 | (*cq).read_wc_flags.unwrap()(cq) 200 | } 201 | 202 | #[inline] 203 | pub unsafe fn ibv_wc_read_slid(cq: *mut ibv_cq_ex) -> u32 { 204 | (*cq).read_slid.unwrap()(cq) 205 | } 206 | 207 | #[inline] 208 | pub unsafe fn ibv_wc_read_sl(cq: *mut ibv_cq_ex) -> u8 { 209 | (*cq).read_sl.unwrap()(cq) 210 | } 211 | 212 | #[inline] 213 | pub unsafe fn ibv_wc_read_dlid_path_bits(cq: *mut ibv_cq_ex) -> u8 { 214 | (*cq).read_dlid_path_bits.unwrap()(cq) 215 | } 216 | 217 | #[inline] 218 | pub unsafe fn ibv_wc_read_completion_ts(cq: *mut ibv_cq_ex) -> u64 { 219 | (*cq).read_completion_ts.unwrap()(cq) 220 | } 221 | 222 | #[inline] 223 | pub unsafe fn ibv_wc_read_completion_wallclock_ns(cq: *mut ibv_cq_ex) -> u64 { 224 | (*cq).read_completion_wallclock_ns.unwrap()(cq) 225 | } 226 | 227 | #[inline] 228 | pub unsafe fn ibv_wc_read_cvlan(cq: *mut ibv_cq_ex) -> u16 { 229 | (*cq).read_cvlan.unwrap()(cq) 230 | } 231 | 232 | #[inline] 233 | pub unsafe fn ibv_wc_read_flow_tag(cq: *mut ibv_cq_ex) -> u32 { 234 | (*cq).read_flow_tag.unwrap()(cq) 235 | } 236 | 237 | #[inline] 238 | pub unsafe fn ibv_wc_read_tm_info(cq: *mut ibv_cq_ex, tm_info: *mut 
ibv_wc_tm_info) { 239 | (*cq).read_tm_info.unwrap()(cq, tm_info) 240 | } 241 | 242 | // ibv_wq related inline function 243 | #[inline] 244 | pub unsafe fn ibv_post_wq_recv( 245 | wq: *mut ibv_wq, 246 | recv_wr: *mut ibv_recv_wr, 247 | bad_recv_wr: *mut *mut ibv_recv_wr, 248 | ) -> c_int { 249 | (*wq).post_recv.unwrap()(wq, recv_wr, bad_recv_wr) 250 | } 251 | 252 | // Use intrusive_collections::container_of! instread, once it's stable not nightly 253 | macro_rules! container_of { 254 | ($ptr:expr, $container:path, $field:ident) => {{ 255 | ($ptr as *const _ as *const u8).sub(memoffset::offset_of!($container, $field)) 256 | as *const $container 257 | }}; 258 | } 259 | 260 | // Utility function to get verbs_context from ibv_context 261 | #[inline] 262 | unsafe fn verbs_get_ctx(ctx: *const ibv_context) -> Option<*mut verbs_context> { 263 | if (*ctx).abi_compat as usize != u32::MAX as usize { 264 | None 265 | } else { 266 | let vcp = container_of!(ctx, verbs_context, context) as *mut _; 267 | Some(vcp) 268 | } 269 | } 270 | 271 | macro_rules! verbs_get_ctx_op { 272 | ($vcr:expr, $field:ident) => { 273 | if let Some(vc) = verbs_get_ctx($vcr) { 274 | if (*vc).sz < mem::size_of_val(&*vc) - memoffset::offset_of!(verbs_context, $field) { 275 | None 276 | } else { 277 | if (*vc).$field.is_some() { 278 | Some(vc) 279 | } else { 280 | None 281 | } 282 | } 283 | } else { 284 | None 285 | } 286 | }; 287 | } 288 | 289 | // TODO: note that ibv_query_port, ibv_get_device_list, ibv_reg_mr, and 290 | // ibv_reg_mr_iova are redefined using ___ibv_query_port, 291 | // __ibv_get_device_list, __ibv_reg_mr, and __ibv_reg_mr_iova in C, which 292 | // should be handled properly in Rust. 293 | 294 | // When statically linking the user can set RDMA_STATIC_PROVIDERS to a comma 295 | // separated list of provider names to include in the static link, and this 296 | // machinery will cause those providers to be included statically. 297 | // 298 | // Linking will fail if this is set for dynamic linking. 299 | // 300 | // #define ibv_get_device_list(num_devices) __ibv_get_device_list(num_devices) 301 | // #endif 302 | // #ifdef RDMA_STATIC_PROVIDERS 303 | // #define _RDMA_STATIC_PREFIX_(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, \ 304 | // _12, _13, _14, _15, _16, _17, ...) 
\ 305 | // &verbs_provider_##_1, &verbs_provider_##_2, &verbs_provider_##_3, \ 306 | // &verbs_provider_##_4, &verbs_provider_##_5, \ 307 | // &verbs_provider_##_6, &verbs_provider_##_7, \ 308 | // &verbs_provider_##_8, &verbs_provider_##_9, \ 309 | // &verbs_provider_##_10, &verbs_provider_##_11, \ 310 | // &verbs_provider_##_12, &verbs_provider_##_13, \ 311 | // &verbs_provider_##_14, &verbs_provider_##_15, \ 312 | // &verbs_provider_##_16, &verbs_provider_##_17 313 | // #define _RDMA_STATIC_PREFIX(arg) \ 314 | // _RDMA_STATIC_PREFIX_(arg, none, none, none, none, none, none, none, \ 315 | // none, none, none, none, none, none, none, none, \ 316 | // none) 317 | pub unsafe fn __ibv_get_device_list(num_devices: *mut c_int) -> *mut *mut ibv_device { 318 | // TODO: check static linking compatibility 319 | // ibv_static_providers(NULL, _RDMA_STATIC_PREFIX(RDMA_STATIC_PROVIDERS), NULL); 320 | ibv_get_device_list(num_devices) 321 | } 322 | 323 | // TODO: missing variable args function 324 | // void ibv_static_providers(void *unused, ...); 325 | 326 | // ibv_context related inline function 327 | #[inline] 328 | pub unsafe fn ___ibv_query_port( 329 | context: *mut ibv_context, 330 | port_num: u8, 331 | port_attr: *mut ibv_port_attr, 332 | ) -> c_int { 333 | let vcr = verbs_get_ctx_op!(context, query_port); 334 | 335 | if let Some(vctx) = vcr { 336 | (*vctx).query_port.unwrap()(context, port_num, port_attr, mem::size_of_val(&*port_attr)) 337 | } else { 338 | // TODO: memset(port_attr, 0, sizeof(*port_attr)); 339 | let compat_attr = port_attr as *mut _ as *mut _compat_ibv_port_attr; 340 | ibv_query_port(context, port_num, compat_attr) 341 | } 342 | } 343 | 344 | // ibv_flow related inline functions 345 | #[inline] 346 | pub unsafe fn ibv_create_flow(qp: *mut ibv_qp, flow: *mut ibv_flow_attr) -> Option<*mut ibv_flow> { 347 | let vcr = verbs_get_ctx_op!((*qp).context, ibv_create_flow); 348 | 349 | if let Some(vctx) = vcr { 350 | Some((*vctx).ibv_create_flow.unwrap()(qp, flow)) 351 | } else { 352 | *libc::__errno_location() = libc::EOPNOTSUPP; 353 | None 354 | } 355 | } 356 | 357 | #[inline] 358 | pub unsafe fn ibv_destroy_flow(flow_id: *mut ibv_flow) -> c_int { 359 | let vcr = verbs_get_ctx_op!((*flow_id).context, ibv_destroy_flow); 360 | 361 | if let Some(vctx) = vcr { 362 | (*vctx).ibv_destroy_flow.unwrap()(flow_id) 363 | } else { 364 | libc::EOPNOTSUPP 365 | } 366 | } 367 | 368 | #[inline] 369 | pub unsafe fn ibv_create_flow_action_esp( 370 | ctx: *mut ibv_context, 371 | esp: *mut ibv_flow_action_esp_attr, 372 | ) -> Option<*mut ibv_flow_action> { 373 | let vcr = verbs_get_ctx_op!(ctx, create_flow_action_esp); 374 | 375 | if let Some(vctx) = vcr { 376 | Some((*vctx).create_flow_action_esp.unwrap()(ctx, esp)) 377 | } else { 378 | *libc::__errno_location() = libc::EOPNOTSUPP; 379 | None 380 | } 381 | } 382 | 383 | #[inline] 384 | pub unsafe fn ibv_modify_flow_action_esp( 385 | action: *mut ibv_flow_action, 386 | esp: *mut ibv_flow_action_esp_attr, 387 | ) -> c_int { 388 | let vcr = verbs_get_ctx_op!((*action).context, modify_flow_action_esp); 389 | 390 | if let Some(vctx) = vcr { 391 | (*vctx).modify_flow_action_esp.unwrap()(action, esp) 392 | } else { 393 | libc::EOPNOTSUPP 394 | } 395 | } 396 | 397 | #[inline] 398 | pub unsafe fn ibv_destroy_flow_action(action: *mut ibv_flow_action) -> c_int { 399 | let vcr = verbs_get_ctx_op!((*action).context, destroy_flow_action); 400 | 401 | if let Some(vctx) = vcr { 402 | (*vctx).destroy_flow_action.unwrap()(action) 403 | } else { 404 | libc::EOPNOTSUPP 405 | 
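// (the provider does not implement this extension verb)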
} 406 | } 407 | 408 | // ibv_xrcd related inline functions 409 | #[inline] 410 | pub unsafe fn ibv_open_xrcd( 411 | context: *mut ibv_context, 412 | xrcd_init_attr: *mut ibv_xrcd_init_attr, 413 | ) -> Option<*mut ibv_xrcd> { 414 | let vcr = verbs_get_ctx_op!(context, open_xrcd); 415 | 416 | if let Some(vctx) = vcr { 417 | Some((*vctx).open_xrcd.unwrap()(context, xrcd_init_attr)) 418 | } else { 419 | *libc::__errno_location() = libc::EOPNOTSUPP; 420 | None 421 | } 422 | } 423 | 424 | #[inline] 425 | pub unsafe fn ibv_close_xrcd(xrcd: *mut ibv_xrcd) -> c_int { 426 | let vctx = verbs_get_ctx((*xrcd).context); 427 | 428 | (*vctx.unwrap()).close_xrcd.unwrap()(xrcd) 429 | } 430 | 431 | // use new ibv_reg_mr version only if access flags that require it are used 432 | #[inline] 433 | pub unsafe fn __ibv_reg_mr( 434 | pd: *mut ibv_pd, 435 | addr: *mut c_void, 436 | length: usize, 437 | access: c_uint, 438 | is_access_const: c_int, 439 | ) -> *mut ibv_mr { 440 | if is_access_const != 0 441 | && (ib_uverbs_access_flags(access) 442 | & ib_uverbs_access_flags::IB_UVERBS_ACCESS_OPTIONAL_RANGE) 443 | == ib_uverbs_access_flags(0) 444 | { 445 | ibv_reg_mr(pd, addr, length, access as c_int) 446 | } else { 447 | ibv_reg_mr_iova2(pd, addr, length, addr as u64, access) 448 | } 449 | } 450 | // TODO: handle C macro defined function 451 | // #define ibv_reg_mr(pd, addr, length, access) \ 452 | // __ibv_reg_mr(pd, addr, length, access, \ 453 | // __builtin_constant_p( \ 454 | // ((access) & IBV_ACCESS_OPTIONAL_RANGE) == 0)) 455 | 456 | // use new ibv_reg_mr version only if access flags that require it are used 457 | #[inline] 458 | pub unsafe fn __ibv_reg_mr_iova( 459 | pd: *mut ibv_pd, 460 | addr: *mut c_void, 461 | length: usize, 462 | iova: u64, 463 | access: c_uint, 464 | is_access_const: c_int, 465 | ) -> *mut ibv_mr { 466 | if is_access_const != 0 467 | && (ib_uverbs_access_flags(access) 468 | & ib_uverbs_access_flags::IB_UVERBS_ACCESS_OPTIONAL_RANGE) 469 | == ib_uverbs_access_flags(0) 470 | { 471 | ibv_reg_mr_iova(pd, addr, length, iova, access as c_int) 472 | } else { 473 | ibv_reg_mr_iova2(pd, addr, length, iova, access) 474 | } 475 | } 476 | // TODO: handle C macro defined function 477 | // #define ibv_reg_mr_iova(pd, addr, length, iova, access) \ 478 | // __ibv_reg_mr_iova(pd, addr, length, iova, access, \ 479 | // __builtin_constant_p( \ 480 | // ((access) & IBV_ACCESS_OPTIONAL_RANGE) == 0)) 481 | 482 | // ibv_mw related inline functions 483 | #[inline] 484 | pub unsafe fn ibv_alloc_mw(pd: *mut ibv_pd, type_: ibv_mw_type::Type) -> Option<*mut ibv_mw> { 485 | if (*(*pd).context).ops.alloc_mw.is_some() { 486 | Some((*(*pd).context).ops.alloc_mw.unwrap()(pd, type_)) 487 | } else { 488 | *libc::__errno_location() = libc::EOPNOTSUPP; 489 | None 490 | } 491 | } 492 | 493 | #[inline] 494 | pub unsafe fn ibv_dealloc_mw(mw: *mut ibv_mw) -> c_int { 495 | (*(*mw).context).ops.dealloc_mw.unwrap()(mw) 496 | } 497 | 498 | // ibv_inc_rkey - Increase the 8 lsb in the given rkey 499 | #[inline] 500 | pub unsafe fn ibv_inc_rkey(rkey: u32) -> u32 { 501 | let mask: u32 = 0x000000ff; 502 | let newtag = ((rkey + 1) & mask) as u8; 503 | 504 | (rkey & !mask) | (newtag as u32) 505 | } 506 | 507 | #[inline] 508 | pub unsafe fn ibv_bind_mw(qp: *mut ibv_qp, mw: *mut ibv_mw, mw_bind: *mut ibv_mw_bind) -> c_int { 509 | if (*mw).type_ != ibv_mw_type::IBV_MW_TYPE_1 { 510 | libc::EINVAL 511 | } else { 512 | (*(*mw).context).ops.bind_mw.unwrap()(qp, mw, mw_bind) 513 | } 514 | } 515 | 516 | #[inline] 517 | pub unsafe fn ibv_advise_mr( 
518 | pd: *mut ibv_pd, 519 | advice: ibv_advise_mr_advice, 520 | flags: u32, 521 | sg_list: *mut ibv_sge, 522 | num_sge: u32, 523 | ) -> c_int { 524 | let vcr = verbs_get_ctx_op!((*pd).context, advise_mr); 525 | 526 | if let Some(vctx) = vcr { 527 | (*vctx).advise_mr.unwrap()(pd, advice, flags, sg_list, num_sge) 528 | } else { 529 | libc::EOPNOTSUPP 530 | } 531 | } 532 | 533 | // ibv_dm related inline functions 534 | #[inline] 535 | pub unsafe fn ibv_alloc_dm( 536 | context: *mut ibv_context, 537 | attr: *mut ibv_alloc_dm_attr, 538 | ) -> Option<*mut ibv_dm> { 539 | let vcr = verbs_get_ctx_op!(context, alloc_dm); 540 | 541 | if let Some(vctx) = vcr { 542 | Some((*vctx).alloc_dm.unwrap()(context, attr)) 543 | } else { 544 | *libc::__errno_location() = libc::EOPNOTSUPP; 545 | None 546 | } 547 | } 548 | 549 | #[inline] 550 | pub unsafe fn ibv_free_dm(dm: *mut ibv_dm) -> c_int { 551 | let vcr = verbs_get_ctx_op!((*dm).context, free_dm); 552 | 553 | if let Some(vctx) = vcr { 554 | (*vctx).free_dm.unwrap()(dm) 555 | } else { 556 | libc::EOPNOTSUPP 557 | } 558 | } 559 | 560 | #[inline] 561 | pub unsafe fn ibv_memcpy_to_dm( 562 | dm: *mut ibv_dm, 563 | dm_offset: u64, 564 | host_addr: *const c_void, 565 | length: usize, 566 | ) -> c_int { 567 | (*dm).memcpy_to_dm.unwrap()(dm, dm_offset, host_addr, length) 568 | } 569 | 570 | #[inline] 571 | pub unsafe fn ibv_memcpy_from_dm( 572 | host_addr: *mut c_void, 573 | dm: *mut ibv_dm, 574 | dm_offset: u64, 575 | length: usize, 576 | ) -> c_int { 577 | (*dm).memcpy_from_dm.unwrap()(host_addr, dm, dm_offset, length) 578 | } 579 | 580 | #[inline] 581 | pub unsafe fn ibv_alloc_null_mr(pd: *mut ibv_pd) -> Option<*mut ibv_mr> { 582 | let vcr = verbs_get_ctx_op!((*pd).context, alloc_null_mr); 583 | 584 | if let Some(vctx) = vcr { 585 | Some((*vctx).alloc_null_mr.unwrap()(pd)) 586 | } else { 587 | *libc::__errno_location() = libc::EOPNOTSUPP; 588 | None 589 | } 590 | } 591 | 592 | #[inline] 593 | pub unsafe fn ibv_reg_dm_mr( 594 | pd: *mut ibv_pd, 595 | dm: *mut ibv_dm, 596 | dm_offset: u64, 597 | length: usize, 598 | access: u32, 599 | ) -> Option<*mut ibv_mr> { 600 | let vcr = verbs_get_ctx_op!((*pd).context, reg_dm_mr); 601 | 602 | if let Some(vctx) = vcr { 603 | Some((*vctx).reg_dm_mr.unwrap()( 604 | pd, dm, dm_offset, length, access, 605 | )) 606 | } else { 607 | *libc::__errno_location() = libc::EOPNOTSUPP; 608 | None 609 | } 610 | } 611 | 612 | // ibv_cq_ex related inline function 613 | #[inline] 614 | pub unsafe fn ibv_create_cq_ex( 615 | context: *mut ibv_context, 616 | cq_attr: *mut ibv_cq_init_attr_ex, 617 | ) -> Option<*mut ibv_cq_ex> { 618 | let vcr = verbs_get_ctx_op!(context, create_cq_ex); 619 | 620 | if let Some(vctx) = vcr { 621 | Some((*vctx).create_cq_ex.unwrap()(context, cq_attr)) 622 | } else { 623 | *libc::__errno_location() = libc::EOPNOTSUPP; 624 | None 625 | } 626 | } 627 | 628 | // ibv_cq related inline functions 629 | #[inline] 630 | pub unsafe fn ibv_poll_cq(cq: *mut ibv_cq, num_entries: i32, wc: *mut ibv_wc) -> c_int { 631 | (*(*cq).context).ops.poll_cq.unwrap()(cq, num_entries, wc) 632 | } 633 | 634 | #[inline] 635 | pub unsafe fn ibv_req_notify_cq(cq: *mut ibv_cq, solicited_only: i32) -> c_int { 636 | (*(*cq).context).ops.req_notify_cq.unwrap()(cq, solicited_only) 637 | } 638 | 639 | #[inline] 640 | pub unsafe fn ibv_modify_cq(cq: *mut ibv_cq, attr: *mut ibv_modify_cq_attr) -> c_int { 641 | let vcr = verbs_get_ctx_op!((*cq).context, modify_cq); 642 | 643 | if let Some(vctx) = vcr { 644 | (*vctx).modify_cq.unwrap()(cq, attr) 645 | } 
else { 646 | libc::EOPNOTSUPP 647 | } 648 | } 649 | 650 | // ibv_srq related inline functions 651 | #[inline] 652 | pub unsafe fn ibv_create_srq_ex( 653 | context: *mut ibv_context, 654 | srq_init_attr_ex: *mut ibv_srq_init_attr_ex, 655 | ) -> Option<*mut ibv_srq> { 656 | let mask = ibv_srq_init_attr_mask((*srq_init_attr_ex).comp_mask); 657 | let mask_inv = ibv_srq_init_attr_mask(!(*srq_init_attr_ex).comp_mask); 658 | let zero = ibv_srq_init_attr_mask(0); 659 | 660 | // TODO: verify the condition 661 | let cond = (mask_inv 662 | | (ibv_srq_init_attr_mask::IBV_SRQ_INIT_ATTR_PD 663 | & ibv_srq_init_attr_mask::IBV_SRQ_INIT_ATTR_TYPE)) 664 | != zero 665 | && (mask & ibv_srq_init_attr_mask::IBV_SRQ_INIT_ATTR_PD) != zero 666 | && ((mask & ibv_srq_init_attr_mask::IBV_SRQ_INIT_ATTR_TYPE) != zero 667 | || ((*srq_init_attr_ex).srq_type == ibv_srq_type::IBV_SRQT_BASIC)); 668 | if cond { 669 | Some(ibv_create_srq( 670 | (*srq_init_attr_ex).pd, 671 | srq_init_attr_ex as *mut ibv_srq_init_attr, 672 | )) 673 | } else { 674 | let vcr = verbs_get_ctx_op!(context, create_srq_ex); 675 | 676 | if let Some(vctx) = vcr { 677 | Some((*vctx).create_srq_ex.unwrap()(context, srq_init_attr_ex)) 678 | } else { 679 | *libc::__errno_location() = libc::EOPNOTSUPP; 680 | None 681 | } 682 | } 683 | } 684 | 685 | #[inline] 686 | pub unsafe fn ibv_get_srq_num(srq: *mut ibv_srq, srq_num: *mut u32) -> c_int { 687 | let vcr = verbs_get_ctx_op!((*srq).context, get_srq_num); 688 | 689 | if let Some(vctx) = vcr { 690 | (*vctx).get_srq_num.unwrap()(srq, srq_num) 691 | } else { 692 | libc::EOPNOTSUPP 693 | } 694 | } 695 | 696 | #[inline] 697 | pub unsafe fn ibv_post_srq_recv( 698 | srq: *mut ibv_srq, 699 | recv_wr: *mut ibv_recv_wr, 700 | bad_recv_wr: *mut *mut ibv_recv_wr, 701 | ) -> c_int { 702 | (*(*srq).context).ops.post_srq_recv.unwrap()(srq, recv_wr, bad_recv_wr) 703 | } 704 | 705 | #[inline] 706 | pub unsafe fn ibv_post_srq_ops( 707 | srq: *mut ibv_srq, 708 | op: *mut ibv_ops_wr, 709 | bad_op: *mut *mut ibv_ops_wr, 710 | ) -> c_int { 711 | let vcr = verbs_get_ctx_op!((*srq).context, post_srq_ops); 712 | 713 | if let Some(vctx) = vcr { 714 | (*vctx).post_srq_ops.unwrap()(srq, op, bad_op) 715 | } else { 716 | *bad_op = op; 717 | libc::EOPNOTSUPP 718 | } 719 | } 720 | 721 | // ibv_qp related inline functions 722 | #[inline] 723 | pub unsafe fn ibv_create_qp_ex( 724 | context: *mut ibv_context, 725 | qp_init_attr_ex: *mut ibv_qp_init_attr_ex, 726 | ) -> Option<*mut ibv_qp> { 727 | let mask = ibv_qp_init_attr_mask((*qp_init_attr_ex).comp_mask); 728 | 729 | if mask == ibv_qp_init_attr_mask::IBV_QP_INIT_ATTR_PD { 730 | Some(ibv_create_qp( 731 | (*qp_init_attr_ex).pd, 732 | qp_init_attr_ex as *mut ibv_qp_init_attr, 733 | )) 734 | } else { 735 | let vcr = verbs_get_ctx_op!(context, create_qp_ex); 736 | 737 | if let Some(vctx) = vcr { 738 | Some((*vctx).create_qp_ex.unwrap()(context, qp_init_attr_ex)) 739 | } else { 740 | *libc::__errno_location() = libc::EOPNOTSUPP; 741 | None 742 | } 743 | } 744 | } 745 | 746 | // ibv_td related inline functions 747 | #[inline] 748 | pub unsafe fn ibv_alloc_td( 749 | context: *mut ibv_context, 750 | init_attr: *mut ibv_td_init_attr, 751 | ) -> Option<*mut ibv_td> { 752 | let vcr = verbs_get_ctx_op!(context, alloc_td); 753 | 754 | if let Some(vctx) = vcr { 755 | Some((*vctx).alloc_td.unwrap()(context, init_attr)) 756 | } else { 757 | *libc::__errno_location() = libc::EOPNOTSUPP; 758 | None 759 | } 760 | } 761 | 762 | #[inline] 763 | pub unsafe fn ibv_dealloc_td(td: *mut ibv_td) -> c_int { 764 | let 
765 | 
766 |     if let Some(vctx) = vcr {
767 |         (*vctx).dealloc_td.unwrap()(td)
768 |     } else {
769 |         libc::EOPNOTSUPP
770 |     }
771 | }
772 | 
773 | // ibv_pd related inline function
774 | #[inline]
775 | pub unsafe fn ibv_alloc_parent_domain(
776 |     context: *mut ibv_context,
777 |     attr: *mut ibv_parent_domain_init_attr,
778 | ) -> Option<*mut ibv_pd> {
779 |     let vcr = verbs_get_ctx_op!(context, alloc_parent_domain);
780 | 
781 |     if let Some(vctx) = vcr {
782 |         Some((*vctx).alloc_parent_domain.unwrap()(context, attr))
783 |     } else {
784 |         *libc::__errno_location() = libc::EOPNOTSUPP;
785 |         None
786 |     }
787 | }
788 | 
789 | // device related inline functions
790 | #[inline]
791 | pub unsafe fn ibv_query_rt_values_ex(
792 |     context: *mut ibv_context,
793 |     values: *mut ibv_values_ex,
794 | ) -> c_int {
795 |     let vcr = verbs_get_ctx_op!(context, query_rt_values);
796 | 
797 |     if let Some(vctx) = vcr {
798 |         (*vctx).query_rt_values.unwrap()(context, values)
799 |     } else {
800 |         libc::EOPNOTSUPP
801 |     }
802 | }
803 | 
804 | #[inline]
805 | pub unsafe fn ibv_query_device_ex(
806 |     context: *mut ibv_context,
807 |     input: *const ibv_query_device_ex_input,
808 |     attr: *mut ibv_device_attr_ex,
809 | ) -> c_int {
810 |     let vcr = verbs_get_ctx_op!(context, query_device_ex);
811 | 
812 |     if let Some(vctx) = vcr {
813 |         let ret = (*vctx).query_device_ex.unwrap()(context, input, attr, mem::size_of_val(&*attr));
814 |         if ret != libc::EOPNOTSUPP {
815 |             return ret;
816 |         }
817 |     }
818 | 
819 |     // TODO: memset(attr, 0, sizeof(*attr));
820 |     ibv_query_device(context, &mut (*attr).orig_attr)
821 | }
822 | 
823 | // ibv_qp related inline functions
824 | #[inline]
825 | pub unsafe fn ibv_open_qp(
826 |     context: *mut ibv_context,
827 |     qp_open_attr: *mut ibv_qp_open_attr,
828 | ) -> Option<*mut ibv_qp> {
829 |     let vcr = verbs_get_ctx_op!(context, open_qp);
830 | 
831 |     if let Some(vctx) = vcr {
832 |         Some((*vctx).open_qp.unwrap()(context, qp_open_attr))
833 |     } else {
834 |         *libc::__errno_location() = libc::EOPNOTSUPP;
835 |         None
836 |     }
837 | }
838 | 
839 | #[inline]
840 | pub unsafe fn ibv_modify_qp_rate_limit(
841 |     qp: *mut ibv_qp,
842 |     attr: *mut ibv_qp_rate_limit_attr,
843 | ) -> c_int {
844 |     let vcr = verbs_get_ctx_op!((*qp).context, modify_qp_rate_limit);
845 | 
846 |     if let Some(vctx) = vcr {
847 |         (*vctx).modify_qp_rate_limit.unwrap()(qp, attr)
848 |     } else {
849 |         libc::EOPNOTSUPP
850 |     }
851 | }
852 | 
853 | // ibv_wq related inline functions
854 | #[inline]
855 | pub unsafe fn ibv_create_wq(
856 |     context: *mut ibv_context,
857 |     wq_init_attr: *mut ibv_wq_init_attr,
858 | ) -> Option<*mut ibv_wq> {
859 |     let vcr = verbs_get_ctx_op!(context, create_wq);
860 | 
861 |     if let Some(vctx) = vcr {
862 |         let wq = (*vctx).create_wq.unwrap()(context, wq_init_attr);
863 |         if wq != (ptr::null::<ibv_wq>() as *mut _) {
864 |             (*wq).events_completed = 0;
865 |             libc::pthread_mutex_init(
866 |                 &mut (*wq).mutex,
867 |                 ptr::null::<libc::pthread_mutexattr_t>() as *mut _,
868 |             );
869 |             libc::pthread_cond_init(
870 |                 &mut (*wq).cond,
871 |                 ptr::null::<libc::pthread_condattr_t>() as *mut _,
872 |             );
873 |         }
874 |         Some(wq)
875 |     } else {
876 |         *libc::__errno_location() = libc::EOPNOTSUPP;
877 |         None
878 |     }
879 | }
880 | 
881 | #[inline]
882 | pub unsafe fn ibv_modify_wq(wq: *mut ibv_wq, wq_attr: *mut ibv_wq_attr) -> c_int {
883 |     let vcr = verbs_get_ctx_op!((*wq).context, modify_wq);
884 | 
885 |     if let Some(vctx) = vcr {
886 |         (*vctx).modify_wq.unwrap()(wq, wq_attr)
887 |     } else {
888 |         libc::EOPNOTSUPP
889 |     }
890 | }
891 | 
892 | #[inline]
893 | pub unsafe fn ibv_destroy_wq(wq: *mut ibv_wq) -> c_int {
894 |     let vcr = verbs_get_ctx_op!((*wq).context, destroy_wq);
895 | 
896 |     if let Some(vctx) = vcr {
897 |         (*vctx).destroy_wq.unwrap()(wq)
898 |     } else {
899 |         libc::EOPNOTSUPP
900 |     }
901 | }
902 | 
903 | // ibv_rwq_ind_table related inline functions
904 | #[inline]
905 | pub unsafe fn ibv_create_rwq_ind_table(
906 |     context: *mut ibv_context,
907 |     init_attr: *mut ibv_rwq_ind_table_init_attr,
908 | ) -> Option<*mut ibv_rwq_ind_table> {
909 |     let vcr = verbs_get_ctx_op!(context, create_rwq_ind_table);
910 | 
911 |     if let Some(vctx) = vcr {
912 |         Some((*vctx).create_rwq_ind_table.unwrap()(context, init_attr))
913 |     } else {
914 |         *libc::__errno_location() = libc::EOPNOTSUPP;
915 |         None
916 |     }
917 | }
918 | 
919 | #[inline]
920 | pub unsafe fn ibv_destroy_rwq_ind_table(rwq_ind_table: *mut ibv_rwq_ind_table) -> c_int {
921 |     let vcr = verbs_get_ctx_op!((*rwq_ind_table).context, destroy_rwq_ind_table);
922 | 
923 |     if let Some(vctx) = vcr {
924 |         (*vctx).destroy_rwq_ind_table.unwrap()(rwq_ind_table)
925 |     } else {
926 |         libc::EOPNOTSUPP
927 |     }
928 | }
929 | 
930 | // If IBV_SEND_INLINE flag is set, the data buffers can be reused
931 | // immediately after the call returns.
932 | #[inline]
933 | pub unsafe fn ibv_post_send(
934 |     qp: *mut ibv_qp,
935 |     wr: *mut ibv_send_wr,
936 |     bad_wr: *mut *mut ibv_send_wr,
937 | ) -> c_int {
938 |     (*(*qp).context).ops.post_send.unwrap()(qp, wr, bad_wr)
939 | }
940 | 
941 | #[inline]
942 | pub unsafe fn ibv_post_recv(
943 |     qp: *mut ibv_qp,
944 |     wr: *mut ibv_recv_wr,
945 |     bad_wr: *mut *mut ibv_recv_wr,
946 | ) -> c_int {
947 |     (*(*qp).context).ops.post_recv.unwrap()(qp, wr, bad_wr)
948 | }
949 | 
950 | #[inline]
951 | pub unsafe fn ibv_is_qpt_supported(caps: u32, qpt: ibv_qp_type::Type) -> c_int {
952 |     !!(caps & (1 << qpt)) as c_int
953 | }
954 | 
955 | // ibv_counters related inline functions
956 | #[inline]
957 | pub unsafe fn ibv_create_counters(
958 |     context: *mut ibv_context,
959 |     init_attr: *mut ibv_counters_init_attr,
960 | ) -> Option<*mut ibv_counters> {
961 |     let vcr = verbs_get_ctx_op!(context, create_counters);
962 | 
963 |     if let Some(vctx) = vcr {
964 |         Some((*vctx).create_counters.unwrap()(context, init_attr))
965 |     } else {
966 |         *libc::__errno_location() = libc::EOPNOTSUPP;
967 |         None
968 |     }
969 | }
970 | 
971 | #[inline]
972 | pub unsafe fn ibv_destroy_counters(counters: *mut ibv_counters) -> c_int {
973 |     let vcr = verbs_get_ctx_op!((*counters).context, destroy_counters);
974 | 
975 |     if let Some(vctx) = vcr {
976 |         (*vctx).destroy_counters.unwrap()(counters)
977 |     } else {
978 |         libc::EOPNOTSUPP
979 |     }
980 | }
981 | 
982 | #[inline]
983 | pub unsafe fn ibv_attach_counters_point_flow(
984 |     counters: *mut ibv_counters,
985 |     attr: *mut ibv_counter_attach_attr,
986 |     flow: *mut ibv_flow,
987 | ) -> c_int {
988 |     let vcr = verbs_get_ctx_op!((*counters).context, attach_counters_point_flow);
989 | 
990 |     if let Some(vctx) = vcr {
991 |         (*vctx).attach_counters_point_flow.unwrap()(counters, attr, flow)
992 |     } else {
993 |         libc::EOPNOTSUPP
994 |     }
995 | }
996 | 
997 | #[inline]
998 | pub unsafe fn ibv_read_counters(
999 |     counters: *mut ibv_counters,
1000 |     counters_value: *mut u64,
1001 |     ncounters: u32,
1002 |     flags: u32,
1003 | ) -> c_int {
1004 |     let vcr = verbs_get_ctx_op!((*counters).context, read_counters);
1005 | 
1006 |     if let Some(vctx) = vcr {
1007 |         (*vctx).read_counters.unwrap()(counters, counters_value, ncounters, flags)
1008 |     } else {
1009 |         libc::EOPNOTSUPP
1010 |     }
1011 | }
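The comment above ibv_post_send captures the one behavioral subtlety of the posting path: with IBV_SEND_INLINE the payload is copied into the work request, so the buffer may be reused as soon as ibv_post_send returns. The sketch below is illustrative only and is not part of verbs.rs; it assumes the generated bindings expose ibv_send_flags as a bitflag newtype (like ibv_access_flags) and ibv_wc_status::IBV_WC_SUCCESS, matching the upstream libibverbs names.

// Illustrative sketch, not part of the bindings: post one inline, signaled send
// and busy-poll the completion queue for its work completion.
pub unsafe fn post_inline_send_and_wait(
    qp: *mut ibv_qp,
    cq: *mut ibv_cq,
    buf: &[u8],
    lkey: u32,
) -> bool {
    let mut sge = ibv_sge {
        addr: buf.as_ptr() as u64,
        length: buf.len() as u32,
        lkey,
    };
    let mut wr = std::mem::zeroed::<ibv_send_wr>();
    wr.wr_id = 1;
    wr.sg_list = &mut sge;
    wr.num_sge = 1;
    wr.opcode = ibv_wr_opcode::IBV_WR_SEND;
    // Assumed generated names: IBV_SEND_INLINE lets `buf` be reused immediately,
    // IBV_SEND_SIGNALED requests a completion entry for this work request.
    wr.send_flags = (ibv_send_flags::IBV_SEND_INLINE | ibv_send_flags::IBV_SEND_SIGNALED).0;
    let mut bad = std::ptr::null_mut();
    if ibv_post_send(qp, &mut wr, &mut bad) != 0 {
        return false;
    }
    // Drain the CQ until the single signaled completion shows up.
    let mut wc = std::mem::zeroed::<ibv_wc>();
    loop {
        match ibv_poll_cq(cq, 1, &mut wc) {
            0 => continue,
            1 => return wc.status == ibv_wc_status::IBV_WC_SUCCESS,
            _ => return false,
        }
    }
}

Receives have no inline analogue: a buffer handed to ibv_post_recv must stay registered and untouched until the matching completion is polled from the CQ.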
1012 | 1013 | /// Inline functions from 1014 | 1015 | pub const RDMA_IB_IP_PS_MASK: u64 = 0xFFFFFFFFFFFF0000; 1016 | pub const RDMA_IB_IP_PORT_MASK: u64 = 0x000000000000FFFF; 1017 | pub const RDMA_IB_IP_PS_TCP: u64 = 0x0000000001060000; 1018 | pub const RDMA_IB_IP_PS_UDP: u64 = 0x0000000001110000; 1019 | pub const RDMA_IB_PS_IB: u64 = 0x00000000013F0000; 1020 | 1021 | pub const RDMA_UDP_QKEY: u32 = 0x01234567; 1022 | 1023 | pub const RAI_PASSIVE: u32 = 0x00000001; 1024 | pub const RAI_NUMERICHOST: u32 = 0x00000002; 1025 | pub const RAI_NOROUTE: u32 = 0x00000004; 1026 | pub const RAI_FAMILY: u32 = 0x00000008; 1027 | 1028 | #[inline] 1029 | pub unsafe fn rdma_get_local_addr(id: &rdma_cm_id) -> &libc::sockaddr { 1030 | &id.route.addr.src_addr_union.src_addr 1031 | } 1032 | 1033 | #[inline] 1034 | pub unsafe fn rdma_get_peer_addr(id: &rdma_cm_id) -> &libc::sockaddr { 1035 | &id.route.addr.dst_addr_union.dst_addr 1036 | } 1037 | 1038 | /// Inline functions from 1039 | 1040 | #[inline] 1041 | pub unsafe fn rdma_seterrno(ret: c_int) -> c_int { 1042 | if ret != 0 { 1043 | *libc::__errno_location() = ret; 1044 | -1 1045 | } else { 1046 | ret 1047 | } 1048 | } 1049 | 1050 | #[inline] 1051 | pub unsafe fn rdma_reg_msgs(id: *mut rdma_cm_id, addr: *mut c_void, length: usize) -> *mut ibv_mr { 1052 | ibv_reg_mr( 1053 | (*id).pd, 1054 | addr, 1055 | length, 1056 | ibv_access_flags::IBV_ACCESS_LOCAL_WRITE.0 as c_int, 1057 | ) 1058 | } 1059 | 1060 | #[inline] 1061 | pub unsafe fn rdma_reg_read(id: *mut rdma_cm_id, addr: *mut c_void, length: usize) -> *mut ibv_mr { 1062 | ibv_reg_mr( 1063 | (*id).pd, 1064 | addr, 1065 | length, 1066 | (ibv_access_flags::IBV_ACCESS_LOCAL_WRITE | ibv_access_flags::IBV_ACCESS_REMOTE_READ).0 1067 | as c_int, 1068 | ) 1069 | } 1070 | 1071 | #[inline] 1072 | pub unsafe fn rdma_reg_write(id: *mut rdma_cm_id, addr: *mut c_void, length: usize) -> *mut ibv_mr { 1073 | ibv_reg_mr( 1074 | (*id).pd, 1075 | addr, 1076 | length, 1077 | (ibv_access_flags::IBV_ACCESS_LOCAL_WRITE | ibv_access_flags::IBV_ACCESS_REMOTE_WRITE).0 1078 | as c_int, 1079 | ) 1080 | } 1081 | 1082 | #[inline] 1083 | pub unsafe fn rdma_dereg_mr(mr: *mut ibv_mr) -> c_int { 1084 | rdma_seterrno(ibv_dereg_mr(mr)) 1085 | } 1086 | 1087 | #[inline] 1088 | pub unsafe fn rdma_post_recvv( 1089 | id: *mut rdma_cm_id, 1090 | context: *mut c_void, 1091 | sgl: *mut ibv_sge, 1092 | nsge: c_int, 1093 | ) -> c_int { 1094 | let mut wr = ibv_recv_wr { 1095 | wr_id: context as u64, 1096 | next: ptr::null::() as *mut _, 1097 | sg_list: sgl, 1098 | num_sge: nsge, 1099 | }; 1100 | let mut bad = ptr::null::() as *mut _; 1101 | 1102 | if (*id).srq as usize != 0 { 1103 | rdma_seterrno(ibv_post_srq_recv((*id).srq, &mut wr, &mut bad)) 1104 | } else { 1105 | rdma_seterrno(ibv_post_recv((*id).qp, &mut wr, &mut bad)) 1106 | } 1107 | } 1108 | 1109 | #[inline] 1110 | pub unsafe fn rdma_post_sendv( 1111 | id: *mut rdma_cm_id, 1112 | context: *mut c_void, 1113 | sgl: *mut ibv_sge, 1114 | nsge: c_int, 1115 | flags: c_int, 1116 | ) -> c_int { 1117 | let mut wr = std::mem::zeroed::(); 1118 | wr.wr_id = context as u64; 1119 | wr.next = ptr::null::() as *mut _; 1120 | wr.sg_list = sgl; 1121 | wr.num_sge = nsge; 1122 | wr.opcode = ibv_wr_opcode::IBV_WR_SEND; 1123 | wr.send_flags = flags as c_uint; 1124 | let mut bad = ptr::null::() as *mut _; 1125 | 1126 | rdma_seterrno(ibv_post_send((*id).qp, &mut wr, &mut bad)) 1127 | } 1128 | 1129 | #[inline] 1130 | pub unsafe fn rdma_post_readv( 1131 | id: *mut rdma_cm_id, 1132 | context: *mut c_void, 1133 | sgl: *mut 
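rdma_reg_msgs, rdma_post_recvv, and rdma_post_sendv follow the rdma_verbs.h convention: registration yields an ibv_mr whose lkey goes into each scatter/gather entry, and a posted receive is routed to the SRQ when one is attached to the rdma_cm_id, otherwise to its QP. A small illustrative sketch (not part of the bindings), assuming `id` points to an established rdma_cm_id:

// Illustrative sketch, not part of the bindings: register a buffer and post it
// as a single-SGE receive on an established rdma_cm_id.
pub unsafe fn post_one_recv(id: *mut rdma_cm_id, buf: &mut [u8]) -> Result<*mut ibv_mr, c_int> {
    // IBV_ACCESS_LOCAL_WRITE is enough for the HCA to place incoming data here.
    let mr = rdma_reg_msgs(id, buf.as_mut_ptr() as *mut c_void, buf.len());
    if mr.is_null() {
        return Err(*libc::__errno_location());
    }
    let mut sge = ibv_sge {
        addr: buf.as_mut_ptr() as u64,
        length: buf.len() as u32,
        lkey: (*mr).lkey,
    };
    // The buffer pointer doubles as the work-request id (the `context` argument).
    if rdma_post_recvv(id, buf.as_mut_ptr() as *mut c_void, &mut sge, 1) != 0 {
        rdma_dereg_mr(mr);
        return Err(*libc::__errno_location());
    }
    Ok(mr)
}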
1129 | #[inline]
1130 | pub unsafe fn rdma_post_readv(
1131 |     id: *mut rdma_cm_id,
1132 |     context: *mut c_void,
1133 |     sgl: *mut ibv_sge,
1134 |     nsge: c_int,
1135 |     flags: c_int,
1136 |     remote_addr: u64,
1137 |     rkey: u32,
1138 | ) -> c_int {
1139 |     let mut wr = std::mem::zeroed::<ibv_send_wr>();
1140 |     wr.wr_id = context as u64;
1141 |     wr.next = ptr::null::<ibv_send_wr>() as *mut _;
1142 |     wr.sg_list = sgl;
1143 |     wr.num_sge = nsge;
1144 |     wr.opcode = ibv_wr_opcode::IBV_WR_RDMA_READ;
1145 |     wr.send_flags = flags as c_uint;
1146 |     wr.wr = wr_t {
1147 |         rdma: rdma_t { remote_addr, rkey },
1148 |     };
1149 |     let mut bad = ptr::null::<ibv_send_wr>() as *mut _;
1150 | 
1151 |     rdma_seterrno(ibv_post_send((*id).qp, &mut wr, &mut bad))
1152 | }
1153 | 
1154 | #[inline]
1155 | pub unsafe fn rdma_post_writev(
1156 |     id: *mut rdma_cm_id,
1157 |     context: *mut c_void,
1158 |     sgl: *mut ibv_sge,
1159 |     nsge: c_int,
1160 |     flags: c_int,
1161 |     remote_addr: u64,
1162 |     rkey: u32,
1163 | ) -> c_int {
1164 |     let mut wr = std::mem::zeroed::<ibv_send_wr>();
1165 |     wr.wr_id = context as u64;
1166 |     wr.next = ptr::null::<ibv_send_wr>() as *mut _;
1167 |     wr.sg_list = sgl;
1168 |     wr.num_sge = nsge;
1169 |     wr.opcode = ibv_wr_opcode::IBV_WR_RDMA_WRITE;
1170 |     wr.send_flags = flags as c_uint;
1171 |     wr.wr = wr_t {
1172 |         rdma: rdma_t { remote_addr, rkey },
1173 |     };
1174 |     let mut bad = ptr::null::<ibv_send_wr>() as *mut _;
1175 | 
1176 |     rdma_seterrno(ibv_post_send((*id).qp, &mut wr, &mut bad))
1177 | }
1178 | 
1179 | #[inline]
1180 | pub unsafe fn rdma_post_recv(
1181 |     id: *mut rdma_cm_id,
1182 |     context: *mut c_void,
1183 |     addr: *mut c_void,
1184 |     length: usize,
1185 |     mr: *mut ibv_mr,
1186 | ) -> c_int {
1187 |     assert!(
1188 |         addr >= (*mr).addr && (addr as usize + length <= (*mr).addr as usize + (*mr).length),
1189 |         "invalid addr={} and length={}",
1190 |         addr as usize,
1191 |         length,
1192 |     );
1193 |     let mut sge = ibv_sge {
1194 |         addr: addr as u64,
1195 |         length: length as u32,
1196 |         lkey: (*mr).lkey,
1197 |     };
1198 |     let nsge = 1;
1199 |     rdma_post_recvv(id, context, &mut sge, nsge)
1200 | }
1201 | 
1202 | #[inline]
1203 | pub unsafe fn rdma_post_send(
1204 |     id: *mut rdma_cm_id,
1205 |     context: *mut c_void,
1206 |     addr: *mut c_void,
1207 |     length: usize,
1208 |     mr: *mut ibv_mr,
1209 |     flags: c_int,
1210 | ) -> c_int {
1211 |     let mut sge = ibv_sge {
1212 |         addr: addr as u64,
1213 |         length: length as u32,
1214 |         lkey: if !mr.is_null() { (*mr).lkey } else { 0 },
1215 |     };
1216 |     let nsge = 1;
1217 |     rdma_post_sendv(id, context, &mut sge, nsge, flags)
1218 | }
1219 | 
1220 | #[inline]
1221 | pub unsafe fn rdma_post_read(
1222 |     id: *mut rdma_cm_id,
1223 |     context: *mut c_void,
1224 |     addr: *mut c_void,
1225 |     length: usize,
1226 |     mr: *mut ibv_mr,
1227 |     flags: c_int,
1228 |     remote_addr: u64,
1229 |     rkey: u32,
1230 | ) -> c_int {
1231 |     let mut sge = ibv_sge {
1232 |         addr: addr as u64,
1233 |         length: length as u32,
1234 |         lkey: (*mr).lkey,
1235 |     };
1236 |     let nsge = 1;
1237 |     rdma_post_readv(id, context, &mut sge, nsge, flags, remote_addr, rkey)
1238 | }
1239 | 
1240 | #[inline]
1241 | pub unsafe fn rdma_post_write(
1242 |     id: *mut rdma_cm_id,
1243 |     context: *mut c_void,
1244 |     addr: *mut c_void,
1245 |     length: usize,
1246 |     mr: *mut ibv_mr,
1247 |     flags: c_int,
1248 |     remote_addr: u64,
1249 |     rkey: u32,
1250 | ) -> c_int {
1251 |     let mut sge = ibv_sge {
1252 |         addr: addr as u64,
1253 |         length: length as u32,
1254 |         lkey: if !mr.is_null() { (*mr).lkey } else { 0 },
1255 |     };
1256 |     let nsge = 1;
1257 |     rdma_post_writev(id, context, &mut sge, nsge, flags, remote_addr, rkey)
1258 | }
1259 | 
1260 | #[inline]
1261 | pub unsafe fn rdma_post_ud_send(
1262 |     id: *mut rdma_cm_id,
1263 |     context: *mut c_void,
1264 |     addr: *mut c_void,
1265 |     length: usize,
1266 |     mr: *mut ibv_mr,
1267 |     flags: c_int,
1268 |     ah: *mut ibv_ah,
1269 |     remote_qpn: u32,
1270 | ) -> c_int {
1271 |     let mut sge = ibv_sge {
1272 |         addr: addr as u64,
1273 |         length: length as u32,
1274 |         lkey: if !mr.is_null() { (*mr).lkey } else { 0 },
1275 |     };
1276 | 
1277 |     let mut wr = std::mem::zeroed::<ibv_send_wr>();
1278 |     wr.wr_id = context as u64;
1279 |     wr.next = ptr::null::<ibv_send_wr>() as *mut _;
1280 |     wr.sg_list = &mut sge;
1281 |     wr.num_sge = 1;
1282 |     wr.opcode = ibv_wr_opcode::IBV_WR_SEND;
1283 |     wr.send_flags = flags as c_uint;
1284 |     wr.wr = wr_t {
1285 |         ud: ud_t {
1286 |             ah,
1287 |             remote_qpn,
1288 |             remote_qkey: RDMA_UDP_QKEY,
1289 |         },
1290 |     };
1291 |     let mut bad = ptr::null::<ibv_send_wr>() as *mut _;
1292 | 
1293 |     rdma_seterrno(ibv_post_send((*id).qp, &mut wr, &mut bad))
1294 | }
1295 | 
1296 | #[inline]
1297 | pub unsafe fn rdma_get_send_comp(id: *mut rdma_cm_id, wc: *mut ibv_wc) -> c_int {
1298 |     let mut ret: c_int;
1299 |     let mut cq = ptr::null::<ibv_cq>() as *mut _;
1300 |     let mut context = ptr::null::<c_void>() as *mut _;
1301 |     let nevents = 1;
1302 |     let num_entries = 1;
1303 |     let solicited_only = 0;
1304 | 
1305 |     loop {
1306 |         ret = ibv_poll_cq((*id).send_cq, num_entries, wc);
1307 |         if ret != 0 {
1308 |             break;
1309 |         }
1310 |         ret = ibv_req_notify_cq((*id).send_cq, solicited_only);
1311 |         if ret != 0 {
1312 |             return rdma_seterrno(ret);
1313 |         }
1314 |         ret = ibv_poll_cq((*id).send_cq, num_entries, wc);
1315 |         if ret != 0 {
1316 |             break;
1317 |         }
1318 |         ret = ibv_get_cq_event((*id).send_cq_channel, &mut cq, &mut context);
1319 |         if ret != 0 {
1320 |             return ret;
1321 |         }
1322 | 
1323 |         assert!(cq == (*id).send_cq && context as *mut rdma_cm_id == id);
1324 |         ibv_ack_cq_events((*id).send_cq, nevents);
1325 |     }
1326 |     if ret < 0 {
1327 |         rdma_seterrno(ret)
1328 |     } else {
1329 |         ret
1330 |     }
1331 | }
1332 | 
1333 | #[inline]
1334 | pub unsafe fn rdma_get_recv_comp(id: *mut rdma_cm_id, wc: *mut ibv_wc) -> c_int {
1335 |     let mut ret: c_int;
1336 |     let mut cq = ptr::null::<ibv_cq>() as *mut _;
1337 |     let mut context = ptr::null::<c_void>() as *mut _;
1338 |     let nevents = 1;
1339 |     let num_entries = 1;
1340 |     let solicited_only = 0;
1341 |     loop {
1342 |         ret = ibv_poll_cq((*id).recv_cq, num_entries, wc);
1343 |         if ret != 0 {
1344 |             break;
1345 |         }
1346 |         ret = ibv_req_notify_cq((*id).recv_cq, solicited_only);
1347 |         if ret != 0 {
1348 |             return rdma_seterrno(ret);
1349 |         }
1350 |         ret = ibv_poll_cq((*id).recv_cq, num_entries, wc);
1351 |         if ret != 0 {
1352 |             break;
1353 |         }
1354 |         ret = ibv_get_cq_event((*id).recv_cq_channel, &mut cq, &mut context);
1355 |         if ret != 0 {
1356 |             return ret;
1357 |         }
1358 | 
1359 |         assert!(cq == (*id).recv_cq && context as *mut rdma_cm_id == id);
1360 |         ibv_ack_cq_events((*id).recv_cq, nevents);
1361 |     }
1362 |     if ret < 0 {
1363 |         rdma_seterrno(ret)
1364 |     } else {
1365 |         ret
1366 |     }
1367 | }
1368 | 
--------------------------------------------------------------------------------
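rdma_get_send_comp and rdma_get_recv_comp implement the usual completion wait: poll the CQ, arm it with ibv_req_notify_cq, poll once more to close the race, then sleep in ibv_get_cq_event and acknowledge the event. Putting the rdma_verbs.h helpers together, a synchronous send looks roughly like the sketch below; it is illustrative only and assumes `id` is an rdma_cm_id that has already been connected (for example via rdma_connect or rdma_accept), so its QP, send_cq, and send_cq_channel are populated.

// Illustrative sketch, not part of the bindings: register, post a send, then
// block until the matching work completion is reaped.
pub unsafe fn send_and_wait(id: *mut rdma_cm_id, msg: &mut [u8]) -> Result<(), c_int> {
    let mr = rdma_reg_msgs(id, msg.as_mut_ptr() as *mut c_void, msg.len());
    if mr.is_null() {
        return Err(*libc::__errno_location());
    }
    // flags = 0: whether this send produces a CQE depends on how the QP was
    // created (sq_sig_all); callers may want to pass IBV_SEND_SIGNALED instead.
    let ret = rdma_post_send(
        id,
        std::ptr::null_mut(),
        msg.as_mut_ptr() as *mut c_void,
        msg.len(),
        mr,
        0,
    );
    if ret != 0 {
        rdma_dereg_mr(mr);
        return Err(*libc::__errno_location());
    }
    // Wait for the send completion (poll, arm, sleep on the CQ channel).
    let mut wc = std::mem::zeroed::<ibv_wc>();
    let n = rdma_get_send_comp(id, &mut wc);
    rdma_dereg_mr(mr);
    if n < 0 {
        Err(*libc::__errno_location())
    } else {
        Ok(())
    }
}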