├── .gitignore
├── .gitmodules
├── CMakeLists.txt
├── Jenkinsfile
├── LICENSE
├── README.md
├── kernel-headers
│   ├── linux
│   │   └── mlx5
│   │       ├── mlx5_ifc.h
│   │       └── mlx5_ifc_fpga.h
│   └── uapi
│       └── rdma
│           ├── ib_user_ioctl_cmds.h
│           ├── ib_user_verbs.h
│           ├── mlx5-abi.h
│           ├── mlx5_user_ioctl_cmds.h
│           └── rdma_user_ioctl_cmds.h
├── src
│   ├── devx.c
│   ├── devx.h
│   ├── devx_dbrec.c
│   ├── devx_dpdk.c
│   ├── devx_dpdk.h
│   ├── devx_init.c
│   ├── devx_ioctl.c
│   ├── devx_ioctl.h
│   ├── devx_priv.h
│   ├── devx_prm.h
│   ├── devx_uar.c
│   ├── devx_verbs.c
│   └── devx_verbs.h
└── tests
    └── test.cc

/.gitignore:
--------------------------------------------------------------------------------
1 | # Prerequisites
2 | *.d
3 | 
4 | # Object files
5 | *.o
6 | *.ko
7 | *.obj
8 | *.elf
9 | 
10 | # Linker output
11 | *.ilk
12 | *.map
13 | *.exp
14 | 
15 | # Precompiled Headers
16 | *.gch
17 | *.pch
18 | 
19 | # Libraries
20 | *.lib
21 | *.a
22 | *.la
23 | *.lo
24 | 
25 | # Shared objects (inc. Windows DLLs)
26 | *.dll
27 | *.so
28 | *.so.*
29 | *.dylib
30 | 
31 | # Executables
32 | *.exe
33 | *.out
34 | *.app
35 | *.i*86
36 | *.x86_64
37 | *.hex
38 | 
39 | # Debug files
40 | *.dSYM/
41 | *.su
42 | *.idb
43 | *.pdb
44 | 
45 | # Kernel Module Compile Results
46 | *.mod*
47 | *.cmd
48 | .tmp_versions/
49 | modules.order
50 | Module.symvers
51 | Mkfile.old
52 | dkms.conf
53 | 
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "googletest"]
2 |     path = googletest
3 |     url = https://github.com/google/googletest.git
--------------------------------------------------------------------------------
/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 2.8.11)
2 | 
3 | 
4 | include_directories(src kernel-headers kernel-headers/uapi)
5 | 
6 | add_library(devx
7 |     src/devx.c
8 |     src/devx_dbrec.c
9 |     src/devx_init.c
10 |     src/devx_ioctl.c
11 |     src/devx_uar.c)
12 | 
13 | add_subdirectory(googletest/googletest)
14 | 
15 | add_executable(test tests/test.cc)
16 | 
17 | target_link_libraries(test devx gtest gtest_main)
18 | 
19 | project (devx)
20 | set(VERSION "1.0.0")
21 | set(CPACK_PACKAGE_VERSION ${VERSION})
22 | set(CPACK_GENERATOR "RPM")
23 | set(CPACK_PACKAGE_NAME "devx")
24 | set(CPACK_PACKAGE_RELEASE 1)
25 | set(CPACK_PACKAGE_CONTACT "support@mellanox.com")
26 | set(CPACK_PACKAGE_VENDOR "Mellanox")
27 | set(CPACK_PACKAGING_INSTALL_PREFIX ${CMAKE_INSTALL_PREFIX})
28 | set(CPACK_PACKAGE_FILE_NAME "${CPACK_PACKAGE_NAME}-${CPACK_PACKAGE_VERSION}-${CPACK_PACKAGE_RELEASE}.${CMAKE_SYSTEM_PROCESSOR}")
29 | include(CPack)
30 | 
--------------------------------------------------------------------------------
/Jenkinsfile:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env groovy
2 | /*
3 |  * Testing script for Dev, to run from Jenkins CI
4 |  *
5 |  * Copyright (C) Mellanox Technologies Ltd, 2001-2018. ALL RIGHTS RESERVED.
6 |  *
7 |  * See file LICENSE for terms.
8 |  */
9 | 
10 | node('master') {
11 |     deleteDir()
12 |     checkout scm
13 |     dir('swx_ci') {
14 |         checkout([$class: 'GitSCM',
15 |                   extensions: [[$class: 'CloneOption', shallow: true]],
16 |                   userRemoteConfigs: [[ url: 'https://github.com/Mellanox/swx_ci']]
17 |         ])
18 |     }
19 |     def funcs = load "${env.WORKSPACE}/swx_ci/template/functions.groovy"
20 |     def jjb_pipeFile = funcs.getProjFile("proj_pipeline.groovy")
21 |     evaluate(readFile("${jjb_pipeFile}"))
22 | }
23 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | /*
2 |  * Copyright (c) 2001-2018 Mellanox Technologies, Ltd. All rights reserved.
3 |  *
4 |  * This software is available to you under a choice of one of two
5 |  * licenses. You may choose to be licensed under the terms of the GNU
6 |  * General Public License (GPL) Version 2, available from the file
7 |  * COPYING in the main directory of this source tree, or the
8 |  * BSD license below:
9 |  *
10 |  * Redistribution and use in source and binary forms, with or
11 |  * without modification, are permitted provided that the following
12 |  * conditions are met:
13 |  *
14 |  * - Redistributions of source code must retain the above
15 |  *   copyright notice, this list of conditions and the following
16 |  *   disclaimer.
17 |  *
18 |  * - Redistributions in binary form must reproduce the above
19 |  *   copyright notice, this list of conditions and the following
20 |  *   disclaimer in the documentation and/or other materials
21 |  *   provided with the distribution.
22 |  *
23 |  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 |  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 |  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 |  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 |  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 |  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 |  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 |  * SOFTWARE.
31 |  */
32 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # What is the DevX project?
2 | 
3 | The DevX library enables direct access from user space to the
4 | mlx5 device driver by using the KABI (kernel ABI) mechanism.
5 | 
6 | The main purpose is to make the user-space driver as independent of
7 | the kernel as possible, so that future device functionality and commands
8 | can be enabled with minimal or no kernel changes.
9 | 
10 | 
11 | ## How to build and run tests with DevX
12 | 
13 | ```
14 | % git clone https://github.com/Mellanox/devx
15 | % cd devx
16 | % git submodule init
17 | % git submodule update
18 | % cmake .
19 | % make
20 | % ./test
21 | ```
22 | 
--------------------------------------------------------------------------------
/kernel-headers/linux/mlx5/mlx5_ifc_fpga.h:
--------------------------------------------------------------------------------
1 | /*
2 |  * Copyright (C) Mellanox Technologies Ltd, 2001-2018. ALL RIGHTS RESERVED.
3 |  *
4 |  * See file LICENSE for terms.
5 | */ 6 | 7 | #ifndef MLX5_IFC_FPGA_H 8 | #define MLX5_IFC_FPGA_H 9 | 10 | enum { 11 | MLX5_FPGA_CAP_SANDBOX_VENDOR_ID_MLNX = 0x2c9, 12 | }; 13 | 14 | enum { 15 | MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_IPSEC = 0x2, 16 | }; 17 | 18 | struct mlx5_ifc_fpga_shell_caps_bits { 19 | u8 max_num_qps[0x10]; 20 | u8 reserved_at_10[0x8]; 21 | u8 total_rcv_credits[0x8]; 22 | 23 | u8 reserved_at_20[0xe]; 24 | u8 qp_type[0x2]; 25 | u8 reserved_at_30[0x5]; 26 | u8 rae[0x1]; 27 | u8 rwe[0x1]; 28 | u8 rre[0x1]; 29 | u8 reserved_at_38[0x4]; 30 | u8 dc[0x1]; 31 | u8 ud[0x1]; 32 | u8 uc[0x1]; 33 | u8 rc[0x1]; 34 | 35 | u8 reserved_at_40[0x1a]; 36 | u8 log_ddr_size[0x6]; 37 | 38 | u8 max_fpga_qp_msg_size[0x20]; 39 | 40 | u8 reserved_at_80[0x180]; 41 | }; 42 | 43 | struct mlx5_ifc_fpga_cap_bits { 44 | u8 fpga_id[0x8]; 45 | u8 fpga_device[0x18]; 46 | 47 | u8 register_file_ver[0x20]; 48 | 49 | u8 fpga_ctrl_modify[0x1]; 50 | u8 reserved_at_41[0x5]; 51 | u8 access_reg_query_mode[0x2]; 52 | u8 reserved_at_48[0x6]; 53 | u8 access_reg_modify_mode[0x2]; 54 | u8 reserved_at_50[0x10]; 55 | 56 | u8 reserved_at_60[0x20]; 57 | 58 | u8 image_version[0x20]; 59 | 60 | u8 image_date[0x20]; 61 | 62 | u8 image_time[0x20]; 63 | 64 | u8 shell_version[0x20]; 65 | 66 | u8 reserved_at_100[0x80]; 67 | 68 | struct mlx5_ifc_fpga_shell_caps_bits shell_caps; 69 | 70 | u8 reserved_at_380[0x8]; 71 | u8 ieee_vendor_id[0x18]; 72 | 73 | u8 sandbox_product_version[0x10]; 74 | u8 sandbox_product_id[0x10]; 75 | 76 | u8 sandbox_basic_caps[0x20]; 77 | 78 | u8 reserved_at_3e0[0x10]; 79 | u8 sandbox_extended_caps_len[0x10]; 80 | 81 | u8 sandbox_extended_caps_addr[0x40]; 82 | 83 | u8 fpga_ddr_start_addr[0x40]; 84 | 85 | u8 fpga_cr_space_start_addr[0x40]; 86 | 87 | u8 fpga_ddr_size[0x20]; 88 | 89 | u8 fpga_cr_space_size[0x20]; 90 | 91 | u8 reserved_at_500[0x300]; 92 | }; 93 | 94 | enum { 95 | MLX5_FPGA_CTRL_OPERATION_LOAD = 0x1, 96 | MLX5_FPGA_CTRL_OPERATION_RESET = 0x2, 97 | MLX5_FPGA_CTRL_OPERATION_FLASH_SELECT = 0x3, 98 | MLX5_FPGA_CTRL_OPERATION_SANDBOX_BYPASS_ON = 0x4, 99 | MLX5_FPGA_CTRL_OPERATION_SANDBOX_BYPASS_OFF = 0x5, 100 | MLX5_FPGA_CTRL_OPERATION_RESET_SANDBOX = 0x6, 101 | }; 102 | 103 | struct mlx5_ifc_fpga_ctrl_bits { 104 | u8 reserved_at_0[0x8]; 105 | u8 operation[0x8]; 106 | u8 reserved_at_10[0x8]; 107 | u8 status[0x8]; 108 | 109 | u8 reserved_at_20[0x8]; 110 | u8 flash_select_admin[0x8]; 111 | u8 reserved_at_30[0x8]; 112 | u8 flash_select_oper[0x8]; 113 | 114 | u8 reserved_at_40[0x40]; 115 | }; 116 | 117 | enum { 118 | MLX5_FPGA_ERROR_EVENT_SYNDROME_CORRUPTED_DDR = 0x1, 119 | MLX5_FPGA_ERROR_EVENT_SYNDROME_FLASH_TIMEOUT = 0x2, 120 | MLX5_FPGA_ERROR_EVENT_SYNDROME_INTERNAL_LINK_ERROR = 0x3, 121 | MLX5_FPGA_ERROR_EVENT_SYNDROME_WATCHDOG_FAILURE = 0x4, 122 | MLX5_FPGA_ERROR_EVENT_SYNDROME_I2C_FAILURE = 0x5, 123 | MLX5_FPGA_ERROR_EVENT_SYNDROME_IMAGE_CHANGED = 0x6, 124 | MLX5_FPGA_ERROR_EVENT_SYNDROME_TEMPERATURE_CRITICAL = 0x7, 125 | }; 126 | 127 | struct mlx5_ifc_fpga_error_event_bits { 128 | u8 reserved_at_0[0x40]; 129 | 130 | u8 reserved_at_40[0x18]; 131 | u8 syndrome[0x8]; 132 | 133 | u8 reserved_at_60[0x80]; 134 | }; 135 | 136 | #define MLX5_FPGA_ACCESS_REG_SIZE_MAX 64 137 | 138 | struct mlx5_ifc_fpga_access_reg_bits { 139 | u8 reserved_at_0[0x20]; 140 | 141 | u8 reserved_at_20[0x10]; 142 | u8 size[0x10]; 143 | 144 | u8 address[0x40]; 145 | 146 | u8 data[0][0x8]; 147 | }; 148 | 149 | enum mlx5_ifc_fpga_qp_state { 150 | MLX5_FPGA_QPC_STATE_INIT = 0x0, 151 | MLX5_FPGA_QPC_STATE_ACTIVE = 0x1, 152 | MLX5_FPGA_QPC_STATE_ERROR = 0x2, 153 | }; 
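Aside (editorial note, not part of mlx5_ifc_fpga.h): the `u8 name[0xN]` members in the structs above and below follow the mlx5 PRM convention — each array length is the field's width in *bits* within a big-endian command layout, not a byte count, and fields are accessed with macros in the spirit of the kernel's MLX5_SET()/MLX5_GET(). A minimal sketch of that accessor arithmetic, assuming the field is at most 32 bits wide and never crosses a 32-bit boundary (as the scalar fields in these layouts are); `prm_set`/`prm_get` are hypothetical names:

```c
#include <stdint.h>
#include <arpa/inet.h>  /* htonl()/ntohl() */

/* Write a 'bits'-wide field starting 'bit_off' bits into a big-endian
 * PRM buffer; bit 0 is the most significant bit of the first byte. */
static inline void prm_set(void *buf, unsigned int bit_off,
                           unsigned int bits, uint32_t val)
{
    uint32_t *dw = (uint32_t *)buf + bit_off / 32;  /* containing dword */
    unsigned int shift = 32 - bit_off % 32 - bits;  /* down from bit 31 */
    uint32_t mask = bits == 32 ? ~0U : (1U << bits) - 1;
    uint32_t host = ntohl(*dw);

    host = (host & ~(mask << shift)) | ((val & mask) << shift);
    *dw = htonl(host);
}

/* Read the same field back. */
static inline uint32_t prm_get(const void *buf, unsigned int bit_off,
                               unsigned int bits)
{
    const uint32_t *dw = (const uint32_t *)buf + bit_off / 32;
    unsigned int shift = 32 - bit_off % 32 - bits;
    uint32_t mask = bits == 32 ? ~0U : (1U << bits) - 1;

    return (ntohl(*dw) >> shift) & mask;
}
```

For example, `next_rcv_psn[0x18]` in `mlx5_ifc_fpga_qpc_bits` below sits 0x88 bits into the struct, so `prm_set(qpc, 0x88, 0x18, psn)` would set it.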
154 | 155 | enum mlx5_ifc_fpga_qp_type { 156 | MLX5_FPGA_QPC_QP_TYPE_SHELL_QP = 0x0, 157 | MLX5_FPGA_QPC_QP_TYPE_SANDBOX_QP = 0x1, 158 | }; 159 | 160 | enum mlx5_ifc_fpga_qp_service_type { 161 | MLX5_FPGA_QPC_ST_RC = 0x0, 162 | }; 163 | 164 | struct mlx5_ifc_fpga_qpc_bits { 165 | u8 state[0x4]; 166 | u8 reserved_at_4[0x1b]; 167 | u8 qp_type[0x1]; 168 | 169 | u8 reserved_at_20[0x4]; 170 | u8 st[0x4]; 171 | u8 reserved_at_28[0x10]; 172 | u8 traffic_class[0x8]; 173 | 174 | u8 ether_type[0x10]; 175 | u8 prio[0x3]; 176 | u8 dei[0x1]; 177 | u8 vid[0xc]; 178 | 179 | u8 reserved_at_60[0x20]; 180 | 181 | u8 reserved_at_80[0x8]; 182 | u8 next_rcv_psn[0x18]; 183 | 184 | u8 reserved_at_a0[0x8]; 185 | u8 next_send_psn[0x18]; 186 | 187 | u8 reserved_at_c0[0x10]; 188 | u8 pkey[0x10]; 189 | 190 | u8 reserved_at_e0[0x8]; 191 | u8 remote_qpn[0x18]; 192 | 193 | u8 reserved_at_100[0x15]; 194 | u8 rnr_retry[0x3]; 195 | u8 reserved_at_118[0x5]; 196 | u8 retry_count[0x3]; 197 | 198 | u8 reserved_at_120[0x20]; 199 | 200 | u8 reserved_at_140[0x10]; 201 | u8 remote_mac_47_32[0x10]; 202 | 203 | u8 remote_mac_31_0[0x20]; 204 | 205 | u8 remote_ip[16][0x8]; 206 | 207 | u8 reserved_at_200[0x40]; 208 | 209 | u8 reserved_at_240[0x10]; 210 | u8 fpga_mac_47_32[0x10]; 211 | 212 | u8 fpga_mac_31_0[0x20]; 213 | 214 | u8 fpga_ip[16][0x8]; 215 | }; 216 | 217 | struct mlx5_ifc_fpga_create_qp_in_bits { 218 | u8 opcode[0x10]; 219 | u8 reserved_at_10[0x10]; 220 | 221 | u8 reserved_at_20[0x10]; 222 | u8 op_mod[0x10]; 223 | 224 | u8 reserved_at_40[0x40]; 225 | 226 | struct mlx5_ifc_fpga_qpc_bits fpga_qpc; 227 | }; 228 | 229 | struct mlx5_ifc_fpga_create_qp_out_bits { 230 | u8 status[0x8]; 231 | u8 reserved_at_8[0x18]; 232 | 233 | u8 syndrome[0x20]; 234 | 235 | u8 reserved_at_40[0x8]; 236 | u8 fpga_qpn[0x18]; 237 | 238 | u8 reserved_at_60[0x20]; 239 | 240 | struct mlx5_ifc_fpga_qpc_bits fpga_qpc; 241 | }; 242 | 243 | struct mlx5_ifc_fpga_modify_qp_in_bits { 244 | u8 opcode[0x10]; 245 | u8 reserved_at_10[0x10]; 246 | 247 | u8 reserved_at_20[0x10]; 248 | u8 op_mod[0x10]; 249 | 250 | u8 reserved_at_40[0x8]; 251 | u8 fpga_qpn[0x18]; 252 | 253 | u8 field_select[0x20]; 254 | 255 | struct mlx5_ifc_fpga_qpc_bits fpga_qpc; 256 | }; 257 | 258 | struct mlx5_ifc_fpga_modify_qp_out_bits { 259 | u8 status[0x8]; 260 | u8 reserved_at_8[0x18]; 261 | 262 | u8 syndrome[0x20]; 263 | 264 | u8 reserved_at_40[0x40]; 265 | }; 266 | 267 | struct mlx5_ifc_fpga_query_qp_in_bits { 268 | u8 opcode[0x10]; 269 | u8 reserved_at_10[0x10]; 270 | 271 | u8 reserved_at_20[0x10]; 272 | u8 op_mod[0x10]; 273 | 274 | u8 reserved_at_40[0x8]; 275 | u8 fpga_qpn[0x18]; 276 | 277 | u8 reserved_at_60[0x20]; 278 | }; 279 | 280 | struct mlx5_ifc_fpga_query_qp_out_bits { 281 | u8 status[0x8]; 282 | u8 reserved_at_8[0x18]; 283 | 284 | u8 syndrome[0x20]; 285 | 286 | u8 reserved_at_40[0x40]; 287 | 288 | struct mlx5_ifc_fpga_qpc_bits fpga_qpc; 289 | }; 290 | 291 | struct mlx5_ifc_fpga_query_qp_counters_in_bits { 292 | u8 opcode[0x10]; 293 | u8 reserved_at_10[0x10]; 294 | 295 | u8 reserved_at_20[0x10]; 296 | u8 op_mod[0x10]; 297 | 298 | u8 clear[0x1]; 299 | u8 reserved_at_41[0x7]; 300 | u8 fpga_qpn[0x18]; 301 | 302 | u8 reserved_at_60[0x20]; 303 | }; 304 | 305 | struct mlx5_ifc_fpga_query_qp_counters_out_bits { 306 | u8 status[0x8]; 307 | u8 reserved_at_8[0x18]; 308 | 309 | u8 syndrome[0x20]; 310 | 311 | u8 reserved_at_40[0x40]; 312 | 313 | u8 rx_ack_packets[0x40]; 314 | 315 | u8 rx_send_packets[0x40]; 316 | 317 | u8 tx_ack_packets[0x40]; 318 | 319 | u8 tx_send_packets[0x40]; 320 | 321 | 
u8 rx_total_drop[0x40]; 322 | 323 | u8 reserved_at_1c0[0x1c0]; 324 | }; 325 | 326 | struct mlx5_ifc_fpga_destroy_qp_in_bits { 327 | u8 opcode[0x10]; 328 | u8 reserved_at_10[0x10]; 329 | 330 | u8 reserved_at_20[0x10]; 331 | u8 op_mod[0x10]; 332 | 333 | u8 reserved_at_40[0x8]; 334 | u8 fpga_qpn[0x18]; 335 | 336 | u8 reserved_at_60[0x20]; 337 | }; 338 | 339 | struct mlx5_ifc_fpga_destroy_qp_out_bits { 340 | u8 status[0x8]; 341 | u8 reserved_at_8[0x18]; 342 | 343 | u8 syndrome[0x20]; 344 | 345 | u8 reserved_at_40[0x40]; 346 | }; 347 | 348 | struct mlx5_ifc_ipsec_extended_cap_bits { 349 | u8 encapsulation[0x20]; 350 | 351 | u8 reserved_0[0x12]; 352 | u8 v2_command[0x1]; 353 | u8 udp_encap[0x1]; 354 | u8 rx_no_trailer[0x1]; 355 | u8 ipv4_fragment[0x1]; 356 | u8 ipv6[0x1]; 357 | u8 esn[0x1]; 358 | u8 lso[0x1]; 359 | u8 transport_and_tunnel_mode[0x1]; 360 | u8 tunnel_mode[0x1]; 361 | u8 transport_mode[0x1]; 362 | u8 ah_esp[0x1]; 363 | u8 esp[0x1]; 364 | u8 ah[0x1]; 365 | u8 ipv4_options[0x1]; 366 | 367 | u8 auth_alg[0x20]; 368 | 369 | u8 enc_alg[0x20]; 370 | 371 | u8 sa_cap[0x20]; 372 | 373 | u8 reserved_1[0x10]; 374 | u8 number_of_ipsec_counters[0x10]; 375 | 376 | u8 ipsec_counters_addr_low[0x20]; 377 | u8 ipsec_counters_addr_high[0x20]; 378 | }; 379 | 380 | struct mlx5_ifc_ipsec_counters_bits { 381 | u8 dec_in_packets[0x40]; 382 | 383 | u8 dec_out_packets[0x40]; 384 | 385 | u8 dec_bypass_packets[0x40]; 386 | 387 | u8 enc_in_packets[0x40]; 388 | 389 | u8 enc_out_packets[0x40]; 390 | 391 | u8 enc_bypass_packets[0x40]; 392 | 393 | u8 drop_dec_packets[0x40]; 394 | 395 | u8 failed_auth_dec_packets[0x40]; 396 | 397 | u8 drop_enc_packets[0x40]; 398 | 399 | u8 success_add_sa[0x40]; 400 | 401 | u8 fail_add_sa[0x40]; 402 | 403 | u8 success_delete_sa[0x40]; 404 | 405 | u8 fail_delete_sa[0x40]; 406 | 407 | u8 dropped_cmd[0x40]; 408 | }; 409 | 410 | enum mlx5_ifc_fpga_ipsec_response_syndrome { 411 | MLX5_FPGA_IPSEC_RESPONSE_SUCCESS = 0, 412 | MLX5_FPGA_IPSEC_RESPONSE_ILLEGAL_REQUEST = 1, 413 | MLX5_FPGA_IPSEC_RESPONSE_SADB_ISSUE = 2, 414 | MLX5_FPGA_IPSEC_RESPONSE_WRITE_RESPONSE_ISSUE = 3, 415 | }; 416 | 417 | struct mlx5_ifc_fpga_ipsec_cmd_resp { 418 | __be32 syndrome; 419 | union { 420 | __be32 sw_sa_handle; 421 | __be32 flags; 422 | }; 423 | u8 reserved[24]; 424 | } __packed; 425 | 426 | enum mlx5_ifc_fpga_ipsec_cmd_opcode { 427 | MLX5_FPGA_IPSEC_CMD_OP_ADD_SA = 0, 428 | MLX5_FPGA_IPSEC_CMD_OP_DEL_SA = 1, 429 | MLX5_FPGA_IPSEC_CMD_OP_ADD_SA_V2 = 2, 430 | MLX5_FPGA_IPSEC_CMD_OP_DEL_SA_V2 = 3, 431 | MLX5_FPGA_IPSEC_CMD_OP_MOD_SA_V2 = 4, 432 | MLX5_FPGA_IPSEC_CMD_OP_SET_CAP = 5, 433 | }; 434 | 435 | enum mlx5_ifc_fpga_ipsec_cap { 436 | MLX5_FPGA_IPSEC_CAP_NO_TRAILER = BIT(0), 437 | }; 438 | 439 | struct mlx5_ifc_fpga_ipsec_cmd_cap { 440 | __be32 cmd; 441 | __be32 flags; 442 | u8 reserved[24]; 443 | } __packed; 444 | 445 | enum mlx5_ifc_fpga_ipsec_sa_flags { 446 | MLX5_FPGA_IPSEC_SA_ESN_EN = BIT(0), 447 | MLX5_FPGA_IPSEC_SA_ESN_OVERLAP = BIT(1), 448 | MLX5_FPGA_IPSEC_SA_IPV6 = BIT(2), 449 | MLX5_FPGA_IPSEC_SA_DIR_SX = BIT(3), 450 | MLX5_FPGA_IPSEC_SA_SPI_EN = BIT(4), 451 | MLX5_FPGA_IPSEC_SA_SA_VALID = BIT(5), 452 | MLX5_FPGA_IPSEC_SA_IP_ESP = BIT(6), 453 | MLX5_FPGA_IPSEC_SA_IP_AH = BIT(7), 454 | }; 455 | 456 | enum mlx5_ifc_fpga_ipsec_sa_enc_mode { 457 | MLX5_FPGA_IPSEC_SA_ENC_MODE_NONE = 0, 458 | MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_128_AUTH_128 = 1, 459 | MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_256_AUTH_128 = 3, 460 | }; 461 | 462 | struct mlx5_ifc_fpga_ipsec_sa_v1 { 463 | __be32 cmd; 464 | u8 key_enc[32]; 465 
| u8 key_auth[32]; 466 | __be32 sip[4]; 467 | __be32 dip[4]; 468 | union { 469 | struct { 470 | __be32 reserved; 471 | u8 salt_iv[8]; 472 | __be32 salt; 473 | } __packed gcm; 474 | struct { 475 | u8 salt[16]; 476 | } __packed cbc; 477 | }; 478 | __be32 spi; 479 | __be32 sw_sa_handle; 480 | __be16 tfclen; 481 | u8 enc_mode; 482 | u8 reserved1[2]; 483 | u8 flags; 484 | u8 reserved2[2]; 485 | }; 486 | 487 | struct mlx5_ifc_fpga_ipsec_sa { 488 | struct mlx5_ifc_fpga_ipsec_sa_v1 ipsec_sa_v1; 489 | __be16 udp_sp; 490 | __be16 udp_dp; 491 | u8 reserved1[4]; 492 | __be32 esn; 493 | __be16 vid; /* only 12 bits, rest is reserved */ 494 | __be16 reserved2; 495 | } __packed; 496 | 497 | #endif /* MLX5_IFC_FPGA_H */ 498 | -------------------------------------------------------------------------------- /kernel-headers/uapi/rdma/ib_user_ioctl_cmds.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) Mellanox Technologies Ltd, 2001-2018. ALL RIGHTS RESERVED. 3 | * 4 | * See file LICENSE for terms. 5 | */ 6 | 7 | #ifndef IB_USER_IOCTL_CMDS_H 8 | #define IB_USER_IOCTL_CMDS_H 9 | 10 | #define UVERBS_ID_NS_MASK 0xF000 11 | #define UVERBS_ID_NS_SHIFT 12 12 | 13 | #define UVERBS_UDATA_DRIVER_DATA_NS 1 14 | #define UVERBS_UDATA_DRIVER_DATA_FLAG (1UL << UVERBS_ID_NS_SHIFT) 15 | 16 | enum uverbs_default_objects { 17 | UVERBS_OBJECT_DEVICE, /* No instances of DEVICE are allowed */ 18 | UVERBS_OBJECT_PD, 19 | UVERBS_OBJECT_COMP_CHANNEL, 20 | UVERBS_OBJECT_CQ, 21 | UVERBS_OBJECT_QP, 22 | UVERBS_OBJECT_SRQ, 23 | UVERBS_OBJECT_AH, 24 | UVERBS_OBJECT_MR, 25 | UVERBS_OBJECT_MW, 26 | UVERBS_OBJECT_FLOW, 27 | UVERBS_OBJECT_XRCD, 28 | UVERBS_OBJECT_RWQ_IND_TBL, 29 | UVERBS_OBJECT_WQ, 30 | UVERBS_OBJECT_FLOW_ACTION, 31 | UVERBS_OBJECT_DM, 32 | }; 33 | 34 | enum { 35 | UVERBS_ATTR_UHW_IN = UVERBS_UDATA_DRIVER_DATA_FLAG, 36 | UVERBS_ATTR_UHW_OUT, 37 | }; 38 | 39 | enum uverbs_attrs_create_cq_cmd_attr_ids { 40 | UVERBS_ATTR_CREATE_CQ_HANDLE, 41 | UVERBS_ATTR_CREATE_CQ_CQE, 42 | UVERBS_ATTR_CREATE_CQ_USER_HANDLE, 43 | UVERBS_ATTR_CREATE_CQ_COMP_CHANNEL, 44 | UVERBS_ATTR_CREATE_CQ_COMP_VECTOR, 45 | UVERBS_ATTR_CREATE_CQ_FLAGS, 46 | UVERBS_ATTR_CREATE_CQ_RESP_CQE, 47 | }; 48 | 49 | enum uverbs_attrs_destroy_cq_cmd_attr_ids { 50 | UVERBS_ATTR_DESTROY_CQ_HANDLE, 51 | UVERBS_ATTR_DESTROY_CQ_RESP, 52 | }; 53 | 54 | enum uverbs_attrs_create_flow_action_esp { 55 | UVERBS_ATTR_FLOW_ACTION_ESP_HANDLE, 56 | UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS, 57 | UVERBS_ATTR_FLOW_ACTION_ESP_ESN, 58 | UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT, 59 | UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY, 60 | UVERBS_ATTR_FLOW_ACTION_ESP_ENCAP, 61 | }; 62 | 63 | enum uverbs_attrs_destroy_flow_action_esp { 64 | UVERBS_ATTR_DESTROY_FLOW_ACTION_HANDLE, 65 | }; 66 | 67 | enum uverbs_methods_cq { 68 | UVERBS_METHOD_CQ_CREATE, 69 | UVERBS_METHOD_CQ_DESTROY, 70 | }; 71 | 72 | enum uverbs_methods_actions_flow_action_ops { 73 | UVERBS_METHOD_FLOW_ACTION_ESP_CREATE, 74 | UVERBS_METHOD_FLOW_ACTION_DESTROY, 75 | UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY, 76 | }; 77 | 78 | enum uverbs_attrs_alloc_dm_cmd_attr_ids { 79 | UVERBS_ATTR_ALLOC_DM_HANDLE, 80 | UVERBS_ATTR_ALLOC_DM_LENGTH, 81 | UVERBS_ATTR_ALLOC_DM_ALIGNMENT, 82 | }; 83 | 84 | enum uverbs_attrs_free_dm_cmd_attr_ids { 85 | UVERBS_ATTR_FREE_DM_HANDLE, 86 | }; 87 | 88 | enum uverbs_methods_dm { 89 | UVERBS_METHOD_DM_ALLOC, 90 | UVERBS_METHOD_DM_FREE, 91 | }; 92 | 93 | enum uverbs_attrs_reg_dm_mr_cmd_attr_ids { 94 | UVERBS_ATTR_REG_DM_MR_HANDLE, 95 | UVERBS_ATTR_REG_DM_MR_OFFSET, 96 | 
UVERBS_ATTR_REG_DM_MR_LENGTH,
97 |     UVERBS_ATTR_REG_DM_MR_PD_HANDLE,
98 |     UVERBS_ATTR_REG_DM_MR_ACCESS_FLAGS,
99 |     UVERBS_ATTR_REG_DM_MR_DM_HANDLE,
100 |     UVERBS_ATTR_REG_DM_MR_RESP_LKEY,
101 |     UVERBS_ATTR_REG_DM_MR_RESP_RKEY,
102 | };
103 | 
104 | enum uverbs_methods_mr {
105 |     UVERBS_METHOD_DM_MR_REG,
106 | };
107 | 
108 | #endif
109 | 
--------------------------------------------------------------------------------
/kernel-headers/uapi/rdma/ib_user_verbs.h:
--------------------------------------------------------------------------------
1 | /* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */
2 | /*
3 |  * Copyright (c) 2005 Topspin Communications. All rights reserved.
4 |  * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
5 |  * Copyright (c) 2005 PathScale, Inc. All rights reserved.
6 |  * Copyright (c) 2006 Mellanox Technologies. All rights reserved.
7 |  *
8 |  * This software is available to you under a choice of one of two
9 |  * licenses. You may choose to be licensed under the terms of the GNU
10 |  * General Public License (GPL) Version 2, available from the file
11 |  * COPYING in the main directory of this source tree, or the
12 |  * OpenIB.org BSD license below:
13 |  *
14 |  * Redistribution and use in source and binary forms, with or
15 |  * without modification, are permitted provided that the following
16 |  * conditions are met:
17 |  *
18 |  * - Redistributions of source code must retain the above
19 |  *   copyright notice, this list of conditions and the following
20 |  *   disclaimer.
21 |  *
22 |  * - Redistributions in binary form must reproduce the above
23 |  *   copyright notice, this list of conditions and the following
24 |  *   disclaimer in the documentation and/or other materials
25 |  *   provided with the distribution.
26 |  *
27 |  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
28 |  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
29 |  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
30 |  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
31 |  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
32 |  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
33 |  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 |  * SOFTWARE.
35 |  */
36 | 
37 | #ifndef IB_USER_VERBS_H
38 | #define IB_USER_VERBS_H
39 | 
40 | #include <linux/types.h>
41 | 
42 | /*
43 |  * Increment this value if any changes that break userspace ABI
44 |  * compatibility are made.
45 | */ 46 | #define IB_USER_VERBS_ABI_VERSION 6 47 | #define IB_USER_VERBS_CMD_THRESHOLD 50 48 | 49 | enum { 50 | IB_USER_VERBS_CMD_GET_CONTEXT, 51 | IB_USER_VERBS_CMD_QUERY_DEVICE, 52 | IB_USER_VERBS_CMD_QUERY_PORT, 53 | IB_USER_VERBS_CMD_ALLOC_PD, 54 | IB_USER_VERBS_CMD_DEALLOC_PD, 55 | IB_USER_VERBS_CMD_CREATE_AH, 56 | IB_USER_VERBS_CMD_MODIFY_AH, 57 | IB_USER_VERBS_CMD_QUERY_AH, 58 | IB_USER_VERBS_CMD_DESTROY_AH, 59 | IB_USER_VERBS_CMD_REG_MR, 60 | IB_USER_VERBS_CMD_REG_SMR, 61 | IB_USER_VERBS_CMD_REREG_MR, 62 | IB_USER_VERBS_CMD_QUERY_MR, 63 | IB_USER_VERBS_CMD_DEREG_MR, 64 | IB_USER_VERBS_CMD_ALLOC_MW, 65 | IB_USER_VERBS_CMD_BIND_MW, 66 | IB_USER_VERBS_CMD_DEALLOC_MW, 67 | IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL, 68 | IB_USER_VERBS_CMD_CREATE_CQ, 69 | IB_USER_VERBS_CMD_RESIZE_CQ, 70 | IB_USER_VERBS_CMD_DESTROY_CQ, 71 | IB_USER_VERBS_CMD_POLL_CQ, 72 | IB_USER_VERBS_CMD_PEEK_CQ, 73 | IB_USER_VERBS_CMD_REQ_NOTIFY_CQ, 74 | IB_USER_VERBS_CMD_CREATE_QP, 75 | IB_USER_VERBS_CMD_QUERY_QP, 76 | IB_USER_VERBS_CMD_MODIFY_QP, 77 | IB_USER_VERBS_CMD_DESTROY_QP, 78 | IB_USER_VERBS_CMD_POST_SEND, 79 | IB_USER_VERBS_CMD_POST_RECV, 80 | IB_USER_VERBS_CMD_ATTACH_MCAST, 81 | IB_USER_VERBS_CMD_DETACH_MCAST, 82 | IB_USER_VERBS_CMD_CREATE_SRQ, 83 | IB_USER_VERBS_CMD_MODIFY_SRQ, 84 | IB_USER_VERBS_CMD_QUERY_SRQ, 85 | IB_USER_VERBS_CMD_DESTROY_SRQ, 86 | IB_USER_VERBS_CMD_POST_SRQ_RECV, 87 | IB_USER_VERBS_CMD_OPEN_XRCD, 88 | IB_USER_VERBS_CMD_CLOSE_XRCD, 89 | IB_USER_VERBS_CMD_CREATE_XSRQ, 90 | IB_USER_VERBS_CMD_OPEN_QP, 91 | }; 92 | 93 | enum { 94 | IB_USER_VERBS_EX_CMD_QUERY_DEVICE = IB_USER_VERBS_CMD_QUERY_DEVICE, 95 | IB_USER_VERBS_EX_CMD_CREATE_CQ = IB_USER_VERBS_CMD_CREATE_CQ, 96 | IB_USER_VERBS_EX_CMD_CREATE_QP = IB_USER_VERBS_CMD_CREATE_QP, 97 | IB_USER_VERBS_EX_CMD_MODIFY_QP = IB_USER_VERBS_CMD_MODIFY_QP, 98 | IB_USER_VERBS_EX_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD, 99 | IB_USER_VERBS_EX_CMD_DESTROY_FLOW, 100 | IB_USER_VERBS_EX_CMD_CREATE_WQ, 101 | IB_USER_VERBS_EX_CMD_MODIFY_WQ, 102 | IB_USER_VERBS_EX_CMD_DESTROY_WQ, 103 | IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL, 104 | IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL, 105 | IB_USER_VERBS_EX_CMD_MODIFY_CQ 106 | }; 107 | 108 | /* 109 | * Make sure that all structs defined in this file remain laid out so 110 | * that they pack the same way on 32-bit and 64-bit architectures (to 111 | * avoid incompatibility between 32-bit userspace and 64-bit kernels). 112 | * Specifically: 113 | * - Do not use pointer types -- pass pointers in __u64 instead. 114 | * - Make sure that any structure larger than 4 bytes is padded to a 115 | * multiple of 8 bytes. Otherwise the structure size will be 116 | * different between 32-bit and 64-bit architectures. 117 | */ 118 | 119 | struct ib_uverbs_async_event_desc { 120 | __aligned_u64 element; 121 | __u32 event_type; /* enum ib_event_type */ 122 | __u32 reserved; 123 | }; 124 | 125 | struct ib_uverbs_comp_event_desc { 126 | __aligned_u64 cq_handle; 127 | }; 128 | 129 | struct ib_uverbs_cq_moderation_caps { 130 | __u16 max_cq_moderation_count; 131 | __u16 max_cq_moderation_period; 132 | __u32 reserved; 133 | }; 134 | 135 | /* 136 | * All commands from userspace should start with a __u32 command field 137 | * followed by __u16 in_words and out_words fields (which give the 138 | * length of the command block and response buffer if any in 32-bit 139 | * words). The kernel driver will read these fields first and read 140 | * the rest of the command struct based on these value. 
141 | */ 142 | 143 | #define IB_USER_VERBS_CMD_COMMAND_MASK 0xff 144 | #define IB_USER_VERBS_CMD_FLAG_EXTENDED 0x80000000u 145 | 146 | struct ib_uverbs_cmd_hdr { 147 | __u32 command; 148 | __u16 in_words; 149 | __u16 out_words; 150 | }; 151 | 152 | struct ib_uverbs_ex_cmd_hdr { 153 | __aligned_u64 response; 154 | __u16 provider_in_words; 155 | __u16 provider_out_words; 156 | __u32 cmd_hdr_reserved; 157 | }; 158 | 159 | struct ib_uverbs_get_context { 160 | __aligned_u64 response; 161 | __aligned_u64 driver_data[0]; 162 | }; 163 | 164 | struct ib_uverbs_get_context_resp { 165 | __u32 async_fd; 166 | __u32 num_comp_vectors; 167 | }; 168 | 169 | struct ib_uverbs_query_device { 170 | __aligned_u64 response; 171 | __aligned_u64 driver_data[0]; 172 | }; 173 | 174 | struct ib_uverbs_query_device_resp { 175 | __aligned_u64 fw_ver; 176 | __be64 node_guid; 177 | __be64 sys_image_guid; 178 | __aligned_u64 max_mr_size; 179 | __aligned_u64 page_size_cap; 180 | __u32 vendor_id; 181 | __u32 vendor_part_id; 182 | __u32 hw_ver; 183 | __u32 max_qp; 184 | __u32 max_qp_wr; 185 | __u32 device_cap_flags; 186 | __u32 max_sge; 187 | __u32 max_sge_rd; 188 | __u32 max_cq; 189 | __u32 max_cqe; 190 | __u32 max_mr; 191 | __u32 max_pd; 192 | __u32 max_qp_rd_atom; 193 | __u32 max_ee_rd_atom; 194 | __u32 max_res_rd_atom; 195 | __u32 max_qp_init_rd_atom; 196 | __u32 max_ee_init_rd_atom; 197 | __u32 atomic_cap; 198 | __u32 max_ee; 199 | __u32 max_rdd; 200 | __u32 max_mw; 201 | __u32 max_raw_ipv6_qp; 202 | __u32 max_raw_ethy_qp; 203 | __u32 max_mcast_grp; 204 | __u32 max_mcast_qp_attach; 205 | __u32 max_total_mcast_qp_attach; 206 | __u32 max_ah; 207 | __u32 max_fmr; 208 | __u32 max_map_per_fmr; 209 | __u32 max_srq; 210 | __u32 max_srq_wr; 211 | __u32 max_srq_sge; 212 | __u16 max_pkeys; 213 | __u8 local_ca_ack_delay; 214 | __u8 phys_port_cnt; 215 | __u8 reserved[4]; 216 | }; 217 | 218 | struct ib_uverbs_ex_query_device { 219 | __u32 comp_mask; 220 | __u32 reserved; 221 | }; 222 | 223 | struct ib_uverbs_odp_caps { 224 | __aligned_u64 general_caps; 225 | struct { 226 | __u32 rc_odp_caps; 227 | __u32 uc_odp_caps; 228 | __u32 ud_odp_caps; 229 | } per_transport_caps; 230 | __u32 reserved; 231 | }; 232 | 233 | struct ib_uverbs_rss_caps { 234 | /* Corresponding bit will be set if qp type from 235 | * 'enum ib_qp_type' is supported, e.g. 
236 | * supported_qpts |= 1 << IB_QPT_UD 237 | */ 238 | __u32 supported_qpts; 239 | __u32 max_rwq_indirection_tables; 240 | __u32 max_rwq_indirection_table_size; 241 | __u32 reserved; 242 | }; 243 | 244 | struct ib_uverbs_tm_caps { 245 | /* Max size of rendezvous request message */ 246 | __u32 max_rndv_hdr_size; 247 | /* Max number of entries in tag matching list */ 248 | __u32 max_num_tags; 249 | /* TM flags */ 250 | __u32 flags; 251 | /* Max number of outstanding list operations */ 252 | __u32 max_ops; 253 | /* Max number of SGE in tag matching entry */ 254 | __u32 max_sge; 255 | __u32 reserved; 256 | }; 257 | 258 | struct ib_uverbs_ex_query_device_resp { 259 | struct ib_uverbs_query_device_resp base; 260 | __u32 comp_mask; 261 | __u32 response_length; 262 | struct ib_uverbs_odp_caps odp_caps; 263 | __aligned_u64 timestamp_mask; 264 | __aligned_u64 hca_core_clock; /* in KHZ */ 265 | __aligned_u64 device_cap_flags_ex; 266 | struct ib_uverbs_rss_caps rss_caps; 267 | __u32 max_wq_type_rq; 268 | __u32 raw_packet_caps; 269 | struct ib_uverbs_tm_caps tm_caps; 270 | struct ib_uverbs_cq_moderation_caps cq_moderation_caps; 271 | __aligned_u64 max_dm_size; 272 | }; 273 | 274 | struct ib_uverbs_query_port { 275 | __aligned_u64 response; 276 | __u8 port_num; 277 | __u8 reserved[7]; 278 | __aligned_u64 driver_data[0]; 279 | }; 280 | 281 | struct ib_uverbs_query_port_resp { 282 | __u32 port_cap_flags; 283 | __u32 max_msg_sz; 284 | __u32 bad_pkey_cntr; 285 | __u32 qkey_viol_cntr; 286 | __u32 gid_tbl_len; 287 | __u16 pkey_tbl_len; 288 | __u16 lid; 289 | __u16 sm_lid; 290 | __u8 state; 291 | __u8 max_mtu; 292 | __u8 active_mtu; 293 | __u8 lmc; 294 | __u8 max_vl_num; 295 | __u8 sm_sl; 296 | __u8 subnet_timeout; 297 | __u8 init_type_reply; 298 | __u8 active_width; 299 | __u8 active_speed; 300 | __u8 phys_state; 301 | __u8 link_layer; 302 | __u8 reserved[2]; 303 | }; 304 | 305 | struct ib_uverbs_alloc_pd { 306 | __aligned_u64 response; 307 | __aligned_u64 driver_data[0]; 308 | }; 309 | 310 | struct ib_uverbs_alloc_pd_resp { 311 | __u32 pd_handle; 312 | }; 313 | 314 | struct ib_uverbs_dealloc_pd { 315 | __u32 pd_handle; 316 | }; 317 | 318 | struct ib_uverbs_open_xrcd { 319 | __aligned_u64 response; 320 | __u32 fd; 321 | __u32 oflags; 322 | __aligned_u64 driver_data[0]; 323 | }; 324 | 325 | struct ib_uverbs_open_xrcd_resp { 326 | __u32 xrcd_handle; 327 | }; 328 | 329 | struct ib_uverbs_close_xrcd { 330 | __u32 xrcd_handle; 331 | }; 332 | 333 | struct ib_uverbs_reg_mr { 334 | __aligned_u64 response; 335 | __aligned_u64 start; 336 | __aligned_u64 length; 337 | __aligned_u64 hca_va; 338 | __u32 pd_handle; 339 | __u32 access_flags; 340 | __aligned_u64 driver_data[0]; 341 | }; 342 | 343 | struct ib_uverbs_reg_mr_resp { 344 | __u32 mr_handle; 345 | __u32 lkey; 346 | __u32 rkey; 347 | }; 348 | 349 | struct ib_uverbs_rereg_mr { 350 | __aligned_u64 response; 351 | __u32 mr_handle; 352 | __u32 flags; 353 | __aligned_u64 start; 354 | __aligned_u64 length; 355 | __aligned_u64 hca_va; 356 | __u32 pd_handle; 357 | __u32 access_flags; 358 | }; 359 | 360 | struct ib_uverbs_rereg_mr_resp { 361 | __u32 lkey; 362 | __u32 rkey; 363 | }; 364 | 365 | struct ib_uverbs_dereg_mr { 366 | __u32 mr_handle; 367 | }; 368 | 369 | struct ib_uverbs_alloc_mw { 370 | __aligned_u64 response; 371 | __u32 pd_handle; 372 | __u8 mw_type; 373 | __u8 reserved[3]; 374 | }; 375 | 376 | struct ib_uverbs_alloc_mw_resp { 377 | __u32 mw_handle; 378 | __u32 rkey; 379 | }; 380 | 381 | struct ib_uverbs_dealloc_mw { 382 | __u32 mw_handle; 383 | }; 384 | 385 | 
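Aside (editorial note, not part of ib_user_verbs.h): the comment before `ib_uverbs_cmd_hdr` above describes the legacy write() command ABI — each command is the header followed by the command struct, with lengths counted in 32-bit words and the response buffer passed as a user pointer in a `__u64` field. A hypothetical sketch of marshalling one such command, `IB_USER_VERBS_CMD_ALLOC_PD` (error handling elided; `fd` is assumed to be an open `/dev/infiniband/uverbsN` descriptor and this header is assumed to be included):

```c
#include <stdint.h>
#include <string.h>
#include <unistd.h>

static int alloc_pd_example(int fd, __u32 *pd_handle)
{
    struct ib_uverbs_alloc_pd_resp resp;
    struct {
        struct ib_uverbs_cmd_hdr hdr;
        struct ib_uverbs_alloc_pd cmd;
    } req;

    memset(&req, 0, sizeof(req));
    req.hdr.command = IB_USER_VERBS_CMD_ALLOC_PD;
    req.hdr.in_words = sizeof(req) / 4;   /* total length, in 32-bit words */
    req.hdr.out_words = sizeof(resp) / 4;
    req.cmd.response = (uintptr_t)&resp;  /* pointer passed as __u64 */

    if (write(fd, &req, sizeof(req)) != (ssize_t)sizeof(req))
        return -1;

    *pd_handle = resp.pd_handle;
    return 0;
}
```

In practice libibverbs performs this marshalling for applications; the sketch only illustrates the layout that the header mandates.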
struct ib_uverbs_create_comp_channel { 386 | __aligned_u64 response; 387 | }; 388 | 389 | struct ib_uverbs_create_comp_channel_resp { 390 | __u32 fd; 391 | }; 392 | 393 | struct ib_uverbs_create_cq { 394 | __aligned_u64 response; 395 | __aligned_u64 user_handle; 396 | __u32 cqe; 397 | __u32 comp_vector; 398 | __s32 comp_channel; 399 | __u32 reserved; 400 | __aligned_u64 driver_data[0]; 401 | }; 402 | 403 | enum ib_uverbs_ex_create_cq_flags { 404 | IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION = 1 << 0, 405 | IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN = 1 << 1, 406 | }; 407 | 408 | struct ib_uverbs_ex_create_cq { 409 | __aligned_u64 user_handle; 410 | __u32 cqe; 411 | __u32 comp_vector; 412 | __s32 comp_channel; 413 | __u32 comp_mask; 414 | __u32 flags; /* bitmask of ib_uverbs_ex_create_cq_flags */ 415 | __u32 reserved; 416 | }; 417 | 418 | struct ib_uverbs_create_cq_resp { 419 | __u32 cq_handle; 420 | __u32 cqe; 421 | }; 422 | 423 | struct ib_uverbs_ex_create_cq_resp { 424 | struct ib_uverbs_create_cq_resp base; 425 | __u32 comp_mask; 426 | __u32 response_length; 427 | }; 428 | 429 | struct ib_uverbs_resize_cq { 430 | __aligned_u64 response; 431 | __u32 cq_handle; 432 | __u32 cqe; 433 | __aligned_u64 driver_data[0]; 434 | }; 435 | 436 | struct ib_uverbs_resize_cq_resp { 437 | __u32 cqe; 438 | __u32 reserved; 439 | __aligned_u64 driver_data[0]; 440 | }; 441 | 442 | struct ib_uverbs_poll_cq { 443 | __aligned_u64 response; 444 | __u32 cq_handle; 445 | __u32 ne; 446 | }; 447 | 448 | struct ib_uverbs_wc { 449 | __aligned_u64 wr_id; 450 | __u32 status; 451 | __u32 opcode; 452 | __u32 vendor_err; 453 | __u32 byte_len; 454 | union { 455 | __be32 imm_data; 456 | __u32 invalidate_rkey; 457 | } ex; 458 | __u32 qp_num; 459 | __u32 src_qp; 460 | __u32 wc_flags; 461 | __u16 pkey_index; 462 | __u16 slid; 463 | __u8 sl; 464 | __u8 dlid_path_bits; 465 | __u8 port_num; 466 | __u8 reserved; 467 | }; 468 | 469 | struct ib_uverbs_poll_cq_resp { 470 | __u32 count; 471 | __u32 reserved; 472 | struct ib_uverbs_wc wc[0]; 473 | }; 474 | 475 | struct ib_uverbs_req_notify_cq { 476 | __u32 cq_handle; 477 | __u32 solicited_only; 478 | }; 479 | 480 | struct ib_uverbs_destroy_cq { 481 | __aligned_u64 response; 482 | __u32 cq_handle; 483 | __u32 reserved; 484 | }; 485 | 486 | struct ib_uverbs_destroy_cq_resp { 487 | __u32 comp_events_reported; 488 | __u32 async_events_reported; 489 | }; 490 | 491 | struct ib_uverbs_global_route { 492 | __u8 dgid[16]; 493 | __u32 flow_label; 494 | __u8 sgid_index; 495 | __u8 hop_limit; 496 | __u8 traffic_class; 497 | __u8 reserved; 498 | }; 499 | 500 | struct ib_uverbs_ah_attr { 501 | struct ib_uverbs_global_route grh; 502 | __u16 dlid; 503 | __u8 sl; 504 | __u8 src_path_bits; 505 | __u8 static_rate; 506 | __u8 is_global; 507 | __u8 port_num; 508 | __u8 reserved; 509 | }; 510 | 511 | struct ib_uverbs_qp_attr { 512 | __u32 qp_attr_mask; 513 | __u32 qp_state; 514 | __u32 cur_qp_state; 515 | __u32 path_mtu; 516 | __u32 path_mig_state; 517 | __u32 qkey; 518 | __u32 rq_psn; 519 | __u32 sq_psn; 520 | __u32 dest_qp_num; 521 | __u32 qp_access_flags; 522 | 523 | struct ib_uverbs_ah_attr ah_attr; 524 | struct ib_uverbs_ah_attr alt_ah_attr; 525 | 526 | /* ib_qp_cap */ 527 | __u32 max_send_wr; 528 | __u32 max_recv_wr; 529 | __u32 max_send_sge; 530 | __u32 max_recv_sge; 531 | __u32 max_inline_data; 532 | 533 | __u16 pkey_index; 534 | __u16 alt_pkey_index; 535 | __u8 en_sqd_async_notify; 536 | __u8 sq_draining; 537 | __u8 max_rd_atomic; 538 | __u8 max_dest_rd_atomic; 539 | __u8 min_rnr_timer; 540 | __u8 port_num; 541 
| __u8 timeout; 542 | __u8 retry_cnt; 543 | __u8 rnr_retry; 544 | __u8 alt_port_num; 545 | __u8 alt_timeout; 546 | __u8 reserved[5]; 547 | }; 548 | 549 | struct ib_uverbs_create_qp { 550 | __aligned_u64 response; 551 | __aligned_u64 user_handle; 552 | __u32 pd_handle; 553 | __u32 send_cq_handle; 554 | __u32 recv_cq_handle; 555 | __u32 srq_handle; 556 | __u32 max_send_wr; 557 | __u32 max_recv_wr; 558 | __u32 max_send_sge; 559 | __u32 max_recv_sge; 560 | __u32 max_inline_data; 561 | __u8 sq_sig_all; 562 | __u8 qp_type; 563 | __u8 is_srq; 564 | __u8 reserved; 565 | __aligned_u64 driver_data[0]; 566 | }; 567 | 568 | enum ib_uverbs_create_qp_mask { 569 | IB_UVERBS_CREATE_QP_MASK_IND_TABLE = 1UL << 0, 570 | }; 571 | 572 | enum { 573 | IB_UVERBS_CREATE_QP_SUP_COMP_MASK = IB_UVERBS_CREATE_QP_MASK_IND_TABLE, 574 | }; 575 | 576 | enum { 577 | /* 578 | * This value is equal to IB_QP_DEST_QPN. 579 | */ 580 | IB_USER_LEGACY_LAST_QP_ATTR_MASK = 1ULL << 20, 581 | }; 582 | 583 | enum { 584 | /* 585 | * This value is equal to IB_QP_RATE_LIMIT. 586 | */ 587 | IB_USER_LAST_QP_ATTR_MASK = 1ULL << 25, 588 | }; 589 | 590 | struct ib_uverbs_ex_create_qp { 591 | __aligned_u64 user_handle; 592 | __u32 pd_handle; 593 | __u32 send_cq_handle; 594 | __u32 recv_cq_handle; 595 | __u32 srq_handle; 596 | __u32 max_send_wr; 597 | __u32 max_recv_wr; 598 | __u32 max_send_sge; 599 | __u32 max_recv_sge; 600 | __u32 max_inline_data; 601 | __u8 sq_sig_all; 602 | __u8 qp_type; 603 | __u8 is_srq; 604 | __u8 reserved; 605 | __u32 comp_mask; 606 | __u32 create_flags; 607 | __u32 rwq_ind_tbl_handle; 608 | __u32 source_qpn; 609 | }; 610 | 611 | struct ib_uverbs_open_qp { 612 | __aligned_u64 response; 613 | __aligned_u64 user_handle; 614 | __u32 pd_handle; 615 | __u32 qpn; 616 | __u8 qp_type; 617 | __u8 reserved[7]; 618 | __aligned_u64 driver_data[0]; 619 | }; 620 | 621 | /* also used for open response */ 622 | struct ib_uverbs_create_qp_resp { 623 | __u32 qp_handle; 624 | __u32 qpn; 625 | __u32 max_send_wr; 626 | __u32 max_recv_wr; 627 | __u32 max_send_sge; 628 | __u32 max_recv_sge; 629 | __u32 max_inline_data; 630 | __u32 reserved; 631 | }; 632 | 633 | struct ib_uverbs_ex_create_qp_resp { 634 | struct ib_uverbs_create_qp_resp base; 635 | __u32 comp_mask; 636 | __u32 response_length; 637 | }; 638 | 639 | /* 640 | * This struct needs to remain a multiple of 8 bytes to keep the 641 | * alignment of the modify QP parameters. 
642 | */ 643 | struct ib_uverbs_qp_dest { 644 | __u8 dgid[16]; 645 | __u32 flow_label; 646 | __u16 dlid; 647 | __u16 reserved; 648 | __u8 sgid_index; 649 | __u8 hop_limit; 650 | __u8 traffic_class; 651 | __u8 sl; 652 | __u8 src_path_bits; 653 | __u8 static_rate; 654 | __u8 is_global; 655 | __u8 port_num; 656 | }; 657 | 658 | struct ib_uverbs_query_qp { 659 | __aligned_u64 response; 660 | __u32 qp_handle; 661 | __u32 attr_mask; 662 | __aligned_u64 driver_data[0]; 663 | }; 664 | 665 | struct ib_uverbs_query_qp_resp { 666 | struct ib_uverbs_qp_dest dest; 667 | struct ib_uverbs_qp_dest alt_dest; 668 | __u32 max_send_wr; 669 | __u32 max_recv_wr; 670 | __u32 max_send_sge; 671 | __u32 max_recv_sge; 672 | __u32 max_inline_data; 673 | __u32 qkey; 674 | __u32 rq_psn; 675 | __u32 sq_psn; 676 | __u32 dest_qp_num; 677 | __u32 qp_access_flags; 678 | __u16 pkey_index; 679 | __u16 alt_pkey_index; 680 | __u8 qp_state; 681 | __u8 cur_qp_state; 682 | __u8 path_mtu; 683 | __u8 path_mig_state; 684 | __u8 sq_draining; 685 | __u8 max_rd_atomic; 686 | __u8 max_dest_rd_atomic; 687 | __u8 min_rnr_timer; 688 | __u8 port_num; 689 | __u8 timeout; 690 | __u8 retry_cnt; 691 | __u8 rnr_retry; 692 | __u8 alt_port_num; 693 | __u8 alt_timeout; 694 | __u8 sq_sig_all; 695 | __u8 reserved[5]; 696 | __aligned_u64 driver_data[0]; 697 | }; 698 | 699 | struct ib_uverbs_modify_qp { 700 | struct ib_uverbs_qp_dest dest; 701 | struct ib_uverbs_qp_dest alt_dest; 702 | __u32 qp_handle; 703 | __u32 attr_mask; 704 | __u32 qkey; 705 | __u32 rq_psn; 706 | __u32 sq_psn; 707 | __u32 dest_qp_num; 708 | __u32 qp_access_flags; 709 | __u16 pkey_index; 710 | __u16 alt_pkey_index; 711 | __u8 qp_state; 712 | __u8 cur_qp_state; 713 | __u8 path_mtu; 714 | __u8 path_mig_state; 715 | __u8 en_sqd_async_notify; 716 | __u8 max_rd_atomic; 717 | __u8 max_dest_rd_atomic; 718 | __u8 min_rnr_timer; 719 | __u8 port_num; 720 | __u8 timeout; 721 | __u8 retry_cnt; 722 | __u8 rnr_retry; 723 | __u8 alt_port_num; 724 | __u8 alt_timeout; 725 | __u8 reserved[2]; 726 | __aligned_u64 driver_data[0]; 727 | }; 728 | 729 | struct ib_uverbs_ex_modify_qp { 730 | struct ib_uverbs_modify_qp base; 731 | __u32 rate_limit; 732 | __u32 reserved; 733 | }; 734 | 735 | struct ib_uverbs_modify_qp_resp { 736 | }; 737 | 738 | struct ib_uverbs_ex_modify_qp_resp { 739 | __u32 comp_mask; 740 | __u32 response_length; 741 | }; 742 | 743 | struct ib_uverbs_destroy_qp { 744 | __aligned_u64 response; 745 | __u32 qp_handle; 746 | __u32 reserved; 747 | }; 748 | 749 | struct ib_uverbs_destroy_qp_resp { 750 | __u32 events_reported; 751 | }; 752 | 753 | /* 754 | * The ib_uverbs_sge structure isn't used anywhere, since we assume 755 | * the ib_sge structure is packed the same way on 32-bit and 64-bit 756 | * architectures in both kernel and user space. It's just here to 757 | * document the ABI. 
758 | */ 759 | struct ib_uverbs_sge { 760 | __aligned_u64 addr; 761 | __u32 length; 762 | __u32 lkey; 763 | }; 764 | 765 | struct ib_uverbs_send_wr { 766 | __aligned_u64 wr_id; 767 | __u32 num_sge; 768 | __u32 opcode; 769 | __u32 send_flags; 770 | union { 771 | __be32 imm_data; 772 | __u32 invalidate_rkey; 773 | } ex; 774 | union { 775 | struct { 776 | __aligned_u64 remote_addr; 777 | __u32 rkey; 778 | __u32 reserved; 779 | } rdma; 780 | struct { 781 | __aligned_u64 remote_addr; 782 | __aligned_u64 compare_add; 783 | __aligned_u64 swap; 784 | __u32 rkey; 785 | __u32 reserved; 786 | } atomic; 787 | struct { 788 | __u32 ah; 789 | __u32 remote_qpn; 790 | __u32 remote_qkey; 791 | __u32 reserved; 792 | } ud; 793 | } wr; 794 | }; 795 | 796 | struct ib_uverbs_post_send { 797 | __aligned_u64 response; 798 | __u32 qp_handle; 799 | __u32 wr_count; 800 | __u32 sge_count; 801 | __u32 wqe_size; 802 | struct ib_uverbs_send_wr send_wr[0]; 803 | }; 804 | 805 | struct ib_uverbs_post_send_resp { 806 | __u32 bad_wr; 807 | }; 808 | 809 | struct ib_uverbs_recv_wr { 810 | __aligned_u64 wr_id; 811 | __u32 num_sge; 812 | __u32 reserved; 813 | }; 814 | 815 | struct ib_uverbs_post_recv { 816 | __aligned_u64 response; 817 | __u32 qp_handle; 818 | __u32 wr_count; 819 | __u32 sge_count; 820 | __u32 wqe_size; 821 | struct ib_uverbs_recv_wr recv_wr[0]; 822 | }; 823 | 824 | struct ib_uverbs_post_recv_resp { 825 | __u32 bad_wr; 826 | }; 827 | 828 | struct ib_uverbs_post_srq_recv { 829 | __aligned_u64 response; 830 | __u32 srq_handle; 831 | __u32 wr_count; 832 | __u32 sge_count; 833 | __u32 wqe_size; 834 | struct ib_uverbs_recv_wr recv[0]; 835 | }; 836 | 837 | struct ib_uverbs_post_srq_recv_resp { 838 | __u32 bad_wr; 839 | }; 840 | 841 | struct ib_uverbs_create_ah { 842 | __aligned_u64 response; 843 | __aligned_u64 user_handle; 844 | __u32 pd_handle; 845 | __u32 reserved; 846 | struct ib_uverbs_ah_attr attr; 847 | }; 848 | 849 | struct ib_uverbs_create_ah_resp { 850 | __u32 ah_handle; 851 | }; 852 | 853 | struct ib_uverbs_destroy_ah { 854 | __u32 ah_handle; 855 | }; 856 | 857 | struct ib_uverbs_attach_mcast { 858 | __u8 gid[16]; 859 | __u32 qp_handle; 860 | __u16 mlid; 861 | __u16 reserved; 862 | __aligned_u64 driver_data[0]; 863 | }; 864 | 865 | struct ib_uverbs_detach_mcast { 866 | __u8 gid[16]; 867 | __u32 qp_handle; 868 | __u16 mlid; 869 | __u16 reserved; 870 | __aligned_u64 driver_data[0]; 871 | }; 872 | 873 | struct ib_uverbs_flow_spec_hdr { 874 | __u32 type; 875 | __u16 size; 876 | __u16 reserved; 877 | /* followed by flow_spec */ 878 | __aligned_u64 flow_spec_data[0]; 879 | }; 880 | 881 | struct ib_uverbs_flow_eth_filter { 882 | __u8 dst_mac[6]; 883 | __u8 src_mac[6]; 884 | __be16 ether_type; 885 | __be16 vlan_tag; 886 | }; 887 | 888 | struct ib_uverbs_flow_spec_eth { 889 | union { 890 | struct ib_uverbs_flow_spec_hdr hdr; 891 | struct { 892 | __u32 type; 893 | __u16 size; 894 | __u16 reserved; 895 | }; 896 | }; 897 | struct ib_uverbs_flow_eth_filter val; 898 | struct ib_uverbs_flow_eth_filter mask; 899 | }; 900 | 901 | struct ib_uverbs_flow_ipv4_filter { 902 | __be32 src_ip; 903 | __be32 dst_ip; 904 | __u8 proto; 905 | __u8 tos; 906 | __u8 ttl; 907 | __u8 flags; 908 | }; 909 | 910 | struct ib_uverbs_flow_spec_ipv4 { 911 | union { 912 | struct ib_uverbs_flow_spec_hdr hdr; 913 | struct { 914 | __u32 type; 915 | __u16 size; 916 | __u16 reserved; 917 | }; 918 | }; 919 | struct ib_uverbs_flow_ipv4_filter val; 920 | struct ib_uverbs_flow_ipv4_filter mask; 921 | }; 922 | 923 | struct ib_uverbs_flow_tcp_udp_filter { 924 | 
__be16 dst_port; 925 | __be16 src_port; 926 | }; 927 | 928 | struct ib_uverbs_flow_spec_tcp_udp { 929 | union { 930 | struct ib_uverbs_flow_spec_hdr hdr; 931 | struct { 932 | __u32 type; 933 | __u16 size; 934 | __u16 reserved; 935 | }; 936 | }; 937 | struct ib_uverbs_flow_tcp_udp_filter val; 938 | struct ib_uverbs_flow_tcp_udp_filter mask; 939 | }; 940 | 941 | struct ib_uverbs_flow_ipv6_filter { 942 | __u8 src_ip[16]; 943 | __u8 dst_ip[16]; 944 | __be32 flow_label; 945 | __u8 next_hdr; 946 | __u8 traffic_class; 947 | __u8 hop_limit; 948 | __u8 reserved; 949 | }; 950 | 951 | struct ib_uverbs_flow_spec_ipv6 { 952 | union { 953 | struct ib_uverbs_flow_spec_hdr hdr; 954 | struct { 955 | __u32 type; 956 | __u16 size; 957 | __u16 reserved; 958 | }; 959 | }; 960 | struct ib_uverbs_flow_ipv6_filter val; 961 | struct ib_uverbs_flow_ipv6_filter mask; 962 | }; 963 | 964 | struct ib_uverbs_flow_spec_action_tag { 965 | union { 966 | struct ib_uverbs_flow_spec_hdr hdr; 967 | struct { 968 | __u32 type; 969 | __u16 size; 970 | __u16 reserved; 971 | }; 972 | }; 973 | __u32 tag_id; 974 | __u32 reserved1; 975 | }; 976 | 977 | struct ib_uverbs_flow_spec_action_drop { 978 | union { 979 | struct ib_uverbs_flow_spec_hdr hdr; 980 | struct { 981 | __u32 type; 982 | __u16 size; 983 | __u16 reserved; 984 | }; 985 | }; 986 | }; 987 | 988 | struct ib_uverbs_flow_spec_action_handle { 989 | union { 990 | struct ib_uverbs_flow_spec_hdr hdr; 991 | struct { 992 | __u32 type; 993 | __u16 size; 994 | __u16 reserved; 995 | }; 996 | }; 997 | __u32 handle; 998 | __u32 reserved1; 999 | }; 1000 | 1001 | struct ib_uverbs_flow_tunnel_filter { 1002 | __be32 tunnel_id; 1003 | }; 1004 | 1005 | struct ib_uverbs_flow_spec_tunnel { 1006 | union { 1007 | struct ib_uverbs_flow_spec_hdr hdr; 1008 | struct { 1009 | __u32 type; 1010 | __u16 size; 1011 | __u16 reserved; 1012 | }; 1013 | }; 1014 | struct ib_uverbs_flow_tunnel_filter val; 1015 | struct ib_uverbs_flow_tunnel_filter mask; 1016 | }; 1017 | 1018 | struct ib_uverbs_flow_spec_esp_filter { 1019 | __u32 spi; 1020 | __u32 seq; 1021 | }; 1022 | 1023 | struct ib_uverbs_flow_spec_esp { 1024 | union { 1025 | struct ib_uverbs_flow_spec_hdr hdr; 1026 | struct { 1027 | __u32 type; 1028 | __u16 size; 1029 | __u16 reserved; 1030 | }; 1031 | }; 1032 | struct ib_uverbs_flow_spec_esp_filter val; 1033 | struct ib_uverbs_flow_spec_esp_filter mask; 1034 | }; 1035 | 1036 | struct ib_uverbs_flow_attr { 1037 | __u32 type; 1038 | __u16 size; 1039 | __u16 priority; 1040 | __u8 num_of_specs; 1041 | __u8 reserved[2]; 1042 | __u8 port; 1043 | __u32 flags; 1044 | /* Following are the optional layers according to user request 1045 | * struct ib_flow_spec_xxx 1046 | * struct ib_flow_spec_yyy 1047 | */ 1048 | struct ib_uverbs_flow_spec_hdr flow_specs[0]; 1049 | }; 1050 | 1051 | struct ib_uverbs_create_flow { 1052 | __u32 comp_mask; 1053 | __u32 qp_handle; 1054 | struct ib_uverbs_flow_attr flow_attr; 1055 | }; 1056 | 1057 | struct ib_uverbs_create_flow_resp { 1058 | __u32 comp_mask; 1059 | __u32 flow_handle; 1060 | }; 1061 | 1062 | struct ib_uverbs_destroy_flow { 1063 | __u32 comp_mask; 1064 | __u32 flow_handle; 1065 | }; 1066 | 1067 | struct ib_uverbs_create_srq { 1068 | __aligned_u64 response; 1069 | __aligned_u64 user_handle; 1070 | __u32 pd_handle; 1071 | __u32 max_wr; 1072 | __u32 max_sge; 1073 | __u32 srq_limit; 1074 | __aligned_u64 driver_data[0]; 1075 | }; 1076 | 1077 | struct ib_uverbs_create_xsrq { 1078 | __aligned_u64 response; 1079 | __aligned_u64 user_handle; 1080 | __u32 srq_type; 1081 | __u32 
pd_handle; 1082 | __u32 max_wr; 1083 | __u32 max_sge; 1084 | __u32 srq_limit; 1085 | __u32 max_num_tags; 1086 | __u32 xrcd_handle; 1087 | __u32 cq_handle; 1088 | __aligned_u64 driver_data[0]; 1089 | }; 1090 | 1091 | struct ib_uverbs_create_srq_resp { 1092 | __u32 srq_handle; 1093 | __u32 max_wr; 1094 | __u32 max_sge; 1095 | __u32 srqn; 1096 | }; 1097 | 1098 | struct ib_uverbs_modify_srq { 1099 | __u32 srq_handle; 1100 | __u32 attr_mask; 1101 | __u32 max_wr; 1102 | __u32 srq_limit; 1103 | __aligned_u64 driver_data[0]; 1104 | }; 1105 | 1106 | struct ib_uverbs_query_srq { 1107 | __aligned_u64 response; 1108 | __u32 srq_handle; 1109 | __u32 reserved; 1110 | __aligned_u64 driver_data[0]; 1111 | }; 1112 | 1113 | struct ib_uverbs_query_srq_resp { 1114 | __u32 max_wr; 1115 | __u32 max_sge; 1116 | __u32 srq_limit; 1117 | __u32 reserved; 1118 | }; 1119 | 1120 | struct ib_uverbs_destroy_srq { 1121 | __aligned_u64 response; 1122 | __u32 srq_handle; 1123 | __u32 reserved; 1124 | }; 1125 | 1126 | struct ib_uverbs_destroy_srq_resp { 1127 | __u32 events_reported; 1128 | }; 1129 | 1130 | struct ib_uverbs_ex_create_wq { 1131 | __u32 comp_mask; 1132 | __u32 wq_type; 1133 | __aligned_u64 user_handle; 1134 | __u32 pd_handle; 1135 | __u32 cq_handle; 1136 | __u32 max_wr; 1137 | __u32 max_sge; 1138 | __u32 create_flags; /* Use enum ib_wq_flags */ 1139 | __u32 reserved; 1140 | }; 1141 | 1142 | struct ib_uverbs_ex_create_wq_resp { 1143 | __u32 comp_mask; 1144 | __u32 response_length; 1145 | __u32 wq_handle; 1146 | __u32 max_wr; 1147 | __u32 max_sge; 1148 | __u32 wqn; 1149 | }; 1150 | 1151 | struct ib_uverbs_ex_destroy_wq { 1152 | __u32 comp_mask; 1153 | __u32 wq_handle; 1154 | }; 1155 | 1156 | struct ib_uverbs_ex_destroy_wq_resp { 1157 | __u32 comp_mask; 1158 | __u32 response_length; 1159 | __u32 events_reported; 1160 | __u32 reserved; 1161 | }; 1162 | 1163 | struct ib_uverbs_ex_modify_wq { 1164 | __u32 attr_mask; 1165 | __u32 wq_handle; 1166 | __u32 wq_state; 1167 | __u32 curr_wq_state; 1168 | __u32 flags; /* Use enum ib_wq_flags */ 1169 | __u32 flags_mask; /* Use enum ib_wq_flags */ 1170 | }; 1171 | 1172 | /* Prevent memory allocation rather than max expected size */ 1173 | #define IB_USER_VERBS_MAX_LOG_IND_TBL_SIZE 0x0d 1174 | struct ib_uverbs_ex_create_rwq_ind_table { 1175 | __u32 comp_mask; 1176 | __u32 log_ind_tbl_size; 1177 | /* Following are the wq handles according to log_ind_tbl_size 1178 | * wq_handle1 1179 | * wq_handle2 1180 | */ 1181 | __u32 wq_handles[0]; 1182 | }; 1183 | 1184 | struct ib_uverbs_ex_create_rwq_ind_table_resp { 1185 | __u32 comp_mask; 1186 | __u32 response_length; 1187 | __u32 ind_tbl_handle; 1188 | __u32 ind_tbl_num; 1189 | }; 1190 | 1191 | struct ib_uverbs_ex_destroy_rwq_ind_table { 1192 | __u32 comp_mask; 1193 | __u32 ind_tbl_handle; 1194 | }; 1195 | 1196 | struct ib_uverbs_cq_moderation { 1197 | __u16 cq_count; 1198 | __u16 cq_period; 1199 | }; 1200 | 1201 | struct ib_uverbs_ex_modify_cq { 1202 | __u32 cq_handle; 1203 | __u32 attr_mask; 1204 | struct ib_uverbs_cq_moderation attr; 1205 | __u32 reserved; 1206 | }; 1207 | 1208 | #define IB_DEVICE_NAME_MAX 64 1209 | 1210 | #endif /* IB_USER_VERBS_H */ 1211 | -------------------------------------------------------------------------------- /kernel-headers/uapi/rdma/mlx5-abi.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */ 2 | /* 3 | * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. 
4 |  *
5 |  * This software is available to you under a choice of one of two
6 |  * licenses. You may choose to be licensed under the terms of the GNU
7 |  * General Public License (GPL) Version 2, available from the file
8 |  * COPYING in the main directory of this source tree, or the
9 |  * OpenIB.org BSD license below:
10 |  *
11 |  * Redistribution and use in source and binary forms, with or
12 |  * without modification, are permitted provided that the following
13 |  * conditions are met:
14 |  *
15 |  * - Redistributions of source code must retain the above
16 |  *   copyright notice, this list of conditions and the following
17 |  *   disclaimer.
18 |  *
19 |  * - Redistributions in binary form must reproduce the above
20 |  *   copyright notice, this list of conditions and the following
21 |  *   disclaimer in the documentation and/or other materials
22 |  *   provided with the distribution.
23 |  *
24 |  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 |  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 |  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 |  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 |  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 |  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 |  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 |  * SOFTWARE.
32 |  */
33 | 
34 | #ifndef MLX5_ABI_USER_H
35 | #define MLX5_ABI_USER_H
36 | 
37 | #include <linux/types.h>
38 | #include <linux/if_ether.h> /* For ETH_ALEN. */
39 | 
40 | enum {
41 |     MLX5_QP_FLAG_SIGNATURE = 1 << 0,
42 |     MLX5_QP_FLAG_SCATTER_CQE = 1 << 1,
43 |     MLX5_QP_FLAG_TUNNEL_OFFLOADS = 1 << 2,
44 |     MLX5_QP_FLAG_BFREG_INDEX = 1 << 3,
45 |     MLX5_QP_FLAG_TYPE_DCT = 1 << 4,
46 |     MLX5_QP_FLAG_TYPE_DCI = 1 << 5,
47 | };
48 | 
49 | enum {
50 |     MLX5_SRQ_FLAG_SIGNATURE = 1 << 0,
51 | };
52 | 
53 | enum {
54 |     MLX5_WQ_FLAG_SIGNATURE = 1 << 0,
55 | };
56 | 
57 | /* Increment this value if any changes that break userspace ABI
58 |  * compatibility are made.
59 |  */
60 | #define MLX5_IB_UVERBS_ABI_VERSION 1
61 | 
62 | /* Make sure that all structs defined in this file remain laid out so
63 |  * that they pack the same way on 32-bit and 64-bit architectures (to
64 |  * avoid incompatibility between 32-bit userspace and 64-bit kernels).
65 |  * In particular do not use pointer types -- pass pointers in __u64
66 |  * instead.
67 |  */
68 | 
69 | struct mlx5_ib_alloc_ucontext_req {
70 |     __u32 total_num_bfregs;
71 |     __u32 num_low_latency_bfregs;
72 | };
73 | 
74 | enum mlx5_lib_caps {
75 |     MLX5_LIB_CAP_4K_UAR = (__u64)1 << 0,
76 | };
77 | 
78 | enum mlx5_ib_alloc_uctx_v2_flags {
79 |     MLX5_IB_ALLOC_UCTX_DEVX = 1 << 0,
80 | };
81 | struct mlx5_ib_alloc_ucontext_req_v2 {
82 |     __u32 total_num_bfregs;
83 |     __u32 num_low_latency_bfregs;
84 |     __u32 flags;
85 |     __u32 comp_mask;
86 |     __u8 max_cqe_version;
87 |     __u8 reserved0;
88 |     __u16 reserved1;
89 |     __u32 reserved2;
90 |     __aligned_u64 lib_caps;
91 | };
92 | 
93 | enum mlx5_ib_alloc_ucontext_resp_mask {
94 |     MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET = 1UL << 0,
95 | };
96 | 
97 | enum mlx5_user_cmds_supp_uhw {
98 |     MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE = 1 << 0,
99 |     MLX5_USER_CMDS_SUPP_UHW_CREATE_AH = 1 << 1,
100 | };
101 | 
102 | /* The eth_min_inline response value is set to off-by-one vs the FW
103 |  * returned value to allow user-space to deal with older kernels.
104 | */ 105 | enum mlx5_user_inline_mode { 106 | MLX5_USER_INLINE_MODE_NA, 107 | MLX5_USER_INLINE_MODE_NONE, 108 | MLX5_USER_INLINE_MODE_L2, 109 | MLX5_USER_INLINE_MODE_IP, 110 | MLX5_USER_INLINE_MODE_TCP_UDP, 111 | }; 112 | 113 | enum { 114 | MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM = 1 << 0, 115 | MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_REQ_METADATA = 1 << 1, 116 | MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_SPI_STEERING = 1 << 2, 117 | MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_FULL_OFFLOAD = 1 << 3, 118 | MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_TX_IV_IS_ESN = 1 << 4, 119 | }; 120 | 121 | struct mlx5_ib_alloc_ucontext_resp { 122 | __u32 qp_tab_size; 123 | __u32 bf_reg_size; 124 | __u32 tot_bfregs; 125 | __u32 cache_line_size; 126 | __u16 max_sq_desc_sz; 127 | __u16 max_rq_desc_sz; 128 | __u32 max_send_wqebb; 129 | __u32 max_recv_wr; 130 | __u32 max_srq_recv_wr; 131 | __u16 num_ports; 132 | __u16 flow_action_flags; 133 | __u32 comp_mask; 134 | __u32 response_length; 135 | __u8 cqe_version; 136 | __u8 cmds_supp_uhw; 137 | __u8 eth_min_inline; 138 | __u8 clock_info_versions; 139 | __aligned_u64 hca_core_clock_offset; 140 | __u32 log_uar_size; 141 | __u32 num_uars_per_page; 142 | __u32 num_dyn_bfregs; 143 | __u32 reserved3; 144 | }; 145 | 146 | struct mlx5_ib_alloc_pd_resp { 147 | __u32 pdn; 148 | }; 149 | 150 | struct mlx5_ib_tso_caps { 151 | __u32 max_tso; /* Maximum tso payload size in bytes */ 152 | 153 | /* Corresponding bit will be set if qp type from 154 | * 'enum ib_qp_type' is supported, e.g. 155 | * supported_qpts |= 1 << IB_QPT_UD 156 | */ 157 | __u32 supported_qpts; 158 | }; 159 | 160 | struct mlx5_ib_rss_caps { 161 | __aligned_u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */ 162 | __u8 rx_hash_function; /* enum mlx5_rx_hash_function_flags */ 163 | __u8 reserved[7]; 164 | }; 165 | 166 | enum mlx5_ib_cqe_comp_res_format { 167 | MLX5_IB_CQE_RES_FORMAT_HASH = 1 << 0, 168 | MLX5_IB_CQE_RES_FORMAT_CSUM = 1 << 1, 169 | MLX5_IB_CQE_RES_RESERVED = 1 << 2, 170 | }; 171 | 172 | struct mlx5_ib_cqe_comp_caps { 173 | __u32 max_num; 174 | __u32 supported_format; /* enum mlx5_ib_cqe_comp_res_format */ 175 | }; 176 | 177 | enum mlx5_ib_packet_pacing_cap_flags { 178 | MLX5_IB_PP_SUPPORT_BURST = 1 << 0, 179 | }; 180 | 181 | struct mlx5_packet_pacing_caps { 182 | __u32 qp_rate_limit_min; 183 | __u32 qp_rate_limit_max; /* In kpbs */ 184 | 185 | /* Corresponding bit will be set if qp type from 186 | * 'enum ib_qp_type' is supported, e.g. 187 | * supported_qpts |= 1 << IB_QPT_RAW_PACKET 188 | */ 189 | __u32 supported_qpts; 190 | __u8 cap_flags; /* enum mlx5_ib_packet_pacing_cap_flags */ 191 | __u8 reserved[3]; 192 | }; 193 | 194 | enum mlx5_ib_mpw_caps { 195 | MPW_RESERVED = 1 << 0, 196 | MLX5_IB_ALLOW_MPW = 1 << 1, 197 | MLX5_IB_SUPPORT_EMPW = 1 << 2, 198 | }; 199 | 200 | enum mlx5_ib_sw_parsing_offloads { 201 | MLX5_IB_SW_PARSING = 1 << 0, 202 | MLX5_IB_SW_PARSING_CSUM = 1 << 1, 203 | MLX5_IB_SW_PARSING_LSO = 1 << 2, 204 | }; 205 | 206 | struct mlx5_ib_sw_parsing_caps { 207 | __u32 sw_parsing_offloads; /* enum mlx5_ib_sw_parsing_offloads */ 208 | 209 | /* Corresponding bit will be set if qp type from 210 | * 'enum ib_qp_type' is supported, e.g. 
211 | * supported_qpts |= 1 << IB_QPT_RAW_PACKET 212 | */ 213 | __u32 supported_qpts; 214 | }; 215 | 216 | struct mlx5_ib_striding_rq_caps { 217 | __u32 min_single_stride_log_num_of_bytes; 218 | __u32 max_single_stride_log_num_of_bytes; 219 | __u32 min_single_wqe_log_num_of_strides; 220 | __u32 max_single_wqe_log_num_of_strides; 221 | 222 | /* Corresponding bit will be set if qp type from 223 | * 'enum ib_qp_type' is supported, e.g. 224 | * supported_qpts |= 1 << IB_QPT_RAW_PACKET 225 | */ 226 | __u32 supported_qpts; 227 | __u32 reserved; 228 | }; 229 | 230 | enum mlx5_ib_query_dev_resp_flags { 231 | /* Support 128B CQE compression */ 232 | MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP = 1 << 0, 233 | MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD = 1 << 1, 234 | }; 235 | 236 | enum mlx5_ib_tunnel_offloads { 237 | MLX5_IB_TUNNELED_OFFLOADS_VXLAN = 1 << 0, 238 | MLX5_IB_TUNNELED_OFFLOADS_GRE = 1 << 1, 239 | MLX5_IB_TUNNELED_OFFLOADS_GENEVE = 1 << 2 240 | }; 241 | 242 | struct mlx5_ib_query_device_resp { 243 | __u32 comp_mask; 244 | __u32 response_length; 245 | struct mlx5_ib_tso_caps tso_caps; 246 | struct mlx5_ib_rss_caps rss_caps; 247 | struct mlx5_ib_cqe_comp_caps cqe_comp_caps; 248 | struct mlx5_packet_pacing_caps packet_pacing_caps; 249 | __u32 mlx5_ib_support_multi_pkt_send_wqes; 250 | __u32 flags; /* Use enum mlx5_ib_query_dev_resp_flags */ 251 | struct mlx5_ib_sw_parsing_caps sw_parsing_caps; 252 | struct mlx5_ib_striding_rq_caps striding_rq_caps; 253 | __u32 tunnel_offloads_caps; /* enum mlx5_ib_tunnel_offloads */ 254 | __u32 reserved; 255 | }; 256 | 257 | enum mlx5_ib_create_cq_flags { 258 | MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD = 1 << 0, 259 | }; 260 | 261 | struct mlx5_ib_create_cq { 262 | __aligned_u64 buf_addr; 263 | __aligned_u64 db_addr; 264 | __u32 cqe_size; 265 | __u8 cqe_comp_en; 266 | __u8 cqe_comp_res_format; 267 | __u16 flags; 268 | }; 269 | 270 | struct mlx5_ib_create_cq_resp { 271 | __u32 cqn; 272 | __u32 reserved; 273 | }; 274 | 275 | struct mlx5_ib_resize_cq { 276 | __aligned_u64 buf_addr; 277 | __u16 cqe_size; 278 | __u16 reserved0; 279 | __u32 reserved1; 280 | }; 281 | 282 | struct mlx5_ib_create_srq { 283 | __aligned_u64 buf_addr; 284 | __aligned_u64 db_addr; 285 | __u32 flags; 286 | __u32 reserved0; /* explicit padding (optional on i386) */ 287 | __u32 uidx; 288 | __u32 reserved1; 289 | }; 290 | 291 | struct mlx5_ib_create_srq_resp { 292 | __u32 srqn; 293 | __u32 reserved; 294 | }; 295 | 296 | struct mlx5_ib_create_qp { 297 | __aligned_u64 buf_addr; 298 | __aligned_u64 db_addr; 299 | __u32 sq_wqe_count; 300 | __u32 rq_wqe_count; 301 | __u32 rq_wqe_shift; 302 | __u32 flags; 303 | __u32 uidx; 304 | __u32 bfreg_index; 305 | union { 306 | __aligned_u64 sq_buf_addr; 307 | __aligned_u64 access_key; 308 | }; 309 | }; 310 | 311 | /* RX Hash function flags */ 312 | enum mlx5_rx_hash_function_flags { 313 | MLX5_RX_HASH_FUNC_TOEPLITZ = 1 << 0, 314 | }; 315 | 316 | /* 317 | * RX Hash flags, these flags allows to set which incoming packet's field should 318 | * participates in RX Hash. Each flag represent certain packet's field, 319 | * when the flag is set the field that is represented by the flag will 320 | * participate in RX Hash calculation. 321 | * Note: *IPV4 and *IPV6 flags can't be enabled together on the same QP 322 | * and *TCP and *UDP flags can't be enabled together on the same QP. 
323 | */ 324 | enum mlx5_rx_hash_fields { 325 | MLX5_RX_HASH_SRC_IPV4 = 1 << 0, 326 | MLX5_RX_HASH_DST_IPV4 = 1 << 1, 327 | MLX5_RX_HASH_SRC_IPV6 = 1 << 2, 328 | MLX5_RX_HASH_DST_IPV6 = 1 << 3, 329 | MLX5_RX_HASH_SRC_PORT_TCP = 1 << 4, 330 | MLX5_RX_HASH_DST_PORT_TCP = 1 << 5, 331 | MLX5_RX_HASH_SRC_PORT_UDP = 1 << 6, 332 | MLX5_RX_HASH_DST_PORT_UDP = 1 << 7, 333 | MLX5_RX_HASH_IPSEC_SPI = 1 << 8, 334 | /* Save bits for future fields */ 335 | MLX5_RX_HASH_INNER = (1UL << 31), 336 | }; 337 | 338 | struct mlx5_ib_create_qp_rss { 339 | __aligned_u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */ 340 | __u8 rx_hash_function; /* enum mlx5_rx_hash_function_flags */ 341 | __u8 rx_key_len; /* valid only for Toeplitz */ 342 | __u8 reserved[6]; 343 | __u8 rx_hash_key[128]; /* valid only for Toeplitz */ 344 | __u32 comp_mask; 345 | __u32 flags; 346 | }; 347 | 348 | struct mlx5_ib_create_qp_resp { 349 | __u32 bfreg_index; 350 | __u32 reserved; 351 | }; 352 | 353 | struct mlx5_ib_alloc_mw { 354 | __u32 comp_mask; 355 | __u8 num_klms; 356 | __u8 reserved1; 357 | __u16 reserved2; 358 | }; 359 | 360 | enum mlx5_ib_create_wq_mask { 361 | MLX5_IB_CREATE_WQ_STRIDING_RQ = (1 << 0), 362 | }; 363 | 364 | struct mlx5_ib_create_wq { 365 | __aligned_u64 buf_addr; 366 | __aligned_u64 db_addr; 367 | __u32 rq_wqe_count; 368 | __u32 rq_wqe_shift; 369 | __u32 user_index; 370 | __u32 flags; 371 | __u32 comp_mask; 372 | __u32 single_stride_log_num_of_bytes; 373 | __u32 single_wqe_log_num_of_strides; 374 | __u32 two_byte_shift_en; 375 | }; 376 | 377 | struct mlx5_ib_create_ah_resp { 378 | __u32 response_length; 379 | __u8 dmac[ETH_ALEN]; 380 | __u8 reserved[6]; 381 | }; 382 | 383 | struct mlx5_ib_burst_info { 384 | __u32 max_burst_sz; 385 | __u16 typical_pkt_sz; 386 | __u16 reserved; 387 | }; 388 | 389 | struct mlx5_ib_modify_qp { 390 | __u32 comp_mask; 391 | struct mlx5_ib_burst_info burst_info; 392 | __u32 reserved; 393 | }; 394 | 395 | struct mlx5_ib_modify_qp_resp { 396 | __u32 response_length; 397 | __u32 dctn; 398 | }; 399 | 400 | struct mlx5_ib_create_wq_resp { 401 | __u32 response_length; 402 | __u32 reserved; 403 | }; 404 | 405 | struct mlx5_ib_create_rwq_ind_tbl_resp { 406 | __u32 response_length; 407 | __u32 reserved; 408 | }; 409 | 410 | struct mlx5_ib_modify_wq { 411 | __u32 comp_mask; 412 | __u32 reserved; 413 | }; 414 | 415 | struct mlx5_ib_clock_info { 416 | __u32 sign; 417 | __u32 resv; 418 | __aligned_u64 nsec; 419 | __aligned_u64 cycles; 420 | __aligned_u64 frac; 421 | __u32 mult; 422 | __u32 shift; 423 | __aligned_u64 mask; 424 | __aligned_u64 overflow_period; 425 | }; 426 | 427 | enum mlx5_ib_mmap_cmd { 428 | MLX5_IB_MMAP_REGULAR_PAGE = 0, 429 | MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES = 1, 430 | MLX5_IB_MMAP_WC_PAGE = 2, 431 | MLX5_IB_MMAP_NC_PAGE = 3, 432 | /* 5 is chosen in order to be compatible with old versions of libmlx5 */ 433 | MLX5_IB_MMAP_CORE_CLOCK = 5, 434 | MLX5_IB_MMAP_ALLOC_WC = 6, 435 | MLX5_IB_MMAP_CLOCK_INFO = 7, 436 | MLX5_IB_MMAP_DEVICE_MEM = 8, 437 | }; 438 | 439 | enum { 440 | MLX5_IB_CLOCK_INFO_KERNEL_UPDATING = 1, 441 | }; 442 | 443 | /* Bit indexes for the mlx5_alloc_ucontext_resp.clock_info_versions bitmap */ 444 | enum { 445 | MLX5_IB_CLOCK_INFO_V1 = 0, 446 | }; 447 | #endif /* MLX5_ABI_USER_H */ 448 | -------------------------------------------------------------------------------- /kernel-headers/uapi/rdma/mlx5_user_ioctl_cmds.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2018, Mellanox Technologies inc. 
All rights reserved. 3 | * 4 | * This software is available to you under a choice of one of two 5 | * licenses. You may choose to be licensed under the terms of the GNU 6 | * General Public License (GPL) Version 2, available from the file 7 | * COPYING in the main directory of this source tree, or the 8 | * OpenIB.org BSD license below: 9 | * 10 | * Redistribution and use in source and binary forms, with or 11 | * without modification, are permitted provided that the following 12 | * conditions are met: 13 | * 14 | * - Redistributions of source code must retain the above 15 | * copyright notice, this list of conditions and the following 16 | * disclaimer. 17 | * 18 | * - Redistributions in binary form must reproduce the above 19 | * copyright notice, this list of conditions and the following 20 | * disclaimer in the documentation and/or other materials 21 | * provided with the distribution. 22 | * 23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 | * SOFTWARE. 31 | */ 32 | 33 | #ifndef MLX5_USER_IOCTL_CMDS_H 34 | #define MLX5_USER_IOCTL_CMDS_H 35 | 36 | #include 37 | #include 38 | 39 | enum mlx5_ib_create_flow_action_attrs { 40 | /* This attribute belong to the driver namespace */ 41 | MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS = (1U << UVERBS_ID_NS_SHIFT), 42 | }; 43 | 44 | enum mlx5_ib_alloc_dm_attrs { 45 | MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET = (1U << UVERBS_ID_NS_SHIFT), 46 | MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX, 47 | }; 48 | 49 | enum mlx5_ib_devx_methods { 50 | MLX5_IB_METHOD_DEVX_OTHER = (1U << UVERBS_ID_NS_SHIFT), 51 | MLX5_IB_METHOD_DEVX_QUERY_UAR, 52 | MLX5_IB_METHOD_DEVX_QUERY_EQN, 53 | }; 54 | 55 | enum mlx5_ib_devx_other_attrs { 56 | MLX5_IB_ATTR_DEVX_OTHER_CMD_IN = (1U << UVERBS_ID_NS_SHIFT), 57 | MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT, 58 | }; 59 | 60 | enum mlx5_ib_devx_obj_create_attrs { 61 | MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE = (1U << UVERBS_ID_NS_SHIFT), 62 | MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN, 63 | MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, 64 | }; 65 | 66 | enum mlx5_ib_devx_query_uar_attrs { 67 | MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX = (1U << UVERBS_ID_NS_SHIFT), 68 | MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX, 69 | }; 70 | 71 | enum mlx5_ib_devx_obj_destroy_attrs { 72 | MLX5_IB_ATTR_DEVX_OBJ_DESTROY_HANDLE = (1U << UVERBS_ID_NS_SHIFT), 73 | }; 74 | 75 | enum mlx5_ib_devx_obj_modify_attrs { 76 | MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE = (1U << UVERBS_ID_NS_SHIFT), 77 | MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN, 78 | MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT, 79 | }; 80 | 81 | enum mlx5_ib_devx_obj_query_attrs { 82 | MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE = (1U << UVERBS_ID_NS_SHIFT), 83 | MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN, 84 | MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT, 85 | }; 86 | 87 | enum mlx5_ib_devx_query_eqn_attrs { 88 | MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC = (1U << UVERBS_ID_NS_SHIFT), 89 | MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN, 90 | }; 91 | 92 | enum mlx5_ib_devx_obj_methods { 93 | MLX5_IB_METHOD_DEVX_OBJ_CREATE = (1U << UVERBS_ID_NS_SHIFT), 94 | MLX5_IB_METHOD_DEVX_OBJ_DESTROY, 95 | MLX5_IB_METHOD_DEVX_OBJ_MODIFY, 96 | MLX5_IB_METHOD_DEVX_OBJ_QUERY, 97 | }; 
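
Taken together, the ids above are all DevX needs from the kernel: one object id, one method id, and a few per-method attribute ids, marshalled into a single `RDMA_VERBS_IOCTL` call. Below is a minimal sketch (illustration only, not part of this header) of issuing `MLX5_IB_METHOD_DEVX_QUERY_EQN` against the raw ABI; it assumes the `ib_uverbs_ioctl_hdr`/`ib_uverbs_attr` layout from `rdma_user_ioctl_cmds.h` further down this tree and a `cmd_fd` obtained by opening the uverbs device. `src/devx.c` below does the same thing through helper macros.

```
/* Sketch only: ask which device EQ backs a completion vector, using the
 * raw ioctl ABI instead of the helpers in src/devx_ioctl.h. */
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <rdma/rdma_user_ioctl_cmds.h>
#include <rdma/mlx5_user_ioctl_cmds.h>

static int query_eqn_raw(int cmd_fd, uint32_t vector, uint32_t *eqn)
{
	/* hdr.attrs[] is a flexible array; provide storage behind it. */
	union {
		struct ib_uverbs_ioctl_hdr hdr;
		char buf[sizeof(struct ib_uverbs_ioctl_hdr) +
			 2 * sizeof(struct ib_uverbs_attr)];
	} req;
	struct ib_uverbs_attr *attr = req.hdr.attrs;

	memset(&req, 0, sizeof(req));
	req.hdr.length = sizeof(req.buf);
	req.hdr.object_id = MLX5_IB_OBJECT_DEVX;
	req.hdr.method_id = MLX5_IB_METHOD_DEVX_QUERY_EQN;
	req.hdr.num_attrs = 2;
	req.hdr.driver_id = RDMA_DRIVER_MLX5;

	/* Inputs of up to 8 bytes are inlined directly into .data. */
	attr[0].attr_id = MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC;
	attr[0].flags = UVERBS_ATTR_F_MANDATORY;
	attr[0].len = sizeof(vector);
	memcpy(&attr[0].data, &vector, sizeof(vector));

	/* Output attributes carry a pointer for the kernel to fill. */
	attr[1].attr_id = MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN;
	attr[1].flags = UVERBS_ATTR_F_MANDATORY;
	attr[1].len = sizeof(*eqn);
	attr[1].data = (uintptr_t)eqn;

	return ioctl(cmd_fd, RDMA_VERBS_IOCTL, &req.hdr) ? errno : 0;
}
```
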
98 | 99 | enum mlx5_ib_devx_umem_reg_attrs { 100 | MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE = (1U << UVERBS_ID_NS_SHIFT), 101 | MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR, 102 | MLX5_IB_ATTR_DEVX_UMEM_REG_LEN, 103 | MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS, 104 | MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID, 105 | }; 106 | 107 | enum mlx5_ib_devx_umem_dereg_attrs { 108 | MLX5_IB_ATTR_DEVX_UMEM_DEREG_HANDLE = (1U << UVERBS_ID_NS_SHIFT), 109 | }; 110 | 111 | enum mlx5_ib_devx_umem_methods { 112 | MLX5_IB_METHOD_DEVX_UMEM_REG = (1U << UVERBS_ID_NS_SHIFT), 113 | MLX5_IB_METHOD_DEVX_UMEM_DEREG, 114 | }; 115 | 116 | enum mlx5_ib_objects { 117 | MLX5_IB_OBJECT_DEVX = (1U << UVERBS_ID_NS_SHIFT), 118 | MLX5_IB_OBJECT_DEVX_OBJ, 119 | MLX5_IB_OBJECT_DEVX_UMEM, 120 | MLX5_IB_OBJECT_FLOW_MATCHER, 121 | }; 122 | 123 | enum mlx5_ib_flow_matcher_create_attrs { 124 | MLX5_IB_ATTR_FLOW_MATCHER_CREATE_HANDLE = (1U << UVERBS_ID_NS_SHIFT), 125 | MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK, 126 | MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE, 127 | MLX5_IB_ATTR_FLOW_MATCHER_MATCH_CRITERIA, 128 | }; 129 | 130 | enum mlx5_ib_flow_matcher_destroy_attrs { 131 | MLX5_IB_ATTR_FLOW_MATCHER_DESTROY_HANDLE = (1U << UVERBS_ID_NS_SHIFT), 132 | }; 133 | 134 | enum mlx5_ib_flow_matcher_methods { 135 | MLX5_IB_METHOD_FLOW_MATCHER_CREATE = (1U << UVERBS_ID_NS_SHIFT), 136 | MLX5_IB_METHOD_FLOW_MATCHER_DESTROY, 137 | }; 138 | 139 | #define MLX5_IB_DW_MATCH_PARAM 0x80 140 | 141 | struct mlx5_ib_match_params { 142 | __u32 match_params[MLX5_IB_DW_MATCH_PARAM]; 143 | }; 144 | 145 | enum mlx5_ib_flow_type { 146 | MLX5_IB_FLOW_TYPE_NORMAL, 147 | MLX5_IB_FLOW_TYPE_SNIFFER, 148 | MLX5_IB_FLOW_TYPE_ALL_DEFAULT, 149 | MLX5_IB_FLOW_TYPE_MC_DEFAULT, 150 | }; 151 | 152 | enum mlx5_ib_create_flow_attrs { 153 | MLX5_IB_ATTR_CREATE_FLOW_HANDLE = (1U << UVERBS_ID_NS_SHIFT), 154 | MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE, 155 | MLX5_IB_ATTR_CREATE_FLOW_DEST_QP, 156 | MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX, 157 | MLX5_IB_ATTR_CREATE_FLOW_MATCHER, 158 | MLX5_IB_ATTR_CREATE_FLOW_DEST_VPORT, 159 | }; 160 | 161 | enum mlx5_ib_destoy_flow_attrs { 162 | MLX5_IB_ATTR_DESTROY_FLOW_HANDLE = (1U << UVERBS_ID_NS_SHIFT), 163 | }; 164 | 165 | enum mlx5_ib_flow_methods { 166 | MLX5_IB_METHOD_CREATE_FLOW = (1U << UVERBS_ID_NS_SHIFT), 167 | MLX5_IB_METHOD_DESTROY_FLOW, 168 | }; 169 | 170 | #endif 171 | -------------------------------------------------------------------------------- /kernel-headers/uapi/rdma/rdma_user_ioctl_cmds.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved. 3 | * 4 | * This software is available to you under a choice of one of two 5 | * licenses. You may choose to be licensed under the terms of the GNU 6 | * General Public License (GPL) Version 2, available from the file 7 | * COPYING in the main directory of this source tree, or the 8 | * OpenIB.org BSD license below: 9 | * 10 | * Redistribution and use in source and binary forms, with or 11 | * without modification, are permitted provided that the following 12 | * conditions are met: 13 | * 14 | * - Redistributions of source code must retain the above 15 | * copyright notice, this list of conditions and the following 16 | * disclaimer. 17 | * 18 | * - Redistributions in binary form must reproduce the above 19 | * copyright notice, this list of conditions and the following 20 | * disclaimer in the documentation and/or other materials 21 | * provided with the distribution. 
22 | *
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 | * SOFTWARE.
31 | */
32 |
33 | #ifndef RDMA_USER_IOCTL_CMDS_H
34 | #define RDMA_USER_IOCTL_CMDS_H
35 |
36 | #include <linux/types.h>
37 | #include <linux/ioctl.h>
38 |
39 | /* Documentation/ioctl/ioctl-number.txt */
40 | #define RDMA_IOCTL_MAGIC 0x1b
41 | #define RDMA_VERBS_IOCTL \
42 | 	_IOWR(RDMA_IOCTL_MAGIC, 1, struct ib_uverbs_ioctl_hdr)
43 |
44 | enum {
45 | 	/* User input */
46 | 	UVERBS_ATTR_F_MANDATORY = 1U << 0,
47 | 	/*
48 | 	 * Valid output bit should be ignored and considered set in
49 | 	 * mandatory fields. This bit is kernel output.
50 | 	 */
51 | 	UVERBS_ATTR_F_VALID_OUTPUT = 1U << 1,
52 | };
53 |
54 | struct ib_uverbs_attr {
55 | 	__u16 attr_id;		/* command specific type attribute */
56 | 	__u16 len;		/* only for pointers */
57 | 	__u16 flags;		/* combination of UVERBS_ATTR_F_XXXX */
58 | 	union {
59 | 		struct {
60 | 			__u8 elem_id;
61 | 			__u8 reserved;
62 | 		} enum_data;
63 | 		__u16 reserved;
64 | 	} attr_data;
65 | 	__aligned_u64 data;	/* ptr to command, inline data or idr/fd */
66 | };
67 |
68 | struct ib_uverbs_ioctl_hdr {
69 | 	__u16 length;
70 | 	__u16 object_id;
71 | 	__u16 method_id;
72 | 	__u16 num_attrs;
73 | 	__aligned_u64 reserved1;
74 | 	__u32 driver_id;
75 | 	__u32 reserved2;
76 | 	struct ib_uverbs_attr attrs[0];
77 | };
78 |
79 | enum rdma_driver_id {
80 | 	RDMA_DRIVER_UNKNOWN,
81 | 	RDMA_DRIVER_MLX5,
82 | 	RDMA_DRIVER_MLX4,
83 | 	RDMA_DRIVER_CXGB3,
84 | 	RDMA_DRIVER_CXGB4,
85 | 	RDMA_DRIVER_MTHCA,
86 | 	RDMA_DRIVER_BNXT_RE,
87 | 	RDMA_DRIVER_OCRDMA,
88 | 	RDMA_DRIVER_NES,
89 | 	RDMA_DRIVER_I40IW,
90 | 	RDMA_DRIVER_VMW_PVRDMA,
91 | 	RDMA_DRIVER_QEDR,
92 | 	RDMA_DRIVER_HNS,
93 | 	RDMA_DRIVER_USNIC,
94 | 	RDMA_DRIVER_RXE,
95 | 	RDMA_DRIVER_HFI1,
96 | 	RDMA_DRIVER_QIB,
97 | };
98 |
99 | #endif
100 |
--------------------------------------------------------------------------------
/src/devx.c:
--------------------------------------------------------------------------------
1 | /*
2 |  * Copyright (C) Mellanox Technologies Ltd, 2001-2018. ALL RIGHTS RESERVED.
3 |  *
4 |  * See file LICENSE for terms.
5 | */ 6 | 7 | #include 8 | #include 9 | #include "devx_ioctl.h" 10 | #include 11 | #include "devx.h" 12 | #include "devx_priv.h" 13 | #include "devx_prm.h" 14 | 15 | int devx_cmd(void *ctx, 16 | void *in, size_t inlen, 17 | void *out, size_t outlen) 18 | { 19 | DECLARE_COMMAND_BUFFER(cmd, 20 | MLX5_IB_OBJECT_DEVX, 21 | MLX5_IB_METHOD_DEVX_OTHER, 22 | 2); 23 | 24 | fill_attr_in(cmd, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN, in, inlen); 25 | fill_attr_in(cmd, MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT, out, outlen); 26 | return execute_ioctl(((struct devx_context *)ctx)->cmd_fd, cmd); 27 | } 28 | 29 | int devx_query_eqn(void *ctx, uint32_t vector, uint32_t *eqn) 30 | { 31 | DECLARE_COMMAND_BUFFER(cmd, 32 | MLX5_IB_OBJECT_DEVX, 33 | MLX5_IB_METHOD_DEVX_QUERY_EQN, 34 | 2); 35 | 36 | fill_attr_in_uint32(cmd, MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC, vector); 37 | fill_attr_out(cmd, MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN, eqn, sizeof(*eqn)); 38 | return execute_ioctl(((struct devx_context *)ctx)->cmd_fd, cmd); 39 | } 40 | 41 | struct devx_obj_handle *devx_obj_create(void *ctx, 42 | void *in, size_t inlen, 43 | void *out, size_t outlen) 44 | { 45 | DECLARE_COMMAND_BUFFER(cmd, 46 | MLX5_IB_OBJECT_DEVX_OBJ, 47 | MLX5_IB_METHOD_DEVX_OBJ_CREATE, 48 | 3); 49 | struct ib_uverbs_attr *handle; 50 | struct devx_obj_handle *obj; 51 | int ret = ENOMEM; 52 | 53 | obj = (struct devx_obj_handle *)malloc(sizeof(*obj)); 54 | if (!obj) 55 | goto err; 56 | obj->ctx = ctx; 57 | 58 | handle = fill_attr_out_obj(cmd, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE); 59 | fill_attr_in(cmd, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN, in, inlen); 60 | fill_attr_in(cmd, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, out, outlen); 61 | 62 | ret = execute_ioctl(obj->ctx->cmd_fd, cmd); 63 | if (ret) 64 | goto err; 65 | obj->handle = handle->data; 66 | 67 | return obj; 68 | err: 69 | free(obj); 70 | errno = ret; 71 | return NULL; 72 | } 73 | 74 | int devx_obj_query(struct devx_obj_handle *obj, 75 | void *in, size_t inlen, 76 | void *out, size_t outlen) 77 | { 78 | DECLARE_COMMAND_BUFFER(cmd, 79 | MLX5_IB_OBJECT_DEVX_OBJ, 80 | MLX5_IB_METHOD_DEVX_OBJ_QUERY, 81 | 3); 82 | 83 | fill_attr_in_obj(cmd, MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE, obj->handle); 84 | fill_attr_in(cmd, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN, in, inlen); 85 | fill_attr_in(cmd, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT, out, outlen); 86 | 87 | return execute_ioctl(obj->ctx->cmd_fd, cmd); 88 | } 89 | 90 | int devx_obj_modify(struct devx_obj_handle *obj, 91 | void *in, size_t inlen, 92 | void *out, size_t outlen) 93 | { 94 | DECLARE_COMMAND_BUFFER(cmd, 95 | MLX5_IB_OBJECT_DEVX_OBJ, 96 | MLX5_IB_METHOD_DEVX_OBJ_MODIFY, 97 | 3); 98 | 99 | fill_attr_in_obj(cmd, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE, obj->handle); 100 | fill_attr_in(cmd, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN, in, inlen); 101 | fill_attr_in(cmd, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT, out, outlen); 102 | 103 | return execute_ioctl(obj->ctx->cmd_fd, cmd); 104 | } 105 | 106 | int devx_obj_destroy(struct devx_obj_handle *obj) 107 | { 108 | DECLARE_COMMAND_BUFFER(cmd, 109 | MLX5_IB_OBJECT_DEVX_OBJ, 110 | MLX5_IB_METHOD_DEVX_OBJ_DESTROY, 111 | 1); 112 | int ret; 113 | 114 | fill_attr_in_obj(cmd, MLX5_IB_ATTR_DEVX_OBJ_DESTROY_HANDLE, obj->handle); 115 | ret = execute_ioctl(obj->ctx->cmd_fd, cmd); 116 | 117 | if (ret) 118 | return ret; 119 | free(obj); 120 | return 0; 121 | } 122 | 123 | struct devx_obj_handle *devx_umem_reg(void *ctx, 124 | void *addr, size_t size, 125 | int access, 126 | uint32_t *id) 127 | { 128 | DECLARE_COMMAND_BUFFER(cmd, 129 | MLX5_IB_OBJECT_DEVX_UMEM, 
130 | MLX5_IB_METHOD_DEVX_UMEM_REG, 131 | 5); 132 | struct ib_uverbs_attr *handle; 133 | struct devx_obj_handle *obj; 134 | int ret = ENOMEM; 135 | 136 | obj = (struct devx_obj_handle *)malloc(sizeof(*obj)); 137 | if (!obj) 138 | goto err; 139 | obj->ctx = ctx; 140 | 141 | handle = fill_attr_out_obj(cmd, MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE); 142 | fill_attr_in_uint64(cmd, MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR, (intptr_t)addr); 143 | fill_attr_in_uint64(cmd, MLX5_IB_ATTR_DEVX_UMEM_REG_LEN, size); 144 | fill_attr_in_uint32(cmd, MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS, access); 145 | fill_attr_out(cmd, MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID, id, sizeof(*id)); 146 | 147 | ret = execute_ioctl(obj->ctx->cmd_fd, cmd); 148 | if (ret) 149 | goto err; 150 | obj->handle = handle->data; 151 | 152 | return obj; 153 | err: 154 | free(obj); 155 | errno = ret; 156 | return NULL; 157 | } 158 | 159 | int devx_umem_dereg(struct devx_obj_handle *obj) 160 | { 161 | DECLARE_COMMAND_BUFFER(cmd, 162 | MLX5_IB_OBJECT_DEVX_UMEM, 163 | MLX5_IB_METHOD_DEVX_UMEM_DEREG, 164 | 1); 165 | int ret; 166 | 167 | fill_attr_in_obj(cmd, MLX5_IB_ATTR_DEVX_UMEM_DEREG_HANDLE, obj->handle); 168 | ret = execute_ioctl(obj->ctx->cmd_fd, cmd); 169 | if (ret) 170 | return ret; 171 | free(obj); 172 | return 0; 173 | } 174 | 175 | struct devx_fs_rule_handle { 176 | struct devx_obj_handle flow; 177 | uint32_t matcher_handle; 178 | }; 179 | 180 | static int __matcher_create(struct devx_fs_rule_handle *obj, void* in) 181 | { 182 | DECLARE_COMMAND_BUFFER(cmd, 183 | MLX5_IB_OBJECT_FLOW_MATCHER, 184 | MLX5_IB_METHOD_FLOW_MATCHER_CREATE, 185 | 6); 186 | struct ib_uverbs_attr *handle; 187 | uint16_t prio; 188 | int ret; 189 | 190 | prio = DEVX_GET(fs_rule_add_in, in, prio); 191 | 192 | handle = fill_attr_out_obj(cmd, MLX5_IB_ATTR_FLOW_MATCHER_CREATE_HANDLE); 193 | fill_attr_in(cmd, 194 | MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK, 195 | DEVX_ADDR_OF(fs_rule_add_in, in, flow_spec.match_criteria), 196 | DEVX_FLD_SZ_BYTES(fs_rule_add_in, flow_spec.match_criteria)); 197 | fill_attr_in_enum(cmd, 198 | MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE, 199 | MLX5_IB_FLOW_TYPE_NORMAL, 200 | &prio, sizeof(prio)); 201 | fill_attr_in(cmd, 202 | MLX5_IB_ATTR_FLOW_MATCHER_MATCH_CRITERIA, 203 | DEVX_ADDR_OF(fs_rule_add_in, in, flow_spec.match_criteria_enable), 204 | DEVX_FLD_SZ_BYTES(fs_rule_add_in, flow_spec.match_criteria_enable)); 205 | 206 | ret = execute_ioctl(obj->flow.ctx->cmd_fd, cmd); 207 | if (ret) 208 | return ret; 209 | obj->matcher_handle = handle->data; 210 | return 0; 211 | } 212 | 213 | static int __matcher_destroy(struct devx_fs_rule_handle *obj) 214 | { 215 | DECLARE_COMMAND_BUFFER(cmd, 216 | MLX5_IB_OBJECT_FLOW_MATCHER, 217 | MLX5_IB_METHOD_FLOW_MATCHER_DESTROY, 218 | 1); 219 | fill_attr_in_obj(cmd, 220 | MLX5_IB_ATTR_FLOW_MATCHER_DESTROY_HANDLE, 221 | obj->matcher_handle); 222 | return execute_ioctl(obj->flow.ctx->cmd_fd, cmd); 223 | } 224 | 225 | struct devx_obj_handle *devx_fs_rule_add(void *ctx, void *in, 226 | struct devx_obj_handle *dest, 227 | uint32_t vport) 228 | { 229 | DECLARE_COMMAND_BUFFER(cmd, 230 | UVERBS_OBJECT_FLOW, 231 | MLX5_IB_METHOD_CREATE_FLOW, 232 | 4); 233 | struct devx_fs_rule_handle *obj; 234 | struct ib_uverbs_attr *handle; 235 | int ret = ENOMEM; 236 | 237 | obj = (struct devx_fs_rule_handle *)malloc(sizeof(*obj)); 238 | if (!obj) 239 | goto err; 240 | obj->flow.ctx = ctx; 241 | 242 | ret = __matcher_create(obj, in); 243 | if (ret) 244 | goto err; 245 | 246 | handle = fill_attr_out_obj(cmd, MLX5_IB_ATTR_CREATE_FLOW_HANDLE); 247 | fill_attr_in(cmd, 248 
| MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE, 249 | DEVX_ADDR_OF(fs_rule_add_in, in, flow_spec.match_value), 250 | DEVX_FLD_SZ_BYTES(fs_rule_add_in, flow_spec.match_value)); 251 | fill_attr_in_obj(cmd, 252 | MLX5_IB_ATTR_CREATE_FLOW_MATCHER, 253 | obj->matcher_handle); 254 | if (dest) { 255 | fill_attr_in_obj(cmd, 256 | MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX, 257 | dest->handle); 258 | } else { 259 | fill_attr_in(cmd, MLX5_IB_ATTR_CREATE_FLOW_DEST_VPORT, 260 | &vport, sizeof(vport)); 261 | } 262 | 263 | ret = execute_ioctl(obj->flow.ctx->cmd_fd, cmd); 264 | if (ret) 265 | goto err_cmd; 266 | obj->flow.handle = handle->data; 267 | 268 | return &obj->flow; 269 | 270 | err_cmd: 271 | __matcher_destroy(obj); 272 | err: 273 | free(obj); 274 | errno = ret; 275 | return NULL; 276 | } 277 | 278 | int devx_fs_rule_del(struct devx_obj_handle *fobj) 279 | { 280 | DECLARE_COMMAND_BUFFER(cmd, 281 | UVERBS_OBJECT_FLOW, 282 | MLX5_IB_METHOD_DESTROY_FLOW, 283 | 1); 284 | struct devx_fs_rule_handle *obj = (void *)fobj; 285 | int ret; 286 | 287 | fill_attr_in_obj(cmd, MLX5_IB_ATTR_DESTROY_FLOW_HANDLE, obj->flow.handle); 288 | ret = execute_ioctl(obj->flow.ctx->cmd_fd, cmd); 289 | if (ret) 290 | return ret; 291 | 292 | ret =__matcher_destroy(obj); 293 | if (ret) 294 | return ret; 295 | 296 | free(obj); 297 | return 0; 298 | } 299 | 300 | -------------------------------------------------------------------------------- /src/devx.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) Mellanox Technologies Ltd, 2001-2018. ALL RIGHTS RESERVED. 3 | * 4 | * See file LICENSE for terms. 5 | */ 6 | 7 | #ifndef __DEVX_H__ 8 | #define __DEVX_H__ 9 | 10 | #include 11 | #include 12 | #include 13 | 14 | #ifdef __cplusplus 15 | extern "C" { 16 | #endif 17 | 18 | struct devx_device { 19 | char name[NAME_MAX]; 20 | char dev_name[NAME_MAX]; 21 | char dev_path[PATH_MAX]; 22 | char ibdev_path[PATH_MAX]; 23 | }; 24 | 25 | struct devx_device **devx_get_device_list(int *num_devices); 26 | void devx_free_device_list(struct devx_device **list); 27 | void *devx_open_device(struct devx_device *device); 28 | int devx_close_device(void *context); 29 | 30 | int devx_cmd(void *ctx, 31 | void *in, size_t inlen, 32 | void *out, size_t outlen); 33 | 34 | int devx_alloc_uar(void *ctx, uint32_t *idx, void **addr, off_t *off); 35 | void devx_free_uar(void *ctx, void* addr); 36 | 37 | int devx_query_eqn(void *ctx, uint32_t vector, uint32_t *eqn); 38 | 39 | struct devx_obj_handle; 40 | 41 | struct devx_obj_handle *devx_obj_create(void *ctx, 42 | void *in, size_t inlen, 43 | void *out, size_t outlen); 44 | int devx_obj_query(struct devx_obj_handle *obj, 45 | void *in, size_t inlen, 46 | void *out, size_t outlen); 47 | int devx_obj_modify(struct devx_obj_handle *obj, 48 | void *in, size_t inlen, 49 | void *out, size_t outlen); 50 | int devx_obj_destroy(struct devx_obj_handle *obj); 51 | 52 | struct devx_obj_handle *devx_umem_reg(void *ctx, 53 | void *addr, size_t size, 54 | int access, 55 | uint32_t *id); 56 | int devx_umem_dereg(struct devx_obj_handle *obj); 57 | 58 | struct devx_obj_handle *devx_fs_rule_add(void *ctx, void *in, 59 | struct devx_obj_handle *dest, 60 | uint32_t vport); 61 | int devx_fs_rule_del(struct devx_obj_handle *obj); 62 | 63 | void *devx_alloc_dbrec(void *ctx, uint32_t *mem_id, size_t *off); 64 | void devx_free_dbrec(void *ctx, void *db); 65 | 66 | int devx_query_gid(void *ctx, uint8_t port_num, 67 | int index, uint8_t *gid); 68 | 69 | #ifdef __cplusplus 70 | } 71 | #endif 72 | 73 
| #endif 74 | -------------------------------------------------------------------------------- /src/devx_dbrec.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) Mellanox Technologies Ltd, 2001-2018. ALL RIGHTS RESERVED. 3 | * 4 | * See file LICENSE for terms. 5 | */ 6 | 7 | #define _GNU_SOURCE 8 | #include 9 | #include 10 | #include 11 | #include 12 | 13 | #include "devx.h" 14 | #include "devx_priv.h" 15 | 16 | struct devx_db_page { 17 | struct devx_db_page *prev, *next; 18 | uint8_t *buf; 19 | int num_db; 20 | int use_cnt; 21 | struct devx_obj_handle *mem; 22 | uint32_t mem_id; 23 | unsigned long free[0]; 24 | }; 25 | 26 | static struct devx_db_page *__add_page(void *ctx) 27 | { 28 | struct devx_context *context = (struct devx_context *)ctx; 29 | uintptr_t ps = context->page_size; 30 | struct devx_db_page *page; 31 | int pp; 32 | int i; 33 | int nlong; 34 | int ret; 35 | 36 | pp = ps / context->cache_line_size; 37 | nlong = (pp + 8 * sizeof(long) - 1) / (8 * sizeof(long)); 38 | 39 | page = malloc(sizeof *page + nlong * sizeof(long)); 40 | if (!page) 41 | return NULL; 42 | 43 | ret = posix_memalign((void **)&page->buf, ps, ps); 44 | if (ret) { 45 | free(page); 46 | return NULL; 47 | } 48 | 49 | page->num_db = pp; 50 | page->use_cnt = 0; 51 | for (i = 0; i < nlong; ++i) 52 | page->free[i] = ~0; 53 | 54 | page->mem = devx_umem_reg(ctx, page->buf, ps, 7, &page->mem_id); 55 | 56 | page->prev = NULL; 57 | page->next = context->db_list; 58 | context->db_list = page; 59 | if (page->next) 60 | page->next->prev = page; 61 | 62 | return page; 63 | } 64 | 65 | void *devx_alloc_dbrec(void *ctx, uint32_t *mem_id, size_t *off) 66 | { 67 | struct devx_context *context = (struct devx_context *)ctx; 68 | struct devx_db_page *page; 69 | void *db = NULL; 70 | int i, j; 71 | 72 | for (page = context->db_list; page; page = page->next) 73 | if (page->use_cnt < page->num_db) 74 | goto found; 75 | 76 | page = __add_page(ctx); 77 | if (!page) 78 | goto out; 79 | 80 | found: 81 | ++page->use_cnt; 82 | 83 | for (i = 0; !page->free[i]; ++i) 84 | /* nothing */; 85 | 86 | j = ffsl(page->free[i]); 87 | --j; 88 | page->free[i] &= ~(1UL << j); 89 | 90 | *mem_id = page->mem_id; 91 | *off = (i * 8 * sizeof(long) + j) * context->cache_line_size; 92 | db = page->buf + *off; 93 | out: 94 | return db; 95 | } 96 | 97 | void devx_free_dbrec(void *ctx, void *db) 98 | { 99 | struct devx_context *context = (struct devx_context *)ctx; 100 | uintptr_t ps = context->page_size; 101 | struct devx_db_page *page; 102 | int i; 103 | 104 | for (page = context->db_list; page; page = page->next) 105 | if (((uintptr_t) db & ~(ps - 1)) == (uintptr_t) page->buf) 106 | break; 107 | 108 | if (!page) 109 | return; 110 | 111 | i = ((uint8_t *)db - page->buf) / context->cache_line_size; 112 | page->free[i / (8 * sizeof(long))] |= 1UL << (i % (8 * sizeof(long))); 113 | 114 | if (!--page->use_cnt) { 115 | if (page->prev) 116 | page->prev->next = page->next; 117 | else 118 | context->db_list = page->next; 119 | if (page->next) 120 | page->next->prev = page->prev; 121 | 122 | devx_umem_dereg(page->mem); 123 | free(page->buf); 124 | free(page); 125 | } 126 | } 127 | -------------------------------------------------------------------------------- /src/devx_dpdk.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) Mellanox Technologies Ltd, 2001-2018. ALL RIGHTS RESERVED. 3 | * 4 | * See file LICENSE for terms. 
5 | */
6 |
7 | #include
8 | #include
9 | #include "devx_dpdk.h"
10 |
11 | int devx_device_to_pci_addr(const struct devx_device *device,
12 | 			    struct rte_pci_addr *pci_addr)
13 | {
14 | 	FILE *file;
15 | 	char line[32];
16 | 	char path[PATH_MAX];
17 | 	int ret = -1;
18 |
19 | 	snprintf(path, sizeof(path), "%s/device/uevent", device->ibdev_path);
20 |
21 | 	file = fopen(path, "rb");
22 | 	if (file == NULL)
23 | 		return -1;
24 | 	while (fgets(line, sizeof(line), file) == line) {
25 | 		size_t len = strlen(line);
26 | 		int c;
27 |
28 | 		/* Truncate long lines. */
29 | 		if (len == (sizeof(line) - 1))
30 | 			while (line[(len - 1)] != '\n') {
31 | 				c = fgetc(file);
32 | 				if (c == EOF)
33 | 					break;
34 | 				line[(len - 1)] = c;
35 | 			}
36 | 		/* Extract information. */
37 | 		if (sscanf(line,
38 | 			   "PCI_SLOT_NAME="
39 | 			   "%" SCNx32 ":%" SCNx8 ":%" SCNx8 ".%" SCNx8 "\n",
40 | 			   &pci_addr->domain,
41 | 			   &pci_addr->bus,
42 | 			   &pci_addr->devid,
43 | 			   &pci_addr->function) == 4) {
44 | 			ret = 0;
45 | 			break;
46 | 		}
47 | 	}
48 | 	fclose(file);
49 | 	return ret; /* -1 if no PCI_SLOT_NAME line was found */
50 | }
51 |
52 |
--------------------------------------------------------------------------------
/src/devx_dpdk.h:
--------------------------------------------------------------------------------
1 | /*
2 |  * Copyright (C) Mellanox Technologies Ltd, 2001-2018. ALL RIGHTS RESERVED.
3 |  *
4 |  * See file LICENSE for terms.
5 |  */
6 |
7 | #ifndef __DEVX_DPDK_H__
8 | #define __DEVX_DPDK_H__
9 |
10 | #include "devx.h"
11 | #include
12 |
13 | #ifdef __cplusplus
14 | extern "C" {
15 | #endif
16 |
17 | int devx_device_to_pci_addr(const struct devx_device *device,
18 | 			    struct rte_pci_addr *pci_addr);
19 |
20 | #ifdef __cplusplus
21 | }
22 | #endif
23 |
24 | #endif
25 |
--------------------------------------------------------------------------------
/src/devx_init.c:
--------------------------------------------------------------------------------
1 | /*
2 |  * Copyright (C) Mellanox Technologies Ltd, 2001-2018. ALL RIGHTS RESERVED.
3 |  *
4 |  * See file LICENSE for terms.
5 |  */
6 |
7 | #define _GNU_SOURCE
8 | #include
9 | #include
10 | #include
11 | #include
12 | #include
13 | #include
14 | #include
15 | #include
16 | #include
17 | #include
18 | #include
19 | #include
20 |
21 | #include "devx.h"
22 | #include "devx_priv.h"
23 | #include
24 | #include
25 |
26 | static int read_file(const char *dir, const char *file,
27 | 		     char *buf, size_t size)
28 | {
29 | 	char *path;
30 | 	int fd;
31 | 	ssize_t len;
32 |
33 | 	if (asprintf(&path, "%s/%s", dir, file) < 0)
34 | 		return -1;
35 |
36 | 	fd = open(path, O_RDONLY | O_CLOEXEC);
37 | 	if (fd < 0) {
38 | 		free(path);
39 | 		return -1;
40 | 	}
41 |
42 | 	len = read(fd, buf, size);
43 |
44 | 	close(fd);
45 | 	free(path);
46 |
47 | 	if (len > 0) {
48 | 		if (buf[len - 1] == '\n')
49 | 			buf[--len] = '\0';
50 | 		else if ((size_t)len < size)
51 | 			buf[len] = '\0';
52 | 		else
53 | 			return -1;
54 | 	}
55 |
56 | 	return len;
57 | }
58 |
59 | #define __snprintf(buf, len, fmt, ...) \
({ \ 60 | int __rc = snprintf(buf, len, fmt, ##__VA_ARGS__); \ 61 | (size_t)__rc < len && __rc >= 0; }) 62 | 63 | #define MLX_VENDOR_ID 0x15b3 64 | 65 | static uint16_t hca_devices[] = { 66 | 0x1011, /* MT4113 Connect-IB */ 67 | 0x1012, /* Connect-IB Virtual Function */ 68 | 0x1013, /* ConnectX-4 */ 69 | 0x1014, /* ConnectX-4 Virtual Function */ 70 | 0x1015, /* ConnectX-4LX */ 71 | 0x1016, /* ConnectX-4LX Virtual Function */ 72 | 0x1017, /* ConnectX-5, PCIe 3.0 */ 73 | 0x1018, /* ConnectX-5 Virtual Function */ 74 | 0x1019, /* ConnectX-5 Ex */ 75 | 0x101a, /* ConnectX-5 Ex VF */ 76 | 0x101b, /* ConnectX-6 */ 77 | 0x101c, /* ConnectX-6 VF */ 78 | 0xa2d2, /* BlueField integrated ConnectX-5 network controller */ 79 | 0xa2d3, /* BlueField integrated ConnectX-5 network controller VF */ 80 | }; 81 | 82 | static const char *sysfs = "/sys"; 83 | 84 | static int match(char *name_ma) { 85 | char pci_ma[100]; 86 | size_t i; 87 | 88 | for (i = 0; i < sizeof(hca_devices) / sizeof(uint16_t); i++) { 89 | snprintf(pci_ma, sizeof(pci_ma), "pci:v%08Xd%08Xsv*", 90 | MLX_VENDOR_ID, hca_devices[i]); 91 | if (fnmatch(pci_ma, name_ma, 0) == 0) 92 | return 1; 93 | } 94 | 95 | return 0; 96 | } 97 | 98 | struct devx_device **devx_get_device_list(int *num) { 99 | DIR *class_dir; 100 | struct dirent *dent; 101 | char class_path[PATH_MAX]; 102 | char sysfs_path[PATH_MAX]; 103 | char ibdev_path[PATH_MAX]; 104 | char sysfs_name[NAME_MAX]; 105 | char ibdev_name[NAME_MAX]; 106 | char modalias[512]; 107 | struct devx_device **res = NULL; 108 | int curr = 0; 109 | size_t size = 0; 110 | 111 | if (!__snprintf(class_path, sizeof(class_path), 112 | "%s/class/infiniband_verbs", sysfs)) { 113 | errno = ENOMEM; 114 | return NULL; 115 | } 116 | 117 | class_dir = opendir(class_path); 118 | if (!class_dir) { 119 | errno = ENOSYS; 120 | return NULL; 121 | } 122 | 123 | while ((dent = readdir(class_dir))) { 124 | struct stat buf; 125 | 126 | if (dent->d_name[0] == '.') 127 | continue; 128 | 129 | if (!__snprintf(sysfs_path, sizeof(sysfs_path), 130 | "%s/%s", class_path, dent->d_name)) 131 | continue; 132 | 133 | if (stat(sysfs_path, &buf)) { 134 | fprintf(stderr, "Warning: couldn't stat '%s'.\n", 135 | sysfs_path); 136 | continue; 137 | } 138 | 139 | if (!S_ISDIR(buf.st_mode)) 140 | continue; 141 | 142 | if (!__snprintf(sysfs_name, sizeof(sysfs_name), 143 | "%s", dent->d_name)) 144 | continue; 145 | 146 | if (read_file(sysfs_path, "ibdev", ibdev_name, 147 | sizeof(ibdev_name)) < 0) { 148 | fprintf(stderr, "Warning: no ibdev class attr for '%s'.\n", 149 | dent->d_name); 150 | continue; 151 | } 152 | 153 | if (!__snprintf(ibdev_path, sizeof(ibdev_path), 154 | "%s/class/infiniband/%s", sysfs, ibdev_name)) 155 | continue; 156 | 157 | if (stat(ibdev_path, &buf)) { 158 | fprintf(stderr, "Warning: couldn't stat '%s'.\n", 159 | ibdev_path); 160 | continue; 161 | } 162 | 163 | if (read_file(sysfs_path, "device/modalias", modalias, 164 | sizeof(modalias)) <= 0) { 165 | fprintf(stderr, "Warning: no modalias for '%s'.\n", 166 | dent->d_name); 167 | continue; 168 | } 169 | 170 | if (!match(modalias)) 171 | continue; 172 | 173 | if (size < (curr + 1) * sizeof(struct devx_device*)) { 174 | void *old = res; 175 | size += sizeof(struct devx_device*) * 8; 176 | res = realloc(res, size); 177 | if (!res) { 178 | res = old; 179 | goto err; 180 | } 181 | } 182 | 183 | res[curr] = calloc(1, sizeof(struct devx_device)); 184 | if (!res[curr]) 185 | goto err; 186 | 187 | strcpy(res[curr]->dev_name, sysfs_name); 188 | strcpy(res[curr]->dev_path, sysfs_path); 189 
| strcpy(res[curr]->name, ibdev_name);
190 | 		strcpy(res[curr]->ibdev_path, ibdev_path);
191 |
192 | 		curr++;
193 | 	}
194 |
195 | 	closedir(class_dir);
196 |
197 | 	if (res)
198 | 		res[curr] = NULL;
199 | 	if (num)
200 | 		*num = curr;
201 |
202 | 	return res;
203 | err:
204 | 	closedir(class_dir);
205 | 	devx_free_device_list(res);
206 | 	errno = ENOMEM;
207 | 	return NULL;
208 | }
209 |
210 | void devx_free_device_list(struct devx_device **list)
211 | {
212 | 	int i;
213 |
214 | 	for (i = 0; list && list[i]; i++)
215 | 		free(list[i]);
216 | 	free(list);
217 | }
218 |
219 | enum {
220 | 	MLX5_CQE_VERSION_V0,
221 | 	MLX5_CQE_VERSION_V1,
222 | };
223 |
224 | void *devx_open_device(struct devx_device *device)
225 | {
226 | 	char *devpath;
227 | 	struct {
228 | 		struct ib_uverbs_cmd_hdr hdr;
229 | 		struct ib_uverbs_get_context ib;
230 | 		struct mlx5_ib_alloc_ucontext_req_v2 drv;
231 | 	} req;
232 | 	struct {
233 | 		struct ib_uverbs_get_context_resp ib;
234 | 		struct mlx5_ib_alloc_ucontext_resp drv;
235 | 	} resp;
236 | 	struct devx_context *ctx;
237 |
238 | 	if (asprintf(&devpath, "/dev/infiniband/%s", device->dev_name) < 0)
239 | 		return NULL;
240 | 	ctx = calloc(1, sizeof(*ctx));
241 | 	if (!ctx) {
242 | 		free(devpath);
243 | 		return NULL;
244 | 	}
245 | 	ctx->cmd_fd = open(devpath, O_RDWR | O_CLOEXEC);
246 | 	free(devpath);
247 |
248 | 	if (ctx->cmd_fd < 0)
249 | 		goto err_free;
250 |
251 | 	memset(&req, 0, sizeof(req));
252 | 	memset(&resp, 0, sizeof(resp));
253 |
254 | 	req.drv.total_num_bfregs = 1;
255 | 	req.drv.num_low_latency_bfregs = 0;
256 | 	req.drv.flags = MLX5_IB_ALLOC_UCTX_DEVX;
257 | 	req.drv.max_cqe_version = MLX5_CQE_VERSION_V1;
258 | 	req.drv.lib_caps = MLX5_LIB_CAP_4K_UAR;
259 |
260 | 	req.hdr.command = IB_USER_VERBS_CMD_GET_CONTEXT;
261 | 	req.hdr.in_words = sizeof(req) / 4;
262 | 	req.hdr.out_words = sizeof(resp) / 4;
263 | 	req.ib.response = (uintptr_t)&resp;
264 |
265 | 	if (write(ctx->cmd_fd, &req, sizeof(req)) != sizeof(req))
266 | 		goto err;
267 |
268 | 	ctx->page_size = sysconf(_SC_PAGESIZE);
269 | 	ctx->num_uars_per_page = resp.drv.num_uars_per_page;
270 | 	ctx->cache_line_size = resp.drv.cache_line_size;
271 | 	ctx->num_uars = resp.drv.num_dyn_bfregs /
272 | 			MLX5_NUM_NON_FP_BFREGS_PER_UAR;
273 |
274 | 	ctx->uars = calloc(ctx->num_uars, sizeof(*ctx->uars));
275 |
276 | 	if (!ctx->uars)
277 | 		goto err;
278 |
279 | 	strcpy(ctx->ibdev_path, device->ibdev_path);
280 |
281 | 	return ctx;
282 | err:
283 | 	close(ctx->cmd_fd);
284 | err_free:
285 | 	free(ctx->uars);
286 | 	free(ctx);
287 | 	return NULL;
288 | }
289 |
290 | int devx_close_device(void *context)
291 | {
292 | 	struct devx_context *ctx = (struct devx_context *)context;
293 | 	int page_size = ctx->page_size;
294 | 	uint32_t i;
295 |
296 | 	for (i = 0; i < ctx->num_uars; i++) {
297 | 		if (ctx->uars[i].uar)
298 | 			munmap(ctx->uars[i].uar, page_size);
299 | 	}
300 |
301 | 	free(ctx->uars);
302 | 	close(ctx->cmd_fd);
303 | 	free(ctx);
304 |
305 | 	return 0;
306 | }
307 |
308 | int devx_query_gid(void *context, uint8_t port_num,
309 | 		   int index, uint8_t *gid)
310 | {
311 | 	struct devx_context *ctx = (struct devx_context *)context;
312 | 	char name[24];
313 | 	char attr[41];
314 | 	uint16_t val;
315 | 	int i;
316 |
317 | 	snprintf(name, sizeof name, "ports/%d/gids/%d", port_num, index);
318 |
319 | 	if (read_file(ctx->ibdev_path, name, attr, sizeof(attr)) < 0)
320 | 		return -1;
321 |
322 | 	for (i = 0; i < 8; ++i) {
323 | 		if (sscanf(attr + i * 5, "%hx", &val) != 1)
324 | 			return -1;
325 | 		gid[i * 2] = val >> 8;
326 | 		gid[i * 2 + 1] = val & 0xff;
327 | 	}
328 |
329 | 	return 0;
330 | }
331 |
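
With the pieces above, a caller can discover a device, open a DevX context and start issuing PRM commands directly. Below is a minimal end-to-end sketch, not part of the library: it assumes at least one mlx5 device is present, and that the bundled mlx5_ifc.h provides the `alloc_pd_in`/`alloc_pd_out` command layouts used with the `DEVX_SET`/`DEVX_GET` accessors from `src/devx_prm.h` further down this tree.

```
#include <stdio.h>
#include "devx.h"
#include "devx_prm.h"	/* DEVX_SET / DEVX_GET / DEVX_ST_SZ_DW */

int main(void)
{
	uint32_t in[DEVX_ST_SZ_DW(alloc_pd_in)] = {0};
	uint32_t out[DEVX_ST_SZ_DW(alloc_pd_out)] = {0};
	struct devx_device **list;
	struct devx_obj_handle *pd;
	int num = 0;
	void *ctx;

	list = devx_get_device_list(&num);
	if (!list || !num)
		return 1;

	ctx = devx_open_device(list[0]);	/* first matching mlx5 device */
	devx_free_device_list(list);		/* paths were copied on open */
	if (!ctx)
		return 1;

	/* ALLOC_PD is a plain PRM command: set the opcode, fire, read out. */
	DEVX_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
	pd = devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
	if (!pd) {
		devx_close_device(ctx);
		return 1;
	}
	printf("pdn: 0x%x\n", DEVX_GET(alloc_pd_out, out, pd));

	devx_obj_destroy(pd);
	return devx_close_device(ctx);
}
```

Destruction order matters: DevX objects hold a kernel handle tied to the context's command fd, so every `devx_obj_destroy()` must happen before `devx_close_device()`.
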
-------------------------------------------------------------------------------- /src/devx_ioctl.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) Mellanox Technologies Ltd, 2001-2018. ALL RIGHTS RESERVED. 3 | * 4 | * See file LICENSE for terms. 5 | */ 6 | 7 | #include "devx_ioctl.h" 8 | 9 | #include 10 | #include 11 | 12 | /* Number of attrs in this and all the link'd buffers */ 13 | unsigned int __ioctl_final_num_attrs(unsigned int num_attrs, 14 | struct ibv_command_buffer *link) 15 | { 16 | for (; link; link = link->next) 17 | num_attrs += link->next_attr - link->hdr.attrs; 18 | 19 | return num_attrs; 20 | } 21 | 22 | /* Linearize the link'd buffers into this one */ 23 | static void prepare_attrs(struct ibv_command_buffer *cmd) 24 | { 25 | struct ib_uverbs_attr *end = cmd->next_attr; 26 | struct ibv_command_buffer *link; 27 | 28 | for (link = cmd->next; link; link = link->next) { 29 | struct ib_uverbs_attr *cur; 30 | 31 | assert(cmd->hdr.object_id == link->hdr.object_id); 32 | assert(cmd->hdr.method_id == link->hdr.method_id); 33 | 34 | /* 35 | * Keep track of where the uhw_in lands in the final array if 36 | * we copy it from a link 37 | */ 38 | if (!VERBS_IOCTL_ONLY && link->uhw_in_idx != _UHW_NO_INDEX) { 39 | assert(cmd->uhw_in_idx == _UHW_NO_INDEX); 40 | cmd->uhw_in_idx = 41 | link->uhw_in_idx + (end - cmd->hdr.attrs); 42 | } 43 | 44 | for (cur = link->hdr.attrs; cur != link->next_attr; cur++) 45 | *end++ = *cur; 46 | 47 | assert(end <= cmd->last_attr); 48 | } 49 | 50 | cmd->hdr.num_attrs = end - cmd->hdr.attrs; 51 | 52 | /* 53 | * We keep the in UHW uninlined until directly before sending to 54 | * support the compat path. See _fill_attr_in_uhw 55 | */ 56 | if (!VERBS_IOCTL_ONLY && cmd->uhw_in_idx != _UHW_NO_INDEX) { 57 | struct ib_uverbs_attr *uhw = &cmd->hdr.attrs[cmd->uhw_in_idx]; 58 | 59 | assert(uhw->attr_id == UVERBS_ATTR_UHW_IN); 60 | 61 | if (uhw->len <= sizeof(uhw->data)) 62 | memcpy(&uhw->data, (void *)(uintptr_t)uhw->data, 63 | uhw->len); 64 | } 65 | } 66 | 67 | static void finalize_attr(struct ib_uverbs_attr *attr __attribute__((__unused__))) 68 | { 69 | } 70 | 71 | /* 72 | * Copy the link'd attrs back to their source and make all output buffers safe 73 | * for VALGRIND 74 | */ 75 | static void finalize_attrs(struct ibv_command_buffer *cmd) 76 | { 77 | struct ibv_command_buffer *link; 78 | struct ib_uverbs_attr *end; 79 | 80 | for (end = cmd->hdr.attrs; end != cmd->last_attr; end++) 81 | finalize_attr(end); 82 | 83 | for (link = cmd->next; link; link = link->next) { 84 | struct ib_uverbs_attr *cur; 85 | 86 | for (cur = link->hdr.attrs; cur != link->next_attr; cur++) { 87 | finalize_attr(end); 88 | *cur = *end++; 89 | } 90 | } 91 | } 92 | 93 | 94 | int execute_ioctl(int cmd_fd, struct ibv_command_buffer *cmd) 95 | { 96 | prepare_attrs(cmd); 97 | cmd->hdr.length = sizeof(cmd->hdr) + 98 | sizeof(cmd->hdr.attrs[0]) * cmd->hdr.num_attrs; 99 | cmd->hdr.reserved1 = 0; 100 | cmd->hdr.reserved2 = 0; 101 | 102 | cmd->hdr.driver_id = RDMA_DRIVER_MLX5; 103 | if (ioctl(cmd_fd, RDMA_VERBS_IOCTL, &cmd->hdr)) 104 | return errno; 105 | 106 | finalize_attrs(cmd); 107 | 108 | return 0; 109 | } 110 | 111 | /* 112 | * The compat scheme for UHW IN requires a pointer in .data, however the 113 | * kernel protocol requires pointers < 8 to be inlined into .data. We defer 114 | * that transformation until directly before the ioctl. 
115 | */ 116 | static inline struct ib_uverbs_attr * 117 | _fill_attr_in_uhw(struct ibv_command_buffer *cmd, uint16_t attr_id, 118 | const void *data, size_t len) 119 | { 120 | struct ib_uverbs_attr *attr = _ioctl_next_attr(cmd, attr_id); 121 | 122 | assert(len <= UINT16_MAX); 123 | 124 | attr->len = len; 125 | attr->data = ioctl_ptr_to_u64(data); 126 | 127 | return attr; 128 | } 129 | 130 | -------------------------------------------------------------------------------- /src/devx_ioctl.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) Mellanox Technologies Ltd, 2001-2018. ALL RIGHTS RESERVED. 3 | * 4 | * See file LICENSE for terms. 5 | */ 6 | 7 | #ifndef __DEVX_IOCTL_H__ 8 | #define __DEVX_IOCTL_H__ 9 | 10 | #define VERBS_IOCTL_ONLY 0 11 | #define VERBS_WRITE_ONLY 0 12 | 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | #include 19 | #include 20 | 21 | static inline uint64_t ioctl_ptr_to_u64(const void *ptr) 22 | { 23 | if (sizeof(ptr) == sizeof(uint64_t)) 24 | return (uintptr_t)ptr; 25 | 26 | /* 27 | * Some CPU architectures require sign extension when converting from 28 | * a 32 bit to 64 bit pointer. This should match the kernel 29 | * implementation of compat_ptr() for the architecture. 30 | */ 31 | #if defined(__tilegx__) 32 | return (int64_t)(intptr_t)ptr; 33 | #else 34 | return (uintptr_t)ptr; 35 | #endif 36 | } 37 | 38 | static inline void _scrub_ptr_attr(void **ptr) 39 | { 40 | #if UINTPTR_MAX == UINT64_MAX 41 | /* Do nothing */ 42 | #else 43 | RDMA_UAPI_PTR(void *, data) *scrub_data; 44 | 45 | scrub_data = container_of(ptr, typeof(*scrub_data), data); 46 | scrub_data->data_data_u64 = ioctl_ptr_to_u64(scrub_data->data); 47 | #endif 48 | } 49 | 50 | #define scrub_ptr_attr(ptr) _scrub_ptr_attr((void **)(&ptr)) 51 | 52 | /* 53 | * The command buffer is organized as a linked list of blocks of attributes. 54 | * Each stack frame allocates its block and then calls up toward to core code 55 | * which will do the ioctl. The frame that does the ioctl calls the special 56 | * FINAL variant which will allocate enough space to linearize the attribute 57 | * buffer for the kernel. 58 | * 59 | * The current range of attributes to fill is next_attr -> last_attr. 60 | */ 61 | struct ibv_command_buffer { 62 | struct ibv_command_buffer *next; 63 | struct ib_uverbs_attr *next_attr; 64 | struct ib_uverbs_attr *last_attr; 65 | /* 66 | * Used by the legacy write interface to keep track of where the UHW 67 | * buffer is located and the 'headroom' space that the common code 68 | * uses to construct the command header and common command struct 69 | * directly before the drivers' UHW. 70 | */ 71 | uint8_t uhw_in_idx; 72 | uint8_t uhw_out_idx; 73 | uint8_t uhw_in_headroom_dwords; 74 | uint8_t uhw_out_headroom_dwords; 75 | /* 76 | * These flags control what execute_ioctl_fallback does if the kernel 77 | * does not support ioctl 78 | */ 79 | uint8_t fallback_require_ex:1; 80 | uint8_t fallback_ioctl_only:1; 81 | struct ib_uverbs_ioctl_hdr hdr; 82 | }; 83 | 84 | enum {_UHW_NO_INDEX = 0xFF}; 85 | 86 | /* 87 | * Constructing an array of ibv_command_buffer is a reasonable way to expand 88 | * the VLA in hdr.attrs on the stack and also allocate some internal state in 89 | * a single contiguous stack memory region. It will over-allocate the region in 90 | * some cases, but this approach allows the number of elements to be dynamic, 91 | * and not fixed as a compile time constant. 
92 | */ 93 | #define _IOCTL_NUM_CMDB(_num_attrs) \ 94 | ((sizeof(struct ibv_command_buffer) + \ 95 | sizeof(struct ib_uverbs_attr) * (_num_attrs) + \ 96 | sizeof(struct ibv_command_buffer) - 1) / \ 97 | sizeof(struct ibv_command_buffer)) 98 | 99 | unsigned int __ioctl_final_num_attrs(unsigned int num_attrs, 100 | struct ibv_command_buffer *link); 101 | 102 | /* If the user doesn't provide a link then don't create a VLA */ 103 | #define _ioctl_final_num_attrs(_num_attrs, _link) \ 104 | ((__builtin_constant_p(!(_link)) && !(_link)) \ 105 | ? (_num_attrs) \ 106 | : __ioctl_final_num_attrs(_num_attrs, _link)) 107 | 108 | #define _COMMAND_BUFFER_INIT(_hdr, _object_id, _method_id, _num_attrs, _link) \ 109 | ((struct ibv_command_buffer){ \ 110 | .hdr = \ 111 | { \ 112 | .object_id = (_object_id), \ 113 | .method_id = (_method_id), \ 114 | }, \ 115 | .next = _link, \ 116 | .uhw_in_idx = _UHW_NO_INDEX, \ 117 | .uhw_out_idx = _UHW_NO_INDEX, \ 118 | .next_attr = (_hdr).attrs, \ 119 | .last_attr = (_hdr).attrs + _num_attrs}) 120 | 121 | /* 122 | * C99 does not permit an initializer for VLAs, so this function does the init 123 | * instead. It is called in the wonky way so that DELCARE_COMMAND_BUFFER can 124 | * still be a 'variable', and we so we don't require C11 mode. 125 | */ 126 | static inline int _ioctl_init_cmdb(struct ibv_command_buffer *cmd, 127 | uint16_t object_id, uint16_t method_id, 128 | size_t num_attrs, 129 | struct ibv_command_buffer *link) 130 | { 131 | *cmd = _COMMAND_BUFFER_INIT(cmd->hdr, object_id, method_id, num_attrs, 132 | link); 133 | return 0; 134 | } 135 | 136 | /* 137 | * Construct an IOCTL command buffer on the stack with enough space for 138 | * _num_attrs elements. _num_attrs does not have to be a compile time constant. 139 | * _link is a previous COMMAND_BUFFER in the call chain. 140 | */ 141 | #ifndef __CHECKER__ 142 | #define DECLARE_COMMAND_BUFFER_LINK(_name, _object_id, _method_id, _num_attrs, \ 143 | _link) \ 144 | const unsigned int __##_name##total = \ 145 | _ioctl_final_num_attrs(_num_attrs, _link); \ 146 | struct ibv_command_buffer _name[_IOCTL_NUM_CMDB(__##_name##total)]; \ 147 | int __attribute__((unused)) __##_name##dummy = _ioctl_init_cmdb( \ 148 | _name, _object_id, _method_id, __##_name##total, _link) 149 | #else 150 | /* 151 | * sparse enforces kernel rules which forbids VLAs. Make the VLA into a static 152 | * array when running sparse. Don't actually run the sparse compile result. 153 | */ 154 | #define DECLARE_COMMAND_BUFFER_LINK(_name, _object_id, _method_id, _num_attrs, \ 155 | _link) \ 156 | struct ibv_command_buffer _name[10]; \ 157 | int __attribute__((unused)) __##_name##dummy = \ 158 | _ioctl_init_cmdb(_name, _object_id, _method_id, 10, _link) 159 | #endif 160 | 161 | #define DECLARE_COMMAND_BUFFER(_name, _object_id, _method_id, _num_attrs) \ 162 | DECLARE_COMMAND_BUFFER_LINK(_name, _object_id, _method_id, _num_attrs, \ 163 | NULL) 164 | 165 | int execute_ioctl(int cmd_fd, struct ibv_command_buffer *cmd); 166 | 167 | static inline struct ib_uverbs_attr * 168 | _ioctl_next_attr(struct ibv_command_buffer *cmd, uint16_t attr_id) 169 | { 170 | struct ib_uverbs_attr *attr; 171 | 172 | assert(cmd->next_attr < cmd->last_attr); 173 | attr = cmd->next_attr++; 174 | 175 | *attr = (struct ib_uverbs_attr){ 176 | .attr_id = attr_id, 177 | /* 178 | * All attributes default to mandatory. Wrapper the fill_* 179 | * call in attr_optional() to make it optional. 
180 | */ 181 | .flags = UVERBS_ATTR_F_MANDATORY, 182 | }; 183 | 184 | return attr; 185 | } 186 | 187 | /* 188 | * This construction is insane, an expression with a side effect that returns 189 | * from the calling function, but it is a non-invasive way to get the compiler 190 | * to elide the IOCTL support in the backwards compat command functions 191 | * without disturbing native ioctl support. 192 | * 193 | * A command function will set last_attr on the stack to NULL, and if it is 194 | * coded properly, the compiler will prove that last_attr is never changed and 195 | * elide the function. Unfortunately this penalizes native ioctl uses with the 196 | * extra if overhead. 197 | * 198 | * For this reason, _ioctl_next_attr must never be called outside a fill 199 | * function. 200 | */ 201 | #if VERBS_WRITE_ONLY 202 | #define _ioctl_next_attr(cmd, attr_id) \ 203 | ({ \ 204 | if (!((cmd)->last_attr)) \ 205 | return NULL; \ 206 | _ioctl_next_attr(cmd, attr_id); \ 207 | }) 208 | #endif 209 | 210 | /* Make the attribute optional. */ 211 | static inline struct ib_uverbs_attr *attr_optional(struct ib_uverbs_attr *attr) 212 | { 213 | attr->flags &= ~UVERBS_ATTR_F_MANDATORY; 214 | return attr; 215 | } 216 | 217 | /* Send attributes of kernel type UVERBS_ATTR_TYPE_IDR */ 218 | static inline struct ib_uverbs_attr * 219 | fill_attr_in_obj(struct ibv_command_buffer *cmd, uint16_t attr_id, uint32_t idr) 220 | { 221 | struct ib_uverbs_attr *attr = _ioctl_next_attr(cmd, attr_id); 222 | 223 | /* UVERBS_ATTR_TYPE_IDR uses a 64 bit value for the idr # */ 224 | attr->data = idr; 225 | return attr; 226 | } 227 | 228 | static inline struct ib_uverbs_attr * 229 | fill_attr_out_obj(struct ibv_command_buffer *cmd, uint16_t attr_id) 230 | { 231 | return fill_attr_in_obj(cmd, attr_id, 0); 232 | } 233 | 234 | static inline uint32_t read_attr_obj(uint16_t attr_id, 235 | struct ib_uverbs_attr *attr) 236 | { 237 | assert(attr->attr_id == attr_id); 238 | return attr->data; 239 | } 240 | 241 | /* Send attributes of kernel type UVERBS_ATTR_TYPE_PTR_IN */ 242 | static inline struct ib_uverbs_attr * 243 | fill_attr_in(struct ibv_command_buffer *cmd, uint16_t attr_id, const void *data, 244 | size_t len) 245 | { 246 | struct ib_uverbs_attr *attr = _ioctl_next_attr(cmd, attr_id); 247 | 248 | assert(len <= UINT16_MAX); 249 | 250 | attr->len = len; 251 | if (len <= sizeof(uint64_t)) 252 | memcpy(&attr->data, data, len); 253 | else 254 | attr->data = ioctl_ptr_to_u64(data); 255 | 256 | return attr; 257 | } 258 | 259 | #define fill_attr_in_ptr(cmd, attr_id, ptr) \ 260 | fill_attr_in(cmd, attr_id, ptr, sizeof(*ptr)) 261 | 262 | /* Send attributes of various inline kernel types */ 263 | 264 | static inline struct ib_uverbs_attr * 265 | fill_attr_in_uint64(struct ibv_command_buffer *cmd, uint16_t attr_id, 266 | uint64_t data) 267 | { 268 | struct ib_uverbs_attr *attr = _ioctl_next_attr(cmd, attr_id); 269 | 270 | attr->len = sizeof(data); 271 | attr->data = data; 272 | 273 | return attr; 274 | } 275 | 276 | static inline struct ib_uverbs_attr * 277 | fill_attr_in_uint32(struct ibv_command_buffer *cmd, uint16_t attr_id, 278 | uint32_t data) 279 | { 280 | struct ib_uverbs_attr *attr = _ioctl_next_attr(cmd, attr_id); 281 | 282 | attr->len = sizeof(data); 283 | memcpy(&attr->data, &data, sizeof(data)); 284 | 285 | return attr; 286 | } 287 | 288 | static inline struct ib_uverbs_attr * 289 | fill_attr_in_fd(struct ibv_command_buffer *cmd, uint16_t attr_id, int fd) 290 | { 291 | struct ib_uverbs_attr *attr; 292 | 293 | if (fd == -1) 294 | 
return NULL; 295 | 296 | attr = _ioctl_next_attr(cmd, attr_id); 297 | /* UVERBS_ATTR_TYPE_FD uses a 64 bit value for the idr # */ 298 | attr->data = fd; 299 | return attr; 300 | } 301 | 302 | static inline struct ib_uverbs_attr * 303 | fill_attr_out_fd(struct ibv_command_buffer *cmd, uint16_t attr_id, int fd) 304 | { 305 | struct ib_uverbs_attr *attr = _ioctl_next_attr(cmd, attr_id); 306 | 307 | attr->data = 0; 308 | return attr; 309 | } 310 | 311 | static inline int read_attr_fd(uint16_t attr_id, struct ib_uverbs_attr *attr) 312 | { 313 | assert(attr->attr_id == attr_id); 314 | /* The kernel cannot fail to create a FD here, it never returns -1 */ 315 | return attr->data; 316 | } 317 | 318 | /* Send attributes of kernel type UVERBS_ATTR_TYPE_PTR_OUT */ 319 | static inline struct ib_uverbs_attr * 320 | fill_attr_out(struct ibv_command_buffer *cmd, uint16_t attr_id, void *data, 321 | size_t len) 322 | { 323 | struct ib_uverbs_attr *attr = _ioctl_next_attr(cmd, attr_id); 324 | 325 | assert(len <= UINT16_MAX); 326 | attr->len = len; 327 | attr->data = ioctl_ptr_to_u64(data); 328 | 329 | return attr; 330 | } 331 | 332 | #define fill_attr_out_ptr(cmd, attr_id, ptr) \ 333 | fill_attr_out(cmd, attr_id, ptr, sizeof(*(ptr))) 334 | 335 | static inline size_t __check_divide(size_t val, unsigned int div) 336 | { 337 | assert(val % div == 0); 338 | return val / div; 339 | } 340 | 341 | static inline struct ib_uverbs_attr * 342 | fill_attr_in_enum(struct ibv_command_buffer *cmd, uint16_t attr_id, 343 | uint8_t elem_id, const void *data, size_t len) 344 | { 345 | struct ib_uverbs_attr *attr; 346 | 347 | attr = fill_attr_in(cmd, attr_id, data, len); 348 | attr->attr_data.enum_data.elem_id = elem_id; 349 | 350 | return attr; 351 | } 352 | 353 | #endif 354 | -------------------------------------------------------------------------------- /src/devx_priv.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) Mellanox Technologies Ltd, 2001-2018. ALL RIGHTS RESERVED. 3 | * 4 | * See file LICENSE for terms. 5 | */ 6 | 7 | #ifndef __DEVX_PRIV_H__ 8 | #define __DEVX_PRIV_H__ 9 | 10 | #include 11 | 12 | struct devx_uar { 13 | void *reg; 14 | uint32_t uuarn; 15 | uint8_t *uar; 16 | int used; 17 | }; 18 | 19 | struct devx_db_page; 20 | 21 | struct devx_context { 22 | int cmd_fd; 23 | size_t page_size; 24 | int num_uars_per_page; 25 | int cache_line_size; 26 | uint32_t num_uars; 27 | struct devx_uar *uars; 28 | struct devx_db_page *db_list; 29 | char ibdev_path[PATH_MAX]; 30 | }; 31 | 32 | struct devx_obj_handle { 33 | struct devx_context *ctx; 34 | uint32_t handle; 35 | }; 36 | 37 | #ifndef MLX5_ABI_H 38 | enum { 39 | MLX5_NUM_NON_FP_BFREGS_PER_UAR = 2, 40 | MLX5_ADAPTER_PAGE_SIZE = 4096, 41 | MLX5_MMAP_ALLOC_WC = 6, 42 | }; 43 | #endif 44 | 45 | #endif 46 | -------------------------------------------------------------------------------- /src/devx_prm.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) Mellanox Technologies Ltd, 2001-2018. ALL RIGHTS RESERVED. 3 | * 4 | * See file LICENSE for terms. 
353 | #endif
354 | 
--------------------------------------------------------------------------------
/src/devx_priv.h:
--------------------------------------------------------------------------------
1 | /*
2 |  * Copyright (C) Mellanox Technologies Ltd, 2001-2018. ALL RIGHTS RESERVED.
3 |  *
4 |  * See file LICENSE for terms.
5 |  */
6 | 
7 | #ifndef __DEVX_PRIV_H__
8 | #define __DEVX_PRIV_H__
9 | 
10 | #include 
11 | 
12 | struct devx_uar {
13 | 	void *reg;
14 | 	uint32_t uuarn;
15 | 	uint8_t *uar;
16 | 	int used;
17 | };
18 | 
19 | struct devx_db_page;
20 | 
21 | struct devx_context {
22 | 	int cmd_fd;
23 | 	size_t page_size;
24 | 	int num_uars_per_page;
25 | 	int cache_line_size;
26 | 	uint32_t num_uars;
27 | 	struct devx_uar *uars;
28 | 	struct devx_db_page *db_list;
29 | 	char ibdev_path[PATH_MAX];
30 | };
31 | 
32 | struct devx_obj_handle {
33 | 	struct devx_context *ctx;
34 | 	uint32_t handle;
35 | };
36 | 
37 | #ifndef MLX5_ABI_H
38 | enum {
39 | 	MLX5_NUM_NON_FP_BFREGS_PER_UAR = 2,
40 | 	MLX5_ADAPTER_PAGE_SIZE = 4096,
41 | 	MLX5_MMAP_ALLOC_WC = 6,
42 | };
43 | #endif
44 | 
45 | #endif
46 | 
--------------------------------------------------------------------------------
/src/devx_prm.h:
--------------------------------------------------------------------------------
1 | /*
2 |  * Copyright (C) Mellanox Technologies Ltd, 2001-2018. ALL RIGHTS RESERVED.
3 |  *
4 |  * See file LICENSE for terms.
5 |  */
6 | 
7 | #ifndef __DEVX_PRM_H__
8 | #define __DEVX_PRM_H__
9 | 
10 | #include 
11 | 
12 | #define u8 uint8_t
13 | #define BIT(n) (1<<(n))
14 | #define __packed
15 | #include 
16 | #include "linux/mlx5/mlx5_ifc.h"
17 | 
18 | struct mlx5_ifc_pas_umem_bits {
19 | 	u8 reserved_at_0[0x20];
20 | 	u8 pas_umem_id[0x20];
21 | 	u8 pas_umem_off[0x40];
22 | 	u8 reserved_at_80[0x20];
23 | 	u8 dbr_umem_id[0x20];
24 | 	u8 dbr_umem_off[0x40];
25 | };
26 | 
27 | #define __devx_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0)
28 | #define __devx_bit_sz(typ, fld) sizeof(__devx_nullp(typ)->fld)
29 | #define __devx_bit_off(typ, fld) (offsetof(struct mlx5_ifc_##typ##_bits, fld))
30 | #define __devx_dw_off(typ, fld) (__devx_bit_off(typ, fld) / 32)
31 | #define __devx_64_off(typ, fld) (__devx_bit_off(typ, fld) / 64)
32 | #define __devx_dw_bit_off(typ, fld) (32 - __devx_bit_sz(typ, fld) - (__devx_bit_off(typ, fld) & 0x1f))
33 | #define __devx_mask(typ, fld) ((uint32_t)((1ull << __devx_bit_sz(typ, fld)) - 1))
34 | #define __devx_dw_mask(typ, fld) (__devx_mask(typ, fld) << __devx_dw_bit_off(typ, fld))
35 | #define __devx_st_sz_bits(typ) sizeof(struct mlx5_ifc_##typ##_bits)
36 | 
37 | #define DEVX_FLD_SZ_BYTES(typ, fld) (__devx_bit_sz(typ, fld) / 8)
38 | #define DEVX_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
39 | #define DEVX_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
40 | #define DEVX_ST_SZ_QW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 64)
41 | #define DEVX_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
42 | #define DEVX_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32)
43 | #define DEVX_BYTE_OFF(typ, fld) (__devx_bit_off(typ, fld) / 8)
44 | #define DEVX_ADDR_OF(typ, p, fld) ((char *)(p) + DEVX_BYTE_OFF(typ, fld))
45 | 
46 | #define BUILD_BUG_ON(a) /*TODO*/
47 | /* insert a value to a struct */
48 | #define DEVX_SET(typ, p, fld, v) do { \
49 | 	uint32_t _v = v; \
50 | 	BUILD_BUG_ON(__devx_st_sz_bits(typ) % 32); \
51 | 	*((__be32 *)(p) + __devx_dw_off(typ, fld)) = \
52 | 	htobe32((be32toh(*((__be32 *)(p) + __devx_dw_off(typ, fld))) & \
53 | 		 (~__devx_dw_mask(typ, fld))) | (((_v) & __devx_mask(typ, fld)) \
54 | 		 << __devx_dw_bit_off(typ, fld))); \
55 | } while (0)
56 | 
57 | #define DEVX_GET(typ, p, fld) ((be32toh(*((__be32 *)(p) +\
58 | __devx_dw_off(typ, fld))) >> __devx_dw_bit_off(typ, fld)) & \
59 | __devx_mask(typ, fld))
60 | 
61 | 
62 | #define __DEVX_SET64(typ, p, fld, v) do { \
63 | 	BUILD_BUG_ON(__devx_bit_sz(typ, fld) != 64); \
64 | 	*((__be64 *)(p) + __devx_64_off(typ, fld)) = htobe64(v); \
65 | } while (0)
66 | 
67 | #define DEVX_SET64(typ, p, fld, v) do { \
68 | 	BUILD_BUG_ON(__devx_bit_off(typ, fld) % 64); \
69 | 	__DEVX_SET64(typ, p, fld, v); \
70 | } while (0)
71 | 
72 | #define DEVX_GET64(typ, p, fld) \
73 | 	be64toh(*((__be64 *)(p) + __devx_64_off(typ, fld)))
74 | 
75 | #define DEVX_SET_TO_ONES(typ, p, fld) do { \
76 | 	BUILD_BUG_ON(__devx_st_sz_bits(typ) % 32); \
77 | 	*((__be32 *)(p) + __devx_dw_off(typ, fld)) = \
78 | 	htobe32((be32toh(*((__be32 *)(p) + __devx_dw_off(typ, fld))) & \
79 | 		 (~__devx_dw_mask(typ, fld))) | ((__devx_mask(typ, fld)) \
80 | 		 << __devx_dw_bit_off(typ, fld))); \
81 | } while (0)
82 | 
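/*
 * Usage sketch, as in tests/test.cc: command buffers are plain uint32_t
 * arrays sized from the mlx5_ifc layout, and fields are set and read by
 * name, with the macros doing the big-endian read-modify-write on the
 * containing 32-bit word:
 *
 *	uint32_t in[DEVX_ST_SZ_DW(query_hca_cap_in)] = {0};
 *	uint32_t out[DEVX_ST_SZ_DW(query_hca_cap_out)] = {0};
 *	DEVX_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
 *	... execute the command ...
 *	port = DEVX_GET(query_hca_cap_out, out, capability.cmd_hca_cap.port_type);
 */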
83 | #endif
84 | 
--------------------------------------------------------------------------------
/src/devx_uar.c:
--------------------------------------------------------------------------------
1 | /*
2 |  * Copyright (C) Mellanox Technologies Ltd, 2001-2018. ALL RIGHTS RESERVED.
3 |  *
4 |  * See file LICENSE for terms.
5 |  */
6 | 
7 | #include 
8 | #include 
9 | #include 
10 | #include "devx_ioctl.h"
11 | #include 
12 | #include "devx.h"
13 | #include "devx_priv.h"
14 | 
15 | int devx_alloc_uar(void *context, uint32_t *idx, void **addr, off_t *off)
16 | {
17 | 	struct devx_context *ctx = (struct devx_context *)context;
18 | 	DECLARE_COMMAND_BUFFER(cmd,
19 | 			       MLX5_IB_OBJECT_DEVX,
20 | 			       MLX5_IB_METHOD_DEVX_QUERY_UAR,
21 | 			       2);
22 | 	int index = -1;
23 | 	uint32_t uar_page_index;
24 | 	int mmap_index;
25 | 	off_t offset;
26 | 	uint32_t i;
27 | 	int ret;
28 | 
29 | 	for (i = 0; i < ctx->num_uars; i++) {
30 | 		if (!ctx->uars[i].used) {
31 | 			index = i;
32 | 			break;
33 | 		}
34 | 	}
35 | 
36 | 	if (index < 0)
37 | 		return -ENOENT;
38 | 
39 | 	ctx->uars[index].used = 1;
40 | 
41 | 	uar_page_index = index / ctx->num_uars_per_page;
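	/*
	 * The mmap offset is a cookie the kernel decodes: MLX5_MMAP_ALLOC_WC
	 * in bits 8-15 selects the write-combining UAR allocation command,
	 * the low byte of the UAR page index sits in bits 0-7 and its high
	 * bits start at bit 16; the whole cookie is then scaled by the page
	 * size, since mmap offsets must be page aligned.
	 */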
42 | 	offset = (MLX5_MMAP_ALLOC_WC << 8);
43 | 	offset |= (uar_page_index & 0xff) | ((uar_page_index >> 8) << 16);
44 | 	offset *= ctx->page_size;
45 | 
46 | 	if (ctx->uars[index].reg)
47 | 		goto ret;
48 | 
49 | 	mmap_index = uar_page_index * ctx->num_uars_per_page;
50 | 
51 | 	if (ctx->uars[mmap_index].uar)
52 | 		goto set_reg;
53 | 
54 | 	ctx->uars[mmap_index].uar = mmap(*addr, ctx->page_size,
55 | 					 PROT_WRITE, MAP_SHARED,
56 | 					 ctx->cmd_fd, offset);
57 | 	if (ctx->uars[mmap_index].uar == MAP_FAILED) {
58 | 		ctx->uars[mmap_index].uar = NULL;
59 | 		ret = -errno;
60 | 		goto err;
61 | 	}
62 | 
63 | set_reg:
64 | 	fill_attr_in_uint32(cmd, MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX,
65 | 			    index * MLX5_NUM_NON_FP_BFREGS_PER_UAR);
66 | 	fill_attr_out(cmd, MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
67 | 		      &ctx->uars[index].uuarn, sizeof(uint32_t));
68 | 	ret = execute_ioctl(ctx->cmd_fd, cmd);
69 | 	if (ret)
70 | 		goto err;
71 | 
72 | 	ctx->uars[index].reg = ctx->uars[mmap_index].uar +
73 | 		index % ctx->num_uars_per_page * MLX5_ADAPTER_PAGE_SIZE;
74 | 
75 | ret:
76 | 	*idx = ctx->uars[index].uuarn;
77 | 	*addr = ctx->uars[index].reg;
78 | 	if (off)
79 | 		*off = offset;
80 | 	return 0;
81 | err:
82 | 	ctx->uars[index].used = 0;
83 | 	return ret;
84 | }
85 | 
86 | void devx_free_uar(void *context, void *addr)
87 | {
88 | 	struct devx_context *ctx = (struct devx_context *)context;
89 | 	uint32_t i;
90 | 
91 | 	for (i = 0; i < ctx->num_uars; i++)
92 | 		if (ctx->uars[i].reg == addr)
93 | 			ctx->uars[i].used = 0;
94 | }
95 | 
96 | 
--------------------------------------------------------------------------------
/src/devx_verbs.c:
--------------------------------------------------------------------------------
1 | /*
2 |  * Copyright (C) Mellanox Technologies Ltd, 2001-2018. ALL RIGHTS RESERVED.
3 |  *
4 |  * See file LICENSE for terms.
5 |  */
6 | 
7 | #include 
8 | #include "infiniband/verbs.h"
9 | #include "../providers/mlx5/mlx5.h"
10 | #include "devx.h"
11 | #include "devx_priv.h"
12 | 
13 | void *devx_from_ibv(struct ibv_context *ibctx)
14 | {
15 | 	struct devx_context *ctx;
16 | 	struct mlx5_context *mctx = to_mctx(ibctx);
17 | 
18 | 	ctx = calloc(1, sizeof(*ctx));
19 | 	if (!ctx)
20 | 		return NULL;
21 | 
22 | 	ctx->cmd_fd = dup(ibctx->cmd_fd);
23 | 	ctx->page_size = to_mdev(ibctx->device)->page_size;
24 | 	ctx->num_uars_per_page = mctx->num_uars_per_page;
25 | 	ctx->cache_line_size = mctx->cache_line_size;
26 | 	ctx->num_uars = mctx->num_dyn_bfregs /
27 | 		MLX5_NUM_NON_FP_BFREGS_PER_UAR;
28 | 
29 | 	ctx->uars = calloc(ctx->num_uars, sizeof(*ctx->uars));
30 | 
31 | 	if (!ctx->uars)
32 | 		goto err;
33 | 
34 | 	return ctx;
35 | 
36 | err:
37 | 	free(ctx->uars);
38 | 	free(ctx);
39 | 	return NULL;
40 | }
41 | 
42 | 
--------------------------------------------------------------------------------
/src/devx_verbs.h:
--------------------------------------------------------------------------------
1 | /*
2 |  * Copyright (C) Mellanox Technologies Ltd, 2001-2018. ALL RIGHTS RESERVED.
3 |  *
4 |  * See file LICENSE for terms.
5 |  */
6 | 
7 | #ifndef __DEVX_VERBS_H__
8 | #define __DEVX_VERBS_H__
9 | 
10 | #include 
11 | 
12 | #ifdef __cplusplus
13 | extern "C" {
14 | #endif
15 | 
16 | void *devx_from_ibv(struct ibv_context *ibctx);
17 | 
18 | #ifdef __cplusplus
19 | }
20 | #endif
21 | 
22 | #endif
23 | 
--------------------------------------------------------------------------------
/tests/test.cc:
--------------------------------------------------------------------------------
1 | #include 
2 | #include 
3 | #include 
4 | #include 
5 | #include 
6 | #include 
7 | #include 
8 | #include 
9 | 
10 | #include 
11 | 
12 | #include "devx.h"
13 | #include "devx_prm.h"
14 | #include "devx_priv.h"
15 | 
16 | enum {
17 | 	MLX5_HCA_CAP_OPMOD_GET_MAX = 0,
18 | 	MLX5_HCA_CAP_OPMOD_GET_CUR = 1,
19 | };
20 | 
21 | enum {
22 | 	MLX5_CAP_GENERAL = 0,
23 | 	MLX5_CAP_ETHERNET_OFFLOADS,
24 | 	MLX5_CAP_ODP,
25 | 	MLX5_CAP_ATOMIC,
26 | 	MLX5_CAP_ROCE,
27 | 	MLX5_CAP_IPOIB_OFFLOADS,
28 | 	MLX5_CAP_IPOIB_ENHANCED_OFFLOADS,
29 | 	MLX5_CAP_FLOW_TABLE,
30 | 	MLX5_CAP_ESWITCH_FLOW_TABLE,
31 | 	MLX5_CAP_ESWITCH,
32 | 	MLX5_CAP_RESERVED,
33 | 	MLX5_CAP_VECTOR_CALC,
34 | 	MLX5_CAP_QOS,
35 | 	MLX5_CAP_FPGA,
36 | };
37 | 
38 | 
39 | int query_device(void *ctx);
40 | int query_device(void *ctx) {
41 | 	uint32_t in[DEVX_ST_SZ_DW(query_hca_cap_in)] = {0};
42 | 	uint32_t out[DEVX_ST_SZ_DW(query_hca_cap_out)] = {0};
43 | 	int ret;
44 | 
45 | 	DEVX_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
46 | 	DEVX_SET(query_hca_cap_in, in, op_mod, MLX5_HCA_CAP_OPMOD_GET_MAX | (MLX5_CAP_GENERAL << 1));
47 | 	ret = devx_cmd(ctx, in, sizeof(in), out, sizeof(out));
48 | 	if (ret)
49 | 		return ret;
50 | 	return DEVX_GET(query_hca_cap_out, out,
51 | 			capability.cmd_hca_cap.port_type);
52 | }
53 | 
54 | int alloc_pd(void *ctx);
55 | int alloc_pd(void *ctx) {
56 | 	uint32_t in[DEVX_ST_SZ_DW(alloc_pd_in)] = {0};
57 | 	uint32_t out[DEVX_ST_SZ_DW(alloc_pd_out)] = {0};
58 | 	struct devx_obj_handle *pd;
59 | 
60 | 	DEVX_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
61 | 	pd = devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
62 | 	if (!pd)
63 | 		return -1;
64 | 
65 | 	return DEVX_GET(alloc_pd_out, out, pd);
66 | }
67 | 
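/*
 * reg_mr() builds an MKey without the verbs MR path: the buffer is first
 * registered as a DEVX umem, and CREATE_MKEY then references that umem by
 * id (mkey_umem_id) instead of passing a list of physical addresses. The
 * returned key combines the mkey_index with the variant byte 0x42
 * programmed into mkey_7_0 below.
 */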
68 | int reg_mr(void *ctx, int pd, void *buff, size_t size);
69 | int reg_mr(void *ctx, int pd, void *buff, size_t size) {
70 | 	uint32_t in[DEVX_ST_SZ_DW(create_mkey_in)] = {0};
71 | 	uint32_t out[DEVX_ST_SZ_DW(create_mkey_out)] = {0};
72 | 	struct devx_obj_handle *mem, *mr;
73 | 	uint32_t mem_id;
74 | 
75 | 	mem = devx_umem_reg(ctx, buff, size, 7, &mem_id);
76 | 	if (!mem)
77 | 		return 0;
78 | 
79 | 	DEVX_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
80 | 	DEVX_SET(create_mkey_in, in, memory_key_mkey_entry.access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
81 | 	DEVX_SET(create_mkey_in, in, memory_key_mkey_entry.a, 1);
82 | 	DEVX_SET(create_mkey_in, in, memory_key_mkey_entry.rw, 1);
83 | 	DEVX_SET(create_mkey_in, in, memory_key_mkey_entry.rr, 1);
84 | 	DEVX_SET(create_mkey_in, in, memory_key_mkey_entry.lw, 1);
85 | 	DEVX_SET(create_mkey_in, in, memory_key_mkey_entry.lr, 1);
86 | 	DEVX_SET64(create_mkey_in, in, memory_key_mkey_entry.start_addr, (intptr_t)buff);
87 | 	DEVX_SET64(create_mkey_in, in, memory_key_mkey_entry.len, size);
88 | 	DEVX_SET(create_mkey_in, in, memory_key_mkey_entry.pd, pd);
89 | 	DEVX_SET(create_mkey_in, in, memory_key_mkey_entry.translations_octword_size, 1);
90 | 	DEVX_SET(create_mkey_in, in, memory_key_mkey_entry.log_entity_size, 12);
91 | 	DEVX_SET(create_mkey_in, in, memory_key_mkey_entry.qpn, 0xffffff);
92 | 	DEVX_SET(create_mkey_in, in, memory_key_mkey_entry.mkey_7_0, 0x42);
93 | 	DEVX_SET(create_mkey_in, in, translations_octword_actual_size, 1);
94 | 	DEVX_SET(create_mkey_in, in, pg_access, 1);
95 | 	DEVX_SET(create_mkey_in, in, mkey_umem_id, mem_id);
96 | 
97 | 	mr = devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
98 | 	if (!mr)
99 | 		return 0;
100 | 	return DEVX_GET(create_mkey_out, out, mkey_index) << 8 | 0x42;
101 | }
102 | 
103 | struct mlx5_eqe_comp {
104 | 	__be32 reserved[6];
105 | 	__be32 cqn;
106 | };
107 | 
108 | union ev_data {
109 | 	__be32 raw[7];
110 | 	struct mlx5_eqe_comp comp;
111 | };
112 | 
113 | struct mlx5_eqe {
114 | 	u8 rsvd0;
115 | 	u8 type;
116 | 	u8 rsvd1;
117 | 	u8 sub_type;
118 | 	__be32 rsvd2[7];
119 | 	union ev_data data;
120 | 	__be16 rsvd3;
121 | 	u8 signature;
122 | 	u8 owner;
123 | };
124 | 
125 | int create_eq(void *ctx, void **buff_out, uint32_t uar_id);
126 | int create_eq(void *ctx, void **buff_out, uint32_t uar_id)
127 | {
128 | 	uint32_t in[DEVX_ST_SZ_DW(create_eq_in) + DEVX_ST_SZ_DW(pas_umem)] = {0};
129 | 	uint32_t out[DEVX_ST_SZ_DW(create_eq_out)] = {0};
130 | 	struct mlx5_eqe *eqe;
131 | 	struct devx_obj_handle *pas, *eq;
132 | 	uint32_t pas_id;
133 | 	uint8_t *buff;
134 | 	void *eqc, *up;
135 | 	int i;
136 | 
137 | 	buff = (uint8_t *)memalign(0x1000, 0x1000);
138 | 	memset(buff, 0, 0x1000);
139 | 	for (i = 0; i < (1<<6); i++) {
140 | 		eqe = (struct mlx5_eqe *)(buff + i * sizeof(*eqe));
141 | 		eqe->owner = 1;
142 | 	}
143 | 
144 | 	pas = devx_umem_reg(ctx, buff, 0x1000, 7, &pas_id);
145 | 	if (!pas)
146 | 		return 0;
147 | 
148 | 	DEVX_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
149 | 
150 | 	eqc = DEVX_ADDR_OF(create_eq_in, in, eq_context_entry);
151 | 	DEVX_SET(eqc, eqc, log_eq_size, 6);
152 | 	DEVX_SET(eqc, eqc, uar_page, uar_id);
153 | 
154 | 	up = DEVX_ADDR_OF(create_eq_in, in, pas);
155 | 	DEVX_SET(pas_umem, up, pas_umem_id, pas_id);
156 | 
157 | 	eq = devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
158 | 	if (!eq)
159 | 		return 0;
160 | 
161 | 	*buff_out = buff;
162 | 	return DEVX_GET(create_eq_out, out, eq_number);
163 | }
164 | 
165 | int create_cq(void *ctx, void **buff_out, uint32_t uar_id, uint32_t **dbr_out, uint32_t eq);
166 | int create_cq(void *ctx, void **buff_out, uint32_t uar_id, uint32_t **dbr_out, uint32_t eq) {
167 | 	uint32_t in[DEVX_ST_SZ_DW(create_cq_in)] = {0};
168 | 	uint32_t out[DEVX_ST_SZ_DW(create_cq_out)] = {0};
169 | 	struct mlx5_cqe64 *cqe;
170 | 	struct devx_obj_handle *pas, *cq;
171 | 	uint32_t pas_id, dbr_id;
172 | 	uint8_t *buff;
173 | 	void *dbr, *uar_ptr = NULL; /* NULL so devx_alloc_uar() does not pass a garbage hint to mmap() */
174 | 	size_t dbr_off;
175 | 	int ret = 0;
176 | 	int i;
177 | 
178 | 	buff = (uint8_t *)memalign(0x1000, 0x1000);
179 | 	memset(buff, 0, 0x1000);
180 | 	for (i = 0; i < (1<<5); i++) {
181 | 		cqe = (struct mlx5_cqe64 *)(buff + i * sizeof(*cqe));
182 | 		cqe->op_own = MLX5_CQE_INVALID << 4;
183 | 	}
184 | 
185 | 	if (!eq)
186 | 		ret = devx_query_eqn(ctx, 0, &eq);
187 | 	if (!uar_id)
188 | 		ret = devx_alloc_uar(ctx, &uar_id, &uar_ptr, NULL);
189 | 	pas = devx_umem_reg(ctx, buff, 0x1000, 7, &pas_id);
190 | 	dbr = devx_alloc_dbrec(ctx, &dbr_id, &dbr_off);
191 | 
192 | 	if (ret || !pas || !dbr)
193 | 		return 0;
194 | 
195 | 	DEVX_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ);
196 | 	DEVX_SET(create_cq_in, in, cq_context.c_eqn, eq);
197 | 	DEVX_SET(create_cq_in, in, cq_context.cqe_sz, 0);
198 | 	DEVX_SET(create_cq_in, in, cq_context.log_cq_size, 5);
199 | 	DEVX_SET(create_cq_in, in, cq_context.uar_page, uar_id);
200 | 	DEVX_SET(create_cq_in, in, cq_umem_id, pas_id);
201 | 	DEVX_SET(create_cq_in, in, cq_context.dbr_umem_id, dbr_id);
202 | 	DEVX_SET64(create_cq_in, in, cq_context.dbr_addr, dbr_off);
203 | 
204 | 	cq = devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
205 | 	if (!cq)
206 | 		return 0;
207 | 
208 | 	if (dbr_out)
209 | 		*dbr_out = (uint32_t *)dbr;
210 | 	if (buff_out)
211 | 		*buff_out = buff;
212 | 
213 | 	return DEVX_GET(create_cq_out, out, cqn);
214 | }
215 | 
216 | int query_lid(void *ctx) {
217 | 	uint32_t in[DEVX_ST_SZ_DW(query_hca_vport_context_in)] = {0};
218 | 	uint32_t out[DEVX_ST_SZ_DW(query_hca_vport_context_out)] = {0};
219 | 	int err;
220 | 
221 | 	DEVX_SET(query_hca_vport_context_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);
222 | 	DEVX_SET(query_hca_vport_context_in, in, port_num, 1);
223 | 
224 | 	err = devx_cmd(ctx, in, sizeof(in), out, sizeof(out));
225 | 	if (err)
226 | 		return -1;
227 | 
228 | 	return DEVX_GET(query_hca_vport_context_out, out, hca_vport_context.lid);
229 | }
230 | 
231 | enum mlx5_qp_optpar {
232 | 	MLX5_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0,
233 | 	MLX5_QP_OPTPAR_RRE = 1 << 1,
234 | 	MLX5_QP_OPTPAR_RAE = 1 << 2,
235 | 	MLX5_QP_OPTPAR_RWE = 1 << 3,
236 | 	MLX5_QP_OPTPAR_PKEY_INDEX = 1 << 4,
237 | 	MLX5_QP_OPTPAR_Q_KEY = 1 << 5,
238 | 	MLX5_QP_OPTPAR_RNR_TIMEOUT = 1 << 6,
239 | 	MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7,
240 | 	MLX5_QP_OPTPAR_SRA_MAX = 1 << 8,
241 | 	MLX5_QP_OPTPAR_RRA_MAX = 1 << 9,
242 | 	MLX5_QP_OPTPAR_PM_STATE = 1 << 10,
243 | 	MLX5_QP_OPTPAR_RETRY_COUNT = 1 << 12,
244 | 	MLX5_QP_OPTPAR_RNR_RETRY = 1 << 13,
245 | 	MLX5_QP_OPTPAR_ACK_TIMEOUT = 1 << 14,
246 | 	MLX5_QP_OPTPAR_PRI_PORT = 1 << 16,
247 | 	MLX5_QP_OPTPAR_SRQN = 1 << 18,
248 | 	MLX5_QP_OPTPAR_CQN_RCV = 1 << 19,
249 | 	MLX5_QP_OPTPAR_DC_HS = 1 << 20,
250 | 	MLX5_QP_OPTPAR_DC_KEY = 1 << 21,
251 | };
252 | 
253 | enum mlx5_qp_state {
254 | 	MLX5_QP_STATE_RST = 0,
255 | 	MLX5_QP_STATE_INIT = 1,
256 | 	MLX5_QP_STATE_RTR = 2,
257 | 	MLX5_QP_STATE_RTS = 3,
258 | 	MLX5_QP_STATE_SQER = 4,
259 | 	MLX5_QP_STATE_SQD = 5,
260 | 	MLX5_QP_STATE_ERR = 6,
261 | 	MLX5_QP_STATE_SQ_DRAINING = 7,
262 | 	MLX5_QP_STATE_SUSPENDED = 9,
263 | 	MLX5_QP_NUM_STATE,
264 | 	MLX5_QP_STATE,
265 | 	MLX5_QP_STATE_BAD,
266 | };
267 | 
268 | enum {
269 | 	MLX5_QP_ST_RC = 0x0,
270 | 	MLX5_QP_ST_UC = 0x1,
271 | 	MLX5_QP_ST_UD = 0x2,
272 | 	MLX5_QP_ST_XRC = 0x3,
273 | 	MLX5_QP_ST_MLX = 0x4,
274 | 	MLX5_QP_ST_DCI = 0x5,
275 | 	MLX5_QP_ST_DCT = 0x6,
276 | 	MLX5_QP_ST_QP0 = 0x7,
277 | 	MLX5_QP_ST_QP1 = 0x8,
278 | 	MLX5_QP_ST_RAW_ETHERTYPE = 0x9,
279 | 	MLX5_QP_ST_RAW_IPV6 = 0xa,
280 | 	MLX5_QP_ST_SNIFFER = 0xb,
281 | 	MLX5_QP_ST_SYNC_UMR = 0xe,
282 | 	MLX5_QP_ST_PTP_1588 = 0xd,
283 | 	MLX5_QP_ST_REG_UMR = 0xc,
284 | 	MLX5_QP_ST_MAX
285 | };
286 | 
287 | enum {
288 | 	MLX5_QP_PM_MIGRATED = 0x3,
289 | 	MLX5_QP_PM_ARMED = 0x0,
290 | 	MLX5_QP_PM_REARM = 0x1
291 | };
292 | 
293 | 
294 | enum {
295 | 	MLX5_RES_SCAT_DATA32_CQE = 0x1,
296 | 	MLX5_RES_SCAT_DATA64_CQE = 0x2,
297 | 	MLX5_REQ_SCAT_DATA32_CQE = 0x11,
298 | 	MLX5_REQ_SCAT_DATA64_CQE = 0x22,
299 | };
300 | 
301 | #define RQ_SIZE (1 << 6)
302 | #define SQ_SIZE (1 << 6)
303 | #define CQ_SIZE (1 << 6)
304 | #define EQ_SIZE (1 << 6)
305 | 
306 | int create_qp(void *ctx, void **buff_out, int uar_id, uint32_t **dbr_out,
307 | 	      int cqn, int pd, struct devx_obj_handle **q);
308 | int create_qp(void *ctx, void **buff_out, int uar_id, uint32_t **dbr_out,
309 | 	      int cqn, int pd, struct devx_obj_handle **q) {
310 | 	u8 in[DEVX_ST_SZ_BYTES(create_qp_in)] = {0};
311 | 	u8 out[DEVX_ST_SZ_BYTES(create_qp_out)] = {0};
312 | 	struct devx_obj_handle *pas;
313 | 	uint32_t pas_id, dbr_id;
314 | 	void *buff, *uar_ptr = NULL, *dbr, *qpc;
315 | 	size_t dbr_off;
316 | 	int ret;
317 | 
318 | 	buff = memalign(0x1000, 0x2000);
319 | 	memset(buff, 0, 0x2000);
320 | 	pas = devx_umem_reg(ctx, buff, 0x2000, 7, &pas_id);
321 | 	dbr = devx_alloc_dbrec(ctx, &dbr_id, &dbr_off);
322 | 
323 | 	if (!pas || !dbr)
324 | 		return 0;
325 | 
326 | 	DEVX_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
327 | 
328 | 	qpc = DEVX_ADDR_OF(create_qp_in, in, qpc);
329 | 	DEVX_SET(qpc, qpc, st, MLX5_QP_ST_RC);
330 | 	DEVX_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
331 | 	DEVX_SET(qpc, qpc, pd, pd);
332 | 	DEVX_SET(qpc, qpc, uar_page, uar_id);
333 | 	DEVX_SET(qpc, qpc, cqn_snd, cqn);
334 | 	DEVX_SET(qpc, qpc, cqn_rcv, cqn);
335 | 	DEVX_SET(qpc, qpc, log_sq_size, 6);
336 | 	DEVX_SET(qpc, qpc, log_rq_stride, 2);
337 | 	DEVX_SET(qpc, qpc, log_rq_size, 6);
338 | 	DEVX_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA32_CQE);
339 | 	DEVX_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA32_CQE);
340 | 	DEVX_SET(create_qp_in, in, wq_umem_id, pas_id);
341 | 	DEVX_SET(qpc, qpc, dbr_umem_id, dbr_id);
342 | 	DEVX_SET64(qpc, qpc, dbr_addr, dbr_off);
343 | 
344 | 	*q = devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
345 | 	if (!*q)
346 | 		return 0;
347 | 
348 | 	*dbr_out = (uint32_t *)dbr;
349 | 	*buff_out = buff;
350 | 
351 | 	return DEVX_GET(create_qp_out, out, qpn);
352 | }
353 | 
354 | int to_init(struct devx_obj_handle *obj, int qp) {
355 | 	uint32_t in[DEVX_ST_SZ_DW(rst2init_qp_in)] = {0};
356 | 	uint32_t out[DEVX_ST_SZ_DW(rst2init_qp_out)] = {0};
357 | 	void *qpc = DEVX_ADDR_OF(rst2init_qp_in, in, qpc);
358 | 
359 | 	DEVX_SET(rst2init_qp_in, in, opcode, MLX5_CMD_OP_RST2INIT_QP);
360 | 	DEVX_SET(rst2init_qp_in, in, qpn, qp);
361 | 
362 | 	DEVX_SET(qpc, qpc, primary_address_path.vhca_port_num, 1);
363 | 
364 | 	return devx_obj_modify(obj, in, sizeof(in), out, sizeof(out));
365 | }
366 | 
367 | int to_rtr(struct devx_obj_handle *obj, int qp, int type, int lid, uint8_t *gid) {
368 | 	uint32_t in[DEVX_ST_SZ_DW(init2rtr_qp_in)] = {0};
369 | 	uint32_t out[DEVX_ST_SZ_DW(init2rtr_qp_out)] = {0};
370 | 	void *qpc = DEVX_ADDR_OF(init2rtr_qp_in, in, qpc);
371 | 	uint8_t mac[6];
372 | 
373 | 	mac[0] = gid[8] ^ 0x02;
374 | 	mac[1] = gid[9];
375 | 	mac[2] = gid[10];
376 | 	mac[3] = gid[13];
377 | 	mac[4] = gid[14];
378 | 	mac[5] = gid[15];
379 | 
380 | 	DEVX_SET(init2rtr_qp_in, in, opcode, MLX5_CMD_OP_INIT2RTR_QP);
381 | 	DEVX_SET(init2rtr_qp_in, in, qpn, qp);
382 | 
383 | 	DEVX_SET(qpc, qpc, mtu, 2);
384 | 	DEVX_SET(qpc, qpc, log_msg_max, 30);
385 | 	DEVX_SET(qpc, qpc, remote_qpn, qp);
386 | 	if (type) {
387 | 		DEVX_SET(qpc, qpc, primary_address_path.hop_limit, 1);
388 | 		memcpy(DEVX_ADDR_OF(qpc, qpc, primary_address_path.rmac_47_32), mac, 6);
389 | 	} else {
390 | 		DEVX_SET(qpc, qpc, primary_address_path.rlid, lid);
391 | 		DEVX_SET(qpc, qpc, primary_address_path.grh, 1);
392 | 	}
393 | 	memcpy(DEVX_ADDR_OF(qpc, qpc, primary_address_path.rgid_rip), gid,
394 | 	       DEVX_FLD_SZ_BYTES(qpc, primary_address_path.rgid_rip));
395 | 	DEVX_SET(qpc, qpc, primary_address_path.vhca_port_num, 1);
396 | 	DEVX_SET(qpc, qpc, rre, 1);
397 | 	DEVX_SET(qpc, qpc, rwe, 1);
398 | 	DEVX_SET(qpc, qpc, min_rnr_nak, 12);
399 | 
400 | 	return devx_obj_modify(obj, in, sizeof(in), out, sizeof(out));
401 | }
402 | 
403 | int to_rts(struct devx_obj_handle *obj, int qp) {
404 | 	uint32_t in[DEVX_ST_SZ_DW(rtr2rts_qp_in)] = {0};
405 | 	uint32_t out[DEVX_ST_SZ_DW(rtr2rts_qp_out)] = {0};
406 | 	void *qpc = DEVX_ADDR_OF(rtr2rts_qp_in, in, qpc);
407 | 
408 | 	DEVX_SET(rtr2rts_qp_in, in, opcode, MLX5_CMD_OP_RTR2RTS_QP);
409 | 	DEVX_SET(rtr2rts_qp_in, in, qpn, qp);
410 | 
411 | 	DEVX_SET(qpc, qpc, log_ack_req_freq, 8);
412 | 	DEVX_SET(qpc, qpc, retry_count, 7);
413 | 	DEVX_SET(qpc, qpc, rnr_retry, 7);
414 | 	DEVX_SET(qpc, qpc, primary_address_path.ack_timeout, 14);
415 | 
416 | 	return devx_obj_modify(obj, in, sizeof(in), out, sizeof(out));
417 | }
418 | 
419 | int recv(uint8_t *rq, uint32_t *rqi, uint32_t *qp_dbr,
420 | 	 uint32_t mkey, void *addr, size_t size) {
421 | 	struct mlx5_wqe_data_seg *dseg = (struct mlx5_wqe_data_seg *)(rq + *rqi % RQ_SIZE * MLX5_SEND_WQE_BB);
422 | 	mlx5dv_set_data_seg(dseg, size, mkey, (intptr_t)addr);
423 | 	mlx5dv_set_data_seg(dseg + 1, 0, MLX5_INVALID_LKEY, 0);
424 | 	(*rqi)++;
425 | 	asm volatile("" ::: "memory");
426 | 	qp_dbr[MLX5_RCV_DBR] = htobe32(*rqi & 0xffff);
427 | 	return 0;
428 | }
429 | 
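/*
 * Post a SEND WQE and ring the doorbell: write the control and data
 * segments into the SQ buffer, publish the new producer index through the
 * doorbell record, then copy the first 8 bytes of the WQE to the UAR page
 * (offset 0x800) to kick the hardware. The empty asm statements are
 * compiler barriers that keep the stores in this order.
 */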
430 | int xmit(uint8_t *sq, uint32_t *sqi, uint32_t *qp_dbr,
431 | 	 uint32_t mkey, void *addr, size_t size,
432 | 	 void *uar_ptr, uint32_t qp) {
433 | 	struct mlx5_wqe_ctrl_seg *ctrl = (struct mlx5_wqe_ctrl_seg *)(sq + *sqi % SQ_SIZE * MLX5_SEND_WQE_BB);
434 | 	mlx5dv_set_ctrl_seg(ctrl, *sqi, MLX5_OPCODE_SEND, 0,
435 | 			    qp, MLX5_WQE_CTRL_CQ_UPDATE,
436 | 			    2, 0, 0);
437 | 	struct mlx5_wqe_data_seg *dseg = (struct mlx5_wqe_data_seg *)(ctrl + 1);
438 | 	mlx5dv_set_data_seg(dseg, size, mkey, (intptr_t)addr);
439 | 	(*sqi)++;
440 | 	asm volatile("" ::: "memory");
441 | 	qp_dbr[MLX5_SND_DBR] = htobe32(*sqi & 0xffff);
442 | 	asm volatile("" ::: "memory");
443 | 	*(uint64_t *)((uint8_t *)uar_ptr + 0x800) = *(uint64_t *)ctrl;
444 | 	asm volatile("" ::: "memory");
445 | 	return 0;
446 | }
447 | 
448 | enum {
449 | 	MLX5_CQ_SET_CI = 0,
450 | 	MLX5_CQ_ARM_DB = 1,
451 | };
452 | 
453 | int arm_cq(uint32_t cq, uint32_t cqi, uint32_t *cq_dbr, void *uar_ptr) {
454 | 	uint64_t doorbell;
455 | 	uint32_t sn;
456 | 	uint32_t ci;
457 | 	uint32_t cmd;
458 | 
459 | 	sn = cqi & 3;
460 | 	ci = cqi & 0xffffff;
461 | 	cmd = MLX5_CQ_DB_REQ_NOT;
462 | 
463 | 	doorbell = sn << 28 | cmd | ci;
464 | 	doorbell <<= 32;
465 | 	doorbell |= cq;
466 | 
467 | 	cq_dbr[MLX5_CQ_ARM_DB] = htobe32(sn << 28 | cmd | ci);
468 | 	asm volatile("" ::: "memory");
469 | 
470 | 	*(uint64_t *)((uint8_t *)uar_ptr + 0x20) = htobe64(doorbell);
471 | 	asm volatile("" ::: "memory");
472 | 
473 | 	return 0;
474 | }
475 | 
476 | int poll_cq(uint8_t *cq_buff, uint32_t *cqi, uint32_t *cq_dbr) {
477 | 	struct mlx5_cqe64 *cqe = (struct mlx5_cqe64 *)(cq_buff + *cqi % CQ_SIZE * sizeof(*cqe));
478 | 	int retry = 1600000;
479 | 
480 | 	while (--retry && (mlx5dv_get_cqe_opcode(cqe) == MLX5_CQE_INVALID ||
481 | 	       ((cqe->op_own & MLX5_CQE_OWNER_MASK) ^ !!(*cqi & CQ_SIZE))))
482 | 		asm volatile("" ::: "memory");
483 | 
484 | 	if (!retry)
485 | 		return 1;
486 | 
487 | 	(*cqi)++;
488 | 	asm volatile("" ::: "memory");
489 | 	cq_dbr[MLX5_CQ_SET_CI] = htobe32(*cqi & 0xffffff);
490 | 	printf("CQ op %d size %d\n", mlx5dv_get_cqe_opcode(cqe), be32toh(cqe->byte_cnt));
491 | 	return 0;
492 | }
493 | 
494 | int poll_eq(uint8_t *eq_buff, uint32_t *eqi, int expected) {
495 | #if HAS_EQ_SUPPORT
496 | 	struct mlx5_eqe *eqe = (struct mlx5_eqe *)(eq_buff + *eqi % EQ_SIZE * sizeof(*eqe));
497 | 	int retry = 1600000;
498 | 	while (--retry && (eqe->owner & 1) ^ !!(*eqi & EQ_SIZE))
499 | 		asm volatile("" ::: "memory");
500 | 
501 | 	if (!retry)
502 | 		return 1;
503 | 
504 | 	(*eqi)++;
505 | 	asm volatile("" ::: "memory");
506 | 	printf("EQ cq %x\n", be32toh(eqe->data.comp.cqn));
507 | 	return 0;
508 | #else
509 | 	return expected;
510 | #endif
511 | }
512 | 
513 | int arm_eq(uint32_t eq, uint32_t eqi, void *uar_ptr) {
514 | #if HAS_EQ_SUPPORT
515 | 	uint32_t doorbell = (eqi & 0xffffff) | (eq << 24);
516 | 
517 | 	*(uint32_t *)((uint8_t *)uar_ptr + 0x48) = htobe32(doorbell);
518 | 	asm volatile("" ::: "memory");
519 | #endif
520 | 	return 0;
521 | }
522 | 
523 | #include "gtest/gtest.h"
524 | 
525 | TEST(devx, smoke) {
526 | 	int num, devn = 0;
527 | 	struct devx_device **list = devx_get_device_list(&num);
528 | 	void *ctx;
529 | 
530 | 	if (getenv("DEVN"))
531 | 		devn = atoi(getenv("DEVN"));
532 | 
533 | 	ASSERT_GT(num, devn);
534 | 	ctx = devx_open_device(list[devn]);
535 | 	ASSERT_TRUE(ctx);
536 | 	devx_free_device_list(list);
537 | 
538 | 	EXPECT_LE(0, query_device(ctx));
539 | }
540 | 
541 | TEST(devx, gid) {
542 | 	int num, devn = 0;
543 | 	struct devx_device **list = devx_get_device_list(&num);
544 | 	void *ctx;
545 | 
546 | 	if (getenv("DEVN"))
547 | 		devn = atoi(getenv("DEVN"));
548 | 
549 | 	ASSERT_GT(num, devn);
550 | 	ctx = devx_open_device(list[devn]);
551 | 	ASSERT_TRUE(ctx);
552 | 	devx_free_device_list(list);
553 | 
554 | 	uint8_t gid[16];
555 | 	ASSERT_FALSE(devx_query_gid(ctx, 1, 0, gid));
556 | }
557 | 
558 | TEST(devx, send) {
559 | 	int num, devn = 0;
560 | 	struct devx_device **list = devx_get_device_list(&num);
561 | 	void *ctx;
562 | 
563 | 	unsigned char buff[0x1000];
564 | 	for(int i = 0; i < 0x60; i++)
565 | 		buff[i] = i + 0x20;
566 | 
567 | 	int ret, lid, type;
568 | 
569 | 	if (getenv("DEVN"))
570 | 		devn = atoi(getenv("DEVN"));
571 | 
572 | 	ASSERT_GT(num, devn);
573 | 	ctx = devx_open_device(list[devn]);
574 | 	ASSERT_TRUE(ctx);
575 | 	devx_free_device_list(list);
576 | 
577 | 	uint32_t uar_id;
578 | 	void *uar_ptr = NULL;
579 | 	ret = devx_alloc_uar(ctx, &uar_id, &uar_ptr, NULL);
580 | 	ASSERT_FALSE(ret);
581 | 
582 | 	int pd = alloc_pd(ctx);
583 | 	ASSERT_TRUE(pd);
584 | 
585 | 	void *eq_buff = NULL;
586 | 	int eq = 0;
587 | #if HAS_EQ_SUPPORT
588 | 	ASSERT_TRUE(eq = create_eq(ctx, &eq_buff, uar_id));
589 | #endif
590 | 
591 | 	void *cq_buff;
592 | 	uint32_t *cq_dbr;
593 | 	int cq = create_cq(ctx, &cq_buff, uar_id, &cq_dbr, eq);
594 | 	ASSERT_TRUE(cq);
595 | 	int mkey = reg_mr(ctx, pd, buff, sizeof(buff));
596 | 	ASSERT_TRUE(mkey);
597 | 
598 | 	type = query_device(ctx);
599 | 	EXPECT_LE(0, type);
600 | 
601 | 	uint8_t gid[16];
602 | 	ASSERT_FALSE(devx_query_gid(ctx, 1, 0, gid));
603 | 
604 | 	if (!type) {
605 | 		lid = query_lid(ctx);
606 | 		ASSERT_LE(0, lid);
607 | 	} else {
608 | 		lid = 0;
609 | 	}
610 | 
611 | 	void *qp_buff;
612 | 	uint32_t *qp_dbr;
613 | 	struct devx_obj_handle *q;
614 | 	int qp = create_qp(ctx, &qp_buff, uar_id, &qp_dbr, cq, pd, &q);
615 | 	ASSERT_TRUE(qp);
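	/*
	 * Drive the QP through RST->INIT->RTR->RTS by hand; to_rtr() sets
	 * remote_qpn to the QP's own number, so the QP is connected to
	 * itself and the SEND posted below completes into its own RQ.
	 */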
616 | 	ASSERT_FALSE(to_init(q, qp));
617 | 	ASSERT_FALSE(to_rtr(q, qp, type, lid, gid));
618 | 	ASSERT_FALSE(to_rts(q, qp));
619 | 
620 | 	uint8_t *rq = (uint8_t *)qp_buff;
621 | 	uint8_t *sq = (uint8_t *)qp_buff + MLX5_SEND_WQE_BB * RQ_SIZE;
622 | 	uint32_t rqi = 0, sqi = 0, cqi = 0, eqi = 0;
623 | 
624 | 	ASSERT_FALSE(arm_eq(eq, eqi, uar_ptr));
625 | 	ASSERT_FALSE(arm_cq(cq, cqi, cq_dbr, uar_ptr));
626 | 
627 | 	ASSERT_TRUE(poll_eq((uint8_t *)eq_buff, &eqi, 1));
628 | 	ASSERT_TRUE(poll_cq((uint8_t *)cq_buff, &cqi, cq_dbr));
629 | 
630 | 	ASSERT_FALSE(recv(rq, &rqi, qp_dbr, mkey, buff, 0x30));
631 | 	ASSERT_FALSE(xmit(sq, &sqi, qp_dbr, mkey, buff + 0x30, 0x30, uar_ptr, qp));
632 | 
633 | 	ASSERT_FALSE(poll_eq((uint8_t *)eq_buff, &eqi, 0));
634 | 	ASSERT_FALSE(poll_cq((uint8_t *)cq_buff, &cqi, cq_dbr));
635 | 	ASSERT_FALSE(arm_eq(eq, eqi, uar_ptr));
636 | 	ASSERT_FALSE(arm_cq(cq, cqi, cq_dbr, uar_ptr));
637 | 	ASSERT_FALSE(poll_eq((uint8_t *)eq_buff, &eqi, 0));
638 | 	ASSERT_FALSE(poll_cq((uint8_t *)cq_buff, &cqi, cq_dbr));
639 | 	ASSERT_FALSE(arm_eq(eq, eqi, uar_ptr));
640 | 	ASSERT_FALSE(arm_cq(cq, cqi, cq_dbr, uar_ptr));
641 | }
642 | 
643 | int test_rq(void *ctx, int cqn, int pd);
644 | int test_rq(void *ctx, int cqn, int pd) {
645 | 	u8 in[DEVX_ST_SZ_BYTES(create_rq_in)] = {0};
646 | 	u8 out[DEVX_ST_SZ_BYTES(create_rq_out)] = {0};
647 | 	struct devx_obj_handle *pas, *q;
648 | 	uint32_t pas_id, dbr_id, uar_id;
649 | 	void *buff, *uar_ptr = NULL, *dbr;
650 | 	void *rqc, *wq;
651 | 	int ret;
652 | 
653 | 	buff = mmap(NULL, 0x10000, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, 0);
654 | 	pas = devx_umem_reg(ctx, buff, 0x10000, 7, &pas_id);
655 | 
656 | 	dbr = devx_umem_reg(ctx, memalign(64, 8), 8, 7, &dbr_id);
657 | 	if (!dbr)
658 | 		return 0;
659 | 	ret = devx_alloc_uar(ctx, &uar_id, &uar_ptr, NULL);
660 | 	if (ret)
661 | 		return 0;
662 | 
663 | 	DEVX_SET(create_rq_in, in, opcode, MLX5_CMD_OP_CREATE_RQ);
664 | 
665 | 	rqc = DEVX_ADDR_OF(create_rq_in, in, ctx);
666 | 	DEVX_SET(rqc, rqc, vsd, 1);
667 | 	DEVX_SET(rqc, rqc, mem_rq_type, MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE);
668 | 	DEVX_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
669 | 	DEVX_SET(rqc, rqc, flush_in_error_en, 1);
670 | 	DEVX_SET(rqc, rqc, user_index, 1);
671 | 	DEVX_SET(rqc, rqc, cqn, cqn);
672 | 
673 | 	wq = DEVX_ADDR_OF(rqc, rqc, wq);
674 | 	DEVX_SET(wq, wq, wq_type, 1);
675 | 	DEVX_SET(wq, wq, pd, pd);
676 | 	DEVX_SET(wq, wq, log_wq_stride, 6);
677 | 	DEVX_SET(wq, wq, log_wq_sz, 6);
678 | 
679 | 	DEVX_SET(create_rq_in, in, ctx.wq.wq_umem_id, pas_id);
680 | 	DEVX_SET(create_rq_in, in, ctx.wq.dbr_umem_id, dbr_id);
681 | 
682 | 	q = devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
683 | 
684 | 	return DEVX_GET(create_rq_out, out, rqn);
685 | }
686 | 
687 | int test_td(void *ctx);
688 | int test_td(void *ctx) {
689 | 	u8 in[DEVX_ST_SZ_BYTES(alloc_transport_domain_in)] = {0};
690 | 	u8 out[DEVX_ST_SZ_BYTES(alloc_transport_domain_out)] = {0};
691 | 
692 | 	DEVX_SET(alloc_transport_domain_in, in, opcode,
693 | 		 MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
694 | 
695 | 	if (!devx_obj_create(ctx, in, sizeof(in), out, sizeof(out)))
696 | 		return 0;
697 | 
698 | 	return DEVX_GET(alloc_transport_domain_out, out, transport_domain);
699 | }
700 | 
701 | struct devx_obj_handle *test_tir(void *ctx, int rq, int td, int *tir_num);
702 | struct devx_obj_handle *test_tir(void *ctx, int rq, int td, int *tir_num) {
703 | 	u8 in[DEVX_ST_SZ_BYTES(create_tir_in)] = {0};
704 | 	u8 out[DEVX_ST_SZ_BYTES(create_tir_out)] = {0};
705 | 	struct devx_obj_handle *tir;
706 | 	void *tirc;
707 | 
708 | 	DEVX_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
709 | 	tirc = DEVX_ADDR_OF(create_tir_in, in, ctx);
710 | 	DEVX_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
711 | 	DEVX_SET(tirc, tirc, inline_rqn, rq);
712 | 	DEVX_SET(tirc, tirc, transport_domain, td);
713 | 
714 | 	tir = devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
715 | 
716 | 	*tir_num = DEVX_GET(create_tir_out, out, tirn);
717 | 	return tir;
718 | }
719 | 
720 | int test_rule(void *ctx, struct devx_obj_handle *tir, struct devx_obj_handle **rule);
721 | int test_rule(void *ctx, struct devx_obj_handle *tir, struct devx_obj_handle **rule) {
722 | 	u8 in[DEVX_ST_SZ_BYTES(fs_rule_add_in)] = {0};
723 | 	__be32 src_ip = 0x01020304;
724 | 	__be32 dst_ip = 0x05060708;
725 | 	void *headers_c, *headers_v;
726 | 
727 | 	DEVX_SET(fs_rule_add_in, in, prio, 4);
728 | 
729 | 	headers_c = DEVX_ADDR_OF(fs_rule_add_in, in,
730 | 				 flow_spec.match_criteria.outer_headers);
731 | 	headers_v = DEVX_ADDR_OF(fs_rule_add_in, in,
732 | 				 flow_spec.match_value.outer_headers);
733 | 
734 | 	DEVX_SET(fte_match_set_lyr_2_4, headers_c, ip_version, 0xf);
735 | 	DEVX_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
736 | 
737 | 	DEVX_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c,
738 | 			 src_ipv4_src_ipv6.ipv4_layout.ipv4);
739 | 	memcpy(DEVX_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
740 | 			    src_ipv4_src_ipv6.ipv4_layout.ipv4),
741 | 	       &src_ip, sizeof(src_ip));
742 | 
743 | 	DEVX_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c,
744 | 			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
745 | 	memcpy(DEVX_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
746 | 			    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
747 | 	       &dst_ip, sizeof(dst_ip));
748 | 
749 | 	DEVX_SET(fs_rule_add_in, in, flow_spec.match_criteria_enable, 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS);
750 | 
751 | 	*rule = devx_fs_rule_add(ctx, in, tir, 0);
752 | 	return !!*rule;
753 | }
754 | 
755 | enum fs_flow_table_type {
756 | 	FS_FT_NIC_RX = 0x0,
757 | 	FS_FT_NIC_TX = 0x1,
758 | 	FS_FT_ESW_EGRESS_ACL = 0x2,
759 | 	FS_FT_ESW_INGRESS_ACL = 0x3,
760 | 	FS_FT_FDB = 0X4,
761 | 	FS_FT_SNIFFER_RX = 0X5,
762 | 	FS_FT_SNIFFER_TX = 0X6,
763 | 	FS_FT_MAX_TYPE = FS_FT_SNIFFER_TX,
764 | };
765 | 
766 | struct devx_obj_handle *create_ft(void *ctx, int *ft_num);
767 | struct devx_obj_handle *create_ft(void *ctx, int *ft_num)
768 | {
769 | 	uint8_t in[DEVX_ST_SZ_BYTES(create_flow_table_in)] = {0};
770 | 	uint8_t out[DEVX_ST_SZ_BYTES(create_flow_table_out)] = {0};
771 | 	struct devx_obj_handle *ft;
772 | 	void *ftc;
773 | 
774 | 	DEVX_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE);
775 | 	DEVX_SET(create_flow_table_in, in, table_type, FS_FT_NIC_RX);
776 | 
777 | 	ftc = DEVX_ADDR_OF(create_flow_table_in, in, flow_table_context);
778 | 	DEVX_SET(flow_table_context, ftc, table_miss_action, 0); // default table
779 | 	DEVX_SET(flow_table_context, ftc, level, 64); // table level
780 | 	DEVX_SET(flow_table_context, ftc, log_size, 0);
781 | 
782 | 	ft = devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
783 | 	*ft_num = DEVX_GET(create_flow_table_out, out, table_id);
784 | 
785 | 	return ft;
786 | }
787 | 
788 | int create_fg(void *ctx, int ft);
789 | int create_fg(void *ctx, int ft)
790 | {
791 | 	uint8_t in[DEVX_ST_SZ_BYTES(create_flow_group_in)] = {0};
792 | 	uint8_t out[DEVX_ST_SZ_BYTES(create_flow_group_out)] = {0};
793 | 
794 | 	DEVX_SET(create_flow_group_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_GROUP);
795 | 	DEVX_SET(create_flow_group_in, in, table_type, FS_FT_NIC_RX);
796 | 	DEVX_SET(create_flow_group_in, in, table_id, ft);
797 | 	DEVX_SET(create_flow_group_in, in, start_flow_index, 0);
798 | 	DEVX_SET(create_flow_group_in, in, end_flow_index, 0);
799 | 	DEVX_SET(create_flow_group_in, in, match_criteria_enable, MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS);
800 | 
801 | 	if (!devx_obj_create(ctx, in, sizeof(in), out, sizeof(out)))
802 | 		return 0;
803 | 
804 | 	return DEVX_GET(create_flow_group_out, out, group_id);
805 | }
806 | 
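/*
 * set_fte() is used for both creation and modification: on the first call
 * (*rule == NULL) the entry is created with op_mod 0; on later calls the
 * existing entry is modified in place (op_mod 1, with modify_enable_mask
 * selecting the destination list) so the flow can be retargeted to a new
 * TIR without deleting the rule.
 */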
807 | int set_fte(void *ctx, int ft, int fg, int tir, struct devx_obj_handle **rule);
808 | int set_fte(void *ctx, int ft, int fg, int tir, struct devx_obj_handle **rule)
809 | {
810 | 	uint8_t in[DEVX_ST_SZ_BYTES(set_fte_in) + DEVX_UN_SZ_BYTES(dest_format_struct_flow_counter_list_auto)] = {0};
811 | 	uint8_t out[DEVX_ST_SZ_BYTES(set_fte_out)] = {0};
812 | 	void *in_flow_context, *in_dests;
813 | 	int op = !!*rule;
814 | 
815 | 	DEVX_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
816 | 	DEVX_SET(set_fte_in, in, uid, 0); // FIXME !!!!!!
817 | 	DEVX_SET(set_fte_in, in, op_mod, op);
818 | 	DEVX_SET(set_fte_in, in, modify_enable_mask, op ? 4 : 0);
819 | 	DEVX_SET(set_fte_in, in, table_type, FS_FT_NIC_RX);
820 | 	DEVX_SET(set_fte_in, in, table_id, ft);
821 | 	DEVX_SET(set_fte_in, in, flow_index, 0);
822 | 
823 | 	DEVX_SET(set_fte_in, in, vport_number, 0);
824 | 	DEVX_SET(set_fte_in, in, other_vport, 0);
825 | 
826 | 	in_flow_context = DEVX_ADDR_OF(set_fte_in, in, flow_context);
827 | 	DEVX_SET(flow_context, in_flow_context, group_id, fg);
828 | 	DEVX_SET(flow_context, in_flow_context, flow_tag, 1);
829 | 	DEVX_SET(flow_context, in_flow_context, action, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
830 | 	DEVX_SET(flow_context, in_flow_context, destination_list_size, 1);
831 | 
832 | 	in_dests = DEVX_ADDR_OF(flow_context, in_flow_context, destination[0].dest_format_struct);
833 | 	DEVX_SET(dest_format_struct, in_dests, destination_type, MLX5_FLOW_DESTINATION_TYPE_TIR);
834 | 	DEVX_SET(dest_format_struct, in_dests, destination_id, tir);
835 | 
836 | 	if (*rule) {
837 | 		return !devx_obj_modify(*rule, in, sizeof(in), out, sizeof(out));
838 | 	} else {
839 | 		*rule = devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
840 | 		return !!*rule;
841 | 	}
842 | }
843 | 
844 | int test_rule_priv(void *ctx, struct devx_obj_handle *ft);
845 | int test_rule_priv(void *ctx, struct devx_obj_handle *ft) {
846 | 	u8 in[DEVX_ST_SZ_BYTES(fs_rule_add_in)] = {0};
847 | 	struct devx_obj_handle *rule;
848 | 	__be32 src_ip = 0x01020304;
849 | 	__be32 dst_ip = 0x05060708;
850 | 	void *headers_c, *headers_v;
851 | 
852 | 	DEVX_SET(fs_rule_add_in, in, prio, 5);
853 | 
854 | 	headers_c = DEVX_ADDR_OF(fs_rule_add_in, in,
855 | 				 flow_spec.match_criteria.outer_headers);
856 | 	headers_v = DEVX_ADDR_OF(fs_rule_add_in, in,
857 | 				 flow_spec.match_value.outer_headers);
858 | 
859 | 	DEVX_SET(fte_match_set_lyr_2_4, headers_c, ip_version, 0xf);
860 | 	DEVX_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
861 | 
862 | 	DEVX_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c,
863 | 			 src_ipv4_src_ipv6.ipv4_layout.ipv4);
864 | 	memcpy(DEVX_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
865 | 			    src_ipv4_src_ipv6.ipv4_layout.ipv4),
866 | 	       &src_ip, sizeof(src_ip));
867 | 
868 | 	DEVX_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c,
869 | 			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
870 | 	memcpy(DEVX_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
871 | 			    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
872 | 	       &dst_ip, sizeof(dst_ip));
873 | 
874 | 	DEVX_SET(fs_rule_add_in, in, flow_spec.match_criteria_enable, 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS);
875 | 
876 | 	rule = devx_fs_rule_add(ctx, in, ft, 0);
877 | 	if (!rule)
878 | 		return 0;
879 | 
880 | 	return 1;
881 | }
882 | 
883 | TEST(devx, roce) {
884 | 	int num;
885 | 	struct devx_device **list = devx_get_device_list(&num);
886 | 	void *ctx;
887 | 	int devn = 0;
888 | 	int pd;
889 | 	int cq, rq, td, tir_num;
890 | 	int ft_num, fg;
891 | 
892 | 	if (getenv("DEVN"))
893 | 		devn = atoi(getenv("DEVN"));
894 | 
895 | 	ASSERT_GT(num, devn);
896 | 	ctx = devx_open_device(list[devn]);
897 | 	ASSERT_TRUE(ctx);
898 | 	devx_free_device_list(list);
899 | 
900 | 	int type = query_device(ctx);
901 | 	EXPECT_LE(0, type);
902 | 
903 | 	pd = alloc_pd(ctx);
904 | 	ASSERT_TRUE(pd);
905 | 
906 | 	cq = create_cq(ctx, NULL, 0, NULL, 0);
907 | 	ASSERT_TRUE(cq);
908 | 	rq = test_rq(ctx, cq, pd);
909 | 	ASSERT_TRUE(rq);
910 | 	td = test_td(ctx);
911 | 	ASSERT_TRUE(td);
912 | 	struct devx_obj_handle *tir = test_tir(ctx, rq, td, &tir_num);
913 | 	ASSERT_TRUE(tir);
914 | 
915 | 	struct devx_obj_handle *rule1 = NULL;
916 | 	ASSERT_TRUE(test_rule(ctx, tir, &rule1));
917 | 	ASSERT_FALSE(devx_fs_rule_del(rule1));
918 | 
919 | 	struct devx_obj_handle *ft = create_ft(ctx, &ft_num);
920 | 	ASSERT_TRUE(ft);
921 | 	fg = create_fg(ctx, ft_num);
922 | 	struct devx_obj_handle *rule = NULL;
923 | 	ASSERT_TRUE(set_fte(ctx, ft_num, fg, tir_num, &rule));
924 | 	ASSERT_TRUE(set_fte(ctx, ft_num, fg, tir_num, &rule));
925 | 
926 | 	ASSERT_TRUE(test_rule_priv(ctx, ft));
927 | 	ASSERT_TRUE(test_rule_priv(ctx, ft));
928 | 
929 | 	int tir2;
930 | 	ASSERT_TRUE(test_tir(ctx, rq, td, &tir2));
931 | 	ASSERT_TRUE(set_fte(ctx, ft_num, fg, tir2, &rule));
932 | 
933 | 	devx_close_device(ctx);
934 | }
935 | 
936 | TEST(devx, rule_qp) {
937 | 	int num;
938 | 	struct devx_device **list = devx_get_device_list(&num);
939 | 	void *ctx;
940 | 	int devn = 0;
941 | 	void *mask, *val;
942 | 	u8 in[DEVX_ST_SZ_BYTES(fs_rule_add_in)] = {0};
943 | 
944 | 	if (getenv("DEVN"))
945 | 		devn = atoi(getenv("DEVN"));
946 | 
947 | 	ASSERT_GT(num, devn);
948 | 	ctx = devx_open_device(list[devn]);
949 | 	ASSERT_TRUE(ctx);
950 | 	devx_free_device_list(list);
951 | 
952 | 	DEVX_SET(fs_rule_add_in, in, prio, -1);
953 | 
954 | 	mask = DEVX_ADDR_OF(fs_rule_add_in, in, flow_spec.match_criteria.misc_parameters);
955 | 	val = DEVX_ADDR_OF(fs_rule_add_in, in, flow_spec.match_value.misc_parameters);
956 | 
957 | 	DEVX_SET(fte_match_set_misc, mask, bth_dst_qp, 0xffffff);
958 | 	DEVX_SET(fte_match_set_misc, val, bth_dst_qp, 0x100);
959 | 
960 | 	DEVX_SET(fs_rule_add_in, in, flow_spec.match_criteria_enable, 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS);
961 | 
962 | 	ASSERT_TRUE(devx_fs_rule_add(ctx, in, NULL, 0));
963 | }
964 | 
965 | #ifdef DEVX_VERBS_MIX
966 | #include "devx_verbs.h"
967 | 
968 | TEST(devx, send_verbs_mr) {
969 | 	int num, devn = 0;
970 | 	struct ibv_device **list = ibv_get_device_list(&num);
971 | 	struct ibv_context *ibctx;
972 | 	void *ctx;
973 | 	unsigned char buff[0x1000];
974 | 	for(int i = 0; i < 0x60; i++)
975 | 		buff[i] = i + 0x20;
976 | 
977 | 	int ret, lid;
978 | 
979 | 	if (getenv("DEVN"))
980 | 		devn = atoi(getenv("DEVN"));
981 | 
982 | 	ASSERT_GT(num, devn);
983 | 	ibctx = ibv_open_device(list[devn]);
984 | 	ASSERT_TRUE(ibctx);
985 | 	ctx = devx_from_ibv(ibctx);
986 | 	ibv_free_device_list(list);
987 | 
988 | 	uint32_t uar_id;
989 | 	void *uar_ptr = NULL;
990 | 	ret = devx_alloc_uar(ctx, &uar_id, &uar_ptr, NULL);
991 | 	ASSERT_FALSE(ret);
992 | 
993 | 	struct ibv_pd *pd = ibv_alloc_pd(ibctx);
994 | 	ASSERT_TRUE(pd);
995 | 	uint32_t pdn = *(uint32_t *)(pd + 1); /* mlx5_pd stores pdn right after the ibv_pd */
996 | 
997 | 	void *cq_buff; uint32_t *cq_dbr;
998 | 	int cq = create_cq(ctx, &cq_buff, uar_id, &cq_dbr, 0); /* no EQ in this test */
999 | 	ASSERT_TRUE(cq);
1000 | 	struct ibv_mr *mr = ibv_reg_mr(pd,
buff, sizeof(buff), IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_READ | IBV_ACCESS_REMOTE_WRITE);
1001 | 	ASSERT_TRUE(mr);
1002 | 	uint32_t mkey = mr->lkey;
1003 | 	uint8_t gid[16]; /* to_rtr() below needs the port GID */
1004 | 	ASSERT_FALSE(devx_query_gid(ctx, 1, 0, gid));
1005 | 	lid = query_lid(ctx); ASSERT_LE(0, lid);
1006 | 
1007 | 	void *qp_buff;
1008 | 	uint32_t *qp_dbr;
1009 | 	struct devx_obj_handle *q;
1010 | 	int qp = create_qp(ctx, &qp_buff, uar_id, &qp_dbr, cq, pdn, &q);
1011 | 	ASSERT_TRUE(qp);
1012 | 	ASSERT_FALSE(to_init(q, qp));
1013 | 	ASSERT_FALSE(to_rtr(q, qp, 0, lid, gid)); /* 0 - IB link, address by LID */
1014 | 	ASSERT_FALSE(to_rts(q, qp));
1015 | 
1016 | 	uint8_t *rq = (u8 *)qp_buff;
1017 | 	uint8_t *sq = (u8 *)qp_buff + MLX5_SEND_WQE_BB * RQ_SIZE;
1018 | 	uint32_t rqi = 0, sqi = 0;
1019 | 
1020 | 	ASSERT_FALSE(recv(rq, &rqi, qp_dbr, mkey, buff, 0x30));
1021 | 	ASSERT_FALSE(xmit(sq, &sqi, qp_dbr, mkey, buff + 0x30, 0x30, uar_ptr, qp));
1022 | 
1023 | 	uint32_t cqi = 0;
1024 | 	ASSERT_FALSE(poll_cq((uint8_t *)cq_buff, &cqi, cq_dbr));
1025 | 	ASSERT_FALSE(poll_cq((uint8_t *)cq_buff, &cqi, cq_dbr));
1026 | }
1027 | 
1028 | #endif
1029 | 
1030 | 
--------------------------------------------------------------------------------