├── src ├── Module.supported ├── ice_aux_support.h ├── ice_defs.h ├── ice_pf_vsi_vlan_ops.h ├── ice_hwmon.h ├── ice_mk_grp.h ├── ice_vlan_mode.h ├── ice_vlan.h ├── ice_flg_rd.h ├── ice_ethtool_common.h ├── ice_virtchnl_allowlist.h ├── ice_fw_update.h ├── ice_ptype_mk.h ├── ice_proto_grp.h ├── ice_dcb_nl.h ├── ice_virtchnl_fsub.h ├── ice_vf_vsi_vlan_ops.h ├── ice_ieps.h ├── ice_tmatch.h ├── ice_metainit.h ├── ice_xlt_kb.h ├── ice_siov.h ├── ice_bst_tcam.h ├── kcompat_defs.h ├── ice_aux_support.c ├── ice_parser_util.h ├── ice_idc_int.h ├── ice_irq.h ├── ice_migration_private.h ├── ice_vsi_vlan_ops.h ├── ice_vf_adq.h ├── kcompat_ubuntu_defs.h ├── ice_parser_rt.h ├── ice_vsi_vlan_lib.h ├── ice_migration.h ├── ice_adapter.h ├── ice_pf_vsi_vlan_ops.c ├── ice_base.h ├── ice_repr.h ├── ice_mk_grp.c ├── devlink │ └── health.h ├── ice_gnss.h ├── siov_regs.h ├── ice_virtchnl_fdir.h ├── ice_pg_cam.h ├── ice_sbq_cmd.h ├── kcompat_kthread.h ├── ice_vf_lib_private.h ├── ice_fltr.h ├── ice_vf_mbx.h ├── ice_flg_rd.c ├── kcompat_gnss.h ├── ice_ptype_mk.c ├── kcompat_dim.c ├── ice_imem.h ├── ice_phy_regs.h ├── ice_arfs.h ├── ice_fwlog.h ├── ice_devlink.h ├── ice_lag.h ├── ice_eswitch.h ├── ice_proto_grp.c ├── ice_hwmon.c ├── ice_txrx_lib.h ├── ice_vsi_vlan_ops.c ├── ice_dcf.h ├── linux │ └── auxiliary_bus.h ├── ice_devids.h ├── ice_adapter.c ├── ice_xsk.h ├── ice_flex_pipe.h ├── ice_tspll.h ├── ice_vdcm.h ├── ice_parser.h ├── ice_dcb_lib.h ├── auxiliary_compat.h ├── ice_nvm.h ├── kcompat_gcc.h ├── kcompat_rhel_defs.h ├── ice_cpi.h ├── ice_controlq.h ├── kcompat_pldmfw.h ├── ice_dpll.h ├── ice_metainit.c └── ice_lib.h ├── ddp ├── ice-1.3.50.0.pkg └── LICENSE ├── security.md ├── scripts ├── set_xps_rxqs ├── adqsetup │ └── LICENSE └── set_arfs ├── CONTRIBUTING.md ├── pci.updates └── CODE_OF_CONDUCT.md /src/Module.supported: -------------------------------------------------------------------------------- 1 | ice.ko external 2 | intel_auxiliary.ko external 3 | -------------------------------------------------------------------------------- /ddp/ice-1.3.50.0.pkg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/ethernet-linux-ice/HEAD/ddp/ice-1.3.50.0.pkg -------------------------------------------------------------------------------- /src/ice_aux_support.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _ICE_AUX_SUPPORT_H_ 5 | #define _ICE_AUX_SUPPORT_H_ 6 | int ice_init_aux(struct ice_pf *pf); 7 | void ice_deinit_aux(struct ice_pf *pf); 8 | #endif /* _ICE_AUX_SUPPORT_H_ */ 9 | -------------------------------------------------------------------------------- /src/ice_defs.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _ICE_DEFS_H_ 5 | #define _ICE_DEFS_H_ 6 | 7 | #define ICE_BYTES_PER_WORD 2 8 | #define ICE_BYTES_PER_DWORD 4 9 | #define ICE_MAX_TRAFFIC_CLASS 8 10 | #define ICE_CHNL_MAX_TC 16 11 | 12 | #endif /* _ICE_DEFS_H_ */ 13 | -------------------------------------------------------------------------------- /src/ice_pf_vsi_vlan_ops.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef 
_ICE_PF_VSI_VLAN_OPS_H_
5 | #define _ICE_PF_VSI_VLAN_OPS_H_
6 | 
7 | #include "ice_vsi_vlan_ops.h"
8 | 
9 | struct ice_vsi;
10 | 
11 | void ice_pf_vsi_init_vlan_ops(struct ice_vsi *vsi);
12 | 
13 | #endif /* _ICE_PF_VSI_VLAN_OPS_H_ */
14 | 
--------------------------------------------------------------------------------
/src/ice_hwmon.h:
--------------------------------------------------------------------------------
1 | /* SPDX-License-Identifier: GPL-2.0-only */
2 | /* Copyright (C) 2018-2025 Intel Corporation */
3 | 
4 | #ifndef _ICE_HWMON_H_
5 | #define _ICE_HWMON_H_
6 | 
7 | #ifdef HAVE_HWMON_DEVICE_REGISTER_WITH_INFO
8 | void ice_hwmon_init(struct ice_pf *pf);
9 | void ice_hwmon_exit(struct ice_pf *pf);
10 | #endif /* HAVE_HWMON_DEVICE_REGISTER_WITH_INFO */
11 | 
12 | #endif /* _ICE_HWMON_H_ */
13 | 
--------------------------------------------------------------------------------
/security.md:
--------------------------------------------------------------------------------
1 | # Security Policy
2 | Intel is committed to rapidly addressing security vulnerabilities affecting our customers and providing clear guidance on the solution, impact, severity and mitigation.
3 | 
4 | ## Reporting a Vulnerability
5 | Please report any security vulnerabilities in this project [utilizing the guidelines here](https://www.intel.com/content/www/us/en/security-center/vulnerability-handling-guidelines.html).
--------------------------------------------------------------------------------
/src/ice_mk_grp.h:
--------------------------------------------------------------------------------
1 | /* SPDX-License-Identifier: GPL-2.0-only */
2 | /* Copyright (C) 2018-2025 Intel Corporation */
3 | 
4 | #ifndef _ICE_MK_GRP_H_
5 | #define _ICE_MK_GRP_H_
6 | 
7 | struct ice_mk_grp_item {
8 | 	int idx;
9 | 	u8 markers[8];
10 | };
11 | 
12 | void ice_mk_grp_dump(struct ice_hw *hw, struct ice_mk_grp_item *item);
13 | struct ice_mk_grp_item *ice_mk_grp_table_get(struct ice_hw *hw);
14 | #endif /* _ICE_MK_GRP_H_ */
15 | 
--------------------------------------------------------------------------------
/src/ice_vlan_mode.h:
--------------------------------------------------------------------------------
1 | /* SPDX-License-Identifier: GPL-2.0-only */
2 | /* Copyright (C) 2018-2025 Intel Corporation */
3 | 
4 | #ifndef _ICE_VLAN_MODE_H_
5 | #define _ICE_VLAN_MODE_H_
6 | 
7 | #include "ice_osdep.h"
8 | 
9 | struct ice_hw;
10 | 
11 | bool ice_is_dvm_ena(struct ice_hw *hw);
12 | int ice_set_vlan_mode(struct ice_hw *hw);
13 | void ice_post_pkg_dwnld_vlan_mode_cfg(struct ice_hw *hw);
14 | 
15 | #endif /* _ICE_VLAN_MODE_H_ */
16 | 
--------------------------------------------------------------------------------
/src/ice_vlan.h:
--------------------------------------------------------------------------------
1 | /* SPDX-License-Identifier: GPL-2.0-only */
2 | /* Copyright (C) 2018-2025 Intel Corporation */
3 | 
4 | #ifndef _ICE_VLAN_H_
5 | #define _ICE_VLAN_H_
6 | 
7 | #include <linux/types.h>
8 | #include "ice_type.h"
9 | 
10 | struct ice_vlan {
11 | 	u16 tpid;
12 | 	u16 vid;
13 | 	u8 prio;
14 | 	enum ice_sw_fwd_act_type fwd_act;
15 | };
16 | 
17 | #define ICE_VLAN(tpid, vid, prio, fwd_action) \
18 | 	((struct ice_vlan){ tpid, vid, prio, fwd_action })
19 | #endif /* _ICE_VLAN_H_ */
20 | 
--------------------------------------------------------------------------------
/src/ice_flg_rd.h:
--------------------------------------------------------------------------------
1 | /* SPDX-License-Identifier: GPL-2.0-only */
2 | /* Copyright (C) 2018-2025 Intel Corporation */
3 | 
4 | 
#ifndef _ICE_FLG_RD_H_ 5 | #define _ICE_FLG_RD_H_ 6 | 7 | struct ice_flg_rd_item { 8 | u16 idx; 9 | bool expose; 10 | u8 intr_flg_id; 11 | }; 12 | 13 | void ice_flg_rd_dump(struct ice_hw *hw, struct ice_flg_rd_item *item); 14 | struct ice_flg_rd_item *ice_flg_rd_table_get(struct ice_hw *hw); 15 | u64 ice_flg_redirect(struct ice_flg_rd_item *table, u64 psr_flg); 16 | #endif /* _ICE_FLG_RD_H_ */ 17 | -------------------------------------------------------------------------------- /src/ice_ethtool_common.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _ICE_ETHTOOL_COMMON_H_ 5 | #define _ICE_ETHTOOL_COMMON_H_ 6 | 7 | void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data); 8 | int ice_get_sset_count(struct net_device *netdev, int sset); 9 | void ice_get_ethtool_stats(struct net_device *netdev, 10 | struct ethtool_stats __always_unused *stats, 11 | u64 *data); 12 | 13 | #endif /* _ICE_ETHTOOL_COMMON_H_ */ 14 | -------------------------------------------------------------------------------- /src/ice_virtchnl_allowlist.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _ICE_VIRTCHNL_ALLOWLIST_H_ 5 | #define _ICE_VIRTCHNL_ALLOWLIST_H_ 6 | #include "ice.h" 7 | 8 | bool ice_vc_is_opcode_allowed(struct ice_vf *vf, u32 opcode); 9 | 10 | void ice_vc_set_default_allowlist(struct ice_vf *vf); 11 | void ice_vc_set_working_allowlist(struct ice_vf *vf); 12 | void ice_vc_set_hqos_allowlist(struct ice_vf *vf); 13 | void ice_vc_set_caps_allowlist(struct ice_vf *vf); 14 | #endif /* _ICE_VIRTCHNL_ALLOWLIST_H_ */ 15 | -------------------------------------------------------------------------------- /src/ice_fw_update.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _ICE_FW_UPDATE_H_ 5 | #define _ICE_FW_UPDATE_H_ 6 | 7 | int ice_flash_pldm_image(struct devlink *devlink, 8 | struct devlink_flash_update_params *params, 9 | struct netlink_ext_ack *extack); 10 | int ice_get_pending_updates(struct ice_pf *pf, u8 *pending, 11 | struct netlink_ext_ack *extack); 12 | int ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset, 13 | u16 block_size, u8 *block, bool last_cmd, 14 | u8 *reset_level, struct netlink_ext_ack *extack); 15 | 16 | #endif 17 | -------------------------------------------------------------------------------- /src/ice_ptype_mk.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _ICE_PTYPE_MK_H_ 5 | #define _ICE_PTYPE_MK_H_ 6 | 7 | struct ice_ptype_mk_tcam_item { 8 | u16 address; 9 | u16 ptype; 10 | u8 key[10]; 11 | u8 key_inv[10]; 12 | }; 13 | 14 | void ice_ptype_mk_tcam_dump(struct ice_hw *hw, 15 | struct ice_ptype_mk_tcam_item *item); 16 | struct ice_ptype_mk_tcam_item *ice_ptype_mk_tcam_table_get(struct ice_hw *hw); 17 | struct ice_ptype_mk_tcam_item * 18 | ice_ptype_mk_tcam_match(struct ice_ptype_mk_tcam_item *table, 19 | u8 *pat, int len); 20 | #endif /* _ICE_PTYPE_MK_H_ */ 21 | -------------------------------------------------------------------------------- /src/ice_proto_grp.h: 
-------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _ICE_PROTO_GRP_H_ 5 | #define _ICE_PROTO_GRP_H_ 6 | 7 | #define ICE_PROTO_COUNT_PER_GRP 8 8 | #define ICE_PROTO_GRP_TABLE_SIZE 192 9 | 10 | struct ice_proto_off { 11 | bool polarity; /* true: positive, false: nagtive */ 12 | u8 proto_id; 13 | u16 offset; 14 | }; 15 | 16 | struct ice_proto_grp_item { 17 | u16 idx; 18 | struct ice_proto_off po[ICE_PROTO_COUNT_PER_GRP]; 19 | }; 20 | 21 | void ice_proto_grp_dump(struct ice_hw *hw, struct ice_proto_grp_item *item); 22 | struct ice_proto_grp_item *ice_proto_grp_table_get(struct ice_hw *hw); 23 | #endif /* _ICE_PROTO_GRP_H_ */ 24 | -------------------------------------------------------------------------------- /src/ice_dcb_nl.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _ICE_DCB_NL_H_ 5 | #define _ICE_DCB_NL_H_ 6 | 7 | #ifdef CONFIG_DCB 8 | void ice_dcbnl_setup(struct ice_vsi *vsi); 9 | void ice_dcbnl_set_all(struct ice_vsi *vsi); 10 | void 11 | ice_dcbnl_flush_apps(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg, 12 | struct ice_dcbx_cfg *new_cfg); 13 | #else 14 | static inline void ice_dcbnl_setup(struct ice_vsi *vsi) { } 15 | static inline void ice_dcbnl_set_all(struct ice_vsi *vsi) { } 16 | static inline void 17 | ice_dcbnl_flush_apps(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg, 18 | struct ice_dcbx_cfg *new_cfg) { } 19 | #endif /* CONFIG_DCB */ 20 | #endif /* _ICE_DCB_NL_H_ */ 21 | -------------------------------------------------------------------------------- /src/ice_virtchnl_fsub.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _ICE_VIRTCHNL_FSUB_H_ 5 | #define _ICE_VIRTCHNL_FSUB_H_ 6 | 7 | struct ice_vf; 8 | struct ice_pf; 9 | struct ice_vsi; 10 | 11 | #define ICE_IPV4_PROTO_NVGRE 0x002F 12 | #define ICE_FSUB_MAX_FLTRS 16384 13 | #define ICE_FSUB_PRI_BASE 6 14 | 15 | /* VF FSUB information structure */ 16 | struct ice_vf_fsub { 17 | struct idr fsub_rule_idr; 18 | struct list_head fsub_rule_list; 19 | }; 20 | 21 | void ice_vf_fsub_init(struct ice_vf *vf); 22 | int ice_vc_flow_sub_fltr(struct ice_vf *vf, u8 *msg); 23 | int ice_vc_flow_unsub_fltr(struct ice_vf *vf, u8 *msg); 24 | void ice_vf_fsub_exit(struct ice_vf *vf); 25 | #endif /* _ICE_VIRTCHNL_FSUB_H_ */ 26 | -------------------------------------------------------------------------------- /src/ice_vf_vsi_vlan_ops.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _ICE_VF_VSI_VLAN_OPS_H_ 5 | #define _ICE_VF_VSI_VLAN_OPS_H_ 6 | 7 | #include "ice_vsi_vlan_ops.h" 8 | 9 | struct ice_vsi; 10 | 11 | void ice_vf_vsi_cfg_dvm_legacy_vlan_mode(struct ice_vsi *vsi); 12 | void ice_vf_vsi_cfg_svm_legacy_vlan_mode(struct ice_vsi *vsi); 13 | int ice_vf_vsi_dcf_set_outer_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan); 14 | int ice_vf_vsi_dcf_ena_outer_vlan_stripping(struct ice_vsi *vsi, u16 tpid); 15 | #ifdef CONFIG_PCI_IOV 16 | void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi); 17 | #else 18 | static inline void ice_vf_vsi_init_vlan_ops(struct 
ice_vsi *vsi) { } 19 | #endif /* CONFIG_PCI_IOV */ 20 | 21 | #endif /* _ICE_PF_VSI_VLAN_OPS_H_ */ 22 | -------------------------------------------------------------------------------- /src/ice_ieps.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | /* Intel(R) Ethernet Connection E800 Series Linux Driver IEPS extensions */ 5 | 6 | #ifndef _ICE_IEPS_H_ 7 | #define _ICE_IEPS_H_ 8 | 9 | #include "ieps_peer.h" 10 | #include "iidc.h" 11 | 12 | int ice_ieps_entry(struct iidc_core_dev_info *obj, void *arg); 13 | 14 | void ice_cdev_init_ieps_info(struct ice_hw *hw, 15 | enum iidc_ieps_nac_mode *nac_mode); 16 | void ice_ieps_handle_link_event(struct ice_hw *hw); 17 | enum ieps_peer_status ice_ieps_get_link_state_speed(struct ice_hw *hw, 18 | bool *link_up, 19 | u16 *link_speed); 20 | enum ieps_peer_status ice_ieps_exec_cpi(struct ice_hw *hw, 21 | struct ieps_peer_cpi_cmd_resp *cpi); 22 | void ice_ieps_init_lm_ops(struct ice_hw *hw, bool en_fw_ops); 23 | #endif /* _ICE_IEPS_H_ */ 24 | -------------------------------------------------------------------------------- /src/ice_tmatch.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _ICE_TMATCH_H_ 5 | #define _ICE_TMATCH_H_ 6 | 7 | static inline 8 | bool ice_ternary_match_byte(u8 key, u8 key_inv, u8 pat) 9 | { 10 | u8 k1, k2, v; 11 | int i; 12 | 13 | for (i = 0; i < 8; i++) { 14 | k1 = (u8)(key & (1 << i)); 15 | k2 = (u8)(key_inv & (1 << i)); 16 | v = (u8)(pat & (1 << i)); 17 | 18 | if (k1 != 0 && k2 != 0) 19 | continue; 20 | if (k1 == 0 && k2 == 0) 21 | return false; 22 | 23 | if (k1 == v) 24 | return false; 25 | } 26 | 27 | return true; 28 | } 29 | 30 | static inline 31 | bool ice_ternary_match(const u8 *key, const u8 *key_inv, 32 | const u8 *pat, int len) 33 | { 34 | int i; 35 | 36 | for (i = 0; i < len; i++) 37 | if (!ice_ternary_match_byte(key[i], key_inv[i], pat[i])) 38 | return false; 39 | 40 | return true; 41 | } 42 | 43 | #endif /* _ICE_TMATCH_H_ */ 44 | -------------------------------------------------------------------------------- /src/ice_metainit.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _ICE_METAINIT_H_ 5 | #define _ICE_METAINIT_H_ 6 | 7 | struct ice_metainit_item { 8 | u16 idx; 9 | 10 | u8 tsr; 11 | u16 ho; 12 | u16 pc; 13 | u16 pg_rn; 14 | u8 cd; 15 | 16 | bool gpr_a_ctrl; 17 | u8 gpr_a_data_mdid; 18 | u8 gpr_a_data_start; 19 | u8 gpr_a_data_len; 20 | u8 gpr_a_id; 21 | 22 | bool gpr_b_ctrl; 23 | u8 gpr_b_data_mdid; 24 | u8 gpr_b_data_start; 25 | u8 gpr_b_data_len; 26 | u8 gpr_b_id; 27 | 28 | bool gpr_c_ctrl; 29 | u8 gpr_c_data_mdid; 30 | u8 gpr_c_data_start; 31 | u8 gpr_c_data_len; 32 | u8 gpr_c_id; 33 | 34 | bool gpr_d_ctrl; 35 | u8 gpr_d_data_mdid; 36 | u8 gpr_d_data_start; 37 | u8 gpr_d_data_len; 38 | u8 gpr_d_id; 39 | 40 | u64 flags; 41 | }; 42 | 43 | void ice_metainit_dump(struct ice_hw *hw, struct ice_metainit_item *item); 44 | struct ice_metainit_item *ice_metainit_table_get(struct ice_hw *hw); 45 | #endif /*_ICE_METAINIT_H_ */ 46 | -------------------------------------------------------------------------------- /src/ice_xlt_kb.h: 
-------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _ICE_XLT_KB_H_ 5 | #define _ICE_XLT_KB_H_ 6 | 7 | #define ICE_XLT_KB_TBL_CNT 8 8 | #define ICE_XLT_KB_FLAG0_14_CNT 15 9 | 10 | struct ice_xlt_kb_entry { 11 | u8 xlt1_ad_sel; 12 | u8 xlt2_ad_sel; 13 | u16 flg0_14_sel[ICE_XLT_KB_FLAG0_14_CNT]; 14 | u8 xlt1_md_sel; 15 | u8 xlt2_md_sel; 16 | }; 17 | 18 | struct ice_xlt_kb { 19 | u8 xlt1_pm; 20 | u8 xlt2_pm; 21 | u8 prof_id_pm; 22 | u64 flag15; 23 | 24 | struct ice_xlt_kb_entry entries[ICE_XLT_KB_TBL_CNT]; 25 | }; 26 | 27 | void ice_xlt_kb_dump(struct ice_hw *hw, struct ice_xlt_kb *kb); 28 | struct ice_xlt_kb *ice_xlt_kb_get_sw(struct ice_hw *hw); 29 | struct ice_xlt_kb *ice_xlt_kb_get_acl(struct ice_hw *hw); 30 | struct ice_xlt_kb *ice_xlt_kb_get_fd(struct ice_hw *hw); 31 | struct ice_xlt_kb *ice_xlt_kb_get_rss(struct ice_hw *hw); 32 | u16 ice_xlt_kb_flag_get(struct ice_xlt_kb *kb, u64 pkt_flag); 33 | #endif /* _ICE_XLT_KB_H */ 34 | -------------------------------------------------------------------------------- /src/ice_siov.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _ICE_SIOV_H_ 5 | #define _ICE_SIOV_H_ 6 | 7 | #define ICE_DFLT_QS_PER_SIOV_VF 4 8 | 9 | #if IS_ENABLED(CONFIG_VFIO_MDEV) && defined(HAVE_PASID_SUPPORT) && defined(HAVE_IOMMU_DEV_FEAT_AUX) 10 | bool ice_is_siov_capable(struct ice_pf *pf); 11 | void ice_initialize_siov_res(struct ice_pf *pf); 12 | void ice_deinit_siov_res(struct ice_pf *pf); 13 | void ice_restore_pasid_config(struct ice_pf *pf, enum ice_reset_req reset_type); 14 | #else 15 | static inline bool ice_is_siov_capable(struct ice_pf *pf) 16 | { 17 | return false; 18 | } 19 | static inline void ice_initialize_siov_res(struct ice_pf *pf) { } 20 | static inline void ice_deinit_siov_res(struct ice_pf *pf) { } 21 | static inline void ice_restore_pasid_config(struct ice_pf *pf, 22 | enum ice_reset_req reset_type) { } 23 | #endif /* CONFIG_VFIO_MDEV && HAVE_PASID_SUPPORT && HAVE_IOMMU_DEV_FEAT_AUX */ 24 | 25 | #endif /* _ICE_SIOV_H_ */ 26 | -------------------------------------------------------------------------------- /src/ice_bst_tcam.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _ICE_BST_TCAM_H_ 5 | #define _ICE_BST_TCAM_H_ 6 | 7 | #include "ice_imem.h" 8 | 9 | struct ice_bst_tcam_item { 10 | u16 address; 11 | u8 key[20]; 12 | u8 key_inv[20]; 13 | u8 hit_idx_grp; 14 | u8 pg_pri; 15 | struct ice_np_keybuilder np_kb; 16 | struct ice_pg_keybuilder pg_kb; 17 | struct ice_alu alu0; 18 | struct ice_alu alu1; 19 | struct ice_alu alu2; 20 | }; 21 | 22 | void ice_bst_tcam_dump(struct ice_hw *hw, struct ice_bst_tcam_item *item); 23 | 24 | struct ice_bst_tcam_item *ice_bst_tcam_table_get(struct ice_hw *hw); 25 | 26 | struct ice_lbl_item *ice_bst_lbl_table_get(struct ice_hw *hw); 27 | 28 | struct ice_bst_tcam_item * 29 | ice_bst_tcam_match(struct ice_bst_tcam_item *tcam_table, u8 *pat); 30 | struct ice_bst_tcam_item * 31 | ice_bst_tcam_search(struct ice_bst_tcam_item *tcam_table, 32 | struct ice_lbl_item *lbl_table, 33 | const char *prefix, u16 *start); 34 | #endif /*_ICE_BST_TCAM_H_ */ 35 | 
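The boost TCAM entries above (key/key_inv byte pairs) are matched against packet data with the ternary scheme implemented inline in ice_tmatch.h: per bit, key=1/key_inv=1 means "don't care", key=0/key_inv=0 can never match, key=1/key_inv=0 requires the packet bit to be 0, and key=0/key_inv=1 requires it to be 1. The following minimal standalone sketch (plain userspace C, no driver headers; ternary_match_byte() simply restates the inline logic from ice_tmatch.h) shows how one key/key_inv byte classifies patterns:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* restatement of ice_ternary_match_byte() for a self-contained demo */
static bool ternary_match_byte(uint8_t key, uint8_t key_inv, uint8_t pat)
{
	int i;

	for (i = 0; i < 8; i++) {
		uint8_t k1 = key & (1 << i);
		uint8_t k2 = key_inv & (1 << i);
		uint8_t v = pat & (1 << i);

		if (k1 != 0 && k2 != 0)
			continue;	/* don't-care bit */
		if (k1 == 0 && k2 == 0)
			return false;	/* never-match bit */
		if (k1 == v)
			return false;	/* fixed bit with the wrong value */
	}

	return true;
}

int main(void)
{
	/* key=0x0F/key_inv=0xF0: bits 0-3 must be 0, bits 4-7 must be 1,
	 * so only the pattern 0xF0 matches
	 */
	printf("0xF0 -> %d\n", ternary_match_byte(0x0F, 0xF0, 0xF0)); /* 1 */
	printf("0xFF -> %d\n", ternary_match_byte(0x0F, 0xF0, 0xFF)); /* 0 */
	/* key=key_inv=0xFF is a full wildcard and matches anything */
	printf("any  -> %d\n", ternary_match_byte(0xFF, 0xFF, 0xA5)); /* 1 */
	return 0;
}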
--------------------------------------------------------------------------------
/src/kcompat_defs.h:
--------------------------------------------------------------------------------
1 | /* SPDX-License-Identifier: GPL-2.0-only */
2 | /* Copyright (C) 2018-2025 Intel Corporation */
3 | 
4 | #ifndef _KCOMPAT_DEFS_H_
5 | #define _KCOMPAT_DEFS_H_
6 | 
7 | #ifndef LINUX_VERSION_CODE
8 | #include <linux/version.h>
9 | #else
10 | #ifndef KERNEL_VERSION
11 | #define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))
12 | #endif
13 | #endif /* LINUX_VERSION_CODE */
14 | 
15 | #ifndef UTS_RELEASE
16 | #include <generated/utsrelease.h>
17 | #endif
18 | 
19 | /*
20 |  * Include the definitions file for HAVE/NEED flags for the standard upstream
21 |  * kernels.
22 |  *
23 |  * Then, based on the distribution we detect, load the distribution specific
24 |  * definitions file that customizes the definitions for the target
25 |  * distribution.
26 |  */
27 | #include "kcompat_std_defs.h"
28 | 
29 | #ifdef CONFIG_SUSE_KERNEL
30 | #include "kcompat_sles_defs.h"
31 | #elif UBUNTU_VERSION_CODE
32 | #include "kcompat_ubuntu_defs.h"
33 | #elif RHEL_RELEASE_CODE
34 | #include "kcompat_rhel_defs.h"
35 | #endif
36 | #endif /* _KCOMPAT_DEFS_H_ */
37 | 
--------------------------------------------------------------------------------
/src/ice_aux_support.c:
--------------------------------------------------------------------------------
1 | /* SPDX-License-Identifier: GPL-2.0-only */
2 | /* Copyright (C) 2018-2025 Intel Corporation */
3 | 
4 | #include "ice.h"
5 | #include "ice_lib.h"
6 | #include "ice_aux_support.h"
7 | #include "ice_idc_int.h"
8 | 
9 | int ice_init_aux(struct ice_pf *pf)
10 | {
11 | 	struct device *dev = ice_pf_to_dev(pf);
12 | 	int err;
13 | 
14 | 	if (!ice_is_aux_ena(pf)) {
15 | 		dev_warn(dev, "Aux drivers are not supported on this device\n");
16 | 		return 0;
17 | 	}
18 | 
19 | 	pf->cdev_infos = devm_kcalloc(dev, IIDC_MAX_NUM_AUX,
20 | 				      sizeof(*pf->cdev_infos), GFP_KERNEL);
21 | 	if (!pf->cdev_infos)
22 | 		return -ENOMEM;
23 | 
24 | 	err = ice_init_aux_devices(pf);
25 | 	if (err) {
26 | 		dev_err(dev, "Failed to initialize aux devs: %d\n",
27 | 			err);
28 | 		return -EIO;
29 | 	}
30 | 
31 | 	return err;
32 | }
33 | 
34 | void ice_deinit_aux(struct ice_pf *pf)
35 | {
36 | 	struct device *dev = ice_pf_to_dev(pf);
37 | 
38 | 	ice_for_each_aux(pf, NULL, ice_unroll_cdev_info);
39 | 	devm_kfree(dev, pf->cdev_infos);
40 | 	pf->cdev_infos = NULL;
41 | }
42 | 
--------------------------------------------------------------------------------
/src/ice_parser_util.h:
--------------------------------------------------------------------------------
1 | /* SPDX-License-Identifier: GPL-2.0-only */
2 | /* Copyright (C) 2018-2025 Intel Corporation */
3 | 
4 | #ifndef _ICE_PARSER_UTIL_H_
5 | #define _ICE_PARSER_UTIL_H_
6 | 
7 | #include "ice_imem.h"
8 | #include "ice_metainit.h"
9 | 
10 | struct ice_lbl_item {
11 | 	u16 idx;
12 | 	char label[64];
13 | };
14 | 
15 | struct ice_pkg_sect_hdr {
16 | 	__le16 count;
17 | 	__le16 offset;
18 | };
19 | 
20 | void ice_lbl_dump(struct ice_hw *hw, struct ice_lbl_item *item);
21 | void ice_parse_item_dflt(struct ice_hw *hw, u16 idx, void *item,
22 | 			 void *data, int size);
23 | 
24 | void *ice_parser_sect_item_get(u32 sect_type, void *section,
25 | 			       u32 index, u32 *offset);
26 | 
27 | void *ice_parser_create_table(struct ice_hw *hw, u32 sect_type,
28 | 			      u32 item_size, u32 length,
29 | 			      void *(*handler)(u32 sect_type, void *section,
30 | 					       u32 index, u32 *offset),
31 | 			      void (*parse_item)(struct ice_hw *hw, u16 idx,
32 | 						 void *item, void *data,
33 | 						 int size),
34 | 			      bool
no_offset); 35 | #endif /* _ICE_PARSER_UTIL_H_ */ 36 | -------------------------------------------------------------------------------- /src/ice_idc_int.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _ICE_IDC_INT_H_ 5 | #define _ICE_IDC_INT_H_ 6 | 7 | #include "iidc.h" 8 | 9 | #define ICE_MAX_NUM_AUX 4 10 | 11 | struct ice_pf; 12 | void ice_send_event_to_auxs(struct ice_pf *pf, struct iidc_event *event); 13 | struct iidc_auxiliary_drv 14 | *ice_get_auxiliary_drv(struct iidc_core_dev_info *cdev_info); 15 | void ice_send_event_to_aux_no_lock(struct iidc_core_dev_info *cdev, void *data); 16 | 17 | void ice_cdev_info_update_vsi(struct iidc_core_dev_info *cdev_info, 18 | struct ice_vsi *vsi); 19 | int ice_unroll_cdev_info(struct iidc_core_dev_info *cdev_info, void *data); 20 | struct iidc_core_dev_info 21 | *ice_find_cdev_info_by_id(struct ice_pf *pf, int cdev_info_id); 22 | 23 | #define ICE_FIND_CDEV_INFO(pf, cdev_info_id) \ 24 | ice_find_cdev_info_by_id((pf), (cdev_info_id)) 25 | 26 | void ice_send_vf_reset_to_aux(struct iidc_core_dev_info *cdev_info, u16 vf_id); 27 | bool ice_is_rdma_aux_loaded(struct ice_pf *pf); 28 | 29 | #endif /* !_ICE_IDC_INT_H_ */ 30 | -------------------------------------------------------------------------------- /src/ice_irq.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _ICE_IRQ_H_ 5 | #define _ICE_IRQ_H_ 6 | 7 | #ifdef HAVE_PCI_MSIX_ALLOC_IRQ_AT 8 | struct ice_irq_entry { 9 | unsigned int index; 10 | bool dynamic; /* allocation type flag */ 11 | }; 12 | 13 | struct ice_irq_tracker { 14 | struct xarray entries; 15 | u16 num_entries; /* total vectors available */ 16 | u16 num_static; /* preallocated entries */ 17 | }; 18 | #endif /* HAVE_PCI_MSIX_ALLOC_IRQ_AT */ 19 | 20 | int ice_init_interrupt_scheme(struct ice_pf *pf); 21 | void ice_clear_interrupt_scheme(struct ice_pf *pf); 22 | 23 | int ice_get_irq_num(struct ice_pf *pf, int idx); 24 | 25 | struct msi_map ice_alloc_irq(struct ice_pf *pf, bool dyn_only); 26 | void ice_free_irq(struct ice_pf *pf, struct msi_map map); 27 | int ice_get_max_used_msix_vector(struct ice_pf *pf); 28 | 29 | struct msix_entry * 30 | ice_alloc_aux_vectors(struct ice_pf *pf, int count); 31 | void 32 | ice_free_aux_vectors(struct ice_pf *pf, struct msix_entry *msix_entries, 33 | int count); 34 | #endif 35 | -------------------------------------------------------------------------------- /src/ice_migration_private.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _ICE_MIGRATION_PRIVATE_H_ 5 | #define _ICE_MIGRATION_PRIVATE_H_ 6 | 7 | /* This header file is for exposing functions in ice_migration.c to 8 | * files which will be compiled in ice.ko. 9 | * Functions which may be used by other files which will be compiled 10 | * in ice-vfio-pic.ko should be exposed as part of ice_migration.h. 
11 | */ 12 | 13 | #if IS_ENABLED(CONFIG_VFIO_PCI_CORE) && defined(HAVE_LMV1_SUPPORT) 14 | void ice_migration_save_vf_msg(struct ice_vf *vf, 15 | struct ice_rq_event_info *event); 16 | void ice_migration_fix_msg_vsi(struct ice_vf *vf, u32 v_opcode, u8 *msg); 17 | u32 ice_migration_supported_caps(void); 18 | #else 19 | static inline void 20 | ice_migration_save_vf_msg(struct ice_vf *vf, 21 | struct ice_rq_event_info *event) { } 22 | static inline u32 23 | ice_migration_supported_caps(void) 24 | { 25 | return 0xFFFFFFFF; 26 | } 27 | #endif /* CONFIG_VFIO_PCI_CORE && HAVE_LMV1_SUPPORT */ 28 | 29 | #endif /* _ICE_MIGRATION_PRIVATE_H_ */ 30 | -------------------------------------------------------------------------------- /src/ice_vsi_vlan_ops.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _ICE_VSI_VLAN_OPS_H_ 5 | #define _ICE_VSI_VLAN_OPS_H_ 6 | 7 | #include "ice_type.h" 8 | #include "ice_vsi_vlan_lib.h" 9 | 10 | struct ice_vsi; 11 | 12 | struct ice_vsi_vlan_ops { 13 | int (*add_vlan)(struct ice_vsi *vsi, struct ice_vlan *vlan); 14 | int (*del_vlan)(struct ice_vsi *vsi, struct ice_vlan *vlan); 15 | int (*ena_stripping)(struct ice_vsi *vsi, const u16 tpid); 16 | int (*dis_stripping)(struct ice_vsi *vsi); 17 | int (*ena_insertion)(struct ice_vsi *vsi, const u16 tpid); 18 | int (*dis_insertion)(struct ice_vsi *vsi); 19 | int (*ena_rx_filtering)(struct ice_vsi *vsi); 20 | int (*dis_rx_filtering)(struct ice_vsi *vsi); 21 | int (*ena_tx_filtering)(struct ice_vsi *vsi); 22 | int (*dis_tx_filtering)(struct ice_vsi *vsi); 23 | int (*set_port_vlan)(struct ice_vsi *vsi, struct ice_vlan *vlan); 24 | }; 25 | 26 | void ice_vsi_init_vlan_ops(struct ice_vsi *vsi); 27 | struct ice_vsi_vlan_ops *ice_get_compat_vsi_vlan_ops(struct ice_vsi *vsi); 28 | 29 | #endif /* _ICE_VSI_VLAN_OPS_H_ */ 30 | 31 | -------------------------------------------------------------------------------- /src/ice_vf_adq.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _ICE_VF_ADQ_H_ 5 | #define _ICE_VF_ADQ_H_ 6 | 7 | struct ice_vsi *ice_get_vf_adq_vsi(struct ice_vf *vf, u8 tc); 8 | bool ice_is_vf_adq_ena(struct ice_vf *vf); 9 | bool ice_vf_adq_vsi_valid(struct ice_vf *vf, u8 tc); 10 | void ice_del_all_adv_switch_fltr(struct ice_vf *vf); 11 | void ice_vf_adq_release(struct ice_vf *vf); 12 | void ice_vf_rebuild_adq_host_cfg(struct ice_vf *vf); 13 | int ice_vf_recreate_adq_vsi(struct ice_vf *vf); 14 | int ice_vf_rebuild_adq_vsi(struct ice_vf *vf); 15 | u16 ice_vf_get_tc_based_qid(u16 qid, u16 offset); 16 | void ice_vf_q_id_get_vsi_q_id(struct ice_vf *vf, u16 vf_q_id, u16 *t_tc, 17 | struct virtchnl_queue_select *vqs, 18 | struct ice_vsi **vsi_p, u16 *q_id); 19 | int ice_vc_del_switch_filter(struct ice_vf *vf, u8 *msg); 20 | int ice_vc_add_switch_filter(struct ice_vf *vf, u8 *msg); 21 | int ice_vc_add_qch_msg(struct ice_vf *vf, u8 *msg); 22 | int ice_vc_del_qch_msg(struct ice_vf *vf, u8 *msg); 23 | u64 ice_vf_adq_total_max_tx_rate(struct ice_vf *vf); 24 | 25 | #endif /* _ICE_VF_ADQ_H_ */ 26 | -------------------------------------------------------------------------------- /src/kcompat_ubuntu_defs.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright 
(C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _KCOMPAT_UBUNTU_DEFS_H_ 5 | #define _KCOMPAT_UBUNTU_DEFS_H_ 6 | 7 | /* This file contains the definitions for the Ubuntu specific distribution of 8 | * the Linux kernel. 9 | * 10 | * It checks the UBUNTU_VERSION_CODE to decide which features are available in 11 | * the target kernel. It assumes that kcompat_std_defs.h has already been 12 | * processed, and will #define or #undef the relevant flags based on what 13 | * features were backported by Ubuntu. 14 | */ 15 | 16 | #if !UTS_UBUNTU_RELEASE_ABI 17 | #error "UTS_UBUNTU_RELEASE_ABI is 0 or undefined" 18 | #endif 19 | 20 | #if !UBUNTU_VERSION_CODE 21 | #error "UBUNTU_VERSION_CODE is 0 or undefined" 22 | #endif 23 | 24 | #ifndef UBUNTU_VERSION 25 | #error "UBUNTU_VERSION is undefined" 26 | #endif 27 | 28 | /*****************************************************************************/ 29 | #if (UBUNTU_VERSION_CODE >= UBUNTU_VERSION(4,15,0,159) && \ 30 | UBUNTU_VERSION_CODE < UBUNTU_VERSION(4,15,0,999)) 31 | #endif 32 | 33 | /*****************************************************************************/ 34 | #endif /* _KCOMPAT_UBUNTU_DEFS_H_ */ 35 | -------------------------------------------------------------------------------- /src/ice_parser_rt.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _ICE_PARSER_RT_H_ 5 | #define _ICE_PARSER_RT_H_ 6 | 7 | struct ice_parser_ctx; 8 | 9 | #define ICE_PARSER_MAX_PKT_LEN 504 10 | #define ICE_PARSER_GPR_NUM 128 11 | 12 | struct ice_gpr_pu { 13 | bool gpr_val_upd[128]; /* flag to indicate if GRP needs to be updated */ 14 | u16 gpr_val[128]; 15 | u64 flg_msk; 16 | u64 flg_val; 17 | u16 err_msk; 18 | u16 err_val; 19 | }; 20 | 21 | struct ice_parser_rt { 22 | struct ice_parser *psr; 23 | u16 gpr[ICE_PARSER_GPR_NUM]; 24 | u8 pkt_buf[ICE_PARSER_MAX_PKT_LEN + 32]; 25 | u16 pkt_len; 26 | u16 po; 27 | u8 bst_key[20]; 28 | struct ice_pg_cam_key pg_key; 29 | struct ice_alu *alu0; 30 | struct ice_alu *alu1; 31 | struct ice_alu *alu2; 32 | struct ice_pg_cam_action *action; 33 | u8 pg; 34 | struct ice_gpr_pu pu; 35 | u8 markers[9]; /* 8 * 9 = 72 bits*/ 36 | bool protocols[256]; 37 | u16 offsets[256]; 38 | }; 39 | 40 | void ice_parser_rt_reset(struct ice_parser_rt *rt); 41 | void ice_parser_rt_pktbuf_set(struct ice_parser_rt *rt, const u8 *pkt_buf, 42 | int pkt_len); 43 | 44 | struct ice_parser_result; 45 | int ice_parser_rt_execute(struct ice_parser_rt *rt, 46 | struct ice_parser_result *rslt); 47 | #endif /* _ICE_PARSER_RT_H_ */ 48 | -------------------------------------------------------------------------------- /src/ice_vsi_vlan_lib.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _ICE_VSI_VLAN_LIB_H_ 5 | #define _ICE_VSI_VLAN_LIB_H_ 6 | 7 | #include 8 | #include "ice_vlan.h" 9 | 10 | struct ice_vsi; 11 | 12 | int ice_vsi_add_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan); 13 | int ice_vsi_del_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan); 14 | 15 | int ice_vsi_ena_inner_stripping(struct ice_vsi *vsi, u16 tpid); 16 | int ice_vsi_dis_inner_stripping(struct ice_vsi *vsi); 17 | int ice_vsi_ena_inner_insertion(struct ice_vsi *vsi, u16 tpid); 18 | int ice_vsi_dis_inner_insertion(struct ice_vsi *vsi); 19 | int ice_vsi_set_inner_port_vlan(struct ice_vsi *vsi, struct 
ice_vlan *vlan); 20 | 21 | int ice_vsi_ena_rx_vlan_filtering(struct ice_vsi *vsi); 22 | int ice_vsi_dis_rx_vlan_filtering(struct ice_vsi *vsi); 23 | int ice_vsi_ena_tx_vlan_filtering(struct ice_vsi *vsi); 24 | int ice_vsi_dis_tx_vlan_filtering(struct ice_vsi *vsi); 25 | 26 | int ice_vsi_ena_outer_stripping(struct ice_vsi *vsi, u16 tpid); 27 | int ice_vsi_dis_outer_stripping(struct ice_vsi *vsi); 28 | int ice_vsi_ena_outer_insertion(struct ice_vsi *vsi, u16 tpid); 29 | int ice_vsi_dis_outer_insertion(struct ice_vsi *vsi); 30 | int ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan); 31 | 32 | int ice_vsi_clear_port_vlan(struct ice_vsi *vsi); 33 | #endif /* _ICE_VSI_VLAN_LIB_H_ */ 34 | -------------------------------------------------------------------------------- /src/ice_migration.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _ICE_MIGRATION_H_ 5 | #define _ICE_MIGRATION_H_ 6 | 7 | #include "kcompat.h" 8 | #include 9 | 10 | #define IAVF_QRX_TAIL_MAX 256 11 | #if IS_ENABLED(CONFIG_VFIO_PCI_CORE) && defined(HAVE_LMV1_SUPPORT) 12 | void *ice_migration_get_vf(struct pci_dev *vf_pdev); 13 | void ice_migration_init_vf(void *opaque); 14 | void ice_migration_uninit_vf(void *opaque); 15 | int ice_migration_suspend_vf(void *opaque); 16 | int ice_migration_save_devstate(void *opaque, u8 *buf, u64 buf_sz); 17 | int ice_migration_restore_devstate(void *opaque, const u8 *buf, u64 buf_sz, 18 | struct kvm *kvm); 19 | #else 20 | static inline void *ice_migration_get_vf(struct pci_dev *vf_pdev) 21 | { 22 | return NULL; 23 | } 24 | 25 | static inline void ice_migration_init_vf(void *opaque) { } 26 | static inline void ice_migration_uninit_vf(void *opaque) { } 27 | 28 | static inline int ice_migration_suspend_vf(void *opaque) 29 | { 30 | return 0; 31 | } 32 | 33 | static inline int ice_migration_save_devstate(void *opaque, u8 *buf, u64 buf_sz) 34 | { 35 | return 0; 36 | } 37 | 38 | static inline int ice_migration_restore_devstate(void *opaque, const u8 *buf, 39 | u64 buf_sz, struct kvm *kvm) 40 | { 41 | return 0; 42 | } 43 | #endif /* CONFIG_VFIO_PCI_CORE && HAVE_LMV1_SUPPORT */ 44 | 45 | #endif /* _ICE_MIGRATION_H_ */ 46 | -------------------------------------------------------------------------------- /scripts/set_xps_rxqs: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # SPDX-License-Identifier: BSD-3-Clause 3 | # Copyright (C) 2019 - 2023 Intel Corporation 4 | # 5 | # Script to setup mechanism for Tx queue selection based on Rx queue(s) map. 6 | # This is done by configuring Rx queue(s) map per Tx queue via sysfs. This 7 | # Rx queue(s) map is used during selection of Tx queue in 8 | # data path (net/core/dev.c:get_xps_queue). 9 | # 10 | # typical usage is (as root): 11 | # set_xps_rxqs 12 | # 13 | # to get help: 14 | # set_xps_rxqs 15 | 16 | iface=$1 17 | 18 | if [ -z "$iface" ]; then 19 | echo "Usage: $0 " 20 | exit 1 21 | fi 22 | 23 | CHECK () { 24 | "$@" 25 | if [ $? -ne 0 ]; then 26 | echo "Error in command ${1}, execution aborted, but some changes may have already been made!" 
>&2 27 | exit 1 28 | fi 29 | } 30 | 31 | CPUMASK () { 32 | cpu=$1 33 | if [ $cpu -ge 32 ]; then 34 | mask_fill="" 35 | mask_zero="00000000" 36 | let "pow = $cpu / 32" 37 | for ((i=1; i<=pow; i++)); do 38 | mask_fill="${mask_fill},${mask_zero}" 39 | done 40 | 41 | let "cpu -= 32 * $pow" 42 | mask_tmp=$((1 << cpu)) 43 | mask=$(printf "%X%s" $mask_tmp $mask_fill) 44 | else 45 | mask_tmp=$((1 << cpu)) 46 | mask=$(printf "%X" $mask_tmp) 47 | fi 48 | echo $mask 49 | } 50 | 51 | for i in /sys/class/net/$iface/queues/tx-*/xps_rxqs; do 52 | j=$(echo $i | cut -d'/' -f7 | cut -d'-' -f2) 53 | mask=$(CPUMASK $j) 54 | echo ${mask} > $i 55 | CHECK echo ${mask} > $i 56 | done 57 | -------------------------------------------------------------------------------- /src/ice_adapter.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _ICE_ADAPTER_H_ 5 | #define _ICE_ADAPTER_H_ 6 | 7 | #include 8 | #include 9 | #include "kcompat.h" 10 | 11 | struct pci_dev; 12 | struct ice_pf; 13 | 14 | /** 15 | * struct ice_port_list - data used to store the list of adapter ports 16 | * 17 | * This structure contains data used to maintain a list of adapter ports 18 | * 19 | * @ports: list of ports 20 | * @lock: protect access to the ports list 21 | */ 22 | struct ice_port_list { 23 | struct list_head ports; 24 | /* To synchronize the ports list operations */ 25 | struct mutex lock; 26 | }; 27 | 28 | /** 29 | * struct ice_adapter - PCI adapter resources shared across PFs 30 | * @ptp_gltsyn_time_lock: Spinlock protecting access to the GLTSYN_TIME 31 | * register of the PTP clock. 32 | * @refcount: Reference count. struct ice_pf objects hold the references. 
33 | * @ctrl_pf: Control PF of the adapter 34 | * @ports: Ports list 35 | */ 36 | struct ice_adapter { 37 | refcount_t refcount; 38 | /* For access to the GLTSYN_TIME register */ 39 | spinlock_t ptp_gltsyn_time_lock; 40 | 41 | struct ice_pf *ctrl_pf; 42 | struct ice_port_list ports; 43 | }; 44 | 45 | struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev); 46 | void ice_adapter_put(const struct pci_dev *pdev); 47 | 48 | #endif /* _ICE_ADAPTER_H */ 49 | -------------------------------------------------------------------------------- /src/ice_pf_vsi_vlan_ops.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #include "ice_vsi_vlan_ops.h" 5 | #include "ice_vsi_vlan_lib.h" 6 | #include "ice_vlan_mode.h" 7 | #include "ice.h" 8 | #include "ice_pf_vsi_vlan_ops.h" 9 | 10 | void ice_pf_vsi_init_vlan_ops(struct ice_vsi *vsi) 11 | { 12 | struct ice_vsi_vlan_ops *vlan_ops; 13 | 14 | if (ice_is_dvm_ena(&vsi->back->hw)) { 15 | vlan_ops = &vsi->outer_vlan_ops; 16 | 17 | vlan_ops->add_vlan = ice_vsi_add_vlan; 18 | vlan_ops->del_vlan = ice_vsi_del_vlan; 19 | vlan_ops->ena_stripping = ice_vsi_ena_outer_stripping; 20 | vlan_ops->dis_stripping = ice_vsi_dis_outer_stripping; 21 | vlan_ops->ena_insertion = ice_vsi_ena_outer_insertion; 22 | vlan_ops->dis_insertion = ice_vsi_dis_outer_insertion; 23 | vlan_ops->ena_rx_filtering = ice_vsi_ena_rx_vlan_filtering; 24 | vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering; 25 | } else { 26 | vlan_ops = &vsi->inner_vlan_ops; 27 | 28 | vlan_ops->add_vlan = ice_vsi_add_vlan; 29 | vlan_ops->del_vlan = ice_vsi_del_vlan; 30 | vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping; 31 | vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping; 32 | vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion; 33 | vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion; 34 | vlan_ops->ena_rx_filtering = ice_vsi_ena_rx_vlan_filtering; 35 | vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering; 36 | } 37 | } 38 | 39 | -------------------------------------------------------------------------------- /src/ice_base.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _ICE_BASE_H_ 5 | #define _ICE_BASE_H_ 6 | 7 | #include "ice.h" 8 | 9 | int ice_vsi_cfg_rxq(struct ice_rx_ring *ring); 10 | int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg); 11 | int 12 | ice_vsi_ctrl_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx, bool wait); 13 | int ice_vsi_wait_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx); 14 | int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi, u8 tc); 15 | void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi); 16 | void ice_vsi_free_q_vectors(struct ice_vsi *vsi); 17 | int 18 | ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring, 19 | struct ice_tx_ring *tstamp_ring, 20 | struct ice_aqc_add_tx_qgrp *qg_buf, 21 | struct ice_aqc_set_txtime_qgrp *txtime_qg_buf); 22 | u16 ice_calc_ts_ring_count(struct ice_hw *hw, u16 tx_desc_count); 23 | void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector); 24 | void 25 | ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx); 26 | void 27 | ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx); 28 | void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector); 
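/* Illustrative sketch only, not part of the original header: one way a
 * caller could wire a Tx/Rx queue pair to a single MSI-X vector with the
 * declarations above. The field names (vsi->back->hw, q_vector->reg_idx)
 * and the ICE_TX_ITR/ICE_RX_ITR indices are assumptions taken from common
 * ice driver usage rather than from this header; see ice_lib.c for the
 * real per-vector configuration path.
 */
#if 0	/* example, never compiled */
static void example_map_queue_pair(struct ice_vsi *vsi,
				   struct ice_q_vector *q_vector, u16 qid)
{
	struct ice_hw *hw = &vsi->back->hw;

	/* program the vector's ITR settings first */
	ice_cfg_itr(hw, q_vector);

	/* point both rings of the pair at this vector's register index */
	ice_cfg_txq_interrupt(vsi, qid, q_vector->reg_idx, ICE_TX_ITR);
	ice_cfg_rxq_interrupt(vsi, qid, q_vector->reg_idx, ICE_RX_ITR);

	/* optionally fire a software interrupt so the vector runs once
	 * without waiting for traffic
	 */
	ice_trigger_sw_intr(hw, q_vector);
}
#endif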
29 | int 30 | ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, 31 | u16 rel_vmvf_num, struct ice_tx_ring *ring, 32 | struct ice_txq_meta *txq_meta); 33 | void 34 | ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_tx_ring *ring, 35 | struct ice_txq_meta *txq_meta); 36 | #endif /* _ICE_BASE_H_ */ 37 | -------------------------------------------------------------------------------- /src/ice_repr.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _ICE_REPR_H_ 5 | #define _ICE_REPR_H_ 6 | 7 | struct ice_repr_pcpu_stats { 8 | struct u64_stats_sync syncp; 9 | u64 rx_packets; 10 | u64 rx_bytes; 11 | u64 tx_packets; 12 | u64 tx_bytes; 13 | u64 tx_drops; 14 | }; 15 | 16 | struct ice_repr { 17 | struct ice_vsi *src_vsi; 18 | struct ice_vf *vf; 19 | struct net_device *netdev; 20 | struct metadata_dst *dst; 21 | struct ice_repr_pcpu_stats __percpu *stats; 22 | u32 id; 23 | u8 parent_mac[ETH_ALEN]; 24 | }; 25 | 26 | struct ice_repr *ice_repr_add_vf(struct ice_vf *vf); 27 | void ice_repr_rem_vf(struct ice_repr *repr); 28 | void ice_repr_start_tx_queues(struct ice_repr *repr); 29 | void ice_repr_stop_tx_queues(struct ice_repr *repr); 30 | void ice_repr_inc_tx_stats(struct ice_repr *repr, unsigned int len, int ret); 31 | void ice_repr_inc_rx_stats(const struct net_device *netdev, unsigned int len); 32 | #if IS_ENABLED(CONFIG_NET_DEVLINK) 33 | void ice_repr_set_link(struct radix_tree_root *reprs, u32 repr_id, bool link); 34 | struct ice_repr *ice_netdev_to_repr(const struct net_device *netdev); 35 | bool ice_is_port_repr_netdev(const struct net_device *netdev); 36 | #else 37 | static inline struct ice_repr * 38 | ice_netdev_to_repr(const struct net_device *netdev) { return NULL; } 39 | static inline 40 | bool ice_is_port_repr_netdev(const struct net_device *netdev) { return false; } 41 | static inline void 42 | ice_repr_set_link(struct radix_tree_root *reprs, u32 repr_id, bool link) { } 43 | #endif /* CONFIG_NET_DEVLINK */ 44 | #endif 45 | -------------------------------------------------------------------------------- /scripts/adqsetup/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (C) 2022 - 2023 Intel Corporation 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are met: 6 | 7 | 1. Redistributions of source code must retain the above copyright notice, 8 | this list of conditions and the following disclaimer. 9 | 10 | 2. Redistributions in binary form must reproduce the above copyright 11 | notice, this list of conditions and the following disclaimer in the 12 | documentation and/or other materials provided with the distribution. 13 | 14 | 3. Neither the name of the Intel Corporation nor the names of its 15 | contributors may be used to endorse or promote products derived from 16 | this software without specific prior written permission. 17 | 18 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 | ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 22 | LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 | SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 | INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 | CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 | ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 | POSSIBILITY OF SUCH DAMAGE. 29 | -------------------------------------------------------------------------------- /src/ice_mk_grp.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #include "ice_common.h" 5 | #include "ice_parser_util.h" 6 | 7 | #define ICE_MK_GRP_TABLE_SIZE 128 8 | #define ICE_MK_COUNT_PER_GRP 8 9 | 10 | /** 11 | * ice_mk_grp_dump - dump an marker group item info 12 | * @hw: pointer to the hardware structure 13 | * @item: marker group item to dump 14 | */ 15 | void ice_mk_grp_dump(struct ice_hw *hw, struct ice_mk_grp_item *item) 16 | { 17 | int i; 18 | 19 | dev_info(ice_hw_to_dev(hw), "index = %d\n", item->idx); 20 | dev_info(ice_hw_to_dev(hw), "markers: "); 21 | for (i = 0; i < ICE_MK_COUNT_PER_GRP; i++) 22 | dev_info(ice_hw_to_dev(hw), "%d ", item->markers[i]); 23 | dev_info(ice_hw_to_dev(hw), "\n"); 24 | } 25 | 26 | static void _mk_grp_parse_item(struct ice_hw *hw, u16 idx, void *item, 27 | void *data, int size) 28 | { 29 | struct ice_mk_grp_item *grp = item; 30 | u8 *buf = data; 31 | int i; 32 | 33 | grp->idx = idx; 34 | 35 | for (i = 0; i < ICE_MK_COUNT_PER_GRP; i++) 36 | grp->markers[i] = buf[i]; 37 | 38 | if (hw->debug_mask & ICE_DBG_PARSER) 39 | ice_mk_grp_dump(hw, grp); 40 | } 41 | 42 | /** 43 | * ice_mk_grp_table_get - create a marker group table 44 | * @hw: pointer to the hardware structure 45 | */ 46 | struct ice_mk_grp_item *ice_mk_grp_table_get(struct ice_hw *hw) 47 | { 48 | return (struct ice_mk_grp_item *) 49 | ice_parser_create_table(hw, ICE_SID_RXPARSER_MARKER_GRP, 50 | sizeof(struct ice_mk_grp_item), 51 | ICE_MK_GRP_TABLE_SIZE, 52 | ice_parser_sect_item_get, 53 | _mk_grp_parse_item, false); 54 | } 55 | -------------------------------------------------------------------------------- /src/devlink/health.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _HEALTH_H_ 5 | #define _HEALTH_H_ 6 | 7 | #include "kcompat.h" 8 | #include 9 | 10 | struct ice_pf; 11 | struct ice_tx_ring; 12 | 13 | enum ice_mdd_src { 14 | ICE_MDD_SRC_TX_PQM, 15 | ICE_MDD_SRC_TX_TCLAN, 16 | ICE_MDD_SRC_TX_TDPU, 17 | ICE_MDD_SRC_RX, 18 | }; 19 | 20 | #ifdef HAVE_DEVLINK_HEALTH 21 | 22 | /** 23 | * struct ice_health couples all ice devlink health reporters and accompanied 24 | * data 25 | */ 26 | struct ice_health { 27 | struct devlink_health_reporter *mdd; 28 | struct devlink_health_reporter *tx_hang; 29 | }; 30 | 31 | void ice_health_init(struct ice_pf *pf); 32 | void ice_health_deinit(struct ice_pf *pf); 33 | void ice_health_clear(struct ice_pf *pf); 34 | 35 | void ice_devlink_report_mdd_event(struct ice_pf *pf, enum ice_mdd_src src, 36 | u8 pf_num, u16 vf_num, u8 event, u16 queue); 37 | void ice_report_tx_hang(struct ice_pf *pf, struct ice_tx_ring *tx_ring, 38 | u16 vsi_num, u32 head, u32 
intr); 39 | 40 | #else /* HAVE_DEVLINK_HEALTH */ 41 | 42 | static inline void ice_health_init(struct ice_pf *pf) {} 43 | static inline void ice_health_deinit(struct ice_pf *pf) {} 44 | static inline void ice_health_clear(struct ice_pf *pf) {} 45 | 46 | static inline 47 | void ice_devlink_report_mdd_event(struct ice_pf *pf, enum ice_mdd_src src, 48 | u8 pf_num, u16 vf_num, u8 event, u16 queue) {} 49 | static inline 50 | void ice_report_tx_hang(struct ice_pf *pf, struct ice_tx_ring *tx_ring, 51 | u16 vsi_num, u32 head, u32 intr) {} 52 | 53 | #endif /* HAVE_DEVLINK_HEALTH */ 54 | #endif /* _HEALTH_H_ */ 55 | -------------------------------------------------------------------------------- /src/ice_gnss.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _ICE_GNSS_H_ 5 | #define _ICE_GNSS_H_ 6 | 7 | #define ICE_E810T_GNSS_I2C_BUS 0x2 8 | #define ICE_GNSS_TIMER_DELAY_TIME (HZ / 10) /* 0.1 second per message */ 9 | #define ICE_GNSS_TTY_WRITE_BUF 250 10 | #define ICE_MAX_I2C_DATA_SIZE FIELD_MAX(ICE_AQC_I2C_DATA_SIZE_M) 11 | #define ICE_MAX_I2C_WRITE_BYTES 4 12 | 13 | /* u-blox ZED-F9T specific definitions */ 14 | #define ICE_GNSS_UBX_I2C_BUS_ADDR 0x42 15 | /* Data length register is big endian */ 16 | #define ICE_GNSS_UBX_DATA_LEN_H 0xFD 17 | #define ICE_GNSS_UBX_DATA_LEN_WIDTH 2 18 | #define ICE_GNSS_UBX_EMPTY_DATA 0xFF 19 | /* For u-blox writes are performed without address so the first byte to write is 20 | * passed as I2C addr parameter. 21 | */ 22 | #define ICE_GNSS_UBX_WRITE_BYTES (ICE_MAX_I2C_WRITE_BYTES + 1) 23 | #define ICE_MAX_UBX_READ_TRIES 255 24 | #define ICE_MAX_UBX_ACK_READ_TRIES 4095 25 | 26 | /** 27 | * struct gnss_serial - data used to initialize GNSS TTY port 28 | * @back: back pointer to PF 29 | * @kworker: kwork thread for handling periodic work 30 | * @read_work: read_work function for handling GNSS reads 31 | * @gnss_module_owner: flag informing whether current driver is responsible 32 | * for module deinitialization 33 | */ 34 | struct gnss_serial { 35 | struct ice_pf *back; 36 | struct kthread_worker *kworker; 37 | struct kthread_delayed_work read_work; 38 | #if !defined(HAVE_GNSS_MODULE) || !IS_ENABLED(CONFIG_GNSS) 39 | bool gnss_module_owner; 40 | #endif /* !HAVE_GNSS_MODULE || !IS_ENABLED(CONFIG_GNSS) */ 41 | }; 42 | 43 | void ice_gnss_init(struct ice_pf *pf); 44 | void ice_gnss_exit(struct ice_pf *pf); 45 | bool ice_gnss_is_module_present(struct ice_hw *hw); 46 | #endif /* _ICE_GNSS_H_ */ 47 | -------------------------------------------------------------------------------- /src/siov_regs.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | /* 5 | * Copyright (C) 2023 Intel Corporation 6 | * 7 | * For licensing information, see the file 'LICENSE' in the root folder 8 | */ 9 | 10 | #ifndef _SIOV_REGS_H_ 11 | #define _SIOV_REGS_H_ 12 | #define VDEV_MBX_START 0x20000 /* Begin at 128KB */ 13 | #define VDEV_GET_RSTAT 0x21000 /* 132KB for RSTAT */ 14 | 15 | /* Begin at offset after 1MB (after 256 4k pages) */ 16 | #define VDEV_QRX_TAIL_START 0x100000 17 | #define VDEV_QRX_TAIL(_i) (VDEV_QRX_TAIL_START + ((_i) * 0x1000)) /* 2k Rx queues */ 18 | 19 | #define VDEV_QRX_BUFQ_TAIL_START 0x900000 /* Begin at offset of 9MB for Rx buffer queue tail register pages */ 20 | #define VDEV_QRX_BUFQ_TAIL(_i) 
(VDEV_QRX_BUFQ_TAIL_START + ((_i) * 0x1000)) /* 2k Rx buffer queues */ 21 | 22 | #define VDEV_QTX_TAIL_START 0x1100000 /* Begin at offset of 17MB for 2k Tx queues */ 23 | #define VDEV_QTX_TAIL(_i) (VDEV_QTX_TAIL_START + ((_i) * 0x1000)) /* 2k Tx queues */ 24 | 25 | #define VDEV_QTX_COMPL_TAIL_START 0x1900000 /* Begin at offset of 25MB for 2k Tx completion queues */ 26 | #define VDEV_QTX_COMPL_TAIL(_i) (VDEV_QTX_COMPL_TAIL_START + ((_i) * 0x1000)) /* 2k Tx completion queues */ 27 | 28 | #define VDEV_INT_DYN_CTL01 0x2100000 /* Begin at offset 33MB */ 29 | 30 | #define VDEV_INT_DYN_START (VDEV_INT_DYN_CTL01 + 0x1000) /* Begin at offset of 33MB + 4k to accomdate CTL01 register */ 31 | #define VDEV_INT_DYN_CTL(_i) (VDEV_INT_DYN_START + ((_i) * 0x1000)) 32 | #define VDEV_INT_ITR_0(_i) (VDEV_INT_DYN_START + ((_i) * 0x1000) + 0x04) 33 | #define VDEV_INT_ITR_1(_i) (VDEV_INT_DYN_START + ((_i) * 0x1000) + 0x08) 34 | #define VDEV_INT_ITR_2(_i) (VDEV_INT_DYN_START + ((_i) * 0x1000) + 0x0C) 35 | 36 | #define SIOV_REG_BAR_SIZE 0x2A00000 37 | /* Next offset to begin at 42MB + 4K (0x2A00000 + 0x1000) */ 38 | #endif /* _SIOV_REGS_H_ */ 39 | -------------------------------------------------------------------------------- /src/ice_virtchnl_fdir.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _ICE_VIRTCHNL_FDIR_H_ 5 | #define _ICE_VIRTCHNL_FDIR_H_ 6 | 7 | struct ice_vf; 8 | struct ice_pf; 9 | struct ice_vsi; 10 | 11 | enum ice_fdir_ctx_stat { 12 | ICE_FDIR_CTX_READY, 13 | ICE_FDIR_CTX_IRQ, 14 | ICE_FDIR_CTX_TIMEOUT, 15 | }; 16 | 17 | struct ice_vf_fdir_ctx { 18 | struct timer_list rx_tmr; 19 | enum virtchnl_ops v_opcode; 20 | enum ice_fdir_ctx_stat stat; 21 | union ice_32b_rx_flex_desc rx_desc; 22 | #define ICE_VF_FDIR_CTX_VALID BIT(0) 23 | u32 flags; 24 | 25 | void *conf; 26 | }; 27 | 28 | /* VF FDIR information structure */ 29 | struct ice_vf_fdir { 30 | u16 fdir_fltr_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX]; 31 | int prof_entry_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX]; 32 | struct ice_fd_hw_prof **fdir_prof; 33 | 34 | struct idr fdir_rule_idr; 35 | struct list_head fdir_rule_list; 36 | 37 | spinlock_t ctx_lock; /* protects FDIR context info */ 38 | struct ice_vf_fdir_ctx ctx_irq; 39 | struct ice_vf_fdir_ctx ctx_done; 40 | }; 41 | 42 | #ifdef CONFIG_PCI_IOV 43 | int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg); 44 | int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg); 45 | void ice_vc_fdir_free_prof_all(struct ice_vf *vf); 46 | void ice_vc_fdir_rem_prof_all(struct ice_vf *vf); 47 | void ice_vf_fdir_init(struct ice_vf *vf); 48 | void ice_vf_fdir_exit(struct ice_vf *vf); 49 | void ice_vf_fdir_exit_all(struct ice_pf *pf); 50 | void 51 | ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi, 52 | union ice_32b_rx_flex_desc *rx_desc); 53 | void ice_flush_fdir_ctx(struct ice_pf *pf); 54 | #else 55 | static inline void ice_vf_fdir_exit_all(struct ice_pf *pf) { } 56 | static inline void 57 | ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi, union ice_32b_rx_flex_desc *rx_desc) { } 58 | static inline void ice_flush_fdir_ctx(struct ice_pf *pf) { } 59 | #endif /* CONFIG_PCI_IOV */ 60 | #endif /* _ICE_VIRTCHNL_FDIR_H_ */ 61 | -------------------------------------------------------------------------------- /src/ice_pg_cam.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright 
(C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _ICE_PG_CAM_H_ 5 | #define _ICE_PG_CAM_H_ 6 | 7 | #define ICE_PG_CAM_TABLE_SIZE 2048 8 | #define ICE_PG_SP_CAM_TABLE_SIZE 128 9 | #define ICE_PG_NM_CAM_TABLE_SIZE 1024 10 | #define ICE_PG_NM_SP_CAM_TABLE_SIZE 64 11 | 12 | struct ice_pg_cam_key { 13 | bool valid; 14 | u16 node_id; 15 | bool flag0; 16 | bool flag1; 17 | bool flag2; 18 | bool flag3; 19 | u8 boost_idx; 20 | u16 alu_reg; 21 | u32 next_proto; 22 | }; 23 | 24 | struct ice_pg_nm_cam_key { 25 | bool valid; 26 | u16 node_id; 27 | bool flag0; 28 | bool flag1; 29 | bool flag2; 30 | bool flag3; 31 | u8 boost_idx; 32 | u16 alu_reg; 33 | }; 34 | 35 | struct ice_pg_cam_action { 36 | u16 next_node; 37 | u8 next_pc; 38 | bool is_pg; 39 | u8 proto_id; 40 | bool is_mg; 41 | u8 marker_id; 42 | bool is_last_round; 43 | bool ho_polarity; 44 | u16 ho_inc; 45 | }; 46 | 47 | struct ice_pg_cam_item { 48 | u16 idx; 49 | struct ice_pg_cam_key key; 50 | struct ice_pg_cam_action action; 51 | }; 52 | 53 | struct ice_pg_nm_cam_item { 54 | u16 idx; 55 | struct ice_pg_nm_cam_key key; 56 | struct ice_pg_cam_action action; 57 | }; 58 | 59 | void ice_pg_cam_dump(struct ice_hw *hw, struct ice_pg_cam_item *item); 60 | void ice_pg_nm_cam_dump(struct ice_hw *hw, struct ice_pg_nm_cam_item *item); 61 | 62 | struct ice_pg_cam_item *ice_pg_cam_table_get(struct ice_hw *hw); 63 | struct ice_pg_cam_item *ice_pg_sp_cam_table_get(struct ice_hw *hw); 64 | 65 | struct ice_pg_nm_cam_item *ice_pg_nm_cam_table_get(struct ice_hw *hw); 66 | struct ice_pg_nm_cam_item *ice_pg_nm_sp_cam_table_get(struct ice_hw *hw); 67 | 68 | struct ice_pg_cam_item *ice_pg_cam_match(struct ice_pg_cam_item *table, 69 | int size, struct ice_pg_cam_key *key); 70 | struct ice_pg_nm_cam_item * 71 | ice_pg_nm_cam_match(struct ice_pg_nm_cam_item *table, int size, 72 | struct ice_pg_cam_key *key); 73 | #endif /* _ICE_PG_CAM_H_ */ 74 | -------------------------------------------------------------------------------- /src/ice_sbq_cmd.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _ICE_SBQ_CMD_H_ 5 | #define _ICE_SBQ_CMD_H_ 6 | 7 | /* This header file defines the Sideband Queue commands, error codes and 8 | * descriptor format. It is shared between Firmware and Software. 9 | */ 10 | 11 | /* Sideband Queue command structure and opcodes */ 12 | enum ice_sbq_opc { 13 | /* Sideband Queue commands */ 14 | ice_sbq_opc_neigh_dev_req = 0x0C00, 15 | ice_sbq_opc_neigh_dev_ev = 0x0C01 16 | }; 17 | 18 | /* Sideband Queue descriptor. 
Indirect command 19 | * and non posted 20 | */ 21 | struct ice_sbq_cmd_desc { 22 | __le16 flags; 23 | __le16 opcode; 24 | __le16 datalen; 25 | __le16 cmd_retval; 26 | 27 | /* Opaque message data */ 28 | __le32 cookie_high; 29 | __le32 cookie_low; 30 | 31 | union { 32 | __le16 cmd_len; 33 | __le16 cmpl_len; 34 | } param0; 35 | 36 | u8 reserved[6]; 37 | __le32 addr_high; 38 | __le32 addr_low; 39 | }; 40 | 41 | struct ice_sbq_evt_desc { 42 | __le16 flags; 43 | __le16 opcode; 44 | __le16 datalen; 45 | __le16 cmd_retval; 46 | u8 data[24]; 47 | }; 48 | 49 | enum ice_sbq_msg_dev { 50 | phy_0 = 2, 51 | cgu = 6, 52 | phy_0_peer = 13, 53 | cgu_peer = 15, 54 | }; 55 | 56 | enum ice_sbq_msg_opcode { 57 | ice_sbq_msg_rd = 0x00, 58 | ice_sbq_msg_wr_p = 0x01, 59 | ice_sbq_msg_wr_np = 0x02 60 | }; 61 | 62 | #define ICE_SBQ_MSG_FLAGS 0x40 63 | #define ICE_SBQ_MSG_SBE_FBE 0x0F 64 | 65 | struct ice_sbq_msg_req { 66 | u8 dest_dev; 67 | u8 src_dev; 68 | u8 opcode; 69 | u8 flags; 70 | u8 sbe_fbe; 71 | u8 func_id; 72 | __le16 msg_addr_low; 73 | __le32 msg_addr_high; 74 | __le32 data; 75 | }; 76 | 77 | struct ice_sbq_msg_cmpl { 78 | u8 dest_dev; 79 | u8 src_dev; 80 | u8 opcode; 81 | u8 flags; 82 | __le32 data; 83 | }; 84 | 85 | /* Internal struct */ 86 | struct ice_sbq_msg_input { 87 | u8 dest_dev; 88 | u8 opcode; 89 | u16 msg_addr_low; 90 | u32 msg_addr_high; 91 | u32 data; 92 | }; 93 | #endif /* _ICE_SBQ_CMD_H_ */ 94 | -------------------------------------------------------------------------------- /src/kcompat_kthread.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | /* SPDX-License-Identifier: GPL-2.0 */ 5 | 6 | #ifndef _KCOMPAT_KTHREAD_H_ 7 | #define _KCOMPAT_KTHREAD_H_ 8 | 9 | /* Kernels since 4.9 have supported delayed work items for kthreads. In order 10 | * to allow seamless transition from old to new kernels, this header defines 11 | * a set of macros to switch out kthread usage with a work queue on the older 12 | * kernels that do not have support for kthread_delayed_work. 13 | */ 14 | #ifdef HAVE_KTHREAD_DELAYED_API 15 | #include 16 | #else /* HAVE_KTHREAD_DELAYED_API */ 17 | #include 18 | #undef kthread_work 19 | #define kthread_work work_struct 20 | #undef kthread_delayed_work 21 | #define kthread_delayed_work delayed_work 22 | #undef kthread_worker 23 | #define kthread_worker workqueue_struct 24 | #undef kthread_queue_work 25 | #define kthread_queue_work(worker, work) queue_work(worker, work) 26 | #undef kthread_queue_delayed_work 27 | #define kthread_queue_delayed_work(worker, dwork, delay) \ 28 | queue_delayed_work(worker, dwork, delay) 29 | #undef kthread_init_work 30 | #define kthread_init_work(work, fn) INIT_WORK(work, fn) 31 | #undef kthread_init_delayed_work 32 | #define kthread_init_delayed_work(dwork, fn) \ 33 | INIT_DELAYED_WORK(dwork, fn) 34 | #undef kthread_flush_worker 35 | #define kthread_flush_worker(worker) flush_workqueue(worker) 36 | #undef kthread_cancel_work_sync 37 | #define kthread_cancel_work_sync(work) cancel_work_sync(work) 38 | #undef kthread_cancel_delayed_work_sync 39 | #define kthread_cancel_delayed_work_sync(dwork) \ 40 | cancel_delayed_work_sync(dwork) 41 | #undef kthread_create_worker 42 | #define kthread_create_worker(flags, namefmt, ...) 
\ 43 | alloc_workqueue(namefmt, 0, 0, ##__VA_ARGS__) 44 | #undef kthread_destroy_worker 45 | #define kthread_destroy_worker(worker) destroy_workqueue(worker) 46 | #endif /* !HAVE_KTHREAD_DELAYED_API */ 47 | 48 | #endif /* _KCOMPAT_KTHREAD_H_ */ 49 | -------------------------------------------------------------------------------- /src/ice_vf_lib_private.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _ICE_VF_LIB_PRIVATE_H_ 5 | #define _ICE_VF_LIB_PRIVATE_H_ 6 | 7 | #include "ice_vf_lib.h" 8 | 9 | /* This header file is for exposing functions in ice_vf_lib.c to other files 10 | * which are also conditionally compiled depending on CONFIG_PCI_IOV. 11 | * Functions which may be used by other files should be exposed as part of 12 | * ice_vf_lib.h 13 | * 14 | * Functions in this file are exposed only when CONFIG_PCI_IOV is enabled, and 15 | * thus this header must not be included by .c files which may be compiled 16 | * with CONFIG_PCI_IOV disabled. 17 | * 18 | * To avoid this, only include this header file directly within .c files that 19 | * are conditionally enabled in the "ice-$(CONFIG_PCI_IOV)" block. 20 | */ 21 | 22 | #ifndef CONFIG_PCI_IOV 23 | #warning "Only include ice_vf_lib_private.h in CONFIG_PCI_IOV virtualization files" 24 | #endif 25 | 26 | void ice_initialize_vf_entry(struct ice_vf *vf); 27 | void ice_deinitialize_vf_entry(struct ice_vf *vf); 28 | void ice_dis_vf_qs(struct ice_vf *vf); 29 | enum virtchnl_status_code ice_err_to_virt_err(int err); 30 | int ice_check_vf_init(struct ice_vf *vf); 31 | struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf); 32 | int ice_vsi_apply_spoofchk(struct ice_vsi *vsi, bool enable); 33 | bool ice_is_vf_trusted(struct ice_vf *vf); 34 | bool ice_vf_has_no_qs_ena(struct ice_vf *vf); 35 | bool ice_is_vf_link_up(struct ice_vf *vf); 36 | int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf, struct ice_vsi *vsi); 37 | void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi); 38 | int ice_vf_init_host_cfg(struct ice_vf *vf, struct ice_vsi *vsi); 39 | void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf); 40 | void ice_vf_ctrl_vsi_release(struct ice_vf *vf); 41 | struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf); 42 | void ice_vf_invalidate_vsi(struct ice_vf *vf); 43 | void ice_vf_vsi_release(struct ice_vf *vf); 44 | 45 | #endif /* _ICE_VF_LIB_PRIVATE_H_ */ 46 | -------------------------------------------------------------------------------- /src/ice_fltr.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _ICE_FLTR_H_ 5 | #define _ICE_FLTR_H_ 6 | 7 | #include "ice_vlan.h" 8 | 9 | void ice_fltr_free_list(struct device *dev, struct list_head *h); 10 | int 11 | ice_fltr_set_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi, 12 | unsigned long *promisc_mask); 13 | int 14 | ice_fltr_clear_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi, 15 | unsigned long *promisc_mask); 16 | int 17 | ice_fltr_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, 18 | unsigned long *promisc_mask, u16 vid, u8 lport); 19 | int 20 | ice_fltr_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, 21 | unsigned long *promisc_mask, u16 vid, u8 lport); 22 | int 23 | ice_fltr_add_mac_to_list(struct ice_vsi *vsi, struct list_head *list, 24 | const u8 *mac, enum 
ice_sw_fwd_act_type action); 25 | int 26 | ice_fltr_add_mac(struct ice_vsi *vsi, const u8 *mac, 27 | enum ice_sw_fwd_act_type action); 28 | int 29 | ice_fltr_add_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac, 30 | enum ice_sw_fwd_act_type action); 31 | int ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list); 32 | int 33 | ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac, 34 | enum ice_sw_fwd_act_type action); 35 | int ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list); 36 | 37 | int ice_fltr_add_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan); 38 | int ice_fltr_remove_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan); 39 | int 40 | ice_fltr_add_mac_vlan(struct ice_vsi *vsi, const u8 *mac, u16 vlan_id, 41 | enum ice_sw_fwd_act_type action); 42 | int 43 | ice_fltr_remove_mac_vlan(struct ice_vsi *vsi, const u8 *mac, u16 vlan_id, 44 | enum ice_sw_fwd_act_type action); 45 | 46 | int 47 | ice_fltr_add_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag, 48 | enum ice_sw_fwd_act_type action); 49 | int 50 | ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag, 51 | enum ice_sw_fwd_act_type action); 52 | void ice_fltr_remove_all(struct ice_vsi *vsi); 53 | #endif 54 | -------------------------------------------------------------------------------- /src/ice_vf_mbx.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _ICE_VF_MBX_H_ 5 | #define _ICE_VF_MBX_H_ 6 | 7 | #include "ice_type.h" 8 | #include "ice_controlq.h" 9 | 10 | /* Defining the mailbox message threshold as 63 asynchronous 11 | * pending messages. Normal VF functionality does not require 12 | * sending more than 63 asynchronous pending message. 13 | * 14 | * Threshold value should be used to initialize 15 | * E830_MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT register. 
16 | */ 17 | #define ICE_ASYNC_VF_MSG_THRESHOLD 63 18 | 19 | #ifdef CONFIG_PCI_IOV 20 | int 21 | ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval, 22 | u8 *msg, u16 msglen, struct ice_sq_cd *cd); 23 | 24 | u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed); 25 | void ice_mbx_vf_dec_trig_e830(struct ice_hw *hw, 26 | struct ice_rq_event_info *event); 27 | void ice_mbx_vf_clear_cnt_e830(struct ice_hw *hw, u16 vf_id); 28 | int 29 | ice_mbx_vf_state_handler(struct ice_hw *hw, struct ice_mbx_data *mbx_data, 30 | struct ice_mbx_vf_info *vf_info, bool *report_malvf); 31 | void ice_mbx_clear_malvf(struct ice_mbx_vf_info *vf_info); 32 | void ice_mbx_init_vf_info(struct ice_hw *hw, struct ice_mbx_vf_info *vf_info); 33 | void ice_mbx_init_snapshot(struct ice_hw *hw); 34 | #else /* CONFIG_PCI_IOV */ 35 | static inline int 36 | ice_aq_send_msg_to_vf(struct ice_hw __always_unused *hw, 37 | u16 __always_unused vfid, u32 __always_unused v_opcode, 38 | u32 __always_unused v_retval, u8 __always_unused *msg, 39 | u16 __always_unused msglen, 40 | struct ice_sq_cd __always_unused *cd) 41 | { 42 | return 0; 43 | } 44 | 45 | static inline u32 46 | ice_conv_link_speed_to_virtchnl(bool __always_unused adv_link_support, 47 | u16 __always_unused link_speed) 48 | { 49 | return 0; 50 | } 51 | 52 | static inline void ice_mbx_init_snapshot(struct ice_hw *hw) 53 | { 54 | } 55 | 56 | static inline void ice_mbx_vf_dec_trig_e830(struct ice_hw *hw, 57 | struct ice_rq_event_info *event) 58 | { 59 | } 60 | 61 | #endif /* CONFIG_PCI_IOV */ 62 | #endif /* _ICE_VF_MBX_H_ */ 63 | -------------------------------------------------------------------------------- /src/ice_flg_rd.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #include "ice_common.h" 5 | #include "ice_parser_util.h" 6 | 7 | #define ICE_FLG_RD_TABLE_SIZE 64 8 | 9 | /** 10 | * ice_flg_rd_dump - dump a flag redirect item info 11 | * @hw: pointer to the hardware structure 12 | * @item: flag redirect item to dump 13 | */ 14 | void ice_flg_rd_dump(struct ice_hw *hw, struct ice_flg_rd_item *item) 15 | { 16 | dev_info(ice_hw_to_dev(hw), "index = %d\n", item->idx); 17 | dev_info(ice_hw_to_dev(hw), "expose = %d\n", item->expose); 18 | dev_info(ice_hw_to_dev(hw), "intr_flg_id = %d\n", item->intr_flg_id); 19 | } 20 | 21 | /** The function parses a 8 bits Flag Redirect Table entry with below format: 22 | * BIT 0: Expose (rdi->expose) 23 | * BIT 1-6: Internal Flag ID (rdi->intr_flg_id) 24 | * BIT 7: reserved 25 | */ 26 | static void _flg_rd_parse_item(struct ice_hw *hw, u16 idx, void *item, 27 | void *data, int size) 28 | { 29 | struct ice_flg_rd_item *rdi = item; 30 | u8 d8 = *(u8 *)data; 31 | 32 | rdi->idx = idx; 33 | rdi->expose = (d8 & 0x1) != 0; 34 | rdi->intr_flg_id = (u8)((d8 >> 1) & 0x3f); 35 | 36 | if (hw->debug_mask & ICE_DBG_PARSER) 37 | ice_flg_rd_dump(hw, rdi); 38 | } 39 | 40 | /** 41 | * ice_flg_rd_table_get - create a flag redirect table 42 | * @hw: pointer to the hardware structure 43 | */ 44 | struct ice_flg_rd_item *ice_flg_rd_table_get(struct ice_hw *hw) 45 | { 46 | return (struct ice_flg_rd_item *) 47 | ice_parser_create_table(hw, ICE_SID_RXPARSER_FLAG_REDIR, 48 | sizeof(struct ice_flg_rd_item), 49 | ICE_FLG_RD_TABLE_SIZE, 50 | ice_parser_sect_item_get, 51 | _flg_rd_parse_item, false); 52 | } 53 | 54 | /** 55 | * ice_flg_redirect - redirect a parser flag to packet flag 
56 | * @table: flag redirect table 57 | * @psr_flg: parser flag to redirect 58 | */ 59 | u64 ice_flg_redirect(struct ice_flg_rd_item *table, u64 psr_flg) 60 | { 61 | u64 flg = 0; 62 | int i; 63 | 64 | for (i = 0; i < 64; i++) { 65 | struct ice_flg_rd_item *item = &table[i]; 66 | 67 | if (!item->expose) 68 | continue; 69 | 70 | if (psr_flg & (1ul << item->intr_flg_id)) 71 | flg |= (1ul << i); 72 | } 73 | 74 | return flg; 75 | } 76 | -------------------------------------------------------------------------------- /ddp/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2006-2018, Intel Corporation. 2 | All rights reserved. 3 | 4 | Redistribution. Redistribution and use in binary form, without 5 | modification, are permitted provided that the following conditions are 6 | met: 7 | 8 | * Redistributions must reproduce the above copyright notice and the 9 | following disclaimer in the documentation and/or other materials 10 | provided with the distribution. 11 | * Neither the name of Intel Corporation nor the names of its suppliers 12 | may be used to endorse or promote products derived from this software 13 | without specific prior written permission. 14 | * No reverse engineering, decompilation, or disassembly of this software 15 | is permitted. 16 | 17 | Limited patent license. Intel Corporation grants a world-wide, 18 | royalty-free, non-exclusive license under patents it now or hereafter 19 | owns or controls to make, have made, use, import, offer to sell and 20 | sell ("Utilize") this software, but solely to the extent that any 21 | such patent is necessary to Utilize the software alone, or in 22 | combination with an operating system licensed under an approved Open 23 | Source license as listed by the Open Source Initiative at 24 | http://opensource.org/licenses. The patent license shall not apply to 25 | any other combinations which include this software. No hardware per 26 | se is licensed hereunder. 27 | 28 | DISCLAIMER. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND 29 | CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, 30 | BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND 31 | FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 32 | COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 33 | INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 34 | BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS 35 | OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 36 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR 37 | TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE 38 | USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 39 | DAMAGE. 40 | 41 | 42 | -------------------------------------------------------------------------------- /src/kcompat_gnss.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | // SPDX-License-Identifier: GPL-2.0 5 | /* Copyright (C) 2023 Intel Corporation. 
*/ 6 | 7 | /* This is taken from upstream commit 625239d */ 8 | 9 | #ifndef _KCOMPAT_GNSS_H_ 10 | #define _KCOMPAT_GNSS_H_ 11 | 12 | #ifdef _GNSS_H_ 13 | #error "Do not include both kcompat_gnss.h and " 14 | #endif 15 | 16 | #if IS_ENABLED(CONFIG_GNSS) && !defined(CONFIG_SUSE_KERNEL) 17 | #error "CONFIG_GNSS is enabled, use " 18 | #endif 19 | 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include 25 | #include 26 | #include 27 | 28 | struct gnss_device; 29 | 30 | enum gnss_type { 31 | GNSS_TYPE_NMEA = 0, 32 | GNSS_TYPE_SIRF, 33 | GNSS_TYPE_UBX, 34 | GNSS_TYPE_MTK, 35 | 36 | GNSS_TYPE_COUNT 37 | }; 38 | 39 | struct gnss_operations { 40 | int (*open)(struct gnss_device *gdev); 41 | void (*close)(struct gnss_device *gdev); 42 | int (*write_raw)(struct gnss_device *gdev, const unsigned char *buf, 43 | size_t count); 44 | }; 45 | 46 | struct gnss_device { 47 | struct device dev; 48 | struct cdev cdev; 49 | int id; 50 | 51 | enum gnss_type type; 52 | unsigned long flags; 53 | 54 | struct rw_semaphore rwsem; 55 | const struct gnss_operations *ops; 56 | unsigned int count; 57 | unsigned int disconnected:1; 58 | 59 | struct mutex read_mutex; /* GNSS read lock */ 60 | struct kfifo read_fifo; 61 | wait_queue_head_t read_queue; 62 | 63 | struct mutex write_mutex; /* GNSS write lock */ 64 | char *write_buf; 65 | }; 66 | 67 | struct gnss_device *gnss_allocate_device(struct device *parent); 68 | void gnss_put_device(struct gnss_device *gdev); 69 | int gnss_register_device(struct gnss_device *gdev); 70 | void gnss_deregister_device(struct gnss_device *gdev); 71 | 72 | int gnss_insert_raw(struct gnss_device *gdev, const unsigned char *buf, 73 | size_t count); 74 | 75 | static inline void gnss_set_drvdata(struct gnss_device *gdev, void *data) 76 | { 77 | dev_set_drvdata(&gdev->dev, data); 78 | } 79 | 80 | static inline void *gnss_get_drvdata(struct gnss_device *gdev) 81 | { 82 | return dev_get_drvdata(&gdev->dev); 83 | } 84 | 85 | int gnss_module_init(void); 86 | void gnss_module_exit(void); 87 | 88 | #endif /* _KCOMPAT_GNSS_H_ */ 89 | -------------------------------------------------------------------------------- /scripts/set_arfs: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # SPDX-License-Identifier: BSD-3-Clause 3 | # Copyright (C) 2020 - 2023 Intel Corporation 4 | # 5 | # Attempt a basic setup of adaptive receive flow steering 6 | # also known as aRFS. aRFS will automatically at runtime 7 | # use ntuple rules to direct receive traffic to the same 8 | # rx queue (matched by queue number) as was used for the 9 | # application that was transmitting. Most useful for 10 | # TCP workloads and latency sensitive TCP connections. 11 | # 12 | # typical usage is (as root): 13 | # set_arfs -s flow_entries eth1 14 | # 15 | # to get help: 16 | # set_arfs 17 | 18 | usage() 19 | { 20 | echo 21 | echo "Usage: $0 [-s flow_entries] ..." 22 | echo " Options: " 23 | echo " -s number of socket flow entries" 24 | echo " Examples:" 25 | echo " $0 eth1 eth2 # eth1 and eth2 use default flow_entries" 26 | echo " $0 -s flow_entries eth1 # eth1 use specified flow_entries" 27 | echo 28 | exit 1 29 | } 30 | 31 | # offer some help 32 | if [ "$1" == "-h" ] || [ "$1" == "--help" ]; then 33 | usage 34 | exit 1 35 | fi 36 | 37 | # if -s and a value specified, then use them. 
38 | if [ "$1" == "-s" ]; then 39 | FLOW_ENTRIES=$2 40 | shift 41 | shift 42 | fi 43 | 44 | # append the interfaces listed to the string with spaces 45 | while [ "$#" -ne "0" ] ; do 46 | IFACES+=" $1" 47 | shift 48 | done 49 | 50 | # for now the user must specify interfaces 51 | if [ -z "$IFACES" ]; then 52 | usage 53 | exit 1 54 | fi 55 | 56 | # provide a default flow num value, typically 2048 per queue 57 | # is useful, but if there are many queues then maybe a smaller 58 | # value per-queue is good enough. 59 | if [ -z "$FLOW_ENTRIES" ]; then 60 | FLOW_ENTRIES=32768 61 | fi 62 | 63 | set_arfs() 64 | { 65 | echo $FLOW_ENTRIES > /proc/sys/net/core/rps_sock_flow_entries 66 | echo -n "done: " 67 | grep -H . /proc/sys/net/core/rps_sock_flow_entries 68 | for IFACE in $IFACES; do 69 | QDIR="/sys/class/net/$IFACE/queues" 70 | QUEUES=`ls -1 -d $QDIR/rx-*` 71 | QUEUES_COUNT=`ls -1 -d $QDIR/rx-* | wc -l` 72 | sockTrack=$((FLOW_ENTRIES / QUEUES_COUNT)) 73 | if [ -z `ls $QDIR/rx-0/rps_flow_cnt` ]; then 74 | echo "ERROR: aRFS is not supported on $IFACE" 75 | exit 2 76 | fi 77 | n=0 78 | for i in $QUEUES; do 79 | echo $sockTrack > $i/rps_flow_cnt 80 | echo -n "Queue $((n++)) done: " 81 | grep -H . $i/rps_flow_cnt 82 | done 83 | done 84 | } 85 | 86 | set_arfs 87 | -------------------------------------------------------------------------------- /src/ice_ptype_mk.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #include "ice_common.h" 5 | #include "ice_parser_util.h" 6 | 7 | #define ICE_PTYPE_MK_TCAM_TABLE_SIZE 1024 8 | 9 | /** 10 | * ice_ptype_mk_tcam_dump - dump an ptype marker tcam info_ 11 | * @hw: pointer to the hardware structure 12 | * @item: ptype marker tcam to dump 13 | */ 14 | void ice_ptype_mk_tcam_dump(struct ice_hw *hw, 15 | struct ice_ptype_mk_tcam_item *item) 16 | { 17 | int i; 18 | 19 | dev_info(ice_hw_to_dev(hw), "address = %d\n", item->address); 20 | dev_info(ice_hw_to_dev(hw), "ptype = %d\n", item->ptype); 21 | dev_info(ice_hw_to_dev(hw), "key :"); 22 | for (i = 0; i < 10; i++) 23 | dev_info(ice_hw_to_dev(hw), "%02x ", item->key[i]); 24 | dev_info(ice_hw_to_dev(hw), "\n"); 25 | dev_info(ice_hw_to_dev(hw), "key_inv:"); 26 | for (i = 0; i < 10; i++) 27 | dev_info(ice_hw_to_dev(hw), "%02x ", item->key_inv[i]); 28 | dev_info(ice_hw_to_dev(hw), "\n"); 29 | } 30 | 31 | static void _parse_ptype_mk_tcam_item(struct ice_hw *hw, u16 idx, void *item, 32 | void *data, int size) 33 | { 34 | ice_parse_item_dflt(hw, idx, item, data, size); 35 | 36 | if (hw->debug_mask & ICE_DBG_PARSER) 37 | ice_ptype_mk_tcam_dump(hw, 38 | (struct ice_ptype_mk_tcam_item *)item); 39 | } 40 | 41 | /** 42 | * ice_ptype_mk_tcam_table_get - create a ptype marker tcam table 43 | * @hw: pointer to the hardware structure 44 | */ 45 | struct ice_ptype_mk_tcam_item *ice_ptype_mk_tcam_table_get(struct ice_hw *hw) 46 | { 47 | return (struct ice_ptype_mk_tcam_item *) 48 | ice_parser_create_table(hw, ICE_SID_RXPARSER_MARKER_PTYPE, 49 | sizeof(struct ice_ptype_mk_tcam_item), 50 | ICE_PTYPE_MK_TCAM_TABLE_SIZE, 51 | ice_parser_sect_item_get, 52 | _parse_ptype_mk_tcam_item, true); 53 | } 54 | 55 | /** 56 | * ice_ptype_mk_tcam_match - match a pattern on a ptype marker tcam table 57 | * @table: ptype marker tcam table to search 58 | * @pat: pattern to match 59 | * @len: length of the pattern 60 | */ 61 | struct ice_ptype_mk_tcam_item * 62 | ice_ptype_mk_tcam_match(struct ice_ptype_mk_tcam_item 
*table, 63 | u8 *pat, int len) 64 | { 65 | int i; 66 | 67 | for (i = 0; i < ICE_PTYPE_MK_TCAM_TABLE_SIZE; i++) { 68 | struct ice_ptype_mk_tcam_item *item = &table[i]; 69 | 70 | if (ice_ternary_match(item->key, item->key_inv, pat, len)) 71 | return item; 72 | } 73 | 74 | return NULL; 75 | } 76 | -------------------------------------------------------------------------------- /src/kcompat_dim.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB 5 | /* 6 | * Copyright (c) 2019, Mellanox Technologies inc. All rights reserved. 7 | */ 8 | 9 | #include "kcompat.h" 10 | #include "kcompat_dim.h" 11 | 12 | bool dim_on_top(struct dim *dim) 13 | { 14 | switch (dim->tune_state) { 15 | case DIM_PARKING_ON_TOP: 16 | case DIM_PARKING_TIRED: 17 | return true; 18 | case DIM_GOING_RIGHT: 19 | return (dim->steps_left > 1) && (dim->steps_right == 1); 20 | default: /* DIM_GOING_LEFT */ 21 | return (dim->steps_right > 1) && (dim->steps_left == 1); 22 | } 23 | } 24 | 25 | void dim_turn(struct dim *dim) 26 | { 27 | switch (dim->tune_state) { 28 | case DIM_PARKING_ON_TOP: 29 | case DIM_PARKING_TIRED: 30 | break; 31 | case DIM_GOING_RIGHT: 32 | dim->tune_state = DIM_GOING_LEFT; 33 | dim->steps_left = 0; 34 | break; 35 | case DIM_GOING_LEFT: 36 | dim->tune_state = DIM_GOING_RIGHT; 37 | dim->steps_right = 0; 38 | break; 39 | } 40 | } 41 | 42 | void dim_park_on_top(struct dim *dim) 43 | { 44 | dim->steps_right = 0; 45 | dim->steps_left = 0; 46 | dim->tired = 0; 47 | dim->tune_state = DIM_PARKING_ON_TOP; 48 | } 49 | 50 | void dim_park_tired(struct dim *dim) 51 | { 52 | dim->steps_right = 0; 53 | dim->steps_left = 0; 54 | dim->tune_state = DIM_PARKING_TIRED; 55 | } 56 | 57 | void dim_calc_stats(struct dim_sample *start, const struct dim_sample *end, 58 | struct dim_stats *curr_stats) 59 | { 60 | /* u32 holds up to 71 minutes, should be enough */ 61 | u32 delta_us = ktime_us_delta(end->time, start->time); 62 | u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr); 63 | u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr, 64 | start->byte_ctr); 65 | u32 ncomps = BIT_GAP(BITS_PER_TYPE(u32), end->comp_ctr, 66 | start->comp_ctr); 67 | 68 | if (!delta_us) 69 | return; 70 | 71 | curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us); 72 | curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us); 73 | curr_stats->epms = DIV_ROUND_UP(DIM_NEVENTS * USEC_PER_MSEC, 74 | delta_us); 75 | curr_stats->cpms = DIV_ROUND_UP(ncomps * USEC_PER_MSEC, delta_us); 76 | if (curr_stats->epms != 0) 77 | curr_stats->cpe_ratio = DIV_ROUND_DOWN_ULL( 78 | curr_stats->cpms * 100, curr_stats->epms); 79 | else 80 | curr_stats->cpe_ratio = 0; 81 | 82 | } 83 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | ### License 4 | 5 | ethernet-linux-ice is licensed under the terms in [COPYING](./COPYING). By 6 | contributing to the project, you agree to the license and copyright terms 7 | therein and release your contribution under these terms. 8 | 9 | ### Sign your work 10 | 11 | Please use the sign-off line at the end of the patch. Your signature certifies 12 | that you wrote the patch or otherwise have the right to pass it on as an 13 | open-source patch. 
The rules are pretty simple: if you can certify the below 14 | (from [developercertificate.org](http://developercertificate.org/)): 15 | 16 | ``` 17 | Developer Certificate of Origin 18 | Version 1.1 19 | 20 | Copyright (C) 2004, 2006 The Linux Foundation and its contributors. 21 | 660 York Street, Suite 102, 22 | San Francisco, CA 94110 USA 23 | 24 | Everyone is permitted to copy and distribute verbatim copies of this 25 | license document, but changing it is not allowed. 26 | 27 | Developer's Certificate of Origin 1.1 28 | 29 | By making a contribution to this project, I certify that: 30 | 31 | (a) The contribution was created in whole or in part by me and I 32 | have the right to submit it under the open source license 33 | indicated in the file; or 34 | 35 | (b) The contribution is based upon previous work that, to the best 36 | of my knowledge, is covered under an appropriate open source 37 | license and I have the right under that license to submit that 38 | work with modifications, whether created in whole or in part 39 | by me, under the same open source license (unless I am 40 | permitted to submit under a different license), as indicated 41 | in the file; or 42 | 43 | (c) The contribution was provided directly to me by some other 44 | person who certified (a), (b) or (c) and I have not modified 45 | it. 46 | 47 | (d) I understand and agree that this project and the contribution 48 | are public and that a record of the contribution (including all 49 | personal information I submit with it, including my sign-off) is 50 | maintained indefinitely and may be redistributed consistent with 51 | this project or the open source license(s) involved. 52 | ``` 53 | 54 | Then you just add a line to every git commit message: 55 | 56 | Signed-off-by: Joe Smith 57 | 58 | Use your real name (sorry, no pseudonyms or anonymous contributions.) 59 | 60 | If you set your `user.name` and `user.email` git configs, you can sign your 61 | commit automatically with `git commit -s`. 
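For example, assuming placeholder identity values and a hypothetical commit message, the whole flow looks like:

```
git config user.name "Joe Smith"
git config user.email "joe.smith@example.com"
git commit -s -m "ice: example change"
```

The `-s` flag appends a `Signed-off-by:` trailer built from the `user.name` and `user.email` settings to the commit message.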
62 | -------------------------------------------------------------------------------- /src/ice_imem.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _ICE_IMEM_H_ 5 | #define _ICE_IMEM_H_ 6 | 7 | struct ice_bst_main { 8 | bool al0; 9 | bool al1; 10 | bool al2; 11 | bool pg; 12 | }; 13 | 14 | struct ice_bst_keybuilder { 15 | u8 priority; 16 | bool tsr_ctrl; 17 | }; 18 | 19 | struct ice_np_keybuilder { 20 | u8 ops; 21 | u8 start_or_reg0; 22 | u8 len_or_reg1; 23 | }; 24 | 25 | struct ice_pg_keybuilder { 26 | bool flag0_ena; 27 | bool flag1_ena; 28 | bool flag2_ena; 29 | bool flag3_ena; 30 | u8 flag0_idx; 31 | u8 flag1_idx; 32 | u8 flag2_idx; 33 | u8 flag3_idx; 34 | u8 alu_reg_idx; 35 | }; 36 | 37 | enum ice_alu_opcode { 38 | ICE_ALU_PARK = 0, 39 | ICE_ALU_MOV_ADD = 1, 40 | ICE_ALU_ADD = 2, 41 | ICE_ALU_MOV_AND = 4, 42 | ICE_ALU_AND = 5, 43 | ICE_ALU_AND_IMM = 6, 44 | ICE_ALU_MOV_OR = 7, 45 | ICE_ALU_OR = 8, 46 | ICE_ALU_MOV_XOR = 9, 47 | ICE_ALU_XOR = 10, 48 | ICE_ALU_NOP = 11, 49 | ICE_ALU_BR = 12, 50 | ICE_ALU_BREQ = 13, 51 | ICE_ALU_BRNEQ = 14, 52 | ICE_ALU_BRGT = 15, 53 | ICE_ALU_BRLT = 16, 54 | ICE_ALU_BRGEQ = 17, 55 | ICE_ALU_BRLEG = 18, 56 | ICE_ALU_SETEQ = 19, 57 | ICE_ALU_ANDEQ = 20, 58 | ICE_ALU_OREQ = 21, 59 | ICE_ALU_SETNEQ = 22, 60 | ICE_ALU_ANDNEQ = 23, 61 | ICE_ALU_ORNEQ = 24, 62 | ICE_ALU_SETGT = 25, 63 | ICE_ALU_ANDGT = 26, 64 | ICE_ALU_ORGT = 27, 65 | ICE_ALU_SETLT = 28, 66 | ICE_ALU_ANDLT = 29, 67 | ICE_ALU_ORLT = 30, 68 | ICE_ALU_MOV_SUB = 31, 69 | ICE_ALU_SUB = 32, 70 | ICE_ALU_INVALID = 64, 71 | }; 72 | 73 | struct ice_alu { 74 | enum ice_alu_opcode opc; 75 | u8 src_start; 76 | u8 src_len; 77 | bool shift_xlate_select; 78 | u8 shift_xlate_key; 79 | u8 src_reg_id; 80 | u8 dst_reg_id; 81 | bool inc0; 82 | bool inc1; 83 | u8 proto_offset_opc; 84 | u8 proto_offset; 85 | u8 branch_addr; 86 | u16 imm; 87 | bool dedicate_flags_ena; 88 | u8 dst_start; 89 | u8 dst_len; 90 | bool flags_extr_imm; 91 | u8 flags_start_imm; 92 | }; 93 | 94 | struct ice_imem_item { 95 | u16 idx; 96 | struct ice_bst_main b_m; 97 | struct ice_bst_keybuilder b_kb; 98 | u8 pg; 99 | struct ice_np_keybuilder np_kb; 100 | struct ice_pg_keybuilder pg_kb; 101 | struct ice_alu alu0; 102 | struct ice_alu alu1; 103 | struct ice_alu alu2; 104 | }; 105 | 106 | void ice_imem_dump(struct ice_hw *hw, struct ice_imem_item *item); 107 | struct ice_imem_item *ice_imem_table_get(struct ice_hw *hw); 108 | #endif /* _ICE_IMEM_H_ */ 109 | -------------------------------------------------------------------------------- /src/ice_phy_regs.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _ICE_PHY_REGS_H_ 5 | #define _ICE_PHY_REGS_H_ 6 | 7 | #define CLKRX_CMN_CLK(i) (0x7E8000 + (i) * 0x5000) 8 | #define CLKRX_CMN_CLK_NUM 5 9 | 10 | #define CLKRX_CMN_REG_10(i) (CLKRX_CMN_CLK(i) + 0x28) 11 | union clkrx_cmn_reg_10 { 12 | struct { 13 | u32 cmnntl_refck_pdmtchval : 19; 14 | u32 cmnntl_refckm_charge_up_locovr : 1; 15 | u32 cmnntl_refckm_pull_dn_locovr : 1; 16 | u32 cmnntl_refckm_sense_locovr : 1; 17 | u32 cmnntl_refckp_charge_up_locovr : 1; 18 | u32 cmnntl_refckp_pull_dn_locovr : 1; 19 | u32 cmnntl_refckp_sense_locovr : 1; 20 | u32 cmnpmu_h8_off_delay : 4; 21 | u32 cmnref_locovren : 1; 22 | u32 cmnref_pad2cmos_ana_en_locovr : 1; 23 | u32 
cmnref_pad2cmos_dig_en_locovr : 1; 24 | } field; 25 | u32 val; 26 | }; 27 | 28 | #define CLKRX_CMN_REG_12(i) (CLKRX_CMN_CLK(i) + 0x30) 29 | union clkrx_cmn_reg_12 { 30 | struct { 31 | u32 cmnpmu_restore_off_delay : 4; 32 | u32 cmnpmu_rst_off_delay : 4; 33 | u32 cmnref_cdrdivsel_locovr : 5; 34 | u32 cmnref_refsel0_locovr : 4; 35 | u32 cmnref_refsel1_locovr : 4; 36 | u32 cmnref_refsel1_powersave_en_locovr : 1; 37 | u32 cmnref_refsel2_locovr : 4; 38 | u32 cmnref_refsel2_powersave_en_locovr : 1; 39 | u32 cmnref_refsel3_locovr : 4; 40 | u32 cmnref_refsel3_powersave_en_locovr : 1; 41 | } field; 42 | u32 val; 43 | }; 44 | 45 | #define CLKRX_CMN_REG_46(i) (CLKRX_CMN_CLK(i) + 0x220) 46 | union clkrx_cmn_reg_46 { 47 | struct { 48 | u32 cmnntl_refck_lkgcnt : 19; 49 | u32 cmnref_refsel0_loc : 4; 50 | u32 cmnref_refsel1_loc : 4; 51 | u32 cmnref_refsel1_powersave_en_loc : 1; 52 | u32 cmnref_refsel2_loc : 4; 53 | } field; 54 | u32 val; 55 | }; 56 | 57 | #define SERDES_IP_IF_LN_FLXM_GENERAL(n, m) \ 58 | (0x32B800 + (m) * 0x100000 + (n) * 0x8000) 59 | union serdes_ip_if_ln_flxm_general { 60 | struct { 61 | u32 reserved0_1 : 2; 62 | u32 ictl_pcs_mode_nt : 1; 63 | u32 ictl_pcs_rcomp_slave_en_nt : 1; 64 | u32 ictl_pcs_cmn_force_pup_a : 1; 65 | u32 ictl_pcs_rcomp_slave_valid_a : 1; 66 | u32 ictl_pcs_ref_sel_rx_nt : 4; 67 | u32 idat_dfx_obs_dig_ : 2; 68 | u32 irst_apb_mem_b : 1; 69 | u32 ictl_pcs_disconnect_nt : 1; 70 | u32 ictl_pcs_isolate_nt : 1; 71 | u32 reserved15_15 : 1; 72 | u32 irst_pcs_tstbus_b_a : 1; 73 | u32 ictl_pcs_ref_term_hiz_en_nt : 1; 74 | u32 reserved18_19 : 2; 75 | u32 ictl_pcs_synthlcslow_force_pup_a : 1; 76 | u32 ictl_pcs_synthlcfast_force_pup_a : 1; 77 | u32 reserved22_24 : 3; 78 | u32 ictl_pcs_ref_sel_tx_nt : 4; 79 | u32 reserved29_31 : 3; 80 | } field; 81 | u32 val; 82 | }; 83 | #endif /* _ICE_PHY_REGS_H_ */ 84 | -------------------------------------------------------------------------------- /src/ice_arfs.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _ICE_ARFS_H_ 5 | #define _ICE_ARFS_H_ 6 | 7 | #include "ice_fdir.h" 8 | 9 | enum ice_arfs_fltr_state { 10 | ICE_ARFS_INACTIVE, 11 | ICE_ARFS_ACTIVE, 12 | ICE_ARFS_TODEL, 13 | }; 14 | 15 | struct ice_arfs_entry { 16 | struct ice_fdir_fltr fltr_info; 17 | struct hlist_node list_entry; 18 | u64 time_activated; /* only valid for UDP flows */ 19 | u32 flow_id; 20 | /* fltr_state = 0 - ICE_ARFS_INACTIVE: 21 | * filter needs to be updated or programmed in HW. 22 | * fltr_state = 1 - ICE_ARFS_ACTIVE: 23 | * filter is active and programmed in HW. 24 | * fltr_state = 2 - ICE_ARFS_TODEL: 25 | * filter has been deleted from HW and needs to be removed from 26 | * the aRFS hash table. 
27 | */ 28 | u8 fltr_state; 29 | }; 30 | 31 | struct ice_arfs_entry_ptr { 32 | struct ice_arfs_entry *arfs_entry; 33 | struct hlist_node list_entry; 34 | }; 35 | 36 | struct ice_arfs_active_fltr_cntrs { 37 | atomic_t active_tcpv4_cnt; 38 | atomic_t active_tcpv6_cnt; 39 | atomic_t active_udpv4_cnt; 40 | atomic_t active_udpv6_cnt; 41 | }; 42 | 43 | #ifdef CONFIG_RFS_ACCEL 44 | int 45 | ice_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb, 46 | u16 rxq_idx, u32 flow_id); 47 | void ice_clear_arfs(struct ice_vsi *vsi); 48 | void ice_free_cpu_rx_rmap(struct ice_vsi *vsi); 49 | void ice_init_arfs(struct ice_vsi *vsi); 50 | void ice_sync_arfs_fltrs(struct ice_pf *pf); 51 | int ice_set_cpu_rx_rmap(struct ice_vsi *vsi); 52 | void ice_remove_arfs(struct ice_pf *pf); 53 | void ice_rebuild_arfs(struct ice_pf *pf); 54 | bool 55 | ice_is_arfs_using_perfect_flow(struct ice_hw *hw, 56 | enum ice_fltr_ptype flow_type); 57 | #else 58 | static inline void ice_clear_arfs(struct ice_vsi *vsi) { } 59 | static inline void ice_free_cpu_rx_rmap(struct ice_vsi *vsi) { } 60 | static inline void ice_init_arfs(struct ice_vsi *vsi) { } 61 | static inline void ice_sync_arfs_fltrs(struct ice_pf *pf) { } 62 | static inline void ice_remove_arfs(struct ice_pf *pf) { } 63 | static inline void ice_rebuild_arfs(struct ice_pf *pf) { } 64 | 65 | static inline int ice_set_cpu_rx_rmap(struct ice_vsi __always_unused *vsi) 66 | { 67 | return 0; 68 | } 69 | 70 | static inline int 71 | ice_rx_flow_steer(struct net_device __always_unused *netdev, 72 | const struct sk_buff __always_unused *skb, 73 | u16 __always_unused rxq_idx, u32 __always_unused flow_id) 74 | { 75 | return -EOPNOTSUPP; 76 | } 77 | 78 | static inline bool 79 | ice_is_arfs_using_perfect_flow(struct ice_hw __always_unused *hw, 80 | enum ice_fltr_ptype __always_unused flow_type) 81 | { 82 | return false; 83 | } 84 | #endif /* CONFIG_RFS_ACCEL */ 85 | #endif /* _ICE_ARFS_H_ */ 86 | -------------------------------------------------------------------------------- /src/ice_fwlog.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _ICE_FWLOG_H_ 5 | #define _ICE_FWLOG_H_ 6 | #include "ice_adminq_cmd.h" 7 | 8 | struct ice_hw; 9 | 10 | /* Only a single log level should be set and all log levels under the set value 11 | * are enabled, e.g. 
if log level is set to ICE_FW_LOG_LEVEL_VERBOSE, then all 12 | * other log levels are included (except ICE_FW_LOG_LEVEL_NONE) 13 | */ 14 | enum ice_fwlog_level { 15 | ICE_FWLOG_LEVEL_NONE = 0, 16 | ICE_FWLOG_LEVEL_ERROR = 1, 17 | ICE_FWLOG_LEVEL_WARNING = 2, 18 | ICE_FWLOG_LEVEL_NORMAL = 3, 19 | ICE_FWLOG_LEVEL_VERBOSE = 4, 20 | ICE_FWLOG_LEVEL_INVALID, /* all values >= this entry are invalid */ 21 | }; 22 | 23 | struct ice_fwlog_module_entry { 24 | /* module ID for the corresponding firmware logging event */ 25 | u16 module_id; 26 | /* verbosity level for the module_id */ 27 | u8 log_level; 28 | }; 29 | 30 | struct ice_fwlog_cfg { 31 | /* list of modules for configuring log level */ 32 | struct ice_fwlog_module_entry module_entries[ICE_AQC_FW_LOG_ID_MAX]; 33 | /* options used to configure firmware logging */ 34 | u16 options; 35 | #define ICE_FWLOG_OPTION_ARQ_ENA BIT(0) 36 | #define ICE_FWLOG_OPTION_UART_ENA BIT(1) 37 | /* set before calling ice_fwlog_init() so the PF registers for firmware 38 | * logging on initialization 39 | */ 40 | #define ICE_FWLOG_OPTION_REGISTER_ON_INIT BIT(2) 41 | /* set in the ice_fwlog_get() response if the PF is registered for FW 42 | * logging events over ARQ 43 | */ 44 | #define ICE_FWLOG_OPTION_IS_REGISTERED BIT(3) 45 | 46 | /* minimum number of log events sent per Admin Receive Queue event */ 47 | u16 log_resolution; 48 | }; 49 | 50 | struct ice_fwlog_data { 51 | u16 data_size; 52 | u8 *data; 53 | }; 54 | 55 | struct ice_fwlog_ring { 56 | struct ice_fwlog_data *rings; 57 | u16 index; 58 | u16 size; 59 | u16 head; 60 | u16 tail; 61 | }; 62 | 63 | #define ICE_FWLOG_RING_SIZE_INDEX_DFLT 3 64 | #define ICE_FWLOG_RING_SIZE_DFLT 256 65 | #define ICE_FWLOG_RING_SIZE_MAX 512 66 | 67 | bool ice_fwlog_ring_full(const struct ice_fwlog_ring *rings); 68 | bool ice_fwlog_ring_empty(const struct ice_fwlog_ring *rings); 69 | void ice_fwlog_ring_increment(u16 *item, u16 size); 70 | void ice_fwlog_set_supported(struct ice_hw *hw); 71 | bool ice_fwlog_supported(const struct ice_hw *hw); 72 | int ice_fwlog_init(struct ice_hw *hw); 73 | void ice_fwlog_deinit(struct ice_hw *hw); 74 | int ice_fwlog_set(struct ice_hw *hw, struct ice_fwlog_cfg *cfg); 75 | int ice_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg); 76 | int ice_fwlog_register(struct ice_hw *hw); 77 | int ice_fwlog_unregister(struct ice_hw *hw); 78 | void ice_fwlog_realloc_rings(struct ice_hw *hw, int index); 79 | #endif /* _ICE_FWLOG_H_ */ 80 | -------------------------------------------------------------------------------- /src/ice_devlink.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _ICE_DEVLINK_H_ 5 | #define _ICE_DEVLINK_H_ 6 | 7 | #if IS_ENABLED(CONFIG_NET_DEVLINK) 8 | struct ice_pf *ice_allocate_pf(struct device *dev); 9 | 10 | void ice_devlink_register(struct ice_pf *pf); 11 | void ice_devlink_unregister(struct ice_pf *pf); 12 | int ice_devlink_register_params(struct ice_pf *pf); 13 | void ice_devlink_unregister_params(struct ice_pf *pf); 14 | #ifdef HAVE_DEVLINK_RELOAD_ACTION_AND_LIMIT 15 | int ice_devlink_register_resources(struct ice_pf *pf); 16 | void ice_devlink_unregister_resources(struct ice_pf *pf); 17 | #else 18 | static inline int ice_devlink_register_resources(struct ice_pf *pf) 19 | { 20 | return 0; 21 | } 22 | 23 | static inline void ice_devlink_unregister_resources(struct ice_pf *pf) { } 24 | #endif /* HAVE_DEVLINK_RELOAD_ACTION_AND_LIMIT 
*/ 25 | int ice_devlink_create_pf_port(struct ice_pf *pf); 26 | void ice_devlink_destroy_pf_port(struct ice_pf *pf); 27 | #ifdef HAVE_DEVLINK_PORT_ATTR_PCI_VF 28 | int ice_devlink_create_vf_port(struct ice_vf *vf); 29 | void ice_devlink_destroy_vf_port(struct ice_vf *vf); 30 | #endif /* HAVE_DEVLINK_PORT_ATTR_PCI_VF */ 31 | #else /* CONFIG_NET_DEVLINK */ 32 | static inline struct ice_pf *ice_allocate_pf(struct device *dev) 33 | { 34 | return devm_kzalloc(dev, sizeof(struct ice_pf), GFP_KERNEL); 35 | } 36 | 37 | static inline void ice_devlink_register(struct ice_pf *pf) { } 38 | static inline void ice_devlink_unregister(struct ice_pf *pf) { } 39 | static inline int ice_devlink_register_params(struct ice_pf *pf) { return 0; } 40 | static inline void ice_devlink_unregister_params(struct ice_pf *pf) { } 41 | static inline int ice_devlink_register_resources(struct ice_pf *pf) 42 | { 43 | return 0; 44 | } 45 | 46 | static inline void ice_devlink_unregister_resources(struct ice_pf *pf) { } 47 | static inline int ice_devlink_create_pf_port(struct ice_pf *pf) { return 0; } 48 | static inline void ice_devlink_destroy_pf_port(struct ice_pf *pf) { } 49 | #ifdef HAVE_DEVLINK_PORT_ATTR_PCI_VF 50 | static inline int ice_devlink_create_vf_port(struct ice_vf *vf) { return 0; } 51 | static inline void ice_devlink_destroy_vf_port(struct ice_vf *vf) { } 52 | #endif /* HAVE_DEVLINK_PORT_ATTR_PCI_VF */ 53 | #endif /* !CONFIG_NET_DEVLINK */ 54 | 55 | #if IS_ENABLED(CONFIG_NET_DEVLINK) && defined(HAVE_DEVLINK_REGIONS) 56 | void ice_devlink_init_regions(struct ice_pf *pf); 57 | void ice_devlink_destroy_regions(struct ice_pf *pf); 58 | #else 59 | static inline void ice_devlink_init_regions(struct ice_pf *pf) { } 60 | static inline void ice_devlink_destroy_regions(struct ice_pf *pf) { } 61 | #endif 62 | 63 | int ice_devlink_tc_params_register(struct ice_vsi *vsi); 64 | void ice_devlink_tc_params_unregister(struct ice_vsi *vsi); 65 | 66 | #ifdef HAVE_DEVLINK_RATE_NODE_CREATE 67 | int ice_devlink_rate_init_tx_topology(struct devlink *devlink, struct ice_vsi *vsi); 68 | void ice_tear_down_devlink_rate_tree(struct ice_pf *pf); 69 | void ice_devlink_rate_clear_tx_topology(struct ice_vsi *vsi); 70 | #endif /* HAVE_DEVLINK_RATE_NODE_CREATE */ 71 | 72 | #endif /* _ICE_DEVLINK_H_ */ 73 | -------------------------------------------------------------------------------- /src/ice_lag.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _ICE_LAG_H_ 5 | #define _ICE_LAG_H_ 6 | #ifdef HAVE_NETDEV_UPPER_INFO 7 | 8 | #include 9 | #include "ice.h" 10 | 11 | #define ICE_LAG_INVALID_PORT 0xFF 12 | #define ICE_LAG_RESET_RETRIES 5 13 | #define ICE_SW_DEFAULT_PROFILE 0 14 | #define ICE_FV_PROT_MDID 255 15 | #define ICE_LP_EXT_BUF_OFFSET 32 16 | #define ICE_LAG_SINGLE_FILTER_SIZE 0xC 17 | 18 | #define ICE_PRI_IDX 0x0 19 | #define ICE_SEC_IDX 0x1 20 | 21 | struct ice_pf; 22 | 23 | struct ice_lag_netdev_list { 24 | struct list_head node; 25 | struct net_device *netdev; 26 | }; 27 | 28 | /* LAG info struct */ 29 | struct ice_lag { 30 | struct ice_pf *pf; /* backlink to PF struct */ 31 | struct iidc_rdma_qset_params rdma_qset[IEEE_8021QAZ_MAX_TCS]; 32 | struct iidc_rdma_multi_qset_params rdma_qsets[IEEE_8021QAZ_MAX_TCS]; 33 | struct ice_vsi *rdma_vsi; 34 | struct net_device *netdev; /* this PF's netdev */ 35 | struct net_device *upper_netdev; /* upper bonding netdev */ 36 | struct list_head 
*netdev_head; 37 | struct notifier_block notif_block; 38 | s32 bond_mode; 39 | int bond_id; /* identify which bond we are in */ 40 | u16 bond_swid; /* swid for primary interface */ 41 | u8 active_port; /* lport value for the current active port */ 42 | bool sriov_enabled; /* whether bond is in SR-IOV mode */ 43 | u8 bonded:1; /* currently bonded */ 44 | u8 primary:1; /* this is primary */ 45 | u16 pf_recipe; 46 | u16 lport_recipe; 47 | 48 | u16 pfmac_recipe; 49 | u16 pfmac_unicst_idx; 50 | u16 pfmac_bdcst_idx; 51 | 52 | u16 pf_rx_rule_id; 53 | u16 pf_tx_rule_id; 54 | u16 cp_rule_idx; 55 | u16 lport_rule_idx; 56 | /* each thing blocking bonding will increment this value by one. 57 | * If this value is zero, then bonding is allowed. 58 | */ 59 | struct ice_rule_query_data fltr; 60 | u16 action_idx; 61 | }; 62 | 63 | /* LAG workqueue struct */ 64 | struct ice_lag_work { 65 | struct work_struct lag_task; 66 | struct ice_lag_netdev_list netdev_list; 67 | struct ice_lag *lag; 68 | unsigned long event; 69 | struct net_device *event_netdev; 70 | union { 71 | struct netdev_notifier_changeupper_info changeupper_info; 72 | struct netdev_notifier_bonding_info bonding_info; 73 | struct netdev_notifier_info notifier_info; 74 | } info; 75 | }; 76 | 77 | void ice_lag_move_new_vf_nodes(struct ice_vf *vf); 78 | int ice_init_lag(struct ice_pf *pf); 79 | int 80 | ice_lag_move_node(struct ice_lag *lag, u8 oldport, u8 newport, u8 tc, u32 teid, 81 | u16 qs_handle); 82 | int ice_lag_move_node_sync(struct ice_hw *old_hw, struct ice_hw *new_hw, 83 | struct ice_vsi *new_vsi, 84 | struct iidc_rdma_qset_params *qset); 85 | void ice_lag_aa_failover(struct ice_lag *lag, struct iidc_core_dev_info *cdev, 86 | u8 dest, bool locked); 87 | void ice_deinit_lag(struct ice_pf *pf); 88 | void 89 | ice_lag_aa_reclaim_nodes(struct iidc_core_dev_info *cdev, 90 | struct iidc_rdma_multi_qset_params *qset); 91 | void ice_lag_rebuild(struct ice_pf *pf); 92 | bool ice_lag_is_switchdev_running(struct ice_pf *pf); 93 | void ice_lag_move_vf_nodes_cfg(struct ice_lag *lag, u8 src_prt, u8 dst_prt); 94 | #endif /* HAVE_NETDEV_UPPER_INFO */ 95 | #endif /* _ICE_LAG_H_ */ 96 | -------------------------------------------------------------------------------- /src/ice_eswitch.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _ICE_ESWITCH_H_ 5 | #define _ICE_ESWITCH_H_ 6 | #if IS_ENABLED(CONFIG_NET_DEVLINK) 7 | #include 8 | 9 | void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf); 10 | int ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf); 11 | int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode); 12 | #ifdef HAVE_DEVLINK_ESWITCH_OPS_EXTACK 13 | #ifdef HAVE_METADATA_PORT_INFO 14 | int 15 | ice_eswitch_mode_set(struct devlink *devlink, u16 mode, 16 | struct netlink_ext_ack *extack); 17 | #else 18 | static inline int 19 | ice_eswitch_mode_set(struct devlink __always_unused *devlink, 20 | u16 __always_unused mode, 21 | struct netlink_ext_ack __always_unused *extack) 22 | { 23 | return -EOPNOTSUPP; 24 | } 25 | #endif /* HAVE_METADATA_PORT_INFO */ 26 | #else 27 | #ifdef HAVE_METADATA_PORT_INFO 28 | int ice_eswitch_mode_set(struct devlink *devlink, u16 mode); 29 | #else 30 | static inline int ice_eswitch_mode_set(struct devlink __always_unused *devlink, 31 | u16 __always_unused mode) 32 | { 33 | return -EOPNOTSUPP; 34 | } 35 | #endif /* HAVE_METADATA_PORT_INFO */ 36 | #endif /* 
HAVE_DEVLINK_ESWITCH_OPS_EXTACK */ 37 | bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf); 38 | #ifdef HAVE_METADATA_PORT_INFO 39 | void ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi); 40 | #else 41 | static inline void 42 | ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi) { } 43 | #endif /* HAVE_METADATA_PORT_INFO */ 44 | void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf); 45 | void ice_eswitch_start_all_tx_queues(struct ice_pf *pf); 46 | 47 | #ifdef HAVE_METADATA_PORT_INFO 48 | void ice_eswitch_set_target_vsi(struct sk_buff *skb, 49 | struct ice_tx_offload_params *off); 50 | #else 51 | static inline void 52 | ice_eswitch_set_target_vsi(struct sk_buff *skb, 53 | struct ice_tx_offload_params *off) { } 54 | #endif /* HAVE_METADATA_PORT_INFO */ 55 | netdev_tx_t 56 | ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev); 57 | struct net_device *ice_eswitch_get_target(const struct ice_rx_ring *rx_ring, 58 | union ice_32b_rx_flex_desc *rx_desc); 59 | #else /* !CONFIG_NET_DEVLINK */ 60 | static inline void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf) { } 61 | static inline void 62 | ice_eswitch_set_target_vsi(struct sk_buff *skb, 63 | struct ice_tx_offload_params *off) { } 64 | static inline void 65 | ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi) { } 66 | static inline void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf) { } 67 | static inline void ice_eswitch_start_all_tx_queues(struct ice_pf *pf) { } 68 | 69 | static inline int ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf) 70 | { 71 | return -EOPNOTSUPP; 72 | } 73 | 74 | static inline bool 75 | ice_is_eswitch_mode_switchdev(struct ice_pf __always_unused *pf) 76 | { 77 | return false; 78 | } 79 | 80 | static inline netdev_tx_t 81 | ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev) 82 | { 83 | return 0; 84 | } 85 | 86 | static inline struct net_device * 87 | ice_eswitch_get_target(const struct ice_rx_ring *rx_ring, 88 | union ice_32b_rx_flex_desc *rx_desc) 89 | { 90 | return rx_ring->netdev; 91 | } 92 | #endif /* CONFIG_NET_DEVLINK */ 93 | #endif /* _ICE_ESWITCH_H_ */ 94 | -------------------------------------------------------------------------------- /src/ice_proto_grp.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #include "ice_common.h" 5 | #include "ice_parser_util.h" 6 | 7 | static void _proto_off_dump(struct ice_hw *hw, struct ice_proto_off *po, 8 | int idx) 9 | { 10 | dev_info(ice_hw_to_dev(hw), "proto %d\n", idx); 11 | dev_info(ice_hw_to_dev(hw), "\tpolarity = %d\n", po->polarity); 12 | dev_info(ice_hw_to_dev(hw), "\tproto_id = %d\n", po->proto_id); 13 | dev_info(ice_hw_to_dev(hw), "\toffset = %d\n", po->offset); 14 | } 15 | 16 | /** 17 | * ice_proto_grp_dump - dump a proto group item info 18 | * @hw: pointer to the hardware structure 19 | * @item: proto group item to dump 20 | */ 21 | void ice_proto_grp_dump(struct ice_hw *hw, struct ice_proto_grp_item *item) 22 | { 23 | int i; 24 | 25 | dev_info(ice_hw_to_dev(hw), "index = %d\n", item->idx); 26 | 27 | for (i = 0; i < ICE_PROTO_COUNT_PER_GRP; i++) 28 | _proto_off_dump(hw, &item->po[i], i); 29 | } 30 | 31 | /** The function parses a 22 bits Protocol entry with below format: 32 | * BIT 0: Polarity of Protocol Offset (po->polarity) 33 | * BIT 1-8: Protocol ID (po->proto_id) 34 | * BIT 9-11: reserved 35 | * 
BIT 12-21: Protocol Offset (po->offset) 36 | */ 37 | static void _proto_off_parse(struct ice_proto_off *po, u32 data) 38 | { 39 | po->polarity = (data & 0x1) != 0; 40 | po->proto_id = (u8)((data >> 1) & 0xff); 41 | po->offset = (u16)((data >> 12) & 0x3ff); 42 | } 43 | 44 | /** The function parses a 192 bits Protocol Group Table entry with below 45 | * format: 46 | * BIT 0-21: Protocol 0 (grp->po[0]) 47 | * BIT 22-43: Protocol 1 (grp->po[1]) 48 | * BIT 44-65: Protocol 2 (grp->po[2]) 49 | * BIT 66-87: Protocol 3 (grp->po[3]) 50 | * BIT 88-109: Protocol 4 (grp->po[4]) 51 | * BIT 110-131:Protocol 5 (grp->po[5]) 52 | * BIT 132-153:Protocol 6 (grp->po[6]) 53 | * BIT 154-175:Protocol 7 (grp->po[7]) 54 | * BIT 176-191:reserved 55 | */ 56 | static void _proto_grp_parse_item(struct ice_hw *hw, u16 idx, void *item, 57 | void *data, int size) 58 | { 59 | struct ice_proto_grp_item *grp = item; 60 | u8 *buf = data; 61 | u32 d32; 62 | 63 | grp->idx = idx; 64 | 65 | d32 = *(u32 *)buf; 66 | _proto_off_parse(&grp->po[0], d32); 67 | 68 | d32 = (*(u32 *)&buf[2] >> 6); 69 | _proto_off_parse(&grp->po[1], d32); 70 | 71 | d32 = (*(u32 *)&buf[5] >> 4); 72 | _proto_off_parse(&grp->po[2], d32); 73 | 74 | d32 = (*(u32 *)&buf[8] >> 2); 75 | _proto_off_parse(&grp->po[3], d32); 76 | 77 | d32 = *(u32 *)&buf[11]; 78 | _proto_off_parse(&grp->po[4], d32); 79 | 80 | d32 = (*(u32 *)&buf[13] >> 6); 81 | _proto_off_parse(&grp->po[5], d32); 82 | 83 | d32 = (*(u32 *)&buf[16] >> 4); 84 | _proto_off_parse(&grp->po[6], d32); 85 | 86 | d32 = (*(u32 *)&buf[19] >> 2); 87 | _proto_off_parse(&grp->po[7], d32); 88 | 89 | if (hw->debug_mask & ICE_DBG_PARSER) 90 | ice_proto_grp_dump(hw, grp); 91 | } 92 | 93 | /** 94 | * ice_proto_grp_table_get - create a proto group table 95 | * @hw: pointer to the hardware structure 96 | */ 97 | struct ice_proto_grp_item *ice_proto_grp_table_get(struct ice_hw *hw) 98 | { 99 | return (struct ice_proto_grp_item *) 100 | ice_parser_create_table(hw, ICE_SID_RXPARSER_PROTO_GRP, 101 | sizeof(struct ice_proto_grp_item), 102 | ICE_PROTO_GRP_TABLE_SIZE, 103 | ice_parser_sect_item_get, 104 | _proto_grp_parse_item, false); 105 | } 106 | -------------------------------------------------------------------------------- /src/ice_hwmon.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #include "ice.h" 5 | #include "ice_hwmon.h" 6 | 7 | #ifdef HAVE_HWMON_DEVICE_REGISTER_WITH_INFO 8 | #include "ice_adminq_cmd.h" 9 | #include 10 | 11 | #define ICE_INTERNAL_CVL_TEMP_SENSOR 0x00 12 | #define ICE_INTERNAL_CVL_TEMP_SENSOR_FORMAT 0x00 13 | #define ICE_CHMOD_READ_ONLY 0444 14 | 15 | #define ICE_TO_MILLIDEGREE(celsius) ((celsius) * 1000) 16 | 17 | static const struct hwmon_channel_info *ice_hwmon_info[] = { 18 | HWMON_CHANNEL_INFO(temp, 19 | HWMON_T_INPUT | HWMON_T_MAX | 20 | HWMON_T_CRIT | HWMON_T_EMERGENCY), 21 | NULL 22 | }; 23 | 24 | static int ice_hwmon_read(struct device *dev, enum hwmon_sensor_types type, 25 | u32 attr, int channel, long *val) 26 | { 27 | struct ice_pf *pf = (struct ice_pf *)dev_get_drvdata(dev); 28 | struct ice_aqc_get_sensor_reading_resp resp; 29 | int ret; 30 | 31 | if (type != hwmon_temp) 32 | return -EOPNOTSUPP; 33 | 34 | ret = ice_aq_get_sensor_reading(&pf->hw, 35 | ICE_INTERNAL_CVL_TEMP_SENSOR, 36 | ICE_INTERNAL_CVL_TEMP_SENSOR_FORMAT, 37 | &resp, 38 | NULL); 39 | if (ret) { 40 | dev_warn(dev, "%s HW read failure (%d)\n", __func__, ret); 41 | return ret; 42 | } 
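/* hwmon attributes are reported in millidegrees Celsius; the firmware sensor values are in degrees, hence the ICE_TO_MILLIDEGREE() scaling applied in each case below. */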
43 | 44 | switch (attr) { 45 | case hwmon_temp_input: 46 | *val = ICE_TO_MILLIDEGREE(resp.data.s0f0.temp); 47 | break; 48 | case hwmon_temp_max: 49 | *val = 50 | ICE_TO_MILLIDEGREE(resp.data.s0f0.temp_warning_threshold); 51 | break; 52 | case hwmon_temp_crit: 53 | *val = 54 | ICE_TO_MILLIDEGREE(resp.data.s0f0.temp_critical_threshold); 55 | break; 56 | case hwmon_temp_emergency: 57 | *val = ICE_TO_MILLIDEGREE(resp.data.s0f0.temp_fatal_threshold); 58 | break; 59 | default: 60 | dev_warn(dev, "%s unsupported attribute (%d)\n", 61 | __func__, attr); 62 | return -EINVAL; 63 | } 64 | 65 | return 0; 66 | } 67 | 68 | static umode_t ice_hwmon_is_visible(const void *data, 69 | enum hwmon_sensor_types type, 70 | u32 attr, 71 | int channel) 72 | { 73 | if (type != hwmon_temp) 74 | return 0; 75 | 76 | switch (attr) { 77 | case hwmon_temp_input: 78 | case hwmon_temp_crit: 79 | case hwmon_temp_max: 80 | case hwmon_temp_emergency: 81 | return ICE_CHMOD_READ_ONLY; 82 | } 83 | 84 | return 0; 85 | } 86 | 87 | static const struct hwmon_ops ice_hwmon_ops = { 88 | .is_visible = ice_hwmon_is_visible, 89 | .read = ice_hwmon_read 90 | }; 91 | 92 | static const struct hwmon_chip_info ice_chip_info = { 93 | .ops = &ice_hwmon_ops, 94 | .info = ice_hwmon_info 95 | }; 96 | 97 | void ice_hwmon_init(struct ice_pf *pf) 98 | { 99 | struct device *dev = ice_pf_to_dev(pf); 100 | struct device *hdev; 101 | 102 | if (pf->hw.pf_id != 0) 103 | return; 104 | 105 | if (!(pf->hw.dev_caps.supported_sensors & 106 | ICE_SENSOR_SUPPORT_E810_INT_TEMP)) 107 | return; 108 | 109 | ice_hwmon_exit(pf); 110 | 111 | hdev = hwmon_device_register_with_info(dev, "ice", pf, &ice_chip_info, 112 | NULL); 113 | if (IS_ERR(hdev)) { 114 | dev_warn(dev, 115 | "hwmon_device_register_with_info returns error (%ld)", 116 | PTR_ERR(hdev)); 117 | return; 118 | } 119 | pf->hwmon_dev = hdev; 120 | } 121 | 122 | void ice_hwmon_exit(struct ice_pf *pf) 123 | { 124 | if (!pf->hwmon_dev) 125 | return; 126 | 127 | hwmon_device_unregister(pf->hwmon_dev); 128 | } 129 | #endif /* HAVE_HWMON_DEVICE_REGISTER_WITH_INFO */ 130 | -------------------------------------------------------------------------------- /src/ice_txrx_lib.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _ICE_TXRX_LIB_H_ 5 | #define _ICE_TXRX_LIB_H_ 6 | #include "ice.h" 7 | 8 | /** 9 | * ice_test_staterr - tests bits in Rx descriptor status and error fields 10 | * @status_err_n: Rx descriptor status_error0 or status_error1 bits 11 | * @stat_err_bits: value to mask 12 | * 13 | * This function does some fast chicanery in order to return the 14 | * value of the mask which is really only used for boolean tests. 15 | * The status_error_len doesn't need to be shifted because it begins 16 | * at offset zero. 
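 *
 * For example, ice_get_vlan_tag_from_rx_desc() later in this header calls
 *   ice_test_staterr(rx_desc->wb.status_error0,
 *                    BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S))
 * to check whether a stripped VLAN tag is present in the l2tag1 field.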
17 | */ 18 | static inline bool 19 | ice_test_staterr(__le16 status_err_n, const u16 stat_err_bits) 20 | { 21 | return !!(status_err_n & cpu_to_le16(stat_err_bits)); 22 | } 23 | 24 | static inline __le64 25 | ice_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag) 26 | { 27 | return cpu_to_le64(ICE_TX_DESC_DTYPE_DATA | 28 | (td_cmd << ICE_TXD_QW1_CMD_S) | 29 | (td_offset << ICE_TXD_QW1_OFFSET_S) | 30 | ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) | 31 | (td_tag << ICE_TXD_QW1_L2TAG1_S)); 32 | } 33 | 34 | /** 35 | * ice_build_tstamp_desc - build Tx time stamp descriptor 36 | * @tx_desc: Tx LAN descriptor index 37 | * @tstamp: time stamp 38 | */ 39 | static inline __le32 40 | ice_build_tstamp_desc(u16 tx_desc, u32 tstamp) 41 | { 42 | return cpu_to_le32(FIELD_PREP(ICE_TXTIME_TX_DESC_IDX_M, tx_desc) | 43 | FIELD_PREP(ICE_TXTIME_STAMP_M, tstamp)); 44 | } 45 | 46 | /** 47 | * ice_get_vlan_tag_from_rx_desc - get VLAN from Rx flex descriptor 48 | * @rx_desc: Rx 32b flex descriptor with RXDID=2 49 | * 50 | * The OS and current PF implementation only support stripping a single VLAN tag 51 | * at a time, so there should only ever be 0 or 1 tags in the l2tag* fields. If 52 | * one is found return the tag, else return 0 to mean no VLAN tag was found. 53 | */ 54 | static inline u16 55 | ice_get_vlan_tag_from_rx_desc(union ice_32b_rx_flex_desc *rx_desc) 56 | { 57 | u16 stat_err_bits; 58 | 59 | stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S); 60 | if (ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits)) 61 | return le16_to_cpu(rx_desc->wb.l2tag1); 62 | 63 | stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S); 64 | if (ice_test_staterr(rx_desc->wb.status_error1, stat_err_bits)) 65 | return le16_to_cpu(rx_desc->wb.l2tag2_2nd); 66 | 67 | return 0; 68 | } 69 | 70 | #ifdef HAVE_XDP_SUPPORT 71 | /** 72 | * ice_xdp_ring_update_tail - Updates the XDP Tx ring tail register 73 | * @xdp_ring: XDP Tx ring 74 | * 75 | * This function updates the XDP Tx ring tail register. 76 | */ 77 | static inline void ice_xdp_ring_update_tail(struct ice_tx_ring *xdp_ring) 78 | { 79 | /* Force memory writes to complete before letting h/w 80 | * know there are new descriptors to fetch. 
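	 * The explicit wmb() is what makes the relaxed (non-ordering)
	 * writel_relaxed() of the tail register safe: descriptor writes are
	 * guaranteed to be visible to the device before the doorbell, without
	 * the extra barrier a plain writel() would add.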
81 | */ 82 | wmb(); 83 | writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail); 84 | } 85 | 86 | void ice_finalize_xdp_rx(struct ice_rx_ring *rx_ring, unsigned int xdp_res); 87 | int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring); 88 | int ice_xmit_xdp_ring(void *data, u16 size, struct ice_tx_ring *xdp_ring); 89 | #endif /* HAVE_XDP_SUPPORT */ 90 | void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val); 91 | void 92 | ice_process_skb_fields(struct ice_rx_ring *rx_ring, 93 | union ice_32b_rx_flex_desc *rx_desc, 94 | struct sk_buff *skb, u16 ptype); 95 | void 96 | ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag); 97 | #endif /* !_ICE_TXRX_LIB_H_ */ 98 | -------------------------------------------------------------------------------- /src/ice_vsi_vlan_ops.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #include "ice_pf_vsi_vlan_ops.h" 5 | #include "ice_vf_vsi_vlan_ops.h" 6 | #include "ice_lib.h" 7 | #include "ice.h" 8 | 9 | static int 10 | op_unsupported_vlan_arg(struct ice_vsi * __always_unused vsi, 11 | struct ice_vlan * __always_unused vlan) 12 | { 13 | return -EOPNOTSUPP; 14 | } 15 | 16 | static int 17 | op_unsupported_tpid_arg(struct ice_vsi *__always_unused vsi, 18 | u16 __always_unused tpid) 19 | { 20 | return -EOPNOTSUPP; 21 | } 22 | 23 | static int op_unsupported(struct ice_vsi *__always_unused vsi) 24 | { 25 | return -EOPNOTSUPP; 26 | } 27 | 28 | /* If any new ops are added to the VSI VLAN ops interface then an unsupported 29 | * implementation should be set here. 30 | */ 31 | static struct ice_vsi_vlan_ops ops_unsupported = { 32 | .add_vlan = op_unsupported_vlan_arg, 33 | .del_vlan = op_unsupported_vlan_arg, 34 | .ena_stripping = op_unsupported_tpid_arg, 35 | .dis_stripping = op_unsupported, 36 | .ena_insertion = op_unsupported_tpid_arg, 37 | .dis_insertion = op_unsupported, 38 | .ena_rx_filtering = op_unsupported, 39 | .dis_rx_filtering = op_unsupported, 40 | .ena_tx_filtering = op_unsupported, 41 | .dis_tx_filtering = op_unsupported, 42 | .set_port_vlan = op_unsupported_vlan_arg, 43 | }; 44 | 45 | /** 46 | * ice_vsi_init_unsupported_vlan_ops - init all VSI VLAN ops to unsupported 47 | * @vsi: VSI to initialize VSI VLAN ops to unsupported for 48 | * 49 | * By default all inner and outer VSI VLAN ops return -EOPNOTSUPP. This was done 50 | * as oppsed to leaving the ops null to prevent unexpected crashes. Instead if 51 | * an unsupported VSI VLAN op is called it will just return -EOPNOTSUPP. 52 | * 53 | */ 54 | static void ice_vsi_init_unsupported_vlan_ops(struct ice_vsi *vsi) 55 | { 56 | vsi->outer_vlan_ops = ops_unsupported; 57 | vsi->inner_vlan_ops = ops_unsupported; 58 | } 59 | 60 | /** 61 | * ice_vsi_init_vlan_ops - initialize type specific VSI VLAN ops 62 | * @vsi: VSI to initialize ops for 63 | * 64 | * If any VSI types are added and/or require different ops than the PF or VF VSI 65 | * then they will have to add a case here to handle that. Also, VSI type 66 | * specific files should be added in the same manner that was done for PF VSI. 
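 *
 * A new VSI type would follow the existing pattern in the switch statement
 * below, for example (hypothetical ICE_VSI_FOO type with its own helper):
 *
 *	case ICE_VSI_FOO:
 *		ice_foo_vsi_init_vlan_ops(vsi);
 *		break;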
67 | */ 68 | void ice_vsi_init_vlan_ops(struct ice_vsi *vsi) 69 | { 70 | /* Initialize all VSI types to have unsupported VSI VLAN ops */ 71 | ice_vsi_init_unsupported_vlan_ops(vsi); 72 | 73 | switch (vsi->type) { 74 | case ICE_VSI_PF: 75 | case ICE_VSI_CHNL: 76 | ice_pf_vsi_init_vlan_ops(vsi); 77 | break; 78 | case ICE_VSI_ADI: 79 | case ICE_VSI_VF: 80 | ice_vf_vsi_init_vlan_ops(vsi); 81 | break; 82 | default: 83 | dev_dbg(ice_pf_to_dev(vsi->back), "%s does not support VLAN operations\n", 84 | ice_vsi_type_str(vsi->type)); 85 | break; 86 | } 87 | } 88 | 89 | /** 90 | * ice_get_compat_vsi_vlan_ops - Get VSI VLAN ops based on VLAN mode 91 | * @vsi: VSI used to get the VSI VLAN ops 92 | * 93 | * This function is meant to be used when the caller doesn't know which VLAN ops 94 | * to use (i.e. inner or outer). This allows backward compatibility for VLANs 95 | * since most of the Outer VSI VLAN functins are not supported when 96 | * the device is configured in Single VLAN Mode (SVM). 97 | */ 98 | struct ice_vsi_vlan_ops *ice_get_compat_vsi_vlan_ops(struct ice_vsi *vsi) 99 | { 100 | if (ice_is_dvm_ena(&vsi->back->hw)) 101 | return &vsi->outer_vlan_ops; 102 | else 103 | return &vsi->inner_vlan_ops; 104 | } 105 | -------------------------------------------------------------------------------- /src/ice_dcf.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _ICE_DCF_H_ 5 | #define _ICE_DCF_H_ 6 | 7 | struct ice_vf; 8 | struct ice_pf; 9 | struct ice_hw; 10 | 11 | #define ICE_DCF_VFID 0 12 | 13 | /* DCF mode states */ 14 | enum ice_dcf_state { 15 | /* DCF mode is fully off */ 16 | ICE_DCF_STATE_OFF = 0, 17 | /* Process is live, acquired capability to send DCF CMD */ 18 | ICE_DCF_STATE_ON, 19 | /* Kernel is busy, deny DCF CMD */ 20 | ICE_DCF_STATE_BUSY, 21 | /* Kernel is ready for Process to Re-establish, deny DCF CMD */ 22 | ICE_DCF_STATE_PAUSE, 23 | }; 24 | 25 | struct ice_dcf_sw_rule_entry; 26 | 27 | #define ICE_HW_VSI_ID_MAX BIT(10) /* The AQ VSI number uses 10 bits */ 28 | 29 | struct ice_dcf_vsi_list_info { 30 | struct list_head list_entry; 31 | struct ice_dcf_sw_rule_entry *sw_rule; 32 | u16 list_id; 33 | 34 | u16 vsi_count; 35 | DECLARE_BITMAP(hw_vsi_map, ICE_HW_VSI_ID_MAX); 36 | }; 37 | 38 | struct ice_dcf_sw_rule_entry { 39 | struct list_head list_entry; 40 | u16 rule_id; 41 | 42 | /* Only support ICE_FWD_TO_VSI and ICE_FWD_TO_VSI_LIST */ 43 | enum ice_sw_fwd_act_type fltr_act; 44 | /* Depending on filter action */ 45 | union { 46 | u16 hw_vsi_id:10; 47 | u16 vsi_list_id:10; 48 | } fwd_id; 49 | 50 | struct ice_dcf_vsi_list_info *vsi_list_info; 51 | }; 52 | 53 | struct ice_dcf { 54 | struct ice_vf *vf; 55 | enum ice_dcf_state state; 56 | 57 | /* Trace the switch rules added/removed by DCF */ 58 | struct list_head sw_rule_head; 59 | struct list_head vsi_list_info_head; 60 | 61 | /* Handle the AdminQ command between the DCF (Device Config Function) 62 | * and the firmware. 63 | */ 64 | #define ICE_DCF_AQ_DESC_TIMEOUT (HZ / 10) 65 | struct ice_aq_desc aq_desc; 66 | u8 aq_desc_received; 67 | unsigned long aq_desc_expires; 68 | 69 | /* Save the current Device Serial Number when searching the package 70 | * path for later query. 
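	 * The DSN is the 64-bit PCIe Device Serial Number, which is why the
	 * buffer below is ICE_DSN_NUM_LEN (8) bytes long.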
71 | */ 72 | #define ICE_DSN_NUM_LEN 8 73 | u8 dsn[ICE_DSN_NUM_LEN]; 74 | }; 75 | 76 | #ifdef CONFIG_PCI_IOV 77 | bool ice_dcf_aq_cmd_permitted(struct ice_aq_desc *desc); 78 | bool ice_check_dcf_allowed(struct ice_vf *vf); 79 | bool ice_is_dcf_enabled(struct ice_pf *pf); 80 | bool ice_is_vf_dcf(struct ice_vf *vf); 81 | enum ice_dcf_state ice_dcf_get_state(struct ice_pf *pf); 82 | void ice_dcf_set_state(struct ice_pf *pf, enum ice_dcf_state state); 83 | void ice_dcf_init_sw_rule_mgmt(struct ice_pf *pf); 84 | void ice_rm_all_dcf_sw_rules(struct ice_pf *pf); 85 | void ice_rm_dcf_sw_vsi_rule(struct ice_pf *pf, struct ice_vf *vf); 86 | bool 87 | ice_dcf_pre_aq_send_cmd(struct ice_vf *vf, struct ice_aq_desc *aq_desc, 88 | u8 *aq_buf, u16 aq_buf_size); 89 | enum virtchnl_status_code 90 | ice_dcf_post_aq_send_cmd(struct ice_pf *pf, struct ice_aq_desc *aq_desc, 91 | u8 *aq_buf); 92 | bool ice_dcf_is_acl_aq_cmd(struct ice_aq_desc *desc); 93 | bool ice_dcf_is_udp_tunnel_aq_cmd(struct ice_aq_desc *desc, u8 *aq_buf); 94 | void ice_clear_dcf_acl_cfg(struct ice_pf *pf); 95 | bool ice_dcf_is_acl_capable(struct ice_hw *hw); 96 | void ice_clear_dcf_udp_tunnel_cfg(struct ice_pf *pf); 97 | bool ice_dcf_is_udp_tunnel_capable(struct ice_hw *hw); 98 | enum virtchnl_status_code 99 | ice_dcf_update_acl_rule_info(struct ice_pf *pf, struct ice_aq_desc *desc, 100 | u8 *aq_buf); 101 | #else 102 | static inline bool ice_is_dcf_enabled(struct ice_pf __always_unused *pf) 103 | { 104 | return false; 105 | } 106 | 107 | static inline bool 108 | ice_dcf_is_udp_tunnel_capable(struct ice_hw __always_unused *hw) 109 | { 110 | return false; 111 | } 112 | 113 | static inline bool ice_dcf_is_acl_capable(struct ice_hw __always_unused *hw) 114 | { 115 | return false; 116 | } 117 | #endif /* CONFIG_PCI_IOV */ 118 | #endif /* _ICE_DCF_H_ */ 119 | -------------------------------------------------------------------------------- /src/linux/auxiliary_bus.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _AUXILIARY_BUS_H_ 5 | #define _AUXILIARY_BUS_H_ 6 | 7 | /* The CRC of the exported symbols which depend on 'struct device' can change 8 | * if the definition of 'struct dev_pm_qos' is not included in this file, i.e. 9 | * if we don't include this file. 10 | */ 11 | #include 12 | #include 13 | #include 14 | #include "auxiliary_compat.h" 15 | 16 | #ifndef HAVE_AUXILIARY_DEVICE_ID 17 | /* For some kernel, auxiliary is not mature enough so that part code is 18 | * included, but no driver uses it. HAVE_AUXILIARY_DEVICE_ID will be 19 | * generated only when CONFIG_AUXILIARY_BUS is not defined. If 20 | * HAVE_AUXILIARY_DEVICE_ID is defined, it means struct 21 | * auxiliary_device_id and AUXILIARY_NAME_SIZE, AXUILIARY_MODULE_PREFIX 22 | * in is used. Otherwise need definition here. 
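 * In that fallback case, the definitions below provide struct
 * auxiliary_device_id together with the "intel_auxiliary:" module alias
 * prefix used when matching auxiliary device names.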
23 | */ 24 | #define AUXILIARY_NAME_SIZE 32 25 | #define AUXILIARY_MODULE_PREFIX "intel_auxiliary:" 26 | struct auxiliary_device_id { 27 | char name[AUXILIARY_NAME_SIZE]; 28 | kernel_ulong_t driver_data; 29 | }; 30 | #endif 31 | 32 | #define AUX_PREFIX(func) intel_ ## func 33 | 34 | #define auxiliary_device_init AUX_PREFIX(auxiliary_device_init) 35 | #define __auxiliary_device_add AUX_PREFIX(__auxiliary_device_add) 36 | #define auxiliary_find_device AUX_PREFIX(auxiliary_find_device) 37 | #define __auxiliary_driver_register AUX_PREFIX(__auxiliary_driver_register) 38 | #define auxiliary_driver_unregister AUX_PREFIX(auxiliary_driver_unregister) 39 | 40 | struct auxiliary_device { 41 | struct device dev; 42 | const char *name; 43 | u32 id; 44 | }; 45 | 46 | struct auxiliary_driver { 47 | int (*probe)(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id); 48 | void (*remove)(struct auxiliary_device *auxdev); 49 | void (*shutdown)(struct auxiliary_device *auxdev); 50 | int (*suspend)(struct auxiliary_device *auxdev, pm_message_t state); 51 | int (*resume)(struct auxiliary_device *auxdev); 52 | const char *name; 53 | struct device_driver driver; 54 | const struct auxiliary_device_id *id_table; 55 | }; 56 | 57 | static inline struct auxiliary_device *to_auxiliary_dev(struct device *dev) 58 | { 59 | return container_of(dev, struct auxiliary_device, dev); 60 | } 61 | 62 | static inline struct auxiliary_driver *to_auxiliary_drv(struct device_driver *drv) 63 | { 64 | return container_of(drv, struct auxiliary_driver, driver); 65 | } 66 | 67 | int auxiliary_device_init(struct auxiliary_device *auxdev); 68 | int __auxiliary_device_add(struct auxiliary_device *auxdev, const char *modname); 69 | #define auxiliary_device_add(auxdev) __auxiliary_device_add(auxdev, KBUILD_MODNAME) 70 | 71 | static inline void auxiliary_device_uninit(struct auxiliary_device *auxdev) 72 | { 73 | put_device(&auxdev->dev); 74 | } 75 | 76 | static inline void auxiliary_device_delete(struct auxiliary_device *auxdev) 77 | { 78 | device_del(&auxdev->dev); 79 | } 80 | 81 | int __auxiliary_driver_register(struct auxiliary_driver *auxdrv, struct module *owner, 82 | const char *modname); 83 | #define auxiliary_driver_register(auxdrv) \ 84 | __auxiliary_driver_register(auxdrv, THIS_MODULE, KBUILD_MODNAME) 85 | 86 | void auxiliary_driver_unregister(struct auxiliary_driver *auxdrv); 87 | 88 | /** 89 | * module_auxiliary_driver() - Helper macro for registering an auxiliary driver 90 | * @__auxiliary_driver: auxiliary driver struct 91 | * 92 | * Helper macro for auxiliary drivers which do not do anything special in 93 | * module init/exit. This eliminates a lot of boilerplate. 
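 * A typical driver (hypothetical my_aux_driver) then needs only a single
 * line:
 *   module_auxiliary_driver(my_aux_driver);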
Each module may only 94 | * use this macro once, and calling it replaces module_init() and module_exit() 95 | */ 96 | #define module_auxiliary_driver(__auxiliary_driver) \ 97 | module_driver(__auxiliary_driver, auxiliary_driver_register, auxiliary_driver_unregister) 98 | 99 | struct auxiliary_device *auxiliary_find_device(struct device *start, 100 | const void *data, 101 | int (*match)(struct device *dev, const void *data)); 102 | 103 | #endif /* _AUXILIARY_BUS_H_ */ 104 | -------------------------------------------------------------------------------- /src/ice_devids.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _ICE_DEVIDS_H_ 5 | #define _ICE_DEVIDS_H_ 6 | 7 | /* Device IDs */ 8 | #define ICE_DEV_ID_E822_SI_DFLT 0x1888 9 | /* Intel(R) Ethernet Connection E823-L for backplane */ 10 | #define ICE_DEV_ID_E823L_BACKPLANE 0x124C 11 | /* Intel(R) Ethernet Connection E823-L for SFP */ 12 | #define ICE_DEV_ID_E823L_SFP 0x124D 13 | /* Intel(R) Ethernet Connection E823-L/X557-AT 10GBASE-T */ 14 | #define ICE_DEV_ID_E823L_10G_BASE_T 0x124E 15 | /* Intel(R) Ethernet Connection E823-L 1GbE */ 16 | #define ICE_DEV_ID_E823L_1GBE 0x124F 17 | /* Intel(R) Ethernet Connection E823-L for QSFP */ 18 | #define ICE_DEV_ID_E823L_QSFP 0x151D 19 | /* Intel(R) Ethernet Controller E830-CC for backplane */ 20 | #define ICE_DEV_ID_E830_BACKPLANE 0x12D1 21 | /* Intel(R) Ethernet Controller E830-CC for QSFP */ 22 | #define ICE_DEV_ID_E830_QSFP56 0x12D2 23 | /* Intel(R) Ethernet Controller E830-CC for SFP */ 24 | #define ICE_DEV_ID_E830_SFP 0x12D3 25 | /* Intel(R) Ethernet Controller E830-C for backplane */ 26 | #define ICE_DEV_ID_E830C_BACKPLANE 0x12D5 27 | /* Intel(R) Ethernet Controller E830-L for backplane */ 28 | #define ICE_DEV_ID_E830_L_BACKPLANE 0x12DC 29 | /* Intel(R) Ethernet Controller E830-C for QSFP */ 30 | #define ICE_DEV_ID_E830C_QSFP 0x12D8 31 | /* Intel(R) Ethernet Controller E830-L for QSFP */ 32 | #define ICE_DEV_ID_E830_L_QSFP 0x12DD 33 | /* Intel(R) Ethernet Controller E830-C for SFP */ 34 | #define ICE_DEV_ID_E830C_SFP 0x12DA 35 | /* Intel(R) Ethernet Controller E830-L for SFP */ 36 | #define ICE_DEV_ID_E830_L_SFP 0x12DE 37 | /* Intel(R) Ethernet Controller E810-C for backplane */ 38 | #define ICE_DEV_ID_E810C_BACKPLANE 0x1591 39 | /* Intel(R) Ethernet Controller E810-C for QSFP */ 40 | #define ICE_DEV_ID_E810C_QSFP 0x1592 41 | /* Intel(R) Ethernet Controller E810-C for SFP */ 42 | #define ICE_DEV_ID_E810C_SFP 0x1593 43 | #define ICE_SUBDEV_ID_E810T 0x000E 44 | #define ICE_SUBDEV_ID_E810T2 0x000F 45 | #define ICE_SUBDEV_ID_E810T3 0x0010 46 | #define ICE_SUBDEV_ID_E810T4 0x0011 47 | #define ICE_SUBDEV_ID_E810T5 0x0012 48 | #define ICE_SUBDEV_ID_E810T6 0x02E9 49 | #define ICE_SUBDEV_ID_E810T7 0x02EA 50 | /* Intel(R) Ethernet Controller E810-XXV for backplane */ 51 | #define ICE_DEV_ID_E810_XXV_BACKPLANE 0x1599 52 | /* Intel(R) Ethernet Controller E810-XXV for QSFP */ 53 | #define ICE_DEV_ID_E810_XXV_QSFP 0x159A 54 | /* Intel(R) Ethernet Controller E810-XXV for SFP */ 55 | #define ICE_DEV_ID_E810_XXV_SFP 0x159B 56 | /* Intel(R) Ethernet Connection E823-C for backplane */ 57 | #define ICE_DEV_ID_E823C_BACKPLANE 0x188A 58 | /* Intel(R) Ethernet Connection E823-C for QSFP */ 59 | #define ICE_DEV_ID_E823C_QSFP 0x188B 60 | /* Intel(R) Ethernet Connection E823-C for SFP */ 61 | #define ICE_DEV_ID_E823C_SFP 0x188C 62 | /* Intel(R) Ethernet Connection 
E823-C/X557-AT 10GBASE-T */ 63 | #define ICE_DEV_ID_E823C_10G_BASE_T 0x188D 64 | /* Intel(R) Ethernet Connection E823-C 1GbE */ 65 | #define ICE_DEV_ID_E823C_SGMII 0x188E 66 | /* Intel(R) Ethernet Connection E822-C for backplane */ 67 | #define ICE_DEV_ID_E822C_BACKPLANE 0x1890 68 | /* Intel(R) Ethernet Connection E822-C for QSFP */ 69 | #define ICE_DEV_ID_E822C_QSFP 0x1891 70 | /* Intel(R) Ethernet Connection E822-C for SFP */ 71 | #define ICE_DEV_ID_E822C_SFP 0x1892 72 | /* Intel(R) Ethernet Connection E822-C/X557-AT 10GBASE-T */ 73 | #define ICE_DEV_ID_E822C_10G_BASE_T 0x1893 74 | /* Intel(R) Ethernet Connection E822-C 1GbE */ 75 | #define ICE_DEV_ID_E822C_SGMII 0x1894 76 | /* Intel(R) Ethernet Connection E822-L for backplane */ 77 | #define ICE_DEV_ID_E822L_BACKPLANE 0x1897 78 | /* Intel(R) Ethernet Connection E822-L for SFP */ 79 | #define ICE_DEV_ID_E822L_SFP 0x1898 80 | /* Intel(R) Ethernet Connection E822-L/X557-AT 10GBASE-T */ 81 | #define ICE_DEV_ID_E822L_10G_BASE_T 0x1899 82 | /* Intel(R) Ethernet Connection E822-L 1GbE */ 83 | #define ICE_DEV_ID_E822L_SGMII 0x189A 84 | /* Intel(R) Ethernet Connection E825-C for backplane */ 85 | #define ICE_DEV_ID_E825C_BACKPLANE 0x579C 86 | /* Intel(R) Ethernet Connection E825-C for QSFP */ 87 | #define ICE_DEV_ID_E825C_QSFP 0x579D 88 | /* Intel(R) Ethernet Connection E825-C for SFP */ 89 | #define ICE_DEV_ID_E825C_SFP 0x579E 90 | /* Intel(R) Ethernet Connection E825-C 10GbE */ 91 | #define ICE_DEV_ID_E825C_SGMII 0x579F 92 | #endif /* _ICE_DEVIDS_H_ */ 93 | -------------------------------------------------------------------------------- /src/ice_adapter.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #include "kcompat.h" 5 | 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #ifdef HAVE_XARRAY_API 12 | #include 13 | #endif /* HAVE_XARRAY_API */ 14 | #include "ice_adapter.h" 15 | #include "ice.h" 16 | 17 | static DEFINE_XARRAY(ice_adapters); 18 | static DEFINE_MUTEX(ice_adapters_mutex); 19 | 20 | /* PCI bus number is 8 bits. Slot is 5 bits. Domain can have the rest. 
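 * The packing below places the slot in bits 4:0, the bus in bits 12:5 and
 * the domain in the remaining upper bits, so PFs of the same physical
 * device compute the same index. E825C parts are keyed by the (masked)
 * device ID alone instead.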
*/ 21 | #define INDEX_FIELD_DOMAIN GENMASK(BITS_PER_LONG - 1, 13) 22 | #define INDEX_FIELD_DEV GENMASK(31, 16) 23 | #define INDEX_FIELD_BUS GENMASK(12, 5) 24 | #define INDEX_FIELD_SLOT GENMASK(4, 0) 25 | 26 | #define ICE_DEV_ID_E825C_MASK GENMASK(15, 2) 27 | 28 | static unsigned long ice_adapter_index(const struct pci_dev *pdev) 29 | { 30 | unsigned int domain = pci_domain_nr(pdev->bus); 31 | 32 | WARN_ON(domain > FIELD_MAX(INDEX_FIELD_DOMAIN)); 33 | 34 | switch (pdev->device) { 35 | case ICE_DEV_ID_E825C_BACKPLANE: 36 | case ICE_DEV_ID_E825C_QSFP: 37 | case ICE_DEV_ID_E825C_SFP: 38 | case ICE_DEV_ID_E825C_SGMII: 39 | return FIELD_PREP(INDEX_FIELD_DEV, 40 | pdev->device & ICE_DEV_ID_E825C_MASK); 41 | default: 42 | return FIELD_PREP(INDEX_FIELD_DOMAIN, domain) | 43 | FIELD_PREP(INDEX_FIELD_BUS, pdev->bus->number) | 44 | FIELD_PREP(INDEX_FIELD_SLOT, PCI_SLOT(pdev->devfn)); 45 | } 46 | } 47 | 48 | static struct ice_adapter *ice_adapter_new(void) 49 | { 50 | struct ice_adapter *adapter; 51 | 52 | adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); 53 | if (!adapter) 54 | return NULL; 55 | 56 | spin_lock_init(&adapter->ptp_gltsyn_time_lock); 57 | refcount_set(&adapter->refcount, 1); 58 | 59 | mutex_init(&adapter->ports.lock); 60 | INIT_LIST_HEAD(&adapter->ports.ports); 61 | 62 | return adapter; 63 | } 64 | 65 | static void ice_adapter_free(struct ice_adapter *adapter) 66 | { 67 | WARN_ON(!list_empty(&adapter->ports.ports)); 68 | mutex_destroy(&adapter->ports.lock); 69 | 70 | kfree(adapter); 71 | } 72 | 73 | /** 74 | * ice_adapter_get - Get a shared ice_adapter structure. 75 | * @pdev: Pointer to the pci_dev whose driver is getting the ice_adapter. 76 | * 77 | * Gets a pointer to a shared ice_adapter structure. Physical functions (PFs) 78 | * of the same multi-function PCI device share one ice_adapter structure. 79 | * The ice_adapter is reference-counted. The PF driver must use ice_adapter_put 80 | * to release its reference. 81 | * 82 | * Context: Process, may sleep. 83 | * Return: Pointer to ice_adapter on success. 84 | * ERR_PTR() on error. -ENOMEM is the only possible error. 85 | */ 86 | struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev) 87 | { 88 | unsigned long index = ice_adapter_index(pdev); 89 | struct ice_adapter *adapter; 90 | int err; 91 | 92 | scoped_guard(mutex, &ice_adapters_mutex) { 93 | err = xa_insert(&ice_adapters, index, NULL, GFP_KERNEL); 94 | if (err == -EBUSY) { 95 | adapter = xa_load(&ice_adapters, index); 96 | refcount_inc(&adapter->refcount); 97 | return adapter; 98 | } 99 | if (err) 100 | return ERR_PTR(err); 101 | 102 | adapter = ice_adapter_new(); 103 | if (!adapter) 104 | return ERR_PTR(-ENOMEM); 105 | xa_store(&ice_adapters, index, adapter, GFP_KERNEL); 106 | } 107 | return adapter; 108 | } 109 | 110 | /** 111 | * ice_adapter_put - Release a reference to the shared ice_adapter structure. 112 | * @pdev: Pointer to the pci_dev whose driver is releasing the ice_adapter. 113 | * 114 | * Releases the reference to ice_adapter previously obtained with 115 | * ice_adapter_get. 116 | * 117 | * Context: Process, may sleep. 
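 *
 * A minimal usage sketch (hypothetical caller):
 *
 *	adapter = ice_adapter_get(pdev);
 *	if (IS_ERR(adapter))
 *		return PTR_ERR(adapter);
 *	...
 *	ice_adapter_put(pdev);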
118 | */ 119 | void ice_adapter_put(const struct pci_dev *pdev) 120 | { 121 | unsigned long index = ice_adapter_index(pdev); 122 | struct ice_adapter *adapter; 123 | 124 | scoped_guard(mutex, &ice_adapters_mutex) { 125 | adapter = xa_load(&ice_adapters, index); 126 | if (WARN_ON(!adapter)) 127 | return; 128 | if (!refcount_dec_and_test(&adapter->refcount)) 129 | return; 130 | 131 | WARN_ON(xa_erase(&ice_adapters, index) != adapter); 132 | } 133 | ice_adapter_free(adapter); 134 | } 135 | -------------------------------------------------------------------------------- /src/ice_xsk.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _ICE_XSK_H_ 5 | #define _ICE_XSK_H_ 6 | #include "ice_txrx.h" 7 | #ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL 8 | #include 9 | #endif 10 | 11 | struct ice_vsi; 12 | 13 | #ifdef HAVE_AF_XDP_ZC_SUPPORT 14 | #ifdef CONFIG_XDP_SOCKETS 15 | #ifdef HAVE_NETDEV_BPF_XSK_POOL 16 | int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, 17 | u16 qid); 18 | #else 19 | int ice_xsk_umem_setup(struct ice_vsi *vsi, struct xdp_umem *umem, 20 | u16 qid); 21 | #endif 22 | #ifndef NO_XDP_QUERY_XSK_UMEM 23 | int ice_xsk_umem_query(struct ice_vsi *vsi, struct xdp_umem **umem, u16 qid); 24 | #endif 25 | #ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL 26 | void ice_zca_free(struct zero_copy_allocator *zca, unsigned long handle); 27 | #endif 28 | int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget); 29 | bool ice_clean_tx_irq_zc(struct ice_tx_ring *xdp_ring); 30 | #ifdef HAVE_NDO_XSK_WAKEUP 31 | int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags); 32 | #else 33 | int ice_xsk_async_xmit(struct net_device *netdev, u32 queue_id); 34 | #endif /* HAVE_NDO_XSK_WAKEUP */ 35 | #ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL 36 | bool ice_alloc_rx_bufs_slow_zc(struct ice_rx_ring *rx_ring, u16 count); 37 | #else 38 | bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, int count); 39 | #endif 40 | bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi); 41 | void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring); 42 | void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring); 43 | #ifdef HAVE_XSK_BATCHED_RX_ALLOC 44 | int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc); 45 | #endif 46 | #else 47 | static inline int 48 | #ifdef HAVE_NETDEV_BPF_XSK_POOL 49 | ice_xsk_pool_setup(struct ice_vsi __always_unused *vsi, 50 | struct xsk_buff_pool __always_unused *pool, 51 | #else 52 | ice_xsk_umem_setup(struct ice_vsi __always_unused *vsi, 53 | struct xdp_umem __always_unused *umem, 54 | #endif 55 | u16 __always_unused qid) 56 | { 57 | return -EOPNOTSUPP; 58 | } 59 | 60 | #ifndef NO_XDP_QUERY_XSK_UMEM 61 | static inline int 62 | ice_xsk_umem_query(struct ice_vsi __always_unused *vsi, 63 | struct xdp_umem __always_unused **umem, 64 | u16 __always_unused qid) 65 | { 66 | return -EOPNOTSUPP; 67 | } 68 | #endif 69 | 70 | #ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL 71 | static inline void 72 | ice_zca_free(struct zero_copy_allocator __always_unused *zca, 73 | unsigned long __always_unused handle) 74 | { 75 | } 76 | #endif 77 | 78 | static inline int 79 | ice_clean_rx_irq_zc(struct ice_rx_ring __always_unused *rx_ring, 80 | int __always_unused budget) 81 | { 82 | return 0; 83 | } 84 | 85 | static inline bool 86 | ice_clean_tx_irq_zc(struct ice_tx_ring __always_unused *xdp_ring) 87 | { 88 | return false; 89 | } 90 | 91 | static inline bool 92 | #ifndef 
HAVE_MEM_TYPE_XSK_BUFF_POOL 93 | ice_alloc_rx_bufs_slow_zc(struct ice_rx_ring __always_unused *rx_ring, 94 | u16 __always_unused count) 95 | #else 96 | ice_alloc_rx_bufs_zc(struct ice_rx_ring __always_unused *rx_ring, 97 | u16 __always_unused count) 98 | #endif 99 | { 100 | return false; 101 | } 102 | 103 | static inline bool ice_xsk_any_rx_ring_ena(struct ice_vsi __always_unused *vsi) 104 | { 105 | return false; 106 | } 107 | 108 | #ifdef HAVE_NDO_XSK_WAKEUP 109 | static inline int 110 | ice_xsk_wakeup(struct net_device __always_unused *netdev, 111 | u32 __always_unused queue_id, u32 __always_unused flags) 112 | { 113 | return -EOPNOTSUPP; 114 | } 115 | #else 116 | static inline int 117 | ice_xsk_async_xmit(struct net_device __always_unused *netdev, 118 | u32 __always_unused queue_id) 119 | { 120 | return -EOPNOTSUPP; 121 | } 122 | #endif /* HAVE_NDO_XSK_WAKEUP */ 123 | 124 | static inline void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring) { } 125 | static inline void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring) { } 126 | 127 | #ifdef HAVE_XSK_BATCHED_RX_ALLOC 128 | static inline int ice_realloc_zc_buf(struct ice_vsi __always_unused *vsi, 129 | bool __always_unused zc) 130 | { 131 | return 0; 132 | } 133 | #endif 134 | 135 | #endif /* CONFIG_XDP_SOCKETS */ 136 | #endif /* HAVE_AF_XDP_ZC_SUPPORT */ 137 | #endif /* !_ICE_XSK_H_ */ 138 | -------------------------------------------------------------------------------- /src/ice_flex_pipe.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _ICE_FLEX_PIPE_H_ 5 | #define _ICE_FLEX_PIPE_H_ 6 | 7 | #include "ice_type.h" 8 | 9 | int 10 | ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx, 11 | u8 *prot, u16 *off); 12 | int 13 | ice_find_label_value(struct ice_seg *ice_seg, char const *name, u32 type, 14 | u16 *value); 15 | void 16 | ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type type, 17 | unsigned long *bm); 18 | void 19 | ice_init_prof_result_bm(struct ice_hw *hw); 20 | int 21 | ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, 22 | u16 buf_size, struct ice_sq_cd *cd); 23 | bool 24 | ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type, 25 | u16 *port); 26 | int 27 | ice_is_create_tunnel_possible(struct ice_hw *hw, enum ice_tunnel_type type, 28 | u16 port); 29 | bool ice_is_tunnel_empty(struct ice_hw *hw); 30 | int 31 | ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port); 32 | int ice_set_dvm_boost_entries(struct ice_hw *hw); 33 | int ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all); 34 | bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index); 35 | bool 36 | ice_tunnel_get_type(struct ice_hw *hw, u16 port, enum ice_tunnel_type *type); 37 | int ice_replay_tunnels(struct ice_hw *hw); 38 | 39 | /* RX parser PType functions */ 40 | bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype); 41 | 42 | /* XLT1/PType group functions */ 43 | int ice_ptg_update_xlt1(struct ice_hw *hw, enum ice_block blk); 44 | void ice_ptg_free(struct ice_hw *hw, enum ice_block blk, u8 ptg); 45 | 46 | /* XLT2/VSI group functions */ 47 | int ice_vsig_update_xlt2(struct ice_hw *hw, enum ice_block blk); 48 | int 49 | ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig); 50 | int 51 | ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, 52 | unsigned long *ptypes, const struct 
ice_ptype_attributes *attr, 53 | u16 attr_cnt, struct ice_fv_word *es, u16 *masks, bool fd_swap); 54 | void ice_init_all_prof_masks(struct ice_hw *hw); 55 | void ice_shutdown_all_prof_masks(struct ice_hw *hw); 56 | struct ice_prof_map * 57 | ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id); 58 | int 59 | ice_add_vsi_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig); 60 | int 61 | ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl); 62 | int 63 | ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl); 64 | int 65 | ice_flow_assoc_hw_prof(struct ice_hw *hw, enum ice_block blk, 66 | u16 dest_vsi_handle, u16 fdir_vsi_handle, int id); 67 | int 68 | ice_set_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 cntxt); 69 | int 70 | ice_get_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 *cntxt); 71 | int ice_init_hw_tbls(struct ice_hw *hw); 72 | void ice_fill_blk_tbls(struct ice_hw *hw); 73 | void ice_clear_hw_tbls(struct ice_hw *hw); 74 | void ice_free_hw_tbls(struct ice_hw *hw); 75 | int 76 | ice_add_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count, 77 | u64 id); 78 | int 79 | ice_rem_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count, 80 | u64 id); 81 | int 82 | ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id); 83 | 84 | int 85 | ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off, 86 | u16 len); 87 | 88 | void ice_fill_blk_tbls(struct ice_hw *hw); 89 | 90 | /* To support tunneling entries by PF, the package will append the PF number to 91 | * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc. 92 | */ 93 | #define ICE_TNL_PRE "TNL_" 94 | /* For supporting double VLAN mode, it is necessary to enable or disable certain 95 | * boost tcam entries. The metadata labels names that match the following 96 | * prefixes will be saved to allow enabling double VLAN mode. 97 | */ 98 | #define ICE_DVM_PRE "BOOST_MAC_VLAN_DVM" /* enable these entries */ 99 | #define ICE_SVM_PRE "BOOST_MAC_VLAN_SVM" /* disable these entries */ 100 | 101 | void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val); 102 | void ice_add_dvm_hint(struct ice_hw *hw, u16 val, bool enable); 103 | 104 | #endif /* _ICE_FLEX_PIPE_H_ */ 105 | -------------------------------------------------------------------------------- /src/ice_tspll.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _ICE_TSPLL_H_ 5 | #define _ICE_TSPLL_H_ 6 | 7 | /** 8 | * struct ice_tspll_params_e82x 9 | * @refclk_pre_div: Reference clock pre-divisor 10 | * @feedback_div: Feedback divisor 11 | * @frac_n_div: Fractional divisor 12 | * @post_pll_div: Post PLL divisor 13 | * 14 | * Clock Generation Unit parameters used to program the PLL based on the 15 | * selected TIME_REF/TCXO frequency. 
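 *
 * Judging by the field names, the integer and fractional feedback divisors
 * correspond to the ICE_CGU_R19_TSPLL_FBDIV_INTGR_E82X and
 * ICE_CGU_R24_E82X_TSPLL_FBDIV_FRAC register fields defined later in this
 * header.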
16 | */ 17 | struct ice_tspll_params_e82x { 18 | u32 refclk_pre_div; 19 | u32 feedback_div; 20 | u32 frac_n_div; 21 | u32 post_pll_div; 22 | }; 23 | 24 | /** 25 | * struct ice_tspll_params_e825c 26 | * @ck_refclkfreq: TSPLL reference clock frequency selection 27 | * @ndivratio: ndiv ratio that goes directly to the TSPLL 28 | * @fbdiv_intgr: TSPLL integer feedback divide 29 | * @fbdiv_frac: TSPLL fractional feedback divide 30 | * 31 | * Clock Generation Unit parameters used to program the PLL based on the 32 | * selected TIME_REF/TCXO frequency. 33 | */ 34 | struct ice_tspll_params_e825c { 35 | u32 ck_refclkfreq; 36 | u32 ndivratio; 37 | u32 fbdiv_intgr; 38 | u32 fbdiv_frac; 39 | }; 40 | 41 | #define ICE_CGU_R9 0x24 42 | #define ICE_CGU_R9_TIME_REF_FREQ_SEL GENMASK(2, 0) 43 | #define ICE_CGU_R9_CLK_EREF0_EN BIT(4) 44 | #define ICE_CGU_R9_TIME_REF_EN BIT(5) 45 | #define ICE_CGU_R9_TIME_SYNC_EN BIT(6) 46 | #define ICE_CGU_R9_ONE_PPS_OUT_EN BIT(7) 47 | #define ICE_CGU_R9_ONE_PPS_OUT_AMP GENMASK(19, 18) 48 | 49 | #define ICE_CGU_R10 0x28 50 | #define ICE_CGU_R10_SYNCE_CLKO_SEL GENMASK(8, 5) 51 | #define ICE_CGU_R10_SYNCE_CLKODIV_M1 GENMASK(13, 9) 52 | #define ICE_CGU_R10_SYNCE_CLKODIV_LOAD BIT(14) 53 | #define ICE_CGU_R10_SYNCE_DCK_RST BIT(15) 54 | #define ICE_CGU_R10_SYNCE_ETHCLKO_SEL GENMASK(18, 16) 55 | #define ICE_CGU_R10_SYNCE_ETHDIV_M1 GENMASK(23, 19) 56 | #define ICE_CGU_R10_SYNCE_ETHDIV_LOAD BIT(24) 57 | #define ICE_CGU_R10_SYNCE_DCK2_RST BIT(25) 58 | #define ICE_CGU_R10_SYNCE_S_REF_CLK GENMASK(31, 27) 59 | 60 | #define ICE_CGU_R11 0x2C 61 | #define ICE_CGU_R11_SYNCE_S_BYP_CLK GENMASK(6, 1) 62 | 63 | #define ICE_CGU_R16 0x40 64 | #define ICE_CGU_R16_TSPLL_CK_REFCLKFREQ GENMASK(31, 24) 65 | 66 | #define ICE_CGU_R19 0x4C 67 | #define ICE_CGU_R19_TSPLL_FBDIV_INTGR_E82X GENMASK(7, 0) 68 | #define ICE_CGU_R19_TSPLL_FBDIV_INTGR_E825 GENMASK(9, 0) 69 | #define ICE_CGU_R19_TSPLL_NDIVRATIO GENMASK(19, 16) 70 | 71 | #define ICE_CGU_R22 0x58 72 | #define ICE_CGU_R22_TIME1588CLK_DIV GENMASK(23, 20) 73 | #define ICE_CGU_R22_TIME1588CLK_SEL_DIV2 BIT(30) 74 | 75 | #define ICE_CGU_R23 0x5C 76 | 77 | #define ICE_CGU_R24 0x60 78 | #define ICE_CGU_R24_E82X_TSPLL_FBDIV_FRAC GENMASK(21, 0) 79 | #define ICE_CGU_R23_R24_TSPLL_ENABLE BIT(24) 80 | #define ICE_CGU_R23_R24_REF1588_CK_DIV GENMASK(30, 27) 81 | #define ICE_CGU_R23_R24_TIME_REF_SEL BIT(31) 82 | #define ICE_CGU_R24_ETH56G_FBDIV_FRAC GENMASK(31, 0) 83 | 84 | #define ICE_CGU_BW_TDC 0x31C 85 | #define ICE_CGU_BW_TDC_PLLLOCK_SEL GENMASK(30, 29) 86 | 87 | #define ICE_CGU_RO_LOCK 0x3F0 88 | #define ICE_CGU_RO_LOCK_TRUE_LOCK BIT(12) 89 | #define ICE_CGU_RO_LOCK_UNLOCK BIT(13) 90 | 91 | #define ICE_CGU_CNTR_BIST 0x344 92 | #define ICE_CGU_CNTR_BIST_PLLLOCK_SEL_0 BIT(15) 93 | #define ICE_CGU_CNTR_BIST_PLLLOCK_SEL_1 BIT(16) 94 | 95 | #define ICE_CGU_RO_BWM_LF 0x370 96 | #define ICE_CGU_RO_BWM_LF_TRUE_LOCK BIT(12) 97 | 98 | void ice_tspll_sysfs_init(struct ice_hw *hw); 99 | void ice_tspll_sysfs_release(struct ice_hw *hw); 100 | u64 ice_tspll_ticks2ns(const struct ice_hw *hw, u64 ticks); 101 | u64 ice_tspll_ns2ticks(const struct ice_hw *hw, u64 ns); 102 | int ice_tspll_monitor_lock_e825c(struct ice_hw *hw); 103 | int ice_tspll_bypass_mux_active_e825c(struct ice_hw *hw, u8 port, bool *active, 104 | enum ice_synce_clk output); 105 | int ice_tspll_cfg_bypass_mux_e825c(struct ice_hw *hw, u8 port, 106 | enum ice_synce_clk output, 107 | bool clock_1588, unsigned int ena); 108 | int ice_tspll_cfg_synce_ethdiv_e825c(struct ice_hw *hw, u8 *divider, 109 | enum 
ice_synce_clk output); 110 | int ice_tspll_ena_pps_out_e825c(struct ice_hw *hw, bool ena); 111 | int ice_tspll_cfg_cgu_err_reporting(struct ice_hw *hw, bool enable); 112 | int ice_tspll_init(struct ice_hw *hw); 113 | void ice_tspll_process_cgu_err(struct ice_hw *hw, 114 | struct ice_rq_event_info *event); 115 | #endif /* _ICE_TSPLL_H_ */ 116 | -------------------------------------------------------------------------------- /src/ice_vdcm.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _ICE_VDCM_H_ 5 | #define _ICE_VDCM_H_ 6 | 7 | #include "ice.h" 8 | #include 9 | #if IS_ENABLED(CONFIG_VFIO_MDEV) 10 | #include 11 | #endif /* CONFIG_VFIO_MDEV */ 12 | #include 13 | #include 14 | #if IS_ENABLED(CONFIG_IRQ_BYPASS_MANAGER) 15 | #include 16 | #endif /* CONFIG_IRQ_BYPASS_MANAGER */ 17 | #if IS_ENABLED(CONFIG_VFIO_MDEV) && defined(HAVE_PASID_SUPPORT) && defined(HAVE_IOMMU_DEV_FEAT_AUX) 18 | #include 19 | #endif 20 | 21 | #define ICE_VDCM_CFG_SIZE 256 22 | #define ICE_VDCM_BAR0_SIZE SZ_64M 23 | 24 | struct ice_vdcm_irq_ctx { 25 | struct eventfd_ctx *trigger; 26 | char *name; 27 | unsigned int irq; 28 | #if IS_ENABLED(CONFIG_IRQ_BYPASS_MANAGER) 29 | struct irq_bypass_producer producer; 30 | #endif /* CONFIG_IRQ_BYPASS_MANAGER */ 31 | }; 32 | 33 | /** 34 | * struct ice_vdcm - The abstraction for VDCM 35 | * 36 | * @dev: linux device for this VDCM 37 | * @parent_dev: linux parent device for this VDCM 38 | * @vfio_group: vfio group for this device 39 | * @pci_cfg_space: PCI configuration space buffer 40 | * @vma_lock: protects access to vma_list 41 | * @vma_list: linked list for VMA 42 | * @ctx: IRQ context 43 | * @num_ctx: number of requested IRQ context 44 | * @irq_type: IRQ type 45 | * @adi: ADI attribute 46 | */ 47 | struct ice_vdcm { 48 | /* Common attribute */ 49 | struct device *dev; 50 | struct device *parent_dev; 51 | struct vfio_group *vfio_group; 52 | 53 | u8 pci_cfg_space[ICE_VDCM_CFG_SIZE]; 54 | struct mutex vma_lock; /* protects access to vma_list */ 55 | struct list_head vma_list; 56 | 57 | /* IRQ context */ 58 | struct ice_vdcm_irq_ctx *ctx; 59 | unsigned int num_ctx; 60 | unsigned int irq_type; 61 | 62 | /* Device Specific */ 63 | struct ice_adi *adi; 64 | }; 65 | 66 | /** 67 | * struct ice_adi - Assignable Device Interface attribute 68 | * 69 | * This structure defines the device specific resource and callbacks 70 | * 71 | * It is expected to be embedded in a private container structure allocated by 72 | * the driver. Use container_of to get the private structure pointer back from 73 | * a pointer to the ice_adi structure. 
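 * For example, a containing structure such as (hypothetical)
 *   struct my_adi { ...; struct ice_adi adi; };
 * recovers its private pointer with
 *   container_of(adi, struct my_adi, adi)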
74 | * 75 | * @get_vector_num: get number of vectors assigned to this ADI 76 | * @get_vector_irq: get OS IRQ number per vector 77 | * @reset: This function is called when VDCM wants to reset ADI 78 | * @cfg_pasid: This function is called when VDCM wants to configure ADI's PASID 79 | * @close: This function is called when VDCM wants to close ADI 80 | * @read_reg32: This function is called when VDCM wants to read ADI register 81 | * @write_reg32: This function is called when VDCM wants to write ADI register 82 | * @get_sparse_mmap_hpa: This function is called when VDCM wants to get ADI HPA 83 | * @get_sparse_mmap_num: This function is called when VDCM wants to get 84 | * the number of sparse memory areas 85 | * @get_sparse_mmap_area: This function is called when VDCM wants to get 86 | * layout of sparse memory 87 | */ 88 | struct ice_adi { 89 | int (*get_vector_num)(struct ice_adi *adi); 90 | int (*get_vector_irq)(struct ice_adi *adi, u32 vector); 91 | int (*reset)(struct ice_adi *adi); 92 | int (*cfg_pasid)(struct ice_adi *adi, u32 pasid, bool ena); 93 | int (*close)(struct ice_adi *adi); 94 | u32 (*read_reg32)(struct ice_adi *adi, size_t offs); 95 | void (*write_reg32)(struct ice_adi *adi, size_t offs, u32 val); 96 | int (*get_sparse_mmap_hpa)(struct ice_adi *adi, u32 index, u64 pg_off, 97 | u64 *addr); 98 | int (*get_sparse_mmap_num)(struct ice_adi *adi); 99 | int (*get_sparse_mmap_area)(struct ice_adi *adi, int index, 100 | u64 *offset, u64 *size); 101 | }; 102 | 103 | #if IS_ENABLED(CONFIG_VFIO_MDEV) && defined(HAVE_PASID_SUPPORT) && defined(HAVE_IOMMU_DEV_FEAT_AUX) 104 | struct ice_adi *ice_vdcm_alloc_adi(struct device *dev, void *token); 105 | void ice_vdcm_free_adi(struct ice_adi *adi); 106 | void ice_vdcm_pre_rebuild_irqctx(void *token); 107 | int ice_vdcm_rebuild_irqctx(void *token); 108 | int ice_vdcm_zap(void *token); 109 | int ice_vdcm_init(struct pci_dev *pdev); 110 | void ice_vdcm_deinit(struct pci_dev *pdev); 111 | #else 112 | static inline int ice_vdcm_init(struct pci_dev *pdev) 113 | { 114 | return 0; 115 | } 116 | 117 | static inline void ice_vdcm_deinit(struct pci_dev *pdev) { } 118 | #endif /* CONFIG_VFIO_MDEV && HAVE_PASID_SUPPORT && HAVE_IOMMU_DEV_FEAT_AUX */ 119 | 120 | #endif /* _ICE_VDCM_H_ */ 121 | -------------------------------------------------------------------------------- /src/ice_parser.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _ICE_PARSER_H_ 5 | #define _ICE_PARSER_H_ 6 | 7 | #include "ice_metainit.h" 8 | #include "ice_imem.h" 9 | #include "ice_pg_cam.h" 10 | #include "ice_bst_tcam.h" 11 | #include "ice_ptype_mk.h" 12 | #include "ice_mk_grp.h" 13 | #include "ice_proto_grp.h" 14 | #include "ice_flg_rd.h" 15 | #include "ice_xlt_kb.h" 16 | #include "ice_parser_rt.h" 17 | #include "ice_tmatch.h" 18 | 19 | struct ice_parser { 20 | struct ice_hw *hw; /* pointer to the hardware structure */ 21 | 22 | /* load data from section ICE_SID_RX_PARSER_IMEM */ 23 | struct ice_imem_item *imem_table; 24 | /* load data from section ICE_SID_RXPARSER_METADATA_INIT */ 25 | struct ice_metainit_item *mi_table; 26 | /* load data from section ICE_SID_RXPARSER_CAM */ 27 | struct ice_pg_cam_item *pg_cam_table; 28 | /* load data from section ICE_SID_RXPARSER_PG_SPILL */ 29 | struct ice_pg_cam_item *pg_sp_cam_table; 30 | /* load data from section ICE_SID_RXPARSER_NOMATCH_CAM */ 31 | struct ice_pg_nm_cam_item *pg_nm_cam_table; 32 | 
/* load data from section ICE_SID_RXPARSER_NOMATCH_SPILL */ 33 | struct ice_pg_nm_cam_item *pg_nm_sp_cam_table; 34 | /* load data from section ICE_SID_RXPARSER_BOOST_TCAM */ 35 | struct ice_bst_tcam_item *bst_tcam_table; 36 | /* load data from section ICE_SID_LBL_RXPARSER_TMEM */ 37 | struct ice_lbl_item *bst_lbl_table; 38 | /* load data from section ICE_SID_RXPARSER_MARKER_PTYPE */ 39 | struct ice_ptype_mk_tcam_item *ptype_mk_tcam_table; 40 | /* load data from section ICE_SID_RXPARSER_MARKER_GRP */ 41 | struct ice_mk_grp_item *mk_grp_table; 42 | /* load data from section ICE_SID_RXPARSER_PROTO_GRP */ 43 | struct ice_proto_grp_item *proto_grp_table; 44 | /* load data from section ICE_SID_RXPARSER_FLAG_REDIR */ 45 | struct ice_flg_rd_item *flg_rd_table; 46 | /* load data from section ICE_SID_XLT_KEY_BUILDER_SW */ 47 | struct ice_xlt_kb *xlt_kb_sw; 48 | /* load data from section ICE_SID_XLT_KEY_BUILDER_ACL */ 49 | struct ice_xlt_kb *xlt_kb_acl; 50 | /* load data from section ICE_SID_XLT_KEY_BUILDER_FD */ 51 | struct ice_xlt_kb *xlt_kb_fd; 52 | /* load data from section ICE_SID_XLT_KEY_BUILDER_RSS */ 53 | struct ice_xlt_kb *xlt_kb_rss; 54 | struct ice_parser_rt rt; /* parser runtime */ 55 | }; 56 | 57 | int ice_parser_create(struct ice_hw *hw, struct ice_parser **psr); 58 | void ice_parser_destroy(struct ice_parser *psr); 59 | void ice_parser_dvm_set(struct ice_parser *psr, bool on); 60 | int ice_parser_vxlan_tunnel_set(struct ice_parser *psr, 61 | u16 udp_port, bool on); 62 | int ice_parser_geneve_tunnel_set(struct ice_parser *psr, 63 | u16 udp_port, bool on); 64 | int ice_parser_ecpri_tunnel_set(struct ice_parser *psr, 65 | u16 udp_port, bool on); 66 | 67 | struct ice_parser_proto_off { 68 | u8 proto_id; /* hardware protocol ID */ 69 | u16 offset; /* offset where the protocol header start */ 70 | }; 71 | 72 | struct ice_parser_result { 73 | u16 ptype; /* 16 bits hardware PTYPE */ 74 | /* protocol and header offset pairs */ 75 | struct ice_parser_proto_off po[16]; 76 | int po_num; /* number of pairs must <= 16 */ 77 | u64 flags_psr; /* 64 bits parser flags */ 78 | u64 flags_pkt; /* 64 bits packet flags */ 79 | u16 flags_sw; /* 16 bits key builder flag for SW */ 80 | u16 flags_acl; /* 16 bits key builder flag for ACL */ 81 | u16 flags_fd; /* 16 bits key builder flag for FD */ 82 | u16 flags_rss; /* 16 bits key builder flag for RSS */ 83 | }; 84 | 85 | int ice_parser_run(struct ice_parser *psr, const u8 *pkt_buf, 86 | int pkt_len, struct ice_parser_result *rslt); 87 | void ice_parser_result_dump(struct ice_hw *hw, struct ice_parser_result *rslt); 88 | 89 | struct ice_parser_fv { 90 | u8 proto_id; /* hardware protocol ID */ 91 | u16 offset; /* offset from the start of the protocol header */ 92 | u16 spec; /* 16 bits pattern to match */ 93 | u16 msk; /* 16 bits pattern mask */ 94 | }; 95 | 96 | struct ice_parser_profile { 97 | struct ice_parser_fv fv[48]; /* field vector arrary */ 98 | int fv_num; /* field vector number must <= 48 */ 99 | u16 flags; /* 16 bits key builder flag */ 100 | u16 flags_msk; /* key builder flag masker */ 101 | /* 1024 bits PTYPE bitmap */ 102 | DECLARE_BITMAP(ptypes, ICE_FLOW_PTYPE_MAX); 103 | }; 104 | 105 | int ice_parser_profile_init(struct ice_parser_result *rslt, 106 | const u8 *pkt_buf, const u8 *msk_buf, 107 | int buf_len, enum ice_block blk, 108 | bool prefix_match, 109 | struct ice_parser_profile *prof); 110 | void ice_parser_profile_dump(struct ice_hw *hw, 111 | struct ice_parser_profile *prof); 112 | bool ice_check_ddp_support_proto_id(struct ice_hw *hw, 113 | 
enum ice_prot_id proto_id); 114 | #endif /* _ICE_PARSER_H_ */ 115 | -------------------------------------------------------------------------------- /src/ice_dcb_lib.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _ICE_DCB_LIB_H_ 5 | #define _ICE_DCB_LIB_H_ 6 | 7 | #include "ice.h" 8 | #include "ice_base.h" 9 | #include "ice_lib.h" 10 | 11 | #ifdef CONFIG_DCB 12 | #define ICE_TC_MAX_BW 100 /* Default Max BW percentage */ 13 | #define ICE_DCB_HW_CHG_RST 0 /* DCB configuration changed with reset */ 14 | #define ICE_DCB_NO_HW_CHG 1 /* DCB configuration did not change */ 15 | #define ICE_DCB_HW_CHG 2 /* DCB configuration changed, no reset */ 16 | 17 | void ice_dcb_rebuild(struct ice_pf *pf); 18 | int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked); 19 | void ice_vsi_set_dcb_tc_cfg(struct ice_vsi *vsi); 20 | bool ice_is_pfc_causing_hung_q(struct ice_pf *pf, unsigned int txqueue); 21 | #ifdef HAVE_NDO_SET_TX_MAXRATE 22 | u8 ice_dcb_get_tc(struct ice_vsi *vsi, int queue_index); 23 | #endif /* HAVE_NDO_SET_TX_MAXRATE */ 24 | bool 25 | ice_dcb_need_recfg(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg, 26 | struct ice_dcbx_cfg *new_cfg); 27 | int 28 | ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked); 29 | int ice_dcb_bwchk(struct ice_pf *pf, struct ice_dcbx_cfg *dcbcfg); 30 | void ice_pf_dcb_recfg(struct ice_pf *pf); 31 | void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi); 32 | int ice_init_pf_dcb(struct ice_pf *pf, bool locked); 33 | void ice_update_dcb_stats(struct ice_pf *pf); 34 | void 35 | ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring, 36 | struct ice_tx_buf *first); 37 | void 38 | ice_setup_dcb_qos_info(struct ice_pf *pf, struct iidc_qos_params *qos_info); 39 | void 40 | ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, 41 | struct ice_rq_event_info *event); 42 | 43 | /** 44 | * ice_find_q_in_range 45 | * @low: start of queue range for a TC i.e. 
offset of TC 46 | * @high: start of queue for next TC 47 | * @tx_q: hung_queue/tx_queue 48 | * 49 | * finds if queue 'tx_q' falls between the two offsets of any given TC 50 | */ 51 | static inline bool ice_find_q_in_range(u16 low, u16 high, unsigned int tx_q) 52 | { 53 | return (tx_q >= low) && (tx_q < high); 54 | } 55 | 56 | static inline void 57 | ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, u8 dcb_tc) 58 | { 59 | tlan_ctx->cgd_num = dcb_tc; 60 | } 61 | 62 | static inline bool ice_is_dcb_active(struct ice_pf *pf) 63 | { 64 | return (test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags) || 65 | test_bit(ICE_FLAG_DCB_ENA, pf->flags)); 66 | } 67 | 68 | static inline u8 ice_get_pfc_mode(struct ice_pf *pf) 69 | { 70 | return pf->hw.port_info->qos_cfg.local_dcbx_cfg.pfc_mode; 71 | } 72 | 73 | #else 74 | static inline void ice_dcb_rebuild(struct ice_pf *pf) { } 75 | static inline void ice_vsi_set_dcb_tc_cfg(struct ice_vsi *vsi) 76 | { 77 | vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS; 78 | vsi->tc_cfg.numtc = 1; 79 | } 80 | 81 | static inline u8 ice_get_first_droptc(struct ice_vsi __always_unused *vsi) 82 | { 83 | return 0; 84 | } 85 | 86 | #ifdef HAVE_NDO_SET_TX_MAXRATE 87 | static inline u8 88 | ice_dcb_get_tc(struct ice_vsi __always_unused *vsi, 89 | int __always_unused queue_index) 90 | { 91 | return 0; 92 | } 93 | #endif /* HAVE_NDO_SET_TX_MAXRATE */ 94 | 95 | static inline int 96 | ice_init_pf_dcb(struct ice_pf *pf, bool __always_unused locked) 97 | { 98 | dev_dbg(ice_pf_to_dev(pf), "DCB not supported\n"); 99 | return -EOPNOTSUPP; 100 | } 101 | 102 | static inline int 103 | ice_pf_dcb_cfg(struct ice_pf __always_unused *pf, 104 | struct ice_dcbx_cfg __always_unused *new_cfg, 105 | bool __always_unused locked) 106 | { 107 | return -EOPNOTSUPP; 108 | } 109 | 110 | static inline int 111 | ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring __always_unused *tx_ring, 112 | struct ice_tx_buf __always_unused *first) 113 | { 114 | return 0; 115 | } 116 | 117 | static inline bool ice_is_dcb_active(struct ice_pf __always_unused *pf) 118 | { 119 | return false; 120 | } 121 | 122 | static inline bool 123 | ice_is_pfc_causing_hung_q(struct ice_pf __always_unused *pf, 124 | unsigned int __always_unused txqueue) 125 | { 126 | return false; 127 | } 128 | 129 | static inline u8 ice_get_pfc_mode(struct ice_pf *pf) 130 | { 131 | return 0; 132 | } 133 | 134 | static inline void ice_pf_dcb_recfg(struct ice_pf *pf) { } 135 | static inline void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi) { } 136 | static inline void ice_update_dcb_stats(struct ice_pf *pf) { } 137 | static inline void 138 | ice_setup_dcb_qos_info(struct ice_pf *pf, struct iidc_qos_params *qos_info) { } 139 | static inline void 140 | ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, 141 | struct ice_rq_event_info *event) { } 142 | static inline void 143 | ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, u8 dcb_tc) { } 144 | #endif /* CONFIG_DCB */ 145 | 146 | #endif /* _ICE_DCB_LIB_H_ */ 147 | -------------------------------------------------------------------------------- /src/auxiliary_compat.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _AUXILIARY_COMPAT_H_ 5 | #define _AUXILIARY_COMPAT_H_ 6 | 7 | /* This file contains only the minimal set of kernel compatibility backports 8 | * required by auxiliary.c to build. 
It is similar to the kcompat.h file, but 9 | * reduced to an absolute minimum in order to reduce the risk of generating 10 | * different kernel symbol CRC values at build time. 11 | * 12 | * For a detailed discussion of kernel symbol CRCs, please read: 13 | * 14 | * Documentation/kernel-symbol-crc.rst 15 | * 16 | * Include only the minimum required kernel compatibility implementations from 17 | * kcompat_generated_defs.h and kcompat_impl.h. If a new fix is required, 18 | * please first implement it as part of the kcompat project before porting it 19 | * to this file. 20 | * 21 | * The current list of required implementations is: 22 | * 23 | * NEED_BUS_FIND_DEVICE_CONST_DATA 24 | * NEED_DEV_PM_DOMAIN_ATTACH 25 | * NEED_DEV_PM_DOMAIN_DETACH 26 | * 27 | * Note that kernels since v5.11 support auxiliary as a built-in config 28 | * option. Using this is always preferred to using an out-of-tree module when 29 | * available. 30 | */ 31 | 32 | #include "kcompat_generated_defs.h" 33 | 34 | /**************************** 35 | * Backport implementations * 36 | ****************************/ 37 | 38 | #ifdef NEED_BUS_FIND_DEVICE_CONST_DATA 39 | /* NEED_BUS_FIND_DEVICE_CONST_DATA 40 | * 41 | * bus_find_device() was updated in upstream commit 418e3ea157ef 42 | * ("bus_find_device: Unify the match callback with class_find_device") 43 | * to take a const void *data parameter and also have the match() function 44 | * passed in take a const void *data parameter. 45 | * 46 | * all of the kcompat below makes it so the caller can always just call 47 | * bus_find_device() according to the upstream kernel without having to worry 48 | * about const vs. non-const arguments. 49 | */ 50 | struct _kc_bus_find_device_custom_data { 51 | const void *real_data; 52 | int (*real_match)(struct device *dev, const void *data); 53 | }; 54 | 55 | static inline int _kc_bus_find_device_wrapped_match(struct device *dev, void *data) 56 | { 57 | struct _kc_bus_find_device_custom_data *custom_data = data; 58 | 59 | return custom_data->real_match(dev, custom_data->real_data); 60 | } 61 | 62 | static inline struct device * 63 | _kc_bus_find_device(struct bus_type *type, struct device *start, 64 | const void *data, 65 | int (*match)(struct device *dev, const void *data)) 66 | { 67 | struct _kc_bus_find_device_custom_data custom_data = {}; 68 | 69 | custom_data.real_data = data; 70 | custom_data.real_match = match; 71 | 72 | return bus_find_device(type, start, &custom_data, 73 | _kc_bus_find_device_wrapped_match); 74 | } 75 | 76 | /* force callers of bus_find_device() to call _kc_bus_find_device() on kernels 77 | * where NEED_BUS_FIND_DEVICE_CONST_DATA is defined 78 | */ 79 | #define bus_find_device(type, start, data, match) \ 80 | _kc_bus_find_device(type, start, data, match) 81 | #endif /* NEED_BUS_FIND_DEVICE_CONST_DATA */ 82 | 83 | #if defined(NEED_DEV_PM_DOMAIN_ATTACH) && defined(NEED_DEV_PM_DOMAIN_DETACH) 84 | #include 85 | /* NEED_DEV_PM_DOMAIN_ATTACH and NEED_DEV_PM_DOMAIN_DETACH 86 | * 87 | * dev_pm_domain_attach() and dev_pm_domain_detach() were added in upstream 88 | * commit 46420dd73b80 ("PM / Domains: Add APIs to attach/detach a PM domain for 89 | * a device"). To support older kernels and OSVs that don't have these API, just 90 | * implement how older versions worked by directly calling acpi_dev_pm_attach() 91 | * and acpi_dev_pm_detach(). 
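 * Note that the fallbacks below ignore the power_on/power_off arguments
 * (the ACPI helpers are always called with 'true'), and that attach is a
 * no-op when the device already has a PM domain or has no ACPI companion.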
92 | */ 93 | static inline int dev_pm_domain_attach(struct device *dev, bool power_on) 94 | { 95 | if (dev->pm_domain) 96 | return 0; 97 | 98 | if (ACPI_HANDLE(dev)) 99 | return acpi_dev_pm_attach(dev, true); 100 | 101 | return 0; 102 | } 103 | 104 | static inline void dev_pm_domain_detach(struct device *dev, bool power_off) 105 | { 106 | if (ACPI_HANDLE(dev)) 107 | acpi_dev_pm_detach(dev, true); 108 | } 109 | #else /* NEED_DEV_PM_DOMAIN_ATTACH && NEED_DEV_PM_DOMAIN_DETACH */ 110 | /* it doesn't make sense to compat only one of these functions, and it is 111 | * likely either a failure in kcompat-generator.sh or a failed distribution 112 | * backport if this occurs. Don't try to support it. 113 | */ 114 | #ifdef NEED_DEV_PM_DOMAIN_ATTACH 115 | #error "NEED_DEV_PM_DOMAIN_ATTACH defined but NEED_DEV_PM_DOMAIN_DETACH not defined???" 116 | #endif /* NEED_DEV_PM_DOMAIN_ATTACH */ 117 | #ifdef NEED_DEV_PM_DOMAIN_DETACH 118 | #error "NEED_DEV_PM_DOMAIN_DETACH defined but NEED_DEV_PM_DOMAIN_ATTACH not defined???" 119 | #endif /* NEED_DEV_PM_DOMAIN_DETACH */ 120 | #endif /* NEED_DEV_PM_DOMAIN_ATTACH && NEED_DEV_PM_DOMAIN_DETACH */ 121 | 122 | #endif /* _AUXILIARY_COMPAT_H_ */ 123 | -------------------------------------------------------------------------------- /src/ice_nvm.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _ICE_NVM_H_ 5 | #define _ICE_NVM_H_ 6 | 7 | #define ICE_NVM_CMD_READ 0x0000000B 8 | #define ICE_NVM_CMD_WRITE 0x0000000C 9 | 10 | /* NVM Access config bits */ 11 | #define ICE_NVM_CFG_MODULE_M ICE_M(0xFF, 0) 12 | #define ICE_NVM_CFG_MODULE_S 0 13 | #define ICE_NVM_CFG_FLAGS_M ICE_M(0xF, 8) 14 | #define ICE_NVM_CFG_FLAGS_S 8 15 | #define ICE_NVM_CFG_EXT_FLAGS_M ICE_M(0xF, 12) 16 | #define ICE_NVM_CFG_EXT_FLAGS_S 12 17 | #define ICE_NVM_CFG_ADAPTER_INFO_M ICE_M(0xFFFF, 16) 18 | #define ICE_NVM_CFG_ADAPTER_INFO_S 16 19 | 20 | /* NVM Read Get Driver Features */ 21 | #define ICE_NVM_GET_FEATURES_MODULE 0xE 22 | #define ICE_NVM_GET_FEATURES_FLAGS 0xF 23 | 24 | /* NVM Read/Write Mapped Space */ 25 | #define ICE_NVM_REG_RW_MODULE 0x0 26 | #define ICE_NVM_REG_RW_FLAGS 0x1 27 | 28 | struct ice_orom_civd_info { 29 | u8 signature[4]; /* Must match ASCII '$CIV' characters */ 30 | u8 checksum; /* Simple modulo 256 sum of all structure bytes must equal 0 */ 31 | __le32 combo_ver; /* Combo Image Version number */ 32 | u8 combo_name_len; /* Length of the unicode combo image version string, max of 32 */ 33 | __le16 combo_name[32]; /* Unicode string representing the Combo Image version */ 34 | } __packed; 35 | 36 | #define ICE_NVM_ACCESS_MAJOR_VER 0 37 | #define ICE_NVM_ACCESS_MINOR_VER 5 38 | 39 | /* NVM Access feature flags. Other bits in the features field are reserved and 40 | * should be set to zero when reporting the ice_nvm_features structure. 
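 *
 * Illustrative sketch only (not taken from the driver sources): a response
 * that advertises just register access support could be filled in as
 *
 *	struct ice_nvm_features feat = {
 *		.major = ICE_NVM_ACCESS_MAJOR_VER,
 *		.minor = ICE_NVM_ACCESS_MINOR_VER,
 *		.size = sizeof(feat),
 *	};
 *
 *	feat.features[0] = ICE_NVM_FEATURES_0_REG_ACCESS;
 *
 * with every other feature byte left zeroed, as required above.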
41 | */ 42 | #define ICE_NVM_FEATURES_0_REG_ACCESS BIT(1) 43 | 44 | /* NVM Access Features */ 45 | struct ice_nvm_features { 46 | u8 major; /* Major version (informational only) */ 47 | u8 minor; /* Minor version (informational only) */ 48 | u16 size; /* size of ice_nvm_features structure */ 49 | u8 features[12]; /* Array of feature bits */ 50 | }; 51 | 52 | /* NVM Access command */ 53 | struct ice_nvm_access_cmd { 54 | u32 command; /* NVM command: READ or WRITE */ 55 | u32 config; /* NVM command configuration */ 56 | u32 offset; /* offset to read/write, in bytes */ 57 | u32 data_size; /* size of data field, in bytes */ 58 | }; 59 | 60 | /* NVM Access data */ 61 | union ice_nvm_access_data { 62 | u32 regval; /* Storage for register value */ 63 | struct ice_nvm_features drv_features; /* NVM features */ 64 | }; 65 | 66 | u32 ice_nvm_access_get_module(struct ice_nvm_access_cmd *cmd); 67 | u32 ice_nvm_access_get_flags(struct ice_nvm_access_cmd *cmd); 68 | u32 ice_nvm_access_get_adapter(struct ice_nvm_access_cmd *cmd); 69 | int 70 | ice_nvm_access_read(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd, 71 | union ice_nvm_access_data *data); 72 | int 73 | ice_nvm_access_write(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd, 74 | union ice_nvm_access_data *data); 75 | int 76 | ice_nvm_access_get_features(struct ice_nvm_access_cmd *cmd, 77 | union ice_nvm_access_data *data); 78 | int 79 | ice_handle_nvm_access(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd, 80 | union ice_nvm_access_data *data); 81 | 82 | int 83 | ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access); 84 | void ice_release_nvm(struct ice_hw *hw); 85 | int 86 | ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length, 87 | void *data, bool last_command, bool read_shadow_ram, 88 | struct ice_sq_cd *cd); 89 | int 90 | ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data, 91 | bool read_shadow_ram); 92 | int 93 | ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, 94 | u16 module_type); 95 | int 96 | ice_get_nvm_minsrevs(struct ice_hw *hw, struct ice_minsrev_info *minsrevs); 97 | int 98 | ice_update_nvm_minsrevs(struct ice_hw *hw, struct ice_minsrev_info *minsrevs); 99 | int 100 | ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_info *orom); 101 | int 102 | ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info *nvm); 103 | int 104 | ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netlist_info *netlist); 105 | int 106 | ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size); 107 | int ice_init_nvm(struct ice_hw *hw); 108 | int ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data); 109 | int 110 | ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd); 111 | int 112 | ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, 113 | u16 length, void *data, bool last_command, u8 command_flags, 114 | struct ice_sq_cd *cd); 115 | int ice_nvm_validate_checksum(struct ice_hw *hw); 116 | int ice_nvm_recalculate_checksum(struct ice_hw *hw); 117 | int 118 | ice_nvm_write_activate(struct ice_hw *hw, u16 cmd_flags, u8 *response_flags); 119 | int ice_aq_nvm_update_empr(struct ice_hw *hw); 120 | int 121 | ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data, 122 | u16 length, struct ice_sq_cd *cd); 123 | int 124 | ice_nvm_pass_component_tbl(struct ice_hw *hw, u8 *data, u16 length, 125 | u8 transfer_flag, u8 *comp_response, 126 | u8 *comp_response_code, struct 
ice_sq_cd *cd); 127 | #endif /* _ICE_NVM_H_ */ 128 | -------------------------------------------------------------------------------- /src/kcompat_gcc.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _KCOMPAT_GCC_H_ 5 | #define _KCOMPAT_GCC_H_ 6 | 7 | #ifndef GCC_VERSION 8 | #define GCC_VERSION (__GNUC__ * 10000 \ 9 | + __GNUC_MINOR__ * 100 \ 10 | + __GNUC_PATCHLEVEL__) 11 | #endif /* GCC_VERSION */ 12 | 13 | /* as GCC_VERSION yields 40201 for any modern clang (checked on clang 7 & 13) 14 | * we want other means to add workarounds for "old GCC" */ 15 | #ifdef __clang__ 16 | #define GCC_IS_BELOW(x) 0 17 | #else 18 | #define GCC_IS_BELOW(x) (GCC_VERSION < (x)) 19 | #endif 20 | 21 | #ifdef __has_attribute 22 | #if __has_attribute(__fallthrough__) 23 | # define fallthrough __attribute__((__fallthrough__)) 24 | #else 25 | # define fallthrough do {} while (0) /* fallthrough */ 26 | #endif /* __has_attribute(fallthrough) */ 27 | #else 28 | # define fallthrough do {} while (0) /* fallthrough */ 29 | #endif /* __has_attribute */ 30 | 31 | /* 32 | * upstream commit 4eb6bd55cfb2 ("compiler.h: drop fallback overflow checkers") 33 | * removed bunch of code for builitin overflow fallback implementations, that 34 | * we need for gcc prior to 5.1 35 | */ 36 | #if !GCC_IS_BELOW(50100) 37 | #ifndef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 38 | #define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1 39 | #endif 40 | #endif /* GCC_VERSION >= 50100 */ 41 | 42 | #include "kcompat_overflow.h" 43 | 44 | /* Backport macros for controlling GCC diagnostics */ 45 | #if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,18,0) ) 46 | /* Compilers before gcc-4.6 do not understand "#pragma GCC diagnostic push" */ 47 | #if GCC_VERSION >= 40600 48 | #define __diag_str1(s) #s 49 | #define __diag_str(s) __diag_str1(s) 50 | #define __diag(s) _Pragma(__diag_str(GCC diagnostic s)) 51 | #else 52 | #define __diag(s) 53 | #endif /* GCC_VERSION >= 4.6 */ 54 | #define __diag_push() __diag(push) 55 | #define __diag_pop() __diag(pop) 56 | #endif /* LINUX_VERSION < 4.18.0 */ 57 | 58 | #if GCC_IS_BELOW(50000) 59 | /* Workaround for gcc bug - not accepting "(type)" before "{ ... }" as part of 60 | * static struct initializers [when used with -std=gnu11 switch] 61 | * https://bugzilla.redhat.com/show_bug.cgi?id=1672652 62 | * 63 | * fix was backported to gcc 4.8.5-39 by RedHat, contained in RHEL 7.7 64 | * workaround here is to just drop that redundant (commented out below) part and 65 | * redefine kernel macros used by us. 66 | */ 67 | 68 | /* Since problematic code could be triggered by print-family (incl. wrappers) 69 | * invocation, we have to first include headers that contain macros that we are 70 | * redefining, and only later proceed with the rest of includes. 
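 *
 * As a concrete illustration of the construct being worked around
 * (hypothetical example, not driver code), a plain static lock definition
 * such as
 *
 *	static DEFINE_SPINLOCK(example_lock);
 *
 * expands through __SPIN_LOCK_UNLOCKED() to a "(spinlock_t) { ... }"
 * compound literal, and it is that leading "(spinlock_t)" cast which the
 * redefinitions below comment out.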
71 | */ 72 | #include 73 | #include 74 | #include 75 | #include 76 | 77 | #ifdef __SPIN_LOCK_INITIALIZER 78 | #undef __SPIN_LOCK_UNLOCKED 79 | #define __SPIN_LOCK_UNLOCKED(lockname) \ 80 | /* (spinlock_t) */ __SPIN_LOCK_INITIALIZER(lockname) 81 | #endif /* __SPIN_LOCK_INITIALIZER */ 82 | 83 | #ifdef __RAW_SPIN_LOCK_INITIALIZER 84 | #undef __RAW_SPIN_LOCK_UNLOCKED 85 | #define __RAW_SPIN_LOCK_UNLOCKED(lockname) \ 86 | /* (raw_spinlock_t) */ __RAW_SPIN_LOCK_INITIALIZER(lockname) 87 | #endif /* __RAW_SPIN_LOCK_INITIALIZER */ 88 | 89 | #ifndef CONFIG_DEBUG_SPINLOCK 90 | /* raw_spin_lock_init needs __RAW_SPIN_LOCK_UNLOCKED with typecast, so keep the 91 | * original impl, 92 | * but enhance it with typecast dropped from __RAW_SPIN_LOCK_UNLOCKED() */ 93 | #undef raw_spin_lock_init 94 | #define raw_spin_lock_init(lock) \ 95 | do { *(lock) = (raw_spinlock_t) __RAW_SPIN_LOCK_UNLOCKED(lock); \ 96 | } while (0) 97 | #endif /* !CONFIG_DEBUG_SPINLOCK */ 98 | 99 | #undef STATIC_KEY_INIT_TRUE 100 | #define STATIC_KEY_INIT_TRUE \ 101 | { .enabled = { 1 }, \ 102 | { .type = 1UL } } 103 | 104 | #undef STATIC_KEY_INIT_FALSE 105 | #define STATIC_KEY_INIT_FALSE \ 106 | { .enabled = { 0 } } 107 | 108 | #undef STATIC_KEY_TRUE_INIT 109 | #define STATIC_KEY_TRUE_INIT \ 110 | /* (struct static_key_true) */ { .key = STATIC_KEY_INIT_TRUE } 111 | 112 | #undef STATIC_KEY_FALSE_INIT 113 | #define STATIC_KEY_FALSE_INIT \ 114 | /* (struct static_key_false) */ { .key = STATIC_KEY_INIT_FALSE } 115 | 116 | #ifdef HAVE_JUMP_LABEL 117 | /* dd_key_init() is used (indirectly) with arg like "(STATIC_KEY_INIT_FALSE)" 118 | * from DEFINE_DYNAMIC_DEBUG_METADATA(), which, depending on config has many 119 | * different definitions (including helper macros). 120 | * To reduce compat code, just consume parens from the arg instead copy-pasting 121 | * all definitions and slightly changing them. */ 122 | #define _KC_SLURP_PARENS(...) __VA_ARGS__ 123 | #undef dd_key_init 124 | #define dd_key_init(key, init) key = _KC_SLURP_PARENS init 125 | #endif /* HAVE_JUMP_LABEL */ 126 | 127 | #undef UUID_INIT 128 | #define UUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \ 129 | {{ ((a) >> 24) & 0xff, ((a) >> 16) & 0xff, \ 130 | ((a) >> 8) & 0xff, (a) & 0xff, \ 131 | ((b) >> 8) & 0xff, (b) & 0xff, \ 132 | ((c) >> 8) & 0xff, (c) & 0xff, \ 133 | (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) \ 134 | }} 135 | 136 | #endif /* old GCC < 5.0 */ 137 | 138 | #endif /* _KCOMPAT_GCC_H_ */ 139 | -------------------------------------------------------------------------------- /src/kcompat_rhel_defs.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _KCOMPAT_RHEL_DEFS_H_ 5 | #define _KCOMPAT_RHEL_DEFS_H_ 6 | 7 | /* This is the RedHat Enterprise Linux distribution specific definitions file. 8 | * It defines what features need backports for a given version of the RHEL 9 | * kernel. 10 | * 11 | * It checks the RHEL_RELEASE_CODE and RHEL_RELEASE_VERSION macros to decide 12 | * what support the target kernel has. 13 | * 14 | * It assumes that kcompat_std_defs.h has already been processed, and will 15 | * #define or #undef any flags that have changed based on backports done by 16 | * RHEL. 
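 *
 * Every check below follows the same shape; a hypothetical new flag (name
 * made up purely for illustration) would be wired in as:
 *
 *	#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(9,1))
 *	#define NEED_SOME_BACKPORT
 *	#else
 *	#undef NEED_SOME_BACKPORT
 *	#endif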
17 | */ 18 | 19 | #if !RHEL_RELEASE_CODE 20 | #error "RHEL_RELEASE_CODE is 0 or undefined" 21 | #endif 22 | 23 | #ifndef RHEL_RELEASE_VERSION 24 | #error "RHEL_RELEASE_VERSION is undefined" 25 | #endif 26 | 27 | /*****************************************************************************/ 28 | #if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,3)) 29 | #define NEED_NETDEV_TXQ_BQL_PREFETCH 30 | #else /* >= 7.3 */ 31 | #endif /* 7.3 */ 32 | 33 | /*****************************************************************************/ 34 | #if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,4)) 35 | #define NEED_BUILD_BUG_ON 36 | #else /* >= 7.4 */ 37 | #define HAVE_RHEL7_EXTENDED_OFFLOAD_STATS 38 | #endif /* 7.4 */ 39 | 40 | /*****************************************************************************/ 41 | #if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5)) 42 | #else /* >= 7.5 */ 43 | #define HAVE_FLOW_DISSECTOR_KEY_IP 44 | #endif /* 7.5 */ 45 | 46 | /*****************************************************************************/ 47 | #if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)) 48 | #undef HAVE_XDP_BUFF_RXQ 49 | #undef HAVE_XDP_RXQ_INFO_REG_3_PARAMS 50 | #else /* >= 7.6 */ 51 | #undef NEED_JIFFIES_64_TIME_IS_MACROS 52 | #undef NEED_TC_CLS_CAN_OFFLOAD_AND_CHAIN0 53 | #undef NEED_TC_SETUP_QDISC_MQPRIO 54 | #endif /* 7.6 */ 55 | 56 | /*****************************************************************************/ 57 | #if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,7)) 58 | #else /* >= 7.7 */ 59 | #define HAVE_DEVLINK_PORT_ATTRS_SET_PORT_FLAVOUR 60 | #define HAVE_ETHTOOL_NEW_100G_BITS 61 | #undef NEED_IN_TASK 62 | #define HAVE_FLOW_DISSECTOR_KEY_ENC_IP 63 | #endif /* 7.7 */ 64 | 65 | /*****************************************************************************/ 66 | #if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,0)) 67 | #else /* >= 8.0 */ 68 | #undef HAVE_ETHTOOL_NEW_100G_BITS 69 | #define HAVE_NDO_OFFLOAD_STATS 70 | #undef HAVE_RHEL7_EXTENDED_OFFLOAD_STATS 71 | #endif /* 8.0 */ 72 | 73 | /*****************************************************************************/ 74 | #if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,1)) 75 | #define NEED_IDA_ALLOC_MIN_MAX_RANGE_FREE 76 | #else /* >= 8.1 */ 77 | #define HAVE_ETHTOOL_NEW_100G_BITS 78 | #undef NEED_IDA_ALLOC_MIN_MAX_RANGE_FREE 79 | #undef NEED_INDIRECT_CALL_WRAPPER_MACROS 80 | #define HAVE_INDIRECT_CALL_WRAPPER_HEADER 81 | #endif /* 8.1 */ 82 | 83 | /*****************************************************************************/ 84 | #if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,2)) 85 | #else /* >= 8.2 */ 86 | #undef NEED_FLOW_INDR_BLOCK_CB_REGISTER 87 | #define HAVE_FLOW_INDR_BLOCK_LOCK 88 | #define HAVE_DEVLINK_PORT_ATTRS_SET_SWITCH_ID 89 | #define HAVE_NETDEV_SB_DEV 90 | #endif /* 8.2 */ 91 | 92 | /*****************************************************************************/ 93 | #if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,3)) 94 | #else /* >= 8.3 */ 95 | #undef NEED_CPU_LATENCY_QOS_RENAME 96 | #define HAVE_RT_IRQ_SCHED_FIX 97 | #endif /* 8.3 */ 98 | 99 | /*****************************************************************************/ 100 | #if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,4)) 101 | #else /* >= 8.4 */ 102 | #undef NEED_DEVLINK_PORT_ATTRS_SET_STRUCT 103 | #undef HAVE_XDP_QUERY_PROG 104 | #define HAVE_AF_XDP_ZC_SUPPORT 105 | #define HAVE_MEM_TYPE_XSK_BUFF_POOL 106 | #define HAVE_NDO_XSK_WAKEUP 107 | #define XSK_UMEM_RETURNS_XDP_DESC 108 | #undef NEED_XSK_UMEM_GET_RX_FRAME_SIZE 109 | #define 
HAVE_ETHTOOL_COALESCE_PARAMS_SUPPORT 110 | #endif /* 8.4 */ 111 | 112 | /*****************************************************************************/ 113 | #if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,5)) 114 | #else /* >= 8.5 */ 115 | #undef HAVE_NAPI_BUSY_LOOP 116 | #undef HAVE_XDP_RXQ_INFO_REG_3_PARAMS 117 | #define NO_XDP_QUERY_XSK_UMEM 118 | #undef NEED_XSK_BUFF_POOL_RENAME 119 | #define HAVE_NETDEV_BPF_XSK_POOL 120 | #define HAVE_AF_XDP_NETDEV_UMEM 121 | #define HAVE_DEVLINK_OPS_CREATE_DEL 122 | #endif /* 8.5 */ 123 | 124 | /*****************************************************************************/ 125 | #if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,7)) 126 | #else /* >= 8.7 */ 127 | #undef NEED_DEVLINK_ALLOC_SETS_DEV 128 | #define HAVE_DEVLINK_SET_STATE_3_PARAM 129 | #endif /* 8.7 */ 130 | 131 | /*****************************************************************************/ 132 | #if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(9,0)) 133 | #else /* >= 9.0 */ 134 | #define HAVE_XDP_BUFF_RXQ 135 | #endif /* 9.0 */ 136 | 137 | /*****************************************************************************/ 138 | #endif /* _KCOMPAT_RHEL_DEFS_H_ */ 139 | -------------------------------------------------------------------------------- /src/ice_cpi.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _ICE_CPI_H_ 5 | #define _ICE_CPI_H_ 6 | 7 | #define CPI0_PHY1_CMD_DATA 0x7FD028 8 | #define CPI0_LM1_CMD_DATA 0x7FD024 9 | #define CPI_RETRIES_COUNT 10 10 | #define CPI_RETRIES_CADENCE_MS 100 11 | #define CPI_MAX_FEC_OPTIONS 8 12 | 13 | #define CPI_OPCODE_PORT_STATE 0x1 14 | #define CPI_OPCODE_PORT_STATE_DISABLE BIT(15) 15 | #define CPI_OPCODE_PORT_STATE_RX_READY BIT(7) 16 | 17 | #define CPI_OPCODE_PORT_MODE 0x3 18 | #define CPI_OPCODE_PORT_MODE_PORT_WIDTH_M GENMASK(10, 8) 19 | #define CPI_OPCODE_PORT_MODE_SINGLE_LANE 0 20 | #define CPI_OPCODE_PORT_MODE_TWO_LANE 1 21 | #define CPI_OPCODE_PORT_MODE_FOUR_LANE 2 22 | #define CPI_OPCODE_PORT_MODE_EIGHT_LANE 3 23 | #define CPI_OPCODE_PORT_MODE_PORT_MODE_M GENMASK(7, 0) 24 | #define CPI_OPCODE_PORT_MODE_AN73 BIT(2) 25 | #define CPI_OPCODE_PORT_MODE_SGMII 0x02 26 | #define CPI_OPCODE_PORT_MODE_1000_BASE_X 0x06 27 | #define CPI_OPCODE_PORT_MODE_2500_BASE_X 0x08 28 | #define CPI_OPCODE_PORT_MODE_10G_SFI 0x0C 29 | #define CPI_OPCODE_PORT_MODE_25G_AUI 0x15 30 | #define CPI_OPCODE_PORT_MODE_50G_LAUI_2 0x17 31 | #define CPI_OPCODE_PORT_MODE_100G_CAUI_4 0x18 32 | #define CPI_OPCODE_PORT_MODE_50G_AUI_2 0x19 33 | #define CPI_OPCODE_PORT_MODE_50G_AUI_1 0x20 34 | #define CPI_OPCODE_PORT_MODE_100G_AUI_4 0x1A 35 | #define CPI_OPCODE_PORT_MODE_50G_AUI_1 0x20 36 | #define CPI_OPCODE_PORT_MODE_100G_AUI_2 0x21 37 | #define CPI_OPCODE_PORT_NEG_FORCED BIT(13) 38 | #define CPI_OPCODE_PORT_AN37 BIT(14) 39 | 40 | #define CPI_OPCODE_NEG_MODE 0x5 41 | #define CPI_OPCODE_NEG_MODE_FEC_M GENMASK(15, 12) 42 | #define CPI_OPCODE_NEG_MODE_FEC_NONE 0x0 43 | #define CPI_OPCODE_NEG_MODE_FEC_BASE_R 0x1 44 | #define CPI_OPCODE_NEG_MODE_FEC_RS_528 0x2 45 | #define CPI_OPCODE_NEG_MODE_FEC_RS_544 0x4 46 | 47 | #define CPI_OPCODE_PMD_CONTROL 0xC 48 | #define CPI_OPCODE_PMD_CONTROL_SFI 0x0 49 | #define CPI_OPCODE_PMD_CONTROL_TRAINING 0x400 50 | #define CPI_OPCODE_PMD_DIS_DSP_ADAPTATION BIT(6) 51 | 52 | #define CPI_OPCODE_CURATE0 0x18 53 | #define CPI_OPCODE_CURATE0_100M_SGMII BIT(1) 54 | #define CPI_OPCODE_CURATE0_1G_SGMII BIT(2) 55 | 
#define CPI_OPCODE_CURATE0_1000BASE_KX BIT(3) 56 | #define CPI_OPCODE_CURATE0_2500BASE_KX BIT(4) 57 | #define CPI_OPCODE_CURATE0_5GBASE_KR BIT(6) 58 | #define CPI_OPCODE_CURATE0_10GBASE_KR BIT(8) 59 | #define CPI_OPCODE_CURATE0_40GBASE_KR4 BIT(9) 60 | #define CPI_OPCODE_CURATE0_40GBASE_CR4 BIT(10) 61 | 62 | #define CPI_OPCODE_CURATE1 0x19 63 | #define CPI_OPCODE_CURATE1_25GBASE_CR_KR_S BIT(0) 64 | #define CPI_OPCODE_CURATE1_25GBASE_CR_KR BIT(1) 65 | #define CPI_OPCODE_CURATE1_25GBASE_KR1 BIT(2) 66 | #define CPI_OPCODE_CURATE1_50GBASE_KR2 BIT(3) 67 | #define CPI_OPCODE_CURATE1_100GBASE_CR4 BIT(4) 68 | #define CPI_OPCODE_CURATE1_100GBASE_KR4 BIT(5) 69 | #define CPI_OPCODE_CURATE1_50GBASE_CP4_KP4 BIT(8) 70 | #define CPI_OPCODE_CURATE1_100GBASE_CK_R2P4 BIT(9) 71 | #define CPI_OPCODE_CURATE1_25GBASE_CR1 BIT(12) 72 | #define CPI_OPCODE_CURATE1_50GBASE_CR2 BIT(13) 73 | #define CPI_OPCODE_CURATE1_100GBASE_KP4 BIT(14) 74 | 75 | #define CPI_OPCODE_AN_CONTROL 0x20 76 | #define CPI_OPCODE_AN_CONTROL_CFG 0x0 77 | 78 | #define CPI_OPCODE_CUFEC0 0x1E 79 | #define CPI_OPCODE_CUFEC0_10GCL74CAP BIT(0) 80 | #define CPI_OPCODE_CUFEC0_10GCL74REQ BIT(1) 81 | #define CPI_OPCODE_CUFEC0_25GCL74CAP BIT(2) 82 | #define CPI_OPCODE_CUFEC0_25GCL74REQ BIT(3) 83 | #define CPI_OPCODE_CUFEC0_CL91CL108CAP BIT(4) 84 | #define CPI_OPCODE_CUFEC0_CL91REQ BIT(5) 85 | #define CPI_OPCODE_CUFEC0_CL108REQ BIT(6) 86 | #define CPI_OPCODE_CUFEC0_DISABLE 0 87 | 88 | #define CPI_OPCODE_COMMAND 0xF 89 | #define CPI_OPCODE_COMMAND_CMD_M GENMASK(7, 0) 90 | #define CPI_OPCODE_COMMAND_LANE_M GENMASK(15, 12) 91 | #define CPI_OPCODE_COMMAND_START_RESTART_CFG 0x1 92 | #define CPI_OPCODE_COMMAND_RESET_PORT 0x9 93 | 94 | #define CPI_OPCODE_PHY_CLK 0xF1 95 | #define CPI_OPCODE_PHY_CLK_PHY_SEL_M GENMASK(9, 6) 96 | #define CPI_OPCODE_PHY_CLK_REF_CTRL_M GENMASK(5, 4) 97 | #define CPI_OPCODE_PHY_CLK_PORT_SEL 0 98 | #define CPI_OPCODE_PHY_CLK_DISABLE 1 99 | #define CPI_OPCODE_PHY_CLK_ENABLE 2 100 | #define CPI_OPCODE_PHY_CLK_REF_SEL_M GENMASK(3, 0) 101 | 102 | #define CPI_OPCODE_PHY_PCS_RESET 0xF0 103 | #define CPI_OPCODE_PHY_PCS_ONPI_RESET_VAL 0x3F 104 | 105 | #define CPI_LM_CMD_REQ 1 106 | #define CPI_LM_CMD_SET 1 107 | 108 | #define PHY0 0 109 | #define PHY1 1 110 | 111 | union cpi_reg_phy_cmd_data { 112 | struct { 113 | u16 data; 114 | u16 opcode : 8; 115 | u16 portlane : 3; 116 | u16 reserved_13_11: 3; 117 | u16 error : 1; 118 | u16 ack : 1; 119 | } field; 120 | u32 val; 121 | }; 122 | 123 | union cpi_reg_lm_cmd_data { 124 | struct { 125 | u16 data; 126 | u16 opcode : 8; 127 | u16 portlane : 3; 128 | u16 reserved_12_11: 2; 129 | u16 get_set : 1; 130 | u16 cpi_reset : 1; 131 | u16 cpi_req : 1; 132 | } __packed field; 133 | u32 val; 134 | }; 135 | 136 | struct ice_cpi_cmd { 137 | u8 port; 138 | u8 opcode; 139 | u16 data; 140 | bool set; 141 | }; 142 | 143 | struct ice_cpi_resp { 144 | u8 port; 145 | u8 opcode; 146 | u16 data; 147 | }; 148 | 149 | int ice_cpi_exec(struct ice_hw *hw, u8 phy, 150 | const struct ice_cpi_cmd *cmd, 151 | struct ice_cpi_resp *resp); 152 | int ice_cpi_ena_dis_clk_ref(struct ice_hw *hw, u8 port, 153 | enum ice_e825c_ref_clk clk, bool enable); 154 | #endif /* _ICE_CPI_H_ */ 155 | -------------------------------------------------------------------------------- /pci.updates: -------------------------------------------------------------------------------- 1 | # SPDX-License-Identifier: GPL-2.0-only 2 | # Copyright (C) 2018-2025 Intel Corporation 3 | 4 | # updates for the system pci.ids file 5 | # 6 | # IMPORTANT! 
Entries in this list must be sorted as they 7 | # would appear in the system pci.ids file. Entries 8 | # are sorted by ven, dev, subven, subdev 9 | # (numerical order). 10 | # 11 | 8086 Intel Corporation 12 | 124c Ethernet Connection E823-L for backplane 13 | 124d Ethernet Connection E823-L for SFP 14 | 124e Ethernet Connection E823-L/X557-AT 10GBASE-T 15 | 124f Ethernet Connection E823-L 1GbE 16 | 12d1 Ethernet Controller E830-CC for backplane 17 | 12d2 Ethernet Connection E830-CC QSFP 18 | 8086 0001 Ethernet Network Adapter E830-C-Q2 19 | 8086 0002 Ethernet Network Adapter E830-C-Q2 for OCP 3.0 20 | 8086 0003 Ethernet Network Adapter E830-CC-Q1 21 | 8086 0004 Ethernet Network Adapter E830-CC-Q1 for OCP 3.0 22 | 12d3 Ethernet Controller E830-CC for SFP 23 | 8086 0001 Ethernet Network Adapter E830-XXV-2 for OCP 3.0 24 | 8086 0003 Ethernet Network Adapter E830-XXV-2 25 | 8086 0004 Ethernet Network Adapter E830-XXV-4 for OCP 3.0 26 | 8086 0005 Ethernet Network Adapter E830-XXV-8F for OCP 3.0 27 | 8086 0006 Ethernet Network Adapter E830-XXV-8F 28 | 12d5 Ethernet Controller E830-C for backplane 29 | 12d8 Ethernet Controller E830-C for QSFP 30 | 12da Ethernet Controller E830-C for SFP 31 | 12dc Ethernet Controller E830-L for backplane 32 | 12dd Ethernet Controller E830-L for QSFP 33 | 12de Ethernet Controller E830-L for SFP 34 | 8086 0001 Ethernet Network Adapter E830-XXV-2 for OCP 3.0 35 | 8086 0003 Ethernet Network Adapter E830-XXV-2 36 | 151d Ethernet Connection E823-L for QSFP 37 | 1591 Ethernet Controller E810-C for backplane 38 | 8086 bcce Ethernet Controller E810-C for Intel Open FPGA Stack 39 | 1592 Ethernet Controller E810-C for QSFP 40 | 1137 02bf E810CQDA2 2x100 GbE QSFP28 PCIe NIC 41 | 8086 0001 Ethernet Network Adapter E810-C-Q1 42 | 8086 0002 Ethernet Network Adapter E810-C-Q2 43 | 8086 0005 Ethernet Network Adapter E810-C-Q1 for OCP3.0 44 | 8086 0006 Ethernet Network Adapter E810-C-Q2 for OCP3.0 45 | 8086 000a Ethernet Network Adapter E810-C-Q1 for OCP 46 | 8086 000b Ethernet 100G 2P E810-C Adapter 47 | 8086 000c Ethernet 100G 2P E810-C OCP 48 | 8086 000d Ethernet Network Adapter E810-L-Q2 for OCP3.0 49 | 8086 000e Ethernet Network Adapter E810-2C-Q2 50 | 8086 000f Ethernet Network Adapter E810-C-Q2T 51 | 8086 0010 Ethernet 100G 2P E810-C-stg Adapter 52 | 8086 0011 Ethernet Network Adapter E810-C-Q1 for OCP3.0 53 | 8086 0012 Ethernet 100G 2P E810-C-st Adapter 54 | 8086 0013 Ethernet Network Adapter E810-C-Q1 for OCP 3.0 55 | 8086 0014 Ethernet 100G 2P E810-2C Adapter 56 | 1593 Ethernet Controller E810-C for SFP 57 | 1137 02c3 E810XXVDA4 4x25/10 GbE SFP28 PCIe NIC 58 | 1137 02e9 E810XXVDA4TG 4x25/10 GbE SFP28 PCIe NIC 59 | 1137 02ea E810XXVDA4T 4x25/10 GbE SFP28 PCIe NIC 60 | 8086 0005 Ethernet Network Adapter E810-XXV-4 61 | 8086 0007 Ethernet Network Adapter E810-XXV-4 62 | 8086 000a Ethernet 25G 4P E810-XXV Adapter 63 | 8086 000c Ethernet Network Adapter E810-XXV-4 for OCP 3.0 64 | 8086 000d Ethernet 25G 4P E810-XXV OCP 65 | 8086 000e Ethernet Network Adapter E810-XXV-4T 66 | 8086 000f Ethernet 25G 4P E810-XXV-stg Adapter 67 | 8086 0010 Ethernet 25G 4P E810-XXV-st Adapter 68 | 8086 0012 Ethernet 25G 4P E810-XXV Adapter 69 | 8086 4010 Ethernet Network Adapter E810-XXV-4 70 | 8086 4013 Ethernet Network Adapter E810-XXV-4 for OCP 3.0 71 | 8086 401c Ethernet Network Adapter E810-XXV-4 for OCP 3.0 72 | 1599 Ethernet Controller E810-XXV for backplane 73 | 8086 0001 Ethernet 25G 2P E810-XXV-k Mezz 74 | 159a Ethernet Controller E810-XXV for QSFP 75 | 159b Ethernet Controller E810-XXV 
for SFP 76 | 1137 02be E810XXVDA2 2x25/10 GbE SFP28 PCIe NIC 77 | 8086 0001 Ethernet 25G 2P E810-XXV OCP 78 | 8086 0002 Ethernet 25G 2P E810-XXV Adapter 79 | 8086 0003 Ethernet Network Adapter E810-XXV-2 80 | 8086 0005 Ethernet Network Adapter E810-XXV-2 for OCP 3.0 81 | 8086 4001 Ethernet Network Adapter E810-XXV-2 82 | 8086 4002 Ethernet Network Adapter E810-XXV-2 for OCP 3.0 83 | 8086 4003 Ethernet Network Adapter E810-XXV-2 84 | 8086 4015 Ethernet Network Adapter E810-XXV-2 for OCP 3.0 85 | 1888 Ethernet Connection C800 series 86 | 188a Ethernet Connection E823-C for backplane 87 | 188b Ethernet Controller E823-C for QSFP 88 | 188c Ethernet Controller E823-C for SFP 89 | 1028 0abd Ethernet Connection 25G 4P E823-C LOM 90 | 17aa 405e E823 25G/10G Ethernet LOM Controller 91 | 8086 0001 Ethernet Connection E823-C for SFP 92 | 188d Ethernet Connection E823-C/X557-AT 10GBASE-T 93 | 188e Ethernet Connection E823-C 1GbE 94 | 17aa 405f E823 1G Ethernet LOM Controller 95 | 1890 Ethernet Connection E822-C for backplane 96 | 1891 Ethernet Controller E822-C for QSFP 97 | 1892 Ethernet Controller E822-C for SFP 98 | 1893 Ethernet Connection E822-C/X557-AT 10GBASE-T 99 | 1894 Ethernet Connection E822-C 1GbE 100 | 1897 Ethernet Connection E822-L for backplane 101 | 1898 Ethernet Connection E822-L for SFP 102 | 1899 Ethernet Connection E822-L/X557-AT 10GBASE-T 103 | 189a Ethernet Connection E822-L 1GbE 104 | 579c Ethernet Connection E825-C for backplane 105 | 579d Ethernet Connection E825-C for QSFP 106 | 579e Ethernet Connection E825-C for SFP 107 | 579f Ethernet Connection E825-C 10GbE 108 | -------------------------------------------------------------------------------- /src/ice_controlq.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _ICE_CONTROLQ_H_ 5 | #define _ICE_CONTROLQ_H_ 6 | 7 | #include "ice_adminq_cmd.h" 8 | 9 | /* Maximum buffer lengths for all control queue types */ 10 | #define ICE_AQ_MAX_BUF_LEN 4096 11 | #define ICE_MBXQ_MAX_BUF_LEN 4096 12 | #define ICE_SBQ_MAX_BUF_LEN 512 13 | 14 | #define ICE_CTL_Q_DESC(R, i) \ 15 | (&(((struct ice_aq_desc *)((R).desc_buf.va))[i])) 16 | 17 | #define ICE_CTL_Q_DESC_UNUSED(R) \ 18 | ((u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \ 19 | (R)->next_to_clean - (R)->next_to_use - 1)) 20 | 21 | /* Defines that help manage the driver vs FW API checks. 22 | * Take a look at ice_aq_ver_check in ice_controlq.c for actual usage. 23 | */ 24 | #define EXP_FW_API_VER_BRANCH_E810 0x00 25 | #define EXP_FW_API_VER_MAJOR_E810 0x01 26 | #define EXP_FW_API_VER_MINOR_E810 0x05 27 | 28 | #define EXP_FW_API_VER_BRANCH_E830 0x00 29 | #define EXP_FW_API_VER_MAJOR_E830 0x01 30 | #define EXP_FW_API_VER_MINOR_E830 0x07 31 | 32 | #define EXP_FW_API_VER_BRANCH_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? \ 33 | EXP_FW_API_VER_BRANCH_E830 : \ 34 | EXP_FW_API_VER_BRANCH_E810) 35 | #define EXP_FW_API_VER_MAJOR_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? \ 36 | EXP_FW_API_VER_MAJOR_E830 : \ 37 | EXP_FW_API_VER_MAJOR_E810) 38 | #define EXP_FW_API_VER_MINOR_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? \ 39 | EXP_FW_API_VER_MINOR_E830 : \ 40 | EXP_FW_API_VER_MINOR_E810) 41 | 42 | /* Different control queue types: These are mainly for SW consumption. 
*/ 43 | enum ice_ctl_q { 44 | ICE_CTL_Q_UNKNOWN = 0, 45 | ICE_CTL_Q_ADMIN, 46 | ICE_CTL_Q_MAILBOX, 47 | ICE_CTL_Q_SB, 48 | }; 49 | 50 | /* Control Queue timeout settings - max delay 1s */ 51 | #define ICE_CTL_Q_SQ_CMD_TIMEOUT USEC_PER_SEC 52 | #define ICE_CTL_Q_SQ_CMD_TIMEOUT_SPIN 100 53 | #define ICE_CTL_Q_ADMIN_INIT_TIMEOUT 10 /* Count 10 times */ 54 | #define ICE_CTL_Q_ADMIN_INIT_MSEC 100 /* Check every 100msec */ 55 | 56 | struct ice_ctl_q_ring { 57 | void *dma_head; /* Virtual address to DMA head */ 58 | struct ice_dma_mem desc_buf; /* descriptor ring memory */ 59 | 60 | union { 61 | struct ice_dma_mem *sq_bi; 62 | struct ice_dma_mem *rq_bi; 63 | } r; 64 | 65 | u16 count; /* Number of descriptors */ 66 | 67 | /* used for interrupt processing */ 68 | u16 next_to_use; 69 | u16 next_to_clean; 70 | 71 | /* used for queue tracking */ 72 | u32 head; 73 | u32 tail; 74 | u32 len; 75 | u32 bah; 76 | u32 bal; 77 | u32 len_mask; 78 | u32 len_ena_mask; 79 | u32 len_crit_mask; 80 | u32 head_mask; 81 | }; 82 | 83 | /* sq transaction details */ 84 | struct ice_sq_cd { 85 | u8 postpone : 1; 86 | struct ice_aq_desc *wb_desc; 87 | }; 88 | 89 | /* rq event information */ 90 | struct ice_rq_event_info { 91 | struct ice_aq_desc desc; 92 | u16 msg_len; 93 | u16 buf_len; 94 | u8 *msg_buf; 95 | }; 96 | 97 | struct ice_var_lock { 98 | bool sleepable : 1; 99 | union { 100 | struct mutex mlock; /* Sleepable lock. */ 101 | struct { 102 | spinlock_t slock; /* Non-sleepable lock. */ 103 | unsigned long flags; 104 | }; 105 | }; 106 | }; 107 | 108 | /** 109 | * ice_vlock_init - Initialize ice_var_lock 110 | * @vlock: pointer to the ice_var_lock structure 111 | */ 112 | static inline void ice_vlock_init(struct ice_var_lock *vlock, bool sleepable) 113 | { 114 | vlock->sleepable = sleepable; 115 | if (sleepable) 116 | mutex_init(&vlock->mlock); 117 | else 118 | spin_lock_init(&vlock->slock); 119 | } 120 | 121 | /** 122 | * ice_vlock_destroy - Destroy ice_var_lock 123 | * @vlock: pointer to the ice_var_lock structure 124 | */ 125 | static inline void ice_vlock_destroy(struct ice_var_lock *vlock) 126 | { 127 | if (vlock->sleepable) 128 | mutex_destroy(&vlock->mlock); 129 | } 130 | 131 | /** 132 | * ice_vlock_fsleep - Sleep using ice_var_lock 133 | * @vlock: pointer to the ice_var_lock structure 134 | * @msec_sleepable: time to sleep in milliseconds for sleepable 135 | * @usec_nonsleepable: time to delay in microseconds for nonsleepable 136 | */ 137 | static inline void ice_vlock_fsleep(struct ice_var_lock *vlock, 138 | unsigned int msec_sleepable, 139 | unsigned int usec_nonsleepable) 140 | { 141 | if (vlock->sleepable) 142 | msleep(msec_sleepable); 143 | else 144 | udelay(usec_nonsleepable); 145 | } 146 | 147 | /** 148 | * ice_vlock - Acquire ice_var_lock 149 | * @vlock: pointer to the ice_var_lock structure 150 | */ 151 | static inline void ice_vlock(struct ice_var_lock *vlock) 152 | { 153 | if (vlock->sleepable) 154 | mutex_lock(&vlock->mlock); 155 | else 156 | spin_lock_irqsave(&vlock->slock, vlock->flags); 157 | } 158 | 159 | /** 160 | * ice_vunlock - Release ice_var_lock 161 | * @vlock: pointer to the ice_var_lock structure 162 | */ 163 | static inline void ice_vunlock(struct ice_var_lock *vlock) 164 | { 165 | if (vlock->sleepable) 166 | mutex_unlock(&vlock->mlock); 167 | else 168 | spin_unlock_irqrestore(&vlock->slock, vlock->flags); 169 | } 170 | 171 | DEFINE_GUARD(ice_var_lock, struct ice_var_lock *, ice_vlock(_T), 172 | ice_vunlock(_T)) 173 | 174 | /* Control Queue information */ 175 | struct ice_ctl_q_info { 
176 | enum ice_ctl_q qtype; 177 | struct ice_ctl_q_ring rq; /* receive queue */ 178 | struct ice_ctl_q_ring sq; /* send queue */ 179 | u16 num_rq_entries; /* receive queue depth */ 180 | u16 num_sq_entries; /* send queue depth */ 181 | u16 rq_buf_size; /* receive queue buffer size */ 182 | u16 sq_buf_size; /* send queue buffer size */ 183 | enum ice_aq_err sq_last_status; /* last status on send queue */ 184 | struct ice_var_lock sq_lock; /* Send queue lock */ 185 | struct mutex rq_lock; /* Receive queue lock */ 186 | }; 187 | 188 | #endif /* _ICE_CONTROLQ_H_ */ 189 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | We as members, contributors, and leaders pledge to make participation in our 6 | community a harassment-free experience for everyone, regardless of age, body 7 | size, visible or invisible disability, ethnicity, sex characteristics, gender 8 | identity and expression, level of experience, education, socio-economic status, 9 | nationality, personal appearance, race, caste, color, religion, or sexual 10 | identity and orientation. 11 | 12 | We pledge to act and interact in ways that contribute to an open, welcoming, 13 | diverse, inclusive, and healthy community. 14 | 15 | ## Our Standards 16 | 17 | Examples of behavior that contributes to a positive environment for our 18 | community include: 19 | 20 | * Demonstrating empathy and kindness toward other people 21 | * Being respectful of differing opinions, viewpoints, and experiences 22 | * Giving and gracefully accepting constructive feedback 23 | * Accepting responsibility and apologizing to those affected by our mistakes, 24 | and learning from the experience 25 | * Focusing on what is best not just for us as individuals, but for the overall 26 | community 27 | 28 | Examples of unacceptable behavior include: 29 | 30 | * The use of sexualized language or imagery, and sexual attention or advances of 31 | any kind 32 | * Trolling, insulting or derogatory comments, and personal or political attacks 33 | * Public or private harassment 34 | * Publishing others' private information, such as a physical or email address, 35 | without their explicit permission 36 | * Other conduct which could reasonably be considered inappropriate in a 37 | professional setting 38 | 39 | ## Enforcement Responsibilities 40 | 41 | Community leaders are responsible for clarifying and enforcing our standards of 42 | acceptable behavior and will take appropriate and fair corrective action in 43 | response to any behavior that they deem inappropriate, threatening, offensive, 44 | or harmful. 45 | 46 | Community leaders have the right and responsibility to remove, edit, or reject 47 | comments, commits, code, wiki edits, issues, and other contributions that are 48 | not aligned to this Code of Conduct, and will communicate reasons for moderation 49 | decisions when appropriate. 50 | 51 | ## Scope 52 | 53 | This Code of Conduct applies within all community spaces, and also applies when 54 | an individual is officially representing the community in public spaces. 55 | Examples of representing our community include using an official e-mail address, 56 | posting via an official social media account, or acting as an appointed 57 | representative at an online or offline event. 
58 | 59 | ## Enforcement 60 | 61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 62 | reported to the community leaders responsible for enforcement at 63 | CommunityCodeOfConduct AT intel DOT com. 64 | All complaints will be reviewed and investigated promptly and fairly. 65 | 66 | All community leaders are obligated to respect the privacy and security of the 67 | reporter of any incident. 68 | 69 | ## Enforcement Guidelines 70 | 71 | Community leaders will follow these Community Impact Guidelines in determining 72 | the consequences for any action they deem in violation of this Code of Conduct: 73 | 74 | ### 1. Correction 75 | 76 | **Community Impact**: Use of inappropriate language or other behavior deemed 77 | unprofessional or unwelcome in the community. 78 | 79 | **Consequence**: A private, written warning from community leaders, providing 80 | clarity around the nature of the violation and an explanation of why the 81 | behavior was inappropriate. A public apology may be requested. 82 | 83 | ### 2. Warning 84 | 85 | **Community Impact**: A violation through a single incident or series of 86 | actions. 87 | 88 | **Consequence**: A warning with consequences for continued behavior. No 89 | interaction with the people involved, including unsolicited interaction with 90 | those enforcing the Code of Conduct, for a specified period of time. This 91 | includes avoiding interactions in community spaces as well as external channels 92 | like social media. Violating these terms may lead to a temporary or permanent 93 | ban. 94 | 95 | ### 3. Temporary Ban 96 | 97 | **Community Impact**: A serious violation of community standards, including 98 | sustained inappropriate behavior. 99 | 100 | **Consequence**: A temporary ban from any sort of interaction or public 101 | communication with the community for a specified period of time. No public or 102 | private interaction with the people involved, including unsolicited interaction 103 | with those enforcing the Code of Conduct, is allowed during this period. 104 | Violating these terms may lead to a permanent ban. 105 | 106 | ### 4. Permanent Ban 107 | 108 | **Community Impact**: Demonstrating a pattern of violation of community 109 | standards, including sustained inappropriate behavior, harassment of an 110 | individual, or aggression toward or disparagement of classes of individuals. 111 | 112 | **Consequence**: A permanent ban from any sort of public interaction within the 113 | community. 114 | 115 | ## Attribution 116 | 117 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], 118 | version 2.1, available at 119 | [https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. 120 | 121 | Community Impact Guidelines were inspired by 122 | [Mozilla's code of conduct enforcement ladder][Mozilla CoC]. 123 | 124 | For answers to common questions about this code of conduct, see the FAQ at 125 | [https://www.contributor-covenant.org/faq][FAQ]. Translations are available at 126 | [https://www.contributor-covenant.org/translations][translations]. 
127 | 128 | [homepage]: https://www.contributor-covenant.org 129 | [v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html 130 | [Mozilla CoC]: https://github.com/mozilla/diversity 131 | [FAQ]: https://www.contributor-covenant.org/faq 132 | -------------------------------------------------------------------------------- /src/kcompat_pldmfw.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | /* SPDX-License-Identifier: GPL-2.0 */ 5 | /* Copyright (C) 2021 - 2023 Intel Corporation. */ 6 | 7 | /* This is taken from upstream */ 8 | 9 | #ifndef _KCOMPAT_PLDMFW_H_ 10 | #define _KCOMPAT_PLDMFW_H_ 11 | 12 | #ifdef _PLDMFW_H_ 13 | #error "Do not include both kcompat_pldmfw.h and " 14 | #endif 15 | 16 | #if IS_ENABLED(CONFIG_PLDMFW) 17 | #error "CONFIG_PLDMFW is enabled, use " 18 | #endif 19 | 20 | #include 21 | #include 22 | 23 | #define PLDM_DEVICE_UPDATE_CONTINUE_AFTER_FAIL BIT(0) 24 | 25 | #define PLDM_STRING_TYPE_UNKNOWN 0 26 | #define PLDM_STRING_TYPE_ASCII 1 27 | #define PLDM_STRING_TYPE_UTF8 2 28 | #define PLDM_STRING_TYPE_UTF16 3 29 | #define PLDM_STRING_TYPE_UTF16LE 4 30 | #define PLDM_STRING_TYPE_UTF16BE 5 31 | 32 | struct pldmfw_record { 33 | struct list_head entry; 34 | 35 | /* List of descriptor TLVs */ 36 | struct list_head descs; 37 | 38 | /* Component Set version string*/ 39 | const u8 *version_string; 40 | u8 version_type; 41 | u8 version_len; 42 | 43 | /* Package Data length */ 44 | u16 package_data_len; 45 | 46 | /* Bitfield of Device Update Flags */ 47 | u32 device_update_flags; 48 | 49 | /* Package Data block */ 50 | const u8 *package_data; 51 | 52 | /* Bitmap of components applicable to this record */ 53 | unsigned long *component_bitmap; 54 | u16 component_bitmap_len; 55 | }; 56 | 57 | /* Standard descriptor TLV identifiers */ 58 | #define PLDM_DESC_ID_PCI_VENDOR_ID 0x0000 59 | #define PLDM_DESC_ID_IANA_ENTERPRISE_ID 0x0001 60 | #define PLDM_DESC_ID_UUID 0x0002 61 | #define PLDM_DESC_ID_PNP_VENDOR_ID 0x0003 62 | #define PLDM_DESC_ID_ACPI_VENDOR_ID 0x0004 63 | #define PLDM_DESC_ID_PCI_DEVICE_ID 0x0100 64 | #define PLDM_DESC_ID_PCI_SUBVENDOR_ID 0x0101 65 | #define PLDM_DESC_ID_PCI_SUBDEV_ID 0x0102 66 | #define PLDM_DESC_ID_PCI_REVISION_ID 0x0103 67 | #define PLDM_DESC_ID_PNP_PRODUCT_ID 0x0104 68 | #define PLDM_DESC_ID_ACPI_PRODUCT_ID 0x0105 69 | #define PLDM_DESC_ID_VENDOR_DEFINED 0xFFFF 70 | 71 | struct pldmfw_desc_tlv { 72 | struct list_head entry; 73 | 74 | const u8 *data; 75 | u16 type; 76 | u16 size; 77 | }; 78 | 79 | #define PLDM_CLASSIFICATION_UNKNOWN 0x0000 80 | #define PLDM_CLASSIFICATION_OTHER 0x0001 81 | #define PLDM_CLASSIFICATION_DRIVER 0x0002 82 | #define PLDM_CLASSIFICATION_CONFIG_SW 0x0003 83 | #define PLDM_CLASSIFICATION_APP_SW 0x0004 84 | #define PLDM_CLASSIFICATION_INSTRUMENTATION 0x0005 85 | #define PLDM_CLASSIFICATION_BIOS 0x0006 86 | #define PLDM_CLASSIFICATION_DIAGNOSTIC_SW 0x0007 87 | #define PLDM_CLASSIFICATION_OS 0x0008 88 | #define PLDM_CLASSIFICATION_MIDDLEWARE 0x0009 89 | #define PLDM_CLASSIFICATION_FIRMWARE 0x000A 90 | #define PLDM_CLASSIFICATION_CODE 0x000B 91 | #define PLDM_CLASSIFICATION_SERVICE_PACK 0x000C 92 | #define PLDM_CLASSIFICATION_SOFTWARE_BUNDLE 0x000D 93 | 94 | #define PLDM_ACTIVATION_METHOD_AUTO BIT(0) 95 | #define PLDM_ACTIVATION_METHOD_SELF_CONTAINED BIT(1) 96 | #define PLDM_ACTIVATION_METHOD_MEDIUM_SPECIFIC BIT(2) 97 | #define PLDM_ACTIVATION_METHOD_REBOOT BIT(3) 98 | 
#define PLDM_ACTIVATION_METHOD_DC_CYCLE BIT(4) 99 | #define PLDM_ACTIVATION_METHOD_AC_CYCLE BIT(5) 100 | 101 | #define PLDMFW_COMPONENT_OPTION_FORCE_UPDATE BIT(0) 102 | #define PLDMFW_COMPONENT_OPTION_USE_COMPARISON_STAMP BIT(1) 103 | 104 | struct pldmfw_component { 105 | struct list_head entry; 106 | 107 | /* component identifier */ 108 | u16 classification; 109 | u16 identifier; 110 | 111 | u16 options; 112 | u16 activation_method; 113 | 114 | u32 comparison_stamp; 115 | 116 | u32 component_size; 117 | const u8 *component_data; 118 | 119 | /* Component version string */ 120 | const u8 *version_string; 121 | u8 version_type; 122 | u8 version_len; 123 | 124 | /* component index */ 125 | u8 index; 126 | 127 | }; 128 | 129 | /* Transfer flag used for sending components to the firmware */ 130 | #define PLDM_TRANSFER_FLAG_START BIT(0) 131 | #define PLDM_TRANSFER_FLAG_MIDDLE BIT(1) 132 | #define PLDM_TRANSFER_FLAG_END BIT(2) 133 | 134 | struct pldmfw_ops; 135 | 136 | /* Main entry point to the PLDM firmware update engine. Device drivers 137 | * should embed this in a private structure and use container_of to obtain 138 | * a pointer to their own data, used to implement the device specific 139 | * operations. 140 | */ 141 | struct pldmfw { 142 | const struct pldmfw_ops *ops; 143 | struct device *dev; 144 | }; 145 | 146 | bool pldmfw_op_pci_match_record(struct pldmfw *context, struct pldmfw_record *record); 147 | 148 | /* Operations invoked by the generic PLDM firmware update engine. Used to 149 | * implement device specific logic. 150 | * 151 | * @match_record: check if the device matches the given record. For 152 | * convenience, a standard implementation is provided for PCI devices. 153 | * 154 | * @send_package_data: send the package data associated with the matching 155 | * record to firmware. 156 | * 157 | * @send_component_table: send the component data associated with a given 158 | * component to firmware. Called once for each applicable component. 159 | * 160 | * @flash_component: Flash the data for a given component to the device. 161 | * Called once for each applicable component, after all component tables have 162 | * been sent. 163 | * 164 | * @finalize_update: (optional) Finish the update. Called after all components 165 | * have been flashed. 166 | */ 167 | struct pldmfw_ops { 168 | bool (*match_record)(struct pldmfw *context, struct pldmfw_record *record); 169 | int (*send_package_data)(struct pldmfw *context, const u8 *data, u16 length); 170 | int (*send_component_table)(struct pldmfw *context, struct pldmfw_component *component, 171 | u8 transfer_flag); 172 | int (*flash_component)(struct pldmfw *context, struct pldmfw_component *component); 173 | int (*finalize_update)(struct pldmfw *context); 174 | }; 175 | 176 | int pldmfw_flash_image(struct pldmfw *context, const struct firmware *fw); 177 | 178 | #endif 179 | -------------------------------------------------------------------------------- /src/ice_dpll.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | /* SPDX-License-Identifier: GPL-2.0 */ 5 | /* Copyright (C) 2023, Intel Corporation. 
*/ 6 | 7 | #ifndef _ICE_DPLL_H_ 8 | #define _ICE_DPLL_H_ 9 | 10 | #ifdef CONFIG_DPLL 11 | #include "ice.h" 12 | #include 13 | 14 | #define ICE_DPLL_RCLK_NUM_MAX 4 15 | #endif /* CONFIG_DPLL */ 16 | 17 | /** 18 | * enum ice_dpll_pin_type - enumerate ice pin types: 19 | * @ICE_DPLL_PIN_TYPE_INVALID: invalid pin type 20 | * @ICE_DPLL_PIN_TYPE_INPUT: input pin 21 | * @ICE_DPLL_PIN_TYPE_OUTPUT: output pin 22 | * @ICE_DPLL_PIN_TYPE_RCLK_INPUT: recovery clock input pin 23 | * @ICE_DPLL_PIN_TYPE_SOFTWARE: software controlled SMA/U.FL pins 24 | */ 25 | enum ice_dpll_pin_type { 26 | ICE_DPLL_PIN_TYPE_INVALID, 27 | ICE_DPLL_PIN_TYPE_INPUT, 28 | ICE_DPLL_PIN_TYPE_OUTPUT, 29 | ICE_DPLL_PIN_TYPE_RCLK_INPUT, 30 | ICE_DPLL_PIN_TYPE_SOFTWARE, 31 | }; 32 | 33 | /** 34 | * enum ice_dpll_pin_sw - enumerate ice software pin indices: 35 | * @ICE_DPLL_PIN_SW_1_IDX: index of first SW pin 36 | * @ICE_DPLL_PIN_SW_2_IDX: index of second SW pin 37 | * @ICE_DPLL_PIN_SW_NUM: number of SW pins in pair 38 | */ 39 | enum ice_dpll_pin_sw { 40 | ICE_DPLL_PIN_SW_1_IDX, 41 | ICE_DPLL_PIN_SW_2_IDX, 42 | ICE_DPLL_PIN_SW_NUM 43 | }; 44 | 45 | #ifdef CONFIG_DPLL 46 | /** ice_dpll_pin - store info about pins 47 | * @pin: dpll pin structure 48 | * @pf: pointer to pf, which has registered the dpll_pin 49 | * @idx: ice pin private idx 50 | * @num_parents: hols number of parent pins 51 | * @parent_idx: hold indexes of parent pins 52 | * @flags: pin flags returned from HW 53 | * @state: state of a pin 54 | * @prop: pin properties 55 | * @freq: current frequency of a pin 56 | * @phase_adjust: current phase adjust value 57 | */ 58 | struct ice_dpll_pin { 59 | struct dpll_pin *pin; 60 | struct ice_pf *pf; 61 | u8 idx; 62 | u8 num_parents; 63 | u8 parent_idx[ICE_DPLL_RCLK_NUM_MAX]; 64 | u8 flags[ICE_DPLL_RCLK_NUM_MAX]; 65 | u8 state[ICE_DPLL_RCLK_NUM_MAX]; 66 | struct dpll_pin_properties prop; 67 | u32 freq; 68 | s32 phase_adjust; 69 | struct ice_dpll_pin *input; 70 | struct ice_dpll_pin *output; 71 | enum dpll_pin_direction direction; 72 | u8 status; 73 | bool active; 74 | bool hidden; 75 | }; 76 | 77 | /** ice_dpll - store info required for DPLL control 78 | * @dpll: pointer to dpll dev 79 | * @pf: pointer to pf, which has registered the dpll_device 80 | * @dpll_idx: index of dpll on the NIC 81 | * @input_idx: currently selected input index 82 | * @prev_input_idx: previously selected input index 83 | * @ref_state: state of dpll reference signals 84 | * @eec_mode: eec_mode dpll is configured for 85 | * @phase_offset: phase offset of active pin vs dpll signal 86 | * @prev_phase_offset: previous phase offset of active pin vs dpll signal 87 | * @input_prio: priorities of each input 88 | * @dpll_state: current dpll sync state 89 | * @prev_dpll_state: last dpll sync state 90 | * @active_input: pointer to active input pin 91 | * @prev_input: pointer to previous active input pin 92 | */ 93 | struct ice_dpll { 94 | struct dpll_device *dpll; 95 | struct ice_pf *pf; 96 | u8 dpll_idx; 97 | u8 input_idx; 98 | u8 prev_input_idx; 99 | u8 ref_state; 100 | u8 eec_mode; 101 | s64 phase_offset; 102 | s64 prev_phase_offset; 103 | u8 *input_prio; 104 | enum dpll_lock_status dpll_state; 105 | enum dpll_lock_status prev_dpll_state; 106 | enum dpll_mode mode; 107 | struct dpll_pin *active_input; 108 | struct dpll_pin *prev_input; 109 | }; 110 | 111 | /** ice_dplls - store info required for CCU (clock controlling unit) 112 | * @kworker: periodic worker 113 | * @work: periodic work 114 | * @lock: locks access to configuration of a dpll 115 | * @eec: pointer to 
EEC dpll dev 116 | * @pps: pointer to PPS dpll dev 117 | * @inputs: input pins pointer 118 | * @outputs: output pins pointer 119 | * @rclk: recovered pins pointer 120 | * @num_inputs: number of input pins available on dpll 121 | * @num_outputs: number of output pins available on dpll 122 | * @cgu_state_acq_err_num: number of errors returned during periodic work 123 | * @base_rclk_idx: idx of first pin used for clock recovery pins 124 | * @clock_id: clock_id of dplls 125 | * @input_phase_adj_max: max phase adjust value for input pins 126 | * @output_phase_adj_max: max phase adjust value for output pins 127 | */ 128 | struct ice_dplls { 129 | struct kthread_worker *kworker; 130 | struct kthread_delayed_work work; 131 | struct mutex lock; /* locks access to configuration of a dpll */ 132 | struct ice_dpll eec; 133 | struct ice_dpll pps; 134 | struct ice_dpll_pin *inputs; 135 | struct ice_dpll_pin *outputs; 136 | struct ice_dpll_pin sma[ICE_DPLL_PIN_SW_NUM]; 137 | struct ice_dpll_pin ufl[ICE_DPLL_PIN_SW_NUM]; 138 | struct ice_dpll_pin rclk; 139 | u8 num_inputs; 140 | u8 num_outputs; 141 | u8 sma_data; 142 | u8 base_rclk_idx; 143 | int cgu_state_acq_err_num; 144 | u64 clock_id; 145 | s32 input_phase_adj_max; 146 | s32 output_phase_adj_max; 147 | bool generic; 148 | bool unmanaged; 149 | }; 150 | 151 | #if IS_ENABLED(CONFIG_PTP_1588_CLOCK) 152 | void ice_dpll_init(struct ice_pf *pf); 153 | void ice_dpll_deinit(struct ice_pf *pf); 154 | void ice_dpll_pin_update_lock(struct ice_pf *pf); 155 | void ice_dpll_pin_update_unlock(struct ice_pf *pf, bool pin_updated, 156 | enum ice_dpll_pin_type pin_type, u8 pin_idx); 157 | void ice_dpll_lock_state_set_unmanaged(struct ice_pf *pf, 158 | struct ice_aqc_health_status_elem *buff, 159 | bool notify); 160 | #else /* CONFIG_PTP_1588_CLOCK */ 161 | static inline void ice_dpll_init(struct ice_pf *pf) { } 162 | static inline void ice_dpll_deinit(struct ice_pf *pf) { } 163 | static inline void 164 | ice_dpll_lock_state_set_unmanaged(struct ice_pf *pf, 165 | struct ice_aqc_health_status_elem *buff, 166 | bool notify) { } 167 | #endif /* CONFIG_PTP_1588_CLOCK */ 168 | #else /* CONFIG_DPLL */ 169 | static inline void ice_dpll_pin_update_lock(struct ice_pf *pf) { } 170 | static inline void 171 | ice_dpll_pin_update_unlock(struct ice_pf *pf, bool pin_updated, 172 | enum ice_dpll_pin_type pin_type, u8 pin_idx) { } 173 | #endif /* CONFIG_DPLL */ 174 | 175 | #endif /* _ICE_DPLL_H_ */ 176 | -------------------------------------------------------------------------------- /src/ice_metainit.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #include "ice_common.h" 5 | #include "ice_parser_util.h" 6 | 7 | #define ICE_METAINIT_TABLE_SIZE 16 8 | 9 | /** 10 | * ice_metainit_dump - dump a metainit item info 11 | * @hw: pointer to the hardware structure 12 | * @item: metainit item to dump 13 | */ 14 | void ice_metainit_dump(struct ice_hw *hw, struct ice_metainit_item *item) 15 | { 16 | dev_info(ice_hw_to_dev(hw), "index = %d\n", item->idx); 17 | dev_info(ice_hw_to_dev(hw), "tsr = %d\n", item->tsr); 18 | dev_info(ice_hw_to_dev(hw), "ho = %d\n", item->ho); 19 | dev_info(ice_hw_to_dev(hw), "pc = %d\n", item->pc); 20 | dev_info(ice_hw_to_dev(hw), "pg_rn = %d\n", item->pg_rn); 21 | dev_info(ice_hw_to_dev(hw), "cd = %d\n", item->cd); 22 | dev_info(ice_hw_to_dev(hw), "gpr_a_ctrl = %d\n", item->gpr_a_ctrl); 23 | dev_info(ice_hw_to_dev(hw),
"gpr_a_data_mdid = %d\n", 24 | item->gpr_a_data_mdid); 25 | dev_info(ice_hw_to_dev(hw), "gpr_a_data_start = %d\n", 26 | item->gpr_a_data_start); 27 | dev_info(ice_hw_to_dev(hw), "gpr_a_data_len = %d\n", 28 | item->gpr_a_data_len); 29 | dev_info(ice_hw_to_dev(hw), "gpr_a_id = %d\n", item->gpr_a_id); 30 | dev_info(ice_hw_to_dev(hw), "gpr_b_ctrl = %d\n", item->gpr_b_ctrl); 31 | dev_info(ice_hw_to_dev(hw), "gpr_b_data_mdid = %d\n", 32 | item->gpr_b_data_mdid); 33 | dev_info(ice_hw_to_dev(hw), "gpr_b_data_start = %d\n", 34 | item->gpr_b_data_start); 35 | dev_info(ice_hw_to_dev(hw), "gpr_b_data_len = %d\n", 36 | item->gpr_b_data_len); 37 | dev_info(ice_hw_to_dev(hw), "gpr_b_id = %d\n", item->gpr_b_id); 38 | dev_info(ice_hw_to_dev(hw), "gpr_c_ctrl = %d\n", item->gpr_c_ctrl); 39 | dev_info(ice_hw_to_dev(hw), "gpr_c_data_mdid = %d\n", 40 | item->gpr_c_data_mdid); 41 | dev_info(ice_hw_to_dev(hw), "gpr_c_data_start = %d\n", 42 | item->gpr_c_data_start); 43 | dev_info(ice_hw_to_dev(hw), "gpr_c_data_len = %d\n", 44 | item->gpr_c_data_len); 45 | dev_info(ice_hw_to_dev(hw), "gpr_c_id = %d\n", item->gpr_c_id); 46 | dev_info(ice_hw_to_dev(hw), "gpr_d_ctrl = %d\n", item->gpr_d_ctrl); 47 | dev_info(ice_hw_to_dev(hw), "gpr_d_data_mdid = %d\n", 48 | item->gpr_d_data_mdid); 49 | dev_info(ice_hw_to_dev(hw), "gpr_d_data_start = %d\n", 50 | item->gpr_d_data_start); 51 | dev_info(ice_hw_to_dev(hw), "gpr_d_data_len = %d\n", 52 | item->gpr_d_data_len); 53 | dev_info(ice_hw_to_dev(hw), "gpr_d_id = %d\n", item->gpr_d_id); 54 | dev_info(ice_hw_to_dev(hw), "flags = 0x%llx\n", 55 | (unsigned long long)(item->flags)); 56 | } 57 | 58 | /** The function parses a 192 bits Metadata Init entry with below format: 59 | * BIT 0-7: TCAM Search Key Register (mi->tsr) 60 | * BIT 8-16: Header Offset (mi->ho) 61 | * BIT 17-24: Program Counter (mi->pc) 62 | * BIT 25-35: Parse Graph Root Node (mi->pg_rn) 63 | * BIT 36-38: Control Domain (mi->cd) 64 | * BIT 39: GPR_A Data Control (mi->gpr_a_ctrl) 65 | * BIT 40-44: GPR_A MDID.ID (mi->gpr_a_data_mdid) 66 | * BIT 45-48: GPR_A MDID.START (mi->gpr_a_data_start) 67 | * BIT 49-53: GPR_A MDID.LEN (mi->gpr_a_data_len) 68 | * BIT 54-55: reserved 69 | * BIT 56-59: GPR_A ID (mi->gpr_a_id) 70 | * BIT 60: GPR_B Data Control (mi->gpr_b_ctrl) 71 | * BIT 61-65: GPR_B MDID.ID (mi->gpr_b_data_mdid) 72 | * BIT 66-69: GPR_B MDID.START (mi->gpr_b_data_start) 73 | * BIT 70-74: GPR_B MDID.LEN (mi->gpr_b_data_len) 74 | * BIT 75-76: reserved 75 | * BIT 77-80: GPR_B ID (mi->gpr_a_id) 76 | * BIT 81: GPR_C Data Control (mi->gpr_c_ctrl) 77 | * BIT 82-86: GPR_C MDID.ID (mi->gpr_c_data_mdid) 78 | * BIT 87-90: GPR_C MDID.START (mi->gpr_c_data_start) 79 | * BIT 91-95: GPR_C MDID.LEN (mi->gpr_c_data_len) 80 | * BIT 96-97: reserved 81 | * BIT 98-101: GPR_C ID (mi->gpr_c_id) 82 | * BIT 102: GPR_D Data Control (mi->gpr_d_ctrl) 83 | * BIT 103-107:GPR_D MDID.ID (mi->gpr_d_data_mdid) 84 | * BIT 108-111:GPR_D MDID.START (mi->gpr_d_data_start) 85 | * BIT 112-116:GPR_D MDID.LEN (mi->gpr_d_data_len) 86 | * BIT 117-118:reserved 87 | * BIT 119-122:GPR_D ID (mi->gpr_d_id) 88 | * BIT 123-186:Flags (mi->flags) 89 | * BIT 187-191:rserved 90 | */ 91 | static void _metainit_parse_item(struct ice_hw *hw, u16 idx, void *item, 92 | void *data, int size) 93 | { 94 | struct ice_metainit_item *mi = item; 95 | u8 *buf = data; 96 | u64 d64; 97 | 98 | mi->idx = idx; 99 | d64 = *(u64 *)buf; 100 | 101 | mi->tsr = (u8)(d64 & 0xff); 102 | mi->ho = (u16)((d64 >> 8) & 0x1ff); 103 | mi->pc = (u16)((d64 >> 17) & 0xff); 104 | mi->pg_rn = (u16)((d64 >> 25) & 
0x3ff); 105 | mi->cd = (u16)((d64 >> 36) & 0x7); 106 | mi->gpr_a_ctrl = ((d64 >> 39) & 0x1) != 0; 107 | mi->gpr_a_data_mdid = (u8)((d64 >> 40) & 0x1f); 108 | mi->gpr_a_data_start = (u8)((d64 >> 45) & 0xf); 109 | mi->gpr_a_data_len = (u8)((d64 >> 49) & 0x1f); 110 | mi->gpr_a_id = (u8)((d64 >> 56) & 0xf); 111 | 112 | d64 = *(u64 *)&buf[7] >> 4; 113 | mi->gpr_b_ctrl = (d64 & 0x1) != 0; 114 | mi->gpr_b_data_mdid = (u8)((d64 >> 1) & 0x1f); 115 | mi->gpr_b_data_start = (u8)((d64 >> 6) & 0xf); 116 | mi->gpr_b_data_len = (u8)((d64 >> 10) & 0x1f); 117 | mi->gpr_b_id = (u8)((d64 >> 17) & 0xf); 118 | 119 | mi->gpr_c_ctrl = ((d64 >> 21) & 0x1) != 0; 120 | mi->gpr_c_data_mdid = (u8)((d64 >> 22) & 0x1f); 121 | mi->gpr_c_data_start = (u8)((d64 >> 27) & 0xf); 122 | mi->gpr_c_data_len = (u8)((d64 >> 31) & 0x1f); 123 | mi->gpr_c_id = (u8)((d64 >> 38) & 0xf); 124 | 125 | mi->gpr_d_ctrl = ((d64 >> 42) & 0x1) != 0; 126 | mi->gpr_d_data_mdid = (u8)((d64 >> 43) & 0x1f); 127 | mi->gpr_d_data_start = (u8)((d64 >> 48) & 0xf); 128 | mi->gpr_d_data_len = (u8)((d64 >> 52) & 0x1f); 129 | 130 | d64 = *(u64 *)&buf[14] >> 7; 131 | mi->gpr_d_id = (u8)(d64 & 0xf); 132 | 133 | d64 = *(u64 *)&buf[15] >> 3; 134 | mi->flags = d64; 135 | 136 | d64 = ((*(u64 *)&buf[16] >> 56) & 0x7); 137 | mi->flags |= (d64 << 61); 138 | 139 | if (hw->debug_mask & ICE_DBG_PARSER) 140 | ice_metainit_dump(hw, mi); 141 | } 142 | 143 | /** 144 | * ice_metainit_table_get - create a metainit table 145 | * @hw: pointer to the hardware structure 146 | */ 147 | struct ice_metainit_item *ice_metainit_table_get(struct ice_hw *hw) 148 | { 149 | return (struct ice_metainit_item *) 150 | ice_parser_create_table(hw, ICE_SID_RXPARSER_METADATA_INIT, 151 | sizeof(struct ice_metainit_item), 152 | ICE_METAINIT_TABLE_SIZE, 153 | ice_parser_sect_item_get, 154 | _metainit_parse_item, false); 155 | } 156 | -------------------------------------------------------------------------------- /src/ice_lib.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* Copyright (C) 2018-2025 Intel Corporation */ 3 | 4 | #ifndef _ICE_LIB_H_ 5 | #define _ICE_LIB_H_ 6 | 7 | #include "ice.h" 8 | 9 | /* Flags used for VSI configuration and rebuild */ 10 | #define ICE_VSI_FLAG_NO_INIT 0 11 | #define ICE_VSI_FLAG_INIT BIT(0) 12 | #define ICE_VSI_FLAG_RELOAD BIT(1) /* devlink reload action */ 13 | 14 | #define ICE_MAX_LARGE_RSS_QS 256 15 | #define ICE_MAX_MEDIUM_RSS_QS 64 16 | #define ICE_MAX_SMALL_RSS_QS 16 17 | 18 | const char *ice_vsi_type_str(enum ice_vsi_type vsi_type); 19 | 20 | bool ice_pf_state_is_nominal(struct ice_pf *pf); 21 | 22 | void ice_update_eth_stats(struct ice_vsi *vsi); 23 | 24 | int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx); 25 | 26 | int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, u16 q_idx); 27 | 28 | int ice_vsi_cfg_rxqs(struct ice_vsi *vsi); 29 | 30 | int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi); 31 | 32 | void ice_vsi_cfg_msix(struct ice_vsi *vsi); 33 | 34 | int ice_vsi_start_all_rx_rings(struct ice_vsi *vsi); 35 | 36 | int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi); 37 | 38 | int 39 | ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, 40 | u16 rel_vmvf_num); 41 | #ifdef HAVE_XDP_SUPPORT 42 | 43 | int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi); 44 | 45 | int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi); 46 | 47 | #endif /* HAVE_XDP_SUPPORT */ 48 | 49 | void ice_dis_sw_lldp(struct ice_pf *pf); 50 | bool 
ice_is_mc_lldp_eth_addr(const u8 *mac); 51 | void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create); 52 | 53 | int ice_set_link(struct ice_vsi *vsi, bool ena); 54 | 55 | void ice_vsi_delete_from_hw(struct ice_vsi *vsi); 56 | int ice_vsi_free(struct ice_vsi *vsi); 57 | void ice_vsi_put_qs(struct ice_vsi *vsi); 58 | 59 | void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc); 60 | 61 | int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc); 62 | 63 | int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi); 64 | 65 | int ice_get_valid_rss_size(struct ice_hw *hw, int new_size); 66 | int ice_vsi_set_dflt_rss_lut(struct ice_vsi *vsi, int req_rss_size); 67 | 68 | struct ice_vsi *ice_vsi_setup(struct ice_pf *pf, 69 | const struct ice_vsi_cfg_params *params); 70 | 71 | void ice_napi_del(struct ice_vsi *vsi); 72 | 73 | int ice_vsi_release(struct ice_vsi *vsi); 74 | 75 | void ice_vsi_close(struct ice_vsi *vsi); 76 | 77 | int ice_vsi_cfg(struct ice_vsi *vsi); 78 | 79 | int ice_ena_vsi(struct ice_vsi *vsi, bool locked); 80 | 81 | void ice_vsi_decfg(struct ice_vsi *vsi); 82 | 83 | void ice_dis_vsi(struct ice_vsi *vsi, bool locked); 84 | 85 | struct ice_res_tracker *ice_alloc_res_tracker(u16 size); 86 | 87 | int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id); 88 | 89 | u16 ice_get_free_res_count(struct ice_res_tracker *res); 90 | 91 | u16 ice_get_valid_res_count(struct ice_res_tracker *res); 92 | 93 | int 94 | ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id); 95 | 96 | int ice_vsi_rebuild(struct ice_vsi *vsi, u32 flags); 97 | 98 | bool ice_is_reset_in_progress(unsigned long *state); 99 | int ice_wait_for_reset(struct ice_pf *pf, unsigned long timeout); 100 | 101 | void ice_vsi_dis_irq(struct ice_vsi *vsi); 102 | 103 | void ice_vsi_free_irq(struct ice_vsi *vsi); 104 | 105 | void ice_vsi_free_rx_rings(struct ice_vsi *vsi); 106 | 107 | void ice_vsi_free_tx_rings(struct ice_vsi *vsi); 108 | 109 | void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena); 110 | 111 | void ice_vsi_cfg_crc_strip(struct ice_vsi *vsi, bool disable); 112 | 113 | void ice_update_tx_ring_stats(struct ice_tx_ring *ring, u64 pkts, u64 bytes); 114 | 115 | void ice_update_rx_ring_stats(struct ice_rx_ring *ring, u64 pkts, u64 bytes); 116 | 117 | void ice_vsi_cfg_frame_size(struct ice_vsi *vsi); 118 | 119 | void 120 | ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio, 121 | bool __maybe_unused ena_ts); 122 | 123 | #ifdef HAVE_NETPOLL_CONTROLLER 124 | irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data); 125 | #endif /* HAVE_NETPOLL_CONTROLLER */ 126 | 127 | void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl); 128 | void ice_write_itr(struct ice_ring_container *rc, u16 itr); 129 | void ice_set_q_vector_intrl(struct ice_q_vector *q_vector); 130 | void ice_vsi_get_q_vector_q_base(struct ice_vsi *vsi, u16 vector_id, u16 *txq, 131 | u16 *rxq); 132 | 133 | int ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set); 134 | bool ice_is_safe_mode(struct ice_pf *pf); 135 | bool ice_is_aux_ena(struct ice_pf *pf); 136 | bool ice_is_rdma_ena(struct ice_pf *pf); 137 | bool ice_is_vsi_dflt_vsi(struct ice_vsi *vsi); 138 | int ice_set_dflt_vsi(struct ice_vsi *vsi); 139 | int ice_clear_dflt_vsi(struct ice_vsi *vsi); 140 | int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate); 141 | int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate); 142 | int ice_get_link_speed_kbps(struct ice_vsi *vsi); 143 | int 
ice_get_link_speed_mbps(struct ice_vsi *vsi); 144 | int ice_vsi_update_security(struct ice_vsi *vsi, 145 | void (*fill)(struct ice_vsi_ctx *)); 146 | #ifdef HAVE_METADATA_PORT_INFO 147 | void ice_vsi_ctx_set_antispoof(struct ice_vsi_ctx *ctx); 148 | void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx); 149 | #endif /* HAVE_METADATA_PORT_INFO */ 150 | void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx); 151 | void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx); 152 | int ice_vsi_update_local_lb(struct ice_vsi *vsi, bool set); 153 | int ice_vsi_config_prune(struct ice_vsi *vsi, bool set); 154 | #ifndef HAVE_NETDEV_MIN_MAX_MTU 155 | int ice_check_mtu_valid(struct net_device *netdev, int new_mtu); 156 | #endif /* !HAVE_NETDEV_MIN_MAX_MTU */ 157 | int ice_vsi_add_vlan_zero(struct ice_vsi *vsi); 158 | int ice_vsi_del_vlan_zero(struct ice_vsi *vsi); 159 | bool ice_vsi_has_non_zero_vlans(struct ice_vsi *vsi); 160 | u16 ice_vsi_num_non_zero_vlans(struct ice_vsi *vsi); 161 | bool ice_is_feature_supported(struct ice_pf *pf, enum ice_feature f); 162 | void ice_set_feature_support(struct ice_pf *pf, enum ice_feature f); 163 | void ice_clear_feature_support(struct ice_pf *pf, enum ice_feature f); 164 | void ice_init_feature_support(struct ice_pf *pf); 165 | int ice_normalize_cpu_count(int num_cpus); 166 | bool ice_vsi_is_rx_queue_active(struct ice_vsi *vsi); 167 | void ice_vsi_free_rss_global_lut(struct ice_vsi *vsi); 168 | int ice_vsi_alloc_rss_global_lut(struct ice_vsi *vsi); 169 | ssize_t 170 | ice_vsi_alloc_rss_lut(struct ice_hw *hw, struct device *dev, 171 | struct ice_vsi *vsi, const char *buf, size_t count); 172 | u16 ice_lut_type_to_qs_num(enum ice_lut_type lut_type); 173 | #endif /* !_ICE_LIB_H_ */ 174 | --------------------------------------------------------------------------------
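The layout comment above _metainit_parse_item() in ice_metainit.c doubles as a specification of how each 24-byte (192-bit) Metadata Init entry is unpacked: load part of the buffer as a 64-bit word, then shift and mask out each field at the bit offsets listed. The standalone sketch below repeats that pattern for a few of the low-order fields. It is illustrative only: the struct, function, and sample values are hypothetical, plain C99 types stand in for the kernel's u8/u16/u64, and, like the driver's direct u64 load, it assumes a little-endian host.

/*
 * Standalone sketch (not driver code): unpack a few low-order fields of a
 * 192-bit Metadata Init entry, following the bit layout documented above
 * _metainit_parse_item(). Hypothetical names; little-endian host assumed.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct metainit_low_bits {
	uint8_t  tsr;             /* BIT 0-7:   TCAM search key register */
	uint16_t ho;              /* BIT 8-16:  header offset */
	uint16_t pc;              /* BIT 17-24: program counter */
	uint8_t  gpr_a_ctrl;      /* BIT 39:    GPR_A data control */
	uint8_t  gpr_a_data_mdid; /* BIT 40-44: GPR_A MDID.ID */
	uint8_t  gpr_a_id;        /* BIT 56-59: GPR_A ID */
};

static void unpack_low_bits(struct metainit_low_bits *mi, const uint8_t *buf)
{
	uint64_t d64;

	/* memcpy() avoids the unaligned load of a raw pointer cast. */
	memcpy(&d64, buf, sizeof(d64));

	mi->tsr             = (uint8_t)(d64 & 0xff);
	mi->ho              = (uint16_t)((d64 >> 8) & 0x1ff);
	mi->pc              = (uint16_t)((d64 >> 17) & 0xff);
	mi->gpr_a_ctrl      = (uint8_t)((d64 >> 39) & 0x1);
	mi->gpr_a_data_mdid = (uint8_t)((d64 >> 40) & 0x1f);
	mi->gpr_a_id        = (uint8_t)((d64 >> 56) & 0xf);
}

int main(void)
{
	/* 24-byte sample entry packing tsr = 0x2a, ho = 3, gpr_a_id = 5. */
	uint8_t entry[24] = { 0x2a, 0x03, 0, 0, 0, 0, 0, 0x05 };
	struct metainit_low_bits mi;

	unpack_low_bits(&mi, entry);
	printf("tsr=%d ho=%d pc=%d gpr_a_ctrl=%d mdid=%d id=%d\n",
	       mi.tsr, mi.ho, mi.pc, mi.gpr_a_ctrl,
	       mi.gpr_a_data_mdid, mi.gpr_a_id);
	return 0;
}

The driver applies the same shift-and-mask pattern to the remaining GPR_B, GPR_C, GPR_D and flags fields, re-loading 64-bit windows at byte offsets 7, 14, 15 and 16 so that each group of fields fits within a single word.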