├── prestera ├── prestera_fw_log.h ├── prestera_flow.h ├── prestera_dcb.h ├── prestera_debugfs.h ├── prestera_storm_control.h ├── prestera_ethtool.h ├── prestera_shm.h ├── prestera_rxtx.h ├── prestera_span.h ├── prestera_drv_ver.h ├── Makefile ├── Kconfig ├── prestera_switchdev.h ├── prestera_qdisc.h ├── prestera_counter.h ├── prestera_flower.h ├── prestera_devlink.h ├── prestera_ct.h ├── prestera_dsa.h ├── prestera_log.h ├── prestera_fw.h ├── prestera_matchall.c ├── prestera_router_hw.h ├── prestera_storm_control.c ├── prestera_span.c ├── prestera_flow.c ├── prestera_log.c ├── prestera_acl.h ├── prestera_dsa.c ├── prestera_dcb.c ├── prestera_counter.c ├── prestera_fw.c ├── prestera_fw_log.c ├── prestera_pci.c ├── prestera_debugfs.c ├── prestera_shm.c ├── prestera_qdisc.c └── prestera_flower.c ├── .github └── ISSUE_TEMPLATE │ ├── feature_request.md │ └── bug_report.md └── README.md /prestera/prestera_fw_log.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ 2 | /* Copyright (c) 2019-2021 Marvell International Ltd. All rights reserved. */ 3 | 4 | #ifndef _MVSW_PRESTERA_FW_LOG_H_ 5 | #define _MVSW_PRESTERA_FW_LOG_H_ 6 | 7 | #include "prestera.h" 8 | 9 | int mvsw_pr_fw_log_init(struct prestera_switch *sw); 10 | void mvsw_pr_fw_log_fini(struct prestera_switch *sw); 11 | 12 | #endif /* _MVSW_PRESTERA_FW_LOG_H_ */ 13 | -------------------------------------------------------------------------------- /prestera/prestera_flow.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ 2 | /* Copyright (c) 2020 Marvell International Ltd. All rights reserved. */ 3 | 4 | #ifndef _PRESTERA_FLOW_H_ 5 | #define _PRESTERA_FLOW_H_ 6 | 7 | #include 8 | 9 | struct prestera_port; 10 | 11 | int prestera_flow_block_setup(struct prestera_port *port, 12 | struct flow_block_offload *f); 13 | 14 | #endif /* _PRESTERA_FLOW_H_ */ 15 | -------------------------------------------------------------------------------- /prestera/prestera_dcb.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ 2 | /* Copyright (c) 2022 Marvell International Ltd. All rights reserved. */ 3 | 4 | #ifndef _PRESTERA_DCB_H_ 5 | #define _PRESTERA_DCB_H_ 6 | 7 | #include 8 | 9 | struct prestera_port; 10 | 11 | int prestera_port_dcb_init(struct prestera_port *port); 12 | void prestera_port_dcb_fini(struct prestera_port *port); 13 | 14 | #endif /* _PRESTERA_DCB_H_ */ 15 | -------------------------------------------------------------------------------- /prestera/prestera_debugfs.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ 2 | /* Copyright (c) 2019-2021 Marvell International Ltd. All rights reserved. 
*/ 3 | 4 | #ifndef _MVSW_PRESTERA_DEBUGFS_H_ 5 | #define _MVSW_PRESTERA_DEBUGFS_H_ 6 | 7 | struct prestera_switch; 8 | 9 | int prestera_debugfs_init(struct prestera_switch *sw); 10 | void prestera_debugfs_fini(struct prestera_switch *sw); 11 | 12 | #endif /* _MVSW_PRESTERA_DEBUGFS_H_ */ 13 | -------------------------------------------------------------------------------- /prestera/prestera_storm_control.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ 2 | /* Copyright (c) 2019-2021 Marvell International Ltd. All rights reserved. */ 3 | 4 | #ifndef _MVSW_PRESTERA_STORM_CONTROL_H_ 5 | #define _MVSW_PRESTERA_STORM_CONTROL_H_ 6 | 7 | #include "prestera.h" 8 | 9 | int prestera_storm_control_init(struct prestera_switch *sw); 10 | void prestera_storm_control_fini(struct prestera_switch *sw); 11 | 12 | #endif /* _MVSW_PRESTERA_STORM_CONTROL_H_ */ 13 | -------------------------------------------------------------------------------- /prestera/prestera_ethtool.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ 2 | /* Copyright (c) 2019-2021 Marvell International Ltd. All rights reserved. */ 3 | 4 | #ifndef __PRESTERA_ETHTOOL_H_ 5 | #define __PRESTERA_ETHTOOL_H_ 6 | 7 | #include 8 | 9 | #include "prestera.h" 10 | 11 | extern const struct ethtool_ops prestera_ethtool_ops; 12 | 13 | int prestera_port_link_mode_set(struct prestera_port *port, 14 | u32 speed, u8 duplex, u8 type); 15 | 16 | #endif /* _PRESTERA_ETHTOOL_H_ */ 17 | -------------------------------------------------------------------------------- /prestera/prestera_shm.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ 2 | /* Copyright (c) 2019-2021 Marvell International Ltd. All rights reserved */ 3 | 4 | #ifndef PRESTERA_SHM_H_ 5 | #define PRESTERA_SHM_H_ 6 | 7 | #define PRESTERA_SHM_INTERRUPT_IOC_MAGIC 's' 8 | #define PRESTERA_SHM_INIT_IOC_MAGIC 'i' 9 | #define PRESTERA_SHM_BARRIER_IOC_MAGIC 'b' 10 | 11 | #define PRESTERA_SHM_INTERRUPT _IOW(PRESTERA_SHM_INTERRUPT_IOC_MAGIC, 0, __u32) 12 | #define PRESTERA_SHM_INIT _IOW(PRESTERA_SHM_INIT_IOC_MAGIC, 0, __u32) 13 | #define PRESTERA_SHM_DEVNAME "prestera_shm" 14 | 15 | #endif 16 | -------------------------------------------------------------------------------- /prestera/prestera_rxtx.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ 2 | /* Copyright (c) 2019-2021 Marvell International Ltd. All rights reserved. */ 3 | 4 | #ifndef _MVSW_PRESTERA_RXTX_H_ 5 | #define _MVSW_PRESTERA_RXTX_H_ 6 | 7 | #include 8 | 9 | #define MVSW_PR_RXTX_CPU_CODE_MAX_NUM 256 10 | 11 | struct prestera_switch; 12 | 13 | int prestera_rxtx_switch_init(struct prestera_switch *sw); 14 | void prestera_rxtx_switch_fini(struct prestera_switch *sw); 15 | 16 | netdev_tx_t prestera_rxtx_xmit(struct sk_buff *skb, struct prestera_port *port); 17 | 18 | u64 mvsw_pr_rxtx_get_cpu_code_stats(u8 cpu_code); 19 | 20 | #endif /* _MVSW_PRESTERA_RXTX_H_ */ 21 | -------------------------------------------------------------------------------- /prestera/prestera_span.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ 2 | /* Copyright (c) 2019-2020 Marvell International Ltd. 
All rights reserved. */ 3 | 4 | #ifndef _PRESTERA_SPAN_H_ 5 | #define _PRESTERA_SPAN_H_ 6 | 7 | #include 8 | 9 | #define PRESTERA_SPAN_INVALID_ID -1 10 | 11 | struct prestera_switch; 12 | struct prestera_flow_block; 13 | 14 | int prestera_span_init(struct prestera_switch *sw); 15 | void prestera_span_fini(struct prestera_switch *sw); 16 | int prestera_span_replace(struct prestera_flow_block *block, 17 | struct tc_cls_matchall_offload *f); 18 | void prestera_span_destroy(struct prestera_flow_block *block); 19 | 20 | #endif /* _PRESTERA_SPAN_H_ */ 21 | -------------------------------------------------------------------------------- /prestera/prestera_drv_ver.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ 2 | /* Copyright (c) 2019-2021 Marvell International Ltd. All rights reserved. */ 3 | 4 | #ifndef _PRESTERA_DRV_VER_H_ 5 | #define _PRESTERA_DRV_VER_H_ 6 | 7 | #include 8 | 9 | /* Prestera driver version */ 10 | #define PRESTERA_DRV_VER_MAJOR 2 11 | #define PRESTERA_DRV_VER_MINOR 0 12 | #define PRESTERA_DRV_VER_PATCH 0 13 | #define PRESTERA_DRV_VER_EXTRA 14 | 15 | #define PRESTERA_DRV_VER \ 16 | __stringify(PRESTERA_DRV_VER_MAJOR) "." \ 17 | __stringify(PRESTERA_DRV_VER_MINOR) "." \ 18 | __stringify(PRESTERA_DRV_VER_PATCH) \ 19 | __stringify(PRESTERA_DRV_VER_EXTRA) 20 | 21 | #endif /* _PRESTERA_DRV_VER_H_ */ 22 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: "[Feature Request]" 5 | labels: enhancement 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Does the Linux kernel (LK) already support this feature?** 14 | A clear and concise description of what the LK supports and whether it requires specific user-space application support. 15 | 16 | **Describe the solution you'd like** 17 | A clear and concise description of what you want to happen. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /prestera/Makefile: -------------------------------------------------------------------------------- 1 | # SPDX-License-Identifier: GPL-2.0 2 | # 3 | # Makefile for the Marvell Switch driver. 
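# A typical out-of-tree kbuild invocation (illustrative assumption, not taken from this repository's documentation):
#   make -C /lib/modules/$(uname -r)/build M=$(pwd) CONFIG_PRESTERA=m CONFIG_PRESTERA_PCI=m CONFIG_PRESTERA_SHM=m modules
# With the object lists below this yields prestera.ko, prestera_pci.ko and, when CONFIG_PRESTERA_SHM is enabled, prestera_shm.ko.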
4 | # 5 | 6 | obj-$(CONFIG_PRESTERA) += prestera.o 7 | prestera-objs := prestera_main.o \ 8 | prestera_hw.o prestera_switchdev.o prestera_devlink.o prestera_fw_log.o \ 9 | prestera_rxtx.o prestera_dsa.o prestera_router.o \ 10 | prestera_acl.o prestera_flow.o prestera_flower.o prestera_matchall.o prestera_debugfs.o \ 11 | prestera_ct.o prestera_ethtool.o prestera_counter.o \ 12 | prestera_fw.o prestera_router_hw.o prestera_dcb.o prestera_qdisc.o 13 | 14 | prestera-$(CONFIG_PRESTERA_DEBUG) += prestera_log.o 15 | ccflags-$(CONFIG_PRESTERA_DEBUG) += -DCONFIG_MRVL_PRESTERA_DEBUG 16 | 17 | obj-$(CONFIG_PRESTERA_PCI) += prestera_pci.o 18 | obj-$(CONFIG_PRESTERA_SHM) += prestera_shm.o 19 | -------------------------------------------------------------------------------- /prestera/Kconfig: -------------------------------------------------------------------------------- 1 | # SPDX-License-Identifier: GPL-2.0-only 2 | # 3 | # Marvell Prestera drivers configuration 4 | # 5 | 6 | config PRESTERA 7 | tristate "Marvell Prestera Switch ASICs support" 8 | depends on NET_SWITCHDEV && VLAN_8021Q 9 | depends on BRIDGE || BRIDGE=n 10 | select NET_DEVLINK 11 | help 12 | This driver supports Marvell Prestera Switch ASICs family. 13 | 14 | To compile this driver as a module, choose M here: the 15 | module will be called prestera. 16 | 17 | config PRESTERA_PCI 18 | tristate "PCI interface driver for Marvell Prestera Switch ASICs family" 19 | depends on PCI && HAS_IOMEM && PRESTERA 20 | default PRESTERA 21 | help 22 | This is implementation of PCI interface support for Marvell Prestera 23 | Switch ASICs family. 24 | 25 | To compile this driver as a module, choose M here: the 26 | module will be called prestera_pci. 27 | -------------------------------------------------------------------------------- /prestera/prestera_switchdev.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ 2 | /* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved. */ 3 | 4 | #ifndef _PRESTERA_SWITCHDEV_H_ 5 | #define _PRESTERA_SWITCHDEV_H_ 6 | 7 | int prestera_switchdev_init(struct prestera_switch *sw); 8 | void prestera_switchdev_fini(struct prestera_switch *sw); 9 | 10 | int prestera_port_bridge_join(struct prestera_port *port, 11 | struct net_device *brport_dev, 12 | struct net_device *br_dev, 13 | struct netlink_ext_ack *extack); 14 | 15 | void prestera_port_bridge_leave(struct prestera_port *port, 16 | struct net_device *brport_dev, 17 | struct net_device *br_dev); 18 | 19 | bool prestera_bridge_is_offloaded(const struct prestera_switch *sw, 20 | const struct net_device *br_dev); 21 | 22 | int prestera_bridge_port_down(struct prestera_port *port); 23 | 24 | #endif /* _PRESTERA_SWITCHDEV_H_ */ 25 | -------------------------------------------------------------------------------- /prestera/prestera_qdisc.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ 2 | /* Copyright (c) 2019-2021 Marvell International Ltd. All rights reserved. 
*/ 3 | /* tc qdisc add dev sw1p1 root handle 1: ets bands 8 strict 8 */ 4 | #ifndef _PRESTERA_ETS_H_ 5 | #define _PRESTERA_ETS_H_ 6 | 7 | #include 8 | #include 9 | 10 | struct prestera_port; 11 | 12 | int prestera_setup_tc_ets(struct prestera_port *port, 13 | struct tc_ets_qopt_offload *p); 14 | int prestera_setup_tc_tbf(struct prestera_port *port, 15 | struct tc_tbf_qopt_offload *p); 16 | int prestera_setup_tc_red(struct prestera_port *port, 17 | struct tc_red_qopt_offload *p); 18 | 19 | int prestera_qdisc_init(struct prestera_switch *sw); 20 | int prestera_qdisc_fini(struct prestera_switch *sw); 21 | int prestera_qdisc_port_init(struct prestera_port *port); 22 | void prestera_qdisc_port_fini(struct prestera_port *port); 23 | 24 | #endif /* _PRESTERA_ETS_H_ */ 25 | -------------------------------------------------------------------------------- /prestera/prestera_counter.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ 2 | /* Copyright (c) 2021 Marvell International Ltd. All rights reserved. */ 3 | 4 | #ifndef _PRESTERA_COUNTER_H_ 5 | #define _PRESTERA_COUNTER_H_ 6 | 7 | #include 8 | 9 | struct prestera_counter_stats { 10 | u64 packets; 11 | u64 bytes; 12 | }; 13 | 14 | struct prestera_counter_block; 15 | 16 | int prestera_counter_init(struct prestera_switch *sw); 17 | void prestera_counter_fini(struct prestera_switch *sw); 18 | 19 | int prestera_counter_get(struct prestera_counter *counter, u32 client, 20 | struct prestera_counter_block **block, 21 | u32 *counter_id); 22 | void prestera_counter_put(struct prestera_counter *counter, 23 | struct prestera_counter_block *block, u32 counter_id); 24 | int prestera_counter_stats_get(struct prestera_counter *counter, 25 | struct prestera_counter_block *block, 26 | u32 counter_id, u64 *packets, u64 *bytes); 27 | 28 | #endif /* _PRESTERA_COUNTER_H_ */ 29 | -------------------------------------------------------------------------------- /prestera/prestera_flower.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ 2 | /* Copyright (c) 2020 Marvell International Ltd. All rights reserved. */ 3 | 4 | #ifndef _PRESTERA_FLOWER_H_ 5 | #define _PRESTERA_FLOWER_H_ 6 | 7 | #include 8 | 9 | struct prestera_flow_block; 10 | 11 | int prestera_flower_replace(struct prestera_flow_block *block, 12 | struct flow_cls_offload *f); 13 | void prestera_flower_destroy(struct prestera_flow_block *block, 14 | struct flow_cls_offload *f); 15 | int prestera_flower_stats(struct prestera_flow_block *block, 16 | struct flow_cls_offload *f); 17 | int prestera_flower_tmplt_create(struct prestera_flow_block *block, 18 | struct flow_cls_offload *f); 19 | void prestera_flower_tmplt_destroy(struct prestera_flow_block *block, 20 | struct flow_cls_offload *f); 21 | void prestera_flower_template_cleanup(struct prestera_flow_block *block); 22 | int prestera_flower_prio_get(struct prestera_flow_block *block, 23 | u32 *prio); 24 | 25 | #endif /* _PRESTERA_FLOWER_H_ */ 26 | -------------------------------------------------------------------------------- /prestera/prestera_devlink.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ 2 | /* Copyright (c) 2019-2021 Marvell International Ltd. All rights reserved. 
*/ 3 | 4 | #ifndef _PRESTERA_DEVLINK_H_ 5 | #define _PRESTERA_DEVLINK_H_ 6 | 7 | #include "prestera.h" 8 | 9 | struct prestera_switch *prestera_devlink_alloc(struct prestera_device *dev); 10 | void prestera_devlink_free(struct prestera_switch *sw); 11 | 12 | int prestera_devlink_register(struct prestera_switch *sw); 13 | void prestera_devlink_unregister(struct prestera_switch *sw); 14 | 15 | int prestera_devlink_port_register(struct prestera_port *port); 16 | void prestera_devlink_port_unregister(struct prestera_port *port); 17 | 18 | void prestera_devlink_port_set(struct prestera_port *port); 19 | void prestera_devlink_port_clear(struct prestera_port *port); 20 | 21 | struct devlink_port *prestera_devlink_get_port(struct net_device *dev); 22 | 23 | void prestera_devlink_trap_report(struct prestera_port *port, 24 | struct sk_buff *skb, u8 cpu_code); 25 | 26 | #endif /* _PRESTERA_DEVLINK_H_ */ 27 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 1. Configure '...' 16 | 2. Send/Receive traffic '....' 17 | 3. See error 18 | 19 | **Expected behavior** 20 | A clear and concise description of what you expected to happen. 21 | 22 | **Screenshots / Console Captures** 23 | If applicable, add screenshots to help explain your problem. 24 | 25 | **LK, Drivers and Distribution versions (please complete the following information):** 26 | - LK ver: [e.g. 5.10] 27 | - Marvell Switchdev KO ver [e.g. v3.1.1] 28 | - Marvell Switchdev FW FLASH ver [e.g. v3.1.0] 29 | - DENT Ver [e.g.DENT 1.0] 30 | 31 | **HW Platform (please complete the following information):** 32 | - HW ODM: [e.g. Accton, Delta] 33 | - Platform Mark: [e.g. AS4564, AS5114] 34 | 35 | **Additional context** 36 | Add any other context about the problem here. 37 | -------------------------------------------------------------------------------- /prestera/prestera_ct.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ 2 | /* Copyright (c) 2019-2021 Marvell International Ltd. All rights reserved. 
*/ 3 | 4 | #ifndef _PRESTERA_CT_H_ 5 | #define _PRESTERA_CT_H_ 6 | 7 | #include 8 | #include 9 | #include 10 | 11 | struct prestera_switch; 12 | struct prestera_ct_ft; 13 | struct prestera_ct_priv; 14 | 15 | struct prestera_ct_attr { 16 | u16 zone; 17 | u16 ct_action; 18 | struct net *net; 19 | struct prestera_ct_ft *ft; 20 | struct nf_flowtable *nf_ft; 21 | }; 22 | 23 | struct prestera_ct_priv *prestera_ct_init(struct prestera_acl *acl); 24 | void prestera_ct_clean(struct prestera_ct_priv *ct_priv); 25 | 26 | /* match & action */ 27 | int prestera_ct_match_parse(struct flow_cls_offload *f, 28 | struct netlink_ext_ack *extack); 29 | int prestera_ct_parse_action(const struct flow_action_entry *act, 30 | struct prestera_acl_rule *rule, 31 | struct netlink_ext_ack *extack); 32 | 33 | /* flowtable */ 34 | int prestera_ct_ft_offload_add_cb(struct prestera_switch *sw, 35 | struct prestera_acl_rule *rule); 36 | void prestera_ct_ft_offload_del_cb(struct prestera_switch *sw, 37 | struct prestera_acl_rule *rule); 38 | 39 | #endif /* _PRESTERA_CT_H_ */ 40 | -------------------------------------------------------------------------------- /prestera/prestera_dsa.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ 2 | /* Copyright (c) 2019-2021 Marvell International Ltd. All rights reserved. */ 3 | 4 | #ifndef _MVSW_PRESTERA_DSA_H_ 5 | #define _MVSW_PRESTERA_DSA_H_ 6 | 7 | #include 8 | 9 | #define MVSW_PR_DSA_HLEN 16 10 | 11 | enum mvsw_pr_dsa_cmd { 12 | /* DSA command is "To CPU" */ 13 | MVSW_NET_DSA_CMD_TO_CPU_E = 0, 14 | 15 | /* DSA command is "FROM CPU" */ 16 | MVSW_NET_DSA_CMD_FROM_CPU_E, 17 | }; 18 | 19 | struct mvsw_pr_dsa_common { 20 | /* the value vlan priority tag (APPLICABLE RANGES: 0..7) */ 21 | u8 vpt; 22 | 23 | /* CFI bit of the vlan tag (APPLICABLE RANGES: 0..1) */ 24 | u8 cfi_bit; 25 | 26 | /* Vlan id */ 27 | u16 vid; 28 | }; 29 | 30 | struct mvsw_pr_dsa_to_cpu { 31 | bool is_tagged; 32 | u32 hw_dev_num; 33 | bool src_is_trunk; 34 | u8 cpu_code; 35 | struct { 36 | u16 src_trunk_id; 37 | u32 port_num; 38 | u32 eport; 39 | } iface; 40 | }; 41 | 42 | struct mvsw_pr_dsa_from_cpu { 43 | struct prestera_iface dst_iface; /* vid/port */ 44 | bool egr_filter_en; 45 | bool egr_filter_registered; 46 | u32 src_id; 47 | u32 src_hw_dev; 48 | u32 dst_eport; /* for port but not for vid */ 49 | }; 50 | 51 | struct mvsw_pr_dsa { 52 | struct mvsw_pr_dsa_common common_params; 53 | enum mvsw_pr_dsa_cmd dsa_cmd; 54 | union { 55 | struct mvsw_pr_dsa_to_cpu to_cpu; 56 | struct mvsw_pr_dsa_from_cpu from_cpu; 57 | } dsa_info; 58 | }; 59 | 60 | int mvsw_pr_dsa_parse(const u8 *dsa_bytes_ptr, 61 | struct mvsw_pr_dsa *dsa_info_ptr); 62 | int mvsw_pr_dsa_build(const struct mvsw_pr_dsa *dsa_info_ptr, 63 | u8 *dsa_bytes_ptr); 64 | 65 | #endif /* _MVSW_PRESTERA_DSA_H_ */ 66 | -------------------------------------------------------------------------------- /prestera/prestera_log.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ 2 | /* Copyright (c) 2019-2021 Marvell International Ltd. All rights reserved. 
*/ 3 | 4 | #ifndef _MVSW_PRESTERA_LOG_H_ 5 | #define _MVSW_PRESTERA_LOG_H_ 6 | 7 | #ifdef CONFIG_MRVL_PRESTERA_DEBUG 8 | 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | 16 | #define DEF_ENUM_MAP(enum_name) \ 17 | static const char *enum_name##_map[] 18 | 19 | #define DEF_ENUM_FUNC(enum_name, enum_min, enum_max) \ 20 | const char *enum_name##_to_name(enum enum_name val) \ 21 | { \ 22 | if (val < enum_min || val > enum_max) \ 23 | return unknown; \ 24 | return enum_name##_map[val]; \ 25 | } 26 | 27 | #define DEC_ENUM_FUNC(enum_name) \ 28 | const char *enum_name##_to_name(enum enum_name) 29 | 30 | #define ENUM_TO_NAME(enum_name, val) enum_name##_to_name(val) 31 | 32 | #define MVSW_LOG_INFO(fmt, ...) \ 33 | pr_info("%s:%d: " fmt "\n", __func__, __LINE__, ##__VA_ARGS__) 34 | 35 | #define MVSW_LOG_ERROR(fmt, ...) \ 36 | pr_err("%s:%d: " fmt "\n", __func__, __LINE__, ##__VA_ARGS__) 37 | 38 | DEC_ENUM_FUNC(netdev_cmd); 39 | DEC_ENUM_FUNC(switchdev_notifier_type); 40 | DEC_ENUM_FUNC(switchdev_attr_id); 41 | DEC_ENUM_FUNC(switchdev_obj_id); 42 | DEC_ENUM_FUNC(fib_event_type); 43 | DEC_ENUM_FUNC(netevent_notif_type); 44 | DEC_ENUM_FUNC(tc_setup_type); 45 | DEC_ENUM_FUNC(flow_block_binder_type); 46 | DEC_ENUM_FUNC(tc_matchall_command); 47 | DEC_ENUM_FUNC(flow_cls_command); 48 | DEC_ENUM_FUNC(flow_action_id); 49 | 50 | #else /* CONFIG_MRVL_PRESTERA_DEBUG */ 51 | #define MVSW_LOG_INFO(...) 52 | #define MVSW_LOG_ERROR(...) 53 | #endif /* CONFIG_MRVL_PRESTERA_DEBUG */ 54 | 55 | #endif /* _MVSW_PRESTERA_LOG_H_ */ 56 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ⚠️ **Notice: This project is no longer maintained.** ⚠️ 2 | 3 | Please be aware that this repository is no longer actively maintained, and no further updates or security patches will be provided. 4 | We recommend seeking alternative solutions or forking the repository if you wish to continue development. 5 | 6 | # Switchdev-prestera 7 | Marvell Prestera Switchdev Repository. 8 | 9 | For more information, please check our [wiki](../../wiki). 10 | 11 | Features by Linux kernel version 12 | 13 | | Kernel Version | Features | 14 | | :-----------: | ------------- | 15 | | 5.10 | Initial submission, support for Marvell Prestera 98DX326x. 
Features: [VLAN-aware/unaware bridge offloading, FDB](../../wiki/bridge-and-vlan), [Switchport configuration](../../wiki/switch-port-configuration)| 16 | | 5.13 | Support for 98DX3265 | 17 | | 5.14 | Add support for [LAG](../../wiki/link-aggregation-(lag)), [Devlink traps](../../wiki/Devlink), [ACL](../../wiki/ACL)| 18 | | 5.17 | Migrate to new vTCAM API, ACL stats support, [flower template support](../../wiki/Chain-Support#chain-template-support), CPU routing | 19 | | 5.18 | [Multi chain support](../../wiki/Chain-Support), Offload "local" and "blackhole" routes | 20 | | 5.19 | TC hardware stats support, [Traffic Policing](../../wiki/Traffic-Policing) | 21 | | 6.0 | Bridge MDB offloading, [PhyLink support](https://www.kernel.org/doc/html/latest/networking/sfp-phylink.html), Support for Aldrin2 (98DX8525) | 22 | | 6.1 | [L3 offloading](https://github.com/Marvell-switching/switchdev-prestera/wiki/l3-configuration) only for switchports | 23 | | 6.2 | Support for AC5X family devices | 24 | 25 | Features by driver version 26 | 27 | 28 | | Driver Version | Features | 29 | | ------------- | ------------- | 30 | | 2.6.0 (dentOS v1.0 Arthur) | Based on Linux kernel 5.6. Features: [VLAN-aware/unaware bridge offloading, FDB](../../wiki/bridge-and-vlan), [Switchport configuration](../../wiki/switch-port-configuration), [LAG](../../wiki/link-aggregation-(lag)), [STP](../../wiki/STP-Configuration), [LLDP](../../wiki/link-layer-discovery-protocol-(lldp)), [IPv4 routing](../../wiki/static-route), [ECMP](../../wiki/equal-cost-multi-path-(ecmp)), [VRRP](../../wiki/virtual-router-redundancy-protocol-(vrrp)), [ACL](../../wiki/ACL), [Devlink traps](../../wiki/Devlink)| 31 | | 3.1.1 (dentOS v2 Beeblebrox) | Based on Linux kernel 5.10. New features: [NAT](../../wiki/NAT-overview), [Multi chain support](../../wiki/Chain-Support), [Chain Templates](../../wiki/Chain-Support#chain-template-support), [PhyLink support](https://www.kernel.org/doc/html/latest/networking/sfp-phylink.html) | 32 | | 3.2.2 (dentOS v2.5) | Based on Linux kernel 5.15. Feature equivalent to DENT 2.5 | 33 | | 4.0.3 | Based on Linux kernel 5.15. [DHCP](https://github.com/Marvell-switching/switchdev-prestera/wiki/DHCP), [Port Isolation](https://github.com/Marvell-switching/switchdev-prestera/wiki/port-isolation), [IGMP Snooping MDB](https://github.com/Marvell-switching/switchdev-prestera/wiki/IGMP-Snooping-MDB), [SPAN](https://github.com/Marvell-switching/switchdev-prestera/wiki/SPAN), [IPv6](https://github.com/Marvell-switching/switchdev-prestera/wiki/l3-configuration#ipv6), [QoS](https://github.com/Marvell-switching/switchdev-prestera/wiki/QoS) | 34 | 35 | Please use GitHub [issues](../../issues) to report issues or request new enhancements. 36 | -------------------------------------------------------------------------------- /prestera/prestera_fw.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ 2 | /* Copyright (c) 2019-2021 Marvell International Ltd. 
All rights reserved */ 3 | 4 | #ifndef PRESTERA_FW_H_ 5 | #define PRESTERA_FW_H_ 6 | 7 | #define PRESTERA_EVT_QNUM_MAX 4 8 | #define PRESTERA_CMD_QNUM_MAX 4 9 | 10 | struct prestera_fw_evtq_regs { 11 | u32 rd_idx; 12 | u32 pad1; 13 | u32 wr_idx; 14 | u32 pad2; 15 | u32 offs; 16 | u32 len; 17 | }; 18 | 19 | struct prestera_fw_cmdq_regs { 20 | u32 cmd_req_ctl; 21 | u32 cmd_req_len; 22 | u32 cmd_rcv_ctl; 23 | u32 cmd_rcv_len; 24 | u32 offs; 25 | u32 len; 26 | }; 27 | 28 | struct prestera_fw_regs { 29 | u32 fw_ready; 30 | u32 cmd_offs; 31 | u32 cmd_len; 32 | u32 cmd_qnum; 33 | u32 evt_offs; 34 | u32 evt_qnum; 35 | 36 | u32 fw_status; 37 | u32 rx_status; 38 | 39 | struct prestera_fw_cmdq_regs cmdq_list[PRESTERA_CMD_QNUM_MAX]; 40 | struct prestera_fw_evtq_regs evtq_list[PRESTERA_EVT_QNUM_MAX]; 41 | }; 42 | 43 | #define prestera_wait(cond, waitms) \ 44 | ({ \ 45 | unsigned long __wait_end = jiffies + msecs_to_jiffies(waitms); \ 46 | bool __wait_ret = false; \ 47 | do { \ 48 | if (cond) { \ 49 | __wait_ret = true; \ 50 | break; \ 51 | } \ 52 | cond_resched(); \ 53 | } while (time_before(jiffies, __wait_end)); \ 54 | __wait_ret; \ 55 | }) 56 | 57 | #define prestera_fw_dev(fw) ((fw)->dev.dev) 58 | 59 | /* Firmware registers: */ 60 | #define PRESTERA_FW_REG_OFFSET(f) offsetof(struct prestera_fw_regs, f) 61 | 62 | #define PRESTERA_FW_STATUS_REG PRESTERA_FW_REG_OFFSET(fw_status) 63 | #define PRESTERA_RX_STATUS_REG PRESTERA_FW_REG_OFFSET(rx_status) 64 | 65 | #define prestera_fw_write(fw, reg, val) writel(val, (fw)->hw_regs + (reg)) 66 | #define prestera_fw_read(fw, reg) readl((fw)->hw_regs + (reg)) 67 | 68 | #define PRESTERA_SUPP_FW_MAJ_VER 3 69 | #define PRESTERA_SUPP_FW_MIN_VER 2 70 | #define PRESTERA_SUPP_FW_PATCH_VER 2 71 | 72 | struct prestera_fw_evtq { 73 | u8 __iomem *addr; 74 | size_t len; 75 | }; 76 | 77 | struct prestera_fw_cmdq { 78 | /* serialize access to dev->send_req */ 79 | struct mutex cmd_mtx; 80 | u8 __iomem *addr; 81 | size_t len; 82 | }; 83 | 84 | struct prestera_fw_header { 85 | __be32 magic_number; 86 | __be32 version_value; 87 | u8 reserved[8]; 88 | } __packed; 89 | 90 | struct prestera_fw { 91 | struct workqueue_struct *wq; 92 | struct prestera_device dev; 93 | struct pci_dev *pci_dev; 94 | u8 __iomem *mem_addr; 95 | 96 | u8 __iomem *ldr_regs; 97 | u8 __iomem *hw_regs; 98 | 99 | u8 __iomem *ldr_ring_buf; 100 | u32 ldr_buf_len; 101 | u32 ldr_wr_idx; 102 | 103 | size_t cmd_mbox_len; 104 | u8 __iomem *cmd_mbox; 105 | struct prestera_fw_cmdq cmd_queue[PRESTERA_CMD_QNUM_MAX]; 106 | u8 cmd_qnum; 107 | struct prestera_fw_evtq evt_queue[PRESTERA_EVT_QNUM_MAX]; 108 | u8 evt_qnum; 109 | struct work_struct evt_work; 110 | u8 __iomem *evt_buf; 111 | u8 *evt_msg; 112 | }; 113 | 114 | int prestera_fw_rev_check(struct prestera_fw *fw); 115 | void prestera_fw_rev_parse_int(unsigned int firmware_version, 116 | struct prestera_fw_rev *rev); 117 | void prestera_fw_rev_parse(const struct prestera_fw_header *hdr, 118 | struct prestera_fw_rev *rev); 119 | void prestera_fw_uninit(struct prestera_fw *fw); 120 | int prestera_fw_init(struct prestera_fw *fw); 121 | int prestera_fw_send_req(struct prestera_device *dev, int qid, 122 | u8 *in_msg, size_t in_size, u8 *out_msg, 123 | size_t out_size, unsigned int wait); 124 | void prestera_fw_handle_event(struct prestera_fw *fw); 125 | void prestera_fw_queue_work(struct prestera_fw *fw); 126 | 127 | #endif /* PRESTERA_FW_H_ */ 128 | -------------------------------------------------------------------------------- /prestera/prestera_matchall.c: 
-------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 2 | /* Copyright (c) 2019-2021 Marvell International Ltd. All rights reserved */ 3 | 4 | #include 5 | #include 6 | 7 | #include "prestera.h" 8 | #include "prestera_hw.h" 9 | #include "prestera_flower.h" 10 | 11 | static int prestera_mall_rule_add(struct prestera_flow_block_binding *binding, 12 | struct prestera_port *to_port) 13 | { 14 | int err; 15 | u8 span_id; 16 | struct prestera_switch *sw = binding->port->sw; 17 | 18 | if (binding->span_id != PRESTERA_SPAN_INVALID_ID) 19 | /* port already in mirroring */ 20 | return -EEXIST; 21 | 22 | err = prestera_span_get(to_port, &span_id); 23 | if (err) 24 | return err; 25 | 26 | err = prestera_hw_span_bind(binding->port, span_id); 27 | if (err) { 28 | prestera_span_put(sw, span_id); 29 | return err; 30 | } 31 | 32 | binding->span_id = span_id; 33 | return 0; 34 | } 35 | 36 | static int prestera_mall_rule_del(struct prestera_flow_block_binding *binding) 37 | { 38 | int err; 39 | 40 | err = prestera_hw_span_unbind(binding->port); 41 | if (err) 42 | return err; 43 | 44 | err = prestera_span_put(binding->port->sw, binding->span_id); 45 | if (err) 46 | return err; 47 | 48 | binding->span_id = PRESTERA_SPAN_INVALID_ID; 49 | return 0; 50 | } 51 | 52 | static int prestera_mall_prio_check(struct prestera_flow_block *block, 53 | struct tc_cls_matchall_offload *f) 54 | { 55 | u32 flower_prio; 56 | int err; 57 | 58 | err = prestera_flower_prio_get(block, &flower_prio); 59 | if (err == -ENOENT) 60 | return 0; 61 | if (err) 62 | return err; 63 | 64 | if (f->common.prio >= flower_prio) 65 | return -EOPNOTSUPP; 66 | 67 | return 0; 68 | } 69 | 70 | int prestera_mall_prio_get(struct prestera_flow_block *block, 71 | u32 *prio) 72 | { 73 | if (block->mall_prio == UINT_MAX) 74 | return -ENOENT; 75 | 76 | *prio = block->mall_prio; 77 | return 0; 78 | } 79 | 80 | static void prestera_mall_prio_update(struct prestera_flow_block *block, 81 | struct tc_cls_matchall_offload *f) 82 | { 83 | if (f->common.prio > block->mall_prio || block->mall_prio == UINT_MAX) 84 | block->mall_prio = f->common.prio; 85 | } 86 | 87 | int prestera_mall_replace(struct prestera_flow_block *block, 88 | struct tc_cls_matchall_offload *f) 89 | { 90 | struct prestera_flow_block_binding *binding; 91 | __be16 protocol = f->common.protocol; 92 | struct flow_action_entry *act; 93 | struct prestera_port *port; 94 | int err; 95 | 96 | if (!flow_offload_has_one_action(&f->rule->action)) { 97 | NL_SET_ERR_MSG(f->common.extack, 98 | "Only singular actions are supported"); 99 | return -EOPNOTSUPP; 100 | } 101 | 102 | act = &f->rule->action.entries[0]; 103 | 104 | if (act->id != FLOW_ACTION_MIRRED) 105 | return -EOPNOTSUPP; 106 | 107 | if (protocol != htons(ETH_P_ALL)) 108 | return -EOPNOTSUPP; 109 | 110 | err = prestera_mall_prio_check(block, f); 111 | if (err) 112 | return err; 113 | 114 | if (!prestera_netdev_check(act->dev)) { 115 | NL_SET_ERR_MSG(f->common.extack, 116 | "Only switchdev port is supported"); 117 | return -EINVAL; 118 | } 119 | 120 | port = netdev_priv(act->dev); 121 | list_for_each_entry(binding, &block->binding_list, list) { 122 | err = prestera_mall_rule_add(binding, port); 123 | if (err == -EEXIST) 124 | return err; 125 | if (err) 126 | goto rollback; 127 | } 128 | 129 | prestera_mall_prio_update(block, f); 130 | 131 | return 0; 132 | 133 | rollback: 134 | list_for_each_entry_continue_reverse(binding, 135 | &block->binding_list, list) 136 | 
prestera_mall_rule_del(binding); 137 | return err; 138 | } 139 | 140 | void prestera_mall_destroy(struct prestera_flow_block *block) 141 | { 142 | struct prestera_flow_block_binding *binding; 143 | 144 | list_for_each_entry(binding, &block->binding_list, list) 145 | prestera_mall_rule_del(binding); 146 | 147 | block->mall_prio = UINT_MAX; 148 | } 149 | 150 | -------------------------------------------------------------------------------- /prestera/prestera_router_hw.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ 2 | /* Copyright (c) 2019-2021 Marvell International Ltd. All rights reserved. */ 3 | 4 | #ifndef _PRESTERA_ROUTER_HW_H_ 5 | #define _PRESTERA_ROUTER_HW_H_ 6 | 7 | /* TODO: move structures, that not used from external to .c file */ 8 | 9 | struct prestera_vr { 10 | u16 hw_vr_id; /* virtual router ID */ 11 | u32 tb_id; /* key (kernel fib table id) */ 12 | struct list_head router_node; 13 | unsigned int ref_cnt; 14 | }; 15 | 16 | struct prestera_rif_macvlan_list_node { 17 | struct list_head head; 18 | unsigned char addr[ETH_ALEN]; 19 | }; 20 | 21 | /* Add port to vlan + FDB ... or only FDB for vlan */ 22 | struct prestera_rif_entry { 23 | struct prestera_rif_entry_key { 24 | struct prestera_iface iface; 25 | } key; 26 | struct prestera_vr *vr; 27 | unsigned char addr[ETH_ALEN]; 28 | struct list_head macvlan_list; 29 | u16 hw_id; /* rif_id */ 30 | struct list_head router_node; /* ht */ 31 | }; 32 | 33 | struct prestera_nexthop_group { 34 | struct prestera_nexthop_group_key key; 35 | /* Store intermediate object here. 36 | * This prevent overhead kzalloc call. 37 | */ 38 | /* nh_neigh is used only to notify nexthop_group */ 39 | struct prestera_nh_neigh_head { 40 | struct prestera_nexthop_group *this; 41 | struct list_head head; 42 | /* ptr to neigh is not necessary. 
43 | * It used to prevent lookup of nh_neigh by key (n) on destroy 44 | */ 45 | struct prestera_nh_neigh *neigh; 46 | } nh_neigh_head[PRESTERA_NHGR_SIZE_MAX]; 47 | u32 grp_id; /* hw */ 48 | struct rhash_head ht_node; /* node of prestera_vr */ 49 | unsigned int ref_cnt; 50 | }; 51 | 52 | struct prestera_fib_key { 53 | struct prestera_ip_addr addr; 54 | u32 prefix_len; 55 | u32 tb_id; 56 | }; 57 | 58 | struct prestera_fib_info { 59 | struct prestera_vr *vr; 60 | struct list_head vr_node; 61 | enum prestera_fib_type { 62 | PRESTERA_FIB_TYPE_INVALID = 0, 63 | /* must be pointer to nh_grp id */ 64 | PRESTERA_FIB_TYPE_UC_NH, 65 | /* It can be connected route 66 | * and will be overlapped with neighbours 67 | */ 68 | PRESTERA_FIB_TYPE_TRAP, 69 | PRESTERA_FIB_TYPE_DROP 70 | } type; 71 | /* Valid only if type = UC_NH*/ 72 | struct prestera_nexthop_group *nh_grp; 73 | }; 74 | 75 | struct prestera_fib_node { 76 | struct rhash_head ht_node; /* node of prestera_vr */ 77 | struct prestera_fib_key key; 78 | struct prestera_fib_info info; /* action related info */ 79 | }; 80 | 81 | int prestera_rif_entry_set_macvlan(const struct prestera_switch *sw, 82 | struct prestera_rif_entry *e, 83 | bool enable, const char *addr); 84 | struct prestera_rif_entry * 85 | prestera_rif_entry_find(const struct prestera_switch *sw, 86 | const struct prestera_rif_entry_key *k); 87 | void prestera_rif_entry_destroy(struct prestera_switch *sw, 88 | struct prestera_rif_entry *e); 89 | void prestera_rif_entry_destroy_ht(struct prestera_switch *sw); 90 | struct prestera_rif_entry * 91 | prestera_rif_entry_create(struct prestera_switch *sw, 92 | struct prestera_rif_entry_key *k, 93 | u32 tb_id, unsigned char *addr); 94 | void prestera_vr_util_hw_abort(struct prestera_switch *sw); 95 | int prestera_router_hw_init(struct prestera_switch *sw); 96 | void prestera_router_hw_fini(struct prestera_switch *sw); 97 | struct prestera_nh_neigh * 98 | prestera_nh_neigh_find(struct prestera_switch *sw, 99 | struct prestera_nh_neigh_key *key); 100 | struct prestera_nh_neigh * 101 | prestera_nh_neigh_get(struct prestera_switch *sw, 102 | struct prestera_nh_neigh_key *key); 103 | void prestera_nh_neigh_put(struct prestera_switch *sw, 104 | struct prestera_nh_neigh *neigh); 105 | int prestera_nh_neigh_set(struct prestera_switch *sw, 106 | struct prestera_nh_neigh *neigh); 107 | bool prestera_nh_neigh_util_hw_state(struct prestera_switch *sw, 108 | struct prestera_nh_neigh *nh_neigh); 109 | struct prestera_fib_node * 110 | prestera_fib_node_find(struct prestera_switch *sw, struct prestera_fib_key *key); 111 | void prestera_fib_node_destroy(struct prestera_switch *sw, 112 | struct prestera_fib_node *fib_node); 113 | void prestera_fib_node_destroy_ht(struct prestera_switch *sw); 114 | struct prestera_fib_node * 115 | prestera_fib_node_create(struct prestera_switch *sw, 116 | struct prestera_fib_key *key, 117 | enum prestera_fib_type fib_type, 118 | struct prestera_nexthop_group_key *nh_grp_key); 119 | 120 | #endif /* _PRESTERA_ROUTER_HW_H_ */ 121 | -------------------------------------------------------------------------------- /prestera/prestera_storm_control.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 2 | /* Copyright (c) 2019-2021 Marvell International Ltd. 
All rights reserved */ 3 | 4 | #include "prestera_storm_control.h" 5 | #include "prestera_hw.h" 6 | 7 | #define SYSFS_ATTR_MODE 0644 8 | 9 | static ssize_t storm_control_attr_store(struct device *dev, 10 | struct device_attribute *attr, 11 | const char *buf, size_t size); 12 | static ssize_t storm_control_attr_show(struct device *dev, 13 | struct device_attribute *attr, 14 | char *buf); 15 | 16 | struct strom_control_attributes { 17 | u32 bc_kbyte_per_sec_rate; 18 | u32 unknown_uc_kbyte_per_sec_rate; 19 | u32 unreg_mc_kbyte_per_sec_rate; 20 | }; 21 | 22 | struct prestera_storm_control { 23 | struct prestera_switch *sw; 24 | struct strom_control_attributes *attribute_values; 25 | }; 26 | 27 | static DEVICE_ATTR(broadcast_kbyte_per_sec_rate, SYSFS_ATTR_MODE, 28 | storm_control_attr_show, storm_control_attr_store); 29 | 30 | static DEVICE_ATTR(unknown_unicast_kbyte_per_sec_rate, SYSFS_ATTR_MODE, 31 | storm_control_attr_show, storm_control_attr_store); 32 | 33 | static DEVICE_ATTR(unregistered_multicast_kbyte_per_sec_rate, SYSFS_ATTR_MODE, 34 | storm_control_attr_show, storm_control_attr_store); 35 | 36 | static struct attribute *prestera_sw_dev_attrs[] = { 37 | &dev_attr_broadcast_kbyte_per_sec_rate.attr, 38 | &dev_attr_unknown_unicast_kbyte_per_sec_rate.attr, 39 | &dev_attr_unregistered_multicast_kbyte_per_sec_rate.attr, 40 | NULL 41 | }; 42 | 43 | static struct attribute_group prestera_sw_dev_attr_group = { 44 | .name = "storm_control", /* we want them in subdirectory */ 45 | .attrs = prestera_sw_dev_attrs, 46 | }; 47 | 48 | static ssize_t storm_control_attr_store(struct device *dev, 49 | struct device_attribute *attr, 50 | const char *buf, size_t size) 51 | { 52 | struct prestera_port *port = dev_to_prestera_port(dev); 53 | struct strom_control_attributes *sc_attr; 54 | struct prestera_storm_control *sc; 55 | u32 *attr_to_change = NULL; 56 | u32 kbyte_per_sec_rate; 57 | ssize_t ret = -EINVAL; 58 | u32 storm_type; 59 | 60 | if (!port) 61 | return -EINVAL; 62 | 63 | sc = port->sw->storm_control; 64 | sc_attr = &sc->attribute_values[port->fp_id]; 65 | 66 | ret = kstrtou32(buf, 10, &kbyte_per_sec_rate); 67 | if (ret) 68 | return ret; 69 | 70 | if (!strcmp(attr->attr.name, "broadcast_kbyte_per_sec_rate")) { 71 | attr_to_change = &sc_attr->bc_kbyte_per_sec_rate; 72 | storm_type = PRESTERA_PORT_STORM_CTL_TYPE_BC; 73 | } 74 | 75 | if (!strcmp(attr->attr.name, "unknown_unicast_kbyte_per_sec_rate")) { 76 | attr_to_change = &sc_attr->unknown_uc_kbyte_per_sec_rate; 77 | storm_type = PRESTERA_PORT_STORM_CTL_TYPE_UC_UNK; 78 | } 79 | 80 | if (!strcmp(attr->attr.name, 81 | "unregistered_multicast_kbyte_per_sec_rate")) { 82 | attr_to_change = &sc_attr->unreg_mc_kbyte_per_sec_rate; 83 | storm_type = PRESTERA_PORT_STORM_CTL_TYPE_MC; 84 | } 85 | 86 | if (!attr_to_change) 87 | return -EINVAL; 88 | 89 | if (kbyte_per_sec_rate != *attr_to_change) 90 | ret = prestera_hw_port_storm_control_cfg_set(port, storm_type, 91 | kbyte_per_sec_rate); 92 | else 93 | return size; 94 | 95 | if (ret) 96 | return ret; 97 | 98 | *attr_to_change = kbyte_per_sec_rate; 99 | 100 | return size; 101 | } 102 | 103 | static ssize_t storm_control_attr_show(struct device *dev, 104 | struct device_attribute *attr, 105 | char *buf) 106 | { 107 | struct prestera_port *port = dev_to_prestera_port(dev); 108 | struct strom_control_attributes *sc_attr; 109 | struct prestera_storm_control *sc; 110 | 111 | if (!port) 112 | return -EINVAL; 113 | 114 | sc = port->sw->storm_control; 115 | 116 | sc_attr = &sc->attribute_values[port->fp_id]; 117 | 118 | 
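/* Reads of the per-port sysfs attributes land here: the attribute group
 * created in prestera_storm_control_init() is named "storm_control" and is
 * attached to the port netdev's device kobject, so (assuming a port netdev
 * called sw1p1, as in the prestera_qdisc.h example) the broadcast limit
 * would typically appear as
 * /sys/class/net/sw1p1/storm_control/broadcast_kbyte_per_sec_rate.
 * Writes are handled by storm_control_attr_store() above, which programs
 * the rate via prestera_hw_port_storm_control_cfg_set().
 */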
if (!strcmp(attr->attr.name, "broadcast_kbyte_per_sec_rate")) 119 | return sprintf(buf, "%u\n", sc_attr->bc_kbyte_per_sec_rate); 120 | 121 | if (!strcmp(attr->attr.name, "unknown_unicast_kbyte_per_sec_rate")) 122 | return sprintf(buf, "%u\n", 123 | sc_attr->unknown_uc_kbyte_per_sec_rate); 124 | 125 | if (!strcmp(attr->attr.name, 126 | "unregistered_multicast_kbyte_per_sec_rate")) 127 | return sprintf(buf, "%u\n", 128 | sc_attr->unreg_mc_kbyte_per_sec_rate); 129 | 130 | return -EINVAL; 131 | } 132 | 133 | int prestera_storm_control_init(struct prestera_switch *sw) 134 | { 135 | struct prestera_storm_control *sc; 136 | struct prestera_port *port; 137 | int err; 138 | 139 | sc = kzalloc(sizeof(*sc), GFP_KERNEL); 140 | if (!sc) 141 | return -ENOMEM; 142 | 143 | sc->attribute_values = kcalloc(sw->port_count, 144 | sizeof(*sc->attribute_values), 145 | GFP_KERNEL); 146 | if (!sc->attribute_values) { 147 | err = -ENOMEM; 148 | goto err_values_alloca; 149 | } 150 | 151 | list_for_each_entry(port, &sw->port_list, list) { 152 | err = sysfs_create_group(&port->net_dev->dev.kobj, 153 | &prestera_sw_dev_attr_group); 154 | if (err) { 155 | pr_err("Failed to create sysfs group for %s\n", 156 | dev_name(&port->net_dev->dev)); 157 | goto err_group_create; 158 | } 159 | } 160 | 161 | sc->sw = sw; 162 | sw->storm_control = sc; 163 | 164 | return 0; 165 | 166 | err_group_create: 167 | list_for_each_entry_continue_reverse(port, &sw->port_list, list) { 168 | sysfs_remove_group(&port->net_dev->dev.kobj, 169 | &prestera_sw_dev_attr_group); 170 | } 171 | kfree(sc->attribute_values); 172 | err_values_alloca: 173 | kfree(sc); 174 | return err; 175 | } 176 | 177 | void prestera_storm_control_fini(struct prestera_switch *sw) 178 | { 179 | struct prestera_storm_control *sc = sw->storm_control; 180 | struct prestera_port *port; 181 | 182 | list_for_each_entry(port, &sw->port_list, list) 183 | sysfs_remove_group(&port->net_dev->dev.kobj, 184 | &prestera_sw_dev_attr_group); 185 | 186 | kfree(sc->attribute_values); 187 | kfree(sc); 188 | } 189 | 190 | -------------------------------------------------------------------------------- /prestera/prestera_span.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 2 | /* Copyright (c) 2020 Marvell International Ltd. 
All rights reserved */ 3 | 4 | #include 5 | #include 6 | 7 | #include "prestera.h" 8 | #include "prestera_hw.h" 9 | #include "prestera_acl.h" 10 | #include "prestera_span.h" 11 | 12 | struct prestera_span_entry { 13 | struct list_head list; 14 | struct prestera_port *port; 15 | refcount_t ref_count; 16 | u8 id; 17 | }; 18 | 19 | struct prestera_span { 20 | struct prestera_switch *sw; 21 | struct list_head entries; 22 | }; 23 | 24 | static struct prestera_span_entry * 25 | prestera_span_entry_create(struct prestera_port *port, u8 span_id) 26 | { 27 | struct prestera_span_entry *entry; 28 | 29 | entry = kzalloc(sizeof(*entry), GFP_KERNEL); 30 | if (!entry) 31 | return ERR_PTR(-ENOMEM); 32 | 33 | refcount_set(&entry->ref_count, 1); 34 | entry->port = port; 35 | entry->id = span_id; 36 | list_add_tail(&entry->list, &port->sw->span->entries); 37 | 38 | return entry; 39 | } 40 | 41 | static void prestera_span_entry_del(struct prestera_span_entry *entry) 42 | { 43 | list_del(&entry->list); 44 | kfree(entry); 45 | } 46 | 47 | static struct prestera_span_entry * 48 | prestera_span_entry_find_by_id(struct prestera_span *span, u8 span_id) 49 | { 50 | struct prestera_span_entry *entry; 51 | 52 | list_for_each_entry(entry, &span->entries, list) { 53 | if (entry->id == span_id) 54 | return entry; 55 | } 56 | 57 | return NULL; 58 | } 59 | 60 | static struct prestera_span_entry * 61 | prestera_span_entry_find_by_port(struct prestera_span *span, 62 | struct prestera_port *port) 63 | { 64 | struct prestera_span_entry *entry; 65 | 66 | list_for_each_entry(entry, &span->entries, list) { 67 | if (entry->port == port) 68 | return entry; 69 | } 70 | 71 | return NULL; 72 | } 73 | 74 | static int prestera_span_get(struct prestera_port *port, u8 *span_id) 75 | { 76 | u8 new_span_id; 77 | struct prestera_switch *sw = port->sw; 78 | struct prestera_span_entry *entry; 79 | int err; 80 | 81 | entry = prestera_span_entry_find_by_port(sw->span, port); 82 | if (entry) { 83 | refcount_inc(&entry->ref_count); 84 | *span_id = entry->id; 85 | return 0; 86 | } 87 | 88 | err = prestera_hw_span_get(port, &new_span_id); 89 | if (err) 90 | return err; 91 | 92 | entry = prestera_span_entry_create(port, new_span_id); 93 | if (IS_ERR(entry)) { 94 | prestera_hw_span_release(sw, new_span_id); 95 | return PTR_ERR(entry); 96 | } 97 | 98 | *span_id = new_span_id; 99 | return 0; 100 | } 101 | 102 | static int prestera_span_put(struct prestera_switch *sw, u8 span_id) 103 | { 104 | struct prestera_span_entry *entry; 105 | int err; 106 | 107 | entry = prestera_span_entry_find_by_id(sw->span, span_id); 108 | if (!entry) 109 | return false; 110 | 111 | if (!refcount_dec_and_test(&entry->ref_count)) 112 | return 0; 113 | 114 | err = prestera_hw_span_release(sw, span_id); 115 | if (err) 116 | return err; 117 | 118 | prestera_span_entry_del(entry); 119 | return 0; 120 | } 121 | 122 | static int prestera_span_rule_add(struct prestera_flow_block_binding *binding, 123 | struct prestera_port *to_port) 124 | { 125 | struct prestera_switch *sw = binding->port->sw; 126 | u8 span_id; 127 | int err; 128 | 129 | if (binding->span_id != PRESTERA_SPAN_INVALID_ID) 130 | /* port already in mirroring */ 131 | return -EEXIST; 132 | 133 | err = prestera_span_get(to_port, &span_id); 134 | if (err) 135 | return err; 136 | 137 | err = prestera_hw_span_bind(binding->port, span_id); 138 | if (err) { 139 | prestera_span_put(sw, span_id); 140 | return err; 141 | } 142 | 143 | binding->span_id = span_id; 144 | return 0; 145 | } 146 | 147 | static int 
prestera_span_rule_del(struct prestera_flow_block_binding *binding) 148 | { 149 | int err; 150 | 151 | err = prestera_hw_span_unbind(binding->port); 152 | if (err) 153 | return err; 154 | 155 | err = prestera_span_put(binding->port->sw, binding->span_id); 156 | if (err) 157 | return err; 158 | 159 | binding->span_id = PRESTERA_SPAN_INVALID_ID; 160 | return 0; 161 | } 162 | 163 | int prestera_span_replace(struct prestera_flow_block *block, 164 | struct tc_cls_matchall_offload *f) 165 | { 166 | struct prestera_flow_block_binding *binding; 167 | __be16 protocol = f->common.protocol; 168 | struct flow_action_entry *act; 169 | struct prestera_port *port; 170 | int err; 171 | 172 | if (!flow_offload_has_one_action(&f->rule->action)) { 173 | NL_SET_ERR_MSG(f->common.extack, 174 | "Only singular actions are supported"); 175 | return -EOPNOTSUPP; 176 | } 177 | 178 | act = &f->rule->action.entries[0]; 179 | 180 | if (!prestera_netdev_check(act->dev)) { 181 | NL_SET_ERR_MSG(f->common.extack, 182 | "Only Marvell Prestera port is supported"); 183 | return -EINVAL; 184 | } 185 | if (!tc_cls_can_offload_and_chain0(act->dev, &f->common)) 186 | return -EOPNOTSUPP; 187 | if (act->id != FLOW_ACTION_MIRRED) 188 | return -EOPNOTSUPP; 189 | if (protocol != htons(ETH_P_ALL)) 190 | return -EOPNOTSUPP; 191 | 192 | port = netdev_priv(act->dev); 193 | 194 | list_for_each_entry(binding, &block->binding_list, list) { 195 | err = prestera_span_rule_add(binding, port); 196 | if (err) 197 | goto rollback; 198 | } 199 | 200 | return 0; 201 | 202 | rollback: 203 | list_for_each_entry_continue_reverse(binding, 204 | &block->binding_list, list) 205 | prestera_span_rule_del(binding); 206 | return err; 207 | } 208 | 209 | void prestera_span_destroy(struct prestera_flow_block *block) 210 | { 211 | struct prestera_flow_block_binding *binding; 212 | 213 | list_for_each_entry(binding, &block->binding_list, list) 214 | prestera_span_rule_del(binding); 215 | } 216 | 217 | int prestera_span_init(struct prestera_switch *sw) 218 | { 219 | struct prestera_span *span; 220 | 221 | span = kzalloc(sizeof(*span), GFP_KERNEL); 222 | if (!span) 223 | return -ENOMEM; 224 | 225 | INIT_LIST_HEAD(&span->entries); 226 | 227 | sw->span = span; 228 | span->sw = sw; 229 | 230 | return 0; 231 | } 232 | 233 | void prestera_span_fini(struct prestera_switch *sw) 234 | { 235 | struct prestera_span *span = sw->span; 236 | 237 | WARN_ON(!list_empty(&span->entries)); 238 | kfree(span); 239 | } 240 | -------------------------------------------------------------------------------- /prestera/prestera_flow.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 2 | /* Copyright (c) 2019-2021 Marvell International Ltd. 
All rights reserved */ 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | #include "prestera.h" 9 | #include "prestera_acl.h" 10 | #include "prestera_flower.h" 11 | 12 | static LIST_HEAD(prestera_block_cb_list); 13 | 14 | static int prestera_flow_block_mall_cb(struct prestera_flow_block *block, 15 | struct tc_cls_matchall_offload *f) 16 | { 17 | if (f->common.chain_index != 0) { 18 | NL_SET_ERR_MSG(f->common.extack, "Only chain 0 is supported"); 19 | return -EOPNOTSUPP; 20 | } 21 | 22 | switch (f->command) { 23 | case TC_CLSMATCHALL_REPLACE: 24 | return prestera_mall_replace(block, f); 25 | case TC_CLSMATCHALL_DESTROY: 26 | prestera_mall_destroy(block); 27 | return 0; 28 | default: 29 | return -EOPNOTSUPP; 30 | } 31 | } 32 | 33 | static int prestera_flow_block_flower_cb(struct prestera_flow_block *block, 34 | struct flow_cls_offload *f) 35 | { 36 | switch (f->command) { 37 | case FLOW_CLS_REPLACE: 38 | return prestera_flower_replace(block, f); 39 | case FLOW_CLS_DESTROY: 40 | prestera_flower_destroy(block, f); 41 | return 0; 42 | case FLOW_CLS_STATS: 43 | return prestera_flower_stats(block, f); 44 | case FLOW_CLS_TMPLT_CREATE: 45 | return prestera_flower_tmplt_create(block, f); 46 | case FLOW_CLS_TMPLT_DESTROY: 47 | prestera_flower_tmplt_destroy(block, f); 48 | return 0; 49 | default: 50 | return -EOPNOTSUPP; 51 | } 52 | } 53 | 54 | static int prestera_flow_block_cb(enum tc_setup_type type, 55 | void *type_data, void *cb_priv) 56 | { 57 | struct prestera_flow_block *block = cb_priv; 58 | 59 | switch (type) { 60 | case TC_SETUP_CLSFLOWER: 61 | return prestera_flow_block_flower_cb(block, type_data); 62 | case TC_SETUP_CLSMATCHALL: 63 | return prestera_flow_block_mall_cb(block, type_data); 64 | default: 65 | return -EOPNOTSUPP; 66 | } 67 | } 68 | 69 | static void prestera_flow_block_destroy(void *cb_priv) 70 | { 71 | struct prestera_flow_block *block = cb_priv; 72 | 73 | prestera_flower_template_cleanup(block); 74 | 75 | WARN_ON(!list_empty(&block->template_list)); 76 | WARN_ON(!list_empty(&block->binding_list)); 77 | 78 | kfree(block); 79 | } 80 | 81 | static void prestera_flow_block_release(void *cb_priv) 82 | { 83 | struct prestera_flow_block *block = cb_priv; 84 | 85 | prestera_flow_block_destroy(block); 86 | } 87 | 88 | static bool 89 | prestera_flow_block_is_bound(const struct prestera_flow_block *block) 90 | { 91 | return block->ruleset_zero; 92 | } 93 | 94 | static struct prestera_flow_block_binding * 95 | prestera_flow_block_lookup(struct prestera_flow_block *block, 96 | struct prestera_port *port) 97 | { 98 | struct prestera_flow_block_binding *binding; 99 | 100 | list_for_each_entry(binding, &block->binding_list, list) 101 | if (binding->port == port) 102 | return binding; 103 | 104 | return NULL; 105 | } 106 | 107 | static int prestera_flow_block_bind(struct prestera_flow_block *block, 108 | struct prestera_port *port) 109 | { 110 | struct prestera_flow_block_binding *binding; 111 | int err; 112 | 113 | binding = kzalloc(sizeof(*binding), GFP_KERNEL); 114 | if (!binding) 115 | return -ENOMEM; 116 | 117 | binding->span_id = PRESTERA_SPAN_INVALID_ID; 118 | binding->port = port; 119 | 120 | if (prestera_flow_block_is_bound(block)) { 121 | err = prestera_acl_ruleset_bind(block->ruleset_zero, port); 122 | if (err) 123 | goto err_ruleset_bind; 124 | } 125 | 126 | list_add(&binding->list, &block->binding_list); 127 | return 0; 128 | 129 | err_ruleset_bind: 130 | kfree(binding); 131 | return err; 132 | } 133 | 134 | static int prestera_flow_block_unbind(struct prestera_flow_block *block, 
135 | struct prestera_port *port) 136 | { 137 | struct prestera_flow_block_binding *binding; 138 | 139 | binding = prestera_flow_block_lookup(block, port); 140 | if (!binding) 141 | return -ENOENT; 142 | 143 | list_del(&binding->list); 144 | 145 | if (prestera_flow_block_is_bound(block)) 146 | prestera_acl_ruleset_unbind(block->ruleset_zero, port); 147 | 148 | kfree(binding); 149 | return 0; 150 | } 151 | 152 | static struct prestera_flow_block * 153 | prestera_flow_block_create(struct prestera_switch *sw, 154 | struct net *net, 155 | bool ingress) 156 | { 157 | struct prestera_flow_block *block; 158 | 159 | block = kzalloc(sizeof(*block), GFP_KERNEL); 160 | if (!block) 161 | return NULL; 162 | 163 | INIT_LIST_HEAD(&block->binding_list); 164 | INIT_LIST_HEAD(&block->template_list); 165 | block->net = net; 166 | block->sw = sw; 167 | block->mall_prio = UINT_MAX; 168 | block->flower_min_prio = UINT_MAX; 169 | block->ingress = ingress; 170 | 171 | return block; 172 | } 173 | 174 | static struct prestera_flow_block * 175 | prestera_flow_block_get(struct prestera_switch *sw, 176 | struct flow_block_offload *f, 177 | bool *register_block, 178 | bool ingress) 179 | { 180 | struct prestera_flow_block *block; 181 | struct flow_block_cb *block_cb; 182 | 183 | block_cb = flow_block_cb_lookup(f->block, 184 | prestera_flow_block_cb, sw); 185 | if (!block_cb) { 186 | block = prestera_flow_block_create(sw, f->net, ingress); 187 | if (!block) 188 | return ERR_PTR(-ENOMEM); 189 | 190 | block_cb = flow_block_cb_alloc(prestera_flow_block_cb, 191 | sw, block, 192 | prestera_flow_block_release); 193 | if (IS_ERR(block_cb)) { 194 | prestera_flow_block_destroy(block); 195 | return ERR_CAST(block_cb); 196 | } 197 | 198 | block->block_cb = block_cb; 199 | *register_block = true; 200 | } else { 201 | block = flow_block_cb_priv(block_cb); 202 | *register_block = false; 203 | } 204 | 205 | flow_block_cb_incref(block_cb); 206 | 207 | return block; 208 | } 209 | 210 | static void prestera_flow_block_put(struct prestera_flow_block *block) 211 | { 212 | struct flow_block_cb *block_cb = block->block_cb; 213 | 214 | if (flow_block_cb_decref(block_cb)) 215 | return; 216 | 217 | flow_block_cb_free(block_cb); 218 | prestera_flow_block_destroy(block); 219 | } 220 | 221 | static int prestera_setup_tc_block_bind(struct prestera_port *port, 222 | struct flow_block_offload *f, bool ingress) 223 | { 224 | struct prestera_switch *sw = port->sw; 225 | struct prestera_flow_block *block; 226 | struct flow_block_cb *block_cb; 227 | bool register_block; 228 | int err; 229 | 230 | block = prestera_flow_block_get(sw, f, ®ister_block, ingress); 231 | if (IS_ERR(block)) 232 | return PTR_ERR(block); 233 | 234 | block_cb = block->block_cb; 235 | 236 | err = prestera_flow_block_bind(block, port); 237 | if (err) 238 | goto err_block_bind; 239 | 240 | if (register_block) { 241 | flow_block_cb_add(block_cb, f); 242 | list_add_tail(&block_cb->driver_list, &prestera_block_cb_list); 243 | } 244 | 245 | if (ingress) 246 | port->ingress_flow_block = block; 247 | else 248 | port->egress_flow_block = block; 249 | 250 | return 0; 251 | 252 | err_block_bind: 253 | prestera_flow_block_put(block); 254 | return err; 255 | } 256 | 257 | static void prestera_setup_tc_block_unbind(struct prestera_port *port, 258 | struct flow_block_offload *f, bool ingress) 259 | { 260 | struct prestera_switch *sw = port->sw; 261 | struct prestera_flow_block *block; 262 | struct flow_block_cb *block_cb; 263 | int err; 264 | 265 | block_cb = flow_block_cb_lookup(f->block, 
prestera_flow_block_cb, sw); 266 | if (!block_cb) 267 | return; 268 | 269 | block = flow_block_cb_priv(block_cb); 270 | 271 | prestera_mall_destroy(block); 272 | 273 | err = prestera_flow_block_unbind(block, port); 274 | if (err) 275 | goto err_flow_block_unbind; 276 | 277 | if (!flow_block_cb_decref(block_cb)) { 278 | flow_block_cb_remove(block_cb, f); 279 | list_del(&block_cb->driver_list); 280 | } 281 | 282 | err_flow_block_unbind: 283 | if (ingress) 284 | port->ingress_flow_block = NULL; 285 | else 286 | port->egress_flow_block = NULL; 287 | } 288 | 289 | static int prestera_setup_tc_block_clsact(struct prestera_port *port, 290 | struct flow_block_offload *f, 291 | bool ingress) 292 | { 293 | f->driver_block_list = &prestera_block_cb_list; 294 | 295 | switch (f->command) { 296 | case FLOW_BLOCK_BIND: 297 | return prestera_setup_tc_block_bind(port, f, ingress); 298 | case FLOW_BLOCK_UNBIND: 299 | prestera_setup_tc_block_unbind(port, f, ingress); 300 | return 0; 301 | default: 302 | return -EOPNOTSUPP; 303 | } 304 | } 305 | 306 | int prestera_setup_tc_block(struct prestera_port *port, 307 | struct flow_block_offload *f) 308 | { 309 | switch (f->binder_type) { 310 | case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS: 311 | return prestera_setup_tc_block_clsact(port, f, true); 312 | case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS: 313 | return prestera_setup_tc_block_clsact(port, f, false); 314 | default: 315 | return -EOPNOTSUPP; 316 | } 317 | } 318 | -------------------------------------------------------------------------------- /prestera/prestera_log.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 2 | /* Copyright (c) 2019-2021 Marvell International Ltd. All rights reserved */ 3 | 4 | #include "prestera_log.h" 5 | 6 | static const char unknown[] = "UNKNOWN"; 7 | 8 | DEF_ENUM_MAP(netdev_cmd) = { 9 | [NETDEV_UP] = "NETDEV_UP", 10 | [NETDEV_DOWN] = "NETDEV_DOWN", 11 | [NETDEV_REBOOT] = "NETDEV_REBOOT", 12 | [NETDEV_CHANGE] = "NETDEV_CHANGE", 13 | [NETDEV_REGISTER] = "NETDEV_REGISTER", 14 | [NETDEV_UNREGISTER] = "NETDEV_UNREGISTER", 15 | [NETDEV_CHANGEMTU] = "NETDEV_CHANGEMTU", 16 | [NETDEV_CHANGEADDR] = "NETDEV_CHANGEADDR", 17 | [NETDEV_PRE_CHANGEADDR] = "NETDEV_PRE_CHANGEADDR", 18 | [NETDEV_GOING_DOWN] = "NETDEV_GOING_DOWN", 19 | [NETDEV_CHANGENAME] = "NETDEV_CHANGENAME", 20 | [NETDEV_FEAT_CHANGE] = "NETDEV_FEAT_CHANGE", 21 | [NETDEV_BONDING_FAILOVER] = "NETDEV_BONDING_FAILOVER", 22 | [NETDEV_PRE_UP] = "NETDEV_PRE_UP", 23 | [NETDEV_PRE_TYPE_CHANGE] = "NETDEV_PRE_TYPE_CHANGE", 24 | [NETDEV_POST_TYPE_CHANGE] = "NETDEV_POST_TYPE_CHANGE", 25 | [NETDEV_POST_INIT] = "NETDEV_POST_INIT", 26 | [NETDEV_RELEASE] = "NETDEV_RELEASE", 27 | [NETDEV_NOTIFY_PEERS] = "NETDEV_NOTIFY_PEERS", 28 | [NETDEV_JOIN] = "NETDEV_JOIN", 29 | [NETDEV_CHANGEUPPER] = "NETDEV_CHANGEUPPER", 30 | [NETDEV_RESEND_IGMP] = "NETDEV_RESEND_IGMP", 31 | [NETDEV_PRECHANGEMTU] = "NETDEV_PRECHANGEMTU", 32 | [NETDEV_CHANGEINFODATA] = "NETDEV_CHANGEINFODATA", 33 | [NETDEV_BONDING_INFO] = "NETDEV_BONDING_INFO", 34 | [NETDEV_PRECHANGEUPPER] = "NETDEV_PRECHANGEUPPER", 35 | [NETDEV_CHANGELOWERSTATE] = "NETDEV_CHANGELOWERSTATE", 36 | [NETDEV_UDP_TUNNEL_PUSH_INFO] = "NETDEV_UDP_TUNNEL_PUSH_INFO", 37 | [NETDEV_UDP_TUNNEL_DROP_INFO] = "NETDEV_UDP_TUNNEL_DROP_INFO", 38 | [NETDEV_CHANGE_TX_QUEUE_LEN] = "NETDEV_CHANGE_TX_QUEUE_LEN", 39 | [NETDEV_CVLAN_FILTER_PUSH_INFO] = "NETDEV_CVLAN_FILTER_PUSH_INFO", 40 | [NETDEV_CVLAN_FILTER_DROP_INFO] = 
"NETDEV_CVLAN_FILTER_DROP_INFO", 41 | [NETDEV_SVLAN_FILTER_PUSH_INFO] = "NETDEV_SVLAN_FILTER_PUSH_INFO", 42 | [NETDEV_SVLAN_FILTER_DROP_INFO] = "NETDEV_SVLAN_FILTER_DROP_INFO" 43 | }; 44 | 45 | DEF_ENUM_MAP(switchdev_notifier_type) = { 46 | [SWITCHDEV_FDB_ADD_TO_BRIDGE] = "SWITCHDEV_FDB_ADD_TO_BRIDGE", 47 | [SWITCHDEV_FDB_DEL_TO_BRIDGE] = "SWITCHDEV_FDB_DEL_TO_BRIDGE", 48 | [SWITCHDEV_FDB_ADD_TO_DEVICE] = "SWITCHDEV_FDB_ADD_TO_DEVICE", 49 | [SWITCHDEV_FDB_DEL_TO_DEVICE] = "SWITCHDEV_FDB_DEL_TO_DEVICE", 50 | [SWITCHDEV_FDB_OFFLOADED] = "SWITCHDEV_FDB_OFFLOADED", 51 | [SWITCHDEV_PORT_OBJ_ADD] = "SWITCHDEV_PORT_OBJ_ADD", 52 | [SWITCHDEV_PORT_OBJ_DEL] = "SWITCHDEV_PORT_OBJ_DEL", 53 | [SWITCHDEV_PORT_ATTR_SET] = "SWITCHDEV_PORT_ATTR_SET", 54 | [SWITCHDEV_VXLAN_FDB_ADD_TO_BRIDGE] = 55 | "SWITCHDEV_VXLAN_FDB_ADD_TO_BRIDGE", 56 | [SWITCHDEV_VXLAN_FDB_DEL_TO_BRIDGE] = 57 | "SWITCHDEV_VXLAN_FDB_DEL_TO_BRIDGE", 58 | [SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE] = 59 | "SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE", 60 | [SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE] = 61 | "SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE", 62 | [SWITCHDEV_VXLAN_FDB_OFFLOADED] = "SWITCHDEV_VXLAN_FDB_OFFLOADED" 63 | }; 64 | 65 | DEF_ENUM_MAP(switchdev_attr_id) = { 66 | [SWITCHDEV_ATTR_ID_UNDEFINED] = 67 | "SWITCHDEV_ATTR_ID_UNDEFINED", 68 | [SWITCHDEV_ATTR_ID_PORT_STP_STATE] = 69 | "SWITCHDEV_ATTR_ID_PORT_STP_STATE", 70 | [SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS] = 71 | "SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS", 72 | [SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS] = 73 | "SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS", 74 | [SWITCHDEV_ATTR_ID_PORT_MROUTER] = 75 | "SWITCHDEV_ATTR_ID_PORT_MROUTER", 76 | [SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME] = 77 | "SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME", 78 | [SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING] = 79 | "SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING", 80 | [SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED] = 81 | "SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED", 82 | [SWITCHDEV_ATTR_ID_BRIDGE_MROUTER] = 83 | "SWITCHDEV_ATTR_ID_BRIDGE_MROUTER" 84 | }; 85 | 86 | DEF_ENUM_MAP(switchdev_obj_id) = { 87 | [SWITCHDEV_OBJ_ID_UNDEFINED] = "SWITCHDEV_OBJ_ID_UNDEFINED", 88 | [SWITCHDEV_OBJ_ID_PORT_VLAN] = "SWITCHDEV_OBJ_ID_PORT_VLAN", 89 | [SWITCHDEV_OBJ_ID_PORT_MDB] = "SWITCHDEV_OBJ_ID_PORT_MDB", 90 | [SWITCHDEV_OBJ_ID_HOST_MDB] = "SWITCHDEV_OBJ_ID_HOST_MDB", 91 | }; 92 | 93 | DEF_ENUM_MAP(fib_event_type) = { 94 | [FIB_EVENT_ENTRY_REPLACE] = "FIB_EVENT_ENTRY_REPLACE", 95 | [FIB_EVENT_ENTRY_APPEND] = "FIB_EVENT_ENTRY_APPEND", 96 | [FIB_EVENT_ENTRY_ADD] = "FIB_EVENT_ENTRY_ADD", 97 | [FIB_EVENT_ENTRY_DEL] = "FIB_EVENT_ENTRY_DEL", 98 | [FIB_EVENT_RULE_ADD] = "FIB_EVENT_RULE_ADD", 99 | [FIB_EVENT_RULE_DEL] = "FIB_EVENT_RULE_DEL", 100 | [FIB_EVENT_NH_ADD] = "FIB_EVENT_NH_ADD", 101 | [FIB_EVENT_NH_DEL] = "FIB_EVENT_NH_DEL", 102 | [FIB_EVENT_VIF_ADD] = "FIB_EVENT_VIF_ADD", 103 | [FIB_EVENT_VIF_DEL] = "FIB_EVENT_VIF_DEL", 104 | }; 105 | 106 | DEF_ENUM_MAP(netevent_notif_type) = { 107 | [NETEVENT_NEIGH_UPDATE] = "NETEVENT_NEIGH_UPDATE", 108 | [NETEVENT_REDIRECT] = "NETEVENT_REDIRECT", 109 | [NETEVENT_DELAY_PROBE_TIME_UPDATE] = 110 | "NETEVENT_DELAY_PROBE_TIME_UPDATE", 111 | [NETEVENT_IPV4_MPATH_HASH_UPDATE] = 112 | "NETEVENT_IPV4_MPATH_HASH_UPDATE", 113 | [NETEVENT_IPV6_MPATH_HASH_UPDATE] = 114 | "NETEVENT_IPV6_MPATH_HASH_UPDATE", 115 | [NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE] = 116 | "NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE", 117 | }; 118 | 119 | DEF_ENUM_MAP(tc_setup_type) = { 120 | [TC_SETUP_QDISC_MQPRIO] = "TC_SETUP_QDISC_MQPRIO", 121 | [TC_SETUP_CLSU32] = "TC_SETUP_CLSU32", 122 | 
[TC_SETUP_CLSFLOWER] = "TC_SETUP_CLSFLOWER", 123 | [TC_SETUP_CLSMATCHALL] = "TC_SETUP_CLSMATCHALL", 124 | [TC_SETUP_CLSBPF] = "TC_SETUP_CLSBPF", 125 | [TC_SETUP_BLOCK] = "TC_SETUP_BLOCK", 126 | [TC_SETUP_QDISC_CBS] = "TC_SETUP_QDISC_CBS", 127 | [TC_SETUP_QDISC_RED] = "TC_SETUP_QDISC_RED", 128 | [TC_SETUP_QDISC_PRIO] = "TC_SETUP_QDISC_PRIO", 129 | [TC_SETUP_QDISC_MQ] = "TC_SETUP_QDISC_MQ", 130 | [TC_SETUP_QDISC_ETF] = "TC_SETUP_QDISC_ETF", 131 | [TC_SETUP_ROOT_QDISC] = "TC_SETUP_ROOT_QDISC", 132 | [TC_SETUP_QDISC_GRED] = "TC_SETUP_QDISC_GRED", 133 | }; 134 | 135 | DEF_ENUM_MAP(flow_block_binder_type) = { 136 | [FLOW_BLOCK_BINDER_TYPE_UNSPEC] = 137 | "FLOW_BLOCK_BINDER_TYPE_UNSPEC", 138 | [FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS] = 139 | "FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS", 140 | [FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS] = 141 | "FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS", 142 | }; 143 | 144 | DEF_ENUM_MAP(tc_matchall_command) = { 145 | [TC_CLSMATCHALL_REPLACE] = "TC_CLSMATCHALL_REPLACE", 146 | [TC_CLSMATCHALL_DESTROY] = "TC_CLSMATCHALL_DESTROY", 147 | [TC_CLSMATCHALL_STATS] = "TC_CLSMATCHALL_STATS", 148 | }; 149 | 150 | DEF_ENUM_MAP(flow_cls_command) = { 151 | [FLOW_CLS_REPLACE] = "FLOW_CLS_REPLACE", 152 | [FLOW_CLS_DESTROY] = "FLOW_CLS_DESTROY", 153 | [FLOW_CLS_STATS] = "FLOW_CLS_STATS", 154 | [FLOW_CLS_TMPLT_CREATE] = "FLOW_CLS_TMPLT_CREATE", 155 | [FLOW_CLS_TMPLT_DESTROY] = "FLOW_CLS_TMPLT_DESTROY", 156 | }; 157 | 158 | DEF_ENUM_MAP(flow_action_id) = { 159 | [FLOW_ACTION_ACCEPT] = "FLOW_ACTION_ACCEPT", 160 | [FLOW_ACTION_DROP] = "FLOW_ACTION_DROP", 161 | [FLOW_ACTION_TRAP] = "FLOW_ACTION_TRAP", 162 | [FLOW_ACTION_GOTO] = "FLOW_ACTION_GOTO", 163 | [FLOW_ACTION_REDIRECT] = "FLOW_ACTION_REDIRECT", 164 | [FLOW_ACTION_MIRRED] = "FLOW_ACTION_MIRRED", 165 | [FLOW_ACTION_VLAN_PUSH] = "FLOW_ACTION_VLAN_PUSH", 166 | [FLOW_ACTION_VLAN_POP] = "FLOW_ACTION_VLAN_POP", 167 | [FLOW_ACTION_VLAN_MANGLE] = "FLOW_ACTION_VLAN_MANGLE", 168 | [FLOW_ACTION_TUNNEL_ENCAP] = "FLOW_ACTION_TUNNEL_ENCAP", 169 | [FLOW_ACTION_TUNNEL_DECAP] = "FLOW_ACTION_TUNNEL_DECAP", 170 | [FLOW_ACTION_MANGLE] = "FLOW_ACTION_MANGLE", 171 | [FLOW_ACTION_ADD] = "FLOW_ACTION_ADD", 172 | [FLOW_ACTION_CSUM] = "FLOW_ACTION_CSUM", 173 | [FLOW_ACTION_MARK] = "FLOW_ACTION_MARK", 174 | [FLOW_ACTION_WAKE] = "FLOW_ACTION_WAKE", 175 | [FLOW_ACTION_QUEUE] = "FLOW_ACTION_QUEUE", 176 | [FLOW_ACTION_SAMPLE] = "FLOW_ACTION_SAMPLE", 177 | [FLOW_ACTION_POLICE] = "FLOW_ACTION_POLICE", 178 | [FLOW_ACTION_CT] = "FLOW_ACTION_CT", 179 | }; 180 | 181 | DEF_ENUM_FUNC(netdev_cmd, NETDEV_UP, NETDEV_SVLAN_FILTER_DROP_INFO) 182 | 183 | DEF_ENUM_FUNC(switchdev_notifier_type, SWITCHDEV_FDB_ADD_TO_BRIDGE, 184 | SWITCHDEV_VXLAN_FDB_OFFLOADED) 185 | DEF_ENUM_FUNC(switchdev_attr_id, SWITCHDEV_ATTR_ID_UNDEFINED, 186 | SWITCHDEV_ATTR_ID_BRIDGE_MROUTER) 187 | DEF_ENUM_FUNC(switchdev_obj_id, SWITCHDEV_OBJ_ID_UNDEFINED, 188 | SWITCHDEV_OBJ_ID_HOST_MDB) 189 | 190 | DEF_ENUM_FUNC(fib_event_type, FIB_EVENT_ENTRY_REPLACE, FIB_EVENT_VIF_DEL) 191 | 192 | DEF_ENUM_FUNC(netevent_notif_type, NETEVENT_NEIGH_UPDATE, 193 | NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE) 194 | 195 | /* TC traffic control */ 196 | DEF_ENUM_FUNC(tc_setup_type, TC_SETUP_QDISC_MQPRIO, TC_SETUP_QDISC_GRED) 197 | DEF_ENUM_FUNC(flow_block_binder_type, FLOW_BLOCK_BINDER_TYPE_UNSPEC, 198 | FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) 199 | DEF_ENUM_FUNC(tc_matchall_command, TC_CLSMATCHALL_REPLACE, TC_CLSMATCHALL_STATS) 200 | DEF_ENUM_FUNC(flow_cls_command, FLOW_CLS_REPLACE, FLOW_CLS_TMPLT_DESTROY) 201 | 
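/*
 * Note: DEF_ENUM_MAP() and DEF_ENUM_FUNC() are provided by prestera_log.h
 * (not shown here).  The maps in this file translate kernel enum values into
 * printable names for log output, and DEF_ENUM_FUNC(name, min, max)
 * presumably emits a range-checked string accessor over the matching map.
 * A rough guess at an equivalent open-coded accessor; the array and function
 * names below are placeholders and the real macro bodies may differ:
 *
 *      static const char * const netdev_cmd_map[] = {
 *              [NETDEV_UP] = "NETDEV_UP",
 *              ...
 *      };
 *
 *      static const char *netdev_cmd_str(unsigned long val)
 *      {
 *              if (val < NETDEV_UP || val > NETDEV_SVLAN_FILTER_DROP_INFO ||
 *                  !netdev_cmd_map[val])
 *                      return unknown;  // the "UNKNOWN" fallback above
 *              return netdev_cmd_map[val];
 *      }
 *
 * The (min, max) pair passed to each DEF_ENUM_FUNC() invocation matches the
 * first and last initialized entries of the corresponding map.
 */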
DEF_ENUM_FUNC(flow_action_id, FLOW_ACTION_ACCEPT, FLOW_ACTION_CT) 202 | -------------------------------------------------------------------------------- /prestera/prestera_acl.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ 2 | /* Copyright (c) 2019-2021 Marvell International Ltd. All rights reserved. */ 3 | 4 | #ifndef _PRESTERA_ACL_H_ 5 | #define _PRESTERA_ACL_H_ 6 | 7 | #include 8 | #include "prestera_ct.h" 9 | #include "prestera_counter.h" 10 | 11 | #define PRESTERA_ACL_RULE_DEF_HW_CHAIN_ID 0 12 | 13 | #define PRESTERA_ACL_KEYMASK_PCL_ID 0x3FF 14 | #define PRESTERA_ACL_KEYMASK_PCL_ID_USER \ 15 | (PRESTERA_ACL_KEYMASK_PCL_ID & 0x00FF) 16 | #define PRESTERA_ACL_KEYMASK_PCL_ID_CHAIN \ 17 | (PRESTERA_ACL_KEYMASK_PCL_ID & 0xFF00) 18 | #define PRESTERA_ACL_CHAIN_MASK \ 19 | (PRESTERA_ACL_KEYMASK_PCL_ID >> 8) 20 | 21 | #define PRESTERA_ACL_PCL_ID_MAKE(uid, chain_id) \ 22 | (((uid) & PRESTERA_ACL_KEYMASK_PCL_ID_USER) | \ 23 | (((chain_id) << 8) & PRESTERA_ACL_KEYMASK_PCL_ID_CHAIN)) 24 | 25 | #define rule_flag_set(rule, flag) \ 26 | prestera_acl_rule_flag_set(rule, PRESTERA_ACL_RULE_FLAG_##flag) 27 | #define rule_flag_test(rule, flag) \ 28 | prestera_acl_rule_flag_test(rule, PRESTERA_ACL_RULE_FLAG_##flag) 29 | 30 | #define rule_match_set_n(match_p, type, val_p, size) \ 31 | memcpy(&(match_p)[PRESTERA_ACL_RULE_MATCH_TYPE_##type], \ 32 | val_p, size) 33 | #define rule_match_set(match_p, type, val) \ 34 | memcpy(&(match_p)[PRESTERA_ACL_RULE_MATCH_TYPE_##type], \ 35 | &(val), sizeof(val)) 36 | #define rule_match_set_u32(match_p, type, val) \ 37 | ((match_p)[PRESTERA_ACL_RULE_MATCH_TYPE_##type] = \ 38 | htonl(val)) 39 | #define rule_match_set_u16(match_p, type, val) \ 40 | ((match_p)[PRESTERA_ACL_RULE_MATCH_TYPE_##type] = \ 41 | (__force __be32)htons(val)) 42 | #define rule_match_set_u8(match_p, type, val) \ 43 | ((match_p)[PRESTERA_ACL_RULE_MATCH_TYPE_##type] = \ 44 | (__force __be32)(val)) 45 | #define rule_match_get_u32(match_p, type) \ 46 | (match_p[PRESTERA_ACL_RULE_MATCH_TYPE_##type]) 47 | 48 | #define MVSW_ACL_RULE_DEF_HW_CHAIN_ID 0 49 | #define MVSW_ACL_RULESET_ALL 0xff 50 | 51 | #define PRESTERA_ACL_ACTION_MAX 8 52 | 53 | /* HW objects infrastructure */ 54 | struct prestera_mangle_cfg { 55 | u8 l4_src_valid:1, l4_dst_valid:1, 56 | sip_valid:1, dip_valid:1; 57 | __be16 l4_src; 58 | __be16 l4_dst; 59 | struct prestera_ip_addr sip; 60 | struct prestera_ip_addr dip; 61 | }; 62 | 63 | /* TODO: Move mangle_entry to router ? */ 64 | struct prestera_nh_mangle_entry { 65 | struct rhash_head ht_node; /* node of prestera_router */ 66 | struct prestera_nh_mangle_entry_key { 67 | struct prestera_mangle_cfg mangle; 68 | struct prestera_nh_neigh_key n; 69 | } key; 70 | struct prestera_nh_neigh *n; 71 | u32 hw_id; 72 | unsigned long is_active_hw_cache_kick; /* jiffies */ 73 | bool is_active_hw_cache; 74 | u32 ref_cnt; 75 | struct list_head nh_neigh_head; 76 | }; 77 | 78 | struct prestera_acl_rule_entry { 79 | struct rhash_head ht_node; /* node of prestera_sw */ 80 | struct prestera_acl_rule_entry_key { 81 | u32 prio; 82 | struct prestera_acl_match match; 83 | } key; 84 | u32 hw_id; 85 | u32 vtcam_id; 86 | /* This struct seems to be dublicate of arg, but purpose is to pass 87 | * in cfg objet keys, resolve them and save object links here. 88 | * E.g. chain can be link to object, when chain_id just key in cfg. 
89 | */ 90 | struct { 91 | struct { 92 | u8 valid:1; 93 | } accept, drop; 94 | struct { 95 | u8 valid:1; 96 | struct prestera_acl_action_trap i; 97 | } trap; 98 | struct { 99 | u8 valid:1; 100 | struct prestera_acl_action_police i; 101 | } police; 102 | struct { 103 | u8 valid:1; 104 | struct prestera_acl_action_nat i; 105 | } nat; 106 | struct { 107 | u8 valid:1; 108 | struct prestera_acl_action_jump i; 109 | } jump; 110 | struct { 111 | u8 valid:1; 112 | struct prestera_nh_mangle_entry *e; /* entry */ 113 | } nh; 114 | struct { 115 | u32 id; 116 | struct prestera_counter_block *block; 117 | } counter; 118 | struct { 119 | u8 valid:1; 120 | struct prestera_acl_action_remark i; 121 | } remark; 122 | }; 123 | }; 124 | 125 | /* This struct (arg) used only to be passed as parameter for 126 | * acl_rule_entry_create. Must be flat. Can contain object keys, which will be 127 | * resolved to object links, before saving to acl_rule_entry struct 128 | */ 129 | struct prestera_acl_rule_entry_arg { 130 | u32 vtcam_id; 131 | struct { 132 | struct { 133 | u8 valid:1; 134 | } accept, drop; 135 | struct { 136 | u8 valid:1; 137 | struct prestera_acl_action_trap i; 138 | } trap; 139 | struct { 140 | u8 valid:1; 141 | struct prestera_acl_action_police i; 142 | } police; 143 | struct { 144 | u8 valid:1; 145 | struct prestera_acl_action_nat i; 146 | } nat; 147 | struct { 148 | u8 valid:1; 149 | struct prestera_acl_action_jump i; 150 | } jump; 151 | struct { 152 | u8 valid:1; 153 | struct prestera_nh_mangle_entry_key k; /* key */ 154 | } nh; 155 | struct { 156 | u8 valid:1; 157 | u32 client; 158 | } count; 159 | struct { 160 | u8 valid:1; 161 | struct prestera_acl_action_remark i; 162 | } remark; 163 | }; 164 | }; 165 | 166 | enum { 167 | PRESTERA_ACL_RULE_FLAG_CT, 168 | PRESTERA_ACL_RULE_FLAG_GOTO, 169 | PRESTERA_ACL_RULE_FLAG_NAT 170 | }; 171 | 172 | struct prestera_acl_stats { 173 | u64 packets; 174 | u64 bytes; 175 | }; 176 | 177 | struct prestera_acl { 178 | struct prestera_switch *sw; 179 | struct list_head nat_port_list; 180 | struct list_head vtcam_list; 181 | struct list_head rules; 182 | struct rhashtable ruleset_ht; 183 | struct rhashtable acl_rule_entry_ht; 184 | /* TODO: move nh_mangle_entry_ht to router ? 
*/ 185 | struct rhashtable nh_mangle_entry_ht; 186 | struct prestera_ct_priv *ct_priv; 187 | struct idr uid; 188 | }; 189 | 190 | struct prestera_acl_nat_port { 191 | struct list_head list; 192 | struct prestera_port *port; 193 | refcount_t refcount; 194 | }; 195 | 196 | struct prestera_acl_rule_attr { 197 | struct prestera_ct_attr ct_attr; 198 | unsigned long flags; 199 | }; 200 | 201 | struct prestera_acl_rule { 202 | struct rhash_head ht_node; /* Member of acl HT */ 203 | struct list_head list; 204 | struct prestera_acl_nat_port *nat_port; 205 | struct prestera_acl_rule_attr attr; 206 | struct prestera_acl_ruleset *ruleset; 207 | struct prestera_acl_ruleset *jump_ruleset; 208 | unsigned long cookie; 209 | u32 chain_index; 210 | u32 priority; 211 | u8 hw_tc; 212 | struct prestera_acl_rule_entry_key re_key; 213 | struct prestera_acl_rule_entry_arg re_arg; 214 | struct prestera_acl_rule_entry *re; 215 | }; 216 | 217 | enum { 218 | PRESTERA_ACL_IFACE_TYPE_PORT, 219 | PRESTERA_ACL_IFACE_TYPE_INDEX 220 | }; 221 | 222 | struct prestera_acl_iface { 223 | u8 type; 224 | union { 225 | struct prestera_port *port; 226 | u32 index; 227 | }; 228 | }; 229 | 230 | void prestera_acl_rule_flag_set(struct prestera_acl_rule *rule, 231 | unsigned long flag); 232 | bool 233 | prestera_acl_rule_flag_test(const struct prestera_acl_rule *rule, 234 | unsigned long flag); 235 | struct prestera_acl_rule * 236 | prestera_acl_rule_create(struct prestera_acl_ruleset *ruleset, 237 | unsigned long cookie, u32 chain_index); 238 | void prestera_acl_rule_priority_set(struct prestera_acl_rule *rule, 239 | u32 priority); 240 | u8 prestera_acl_rule_hw_tc_get(struct prestera_acl_rule *rule); 241 | void prestera_acl_rule_hw_tc_set(struct prestera_acl_rule *rule, u8 hw_tc); 242 | u8 prestera_acl_rule_hw_chain_id_get(const struct prestera_acl_rule *rule); 243 | void prestera_acl_rule_destroy(struct prestera_acl_rule *rule); 244 | struct prestera_acl_rule * 245 | prestera_acl_rule_lookup(struct prestera_acl_ruleset *ruleset, 246 | unsigned long cookie); 247 | int prestera_acl_rule_add(struct prestera_switch *sw, 248 | struct prestera_acl_rule *rule); 249 | void prestera_acl_rule_del(struct prestera_switch *sw, 250 | struct prestera_acl_rule *rule); 251 | int prestera_acl_rule_get_stats(struct prestera_acl *acl, 252 | struct prestera_acl_rule *rule, 253 | u64 *packets, u64 *bytes, u64 *last_use); 254 | 255 | int prestera_nh_mangle_entry_set(struct prestera_switch *sw, 256 | struct prestera_nh_mangle_entry *e); 257 | bool prestera_nh_mangle_entry_util_hw_state(struct prestera_switch *sw, 258 | struct prestera_nh_mangle_entry *e); 259 | struct prestera_acl_rule_entry * 260 | prestera_acl_rule_entry_find(struct prestera_acl *acl, 261 | struct prestera_acl_rule_entry_key *key); 262 | void prestera_acl_rule_entry_destroy(struct prestera_acl *acl, 263 | struct prestera_acl_rule_entry *e); 264 | struct prestera_acl_rule_entry * 265 | prestera_acl_rule_entry_create(struct prestera_acl *acl, 266 | struct prestera_acl_rule_entry_key *key, 267 | struct prestera_acl_rule_entry_arg *arg); 268 | struct prestera_acl_ruleset * 269 | prestera_acl_ruleset_get(struct prestera_acl *acl, 270 | struct prestera_flow_block *block, 271 | u32 chain_index); 272 | struct prestera_acl_ruleset * 273 | prestera_acl_ruleset_lookup(struct prestera_acl *acl, 274 | struct prestera_flow_block *block, 275 | u32 chain_index); 276 | void prestera_acl_ruleset_keymask_set(struct prestera_acl_ruleset *ruleset, 277 | void *keymask); 278 | int 
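/*
 * Note: the declarations above describe the ACL rule lifecycle.  A minimal
 * usage sketch, with error handling trimmed, of how a consumer such as the
 * flower offload code is expected to drive it:
 *
 *      ruleset = prestera_acl_ruleset_get(acl, block, chain_index);
 *      rule = prestera_acl_rule_create(ruleset, cookie, chain_index);
 *      prestera_acl_rule_priority_set(rule, prio);
 *      // fill rule->re_key.match and rule->re_arg from the flow rule here
 *      err = prestera_acl_rule_add(sw, rule);
 *      ...
 *      // teardown path
 *      prestera_acl_rule_del(sw, rule);
 *      prestera_acl_rule_destroy(rule);
 *      prestera_acl_ruleset_put(ruleset);
 */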
prestera_acl_ruleset_offload(struct prestera_acl_ruleset *ruleset); 279 | u32 prestera_acl_ruleset_index_get(const struct prestera_acl_ruleset *ruleset); 280 | bool prestera_acl_ruleset_is_offload(struct prestera_acl_ruleset *ruleset); 281 | void prestera_acl_ruleset_put(struct prestera_acl_ruleset *ruleset); 282 | int prestera_acl_ruleset_bind(struct prestera_acl_ruleset *ruleset, 283 | struct prestera_port *port); 284 | int prestera_acl_ruleset_unbind(struct prestera_acl_ruleset *ruleset, 285 | struct prestera_port *port); 286 | void 287 | prestera_acl_rule_keymask_pcl_id_set(struct prestera_acl_rule *rule, 288 | u16 pcl_id); 289 | int prestera_acl_vtcam_id_get(struct prestera_acl *acl, u8 lookup, u8 dir, 290 | void *keymask, u32 *vtcam_id); 291 | int prestera_acl_vtcam_id_put(struct prestera_acl *acl, u32 vtcam_id); 292 | int prestera_acl_chain_to_client(u32 chain_index, bool ingress, u32 *client); 293 | 294 | #endif /* _PRESTERA_ACL_H_ */ 295 | -------------------------------------------------------------------------------- /prestera/prestera_dsa.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 2 | /* Copyright (c) 2019-2021 Marvell International Ltd. All rights reserved */ 3 | 4 | #include "prestera.h" 5 | #include "prestera_dsa.h" 6 | 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | #define W0_MASK_IS_TAGGED BIT(29) 13 | 14 | /* TrgDev[4:0] = {Word0[28:24]} */ 15 | #define W0_MASK_HW_DEV_NUM GENMASK(28, 24) 16 | 17 | /* SrcPort/TrgPort extended to 8b 18 | * SrcPort/TrgPort[7:0] = {Word2[20], Word1[11:10], Word0[23:19]} 19 | */ 20 | #define W0_MASK_IFACE_PORT_NUM GENMASK(23, 19) 21 | 22 | /* bits 30:31 - TagCommand 1 = FROM_CPU */ 23 | #define W0_MASK_DSA_CMD GENMASK(31, 30) 24 | 25 | /* bits 13:15 -- UP */ 26 | #define W0_MASK_VPT GENMASK(15, 13) 27 | 28 | #define W0_MASK_EXT_BIT BIT(12) 29 | #define W0_MASK_OPCODE GENMASK(18, 16) 30 | 31 | /* bit 16 - CFI */ 32 | #define W0_MASK_CFI_BIT BIT(16) 33 | 34 | /* bits 0:11 -- VID */ 35 | #define W0_MASK_VID GENMASK(11, 0) 36 | 37 | #define W1_MASK_SRC_IS_TARNK BIT(27) 38 | 39 | /* SrcPort/TrgPort extended to 8b 40 | * SrcPort/TrgPort[7:0] = {Word2[20], Word1[11:10], Word0[23:19]} 41 | */ 42 | #define W1_MASK_IFACE_PORT_NUM GENMASK(11, 10) 43 | 44 | #define W1_MASK_EXT_BIT BIT(31) 45 | #define W1_MASK_CFI_BIT BIT(30) 46 | 47 | /* bit 30 -- EgressFilterEn */ 48 | #define W1_MASK_EGR_FILTER_EN BIT(30) 49 | 50 | /* bit 28 -- egrFilterRegistered */ 51 | #define W1_MASK_EGR_FILTER_REG BIT(28) 52 | 53 | /* bits 20-24 -- Src-ID */ 54 | #define W1_MASK_SRC_ID GENMASK(24, 20) 55 | 56 | /* bits 15-19 -- SrcDev */ 57 | #define W1_MASK_SRC_DEV GENMASK(19, 15) 58 | 59 | /* SrcTrunk is extended to 12b 60 | * SrcTrunk[11:0] = {Word2[14:3] 61 | */ 62 | #define W2_MASK_SRC_TRANK_ID GENMASK(14, 3) 63 | 64 | /* SRCePort[16:0]/TRGePort[16:0]/ = {Word2[19:3]} */ 65 | #define W2_MASK_IFACE_EPORT GENMASK(19, 3) 66 | 67 | /* SrcPort/TrgPort extended to 8b 68 | * SrcPort/TrgPort[7:0] = {Word2[20], Word1[11:10], Word0[23:19]} 69 | */ 70 | #define W2_MASK_IFACE_PORT_NUM BIT(20) 71 | 72 | #define W2_MASK_EXT_BIT BIT(31) 73 | 74 | /* 5b SrcID is extended to 12 bits 75 | * SrcID[11:0] = {Word2[27:21], Word1[24:20]} 76 | */ 77 | #define W2_MASK_SRC_ID GENMASK(27, 21) 78 | 79 | /* 5b SrcDev is extended to 12b 80 | * SrcDev[11:0] = {Word2[20:14], Word1[19:15]} 81 | */ 82 | #define W2_MASK_SRC_DEV GENMASK(20, 14) 83 | 84 | /* trgHwDev and trgPort 85 | * TrgDev[11:5] = 
{Word3[6:0]} 86 | */ 87 | #define W3_MASK_HW_DEV_NUM GENMASK(6, 0) 88 | 89 | /* bits 0-7 -- CpuCode */ 90 | #define W1_MASK_CPU_CODE GENMASK(7, 0) 91 | 92 | /* VID becomes 16b eVLAN. eVLAN[15:0] = {Word3[30:27], Word0[11:0]} */ 93 | #define W3_MASK_VID GENMASK(30, 27) 94 | 95 | /* TRGePort[16:0] = {Word3[23:7]} */ 96 | #define W3_MASK_DST_EPORT GENMASK(23, 7) 97 | 98 | #define DEV_NUM_MASK GENMASK(11, 5) 99 | #define VID_MASK GENMASK(15, 12) 100 | 101 | static int net_if_dsa_to_cpu_parse(const u32 *words_ptr, 102 | struct mvsw_pr_dsa *dsa_info_ptr) 103 | { 104 | u32 get_value; /* used to get needed bits from the DSA */ 105 | struct mvsw_pr_dsa_to_cpu *to_cpu_ptr; 106 | 107 | to_cpu_ptr = &dsa_info_ptr->dsa_info.to_cpu; 108 | to_cpu_ptr->is_tagged = 109 | (bool)FIELD_GET(W0_MASK_IS_TAGGED, words_ptr[0]); 110 | to_cpu_ptr->hw_dev_num = FIELD_GET(W0_MASK_HW_DEV_NUM, words_ptr[0]); 111 | to_cpu_ptr->src_is_trunk = 112 | (bool)FIELD_GET(W1_MASK_SRC_IS_TARNK, words_ptr[1]); 113 | 114 | /* set hw dev num */ 115 | get_value = FIELD_GET(W3_MASK_HW_DEV_NUM, words_ptr[3]); 116 | to_cpu_ptr->hw_dev_num &= W3_MASK_HW_DEV_NUM; 117 | to_cpu_ptr->hw_dev_num |= FIELD_PREP(DEV_NUM_MASK, get_value); 118 | 119 | get_value = FIELD_GET(W1_MASK_CPU_CODE, words_ptr[1]); 120 | to_cpu_ptr->cpu_code = (u8)get_value; 121 | 122 | if (to_cpu_ptr->src_is_trunk) { 123 | to_cpu_ptr->iface.src_trunk_id = 124 | (u16)FIELD_GET(W2_MASK_SRC_TRANK_ID, words_ptr[2]); 125 | } else { 126 | /* When to_cpu_ptr->is_egress_pipe = false: 127 | * this field indicates the source ePort number assigned by 128 | * the ingress device. 129 | * When to_cpu_ptr->is_egress_pipe = true: 130 | * this field indicates the target ePort number assigned by 131 | * the ingress device. 132 | */ 133 | to_cpu_ptr->iface.eport = 134 | FIELD_GET(W2_MASK_IFACE_EPORT, words_ptr[2]); 135 | } 136 | to_cpu_ptr->iface.port_num = 137 | (FIELD_GET(W0_MASK_IFACE_PORT_NUM, words_ptr[0]) << 0) | 138 | (FIELD_GET(W1_MASK_IFACE_PORT_NUM, words_ptr[1]) << 5) | 139 | (FIELD_GET(W2_MASK_IFACE_PORT_NUM, words_ptr[2]) << 7); 140 | 141 | return 0; 142 | } 143 | 144 | int mvsw_pr_dsa_parse(const u8 *dsa_bytes_ptr, struct mvsw_pr_dsa *dsa_info_ptr) 145 | { 146 | u32 get_value; /* used to get needed bits from the DSA */ 147 | u32 words_ptr[4] = { 0 }; /* DSA tag can be up to 4 words */ 148 | u32 *dsa_words_ptr = (u32 *)dsa_bytes_ptr; 149 | 150 | /* sanity */ 151 | if (unlikely(!dsa_info_ptr || !dsa_bytes_ptr)) 152 | return -EINVAL; 153 | 154 | /* zero results */ 155 | memset(dsa_info_ptr, 0, sizeof(struct mvsw_pr_dsa)); 156 | 157 | /* copy the data of the first word */ 158 | words_ptr[0] = ntohl((__force __be32)dsa_words_ptr[0]); 159 | 160 | /* set the common parameters */ 161 | dsa_info_ptr->dsa_cmd = 162 | (enum mvsw_pr_dsa_cmd)FIELD_GET(W0_MASK_DSA_CMD, words_ptr[0]); 163 | 164 | /* vid & vlan prio */ 165 | dsa_info_ptr->common_params.vid = 166 | (u16)FIELD_GET(W0_MASK_VID, words_ptr[0]); 167 | dsa_info_ptr->common_params.vpt = 168 | (u8)FIELD_GET(W0_MASK_VPT, words_ptr[0]); 169 | 170 | /* only to CPU is supported */ 171 | if (unlikely(dsa_info_ptr->dsa_cmd != MVSW_NET_DSA_CMD_TO_CPU_E)) 172 | return -EINVAL; 173 | 174 | /* check extended bit */ 175 | if (FIELD_GET(W0_MASK_EXT_BIT, words_ptr[0]) == 0) 176 | /* 1 words DSA tag is not supported */ 177 | return -EINVAL; 178 | 179 | /* check that the "old" cpu opcode is set the 0xF 180 | * (with the extended bit) 181 | */ 182 | if (FIELD_GET(W0_MASK_OPCODE, words_ptr[0]) != 0x07) 183 | return -EINVAL; 184 | 185 | /* copy the data 
of the second word */ 186 | words_ptr[1] = ntohl((__force __be32)dsa_words_ptr[1]); 187 | 188 | /* check the extended bit */ 189 | if (FIELD_GET(W1_MASK_EXT_BIT, words_ptr[1]) == 0) 190 | /* 2 words DSA tag is not supported */ 191 | return -EINVAL; 192 | 193 | /* copy the data of the third word */ 194 | words_ptr[2] = ntohl((__force __be32)dsa_words_ptr[2]); 195 | 196 | /* check the extended bit */ 197 | if (FIELD_GET(W2_MASK_EXT_BIT, words_ptr[1]) == 0) 198 | /* 3 words DSA tag is not supported */ 199 | return -EINVAL; 200 | 201 | /* copy the data of the forth word */ 202 | words_ptr[3] = ntohl((__force __be32)dsa_words_ptr[3]); 203 | 204 | /* VID */ 205 | get_value = FIELD_GET(W3_MASK_VID, words_ptr[3]); 206 | dsa_info_ptr->common_params.vid &= ~VID_MASK; 207 | dsa_info_ptr->common_params.vid |= FIELD_PREP(VID_MASK, get_value); 208 | 209 | dsa_info_ptr->common_params.cfi_bit = 210 | (u8)FIELD_GET(W1_MASK_CFI_BIT, words_ptr[1]); 211 | 212 | return net_if_dsa_to_cpu_parse(words_ptr, dsa_info_ptr); 213 | } 214 | 215 | static int net_if_dsa_tag_from_cpu_build(const struct mvsw_pr_dsa *dsa_info_ptr, 216 | u32 *words_ptr) 217 | { 218 | u32 trg_hw_dev = 0; 219 | u32 trg_port = 0; 220 | const struct mvsw_pr_dsa_from_cpu *from_cpu_ptr = 221 | &dsa_info_ptr->dsa_info.from_cpu; 222 | 223 | if (unlikely(from_cpu_ptr->dst_iface.type != PRESTERA_IF_PORT_E)) 224 | /* only sending to port interface is supported */ 225 | return -EINVAL; 226 | 227 | words_ptr[0] |= 228 | FIELD_PREP(W0_MASK_DSA_CMD, MVSW_NET_DSA_CMD_FROM_CPU_E); 229 | 230 | trg_hw_dev = from_cpu_ptr->dst_iface.dev_port.hw_dev_num; 231 | trg_port = from_cpu_ptr->dst_iface.dev_port.port_num; 232 | 233 | if (trg_hw_dev >= BIT(12)) 234 | return -EINVAL; 235 | 236 | if (trg_port >= BIT(8) || trg_port >= BIT(10)) 237 | return -EINVAL; 238 | 239 | words_ptr[0] |= FIELD_PREP(W0_MASK_HW_DEV_NUM, trg_hw_dev); 240 | words_ptr[3] |= FIELD_PREP(W3_MASK_HW_DEV_NUM, (trg_hw_dev >> 5)); 241 | 242 | if (dsa_info_ptr->common_params.cfi_bit == 1) 243 | words_ptr[0] |= FIELD_PREP(W0_MASK_CFI_BIT, 1); 244 | 245 | words_ptr[0] |= FIELD_PREP(W0_MASK_VPT, 246 | dsa_info_ptr->common_params.vpt); 247 | words_ptr[0] |= FIELD_PREP(W0_MASK_VID, 248 | dsa_info_ptr->common_params.vid); 249 | 250 | /* set extended bits */ 251 | words_ptr[0] |= FIELD_PREP(W0_MASK_EXT_BIT, 1); 252 | words_ptr[1] |= FIELD_PREP(W1_MASK_EXT_BIT, 1); 253 | words_ptr[2] |= FIELD_PREP(W2_MASK_EXT_BIT, 1); 254 | 255 | if (from_cpu_ptr->egr_filter_en) 256 | words_ptr[1] |= FIELD_PREP(W1_MASK_EGR_FILTER_EN, 1); 257 | 258 | if (from_cpu_ptr->egr_filter_registered) 259 | words_ptr[1] |= FIELD_PREP(W1_MASK_EGR_FILTER_REG, 1); 260 | 261 | /* check src_id & src_hw_dev */ 262 | if (from_cpu_ptr->src_id >= BIT(12) || 263 | from_cpu_ptr->src_hw_dev >= BIT(12)) { 264 | return -EINVAL; 265 | } 266 | 267 | words_ptr[1] |= FIELD_PREP(W1_MASK_SRC_ID, from_cpu_ptr->src_id); 268 | words_ptr[1] |= FIELD_PREP(W1_MASK_SRC_DEV, from_cpu_ptr->src_hw_dev); 269 | 270 | words_ptr[2] |= FIELD_PREP(W2_MASK_SRC_ID, from_cpu_ptr->src_id >> 5); 271 | words_ptr[2] |= FIELD_PREP(W2_MASK_SRC_DEV, 272 | from_cpu_ptr->src_hw_dev >> 5); 273 | 274 | /* bits 0:9 -- reserved with value 0 */ 275 | if (from_cpu_ptr->dst_eport >= BIT(17)) 276 | return -EINVAL; 277 | 278 | words_ptr[3] |= FIELD_PREP(W3_MASK_DST_EPORT, from_cpu_ptr->dst_eport); 279 | words_ptr[3] |= FIELD_PREP(W3_MASK_VID, 280 | (dsa_info_ptr->common_params.vid >> 12)); 281 | 282 | return 0; 283 | } 284 | 285 | int mvsw_pr_dsa_build(const struct mvsw_pr_dsa *dsa_info_ptr, 
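/*
 * Note: the 4-word extended DSA tag splits several fields across words,
 * which is why both the parse and build paths combine FIELD_GET() and
 * FIELD_PREP() with explicit shifts.  Worked example for the 12-bit target
 * device number used by the FROM_CPU builder above (value chosen only for
 * illustration):
 *
 *      trg_hw_dev = 0x5A                  (0101 1010b)
 *      Word0[28:24] = 0x5A & 0x1F = 0x1A  (low 5 bits, W0_MASK_HW_DEV_NUM)
 *      Word3[6:0]   = 0x5A >> 5   = 0x02  (bits 11:5, W3_MASK_HW_DEV_NUM)
 *
 * The TO_CPU parser does the inverse: it keeps Word0's five bits and ORs in
 * Word3's bits shifted back up by five via FIELD_PREP(DEV_NUM_MASK, ...),
 * reconstructing 0x5A.  The VID is handled the same way: 12 bits in
 * Word0[11:0] plus the top 4 bits of the 16-bit eVLAN in Word3[30:27].
 */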
286 | u8 *dsa_bytes_ptr) 287 | { 288 | int rc; 289 | u32 words_ptr[4] = { 0 }; /* 4 words of DSA tag */ 290 | __be32 *dsa_words_ptr = (__be32 *)dsa_bytes_ptr; 291 | 292 | if (unlikely(!dsa_info_ptr || !dsa_bytes_ptr)) 293 | return -EINVAL; 294 | 295 | if (dsa_info_ptr->common_params.cfi_bit >= BIT(1) || 296 | dsa_info_ptr->common_params.vpt >= BIT(3)) { 297 | return -EINVAL; 298 | } 299 | 300 | if (unlikely(dsa_info_ptr->dsa_cmd != MVSW_NET_DSA_CMD_FROM_CPU_E)) 301 | return -EINVAL; 302 | 303 | /* build form CPU DSA tag */ 304 | rc = net_if_dsa_tag_from_cpu_build(dsa_info_ptr, words_ptr); 305 | if (rc != 0) 306 | return rc; 307 | 308 | dsa_words_ptr[0] = htonl(words_ptr[0]); 309 | dsa_words_ptr[1] = htonl(words_ptr[1]); 310 | dsa_words_ptr[2] = htonl(words_ptr[2]); 311 | dsa_words_ptr[3] = htonl(words_ptr[3]); 312 | 313 | return 0; 314 | } 315 | -------------------------------------------------------------------------------- /prestera/prestera_dcb.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 2 | /* Copyright (c) 2021 Marvell International Ltd. All rights reserved */ 3 | 4 | #include 5 | #include 6 | 7 | #include "prestera.h" 8 | #include "prestera_hw.h" 9 | #include "prestera_dcb.h" 10 | #include "prestera_acl.h" 11 | 12 | #define PRESTERA_ACL_QOS_REMARK_PRIO (0) 13 | #define PRESTERA_QOS_SP_TO_PROFILE_INDEX(__sp) ((__sp) | 0b1000) 14 | 15 | struct prestera_qos { 16 | struct prestera_acl_rule_entry *rule[IEEE_8021QAZ_MAX_TCS]; 17 | bool bind; 18 | u32 trust_mode; 19 | u32 vtcam_id; 20 | u8 uid; 21 | }; 22 | 23 | struct prestera_acl_prio_dscp_map { 24 | u32 dscp[IEEE_8021QAZ_MAX_TCS]; 25 | }; 26 | 27 | static int prestera_qos_remark_port_bind(struct prestera_port *port) 28 | { 29 | struct prestera_acl *acl = port->sw->acl; 30 | struct prestera_acl_iface iface; 31 | struct prestera_acl_match match; 32 | u32 vtcam_id; 33 | int err = 0; 34 | u32 uid = 0; 35 | u32 pcl_id; 36 | 37 | err = idr_alloc_u32(&acl->uid, NULL, &uid, U8_MAX, GFP_KERNEL); 38 | if (err) 39 | goto err_uid; 40 | 41 | memset(&match, 0, sizeof(match)); 42 | rule_match_set_u16(match.mask, PCL_ID, PRESTERA_ACL_KEYMASK_PCL_ID); 43 | rule_match_set_u8(match.mask, QOS_PROFILE, 0xff); 44 | 45 | err = prestera_acl_vtcam_id_get(acl, 0, PRESTERA_HW_VTCAM_DIR_EGRESS, 46 | match.mask, &vtcam_id); 47 | if (err) 48 | goto err_vtcam; 49 | 50 | pcl_id = PRESTERA_ACL_PCL_ID_MAKE((u8)uid, 0); 51 | iface.type = PRESTERA_ACL_IFACE_TYPE_PORT; 52 | iface.port = port; 53 | 54 | err = prestera_hw_vtcam_iface_bind(port->sw, &iface, 55 | vtcam_id, pcl_id); 56 | if (err) 57 | goto err_bind; 58 | 59 | port->qos->uid = uid; 60 | port->qos->vtcam_id = vtcam_id; 61 | port->qos->bind = true; 62 | 63 | return 0; 64 | err_bind: 65 | prestera_acl_vtcam_id_put(acl, vtcam_id); 66 | err_vtcam: 67 | idr_remove(&acl->uid, uid); 68 | err_uid: 69 | return err; 70 | } 71 | 72 | static void prestera_qos_remark_port_unbind(struct prestera_port *port) 73 | { 74 | struct prestera_acl *acl = port->sw->acl; 75 | struct prestera_acl_iface iface = { 76 | .type = PRESTERA_ACL_IFACE_TYPE_PORT, 77 | .port = port 78 | }; 79 | 80 | if (!port->qos->bind) 81 | return; 82 | 83 | WARN_ON(prestera_hw_vtcam_iface_unbind(port->sw, &iface, 84 | port->qos->vtcam_id)); 85 | WARN_ON(prestera_acl_vtcam_id_put(acl, port->qos->vtcam_id)); 86 | idr_remove(&acl->uid, port->qos->uid); 87 | 88 | port->qos->bind = false; 89 | } 90 | 91 | static void prestera_qos_remark_rules_del(struct prestera_port *port) 
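/*
 * Note: the PCL id programmed into the vTCAM lookup above is built with
 * PRESTERA_ACL_PCL_ID_MAKE() from prestera_acl.h: the low 8 bits carry the
 * user id allocated from acl->uid and the next 2 bits carry the chain id
 * (the full keymask is the 10-bit 0x3FF).  Worked example:
 *
 *      PRESTERA_ACL_PCL_ID_MAKE(0x12, 3)
 *              = (0x12 & 0x00FF) | ((3 << 8) & 0x0300)
 *              = 0x012 | 0x300
 *              = 0x312
 *
 * The QoS remark binding above always passes chain 0, so its pcl_id is just
 * the idr-allocated uid.
 */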
92 | { 93 | int i; 94 | 95 | for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 96 | if (!port->qos->rule[i]) 97 | continue; 98 | 99 | prestera_acl_rule_entry_destroy(port->sw->acl, 100 | port->qos->rule[i]); 101 | port->qos->rule[i] = NULL; 102 | } 103 | } 104 | 105 | static int prestera_qos_remark_rules_add(struct prestera_port *port, 106 | struct prestera_acl_prio_dscp_map *map) 107 | { 108 | struct prestera_acl_rule_entry_key re_key; 109 | struct prestera_acl_rule_entry_arg re_arg; 110 | struct prestera_acl_rule_entry *re; 111 | u32 pcl_id; 112 | int err; 113 | int i; 114 | 115 | memset(&re_key, 0, sizeof(re_key)); 116 | memset(&re_arg, 0, sizeof(re_arg)); 117 | 118 | pcl_id = PRESTERA_ACL_PCL_ID_MAKE(port->qos->uid, 0); 119 | re_key.prio = PRESTERA_ACL_QOS_REMARK_PRIO; 120 | re_arg.remark.valid = 1; 121 | re_arg.vtcam_id = port->qos->vtcam_id; 122 | 123 | rule_match_set_u16(re_key.match.key, PCL_ID, pcl_id); 124 | rule_match_set_u16(re_key.match.mask, PCL_ID, 125 | PRESTERA_ACL_KEYMASK_PCL_ID); 126 | rule_match_set_u8(re_key.match.mask, QOS_PROFILE, 0xff); 127 | 128 | for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 129 | rule_match_set_u8(re_key.match.key, QOS_PROFILE, 130 | PRESTERA_QOS_SP_TO_PROFILE_INDEX(i)); 131 | re_arg.remark.i.dscp = map->dscp[i]; 132 | 133 | re = prestera_acl_rule_entry_create(port->sw->acl, &re_key, 134 | &re_arg); 135 | err = !re ? -EINVAL : 0; 136 | if (err) 137 | goto err_rule_add; 138 | 139 | port->qos->rule[i] = re; 140 | } 141 | 142 | return 0; 143 | 144 | err_rule_add: 145 | prestera_qos_remark_rules_del(port); 146 | return err; 147 | } 148 | 149 | static int prestera_dcb_app_validate(struct net_device *dev, 150 | struct dcb_app *app) 151 | { 152 | int prio; 153 | 154 | if (app->priority >= IEEE_8021QAZ_MAX_TCS) { 155 | netdev_err(dev, "APP entry with priority value %u is invalid\n", 156 | app->priority); 157 | return -EINVAL; 158 | } 159 | 160 | switch (app->selector) { 161 | case IEEE_8021QAZ_APP_SEL_DSCP: 162 | if (app->protocol >= 64) { 163 | netdev_err(dev, "DSCP APP entry with protocol value %u is invalid\n", 164 | app->protocol); 165 | return -EINVAL; 166 | } 167 | 168 | /* Warn about any DSCP APP entries with the same PID. */ 169 | prio = fls(dcb_ieee_getapp_mask(dev, app)); 170 | if (prio--) { 171 | if (prio < app->priority) 172 | netdev_warn(dev, "Choosing priority %d for DSCP %d in favor of previously-active value of %d\n", 173 | app->priority, app->protocol, prio); 174 | else if (prio > app->priority) 175 | netdev_warn(dev, "Ignoring new priority %d for DSCP %d in favor of current value of %d\n", 176 | app->priority, app->protocol, prio); 177 | } 178 | break; 179 | 180 | case IEEE_8021QAZ_APP_SEL_ETHERTYPE: 181 | if (app->protocol) { 182 | netdev_err(dev, "EtherType APP entries with protocol value != 0 not supported\n"); 183 | return -EINVAL; 184 | } 185 | break; 186 | 187 | default: 188 | netdev_err(dev, "APP entries with selector %u not supported\n", 189 | app->selector); 190 | return -EINVAL; 191 | } 192 | 193 | return 0; 194 | } 195 | 196 | static u8 prestera_dcb_port_default_prio(struct prestera_port *port) 197 | { 198 | u8 prio_mask; 199 | 200 | prio_mask = dcb_ieee_getapp_default_prio_mask(port->net_dev); 201 | if (prio_mask) 202 | /* Take the highest configured priority. 
*/ 203 | return fls(prio_mask) - 1; 204 | 205 | return 0; 206 | } 207 | 208 | static void prestera_dcb_port_dscp_prio_map(struct prestera_port *port, 209 | u8 default_prio, 210 | struct dcb_ieee_app_dscp_map *map) 211 | { 212 | int i; 213 | 214 | dcb_ieee_getapp_dscp_prio_mask_map(port->net_dev, map); 215 | for (i = 0; i < ARRAY_SIZE(map->map); ++i) { 216 | if (map->map[i]) 217 | map->map[i] = fls(map->map[i]) - 1; 218 | else 219 | map->map[i] = default_prio; 220 | } 221 | } 222 | 223 | static bool prestera_dcb_port_prio_dscp_map(struct prestera_port *port, 224 | struct dcb_ieee_app_prio_map *map) 225 | { 226 | bool have_dscp = false; 227 | int i; 228 | 229 | dcb_ieee_getapp_prio_dscp_mask_map(port->net_dev, map); 230 | for (i = 0; i < ARRAY_SIZE(map->map); ++i) { 231 | if (map->map[i]) { 232 | map->map[i] = fls64(map->map[i]) - 1; 233 | have_dscp = true; 234 | } 235 | } 236 | 237 | return have_dscp; 238 | } 239 | 240 | static int prestera_port_trust_mode_set(struct prestera_port *port, u8 mode) 241 | { 242 | int err; 243 | 244 | err = prestera_hw_port_qos_trust_mode_set(port, mode); 245 | if (err) 246 | return err; 247 | 248 | if (mode == PRESTERA_HW_QOS_TRUST_MODE_L3) { 249 | err = prestera_qos_remark_port_bind(port); 250 | if (err) 251 | goto err_trust_mode; 252 | } else { 253 | prestera_qos_remark_rules_del(port); 254 | prestera_qos_remark_port_unbind(port); 255 | } 256 | 257 | port->qos->trust_mode = mode; 258 | return 0; 259 | 260 | err_trust_mode: 261 | prestera_hw_port_qos_trust_mode_set(port, port->qos->trust_mode); 262 | return err; 263 | } 264 | 265 | static int prestera_dcb_port_app_update(struct prestera_port *port, 266 | struct dcb_app *app) 267 | { 268 | struct prestera_acl_prio_dscp_map remark_map; 269 | struct dcb_ieee_app_dscp_map dscp_map; 270 | struct dcb_ieee_app_prio_map prio_map; 271 | u8 default_prio; 272 | bool have_dscp; 273 | int err = 0; 274 | u8 mode; 275 | int i; 276 | 277 | have_dscp = prestera_dcb_port_prio_dscp_map(port, &prio_map); 278 | 279 | mode = have_dscp ? 
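/*
 * Note: both directions of the DSCP mapping are derived from the same IEEE
 * APP table.  Installing the APP entry {selector = DSCP, protocol = 46,
 * priority = 6} (with a recent iproute2, roughly
 * "dcb app add dev <port> dscp-prio 46:6") makes
 * dcb_ieee_getapp_dscp_prio_mask_map() report priority 6 for DSCP 46
 * (ingress classification), while dcb_ieee_getapp_prio_dscp_mask_map()
 * reports DSCP 46 for priority 6 (egress remarking rules).  Any DSCP APP
 * entry selects the L3 trust mode chosen just below; with none present the
 * port falls back to L2 (PCP) trust.
 */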
PRESTERA_HW_QOS_TRUST_MODE_L3 : 280 | PRESTERA_HW_QOS_TRUST_MODE_L2; 281 | 282 | if (port->qos->trust_mode != mode) { 283 | err = prestera_port_trust_mode_set(port, mode); 284 | if (err) { 285 | netdev_err(port->net_dev, 286 | "Failed to configure trust mode\n"); 287 | return err; 288 | } 289 | } 290 | 291 | default_prio = prestera_dcb_port_default_prio(port); 292 | prestera_dcb_port_dscp_prio_map(port, default_prio, &dscp_map); 293 | 294 | err = prestera_hw_port_qos_default_prio_set(port, default_prio); 295 | if (err) { 296 | netdev_err(port->net_dev, 297 | "Failed to configure default priority\n"); 298 | return err; 299 | } 300 | 301 | if (mode != PRESTERA_HW_QOS_TRUST_MODE_L3) 302 | return 0; 303 | 304 | err = prestera_hw_port_qos_mapping_update(port, &dscp_map); 305 | if (err) { 306 | netdev_err(port->net_dev, "Failed to configure priority\n"); 307 | return err; 308 | } 309 | 310 | for (i = 0; i < ARRAY_SIZE(remark_map.dscp); i++) 311 | remark_map.dscp[i] = (u32)prio_map.map[i]; 312 | 313 | prestera_qos_remark_rules_del(port); 314 | err = prestera_qos_remark_rules_add(port, &remark_map); 315 | if (err) { 316 | netdev_err(port->net_dev, "Failed to create remarking rules\n"); 317 | return err; 318 | } 319 | 320 | return err; 321 | } 322 | 323 | static int prestera_dcb_port_app_flush(struct prestera_port *port, 324 | struct dcb_app *app) 325 | { 326 | int err; 327 | 328 | err = prestera_hw_port_qos_default_prio_set(port, 0); 329 | if (err) { 330 | netdev_err(port->net_dev, 331 | "Failed to reset default priority\n"); 332 | return err; 333 | } 334 | 335 | err = prestera_port_trust_mode_set(port, PRESTERA_HW_QOS_TRUST_MODE_L2); 336 | if (err) { 337 | netdev_err(port->net_dev, 338 | "Failed to reset trust mode\n"); 339 | return err; 340 | } 341 | 342 | return 0; 343 | } 344 | 345 | static int prestera_dcb_ieee_setapp(struct net_device *dev, 346 | struct dcb_app *app) 347 | { 348 | struct prestera_port *port = netdev_priv(dev); 349 | int err; 350 | 351 | err = prestera_dcb_app_validate(dev, app); 352 | if (err) 353 | return err; 354 | 355 | err = dcb_ieee_setapp(dev, app); 356 | if (err) 357 | return err; 358 | 359 | err = prestera_dcb_port_app_update(port, app); 360 | if (err) 361 | dcb_ieee_delapp(dev, app); 362 | 363 | return err; 364 | } 365 | 366 | static int prestera_dcb_ieee_delapp(struct net_device *dev, 367 | struct dcb_app *app) 368 | { 369 | struct prestera_port *port = netdev_priv(dev); 370 | int err; 371 | 372 | err = dcb_ieee_delapp(dev, app); 373 | if (err) 374 | return err; 375 | 376 | err = prestera_dcb_port_app_flush(port, app); 377 | if (err) 378 | return err; 379 | 380 | return 0; 381 | } 382 | 383 | static const struct dcbnl_rtnl_ops prestera_dcbnl_ops = { 384 | .ieee_setapp = prestera_dcb_ieee_setapp, 385 | .ieee_delapp = prestera_dcb_ieee_delapp, 386 | }; 387 | 388 | int prestera_port_dcb_init(struct prestera_port *port) 389 | { 390 | port->qos = kzalloc(sizeof(*port->qos), GFP_KERNEL); 391 | if (!port->qos) 392 | return -ENOMEM; 393 | 394 | port->net_dev->dcbnl_ops = &prestera_dcbnl_ops; 395 | port->qos->trust_mode = PRESTERA_HW_QOS_TRUST_MODE_L2; 396 | 397 | return 0; 398 | } 399 | 400 | void prestera_port_dcb_fini(struct prestera_port *port) 401 | { 402 | prestera_qos_remark_rules_del(port); 403 | prestera_qos_remark_port_unbind(port); 404 | kfree(port->qos); 405 | 406 | port->net_dev->dcbnl_ops = NULL; 407 | } 408 | -------------------------------------------------------------------------------- /prestera/prestera_counter.c: 
-------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 2 | /* Copyright (c) 2021 Marvell International Ltd. All rights reserved */ 3 | 4 | #include "prestera.h" 5 | #include "prestera_hw.h" 6 | #include "prestera_acl.h" 7 | #include "prestera_counter.h" 8 | 9 | #define COUNTER_POLL_TIME (msecs_to_jiffies(1000)) 10 | #define COUNTER_RESCHED_TIME (msecs_to_jiffies(50)) 11 | #define COUNTER_BULK_SIZE (256) 12 | 13 | struct prestera_counter { 14 | struct prestera_switch *sw; 15 | struct delayed_work stats_dw; 16 | bool is_fetching; 17 | u32 total_read; 18 | struct mutex mtx; /* protect block_list */ 19 | struct prestera_counter_block **block_list; 20 | u32 block_list_len; 21 | u32 curr_idx; 22 | }; 23 | 24 | struct prestera_counter_block { 25 | struct list_head list; 26 | u32 id; 27 | u32 offset; 28 | u32 num_counters; 29 | u32 client; 30 | struct idr counter_idr; 31 | bool full; 32 | bool is_updating; 33 | refcount_t refcnt; 34 | struct mutex mtx; /* protect stats and counter_idr */ 35 | struct prestera_counter_stats *stats; 36 | u8 *counter_flag; 37 | }; 38 | 39 | enum { 40 | COUNTER_FLAG_READY = 0, 41 | COUNTER_FLAG_INVALID = 1 42 | }; 43 | 44 | static bool 45 | prestera_counter_is_ready(struct prestera_counter_block *block, u32 id) 46 | { 47 | return block->counter_flag[id - block->offset] == COUNTER_FLAG_READY; 48 | } 49 | 50 | static void prestera_counter_lock(struct prestera_counter *counter) 51 | { 52 | mutex_lock(&counter->mtx); 53 | } 54 | 55 | static void prestera_counter_unlock(struct prestera_counter *counter) 56 | { 57 | mutex_unlock(&counter->mtx); 58 | } 59 | 60 | static void prestera_counter_block_lock(struct prestera_counter_block *block) 61 | { 62 | mutex_lock(&block->mtx); 63 | } 64 | 65 | static void prestera_counter_block_unlock(struct prestera_counter_block *block) 66 | { 67 | mutex_unlock(&block->mtx); 68 | } 69 | 70 | static bool prestera_counter_block_incref(struct prestera_counter_block *block) 71 | { 72 | return refcount_inc_not_zero(&block->refcnt); 73 | } 74 | 75 | static bool prestera_counter_block_decref(struct prestera_counter_block *block) 76 | { 77 | return refcount_dec_and_test(&block->refcnt); 78 | } 79 | 80 | /* must be called with prestera_counter_block_lock() */ 81 | static void prestera_counter_stats_clear(struct prestera_counter_block *block, 82 | u32 counter_id) 83 | { 84 | memset(&block->stats[counter_id - block->offset], 0, 85 | sizeof(*block->stats)); 86 | } 87 | 88 | static struct prestera_counter_block * 89 | prestera_counter_block_lookup_not_full(struct prestera_counter *counter, 90 | u32 client) 91 | { 92 | u32 i; 93 | 94 | prestera_counter_lock(counter); 95 | for (i = 0; i < counter->block_list_len; i++) { 96 | if (counter->block_list[i] && 97 | counter->block_list[i]->client == client && 98 | !counter->block_list[i]->full && 99 | prestera_counter_block_incref(counter->block_list[i])) { 100 | prestera_counter_unlock(counter); 101 | return counter->block_list[i]; 102 | } 103 | } 104 | prestera_counter_unlock(counter); 105 | 106 | return NULL; 107 | } 108 | 109 | static int prestera_counter_block_list_add(struct prestera_counter *counter, 110 | struct prestera_counter_block *block) 111 | { 112 | struct prestera_counter_block **arr; 113 | u32 i; 114 | 115 | prestera_counter_lock(counter); 116 | 117 | for (i = 0; i < counter->block_list_len; i++) { 118 | if (counter->block_list[i]) 119 | continue; 120 | 121 | counter->block_list[i] = block; 122 | 
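/*
 * Note: counter ids handed out to users are global hardware ids, not
 * per-block indexes.  Each block covers the id range
 * [offset, offset + num_counters); idr_alloc_cyclic() allocates inside that
 * window and the local stats/flag arrays are indexed with (id - offset).
 * Worked example (numbers chosen only for illustration):
 *
 *      block->offset = 1024, block->num_counters = 256
 *      valid counter ids:  1024 .. 1279
 *      stats/flag index:   id - 1024  (0 .. 255)
 *
 * This is why prestera_counter_is_ready() and prestera_counter_stats_clear()
 * subtract block->offset before touching the arrays.
 */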
prestera_counter_unlock(counter); 123 | return 0; 124 | } 125 | 126 | arr = krealloc(counter->block_list, (counter->block_list_len + 1) * 127 | sizeof(*counter->block_list), GFP_KERNEL); 128 | if (!arr) { 129 | prestera_counter_unlock(counter); 130 | return -ENOMEM; 131 | } 132 | 133 | counter->block_list = arr; 134 | counter->block_list[counter->block_list_len] = block; 135 | counter->block_list_len++; 136 | prestera_counter_unlock(counter); 137 | return 0; 138 | } 139 | 140 | static struct prestera_counter_block * 141 | prestera_counter_block_get(struct prestera_counter *counter, 142 | u32 client) 143 | { 144 | struct prestera_counter_block *block; 145 | int err; 146 | 147 | block = prestera_counter_block_lookup_not_full(counter, client); 148 | if (!block) { 149 | block = kzalloc(sizeof(*block), GFP_KERNEL); 150 | if (!block) 151 | return ERR_PTR(-ENOMEM); 152 | 153 | err = prestera_hw_counter_block_get(counter->sw, client, 154 | &block->id, &block->offset, 155 | &block->num_counters); 156 | if (err) 157 | goto err_block; 158 | 159 | block->stats = kcalloc(block->num_counters, 160 | sizeof(*block->stats), GFP_KERNEL); 161 | if (!block->stats) { 162 | err = -ENOMEM; 163 | goto err_stats; 164 | } 165 | 166 | block->counter_flag = kcalloc(block->num_counters, 167 | sizeof(*block->counter_flag), 168 | GFP_KERNEL); 169 | if (!block->counter_flag) { 170 | err = -ENOMEM; 171 | goto err_flag; 172 | } 173 | 174 | block->client = client; 175 | mutex_init(&block->mtx); 176 | refcount_set(&block->refcnt, 1); 177 | idr_init_base(&block->counter_idr, block->offset); 178 | 179 | err = prestera_counter_block_list_add(counter, block); 180 | if (err) 181 | goto err_list_add; 182 | } 183 | 184 | return block; 185 | 186 | err_list_add: 187 | idr_destroy(&block->counter_idr); 188 | mutex_destroy(&block->mtx); 189 | kfree(block->counter_flag); 190 | err_flag: 191 | kfree(block->stats); 192 | err_stats: 193 | prestera_hw_counter_block_release(counter->sw, block->id); 194 | err_block: 195 | kfree(block); 196 | return ERR_PTR(err); 197 | } 198 | 199 | static void prestera_counter_block_put(struct prestera_counter *counter, 200 | struct prestera_counter_block *block) 201 | { 202 | u32 i; 203 | 204 | if (!prestera_counter_block_decref(block)) 205 | return; 206 | 207 | prestera_counter_lock(counter); 208 | for (i = 0; i < counter->block_list_len; i++) { 209 | if (counter->block_list[i] && 210 | counter->block_list[i]->id == block->id) { 211 | counter->block_list[i] = NULL; 212 | break; 213 | } 214 | } 215 | prestera_counter_unlock(counter); 216 | 217 | WARN_ON(!idr_is_empty(&block->counter_idr)); 218 | 219 | prestera_hw_counter_block_release(counter->sw, block->id); 220 | idr_destroy(&block->counter_idr); 221 | mutex_destroy(&block->mtx); 222 | kfree(block->stats); 223 | kfree(block); 224 | } 225 | 226 | static int prestera_counter_get_vacant(struct prestera_counter_block *block, 227 | u32 *id) 228 | { 229 | int free_id; 230 | 231 | if (block->full) 232 | return -ENOSPC; 233 | 234 | prestera_counter_block_lock(block); 235 | free_id = idr_alloc_cyclic(&block->counter_idr, NULL, block->offset, 236 | block->offset + block->num_counters, 237 | GFP_KERNEL); 238 | if (free_id < 0) { 239 | if (free_id == -ENOSPC) 240 | block->full = true; 241 | 242 | prestera_counter_block_unlock(block); 243 | return free_id; 244 | } 245 | *id = free_id; 246 | prestera_counter_block_unlock(block); 247 | 248 | return 0; 249 | } 250 | 251 | int prestera_counter_get(struct prestera_counter *counter, u32 client, 252 | struct 
prestera_counter_block **bl, u32 *counter_id) 253 | { 254 | struct prestera_counter_block *block; 255 | int err; 256 | u32 id; 257 | 258 | get_next_block: 259 | block = prestera_counter_block_get(counter, client); 260 | if (IS_ERR(block)) 261 | return PTR_ERR(block); 262 | 263 | err = prestera_counter_get_vacant(block, &id); 264 | if (err) { 265 | prestera_counter_block_put(counter, block); 266 | 267 | if (err == -ENOSPC) 268 | goto get_next_block; 269 | 270 | return err; 271 | } 272 | 273 | prestera_counter_block_lock(block); 274 | if (block->is_updating) 275 | block->counter_flag[id - block->offset] = COUNTER_FLAG_INVALID; 276 | prestera_counter_block_unlock(block); 277 | 278 | *counter_id = id; 279 | *bl = block; 280 | 281 | return 0; 282 | } 283 | 284 | void prestera_counter_put(struct prestera_counter *counter, 285 | struct prestera_counter_block *block, u32 counter_id) 286 | { 287 | if (!block) 288 | return; 289 | 290 | prestera_counter_block_lock(block); 291 | idr_remove(&block->counter_idr, counter_id); 292 | block->full = false; 293 | prestera_counter_stats_clear(block, counter_id); 294 | prestera_counter_block_unlock(block); 295 | 296 | prestera_hw_counter_clear(counter->sw, block->id, counter_id); 297 | prestera_counter_block_put(counter, block); 298 | } 299 | 300 | static u32 prestera_counter_block_idx_next(struct prestera_counter *counter, 301 | u32 curr_idx) 302 | { 303 | u32 idx, i, start = curr_idx + 1; 304 | 305 | prestera_counter_lock(counter); 306 | for (i = 0; i < counter->block_list_len; i++) { 307 | idx = (start + i) % counter->block_list_len; 308 | if (!counter->block_list[idx]) 309 | continue; 310 | 311 | prestera_counter_unlock(counter); 312 | return idx; 313 | } 314 | prestera_counter_unlock(counter); 315 | 316 | return 0; 317 | } 318 | 319 | static struct prestera_counter_block * 320 | prestera_counter_block_get_by_idx(struct prestera_counter *counter, u32 idx) 321 | { 322 | if (idx >= counter->block_list_len) 323 | return NULL; 324 | 325 | prestera_counter_lock(counter); 326 | 327 | if (!counter->block_list[idx] || 328 | !prestera_counter_block_incref(counter->block_list[idx])) { 329 | prestera_counter_unlock(counter); 330 | return NULL; 331 | } 332 | 333 | prestera_counter_unlock(counter); 334 | return counter->block_list[idx]; 335 | } 336 | 337 | static void prestera_counter_stats_work(struct work_struct *work) 338 | { 339 | struct delayed_work *dl_work = 340 | container_of(work, struct delayed_work, work); 341 | struct prestera_counter *counter = 342 | container_of(dl_work, struct prestera_counter, stats_dw); 343 | struct prestera_counter_block *block; 344 | u32 resched_time = COUNTER_POLL_TIME; 345 | u32 count = COUNTER_BULK_SIZE; 346 | bool done = false; 347 | int err; 348 | u32 i; 349 | 350 | block = prestera_counter_block_get_by_idx(counter, counter->curr_idx); 351 | if (!block) { 352 | if (counter->is_fetching) 353 | goto abort; 354 | 355 | goto next; 356 | } 357 | 358 | if (!counter->is_fetching) { 359 | err = prestera_hw_counter_trigger(counter->sw, block->id); 360 | if (err) 361 | goto abort; 362 | 363 | prestera_counter_block_lock(block); 364 | block->is_updating = true; 365 | prestera_counter_block_unlock(block); 366 | 367 | counter->is_fetching = true; 368 | counter->total_read = 0; 369 | resched_time = COUNTER_RESCHED_TIME; 370 | goto resched; 371 | } 372 | 373 | prestera_counter_block_lock(block); 374 | err = prestera_hw_counters_get(counter->sw, counter->total_read, 375 | &count, &done, 376 | &block->stats[counter->total_read]); 377 | 
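/*
 * Note: statistics are fetched asynchronously by this work item, so readers
 * never wait on the hardware.  A minimal sketch of the expected consumer
 * pairing (error handling omitted, variable names local to the sketch):
 *
 *      u32 cid;
 *      u64 packets, bytes;
 *      struct prestera_counter_block *cblock;
 *
 *      prestera_counter_get(counter, client, &cblock, &cid);
 *      ...
 *      // returns the last snapshot gathered by this work and clears it
 *      prestera_counter_stats_get(counter, cblock, cid, &packets, &bytes);
 *      ...
 *      prestera_counter_put(counter, cblock, cid);
 */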
prestera_counter_block_unlock(block); 378 | if (err) 379 | goto abort; 380 | 381 | counter->total_read += count; 382 | if (!done || counter->total_read < block->num_counters) { 383 | resched_time = COUNTER_RESCHED_TIME; 384 | goto resched; 385 | } 386 | 387 | for (i = 0; i < block->num_counters; i++) { 388 | if (block->counter_flag[i] == COUNTER_FLAG_INVALID) { 389 | prestera_counter_block_lock(block); 390 | block->counter_flag[i] = COUNTER_FLAG_READY; 391 | memset(&block->stats[i], 0, sizeof(*block->stats)); 392 | prestera_counter_block_unlock(block); 393 | } 394 | } 395 | 396 | prestera_counter_block_lock(block); 397 | block->is_updating = false; 398 | prestera_counter_block_unlock(block); 399 | 400 | goto next; 401 | abort: 402 | prestera_hw_counter_abort(counter->sw); 403 | next: 404 | counter->is_fetching = false; 405 | counter->curr_idx = 406 | prestera_counter_block_idx_next(counter, counter->curr_idx); 407 | resched: 408 | if (block) 409 | prestera_counter_block_put(counter, block); 410 | 411 | schedule_delayed_work(&counter->stats_dw, resched_time); 412 | } 413 | 414 | /* Can be executed without rtnl_lock(). 415 | * So pay attention when something changing. 416 | */ 417 | int prestera_counter_stats_get(struct prestera_counter *counter, 418 | struct prestera_counter_block *block, 419 | u32 counter_id, u64 *packets, u64 *bytes) 420 | { 421 | if (!block || !prestera_counter_is_ready(block, counter_id)) { 422 | *packets = 0; 423 | *bytes = 0; 424 | return 0; 425 | } 426 | 427 | prestera_counter_block_lock(block); 428 | *packets = block->stats[counter_id - block->offset].packets; 429 | *bytes = block->stats[counter_id - block->offset].bytes; 430 | 431 | prestera_counter_stats_clear(block, counter_id); 432 | prestera_counter_block_unlock(block); 433 | 434 | return 0; 435 | } 436 | 437 | int prestera_counter_init(struct prestera_switch *sw) 438 | { 439 | struct prestera_counter *counter; 440 | 441 | counter = kzalloc(sizeof(*counter), GFP_KERNEL); 442 | if (!counter) 443 | return -ENOMEM; 444 | 445 | counter->block_list = kzalloc(sizeof(*counter->block_list), GFP_KERNEL); 446 | if (!counter->block_list) { 447 | kfree(counter); 448 | return -ENOMEM; 449 | } 450 | 451 | mutex_init(&counter->mtx); 452 | counter->block_list_len = 1; 453 | counter->sw = sw; 454 | sw->counter = counter; 455 | 456 | INIT_DELAYED_WORK(&counter->stats_dw, prestera_counter_stats_work); 457 | schedule_delayed_work(&counter->stats_dw, COUNTER_POLL_TIME); 458 | 459 | return 0; 460 | } 461 | 462 | void prestera_counter_fini(struct prestera_switch *sw) 463 | { 464 | struct prestera_counter *counter = sw->counter; 465 | u32 i; 466 | 467 | cancel_delayed_work_sync(&counter->stats_dw); 468 | 469 | for (i = 0; i < counter->block_list_len; i++) 470 | WARN_ON(counter->block_list[i]); 471 | 472 | mutex_destroy(&counter->mtx); 473 | kfree(counter->block_list); 474 | kfree(counter); 475 | } 476 | -------------------------------------------------------------------------------- /prestera/prestera_fw.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 2 | /* Copyright (c) 2019-2021 Marvell International Ltd. 
All rights reserved */ 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | #include "prestera.h" 9 | #include "prestera_fw.h" 10 | 11 | #define PRESTERA_FW_READY_MAGIC 0xcafebabe 12 | 13 | /* Firmware registers: */ 14 | #define PRESTERA_FW_READY_REG PRESTERA_FW_REG_OFFSET(fw_ready) 15 | 16 | #define PRESTERA_CMDQ_REG_OFFSET(q, f) \ 17 | (PRESTERA_FW_REG_OFFSET(cmdq_list) + \ 18 | (q) * sizeof(struct prestera_fw_cmdq_regs) + \ 19 | offsetof(struct prestera_fw_cmdq_regs, f)) 20 | 21 | #define PRESTERA_CMD_BUF_OFFS_REG PRESTERA_FW_REG_OFFSET(cmd_offs) 22 | #define PRESTERA_CMD_BUF_LEN_REG PRESTERA_FW_REG_OFFSET(cmd_len) 23 | #define PRESTERA_CMD_QNUM_REG PRESTERA_FW_REG_OFFSET(cmd_qnum) 24 | #define PRESTERA_EVT_BUF_OFFS_REG PRESTERA_FW_REG_OFFSET(evt_offs) 25 | #define PRESTERA_EVT_QNUM_REG PRESTERA_FW_REG_OFFSET(evt_qnum) 26 | 27 | #define PRESTERA_CMDQ_REQ_CTL_REG(q) PRESTERA_CMDQ_REG_OFFSET(q, cmd_req_ctl) 28 | #define PRESTERA_CMDQ_REQ_LEN_REG(q) PRESTERA_CMDQ_REG_OFFSET(q, cmd_req_len) 29 | #define PRESTERA_CMDQ_RCV_CTL_REG(q) PRESTERA_CMDQ_REG_OFFSET(q, cmd_rcv_ctl) 30 | #define PRESTERA_CMDQ_RCV_LEN_REG(q) PRESTERA_CMDQ_REG_OFFSET(q, cmd_rcv_len) 31 | #define PRESTERA_CMDQ_OFFS_REG(q) PRESTERA_CMDQ_REG_OFFSET(q, offs) 32 | #define PRESTERA_CMDQ_LEN_REG(q) PRESTERA_CMDQ_REG_OFFSET(q, len) 33 | 34 | /* PRESTERA_CMDQ_REQ_CTL_REG flags */ 35 | #define PRESTERA_CMD_F_REQ_SENT BIT(0) 36 | #define PRESTERA_CMD_F_REPL_RCVD BIT(1) 37 | 38 | /* PRESTERA_CMDQ_RCV_CTL_REG flags */ 39 | #define PRESTERA_CMD_F_REPL_SENT BIT(0) 40 | 41 | /* PRESTERA_FW_STATUS_REG flags */ 42 | #define PRESTERA_STATUS_F_EVT_OFF BIT(0) 43 | 44 | #define PRESTERA_EVTQ_REG_OFFSET(q, f) \ 45 | (PRESTERA_FW_REG_OFFSET(evtq_list) + \ 46 | (q) * sizeof(struct prestera_fw_evtq_regs) + \ 47 | offsetof(struct prestera_fw_evtq_regs, f)) 48 | 49 | #define PRESTERA_EVTQ_RD_IDX_REG(q) PRESTERA_EVTQ_REG_OFFSET(q, rd_idx) 50 | #define PRESTERA_EVTQ_WR_IDX_REG(q) PRESTERA_EVTQ_REG_OFFSET(q, wr_idx) 51 | #define PRESTERA_EVTQ_OFFS_REG(q) PRESTERA_EVTQ_REG_OFFSET(q, offs) 52 | #define PRESTERA_EVTQ_LEN_REG(q) PRESTERA_EVTQ_REG_OFFSET(q, len) 53 | 54 | #define FW_VER_MAJ_MUL 1000000 55 | #define FW_VER_MIN_MUL 1000 56 | 57 | static int fw_ver_maj(int v) 58 | { 59 | return ((v) / FW_VER_MAJ_MUL); 60 | } 61 | 62 | static int fw_ver_min(int v) 63 | { 64 | int vv = fw_ver_maj(v) * FW_VER_MAJ_MUL; 65 | 66 | return (((v) - vv) / FW_VER_MIN_MUL); 67 | } 68 | 69 | static int fw_ver_patch(int v) 70 | { 71 | int vv, vvv; 72 | 73 | vv = (fw_ver_maj(v) * FW_VER_MAJ_MUL); 74 | vvv = (fw_ver_min(v) * FW_VER_MIN_MUL); 75 | return ((v) - vv - vvv); 76 | } 77 | 78 | static void prestera_fw_cmdq_lock(struct prestera_fw *fw, u8 qid) 79 | { 80 | mutex_lock(&fw->cmd_queue[qid].cmd_mtx); 81 | } 82 | 83 | static void prestera_fw_cmdq_unlock(struct prestera_fw *fw, u8 qid) 84 | { 85 | mutex_unlock(&fw->cmd_queue[qid].cmd_mtx); 86 | } 87 | 88 | static u32 prestera_fw_cmdq_len(struct prestera_fw *fw, u8 qid) 89 | { 90 | return fw->cmd_queue[qid].len; 91 | } 92 | 93 | static u8 __iomem *prestera_fw_cmdq_buf(struct prestera_fw *fw, u8 qid) 94 | { 95 | return fw->cmd_queue[qid].addr; 96 | } 97 | 98 | static u32 prestera_fw_evtq_len(struct prestera_fw *fw, u8 qid) 99 | { 100 | return fw->evt_queue[qid].len; 101 | } 102 | 103 | static u32 prestera_fw_evtq_avail(struct prestera_fw *fw, u8 qid) 104 | { 105 | u32 wr_idx = prestera_fw_read(fw, PRESTERA_EVTQ_WR_IDX_REG(qid)); 106 | u32 rd_idx = prestera_fw_read(fw, PRESTERA_EVTQ_RD_IDX_REG(qid)); 107 | 108 | return 
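/*
 * Note: fw_ver_maj(), fw_ver_min() and fw_ver_patch() above decode a packed
 * version integer of the form maj * 1000000 + min * 1000 + patch.
 * Worked example:
 *
 *      v = 4002001
 *      fw_ver_maj(v)   = 4002001 / 1000000          = 4
 *      fw_ver_min(v)   = (4002001 - 4000000) / 1000 = 2
 *      fw_ver_patch(v) = 4002001 - 4000000 - 2000   = 1
 *
 * i.e. version "4.2.1".  The event queues handled around here are plain
 * circular buffers: the read/write indexes live in firmware registers and
 * CIRC_CNT() computes how much data is pending.
 */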
CIRC_CNT(wr_idx, rd_idx, prestera_fw_evtq_len(fw, qid)); 109 | } 110 | 111 | static void prestera_fw_evtq_rd_set(struct prestera_fw *fw, u8 qid, u32 idx) 112 | { 113 | u32 rd_idx = idx & (prestera_fw_evtq_len(fw, qid) - 1); 114 | 115 | prestera_fw_write(fw, PRESTERA_EVTQ_RD_IDX_REG(qid), rd_idx); 116 | } 117 | 118 | static u8 __iomem *prestera_fw_evtq_buf(struct prestera_fw *fw, u8 qid) 119 | { 120 | return fw->evt_queue[qid].addr; 121 | } 122 | 123 | static u32 prestera_fw_evtq_read32(struct prestera_fw *fw, u8 qid) 124 | { 125 | u32 rd_idx = prestera_fw_read(fw, PRESTERA_EVTQ_RD_IDX_REG(qid)); 126 | u32 val; 127 | 128 | val = readl(prestera_fw_evtq_buf(fw, qid) + rd_idx); 129 | prestera_fw_evtq_rd_set(fw, qid, rd_idx + 4); 130 | return val; 131 | } 132 | 133 | static ssize_t prestera_fw_evtq_read_buf(struct prestera_fw *fw, u8 qid, 134 | u8 *buf, size_t len) 135 | { 136 | u32 idx = prestera_fw_read(fw, PRESTERA_EVTQ_RD_IDX_REG(qid)); 137 | u8 __iomem *evtq_addr = prestera_fw_evtq_buf(fw, qid); 138 | u32 *buf32 = (u32 *)buf; 139 | int i; 140 | 141 | for (i = 0; i < len / 4; buf32++, i++) { 142 | *buf32 = readl_relaxed(evtq_addr + idx); 143 | idx = (idx + 4) & (prestera_fw_evtq_len(fw, qid) - 1); 144 | } 145 | 146 | prestera_fw_evtq_rd_set(fw, qid, idx); 147 | 148 | return i; 149 | } 150 | 151 | static u8 prestera_fw_evtq_pick(struct prestera_fw *fw) 152 | { 153 | int qid; 154 | 155 | for (qid = 0; qid < fw->evt_qnum; qid++) { 156 | if (prestera_fw_evtq_avail(fw, qid) >= 4) 157 | return qid; 158 | } 159 | 160 | return PRESTERA_EVT_QNUM_MAX; 161 | } 162 | 163 | static void prestera_fw_status_set(struct prestera_fw *fw, unsigned int val) 164 | { 165 | u32 status = prestera_fw_read(fw, PRESTERA_FW_STATUS_REG); 166 | 167 | status |= val; 168 | 169 | prestera_fw_write(fw, PRESTERA_FW_STATUS_REG, status); 170 | } 171 | 172 | static void prestera_fw_status_clear(struct prestera_fw *fw, u32 val) 173 | { 174 | u32 status = prestera_fw_read(fw, PRESTERA_FW_STATUS_REG); 175 | 176 | status &= ~val; 177 | 178 | prestera_fw_write(fw, PRESTERA_FW_STATUS_REG, status); 179 | } 180 | 181 | void prestera_fw_handle_event(struct prestera_fw *fw) 182 | { 183 | u8 *msg; 184 | u8 qid; 185 | 186 | msg = fw->evt_msg; 187 | 188 | prestera_fw_status_set(fw, PRESTERA_STATUS_F_EVT_OFF); 189 | 190 | while ((qid = prestera_fw_evtq_pick(fw)) < PRESTERA_EVT_QNUM_MAX) { 191 | u32 idx; 192 | u32 len; 193 | 194 | len = prestera_fw_evtq_read32(fw, qid); 195 | idx = prestera_fw_read(fw, PRESTERA_EVTQ_RD_IDX_REG(qid)); 196 | 197 | WARN_ON(prestera_fw_evtq_avail(fw, qid) < len); 198 | 199 | if (WARN_ON(len > PRESTERA_MSG_MAX_SIZE)) { 200 | prestera_fw_evtq_rd_set(fw, qid, idx + len); 201 | continue; 202 | } 203 | 204 | prestera_fw_evtq_read_buf(fw, qid, msg, len); 205 | 206 | if (fw->dev.recv_msg) 207 | fw->dev.recv_msg(&fw->dev, msg, len); 208 | } 209 | 210 | prestera_fw_status_clear(fw, PRESTERA_STATUS_F_EVT_OFF); 211 | } 212 | EXPORT_SYMBOL(prestera_fw_handle_event); 213 | 214 | static void prestera_fw_evt_work_fn(struct work_struct *work) 215 | { 216 | struct prestera_fw *fw; 217 | 218 | fw = container_of(work, struct prestera_fw, evt_work); 219 | 220 | prestera_fw_handle_event(fw); 221 | } 222 | 223 | void prestera_fw_queue_work(struct prestera_fw *fw) 224 | { 225 | queue_work(fw->wq, &fw->evt_work); 226 | } 227 | EXPORT_SYMBOL(prestera_fw_queue_work); 228 | 229 | static int prestera_fw_wait_reg32(struct prestera_fw *fw, u32 reg, u32 val, 230 | unsigned int wait) 231 | { 232 | if (prestera_wait(prestera_fw_read(fw, reg) == 
val || !fw->dev.running, 233 | wait)) 234 | return fw->dev.running ? 0 : -ENODEV; 235 | 236 | return -EBUSY; 237 | } 238 | 239 | static void prestera_pci_copy_to(u8 __iomem *dst, u8 *src, size_t len) 240 | { 241 | u32 __iomem *dst32 = (u32 __iomem *)dst; 242 | u32 *src32 = (u32 *)src; 243 | int i; 244 | 245 | for (i = 0; i < (len / 4); dst32++, src32++, i++) 246 | writel_relaxed(*src32, dst32); 247 | } 248 | 249 | static void prestera_pci_copy_from(u8 *dst, u8 __iomem *src, size_t len) 250 | { 251 | u32 *dst32 = (u32 *)dst; 252 | u32 __iomem *src32 = (u32 __iomem *)src; 253 | int i; 254 | 255 | for (i = 0; i < (len / 4); dst32++, src32++, i++) 256 | *dst32 = readl_relaxed(src32); 257 | } 258 | 259 | static int prestera_fw_cmd_send(struct prestera_fw *fw, int qid, 260 | u8 *in_msg, size_t in_size, 261 | u8 *out_msg, size_t out_size, 262 | unsigned int wait) 263 | { 264 | u32 ret_size = 0; 265 | int err = 0; 266 | 267 | if (!wait) 268 | wait = 30000; 269 | 270 | if (ALIGN(in_size, 4) > prestera_fw_cmdq_len(fw, qid)) 271 | return -EMSGSIZE; 272 | 273 | /* wait for finish previous reply from FW */ 274 | err = prestera_fw_wait_reg32(fw, PRESTERA_CMDQ_RCV_CTL_REG(qid), 275 | 0, 1000); 276 | if (err) { 277 | dev_err(prestera_fw_dev(fw), 278 | "finish reply from FW is timed out\n"); 279 | return err; 280 | } 281 | 282 | prestera_fw_write(fw, PRESTERA_CMDQ_REQ_LEN_REG(qid), in_size); 283 | prestera_pci_copy_to(prestera_fw_cmdq_buf(fw, qid), in_msg, in_size); 284 | 285 | prestera_fw_write(fw, PRESTERA_CMDQ_REQ_CTL_REG(qid), 286 | PRESTERA_CMD_F_REQ_SENT); 287 | 288 | /* wait for reply from FW */ 289 | err = prestera_fw_wait_reg32(fw, PRESTERA_CMDQ_RCV_CTL_REG(qid), 290 | PRESTERA_CMD_F_REPL_SENT, wait); 291 | if (err) { 292 | dev_err(prestera_fw_dev(fw), 293 | "reply from FW is timed out\n"); 294 | goto cmd_exit; 295 | } 296 | 297 | ret_size = prestera_fw_read(fw, PRESTERA_CMDQ_RCV_LEN_REG(qid)); 298 | if (ret_size > out_size) { 299 | dev_err(prestera_fw_dev(fw), "ret_size (%u) > out_len(%zu)\n", 300 | ret_size, out_size); 301 | err = -EMSGSIZE; 302 | goto cmd_exit; 303 | } 304 | 305 | prestera_pci_copy_from(out_msg, prestera_fw_cmdq_buf(fw, qid) + in_size, 306 | ret_size); 307 | 308 | cmd_exit: 309 | prestera_fw_write(fw, PRESTERA_CMDQ_REQ_CTL_REG(qid), 310 | PRESTERA_CMD_F_REPL_RCVD); 311 | return err; 312 | } 313 | 314 | int prestera_fw_send_req(struct prestera_device *pr_dev, int qid, 315 | u8 *in_msg, size_t in_size, u8 *out_msg, 316 | size_t out_size, unsigned int wait) 317 | { 318 | struct prestera_fw *fw; 319 | ssize_t ret; 320 | 321 | fw = container_of(pr_dev, struct prestera_fw, dev); 322 | 323 | if (!fw->dev.running) 324 | return -ENODEV; 325 | 326 | prestera_fw_cmdq_lock(fw, qid); 327 | ret = prestera_fw_cmd_send(fw, qid, in_msg, in_size, out_msg, out_size, 328 | wait); 329 | prestera_fw_cmdq_unlock(fw, qid); 330 | 331 | return ret; 332 | } 333 | EXPORT_SYMBOL_GPL(prestera_fw_send_req); 334 | 335 | int prestera_fw_rev_check(struct prestera_fw *fw) 336 | { 337 | struct prestera_fw_rev *rev = &fw->dev.fw_rev; 338 | 339 | dev_info(prestera_fw_dev(fw), "FW version '%u.%u.%u'\n", 340 | rev->maj, rev->min, rev->sub); 341 | dev_info(prestera_fw_dev(fw), "Driver version '%u.%u.%u'\n", 342 | PRESTERA_SUPP_FW_MAJ_VER, PRESTERA_SUPP_FW_MIN_VER, 343 | PRESTERA_SUPP_FW_PATCH_VER); 344 | 345 | if (rev->maj == PRESTERA_SUPP_FW_MAJ_VER && 346 | rev->min == PRESTERA_SUPP_FW_MIN_VER) { 347 | return 0; 348 | } 349 | 350 | dev_err(prestera_fw_dev(fw), 351 | "Driver is incomatible with FW: version 
mismatch"); 352 | 353 | return -EINVAL; 354 | } 355 | EXPORT_SYMBOL_GPL(prestera_fw_rev_check); 356 | 357 | void prestera_fw_rev_parse(const struct prestera_fw_header *hdr, 358 | struct prestera_fw_rev *rev) 359 | { 360 | u32 version = be32_to_cpu(hdr->version_value); 361 | 362 | rev->maj = fw_ver_maj(version); 363 | rev->min = fw_ver_min(version); 364 | rev->sub = fw_ver_patch(version); 365 | } 366 | EXPORT_SYMBOL_GPL(prestera_fw_rev_parse); 367 | 368 | void prestera_fw_rev_parse_int(unsigned int firmware_version, 369 | struct prestera_fw_rev *rev) 370 | { 371 | u32 version = firmware_version; 372 | 373 | rev->maj = fw_ver_maj(version); 374 | rev->min = fw_ver_min(version); 375 | rev->sub = fw_ver_patch(version); 376 | } 377 | EXPORT_SYMBOL_GPL(prestera_fw_rev_parse_int); 378 | 379 | int prestera_fw_init(struct prestera_fw *fw) 380 | { 381 | u8 __iomem *base; 382 | int err; 383 | u8 qid; 384 | 385 | err = prestera_fw_wait_reg32(fw, PRESTERA_FW_READY_REG, 386 | PRESTERA_FW_READY_MAGIC, 20000); 387 | if (err) { 388 | dev_err(prestera_fw_dev(fw), "FW failed to start\n"); 389 | return err; 390 | } 391 | 392 | base = fw->mem_addr; 393 | 394 | fw->cmd_mbox = base + prestera_fw_read(fw, PRESTERA_CMD_BUF_OFFS_REG); 395 | fw->cmd_mbox_len = prestera_fw_read(fw, PRESTERA_CMD_BUF_LEN_REG); 396 | fw->cmd_qnum = prestera_fw_read(fw, PRESTERA_CMD_QNUM_REG); 397 | 398 | for (qid = 0; qid < fw->cmd_qnum; qid++) { 399 | u32 offs = prestera_fw_read(fw, PRESTERA_CMDQ_OFFS_REG(qid)); 400 | struct prestera_fw_cmdq *cmdq = &fw->cmd_queue[qid]; 401 | 402 | cmdq->len = prestera_fw_read(fw, PRESTERA_CMDQ_LEN_REG(qid)); 403 | cmdq->addr = fw->cmd_mbox + offs; 404 | mutex_init(&cmdq->cmd_mtx); 405 | } 406 | 407 | fw->evt_buf = base + prestera_fw_read(fw, PRESTERA_EVT_BUF_OFFS_REG); 408 | fw->evt_qnum = prestera_fw_read(fw, PRESTERA_EVT_QNUM_REG); 409 | fw->evt_msg = kmalloc(PRESTERA_MSG_MAX_SIZE, GFP_KERNEL); 410 | if (!fw->evt_msg) 411 | return -ENOMEM; 412 | 413 | for (qid = 0; qid < fw->evt_qnum; qid++) { 414 | u32 offs = prestera_fw_read(fw, PRESTERA_EVTQ_OFFS_REG(qid)); 415 | struct prestera_fw_evtq *evtq = &fw->evt_queue[qid]; 416 | 417 | evtq->len = prestera_fw_read(fw, PRESTERA_EVTQ_LEN_REG(qid)); 418 | evtq->addr = fw->evt_buf + offs; 419 | } 420 | 421 | fw->wq = alloc_workqueue("prestera_fw_wq", WQ_HIGHPRI, 1); 422 | if (!fw->wq) 423 | goto err_wq_alloc; 424 | 425 | INIT_WORK(&fw->evt_work, prestera_fw_evt_work_fn); 426 | 427 | return 0; 428 | 429 | err_wq_alloc: 430 | kfree(fw->evt_msg); 431 | return -ENOMEM; 432 | } 433 | EXPORT_SYMBOL_GPL(prestera_fw_init); 434 | 435 | void prestera_fw_uninit(struct prestera_fw *fw) 436 | { 437 | kfree(fw->evt_msg); 438 | flush_workqueue(fw->wq); 439 | destroy_workqueue(fw->wq); 440 | } 441 | EXPORT_SYMBOL_GPL(prestera_fw_uninit); 442 | 443 | MODULE_AUTHOR("Marvell Semi."); 444 | MODULE_LICENSE("GPL"); 445 | MODULE_DESCRIPTION("Marvell Prestera switch Firmware Agent interface"); 446 | -------------------------------------------------------------------------------- /prestera/prestera_fw_log.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 2 | /* Copyright (c) 2019-2021 Marvell International Ltd. 
All rights reserved */ 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | #include "prestera.h" 12 | #include "prestera_hw.h" 13 | #include "prestera_log.h" 14 | #include "prestera_fw_log.h" 15 | 16 | #define FW_LOG_DBGFS_CFG_DIR "mvsw_pr_fw_log" 17 | #define FW_LOG_DBGFS_CFG_NAME "cfg" 18 | #define FW_LOG_DBGFS_MAX_STR_LEN 64 19 | #define FW_LOG_PR_LOG_PREFIX "[mvsw_pr_fw_log]" 20 | #define FW_LOG_PR_LIB_SIZE 32 21 | #define FW_LOG_PR_READ_BUF_SIZE 8192 22 | #define MVSW_FW_LOG_INFO(fmt, ...) \ 23 | pr_info(fmt, ##__VA_ARGS__) 24 | 25 | #define FW_LOG_READ_TABLE_FMT "%-23s" 26 | 27 | #define mvsw_dev(sw) ((sw)->dev->dev) 28 | 29 | static void mvsw_pr_fw_log_evt_handler(struct prestera_switch *, 30 | struct prestera_event *, 31 | void *); 32 | static ssize_t mvsw_pr_fw_log_debugfs_read(struct file *file, 33 | char __user *ubuf, 34 | size_t count, loff_t *ppos); 35 | static ssize_t mvsw_pr_fw_log_debugfs_write(struct file *file, 36 | const char __user *ubuf, 37 | size_t count, loff_t *ppos); 38 | static int mvsw_pr_fw_log_get_type_from_str(const char *str); 39 | static int mvsw_pr_fw_log_get_lib_from_str(const char *str); 40 | 41 | static int mvsw_pr_fw_log_event_handler_register(struct prestera_switch *sw); 42 | static void mvsw_pr_fw_log_event_handler_unregister(struct prestera_switch *sw); 43 | 44 | struct mvsw_pr_fw_log_prv_debugfs { 45 | struct dentry *cfg_dir; 46 | struct dentry *cfg; 47 | const struct file_operations cfg_fops; 48 | char *read_buf; 49 | }; 50 | 51 | static u8 fw_log_lib_type_config[PRESTERA_FW_LOG_LIB_MAX] = { 0 }; 52 | 53 | static struct mvsw_pr_fw_log_prv_debugfs fw_log_debugfs_handle = { 54 | .cfg_dir = NULL, 55 | .cfg_fops = { 56 | .read = mvsw_pr_fw_log_debugfs_read, 57 | .write = mvsw_pr_fw_log_debugfs_write, 58 | .open = simple_open, 59 | .llseek = default_llseek, 60 | } 61 | }; 62 | 63 | static const char *mvsw_pr_fw_log_lib_id2name[PRESTERA_FW_LOG_LIB_MAX] = { 64 | [PRESTERA_FW_LOG_LIB_ALL] = "all", 65 | [PRESTERA_FW_LOG_LIB_BRIDGE] = "bridge", 66 | [PRESTERA_FW_LOG_LIB_CNC] = "cnc", 67 | [PRESTERA_FW_LOG_LIB_CONFIG] = "config", 68 | [PRESTERA_FW_LOG_LIB_COS] = "cos", 69 | [PRESTERA_FW_LOG_LIB_CSCD] = "cscd", 70 | [PRESTERA_FW_LOG_LIB_CUT_THROUGH] = "cut-through", 71 | [PRESTERA_FW_LOG_LIB_DIAG] = "diag", 72 | [PRESTERA_FW_LOG_LIB_DRAGONITE] = "dragonite", 73 | [PRESTERA_FW_LOG_LIB_EGRESS] = "egress", 74 | [PRESTERA_FW_LOG_LIB_EXACT_MATCH] = "exact-match", 75 | [PRESTERA_FW_LOG_LIB_FABRIC] = "fabric", 76 | [PRESTERA_FW_LOG_LIB_BRIDGE_FDB_MANAGER] = "fdb-manager", 77 | [PRESTERA_FW_LOG_LIB_FLOW_MANAGER] = "flow-manager", 78 | [PRESTERA_FW_LOG_LIB_HW_INIT] = "hw-init", 79 | [PRESTERA_FW_LOG_LIB_I2C] = "i2c", 80 | [PRESTERA_FW_LOG_LIB_INGRESS] = "ingress", 81 | [PRESTERA_FW_LOG_LIB_INIT] = "init", 82 | [PRESTERA_FW_LOG_LIB_IPFIX] = "ipfix", 83 | [PRESTERA_FW_LOG_LIB_IP] = "ip", 84 | [PRESTERA_FW_LOG_LIB_IP_LPM] = "ip-lpm", 85 | [PRESTERA_FW_LOG_LIB_L2_MLL] = "l2-mll", 86 | [PRESTERA_FW_LOG_LIB_LATENCY_MONITORING] = "latency-monitoring", 87 | [PRESTERA_FW_LOG_LIB_LOGICAL_TARGET] = "logical-target", 88 | [PRESTERA_FW_LOG_LIB_LPM] = "lpm", 89 | [PRESTERA_FW_LOG_LIB_MIRROR] = "mirror", 90 | [PRESTERA_FW_LOG_LIB_MULTI_PORT_GROUP] = "multi-port-group", 91 | [PRESTERA_FW_LOG_LIB_NETWORK_IF] = "network-if", 92 | [PRESTERA_FW_LOG_LIB_NST] = "nst", 93 | [PRESTERA_FW_LOG_LIB_OAM] = "oam", 94 | [PRESTERA_FW_LOG_LIB_PACKET_ANALYZER] = "packet-analyzer", 95 | [PRESTERA_FW_LOG_LIB_PCL] = "pcl", 96 | [PRESTERA_FW_LOG_LIB_PHA] = "pha", 97 
| [PRESTERA_FW_LOG_LIB_PHY] = "phy", 98 | [PRESTERA_FW_LOG_LIB_POLICER] = "policer", 99 | [PRESTERA_FW_LOG_LIB_PROTECTION] = "protection", 100 | [PRESTERA_FW_LOG_LIB_PTP] = "ptp", 101 | [PRESTERA_FW_LOG_LIB_RESOURCE_MANAGER] = "resource-manager", 102 | [PRESTERA_FW_LOG_LIB_SMI] = "smi", 103 | [PRESTERA_FW_LOG_LIB_SYSTEM_RECOVERY] = "system-recovery", 104 | [PRESTERA_FW_LOG_LIB_TAM] = "tam", 105 | [PRESTERA_FW_LOG_LIB_TCAM] = "tcam", 106 | [PRESTERA_FW_LOG_LIB_TM] = "tm", 107 | [PRESTERA_FW_LOG_LIB_TM_GLUE] = "tm-glue", 108 | [PRESTERA_FW_LOG_LIB_TRUNK] = "trunk", 109 | [PRESTERA_FW_LOG_LIB_TTI] = "tti", 110 | [PRESTERA_FW_LOG_LIB_TUNNEL] = "tunnel", 111 | [PRESTERA_FW_LOG_LIB_VERSION] = "version", 112 | [PRESTERA_FW_LOG_LIB_VIRTUAL_TCAM] = "virtual-tcam", 113 | [PRESTERA_FW_LOG_LIB_VNT] = "vnt", 114 | [PRESTERA_FW_LOG_LIB_PPU] = "ppu", 115 | [PRESTERA_FW_LOG_LIB_EXACT_MATCH_MANAGER] = "exact-match-manager", 116 | [PRESTERA_FW_LOG_LIB_MAC_SEC] = "mac-sec", 117 | [PRESTERA_FW_LOG_LIB_PTP_MANAGER] = "ptp-manager", 118 | [PRESTERA_FW_LOG_LIB_HSR_PRP] = "hsr-prp", 119 | [PRESTERA_FW_LOG_LIB_STREAM] = "stream", 120 | [PRESTERA_FW_LOG_LIB_IPFIX_MANAGER] = "ipfix_manager", 121 | }; 122 | 123 | static const char *mvsw_pr_fw_log_prv_type_id2name[PRESTERA_FW_LOG_TYPE_MAX] = { 124 | [PRESTERA_FW_LOG_TYPE_INFO] = "info", 125 | [PRESTERA_FW_LOG_TYPE_ENTRY_LEVEL_FUNCTION] = "entry-level-function", 126 | [PRESTERA_FW_LOG_TYPE_ERROR] = "error", 127 | [PRESTERA_FW_LOG_TYPE_ALL] = "all", 128 | [PRESTERA_FW_LOG_TYPE_NONE] = "none", 129 | }; 130 | 131 | static void mvsw_pr_fw_log_evt_handler(struct prestera_switch *sw, 132 | struct prestera_event *evt, void *arg) 133 | { 134 | u32 log_len = evt->fw_log_evt.log_len; 135 | u8 *buf = evt->fw_log_evt.data; 136 | 137 | buf[log_len] = '\0'; 138 | 139 | MVSW_FW_LOG_INFO(FW_LOG_PR_LOG_PREFIX "%s\n", buf); 140 | } 141 | 142 | static ssize_t mvsw_pr_fw_log_format_str(void) 143 | { 144 | char *buf = fw_log_debugfs_handle.read_buf; 145 | int chars_written = 0; 146 | int lib, type; 147 | int ret; 148 | 149 | memset(buf, 0, FW_LOG_PR_READ_BUF_SIZE); 150 | 151 | ret = snprintf(buf, FW_LOG_PR_READ_BUF_SIZE, FW_LOG_READ_TABLE_FMT, 152 | " "); 153 | if (ret < 0) 154 | return ret; 155 | 156 | chars_written += ret; 157 | 158 | for (type = 0; type < PRESTERA_FW_LOG_TYPE_MAX; ++type) { 159 | if (type == PRESTERA_FW_LOG_TYPE_NONE || 160 | type == PRESTERA_FW_LOG_TYPE_ALL) 161 | continue; 162 | 163 | ret = snprintf(buf + chars_written, 164 | FW_LOG_PR_READ_BUF_SIZE - chars_written, 165 | FW_LOG_READ_TABLE_FMT, 166 | mvsw_pr_fw_log_prv_type_id2name[type]); 167 | if (ret < 0) 168 | return ret; 169 | 170 | chars_written += ret; 171 | } 172 | 173 | strcat(buf, "\n"); 174 | ++chars_written; 175 | 176 | for (lib = 0; lib < PRESTERA_FW_LOG_LIB_MAX; ++lib) { 177 | if (lib == PRESTERA_FW_LOG_LIB_ALL || 178 | !mvsw_pr_fw_log_lib_id2name[lib]) 179 | continue; 180 | 181 | ret = snprintf(buf + chars_written, 182 | FW_LOG_PR_READ_BUF_SIZE - chars_written, 183 | FW_LOG_READ_TABLE_FMT, 184 | mvsw_pr_fw_log_lib_id2name[lib]); 185 | if (ret < 0) 186 | return ret; 187 | 188 | chars_written += ret; 189 | 190 | for (type = 0; type < PRESTERA_FW_LOG_TYPE_MAX; ++type) { 191 | if (type == PRESTERA_FW_LOG_TYPE_NONE || 192 | type == PRESTERA_FW_LOG_TYPE_ALL) 193 | continue; 194 | 195 | ret = snprintf(buf + chars_written, 196 | FW_LOG_PR_READ_BUF_SIZE - chars_written, 197 | FW_LOG_READ_TABLE_FMT, 198 | fw_log_lib_type_config[lib] & BIT(type) 199 | ? 
"+" : "-"); 200 | if (ret < 0) 201 | return ret; 202 | 203 | chars_written += ret; 204 | } 205 | strlcat(buf, "\n", FW_LOG_PR_READ_BUF_SIZE); 206 | ++chars_written; 207 | } 208 | 209 | return chars_written; 210 | } 211 | 212 | static ssize_t mvsw_pr_fw_log_debugfs_read(struct file *file, 213 | char __user *ubuf, 214 | size_t count, loff_t *ppos) 215 | { 216 | char *buf = fw_log_debugfs_handle.read_buf; 217 | 218 | return simple_read_from_buffer(ubuf, count, ppos, buf, 219 | FW_LOG_PR_READ_BUF_SIZE); 220 | } 221 | 222 | static int mvsw_pr_fw_log_parse_usr_input(int *name, int *type, 223 | const char __user *ubuf, size_t count) 224 | { 225 | u8 tmp_buf[FW_LOG_DBGFS_MAX_STR_LEN] = { 0 }; 226 | u8 lib_str[FW_LOG_PR_LIB_SIZE] = { 0 }; 227 | u8 type_str[FW_LOG_PR_LIB_SIZE] = { 0 }; 228 | ssize_t len_to_copy = count - 1; 229 | u8 *ppos_lib, *ppos_type; 230 | char *end = tmp_buf; 231 | int err; 232 | 233 | if (len_to_copy > FW_LOG_DBGFS_MAX_STR_LEN) { 234 | MVSW_LOG_ERROR("Len is > than max(%zu vs max possible %d)\n", 235 | count, FW_LOG_DBGFS_MAX_STR_LEN); 236 | return -EMSGSIZE; 237 | } 238 | 239 | err = copy_from_user(tmp_buf, ubuf, len_to_copy); 240 | if (err) 241 | return -EINVAL; 242 | 243 | ppos_lib = strsep(&end, " \t"); 244 | ppos_type = strsep(&end, " \t\0"); 245 | 246 | if (!ppos_lib || !ppos_type) 247 | return -EINVAL; 248 | 249 | strcpy(lib_str, ppos_lib); 250 | 251 | strcpy(type_str, ppos_type); 252 | 253 | if (iscntrl(lib_str[0]) || isspace(lib_str[0]) || lib_str[0] == '\0' || 254 | iscntrl(type_str[0]) || isspace(type_str[0]) || 255 | type_str[0] == '\0') { 256 | return -EINVAL; 257 | } 258 | 259 | *name = mvsw_pr_fw_log_get_lib_from_str(lib_str); 260 | *type = mvsw_pr_fw_log_get_type_from_str(type_str); 261 | 262 | if (*name >= PRESTERA_FW_LOG_LIB_MAX || 263 | *type >= PRESTERA_FW_LOG_TYPE_MAX || 264 | (*name != PRESTERA_FW_LOG_LIB_ALL && 265 | *type == PRESTERA_FW_LOG_TYPE_NONE)) 266 | return -EINVAL; 267 | 268 | return 0; 269 | } 270 | 271 | static ssize_t mvsw_pr_fw_log_debugfs_write(struct file *file, 272 | const char __user *ubuf, 273 | size_t count, loff_t *ppos) 274 | { 275 | struct prestera_switch *sw = file->private_data; 276 | int lib, type; 277 | int i, j; 278 | int err; 279 | 280 | err = mvsw_pr_fw_log_parse_usr_input(&lib, &type, ubuf, count); 281 | if (err) 282 | goto error; 283 | 284 | err = prestera_hw_fw_log_level_set(sw, lib, type); 285 | if (err) { 286 | dev_err(mvsw_dev(sw), "Failed to send request to firmware\n"); 287 | return err; 288 | } 289 | 290 | /* specific lib and specific type */ 291 | if (lib != PRESTERA_FW_LOG_LIB_ALL && 292 | type != PRESTERA_FW_LOG_TYPE_ALL) { 293 | /* special type 'NONE' to disable feature */ 294 | if (type == PRESTERA_FW_LOG_TYPE_NONE) 295 | memset(fw_log_lib_type_config, 0, 296 | sizeof(fw_log_lib_type_config)); 297 | /* Actual type should be switched */ 298 | else 299 | fw_log_lib_type_config[lib] ^= (1 << type); 300 | /* specific lib but all types */ 301 | } else if (lib != PRESTERA_FW_LOG_LIB_ALL && 302 | type == PRESTERA_FW_LOG_TYPE_ALL) { 303 | for (j = 0; j < PRESTERA_FW_LOG_TYPE_ALL; ++j) 304 | fw_log_lib_type_config[lib] ^= (1 << j); 305 | /* specific type but all libs */ 306 | } else if (lib == PRESTERA_FW_LOG_LIB_ALL && 307 | type != PRESTERA_FW_LOG_TYPE_ALL) { 308 | for (i = 0; i < PRESTERA_FW_LOG_LIB_ALL; ++i) 309 | fw_log_lib_type_config[i] |= (1 << type); 310 | /* all libs and all types */ 311 | } else { 312 | for (i = 0; i < PRESTERA_FW_LOG_LIB_ALL; ++i) { 313 | for (j = 0; j < PRESTERA_FW_LOG_TYPE_ALL; ++j) 314 | 
fw_log_lib_type_config[i] |= (1 << j); 315 | } 316 | } 317 | 318 | err = mvsw_pr_fw_log_format_str(); 319 | if (err <= 0) { 320 | dev_err(mvsw_dev(sw), "Failed to form output string\n"); 321 | return err; 322 | } 323 | 324 | return count; 325 | 326 | error: 327 | dev_warn(mvsw_dev(sw), 328 | "Invalid str received, make sure request is valid\n"); 329 | dev_warn(mvsw_dev(sw), 330 | "Valid fmt consists of: \"lib type\" string, e.g:\n"); 331 | dev_warn(mvsw_dev(sw), 332 | "\"phy error\" for 'phy' lib 'error' logs enabled\n"); 333 | 334 | return err; 335 | } 336 | 337 | static int mvsw_pr_fw_log_get_type_from_str(const char *str) 338 | { 339 | int i; 340 | 341 | for (i = 0; i < PRESTERA_FW_LOG_TYPE_MAX; ++i) { 342 | if (!mvsw_pr_fw_log_prv_type_id2name[i]) 343 | continue; 344 | 345 | if (strcmp(mvsw_pr_fw_log_prv_type_id2name[i], str) == 0) 346 | return i; 347 | } 348 | 349 | return PRESTERA_FW_LOG_TYPE_MAX; 350 | } 351 | 352 | static int mvsw_pr_fw_log_get_lib_from_str(const char *str) 353 | { 354 | int i; 355 | 356 | for (i = 0; i < PRESTERA_FW_LOG_LIB_MAX; ++i) { 357 | if (!mvsw_pr_fw_log_lib_id2name[i]) 358 | continue; 359 | 360 | if (strcmp(mvsw_pr_fw_log_lib_id2name[i], str) == 0) 361 | return i; 362 | } 363 | 364 | return PRESTERA_FW_LOG_LIB_MAX; 365 | } 366 | 367 | static int mvsw_pr_fw_log_event_handler_register(struct prestera_switch *sw) 368 | { 369 | return prestera_hw_event_handler_register(sw, 370 | PRESTERA_EVENT_TYPE_FW_LOG, 371 | mvsw_pr_fw_log_evt_handler, 372 | NULL); 373 | } 374 | 375 | static void mvsw_pr_fw_log_event_handler_unregister(struct prestera_switch *sw) 376 | { 377 | prestera_hw_event_handler_unregister(sw, PRESTERA_EVENT_TYPE_FW_LOG); 378 | } 379 | 380 | int mvsw_pr_fw_log_init(struct prestera_switch *sw) 381 | { 382 | fw_log_debugfs_handle.cfg_dir = 383 | debugfs_create_dir(FW_LOG_DBGFS_CFG_DIR, NULL); 384 | 385 | if (!fw_log_debugfs_handle.cfg_dir) { 386 | MVSW_LOG_ERROR("Failed to create debugfs dir entry"); 387 | return -1; 388 | } 389 | 390 | fw_log_debugfs_handle.cfg = 391 | debugfs_create_file(FW_LOG_DBGFS_CFG_NAME, 0644, 392 | fw_log_debugfs_handle.cfg_dir, sw, 393 | &fw_log_debugfs_handle.cfg_fops); 394 | 395 | if (!fw_log_debugfs_handle.cfg) { 396 | MVSW_LOG_ERROR("Failed to create debugfs dir entry"); 397 | debugfs_remove(fw_log_debugfs_handle.cfg_dir); 398 | return -1; 399 | } 400 | 401 | if (mvsw_pr_fw_log_event_handler_register(sw)) 402 | goto error; 403 | 404 | fw_log_debugfs_handle.read_buf = 405 | kzalloc(FW_LOG_PR_READ_BUF_SIZE, GFP_KERNEL); 406 | 407 | if (!fw_log_debugfs_handle.read_buf) 408 | goto error; 409 | 410 | prestera_hw_fw_log_level_set(sw, PRESTERA_FW_LOG_LIB_ALL, 411 | PRESTERA_FW_LOG_TYPE_NONE); 412 | mvsw_pr_fw_log_format_str(); 413 | 414 | return 0; 415 | error: 416 | debugfs_remove(fw_log_debugfs_handle.cfg); 417 | debugfs_remove(fw_log_debugfs_handle.cfg_dir); 418 | return -1; 419 | } 420 | 421 | void mvsw_pr_fw_log_fini(struct prestera_switch *sw) 422 | { 423 | mvsw_pr_fw_log_event_handler_unregister(sw); 424 | 425 | kfree(fw_log_debugfs_handle.read_buf); 426 | 427 | debugfs_remove(fw_log_debugfs_handle.cfg); 428 | debugfs_remove(fw_log_debugfs_handle.cfg_dir); 429 | } 430 | -------------------------------------------------------------------------------- /prestera/prestera_pci.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 2 | /* Copyright (c) 2019-2021 Marvell International Ltd. 
All rights reserved */ 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | #include "prestera.h" 12 | #include "prestera_fw.h" 13 | 14 | #define PRESTERA_FW_DEFAULT_PATH "marvell/mvsw_prestera_fw.img" 15 | #define PRESTERA_FW_ARM64_PATH "marvell/mvsw_prestera_fw_arm64.img" 16 | 17 | #define PRESTERA_FW_HDR_MAGIC 0x351D9D06 18 | #define PRESTERA_FW_DL_TIMEOUT 50000 19 | #define PRESTERA_FW_BLK_SZ 1024 20 | 21 | struct prestera_ldr_regs { 22 | u32 ldr_ready; 23 | u32 pad1; 24 | 25 | u32 ldr_img_size; 26 | u32 ldr_ctl_flags; 27 | 28 | u32 ldr_buf_offs; 29 | u32 ldr_buf_size; 30 | 31 | u32 ldr_buf_rd; 32 | u32 pad2; 33 | u32 ldr_buf_wr; 34 | 35 | u32 ldr_status; 36 | } __packed __aligned(4); 37 | 38 | #define PRESTERA_LDR_REG_OFFSET(f) offsetof(struct prestera_ldr_regs, f) 39 | 40 | #define PRESTERA_LDR_READY_MAGIC 0xf00dfeed 41 | 42 | #define PRESTERA_LDR_STATUS_IMG_DL BIT(0) 43 | #define PRESTERA_LDR_STATUS_START_FW BIT(1) 44 | #define PRESTERA_LDR_STATUS_INVALID_IMG BIT(2) 45 | #define PRESTERA_LDR_STATUS_NOMEM BIT(3) 46 | 47 | #define prestera_ldr_write(fw, reg, val) \ 48 | writel(val, (fw)->ldr_regs + (reg)) 49 | #define prestera_ldr_read(fw, reg) \ 50 | readl((fw)->ldr_regs + (reg)) 51 | 52 | /* fw loader registers */ 53 | #define PRESTERA_LDR_READY_REG PRESTERA_LDR_REG_OFFSET(ldr_ready) 54 | #define PRESTERA_LDR_IMG_SIZE_REG PRESTERA_LDR_REG_OFFSET(ldr_img_size) 55 | #define PRESTERA_LDR_CTL_REG PRESTERA_LDR_REG_OFFSET(ldr_ctl_flags) 56 | #define PRESTERA_LDR_BUF_SIZE_REG PRESTERA_LDR_REG_OFFSET(ldr_buf_size) 57 | #define PRESTERA_LDR_BUF_OFFS_REG PRESTERA_LDR_REG_OFFSET(ldr_buf_offs) 58 | #define PRESTERA_LDR_BUF_RD_REG PRESTERA_LDR_REG_OFFSET(ldr_buf_rd) 59 | #define PRESTERA_LDR_BUF_WR_REG PRESTERA_LDR_REG_OFFSET(ldr_buf_wr) 60 | #define PRESTERA_LDR_STATUS_REG PRESTERA_LDR_REG_OFFSET(ldr_status) 61 | 62 | #define PRESTERA_LDR_CTL_DL_START BIT(0) 63 | 64 | #define PRESTERA_LDR_WR_IDX_MOVE(fw, n) \ 65 | do { \ 66 | typeof(fw) __fw = (fw); \ 67 | (__fw)->ldr_wr_idx = ((__fw)->ldr_wr_idx + (n)) & \ 68 | ((__fw)->ldr_buf_len - 1); \ 69 | } while (0) 70 | 71 | #define PRESTERA_LDR_WR_IDX_COMMIT(fw) \ 72 | ({ \ 73 | typeof(fw) __fw = (fw); \ 74 | prestera_ldr_write((__fw), PRESTERA_LDR_BUF_WR_REG, \ 75 | (__fw)->ldr_wr_idx); \ 76 | }) 77 | 78 | #define PRESTERA_LDR_WR_PTR(fw) \ 79 | ({ \ 80 | typeof(fw) __fw = (fw); \ 81 | ((__fw)->ldr_ring_buf + (__fw)->ldr_wr_idx); \ 82 | }) 83 | 84 | #define PRESTERA_DEVICE(id) PCI_VDEVICE(MARVELL, (id)) 85 | 86 | #define PRESTERA_DEV_ID_AC3X_98DX_55 0xC804 87 | #define PRESTERA_DEV_ID_AC3X_98DX_65 0xC80C 88 | #define PRESTERA_DEV_ID_ALDRIN2 0xCC1E 89 | #define PRESTERA_DEV_ID_ALDRIN3S 0x981F 90 | #define PRESTERA_DEV_ID_98DX3500 0x9820 91 | #define PRESTERA_DEV_ID_98DX3501 0x9826 92 | #define PRESTERA_DEV_ID_98DX3510 0x9821 93 | #define PRESTERA_DEV_ID_98DX3520 0x9822 94 | 95 | static struct prestera_pci_match { 96 | struct pci_driver driver; 97 | const struct pci_device_id id; 98 | bool registered; 99 | } prestera_devices[] = { 100 | { 101 | .driver = { .name = "AC3x B2B 98DX3255", }, 102 | .id = { PRESTERA_DEVICE(PRESTERA_DEV_ID_AC3X_98DX_55), 0 }, 103 | }, 104 | { 105 | .driver = { .name = "AC3x B2B 98DX3265", }, 106 | .id = { PRESTERA_DEVICE(PRESTERA_DEV_ID_AC3X_98DX_65), 0 }, 107 | }, 108 | { 109 | .driver = { .name = "Aldrin2", }, 110 | .id = { PRESTERA_DEVICE(PRESTERA_DEV_ID_ALDRIN2), 0 }, 111 | }, 112 | { 113 | .driver = { .name = "Aldrin3S", }, 114 | .id = { PRESTERA_DEVICE(PRESTERA_DEV_ID_ALDRIN3S), 0 
}, 115 | }, 116 | { 117 | .driver = { .name = "AC5X 98DX3500", }, 118 | .id = { PRESTERA_DEVICE(PRESTERA_DEV_ID_98DX3500), 0 }, 119 | }, 120 | { 121 | .driver = { .name = "AC5X 98DX3501", }, 122 | .id = { PRESTERA_DEVICE(PRESTERA_DEV_ID_98DX3501), 0 }, 123 | }, 124 | { 125 | .driver = { .name = "AC5X 98DX3510", }, 126 | .id = { PRESTERA_DEVICE(PRESTERA_DEV_ID_98DX3510), 0 }, 127 | }, 128 | { 129 | .driver = { .name = "AC5X 98DX3520", }, 130 | .id = { PRESTERA_DEVICE(PRESTERA_DEV_ID_98DX3520), 0 }, 131 | }, 132 | {{ }, } 133 | }; 134 | 135 | static int prestera_fw_load(struct prestera_fw *fw); 136 | 137 | static irqreturn_t prestera_irq_handler(int irq, void *dev_id) 138 | { 139 | struct prestera_fw *fw = dev_id; 140 | 141 | if (prestera_fw_read(fw, PRESTERA_RX_STATUS_REG)) { 142 | if (fw->dev.recv_pkt) { 143 | prestera_fw_write(fw, PRESTERA_RX_STATUS_REG, 0); 144 | fw->dev.recv_pkt(&fw->dev); 145 | } 146 | } 147 | 148 | prestera_fw_queue_work(fw); 149 | 150 | return IRQ_HANDLED; 151 | } 152 | 153 | static int prestera_ldr_wait_reg32(struct prestera_fw *fw, u32 reg, u32 val, 154 | unsigned int wait) 155 | { 156 | if (prestera_wait(prestera_ldr_read(fw, reg) == val, wait)) 157 | return 0; 158 | 159 | return -EBUSY; 160 | } 161 | 162 | static u32 prestera_ldr_buf_avail(struct prestera_fw *fw) 163 | { 164 | u32 rd_idx = prestera_ldr_read(fw, PRESTERA_LDR_BUF_RD_REG); 165 | 166 | return CIRC_SPACE(fw->ldr_wr_idx, rd_idx, fw->ldr_buf_len); 167 | } 168 | 169 | static int prestera_ldr_send_buf(struct prestera_fw *fw, const u8 *buf, 170 | size_t len) 171 | { 172 | int i; 173 | 174 | if (!prestera_wait(prestera_ldr_buf_avail(fw) >= len, 100)) { 175 | dev_err(prestera_fw_dev(fw), 176 | "failed wait for sending firmware\n"); 177 | return -EBUSY; 178 | } 179 | 180 | for (i = 0; i < len; i += 4) { 181 | writel_relaxed(*(u32 *)(buf + i), PRESTERA_LDR_WR_PTR(fw)); 182 | PRESTERA_LDR_WR_IDX_MOVE(fw, 4); 183 | } 184 | 185 | PRESTERA_LDR_WR_IDX_COMMIT(fw); 186 | return 0; 187 | } 188 | 189 | static int prestera_ldr_send(struct prestera_fw *fw, const char *img, 190 | u32 fw_size) 191 | { 192 | unsigned long mask; 193 | u32 status; 194 | u32 pos; 195 | int err; 196 | 197 | if (prestera_ldr_wait_reg32(fw, PRESTERA_LDR_STATUS_REG, 198 | PRESTERA_LDR_STATUS_IMG_DL, 1000)) { 199 | dev_err(prestera_fw_dev(fw), 200 | "Loader is not ready to load image\n"); 201 | return -EBUSY; 202 | } 203 | 204 | for (pos = 0; pos < fw_size; pos += PRESTERA_FW_BLK_SZ) { 205 | if (pos + PRESTERA_FW_BLK_SZ > fw_size) 206 | break; 207 | 208 | err = prestera_ldr_send_buf(fw, img + pos, PRESTERA_FW_BLK_SZ); 209 | if (err) { 210 | if (prestera_fw_read(fw, PRESTERA_LDR_STATUS_REG) == 211 | PRESTERA_LDR_STATUS_NOMEM) { 212 | dev_err(prestera_fw_dev(fw), 213 | "Fw image is too big or invalid\n"); 214 | return -EINVAL; 215 | } 216 | return err; 217 | } 218 | } 219 | 220 | if (pos < fw_size) { 221 | err = prestera_ldr_send_buf(fw, img + pos, fw_size - pos); 222 | if (err) 223 | return err; 224 | } 225 | 226 | /* Waiting for status IMG_DOWNLOADING to change to something else */ 227 | mask = ~(PRESTERA_LDR_STATUS_IMG_DL); 228 | 229 | if (!prestera_wait(prestera_ldr_read(fw, PRESTERA_LDR_STATUS_REG) & 230 | mask, PRESTERA_FW_DL_TIMEOUT)) { 231 | dev_err(prestera_fw_dev(fw), 232 | "Timeout to load FW img [state=%d]", 233 | prestera_ldr_read(fw, PRESTERA_LDR_STATUS_REG)); 234 | return -ETIMEDOUT; 235 | } 236 | 237 | status = prestera_ldr_read(fw, PRESTERA_LDR_STATUS_REG); 238 | if (status != PRESTERA_LDR_STATUS_START_FW) { 239 | switch (status) { 
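/* Editor's note: once the IMG_DL bit clears, the loader is expected to
 * report START_FW on success; INVALID_IMG (bad CRC) and NOMEM are the two
 * error statuses decoded below, and any other value falls through to the
 * default branch and is treated as success.
 */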
240 | case PRESTERA_LDR_STATUS_INVALID_IMG: 241 | dev_err(prestera_fw_dev(fw), "FW img has bad crc\n"); 242 | return -EINVAL; 243 | case PRESTERA_LDR_STATUS_NOMEM: 244 | dev_err(prestera_fw_dev(fw), 245 | "Loader has no enough mem\n"); 246 | return -ENOMEM; 247 | default: 248 | break; 249 | } 250 | } 251 | 252 | return 0; 253 | } 254 | 255 | static bool prestera_ldr_is_ready(struct prestera_fw *fw) 256 | { 257 | return prestera_ldr_read(fw, PRESTERA_LDR_READY_REG) == 258 | PRESTERA_LDR_READY_MAGIC; 259 | } 260 | 261 | static int prestera_fw_hdr_parse(struct prestera_fw *fw, 262 | const struct firmware *img) 263 | { 264 | struct prestera_fw_header *hdr = (struct prestera_fw_header *)img->data; 265 | struct prestera_fw_rev *rev = &fw->dev.fw_rev; 266 | u32 magic; 267 | 268 | magic = be32_to_cpu(hdr->magic_number); 269 | if (magic != PRESTERA_FW_HDR_MAGIC) { 270 | dev_err(prestera_fw_dev(fw), "FW img type is invalid"); 271 | return -EINVAL; 272 | } 273 | 274 | prestera_fw_rev_parse(hdr, rev); 275 | 276 | return prestera_fw_rev_check(fw); 277 | } 278 | 279 | static const char *prestera_fw_path_get(struct prestera_fw *fw) 280 | { 281 | switch (fw->pci_dev->device) { 282 | case PRESTERA_DEV_ID_98DX3500: 283 | case PRESTERA_DEV_ID_98DX3501: 284 | case PRESTERA_DEV_ID_98DX3510: 285 | case PRESTERA_DEV_ID_98DX3520: 286 | return PRESTERA_FW_ARM64_PATH; 287 | 288 | default: 289 | return PRESTERA_FW_DEFAULT_PATH; 290 | } 291 | } 292 | 293 | static int prestera_fw_load(struct prestera_fw *fw) 294 | { 295 | size_t hlen = sizeof(struct prestera_fw_header); 296 | const char *fw_path = prestera_fw_path_get(fw); 297 | const struct firmware *f; 298 | bool has_ldr; 299 | int err; 300 | 301 | /* 10s delay is required for soft reset feature */ 302 | has_ldr = prestera_wait(prestera_ldr_is_ready(fw), 15000); 303 | if (!has_ldr) { 304 | dev_err(prestera_fw_dev(fw), 305 | "waiting for FW loader is timed out"); 306 | return -ETIMEDOUT; 307 | } 308 | 309 | fw->ldr_ring_buf = fw->ldr_regs + 310 | prestera_ldr_read(fw, PRESTERA_LDR_BUF_OFFS_REG); 311 | 312 | fw->ldr_buf_len = prestera_ldr_read(fw, PRESTERA_LDR_BUF_SIZE_REG); 313 | 314 | fw->ldr_wr_idx = 0; 315 | 316 | err = request_firmware_direct(&f, fw_path, &fw->pci_dev->dev); 317 | if (err) { 318 | dev_err(prestera_fw_dev(fw), 319 | "failed to request firmware file: %s\n", fw_path); 320 | return err; 321 | } 322 | 323 | if (!IS_ALIGNED(f->size, 4)) { 324 | dev_err(prestera_fw_dev(fw), "FW image file is not aligned"); 325 | release_firmware(f); 326 | return -EINVAL; 327 | } 328 | 329 | err = prestera_fw_hdr_parse(fw, f); 330 | if (err) { 331 | dev_err(prestera_fw_dev(fw), "FW image is invalid\n"); 332 | release_firmware(f); 333 | return err; 334 | } 335 | 336 | prestera_ldr_write(fw, PRESTERA_LDR_IMG_SIZE_REG, f->size - hlen); 337 | prestera_ldr_write(fw, PRESTERA_LDR_CTL_REG, PRESTERA_LDR_CTL_DL_START); 338 | 339 | dev_info(prestera_fw_dev(fw), "Loading prestera FW image ..."); 340 | 341 | err = prestera_ldr_send(fw, f->data + hlen, f->size - hlen); 342 | 343 | release_firmware(f); 344 | return err; 345 | } 346 | 347 | static bool prestera_pci_pp_use_bar2(struct pci_dev *pdev) 348 | { 349 | switch (pdev->device) { 350 | case PRESTERA_DEV_ID_ALDRIN3S: 351 | case PRESTERA_DEV_ID_98DX3500: 352 | case PRESTERA_DEV_ID_98DX3501: 353 | case PRESTERA_DEV_ID_98DX3510: 354 | case PRESTERA_DEV_ID_98DX3520: 355 | return true; 356 | 357 | default: 358 | return false; 359 | } 360 | } 361 | 362 | static u32 prestera_pci_pp_bar2_offs(struct pci_dev *pdev) 363 | { 364 | if 
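/* Editor's note: on devices that expose both register sets through BAR2, a
 * 16 MiB BAR keeps the packet-processor registers at offset 0x0 with the FW
 * mailbox at 0x400000 (see prestera_pci_fw_bar2_offs() below); for any other
 * BAR2 size the FW mailbox sits at 0x0 and the packet-processor registers
 * start at the halfway point.
 */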
(pci_resource_len(pdev, 2) == 0x1000000) 365 | return 0x0; 366 | else 367 | return (pci_resource_len(pdev, 2) / 2); 368 | } 369 | 370 | static u32 prestera_pci_fw_bar2_offs(struct pci_dev *pdev) 371 | { 372 | if (pci_resource_len(pdev, 2) == 0x1000000) 373 | return 0x400000; 374 | else 375 | return 0x0; 376 | } 377 | 378 | static int prestera_pci_probe(struct pci_dev *pdev, 379 | const struct pci_device_id *id) 380 | { 381 | const char *driver_name = pdev->driver->name; 382 | u8 __iomem *mem_addr, *pp_addr = NULL; 383 | struct prestera_fw *fw; 384 | int err; 385 | 386 | err = pci_enable_device(pdev); 387 | if (err) { 388 | dev_err(&pdev->dev, "pci_enable_device failed\n"); 389 | goto err_pci_enable_device; 390 | } 391 | 392 | err = pci_request_regions(pdev, driver_name); 393 | if (err) { 394 | dev_err(&pdev->dev, "pci_request_regions failed\n"); 395 | goto err_pci_request_regions; 396 | } 397 | 398 | if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(30))) { 399 | dev_err(&pdev->dev, "fail to set DMA mask\n"); 400 | goto err_dma_mask; 401 | } 402 | 403 | mem_addr = pcim_iomap(pdev, 2, 0); 404 | if (!mem_addr) { 405 | dev_err(&pdev->dev, "pci mem ioremap failed\n"); 406 | err = -EIO; 407 | goto err_mem_ioremap; 408 | } 409 | 410 | /* Aldrin3S uses second half of BAR2 */ 411 | if (prestera_pci_pp_use_bar2(pdev)) { 412 | pp_addr = mem_addr + prestera_pci_pp_bar2_offs(pdev); 413 | mem_addr = mem_addr + prestera_pci_fw_bar2_offs(pdev); 414 | } else { 415 | pp_addr = pcim_iomap(pdev, 4, 0); 416 | if (!pp_addr) { 417 | dev_err(&pdev->dev, "pp regs ioremap failed\n"); 418 | err = -EIO; 419 | goto err_pp_ioremap; 420 | } 421 | } 422 | 423 | pci_set_master(pdev); 424 | 425 | fw = kzalloc(sizeof(*fw), GFP_KERNEL); 426 | if (!fw) { 427 | err = -ENOMEM; 428 | goto err_pci_dev_alloc; 429 | } 430 | 431 | fw->pci_dev = pdev; 432 | fw->dev.dev = &pdev->dev; 433 | fw->dev.send_req = prestera_fw_send_req; 434 | fw->dev.pp_regs = pp_addr; 435 | fw->dev.dma_flags = GFP_DMA; 436 | fw->dev.running = true; 437 | fw->mem_addr = mem_addr; 438 | fw->ldr_regs = mem_addr; 439 | fw->hw_regs = mem_addr; 440 | 441 | err = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI); 442 | if (err < 0) { 443 | dev_err(&pdev->dev, "MSI IRQ init failed\n"); 444 | goto err_irq_alloc; 445 | } 446 | 447 | pci_set_drvdata(pdev, fw); 448 | 449 | err = prestera_fw_load(fw); 450 | 451 | if (err) 452 | goto err_fw_init; 453 | 454 | err = prestera_fw_init(fw); 455 | if (err) 456 | goto err_fw_init; 457 | 458 | err = request_irq(pci_irq_vector(pdev, 0), prestera_irq_handler, 459 | 0, driver_name, fw); 460 | if (err) { 461 | dev_err(&pdev->dev, "fail to request IRQ\n"); 462 | goto err_request_irq; 463 | } 464 | 465 | dev_info(prestera_fw_dev(fw), "Prestera Switch FW is ready\n"); 466 | 467 | err = prestera_device_register(&fw->dev); 468 | if (err) 469 | goto err_prestera_dev_register; 470 | 471 | return 0; 472 | 473 | err_prestera_dev_register: 474 | free_irq(pci_irq_vector(pdev, 0), fw); 475 | err_request_irq: 476 | prestera_fw_uninit(fw); 477 | err_fw_init: 478 | pci_free_irq_vectors(pdev); 479 | err_irq_alloc: 480 | 481 | err_pci_dev_alloc: 482 | err_pp_ioremap: 483 | err_mem_ioremap: 484 | err_dma_mask: 485 | pci_release_regions(pdev); 486 | err_pci_request_regions: 487 | pci_disable_device(pdev); 488 | err_pci_enable_device: 489 | return err; 490 | } 491 | 492 | static void prestera_pci_remove(struct pci_dev *pdev) 493 | { 494 | struct prestera_fw *fw = pci_get_drvdata(pdev); 495 | 496 | free_irq(pci_irq_vector(pdev, 0), fw); 497 | 
pci_free_irq_vectors(pdev); 498 | prestera_device_unregister(&fw->dev); 499 | prestera_fw_uninit(fw); 500 | pci_release_regions(pdev); 501 | pci_disable_device(pdev); 502 | kfree(fw); 503 | } 504 | 505 | static int __init prestera_pci_init(void) 506 | { 507 | struct prestera_pci_match *match; 508 | int err = 0; 509 | 510 | for (match = prestera_devices; match->driver.name; match++) { 511 | match->driver.probe = prestera_pci_probe; 512 | match->driver.remove = prestera_pci_remove; 513 | match->driver.id_table = &match->id; 514 | 515 | err = pci_register_driver(&match->driver); 516 | if (err) { 517 | pr_err("prestera_pci: failed to register %s\n", 518 | match->driver.name); 519 | break; 520 | } 521 | 522 | match->registered = true; 523 | } 524 | 525 | if (err) { 526 | for (match = prestera_devices; match->driver.name; match++) { 527 | if (!match->registered) 528 | break; 529 | 530 | pci_unregister_driver(&match->driver); 531 | } 532 | 533 | return err; 534 | } 535 | 536 | pr_info("prestera_pci: Registered Marvell Prestera PCI driver\n"); 537 | return 0; 538 | } 539 | 540 | static void __exit prestera_pci_exit(void) 541 | { 542 | struct prestera_pci_match *match; 543 | 544 | for (match = prestera_devices; match->driver.name; match++) { 545 | if (!match->registered) 546 | break; 547 | 548 | pci_unregister_driver(&match->driver); 549 | } 550 | 551 | pr_info("prestera_pci: Unregistered Marvell Prestera PCI driver\n"); 552 | } 553 | 554 | module_init(prestera_pci_init); 555 | module_exit(prestera_pci_exit); 556 | 557 | MODULE_AUTHOR("Marvell Semi."); 558 | MODULE_LICENSE("GPL"); 559 | MODULE_DESCRIPTION("Marvell Prestera switch PCI interface"); 560 | -------------------------------------------------------------------------------- /prestera/prestera_debugfs.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 2 | /* Copyright (c) 2019-2021 Marvell International Ltd. 
All rights reserved */ 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | #include "prestera_debugfs.h" 10 | #include "prestera.h" 11 | #include "prestera_log.h" 12 | #include "prestera_fw_log.h" 13 | #include "prestera_rxtx.h" 14 | #include "prestera_hw.h" 15 | 16 | #define PRESTERA_DEBUGFS_ROOTDIR "prestera" 17 | 18 | #define CPU_CODE_HW_CNT_SUBDIR_NAME "hw_counters" 19 | #define CPU_CODE_SW_CNT_SUBDIR_NAME "sw_counters" 20 | 21 | #define CPU_CODE_CNT_SUBDIR_TRAP_NAME "traps" 22 | #define CPU_CODE_CNT_SUBDIR_DROP_NAME "drops" 23 | 24 | #define SCT_CFG_SUBDIR_NAME "sct" 25 | 26 | #define CPU_CODE_CNT_BUF_MAX_SIZE (MVSW_PR_RXTX_CPU_CODE_MAX_NUM * 32) 27 | 28 | static ssize_t prestera_cnt_read(struct file *file, char __user *ubuf, 29 | size_t count, loff_t *ppos); 30 | 31 | static ssize_t 32 | prestera_ipg_write(struct file *file, const char __user *ubuf, 33 | size_t count, loff_t *ppos); 34 | static ssize_t 35 | prestera_ipg_read(struct file *file, char __user *ubuf, 36 | size_t count, loff_t *ppos); 37 | 38 | static ssize_t 39 | prestera_sct_cfg_read(struct file *file, char __user *ubuf, 40 | size_t count, loff_t *ppos); 41 | static ssize_t 42 | prestera_sct_cfg_write(struct file *file, const char __user *ubuf, 43 | size_t count, loff_t *ppos); 44 | 45 | struct prestera_debugfs { 46 | struct dentry *root_dir; 47 | const struct file_operations cpu_code_cnt_fops; 48 | const struct file_operations ipg_fops; 49 | const struct file_operations sct_cfg_fops; 50 | char *cpu_code_cnt_buf; 51 | const char *sct_cfg_fname[PRESTERA_SCT_MAX]; 52 | /* serialize access to cpu_code_cnt_buf */ 53 | struct mutex cpu_code_cnt_buf_mtx; 54 | struct prestera_switch *sw; 55 | }; 56 | 57 | struct prestera_cpu_code_data { 58 | union { 59 | long data; 60 | struct { 61 | u16 cpu_code; 62 | u8 cpu_code_cnt_type; 63 | } __packed __aligned(4); 64 | }; 65 | } __packed __aligned(4); 66 | 67 | static struct prestera_debugfs prestera_debugfs = { 68 | .cpu_code_cnt_fops = { 69 | .read = prestera_cnt_read, 70 | .open = simple_open, 71 | .llseek = default_llseek, 72 | }, 73 | .ipg_fops = { 74 | .read = prestera_ipg_read, 75 | .write = prestera_ipg_write, 76 | .open = simple_open, 77 | .llseek = default_llseek, 78 | }, 79 | .sct_cfg_fops = { 80 | .write = prestera_sct_cfg_write, 81 | .read = prestera_sct_cfg_read, 82 | .open = simple_open, 83 | .llseek = default_llseek, 84 | }, 85 | .sct_cfg_fname = { 86 | [PRESTERA_SCT_ALL_UNSPECIFIED_CPU_OPCODES] = "all_unspecified_cpu_opcodes", 87 | [PRESTERA_SCT_ACL_TRAP_QUEUE_0] = "sct_acl_trap_queue_0", 88 | [PRESTERA_SCT_ACL_TRAP_QUEUE_1] = "sct_acl_trap_queue_1", 89 | [PRESTERA_SCT_ACL_TRAP_QUEUE_2] = "sct_acl_trap_queue_2", 90 | [PRESTERA_SCT_ACL_TRAP_QUEUE_3] = "sct_acl_trap_queue_3", 91 | [PRESTERA_SCT_ACL_TRAP_QUEUE_4] = "sct_acl_trap_queue_4", 92 | [PRESTERA_SCT_ACL_TRAP_QUEUE_5] = "sct_acl_trap_queue_5", 93 | [PRESTERA_SCT_ACL_TRAP_QUEUE_6] = "sct_acl_trap_queue_6", 94 | [PRESTERA_SCT_ACL_TRAP_QUEUE_7] = "sct_acl_trap_queue_7", 95 | [PRESTERA_SCT_STP] = "sct_stp", 96 | [PRESTERA_SCT_LACP] = "sct_lacp", 97 | [PRESTERA_SCT_LLDP] = "sct_lldp", 98 | [PRESTERA_SCT_CDP] = "sct_cdp", 99 | [PRESTERA_SCT_ARP_INTERVENTION] = "sct_arp_intervention", 100 | [PRESTERA_SCT_ARP_TO_ME] = "sct_arp_to_me", 101 | [PRESTERA_SCT_BGP_ALL_ROUTERS_MC] = "sct_bgp_all_routers_mc", 102 | [PRESTERA_SCT_VRRP] = "sct_vrrp", 103 | [PRESTERA_SCT_IP_BC] = "sct_ip_bc", 104 | [PRESTERA_SCT_IP_TO_ME] = "sct_ip_to_me", 105 | [PRESTERA_SCT_DEFAULT_ROUTE] = "sct_default_route", 106 | [PRESTERA_SCT_BGP] = 
"sct_bgp", 107 | [PRESTERA_SCT_SSH] = "sct_ssh", 108 | [PRESTERA_SCT_TELNET] = "sct_telnet", 109 | [PRESTERA_SCT_DHCP] = "sct_dhcp", 110 | [PRESTERA_SCT_ICMP] = "sct_icmp", 111 | [PRESTERA_SCT_IGMP] = "sct_igmp", 112 | [PRESTERA_SCT_SPECIAL_IP4_ICMP_REDIRECT] = "sct_special_ip4_icmp_redirect", 113 | [PRESTERA_SCT_SPECIAL_IP4_OPTIONS_IN_IP_HDR] = "sct_special_ip4_options_in_ip_hdr", 114 | [PRESTERA_SCT_SPECIAL_IP4_MTU_EXCEED] = "sct_special_ip4_mtu_exceed", 115 | [PRESTERA_SCT_SPECIAL_IP4_ZERO_TTL] = "sct_special_ip4_zero_ttl", 116 | [PRESTERA_SCT_OSPF] = "sct_ospf", 117 | [PRESTERA_SCT_ISIS] = "sct_isis", 118 | [PRESTERA_SCT_NAT] = "sct_nat", 119 | }, 120 | }; 121 | 122 | enum { 123 | CPU_CODE_CNT_TYPE_HW_DROP = PRESTERA_HW_CPU_CODE_CNT_TYPE_DROP, 124 | CPU_CODE_CNT_TYPE_HW_TRAP = PRESTERA_HW_CPU_CODE_CNT_TYPE_TRAP, 125 | CPU_CODE_CNT_TYPE_SW_TRAP = CPU_CODE_CNT_TYPE_HW_TRAP + 1, 126 | }; 127 | 128 | static int prestera_debugfs_ipg_init(struct prestera_switch *sw) 129 | { 130 | struct prestera_debugfs *debugfs = &prestera_debugfs; 131 | const struct file_operations *fops = 132 | &prestera_debugfs.ipg_fops; 133 | struct dentry *debugfs_file; 134 | 135 | debugfs_file = debugfs_create_file("ipg", 0644, debugfs->root_dir, NULL, 136 | fops); 137 | if (PTR_ERR_OR_ZERO(debugfs_file)) 138 | return (int)PTR_ERR(debugfs_file); 139 | 140 | return 0; 141 | } 142 | 143 | static ssize_t 144 | prestera_sct_cfg_read(struct file *file, char __user *ubuf, 145 | size_t count, loff_t *ppos) 146 | { 147 | struct prestera_debugfs *debugfs = &prestera_debugfs; 148 | u8 grp = (long)file->private_data; 149 | char buf[128] = { 0 }; 150 | size_t buf_len = 1; 151 | u32 rate; 152 | int ret; 153 | 154 | ret = prestera_hw_sct_ratelimit_get(debugfs->sw, grp, &rate); 155 | if (ret) 156 | return ret; 157 | 158 | buf_len += sprintf(buf, "%s: %d (pps)\n", debugfs->sct_cfg_fname[grp], 159 | rate); 160 | 161 | return simple_read_from_buffer(ubuf, count, ppos, buf, buf_len); 162 | } 163 | 164 | static ssize_t 165 | prestera_sct_cfg_write(struct file *file, const char __user *ubuf, 166 | size_t count, loff_t *ppos) 167 | { 168 | struct prestera_debugfs *debugfs = &prestera_debugfs; 169 | u8 grp = (long)file->private_data; 170 | char buf[128] = { 0 }; 171 | long rate; 172 | int ret; 173 | 174 | ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count); 175 | if (ret < 0) 176 | return -EINVAL; 177 | 178 | ret = kstrtol(buf, 10, &rate); 179 | if (ret) 180 | return ret; 181 | 182 | ret = prestera_hw_sct_ratelimit_set(debugfs->sw, grp, (u32)rate); 183 | if (ret) 184 | return ret; 185 | 186 | return count; 187 | } 188 | 189 | static int prestera_debugfs_sct_init(void) 190 | { 191 | const struct file_operations *fops = &prestera_debugfs.sct_cfg_fops; 192 | struct prestera_debugfs *debugfs = &prestera_debugfs; 193 | struct dentry *sct_cfg_subdir; 194 | struct dentry *debugfs_file; 195 | int err; 196 | long i; 197 | 198 | sct_cfg_subdir = debugfs_create_dir(SCT_CFG_SUBDIR_NAME, 199 | debugfs->root_dir); 200 | if (PTR_ERR_OR_ZERO(sct_cfg_subdir)) 201 | return (int)PTR_ERR(sct_cfg_subdir); 202 | 203 | for (i = 0; i < PRESTERA_SCT_MAX; ++i) { 204 | if (!debugfs->sct_cfg_fname[i]) 205 | continue; 206 | 207 | debugfs_file = debugfs_create_file(debugfs->sct_cfg_fname[i], 208 | 0644, sct_cfg_subdir, 209 | (void *)i, fops); 210 | if (PTR_ERR_OR_ZERO(debugfs_file)) { 211 | err = (int)PTR_ERR(debugfs_file); 212 | goto err_single_file_creation; 213 | } 214 | } 215 | 216 | return 0; 217 | 218 | err_single_file_creation: 219 | 
debugfs_remove(sct_cfg_subdir); 220 | return err; 221 | } 222 | 223 | int prestera_debugfs_init(struct prestera_switch *sw) 224 | { 225 | struct prestera_debugfs *debugfs = &prestera_debugfs; 226 | struct dentry *cpu_code_hw_cnt_trap_subdir; 227 | struct dentry *cpu_code_hw_cnt_drop_subdir; 228 | struct dentry *cpu_code_sw_cnt_trap_subdir; 229 | struct dentry *cpu_code_sw_cnt_subdir; 230 | struct dentry *cpu_code_hw_counters_subdir; 231 | char file_name[] = "cpu_code_XXX_stats"; 232 | const struct file_operations *fops = 233 | &prestera_debugfs.cpu_code_cnt_fops; 234 | struct prestera_cpu_code_data f_data; 235 | struct dentry *debugfs_file; 236 | int err; 237 | int i; 238 | 239 | mutex_init(&debugfs->cpu_code_cnt_buf_mtx); 240 | 241 | debugfs->sw = sw; 242 | 243 | debugfs->cpu_code_cnt_buf = kzalloc(CPU_CODE_CNT_BUF_MAX_SIZE, 244 | GFP_KERNEL); 245 | if (!debugfs->cpu_code_cnt_buf) 246 | return -ENOMEM; 247 | 248 | err = mvsw_pr_fw_log_init(sw); 249 | if (err) 250 | goto err_fw_log_init; 251 | 252 | debugfs->root_dir = debugfs_create_dir(PRESTERA_DEBUGFS_ROOTDIR, NULL); 253 | if (PTR_ERR_OR_ZERO(debugfs->root_dir)) { 254 | err = (int)PTR_ERR(debugfs->root_dir); 255 | goto err_root_dir_alloc; 256 | } 257 | 258 | cpu_code_sw_cnt_subdir = debugfs_create_dir(CPU_CODE_SW_CNT_SUBDIR_NAME, 259 | debugfs->root_dir); 260 | if (PTR_ERR_OR_ZERO(cpu_code_sw_cnt_subdir)) { 261 | err = (int)PTR_ERR(cpu_code_sw_cnt_subdir); 262 | goto err_subdir_alloc; 263 | } 264 | 265 | cpu_code_sw_cnt_trap_subdir = 266 | debugfs_create_dir(CPU_CODE_CNT_SUBDIR_TRAP_NAME, 267 | cpu_code_sw_cnt_subdir); 268 | if (PTR_ERR_OR_ZERO(cpu_code_sw_cnt_trap_subdir)) { 269 | err = (int)PTR_ERR(cpu_code_sw_cnt_trap_subdir); 270 | goto err_subdir_alloc; 271 | } 272 | 273 | cpu_code_hw_counters_subdir = 274 | debugfs_create_dir(CPU_CODE_HW_CNT_SUBDIR_NAME, 275 | debugfs->root_dir); 276 | if (PTR_ERR_OR_ZERO(cpu_code_hw_counters_subdir)) { 277 | err = (int)PTR_ERR(cpu_code_hw_counters_subdir); 278 | goto err_subdir_alloc; 279 | } 280 | 281 | cpu_code_hw_cnt_trap_subdir = 282 | debugfs_create_dir(CPU_CODE_CNT_SUBDIR_TRAP_NAME, 283 | cpu_code_hw_counters_subdir); 284 | if (PTR_ERR_OR_ZERO(cpu_code_hw_cnt_trap_subdir)) { 285 | err = (int)PTR_ERR(cpu_code_hw_cnt_trap_subdir); 286 | goto err_subdir_alloc; 287 | } 288 | 289 | cpu_code_hw_cnt_drop_subdir = 290 | debugfs_create_dir(CPU_CODE_CNT_SUBDIR_DROP_NAME, 291 | cpu_code_hw_counters_subdir); 292 | if (PTR_ERR_OR_ZERO(cpu_code_hw_cnt_drop_subdir)) { 293 | err = (int)PTR_ERR(cpu_code_hw_cnt_drop_subdir); 294 | goto err_subdir_alloc; 295 | } 296 | 297 | for (i = 0; i < MVSW_PR_RXTX_CPU_CODE_MAX_NUM; ++i) { 298 | f_data.cpu_code = i; 299 | 300 | snprintf(file_name, sizeof(file_name), "cpu_code_%d_stats", i); 301 | 302 | f_data.cpu_code_cnt_type = CPU_CODE_CNT_TYPE_SW_TRAP; 303 | debugfs_file = debugfs_create_file(file_name, 0644, 304 | cpu_code_sw_cnt_trap_subdir, 305 | (void *)f_data.data, 306 | fops); 307 | if (PTR_ERR_OR_ZERO(debugfs_file)) { 308 | err = (int)PTR_ERR(debugfs_file); 309 | goto err_single_file_creation; 310 | } 311 | 312 | f_data.cpu_code_cnt_type = CPU_CODE_CNT_TYPE_HW_TRAP; 313 | debugfs_file = debugfs_create_file(file_name, 0644, 314 | cpu_code_hw_cnt_trap_subdir, 315 | (void *)f_data.data, 316 | fops); 317 | if (PTR_ERR_OR_ZERO(debugfs_file)) { 318 | err = (int)PTR_ERR(debugfs_file); 319 | goto err_single_file_creation; 320 | } 321 | 322 | f_data.cpu_code_cnt_type = CPU_CODE_CNT_TYPE_HW_DROP; 323 | debugfs_file = debugfs_create_file(file_name, 0644, 324 |
cpu_code_hw_cnt_drop_subdir, 325 | (void *)f_data.data, 326 | fops); 327 | if (PTR_ERR_OR_ZERO(debugfs_file)) { 328 | err = (int)PTR_ERR(debugfs_file); 329 | goto err_single_file_creation; 330 | } 331 | } 332 | 333 | f_data.cpu_code = MVSW_PR_RXTX_CPU_CODE_MAX_NUM; 334 | f_data.cpu_code_cnt_type = CPU_CODE_CNT_TYPE_SW_TRAP; 335 | debugfs_file = debugfs_create_file("cpu_code_stats", 0644, 336 | cpu_code_sw_cnt_trap_subdir, 337 | (void *)f_data.data, 338 | fops); 339 | if (PTR_ERR_OR_ZERO(debugfs_file)) { 340 | err = (int)PTR_ERR(debugfs_file); 341 | goto err_single_file_creation; 342 | } 343 | 344 | f_data.cpu_code_cnt_type = CPU_CODE_CNT_TYPE_HW_TRAP; 345 | debugfs_file = debugfs_create_file("cpu_code_stats", 0644, 346 | cpu_code_hw_cnt_trap_subdir, 347 | (void *)f_data.data, 348 | fops); 349 | if (PTR_ERR_OR_ZERO(debugfs_file)) { 350 | err = (int)PTR_ERR(debugfs_file); 351 | goto err_single_file_creation; 352 | } 353 | 354 | f_data.cpu_code_cnt_type = CPU_CODE_CNT_TYPE_HW_DROP; 355 | debugfs_file = debugfs_create_file("cpu_code_stats", 0644, 356 | cpu_code_hw_cnt_drop_subdir, 357 | (void *)f_data.data, 358 | fops); 359 | if (PTR_ERR_OR_ZERO(debugfs_file)) { 360 | err = (int)PTR_ERR(debugfs_file); 361 | goto err_single_file_creation; 362 | } 363 | 364 | err = prestera_debugfs_ipg_init(sw); 365 | if (err) 366 | goto err_ipg_init; 367 | 368 | err = prestera_debugfs_sct_init(); 369 | if (err) 370 | goto sct_init_failed; 371 | 372 | return 0; 373 | 374 | sct_init_failed: 375 | err_ipg_init: 376 | err_single_file_creation: 377 | err_subdir_alloc: 378 | /* 379 | * Removing root directory would result in recursive 380 | * subdirectories / files cleanup of all child nodes; 381 | */ 382 | debugfs_remove(debugfs->root_dir); 383 | err_root_dir_alloc: 384 | mvsw_pr_fw_log_fini(sw); 385 | err_fw_log_init: 386 | kfree(debugfs->cpu_code_cnt_buf); 387 | return err; 388 | } 389 | 390 | void prestera_debugfs_fini(struct prestera_switch *sw) 391 | { 392 | mvsw_pr_fw_log_fini(sw); 393 | debugfs_remove(prestera_debugfs.root_dir); 394 | mutex_destroy(&prestera_debugfs.cpu_code_cnt_buf_mtx); 395 | kfree(prestera_debugfs.cpu_code_cnt_buf); 396 | } 397 | 398 | /* 399 | * Software: only TRAP counters are present 400 | * Hardware: counters can be either TRAP or drops 401 | */ 402 | static int prestera_cpu_code_cnt_get(u64 *stats, u8 cpu_code, u8 cnt_type) 403 | { 404 | switch (cnt_type) { 405 | case CPU_CODE_CNT_TYPE_HW_DROP: 406 | case CPU_CODE_CNT_TYPE_HW_TRAP: 407 | /* fall through */ 408 | return prestera_hw_cpu_code_counters_get(prestera_debugfs.sw, 409 | cpu_code, cnt_type, 410 | stats); 411 | case CPU_CODE_CNT_TYPE_SW_TRAP: 412 | *stats = mvsw_pr_rxtx_get_cpu_code_stats(cpu_code); 413 | return 0; 414 | default: 415 | return -EINVAL; 416 | } 417 | } 418 | 419 | static ssize_t prestera_cnt_read(struct file *file, char __user *ubuf, 420 | size_t count, loff_t *ppos) 421 | { 422 | char *buf = prestera_debugfs.cpu_code_cnt_buf; 423 | struct prestera_cpu_code_data f_data = { 424 | .data = (long)file->private_data, 425 | }; 426 | u64 cpu_code_stats; 427 | /* as the snprintf doesn't count for \0, start with 1 */ 428 | int buf_len = 1; 429 | int ret; 430 | 431 | mutex_lock(&prestera_debugfs.cpu_code_cnt_buf_mtx); 432 | 433 | if (f_data.cpu_code == MVSW_PR_RXTX_CPU_CODE_MAX_NUM) { 434 | int i; 435 | 436 | memset(buf, 0, CPU_CODE_CNT_BUF_MAX_SIZE); 437 | 438 | for (i = 0; i < MVSW_PR_RXTX_CPU_CODE_MAX_NUM; ++i) { 439 | ret = prestera_cpu_code_cnt_get 440 | (&cpu_code_stats, (u8)i, 441 | f_data.cpu_code_cnt_type); 442 | 
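/* Editor's note: the aggregate "cpu_code_stats" file is built here as one
 * "<cpu_code>:<count>" line per CPU code, with zero counters skipped (see
 * the snprintf() below), while the per-code files handled in the else
 * branch print a single counter value.
 */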
if (ret) 443 | goto err_get_stats; 444 | 445 | if (!cpu_code_stats) 446 | continue; 447 | 448 | buf_len += snprintf(buf + buf_len, 449 | CPU_CODE_CNT_BUF_MAX_SIZE - buf_len, 450 | "%u:%llu\n", i, cpu_code_stats); 451 | } 452 | 453 | } else { 454 | ret = prestera_cpu_code_cnt_get(&cpu_code_stats, 455 | (u8)f_data.cpu_code, 456 | f_data.cpu_code_cnt_type); 457 | if (ret) 458 | goto err_get_stats; 459 | 460 | buf_len += sprintf(buf, "%llu\n", cpu_code_stats); 461 | } 462 | 463 | ret = simple_read_from_buffer(ubuf, count, ppos, buf, buf_len); 464 | 465 | err_get_stats: 466 | mutex_unlock(&prestera_debugfs.cpu_code_cnt_buf_mtx); 467 | 468 | return ret; 469 | } 470 | 471 | static ssize_t 472 | prestera_ipg_write(struct file *file, const char __user *ubuf, 473 | size_t count, loff_t *ppos) 474 | { 475 | struct prestera_debugfs *debugfs = &prestera_debugfs; 476 | char buf[128] = { 0 }; 477 | u32 ipg; 478 | int ret; 479 | 480 | ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count); 481 | if (ret < 0) 482 | return -EINVAL; 483 | 484 | if (kstrtou32(buf, 0, &ipg) || !ipg || ipg % PRESTERA_IPG_ALIGN_VALUE) 485 | return -EINVAL; 486 | 487 | ret = prestera_hw_ipg_set(debugfs->sw, ipg); 488 | if (ret) 489 | return ret; 490 | 491 | return count; 492 | } 493 | 494 | static ssize_t 495 | prestera_ipg_read(struct file *file, char __user *ubuf, 496 | size_t count, loff_t *ppos) 497 | { 498 | struct prestera_debugfs *debugfs = &prestera_debugfs; 499 | char buf[128] = { 0 }; 500 | /* as the snprintf doesn't count for \0, start with 1 */ 501 | int buf_len = 1; 502 | int ret; 503 | u32 ipg; 504 | 505 | ret = prestera_hw_ipg_get(debugfs->sw, &ipg); 506 | if (ret) 507 | return ret; 508 | 509 | buf_len += snprintf(buf, sizeof(buf) - 1, "ipg: %u\n", ipg); 510 | 511 | return simple_read_from_buffer(ubuf, count, ppos, buf, buf_len); 512 | } 513 | -------------------------------------------------------------------------------- /prestera/prestera_shm.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 2 | /* Copyright (c) 2019-2021 Marvell International Ltd. 
All rights reserved */ 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | 15 | #include "prestera.h" 16 | #include "prestera_fw.h" 17 | #include "prestera_shm.h" 18 | 19 | #define PRESTERA_SW_SHM_DEV_NAME "prestera_shm" 20 | #define PRESTERA_SHM_FIFO_SZ 128 21 | 22 | #define SIM_SDMA_RX_QUEUE_DESC_REG(n) (0x260C + (n) * 16) 23 | #define SIM_SDMA_TX_QUEUE_DESC_REG 0x26C0 24 | #define SIM_REG(x) ((phys_addr_t *)((char *)shm_dev->fw.dev.pp_regs + (x))) 25 | #define SIM_REG_DEREF(x) (*(SIM_REG(x))) 26 | 27 | struct prestera_shm_msg { 28 | int command; 29 | long arg; 30 | }; 31 | 32 | struct prestera_shm_dev { 33 | struct device *dev_ptr; 34 | atomic_t pending_intr_cntr; 35 | wait_queue_head_t shm_queue; 36 | struct task_struct *shm_kthread; 37 | struct page *alloc_pages; 38 | phys_addr_t addr_phys; 39 | size_t size; 40 | struct prestera_fw fw; 41 | void __iomem *shm_mmap_memory; 42 | dev_t shm_cdev_ids; 43 | struct cdev shm_cdev; 44 | struct class *shm_class; 45 | struct task_struct *sim_kthread; 46 | struct net_device *net_dev; 47 | int initialized; 48 | }; 49 | 50 | /* Needed so devlink_nl_put_handle() won't crash trying to write NL attributes for bus name: */ 51 | static struct bus_type prestera_shm_bus_type = { 52 | .name = "prestera_shm_bus", 53 | }; 54 | 55 | /* Interface netdev name for simulation mode. When specified, enables simulation mode */ 56 | static char *sim_devname; 57 | 58 | static netdev_tx_t prestera_shm_sim_netdev_xmit(struct sk_buff *skb, struct net_device *dev) 59 | { 60 | int i = 0; 61 | phys_addr_t pa; 62 | unsigned int *ulptr; 63 | unsigned char *dst_buf; 64 | struct prestera_shm_dev **pp_dev; 65 | struct prestera_shm_dev *shm_dev; 66 | 67 | pp_dev = (struct prestera_shm_dev **)netdev_priv(dev); 68 | shm_dev = *pp_dev; 69 | 70 | pa = SIM_REG_DEREF(SIM_SDMA_RX_QUEUE_DESC_REG(0)); 71 | if (!pa) { 72 | /* 73 | * We can either return NETDEV_TX_BUSY which will cause 74 | * constant requeueing and ksoftirqd running constantly 75 | * at 100% CPU (which causes appDemo initialization to fail) 76 | * or just free the SKB and return OK. 
77 | */ 78 | dev_kfree_skb(skb); 79 | dev->stats.tx_errors++; 80 | return NETDEV_TX_OK; 81 | } 82 | 83 | ulptr = phys_to_virt(pa); 84 | 85 | for (i = 0; ((i < 1000) && (ulptr[3])); i++) { 86 | if (*ulptr & 0x80000000) { 87 | /* found a ready buffer descriptor */ 88 | dst_buf = phys_to_virt(ulptr[2]); 89 | skb_copy_bits(skb, 0, dst_buf, skb->len); 90 | ulptr[1] = skb->len << 16; 91 | *ulptr = 0x0C000000; 92 | dev->stats.tx_packets++; 93 | dev->stats.tx_bytes += skb->len; 94 | dev_kfree_skb(skb); 95 | 96 | return NETDEV_TX_OK; 97 | } 98 | pa = ulptr[3]; 99 | if (!pa) 100 | break; 101 | ulptr = phys_to_virt(pa); 102 | } 103 | dev->stats.tx_errors++; 104 | return NETDEV_TX_BUSY; 105 | } 106 | 107 | static int prestera_shm_sim_set_mac_addr(struct net_device *dev, void *addr) 108 | { 109 | eth_commit_mac_addr_change(dev, addr); 110 | return 0; 111 | } 112 | 113 | static const struct net_device_ops shm_sim_netdev_ops = { 114 | .ndo_start_xmit = prestera_shm_sim_netdev_xmit, 115 | .ndo_set_mac_address = prestera_shm_sim_set_mac_addr, 116 | }; 117 | 118 | static int prestera_shm_sim_sdma(void *data) 119 | { 120 | int i = 0; 121 | phys_addr_t pa; 122 | unsigned int len; 123 | unsigned int *ulptr; 124 | unsigned char *dst_buf; 125 | struct sk_buff *new_skb; 126 | struct prestera_shm_dev *shm_dev = (struct prestera_shm_dev *)data; 127 | 128 | while (!kthread_should_stop()) { 129 | /* Simulate SDMA TX path */ 130 | pa = SIM_REG_DEREF(SIM_SDMA_TX_QUEUE_DESC_REG); 131 | if (!pa) { 132 | msleep(20); 133 | continue; 134 | } 135 | 136 | ulptr = phys_to_virt(pa); 137 | 138 | for (i = 0; ((i < 1000) && (ulptr[3])); i++) { 139 | if (*ulptr & 0x80000000) { 140 | /* found a ready to send buffer descriptor */ 141 | dst_buf = phys_to_virt(ulptr[2]); 142 | len = ulptr[1] >> 16; 143 | new_skb = alloc_skb(len, GFP_KERNEL); 144 | skb_copy_to_linear_data(new_skb, dst_buf, len); 145 | shm_dev->net_dev->stats.rx_packets++; 146 | shm_dev->net_dev->stats.rx_bytes += len; 147 | 148 | if (netif_receive_skb(new_skb) != NET_RX_SUCCESS) 149 | dev_kfree_skb(new_skb); 150 | 151 | *ulptr = 0x0300000; 152 | } 153 | 154 | pa = ulptr[3]; 155 | if (!pa) 156 | break; 157 | 158 | ulptr = phys_to_virt(pa); 159 | } 160 | msleep(20); 161 | } 162 | 163 | return 0; 164 | } 165 | 166 | /* mmap() handler - 167 | * allocates kernel contiguous physical memory and map it to user-space 168 | * for shared memory IPC interface to user-space 169 | */ 170 | 171 | static int prestera_shm_mmap(struct file *filp, struct vm_area_struct *vma) 172 | { 173 | struct prestera_shm_dev *dev = filp->private_data; 174 | void *vaddr; 175 | 176 | dev->size = MAX_ORDER_NR_PAGES * PAGE_SIZE; 177 | 178 | pr_err("%s: Entry. Size=%lu. 
VMA_size = %lu\n", __func__, dev->size, 179 | vma->vm_end - vma->vm_start); 180 | 181 | dev->alloc_pages = alloc_pages(GFP_DMA, MAX_ORDER - 1); 182 | 183 | if (!dev->alloc_pages/*dev->shm_mmap_memory*/) { 184 | pr_err("%s: Failed allocating %lu bytes of memory.\n", __func__, dev->size); 185 | return -ENOMEM; 186 | } 187 | 188 | vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); 189 | 190 | dev->addr_phys = (page_to_pfn(dev->alloc_pages) << PAGE_SHIFT); 191 | 192 | vaddr = page_address(dev->alloc_pages); 193 | dev->shm_mmap_memory = vaddr; 194 | if (!dev->shm_mmap_memory) { 195 | pr_err("%s: Failed remap kernel vaddr, %lu bytes.\n", __func__, dev->size); 196 | return -ENOMEM; 197 | } 198 | 199 | /* In lieu of physical registers */ 200 | dev->fw.hw_regs = (u8 __iomem *)dev->shm_mmap_memory; 201 | dev->fw.mem_addr = (u8 __iomem *)dev->shm_mmap_memory; 202 | pr_info("%s: memptr %p hw_regs %p mem_addr %p\n", __func__, 203 | dev->shm_mmap_memory, dev->fw.hw_regs, 204 | dev->fw.mem_addr); 205 | 206 | if (remap_pfn_range(vma, vma->vm_start, 207 | dev->addr_phys >> PAGE_SHIFT, 208 | vma->vm_end - vma->vm_start, vma->vm_page_prot) < 0) { 209 | pr_err("%s: failed remapping virt %lx phys %llx len %lu\n", 210 | __func__, vma->vm_start, dev->addr_phys, 211 | vma->vm_end - vma->vm_start); 212 | __free_pages(dev->alloc_pages, MAX_ORDER_NR_PAGES); 213 | return -EAGAIN; 214 | } 215 | 216 | pr_info("%s: remapped virt %lx phys %llx len %lu\n", 217 | __func__, vma->vm_start, dev->addr_phys, 218 | vma->vm_end - vma->vm_start); 219 | 220 | return 0; 221 | } 222 | 223 | static int prestera_shm_open(struct inode *inode, struct file *filp) 224 | { 225 | struct prestera_shm_dev *dev; 226 | 227 | dev = container_of(inode->i_cdev, struct prestera_shm_dev, shm_cdev); 228 | 229 | if (!dev->initialized) 230 | return -ENODEV; 231 | 232 | filp->private_data = dev; 233 | 234 | return 0; 235 | } 236 | 237 | static int prestera_shm_release(struct inode *inode, struct file *filp) 238 | { 239 | struct prestera_shm_dev *dev = filp->private_data; 240 | 241 | __free_pages(dev->alloc_pages, MAX_ORDER - 1); 242 | dev->alloc_pages = NULL; 243 | 244 | dev->shm_mmap_memory = NULL; 245 | filp->private_data = NULL; 246 | 247 | return 0; 248 | } 249 | 250 | static void prestera_shm_handle_interrupt(struct prestera_shm_dev *dev) 251 | { 252 | if (prestera_fw_read(&dev->fw, PRESTERA_RX_STATUS_REG)) { 253 | prestera_fw_write(&dev->fw, PRESTERA_RX_STATUS_REG, 0); 254 | 255 | if (likely(dev->fw.dev.recv_pkt)) 256 | dev->fw.dev.recv_pkt(&dev->fw.dev); 257 | } 258 | } 259 | 260 | static int prestera_shm_kthread(void *data) 261 | { 262 | struct prestera_shm_dev *dev = (struct prestera_shm_dev *)data; 263 | 264 | sched_set_fifo(current); 265 | while (!kthread_should_stop()) { 266 | wait_event_interruptible(dev->shm_queue, 267 | ((kthread_should_stop()) || 268 | (atomic_read(&dev->pending_intr_cntr) > 0))); 269 | 270 | if (kthread_should_stop()) { 271 | pr_err("%s: stopping...\n", __func__); 272 | break; 273 | } 274 | 275 | while (atomic_fetch_dec(&dev->pending_intr_cntr)) 276 | prestera_fw_handle_event(&dev->fw); 277 | } 278 | return 0; 279 | } 280 | 281 | static int prestera_shm_ioctl_handle_one(struct prestera_shm_dev *dev, 282 | struct prestera_shm_msg *msg) 283 | { 284 | int err; 285 | struct prestera_fw_rev *rev = &dev->fw.dev.fw_rev; 286 | 287 | switch (msg->command) { 288 | case PRESTERA_SHM_INTERRUPT: 289 | prestera_shm_handle_interrupt(dev); 290 | atomic_inc(&dev->pending_intr_cntr); 291 | wake_up(&dev->shm_queue); 292 | return 0; 293 | 
294 | case PRESTERA_SHM_INIT: 295 | 296 | if (!dev->shm_mmap_memory) { 297 | dev_err(dev->dev_ptr, "%s: userspace must call mmap() for this driver before calling ioctl() with PRESTERA_SHM_INIT!", 298 | __func__); 299 | return -ENXIO; 300 | } 301 | 302 | err = prestera_fw_init(&dev->fw); 303 | if (err) 304 | return err; 305 | 306 | prestera_fw_rev_parse_int(msg->arg, rev); 307 | if (prestera_fw_rev_check(&dev->fw)) 308 | return -EINVAL; 309 | 310 | pr_info("Before Registering prestera device\n"); 311 | err = prestera_device_register(&dev->fw.dev); 312 | pr_info("Registered prestera device (return code %d).\n", err); 313 | return err; 314 | } 315 | 316 | return -EINVAL; 317 | } 318 | 319 | /* Handle simulation of PCIe interrupt and firmware version number update 320 | */ 321 | static long prestera_shm_ioctl(struct file *filp, unsigned int command, 322 | unsigned long arg) 323 | { 324 | struct prestera_shm_dev *dev = filp->private_data; 325 | struct prestera_shm_msg msg; 326 | 327 | msg.command = command; 328 | msg.arg = arg; 329 | 330 | return prestera_shm_ioctl_handle_one(dev, &msg); 331 | } 332 | 333 | static const struct file_operations shm_file_operations = { 334 | .owner = THIS_MODULE, 335 | .unlocked_ioctl = prestera_shm_ioctl, 336 | .mmap = prestera_shm_mmap, 337 | .open = prestera_shm_open, 338 | .release = prestera_shm_release, 339 | }; 340 | 341 | /* Module device specific information initialization function - 342 | * Allocates character device for ioctl() and mmap() functionalities 343 | */ 344 | 345 | static int prestera_shm_dev_init(struct prestera_shm_dev *dev, 346 | struct platform_device *pdev) 347 | { 348 | int ret; 349 | struct resource *resource = NULL; 350 | void __iomem *base; 351 | struct prestera_shm_dev **pp_dev; 352 | unsigned char addr[ETH_ALEN]; 353 | 354 | atomic_set(&dev->pending_intr_cntr, 0); 355 | init_waitqueue_head(&dev->shm_queue); 356 | dev->shm_kthread = kthread_run(prestera_shm_kthread, (void *)dev, 357 | "prestera_shm_kthread"); 358 | if (!dev->shm_kthread) 359 | return -ENOMEM; 360 | 361 | ret = alloc_chrdev_region(&dev->shm_cdev_ids, 0, 1, PRESTERA_SHM_DEVNAME); 362 | if (ret) 363 | goto err_chrdev_region; 364 | 365 | cdev_init(&dev->shm_cdev, &shm_file_operations); 366 | 367 | ret = cdev_add(&dev->shm_cdev, dev->shm_cdev_ids, 1); 368 | if (ret) 369 | goto err_cdev_add; 370 | 371 | dev->shm_class = class_create(THIS_MODULE, PRESTERA_SHM_DEVNAME); 372 | if (IS_ERR(dev->shm_class)) { 373 | ret = PTR_ERR(dev->shm_class); 374 | goto err_class_create; 375 | } 376 | 377 | dev->dev_ptr = device_create(dev->shm_class, NULL, dev->shm_cdev_ids, 378 | NULL, PRESTERA_SHM_DEVNAME); 379 | 380 | dev->dev_ptr->bus = &prestera_shm_bus_type; 381 | if (IS_ERR(dev->dev_ptr)) { 382 | ret = PTR_ERR(dev->dev_ptr); 383 | goto err_dev_create; 384 | } 385 | 386 | if (!dev->dev_ptr->dma_mask) { 387 | pr_info("%s: Fixing dma_mask...\n", __func__); 388 | dev->dev_ptr->dma_mask = &dev->dev_ptr->coherent_dma_mask; 389 | } 390 | 391 | /* 392 | * AC5X DDR starts at physical address 0x2_0000_0000, 393 | * and can end at 0xff_ffff_ffff so we need a 40 bit 394 | * mask for it ... 
395 | */ 396 | ret = dma_set_mask_and_coherent(dev->dev_ptr, DMA_BIT_MASK(40)); 397 | if (ret) { 398 | dev_err(dev->dev_ptr, "fail to set DMA mask, return code is: %d\n", ret); 399 | goto err_dma_mask; 400 | } 401 | dev->dev_ptr->bus_dma_limit = DMA_BIT_MASK(40); 402 | pr_info("%s: coherent_dma_mask %llx bus_dma_limit %llx\n", __func__, 403 | dev->dev_ptr->coherent_dma_mask, dev->dev_ptr->bus_dma_limit); 404 | #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \ 405 | defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \ 406 | defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) 407 | dev->dev_ptr->dma_coherent = true; 408 | #endif 409 | if (!sim_devname) { 410 | dev->fw.dev.dma_flags = GFP_DMA; 411 | resource = platform_get_resource(pdev, IORESOURCE_MEM, 0); 412 | if (!resource || resource_size(resource) < PAGE_SIZE) { 413 | dev_err(dev->dev_ptr, "Failed getting valid IO resource from device tree!\n"); 414 | goto err_dma_mask; 415 | } 416 | 417 | base = devm_ioremap_resource(dev->dev_ptr, resource); 418 | if (IS_ERR(base)) 419 | goto err_dma_mask; 420 | 421 | } else { 422 | /* X86 does not really support GFP_DMA. 423 | * It translates into ISA lower 16MB in memory. 424 | */ 425 | dev->fw.dev.dma_flags = 0; 426 | 427 | /* Simulate MG SDMA area in software */ 428 | base = kzalloc(0x10000, GFP_KERNEL); 429 | if (!base) 430 | goto err_dma_mask; 431 | 432 | dev->sim_kthread = kthread_run(prestera_shm_sim_sdma, dev, "sim_mg_sdma"); 433 | 434 | if (!dev->sim_kthread) { 435 | kfree(base); 436 | goto err_dma_mask; 437 | } 438 | 439 | dev->net_dev = alloc_etherdev(sizeof(void *)); 440 | 441 | if (!dev->net_dev) { 442 | kthread_stop(dev->sim_kthread); 443 | kfree(base); 444 | goto err_dma_mask; 445 | } 446 | 447 | pp_dev = (struct prestera_shm_dev **)netdev_priv(dev->net_dev); 448 | *pp_dev = dev; 449 | dev->net_dev->netdev_ops = &shm_sim_netdev_ops; 450 | SET_NETDEV_DEV(dev->net_dev, dev->dev_ptr); 451 | 452 | dev->net_dev->mtu = 1536; 453 | dev->net_dev->min_mtu = 64; 454 | dev->net_dev->max_mtu = 1536; 455 | dev->net_dev->type = ARPHRD_ETHER; 456 | dev->net_dev->addr_len = ETH_ALEN; 457 | 458 | strcpy(dev->net_dev->name, "sim%d"); 459 | get_random_bytes(addr + 1, ETH_ALEN - 1); 460 | addr[0] = 0x0; 461 | memcpy(dev->net_dev->dev_addr, addr, ETH_ALEN); 462 | 463 | ret = register_netdev(dev->net_dev); 464 | 465 | if (ret) { 466 | pr_err("%s: failed registering network device %s with error number %d\n", 467 | __func__, dev->net_dev->name, ret); 468 | } else { 469 | netif_carrier_on(dev->net_dev); 470 | pr_info("%s: ifindex %d ns %p netdev %p\n", 471 | __func__, dev->net_dev->ifindex, 472 | dev_net(dev->net_dev), 473 | __dev_get_by_index(dev_net(dev->net_dev), 474 | dev->net_dev->ifindex)); 475 | } 476 | } 477 | 478 | dev->fw.dev.dev = dev->dev_ptr; 479 | dev->fw.dev.send_req = prestera_fw_send_req; 480 | 481 | dev->fw.dev.pp_regs = base; 482 | if (resource) 483 | pr_info("%s: remmap %llx..%llx to %p...\n", 484 | __func__, resource->start, resource->end, 485 | dev->fw.dev.pp_regs); 486 | dev->fw.dev.running = true; 487 | 488 | pr_info("prestera_shm: Initialized Marvell Prestera shared memory device\n"); 489 | dev->initialized = true; 490 | return ret; 491 | 492 | err_dma_mask: 493 | device_destroy(dev->shm_class, dev->shm_cdev_ids); 494 | err_dev_create: 495 | class_destroy(dev->shm_class); 496 | err_class_create: 497 | cdev_del(&dev->shm_cdev); 498 | err_cdev_add: 499 | unregister_chrdev_region(dev->shm_cdev_ids, 1); 500 | err_chrdev_region: 501 | kthread_stop(dev->shm_kthread); 502 | return ret; 503 | } 504 | 
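/*
 * Editor's note - user-space usage sketch (not part of the driver).
 * prestera_shm_dev_init() above exposes a character device (typically
 * /dev/prestera_shm) whose expected call order is: open(), then mmap()
 * to obtain the shared region (the PRESTERA_SHM_INIT handler refuses to
 * run before mmap()), then ioctl(PRESTERA_SHM_INIT) carrying the firmware
 * revision, and ioctl(PRESTERA_SHM_INTERRUPT) whenever the user-space
 * firmware has posted an event. The mapping length and the packing of the
 * revision value passed to the init ioctl are illustrative assumptions.
 */
#include <fcntl.h>
#include <linux/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include "prestera_shm.h"	/* PRESTERA_SHM_INIT / _INTERRUPT / _DEVNAME */

int prestera_shm_user_start(__u32 fw_rev_packed, size_t map_len)
{
	void *shm;
	int fd = open("/dev/" PRESTERA_SHM_DEVNAME, O_RDWR);

	if (fd < 0)
		return -1;

	/* Must precede PRESTERA_SHM_INIT: the driver allocates the shared
	 * pages from its mmap() handler and points fw.hw_regs at them.
	 */
	shm = mmap(NULL, map_len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (shm == MAP_FAILED) {
		close(fd);
		return -1;
	}

	/* Registers the prestera device once the revision check passes;
	 * the driver reads the ioctl argument directly as the packed value.
	 */
	if (ioctl(fd, PRESTERA_SHM_INIT, fw_rev_packed) < 0) {
		munmap(shm, map_len);
		close(fd);
		return -1;
	}

	/* Later, for every event the user-space firmware has queued: */
	ioctl(fd, PRESTERA_SHM_INTERRUPT, 0);

	return fd;
}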
505 | static int prestera_shm_probe(struct platform_device *pdev) 506 | { 507 | struct prestera_shm_dev *dev; 508 | int ret; 509 | 510 | pr_info("prestera_shm: Probing Marvell Prestera shared memory driver...\n"); 511 | dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL); 512 | if (!dev) 513 | return -ENOMEM; 514 | 515 | ret = prestera_shm_dev_init(dev, pdev); 516 | 517 | if (ret < 0) 518 | return ret; 519 | 520 | platform_set_drvdata(pdev, dev); 521 | 522 | pr_info("prestera_shm: Probed Marvell Prestera shared memory driver\n"); 523 | return 0; 524 | } 525 | 526 | static void prestera_shm_dev_deinit(struct prestera_shm_dev *dev) 527 | { 528 | kthread_stop(dev->shm_kthread); 529 | dev->fw.dev.running = false; 530 | prestera_device_unregister(&dev->fw.dev); 531 | prestera_fw_uninit(&dev->fw); 532 | cancel_delayed_work_sync(&dev->fw.dev.keepalive_wdog_work); 533 | 534 | if (sim_devname) { 535 | kthread_stop(dev->sim_kthread); 536 | unregister_netdev(dev->net_dev); 537 | } 538 | dev->dev_ptr->bus = NULL; 539 | if (dev->alloc_pages) 540 | __free_pages(dev->alloc_pages, MAX_ORDER - 1); 541 | 542 | device_destroy(dev->shm_class, dev->shm_cdev_ids); 543 | class_destroy(dev->shm_class); 544 | 545 | cdev_del(&dev->shm_cdev); 546 | unregister_chrdev_region(dev->shm_cdev_ids, 1); 547 | if (sim_devname) 548 | kfree(dev->fw.dev.pp_regs); 549 | pr_info("%s: Unregistered Marvell Prestera shared memory driver\n", __func__); 550 | } 551 | 552 | static int prestera_shm_remove(struct platform_device *pdev) 553 | { 554 | struct prestera_shm_dev *dev = platform_get_drvdata(pdev); 555 | 556 | pr_info("%s: Unregistering Marvell Prestera shared memory driver.\n", __func__); 557 | prestera_shm_dev_deinit(dev); 558 | return 0; 559 | } 560 | 561 | static const struct of_device_id prestera_shm_of_match[] = { 562 | { .compatible = "marvell,prestera", }, 563 | {}, 564 | }; 565 | MODULE_DEVICE_TABLE(of, prestera_shm_of_match); 566 | 567 | static struct platform_driver prestera_shm_driver = { 568 | .driver = { 569 | .name = PRESTERA_SW_SHM_DEV_NAME, 570 | .owner = THIS_MODULE, 571 | .of_match_table = prestera_shm_of_match, 572 | }, 573 | .probe = prestera_shm_probe, 574 | .remove = prestera_shm_remove, 575 | }; 576 | 577 | #ifdef CONFIG_X86_64 578 | struct platform_device *g_pdev; 579 | 580 | static int __init prestera_shm_init(void) 581 | { 582 | int ret; 583 | 584 | pr_info("Entry: %s\n", __func__); 585 | 586 | g_pdev = platform_device_alloc("prestera_shm", -1); 587 | 588 | if (!g_pdev) 589 | return -ENOMEM; 590 | 591 | return prestera_shm_probe(g_pdev); 592 | } 593 | 594 | /* 595 | * Exit function of our module. 596 | */ 597 | static void __exit prestera_shm_exit(void) 598 | { 599 | pr_info("Exit: %s\n", __func__); 600 | 601 | prestera_shm_remove(g_pdev); 602 | platform_device_unregister(g_pdev); 603 | } 604 | 605 | module_init(prestera_shm_init); 606 | module_exit(prestera_shm_exit); 607 | #else 608 | 609 | module_platform_driver(prestera_shm_driver); 610 | 611 | #endif 612 | 613 | MODULE_AUTHOR("Marvell Semi."); 614 | MODULE_LICENSE("GPL"); 615 | MODULE_DESCRIPTION("Marvell Prestera switch shared memory interface"); 616 | module_param(sim_devname, charp, 0444); 617 | MODULE_PARM_DESC(sim_devname, "Interface name for simulation mode. 
When specified, enables simulation mode."); 618 | -------------------------------------------------------------------------------- /prestera/prestera_qdisc.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 2 | /* Copyright (c) 2019-2021 Marvell International Ltd. All rights reserved */ 3 | 4 | #include 5 | #include 6 | 7 | #include "prestera.h" 8 | #include "prestera_hw.h" 9 | #include "prestera_qdisc.h" 10 | 11 | #define PRESTERA_PRIO_BAND_TO_TCLASS(band) (IEEE_8021QAZ_MAX_TCS - (band) - 1) 12 | 13 | struct prestera_qdisc_sched_info { 14 | struct { 15 | u32 sdwrr_weight; 16 | bool sdwrr; 17 | } tc[IEEE_8021QAZ_MAX_TCS]; 18 | }; 19 | 20 | struct prestera_qdisc_sched { 21 | struct list_head list; 22 | struct prestera_qdisc_sched_info info; 23 | refcount_t refcount; 24 | u32 id; 25 | }; 26 | 27 | struct prestera_qdisc_ets_band { 28 | int tclass_num; 29 | }; 30 | 31 | struct prestera_qdisc_ets_data { 32 | struct prestera_qdisc_ets_band bands[IEEE_8021QAZ_MAX_TCS]; 33 | struct prestera_qdisc_sched *sched; 34 | u32 default_sched_id; 35 | }; 36 | 37 | struct prestera_qdisc; 38 | struct prestera_qdisc_ops { 39 | int (*check)(void *params); 40 | int (*replace)(struct prestera_port *port, u32 handle, 41 | struct prestera_qdisc *qdisc, void *params); 42 | int (*destroy)(struct prestera_port *port, 43 | struct prestera_qdisc *qdisc); 44 | int (*get_stats)(struct prestera_port *port, 45 | struct prestera_qdisc *qdisc, 46 | struct tc_qopt_offload_stats *stats_ptr); 47 | struct prestera_qdisc *(*find_class)(struct prestera_qdisc *qdisc, 48 | u32 parent); 49 | int (*get_tclass_num)(struct prestera_qdisc *qdisc, 50 | struct prestera_qdisc *child); 51 | unsigned int num_classes; 52 | }; 53 | 54 | struct prestera_qdisc { 55 | struct prestera_qdisc *qdiscs; 56 | struct prestera_qdisc *parent; 57 | struct prestera_qdisc_ops *ops; 58 | union { 59 | struct prestera_qdisc_ets_data *ets_data; 60 | }; 61 | unsigned int num_classes; 62 | u32 handle; 63 | }; 64 | 65 | struct prestera_qdisc_port { 66 | struct mutex lock; /* Protects qdisc */ 67 | struct prestera_qdisc root_qdisc; 68 | }; 69 | 70 | static int prestera_qdisc_sched_put(struct prestera_switch *sw, 71 | struct prestera_qdisc_sched *sched) 72 | { 73 | int err; 74 | 75 | if (!refcount_dec_and_test(&sched->refcount)) 76 | return 0; 77 | 78 | err = prestera_hw_sched_release(sw, sched->id); 79 | if (err) 80 | return err; 81 | 82 | list_del(&sched->list); 83 | kfree(sched); 84 | return 0; 85 | } 86 | 87 | static int 88 | prestera_qdisc_sched_info_cmp(struct prestera_qdisc_sched_info *info1, 89 | struct prestera_qdisc_sched_info *info2) 90 | { 91 | unsigned int i, sched_match = 0; 92 | 93 | for (i = 0; i < ARRAY_SIZE(info1->tc); i++) { 94 | if (info1->tc[i].sdwrr != info2->tc[i].sdwrr) 95 | break; 96 | 97 | if (info1->tc[i].sdwrr && 98 | info1->tc[i].sdwrr_weight != info2->tc[i].sdwrr_weight) 99 | break; 100 | 101 | sched_match++; 102 | } 103 | 104 | return !(sched_match == ARRAY_SIZE(info1->tc)); 105 | } 106 | 107 | static int prestera_qdisc_sched_set(struct prestera_port *port, u8 tc, 108 | u32 sched_id, bool sdwrr, u8 sdwrr_weight) 109 | { 110 | if (!sdwrr) 111 | return prestera_hw_sched_sp_set(port, tc, sched_id); 112 | 113 | return prestera_hw_sched_sdwrr_set(port, tc, sched_id, sdwrr_weight); 114 | } 115 | 116 | static struct prestera_qdisc_sched * 117 | prestera_qdisc_sched_get(struct prestera_port *port, 118 | struct prestera_qdisc_sched_info *sched_info) 119 | { 
120 | struct prestera_qdisc_sched *sched; 121 | int err, i; 122 | 123 | list_for_each_entry(sched, &port->sw->sched_list, list) 124 | if (!prestera_qdisc_sched_info_cmp(&sched->info, sched_info)) { 125 | refcount_inc(&sched->refcount); 126 | return sched; 127 | } 128 | 129 | sched = kzalloc(sizeof(*sched), GFP_KERNEL); 130 | if (!sched) 131 | return ERR_PTR(-ENOMEM); 132 | 133 | err = prestera_hw_sched_create(port->sw, &sched->id); 134 | if (err) 135 | goto err_sched_create; 136 | 137 | refcount_set(&sched->refcount, 1); 138 | list_add_rcu(&sched->list, &port->sw->sched_list); 139 | 140 | for (i = 0; i < ARRAY_SIZE(sched->info.tc); i++) { 141 | sched->info.tc[i].sdwrr = sched_info->tc[i].sdwrr; 142 | sched->info.tc[i].sdwrr_weight = sched_info->tc[i].sdwrr_weight; 143 | err = prestera_qdisc_sched_set(port, i, sched->id, 144 | sched->info.tc[i].sdwrr, 145 | sched->info.tc[i].sdwrr_weight); 146 | if (err) { 147 | prestera_qdisc_sched_put(port->sw, sched); 148 | return ERR_PTR(err); 149 | } 150 | } 151 | 152 | return sched; 153 | 154 | err_sched_create: 155 | kfree(sched); 156 | return ERR_PTR(err); 157 | } 158 | 159 | static struct prestera_qdisc * 160 | prestera_qdisc_find(struct prestera_port *port, u32 parent) 161 | { 162 | struct prestera_qdisc_port *qdisc_port = port->qdisc; 163 | struct prestera_qdisc *qdisc; 164 | 165 | if (!qdisc_port) 166 | return NULL; 167 | 168 | qdisc = &qdisc_port->root_qdisc; 169 | if (parent == TC_H_ROOT) 170 | return qdisc; 171 | 172 | if (qdisc->ops && TC_H_MAJ(qdisc->handle) == TC_H_MAJ(parent)) { 173 | if (qdisc->ops->find_class) 174 | return qdisc->ops->find_class(qdisc, parent); 175 | } 176 | 177 | return NULL; 178 | } 179 | 180 | static void prestera_qdisc_cleanup(struct prestera_qdisc *qdisc) 181 | { 182 | qdisc->handle = TC_H_UNSPEC; 183 | qdisc->num_classes = 0; 184 | qdisc->ops = NULL; 185 | kfree(qdisc->qdiscs); 186 | qdisc->qdiscs = NULL; 187 | } 188 | 189 | static int prestera_qdisc_create(struct prestera_port *port, 190 | u32 handle, struct prestera_qdisc *qdisc, 191 | struct prestera_qdisc_ops *ops, void *params) 192 | { 193 | unsigned int i; 194 | int err; 195 | 196 | if (ops->check) { 197 | err = ops->check(params); 198 | if (err) 199 | return err; 200 | } 201 | 202 | if (ops->num_classes) { 203 | qdisc->qdiscs = kcalloc(ops->num_classes, 204 | sizeof(*qdisc->qdiscs), GFP_KERNEL); 205 | if (!qdisc->qdiscs) 206 | return -ENOMEM; 207 | 208 | for (i = 0; i < ops->num_classes; i++) 209 | qdisc->qdiscs[i].parent = qdisc; 210 | } 211 | 212 | qdisc->num_classes = ops->num_classes; 213 | qdisc->ops = ops; 214 | qdisc->handle = handle; 215 | 216 | err = ops->replace(port, handle, qdisc, params); 217 | if (err) 218 | goto err_replace; 219 | 220 | return 0; 221 | 222 | err_replace: 223 | prestera_qdisc_cleanup(qdisc); 224 | return err; 225 | } 226 | 227 | static int 228 | prestera_qdisc_replace(struct prestera_port *port, 229 | u32 handle, struct prestera_qdisc *qdisc, 230 | struct prestera_qdisc_ops *ops, void *params) 231 | { 232 | if (!qdisc->ops) 233 | return prestera_qdisc_create(port, handle, qdisc, ops, params); 234 | 235 | /* qdisc change is not supported for now */ 236 | return -EOPNOTSUPP; 237 | } 238 | 239 | static int 240 | prestera_qdisc_destroy(struct prestera_port *port, 241 | struct prestera_qdisc *qdisc) 242 | { 243 | int err = 0; 244 | int i; 245 | 246 | if (!qdisc->ops) 247 | return 0; 248 | 249 | for (i = 0; i < qdisc->num_classes; i++) 250 | prestera_qdisc_destroy(port, &qdisc->qdiscs[i]); 251 | 252 | if (qdisc->ops->destroy) 253 | 
err = qdisc->ops->destroy(port, qdisc); 254 | 255 | prestera_qdisc_cleanup(qdisc); 256 | return err; 257 | } 258 | 259 | static int 260 | prestera_qdisc_get_stats(struct prestera_port *port, 261 | struct prestera_qdisc *qdisc, 262 | struct tc_qopt_offload_stats *stats_ptr) 263 | { 264 | if (qdisc && qdisc->ops && qdisc->ops->get_stats) 265 | return qdisc->ops->get_stats(port, qdisc, stats_ptr); 266 | 267 | return -EOPNOTSUPP; 268 | } 269 | 270 | static int prestera_qdisc_graft(struct prestera_port *port, 271 | struct prestera_qdisc *qdisc, 272 | u8 band, u32 child_handle) 273 | { 274 | if (band < qdisc->num_classes && 275 | qdisc->qdiscs[band].handle == child_handle) 276 | return 0; 277 | 278 | if (!child_handle) 279 | /* This is an invisible FIFO replacing the original Qdisc */ 280 | return 0; 281 | 282 | return -EOPNOTSUPP; 283 | } 284 | 285 | static int prestera_qdisc_get_tclass_num(struct prestera_qdisc *qdisc) 286 | { 287 | struct prestera_qdisc *parent = qdisc->parent; 288 | 289 | if (!parent->ops->get_tclass_num) 290 | return prestera_qdisc_get_tclass_num(parent); 291 | 292 | return parent->ops->get_tclass_num(parent, qdisc); 293 | } 294 | 295 | static int prestera_qdisc_ets_check(void *params) 296 | { 297 | struct tc_ets_qopt_offload_replace_params *p = params; 298 | u8 i, prio2bandmap[] = { 7, 6, 5, 4, 3, 2, 1, 0 }; 299 | 300 | if (p->bands != IEEE_8021QAZ_MAX_TCS) 301 | return -EOPNOTSUPP; 302 | 303 | for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) 304 | if (prio2bandmap[i] != p->priomap[i]) 305 | return -EOPNOTSUPP; 306 | 307 | return 0; 308 | } 309 | 310 | static int 311 | prestera_qdisc_ets_replace(struct prestera_port *port, u32 handle, 312 | struct prestera_qdisc *qdisc, void *params) 313 | { 314 | struct prestera_qdisc_ets_data *ets_data = qdisc->ets_data; 315 | struct tc_ets_qopt_offload_replace_params *p = params; 316 | struct prestera_qdisc_sched_info sched_info; 317 | int err, band, tclass_num; 318 | 319 | if (!ets_data) { 320 | ets_data = kzalloc(sizeof(*ets_data), GFP_KERNEL); 321 | if (!ets_data) 322 | return -ENOMEM; 323 | 324 | qdisc->ets_data = ets_data; 325 | for (band = 0; band < qdisc->num_classes; band++) { 326 | tclass_num = PRESTERA_PRIO_BAND_TO_TCLASS(band); 327 | ets_data->bands[band].tclass_num = tclass_num; 328 | } 329 | } 330 | 331 | for (band = 0; band < p->bands; band++) { 332 | tclass_num = ets_data->bands[band].tclass_num; 333 | sched_info.tc[tclass_num].sdwrr = !!p->quanta[band]; 334 | sched_info.tc[tclass_num].sdwrr_weight = p->weights[band]; 335 | } 336 | 337 | ets_data->sched = prestera_qdisc_sched_get(port, &sched_info); 338 | if (IS_ERR(ets_data->sched)) 339 | goto err_sched_port_get; 340 | 341 | err = prestera_hw_sched_port_get(port, &ets_data->default_sched_id); 342 | if (err) 343 | goto err_sched_get; 344 | 345 | err = prestera_hw_sched_port_set(port, ets_data->sched->id); 346 | if (err) 347 | goto err_sched_port_band_set; 348 | 349 | return 0; 350 | 351 | err_sched_port_band_set: 352 | WARN_ON(prestera_hw_sched_port_set(port, ets_data->default_sched_id)); 353 | err_sched_get: 354 | WARN_ON(prestera_qdisc_sched_put(port->sw, ets_data->sched)); 355 | 356 | err_sched_port_get: 357 | qdisc->ets_data = NULL; 358 | kfree(ets_data); 359 | return err; 360 | } 361 | 362 | static int prestera_qdisc_ets_destroy(struct prestera_port *port, 363 | struct prestera_qdisc *qdisc) 364 | { 365 | struct prestera_qdisc_ets_data *ets_data = qdisc->ets_data; 366 | int err; 367 | 368 | err = prestera_hw_sched_port_set(port, ets_data->default_sched_id); 369 | if (err) 
370 | return err; 371 | 372 | err = prestera_qdisc_sched_put(port->sw, ets_data->sched); 373 | if (err) 374 | return err; 375 | 376 | qdisc->ets_data = NULL; 377 | kfree(ets_data); 378 | return 0; 379 | } 380 | 381 | static int 382 | prestera_qdisc_ets_get_tclass_num(struct prestera_qdisc *parent, 383 | struct prestera_qdisc *child) 384 | { 385 | unsigned int band = child - parent->qdiscs; 386 | 387 | WARN_ON(band >= IEEE_8021QAZ_MAX_TCS); 388 | return parent->ets_data->bands[band].tclass_num; 389 | } 390 | 391 | static struct prestera_qdisc * 392 | prestera_qdisc_ets_find_class(struct prestera_qdisc *qdisc, u32 parent) 393 | { 394 | int child_index = TC_H_MIN(parent); 395 | int band = child_index - 1; 396 | 397 | if (band < 0 || band >= qdisc->num_classes) 398 | return NULL; 399 | 400 | return &qdisc->qdiscs[band]; 401 | } 402 | 403 | static int prestera_qdisc_ets_stats_get(struct prestera_port *port, 404 | struct prestera_qdisc *qdisc, 405 | struct tc_qopt_offload_stats *stats_ptr) 406 | { 407 | return 0; 408 | } 409 | 410 | static u32 411 | prestera_qdisc_tbf_burst(struct tc_tbf_qopt_offload_replace_params *p) 412 | { 413 | /* burst is configured in units of 4K bytes */ 414 | return p->max_size / 4096; 415 | } 416 | 417 | static u64 418 | prestera_qdisc_tbf_rate_kbps(struct tc_tbf_qopt_offload_replace_params *p) 419 | { 420 | /* rate is configured in Kbps */ 421 | return div_u64(p->rate.rate_bytes_ps, 1000) * 8; 422 | } 423 | 424 | static int prestera_qdisc_tbf_replace(struct prestera_port *port, 425 | u32 handle, struct prestera_qdisc *qdisc, 426 | void *params) 427 | { 428 | struct tc_tbf_qopt_offload_replace_params *p = params; 429 | int tclass_num; 430 | u32 burst; 431 | u32 rate; 432 | 433 | burst = prestera_qdisc_tbf_burst(p); 434 | rate = (u32)prestera_qdisc_tbf_rate_kbps(p); 435 | tclass_num = prestera_qdisc_get_tclass_num(qdisc); 436 | 437 | return prestera_hw_shaper_port_queue_configure(port, tclass_num, rate, burst); 438 | } 439 | 440 | static int prestera_qdisc_tbf_destroy(struct prestera_port *port, 441 | struct prestera_qdisc *qdisc) 442 | { 443 | int tclass_num; 444 | 445 | tclass_num = prestera_qdisc_get_tclass_num(qdisc); 446 | return prestera_hw_shaper_port_queue_disable(port, tclass_num); 447 | } 448 | 449 | static int prestera_qdisc_tc_stats_get(struct prestera_port *port, u8 tc, 450 | struct tc_qopt_offload_stats *stats_ptr) 451 | { 452 | u64 pkts, bytes, drops; 453 | int err; 454 | 455 | err = prestera_hw_port_queue_stats_get(port, tc, &pkts, &bytes, &drops); 456 | if (err) 457 | return err; 458 | 459 | _bstats_update(stats_ptr->bstats, bytes, pkts); 460 | stats_ptr->qstats->drops += drops; 461 | 462 | return 0; 463 | } 464 | 465 | static int prestera_qdisc_tbf_stats_get(struct prestera_port *port, 466 | struct prestera_qdisc *qdisc, 467 | struct tc_qopt_offload_stats *stats_ptr) 468 | { 469 | int tclass_num; 470 | 471 | tclass_num = prestera_qdisc_get_tclass_num(qdisc); 472 | return prestera_qdisc_tc_stats_get(port, tclass_num, stats_ptr); 473 | } 474 | 475 | static int prestera_qdisc_red_check(void *params) 476 | { 477 | struct tc_red_qopt_offload_params *p = params; 478 | 479 | if (p->min > p->max) 480 | return -EINVAL; 481 | 482 | if (p->min == 0 || p->max == 0) 483 | return -EINVAL; 484 | 485 | return 0; 486 | } 487 | 488 | static int prestera_qdisc_red_replace(struct prestera_port *port, 489 | u32 handle, struct prestera_qdisc *qdisc, 490 | void *params) 491 | { 492 | struct tc_red_qopt_offload_params *p = params; 493 | int tclass_num; 494 | u64 prob; 495 | 
496 | /* calculate probability as a percentage */ 497 | prob = p->probability; 498 | prob *= 100; 499 | prob = DIV_ROUND_UP(prob, 1 << 16); 500 | prob = DIV_ROUND_UP(prob, 1 << 16); 501 | 502 | tclass_num = prestera_qdisc_get_tclass_num(qdisc); 503 | 504 | return prestera_hw_wred_port_queue_enable(port, tclass_num, p->min, 505 | p->max, (u32)prob); 506 | } 507 | 508 | static int prestera_qdisc_red_destroy(struct prestera_port *port, 509 | struct prestera_qdisc *qdisc) 510 | { 511 | int tclass_num; 512 | 513 | tclass_num = prestera_qdisc_get_tclass_num(qdisc); 514 | return prestera_hw_wred_port_queue_disable(port, tclass_num); 515 | } 516 | 517 | static struct prestera_qdisc_ops prestera_qdisc_ets_ops = { 518 | .check = prestera_qdisc_ets_check, 519 | .replace = prestera_qdisc_ets_replace, 520 | .destroy = prestera_qdisc_ets_destroy, 521 | .get_tclass_num = prestera_qdisc_ets_get_tclass_num, 522 | .find_class = prestera_qdisc_ets_find_class, 523 | .get_stats = prestera_qdisc_ets_stats_get, 524 | .num_classes = IEEE_8021QAZ_MAX_TCS 525 | }; 526 | 527 | static int __prestera_setup_tc_ets(struct prestera_port *port, 528 | struct tc_ets_qopt_offload *p) 529 | { 530 | struct prestera_qdisc *qdisc; 531 | 532 | qdisc = prestera_qdisc_find(port, p->parent); 533 | if (!qdisc) 534 | return -EOPNOTSUPP; 535 | 536 | if (p->command == TC_ETS_REPLACE) 537 | return prestera_qdisc_replace(port, p->handle, qdisc, 538 | &prestera_qdisc_ets_ops, 539 | &p->replace_params); 540 | 541 | if (qdisc->handle != p->handle) 542 | return -EOPNOTSUPP; 543 | 544 | switch (p->command) { 545 | case TC_ETS_DESTROY: 546 | return prestera_qdisc_destroy(port, qdisc); 547 | case TC_ETS_STATS: 548 | return prestera_qdisc_get_stats(port, qdisc, &p->stats); 549 | case TC_ETS_GRAFT: 550 | return prestera_qdisc_graft(port, qdisc, p->graft_params.band, 551 | p->graft_params.child_handle); 552 | default: 553 | return -EOPNOTSUPP; 554 | } 555 | } 556 | 557 | int prestera_setup_tc_ets(struct prestera_port *port, 558 | struct tc_ets_qopt_offload *p) 559 | { 560 | int err; 561 | 562 | mutex_lock(&port->qdisc->lock); 563 | err = __prestera_setup_tc_ets(port, p); 564 | mutex_unlock(&port->qdisc->lock); 565 | 566 | return err; 567 | } 568 | 569 | static struct prestera_qdisc_ops prestera_qdisc_tbf_ops = { 570 | .replace = prestera_qdisc_tbf_replace, 571 | .destroy = prestera_qdisc_tbf_destroy, 572 | .get_stats = prestera_qdisc_tbf_stats_get 573 | }; 574 | 575 | static int __prestera_setup_tc_tbf(struct prestera_port *port, 576 | struct tc_tbf_qopt_offload *p) 577 | { 578 | struct prestera_qdisc *qdisc; 579 | 580 | qdisc = prestera_qdisc_find(port, p->parent); 581 | if (!qdisc) 582 | return -EOPNOTSUPP; 583 | 584 | if (p->command == TC_TBF_REPLACE) 585 | return prestera_qdisc_replace(port, p->handle, qdisc, 586 | &prestera_qdisc_tbf_ops, 587 | &p->replace_params); 588 | 589 | if (qdisc->handle != p->handle) 590 | return -EOPNOTSUPP; 591 | 592 | switch (p->command) { 593 | case TC_TBF_DESTROY: 594 | return prestera_qdisc_destroy(port, qdisc); 595 | case TC_TBF_STATS: 596 | return prestera_qdisc_get_stats(port, qdisc, &p->stats); 597 | default: 598 | return -EOPNOTSUPP; 599 | } 600 | 601 | return 0; 602 | } 603 | 604 | int prestera_setup_tc_tbf(struct prestera_port *port, 605 | struct tc_tbf_qopt_offload *p) 606 | { 607 | int err; 608 | 609 | mutex_lock(&port->qdisc->lock); 610 | err = __prestera_setup_tc_tbf(port, p); 611 | mutex_unlock(&port->qdisc->lock); 612 | 613 | return err; 614 | } 615 | 616 | static struct prestera_qdisc_ops 
prestera_qdisc_red_ops = { 617 | .check = prestera_qdisc_red_check, 618 | .replace = prestera_qdisc_red_replace, 619 | .destroy = prestera_qdisc_red_destroy, 620 | .get_stats = prestera_qdisc_tbf_stats_get 621 | }; 622 | 623 | static int __prestera_setup_tc_red(struct prestera_port *port, 624 | struct tc_red_qopt_offload *p) 625 | { 626 | struct prestera_qdisc *qdisc; 627 | 628 | qdisc = prestera_qdisc_find(port, p->parent); 629 | if (!qdisc) 630 | return -EOPNOTSUPP; 631 | 632 | if (p->command == TC_RED_REPLACE) 633 | return prestera_qdisc_replace(port, p->handle, qdisc, 634 | &prestera_qdisc_red_ops, 635 | &p->set); 636 | 637 | if (qdisc->handle != p->handle) 638 | return -EOPNOTSUPP; 639 | 640 | switch (p->command) { 641 | case TC_RED_DESTROY: 642 | return prestera_qdisc_destroy(port, qdisc); 643 | case TC_RED_STATS: 644 | return prestera_qdisc_get_stats(port, qdisc, &p->stats); 645 | default: 646 | return -EOPNOTSUPP; 647 | } 648 | 649 | return 0; 650 | } 651 | 652 | int prestera_setup_tc_red(struct prestera_port *port, 653 | struct tc_red_qopt_offload *p) 654 | { 655 | int err; 656 | 657 | mutex_lock(&port->qdisc->lock); 658 | err = __prestera_setup_tc_red(port, p); 659 | mutex_unlock(&port->qdisc->lock); 660 | 661 | return err; 662 | } 663 | 664 | int prestera_qdisc_port_init(struct prestera_port *port) 665 | { 666 | struct prestera_qdisc_port *qdisc_port; 667 | 668 | qdisc_port = kzalloc(sizeof(*qdisc_port), GFP_KERNEL); 669 | if (!qdisc_port) 670 | return -ENOMEM; 671 | 672 | mutex_init(&qdisc_port->lock); 673 | port->qdisc = qdisc_port; 674 | return 0; 675 | } 676 | 677 | void prestera_qdisc_port_fini(struct prestera_port *port) 678 | { 679 | mutex_destroy(&port->qdisc->lock); 680 | kfree(port->qdisc); 681 | } 682 | 683 | int prestera_qdisc_init(struct prestera_switch *sw) 684 | { 685 | INIT_LIST_HEAD(&sw->sched_list); 686 | return 0; 687 | } 688 | 689 | int prestera_qdisc_fini(struct prestera_switch *sw) 690 | { 691 | WARN_ON(!list_empty(&sw->sched_list)); 692 | return 0; 693 | } 694 | -------------------------------------------------------------------------------- /prestera/prestera_flower.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 2 | /* Copyright (c) 2019-2021 Marvell International Ltd. 
All rights reserved */ 3 | 4 | #include "prestera.h" 5 | #include "prestera_ct.h" 6 | #include "prestera_acl.h" 7 | #include "prestera_log.h" 8 | #include "prestera_hw.h" 9 | #include "prestera_flower.h" 10 | 11 | #define PRESTERA_DEFAULT_TC_NUM 8 12 | 13 | struct prestera_flower_template { 14 | struct prestera_acl_ruleset *ruleset; 15 | struct list_head list; 16 | u32 chain_index; 17 | }; 18 | 19 | static void 20 | prestera_flower_template_free(struct prestera_flower_template *template) 21 | { 22 | prestera_acl_ruleset_put(template->ruleset); 23 | list_del(&template->list); 24 | kfree(template); 25 | } 26 | 27 | void prestera_flower_template_cleanup(struct prestera_flow_block *block) 28 | { 29 | struct prestera_flower_template *template, *tmp; 30 | 31 | /* put the reference to all rulesets kept in tmpl create */ 32 | list_for_each_entry_safe(template, tmp, &block->template_list, list) 33 | prestera_flower_template_free(template); 34 | } 35 | 36 | static int 37 | prestera_flower_parse_goto_action(struct prestera_flow_block *block, 38 | struct prestera_acl_rule *rule, 39 | u32 chain_index, 40 | const struct flow_action_entry *act) 41 | { 42 | struct prestera_acl_ruleset *ruleset; 43 | 44 | if (act->chain_index <= chain_index) 45 | /* we can jump only forward */ 46 | return -EINVAL; 47 | 48 | if (rule->re_arg.jump.valid) 49 | return -EEXIST; 50 | 51 | ruleset = prestera_acl_ruleset_get(block->sw->acl, block, 52 | act->chain_index); 53 | if (IS_ERR(ruleset)) 54 | return PTR_ERR(ruleset); 55 | 56 | rule->re_arg.jump.valid = 1; 57 | rule->re_arg.jump.i.index = prestera_acl_ruleset_index_get(ruleset); 58 | 59 | rule->jump_ruleset = ruleset; 60 | 61 | return 0; 62 | } 63 | 64 | static int mvsw_pr_flower_parse_actions(struct prestera_flow_block *block, 65 | struct prestera_acl_rule *rule, 66 | struct flow_action *flow_action, 67 | u32 chain_index, 68 | struct netlink_ext_ack *extack) 69 | { 70 | const struct flow_action_entry *act; 71 | struct prestera_flow_block_binding *binding; 72 | int err, i; 73 | 74 | /* whole struct (rule->re_arg) must be initialized with 0 */ 75 | if (!flow_action_has_entries(flow_action)) 76 | return 0; 77 | 78 | if (!flow_action_mixed_hw_stats_check(flow_action, extack)) 79 | return -EOPNOTSUPP; 80 | 81 | act = flow_action_first_entry_get(flow_action); 82 | if (act->hw_stats & FLOW_ACTION_HW_STATS_DISABLED) { 83 | /* Nothing to do */ 84 | } else if (act->hw_stats & FLOW_ACTION_HW_STATS_DELAYED) { 85 | /* setup counter first */ 86 | rule->re_arg.count.valid = true; 87 | err = prestera_acl_chain_to_client(chain_index, block->ingress, 88 | &rule->re_arg.count.client); 89 | if (err) 90 | return err; 91 | } else { 92 | NL_SET_ERR_MSG_MOD(extack, "Unsupported action HW stats type"); 93 | return -EOPNOTSUPP; 94 | } 95 | 96 | flow_action_for_each(i, act, flow_action) { 97 | switch (act->id) { 98 | case FLOW_ACTION_ACCEPT: 99 | if (rule->re_arg.accept.valid) 100 | return -EEXIST; 101 | 102 | rule->re_arg.accept.valid = 1; 103 | break; 104 | case FLOW_ACTION_DROP: 105 | if (rule->re_arg.drop.valid) 106 | return -EEXIST; 107 | 108 | rule->re_arg.drop.valid = 1; 109 | break; 110 | case FLOW_ACTION_TRAP: 111 | if (rule->re_arg.trap.valid) 112 | return -EEXIST; 113 | 114 | rule->re_arg.trap.valid = 1; 115 | rule->re_arg.trap.i.hw_tc = 116 | prestera_acl_rule_hw_tc_get(rule); 117 | break; 118 | case FLOW_ACTION_POLICE: 119 | if (rule->re_arg.police.valid) 120 | return -EEXIST; 121 | 122 | rule->re_arg.police.valid = 1; 123 | rule->re_arg.police.i.rate = 124 | act->police.rate_bytes_ps; 
125 | rule->re_arg.police.i.burst = act->police.burst; 126 | break; 127 | case FLOW_ACTION_GOTO: 128 | err = prestera_flower_parse_goto_action(block, rule, 129 | chain_index, 130 | act); 131 | if (err) 132 | return err; 133 | 134 | rule_flag_set(rule, GOTO); 135 | break; 136 | case FLOW_ACTION_NAT: 137 | if (rule->re_arg.nat.valid) 138 | return -EEXIST; 139 | 140 | if (~act->nat.mask) { 141 | NL_SET_ERR_MSG_MOD(extack, 142 | "Netmask is not supported"); 143 | return -EOPNOTSUPP; 144 | } 145 | if (!act->nat.old_addr || !act->nat.new_addr) { 146 | NL_SET_ERR_MSG_MOD 147 | (extack, 148 | "All-zero IP address isn't supported"); 149 | return -EOPNOTSUPP; 150 | } 151 | 152 | rule->re_arg.nat.valid = 1; 153 | rule->re_arg.nat.i.old_addr = act->nat.old_addr; 154 | rule->re_arg.nat.i.new_addr = act->nat.new_addr; 155 | rule->re_arg.nat.i.flags = act->nat.flags; 156 | 157 | /* TODO: move this to the rule_add() */ 158 | binding = list_first_entry 159 | (&block->binding_list, 160 | struct prestera_flow_block_binding, list); 161 | rule->re_arg.nat.i.dev = binding->port->dev_id; 162 | rule->re_arg.nat.i.port = binding->port->hw_id; 163 | rule_flag_set(rule, NAT); 164 | break; 165 | case FLOW_ACTION_CT: 166 | /* TODO: check ct nat commit */ 167 | if (rule_flag_test(rule, CT)) 168 | return -EEXIST; 169 | 170 | err = prestera_ct_parse_action(act, rule, extack); 171 | if (err) 172 | return err; 173 | 174 | rule_flag_set(rule, CT); 175 | break; 176 | default: 177 | NL_SET_ERR_MSG_MOD(extack, "Unsupported action"); 178 | pr_err("Unsupported action\n"); 179 | return -EOPNOTSUPP; 180 | } 181 | } 182 | 183 | return 0; 184 | } 185 | 186 | static int mvsw_pr_flower_parse_meta(struct prestera_acl_rule *rule, 187 | struct flow_cls_offload *f, 188 | struct prestera_flow_block *block) 189 | { 190 | struct flow_rule *f_rule = flow_cls_offload_flow_rule(f); 191 | struct prestera_acl_match *r_match = &rule->re_key.match; 192 | struct prestera_port *port; 193 | struct net_device *ingress_dev; 194 | struct flow_match_meta match; 195 | __be16 key, mask; 196 | 197 | flow_rule_match_meta(f_rule, &match); 198 | if (match.mask->ingress_ifindex != 0xFFFFFFFF) { 199 | NL_SET_ERR_MSG_MOD(f->common.extack, 200 | "Unsupported ingress ifindex mask"); 201 | return -EINVAL; 202 | } 203 | 204 | ingress_dev = __dev_get_by_index(prestera_acl_block_net(block), 205 | match.key->ingress_ifindex); 206 | if (!ingress_dev) { 207 | NL_SET_ERR_MSG_MOD(f->common.extack, 208 | "Can't find specified ingress port to match on"); 209 | return -EINVAL; 210 | } 211 | 212 | if (!prestera_netdev_check(ingress_dev)) { 213 | NL_SET_ERR_MSG_MOD(f->common.extack, 214 | "Can't match on switchdev ingress port"); 215 | return -EINVAL; 216 | } 217 | port = netdev_priv(ingress_dev); 218 | 219 | mask = htons(0x1FFF << 3); 220 | key = htons(port->hw_id << 3); 221 | 222 | rule_match_set(r_match->key, SYS_PORT, key); 223 | rule_match_set(r_match->mask, SYS_PORT, mask); 224 | 225 | mask = htons(0x3FF); 226 | key = htons(port->dev_id); 227 | rule_match_set(r_match->key, SYS_DEV, key); 228 | rule_match_set(r_match->mask, SYS_DEV, mask); 229 | 230 | return 0; 231 | } 232 | 233 | static int mvsw_pr_flower_parse(struct prestera_flow_block *block, 234 | struct prestera_acl_rule *rule, 235 | struct flow_cls_offload *f) 236 | { 237 | struct flow_rule *f_rule = flow_cls_offload_flow_rule(f); 238 | struct flow_dissector *dissector = f_rule->match.dissector; 239 | struct prestera_acl_match *r_match = &rule->re_key.match; 240 | __be16 n_proto_mask = 0; 241 | __be16 n_proto_key = 0; 
242 | u16 addr_type = 0; 243 | u8 ip_proto = 0; 244 | u32 hwtc = 0; 245 | int err; 246 | 247 | if (dissector->used_keys & 248 | ~(BIT(FLOW_DISSECTOR_KEY_META) | 249 | BIT(FLOW_DISSECTOR_KEY_CONTROL) | 250 | BIT(FLOW_DISSECTOR_KEY_BASIC) | 251 | BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | 252 | BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | 253 | BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | 254 | BIT(FLOW_DISSECTOR_KEY_ICMP) | 255 | BIT(FLOW_DISSECTOR_KEY_PORTS) | 256 | BIT(FLOW_DISSECTOR_KEY_PORTS_RANGE) | 257 | BIT(FLOW_DISSECTOR_KEY_CT) | 258 | BIT(FLOW_DISSECTOR_KEY_VLAN))) { 259 | NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key"); 260 | MVSW_LOG_INFO("Unsupported key"); 261 | return -EOPNOTSUPP; 262 | } 263 | 264 | if (f->classid) { 265 | /* The classid values of TC_H_MIN_PRIORITY through 266 | * TC_H_MIN_PRIORITY + PRESTERA_DEFAULT_TC_NUM - 1 represents 267 | * the hardware traffic classes. 268 | */ 269 | hwtc = TC_H_MIN(f->classid) - TC_H_MIN_PRIORITY; 270 | if (hwtc >= PRESTERA_DEFAULT_TC_NUM) { 271 | NL_SET_ERR_MSG_MOD(f->common.extack, 272 | "Unsupported HW TC"); 273 | return -EINVAL; 274 | } 275 | prestera_acl_rule_hw_tc_set(rule, hwtc); 276 | } 277 | 278 | prestera_acl_rule_priority_set(rule, f->common.prio); 279 | 280 | if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_META)) { 281 | err = mvsw_pr_flower_parse_meta(rule, f, block); 282 | if (err) 283 | return err; 284 | } 285 | 286 | err = prestera_ct_match_parse(f, f->common.extack); 287 | if (err) 288 | return err; 289 | 290 | if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_CONTROL)) { 291 | struct flow_match_control match; 292 | 293 | flow_rule_match_control(f_rule, &match); 294 | addr_type = match.key->addr_type; 295 | } 296 | 297 | if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_BASIC)) { 298 | struct flow_match_basic match; 299 | 300 | flow_rule_match_basic(f_rule, &match); 301 | n_proto_key = match.key->n_proto; 302 | n_proto_mask = match.mask->n_proto; 303 | 304 | if (ntohs(match.key->n_proto) == ETH_P_ALL) { 305 | n_proto_key = 0; 306 | n_proto_mask = 0; 307 | } 308 | 309 | rule_match_set(r_match->key, ETH_TYPE, n_proto_key); 310 | rule_match_set(r_match->mask, ETH_TYPE, n_proto_mask); 311 | 312 | rule_match_set(r_match->key, IP_PROTO, match.key->ip_proto); 313 | rule_match_set(r_match->mask, IP_PROTO, match.mask->ip_proto); 314 | ip_proto = match.key->ip_proto; 315 | } 316 | 317 | if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { 318 | struct flow_match_eth_addrs match; 319 | 320 | flow_rule_match_eth_addrs(f_rule, &match); 321 | 322 | /* DA key, mask */ 323 | rule_match_set_n(r_match->key, 324 | ETH_DMAC_0, &match.key->dst[0], 4); 325 | rule_match_set_n(r_match->key, 326 | ETH_DMAC_1, &match.key->dst[4], 2); 327 | 328 | rule_match_set_n(r_match->mask, 329 | ETH_DMAC_0, &match.mask->dst[0], 4); 330 | rule_match_set_n(r_match->mask, 331 | ETH_DMAC_1, &match.mask->dst[4], 2); 332 | 333 | /* SA key, mask */ 334 | rule_match_set_n(r_match->key, 335 | ETH_SMAC_0, &match.key->src[0], 4); 336 | rule_match_set_n(r_match->key, 337 | ETH_SMAC_1, &match.key->src[4], 2); 338 | 339 | rule_match_set_n(r_match->mask, 340 | ETH_SMAC_0, &match.mask->src[0], 4); 341 | rule_match_set_n(r_match->mask, 342 | ETH_SMAC_1, &match.mask->src[4], 2); 343 | } 344 | 345 | if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { 346 | struct flow_match_ipv4_addrs match; 347 | 348 | flow_rule_match_ipv4_addrs(f_rule, &match); 349 | 350 | rule_match_set(r_match->key, IP_SRC, match.key->src); 351 | rule_match_set(r_match->mask, IP_SRC, match.mask->src); 
352 | 353 | rule_match_set(r_match->key, IP_DST, match.key->dst); 354 | rule_match_set(r_match->mask, IP_DST, match.mask->dst); 355 | } 356 | 357 | if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_PORTS)) { 358 | struct flow_match_ports match; 359 | 360 | if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) { 361 | NL_SET_ERR_MSG_MOD 362 | (f->common.extack, 363 | "Only UDP and TCP keys are supported"); 364 | return -EINVAL; 365 | } 366 | 367 | flow_rule_match_ports(f_rule, &match); 368 | 369 | rule_match_set(r_match->key, L4_PORT_SRC, match.key->src); 370 | rule_match_set(r_match->mask, L4_PORT_SRC, match.mask->src); 371 | 372 | rule_match_set(r_match->key, L4_PORT_DST, match.key->dst); 373 | rule_match_set(r_match->mask, L4_PORT_DST, match.mask->dst); 374 | } 375 | 376 | if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_PORTS_RANGE)) { 377 | struct flow_match_ports_range match; 378 | __be32 tp_key, tp_mask; 379 | 380 | flow_rule_match_ports_range(f_rule, &match); 381 | 382 | /* src port range (min, max) */ 383 | tp_key = htonl(ntohs(match.key->tp_min.src) | 384 | (ntohs(match.key->tp_max.src) << 16)); 385 | tp_mask = htonl(ntohs(match.mask->tp_min.src) | 386 | (ntohs(match.mask->tp_max.src) << 16)); 387 | rule_match_set(r_match->key, L4_PORT_RANGE_SRC, tp_key); 388 | rule_match_set(r_match->mask, L4_PORT_RANGE_SRC, tp_mask); 389 | 390 | /* dst port range (min, max) */ 391 | tp_key = htonl(ntohs(match.key->tp_min.dst) | 392 | (ntohs(match.key->tp_max.dst) << 16)); 393 | tp_mask = htonl(ntohs(match.mask->tp_min.dst) | 394 | (ntohs(match.mask->tp_max.dst) << 16)); 395 | rule_match_set(r_match->key, L4_PORT_RANGE_DST, tp_key); 396 | rule_match_set(r_match->mask, L4_PORT_RANGE_DST, tp_mask); 397 | } 398 | 399 | if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_VLAN)) { 400 | struct flow_match_vlan match; 401 | 402 | flow_rule_match_vlan(f_rule, &match); 403 | 404 | if (match.mask->vlan_id != 0) { 405 | __be16 key = cpu_to_be16(match.key->vlan_id); 406 | __be16 mask = cpu_to_be16(match.mask->vlan_id); 407 | 408 | rule_match_set(r_match->key, VLAN_ID, key); 409 | rule_match_set(r_match->mask, VLAN_ID, mask); 410 | } 411 | 412 | rule_match_set(r_match->key, VLAN_TPID, match.key->vlan_tpid); 413 | rule_match_set(r_match->mask, VLAN_TPID, match.mask->vlan_tpid); 414 | } 415 | 416 | if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_ICMP)) { 417 | struct flow_match_icmp match; 418 | 419 | flow_rule_match_icmp(f_rule, &match); 420 | 421 | rule_match_set(r_match->key, ICMP_TYPE, match.key->type); 422 | rule_match_set(r_match->mask, ICMP_TYPE, match.mask->type); 423 | 424 | rule_match_set(r_match->key, ICMP_CODE, match.key->code); 425 | rule_match_set(r_match->mask, ICMP_CODE, match.mask->code); 426 | } 427 | 428 | return mvsw_pr_flower_parse_actions(block, rule, &f->rule->action, 429 | f->common.chain_index, 430 | f->common.extack); 431 | } 432 | 433 | static int prestera_flower_prio_check(struct prestera_flow_block *block, 434 | struct flow_cls_offload *f) 435 | { 436 | u32 mall_prio; 437 | int err; 438 | 439 | err = prestera_mall_prio_get(block, &mall_prio); 440 | if (err == -ENOENT) 441 | return 0; 442 | if (err) 443 | return err; 444 | 445 | if (f->common.prio <= mall_prio) 446 | return -EOPNOTSUPP; 447 | 448 | return 0; 449 | } 450 | 451 | int prestera_flower_prio_get(struct prestera_flow_block *block, 452 | u32 *prio) 453 | { 454 | if (!prestera_acl_block_rule_count(block)) 455 | return -ENOENT; 456 | 457 | *prio = block->flower_min_prio; 458 | return 0; 459 | } 460 | 461 | static void 
prestera_flower_prio_update(struct prestera_flow_block *block, 462 | u32 prio) 463 | { 464 | if (prio < block->flower_min_prio) 465 | block->flower_min_prio = prio; 466 | } 467 | 468 | int prestera_flower_replace(struct prestera_flow_block *block, 469 | struct flow_cls_offload *f) 470 | { 471 | struct prestera_acl_ruleset *ruleset; 472 | struct prestera_acl *acl = block->sw->acl; 473 | struct prestera_acl_rule *rule; 474 | int err; 475 | 476 | err = prestera_flower_prio_check(block, f); 477 | if (err) 478 | return err; 479 | 480 | ruleset = prestera_acl_ruleset_get(acl, block, f->common.chain_index); 481 | if (IS_ERR(ruleset)) 482 | return PTR_ERR(ruleset); 483 | 484 | /* increments the ruleset reference */ 485 | rule = prestera_acl_rule_create(ruleset, f->cookie, 486 | f->common.chain_index); 487 | if (IS_ERR(rule)) { 488 | err = PTR_ERR(rule); 489 | goto err_rule_create; 490 | } 491 | 492 | err = mvsw_pr_flower_parse(block, rule, f); 493 | if (err) 494 | goto err_rule_add; 495 | 496 | if (!prestera_acl_ruleset_is_offload(ruleset)) { 497 | err = prestera_acl_ruleset_offload(ruleset); 498 | if (err) 499 | goto err_ruleset_offload; 500 | } 501 | 502 | err = prestera_acl_rule_add(block->sw, rule); 503 | if (err) 504 | goto err_rule_add; 505 | 506 | prestera_flower_prio_update(block, f->common.prio); 507 | 508 | prestera_acl_ruleset_put(ruleset); 509 | return 0; 510 | 511 | err_ruleset_offload: 512 | err_rule_add: 513 | prestera_acl_rule_destroy(rule); 514 | err_rule_create: 515 | prestera_acl_ruleset_put(ruleset); 516 | return err; 517 | } 518 | 519 | void prestera_flower_destroy(struct prestera_flow_block *block, 520 | struct flow_cls_offload *f) 521 | { 522 | struct prestera_acl_ruleset *ruleset; 523 | struct prestera_acl_rule *rule; 524 | 525 | ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block, 526 | f->common.chain_index); 527 | if (IS_ERR(ruleset)) 528 | return; 529 | 530 | rule = prestera_acl_rule_lookup(ruleset, f->cookie); 531 | if (rule) { 532 | prestera_acl_rule_del(block->sw, rule); 533 | prestera_acl_rule_destroy(rule); 534 | } 535 | prestera_acl_ruleset_put(ruleset); 536 | } 537 | 538 | int prestera_flower_tmplt_create(struct prestera_flow_block *block, 539 | struct flow_cls_offload *f) 540 | { 541 | struct prestera_flower_template *template; 542 | struct prestera_acl_ruleset *ruleset; 543 | struct prestera_acl_rule rule; 544 | int err; 545 | 546 | memset(&rule, 0, sizeof(rule)); 547 | err = mvsw_pr_flower_parse(block, &rule, f); 548 | if (err) 549 | return err; 550 | 551 | template = kmalloc(sizeof(*template), GFP_KERNEL); 552 | if (!template) { 553 | err = -ENOMEM; 554 | goto err_malloc; 555 | } 556 | 557 | prestera_acl_rule_keymask_pcl_id_set(&rule, 0); 558 | ruleset = prestera_acl_ruleset_get(block->sw->acl, block, 559 | f->common.chain_index); 560 | if (IS_ERR_OR_NULL(ruleset)) { 561 | err = -EINVAL; 562 | goto err_ruleset_get; 563 | } 564 | 565 | /* preserve keymask/template to this ruleset */ 566 | prestera_acl_ruleset_keymask_set(ruleset, rule.re_key.match.mask); 567 | 568 | /* Skip the error, as it is not possible to reject a template operation; 569 | * keep the reference to the ruleset for rules to be added 570 | * to that ruleset later. If offload fails here, the ruleset 571 | * will be offloaded again when a new rule is added. Also, it is 572 | * unlikely that the ruleset is already offloaded at this stage.
573 | */ 574 | prestera_acl_ruleset_offload(ruleset); 575 | 576 | /* keep the reference to the ruleset */ 577 | template->ruleset = ruleset; 578 | template->chain_index = f->common.chain_index; 579 | list_add_rcu(&template->list, &block->template_list); 580 | return 0; 581 | 582 | err_ruleset_get: 583 | kfree(template); 584 | err_malloc: 585 | MVSW_LOG_ERROR("Create chain template failed"); 586 | return err; 587 | } 588 | 589 | void prestera_flower_tmplt_destroy(struct prestera_flow_block *block, 590 | struct flow_cls_offload *f) 591 | { 592 | struct prestera_flower_template *template, *tmp; 593 | 594 | list_for_each_entry_safe(template, tmp, &block->template_list, list) 595 | if (template->chain_index == f->common.chain_index) { 596 | /* put the reference to the ruleset kept in create */ 597 | prestera_flower_template_free(template); 598 | return; 599 | } 600 | } 601 | 602 | int prestera_flower_stats(struct prestera_flow_block *block, 603 | struct flow_cls_offload *f) 604 | { 605 | struct prestera_acl_ruleset *ruleset; 606 | struct prestera_acl_rule *rule; 607 | u64 packets; 608 | u64 lastuse; 609 | u64 bytes; 610 | int err; 611 | 612 | ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block, 613 | f->common.chain_index); 614 | if (IS_ERR(ruleset)) 615 | return PTR_ERR(ruleset); 616 | 617 | rule = prestera_acl_rule_lookup(ruleset, f->cookie); 618 | if (!rule) { 619 | err = -EINVAL; 620 | goto err_rule_get_stats; 621 | } 622 | 623 | err = prestera_acl_rule_get_stats(block->sw->acl, rule, &packets, 624 | &bytes, &lastuse); 625 | if (err) 626 | goto err_rule_get_stats; 627 | 628 | flow_stats_update(&f->stats, bytes, packets, 0, lastuse, 629 | FLOW_ACTION_HW_STATS_DELAYED); 630 | 631 | err_rule_get_stats: 632 | prestera_acl_ruleset_put(ruleset); 633 | return err; 634 | } 635 | --------------------------------------------------------------------------------
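
Note on the unit conversions used by the qdisc offload code above (prestera_qdisc.c): the TBF rate is programmed in Kbps and the burst in units of 4K bytes, while the RED drop probability is reduced to a percentage by multiplying by 100 and dividing by 2^32 (the two successive DIV_ROUND_UP(..., 1 << 16) steps). A standalone sketch of those conversions follows, with worked numbers; the sample input values are invented for illustration and are not taken from the driver.

#include <stdint.h>
#include <stdio.h>

/* Integer ceiling division, mirroring the kernel's DIV_ROUND_UP(). */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* TBF: hardware expects rate in Kbps and burst in 4K-byte units. */
	uint64_t rate_bytes_ps = 125000000ULL;		/* 1 Gbit/s */
	uint32_t max_size = 65536;			/* bytes */
	uint64_t rate_kbps = rate_bytes_ps / 1000 * 8;	/* -> 1000000 Kbps */
	uint32_t burst = max_size / 4096;		/* -> 16 units */

	/* RED: probability arrives as a fraction scaled to 2^32; multiplying
	 * by 100 and dividing by (1 << 16) twice yields a percentage.
	 */
	uint64_t probability = 429496729ULL;		/* ~0.10 * 2^32 */
	uint64_t prob = probability * 100;
	prob = DIV_ROUND_UP(prob, 1 << 16);
	prob = DIV_ROUND_UP(prob, 1 << 16);		/* -> 10 (%) */

	printf("rate=%llu Kbps burst=%u prob=%llu%%\n",
	       (unsigned long long)rate_kbps, (unsigned)burst,
	       (unsigned long long)prob);
	return 0;
}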