Ramaxel inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4CBDP
CVE: NA
Add NIC layer support, including:
1. Register the net device with the kernel
2. Implement the hooks of 'struct net_device_ops' and 'struct ethtool_ops'
3. Add the supporting tx/rx, RSS, DCB and SR-IOV configuration code
Signed-off-by: Yanling Song <songyl@ramaxel.com>
Reviewed-by: Leizhen (ThunderTown) <thunder.leizhen@huawei.com>
---
 drivers/net/ethernet/ramaxel/spnic/Makefile   |   24 +-
 .../ethernet/ramaxel/spnic/hw/sphw_hwdev.c    |    8 -
 .../net/ethernet/ramaxel/spnic/spnic_dbg.c    |  752 ++++++++
 .../net/ethernet/ramaxel/spnic/spnic_dcb.c    |  965 +++++++++++
 .../net/ethernet/ramaxel/spnic/spnic_dcb.h    |   56 +
 .../ethernet/ramaxel/spnic/spnic_dev_mgmt.c   |  811 +++++++++
 .../ethernet/ramaxel/spnic/spnic_dev_mgmt.h   |   78 +
 .../ethernet/ramaxel/spnic/spnic_ethtool.c    |  988 +++++++++++
 .../ramaxel/spnic/spnic_ethtool_stats.c       | 1035 +++++++++++
 .../net/ethernet/ramaxel/spnic/spnic_filter.c |  411 +++++
 .../net/ethernet/ramaxel/spnic/spnic_irq.c    |  178 ++
 .../net/ethernet/ramaxel/spnic/spnic_lld.c    |  937 ++++++++++
 .../net/ethernet/ramaxel/spnic/spnic_lld.h    |   75 +
 .../ethernet/ramaxel/spnic/spnic_mag_cfg.c    |  778 +++++++++
 .../ethernet/ramaxel/spnic/spnic_mag_cmd.h    |  643 +++++++
 .../net/ethernet/ramaxel/spnic/spnic_main.c   |  925 ++++++++++
 .../ramaxel/spnic/spnic_mgmt_interface.h      |  617 +++++++
 .../ethernet/ramaxel/spnic/spnic_netdev_ops.c | 1526 +++++++++++++++++
 .../net/ethernet/ramaxel/spnic/spnic_nic.h    |  148 ++
 .../ethernet/ramaxel/spnic/spnic_nic_cfg.c    | 1321 ++++++++++++++
 .../ethernet/ramaxel/spnic/spnic_nic_cfg.h    |  724 ++++++++
 .../ethernet/ramaxel/spnic/spnic_nic_cfg_vf.c |  647 +++++++
 .../ethernet/ramaxel/spnic/spnic_nic_cmd.h    |  105 ++
 .../ethernet/ramaxel/spnic/spnic_nic_dbg.c    |  151 ++
 .../ethernet/ramaxel/spnic/spnic_nic_dbg.h    |   16 +
 .../ethernet/ramaxel/spnic/spnic_nic_dev.h    |  352 ++++
 .../ethernet/ramaxel/spnic/spnic_nic_event.c  |  506 ++++++
 .../net/ethernet/ramaxel/spnic/spnic_nic_io.c | 1123 ++++++++++++
 .../net/ethernet/ramaxel/spnic/spnic_nic_io.h |  309 ++++
 .../net/ethernet/ramaxel/spnic/spnic_nic_qp.h |  421 +++++
 .../net/ethernet/ramaxel/spnic/spnic_ntuple.c |  841 +++++++++
 .../ethernet/ramaxel/spnic/spnic_pci_id_tbl.h |   12 +
 .../net/ethernet/ramaxel/spnic/spnic_rss.c    |  750 ++++++++
 .../net/ethernet/ramaxel/spnic/spnic_rss.h    |   48 +
 .../ethernet/ramaxel/spnic/spnic_rss_cfg.c    |  390 +++++
 drivers/net/ethernet/ramaxel/spnic/spnic_rx.c | 1249 ++++++++++++++
 drivers/net/ethernet/ramaxel/spnic/spnic_rx.h |  118 ++
 .../net/ethernet/ramaxel/spnic/spnic_sriov.c  |  200 +++
 .../net/ethernet/ramaxel/spnic/spnic_sriov.h  |   24 +
 drivers/net/ethernet/ramaxel/spnic/spnic_tx.c |  879 ++++++++++
 drivers/net/ethernet/ramaxel/spnic/spnic_tx.h |  129 ++
 41 files changed, 21261 insertions(+), 9 deletions(-)
 create mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_dbg.c
 create mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_dcb.c
 create mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_dcb.h
 create mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_dev_mgmt.c
 create mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_dev_mgmt.h
 create mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_ethtool.c
 create mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_ethtool_stats.c
 create mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_filter.c
 create mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_irq.c
 create mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_lld.c
 create mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_lld.h
 create mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_mag_cfg.c
 create mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_mag_cmd.h
 create mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_main.c
 create mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_mgmt_interface.h
 create mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_netdev_ops.c
 create mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_nic.h
 create mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_nic_cfg.c
 create mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_nic_cfg.h
 create mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_nic_cfg_vf.c
 create mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_nic_cmd.h
 create mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_nic_dbg.c
 create mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_nic_dbg.h
 create mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_nic_dev.h
 create mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_nic_event.c
 create mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_nic_io.c
 create mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_nic_io.h
 create mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_nic_qp.h
 create mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_ntuple.c
 create mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_pci_id_tbl.h
 create mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_rss.c
 create mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_rss.h
 create mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_rss_cfg.c
 create mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_rx.c
 create mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_rx.h
 create mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_sriov.c
 create mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_sriov.h
 create mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_tx.c
 create mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_tx.h
diff --git a/drivers/net/ethernet/ramaxel/spnic/Makefile b/drivers/net/ethernet/ramaxel/spnic/Makefile
index 3c9c7b0c7a80..f86ccff374f6 100644
--- a/drivers/net/ethernet/ramaxel/spnic/Makefile
+++ b/drivers/net/ethernet/ramaxel/spnic/Makefile
@@ -14,4 +14,26 @@ spnic-objs := hw/sphw_common.o \
 	hw/sphw_prof_adap.o \
 	hw/sphw_hw_cfg.o \
 	hw/sphw_hw_comm.o \
-	hw/sphw_hwdev.o
+	hw/sphw_hwdev.o \
+	spnic_sriov.o \
+	spnic_lld.o \
+	spnic_dev_mgmt.o \
+	spnic_main.o \
+	spnic_tx.o \
+	spnic_rx.o \
+	spnic_rss.o \
+	spnic_ntuple.o \
+	spnic_dcb.o \
+	spnic_ethtool.o \
+	spnic_ethtool_stats.o \
+	spnic_dbg.o \
+	spnic_irq.o \
+	spnic_filter.o \
+	spnic_netdev_ops.o \
+	spnic_nic_cfg.o \
+	spnic_mag_cfg.o \
+	spnic_nic_cfg_vf.o \
+	spnic_rss_cfg.o \
+	spnic_nic_event.o \
+	spnic_nic_io.o \
+	spnic_nic_dbg.o
diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hwdev.c b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hwdev.c
index 4e5cd1292b1e..f58b0325bf04 100644
--- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hwdev.c
+++ b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hwdev.c
@@ -52,14 +52,6 @@ enum sphw_pcie_tph {
 	SPHW_PCIE_TPH_DISABLE = 0,
 	SPHW_PCIE_TPH_ENABLE = 1,
 };
-#define SPNIC_NIC_DRV_NAME "spnic"
-#define SPNIC_DRV_VERSION "B090"
-#define SPNIC_DRV_DESC "Ramaxel(R) Network Interface Card Driver"
-
-MODULE_AUTHOR("Ramaxel Technologies CO., Ltd");
-MODULE_DESCRIPTION(SPNIC_DRV_DESC);
-MODULE_VERSION(SPNIC_DRV_VERSION);
-MODULE_LICENSE("GPL");
 
 #define SPHW_DMA_ATTR_INDIR_IDX_SHIFT 0
diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_dbg.c b/drivers/net/ethernet/ramaxel/spnic/spnic_dbg.c
new file mode 100644
index 000000000000..910baed023a5
--- /dev/null
+++ b/drivers/net/ethernet/ramaxel/spnic/spnic_dbg.c
@@ -0,0 +1,752 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/semaphore.h>
+
+#include "sphw_mt.h"
+#include "sphw_crm.h"
+#include "spnic_nic_dev.h"
+#include "spnic_nic_dbg.h"
+#include "spnic_nic_qp.h"
+#include "spnic_rx.h"
+#include "spnic_tx.h"
+#include "spnic_dcb.h"
+
+typedef int (*nic_driv_module)(struct spnic_nic_dev *nic_dev, const void *buf_in, u32 in_size,
+			       void *buf_out, u32 *out_size);
+
+struct nic_drv_module_handle {
+	enum driver_cmd_type driv_cmd_name;
+	nic_driv_module driv_func;
+};
+
+int get_nic_drv_version(void *buf_out, u32 *out_size)
+{
+	struct drv_version_info *ver_info = buf_out;
+
+	if (!buf_out) {
+		pr_err("Buf_out is NULL.\n");
+		return -EINVAL;
+	}
+
+	if (*out_size != sizeof(*ver_info)) {
+		pr_err("Unexpected out buf size from user: %u, expect: %lu\n",
+		       *out_size, sizeof(*ver_info));
+		return -EINVAL;
+	}
+
+	snprintf(ver_info->ver, sizeof(ver_info->ver), "%s [compiled with the kernel]",
+		 SPNIC_DRV_VERSION);
+
+	return 0;
+}
+
+static int get_tx_info(struct spnic_nic_dev *nic_dev, const void *buf_in,
+		       u32 in_size, void *buf_out, u32 *out_size)
+{
+	u16 q_id;
+
+	if (!SPHW_CHANNEL_RES_VALID(nic_dev)) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Netdev is down, can't get tx info\n");
+		return -EFAULT;
+	}
+
+	if (!buf_in || !buf_out) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Buf_in or buf_out is NULL.\n");
+		return -EINVAL;
+	}
+
+	q_id = *((u16 *)buf_in);
+
+	return spnic_dbg_get_sq_info(nic_dev->hwdev, q_id, buf_out, *out_size);
+}
+
+static int get_q_num(struct spnic_nic_dev *nic_dev,
+		     const void *buf_in, u32 in_size,
+		     void *buf_out, u32 *out_size)
+{
+	if (!SPHW_CHANNEL_RES_VALID(nic_dev)) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Netdev is down, can't get queue number\n");
+		return -EFAULT;
+	}
+
+	if (!buf_out) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Get queue number param buf_out is NULL.\n");
+		return -EINVAL;
+	}
+
+	if (*out_size != sizeof(u16)) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Unexpected out buf size from user: %u, expect: %lu\n",
+			  *out_size, sizeof(u16));
+		return -EINVAL;
+	}
+
+	*((u16 *)buf_out) = nic_dev->q_params.num_qps;
+
+	return 0;
+}
+
+static int get_tx_wqe_info(struct spnic_nic_dev *nic_dev,
+			   const void *buf_in, u32 in_size,
+			   void *buf_out, u32 *out_size)
+{
+	const struct wqe_info *info = buf_in;
+	u16 wqebb_cnt = 1;
+
+	if (!SPHW_CHANNEL_RES_VALID(nic_dev)) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Netdev is down, can't get tx wqe info\n");
+		return -EFAULT;
+	}
+
+	if (!info || !buf_out) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Buf_in or buf_out is NULL.\n");
+		return -EINVAL;
+	}
+
+	return spnic_dbg_get_wqe_info(nic_dev->hwdev, (u16)info->q_id,
+				      (u16)info->wqe_id, wqebb_cnt,
+				      buf_out, (u16 *)out_size, SPNIC_SQ);
+}
+
+static int get_rx_info(struct spnic_nic_dev *nic_dev, const void *buf_in,
+		       u32 in_size, void *buf_out, u32 *out_size)
+{
+	struct nic_rq_info *rq_info = buf_out;
+	u16 q_id;
+	int err;
+
+	if (!SPHW_CHANNEL_RES_VALID(nic_dev)) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Netdev is down, can't get rx info\n");
+		return -EFAULT;
+	}
+
+	if (!buf_in || !buf_out) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Buf_in or buf_out is NULL.\n");
+		return -EINVAL;
+	}
+
+	q_id = *((u16 *)buf_in);
+
+	err = spnic_dbg_get_rq_info(nic_dev->hwdev, q_id, buf_out, *out_size);
+	if (err) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Get rq info failed, ret is %d.\n", err);
+		return err;
+	}
+
+	rq_info->ci = (u16)nic_dev->rxqs[q_id].cons_idx &
+		      nic_dev->rxqs[q_id].q_mask;
+
+	rq_info->sw_pi = nic_dev->rxqs[q_id].next_to_update;
+	rq_info->msix_vector = nic_dev->rxqs[q_id].irq_id;
+
+	rq_info->coalesc_timer_cfg = nic_dev->rxqs[q_id].last_coalesc_timer_cfg;
+	rq_info->pending_limt = nic_dev->rxqs[q_id].last_pending_limt;
+
+	return 0;
+}
+
+static int get_rx_wqe_info(struct spnic_nic_dev *nic_dev, const void *buf_in,
+			   u32 in_size, void *buf_out, u32 *out_size)
+{
+	const struct wqe_info *info = buf_in;
+	u16 wqebb_cnt = 1;
+
+	if (!SPHW_CHANNEL_RES_VALID(nic_dev)) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Netdev is down, can't get rx wqe info\n");
+		return -EFAULT;
+	}
+
+	if (!info || !buf_out) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Buf_in or buf_out is NULL.\n");
+		return -EINVAL;
+	}
+
+	return spnic_dbg_get_wqe_info(nic_dev->hwdev, (u16)info->q_id,
+				      (u16)info->wqe_id, wqebb_cnt,
+				      buf_out, (u16 *)out_size, SPNIC_RQ);
+}
+
+static int get_rx_cqe_info(struct spnic_nic_dev *nic_dev, const void *buf_in,
+			   u32 in_size, void *buf_out, u32 *out_size)
+{
+	const struct wqe_info *info = buf_in;
+	u16 q_id = 0;
+	u16 idx = 0;
+
+	if (!SPHW_CHANNEL_RES_VALID(nic_dev)) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Netdev is down, can't get rx cqe info\n");
+		return -EFAULT;
+	}
+
+	if (!info || !buf_out) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Buf_in or buf_out is NULL.\n");
+		return -EINVAL;
+	}
+
+	if (*out_size != sizeof(struct spnic_rq_cqe)) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Unexpected out buf size from user: %u, expect: %lu\n",
+			  *out_size, sizeof(struct spnic_rq_cqe));
+		return -EINVAL;
+	}
+
+	q_id = (u16)info->q_id;
+	idx = (u16)info->wqe_id;
+
+	if (q_id >= nic_dev->q_params.num_qps ||
+	    idx >= nic_dev->rxqs[q_id].q_depth) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Invalid q_id[%u] >= %u, or wqe idx[%u] >= %u.\n",
+			  q_id, nic_dev->q_params.num_qps, idx,
+			  nic_dev->rxqs[q_id].q_depth);
+		return -EFAULT;
+	}
+
+	memcpy(buf_out, nic_dev->rxqs[q_id].rx_info[idx].cqe,
+	       sizeof(struct spnic_rq_cqe));
+
+	return 0;
+}
+
+static void clean_nicdev_stats(struct spnic_nic_dev *nic_dev)
+{
+	u64_stats_update_begin(&nic_dev->stats.syncp);
+	nic_dev->stats.netdev_tx_timeout = 0;
+	nic_dev->stats.tx_carrier_off_drop = 0;
+	nic_dev->stats.tx_invalid_qid = 0;
+	u64_stats_update_end(&nic_dev->stats.syncp);
+}
+
+static int clear_func_static(struct spnic_nic_dev *nic_dev, const void *buf_in,
+			     u32 in_size, void *buf_out, u32 *out_size)
+{
+	int i;
+
+	*out_size = 0;
+	clean_nicdev_stats(nic_dev);
+	for (i = 0; i < nic_dev->max_qps; i++) {
+		spnic_rxq_clean_stats(&nic_dev->rxqs[i].rxq_stats);
+		spnic_txq_clean_stats(&nic_dev->txqs[i].txq_stats);
+	}
+
+	return 0;
+}
+static int get_loopback_mode(struct spnic_nic_dev *nic_dev, const void *buf_in,
+			     u32 in_size, void *buf_out, u32 *out_size)
+{
+	struct spnic_nic_loop_mode *mode = buf_out;
+
+	if (!out_size || !mode)
+		return -EINVAL;
+
+	if (*out_size != sizeof(*mode)) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Unexpected out buf size from user: %u, expect: %lu\n",
+			  *out_size, sizeof(*mode));
+		return -EINVAL;
+	}
+
+	return spnic_get_loopback_mode(nic_dev->hwdev, (u8 *)&mode->loop_mode,
+				       (u8 *)&mode->loop_ctrl);
+}
+
+static int set_loopback_mode(struct spnic_nic_dev *nic_dev, const void *buf_in,
+			     u32 in_size, void *buf_out, u32 *out_size)
+{
+	const struct spnic_nic_loop_mode *mode = buf_in;
+	int err;
+
+	if (!test_bit(SPNIC_INTF_UP, &nic_dev->flags)) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Netdev is down, can't set loopback mode\n");
+		return -EFAULT;
+	}
+
+	if (!mode || !out_size || in_size != sizeof(*mode))
+		return -EINVAL;
+
+	if (*out_size != sizeof(*mode)) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Unexpected out buf size from user: %u, expect: %lu\n",
+			  *out_size, sizeof(*mode));
+		return -EINVAL;
+	}
+
+	err = spnic_set_loopback_mode(nic_dev->hwdev, (u8)mode->loop_mode, (u8)mode->loop_ctrl);
+	if (err == 0)
+		nicif_info(nic_dev, drv, nic_dev->netdev, "Set loopback mode %u en %u succeed\n",
+			   mode->loop_mode, mode->loop_ctrl);
+
+	return err;
+}
+
+enum spnic_nic_link_mode {
+	SPNIC_LINK_MODE_AUTO = 0,
+	SPNIC_LINK_MODE_UP,
+	SPNIC_LINK_MODE_DOWN,
+	SPNIC_LINK_MODE_MAX,
+};
+
+static int set_link_mode_param_valid(struct spnic_nic_dev *nic_dev,
+				     const void *buf_in, u32 in_size,
+				     u32 *out_size)
+{
+	if (!test_bit(SPNIC_INTF_UP, &nic_dev->flags)) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Netdev is down, can't set link mode\n");
+		return -EFAULT;
+	}
+
+	if (!buf_in || !out_size ||
+	    in_size != sizeof(enum spnic_nic_link_mode))
+		return -EINVAL;
+
+	if (*out_size != sizeof(enum spnic_nic_link_mode)) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Unexpected out buf size from user: %u, expect: %lu\n",
+			  *out_size, sizeof(enum spnic_nic_link_mode));
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int set_link_mode(struct spnic_nic_dev *nic_dev, const void *buf_in,
+			 u32 in_size, void *buf_out, u32 *out_size)
+{
+	const enum spnic_nic_link_mode *link = buf_in;
+	u8 link_status;
+
+	if (set_link_mode_param_valid(nic_dev, buf_in, in_size, out_size))
+		return -EFAULT;
+
+	switch (*link) {
+	case SPNIC_LINK_MODE_AUTO:
+		if (spnic_get_link_state(nic_dev->hwdev, &link_status))
+			link_status = false;
+		spnic_link_status_change(nic_dev, (bool)link_status);
+		nicif_info(nic_dev, drv, nic_dev->netdev,
+			   "Set link mode: auto succeed, now is link %s\n",
+			   (link_status ? "up" : "down"));
+		break;
+	case SPNIC_LINK_MODE_UP:
+		spnic_link_status_change(nic_dev, true);
+		nicif_info(nic_dev, drv, nic_dev->netdev,
+			   "Set link mode: up succeed\n");
+		break;
+	case SPNIC_LINK_MODE_DOWN:
+		spnic_link_status_change(nic_dev, false);
+		nicif_info(nic_dev, drv, nic_dev->netdev,
+			   "Set link mode: down succeed\n");
+		break;
+	default:
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Invalid link mode %d to set\n", *link);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int get_sset_count(struct spnic_nic_dev *nic_dev, const void *buf_in,
+			  u32 in_size, void *buf_out, u32 *out_size)
+{
+	u32 count;
+
+	if (!buf_in || in_size != sizeof(u32) || !out_size ||
+	    *out_size != sizeof(u32) || !buf_out) {
+		nicif_err(nic_dev, drv, nic_dev->netdev, "Invalid parameters, in_size: %u\n",
+			  in_size);
+		return -EINVAL;
+	}
+
+	switch (*((u32 *)buf_in)) {
+	case SHOW_SSET_IO_STATS:
+		count = spnic_get_io_stats_size(nic_dev);
+		break;
+	default:
+		count = 0;
+		break;
+	}
+
+	*((u32 *)buf_out) = count;
+
+	return 0;
+}
+
+static int get_sset_stats(struct spnic_nic_dev *nic_dev, const void *buf_in,
+			  u32 in_size, void *buf_out, u32 *out_size)
+{
+	struct spnic_show_item *items = buf_out;
+	u32 sset, count, size;
+	int err;
+
+	if (!buf_in || in_size != sizeof(u32) || !out_size || !buf_out) {
+		nicif_err(nic_dev, drv, nic_dev->netdev, "Invalid parameters, in_size: %u\n",
+			  in_size);
+		return -EINVAL;
+	}
+
+	size = sizeof(u32);
+	err = get_sset_count(nic_dev, buf_in, in_size, &count, &size);
+	if (err) {
+		nicif_err(nic_dev, drv, nic_dev->netdev, "Get sset count failed, ret=%d\n",
+			  err);
+		return -EINVAL;
+	}
+
+	if (count * sizeof(*items) != *out_size) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Unexpected out buf size from user: %u, expect: %lu\n",
+			  *out_size, count * sizeof(*items));
+		return -EINVAL;
+	}
+
+	sset = *((u32 *)buf_in);
+
+	switch (sset) {
+	case SHOW_SSET_IO_STATS:
+		spnic_get_io_stats(nic_dev, items);
+		break;
+	default:
+		nicif_err(nic_dev, drv, nic_dev->netdev, "Unknown %u to get stats\n",
+			  sset);
+		err = -EINVAL;
+		break;
+	}
+
+	return err;
+}
+
+static int dcb_mt_qos_map(struct spnic_nic_dev *nic_dev, const void *buf_in,
+			  u32 in_size, void *buf_out, u32 *out_size)
+{
+	const struct spnic_mt_qos_info *qos = buf_in;
+	struct spnic_mt_qos_info *qos_out = buf_out;
+	u8 up_cnt, up;
+	int err;
+
+	if (!buf_out || !out_size || !buf_in)
+		return -EINVAL;
+
+	if (*out_size != sizeof(*qos_out) || in_size != sizeof(*qos)) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Unexpected buf size from user, in_size: %u, out_size: %u, expect: %lu\n",
+			  in_size, *out_size, sizeof(*qos));
+		return -EINVAL;
+	}
+
+	memcpy(qos_out, qos, sizeof(*qos));
+	qos_out->head.status = 0;
+	if (qos->op_code & MT_DCB_OPCODE_WR) {
+		up_cnt = 0;
+		for (up = 0; up < SPNIC_DCB_UP_MAX; up++) {
+			if (qos->valid_up_bitmap & BIT(up))
+				up_cnt++;
+		}
+
+		if (up_cnt != nic_dev->wanted_dcb_cfg.max_cos) {
+			nicif_err(nic_dev, drv, nic_dev->netdev,
+				  "Invalid up bitmap: 0x%x",
+				  qos->valid_up_bitmap);
+			qos_out->head.status = MT_EINVAL;
+			return 0;
+		}
+
+		err = spnic_dcbcfg_set_up_bitmap(nic_dev, qos->valid_up_bitmap);
+		if (err)
+			qos_out->head.status = MT_EIO;
+	} else {
+		qos_out->valid_up_bitmap =
+			spnic_get_valid_up_bitmap(&nic_dev->wanted_dcb_cfg);
+		qos_out->valid_cos_bitmap =
+			nic_dev->wanted_dcb_cfg.valid_cos_bitmap;
+	}
+
+	return 0;
+}
+static int dcb_mt_dcb_state(struct spnic_nic_dev *nic_dev, const void *buf_in,
+			    u32 in_size, void *buf_out, u32 *out_size)
+{
+	const struct spnic_mt_dcb_state *dcb = buf_in;
+	struct spnic_mt_dcb_state *dcb_out = buf_out;
+	int err;
+
+	if (!buf_in || !buf_out || !out_size)
+		return -EINVAL;
+
+	if (*out_size != sizeof(*dcb_out) || in_size != sizeof(*dcb)) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Unexpected buf size from user, in_size: %u, out_size: %u, expect: %lu\n",
+			  in_size, *out_size, sizeof(*dcb));
+		return -EINVAL;
+	}
+
+	memcpy(dcb_out, dcb, sizeof(*dcb));
+	dcb_out->head.status = 0;
+	if (dcb->op_code & MT_DCB_OPCODE_WR) {
+		if (test_bit(SPNIC_DCB_ENABLE, &nic_dev->flags) == dcb->state)
+			return 0;
+
+		/* nic_mutex has been acquired by send_to_nic_driver and will
+		 * also be acquired inside spnic_setup_tc
+		 */
+		mutex_unlock(&nic_dev->nic_mutex);
+		rtnl_lock();
+		err = spnic_setup_tc(nic_dev->netdev,
+				     dcb->state ? nic_dev->wanted_dcb_cfg.max_cos : 0);
+		rtnl_unlock();
+		mutex_lock(&nic_dev->nic_mutex);
+		if (err)
+			dcb_out->head.status = MT_EIO;
+	} else {
+		dcb_out->state = !!test_bit(SPNIC_DCB_ENABLE, &nic_dev->flags);
+	}
+
+	return 0;
+}
+
+static int dcb_mt_pfc_state(struct spnic_nic_dev *nic_dev, const void *buf_in,
+			    u32 in_size, void *buf_out, u32 *out_size)
+{
+	const struct spnic_mt_pfc_state *pfc = buf_in;
+	struct spnic_mt_pfc_state *pfc_out = buf_out;
+	u8 cur_pfc_state, cur_pfc_en_bitmap;
+	int err;
+
+	if (!buf_in || !buf_out || !out_size)
+		return -EINVAL;
+
+	if (*out_size != sizeof(*pfc_out) || in_size != sizeof(*pfc)) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Unexpected buf size from user, in_size: %u, out_size: %u, expect: %lu\n",
+			  in_size, *out_size, sizeof(*pfc));
+		return -EINVAL;
+	}
+
+	cur_pfc_state = spnic_dcbcfg_get_pfc_state(nic_dev);
+	cur_pfc_en_bitmap = spnic_dcbcfg_get_pfc_pri_en(nic_dev);
+
+	memcpy(pfc_out, pfc, sizeof(*pfc));
+	pfc_out->head.status = 0;
+	if (pfc->op_code & MT_DCB_OPCODE_WR) {
+		if (pfc->op_code & MT_DCB_PFC_PFC_STATE)
+			spnic_dcbcfg_set_pfc_state(nic_dev, pfc->state);
+
+		if (pfc->op_code & MT_DCB_PFC_PFC_PRI_EN)
+			spnic_dcbcfg_set_pfc_pri_en(nic_dev, pfc->pfc_en_bitpamp);
+
+		if (test_bit(SPNIC_DCB_ENABLE, &nic_dev->flags)) {
+			err = spnic_configure_dcb(nic_dev->netdev);
+			if (err) {
+				pfc_out->head.status = MT_EIO;
+				goto set_err;
+			}
+		}
+	} else {
+		pfc_out->state = cur_pfc_state;
+		pfc_out->pfc_en_bitpamp = cur_pfc_en_bitmap;
+	}
+
+	return 0;
+
+set_err:
+	spnic_dcbcfg_set_pfc_state(nic_dev, cur_pfc_state);
+	spnic_dcbcfg_set_pfc_pri_en(nic_dev, cur_pfc_en_bitmap);
+	if (test_bit(SPNIC_DCB_ENABLE, &nic_dev->flags)) {
+		err = spnic_configure_dcb(nic_dev->netdev);
+		if (err)
+			nicif_warn(nic_dev, drv, nic_dev->netdev,
+				   "Failed to rollback pfc config\n");
+	}
+
+	return 0;
+}
+
+static int dcb_mt_ets_state(struct spnic_nic_dev *nic_dev, const void *buf_in,
+			    u32 in_size, void *buf_out, u32 *out_size)
+{
+	const struct spnic_mt_ets_state *ets = buf_in;
+	struct spnic_mt_ets_state *ets_out = buf_out;
+	struct spnic_dcb_config dcb_cfg_backup;
+	int err;
+
+	if (!buf_in || !buf_out || !out_size)
+		return -EINVAL;
+
+	if (*out_size != sizeof(*ets_out) || in_size != sizeof(*ets)) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Unexpected buf size from user, in_size: %u, out_size: %u, expect: %lu\n",
+			  in_size, *out_size, sizeof(*ets));
+		return -EINVAL;
+	}
+
+	memcpy(ets_out, ets, sizeof(*ets));
+	ets_out->head.status = 0;
+	if (ets->op_code & MT_DCB_OPCODE_WR) {
+		if (ets->op_code & (MT_DCB_ETS_UP_BW | MT_DCB_ETS_UP_PRIO)) {
+			nicif_err(nic_dev, drv, nic_dev->netdev, "Not support to set up bw and up prio\n");
+			ets_out->head.status = MT_EOPNOTSUPP;
+			return 0;
+		}
+
+		dcb_cfg_backup = nic_dev->wanted_dcb_cfg;
+
+		if (ets->op_code & MT_DCB_ETS_UP_TC) {
+			err = spnic_dcbcfg_set_ets_up_tc_map(nic_dev, ets->up_tc);
+			if (err) {
+				ets_out->head.status = MT_EIO;
+				return 0;
+			}
+		}
+
+		if (ets->op_code & MT_DCB_ETS_TC_BW) {
+			err = spnic_dcbcfg_set_ets_tc_bw(nic_dev, ets->tc_bw);
+			if (err) {
+				ets_out->head.status = MT_EIO;
+				goto set_err;
+			}
+		}
+
+		if (ets->op_code & MT_DCB_ETS_TC_PRIO)
+			spnic_dcbcfg_set_ets_tc_prio_type(nic_dev, ets->tc_prio_bitmap);
+
+		if (test_bit(SPNIC_DCB_ENABLE, &nic_dev->flags)) {
+			err = spnic_configure_dcb(nic_dev->netdev);
+			if (err) {
+				ets_out->head.status = MT_EIO;
+				goto set_err;
+			}
+		}
+	} else {
+		spnic_dcbcfg_get_ets_up_tc_map(nic_dev, ets_out->up_tc);
+		spnic_dcbcfg_get_ets_tc_bw(nic_dev, ets_out->tc_bw);
+		spnic_dcbcfg_get_ets_tc_prio_type(nic_dev, &ets_out->tc_prio_bitmap);
+	}
+
+	return 0;
+
+set_err:
+	nic_dev->wanted_dcb_cfg = dcb_cfg_backup;
+	if (test_bit(SPNIC_DCB_ENABLE, &nic_dev->flags)) {
+		err = spnic_configure_dcb(nic_dev->netdev);
+		if (err)
+			nicif_warn(nic_dev, drv, nic_dev->netdev,
+				   "Failed to rollback ets config\n");
+	}
+
+	return 0;
+}
+
+static int get_inter_num(struct spnic_nic_dev *nic_dev, const void *buf_in,
+			 u32 in_size, void *buf_out, u32 *out_size)
+{
+	u16 intr_num;
+
+	intr_num = sphw_intr_num(nic_dev->hwdev);
+
+	if (*out_size != sizeof(u16)) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Unexpected out buf size from user: %u, expect: %lu\n",
+			  *out_size, sizeof(u16));
+		return -EFAULT;
+	}
+
+	*(u16 *)buf_out = intr_num;
+	*out_size = sizeof(u16);
+
+	return 0;
+}
+
+static int get_netdev_name(struct spnic_nic_dev *nic_dev, const void *buf_in,
+			   u32 in_size, void *buf_out, u32 *out_size)
+{
+	if (*out_size != IFNAMSIZ) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Unexpected out buf size from user: %u, expect: %u\n",
+			  *out_size, IFNAMSIZ);
+		return -EFAULT;
+	}
+
+	strlcpy(buf_out, nic_dev->netdev->name, IFNAMSIZ);
+
+	return 0;
+}
+
+struct nic_drv_module_handle nic_driv_module_cmd_handle[] = {
+	{TX_INFO,		get_tx_info},
+	{Q_NUM,			get_q_num},
+	{TX_WQE_INFO,		get_tx_wqe_info},
+	{RX_INFO,		get_rx_info},
+	{RX_WQE_INFO,		get_rx_wqe_info},
+	{RX_CQE_INFO,		get_rx_cqe_info},
+	{GET_INTER_NUM,		get_inter_num},
+	{CLEAR_FUNC_STASTIC,	clear_func_static},
+	{GET_LOOPBACK_MODE,	get_loopback_mode},
+	{SET_LOOPBACK_MODE,	set_loopback_mode},
+	{SET_LINK_MODE,		set_link_mode},
+	{GET_SSET_COUNT,	get_sset_count},
+	{GET_SSET_ITEMS,	get_sset_stats},
+	{DCB_QOS_INFO,		dcb_mt_qos_map},
+	{DCB_STATE,		dcb_mt_dcb_state},
+	{DCB_PFC_STATE,		dcb_mt_pfc_state},
+	{DCB_ETS_STATE,		dcb_mt_ets_state},
+	{GET_ULD_DEV_NAME,	get_netdev_name},
+};
+
+static int send_to_nic_driver(struct spnic_nic_dev *nic_dev,
+			      u32 cmd, const void *buf_in,
+			      u32 in_size, void *buf_out, u32 *out_size)
+{
+	int index, num_cmds = sizeof(nic_driv_module_cmd_handle) /
+			      sizeof(nic_driv_module_cmd_handle[0]);
+	enum driver_cmd_type cmd_type = (enum driver_cmd_type)cmd;
+	int err = 0;
+
+	mutex_lock(&nic_dev->nic_mutex);
+	for (index = 0; index < num_cmds; index++) {
+		if (cmd_type ==
+		    nic_driv_module_cmd_handle[index].driv_cmd_name) {
+			err = nic_driv_module_cmd_handle[index].driv_func
+				(nic_dev, buf_in, in_size, buf_out, out_size);
+			break;
+		}
+	}
+	mutex_unlock(&nic_dev->nic_mutex);
+
+	if (index == num_cmds)
+		pr_err("Can't find callback for %d\n", cmd_type);
+
+	return err;
+}
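+/*
+ * nic_ioctl() is the NIC uld entry for management-tool commands: the driver
+ * version query needs no device context, every other command is dispatched
+ * to its handler through nic_driv_module_cmd_handle[].
+ */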
+int nic_ioctl(void *uld_dev, u32 cmd, const void *buf_in,
+	      u32 in_size, void *buf_out, u32 *out_size)
+{
+	if (cmd == GET_DRV_VERSION)
+		return get_nic_drv_version(buf_out, out_size);
+	else if (!uld_dev)
+		return -EINVAL;
+
+	return send_to_nic_driver(uld_dev, cmd, buf_in,
+				  in_size, buf_out, out_size);
+}
diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_dcb.c b/drivers/net/ethernet/ramaxel/spnic/spnic_dcb.c
new file mode 100644
index 000000000000..7108430e0618
--- /dev/null
+++ b/drivers/net/ethernet/ramaxel/spnic/spnic_dcb.c
@@ -0,0 +1,965 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+
+#include "sphw_crm.h"
+#include "spnic_lld.h"
+#include "spnic_nic_cfg.h"
+#include "spnic_nic_dev.h"
+#include "spnic_dcb.h"
+
+#define DCB_CFG_CHG_ETS		BIT(0)
+#define DCB_CFG_CHG_PFC		BIT(1)
+#define DCB_CFG_CHG_UP_COS	BIT(2)
+
+#define MAX_BW_PERCENT		100
+
+void spnic_set_prio_tc_map(struct spnic_nic_dev *nic_dev)
+{
+	struct spnic_dcb_config *dcb_cfg = &nic_dev->hw_dcb_cfg;
+	u8 valid_up_bitmap = spnic_get_valid_up_bitmap(dcb_cfg);
+	u8 default_tc = dcb_cfg->max_cos - 1;
+	u8 i, tc_id;
+
+	/* use 0~max_cos-1 as tc for netdev */
+	for (tc_id = 0, i = 0; i < SPNIC_DCB_COS_MAX; i++) {
+		if (dcb_cfg->valid_cos_bitmap & BIT(i)) {
+			netdev_set_prio_tc_map(nic_dev->netdev,
+					       dcb_cfg->cos_cfg[i].up, tc_id);
+			tc_id++;
+		}
+	}
+
+	/* set invalid up mapping to the default tc */
+	for (i = 0; i < SPNIC_DCB_UP_MAX; i++) {
+		if (!(valid_up_bitmap & BIT(i)))
+			netdev_set_prio_tc_map(nic_dev->netdev, i, default_tc);
+	}
+}
+
+void spnic_update_tx_db_cos(struct spnic_nic_dev *nic_dev)
+{
+	u8 i, valid_cos_bitmap, cos;
+	u16 num_rss;
+
+	if (!test_bit(SPNIC_DCB_ENABLE, &nic_dev->flags)) {
+		spnic_set_txq_cos(nic_dev, 0, nic_dev->q_params.num_qps,
+				  nic_dev->hw_dcb_cfg.default_cos);
+		return;
+	}
+
+	num_rss = nic_dev->q_params.num_rss;
+	valid_cos_bitmap = nic_dev->hw_dcb_cfg.valid_cos_bitmap;
+	for (i = 0; i < nic_dev->q_params.num_tc; i++) {
+		cos = (u8)(ffs(valid_cos_bitmap) - 1);
+		spnic_set_txq_cos(nic_dev, (u16)(i * num_rss), num_rss, cos);
+		valid_cos_bitmap &= (~BIT(cos));
+	}
+}
+
+int spnic_set_tx_cos_state(struct spnic_nic_dev *nic_dev)
+{
+	struct spnic_dcb_config *dcb_cfg = &nic_dev->hw_dcb_cfg;
+	struct spnic_dcb_state dcb_state = {0};
+	u8 default_cos, i;
+	int err;
+
+	if (SPNIC_FUNC_IS_VF(nic_dev->hwdev)) {
+		err = spnic_get_pf_dcb_state(nic_dev->hwdev, &dcb_state);
+		if (err) {
+			spnic_err(nic_dev, drv, "Failed to get vf default cos\n");
+			return err;
+		}
+		/* VF does not support DCB, use the default cos */
+		dcb_cfg->default_cos = dcb_state.default_cos;
+
+		return 0;
+	}
+
+	default_cos = dcb_cfg->default_cos;
+	dcb_state.dcb_on = !!test_bit(SPNIC_DCB_ENABLE, &nic_dev->flags);
+	dcb_state.default_cos = default_cos;
+	memset(dcb_state.up_cos, default_cos, sizeof(dcb_state.up_cos));
+	if (test_bit(SPNIC_DCB_ENABLE, &nic_dev->flags)) {
+		for (i = 0; i < SPNIC_DCB_COS_MAX; i++) {
+			if (dcb_cfg->valid_cos_bitmap & BIT(i))
+				dcb_state.up_cos[dcb_cfg->cos_cfg[i].up] = i;
+		}
+	}
+
+	err = spnic_set_dcb_state(nic_dev->hwdev, &dcb_state);
+	if (err)
+		spnic_err(nic_dev, drv, "Failed to set dcb state\n");
+
+	return err;
+}
+
+static void setup_tc_reopen_handler(struct spnic_nic_dev *nic_dev,
+				    const void *priv_data)
+{
+	u8 tc = *((u8 *)priv_data);
+
+	if (tc) {
+		netdev_set_num_tc(nic_dev->netdev, tc);
+		spnic_set_prio_tc_map(nic_dev);
+
+		set_bit(SPNIC_DCB_ENABLE, &nic_dev->flags);
+	} else {
+		netdev_reset_tc(nic_dev->netdev);
+
+		clear_bit(SPNIC_DCB_ENABLE, &nic_dev->flags);
+	}
+
+	spnic_set_tx_cos_state(nic_dev);
+}
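+/*
+ * Change the number of TCs exposed to the stack. The count must be a power
+ * of two within the hardware cos limit; on a running netdev the channels
+ * are restarted through spnic_change_channel_settings(), with a rollback to
+ * the previous TC count if that fails.
+ */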
+int spnic_setup_tc(struct net_device *netdev, u8 tc)
+{
+	struct spnic_nic_dev *nic_dev = netdev_priv(netdev);
+	struct spnic_dyna_txrxq_params q_params = {0};
+	u8 cur_tc;
+	int err;
+
+	if (tc && test_bit(SPNIC_SAME_RXTX, &nic_dev->flags)) {
+		nicif_err(nic_dev, drv, netdev, "Failed to enable DCB while Symmetric RSS is enabled\n");
+		return -EOPNOTSUPP;
+	}
+
+	if (tc > nic_dev->hw_dcb_cfg.max_cos) {
+		nicif_err(nic_dev, drv, netdev, "Invalid num_tc: %u, max tc: %u\n",
+			  tc, nic_dev->hw_dcb_cfg.max_cos);
+		return -EINVAL;
+	}
+
+	if (tc & (tc - 1)) {
+		nicif_err(nic_dev, drv, netdev,
+			  "Invalid num_tc: %u, must be power of 2\n", tc);
+		return -EINVAL;
+	}
+
+	if (netif_running(netdev)) {
+		cur_tc = nic_dev->q_params.num_tc;
+		q_params = nic_dev->q_params;
+		q_params.num_tc = tc;
+		q_params.txqs_res = NULL;
+		q_params.rxqs_res = NULL;
+		q_params.irq_cfg = NULL;
+
+		nicif_info(nic_dev, drv, netdev, "Change num_tc to %u, restarting channel\n",
+			   tc);
+		err = spnic_change_channel_settings(nic_dev, &q_params, setup_tc_reopen_handler,
+						    &tc);
+		if (err) {
+			if (cur_tc != nic_dev->q_params.num_tc) {
+				nicif_err(nic_dev, drv, netdev,
+					  "Restore num_tc to %u\n", cur_tc);
+				/* In this case, the channel resource is
+				 * invalid, so we can safely modify the number
+				 * of tc in netdev.
+				 */
+				nic_dev->q_params.num_tc = cur_tc;
+				setup_tc_reopen_handler(nic_dev, &cur_tc);
+			}
+			nicif_err(nic_dev, drv, netdev, "Failed to change channel settings\n");
+			return err;
+		}
+	} else {
+		setup_tc_reopen_handler(nic_dev, &tc);
+		spnic_update_num_qps(netdev);
+	}
+
+	spnic_configure_dcb(netdev);
+
+	return 0;
+}
+
+/* Ucode thread timeout is 210ms, so this must be larger than 210ms */
+#define SPNIC_WAIT_PORT_IO_STOP	250
+
+static int spnic_stop_port_traffic_flow(struct spnic_nic_dev *nic_dev, bool wait)
+{
+	int err = 0;
+
+	down(&nic_dev->dcb_sem);
+
+	if (nic_dev->disable_port_cnt++ != 0)
+		goto out;
+
+	err = spnic_force_port_disable(nic_dev);
+	if (err) {
+		spnic_err(nic_dev, drv, "Failed to disable port\n");
+		goto set_port_err;
+	}
+
+	err = spnic_set_port_funcs_state(nic_dev->hwdev, false);
+	if (err) {
+		spnic_err(nic_dev, drv, "Failed to disable all functions in port\n");
+		goto set_port_funcs_err;
+	}
+
+	spnic_info(nic_dev, drv, "Stop port traffic flow\n");
+
+	goto out;
+
+set_port_funcs_err:
+	spnic_force_set_port_state(nic_dev, !!netif_running(nic_dev->netdev));
+
+set_port_err:
+out:
+	if (err)
+		nic_dev->disable_port_cnt--;
+
+	up(&nic_dev->dcb_sem);
+	if (!err && wait && nic_dev->netdev->reg_state == NETREG_REGISTERED)
+		msleep(SPNIC_WAIT_PORT_IO_STOP);
+
+	return err;
+}
+
+static int spnic_start_port_traffic_flow(struct spnic_nic_dev *nic_dev)
+{
+	int err;
+
+	down(&nic_dev->dcb_sem);
+
+	nic_dev->disable_port_cnt--;
+	if (nic_dev->disable_port_cnt > 0) {
+		up(&nic_dev->dcb_sem);
+		return 0;
+	}
+
+	nic_dev->disable_port_cnt = 0;
+	up(&nic_dev->dcb_sem);
+
+	err = spnic_force_set_port_state(nic_dev, !!netif_running(nic_dev->netdev));
+	if (err)
+		spnic_err(nic_dev, drv, "Failed to restore port state\n");
+
+	err = spnic_set_port_funcs_state(nic_dev->hwdev, true);
+	if (err)
+		spnic_err(nic_dev, drv, "Failed to enable all functions in port\n");
+
+	spnic_info(nic_dev, drv, "Start port traffic flow\n");
+
+	return err;
+}
+
+static u8 get_cos_settings(u8 hw_valid_cos_bitmap, u8 *dst_valid_cos_bitmap)
+{
+	u8 support_cos = 0;
+	u8 num_cos, overflow;
+	u8 i;
+
+	for (i = 0; i < SPNIC_DCB_COS_MAX; i++) {
+		if (hw_valid_cos_bitmap & BIT(i))
+			support_cos++;
+	}
+
+	num_cos = (u8)(1U << (u8)ilog2(support_cos));
+	if (num_cos != support_cos) {
+		/* Remove unused cos id */
+		overflow = support_cos - num_cos;
+		i = SPNIC_DCB_COS_MAX - 1;
+		while (overflow) {
+			if (hw_valid_cos_bitmap & BIT(i)) {
+				hw_valid_cos_bitmap &= (~BIT(i));
+				overflow--;
+			}
+
+			i--;
+		}
+	}
+
+	*dst_valid_cos_bitmap = hw_valid_cos_bitmap;
+
+	return num_cos;
+}
+
+static int get_dft_valid_up_bitmap(struct spnic_nic_dev *nic_dev, u8 num_pri,
+				   u8 *valid_up_bitmap)
+{
+	bool setted = false;
+	u8 up_bitmap = 0;
+	u8 up;
+	int err;
+
+	err = spnic_get_chip_up_bitmap(nic_dev->pdev, &setted, &up_bitmap);
+	if (err) {
+		spnic_err(nic_dev, drv, "Get chip cos_up map failed\n");
+		return -EFAULT;
+	}
+
+	if (!setted) {
+		/* Use (num_cos-1)~0 as default user priority */
+		for (up = 0; up < num_pri; up++)
+			up_bitmap |= (u8)BIT(up);
+	}
+
+	err = spnic_set_chip_up_bitmap(nic_dev->pdev, up_bitmap);
+	if (err) {
+		spnic_err(nic_dev, drv, "Set chip cos_up map failed\n");
+		return -EFAULT;
+	}
+
+	*valid_up_bitmap = up_bitmap;
+
+	return 0;
+}
+
+u8 spnic_get_valid_up_bitmap(struct spnic_dcb_config *dcb_cfg)
+{
+	u8 valid_up_bitmap = 0;
+	u8 i;
+
+	for (i = 0; i < SPNIC_DCB_COS_MAX; i++) {
+		if (dcb_cfg->valid_cos_bitmap & BIT(i))
+			valid_up_bitmap |= (u8)BIT(dcb_cfg->cos_cfg[i].up);
+	}
+
+	return valid_up_bitmap;
+}
+
+static void update_valid_up_bitmap(struct spnic_dcb_config *dcb_cfg,
+				   u8 valid_up_bitmap)
+{
+	u8 i, up;
+
+	for (i = 0; i < SPNIC_DCB_COS_MAX; i++) {
+		if (!(dcb_cfg->valid_cos_bitmap & BIT(i))) {
+			dcb_cfg->cos_cfg[i].up = 0;
+			continue;
+		}
+
+		/* get the highest priority */
+		up = (u8)fls(valid_up_bitmap) - 1;
+		valid_up_bitmap &= (~BIT(up));
+
+		dcb_cfg->cos_cfg[i].up = up;
+	}
+}
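+/*
+ * Build the default DCB configuration: every valid cos is mapped to tc0 with
+ * DWRR scheduling and the full bandwidth share, and PFC starts disabled.
+ */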
+static int init_default_dcb_cfg(struct spnic_nic_dev *nic_dev,
+				struct spnic_dcb_config *dcb_cfg)
+{
+	struct spnic_cos_cfg *cos_cfg = dcb_cfg->cos_cfg;
+	struct spnic_tc_cfg *tc_cfg = dcb_cfg->tc_cfg;
+	u8 valid_cos_bitmap, i;
+	u8 valid_up_bitmap = 0;
+	int err;
+
+	valid_cos_bitmap = sphw_cos_valid_bitmap(nic_dev->hwdev);
+	if (!valid_cos_bitmap) {
+		spnic_err(nic_dev, drv, "No cos supported\n");
+		return -EFAULT;
+	}
+
+	dcb_cfg->max_cos = get_cos_settings(valid_cos_bitmap,
+					    &dcb_cfg->valid_cos_bitmap);
+	dcb_cfg->default_cos = (u8)fls(dcb_cfg->valid_cos_bitmap) - 1;
+
+	err = get_dft_valid_up_bitmap(nic_dev, dcb_cfg->max_cos,
+				      &valid_up_bitmap);
+	if (err)
+		return err;
+
+	for (i = 0; i < SPNIC_DCB_COS_MAX; i++) {
+		/* set all cos with 100 percent bw in default */
+		cos_cfg[i].bw_pct = MAX_BW_PERCENT;
+		cos_cfg[i].prio_sp = 0;	/* DWRR */
+		cos_cfg[i].tc_id = 0;	/* all cos mapping to tc0 */
+		cos_cfg[i].up = 0;
+	}
+
+	update_valid_up_bitmap(dcb_cfg, valid_up_bitmap);
+
+	for (i = 0; i < SPNIC_DCB_TC_MAX; i++) {
+		/* tc0 with 100 percent bw in default */
+		tc_cfg[i].bw_pct = (i == 0) ? MAX_BW_PERCENT : 0;
+		tc_cfg[i].prio_sp = 0;	/* DWRR */
+	}
+
+	/* disable pfc */
+	dcb_cfg->pfc_state = 0;
+	dcb_cfg->pfc_en_bitmap = 0;
+
+	return 0;
+}
+
+int spnic_dcb_init(struct spnic_nic_dev *nic_dev)
+{
+	struct spnic_dcb_config *dcb_cfg = &nic_dev->wanted_dcb_cfg;
+	int err;
+
+	if (SPNIC_FUNC_IS_VF(nic_dev->hwdev))
+		return spnic_set_tx_cos_state(nic_dev);
+
+	err = init_default_dcb_cfg(nic_dev, dcb_cfg);
+	if (err) {
+		spnic_err(nic_dev, drv, "Initialize dcb configuration failed\n");
+		return err;
+	}
+
+	spnic_info(nic_dev, drv, "Support num cos %u, default cos %u\n",
+		   dcb_cfg->max_cos, dcb_cfg->default_cos);
+
+	nic_dev->dcb_changes = DCB_CFG_CHG_ETS | DCB_CFG_CHG_PFC |
+			       DCB_CFG_CHG_UP_COS;
+
+	memcpy(&nic_dev->hw_dcb_cfg, &nic_dev->wanted_dcb_cfg,
+	       sizeof(nic_dev->hw_dcb_cfg));
+
+	err = spnic_set_tx_cos_state(nic_dev);
+	if (err) {
+		spnic_err(nic_dev, drv, "Set tx cos state failed\n");
+		return err;
+	}
+
+	sema_init(&nic_dev->dcb_sem, 1);
+
+	return 0;
+}
+
+u32 spnic_sync_dcb_cfg(struct spnic_nic_dev *nic_dev, struct spnic_dcb_config *src_dcb_cfg)
+{
+	struct spnic_dcb_config *wanted_cfg = src_dcb_cfg;
+	struct spnic_dcb_config *hw_cfg = &nic_dev->hw_dcb_cfg;
+	u32 changes = 0;
+
+	if (memcmp(hw_cfg->cos_cfg, wanted_cfg->cos_cfg,
+		   sizeof(hw_cfg->cos_cfg))) {
+		memcpy(hw_cfg->cos_cfg, wanted_cfg->cos_cfg,
+		       sizeof(hw_cfg->cos_cfg));
+		changes |= DCB_CFG_CHG_ETS;
+	}
+
+	if (memcmp(hw_cfg->tc_cfg, wanted_cfg->tc_cfg,
+		   sizeof(hw_cfg->tc_cfg))) {
+		memcpy(hw_cfg->tc_cfg, wanted_cfg->tc_cfg,
+		       sizeof(hw_cfg->tc_cfg));
+		changes |= DCB_CFG_CHG_ETS;
+	}
+
+	if (hw_cfg->pfc_state != wanted_cfg->pfc_state ||
+	    (wanted_cfg->pfc_state &&
+	     hw_cfg->pfc_en_bitmap != wanted_cfg->pfc_en_bitmap)) {
+		hw_cfg->pfc_state = wanted_cfg->pfc_state;
+		hw_cfg->pfc_en_bitmap = wanted_cfg->pfc_en_bitmap;
+		changes |= DCB_CFG_CHG_PFC;
+	}
+
+	return changes;
+}
+
+static int dcbcfg_set_hw_cos_up_map(struct spnic_nic_dev *nic_dev,
+				    struct spnic_dcb_config *dcb_cfg)
+{
+	u8 cos_up_map[SPNIC_DCB_COS_MAX] = {0};
+	int err;
+	u8 i;
+
+	for (i = 0; i < SPNIC_DCB_COS_MAX; i++) {
+		if (!(dcb_cfg->valid_cos_bitmap & BIT(i)))
+			continue;
+
+		cos_up_map[i] = dcb_cfg->cos_cfg[i].up;
+	}
+
+	err = spnic_dcb_set_cos_up_map(nic_dev->hwdev, dcb_cfg->valid_cos_bitmap,
+				       cos_up_map, SPNIC_DCB_COS_MAX);
+	if (err)
+		spnic_err(nic_dev, drv, "Set cos_up map failed\n");
+
+	return err;
+}
+
+/* The sum of the cos bandwidth mapped to the same TC is 100 */
+static void adjust_cos_bw(u8 valid_cos_bitmap, u8 *cos_tc, u8 *cos_bw)
+{
+	u8 tc, cos, cos_num;
+	u16 bw_all, bw_remain;
+
+	for (tc = 0; tc < SPNIC_DCB_TC_MAX; tc++) {
+		bw_all = 0;
+		cos_num = 0;
+		for (cos = 0; cos < SPNIC_DCB_COS_MAX; cos++) {
+			if (!(valid_cos_bitmap & BIT(cos)) || cos_tc[cos] != tc)
+				continue;
+			bw_all += cos_bw[cos];
+			cos_num++;
+		}
+
+		if (!bw_all || !cos_num)
+			continue;
+
+		bw_remain = MAX_BW_PERCENT % cos_num;
+		for (cos = 0; cos < SPNIC_DCB_COS_MAX; cos++) {
+			if (!(valid_cos_bitmap & BIT(cos)) || cos_tc[cos] != tc)
+				continue;
+
+			cos_bw[cos] =
+				(u8)(MAX_BW_PERCENT * cos_bw[cos] / bw_all);
+
+			if (bw_remain) {
+				cos_bw[cos]++;
+				bw_remain--;
+			}
+		}
+	}
+}
+
+static void dcbcfg_dump_configuration(struct spnic_nic_dev *nic_dev,
+				      u8 *cos_tc, u8 *cos_bw, u8 *cos_prio,
+				      u8 *tc_bw, u8 *tc_prio)
+{
+	u8 i;
+
+	for (i = 0; i < SPNIC_DCB_COS_MAX; i++) {
+		if (!(nic_dev->hw_dcb_cfg.valid_cos_bitmap & BIT(i)))
+			continue;
+
+		spnic_info(nic_dev, drv, "cos: %u, up: %u, tc: %u, bw: %u, prio: %u\n",
+			   i, nic_dev->hw_dcb_cfg.cos_cfg[i].up, cos_tc[i],
+			   cos_bw[i], cos_prio[i]);
+	}
+
+	for (i = 0; i < nic_dev->hw_dcb_cfg.max_cos; i++)
+		spnic_info(nic_dev, drv, "tc: %u, bw: %u, prio: %u\n",
+			   i, tc_bw[i], tc_prio[i]);
+}
+
+static int dcbcfg_set_hw_ets(struct spnic_nic_dev *nic_dev,
+			     struct spnic_dcb_config *dcb_cfg)
+{
+	u8 cos_tc[SPNIC_DCB_COS_MAX] = {0};
+	u8 cos_bw[SPNIC_DCB_COS_MAX] = {0};
+	u8 cos_prio[SPNIC_DCB_COS_MAX] = {0};
+	u8 tc_bw[SPNIC_DCB_TC_MAX] = {0};
+	u8 tc_prio[SPNIC_DCB_TC_MAX] = {0};
+	int err;
+	u8 i;
+
+	for (i = 0; i < SPNIC_DCB_COS_MAX; i++) {
+		if (!(dcb_cfg->valid_cos_bitmap & BIT(i)))
+			continue;
+
+		cos_tc[i] = dcb_cfg->cos_cfg[i].tc_id;
+		cos_bw[i] = dcb_cfg->cos_cfg[i].bw_pct;
+		cos_prio[i] = dcb_cfg->cos_cfg[i].prio_sp;
+	}
+
+	for (i = 0; i < SPNIC_DCB_TC_MAX; i++) {
+		tc_bw[i] = dcb_cfg->tc_cfg[i].bw_pct;
+		tc_prio[i] = dcb_cfg->tc_cfg[i].prio_sp;
+	}
+
+	adjust_cos_bw(dcb_cfg->valid_cos_bitmap, cos_tc, cos_bw);
+
+	if (test_bit(SPNIC_DCB_ENABLE, &nic_dev->flags))
+		dcbcfg_dump_configuration(nic_dev, cos_tc, cos_bw, cos_prio, tc_bw, tc_prio);
+
+	err = spnic_dcb_set_ets(nic_dev->hwdev, cos_tc, cos_bw, cos_prio, tc_bw, tc_prio);
+	if (err) {
+		spnic_err(nic_dev, drv, "Failed to set ets\n");
+		return err;
+	}
+
+	return 0;
+}
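+/*
+ * PFC is only meaningful on priorities that map to a valid cos; bits set in
+ * pfc_en_bitmap outside that range are reported and then left out of the
+ * effective setting.
+ */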
"enable" : "disable"); + return err; + } + + if (dcb_cfg->pfc_state) + spnic_info(nic_dev, drv, "Set PFC: 0x%x to hw done\n", + dcb_cfg->pfc_en_bitmap & valid_up_bitmap); + else + spnic_info(nic_dev, drv, "Disable PFC, enable tx/rx pause\n"); + + return 0; +} + +int spnic_dcbcfg_setall_to_hw(struct spnic_nic_dev *nic_dev, struct spnic_dcb_config *src_dcb_cfg) +{ + bool stop_traffic = false; + int err = 0; + + nic_dev->dcb_changes |= spnic_sync_dcb_cfg(nic_dev, src_dcb_cfg); + if (!nic_dev->dcb_changes) + return 0; + + /* hw does not support to change up cos mapping and cos tc mapping with + * traffic flow + */ + stop_traffic = !!(nic_dev->dcb_changes & + (DCB_CFG_CHG_ETS | DCB_CFG_CHG_UP_COS)); + if (stop_traffic) { + err = spnic_stop_port_traffic_flow(nic_dev, true); + if (err) + return err; + } + + if (nic_dev->dcb_changes & DCB_CFG_CHG_UP_COS) { + err = dcbcfg_set_hw_cos_up_map(nic_dev, &nic_dev->hw_dcb_cfg); + if (err) + goto out; + + nic_dev->dcb_changes &= (~DCB_CFG_CHG_UP_COS); + } + + if (nic_dev->dcb_changes & DCB_CFG_CHG_ETS) { + err = dcbcfg_set_hw_ets(nic_dev, &nic_dev->hw_dcb_cfg); + if (err) + goto out; + + nic_dev->dcb_changes &= (~DCB_CFG_CHG_ETS); + } + + if (nic_dev->dcb_changes & DCB_CFG_CHG_PFC) { + err = dcbcfg_set_hw_pfc(nic_dev, &nic_dev->hw_dcb_cfg); + if (err) + goto out; + + nic_dev->dcb_changes &= (~DCB_CFG_CHG_PFC); + } + +out: + if (stop_traffic) + spnic_start_port_traffic_flow(nic_dev); + + return err; +} + +int spnic_dcb_reset_hw_config(struct spnic_nic_dev *nic_dev) +{ + struct spnic_dcb_config dft_cfg = {0}; + int err; + + init_default_dcb_cfg(nic_dev, &dft_cfg); + err = spnic_dcbcfg_setall_to_hw(nic_dev, &dft_cfg); + if (err) { + spnic_err(nic_dev, drv, "Failed to reset hw dcb configuration\n"); + return err; + } + + spnic_info(nic_dev, drv, "Reset hardware DCB configuration done\n"); + + return 0; +} + +int spnic_configure_dcb(struct net_device *netdev) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + + if (test_bit(SPNIC_DCB_ENABLE, &nic_dev->flags)) + return spnic_dcbcfg_setall_to_hw(nic_dev, &nic_dev->wanted_dcb_cfg); + else + return spnic_dcb_reset_hw_config(nic_dev); +} + +void spnic_dcbcfg_set_pfc_state(struct spnic_nic_dev *nic_dev, u8 pfc_state) +{ + nic_dev->wanted_dcb_cfg.pfc_state = pfc_state; +} + +u8 spnic_dcbcfg_get_pfc_state(struct spnic_nic_dev *nic_dev) +{ + return nic_dev->wanted_dcb_cfg.pfc_state; +} + +void spnic_dcbcfg_set_pfc_pri_en(struct spnic_nic_dev *nic_dev, u8 pfc_en_bitmap) +{ + nic_dev->wanted_dcb_cfg.pfc_en_bitmap = pfc_en_bitmap; +} + +u8 spnic_dcbcfg_get_pfc_pri_en(struct spnic_nic_dev *nic_dev) +{ + return nic_dev->wanted_dcb_cfg.pfc_en_bitmap; +} + +int spnic_dcbcfg_set_ets_up_tc_map(struct spnic_nic_dev *nic_dev, const u8 *up_tc_map) +{ + struct spnic_dcb_config *dcb_cfg = &nic_dev->wanted_dcb_cfg; + u8 i; + + for (i = 0; i < SPNIC_DCB_UP_MAX; i++) { + if (!(dcb_cfg->valid_cos_bitmap & BIT(i))) + continue; + + /* TC id can't exceed max cos */ + if (up_tc_map[dcb_cfg->cos_cfg[i].up] >= dcb_cfg->max_cos) + return -EINVAL; + } + + for (i = 0; i < SPNIC_DCB_UP_MAX; i++) { + if (!(dcb_cfg->valid_cos_bitmap & BIT(i))) + continue; + + dcb_cfg->cos_cfg[i].tc_id = up_tc_map[dcb_cfg->cos_cfg[i].up]; + } + + return 0; +} + +void spnic_dcbcfg_get_ets_up_tc_map(struct spnic_nic_dev *nic_dev, u8 *up_tc_map) +{ + struct spnic_dcb_config *dcb_cfg = &nic_dev->wanted_dcb_cfg; + struct spnic_cos_cfg *cos_cfg = dcb_cfg->cos_cfg; + u8 i; + + /* set unused up mapping to default tc */ + memset(up_tc_map, 
cos_cfg[dcb_cfg->default_cos].tc_id, + SPNIC_DCB_UP_MAX); + + for (i = 0; i < SPNIC_DCB_COS_MAX; i++) { + if (!(dcb_cfg->valid_cos_bitmap & BIT(i))) + continue; + + up_tc_map[cos_cfg[i].up] = cos_cfg[i].tc_id; + } +} + +int spnic_dcbcfg_set_ets_tc_bw(struct spnic_nic_dev *nic_dev, const u8 *tc_bw) +{ + struct spnic_dcb_config *dcb_cfg = &nic_dev->wanted_dcb_cfg; + u8 bw_sum = 0; + u8 i; + + for (i = 0; i < SPNIC_DCB_TC_MAX; i++) { + /* cannot set bandwidth for unused tc */ + if (i >= dcb_cfg->max_cos && tc_bw[i] > 0) + return -EINVAL; + + bw_sum += tc_bw[i]; + } + + if (bw_sum != MAX_BW_PERCENT && bw_sum != 0) { + spnic_err(nic_dev, drv, "Invalid total bw %u\n", bw_sum); + return -EINVAL; + } + + for (i = 0; i < dcb_cfg->max_cos; i++) + dcb_cfg->tc_cfg[i].bw_pct = tc_bw[i]; + + return 0; +} + +void spnic_dcbcfg_get_ets_tc_bw(struct spnic_nic_dev *nic_dev, u8 *tc_bw) +{ + struct spnic_dcb_config *dcb_cfg = &nic_dev->wanted_dcb_cfg; + u8 i; + + for (i = 0; i < dcb_cfg->max_cos; i++) + tc_bw[i] = dcb_cfg->tc_cfg[i].bw_pct; +} + +void spnic_dcbcfg_set_ets_tc_prio_type(struct spnic_nic_dev *nic_dev, u8 tc_prio_bitmap) +{ + struct spnic_dcb_config *dcb_cfg = &nic_dev->wanted_dcb_cfg; + u8 i; + + for (i = 0; i < dcb_cfg->max_cos; i++) + dcb_cfg->tc_cfg[i].prio_sp = !!(tc_prio_bitmap & BIT(i)); +} + +void spnic_dcbcfg_get_ets_tc_prio_type(struct spnic_nic_dev *nic_dev, u8 *tc_prio_bitmap) +{ + struct spnic_dcb_config *dcb_cfg = &nic_dev->wanted_dcb_cfg; + u8 i; + + *tc_prio_bitmap = 0; + for (i = 0; i < dcb_cfg->max_cos; i++) { + if (dcb_cfg->tc_cfg[i].prio_sp) + *tc_prio_bitmap |= (u8)BIT(i); + } +} + +/* TODO: send a command to MPU, and MPU close all port traffic */ +static int stop_all_ports_flow(void *uld_array[], u32 num_dev) +{ + struct spnic_nic_dev *tmp_dev = NULL; + u32 i, idx; + int err; + + for (idx = 0; idx < num_dev; idx++) { + tmp_dev = (struct spnic_nic_dev *)uld_array[idx]; + err = spnic_stop_port_traffic_flow(tmp_dev, false); + if (err) { + nicif_err(tmp_dev, drv, tmp_dev->netdev, "Stop port traffic flow failed\n"); + goto stop_port_err; + } + } + + /* wait all traffic flow stopped */ + msleep(SPNIC_WAIT_PORT_IO_STOP); + + return 0; + +stop_port_err: + for (i = 0; i < idx; i++) { + tmp_dev = (struct spnic_nic_dev *)uld_array[i]; + spnic_start_port_traffic_flow(tmp_dev); + } + + return err; +} + +static void start_all_ports_flow(void *uld_array[], u32 num_dev) +{ + struct spnic_nic_dev *tmp_dev = NULL; + u32 idx; + + for (idx = 0; idx < num_dev; idx++) { + tmp_dev = (struct spnic_nic_dev *)uld_array[idx]; + spnic_start_port_traffic_flow(tmp_dev); + } +} + +int change_dev_cos_up_map(struct spnic_nic_dev *nic_dev, u8 valid_up_bitmap) +{ + struct net_device *netdev = nic_dev->netdev; + int err = 0; + + if (test_and_set_bit(SPNIC_DCB_UP_COS_SETTING, &nic_dev->dcb_flags)) { + nicif_warn(nic_dev, drv, netdev, + "Cos_up map setting in inprocess, please try again later\n"); + return -EFAULT; + } + + if (spnic_get_valid_up_bitmap(&nic_dev->wanted_dcb_cfg) == + valid_up_bitmap) { + nicif_err(nic_dev, drv, netdev, "Same up bitmap, don't need to change anything\n"); + err = 0; + goto out; + } + + nicif_info(nic_dev, drv, netdev, "Set valid_up_bitmap: 0x%x\n", + valid_up_bitmap); + + update_valid_up_bitmap(&nic_dev->wanted_dcb_cfg, valid_up_bitmap); + + nic_dev->dcb_changes = DCB_CFG_CHG_ETS | DCB_CFG_CHG_PFC | DCB_CFG_CHG_UP_COS; + + if (test_bit(SPNIC_DCB_ENABLE, &nic_dev->flags)) { + err = spnic_dcbcfg_setall_to_hw(nic_dev, &nic_dev->wanted_dcb_cfg); + if (err) { + nicif_err(nic_dev, drv, 
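+/*
+ * Apply a new valid-up bitmap to one device: refresh the wanted DCB
+ * configuration, reprogram the hardware when DCB is enabled and update the
+ * netdev prio-to-tc mapping to match.
+ */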
netdev, "Reconfig dcb to hw failed\n"); + goto out; + } + + /* Change up/tc map for netdev */ + spnic_set_prio_tc_map(nic_dev); + spnic_update_tx_db_cos(nic_dev); + } + + err = spnic_set_tx_cos_state(nic_dev); + +out: + clear_bit(SPNIC_DCB_UP_COS_SETTING, &nic_dev->dcb_flags); + + return err; +} + +int spnic_dcbcfg_set_up_bitmap(struct spnic_nic_dev *nic_dev, u8 valid_up_bitmap) +{ + struct spnic_nic_dev *tmp_dev = NULL; + void **uld_array = NULL; + u32 i, idx, num_dev = 0; + int err, rollback_err; + bool up_setted = false; + u8 old_valid_up_bitmap = 0; + u8 max_pf; + + /* Save old valid up bitmap, in case of set failed */ + err = spnic_get_chip_up_bitmap(nic_dev->pdev, &up_setted, &old_valid_up_bitmap); + if (err) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Get old chip valid up bitmap failed\n"); + return -EFAULT; + } + + if (valid_up_bitmap == old_valid_up_bitmap) { + nicif_info(nic_dev, drv, nic_dev->netdev, "Same valid up bitmap, don't need to change anything\n"); + return 0; + } + + max_pf = sphw_max_pf_num(nic_dev->hwdev); + if (!max_pf) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Invalid max pf number\n"); + return -EFAULT; + } + + uld_array = kcalloc(max_pf, sizeof(void *), GFP_KERNEL); + if (!uld_array) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to alloc uld_array\n"); + return -ENOMEM; + } + + /* Get all pf of this chip */ + err = spnic_get_pf_nic_uld_array(nic_dev->pdev, &num_dev, uld_array); + if (err) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Get all pf private handle failed\n"); + err = -EFAULT; + goto out; + } + + err = stop_all_ports_flow(uld_array, num_dev); + if (err) + goto out; + + for (idx = 0; idx < num_dev; idx++) { + tmp_dev = (struct spnic_nic_dev *)uld_array[idx]; + err = change_dev_cos_up_map(tmp_dev, valid_up_bitmap); + if (err) { + nicif_err(tmp_dev, drv, tmp_dev->netdev, "Set cos_up map to hw failed\n"); + goto set_err; + } + } + + start_all_ports_flow(uld_array, num_dev); + + spnic_set_chip_up_bitmap(nic_dev->pdev, valid_up_bitmap); + kfree(uld_array); + + return 0; + +set_err: + /* undo all settings */ + for (i = 0; i <= idx; i++) { + tmp_dev = (struct spnic_nic_dev *)uld_array[i]; + rollback_err = change_dev_cos_up_map(tmp_dev, old_valid_up_bitmap); + if (rollback_err) + nicif_err(tmp_dev, drv, tmp_dev->netdev, "Failed to rollback cos_up map to hw\n"); + } + + start_all_ports_flow(uld_array, num_dev); + +out: + kfree(uld_array); + + return err; +} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_dcb.h b/drivers/net/ethernet/ramaxel/spnic/spnic_dcb.h new file mode 100644 index 000000000000..48ef471237e2 --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/spnic_dcb.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#ifndef SPNIC_DCB_H +#define SPNIC_DCB_H + +enum SPNIC_DCB_FLAGS { + SPNIC_DCB_UP_COS_SETTING, + SPNIC_DCB_TRAFFIC_STOPPED, +}; + +struct spnic_cos_cfg { + u8 up; + u8 bw_pct; + u8 tc_id; + u8 prio_sp; /* 0 - DWRR, 1 - SP */ +}; + +struct spnic_tc_cfg { + u8 bw_pct; + u8 prio_sp; /* 0 - DWRR, 1 - SP */ + u16 rsvd; +}; + +struct spnic_dcb_config { + /* The num_tc of the protocol stack is also the same */ + u8 max_cos; + u8 default_cos; + u8 valid_cos_bitmap; + u8 rsvd1; + struct spnic_cos_cfg cos_cfg[SPNIC_DCB_COS_MAX]; + struct spnic_tc_cfg tc_cfg[SPNIC_DCB_TC_MAX]; + + u8 pfc_state; + u8 pfc_en_bitmap; + u16 rsvd2; +}; + +int spnic_dcb_init(struct spnic_nic_dev *nic_dev); +int spnic_dcb_reset_hw_config(struct spnic_nic_dev *nic_dev); +int 
+int spnic_configure_dcb(struct net_device *netdev);
+int spnic_setup_tc(struct net_device *netdev, u8 tc);
+u8 spnic_get_valid_up_bitmap(struct spnic_dcb_config *dcb_cfg);
+void spnic_dcbcfg_set_pfc_state(struct spnic_nic_dev *nic_dev, u8 pfc_state);
+u8 spnic_dcbcfg_get_pfc_state(struct spnic_nic_dev *nic_dev);
+void spnic_dcbcfg_set_pfc_pri_en(struct spnic_nic_dev *nic_dev, u8 pfc_en_bitmap);
+u8 spnic_dcbcfg_get_pfc_pri_en(struct spnic_nic_dev *nic_dev);
+int spnic_dcbcfg_set_ets_up_tc_map(struct spnic_nic_dev *nic_dev, const u8 *up_tc_map);
+void spnic_dcbcfg_get_ets_up_tc_map(struct spnic_nic_dev *nic_dev, u8 *up_tc_map);
+int spnic_dcbcfg_set_ets_tc_bw(struct spnic_nic_dev *nic_dev, const u8 *tc_bw);
+void spnic_dcbcfg_get_ets_tc_bw(struct spnic_nic_dev *nic_dev, u8 *tc_bw);
+void spnic_dcbcfg_set_ets_tc_prio_type(struct spnic_nic_dev *nic_dev, u8 tc_prio_bitmap);
+void spnic_dcbcfg_get_ets_tc_prio_type(struct spnic_nic_dev *nic_dev, u8 *tc_prio_bitmap);
+int spnic_dcbcfg_set_up_bitmap(struct spnic_nic_dev *nic_dev, u8 valid_up_bitmap);
+void spnic_update_tx_db_cos(struct spnic_nic_dev *nic_dev);
+#endif
diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_dev_mgmt.c b/drivers/net/ethernet/ramaxel/spnic/spnic_dev_mgmt.c
new file mode 100644
index 000000000000..6037645c0e8b
--- /dev/null
+++ b/drivers/net/ethernet/ramaxel/spnic/spnic_dev_mgmt.c
@@ -0,0 +1,811 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/io-mapping.h>
+#include <linux/interrupt.h>
+#include <net/addrconf.h>
+#include <linux/time.h>
+#include <linux/timex.h>
+#include <linux/rtc.h>
+#include <linux/debugfs.h>
+
+#include "sphw_common.h"
+#include "sphw_mt.h"
+#include "sphw_crm.h"
+#include "spnic_lld.h"
+#include "spnic_sriov.h"
+#include "spnic_pci_id_tbl.h"
+#include "spnic_dev_mgmt.h"
+
+#define SPNIC_WAIT_TOOL_CNT_TIMEOUT	10000
+#define SPNIC_WAIT_TOOL_MIN_USLEEP_TIME	9900
+#define SPNIC_WAIT_TOOL_MAX_USLEEP_TIME	10000
+
+#define MAX_CARD_ID 64
+static unsigned long card_bit_map;
+
+LIST_HEAD(g_spnic_chip_list);
+
+void lld_dev_cnt_init(struct spnic_pcidev *pci_adapter)
+{
+	atomic_set(&pci_adapter->ref_cnt, 0);
+}
+
+void lld_dev_hold(struct spnic_lld_dev *dev)
+{
+	struct spnic_pcidev *pci_adapter = pci_get_drvdata(dev->pdev);
+
+	atomic_inc(&pci_adapter->ref_cnt);
+}
+
+void lld_dev_put(struct spnic_lld_dev *dev)
+{
+	struct spnic_pcidev *pci_adapter = pci_get_drvdata(dev->pdev);
+
+	atomic_dec(&pci_adapter->ref_cnt);
+}
+
+void wait_lld_dev_unused(struct spnic_pcidev *pci_adapter)
+{
+	unsigned long end;
+
+	end = jiffies + msecs_to_jiffies(SPNIC_WAIT_TOOL_CNT_TIMEOUT);
+	do {
+		if (!atomic_read(&pci_adapter->ref_cnt))
+			return;
+
+		/* if sleep 10ms, use usleep_range to be more precise */
+		usleep_range(SPNIC_WAIT_TOOL_MIN_USLEEP_TIME,
+			     SPNIC_WAIT_TOOL_MAX_USLEEP_TIME);
+	} while (time_before(jiffies, end));
+}
+
+enum spnic_lld_status {
+	SPNIC_NODE_CHANGE = BIT(0),
+};
+
+struct spnic_lld_lock {
+	/* lock for chip list */
+	struct mutex lld_mutex;
+	unsigned long status;
+	atomic_t dev_ref_cnt;
+};
+
+struct spnic_lld_lock g_lld_lock;
+
+#define WAIT_LLD_DEV_HOLD_TIMEOUT	(10 * 60 * 1000)	/* 10 minutes */
+#define WAIT_LLD_DEV_NODE_CHANGED	(10 * 60 * 1000)	/* 10 minutes */
+#define WAIT_LLD_DEV_REF_CNT_EMPTY	(2 * 60 * 1000)		/* 2 minutes */
+#define PRINT_TIMEOUT_INTERVAL		10000
+#define MS_PER_SEC			1000
+#define LLD_LOCK_MIN_USLEEP_TIME	900
+#define LLD_LOCK_MAX_USLEEP_TIME	1000
+
+/* nodes in chip_node will be changed; tools or drivers can't get a node
+ * while this is in progress
+ */
+void lld_lock_chip_node(void)
+{
+	unsigned long end;
+	bool timeout = true;
+	u32 loop_cnt;
+
+	mutex_lock(&g_lld_lock.lld_mutex);
+
+	loop_cnt = 0;
+	end = jiffies + msecs_to_jiffies(WAIT_LLD_DEV_NODE_CHANGED);
+	do {
+		if (!test_and_set_bit(SPNIC_NODE_CHANGE, &g_lld_lock.status)) {
+			timeout = false;
+			break;
+		}
+
+		loop_cnt++;
+		if (loop_cnt % PRINT_TIMEOUT_INTERVAL == 0)
+			pr_warn("Wait for lld node change complete for %us\n",
+				loop_cnt / MS_PER_SEC);
+
+		/* if sleep 1ms, use usleep_range to be more precise */
+		usleep_range(LLD_LOCK_MIN_USLEEP_TIME,
+			     LLD_LOCK_MAX_USLEEP_TIME);
+	} while (time_before(jiffies, end));
+
+	if (timeout && test_and_set_bit(SPNIC_NODE_CHANGE, &g_lld_lock.status))
+		pr_warn("Wait for lld node change complete timeout when trying to get lld lock\n");
+
+	loop_cnt = 0;
+	timeout = true;
+	end = jiffies + msecs_to_jiffies(WAIT_LLD_DEV_NODE_CHANGED);
+	do {
+		if (!atomic_read(&g_lld_lock.dev_ref_cnt)) {
+			timeout = false;
+			break;
+		}
+
+		loop_cnt++;
+		if (loop_cnt % PRINT_TIMEOUT_INTERVAL == 0)
+			pr_warn("Wait for lld dev unused for %us, reference count: %d\n",
+				loop_cnt / MS_PER_SEC,
+				atomic_read(&g_lld_lock.dev_ref_cnt));
+
+		/* if sleep 1ms, use usleep_range to be more precise */
+		usleep_range(LLD_LOCK_MIN_USLEEP_TIME,
+			     LLD_LOCK_MAX_USLEEP_TIME);
+	} while (time_before(jiffies, end));
+
+	if (timeout && atomic_read(&g_lld_lock.dev_ref_cnt))
+		pr_warn("Wait for lld dev unused timeout\n");
+
+	mutex_unlock(&g_lld_lock.lld_mutex);
+}
+
+void lld_unlock_chip_node(void)
+{
+	clear_bit(SPNIC_NODE_CHANGE, &g_lld_lock.status);
+}
+
+/* When tools or other drivers want to get a node of chip_node, use this
+ * function to prevent the node from being freed
+ */
+void lld_hold(void)
+{
+	unsigned long end;
+	u32 loop_cnt = 0;
+
+	/* ensure there is not any chip node changing */
+	mutex_lock(&g_lld_lock.lld_mutex);
+
+	end = jiffies + msecs_to_jiffies(WAIT_LLD_DEV_HOLD_TIMEOUT);
+	do {
+		if (!test_bit(SPNIC_NODE_CHANGE, &g_lld_lock.status))
+			break;
+
+		loop_cnt++;
+
+		if (loop_cnt % PRINT_TIMEOUT_INTERVAL == 0)
+			pr_warn("Wait lld node change complete for %us\n",
+				loop_cnt / MS_PER_SEC);
+		/* if sleep 1ms, use usleep_range to be more precise */
+		usleep_range(LLD_LOCK_MIN_USLEEP_TIME,
+			     LLD_LOCK_MAX_USLEEP_TIME);
+	} while (time_before(jiffies, end));
+
+	if (test_bit(SPNIC_NODE_CHANGE, &g_lld_lock.status))
+		pr_warn("Wait lld node change complete timeout when trying to hold lld dev\n");
+
+	atomic_inc(&g_lld_lock.dev_ref_cnt);
+	mutex_unlock(&g_lld_lock.lld_mutex);
+}
+
+void lld_put(void)
+{
+	atomic_dec(&g_lld_lock.dev_ref_cnt);
+}
+
+void spnic_lld_lock_init(void)
+{
+	mutex_init(&g_lld_lock.lld_mutex);
+	atomic_set(&g_lld_lock.dev_ref_cnt, 0);
+}
+
+void spnic_get_all_chip_id(void *id_info)
+{
+	struct nic_card_id *card_id = (struct nic_card_id *)id_info;
+	struct card_node *chip_node = NULL;
+	int i = 0;
+	int id, err;
+
+	lld_hold();
+	list_for_each_entry(chip_node, &g_spnic_chip_list, node) {
+		err = sscanf(chip_node->chip_name, SPHW_CHIP_NAME "%d", &id);
+		if (err < 0)
+			pr_err("Failed to get spnic id\n");
+		card_id->id[i] = id;
+		i++;
+	}
+	lld_put();
+	card_id->num = i;
+}
func_pdev_info *pdev_info = NULL; + + card_func->num_pf = 0; + + lld_hold(); + + list_for_each_entry(chip_node, &g_spnic_chip_list, node) { + if (strncmp(chip_node->chip_name, chip_name, IFNAMSIZ)) + continue; + + list_for_each_entry(dev, &chip_node->func_list, node) { + if (sphw_func_type(dev->hwdev) == TYPE_VF) + continue; + + pdev_info = &card_func->pdev_info[card_func->num_pf]; + pdev_info->bar1_size = + pci_resource_len(dev->pcidev, SPNIC_PF_PCI_CFG_REG_BAR); + pdev_info->bar1_phy_addr = + pci_resource_start(dev->pcidev, SPNIC_PF_PCI_CFG_REG_BAR); + + pdev_info->bar3_size = + pci_resource_len(dev->pcidev, SPNIC_PCI_MGMT_REG_BAR); + pdev_info->bar3_phy_addr = + pci_resource_start(dev->pcidev, SPNIC_PCI_MGMT_REG_BAR); + + card_func->num_pf++; + if (card_func->num_pf >= CARD_MAX_SIZE) { + lld_put(); + return; + } + } + } + + lld_put(); +} + +static bool is_pcidev_match_chip_name(const char *ifname, struct spnic_pcidev *dev, + struct card_node *chip_node, enum func_type type) +{ + if (!strncmp(chip_node->chip_name, ifname, IFNAMSIZ)) { + if (sphw_func_type(dev->hwdev) != type) + return false; + return true; + } + + return false; +} + +static struct spnic_lld_dev *_get_lld_dev_by_chip_name(const char *ifname, enum func_type type) +{ + struct card_node *chip_node = NULL; + struct spnic_pcidev *dev = NULL; + + lld_hold(); + + list_for_each_entry(chip_node, &g_spnic_chip_list, node) { + list_for_each_entry(dev, &chip_node->func_list, node) { + if (is_pcidev_match_chip_name(ifname, dev, chip_node, type)) { + lld_put(); + return &dev->lld_dev; + } + } + } + + lld_put(); + return NULL; +} + +static struct spnic_lld_dev *spnic_get_lld_dev_by_chip_name(const char *ifname) +{ + struct spnic_lld_dev *dev_hw_init = NULL; + struct spnic_lld_dev *dev = NULL; + + /*find hw init device first*/ + dev_hw_init = _get_lld_dev_by_chip_name(ifname, TYPE_UNKNOWN); + if (dev_hw_init) { + if (sphw_func_type(dev_hw_init->hwdev) == TYPE_PPF) + return dev_hw_init; + } + + dev = _get_lld_dev_by_chip_name(ifname, TYPE_PPF); + if (dev) { + if (dev_hw_init) + return dev_hw_init; + + return dev; + } + + dev = _get_lld_dev_by_chip_name(ifname, TYPE_PF); + if (dev) { + if (dev_hw_init) + return dev_hw_init; + + return dev; + } + + dev = _get_lld_dev_by_chip_name(ifname, TYPE_VF); + if (dev) + return dev; + + return NULL; +} + +static bool is_pcidev_match_dev_name(const char *ifname, struct spnic_pcidev *dev, + enum sphw_service_type type) +{ + enum sphw_service_type i; + char nic_uld_name[IFNAMSIZ] = {0}; + int err; + + if (type == SERVICE_T_MAX) { + for (i = SERVICE_T_OVS; i < SERVICE_T_MAX; i++) { + if (!strncmp(dev->uld_dev_name[i], ifname, IFNAMSIZ)) + return true; + } + } else { + if (!strncmp(dev->uld_dev_name[type], ifname, IFNAMSIZ)) + return true; + } + + err = spnic_get_uld_dev_name(dev, SERVICE_T_NIC, (char *)nic_uld_name); + if (!err) { + if (!strncmp(nic_uld_name, ifname, IFNAMSIZ)) + return true; + } + + return false; +} + +static struct spnic_lld_dev *spnic_get_lld_dev_by_dev_name(const char *ifname, + enum sphw_service_type type) +{ + struct card_node *chip_node = NULL; + struct spnic_pcidev *dev = NULL; + + lld_hold(); + + list_for_each_entry(chip_node, &g_spnic_chip_list, node) { + list_for_each_entry(dev, &chip_node->func_list, node) { + if (is_pcidev_match_dev_name(ifname, dev, type)) { + lld_put(); + return &dev->lld_dev; + } + } + } + + lld_put(); + + return NULL; +} + +struct spnic_lld_dev *spnic_get_lld_dev_by_ifname(const char *ifname) +{ + struct spnic_lld_dev *dev = NULL; + + lld_hold(); + /* 
support search hwdev by chip name, net device name, + * or fc device name + */ + /* Find pcidev by chip_name first */ + dev = spnic_get_lld_dev_by_chip_name(ifname); + if (dev) + goto find_dev; + + /* If ifname not a chip name, + * find pcidev by FC name or netdevice name + */ + dev = spnic_get_lld_dev_by_dev_name(ifname, SERVICE_T_MAX); + if (!dev) { + lld_put(); + return NULL; + } + +find_dev: + lld_dev_hold(dev); + lld_put(); + return dev; +} + +void *spnic_get_hwdev_by_ifname(const char *ifname) +{ + struct spnic_lld_dev *dev = NULL; + + dev = spnic_get_lld_dev_by_ifname(ifname); + if (dev) + return dev->hwdev; + + return NULL; +} + +void *spnic_get_uld_dev_by_ifname(const char *ifname, enum sphw_service_type type) +{ + struct spnic_pcidev *dev = NULL; + struct spnic_lld_dev *lld_dev = NULL; + + if (type >= SERVICE_T_MAX) { + pr_err("Service type :%d is error\n", type); + return NULL; + } + + lld_dev = spnic_get_lld_dev_by_dev_name(ifname, type); + if (!lld_dev) + return NULL; + + dev = pci_get_drvdata(lld_dev->pdev); + if (dev) + return dev->uld_dev[type]; + + return NULL; +} + +static struct card_node *spnic_get_chip_node_by_hwdev(const void *hwdev) +{ + struct card_node *chip_node = NULL; + struct card_node *node_tmp = NULL; + struct spnic_pcidev *dev = NULL; + + if (!hwdev) + return NULL; + + lld_hold(); + + list_for_each_entry(node_tmp, &g_spnic_chip_list, node) { + if (!chip_node) { + list_for_each_entry(dev, &node_tmp->func_list, node) { + if (dev->hwdev == hwdev) { + chip_node = node_tmp; + break; + } + } + } + } + + lld_put(); + + return chip_node; +} + +int spnic_get_chip_name_by_hwdev(const void *hwdev, char *ifname) +{ + struct card_node *chip_node = NULL; + struct spnic_pcidev *dev = NULL; + + if (!hwdev || !ifname) + return -EINVAL; + + lld_hold(); + + list_for_each_entry(chip_node, &g_spnic_chip_list, node) { + list_for_each_entry(dev, &chip_node->func_list, node) { + if (dev->hwdev == hwdev) { + strncpy(ifname, chip_node->chip_name, IFNAMSIZ - 1); + ifname[IFNAMSIZ - 1] = 0; + lld_put(); + return 0; + } + } + } + + lld_put(); + + return -ENXIO; +} + +void *spnic_get_uld_dev_by_pdev(struct pci_dev *pdev, enum sphw_service_type type) +{ + struct spnic_pcidev *pci_adapter = NULL; + + if (type >= SERVICE_T_MAX) { + pr_err("Service type :%d is error\n", type); + return NULL; + } + + pci_adapter = pci_get_drvdata(pdev); + if (pci_adapter) + return pci_adapter->uld_dev[type]; + + return NULL; +} + +void *spnic_get_ppf_hwdev_by_pdev(struct pci_dev *pdev) +{ + struct spnic_pcidev *pci_adapter = NULL; + struct card_node *chip_node = NULL; + struct spnic_pcidev *dev = NULL; + + if (!pdev) + return NULL; + + pci_adapter = pci_get_drvdata(pdev); + if (!pci_adapter) + return NULL; + + chip_node = pci_adapter->chip_node; + lld_hold(); + list_for_each_entry(dev, &chip_node->func_list, node) { + if (dev->hwdev && sphw_func_type(dev->hwdev) == TYPE_PPF) { + lld_put(); + return dev->hwdev; + } + } + lld_put(); + + return NULL; +} + +/* NOTICE: nictool can't use this function, because this function can't keep + * tool context mutual exclusive with remove context + */ +void *spnic_get_ppf_uld_by_pdev(struct pci_dev *pdev, enum sphw_service_type type) +{ + struct spnic_pcidev *pci_adapter = NULL; + struct card_node *chip_node = NULL; + struct spnic_pcidev *dev = NULL; + + if (!pdev) + return NULL; + + pci_adapter = pci_get_drvdata(pdev); + if (!pci_adapter) + return NULL; + + chip_node = pci_adapter->chip_node; + lld_hold(); + list_for_each_entry(dev, &chip_node->func_list, node) { + if 
(sphw_func_type(dev->hwdev) == TYPE_PPF) { + lld_put(); + return dev->uld_dev[type]; + } + } + lld_put(); + + return NULL; +} + +int spnic_get_pf_nic_uld_array(struct pci_dev *pdev, u32 *dev_cnt, void *array[]) +{ + struct spnic_pcidev *dev = pci_get_drvdata(pdev); + struct card_node *chip_node = NULL; + u32 cnt; + + if (!dev || !sphw_support_nic(dev->hwdev, NULL)) + return -EINVAL; + + lld_hold(); + + cnt = 0; + chip_node = dev->chip_node; + list_for_each_entry(dev, &chip_node->func_list, node) { + if (sphw_func_type(dev->hwdev) == TYPE_VF) + continue; + + array[cnt] = dev->uld_dev[SERVICE_T_NIC]; + cnt++; + } + lld_put(); + + *dev_cnt = cnt; + + return 0; +} + +static bool is_func_valid(struct spnic_pcidev *dev) +{ + if (sphw_func_type(dev->hwdev) == TYPE_VF) + return false; + + return true; +} + +int spnic_get_uld_dev_name(struct spnic_pcidev *dev, enum sphw_service_type type, char *ifname) +{ + u32 out_size = IFNAMSIZ; + + if (!g_uld_info[type].ioctl) + return -EFAULT; + + return g_uld_info[type].ioctl(dev->uld_dev[type], GET_ULD_DEV_NAME, + NULL, 0, ifname, &out_size); +} + +void spnic_get_card_info(const void *hwdev, void *bufin) +{ + struct card_node *chip_node = NULL; + struct card_info *info = (struct card_info *)bufin; + struct spnic_pcidev *dev = NULL; + void *fun_hwdev = NULL; + u32 i = 0; + + info->pf_num = 0; + + chip_node = spnic_get_chip_node_by_hwdev(hwdev); + if (!chip_node) + return; + + lld_hold(); + + list_for_each_entry(dev, &chip_node->func_list, node) { + if (!is_func_valid(dev)) + continue; + + fun_hwdev = dev->hwdev; + + if (sphw_support_nic(fun_hwdev, NULL)) { + if (dev->uld_dev[SERVICE_T_NIC]) { + info->pf[i].pf_type |= (u32)BIT(SERVICE_T_NIC); + spnic_get_uld_dev_name(dev, SERVICE_T_NIC, info->pf[i].name); + } + } + + /* to do : get other service info*/ + + if (sphw_func_for_mgmt(fun_hwdev)) + strlcpy(info->pf[i].name, "FOR_MGMT", IFNAMSIZ); + + strlcpy(info->pf[i].bus_info, pci_name(dev->pcidev), + sizeof(info->pf[i].bus_info)); + info->pf_num++; + i = info->pf_num; + } + + lld_put(); +} + +struct spnic_sriov_info *spnic_get_sriov_info_by_pcidev(struct pci_dev *pdev) +{ + struct spnic_pcidev *pci_adapter = NULL; + + if (!pdev) + return NULL; + + pci_adapter = pci_get_drvdata(pdev); + if (!pci_adapter) + return NULL; + + return &pci_adapter->sriov_info; +} + +void *spnic_get_hwdev_by_pcidev(struct pci_dev *pdev) +{ + struct spnic_pcidev *pci_adapter = NULL; + + if (!pdev) + return NULL; + + pci_adapter = pci_get_drvdata(pdev); + if (!pci_adapter) + return NULL; + + return pci_adapter->hwdev; +} + +bool spnic_is_in_host(void) +{ + struct card_node *chip_node = NULL; + struct spnic_pcidev *dev = NULL; + + lld_hold(); + list_for_each_entry(chip_node, &g_spnic_chip_list, node) { + list_for_each_entry(dev, &chip_node->func_list, node) { + if (sphw_func_type(dev->hwdev) != TYPE_VF) { + lld_put(); + return true; + } + } + } + + lld_put(); + + return false; +} + +int spnic_get_chip_up_bitmap(struct pci_dev *pdev, bool *is_setted, u8 *valid_up_bitmap) +{ + struct spnic_pcidev *dev = pci_get_drvdata(pdev); + struct card_node *chip_node = NULL; + + if (!dev || !is_setted || !valid_up_bitmap) + return -EINVAL; + + chip_node = dev->chip_node; + *is_setted = chip_node->up_bitmap_setted; + if (chip_node->up_bitmap_setted) + *valid_up_bitmap = chip_node->valid_up_bitmap; + + return 0; +} + +int spnic_set_chip_up_bitmap(struct pci_dev *pdev, u8 valid_up_bitmap) +{ + struct spnic_pcidev *dev = pci_get_drvdata(pdev); + struct card_node *chip_node = NULL; + + if (!dev) + return 
-EINVAL; + + chip_node = dev->chip_node; + chip_node->up_bitmap_setted = true; + chip_node->valid_up_bitmap = valid_up_bitmap; + + return 0; +} + +static bool chip_node_is_exist(struct spnic_pcidev *pci_adapter, unsigned char *bus_number) +{ + struct card_node *chip_node = NULL; + + if (!pci_is_root_bus(pci_adapter->pcidev->bus)) + *bus_number = pci_adapter->pcidev->bus->number; + + if (*bus_number != 0) { + list_for_each_entry(chip_node, &g_spnic_chip_list, node) { + if (chip_node->bus_num == *bus_number) { + pci_adapter->chip_node = chip_node; + return true; + } + } + } else if (pci_adapter->pcidev->device == SPNIC_DEV_ID_VF || + pci_adapter->pcidev->device == SPNIC_DEV_ID_VF_HV) { + list_for_each_entry(chip_node, &g_spnic_chip_list, node) { + if (chip_node) { + pci_adapter->chip_node = chip_node; + return true; + } + } + } + + return false; +} + +int alloc_chip_node(struct spnic_pcidev *pci_adapter) +{ + struct card_node *chip_node = NULL; + unsigned char i; + unsigned char bus_number = 0; + + if (chip_node_is_exist(pci_adapter, &bus_number)) + return 0; + + for (i = 0; i < MAX_CARD_ID; i++) { + if (!test_and_set_bit(i, &card_bit_map)) + break; + } + + if (i == MAX_CARD_ID) { + sdk_err(&pci_adapter->pcidev->dev, "Failed to alloc card id\n"); + return -EFAULT; + } + + chip_node = kzalloc(sizeof(*chip_node), GFP_KERNEL); + if (!chip_node) { + clear_bit(i, &card_bit_map); + sdk_err(&pci_adapter->pcidev->dev, + "Failed to alloc chip node\n"); + return -ENOMEM; + } + + /* bus number */ + chip_node->bus_num = bus_number; + + snprintf(chip_node->chip_name, IFNAMSIZ, "%s%u", SPHW_CHIP_NAME, i); + + sdk_info(&pci_adapter->pcidev->dev, "Added new chip %s to the global list\n", + chip_node->chip_name); + + list_add_tail(&chip_node->node, &g_spnic_chip_list); + + INIT_LIST_HEAD(&chip_node->func_list); + pci_adapter->chip_node = chip_node; + + return 0; +} + +void free_chip_node(struct spnic_pcidev *pci_adapter) +{ + struct card_node *chip_node = pci_adapter->chip_node; + int id, err; + + if (list_empty(&chip_node->func_list)) { + list_del(&chip_node->node); + sdk_info(&pci_adapter->pcidev->dev, "Deleted chip %s from the global list\n", + chip_node->chip_name); + err = sscanf(chip_node->chip_name, SPHW_CHIP_NAME "%d", &id); + if (err < 0) + sdk_err(&pci_adapter->pcidev->dev, "Failed to get spnic id\n"); + + clear_bit(id, &card_bit_map); + + kfree(chip_node); + } +}
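+/* Illustrative sketch, not part of the patch: alloc_chip_node() above hands
+ * out card ids by pairing test_and_set_bit() on card_bit_map with the
+ * clear_bit() in free_chip_node(), which recovers the id by parsing it back
+ * out of chip_name. Assuming the same MAX_CARD_ID bitmap, the allocation
+ * step reduces to:
+ *
+ *	static int alloc_card_id(void)
+ *	{
+ *		unsigned char i;
+ *
+ *		for (i = 0; i < MAX_CARD_ID; i++)
+ *			if (!test_and_set_bit(i, &card_bit_map))
+ *				return i;	// id i is now reserved
+ *		return -1;		// all MAX_CARD_ID ids are in use
+ *	}
+ */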
diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_dev_mgmt.h b/drivers/net/ethernet/ramaxel/spnic/spnic_dev_mgmt.h new file mode 100644 index 000000000000..8f345769bec5 --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/spnic_dev_mgmt.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#ifndef SPNIC_DEV_MGMT_H +#define SPNIC_DEV_MGMT_H +#include <linux/types.h> +#include <linux/bitops.h> + +#define SPHW_CHIP_NAME "spnic" + +#define SPNIC_VF_PCI_CFG_REG_BAR 0 +#define SPNIC_PF_PCI_CFG_REG_BAR 1 + +#define SPNIC_PCI_INTR_REG_BAR 2 +#define SPNIC_PCI_MGMT_REG_BAR 3 /* Only the PF has a mgmt bar */ +#define SPNIC_PCI_DB_BAR 4 + +/* pcidev private structure */ +struct spnic_pcidev { + struct pci_dev *pcidev; + void *hwdev; + struct card_node *chip_node; + struct spnic_lld_dev lld_dev; + /* Record the service object addresses, + * such as spnic_dev, toe_dev and fc_dev + */ + void *uld_dev[SERVICE_T_MAX]; + /* Record the service object name */ + char uld_dev_name[SERVICE_T_MAX][IFNAMSIZ]; + /* Global list node used by the driver to manage + * all function devices in a linked list + */ + struct list_head node; + + bool disable_vf_load; + bool disable_srv_load[SERVICE_T_MAX]; + + void __iomem *cfg_reg_base; + void __iomem *intr_reg_base; + void __iomem *mgmt_reg_base; + u64 db_dwqe_len; + u64 db_base_phy; + void __iomem *db_base; + + /* lock for attach/detach uld */ + struct mutex pdev_mutex; + + struct spnic_sriov_info sriov_info; + + /* set while the uld driver is processing an event */ + unsigned long state; + struct pci_device_id id; + + atomic_t ref_cnt; +}; + +extern struct list_head g_spnic_chip_list; + +extern struct spnic_uld_info g_uld_info[SERVICE_T_MAX]; + +int alloc_chip_node(struct spnic_pcidev *pci_adapter); + +void free_chip_node(struct spnic_pcidev *pci_adapter); + +void lld_lock_chip_node(void); + +void lld_unlock_chip_node(void); + +void spnic_lld_lock_init(void); + +void lld_dev_cnt_init(struct spnic_pcidev *pci_adapter); +void wait_lld_dev_unused(struct spnic_pcidev *pci_adapter); + +int spnic_get_uld_dev_name(struct spnic_pcidev *dev, enum sphw_service_type type, char *ifname); + +void *spnic_get_hwdev_by_pcidev(struct pci_dev *pdev); + +#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_ethtool.c b/drivers/net/ethernet/ramaxel/spnic/spnic_ethtool.c new file mode 100644 index 000000000000..3f6f69b4cb34 --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/spnic_ethtool.c @@ -0,0 +1,988 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/etherdevice.h> +#include <linux/netdevice.h> +#include <linux/if_vlan.h> +#include <linux/ethtool.h> + +#include "sphw_hw.h" +#include "sphw_crm.h" +#include "spnic_nic_dev.h" +#include "spnic_tx.h" +#include "spnic_rx.h" +#include "spnic_rss.h" + +#define COALESCE_ALL_QUEUE 0xFFFF +#define COALESCE_PENDING_LIMIT_UNIT 8 +#define COALESCE_TIMER_CFG_UNIT 5 +#define COALESCE_MAX_PENDING_LIMIT (255 * COALESCE_PENDING_LIMIT_UNIT) +#define COALESCE_MAX_TIMER_CFG (255 * COALESCE_TIMER_CFG_UNIT) + +static void spnic_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + struct pci_dev *pdev = nic_dev->pdev; + u8 mgmt_ver[SPHW_MGMT_VERSION_MAX_LEN] = {0}; + int err; + + strlcpy(info->driver, SPNIC_NIC_DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, SPNIC_DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(pdev), sizeof(info->bus_info)); + + err = sphw_get_mgmt_version(nic_dev->hwdev, mgmt_ver, SPHW_MGMT_VERSION_MAX_LEN, + SPHW_CHANNEL_NIC); + if (err) { + nicif_err(nic_dev, drv, netdev, "Failed to get fw version\n"); + return; + } + + snprintf(info->fw_version, sizeof(info->fw_version), "%s", mgmt_ver); +} + +static u32 spnic_get_msglevel(struct net_device *netdev) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + + return nic_dev->msg_enable; +} + +static void spnic_set_msglevel(struct net_device *netdev, u32 data) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + + nic_dev->msg_enable = data; + + nicif_info(nic_dev, drv, netdev, "Set message level: 0x%x\n", data); +}
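+/* Usage note (illustrative, not part of the patch): the hooks above back the
+ * standard ethtool queries, e.g. `ethtool -i <dev>` prints the driver,
+ * version, firmware-version and bus-info strings filled in by
+ * spnic_get_drvinfo(), and `ethtool -s <dev> msglvl 0x<mask>` ends up in
+ * spnic_set_msglevel().
+ */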
+int spnic_nway_reset(struct net_device *netdev) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + struct nic_port_info port_info = {0}; + int err; + + err = spnic_get_port_info(nic_dev->hwdev, &port_info, SPHW_CHANNEL_NIC); + if (err) { + nicif_err(nic_dev, drv, netdev, + "Failed to get port info\n"); + return -EFAULT; + } + + if (!port_info.autoneg_state) { + nicif_err(nic_dev, drv, netdev, "Autonegotiation is off, restarting it is not supported\n"); + return -EINVAL; + } + + err = spnic_set_autoneg(nic_dev->hwdev, true); + if (err) { + nicif_err(nic_dev, drv, netdev, "Failed to restart autonegotiation\n"); + return -EFAULT; + } + + nicif_info(nic_dev, drv, netdev, "Restarted autonegotiation successfully\n"); + + return 0; +} + +static void spnic_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + + ring->rx_max_pending = SPNIC_MAX_RX_QUEUE_DEPTH; + ring->tx_max_pending = SPNIC_MAX_TX_QUEUE_DEPTH; + ring->rx_pending = nic_dev->rxqs[0].q_depth; + ring->tx_pending = nic_dev->txqs[0].q_depth; +} + +static void spnic_update_qp_depth(struct spnic_nic_dev *nic_dev, u32 sq_depth, u32 rq_depth) +{ + u16 i; + + nic_dev->q_params.sq_depth = sq_depth; + nic_dev->q_params.rq_depth = rq_depth; + for (i = 0; i < nic_dev->max_qps; i++) { + nic_dev->txqs[i].q_depth = sq_depth; + nic_dev->txqs[i].q_mask = sq_depth - 1; + nic_dev->rxqs[i].q_depth = rq_depth; + nic_dev->rxqs[i].q_mask = rq_depth - 1; + } +} + +static int check_ringparam_valid(struct net_device *netdev, struct ethtool_ringparam *ring) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + + if (ring->rx_jumbo_pending || ring->rx_mini_pending) { + nicif_err(nic_dev, drv, netdev, + "Unsupported rx_jumbo_pending/rx_mini_pending\n"); + return -EINVAL; + } + + if (ring->tx_pending > SPNIC_MAX_TX_QUEUE_DEPTH || + ring->tx_pending < SPNIC_MIN_QUEUE_DEPTH || + ring->rx_pending > SPNIC_MAX_RX_QUEUE_DEPTH || + ring->rx_pending < SPNIC_MIN_QUEUE_DEPTH) { + nicif_err(nic_dev, drv, netdev, "Queue depth out of range: tx[%d-%d] rx[%d-%d]\n", + SPNIC_MIN_QUEUE_DEPTH, SPNIC_MAX_TX_QUEUE_DEPTH, + SPNIC_MIN_QUEUE_DEPTH, SPNIC_MAX_RX_QUEUE_DEPTH); + return -EINVAL; + } + + return 0; +} + +static int spnic_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + struct spnic_dyna_txrxq_params q_params = {0}; + u32 new_sq_depth, new_rq_depth; + int err; + + err = check_ringparam_valid(netdev, ring); + if (err) + return err; + + new_sq_depth = (u32)(1U << (u16)ilog2(ring->tx_pending)); + new_rq_depth = (u32)(1U << (u16)ilog2(ring->rx_pending)); + + if (new_sq_depth == nic_dev->q_params.sq_depth && + new_rq_depth == nic_dev->q_params.rq_depth) + return 0; /* nothing to do */ + + nicif_info(nic_dev, drv, netdev, "Change Tx/Rx ring depth from %u/%u to %u/%u\n", + nic_dev->q_params.sq_depth, nic_dev->q_params.rq_depth, + new_sq_depth, new_rq_depth); + + if (!netif_running(netdev)) { + spnic_update_qp_depth(nic_dev, new_sq_depth, new_rq_depth); + } else { + q_params = nic_dev->q_params; + q_params.sq_depth = new_sq_depth; + q_params.rq_depth = new_rq_depth; + q_params.txqs_res = NULL; + q_params.rxqs_res = NULL; + q_params.irq_cfg = NULL; + + nicif_info(nic_dev, drv, netdev, "Restarting channel\n"); + err = spnic_change_channel_settings(nic_dev, &q_params, NULL, NULL); + if (err) { + nicif_err(nic_dev, drv, netdev, "Failed to change channel settings\n"); + return -EFAULT; + } + } + + return 0; +}
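+/* Worked example (illustrative): spnic_set_ringparam() rounds the requested
+ * depth down to a power of two with 1U << ilog2(n), so a request such as
+ * `ethtool -G <dev> tx 1000` programs a depth of 512:
+ *
+ *	u32 new_depth = 1U << (u16)ilog2(1000);	// ilog2(1000) == 9 -> 512
+ */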
+static int get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal, u16 queue) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + struct spnic_intr_coal_info *interrupt_info = NULL; + + if (queue == COALESCE_ALL_QUEUE) { + /* get tx/rx irq0 as default parameters */ + interrupt_info = &nic_dev->intr_coalesce[0]; + } else { + if (queue >= nic_dev->q_params.num_qps) { + nicif_err(nic_dev, drv, netdev, "Invalid queue_id: %u\n", queue); + return -EINVAL; + } + interrupt_info = &nic_dev->intr_coalesce[queue]; + } + + /* the coalesce timer is in units of 5us */ + coal->rx_coalesce_usecs = interrupt_info->coalesce_timer_cfg * COALESCE_TIMER_CFG_UNIT; + /* coalesced frames are in units of 8 */ + coal->rx_max_coalesced_frames = interrupt_info->pending_limt * COALESCE_PENDING_LIMIT_UNIT; + + /* tx/rx use the same interrupt */ + coal->tx_coalesce_usecs = coal->rx_coalesce_usecs; + coal->tx_max_coalesced_frames = coal->rx_max_coalesced_frames; + coal->use_adaptive_rx_coalesce = nic_dev->adaptive_rx_coal; + + coal->pkt_rate_high = (u32)interrupt_info->pkt_rate_high; + coal->rx_coalesce_usecs_high = interrupt_info->rx_usecs_high * COALESCE_TIMER_CFG_UNIT; + coal->rx_max_coalesced_frames_high = interrupt_info->rx_pending_limt_high * + COALESCE_PENDING_LIMIT_UNIT; + + coal->pkt_rate_low = (u32)interrupt_info->pkt_rate_low; + coal->rx_coalesce_usecs_low = interrupt_info->rx_usecs_low * + COALESCE_TIMER_CFG_UNIT; + coal->rx_max_coalesced_frames_low = interrupt_info->rx_pending_limt_low * + COALESCE_PENDING_LIMIT_UNIT; + + return 0; +}
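+/* Worked example (illustrative): the hardware keeps the coalesce timer in
+ * 5us units and the pending limit in 8-frame units, so the conversions in
+ * get_coalesce()/set_coalesce() are lossy for values that are not a multiple
+ * of the unit:
+ *
+ *	u8 timer = 12 / COALESCE_TIMER_CFG_UNIT;	// 12us -> 2 units
+ *	u32 usecs = timer * COALESCE_TIMER_CFG_UNIT;	// reads back as 10us
+ */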
+static int set_queue_coalesce(struct spnic_nic_dev *nic_dev, u16 q_id, + struct spnic_intr_coal_info *coal) +{ + struct spnic_intr_coal_info *intr_coal; + struct interrupt_info info = {0}; + struct net_device *netdev = nic_dev->netdev; + int err; + + intr_coal = &nic_dev->intr_coalesce[q_id]; + if (intr_coal->coalesce_timer_cfg != coal->coalesce_timer_cfg || + intr_coal->pending_limt != coal->pending_limt) + intr_coal->user_set_intr_coal_flag = 1; + + intr_coal->coalesce_timer_cfg = coal->coalesce_timer_cfg; + intr_coal->pending_limt = coal->pending_limt; + intr_coal->pkt_rate_low = coal->pkt_rate_low; + intr_coal->rx_usecs_low = coal->rx_usecs_low; + intr_coal->rx_pending_limt_low = coal->rx_pending_limt_low; + intr_coal->pkt_rate_high = coal->pkt_rate_high; + intr_coal->rx_usecs_high = coal->rx_usecs_high; + intr_coal->rx_pending_limt_high = coal->rx_pending_limt_high; + + /* if the netdev is not running or the qp is not in use, + * there is no need to set coalesce to hw + */ + if (!test_bit(SPNIC_INTF_UP, &nic_dev->flags) || + q_id >= nic_dev->q_params.num_qps || nic_dev->adaptive_rx_coal) + return 0; + + info.msix_index = nic_dev->q_params.irq_cfg[q_id].msix_entry_idx; + info.lli_set = 0; + info.interrupt_coalesc_set = 1; + info.coalesc_timer_cfg = intr_coal->coalesce_timer_cfg; + info.pending_limt = intr_coal->pending_limt; + info.resend_timer_cfg = intr_coal->resend_timer_cfg; + nic_dev->rxqs[q_id].last_coalesc_timer_cfg = intr_coal->coalesce_timer_cfg; + nic_dev->rxqs[q_id].last_pending_limt = intr_coal->pending_limt; + err = sphw_set_interrupt_cfg(nic_dev->hwdev, info, SPHW_CHANNEL_NIC); + if (err) + nicif_warn(nic_dev, drv, netdev, "Failed to set queue%u coalesce\n", q_id); + + return err; +} + +static int is_coalesce_exceed_limit(struct net_device *netdev, + const struct ethtool_coalesce *coal) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + + if (coal->rx_coalesce_usecs > COALESCE_MAX_TIMER_CFG) { + nicif_err(nic_dev, drv, netdev, "rx_coalesce_usecs out of range[%d-%d]\n", 0, + COALESCE_MAX_TIMER_CFG); + return -EOPNOTSUPP; + } + + if (coal->rx_max_coalesced_frames > COALESCE_MAX_PENDING_LIMIT) { + nicif_err(nic_dev, drv, netdev, "rx_max_coalesced_frames out of range[%d-%d]\n", 0, + COALESCE_MAX_PENDING_LIMIT); + return -EOPNOTSUPP; + } + + if (coal->rx_coalesce_usecs_low > COALESCE_MAX_TIMER_CFG) { + nicif_err(nic_dev, drv, netdev, "rx_coalesce_usecs_low out of range[%d-%d]\n", 0, + COALESCE_MAX_TIMER_CFG); + return -EOPNOTSUPP; + } + + if (coal->rx_max_coalesced_frames_low > COALESCE_MAX_PENDING_LIMIT) { + nicif_err(nic_dev, drv, netdev, "rx_max_coalesced_frames_low out of range[%d-%d]\n", + 0, COALESCE_MAX_PENDING_LIMIT); + return -EOPNOTSUPP; + } + + if (coal->rx_coalesce_usecs_high > COALESCE_MAX_TIMER_CFG) { + nicif_err(nic_dev, drv, netdev, "rx_coalesce_usecs_high out of range[%d-%d]\n", 0, + COALESCE_MAX_TIMER_CFG); + return -EOPNOTSUPP; + } + + if (coal->rx_max_coalesced_frames_high > COALESCE_MAX_PENDING_LIMIT) { + nicif_err(nic_dev, drv, netdev, "rx_max_coalesced_frames_high out of range[%d-%d]\n", + 0, COALESCE_MAX_PENDING_LIMIT); + return -EOPNOTSUPP; + } + + return 0; +} + +static int is_coalesce_legal(struct net_device *netdev, + const struct ethtool_coalesce *coal) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + struct ethtool_coalesce tmp_coal = {0}; + int err; + + if (coal->rx_coalesce_usecs != coal->tx_coalesce_usecs) { + nicif_err(nic_dev, drv, netdev, "tx-usecs must be equal to rx-usecs\n"); + return -EINVAL; + } + + if (coal->rx_max_coalesced_frames != coal->tx_max_coalesced_frames) { + nicif_err(nic_dev, drv, netdev, "tx-frames must be equal to rx-frames\n"); + return -EINVAL; + } + + tmp_coal.cmd = coal->cmd; + tmp_coal.rx_coalesce_usecs = coal->rx_coalesce_usecs; + tmp_coal.rx_max_coalesced_frames = coal->rx_max_coalesced_frames; + tmp_coal.tx_coalesce_usecs = coal->tx_coalesce_usecs; + tmp_coal.tx_max_coalesced_frames = coal->tx_max_coalesced_frames; + tmp_coal.use_adaptive_rx_coalesce = coal->use_adaptive_rx_coalesce; + + tmp_coal.pkt_rate_low = coal->pkt_rate_low; + tmp_coal.rx_coalesce_usecs_low = coal->rx_coalesce_usecs_low; + tmp_coal.rx_max_coalesced_frames_low = coal->rx_max_coalesced_frames_low; + + tmp_coal.pkt_rate_high = coal->pkt_rate_high; + tmp_coal.rx_coalesce_usecs_high = coal->rx_coalesce_usecs_high; + tmp_coal.rx_max_coalesced_frames_high = coal->rx_max_coalesced_frames_high; + + if (memcmp(coal, &tmp_coal, sizeof(struct ethtool_coalesce))) { + nicif_err(nic_dev, drv, netdev, "Only changing rx/tx-usecs and rx/tx-frames is supported\n"); + return -EOPNOTSUPP; + } + + err = is_coalesce_exceed_limit(netdev, coal); + if (err) + return err; + + if (coal->rx_coalesce_usecs_low / COALESCE_TIMER_CFG_UNIT >= + coal->rx_coalesce_usecs_high / COALESCE_TIMER_CFG_UNIT) { + nicif_err(nic_dev, drv, netdev, + "coalesce_usecs_high(%u) must be greater than coalesce_usecs_low(%u) after dividing by the %d usecs unit\n", + coal->rx_coalesce_usecs_high, + coal->rx_coalesce_usecs_low, + COALESCE_TIMER_CFG_UNIT); + return -EOPNOTSUPP; + } + + if (coal->rx_max_coalesced_frames_low / COALESCE_PENDING_LIMIT_UNIT >= + coal->rx_max_coalesced_frames_high / COALESCE_PENDING_LIMIT_UNIT) { + nicif_err(nic_dev, drv, netdev, + "coalesced_frames_high(%u) must be greater than coalesced_frames_low(%u) after dividing by the %d frames unit\n", + coal->rx_max_coalesced_frames_high, + coal->rx_max_coalesced_frames_low, + COALESCE_PENDING_LIMIT_UNIT); + return -EOPNOTSUPP; + } + + if (coal->pkt_rate_low >= coal->pkt_rate_high) { + nicif_err(nic_dev, drv, netdev, "pkt_rate_high(%u) must be greater than pkt_rate_low(%u)\n", + coal->pkt_rate_high, + coal->pkt_rate_low); + return -EOPNOTSUPP; + } + + return 0; +} + +#define CHECK_COALESCE_ALIGN(coal, item, unit) \ +do { \ + if 
((coal)->item % (unit)) \ + nicif_warn(nic_dev, drv, netdev, \ + "%s in %d units, change to %u\n", \ + #item, (unit), ((coal)->item - \ + (coal)->item % (unit))); \ +} while (0) + +#define CHECK_COALESCE_CHANGED(coal, item, unit, ori_val, obj_str) \ +do { \ + if (((coal)->item / (unit)) != (ori_val)) \ + nicif_info(nic_dev, drv, netdev, \ + "Change %s from %d to %u %s\n", \ + #item, (ori_val) * (unit), \ + ((coal)->item - (coal)->item % (unit)), \ + (obj_str)); \ +} while (0) + +#define CHECK_PKT_RATE_CHANGED(coal, item, ori_val, obj_str) \ +do { \ + if ((coal)->item != (ori_val)) \ + nicif_info(nic_dev, drv, netdev, \ + "Change %s from %llu to %u %s\n", \ + #item, (ori_val), (coal)->item, (obj_str)); \ +} while (0) + +static int set_hw_coal_param(struct spnic_nic_dev *nic_dev, struct spnic_intr_coal_info *intr_coal, + u16 queue) +{ + u16 i; + + if (queue == COALESCE_ALL_QUEUE) { + for (i = 0; i < nic_dev->max_qps; i++) + set_queue_coalesce(nic_dev, i, intr_coal); + } else { + if (queue >= nic_dev->q_params.num_qps) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Invalid queue_id: %u\n", queue); + return -EINVAL; + } + set_queue_coalesce(nic_dev, queue, intr_coal); + } + + return 0; +} + +static int set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal, u16 queue) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + struct spnic_intr_coal_info intr_coal = {0}; + struct spnic_intr_coal_info *ori_intr_coal = NULL; + u32 last_adaptive_rx; + char obj_str[32] = {0}; + int err = 0; + + err = is_coalesce_legal(netdev, coal); + if (err) + return err; + + CHECK_COALESCE_ALIGN(coal, rx_coalesce_usecs, COALESCE_TIMER_CFG_UNIT); + CHECK_COALESCE_ALIGN(coal, rx_max_coalesced_frames, COALESCE_PENDING_LIMIT_UNIT); + CHECK_COALESCE_ALIGN(coal, rx_coalesce_usecs_high, COALESCE_TIMER_CFG_UNIT); + CHECK_COALESCE_ALIGN(coal, rx_max_coalesced_frames_high, COALESCE_PENDING_LIMIT_UNIT); + CHECK_COALESCE_ALIGN(coal, rx_coalesce_usecs_low, COALESCE_TIMER_CFG_UNIT); + CHECK_COALESCE_ALIGN(coal, rx_max_coalesced_frames_low, COALESCE_PENDING_LIMIT_UNIT); + + if (queue == COALESCE_ALL_QUEUE) { + ori_intr_coal = &nic_dev->intr_coalesce[0]; + snprintf(obj_str, sizeof(obj_str), "for netdev"); + } else { + ori_intr_coal = &nic_dev->intr_coalesce[queue]; + snprintf(obj_str, sizeof(obj_str), "for queue %u", queue); + } + CHECK_COALESCE_CHANGED(coal, rx_coalesce_usecs, COALESCE_TIMER_CFG_UNIT, + ori_intr_coal->coalesce_timer_cfg, obj_str); + CHECK_COALESCE_CHANGED(coal, rx_max_coalesced_frames, COALESCE_PENDING_LIMIT_UNIT, + ori_intr_coal->pending_limt, obj_str); + CHECK_PKT_RATE_CHANGED(coal, pkt_rate_high, ori_intr_coal->pkt_rate_high, obj_str); + CHECK_COALESCE_CHANGED(coal, rx_coalesce_usecs_high, COALESCE_TIMER_CFG_UNIT, + ori_intr_coal->rx_usecs_high, obj_str); + CHECK_COALESCE_CHANGED(coal, rx_max_coalesced_frames_high, COALESCE_PENDING_LIMIT_UNIT, + ori_intr_coal->rx_pending_limt_high, obj_str); + CHECK_PKT_RATE_CHANGED(coal, pkt_rate_low, ori_intr_coal->pkt_rate_low, obj_str); + CHECK_COALESCE_CHANGED(coal, rx_coalesce_usecs_low, COALESCE_TIMER_CFG_UNIT, + ori_intr_coal->rx_usecs_low, obj_str); + CHECK_COALESCE_CHANGED(coal, rx_max_coalesced_frames_low, COALESCE_PENDING_LIMIT_UNIT, + ori_intr_coal->rx_pending_limt_low, obj_str); + + intr_coal.coalesce_timer_cfg = (u8)(coal->rx_coalesce_usecs / COALESCE_TIMER_CFG_UNIT); + intr_coal.pending_limt = (u8)(coal->rx_max_coalesced_frames / COALESCE_PENDING_LIMIT_UNIT); + + last_adaptive_rx = nic_dev->adaptive_rx_coal; + nic_dev->adaptive_rx_coal = 
coal->use_adaptive_rx_coalesce; + + intr_coal.pkt_rate_high = coal->pkt_rate_high; + intr_coal.rx_usecs_high = (u8)(coal->rx_coalesce_usecs_high / COALESCE_TIMER_CFG_UNIT); + intr_coal.rx_pending_limt_high = (u8)(coal->rx_max_coalesced_frames_high / + COALESCE_PENDING_LIMIT_UNIT); + + intr_coal.pkt_rate_low = coal->pkt_rate_low; + intr_coal.rx_usecs_low = (u8)(coal->rx_coalesce_usecs_low / COALESCE_TIMER_CFG_UNIT); + intr_coal.rx_pending_limt_low = (u8)(coal->rx_max_coalesced_frames_low / + COALESCE_PENDING_LIMIT_UNIT); + + /* setting the coalesce timer or pending limit to zero will disable coalesce */ + if (!nic_dev->adaptive_rx_coal && + (!intr_coal.coalesce_timer_cfg || !intr_coal.pending_limt)) + nicif_warn(nic_dev, drv, netdev, "Coalesce will be disabled\n"); + + /* ensure coalesce parameters will not be changed by the auto + * moderation work + */ + if (SPHW_CHANNEL_RES_VALID(nic_dev)) { + if (!nic_dev->adaptive_rx_coal) + cancel_delayed_work_sync(&nic_dev->moderation_task); + else if (!last_adaptive_rx) + queue_delayed_work(nic_dev->workq, &nic_dev->moderation_task, + SPNIC_MODERATONE_DELAY); + } + + return set_hw_coal_param(nic_dev, &intr_coal, queue); +} + +static int spnic_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *ext_ack) +{ + return get_coalesce(netdev, coal, COALESCE_ALL_QUEUE); +} + +static int spnic_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *ext_ack) +{ + return set_coalesce(netdev, coal, COALESCE_ALL_QUEUE); +} + +static int spnic_get_per_queue_coalesce(struct net_device *netdev, u32 queue, + struct ethtool_coalesce *coal) +{ + return get_coalesce(netdev, coal, queue); +} + +static int spnic_set_per_queue_coalesce(struct net_device *netdev, u32 queue, + struct ethtool_coalesce *coal) +{ + return set_coalesce(netdev, coal, queue); +} + +static int spnic_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + int err; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + err = spnic_set_led_status(nic_dev->hwdev, MAG_CMD_LED_TYPE_ALARM, + MAG_CMD_LED_MODE_FORCE_BLINK_2HZ); + if (err) + nicif_err(nic_dev, drv, netdev, "Failed to set LED blinking at 2Hz\n"); + else + nicif_info(nic_dev, drv, netdev, "Set LED blinking at 2Hz successfully\n"); + break; + + case ETHTOOL_ID_INACTIVE: + err = spnic_set_led_status(nic_dev->hwdev, MAG_CMD_LED_TYPE_ALARM, + MAG_CMD_LED_MODE_DEFAULT); + if (err) + nicif_err(nic_dev, drv, netdev, "Failed to reset LED to its original status\n"); + else + nicif_info(nic_dev, drv, netdev, "Reset LED to its original status successfully\n"); + break; + + default: + return -EOPNOTSUPP; + } + + return err; +} + +static void spnic_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + struct nic_pause_config nic_pause = {0}; + int err; + + err = spnic_get_pause_info(nic_dev->hwdev, &nic_pause); + if (err) { + nicif_err(nic_dev, drv, netdev, "Failed to get pauseparam from hw\n"); + } else { + pause->autoneg = nic_pause.auto_neg; + pause->rx_pause = nic_pause.rx_pause; + pause->tx_pause = nic_pause.tx_pause; + } +}
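+/* Usage note (illustrative): `ethtool -a/-A <dev>` lands in the two pause
+ * hooks here; global pause and per-priority PFC are treated as mutually
+ * exclusive, which is why spnic_set_pauseparam() below rejects the request
+ * with -EPERM while hw_dcb_cfg.pfc_state is set.
+ */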
+static int spnic_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + struct nic_pause_config nic_pause = {0}; + struct nic_port_info port_info = {0}; + int err; + + err = spnic_get_port_info(nic_dev->hwdev, &port_info, SPHW_CHANNEL_NIC); + if (err) { + nicif_err(nic_dev, drv, netdev, "Failed to get auto-negotiation state\n"); + return -EFAULT; + } + + if (pause->autoneg != port_info.autoneg_state) { + nicif_err(nic_dev, drv, netdev, "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n"); + return -EOPNOTSUPP; + } + + if (nic_dev->hw_dcb_cfg.pfc_state) { + nicif_err(nic_dev, drv, netdev, "Cannot set pause while PFC is enabled\n"); + return -EPERM; + } + + nic_pause.auto_neg = (u8)pause->autoneg; + nic_pause.rx_pause = (u8)pause->rx_pause; + nic_pause.tx_pause = (u8)pause->tx_pause; + + err = spnic_set_pause_info(nic_dev->hwdev, nic_pause); + if (err) { + nicif_err(nic_dev, drv, netdev, "Failed to set pauseparam\n"); + return -EFAULT; + } + + nicif_info(nic_dev, drv, netdev, "Set pause options, tx: %s, rx: %s\n", + pause->tx_pause ? "on" : "off", + pause->rx_pause ? "on" : "off"); + + return 0; +} + +static int spnic_get_module_info(struct net_device *netdev, struct ethtool_modinfo *modinfo) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + u8 sfp_type = 0; + u8 sfp_type_ext = 0; + int err; + + err = spnic_get_sfp_type(nic_dev->hwdev, &sfp_type, &sfp_type_ext); + if (err) + return err; + + switch (sfp_type) { + case MODULE_TYPE_SFP: + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + break; + case MODULE_TYPE_QSFP: + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN; + break; + case MODULE_TYPE_QSFP_PLUS: + if (sfp_type_ext >= 0x3) { + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN; + } else { + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN; + } + break; + case MODULE_TYPE_QSFP28: + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN; + break; + default: + nicif_warn(nic_dev, drv, netdev, "Optical module unknown: 0x%x\n", sfp_type); + return -EINVAL; + } + + return 0; +} + +static int spnic_get_module_eeprom(struct net_device *netdev, struct ethtool_eeprom *ee, u8 *data) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + u8 sfp_data[STD_SFP_INFO_MAX_SIZE]; + int err; + + if (!ee->len || ((ee->len + ee->offset) > STD_SFP_INFO_MAX_SIZE)) + return -EINVAL; + + memset(data, 0, ee->len); + + err = spnic_get_sfp_eeprom(nic_dev->hwdev, (u8 *)sfp_data, ee->len); + if (err) + return err; + + memcpy(data, sfp_data + ee->offset, ee->len); + + return 0; +} + +#define SPNIC_PRIV_FLAGS_SYMM_RSS BIT(0) + +static u32 spnic_get_priv_flags(struct net_device *netdev) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + u32 priv_flags = 0; + + if (test_bit(SPNIC_SAME_RXTX, &nic_dev->flags)) + priv_flags |= SPNIC_PRIV_FLAGS_SYMM_RSS; + + return priv_flags; +} + +static int spnic_set_priv_flags(struct net_device *netdev, u32 priv_flags) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + + if (priv_flags & SPNIC_PRIV_FLAGS_SYMM_RSS) { + if (test_bit(SPNIC_DCB_ENABLE, &nic_dev->flags)) { + nicif_err(nic_dev, drv, netdev, "Cannot enable symmetric RSS while DCB is enabled\n"); + return -EOPNOTSUPP; + } + set_bit(SPNIC_SAME_RXTX, &nic_dev->flags); + } else { + clear_bit(SPNIC_SAME_RXTX, &nic_dev->flags); + } + + return 0; +} + +#define BROADCAST_PACKET_SIM 0xFF +#define UNICAST_PACKET_SIM 0xFE +#define IP_PROTOCOL_TYPE 0x08 +#define IP_PROTOCOL_COMPLEMENT 0x00 + +#define PORT_DOWN_ERR_IDX 0 +#define LP_DEFAULT_TIME 5 /* 
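default loopback test duration in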
seconds */ +#define LP_PKT_LEN 1514 + +#define TEST_TIME_MULTIPLE 5 +static int spnic_run_lp_test(struct spnic_nic_dev *nic_dev, u32 test_time) +{ + u32 cnt = test_time * TEST_TIME_MULTIPLE; + struct sk_buff *skb = NULL; + struct sk_buff *skb_tmp = NULL; + u8 *test_data = NULL; + u8 *lb_test_rx_buf = nic_dev->lb_test_rx_buf; + struct net_device *netdev = nic_dev->netdev; + u32 i; + u8 j; + + skb_tmp = alloc_skb(LP_PKT_LEN, GFP_ATOMIC); + if (!skb_tmp) { + nicif_err(nic_dev, drv, netdev, "Failed to alloc xmit skb template for loopback test\n"); + return -ENOMEM; + } + + test_data = __skb_put(skb_tmp, LP_PKT_LEN); + + memset(test_data, BROADCAST_PACKET_SIM, ETH_ALEN + ETH_ALEN); + + test_data[ETH_ALEN] = UNICAST_PACKET_SIM; + test_data[ETH_ALEN + ETH_ALEN] = IP_PROTOCOL_TYPE; + test_data[ETH_ALEN + ETH_ALEN + 1] = IP_PROTOCOL_COMPLEMENT; + + for (i = ETH_HLEN; i < LP_PKT_LEN; i++) + test_data[i] = i & 0xFF; + + skb_tmp->queue_mapping = 0; + skb_tmp->ip_summed = CHECKSUM_COMPLETE; + skb_tmp->dev = netdev; + + for (i = 0; i < cnt; i++) { + nic_dev->lb_test_rx_idx = 0; + memset(lb_test_rx_buf, 0, LP_PKT_CNT * LP_PKT_LEN); + + for (j = 0; j < LP_PKT_CNT; j++) { + skb = pskb_copy(skb_tmp, GFP_ATOMIC); + if (!skb) { + dev_kfree_skb_any(skb_tmp); + nicif_err(nic_dev, drv, netdev, "Failed to copy skb for loopback test\n"); + return -ENOMEM; + } + + /* mark index for every pkt */ + skb->data[LP_PKT_LEN - 1] = j; + + if (spnic_lb_xmit_frame(skb, netdev)) { + dev_kfree_skb_any(skb); + dev_kfree_skb_any(skb_tmp); + nicif_err(nic_dev, drv, netdev, "Failed to xmit pkt for loopback test\n"); + return -EBUSY; + } + } + + /* wait until all packets have been received in the RX buffer */ + msleep(200); + + for (j = 0; j < LP_PKT_CNT; j++) { + if (memcmp((lb_test_rx_buf + (j * LP_PKT_LEN)), + skb_tmp->data, (LP_PKT_LEN - 1)) || + (*(lb_test_rx_buf + ((j * LP_PKT_LEN) + + (LP_PKT_LEN - 1))) != j)) { + dev_kfree_skb_any(skb_tmp); + nicif_err(nic_dev, drv, netdev, + "Compare pkt failed in loopback test(index=0x%02x, data[%d]=0x%02x)\n", + (j + (i * LP_PKT_CNT)), (LP_PKT_LEN - 1), + *(lb_test_rx_buf + + ((j * LP_PKT_LEN) + (LP_PKT_LEN - 1)))); + return -EIO; + } + } + } + + dev_kfree_skb_any(skb_tmp); + nicif_info(nic_dev, drv, netdev, "Loopback test succeeded\n"); + return 0; +} + +enum diag_test_index { + INTERNAL_LP_TEST = 0, + EXTERNAL_LP_TEST = 1, + DIAG_TEST_MAX = 2, +}; + +#define SPNIC_INTERNAL_LP_MODE 5 +static int do_lp_test(struct spnic_nic_dev *nic_dev, u32 *flags, u32 test_time, + enum diag_test_index *test_index) +{ + struct net_device *netdev = nic_dev->netdev; + u8 *lb_test_rx_buf = NULL; + int err = 0; + + if (!(*flags & ETH_TEST_FL_EXTERNAL_LB)) { + *test_index = INTERNAL_LP_TEST; + if (spnic_set_loopback_mode(nic_dev->hwdev, SPNIC_INTERNAL_LP_MODE, true)) { + nicif_err(nic_dev, drv, netdev, + "Failed to set port loopback mode before loopback test\n"); + return -EFAULT; + } + } else { + *test_index = EXTERNAL_LP_TEST; + } + + lb_test_rx_buf = vmalloc(LP_PKT_CNT * LP_PKT_LEN); + if (!lb_test_rx_buf) { + nicif_err(nic_dev, drv, netdev, "Failed to alloc RX buffer for loopback test\n"); + err = -ENOMEM; + } else { + nic_dev->lb_test_rx_buf = lb_test_rx_buf; + nic_dev->lb_pkt_len = LP_PKT_LEN; + set_bit(SPNIC_LP_TEST, &nic_dev->flags); + + if (spnic_run_lp_test(nic_dev, test_time)) + err = -EFAULT; + + clear_bit(SPNIC_LP_TEST, &nic_dev->flags); + msleep(100); + vfree(lb_test_rx_buf); + nic_dev->lb_test_rx_buf = NULL; + } + + if (!(*flags & ETH_TEST_FL_EXTERNAL_LB)) { + if (spnic_set_loopback_mode(nic_dev->hwdev, 
SPNIC_INTERNAL_LP_MODE, false)) { + nicif_err(nic_dev, drv, netdev, + "Failed to cancel port loopback mode after loopback test\n"); + err = -EFAULT; + } + } else { + *flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; + } + + return err; +} + +void spnic_lp_test(struct net_device *netdev, struct ethtool_test *eth_test, + u64 *data, u32 test_time) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + enum diag_test_index test_index = 0; + u8 link_status = 0; + int err; + + /* don't support loopback test when netdev is closed. */ + if (!test_bit(SPNIC_INTF_UP, &nic_dev->flags)) { + nicif_err(nic_dev, drv, netdev, + "Do not support loopback test when netdev is closed\n"); + eth_test->flags |= ETH_TEST_FL_FAILED; + data[PORT_DOWN_ERR_IDX] = 1; + return; + } + + if (test_time == 0) + test_time = LP_DEFAULT_TIME; + + netif_carrier_off(netdev); + netif_tx_disable(netdev); + + err = do_lp_test(nic_dev, ð_test->flags, test_time, &test_index); + if (err) { + eth_test->flags |= ETH_TEST_FL_FAILED; + data[test_index] = 1; + } + + netif_tx_wake_all_queues(netdev); + + err = spnic_get_link_state(nic_dev->hwdev, &link_status); + if (!err && link_status) + netif_carrier_on(netdev); +} + +static void spnic_diag_test(struct net_device *netdev, struct ethtool_test *eth_test, u64 *data) +{ + memset(data, 0, DIAG_TEST_MAX * sizeof(u64)); + + spnic_lp_test(netdev, eth_test, data, 0); +} + +static const struct ethtool_ops spnic_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_PKT_RATE_RX_USECS, + .get_link_ksettings = spnic_get_link_ksettings, + .set_link_ksettings = spnic_set_link_ksettings, + + .get_drvinfo = spnic_get_drvinfo, + .get_msglevel = spnic_get_msglevel, + .set_msglevel = spnic_set_msglevel, + .nway_reset = spnic_nway_reset, + .get_link = ethtool_op_get_link, + .get_ringparam = spnic_get_ringparam, + .set_ringparam = spnic_set_ringparam, + .get_pauseparam = spnic_get_pauseparam, + .set_pauseparam = spnic_set_pauseparam, + .get_sset_count = spnic_get_sset_count, + .get_ethtool_stats = spnic_get_ethtool_stats, + .get_strings = spnic_get_strings, + + .self_test = spnic_diag_test, + .set_phys_id = spnic_set_phys_id, + + .get_coalesce = spnic_get_coalesce, + .set_coalesce = spnic_set_coalesce, + .get_per_queue_coalesce = spnic_get_per_queue_coalesce, + .set_per_queue_coalesce = spnic_set_per_queue_coalesce, + + .get_rxnfc = spnic_get_rxnfc, + .set_rxnfc = spnic_set_rxnfc, + .get_priv_flags = spnic_get_priv_flags, + .set_priv_flags = spnic_set_priv_flags, + + .get_channels = spnic_get_channels, + .set_channels = spnic_set_channels, + + .get_module_info = spnic_get_module_info, + .get_module_eeprom = spnic_get_module_eeprom, + + .get_rxfh_key_size = spnic_get_rxfh_key_size, + .get_rxfh = spnic_get_rxfh, + .set_rxfh = spnic_set_rxfh, +}; + +static const struct ethtool_ops spnicvf_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_PKT_RATE_RX_USECS, + .get_link_ksettings = spnic_get_link_ksettings, + .get_drvinfo = spnic_get_drvinfo, + .get_msglevel = spnic_get_msglevel, + .set_msglevel = spnic_set_msglevel, + .get_link = ethtool_op_get_link, + .get_ringparam = spnic_get_ringparam, + + .set_ringparam = spnic_set_ringparam, + .get_sset_count = spnic_get_sset_count, + .get_ethtool_stats = spnic_get_ethtool_stats, + .get_strings = spnic_get_strings, + + .get_coalesce = spnic_get_coalesce, + .set_coalesce = spnic_set_coalesce, + .get_per_queue_coalesce = spnic_get_per_queue_coalesce, + .set_per_queue_coalesce = 
spnic_set_per_queue_coalesce, + + .get_rxnfc = spnic_get_rxnfc, + .set_rxnfc = spnic_set_rxnfc, + .get_priv_flags = spnic_get_priv_flags, + .set_priv_flags = spnic_set_priv_flags, + + .get_channels = spnic_get_channels, + .set_channels = spnic_set_channels, + + .get_rxfh_key_size = spnic_get_rxfh_key_size, + .get_rxfh = spnic_get_rxfh, + .set_rxfh = spnic_set_rxfh, +}; + +void spnic_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &spnic_ethtool_ops; +} + +void spnicvf_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &spnicvf_ethtool_ops; +} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_ethtool_stats.c b/drivers/net/ethernet/ramaxel/spnic/spnic_ethtool_stats.c new file mode 100644 index 000000000000..8a5073a20f0b --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/spnic_ethtool_stats.c @@ -0,0 +1,1035 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/etherdevice.h> +#include <linux/netdevice.h> +#include <linux/if_vlan.h> +#include <linux/ethtool.h> + +#include "sphw_hw.h" +#include "sphw_crm.h" +#include "sphw_mt.h" +#include "spnic_nic_cfg.h" +#include "spnic_nic_dev.h" +#include "spnic_tx.h" +#include "spnic_rx.h" + +struct spnic_stats { + char name[ETH_GSTRING_LEN]; + u32 size; + int offset; +}; + +#define SPNIC_NETDEV_STAT(_stat_item) { \ + .name = #_stat_item, \ + .size = sizeof_field(struct rtnl_link_stats64, _stat_item), \ + .offset = offsetof(struct rtnl_link_stats64, _stat_item) \ +} + +static struct spnic_stats spnic_netdev_stats[] = { + SPNIC_NETDEV_STAT(rx_packets), + SPNIC_NETDEV_STAT(tx_packets), + SPNIC_NETDEV_STAT(rx_bytes), + SPNIC_NETDEV_STAT(tx_bytes), + SPNIC_NETDEV_STAT(rx_errors), + SPNIC_NETDEV_STAT(tx_errors), + SPNIC_NETDEV_STAT(rx_dropped), + SPNIC_NETDEV_STAT(tx_dropped), + SPNIC_NETDEV_STAT(multicast), + SPNIC_NETDEV_STAT(collisions), + SPNIC_NETDEV_STAT(rx_length_errors), + SPNIC_NETDEV_STAT(rx_over_errors), + SPNIC_NETDEV_STAT(rx_crc_errors), + SPNIC_NETDEV_STAT(rx_frame_errors), + SPNIC_NETDEV_STAT(rx_fifo_errors), + SPNIC_NETDEV_STAT(rx_missed_errors), + SPNIC_NETDEV_STAT(tx_aborted_errors), + SPNIC_NETDEV_STAT(tx_carrier_errors), + SPNIC_NETDEV_STAT(tx_fifo_errors), + SPNIC_NETDEV_STAT(tx_heartbeat_errors), +}; + +#define SPNIC_NIC_STAT(_stat_item) { \ + .name = #_stat_item, \ + .size = sizeof_field(struct spnic_nic_stats, _stat_item), \ + .offset = offsetof(struct spnic_nic_stats, _stat_item) \ +} + +static struct spnic_stats spnic_nic_dev_stats[] = { + SPNIC_NIC_STAT(netdev_tx_timeout), +}; + +static struct spnic_stats spnic_nic_dev_stats_extern[] = { + SPNIC_NIC_STAT(tx_carrier_off_drop), + SPNIC_NIC_STAT(tx_invalid_qid), +}; + +#define SPNIC_RXQ_STAT(_stat_item) { \ + .name = "rxq%d_"#_stat_item, \ + .size = sizeof_field(struct spnic_rxq_stats, _stat_item), \ + .offset = offsetof(struct spnic_rxq_stats, _stat_item) \ +} + +#define SPNIC_TXQ_STAT(_stat_item) { \ + .name = "txq%d_"#_stat_item, \ + .size = sizeof_field(struct spnic_txq_stats, _stat_item), \ + .offset = offsetof(struct spnic_txq_stats, _stat_item) \ +} + +static struct spnic_stats spnic_rx_queue_stats[] = { + SPNIC_RXQ_STAT(packets), + SPNIC_RXQ_STAT(bytes), + SPNIC_RXQ_STAT(errors), + SPNIC_RXQ_STAT(csum_errors), + 
SPNIC_RXQ_STAT(other_errors), + SPNIC_RXQ_STAT(dropped), + SPNIC_RXQ_STAT(xdp_dropped), + SPNIC_RXQ_STAT(rx_buf_empty), +}; + +static struct spnic_stats spnic_rx_queue_stats_extern[] = { + SPNIC_RXQ_STAT(alloc_skb_err), + SPNIC_RXQ_STAT(alloc_rx_buf_err), + SPNIC_RXQ_STAT(xdp_large_pkt), +}; + +static struct spnic_stats spnic_tx_queue_stats[] = { + SPNIC_TXQ_STAT(packets), + SPNIC_TXQ_STAT(bytes), + SPNIC_TXQ_STAT(busy), + SPNIC_TXQ_STAT(wake), + SPNIC_TXQ_STAT(dropped), +}; + +static struct spnic_stats spnic_tx_queue_stats_extern[] = { + SPNIC_TXQ_STAT(skb_pad_err), + SPNIC_TXQ_STAT(frag_len_overflow), + SPNIC_TXQ_STAT(offload_cow_skb_err), + SPNIC_TXQ_STAT(map_frag_err), + SPNIC_TXQ_STAT(unknown_tunnel_pkt), + SPNIC_TXQ_STAT(frag_size_err), +}; + +#define SPNIC_FUNC_STAT(_stat_item) { \ + .name = #_stat_item, \ + .size = sizeof_field(struct spnic_vport_stats, _stat_item), \ + .offset = offsetof(struct spnic_vport_stats, _stat_item) \ +} + +static struct spnic_stats spnic_function_stats[] = { + SPNIC_FUNC_STAT(tx_unicast_pkts_vport), + SPNIC_FUNC_STAT(tx_unicast_bytes_vport), + SPNIC_FUNC_STAT(tx_multicast_pkts_vport), + SPNIC_FUNC_STAT(tx_multicast_bytes_vport), + SPNIC_FUNC_STAT(tx_broadcast_pkts_vport), + SPNIC_FUNC_STAT(tx_broadcast_bytes_vport), + + SPNIC_FUNC_STAT(rx_unicast_pkts_vport), + SPNIC_FUNC_STAT(rx_unicast_bytes_vport), + SPNIC_FUNC_STAT(rx_multicast_pkts_vport), + SPNIC_FUNC_STAT(rx_multicast_bytes_vport), + SPNIC_FUNC_STAT(rx_broadcast_pkts_vport), + SPNIC_FUNC_STAT(rx_broadcast_bytes_vport), + + SPNIC_FUNC_STAT(tx_discard_vport), + SPNIC_FUNC_STAT(rx_discard_vport), + SPNIC_FUNC_STAT(tx_err_vport), + SPNIC_FUNC_STAT(rx_err_vport), +}; + +#define SPNIC_PORT_STAT(_stat_item) { \ + .name = #_stat_item, \ + .size = sizeof_field(struct mag_cmd_port_stats, _stat_item), \ + .offset = offsetof(struct mag_cmd_port_stats, _stat_item) \ +} + +static struct spnic_stats spnic_port_stats[] = { + SPNIC_PORT_STAT(mac_rx_total_pkt_num), + SPNIC_PORT_STAT(mac_rx_total_oct_num), + SPNIC_PORT_STAT(mac_rx_bad_pkt_num), + SPNIC_PORT_STAT(mac_rx_bad_oct_num), + SPNIC_PORT_STAT(mac_rx_good_pkt_num), + SPNIC_PORT_STAT(mac_rx_good_oct_num), + SPNIC_PORT_STAT(mac_rx_uni_pkt_num), + SPNIC_PORT_STAT(mac_rx_multi_pkt_num), + SPNIC_PORT_STAT(mac_rx_broad_pkt_num), + SPNIC_PORT_STAT(mac_tx_total_pkt_num), + SPNIC_PORT_STAT(mac_tx_total_oct_num), + SPNIC_PORT_STAT(mac_tx_bad_pkt_num), + SPNIC_PORT_STAT(mac_tx_bad_oct_num), + SPNIC_PORT_STAT(mac_tx_good_pkt_num), + SPNIC_PORT_STAT(mac_tx_good_oct_num), + SPNIC_PORT_STAT(mac_tx_uni_pkt_num), + SPNIC_PORT_STAT(mac_tx_multi_pkt_num), + SPNIC_PORT_STAT(mac_tx_broad_pkt_num), + SPNIC_PORT_STAT(mac_rx_fragment_pkt_num), + SPNIC_PORT_STAT(mac_rx_undersize_pkt_num), + SPNIC_PORT_STAT(mac_rx_undermin_pkt_num), + SPNIC_PORT_STAT(mac_rx_64_oct_pkt_num), + SPNIC_PORT_STAT(mac_rx_65_127_oct_pkt_num), + SPNIC_PORT_STAT(mac_rx_128_255_oct_pkt_num), + SPNIC_PORT_STAT(mac_rx_256_511_oct_pkt_num), + SPNIC_PORT_STAT(mac_rx_512_1023_oct_pkt_num), + SPNIC_PORT_STAT(mac_rx_1024_1518_oct_pkt_num), + SPNIC_PORT_STAT(mac_rx_1519_2047_oct_pkt_num), + SPNIC_PORT_STAT(mac_rx_2048_4095_oct_pkt_num), + SPNIC_PORT_STAT(mac_rx_4096_8191_oct_pkt_num), + SPNIC_PORT_STAT(mac_rx_8192_9216_oct_pkt_num), + SPNIC_PORT_STAT(mac_rx_9217_12287_oct_pkt_num), + SPNIC_PORT_STAT(mac_rx_12288_16383_oct_pkt_num), + SPNIC_PORT_STAT(mac_rx_1519_max_good_pkt_num), + SPNIC_PORT_STAT(mac_rx_1519_max_bad_pkt_num), + SPNIC_PORT_STAT(mac_rx_oversize_pkt_num), + SPNIC_PORT_STAT(mac_rx_jabber_pkt_num), + 
SPNIC_PORT_STAT(mac_rx_pause_num), + SPNIC_PORT_STAT(mac_rx_pfc_pkt_num), + SPNIC_PORT_STAT(mac_rx_pfc_pri0_pkt_num), + SPNIC_PORT_STAT(mac_rx_pfc_pri1_pkt_num), + SPNIC_PORT_STAT(mac_rx_pfc_pri2_pkt_num), + SPNIC_PORT_STAT(mac_rx_pfc_pri3_pkt_num), + SPNIC_PORT_STAT(mac_rx_pfc_pri4_pkt_num), + SPNIC_PORT_STAT(mac_rx_pfc_pri5_pkt_num), + SPNIC_PORT_STAT(mac_rx_pfc_pri6_pkt_num), + SPNIC_PORT_STAT(mac_rx_pfc_pri7_pkt_num), + SPNIC_PORT_STAT(mac_rx_control_pkt_num), + SPNIC_PORT_STAT(mac_rx_sym_err_pkt_num), + SPNIC_PORT_STAT(mac_rx_fcs_err_pkt_num), + SPNIC_PORT_STAT(mac_rx_send_app_good_pkt_num), + SPNIC_PORT_STAT(mac_rx_send_app_bad_pkt_num), + SPNIC_PORT_STAT(mac_tx_fragment_pkt_num), + SPNIC_PORT_STAT(mac_tx_undersize_pkt_num), + SPNIC_PORT_STAT(mac_tx_undermin_pkt_num), + SPNIC_PORT_STAT(mac_tx_64_oct_pkt_num), + SPNIC_PORT_STAT(mac_tx_65_127_oct_pkt_num), + SPNIC_PORT_STAT(mac_tx_128_255_oct_pkt_num), + SPNIC_PORT_STAT(mac_tx_256_511_oct_pkt_num), + SPNIC_PORT_STAT(mac_tx_512_1023_oct_pkt_num), + SPNIC_PORT_STAT(mac_tx_1024_1518_oct_pkt_num), + SPNIC_PORT_STAT(mac_tx_1519_2047_oct_pkt_num), + SPNIC_PORT_STAT(mac_tx_2048_4095_oct_pkt_num), + SPNIC_PORT_STAT(mac_tx_4096_8191_oct_pkt_num), + SPNIC_PORT_STAT(mac_tx_8192_9216_oct_pkt_num), + SPNIC_PORT_STAT(mac_tx_9217_12287_oct_pkt_num), + SPNIC_PORT_STAT(mac_tx_12288_16383_oct_pkt_num), + SPNIC_PORT_STAT(mac_tx_1519_max_good_pkt_num), + SPNIC_PORT_STAT(mac_tx_1519_max_bad_pkt_num), + SPNIC_PORT_STAT(mac_tx_oversize_pkt_num), + SPNIC_PORT_STAT(mac_tx_jabber_pkt_num), + SPNIC_PORT_STAT(mac_tx_pause_num), + SPNIC_PORT_STAT(mac_tx_pfc_pkt_num), + SPNIC_PORT_STAT(mac_tx_pfc_pri0_pkt_num), + SPNIC_PORT_STAT(mac_tx_pfc_pri1_pkt_num), + SPNIC_PORT_STAT(mac_tx_pfc_pri2_pkt_num), + SPNIC_PORT_STAT(mac_tx_pfc_pri3_pkt_num), + SPNIC_PORT_STAT(mac_tx_pfc_pri4_pkt_num), + SPNIC_PORT_STAT(mac_tx_pfc_pri5_pkt_num), + SPNIC_PORT_STAT(mac_tx_pfc_pri6_pkt_num), + SPNIC_PORT_STAT(mac_tx_pfc_pri7_pkt_num), + SPNIC_PORT_STAT(mac_tx_control_pkt_num), + SPNIC_PORT_STAT(mac_tx_err_all_pkt_num), + SPNIC_PORT_STAT(mac_tx_from_app_good_pkt_num), + SPNIC_PORT_STAT(mac_tx_from_app_bad_pkt_num), +}; + +static char g_spnic_priv_flags_strings[][ETH_GSTRING_LEN] = { + "Symmetric-RSS", +}; + +u32 spnic_get_io_stats_size(struct spnic_nic_dev *nic_dev) +{ + u32 count; + + count = ARRAY_LEN(spnic_nic_dev_stats) + + ARRAY_LEN(spnic_nic_dev_stats_extern) + + (ARRAY_LEN(spnic_tx_queue_stats) + + ARRAY_LEN(spnic_tx_queue_stats_extern) + + ARRAY_LEN(spnic_rx_queue_stats) + + ARRAY_LEN(spnic_rx_queue_stats_extern)) * nic_dev->max_qps; + + return count; +} + +#define GET_VALUE_OF_PTR(size, ptr) ( \ + (size) == sizeof(u64) ? *(u64 *)(ptr) : \ + (size) == sizeof(u32) ? *(u32 *)(ptr) : \ + (size) == sizeof(u16) ? 
*(u16 *)(ptr) : *(u8 *)(ptr) \ +) + +#define DEV_STATS_PACK(items, item_idx, array, stats_ptr) do { \ + int j; \ + for (j = 0; j < ARRAY_LEN(array); j++) { \ + memcpy((items)[item_idx].name, (array)[j].name, \ + SPNIC_SHOW_ITEM_LEN); \ + (items)[item_idx].hexadecimal = 0; \ + (items)[item_idx].value = \ + GET_VALUE_OF_PTR((array)[j].size, \ + (char *)(stats_ptr) + (array)[j].offset); \ + (item_idx)++; \ + } \ +} while (0) + +#define QUEUE_STATS_PACK(items, item_idx, array, stats_ptr, qid) do { \ + int j; \ + for (j = 0; j < ARRAY_LEN(array); j++) { \ + memcpy((items)[item_idx].name, (array)[j].name, \ + SPNIC_SHOW_ITEM_LEN); \ + snprintf((items)[item_idx].name, SPNIC_SHOW_ITEM_LEN, \ + (array)[j].name, (qid)); \ + (items)[item_idx].hexadecimal = 0; \ + (items)[item_idx].value = \ + GET_VALUE_OF_PTR((array)[j].size, \ + (char *)(stats_ptr) + (array)[j].offset); \ + (item_idx)++; \ + } \ +} while (0) + +void spnic_get_io_stats(struct spnic_nic_dev *nic_dev, void *stats) +{ + struct spnic_show_item *items = stats; + int item_idx = 0; + u16 qid; + + DEV_STATS_PACK(items, item_idx, spnic_nic_dev_stats, &nic_dev->stats); + DEV_STATS_PACK(items, item_idx, spnic_nic_dev_stats_extern, + &nic_dev->stats); + + for (qid = 0; qid < nic_dev->max_qps; qid++) { + QUEUE_STATS_PACK(items, item_idx, spnic_tx_queue_stats, + &nic_dev->txqs[qid].txq_stats, qid); + QUEUE_STATS_PACK(items, item_idx, spnic_tx_queue_stats_extern, + &nic_dev->txqs[qid].txq_stats, qid); + } + + for (qid = 0; qid < nic_dev->max_qps; qid++) { + QUEUE_STATS_PACK(items, item_idx, spnic_rx_queue_stats, + &nic_dev->rxqs[qid].rxq_stats, qid); + QUEUE_STATS_PACK(items, item_idx, spnic_rx_queue_stats_extern, + &nic_dev->rxqs[qid].rxq_stats, qid); + } +} + +static char spnic_test_strings[][ETH_GSTRING_LEN] = { + "Internal lb test (on/offline)", + "External lb test (external_lb)", +}; + +int spnic_get_sset_count(struct net_device *netdev, int sset) +{ + int count = 0, q_num = 0; + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + + switch (sset) { + case ETH_SS_TEST: + return ARRAY_LEN(spnic_test_strings); + case ETH_SS_STATS: + q_num = nic_dev->q_params.num_qps; + count = ARRAY_LEN(spnic_netdev_stats) + + ARRAY_LEN(spnic_nic_dev_stats) + + ARRAY_LEN(spnic_function_stats) + + (ARRAY_LEN(spnic_tx_queue_stats) + + ARRAY_LEN(spnic_rx_queue_stats)) * q_num; + + if (!SPNIC_FUNC_IS_VF(nic_dev->hwdev)) + count += ARRAY_LEN(spnic_port_stats); + + return count; + case ETH_SS_PRIV_FLAGS: + return ARRAY_LEN(g_spnic_priv_flags_strings); + default: + return -EOPNOTSUPP; + } +} + +static void get_drv_queue_stats(struct spnic_nic_dev *nic_dev, u64 *data) +{ + struct spnic_txq_stats txq_stats; + struct spnic_rxq_stats rxq_stats; + u16 i = 0, j = 0, qid = 0; + char *p = NULL; + + for (qid = 0; qid < nic_dev->q_params.num_qps; qid++) { + if (!nic_dev->txqs) + break; + + spnic_txq_get_stats(&nic_dev->txqs[qid], &txq_stats); + for (j = 0; j < ARRAY_LEN(spnic_tx_queue_stats); j++, i++) { + p = (char *)(&txq_stats) + + spnic_tx_queue_stats[j].offset; + data[i] = (spnic_tx_queue_stats[j].size == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + } + + for (qid = 0; qid < nic_dev->q_params.num_qps; qid++) { + if (!nic_dev->rxqs) + break; + + spnic_rxq_get_stats(&nic_dev->rxqs[qid], &rxq_stats); + for (j = 0; j < ARRAY_LEN(spnic_rx_queue_stats); j++, i++) { + p = (char *)(&rxq_stats) + + spnic_rx_queue_stats[j].offset; + data[i] = (spnic_rx_queue_stats[j].size == + sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; + } + } +} + +static u16 get_ethtool_port_stats(struct spnic_nic_dev *nic_dev, u64 *data) +{ + struct mag_cmd_port_stats *port_stats; + char *p = NULL; + u16 i = 0, j = 0; + int err; + + port_stats = kzalloc(sizeof(*port_stats), GFP_KERNEL); + if (!port_stats) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to malloc port stats\n"); + memset(&data[i], 0, + ARRAY_LEN(spnic_port_stats) * sizeof(*data)); + i += ARRAY_LEN(spnic_port_stats); + return i; + } + + err = spnic_get_phy_port_stats(nic_dev->hwdev, port_stats); + if (err) + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to get port stats from fw\n"); + + for (j = 0; j < ARRAY_LEN(spnic_port_stats); j++, i++) { + p = (char *)(port_stats) + spnic_port_stats[j].offset; + data[i] = (spnic_port_stats[j].size == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + + kfree(port_stats); + + return i; +} + +void spnic_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + struct rtnl_link_stats64 temp; + const struct rtnl_link_stats64 *net_stats = NULL; + struct spnic_nic_stats *nic_stats = NULL; + + struct spnic_vport_stats vport_stats = {0}; + u16 i = 0, j = 0; + char *p = NULL; + int err; + + net_stats = dev_get_stats(netdev, &temp); + for (j = 0; j < ARRAY_LEN(spnic_netdev_stats); j++, i++) { + p = (char *)(net_stats) + spnic_netdev_stats[j].offset; + data[i] = GET_VALUE_OF_PTR(spnic_netdev_stats[j].size, p); + } + + nic_stats = &nic_dev->stats; + for (j = 0; j < ARRAY_LEN(spnic_nic_dev_stats); j++, i++) { + p = (char *)(nic_stats) + spnic_nic_dev_stats[j].offset; + data[i] = GET_VALUE_OF_PTR(spnic_nic_dev_stats[j].size, p); + } + + err = spnic_get_vport_stats(nic_dev->hwdev, &vport_stats); + if (err) + nicif_err(nic_dev, drv, netdev, + "Failed to get function stats from fw\n"); + + for (j = 0; j < ARRAY_LEN(spnic_function_stats); j++, i++) { + p = (char *)(&vport_stats) + spnic_function_stats[j].offset; + data[i] = GET_VALUE_OF_PTR(spnic_function_stats[j].size, p); + } + + if (!SPNIC_FUNC_IS_VF(nic_dev->hwdev)) + i += get_ethtool_port_stats(nic_dev, data + i); + + get_drv_queue_stats(nic_dev, data + i); +} + +static u16 get_drv_dev_strings(struct spnic_nic_dev *nic_dev, char *p) +{ + u16 i, cnt = 0; + + for (i = 0; i < ARRAY_LEN(spnic_netdev_stats); i++) { + memcpy(p, spnic_netdev_stats[i].name, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + cnt++; + } + + for (i = 0; i < ARRAY_LEN(spnic_nic_dev_stats); i++) { + memcpy(p, spnic_nic_dev_stats[i].name, ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + cnt++; + } + + return cnt; +} + +static u16 get_hw_stats_strings(struct spnic_nic_dev *nic_dev, char *p) +{ + u16 i, cnt = 0; + + for (i = 0; i < ARRAY_LEN(spnic_function_stats); i++) { + memcpy(p, spnic_function_stats[i].name, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + cnt++; + } + + if (!SPNIC_FUNC_IS_VF(nic_dev->hwdev)) { + for (i = 0; i < ARRAY_LEN(spnic_port_stats); i++) { + memcpy(p, spnic_port_stats[i].name, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + cnt++; + } + } + + return cnt; +} + +static u16 get_qp_stats_strings(struct spnic_nic_dev *nic_dev, char *p) +{ + u16 i = 0, j = 0, cnt = 0; + + for (i = 0; i < nic_dev->q_params.num_qps; i++) { + for (j = 0; j < ARRAY_LEN(spnic_tx_queue_stats); j++) { + sprintf(p, spnic_tx_queue_stats[j].name, i); + p += ETH_GSTRING_LEN; + cnt++; + } + } + + for (i = 0; i < nic_dev->q_params.num_qps; i++) { + for (j = 0; j < ARRAY_LEN(spnic_rx_queue_stats); j++) { + sprintf(p, 
spnic_rx_queue_stats[j].name, i); + p += ETH_GSTRING_LEN; + cnt++; + } + } + + return cnt; +} + +void spnic_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + char *p = (char *)data; + u16 offset = 0; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *spnic_test_strings, sizeof(spnic_test_strings)); + return; + case ETH_SS_STATS: + offset = get_drv_dev_strings(nic_dev, p); + offset += get_hw_stats_strings(nic_dev, + p + offset * ETH_GSTRING_LEN); + get_qp_stats_strings(nic_dev, p + offset * ETH_GSTRING_LEN); + + return; + case ETH_SS_PRIV_FLAGS: + memcpy(data, g_spnic_priv_flags_strings, + sizeof(g_spnic_priv_flags_strings)); + return; + default: + nicif_err(nic_dev, drv, netdev, + "Invalid string set %u.", stringset); + return; + } +} + +static const u32 spnic_mag_link_mode_ge[] = { + ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, + ETHTOOL_LINK_MODE_1000baseX_Full_BIT, +}; + +static const u32 spnic_mag_link_mode_10ge_base_r[] = { + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, + ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT, +}; + +static const u32 spnic_mag_link_mode_25ge_base_r[] = { + ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, + ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, +}; + +static const u32 spnic_mag_link_mode_40ge_base_r4[] = { + ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, + ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, + ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, + ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, +}; + +static const u32 spnic_mag_link_mode_50ge_base_r[] = { + ETHTOOL_LINK_MODE_50000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_50000baseSR_Full_BIT, + ETHTOOL_LINK_MODE_50000baseCR_Full_BIT, +}; + +static const u32 spnic_mag_link_mode_50ge_base_r2[] = { + ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, + ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, + ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, +}; + +static const u32 spnic_mag_link_mode_100ge_base_r[] = { + ETHTOOL_LINK_MODE_100000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_100000baseSR_Full_BIT, + ETHTOOL_LINK_MODE_100000baseCR_Full_BIT, +}; + +static const u32 spnic_mag_link_mode_100ge_base_r2[] = { + ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT, + ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT, + ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT, +}; + +static const u32 spnic_mag_link_mode_100ge_base_r4[] = { + ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, +}; + +static const u32 spnic_mag_link_mode_200ge_base_r2[] = { + ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT, + ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT, + ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT, +}; + +static const u32 spnic_mag_link_mode_200ge_base_r4[] = { + ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT, + ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT, + ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT, +}; + +struct hw2ethtool_link_mode { + const u32 *link_mode_bit_arr; + u32 arr_size; + u32 speed; +}; + +static const struct hw2ethtool_link_mode + hw2ethtool_link_mode_table[LINK_MODE_MAX_NUMBERS] = { + [LINK_MODE_GE] = { + .link_mode_bit_arr = spnic_mag_link_mode_ge, + .arr_size = ARRAY_LEN(spnic_mag_link_mode_ge), + .speed = SPEED_1000, + }, + [LINK_MODE_10GE_BASE_R] = { + .link_mode_bit_arr = spnic_mag_link_mode_10ge_base_r, + .arr_size = 
ARRAY_LEN(spnic_mag_link_mode_10ge_base_r),
+		.speed = SPEED_10000,
+	},
+	[LINK_MODE_25GE_BASE_R] = {
+		.link_mode_bit_arr = spnic_mag_link_mode_25ge_base_r,
+		.arr_size = ARRAY_LEN(spnic_mag_link_mode_25ge_base_r),
+		.speed = SPEED_25000,
+	},
+	[LINK_MODE_40GE_BASE_R4] = {
+		.link_mode_bit_arr = spnic_mag_link_mode_40ge_base_r4,
+		.arr_size = ARRAY_LEN(spnic_mag_link_mode_40ge_base_r4),
+		.speed = SPEED_40000,
+	},
+	[LINK_MODE_50GE_BASE_R] = {
+		.link_mode_bit_arr = spnic_mag_link_mode_50ge_base_r,
+		.arr_size = ARRAY_LEN(spnic_mag_link_mode_50ge_base_r),
+		.speed = SPEED_50000,
+	},
+	[LINK_MODE_50GE_BASE_R2] = {
+		.link_mode_bit_arr = spnic_mag_link_mode_50ge_base_r2,
+		.arr_size = ARRAY_LEN(spnic_mag_link_mode_50ge_base_r2),
+		.speed = SPEED_50000,
+	},
+	[LINK_MODE_100GE_BASE_R] = {
+		.link_mode_bit_arr = spnic_mag_link_mode_100ge_base_r,
+		.arr_size = ARRAY_LEN(spnic_mag_link_mode_100ge_base_r),
+		.speed = SPEED_100000,
+	},
+	[LINK_MODE_100GE_BASE_R2] = {
+		.link_mode_bit_arr = spnic_mag_link_mode_100ge_base_r2,
+		.arr_size = ARRAY_LEN(spnic_mag_link_mode_100ge_base_r2),
+		.speed = SPEED_100000,
+	},
+	[LINK_MODE_100GE_BASE_R4] = {
+		.link_mode_bit_arr = spnic_mag_link_mode_100ge_base_r4,
+		.arr_size = ARRAY_LEN(spnic_mag_link_mode_100ge_base_r4),
+		.speed = SPEED_100000,
+	},
+	[LINK_MODE_200GE_BASE_R2] = {
+		.link_mode_bit_arr = spnic_mag_link_mode_200ge_base_r2,
+		.arr_size = ARRAY_LEN(spnic_mag_link_mode_200ge_base_r2),
+		.speed = SPEED_200000,
+	},
+	[LINK_MODE_200GE_BASE_R4] = {
+		.link_mode_bit_arr = spnic_mag_link_mode_200ge_base_r4,
+		.arr_size = ARRAY_LEN(spnic_mag_link_mode_200ge_base_r4),
+		.speed = SPEED_200000,
+	},
+};
+
+#define GET_SUPPORTED_MODE 0
+#define GET_ADVERTISED_MODE 1
+
+struct cmd_link_settings {
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
+
+	u32 speed;
+	u8 duplex;
+	u8 port;
+	u8 autoneg;
+};
+
+#define ETHTOOL_ADD_SUPPORTED_LINK_MODE(ecmd, mode) \
+	set_bit(ETHTOOL_LINK_MODE_##mode##_BIT, (ecmd)->supported)
+#define ETHTOOL_ADD_ADVERTISED_LINK_MODE(ecmd, mode) \
+	set_bit(ETHTOOL_LINK_MODE_##mode##_BIT, (ecmd)->advertising)
+
+#define ETHTOOL_ADD_SUPPORTED_SPEED_LINK_MODE(ecmd, mode) \
+do { \
+	u32 i; \
+	for (i = 0; i < hw2ethtool_link_mode_table[mode].arr_size; i++) \
+		set_bit(hw2ethtool_link_mode_table[mode].link_mode_bit_arr[i], \
+			(ecmd)->supported); \
+} while (0)
+
+#define ETHTOOL_ADD_ADVERTISED_SPEED_LINK_MODE(ecmd, mode) \
+do { \
+	u32 i; \
+	for (i = 0; i < hw2ethtool_link_mode_table[mode].arr_size; i++) \
+		set_bit(hw2ethtool_link_mode_table[mode].link_mode_bit_arr[i], \
+			(ecmd)->advertising); \
+} while (0)
+
+/* Related to enum mag_cmd_port_speed */
+static u32 hw_to_ethtool_speed[] = {
+	(u32)SPEED_UNKNOWN, SPEED_10, SPEED_100, SPEED_1000, SPEED_10000,
+	SPEED_25000, SPEED_40000, SPEED_50000, SPEED_100000, SPEED_200000
+};
+
+static int spnic_ethtool_to_hw_speed_level(u32 speed)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_LEN(hw_to_ethtool_speed); i++) {
+		if (hw_to_ethtool_speed[i] == speed)
+			break;
+	}
+
+	return i;
+}
+
+static void
+spnic_add_ethtool_link_mode(struct cmd_link_settings *link_settings, u32 hw_link_mode, u32 name)
+{
+	u32 link_mode;
+
+	for (link_mode = 0; link_mode < LINK_MODE_MAX_NUMBERS; link_mode++) {
+		if (hw_link_mode & BIT(link_mode)) {
+			/* index the table by the decoded bit position, not the
+			 * raw hw bitmask, to stay inside the table bounds
+			 */
+			if (name == GET_SUPPORTED_MODE)
+				ETHTOOL_ADD_SUPPORTED_SPEED_LINK_MODE(link_settings, link_mode);
+			else
+				ETHTOOL_ADD_ADVERTISED_SPEED_LINK_MODE(link_settings, link_mode);
+		}
+	}
+}
+
+static int
spnic_link_speed_set(struct spnic_nic_dev *nic_dev, + struct cmd_link_settings *link_settings, + struct nic_port_info *port_info) +{ + u8 link_state = 0; + int err; + + if (port_info->supported_mode != LINK_MODE_UNKNOWN) + spnic_add_ethtool_link_mode(link_settings, port_info->supported_mode, + GET_SUPPORTED_MODE); + if (port_info->advertised_mode != LINK_MODE_UNKNOWN) + spnic_add_ethtool_link_mode(link_settings, port_info->advertised_mode, + GET_ADVERTISED_MODE); + + err = spnic_get_link_state(nic_dev->hwdev, &link_state); + if (!err && link_state) { + link_settings->speed = + port_info->speed < ARRAY_LEN(hw_to_ethtool_speed) ? + hw_to_ethtool_speed[port_info->speed] : + (u32)SPEED_UNKNOWN; + + link_settings->duplex = port_info->duplex; + } else { + link_settings->speed = (u32)SPEED_UNKNOWN; + link_settings->duplex = DUPLEX_UNKNOWN; + } + + return 0; +} + +static void spnic_link_port_type(struct cmd_link_settings *link_settings, u8 port_type) +{ + switch (port_type) { + case MAG_CMD_WIRE_TYPE_ELECTRIC: + ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, TP); + ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, TP); + link_settings->port = PORT_TP; + break; + + case MAG_CMD_WIRE_TYPE_AOC: + case MAG_CMD_WIRE_TYPE_MM: + case MAG_CMD_WIRE_TYPE_SM: + ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, FIBRE); + ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, FIBRE); + link_settings->port = PORT_FIBRE; + break; + + case MAG_CMD_WIRE_TYPE_COPPER: + ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, FIBRE); + ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, FIBRE); + link_settings->port = PORT_DA; + break; + + case MAG_CMD_WIRE_TYPE_BACKPLANE: + ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, Backplane); + ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, Backplane); + link_settings->port = PORT_NONE; + break; + + default: + link_settings->port = PORT_OTHER; + break; + } +} + +static int get_link_pause_settings(struct spnic_nic_dev *nic_dev, + struct cmd_link_settings *link_settings) +{ + struct nic_pause_config nic_pause = {0}; + int err; + + err = spnic_get_pause_info(nic_dev->hwdev, &nic_pause); + if (err) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to get pauseparam from hw\n"); + return err; + } + + ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, Pause); + if (nic_pause.rx_pause && nic_pause.tx_pause) { + ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, Pause); + } else if (nic_pause.tx_pause) { + ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, Asym_Pause); + } else if (nic_pause.rx_pause) { + ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, Pause); + ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, Asym_Pause); + } + + return 0; +} + +int get_link_settings(struct net_device *netdev, struct cmd_link_settings *link_settings) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + struct nic_port_info port_info = {0}; + int err; + + err = spnic_get_port_info(nic_dev->hwdev, &port_info, SPHW_CHANNEL_NIC); + if (err) { + nicif_err(nic_dev, drv, netdev, "Failed to get port info\n"); + return err; + } + + err = spnic_link_speed_set(nic_dev, link_settings, &port_info); + if (err) + return err; + + spnic_link_port_type(link_settings, port_info.port_type); + + link_settings->autoneg = port_info.autoneg_state == PORT_CFG_AN_ON ? 
+				 AUTONEG_ENABLE : AUTONEG_DISABLE;
+	if (port_info.autoneg_cap)
+		ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, Autoneg);
+	if (port_info.autoneg_state == PORT_CFG_AN_ON)
+		ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, Autoneg);
+
+	if (!SPNIC_FUNC_IS_VF(nic_dev->hwdev))
+		err = get_link_pause_settings(nic_dev, link_settings);
+
+	return err;
+}
+
+int spnic_get_link_ksettings(struct net_device *netdev,
+			     struct ethtool_link_ksettings *link_settings)
+{
+	struct cmd_link_settings settings = { { 0 } };
+	struct ethtool_link_settings *base = &link_settings->base;
+	int err;
+
+	ethtool_link_ksettings_zero_link_mode(link_settings, supported);
+	ethtool_link_ksettings_zero_link_mode(link_settings, advertising);
+
+	err = get_link_settings(netdev, &settings);
+	if (err)
+		return err;
+
+	bitmap_copy(link_settings->link_modes.supported, settings.supported,
+		    __ETHTOOL_LINK_MODE_MASK_NBITS);
+	bitmap_copy(link_settings->link_modes.advertising, settings.advertising,
+		    __ETHTOOL_LINK_MODE_MASK_NBITS);
+
+	base->autoneg = settings.autoneg;
+	base->speed = settings.speed;
+	base->duplex = settings.duplex;
+	base->port = settings.port;
+
+	return 0;
+}
+
+static bool spnic_is_support_speed(u32 supported_link, u32 speed)
+{
+	u32 link_mode;
+
+	for (link_mode = 0; link_mode < LINK_MODE_MAX_NUMBERS; link_mode++) {
+		if (!(supported_link & BIT(link_mode)))
+			continue;
+
+		if (hw2ethtool_link_mode_table[link_mode].speed == speed)
+			return true;
+	}
+
+	return false;
+}
+
+static int spnic_is_speed_legal(struct spnic_nic_dev *nic_dev,
+				struct nic_port_info *port_info, u32 speed)
+{
+	struct net_device *netdev = nic_dev->netdev;
+	int speed_level = 0;
+
+	if (port_info->supported_mode == LINK_MODE_UNKNOWN ||
+	    port_info->advertised_mode == LINK_MODE_UNKNOWN) {
+		nicif_err(nic_dev, drv, netdev, "Unknown supported link modes\n");
+		return -EAGAIN;
+	}
+
+	speed_level = spnic_ethtool_to_hw_speed_level(speed);
+	if (speed_level >= PORT_SPEED_UNKNOWN ||
+	    !spnic_is_support_speed(port_info->supported_mode, speed)) {
+		nicif_err(nic_dev, drv, netdev,
+			  "Not supported speed: %u\n", speed);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int get_link_settings_type(struct spnic_nic_dev *nic_dev,
+				  u8 autoneg, u32 speed, u32 *set_settings)
+{
+	struct nic_port_info port_info = {0};
+	int err;
+
+	err = spnic_get_port_info(nic_dev->hwdev, &port_info, SPHW_CHANNEL_NIC);
+	if (err) {
+		nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to get current settings\n");
+		return -EAGAIN;
+	}
+
+	/* Always set autonegotiation */
+	if (port_info.autoneg_cap)
+		*set_settings |= HILINK_LINK_SET_AUTONEG;
+
+	if (autoneg == AUTONEG_ENABLE) {
+		if (!port_info.autoneg_cap) {
+			nicif_err(nic_dev, drv, nic_dev->netdev, "Autoneg not supported\n");
+			return -EOPNOTSUPP;
+		}
+	} else if (speed != (u32)SPEED_UNKNOWN) {
+		/* Set speed only when autoneg is disabled */
+		err = spnic_is_speed_legal(nic_dev, &port_info, speed);
+		if (err)
+			return err;
+
+		*set_settings |= HILINK_LINK_SET_SPEED;
+	} else {
+		nicif_err(nic_dev, drv, nic_dev->netdev, "Need to set speed when autoneg is off\n");
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+static int spnic_set_settings_to_hw(struct spnic_nic_dev *nic_dev,
+				    u32 set_settings, u8 autoneg, u32 speed)
+{
+	struct net_device *netdev = nic_dev->netdev;
+	struct spnic_link_ksettings settings = {0};
+	int speed_level = 0;
+	char set_link_str[128] = {0};
+	int err = 0;
+
+	snprintf(set_link_str, sizeof(set_link_str), "%s",
+		 (set_settings & HILINK_LINK_SET_AUTONEG) ?
+		 (autoneg ?
"autong enable " : "autong disable ") : ""); + if (set_settings & HILINK_LINK_SET_SPEED) { + speed_level = spnic_ethtool_to_hw_speed_level(speed); + snprintf(set_link_str, sizeof(set_link_str), + "%sspeed %u ", set_link_str, speed); + } + + settings.valid_bitmap = set_settings; + settings.autoneg = autoneg ? PORT_CFG_AN_ON : PORT_CFG_AN_OFF; + settings.speed = (u8)speed_level; + + err = spnic_set_link_settings(nic_dev->hwdev, &settings); + if (err) + nicif_err(nic_dev, drv, netdev, "Set %sfailed\n", + set_link_str); + else + nicif_info(nic_dev, drv, netdev, "Set %ssuccess\n", + set_link_str); + + return err; +} + +int set_link_settings(struct net_device *netdev, u8 autoneg, u32 speed) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + u32 set_settings = 0; + int err = 0; + + err = get_link_settings_type(nic_dev, autoneg, speed, &set_settings); + if (err) + return err; + + if (set_settings) + err = spnic_set_settings_to_hw(nic_dev, set_settings, autoneg, speed); + else + nicif_info(nic_dev, drv, netdev, "Nothing changed, exiting without setting anything\n"); + + return err; +} + +int spnic_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *link_settings) +{ + /* Only support to set autoneg and speed */ + return set_link_settings(netdev, link_settings->base.autoneg, + link_settings->base.speed); +} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_filter.c b/drivers/net/ethernet/ramaxel/spnic/spnic_filter.c new file mode 100644 index 000000000000..d7ca2bed454b --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/spnic_filter.c @@ -0,0 +1,411 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/device.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/etherdevice.h> +#include <linux/netdevice.h> +#include <linux/debugfs.h> +#include <linux/module.h> +#include <linux/moduleparam.h> + +#include "sphw_hw.h" +#include "sphw_crm.h" +#include "spnic_nic_dev.h" + +enum spnic_rx_mod { + SPNIC_RX_MODE_UC = 1 << 0, + SPNIC_RX_MODE_MC = 1 << 1, + SPNIC_RX_MODE_BC = 1 << 2, + SPNIC_RX_MODE_MC_ALL = 1 << 3, + SPNIC_RX_MODE_PROMISC = 1 << 4, +}; + +static int spnic_uc_sync(struct net_device *netdev, u8 *addr) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + + return spnic_set_mac(nic_dev->hwdev, addr, 0, sphw_global_func_id(nic_dev->hwdev), + SPHW_CHANNEL_NIC); +} + +static int spnic_uc_unsync(struct net_device *netdev, u8 *addr) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + + /* The addr is in use */ + if (ether_addr_equal(addr, netdev->dev_addr)) + return 0; + + return spnic_del_mac(nic_dev->hwdev, addr, 0, sphw_global_func_id(nic_dev->hwdev), + SPHW_CHANNEL_NIC); +} + +void spnic_clean_mac_list_filter(struct spnic_nic_dev *nic_dev) +{ + struct net_device *netdev = nic_dev->netdev; + struct spnic_mac_filter *ftmp = NULL; + struct spnic_mac_filter *f = NULL; + + list_for_each_entry_safe(f, ftmp, &nic_dev->uc_filter_list, list) { + if (f->state == SPNIC_MAC_HW_SYNCED) + spnic_uc_unsync(netdev, f->addr); + list_del(&f->list); + kfree(f); + } + + list_for_each_entry_safe(f, ftmp, &nic_dev->mc_filter_list, list) { + if (f->state == SPNIC_MAC_HW_SYNCED) + spnic_uc_unsync(netdev, f->addr); + list_del(&f->list); + kfree(f); + } +} + +static struct spnic_mac_filter *spnic_find_mac(struct list_head *filter_list, u8 *addr) +{ + struct spnic_mac_filter *f = NULL; 
+ + list_for_each_entry(f, filter_list, list) { + if (ether_addr_equal(addr, f->addr)) + return f; + } + return NULL; +} + +static struct spnic_mac_filter *spnic_add_filter(struct spnic_nic_dev *nic_dev, + struct list_head *mac_filter_list, u8 *addr) +{ + struct spnic_mac_filter *f; + + f = kzalloc(sizeof(*f), GFP_ATOMIC); + if (!f) + goto out; + + ether_addr_copy(f->addr, addr); + + INIT_LIST_HEAD(&f->list); + list_add_tail(&f->list, mac_filter_list); + + f->state = SPNIC_MAC_WAIT_HW_SYNC; + set_bit(SPNIC_MAC_FILTER_CHANGED, &nic_dev->flags); + +out: + return f; +} + +static void spnic_del_filter(struct spnic_nic_dev *nic_dev, struct spnic_mac_filter *f) +{ + set_bit(SPNIC_MAC_FILTER_CHANGED, &nic_dev->flags); + + if (f->state == SPNIC_MAC_WAIT_HW_SYNC) { + /* have not added to hw, delete it directly */ + list_del(&f->list); + kfree(f); + return; + } + + f->state = SPNIC_MAC_WAIT_HW_UNSYNC; +} + +static struct spnic_mac_filter *spnic_mac_filter_entry_clone(struct spnic_mac_filter *src) +{ + struct spnic_mac_filter *f; + + f = kzalloc(sizeof(*f), GFP_ATOMIC); + if (!f) + return NULL; + + *f = *src; + INIT_LIST_HEAD(&f->list); + + return f; +} + +static void spnic_undo_del_filter_entries(struct list_head *filter_list, struct list_head *from) +{ + struct spnic_mac_filter *ftmp = NULL; + struct spnic_mac_filter *f = NULL; + + list_for_each_entry_safe(f, ftmp, from, list) { + if (spnic_find_mac(filter_list, f->addr)) + continue; + + if (f->state == SPNIC_MAC_HW_SYNCED) + f->state = SPNIC_MAC_WAIT_HW_UNSYNC; + + list_move_tail(&f->list, filter_list); + } +} + +static void spnic_undo_add_filter_entries(struct list_head *filter_list, struct list_head *from) +{ + struct spnic_mac_filter *ftmp = NULL; + struct spnic_mac_filter *tmp = NULL; + struct spnic_mac_filter *f = NULL; + + list_for_each_entry_safe(f, ftmp, from, list) { + tmp = spnic_find_mac(filter_list, f->addr); + if (tmp && tmp->state == SPNIC_MAC_HW_SYNCED) + tmp->state = SPNIC_MAC_WAIT_HW_SYNC; + } +} + +static void spnic_cleanup_filter_list(struct list_head *head) +{ + struct spnic_mac_filter *ftmp = NULL; + struct spnic_mac_filter *f = NULL; + + list_for_each_entry_safe(f, ftmp, head, list) { + list_del(&f->list); + kfree(f); + } +} + +static int spnic_mac_filter_sync_hw(struct spnic_nic_dev *nic_dev, struct list_head *del_list, + struct list_head *add_list) +{ + struct net_device *netdev = nic_dev->netdev; + struct spnic_mac_filter *ftmp = NULL; + struct spnic_mac_filter *f = NULL; + int err = 0, add_count = 0; + + if (!list_empty(del_list)) { + list_for_each_entry_safe(f, ftmp, del_list, list) { + err = spnic_uc_unsync(netdev, f->addr); + if (err) { /* ignore errors when delete mac */ + nic_err(&nic_dev->pdev->dev, "Failed to delete mac\n"); + } + + list_del(&f->list); + kfree(f); + } + } + + if (!list_empty(add_list)) { + list_for_each_entry_safe(f, ftmp, add_list, list) { + err = spnic_uc_sync(netdev, f->addr); + if (err) { + nic_err(&nic_dev->pdev->dev, "Failed to add mac\n"); + return err; + } + + add_count++; + list_del(&f->list); + kfree(f); + } + } + + return add_count; +} + +static int spnic_mac_filter_sync(struct spnic_nic_dev *nic_dev, + struct list_head *mac_filter_list, bool uc) +{ + struct net_device *netdev = nic_dev->netdev; + struct list_head tmp_del_list, tmp_add_list; + struct spnic_mac_filter *fclone = NULL; + struct spnic_mac_filter *ftmp = NULL; + struct spnic_mac_filter *f = NULL; + int err = 0, add_count = 0; + + INIT_LIST_HEAD(&tmp_del_list); + INIT_LIST_HEAD(&tmp_add_list); + + list_for_each_entry_safe(f, 
ftmp, mac_filter_list, list) {
+		if (f->state != SPNIC_MAC_WAIT_HW_UNSYNC)
+			continue;
+
+		f->state = SPNIC_MAC_HW_UNSYNCED;
+		list_move_tail(&f->list, &tmp_del_list);
+	}
+
+	list_for_each_entry_safe(f, ftmp, mac_filter_list, list) {
+		if (f->state != SPNIC_MAC_WAIT_HW_SYNC)
+			continue;
+
+		fclone = spnic_mac_filter_entry_clone(f);
+		if (!fclone) {
+			err = -ENOMEM;
+			break;
+		}
+
+		f->state = SPNIC_MAC_HW_SYNCED;
+		list_add_tail(&fclone->list, &tmp_add_list);
+	}
+
+	if (err) {
+		spnic_undo_del_filter_entries(mac_filter_list, &tmp_del_list);
+		spnic_undo_add_filter_entries(mac_filter_list, &tmp_add_list);
+		nicif_err(nic_dev, drv, netdev, "Failed to clone mac_filter_entry\n");
+
+		spnic_cleanup_filter_list(&tmp_del_list);
+		spnic_cleanup_filter_list(&tmp_add_list);
+		return -ENOMEM;
+	}
+
+	add_count = spnic_mac_filter_sync_hw(nic_dev, &tmp_del_list, &tmp_add_list);
+	if (list_empty(&tmp_add_list))
+		return add_count;
+
+	/* there were errors when adding macs to hw, delete all macs in hw */
+	spnic_undo_add_filter_entries(mac_filter_list, &tmp_add_list);
+	/* VFs can't enter promisc mode,
+	 * so we can't delete any other uc mac
+	 */
+	if (!SPNIC_FUNC_IS_VF(nic_dev->hwdev) || !uc) {
+		list_for_each_entry_safe(f, ftmp, mac_filter_list, list) {
+			if (f->state != SPNIC_MAC_HW_SYNCED)
+				continue;
+
+			fclone = spnic_mac_filter_entry_clone(f);
+			if (!fclone)
+				break;
+
+			f->state = SPNIC_MAC_WAIT_HW_SYNC;
+			list_add_tail(&fclone->list, &tmp_del_list);
+		}
+	}
+
+	spnic_cleanup_filter_list(&tmp_add_list);
+	spnic_mac_filter_sync_hw(nic_dev, &tmp_del_list, &tmp_add_list);
+
+	/* need to enter promisc/allmulti mode */
+	return -ENOMEM;
+}
+
+static void spnic_mac_filter_sync_all(struct spnic_nic_dev *nic_dev)
+{
+	struct net_device *netdev = nic_dev->netdev;
+	int add_count;
+
+	if (test_bit(SPNIC_MAC_FILTER_CHANGED, &nic_dev->flags)) {
+		clear_bit(SPNIC_MAC_FILTER_CHANGED, &nic_dev->flags);
+		add_count = spnic_mac_filter_sync(nic_dev, &nic_dev->uc_filter_list, true);
+		if (add_count < 0 && SPNIC_SUPPORT_PROMISC(nic_dev->hwdev)) {
+			set_bit(SPNIC_PROMISC_FORCE_ON, &nic_dev->rx_mod_state);
+			nicif_info(nic_dev, drv, netdev, "Promisc mode forced on\n");
+		} else if (add_count) {
+			clear_bit(SPNIC_PROMISC_FORCE_ON, &nic_dev->rx_mod_state);
+		}
+
+		add_count = spnic_mac_filter_sync(nic_dev, &nic_dev->mc_filter_list, false);
+		if (add_count < 0 && SPNIC_SUPPORT_ALLMULTI(nic_dev->hwdev)) {
+			set_bit(SPNIC_ALLMULTI_FORCE_ON, &nic_dev->rx_mod_state);
+			nicif_info(nic_dev, drv, netdev, "All multicast mode forced on\n");
+		} else if (add_count) {
+			clear_bit(SPNIC_ALLMULTI_FORCE_ON, &nic_dev->rx_mod_state);
+		}
+	}
+}
+
+#define SPNIC_DEFAULT_RX_MODE	(SPNIC_RX_MODE_UC | SPNIC_RX_MODE_MC | \
+				 SPNIC_RX_MODE_BC)
+
+static void spnic_update_mac_filter(struct spnic_nic_dev *nic_dev,
+				    struct netdev_hw_addr_list *src_list,
+				    struct list_head *filter_list)
+{
+	struct spnic_mac_filter *filter = NULL;
+	struct spnic_mac_filter *ftmp = NULL;
+	struct spnic_mac_filter *f = NULL;
+	struct netdev_hw_addr *ha = NULL;
+
+	/* add addr if not already in the filter list */
+	netif_addr_lock_bh(nic_dev->netdev);
+	netdev_hw_addr_list_for_each(ha, src_list) {
+		filter = spnic_find_mac(filter_list, ha->addr);
+		if (!filter)
+			spnic_add_filter(nic_dev, filter_list, ha->addr);
+		else if (filter->state == SPNIC_MAC_WAIT_HW_UNSYNC)
+			filter->state = SPNIC_MAC_HW_SYNCED;
+	}
+	netif_addr_unlock_bh(nic_dev->netdev);
+
+	/* delete addr if not in netdev list */
+	list_for_each_entry_safe(f, ftmp, filter_list, list) {
+		bool found
= false; + + netif_addr_lock_bh(nic_dev->netdev); + netdev_hw_addr_list_for_each(ha, src_list) + if (ether_addr_equal(ha->addr, f->addr)) { + found = true; + break; + } + netif_addr_unlock_bh(nic_dev->netdev); + + if (found) + continue; + + spnic_del_filter(nic_dev, f); + } +} + +static void update_mac_filter(struct spnic_nic_dev *nic_dev) +{ + struct net_device *netdev = nic_dev->netdev; + + if (test_and_clear_bit(SPNIC_UPDATE_MAC_FILTER, &nic_dev->flags)) { + spnic_update_mac_filter(nic_dev, &netdev->uc, &nic_dev->uc_filter_list); + spnic_update_mac_filter(nic_dev, &netdev->mc, &nic_dev->mc_filter_list); + } +} + +static void sync_rx_mode_to_hw(struct spnic_nic_dev *nic_dev, int promisc_en, + int allmulti_en) +{ + struct net_device *netdev = nic_dev->netdev; + u32 rx_mod = SPNIC_DEFAULT_RX_MODE; + int err; + + rx_mod |= (promisc_en ? SPNIC_RX_MODE_PROMISC : 0); + rx_mod |= (allmulti_en ? SPNIC_RX_MODE_MC_ALL : 0); + + if (promisc_en != test_bit(SPNIC_HW_PROMISC_ON, &nic_dev->rx_mod_state)) + nicif_info(nic_dev, drv, netdev, "%s promisc mode\n", + promisc_en ? "Enter" : "Left"); + if (allmulti_en != + test_bit(SPNIC_HW_ALLMULTI_ON, &nic_dev->rx_mod_state)) + nicif_info(nic_dev, drv, netdev, "%s all_multi mode\n", + allmulti_en ? "Enter" : "Left"); + + err = spnic_set_rx_mode(nic_dev->hwdev, rx_mod); + if (err) { + nicif_err(nic_dev, drv, netdev, "Failed to set rx_mode\n"); + return; + } + + promisc_en ? set_bit(SPNIC_HW_PROMISC_ON, &nic_dev->rx_mod_state) : + clear_bit(SPNIC_HW_PROMISC_ON, &nic_dev->rx_mod_state); + + allmulti_en ? set_bit(SPNIC_HW_ALLMULTI_ON, &nic_dev->rx_mod_state) : + clear_bit(SPNIC_HW_ALLMULTI_ON, &nic_dev->rx_mod_state); +} + +void spnic_set_rx_mode_work(struct work_struct *work) +{ + struct spnic_nic_dev *nic_dev = + container_of(work, struct spnic_nic_dev, rx_mode_work); + struct net_device *netdev = nic_dev->netdev; + int promisc_en = 0, allmulti_en = 0; + + update_mac_filter(nic_dev); + + spnic_mac_filter_sync_all(nic_dev); + + if (SPNIC_SUPPORT_PROMISC(nic_dev->hwdev)) + promisc_en = !!(netdev->flags & IFF_PROMISC) || + test_bit(SPNIC_PROMISC_FORCE_ON, &nic_dev->rx_mod_state); + + if (SPNIC_SUPPORT_ALLMULTI(nic_dev->hwdev)) + allmulti_en = !!(netdev->flags & IFF_ALLMULTI) || + test_bit(SPNIC_ALLMULTI_FORCE_ON, &nic_dev->rx_mod_state); + + if (promisc_en != test_bit(SPNIC_HW_PROMISC_ON, &nic_dev->rx_mod_state) || + allmulti_en != test_bit(SPNIC_HW_ALLMULTI_ON, &nic_dev->rx_mod_state)) + sync_rx_mode_to_hw(nic_dev, promisc_en, allmulti_en); +} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_irq.c b/drivers/net/ethernet/ramaxel/spnic/spnic_irq.c new file mode 100644 index 000000000000..872a94a73590 --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/spnic_irq.c @@ -0,0 +1,178 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/device.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/etherdevice.h> +#include <linux/netdevice.h> +#include <linux/debugfs.h> + +#include "sphw_hw.h" +#include "sphw_crm.h" +#include "spnic_nic_io.h" +#include "spnic_nic_dev.h" +#include "spnic_tx.h" +#include "spnic_rx.h" + +int spnic_poll(struct napi_struct *napi, int budget) +{ + struct spnic_irq *irq_cfg = container_of(napi, struct spnic_irq, napi); + struct spnic_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev); + int tx_pkts, rx_pkts; + + rx_pkts = 
spnic_rx_poll(irq_cfg->rxq, budget); + + tx_pkts = spnic_tx_poll(irq_cfg->txq, budget); + + if (tx_pkts >= budget || rx_pkts >= budget) + return budget; + + napi_complete(napi); + + sphw_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx, SPHW_MSIX_ENABLE); + + return max(tx_pkts, rx_pkts); +} + +static void qp_add_napi(struct spnic_irq *irq_cfg) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev); + + netif_napi_add(nic_dev->netdev, &irq_cfg->napi, spnic_poll, nic_dev->poll_weight); + napi_enable(&irq_cfg->napi); +} + +static void qp_del_napi(struct spnic_irq *irq_cfg) +{ + napi_disable(&irq_cfg->napi); + netif_napi_del(&irq_cfg->napi); +} + +static irqreturn_t qp_irq(int irq, void *data) +{ + struct spnic_irq *irq_cfg = (struct spnic_irq *)data; + struct spnic_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev); + + /* 1 is resend_timer */ + sphw_misx_intr_clear_resend_bit(nic_dev->hwdev, irq_cfg->msix_entry_idx, 1); + + napi_schedule(&irq_cfg->napi); + return IRQ_HANDLED; +} + +static int spnic_request_irq(struct spnic_irq *irq_cfg, u16 q_id) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev); + struct interrupt_info info = {0}; + int err; + + qp_add_napi(irq_cfg); + + info.msix_index = irq_cfg->msix_entry_idx; + info.lli_set = 0; + info.interrupt_coalesc_set = 1; + info.pending_limt = nic_dev->intr_coalesce[q_id].pending_limt; + info.coalesc_timer_cfg = nic_dev->intr_coalesce[q_id].coalesce_timer_cfg; + info.resend_timer_cfg = nic_dev->intr_coalesce[q_id].resend_timer_cfg; + nic_dev->rxqs[q_id].last_coalesc_timer_cfg = + nic_dev->intr_coalesce[q_id].coalesce_timer_cfg; + nic_dev->rxqs[q_id].last_pending_limt = nic_dev->intr_coalesce[q_id].pending_limt; + err = sphw_set_interrupt_cfg(nic_dev->hwdev, info, SPHW_CHANNEL_NIC); + if (err) { + nicif_err(nic_dev, drv, irq_cfg->netdev, + "Failed to set RX interrupt coalescing attribute.\n"); + qp_del_napi(irq_cfg); + return err; + } + + err = request_irq(irq_cfg->irq_id, &qp_irq, 0, irq_cfg->irq_name, irq_cfg); + if (err) { + nicif_err(nic_dev, drv, irq_cfg->netdev, "Failed to request Rx irq\n"); + qp_del_napi(irq_cfg); + return err; + } + + irq_set_affinity_hint(irq_cfg->irq_id, &irq_cfg->affinity_mask); + + return 0; +} + +static void spnic_release_irq(struct spnic_irq *irq_cfg) +{ + irq_set_affinity_hint(irq_cfg->irq_id, NULL); + synchronize_irq(irq_cfg->irq_id); + free_irq(irq_cfg->irq_id, irq_cfg); + qp_del_napi(irq_cfg); +} + +int spnic_qps_irq_init(struct spnic_nic_dev *nic_dev) +{ + struct pci_dev *pdev = nic_dev->pdev; + struct irq_info *qp_irq_info = NULL; + struct spnic_irq *irq_cfg = NULL; + u16 q_id, i; + u32 local_cpu; + int err; + + for (q_id = 0; q_id < nic_dev->q_params.num_qps; q_id++) { + qp_irq_info = &nic_dev->qps_irq_info[q_id]; + irq_cfg = &nic_dev->q_params.irq_cfg[q_id]; + + irq_cfg->irq_id = qp_irq_info->irq_id; + irq_cfg->msix_entry_idx = qp_irq_info->msix_entry_idx; + irq_cfg->netdev = nic_dev->netdev; + irq_cfg->txq = &nic_dev->txqs[q_id]; + irq_cfg->rxq = &nic_dev->rxqs[q_id]; + nic_dev->rxqs[q_id].irq_cfg = irq_cfg; + + local_cpu = cpumask_local_spread(q_id, dev_to_node(&pdev->dev)); + cpumask_set_cpu(local_cpu, &irq_cfg->affinity_mask); + + snprintf(irq_cfg->irq_name, sizeof(irq_cfg->irq_name), + "%s_qp%u", nic_dev->netdev->name, q_id); + + err = spnic_request_irq(irq_cfg, q_id); + if (err) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to request Rx irq\n"); + goto req_tx_irq_err; + } + + sphw_set_msix_auto_mask_state(nic_dev->hwdev, irq_cfg->msix_entry_idx, + 
SPHW_SET_MSIX_AUTO_MASK); + sphw_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx, SPHW_MSIX_ENABLE); + } + + INIT_DELAYED_WORK(&nic_dev->moderation_task, spnic_auto_moderation_work); + + return 0; + +req_tx_irq_err: + for (i = 0; i < q_id; i++) { + irq_cfg = &nic_dev->q_params.irq_cfg[i]; + sphw_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx, SPHW_MSIX_DISABLE); + sphw_set_msix_auto_mask_state(nic_dev->hwdev, irq_cfg->msix_entry_idx, + SPHW_CLR_MSIX_AUTO_MASK); + spnic_release_irq(irq_cfg); + } + + return err; +} + +void spnic_qps_irq_deinit(struct spnic_nic_dev *nic_dev) +{ + struct spnic_irq *irq_cfg = NULL; + u16 q_id; + + for (q_id = 0; q_id < nic_dev->q_params.num_qps; q_id++) { + irq_cfg = &nic_dev->q_params.irq_cfg[q_id]; + sphw_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx, SPHW_MSIX_DISABLE); + sphw_set_msix_auto_mask_state(nic_dev->hwdev, irq_cfg->msix_entry_idx, + SPHW_CLR_MSIX_AUTO_MASK); + spnic_release_irq(irq_cfg); + } +} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_lld.c b/drivers/net/ethernet/ramaxel/spnic/spnic_lld.c new file mode 100644 index 000000000000..f09a4c186aae --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/spnic_lld.c @@ -0,0 +1,937 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/io-mapping.h> +#include <linux/interrupt.h> +#include <linux/inetdevice.h> +#include <net/addrconf.h> +#include <linux/time.h> +#include <linux/timex.h> +#include <linux/rtc.h> +#include <linux/aer.h> +#include <linux/debugfs.h> + +#include "sphw_common.h" +#include "sphw_mt.h" +#include "sphw_crm.h" +#include "spnic_lld.h" +#include "spnic_pci_id_tbl.h" +#include "spnic_sriov.h" +#include "spnic_dev_mgmt.h" +#include "sphw_hw.h" +#include "spnic_nic_dev.h" + +static bool disable_vf_load; +module_param(disable_vf_load, bool, 0444); +MODULE_PARM_DESC(disable_vf_load, "Disable virtual functions probe or not - default is false"); + +static bool disable_attach; +module_param(disable_attach, bool, 0444); +MODULE_PARM_DESC(disable_attach, "disable_attach or not - default is false"); + +#define SPNIC_WAIT_SRIOV_CFG_TIMEOUT 15000 +#define SPNIC_SYNC_YEAR_OFFSET 1900 + +MODULE_AUTHOR("Ramaxel Technologies CO., Ltd"); +MODULE_DESCRIPTION(SPNIC_DRV_DESC); +MODULE_VERSION(SPNIC_DRV_VERSION); +MODULE_LICENSE("GPL"); + +struct spnic_uld_info g_uld_info[SERVICE_T_MAX] = { {0} }; + +#define SPHW_EVENT_PROCESS_TIMEOUT 10000 + +static const char *s_uld_name[SERVICE_T_MAX] = { + "nic", "ovs", "roce", "toe", "ioe", + "fc", "vbs", "ipsec", "virtio", "migrate"}; + +static int attach_uld(struct spnic_pcidev *dev, enum sphw_service_type type, + struct spnic_uld_info *uld_info) +{ + void *uld_dev = NULL; + int err; + + mutex_lock(&dev->pdev_mutex); + + if (dev->uld_dev[type]) { + sdk_err(&dev->pcidev->dev, "%s driver has attached to pcie device\n", + s_uld_name[type]); + err = 0; + goto out_unlock; + } + + err = uld_info->probe(&dev->lld_dev, &uld_dev, dev->uld_dev_name[type]); + if (err || !uld_dev) { + sdk_err(&dev->pcidev->dev, "Failed to add object for %s driver to pcie device\n", + s_uld_name[type]); + goto probe_failed; + } + + dev->uld_dev[type] = uld_dev; + mutex_unlock(&dev->pdev_mutex); + + sdk_info(&dev->pcidev->dev, "Attach %s driver to pcie device succeed\n", s_uld_name[type]); + return 0; + +probe_failed: +out_unlock: + 
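+	/* Both the duplicate-attach and probe-failure paths funnel through
+	 * this single unlock so pdev_mutex is released exactly once.
+	 */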
mutex_unlock(&dev->pdev_mutex); + + return err; +} + +static void detach_uld(struct spnic_pcidev *dev, enum sphw_service_type type) +{ + struct spnic_uld_info *uld_info = &g_uld_info[type]; + unsigned long end; + bool timeout = true; + + mutex_lock(&dev->pdev_mutex); + if (!dev->uld_dev[type]) { + mutex_unlock(&dev->pdev_mutex); + return; + } + + end = jiffies + msecs_to_jiffies(SPHW_EVENT_PROCESS_TIMEOUT); + do { + if (!test_and_set_bit(type, &dev->state)) { + timeout = false; + break; + } + usleep_range(900, 1000); + } while (time_before(jiffies, end)); + + if (timeout && !test_and_set_bit(type, &dev->state)) + timeout = false; + + uld_info->remove(&dev->lld_dev, dev->uld_dev[type]); + dev->uld_dev[type] = NULL; + if (!timeout) + clear_bit(type, &dev->state); + + sdk_info(&dev->pcidev->dev, "Detach %s driver from pcie device succeed\n", + s_uld_name[type]); + mutex_unlock(&dev->pdev_mutex); +} + +static void attach_ulds(struct spnic_pcidev *dev) +{ + enum sphw_service_type type; + struct pci_dev *pdev = dev->pcidev; + + for (type = SERVICE_T_NIC; type < SERVICE_T_MAX; type++) { + if (g_uld_info[type].probe) { + if (pdev->is_virtfn && (!spnic_get_vf_service_load(pdev, (u16)type))) { + sdk_info(&pdev->dev, "VF device disable service_type = %d load in host\n", + type); + continue; + } + attach_uld(dev, type, &g_uld_info[type]); + } + } +} + +static void detach_ulds(struct spnic_pcidev *dev) +{ + enum sphw_service_type type; + + for (type = SERVICE_T_MAX - 1; type > SERVICE_T_NIC; type--) { + if (g_uld_info[type].probe) + detach_uld(dev, type); + } + + if (g_uld_info[SERVICE_T_NIC].probe) + detach_uld(dev, SERVICE_T_NIC); +} + +int spnic_register_uld(enum sphw_service_type type, struct spnic_uld_info *uld_info) +{ + struct card_node *chip_node = NULL; + struct spnic_pcidev *dev = NULL; + + if (type >= SERVICE_T_MAX) { + pr_err("Unknown type %d of up layer driver to register\n", type); + return -EINVAL; + } + + if (!uld_info || !uld_info->probe || !uld_info->remove) { + pr_err("Invalid information of %s driver to register\n", s_uld_name[type]); + return -EINVAL; + } + + lld_hold(); + + if (g_uld_info[type].probe) { + pr_err("%s driver has registered\n", s_uld_name[type]); + lld_put(); + return -EINVAL; + } + + memcpy(&g_uld_info[type], uld_info, sizeof(*uld_info)); + list_for_each_entry(chip_node, &g_spnic_chip_list, node) { + list_for_each_entry(dev, &chip_node->func_list, node) { + if (attach_uld(dev, type, uld_info)) { + sdk_err(&dev->pcidev->dev, "Attach %s driver to pcie device failed\n", + s_uld_name[type]); + continue; + } + } + } + + lld_put(); + + pr_info("Register %s driver succeed\n", s_uld_name[type]); + return 0; +} + +void spnic_unregister_uld(enum sphw_service_type type) +{ + struct card_node *chip_node = NULL; + struct spnic_pcidev *dev = NULL; + struct spnic_uld_info *uld_info = NULL; + + if (type >= SERVICE_T_MAX) { + pr_err("Unknown type %d of up layer driver to unregister\n", type); + return; + } + + lld_hold(); + list_for_each_entry(chip_node, &g_spnic_chip_list, node) { + /* detach vf first */ + list_for_each_entry(dev, &chip_node->func_list, node) { + if (sphw_func_type(dev->hwdev) != TYPE_VF) + continue; + + detach_uld(dev, type); + } + + list_for_each_entry(dev, &chip_node->func_list, node) { + if (sphw_func_type(dev->hwdev) == TYPE_VF) + continue; + + detach_uld(dev, type); + } + } + + uld_info = &g_uld_info[type]; + memset(uld_info, 0, sizeof(*uld_info)); + lld_put(); +} + +int spnic_attach_nic(struct spnic_lld_dev *lld_dev) +{ + struct spnic_pcidev *dev = NULL; + + 
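+	/* The lld_dev handed out to ULDs is embedded in struct spnic_pcidev,
+	 * so the enclosing adapter is recovered with container_of() below.
+	 */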
if (!lld_dev)
+		return -EINVAL;
+
+	dev = container_of(lld_dev, struct spnic_pcidev, lld_dev);
+	return attach_uld(dev, SERVICE_T_NIC, &g_uld_info[SERVICE_T_NIC]);
+}
+
+void spnic_detach_nic(struct spnic_lld_dev *lld_dev)
+{
+	struct spnic_pcidev *dev = NULL;
+
+	if (!lld_dev)
+		return;
+
+	dev = container_of(lld_dev, struct spnic_pcidev, lld_dev);
+	detach_uld(dev, SERVICE_T_NIC);
+}
+
+static void sphw_sync_time_to_fmw(struct spnic_pcidev *pdev_pri)
+{
+	struct tm tm = {0};
+	u64 tv_msec;
+	int err;
+
+	tv_msec = ktime_to_ms(ktime_get_real());
+	err = sphw_sync_time(pdev_pri->hwdev, tv_msec);
+	if (err) {
+		sdk_err(&pdev_pri->pcidev->dev, "Synchronize UTC time to firmware failed, errno:%d.\n",
+			err);
+	} else {
+		time64_to_tm(tv_msec / MSEC_PER_SEC, 0, &tm);
+		sdk_info(&pdev_pri->pcidev->dev, "Synchronize UTC time to firmware succeed. UTC time %ld-%02d-%02d %02d:%02d:%02d.\n",
+			 tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour,
+			 tm.tm_min, tm.tm_sec);
+	}
+}
+
+static void send_uld_dev_event(struct spnic_pcidev *dev,
+			       struct sphw_event_info *event)
+{
+	enum sphw_service_type type;
+
+	for (type = SERVICE_T_NIC; type < SERVICE_T_MAX; type++) {
+		if (test_and_set_bit(type, &dev->state)) {
+			sdk_warn(&dev->pcidev->dev, "Event: 0x%x can't be handled, %s is detaching\n",
+				 event->type, s_uld_name[type]);
+			continue;
+		}
+
+		if (g_uld_info[type].event)
+			g_uld_info[type].event(&dev->lld_dev, dev->uld_dev[type], event);
+		clear_bit(type, &dev->state);
+	}
+}
+
+static void send_event_to_dst_pf(struct spnic_pcidev *dev, u16 func_id,
+				 struct sphw_event_info *event)
+{
+	struct spnic_pcidev *des_dev = NULL;
+
+	lld_hold();
+	list_for_each_entry(des_dev, &dev->chip_node->func_list, node) {
+		if (sphw_func_type(des_dev->hwdev) == TYPE_VF)
+			continue;
+
+		if (sphw_global_func_id(des_dev->hwdev) == func_id) {
+			send_uld_dev_event(des_dev, event);
+			break;
+		}
+	}
+	lld_put();
+}
+
+void spnic_event_process(void *adapter, struct sphw_event_info *event)
+{
+	struct spnic_pcidev *dev = adapter;
+	u16 func_id;
+
+	if (event->type == SPHW_EVENT_FAULT &&
+	    event->info.fault_level == FAULT_LEVEL_SERIOUS_FLR &&
+	    event->info.event.chip.func_id < sphw_max_pf_num(dev->hwdev)) {
+		func_id = event->info.event.chip.func_id;
+		return send_event_to_dst_pf(adapter, func_id, event);
+	}
+
+	send_uld_dev_event(adapter, event);
+}
+
+#define SPNIC_IS_VF_DEV(pdev)	((pdev)->device == SPNIC_DEV_ID_VF)
+
+static int mapping_bar(struct pci_dev *pdev, struct spnic_pcidev *pci_adapter)
+{
+	int cfg_bar;
+
+	cfg_bar = SPNIC_IS_VF_DEV(pdev) ?
SPNIC_VF_PCI_CFG_REG_BAR : SPNIC_PF_PCI_CFG_REG_BAR;
+
+	pci_adapter->cfg_reg_base = pci_ioremap_bar(pdev, cfg_bar);
+	if (!pci_adapter->cfg_reg_base) {
+		sdk_err(&pdev->dev, "Failed to map configuration regs\n");
+		return -ENOMEM;
+	}
+
+	pci_adapter->intr_reg_base = pci_ioremap_bar(pdev, SPNIC_PCI_INTR_REG_BAR);
+	if (!pci_adapter->intr_reg_base) {
+		sdk_err(&pdev->dev,
+			"Failed to map interrupt regs\n");
+		goto map_intr_bar_err;
+	}
+
+	if (!SPNIC_IS_VF_DEV(pdev)) {
+		pci_adapter->mgmt_reg_base = pci_ioremap_bar(pdev, SPNIC_PCI_MGMT_REG_BAR);
+		if (!pci_adapter->mgmt_reg_base) {
+			sdk_err(&pdev->dev, "Failed to map mgmt regs\n");
+			goto map_mgmt_bar_err;
+		}
+	}
+
+	pci_adapter->db_base_phy = pci_resource_start(pdev, SPNIC_PCI_DB_BAR);
+	pci_adapter->db_dwqe_len = pci_resource_len(pdev, SPNIC_PCI_DB_BAR);
+	pci_adapter->db_base = pci_ioremap_bar(pdev, SPNIC_PCI_DB_BAR);
+	if (!pci_adapter->db_base) {
+		sdk_err(&pdev->dev, "Failed to map doorbell regs\n");
+		goto map_db_err;
+	}
+
+	return 0;
+
+map_db_err:
+	if (!SPNIC_IS_VF_DEV(pdev))
+		iounmap(pci_adapter->mgmt_reg_base);
+
+map_mgmt_bar_err:
+	iounmap(pci_adapter->intr_reg_base);
+
+map_intr_bar_err:
+	iounmap(pci_adapter->cfg_reg_base);
+
+	return -ENOMEM;
+}
+
+static void unmapping_bar(struct spnic_pcidev *pci_adapter)
+{
+	iounmap(pci_adapter->db_base);
+
+	if (!SPNIC_IS_VF_DEV(pci_adapter->pcidev))
+		iounmap(pci_adapter->mgmt_reg_base);
+
+	iounmap(pci_adapter->intr_reg_base);
+	iounmap(pci_adapter->cfg_reg_base);
+}
+
+static int spnic_pci_init(struct pci_dev *pdev)
+{
+	struct spnic_pcidev *pci_adapter = NULL;
+	int err;
+
+	pci_adapter = kzalloc(sizeof(*pci_adapter), GFP_KERNEL);
+	if (!pci_adapter) {
+		sdk_err(&pdev->dev, "Failed to alloc pci device adapter\n");
+		return -ENOMEM;
+	}
+	pci_adapter->pcidev = pdev;
+	mutex_init(&pci_adapter->pdev_mutex);
+
+	pci_set_drvdata(pdev, pci_adapter);
+
+	/* to do CONFIG_PCI_IOV */
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		sdk_err(&pdev->dev, "Failed to enable PCI device\n");
+		goto pci_enable_err;
+	}
+
+	err = pci_request_regions(pdev, SPNIC_NIC_DRV_NAME);
+	if (err) {
+		sdk_err(&pdev->dev, "Failed to request regions\n");
+		goto pci_regions_err;
+	}
+
+	pci_enable_pcie_error_reporting(pdev);
+
+	pci_set_master(pdev);
+
+	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (err) {
+		sdk_warn(&pdev->dev, "Couldn't set 64-bit DMA mask\n");
+		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (err) {
+			sdk_err(&pdev->dev, "Failed to set DMA mask\n");
+			goto dma_mask_err;
+		}
+	}
+
+	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (err) {
+		sdk_warn(&pdev->dev, "Couldn't set 64-bit coherent DMA mask\n");
+		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (err) {
+			sdk_err(&pdev->dev, "Failed to set coherent DMA mask\n");
+			goto dma_consistent_mask_err;
+		}
+	}
+
+	return 0;
+
+dma_consistent_mask_err:
+dma_mask_err:
+	pci_clear_master(pdev);
+	pci_release_regions(pdev);
+
+pci_regions_err:
+	pci_disable_device(pdev);
+
+pci_enable_err:
+	pci_set_drvdata(pdev, NULL);
+	kfree(pci_adapter);
+
+	return err;
+}
+
+static void spnic_pci_deinit(struct pci_dev *pdev)
+{
+	struct spnic_pcidev *pci_adapter = pci_get_drvdata(pdev);
+
+	pci_clear_master(pdev);
+	pci_release_regions(pdev);
+	pci_disable_pcie_error_reporting(pdev);
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+	kfree(pci_adapter);
+}
+
+#ifdef CONFIG_X86
+/**
+ * cfg_order_reg - on Haswell and Broadwell CPUs the DMA order register
+ * should be configured to zero
+ *
@pci_adapter: pci_adapter + **/ +static void cfg_order_reg(struct spnic_pcidev *pci_adapter) +{ + u8 cpu_model[] = {0x3c, 0x3f, 0x45, 0x46, 0x3d, 0x47, 0x4f, 0x56}; + struct cpuinfo_x86 *cpuinfo = NULL; + u32 i; + + if (sphw_func_type(pci_adapter->hwdev) == TYPE_VF) + return; + + cpuinfo = &cpu_data(0); + for (i = 0; i < sizeof(cpu_model); i++) { + if (cpu_model[i] == cpuinfo->x86_model) + sphw_set_pcie_order_cfg(pci_adapter->hwdev); + } +} +#endif + +static int spnic_func_init(struct pci_dev *pdev, struct spnic_pcidev *pci_adapter) +{ + struct sphw_init_para init_para = {0}; + int err; + + init_para.adapter_hdl = pci_adapter; + init_para.pcidev_hdl = pdev; + init_para.dev_hdl = &pdev->dev; + init_para.cfg_reg_base = pci_adapter->cfg_reg_base; + init_para.intr_reg_base = pci_adapter->intr_reg_base; + init_para.mgmt_reg_base = pci_adapter->mgmt_reg_base; + init_para.db_base = pci_adapter->db_base; + init_para.db_base_phy = pci_adapter->db_base_phy; + init_para.db_dwqe_len = pci_adapter->db_dwqe_len; + init_para.hwdev = &pci_adapter->hwdev; + init_para.chip_node = pci_adapter->chip_node; + err = sphw_init_hwdev(&init_para); + if (err) { + pci_adapter->hwdev = NULL; + sdk_err(&pdev->dev, "Failed to initialize hardware device\n"); + return -EFAULT; + } + + pci_adapter->lld_dev.pdev = pdev; + pci_adapter->lld_dev.hwdev = pci_adapter->hwdev; + if (sphw_func_type(pci_adapter->hwdev) != TYPE_VF) + set_bit(SPNIC_FUNC_PERSENT, &pci_adapter->sriov_info.state); + + sphw_event_register(pci_adapter->hwdev, pci_adapter, spnic_event_process); + + if (sphw_func_type(pci_adapter->hwdev) != TYPE_VF) + sphw_sync_time_to_fmw(pci_adapter); + + lld_lock_chip_node(); + list_add_tail(&pci_adapter->node, &pci_adapter->chip_node->func_list); + lld_unlock_chip_node(); + + if (!disable_attach) { + attach_ulds(pci_adapter); +#ifdef CONFIG_X86 + cfg_order_reg(pci_adapter); +#endif + } + + sdk_info(&pdev->dev, "Pcie device probed\n"); + + return 0; +} + +static void spnic_func_deinit(struct pci_dev *pdev) +{ + struct spnic_pcidev *pci_adapter = pci_get_drvdata(pdev); + + /* When function deinit, disable mgmt initiative report events firstly, + * then flush mgmt work-queue. 
+ */ + sphw_disable_mgmt_msg_report(pci_adapter->hwdev); + + sphw_flush_mgmt_workq(pci_adapter->hwdev); + + lld_lock_chip_node(); + list_del(&pci_adapter->node); + lld_unlock_chip_node(); + + wait_lld_dev_unused(pci_adapter); + + detach_ulds(pci_adapter); + + sphw_event_unregister(pci_adapter->hwdev); + + sphw_free_hwdev(pci_adapter->hwdev); +} + +static inline void wait_sriov_cfg_complete(struct spnic_pcidev *pci_adapter) +{ + struct spnic_sriov_info *sriov_info; + unsigned long end; + + sriov_info = &pci_adapter->sriov_info; + clear_bit(SPNIC_FUNC_PERSENT, &sriov_info->state); + usleep_range(9900, 10000); + + end = jiffies + msecs_to_jiffies(SPNIC_WAIT_SRIOV_CFG_TIMEOUT); + do { + if (!test_bit(SPNIC_SRIOV_ENABLE, &sriov_info->state) && + !test_bit(SPNIC_SRIOV_DISABLE, &sriov_info->state)) + return; + + usleep_range(9900, 10000); + } while (time_before(jiffies, end)); +} + +bool spnic_get_vf_load_state(struct pci_dev *pdev) +{ + struct spnic_pcidev *pci_adapter = NULL; + struct pci_dev *pf_pdev = NULL; + + if (!pdev) { + pr_err("pdev is null.\n"); + return false; + } + + /* vf used in vm */ + if (pci_is_root_bus(pdev->bus)) + return false; + + if (pdev->is_virtfn) + pf_pdev = pdev->physfn; + else + pf_pdev = pdev; + + pci_adapter = pci_get_drvdata(pf_pdev); + if (!pci_adapter) { + sdk_err(&pdev->dev, "pci_adapter is null.\n"); + return false; + } + + return !pci_adapter->disable_vf_load; +} + +int spnic_set_vf_load_state(struct pci_dev *pdev, bool vf_load_state) +{ + struct spnic_pcidev *pci_adapter = NULL; + + if (!pdev) { + pr_err("pdev is null.\n"); + return -EINVAL; + } + + pci_adapter = pci_get_drvdata(pdev); + if (!pci_adapter) { + sdk_err(&pdev->dev, "pci_adapter is null.\n"); + return -EINVAL; + } + + if (sphw_func_type(pci_adapter->hwdev) == TYPE_VF) + return 0; + + pci_adapter->disable_vf_load = !vf_load_state; + sdk_info(&pci_adapter->pcidev->dev, "Current function %s vf load in host\n", + vf_load_state ? "enable" : "disable"); + + return 0; +} + +bool spnic_get_vf_service_load(struct pci_dev *pdev, u16 service) +{ + struct spnic_pcidev *pci_adapter = NULL; + struct pci_dev *pf_pdev = NULL; + + if (!pdev) { + pr_err("pdev is null.\n"); + return false; + } + + if (pdev->is_virtfn) + pf_pdev = pdev->physfn; + else + pf_pdev = pdev; + + pci_adapter = pci_get_drvdata(pf_pdev); + if (!pci_adapter) { + sdk_err(&pdev->dev, "pci_adapter is null.\n"); + return false; + } + + if (service >= SERVICE_T_MAX) { + sdk_err(&pdev->dev, "service_type = %u state is error\n", + service); + return false; + } + + return !pci_adapter->disable_srv_load[service]; +} + +int spnic_set_vf_service_load(struct pci_dev *pdev, u16 service, bool vf_srv_load) +{ + struct spnic_pcidev *pci_adapter = NULL; + + if (!pdev) { + pr_err("pdev is null.\n"); + return -EINVAL; + } + + if (service >= SERVICE_T_MAX) { + sdk_err(&pdev->dev, "service_type = %u state is error\n", + service); + return -EFAULT; + } + + pci_adapter = pci_get_drvdata(pdev); + if (!pci_adapter) { + sdk_err(&pdev->dev, "pci_adapter is null.\n"); + return -EINVAL; + } + + if (sphw_func_type(pci_adapter->hwdev) == TYPE_VF) + return 0; + + pci_adapter->disable_srv_load[service] = !vf_srv_load; + sdk_info(&pci_adapter->pcidev->dev, "Current function %s vf load in host\n", + vf_srv_load ? 
"enable" : "disable"); + + return 0; +} + +static int enable_vf_service_state(struct spnic_pcidev *dst_dev, u16 service) +{ + int err; + + err = sphw_get_dev_cap(dst_dev->hwdev); + if (err) { + sdk_err(&dst_dev->pcidev->dev, "Failed to get current device capabilities\n"); + return -EFAULT; + } + return attach_uld(dst_dev, service, &g_uld_info[service]); +} + +int spnic_set_vf_service_state(struct pci_dev *pdev, u16 vf_func_id, u16 service, bool en) +{ + struct spnic_pcidev *dev = NULL; + struct spnic_pcidev *dst_dev = NULL; + int err = -EFAULT; + + if (!pdev) + return -EINVAL; + + dev = pci_get_drvdata(pdev); + if (!dev) + return -EFAULT; + + if (service >= SERVICE_T_MAX) { + sdk_err(&pdev->dev, "Current vf do not supports set service_type = %u state in host\n", + service); + return -EFAULT; + } + + /* find func_idx pci_adapter and disable or enable service */ + lld_hold(); + list_for_each_entry(dst_dev, &dev->chip_node->func_list, node) { + if (sphw_global_func_id(dst_dev->hwdev) != vf_func_id) + continue; + if (en) { + err = enable_vf_service_state(dst_dev, service); + if (err) + sdk_err(&dev->pcidev->dev, "Failed to set functio_id = %u service_type = %u\n", + vf_func_id, service); + } else { + detach_uld(dst_dev, service); + err = 0; + } + break; + } + lld_put(); + + return err; +} + +static void spnic_remove(struct pci_dev *pdev) +{ + struct spnic_pcidev *pci_adapter = pci_get_drvdata(pdev); + + if (!pci_adapter) + return; + + sdk_info(&pdev->dev, "Pcie device remove begin\n"); + + sphw_detect_hw_present(pci_adapter->hwdev); + + if (sphw_func_type(pci_adapter->hwdev) != TYPE_VF) { + wait_sriov_cfg_complete(pci_adapter); + spnic_pci_sriov_disable(pdev); + } + + spnic_func_deinit(pdev); + + lld_lock_chip_node(); + free_chip_node(pci_adapter); + lld_unlock_chip_node(); + + unmapping_bar(pci_adapter); + spnic_pci_deinit(pdev); + + sdk_info(&pdev->dev, "Pcie device removed\n"); +} + +static int spnic_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct spnic_pcidev *pci_adapter = NULL; + int err; + + sdk_info(&pdev->dev, "Pcie device probe begin\n"); + + if (pdev->is_virtfn && (!spnic_get_vf_load_state(pdev))) { + sdk_info(&pdev->dev, "VF device disable load in host\n"); + return 0; + } + + err = spnic_pci_init(pdev); + if (err) + return err; + + pci_adapter = pci_get_drvdata(pdev); + err = mapping_bar(pdev, pci_adapter); + if (err) { + sdk_err(&pdev->dev, "Failed to map bar\n"); + goto map_bar_failed; + } + + pci_adapter->disable_vf_load = disable_vf_load; + pci_adapter->id = *id; + lld_dev_cnt_init(pci_adapter); + + /* if chip information of pcie function exist, add the function into chip */ + lld_lock_chip_node(); + err = alloc_chip_node(pci_adapter); + if (err) { + lld_unlock_chip_node(); + sdk_err(&pdev->dev, + "Failed to add new chip node to global list\n"); + goto alloc_chip_node_fail; + } + + lld_unlock_chip_node(); + + err = spnic_func_init(pdev, pci_adapter); + if (err) + goto func_init_err; + + if (sphw_func_type(pci_adapter->hwdev) != TYPE_VF) { + err = sphw_set_bdf_ctxt(pci_adapter->hwdev, pdev->bus->number, + PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); + if (err) { + sdk_err(&pdev->dev, "Failed to set BDF info to MPU\n"); + goto set_bdf_err; + } + } + + return 0; + +set_bdf_err: + spnic_func_deinit(pdev); + +func_init_err: + lld_lock_chip_node(); + free_chip_node(pci_adapter); + lld_unlock_chip_node(); + +alloc_chip_node_fail: + unmapping_bar(pci_adapter); + +map_bar_failed: + spnic_pci_deinit(pdev); + + sdk_err(&pdev->dev, "Pcie device probe failed\n"); 
+	return err;
+}
+
+static const struct pci_device_id spnic_pci_table[] = {
+	{PCI_VDEVICE(RAMAXEL, SPNIC_DEV_ID_PF_STD), 0},
+	{PCI_VDEVICE(RAMAXEL, SPNIC_DEV_ID_VF), 0},
+	{PCI_VDEVICE(RAMAXEL, SPNIC_DEV_ID_VF_HV), 0},
+	{0, 0}
+};
+
+MODULE_DEVICE_TABLE(pci, spnic_pci_table);
+
+/**
+ * spnic_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ *
+ * Since we only need error detection, not error handling, we
+ * always return PCI_ERS_RESULT_CAN_RECOVER to tell the AER
+ * driver that we don't need a reset (error handling).
+ */
+static pci_ers_result_t spnic_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+	struct spnic_pcidev *pci_adapter = NULL;
+
+	sdk_err(&pdev->dev,
+		"Uncorrectable error detected, log and cleanup error status: 0x%08x\n",
+		state);
+
+	pci_aer_clear_nonfatal_status(pdev);
+	pci_adapter = pci_get_drvdata(pdev);
+
+	if (pci_adapter)
+		sphw_record_pcie_error(pci_adapter->hwdev);
+
+	return PCI_ERS_RESULT_CAN_RECOVER;
+}
+
+static void spnic_shutdown(struct pci_dev *pdev)
+{
+	struct spnic_pcidev *pci_adapter = pci_get_drvdata(pdev);
+
+	sdk_err(&pdev->dev, "Shutdown device\n");
+
+	if (pci_adapter)
+		sphw_shutdown_hwdev(pci_adapter->hwdev);
+
+	pci_disable_device(pdev);
+}
+
+/* Because we only need error detection, not error handling, the
+ * error_detected callback alone is enough.
+ */
+static struct pci_error_handlers spnic_err_handler = {
+	.error_detected = spnic_io_error_detected,
+};
+
+static struct pci_driver spnic_driver = {
+	.name = SPNIC_NIC_DRV_NAME,
+	.id_table = spnic_pci_table,
+	.probe = spnic_probe,
+	.remove = spnic_remove,
+	.shutdown = spnic_shutdown,
+	.sriov_configure = spnic_pci_sriov_configure,
+	.err_handler = &spnic_err_handler
+};
+
+static __init int spnic_lld_init(void)
+{
+	int err;
+
+	pr_info("%s - version %s\n", SPNIC_DRV_DESC, SPNIC_DRV_VERSION);
+	memset(g_uld_info, 0, sizeof(g_uld_info));
+
+	spnic_lld_lock_init();
+
+	err = spnic_register_uld(SERVICE_T_NIC, &nic_uld_info);
+	if (err) {
+		pr_err("Register spnic uld failed\n");
+		return err;
+	}
+
+	return pci_register_driver(&spnic_driver);
+}
+
+static __exit void spnic_lld_exit(void)
+{
+	pci_unregister_driver(&spnic_driver);
+	spnic_unregister_uld(SERVICE_T_NIC);
+}
+
+module_init(spnic_lld_init);
+module_exit(spnic_lld_exit);
diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_lld.h b/drivers/net/ethernet/ramaxel/spnic/spnic_lld.h
new file mode 100644
index 000000000000..e1864f1b6c5b
--- /dev/null
+++ b/drivers/net/ethernet/ramaxel/spnic/spnic_lld.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */
+
+#ifndef SPNIC_LLD_H
+#define SPNIC_LLD_H
+
+#include "sphw_crm.h"
+
+struct spnic_lld_dev {
+	struct pci_dev *pdev;
+	void *hwdev;
+};
+
+struct spnic_uld_info {
+	/* uld_dev: probe() should not return a NULL uld_dev even when the
+	 * function's capability does not support the upper-layer driver.
+	 * uld_dev_name: the NIC driver should copy its net device name here;
+	 * an FC driver would copy its FC device name;
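+	 * (for illustration only, a NIC probe() might fill it with something
+	 * like snprintf(uld_dev_name, IFNAMSIZ, "%s", netdev->name);
+	 * the buffer size IFNAMSIZ is an assumption);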
+	 * any other upper-layer driver need not copy anything.
+	 */
+	int (*probe)(struct spnic_lld_dev *lld_dev, void **uld_dev,
+		     char *uld_dev_name);
+	void (*remove)(struct spnic_lld_dev *lld_dev, void *uld_dev);
+	int (*suspend)(struct spnic_lld_dev *lld_dev, void *uld_dev,
+		       pm_message_t state);
+	int (*resume)(struct spnic_lld_dev *lld_dev, void *uld_dev);
+	void (*event)(struct spnic_lld_dev *lld_dev, void *uld_dev,
+		      struct sphw_event_info *event);
+	int (*ioctl)(void *uld_dev, u32 cmd, const void *buf_in, u32 in_size,
+		     void *buf_out, u32 *out_size);
+};
+
+int spnic_register_uld(enum sphw_service_type type, struct spnic_uld_info *uld_info);
+
+void spnic_unregister_uld(enum sphw_service_type type);
+
+void *spnic_get_uld_dev_by_pdev(struct pci_dev *pdev, enum sphw_service_type type);
+
+void *spnic_get_ppf_uld_by_pdev(struct pci_dev *pdev, enum sphw_service_type type);
+
+int spnic_get_chip_name_by_hwdev(const void *hwdev, char *ifname);
+
+void *spnic_get_uld_dev_by_ifname(const char *ifname, enum sphw_service_type type);
+
+int spnic_get_pf_nic_uld_array(struct pci_dev *pdev, u32 *dev_cnt, void *array[]);
+
+int spnic_get_chip_up_bitmap(struct pci_dev *pdev, bool *is_setted, u8 *valid_up_bitmap);
+
+int spnic_set_chip_up_bitmap(struct pci_dev *pdev, u8 valid_up_bitmap);
+
+bool spnic_get_vf_service_load(struct pci_dev *pdev, u16 service);
+
+int spnic_set_vf_service_load(struct pci_dev *pdev, u16 service, bool vf_srv_load);
+
+int spnic_set_vf_service_state(struct pci_dev *pdev, u16 vf_func_id, u16 service, bool en);
+
+bool spnic_get_vf_load_state(struct pci_dev *pdev);
+
+int spnic_set_vf_load_state(struct pci_dev *pdev, bool vf_load_state);
+
+int spnic_attach_nic(struct spnic_lld_dev *lld_dev);
+
+void spnic_detach_nic(struct spnic_lld_dev *lld_dev);
+
+void lld_hold(void);
+void lld_put(void);
+void lld_dev_hold(struct spnic_lld_dev *dev);
+void lld_dev_put(struct spnic_lld_dev *dev);
+struct spnic_lld_dev *spnic_get_lld_dev_by_ifname(const char *ifname);
+
+void *spnic_get_ppf_hwdev_by_pdev(struct pci_dev *pdev);
+
+void spnic_send_event_to_uld(struct pci_dev *pdev, enum sphw_service_type type,
+			     struct sphw_event_info *event);
+#endif
diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_mag_cfg.c b/drivers/net/ethernet/ramaxel/spnic/spnic_mag_cfg.c
new file mode 100644
index 000000000000..2f73f4e6cbfa
--- /dev/null
+++ b/drivers/net/ethernet/ramaxel/spnic/spnic_mag_cfg.c
@@ -0,0 +1,778 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/ethtool.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/module.h>
+
+#include "sphw_crm.h"
+#include "sphw_hw.h"
+#include "spnic_mag_cmd.h"
+#include "spnic_nic_io.h"
+#include "spnic_nic_cfg.h"
+#include "spnic_nic.h"
+#include "sphw_common.h"
+
+static int mag_msg_to_mgmt_sync(void *hwdev, u16 cmd, void *buf_in, u16 in_size,
+				void *buf_out, u16 *out_size);
+static int mag_msg_to_mgmt_sync_ch(void *hwdev, u16 cmd, void *buf_in,
+				   u16 in_size, void *buf_out, u16 *out_size,
+				   u16 channel);
+
+int spnic_set_port_enable(void *hwdev, bool enable, u16 channel)
+{
+	struct mag_cmd_set_port_enable en_state;
+	u16 out_size = sizeof(en_state);
+	struct spnic_nic_cfg *nic_cfg = NULL;
+	int err;
+
+	if (!hwdev)
+		return -EINVAL;
+
+	if
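+	/* enabling or disabling the port is a PF-only operation; a VF just
+	 * returns success here
+	 */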
(sphw_func_type(hwdev) == TYPE_VF) + return 0; + + memset(&en_state, 0, sizeof(en_state)); + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + + en_state.function_id = sphw_global_func_id(hwdev); + en_state.state = enable ? MAG_CMD_TX_ENABLE | MAG_CMD_RX_ENABLE : + MAG_CMD_PORT_DISABLE; + + err = mag_msg_to_mgmt_sync_ch(hwdev, MAG_CMD_SET_PORT_ENABLE, &en_state, + sizeof(en_state), &en_state, &out_size, channel); + if (err || !out_size || en_state.head.status) { + nic_err(nic_cfg->dev_hdl, "Failed to set port state, err: %d, status: 0x%x, out size: 0x%x, channel: 0x%x\n", + err, en_state.head.status, out_size, channel); + return -EIO; + } + + return 0; +} + +int spnic_get_phy_port_stats(void *hwdev, struct mag_cmd_port_stats *stats) +{ + struct mag_cmd_get_port_stat *port_stats = NULL; + struct mag_cmd_port_stats_info stats_info; + u16 out_size = sizeof(*port_stats); + struct spnic_nic_cfg *nic_cfg = NULL; + int err; + + port_stats = kzalloc(sizeof(*port_stats), GFP_KERNEL); + if (!port_stats) + return -ENOMEM; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + memset(&stats_info, 0, sizeof(stats_info)); + stats_info.port_id = sphw_physical_port_id(hwdev); + + err = mag_msg_to_mgmt_sync(hwdev, MAG_CMD_GET_PORT_STAT, + &stats_info, sizeof(stats_info), + port_stats, &out_size); + if (err || !out_size || port_stats->head.status) { + nic_err(nic_cfg->dev_hdl, + "Failed to get port statistics, err: %d, status: 0x%x, out size: 0x%x\n", + err, port_stats->head.status, out_size); + err = -EIO; + goto out; + } + + memcpy(stats, &port_stats->counter, sizeof(*stats)); + +out: + kfree(port_stats); + + return err; +} + +int spnic_set_port_funcs_state(void *hwdev, bool enable) +{ + return 0; +} + +int spnic_reset_port_link_cfg(void *hwdev) +{ + return 0; +} + +int spnic_force_port_relink(void *hwdev) +{ + return 0; +} + +int spnic_set_autoneg(void *hwdev, bool enable) +{ + /* TODO */ + + return 0; +} + +static int spnic_cfg_loopback_mode(struct spnic_nic_cfg *nic_cfg, u8 opcode, u8 *mode, u8 *enable) +{ + struct mag_cmd_cfg_loopback_mode lp; + u16 out_size = sizeof(lp); + int err; + + memset(&lp, 0, sizeof(lp)); + lp.port_id = sphw_physical_port_id(nic_cfg->hwdev); + lp.opcode = opcode; + if (opcode == MGMT_MSG_CMD_OP_SET) { + lp.lp_mode = *mode; + lp.lp_en = *enable; + } + + err = mag_msg_to_mgmt_sync(nic_cfg->hwdev, MAG_CMD_CFG_LOOPBACK_MODE, + &lp, sizeof(lp), &lp, &out_size); + if (err || !out_size || lp.head.status) { + nic_err(nic_cfg->dev_hdl, + "Failed to %s loopback mode, err: %d, status: 0x%x, out size: 0x%x\n", + opcode == MGMT_MSG_CMD_OP_SET ? 
"set" : "get", + err, lp.head.status, out_size); + return -EIO; + } + + if (opcode == MGMT_MSG_CMD_OP_GET) { + *mode = lp.lp_mode; + *enable = lp.lp_en; + } + + return 0; +} + +int spnic_get_loopback_mode(void *hwdev, u8 *mode, u8 *enable) +{ + struct spnic_nic_cfg *nic_cfg = NULL; + + if (!hwdev || !mode || !enable) + return -EINVAL; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + + return spnic_cfg_loopback_mode(nic_cfg, MGMT_MSG_CMD_OP_GET, mode, enable); +} + +#define LOOP_MODE_MIN 1 +#define LOOP_MODE_MAX 6 +int spnic_set_loopback_mode(void *hwdev, u8 mode, u8 enable) +{ + struct spnic_nic_cfg *nic_cfg = NULL; + + if (!hwdev) + return -EINVAL; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + + if (mode < LOOP_MODE_MIN || mode > LOOP_MODE_MAX) { + nic_err(nic_cfg->dev_hdl, "Invalid loopback mode %u to set\n", + mode); + return -EINVAL; + } + + return spnic_cfg_loopback_mode(nic_cfg, MGMT_MSG_CMD_OP_GET, &mode, &enable); +} + +int spnic_set_led_status(void *hwdev, enum mag_led_type type, enum mag_led_mode mode) +{ + struct spnic_nic_cfg *nic_cfg = NULL; + struct mag_cmd_set_led_cfg led_info; + u16 out_size = sizeof(led_info); + int err; + + if (!hwdev) + return -EFAULT; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + memset(&led_info, 0, sizeof(led_info)); + + led_info.function_id = sphw_global_func_id(hwdev); + led_info.type = type; + led_info.mode = mode; + + err = mag_msg_to_mgmt_sync(hwdev, MAG_CMD_SET_LED_CFG, &led_info, + sizeof(led_info), &led_info, &out_size); + if (err || led_info.head.status || !out_size) { + nic_err(nic_cfg->dev_hdl, "Failed to set led status, err: %d, status: 0x%x, out size: 0x%x\n", + err, led_info.head.status, out_size); + return -EIO; + } + + return 0; +} + +int spnic_get_port_info(void *hwdev, struct nic_port_info *port_info, u16 channel) +{ + struct mag_cmd_get_port_info port_msg; + u16 out_size = sizeof(port_msg); + struct spnic_nic_cfg *nic_cfg = NULL; + int err; + + if (!hwdev || !port_info) + return -EINVAL; + + memset(&port_msg, 0, sizeof(port_msg)); + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + + port_msg.port_id = sphw_physical_port_id(hwdev); + + err = mag_msg_to_mgmt_sync_ch(hwdev, MAG_CMD_GET_PORT_INFO, &port_msg, + sizeof(port_msg), &port_msg, &out_size, + channel); + if (err || !out_size || port_msg.head.status) { + nic_err(nic_cfg->dev_hdl, + "Failed to get port info, err: %d, status: 0x%x, out size: 0x%x, channel: 0x%x\n", + err, port_msg.head.status, out_size, channel); + return -EIO; + } + + port_info->autoneg_cap = port_msg.an_support; + port_info->autoneg_state = port_msg.an_en; + port_info->duplex = port_msg.duplex; + port_info->port_type = port_msg.wire_type; + port_info->speed = port_msg.speed; + port_info->fec = port_msg.fec; + port_info->supported_mode = port_msg.supported_mode; + port_info->advertised_mode = port_msg.advertised_mode; + + return 0; +} + +int spnic_get_speed(void *hwdev, enum mag_cmd_port_speed *speed, u16 channel) +{ + struct nic_port_info port_info = {0}; + int err; + + if (!hwdev || !speed) + return -EINVAL; + + err = spnic_get_port_info(hwdev, &port_info, channel); + if (err) + return err; + + *speed = port_info.speed; + + return 0; +} + +int spnic_set_link_settings(void *hwdev, struct spnic_link_ksettings *settings) +{ + struct mag_cmd_set_port_cfg info; + u16 out_size = sizeof(info); + struct spnic_nic_cfg *nic_cfg = NULL; + int err; + + if (!hwdev || !settings) + return -EINVAL; + + memset(&info, 0, sizeof(info)); + + nic_cfg = 
sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + + info.port_id = sphw_physical_port_id(hwdev); + info.config_bitmap = settings->valid_bitmap; + info.autoneg = settings->autoneg; + info.speed = settings->speed; + info.fec = settings->fec; + + err = mag_msg_to_mgmt_sync(hwdev, MAG_CMD_SET_PORT_CFG, &info, + sizeof(info), &info, &out_size); + if (err || !out_size || info.head.status) { + nic_err(nic_cfg->dev_hdl, "Failed to set link settings, err: %d, status: 0x%x, out size: 0x%x\n", + err, info.head.status, out_size); + return -EIO; + } + + return info.head.status; +} + +int spnic_get_link_state(void *hwdev, u8 *link_state) +{ + struct mag_cmd_get_link_status get_link; + u16 out_size = sizeof(get_link); + struct spnic_nic_cfg *nic_cfg = NULL; + int err; + + if (!hwdev || !link_state) + return -EINVAL; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + + memset(&get_link, 0, sizeof(get_link)); + get_link.port_id = sphw_physical_port_id(hwdev); + + err = mag_msg_to_mgmt_sync(hwdev, MAG_CMD_GET_LINK_STATUS, &get_link, + sizeof(get_link), &get_link, &out_size); + if (err || !out_size || get_link.head.status) { + nic_err(nic_cfg->dev_hdl, "Failed to get link state, err: %d, status: 0x%x, out size: 0x%x\n", + err, get_link.head.status, out_size); + return -EIO; + } + + *link_state = get_link.status; + + return 0; +} + +void spnic_notify_vf_link_status(struct spnic_nic_cfg *nic_cfg, u16 vf_id, u8 link_status) +{ + struct mag_cmd_get_link_status link; + struct vf_data_storage *vf_infos = nic_cfg->vf_infos; + u16 out_size = sizeof(link); + int err; + + memset(&link, 0, sizeof(link)); + if (vf_infos[HW_VF_ID_TO_OS(vf_id)].registered) { + link.status = link_status; + link.port_id = sphw_physical_port_id(nic_cfg->hwdev); + err = sphw_mbox_to_vf(nic_cfg->hwdev, vf_id, SPHW_MOD_HILINK, + MAG_CMD_GET_LINK_STATUS, &link, sizeof(link), &link, + &out_size, 0, SPHW_CHANNEL_NIC); + if (err || !out_size || link.head.status) + nic_err(nic_cfg->dev_hdl, + "Send link change event to VF %d failed, err: %d, status: 0x%x, out_size: 0x%x\n", + HW_VF_ID_TO_OS(vf_id), err, + link.head.status, out_size); + } +} + +void spnic_notify_all_vfs_link_changed(void *hwdev, u8 link_status) +{ + struct spnic_nic_cfg *nic_cfg = NULL; + u16 i; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + nic_cfg->link_status = link_status; + for (i = 1; i <= nic_cfg->max_vfs; i++) { + if (!nic_cfg->vf_infos[HW_VF_ID_TO_OS(i)].link_forced) + spnic_notify_vf_link_status(nic_cfg, i, link_status); + } +} + +static int spnic_get_vf_link_status_msg_handler(struct spnic_nic_cfg *nic_cfg, u16 vf_id, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size) +{ + struct vf_data_storage *vf_infos = nic_cfg->vf_infos; + struct mag_cmd_get_link_status *get_link = buf_out; + bool link_forced, link_up; + + link_forced = vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced; + link_up = vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up; + + if (link_forced) + get_link->status = link_up ? 
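+		/* link state was forced by the PF; report the forced value
+		 * rather than the current physical state
+		 */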
SPNIC_LINK_UP : SPNIC_LINK_DOWN; + else + get_link->status = nic_cfg->link_status; + + get_link->head.status = 0; + *out_size = sizeof(*get_link); + + return 0; +} + +int spnic_refresh_nic_cfg(void *hwdev, struct nic_port_info *port_info) +{ + /*TO DO */ + return 0; +} + +static void get_port_info(void *hwdev, struct mag_cmd_get_link_status *link_status, + struct sphw_event_link_info *link_info) +{ + struct nic_port_info port_info = {0}; + struct spnic_nic_cfg *nic_cfg = NULL; + int err; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + if (sphw_func_type(hwdev) != TYPE_VF && + link_status->status == SPHW_EVENT_LINK_UP) { + err = spnic_get_port_info(hwdev, &port_info, SPHW_CHANNEL_NIC); + if (err) { + nic_warn(nic_cfg->dev_hdl, "Failed to get port info\n"); + } else { + link_info->valid = 1; + link_info->port_type = port_info.port_type; + link_info->autoneg_cap = port_info.autoneg_cap; + link_info->autoneg_state = port_info.autoneg_state; + link_info->duplex = port_info.duplex; + link_info->speed = port_info.speed; + spnic_refresh_nic_cfg(hwdev, &port_info); + } + } +} + +static void link_status_event_handler(void *hwdev, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) +{ + struct mag_cmd_get_link_status *link_status = NULL; + struct mag_cmd_get_link_status *ret_link_status = NULL; + struct sphw_event_info event_info = {0}; + struct sphw_event_link_info *link_info = &event_info.link_info; + struct spnic_nic_cfg *nic_cfg = NULL; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + + link_status = buf_in; + sdk_info(nic_cfg->dev_hdl, "Link status report received, func_id: %u, status: %u\n", + sphw_global_func_id(hwdev), link_status->status); + + sphw_link_event_stats(hwdev, link_status->status); + + /* link event reported only after set vport enable */ + get_port_info(hwdev, link_status, link_info); + + event_info.type = link_status->status ? SPHW_EVENT_LINK_UP : SPHW_EVENT_LINK_DOWN; + + sphw_event_callback(hwdev, &event_info); + + if (sphw_func_type(hwdev) != TYPE_VF) { + spnic_notify_all_vfs_link_changed(hwdev, link_status->status); + ret_link_status = buf_out; + ret_link_status->head.status = 0; + *out_size = sizeof(*ret_link_status); + } +} + +static void cable_plug_event(void *hwdev, void *buf_in, u16 in_size, void *buf_out, u16 *out_size) +{ + struct mag_cmd_wire_event *plug_event = buf_in; + struct spnic_port_routine_cmd *rt_cmd = NULL; + struct spnic_nic_cfg *nic_cfg = NULL; + struct sphw_event_info event_info; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + rt_cmd = &nic_cfg->rt_cmd; + + mutex_lock(&nic_cfg->sfp_mutex); + rt_cmd->mpu_send_sfp_abs = false; + rt_cmd->mpu_send_sfp_info = false; + mutex_unlock(&nic_cfg->sfp_mutex); + + memset(&event_info, 0, sizeof(event_info)); + event_info.type = SPHW_EVENT_PORT_MODULE_EVENT; + event_info.module_event.type = plug_event->status ? 
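+				       /* translate the wire-event status into a plugged/unplugged module event */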
+				       SPHW_PORT_MODULE_CABLE_PLUGGED :
+				       SPHW_PORT_MODULE_CABLE_UNPLUGGED;
+
+	*out_size = sizeof(*plug_event);
+	plug_event = buf_out;
+	plug_event->head.status = 0;
+
+	sphw_event_callback(hwdev, &event_info);
+}
+
+static void port_sfp_info_event(void *hwdev, void *buf_in, u16 in_size,
+				void *buf_out, u16 *out_size)
+{
+	struct mag_cmd_get_xsfp_info *sfp_info = buf_in;
+	struct spnic_port_routine_cmd *rt_cmd = NULL;
+	struct spnic_nic_cfg *nic_cfg = NULL;
+
+	nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC);
+	if (in_size != sizeof(*sfp_info)) {
+		sdk_err(nic_cfg->dev_hdl, "Invalid sfp info cmd, length: %u, should be %zu\n",
+			in_size, sizeof(*sfp_info));
+		return;
+	}
+
+	rt_cmd = &nic_cfg->rt_cmd;
+	mutex_lock(&nic_cfg->sfp_mutex);
+	memcpy(&rt_cmd->std_sfp_info, sfp_info,
+	       sizeof(struct mag_cmd_get_xsfp_info));
+	rt_cmd->mpu_send_sfp_info = true;
+	mutex_unlock(&nic_cfg->sfp_mutex);
+}
+
+static void port_sfp_abs_event(void *hwdev, void *buf_in, u16 in_size,
+			       void *buf_out, u16 *out_size)
+{
+	struct mag_cmd_get_xsfp_present *sfp_abs = buf_in;
+	struct spnic_port_routine_cmd *rt_cmd = NULL;
+	struct spnic_nic_cfg *nic_cfg = NULL;
+
+	nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC);
+	if (in_size != sizeof(*sfp_abs)) {
+		sdk_err(nic_cfg->dev_hdl, "Invalid sfp absent cmd, length: %u, should be %zu\n",
+			in_size, sizeof(*sfp_abs));
+		return;
+	}
+
+	rt_cmd = &nic_cfg->rt_cmd;
+	mutex_lock(&nic_cfg->sfp_mutex);
+	memcpy(&rt_cmd->abs, sfp_abs, sizeof(struct mag_cmd_get_xsfp_present));
+	rt_cmd->mpu_send_sfp_abs = true;
+	mutex_unlock(&nic_cfg->sfp_mutex);
+}
+
+static bool spnic_if_sfp_absent(void *hwdev)
+{
+	struct spnic_nic_cfg *nic_cfg = NULL;
+	struct spnic_port_routine_cmd *rt_cmd = NULL;
+	struct mag_cmd_get_xsfp_present sfp_abs;
+	u8 port_id = sphw_physical_port_id(hwdev);
+	u16 out_size = sizeof(sfp_abs);
+	int err;
+	bool sfp_abs_status;
+
+	nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC);
+	memset(&sfp_abs, 0, sizeof(sfp_abs));
+
+	rt_cmd = &nic_cfg->rt_cmd;
+	mutex_lock(&nic_cfg->sfp_mutex);
+	if (rt_cmd->mpu_send_sfp_abs) {
+		if (rt_cmd->abs.head.status) {
+			mutex_unlock(&nic_cfg->sfp_mutex);
+			return true;
+		}
+
+		sfp_abs_status = (bool)rt_cmd->abs.abs_status;
+		mutex_unlock(&nic_cfg->sfp_mutex);
+		return sfp_abs_status;
+	}
+	mutex_unlock(&nic_cfg->sfp_mutex);
+
+	sfp_abs.port_id = port_id;
+	err = mag_msg_to_mgmt_sync(hwdev, MAG_CMD_GET_XSFP_PRESENT,
+				   &sfp_abs, sizeof(sfp_abs), &sfp_abs,
+				   &out_size);
+	if (sfp_abs.head.status || err || !out_size) {
+		nic_err(nic_cfg->dev_hdl,
+			"Failed to get port%u sfp absent status, err: %d, status: 0x%x, out size: 0x%x\n",
+			port_id, err, sfp_abs.head.status, out_size);
+		return true;
+	}
+
+	return (sfp_abs.abs_status == 0 ?
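+		/* abs_status == 0 means the module is present (MAG_CMD_XSFP_PRESENT) */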
false : true); +} + +int spnic_get_sfp_eeprom(void *hwdev, u8 *data, u32 len) +{ + struct spnic_nic_cfg *nic_cfg = NULL; + struct spnic_port_routine_cmd *rt_cmd = NULL; + struct mag_cmd_get_xsfp_info sfp_info; + u16 out_size = sizeof(sfp_info); + int err; + + if (!hwdev || !data) + return -EINVAL; + + if (spnic_if_sfp_absent(hwdev)) + return -ENXIO; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + memset(&sfp_info, 0, sizeof(sfp_info)); + + rt_cmd = &nic_cfg->rt_cmd; + mutex_lock(&nic_cfg->sfp_mutex); + if (rt_cmd->mpu_send_sfp_info) { + if (rt_cmd->std_sfp_info.head.status) { + mutex_unlock(&nic_cfg->sfp_mutex); + return -EIO; + } + + memcpy(data, rt_cmd->std_sfp_info.sfp_info, len); + mutex_unlock(&nic_cfg->sfp_mutex); + return 0; + } + mutex_unlock(&nic_cfg->sfp_mutex); + + sfp_info.port_id = sphw_physical_port_id(hwdev); + err = mag_msg_to_mgmt_sync(hwdev, MAG_CMD_GET_XSFP_INFO, &sfp_info, + sizeof(sfp_info), &sfp_info, &out_size); + if (sfp_info.head.status || err || !out_size) { + nic_err(nic_cfg->dev_hdl, + "Failed to get port%u sfp eeprom information, err: %d, status: 0x%x, out size: 0x%x\n", + sphw_physical_port_id(hwdev), err, + sfp_info.head.status, out_size); + return -EIO; + } + + memcpy(data, sfp_info.sfp_info, len); + + return 0; +} + +int spnic_get_sfp_type(void *hwdev, u8 *sfp_type, u8 *sfp_type_ext) +{ + struct spnic_nic_cfg *nic_cfg = NULL; + struct spnic_port_routine_cmd *rt_cmd = NULL; + u8 sfp_data[STD_SFP_INFO_MAX_SIZE]; + int err; + + if (!hwdev || !sfp_type || !sfp_type_ext) + return -EINVAL; + + if (spnic_if_sfp_absent(hwdev)) + return -ENXIO; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + rt_cmd = &nic_cfg->rt_cmd; + + mutex_lock(&nic_cfg->sfp_mutex); + if (rt_cmd->mpu_send_sfp_info) { + if (rt_cmd->std_sfp_info.head.status) { + mutex_unlock(&nic_cfg->sfp_mutex); + return -EIO; + } + + *sfp_type = rt_cmd->std_sfp_info.sfp_info[0]; + *sfp_type_ext = rt_cmd->std_sfp_info.sfp_info[1]; + mutex_unlock(&nic_cfg->sfp_mutex); + return 0; + } + mutex_unlock(&nic_cfg->sfp_mutex); + + err = spnic_get_sfp_eeprom(hwdev, (u8 *)sfp_data, STD_SFP_INFO_MAX_SIZE); + if (err) + return err; + + *sfp_type = sfp_data[0]; + *sfp_type_ext = sfp_data[1]; + + return 0; +} + +static const struct vf_msg_handler vf_mag_cmd_handler[] = { + { + .cmd = MAG_CMD_GET_LINK_STATUS, + .handler = spnic_get_vf_link_status_msg_handler, + }, +}; + +/* pf/ppf handler mbox msg from vf */ +int spnic_pf_mag_mbox_handler(void *hwdev, void *pri_handle, u16 vf_id, + u16 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + u32 index, cmd_size = ARRAY_LEN(vf_mag_cmd_handler); + struct spnic_nic_cfg *nic_cfg = NULL; + const struct vf_msg_handler *handler = NULL; + + if (!hwdev) + return -EFAULT; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + + for (index = 0; index < cmd_size; index++) { + handler = &vf_mag_cmd_handler[index]; + if (cmd == handler->cmd) + return handler->handler(nic_cfg, vf_id, buf_in, in_size, + buf_out, out_size); + } + + nic_warn(nic_cfg->dev_hdl, "NO handler for mag cmd: %u received from vf id: %u\n", + cmd, vf_id); + + return -EINVAL; +} + +static struct nic_event_handler mag_cmd_handler[] = { + { + .cmd = MAG_CMD_GET_LINK_STATUS, + .handler = link_status_event_handler, + }, + + { + .cmd = MAG_CMD_WIRE_EVENT, + .handler = cable_plug_event, + }, + + { + .cmd = MAG_CMD_GET_XSFP_INFO, + .handler = port_sfp_info_event, + }, + + { + .cmd = MAG_CMD_GET_XSFP_PRESENT, + .handler = port_sfp_abs_event, + }, +}; + +int 
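+/* dispatch a MAG event to the matching entry in mag_cmd_handler[] above */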
spnic_mag_event_handler(void *hwdev, void *pri_handle, u16 cmd, void *buf_in, u16 in_size,
+			    void *buf_out, u16 *out_size)
+{
+	struct spnic_nic_cfg *nic_cfg = NULL;
+	u32 size = ARRAY_LEN(mag_cmd_handler);
+	u32 i;
+
+	if (!hwdev)
+		return -EINVAL;
+
+	*out_size = 0;
+	nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC);
+	for (i = 0; i < size; i++) {
+		if (cmd == mag_cmd_handler[i].cmd) {
+			mag_cmd_handler[i].handler(hwdev, buf_in, in_size,
+						   buf_out, out_size);
+			break;
+		}
+	}
+
+	/* can't find a handler for this event cmd */
+	if (i == size)
+		sdk_warn(nic_cfg->dev_hdl, "Unsupported mag event, cmd: %u\n",
+			 cmd);
+
+	return 0;
+}
+
+int spnic_vf_mag_event_handler(void *hwdev, void *pri_handle, u16 cmd, void *buf_in, u16 in_size,
+			       void *buf_out, u16 *out_size)
+{
+	return spnic_mag_event_handler(hwdev, pri_handle, cmd, buf_in, in_size, buf_out, out_size);
+}
+
+/* pf/ppf handles the hilink events reported by the mgmt cpu */
+void spnic_pf_mag_event_handler(void *hwdev, void *pri_handle, u16 cmd,
+				void *buf_in, u16 in_size, void *buf_out, u16 *out_size)
+{
+	spnic_mag_event_handler(hwdev, pri_handle, cmd, buf_in, in_size, buf_out, out_size);
+}
+
+static int _mag_msg_to_mgmt_sync(void *hwdev, u16 cmd, void *buf_in, u16 in_size,
+				 void *buf_out, u16 *out_size, u16 channel)
+{
+	u32 i, cmd_cnt = ARRAY_LEN(vf_mag_cmd_handler);
+	bool cmd_to_pf = false;
+
+	if (sphw_func_type(hwdev) == TYPE_VF) {
+		for (i = 0; i < cmd_cnt; i++) {
+			if (cmd == vf_mag_cmd_handler[i].cmd) {
+				cmd_to_pf = true;
+				break;
+			}
+		}
+	}
+
+	if (cmd_to_pf)
+		return sphw_mbox_to_pf(hwdev, SPHW_MOD_HILINK, cmd, buf_in, in_size, buf_out,
+				       out_size, 0, channel);
+
+	return sphw_msg_to_mgmt_sync(hwdev, SPHW_MOD_HILINK, cmd, buf_in,
+				     in_size, buf_out, out_size, 0, channel);
+}
+
+static int mag_msg_to_mgmt_sync(void *hwdev, u16 cmd, void *buf_in, u16 in_size,
+				void *buf_out, u16 *out_size)
+{
+	return _mag_msg_to_mgmt_sync(hwdev, cmd, buf_in, in_size, buf_out,
+				     out_size, SPHW_CHANNEL_NIC);
+}
+
+static int mag_msg_to_mgmt_sync_ch(void *hwdev, u16 cmd, void *buf_in,
+				   u16 in_size, void *buf_out, u16 *out_size,
+				   u16 channel)
+{
+	return _mag_msg_to_mgmt_sync(hwdev, cmd, buf_in, in_size, buf_out,
+				     out_size, channel);
+}
diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_mag_cmd.h b/drivers/net/ethernet/ramaxel/spnic/spnic_mag_cmd.h
new file mode 100644
index 000000000000..4e65b7af115b
--- /dev/null
+++ b/drivers/net/ethernet/ramaxel/spnic/spnic_mag_cmd.h
@@ -0,0 +1,643 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */
+
+#ifndef SPNIC_MAG_CMD_H
+#define SPNIC_MAG_CMD_H
+
+#include "sphw_mgmt_msg_base.h"
+
+enum mag_cmd {
+	SERDES_CMD_PROCESS = 0,
+
+	MAG_CMD_SET_PORT_CFG = 1,
+	MAG_CMD_SET_PORT_ADAPT = 2,
+	MAG_CMD_CFG_LOOPBACK_MODE = 3,
+
+	MAG_CMD_GET_PORT_ENABLE = 5,
+	MAG_CMD_SET_PORT_ENABLE = 6,
+	MAG_CMD_GET_LINK_STATUS = 7,
+	MAG_CMD_SET_LINK_FOLLOW = 8,
+	MAG_CMD_SET_PMA_ENABLE = 9,
+	MAG_CMD_CFG_FEC_MODE = 10,
+
+	MAG_CMD_CFG_AN_TYPE = 12, /* reserved for future use */
+
+	MAG_CMD_SET_PANGEA_ADAPT = 15,
+
+	MAG_CMD_CFG_BIOS_LINK_CFG = 31,
+	MAG_CMD_RESTORE_LINK_CFG = 32,
+	MAG_CMD_ACTIVATE_BIOS_LINK_CFG = 33,
+
+	/* LED */
+	MAG_CMD_SET_LED_CFG = 50,
+
+	/* PHY */
+	MAG_CMD_GET_PHY_INIT_STATUS = 55, /* reserved for future use */
+
+	MAG_CMD_GET_XSFP_INFO = 60,
+	MAG_CMD_SET_XSFP_ENABLE = 61,
+	MAG_CMD_GET_XSFP_PRESENT = 62,
+	MAG_CMD_SET_XSFP_RW = 63,
+	MAG_CMD_CFG_XSFP_TEMPERATURE = 64,
+
+	MAG_CMD_WIRE_EVENT = 100,
+	MAG_CMD_LINK_ERR_EVENT = 101,
+
+	MAG_CMD_EVENT_PORT_INFO =
150, + MAG_CMD_GET_PORT_STAT = 151, + MAG_CMD_CLR_PORT_STAT = 152, + MAG_CMD_GET_PORT_INFO = 153, + MAG_CMD_GET_PCS_ERR_CNT = 154, + MAG_CMD_GET_MAG_CNT = 155, + MAG_CMD_DUMP_ANTRAIN_INFO = 156, + + MAG_CMD_MAX = 0xFF, +}; + +enum mag_cmd_port_speed { + PORT_SPEED_NOT_SET = 0, + PORT_SPEED_10MB = 1, + PORT_SPEED_100MB = 2, + PORT_SPEED_1GB = 3, + PORT_SPEED_10GB = 4, + PORT_SPEED_25GB = 5, + PORT_SPEED_40GB = 6, + PORT_SPEED_50GB = 7, + PORT_SPEED_100GB = 8, + PORT_SPEED_200GB = 9, + PORT_SPEED_UNKNOWN +}; + +enum mag_cmd_port_an { + PORT_AN_NOT_SET = 0, + PORT_CFG_AN_ON = 1, + PORT_CFG_AN_OFF = 2 +}; + +enum mag_cmd_port_fec { + PORT_FEC_NOT_SET = 0, + PORT_FEC_RSFEC = 1, + PORT_FEC_BASEFEC = 2, + PORT_FEC_NOFEC = 3, + PORT_FEC_LLRSFEC = 4 +}; + +enum mag_cmd_port_lanes { + PORT_LANES_NOT_SET = 0, + PORT_LANES_X1 = 1, + PORT_LANES_X2 = 2, + PORT_LANES_X4 = 4, + PORT_LANES_X8 = 8, /* reserved for future use */ +}; + +enum mag_cmd_port_duplex { + PORT_DUPLEX_HALF = 0, + PORT_DUPLEX_FULL = 1 +}; + +/* mag_cmd_set_port_cfg config bitmap */ +#define MAG_CMD_SET_SPEED 0x1 +#define MAG_CMD_SET_AUTONEG 0x2 +#define MAG_CMD_SET_FEC 0x4 +#define MAG_CMD_SET_LANES 0x8 +struct mag_cmd_set_port_cfg { + struct mgmt_msg_head head; + + u8 port_id; + u8 rsvd0[3]; + + u32 config_bitmap; + u8 speed; + u8 autoneg; + u8 fec; + u8 lanes; + u8 rsvd1[20]; +}; + +/* mag supported/advertised link mode bitmap */ +enum mag_cmd_link_mode { + LINK_MODE_GE = 0, + LINK_MODE_10GE_BASE_R = 1, + LINK_MODE_25GE_BASE_R = 2, + LINK_MODE_40GE_BASE_R4 = 3, + LINK_MODE_50GE_BASE_R = 4, + LINK_MODE_50GE_BASE_R2 = 5, + LINK_MODE_100GE_BASE_R = 6, + LINK_MODE_100GE_BASE_R2 = 7, + LINK_MODE_100GE_BASE_R4 = 8, + LINK_MODE_200GE_BASE_R2 = 9, + LINK_MODE_200GE_BASE_R4 = 10, + LINK_MODE_MAX_NUMBERS, + + LINK_MODE_UNKNOWN = 0xFFFF +}; + +struct mag_cmd_get_port_info { + struct mgmt_msg_head head; + + u8 port_id; + u8 rsvd0[3]; + + u8 wire_type; + u8 an_support; + u8 an_en; + u8 duplex; + + u8 speed; + u8 fec; + u8 lanes; + u8 rsvd1; + + u32 supported_mode; + u32 advertised_mode; + u8 rsvd2[8]; +}; + +#define MAG_CMD_OPCODE_GET 0 +#define MAG_CMD_OPCODE_SET 1 +struct mag_cmd_set_port_adapt { + struct mgmt_msg_head head; + + u8 port_id; + u8 opcode; /* 0:get adapt info 1:set adapt */ + u8 enable; + u8 rsvd0; + u32 speed_mode; + u32 rsvd1[3]; +}; + +#define MAG_CMD_LP_MODE_SDS_S_TX2RX 1 +#define MAG_CMD_LP_MODE_SDS_P_RX2TX 2 +#define MAG_CMD_LP_MODE_SDS_P_TX2RX 3 +#define MAG_CMD_LP_MODE_MAC_RX2TX 4 +#define MAG_CMD_LP_MODE_MAC_TX2RX 5 +#define MAG_CMD_LP_MODE_TXDP2RXDP 6 +struct mag_cmd_cfg_loopback_mode { + struct mgmt_msg_head head; + + u8 port_id; + u8 opcode; /* 0:get loopback mode 1:set loopback mode */ + u8 lp_mode; + u8 lp_en; /* 0:disable 1:enable */ + + u32 rsvd0[2]; +}; + +#define MAG_CMD_PORT_DISABLE 0x0 +#define MAG_CMD_TX_ENABLE 0x1 +#define MAG_CMD_RX_ENABLE 0x2 + +struct mag_cmd_set_port_enable { + struct mgmt_msg_head head; + + u16 function_id; /* function_id should not more than the max support pf_id(32) */ + u16 rsvd0; + + u8 state; /* bitmap bit0:tx_en bit1:rx_en */ + u8 rsvd1[3]; +}; + +struct mag_cmd_get_port_enable { + struct mgmt_msg_head head; + + u8 port; + u8 state; /* bitmap bit0:tx_en bit1:rx_en */ + u8 rsvd0[2]; +}; + +#define PMA_FOLLOW_DEFAULT 0x0 +#define PMA_FOLLOW_ENABLE 0x1 +#define PMA_FOLLOW_DISABLE 0x2 +/* the physical port disable link follow only when all pf of the port are set to follow disable */ +struct mag_cmd_set_link_follow { + struct mgmt_msg_head head; + + u16 function_id; /* function_id 
must not exceed the max supported pf_id (32) */
+	u16 rsvd0;
+
+	u8 follow;
+	u8 rsvd1[3];
+};
+
+/* firmware also uses this cmd to report link events to the driver */
+struct mag_cmd_get_link_status {
+	struct mgmt_msg_head head;
+
+	u8 port_id;
+	u8 status; /* 0:link down 1:link up */
+	u8 rsvd0[2];
+};
+
+struct mag_cmd_set_pma_enable {
+	struct mgmt_msg_head head;
+
+	u16 function_id; /* function_id must not exceed the max supported pf_id (32) */
+	u16 enable;
+};
+
+struct mag_cmd_cfg_an_type {
+	struct mgmt_msg_head head;
+
+	u8 port_id;
+	u8 opcode; /* 0:get an type 1:set an type */
+	u8 rsvd0[2];
+
+	u32 an_type; /* 0:ieee 1:25G/50G eth consortium */
+};
+
+struct mag_cmd_cfg_fec_mode {
+	struct mgmt_msg_head head;
+
+	u8 port_id;
+	u8 opcode; /* 0:get fec mode 1:set fec mode */
+	u8 fec;
+	u8 rsvd0;
+};
+
+struct mag_cmd_cfg_bios_link_cfg {
+	struct mgmt_msg_head head;
+
+	u8 port_id;
+	u8 opcode; /* 0:get bios link info 1:set bios link cfg */
+	u8 clear;
+	u8 rsvd0;
+
+	u32 wire_type;
+	u8 an_en;
+	u8 speed;
+	u8 fec;
+	u8 rsvd1;
+	u32 speed_mode;
+	u32 rsvd2[3];
+};
+
+struct mag_cmd_restore_link_cfg {
+	struct mgmt_msg_head head;
+
+	u8 port_id;
+	u8 rsvd[7];
+};
+
+struct mag_cmd_activate_bios_link_cfg {
+	struct mgmt_msg_head head;
+
+	u32 rsvd[8];
+};
+
+/* led type */
+enum mag_led_type {
+	MAG_CMD_LED_TYPE_ALARM = 0x0,
+	MAG_CMD_LED_TYPE_LOW_SPEED = 0x1,
+	MAG_CMD_LED_TYPE_HIGH_SPEED = 0x2
+};
+
+/* led mode */
+enum mag_led_mode {
+	MAG_CMD_LED_MODE_DEFAULT = 0x0,
+	MAG_CMD_LED_MODE_FORCE_ON = 0x1,
+	MAG_CMD_LED_MODE_FORCE_OFF = 0x2,
+	MAG_CMD_LED_MODE_FORCE_BLINK_1HZ = 0x3,
+	MAG_CMD_LED_MODE_FORCE_BLINK_2HZ = 0x4,
+	MAG_CMD_LED_MODE_FORCE_BLINK_4HZ = 0x5,
+	MAG_CMD_LED_MODE_1HZ = 0x6,
+	MAG_CMD_LED_MODE_2HZ = 0x7,
+	MAG_CMD_LED_MODE_4HZ = 0x8,
+};
+
+/* the led reports alarm while any pf of the port is in the alarm state */
+struct mag_cmd_set_led_cfg {
+	struct mgmt_msg_head head;
+
+	u16 function_id;
+	u8 type;
+	u8 mode;
+};
+
+#define XSFP_INFO_MAX_SIZE 640
+/* xsfp wire type, refer to cmis protocol definition */
+enum mag_wire_type {
+	MAG_CMD_WIRE_TYPE_UNKNOWN = 0x0,
+	MAG_CMD_WIRE_TYPE_MM = 0x1,
+	MAG_CMD_WIRE_TYPE_SM = 0x2,
+	MAG_CMD_WIRE_TYPE_COPPER = 0x3,
+	MAG_CMD_WIRE_TYPE_ACC = 0x4,
+	MAG_CMD_WIRE_TYPE_BASET = 0x5,
+	MAG_CMD_WIRE_TYPE_AOC = 0x40,
+	MAG_CMD_WIRE_TYPE_ELECTRIC = 0x41,
+	MAG_CMD_WIRE_TYPE_BACKPLANE = 0x42
+};
+
+struct mag_cmd_get_xsfp_info {
+	struct mgmt_msg_head head;
+
+	u8 port_id;
+	u8 wire_type;
+	u16 out_len;
+	u32 rsvd;
+	u8 sfp_info[XSFP_INFO_MAX_SIZE];
+};
+
+#define MAG_CMD_XSFP_DISABLE 0x0
+#define MAG_CMD_XSFP_ENABLE 0x1
+
+struct mag_cmd_set_xsfp_enable {
+	struct mgmt_msg_head head;
+
+	u16 function_id; /* function_id must not exceed the max supported pf_id (32) */
+	u16 rsvd0;
+
+	u8 status;
+	u8 rsvd1[3];
+};
+
+#define MAG_CMD_XSFP_PRESENT 0x0
+#define MAG_CMD_XSFP_ABSENT 0x1
+struct mag_cmd_get_xsfp_present {
+	struct mgmt_msg_head head;
+
+	u8 port_id;
+	u8 abs_status; /* 0:present, 1:absent */
+	u8 rsvd[2];
+};
+
+#define MAG_CMD_XSFP_READ 0x0
+#define MAG_CMD_XSFP_WRITE 0x1
+struct mag_cmd_set_xsfp_rw {
+	struct mgmt_msg_head head;
+
+	u8 port_id;
+	u8 operation; /* 0: read; 1: write */
+	u8 value;
+	u8 rsvd0;
+	u32 devaddr;
+	u32 offset;
+	u32 rsvd1;
+};
+
+struct mag_cmd_cfg_xsfp_temperature {
+	struct mgmt_msg_head head;
+
+	u8 opcode; /* 0:read 1:write */
+	u8 rsvd0[3];
+	s32 max_temp;
+	s32 min_temp;
+};
+
+struct mag_cmd_get_xsfp_temperature {
+	struct mgmt_msg_head head;
+
+	s16 sfp_temp[8];
+	u8 rsvd[32];
+	s32 max_temp;
+	s32
min_temp; +}; + +/* xsfp plug event */ +struct mag_cmd_wire_event { + struct mgmt_msg_head head; + + u8 port_id; + u8 status; /* 0:present, 1:absent */ + u8 rsvd[2]; +}; + +/* link err type definition */ +#define MAG_CMD_ERR_XSFP_UNKNOWN 0x0 +struct mag_cmd_link_err_event { + struct mgmt_msg_head head; + + u8 port_id; + u8 link_err_type; + u8 rsvd[2]; +}; + +#define MAG_PARAM_TYPE_DEFAULT_CFG 0x0 +#define MAG_PARAM_TYPE_BIOS_CFG 0x1 +#define MAG_PARAM_TYPE_TOOL_CFG 0x2 +#define MAG_PARAM_TYPE_FINAL_CFG 0x3 +#define MAG_PARAM_TYPE_WIRE_INFO 0x4 +#define MAG_PARAM_TYPE_ADAPT_INFO 0x5 +#define MAG_PARAM_TYPE_MAX_CNT 0x6 +struct param_head { + u8 valid_len; + u8 info_type; + u8 rsvd[2]; +}; + +struct mag_port_link_param { + struct param_head head; + + u8 an; + u8 fec; + u8 speed; + u8 rsvd0; + + u32 used; + u32 an_fec_ability; + u32 an_speed_ability; + u32 an_pause_ability; +}; + +struct mag_port_wire_info { + struct param_head head; + + u8 status; + u8 rsvd0[3]; + + u8 wire_type; + u8 default_fec; + u8 speed; + u8 rsvd1; + u32 speed_ability; +}; + +struct mag_port_adapt_info { + struct param_head head; + + u32 adapt_en; + u32 flash_adapt; + u32 rsvd0[2]; + + u32 wire_node; + u32 an_en; + u32 speed; + u32 fec; +}; + +struct mag_port_param_info { + u8 parameter_cnt; + u8 lane_id; + u8 lane_num; + u8 rsvd0; + + struct mag_port_link_param default_cfg; + struct mag_port_link_param bios_cfg; + struct mag_port_link_param tool_cfg; + struct mag_port_link_param final_cfg; + + struct mag_port_wire_info wire_info; + struct mag_port_adapt_info adapt_info; +}; + +struct mag_cmd_event_port_info { + struct mgmt_msg_head head; + + u8 port_id; + u8 event_type; + u8 rsvd0[2]; + + u8 vendor_name[16]; + u32 port_type; + u32 port_sub_type; + u32 cable_length; + u8 cable_temp; + u8 max_speed; + u8 sfp_type; + u8 rsvd1; + u32 power[4]; + + u8 an_state; + u8 fec; + u16 speed; + + u8 gpio_insert; /* 0:present 1:absent */ + u8 alos; + u8 rx_los; + u8 pma_ctrl; + + u32 pma_fifo_reg; + u32 pma_signal_ok_reg; + u32 pcs_64_66b_reg; + u32 rf_lf; + u8 pcs_link; + u8 pcs_mac_link; + u8 tx_enable; + u8 rx_enable; + u32 pcs_err_cnt; + + u8 eq_data[38]; + u8 rsvd2[2]; + + u32 his_link_machine_state; + u32 cur_link_machine_state; + u8 his_machine_state_data[128]; + u8 cur_machine_state_data[128]; + u8 his_machine_state_length; + u8 cur_machine_state_length; + + struct mag_port_param_info param_info; + u8 rsvd3[360]; +}; + +struct mag_cmd_port_stats { + u64 mac_tx_fragment_pkt_num; + u64 mac_tx_undersize_pkt_num; + u64 mac_tx_undermin_pkt_num; + u64 mac_tx_64_oct_pkt_num; + u64 mac_tx_65_127_oct_pkt_num; + u64 mac_tx_128_255_oct_pkt_num; + u64 mac_tx_256_511_oct_pkt_num; + u64 mac_tx_512_1023_oct_pkt_num; + u64 mac_tx_1024_1518_oct_pkt_num; + u64 mac_tx_1519_2047_oct_pkt_num; + u64 mac_tx_2048_4095_oct_pkt_num; + u64 mac_tx_4096_8191_oct_pkt_num; + u64 mac_tx_8192_9216_oct_pkt_num; + u64 mac_tx_9217_12287_oct_pkt_num; + u64 mac_tx_12288_16383_oct_pkt_num; + u64 mac_tx_1519_max_bad_pkt_num; + u64 mac_tx_1519_max_good_pkt_num; + u64 mac_tx_oversize_pkt_num; + u64 mac_tx_jabber_pkt_num; + u64 mac_tx_bad_pkt_num; + u64 mac_tx_bad_oct_num; + u64 mac_tx_good_pkt_num; + u64 mac_tx_good_oct_num; + u64 mac_tx_total_pkt_num; + u64 mac_tx_total_oct_num; + u64 mac_tx_uni_pkt_num; + u64 mac_tx_multi_pkt_num; + u64 mac_tx_broad_pkt_num; + u64 mac_tx_pause_num; + u64 mac_tx_pfc_pkt_num; + u64 mac_tx_pfc_pri0_pkt_num; + u64 mac_tx_pfc_pri1_pkt_num; + u64 mac_tx_pfc_pri2_pkt_num; + u64 mac_tx_pfc_pri3_pkt_num; + u64 mac_tx_pfc_pri4_pkt_num; + u64 
mac_tx_pfc_pri5_pkt_num; + u64 mac_tx_pfc_pri6_pkt_num; + u64 mac_tx_pfc_pri7_pkt_num; + u64 mac_tx_control_pkt_num; + u64 mac_tx_err_all_pkt_num; + u64 mac_tx_from_app_good_pkt_num; + u64 mac_tx_from_app_bad_pkt_num; + + u64 mac_rx_fragment_pkt_num; + u64 mac_rx_undersize_pkt_num; + u64 mac_rx_undermin_pkt_num; + u64 mac_rx_64_oct_pkt_num; + u64 mac_rx_65_127_oct_pkt_num; + u64 mac_rx_128_255_oct_pkt_num; + u64 mac_rx_256_511_oct_pkt_num; + u64 mac_rx_512_1023_oct_pkt_num; + u64 mac_rx_1024_1518_oct_pkt_num; + u64 mac_rx_1519_2047_oct_pkt_num; + u64 mac_rx_2048_4095_oct_pkt_num; + u64 mac_rx_4096_8191_oct_pkt_num; + u64 mac_rx_8192_9216_oct_pkt_num; + u64 mac_rx_9217_12287_oct_pkt_num; + u64 mac_rx_12288_16383_oct_pkt_num; + u64 mac_rx_1519_max_bad_pkt_num; + u64 mac_rx_1519_max_good_pkt_num; + u64 mac_rx_oversize_pkt_num; + u64 mac_rx_jabber_pkt_num; + u64 mac_rx_bad_pkt_num; + u64 mac_rx_bad_oct_num; + u64 mac_rx_good_pkt_num; + u64 mac_rx_good_oct_num; + u64 mac_rx_total_pkt_num; + u64 mac_rx_total_oct_num; + u64 mac_rx_uni_pkt_num; + u64 mac_rx_multi_pkt_num; + u64 mac_rx_broad_pkt_num; + u64 mac_rx_pause_num; + u64 mac_rx_pfc_pkt_num; + u64 mac_rx_pfc_pri0_pkt_num; + u64 mac_rx_pfc_pri1_pkt_num; + u64 mac_rx_pfc_pri2_pkt_num; + u64 mac_rx_pfc_pri3_pkt_num; + u64 mac_rx_pfc_pri4_pkt_num; + u64 mac_rx_pfc_pri5_pkt_num; + u64 mac_rx_pfc_pri6_pkt_num; + u64 mac_rx_pfc_pri7_pkt_num; + u64 mac_rx_control_pkt_num; + u64 mac_rx_sym_err_pkt_num; + u64 mac_rx_fcs_err_pkt_num; + u64 mac_rx_send_app_good_pkt_num; + u64 mac_rx_send_app_bad_pkt_num; + u64 mac_rx_unfilter_pkt_num; +}; + +struct mag_cmd_port_stats_info { + struct mgmt_msg_head head; + + u8 port_id; + u8 rsvd0[3]; +}; + +struct mag_cmd_get_port_stat { + struct mgmt_msg_head head; + + struct mag_cmd_port_stats counter; + u64 rsvd1[15]; +}; + +struct mag_cmd_clr_port_stat { + struct mgmt_msg_head head; + + u8 port_id; + u8 rsvd0[3]; +}; + +#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_main.c b/drivers/net/ethernet/ramaxel/spnic/spnic_main.c new file mode 100644 index 000000000000..fa57a912cd5e --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/spnic_main.c @@ -0,0 +1,925 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/etherdevice.h> +#include <linux/netdevice.h> +#include <linux/if_vlan.h> +#include <linux/ethtool.h> +#include <linux/dcbnl.h> +#include <linux/tcp.h> +#include <linux/ip.h> +#include <linux/debugfs.h> + +#include "sphw_common.h" +#include "sphw_hw.h" +#include "sphw_crm.h" +#include "sphw_mt.h" +#include "spnic_nic_cfg.h" +#include "spnic_nic_io.h" +#include "spnic_nic_dev.h" +#include "spnic_tx.h" +#include "spnic_rx.h" +#include "spnic_lld.h" +#include "spnic_rss.h" +#include "spnic_dcb.h" + +#define DEFAULT_POLL_WEIGHT 64 +static unsigned int poll_weight = DEFAULT_POLL_WEIGHT; +module_param(poll_weight, uint, 0444); +MODULE_PARM_DESC(poll_weight, "Number packets for NAPI budget (default=64)"); + +#define SPNIC_DEAULT_TXRX_MSIX_PENDING_LIMIT 0 +#define SPNIC_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG 0 +#define SPNIC_DEAULT_TXRX_MSIX_RESEND_TIMER_CFG 7 + +static unsigned char qp_pending_limit = SPNIC_DEAULT_TXRX_MSIX_PENDING_LIMIT; +module_param(qp_pending_limit, byte, 0444); 
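+/* qp_pending_limit and qp_coalesc_timer_cfg seed each queue's interrupt
+ * coalescing settings in init_intr_coal_param() below; both default to 0.
+ * For illustration only (module name assumed to be spnic):
+ *   modprobe spnic qp_pending_limit=2 qp_coalesc_timer_cfg=32
+ */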
+MODULE_PARM_DESC(qp_pending_limit, "QP MSI-X Interrupt coalescing parameter pending_limit (default=0)");
+
+static unsigned char qp_coalesc_timer_cfg =
+	SPNIC_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG;
+module_param(qp_coalesc_timer_cfg, byte, 0444);
+MODULE_PARM_DESC(qp_coalesc_timer_cfg, "QP MSI-X Interrupt coalescing parameter coalesc_timer_cfg (default=0)");
+
+#define DEFAULT_RX_BUFF_LEN 2
+u16 rx_buff = DEFAULT_RX_BUFF_LEN;
+module_param(rx_buff, ushort, 0444);
+MODULE_PARM_DESC(rx_buff, "Set rx_buff size, buffer len must be 2^n and in the range 2 - 16, default is 2KB");
+
+static unsigned int lro_replenish_thld = 256;
+module_param(lro_replenish_thld, uint, 0444);
+MODULE_PARM_DESC(lro_replenish_thld, "Number of wqes for lro replenish buffer (default=256)");
+
+#define SPNIC_NIC_DEV_WQ_NAME "spnic_nic_dev_wq"
+
+#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_LINK)
+
+#define QID_MASKED(q_id, nic_dev) ((q_id) & ((nic_dev)->num_qps - 1))
+#define WATCHDOG_TIMEOUT 5
+
+#define SPNIC_SQ_DEPTH 1024
+#define SPNIC_RQ_DEPTH 1024
+
+enum spnic_rx_buff_len {
+	RX_BUFF_VALID_2KB = 2,
+	RX_BUFF_VALID_4KB = 4,
+	RX_BUFF_VALID_8KB = 8,
+	RX_BUFF_VALID_16KB = 16,
+};
+
+#define CONVERT_UNIT 1024
+
+static int spnic_netdev_event(struct notifier_block *notifier, unsigned long event, void *ptr);
+
+/* used for netdev notifier register/unregister */
+DEFINE_MUTEX(spnic_netdev_notifiers_mutex);
+static int spnic_netdev_notifiers_ref_cnt;
+static struct notifier_block spnic_netdev_notifier = {
+	.notifier_call = spnic_netdev_event,
+};
+
+static void spnic_register_notifier(struct spnic_nic_dev *nic_dev)
+{
+	int err;
+
+	mutex_lock(&spnic_netdev_notifiers_mutex);
+	spnic_netdev_notifiers_ref_cnt++;
+	if (spnic_netdev_notifiers_ref_cnt == 1) {
+		err = register_netdevice_notifier(&spnic_netdev_notifier);
+		if (err) {
+			nic_info(&nic_dev->pdev->dev, "Register netdevice notifier failed, err: %d\n",
+				 err);
+			spnic_netdev_notifiers_ref_cnt--;
+		}
+	}
+	mutex_unlock(&spnic_netdev_notifiers_mutex);
+}
+
+static void spnic_unregister_notifier(struct spnic_nic_dev *nic_dev)
+{
+	mutex_lock(&spnic_netdev_notifiers_mutex);
+	if (spnic_netdev_notifiers_ref_cnt == 1)
+		unregister_netdevice_notifier(&spnic_netdev_notifier);
+
+	if (spnic_netdev_notifiers_ref_cnt)
+		spnic_netdev_notifiers_ref_cnt--;
+	mutex_unlock(&spnic_netdev_notifiers_mutex);
+}
+
+#define SPNIC_MAX_VLAN_DEPTH_OFFLOAD_SUPPORT 1
+#define SPNIC_VLAN_CLEAR_OFFLOAD (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | \
+				  NETIF_F_SCTP_CRC | NETIF_F_RXCSUM | \
+				  NETIF_F_ALL_TSO)
+
+int spnic_netdev_event(struct notifier_block *notifier, unsigned long event, void *ptr)
+{
+	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+	struct net_device *real_dev = NULL;
+	struct net_device *ret = NULL;
+	struct spnic_nic_dev *nic_dev = NULL;
+	u16 vlan_depth;
+
+	if (!is_vlan_dev(ndev))
+		return NOTIFY_DONE;
+
+	dev_hold(ndev);
+
+	switch (event) {
+	case NETDEV_REGISTER:
+		real_dev = vlan_dev_real_dev(ndev);
+		nic_dev = spnic_get_uld_dev_by_ifname(real_dev->name, SERVICE_T_NIC);
+		if (!nic_dev)
+			goto out;
+
+		vlan_depth = 1;
+		ret = vlan_dev_priv(ndev)->real_dev;
+		while (is_vlan_dev(ret)) {
+			ret = vlan_dev_priv(ret)->real_dev;
+			vlan_depth++;
+		}
+
+		if (vlan_depth == SPNIC_MAX_VLAN_DEPTH_OFFLOAD_SUPPORT) {
+			ndev->vlan_features &= (~SPNIC_VLAN_CLEAR_OFFLOAD);
+		} else if (vlan_depth > SPNIC_MAX_VLAN_DEPTH_OFFLOAD_SUPPORT) {
+			ndev->hw_features &= (~SPNIC_VLAN_CLEAR_OFFLOAD);
+			ndev->features &= (~SPNIC_VLAN_CLEAR_OFFLOAD);
+		}
+
+		break;
+
+	default:
+		break;
+	}
+
+out:
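+	/* drop the reference taken by dev_hold() at the top of this function */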
dev_put(ndev); + + return NOTIFY_DONE; +} + +void spnic_link_status_change(struct spnic_nic_dev *nic_dev, bool status) +{ + struct net_device *netdev = nic_dev->netdev; + + if (!SPHW_CHANNEL_RES_VALID(nic_dev) || test_bit(SPNIC_LP_TEST, &nic_dev->flags)) + return; + + if (status) { + if (netif_carrier_ok(netdev)) + return; + + nic_dev->link_status = status; + netif_carrier_on(netdev); + nicif_info(nic_dev, link, netdev, "Link is up\n"); + } else { + if (!netif_carrier_ok(netdev)) + return; + + nic_dev->link_status = status; + netif_carrier_off(netdev); + nicif_info(nic_dev, link, netdev, "Link is down\n"); + } +} + +static void netdev_feature_init(struct net_device *netdev) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + netdev_features_t dft_fts = 0; + netdev_features_t cso_fts = 0; + netdev_features_t vlan_fts = 0; + netdev_features_t tso_fts = 0; + netdev_features_t hw_features = 0; + + dft_fts |= NETIF_F_SG | NETIF_F_HIGHDMA; + + if (SPNIC_SUPPORT_CSUM(nic_dev->hwdev)) + cso_fts |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; + if (SPNIC_SUPPORT_SCTP_CRC(nic_dev->hwdev)) + cso_fts |= NETIF_F_SCTP_CRC; + + if (SPNIC_SUPPORT_TSO(nic_dev->hwdev)) + tso_fts |= NETIF_F_TSO | NETIF_F_TSO6; + + if (SPNIC_SUPPORT_VLAN_OFFLOAD(nic_dev->hwdev)) { + vlan_fts |= NETIF_F_HW_VLAN_CTAG_TX; + vlan_fts |= NETIF_F_HW_VLAN_CTAG_RX; + } + + if (SPNIC_SUPPORT_RXVLAN_FILTER(nic_dev->hwdev)) + vlan_fts |= NETIF_F_HW_VLAN_CTAG_FILTER; + + if (SPNIC_SUPPORT_VXLAN_OFFLOAD(nic_dev->hwdev)) + tso_fts |= NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM; + + /* LRO is disable in default, only set hw features */ + if (SPNIC_SUPPORT_LRO(nic_dev->hwdev)) + hw_features |= NETIF_F_LRO; + + netdev->features |= dft_fts | cso_fts | tso_fts | vlan_fts; + netdev->vlan_features |= dft_fts | cso_fts | tso_fts; + + hw_features |= netdev->hw_features; + + hw_features |= netdev->features; + + netdev->hw_features = hw_features; + + netdev->priv_flags |= IFF_UNICAST_FLT; + + netdev->hw_enc_features |= dft_fts | cso_fts; + netdev->hw_enc_features |= tso_fts | NETIF_F_TSO_ECN; +} + +static void init_intr_coal_param(struct spnic_nic_dev *nic_dev) +{ + struct spnic_intr_coal_info *info = NULL; + u16 i; + + for (i = 0; i < nic_dev->max_qps; i++) { + info = &nic_dev->intr_coalesce[i]; + + info->pending_limt = qp_pending_limit; + info->coalesce_timer_cfg = qp_coalesc_timer_cfg; + + info->resend_timer_cfg = SPNIC_DEAULT_TXRX_MSIX_RESEND_TIMER_CFG; + + info->pkt_rate_high = SPNIC_RX_RATE_HIGH; + info->rx_usecs_high = SPNIC_RX_COAL_TIME_HIGH; + info->rx_pending_limt_high = SPNIC_RX_PENDING_LIMIT_HIGH; + info->pkt_rate_low = SPNIC_RX_RATE_LOW; + info->rx_usecs_low = SPNIC_RX_COAL_TIME_LOW; + info->rx_pending_limt_low = SPNIC_RX_PENDING_LIMIT_LOW; + } +} + +static int spnic_init_intr_coalesce(struct spnic_nic_dev *nic_dev) +{ + u64 size; + + if (qp_pending_limit != SPNIC_DEAULT_TXRX_MSIX_PENDING_LIMIT || + qp_coalesc_timer_cfg != SPNIC_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG) + nic_dev->intr_coal_set_flag = 1; + else + nic_dev->intr_coal_set_flag = 0; + + size = sizeof(*nic_dev->intr_coalesce) * nic_dev->max_qps; + if (!size) { + nic_err(&nic_dev->pdev->dev, "Cannot allocate zero size intr coalesce\n"); + return -EINVAL; + } + nic_dev->intr_coalesce = kzalloc(size, GFP_KERNEL); + if (!nic_dev->intr_coalesce) { + nic_err(&nic_dev->pdev->dev, "Failed to alloc intr coalesce\n"); + return -ENOMEM; + } + + init_intr_coal_param(nic_dev); + + if (test_bit(SPNIC_INTR_ADAPT, &nic_dev->flags)) + nic_dev->adaptive_rx_coal = 1; + else 
+ nic_dev->adaptive_rx_coal = 0; + + return 0; +} + +static void spnic_free_intr_coalesce(struct spnic_nic_dev *nic_dev) +{ + kfree(nic_dev->intr_coalesce); +} + +static int spnic_alloc_txrxqs(struct spnic_nic_dev *nic_dev) +{ + struct net_device *netdev = nic_dev->netdev; + int err; + + err = spnic_alloc_txqs(netdev); + if (err) { + nic_err(&nic_dev->pdev->dev, "Failed to alloc txqs\n"); + return err; + } + + err = spnic_alloc_rxqs(netdev); + if (err) { + nic_err(&nic_dev->pdev->dev, "Failed to alloc rxqs\n"); + goto alloc_rxqs_err; + } + + err = spnic_init_intr_coalesce(nic_dev); + if (err) { + nic_err(&nic_dev->pdev->dev, "Failed to init_intr_coalesce\n"); + goto init_intr_err; + } + + return 0; + +init_intr_err: + spnic_free_rxqs(netdev); + +alloc_rxqs_err: + spnic_free_txqs(netdev); + + return err; +} + +static void spnic_free_txrxqs(struct spnic_nic_dev *nic_dev) +{ + spnic_free_intr_coalesce(nic_dev); + spnic_free_rxqs(nic_dev->netdev); + spnic_free_txqs(nic_dev->netdev); +} + +static void spnic_sw_deinit(struct spnic_nic_dev *nic_dev) +{ + spnic_free_txrxqs(nic_dev); + + spnic_clean_mac_list_filter(nic_dev); + + spnic_del_mac(nic_dev->hwdev, nic_dev->netdev->dev_addr, 0, + sphw_global_func_id(nic_dev->hwdev), SPHW_CHANNEL_NIC); + + if (test_bit(SPNIC_RSS_ENABLE, &nic_dev->flags)) + spnic_rss_template_free(nic_dev->hwdev); + + spnic_clear_rss_config(nic_dev); +} + +static int spnic_sw_init(struct spnic_nic_dev *nic_dev) +{ + struct net_device *netdev = nic_dev->netdev; + + int err = 0; + + sema_init(&nic_dev->port_state_sem, 1); + + err = spnic_dcb_init(nic_dev); + if (err) { + nic_err(&nic_dev->pdev->dev, "Failed to init dcb\n"); + return -EFAULT; + } + + nic_dev->q_params.sq_depth = SPNIC_SQ_DEPTH; + nic_dev->q_params.rq_depth = SPNIC_RQ_DEPTH; + + spnic_try_to_enable_rss(nic_dev); + + err = spnic_get_default_mac(nic_dev->hwdev, netdev->dev_addr); + if (err) { + nic_err(&nic_dev->pdev->dev, "Failed to get MAC address\n"); + goto get_mac_err; + } + + if (!is_valid_ether_addr(netdev->dev_addr)) { + if (!SPNIC_FUNC_IS_VF(nic_dev->hwdev)) { + nic_err(&nic_dev->pdev->dev, "Invalid MAC address %pM\n", netdev->dev_addr); + err = -EIO; + goto err_mac; + } + + nic_info(&nic_dev->pdev->dev, "Invalid MAC address %pM, using random\n", + netdev->dev_addr); + eth_hw_addr_random(netdev); + } + + err = spnic_set_mac(nic_dev->hwdev, netdev->dev_addr, 0, + sphw_global_func_id(nic_dev->hwdev), SPHW_CHANNEL_NIC); + /* When this is VF driver, we must consider that PF has already set VF + * MAC, and we can't consider this condition is error status during + * driver probe procedure. 
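+	 * (SPNIC_PF_SET_VF_ALREADY is therefore tolerated just below)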
+ */ + if (err && err != SPNIC_PF_SET_VF_ALREADY) { + nic_err(&nic_dev->pdev->dev, "Failed to set default MAC\n"); + goto set_mac_err; + } + + /* MTU range: 384 - 9600 */ + netdev->min_mtu = SPNIC_MIN_MTU_SIZE; + netdev->max_mtu = SPNIC_MAX_JUMBO_FRAME_SIZE; + + err = spnic_alloc_txrxqs(nic_dev); + if (err) { + nic_err(&nic_dev->pdev->dev, "Failed to alloc qps\n"); + goto alloc_qps_err; + } + + return 0; + +alloc_qps_err: + spnic_del_mac(nic_dev->hwdev, netdev->dev_addr, 0, sphw_global_func_id(nic_dev->hwdev), + SPHW_CHANNEL_NIC); + +set_mac_err: +err_mac: +get_mac_err: + + if (test_bit(SPNIC_RSS_ENABLE, &nic_dev->flags)) + spnic_rss_template_free(nic_dev->hwdev); + spnic_clear_rss_config(nic_dev); + + return err; +} + +static void spnic_assign_netdev_ops(struct spnic_nic_dev *adapter) +{ + spnic_set_netdev_ops(adapter); + if (!SPNIC_FUNC_IS_VF(adapter->hwdev)) + spnic_set_ethtool_ops(adapter->netdev); + else + spnicvf_set_ethtool_ops(adapter->netdev); + + adapter->netdev->watchdog_timeo = WATCHDOG_TIMEOUT * HZ; +} + +static int spnic_validate_parameters(struct spnic_lld_dev *lld_dev) +{ + struct pci_dev *pdev = lld_dev->pdev; + + /* If weight exceeds the queue depth, the queue resources will be + * exhausted, and increasing it has no effect. + */ + if (!poll_weight || poll_weight > SPNIC_MAX_RX_QUEUE_DEPTH) { + nic_warn(&pdev->dev, "Module Parameter poll_weight is out of range: [1, %d], resetting to %d\n", + SPNIC_MAX_RX_QUEUE_DEPTH, DEFAULT_POLL_WEIGHT); + poll_weight = DEFAULT_POLL_WEIGHT; + } + + /* check rx_buff value, default rx_buff is 2KB. + * Valid rx_buff include 2KB/4KB/8KB/16KB. + */ + if (rx_buff != RX_BUFF_VALID_2KB && rx_buff != RX_BUFF_VALID_4KB && + rx_buff != RX_BUFF_VALID_8KB && rx_buff != RX_BUFF_VALID_16KB) { + nic_warn(&pdev->dev, "Module Parameter rx_buff value %u is out of range, must be 2^n. 
Valid range is 2 - 16, resetting to %dKB", + rx_buff, DEFAULT_RX_BUFF_LEN); + rx_buff = DEFAULT_RX_BUFF_LEN; + } + + return 0; +} + +static void adaptive_configuration_init(struct spnic_nic_dev *nic_dev) +{ + /* TODO: */ +} + +static int set_interrupt_moder(struct spnic_nic_dev *nic_dev, u16 q_id, + u8 coalesc_timer_cfg, u8 pending_limt) +{ + struct interrupt_info info; + int err; + + memset(&info, 0, sizeof(info)); + + if (coalesc_timer_cfg == nic_dev->rxqs[q_id].last_coalesc_timer_cfg && + pending_limt == nic_dev->rxqs[q_id].last_pending_limt) + return 0; + + /* netdev not running or qp not in using, + * don't need to set coalesce to hw + */ + if (!SPHW_CHANNEL_RES_VALID(nic_dev) || q_id >= nic_dev->q_params.num_qps) + return 0; + + info.lli_set = 0; + info.interrupt_coalesc_set = 1; + info.coalesc_timer_cfg = coalesc_timer_cfg; + info.pending_limt = pending_limt; + info.msix_index = nic_dev->q_params.irq_cfg[q_id].msix_entry_idx; + info.resend_timer_cfg = nic_dev->intr_coalesce[q_id].resend_timer_cfg; + + err = sphw_set_interrupt_cfg(nic_dev->hwdev, info, SPHW_CHANNEL_NIC); + if (err) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to modify moderation for Queue: %u\n", q_id); + } else { + nic_dev->rxqs[q_id].last_coalesc_timer_cfg = coalesc_timer_cfg; + nic_dev->rxqs[q_id].last_pending_limt = pending_limt; + } + + return err; +} + +static void calc_coal_para(struct spnic_nic_dev *nic_dev, struct spnic_intr_coal_info *q_coal, + u64 rx_rate, u8 *coalesc_timer_cfg, u8 *pending_limt) +{ + if (rx_rate < q_coal->pkt_rate_low) { + *coalesc_timer_cfg = q_coal->rx_usecs_low; + *pending_limt = q_coal->rx_pending_limt_low; + } else if (rx_rate > q_coal->pkt_rate_high) { + *coalesc_timer_cfg = q_coal->rx_usecs_high; + *pending_limt = q_coal->rx_pending_limt_high; + } else { + *coalesc_timer_cfg = + (u8)((rx_rate - q_coal->pkt_rate_low) * + (q_coal->rx_usecs_high - q_coal->rx_usecs_low) / + (q_coal->pkt_rate_high - q_coal->pkt_rate_low) + q_coal->rx_usecs_low); + + *pending_limt = q_coal->rx_pending_limt_low; + } +} + +static void update_queue_coal(struct spnic_nic_dev *nic_dev, u16 qid, + u64 rx_rate, u64 avg_pkt_size, u64 tx_rate) +{ + struct spnic_intr_coal_info *q_coal = NULL; + u8 coalesc_timer_cfg, pending_limt; + + q_coal = &nic_dev->intr_coalesce[qid]; + + if (rx_rate > SPNIC_RX_RATE_THRESH && + avg_pkt_size > SPNIC_AVG_PKT_SMALL) { + calc_coal_para(nic_dev, q_coal, rx_rate, &coalesc_timer_cfg, &pending_limt); + } else { + coalesc_timer_cfg = SPNIC_LOWEST_LATENCY; + pending_limt = q_coal->rx_pending_limt_low; + } + + set_interrupt_moder(nic_dev, qid, coalesc_timer_cfg, pending_limt); +} + +void spnic_auto_moderation_work(struct work_struct *work) +{ + struct delayed_work *delay = to_delayed_work(work); + struct spnic_nic_dev *nic_dev = container_of(delay, struct spnic_nic_dev, moderation_task); + unsigned long period = (unsigned long)(jiffies - nic_dev->last_moder_jiffies); + u64 rx_packets, rx_bytes, rx_pkt_diff, rx_rate, avg_pkt_size; + u64 tx_packets, tx_bytes, tx_pkt_diff, tx_rate; + u16 qid; + + if (!test_bit(SPNIC_INTF_UP, &nic_dev->flags)) + return; + + queue_delayed_work(nic_dev->workq, &nic_dev->moderation_task, SPNIC_MODERATONE_DELAY); + + if (!nic_dev->adaptive_rx_coal || !period) + return; + + for (qid = 0; qid < nic_dev->q_params.num_qps; qid++) { + rx_packets = nic_dev->rxqs[qid].rxq_stats.packets; + rx_bytes = nic_dev->rxqs[qid].rxq_stats.bytes; + tx_packets = nic_dev->txqs[qid].txq_stats.packets; + tx_bytes = nic_dev->txqs[qid].txq_stats.bytes; + + rx_pkt_diff = 
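+		/* deltas since the last sampling period drive the rate estimate */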
rx_packets - nic_dev->rxqs[qid].last_moder_packets; + avg_pkt_size = rx_pkt_diff ? + ((unsigned long)(rx_bytes - nic_dev->rxqs[qid].last_moder_bytes)) / + rx_pkt_diff : 0; + + rx_rate = rx_pkt_diff * HZ / period; + tx_pkt_diff = tx_packets - nic_dev->txqs[qid].last_moder_packets; + tx_rate = tx_pkt_diff * HZ / period; + + update_queue_coal(nic_dev, qid, rx_rate, avg_pkt_size, tx_rate); + + nic_dev->rxqs[qid].last_moder_packets = rx_packets; + nic_dev->rxqs[qid].last_moder_bytes = rx_bytes; + nic_dev->txqs[qid].last_moder_packets = tx_packets; + nic_dev->txqs[qid].last_moder_bytes = tx_bytes; + } + + nic_dev->last_moder_jiffies = jiffies; +} + +void spnic_periodic_work_handler(struct work_struct *work) +{ + struct delayed_work *delay = to_delayed_work(work); + struct spnic_nic_dev *nic_dev = container_of(delay, struct spnic_nic_dev, periodic_work); + + if (test_and_clear_bit(EVENT_WORK_TX_TIMEOUT, &nic_dev->event_flag)) + sphw_fault_event_report(nic_dev->hwdev, SPHW_FAULT_SRC_TX_TIMEOUT, + FAULT_LEVEL_SERIOUS_FLR); + + queue_delayed_work(nic_dev->workq, &nic_dev->periodic_work, HZ); +} + +static void free_nic_dev(struct spnic_nic_dev *nic_dev) +{ + destroy_workqueue(nic_dev->workq); + kfree(nic_dev->vlan_bitmap); +} + +static int setup_nic_dev(struct net_device *netdev, struct spnic_lld_dev *lld_dev) +{ + struct pci_dev *pdev = lld_dev->pdev; + struct spnic_nic_dev *nic_dev; + u32 page_num; + + nic_dev = (struct spnic_nic_dev *)netdev_priv(netdev); + nic_dev->netdev = netdev; + SET_NETDEV_DEV(netdev, &pdev->dev); + nic_dev->hwdev = lld_dev->hwdev; + nic_dev->pdev = pdev; + nic_dev->poll_weight = (int)poll_weight; + nic_dev->msg_enable = DEFAULT_MSG_ENABLE; + nic_dev->lro_replenish_thld = lro_replenish_thld; + nic_dev->rx_buff_len = (u16)(rx_buff * CONVERT_UNIT); + nic_dev->dma_rx_buff_size = RX_BUFF_NUM_PER_PAGE * nic_dev->rx_buff_len; + page_num = nic_dev->dma_rx_buff_size / PAGE_SIZE; + nic_dev->page_order = page_num > 0 ? 
ilog2(page_num) : 0;
+
+	mutex_init(&nic_dev->nic_mutex);
+
+	nic_dev->vlan_bitmap = kzalloc(VLAN_BITMAP_SIZE(nic_dev), GFP_KERNEL);
+	if (!nic_dev->vlan_bitmap) {
+		nic_err(&pdev->dev, "Failed to allocate vlan bitmap\n");
+		return -ENOMEM;
+	}
+
+	nic_dev->workq = create_singlethread_workqueue(SPNIC_NIC_DEV_WQ_NAME);
+	if (!nic_dev->workq) {
+		nic_err(&pdev->dev, "Failed to initialize nic workqueue\n");
+		kfree(nic_dev->vlan_bitmap);
+		return -ENOMEM;
+	}
+
+	INIT_DELAYED_WORK(&nic_dev->periodic_work, spnic_periodic_work_handler);
+
+	INIT_LIST_HEAD(&nic_dev->uc_filter_list);
+	INIT_LIST_HEAD(&nic_dev->mc_filter_list);
+	INIT_WORK(&nic_dev->rx_mode_work, spnic_set_rx_mode_work);
+
+	INIT_LIST_HEAD(&nic_dev->rx_flow_rule.rules);
+	INIT_LIST_HEAD(&nic_dev->tcam.tcam_list);
+	INIT_LIST_HEAD(&nic_dev->tcam.tcam_dynamic_info.tcam_dynamic_list);
+
+	return 0;
+}
+
+static int spnic_set_default_hw_feature(struct spnic_nic_dev *nic_dev)
+{
+	u64 nic_features;
+	int err;
+
+	if (!SPNIC_FUNC_IS_VF(nic_dev->hwdev)) {
+		err = spnic_dcb_reset_hw_config(nic_dev);
+		if (err) {
+			nic_err(&nic_dev->pdev->dev, "Failed to reset hw dcb configuration\n");
+			return err;
+		}
+	}
+
+	nic_features = spnic_get_feature_cap(nic_dev->hwdev);
+	err = spnic_set_nic_feature(nic_dev->hwdev, &nic_features, 1);
+	if (err) {
+		nic_err(&nic_dev->pdev->dev, "Failed to set nic features\n");
+		return err;
+	}
+
+	/* enable all hw features in netdev->features */
+	return spnic_set_hw_features(nic_dev);
+}
+
+static int nic_probe(struct spnic_lld_dev *lld_dev, void **uld_dev, char *uld_dev_name)
+{
+	struct pci_dev *pdev = lld_dev->pdev;
+	struct spnic_nic_dev *nic_dev = NULL;
+	struct net_device *netdev = NULL;
+	u16 max_qps, glb_func_id;
+	int err;
+
+	/* *uld_dev should never be NULL */
+	*uld_dev = lld_dev;
+
+	if (!sphw_support_nic(lld_dev->hwdev, NULL)) {
+		nic_info(&pdev->dev, "HW doesn't support nic\n");
+		return 0;
+	}
+
+	nic_info(&pdev->dev, "NIC service probe begin\n");
+
+	err = spnic_validate_parameters(lld_dev);
+	if (err) {
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	glb_func_id = sphw_global_func_id(lld_dev->hwdev);
+	err = sphw_func_reset(lld_dev->hwdev, glb_func_id, SPHW_NIC_RES, SPHW_CHANNEL_NIC);
+	if (err) {
+		nic_err(&pdev->dev, "Failed to reset function\n");
+		goto err_out;
+	}
+
+	max_qps = sphw_func_max_nic_qnum(lld_dev->hwdev);
+	netdev = alloc_etherdev_mq(sizeof(*nic_dev), max_qps);
+	if (!netdev) {
+		nic_err(&pdev->dev, "Failed to allocate ETH device\n");
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	nic_dev = (struct spnic_nic_dev *)netdev_priv(netdev);
+	err = setup_nic_dev(netdev, lld_dev);
+	if (err)
+		goto setup_dev_err;
+
+	adaptive_configuration_init(nic_dev);
+
+	/* get nic cap from hw */
+	sphw_support_nic(lld_dev->hwdev, &nic_dev->nic_cap);
+
+	err = spnic_init_nic_hwdev(nic_dev->hwdev, pdev, &pdev->dev, nic_dev->rx_buff_len);
+	if (err) {
+		nic_err(&pdev->dev, "Failed to init nic hwdev\n");
+		goto init_nic_hwdev_err;
+	}
+
+	spnic_assign_netdev_ops(nic_dev);
+	netdev_feature_init(netdev);
+
+	err = spnic_sw_init(nic_dev);
+	if (err)
+		goto sw_init_err;
+
+	err = spnic_set_default_hw_feature(nic_dev);
+	if (err)
+		goto set_features_err;
+
+	spnic_register_notifier(nic_dev);
+
+	err = register_netdev(netdev);
+	if (err) {
+		nic_err(&pdev->dev, "Failed to register netdev\n");
+		goto netdev_err;
+	}
+
+	queue_delayed_work(nic_dev->workq, &nic_dev->periodic_work, HZ);
+	netif_carrier_off(netdev);
+
+	*uld_dev = nic_dev;
+	nicif_info(nic_dev, probe, netdev, "Register netdev succeeded\n");
+	nic_info(&pdev->dev, "NIC service probed\n");
+
+	return 0;
+
+netdev_err:
+	spnic_unregister_notifier(nic_dev);
+
+set_features_err:
+	spnic_sw_deinit(nic_dev);
+
+sw_init_err:
+	spnic_free_nic_hwdev(nic_dev->hwdev);
+
+init_nic_hwdev_err:
+	free_nic_dev(nic_dev);
+setup_dev_err:
+	free_netdev(netdev);
+
+err_out:
+	nic_err(&pdev->dev, "NIC service probe failed\n");
+
+	return err;
+}
+
+static void nic_remove(struct spnic_lld_dev *lld_dev, void *adapter)
+{
+	struct spnic_nic_dev *nic_dev = adapter;
+	struct net_device *netdev = NULL;
+
+	if (!nic_dev || !sphw_support_nic(lld_dev->hwdev, NULL))
+		return;
+
+	nic_info(&lld_dev->pdev->dev, "NIC service remove begin\n");
+
+	netdev = nic_dev->netdev;
+
+	unregister_netdev(netdev);
+	spnic_unregister_notifier(nic_dev);
+
+	cancel_delayed_work_sync(&nic_dev->periodic_work);
+	cancel_work_sync(&nic_dev->rx_mode_work);
+	destroy_workqueue(nic_dev->workq);
+
+	spnic_sw_deinit(nic_dev);
+
+	spnic_flush_rx_flow_rule(nic_dev);
+	spnic_free_nic_hwdev(nic_dev->hwdev);
+
+	kfree(nic_dev->vlan_bitmap);
+
+	free_netdev(netdev);
+
+	nic_info(&lld_dev->pdev->dev, "NIC service removed\n");
+}
+
+static void sriov_state_change(struct spnic_nic_dev *nic_dev,
+			       const struct sphw_sriov_state_info *info)
+{
+	if (!info->enable)
+		spnic_clear_vfs_info(nic_dev->hwdev);
+}
+
+const char *g_spnic_module_link_err[LINK_ERR_NUM] = {
+	"Unrecognized module",
+};
+
+void sphw_port_module_event_handler(struct spnic_nic_dev *nic_dev, struct sphw_event_info *event)
+{
+	enum port_module_event_type type = event->module_event.type;
+	enum link_err_type err_type = event->module_event.err_type;
+
+	switch (type) {
+	case SPHW_PORT_MODULE_CABLE_PLUGGED:
+	case SPHW_PORT_MODULE_CABLE_UNPLUGGED:
+		nicif_info(nic_dev, link, nic_dev->netdev,
+			   "Port module event: Cable %s\n",
+			   type == SPHW_PORT_MODULE_CABLE_PLUGGED ?
+			   "plugged" : "unplugged");
+		break;
+	case SPHW_PORT_MODULE_LINK_ERR:
+		if (err_type >= LINK_ERR_NUM) {
+			nicif_info(nic_dev, link, nic_dev->netdev,
+				   "Link failed, Unknown error type: 0x%x\n", err_type);
+		} else {
+			nicif_info(nic_dev, link, nic_dev->netdev, "Link failed, error type: 0x%x: %s\n",
+				   err_type, g_spnic_module_link_err[err_type]);
+		}
+		break;
+	default:
+		nicif_err(nic_dev, link, nic_dev->netdev, "Unknown port module type %d\n", type);
+		break;
+	}
+}
+
+void nic_event(struct spnic_lld_dev *lld_dev, void *adapter, struct sphw_event_info *event)
+{
+	struct spnic_nic_dev *nic_dev = adapter;
+	enum sphw_event_type type;
+
+	if (!nic_dev || !event || !sphw_support_nic(lld_dev->hwdev, NULL))
+		return;
+
+	type = event->type;
+
+	switch (type) {
+	case SPHW_EVENT_LINK_DOWN:
+		spnic_link_status_change(nic_dev, false);
+		break;
+	case SPHW_EVENT_LINK_UP:
+		spnic_link_status_change(nic_dev, true);
+		break;
+	case SPHW_EVENT_SRIOV_STATE_CHANGE:
+		sriov_state_change(nic_dev, &event->sriov_state);
+		break;
+	case SPHW_EVENT_PORT_MODULE_EVENT:
+		sphw_port_module_event_handler(nic_dev, event);
+		break;
+	case SPHW_EVENT_FAULT:
+		if (event->info.fault_level == FAULT_LEVEL_SERIOUS_FLR &&
+		    event->info.event.chip.func_id == sphw_global_func_id(lld_dev->hwdev))
+			spnic_link_status_change(nic_dev, false);
+		break;
+	case SPHW_EVENT_PCIE_LINK_DOWN:
+	case SPHW_EVENT_HEART_LOST:
+		spnic_link_status_change(nic_dev, false);
+		break;
+	default:
+		break;
+	}
+}
+
+struct net_device *spnic_get_netdev_by_lld(struct spnic_lld_dev *lld_dev)
+{
+	struct spnic_nic_dev *nic_dev = NULL;
+
+	if (!lld_dev || !sphw_support_nic(lld_dev->hwdev, NULL))
+		return NULL;
+
+	nic_dev = spnic_get_uld_dev_by_pdev(lld_dev->pdev, SERVICE_T_NIC);
+	if (!nic_dev) {
+		sdk_err(&lld_dev->pdev->dev,
+			"There's no net device attached to the pci device\n");
+		return NULL;
+	}
+
+	return nic_dev->netdev;
+}
+
+void *spnic_get_hwdev_by_netdev(struct net_device *netdev)
+{
+	struct spnic_nic_dev *nic_dev = NULL;
+
+	/* check netdev before reading its private data */
+	if (!netdev)
+		return NULL;
+
+	nic_dev = netdev_priv(netdev);
+	if (!nic_dev)
+		return NULL;
+
+	return nic_dev->hwdev;
+}
+
+struct spnic_uld_info nic_uld_info = {
+	.probe = nic_probe,
+	.remove = nic_remove,
+	.suspend = NULL,
+	.resume = NULL,
+	.event = nic_event,
+	.ioctl = nic_ioctl,
+};
diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_mgmt_interface.h b/drivers/net/ethernet/ramaxel/spnic/spnic_mgmt_interface.h
new file mode 100644
index 000000000000..58ab35c18057
--- /dev/null
+++ b/drivers/net/ethernet/ramaxel/spnic/spnic_mgmt_interface.h
@@ -0,0 +1,617 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */
+
+#ifndef SPNIC_MGMT_INTERFACE_H
+#define SPNIC_MGMT_INTERFACE_H
+
+#include <linux/if_ether.h>
+
+#include "sphw_mgmt_msg_base.h"
+
+#define SPNIC_CMD_OP_SET	MGMT_MSG_CMD_OP_SET
+#define SPNIC_CMD_OP_GET	MGMT_MSG_CMD_OP_GET
+
+#define SPNIC_CMD_OP_ADD	1
+#define SPNIC_CMD_OP_DEL	0
+
+enum nic_feature_cap {
+	NIC_F_CSUM = BIT(0),
+	NIC_F_SCTP_CRC = BIT(1),
+	NIC_F_TSO = BIT(2),
+	NIC_F_LRO = BIT(3),
+	NIC_F_UFO = BIT(4),
+	NIC_F_RSS = BIT(5),
+	NIC_F_RX_VLAN_FILTER = BIT(6),
+	NIC_F_RX_VLAN_STRIP = BIT(7),
+	NIC_F_TX_VLAN_INSERT = BIT(8),
+	NIC_F_VXLAN_OFFLOAD = BIT(9),
+	NIC_F_IPSEC_OFFLOAD = BIT(10),
+	NIC_F_FDIR = BIT(11),
+	NIC_F_PROMISC = BIT(12),
+	NIC_F_ALLMULTI = BIT(13),
+};
+
+#define NIC_F_ALL_MASK		0x3FFF
+
+#define NIC_MAX_FEATURE_QWORD	4
+struct spnic_cmd_feature_nego {
+	struct mgmt_msg_head msg_head;
+
+	u16 func_id;
+	u8 opcode;	/* 1: set, 0: get */
+	u8 rsvd;
+	u64 
s_feature[NIC_MAX_FEATURE_QWORD]; +}; + +struct spnic_port_mac_set { + struct mgmt_msg_head msg_head; + + u16 func_id; + u16 vlan_id; + u16 rsvd1; + u8 mac[ETH_ALEN]; +}; + +struct spnic_port_mac_update { + struct mgmt_msg_head msg_head; + + u16 func_id; + u16 vlan_id; + u16 rsvd1; + u8 old_mac[ETH_ALEN]; + u16 rsvd2; + u8 new_mac[ETH_ALEN]; +}; + +struct spnic_vport_state { + struct mgmt_msg_head msg_head; + + u16 func_id; + u16 rsvd1; + u8 state; /* 0--disable, 1--enable */ + u8 rsvd2[3]; +}; + +struct spnic_port_state { + struct mgmt_msg_head msg_head; + + u16 func_id; + u16 rsvd1; + u8 state; /* 0--disable, 1--enable */ + u8 rsvd2[3]; +}; + +struct spnic_cmd_clear_qp_resource { + struct mgmt_msg_head msg_head; + + u16 func_id; + u16 rsvd1; +}; + +struct spnic_port_stats_info { + struct mgmt_msg_head msg_head; + + u16 func_id; + u16 rsvd1; +}; + +struct spnic_vport_stats { + u64 tx_unicast_pkts_vport; + u64 tx_unicast_bytes_vport; + u64 tx_multicast_pkts_vport; + u64 tx_multicast_bytes_vport; + u64 tx_broadcast_pkts_vport; + u64 tx_broadcast_bytes_vport; + + u64 rx_unicast_pkts_vport; + u64 rx_unicast_bytes_vport; + u64 rx_multicast_pkts_vport; + u64 rx_multicast_bytes_vport; + u64 rx_broadcast_pkts_vport; + u64 rx_broadcast_bytes_vport; + + u64 tx_discard_vport; + u64 rx_discard_vport; + u64 tx_err_vport; + u64 rx_err_vport; +}; + +struct spnic_cmd_vport_stats { + struct mgmt_msg_head msg_head; + + u32 stats_size; + u32 rsvd1; + struct spnic_vport_stats stats; + u64 rsvd2[6]; +}; + +struct spnic_cmd_qpn { + struct mgmt_msg_head msg_head; + + u16 func_id; + u16 base_qpn; +}; + +enum spnic_func_tbl_cfg_bitmap { + FUNC_CFG_INIT, + FUNC_CFG_RX_BUF_SIZE, + FUNC_CFG_MTU, +}; + +struct spnic_func_tbl_cfg { + u16 rx_wqe_buf_size; + u16 mtu; + u32 rsvd[9]; +}; + +struct spnic_cmd_set_func_tbl { + struct mgmt_msg_head msg_head; + + u16 func_id; + u16 rsvd; + + u32 cfg_bitmap; + struct spnic_func_tbl_cfg tbl_cfg; +}; + +struct spnic_cmd_cons_idx_attr { + struct mgmt_msg_head msg_head; + + u16 func_idx; + u8 dma_attr_off; + u8 pending_limit; + u8 coalescing_time; + u8 intr_en; + u16 intr_idx; + u32 l2nic_sqn; + u32 rsvd; + u64 ci_addr; +}; + +struct spnic_cmd_vlan_offload { + struct mgmt_msg_head msg_head; + + u16 func_id; + u8 vlan_offload; + u8 rsvd1[5]; +}; + +struct spnic_cmd_lro_config { + struct mgmt_msg_head msg_head; + + u16 func_id; + u8 opcode; + u8 rsvd1; + u8 lro_ipv4_en; + u8 lro_ipv6_en; + u8 lro_max_pkt_len; /* unit is 1K */ + u8 resv2[13]; +}; + +struct spnic_cmd_lro_timer { + struct mgmt_msg_head msg_head; + + u8 opcode; /* 1: set timer value, 0: get timer value */ + u8 rsvd1; + u16 rsvd2; + u32 timer; +}; + +struct spnic_cmd_vf_vlan_config { + struct mgmt_msg_head msg_head; + + u16 func_id; + u8 opcode; + u8 rsvd1; + u16 vlan_id; + u8 qos; + u8 rsvd2[5]; +}; + +struct spnic_cmd_spoofchk_set { + struct mgmt_msg_head msg_head; + + u16 func_id; + u8 state; + u8 rsvd1; +}; + +struct spnic_cmd_tx_rate_cfg { + struct mgmt_msg_head msg_head; + + u16 func_id; + u16 rsvd1; + u32 min_rate; + u32 max_rate; + u8 rsvd2[8]; +}; + +struct spnic_cmd_port_info { + struct mgmt_msg_head msg_head; + + u8 port_id; + u8 rsvd1[3]; + u8 port_type; + u8 autoneg_cap; + u8 autoneg_state; + u8 duplex; + u8 speed; + u8 fec; + u16 rsvd2; + u32 rsvd3[4]; +}; + +struct spnic_cmd_register_vf { + struct mgmt_msg_head msg_head; + + u8 op_register; /* 0 - unregister, 1 - register */ + u8 rsvd[39]; +}; + +struct spnic_cmd_link_state { + struct mgmt_msg_head msg_head; + + u8 port_id; + u8 state; + u16 rsvd1; +}; + 
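For orientation, the structures in this header are plain request/response wire formats: the driver fills a request, sends it over the management channel, and checks the status returned in the message head. The sketch below is editorial illustration only and is not part of the patch; it reuses l2nic_msg_to_mgmt_sync() as declared in spnic_nic.h later in this series, while the command id SPNIC_NIC_CMD_SET_VPORT_ENABLE and the msg_head.status field are assumptions based on the surrounding code, not taken from this patch.

	static int example_set_vport_state(void *hwdev, u16 func_id, bool enable)
	{
		struct spnic_vport_state en_state = {0};
		u16 out_size = sizeof(en_state);
		int err;

		en_state.func_id = func_id;
		en_state.state = enable ? 1 : 0;	/* 0--disable, 1--enable */

		/* hypothetical command id; the real value lives in spnic_nic_cmd.h */
		err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_SET_VPORT_ENABLE,
					     &en_state, sizeof(en_state),
					     &en_state, &out_size);
		/* a reply with zero out_size or nonzero status means the
		 * management CPU rejected the command
		 */
		if (err || !out_size || en_state.msg_head.status)
			return err ? err : -EIO;

		return 0;
	}

The same fill/send/check pattern applies to the remaining command structures below.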
+struct spnic_cmd_vlan_config { + struct mgmt_msg_head msg_head; + + u16 func_id; + u8 opcode; + u8 rsvd1; + u16 vlan_id; + u16 rsvd2; +}; + +/* set vlan filter */ +struct spnic_cmd_set_vlan_filter { + struct mgmt_msg_head msg_head; + + u16 func_id; + u8 resvd[2]; + u32 vlan_filter_ctrl; /* bit0:vlan filter en; bit1:broadcast_filter_en */ +}; + +struct spnic_cmd_link_ksettings_info { + struct mgmt_msg_head msg_head; + + u8 port_id; + u8 rsvd1[3]; + + u32 valid_bitmap; + u8 speed; /* enum nic_speed_level */ + u8 autoneg; /* 0 - off, 1 - on */ + u8 fec; /* 0 - RSFEC, 1 - BASEFEC, 2 - NOFEC */ + u8 rsvd2[21]; /* reserved for duplex, port, etc. */ +}; + +struct mpu_lt_info { + u8 node; + u8 inst; + u8 entry_size; + u8 rsvd; + u32 lt_index; + u32 offset; + u32 len; +}; + +struct nic_mpu_lt_opera { + struct mgmt_msg_head msg_head; + struct mpu_lt_info net_lt_cmd; + u8 data[100]; +}; + +struct spnic_rx_mode_config { + struct mgmt_msg_head msg_head; + + u16 func_id; + u16 rsvd1; + u32 rx_mode; +}; + +/* rss */ +struct spnic_rss_context_table { + struct mgmt_msg_head msg_head; + + u16 func_id; + u16 rsvd1; + u32 context; +}; + +struct spnic_cmd_rss_engine_type { + struct mgmt_msg_head msg_head; + + u16 func_id; + u8 opcode; + u8 hash_engine; + u8 rsvd1[4]; +}; + +#define SPNIC_RSS_INDIR_SIZE 256 +#define SPNIC_RSS_KEY_SIZE 40 + +struct spnic_cmd_rss_hash_key { + struct mgmt_msg_head msg_head; + + u16 func_id; + u8 opcode; + u8 rsvd1; + u8 key[SPNIC_RSS_KEY_SIZE]; +}; + +struct spnic_rss_indir_table { + struct mgmt_msg_head msg_head; + + u16 func_id; + u16 rsvd1; + u8 indir[SPNIC_RSS_INDIR_SIZE]; +}; + +#define NIC_RSS_CMD_TEMP_ALLOC 0x01 +#define NIC_RSS_CMD_TEMP_FREE 0x02 + +struct spnic_rss_template_mgmt { + struct mgmt_msg_head msg_head; + + u16 func_id; + u8 cmd; + u8 template_id; + u8 rsvd1[4]; +}; + +#define SPNIC_DCB_UP_MAX 0x8 +#define SPNIC_DCB_COS_MAX 0x8 +#define SPNIC_DCB_TC_MAX 0x8 + +struct spnic_cmd_rss_config { + struct mgmt_msg_head msg_head; + + u16 func_id; + u8 rss_en; + u8 rq_priority_number; + u8 prio_tc[SPNIC_DCB_COS_MAX]; + u16 num_qps; + u16 rsvd1; +}; + +struct spnic_dcb_state { + u8 dcb_on; + u8 default_cos; + u16 rsvd1; + u8 up_cos[SPNIC_DCB_UP_MAX]; + u32 rsvd2[7]; +}; + +struct spnic_cmd_vf_dcb_state { + struct mgmt_msg_head msg_head; + + struct spnic_dcb_state state; +}; + +struct spnic_up_ets_cfg { + struct mgmt_msg_head msg_head; + + u8 port_id; + u8 rsvd1[3]; + + u8 cos_tc[SPNIC_DCB_COS_MAX]; + u8 tc_bw[SPNIC_DCB_TC_MAX]; + u8 cos_prio[SPNIC_DCB_COS_MAX]; + u8 cos_bw[SPNIC_DCB_COS_MAX]; + u8 tc_prio[SPNIC_DCB_TC_MAX]; +}; + +struct spnic_cmd_set_pfc { + struct mgmt_msg_head msg_head; + + u8 port_id; + u8 rsvd1; + u8 pfc_en; + u8 pfc_bitmap; + u8 rsvd2[4]; +}; + +struct spnic_cos_up_map { + struct mgmt_msg_head msg_head; + + u8 port_id; + /* every bit indicate index of map is valid 1 or not 0*/ + u8 cos_valid_mask; + u16 rsvd1; + + /* user priority in cos(index:cos, value: up pri) */ + u8 map[SPNIC_DCB_UP_MAX]; +}; + +struct spnic_cmd_pause_config { + struct mgmt_msg_head msg_head; + + u8 port_id; + u8 opcode; + u16 rsvd1; + u8 auto_neg; + u8 rx_pause; + u8 tx_pause; + u8 rsvd2[5]; +}; + +struct nic_cmd_tx_pause_notice { + struct mgmt_msg_head head; + + u32 tx_pause_except; + u32 except_level; + u32 rsvd; +}; + +#define SPNIC_CMD_OP_FREE 0 +#define SPNIC_CMD_OP_ALLOC 1 + +struct spnic_cmd_cfg_qps { + struct mgmt_msg_head msg_head; + + u16 func_id; + u8 opcode; /* 1: alloc qp, 0: free qp */ + u8 rsvd1; + u16 num_qps; + u16 rsvd2; +}; + +struct spnic_cmd_led_config { 
+ struct mgmt_msg_head msg_head; + + u8 port; + u8 type; + u8 mode; + u8 rsvd1; +}; + +struct spnic_cmd_port_loopback { + struct mgmt_msg_head msg_head; + + u8 port_id; + u8 opcode; + u8 mode; + u8 en; + u32 rsvd1[2]; +}; + +struct spnic_cmd_get_light_module_abs { + struct mgmt_msg_head msg_head; + + u8 port_id; + u8 abs_status; /* 0:present, 1:absent */ + u8 rsv[2]; +}; + +#define STD_SFP_INFO_MAX_SIZE 640 +struct spnic_cmd_get_std_sfp_info { + struct mgmt_msg_head msg_head; + + u8 port_id; + u8 wire_type; + u16 eeprom_len; + u32 rsvd; + u8 sfp_info[STD_SFP_INFO_MAX_SIZE]; +}; + +struct spnic_cable_plug_event { + struct mgmt_msg_head msg_head; + + u16 func_id; + u8 plugged; /* 0: unplugged, 1: plugged */ + u8 port_id; +}; + +struct nic_cmd_mac_info { + struct mgmt_msg_head head; + + u32 valid_bitmap; + u16 rsvd; + + u8 host_id[32]; + u8 port_id[32]; + u8 mac_addr[192]; +}; + +#define SPNIC_TCAM_BLOCK_ENABLE 1 +#define SPNIC_TCAM_BLOCK_DISABLE 0 +#define SPNIC_TCAM_BLOCK_NORMAL_TYPE 0 +#define SPNIC_MAX_TCAM_RULES_NUM 4096 + +struct nic_cmd_set_tcam_enable { + struct mgmt_msg_head head; + + u16 func_id; + u8 tcam_enable; + u8 rsvd1; + u32 rsvd2; +}; + +/* alloc tcam block input struct */ +struct nic_cmd_ctrl_tcam_block_in { + struct mgmt_msg_head head; + + u16 func_id; /* func_id */ + u8 alloc_en; /* 0: free tcam block, 1: alloc tcam block */ + u8 tcam_type; /* 0: alloc 16 size tcam block, 1: alloc 0 size tcam block */ + u16 tcam_block_index; + u16 alloc_block_num; +}; + +/* alloc tcam block output struct */ +struct nic_cmd_ctrl_tcam_block_out { + struct mgmt_msg_head head; + + u16 func_id; + u8 alloc_en; + u8 tcam_type; + u16 tcam_block_index; + u16 mpu_alloc_block_size; +}; + +struct nic_cmd_flush_tcam_rules { + struct mgmt_msg_head head; + + u16 func_id; /* func_id */ + u16 rsvd; +}; + +struct nic_cmd_dfx_fdir_tcam_block_table { + struct mgmt_msg_head head; + u8 tcam_type; + u8 valid; + u16 tcam_block_index; + u16 use_function_id; + u16 rsvd; +}; + +struct tcam_result { + u32 qid; + u32 rsvd; +}; + +#define TCAM_FLOW_KEY_SIZE 44 + +struct tcam_key_x_y { + u8 x[TCAM_FLOW_KEY_SIZE]; + u8 y[TCAM_FLOW_KEY_SIZE]; +}; + +struct nic_tcam_cfg_rule { + u32 index; + struct tcam_result data; + struct tcam_key_x_y key; +}; + +struct nic_cmd_fdir_add_rule { + struct mgmt_msg_head head; + + u16 func_id; + u16 rsvd; + struct nic_tcam_cfg_rule rule; +}; + +struct nic_cmd_fdir_del_rules { + struct mgmt_msg_head head; + + u16 func_id; + u16 rsvd; + u32 index_start; + u32 index_num; +}; + +struct nic_cmd_fdir_get_rule { + struct mgmt_msg_head head; + + u32 index; + u32 valid; + struct tcam_key_x_y key; + struct tcam_result data; + u64 packet_count; + u64 byte_count; +}; + +#endif /* SPNIC_MGMT_INTERFACE_H */ diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_netdev_ops.c b/drivers/net/ethernet/ramaxel/spnic/spnic_netdev_ops.c new file mode 100644 index 000000000000..a4f668682f37 --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/spnic_netdev_ops.c @@ -0,0 +1,1526 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/device.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/etherdevice.h> +#include <linux/netdevice.h> +#include <linux/netlink.h> +#include <linux/debugfs.h> +#include <linux/ip.h> +#include <linux/bpf.h> + +#include "sphw_hw.h" +#include "sphw_crm.h" +#include "spnic_nic_io.h" +#include "spnic_nic_dev.h" 
+#include "spnic_tx.h" +#include "spnic_rx.h" +#include "spnic_dcb.h" + +#define SPNIC_DEFAULT_RX_CSUM_OFFLOAD 0xFFF + +#define SPNIC_LRO_DEFAULT_COAL_PKT_SIZE 32 +#define SPNIC_LRO_DEFAULT_TIME_LIMIT 16 +#define SPNIC_WAIT_FLUSH_QP_RESOURCE_TIMEOUT 2000 +static void spnic_nic_set_rx_mode(struct net_device *netdev) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + + if (netdev_uc_count(netdev) != nic_dev->netdev_uc_cnt || + netdev_mc_count(netdev) != nic_dev->netdev_mc_cnt) { + set_bit(SPNIC_UPDATE_MAC_FILTER, &nic_dev->flags); + nic_dev->netdev_uc_cnt = netdev_uc_count(netdev); + nic_dev->netdev_mc_cnt = netdev_mc_count(netdev); + } + + queue_work(nic_dev->workq, &nic_dev->rx_mode_work); +} + +int spnic_alloc_txrxq_resources(struct spnic_nic_dev *nic_dev, + struct spnic_dyna_txrxq_params *q_params) +{ + u32 size; + int err; + + size = sizeof(*q_params->txqs_res) * q_params->num_qps; + q_params->txqs_res = kzalloc(size, GFP_KERNEL); + if (!q_params->txqs_res) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to alloc txqs resources array\n"); + return -ENOMEM; + } + + size = sizeof(*q_params->rxqs_res) * q_params->num_qps; + q_params->rxqs_res = kzalloc(size, GFP_KERNEL); + if (!q_params->rxqs_res) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to alloc rxqs resource array\n"); + err = -ENOMEM; + goto alloc_rxqs_res_arr_err; + } + + size = sizeof(*q_params->irq_cfg) * q_params->num_qps; + q_params->irq_cfg = kzalloc(size, GFP_KERNEL); + if (!q_params->irq_cfg) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to alloc irq resource array\n"); + err = -ENOMEM; + goto alloc_irq_cfg_err; + } + + err = spnic_alloc_txqs_res(nic_dev, q_params->num_qps, + q_params->sq_depth, q_params->txqs_res); + if (err) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to alloc txqs resource\n"); + goto alloc_txqs_res_err; + } + + err = spnic_alloc_rxqs_res(nic_dev, q_params->num_qps, + q_params->rq_depth, q_params->rxqs_res); + if (err) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to alloc rxqs resource\n"); + goto alloc_rxqs_res_err; + } + + return 0; + +alloc_rxqs_res_err: + spnic_free_txqs_res(nic_dev, q_params->num_qps, q_params->sq_depth, + q_params->txqs_res); + +alloc_txqs_res_err: + kfree(q_params->irq_cfg); + q_params->irq_cfg = NULL; + +alloc_irq_cfg_err: + kfree(q_params->rxqs_res); + q_params->rxqs_res = NULL; + +alloc_rxqs_res_arr_err: + kfree(q_params->txqs_res); + q_params->txqs_res = NULL; + + return err; +} + +void spnic_free_txrxq_resources(struct spnic_nic_dev *nic_dev, + struct spnic_dyna_txrxq_params *q_params) +{ + spnic_free_rxqs_res(nic_dev, q_params->num_qps, q_params->rq_depth, q_params->rxqs_res); + spnic_free_txqs_res(nic_dev, q_params->num_qps, q_params->sq_depth, q_params->txqs_res); + + kfree(q_params->irq_cfg); + q_params->irq_cfg = NULL; + + kfree(q_params->rxqs_res); + q_params->rxqs_res = NULL; + + kfree(q_params->txqs_res); + q_params->txqs_res = NULL; +} + +int spnic_configure_txrxqs(struct spnic_nic_dev *nic_dev, + struct spnic_dyna_txrxq_params *q_params) +{ + int err; + + err = spnic_configure_txqs(nic_dev, q_params->num_qps, + q_params->sq_depth, q_params->txqs_res); + if (err) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to configure txqs\n"); + return err; + } + + err = spnic_configure_rxqs(nic_dev, q_params->num_qps, + q_params->rq_depth, q_params->rxqs_res); + if (err) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to configure rxqs\n"); + return err; + } + + return 0; +} + +static void config_dcb_qps_map(struct 
spnic_nic_dev *nic_dev) +{ + struct net_device *netdev = nic_dev->netdev; + u8 i, num_tcs; + u16 num_rss; + + if (!test_bit(SPNIC_DCB_ENABLE, &nic_dev->flags)) { + spnic_update_tx_db_cos(nic_dev); + return; + } + + num_tcs = (u8)netdev_get_num_tc(netdev); + /* For now, we don't support to change num_tcs */ + if (num_tcs != nic_dev->hw_dcb_cfg.max_cos || + nic_dev->q_params.num_qps < num_tcs || + !test_bit(SPNIC_RSS_ENABLE, &nic_dev->flags)) { + nicif_err(nic_dev, drv, netdev, "Invalid num_tcs: %u or num_qps: %u, disable DCB\n", + num_tcs, nic_dev->q_params.num_qps); + netdev_reset_tc(netdev); + nic_dev->q_params.num_tc = 0; + clear_bit(SPNIC_DCB_ENABLE, &nic_dev->flags); + /* if we can't enable rss or get enough num_qps, + * need to sync default configure to hw + */ + spnic_configure_dcb(netdev); + } else { + /* use 0~max_cos-1 as tc for netdev */ + num_rss = nic_dev->q_params.num_rss; + for (i = 0; i < num_tcs; i++) + netdev_set_tc_queue(netdev, i, num_rss, (u16)(num_rss * i)); + } + + spnic_update_tx_db_cos(nic_dev); +} + +static int spnic_configure(struct spnic_nic_dev *nic_dev) +{ + struct net_device *netdev = nic_dev->netdev; + int err; + + err = spnic_set_port_mtu(nic_dev->hwdev, (u16)netdev->mtu); + if (err) { + nicif_err(nic_dev, drv, netdev, "Failed to set mtu\n"); + return err; + } + + config_dcb_qps_map(nic_dev); + + /* rx rss init */ + err = spnic_rx_configure(netdev); + if (err) { + nicif_err(nic_dev, drv, netdev, "Failed to configure rx\n"); + return err; + } + + return 0; +} + +static void spnic_remove_configure(struct spnic_nic_dev *nic_dev) +{ + spnic_rx_remove_configure(nic_dev->netdev); +} + +/* try to modify the number of irq to the target number, + * and return the actual number of irq. + */ +static u16 spnic_qp_irq_change(struct spnic_nic_dev *nic_dev, u16 dst_num_qp_irq) +{ + struct irq_info *qps_irq_info = nic_dev->qps_irq_info; + u16 resp_irq_num, irq_num_gap, i; + u16 idx; + int err; + + if (dst_num_qp_irq > nic_dev->num_qp_irq) { + irq_num_gap = dst_num_qp_irq - nic_dev->num_qp_irq; + err = sphw_alloc_irqs(nic_dev->hwdev, SERVICE_T_NIC, irq_num_gap, + &qps_irq_info[nic_dev->num_qp_irq], &resp_irq_num); + if (err) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to alloc irqs\n"); + return nic_dev->num_qp_irq; + } + + nic_dev->num_qp_irq += resp_irq_num; + } else if (dst_num_qp_irq < nic_dev->num_qp_irq) { + irq_num_gap = nic_dev->num_qp_irq - dst_num_qp_irq; + for (i = 0; i < irq_num_gap; i++) { + idx = (nic_dev->num_qp_irq - i) - 1; + sphw_free_irq(nic_dev->hwdev, SERVICE_T_NIC, qps_irq_info[idx].irq_id); + qps_irq_info[idx].irq_id = 0; + qps_irq_info[idx].msix_entry_idx = 0; + } + nic_dev->num_qp_irq = dst_num_qp_irq; + } + + return nic_dev->num_qp_irq; +} + +static void config_dcb_num_qps(struct spnic_nic_dev *nic_dev, + struct spnic_dyna_txrxq_params *q_params, + u16 max_qps) +{ + u8 num_tcs = q_params->num_tc; + u16 num_rss; + + if (!num_tcs || !test_bit(SPNIC_RSS_ENABLE, &nic_dev->flags)) + return; + + if (num_tcs == nic_dev->hw_dcb_cfg.max_cos && max_qps >= num_tcs) { + num_rss = max_qps / num_tcs; + num_rss = min_t(u16, num_rss, q_params->rss_limit); + q_params->num_rss = num_rss; + q_params->num_qps = (u16)(num_tcs * num_rss); + } /* else will disable DCB in config_dcb_qps_map() */ +} + +static void spnic_config_num_qps(struct spnic_nic_dev *nic_dev, + struct spnic_dyna_txrxq_params *q_params) +{ + u16 alloc_num_irq, cur_num_irq; + u16 dst_num_irq; + + if (test_bit(SPNIC_RSS_ENABLE, &nic_dev->flags)) { + q_params->num_rss = q_params->rss_limit; + 
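+	/* with RSS enabled, use one tx/rx qp per RSS queue */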
+	q_params->num_qps = q_params->rss_limit;
+	} else {
+		q_params->num_rss = 0;
+		q_params->num_qps = 1;
+	}
+
+	config_dcb_num_qps(nic_dev, q_params, nic_dev->max_qps);
+
+	if (nic_dev->num_qp_irq >= q_params->num_qps)
+		goto out;
+
+	cur_num_irq = nic_dev->num_qp_irq;
+
+	alloc_num_irq = spnic_qp_irq_change(nic_dev, q_params->num_qps);
+	if (alloc_num_irq < q_params->num_qps) {
+		q_params->num_qps = alloc_num_irq;
+		q_params->num_rss = q_params->num_qps;
+		config_dcb_num_qps(nic_dev, q_params, q_params->num_qps);
+		nicif_warn(nic_dev, drv, nic_dev->netdev,
+			   "Cannot get enough irqs, adjust num_qps to %u\n",
+			   q_params->num_qps);
+
+		/* The current irq may be in use, we must keep it */
+		dst_num_irq = max_t(u16, cur_num_irq, q_params->num_qps);
+		spnic_qp_irq_change(nic_dev, dst_num_irq);
+	}
+
+out:
+	nicif_info(nic_dev, drv, nic_dev->netdev, "Final num_qps: %u, num_rss: %u\n",
+		   q_params->num_qps, q_params->num_rss);
+}
+
+/* determine num_qps from rss_tmpl_id/irq_num/dcb_en */
+int spnic_setup_num_qps(struct spnic_nic_dev *nic_dev)
+{
+	struct net_device *netdev = nic_dev->netdev;
+	u32 irq_size;
+
+	nic_dev->num_qp_irq = 0;
+
+	irq_size = sizeof(*nic_dev->qps_irq_info) * nic_dev->max_qps;
+	if (!irq_size) {
+		nicif_err(nic_dev, drv, netdev, "Cannot allocate zero size entries\n");
+		return -EINVAL;
+	}
+	nic_dev->qps_irq_info = kzalloc(irq_size, GFP_KERNEL);
+	if (!nic_dev->qps_irq_info) {
+		nicif_err(nic_dev, drv, netdev, "Failed to alloc qps_irq_info\n");
+		return -ENOMEM;
+	}
+
+	spnic_config_num_qps(nic_dev, &nic_dev->q_params);
+
+	return 0;
+}
+
+static void spnic_destroy_num_qps(struct spnic_nic_dev *nic_dev)
+{
+	u16 i;
+
+	for (i = 0; i < nic_dev->num_qp_irq; i++)
+		sphw_free_irq(nic_dev->hwdev, SERVICE_T_NIC, nic_dev->qps_irq_info[i].irq_id);
+
+	kfree(nic_dev->qps_irq_info);
+}
+
+int spnic_force_port_disable(struct spnic_nic_dev *nic_dev)
+{
+	int err;
+
+	down(&nic_dev->port_state_sem);
+
+	err = spnic_set_port_enable(nic_dev->hwdev, false, SPHW_CHANNEL_NIC);
+	if (!err)
+		nic_dev->force_port_disable = true;
+
+	up(&nic_dev->port_state_sem);
+
+	return err;
+}
+
+int spnic_force_set_port_state(struct spnic_nic_dev *nic_dev, bool enable)
+{
+	int err = 0;
+
+	down(&nic_dev->port_state_sem);
+
+	nic_dev->force_port_disable = false;
+	err = spnic_set_port_enable(nic_dev->hwdev, enable, SPHW_CHANNEL_NIC);
+
+	up(&nic_dev->port_state_sem);
+
+	return err;
+}
+
+int spnic_maybe_set_port_state(struct spnic_nic_dev *nic_dev, bool enable)
+{
+	int err;
+
+	down(&nic_dev->port_state_sem);
+
+	/* Do nothing while the port is force-disabled: the port was brought
+	 * down by spnic_force_port_disable() and must not be enabled again
+	 * while in force mode.
+	 */
+	if (nic_dev->force_port_disable) {
+		up(&nic_dev->port_state_sem);
+		return 0;
+	}
+
+	err = spnic_set_port_enable(nic_dev->hwdev, enable, SPHW_CHANNEL_NIC);
+
+	up(&nic_dev->port_state_sem);
+
+	return err;
+}
+
+static void spnic_print_link_message(struct spnic_nic_dev *nic_dev, u8 link_status)
+{
+	if (nic_dev->link_status == link_status)
+		return;
+
+	nic_dev->link_status = link_status;
+
+	nicif_info(nic_dev, link, nic_dev->netdev, "Link is %s\n",
+		   (link_status ? "up" : "down"));
+}
+
+int spnic_alloc_channel_resources(struct spnic_nic_dev *nic_dev,
+				  struct spnic_dyna_qp_params *qp_params,
+				  struct spnic_dyna_txrxq_params *trxq_params)
+{
+	int err;
+
+	qp_params->num_qps = trxq_params->num_qps;
+	qp_params->sq_depth = trxq_params->sq_depth;
+	qp_params->rq_depth = trxq_params->rq_depth;
+
+	err = spnic_alloc_qps(nic_dev->hwdev, nic_dev->qps_irq_info, qp_params);
+	if (err) {
+		nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to alloc qps\n");
+		return err;
+	}
+
+	err = spnic_alloc_txrxq_resources(nic_dev, trxq_params);
+	if (err) {
+		nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to alloc txrxq resources\n");
+		spnic_free_qps(nic_dev->hwdev, qp_params);
+		return err;
+	}
+
+	return 0;
+}
+
+void spnic_free_channel_resources(struct spnic_nic_dev *nic_dev,
+				  struct spnic_dyna_qp_params *qp_params,
+				  struct spnic_dyna_txrxq_params *trxq_params)
+{
+	mutex_lock(&nic_dev->nic_mutex);
+	spnic_free_txrxq_resources(nic_dev, trxq_params);
+	spnic_free_qps(nic_dev->hwdev, qp_params);
+	mutex_unlock(&nic_dev->nic_mutex);
+}
+
+int spnic_open_channel(struct spnic_nic_dev *nic_dev, struct spnic_dyna_qp_params *qp_params,
+		       struct spnic_dyna_txrxq_params *trxq_params)
+{
+	int err;
+
+	err = spnic_init_qps(nic_dev->hwdev, qp_params);
+	if (err) {
+		nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to init qps\n");
+		return err;
+	}
+
+	err = spnic_configure_txrxqs(nic_dev, trxq_params);
+	if (err) {
+		nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to configure txrxqs\n");
+		goto cfg_txrxqs_err;
+	}
+
+	err = spnic_qps_irq_init(nic_dev);
+	if (err) {
+		nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to init txrxq irq\n");
+		goto init_qp_irq_err;
+	}
+
+	err = spnic_configure(nic_dev);
+	if (err) {
+		nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to configure nic\n");
+		goto configure_err;
+	}
+
+	return 0;
+
+configure_err:
+	spnic_qps_irq_deinit(nic_dev);
+
+init_qp_irq_err:
+cfg_txrxqs_err:
+	spnic_deinit_qps(nic_dev->hwdev, qp_params);
+
+	return err;
+}
+
+void spnic_close_channel(struct spnic_nic_dev *nic_dev,
+			 struct spnic_dyna_qp_params *qp_params)
+{
+	spnic_remove_configure(nic_dev);
+	spnic_qps_irq_deinit(nic_dev);
+	spnic_deinit_qps(nic_dev->hwdev, qp_params);
+}
+
+int spnic_vport_up(struct spnic_nic_dev *nic_dev)
+{
+	struct net_device *netdev = nic_dev->netdev;
+	u8 link_status = 0;
+	u16 glb_func_id;
+	int err;
+
+	glb_func_id = sphw_global_func_id(nic_dev->hwdev);
+	err = spnic_set_vport_enable(nic_dev->hwdev, glb_func_id, true,
+				     SPHW_CHANNEL_NIC);
+	if (err) {
+		nicif_err(nic_dev, drv, netdev, "Failed to enable vport\n");
+		goto vport_enable_err;
+	}
+
+	err = spnic_maybe_set_port_state(nic_dev, true);
+	if (err) {
+		nicif_err(nic_dev, drv, netdev, "Failed to enable port\n");
+		goto port_enable_err;
+	}
+
+	netif_set_real_num_tx_queues(netdev, nic_dev->q_params.num_qps);
+	netif_set_real_num_rx_queues(netdev, nic_dev->q_params.num_qps);
+	netif_tx_wake_all_queues(netdev);
+
+	err = spnic_get_link_state(nic_dev->hwdev, &link_status);
+	if (!err && link_status)
+		netif_carrier_on(netdev);
+
+	queue_delayed_work(nic_dev->workq, &nic_dev->moderation_task, SPNIC_MODERATONE_DELAY);
+
+	spnic_print_link_message(nic_dev, link_status);
+
+	if (!SPNIC_FUNC_IS_VF(nic_dev->hwdev))
+		spnic_notify_all_vfs_link_changed(nic_dev->hwdev, link_status);
+
+	return 0;
+
+port_enable_err:
+	spnic_set_vport_enable(nic_dev->hwdev, glb_func_id, false, SPHW_CHANNEL_NIC);
+
+vport_enable_err:
+	spnic_flush_qps_res(nic_dev->hwdev);
+	/* After the vport is disabled, wait 100ms until no more packets are
+	 * sent to the host.
+	 */
+	msleep(100);
+
+	return err;
+}
+
+void spnic_vport_down(struct spnic_nic_dev *nic_dev)
+{
+	u16 glb_func_id;
+
+	netif_carrier_off(nic_dev->netdev);
+	netif_tx_disable(nic_dev->netdev);
+
+	cancel_delayed_work_sync(&nic_dev->moderation_task);
+
+	if (sphw_get_chip_present_flag(nic_dev->hwdev)) {
+		if (!SPNIC_FUNC_IS_VF(nic_dev->hwdev))
+			spnic_notify_all_vfs_link_changed(nic_dev->hwdev, 0);
+
+		spnic_maybe_set_port_state(nic_dev, false);
+
+		glb_func_id = sphw_global_func_id(nic_dev->hwdev);
+		spnic_set_vport_enable(nic_dev->hwdev, glb_func_id, false, SPHW_CHANNEL_NIC);
+
+		spnic_flush_txqs(nic_dev->netdev);
+		spnic_flush_qps_res(nic_dev->hwdev);
+		/* After the vport is disabled, no packets are sent to the
+		 * host after 100ms (2000ms on FPGA).
+		 */
+		msleep(SPNIC_WAIT_FLUSH_QP_RESOURCE_TIMEOUT);
+	}
+}
+
+int spnic_change_channel_settings(struct spnic_nic_dev *nic_dev,
+				  struct spnic_dyna_txrxq_params *trxq_params,
+				  spnic_reopen_handler reopen_handler, const void *priv_data)
+{
+	struct spnic_dyna_qp_params new_qp_params = {0};
+	struct spnic_dyna_qp_params cur_qp_params = {0};
+	int err;
+
+	spnic_config_num_qps(nic_dev, trxq_params);
+
+	err = spnic_alloc_channel_resources(nic_dev, &new_qp_params, trxq_params);
+	if (err) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Failed to alloc channel resources\n");
+		return err;
+	}
+
+	if (!test_and_set_bit(SPNIC_CHANGE_RES_INVALID, &nic_dev->flags)) {
+		spnic_vport_down(nic_dev);
+		spnic_close_channel(nic_dev, &cur_qp_params);
+		spnic_free_channel_resources(nic_dev, &cur_qp_params, &nic_dev->q_params);
+	}
+
+	if (nic_dev->num_qp_irq > trxq_params->num_qps)
+		spnic_qp_irq_change(nic_dev, trxq_params->num_qps);
+	nic_dev->q_params = *trxq_params;
+
+	if (reopen_handler)
+		reopen_handler(nic_dev, priv_data);
+
+	err = spnic_open_channel(nic_dev, &new_qp_params, trxq_params);
+	if (err)
+		goto open_channel_err;
+
+	err = spnic_vport_up(nic_dev);
+	if (err)
+		goto vport_up_err;
+
+	clear_bit(SPNIC_CHANGE_RES_INVALID, &nic_dev->flags);
+	nicif_info(nic_dev, drv, nic_dev->netdev, "Change channel settings success\n");
+
+	return 0;
+
+vport_up_err:
+	spnic_close_channel(nic_dev, &new_qp_params);
+
+open_channel_err:
+	spnic_free_channel_resources(nic_dev, &new_qp_params, trxq_params);
+
+	return err;
+}
+
+int spnic_open(struct net_device *netdev)
+{
+	struct spnic_nic_dev *nic_dev = netdev_priv(netdev);
+	struct spnic_dyna_qp_params qp_params = {0};
+	int err;
+
+	if (test_bit(SPNIC_INTF_UP, &nic_dev->flags)) {
+		nicif_info(nic_dev, drv, netdev, "Netdev already open, do nothing\n");
+		return 0;
+	}
+
+	err = spnic_init_nicio_res(nic_dev->hwdev);
+	if (err) {
+		nicif_err(nic_dev, drv, netdev, "Failed to init nicio resources\n");
+		return err;
+	}
+
+	err = spnic_setup_num_qps(nic_dev);
+	if (err) {
+		nicif_err(nic_dev, drv, netdev, "Failed to setup num_qps\n");
+		goto setup_qps_err;
+	}
+
+	err = spnic_alloc_channel_resources(nic_dev, &qp_params, &nic_dev->q_params);
+	if (err)
+		goto alloc_channel_res_err;
+
+	err = spnic_open_channel(nic_dev, &qp_params, &nic_dev->q_params);
+	if (err)
+		goto open_channel_err;
+
+	err = spnic_vport_up(nic_dev);
+	if (err)
+		goto vport_up_err;
+
+	set_bit(SPNIC_INTF_UP, &nic_dev->flags);
+	nicif_info(nic_dev, drv, nic_dev->netdev, "Netdev is up\n");
+
+	return 0;
+
+vport_up_err:
+	spnic_close_channel(nic_dev, &qp_params);
+
+open_channel_err:
+	spnic_free_channel_resources(nic_dev, &qp_params, &nic_dev->q_params);
+
+alloc_channel_res_err:
+	spnic_destroy_num_qps(nic_dev);
+
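+	/* final unwind step below releases the nicio resources allocated first */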
+setup_qps_err: + spnic_deinit_nicio_res(nic_dev->hwdev); + + return err; +} + +int spnic_close(struct net_device *netdev) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + struct spnic_dyna_qp_params qp_params = {0}; + + if (!test_and_clear_bit(SPNIC_INTF_UP, &nic_dev->flags)) { + nicif_info(nic_dev, drv, netdev, "Netdev already close, do nothing\n"); + return 0; + } + + if (test_and_clear_bit(SPNIC_CHANGE_RES_INVALID, &nic_dev->flags)) + goto out; + + spnic_vport_down(nic_dev); + spnic_close_channel(nic_dev, &qp_params); + spnic_free_channel_resources(nic_dev, &qp_params, &nic_dev->q_params); + +out: + spnic_deinit_nicio_res(nic_dev->hwdev); + spnic_destroy_num_qps(nic_dev); + + nicif_info(nic_dev, drv, nic_dev->netdev, "Netdev is down\n"); + + return 0; +} + +#define IPV6_ADDR_LEN 4 +#define PKT_INFO_LEN 9 +#define BITS_PER_TUPLE 32 +static u32 calc_xor_rss(u8 *rss_tunple, u32 len) +{ + u32 hash_value; + u32 i; + + hash_value = rss_tunple[0]; + for (i = 1; i < len; i++) + hash_value = hash_value ^ rss_tunple[i]; + + return hash_value; +} + +static u32 calc_toep_rss(u32 *rss_tunple, u32 len, const u32 *rss_key) +{ + u32 rss = 0; + u32 i, j; + + for (i = 1; i <= len; i++) { + for (j = 0; j < BITS_PER_TUPLE; j++) + if (rss_tunple[i - 1] & ((u32)1 << + (u32)((BITS_PER_TUPLE - 1) - j))) + rss ^= (rss_key[i - 1] << j) | + (u32)((u64)rss_key[i] >> (BITS_PER_TUPLE - j)); + } + + return rss; +} + +#define RSS_VAL(val, type) \ + (((type) == SPNIC_RSS_HASH_ENGINE_TYPE_TOEP) ? ntohl(val) : (val)) + +static u8 parse_ipv6_info(struct sk_buff *skb, u32 *rss_tunple, u8 hash_engine, u32 *len) +{ + struct ipv6hdr *ipv6hdr = ipv6_hdr(skb); + u32 *saddr = (u32 *)&ipv6hdr->saddr; + u32 *daddr = (u32 *)&ipv6hdr->daddr; + u8 i; + + for (i = 0; i < IPV6_ADDR_LEN; i++) { + rss_tunple[i] = RSS_VAL(daddr[i], hash_engine); + /* The offset of the sport relative to the dport is 4 */ + rss_tunple[(u32)(i + IPV6_ADDR_LEN)] = RSS_VAL(saddr[i], hash_engine); + } + *len = IPV6_ADDR_LEN + IPV6_ADDR_LEN; + + if (skb_network_header(skb) + sizeof(*ipv6hdr) == skb_transport_header(skb)) + return ipv6hdr->nexthdr; + return 0; +} + +u16 select_queue_by_hash_func(struct net_device *dev, struct sk_buff *skb, + unsigned int num_tx_queues) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(dev); + struct nic_rss_type rss_type = nic_dev->rss_type; + struct iphdr *iphdr = NULL; + u32 rss_tunple[PKT_INFO_LEN] = {0}; + u32 len = 0; + u32 hash = 0; + u8 hash_engine = nic_dev->rss_hash_engine; + u8 l4_proto; + unsigned char *l4_hdr = NULL; + + if (skb_rx_queue_recorded(skb)) { + hash = skb_get_rx_queue(skb); + + if (unlikely(hash >= num_tx_queues)) + hash %= num_tx_queues; + return (u16)hash; + } + + iphdr = ip_hdr(skb); + if (iphdr->version == IPV4_VERSION) { + rss_tunple[len++] = RSS_VAL(iphdr->daddr, hash_engine); + rss_tunple[len++] = RSS_VAL(iphdr->saddr, hash_engine); + l4_proto = iphdr->protocol; + } else if (iphdr->version == IPV6_VERSION) { + l4_proto = parse_ipv6_info(skb, (u32 *)rss_tunple, hash_engine, &len); + } else { + return (u16)hash; + } + + if ((iphdr->version == IPV4_VERSION && + ((l4_proto == IPPROTO_UDP && rss_type.udp_ipv4) || + (l4_proto == IPPROTO_TCP && rss_type.tcp_ipv4))) || + (iphdr->version == IPV6_VERSION && + ((l4_proto == IPPROTO_UDP && rss_type.udp_ipv6) || + (l4_proto == IPPROTO_TCP && rss_type.tcp_ipv6)))) { + l4_hdr = skb_transport_header(skb); + /* High 16 bits are dport, low 16 bits are sport. 
*/ + rss_tunple[len++] = ((u32)ntohs(*((u16 *)l4_hdr + 1U)) << 16) | + ntohs(*(u16 *)l4_hdr); + } /* rss_type.ipv4 and rss_type.ipv6 default on. */ + + if (hash_engine == SPNIC_RSS_HASH_ENGINE_TYPE_TOEP) + hash = calc_toep_rss((u32 *)rss_tunple, len, nic_dev->rss_hkey_be); + else + hash = calc_xor_rss((u8 *)rss_tunple, len * (u32)sizeof(u32)); + + return (u16)nic_dev->rss_indir[hash & 0xFF]; +} + +static u16 spnic_select_queue(struct net_device *netdev, struct sk_buff *skb, + struct net_device *sb_dev) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + + if (skb->vlan_tci) + skb->priority = skb->vlan_tci >> VLAN_PRIO_SHIFT; + + if (netdev_get_num_tc(netdev)) + goto fall_back; + + if (test_bit(SPNIC_SAME_RXTX, &nic_dev->flags)) + return select_queue_by_hash_func(netdev, skb, netdev->real_num_tx_queues); + +fall_back: + return netdev_pick_tx(netdev, skb, NULL); +} + +static void spnic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) + +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + struct spnic_txq_stats *txq_stats = NULL; + struct spnic_rxq_stats *rxq_stats = NULL; + struct spnic_txq *txq = NULL; + struct spnic_rxq *rxq = NULL; + u64 bytes, packets, dropped, errors; + unsigned int start; + int i; + + bytes = 0; + packets = 0; + dropped = 0; + for (i = 0; i < nic_dev->max_qps; i++) { + if (!nic_dev->txqs) + break; + + txq = &nic_dev->txqs[i]; + txq_stats = &txq->txq_stats; + do { + start = u64_stats_fetch_begin(&txq_stats->syncp); + bytes += txq_stats->bytes; + packets += txq_stats->packets; + dropped += txq_stats->dropped; + } while (u64_stats_fetch_retry(&txq_stats->syncp, start)); + } + stats->tx_packets = packets; + stats->tx_bytes = bytes; + stats->tx_dropped = dropped; + + bytes = 0; + packets = 0; + errors = 0; + dropped = 0; + for (i = 0; i < nic_dev->max_qps; i++) { + if (!nic_dev->rxqs) + break; + + rxq = &nic_dev->rxqs[i]; + rxq_stats = &rxq->rxq_stats; + do { + start = u64_stats_fetch_begin(&rxq_stats->syncp); + bytes += rxq_stats->bytes; + packets += rxq_stats->packets; + errors += rxq_stats->csum_errors + rxq_stats->other_errors; + dropped += rxq_stats->dropped; + } while (u64_stats_fetch_retry(&rxq_stats->syncp, start)); + } + stats->rx_packets = packets; + stats->rx_bytes = bytes; + stats->rx_errors = errors; + stats->rx_dropped = dropped; +} + +static void spnic_tx_timeout(struct net_device *netdev, unsigned int txqueue) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + struct spnic_io_queue *sq = NULL; + bool hw_err = false; + u32 sw_pi, hw_ci; + u8 q_id; + + SPNIC_NIC_STATS_INC(nic_dev, netdev_tx_timeout); + nicif_err(nic_dev, drv, netdev, "Tx timeout\n"); + + for (q_id = 0; q_id < nic_dev->q_params.num_qps; q_id++) { + if (!netif_xmit_stopped(netdev_get_tx_queue(netdev, q_id))) + continue; + + sq = nic_dev->txqs[q_id].sq; + sw_pi = spnic_get_sq_local_pi(sq); + hw_ci = spnic_get_sq_hw_ci(sq); + nicif_info(nic_dev, drv, netdev, "txq%u: sw_pi: %hu, hw_ci: %u, sw_ci: %u, napi->state: 0x%lx\n", + q_id, sw_pi, hw_ci, spnic_get_sq_local_ci(sq), + nic_dev->q_params.irq_cfg[q_id].napi.state); + + if (sw_pi != hw_ci) + hw_err = true; + } + + if (hw_err) + set_bit(EVENT_WORK_TX_TIMEOUT, &nic_dev->event_flag); +} + +static int spnic_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + u32 mtu = (u32)new_mtu; + int err = 0; + + u32 xdp_max_mtu; + + if (spnic_is_xdp_enable(nic_dev)) { + xdp_max_mtu = spnic_xdp_max_mtu(nic_dev); + if (mtu > xdp_max_mtu) { + nicif_err(nic_dev, 
drv, netdev, "Max MTU for xdp usage is %d\n",
+				  xdp_max_mtu);
+			return -EINVAL;
+		}
+	}
+
+	err = spnic_set_port_mtu(nic_dev->hwdev, (u16)mtu);
+	if (err) {
+		nicif_err(nic_dev, drv, netdev, "Failed to change port mtu to %d\n",
+			  new_mtu);
+	} else {
+		nicif_info(nic_dev, drv, nic_dev->netdev, "Change mtu from %u to %d\n",
+			   netdev->mtu, new_mtu);
+		netdev->mtu = mtu;
+	}
+
+	return err;
+}
+
+static int spnic_set_mac_addr(struct net_device *netdev, void *addr)
+{
+	struct spnic_nic_dev *nic_dev = netdev_priv(netdev);
+	struct sockaddr *saddr = addr;
+	int err;
+
+	if (!is_valid_ether_addr(saddr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	if (ether_addr_equal(netdev->dev_addr, saddr->sa_data)) {
+		nicif_info(nic_dev, drv, netdev, "Already using mac address %pM\n",
+			   saddr->sa_data);
+		return 0;
+	}
+
+	err = spnic_update_mac(nic_dev->hwdev, netdev->dev_addr, saddr->sa_data, 0,
+			       sphw_global_func_id(nic_dev->hwdev));
+	if (err)
+		return err;
+
+	ether_addr_copy(netdev->dev_addr, saddr->sa_data);
+
+	nicif_info(nic_dev, drv, netdev, "Set new mac address %pM\n", saddr->sa_data);
+
+	return 0;
+}
+
+static int spnic_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto, u16 vid)
+{
+	struct spnic_nic_dev *nic_dev = netdev_priv(netdev);
+	unsigned long *vlan_bitmap = nic_dev->vlan_bitmap;
+	u16 func_id;
+	u32 col, line;
+	int err = 0;
+
+	/* VLAN 0 is never added; adding it is treated the same as deleting it. */
+	if (vid == 0)
+		goto end;
+
+	col = VID_COL(nic_dev, vid);
+	line = VID_LINE(nic_dev, vid);
+
+	func_id = sphw_global_func_id(nic_dev->hwdev);
+
+	err = spnic_add_vlan(nic_dev->hwdev, vid, func_id);
+	if (err) {
+		nicif_err(nic_dev, drv, netdev, "Failed to add vlan %u\n", vid);
+		goto end;
+	}
+
+	set_bit(col, &vlan_bitmap[line]);
+
+	nicif_info(nic_dev, drv, netdev, "Add vlan %u\n", vid);
+
+end:
+	return err;
+}
+
+static int spnic_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto, u16 vid)
+{
+	struct spnic_nic_dev *nic_dev = netdev_priv(netdev);
+	unsigned long *vlan_bitmap = nic_dev->vlan_bitmap;
+	u16 func_id;
+	int col, line;
+	int err = 0;
+
+	col = VID_COL(nic_dev, vid);
+	line = VID_LINE(nic_dev, vid);
+
+	/* In the broadcast scenario, ucode finds the corresponding function
+	 * based on VLAN 0 of the vlan table. Deleting VLAN 0 would therefore
+	 * break that lookup, so VLAN 0 is never removed.
+	 */
+	if (vid == 0)
+		goto end;
+
+	func_id = sphw_global_func_id(nic_dev->hwdev);
+	err = spnic_del_vlan(nic_dev->hwdev, vid, func_id);
+	if (err) {
+		nicif_err(nic_dev, drv, netdev, "Failed to delete vlan %u\n", vid);
+		goto end;
+	}
+
+	clear_bit(col, &vlan_bitmap[line]);
+
+	nicif_info(nic_dev, drv, netdev, "Remove vlan %u\n", vid);
+
+end:
+	return err;
+}
+
+#define SET_FEATURES_OP_STR(op)		((op) ? 
"Enable" : "Disable") + +static int set_feature_rx_csum(struct spnic_nic_dev *nic_dev, netdev_features_t wanted_features, + netdev_features_t features, netdev_features_t *failed_features) +{ + netdev_features_t changed = wanted_features ^ features; + + if (changed & NETIF_F_RXCSUM) + spnic_info(nic_dev, drv, "%s rx csum success\n", + SET_FEATURES_OP_STR(wanted_features & NETIF_F_RXCSUM)); + + return 0; +} + +static int set_feature_tso(struct spnic_nic_dev *nic_dev, netdev_features_t wanted_features, + netdev_features_t features, netdev_features_t *failed_features) +{ + netdev_features_t changed = wanted_features ^ features; + + if (changed & NETIF_F_TSO) + spnic_info(nic_dev, drv, "%s tso success\n", + SET_FEATURES_OP_STR(wanted_features & NETIF_F_TSO)); + + return 0; +} + +static int set_feature_lro(struct spnic_nic_dev *nic_dev, netdev_features_t wanted_features, + netdev_features_t features, netdev_features_t *failed_features) +{ + netdev_features_t changed = wanted_features ^ features; + bool en = !!(wanted_features & NETIF_F_LRO); + int err; + + if (!(changed & NETIF_F_LRO)) + return 0; + + if (en && spnic_is_xdp_enable(nic_dev)) { + spnic_err(nic_dev, drv, "Can not enable LRO when xdp is enable\n"); + *failed_features |= NETIF_F_LRO; + return -EINVAL; + } + + err = spnic_set_rx_lro_state(nic_dev->hwdev, en, SPNIC_LRO_DEFAULT_TIME_LIMIT, + SPNIC_LRO_DEFAULT_COAL_PKT_SIZE); + if (err) { + spnic_err(nic_dev, drv, "%s lro failed\n", SET_FEATURES_OP_STR(en)); + *failed_features |= NETIF_F_LRO; + } else { + spnic_info(nic_dev, drv, "%s lro success\n", SET_FEATURES_OP_STR(en)); + } + + return err; +} + +static int set_feature_rx_cvlan(struct spnic_nic_dev *nic_dev, netdev_features_t wanted_features, + netdev_features_t features, netdev_features_t *failed_features) +{ + netdev_features_t changed = wanted_features ^ features; + netdev_features_t vlan_feature = NETIF_F_HW_VLAN_CTAG_RX; + bool en = !!(wanted_features & vlan_feature); + int err; + + if (!(changed & vlan_feature)) + return 0; + + err = spnic_set_rx_vlan_offload(nic_dev->hwdev, en); + if (err) { + spnic_err(nic_dev, drv, "%s rxvlan failed\n", SET_FEATURES_OP_STR(en)); + *failed_features |= vlan_feature; + } else { + spnic_info(nic_dev, drv, "%s rxvlan success\n", SET_FEATURES_OP_STR(en)); + } + + return err; +} + +static int set_feature_vlan_filter(struct spnic_nic_dev *nic_dev, netdev_features_t wanted_features, + netdev_features_t features, netdev_features_t *failed_features) +{ + netdev_features_t changed = wanted_features ^ features; + netdev_features_t vlan_filter_feature = NETIF_F_HW_VLAN_CTAG_FILTER; + bool en = !!(wanted_features & vlan_filter_feature); + int err = 0; + + if (!(changed & vlan_filter_feature)) + return 0; + + if (err == 0) + err = spnic_set_vlan_fliter(nic_dev->hwdev, en); + if (err) { + spnic_err(nic_dev, drv, "%s rx vlan filter failed\n", SET_FEATURES_OP_STR(en)); + *failed_features |= vlan_filter_feature; + } else { + spnic_info(nic_dev, drv, "%s rx vlan filter success\n", SET_FEATURES_OP_STR(en)); + } + + return err; +} + +static int set_features(struct spnic_nic_dev *nic_dev, netdev_features_t pre_features, + netdev_features_t features) +{ + netdev_features_t failed_features = 0; + u32 err = 0; + + err |= (u32)set_feature_rx_csum(nic_dev, features, pre_features, &failed_features); + err |= (u32)set_feature_tso(nic_dev, features, pre_features, &failed_features); + err |= (u32)set_feature_lro(nic_dev, features, pre_features, &failed_features); + err |= (u32)set_feature_rx_cvlan(nic_dev, features, 
pre_features, &failed_features); + err |= (u32)set_feature_vlan_filter(nic_dev, features, pre_features, &failed_features); + if (err) { + nic_dev->netdev->features = features ^ failed_features; + return -EIO; + } + + return 0; +} + +static int spnic_set_features(struct net_device *netdev, netdev_features_t features) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + + return set_features(nic_dev, nic_dev->netdev->features, features); +} + +int spnic_set_hw_features(struct spnic_nic_dev *nic_dev) +{ + /* enable all hw features in netdev->features */ + return set_features(nic_dev, ~nic_dev->netdev->features, nic_dev->netdev->features); +} + +static netdev_features_t spnic_fix_features(struct net_device *netdev, netdev_features_t features) +{ + /* If Rx checksum is disabled, then LRO should also be disabled */ + if (!(features & NETIF_F_RXCSUM)) + features &= ~NETIF_F_LRO; + + return features; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void spnic_netpoll(struct net_device *netdev) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + u16 i; + + for (i = 0; i < nic_dev->q_params.num_qps; i++) + napi_schedule(&nic_dev->q_params.irq_cfg[i].napi); +} +#endif /* CONFIG_NET_POLL_CONTROLLER */ + +static int spnic_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) +{ + struct spnic_nic_dev *adapter = netdev_priv(netdev); + int err; + + if (is_multicast_ether_addr(mac) || vf >= pci_num_vf(adapter->pdev)) + return -EINVAL; + + err = spnic_set_vf_mac(adapter->hwdev, OS_VF_ID_TO_HW(vf), mac); + if (err) + return err; + + if (!is_zero_ether_addr(mac)) + nic_info(&adapter->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf); + else + nic_info(&adapter->pdev->dev, "Deleting MAC on VF %d\n", vf); + + nic_info(&adapter->pdev->dev, "Please reload the VF driver to make this change effective."); + + return 0; +} + +static int set_hw_vf_vlan(void *hwdev, u16 cur_vlanprio, int vf, u16 vlan, u8 qos) +{ + int err = 0; + u16 old_vlan = cur_vlanprio & VLAN_VID_MASK; + + if (vlan || qos) { + if (cur_vlanprio) { + err = spnic_kill_vf_vlan(hwdev, OS_VF_ID_TO_HW(vf)); + if (err) + return err; + } + err = spnic_add_vf_vlan(hwdev, OS_VF_ID_TO_HW(vf), vlan, qos); + } else { + err = spnic_kill_vf_vlan(hwdev, OS_VF_ID_TO_HW(vf)); + } + + if (err) + return err; + + return spnic_update_mac_vlan(hwdev, old_vlan, vlan, OS_VF_ID_TO_HW(vf)); +} + +#define SPNIC_MAX_VLAN_ID 4094 +#define SPNIC_MAX_QOS_NUM 7 + +static int spnic_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, + u8 qos, __be16 vlan_proto) +{ + struct spnic_nic_dev *adapter = netdev_priv(netdev); + u16 vlanprio, cur_vlanprio; + + if (vf >= pci_num_vf(adapter->pdev) || vlan > SPNIC_MAX_VLAN_ID || qos > SPNIC_MAX_QOS_NUM) + return -EINVAL; + if (vlan_proto != htons(ETH_P_8021Q)) + return -EPROTONOSUPPORT; + vlanprio = vlan | qos << SPNIC_VLAN_PRIORITY_SHIFT; + cur_vlanprio = spnic_vf_info_vlanprio(adapter->hwdev, OS_VF_ID_TO_HW(vf)); + /* duplicate request, so just return success */ + if (vlanprio == cur_vlanprio) + return 0; + + return set_hw_vf_vlan(adapter->hwdev, cur_vlanprio, vf, vlan, qos); +} + +static int spnic_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting) +{ + struct spnic_nic_dev *adapter = netdev_priv(netdev); + int err = 0; + bool cur_spoofchk = false; + + if (vf >= pci_num_vf(adapter->pdev)) + return -EINVAL; + + cur_spoofchk = spnic_vf_info_spoofchk(adapter->hwdev, OS_VF_ID_TO_HW(vf)); + /* same request, so just return success */ + if ((setting && cur_spoofchk) || (!setting && !cur_spoofchk)) + 
return 0; + + err = spnic_set_vf_spoofchk(adapter->hwdev, OS_VF_ID_TO_HW(vf), setting); + if (!err) + nicif_info(adapter, drv, netdev, "Set VF %d spoofchk %s\n", + vf, setting ? "on" : "off"); + + return err; +} + +int spnic_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting) +{ + struct spnic_nic_dev *adapter = netdev_priv(netdev); + int err; + bool cur_trust; + + if (vf >= pci_num_vf(adapter->pdev)) + return -EINVAL; + + cur_trust = spnic_get_vf_trust(adapter->hwdev, OS_VF_ID_TO_HW(vf)); + /* same request, so just return success */ + if ((setting && cur_trust) || (!setting && !cur_trust)) + return 0; + + err = spnic_set_vf_trust(adapter->hwdev, OS_VF_ID_TO_HW(vf), setting); + if (!err) + nicif_info(adapter, drv, netdev, "Set VF %d trusted %s successfully\n", + vf, setting ? "on" : "off"); + else + nicif_err(adapter, drv, netdev, "Failed set VF %d trusted %s\n", + vf, setting ? "on" : "off"); + + return err; +} + +static int spnic_ndo_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi) +{ + struct spnic_nic_dev *adapter = netdev_priv(netdev); + + if (vf >= pci_num_vf(adapter->pdev)) + return -EINVAL; + + spnic_get_vf_config(adapter->hwdev, OS_VF_ID_TO_HW(vf), ivi); + + return 0; +} + +/** + * spnic_ndo_set_vf_link_state + * @netdev: network interface device structure + * @vf_id: VF identifier + * @link: required link state + * + * Set the link state of a specified VF, regardless of physical link state + **/ +int spnic_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) +{ + static const char * const vf_link[] = {"auto", "enable", "disable"}; + struct spnic_nic_dev *adapter = netdev_priv(netdev); + int err; + + /* validate the request */ + if (vf_id >= pci_num_vf(adapter->pdev)) { + nicif_err(adapter, drv, netdev, "Invalid VF Identifier %d\n", vf_id); + return -EINVAL; + } + + err = spnic_set_vf_link_state(adapter->hwdev, OS_VF_ID_TO_HW(vf_id), link); + if (!err) + nicif_info(adapter, drv, netdev, "Set VF %d link state: %s\n", + vf_id, vf_link[link]); + + return err; +} + +static int is_set_vf_bw_param_valid(const struct spnic_nic_dev *adapter, + int vf, int min_tx_rate, int max_tx_rate) +{ + /* verify VF is active */ + if (vf >= pci_num_vf(adapter->pdev)) { + nicif_err(adapter, drv, adapter->netdev, "VF number must be less than %d\n", + pci_num_vf(adapter->pdev)); + return -EINVAL; + } + + if (max_tx_rate < min_tx_rate) { + nicif_err(adapter, drv, adapter->netdev, "Invalid rate, max rate %d must greater than min rate %d\n", + max_tx_rate, min_tx_rate); + return -EINVAL; + } + + return 0; +} + +#define SPNIC_TX_RATE_TABLE_FULL 12 + +static int spnic_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate, int max_tx_rate) +{ + struct spnic_nic_dev *adapter = netdev_priv(netdev); + struct nic_port_info port_info = {0}; + u8 link_status = 0; + u32 speeds[] = {0, SPEED_10, SPEED_100, SPEED_1000, SPEED_10000, + SPEED_25000, SPEED_40000, SPEED_50000, SPEED_100000, + SPEED_200000}; + int err = 0; + + err = is_set_vf_bw_param_valid(adapter, vf, min_tx_rate, max_tx_rate); + if (err) + return err; + + err = spnic_get_link_state(adapter->hwdev, &link_status); + if (err) { + nicif_err(adapter, drv, netdev, "Get link status failed when set vf tx rate\n"); + return -EIO; + } + + if (!link_status) { + nicif_err(adapter, drv, netdev, "Link status must be up when set vf tx rate\n"); + return -EINVAL; + } + + err = spnic_get_port_info(adapter->hwdev, &port_info, SPHW_CHANNEL_NIC); + if (err || port_info.speed >= PORT_SPEED_UNKNOWN) + return 
-EIO; + + /* rate limit cannot be less than 0 and greater than link speed */ + if (max_tx_rate < 0 || max_tx_rate > speeds[port_info.speed]) { + nicif_err(adapter, drv, netdev, "Set vf max tx rate must be in [0 - %u]\n", + speeds[port_info.speed]); + return -EINVAL; + } + + err = spnic_set_vf_tx_rate(adapter->hwdev, OS_VF_ID_TO_HW(vf), max_tx_rate, min_tx_rate); + if (err) { + nicif_err(adapter, drv, netdev, "Unable to set VF %d max rate %d min rate %d%s\n", + vf, max_tx_rate, min_tx_rate, + err == SPNIC_TX_RATE_TABLE_FULL ? ", tx rate profile is full" : ""); + return -EIO; + } + + nicif_info(adapter, drv, netdev, "Set VF %d max tx rate %d min tx rate %d successfully\n", + vf, max_tx_rate, min_tx_rate); + + return 0; +} + +bool spnic_is_xdp_enable(struct spnic_nic_dev *nic_dev) +{ + return !!nic_dev->xdp_prog; +} + +int spnic_xdp_max_mtu(struct spnic_nic_dev *nic_dev) +{ + return nic_dev->rx_buff_len - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); +} + +static int spnic_xdp_setup(struct spnic_nic_dev *nic_dev, struct bpf_prog *prog, + struct netlink_ext_ack *extack) +{ + struct bpf_prog *old_prog = NULL; + int max_mtu = spnic_xdp_max_mtu(nic_dev); + int q_id; + + if (nic_dev->netdev->mtu > max_mtu) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to setup xdp program, the current MTU %d is larger than max allowed MTU %d\n", + nic_dev->netdev->mtu, max_mtu); + NL_SET_ERR_MSG_MOD(extack, "MTU too large for loading xdp program"); + return -EINVAL; + } + + if (nic_dev->netdev->features & NETIF_F_LRO) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to setup xdp program while LRO is on\n"); + NL_SET_ERR_MSG_MOD(extack, "Failed to setup xdp program while LRO is on\n"); + return -EINVAL; + } + + old_prog = xchg(&nic_dev->xdp_prog, prog); + for (q_id = 0; q_id < nic_dev->max_qps; q_id++) + xchg(&nic_dev->rxqs[q_id].xdp_prog, nic_dev->xdp_prog); + + if (old_prog) + bpf_prog_put(old_prog); + + return 0; +} + +static int spnic_xdp(struct net_device *netdev, struct netdev_bpf *xdp) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + + switch (xdp->command) { + case XDP_SETUP_PROG: + return spnic_xdp_setup(nic_dev, xdp->prog, xdp->extack); + default: + return -EINVAL; + } +} + +static const struct net_device_ops spnic_netdev_ops = { + .ndo_open = spnic_open, + .ndo_stop = spnic_close, + .ndo_start_xmit = spnic_xmit_frame, + + .ndo_get_stats64 = spnic_get_stats64, + + .ndo_tx_timeout = spnic_tx_timeout, + .ndo_select_queue = spnic_select_queue, + .ndo_change_mtu = spnic_change_mtu, + .ndo_set_mac_address = spnic_set_mac_addr, + .ndo_validate_addr = eth_validate_addr, + + .ndo_vlan_rx_add_vid = spnic_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = spnic_vlan_rx_kill_vid, + + .ndo_set_vf_mac = spnic_ndo_set_vf_mac, + .ndo_set_vf_vlan = spnic_ndo_set_vf_vlan, + .ndo_set_vf_rate = spnic_ndo_set_vf_bw, + .ndo_set_vf_spoofchk = spnic_ndo_set_vf_spoofchk, + + .ndo_set_vf_trust = spnic_ndo_set_vf_trust, + + .ndo_get_vf_config = spnic_ndo_get_vf_config, + +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = spnic_netpoll, +#endif /* CONFIG_NET_POLL_CONTROLLER */ + + .ndo_set_rx_mode = spnic_nic_set_rx_mode, + + .ndo_bpf = spnic_xdp, + + .ndo_set_vf_link_state = spnic_ndo_set_vf_link_state, + + .ndo_fix_features = spnic_fix_features, + .ndo_set_features = spnic_set_features, +}; + +static const struct net_device_ops spnicvf_netdev_ops = { + .ndo_open = spnic_open, + .ndo_stop = spnic_close, + .ndo_start_xmit = spnic_xmit_frame, + + .ndo_get_stats64 = spnic_get_stats64, + + .ndo_tx_timeout = 
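spnic_xdp_max_mtu() above reserves the Ethernet header, FCS and one VLAN tag inside the receive buffer, so the XDP MTU ceiling follows directly from rx_buff_len; with a 2 KB buffer that is 2048 - (14 + 4 + 4) = 2026 bytes. A standalone restatement of the check performed in spnic_xdp_setup() (the 2048-byte buffer size is an assumed example, not a driver default stated in this patch):

#include <linux/if_ether.h>
#include <linux/if_vlan.h>

static bool example_xdp_mtu_fits(u32 mtu, u32 rx_buff_len)
{
	/* rx_buff_len = 2048 => mtu must be <= 2026 */
	return mtu <= rx_buff_len - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
}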
spnic_tx_timeout, + .ndo_select_queue = spnic_select_queue, + + .ndo_change_mtu = spnic_change_mtu, + .ndo_set_mac_address = spnic_set_mac_addr, + .ndo_validate_addr = eth_validate_addr, + + .ndo_vlan_rx_add_vid = spnic_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = spnic_vlan_rx_kill_vid, + +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = spnic_netpoll, +#endif /* CONFIG_NET_POLL_CONTROLLER */ + + .ndo_set_rx_mode = spnic_nic_set_rx_mode, + + .ndo_bpf = spnic_xdp, + + .ndo_fix_features = spnic_fix_features, + .ndo_set_features = spnic_set_features, +}; + +void spnic_set_netdev_ops(struct spnic_nic_dev *nic_dev) +{ + if (!SPNIC_FUNC_IS_VF(nic_dev->hwdev)) + nic_dev->netdev->netdev_ops = &spnic_netdev_ops; + else + nic_dev->netdev->netdev_ops = &spnicvf_netdev_ops; +} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_nic.h b/drivers/net/ethernet/ramaxel/spnic/spnic_nic.h new file mode 100644 index 000000000000..83c904bc0f72 --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/spnic_nic.h @@ -0,0 +1,148 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#ifndef SPNIC_NIC_H +#define SPNIC_NIC_H +#include <linux/types.h> +#include "sphw_common.h" +#include "spnic_nic_io.h" +#include "spnic_nic_cfg.h" +#include "spnic_mag_cmd.h" + +#define MSG_TO_MGMT_SYNC_RETURN_ERR(err, status, out_size) \ + ((err) || (status) || !(out_size)) + +struct spnic_sq_attr { + u8 dma_attr_off; + u8 pending_limit; + u8 coalescing_time; + u8 intr_en; + u16 intr_idx; + u32 l2nic_sqn; + u64 ci_dma_base; +}; + +struct vf_data_storage { + u8 drv_mac_addr[ETH_ALEN]; + u8 user_mac_addr[ETH_ALEN]; + bool registered; + bool use_specified_mac; + u16 pf_vlan; + u8 pf_qos; + u32 max_rate; + u32 min_rate; + + bool link_forced; + bool link_up; /* only valid if VF link is forced */ + bool spoofchk; + bool trust; + u16 num_qps; +}; + +struct spnic_port_routine_cmd { + bool mpu_send_sfp_info; + bool mpu_send_sfp_abs; + + struct mag_cmd_get_xsfp_info std_sfp_info; + struct mag_cmd_get_xsfp_present abs; +}; + +struct spnic_nic_cfg { + void *hwdev; + void *pcidev_hdl; + void *dev_hdl; + + struct spnic_io_queue *sq; + struct spnic_io_queue *rq; + + u16 rx_buff_len; + + u16 num_qps; + u16 max_qps; + + void *ci_vaddr_base; + dma_addr_t ci_dma_base; + + /* including rq and rx doorbell */ + u16 allocated_num_db; + u8 __iomem **db_addr; + + u8 link_status; + + u16 max_vfs; + struct vf_data_storage *vf_infos; + struct spnic_dcb_state dcb_state; + + u64 feature_cap; + + struct semaphore cfg_lock; + + /* Valid when pfc is disable */ + bool pause_set; + struct nic_pause_config nic_pause; + + u8 pfc_en; + u8 pfc_bitmap; + + struct nic_port_info port_info; + + /* percentage of pf link bandwidth */ + u32 pf_bw_limit; + + struct spnic_port_routine_cmd rt_cmd; + /* mutex used for copy sfp info */ + struct mutex sfp_mutex; +}; + +struct vf_msg_handler { + u16 cmd; + int (*handler)(struct spnic_nic_cfg *nic_cfg, u16 vf, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); +}; + +struct nic_event_handler { + u16 cmd; + void (*handler)(void *hwdev, void *buf_in, u16 in_size, void *buf_out, u16 *out_size); +}; + +int spnic_set_ci_table(void *hwdev, struct spnic_sq_attr *attr); + +int l2nic_msg_to_mgmt_sync(void *hwdev, u16 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); + +int l2nic_msg_to_mgmt_sync_ch(void *hwdev, u16 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size, u16 channel); + +int spnic_cfg_vf_vlan(struct spnic_nic_cfg *nic_cfg, u8 opcode, u16 vid, 
u8 qos, int vf_id); + +int spnic_vf_event_handler(void *hwdev, void *pri_handle, u16 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); + +void spnic_pf_event_handler(void *hwdev, void *pri_handle, u16 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); + +int spnic_pf_mbox_handler(void *hwdev, void *pri_handle, u16 vf_id, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size); + +u8 spnic_nic_sw_aeqe_handler(void *hwdev, u8 event, u8 *data); + +int spnic_vf_func_init(struct spnic_nic_cfg *nic_cfg); + +void spnic_vf_func_free(struct spnic_nic_cfg *nic_cfg); + +void spnic_notify_dcb_state_event(struct spnic_nic_cfg *nic_cfg, struct spnic_dcb_state *dcb_state); + +int spnic_save_dcb_state(struct spnic_nic_cfg *nic_cfg, struct spnic_dcb_state *dcb_state); + +void spnic_notify_vf_link_status(struct spnic_nic_cfg *nic_cfg, u16 vf_id, u8 link_status); + +int spnic_vf_mag_event_handler(void *hwdev, void *pri_handle, u16 cmd, + void *buf_in, u16 in_size, void *buf_out, u16 *out_size); + +void spnic_pf_mag_event_handler(void *hwdev, void *pri_handle, u16 cmd, + void *buf_in, u16 in_size, void *buf_out, u16 *out_size); + +int spnic_pf_mag_mbox_handler(void *hwdev, void *pri_handle, u16 vf_id, + u16 cmd, void *buf_in, u16 in_size, void *buf_out, u16 *out_size); + +#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_cfg.c b/drivers/net/ethernet/ramaxel/spnic/spnic_nic_cfg.c new file mode 100644 index 000000000000..5bde91f35cca --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/spnic_nic_cfg.c @@ -0,0 +1,1321 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/etherdevice.h> +#include <linux/if_vlan.h> +#include <linux/ethtool.h> +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/module.h> + +#include "sphw_crm.h" +#include "sphw_hw.h" +#include "spnic_nic_io.h" +#include "spnic_nic_cfg.h" +#include "spnic_nic.h" +#include "spnic_nic_cmd.h" +#include "sphw_common.h" + +int spnic_set_ci_table(void *hwdev, struct spnic_sq_attr *attr) +{ + struct spnic_cmd_cons_idx_attr cons_idx_attr; + u16 out_size = sizeof(cons_idx_attr); + struct spnic_nic_cfg *nic_cfg = NULL; + int err; + + if (!hwdev || !attr) + return -EINVAL; + + memset(&cons_idx_attr, 0, sizeof(cons_idx_attr)); + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + + cons_idx_attr.func_idx = sphw_global_func_id(hwdev); + + cons_idx_attr.dma_attr_off = attr->dma_attr_off; + cons_idx_attr.pending_limit = attr->pending_limit; + cons_idx_attr.coalescing_time = attr->coalescing_time; + + if (attr->intr_en) { + cons_idx_attr.intr_en = attr->intr_en; + cons_idx_attr.intr_idx = attr->intr_idx; + } + + cons_idx_attr.l2nic_sqn = attr->l2nic_sqn; + cons_idx_attr.ci_addr = attr->ci_dma_base; + + err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_SQ_CI_ATTR_SET, + &cons_idx_attr, sizeof(cons_idx_attr), + &cons_idx_attr, &out_size); + if (err || !out_size || cons_idx_attr.msg_head.status) { + sdk_err(nic_cfg->dev_hdl, + "Failed to set ci attribute table, err: %d, status: 0x%x, out_size: 0x%x\n", + err, cons_idx_attr.msg_head.status, out_size); + return -EFAULT; + } + + return 0; +} + +static int spnic_check_mac_info(u8 status, u16 vlan_id) +{ + if (status && status != SPNIC_MGMT_STATUS_EXIST && status != SPNIC_PF_SET_VF_ALREADY) + return -EINVAL; + + return 0; 
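Nearly every helper in spnic_nic_cfg.c follows the same synchronous mailbox pattern: zero a request struct, preset out_size to the response size, call l2nic_msg_to_mgmt_sync(), and fail on a nonzero err, an empty response, or a nonzero msg_head.status (the MSG_TO_MGMT_SYNC_RETURN_ERR() macro in spnic_nic.h encodes the same test). A schematic caller; struct example_cmd and the opcode parameter stand in for the real spnic_cmd_* structures and SPNIC_NIC_CMD_* opcodes, and the message-head layout is an assumption, since its definition is outside this hunk:

struct example_msg_head {
	u8 status;
	u8 rsvd[7];
};

struct example_cmd {
	struct example_msg_head msg_head;
	u16 func_id;
	u16 rsvd;
};

static int example_cmd_sync(void *hwdev, u16 opcode)
{
	struct example_cmd cmd = { };
	u16 out_size = sizeof(cmd);
	int err;

	cmd.func_id = sphw_global_func_id(hwdev);
	err = l2nic_msg_to_mgmt_sync(hwdev, opcode, &cmd, sizeof(cmd),
				     &cmd, &out_size);
	/* err, empty response, or fw status are all treated as failure */
	if (err || !out_size || cmd.msg_head.status)
		return -EIO;

	return 0;
}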
+} + +#define SPNIC_VLAN_ID_MASK 0x7FFF + +int spnic_set_mac(void *hwdev, const u8 *mac_addr, u16 vlan_id, u16 func_id, u16 channel) +{ + struct spnic_port_mac_set mac_info; + u16 out_size = sizeof(mac_info); + struct spnic_nic_cfg *nic_cfg = NULL; + int err; + + if (!hwdev || !mac_addr) + return -EINVAL; + + memset(&mac_info, 0, sizeof(mac_info)); + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + + if ((vlan_id & SPNIC_VLAN_ID_MASK) >= VLAN_N_VID) { + nic_err(nic_cfg->dev_hdl, "Invalid VLAN number: %d\n", + vlan_id & SPNIC_VLAN_ID_MASK); + return -EINVAL; + } + + mac_info.func_id = func_id; + mac_info.vlan_id = vlan_id; + ether_addr_copy(mac_info.mac, mac_addr); + + err = l2nic_msg_to_mgmt_sync_ch(hwdev, SPNIC_NIC_CMD_SET_MAC, + &mac_info, sizeof(mac_info), + &mac_info, &out_size, channel); + if (err || !out_size || spnic_check_mac_info(mac_info.msg_head.status, mac_info.vlan_id)) { + nic_err(nic_cfg->dev_hdl, + "Failed to update MAC, err: %d, status: 0x%x, out size: 0x%x, channel: 0x%x\n", + err, mac_info.msg_head.status, out_size, channel); + return -EINVAL; + } + + if (mac_info.msg_head.status == SPNIC_PF_SET_VF_ALREADY) { + nic_warn(nic_cfg->dev_hdl, "PF has already set VF mac, Ignore set operation\n"); + return SPNIC_PF_SET_VF_ALREADY; + } + + if (mac_info.msg_head.status == SPNIC_MGMT_STATUS_EXIST) { + nic_warn(nic_cfg->dev_hdl, "MAC is repeated. Ignore update operation\n"); + return 0; + } + + return 0; +} + +int spnic_del_mac(void *hwdev, const u8 *mac_addr, u16 vlan_id, u16 func_id, u16 channel) +{ + struct spnic_port_mac_set mac_info; + u16 out_size = sizeof(mac_info); + struct spnic_nic_cfg *nic_cfg = NULL; + int err; + + if (!hwdev || !mac_addr) + return -EINVAL; + + memset(&mac_info, 0, sizeof(mac_info)); + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + + if ((vlan_id & SPNIC_VLAN_ID_MASK) >= VLAN_N_VID) { + nic_err(nic_cfg->dev_hdl, "Invalid VLAN number: %d\n", + (vlan_id & SPNIC_VLAN_ID_MASK)); + return -EINVAL; + } + + mac_info.func_id = func_id; + mac_info.vlan_id = vlan_id; + ether_addr_copy(mac_info.mac, mac_addr); + + err = l2nic_msg_to_mgmt_sync_ch(hwdev, SPNIC_NIC_CMD_DEL_MAC, + &mac_info, sizeof(mac_info), &mac_info, + &out_size, channel); + if (err || !out_size || + (mac_info.msg_head.status && mac_info.msg_head.status != + SPNIC_PF_SET_VF_ALREADY)) { + nic_err(nic_cfg->dev_hdl, + "Failed to delete MAC, err: %d, status: 0x%x, out size: 0x%x, channel: 0x%x\n", + err, mac_info.msg_head.status, out_size, channel); + return -EINVAL; + } + + if (mac_info.msg_head.status == SPNIC_PF_SET_VF_ALREADY) { + nic_warn(nic_cfg->dev_hdl, "PF has already set VF mac, Ignore delete operation.\n"); + return SPNIC_PF_SET_VF_ALREADY; + } + + return 0; +} + +int spnic_update_mac(void *hwdev, u8 *old_mac, u8 *new_mac, u16 vlan_id, u16 func_id) +{ + struct spnic_port_mac_update mac_info; + u16 out_size = sizeof(mac_info); + struct spnic_nic_cfg *nic_cfg = NULL; + int err; + + if (!hwdev || !old_mac || !new_mac) + return -EINVAL; + + memset(&mac_info, 0, sizeof(mac_info)); + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + + if ((vlan_id & SPNIC_VLAN_ID_MASK) >= VLAN_N_VID) { + nic_err(nic_cfg->dev_hdl, "Invalid VLAN number: %d\n", + vlan_id & SPNIC_VLAN_ID_MASK); + return -EINVAL; + } + + mac_info.func_id = func_id; + mac_info.vlan_id = vlan_id; + ether_addr_copy(mac_info.old_mac, old_mac); + ether_addr_copy(mac_info.new_mac, new_mac); + + err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_UPDATE_MAC, + &mac_info, sizeof(mac_info), + &mac_info, 
&out_size); + if (err || !out_size || spnic_check_mac_info(mac_info.msg_head.status, mac_info.vlan_id)) { + nic_err(nic_cfg->dev_hdl, + "Failed to update MAC, err: %d, status: 0x%x, out size: 0x%x\n", + err, mac_info.msg_head.status, out_size); + return -EINVAL; + } + + if (mac_info.msg_head.status == SPNIC_PF_SET_VF_ALREADY) { + nic_warn(nic_cfg->dev_hdl, "PF has already set VF MAC. Ignore update operation\n"); + return SPNIC_PF_SET_VF_ALREADY; + } + + if (mac_info.msg_head.status == SPNIC_MGMT_STATUS_EXIST) { + nic_warn(nic_cfg->dev_hdl, "MAC is repeated. Ignore update operation\n"); + return 0; + } + + return 0; +} + +int spnic_get_default_mac(void *hwdev, u8 *mac_addr) +{ + struct spnic_port_mac_set mac_info; + u16 out_size = sizeof(mac_info); + struct spnic_nic_cfg *nic_cfg = NULL; + int err; + + if (!hwdev || !mac_addr) + return -EINVAL; + + memset(&mac_info, 0, sizeof(mac_info)); + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + + mac_info.func_id = sphw_global_func_id(hwdev); + + err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_GET_MAC, &mac_info, sizeof(mac_info), + &mac_info, &out_size); + if (err || !out_size || mac_info.msg_head.status) { + nic_err(nic_cfg->dev_hdl, + "Failed to get mac, err: %d, status: 0x%x, out size: 0x%x\n", + err, mac_info.msg_head.status, out_size); + return -EINVAL; + } + + ether_addr_copy(mac_addr, mac_info.mac); + + return 0; +} + +static int spnic_config_vlan(struct spnic_nic_cfg *nic_cfg, u8 opcode, u16 vlan_id, u16 func_id) +{ + struct spnic_cmd_vlan_config vlan_info; + u16 out_size = sizeof(vlan_info); + int err; + + memset(&vlan_info, 0, sizeof(vlan_info)); + vlan_info.opcode = opcode; + vlan_info.func_id = func_id; + vlan_info.vlan_id = vlan_id; + + err = l2nic_msg_to_mgmt_sync(nic_cfg->hwdev, SPNIC_NIC_CMD_CFG_FUNC_VLAN, + &vlan_info, sizeof(vlan_info), + &vlan_info, &out_size); + if (err || !out_size || vlan_info.msg_head.status) { + nic_err(nic_cfg->dev_hdl, + "Failed to %s vlan, err: %d, status: 0x%x, out size: 0x%x\n", + opcode == SPNIC_CMD_OP_ADD ? "add" : "delete", + err, vlan_info.msg_head.status, out_size); + return -EINVAL; + } + + return 0; +} + +int spnic_add_vlan(void *hwdev, u16 vlan_id, u16 func_id) +{ + struct spnic_nic_cfg *nic_cfg = NULL; + + if (!hwdev) + return -EINVAL; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + return spnic_config_vlan(nic_cfg, SPNIC_CMD_OP_ADD, vlan_id, func_id); +} + +int spnic_del_vlan(void *hwdev, u16 vlan_id, u16 func_id) +{ + struct spnic_nic_cfg *nic_cfg = NULL; + + if (!hwdev) + return -EINVAL; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + return spnic_config_vlan(nic_cfg, SPNIC_CMD_OP_DEL, vlan_id, func_id); +} + +int spnic_set_vport_enable(void *hwdev, u16 func_id, bool enable, u16 channel) +{ + struct spnic_vport_state en_state; + u16 out_size = sizeof(en_state); + struct spnic_nic_cfg *nic_cfg = NULL; + int err; + + if (!hwdev) + return -EINVAL; + + memset(&en_state, 0, sizeof(en_state)); + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + + en_state.func_id = func_id; + en_state.state = enable ? 
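Note the asymmetric return convention of the MAC helpers above: SPNIC_PF_SET_VF_ALREADY (a positive value) means the PF owns the VF's MAC and the request was ignored, which is deliberately distinct from 0 and from negative errors. A sketch of caller-side handling that treats it as a soft success (the function name is hypothetical):

static int example_apply_mac(void *hwdev, const u8 *mac, u16 func_id)
{
	int err = spnic_set_mac(hwdev, mac, 0, func_id, SPHW_CHANNEL_NIC);

	/* PF has pinned the VF MAC: nothing changed, but not a failure */
	if (err == SPNIC_PF_SET_VF_ALREADY)
		return 0;

	return err;	/* 0 on success, negative errno otherwise */
}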
1 : 0; + + err = l2nic_msg_to_mgmt_sync_ch(hwdev, SPNIC_NIC_CMD_SET_VPORT_ENABLE, + &en_state, sizeof(en_state), + &en_state, &out_size, channel); + if (err || !out_size || en_state.msg_head.status) { + nic_err(nic_cfg->dev_hdl, "Failed to set vport state, err: %d, status: 0x%x, out size: 0x%x, channel: 0x%x\n", + err, en_state.msg_head.status, out_size, channel); + return -EINVAL; + } + + return 0; +} + +int spnic_set_dcb_state(void *hwdev, struct spnic_dcb_state *dcb_state) +{ + struct vf_data_storage *vf_infos = NULL; + struct spnic_cmd_vf_dcb_state vf_dcb; + struct spnic_nic_cfg *nic_cfg = NULL; + u16 vf_id, out_size = 0; + int err; + + if (!hwdev || !dcb_state) + return -EINVAL; + + memset(&vf_dcb, 0, sizeof(vf_dcb)); + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + + if (!memcmp(&nic_cfg->dcb_state, dcb_state, sizeof(nic_cfg->dcb_state))) + return 0; + + memcpy(&vf_dcb.state, dcb_state, sizeof(vf_dcb.state)); + /* save in sdk, vf will get dcb state when probing */ + spnic_save_dcb_state(nic_cfg, dcb_state); + + /* notify the stateful module in pf, then notify all vfs */ + spnic_notify_dcb_state_event(nic_cfg, dcb_state); + + /* vf not supported, no need to notify vfs */ + if (!nic_cfg->vf_infos) + return 0; + + vf_infos = nic_cfg->vf_infos; + for (vf_id = 0; vf_id < nic_cfg->max_vfs; vf_id++) { + if (vf_infos[vf_id].registered) { + vf_dcb.msg_head.status = 0; + out_size = sizeof(vf_dcb); + err = sphw_mbox_to_vf(hwdev, OS_VF_ID_TO_HW(vf_id), SPHW_MOD_L2NIC, + SPNIC_NIC_CMD_VF_COS, &vf_dcb, sizeof(vf_dcb), + &vf_dcb, &out_size, 0, SPHW_CHANNEL_NIC); + if (MSG_TO_MGMT_SYNC_RETURN_ERR(err, vf_dcb.msg_head.status, out_size)) + nic_err(nic_cfg->dev_hdl, + "Failed to notify dcb state to VF %u, err: %d, status: 0x%x, out size: 0x%x\n", + vf_id, err, vf_dcb.msg_head.status, out_size); + } + } + + return 0; +} + +int spnic_get_dcb_state(void *hwdev, struct spnic_dcb_state *dcb_state) +{ + struct spnic_nic_cfg *nic_cfg = NULL; + + if (!hwdev || !dcb_state) + return -EINVAL; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + + memcpy(dcb_state, &nic_cfg->dcb_state, sizeof(*dcb_state)); + + return 0; +} + +int spnic_save_dcb_state(struct spnic_nic_cfg *nic_cfg, struct spnic_dcb_state *dcb_state) +{ + memcpy(&nic_cfg->dcb_state, dcb_state, sizeof(*dcb_state)); + + return 0; +} + +int spnic_get_pf_dcb_state(void *hwdev, struct spnic_dcb_state *dcb_state) +{ + struct spnic_cmd_vf_dcb_state vf_dcb; + struct spnic_nic_cfg *nic_cfg = NULL; + u16 out_size = sizeof(vf_dcb); + int err; + + if (!hwdev || !dcb_state) + return -EINVAL; + + memset(&vf_dcb, 0, sizeof(vf_dcb)); + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + + if (sphw_func_type(hwdev) != TYPE_VF) { + nic_err(nic_cfg->dev_hdl, "Only a vf needs to get the pf dcb state\n"); + return -EINVAL; + } + + err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_VF_COS, &vf_dcb, + sizeof(vf_dcb), &vf_dcb, &out_size); + if (err || !out_size || vf_dcb.msg_head.status) { + nic_err(nic_cfg->dev_hdl, "Failed to get vf default cos, err: %d, status: 0x%x, out size: 0x%x\n", + err, vf_dcb.msg_head.status, out_size); + return -EFAULT; + } + + memcpy(dcb_state, &vf_dcb.state, sizeof(*dcb_state)); + /* Save dcb_state in sdk for the stateful module */ + spnic_save_dcb_state(nic_cfg, dcb_state); + + return 0; +} + +static int spnic_cfg_hw_pause(struct spnic_nic_cfg *nic_cfg, u8 opcode, + struct nic_pause_config *nic_pause) +{ + struct spnic_cmd_pause_config pause_info; + u16 out_size = sizeof(pause_info); + int err; + + memset(&pause_info, 0,
sizeof(pause_info)); + + pause_info.port_id = sphw_physical_port_id(nic_cfg->hwdev); + pause_info.opcode = opcode; + if (opcode == SPNIC_CMD_OP_SET) { + pause_info.auto_neg = nic_pause->auto_neg; + pause_info.rx_pause = nic_pause->rx_pause; + pause_info.tx_pause = nic_pause->tx_pause; + } + + err = l2nic_msg_to_mgmt_sync(nic_cfg->hwdev, SPNIC_NIC_CMD_CFG_PAUSE_INFO, + &pause_info, sizeof(pause_info), + &pause_info, &out_size); + if (err || !out_size || pause_info.msg_head.status) { + nic_err(nic_cfg->dev_hdl, "Failed to %s pause info, err: %d, status: 0x%x, out size: 0x%x\n", + opcode == SPNIC_CMD_OP_SET ? "set" : "get", + err, pause_info.msg_head.status, out_size); + return -EINVAL; + } + + if (opcode == SPNIC_CMD_OP_GET) { + nic_pause->auto_neg = pause_info.auto_neg; + nic_pause->rx_pause = pause_info.rx_pause; + nic_pause->tx_pause = pause_info.tx_pause; + } + + return 0; +} + +int spnic_set_pause_info(void *hwdev, struct nic_pause_config nic_pause) +{ + struct spnic_nic_cfg *nic_cfg = NULL; + int err; + + if (!hwdev) + return -EINVAL; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + + down(&nic_cfg->cfg_lock); + + err = spnic_cfg_hw_pause(nic_cfg, SPNIC_CMD_OP_SET, &nic_pause); + if (err) { + up(&nic_cfg->cfg_lock); + return err; + } + + nic_cfg->pfc_en = 0; + nic_cfg->pfc_bitmap = 0; + nic_cfg->pause_set = true; + nic_cfg->nic_pause.auto_neg = nic_pause.auto_neg; + nic_cfg->nic_pause.rx_pause = nic_pause.rx_pause; + nic_cfg->nic_pause.tx_pause = nic_pause.tx_pause; + + up(&nic_cfg->cfg_lock); + + return 0; +} + +int spnic_get_pause_info(void *hwdev, struct nic_pause_config *nic_pause) +{ + struct spnic_nic_cfg *nic_cfg = NULL; + int err = 0; + + if (!hwdev || !nic_pause) + return -EINVAL; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + + err = spnic_cfg_hw_pause(nic_cfg, SPNIC_CMD_OP_GET, nic_pause); + if (err) + return err; + + if (nic_cfg->pause_set || !nic_pause->auto_neg) { + nic_pause->rx_pause = nic_cfg->nic_pause.rx_pause; + nic_pause->tx_pause = nic_cfg->nic_pause.tx_pause; + } + + return 0; +} + +static int spnic_dcb_set_hw_pfc(struct spnic_nic_cfg *nic_cfg, u8 pfc_en, u8 pfc_bitmap) +{ + struct spnic_cmd_set_pfc pfc; + u16 out_size = sizeof(pfc); + int err; + + memset(&pfc, 0, sizeof(pfc)); + + pfc.port_id = sphw_physical_port_id(nic_cfg->hwdev); + pfc.pfc_bitmap = pfc_bitmap; + pfc.pfc_en = pfc_en; + + err = l2nic_msg_to_mgmt_sync(nic_cfg->hwdev, SPNIC_NIC_CMD_SET_PFC, + &pfc, sizeof(pfc), &pfc, &out_size); + if (err || pfc.msg_head.status || !out_size) { + nic_err(nic_cfg->dev_hdl, "Failed to set pfc, err: %d, status: 0x%x, out size: 0x%x\n", + err, pfc.msg_head.status, out_size); + return -EINVAL; + } + + return 0; +} + +int spnic_dcb_set_pfc(void *hwdev, u8 pfc_en, u8 pfc_bitmap) +{ + struct spnic_nic_cfg *nic_cfg = NULL; + int err; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + + down(&nic_cfg->cfg_lock); + + err = spnic_dcb_set_hw_pfc(nic_cfg, pfc_en, pfc_bitmap); + if (err) { + up(&nic_cfg->cfg_lock); + return err; + } + + nic_cfg->pfc_en = pfc_en; + nic_cfg->pfc_bitmap = pfc_bitmap; + + /* pause settings is opposite from pfc */ + nic_cfg->nic_pause.rx_pause = pfc_en ? 0 : 1; + nic_cfg->nic_pause.tx_pause = pfc_en ? 
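Global link pause and per-priority PFC are treated as mutually exclusive here: spnic_set_pause_info() clears the cached pfc_en/pfc_bitmap, and spnic_dcb_set_pfc() (concluded just below) forces the cached rx/tx pause to the opposite of pfc_en. The resulting cached state, summarized as an illustrative truth table derived from the surrounding code:

/*
 *   call                           pfc_en   nic_pause.rx_pause/tx_pause
 *   spnic_set_pause_info(p)        0        p.rx_pause / p.tx_pause
 *   spnic_dcb_set_pfc(hw, 1, map)  1        0 / 0
 *   spnic_dcb_set_pfc(hw, 0, 0)    0        1 / 1
 */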
0 : 1; + + up(&nic_cfg->cfg_lock); + + return 0; +} + +int spnic_dcb_set_ets(void *hwdev, u8 *cos_tc, u8 *cos_bw, u8 *cos_prio, + u8 *tc_bw, u8 *tc_prio) +{ + struct spnic_up_ets_cfg ets; + struct spnic_nic_cfg *nic_cfg = NULL; + u16 out_size = sizeof(ets); + u16 cos_bw_t = 0; + u8 tc_bw_t = 0; + int i, err; + + memset(&ets, 0, sizeof(ets)); + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + + for (i = 0; i < SPNIC_DCB_COS_MAX; i++) { + cos_bw_t += *(cos_bw + i); + tc_bw_t += *(tc_bw + i); + + if (*(cos_tc + i) > SPNIC_DCB_TC_MAX) { + nic_err(nic_cfg->dev_hdl, "Invalid cos %d mapping tc: %u\n", + i, *(cos_tc + i)); + return -EINVAL; + } + } + + /* The sum of all TCs must be 100%, and the same for cos */ + if ((tc_bw_t != 100 && tc_bw_t != 0) || (cos_bw_t % 100) != 0) { + nic_err(nic_cfg->dev_hdl, + "Invalid pg_bw: %u or up_bw: %u\n", tc_bw_t, cos_bw_t); + return -EINVAL; + } + + ets.port_id = sphw_physical_port_id(hwdev); + memcpy(ets.cos_tc, cos_tc, SPNIC_DCB_COS_MAX); + memcpy(ets.cos_bw, cos_bw, SPNIC_DCB_COS_MAX); + memcpy(ets.cos_prio, cos_prio, SPNIC_DCB_COS_MAX); + memcpy(ets.tc_bw, tc_bw, SPNIC_DCB_TC_MAX); + memcpy(ets.tc_prio, tc_prio, SPNIC_DCB_TC_MAX); + + err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_SET_ETS, + &ets, sizeof(ets), &ets, &out_size); + if (err || ets.msg_head.status || !out_size) { + nic_err(nic_cfg->dev_hdl, + "Failed to set ets, err: %d, status: 0x%x, out size: 0x%x\n", + err, ets.msg_head.status, out_size); + return -EINVAL; + } + + return 0; +} + +int spnic_dcb_set_cos_up_map(void *hwdev, u8 cos_valid_bitmap, u8 *cos_up, u8 max_cos_num) +{ + struct spnic_cos_up_map map; + struct spnic_nic_cfg *nic_cfg = NULL; + u16 out_size = sizeof(map); + int err; + + if (!hwdev || !cos_up) + return -EINVAL; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + + memset(&map, 0, sizeof(map)); + + map.port_id = sphw_physical_port_id(hwdev); + map.cos_valid_mask = cos_valid_bitmap; + memcpy(map.map, cos_up, sizeof(map.map)); + + err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_SETUP_COS_MAPPING, + &map, sizeof(map), &map, &out_size); + if (err || map.msg_head.status || !out_size) { + nic_err(nic_cfg->dev_hdl, + "Failed to set cos2up map, err: %d, status: 0x%x, out size: 0x%x\n", + err, map.msg_head.status, out_size); + return -EFAULT; + } + + return 0; +} + +int spnic_flush_qps_res(void *hwdev) +{ + struct spnic_cmd_clear_qp_resource sq_res; + u16 out_size = sizeof(sq_res); + struct spnic_nic_cfg *nic_cfg = NULL; + int err; + + if (!hwdev) + return -EINVAL; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + + memset(&sq_res, 0, sizeof(sq_res)); + + sq_res.func_id = sphw_global_func_id(hwdev); + + err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_CLEAR_QP_RESOURCE, + &sq_res, sizeof(sq_res), &sq_res, + &out_size); + if (err || !out_size || sq_res.msg_head.status) { + nic_err(nic_cfg->dev_hdl, "Failed to clear sq resources, err: %d, status: 0x%x, out size: 0x%x\n", + err, sq_res.msg_head.status, out_size); + return -EINVAL; + } + + return 0; +} + +int spnic_get_vport_stats(void *hwdev, struct spnic_vport_stats *stats) +{ + struct spnic_port_stats_info stats_info; + struct spnic_cmd_vport_stats vport_stats; + u16 out_size = sizeof(vport_stats); + struct spnic_nic_cfg *nic_cfg = NULL; + int err; + + if (!hwdev || !stats) + return -EINVAL; + + memset(&stats_info, 0, sizeof(stats_info)); + memset(&vport_stats, 0, sizeof(vport_stats)); + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + + stats_info.func_id = 
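spnic_dcb_set_ets() above accepts a per-TC bandwidth table only when the percentages sum to exactly 100 (or are all zero, meaning ETS is unused), and every cos must map to a TC no larger than SPNIC_DCB_TC_MAX. A standalone restatement of the bandwidth rule; the array length is assumed to be SPNIC_DCB_COS_MAX (8 in this sketch, the value is defined outside this hunk):

static bool example_ets_bw_valid(const u8 *tc_bw, int ntc)
{
	int i, sum = 0;

	for (i = 0; i < ntc; i++)
		sum += tc_bw[i];

	/* an all-zero table means "ETS not used"; otherwise it must be 100% */
	return sum == 0 || sum == 100;
}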
sphw_global_func_id(hwdev); + + err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_GET_VPORT_STAT, + &stats_info, sizeof(stats_info), + &vport_stats, &out_size); + if (err || !out_size || vport_stats.msg_head.status) { + nic_err(nic_cfg->dev_hdl, + "Failed to get function statistics, err: %d, status: 0x%x, out size: 0x%x\n", + err, vport_stats.msg_head.status, out_size); + return -EFAULT; + } + + memcpy(stats, &vport_stats.stats, sizeof(*stats)); + + return 0; +} + +int spnic_set_function_table(struct spnic_nic_cfg *nic_cfg, u32 cfg_bitmap, + struct spnic_func_tbl_cfg *cfg) +{ + struct spnic_cmd_set_func_tbl cmd_func_tbl; + u16 out_size = sizeof(cmd_func_tbl); + int err; + + memset(&cmd_func_tbl, 0, sizeof(cmd_func_tbl)); + cmd_func_tbl.func_id = sphw_global_func_id(nic_cfg->hwdev); + cmd_func_tbl.cfg_bitmap = cfg_bitmap; + cmd_func_tbl.tbl_cfg = *cfg; + + err = l2nic_msg_to_mgmt_sync(nic_cfg->hwdev, SPNIC_NIC_CMD_SET_FUNC_TBL, + &cmd_func_tbl, sizeof(cmd_func_tbl), + &cmd_func_tbl, &out_size); + if (err || cmd_func_tbl.msg_head.status || !out_size) { + nic_err(nic_cfg->dev_hdl, + "Failed to set func table, bitmap: 0x%x, err: %d, status: 0x%x, out size: 0x%x\n", + cfg_bitmap, err, cmd_func_tbl.msg_head.status, out_size); + return -EFAULT; + } + + return 0; +} + +int spnic_init_function_table(struct spnic_nic_cfg *nic_cfg) +{ + struct spnic_func_tbl_cfg func_tbl_cfg = {0}; + u32 cfg_bitmap = BIT(FUNC_CFG_INIT) | BIT(FUNC_CFG_MTU) | + BIT(FUNC_CFG_RX_BUF_SIZE); + + func_tbl_cfg.mtu = 0x3FFF; /* default, max mtu */ + func_tbl_cfg.rx_wqe_buf_size = nic_cfg->rx_buff_len; + + return spnic_set_function_table(nic_cfg, cfg_bitmap, &func_tbl_cfg); +} + +int spnic_set_port_mtu(void *hwdev, u16 new_mtu) +{ + struct spnic_func_tbl_cfg func_tbl_cfg = {0}; + struct spnic_nic_cfg *nic_cfg = NULL; + + if (!hwdev) + return -EINVAL; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + + if (new_mtu < SPNIC_MIN_MTU_SIZE) { + nic_err(nic_cfg->dev_hdl, "Invalid mtu size: %ubytes, mtu size < %ubytes", + new_mtu, SPNIC_MIN_MTU_SIZE); + return -EINVAL; + } + + if (new_mtu > SPNIC_MAX_JUMBO_FRAME_SIZE) { + nic_err(nic_cfg->dev_hdl, "Invalid mtu size: %ubytes, mtu size > %ubytes", + new_mtu, SPNIC_MAX_JUMBO_FRAME_SIZE); + return -EINVAL; + } + + func_tbl_cfg.mtu = new_mtu; + return spnic_set_function_table(nic_cfg, BIT(FUNC_CFG_MTU), &func_tbl_cfg); +} + +static int nic_feature_nego(void *hwdev, u8 opcode, u64 *s_feature, u16 size) +{ + struct spnic_nic_cfg *nic_cfg = NULL; + struct spnic_cmd_feature_nego feature_nego; + u16 out_size = sizeof(feature_nego); + int err; + + if (!hwdev || !s_feature || size > NIC_MAX_FEATURE_QWORD) + return -EINVAL; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + memset(&feature_nego, 0, sizeof(feature_nego)); + feature_nego.func_id = sphw_global_func_id(hwdev); + feature_nego.opcode = opcode; + if (opcode == SPNIC_CMD_OP_SET) + memcpy(feature_nego.s_feature, s_feature, size * sizeof(u64)); + + err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_FEATURE_NEGO, + &feature_nego, sizeof(feature_nego), + &feature_nego, &out_size); + if (err || !out_size || feature_nego.msg_head.status) { + nic_err(nic_cfg->dev_hdl, "Failed to negotiate nic feature, err:%d, status: 0x%x, out_size: 0x%x\n", + err, feature_nego.msg_head.status, out_size); + return -EIO; + } + + if (opcode == SPNIC_CMD_OP_GET) + memcpy(s_feature, feature_nego.s_feature, size * sizeof(u64)); + + return 0; +} + +int spnic_get_nic_feature(void *hwdev, u64 *s_feature, u16 size) +{ + return 
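spnic_set_port_mtu() above only rewrites the FUNC_CFG_MTU field of the function table and accepts 256 to 9600 bytes (SPNIC_MIN_MTU_SIZE and SPNIC_MAX_JUMBO_FRAME_SIZE in spnic_nic_cfg.h). A sketch of how an ndo_change_mtu-style caller might use it; names and flow are illustrative, the driver's real handler lives in spnic_netdev_ops.c:

static int example_change_mtu(void *hwdev, struct net_device *netdev, int new_mtu)
{
	int err = spnic_set_port_mtu(hwdev, (u16)new_mtu);

	if (err)
		return err;		/* -EINVAL outside 256..9600 */

	netdev->mtu = new_mtu;		/* commit only after hw accepted it */
	return 0;
}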
nic_feature_nego(hwdev, SPNIC_CMD_OP_GET, s_feature, size); +} + +int spnic_set_nic_feature(void *hwdev, u64 *s_feature, u16 size) +{ + return nic_feature_nego(hwdev, SPNIC_CMD_OP_SET, s_feature, size); +} + +static inline int init_nic_hwdev_param_valid(void *hwdev, void *pcidev_hdl, void *dev_hdl) +{ + if (!hwdev || !pcidev_hdl || !dev_hdl) + return -EINVAL; + + return 0; +} + +/* spnic_init_nic_hwdev - init nic hwdev + * @hwdev: pointer to hwdev + * @pcidev_hdl: pointer to pcidev or handler + * @dev_hdl: pointer to pcidev->dev or handler, for sdk_err() or dma_alloc() + * @rx_buff_len: rx_buff_len is receive buffer length + */ +int spnic_init_nic_hwdev(void *hwdev, void *pcidev_hdl, void *dev_hdl, u16 rx_buff_len) +{ + struct spnic_nic_cfg *nic_cfg = NULL; + int err; + + if (init_nic_hwdev_param_valid(hwdev, pcidev_hdl, dev_hdl)) + return -EINVAL; + + nic_cfg = kzalloc(sizeof(*nic_cfg), GFP_KERNEL); + if (!nic_cfg) + return -ENOMEM; + + nic_cfg->dev_hdl = dev_hdl; + nic_cfg->pcidev_hdl = pcidev_hdl; + nic_cfg->hwdev = hwdev; + + sema_init(&nic_cfg->cfg_lock, 1); + mutex_init(&nic_cfg->sfp_mutex); + + err = sphw_register_service_adapter(hwdev, nic_cfg, SERVICE_T_NIC); + if (err) { + nic_err(nic_cfg->dev_hdl, "Failed to register service adapter\n"); + goto register_sa_err; + } + + err = spnic_get_nic_feature(hwdev, &nic_cfg->feature_cap, 1); + if (err) { + nic_err(nic_cfg->dev_hdl, "Failed to get nic features\n"); + goto get_feature_err; + } + + sdk_info(dev_hdl, "nic features: 0x%llx\n", nic_cfg->feature_cap); + + err = sphw_aeq_register_swe_cb(hwdev, SPHW_STATELESS_EVENT, spnic_nic_sw_aeqe_handler); + if (err) { + nic_err(nic_cfg->dev_hdl, + "Failed to register sw aeqe handler\n"); + goto register_sw_aeqe_err; + } + + err = spnic_vf_func_init(nic_cfg); + if (err) { + nic_err(nic_cfg->dev_hdl, "Failed to init vf info\n"); + goto vf_init_err; + } + + nic_cfg->rx_buff_len = rx_buff_len; + + err = spnic_init_function_table(nic_cfg); + if (err) { + nic_err(nic_cfg->dev_hdl, "Failed to init function table\n"); + goto init_func_tbl_err; + } + + return 0; + +init_func_tbl_err: + spnic_vf_func_free(nic_cfg); + +vf_init_err: + sphw_aeq_unregister_swe_cb(hwdev, SPHW_STATELESS_EVENT); + +register_sw_aeqe_err: +get_feature_err: + sphw_unregister_service_adapter(hwdev, SERVICE_T_NIC); + +register_sa_err: + kfree(nic_cfg); + + return err; +} + +void spnic_free_nic_hwdev(void *hwdev) +{ + struct spnic_nic_cfg *nic_cfg = NULL; + + if (!hwdev) + return; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_cfg) + return; + + spnic_vf_func_free(nic_cfg); + + sphw_aeq_unregister_swe_cb(hwdev, SPHW_STATELESS_EVENT); + + sphw_unregister_service_adapter(hwdev, SERVICE_T_NIC); + + kfree(nic_cfg); +} + +/* to do : send cmd to MPU to drop nic tx pkt*/ +int spnic_force_drop_tx_pkt(void *hwdev) +{ + return 0; +} + +int spnic_set_rx_mode(void *hwdev, u32 enable) +{ + struct spnic_nic_cfg *nic_cfg = NULL; + struct spnic_rx_mode_config rx_mode_cfg; + u16 out_size = sizeof(rx_mode_cfg); + int err; + + if (!hwdev) + return -EINVAL; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + + memset(&rx_mode_cfg, 0, sizeof(rx_mode_cfg)); + rx_mode_cfg.func_id = sphw_global_func_id(hwdev); + rx_mode_cfg.rx_mode = enable; + + err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_SET_RX_MODE, + &rx_mode_cfg, sizeof(rx_mode_cfg), + &rx_mode_cfg, &out_size); + if (err || !out_size || rx_mode_cfg.msg_head.status) { + nic_err(nic_cfg->dev_hdl, "Failed to set rx mode, err: %d, status: 0x%x, out size: 0x%x\n", 
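The feature word negotiated during spnic_init_nic_hwdev() is cached in nic_cfg->feature_cap and later consumed through spnic_get_feature_cap(); the SPNIC_SUPPORT_* macros in spnic_nic_cfg.h are thin wrappers over that cache. For instance (NIC_F_TSO is one of the NIC_F_* feature bits, defined outside this hunk):

static bool example_hw_supports_tso(void *hwdev)
{
	/* expands to spnic_get_feature_cap(hwdev) & NIC_F_TSO */
	return SPNIC_SUPPORT_TSO(hwdev);
}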
+ err, rx_mode_cfg.msg_head.status, out_size); + return -EINVAL; + } + + return 0; +} + +int spnic_set_rx_vlan_offload(void *hwdev, u8 en) +{ + struct spnic_nic_cfg *nic_cfg = NULL; + struct spnic_cmd_vlan_offload vlan_cfg; + u16 out_size = sizeof(vlan_cfg); + int err; + + if (!hwdev) + return -EINVAL; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + + memset(&vlan_cfg, 0, sizeof(vlan_cfg)); + vlan_cfg.func_id = sphw_global_func_id(hwdev); + vlan_cfg.vlan_offload = en; + + err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_SET_RX_VLAN_OFFLOAD, + &vlan_cfg, sizeof(vlan_cfg), + &vlan_cfg, &out_size); + if (err || !out_size || vlan_cfg.msg_head.status) { + nic_err(nic_cfg->dev_hdl, "Failed to set rx vlan offload, err: %d, status: 0x%x, out size: 0x%x\n", + err, vlan_cfg.msg_head.status, out_size); + return -EINVAL; + } + + return 0; +} + +int spnic_update_mac_vlan(void *hwdev, u16 old_vlan, u16 new_vlan, int vf_id) +{ + struct vf_data_storage *vf_info = NULL; + struct spnic_nic_cfg *nic_cfg = NULL; + u16 func_id; + int err; + + if (!hwdev || old_vlan >= VLAN_N_VID || new_vlan >= VLAN_N_VID) + return -EINVAL; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + vf_info = nic_cfg->vf_infos + HW_VF_ID_TO_OS(vf_id); + + if (!nic_cfg->vf_infos || is_zero_ether_addr(vf_info->drv_mac_addr)) + return 0; + + func_id = sphw_glb_pf_vf_offset(nic_cfg->hwdev) + (u16)vf_id; + + err = spnic_del_mac(nic_cfg->hwdev, vf_info->drv_mac_addr, + old_vlan, func_id, SPHW_CHANNEL_NIC); + if (err) { + nic_err(nic_cfg->dev_hdl, "Failed to delete VF %d MAC %pM vlan %u\n", + HW_VF_ID_TO_OS(vf_id), vf_info->drv_mac_addr, old_vlan); + return err; + } + + err = spnic_set_mac(nic_cfg->hwdev, vf_info->drv_mac_addr, + new_vlan, func_id, SPHW_CHANNEL_NIC); + if (err) { + nic_err(nic_cfg->dev_hdl, "Failed to add VF %d MAC %pM vlan %u\n", + HW_VF_ID_TO_OS(vf_id), vf_info->drv_mac_addr, new_vlan); + spnic_set_mac(nic_cfg->hwdev, vf_info->drv_mac_addr, + old_vlan, func_id, SPHW_CHANNEL_NIC); + return err; + } + + return 0; +} + +static int spnic_set_rx_lro(void *hwdev, u8 ipv4_en, u8 ipv6_en, u8 lro_max_pkt_len) +{ + struct spnic_nic_cfg *nic_cfg = NULL; + struct spnic_cmd_lro_config lro_cfg; + u16 out_size = sizeof(lro_cfg); + int err; + + if (!hwdev) + return -EINVAL; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + + memset(&lro_cfg, 0, sizeof(lro_cfg)); + lro_cfg.func_id = sphw_global_func_id(hwdev); + lro_cfg.opcode = SPNIC_CMD_OP_SET; + lro_cfg.lro_ipv4_en = ipv4_en; + lro_cfg.lro_ipv6_en = ipv6_en; + lro_cfg.lro_max_pkt_len = lro_max_pkt_len; + + err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_CFG_RX_LRO, + &lro_cfg, sizeof(lro_cfg), + &lro_cfg, &out_size); + if (err || !out_size || lro_cfg.msg_head.status) { + nic_err(nic_cfg->dev_hdl, "Failed to set lro offload, err: %d, status: 0x%x, out size: 0x%x\n", + err, lro_cfg.msg_head.status, out_size); + return -EINVAL; + } + + return 0; +} + +static int spnic_set_rx_lro_timer(void *hwdev, u32 timer_value) +{ + struct spnic_nic_cfg *nic_cfg = NULL; + struct spnic_cmd_lro_timer lro_timer; + u16 out_size = sizeof(lro_timer); + int err; + + if (!hwdev) + return -EINVAL; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + + memset(&lro_timer, 0, sizeof(lro_timer)); + lro_timer.opcode = SPNIC_CMD_OP_SET; + lro_timer.timer = timer_value; + + err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_CFG_LRO_TIMER, + &lro_timer, sizeof(lro_timer), + &lro_timer, &out_size); + if (err || !out_size || lro_timer.msg_head.status) { + 
nic_err(nic_cfg->dev_hdl, "Failed to set lro timer, err: %d, status: 0x%x, out size: 0x%x\n", + err, lro_timer.msg_head.status, out_size); + + return -EINVAL; + } + + return 0; +} + +int spnic_set_rx_lro_state(void *hwdev, u8 lro_en, u32 lro_timer, u32 lro_max_pkt_len) +{ + struct spnic_nic_cfg *nic_cfg = NULL; + u8 ipv4_en = 0, ipv6_en = 0; + int err; + + if (!hwdev) + return -EINVAL; + + ipv4_en = lro_en ? 1 : 0; + ipv6_en = lro_en ? 1 : 0; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + + nic_info(nic_cfg->dev_hdl, "Set LRO max coalesce packet size to %uK\n", + lro_max_pkt_len); + + err = spnic_set_rx_lro(hwdev, ipv4_en, ipv6_en, (u8)lro_max_pkt_len); + if (err) + return err; + + /* we don't set LRO timer for VF */ + if (sphw_func_type(hwdev) == TYPE_VF) + return 0; + + nic_info(nic_cfg->dev_hdl, "Set LRO timer to %u\n", lro_timer); + + return spnic_set_rx_lro_timer(hwdev, lro_timer); +} + +int spnic_set_vlan_fliter(void *hwdev, u32 vlan_filter_ctrl) +{ + struct spnic_nic_cfg *nic_cfg = NULL; + struct spnic_cmd_set_vlan_filter vlan_filter; + u16 out_size = sizeof(vlan_filter); + int err; + + if (!hwdev) + return -EINVAL; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + + memset(&vlan_filter, 0, sizeof(vlan_filter)); + vlan_filter.func_id = sphw_global_func_id(hwdev); + vlan_filter.vlan_filter_ctrl = vlan_filter_ctrl; + + err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_SET_VLAN_FILTER_EN, + &vlan_filter, sizeof(vlan_filter), + &vlan_filter, &out_size); + if (err || !out_size || vlan_filter.msg_head.status) { + nic_err(nic_cfg->dev_hdl, "Failed to set vlan filter, err: %d, status: 0x%x, out size: 0x%x\n", + err, vlan_filter.msg_head.status, out_size); + return -EINVAL; + } + + return 0; +} + +u64 spnic_get_feature_cap(void *hwdev) +{ + struct spnic_nic_cfg *nic_cfg = NULL; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + + return nic_cfg->feature_cap; +} + +int spnic_add_tcam_rule(void *hwdev, struct nic_tcam_cfg_rule *tcam_rule) +{ + u16 out_size = sizeof(struct nic_cmd_fdir_add_rule); + struct nic_cmd_fdir_add_rule tcam_cmd; + struct spnic_nic_cfg *nic_cfg = NULL; + int err; + + if (!hwdev || !tcam_rule) + return -EINVAL; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + if (tcam_rule->index >= SPNIC_MAX_TCAM_RULES_NUM) { + nic_err(nic_cfg->dev_hdl, "Tcam rules num to add is invalid\n"); + return -EINVAL; + } + + memset(&tcam_cmd, 0, sizeof(struct nic_cmd_fdir_add_rule)); + memcpy((void *)&tcam_cmd.rule, (void *)tcam_rule, + sizeof(struct nic_tcam_cfg_rule)); + tcam_cmd.func_id = sphw_global_func_id(hwdev); + + err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_ADD_TC_FLOW, + &tcam_cmd, sizeof(tcam_cmd), + &tcam_cmd, &out_size); + if (err || tcam_cmd.head.status || !out_size) { + nic_err(nic_cfg->dev_hdl, + "Add tcam rule failed, err: %d, status: 0x%x, out size: 0x%x\n", + err, tcam_cmd.head.status, out_size); + return -EIO; + } + + return 0; +} + +int spnic_del_tcam_rule(void *hwdev, u32 index) +{ + u16 out_size = sizeof(struct nic_cmd_fdir_del_rules); + struct nic_cmd_fdir_del_rules tcam_cmd; + struct spnic_nic_cfg *nic_cfg = NULL; + int err; + + if (!hwdev) + return -EINVAL; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + if (index >= SPNIC_MAX_TCAM_RULES_NUM) { + nic_err(nic_cfg->dev_hdl, "Tcam rules num to del is invalid\n"); + return -EINVAL; + } + + memset(&tcam_cmd, 0, sizeof(struct nic_cmd_fdir_del_rules)); + tcam_cmd.index_start = index; + tcam_cmd.index_num = 1; + tcam_cmd.func_id = 
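As the "%uK" log message in spnic_set_rx_lro_state() shows, lro_max_pkt_len is handed to firmware in 1 KB units, and the LRO timer is configured on the PF only; VFs skip it. An illustrative enable call; the timer value 16 is a placeholder, not a recommendation, since its unit is firmware-defined and not visible in this patch:

static int example_enable_lro(void *hwdev)
{
	/* lro_en = 1, timer = 16 (placeholder), 32 => 32 KB coalesce limit */
	return spnic_set_rx_lro_state(hwdev, 1, 16, 32);
}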
sphw_global_func_id(hwdev); + + err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_DEL_TC_FLOW, + &tcam_cmd, sizeof(tcam_cmd), + &tcam_cmd, &out_size); + if (err || tcam_cmd.head.status || !out_size) { + nic_err(nic_cfg->dev_hdl, "Del tcam rule failed, err: %d, status: 0x%x, out size: 0x%x\n", + err, tcam_cmd.head.status, out_size); + return -EIO; + } + + return 0; +} + +/** + * spnic_mgmt_tcam_block - alloc or free tcam block for IO packet. + * + * @param hwdev + * The hardware interface of a nic device. + * @param alloc_en + * 1 alloc block. + * 0 free block. + * @param index + * block index from firmware. + * @return + * 0 on success, + * negative error value otherwise. + */ +static int spnic_mgmt_tcam_block(void *hwdev, u8 alloc_en, u16 *index) +{ + struct nic_cmd_ctrl_tcam_block_out tcam_block_info; + u16 out_size = sizeof(struct nic_cmd_ctrl_tcam_block_out); + struct spnic_nic_cfg *nic_cfg = NULL; + int err; + + if (!hwdev || !index) + return -EINVAL; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + memset(&tcam_block_info, 0, sizeof(struct nic_cmd_ctrl_tcam_block_out)); + + tcam_block_info.func_id = sphw_global_func_id(hwdev); + tcam_block_info.alloc_en = alloc_en; + tcam_block_info.tcam_type = SPNIC_TCAM_BLOCK_NORMAL_TYPE; + tcam_block_info.tcam_block_index = *index; + + err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_CFG_TCAM_BLOCK, + &tcam_block_info, sizeof(tcam_block_info), + &tcam_block_info, &out_size); + if (err || tcam_block_info.head.status || !out_size) { + nic_err(nic_cfg->dev_hdl, + "Set tcam block failed, err: %d, status: 0x%x, out size: 0x%x\n", + err, tcam_block_info.head.status, out_size); + return -EIO; + } + + if (alloc_en) + *index = tcam_block_info.tcam_block_index; + + return 0; +} + +int spnic_alloc_tcam_block(void *hwdev, u16 *index) +{ + return spnic_mgmt_tcam_block(hwdev, SPNIC_TCAM_BLOCK_ENABLE, index); +} + +int spnic_free_tcam_block(void *hwdev, u16 *index) +{ + return spnic_mgmt_tcam_block(hwdev, SPNIC_TCAM_BLOCK_DISABLE, index); +} + +int spnic_set_fdir_tcam_rule_filter(void *hwdev, bool enable) +{ + struct nic_cmd_set_tcam_enable port_tcam_cmd; + u16 out_size = sizeof(port_tcam_cmd); + struct spnic_nic_cfg *nic_cfg = NULL; + int err; + + if (!hwdev) + return -EINVAL; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + memset(&port_tcam_cmd, 0, sizeof(port_tcam_cmd)); + port_tcam_cmd.func_id = sphw_global_func_id(hwdev); + port_tcam_cmd.tcam_enable = (u8)enable; + + err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_ENABLE_TCAM, + &port_tcam_cmd, sizeof(port_tcam_cmd), + &port_tcam_cmd, &out_size); + if (err || port_tcam_cmd.head.status || !out_size) { + nic_err(nic_cfg->dev_hdl, "Set fdir tcam filter failed, err: %d, status: 0x%x, out size: 0x%x, enable: 0x%x\n", + err, port_tcam_cmd.head.status, out_size, + enable); + return -EIO; + } + + return 0; +} + +int spnic_flush_tcam_rule(void *hwdev) +{ + struct nic_cmd_flush_tcam_rules tcam_flush; + u16 out_size = sizeof(struct nic_cmd_flush_tcam_rules); + struct spnic_nic_cfg *nic_cfg = NULL; + int err; + + if (!hwdev) + return -EINVAL; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + memset(&tcam_flush, 0, sizeof(struct nic_cmd_flush_tcam_rules)); + tcam_flush.func_id = sphw_global_func_id(hwdev); + + err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_FLUSH_TCAM, + &tcam_flush, + sizeof(struct nic_cmd_flush_tcam_rules), + &tcam_flush, &out_size); + if (err || tcam_flush.head.status || !out_size) { + nic_err(nic_cfg->dev_hdl, + "Flush tcam fdir rules failed, err: 
%d, status: 0x%x, out size: 0x%x\n", + err, tcam_flush.head.status, out_size); + return -EIO; + } + + return 0; +} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_cfg.h b/drivers/net/ethernet/ramaxel/spnic/spnic_nic_cfg.h new file mode 100644 index 000000000000..0872a2f94655 --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/spnic_nic_cfg.h @@ -0,0 +1,724 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#ifndef SPNIC_NIC_CFG_H +#define SPNIC_NIC_CFG_H + +#include <linux/if_link.h> + +#include "spnic_nic_cmd.h" +#include "spnic_mgmt_interface.h" +#include "spnic_mag_cmd.h" + +#define OS_VF_ID_TO_HW(os_vf_id) ((os_vf_id) + 1) +#define HW_VF_ID_TO_OS(hw_vf_id) ((hw_vf_id) - 1) + +#define SPNIC_VLAN_PRIORITY_SHIFT 13 + +#define SPNIC_RSS_KEY_RSV_NUM 2 + +#define SPNIC_MIN_MTU_SIZE 256 +#define SPNIC_MAX_JUMBO_FRAME_SIZE 9600 + +#define SPNIC_PF_SET_VF_ALREADY 0x4 +#define SPNIC_MGMT_STATUS_EXIST 0x6 + +#define SPNIC_LOWEST_LATENCY 1 +#define SPNIC_MULTI_VM_LATENCY 32 +#define SPNIC_MULTI_VM_PENDING_LIMIT 4 +#define SPNIC_RX_RATE_LOW 400000 +#define SPNIC_RX_COAL_TIME_LOW 16 +#define SPNIC_RX_PENDING_LIMIT_LOW 2 +#define SPNIC_RX_RATE_HIGH 1000000 +#define SPNIC_RX_COAL_TIME_HIGH 225 +#define SPNIC_RX_PENDING_LIMIT_HIGH 8 +#define SPNIC_RX_RATE_THRESH 50000 +#define SPNIC_TX_RATE_THRESH 50000 +#define SPNIC_RX_RATE_LOW_VM 100000 +#define SPNIC_RX_PENDING_LIMIT_HIGH_VM 87 + +enum spnic_valid_link_settings { + HILINK_LINK_SET_SPEED = 0x1, + HILINK_LINK_SET_AUTONEG = 0x2, + HILINK_LINK_SET_FEC = 0x4, +}; + +struct spnic_link_ksettings { + u32 valid_bitmap; + u8 speed; /* enum nic_speed_level */ + u8 autoneg; /* 0 - off; 1 - on */ + u8 fec; /* 0 - RSFEC; 1 - BASEFEC; 2 - NOFEC */ +}; + +u64 spnic_get_feature_cap(void *hwdev); + +#define SPNIC_SUPPORT_FEATURE(hwdev, feature) (spnic_get_feature_cap(hwdev) & NIC_F_##feature) +#define SPNIC_SUPPORT_CSUM(hwdev) SPNIC_SUPPORT_FEATURE(hwdev, CSUM) +#define SPNIC_SUPPORT_SCTP_CRC(hwdev) SPNIC_SUPPORT_FEATURE(hwdev, SCTP_CRC) +#define SPNIC_SUPPORT_TSO(hwdev) SPNIC_SUPPORT_FEATURE(hwdev, TSO) +#define SPNIC_SUPPORT_UFO(hwdev) SPNIC_SUPPORT_FEATURE(hwdev, UFO) +#define SPNIC_SUPPORT_LRO(hwdev) SPNIC_SUPPORT_FEATURE(hwdev, LRO) +#define SPNIC_SUPPORT_RSS(hwdev) SPNIC_SUPPORT_FEATURE(hwdev, RSS) +#define SPNIC_SUPPORT_RXVLAN_FILTER(hwdev) SPNIC_SUPPORT_FEATURE(hwdev, RX_VLAN_FILTER) +#define SPNIC_SUPPORT_VLAN_OFFLOAD(hwdev) (SPNIC_SUPPORT_FEATURE(hwdev, RX_VLAN_STRIP) && \ + SPNIC_SUPPORT_FEATURE(hwdev, TX_VLAN_INSERT)) +#define SPNIC_SUPPORT_VXLAN_OFFLOAD(hwdev) SPNIC_SUPPORT_FEATURE(hwdev, VXLAN_OFFLOAD) +#define SPNIC_SUPPORT_IPSEC_OFFLOAD(hwdev) SPNIC_SUPPORT_FEATURE(hwdev, IPSEC_OFFLOAD) +#define SPNIC_SUPPORT_FDIR(hwdev) SPNIC_SUPPORT_FEATURE(hwdev, FDIR) +#define SPNIC_SUPPORT_PROMISC(hwdev) SPNIC_SUPPORT_FEATURE(hwdev, PROMISC) +#define SPNIC_SUPPORT_ALLMULTI(hwdev) SPNIC_SUPPORT_FEATURE(hwdev, ALLMULTI) + +struct nic_rss_type { + u8 tcp_ipv6_ext; + u8 ipv6_ext; + u8 tcp_ipv6; + u8 ipv6; + u8 tcp_ipv4; + u8 ipv4; + u8 udp_ipv6; + u8 udp_ipv4; +}; + +enum spnic_rss_hash_type { + SPNIC_RSS_HASH_ENGINE_TYPE_XOR = 0, + SPNIC_RSS_HASH_ENGINE_TYPE_TOEP, + SPNIC_RSS_HASH_ENGINE_TYPE_MAX, +}; + +/* rss */ +struct nic_rss_indirect_tbl { + u32 rsvd[4]; /* Make sure that 16B beyond entry[] */ + u16 entry[SPNIC_RSS_INDIR_SIZE]; +}; + +struct nic_rss_context_tbl { + u32 rsvd[4]; + u32 ctx; +}; + +#define NIC_CONFIG_ALL_QUEUE_VLAN_CTX 0xFFFF +struct nic_vlan_ctx { + u32 func_id; + u32 qid; /* if qid 
= 0xFFFF, config current function all queue */ + u32 vlan_tag; + u32 vlan_mode; + u32 vlan_sel; +}; + +enum spnic_link_status { + SPNIC_LINK_DOWN = 0, + SPNIC_LINK_UP +}; + +struct nic_port_info { + u8 port_type; + u8 autoneg_cap; + u8 autoneg_state; + u8 duplex; + u8 speed; + u8 fec; + u32 supported_mode; + u32 advertised_mode; +}; + +struct nic_pause_config { + u8 auto_neg; + u8 rx_pause; + u8 tx_pause; +}; + +#define MODULE_TYPE_SFP 0x3 +#define MODULE_TYPE_QSFP28 0x11 +#define MODULE_TYPE_QSFP 0x0C +#define MODULE_TYPE_QSFP_PLUS 0x0D + +#define TCAM_IP_TYPE_MASK 0x1 +#define TCAM_TUNNEL_TYPE_MASK 0xF +#define TCAM_FUNC_ID_MASK 0x7FFF + +struct spnic_tcam_key_ipv4_mem { + u32 rsvd1:4; + u32 tunnel_type:4; + u32 ip_proto:8; + u32 rsvd0:16; + u32 sipv4_h:16; + u32 ip_type:1; + u32 function_id:15; + u32 dipv4_h:16; + u32 sipv4_l:16; + u32 rsvd2:16; + u32 dipv4_l:16; + u32 rsvd3; + u32 dport:16; + u32 rsvd4:16; + u32 rsvd5:16; + u32 sport:16; + u32 outer_sipv4_h:16; + u32 rsvd6:16; + u32 outer_dipv4_h:16; + u32 outer_sipv4_l:16; + u32 vni_h:16; + u32 outer_dipv4_l:16; + u32 rsvd7:16; + u32 vni_l:16; +}; + +struct spnic_tcam_key_ipv6_mem { + u32 rsvd1:4; + u32 tunnel_type:4; + u32 ip_proto:8; + u32 rsvd0:16; + u32 sipv6_key0:16; + u32 ip_type:1; + u32 function_id:15; + u32 sipv6_key2:16; + u32 sipv6_key1:16; + u32 sipv6_key4:16; + u32 sipv6_key3:16; + u32 sipv6_key6:16; + u32 sipv6_key5:16; + u32 dport:16; + u32 sipv6_key7:16; + u32 dipv6_key0:16; + u32 sport:16; + u32 dipv6_key2:16; + u32 dipv6_key1:16; + u32 dipv6_key4:16; + u32 dipv6_key3:16; + u32 dipv6_key6:16; + u32 dipv6_key5:16; + u32 rsvd2:16; + u32 dipv6_key7:16; +}; + +struct tag_tcam_key { + union { + struct spnic_tcam_key_ipv4_mem key_info; + struct spnic_tcam_key_ipv6_mem key_info_ipv6; + }; + + union { + struct spnic_tcam_key_ipv4_mem key_mask; + struct spnic_tcam_key_ipv6_mem key_mask_ipv6; + }; +}; + +int spnic_add_tcam_rule(void *hwdev, struct nic_tcam_cfg_rule *tcam_rule); +int spnic_del_tcam_rule(void *hwdev, u32 index); + +int spnic_alloc_tcam_block(void *hwdev, u16 *index); +int spnic_free_tcam_block(void *hwdev, u16 *index); + +int spnic_set_fdir_tcam_rule_filter(void *hwdev, bool enable); + +int spnic_flush_tcam_rule(void *hwdev); + +/* * + * @brief spnic_update_mac - update mac address to hardware + * @param hwdev: device pointer to hwdev + * @param old_mac: old mac to delete + * @param new_mac: new mac to update + * @param vlan_id: vlan id + * @param func_id: function index + * @retval zero: success + * @retval non-zero: failure + */ +int spnic_update_mac(void *hwdev, u8 *old_mac, u8 *new_mac, u16 vlan_id, u16 func_id); + +/* * + * @brief spnic_get_default_mac - get default mac address + * @param hwdev: device pointer to hwdev + * @param mac_addr: mac address from hardware + * @retval zero: success + * @retval non-zero: failure + */ +int spnic_get_default_mac(void *hwdev, u8 *mac_addr); + +/* * + * @brief spnic_set_port_mtu - set function mtu + * @param hwdev: device pointer to hwdev + * @param new_mtu: mtu + * @retval zero: success + * @retval non-zero: failure + */ +int spnic_set_port_mtu(void *hwdev, u16 new_mtu); + +/* * + * @brief spnic_get_link_state - get link state + * @param hwdev: device pointer to hwdev + * @param link_state: link state, 0-link down, 1-link up + * @retval zero: success + * @retval non-zero: failure + */ +int spnic_get_link_state(void *hwdev, u8 *link_state); + +/* * + * @brief spnic_get_vport_stats - get function stats + * @param hwdev: device pointer to hwdev + * @param stats: function 
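The fdir/TCAM entry points declared above suggest a usage order: obtain a block, install rules by index, then enable the TCAM filter, with teardown mirroring it. This ordering is our reading of the API, not something the patch states. A condensed sketch, assuming the caller has already filled in the rule and its index:

static int example_install_tcam_rule(void *hwdev, struct nic_tcam_cfg_rule *rule)
{
	u16 block_idx = 0;
	int err;

	err = spnic_alloc_tcam_block(hwdev, &block_idx);	/* fw returns the index */
	if (err)
		return err;

	err = spnic_add_tcam_rule(hwdev, rule);	/* rule->index < SPNIC_MAX_TCAM_RULES_NUM */
	if (err)
		goto free_block;

	return spnic_set_fdir_tcam_rule_filter(hwdev, true);

free_block:
	spnic_free_tcam_block(hwdev, &block_idx);
	return err;
}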
stats + * @retval zero: success + * @retval non-zero: failure + */ +int spnic_get_vport_stats(void *hwdev, struct spnic_vport_stats *stats); + +/* * + * @brief spnic_notify_all_vfs_link_changed - notify link change to all vfs + * @param hwdev: device pointer to hwdev + * @param link_status: link state, 0-link down, 1-link up + */ +void spnic_notify_all_vfs_link_changed(void *hwdev, u8 link_status); + +/* * + * @brief spnic_force_drop_tx_pkt - force drop tx packet + * @param hwdev: device pointer to hwdev + * @retval zero: success + * @retval non-zero: failure + */ +int spnic_force_drop_tx_pkt(void *hwdev); + +/* * + * @brief spnic_set_rx_mode - set function rx mode + * @param hwdev: device pointer to hwdev + * @param enable: rx mode state + * @retval zero: success + * @retval non-zero: failure + */ +int spnic_set_rx_mode(void *hwdev, u32 enable); + +/* * + * @brief spnic_set_rx_vlan_offload - set function vlan offload valid state + * @param hwdev: device pointer to hwdev + * @param en: 0-disable, 1-enable + * @retval zero: success + * @retval non-zero: failure + */ +int spnic_set_rx_vlan_offload(void *hwdev, u8 en); + +/* * + * @brief spnic_set_rx_lro_state - set rx LRO configuration + * @param hwdev: device pointer to hwdev + * @param lro_en: 0-disable, 1-enable + * @param lro_timer: LRO aggregation timeout + * @param lro_max_pkt_len: LRO coalesce packet size (unit is 1K) + * @retval zero: success + * @retval non-zero: failure + */ +int spnic_set_rx_lro_state(void *hwdev, u8 lro_en, u32 lro_timer, u32 lro_max_pkt_len); + +/* * + * @brief spnic_set_vf_spoofchk - set vf spoofchk + * @param hwdev: device pointer to hwdev + * @param vf_id: vf id + * @param spoofchk: spoofchk state + * @retval zero: success + * @retval non-zero: failure + */ +int spnic_set_vf_spoofchk(void *hwdev, u16 vf_id, bool spoofchk); + +/* * + * @brief spnic_vf_info_spoofchk - get vf spoofchk info + * @param hwdev: device pointer to hwdev + * @param vf_id: vf id + * @retval spoofchk state + */ +bool spnic_vf_info_spoofchk(void *hwdev, int vf_id); + +/* * + * @brief spnic_add_vf_vlan - add vf vlan id + * @param hwdev: device pointer to hwdev + * @param vf_id: vf id + * @param vlan: vlan id + * @param qos: qos + * @retval zero: success + * @retval non-zero: failure + */ +int spnic_add_vf_vlan(void *hwdev, int vf_id, u16 vlan, u8 qos); + +/* * + * @brief spnic_kill_vf_vlan - kill vf vlan + * @param hwdev: device pointer to hwdev + * @param vf_id: vf id + * @retval zero: success + * @retval non-zero: failure + */ +int spnic_kill_vf_vlan(void *hwdev, int vf_id); + +/* * + * @brief spnic_set_vf_mac - set vf mac + * @param hwdev: device pointer to hwdev + * @param vf_id: vf id + * @param mac_addr: vf mac address + * @retval zero: success + * @retval non-zero: failure + */ +int spnic_set_vf_mac(void *hwdev, int vf_id, unsigned char *mac_addr); + +/* * + * @brief spnic_vf_info_vlanprio - get vf vlan priority + * @param hwdev: device pointer to hwdev + * @param vf_id: vf id + * @retval vlan priority + */ +u16 spnic_vf_info_vlanprio(void *hwdev, int vf_id); + +/* * + * @brief spnic_set_vf_tx_rate - set vf tx rate + * @param hwdev: device pointer to hwdev + * @param vf_id: vf id + * @param max_rate: max rate + * @param min_rate: min rate + * @retval zero: success + * @retval non-zero: failure + */ +int spnic_set_vf_tx_rate(void *hwdev, u16 vf_id, u32 max_rate, u32 min_rate); + +/* * + * @brief spnic_get_vf_config - get vf configuration + * @param hwdev: device pointer to hwdev + * @param vf_id: vf id + * @param ivi: vf info + * @retval zero: success + * @retval non-zero: failure + */ +void spnic_get_vf_config(void *hwdev, u16 vf_id, struct ifla_vf_info *ivi); + +/* * + * @brief spnic_set_vf_link_state - set vf link state + * @param hwdev: device pointer to hwdev + * @param vf_id: vf id + * @param link: link state + * @retval zero: success + * @retval non-zero: failure + */ +int spnic_set_vf_link_state(void *hwdev, u16 vf_id, int link); + +/* * + * @brief spnic_get_port_info - get port info + * @param hwdev: device pointer to hwdev + * @param port_info: port info + * @param channel: channel id + * @retval zero: success + * @retval non-zero: failure + */ +int spnic_get_port_info(void *hwdev, struct nic_port_info *port_info, u16 channel); + +/* * + * @brief spnic_rss_template_alloc - alloc rss template table + * @param hwdev: device pointer to hwdev + * @retval zero: success + * @retval non-zero: failure + */ +int spnic_rss_template_alloc(void *hwdev); + +/* * + * @brief spnic_rss_template_free - free rss template table + * @param hwdev: device pointer to hwdev + * @retval zero: success + * @retval non-zero: failure + */ +int spnic_rss_template_free(void *hwdev); + +/* * + * @brief spnic_set_rss_type - set rss type + * @param hwdev: device pointer to hwdev + * @param rss_type: rss type + * @retval zero: success + * @retval non-zero: failure + */ +int spnic_set_rss_type(void *hwdev, struct nic_rss_type rss_type); + +/* * + * @brief spnic_get_rss_type - get rss type + * @param hwdev: device pointer to hwdev + * @param rss_type: rss type + * @retval zero: success + * @retval non-zero: failure + */ +int spnic_get_rss_type(void *hwdev, struct nic_rss_type *rss_type); + +/* * + * @brief spnic_rss_get_hash_engine - get rss hash engine + * @param hwdev: device pointer to hwdev + * @param type: hash engine + * @retval zero: success + * @retval non-zero: failure + */ +int spnic_rss_get_hash_engine(void *hwdev, u8 *type); + +/* * + * @brief spnic_rss_set_hash_engine - set rss hash engine + * @param hwdev: device pointer to hwdev + * @param type: hash engine + * @retval zero: success + * @retval non-zero: failure + */ +int spnic_rss_set_hash_engine(void *hwdev, u8 type); + +/* * + * @brief spnic_rss_cfg - set rss configuration + * @param hwdev: device pointer to hwdev + * @param rss_en: enable rss flag + * @param tc_num: number of TC + * @param prio_tc: priority of TC + * @param num_qps: number of queues + * @retval zero: success + * @retval non-zero: failure + */ +int spnic_rss_cfg(void *hwdev, u8 rss_en, u8 tc_num, u8 *prio_tc, u16 num_qps); + +/* * + * @brief spnic_rss_set_hash_key - set rss hash key + * @param hwdev: device pointer to hwdev + * @param key: rss key + * @retval zero: success + * @retval non-zero: failure + */ +int spnic_rss_set_hash_key(void *hwdev, const u8 *key); + +/* * + * @brief spnic_rss_get_hash_key - get rss hash key + * @param hwdev: device pointer to hwdev + * @param key: rss key + * @retval zero: success + * @retval non-zero: failure + */ +int spnic_rss_get_hash_key(void *hwdev, u8 *key); + +/* * + * @brief spnic_refresh_nic_cfg - refresh port cfg + * @param hwdev: device pointer to hwdev + * @param port_info: port information + * @retval zero: success + * @retval non-zero: failure + */ +int
+/* * + * @brief spnic_add_vlan - add vlan + * @param hwdev: device pointer to hwdev + * @param vlan_id: vlan id + * @param func_id: function id + * @retval zero: success + * @retval non-zero: failure + */ +int spnic_add_vlan(void *hwdev, u16 vlan_id, u16 func_id); + +/* * + * @brief spnic_del_vlan - delete vlan + * @param hwdev: device pointer to hwdev + * @param vlan_id: vlan id + * @param func_id: function id + * @retval zero: success + * @retval non-zero: failure + */ +int spnic_del_vlan(void *hwdev, u16 vlan_id, u16 func_id); + +/* * + * @brief spnic_set_mac - set mac address + * @param hwdev: device pointer to hwdev + * @param mac_addr: mac address + * @param vlan_id: vlan id + * @param func_id: function index + * @param channel: channel id + * @retval zero: success + * @retval non-zero: failure + */ +int spnic_set_mac(void *hwdev, const u8 *mac_addr, u16 vlan_id, u16 func_id, u16 channel); + +/* * + * @brief spnic_del_mac - delete mac address + * @param hwdev: device pointer to hwdev + * @param mac_addr: mac address + * @param vlan_id: vlan id + * @param func_id: function index + * @param channel: channel id + * @retval zero: success + * @retval non-zero: failure + */ +int spnic_del_mac(void *hwdev, const u8 *mac_addr, u16 vlan_id, u16 func_id, u16 channel); + +/* * + * @brief spnic_set_vport_enable - set function valid status + * @param hwdev: device pointer to hwdev + * @param func_id: global function index + * @param enable: 0-disable, 1-enable + * @param channel: channel id + * @retval zero: success + * @retval non-zero: failure + */ +int spnic_set_vport_enable(void *hwdev, u16 func_id, bool enable, u16 channel); + +/* * + * @brief spnic_set_port_enable - set port status + * @param hwdev: device pointer to hwdev + * @param enable: 0-disable, 1-enable + * @param channel: channel id + * @retval zero: success + * @retval non-zero: failure + */ +int spnic_set_port_enable(void *hwdev, bool enable, u16 channel); + +/* * + * @brief spnic_flush_qps_res - flush queue pairs resource in hardware + * @param hwdev: device pointer to hwdev + * @retval zero: success + * @retval non-zero: failure + */ +int spnic_flush_qps_res(void *hwdev); + +/* * + * @brief spnic_init_nic_hwdev - init nic hwdev + * @param hwdev: device pointer to hwdev + * @param pcidev_hdl: pointer to pcidev or handler + * @param dev_hdl: pointer to pcidev->dev or handler, for sdk_err() or + * dma_alloc() + * @param rx_buff_len: receive buffer length + * @retval zero: success + * @retval non-zero: failure + */ +int spnic_init_nic_hwdev(void *hwdev, void *pcidev_hdl, void *dev_hdl, u16 rx_buff_len); + +/* * + * @brief spnic_free_nic_hwdev - free nic hwdev + * @param hwdev: device pointer to hwdev + */ +void spnic_free_nic_hwdev(void *hwdev); + +/* * + * @brief spnic_get_speed - get link speed + * @param hwdev: device pointer to hwdev + * @param speed: link speed + * @param channel: channel id + * @retval zero: success + * @retval non-zero: failure + */ +int spnic_get_speed(void *hwdev, enum mag_cmd_port_speed *speed, u16 channel); + +int spnic_get_dcb_state(void *hwdev, struct spnic_dcb_state *dcb_state); + +int spnic_get_pf_dcb_state(void *hwdev, struct spnic_dcb_state *dcb_state); + +/* * + * @brief spnic_create_qps - create queue pairs + * @param hwdev: device pointer to hwdev + * @param num_qp: number of queue pairs + * @param sq_depth: sq depth + * @param rq_depth: rq depth + * @param qps_msix_arry: msix info + * @retval zero: success + * @retval non-zero: failure + */ +int spnic_create_qps(void *hwdev, u16 num_qp, u32 sq_depth, u32 rq_depth, + struct irq_info *qps_msix_arry); + +/* * + * @brief spnic_destroy_qps - destroy 
queue pairs + * @param hwdev: device pointer to hwdev + */ +void spnic_destroy_qps(void *hwdev); + +enum spnic_queue_type { + SPNIC_SQ, + SPNIC_RQ, + SPNIC_MAX_QUEUE_TYPE +}; + +/* * + * @brief spnic_get_nic_queue - get nic queue + * @param hwdev: device pointer to hwdev + * @param q_id: queue index + * @param q_type: queue type + * @retval queue address + */ +void *spnic_get_nic_queue(void *hwdev, u16 q_id, enum spnic_queue_type q_type); + +/* * + * @brief spnic_init_qp_ctxts - init queue pair context + * @param hwdev: device pointer to hwdev + * @retval zero: success + * @retval non-zero: failure + */ +int spnic_init_qp_ctxts(void *hwdev); + +/* * + * @brief spnic_free_qp_ctxts - free queue pairs + * @param hwdev: device pointer to hwdev + */ +void spnic_free_qp_ctxts(void *hwdev); + +/* * + * @brief spnic_rss_set_indir_tbl - set rss indirect table + * @param hwdev: device pointer to hwdev + * @param indir_table: rss indirect table + * @retval zero: success + * @retval non-zero: failure + */ +int spnic_rss_set_indir_tbl(void *hwdev, const u32 *indir_table); + +/* * + * @brief spnic_rss_get_indir_tbl - get rss indirect table + * @param hwdev: device pointer to hwdev + * @param indir_table: rss indirect table + * @retval zero: success + * @retval non-zero: failure + */ +int spnic_rss_get_indir_tbl(void *hwdev, u32 *indir_table); + +/* * + * @brief spnic_get_phy_port_stats - get port stats + * @param hwdev: device pointer to hwdev + * @param stats: port stats + * @retval zero: success + * @retval non-zero: failure + */ +int spnic_get_phy_port_stats(void *hwdev, struct mag_cmd_port_stats *stats); + +int spnic_set_port_funcs_state(void *hwdev, bool enable); + +int spnic_reset_port_link_cfg(void *hwdev); + +int spnic_force_port_relink(void *hwdev); + +int spnic_set_dcb_state(void *hwdev, struct spnic_dcb_state *dcb_state); + +int spnic_dcb_set_pfc(void *hwdev, u8 pfc_en, u8 pfc_bitmap); + +int spnic_dcb_get_pfc(void *hwdev, u8 *pfc_en_bitmap); + +int spnic_dcb_set_ets(void *hwdev, u8 *cos_tc, u8 *cos_bw, u8 *cos_prio, u8 *tc_bw, u8 *tc_prio); + +int spnic_dcb_set_cos_up_map(void *hwdev, u8 cos_valid_bitmap, u8 *cos_up, u8 max_cos_num); + +int spnic_get_pause_info(void *hwdev, struct nic_pause_config *nic_pause); + +int spnic_set_pause_info(void *hwdev, struct nic_pause_config nic_pause); + +int spnic_set_link_settings(void *hwdev, struct spnic_link_ksettings *settings); + +int spnic_set_vlan_fliter(void *hwdev, u32 vlan_filter_ctrl); + +void spnic_clear_vfs_info(void *hwdev); + +int spnic_update_mac_vlan(void *hwdev, u16 old_vlan, u16 new_vlan, int vf_id); + +int spnic_set_led_status(void *hwdev, enum mag_led_type type, enum mag_led_mode mode); + +int spnic_set_func_capture_en(void *hwdev, u16 func_id, bool cap_en); + +int spnic_set_loopback_mode(void *hwdev, u8 mode, u8 enable); +int spnic_get_loopback_mode(void *hwdev, u8 *mode, u8 *enable); + +bool spnic_get_vf_trust(void *hwdev, int vf_id); +int spnic_set_vf_trust(void *hwdev, u16 vf_id, bool trust); + +int spnic_set_autoneg(void *hwdev, bool enable); + +int spnic_get_sfp_type(void *hwdev, u8 *sfp_type, u8 *sfp_type_ext); +int spnic_get_sfp_eeprom(void *hwdev, u8 *data, u32 len); + +int spnic_set_nic_feature(void *hwdev, u64 *s_feature, u16 size); + +#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_cfg_vf.c b/drivers/net/ethernet/ramaxel/spnic/spnic_nic_cfg_vf.c new file mode 100644 index 000000000000..53410b2423c8 --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/spnic_nic_cfg_vf.c @@ -0,0 +1,647 @@ +// 
SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/etherdevice.h> +#include <linux/if_vlan.h> +#include <linux/ethtool.h> +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/module.h> + +#include "sphw_crm.h" +#include "sphw_hw.h" +#include "spnic_nic_io.h" +#include "spnic_nic_cfg.h" +#include "spnic_nic.h" +#include "spnic_nic_cmd.h" + +static unsigned char set_vf_link_state; +module_param(set_vf_link_state, byte, 0444); +MODULE_PARM_DESC(set_vf_link_state, "Set vf link state: 0 - link auto, 1 - link always up, 2 - link always down (default=0)"); + +/* Local copy of the IFLA_VF_LINK_STATE_* values, kept to adapt to different linux versions */ +enum { + SPNIC_IFLA_VF_LINK_STATE_AUTO, /* link state of the uplink */ + SPNIC_IFLA_VF_LINK_STATE_ENABLE, /* link always up */ + SPNIC_IFLA_VF_LINK_STATE_DISABLE, /* link always down */ +}; + 
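The private enum above mirrors the kernel's IFLA_VF_LINK_STATE_* values from uapi/linux/if_link.h. A sketch of the translation a .ndo_set_vf_link_state hook would perform before calling spnic_set_vf_link_state() (the helper name is hypothetical, for illustration only):

static int example_ifla_to_spnic_link_state(int ifla_state)
{
	switch (ifla_state) {
	case IFLA_VF_LINK_STATE_AUTO:
		return SPNIC_IFLA_VF_LINK_STATE_AUTO;
	case IFLA_VF_LINK_STATE_ENABLE:
		return SPNIC_IFLA_VF_LINK_STATE_ENABLE;
	case IFLA_VF_LINK_STATE_DISABLE:
		return SPNIC_IFLA_VF_LINK_STATE_DISABLE;
	default:
		return -EINVAL;
	}
}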
+#define NIC_CVLAN_INSERT_ENABLE 0x1 +#define NIC_QINQ_INSERT_ENABLE 0x3 +static int spnic_set_vlan_ctx(struct spnic_nic_cfg *nic_cfg, u16 func_id, + u16 vlan_tag, u16 q_id, bool add) +{ + struct nic_vlan_ctx *vlan_ctx = NULL; + struct sphw_cmd_buf *cmd_buf = NULL; + u64 out_param = 0; + int err; + + cmd_buf = sphw_alloc_cmd_buf(nic_cfg->hwdev); + if (!cmd_buf) { + nic_err(nic_cfg->dev_hdl, "Failed to allocate cmd buf\n"); + return -ENOMEM; + } + + cmd_buf->size = sizeof(struct nic_vlan_ctx); + vlan_ctx = (struct nic_vlan_ctx *)cmd_buf->buf; + + vlan_ctx->func_id = func_id; + vlan_ctx->qid = q_id; + vlan_ctx->vlan_tag = vlan_tag; + vlan_ctx->vlan_sel = 0; /* TPID0 in IPSU */ + vlan_ctx->vlan_mode = add ? + NIC_QINQ_INSERT_ENABLE : NIC_CVLAN_INSERT_ENABLE; + + sphw_cpu_to_be32(vlan_ctx, sizeof(struct nic_vlan_ctx)); + + err = sphw_cmdq_direct_resp(nic_cfg->hwdev, SPHW_MOD_L2NIC, SPNIC_UCODE_CMD_MODIFY_VLAN_CTX, + cmd_buf, &out_param, 0, SPHW_CHANNEL_NIC); + + sphw_free_cmd_buf(nic_cfg->hwdev, cmd_buf); + + if (err || out_param != 0) { + nic_err(nic_cfg->dev_hdl, "Failed to set vlan context, err: %d, out_param: 0x%llx\n", + err, out_param); + return -EFAULT; + } + + return 0; +} + +int spnic_cfg_vf_vlan(struct spnic_nic_cfg *nic_cfg, u8 opcode, u16 vid, u8 qos, int vf_id) +{ + struct spnic_cmd_vf_vlan_config vf_vlan; + u16 out_size = sizeof(vf_vlan); + u16 glb_func_id; + int err; + u16 vlan_tag; + + /* VLAN 0 is a special case, don't allow it to be removed */ + if (!vid && opcode == SPNIC_CMD_OP_DEL) + return 0; + + memset(&vf_vlan, 0, sizeof(vf_vlan)); + + vf_vlan.opcode = opcode; + vf_vlan.func_id = sphw_glb_pf_vf_offset(nic_cfg->hwdev) + (u16)vf_id; + vf_vlan.vlan_id = vid; + vf_vlan.qos = qos; + + err = l2nic_msg_to_mgmt_sync(nic_cfg->hwdev, SPNIC_NIC_CMD_CFG_VF_VLAN, + &vf_vlan, sizeof(vf_vlan), &vf_vlan, &out_size); + if (err || !out_size || vf_vlan.msg_head.status) { + nic_err(nic_cfg->dev_hdl, "Failed to set VF %d vlan, err: %d, status: 0x%x, out size: 0x%x\n", + HW_VF_ID_TO_OS(vf_id), err, vf_vlan.msg_head.status, out_size); + return -EFAULT; + } + + vlan_tag = vid + (u16)(qos << VLAN_PRIO_SHIFT); + + glb_func_id = sphw_glb_pf_vf_offset(nic_cfg->hwdev) + (u16)vf_id; + err = spnic_set_vlan_ctx(nic_cfg, glb_func_id, vlan_tag, NIC_CONFIG_ALL_QUEUE_VLAN_CTX, + opcode == SPNIC_CMD_OP_ADD); + if (err) { + nic_err(nic_cfg->dev_hdl, "Failed to set VF %d vlan ctx, err: %d\n", + HW_VF_ID_TO_OS(vf_id), err); + + /* rollback vlan config */ + if (opcode == SPNIC_CMD_OP_DEL) + vf_vlan.opcode = SPNIC_CMD_OP_ADD; + else + vf_vlan.opcode = SPNIC_CMD_OP_DEL; + l2nic_msg_to_mgmt_sync(nic_cfg->hwdev, SPNIC_NIC_CMD_CFG_VF_VLAN, &vf_vlan, + sizeof(vf_vlan), &vf_vlan, &out_size); + return err; + } + + return 0; +} + 
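The vlan_tag composed in spnic_cfg_vf_vlan() is an 802.1Q TCI without the DEI bit: the 12-bit VID sits in bits 0-11 and the 3-bit PCP in bits 15-13 (VLAN_PRIO_SHIFT is 13). Because vid is always below 4096, the vid + (qos << VLAN_PRIO_SHIFT) above is equivalent to OR-ing the two fields. A worked example:

/* vid = 100 (0x064), qos = 5: 5 << 13 = 0xA000, so the TCI is 0xA064 */
u16 vid = 100, qos = 5;
u16 vlan_tag = (u16)(vid | (qos << VLAN_PRIO_SHIFT));	/* == 0xA064 */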
+/* This function may only be called by spnic_ndo_set_vf_mac; + * other callers are not permitted. + */ +int spnic_set_vf_mac(void *hwdev, int vf, unsigned char *mac_addr) +{ + struct vf_data_storage *vf_info; + struct spnic_nic_cfg *nic_cfg; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + vf_info = nic_cfg->vf_infos + HW_VF_ID_TO_OS(vf); + + /* duplicate request, so just return success */ + if (ether_addr_equal(vf_info->user_mac_addr, mac_addr)) + return 0; + + ether_addr_copy(vf_info->user_mac_addr, mac_addr); + + return 0; +} + +int spnic_add_vf_vlan(void *hwdev, int vf_id, u16 vlan, u8 qos) +{ + struct spnic_nic_cfg *nic_cfg; + int err; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + + err = spnic_cfg_vf_vlan(nic_cfg, SPNIC_CMD_OP_ADD, vlan, qos, vf_id); + if (err) + return err; + + nic_cfg->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan = vlan; + nic_cfg->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos = qos; + + nic_info(nic_cfg->dev_hdl, "Setting VLAN %u, QOS 0x%x on VF %d\n", + vlan, qos, HW_VF_ID_TO_OS(vf_id)); + + return 0; +} + +int spnic_kill_vf_vlan(void *hwdev, int vf_id) +{ + struct vf_data_storage *vf_infos; + struct spnic_nic_cfg *nic_cfg; + int err; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + vf_infos = nic_cfg->vf_infos; + + err = spnic_cfg_vf_vlan(nic_cfg, SPNIC_CMD_OP_DEL, vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan, + vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos, vf_id); + if (err) + return err; + + nic_info(nic_cfg->dev_hdl, "Remove VLAN %u on VF %d\n", + vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan, HW_VF_ID_TO_OS(vf_id)); + + vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan = 0; + vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos = 0; + + return 0; +} + +u16 spnic_vf_info_vlanprio(void *hwdev, int vf_id) +{ + struct spnic_nic_cfg *nic_cfg; + u16 pf_vlan, vlanprio; + u8 pf_qos; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + + pf_vlan = nic_cfg->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan; + pf_qos = nic_cfg->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos; + vlanprio = (u16)(pf_vlan | pf_qos << SPNIC_VLAN_PRIORITY_SHIFT); + + return vlanprio; +} + +int spnic_set_vf_link_state(void *hwdev, u16 vf_id, int link) +{ + struct spnic_nic_cfg *nic_cfg = + sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + struct vf_data_storage *vf_infos = nic_cfg->vf_infos; + u8 link_status = 0; + + switch (link) { + case SPNIC_IFLA_VF_LINK_STATE_AUTO: + vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced = false; + vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up = !!nic_cfg->link_status; + link_status = nic_cfg->link_status; + break; + case SPNIC_IFLA_VF_LINK_STATE_ENABLE: + vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced = true; + vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up = true; + link_status = SPNIC_LINK_UP; + break; + case SPNIC_IFLA_VF_LINK_STATE_DISABLE: + vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced = true; + vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up = false; + link_status = SPNIC_LINK_DOWN; + break; + default: + return -EINVAL; + } + + /* Notify the VF of its new link state */ + spnic_notify_vf_link_status(nic_cfg, vf_id, link_status); + + return 0; +} + 
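The link_forced/link_up pair written by spnic_set_vf_link_state() fully determines what a VF is told about link: a forced VF keeps its configured state regardless of the physical port, while an auto VF tracks the PF. Summarized as a one-line sketch (the helper name is illustrative, not part of the driver):

static bool example_vf_effective_link(const struct vf_data_storage *vf_info,
				      u8 pf_link_status)
{
	/* forced VFs keep their configured state; auto VFs follow the PF */
	return vf_info->link_forced ? vf_info->link_up : !!pf_link_status;
}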
+int spnic_set_vf_spoofchk(void *hwdev, u16 vf_id, bool spoofchk) +{ + struct spnic_cmd_spoofchk_set spoofchk_cfg; + struct vf_data_storage *vf_infos = NULL; + u16 out_size = sizeof(spoofchk_cfg); + struct spnic_nic_cfg *nic_cfg = NULL; + int err; + + if (!hwdev) + return -EINVAL; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + vf_infos = nic_cfg->vf_infos; + + memset(&spoofchk_cfg, 0, sizeof(spoofchk_cfg)); + + spoofchk_cfg.func_id = sphw_glb_pf_vf_offset(hwdev) + vf_id; + spoofchk_cfg.state = spoofchk ? 1 : 0; + err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_SET_SPOOPCHK_STATE, + &spoofchk_cfg, sizeof(spoofchk_cfg), &spoofchk_cfg, &out_size); + if (err || !out_size || spoofchk_cfg.msg_head.status) { + nic_err(nic_cfg->dev_hdl, "Failed to set VF(%d) spoofchk, err: %d, status: 0x%x, out size: 0x%x\n", + HW_VF_ID_TO_OS(vf_id), err, spoofchk_cfg.msg_head.status, out_size); + err = -EINVAL; + } + + /* only cache the new state if hardware accepted it */ + if (!err) + vf_infos[HW_VF_ID_TO_OS(vf_id)].spoofchk = spoofchk; + + return err; +} + +bool spnic_vf_info_spoofchk(void *hwdev, int vf_id) +{ + struct spnic_nic_cfg *nic_cfg; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + + return nic_cfg->vf_infos[HW_VF_ID_TO_OS(vf_id)].spoofchk; +} + +int spnic_set_vf_trust(void *hwdev, u16 vf_id, bool trust) +{ + struct spnic_nic_cfg *nic_cfg = NULL; + + if (!hwdev) + return -EINVAL; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + if (vf_id > nic_cfg->max_vfs) + return -EINVAL; + + nic_cfg->vf_infos[HW_VF_ID_TO_OS(vf_id)].trust = trust; + + return 0; +} + +bool spnic_get_vf_trust(void *hwdev, int vf_id) +{ + struct spnic_nic_cfg *nic_cfg = NULL; + + if (!hwdev) + return false; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + if (vf_id > nic_cfg->max_vfs) + return false; + + return nic_cfg->vf_infos[HW_VF_ID_TO_OS(vf_id)].trust; +} + +static int spnic_cfg_vf_qps(struct spnic_nic_cfg *nic_cfg, u8 opcode, u16 vf_id, u16 num_qps) +{ + struct spnic_cmd_cfg_qps qps_info; + u16 out_size = sizeof(qps_info); + int err; + + memset(&qps_info, 0, sizeof(qps_info)); + + qps_info.func_id = sphw_glb_pf_vf_offset(nic_cfg->hwdev) + vf_id; + qps_info.opcode = opcode; + qps_info.num_qps = num_qps; + err = l2nic_msg_to_mgmt_sync(nic_cfg->hwdev, SPNIC_NIC_CMD_CFG_FLEX_QUEUE, &qps_info, + sizeof(qps_info), &qps_info, &out_size); + if (err || !out_size || qps_info.msg_head.status) { + nic_err(nic_cfg->dev_hdl, "Failed to %s VF(%d) qps, err: %d, status: 0x%x, out size: 0x%x\n", + opcode == SPNIC_CMD_OP_ALLOC ? 
"alloc" : "free", + HW_VF_ID_TO_OS(vf_id), err, qps_info.msg_head.status, out_size); + return -EFAULT; + } + + return 0; +} + +int spnic_alloc_vf_qps(void *hwdev, u16 vf_id, u16 num_qps) +{ + struct vf_data_storage *vf_infos = NULL; + struct spnic_nic_cfg *nic_cfg = NULL; + int err; + + if (!hwdev) + return -EINVAL; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + if (vf_id > nic_cfg->max_vfs) + return -EINVAL; + + err = spnic_cfg_vf_qps(nic_cfg, SPNIC_CMD_OP_ALLOC, vf_id, num_qps); + if (err) + return err; + + vf_infos = nic_cfg->vf_infos; + vf_infos[HW_VF_ID_TO_OS(vf_id)].num_qps = num_qps; + + return 0; +} + +int spnic_free_vf_qps(void *hwdev, u16 vf_id) +{ + struct vf_data_storage *vf_infos = NULL; + struct spnic_nic_cfg *nic_cfg = NULL; + int err; + + if (!hwdev) + return -EINVAL; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + if (vf_id > nic_cfg->max_vfs) + return -EINVAL; + + vf_infos = nic_cfg->vf_infos; + err = spnic_cfg_vf_qps(nic_cfg, SPNIC_CMD_OP_FREE, vf_id, + vf_infos[HW_VF_ID_TO_OS(vf_id)].num_qps); + if (err) + return err; + + vf_infos[HW_VF_ID_TO_OS(vf_id)].num_qps = 0; + + return 0; +} + +static int spnic_set_vf_tx_rate_max_min(struct spnic_nic_cfg *nic_cfg, u16 vf_id, + u32 max_rate, u32 min_rate) +{ + struct spnic_cmd_tx_rate_cfg rate_cfg; + u16 out_size = sizeof(rate_cfg); + int err; + + memset(&rate_cfg, 0, sizeof(rate_cfg)); + + rate_cfg.func_id = sphw_glb_pf_vf_offset(nic_cfg->hwdev) + vf_id; + rate_cfg.max_rate = max_rate; + rate_cfg.min_rate = min_rate; + err = l2nic_msg_to_mgmt_sync(nic_cfg->hwdev, SPNIC_NIC_CMD_SET_MAX_MIN_RATE, + &rate_cfg, sizeof(rate_cfg), &rate_cfg, &out_size); + if (rate_cfg.msg_head.status || err || !out_size) { + nic_err(nic_cfg->dev_hdl, "Failed to set VF %d max rate %u, min rate %u, err: %d, status: 0x%x, out size: 0x%x\n", + HW_VF_ID_TO_OS(vf_id), max_rate, min_rate, err, + rate_cfg.msg_head.status, out_size); + return -EIO; + } + + return 0; +} + +int spnic_set_vf_tx_rate(void *hwdev, u16 vf_id, u32 max_rate, u32 min_rate) +{ + struct spnic_nic_cfg *nic_cfg = NULL; + int err; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + + err = spnic_set_vf_tx_rate_max_min(nic_cfg, vf_id, max_rate, min_rate); + if (err) + return err; + + nic_cfg->vf_infos[HW_VF_ID_TO_OS(vf_id)].max_rate = max_rate; + nic_cfg->vf_infos[HW_VF_ID_TO_OS(vf_id)].min_rate = min_rate; + + return 0; +} + +void spnic_get_vf_config(void *hwdev, u16 vf_id, struct ifla_vf_info *ivi) +{ + struct vf_data_storage *vfinfo; + struct spnic_nic_cfg *nic_cfg; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + + vfinfo = nic_cfg->vf_infos + HW_VF_ID_TO_OS(vf_id); + + ivi->vf = HW_VF_ID_TO_OS(vf_id); + ether_addr_copy(ivi->mac, vfinfo->user_mac_addr); + ivi->vlan = vfinfo->pf_vlan; + ivi->qos = vfinfo->pf_qos; + + ivi->spoofchk = vfinfo->spoofchk; + + ivi->trusted = vfinfo->trust; + + ivi->max_tx_rate = vfinfo->max_rate; + ivi->min_tx_rate = vfinfo->min_rate; + + if (!vfinfo->link_forced) + ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; + else if (vfinfo->link_up) + ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE; + else + ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE; +} + +static int spnic_init_vf_infos(struct spnic_nic_cfg *nic_cfg, u16 vf_id) +{ + struct vf_data_storage *vf_infos = nic_cfg->vf_infos; + u8 vf_link_state; + + if (set_vf_link_state > SPNIC_IFLA_VF_LINK_STATE_DISABLE) { + nic_warn(nic_cfg->dev_hdl, "Module Parameter set_vf_link_state value %u is out of range, resetting to %d\n", + set_vf_link_state, 
SPNIC_IFLA_VF_LINK_STATE_AUTO); + set_vf_link_state = SPNIC_IFLA_VF_LINK_STATE_AUTO; + } + + vf_link_state = set_vf_link_state; + + switch (vf_link_state) { + case SPNIC_IFLA_VF_LINK_STATE_AUTO: + vf_infos[vf_id].link_forced = false; + break; + case SPNIC_IFLA_VF_LINK_STATE_ENABLE: + vf_infos[vf_id].link_forced = true; + vf_infos[vf_id].link_up = true; + break; + case SPNIC_IFLA_VF_LINK_STATE_DISABLE: + vf_infos[vf_id].link_forced = true; + vf_infos[vf_id].link_up = false; + break; + default: + nic_err(nic_cfg->dev_hdl, "Input parameter set_vf_link_state error: %u\n", + vf_link_state); + return -EINVAL; + } + + return 0; +} + +static int vf_func_register(struct spnic_nic_cfg *nic_cfg) +{ + struct spnic_cmd_register_vf register_info; + u16 out_size = sizeof(register_info); + int err; + + err = sphw_register_vf_mbox_cb(nic_cfg->hwdev, SPHW_MOD_L2NIC, nic_cfg, + spnic_vf_event_handler); + if (err) + return err; + + err = sphw_register_vf_mbox_cb(nic_cfg->hwdev, SPHW_MOD_HILINK, nic_cfg, + spnic_vf_mag_event_handler); + if (err) + goto reg_hilink_err; + + memset(&register_info, 0, sizeof(register_info)); + register_info.op_register = 1; + err = sphw_mbox_to_pf(nic_cfg->hwdev, SPHW_MOD_L2NIC, SPNIC_NIC_CMD_VF_REGISTER, + &register_info, sizeof(register_info), &register_info, &out_size, 0, + SPHW_CHANNEL_NIC); + if (err || !out_size || register_info.msg_head.status) { + nic_err(nic_cfg->dev_hdl, "Failed to register VF, err: %d, status: 0x%x, out size: 0x%x\n", + err, register_info.msg_head.status, out_size); + err = -EIO; + goto register_err; + } + + return 0; + +register_err: + sphw_unregister_vf_mbox_cb(nic_cfg->hwdev, SPHW_MOD_HILINK); + +reg_hilink_err: + sphw_unregister_vf_mbox_cb(nic_cfg->hwdev, SPHW_MOD_L2NIC); + + return err; +} + +static int pf_init_vf_infos(struct spnic_nic_cfg *nic_cfg) +{ + u32 size; + int err; + u16 i; + + nic_cfg->max_vfs = sphw_func_max_vf(nic_cfg->hwdev); + size = sizeof(*nic_cfg->vf_infos) * nic_cfg->max_vfs; + if (!size) + return 0; + + nic_cfg->vf_infos = kzalloc(size, GFP_KERNEL); + if (!nic_cfg->vf_infos) + return -ENOMEM; + + for (i = 0; i < nic_cfg->max_vfs; i++) { + err = spnic_init_vf_infos(nic_cfg, i); + if (err) + goto init_vf_infos_err; + } + + err = sphw_register_mgmt_msg_cb(nic_cfg->hwdev, SPHW_MOD_L2NIC, nic_cfg, + spnic_pf_event_handler); + if (err) + goto register_mgmt_cb_err; + + err = sphw_register_pf_mbox_cb(nic_cfg->hwdev, SPHW_MOD_L2NIC, nic_cfg, + spnic_pf_mbox_handler); + if (err) + goto register_pf_mbox_cb_err; + + err = sphw_register_mgmt_msg_cb(nic_cfg->hwdev, SPHW_MOD_HILINK, nic_cfg, + spnic_pf_mag_event_handler); + if (err) + goto register_mag_mgmt_cb_err; + + err = sphw_register_pf_mbox_cb(nic_cfg->hwdev, SPHW_MOD_HILINK, nic_cfg, + spnic_pf_mag_mbox_handler); + if (err) + goto register_pf_mag_mbox_cb_err; + + return 0; + +/* unwind in strict reverse order of registration */ +register_pf_mag_mbox_cb_err: + sphw_unregister_mgmt_msg_cb(nic_cfg->hwdev, SPHW_MOD_HILINK); +register_mag_mgmt_cb_err: + sphw_unregister_pf_mbox_cb(nic_cfg->hwdev, SPHW_MOD_L2NIC); +register_pf_mbox_cb_err: + sphw_unregister_mgmt_msg_cb(nic_cfg->hwdev, SPHW_MOD_L2NIC); +register_mgmt_cb_err: +init_vf_infos_err: + kfree(nic_cfg->vf_infos); + + return err; +} + +int spnic_vf_func_init(struct spnic_nic_cfg *nic_cfg) +{ + if (sphw_func_type(nic_cfg->hwdev) == TYPE_VF) + return vf_func_register(nic_cfg); + + return pf_init_vf_infos(nic_cfg); +} + 
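spnic_vf_func_init() picks the role-specific path: a VF registers its mailbox callbacks and announces itself to the PF, while a PF allocates vf_infos and registers the PF-side handlers. spnic_vf_func_free() below must undo exactly what the chosen path did, so the two are a strict pair. An illustrative pairing from a service probe path (the function name is hypothetical):

static int example_nic_service_probe(struct spnic_nic_cfg *nic_cfg)
{
	int err = spnic_vf_func_init(nic_cfg);

	if (err)
		return err;
	/* ... later setup; any failure from here must unwind with: */
	/* spnic_vf_func_free(nic_cfg); */
	return 0;
}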
+void spnic_vf_func_free(struct spnic_nic_cfg *nic_cfg) +{ + struct spnic_cmd_register_vf unregister; + u16 out_size = sizeof(unregister); + int err; + + memset(&unregister, 0, sizeof(unregister)); + unregister.op_register = 0; + if (sphw_func_type(nic_cfg->hwdev) == TYPE_VF) { + err = sphw_mbox_to_pf(nic_cfg->hwdev, SPHW_MOD_L2NIC, SPNIC_NIC_CMD_VF_REGISTER, + &unregister, sizeof(unregister), &unregister, &out_size, 0, + SPHW_CHANNEL_NIC); + if (err || !out_size || unregister.msg_head.status) + nic_err(nic_cfg->dev_hdl, "Failed to unregister VF, err: %d, status: 0x%x, out_size: 0x%x\n", + err, unregister.msg_head.status, out_size); + + sphw_unregister_vf_mbox_cb(nic_cfg->hwdev, SPHW_MOD_HILINK); + sphw_unregister_vf_mbox_cb(nic_cfg->hwdev, SPHW_MOD_L2NIC); + } else { + if (nic_cfg->vf_infos) { + sphw_unregister_pf_mbox_cb(nic_cfg->hwdev, SPHW_MOD_HILINK); + sphw_unregister_mgmt_msg_cb(nic_cfg->hwdev, SPHW_MOD_HILINK); + sphw_unregister_pf_mbox_cb(nic_cfg->hwdev, SPHW_MOD_L2NIC); + sphw_unregister_mgmt_msg_cb(nic_cfg->hwdev, SPHW_MOD_L2NIC); + spnic_clear_vfs_info(nic_cfg->hwdev); + kfree(nic_cfg->vf_infos); + } + } +} + +static void clear_vf_infos(void *hwdev, u16 vf_id) +{ + struct vf_data_storage *vf_infos; + struct spnic_nic_cfg *nic_cfg; + u16 func_id; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + + func_id = sphw_glb_pf_vf_offset(hwdev) + vf_id; + vf_infos = nic_cfg->vf_infos + HW_VF_ID_TO_OS(vf_id); + if (vf_infos->use_specified_mac) + spnic_del_mac(hwdev, vf_infos->drv_mac_addr, vf_infos->pf_vlan, + func_id, SPHW_CHANNEL_NIC); + + if (spnic_vf_info_vlanprio(hwdev, vf_id)) + spnic_kill_vf_vlan(hwdev, vf_id); + + if (vf_infos->max_rate) + spnic_set_vf_tx_rate(hwdev, vf_id, 0, 0); + + if (vf_infos->spoofchk) + spnic_set_vf_spoofchk(hwdev, vf_id, false); + + if (vf_infos->trust) + spnic_set_vf_trust(hwdev, vf_id, false); + + memset(vf_infos, 0, sizeof(*vf_infos)); + /* set vf_infos to default */ + spnic_init_vf_infos(nic_cfg, HW_VF_ID_TO_OS(vf_id)); +} + +void spnic_clear_vfs_info(void *hwdev) +{ + struct spnic_nic_cfg *nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + u16 i; + + for (i = 0; i < nic_cfg->max_vfs; i++) + clear_vf_infos(hwdev, OS_VF_ID_TO_HW(i)); +} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_cmd.h b/drivers/net/ethernet/ramaxel/spnic/spnic_nic_cmd.h new file mode 100644 index 000000000000..689e84d90e97 --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/spnic_nic_cmd.h @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#ifndef SPNIC_NIC_CMD_H +#define SPNIC_NIC_CMD_H + +/* Commands between NIC and MPU + */ +enum spnic_nic_cmd { + SPNIC_NIC_CMD_VF_REGISTER = 0, /* only for PFD and VFD */ + + /* FUNC CFG */ + SPNIC_NIC_CMD_SET_FUNC_TBL = 5, + SPNIC_NIC_CMD_SET_VPORT_ENABLE, + SPNIC_NIC_CMD_SET_RX_MODE, + SPNIC_NIC_CMD_SQ_CI_ATTR_SET, + SPNIC_NIC_CMD_GET_VPORT_STAT, + SPNIC_NIC_CMD_CLEAN_VPORT_STAT, + SPNIC_NIC_CMD_CLEAR_QP_RESOURCE, + SPNIC_NIC_CMD_CFG_FLEX_QUEUE, + /* LRO CFG */ + SPNIC_NIC_CMD_CFG_RX_LRO, + SPNIC_NIC_CMD_CFG_LRO_TIMER, + SPNIC_NIC_CMD_FEATURE_NEGO, + + /* MAC & VLAN CFG */ + SPNIC_NIC_CMD_GET_MAC = 20, + SPNIC_NIC_CMD_SET_MAC, + SPNIC_NIC_CMD_DEL_MAC, + SPNIC_NIC_CMD_UPDATE_MAC, + SPNIC_NIC_CMD_GET_ALL_DEFAULT_MAC, + + SPNIC_NIC_CMD_CFG_FUNC_VLAN, + SPNIC_NIC_CMD_SET_VLAN_FILTER_EN, + SPNIC_NIC_CMD_SET_RX_VLAN_OFFLOAD, + + /* SR-IOV */ + SPNIC_NIC_CMD_CFG_VF_VLAN = 40, + SPNIC_NIC_CMD_SET_SPOOPCHK_STATE, + /* RATE LIMIT */ + SPNIC_NIC_CMD_SET_MAX_MIN_RATE, + + /* RSS CFG */ + SPNIC_NIC_CMD_RSS_CFG = 60, + SPNIC_NIC_CMD_RSS_TEMP_MGR, + SPNIC_NIC_CMD_GET_RSS_CTX_TBL, + SPNIC_NIC_CMD_CFG_RSS_HASH_KEY, + SPNIC_NIC_CMD_CFG_RSS_HASH_ENGINE, + SPNIC_NIC_CMD_GET_INDIR_TBL, + + /* DPI/FDIR */ + SPNIC_NIC_CMD_ADD_TC_FLOW = 80, + SPNIC_NIC_CMD_DEL_TC_FLOW, + SPNIC_NIC_CMD_GET_TC_FLOW, + SPNIC_NIC_CMD_FLUSH_TCAM, + SPNIC_NIC_CMD_CFG_TCAM_BLOCK, + SPNIC_NIC_CMD_ENABLE_TCAM, + SPNIC_NIC_CMD_GET_TCAM_BLOCK, + 
SPNIC_NIC_CMD_CFG_DPI_TABLE_ID, + + /* PORT CFG */ + SPNIC_NIC_CMD_SET_PORT_ENABLE = 100, + SPNIC_NIC_CMD_CFG_PAUSE_INFO, + + SPNIC_NIC_CMD_SET_PORT_CAR, + SPNIC_NIC_CMD_SET_ER_DROP_PKT, + + SPNIC_NIC_CMD_VF_COS, + SPNIC_NIC_CMD_SETUP_COS_MAPPING, + SPNIC_NIC_CMD_SET_ETS, + SPNIC_NIC_CMD_SET_PFC, + + SPNIC_NIC_CMD_TX_PAUSE_EXCP_NOTICE = 118, + SPNIC_NIC_CMD_INQUIRT_PAUSE_CFG = 119, + + /* MISC */ + SPNIC_NIC_CMD_BIOS_CFG = 120, + SPNIC_NIC_CMD_SET_FIRMWARE_CUSTOM_PACKETS_MSG, + + /* DFX */ + SPNIC_NIC_CMD_GET_SM_TABLE = 140, + SPNIC_NIC_CMD_RD_LINE_TBL, + + SPNIC_NIC_CMD_SET_VHD_CFG = 161, + + SPNIC_NIC_CMD_MAX = 256, +}; + +/* NIC CMDQ MODE */ +enum spnic_ucode_cmd { + SPNIC_UCODE_CMD_MODIFY_QUEUE_CTX = 0, + SPNIC_UCODE_CMD_CLEAN_QUEUE_CONTEXT, + SPNIC_UCODE_CMD_ARM_SQ, + SPNIC_UCODE_CMD_ARM_RQ, + SPNIC_UCODE_CMD_SET_RSS_INDIR_TABLE, + SPNIC_UCODE_CMD_SET_RSS_CONTEXT_TABLE, + SPNIC_UCODE_CMD_GET_RSS_INDIR_TABLE, + SPNIC_UCODE_CMD_GET_RSS_CONTEXT_TABLE, + SPNIC_UCODE_CMD_SET_IQ_ENABLE, + SPNIC_UCODE_CMD_SET_RQ_FLUSH = 10, + SPNIC_UCODE_CMD_MODIFY_VLAN_CTX, + SPNIC_UCODE_CMD_DPI_HASH_TABLE, +}; + +#endif /* SPNIC_NIC_CMD_H */ diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_dbg.c b/drivers/net/ethernet/ramaxel/spnic/spnic_nic_dbg.c new file mode 100644 index 000000000000..08fe958a6d00 --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/spnic_nic_dbg.c @@ -0,0 +1,151 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/types.h> + +#include "sphw_crm.h" +#include "sphw_hw.h" +#include "sphw_mt.h" +#include "spnic_nic_qp.h" +#include "spnic_nic_io.h" +#include "spnic_nic_cfg.h" +#include "spnic_nic.h" + +int spnic_dbg_get_wqe_info(void *hwdev, u16 q_id, u16 idx, u16 wqebb_cnt, + u8 *wqe, u16 *wqe_size, enum spnic_queue_type q_type) +{ + struct spnic_io_queue *queue = NULL; + struct spnic_nic_cfg *nic_cfg = NULL; + void *src_wqebb = NULL; + u32 i, offset; + + if (!hwdev) { + pr_err("hwdev is NULL.\n"); + return -EINVAL; + } + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + + if (q_id >= nic_cfg->num_qps) { + pr_err("q_id[%u] > num_qps_cfg[%u].\n", q_id, nic_cfg->num_qps); + return -EINVAL; + } + + queue = (q_type == SPNIC_SQ) ? 
&nic_cfg->sq[q_id] : &nic_cfg->rq[q_id]; + + if ((idx + wqebb_cnt) > queue->wq.q_depth) { + pr_err("(idx[%u] + wqebb_cnt[%u]) > q_depth[%u].\n", + idx, wqebb_cnt, queue->wq.q_depth); + return -EINVAL; + } + + if (*wqe_size != (queue->wq.wqebb_size * wqebb_cnt)) { + pr_err("Unexpected out buf size from user: %u, expect: %d\n", + *wqe_size, (queue->wq.wqebb_size * wqebb_cnt)); + return -EINVAL; + } + + for (i = 0; i < wqebb_cnt; i++) { + src_wqebb = sphw_wq_wqebb_addr(&queue->wq, WQ_MASK_IDX(&queue->wq, idx + i)); + offset = queue->wq.wqebb_size * i; + memcpy(wqe + offset, src_wqebb, queue->wq.wqebb_size); + } + + return 0; +} + +int spnic_dbg_get_sq_info(void *hwdev, u16 q_id, struct nic_sq_info *sq_info, u32 msg_size) +{ + struct spnic_nic_cfg *nic_cfg = NULL; + struct spnic_io_queue *sq = NULL; + + if (!hwdev || !sq_info) { + pr_err("hwdev or sq_info is NULL.\n"); + return -EINVAL; + } + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + + if (q_id >= nic_cfg->num_qps) { + nic_err(nic_cfg->dev_hdl, "Input queue id(%u) is larger than the actual queue number\n", + q_id); + return -EINVAL; + } + + if (msg_size != sizeof(*sq_info)) { + nic_err(nic_cfg->dev_hdl, "Unexpected out buf size from user: %u, expect: %lu\n", + msg_size, sizeof(*sq_info)); + return -EINVAL; + } + + sq = &nic_cfg->sq[q_id]; + + sq_info->q_id = q_id; + sq_info->pi = spnic_get_sq_local_pi(sq); + sq_info->ci = spnic_get_sq_local_ci(sq); + sq_info->fi = spnic_get_sq_hw_ci(sq); + sq_info->q_depth = sq->wq.q_depth; + sq_info->wqebb_size = sq->wq.wqebb_size; + + sq_info->ci_addr = sq->tx.cons_idx_addr; + + sq_info->cla_addr = sq->wq.wq_block_paddr; + sq_info->slq_handle = sq; + + sq_info->doorbell.map_addr = (u64 *)sq->db_addr; + + return 0; +} + +int spnic_dbg_get_rq_info(void *hwdev, u16 q_id, struct nic_rq_info *rq_info, u32 msg_size) +{ + struct spnic_nic_cfg *nic_cfg = NULL; + struct spnic_io_queue *rq = NULL; + + if (!hwdev || !rq_info) { + pr_err("hwdev or rq_info is NULL.\n"); + return -EINVAL; + } + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + + if (q_id >= nic_cfg->num_qps) { + nic_err(nic_cfg->dev_hdl, "Input queue id(%u) is larger than the actual queue number\n", + q_id); + return -EINVAL; + } + + if (msg_size != sizeof(*rq_info)) { + nic_err(nic_cfg->dev_hdl, "Unexpected out buf size from user: %u, expect: %lu\n", + msg_size, sizeof(*rq_info)); + return -EINVAL; + } + + rq = &nic_cfg->rq[q_id]; + + rq_info->q_id = q_id; + + rq_info->hw_pi = cpu_to_be16(*rq->rx.pi_virt_addr); + rq_info->ci = spnic_get_rq_local_ci(rq); + + rq_info->sw_pi = 0; + + rq_info->wqebb_size = rq->wq.wqebb_size; + rq_info->q_depth = (u16)rq->wq.q_depth; + + rq_info->buf_len = nic_cfg->rx_buff_len; + + rq_info->slq_handle = rq; + + rq_info->ci_wqe_page_addr = sphw_wq_get_first_wqe_page_addr(&rq->wq); + rq_info->ci_cla_tbl_addr = rq->wq.wq_block_paddr; + + rq_info->msix_idx = rq->msix_entry_idx; + rq_info->msix_vector = 0; + + return 0; +} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_dbg.h b/drivers/net/ethernet/ramaxel/spnic/spnic_nic_dbg.h new file mode 100644 index 000000000000..d86c65ed5f4f --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/spnic_nic_dbg.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#ifndef SPNIC_NIC_DBG_H +#define SPNIC_NIC_DBG_H + +#include "spnic_nic_io.h" + +int spnic_dbg_get_sq_info(void *hwdev, u16 q_id, struct nic_sq_info *sq_info, u32 msg_size); + +int spnic_dbg_get_rq_info(void *hwdev, u16 q_id, struct 
nic_rq_info *rq_info, u32 msg_size); + +int spnic_dbg_get_wqe_info(void *hwdev, u16 q_id, u16 idx, u16 wqebb_cnt, + u8 *wqe, u16 *wqe_size, enum spnic_queue_type q_type); + +#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_dev.h b/drivers/net/ethernet/ramaxel/spnic/spnic_nic_dev.h new file mode 100644 index 000000000000..37068f1b48f2 --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/spnic_nic_dev.h @@ -0,0 +1,352 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#ifndef SPNIC_NIC_DEV_H +#define SPNIC_NIC_DEV_H + +#include <linux/netdevice.h> +#include <linux/semaphore.h> +#include <linux/types.h> +#include <linux/bitops.h> + +#include "spnic_nic_io.h" +#include "spnic_nic_cfg.h" +#include "spnic_tx.h" +#include "spnic_rx.h" +#include "spnic_dcb.h" + +#define SPNIC_NIC_DRV_NAME "spnic" +#define SPNIC_DRV_VERSION "B090" +#define SPNIC_DRV_DESC "Ramaxel(R) Network Interface Card Driver" + +#define SPNIC_FUNC_IS_VF(hwdev) (sphw_func_type(hwdev) == TYPE_VF) + +#define SPNIC_AVG_PKT_SMALL 256U +#define SPNIC_MODERATONE_DELAY HZ + +#define LP_PKT_CNT 64 + +enum spnic_flags { + SPNIC_INTF_UP, + SPNIC_MAC_FILTER_CHANGED, + SPNIC_LP_TEST, + SPNIC_RSS_ENABLE, + SPNIC_DCB_ENABLE, + SPNIC_SAME_RXTX, + SPNIC_INTR_ADAPT, + SPNIC_UPDATE_MAC_FILTER, + SPNIC_CHANGE_RES_INVALID, + SPNIC_RSS_DEFAULT_INDIR, +}; + +#define SPHW_CHANNEL_RES_VALID(nic_dev) \ + (test_bit(SPNIC_INTF_UP, &(nic_dev)->flags) && \ + !test_bit(SPNIC_CHANGE_RES_INVALID, &(nic_dev)->flags)) + +#define RX_BUFF_NUM_PER_PAGE 2 + +#define VLAN_BITMAP_BYTE_SIZE(nic_dev) (sizeof(*(nic_dev)->vlan_bitmap)) +#define VLAN_BITMAP_BITS_SIZE(nic_dev) (VLAN_BITMAP_BYTE_SIZE(nic_dev) * 8) +#define VLAN_NUM_BITMAPS(nic_dev) (VLAN_N_VID / \ + VLAN_BITMAP_BITS_SIZE(nic_dev)) +#define VLAN_BITMAP_SIZE(nic_dev) (VLAN_N_VID / \ + VLAN_BITMAP_BYTE_SIZE(nic_dev)) +#define VID_LINE(nic_dev, vid) ((vid) / VLAN_BITMAP_BITS_SIZE(nic_dev)) +#define VID_COL(nic_dev, vid) ((vid) & (VLAN_BITMAP_BITS_SIZE(nic_dev) - 1)) + +enum spnic_event_work_flags { + EVENT_WORK_TX_TIMEOUT, +}; + +enum spnic_rx_mode_state { + SPNIC_HW_PROMISC_ON, + SPNIC_HW_ALLMULTI_ON, + SPNIC_PROMISC_FORCE_ON, + SPNIC_ALLMULTI_FORCE_ON, +}; + +enum mac_filter_state { + SPNIC_MAC_WAIT_HW_SYNC, + SPNIC_MAC_HW_SYNCED, + SPNIC_MAC_WAIT_HW_UNSYNC, + SPNIC_MAC_HW_UNSYNCED, +}; + +struct spnic_mac_filter { + struct list_head list; + u8 addr[ETH_ALEN]; + unsigned long state; +}; + +struct spnic_irq { + struct net_device *netdev; + /* IRQ corresponding index number */ + u16 msix_entry_idx; + u32 irq_id; /* The IRQ number from OS */ + char irq_name[IFNAMSIZ + 16]; + struct napi_struct napi; + cpumask_t affinity_mask; + struct spnic_txq *txq; + struct spnic_rxq *rxq; +}; + +struct spnic_intr_coal_info { + u8 pending_limt; + u8 coalesce_timer_cfg; + u8 resend_timer_cfg; + + u64 pkt_rate_low; + u8 rx_usecs_low; + u8 rx_pending_limt_low; + u64 pkt_rate_high; + u8 rx_usecs_high; + u8 rx_pending_limt_high; + + u8 user_set_intr_coal_flag; +}; + +struct spnic_dyna_txrxq_params { + u16 num_qps; + u16 num_rss; + u16 rss_limit; + u8 num_tc; + u8 rsvd1; + u32 sq_depth; + u32 rq_depth; + + struct spnic_dyna_txq_res *txqs_res; + struct spnic_dyna_rxq_res *rxqs_res; + struct spnic_irq *irq_cfg; +}; + +#define SPNIC_NIC_STATS_INC(nic_dev, field) \ +do { \ + u64_stats_update_begin(&(nic_dev)->stats.syncp);\ + (nic_dev)->stats.field++; \ + u64_stats_update_end(&(nic_dev)->stats.syncp); \ +} while (0) + +struct spnic_nic_stats { + u64 netdev_tx_timeout; + 
+ /* Subdivision statistics show in private tool */ + u64 tx_carrier_off_drop; + u64 tx_invalid_qid; + + struct u64_stats_sync syncp; +}; + +#define SPNIC_TCAM_DYNAMIC_BLOCK_SIZE 16 +#define SPNIC_MAX_TCAM_FILTERS 512 + +#define SPNIC_PKT_TCAM_DYNAMIC_INDEX_START(block_index) \ + (SPNIC_TCAM_DYNAMIC_BLOCK_SIZE * (block_index)) + +struct spnic_rx_flow_rule { + struct list_head rules; + int tot_num_rules; +}; + +struct spnic_tcam_dynamic_block { + struct list_head block_list; + u16 dynamic_block_id; + u16 dynamic_index_cnt; + u8 dynamic_index_used[SPNIC_TCAM_DYNAMIC_BLOCK_SIZE]; +}; + +struct spnic_tcam_dynamic_block_info { + struct list_head tcam_dynamic_list; + u16 dynamic_block_cnt; +}; + +struct spnic_tcam_filter { + struct list_head tcam_filter_list; + u16 dynamic_block_id; + u16 index; + struct tag_tcam_key tcam_key; + u16 queue; +}; + +/* function level struct info */ +struct spnic_tcam_info { + u16 tcam_rule_nums; + struct list_head tcam_list; + struct spnic_tcam_dynamic_block_info tcam_dynamic_info; +}; + +struct spnic_nic_dev { + struct pci_dev *pdev; + struct net_device *netdev; + void *hwdev; + + int poll_weight; + + unsigned long *vlan_bitmap; + + u16 max_qps; + + u32 msg_enable; + unsigned long flags; + + u32 lro_replenish_thld; + u32 dma_rx_buff_size; + u16 rx_buff_len; + u32 page_order; + + /* Rss related varibles */ + u8 rss_hash_engine; + struct nic_rss_type rss_type; + u8 *rss_hkey; + /* hkey in big endian */ + u32 *rss_hkey_be; + u32 *rss_indir; + + u32 dcb_changes; + struct spnic_dcb_config hw_dcb_cfg; + struct spnic_dcb_config wanted_dcb_cfg; + unsigned long dcb_flags; + int disable_port_cnt; + /* lock for disable or enable traffic flow */ + struct semaphore dcb_sem; + + struct spnic_intr_coal_info *intr_coalesce; + unsigned long last_moder_jiffies; + u32 adaptive_rx_coal; + u8 intr_coal_set_flag; + + struct spnic_nic_stats stats; + + /* lock for nic resource */ + struct mutex nic_mutex; + bool force_port_disable; + struct semaphore port_state_sem; + u8 link_status; + + struct nic_service_cap nic_cap; + + struct spnic_txq *txqs; + struct spnic_rxq *rxqs; + struct spnic_dyna_txrxq_params q_params; + + u16 num_qp_irq; + struct irq_info *qps_irq_info; + + struct workqueue_struct *workq; + + struct work_struct rx_mode_work; + struct delayed_work moderation_task; + + struct list_head uc_filter_list; + struct list_head mc_filter_list; + unsigned long rx_mod_state; + int netdev_uc_cnt; + int netdev_mc_cnt; + + int lb_test_rx_idx; + int lb_pkt_len; + u8 *lb_test_rx_buf; + + struct spnic_tcam_info tcam; + struct spnic_rx_flow_rule rx_flow_rule; + + struct bpf_prog *xdp_prog; + + struct delayed_work periodic_work; + /* reference to enum spnic_event_work_flags */ + unsigned long event_flag; +}; + +#define IPSEC_CAP_IS_SUPPORT(nic_dev) ((nic_dev)->ipsec) + +#define spnic_msg(level, nic_dev, msglvl, format, arg...) \ +do { \ + if ((nic_dev)->netdev && (nic_dev)->netdev->reg_state \ + == NETREG_REGISTERED) \ + nicif_##level((nic_dev), msglvl, (nic_dev)->netdev, \ + format, ## arg); \ + else \ + nic_##level(&(nic_dev)->pdev->dev, \ + format, ## arg); \ +} while (0) + +#define spnic_info(nic_dev, msglvl, format, arg...) \ + spnic_msg(info, nic_dev, msglvl, format, ## arg) + +#define spnic_warn(nic_dev, msglvl, format, arg...) \ + spnic_msg(warn, nic_dev, msglvl, format, ## arg) + +#define spnic_err(nic_dev, msglvl, format, arg...) \ + spnic_msg(err, nic_dev, msglvl, format, ## arg) + +#define nicif_err(priv, type, dev, fmt, args...) 
\ + netif_level(err, priv, type, dev, "[NIC]" fmt, ##args) +#define nicif_warn(priv, type, dev, fmt, args...) \ + netif_level(warn, priv, type, dev, "[NIC]" fmt, ##args) +#define nicif_notice(priv, type, dev, fmt, args...) \ + netif_level(notice, priv, type, dev, "[NIC]" fmt, ##args) +#define nicif_info(priv, type, dev, fmt, args...) \ + netif_level(info, priv, type, dev, "[NIC]" fmt, ##args) +#define nicif_dbg(priv, type, dev, fmt, args...) \ + netif_level(dbg, priv, type, dev, "[NIC]" fmt, ##args) + +extern struct spnic_uld_info nic_uld_info; + +u32 spnic_get_io_stats_size(struct spnic_nic_dev *nic_dev); + +void spnic_get_io_stats(struct spnic_nic_dev *nic_dev, void *stats); + +int spnic_open(struct net_device *netdev); + +int spnic_close(struct net_device *netdev); + +void spnic_set_ethtool_ops(struct net_device *netdev); + +void spnicvf_set_ethtool_ops(struct net_device *netdev); + +int nic_ioctl(void *uld_dev, u32 cmd, const void *buf_in, + u32 in_size, void *buf_out, u32 *out_size); + +void spnic_update_num_qps(struct net_device *netdev); + +int spnic_qps_irq_init(struct spnic_nic_dev *nic_dev); + +void spnic_qps_irq_deinit(struct spnic_nic_dev *nic_dev); + +void spnic_set_netdev_ops(struct spnic_nic_dev *nic_dev); + +int spnic_set_hw_features(struct spnic_nic_dev *nic_dev); + +void spnic_set_rx_mode_work(struct work_struct *work); + +void spnic_clean_mac_list_filter(struct spnic_nic_dev *nic_dev); + +void spnic_get_strings(struct net_device *netdev, u32 stringset, u8 *data); + +void spnic_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data); + +int spnic_get_sset_count(struct net_device *netdev, int sset); + +int spnic_force_port_disable(struct spnic_nic_dev *nic_dev); + +int spnic_force_set_port_state(struct spnic_nic_dev *nic_dev, bool enable); + +int spnic_maybe_set_port_state(struct spnic_nic_dev *nic_dev, bool enable); + +int spnic_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *link_settings); +int spnic_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *link_settings); + +void spnic_auto_moderation_work(struct work_struct *work); + +typedef void (*spnic_reopen_handler)(struct spnic_nic_dev *nic_dev, const void *priv_data); +int spnic_change_channel_settings(struct spnic_nic_dev *nic_dev, + struct spnic_dyna_txrxq_params *trxq_params, + spnic_reopen_handler reopen_handler, const void *priv_data); + +void spnic_link_status_change(struct spnic_nic_dev *nic_dev, bool status); + +bool spnic_is_xdp_enable(struct spnic_nic_dev *nic_dev); +int spnic_xdp_max_mtu(struct spnic_nic_dev *nic_dev); + +#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_event.c b/drivers/net/ethernet/ramaxel/spnic/spnic_nic_event.c new file mode 100644 index 000000000000..0e8a2c4a3961 --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/spnic_nic_event.c @@ -0,0 +1,506 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/etherdevice.h> +#include <linux/if_vlan.h> +#include <linux/ethtool.h> +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/module.h> + +#include "sphw_crm.h" +#include "sphw_hw.h" +#include "spnic_nic_io.h" +#include "spnic_nic_cfg.h" +#include "spnic_nic.h" +#include "spnic_nic_cmd.h" + +static int spnic_init_vf_config(struct spnic_nic_cfg 
*nic_cfg, u16 vf_id) +{ + struct vf_data_storage *vf_info; + u16 func_id; + int err = 0; + + vf_info = nic_cfg->vf_infos + HW_VF_ID_TO_OS(vf_id); + ether_addr_copy(vf_info->drv_mac_addr, vf_info->user_mac_addr); + if (!is_zero_ether_addr(vf_info->drv_mac_addr)) { + vf_info->use_specified_mac = true; + func_id = sphw_glb_pf_vf_offset(nic_cfg->hwdev) + vf_id; + + err = spnic_set_mac(nic_cfg->hwdev, vf_info->drv_mac_addr, + vf_info->pf_vlan, func_id, SPHW_CHANNEL_NIC); + if (err) { + nic_err(nic_cfg->dev_hdl, "Failed to set VF %d MAC\n", + HW_VF_ID_TO_OS(vf_id)); + return err; + } + } else { + vf_info->use_specified_mac = false; + } + + if (spnic_vf_info_vlanprio(nic_cfg->hwdev, vf_id)) { + err = spnic_cfg_vf_vlan(nic_cfg, SPNIC_CMD_OP_ADD, + vf_info->pf_vlan, vf_info->pf_qos, vf_id); + if (err) { + nic_err(nic_cfg->dev_hdl, "Failed to add VF %d VLAN_QOS\n", + HW_VF_ID_TO_OS(vf_id)); + return err; + } + } + + if (vf_info->max_rate) { + err = spnic_set_vf_tx_rate(nic_cfg->hwdev, vf_id, vf_info->max_rate, + vf_info->min_rate); + if (err) { + nic_err(nic_cfg->dev_hdl, "Failed to set VF %d max rate %u, min rate %u\n", + HW_VF_ID_TO_OS(vf_id), vf_info->max_rate, + vf_info->min_rate); + return err; + } + } + + return 0; +} + +static int register_vf_msg_handler(struct spnic_nic_cfg *nic_cfg, u16 vf_id) +{ + int err; + + if (vf_id > nic_cfg->max_vfs) { + nic_err(nic_cfg->dev_hdl, "Register VF id %d exceed limit[0-%d]\n", + HW_VF_ID_TO_OS(vf_id), HW_VF_ID_TO_OS(nic_cfg->max_vfs)); + return -EFAULT; + } + + err = spnic_init_vf_config(nic_cfg, vf_id); + if (err) + return err; + + nic_cfg->vf_infos[HW_VF_ID_TO_OS(vf_id)].registered = true; + + return 0; +} + +static int unregister_vf_msg_handler(struct spnic_nic_cfg *nic_cfg, u16 vf_id) +{ + struct vf_data_storage *vf_info = + nic_cfg->vf_infos + HW_VF_ID_TO_OS(vf_id); + struct spnic_port_mac_set mac_info; + u16 out_size = sizeof(mac_info); + int err; + + if (vf_id > nic_cfg->max_vfs) + return -EFAULT; + + vf_info->registered = false; + + memset(&mac_info, 0, sizeof(mac_info)); + mac_info.func_id = sphw_glb_pf_vf_offset(nic_cfg->hwdev) + (u16)vf_id; + mac_info.vlan_id = vf_info->pf_vlan; + ether_addr_copy(mac_info.mac, vf_info->drv_mac_addr); + + if (vf_info->use_specified_mac || vf_info->pf_vlan) { + err = l2nic_msg_to_mgmt_sync(nic_cfg->hwdev, SPNIC_NIC_CMD_DEL_MAC, + &mac_info, sizeof(mac_info), &mac_info, &out_size); + if (err || mac_info.msg_head.status || !out_size) { + nic_err(nic_cfg->dev_hdl, "Failed to delete VF %d MAC, err: %d, status: 0x%x, out size: 0x%x\n", + HW_VF_ID_TO_OS(vf_id), err, + mac_info.msg_head.status, out_size); + return -EFAULT; + } + } + + memset(vf_info->drv_mac_addr, 0, ETH_ALEN); + + return 0; +} + +static int spnic_register_vf_msg_handler(struct spnic_nic_cfg *nic_cfg, + u16 vf_id, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct spnic_cmd_register_vf *register_vf = buf_in; + struct spnic_cmd_register_vf *register_info = buf_out; + int err; + + if (register_vf->op_register) + err = register_vf_msg_handler(nic_cfg, vf_id); + else + err = unregister_vf_msg_handler(nic_cfg, vf_id); + + if (err) + register_info->msg_head.status = EFAULT; + + *out_size = sizeof(*register_info); + + return 0; +} + +static int spnic_get_vf_cos_msg_handler(struct spnic_nic_cfg *nic_cfg, u16 vf_id, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) +{ + struct spnic_cmd_vf_dcb_state *dcb_state = buf_out; + + memcpy(&dcb_state->state, &nic_cfg->dcb_state, + sizeof(nic_cfg->dcb_state)); + + dcb_state->msg_head.status = 
0; + *out_size = sizeof(*dcb_state); + return 0; +} + +static int spnic_get_vf_mac_msg_handler(struct spnic_nic_cfg *nic_cfg, u16 vf, + void *buf_in, u16 in_size, void *buf_out, u16 *out_size) +{ + struct vf_data_storage *vf_info = nic_cfg->vf_infos + HW_VF_ID_TO_OS(vf); + struct spnic_port_mac_set *mac_info = buf_out; + + int err; + + if (sphw_support_ovs(nic_cfg->hwdev, NULL)) { + err = l2nic_msg_to_mgmt_sync(nic_cfg->hwdev, SPNIC_NIC_CMD_GET_MAC, buf_in, + in_size, buf_out, out_size); + if (!err) { + if (is_zero_ether_addr(mac_info->mac)) + ether_addr_copy(mac_info->mac, vf_info->drv_mac_addr); + } + return err; + } + + ether_addr_copy(mac_info->mac, vf_info->drv_mac_addr); + mac_info->msg_head.status = 0; + *out_size = sizeof(*mac_info); + + return 0; +} + +static int spnic_set_vf_mac_msg_handler(struct spnic_nic_cfg *nic_cfg, u16 vf, + void *buf_in, u16 in_size, void *buf_out, u16 *out_size) +{ + struct vf_data_storage *vf_info = nic_cfg->vf_infos + HW_VF_ID_TO_OS(vf); + struct spnic_port_mac_set *mac_in = buf_in; + struct spnic_port_mac_set *mac_out = buf_out; + int err; + + if (vf_info->use_specified_mac && !vf_info->trust && + is_valid_ether_addr(mac_in->mac)) { + nic_warn(nic_cfg->dev_hdl, "PF has already set VF %d MAC address, and vf trust is off.\n", + HW_VF_ID_TO_OS(vf)); + mac_out->msg_head.status = SPNIC_PF_SET_VF_ALREADY; + *out_size = sizeof(*mac_out); + return 0; + } + + if (is_valid_ether_addr(mac_in->mac)) + mac_in->vlan_id = vf_info->pf_vlan; + + err = l2nic_msg_to_mgmt_sync(nic_cfg->hwdev, SPNIC_NIC_CMD_SET_MAC, + buf_in, in_size, buf_out, out_size); + if (err || !(*out_size)) { + nic_err(nic_cfg->dev_hdl, "Failed to set VF %d MAC address, err: %d,status: 0x%x, out size: 0x%x\n", + HW_VF_ID_TO_OS(vf), err, mac_out->msg_head.status, + *out_size); + return -EFAULT; + } + + if (is_valid_ether_addr(mac_in->mac) && !mac_out->msg_head.status) + ether_addr_copy(vf_info->drv_mac_addr, mac_in->mac); + + return err; +} + +static int spnic_del_vf_mac_msg_handler(struct spnic_nic_cfg *nic_cfg, u16 vf, + void *buf_in, u16 in_size, void *buf_out, u16 *out_size) +{ + struct vf_data_storage *vf_info = nic_cfg->vf_infos + HW_VF_ID_TO_OS(vf); + struct spnic_port_mac_set *mac_in = buf_in; + struct spnic_port_mac_set *mac_out = buf_out; + int err; + + if (vf_info->use_specified_mac && !vf_info->trust && + is_valid_ether_addr(mac_in->mac)) { + nic_warn(nic_cfg->dev_hdl, "PF has already set VF %d MAC address, and vf trust is off.\n", + HW_VF_ID_TO_OS(vf)); + mac_out->msg_head.status = SPNIC_PF_SET_VF_ALREADY; + *out_size = sizeof(*mac_out); + return 0; + } + + if (is_valid_ether_addr(mac_in->mac)) + mac_in->vlan_id = vf_info->pf_vlan; + + err = l2nic_msg_to_mgmt_sync(nic_cfg->hwdev, SPNIC_NIC_CMD_DEL_MAC, + buf_in, in_size, buf_out, out_size); + if (err || !(*out_size)) { + nic_err(nic_cfg->dev_hdl, "Failed to delete VF %d MAC, err: %d, status: 0x%x, out size: 0x%x\n", + HW_VF_ID_TO_OS(vf), err, mac_out->msg_head.status, + *out_size); + return -EFAULT; + } + + if (is_valid_ether_addr(mac_in->mac) && !mac_out->msg_head.status) + eth_zero_addr(vf_info->drv_mac_addr); + + return err; +} + +static int spnic_update_vf_mac_msg_handler(struct spnic_nic_cfg *nic_cfg, + u16 vf, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct vf_data_storage *vf_info = nic_cfg->vf_infos + HW_VF_ID_TO_OS(vf); + struct spnic_port_mac_update *mac_in = buf_in; + struct spnic_port_mac_update *mac_out = buf_out; + int err; + + if (!is_valid_ether_addr(mac_in->new_mac)) { + nic_err(nic_cfg->dev_hdl, 
"Update VF MAC is invalid.\n"); + return -EINVAL; + } + + if (vf_info->use_specified_mac && !vf_info->trust) { + nic_warn(nic_cfg->dev_hdl, "PF has already set VF %d MAC address, and vf trust is off.\n", + HW_VF_ID_TO_OS(vf)); + mac_out->msg_head.status = SPNIC_PF_SET_VF_ALREADY; + *out_size = sizeof(*mac_out); + return 0; + } + + mac_in->vlan_id = vf_info->pf_vlan; + err = l2nic_msg_to_mgmt_sync(nic_cfg->hwdev, SPNIC_NIC_CMD_UPDATE_MAC, + buf_in, in_size, buf_out, out_size); + if (err || !(*out_size)) { + nic_warn(nic_cfg->dev_hdl, "Failed to update VF %d MAC, err: %d,status: 0x%x, out size: 0x%x\n", + HW_VF_ID_TO_OS(vf), err, mac_out->msg_head.status, + *out_size); + return -EFAULT; + } + + if (!mac_out->msg_head.status) + ether_addr_copy(vf_info->drv_mac_addr, mac_in->new_mac); + + return err; +} + +const struct vf_msg_handler vf_cmd_handler[] = { + { + .cmd = SPNIC_NIC_CMD_VF_REGISTER, + .handler = spnic_register_vf_msg_handler, + }, + + { + .cmd = SPNIC_NIC_CMD_GET_MAC, + .handler = spnic_get_vf_mac_msg_handler, + }, + + { + .cmd = SPNIC_NIC_CMD_SET_MAC, + .handler = spnic_set_vf_mac_msg_handler, + }, + + { + .cmd = SPNIC_NIC_CMD_DEL_MAC, + .handler = spnic_del_vf_mac_msg_handler, + }, + + { + .cmd = SPNIC_NIC_CMD_UPDATE_MAC, + .handler = spnic_update_vf_mac_msg_handler, + }, + + { + .cmd = SPNIC_NIC_CMD_VF_COS, + .handler = spnic_get_vf_cos_msg_handler + }, +}; + +static int _l2nic_msg_to_mgmt_sync(void *hwdev, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, u16 channel) +{ + u32 i, cmd_cnt = ARRAY_LEN(vf_cmd_handler); + bool cmd_to_pf = false; + + if (sphw_func_type(hwdev) == TYPE_VF) { + for (i = 0; i < cmd_cnt; i++) { + if (cmd == vf_cmd_handler[i].cmd) + cmd_to_pf = true; + } + } + + if (cmd_to_pf) + return sphw_mbox_to_pf(hwdev, SPHW_MOD_L2NIC, cmd, buf_in, in_size, buf_out, + out_size, 0, channel); + + return sphw_msg_to_mgmt_sync(hwdev, SPHW_MOD_L2NIC, cmd, buf_in, in_size, buf_out, + out_size, 0, channel); +} + +int l2nic_msg_to_mgmt_sync(void *hwdev, u16 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + return _l2nic_msg_to_mgmt_sync(hwdev, cmd, buf_in, in_size, buf_out, + out_size, SPHW_CHANNEL_NIC); +} + +int l2nic_msg_to_mgmt_sync_ch(void *hwdev, u16 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size, u16 channel) +{ + return _l2nic_msg_to_mgmt_sync(hwdev, cmd, buf_in, in_size, buf_out, out_size, channel); +} + +/* pf/ppf handler mbox msg from vf */ +int spnic_pf_mbox_handler(void *hwdev, void *pri_handle, + u16 vf_id, u16 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + u32 index, cmd_size = ARRAY_LEN(vf_cmd_handler); + struct spnic_nic_cfg *nic_cfg = NULL; + + if (!hwdev) + return -EFAULT; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + + for (index = 0; index < cmd_size; index++) { + if (cmd == vf_cmd_handler[index].cmd) + return vf_cmd_handler[index].handler(nic_cfg, vf_id, buf_in, in_size, + buf_out, out_size); + } + + nic_warn(nic_cfg->dev_hdl, "NO handler for nic cmd(%u) received from vf id: %u\n", + cmd, vf_id); + + return -EINVAL; +} + +void spnic_notify_dcb_state_event(struct spnic_nic_cfg *nic_cfg, struct spnic_dcb_state *dcb_state) +{ + struct sphw_event_info event_info = {0}; + + /* This is 8 user priority to cos mapping relationships */ + sdk_info(nic_cfg->dev_hdl, "DCB %s, default cos %u, up2cos %u%u%u%u%u%u%u%u\n", + dcb_state->dcb_on ? 
"on" : "off", dcb_state->default_cos, + dcb_state->up_cos[0], dcb_state->up_cos[1], + dcb_state->up_cos[2], dcb_state->up_cos[3], + dcb_state->up_cos[4], dcb_state->up_cos[5], + dcb_state->up_cos[6], dcb_state->up_cos[7]); + + /* Saved in sdk for statefull module */ + spnic_save_dcb_state(nic_cfg, dcb_state); + + event_info.type = SPHW_EVENT_DCB_STATE_CHANGE; + memcpy(&event_info.dcb_state, dcb_state, sizeof(event_info.dcb_state)); + + sphw_event_callback(nic_cfg->hwdev, &event_info); +} + +void dcb_state_event(void *hwdev, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct spnic_cmd_vf_dcb_state *vf_dcb; + struct spnic_nic_cfg *nic_cfg; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + + vf_dcb = buf_in; + if (!vf_dcb) + return; + + spnic_notify_dcb_state_event(nic_cfg, &vf_dcb->state); +} + +void tx_pause_excp_event_handler(void *hwdev, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct nic_cmd_tx_pause_notice *excp_info = buf_in; + struct spnic_nic_cfg *nic_cfg = NULL; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + + if (in_size != sizeof(*excp_info)) { + nic_err(nic_cfg->dev_hdl, "Invalid in_size: %u, should be %ld\n", + in_size, sizeof(*excp_info)); + return; + } + + nic_warn(nic_cfg->dev_hdl, "Receive tx pause exception event, excp: %u, level: %u\n", + excp_info->tx_pause_except, excp_info->except_level); + + sphw_fault_event_report(hwdev, SPHW_FAULT_SRC_TX_PAUSE_EXCP, (u16)excp_info->except_level); +} + +struct nic_event_handler nic_cmd_handler[] = { + { + .cmd = SPNIC_NIC_CMD_VF_COS, + .handler = dcb_state_event, + }, + { + .cmd = SPNIC_NIC_CMD_TX_PAUSE_EXCP_NOTICE, + .handler = tx_pause_excp_event_handler, + }, +}; + +static void _event_handler(void *hwdev, u16 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct spnic_nic_cfg *nic_cfg = NULL; + u32 size = sizeof(nic_cmd_handler) / sizeof(struct nic_event_handler); + u32 i; + + if (!hwdev) + return; + + *out_size = 0; + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + + for (i = 0; i < size; i++) { + if (cmd == nic_cmd_handler[i].cmd) { + nic_cmd_handler[i].handler(hwdev, buf_in, in_size, buf_out, out_size); + break; + } + } + + /* can't find this event cmd */ + if (i == size) + sdk_warn(nic_cfg->dev_hdl, "Unsupported event cmd(%u) to process\n", + cmd); +} + +/* vf handler mbox msg from ppf/pf */ +/* vf link change event + * vf fault report event, TBD + */ +int spnic_vf_event_handler(void *hwdev, void *pri_handle, u16 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + _event_handler(hwdev, cmd, buf_in, in_size, buf_out, out_size); + return 0; +} + +/* pf/ppf handler mgmt cpu report nic event*/ +void spnic_pf_event_handler(void *hwdev, void *pri_handle, u16 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + _event_handler(hwdev, cmd, buf_in, in_size, buf_out, out_size); +} + +u8 spnic_nic_sw_aeqe_handler(void *hwdev, u8 event, u8 *data) +{ + struct spnic_nic_cfg *nic_cfg = NULL; + + if (!hwdev) + return 0; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + + nic_err(nic_cfg->dev_hdl, "Received nic ucode aeq event type: 0x%x, data: 0x%llx\n", + event, *((u64 *)data)); + + return 0; +} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_io.c b/drivers/net/ethernet/ramaxel/spnic/spnic_nic_io.c new file mode 100644 index 000000000000..3f1fb1381844 --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/spnic_nic_io.c @@ -0,0 +1,1123 @@ +// SPDX-License-Identifier: GPL-2.0 +/* 
Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/types.h> +#include <linux/module.h> + +#include "sphw_crm.h" +#include "sphw_hw.h" +#include "sphw_common.h" +#include "spnic_nic_qp.h" +#include "spnic_nic_io.h" +#include "spnic_nic_cfg.h" +#include "spnic_nic.h" +#include "spnic_nic_cmd.h" + +#define SPNIC_DEFAULT_TX_CI_PENDING_LIMIT 0 +#define SPNIC_DEFAULT_TX_CI_COALESCING_TIME 0 +#define SPNIC_DEFAULT_DROP_THD_ON 0xFFFF +#define SPNIC_DEFAULT_DROP_THD_OFF 0 + +static unsigned char tx_pending_limit = SPNIC_DEFAULT_TX_CI_PENDING_LIMIT; +module_param(tx_pending_limit, byte, 0444); +MODULE_PARM_DESC(tx_pending_limit, "TX CI coalescing parameter pending_limit (default=0)"); + +static unsigned char tx_coalescing_time = SPNIC_DEFAULT_TX_CI_COALESCING_TIME; +module_param(tx_coalescing_time, byte, 0444); +MODULE_PARM_DESC(tx_coalescing_time, "TX CI coalescing parameter coalescing_time (default=0)"); + +static unsigned char rq_wqe_type = SPNIC_NORMAL_RQ_WQE; +module_param(rq_wqe_type, byte, 0444); +MODULE_PARM_DESC(rq_wqe_type, "RQ WQE type 0-8Bytes, 1-16Bytes, 2-32Bytes (default=1)"); + +static u32 tx_drop_thd_on = SPNIC_DEFAULT_DROP_THD_ON; +module_param(tx_drop_thd_on, uint, 0644); +MODULE_PARM_DESC(tx_drop_thd_on, "TX parameter drop_thd_on (default=0xffff)"); + +static u32 tx_drop_thd_off = SPNIC_DEFAULT_DROP_THD_OFF; +module_param(tx_drop_thd_off, uint, 0644); +MODULE_PARM_DESC(tx_drop_thd_off, "TX parameter drop_thd_off (default=0)"); +/* performance: keep each ci addr cache-line (64B) aligned */ +#define SPNIC_CI_Q_ADDR_SIZE 64 + +#define CI_TABLE_SIZE(num_qps, pg_sz) (ALIGN((num_qps) * SPNIC_CI_Q_ADDR_SIZE, pg_sz)) + +#define SPNIC_CI_VADDR(base_addr, q_id) ((u8 *)(base_addr) + (q_id) * SPNIC_CI_Q_ADDR_SIZE) + +#define SPNIC_CI_PADDR(base_paddr, q_id) ((base_paddr) + (q_id) * SPNIC_CI_Q_ADDR_SIZE) + +#define WQ_PREFETCH_MAX 4 +#define WQ_PREFETCH_MIN 1 +#define WQ_PREFETCH_THRESHOLD 256 + +#define SPNIC_Q_CTXT_MAX 31 /* (2048 - 8) / 64 */ + +enum spnic_qp_ctxt_type { + SPNIC_QP_CTXT_TYPE_SQ, + SPNIC_QP_CTXT_TYPE_RQ, +}; + +struct spnic_qp_ctxt_header { + u16 num_queues; + u16 queue_type; + u16 start_qid; + u16 rsvd; +}; + +struct spnic_sq_ctxt { + u32 ci_pi; + u32 drop_mode_sp; + u32 wq_pfn_hi_owner; + u32 wq_pfn_lo; + + u32 rsvd0; + u32 pkt_drop_thd; + u32 global_sq_id; + u32 vlan_ceq_attr; + + u32 pref_cache; + u32 pref_ci_owner; + u32 pref_wq_pfn_hi_ci; + u32 pref_wq_pfn_lo; + + u32 rsvd8; + u32 rsvd9; + u32 wq_block_pfn_hi; + u32 wq_block_pfn_lo; +}; + +struct spnic_rq_ctxt { + u32 ci_pi; + u32 ceq_attr; + u32 wq_pfn_hi_type_owner; + u32 wq_pfn_lo; + + u32 rsvd[3]; + u32 cqe_sge_len; + + u32 pref_cache; + u32 pref_ci_owner; + u32 pref_wq_pfn_hi_ci; + u32 pref_wq_pfn_lo; + + u32 pi_paddr_hi; + u32 pi_paddr_lo; + u32 wq_block_pfn_hi; + u32 wq_block_pfn_lo; +}; + +struct spnic_sq_ctxt_block { + struct spnic_qp_ctxt_header cmdq_hdr; + struct spnic_sq_ctxt sq_ctxt[SPNIC_Q_CTXT_MAX]; +}; + +struct spnic_rq_ctxt_block { + struct spnic_qp_ctxt_header cmdq_hdr; + struct spnic_rq_ctxt rq_ctxt[SPNIC_Q_CTXT_MAX]; +}; + +struct spnic_clean_queue_ctxt { + struct spnic_qp_ctxt_header cmdq_hdr; + u32 rsvd; +}; + +#define SQ_CTXT_SIZE(num_sqs) ((u16)(sizeof(struct spnic_qp_ctxt_header) + \ + (num_sqs) * sizeof(struct spnic_sq_ctxt))) + +#define RQ_CTXT_SIZE(num_rqs) ((u16)(sizeof(struct spnic_qp_ctxt_header) + \ + (num_rqs) * sizeof(struct spnic_rq_ctxt))) + +#define CI_IDX_HIGH_SHIFH 12 +
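The SQ/RQ context words above are filled in through the SHIFT/MASK layout macros that follow: one SET macro per register family pastes the field name into its matching _MASK/_SHIFT pair. A minimal standalone sketch of that pattern, using hypothetical DEMO_CTXT field names rather than the driver's own:

#include <stdio.h>

#define DEMO_CTXT_PI_IDX_SHIFT 0
#define DEMO_CTXT_CI_IDX_SHIFT 16

#define DEMO_CTXT_PI_IDX_MASK 0xFFFFU
#define DEMO_CTXT_CI_IDX_MASK 0xFFFFU

/* token pasting turns one macro into a setter for every field */
#define DEMO_CTXT_CI_PI_SET(val, member) \
	(((val) & DEMO_CTXT_##member##_MASK) << DEMO_CTXT_##member##_SHIFT)

int main(void)
{
	unsigned int ci_pi = DEMO_CTXT_CI_PI_SET(0x12U, CI_IDX) |
			     DEMO_CTXT_CI_PI_SET(0x34U, PI_IDX);

	/* 0x12 lands in bits 31..16, 0x34 in bits 15..0 */
	printf("ci_pi = 0x%08x\n", ci_pi); /* prints 0x00120034 */
	return 0;
}

The same shape is used in reverse by the GET macros later in the patch (e.g. the RQ CQE and SQ_CTRL accessors), which shift right first and then mask.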
+#define CI_HIGN_IDX(val) ((val) >> CI_IDX_HIGH_SHIFH) + +#define SQ_CTXT_PI_IDX_SHIFT 0 +#define SQ_CTXT_CI_IDX_SHIFT 16 + +#define SQ_CTXT_PI_IDX_MASK 0xFFFFU +#define SQ_CTXT_CI_IDX_MASK 0xFFFFU + +#define SQ_CTXT_CI_PI_SET(val, member) (((val) & SQ_CTXT_##member##_MASK) << \ + SQ_CTXT_##member##_SHIFT) + +#define SQ_CTXT_MODE_SP_FLAG_SHIFT 0 +#define SQ_CTXT_MODE_PKT_DROP_SHIFT 1 + +#define SQ_CTXT_MODE_SP_FLAG_MASK 0x1U +#define SQ_CTXT_MODE_PKT_DROP_MASK 0x1U + +#define SQ_CTXT_MODE_SET(val, member) (((val) & SQ_CTXT_MODE_##member##_MASK) << \ + SQ_CTXT_MODE_##member##_SHIFT) + +#define SQ_CTXT_WQ_PAGE_HI_PFN_SHIFT 0 +#define SQ_CTXT_WQ_PAGE_OWNER_SHIFT 23 + +#define SQ_CTXT_WQ_PAGE_HI_PFN_MASK 0xFFFFFU +#define SQ_CTXT_WQ_PAGE_OWNER_MASK 0x1U + +#define SQ_CTXT_WQ_PAGE_SET(val, member) (((val) & SQ_CTXT_WQ_PAGE_##member##_MASK) << \ + SQ_CTXT_WQ_PAGE_##member##_SHIFT) + +#define SQ_CTXT_PKT_DROP_THD_ON_SHIFT 0 +#define SQ_CTXT_PKT_DROP_THD_OFF_SHIFT 16 + +#define SQ_CTXT_PKT_DROP_THD_ON_MASK 0xFFFFU +#define SQ_CTXT_PKT_DROP_THD_OFF_MASK 0xFFFFU + +#define SQ_CTXT_PKT_DROP_THD_SET(val, member) (((val) & SQ_CTXT_PKT_DROP_##member##_MASK) << \ + SQ_CTXT_PKT_DROP_##member##_SHIFT) + +#define SQ_CTXT_GLOBAL_SQ_ID_SHIFT 0 + +#define SQ_CTXT_GLOBAL_SQ_ID_MASK 0x1FFFU + +#define SQ_CTXT_GLOBAL_QUEUE_ID_SET(val, member) (((val) & SQ_CTXT_##member##_MASK) << \ + SQ_CTXT_##member##_SHIFT) + +#define SQ_CTXT_VLAN_TAG_SHIFT 0 +#define SQ_CTXT_VLAN_TYPE_SEL_SHIFT 16 +#define SQ_CTXT_VLAN_INSERT_MODE_SHIFT 19 +#define SQ_CTXT_VLAN_CEQ_EN_SHIFT 23 + +#define SQ_CTXT_VLAN_TAG_MASK 0xFFFFU +#define SQ_CTXT_VLAN_TYPE_SEL_MASK 0x7U +#define SQ_CTXT_VLAN_INSERT_MODE_MASK 0x3U +#define SQ_CTXT_VLAN_CEQ_EN_MASK 0x1U + +#define SQ_CTXT_VLAN_CEQ_SET(val, member) (((val) & SQ_CTXT_VLAN_##member##_MASK) << \ + SQ_CTXT_VLAN_##member##_SHIFT) + +#define SQ_CTXT_PREF_CACHE_THRESHOLD_SHIFT 0 +#define SQ_CTXT_PREF_CACHE_MAX_SHIFT 14 +#define SQ_CTXT_PREF_CACHE_MIN_SHIFT 25 + +#define SQ_CTXT_PREF_CACHE_THRESHOLD_MASK 0x3FFFU +#define SQ_CTXT_PREF_CACHE_MAX_MASK 0x7FFU +#define SQ_CTXT_PREF_CACHE_MIN_MASK 0x7FU + +#define SQ_CTXT_PREF_CI_HI_SHIFT 0 +#define SQ_CTXT_PREF_OWNER_SHIFT 4 + +#define SQ_CTXT_PREF_CI_HI_MASK 0xFU +#define SQ_CTXT_PREF_OWNER_MASK 0x1U + +#define SQ_CTXT_PREF_WQ_PFN_HI_SHIFT 0 +#define SQ_CTXT_PREF_CI_LOW_SHIFT 20 + +#define SQ_CTXT_PREF_WQ_PFN_HI_MASK 0xFFFFFU +#define SQ_CTXT_PREF_CI_LOW_MASK 0xFFFU + +#define SQ_CTXT_PREF_SET(val, member) (((val) & SQ_CTXT_PREF_##member##_MASK) << \ + SQ_CTXT_PREF_##member##_SHIFT) + +#define SQ_CTXT_WQ_BLOCK_PFN_HI_SHIFT 0 + +#define SQ_CTXT_WQ_BLOCK_PFN_HI_MASK 0x7FFFFFU + +#define SQ_CTXT_WQ_BLOCK_SET(val, member) (((val) & SQ_CTXT_WQ_BLOCK_##member##_MASK) << \ + SQ_CTXT_WQ_BLOCK_##member##_SHIFT) + +#define RQ_CTXT_PI_IDX_SHIFT 0 +#define RQ_CTXT_CI_IDX_SHIFT 16 + +#define RQ_CTXT_PI_IDX_MASK 0xFFFFU +#define RQ_CTXT_CI_IDX_MASK 0xFFFFU + +#define RQ_CTXT_CI_PI_SET(val, member) (((val) & RQ_CTXT_##member##_MASK) << \ + RQ_CTXT_##member##_SHIFT) + +#define RQ_CTXT_CEQ_ATTR_INTR_SHIFT 21 +#define RQ_CTXT_CEQ_ATTR_EN_SHIFT 31 + +#define RQ_CTXT_CEQ_ATTR_INTR_MASK 0x3FFU +#define RQ_CTXT_CEQ_ATTR_EN_MASK 0x1U + +#define RQ_CTXT_CEQ_ATTR_SET(val, member) (((val) & RQ_CTXT_CEQ_ATTR_##member##_MASK) << \ + RQ_CTXT_CEQ_ATTR_##member##_SHIFT) + +#define RQ_CTXT_WQ_PAGE_HI_PFN_SHIFT 0 +#define RQ_CTXT_WQ_PAGE_WQE_TYPE_SHIFT 28 +#define RQ_CTXT_WQ_PAGE_OWNER_SHIFT 31 + +#define RQ_CTXT_WQ_PAGE_HI_PFN_MASK 0xFFFFFU +#define RQ_CTXT_WQ_PAGE_WQE_TYPE_MASK 0x3U 
+#define RQ_CTXT_WQ_PAGE_OWNER_MASK 0x1U + +#define RQ_CTXT_WQ_PAGE_SET(val, member) (((val) & RQ_CTXT_WQ_PAGE_##member##_MASK) << \ + RQ_CTXT_WQ_PAGE_##member##_SHIFT) + +#define RQ_CTXT_CQE_LEN_SHIFT 28 + +#define RQ_CTXT_CQE_LEN_MASK 0x3U + +#define RQ_CTXT_CQE_LEN_SET(val, member) (((val) & RQ_CTXT_##member##_MASK) << \ + RQ_CTXT_##member##_SHIFT) + +#define RQ_CTXT_PREF_CACHE_THRESHOLD_SHIFT 0 +#define RQ_CTXT_PREF_CACHE_MAX_SHIFT 14 +#define RQ_CTXT_PREF_CACHE_MIN_SHIFT 25 + +#define RQ_CTXT_PREF_CACHE_THRESHOLD_MASK 0x3FFFU +#define RQ_CTXT_PREF_CACHE_MAX_MASK 0x7FFU +#define RQ_CTXT_PREF_CACHE_MIN_MASK 0x7FU + +#define RQ_CTXT_PREF_CI_HI_SHIFT 0 +#define RQ_CTXT_PREF_OWNER_SHIFT 4 + +#define RQ_CTXT_PREF_CI_HI_MASK 0xFU +#define RQ_CTXT_PREF_OWNER_MASK 0x1U + +#define RQ_CTXT_PREF_WQ_PFN_HI_SHIFT 0 +#define RQ_CTXT_PREF_CI_LOW_SHIFT 20 + +#define RQ_CTXT_PREF_WQ_PFN_HI_MASK 0xFFFFFU +#define RQ_CTXT_PREF_CI_LOW_MASK 0xFFFU + +#define RQ_CTXT_PREF_SET(val, member) (((val) & RQ_CTXT_PREF_##member##_MASK) << \ + RQ_CTXT_PREF_##member##_SHIFT) + +#define RQ_CTXT_WQ_BLOCK_PFN_HI_SHIFT 0 + +#define RQ_CTXT_WQ_BLOCK_PFN_HI_MASK 0x7FFFFFU + +#define RQ_CTXT_WQ_BLOCK_SET(val, member) (((val) & RQ_CTXT_WQ_BLOCK_##member##_MASK) << \ + RQ_CTXT_WQ_BLOCK_##member##_SHIFT) + +#define SIZE_16BYTES(size) (ALIGN((size), 16) >> 4) + +#define WQ_PAGE_PFN_SHIFT 12 +#define WQ_BLOCK_PFN_SHIFT 9 + +#define WQ_PAGE_PFN(page_addr) ((page_addr) >> WQ_PAGE_PFN_SHIFT) +#define WQ_BLOCK_PFN(page_addr) ((page_addr) >> WQ_BLOCK_PFN_SHIFT) + +/* sq and rq */ +#define TOTAL_DB_NUM(num_qps) ((u16)(2 * (num_qps))) + +int spnic_create_sq(struct spnic_nic_cfg *nic_cfg, struct spnic_io_queue *sq, + u16 q_id, u32 sq_depth, u16 sq_msix_idx) +{ + int err; + + /* sq used & hardware request init 1*/ + sq->owner = 1; + + sq->q_id = q_id; + sq->msix_entry_idx = sq_msix_idx; + + err = sphw_wq_create(nic_cfg->hwdev, &sq->wq, sq_depth, (u16)BIT(SPNIC_SQ_WQEBB_SHIFT)); + if (err) { + sdk_err(nic_cfg->dev_hdl, "Failed to create tx queue(%u) wq\n", + q_id); + return err; + } + + return 0; +} + +void spnic_destroy_sq(struct spnic_nic_cfg *nic_cfg, struct spnic_io_queue *sq) +{ + sphw_wq_destroy(&sq->wq); +} + +int spnic_create_rq(struct spnic_nic_cfg *nic_cfg, struct spnic_io_queue *rq, + u16 q_id, u32 rq_depth, u16 rq_msix_idx) +{ + int err; + + rq->wqe_type = rq_wqe_type; + rq->q_id = q_id; + rq->msix_entry_idx = rq_msix_idx; + + err = sphw_wq_create(nic_cfg->hwdev, &rq->wq, rq_depth, + (u16)BIT(SPNIC_RQ_WQEBB_SHIFT + rq_wqe_type)); + if (err) { + sdk_err(nic_cfg->dev_hdl, "Failed to create rx queue(%u) wq\n", + q_id); + return err; + } + + rq->rx.pi_virt_addr = dma_alloc_coherent(nic_cfg->dev_hdl, PAGE_SIZE, + &rq->rx.pi_dma_addr, GFP_KERNEL); + if (!rq->rx.pi_virt_addr) { + sphw_wq_destroy(&rq->wq); + nic_err(nic_cfg->dev_hdl, "Failed to allocate rq pi virt addr\n"); + return -ENOMEM; + } + + return 0; +} + +void spnic_destroy_rq(struct spnic_nic_cfg *nic_cfg, struct spnic_io_queue *rq) +{ + dma_free_coherent(nic_cfg->dev_hdl, PAGE_SIZE, rq->rx.pi_virt_addr, + rq->rx.pi_dma_addr); + + sphw_wq_destroy(&rq->wq); +} + +static int create_qp(struct spnic_nic_cfg *nic_cfg, struct spnic_io_queue *sq, + struct spnic_io_queue *rq, u16 q_id, u32 sq_depth, + u32 rq_depth, u16 qp_msix_idx) +{ + int err; + + err = spnic_create_sq(nic_cfg, sq, q_id, sq_depth, qp_msix_idx); + if (err) { + nic_err(nic_cfg->dev_hdl, "Failed to create sq, qid: %u\n", + q_id); + return err; + } + + err = spnic_create_rq(nic_cfg, rq, q_id, rq_depth, qp_msix_idx); 
+ if (err) { + nic_err(nic_cfg->dev_hdl, "Failed to create rq, qid: %u\n", + q_id); + goto create_rq_err; + } + + return 0; + +create_rq_err: + spnic_destroy_sq(nic_cfg, sq); + + return err; +} + +void destroy_qp(struct spnic_nic_cfg *nic_cfg, struct spnic_io_queue *sq, + struct spnic_io_queue *rq) +{ + spnic_destroy_sq(nic_cfg, sq); + spnic_destroy_rq(nic_cfg, rq); +} + +/* Try to allocate the expected number of doorbells; on failure, free any + * newly allocated doorbells and return an error. + */ +static int spnic_doorbell_change(struct spnic_nic_cfg *nic_cfg, u16 dst_num_db) +{ + void __iomem *db_addr = NULL; + u16 cur_db_num = nic_cfg->allocated_num_db; + u16 db_num_gap, idx, i, cur_db_alloc; + int err; + + if (dst_num_db > nic_cfg->allocated_num_db) { + db_num_gap = dst_num_db - nic_cfg->allocated_num_db; + for (idx = 0; idx < db_num_gap; idx++) { + /* we don't use direct wqe for sq */ + err = sphw_alloc_db_addr(nic_cfg->hwdev, &db_addr, NULL); + if (err) { + nic_err(nic_cfg->dev_hdl, "Failed to alloc sq doorbell addr\n"); + goto alloc_db_err; + } + nic_cfg->db_addr[cur_db_num + idx] = db_addr; + } + + nic_cfg->allocated_num_db = dst_num_db; + } else if (dst_num_db < nic_cfg->allocated_num_db) { + db_num_gap = nic_cfg->allocated_num_db - dst_num_db; + for (idx = 0; idx < db_num_gap; idx++) { + cur_db_alloc = (cur_db_num - idx) - 1; + sphw_free_db_addr(nic_cfg->hwdev, nic_cfg->db_addr[cur_db_alloc], NULL); + nic_cfg->db_addr[cur_db_alloc] = NULL; + } + + nic_cfg->allocated_num_db = dst_num_db; + } + + return 0; + +alloc_db_err: + for (i = 0; i < idx; i++) { + sphw_free_db_addr(nic_cfg->hwdev, nic_cfg->db_addr[cur_db_num + i], NULL); + nic_cfg->db_addr[cur_db_num + i] = NULL; + } + + return -EFAULT; +} + +int spnic_init_nicio_res(void *hwdev) +{ + struct spnic_nic_cfg *nic_cfg = NULL; + + if (!hwdev) + return -EINVAL; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_cfg) { + pr_err("Failed to get nic service adapter\n"); + return -EFAULT; + } + + nic_cfg->max_qps = sphw_func_max_qnum(hwdev); + + nic_cfg->allocated_num_db = 0; + nic_cfg->db_addr = kcalloc(TOTAL_DB_NUM(nic_cfg->max_qps), + sizeof(*nic_cfg->db_addr), GFP_KERNEL); + if (!nic_cfg->db_addr) { + nic_err(nic_cfg->dev_hdl, "Failed to alloc db addr array\n"); + return -ENOMEM; + } + + nic_cfg->ci_vaddr_base = + dma_alloc_coherent(nic_cfg->dev_hdl, CI_TABLE_SIZE(nic_cfg->max_qps, PAGE_SIZE), + &nic_cfg->ci_dma_base, GFP_KERNEL); + if (!nic_cfg->ci_vaddr_base) { + kfree(nic_cfg->db_addr); + nic_err(nic_cfg->dev_hdl, "Failed to allocate ci area\n"); + return -ENOMEM; + } + + return 0; +} + +void spnic_deinit_nicio_res(void *hwdev) +{ + struct spnic_nic_cfg *nic_cfg = NULL; + + if (!hwdev) + return; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_cfg) { + pr_err("Failed to get nic service adapter\n"); + return; + } + + dma_free_coherent(nic_cfg->dev_hdl, CI_TABLE_SIZE(nic_cfg->max_qps, PAGE_SIZE), + nic_cfg->ci_vaddr_base, nic_cfg->ci_dma_base); + /* free all doorbells */ + spnic_doorbell_change(nic_cfg, 0); + kfree(nic_cfg->db_addr); +} + +int spnic_alloc_qps(void *hwdev, struct irq_info *qps_msix_arry, + struct spnic_dyna_qp_params *qp_params) +{ + struct spnic_io_queue *sqs = NULL; + struct spnic_io_queue *rqs = NULL; + struct spnic_nic_cfg *nic_cfg = NULL; + u16 q_id, i, cur_allocated_db, num_qps; + int err; + + if (!hwdev || !qps_msix_arry || !qp_params) + return -EINVAL; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_cfg) { + pr_err("Failed to get nic service adapter\n"); +
return -EFAULT; + } + + if (qp_params->num_qps > nic_cfg->max_qps) + return -EINVAL; + + num_qps = qp_params->num_qps; + + cur_allocated_db = nic_cfg->allocated_num_db; + if (cur_allocated_db < TOTAL_DB_NUM(num_qps)) { + err = spnic_doorbell_change(nic_cfg, TOTAL_DB_NUM(num_qps)); + if (err) + return err; + } + + sqs = kcalloc(num_qps, sizeof(*sqs), GFP_KERNEL); + if (!sqs) { + nic_err(nic_cfg->dev_hdl, "Failed to allocate sq\n"); + err = -ENOMEM; + goto alloc_sqs_err; + } + + rqs = kcalloc(num_qps, sizeof(*rqs), GFP_KERNEL); + if (!rqs) { + nic_err(nic_cfg->dev_hdl, "Failed to allocate rq\n"); + err = -ENOMEM; + goto alloc_rqs_err; + } + + for (q_id = 0; q_id < num_qps; q_id++) { + err = create_qp(nic_cfg, &sqs[q_id], &rqs[q_id], q_id, + qp_params->sq_depth, qp_params->rq_depth, + qps_msix_arry[q_id].msix_entry_idx); + if (err) { + nic_err(nic_cfg->dev_hdl, "Failed to allocate qp %u, err: %d\n", + q_id, err); + goto create_qp_err; + } + } + + qp_params->sqs = sqs; + qp_params->rqs = rqs; + + return 0; + +create_qp_err: + for (i = 0; i < q_id; i++) + destroy_qp(nic_cfg, &sqs[i], &rqs[i]); + + kfree(rqs); + +alloc_rqs_err: + kfree(sqs); + +alloc_sqs_err: + /* Only release the newly added doorbell resource, + * the old resource is still in use + */ + spnic_doorbell_change(nic_cfg, cur_allocated_db); + + return err; +} + +void spnic_free_qps(void *hwdev, struct spnic_dyna_qp_params *qp_params) +{ + struct spnic_nic_cfg *nic_cfg = NULL; + u16 q_id; + + if (!hwdev || !qp_params) + return; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_cfg) { + pr_err("Failed to get nic service adapter\n"); + return; + } + + for (q_id = 0; q_id < qp_params->num_qps; q_id++) + destroy_qp(nic_cfg, &qp_params->sqs[q_id], + &qp_params->rqs[q_id]); + + kfree(qp_params->sqs); + kfree(qp_params->rqs); +} + +void init_qps_info(struct spnic_nic_cfg *nic_cfg, struct spnic_dyna_qp_params *qp_params) +{ + struct spnic_io_queue *sqs = qp_params->sqs; + struct spnic_io_queue *rqs = qp_params->rqs; + u16 q_id; + + nic_cfg->num_qps = qp_params->num_qps; + nic_cfg->sq = qp_params->sqs; + nic_cfg->rq = qp_params->rqs; + for (q_id = 0; q_id < nic_cfg->num_qps; q_id++) { + sqs[q_id].tx.cons_idx_addr = SPNIC_CI_VADDR(nic_cfg->ci_vaddr_base, q_id); + /* clear ci value */ + *(u16 *)sqs[q_id].tx.cons_idx_addr = 0; + sqs[q_id].db_addr = nic_cfg->db_addr[q_id]; + + /* The first num_qps doorbell is used by sq */ + rqs[q_id].db_addr = nic_cfg->db_addr[nic_cfg->num_qps + q_id]; + } +} + +int spnic_init_qps(void *hwdev, struct spnic_dyna_qp_params *qp_params) +{ + struct spnic_nic_cfg *nic_cfg = NULL; + + if (!hwdev || !qp_params) + return -EINVAL; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_cfg) { + pr_err("Failed to get nic service adapter\n"); + return -EFAULT; + } + + if (nic_cfg->allocated_num_db > TOTAL_DB_NUM(qp_params->num_qps)) + spnic_doorbell_change(nic_cfg, TOTAL_DB_NUM(qp_params->num_qps)); + + init_qps_info(nic_cfg, qp_params); + + return spnic_init_qp_ctxts(hwdev); +} + +void spnic_deinit_qps(void *hwdev, struct spnic_dyna_qp_params *qp_params) +{ + struct spnic_nic_cfg *nic_cfg = NULL; + + if (!hwdev || !qp_params) + return; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_cfg) { + pr_err("Failed to get nic service adapter\n"); + return; + } + + qp_params->sqs = nic_cfg->sq; + qp_params->rqs = nic_cfg->rq; + qp_params->num_qps = nic_cfg->num_qps; + + spnic_free_qp_ctxts(hwdev); +} + +int spnic_create_qps(void *hwdev, u16 num_qp, u32 sq_depth, u32 
rq_depth, + struct irq_info *qps_msix_arry) +{ + struct spnic_nic_cfg *nic_cfg = NULL; + struct spnic_dyna_qp_params qp_params = {0}; + int err; + + if (!hwdev || !qps_msix_arry) + return -EFAULT; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_cfg) { + pr_err("Failed to get nic service adapter\n"); + return -EFAULT; + } + + err = spnic_init_nicio_res(hwdev); + if (err) + return err; + + qp_params.num_qps = num_qp; + qp_params.sq_depth = sq_depth; + qp_params.rq_depth = rq_depth; + err = spnic_alloc_qps(hwdev, qps_msix_arry, &qp_params); + if (err) { + spnic_deinit_nicio_res(hwdev); + nic_err(nic_cfg->dev_hdl, "Failed to allocate qps, err: %d\n", err); + return err; + } + + init_qps_info(nic_cfg, &qp_params); + + return 0; +} + +void spnic_destroy_qps(void *hwdev) +{ + struct spnic_nic_cfg *nic_cfg = NULL; + struct spnic_dyna_qp_params qp_params = {0}; + + if (!hwdev) + return; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_cfg) + return; + + spnic_deinit_qps(hwdev, &qp_params); + spnic_free_qps(hwdev, &qp_params); + spnic_deinit_nicio_res(hwdev); +} + +void *spnic_get_nic_queue(void *hwdev, u16 q_id, enum spnic_queue_type q_type) +{ + struct spnic_nic_cfg *nic_cfg = NULL; + + if (!hwdev || q_type >= SPNIC_MAX_QUEUE_TYPE) + return NULL; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_cfg) + return NULL; + + return ((q_type == SPNIC_SQ) ? &nic_cfg->sq[q_id] : &nic_cfg->rq[q_id]); +} + +void spnic_qp_prepare_cmdq_header(struct spnic_qp_ctxt_header *qp_ctxt_hdr, + enum spnic_qp_ctxt_type ctxt_type, + u16 num_queues, u16 q_id) +{ + qp_ctxt_hdr->queue_type = ctxt_type; + qp_ctxt_hdr->num_queues = num_queues; + qp_ctxt_hdr->start_qid = q_id; + qp_ctxt_hdr->rsvd = 0; + + sphw_cpu_to_be32(qp_ctxt_hdr, sizeof(*qp_ctxt_hdr)); +} + +void spnic_sq_prepare_ctxt(struct spnic_io_queue *sq, u16 sq_id, struct spnic_sq_ctxt *sq_ctxt) +{ + u64 wq_page_addr; + u64 wq_page_pfn, wq_block_pfn; + u32 wq_page_pfn_hi, wq_page_pfn_lo; + u32 wq_block_pfn_hi, wq_block_pfn_lo; + u16 pi_start, ci_start; + + ci_start = spnic_get_sq_local_ci(sq); + pi_start = spnic_get_sq_local_pi(sq); + + wq_page_addr = sphw_wq_get_first_wqe_page_addr(&sq->wq); + + wq_page_pfn = WQ_PAGE_PFN(wq_page_addr); + wq_page_pfn_hi = upper_32_bits(wq_page_pfn); + wq_page_pfn_lo = lower_32_bits(wq_page_pfn); + + wq_block_pfn = WQ_BLOCK_PFN(sq->wq.wq_block_paddr); + wq_block_pfn_hi = upper_32_bits(wq_block_pfn); + wq_block_pfn_lo = lower_32_bits(wq_block_pfn); + + sq_ctxt->ci_pi = + SQ_CTXT_CI_PI_SET(ci_start, CI_IDX) | + SQ_CTXT_CI_PI_SET(pi_start, PI_IDX); + + sq_ctxt->drop_mode_sp = + SQ_CTXT_MODE_SET(0, SP_FLAG) | + SQ_CTXT_MODE_SET(0, PKT_DROP); + + sq_ctxt->wq_pfn_hi_owner = + SQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) | + SQ_CTXT_WQ_PAGE_SET(1, OWNER); + + sq_ctxt->wq_pfn_lo = wq_page_pfn_lo; + + /* TO DO */ + sq_ctxt->pkt_drop_thd = + SQ_CTXT_PKT_DROP_THD_SET(tx_drop_thd_on, THD_ON) | + SQ_CTXT_PKT_DROP_THD_SET(tx_drop_thd_off, THD_OFF); + + sq_ctxt->global_sq_id = + SQ_CTXT_GLOBAL_QUEUE_ID_SET(sq_id, GLOBAL_SQ_ID); + + /* enable insert c-vlan in default */ + sq_ctxt->vlan_ceq_attr = + SQ_CTXT_VLAN_CEQ_SET(0, CEQ_EN) | + SQ_CTXT_VLAN_CEQ_SET(1, INSERT_MODE); + + sq_ctxt->rsvd0 = 0; + + sq_ctxt->pref_cache = + SQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) | + SQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) | + SQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD); + + sq_ctxt->pref_ci_owner = + SQ_CTXT_PREF_SET(CI_HIGN_IDX(ci_start), CI_HI) | + SQ_CTXT_PREF_SET(1, 
OWNER); + + sq_ctxt->pref_wq_pfn_hi_ci = + SQ_CTXT_PREF_SET(ci_start, CI_LOW) | + SQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_PFN_HI); + + sq_ctxt->pref_wq_pfn_lo = wq_page_pfn_lo; + + sq_ctxt->wq_block_pfn_hi = SQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, PFN_HI); + + sq_ctxt->wq_block_pfn_lo = wq_block_pfn_lo; + + sphw_cpu_to_be32(sq_ctxt, sizeof(*sq_ctxt)); +} + +void spnic_rq_prepare_ctxt(struct spnic_io_queue *rq, struct spnic_rq_ctxt *rq_ctxt) +{ + u64 wq_page_addr; + u64 wq_page_pfn, wq_block_pfn; + u32 wq_page_pfn_hi, wq_page_pfn_lo; + u32 wq_block_pfn_hi, wq_block_pfn_lo; + u16 pi_start, ci_start; + u16 wqe_type = rq->wqe_type; + + /* RQ depth is in unit of 8Bytes */ + ci_start = (u16)((u32)spnic_get_rq_local_ci(rq) << wqe_type); + pi_start = (u16)((u32)spnic_get_rq_local_pi(rq) << wqe_type); + + wq_page_addr = sphw_wq_get_first_wqe_page_addr(&rq->wq); + + wq_page_pfn = WQ_PAGE_PFN(wq_page_addr); + wq_page_pfn_hi = upper_32_bits(wq_page_pfn); + wq_page_pfn_lo = lower_32_bits(wq_page_pfn); + + wq_block_pfn = WQ_BLOCK_PFN(rq->wq.wq_block_paddr); + wq_block_pfn_hi = upper_32_bits(wq_block_pfn); + wq_block_pfn_lo = lower_32_bits(wq_block_pfn); + + rq_ctxt->ci_pi = + RQ_CTXT_CI_PI_SET(ci_start, CI_IDX) | + RQ_CTXT_CI_PI_SET(pi_start, PI_IDX); + + rq_ctxt->ceq_attr = RQ_CTXT_CEQ_ATTR_SET(0, EN) | + RQ_CTXT_CEQ_ATTR_SET(rq->msix_entry_idx, INTR); + + rq_ctxt->wq_pfn_hi_type_owner = + RQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) | + RQ_CTXT_WQ_PAGE_SET(1, OWNER); + + switch (wqe_type) { + case SPNIC_EXTEND_RQ_WQE: + /* use 32Byte WQE with SGE for CQE */ + rq_ctxt->wq_pfn_hi_type_owner |= RQ_CTXT_WQ_PAGE_SET(0, WQE_TYPE); + break; + case SPNIC_NORMAL_RQ_WQE: + /* use 16Byte WQE with 32Bytes SGE for CQE */ + rq_ctxt->wq_pfn_hi_type_owner |= RQ_CTXT_WQ_PAGE_SET(2, WQE_TYPE); + rq_ctxt->cqe_sge_len = RQ_CTXT_CQE_LEN_SET(1, CQE_LEN); + break; + default: + pr_err("Invalid rq wqe type: %u", wqe_type); + } + + rq_ctxt->wq_pfn_lo = wq_page_pfn_lo; + + rq_ctxt->pref_cache = + RQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) | + RQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) | + RQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD); + + rq_ctxt->pref_ci_owner = + RQ_CTXT_PREF_SET(CI_HIGN_IDX(ci_start), CI_HI) | + RQ_CTXT_PREF_SET(1, OWNER); + + rq_ctxt->pref_wq_pfn_hi_ci = + RQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_PFN_HI) | + RQ_CTXT_PREF_SET(ci_start, CI_LOW); + + rq_ctxt->pref_wq_pfn_lo = wq_page_pfn_lo; + + rq_ctxt->pi_paddr_hi = upper_32_bits(rq->rx.pi_dma_addr); + rq_ctxt->pi_paddr_lo = lower_32_bits(rq->rx.pi_dma_addr); + + rq_ctxt->wq_block_pfn_hi = RQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, PFN_HI); + + rq_ctxt->wq_block_pfn_lo = wq_block_pfn_lo; + + sphw_cpu_to_be32(rq_ctxt, sizeof(*rq_ctxt)); +} + +static int init_sq_ctxts(struct spnic_nic_cfg *nic_cfg) +{ + struct spnic_sq_ctxt_block *sq_ctxt_block = NULL; + struct spnic_sq_ctxt *sq_ctxt = NULL; + struct sphw_cmd_buf *cmd_buf = NULL; + struct spnic_io_queue *sq = NULL; + u64 out_param = 0; + u16 q_id, curr_id, max_ctxts, i; + int err = 0; + + cmd_buf = sphw_alloc_cmd_buf(nic_cfg->hwdev); + if (!cmd_buf) { + nic_err(nic_cfg->dev_hdl, "Failed to allocate cmd buf\n"); + return -ENOMEM; + } + + q_id = 0; + while (q_id < nic_cfg->num_qps) { + sq_ctxt_block = cmd_buf->buf; + sq_ctxt = sq_ctxt_block->sq_ctxt; + + max_ctxts = (nic_cfg->num_qps - q_id) > SPNIC_Q_CTXT_MAX ? 
+ SPNIC_Q_CTXT_MAX : (nic_cfg->num_qps - q_id); + + spnic_qp_prepare_cmdq_header(&sq_ctxt_block->cmdq_hdr, SPNIC_QP_CTXT_TYPE_SQ, + max_ctxts, q_id); + + for (i = 0; i < max_ctxts; i++) { + curr_id = q_id + i; + sq = &nic_cfg->sq[curr_id]; + + spnic_sq_prepare_ctxt(sq, curr_id, &sq_ctxt[i]); + } + + cmd_buf->size = SQ_CTXT_SIZE(max_ctxts); + + err = sphw_cmdq_direct_resp(nic_cfg->hwdev, SPHW_MOD_L2NIC, + SPNIC_UCODE_CMD_MODIFY_QUEUE_CTX, + cmd_buf, &out_param, 0, SPHW_CHANNEL_NIC); + if (err || out_param != 0) { + nic_err(nic_cfg->dev_hdl, "Failed to set SQ ctxts, err: %d, out_param: 0x%llx\n", + err, out_param); + + err = -EFAULT; + break; + } + + q_id += max_ctxts; + } + + sphw_free_cmd_buf(nic_cfg->hwdev, cmd_buf); + + return err; +} + +static int init_rq_ctxts(struct spnic_nic_cfg *nic_cfg) +{ + struct spnic_rq_ctxt_block *rq_ctxt_block = NULL; + struct spnic_rq_ctxt *rq_ctxt = NULL; + struct sphw_cmd_buf *cmd_buf = NULL; + struct spnic_io_queue *rq = NULL; + u64 out_param = 0; + u16 q_id, curr_id, max_ctxts, i; + int err = 0; + + cmd_buf = sphw_alloc_cmd_buf(nic_cfg->hwdev); + if (!cmd_buf) { + nic_err(nic_cfg->dev_hdl, "Failed to allocate cmd buf\n"); + return -ENOMEM; + } + + q_id = 0; + while (q_id < nic_cfg->num_qps) { + rq_ctxt_block = cmd_buf->buf; + rq_ctxt = rq_ctxt_block->rq_ctxt; + + max_ctxts = (nic_cfg->num_qps - q_id) > SPNIC_Q_CTXT_MAX ? + SPNIC_Q_CTXT_MAX : (nic_cfg->num_qps - q_id); + + spnic_qp_prepare_cmdq_header(&rq_ctxt_block->cmdq_hdr, SPNIC_QP_CTXT_TYPE_RQ, + max_ctxts, q_id); + + for (i = 0; i < max_ctxts; i++) { + curr_id = q_id + i; + rq = &nic_cfg->rq[curr_id]; + + spnic_rq_prepare_ctxt(rq, &rq_ctxt[i]); + } + + cmd_buf->size = RQ_CTXT_SIZE(max_ctxts); + + err = sphw_cmdq_direct_resp(nic_cfg->hwdev, SPHW_MOD_L2NIC, + SPNIC_UCODE_CMD_MODIFY_QUEUE_CTX, + cmd_buf, &out_param, 0, SPHW_CHANNEL_NIC); + + if (err || out_param != 0) { + nic_err(nic_cfg->dev_hdl, "Failed to set RQ ctxts, err: %d, out_param: 0x%llx\n", + err, out_param); + + err = -EFAULT; + break; + } + + q_id += max_ctxts; + } + + sphw_free_cmd_buf(nic_cfg->hwdev, cmd_buf); + + return err; +} + +static int init_qp_ctxts(struct spnic_nic_cfg *nic_cfg) +{ + int err; + + err = init_sq_ctxts(nic_cfg); + if (err) + return err; + + err = init_rq_ctxts(nic_cfg); + if (err) + return err; + + return 0; +} + +static int clean_queue_offload_ctxt(struct spnic_nic_cfg *nic_cfg, + enum spnic_qp_ctxt_type ctxt_type) +{ + struct spnic_clean_queue_ctxt *ctxt_block = NULL; + struct sphw_cmd_buf *cmd_buf = NULL; + u64 out_param = 0; + int err; + + cmd_buf = sphw_alloc_cmd_buf(nic_cfg->hwdev); + if (!cmd_buf) { + nic_err(nic_cfg->dev_hdl, "Failed to allocate cmd buf\n"); + return -ENOMEM; + } + + ctxt_block = cmd_buf->buf; + ctxt_block->cmdq_hdr.num_queues = nic_cfg->max_qps; + ctxt_block->cmdq_hdr.queue_type = ctxt_type; + ctxt_block->cmdq_hdr.start_qid = 0; + + sphw_cpu_to_be32(ctxt_block, sizeof(*ctxt_block)); + + cmd_buf->size = sizeof(*ctxt_block); + + err = sphw_cmdq_direct_resp(nic_cfg->hwdev, SPHW_MOD_L2NIC, + SPNIC_UCODE_CMD_CLEAN_QUEUE_CONTEXT, + cmd_buf, &out_param, 0, SPHW_CHANNEL_NIC); + + if ((err) || (out_param)) { + nic_err(nic_cfg->dev_hdl, "Failed to clean queue offload ctxts, err: %d,out_param: 0x%llx\n", + err, out_param); + + err = -EFAULT; + } + + sphw_free_cmd_buf(nic_cfg->hwdev, cmd_buf); + + return err; +} + +static int clean_qp_offload_ctxt(struct spnic_nic_cfg *nic_cfg) +{ + /* clean LRO/TSO context space */ + return (clean_queue_offload_ctxt(nic_cfg, SPNIC_QP_CTXT_TYPE_SQ) || + 
clean_queue_offload_ctxt(nic_cfg, SPNIC_QP_CTXT_TYPE_RQ)); +} + +/* init qps ctxt and set sq ci attr and arm all sq*/ +int spnic_init_qp_ctxts(void *hwdev) +{ + struct spnic_nic_cfg *nic_cfg = NULL; + struct spnic_sq_attr sq_attr; + u32 rq_depth; + u16 q_id; + int err; + + if (!hwdev) + return -EINVAL; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_cfg) + return -EFAULT; + + err = init_qp_ctxts(nic_cfg); + if (err) { + nic_err(nic_cfg->dev_hdl, "Failed to init QP ctxts\n"); + return err; + } + + /* clean LRO/TSO context space */ + err = clean_qp_offload_ctxt(nic_cfg); + if (err) { + nic_err(nic_cfg->dev_hdl, "Failed to clean qp offload ctxts\n"); + return err; + } + + rq_depth = nic_cfg->rq[0].wq.q_depth << nic_cfg->rq[0].wqe_type; + + err = sphw_set_root_ctxt(hwdev, rq_depth, nic_cfg->sq[0].wq.q_depth, + nic_cfg->rx_buff_len, SPHW_CHANNEL_NIC); + if (err) { + nic_err(nic_cfg->dev_hdl, "Failed to set root context\n"); + return err; + } + + for (q_id = 0; q_id < nic_cfg->num_qps; q_id++) { + sq_attr.ci_dma_base = SPNIC_CI_PADDR(nic_cfg->ci_dma_base, q_id) >> 2; + sq_attr.pending_limit = tx_pending_limit; + sq_attr.coalescing_time = tx_coalescing_time; + sq_attr.intr_en = 1; + sq_attr.intr_idx = nic_cfg->sq[q_id].msix_entry_idx; + sq_attr.l2nic_sqn = q_id; + sq_attr.dma_attr_off = 0; + err = spnic_set_ci_table(hwdev, &sq_attr); + if (err) { + nic_err(nic_cfg->dev_hdl, "Failed to set ci table\n"); + goto set_cons_idx_table_err; + } + } + + return 0; + +set_cons_idx_table_err: + sphw_clean_root_ctxt(hwdev, SPHW_CHANNEL_NIC); + + return err; +} + +void spnic_free_qp_ctxts(void *hwdev) +{ + if (!hwdev) + return; + + sphw_clean_root_ctxt(hwdev, SPHW_CHANNEL_NIC); +} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_io.h b/drivers/net/ethernet/ramaxel/spnic/spnic_nic_io.h new file mode 100644 index 000000000000..ab1399d3e36c --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/spnic_nic_io.h @@ -0,0 +1,309 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#ifndef SPNIC_NIC_IO_H +#define SPNIC_NIC_IO_H + +#include "sphw_crm.h" +#include "sphw_common.h" +#include "sphw_wq.h" + +#define SPNIC_MAX_TX_QUEUE_DEPTH 65536 +#define SPNIC_MAX_RX_QUEUE_DEPTH 16384 + +#define SPNIC_MIN_QUEUE_DEPTH 128 + +#define SPNIC_SQ_WQEBB_SHIFT 4 +#define SPNIC_RQ_WQEBB_SHIFT 3 + +#define SPNIC_SQ_WQEBB_SIZE BIT(SPNIC_SQ_WQEBB_SHIFT) +#define SPNIC_CQE_SIZE_SHIFT 4 + +enum spnic_rq_wqe_type { + SPNIC_COMPACT_RQ_WQE, + SPNIC_NORMAL_RQ_WQE, + SPNIC_EXTEND_RQ_WQE, +}; + +struct spnic_io_queue { + struct sphw_wq wq; + union { + u8 wqe_type; /* for rq */ + u8 owner; /* for sq */ + }; + + u16 q_id; + u16 msix_entry_idx; + + u8 __iomem *db_addr; + + union { + struct { + void *cons_idx_addr; + } tx; + + struct { + u16 *pi_virt_addr; + dma_addr_t pi_dma_addr; + } rx; + }; +} ____cacheline_aligned; + +struct spnic_nic_db { + u32 db_info; + u32 pi_hi; +}; + +/* * + * @brief spnic_get_sq_free_wqebbs - get send queue free wqebb + * @param sq: send queue + * @retval : number of free wqebb + */ +static inline u16 spnic_get_sq_free_wqebbs(struct spnic_io_queue *sq) +{ + return sphw_wq_free_wqebbs(&sq->wq); +} + +/* * + * @brief spnic_update_sq_local_ci - update send queue local consumer index + * @param sq: send queue + * @param wqe_cnt: number of wqebb + */ +static inline void spnic_update_sq_local_ci(struct spnic_io_queue *sq, u16 wqebb_cnt) +{ + sphw_wq_put_wqebbs(&sq->wq, wqebb_cnt); +} + +/* * + * @brief spnic_get_sq_local_ci - get send queue 
local consumer index + * @param sq: send queue + * @retval : local consumer index + */ +static inline u16 spnic_get_sq_local_ci(struct spnic_io_queue *sq) +{ + return WQ_MASK_IDX(&sq->wq, sq->wq.cons_idx); +} + +/* * + * @brief spnic_get_sq_local_pi - get send queue local producer index + * @param sq: send queue + * @retval : local producer index + */ +static inline u16 spnic_get_sq_local_pi(struct spnic_io_queue *sq) +{ + return WQ_MASK_IDX(&sq->wq, sq->wq.prod_idx); +} + +/* * + * @brief spnic_get_sq_hw_ci - get send queue hardware consumer index + * @param sq: send queue + * @retval : hardware consumer index + */ +static inline u16 spnic_get_sq_hw_ci(struct spnic_io_queue *sq) +{ + return WQ_MASK_IDX(&sq->wq, + sphw_hw_cpu16(*(u16 *)sq->tx.cons_idx_addr)); +} + +/* * + * @brief spnic_get_sq_one_wqebb - get send queue wqe with single wqebb + * @param sq: send queue + * @param pi: return current pi + * @retval : wqe base address + */ +static inline void *spnic_get_sq_one_wqebb(struct spnic_io_queue *sq, u16 *pi) +{ + return sphw_wq_get_one_wqebb(&sq->wq, pi); +} + +/* * + * @brief spnic_get_sq_multi_wqebb - get send queue wqe with multiple wqebbs + * @param sq: send queue + * @param wqebb_cnt: wqebb counter + * @param pi: return current pi + * @param second_part_wqebbs_addr: second part wqebbs base address + * @param first_part_wqebbs_num: number wqebbs of first part + * @retval : first part wqebbs base address + */ +static inline void *spnic_get_sq_multi_wqebbs(struct spnic_io_queue *sq, u16 wqebb_cnt, u16 *pi, + void **second_part_wqebbs_addr, + u16 *first_part_wqebbs_num) +{ + return sphw_wq_get_multi_wqebbs(&sq->wq, wqebb_cnt, pi, second_part_wqebbs_addr, + first_part_wqebbs_num); +} + +/* * + * @brief spnic_get_and_update_sq_owner - get and update send queue owner bit + * @param sq: send queue + * @param curr_pi: current pi + * @param wqebb_cnt: wqebb counter + * @retval : owner bit + */ +static inline u16 spnic_get_and_update_sq_owner(struct spnic_io_queue *sq, + u16 curr_pi, u16 wqebb_cnt) +{ + u16 owner = sq->owner; + + if (unlikely(curr_pi + wqebb_cnt >= sq->wq.q_depth)) + sq->owner = !sq->owner; + + return owner; +} + +/* * + * @brief spnic_get_sq_wqe_with_owner - get send queue wqe with owner + * @param sq: send queue + * @param wqebb_cnt: wqebb counter + * @param pi: return current pi + * @param owner: return owner bit + * @param second_part_wqebbs_addr: second part wqebbs base address + * @param first_part_wqebbs_num: number wqebbs of first part + * @retval : first part wqebbs base address + */ +static inline void *spnic_get_sq_wqe_with_owner(struct spnic_io_queue *sq, + u16 wqebb_cnt, u16 *pi, u16 *owner, + void **second_part_wqebbs_addr, + u16 *first_part_wqebbs_num) +{ + void *wqe = sphw_wq_get_multi_wqebbs(&sq->wq, wqebb_cnt, pi, second_part_wqebbs_addr, + first_part_wqebbs_num); + + *owner = sq->owner; + if (unlikely(*pi + wqebb_cnt >= sq->wq.q_depth)) + sq->owner = !sq->owner; + + return wqe; +} + +/* * + * @brief spnic_rollback_sq_wqebbs - rollback send queue wqe + * @param sq: send queue + * @param wqebb_cnt: wqebb counter + * @param owner: owner bit + */ +static inline void spnic_rollback_sq_wqebbs(struct spnic_io_queue *sq, u16 wqebb_cnt, u16 owner) +{ + if (owner != sq->owner) + sq->owner = owner; + sq->wq.prod_idx -= wqebb_cnt; +} + +/* * + * @brief spnic_rq_wqe_addr - get receive queue wqe address by queue index + * @param rq: receive queue + * @param idx: wq index + * @retval: wqe base address + */ +static inline void *spnic_rq_wqe_addr(struct spnic_io_queue 
*rq, u16 idx) +{ + return sphw_wq_wqebb_addr(&rq->wq, idx); +} + +/* * + * @brief spnic_update_rq_hw_pi - update receive queue hardware pi + * @param rq: receive queue + * @param pi: pi + */ +static inline void spnic_update_rq_hw_pi(struct spnic_io_queue *rq, u16 pi) +{ + *rq->rx.pi_virt_addr = cpu_to_be16((pi & rq->wq.idx_mask) << rq->wqe_type); +} + +/* * + * @brief spnic_update_rq_local_ci - update receive queue local consumer index + * @param sq: receive queue + * @param wqe_cnt: number of wqebb + */ +static inline void spnic_update_rq_local_ci(struct spnic_io_queue *rq, u16 wqebb_cnt) +{ + sphw_wq_put_wqebbs(&rq->wq, wqebb_cnt); +} + +/* * + * @brief spnic_get_rq_local_ci - get receive queue local ci + * @param rq: receive queue + * @retval: receive queue local ci + */ +static inline u16 spnic_get_rq_local_ci(struct spnic_io_queue *rq) +{ + return WQ_MASK_IDX(&rq->wq, rq->wq.cons_idx); +} + +/* * + * @brief spnic_get_rq_local_pi - get receive queue local pi + * @param rq: receive queue + * @retval: receive queue local pi + */ +static inline u16 spnic_get_rq_local_pi(struct spnic_io_queue *rq) +{ + return WQ_MASK_IDX(&rq->wq, rq->wq.prod_idx); +} + +/* ******************** DB INFO ******************** */ +#define DB_INFO_QID_SHIFT 0 +#define DB_INFO_NON_FILTER_SHIFT 22 +#define DB_INFO_CFLAG_SHIFT 23 +#define DB_INFO_COS_SHIFT 24 +#define DB_INFO_TYPE_SHIFT 27 + +#define DB_INFO_QID_MASK 0x1FFFU +#define DB_INFO_NON_FILTER_MASK 0x1U +#define DB_INFO_CFLAG_MASK 0x1U +#define DB_INFO_COS_MASK 0x7U +#define DB_INFO_TYPE_MASK 0x1FU +#define DB_INFO_SET(val, member) \ + (((u32)(val) & DB_INFO_##member##_MASK) << \ + DB_INFO_##member##_SHIFT) + +#define DB_PI_LOW_MASK 0xFFU +#define DB_PI_HIGH_MASK 0xFFU +#define DB_PI_LOW(pi) ((pi) & DB_PI_LOW_MASK) +#define DB_PI_HI_SHIFT 8 +#define DB_PI_HIGH(pi) (((pi) >> DB_PI_HI_SHIFT) & DB_PI_HIGH_MASK) +#define DB_ADDR(queue, pi) ((u64 *)((queue)->db_addr) + DB_PI_LOW(pi)) +#define SRC_TYPE 1 + +/* CFLAG_DATA_PATH */ +#define SQ_CFLAG_DP 0 +#define RQ_CFLAG_DP 1 +/* * + * @brief spnic_write_db - write doorbell + * @param queue: nic io queue + * @param cos: cos index + * @param cflag: 0--sq, 1--rq + * @param pi: product index + */ +static inline void spnic_write_db(struct spnic_io_queue *queue, int cos, u8 cflag, u16 pi) +{ + struct spnic_nic_db db; + + db.db_info = DB_INFO_SET(SRC_TYPE, TYPE) | DB_INFO_SET(cflag, CFLAG) | + DB_INFO_SET(cos, COS) | DB_INFO_SET(queue->q_id, QID); + db.pi_hi = DB_PI_HIGH(pi); + /* Data should be written to HW in Big Endian Format */ + db.db_info = sphw_hw_be32(db.db_info); + db.pi_hi = sphw_hw_be32(db.pi_hi); + + wmb(); /* Write all before the doorbell */ + + writeq(*((u64 *)&db), DB_ADDR(queue, pi)); +} + +struct spnic_dyna_qp_params { + u16 num_qps; + u32 sq_depth; + u32 rq_depth; + + struct spnic_io_queue *sqs; + struct spnic_io_queue *rqs; +}; + +int spnic_alloc_qps(void *hwdev, struct irq_info *qps_msix_arry, + struct spnic_dyna_qp_params *qp_params); +void spnic_free_qps(void *hwdev, struct spnic_dyna_qp_params *qp_params); +int spnic_init_qps(void *hwdev, struct spnic_dyna_qp_params *qp_params); +void spnic_deinit_qps(void *hwdev, struct spnic_dyna_qp_params *qp_params); +int spnic_init_nicio_res(void *hwdev); +void spnic_deinit_nicio_res(void *hwdev); +#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_qp.h b/drivers/net/ethernet/ramaxel/spnic/spnic_nic_qp.h new file mode 100644 index 000000000000..4c3f37e45a36 --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/spnic_nic_qp.h @@ -0,0 +1,421 @@ 
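For reference before the QP/WQE definitions: the doorbell composition that spnic_write_db() in spnic_nic_io.h above performs can be sketched in isolation. This standalone program mirrors the DB_INFO_* masks shown there; the queue id, CoS, and PI values are illustrative, and the real path additionally byte-swaps both words to big endian and issues the MMIO writeq through DB_ADDR():

#include <stdio.h>

#define DB_INFO_QID_SHIFT 0
#define DB_INFO_CFLAG_SHIFT 23
#define DB_INFO_COS_SHIFT 24
#define DB_INFO_TYPE_SHIFT 27

#define DB_INFO_QID_MASK 0x1FFFU
#define DB_INFO_CFLAG_MASK 0x1U
#define DB_INFO_COS_MASK 0x7U
#define DB_INFO_TYPE_MASK 0x1FU

#define DB_INFO_SET(val, member) \
	(((unsigned int)(val) & DB_INFO_##member##_MASK) << DB_INFO_##member##_SHIFT)

int main(void)
{
	unsigned int pi = 0x1234; /* producer index */
	unsigned int db_info = DB_INFO_SET(1, TYPE) |  /* SRC_TYPE */
			       DB_INFO_SET(0, CFLAG) | /* SQ_CFLAG_DP */
			       DB_INFO_SET(3, COS) |
			       DB_INFO_SET(5, QID);

	/* the low 8 PI bits select the doorbell slot, the high 8 ride in pi_hi */
	printf("db_info = 0x%08x\n", db_info); /* 0x0b000005 */
	printf("db slot = %u, pi_hi = 0x%02x\n", pi & 0xFFU, (pi >> 8) & 0xFFU);
	return 0;
}

Splitting the PI this way is what lets the hardware recover the full 16-bit producer index from the doorbell address offset plus the pi_hi word.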
+/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#ifndef SPNIC_NIC_QP_H +#define SPNIC_NIC_QP_H + +#include "sphw_common.h" + +#define TX_MSS_DEFAULT 0x3E00 +#define TX_MSS_MIN 0x50 + +#define SPNIC_MAX_SQ_SGE 18 + +#define RQ_CQE_OFFOLAD_TYPE_PKT_TYPE_SHIFT 0 +#define RQ_CQE_OFFOLAD_TYPE_IP_TYPE_SHIFT 5 +#define RQ_CQE_OFFOLAD_TYPE_ENC_L3_TYPE_SHIFT 7 +#define RQ_CQE_OFFOLAD_TYPE_TUNNEL_PKT_FORMAT_SHIFT 8 +#define RQ_CQE_OFFOLAD_TYPE_PKT_UMBCAST_SHIFT 19 +#define RQ_CQE_OFFOLAD_TYPE_VLAN_EN_SHIFT 21 +#define RQ_CQE_OFFOLAD_TYPE_RSS_TYPE_SHIFT 24 + +#define RQ_CQE_OFFOLAD_TYPE_PKT_TYPE_MASK 0x1FU +#define RQ_CQE_OFFOLAD_TYPE_IP_TYPE_MASK 0x3U +#define RQ_CQE_OFFOLAD_TYPE_ENC_L3_TYPE_MASK 0x1U +#define RQ_CQE_OFFOLAD_TYPE_TUNNEL_PKT_FORMAT_MASK 0xFU +#define RQ_CQE_OFFOLAD_TYPE_PKT_UMBCAST_MASK 0x3U +#define RQ_CQE_OFFOLAD_TYPE_VLAN_EN_MASK 0x1U +#define RQ_CQE_OFFOLAD_TYPE_RSS_TYPE_MASK 0xFFU + +#define RQ_CQE_OFFOLAD_TYPE_GET(val, member) \ + (((val) >> RQ_CQE_OFFOLAD_TYPE_##member##_SHIFT) & \ + RQ_CQE_OFFOLAD_TYPE_##member##_MASK) + +#define SPNIC_GET_RX_PKT_TYPE(offload_type) \ + RQ_CQE_OFFOLAD_TYPE_GET(offload_type, PKT_TYPE) +#define SPNIC_GET_RX_IP_TYPE(offload_type) \ + RQ_CQE_OFFOLAD_TYPE_GET(offload_type, IP_TYPE) +#define SPNIC_GET_RX_ENC_L3_TYPE(offload_type) \ + RQ_CQE_OFFOLAD_TYPE_GET(offload_type, ENC_L3_TYPE) +#define SPNIC_GET_RX_TUNNEL_PKT_FORMAT(offload_type) \ + RQ_CQE_OFFOLAD_TYPE_GET(offload_type, TUNNEL_PKT_FORMAT) + +#define SPNIC_GET_RX_PKT_UMBCAST(offload_type) \ + RQ_CQE_OFFOLAD_TYPE_GET(offload_type, PKT_UMBCAST) + +#define SPNIC_GET_RX_VLAN_OFFLOAD_EN(offload_type) \ + RQ_CQE_OFFOLAD_TYPE_GET(offload_type, VLAN_EN) + +#define SPNIC_GET_RSS_TYPES(offload_type) \ + RQ_CQE_OFFOLAD_TYPE_GET(offload_type, RSS_TYPE) + +#define RQ_CQE_SGE_VLAN_SHIFT 0 +#define RQ_CQE_SGE_LEN_SHIFT 16 + +#define RQ_CQE_SGE_VLAN_MASK 0xFFFFU +#define RQ_CQE_SGE_LEN_MASK 0xFFFFU + +#define RQ_CQE_SGE_GET(val, member) \ + (((val) >> RQ_CQE_SGE_##member##_SHIFT) & RQ_CQE_SGE_##member##_MASK) + +#define SPNIC_GET_RX_VLAN_TAG(vlan_len) RQ_CQE_SGE_GET(vlan_len, VLAN) + +#define SPNIC_GET_RX_PKT_LEN(vlan_len) RQ_CQE_SGE_GET(vlan_len, LEN) + +#define RQ_CQE_STATUS_CSUM_ERR_SHIFT 0 +#define RQ_CQE_STATUS_NUM_LRO_SHIFT 16 +#define RQ_CQE_STATUS_LRO_PUSH_SHIFT 25 +#define RQ_CQE_STATUS_LRO_ENTER_SHIFT 26 +#define RQ_CQE_STATUS_LRO_INTR_SHIFT 27 + +#define RQ_CQE_STATUS_BP_EN_SHIFT 30 +#define RQ_CQE_STATUS_RXDONE_SHIFT 31 +#define RQ_CQE_STATUS_DECRY_PKT_SHIFT 29 +#define RQ_CQE_STATUS_FLUSH_SHIFT 28 + +#define RQ_CQE_STATUS_CSUM_ERR_MASK 0xFFFFU +#define RQ_CQE_STATUS_NUM_LRO_MASK 0xFFU +#define RQ_CQE_STATUS_LRO_PUSH_MASK 0X1U +#define RQ_CQE_STATUS_LRO_ENTER_MASK 0X1U +#define RQ_CQE_STATUS_LRO_INTR_MASK 0X1U +#define RQ_CQE_STATUS_BP_EN_MASK 0X1U +#define RQ_CQE_STATUS_RXDONE_MASK 0x1U +#define RQ_CQE_STATUS_FLUSH_MASK 0x1U +#define RQ_CQE_STATUS_DECRY_PKT_MASK 0x1U + +#define RQ_CQE_STATUS_GET(val, member) \ + (((val) >> RQ_CQE_STATUS_##member##_SHIFT) & \ + RQ_CQE_STATUS_##member##_MASK) + +#define SPNIC_GET_RX_CSUM_ERR(status) RQ_CQE_STATUS_GET(status, CSUM_ERR) + +#define SPNIC_GET_RX_DONE(status) RQ_CQE_STATUS_GET(status, RXDONE) + +#define SPNIC_GET_RX_FLUSH(status) RQ_CQE_STATUS_GET(status, FLUSH) + +#define SPNIC_GET_RX_BP_EN(status) RQ_CQE_STATUS_GET(status, BP_EN) + +#define SPNIC_GET_RX_NUM_LRO(status) RQ_CQE_STATUS_GET(status, NUM_LRO) + +#define SPNIC_RX_IS_DECRY_PKT(status) RQ_CQE_STATUS_GET(status, DECRY_PKT) + +#define 
RQ_CQE_SUPER_CQE_EN_SHIFT 0 +#define RQ_CQE_PKT_NUM_SHIFT 1 +#define RQ_CQE_PKT_LAST_LEN_SHIFT 6 +#define RQ_CQE_PKT_FIRST_LEN_SHIFT 19 + +#define RQ_CQE_SUPER_CQE_EN_MASK 0x1 +#define RQ_CQE_PKT_NUM_MASK 0x1FU +#define RQ_CQE_PKT_FIRST_LEN_MASK 0x1FFFU +#define RQ_CQE_PKT_LAST_LEN_MASK 0x1FFFU + +#define RQ_CQE_PKT_NUM_GET(val, member) \ + (((val) >> RQ_CQE_PKT_##member##_SHIFT) & RQ_CQE_PKT_##member##_MASK) +#define SPNIC_GET_RQ_CQE_PKT_NUM(pkt_info) RQ_CQE_PKT_NUM_GET(pkt_info, NUM) + +#define RQ_CQE_SUPER_CQE_EN_GET(val, member) \ + (((val) >> RQ_CQE_##member##_SHIFT) & RQ_CQE_##member##_MASK) +#define SPNIC_GET_SUPER_CQE_EN(pkt_info) \ + RQ_CQE_SUPER_CQE_EN_GET(pkt_info, SUPER_CQE_EN) + +#define RQ_CQE_PKT_LEN_GET(val, member) \ + (((val) >> RQ_CQE_PKT_##member##_SHIFT) & RQ_CQE_PKT_##member##_MASK) + +#define RQ_CQE_DECRY_INFO_DECRY_STATUS_SHIFT 8 +#define RQ_CQE_DECRY_INFO_ESP_NEXT_HEAD_SHIFT 0 + +#define RQ_CQE_DECRY_INFO_DECRY_STATUS_MASK 0xFFU +#define RQ_CQE_DECRY_INFO_ESP_NEXT_HEAD_MASK 0xFFU + +#define RQ_CQE_DECRY_INFO_GET(val, member) \ + (((val) >> RQ_CQE_DECRY_INFO_##member##_SHIFT) & \ + RQ_CQE_DECRY_INFO_##member##_MASK) + +#define SPNIC_GET_DECRYPT_STATUS(decry_info) \ + RQ_CQE_DECRY_INFO_GET(decry_info, DECRY_STATUS) + +#define SPNIC_GET_ESP_NEXT_HEAD(decry_info) \ + RQ_CQE_DECRY_INFO_GET(decry_info, ESP_NEXT_HEAD) + +struct spnic_rq_cqe { + u32 status; + u32 vlan_len; + + u32 offload_type; + u32 hash_val; + u32 xid; + u32 decrypt_info; + u32 rsvd6; + u32 pkt_info; +}; + +struct spnic_sge_sect { + struct sphw_sge sge; + u32 rsvd; +}; + +struct spnic_rq_extend_wqe { + struct spnic_sge_sect buf_desc; + struct spnic_sge_sect cqe_sect; +}; + +struct spnic_rq_normal_wqe { + u32 buf_hi_addr; + u32 buf_lo_addr; + u32 cqe_hi_addr; + u32 cqe_lo_addr; +}; + +struct spnic_rq_wqe { + union { + struct spnic_rq_normal_wqe normal_wqe; + struct spnic_rq_extend_wqe extend_wqe; + }; +}; + +struct spnic_sq_wqe_desc { + u32 ctrl_len; + u32 queue_info; + u32 hi_addr; + u32 lo_addr; +}; + +/* Engine only pass first 12B TS field directly to uCode through metadata + * vlan_offoad is used for hardware when vlan insert in tx + */ +struct spnic_sq_task { + u32 pkt_info0; + u32 ip_identify; + u32 pkt_info2; /* ipsec used as spi */ + u32 vlan_offload; +}; + +struct spnic_sq_bufdesc { + u32 len; /* 31-bits Length, L2NIC only use length[17:0] */ + u32 rsvd; + u32 hi_addr; + u32 lo_addr; +}; + +struct spnic_sq_compact_wqe { + struct spnic_sq_wqe_desc wqe_desc; +}; + +struct spnic_sq_extend_wqe { + struct spnic_sq_wqe_desc wqe_desc; + struct spnic_sq_task task; + struct spnic_sq_bufdesc buf_desc[0]; +}; + +struct spnic_sq_wqe { + union { + struct spnic_sq_compact_wqe compact_wqe; + struct spnic_sq_extend_wqe extend_wqe; + }; +}; + +/* use section pointer for support non continuous wqe */ +struct spnic_sq_wqe_combo { + struct spnic_sq_wqe_desc *ctrl_bd0; + struct spnic_sq_task *task; + struct spnic_sq_bufdesc *bds_head; + struct spnic_sq_bufdesc *bds_sec2; + u16 first_bds_num; + u32 wqe_type; + u32 task_type; +}; + +/* ************* SQ_CTRL ************** */ +enum sq_wqe_data_format { + SQ_NORMAL_WQE = 0, +}; + +enum sq_wqe_ec_type { + SQ_WQE_COMPACT_TYPE = 0, + SQ_WQE_EXTENDED_TYPE = 1, +}; + +enum sq_wqe_tasksect_len_type { + SQ_WQE_TASKSECT_46BITS = 0, + SQ_WQE_TASKSECT_16BYTES = 1, +}; + +#define SQ_CTRL_BD0_LEN_SHIFT 0 +#define SQ_CTRL_RSVD_SHIFT 18 +#define SQ_CTRL_BUFDESC_NUM_SHIFT 19 +#define SQ_CTRL_TASKSECT_LEN_SHIFT 27 +#define SQ_CTRL_DATA_FORMAT_SHIFT 28 +#define SQ_CTRL_DIRECT_SHIFT 29 
+#define SQ_CTRL_EXTENDED_SHIFT 30 +#define SQ_CTRL_OWNER_SHIFT 31 + +#define SQ_CTRL_BD0_LEN_MASK 0x3FFFFU +#define SQ_CTRL_RSVD_MASK 0x1U +#define SQ_CTRL_BUFDESC_NUM_MASK 0xFFU +#define SQ_CTRL_TASKSECT_LEN_MASK 0x1U +#define SQ_CTRL_DATA_FORMAT_MASK 0x1U +#define SQ_CTRL_DIRECT_MASK 0x1U +#define SQ_CTRL_EXTENDED_MASK 0x1U +#define SQ_CTRL_OWNER_MASK 0x1U + +#define SQ_CTRL_SET(val, member) \ + (((u32)(val) & SQ_CTRL_##member##_MASK) << SQ_CTRL_##member##_SHIFT) + +#define SQ_CTRL_GET(val, member) \ + (((val) >> SQ_CTRL_##member##_SHIFT) & SQ_CTRL_##member##_MASK) + +#define SQ_CTRL_CLEAR(val, member) \ + ((val) & (~(SQ_CTRL_##member##_MASK << SQ_CTRL_##member##_SHIFT))) + +#define SQ_CTRL_QUEUE_INFO_PKT_TYPE_SHIFT 0 +#define SQ_CTRL_QUEUE_INFO_PLDOFF_SHIFT 2 +#define SQ_CTRL_QUEUE_INFO_UFO_SHIFT 10 +#define SQ_CTRL_QUEUE_INFO_TSO_SHIFT 11 +#define SQ_CTRL_QUEUE_INFO_TCPUDP_CS_SHIFT 12 +#define SQ_CTRL_QUEUE_INFO_MSS_SHIFT 13 +#define SQ_CTRL_QUEUE_INFO_SCTP_SHIFT 27 +#define SQ_CTRL_QUEUE_INFO_UC_SHIFT 28 +#define SQ_CTRL_QUEUE_INFO_PRI_SHIFT 29 + +#define SQ_CTRL_QUEUE_INFO_PKT_TYPE_MASK 0x3U +#define SQ_CTRL_QUEUE_INFO_PLDOFF_MASK 0xFFU +#define SQ_CTRL_QUEUE_INFO_UFO_MASK 0x1U +#define SQ_CTRL_QUEUE_INFO_TSO_MASK 0x1U +#define SQ_CTRL_QUEUE_INFO_TCPUDP_CS_MASK 0x1U +#define SQ_CTRL_QUEUE_INFO_MSS_MASK 0x3FFFU +#define SQ_CTRL_QUEUE_INFO_SCTP_MASK 0x1U +#define SQ_CTRL_QUEUE_INFO_UC_MASK 0x1U +#define SQ_CTRL_QUEUE_INFO_PRI_MASK 0x7U + +#define SQ_CTRL_QUEUE_INFO_SET(val, member) \ + (((u32)(val) & SQ_CTRL_QUEUE_INFO_##member##_MASK) << \ + SQ_CTRL_QUEUE_INFO_##member##_SHIFT) + +#define SQ_CTRL_QUEUE_INFO_GET(val, member) \ + (((val) >> SQ_CTRL_QUEUE_INFO_##member##_SHIFT) & \ + SQ_CTRL_QUEUE_INFO_##member##_MASK) + +#define SQ_CTRL_QUEUE_INFO_CLEAR(val, member) \ + ((val) & (~(SQ_CTRL_QUEUE_INFO_##member##_MASK << \ + SQ_CTRL_QUEUE_INFO_##member##_SHIFT))) + +#define SQ_TASK_INFO0_TUNNEL_FLAG_SHIFT 19 +#define SQ_TASK_INFO0_ESP_NEXT_PROTO_SHIFT 22 +#define SQ_TASK_INFO0_INNER_L4_EN_SHIFT 24 +#define SQ_TASK_INFO0_INNER_L3_EN_SHIFT 25 +#define SQ_TASK_INFO0_INNER_L4_PSEUDO_SHIFT 26 +#define SQ_TASK_INFO0_OUT_L4_EN_SHIFT 27 +#define SQ_TASK_INFO0_OUT_L3_EN_SHIFT 28 +#define SQ_TASK_INFO0_OUT_L4_PSEUDO_SHIFT 29 +#define SQ_TASK_INFO0_ESP_OFFLOAD_SHIFT 30 +#define SQ_TASK_INFO0_IPSEC_PROTO_SHIFT 31 + +#define SQ_TASK_INFO0_TUNNEL_FLAG_MASK 0x1U +#define SQ_TASK_INFO0_ESP_NEXT_PROTO_MASK 0x3U +#define SQ_TASK_INFO0_INNER_L4_EN_MASK 0x1U +#define SQ_TASK_INFO0_INNER_L3_EN_MASK 0x1U +#define SQ_TASK_INFO0_INNER_L4_PSEUDO_MASK 0x1U +#define SQ_TASK_INFO0_OUT_L4_EN_MASK 0x1U +#define SQ_TASK_INFO0_OUT_L3_EN_MASK 0x1U +#define SQ_TASK_INFO0_OUT_L4_PSEUDO_MASK 0x1U +#define SQ_TASK_INFO0_ESP_OFFLOAD_MASK 0x1U +#define SQ_TASK_INFO0_IPSEC_PROTO_MASK 0x1U + +#define SQ_TASK_INFO0_SET(val, member) \ + (((u32)(val) & SQ_TASK_INFO0_##member##_MASK) << \ + SQ_TASK_INFO0_##member##_SHIFT) +#define SQ_TASK_INFO0_GET(val, member) \ + (((val) >> SQ_TASK_INFO0_##member##_SHIFT) & \ + SQ_TASK_INFO0_##member##_MASK) + +#define SQ_TASK_INFO1_SET(val, member) \ + (((val) & SQ_TASK_INFO1_##member##_MASK) << \ + SQ_TASK_INFO1_##member##_SHIFT) +#define SQ_TASK_INFO1_GET(val, member) \ + (((val) >> SQ_TASK_INFO1_##member##_SHIFT) & \ + SQ_TASK_INFO1_##member##_MASK) + +#define SQ_TASK_INFO3_VLAN_TAG_SHIFT 0 +#define SQ_TASK_INFO3_VLAN_TYPE_SHIFT 16 +#define SQ_TASK_INFO3_VLAN_TAG_VALID_SHIFT 19 + +#define SQ_TASK_INFO3_VLAN_TAG_MASK 0xFFFFU +#define SQ_TASK_INFO3_VLAN_TYPE_MASK 0x7U +#define 
SQ_TASK_INFO3_VLAN_TAG_VALID_MASK 0x1U + +#define SQ_TASK_INFO3_SET(val, member) \ + (((val) & SQ_TASK_INFO3_##member##_MASK) << \ + SQ_TASK_INFO3_##member##_SHIFT) +#define SQ_TASK_INFO3_GET(val, member) \ + (((val) >> SQ_TASK_INFO3_##member##_SHIFT) & \ + SQ_TASK_INFO3_##member##_MASK) + +static inline u32 spnic_get_pkt_len_for_super_cqe(struct spnic_rq_cqe *cqe, bool last) +{ + u32 pkt_len = sphw_hw_cpu32(cqe->pkt_info); + + if (!last) + return RQ_CQE_PKT_LEN_GET(pkt_len, FIRST_LEN); + else + return RQ_CQE_PKT_LEN_GET(pkt_len, LAST_LEN); +} + +/* * + * spnic_prepare_sq_ctrl - init sq wqe cs + * @nr_descs: total sge_num, include bd0 in cs + * to do : check with zhangxingguo to confirm WQE init + */ +static inline void spnic_prepare_sq_ctrl(struct spnic_sq_wqe_combo *wqe_combo, + u32 queue_info, int nr_descs, u16 owner) +{ + struct spnic_sq_wqe_desc *wqe_desc = wqe_combo->ctrl_bd0; + + if (wqe_combo->wqe_type == SQ_WQE_COMPACT_TYPE) { + wqe_desc->ctrl_len |= + SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) | + SQ_CTRL_SET(wqe_combo->wqe_type, EXTENDED) | + SQ_CTRL_SET(owner, OWNER); + + wqe_desc->ctrl_len = sphw_hw_be32(wqe_desc->ctrl_len); + /* compact wqe queue_info will transfer to ucode */ + wqe_desc->queue_info = 0; + return; + } + + wqe_desc->ctrl_len |= SQ_CTRL_SET(nr_descs, BUFDESC_NUM) | + SQ_CTRL_SET(wqe_combo->task_type, TASKSECT_LEN) | + SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) | + SQ_CTRL_SET(wqe_combo->wqe_type, EXTENDED) | + SQ_CTRL_SET(owner, OWNER); + + wqe_desc->ctrl_len = sphw_hw_be32(wqe_desc->ctrl_len); + + wqe_desc->queue_info = queue_info; + wqe_desc->queue_info |= SQ_CTRL_QUEUE_INFO_SET(1U, UC); + + if (!SQ_CTRL_QUEUE_INFO_GET(wqe_desc->queue_info, MSS)) { + wqe_desc->queue_info |= SQ_CTRL_QUEUE_INFO_SET(TX_MSS_DEFAULT, MSS); + } else if (SQ_CTRL_QUEUE_INFO_GET(wqe_desc->queue_info, MSS) < TX_MSS_MIN) { + /* mss should not less than 80 */ + wqe_desc->queue_info = SQ_CTRL_QUEUE_INFO_CLEAR(wqe_desc->queue_info, MSS); + wqe_desc->queue_info |= SQ_CTRL_QUEUE_INFO_SET(TX_MSS_MIN, MSS); + } + + wqe_desc->queue_info = sphw_hw_be32(wqe_desc->queue_info); +} + +/* * + * spnic_set_vlan_tx_offload - set vlan offload info + * @task: wqe task section + * @vlan_tag: vlan tag + * @vlan_type: 0--select TPID0 in IPSU, 1--select TPID0 in IPSU + * 2--select TPID2 in IPSU, 3--select TPID3 in IPSU, 4--select TPID4 in IPSU + */ +static inline void spnic_set_vlan_tx_offload(struct spnic_sq_task *task, u16 vlan_tag, u8 vlan_type) +{ + task->vlan_offload = SQ_TASK_INFO3_SET(vlan_tag, VLAN_TAG) | + SQ_TASK_INFO3_SET(vlan_type, VLAN_TYPE) | + SQ_TASK_INFO3_SET(1U, VLAN_TAG_VALID); +} + +#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_ntuple.c b/drivers/net/ethernet/ramaxel/spnic/spnic_ntuple.c new file mode 100644 index 000000000000..cd92de93a57e --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/spnic_ntuple.c @@ -0,0 +1,841 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include <linux/kernel.h> +#include <linux/etherdevice.h> +#include <linux/netdevice.h> +#include <linux/device.h> +#include <linux/ethtool.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/types.h> +#include <linux/errno.h> + +#include "sphw_common.h" +#include "sphw_crm.h" +#include "spnic_nic_cfg.h" +#include "spnic_nic_dev.h" + +#define MAX_NUM_OF_ETHTOOL_NTUPLE_RULES BIT(9) +struct spnic_ethtool_rx_flow_rule { + struct list_head list; + struct ethtool_rx_flow_spec flow_spec; +}; 
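The ntuple path below programs the hardware TCAM, which stores each key in the x/y ternary form computed by tcam_translate_key_y()/tcam_translate_key_x(): y = value & mask, then x = y ^ mask. Per bit this gives the usual ternary code, (x,y) = (0,0) wildcard, (1,0) match 0, (0,1) match 1. A standalone sketch with illustrative values:

#include <stdio.h>

int main(void)
{
	unsigned char value = 0xA5; /* key byte taken from the flow spec */
	unsigned char mask = 0xF0;  /* upper nibble matters, lower is wildcard */
	unsigned char y = value & mask;
	unsigned char x = y ^ mask;

	/* per bit: (x,y) = (0,0) wildcard, (1,0) match 0, (0,1) match 1 */
	printf("value=0x%02x mask=0x%02x -> x=0x%02x y=0x%02x\n",
	       value, mask, x, y); /* x=0x50 y=0xa0 */
	return 0;
}

This is why tcam_key_calculate() derives y from the key/mask pair first and then x from y: the pair of bit vectors together encodes care/don't-care per bit, which a plain value/mask register pair could not express inside a single TCAM entry.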
+ +static void tcam_translate_key_y(u8 *key_y, u8 *src_input, u8 *mask, u8 len) +{ + u8 idx; + + for (idx = 0; idx < len; idx++) + key_y[idx] = src_input[idx] & mask[idx]; +} + +static void tcam_translate_key_x(u8 *key_x, u8 *key_y, u8 *mask, u8 len) +{ + u8 idx; + + for (idx = 0; idx < len; idx++) + key_x[idx] = key_y[idx] ^ mask[idx]; +} + +static void tcam_key_calculate(struct tag_tcam_key *tcam_key, + struct nic_tcam_cfg_rule *fdir_tcam_rule) +{ + tcam_translate_key_y(fdir_tcam_rule->key.y, (u8 *)(&tcam_key->key_info), + (u8 *)(&tcam_key->key_mask), TCAM_FLOW_KEY_SIZE); + tcam_translate_key_x(fdir_tcam_rule->key.x, fdir_tcam_rule->key.y, + (u8 *)(&tcam_key->key_mask), TCAM_FLOW_KEY_SIZE); +} + +#define TCAM_IPV4_TYPE 0 +#define TCAM_IPV6_TYPE 1 + +static int spnic_base_ipv4_parse(struct spnic_nic_dev *nic_dev, struct ethtool_rx_flow_spec *fs, + struct tag_tcam_key *tcam_key) +{ + struct ethtool_tcpip4_spec *mask = &fs->m_u.tcp_ip4_spec; + struct ethtool_tcpip4_spec *val = &fs->h_u.tcp_ip4_spec; + u32 temp; + + switch (mask->ip4src) { + case U32_MAX: + temp = ntohl(val->ip4src); + tcam_key->key_info.sipv4_h = high_16_bits(temp); + tcam_key->key_info.sipv4_l = low_16_bits(temp); + + tcam_key->key_mask.sipv4_h = U16_MAX; + tcam_key->key_mask.sipv4_l = U16_MAX; + break; + case 0: + break; + + default: + nicif_err(nic_dev, drv, nic_dev->netdev, "invalid src_ip mask\n"); + return -EINVAL; + } + + switch (mask->ip4dst) { + case U32_MAX: + temp = ntohl(val->ip4dst); + tcam_key->key_info.dipv4_h = high_16_bits(temp); + tcam_key->key_info.dipv4_l = low_16_bits(temp); + + tcam_key->key_mask.dipv4_h = U16_MAX; + tcam_key->key_mask.dipv4_l = U16_MAX; + break; + case 0: + break; + + default: + nicif_err(nic_dev, drv, nic_dev->netdev, "invalid src_ip mask\n"); + return -EINVAL; + } + + tcam_key->key_info.ip_type = TCAM_IPV4_TYPE; + tcam_key->key_mask.ip_type = TCAM_IP_TYPE_MASK; + + tcam_key->key_info.function_id = sphw_global_func_id(nic_dev->hwdev); + tcam_key->key_mask.function_id = TCAM_FUNC_ID_MASK; + + return 0; +} + +static int spnic_fdir_tcam_ipv4_l4_init(struct spnic_nic_dev *nic_dev, + struct ethtool_rx_flow_spec *fs, + struct tag_tcam_key *tcam_key) +{ + struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.tcp_ip4_spec; + struct ethtool_tcpip4_spec *l4_val = &fs->h_u.tcp_ip4_spec; + int err; + + err = spnic_base_ipv4_parse(nic_dev, fs, tcam_key); + if (err) + return err; + + tcam_key->key_info.dport = ntohs(l4_val->pdst); + tcam_key->key_mask.dport = l4_mask->pdst; + + tcam_key->key_info.sport = ntohs(l4_val->psrc); + tcam_key->key_mask.sport = l4_mask->psrc; + + if (fs->flow_type == TCP_V4_FLOW) + tcam_key->key_info.ip_proto = IPPROTO_TCP; + else + tcam_key->key_info.ip_proto = IPPROTO_UDP; + tcam_key->key_mask.ip_proto = U8_MAX; + + return 0; +} + +static int spnic_fdir_tcam_ipv4_init(struct spnic_nic_dev *nic_dev, + struct ethtool_rx_flow_spec *fs, + struct tag_tcam_key *tcam_key) +{ + struct ethtool_usrip4_spec *l3_mask = &fs->m_u.usr_ip4_spec; + struct ethtool_usrip4_spec *l3_val = &fs->h_u.usr_ip4_spec; + int err; + + err = spnic_base_ipv4_parse(nic_dev, fs, tcam_key); + if (err) + return err; + + tcam_key->key_info.ip_proto = l3_val->proto; + tcam_key->key_mask.ip_proto = l3_mask->proto; + + return 0; +} + +#ifndef UNSUPPORT_NTUPLE_IPV6 +enum ipv6_parse_res { + IPV6_MASK_INVALID, + IPV6_MASK_ALL_MASK, + IPV6_MASK_ALL_ZERO, +}; + +enum ipv6_index { + IPV6_IDX0, + IPV6_IDX1, + IPV6_IDX2, + IPV6_IDX3, +}; + +static int ipv6_mask_parse(u32 *ipv6_mask) +{ + if (ipv6_mask[IPV6_IDX0] == 0 && 
ipv6_mask[IPV6_IDX1] == 0 && + ipv6_mask[IPV6_IDX2] == 0 && ipv6_mask[IPV6_IDX3] == 0) + return IPV6_MASK_ALL_ZERO; + + if (ipv6_mask[IPV6_IDX0] == U32_MAX && + ipv6_mask[IPV6_IDX1] == U32_MAX && + ipv6_mask[IPV6_IDX2] == U32_MAX && ipv6_mask[IPV6_IDX3] == U32_MAX) + return IPV6_MASK_ALL_MASK; + + return IPV6_MASK_INVALID; +} + +static int spnic_base_ipv6_parse(struct spnic_nic_dev *nic_dev, + struct ethtool_rx_flow_spec *fs, + struct tag_tcam_key *tcam_key) +{ + struct ethtool_tcpip6_spec *mask = &fs->m_u.tcp_ip6_spec; + struct ethtool_tcpip6_spec *val = &fs->h_u.tcp_ip6_spec; + int parse_res; + u32 temp; + + parse_res = ipv6_mask_parse((u32 *)mask->ip6src); + if (parse_res == IPV6_MASK_ALL_MASK) { + temp = ntohl(val->ip6src[IPV6_IDX0]); + tcam_key->key_info_ipv6.sipv6_key0 = high_16_bits(temp); + tcam_key->key_info_ipv6.sipv6_key1 = low_16_bits(temp); + temp = ntohl(val->ip6src[IPV6_IDX1]); + tcam_key->key_info_ipv6.sipv6_key2 = high_16_bits(temp); + tcam_key->key_info_ipv6.sipv6_key3 = low_16_bits(temp); + temp = ntohl(val->ip6src[IPV6_IDX2]); + tcam_key->key_info_ipv6.sipv6_key4 = high_16_bits(temp); + tcam_key->key_info_ipv6.sipv6_key5 = low_16_bits(temp); + temp = ntohl(val->ip6src[IPV6_IDX3]); + tcam_key->key_info_ipv6.sipv6_key6 = high_16_bits(temp); + tcam_key->key_info_ipv6.sipv6_key7 = low_16_bits(temp); + + tcam_key->key_mask_ipv6.sipv6_key0 = U16_MAX; + tcam_key->key_mask_ipv6.sipv6_key1 = U16_MAX; + tcam_key->key_mask_ipv6.sipv6_key2 = U16_MAX; + tcam_key->key_mask_ipv6.sipv6_key3 = U16_MAX; + tcam_key->key_mask_ipv6.sipv6_key4 = U16_MAX; + tcam_key->key_mask_ipv6.sipv6_key5 = U16_MAX; + tcam_key->key_mask_ipv6.sipv6_key6 = U16_MAX; + tcam_key->key_mask_ipv6.sipv6_key7 = U16_MAX; + } else if (parse_res == IPV6_MASK_INVALID) { + nicif_err(nic_dev, drv, nic_dev->netdev, "invalid src_ipv6 mask\n"); + return -EINVAL; + } + + parse_res = ipv6_mask_parse((u32 *)mask->ip6dst); + if (parse_res == IPV6_MASK_ALL_MASK) { + temp = ntohl(val->ip6dst[IPV6_IDX0]); + tcam_key->key_info_ipv6.dipv6_key0 = high_16_bits(temp); + tcam_key->key_info_ipv6.dipv6_key1 = low_16_bits(temp); + temp = ntohl(val->ip6dst[IPV6_IDX1]); + tcam_key->key_info_ipv6.dipv6_key2 = high_16_bits(temp); + tcam_key->key_info_ipv6.dipv6_key3 = low_16_bits(temp); + temp = ntohl(val->ip6dst[IPV6_IDX2]); + tcam_key->key_info_ipv6.dipv6_key4 = high_16_bits(temp); + tcam_key->key_info_ipv6.dipv6_key5 = low_16_bits(temp); + temp = ntohl(val->ip6dst[IPV6_IDX3]); + tcam_key->key_info_ipv6.dipv6_key6 = high_16_bits(temp); + tcam_key->key_info_ipv6.dipv6_key7 = low_16_bits(temp); + + tcam_key->key_mask_ipv6.dipv6_key0 = U16_MAX; + tcam_key->key_mask_ipv6.dipv6_key1 = U16_MAX; + tcam_key->key_mask_ipv6.dipv6_key2 = U16_MAX; + tcam_key->key_mask_ipv6.dipv6_key3 = U16_MAX; + tcam_key->key_mask_ipv6.dipv6_key4 = U16_MAX; + tcam_key->key_mask_ipv6.dipv6_key5 = U16_MAX; + tcam_key->key_mask_ipv6.dipv6_key6 = U16_MAX; + tcam_key->key_mask_ipv6.dipv6_key7 = U16_MAX; + } else if (parse_res == IPV6_MASK_INVALID) { + nicif_err(nic_dev, drv, nic_dev->netdev, "invalid dst_ipv6 mask\n"); + return -EINVAL; + } + + tcam_key->key_info_ipv6.ip_type = TCAM_IPV6_TYPE; + tcam_key->key_mask_ipv6.ip_type = TCAM_IP_TYPE_MASK; + + tcam_key->key_info_ipv6.function_id = sphw_global_func_id(nic_dev->hwdev); + tcam_key->key_mask_ipv6.function_id = TCAM_FUNC_ID_MASK; + + return 0; +} + +static int spnic_fdir_tcam_ipv6_l4_init(struct spnic_nic_dev *nic_dev, + struct ethtool_rx_flow_spec *fs, + struct tag_tcam_key *tcam_key) +{ + struct ethtool_tcpip6_spec 
*l4_mask = &fs->m_u.tcp_ip6_spec; + struct ethtool_tcpip6_spec *l4_val = &fs->h_u.tcp_ip6_spec; + int err; + + err = spnic_base_ipv6_parse(nic_dev, fs, tcam_key); + if (err) + return err; + + tcam_key->key_info_ipv6.dport = ntohs(l4_val->pdst); + tcam_key->key_mask_ipv6.dport = l4_mask->pdst; + + tcam_key->key_info_ipv6.sport = ntohs(l4_val->psrc); + tcam_key->key_mask_ipv6.sport = l4_mask->psrc; + + if (fs->flow_type == TCP_V6_FLOW) + tcam_key->key_info_ipv6.ip_proto = NEXTHDR_TCP; + else + tcam_key->key_info_ipv6.ip_proto = NEXTHDR_UDP; + tcam_key->key_mask_ipv6.ip_proto = U8_MAX; + + return 0; +} + +static int spnic_fdir_tcam_ipv6_init(struct spnic_nic_dev *nic_dev, + struct ethtool_rx_flow_spec *fs, + struct tag_tcam_key *tcam_key) +{ + struct ethtool_usrip6_spec *l3_mask = &fs->m_u.usr_ip6_spec; + struct ethtool_usrip6_spec *l3_val = &fs->h_u.usr_ip6_spec; + int err; + + err = spnic_base_ipv6_parse(nic_dev, fs, tcam_key); + if (err) + return err; + + tcam_key->key_info_ipv6.ip_proto = l3_val->l4_proto; + tcam_key->key_mask_ipv6.ip_proto = l3_mask->l4_proto; + + return 0; +} +#endif + +static int spnic_fdir_tcam_info_init(struct spnic_nic_dev *nic_dev, + struct ethtool_rx_flow_spec *fs, + struct tag_tcam_key *tcam_key, + struct nic_tcam_cfg_rule *fdir_tcam_rule) +{ + int err; + + switch (fs->flow_type) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + err = spnic_fdir_tcam_ipv4_l4_init(nic_dev, fs, tcam_key); + if (err) + return err; + break; + case IP_USER_FLOW: + err = spnic_fdir_tcam_ipv4_init(nic_dev, fs, tcam_key); + if (err) + return err; + break; +#ifndef UNSUPPORT_NTUPLE_IPV6 + case TCP_V6_FLOW: + case UDP_V6_FLOW: + err = spnic_fdir_tcam_ipv6_l4_init(nic_dev, fs, tcam_key); + if (err) + return err; + break; + case IPV6_USER_FLOW: + err = spnic_fdir_tcam_ipv6_init(nic_dev, fs, tcam_key); + if (err) + return err; + break; +#endif + default: + return -EOPNOTSUPP; + } + + tcam_key->key_info.tunnel_type = 0; + tcam_key->key_mask.tunnel_type = TCAM_TUNNEL_TYPE_MASK; + + fdir_tcam_rule->data.qid = (u32)fs->ring_cookie; + tcam_key_calculate(tcam_key, fdir_tcam_rule); + + return 0; +} + +void spnic_flush_rx_flow_rule(struct spnic_nic_dev *nic_dev) +{ + struct spnic_tcam_info *tcam_info = &nic_dev->tcam; + struct spnic_ethtool_rx_flow_rule *eth_rule = NULL; + struct spnic_ethtool_rx_flow_rule *eth_rule_tmp = NULL; + struct spnic_tcam_filter *tcam_iter = NULL; + struct spnic_tcam_filter *tcam_iter_tmp = NULL; + struct spnic_tcam_dynamic_block *block = NULL; + struct spnic_tcam_dynamic_block *block_tmp = NULL; + struct list_head *dynamic_list = &tcam_info->tcam_dynamic_info.tcam_dynamic_list; + + if (!list_empty(&tcam_info->tcam_list)) { + list_for_each_entry_safe(tcam_iter, tcam_iter_tmp, &tcam_info->tcam_list, + tcam_filter_list) { + list_del(&tcam_iter->tcam_filter_list); + kfree(tcam_iter); + } + } + if (!list_empty(dynamic_list)) { + list_for_each_entry_safe(block, block_tmp, dynamic_list, block_list) { + list_del(&block->block_list); + kfree(block); + } + } + + if (!list_empty(&nic_dev->rx_flow_rule.rules)) { + list_for_each_entry_safe(eth_rule, eth_rule_tmp, + &nic_dev->rx_flow_rule.rules, list) { + list_del(&eth_rule->list); + kfree(eth_rule); + } + } + +#ifndef FPGA_SUPPORT + spnic_flush_tcam_rule(nic_dev->hwdev); + spnic_set_fdir_tcam_rule_filter(nic_dev->hwdev, false); +#endif +} + +static struct spnic_tcam_dynamic_block * +spnic_alloc_dynamic_block_resource(struct spnic_nic_dev *nic_dev, + struct spnic_tcam_info *tcam_info, u16 dynamic_block_id) +{ + struct spnic_tcam_dynamic_block 
*dynamic_block_ptr = NULL; + + dynamic_block_ptr = kzalloc(sizeof(*dynamic_block_ptr), GFP_KERNEL); + if (!dynamic_block_ptr) { + nicif_err(nic_dev, drv, nic_dev->netdev, "fdir filter dynamic alloc block index %d memory failed\n", + dynamic_block_id); + return NULL; + } + + dynamic_block_ptr->dynamic_block_id = dynamic_block_id; + list_add_tail(&dynamic_block_ptr->block_list, + &tcam_info->tcam_dynamic_info.tcam_dynamic_list); + + tcam_info->tcam_dynamic_info.dynamic_block_cnt++; + + return dynamic_block_ptr; +} + +static void +spnic_free_dynamic_block_resource(struct spnic_tcam_info *tcam_info, + struct spnic_tcam_dynamic_block *block_ptr) +{ + if (!block_ptr) + return; + + list_del(&block_ptr->block_list); + kfree(block_ptr); + + tcam_info->tcam_dynamic_info.dynamic_block_cnt--; +} + +static struct spnic_tcam_dynamic_block * +spnic_dynamic_lookup_tcam_filter(struct spnic_nic_dev *nic_dev, + struct nic_tcam_cfg_rule *fdir_tcam_rule, + struct spnic_tcam_info *tcam_info, + struct spnic_tcam_filter *tcam_filter, u16 *tcam_index) +{ + struct spnic_tcam_dynamic_block *tmp = NULL; + u16 index; + + list_for_each_entry(tmp, &tcam_info->tcam_dynamic_info.tcam_dynamic_list, block_list) + if (tmp->dynamic_index_cnt < SPNIC_TCAM_DYNAMIC_BLOCK_SIZE) + break; + + if (!tmp || tmp->dynamic_index_cnt >= SPNIC_TCAM_DYNAMIC_BLOCK_SIZE) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fdir filter dynamic lookup for index failed\n"); + return NULL; + } + + for (index = 0; index < SPNIC_TCAM_DYNAMIC_BLOCK_SIZE; index++) + if (tmp->dynamic_index_used[index] == 0) + break; + + if (index == SPNIC_TCAM_DYNAMIC_BLOCK_SIZE) { + nicif_err(nic_dev, drv, nic_dev->netdev, "filter rules of tcam block 0x%x are full\n", + tmp->dynamic_block_id); + return NULL; + } + + tcam_filter->dynamic_block_id = tmp->dynamic_block_id; + tcam_filter->index = index; + *tcam_index = index; + + fdir_tcam_rule->index = index + + SPNIC_PKT_TCAM_DYNAMIC_INDEX_START(tmp->dynamic_block_id); + + return tmp; +} + +static int spnic_add_tcam_filter(struct spnic_nic_dev *nic_dev, + struct spnic_tcam_filter *tcam_filter, + struct nic_tcam_cfg_rule *fdir_tcam_rule) +{ + struct spnic_tcam_info *tcam_info = &nic_dev->tcam; + struct spnic_tcam_dynamic_block *dynamic_block_ptr = NULL; + struct spnic_tcam_dynamic_block *tmp = NULL; + u16 block_cnt = tcam_info->tcam_dynamic_info.dynamic_block_cnt; + u16 tcam_block_index = 0; + int block_alloc_flag = 0; + u16 index = 0; + int err; + + if (tcam_info->tcam_rule_nums >= + block_cnt * SPNIC_TCAM_DYNAMIC_BLOCK_SIZE) { + if (block_cnt >= (SPNIC_MAX_TCAM_FILTERS / SPNIC_TCAM_DYNAMIC_BLOCK_SIZE)) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Dynamic tcam block is full, alloc failed\n"); + goto failed; + } + + err = spnic_alloc_tcam_block(nic_dev->hwdev, &tcam_block_index); + if (err) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fdir filter dynamic tcam alloc block failed\n"); + goto failed; + } + + block_alloc_flag = 1; + + dynamic_block_ptr = + spnic_alloc_dynamic_block_resource(nic_dev, tcam_info, tcam_block_index); + if (!dynamic_block_ptr) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fdir filter dynamic alloc block memory failed\n"); + goto block_alloc_failed; + } + } + + tmp = spnic_dynamic_lookup_tcam_filter(nic_dev, fdir_tcam_rule, tcam_info, + tcam_filter, &index); + if (!tmp) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Dynamic lookup tcam filter failed\n"); + goto lookup_tcam_index_failed; + } + + err = spnic_add_tcam_rule(nic_dev->hwdev, fdir_tcam_rule); + if (err) { + nicif_err(nic_dev, drv, 
nic_dev->netdev, "Fdir_tcam_rule add failed\n"); + goto add_tcam_rules_failed; + } + + nicif_info(nic_dev, drv, nic_dev->netdev, + "Add fdir tcam rule, function_id: 0x%x, tcam_block_id: %d, local_index: %d, global_index: %d, queue: %d, tcam_rule_nums: %d succeed\n", + sphw_global_func_id(nic_dev->hwdev), + tcam_filter->dynamic_block_id, index, fdir_tcam_rule->index, + fdir_tcam_rule->data.qid, tcam_info->tcam_rule_nums + 1); + + if (tcam_info->tcam_rule_nums == 0) { + err = spnic_set_fdir_tcam_rule_filter(nic_dev->hwdev, true); + if (err) + goto enable_failed; + } + + list_add_tail(&tcam_filter->tcam_filter_list, &tcam_info->tcam_list); + + tmp->dynamic_index_used[index] = 1; + tmp->dynamic_index_cnt++; + + tcam_info->tcam_rule_nums++; + + return 0; + +enable_failed: + spnic_del_tcam_rule(nic_dev->hwdev, fdir_tcam_rule->index); + +add_tcam_rules_failed: +lookup_tcam_index_failed: + if (block_alloc_flag == 1) + spnic_free_dynamic_block_resource(tcam_info, dynamic_block_ptr); + +block_alloc_failed: + if (block_alloc_flag == 1) + spnic_free_tcam_block(nic_dev->hwdev, &tcam_block_index); + +failed: + return -EFAULT; +} + +static int spnic_del_tcam_filter(struct spnic_nic_dev *nic_dev, + struct spnic_tcam_filter *tcam_filter) +{ + struct spnic_tcam_info *tcam_info = &nic_dev->tcam; + u16 dynamic_block_id = tcam_filter->dynamic_block_id; + struct spnic_tcam_dynamic_block *tmp = NULL; + u32 index = 0; + int err; + + list_for_each_entry(tmp, &tcam_info->tcam_dynamic_info.tcam_dynamic_list, block_list) { + if (tmp->dynamic_block_id == dynamic_block_id) + break; + } + if (!tmp || tmp->dynamic_block_id != dynamic_block_id) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fdir filter del dynamic lookup for block failed\n"); + return -EFAULT; + } + + index = SPNIC_PKT_TCAM_DYNAMIC_INDEX_START(tmp->dynamic_block_id) + tcam_filter->index; + + err = spnic_del_tcam_rule(nic_dev->hwdev, index); + if (err) { + nicif_err(nic_dev, drv, nic_dev->netdev, "fdir_tcam_rule del failed\n"); + return -EFAULT; + } + + nicif_info(nic_dev, drv, nic_dev->netdev, + "Del fdir_tcam_dynamic_rule function_id: 0x%x, tcam_block_id: %d, local_index: %d, global_index: %d, local_rules_nums: %d, global_rule_nums: %d succeed\n", + sphw_global_func_id(nic_dev->hwdev), dynamic_block_id, + tcam_filter->index, index, tmp->dynamic_index_cnt - 1, + tcam_info->tcam_rule_nums - 1); + + tmp->dynamic_index_used[tcam_filter->index] = 0; + tmp->dynamic_index_cnt--; + tcam_info->tcam_rule_nums--; + if (tmp->dynamic_index_cnt == 0) { + spnic_free_tcam_block(nic_dev->hwdev, &dynamic_block_id); + spnic_free_dynamic_block_resource(tcam_info, tmp); + } + + if (tcam_info->tcam_rule_nums == 0) + spnic_set_fdir_tcam_rule_filter(nic_dev->hwdev, false); + + list_del(&tcam_filter->tcam_filter_list); + kfree(tcam_filter); + + return 0; +} + +static inline struct spnic_tcam_filter * +spnic_tcam_filter_lookup(struct list_head *filter_list, struct tag_tcam_key *key) +{ + struct spnic_tcam_filter *iter; + + list_for_each_entry(iter, filter_list, tcam_filter_list) { + if (memcmp(key, &iter->tcam_key, sizeof(struct tag_tcam_key)) == 0) + return iter; + } + + return NULL; +} + +static void del_ethtool_rule(struct spnic_nic_dev *nic_dev, + struct spnic_ethtool_rx_flow_rule *eth_rule) +{ + list_del(ð_rule->list); + nic_dev->rx_flow_rule.tot_num_rules--; + + kfree(eth_rule); +} + +static int spnic_remove_one_rule(struct spnic_nic_dev *nic_dev, + struct spnic_ethtool_rx_flow_rule *eth_rule) +{ + struct spnic_tcam_info *tcam_info = &nic_dev->tcam; + struct 
spnic_tcam_filter *tcam_filter; + struct nic_tcam_cfg_rule fdir_tcam_rule; + struct tag_tcam_key tcam_key; + int err; + + memset(&fdir_tcam_rule, 0, sizeof(fdir_tcam_rule)); + memset(&tcam_key, 0, sizeof(tcam_key)); + + err = spnic_fdir_tcam_info_init(nic_dev, &eth_rule->flow_spec, &tcam_key, &fdir_tcam_rule); + if (err) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Init fdir info failed\n"); + return err; + } + + tcam_filter = spnic_tcam_filter_lookup(&tcam_info->tcam_list, &tcam_key); + if (!tcam_filter) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Filter does not exist\n"); + return -EEXIST; + } + + err = spnic_del_tcam_filter(nic_dev, tcam_filter); + if (err) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Delete tcam filter failed\n"); + return err; + } + + del_ethtool_rule(nic_dev, eth_rule); + + return 0; +} + +static void add_rule_to_list(struct spnic_nic_dev *nic_dev, + struct spnic_ethtool_rx_flow_rule *rule) +{ + struct spnic_ethtool_rx_flow_rule *iter = NULL; + struct list_head *head = &nic_dev->rx_flow_rule.rules; + + list_for_each_entry(iter, &nic_dev->rx_flow_rule.rules, list) { + if (iter->flow_spec.location > rule->flow_spec.location) + break; + head = &iter->list; + } + nic_dev->rx_flow_rule.tot_num_rules++; + list_add(&rule->list, head); +} + +static int spnic_add_one_rule(struct spnic_nic_dev *nic_dev, struct ethtool_rx_flow_spec *fs) +{ + struct nic_tcam_cfg_rule fdir_tcam_rule; + struct tag_tcam_key tcam_key; + struct spnic_ethtool_rx_flow_rule *eth_rule = NULL; + struct spnic_tcam_filter *tcam_filter = NULL; + struct spnic_tcam_info *tcam_info = &nic_dev->tcam; + int err; + + memset(&fdir_tcam_rule, 0, sizeof(fdir_tcam_rule)); + memset(&tcam_key, 0, sizeof(tcam_key)); + err = spnic_fdir_tcam_info_init(nic_dev, fs, &tcam_key, &fdir_tcam_rule); + if (err) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Init fdir info failed\n"); + return err; + } + + tcam_filter = spnic_tcam_filter_lookup(&tcam_info->tcam_list, &tcam_key); + if (tcam_filter) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Filter exists\n"); + return -EEXIST; + } + + tcam_filter = kzalloc(sizeof(*tcam_filter), GFP_KERNEL); + if (!tcam_filter) + return -ENOMEM; + memcpy(&tcam_filter->tcam_key, &tcam_key, sizeof(struct tag_tcam_key)); + tcam_filter->queue = (u16)fdir_tcam_rule.data.qid; + + err = spnic_add_tcam_filter(nic_dev, tcam_filter, &fdir_tcam_rule); + if (err) + goto add_tcam_filter_fail; + + /* driver saves the new rule filter */ + eth_rule = kzalloc(sizeof(*eth_rule), GFP_KERNEL); + if (!eth_rule) { + err = -ENOMEM; + goto alloc_eth_rule_fail; + } + + eth_rule->flow_spec = *fs; + add_rule_to_list(nic_dev, eth_rule); + + return 0; + +alloc_eth_rule_fail: + spnic_del_tcam_filter(nic_dev, tcam_filter); +add_tcam_filter_fail: + kfree(tcam_filter); + return err; +} + +static struct spnic_ethtool_rx_flow_rule * +find_ethtool_rule(struct spnic_nic_dev *nic_dev, u32 location) +{ + struct spnic_ethtool_rx_flow_rule *iter = NULL; + + list_for_each_entry(iter, &nic_dev->rx_flow_rule.rules, list) { + if (iter->flow_spec.location == location) + return iter; + } + return NULL; +} + +static int validate_flow(struct spnic_nic_dev *nic_dev, struct ethtool_rx_flow_spec *fs) +{ + if (fs->location >= MAX_NUM_OF_ETHTOOL_NTUPLE_RULES) { + nicif_err(nic_dev, drv, nic_dev->netdev, "loc exceeds limit [0,%lu]\n", + MAX_NUM_OF_ETHTOOL_NTUPLE_RULES); + return -EINVAL; + } + + if (fs->ring_cookie >= nic_dev->q_params.num_qps) { + nicif_err(nic_dev, drv, nic_dev->netdev, "action is larger than queue number %u\n", + 
nic_dev->q_params.num_qps); + return -EINVAL; + } + + switch (fs->flow_type) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + case IP_USER_FLOW: +#ifndef UNSUPPORT_NTUPLE_IPV6 + case TCP_V6_FLOW: + case UDP_V6_FLOW: + case IPV6_USER_FLOW: +#endif + break; + default: + nicif_err(nic_dev, drv, nic_dev->netdev, "flow type is not supported\n"); + return -EOPNOTSUPP; + } + + return 0; +} + +int spnic_ethtool_flow_replace(struct spnic_nic_dev *nic_dev, struct ethtool_rx_flow_spec *fs) +{ + struct spnic_ethtool_rx_flow_rule *eth_rule = NULL; + struct ethtool_rx_flow_spec flow_spec_temp; + int loc_exit_flag = 0; + int err; + + err = validate_flow(nic_dev, fs); + if (err) { + nicif_err(nic_dev, drv, nic_dev->netdev, "flow is not valid %d\n", err); + return err; + } + + eth_rule = find_ethtool_rule(nic_dev, fs->location); + /* when location is same, delete old location rule. */ + if (eth_rule) { + memcpy(&flow_spec_temp, &eth_rule->flow_spec, sizeof(struct ethtool_rx_flow_spec)); + err = spnic_remove_one_rule(nic_dev, eth_rule); + if (err) + return err; + + loc_exit_flag = 1; + } + + /* add new rule filter */ + err = spnic_add_one_rule(nic_dev, fs); + if (err) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Add new rule filter failed\n"); + if (loc_exit_flag) + spnic_add_one_rule(nic_dev, &flow_spec_temp); + + return -ENOENT; + } + + return 0; +} + +int spnic_ethtool_flow_remove(struct spnic_nic_dev *nic_dev, u32 location) +{ + struct spnic_ethtool_rx_flow_rule *eth_rule = NULL; + int err; + + if (location >= MAX_NUM_OF_ETHTOOL_NTUPLE_RULES) + return -ENOSPC; + + eth_rule = find_ethtool_rule(nic_dev, location); + if (!eth_rule) + return -ENOENT; + + err = spnic_remove_one_rule(nic_dev, eth_rule); + + return err; +} + +int spnic_ethtool_get_flow(struct spnic_nic_dev *nic_dev, + struct ethtool_rxnfc *info, u32 location) +{ + struct spnic_ethtool_rx_flow_rule *eth_rule = NULL; + + if (location >= MAX_NUM_OF_ETHTOOL_NTUPLE_RULES) + return -EINVAL; + + list_for_each_entry(eth_rule, &nic_dev->rx_flow_rule.rules, list) { + if (eth_rule->flow_spec.location == location) { + info->fs = eth_rule->flow_spec; + return 0; + } + } + + return -ENOENT; +} + +int spnic_ethtool_get_all_flows(struct spnic_nic_dev *nic_dev, + struct ethtool_rxnfc *info, u32 *rule_locs) +{ + int idx = 0; + struct spnic_ethtool_rx_flow_rule *eth_rule = NULL; + + info->data = MAX_NUM_OF_ETHTOOL_NTUPLE_RULES; + list_for_each_entry(eth_rule, &nic_dev->rx_flow_rule.rules, list) + rule_locs[idx++] = eth_rule->flow_spec.location; + + return info->rule_cnt == idx ? 
0 : -ENOENT; +} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_pci_id_tbl.h b/drivers/net/ethernet/ramaxel/spnic/spnic_pci_id_tbl.h new file mode 100644 index 000000000000..9d32608e6bb7 --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/spnic_pci_id_tbl.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#ifndef SPNIC_PCI_ID_TBL_H +#define SPNIC_PCI_ID_TBL_H + +#define PCI_VENDOR_ID_RAMAXEL 0x1E81 +#define SPNIC_DEV_ID_PF_STD 0x9020 +#define SPNIC_DEV_ID_VF 0x9001 +#define SPNIC_DEV_ID_VF_HV 0x9002 + +#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_rss.c b/drivers/net/ethernet/ramaxel/spnic/spnic_rss.c new file mode 100644 index 000000000000..a0dcc1519262 --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/spnic_rss.c @@ -0,0 +1,750 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/interrupt.h> +#include <linux/etherdevice.h> +#include <linux/netdevice.h> +#include <linux/device.h> +#include <linux/ethtool.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/dcbnl.h> + +#include "sphw_crm.h" +#include "spnic_nic_cfg.h" +#include "spnic_nic_dev.h" +#include "sphw_hw.h" +#include "spnic_rss.h" + +static u16 num_qps; +module_param(num_qps, ushort, 0444); +MODULE_PARM_DESC(num_qps, "Number of Queue Pairs (default=0)"); + +#define MOD_PARA_VALIDATE_NUM_QPS(nic_dev, num_qps, out_qps) do { \ + if ((num_qps) > (nic_dev)->max_qps) \ + nic_warn(&(nic_dev)->pdev->dev, \ + "Module Parameter %s value %u is out of range, " \ + "Maximum value for the device: %u, using %u\n", \ + #num_qps, num_qps, (nic_dev)->max_qps, \ + (nic_dev)->max_qps); \ + if (!(num_qps) || (num_qps) > (nic_dev)->max_qps) \ + (out_qps) = (nic_dev)->max_qps; \ + else \ + (out_qps) = (num_qps); \ +} while (0) + +static void spnic_fillout_indir_tbl(struct spnic_nic_dev *nic_dev, u8 num_tcs, u32 *indir) +{ + u16 num_rss, tc_group_size; + int i; + + if (num_tcs) + tc_group_size = SPNIC_RSS_INDIR_SIZE / num_tcs; + else + tc_group_size = SPNIC_RSS_INDIR_SIZE; + + num_rss = nic_dev->q_params.num_rss; + for (i = 0; i < SPNIC_RSS_INDIR_SIZE; i++) + indir[i] = (i / tc_group_size) * num_rss + i % num_rss; +} + +int spnic_rss_init(struct spnic_nic_dev *nic_dev) +{ + struct net_device *netdev = nic_dev->netdev; + u8 cos, num_tc = 0; + u8 prio_tc[SPNIC_DCB_UP_MAX] = {0}; + u8 max_cos = nic_dev->hw_dcb_cfg.max_cos; + + if (test_bit(SPNIC_DCB_ENABLE, &nic_dev->flags)) { + num_tc = max_cos; + for (cos = 0; cos < SPNIC_DCB_COS_MAX; cos++) { + if (cos < SPNIC_DCB_COS_MAX - max_cos) + prio_tc[cos] = max_cos - 1; + else + prio_tc[cos] = (SPNIC_DCB_COS_MAX - 1) - cos; + } + } else { + num_tc = 0; + } + + return spnic_set_hw_rss_parameters(netdev, 1, num_tc, prio_tc); +} + +void spnic_rss_deinit(struct spnic_nic_dev *nic_dev) +{ + u8 prio_tc[SPNIC_DCB_UP_MAX] = {0}; + + spnic_rss_cfg(nic_dev->hwdev, 0, 0, prio_tc, 1); +} + +void spnic_init_rss_parameters(struct net_device *netdev) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + + nic_dev->rss_hash_engine = SPNIC_RSS_HASH_ENGINE_TYPE_XOR; + nic_dev->rss_type.tcp_ipv6_ext = 1; + nic_dev->rss_type.ipv6_ext = 1; + nic_dev->rss_type.tcp_ipv6 = 1; + nic_dev->rss_type.ipv6 = 1; + nic_dev->rss_type.tcp_ipv4 = 1; + nic_dev->rss_type.ipv4 = 1; + 
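+ /* UDP hashing is enabled by default as well; all of these type bits + * are later programmed into the hardware RSS context by spnic_set_rss_type() + */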
nic_dev->rss_type.udp_ipv6 = 1; + nic_dev->rss_type.udp_ipv4 = 1; +} + +void spnic_clear_rss_config(struct spnic_nic_dev *nic_dev) +{ + kfree(nic_dev->rss_hkey); + nic_dev->rss_hkey = NULL; + + kfree(nic_dev->rss_indir); + nic_dev->rss_indir = NULL; +} + +void spnic_set_default_rss_indir(struct net_device *netdev) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + + set_bit(SPNIC_RSS_DEFAULT_INDIR, &nic_dev->flags); +} + +static void spnic_maybe_reconfig_rss_indir(struct net_device *netdev) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + int i; + + /* if dcb is enabled, user can not config rss indir table */ + if (test_bit(SPNIC_DCB_ENABLE, &nic_dev->flags)) { + nicif_info(nic_dev, drv, netdev, "DCB is enabled, set default rss indir\n"); + goto discard_user_rss_indir; + } + + for (i = 0; i < SPNIC_RSS_INDIR_SIZE; i++) { + if (nic_dev->rss_indir[i] >= nic_dev->q_params.num_qps) + goto discard_user_rss_indir; + } + + return; + +discard_user_rss_indir: + spnic_set_default_rss_indir(netdev); +} + +static void decide_num_qps(struct spnic_nic_dev *nic_dev) +{ + u16 tmp_num_qps = nic_dev->q_params.num_qps; + u16 num_cpus = 0; + int i, node; + + MOD_PARA_VALIDATE_NUM_QPS(nic_dev, num_qps, tmp_num_qps); + + /* To reduce memory footprint in ovs mode. + * VF can't get board info correctly with early pf driver. + */ + /* if ((spnic_get_func_mode(nic_dev->hwdev) == FUNC_MOD_NORMAL_HOST) && + * service_mode == SPNIC_WORK_MODE_OVS && + * sphw_func_type(nic_dev->hwdev) != TYPE_VF) + * MOD_PARA_VALIDATE_NUM_QPS(nic_dev, ovs_num_qps, + * tmp_num_qps); + */ + + for (i = 0; i < (int)num_online_cpus(); i++) { + node = (int)cpu_to_node(i); + if (node == dev_to_node(&nic_dev->pdev->dev)) + num_cpus++; + } + + if (!num_cpus) + num_cpus = (u16)num_online_cpus(); + + nic_dev->q_params.num_qps = min_t(u16, tmp_num_qps, num_cpus); +} + +static void copy_value_to_rss_hkey(struct spnic_nic_dev *nic_dev, const u8 *hkey) +{ + u32 i; + u32 *rss_hkey = (u32 *)nic_dev->rss_hkey; + + memcpy(nic_dev->rss_hkey, hkey, SPNIC_RSS_KEY_SIZE); + + /* make a copy of the key, and convert it to Big Endian */ + for (i = 0; i < SPNIC_RSS_KEY_SIZE / sizeof(u32); i++) + nic_dev->rss_hkey_be[i] = cpu_to_be32(rss_hkey[i]); +} + +int alloc_rss_resource(struct spnic_nic_dev *nic_dev) +{ + u8 default_rss_key[SPNIC_RSS_KEY_SIZE] = { + 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, + 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, + 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4, + 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c, + 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa}; + + /* We request double spaces for the hash key, + * the second one holds the key of Big Endian + * format. 
+ */ + nic_dev->rss_hkey = + kzalloc(SPNIC_RSS_KEY_SIZE * SPNIC_RSS_KEY_RSV_NUM, GFP_KERNEL); + if (!nic_dev->rss_hkey) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to alloc memory for rss_hkey\n"); + return -ENOMEM; + } + + /* The second space is for big endian hash key */ + nic_dev->rss_hkey_be = (u32 *)(nic_dev->rss_hkey + SPNIC_RSS_KEY_SIZE); + copy_value_to_rss_hkey(nic_dev, (u8 *)default_rss_key); + + nic_dev->rss_indir = kzalloc(sizeof(u32) * SPNIC_RSS_INDIR_SIZE, GFP_KERNEL); + if (!nic_dev->rss_indir) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to alloc memory for rss_indir\n"); + kfree(nic_dev->rss_hkey); + nic_dev->rss_hkey = NULL; + return -ENOMEM; + } + + set_bit(SPNIC_RSS_DEFAULT_INDIR, &nic_dev->flags); + + return 0; +} + +void spnic_try_to_enable_rss(struct spnic_nic_dev *nic_dev) +{ + u8 prio_tc[SPNIC_DCB_UP_MAX] = {0}; + int err = 0; + + if (!nic_dev) + return; + + nic_dev->max_qps = sphw_func_max_nic_qnum(nic_dev->hwdev); + if (nic_dev->max_qps <= 1) + goto set_q_params; + + err = alloc_rss_resource(nic_dev); + if (err) { + nic_dev->max_qps = 1; + goto set_q_params; + } + + err = spnic_rss_template_alloc(nic_dev->hwdev); + if (err) { + if (err == -ENOSPC) + nic_err(&nic_dev->pdev->dev, "Failed to alloc template for rss, table is full\n"); + else + nic_err(&nic_dev->pdev->dev, "Failed to alloc template for rss, can't enable rss for this function\n"); + spnic_clear_rss_config(nic_dev); + nic_dev->max_qps = 1; + goto set_q_params; + } + + set_bit(SPNIC_RSS_ENABLE, &nic_dev->flags); + nic_dev->max_qps = sphw_func_max_nic_qnum(nic_dev->hwdev); + + decide_num_qps(nic_dev); + + nic_dev->q_params.rss_limit = nic_dev->q_params.num_qps; + nic_dev->q_params.num_rss = nic_dev->q_params.num_qps; + + spnic_init_rss_parameters(nic_dev->netdev); + err = spnic_set_hw_rss_parameters(nic_dev->netdev, 0, 0, prio_tc); + if (err) { + nic_err(&nic_dev->pdev->dev, "Failed to set hardware rss parameters\n"); + + spnic_clear_rss_config(nic_dev); + err = spnic_rss_template_free(nic_dev->hwdev); + if (err) + return; + nic_dev->max_qps = 1; + goto set_q_params; + } + return; + +set_q_params: + clear_bit(SPNIC_RSS_ENABLE, &nic_dev->flags); + nic_dev->q_params.rss_limit = nic_dev->max_qps; + nic_dev->q_params.num_qps = nic_dev->max_qps; + nic_dev->q_params.num_rss = nic_dev->max_qps; +} + +static int spnic_config_rss_hw_resource(struct spnic_nic_dev *nic_dev, u32 *indir_tbl) +{ + int err; + + err = spnic_rss_set_indir_tbl(nic_dev->hwdev, indir_tbl); + if (err) + return err; + + err = spnic_set_rss_type(nic_dev->hwdev, nic_dev->rss_type); + if (err) + return err; + + return spnic_rss_set_hash_engine(nic_dev->hwdev, nic_dev->rss_hash_engine); +} + +int spnic_set_hw_rss_parameters(struct net_device *netdev, u8 rss_en, u8 num_tc, u8 *prio_tc) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + int err; + + /* RSS key */ + err = spnic_rss_set_hash_key(nic_dev->hwdev, nic_dev->rss_hkey); + if (err) + return err; + + spnic_maybe_reconfig_rss_indir(netdev); + + if (test_bit(SPNIC_RSS_DEFAULT_INDIR, &nic_dev->flags)) + spnic_fillout_indir_tbl(nic_dev, num_tc, nic_dev->rss_indir); + + err = spnic_config_rss_hw_resource(nic_dev, nic_dev->rss_indir); + if (err) + return err; + + err = spnic_rss_cfg(nic_dev->hwdev, rss_en, num_tc, prio_tc, nic_dev->q_params.num_qps); + if (err) + return err; + + return 0; +} + +/* for ethtool */ +static int set_l4_rss_hash_ops(struct ethtool_rxnfc *cmd, struct nic_rss_type *rss_type) +{ + u8 rss_l4_en = 0; + + switch (cmd->data & (RXH_L4_B_0_1 | 
RXH_L4_B_2_3)) { + case 0: + rss_l4_en = 0; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + rss_l4_en = 1; + break; + default: + return -EINVAL; + } + + switch (cmd->flow_type) { + case TCP_V4_FLOW: + rss_type->tcp_ipv4 = rss_l4_en; + break; + case TCP_V6_FLOW: + rss_type->tcp_ipv6 = rss_l4_en; + break; + case UDP_V4_FLOW: + rss_type->udp_ipv4 = rss_l4_en; + break; + case UDP_V6_FLOW: + rss_type->udp_ipv6 = rss_l4_en; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int update_rss_hash_opts(struct spnic_nic_dev *nic_dev, struct ethtool_rxnfc *cmd, + struct nic_rss_type *rss_type) +{ + int err; + + switch (cmd->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + case UDP_V4_FLOW: + case UDP_V6_FLOW: + err = set_l4_rss_hash_ops(cmd, rss_type); + if (err) + return err; + + break; + case IPV4_FLOW: + rss_type->ipv4 = 1; + break; + case IPV6_FLOW: + rss_type->ipv6 = 1; + break; + default: + nicif_err(nic_dev, drv, nic_dev->netdev, "Unsupported flow type\n"); + return -EINVAL; + } + + return 0; +} + +static int spnic_set_rss_hash_opts(struct spnic_nic_dev *nic_dev, struct ethtool_rxnfc *cmd) +{ + struct nic_rss_type *rss_type = &nic_dev->rss_type; + int err; + + if (!test_bit(SPNIC_RSS_ENABLE, &nic_dev->flags)) { + cmd->data = 0; + nicif_err(nic_dev, drv, nic_dev->netdev, + "RSS is disabled, setting flow-hash is not supported\n"); + return -EOPNOTSUPP; + } + + /* RSS does not support anything other than hashing + * to queues on src and dst IPs and ports + */ + if (cmd->data & ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | + RXH_L4_B_2_3)) + return -EINVAL; + + /* We need at least the IP SRC and DEST fields for hashing */ + if (!(cmd->data & RXH_IP_SRC) || !(cmd->data & RXH_IP_DST)) + return -EINVAL; + + err = spnic_get_rss_type(nic_dev->hwdev, rss_type); + if (err) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to get rss type\n"); + return -EFAULT; + } + + err = update_rss_hash_opts(nic_dev, cmd, rss_type); + if (err) + return err; + + err = spnic_set_rss_type(nic_dev->hwdev, *rss_type); + if (err) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to set rss type\n"); + return -EFAULT; + } + + nicif_info(nic_dev, drv, nic_dev->netdev, "Set rss hash options success\n"); + + return 0; +} + +static void convert_rss_type(u8 rss_opt, struct ethtool_rxnfc *cmd) +{ + if (rss_opt) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; +} + +static int spnic_convert_rss_type(struct spnic_nic_dev *nic_dev, struct nic_rss_type *rss_type, + struct ethtool_rxnfc *cmd) +{ + cmd->data = RXH_IP_SRC | RXH_IP_DST; + switch (cmd->flow_type) { + case TCP_V4_FLOW: + convert_rss_type(rss_type->tcp_ipv4, cmd); + break; + case TCP_V6_FLOW: + convert_rss_type(rss_type->tcp_ipv6, cmd); + break; + case UDP_V4_FLOW: + convert_rss_type(rss_type->udp_ipv4, cmd); + break; + case UDP_V6_FLOW: + convert_rss_type(rss_type->udp_ipv6, cmd); + break; + case IPV4_FLOW: + case IPV6_FLOW: + break; + default: + nicif_err(nic_dev, drv, nic_dev->netdev, "Unsupported flow type\n"); + cmd->data = 0; + return -EINVAL; + } + + return 0; +} + +static int spnic_get_rss_hash_opts(struct spnic_nic_dev *nic_dev, struct ethtool_rxnfc *cmd) +{ + struct nic_rss_type rss_type = {0}; + int err; + + cmd->data = 0; + + if (!test_bit(SPNIC_RSS_ENABLE, &nic_dev->flags)) + return 0; + + err = spnic_get_rss_type(nic_dev->hwdev, &rss_type); + if (err) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to get rss type\n"); + return err; + } + + return spnic_convert_rss_type(nic_dev, &rss_type, cmd); +} + +int spnic_get_rxnfc(struct net_device 
*netdev, struct ethtool_rxnfc *cmd, u32 *rule_locs) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + int err = 0; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = nic_dev->q_params.num_qps; + break; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = nic_dev->rx_flow_rule.tot_num_rules; + break; + case ETHTOOL_GRXCLSRULE: + err = spnic_ethtool_get_flow(nic_dev, cmd, cmd->fs.location); + break; + case ETHTOOL_GRXCLSRLALL: + err = spnic_ethtool_get_all_flows(nic_dev, cmd, rule_locs); + break; + case ETHTOOL_GRXFH: + err = spnic_get_rss_hash_opts(nic_dev, cmd); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +int spnic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + int err = 0; + + switch (cmd->cmd) { + case ETHTOOL_SRXFH: + err = spnic_set_rss_hash_opts(nic_dev, cmd); + break; + case ETHTOOL_SRXCLSRLINS: + err = spnic_ethtool_flow_replace(nic_dev, &cmd->fs); + break; + case ETHTOOL_SRXCLSRLDEL: + err = spnic_ethtool_flow_remove(nic_dev, cmd->fs.location); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static u16 spnic_max_channels(struct spnic_nic_dev *nic_dev) +{ + u8 tcs = (u8)netdev_get_num_tc(nic_dev->netdev); + + return tcs ? nic_dev->max_qps / tcs : nic_dev->max_qps; +} + +static u16 spnic_curr_channels(struct spnic_nic_dev *nic_dev) +{ + if (netif_running(nic_dev->netdev)) + return nic_dev->q_params.num_rss ? nic_dev->q_params.num_rss : 1; + else + return min_t(u16, spnic_max_channels(nic_dev), + nic_dev->q_params.rss_limit); +} + +void spnic_get_channels(struct net_device *netdev, struct ethtool_channels *channels) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + + channels->max_rx = 0; + channels->max_tx = 0; + channels->max_other = 0; + /* report maximum channels */ + channels->max_combined = spnic_max_channels(nic_dev); + channels->rx_count = 0; + channels->tx_count = 0; + channels->other_count = 0; + /* report flow director queues as maximum channels */ + channels->combined_count = spnic_curr_channels(nic_dev); +} + +void spnic_update_num_qps(struct net_device *netdev) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + u16 num_qps; + u8 tcs; + + /* change num_qps to change counter in ethtool -S */ + tcs = (u8)netdev_get_num_tc(nic_dev->netdev); + nic_dev->q_params.num_tc = tcs; + num_qps = (u16)(nic_dev->q_params.rss_limit * (tcs ? 
tcs : 1)); + nic_dev->q_params.num_qps = min_t(u16, nic_dev->max_qps, num_qps); +} + +static int spnic_validate_channel_parameter(struct net_device *netdev, + struct ethtool_channels *channels) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + u16 max_channel = spnic_max_channels(nic_dev); + unsigned int count = channels->combined_count; + + if (!count) { + nicif_err(nic_dev, drv, netdev, "Unsupported combined_count=0\n"); + return -EINVAL; + } + + if (channels->tx_count || channels->rx_count || channels->other_count) { + nicif_err(nic_dev, drv, netdev, "Setting rx/tx/other count not supported\n"); + return -EINVAL; + } + + if (count > max_channel) { + nicif_err(nic_dev, drv, netdev, "Combined count %u exceeds limit %u\n", + count, max_channel); + return -EINVAL; + } + + return 0; +} + +static void change_num_channel_reopen_handler(struct spnic_nic_dev *nic_dev, const void *priv_data) +{ + spnic_set_default_rss_indir(nic_dev->netdev); +} + +int spnic_set_channels(struct net_device *netdev, struct ethtool_channels *channels) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + struct spnic_dyna_txrxq_params q_params = {0}; + unsigned int count = channels->combined_count; + int err; + + if (spnic_validate_channel_parameter(netdev, channels)) + return -EINVAL; + + if (!test_bit(SPNIC_RSS_ENABLE, &nic_dev->flags)) { + nicif_err(nic_dev, drv, netdev, + "This function doesn't support RSS, only 1 queue pair is supported\n"); + return -EOPNOTSUPP; + } + + nicif_info(nic_dev, drv, netdev, "Set max combined queue number from %u to %u\n", + nic_dev->q_params.rss_limit, count); + + if (netif_running(netdev)) { + q_params = nic_dev->q_params; + q_params.rss_limit = (u16)count; + q_params.txqs_res = NULL; + q_params.rxqs_res = NULL; + q_params.irq_cfg = NULL; + + nicif_info(nic_dev, drv, netdev, "Restarting channel\n"); + err = spnic_change_channel_settings(nic_dev, &q_params, + change_num_channel_reopen_handler, NULL); + if (err) { + nicif_err(nic_dev, drv, netdev, "Failed to change channel settings\n"); + return -EFAULT; + } + } else { + /* Discard user configured rss */ + spnic_set_default_rss_indir(netdev); + nic_dev->q_params.rss_limit = (u16)count; + spnic_update_num_qps(netdev); + } + + return 0; +} + +static int set_rss_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + int err; + + if (indir) { + err = spnic_rss_set_indir_tbl(nic_dev->hwdev, indir); + if (err) { + nicif_err(nic_dev, drv, netdev, "Failed to set rss indir table\n"); + return -EFAULT; + } + clear_bit(SPNIC_RSS_DEFAULT_INDIR, &nic_dev->flags); + + memcpy(nic_dev->rss_indir, indir, + sizeof(u32) * SPNIC_RSS_INDIR_SIZE); + nicif_info(nic_dev, drv, netdev, "Change rss indir success\n"); + } + + if (key) { + err = spnic_rss_set_hash_key(nic_dev->hwdev, key); + if (err) { + nicif_err(nic_dev, drv, netdev, "Failed to set rss key\n"); + return -EFAULT; + } + + copy_value_to_rss_hkey(nic_dev, key); + nicif_info(nic_dev, drv, netdev, "Change rss key success\n"); + } + + return 0; +} + +u32 spnic_get_rxfh_key_size(struct net_device *netdev) +{ + return SPNIC_RSS_KEY_SIZE; +} + +int spnic_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + int err = 0; + + if (!test_bit(SPNIC_RSS_ENABLE, &nic_dev->flags)) { + nicif_err(nic_dev, drv, nic_dev->netdev, "RSS is disabled\n"); + return -EOPNOTSUPP; + } + + if (hfunc) + *hfunc = nic_dev->rss_hash_engine ? 
+ ETH_RSS_HASH_TOP : ETH_RSS_HASH_XOR; + + if (indir) { + err = spnic_rss_get_indir_tbl(nic_dev->hwdev, indir); + if (err) + return -EFAULT; + } + + if (key) + memcpy(key, nic_dev->rss_hkey, SPNIC_RSS_KEY_SIZE); + + return err; +} + +int spnic_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, const u8 hfunc) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + int err = 0; + + if (!test_bit(SPNIC_RSS_ENABLE, &nic_dev->flags)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Setting rss parameters is not supported when RSS is disabled\n"); + return -EOPNOTSUPP; + } + + if (test_bit(SPNIC_DCB_ENABLE, &nic_dev->flags) && indir) { + nicif_err(nic_dev, drv, netdev, "Setting indir table is not supported when DCB is enabled\n"); + return -EOPNOTSUPP; + } + + if (hfunc != ETH_RSS_HASH_NO_CHANGE) { + if (hfunc != ETH_RSS_HASH_TOP && hfunc != ETH_RSS_HASH_XOR) { + nicif_err(nic_dev, drv, netdev, "Only TOP and XOR hfunc types are supported\n"); + return -EOPNOTSUPP; + } + + nic_dev->rss_hash_engine = (hfunc == ETH_RSS_HASH_XOR) ? + SPNIC_RSS_HASH_ENGINE_TYPE_XOR : + SPNIC_RSS_HASH_ENGINE_TYPE_TOEP; + err = spnic_rss_set_hash_engine(nic_dev->hwdev, nic_dev->rss_hash_engine); + if (err) + return -EFAULT; + + nicif_info(nic_dev, drv, netdev, "Change hfunc to RSS_HASH_%s success\n", + (hfunc == ETH_RSS_HASH_XOR) ? "XOR" : "TOP"); + } + err = set_rss_rxfh(netdev, indir, key); + + return err; +} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_rss.h b/drivers/net/ethernet/ramaxel/spnic/spnic_rss.h new file mode 100644 index 000000000000..15c930a271be --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/spnic_rss.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#ifndef SPNIC_RSS_H +#define SPNIC_RSS_H + +#include "spnic_nic_dev.h" + +int spnic_rss_init(struct spnic_nic_dev *nic_dev); + +void spnic_rss_deinit(struct spnic_nic_dev *nic_dev); + +int spnic_set_hw_rss_parameters(struct net_device *netdev, u8 rss_en, u8 num_tc, u8 *prio_tc); + +void spnic_init_rss_parameters(struct net_device *netdev); + +void spnic_set_default_rss_indir(struct net_device *netdev); + +void spnic_try_to_enable_rss(struct spnic_nic_dev *nic_dev); + +void spnic_clear_rss_config(struct spnic_nic_dev *nic_dev); + +void spnic_flush_rx_flow_rule(struct spnic_nic_dev *nic_dev); +int spnic_ethtool_get_flow(struct spnic_nic_dev *nic_dev, struct ethtool_rxnfc *info, u32 location); + +int spnic_ethtool_get_all_flows(struct spnic_nic_dev *nic_dev, + struct ethtool_rxnfc *info, u32 *rule_locs); + +int spnic_ethtool_flow_remove(struct spnic_nic_dev *nic_dev, u32 location); + +int spnic_ethtool_flow_replace(struct spnic_nic_dev *nic_dev, struct ethtool_rx_flow_spec *fs); + +/* for ethtool */ +int spnic_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, u32 *rule_locs); + +int spnic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd); + +void spnic_get_channels(struct net_device *netdev, struct ethtool_channels *channels); + +int spnic_set_channels(struct net_device *netdev, struct ethtool_channels *channels); + +u32 spnic_get_rxfh_key_size(struct net_device *netdev); + +int spnic_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc); + +int spnic_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, const u8 hfunc); + +#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_rss_cfg.c b/drivers/net/ethernet/ramaxel/spnic/spnic_rss_cfg.c new file mode 100644 index 000000000000..b5be80cc304f --- 
/dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/spnic_rss_cfg.c @@ -0,0 +1,390 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include <linux/kernel.h> +#include <linux/etherdevice.h> +#include <linux/netdevice.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/dcbnl.h> + +#include "sphw_crm.h" +#include "spnic_nic_cfg.h" +#include "sphw_hw.h" +#include "spnic_nic.h" +#include "sphw_common.h" + +int spnic_rss_template_alloc(void *hwdev) +{ + struct spnic_rss_template_mgmt template_mgmt; + struct spnic_nic_cfg *nic_cfg = NULL; + u16 out_size = sizeof(template_mgmt); + int err; + + if (!hwdev) + return -EINVAL; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + + memset(&template_mgmt, 0, sizeof(struct spnic_rss_template_mgmt)); + + template_mgmt.func_id = sphw_global_func_id(hwdev); + template_mgmt.cmd = NIC_RSS_CMD_TEMP_ALLOC; + + err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_RSS_TEMP_MGR, + &template_mgmt, sizeof(template_mgmt), + &template_mgmt, &out_size); + if (err || !out_size || template_mgmt.msg_head.status) { + nic_err(nic_cfg->dev_hdl, "Failed to alloc rss template, err: %d, status: 0x%x, out size: 0x%x\n", + err, template_mgmt.msg_head.status, out_size); + return -EINVAL; + } + + return 0; +} + +int spnic_rss_template_free(void *hwdev) +{ + struct spnic_rss_template_mgmt template_mgmt; + struct spnic_nic_cfg *nic_cfg = NULL; + u16 out_size = sizeof(template_mgmt); + int err; + + if (!hwdev) + return -EINVAL; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + memset(&template_mgmt, 0, sizeof(struct spnic_rss_template_mgmt)); + + template_mgmt.func_id = sphw_global_func_id(hwdev); + template_mgmt.cmd = NIC_RSS_CMD_TEMP_FREE; + + err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_RSS_TEMP_MGR, + &template_mgmt, sizeof(template_mgmt), + &template_mgmt, &out_size); + if (err || !out_size || template_mgmt.msg_head.status) { + nic_err(nic_cfg->dev_hdl, "Failed to free rss template, err: %d, status: 0x%x, out size: 0x%x\n", + err, template_mgmt.msg_head.status, out_size); + return -EINVAL; + } + + return 0; +} + +static int spnic_rss_cfg_hash_key(struct spnic_nic_cfg *nic_cfg, u8 opcode, u8 *key) +{ + struct spnic_cmd_rss_hash_key hash_key; + u16 out_size = sizeof(hash_key); + int err; + + memset(&hash_key, 0, sizeof(struct spnic_cmd_rss_hash_key)); + hash_key.func_id = sphw_global_func_id(nic_cfg->hwdev); + hash_key.opcode = opcode; + + if (opcode == SPNIC_CMD_OP_SET) + memcpy(hash_key.key, key, SPNIC_RSS_KEY_SIZE); + + err = l2nic_msg_to_mgmt_sync(nic_cfg->hwdev, + SPNIC_NIC_CMD_CFG_RSS_HASH_KEY, + &hash_key, sizeof(hash_key), + &hash_key, &out_size); + if (err || !out_size || hash_key.msg_head.status) { + nic_err(nic_cfg->dev_hdl, "Failed to %s hash key, err: %d, status: 0x%x, out size: 0x%x\n", + opcode == SPNIC_CMD_OP_SET ? 
"set" : "get", + err, hash_key.msg_head.status, out_size); + return -EINVAL; + } + + if (opcode == SPNIC_CMD_OP_GET) + memcpy(key, hash_key.key, SPNIC_RSS_KEY_SIZE); + + return 0; +} + +int spnic_rss_set_hash_key(void *hwdev, const u8 *key) +{ + struct spnic_nic_cfg *nic_cfg = NULL; + u8 hash_key[SPNIC_RSS_KEY_SIZE]; + + if (!hwdev || !key) + return -EINVAL; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + memcpy(hash_key, key, SPNIC_RSS_KEY_SIZE); + return spnic_rss_cfg_hash_key(nic_cfg, SPNIC_CMD_OP_SET, hash_key); +} + +int spnic_rss_get_hash_key(void *hwdev, u8 *key) +{ + struct spnic_nic_cfg *nic_cfg = NULL; + + if (!hwdev || !key) + return -EINVAL; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + return spnic_rss_cfg_hash_key(nic_cfg, SPNIC_CMD_OP_GET, key); +} + +int spnic_rss_get_indir_tbl(void *hwdev, u32 *indir_table) +{ + struct sphw_cmd_buf *cmd_buf = NULL; + struct spnic_nic_cfg *nic_cfg = NULL; + u16 *indir_tbl = NULL; + int err, i; + + if (!hwdev || !indir_table) + return -EINVAL; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + cmd_buf = sphw_alloc_cmd_buf(hwdev); + if (!cmd_buf) { + nic_err(nic_cfg->dev_hdl, "Failed to allocate cmd_buf.\n"); + return -ENOMEM; + } + + cmd_buf->size = sizeof(struct nic_rss_indirect_tbl); + err = sphw_cmdq_detail_resp(hwdev, SPHW_MOD_L2NIC, SPNIC_UCODE_CMD_GET_RSS_INDIR_TABLE, + cmd_buf, cmd_buf, NULL, 0, SPHW_CHANNEL_NIC); + if (err) { + nic_err(nic_cfg->dev_hdl, "Failed to get rss indir table\n"); + goto get_indir_tbl_failed; + } + + indir_tbl = (u16 *)cmd_buf->buf; + for (i = 0; i < SPNIC_RSS_INDIR_SIZE; i++) + indir_table[i] = *(indir_tbl + i); + +get_indir_tbl_failed: + sphw_free_cmd_buf(hwdev, cmd_buf); + + return err; +} + +int spnic_rss_set_indir_tbl(void *hwdev, const u32 *indir_table) +{ + struct nic_rss_indirect_tbl *indir_tbl = NULL; + struct sphw_cmd_buf *cmd_buf = NULL; + struct spnic_nic_cfg *nic_cfg = NULL; + u32 *temp = NULL; + u32 i, size; + u64 out_param = 0; + int err; + + if (!hwdev || !indir_table) + return -EINVAL; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + cmd_buf = sphw_alloc_cmd_buf(hwdev); + if (!cmd_buf) { + nic_err(nic_cfg->dev_hdl, "Failed to allocate cmd buf\n"); + return -ENOMEM; + } + + cmd_buf->size = sizeof(struct nic_rss_indirect_tbl); + indir_tbl = (struct nic_rss_indirect_tbl *)cmd_buf->buf; + memset(indir_tbl, 0, sizeof(*indir_tbl)); + + for (i = 0; i < SPNIC_RSS_INDIR_SIZE; i++) + indir_tbl->entry[i] = (u16)(*(indir_table + i)); + + size = sizeof(indir_tbl->entry) / sizeof(u32); + temp = (u32 *)indir_tbl->entry; + for (i = 0; i < size; i++) + temp[i] = cpu_to_be32(temp[i]); + + err = sphw_cmdq_direct_resp(hwdev, SPHW_MOD_L2NIC, SPNIC_UCODE_CMD_SET_RSS_INDIR_TABLE, + cmd_buf, &out_param, 0, SPHW_CHANNEL_NIC); + if (err || out_param != 0) { + nic_err(nic_cfg->dev_hdl, "Failed to set rss indir table\n"); + err = -EFAULT; + } + + sphw_free_cmd_buf(hwdev, cmd_buf); + return err; +} + +#define SPNIC_RSS_TYPE_VALID_SHIFT 23 +#define SPNIC_RSS_TYPE_TCP_IPV6_EXT_SHIFT 24 +#define SPNIC_RSS_TYPE_IPV6_EXT_SHIFT 25 +#define SPNIC_RSS_TYPE_TCP_IPV6_SHIFT 26 +#define SPNIC_RSS_TYPE_IPV6_SHIFT 27 +#define SPNIC_RSS_TYPE_TCP_IPV4_SHIFT 28 +#define SPNIC_RSS_TYPE_IPV4_SHIFT 29 +#define SPNIC_RSS_TYPE_UDP_IPV6_SHIFT 30 +#define SPNIC_RSS_TYPE_UDP_IPV4_SHIFT 31 +#define SPNIC_RSS_TYPE_SET(val, member) (((u32)(val) & 0x1) << SPNIC_RSS_TYPE_##member##_SHIFT) + +#define SPNIC_RSS_TYPE_GET(val, member) (((u32)(val) >> SPNIC_RSS_TYPE_##member##_SHIFT) & 0x1) + 
+int spnic_set_rss_type(void *hwdev, struct nic_rss_type rss_type) +{ + struct nic_rss_context_tbl *ctx_tbl = NULL; + struct sphw_cmd_buf *cmd_buf = NULL; + struct spnic_nic_cfg *nic_cfg = NULL; + u32 ctx = 0; + u64 out_param = 0; + int err; + + if (!hwdev) + return -EINVAL; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + + cmd_buf = sphw_alloc_cmd_buf(hwdev); + if (!cmd_buf) { + nic_err(nic_cfg->dev_hdl, "Failed to allocate cmd buf\n"); + return -ENOMEM; + } + + ctx |= SPNIC_RSS_TYPE_SET(1, VALID) | + SPNIC_RSS_TYPE_SET(rss_type.ipv4, IPV4) | + SPNIC_RSS_TYPE_SET(rss_type.ipv6, IPV6) | + SPNIC_RSS_TYPE_SET(rss_type.ipv6_ext, IPV6_EXT) | + SPNIC_RSS_TYPE_SET(rss_type.tcp_ipv4, TCP_IPV4) | + SPNIC_RSS_TYPE_SET(rss_type.tcp_ipv6, TCP_IPV6) | + SPNIC_RSS_TYPE_SET(rss_type.tcp_ipv6_ext, TCP_IPV6_EXT) | + SPNIC_RSS_TYPE_SET(rss_type.udp_ipv4, UDP_IPV4) | + SPNIC_RSS_TYPE_SET(rss_type.udp_ipv6, UDP_IPV6); + + cmd_buf->size = sizeof(struct nic_rss_context_tbl); + ctx_tbl = (struct nic_rss_context_tbl *)cmd_buf->buf; + memset(ctx_tbl, 0, sizeof(*ctx_tbl)); + ctx_tbl->ctx = cpu_to_be32(ctx); + + /* cfg the rss context table by command queue */ + err = sphw_cmdq_direct_resp(hwdev, SPHW_MOD_L2NIC, SPNIC_UCODE_CMD_SET_RSS_CONTEXT_TABLE, + cmd_buf, &out_param, 0, SPHW_CHANNEL_NIC); + + sphw_free_cmd_buf(hwdev, cmd_buf); + + if (err || out_param != 0) { + nic_err(nic_cfg->dev_hdl, "Failed to set rss context table, err: %d\n", + err); + return -EFAULT; + } + + return 0; +} + +int spnic_get_rss_type(void *hwdev, struct nic_rss_type *rss_type) +{ + struct spnic_rss_context_table ctx_tbl; + u16 out_size = sizeof(ctx_tbl); + struct spnic_nic_cfg *nic_cfg = NULL; + int err; + + if (!hwdev || !rss_type) + return -EINVAL; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + + memset(&ctx_tbl, 0, sizeof(struct spnic_rss_context_table)); + ctx_tbl.func_id = sphw_global_func_id(hwdev); + + err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_GET_RSS_CTX_TBL, + &ctx_tbl, sizeof(ctx_tbl), + &ctx_tbl, &out_size); + if (err || !out_size || ctx_tbl.msg_head.status) { + nic_err(nic_cfg->dev_hdl, "Failed to get hash type, err: %d, status: 0x%x, out size: 0x%x\n", + err, ctx_tbl.msg_head.status, out_size); + return -EINVAL; + } + + rss_type->ipv4 = SPNIC_RSS_TYPE_GET(ctx_tbl.context, IPV4); + rss_type->ipv6 = SPNIC_RSS_TYPE_GET(ctx_tbl.context, IPV6); + rss_type->ipv6_ext = SPNIC_RSS_TYPE_GET(ctx_tbl.context, IPV6_EXT); + rss_type->tcp_ipv4 = SPNIC_RSS_TYPE_GET(ctx_tbl.context, TCP_IPV4); + rss_type->tcp_ipv6 = SPNIC_RSS_TYPE_GET(ctx_tbl.context, TCP_IPV6); + rss_type->tcp_ipv6_ext = SPNIC_RSS_TYPE_GET(ctx_tbl.context, TCP_IPV6_EXT); + rss_type->udp_ipv4 = SPNIC_RSS_TYPE_GET(ctx_tbl.context, UDP_IPV4); + rss_type->udp_ipv6 = SPNIC_RSS_TYPE_GET(ctx_tbl.context, UDP_IPV6); + + return 0; +} + +static int spnic_rss_cfg_hash_engine(struct spnic_nic_cfg *nic_cfg, u8 opcode, u8 *type) +{ + struct spnic_cmd_rss_engine_type hash_type; + u16 out_size = sizeof(hash_type); + int err; + + memset(&hash_type, 0, sizeof(struct spnic_cmd_rss_engine_type)); + + hash_type.func_id = sphw_global_func_id(nic_cfg->hwdev); + hash_type.opcode = opcode; + + if (opcode == SPNIC_CMD_OP_SET) + hash_type.hash_engine = *type; + + err = l2nic_msg_to_mgmt_sync(nic_cfg->hwdev, SPNIC_NIC_CMD_CFG_RSS_HASH_ENGINE, + &hash_type, sizeof(hash_type), + &hash_type, &out_size); + if (err || !out_size || hash_type.msg_head.status) { + nic_err(nic_cfg->dev_hdl, "Failed to %s hash engine, err: %d, status: 0x%x, out size: 0x%x\n", + opcode == 
SPNIC_CMD_OP_SET ? "set" : "get", + err, hash_type.msg_head.status, out_size); + return -EIO; + } + + if (opcode == SPNIC_CMD_OP_GET) + *type = hash_type.hash_engine; + + return 0; +} + +int spnic_rss_set_hash_engine(void *hwdev, u8 type) +{ + struct spnic_nic_cfg *nic_cfg = NULL; + + if (!hwdev) + return -EINVAL; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + return spnic_rss_cfg_hash_engine(nic_cfg, SPNIC_CMD_OP_SET, &type); +} + +int spnic_rss_get_hash_engine(void *hwdev, u8 *type) +{ + struct spnic_nic_cfg *nic_cfg = NULL; + + if (!hwdev || !type) + return -EINVAL; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + return spnic_rss_cfg_hash_engine(nic_cfg, SPNIC_CMD_OP_GET, type); +} + +int spnic_rss_cfg(void *hwdev, u8 rss_en, u8 tc_num, u8 *prio_tc, u16 num_qps) +{ + struct spnic_cmd_rss_config rss_cfg; + u16 out_size = sizeof(rss_cfg); + struct spnic_nic_cfg *nic_cfg = NULL; + int err; + + /* micro code required: number of TC should be power of 2 */ + if (!hwdev || !prio_tc || (tc_num & (tc_num - 1))) + return -EINVAL; + + nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); + memset(&rss_cfg, 0, sizeof(struct spnic_cmd_rss_config)); + rss_cfg.func_id = sphw_global_func_id(hwdev); + rss_cfg.rss_en = rss_en; + rss_cfg.rq_priority_number = tc_num ? (u8)ilog2(tc_num) : 0; + rss_cfg.num_qps = num_qps; + + memcpy(rss_cfg.prio_tc, prio_tc, SPNIC_DCB_UP_MAX); + err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_RSS_CFG, + &rss_cfg, sizeof(rss_cfg), + &rss_cfg, &out_size); + if (err || !out_size || rss_cfg.msg_head.status) { + nic_err(nic_cfg->dev_hdl, "Failed to set rss cfg, err: %d, status: 0x%x, out size: 0x%x\n", + err, rss_cfg.msg_head.status, out_size); + return -EINVAL; + } + + return 0; +} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_rx.c b/drivers/net/ethernet/ramaxel/spnic/spnic_rx.c new file mode 100644 index 000000000000..e58da6771ff4 --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/spnic_rx.c @@ -0,0 +1,1249 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/skbuff.h> +#include <linux/dma-mapping.h> +#include <linux/interrupt.h> +#include <linux/etherdevice.h> +#include <linux/netdevice.h> +#include <linux/device.h> +#include <linux/pci.h> +#include <linux/u64_stats_sync.h> +#include <linux/ip.h> +#include <linux/tcp.h> +#include <linux/sctp.h> +#include <linux/pkt_sched.h> +#include <linux/ipv6.h> +#include <linux/module.h> +#include <linux/compiler.h> + +#include "sphw_crm.h" +#include "sphw_common.h" +#include "spnic_nic_qp.h" +#include "spnic_nic_io.h" +#include "spnic_nic_dev.h" +#include "spnic_rx.h" +#include "spnic_rss.h" + +static u32 rq_pi_rd_en; +module_param(rq_pi_rd_en, uint, 0644); +MODULE_PARM_DESC(rq_pi_rd_en, "Enable rq read pi from host, default update pi by doorbell (default=0)"); + +/* performance: ci addr RTE_CACHE_SIZE(64B) alignment */ +#define SPNIC_RX_HDR_SIZE 256 +#define SPNIC_RX_BUFFER_WRITE 16 + +#define SPNIC_RX_TCP_PKT 0x3 +#define SPNIC_RX_UDP_PKT 0x4 +#define SPNIC_RX_SCTP_PKT 0x7 + +#define SPNIC_RX_IPV4_PKT 0 +#define SPNIC_RX_IPV6_PKT 1 +#define SPNIC_RX_INVALID_IP_TYPE 2 + +#define SPNIC_RX_PKT_FORMAT_NON_TUNNEL 0 +#define SPNIC_RX_PKT_FORMAT_VXLAN 1 + +#define RXQ_STATS_INC(rxq, field) \ +do { \ + u64_stats_update_begin(&(rxq)->rxq_stats.syncp); \ + (rxq)->rxq_stats.field++; \ + 
u64_stats_update_end(&(rxq)->rxq_stats.syncp); \ +} while (0) + +static bool rx_alloc_mapped_page(struct spnic_nic_dev *nic_dev, + struct spnic_rx_info *rx_info) +{ + struct pci_dev *pdev = nic_dev->pdev; + struct page *page = rx_info->page; + dma_addr_t dma = rx_info->buf_dma_addr; + + if (likely(dma)) + return true; + + /* alloc new page for storage */ + page = alloc_pages_node(NUMA_NO_NODE, GFP_ATOMIC | __GFP_COMP, nic_dev->page_order); + if (unlikely(!page)) + return false; + + /* map page for use */ + dma = dma_map_page(&pdev->dev, page, 0, nic_dev->dma_rx_buff_size, DMA_FROM_DEVICE); + + /* if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (unlikely(dma_mapping_error(&pdev->dev, dma))) { + __free_pages(page, nic_dev->page_order); + return false; + } + + rx_info->page = page; + rx_info->buf_dma_addr = dma; + rx_info->page_offset = 0; + + return true; +} + +static u32 spnic_rx_fill_wqe(struct spnic_rxq *rxq) +{ + struct net_device *netdev = rxq->netdev; + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + int rq_wqe_len = rxq->rq->wq.wqebb_size; + struct spnic_rq_wqe *rq_wqe = NULL; + struct spnic_rx_info *rx_info = NULL; + u32 i; + + for (i = 0; i < rxq->q_depth; i++) { + rx_info = &rxq->rx_info[i]; + rq_wqe = spnic_rq_wqe_addr(rxq->rq, (u16)i); + + if (rxq->rq->wqe_type == SPNIC_EXTEND_RQ_WQE) { + /* unit of cqe length is 16B */ + sphw_set_sge(&rq_wqe->extend_wqe.cqe_sect.sge, rx_info->cqe_dma, + (sizeof(struct spnic_rq_cqe) >> SPNIC_CQE_SIZE_SHIFT)); + /* use fixed len */ + rq_wqe->extend_wqe.buf_desc.sge.len = + nic_dev->rx_buff_len; + } else { + rq_wqe->normal_wqe.cqe_hi_addr = upper_32_bits(rx_info->cqe_dma); + rq_wqe->normal_wqe.cqe_lo_addr = lower_32_bits(rx_info->cqe_dma); + } + + sphw_hw_be32_len(rq_wqe, rq_wqe_len); + rx_info->rq_wqe = rq_wqe; + } + + return i; +} + +static struct sk_buff *stub_rx_alloc_skb(struct spnic_rxq *rxq, struct spnic_rx_info *rx_info) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(rxq->netdev); + struct sk_buff *skb = NULL; + u16 random_id; + dma_addr_t addr; + int err; + + get_random_bytes(&random_id, sizeof(u16)); + + rx_info->skb_len = SPNIC_RX_HDR_SIZE + (random_id & 0x3EFF); + skb = netdev_alloc_skb_ip_align(rxq->netdev, rx_info->skb_len); + if (!skb) { + nicif_err(nic_dev, drv, rxq->netdev, "Failed to allocate Rx SKB\n"); + return NULL; + } + + addr = dma_map_single(&nic_dev->pdev->dev, skb->data, rx_info->skb_len, + DMA_FROM_DEVICE); + + err = dma_mapping_error(&nic_dev->pdev->dev, addr); + if (err) { + nicif_err(nic_dev, drv, rxq->netdev, "Failed to map Rx DMA, err = %d\n", err); + goto err_rx_map; + } + + rx_info->buf_dma_addr = addr; + + return skb; + +err_rx_map: + dev_kfree_skb_any(skb); + return NULL; +} + +static u32 stub_spnic_rx_fill_buffers(struct spnic_rxq *rxq) +{ + struct net_device *netdev = rxq->netdev; + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + u32 i, free_wqebbs = rxq->delta - 1; + struct spnic_rq_wqe *rq_wqe = NULL; + struct spnic_rx_info *rx_info = NULL; + struct sk_buff *skb = NULL; + dma_addr_t dma_addr; + + for (i = 0; i < free_wqebbs; i++) { + rx_info = &rxq->rx_info[rxq->next_to_update]; + + skb = stub_rx_alloc_skb(rxq, rx_info); + if (!skb) { + nicif_err(nic_dev, drv, rxq->netdev, "Failed to alloc Rx skb\n"); + break; + } + + rq_wqe = rx_info->rq_wqe; + rx_info->saved_skb = skb; + dma_addr = rx_info->buf_dma_addr; + + if (rxq->rq->wqe_type == SPNIC_EXTEND_RQ_WQE) { + rq_wqe->extend_wqe.buf_desc.sge.hi_addr = + 
sphw_hw_be32(upper_32_bits(dma_addr)); + rq_wqe->extend_wqe.buf_desc.sge.lo_addr = + sphw_hw_be32(lower_32_bits(dma_addr)); + rq_wqe->extend_wqe.buf_desc.sge.len = + sphw_hw_be32(rx_info->skb_len); + } else { + rq_wqe->normal_wqe.buf_hi_addr = + sphw_hw_be32(upper_32_bits(dma_addr)); + rq_wqe->normal_wqe.buf_lo_addr = + sphw_hw_be32(lower_32_bits(dma_addr)); + } + rxq->next_to_update = (rxq->next_to_update + 1) & rxq->q_mask; + } + + if (likely(i)) { + if (!rq_pi_rd_en) { + spnic_write_db(rxq->rq, rxq->q_id & (SPNIC_DCB_COS_MAX - 1), RQ_CFLAG_DP, + (u16)((u32)rxq->next_to_update << rxq->rq->wqe_type)); + } else { + /* Write all the wqes before pi update */ + wmb(); + + spnic_update_rq_hw_pi(rxq->rq, rxq->next_to_update); + } + rxq->delta -= i; + rxq->next_to_alloc = rxq->next_to_update; + } else { + nicif_err(nic_dev, drv, netdev, "Failed to allocate rx buffers, rxq id: %u\n", + rxq->q_id); + } + + return i; +} + +static u32 spnic_rx_fill_buffers(struct spnic_rxq *rxq) +{ + struct net_device *netdev = rxq->netdev; + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + struct spnic_rq_wqe *rq_wqe = NULL; + struct spnic_rx_info *rx_info = NULL; + dma_addr_t dma_addr; + u32 i, free_wqebbs = rxq->delta - 1; + + for (i = 0; i < free_wqebbs; i++) { + rx_info = &rxq->rx_info[rxq->next_to_update]; + + if (unlikely(!rx_alloc_mapped_page(nic_dev, rx_info))) { + RXQ_STATS_INC(rxq, alloc_rx_buf_err); + break; + } + + dma_addr = rx_info->buf_dma_addr + rx_info->page_offset; + + rq_wqe = rx_info->rq_wqe; + + if (rxq->rq->wqe_type == SPNIC_EXTEND_RQ_WQE) { + rq_wqe->extend_wqe.buf_desc.sge.hi_addr = + sphw_hw_be32(upper_32_bits(dma_addr)); + rq_wqe->extend_wqe.buf_desc.sge.lo_addr = + sphw_hw_be32(lower_32_bits(dma_addr)); + } else { + rq_wqe->normal_wqe.buf_hi_addr = + sphw_hw_be32(upper_32_bits(dma_addr)); + rq_wqe->normal_wqe.buf_lo_addr = + sphw_hw_be32(lower_32_bits(dma_addr)); + } + rxq->next_to_update = (rxq->next_to_update + 1) & rxq->q_mask; + } + + if (likely(i)) { + if (!rq_pi_rd_en) { + spnic_write_db(rxq->rq, rxq->q_id & (SPNIC_DCB_COS_MAX - 1), RQ_CFLAG_DP, + (u16)((u32)rxq->next_to_update << rxq->rq->wqe_type)); + } else { + /* Write all the wqes before pi update */ + wmb(); + + spnic_update_rq_hw_pi(rxq->rq, rxq->next_to_update); + } + rxq->delta -= i; + rxq->next_to_alloc = rxq->next_to_update; + } else if (free_wqebbs == rxq->q_depth - 1) { + RXQ_STATS_INC(rxq, rx_buf_empty); + } + + return i; +} + +static u32 spnic_rx_alloc_buffers(struct spnic_nic_dev *nic_dev, u32 rq_depth, + struct spnic_rx_info *rx_info_arr) +{ + u32 free_wqebbs = rq_depth - 1; + u32 idx; + + for (idx = 0; idx < free_wqebbs; idx++) { + if (!rx_alloc_mapped_page(nic_dev, &rx_info_arr[idx])) + break; + } + + return idx; +} + +void spnic_rx_free_buffers(struct spnic_nic_dev *nic_dev, u32 q_depth, + struct spnic_rx_info *rx_info_arr) +{ + struct spnic_rx_info *rx_info = NULL; + u32 i; + + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < q_depth; i++) { + rx_info = &rx_info_arr[i]; + + if (rx_info->buf_dma_addr) { + dma_unmap_page(&nic_dev->pdev->dev, rx_info->buf_dma_addr, + nic_dev->dma_rx_buff_size, DMA_FROM_DEVICE); + rx_info->buf_dma_addr = 0; + } + + if (rx_info->page) { + __free_pages(rx_info->page, nic_dev->page_order); + rx_info->page = NULL; + } + } +} + +void stub_spnic_rx_free_buffers(struct spnic_rxq *rxq) +{ + struct spnic_rx_info *rx_info = NULL; + u32 i; + + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rxq->q_depth; i++) { + rx_info = &rxq->rx_info[i]; + + if (rx_info->buf_dma_addr) { + 
dma_unmap_single(rxq->dev, rx_info->buf_dma_addr, rx_info->skb_len,
+					 DMA_FROM_DEVICE);
+			rx_info->buf_dma_addr = 0;
+		}
+
+		if (rx_info->saved_skb) {
+			dev_kfree_skb_any(rx_info->saved_skb);
+			rx_info->saved_skb = NULL;
+		}
+	}
+}
+
+static void spnic_reuse_rx_page(struct spnic_rxq *rxq, struct spnic_rx_info *old_rx_info)
+{
+	struct spnic_rx_info *new_rx_info;
+	u16 nta = rxq->next_to_alloc;
+
+	new_rx_info = &rxq->rx_info[nta];
+
+	/* update, and store next to alloc */
+	nta++;
+	rxq->next_to_alloc = (nta < rxq->q_depth) ? nta : 0;
+
+	new_rx_info->page = old_rx_info->page;
+	new_rx_info->page_offset = old_rx_info->page_offset;
+	new_rx_info->buf_dma_addr = old_rx_info->buf_dma_addr;
+
+	/* sync the buffer for use by the device */
+	dma_sync_single_range_for_device(rxq->dev, new_rx_info->buf_dma_addr,
+					 new_rx_info->page_offset, rxq->buf_len,
+					 DMA_FROM_DEVICE);
+}
+
+static bool spnic_add_rx_frag(struct spnic_rxq *rxq, struct spnic_rx_info *rx_info,
+			      struct sk_buff *skb, u32 size)
+{
+	struct page *page;
+	u8 *va;
+
+	page = rx_info->page;
+	va = (u8 *)page_address(page) + rx_info->page_offset;
+	prefetch(va);
+#if L1_CACHE_BYTES < 128
+	prefetch(va + L1_CACHE_BYTES);
+#endif
+
+	dma_sync_single_range_for_cpu(rxq->dev, rx_info->buf_dma_addr,
+				      rx_info->page_offset,
+				      rxq->buf_len, DMA_FROM_DEVICE);
+
+	if (size <= SPNIC_RX_HDR_SIZE && !skb_is_nonlinear(skb)) {
+		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
+
+		/* page is not reserved, we can reuse buffer as-is */
+		if (likely(page_to_nid(page) == numa_node_id()))
+			return true;
+
+		/* this page cannot be reused so discard it */
+		put_page(page);
+		return false;
+	}
+
+	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+			(int)rx_info->page_offset, (int)size, rxq->buf_len);
+
+	/* avoid re-using remote pages */
+	if (unlikely(page_to_nid(page) != numa_node_id()))
+		return false;
+
+	/* if we are the only owner of the page we can reuse it */
+	if (unlikely(page_count(page) != 1))
+		return false;
+
+	/* flip page offset to other buffer */
+	rx_info->page_offset ^= rxq->buf_len;
+	get_page(page);
+
+	return true;
+}
+
+static void packaging_skb(struct spnic_rxq *rxq, struct sk_buff *head_skb, u8 sge_num, u32 pkt_len)
+{
+	struct spnic_rx_info *rx_info = NULL;
+	struct sk_buff *skb = NULL;
+	u8 frag_num = 0;
+	u32 size;
+	u32 sw_ci;
+
+	sw_ci = rxq->cons_idx & rxq->q_mask;
+	skb = head_skb;
+	while (sge_num) {
+		rx_info = &rxq->rx_info[sw_ci];
+		sw_ci = (sw_ci + 1) & rxq->q_mask;
+		if (unlikely(pkt_len > rxq->buf_len)) {
+			size = rxq->buf_len;
+			pkt_len -= rxq->buf_len;
+		} else {
+			size = pkt_len;
+		}
+
+		if (unlikely(frag_num == MAX_SKB_FRAGS)) {
+			frag_num = 0;
+			if (skb == head_skb)
+				skb = skb_shinfo(skb)->frag_list;
+			else
+				skb = skb->next;
+		}
+
+		if (unlikely(skb != head_skb)) {
+			head_skb->len += size;
+			head_skb->data_len += size;
+			head_skb->truesize += rxq->buf_len;
+		}
+
+		if (likely(spnic_add_rx_frag(rxq, rx_info, skb, size))) {
+			spnic_reuse_rx_page(rxq, rx_info);
+		} else {
+			/* we are not reusing the buffer so unmap it */
+			dma_unmap_page(rxq->dev, rx_info->buf_dma_addr,
+				       rxq->dma_rx_buff_size, DMA_FROM_DEVICE);
+		}
+		/* clear contents of rx_info */
+		rx_info->buf_dma_addr = 0;
+		rx_info->page = NULL;
+		sge_num--;
+		frag_num++;
+	}
+}
+
+#define SPNIC_GET_SGE_NUM(pkt_len, rxq)	\
+	((u8)(((pkt_len) >> (rxq)->rx_buff_shift) + \
+	      (((pkt_len) & ((rxq)->buf_len - 1)) ?
1 : 0))) + +static struct sk_buff *spnic_fetch_rx_buffer(struct spnic_rxq *rxq, u32 pkt_len) +{ + struct sk_buff *head_skb = NULL; + struct sk_buff *cur_skb = NULL; + struct sk_buff *skb = NULL; + struct net_device *netdev = rxq->netdev; + u8 sge_num, skb_num; + u16 wqebb_cnt = 0; + + head_skb = netdev_alloc_skb_ip_align(netdev, SPNIC_RX_HDR_SIZE); + if (unlikely(!head_skb)) + return NULL; + + sge_num = SPNIC_GET_SGE_NUM(pkt_len, rxq); + if (likely(sge_num <= MAX_SKB_FRAGS)) + skb_num = 1; + else + skb_num = (sge_num / MAX_SKB_FRAGS) + ((sge_num % MAX_SKB_FRAGS) ? 1 : 0); + + while (unlikely(skb_num > 1)) { + cur_skb = netdev_alloc_skb_ip_align(netdev, SPNIC_RX_HDR_SIZE); + if (unlikely(!cur_skb)) + goto alloc_skb_fail; + + if (!skb) { + skb_shinfo(head_skb)->frag_list = cur_skb; + skb = cur_skb; + } else { + skb->next = cur_skb; + skb = cur_skb; + } + + skb_num--; + } + + prefetchw(head_skb->data); + wqebb_cnt = sge_num; + + packaging_skb(rxq, head_skb, sge_num, pkt_len); + + rxq->cons_idx += wqebb_cnt; + rxq->delta += wqebb_cnt; + + return head_skb; + +alloc_skb_fail: + dev_kfree_skb_any(head_skb); + return NULL; +} + +void spnic_rxq_get_stats(struct spnic_rxq *rxq, struct spnic_rxq_stats *stats) +{ + struct spnic_rxq_stats *rxq_stats = &rxq->rxq_stats; + unsigned int start; + + u64_stats_update_begin(&stats->syncp); + do { + start = u64_stats_fetch_begin(&rxq_stats->syncp); + stats->bytes = rxq_stats->bytes; + stats->packets = rxq_stats->packets; + stats->errors = rxq_stats->csum_errors + + rxq_stats->other_errors; + stats->csum_errors = rxq_stats->csum_errors; + stats->other_errors = rxq_stats->other_errors; + stats->dropped = rxq_stats->dropped; + stats->xdp_dropped = rxq_stats->xdp_dropped; + stats->rx_buf_empty = rxq_stats->rx_buf_empty; + } while (u64_stats_fetch_retry(&rxq_stats->syncp, start)); + u64_stats_update_end(&stats->syncp); +} + +void spnic_rxq_clean_stats(struct spnic_rxq_stats *rxq_stats) +{ + u64_stats_update_begin(&rxq_stats->syncp); + rxq_stats->bytes = 0; + rxq_stats->packets = 0; + rxq_stats->errors = 0; + rxq_stats->csum_errors = 0; + rxq_stats->other_errors = 0; + rxq_stats->dropped = 0; + rxq_stats->xdp_dropped = 0; + rxq_stats->rx_buf_empty = 0; + + rxq_stats->alloc_skb_err = 0; + rxq_stats->alloc_rx_buf_err = 0; + rxq_stats->xdp_large_pkt = 0; + u64_stats_update_end(&rxq_stats->syncp); +} + +static void rxq_stats_init(struct spnic_rxq *rxq) +{ + struct spnic_rxq_stats *rxq_stats = &rxq->rxq_stats; + + u64_stats_init(&rxq_stats->syncp); + spnic_rxq_clean_stats(rxq_stats); +} + +static void spnic_pull_tail(struct sk_buff *skb) +{ + skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; + unsigned char *va = NULL; + unsigned int pull_len; + + /* it is valid to use page_address instead of kmap since we are + * working with pages allocated out of the lomem pool per + * alloc_page(GFP_ATOMIC) + */ + va = skb_frag_address(frag); + + /* we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. 
+ */ + pull_len = eth_get_headlen(skb->dev, va, SPNIC_RX_HDR_SIZE); + + /* align pull length to size of long to optimize memcpy performance */ + skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); + + /* update all of the pointers */ + skb_frag_size_sub(frag, (int)pull_len); + skb_frag_off_add(frag, (int)pull_len); + + skb->data_len -= pull_len; + skb->tail += pull_len; +} + +static void spnic_rx_csum(struct spnic_rxq *rxq, u32 offload_type, u32 status, struct sk_buff *skb) +{ + struct net_device *netdev = rxq->netdev; + u32 pkt_type = SPNIC_GET_RX_PKT_TYPE(offload_type); + u32 ip_type = SPNIC_GET_RX_IP_TYPE(offload_type); + u32 pkt_fmt = SPNIC_GET_RX_TUNNEL_PKT_FORMAT(offload_type); + + u32 csum_err; + + csum_err = SPNIC_GET_RX_CSUM_ERR(status); + + if (unlikely(csum_err == SPNIC_RX_CSUM_IPSU_OTHER_ERR)) + rxq->rxq_stats.other_errors++; + + if (!(netdev->features & NETIF_F_RXCSUM)) + return; + + if (unlikely(csum_err)) { + /* pkt type is recognized by HW, and csum is wrong */ + if (!(csum_err & (SPNIC_RX_CSUM_HW_CHECK_NONE | SPNIC_RX_CSUM_IPSU_OTHER_ERR))) + rxq->rxq_stats.csum_errors++; + skb->ip_summed = CHECKSUM_NONE; + return; + } + + if (ip_type == SPNIC_RX_INVALID_IP_TYPE || + !(pkt_fmt == SPNIC_RX_PKT_FORMAT_NON_TUNNEL || + pkt_fmt == SPNIC_RX_PKT_FORMAT_VXLAN)) { + skb->ip_summed = CHECKSUM_NONE; + return; + } + + switch (pkt_type) { + case SPNIC_RX_TCP_PKT: + case SPNIC_RX_UDP_PKT: + case SPNIC_RX_SCTP_PKT: + skb->ip_summed = CHECKSUM_UNNECESSARY; + break; + default: + skb->ip_summed = CHECKSUM_NONE; + break; + } +} + +static void spnic_rx_gro(struct spnic_rxq *rxq, u32 offload_type, struct sk_buff *skb) +{ + struct net_device *netdev = rxq->netdev; + bool l2_tunnel = false; + + if (!(netdev->features & NETIF_F_GRO)) + return; + + l2_tunnel = + SPNIC_GET_RX_TUNNEL_PKT_FORMAT(offload_type) == SPNIC_RX_PKT_FORMAT_VXLAN ? 1 : 0; + + if (l2_tunnel && skb->ip_summed == CHECKSUM_UNNECESSARY) + /* If we checked the outer header let the stack know */ + skb->csum_level = 1; +} + +static void spnic_copy_lp_data(struct spnic_nic_dev *nic_dev, struct sk_buff *skb) +{ + struct net_device *netdev = nic_dev->netdev; + u8 *lb_buf = nic_dev->lb_test_rx_buf; + void *frag_data = NULL; + int lb_len = nic_dev->lb_pkt_len; + int pkt_offset, frag_len, i; + + if (nic_dev->lb_test_rx_idx == LP_PKT_CNT) { + nic_dev->lb_test_rx_idx = 0; + nicif_warn(nic_dev, rx_err, netdev, "Loopback test warning, receive too many test pkts\n"); + } + + if (skb->len != nic_dev->lb_pkt_len) { + nicif_warn(nic_dev, rx_err, netdev, "Wrong packet length\n"); + nic_dev->lb_test_rx_idx++; + return; + } + + pkt_offset = nic_dev->lb_test_rx_idx * lb_len; + frag_len = (int)skb_headlen(skb); + memcpy(lb_buf + pkt_offset, skb->data, frag_len); + + pkt_offset += frag_len; + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + frag_data = skb_frag_address(&skb_shinfo(skb)->frags[i]); + frag_len = (int)skb_frag_size(&skb_shinfo(skb)->frags[i]); + memcpy(lb_buf + pkt_offset, frag_data, frag_len); + + pkt_offset += frag_len; + } + nic_dev->lb_test_rx_idx++; +} + +static inline void spnic_lro_set_gso_params(struct sk_buff *skb, u16 num_lro) +{ + struct ethhdr *eth = (struct ethhdr *)(skb->data); + __be16 proto; + + proto = __vlan_get_protocol(skb, eth->h_proto, NULL); + + skb_shinfo(skb)->gso_size = (u16)DIV_ROUND_UP((skb->len - skb_headlen(skb)), num_lro); + skb_shinfo(skb)->gso_type = (proto == htons(ETH_P_IP)) ? 
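+	/* worked example (sizes assumed): num_lro == 4 aggregated TCP
+	 * segments carrying 5840 payload bytes beyond the linear header
+	 * give gso_size = DIV_ROUND_UP(5840, 4) = 1460, i.e. the original
+	 * MSS is recovered for the stack.
+	 */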
SKB_GSO_TCPV4 : SKB_GSO_TCPV6; +} + +enum spnic_xdp_pkt { + SPNIC_XDP_PKT_PASS, + SPNIC_XDP_PKT_DROP, +}; + +static inline void update_drop_rx_info(struct spnic_rxq *rxq, u16 weqbb_num) +{ + struct spnic_rx_info *rx_info = NULL; + + while (weqbb_num) { + rx_info = &rxq->rx_info[rxq->cons_idx & rxq->q_mask]; + if (likely(page_to_nid(rx_info->page) == numa_node_id())) + spnic_reuse_rx_page(rxq, rx_info); + + rx_info->buf_dma_addr = 0; + rx_info->page = NULL; + rxq->cons_idx++; + rxq->delta++; + + weqbb_num--; + } +} + +int spnic_run_xdp(struct spnic_rxq *rxq, u32 pkt_len) +{ + struct bpf_prog *xdp_prog = NULL; + struct spnic_rx_info *rx_info = NULL; + struct xdp_buff xdp; + int result = SPNIC_XDP_PKT_PASS; + u16 weqbb_num = 1; /* xdp can only use one rx_buff */ + u8 *va = NULL; + u32 act; + + rcu_read_lock(); + xdp_prog = READ_ONCE(rxq->xdp_prog); + if (!xdp_prog) + goto unlock_rcu; + + if (unlikely(pkt_len > rxq->buf_len)) { + RXQ_STATS_INC(rxq, xdp_large_pkt); + weqbb_num = (u16)(pkt_len >> rxq->rx_buff_shift) + + ((pkt_len & (rxq->buf_len - 1)) ? 1 : 0); + result = SPNIC_XDP_PKT_DROP; + goto xdp_out; + } + + rx_info = &rxq->rx_info[rxq->cons_idx & rxq->q_mask]; + va = (u8 *)page_address(rx_info->page) + rx_info->page_offset; + prefetch(va); + dma_sync_single_range_for_cpu(rxq->dev, rx_info->buf_dma_addr, rx_info->page_offset, + rxq->buf_len, DMA_FROM_DEVICE); + xdp.data = va; + xdp.data_hard_start = xdp.data; + xdp.data_end = xdp.data + pkt_len; + xdp.frame_sz = rxq->buf_len; + xdp_set_data_meta_invalid(&xdp); + prefetchw(xdp.data_hard_start); + act = bpf_prog_run_xdp(xdp_prog, &xdp); + switch (act) { + case XDP_PASS: + break; + case XDP_DROP: + result = SPNIC_XDP_PKT_DROP; + break; + default: + result = SPNIC_XDP_PKT_DROP; + bpf_warn_invalid_xdp_action(act); + } + +xdp_out: + if (result == SPNIC_XDP_PKT_DROP) { + RXQ_STATS_INC(rxq, xdp_dropped); + update_drop_rx_info(rxq, weqbb_num); + } + +unlock_rcu: + rcu_read_unlock(); + + return result; +} + +int recv_one_pkt(struct spnic_rxq *rxq, struct spnic_rq_cqe *rx_cqe, + u32 pkt_len, u32 vlan_len, u32 status) +{ + struct sk_buff *skb; + struct net_device *netdev = rxq->netdev; + u32 offload_type; + u16 num_lro; + struct spnic_nic_dev *nic_dev = netdev_priv(rxq->netdev); + + u32 xdp_status; + + xdp_status = spnic_run_xdp(rxq, pkt_len); + if (xdp_status == SPNIC_XDP_PKT_DROP) + return 0; + + skb = spnic_fetch_rx_buffer(rxq, pkt_len); + if (unlikely(!skb)) { + RXQ_STATS_INC(rxq, alloc_skb_err); + return -ENOMEM; + } + + /* place header in linear portion of buffer */ + if (skb_is_nonlinear(skb)) + spnic_pull_tail(skb); + + offload_type = sphw_hw_cpu32(rx_cqe->offload_type); + spnic_rx_csum(rxq, offload_type, status, skb); + + spnic_rx_gro(rxq, offload_type, skb); + + if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && + SPNIC_GET_RX_VLAN_OFFLOAD_EN(offload_type)) { + u16 vid = SPNIC_GET_RX_VLAN_TAG(vlan_len); + + /* if the packet is a vlan pkt, the vid may be 0 */ + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } + + if (unlikely(test_bit(SPNIC_LP_TEST, &nic_dev->flags))) + spnic_copy_lp_data(nic_dev, skb); + + num_lro = SPNIC_GET_RX_NUM_LRO(status); + if (num_lro) + spnic_lro_set_gso_params(skb, num_lro); + + skb_record_rx_queue(skb, rxq->q_id); + skb->protocol = eth_type_trans(skb, netdev); + + if (skb_has_frag_list(skb)) { + napi_gro_flush(&rxq->irq_cfg->napi, false); + netif_receive_skb(skb); + } else { + napi_gro_receive(&rxq->irq_cfg->napi, skb); + } + + return 0; +} + +void rx_pass_super_cqe(struct spnic_rxq *rxq, u32 index, u32 
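+/* per-packet SGE accounting sketch (assuming buf_len == 2048 and
+ * rx_buff_shift == 11): a 3000-byte packet occupies
+ * (3000 >> 11) + 1 == 2 receive buffers, which is what this helper
+ * adds to cons_idx/delta for every packet it skips past.
+ */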
pkt_num, struct spnic_rq_cqe *cqe) +{ + u8 sge_num = 0; + u32 pkt_len; + + while (index < pkt_num) { + pkt_len = spnic_get_pkt_len_for_super_cqe(cqe, index == (pkt_num - 1)); + sge_num += (u8)(pkt_len >> rxq->rx_buff_shift) + + ((pkt_len & (rxq->buf_len - 1)) ? 1 : 0); + index++; + } + + rxq->cons_idx += sge_num; + rxq->delta += sge_num; +} + +static inline int recv_supper_cqe(struct spnic_rxq *rxq, struct spnic_rq_cqe *rx_cqe, u32 pkt_info, + u32 vlan_len, u32 status, int *pkts, u64 *rx_bytes, u32 *dropped) +{ + u32 pkt_len; + int i, pkt_num = 0; + + pkt_num = SPNIC_GET_RQ_CQE_PKT_NUM(pkt_info); + i = 0; + while (i < pkt_num) { + pkt_len = ((i == (pkt_num - 1)) ? + RQ_CQE_PKT_LEN_GET(pkt_info, LAST_LEN) : + RQ_CQE_PKT_LEN_GET(pkt_info, FIRST_LEN)); + if (unlikely(recv_one_pkt(rxq, rx_cqe, pkt_len, vlan_len, status))) { + if (i) { + rx_pass_super_cqe(rxq, i, pkt_num, rx_cqe); + *dropped += (pkt_num - i); + } + break; + } + + *rx_bytes += pkt_len; + (*pkts)++; + i++; + } + + if (!i) + return -EFAULT; + + return 0; +} + +#define LRO_PKT_HDR_LEN_IPV4 66 +#define LRO_PKT_HDR_LEN_IPV6 86 +#define LRO_PKT_HDR_LEN(cqe) \ + (SPNIC_GET_RX_IP_TYPE(sphw_hw_cpu32((cqe)->offload_type)) == \ + SPNIC_RX_IPV6_PKT ? LRO_PKT_HDR_LEN_IPV6 : LRO_PKT_HDR_LEN_IPV4) + +static void stub_rx_recv_jumbo_pkt(struct spnic_rxq *rxq, struct sk_buff *head_skb, + unsigned int left_pkt_len) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(rxq->netdev); + struct sk_buff *skb = NULL; + struct sk_buff *curr_skb = head_skb; + struct spnic_rx_info *rx_info = NULL; + unsigned int curr_len; + + while (left_pkt_len > 0) { + rx_info = &rxq->rx_info[rxq->cons_idx & rxq->q_mask]; + skb = rx_info->saved_skb; + + dma_unmap_single(&nic_dev->pdev->dev, rx_info->buf_dma_addr, + rx_info->skb_len, DMA_FROM_DEVICE); + + rx_info->buf_dma_addr = 0; + rx_info->saved_skb = NULL; + + prefetch(skb->data); + + curr_len = (left_pkt_len > rx_info->skb_len) ? 
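+		/* i.e. curr_len = min(left_pkt_len, rx_info->skb_len): each
+		 * chained skb absorbs at most its own mapped length and the
+		 * remainder rolls over to the next ring entry.
+		 */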
rx_info->skb_len : left_pkt_len; + + left_pkt_len -= curr_len; + + __skb_put(skb, curr_len); + + if (curr_skb == head_skb) + skb_shinfo(head_skb)->frag_list = skb; + else + curr_skb->next = skb; + + head_skb->len += skb->len; + head_skb->data_len += skb->len; + head_skb->truesize += skb->truesize; + + curr_skb = skb; + rxq->cons_idx++; + rxq->delta++; + } +} + +int stub_spnic_rx_poll(struct spnic_rxq *rxq, int budget) +{ + u32 sw_ci, status, pkt_len, vlan_len = 0; + struct spnic_nic_dev *nic_dev = netdev_priv(rxq->netdev); + struct spnic_rq_cqe *rx_cqe = NULL; + u64 rx_bytes = 0; + int pkts = 0; + struct sk_buff *skb = NULL; + struct spnic_rx_info *rx_info = NULL; + u32 offload_type; + + while (likely(pkts < budget)) { + sw_ci = rxq->cons_idx & rxq->q_mask; + rx_info = &rxq->rx_info[sw_ci]; + rx_cqe = rxq->rx_info[sw_ci].cqe; + status = be32_to_cpu(rx_cqe->status); + + if (!SPNIC_GET_RX_DONE(status)) + break; + + /* make sure we read rx_done before packet length */ + rmb(); + + vlan_len = be32_to_cpu(rx_cqe->vlan_len); + pkt_len = SPNIC_GET_RX_PKT_LEN(vlan_len); + skb = rx_info->saved_skb; + + dma_unmap_single(&nic_dev->pdev->dev, rx_info->buf_dma_addr, + rx_info->skb_len, DMA_FROM_DEVICE); + + rx_info->buf_dma_addr = 0; + rx_info->saved_skb = NULL; + + rxq->cons_idx++; + rxq->delta++; + + if (pkt_len <= rx_info->skb_len) { + __skb_put(skb, pkt_len); + } else { + __skb_put(skb, rx_info->skb_len); + stub_rx_recv_jumbo_pkt(rxq, skb, pkt_len - rx_info->skb_len); + } + + offload_type = be32_to_cpu(rx_cqe->offload_type); + spnic_rx_csum(rxq, offload_type, status, skb); + + spnic_rx_gro(rxq, offload_type, skb); + + skb_record_rx_queue(skb, rxq->q_id); + skb->protocol = eth_type_trans(skb, rxq->netdev); + + if (skb_has_frag_list(skb)) { + napi_gro_flush(&rxq->irq_cfg->napi, false); + netif_receive_skb(skb); + } else { + napi_gro_receive(&rxq->irq_cfg->napi, skb); + } + rx_bytes += pkt_len; + pkts++; + rx_cqe->status = 0; + } + + if (rxq->delta >= SPNIC_RX_BUFFER_WRITE) + stub_spnic_rx_fill_buffers(rxq); + + u64_stats_update_begin(&rxq->rxq_stats.syncp); + rxq->rxq_stats.packets += pkts; + rxq->rxq_stats.bytes += rx_bytes; + u64_stats_update_end(&rxq->rxq_stats.syncp); + return pkts; +} + +int spnic_rx_poll(struct spnic_rxq *rxq, int budget) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(rxq->netdev); + u32 sw_ci, status, pkt_len, vlan_len, pkt_info, dropped = 0; + struct spnic_rq_cqe *rx_cqe = NULL; + u64 rx_bytes = 0; + u16 num_lro; + int pkts = 0, nr_pkts = 0; + u16 num_wqe = 0; + + while (likely(pkts < budget)) { + sw_ci = rxq->cons_idx & rxq->q_mask; + rx_cqe = rxq->rx_info[sw_ci].cqe; + status = sphw_hw_cpu32(rx_cqe->status); + + if (!SPNIC_GET_RX_DONE(status)) + break; + + /* make sure we read rx_done before packet length */ + rmb(); + + vlan_len = sphw_hw_cpu32(rx_cqe->vlan_len); + pkt_info = sphw_hw_cpu32(rx_cqe->pkt_info); + pkt_len = SPNIC_GET_RX_PKT_LEN(vlan_len); + + if (unlikely(SPNIC_GET_SUPER_CQE_EN(pkt_info))) { + if (unlikely(recv_supper_cqe(rxq, rx_cqe, pkt_info, vlan_len, status, &pkts, + &rx_bytes, &dropped))) + break; + nr_pkts += (int)SPNIC_GET_RQ_CQE_PKT_NUM(pkt_info); + } else { + if (recv_one_pkt(rxq, rx_cqe, pkt_len, + vlan_len, status)) + break; + rx_bytes += pkt_len; + pkts++; + nr_pkts++; + + num_lro = SPNIC_GET_RX_NUM_LRO(status); + if (num_lro) { + rx_bytes += ((num_lro - 1) * LRO_PKT_HDR_LEN(rx_cqe)); + num_wqe += SPNIC_GET_SGE_NUM(pkt_len, rxq); + } + } + + rx_cqe->status = 0; + + if (num_wqe >= nic_dev->lro_replenish_thld) + break; + } + + if (rxq->delta >= 
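+	/* delta counts consumed-but-not-refilled wqebbs; waiting until
+	 * SPNIC_RX_BUFFER_WRITE of them accumulate batches the refill and
+	 * doorbell write instead of ringing once per packet.
+	 */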
SPNIC_RX_BUFFER_WRITE) + spnic_rx_fill_buffers(rxq); + + u64_stats_update_begin(&rxq->rxq_stats.syncp); + rxq->rxq_stats.packets += nr_pkts; + rxq->rxq_stats.bytes += rx_bytes; + rxq->rxq_stats.dropped += dropped; + u64_stats_update_end(&rxq->rxq_stats.syncp); + return pkts; +} + +int spnic_alloc_rxqs_res(struct spnic_nic_dev *nic_dev, u16 num_rq, + u32 rq_depth, struct spnic_dyna_rxq_res *rxqs_res) +{ + struct spnic_dyna_rxq_res *rqres = NULL; + u64 cqe_mem_size = sizeof(struct spnic_rq_cqe) * rq_depth; + int idx, i; + u32 pkts; + u64 size; + + for (idx = 0; idx < num_rq; idx++) { + rqres = &rxqs_res[idx]; + size = sizeof(*rqres->rx_info) * rq_depth; + rqres->rx_info = kzalloc(size, GFP_KERNEL); + if (!rqres->rx_info) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to alloc rxq%d rx info\n", idx); + goto err_out; + } + + rqres->cqe_start_vaddr = + dma_alloc_coherent(&nic_dev->pdev->dev, cqe_mem_size, + &rqres->cqe_start_paddr, GFP_KERNEL); + if (!rqres->cqe_start_vaddr) { + kfree(rqres->rx_info); + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to alloc rxq%d cqe\n", idx); + goto err_out; + } + + pkts = spnic_rx_alloc_buffers(nic_dev, rq_depth, rqres->rx_info); + if (!pkts) { + dma_free_coherent(&nic_dev->pdev->dev, cqe_mem_size, + rqres->cqe_start_vaddr, + rqres->cqe_start_paddr); + kfree(rqres->rx_info); + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to alloc rxq%d rx buffers\n", idx); + goto err_out; + } + rqres->next_to_alloc = (u16)pkts; + } + return 0; + +err_out: + for (i = 0; i < idx; i++) { + rqres = &rxqs_res[i]; + + spnic_rx_free_buffers(nic_dev, rq_depth, rqres->rx_info); + dma_free_coherent(&nic_dev->pdev->dev, cqe_mem_size, + rqres->cqe_start_vaddr, rqres->cqe_start_paddr); + kfree(rqres->rx_info); + } + + return -ENOMEM; +} + +void spnic_free_rxqs_res(struct spnic_nic_dev *nic_dev, u16 num_rq, + u32 rq_depth, struct spnic_dyna_rxq_res *rxqs_res) +{ + struct spnic_dyna_rxq_res *rqres = NULL; + u64 cqe_mem_size = sizeof(struct spnic_rq_cqe) * rq_depth; + int idx; + + for (idx = 0; idx < num_rq; idx++) { + rqres = &rxqs_res[idx]; + + spnic_rx_free_buffers(nic_dev, rq_depth, rqres->rx_info); + dma_free_coherent(&nic_dev->pdev->dev, cqe_mem_size, rqres->cqe_start_vaddr, + rqres->cqe_start_paddr); + kfree(rqres->rx_info); + } +} + +int spnic_configure_rxqs(struct spnic_nic_dev *nic_dev, u16 num_rq, + u32 rq_depth, struct spnic_dyna_rxq_res *rxqs_res) +{ + struct spnic_dyna_rxq_res *rqres = NULL; + struct irq_info *msix_entry = NULL; + struct spnic_rxq *rxq = NULL; + struct spnic_rq_cqe *cqe_va = NULL; + dma_addr_t cqe_pa; + u16 q_id; + u32 idx; + u32 pkts; + + for (q_id = 0; q_id < num_rq; q_id++) { + rxq = &nic_dev->rxqs[q_id]; + rqres = &rxqs_res[q_id]; + msix_entry = &nic_dev->qps_irq_info[q_id]; + + rxq->irq_id = msix_entry->irq_id; + rxq->msix_entry_idx = msix_entry->msix_entry_idx; + rxq->next_to_update = 0; + rxq->next_to_alloc = rqres->next_to_alloc; + rxq->q_depth = rq_depth; + rxq->delta = rxq->q_depth; + rxq->q_mask = rxq->q_depth - 1; + rxq->cons_idx = 0; + + rxq->rx_info = rqres->rx_info; + + /* fill cqe */ + cqe_va = (struct spnic_rq_cqe *)rqres->cqe_start_vaddr; + cqe_pa = rqres->cqe_start_paddr; + for (idx = 0; idx < rq_depth; idx++) { + rxq->rx_info[idx].cqe = cqe_va; + rxq->rx_info[idx].cqe_dma = cqe_pa; + cqe_va++; + cqe_pa += sizeof(*rxq->rx_info->cqe); + } + + rxq->rq = spnic_get_nic_queue(nic_dev->hwdev, rxq->q_id, SPNIC_RQ); + if (!rxq->rq) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to get rq\n"); + return -EINVAL; + } + + pkts = 
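+		/* spnic_rx_fill_wqe() initializes every ring entry and
+		 * returns the count, so anything below q_depth means the
+		 * ring was only partially wired up.
+		 */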
spnic_rx_fill_wqe(rxq); + if (pkts != rxq->q_depth) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to fill rx wqe\n"); + return -EFAULT; + } + + pkts = spnic_rx_fill_buffers(rxq); + if (!pkts) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to fill Rx buffer\n"); + return -ENOMEM; + } + } + + return 0; +} + +void spnic_free_rxqs(struct net_device *netdev) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + + kfree(nic_dev->rxqs); +} + +int spnic_alloc_rxqs(struct net_device *netdev) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + struct pci_dev *pdev = nic_dev->pdev; + struct spnic_rxq *rxq = NULL; + u16 num_rxqs = nic_dev->max_qps; + u16 q_id; + u64 rxq_size; + + rxq_size = num_rxqs * sizeof(*nic_dev->rxqs); + if (!rxq_size) { + nic_err(&pdev->dev, "Cannot allocate zero size rxqs\n"); + return -EINVAL; + } + + nic_dev->rxqs = kzalloc(rxq_size, GFP_KERNEL); + if (!nic_dev->rxqs) { + nic_err(&pdev->dev, "Failed to allocate rxqs\n"); + return -ENOMEM; + } + + for (q_id = 0; q_id < num_rxqs; q_id++) { + rxq = &nic_dev->rxqs[q_id]; + rxq->netdev = netdev; + rxq->dev = &pdev->dev; + rxq->q_id = q_id; + rxq->buf_len = nic_dev->rx_buff_len; + rxq->rx_buff_shift = ilog2(nic_dev->rx_buff_len); + rxq->dma_rx_buff_size = nic_dev->dma_rx_buff_size; + rxq->q_depth = nic_dev->q_params.rq_depth; + rxq->q_mask = nic_dev->q_params.rq_depth - 1; + + rxq_stats_init(rxq); + } + + return 0; +} + +int spnic_rx_configure(struct net_device *netdev) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + int err; + + if (test_bit(SPNIC_RSS_ENABLE, &nic_dev->flags)) { + err = spnic_rss_init(nic_dev); + if (err) { + nicif_err(nic_dev, drv, netdev, "Failed to init rss\n"); + return -EFAULT; + } + } + + return 0; +} + +void spnic_rx_remove_configure(struct net_device *netdev) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + + if (test_bit(SPNIC_RSS_ENABLE, &nic_dev->flags)) + spnic_rss_deinit(nic_dev); +} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_rx.h b/drivers/net/ethernet/ramaxel/spnic/spnic_rx.h new file mode 100644 index 000000000000..564b5765a41b --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/spnic_rx.h @@ -0,0 +1,118 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#ifndef SPNIC_RX_H +#define SPNIC_RX_H + +#include <linux/types.h> + +/*rx cqe checksum err*/ +#define SPNIC_RX_CSUM_IP_CSUM_ERR BIT(0) +#define SPNIC_RX_CSUM_TCP_CSUM_ERR BIT(1) +#define SPNIC_RX_CSUM_UDP_CSUM_ERR BIT(2) +#define SPNIC_RX_CSUM_IGMP_CSUM_ERR BIT(3) +#define SPNIC_RX_CSUM_ICMPV4_CSUM_ERR BIT(4) +#define SPNIC_RX_CSUM_ICMPV6_CSUM_ERR BIT(5) +#define SPNIC_RX_CSUM_SCTP_CRC_ERR BIT(6) +#define SPNIC_RX_CSUM_HW_CHECK_NONE BIT(7) +#define SPNIC_RX_CSUM_IPSU_OTHER_ERR BIT(8) + +#define SPNIC_HEADER_DATA_UNIT 2 + +struct spnic_rxq_stats { + u64 packets; + u64 bytes; + u64 errors; + u64 csum_errors; + u64 other_errors; + u64 dropped; + u64 xdp_dropped; + u64 rx_buf_empty; + + u64 alloc_skb_err; + u64 alloc_rx_buf_err; + u64 xdp_large_pkt; + struct u64_stats_sync syncp; +}; + +struct spnic_rx_info { + dma_addr_t buf_dma_addr; + + struct spnic_rq_cqe *cqe; + dma_addr_t cqe_dma; + struct page *page; + u32 page_offset; + struct spnic_rq_wqe *rq_wqe; + struct sk_buff *saved_skb; + u32 skb_len; +}; + +struct spnic_rxq { + struct net_device *netdev; + + u16 q_id; + u32 q_depth; + u32 q_mask; + + u16 buf_len; + u32 rx_buff_shift; + u32 dma_rx_buff_size; + + struct spnic_rxq_stats rxq_stats; + u32 cons_idx; + u32 delta; + + u32 irq_id; 
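+	/* both copied from qps_irq_info[] in spnic_configure_rxqs():
+	 * irq_id is the interrupt number, msix_entry_idx the MSI-X
+	 * table index of this queue's vector.
+	 */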
+ u16 msix_entry_idx; + + struct spnic_rx_info *rx_info; + struct spnic_io_queue *rq; + struct bpf_prog *xdp_prog; + + struct spnic_irq *irq_cfg; + u16 next_to_alloc; + u16 next_to_update; + struct device *dev; /* device for DMA mapping */ + + unsigned long status; + dma_addr_t cqe_start_paddr; + void *cqe_start_vaddr; + + u64 last_moder_packets; + u64 last_moder_bytes; + u8 last_coalesc_timer_cfg; + u8 last_pending_limt; +} ____cacheline_aligned; + +struct spnic_dyna_rxq_res { + u16 next_to_alloc; + struct spnic_rx_info *rx_info; + dma_addr_t cqe_start_paddr; + void *cqe_start_vaddr; +}; + +int spnic_alloc_rxqs(struct net_device *netdev); + +void spnic_free_rxqs(struct net_device *netdev); + +int spnic_alloc_rxqs_res(struct spnic_nic_dev *nic_dev, u16 num_rq, + u32 rq_depth, struct spnic_dyna_rxq_res *rxqs_res); + +void spnic_free_rxqs_res(struct spnic_nic_dev *nic_dev, u16 num_rq, + u32 rq_depth, struct spnic_dyna_rxq_res *rxqs_res); + +int spnic_configure_rxqs(struct spnic_nic_dev *nic_dev, u16 num_rq, + u32 rq_depth, struct spnic_dyna_rxq_res *rxqs_res); + +int spnic_rx_configure(struct net_device *netdev); + +void spnic_rx_remove_configure(struct net_device *netdev); + +int spnic_rx_poll(struct spnic_rxq *rxq, int budget); +int stub_spnic_rx_poll(struct spnic_rxq *rxq, int budget); + +void spnic_rxq_get_stats(struct spnic_rxq *rxq, struct spnic_rxq_stats *stats); + +void spnic_rxq_clean_stats(struct spnic_rxq_stats *rxq_stats); + +#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_sriov.c b/drivers/net/ethernet/ramaxel/spnic/spnic_sriov.c new file mode 100644 index 000000000000..aac22cb302ea --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/spnic_sriov.c @@ -0,0 +1,200 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include <linux/pci.h> +#include <linux/interrupt.h> + +#include "sphw_common.h" +#include "sphw_crm.h" +#include "sphw_hw.h" +#include "spnic_lld.h" +#include "spnic_sriov.h" +#include "spnic_dev_mgmt.h" + +int spnic_init_vf_hw(void *hwdev, u16 start_vf_id, u16 end_vf_id) +{ + u16 i, func_idx; + int err; + + /* mbox msg channel resources will be freed during remove process */ + err = sphw_init_func_mbox_msg_channel(hwdev, sphw_func_max_vf(hwdev)); + if (err) + return err; + + /* vf use 256K as default wq page size, and can't change it */ + for (i = start_vf_id; i <= end_vf_id; i++) { + func_idx = sphw_glb_pf_vf_offset(hwdev) + i; + err = sphw_set_wq_page_size(hwdev, func_idx, SPHW_DEFAULT_WQ_PAGE_SIZE, + SPHW_CHANNEL_COMM); + if (err) + return err; + } + + return 0; +} + +int spnic_deinit_vf_hw(void *hwdev, u16 start_vf_id, u16 end_vf_id) +{ + u16 func_idx, idx; + + for (idx = start_vf_id; idx <= end_vf_id; idx++) { + func_idx = sphw_glb_pf_vf_offset(hwdev) + idx; + sphw_set_wq_page_size(hwdev, func_idx, SPHW_HW_WQ_PAGE_SIZE, SPHW_CHANNEL_COMM); + } + + return 0; +} + +int spnic_pci_sriov_disable(struct pci_dev *dev) +{ +#ifdef CONFIG_PCI_IOV + struct spnic_sriov_info *sriov_info = NULL; + struct sphw_event_info event = {0}; + void *hwdev = NULL; + u16 tmp_vfs; + + sriov_info = spnic_get_sriov_info_by_pcidev(dev); + hwdev = spnic_get_hwdev_by_pcidev(dev); + if (!hwdev) { + sdk_err(&dev->dev, "SR-IOV disable is not permitted, please wait...\n"); + return -EPERM; + } + + /* if SR-IOV is already disabled then there is nothing to do */ + if (!sriov_info->sriov_enabled) + return 0; + + if (test_and_set_bit(SPNIC_SRIOV_DISABLE, &sriov_info->state)) { + 
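+		/* test_and_set_bit() doubles as a non-blocking lock here: a
+		 * concurrent disable request finds the bit already set and
+		 * bails out instead of racing the teardown.
+		 */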
sdk_err(&dev->dev, "SR-IOV disable in process, please wait"); + return -EPERM; + } + + /* If our VFs are assigned we cannot shut down SR-IOV + * without causing issues, so just leave the hardware + * available but disabled + */ + if (pci_vfs_assigned(dev)) { + clear_bit(SPNIC_SRIOV_DISABLE, &sriov_info->state); + sdk_warn(&dev->dev, "Unloading driver while VFs are assigned - VFs will not be deallocated\n"); + return -EPERM; + } + + event.type = SPHW_EVENT_SRIOV_STATE_CHANGE; + event.sriov_state.enable = 0; + sphw_event_callback(hwdev, &event); + + sriov_info->sriov_enabled = false; + + /* disable iov and allow time for transactions to clear */ + pci_disable_sriov(dev); + + tmp_vfs = (u16)sriov_info->num_vfs; + sriov_info->num_vfs = 0; + spnic_deinit_vf_hw(hwdev, 1, tmp_vfs); + + clear_bit(SPNIC_SRIOV_DISABLE, &sriov_info->state); + +#endif + + return 0; +} + +int spnic_pci_sriov_enable(struct pci_dev *dev, int num_vfs) +{ +#ifdef CONFIG_PCI_IOV + struct spnic_sriov_info *sriov_info = NULL; + struct sphw_event_info event = {0}; + void *hwdev = NULL; + int pre_existing_vfs = 0; + int err = 0; + + sriov_info = spnic_get_sriov_info_by_pcidev(dev); + hwdev = spnic_get_hwdev_by_pcidev(dev); + if (!hwdev) { + sdk_err(&dev->dev, "SR-IOV enable is not permitted, please wait...\n"); + return -EPERM; + } + + if (test_and_set_bit(SPNIC_SRIOV_ENABLE, &sriov_info->state)) { + sdk_err(&dev->dev, "SR-IOV enable in process, please wait, num_vfs %d\n", + num_vfs); + return -EPERM; + } + + pre_existing_vfs = pci_num_vf(dev); + + if (num_vfs > pci_sriov_get_totalvfs(dev)) { + clear_bit(SPNIC_SRIOV_ENABLE, &sriov_info->state); + return -ERANGE; + } + if (pre_existing_vfs && pre_existing_vfs != num_vfs) { + err = spnic_pci_sriov_disable(dev); + if (err) { + clear_bit(SPNIC_SRIOV_ENABLE, &sriov_info->state); + return err; + } + } else if (pre_existing_vfs == num_vfs) { + clear_bit(SPNIC_SRIOV_ENABLE, &sriov_info->state); + return num_vfs; + } + + err = spnic_init_vf_hw(hwdev, 1, (u16)num_vfs); + if (err) { + sdk_err(&dev->dev, "Failed to init vf in hardware before enable sriov, error %d\n", + err); + clear_bit(SPNIC_SRIOV_ENABLE, &sriov_info->state); + return err; + } + + err = pci_enable_sriov(dev, num_vfs); + if (err) { + sdk_err(&dev->dev, "Failed to enable SR-IOV, error %d\n", err); + clear_bit(SPNIC_SRIOV_ENABLE, &sriov_info->state); + return err; + } + + sriov_info->sriov_enabled = true; + sriov_info->num_vfs = num_vfs; + + event.type = SPHW_EVENT_SRIOV_STATE_CHANGE; + event.sriov_state.enable = 1; + event.sriov_state.num_vfs = (u16)num_vfs; + sphw_event_callback(hwdev, &event); + + clear_bit(SPNIC_SRIOV_ENABLE, &sriov_info->state); + + return num_vfs; +#else + + return 0; +#endif +} + +static bool spnic_is_support_sriov_configure(struct pci_dev *pdev) +{ + /* TODO: get cap from firmware */ + + return true; +} + +int spnic_pci_sriov_configure(struct pci_dev *dev, int num_vfs) +{ + struct spnic_sriov_info *sriov_info = NULL; + + if (!spnic_is_support_sriov_configure(dev)) + return -EFAULT; + + sriov_info = spnic_get_sriov_info_by_pcidev(dev); + if (!sriov_info) + return -EFAULT; + + if (!test_bit(SPNIC_FUNC_PERSENT, &sriov_info->state)) + return -EFAULT; + + if (!num_vfs) + return spnic_pci_sriov_disable(dev); + else + return spnic_pci_sriov_enable(dev, num_vfs); +} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_sriov.h b/drivers/net/ethernet/ramaxel/spnic/spnic_sriov.h new file mode 100644 index 000000000000..622845e30427 --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/spnic_sriov.h @@ -0,0 
+1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#ifndef SPNIC_SRIOV_H +#define SPNIC_SRIOV_H +#include <linux/types.h> + +enum spnic_sriov_state { + SPNIC_SRIOV_DISABLE, + SPNIC_SRIOV_ENABLE, + SPNIC_FUNC_PERSENT, +}; + +struct spnic_sriov_info { + bool sriov_enabled; + unsigned int num_vfs; + unsigned long state; +}; + +struct spnic_sriov_info *spnic_get_sriov_info_by_pcidev(struct pci_dev *pdev); +int spnic_pci_sriov_disable(struct pci_dev *dev); +int spnic_pci_sriov_enable(struct pci_dev *dev, int num_vfs); +int spnic_pci_sriov_configure(struct pci_dev *dev, int num_vfs); +#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_tx.c b/drivers/net/ethernet/ramaxel/spnic/spnic_tx.c new file mode 100644 index 000000000000..5b5260fcd9d4 --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/spnic_tx.c @@ -0,0 +1,879 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include <linux/netdevice.h> +#include <linux/kernel.h> +#include <linux/skbuff.h> +#include <linux/interrupt.h> +#include <linux/device.h> +#include <linux/pci.h> +#include <linux/tcp.h> +#include <linux/sctp.h> +#include <net/xfrm.h> +#include <linux/dma-mapping.h> +#include <linux/types.h> +#include <linux/u64_stats_sync.h> +#include <linux/module.h> +#include <linux/vmalloc.h> + +#include "sphw_crm.h" +#include "spnic_nic_qp.h" +#include "spnic_nic_io.h" +#include "spnic_nic_cfg.h" +#include "spnic_nic_dev.h" +#include "spnic_tx.h" + +#define MIN_SKB_LEN 32 + +#define MAX_PAYLOAD_OFFSET 221 + +#define NIC_QID(q_id, nic_dev) ((q_id) & ((nic_dev)->num_qps - 1)) + +#define SPNIC_TX_TASK_WRAPPED 1 +#define SPNIC_TX_BD_DESC_WRAPPED 2 + +#define TXQ_STATS_INC(txq, field) \ +do { \ + u64_stats_update_begin(&(txq)->txq_stats.syncp); \ + (txq)->txq_stats.field++; \ + u64_stats_update_end(&(txq)->txq_stats.syncp); \ +} while (0) + +void spnic_txq_get_stats(struct spnic_txq *txq, struct spnic_txq_stats *stats) +{ + struct spnic_txq_stats *txq_stats = &txq->txq_stats; + unsigned int start; + + u64_stats_update_begin(&stats->syncp); + do { + start = u64_stats_fetch_begin(&txq_stats->syncp); + stats->bytes = txq_stats->bytes; + stats->packets = txq_stats->packets; + stats->busy = txq_stats->busy; + stats->wake = txq_stats->wake; + stats->dropped = txq_stats->dropped; + } while (u64_stats_fetch_retry(&txq_stats->syncp, start)); + u64_stats_update_end(&stats->syncp); +} + +void spnic_txq_clean_stats(struct spnic_txq_stats *txq_stats) +{ + u64_stats_update_begin(&txq_stats->syncp); + txq_stats->bytes = 0; + txq_stats->packets = 0; + txq_stats->busy = 0; + txq_stats->wake = 0; + txq_stats->dropped = 0; + + txq_stats->skb_pad_err = 0; + txq_stats->frag_len_overflow = 0; + txq_stats->offload_cow_skb_err = 0; + txq_stats->map_frag_err = 0; + txq_stats->unknown_tunnel_pkt = 0; + txq_stats->frag_size_err = 0; + u64_stats_update_end(&txq_stats->syncp); +} + +static void txq_stats_init(struct spnic_txq *txq) +{ + struct spnic_txq_stats *txq_stats = &txq->txq_stats; + + u64_stats_init(&txq_stats->syncp); + spnic_txq_clean_stats(txq_stats); +} + +static inline void spnic_set_buf_desc(struct spnic_sq_bufdesc *buf_descs, dma_addr_t addr, u32 len) +{ + buf_descs->hi_addr = sphw_hw_be32(upper_32_bits(addr)); + buf_descs->lo_addr = sphw_hw_be32(lower_32_bits(addr)); + buf_descs->len = sphw_hw_be32(len); +} + +static int tx_map_skb(struct spnic_nic_dev *nic_dev, struct sk_buff *skb, + u16 
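+/* dma_info[] convention in tx_map_skb(): slot 0 is the linear head
+ * (dma_map_single()), slots 1..valid_nr_frags are page frags
+ * (skb_frag_dma_map()); the error path unwinds the frags first and
+ * the head last.
+ */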
valid_nr_frags, struct spnic_txq *txq,
+		      struct spnic_tx_info *tx_info,
+		      struct spnic_sq_wqe_combo *wqe_combo)
+{
+	struct spnic_sq_wqe_desc *wqe_desc = wqe_combo->ctrl_bd0;
+	struct spnic_sq_bufdesc *buf_desc = wqe_combo->bds_head;
+	struct spnic_dma_info *dma_info = tx_info->dma_info;
+	struct pci_dev *pdev = nic_dev->pdev;
+	skb_frag_t *frag = NULL;
+	u32 j, i;
+	int err;
+
+	dma_info[0].dma = dma_map_single(&pdev->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
+	if (dma_mapping_error(&pdev->dev, dma_info[0].dma)) {
+		TXQ_STATS_INC(txq, map_frag_err);
+		return -EFAULT;
+	}
+
+	dma_info[0].len = skb_headlen(skb);
+
+	wqe_desc->hi_addr = sphw_hw_be32(upper_32_bits(dma_info[0].dma));
+	wqe_desc->lo_addr = sphw_hw_be32(lower_32_bits(dma_info[0].dma));
+
+	wqe_desc->ctrl_len = dma_info[0].len;
+
+	for (i = 0; i < valid_nr_frags;) {
+		frag = &(skb_shinfo(skb)->frags[i]);
+		if (unlikely(i == wqe_combo->first_bds_num))
+			buf_desc = wqe_combo->bds_sec2;
+
+		i++;
+		dma_info[i].dma = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
+						   DMA_TO_DEVICE);
+		if (dma_mapping_error(&pdev->dev, dma_info[i].dma)) {
+			TXQ_STATS_INC(txq, map_frag_err);
+			i--;
+			err = -EFAULT;
+			goto frag_map_err;
+		}
+		dma_info[i].len = skb_frag_size(frag);
+
+		spnic_set_buf_desc(buf_desc, dma_info[i].dma, dma_info[i].len);
+		buf_desc++;
+	}
+
+	return 0;
+
+frag_map_err:
+	for (j = 0; j < i;) {
+		j++;
+		dma_unmap_page(&pdev->dev, dma_info[j].dma,
+			       dma_info[j].len, DMA_TO_DEVICE);
+	}
+	dma_unmap_single(&pdev->dev, dma_info[0].dma, dma_info[0].len, DMA_TO_DEVICE);
+	return err;
+}
+
+static inline void tx_unmap_skb(struct spnic_nic_dev *nic_dev,
+				struct sk_buff *skb, u16 valid_nr_frags,
+				struct spnic_dma_info *dma_info)
+{
+	struct pci_dev *pdev = nic_dev->pdev;
+	int i;
+
+	for (i = 0; i < valid_nr_frags; ) {
+		i++;
+		dma_unmap_page(&pdev->dev, dma_info[i].dma, dma_info[i].len, DMA_TO_DEVICE);
+	}
+
+	dma_unmap_single(&pdev->dev, dma_info[0].dma, dma_info[0].len, DMA_TO_DEVICE);
+}
+
+union spnic_l4 {
+	struct tcphdr *tcp;
+	struct udphdr *udp;
+	unsigned char *hdr;
+};
+
+enum sq_l3_type {
+	UNKNOWN_L3TYPE = 0,
+	IPV6_PKT = 1,
+	IPV4_PKT_NO_CHKSUM_OFFLOAD = 2,
+	IPV4_PKT_WITH_CHKSUM_OFFLOAD = 3,
+};
+
+enum sq_l4offload_type {
+	OFFLOAD_DISABLE = 0,
+	TCP_OFFLOAD_ENABLE = 1,
+	SCTP_OFFLOAD_ENABLE = 2,
+	UDP_OFFLOAD_ENABLE = 3,
+};
+
+/* initialize l4_len and offset */
+static inline void get_inner_l4_info(struct sk_buff *skb, union spnic_l4 *l4,
+				     u8 l4_proto, u32 *offset,
+				     enum sq_l4offload_type *l4_offload)
+{
+	switch (l4_proto) {
+	case IPPROTO_TCP:
+		*l4_offload = TCP_OFFLOAD_ENABLE;
+		/* to keep the same behaviour as TSO, the offset begins from the payload */
+		*offset = (l4->tcp->doff << 2) + TRANSPORT_OFFSET(l4->hdr, skb);
+		break;
+
+	case IPPROTO_UDP:
+		*l4_offload = UDP_OFFLOAD_ENABLE;
+		*offset = TRANSPORT_OFFSET(l4->hdr, skb);
+		break;
+	default:
+		break;
+	}
+}
+
+static int spnic_tx_csum(struct spnic_txq *txq, struct spnic_sq_task *task, struct sk_buff *skb)
+{
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return 0;
+
+	if (skb->encapsulation) {
+		union spnic_ip ip;
+		u8 l4_proto;
+
+		task->pkt_info0 |= SQ_TASK_INFO0_SET(1U, TUNNEL_FLAG);
+
+		ip.hdr = skb_network_header(skb);
+		if (ip.v4->version == IPV4_VERSION) {
+			l4_proto = ip.v4->protocol;
+		} else if (ip.v4->version == IPV6_VERSION) {
+			union spnic_l4 l4;
+			unsigned char *exthdr;
+			__be16 frag_off;
+
+			exthdr = ip.hdr + sizeof(*ip.v6);
+			l4_proto = ip.v6->nexthdr;
+			l4.hdr = skb_transport_header(skb);
+			if (l4.hdr != exthdr)
+				ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto, &frag_off);
+		} else {
+			l4_proto = IPPROTO_RAW;
+		}
+
+		if (l4_proto != IPPROTO_UDP ||
+		    ((struct udphdr *)skb_transport_header(skb))->dest != VXLAN_OFFLOAD_PORT_LE) {
+			TXQ_STATS_INC(txq, unknown_tunnel_pkt);
+			/* unsupported tunnel packet, disable csum offload */
+			skb_checksum_help(skb);
+			return 0;
+		}
+	}
+
+	task->pkt_info0 |= SQ_TASK_INFO0_SET(1U, INNER_L4_EN);
+
+	return 1;
+}
+
+static void get_inner_l3_l4_type(struct sk_buff *skb, union spnic_ip *ip,
+				 union spnic_l4 *l4, enum sq_l3_type *l3_type, u8 *l4_proto)
+{
+	unsigned char *exthdr = NULL;
+
+	if (ip->v4->version == 4) {
+		*l3_type = IPV4_PKT_WITH_CHKSUM_OFFLOAD;
+		*l4_proto = ip->v4->protocol;
+
+	} else if (ip->v4->version == 6) {
+		*l3_type = IPV6_PKT;
+		exthdr = ip->hdr + sizeof(*ip->v6);
+		*l4_proto = ip->v6->nexthdr;
+		if (exthdr != l4->hdr) {
+			__be16 frag_off = 0;
+
+			ipv6_skip_exthdr(skb, (int)(exthdr - skb->data), l4_proto, &frag_off);
+		}
+	} else {
+		*l3_type = UNKNOWN_L3TYPE;
+		*l4_proto = 0;
+	}
+}
+
+static inline void spnic_set_tso_info(struct spnic_sq_task *task, u32 *queue_info,
+				      enum sq_l4offload_type l4_offload, u32 offset, u32 mss)
+{
+	if (l4_offload == TCP_OFFLOAD_ENABLE) {
+		*queue_info |= SQ_CTRL_QUEUE_INFO_SET(1U, TSO);
+		task->pkt_info0 |= SQ_TASK_INFO0_SET(1U, INNER_L4_EN);
+	} else if (l4_offload == UDP_OFFLOAD_ENABLE) {
+		*queue_info |= SQ_CTRL_QUEUE_INFO_SET(1U, UFO);
+		task->pkt_info0 |= SQ_TASK_INFO0_SET(1U, INNER_L4_EN);
+	}
+
+	/* L3 checksum calculation is enabled by default */
+	task->pkt_info0 |= SQ_TASK_INFO0_SET(1U, INNER_L3_EN);
+
+	*queue_info |= SQ_CTRL_QUEUE_INFO_SET(offset >> 1, PLDOFF);
+
+	/* set MSS value */
+	*queue_info = SQ_CTRL_QUEUE_INFO_CLEAR(*queue_info, MSS);
+	*queue_info |= SQ_CTRL_QUEUE_INFO_SET(mss, MSS);
+}
+
+static int spnic_tso(struct spnic_sq_task *task, u32 *queue_info, struct sk_buff *skb)
+{
+	enum sq_l4offload_type l4_offload = OFFLOAD_DISABLE;
+	enum sq_l3_type l3_type;
+	union spnic_ip ip;
+	union spnic_l4 l4;
+	u32 offset = 0;
+	u8 l4_proto;
+	int err;
+
+	if (!skb_is_gso(skb))
+		return 0;
+
+	err = skb_cow_head(skb, 0);
+	if (err < 0)
+		return err;
+
+	if (skb->encapsulation) {
+		u32 gso_type = skb_shinfo(skb)->gso_type;
+		/* L3 checksum is always enabled */
+		task->pkt_info0 |= SQ_TASK_INFO0_SET(1U, OUT_L3_EN);
+		task->pkt_info0 |= SQ_TASK_INFO0_SET(1U, TUNNEL_FLAG);
+
+		l4.hdr = skb_transport_header(skb);
+		ip.hdr = skb_network_header(skb);
+
+		if (gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
+			l4.udp->check = ~csum_magic(&ip, IPPROTO_UDP);
+			task->pkt_info0 |= SQ_TASK_INFO0_SET(1U, OUT_L4_EN);
+		} else if (gso_type & SKB_GSO_UDP_TUNNEL) {
+		}
+
+		ip.hdr = skb_inner_network_header(skb);
+		l4.hdr = skb_inner_transport_header(skb);
+	} else {
+		ip.hdr = skb_network_header(skb);
+		l4.hdr = skb_transport_header(skb);
+	}
+
+	get_inner_l3_l4_type(skb, &ip, &l4, &l3_type, &l4_proto);
+
+	if (l4_proto == IPPROTO_TCP)
+		l4.tcp->check = ~csum_magic(&ip, IPPROTO_TCP);
+
+	get_inner_l4_info(skb, &l4, l4_proto, &offset, &l4_offload);
+
+	spnic_set_tso_info(task, queue_info, l4_offload, offset, skb_shinfo(skb)->gso_size);
+
+	return 1;
+}
+
+static u32 spnic_tx_offload(struct sk_buff *skb, struct spnic_sq_task *task,
+			    u32 *queue_info, struct spnic_txq *txq)
+{
+	u32 offload = 0;
+	int tso_cs_en;
+
+	task->pkt_info0 = 0;
+	task->ip_identify = 0;
+	task->pkt_info2 = 0;
+	task->vlan_offload = 0;
+
+	tso_cs_en = spnic_tso(task, queue_info, skb);
+	if (tso_cs_en < 0) {
+		offload = TX_OFFLOAD_INVALID;
+		return offload;
+	} else if (tso_cs_en) {
+		offload |= TX_OFFLOAD_TSO;
+	} else {
+		tso_cs_en = spnic_tx_csum(txq, task, skb);
+		if (tso_cs_en)
+			offload |= TX_OFFLOAD_CSUM;
+	}
+
+#define VLAN_INSERT_MODE_MAX 5
+	if (unlikely(skb_vlan_tag_present(skb))) {
+		/* select vlan insert mode by qid, default 802.1Q Tag type */
+		spnic_set_vlan_tx_offload(task, skb_vlan_tag_get(skb),
+					  txq->q_id % VLAN_INSERT_MODE_MAX);
+		offload |= TX_OFFLOAD_VLAN;
+	}
+
+	if (unlikely(SQ_CTRL_QUEUE_INFO_GET(*queue_info, PLDOFF) > MAX_PAYLOAD_OFFSET)) {
+		offload = TX_OFFLOAD_INVALID;
+		return offload;
+	}
+
+	return offload;
+}
+
+static inline void get_pkt_stats(struct spnic_tx_info *tx_info, struct sk_buff *skb)
+{
+	u32 ihs, hdr_len;
+
+	if (skb_is_gso(skb)) {
+		if (skb->encapsulation)
+			ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
+		else
+			ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
+
+		hdr_len = (skb_shinfo(skb)->gso_segs - 1) * ihs;
+		tx_info->num_bytes = skb->len + (u64)hdr_len;
+
+	} else {
+		tx_info->num_bytes = skb->len > ETH_ZLEN ? skb->len : ETH_ZLEN;
+	}
+
+	tx_info->num_pkts = 1;
+}
+
+static inline int spnic_maybe_stop_tx(struct spnic_txq *txq, u16 wqebb_cnt)
+{
+	if (likely(spnic_get_sq_free_wqebbs(txq->sq) >= wqebb_cnt))
+		return 0;
+
+	/* We need to check again in case another CPU has just
+	 * made room available.
+	 */
+	netif_stop_subqueue(txq->netdev, txq->q_id);
+
+	if (likely(spnic_get_sq_free_wqebbs(txq->sq) < wqebb_cnt))
+		return -EBUSY;
+
+	/* there are enough wqebbs now that the queue has woken up */
+	netif_start_subqueue(txq->netdev, txq->q_id);
+
+	return 0;
+}
+
+static inline u16 spnic_set_wqe_combo(struct spnic_txq *txq, struct spnic_sq_wqe_combo *wqe_combo,
+				      u32 offload, u16 num_sge, u16 *curr_pi)
+{
+	void *second_part_wqebbs_addr = NULL;
+	void *wqe = NULL;
+	u16 first_part_wqebbs_num, tmp_pi;
+
+	wqe_combo->ctrl_bd0 = spnic_get_sq_one_wqebb(txq->sq, curr_pi);
+	if (!offload && num_sge == 1) {
+		wqe_combo->wqe_type = SQ_WQE_COMPACT_TYPE;
+		return spnic_get_and_update_sq_owner(txq->sq, *curr_pi, 1);
+	}
+
+	wqe_combo->wqe_type = SQ_WQE_EXTENDED_TYPE;
+
+	if (offload) {
+		wqe_combo->task = spnic_get_sq_one_wqebb(txq->sq, &tmp_pi);
+		wqe_combo->task_type = SQ_WQE_TASKSECT_16BYTES;
+	} else {
+		wqe_combo->task_type = SQ_WQE_TASKSECT_46BITS;
+	}
+
+	if (num_sge > 1) {
+		/* the first wqebb contains bd0, and the bd size equals the
+		 * sq wqebb size, so we use (num_sge - 1) as the wanted
+		 * wqebb_cnt
+		 */
+		wqe = spnic_get_sq_multi_wqebbs(txq->sq, num_sge - 1, &tmp_pi,
+						&second_part_wqebbs_addr,
+						&first_part_wqebbs_num);
+		wqe_combo->bds_head = wqe;
+		wqe_combo->bds_sec2 = second_part_wqebbs_addr;
+		wqe_combo->first_bds_num = first_part_wqebbs_num;
+	}
+
+	return spnic_get_and_update_sq_owner(txq->sq, *curr_pi, num_sge + (u16)!!offload);
+}
+
+inline u8 spnic_get_vlan_pri(struct sk_buff *skb)
+{
+	u16 vlan_tci = 0;
+	int err;
+
+	err = vlan_get_tag(skb, &vlan_tci);
+	if (err)
+		return 0;
+
+	return (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+}
+
+static netdev_tx_t spnic_send_one_skb(struct sk_buff *skb, struct net_device *netdev,
+				      struct spnic_txq *txq)
+{
+	struct spnic_nic_dev *nic_dev = netdev_priv(netdev);
+	struct spnic_sq_wqe_combo wqe_combo = {0};
+	struct spnic_tx_info *tx_info = NULL;
+	struct spnic_sq_task task;
+	u32 offload, queue_info = 0;
+	u16 owner = 0, pi = 0;
+	u16 wqebb_cnt, num_sge, valid_nr_frags;
+	bool find_zero_sge_len = false;
+	int err, i;
+
+	if (unlikely(skb->len < MIN_SKB_LEN)) {
+		if (skb_pad(skb, (int)(MIN_SKB_LEN - skb->len))) {
+			TXQ_STATS_INC(txq, skb_pad_err);
+			goto tx_skb_pad_err;
+		}
+
+		skb->len = MIN_SKB_LEN;
+	}
+
+	valid_nr_frags = 0;
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		if (!skb_frag_size(&skb_shinfo(skb)->frags[i])) {
+			find_zero_sge_len = true;
+			continue;
+		} else if (find_zero_sge_len) {
+			TXQ_STATS_INC(txq, frag_size_err);
+			goto tx_drop_pkts;
+		}
+
+		valid_nr_frags++;
+	}
+
+	num_sge = valid_nr_frags + 1;
+
+	/* assume a normal TS format wqe is needed; the task info takes 1 wqebb */
+	wqebb_cnt = num_sge + 1;
+	if (unlikely(spnic_maybe_stop_tx(txq, wqebb_cnt))) {
+		TXQ_STATS_INC(txq, busy);
+		return NETDEV_TX_BUSY;
+	}
+
+	offload = spnic_tx_offload(skb, &task, &queue_info, txq);
+	if (unlikely(offload == TX_OFFLOAD_INVALID)) {
+		TXQ_STATS_INC(txq, offload_cow_skb_err);
+		goto tx_drop_pkts;
+	} else if (!offload) {
+		/* no TS in current wqe */
+		wqebb_cnt -= 1;
+	}
+
+	owner = spnic_set_wqe_combo(txq, &wqe_combo, offload, num_sge, &pi);
+	if (offload) {
+		/* ip6_frag_id is already big endian, no conversion needed */
+		wqe_combo.task->ip_identify = sphw_hw_be32(task.ip_identify);
+		wqe_combo.task->pkt_info0 = sphw_hw_be32(task.pkt_info0);
+		wqe_combo.task->pkt_info2 = sphw_hw_be32(task.pkt_info2);
+		wqe_combo.task->vlan_offload = sphw_hw_be32(task.vlan_offload);
+	}
+
+	tx_info = &txq->tx_info[pi];
+	tx_info->skb = skb;
+	tx_info->wqebb_cnt = wqebb_cnt;
+	tx_info->valid_nr_frags = valid_nr_frags;
+
+	err = tx_map_skb(nic_dev, skb, valid_nr_frags, txq, tx_info, &wqe_combo);
+	if (err) {
+		spnic_rollback_sq_wqebbs(txq->sq, wqebb_cnt, owner);
+		goto tx_drop_pkts;
+	}
+
+	get_pkt_stats(tx_info, skb);
+
+	spnic_prepare_sq_ctrl(&wqe_combo, queue_info, num_sge, owner);
+
+	spnic_write_db(txq->sq, txq->cos, SQ_CFLAG_DP, spnic_get_sq_local_pi(txq->sq));
+
+	return NETDEV_TX_OK;
+
+tx_drop_pkts:
+	dev_kfree_skb_any(skb);
+
+tx_skb_pad_err:
+	TXQ_STATS_INC(txq, dropped);
+
+	return NETDEV_TX_OK;
+}
+
+netdev_tx_t spnic_lb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct spnic_nic_dev *nic_dev = netdev_priv(netdev);
+	u16 q_id = skb_get_queue_mapping(skb);
+	struct spnic_txq *txq = &nic_dev->txqs[q_id];
+
+	return spnic_send_one_skb(skb, netdev, txq);
+}
+
+netdev_tx_t spnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct spnic_nic_dev *nic_dev = netdev_priv(netdev);
+	struct spnic_txq *txq = NULL;
+	u16 q_id = skb_get_queue_mapping(skb);
+
+	if (unlikely(!netif_carrier_ok(netdev))) {
+		dev_kfree_skb_any(skb);
+		SPNIC_NIC_STATS_INC(nic_dev, tx_carrier_off_drop);
+		return NETDEV_TX_OK;
+	}
+
+	if (unlikely(q_id >= nic_dev->q_params.num_qps)) {
+		txq = &nic_dev->txqs[0];
+		SPNIC_NIC_STATS_INC(nic_dev, tx_invalid_qid);
+		goto tx_drop_pkts;
+	}
+	txq = &nic_dev->txqs[q_id];
+
+	return spnic_send_one_skb(skb, netdev, txq);
+
+tx_drop_pkts:
+	dev_kfree_skb_any(skb);
+	u64_stats_update_begin(&txq->txq_stats.syncp);
+	txq->txq_stats.dropped++;
+	u64_stats_update_end(&txq->txq_stats.syncp);
+
+	return NETDEV_TX_OK;
+}
+
+static inline void tx_free_skb(struct spnic_nic_dev *nic_dev, struct spnic_tx_info *tx_info)
+{
+	tx_unmap_skb(nic_dev, tx_info->skb, tx_info->valid_nr_frags,
+		     tx_info->dma_info);
+	dev_kfree_skb_any(tx_info->skb);
+	tx_info->skb = NULL;
+}
+
+static void free_all_tx_skbs(struct spnic_nic_dev *nic_dev, u32 sq_depth,
+			     struct spnic_tx_info *tx_info_arr)
+{
+	struct spnic_tx_info *tx_info = NULL;
+	u32 idx;
+
+	for (idx = 0; idx < sq_depth; idx++) {
+		tx_info = &tx_info_arr[idx];
+		if (tx_info->skb)
+			tx_free_skb(nic_dev, tx_info);
+	}
+}
+
+int spnic_tx_poll(struct spnic_txq *txq, int budget)
+{
+	struct spnic_nic_dev
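+	/* completion check sketch (assuming q_depth == 1024, so
+	 * q_mask == 1023): with hw_ci == 5 and sw_ci == 1020,
+	 * (hw_ci - sw_ci) & q_mask == 9, so a 9-wqebb send that wrapped
+	 * the ring end is still seen as fully completed below.
+	 */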
*nic_dev = netdev_priv(txq->netdev); + struct spnic_tx_info *tx_info = NULL; + u64 tx_bytes = 0, wake = 0; + int pkts = 0, nr_pkts = 0; + u16 wqebb_cnt = 0; + u16 hw_ci, sw_ci = 0, q_id = txq->sq->q_id; + + hw_ci = spnic_get_sq_hw_ci(txq->sq); + dma_rmb(); + sw_ci = spnic_get_sq_local_ci(txq->sq); + + do { + tx_info = &txq->tx_info[sw_ci]; + + /* Whether all of the wqebb of this wqe is completed */ + if (hw_ci == sw_ci || + ((hw_ci - sw_ci) & txq->q_mask) < tx_info->wqebb_cnt) + break; + + sw_ci = (u16)(sw_ci + tx_info->wqebb_cnt) & txq->q_mask; + prefetch(&txq->tx_info[sw_ci]); + + wqebb_cnt += tx_info->wqebb_cnt; + + tx_bytes += tx_info->num_bytes; + nr_pkts += tx_info->num_pkts; + pkts++; + + tx_free_skb(nic_dev, tx_info); + + } while (likely(pkts < budget)); + + spnic_update_sq_local_ci(txq->sq, wqebb_cnt); + + if (unlikely(__netif_subqueue_stopped(nic_dev->netdev, q_id) && + spnic_get_sq_free_wqebbs(txq->sq) >= 1 && + test_bit(SPNIC_INTF_UP, &nic_dev->flags))) { + struct netdev_queue *netdev_txq = netdev_get_tx_queue(txq->netdev, q_id); + + __netif_tx_lock(netdev_txq, smp_processor_id()); + /* To avoid re-waking subqueue with xmit_frame */ + if (__netif_subqueue_stopped(nic_dev->netdev, q_id)) { + netif_wake_subqueue(nic_dev->netdev, q_id); + wake++; + } + __netif_tx_unlock(netdev_txq); + } + + u64_stats_update_begin(&txq->txq_stats.syncp); + txq->txq_stats.bytes += tx_bytes; + txq->txq_stats.packets += nr_pkts; + txq->txq_stats.wake += wake; + u64_stats_update_end(&txq->txq_stats.syncp); + + return pkts; +} + +void spnic_set_txq_cos(struct spnic_nic_dev *nic_dev, u16 start_qid, u16 q_num, u8 cos) +{ + u16 idx; + + for (idx = 0; idx < q_num; idx++) + nic_dev->txqs[idx + start_qid].cos = cos; +} + +#define SPNIC_BDS_PER_SQ_WQEBB \ + (SPNIC_SQ_WQEBB_SIZE / sizeof(struct spnic_sq_bufdesc)) + +int spnic_alloc_txqs_res(struct spnic_nic_dev *nic_dev, u16 num_sq, + u32 sq_depth, struct spnic_dyna_txq_res *txqs_res) +{ + struct spnic_dyna_txq_res *tqres = NULL; + int idx, i; + u64 size; + + for (idx = 0; idx < num_sq; idx++) { + tqres = &txqs_res[idx]; + + size = sizeof(*tqres->tx_info) * sq_depth; + tqres->tx_info = kzalloc(size, GFP_KERNEL); + if (!tqres->tx_info) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to alloc txq%d tx info\n", idx); + goto err_out; + } + + size = sizeof(*tqres->bds) * (sq_depth * SPNIC_BDS_PER_SQ_WQEBB + SPNIC_MAX_SQ_SGE); + tqres->bds = kzalloc(size, GFP_KERNEL); + if (!tqres->bds) { + kfree(tqres->tx_info); + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to alloc txq%d bds info\n", idx); + goto err_out; + } + } + + return 0; + +err_out: + for (i = 0; i < idx; i++) { + tqres = &txqs_res[i]; + + kfree(tqres->bds); + kfree(tqres->tx_info); + } + + return -ENOMEM; +} + +void spnic_free_txqs_res(struct spnic_nic_dev *nic_dev, u16 num_sq, + u32 sq_depth, struct spnic_dyna_txq_res *txqs_res) +{ + struct spnic_dyna_txq_res *tqres = NULL; + int idx; + + for (idx = 0; idx < num_sq; idx++) { + tqres = &txqs_res[idx]; + + free_all_tx_skbs(nic_dev, sq_depth, tqres->tx_info); + kfree(tqres->bds); + kfree(tqres->tx_info); + } +} + +int spnic_configure_txqs(struct spnic_nic_dev *nic_dev, u16 num_sq, + u32 sq_depth, struct spnic_dyna_txq_res *txqs_res) +{ + struct spnic_dyna_txq_res *tqres = NULL; + struct spnic_txq *txq = NULL; + u16 q_id; + u32 idx; + + for (q_id = 0; q_id < num_sq; q_id++) { + txq = &nic_dev->txqs[q_id]; + tqres = &txqs_res[q_id]; + + txq->q_depth = sq_depth; + txq->q_mask = sq_depth - 1; + + txq->tx_info = tqres->tx_info; + for (idx = 0; idx < 
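+		/* each tx_info gets SPNIC_BDS_PER_SQ_WQEBB buffer
+		 * descriptors out of the flat tqres->bds array, i.e. one
+		 * wqebb's worth of bufdescs per sq entry.
+		 */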
sq_depth; idx++) + txq->tx_info[idx].dma_info = &tqres->bds[idx * SPNIC_BDS_PER_SQ_WQEBB]; + + txq->sq = spnic_get_nic_queue(nic_dev->hwdev, q_id, SPNIC_SQ); + if (!txq->sq) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to get %u sq\n", q_id); + return -EFAULT; + } + } + + return 0; +} + +int spnic_alloc_txqs(struct net_device *netdev) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + struct pci_dev *pdev = nic_dev->pdev; + struct spnic_txq *txq = NULL; + u16 q_id, num_txqs = nic_dev->max_qps; + u64 txq_size; + + txq_size = num_txqs * sizeof(*nic_dev->txqs); + if (!txq_size) { + nic_err(&pdev->dev, "Cannot allocate zero size txqs\n"); + return -EINVAL; + } + + nic_dev->txqs = kzalloc(txq_size, GFP_KERNEL); + if (!nic_dev->txqs) { + nic_err(&pdev->dev, "Failed to allocate txqs\n"); + return -ENOMEM; + } + + for (q_id = 0; q_id < num_txqs; q_id++) { + txq = &nic_dev->txqs[q_id]; + txq->netdev = netdev; + txq->q_id = q_id; + txq->q_depth = nic_dev->q_params.sq_depth; + txq->q_mask = nic_dev->q_params.sq_depth - 1; + txq->dev = &pdev->dev; + + txq_stats_init(txq); + } + + return 0; +} + +void spnic_free_txqs(struct net_device *netdev) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + + kfree(nic_dev->txqs); +} + +static bool is_hw_complete_sq_process(struct spnic_io_queue *sq) +{ + u16 sw_pi, hw_ci; + + sw_pi = spnic_get_sq_local_pi(sq); + hw_ci = spnic_get_sq_hw_ci(sq); + + return sw_pi == hw_ci; +} + +#define SPNIC_FLUSH_QUEUE_TIMEOUT 1000 +static int spnic_stop_sq(struct spnic_txq *txq) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(txq->netdev); + unsigned long timeout; + int err; + + timeout = msecs_to_jiffies(SPNIC_FLUSH_QUEUE_TIMEOUT) + jiffies; + do { + if (is_hw_complete_sq_process(txq->sq)) + return 0; + + usleep_range(900, 1000); + } while (time_before(jiffies, timeout)); + + /* force hardware to drop packets */ + timeout = msecs_to_jiffies(SPNIC_FLUSH_QUEUE_TIMEOUT) + jiffies; + do { + if (is_hw_complete_sq_process(txq->sq)) + return 0; + + err = spnic_force_drop_tx_pkt(nic_dev->hwdev); + if (err) + break; + + usleep_range(9900, 10000); + } while (time_before(jiffies, timeout)); + + /* Avoid msleep takes too long and get a fake result */ + if (is_hw_complete_sq_process(txq->sq)) + return 0; + + return -EFAULT; +} + +/* should stop transmit any packets before calling this function */ +int spnic_flush_txqs(struct net_device *netdev) +{ + struct spnic_nic_dev *nic_dev = netdev_priv(netdev); + u16 qid; + int err; + + for (qid = 0; qid < nic_dev->q_params.num_qps; qid++) { + err = spnic_stop_sq(&nic_dev->txqs[qid]); + if (err) + nicif_err(nic_dev, drv, netdev, "Failed to stop sq%u\n", qid); + } + + return 0; +} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_tx.h b/drivers/net/ethernet/ramaxel/spnic/spnic_tx.h new file mode 100644 index 000000000000..32cf4eb1bb1d --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/spnic_tx.h @@ -0,0 +1,129 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#ifndef SPNIC_TX_H +#define SPNIC_TX_H + +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <net/ipv6.h> +#include <net/checksum.h> +#include <net/ip6_checksum.h> + +#include "spnic_nic_qp.h" + +#define VXLAN_OFFLOAD_PORT_LE 46354 /* big end is 4789 */ + +enum tx_offload_type { + TX_OFFLOAD_TSO = BIT(0), + TX_OFFLOAD_CSUM = BIT(1), + TX_OFFLOAD_VLAN = BIT(2), + TX_OFFLOAD_INVALID = BIT(3), + TX_OFFLOAD_ESP = BIT(4), +}; + +struct spnic_txq_stats { + u64 packets; + u64 bytes; + u64 busy; + u64 wake; + u64 
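+	/* all counters are written under u64_stats_update_begin()/_end()
+	 * and snapshotted via the fetch_begin/fetch_retry loop in
+	 * spnic_txq_get_stats(), keeping 64-bit reads tear-free on
+	 * 32-bit hosts.
+	 */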
dropped; + + /* Subdivision statistics show in private tool */ + u64 skb_pad_err; + u64 frag_len_overflow; + u64 offload_cow_skb_err; + u64 map_frag_err; + u64 unknown_tunnel_pkt; + u64 frag_size_err; + + struct u64_stats_sync syncp; +}; + +struct spnic_dma_info { + dma_addr_t dma; + u32 len; +}; + +#define IPV4_VERSION 4 +#define IPV6_VERSION 6 +#define TCP_HDR_DOFF_UNIT 2 +#define TRANSPORT_OFFSET(l4_hdr, skb) ((u32)((l4_hdr) - (skb)->data)) + +union spnic_ip { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; +}; + +struct spnic_tx_info { + struct sk_buff *skb; + + u16 wqebb_cnt; + u16 valid_nr_frags; + + int num_sge; + u16 num_pkts; + u64 num_bytes; + struct spnic_dma_info *dma_info; +}; + +struct spnic_txq { + struct net_device *netdev; + struct device *dev; + + struct spnic_txq_stats txq_stats; + + u8 cos; + u16 q_id; + u32 q_mask; + u32 q_depth; + + struct spnic_tx_info *tx_info; + struct spnic_io_queue *sq; + + u64 last_moder_packets; + u64 last_moder_bytes; +} ____cacheline_aligned; + +netdev_tx_t spnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev); + +netdev_tx_t spnic_lb_xmit_frame(struct sk_buff *skb, struct net_device *netdev); + +struct spnic_dyna_txq_res { + struct spnic_tx_info *tx_info; + struct spnic_dma_info *bds; +}; + +void spnic_txq_get_stats(struct spnic_txq *txq, struct spnic_txq_stats *stats); + +void spnic_txq_clean_stats(struct spnic_txq_stats *txq_stats); + +struct spnic_nic_dev; +int spnic_alloc_txqs_res(struct spnic_nic_dev *nic_dev, u16 num_sq, + u32 sq_depth, struct spnic_dyna_txq_res *txqs_res); + +void spnic_free_txqs_res(struct spnic_nic_dev *nic_dev, u16 num_sq, + u32 sq_depth, struct spnic_dyna_txq_res *txqs_res); + +int spnic_configure_txqs(struct spnic_nic_dev *nic_dev, u16 num_sq, + u32 sq_depth, struct spnic_dyna_txq_res *txqs_res); + +int spnic_alloc_txqs(struct net_device *netdev); + +void spnic_free_txqs(struct net_device *netdev); + +int spnic_tx_poll(struct spnic_txq *txq, int budget); + +int spnic_flush_txqs(struct net_device *netdev); + +void spnic_set_txq_cos(struct spnic_nic_dev *nic_dev, u16 start_qid, u16 q_num, u8 cos); + +static inline __sum16 csum_magic(union spnic_ip *ip, unsigned short proto) +{ + return (ip->v4->version == IPV4_VERSION) ? + csum_tcpudp_magic(ip->v4->saddr, ip->v4->daddr, 0, proto, 0) : + csum_ipv6_magic(&ip->v6->saddr, &ip->v6->daddr, 0, proto, 0); +} + +#endif