Ramaxel inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4CBDP
CVE: NA

This initial commit contains Ramaxel's spnic common module.
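
For reference, a minimal sketch of how the API command chains declared in
sphw_api_cmd.h are expected to be driven (illustrative only; "hwdev",
"node_id", "cmd_buf" and "cmd_size" below are placeholders rather than
symbols added by this patch, and real callers are expected in follow-up
spnic patches):

	struct sphw_api_cmd_chain *chains[SPHW_API_CMD_MAX];
	int err;

	/* create every chain type, WRITE_TO_MGMT_CPU through MAX */
	err = sphw_api_cmd_init(hwdev, chains);
	if (err)
		return err;

	/* post a synchronous write to the management CPU on node 'node_id' */
	err = sphw_api_cmd_write(chains[SPHW_API_CMD_WRITE_TO_MGMT_CPU],
				 node_id, cmd_buf, cmd_size);

	/* tear down all chains on probe failure or module unload */
	sphw_api_cmd_free(chains);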
Signed-off-by: Yanling Song songyl@ramaxel.com --- arch/arm64/configs/openeuler_defconfig | 2 + arch/x86/configs/openeuler_defconfig | 2 + drivers/net/ethernet/Kconfig | 1 + drivers/net/ethernet/Makefile | 1 + drivers/net/ethernet/ramaxel/Kconfig | 20 + drivers/net/ethernet/ramaxel/Makefile | 6 + drivers/net/ethernet/ramaxel/spnic/Kconfig | 15 + drivers/net/ethernet/ramaxel/spnic/Makefile | 17 + .../ethernet/ramaxel/spnic/hw/sphw_api_cmd.c | 1165 +++++++++++ .../ethernet/ramaxel/spnic/hw/sphw_api_cmd.h | 277 +++ .../ethernet/ramaxel/spnic/hw/sphw_cfg_cmd.h | 126 ++ .../net/ethernet/ramaxel/spnic/hw/sphw_cmdq.c | 1606 +++++++++++++++ .../net/ethernet/ramaxel/spnic/hw/sphw_cmdq.h | 196 ++ .../ethernet/ramaxel/spnic/hw/sphw_comm_cmd.h | 60 + .../ramaxel/spnic/hw/sphw_comm_msg_intf.h | 273 +++ .../ethernet/ramaxel/spnic/hw/sphw_common.c | 88 + .../ethernet/ramaxel/spnic/hw/sphw_common.h | 118 ++ .../net/ethernet/ramaxel/spnic/hw/sphw_crm.h | 984 +++++++++ .../net/ethernet/ramaxel/spnic/hw/sphw_csr.h | 171 ++ .../net/ethernet/ramaxel/spnic/hw/sphw_eqs.c | 1374 +++++++++++++ .../net/ethernet/ramaxel/spnic/hw/sphw_eqs.h | 157 ++ .../net/ethernet/ramaxel/spnic/hw/sphw_hw.h | 649 ++++++ .../ethernet/ramaxel/spnic/hw/sphw_hw_cfg.c | 1339 ++++++++++++ .../ethernet/ramaxel/spnic/hw/sphw_hw_cfg.h | 327 +++ .../ethernet/ramaxel/spnic/hw/sphw_hw_comm.c | 1253 ++++++++++++ .../ethernet/ramaxel/spnic/hw/sphw_hw_comm.h | 42 + .../ethernet/ramaxel/spnic/hw/sphw_hwdev.c | 1402 +++++++++++++ .../ethernet/ramaxel/spnic/hw/sphw_hwdev.h | 93 + .../net/ethernet/ramaxel/spnic/hw/sphw_hwif.c | 911 +++++++++ .../net/ethernet/ramaxel/spnic/hw/sphw_hwif.h | 102 + .../net/ethernet/ramaxel/spnic/hw/sphw_mbox.c | 1808 +++++++++++++++++ .../net/ethernet/ramaxel/spnic/hw/sphw_mbox.h | 274 +++ .../net/ethernet/ramaxel/spnic/hw/sphw_mgmt.c | 1382 +++++++++++++ .../net/ethernet/ramaxel/spnic/hw/sphw_mgmt.h | 156 ++ .../ramaxel/spnic/hw/sphw_mgmt_msg_base.h | 19 + .../net/ethernet/ramaxel/spnic/hw/sphw_mt.h | 534 +++++ .../ramaxel/spnic/hw/sphw_prof_adap.c | 94 + .../ramaxel/spnic/hw/sphw_prof_adap.h | 49 + .../ethernet/ramaxel/spnic/hw/sphw_profile.h | 36 + .../net/ethernet/ramaxel/spnic/hw/sphw_wq.c | 152 ++ .../net/ethernet/ramaxel/spnic/hw/sphw_wq.h | 119 ++ 41 files changed, 17400 insertions(+) create mode 100644 drivers/net/ethernet/ramaxel/Kconfig create mode 100644 drivers/net/ethernet/ramaxel/Makefile create mode 100644 drivers/net/ethernet/ramaxel/spnic/Kconfig create mode 100644 drivers/net/ethernet/ramaxel/spnic/Makefile create mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_api_cmd.c create mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_api_cmd.h create mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_cfg_cmd.h create mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_cmdq.c create mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_cmdq.h create mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_comm_cmd.h create mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_comm_msg_intf.h create mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_common.c create mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_common.h create mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_crm.h create mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_csr.h create mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_eqs.c create mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_eqs.h create mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw.h create mode 100644 
drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw_cfg.c create mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw_cfg.h create mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw_comm.c create mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw_comm.h create mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_hwdev.c create mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_hwdev.h create mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_hwif.c create mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_hwif.h create mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_mbox.c create mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_mbox.h create mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_mgmt.c create mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_mgmt.h create mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_mgmt_msg_base.h create mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_mt.h create mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_prof_adap.c create mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_prof_adap.h create mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_profile.h create mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_wq.c create mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_wq.h
diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig index 44a9b95abc74..3704c13abbc9 100644 --- a/arch/arm64/configs/openeuler_defconfig +++ b/arch/arm64/configs/openeuler_defconfig @@ -7132,3 +7132,5 @@ CONFIG_RESCTRL=y CONFIG_ACPI_MPAM=y CONFIG_ETMEM_SCAN=m CONFIG_ETMEM_SWAP=m +CONFIG_NET_VENDOR_RAMAXEL=y +CONFIG_SPNIC=m diff --git a/arch/x86/configs/openeuler_defconfig b/arch/x86/configs/openeuler_defconfig index 471d5c5faabb..fde802674852 100644 --- a/arch/x86/configs/openeuler_defconfig +++ b/arch/x86/configs/openeuler_defconfig @@ -8512,3 +8512,5 @@ CONFIG_ARCH_HAS_KCOV=y CONFIG_ETMEM_SCAN=m CONFIG_ETMEM_SWAP=m CONFIG_USERSWAP=y +CONFIG_NET_VENDOR_RAMAXEL=y +CONFIG_SPNIC=m diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 028085045488..e65d548f0cf3 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -159,6 +159,7 @@ source "drivers/net/ethernet/pasemi/Kconfig" source "drivers/net/ethernet/pensando/Kconfig" source "drivers/net/ethernet/qlogic/Kconfig" source "drivers/net/ethernet/qualcomm/Kconfig" +source "drivers/net/ethernet/ramaxel/Kconfig" source "drivers/net/ethernet/rdc/Kconfig" source "drivers/net/ethernet/realtek/Kconfig" source "drivers/net/ethernet/renesas/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 6a7d68ea63ed..5e375d5a6230 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -71,6 +71,7 @@ obj-$(CONFIG_NET_VENDOR_PACKET_ENGINES) += packetengines/ obj-$(CONFIG_NET_VENDOR_PASEMI) += pasemi/ obj-$(CONFIG_NET_VENDOR_QLOGIC) += qlogic/ obj-$(CONFIG_NET_VENDOR_QUALCOMM) += qualcomm/ +obj-$(CONFIG_NET_VENDOR_RAMAXEL) += ramaxel/ obj-$(CONFIG_NET_VENDOR_REALTEK) += realtek/ obj-$(CONFIG_NET_VENDOR_RENESAS) += renesas/ obj-$(CONFIG_NET_VENDOR_RDC) += rdc/ diff --git a/drivers/net/ethernet/ramaxel/Kconfig b/drivers/net/ethernet/ramaxel/Kconfig new file mode 100644 index 000000000000..987c7eb4880b --- /dev/null +++ b/drivers/net/ethernet/ramaxel/Kconfig @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Ramaxel driver configuration +# + +config NET_VENDOR_RAMAXEL + bool "Ramaxel devices" + default y + help + If you have a network (Ethernet) card belonging to this class, say Y. + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Ramaxel cards. If you say Y, you will be asked + for your specific card in the following questions. + +if NET_VENDOR_RAMAXEL + + source "drivers/net/ethernet/ramaxel/spnic/Kconfig" + +endif # NET_VENDOR_RAMAXEL diff --git a/drivers/net/ethernet/ramaxel/Makefile b/drivers/net/ethernet/ramaxel/Makefile new file mode 100644 index 000000000000..087f570c2257 --- /dev/null +++ b/drivers/net/ethernet/ramaxel/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Makefile for the Ramaxel device drivers. 
+# + +obj-$(CONFIG_SPNIC) += spnic/ \ No newline at end of file diff --git a/drivers/net/ethernet/ramaxel/spnic/Kconfig b/drivers/net/ethernet/ramaxel/spnic/Kconfig new file mode 100644 index 000000000000..6864261dfd3d --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/Kconfig @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Ramaxel SPNIC driver configuration +# + +config SPNIC + tristate "Ramaxel PCIE Network Interface Card" + default n + depends on PCI_MSI && NUMA && PCI_IOV && (X86 || ARM64) + help + This driver supports Ramaxel PCIE Ethernet cards. + To compile this driver as part of the kernel, choose Y here. + If unsure, choose N. + The default is N. + diff --git a/drivers/net/ethernet/ramaxel/spnic/Makefile b/drivers/net/ethernet/ramaxel/spnic/Makefile new file mode 100644 index 000000000000..3c9c7b0c7a80 --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/Makefile @@ -0,0 +1,17 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_SPNIC) += spnic.o + +subdir-ccflags-y += -I$(src)/hw + +spnic-objs := hw/sphw_common.o \ + hw/sphw_hwif.o \ + hw/sphw_eqs.o \ + hw/sphw_mbox.o \ + hw/sphw_api_cmd.o \ + hw/sphw_mgmt.o \ + hw/sphw_wq.o \ + hw/sphw_cmdq.o \ + hw/sphw_prof_adap.o \ + hw/sphw_hw_cfg.o \ + hw/sphw_hw_comm.o \ + hw/sphw_hwdev.o diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_api_cmd.c b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_api_cmd.c new file mode 100644 index 000000000000..b459ca322515 --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_api_cmd.c @@ -0,0 +1,1165 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/completion.h> +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/pci.h> +#include <linux/dma-mapping.h> +#include <linux/semaphore.h> +#include <linux/jiffies.h> +#include <linux/delay.h> + +#include "sphw_crm.h" +#include "sphw_hw.h" +#include "sphw_common.h" +#include "sphw_hwdev.h" +#include "sphw_csr.h" +#include "sphw_hwif.h" +#include "sphw_api_cmd.h" + +#define API_CMD_CHAIN_CELL_SIZE_SHIFT 6U + +#define API_CMD_CELL_DESC_SIZE 8 +#define API_CMD_CELL_DATA_ADDR_SIZE 8 + +#define API_CHAIN_NUM_CELLS 32 +#define API_CHAIN_CELL_SIZE 128 +#define API_CHAIN_RSP_DATA_SIZE 128 + +#define API_CMD_CELL_WB_ADDR_SIZE 8 + +#define API_CHAIN_CELL_ALIGNMENT 8 + +#define API_CMD_TIMEOUT 10000 +#define API_CMD_STATUS_TIMEOUT 100000 + +#define API_CMD_BUF_SIZE 2048ULL + +#define API_CMD_NODE_ALIGN_SIZE 512ULL +#define API_PAYLOAD_ALIGN_SIZE 64ULL + +#define API_CHAIN_RESP_ALIGNMENT 128ULL + +#define COMPLETION_TIMEOUT_DEFAULT 1000UL +#define POLLING_COMPLETION_TIMEOUT_DEFAULT 1000U + +#define API_CMD_RESPONSE_DATA_PADDR(val) be64_to_cpu(*((u64 *)(val))) + +#define READ_API_CMD_PRIV_DATA(id, token) ((((u32)(id)) << 16) + (token)) +#define WRITE_API_CMD_PRIV_DATA(id) (((u8)(id)) << 16) + +#define MASKED_IDX(chain, idx) ((idx) & ((chain)->num_cells - 1)) + +#define SIZE_4BYTES(size) (ALIGN((u32)(size), 4U) >> 2) +#define SIZE_8BYTES(size) (ALIGN((u32)(size), 8U) >> 3) + +enum api_cmd_data_format { + SGL_DATA = 1, +}; + +enum api_cmd_type { + API_CMD_WRITE_TYPE = 0, + API_CMD_READ_TYPE = 1, +}; + +enum api_cmd_bypass { + NOT_BYPASS = 0, + BYPASS = 1, +}; + +enum api_cmd_resp_aeq { + NOT_TRIGGER = 0, + TRIGGER = 1, +}; + +enum api_cmd_chn_code { + APICHN_0 = 0, +}; + +enum api_cmd_chn_rsvd { + APICHN_VALID = 0, + APICHN_INVALID = 1, +}; + +#define 
API_DESC_LEN 7 + +static u8 xor_chksum_set(void *data) +{ + int idx; + u8 checksum = 0; + u8 *val = data; + + for (idx = 0; idx < API_DESC_LEN; idx++) + checksum ^= val[idx]; + + return checksum; +} + +static void set_prod_idx(struct sphw_api_cmd_chain *chain) +{ + enum sphw_api_cmd_chain_type chain_type = chain->chain_type; + struct sphw_hwif *hwif = chain->hwdev->hwif; + u32 hw_prod_idx_addr = SPHW_CSR_API_CMD_CHAIN_PI_ADDR(chain_type); + u32 prod_idx = chain->prod_idx; + + sphw_hwif_write_reg(hwif, hw_prod_idx_addr, prod_idx); +} + +static u32 get_hw_cons_idx(struct sphw_api_cmd_chain *chain) +{ + u32 addr, val; + + addr = SPHW_CSR_API_CMD_STATUS_0_ADDR(chain->chain_type); + val = sphw_hwif_read_reg(chain->hwdev->hwif, addr); + + return SPHW_API_CMD_STATUS_GET(val, CONS_IDX); +} + +static void dump_api_chain_reg(struct sphw_api_cmd_chain *chain) +{ + void *dev = chain->hwdev->dev_hdl; + u32 addr, val; + + addr = SPHW_CSR_API_CMD_STATUS_0_ADDR(chain->chain_type); + val = sphw_hwif_read_reg(chain->hwdev->hwif, addr); + + sdk_err(dev, "Chain type: 0x%x, cpld error: 0x%x, check error: 0x%x, current fsm: 0x%x\n", + chain->chain_type, SPHW_API_CMD_STATUS_GET(val, CPLD_ERR), + SPHW_API_CMD_STATUS_GET(val, CHKSUM_ERR), + SPHW_API_CMD_STATUS_GET(val, FSM)); + + sdk_err(dev, "Chain hw current ci: 0x%x\n", + SPHW_API_CMD_STATUS_GET(val, CONS_IDX)); + + addr = SPHW_CSR_API_CMD_CHAIN_PI_ADDR(chain->chain_type); + val = sphw_hwif_read_reg(chain->hwdev->hwif, addr); + sdk_err(dev, "Chain hw current pi: 0x%x\n", val); +} + +/** + * chain_busy - check if the chain is still processing last requests + * @chain: chain to check + **/ +static int chain_busy(struct sphw_api_cmd_chain *chain) +{ + void *dev = chain->hwdev->dev_hdl; + struct sphw_api_cmd_cell_ctxt *ctxt; + u64 resp_header; + + ctxt = &chain->cell_ctxt[chain->prod_idx]; + + switch (chain->chain_type) { + case SPHW_API_CMD_MULTI_READ: + case SPHW_API_CMD_POLL_READ: + resp_header = be64_to_cpu(ctxt->resp->header); + if (ctxt->status && + !SPHW_API_CMD_RESP_HEADER_VALID(resp_header)) { + sdk_err(dev, "Context(0x%x) busy!, pi: %u, resp_header: 0x%08x%08x\n", + ctxt->status, chain->prod_idx, + upper_32_bits(resp_header), + lower_32_bits(resp_header)); + dump_api_chain_reg(chain); + return -EBUSY; + } + break; + case SPHW_API_CMD_POLL_WRITE: + case SPHW_API_CMD_WRITE_TO_MGMT_CPU: + case SPHW_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: + chain->cons_idx = get_hw_cons_idx(chain); + + if (chain->cons_idx == MASKED_IDX(chain, chain->prod_idx + 1)) { + sdk_err(dev, "API CMD chain %d is busy, cons_idx = %u, prod_idx = %u\n", + chain->chain_type, chain->cons_idx, + chain->prod_idx); + dump_api_chain_reg(chain); + return -EBUSY; + } + break; + default: + sdk_err(dev, "Unknown Chain type %d\n", chain->chain_type); + return -EINVAL; + } + + return 0; +} + +/** + * get_cell_data_size - get the data size of specific cell type + * @type: chain type + **/ +static u16 get_cell_data_size(enum sphw_api_cmd_chain_type type) +{ + u16 cell_data_size = 0; + + switch (type) { + case SPHW_API_CMD_POLL_READ: + cell_data_size = ALIGN(API_CMD_CELL_DESC_SIZE + + API_CMD_CELL_WB_ADDR_SIZE + + API_CMD_CELL_DATA_ADDR_SIZE, + API_CHAIN_CELL_ALIGNMENT); + break; + + case SPHW_API_CMD_WRITE_TO_MGMT_CPU: + case SPHW_API_CMD_POLL_WRITE: + case SPHW_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: + cell_data_size = ALIGN(API_CMD_CELL_DESC_SIZE + + API_CMD_CELL_DATA_ADDR_SIZE, + API_CHAIN_CELL_ALIGNMENT); + break; + default: + break; + } + + return cell_data_size; +} + +/** + * prepare_cell_ctrl - prepare the 
ctrl of the cell for the command + * @cell_ctrl: the control of the cell to set the control into it + * @cell_len: the size of the cell + **/ +static void prepare_cell_ctrl(u64 *cell_ctrl, u16 cell_len) +{ + u64 ctrl; + u8 chksum; + + ctrl = SPHW_API_CMD_CELL_CTRL_SET(SIZE_8BYTES(cell_len), CELL_LEN) | + SPHW_API_CMD_CELL_CTRL_SET(0ULL, RD_DMA_ATTR_OFF) | + SPHW_API_CMD_CELL_CTRL_SET(0ULL, WR_DMA_ATTR_OFF); + + chksum = xor_chksum_set(&ctrl); + + ctrl |= SPHW_API_CMD_CELL_CTRL_SET(chksum, XOR_CHKSUM); + + /* The data in the HW should be in Big Endian Format */ + *cell_ctrl = cpu_to_be64(ctrl); +} + +/** + * prepare_api_cmd - prepare API CMD command + * @chain: chain for the command + * @cell: the cell of the command + * @node_id: destination node on the card that will receive the command + * @cmd: command data + * @cmd_size: the command size + **/ +static void prepare_api_cmd(struct sphw_api_cmd_chain *chain, + struct sphw_api_cmd_cell *cell, u8 node_id, + const void *cmd, u16 cmd_size) +{ + struct sphw_api_cmd_cell_ctxt *cell_ctxt; + u32 priv; + + cell_ctxt = &chain->cell_ctxt[chain->prod_idx]; + + switch (chain->chain_type) { + case SPHW_API_CMD_POLL_READ: + priv = READ_API_CMD_PRIV_DATA(chain->chain_type, + cell_ctxt->saved_prod_idx); + cell->desc = SPHW_API_CMD_DESC_SET(SGL_DATA, API_TYPE) | + SPHW_API_CMD_DESC_SET(API_CMD_READ_TYPE, RD_WR) | + SPHW_API_CMD_DESC_SET(BYPASS, MGMT_BYPASS) | + SPHW_API_CMD_DESC_SET(NOT_TRIGGER, RESP_AEQE_EN) | + SPHW_API_CMD_DESC_SET(priv, PRIV_DATA); + break; + case SPHW_API_CMD_POLL_WRITE: + priv = WRITE_API_CMD_PRIV_DATA(chain->chain_type); + cell->desc = SPHW_API_CMD_DESC_SET(SGL_DATA, API_TYPE) | + SPHW_API_CMD_DESC_SET(API_CMD_WRITE_TYPE, RD_WR) | + SPHW_API_CMD_DESC_SET(BYPASS, MGMT_BYPASS) | + SPHW_API_CMD_DESC_SET(NOT_TRIGGER, RESP_AEQE_EN) | + SPHW_API_CMD_DESC_SET(priv, PRIV_DATA); + break; + case SPHW_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: + case SPHW_API_CMD_WRITE_TO_MGMT_CPU: + priv = WRITE_API_CMD_PRIV_DATA(chain->chain_type); + cell->desc = SPHW_API_CMD_DESC_SET(SGL_DATA, API_TYPE) | + SPHW_API_CMD_DESC_SET(API_CMD_WRITE_TYPE, RD_WR) | + SPHW_API_CMD_DESC_SET(NOT_BYPASS, MGMT_BYPASS) | + SPHW_API_CMD_DESC_SET(TRIGGER, RESP_AEQE_EN) | + SPHW_API_CMD_DESC_SET(priv, PRIV_DATA); + break; + default: + sdk_err(chain->hwdev->dev_hdl, "Unknown Chain type: %d\n", + chain->chain_type); + return; + } + + cell->desc |= SPHW_API_CMD_DESC_SET(APICHN_0, APICHN_CODE) | + SPHW_API_CMD_DESC_SET(APICHN_VALID, APICHN_RSVD); + + cell->desc |= SPHW_API_CMD_DESC_SET(node_id, DEST) | + SPHW_API_CMD_DESC_SET(SIZE_4BYTES(cmd_size), SIZE); + + cell->desc |= SPHW_API_CMD_DESC_SET(xor_chksum_set(&cell->desc), + XOR_CHKSUM); + + /* The data in the HW should be in Big Endian Format */ + cell->desc = cpu_to_be64(cell->desc); + + memcpy(cell_ctxt->api_cmd_vaddr, cmd, cmd_size); +} + +/** + * prepare_cell - prepare cell ctrl and cmd in the current producer cell + * @chain: chain for the command + * @node_id: destination node on the card that will receive the command + * @cmd: command data + * @cmd_size: the command size + * Return: 0 - success, negative - failure + **/ +static void prepare_cell(struct sphw_api_cmd_chain *chain, u8 node_id, + const void *cmd, u16 cmd_size) +{ + struct sphw_api_cmd_cell *curr_node; + u16 cell_size; + + curr_node = chain->curr_node; + + cell_size = get_cell_data_size(chain->chain_type); + + prepare_cell_ctrl(&curr_node->ctrl, cell_size); + prepare_api_cmd(chain, curr_node, node_id, cmd, cmd_size); +} + +static inline void 
cmd_chain_prod_idx_inc(struct sphw_api_cmd_chain *chain) +{ + chain->prod_idx = MASKED_IDX(chain, chain->prod_idx + 1); +} + +static void issue_api_cmd(struct sphw_api_cmd_chain *chain) +{ + set_prod_idx(chain); +} + +/** + * api_cmd_status_update - update the status of the chain + * @chain: chain to update + **/ +static void api_cmd_status_update(struct sphw_api_cmd_chain *chain) +{ + struct sphw_api_cmd_status *wb_status; + enum sphw_api_cmd_chain_type chain_type; + u64 status_header; + u32 buf_desc; + + wb_status = chain->wb_status; + + buf_desc = be32_to_cpu(wb_status->buf_desc); + if (SPHW_API_CMD_STATUS_GET(buf_desc, CHKSUM_ERR)) + return; + + status_header = be64_to_cpu(wb_status->header); + chain_type = SPHW_API_CMD_STATUS_HEADER_GET(status_header, CHAIN_ID); + if (chain_type >= SPHW_API_CMD_MAX) + return; + + if (chain_type != chain->chain_type) + return; + + chain->cons_idx = SPHW_API_CMD_STATUS_GET(buf_desc, CONS_IDX); +} + +static enum sphw_wait_return wait_for_status_poll_handler(void *priv_data) +{ + struct sphw_api_cmd_chain *chain = priv_data; + + if (!chain->hwdev->chip_present_flag) + return WAIT_PROCESS_ERR; + + api_cmd_status_update(chain); + /* SYNC API CMD cmd should start after prev cmd finished */ + if (chain->cons_idx == chain->prod_idx) + return WAIT_PROCESS_CPL; + + return WAIT_PROCESS_WAITING; +} + +/** + * wait_for_status_poll - wait for write to mgmt command to complete + * @chain: the chain of the command + * Return: 0 - success, negative - failure + **/ +static int wait_for_status_poll(struct sphw_api_cmd_chain *chain) +{ + return sphw_wait_for_timeout(chain, wait_for_status_poll_handler, + API_CMD_STATUS_TIMEOUT, 100); +} + +static void copy_resp_data(struct sphw_api_cmd_cell_ctxt *ctxt, void *ack, + u16 ack_size) +{ + struct sphw_api_cmd_resp_fmt *resp = ctxt->resp; + + memcpy(ack, &resp->resp_data, ack_size); + ctxt->status = 0; +} + +static enum sphw_wait_return check_cmd_resp_handler(void *priv_data) +{ + struct sphw_api_cmd_cell_ctxt *ctxt = priv_data; + u64 resp_header; + u8 resp_status; + + resp_header = be64_to_cpu(ctxt->resp->header); + rmb(); /* read the latest header */ + + if (SPHW_API_CMD_RESP_HEADER_VALID(resp_header)) { + resp_status = SPHW_API_CMD_RESP_HEAD_GET(resp_header, STATUS); + if (resp_status) { + pr_err("Api chain response data err, status: %u\n", + resp_status); + return WAIT_PROCESS_ERR; + } + + return WAIT_PROCESS_CPL; + } + + return WAIT_PROCESS_WAITING; +} + +/** + * prepare_cell - polling for respense data of the read api-command + * @chain: pointer to api cmd chain + * + * Return: 0 - success, negative - failure + **/ +static int wait_for_resp_polling(struct sphw_api_cmd_cell_ctxt *ctxt) +{ + return sphw_wait_for_timeout(ctxt, check_cmd_resp_handler, + POLLING_COMPLETION_TIMEOUT_DEFAULT, + USEC_PER_MSEC); +} + +/** + * wait_for_api_cmd_completion - wait for command to complete + * @chain: chain for the command + * Return: 0 - success, negative - failure + **/ +static int wait_for_api_cmd_completion(struct sphw_api_cmd_chain *chain, + struct sphw_api_cmd_cell_ctxt *ctxt, + void *ack, u16 ack_size) +{ + void *dev = chain->hwdev->dev_hdl; + int err = 0; + + switch (chain->chain_type) { + case SPHW_API_CMD_POLL_READ: + err = wait_for_resp_polling(ctxt); + if (!err) + copy_resp_data(ctxt, ack, ack_size); + else + sdk_err(dev, "API CMD poll response timeout\n"); + break; + case SPHW_API_CMD_POLL_WRITE: + case SPHW_API_CMD_WRITE_TO_MGMT_CPU: + err = wait_for_status_poll(chain); + if (err) { + sdk_err(dev, "API CMD Poll status 
timeout, chain type: %d\n", + chain->chain_type); + break; + } + break; + case SPHW_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: + /* No need to wait */ + break; + default: + sdk_err(dev, "Unknown API CMD Chain type: %d\n", + chain->chain_type); + err = -EINVAL; + break; + } + + if (err) + dump_api_chain_reg(chain); + + return err; +} + +static inline void update_api_cmd_ctxt(struct sphw_api_cmd_chain *chain, + struct sphw_api_cmd_cell_ctxt *ctxt) +{ + ctxt->status = 1; + ctxt->saved_prod_idx = chain->prod_idx; + if (ctxt->resp) { + ctxt->resp->header = 0; + + /* make sure "header" was cleared */ + wmb(); + } +} + +/** + * api_cmd - API CMD command + * @chain: chain for the command + * @node_id: destination node on the card that will receive the command + * @cmd: command data + * @size: the command size + * Return: 0 - success, negative - failure + **/ +static int api_cmd(struct sphw_api_cmd_chain *chain, u8 node_id, + const void *cmd, u16 cmd_size, void *ack, u16 ack_size) +{ + struct sphw_api_cmd_cell_ctxt *ctxt = NULL; + + if (chain->chain_type == SPHW_API_CMD_WRITE_ASYNC_TO_MGMT_CPU) + spin_lock(&chain->async_lock); + else + down(&chain->sem); + ctxt = &chain->cell_ctxt[chain->prod_idx]; + if (chain_busy(chain)) { + if (chain->chain_type == SPHW_API_CMD_WRITE_ASYNC_TO_MGMT_CPU) + spin_unlock(&chain->async_lock); + else + up(&chain->sem); + return -EBUSY; + } + update_api_cmd_ctxt(chain, ctxt); + + prepare_cell(chain, node_id, cmd, cmd_size); + + cmd_chain_prod_idx_inc(chain); + + wmb(); /* issue the command */ + + issue_api_cmd(chain); + + /* incremented prod idx, update ctxt */ + + chain->curr_node = chain->cell_ctxt[chain->prod_idx].cell_vaddr; + if (chain->chain_type == SPHW_API_CMD_WRITE_ASYNC_TO_MGMT_CPU) + spin_unlock(&chain->async_lock); + else + up(&chain->sem); + + return wait_for_api_cmd_completion(chain, ctxt, ack, ack_size); +} + +/** + * sphw_api_cmd_write - Write API CMD command + * @chain: chain for write command + * @node_id: destination node on the card that will receive the command + * @cmd: command data + * @size: the command size + * Return: 0 - success, negative - failure + **/ +int sphw_api_cmd_write(struct sphw_api_cmd_chain *chain, u8 node_id, const void *cmd, u16 size) +{ + /* Verify the chain type */ + return api_cmd(chain, node_id, cmd, size, NULL, 0); +} + +/** + * sphw_api_cmd_read - Read API CMD command + * @chain: chain for read command + * @node_id: destination node on the card that will receive the command + * @cmd: command data + * @size: the command size + * Return: 0 - success, negative - failure + **/ +int sphw_api_cmd_read(struct sphw_api_cmd_chain *chain, u8 node_id, const void *cmd, u16 size, + void *ack, u16 ack_size) +{ + return api_cmd(chain, node_id, cmd, size, ack, ack_size); +} + +static enum sphw_wait_return check_chain_restart_handler(void *priv_data) +{ + struct sphw_api_cmd_chain *cmd_chain = priv_data; + u32 reg_addr, val; + + reg_addr = SPHW_CSR_API_CMD_CHAIN_REQ_ADDR(cmd_chain->chain_type); + val = sphw_hwif_read_reg(cmd_chain->hwdev->hwif, reg_addr); + if (!SPHW_API_CMD_CHAIN_REQ_GET(val, RESTART)) + return WAIT_PROCESS_CPL; + + return WAIT_PROCESS_WAITING; +} + +/** + * api_cmd_hw_restart - restart the chain in the HW + * @chain: the API CMD specific chain to restart + **/ +static int api_cmd_hw_restart(struct sphw_api_cmd_chain *cmd_chain) +{ + struct sphw_hwif *hwif = cmd_chain->hwdev->hwif; + u32 reg_addr, val; + + /* Read Modify Write */ + reg_addr = SPHW_CSR_API_CMD_CHAIN_REQ_ADDR(cmd_chain->chain_type); + val = sphw_hwif_read_reg(hwif, 
reg_addr); + + val = SPHW_API_CMD_CHAIN_REQ_CLEAR(val, RESTART); + val |= SPHW_API_CMD_CHAIN_REQ_SET(1, RESTART); + + sphw_hwif_write_reg(hwif, reg_addr, val); + + return sphw_wait_for_timeout(cmd_chain, check_chain_restart_handler, + API_CMD_TIMEOUT, USEC_PER_MSEC); +} + +/** + * api_cmd_ctrl_init - set the control register of a chain + * @chain: the API CMD specific chain to set control register for + **/ +static void api_cmd_ctrl_init(struct sphw_api_cmd_chain *chain) +{ + struct sphw_hwif *hwif = chain->hwdev->hwif; + u32 reg_addr, ctrl; + u32 size; + + /* Read Modify Write */ + reg_addr = SPHW_CSR_API_CMD_CHAIN_CTRL_ADDR(chain->chain_type); + + size = (u32)ilog2(chain->cell_size >> API_CMD_CHAIN_CELL_SIZE_SHIFT); + + ctrl = sphw_hwif_read_reg(hwif, reg_addr); + + ctrl = SPHW_API_CMD_CHAIN_CTRL_CLEAR(ctrl, AEQE_EN) & + SPHW_API_CMD_CHAIN_CTRL_CLEAR(ctrl, CELL_SIZE); + + ctrl |= SPHW_API_CMD_CHAIN_CTRL_SET(0, AEQE_EN) | + SPHW_API_CMD_CHAIN_CTRL_SET(size, CELL_SIZE); + + sphw_hwif_write_reg(hwif, reg_addr, ctrl); +} + +/** + * api_cmd_set_status_addr - set the status address of a chain in the HW + * @chain: the API CMD specific chain to set status address for + **/ +static void api_cmd_set_status_addr(struct sphw_api_cmd_chain *chain) +{ + struct sphw_hwif *hwif = chain->hwdev->hwif; + u32 addr, val; + + addr = SPHW_CSR_API_CMD_STATUS_HI_ADDR(chain->chain_type); + val = upper_32_bits(chain->wb_status_paddr); + sphw_hwif_write_reg(hwif, addr, val); + + addr = SPHW_CSR_API_CMD_STATUS_LO_ADDR(chain->chain_type); + val = lower_32_bits(chain->wb_status_paddr); + sphw_hwif_write_reg(hwif, addr, val); +} + +/** + * api_cmd_set_num_cells - set the number cells of a chain in the HW + * @chain: the API CMD specific chain to set the number of cells for + **/ +static void api_cmd_set_num_cells(struct sphw_api_cmd_chain *chain) +{ + struct sphw_hwif *hwif = chain->hwdev->hwif; + u32 addr, val; + + addr = SPHW_CSR_API_CMD_CHAIN_NUM_CELLS_ADDR(chain->chain_type); + val = chain->num_cells; + sphw_hwif_write_reg(hwif, addr, val); +} + +/** + * api_cmd_head_init - set the head cell of a chain in the HW + * @chain: the API CMD specific chain to set the head for + **/ +static void api_cmd_head_init(struct sphw_api_cmd_chain *chain) +{ + struct sphw_hwif *hwif = chain->hwdev->hwif; + u32 addr, val; + + addr = SPHW_CSR_API_CMD_CHAIN_HEAD_HI_ADDR(chain->chain_type); + val = upper_32_bits(chain->head_cell_paddr); + sphw_hwif_write_reg(hwif, addr, val); + + addr = SPHW_CSR_API_CMD_CHAIN_HEAD_LO_ADDR(chain->chain_type); + val = lower_32_bits(chain->head_cell_paddr); + sphw_hwif_write_reg(hwif, addr, val); +} + +static enum sphw_wait_return check_chain_ready_handler(void *priv_data) +{ + struct sphw_api_cmd_chain *chain = priv_data; + u32 addr, val; + u32 hw_cons_idx; + + addr = SPHW_CSR_API_CMD_STATUS_0_ADDR(chain->chain_type); + val = sphw_hwif_read_reg(chain->hwdev->hwif, addr); + hw_cons_idx = SPHW_API_CMD_STATUS_GET(val, CONS_IDX); + + /* wait for HW cons idx to be updated */ + if (hw_cons_idx == chain->cons_idx) + return WAIT_PROCESS_CPL; + return WAIT_PROCESS_WAITING; +} + +/** + * wait_for_ready_chain - wait for the chain to be ready + * @chain: the API CMD specific chain to wait for + * Return: 0 - success, negative - failure + **/ +static int wait_for_ready_chain(struct sphw_api_cmd_chain *chain) +{ + return sphw_wait_for_timeout(chain, check_chain_ready_handler, + API_CMD_TIMEOUT, USEC_PER_MSEC); +} + +/** + * api_cmd_chain_hw_clean - clean the HW + * @chain: the API CMD specific chain + **/ +static 
void api_cmd_chain_hw_clean(struct sphw_api_cmd_chain *chain) +{ + struct sphw_hwif *hwif = chain->hwdev->hwif; + u32 addr, ctrl; + + addr = SPHW_CSR_API_CMD_CHAIN_CTRL_ADDR(chain->chain_type); + + ctrl = sphw_hwif_read_reg(hwif, addr); + ctrl = SPHW_API_CMD_CHAIN_CTRL_CLEAR(ctrl, RESTART_EN) & + SPHW_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_ERR) & + SPHW_API_CMD_CHAIN_CTRL_CLEAR(ctrl, AEQE_EN) & + SPHW_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_CHK_EN) & + SPHW_API_CMD_CHAIN_CTRL_CLEAR(ctrl, CELL_SIZE); + + sphw_hwif_write_reg(hwif, addr, ctrl); +} + +/** + * api_cmd_chain_hw_init - initialize the chain in the HW + * @chain: the API CMD specific chain to initialize in HW + * Return: 0 - success, negative - failure + **/ +static int api_cmd_chain_hw_init(struct sphw_api_cmd_chain *chain) +{ + api_cmd_chain_hw_clean(chain); + + api_cmd_set_status_addr(chain); + + if (api_cmd_hw_restart(chain)) { + sdk_err(chain->hwdev->dev_hdl, "Failed to restart api_cmd_hw\n"); + return -EBUSY; + } + + api_cmd_ctrl_init(chain); + api_cmd_set_num_cells(chain); + api_cmd_head_init(chain); + + return wait_for_ready_chain(chain); +} + +/** + * alloc_cmd_buf - allocate a dma buffer for API CMD command + * @chain: the API CMD specific chain for the cmd + * @cell: the cell in the HW for the cmd + * @cell_idx: the index of the cell + * Return: 0 - success, negative - failure + **/ +static int alloc_cmd_buf(struct sphw_api_cmd_chain *chain, + struct sphw_api_cmd_cell *cell, u32 cell_idx) +{ + struct sphw_api_cmd_cell_ctxt *cell_ctxt; + void *dev = chain->hwdev->dev_hdl; + void *buf_vaddr; + u64 buf_paddr; + int err = 0; + + buf_vaddr = (u8 *)((u64)chain->buf_vaddr_base + + chain->buf_size_align * cell_idx); + buf_paddr = chain->buf_paddr_base + + chain->buf_size_align * cell_idx; + + cell_ctxt = &chain->cell_ctxt[cell_idx]; + + cell_ctxt->api_cmd_vaddr = buf_vaddr; + + /* set the cmd DMA address in the cell */ + switch (chain->chain_type) { + case SPHW_API_CMD_POLL_READ: + cell->read.hw_cmd_paddr = cpu_to_be64(buf_paddr); + break; + case SPHW_API_CMD_WRITE_TO_MGMT_CPU: + case SPHW_API_CMD_POLL_WRITE: + case SPHW_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: + /* The data in the HW should be in Big Endian Format */ + cell->write.hw_cmd_paddr = cpu_to_be64(buf_paddr); + break; + default: + sdk_err(dev, "Unknown API CMD Chain type: %d\n", + chain->chain_type); + err = -EINVAL; + break; + } + + return err; +} + +/** + * alloc_cmd_buf - allocate a resp buffer for API CMD command + * @chain: the API CMD specific chain for the cmd + * @cell: the cell in the HW for the cmd + * @cell_idx: the index of the cell + **/ +static void alloc_resp_buf(struct sphw_api_cmd_chain *chain, + struct sphw_api_cmd_cell *cell, u32 cell_idx) +{ + struct sphw_api_cmd_cell_ctxt *cell_ctxt; + void *resp_vaddr; + u64 resp_paddr; + + resp_vaddr = (u8 *)((u64)chain->rsp_vaddr_base + + chain->rsp_size_align * cell_idx); + resp_paddr = chain->rsp_paddr_base + + chain->rsp_size_align * cell_idx; + + cell_ctxt = &chain->cell_ctxt[cell_idx]; + + cell_ctxt->resp = resp_vaddr; + cell->read.hw_wb_resp_paddr = cpu_to_be64(resp_paddr); +} + +static int sphw_alloc_api_cmd_cell_buf(struct sphw_api_cmd_chain *chain, u32 cell_idx, + struct sphw_api_cmd_cell *node) +{ + void *dev = chain->hwdev->dev_hdl; + int err; + + /* For read chain, we should allocate buffer for the response data */ + if (chain->chain_type == SPHW_API_CMD_MULTI_READ || + chain->chain_type == SPHW_API_CMD_POLL_READ) + alloc_resp_buf(chain, node, cell_idx); + + switch (chain->chain_type) { + case 
SPHW_API_CMD_WRITE_TO_MGMT_CPU: + case SPHW_API_CMD_POLL_WRITE: + case SPHW_API_CMD_POLL_READ: + case SPHW_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: + err = alloc_cmd_buf(chain, node, cell_idx); + if (err) { + sdk_err(dev, "Failed to allocate cmd buffer\n"); + goto alloc_cmd_buf_err; + } + break; + /* For api command write and api command read, the data section + * is directly inserted in the cell, so no need to allocate. + */ + case SPHW_API_CMD_MULTI_READ: + chain->cell_ctxt[cell_idx].api_cmd_vaddr = + &node->read.hw_cmd_paddr; /* to do: who int this*/ + break; + default: + sdk_err(dev, "Unsupported API CMD chain type\n"); + err = -EINVAL; + goto alloc_cmd_buf_err; + } + + return 0; + +alloc_cmd_buf_err: + + return err; +} + +/** + * api_cmd_create_cell - create API CMD cell of specific chain + * @chain: the API CMD specific chain to create its cell + * @cell_idx: the cell index to create + * @pre_node: previous cell + * @node_vaddr: the virt addr of the cell + * Return: 0 - success, negative - failure + **/ +static int api_cmd_create_cell(struct sphw_api_cmd_chain *chain, u32 cell_idx, + struct sphw_api_cmd_cell *pre_node, + struct sphw_api_cmd_cell **node_vaddr) +{ + struct sphw_api_cmd_cell_ctxt *cell_ctxt; + struct sphw_api_cmd_cell *node; + void *cell_vaddr; + u64 cell_paddr; + int err; + + cell_vaddr = (void *)((u64)chain->cell_vaddr_base + + chain->cell_size_align * cell_idx); + cell_paddr = chain->cell_paddr_base + + chain->cell_size_align * cell_idx; + + cell_ctxt = &chain->cell_ctxt[cell_idx]; + cell_ctxt->cell_vaddr = cell_vaddr; + node = cell_ctxt->cell_vaddr; + + if (!pre_node) { + chain->head_node = cell_vaddr; + chain->head_cell_paddr = cell_paddr; + } else { + /* The data in the HW should be in Big Endian Format */ + pre_node->next_cell_paddr = cpu_to_be64(cell_paddr); + } + + /* Driver software should make sure that there is an empty API + * command cell at the end the chain + */ + node->next_cell_paddr = 0; + + err = sphw_alloc_api_cmd_cell_buf(chain, cell_idx, node); + if (err) + return err; + + *node_vaddr = node; + + return 0; +} + +/** + * api_cmd_create_cells - create API CMD cells for specific chain + * @chain: the API CMD specific chain + * Return: 0 - success, negative - failure + **/ +static int api_cmd_create_cells(struct sphw_api_cmd_chain *chain) +{ + struct sphw_api_cmd_cell *node = NULL, *pre_node = NULL; + void *dev = chain->hwdev->dev_hdl; + u32 cell_idx; + int err; + + for (cell_idx = 0; cell_idx < chain->num_cells; cell_idx++) { + err = api_cmd_create_cell(chain, cell_idx, pre_node, &node); + if (err) { + sdk_err(dev, "Failed to create API CMD cell\n"); + return err; + } + + pre_node = node; + } + + if (!node) + return -EFAULT; + + /* set the Final node to point on the start */ + node->next_cell_paddr = cpu_to_be64(chain->head_cell_paddr); + + /* set the current node to be the head */ + chain->curr_node = chain->head_node; + return 0; +} + +/** + * api_chain_init - initialize API CMD specific chain + * @chain: the API CMD specific chain to initialize + * @attr: attributes to set in the chain + * Return: 0 - success, negative - failure + **/ +static int api_chain_init(struct sphw_api_cmd_chain *chain, + struct sphw_api_cmd_chain_attr *attr) +{ + void *dev = chain->hwdev->dev_hdl; + size_t cell_ctxt_size; + size_t cells_buf_size; + int err; + + chain->chain_type = attr->chain_type; + chain->num_cells = attr->num_cells; + chain->cell_size = attr->cell_size; + chain->rsp_size = attr->rsp_size; + + chain->prod_idx = 0; + chain->cons_idx = 0; + + if 
(chain->chain_type == SPHW_API_CMD_WRITE_ASYNC_TO_MGMT_CPU) + spin_lock_init(&chain->async_lock); + else + sema_init(&chain->sem, 1); + + cell_ctxt_size = chain->num_cells * sizeof(*chain->cell_ctxt); + if (!cell_ctxt_size) { + sdk_err(dev, "Api chain cell size cannot be zero\n"); + return -EINVAL; + } + + chain->cell_ctxt = kzalloc(cell_ctxt_size, GFP_KERNEL); + if (!chain->cell_ctxt) { + sdk_err(dev, "Failed to allocate cell contexts for a chain\n"); + return -ENOMEM; + } + + chain->wb_status = dma_alloc_coherent(dev, sizeof(*chain->wb_status), + &chain->wb_status_paddr, GFP_KERNEL); + if (!chain->wb_status) { + sdk_err(dev, "Failed to allocate DMA wb status\n"); + err = -ENOMEM; + goto alloc_wb_status_err; + } + + chain->cell_size_align = ALIGN((u64)chain->cell_size, + API_CMD_NODE_ALIGN_SIZE); + chain->rsp_size_align = ALIGN((u64)chain->rsp_size, + API_CHAIN_RESP_ALIGNMENT); + chain->buf_size_align = ALIGN(API_CMD_BUF_SIZE, API_PAYLOAD_ALIGN_SIZE); + + cells_buf_size = (chain->cell_size_align + chain->rsp_size_align + + chain->buf_size_align) * chain->num_cells; + + err = sphw_dma_alloc_coherent_align(dev, cells_buf_size, API_CMD_NODE_ALIGN_SIZE, + GFP_KERNEL, &chain->cells_addr); + if (err) { + sdk_err(dev, "Failed to allocate API CMD cells buffer\n"); + goto alloc_cells_buf_err; + } + + chain->cell_vaddr_base = chain->cells_addr.align_vaddr; + chain->cell_paddr_base = chain->cells_addr.align_paddr; + + chain->rsp_vaddr_base = (u8 *)((u64)chain->cell_vaddr_base + + chain->cell_size_align * chain->num_cells); + chain->rsp_paddr_base = chain->cell_paddr_base + + chain->cell_size_align * chain->num_cells; + + chain->buf_vaddr_base = (u8 *)((u64)chain->rsp_vaddr_base + + chain->rsp_size_align * chain->num_cells); + chain->buf_paddr_base = chain->rsp_paddr_base + + chain->rsp_size_align * chain->num_cells; + + return 0; + +alloc_cells_buf_err: + dma_free_coherent(dev, sizeof(*chain->wb_status), + chain->wb_status, chain->wb_status_paddr); + +alloc_wb_status_err: + kfree(chain->cell_ctxt); + + return err; +} + +/** + * api_chain_free - free API CMD specific chain + * @chain: the API CMD specific chain to free + **/ +static void api_chain_free(struct sphw_api_cmd_chain *chain) +{ + void *dev = chain->hwdev->dev_hdl; + + sphw_dma_free_coherent_align(dev, &chain->cells_addr); + + dma_free_coherent(dev, sizeof(*chain->wb_status), + chain->wb_status, chain->wb_status_paddr); + kfree(chain->cell_ctxt); +} + +/** + * api_cmd_create_chain - create API CMD specific chain + * @chain: the API CMD specific chain to create + * @attr: attributes to set in the chain + * Return: 0 - success, negative - failure + **/ +static int api_cmd_create_chain(struct sphw_api_cmd_chain **cmd_chain, + struct sphw_api_cmd_chain_attr *attr) +{ + struct sphw_hwdev *hwdev = attr->hwdev; + struct sphw_api_cmd_chain *chain = NULL; + int err; + + if (attr->num_cells & (attr->num_cells - 1)) { + sdk_err(hwdev->dev_hdl, "Invalid number of cells, must be power of 2\n"); + return -EINVAL; + } + + chain = kzalloc(sizeof(*chain), GFP_KERNEL); + if (!chain) + return -ENOMEM; + + chain->hwdev = hwdev; + + err = api_chain_init(chain, attr); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to initialize chain\n"); + goto chain_init_err; + } + + err = api_cmd_create_cells(chain); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to create cells for API CMD chain\n"); + goto create_cells_err; + } + + err = api_cmd_chain_hw_init(chain); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to initialize chain HW\n"); + goto chain_hw_init_err; + } + + 
*cmd_chain = chain; + return 0; + +chain_hw_init_err: +create_cells_err: + api_chain_free(chain); + +chain_init_err: + kfree(chain); + return err; +} + +/** + * api_cmd_destroy_chain - destroy API CMD specific chain + * @chain: the API CMD specific chain to destroy + **/ +static void api_cmd_destroy_chain(struct sphw_api_cmd_chain *chain) +{ + api_chain_free(chain); + kfree(chain); +} + +/** + * sphw_api_cmd_init - Initialize all the API CMD chains + * @hwif: the hardware interface of a pci function device + * @chain: the API CMD chains that will be initialized + * Return: 0 - success, negative - failure + **/ +int sphw_api_cmd_init(struct sphw_hwdev *hwdev, struct sphw_api_cmd_chain **chain) +{ + void *dev = hwdev->dev_hdl; + struct sphw_api_cmd_chain_attr attr; + enum sphw_api_cmd_chain_type chain_type, i; + int err; + + attr.hwdev = hwdev; + attr.num_cells = API_CHAIN_NUM_CELLS; + attr.cell_size = API_CHAIN_CELL_SIZE; + attr.rsp_size = API_CHAIN_RSP_DATA_SIZE; + + chain_type = SPHW_API_CMD_WRITE_TO_MGMT_CPU; + for (; chain_type < SPHW_API_CMD_MAX; chain_type++) { + attr.chain_type = chain_type; + + err = api_cmd_create_chain(&chain[chain_type], &attr); + if (err) { + sdk_err(dev, "Failed to create chain %d\n", chain_type); + goto create_chain_err; + } + } + + return 0; + +create_chain_err: + i = SPHW_API_CMD_WRITE_TO_MGMT_CPU; + for (; i < chain_type; i++) + api_cmd_destroy_chain(chain[i]); + + return err; +} + +/** + * sphw_api_cmd_free - free the API CMD chains + * @chain: the API CMD chains that will be freed + **/ +void sphw_api_cmd_free(struct sphw_api_cmd_chain **chain) +{ + enum sphw_api_cmd_chain_type chain_type; + + chain_type = SPHW_API_CMD_WRITE_TO_MGMT_CPU; + + for (; chain_type < SPHW_API_CMD_MAX; chain_type++) + api_cmd_destroy_chain(chain[chain_type]); +} diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_api_cmd.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_api_cmd.h new file mode 100644 index 000000000000..14a6c0b50e17 --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_api_cmd.h @@ -0,0 +1,277 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#ifndef SPHW_API_CMD_H +#define SPHW_API_CMD_H + +#include "sphw_hwif.h" + +/*api_cmd_cell.ctrl structure*/ +#define SPHW_API_CMD_CELL_CTRL_CELL_LEN_SHIFT 0 +#define SPHW_API_CMD_CELL_CTRL_RD_DMA_ATTR_OFF_SHIFT 16 +#define SPHW_API_CMD_CELL_CTRL_WR_DMA_ATTR_OFF_SHIFT 24 +#define SPHW_API_CMD_CELL_CTRL_XOR_CHKSUM_SHIFT 56 + +#define SPHW_API_CMD_CELL_CTRL_CELL_LEN_MASK 0x3FU +#define SPHW_API_CMD_CELL_CTRL_RD_DMA_ATTR_OFF_MASK 0x3FU +#define SPHW_API_CMD_CELL_CTRL_WR_DMA_ATTR_OFF_MASK 0x3FU +#define SPHW_API_CMD_CELL_CTRL_XOR_CHKSUM_MASK 0xFFU + +#define SPHW_API_CMD_CELL_CTRL_SET(val, member) \ + ((((u64)(val)) & SPHW_API_CMD_CELL_CTRL_##member##_MASK) << \ + SPHW_API_CMD_CELL_CTRL_##member##_SHIFT) + +/*api_cmd_cell.desc structure*/ +#define SPHW_API_CMD_DESC_API_TYPE_SHIFT 0 +#define SPHW_API_CMD_DESC_RD_WR_SHIFT 1 +#define SPHW_API_CMD_DESC_MGMT_BYPASS_SHIFT 2 +#define SPHW_API_CMD_DESC_RESP_AEQE_EN_SHIFT 3 +#define SPHW_API_CMD_DESC_APICHN_RSVD_SHIFT 4 +#define SPHW_API_CMD_DESC_APICHN_CODE_SHIFT 6 +#define SPHW_API_CMD_DESC_PRIV_DATA_SHIFT 8 +#define SPHW_API_CMD_DESC_DEST_SHIFT 32 +#define SPHW_API_CMD_DESC_SIZE_SHIFT 40 +#define SPHW_API_CMD_DESC_XOR_CHKSUM_SHIFT 56 + +#define SPHW_API_CMD_DESC_API_TYPE_MASK 0x1U +#define SPHW_API_CMD_DESC_RD_WR_MASK 0x1U +#define SPHW_API_CMD_DESC_MGMT_BYPASS_MASK 0x1U +#define SPHW_API_CMD_DESC_RESP_AEQE_EN_MASK 0x1U 
+#define SPHW_API_CMD_DESC_APICHN_RSVD_MASK 0x3U +#define SPHW_API_CMD_DESC_APICHN_CODE_MASK 0x3U +#define SPHW_API_CMD_DESC_PRIV_DATA_MASK 0xFFFFFFU +#define SPHW_API_CMD_DESC_DEST_MASK 0x1FU +#define SPHW_API_CMD_DESC_SIZE_MASK 0x7FFU +#define SPHW_API_CMD_DESC_XOR_CHKSUM_MASK 0xFFU + +#define SPHW_API_CMD_DESC_SET(val, member) \ + ((((u64)(val)) & SPHW_API_CMD_DESC_##member##_MASK) << \ + SPHW_API_CMD_DESC_##member##_SHIFT) + +/*api_cmd_status header*/ +#define SPHW_API_CMD_STATUS_HEADER_VALID_SHIFT 0 +#define SPHW_API_CMD_STATUS_HEADER_CHAIN_ID_SHIFT 16 + +#define SPHW_API_CMD_STATUS_HEADER_VALID_MASK 0xFFU +#define SPHW_API_CMD_STATUS_HEADER_CHAIN_ID_MASK 0xFFU + +#define SPHW_API_CMD_STATUS_HEADER_GET(val, member) \ + (((val) >> SPHW_API_CMD_STATUS_HEADER_##member##_SHIFT) & \ + SPHW_API_CMD_STATUS_HEADER_##member##_MASK) + +/*API_CHAIN_REQ CSR: 0x0020+api_idx*0x080*/ +#define SPHW_API_CMD_CHAIN_REQ_RESTART_SHIFT 1 +#define SPHW_API_CMD_CHAIN_REQ_WB_TRIGGER_SHIFT 2 + +#define SPHW_API_CMD_CHAIN_REQ_RESTART_MASK 0x1U +#define SPHW_API_CMD_CHAIN_REQ_WB_TRIGGER_MASK 0x1U + +#define SPHW_API_CMD_CHAIN_REQ_SET(val, member) \ + (((val) & SPHW_API_CMD_CHAIN_REQ_##member##_MASK) << \ + SPHW_API_CMD_CHAIN_REQ_##member##_SHIFT) + +#define SPHW_API_CMD_CHAIN_REQ_GET(val, member) \ + (((val) >> SPHW_API_CMD_CHAIN_REQ_##member##_SHIFT) & \ + SPHW_API_CMD_CHAIN_REQ_##member##_MASK) + +#define SPHW_API_CMD_CHAIN_REQ_CLEAR(val, member) \ + ((val) & (~(SPHW_API_CMD_CHAIN_REQ_##member##_MASK \ + << SPHW_API_CMD_CHAIN_REQ_##member##_SHIFT))) + +/*API_CHAIN_CTL CSR: 0x0014+api_idx*0x080*/ +#define SPHW_API_CMD_CHAIN_CTRL_RESTART_EN_SHIFT 1 +#define SPHW_API_CMD_CHAIN_CTRL_XOR_ERR_SHIFT 2 +#define SPHW_API_CMD_CHAIN_CTRL_AEQE_EN_SHIFT 4 +#define SPHW_API_CMD_CHAIN_CTRL_AEQ_ID_SHIFT 8 +#define SPHW_API_CMD_CHAIN_CTRL_XOR_CHK_EN_SHIFT 28 +#define SPHW_API_CMD_CHAIN_CTRL_CELL_SIZE_SHIFT 30 + +#define SPHW_API_CMD_CHAIN_CTRL_RESTART_EN_MASK 0x1U +#define SPHW_API_CMD_CHAIN_CTRL_XOR_ERR_MASK 0x1U +#define SPHW_API_CMD_CHAIN_CTRL_AEQE_EN_MASK 0x1U +#define SPHW_API_CMD_CHAIN_CTRL_AEQ_ID_MASK 0x3U +#define SPHW_API_CMD_CHAIN_CTRL_XOR_CHK_EN_MASK 0x3U +#define SPHW_API_CMD_CHAIN_CTRL_CELL_SIZE_MASK 0x3U + +#define SPHW_API_CMD_CHAIN_CTRL_SET(val, member) \ + (((val) & SPHW_API_CMD_CHAIN_CTRL_##member##_MASK) << \ + SPHW_API_CMD_CHAIN_CTRL_##member##_SHIFT) + +#define SPHW_API_CMD_CHAIN_CTRL_CLEAR(val, member) \ + ((val) & (~(SPHW_API_CMD_CHAIN_CTRL_##member##_MASK \ + << SPHW_API_CMD_CHAIN_CTRL_##member##_SHIFT))) + +/*api_cmd rsp header*/ +#define SPHW_API_CMD_RESP_HEAD_VALID_SHIFT 0 +#define SPHW_API_CMD_RESP_HEAD_STATUS_SHIFT 8 +#define SPHW_API_CMD_RESP_HEAD_CHAIN_ID_SHIFT 16 +#define SPHW_API_CMD_RESP_HEAD_RESP_LEN_SHIFT 24 +#define SPHW_API_CMD_RESP_HEAD_DRIVER_PRIV_SHIFT 40 + +#define SPHW_API_CMD_RESP_HEAD_VALID_MASK 0xFF +#define SPHW_API_CMD_RESP_HEAD_STATUS_MASK 0xFFU +#define SPHW_API_CMD_RESP_HEAD_CHAIN_ID_MASK 0xFFU +#define SPHW_API_CMD_RESP_HEAD_RESP_LEN_MASK 0x1FFU +#define SPHW_API_CMD_RESP_HEAD_DRIVER_PRIV_MASK 0xFFFFFFU + +#define SPHW_API_CMD_RESP_HEAD_VALID_CODE 0xFF + +#define SPHW_API_CMD_RESP_HEADER_VALID(val) \ + (((val) & SPHW_API_CMD_RESP_HEAD_VALID_MASK) == \ + SPHW_API_CMD_RESP_HEAD_VALID_CODE) + +#define SPHW_API_CMD_RESP_HEAD_GET(val, member) \ + (((val) >> SPHW_API_CMD_RESP_HEAD_##member##_SHIFT) & \ + SPHW_API_CMD_RESP_HEAD_##member##_MASK) + +#define SPHW_API_CMD_RESP_HEAD_CHAIN_ID(val) \ + (((val) >> SPHW_API_CMD_RESP_HEAD_CHAIN_ID_SHIFT) & \ + 
SPHW_API_CMD_RESP_HEAD_CHAIN_ID_MASK) + +#define SPHW_API_CMD_RESP_HEAD_DRIVER_PRIV(val) \ + ((u16)(((val) >> SPHW_API_CMD_RESP_HEAD_DRIVER_PRIV_SHIFT) & \ + SPHW_API_CMD_RESP_HEAD_DRIVER_PRIV_MASK)) +/*API_STATUS_0 CSR: 0x0030+api_idx*0x080*/ +#define SPHW_API_CMD_STATUS_CONS_IDX_MASK 0xFFFFFFU +#define SPHW_API_CMD_STATUS_CONS_IDX_SHIFT 0 + +#define SPHW_API_CMD_STATUS_FSM_MASK 0xFU +#define SPHW_API_CMD_STATUS_FSM_SHIFT 24 + +#define SPHW_API_CMD_STATUS_CHKSUM_ERR_MASK 0x3U +#define SPHW_API_CMD_STATUS_CHKSUM_ERR_SHIFT 28 + +#define SPHW_API_CMD_STATUS_CPLD_ERR_MASK 0x1U +#define SPHW_API_CMD_STATUS_CPLD_ERR_SHIFT 30 + +#define SPHW_API_CMD_STATUS_CONS_IDX(val) \ + ((val) & SPHW_API_CMD_STATUS_CONS_IDX_MASK) + +#define SPHW_API_CMD_STATUS_CHKSUM_ERR(val) \ + (((val) >> SPHW_API_CMD_STATUS_CHKSUM_ERR_SHIFT) & \ + SPHW_API_CMD_STATUS_CHKSUM_ERR_MASK) + +#define SPHW_API_CMD_STATUS_GET(val, member) \ + (((val) >> SPHW_API_CMD_STATUS_##member##_SHIFT) & \ + SPHW_API_CMD_STATUS_##member##_MASK) + +enum sphw_api_cmd_chain_type { + /* write to mgmt cpu command with completion */ + SPHW_API_CMD_WRITE_TO_MGMT_CPU = 2, + /* multi read command with completion notification - not used */ + SPHW_API_CMD_MULTI_READ = 3, + /* write command without completion notification */ + SPHW_API_CMD_POLL_WRITE = 4, + /* read command without completion notification */ + SPHW_API_CMD_POLL_READ = 5, + /* read from mgmt cpu command with completion */ + SPHW_API_CMD_WRITE_ASYNC_TO_MGMT_CPU = 6, + SPHW_API_CMD_MAX, +}; + +struct sphw_api_cmd_status { + u64 header; + u32 buf_desc; + u32 cell_addr_hi; + u32 cell_addr_lo; + u32 rsvd0; + u64 rsvd1; +}; + +/* HW struct */ +struct sphw_api_cmd_cell { + u64 ctrl; + + /* address is 64 bit in HW struct */ + u64 next_cell_paddr; + + u64 desc; + + /* HW struct */ + union { + struct { + u64 hw_cmd_paddr; + } write; + + struct { + u64 hw_wb_resp_paddr; + u64 hw_cmd_paddr; + } read; + }; +}; + +struct sphw_api_cmd_resp_fmt { + u64 header; + u64 resp_data; +}; + +struct sphw_api_cmd_cell_ctxt { + struct sphw_api_cmd_cell *cell_vaddr; + + void *api_cmd_vaddr; + + struct sphw_api_cmd_resp_fmt *resp; + + struct completion done; + int status; + + u32 saved_prod_idx; +}; + +struct sphw_api_cmd_chain_attr { + struct sphw_hwdev *hwdev; + enum sphw_api_cmd_chain_type chain_type; + + u32 num_cells; + u16 rsp_size; + u16 cell_size; +}; + +struct sphw_api_cmd_chain { + struct sphw_hwdev *hwdev; + enum sphw_api_cmd_chain_type chain_type; + + u32 num_cells; + u16 cell_size; + u16 rsp_size; + + /* HW members is 24 bit format */ + u32 prod_idx; + u32 cons_idx; + + struct semaphore sem; + /* Async cmd can not be scheduling */ + spinlock_t async_lock; + + dma_addr_t wb_status_paddr; + struct sphw_api_cmd_status *wb_status; + + dma_addr_t head_cell_paddr; + struct sphw_api_cmd_cell *head_node; + + struct sphw_api_cmd_cell_ctxt *cell_ctxt; + struct sphw_api_cmd_cell *curr_node; + + struct sphw_dma_addr_align cells_addr; + + u8 *cell_vaddr_base; + u64 cell_paddr_base; + u8 *rsp_vaddr_base; + u64 rsp_paddr_base; + u8 *buf_vaddr_base; + u64 buf_paddr_base; + u64 cell_size_align; + u64 rsp_size_align; + u64 buf_size_align; +}; + +int sphw_api_cmd_write(struct sphw_api_cmd_chain *chain, u8 node_id, const void *cmd, u16 size); + +int sphw_api_cmd_read(struct sphw_api_cmd_chain *chain, u8 node_id, const void *cmd, u16 size, + void *ack, u16 ack_size); + +int sphw_api_cmd_init(struct sphw_hwdev *hwdev, struct sphw_api_cmd_chain **chain); + +void sphw_api_cmd_free(struct sphw_api_cmd_chain **chain); + +#endif 
diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_cfg_cmd.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_cfg_cmd.h new file mode 100644 index 000000000000..23644958e33a --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_cfg_cmd.h @@ -0,0 +1,126 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#ifndef SPHW_CFG_CMD_H +#define SPHW_CFG_CMD_H + +#include "sphw_mgmt_msg_base.h" + +enum cfg_cmd { + CFG_CMD_GET_DEV_CAP = 0, +}; + +struct cfg_cmd_dev_cap { + struct mgmt_msg_head head; + + u16 func_id; + u16 rsvd1; + + /* Public resources */ + u8 host_id; + u8 ep_id; + u8 er_id; + u8 port_id; + + u16 host_total_func; + u8 host_pf_num; + u8 pf_id_start; + u16 host_vf_num; + u16 vf_id_start; + u8 host_oq_id_mask_val; + u8 rsvd_host[3]; + + u16 svc_cap_en; + u16 max_vf; + u8 flexq_en; + u8 valid_cos_bitmap; + /* Reserved for func_valid_cos_bitmap */ + u16 rsvd_func1; + u32 rsvd_func2; + + u8 sf_svc_attr; + u8 func_sf_en; + u16 rsvd_sf; + + u32 max_conn_num; + u16 max_stick2cache_num; + u16 max_bfilter_start_addr; + u16 bfilter_len; + u16 hash_bucket_num; + + /* shared resource */ + u8 host_sf_en; + u8 rsvd2_sr[3]; + u32 host_pctx_num; + u32 host_ccxt_num; + u32 host_scq_num; + u32 host_srq_num; + u32 host_mpt_num; + + /* l2nic */ + u16 nic_max_sq_id; + u16 nic_max_rq_id; + u32 rsvd_nic[3]; + + /* RoCE */ + u32 roce_max_qp; + u32 roce_max_cq; + u32 roce_max_srq; + u32 roce_max_mpt; + u32 roce_max_drc_qp; + + u32 roce_cmtt_cl_start; + u32 roce_cmtt_cl_end; + u32 roce_cmtt_cl_size; + + u32 roce_dmtt_cl_start; + u32 roce_dmtt_cl_end; + u32 roce_dmtt_cl_size; + + u32 roce_wqe_cl_start; + u32 roce_wqe_cl_end; + u32 roce_wqe_cl_size; + u8 roce_srq_container_mode; + u8 rsvd_roce1[3]; + u32 rsvd_roce2[5]; + + /* IPsec */ + u32 ipsec_max_sactx; + u32 rsvd_ipsec[3]; + + /* OVS */ + u32 ovs_max_qpc; + u16 fake_vf_start_id; + u8 fake_vf_num; + u8 rsvd_ovs1; + u32 rsvd_ovs2[2]; + + /* ToE */ + u32 toe_max_pctx; + u32 toe_max_cq; + u16 toe_max_srq; + u16 toe_srq_id_start; + u16 toe_max_mpt; + u16 toe_max_cctxt; + u32 rsvd_toe[2]; + + /* FC */ + u32 fc_max_pctx; + u32 fc_max_scq; + u32 fc_max_srq; + + u32 fc_max_cctx; + u32 fc_cctx_id_start; + + u8 fc_vp_id_start; + u8 fc_vp_id_end; + u8 rsvd_fc1[2]; + u32 rsvd_fc2[5]; + + /* VBS */ + u32 rsvd_vbs[4]; + + u32 rsvd_glb[11]; +}; + +#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_cmdq.c b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_cmdq.c new file mode 100644 index 000000000000..f4264bdfb91c --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_cmdq.c @@ -0,0 +1,1606 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/pci.h> +#include <linux/errno.h> +#include <linux/completion.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/spinlock.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/delay.h> + +#include "sphw_crm.h" +#include "sphw_hw.h" +#include "sphw_hwdev.h" +#include "sphw_eqs.h" +#include "sphw_common.h" +#include "sphw_wq.h" +#include "sphw_hw_comm.h" +#include "sphw_cmdq.h" + +#define SPHW_CMDQ_BUF_SIZE 2048U +#define SPHW_CMDQ_BUF_HW_RSVD 8 +#define SPHW_CMDQ_MAX_DATA_SIZE \ + (SPHW_CMDQ_BUF_SIZE - SPHW_CMDQ_BUF_HW_RSVD) + +#define CMDQ_CMD_TIMEOUT 300000 /* millisecond */ + +#define UPPER_8_BITS(data) (((data) >> 8) & 0xFF) 
+#define LOWER_8_BITS(data) ((data) & 0xFF) + +#define CMDQ_DB_INFO_HI_PROD_IDX_SHIFT 0 +#define CMDQ_DB_INFO_HI_PROD_IDX_MASK 0xFFU +#define CMDQ_DB_INFO_SET(val, member) \ + ((((u32)(val)) & CMDQ_DB_INFO_##member##_MASK) << \ + CMDQ_DB_INFO_##member##_SHIFT) + +#define CMDQ_DB_HEAD_QUEUE_TYPE_SHIFT 23 +#define CMDQ_DB_HEAD_CMDQ_TYPE_SHIFT 24 +#define CMDQ_DB_HEAD_SRC_TYPE_SHIFT 27 +#define CMDQ_DB_HEAD_QUEUE_TYPE_MASK 0x1U +#define CMDQ_DB_HEAD_CMDQ_TYPE_MASK 0x7U +#define CMDQ_DB_HEAD_SRC_TYPE_MASK 0x1FU +#define CMDQ_DB_HEAD_SET(val, member) \ + ((((u32)(val)) & CMDQ_DB_HEAD_##member##_MASK) << \ + CMDQ_DB_HEAD_##member##_SHIFT) + +#define CMDQ_CTRL_PI_SHIFT 0 +#define CMDQ_CTRL_CMD_SHIFT 16 +#define CMDQ_CTRL_MOD_SHIFT 24 +#define CMDQ_CTRL_ACK_TYPE_SHIFT 29 +#define CMDQ_CTRL_HW_BUSY_BIT_SHIFT 31 + +#define CMDQ_CTRL_PI_MASK 0xFFFFU +#define CMDQ_CTRL_CMD_MASK 0xFFU +#define CMDQ_CTRL_MOD_MASK 0x1FU +#define CMDQ_CTRL_ACK_TYPE_MASK 0x3U +#define CMDQ_CTRL_HW_BUSY_BIT_MASK 0x1U + +#define CMDQ_CTRL_SET(val, member) \ + ((((u32)(val)) & CMDQ_CTRL_##member##_MASK) << \ + CMDQ_CTRL_##member##_SHIFT) + +#define CMDQ_CTRL_GET(val, member) \ + (((val) >> CMDQ_CTRL_##member##_SHIFT) & \ + CMDQ_CTRL_##member##_MASK) + +#define CMDQ_WQE_HEADER_BUFDESC_LEN_SHIFT 0 +#define CMDQ_WQE_HEADER_COMPLETE_FMT_SHIFT 15 +#define CMDQ_WQE_HEADER_DATA_FMT_SHIFT 22 +#define CMDQ_WQE_HEADER_COMPLETE_REQ_SHIFT 23 +#define CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_SHIFT 27 +#define CMDQ_WQE_HEADER_CTRL_LEN_SHIFT 29 +#define CMDQ_WQE_HEADER_HW_BUSY_BIT_SHIFT 31 + +#define CMDQ_WQE_HEADER_BUFDESC_LEN_MASK 0xFFU +#define CMDQ_WQE_HEADER_COMPLETE_FMT_MASK 0x1U +#define CMDQ_WQE_HEADER_DATA_FMT_MASK 0x1U +#define CMDQ_WQE_HEADER_COMPLETE_REQ_MASK 0x1U +#define CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_MASK 0x3U +#define CMDQ_WQE_HEADER_CTRL_LEN_MASK 0x3U +#define CMDQ_WQE_HEADER_HW_BUSY_BIT_MASK 0x1U + +#define CMDQ_WQE_HEADER_SET(val, member) \ + ((((u32)(val)) & CMDQ_WQE_HEADER_##member##_MASK) << \ + CMDQ_WQE_HEADER_##member##_SHIFT) + +#define CMDQ_WQE_HEADER_GET(val, member) \ + (((val) >> CMDQ_WQE_HEADER_##member##_SHIFT) & \ + CMDQ_WQE_HEADER_##member##_MASK) + +#define CMDQ_CTXT_CURR_WQE_PAGE_PFN_SHIFT 0 +#define CMDQ_CTXT_EQ_ID_SHIFT 53 +#define CMDQ_CTXT_CEQ_ARM_SHIFT 61 +#define CMDQ_CTXT_CEQ_EN_SHIFT 62 +#define CMDQ_CTXT_HW_BUSY_BIT_SHIFT 63 + +#define CMDQ_CTXT_CURR_WQE_PAGE_PFN_MASK 0xFFFFFFFFFFFFF +#define CMDQ_CTXT_EQ_ID_MASK 0xFF +#define CMDQ_CTXT_CEQ_ARM_MASK 0x1 +#define CMDQ_CTXT_CEQ_EN_MASK 0x1 +#define CMDQ_CTXT_HW_BUSY_BIT_MASK 0x1 + +#define CMDQ_CTXT_PAGE_INFO_SET(val, member) \ + (((u64)(val) & CMDQ_CTXT_##member##_MASK) << \ + CMDQ_CTXT_##member##_SHIFT) + +#define CMDQ_CTXT_PAGE_INFO_GET(val, member) \ + (((u64)(val) >> CMDQ_CTXT_##member##_SHIFT) & \ + CMDQ_CTXT_##member##_MASK) + +#define CMDQ_CTXT_WQ_BLOCK_PFN_SHIFT 0 +#define CMDQ_CTXT_CI_SHIFT 52 + +#define CMDQ_CTXT_WQ_BLOCK_PFN_MASK 0xFFFFFFFFFFFFF +#define CMDQ_CTXT_CI_MASK 0xFFF + +#define CMDQ_CTXT_BLOCK_INFO_SET(val, member) \ + (((u64)(val) & CMDQ_CTXT_##member##_MASK) << \ + CMDQ_CTXT_##member##_SHIFT) + +#define CMDQ_CTXT_BLOCK_INFO_GET(val, member) \ + (((u64)(val) >> CMDQ_CTXT_##member##_SHIFT) & \ + CMDQ_CTXT_##member##_MASK) + +#define SAVED_DATA_ARM_SHIFT 31 + +#define SAVED_DATA_ARM_MASK 0x1U + +#define SAVED_DATA_SET(val, member) \ + (((val) & SAVED_DATA_##member##_MASK) << \ + SAVED_DATA_##member##_SHIFT) + +#define SAVED_DATA_CLEAR(val, member) \ + ((val) & (~(SAVED_DATA_##member##_MASK << \ + SAVED_DATA_##member##_SHIFT))) + 
+#define WQE_ERRCODE_VAL_SHIFT 0 + +#define WQE_ERRCODE_VAL_MASK 0x7FFFFFFF + +#define WQE_ERRCODE_GET(val, member) \ + (((val) >> WQE_ERRCODE_##member##_SHIFT) & \ + WQE_ERRCODE_##member##_MASK) + +#define CEQE_CMDQ_TYPE_SHIFT 0 + +#define CEQE_CMDQ_TYPE_MASK 0x7 + +#define CEQE_CMDQ_GET(val, member) \ + (((val) >> CEQE_CMDQ_##member##_SHIFT) & \ + CEQE_CMDQ_##member##_MASK) + +#define WQE_COMPLETED(ctrl_info) CMDQ_CTRL_GET(ctrl_info, HW_BUSY_BIT) + +#define WQE_HEADER(wqe) ((struct sphw_cmdq_header *)(wqe)) + +#define CMDQ_DB_PI_OFF(pi) (((u16)LOWER_8_BITS(pi)) << 3) + +#define CMDQ_DB_ADDR(db_base, pi) \ + (((u8 *)(db_base)) + CMDQ_DB_PI_OFF(pi)) + +#define CMDQ_PFN_SHIFT 12 +#define CMDQ_PFN(addr) ((addr) >> CMDQ_PFN_SHIFT) + +#define FIRST_DATA_TO_WRITE_LAST sizeof(u64) + +#define WQE_LCMD_SIZE 64 +#define WQE_SCMD_SIZE 64 + +#define COMPLETE_LEN 3 + +#define CMDQ_WQEBB_SIZE 64 +#define CMDQ_WQE_SIZE 64 + +#define cmdq_to_cmdqs(cmdq) container_of((cmdq) - (cmdq)->cmdq_type, \ + struct sphw_cmdqs, cmdq[0]) + +#define CMDQ_SEND_CMPT_CODE 10 +#define CMDQ_COMPLETE_CMPT_CODE 11 +#define CMDQ_FORCE_STOP_CMPT_CODE 12 + +enum cmdq_scmd_type { + CMDQ_SET_ARM_CMD = 2, +}; + +enum cmdq_wqe_type { + WQE_LCMD_TYPE, + WQE_SCMD_TYPE, +}; + +enum ctrl_sect_len { + CTRL_SECT_LEN = 1, + CTRL_DIRECT_SECT_LEN = 2, +}; + +enum bufdesc_len { + BUFDESC_LCMD_LEN = 2, + BUFDESC_SCMD_LEN = 3, +}; + +enum data_format { + DATA_SGE, + DATA_DIRECT, +}; + +enum completion_format { + COMPLETE_DIRECT, + COMPLETE_SGE, +}; + +enum completion_request { + CEQ_SET = 1, +}; + +enum cmdq_cmd_type { + SYNC_CMD_DIRECT_RESP, + SYNC_CMD_SGE_RESP, + ASYNC_CMD, +}; + +#define NUM_WQEBBS_FOR_CMDQ_WQE 1 + +bool sphw_cmdq_idle(struct sphw_cmdq *cmdq) +{ + return sphw_wq_is_empty(&cmdq->wq); +} + +static void *cmdq_read_wqe(struct sphw_wq *wq, u16 *ci) +{ + if (sphw_wq_is_empty(wq)) + return NULL; + + return sphw_wq_read_one_wqebb(wq, ci); +} + +static void *cmdq_get_wqe(struct sphw_wq *wq, u16 *pi) +{ + if (!sphw_wq_free_wqebbs(wq)) + return NULL; + + return sphw_wq_get_one_wqebb(wq, pi); +} + +struct sphw_cmd_buf *sphw_alloc_cmd_buf(void *hwdev) +{ + struct sphw_cmdqs *cmdqs = NULL; + struct sphw_cmd_buf *cmd_buf = NULL; + void *dev = NULL; + + if (!hwdev) { + pr_err("Failed to alloc cmd buf, Invalid hwdev\n"); + return NULL; + } + + cmdqs = ((struct sphw_hwdev *)hwdev)->cmdqs; + dev = ((struct sphw_hwdev *)hwdev)->dev_hdl; + + cmd_buf = kzalloc(sizeof(*cmd_buf), GFP_ATOMIC); + if (!cmd_buf) { + sdk_err(dev, "Failed to allocate cmd buf\n"); + return NULL; + } + + cmd_buf->buf = pci_pool_alloc(cmdqs->cmd_buf_pool, GFP_ATOMIC, + &cmd_buf->dma_addr); + if (!cmd_buf->buf) { + sdk_err(dev, "Failed to allocate cmdq cmd buf from the pool\n"); + goto alloc_pci_buf_err; + } + + atomic_set(&cmd_buf->ref_cnt, 1); + + return cmd_buf; + +alloc_pci_buf_err: + kfree(cmd_buf); + return NULL; +} + +void sphw_free_cmd_buf(void *hwdev, struct sphw_cmd_buf *cmd_buf) +{ + struct sphw_cmdqs *cmdqs = NULL; + + if (!hwdev || !cmd_buf) { + pr_err("Failed to free cmd buf: hwdev: %p, cmd_buf: %p\n", + hwdev, cmd_buf); + return; + } + + if (!atomic_dec_and_test(&cmd_buf->ref_cnt)) + return; + + cmdqs = ((struct sphw_hwdev *)hwdev)->cmdqs; + + pci_pool_free(cmdqs->cmd_buf_pool, cmd_buf->buf, cmd_buf->dma_addr); + kfree(cmd_buf); +} + +static void cmdq_set_completion(struct sphw_cmdq_completion *complete, + struct sphw_cmd_buf *buf_out) +{ + struct sphw_sge_resp *sge_resp = &complete->sge_resp; + + sphw_set_sge(&sge_resp->sge, buf_out->dma_addr, 
SPHW_CMDQ_BUF_SIZE); +} + +static void cmdq_set_lcmd_bufdesc(struct sphw_cmdq_wqe_lcmd *wqe, + struct sphw_cmd_buf *buf_in) +{ + sphw_set_sge(&wqe->buf_desc.sge, buf_in->dma_addr, buf_in->size); +} + +static void cmdq_set_inline_wqe_data(struct sphw_cmdq_inline_wqe *wqe, + const void *buf_in, u32 in_size) +{ + struct sphw_cmdq_wqe_scmd *wqe_scmd = &wqe->wqe_scmd; + + wqe_scmd->buf_desc.buf_len = in_size; + memcpy(wqe_scmd->buf_desc.data, buf_in, in_size); +} + +static void cmdq_fill_db(struct sphw_cmdq_db *db, + enum sphw_cmdq_type cmdq_type, u16 prod_idx) +{ + db->db_info = CMDQ_DB_INFO_SET(UPPER_8_BITS(prod_idx), HI_PROD_IDX); + + db->db_head = CMDQ_DB_HEAD_SET(SPHW_DB_CMDQ_TYPE, QUEUE_TYPE) | + CMDQ_DB_HEAD_SET(cmdq_type, CMDQ_TYPE) | + CMDQ_DB_HEAD_SET(SPHW_DB_SRC_CMDQ_TYPE, SRC_TYPE); +} + +static void cmdq_set_db(struct sphw_cmdq *cmdq, + enum sphw_cmdq_type cmdq_type, u16 prod_idx) +{ + struct sphw_cmdq_db db = {0}; + + cmdq_fill_db(&db, cmdq_type, prod_idx); + + /* The data that is written to HW should be in Big Endian Format */ + db.db_info = sphw_hw_be32(db.db_info); + db.db_head = sphw_hw_be32(db.db_head); + + wmb(); /* write all before the doorbell */ + writeq(*((u64 *)&db), CMDQ_DB_ADDR(cmdq->db_base, prod_idx)); +} + +static void cmdq_wqe_fill(void *dst, const void *src) +{ + memcpy((u8 *)dst + FIRST_DATA_TO_WRITE_LAST, + (u8 *)src + FIRST_DATA_TO_WRITE_LAST, + CMDQ_WQE_SIZE - FIRST_DATA_TO_WRITE_LAST); + + wmb(); /* The first 8 bytes should be written last */ + + *(u64 *)dst = *(u64 *)src; +} + +static void cmdq_prepare_wqe_ctrl(struct sphw_cmdq_wqe *wqe, int wrapped, + u8 mod, u8 cmd, u16 prod_idx, + enum completion_format complete_format, + enum data_format data_format, + enum bufdesc_len buf_len) +{ + struct sphw_ctrl *ctrl = NULL; + enum ctrl_sect_len ctrl_len; + struct sphw_cmdq_wqe_lcmd *wqe_lcmd = NULL; + struct sphw_cmdq_wqe_scmd *wqe_scmd = NULL; + u32 saved_data = WQE_HEADER(wqe)->saved_data; + + if (data_format == DATA_SGE) { + wqe_lcmd = &wqe->wqe_lcmd; + + wqe_lcmd->status.status_info = 0; + ctrl = &wqe_lcmd->ctrl; + ctrl_len = CTRL_SECT_LEN; + } else { + wqe_scmd = &wqe->inline_wqe.wqe_scmd; + + wqe_scmd->status.status_info = 0; + ctrl = &wqe_scmd->ctrl; + ctrl_len = CTRL_DIRECT_SECT_LEN; + } + + ctrl->ctrl_info = CMDQ_CTRL_SET(prod_idx, PI) | + CMDQ_CTRL_SET(cmd, CMD) | + CMDQ_CTRL_SET(mod, MOD); + + WQE_HEADER(wqe)->header_info = + CMDQ_WQE_HEADER_SET(buf_len, BUFDESC_LEN) | + CMDQ_WQE_HEADER_SET(complete_format, COMPLETE_FMT) | + CMDQ_WQE_HEADER_SET(data_format, DATA_FMT) | + CMDQ_WQE_HEADER_SET(CEQ_SET, COMPLETE_REQ) | + CMDQ_WQE_HEADER_SET(COMPLETE_LEN, COMPLETE_SECT_LEN) | + CMDQ_WQE_HEADER_SET(ctrl_len, CTRL_LEN) | + CMDQ_WQE_HEADER_SET((u32)wrapped, HW_BUSY_BIT); + + if (cmd == CMDQ_SET_ARM_CMD && mod == SPHW_MOD_COMM) { + saved_data &= SAVED_DATA_CLEAR(saved_data, ARM); + WQE_HEADER(wqe)->saved_data = saved_data | + SAVED_DATA_SET(1, ARM); + } else { + saved_data &= SAVED_DATA_CLEAR(saved_data, ARM); + WQE_HEADER(wqe)->saved_data = saved_data; + } +} + +static void cmdq_set_lcmd_wqe(struct sphw_cmdq_wqe *wqe, + enum cmdq_cmd_type cmd_type, + struct sphw_cmd_buf *buf_in, + struct sphw_cmd_buf *buf_out, int wrapped, + u8 mod, u8 cmd, u16 prod_idx) +{ + struct sphw_cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd; + enum completion_format complete_format = COMPLETE_DIRECT; + + switch (cmd_type) { + case SYNC_CMD_DIRECT_RESP: + wqe_lcmd->completion.direct_resp = 0; + break; + case SYNC_CMD_SGE_RESP: + if (buf_out) { + complete_format = COMPLETE_SGE; + 
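+			/* hand buf_out's DMA address to hardware via the
+			 * completion SGE so the detailed response is written
+			 * into the caller's buffer
+			 */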
cmdq_set_completion(&wqe_lcmd->completion, + buf_out); + } + break; + case ASYNC_CMD: + wqe_lcmd->completion.direct_resp = 0; + wqe_lcmd->buf_desc.saved_async_buf = (u64)(buf_in); + break; + } + + cmdq_prepare_wqe_ctrl(wqe, wrapped, mod, cmd, prod_idx, complete_format, + DATA_SGE, BUFDESC_LCMD_LEN); + + cmdq_set_lcmd_bufdesc(wqe_lcmd, buf_in); +} + +static void cmdq_set_inline_wqe(struct sphw_cmdq_wqe *wqe, + enum cmdq_cmd_type cmd_type, + const void *buf_in, u16 in_size, + struct sphw_cmd_buf *buf_out, int wrapped, + u8 mod, u8 cmd, u16 prod_idx) +{ + struct sphw_cmdq_wqe_scmd *wqe_scmd = &wqe->inline_wqe.wqe_scmd; + enum completion_format complete_format = COMPLETE_DIRECT; + + switch (cmd_type) { + case SYNC_CMD_SGE_RESP: + complete_format = COMPLETE_SGE; + cmdq_set_completion(&wqe_scmd->completion, buf_out); + break; + case SYNC_CMD_DIRECT_RESP: + complete_format = COMPLETE_DIRECT; + wqe_scmd->completion.direct_resp = 0; + break; + default: + break; + } + + cmdq_prepare_wqe_ctrl(wqe, wrapped, mod, cmd, prod_idx, + complete_format, DATA_DIRECT, BUFDESC_SCMD_LEN); + + cmdq_set_inline_wqe_data(&wqe->inline_wqe, buf_in, in_size); +} + +static void cmdq_update_cmd_status(struct sphw_cmdq *cmdq, u16 prod_idx, + struct sphw_cmdq_wqe *wqe) +{ + struct sphw_cmdq_cmd_info *cmd_info; + struct sphw_cmdq_wqe_lcmd *wqe_lcmd; + u32 status_info; + + wqe_lcmd = &wqe->wqe_lcmd; + cmd_info = &cmdq->cmd_infos[prod_idx]; + + if (cmd_info->errcode) { + status_info = sphw_hw_cpu32(wqe_lcmd->status.status_info); + *cmd_info->errcode = WQE_ERRCODE_GET(status_info, VAL); + } + + if (cmd_info->direct_resp) + *cmd_info->direct_resp = + sphw_hw_cpu32(wqe_lcmd->completion.direct_resp); +} + +static int sphw_cmdq_sync_timeout_check(struct sphw_cmdq *cmdq, struct sphw_cmdq_wqe *wqe, u16 pi) +{ + struct sphw_cmdq_wqe_lcmd *wqe_lcmd; + struct sphw_ctrl *ctrl; + u32 ctrl_info; + + wqe_lcmd = &wqe->wqe_lcmd; + ctrl = &wqe_lcmd->ctrl; + ctrl_info = sphw_hw_cpu32((ctrl)->ctrl_info); + if (!WQE_COMPLETED(ctrl_info)) { + sdk_info(cmdq->hwdev->dev_hdl, "Cmdq sync command check busy bit not set\n"); + return -EFAULT; + } + + cmdq_update_cmd_status(cmdq, pi, wqe); + + sdk_info(cmdq->hwdev->dev_hdl, "Cmdq sync command check succeed\n"); + return 0; +} + +static void clear_cmd_info(struct sphw_cmdq_cmd_info *cmd_info, + struct sphw_cmdq_cmd_info *saved_cmd_info) +{ + if (cmd_info->errcode == saved_cmd_info->errcode) + cmd_info->errcode = NULL; + + if (cmd_info->done == saved_cmd_info->done) + cmd_info->done = NULL; + + if (cmd_info->direct_resp == saved_cmd_info->direct_resp) + cmd_info->direct_resp = NULL; +} + +static int cmdq_ceq_handler_status(struct sphw_cmdq *cmdq, + struct sphw_cmdq_cmd_info *cmd_info, + struct sphw_cmdq_cmd_info *saved_cmd_info, + u64 curr_msg_id, u16 curr_prod_idx, + struct sphw_cmdq_wqe *curr_wqe, + u32 timeout) +{ + ulong timeo; + int err; + ulong start = 0; + ulong end = timeout; + + if (cmdq->hwdev->poll) { + while (start < end) { + sphw_cmdq_ceq_handler(cmdq->hwdev, 0); + if (saved_cmd_info->done->done != 0) + return 0; + usleep_range(900, 1000); + start++; + } + } else { + timeo = msecs_to_jiffies(timeout); + if (wait_for_completion_timeout(saved_cmd_info->done, timeo)) + return 0; + } + + spin_lock_bh(&cmdq->cmdq_lock); + + if (cmd_info->cmpt_code == saved_cmd_info->cmpt_code) + cmd_info->cmpt_code = NULL; + + if (*saved_cmd_info->cmpt_code == CMDQ_COMPLETE_CMPT_CODE) { + sdk_info(cmdq->hwdev->dev_hdl, "Cmdq direct sync command has been completed\n"); + spin_unlock_bh(&cmdq->cmdq_lock); + return 0; 
+ } + + if (curr_msg_id == cmd_info->cmdq_msg_id) { + err = sphw_cmdq_sync_timeout_check(cmdq, curr_wqe, curr_prod_idx); + if (err) + cmd_info->cmd_type = SPHW_CMD_TYPE_TIMEOUT; + else + cmd_info->cmd_type = SPHW_CMD_TYPE_FAKE_TIMEOUT; + } else { + err = -ETIMEDOUT; + sdk_err(cmdq->hwdev->dev_hdl, "Cmdq sync command current msg id dismatch with cmd_info msg id\n"); + } + + clear_cmd_info(cmd_info, saved_cmd_info); + + spin_unlock_bh(&cmdq->cmdq_lock); + + if (!err) + return 0; + + sphw_dump_ceq_info(cmdq->hwdev); + + return -ETIMEDOUT; +} + +static int wait_cmdq_sync_cmd_completion(struct sphw_cmdq *cmdq, + struct sphw_cmdq_cmd_info *cmd_info, + struct sphw_cmdq_cmd_info *saved_cmd_info, + u64 curr_msg_id, u16 curr_prod_idx, + struct sphw_cmdq_wqe *curr_wqe, u32 timeout) +{ + return cmdq_ceq_handler_status(cmdq, cmd_info, saved_cmd_info, + curr_msg_id, curr_prod_idx, + curr_wqe, timeout); +} + +static int cmdq_msg_lock(struct sphw_cmdq *cmdq, u16 channel) +{ + struct sphw_cmdqs *cmdqs = cmdq_to_cmdqs(cmdq); + + /* Keep wrapped and doorbell index correct. bh - for tasklet(ceq) */ + spin_lock_bh(&cmdq->cmdq_lock); + + if (cmdqs->lock_channel_en && test_bit(channel, &cmdqs->channel_stop)) { + spin_unlock_bh(&cmdq->cmdq_lock); + return -EAGAIN; + } + + return 0; +} + +static void cmdq_msg_unlock(struct sphw_cmdq *cmdq) +{ + spin_unlock_bh(&cmdq->cmdq_lock); +} + +static void cmdq_clear_cmd_buf(struct sphw_cmdq_cmd_info *cmd_info, + struct sphw_hwdev *hwdev) +{ + if (cmd_info->buf_in) + sphw_free_cmd_buf(hwdev, cmd_info->buf_in); + + if (cmd_info->buf_out) + sphw_free_cmd_buf(hwdev, cmd_info->buf_out); + + cmd_info->buf_in = NULL; + cmd_info->buf_out = NULL; +} + +static void cmdq_set_cmd_buf(struct sphw_cmdq_cmd_info *cmd_info, + struct sphw_hwdev *hwdev, + struct sphw_cmd_buf *buf_in, + struct sphw_cmd_buf *buf_out) +{ + cmd_info->buf_in = buf_in; + cmd_info->buf_out = buf_out; + + if (buf_in) + atomic_inc(&buf_in->ref_cnt); + + if (buf_out) + atomic_inc(&buf_out->ref_cnt); +} + +static int cmdq_sync_cmd_direct_resp(struct sphw_cmdq *cmdq, u8 mod, + u8 cmd, struct sphw_cmd_buf *buf_in, + u64 *out_param, u32 timeout, u16 channel) +{ + struct sphw_wq *wq = &cmdq->wq; + struct sphw_cmdq_wqe *curr_wqe = NULL, wqe; + struct sphw_cmdq_cmd_info *cmd_info = NULL, saved_cmd_info; + struct completion done; + u16 curr_prod_idx, next_prod_idx; + int wrapped, errcode = 0, wqe_size = WQE_LCMD_SIZE; + int cmpt_code = CMDQ_SEND_CMPT_CODE; + u64 curr_msg_id; + int err; + + err = cmdq_msg_lock(cmdq, channel); + if (err) + return err; + + curr_wqe = cmdq_get_wqe(wq, &curr_prod_idx); + if (!curr_wqe) { + cmdq_msg_unlock(cmdq); + return -EBUSY; + } + + memset(&wqe, 0, sizeof(wqe)); + + wrapped = cmdq->wrapped; + + next_prod_idx = curr_prod_idx + NUM_WQEBBS_FOR_CMDQ_WQE; + if (next_prod_idx >= wq->q_depth) { + cmdq->wrapped = !cmdq->wrapped; + next_prod_idx -= (u16)wq->q_depth; + } + + cmd_info = &cmdq->cmd_infos[curr_prod_idx]; + + init_completion(&done); + + cmd_info->cmd_type = SPHW_CMD_TYPE_DIRECT_RESP; + cmd_info->done = &done; + cmd_info->errcode = &errcode; + cmd_info->direct_resp = out_param; + cmd_info->cmpt_code = &cmpt_code; + cmd_info->channel = channel; + cmdq_set_cmd_buf(cmd_info, cmdq->hwdev, buf_in, NULL); + + memcpy(&saved_cmd_info, cmd_info, sizeof(*cmd_info)); + + cmdq_set_lcmd_wqe(&wqe, SYNC_CMD_DIRECT_RESP, buf_in, NULL, + wrapped, mod, cmd, curr_prod_idx); + + /* The data that is written to HW should be in Big Endian Format */ + sphw_hw_be32_len(&wqe, wqe_size); + + /* CMDQ WQE is not shadow, 
therefore wqe will be written to wq */ + cmdq_wqe_fill(curr_wqe, &wqe); + + (cmd_info->cmdq_msg_id)++; + curr_msg_id = cmd_info->cmdq_msg_id; + + cmdq_set_db(cmdq, SPHW_CMDQ_SYNC, next_prod_idx); + + cmdq_msg_unlock(cmdq); + + timeout = timeout ? timeout : CMDQ_CMD_TIMEOUT; + err = wait_cmdq_sync_cmd_completion(cmdq, cmd_info, &saved_cmd_info, + curr_msg_id, curr_prod_idx, + curr_wqe, timeout); + if (err) { + sdk_err(cmdq->hwdev->dev_hdl, "Cmdq sync command(mod: %u, cmd: %u) timeout, prod idx: 0x%x\n", + mod, cmd, curr_prod_idx); + err = -ETIMEDOUT; + } + + if (cmpt_code == CMDQ_FORCE_STOP_CMPT_CODE) { + sdk_info(cmdq->hwdev->dev_hdl, "Force stop cmdq cmd, mod: %u, cmd: %u\n", + mod, cmd); + err = -EAGAIN; + } + + smp_rmb(); /* read error code after completion */ + + return err ? err : errcode; +} + +static int cmdq_sync_cmd_detail_resp(struct sphw_cmdq *cmdq, u8 mod, u8 cmd, + struct sphw_cmd_buf *buf_in, + struct sphw_cmd_buf *buf_out, + u64 *out_param, u32 timeout, u16 channel) +{ + struct sphw_wq *wq = &cmdq->wq; + struct sphw_cmdq_wqe *curr_wqe = NULL, wqe; + struct sphw_cmdq_cmd_info *cmd_info = NULL, saved_cmd_info; + struct completion done; + u16 curr_prod_idx, next_prod_idx; + int wrapped, errcode = 0, wqe_size = WQE_LCMD_SIZE; + int cmpt_code = CMDQ_SEND_CMPT_CODE; + u64 curr_msg_id; + int err; + + err = cmdq_msg_lock(cmdq, channel); + if (err) + return err; + + curr_wqe = cmdq_get_wqe(wq, &curr_prod_idx); + if (!curr_wqe) { + cmdq_msg_unlock(cmdq); + return -EBUSY; + } + + memset(&wqe, 0, sizeof(wqe)); + + wrapped = cmdq->wrapped; + + next_prod_idx = curr_prod_idx + NUM_WQEBBS_FOR_CMDQ_WQE; + if (next_prod_idx >= wq->q_depth) { + cmdq->wrapped = !cmdq->wrapped; + next_prod_idx -= (u16)wq->q_depth; + } + + cmd_info = &cmdq->cmd_infos[curr_prod_idx]; + + init_completion(&done); + + cmd_info->cmd_type = SPHW_CMD_TYPE_SGE_RESP; + cmd_info->done = &done; + cmd_info->errcode = &errcode; + cmd_info->direct_resp = out_param; + cmd_info->cmpt_code = &cmpt_code; + cmd_info->channel = channel; + cmdq_set_cmd_buf(cmd_info, cmdq->hwdev, buf_in, buf_out); + + memcpy(&saved_cmd_info, cmd_info, sizeof(*cmd_info)); + + cmdq_set_lcmd_wqe(&wqe, SYNC_CMD_SGE_RESP, buf_in, buf_out, + wrapped, mod, cmd, curr_prod_idx); + + sphw_hw_be32_len(&wqe, wqe_size); + + cmdq_wqe_fill(curr_wqe, &wqe); + + (cmd_info->cmdq_msg_id)++; + curr_msg_id = cmd_info->cmdq_msg_id; + + cmdq_set_db(cmdq, cmdq->cmdq_type, next_prod_idx); + + cmdq_msg_unlock(cmdq); + + timeout = timeout ? timeout : CMDQ_CMD_TIMEOUT; + err = wait_cmdq_sync_cmd_completion(cmdq, cmd_info, &saved_cmd_info, + curr_msg_id, curr_prod_idx, + curr_wqe, timeout); + if (err) { + sdk_err(cmdq->hwdev->dev_hdl, "Cmdq sync command(mod: %u, cmd: %u) timeout, prod idx: 0x%x\n", + mod, cmd, curr_prod_idx); + err = -ETIMEDOUT; + } + + if (cmpt_code == CMDQ_FORCE_STOP_CMPT_CODE) { + sdk_info(cmdq->hwdev->dev_hdl, "Force stop cmdq cmd, mod: %u, cmd: %u\n", + mod, cmd); + err = -EAGAIN; + } + + smp_rmb(); /* read error code after completion */ + + return err ? 
err : errcode; +} + +static int cmdq_async_cmd(struct sphw_cmdq *cmdq, u8 mod, u8 cmd, + struct sphw_cmd_buf *buf_in, u16 channel) +{ + struct sphw_cmdq_cmd_info *cmd_info = NULL; + struct sphw_wq *wq = &cmdq->wq; + int wqe_size = WQE_LCMD_SIZE; + u16 curr_prod_idx, next_prod_idx; + struct sphw_cmdq_wqe *curr_wqe = NULL, wqe; + int wrapped, err; + + err = cmdq_msg_lock(cmdq, channel); + if (err) + return err; + + curr_wqe = cmdq_get_wqe(wq, &curr_prod_idx); + if (!curr_wqe) { + cmdq_msg_unlock(cmdq); + return -EBUSY; + } + + memset(&wqe, 0, sizeof(wqe)); + + wrapped = cmdq->wrapped; + next_prod_idx = curr_prod_idx + NUM_WQEBBS_FOR_CMDQ_WQE; + if (next_prod_idx >= wq->q_depth) { + cmdq->wrapped = !cmdq->wrapped; + next_prod_idx -= (u16)wq->q_depth; + } + + cmdq_set_lcmd_wqe(&wqe, ASYNC_CMD, buf_in, NULL, wrapped, + mod, cmd, curr_prod_idx); + + /* The data that is written to HW should be in Big Endian Format */ + sphw_hw_be32_len(&wqe, wqe_size); + cmdq_wqe_fill(curr_wqe, &wqe); + + cmd_info = &cmdq->cmd_infos[curr_prod_idx]; + cmd_info->cmd_type = SPHW_CMD_TYPE_ASYNC; + cmd_info->channel = channel; + /* The caller will not free the cmd_buf of the asynchronous command, + * so there is no need to increase the reference count here + */ + cmd_info->buf_in = buf_in; + + /* LB mode 1 compatible, cmdq 0 also for async, which is sync_no_wait */ + cmdq_set_db(cmdq, SPHW_CMDQ_SYNC, next_prod_idx); + + cmdq_msg_unlock(cmdq); + + return 0; +} + +int cmdq_set_arm_bit(struct sphw_cmdq *cmdq, const void *buf_in, + u16 in_size) +{ + struct sphw_wq *wq = &cmdq->wq; + struct sphw_cmdq_wqe *curr_wqe = NULL, wqe; + u16 curr_prod_idx, next_prod_idx; + int wrapped, wqe_size = WQE_SCMD_SIZE; + + /* Keep wrapped and doorbell index correct. bh - for tasklet(ceq) */ + spin_lock_bh(&cmdq->cmdq_lock); + + curr_wqe = cmdq_get_wqe(wq, &curr_prod_idx); + if (!curr_wqe) { + spin_unlock_bh(&cmdq->cmdq_lock); + return -EBUSY; + } + + memset(&wqe, 0, sizeof(wqe)); + + wrapped = cmdq->wrapped; + + next_prod_idx = curr_prod_idx + NUM_WQEBBS_FOR_CMDQ_WQE; + if (next_prod_idx >= wq->q_depth) { + cmdq->wrapped = !cmdq->wrapped; + next_prod_idx -= (u16)wq->q_depth; + } + + cmdq_set_inline_wqe(&wqe, SYNC_CMD_DIRECT_RESP, buf_in, in_size, NULL, + wrapped, SPHW_MOD_COMM, CMDQ_SET_ARM_CMD, + curr_prod_idx); + + /* The data that is written to HW should be in Big Endian Format */ + sphw_cpu_to_be32(&wqe, wqe_size); + + /* cmdq wqe is not shadow, therefore wqe will be written to wq */ + cmdq_wqe_fill(curr_wqe, &wqe); + + cmdq->cmd_infos[curr_prod_idx].cmd_type = SPHW_CMD_TYPE_SET_ARM; + + cmdq_set_db(cmdq, cmdq->cmdq_type, next_prod_idx); + + spin_unlock_bh(&cmdq->cmdq_lock); + + return 0; +} + +static int cmdq_params_valid(void *hwdev, struct sphw_cmd_buf *buf_in) +{ + if (!buf_in || !hwdev) { + pr_err("Invalid CMDQ buffer addr: %p or hwdev: %p\n", + buf_in, hwdev); + return -EINVAL; + } + + if (!buf_in->size || buf_in->size > SPHW_CMDQ_MAX_DATA_SIZE) { + pr_err("Invalid CMDQ buffer size: 0x%x\n", buf_in->size); + return -EINVAL; + } + + return 0; +} + +#define WAIT_CMDQ_ENABLE_TIMEOUT 300 +static int wait_cmdqs_enable(struct sphw_cmdqs *cmdqs) +{ + unsigned long end; + + end = jiffies + msecs_to_jiffies(WAIT_CMDQ_ENABLE_TIMEOUT); + do { + if (cmdqs->status & SPHW_CMDQ_ENABLE) + return 0; + } while (time_before(jiffies, end) && cmdqs->hwdev->chip_present_flag && + !cmdqs->disable_flag); + + cmdqs->disable_flag = 1; + + return -EBUSY; +} + +int sphw_cmdq_direct_resp(void *hwdev, u8 mod, u8 cmd, struct sphw_cmd_buf *buf_in, + u64 
*out_param, u32 timeout, u16 channel)
+{
+	struct sphw_cmdqs *cmdqs = NULL;
+	int err = cmdq_params_valid(hwdev, buf_in);
+
+	if (err) {
+		pr_err("Invalid CMDQ parameters\n");
+		return err;
+	}
+
+	/* TODO: support sending cmdq commands only after cmdq init */
+	if (!(((struct sphw_hwdev *)hwdev)->chip_present_flag))
+		return -EPERM;
+
+	cmdqs = ((struct sphw_hwdev *)hwdev)->cmdqs;
+	err = wait_cmdqs_enable(cmdqs);
+	if (err) {
+		sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq is disabled\n");
+		return err;
+	}
+
+	err = cmdq_sync_cmd_direct_resp(&cmdqs->cmdq[SPHW_CMDQ_SYNC],
+					mod, cmd, buf_in, out_param,
+					timeout, channel);
+
+	if (!(((struct sphw_hwdev *)hwdev)->chip_present_flag))
+		return -ETIMEDOUT;
+	else
+		return err;
+}
+
+int sphw_cmdq_detail_resp(void *hwdev, u8 mod, u8 cmd, struct sphw_cmd_buf *buf_in,
+			  struct sphw_cmd_buf *buf_out, u64 *out_param, u32 timeout, u16 channel)
+{
+	struct sphw_cmdqs *cmdqs = NULL;
+	int err = cmdq_params_valid(hwdev, buf_in);
+
+	if (err)
+		return err;
+
+	cmdqs = ((struct sphw_hwdev *)hwdev)->cmdqs;
+
+	if (!(((struct sphw_hwdev *)hwdev)->chip_present_flag)) /* TODO */
+		return -EPERM;
+
+	err = wait_cmdqs_enable(cmdqs);
+	if (err) {
+		sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq is disabled\n");
+		return err;
+	}
+
+	err = cmdq_sync_cmd_detail_resp(&cmdqs->cmdq[SPHW_CMDQ_SYNC],
+					mod, cmd, buf_in, buf_out, out_param,
+					timeout, channel);
+	if (!(((struct sphw_hwdev *)hwdev)->chip_present_flag))
+		return -ETIMEDOUT;
+	else
+		return err;
+}
+
+int sphw_cos_id_detail_resp(void *hwdev, u8 mod, u8 cmd, u8 cos_id, struct sphw_cmd_buf *buf_in,
+			    struct sphw_cmd_buf *buf_out, u64 *out_param, u32 timeout, u16 channel)
+{
+	struct sphw_cmdqs *cmdqs = NULL;
+	int err = cmdq_params_valid(hwdev, buf_in);
+
+	if (err)
+		return err;
+
+	cmdqs = ((struct sphw_hwdev *)hwdev)->cmdqs;
+
+	if (!(((struct sphw_hwdev *)hwdev)->chip_present_flag)) /* TODO */
+		return -EPERM;
+
+	err = wait_cmdqs_enable(cmdqs);
+	if (err) {
+		sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq is disabled\n");
+		return err;
+	}
+
+	if (cos_id >= SPHW_MAX_CMDQ_TYPES) {
+		sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq id is invalid\n");
+		return -EINVAL;
+	}
+
+	err = cmdq_sync_cmd_detail_resp(&cmdqs->cmdq[cos_id], mod, cmd,
+					buf_in, buf_out, out_param,
+					timeout, channel);
+	if (!(((struct sphw_hwdev *)hwdev)->chip_present_flag))
+		return -ETIMEDOUT;
+	else
+		return err;
+}
+
+int sphw_cmdq_async(void *hwdev, u8 mod, u8 cmd, struct sphw_cmd_buf *buf_in, u16 channel)
+{
+	struct sphw_cmdqs *cmdqs = NULL;
+	int err = cmdq_params_valid(hwdev, buf_in);
+
+	if (err)
+		return err;
+
+	cmdqs = ((struct sphw_hwdev *)hwdev)->cmdqs;
+
+	if (!(((struct sphw_hwdev *)hwdev)->chip_present_flag)) /* TODO */
+		return -EPERM;
+
+	err = wait_cmdqs_enable(cmdqs);
+	if (err) {
+		sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq is disabled\n");
+		return err;
+	}
+	/* LB mode 1 compatible, cmdq 0 is also used for async, i.e. sync_no_wait */
+	return cmdq_async_cmd(&cmdqs->cmdq[SPHW_CMDQ_SYNC], mod,
+			      cmd, buf_in, channel);
+}
+
+int sphw_set_arm_bit(void *hwdev, enum sphw_set_arm_type q_type, u16 q_id)
+{
+	struct sphw_cmdqs *cmdqs = NULL;
+	struct sphw_cmdq *cmdq = NULL;
+	struct sphw_cmdq_arm_bit arm_bit;
+	enum sphw_cmdq_type cmdq_type = SPHW_CMDQ_SYNC;
+	u16 in_size;
+	int err;
+
+	if (!hwdev)
+		return -EINVAL;
+
+	if (!(((struct sphw_hwdev *)hwdev)->chip_present_flag)) /* TODO */
+		return -EPERM;
+
+	cmdqs = ((struct sphw_hwdev *)hwdev)->cmdqs;
+
+	if (!(cmdqs->status & SPHW_CMDQ_ENABLE))
+		return -EBUSY;
+
+	if (q_type == SPHW_SET_ARM_CMDQ) {
+		if (q_id >= SPHW_MAX_CMDQ_TYPES)
return -EFAULT; + + cmdq_type = q_id; + } + /* sq is using interrupt now, so we only need to set arm bit for cmdq, + * remove comment below if need to set sq arm bit + * else + * cmdq_type = SPHW_CMDQ_SYNC; + */ + + cmdq = &cmdqs->cmdq[cmdq_type]; + + arm_bit.q_type = q_type; + arm_bit.q_id = q_id; + in_size = sizeof(arm_bit); + + err = cmdq_set_arm_bit(cmdq, &arm_bit, in_size); + if (err) { + sdk_err(cmdqs->hwdev->dev_hdl, + "Failed to set arm for q_type: %d, qid %d\n", + q_type, q_id); + return err; + } + + return 0; +} + +static void clear_wqe_complete_bit(struct sphw_cmdq *cmdq, + struct sphw_cmdq_wqe *wqe, u16 ci) +{ + struct sphw_ctrl *ctrl = NULL; + u32 header_info = sphw_hw_cpu32(WQE_HEADER(wqe)->header_info); + enum data_format df = CMDQ_WQE_HEADER_GET(header_info, DATA_FMT); + + if (df == DATA_SGE) + ctrl = &wqe->wqe_lcmd.ctrl; + else + ctrl = &wqe->inline_wqe.wqe_scmd.ctrl; + + /* clear HW busy bit */ + ctrl->ctrl_info = 0; + cmdq->cmd_infos[ci].cmd_type = SPHW_CMD_TYPE_NONE; + + wmb(); /* verify wqe is clear */ + + sphw_wq_put_wqebbs(&cmdq->wq, NUM_WQEBBS_FOR_CMDQ_WQE); +} + +static void cmdq_sync_cmd_handler(struct sphw_cmdq *cmdq, + struct sphw_cmdq_wqe *wqe, u16 ci) +{ + spin_lock(&cmdq->cmdq_lock); + + cmdq_update_cmd_status(cmdq, ci, wqe); + + if (cmdq->cmd_infos[ci].cmpt_code) { + *cmdq->cmd_infos[ci].cmpt_code = CMDQ_COMPLETE_CMPT_CODE; + cmdq->cmd_infos[ci].cmpt_code = NULL; + } + + /* make sure cmpt_code operation before done operation */ + smp_rmb(); + + if (cmdq->cmd_infos[ci].done) { + complete(cmdq->cmd_infos[ci].done); + cmdq->cmd_infos[ci].done = NULL; + } + + spin_unlock(&cmdq->cmdq_lock); + + cmdq_clear_cmd_buf(&cmdq->cmd_infos[ci], cmdq->hwdev); + clear_wqe_complete_bit(cmdq, wqe, ci); +} + +static void cmdq_async_cmd_handler(struct sphw_hwdev *hwdev, + struct sphw_cmdq *cmdq, + struct sphw_cmdq_wqe *wqe, u16 ci) +{ + cmdq_clear_cmd_buf(&cmdq->cmd_infos[ci], hwdev); + clear_wqe_complete_bit(cmdq, wqe, ci); +} + +static int cmdq_arm_ceq_handler(struct sphw_cmdq *cmdq, + struct sphw_cmdq_wqe *wqe, u16 ci) +{ + struct sphw_ctrl *ctrl = &wqe->inline_wqe.wqe_scmd.ctrl; + u32 ctrl_info = sphw_hw_cpu32((ctrl)->ctrl_info); + + if (!WQE_COMPLETED(ctrl_info)) + return -EBUSY; + + clear_wqe_complete_bit(cmdq, wqe, ci); + + return 0; +} + +#define SPHW_CMDQ_WQE_HEAD_LEN 32 +static void sphw_dump_cmdq_wqe_head(struct sphw_hwdev *hwdev, struct sphw_cmdq_wqe *wqe) +{ + u32 i; + u32 *data = (u32 *)wqe; + + for (i = 0; i < (SPHW_CMDQ_WQE_HEAD_LEN / sizeof(u32)); i += 4) { + sdk_info(hwdev->dev_hdl, "wqe data: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n", + *(data + i), *(data + i + 1), *(data + i + 2), + *(data + i + 3)); + } +} + +void sphw_cmdq_ceq_handler(void *handle, u32 ceqe_data) +{ + struct sphw_cmdqs *cmdqs = ((struct sphw_hwdev *)handle)->cmdqs; + enum sphw_cmdq_type cmdq_type = CEQE_CMDQ_GET(ceqe_data, TYPE); + struct sphw_cmdq *cmdq = &cmdqs->cmdq[cmdq_type]; + struct sphw_hwdev *hwdev = cmdqs->hwdev; + struct sphw_cmdq_wqe *wqe = NULL; + struct sphw_cmdq_wqe_lcmd *wqe_lcmd = NULL; + struct sphw_ctrl *ctrl = NULL; + struct sphw_cmdq_cmd_info *cmd_info = NULL; + u32 ctrl_info; + u16 ci; + + while ((wqe = cmdq_read_wqe(&cmdq->wq, &ci)) != NULL) { + cmd_info = &cmdq->cmd_infos[ci]; + + switch (cmd_info->cmd_type) { + case SPHW_CMD_TYPE_NONE: + return; + case SPHW_CMD_TYPE_TIMEOUT: + sdk_warn(hwdev->dev_hdl, "Cmdq timeout, q_id: %u, ci: %u\n", + cmdq_type, ci); + sphw_dump_cmdq_wqe_head(hwdev, wqe); + fallthrough; + case SPHW_CMD_TYPE_FAKE_TIMEOUT: + cmdq_clear_cmd_buf(cmd_info, 
hwdev); + clear_wqe_complete_bit(cmdq, wqe, ci); + break; + case SPHW_CMD_TYPE_SET_ARM: + if (cmdq_arm_ceq_handler(cmdq, wqe, ci)) + return; + + break; + default: + /* only arm bit is using scmd wqe, the wqe is lcmd */ + wqe_lcmd = &wqe->wqe_lcmd; + ctrl = &wqe_lcmd->ctrl; + ctrl_info = sphw_hw_cpu32((ctrl)->ctrl_info); + + if (!WQE_COMPLETED(ctrl_info)) + return; + + /* For FORCE_STOP cmd_type, we also need to wait for + * the firmware processing to complete to prevent the + * firmware from accessing the released cmd_buf + */ + if (cmd_info->cmd_type == SPHW_CMD_TYPE_FORCE_STOP) { + cmdq_clear_cmd_buf(cmd_info, hwdev); + clear_wqe_complete_bit(cmdq, wqe, ci); + } else if (cmd_info->cmd_type == SPHW_CMD_TYPE_ASYNC) { + cmdq_async_cmd_handler(hwdev, cmdq, wqe, ci); + } else { + cmdq_sync_cmd_handler(cmdq, wqe, ci); + } + + break; + } + } +} + +static void cmdq_init_queue_ctxt(struct sphw_cmdqs *cmdqs, + struct sphw_cmdq *cmdq, + struct cmdq_ctxt_info *ctxt_info) +{ + struct sphw_wq *wq = &cmdq->wq; + u64 cmdq_first_block_paddr, pfn; + u16 start_ci = (u16)wq->cons_idx; + + pfn = CMDQ_PFN(sphw_wq_get_first_wqe_page_addr(wq)); + + ctxt_info->curr_wqe_page_pfn = + CMDQ_CTXT_PAGE_INFO_SET(1, HW_BUSY_BIT) | + CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_EN) | + CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_ARM) | + CMDQ_CTXT_PAGE_INFO_SET(SPHW_CEQ_ID_CMDQ, EQ_ID) | + CMDQ_CTXT_PAGE_INFO_SET(pfn, CURR_WQE_PAGE_PFN); + + if (!WQ_IS_0_LEVEL_CLA(wq)) { + cmdq_first_block_paddr = cmdqs->wq_block_paddr; + pfn = CMDQ_PFN(cmdq_first_block_paddr); + } + + ctxt_info->wq_block_pfn = CMDQ_CTXT_BLOCK_INFO_SET(start_ci, CI) | + CMDQ_CTXT_BLOCK_INFO_SET(pfn, WQ_BLOCK_PFN); +} + +static int init_cmdq(struct sphw_cmdq *cmdq, struct sphw_hwdev *hwdev, + enum sphw_cmdq_type q_type) +{ + void __iomem *db_base; + int err; + + cmdq->cmdq_type = q_type; + cmdq->wrapped = 1; + cmdq->hwdev = hwdev; + + spin_lock_init(&cmdq->cmdq_lock); + + cmdq->cmd_infos = kcalloc(cmdq->wq.q_depth, sizeof(*cmdq->cmd_infos), + GFP_KERNEL); + if (!cmdq->cmd_infos) { + sdk_err(hwdev->dev_hdl, "Failed to allocate cmdq infos\n"); + err = -ENOMEM; + goto cmd_infos_err; + } + + err = sphw_alloc_db_addr(hwdev, &db_base, NULL); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to allocate doorbell address\n"); + goto alloc_db_err; + } + + cmdq->db_base = (u8 *)db_base; + return 0; + +alloc_db_err: + kfree(cmdq->cmd_infos); + +cmd_infos_err: + + return err; +} + +static void free_cmdq(struct sphw_hwdev *hwdev, struct sphw_cmdq *cmdq) +{ + sphw_free_db_addr(hwdev, cmdq->db_base, NULL); + kfree(cmdq->cmd_infos); +} + +static int sphw_set_cmdq_ctxts(struct sphw_hwdev *hwdev) +{ + struct sphw_cmdqs *cmdqs = hwdev->cmdqs; + enum sphw_cmdq_type cmdq_type; + int err; + + cmdq_type = SPHW_CMDQ_SYNC; + for (; cmdq_type < SPHW_MAX_CMDQ_TYPES; cmdq_type++) { + err = sphw_set_cmdq_ctxt(hwdev, (u8)cmdq_type, &cmdqs->cmdq[cmdq_type].cmdq_ctxt); + if (err) + return err; + } + + cmdqs->status |= SPHW_CMDQ_ENABLE; + cmdqs->disable_flag = 0; + + return 0; +} + +static void cmdq_flush_sync_cmd(struct sphw_cmdq_cmd_info *cmd_info) +{ + if (cmd_info->cmd_type != SPHW_CMD_TYPE_DIRECT_RESP && + cmd_info->cmd_type != SPHW_CMD_TYPE_SGE_RESP) + return; + + cmd_info->cmd_type = SPHW_CMD_TYPE_FORCE_STOP; + + if (cmd_info->cmpt_code && + *cmd_info->cmpt_code == CMDQ_SEND_CMPT_CODE) + *cmd_info->cmpt_code = CMDQ_FORCE_STOP_CMPT_CODE; + + if (cmd_info->done) { + complete(cmd_info->done); + cmd_info->done = NULL; + cmd_info->cmpt_code = NULL; + cmd_info->direct_resp = NULL; + cmd_info->errcode = NULL; + } +} 
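cmdq_flush_sync_cmd() above force-completes a waiter by flipping its cmpt_code from CMDQ_SEND_CMPT_CODE to CMDQ_FORCE_STOP_CMPT_CODE, which the synchronous senders report as -EAGAIN, so callers should treat that return value as retryable. For reference, a minimal caller-side sketch of the synchronous API (sphw_alloc_cmd_buf(), sphw_cmdq_direct_resp(), sphw_free_cmd_buf()); the function name, mod/cmd/channel values and request layout are placeholders for illustration, not definitions from this patch:

	/* Hypothetical caller: send one synchronous cmdq command and read the
	 * 64-bit direct response. mod/cmd/channel and req are placeholders.
	 */
	static int example_send_sync_cmd(void *hwdev, u8 mod, u8 cmd, u16 channel,
					 const void *req, u16 req_len)
	{
		struct sphw_cmd_buf *buf_in;
		u64 out_param = 0;
		int err;

		buf_in = sphw_alloc_cmd_buf(hwdev);
		if (!buf_in)
			return -ENOMEM;

		/* the payload must not exceed SPHW_CMDQ_MAX_DATA_SIZE */
		memcpy(buf_in->buf, req, req_len);
		buf_in->size = req_len;

		/* timeout 0 falls back to the default CMDQ_CMD_TIMEOUT */
		err = sphw_cmdq_direct_resp(hwdev, mod, cmd, buf_in,
					    &out_param, 0, channel);

		sphw_free_cmd_buf(hwdev, buf_in);
		return err;	/* -EAGAIN means the command was force-stopped */
	}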
+ +void sphw_cmdq_flush_cmd(struct sphw_hwdev *hwdev, struct sphw_cmdq *cmdq) +{ + struct sphw_cmdq_cmd_info *cmd_info = NULL; + u16 ci = 0; + + spin_lock_bh(&cmdq->cmdq_lock); + + while (cmdq_read_wqe(&cmdq->wq, &ci)) { + sphw_wq_put_wqebbs(&cmdq->wq, NUM_WQEBBS_FOR_CMDQ_WQE); + cmd_info = &cmdq->cmd_infos[ci]; + + if (cmd_info->cmd_type == SPHW_CMD_TYPE_DIRECT_RESP || + cmd_info->cmd_type == SPHW_CMD_TYPE_SGE_RESP) + cmdq_flush_sync_cmd(cmd_info); + } + + spin_unlock_bh(&cmdq->cmdq_lock); +} + +void sphw_cmdq_flush_channel_sync_cmd(struct sphw_hwdev *hwdev, u16 channel) +{ + struct sphw_cmdq_cmd_info *cmd_info = NULL; + struct sphw_cmdq *cmdq = NULL; + struct sphw_wq *wq = NULL; + u16 wqe_cnt, ci, i; + + if (channel >= SPHW_CHANNEL_MAX) + return; + + cmdq = &hwdev->cmdqs->cmdq[SPHW_CMDQ_SYNC]; + + spin_lock_bh(&cmdq->cmdq_lock); + + wq = &cmdq->wq; + ci = wq->cons_idx; + wqe_cnt = (u16)WQ_MASK_IDX(wq, wq->prod_idx + + wq->q_depth - wq->cons_idx); + for (i = 0; i < wqe_cnt; i++) { + cmd_info = &cmdq->cmd_infos[WQ_MASK_IDX(wq, ci + i)]; + + if (cmd_info->channel == channel) + cmdq_flush_sync_cmd(cmd_info); + } + + spin_unlock_bh(&cmdq->cmdq_lock); +} + +static void cmdq_reset_all_cmd_buff(struct sphw_cmdq *cmdq) +{ + u16 i; + + for (i = 0; i < cmdq->wq.q_depth; i++) + cmdq_clear_cmd_buf(&cmdq->cmd_infos[i], cmdq->hwdev); +} + +int sphw_cmdq_set_channel_status(struct sphw_hwdev *hwdev, u16 channel, bool enable) +{ + if (channel >= SPHW_CHANNEL_MAX) + return -EINVAL; + + if (enable) { + clear_bit(channel, &hwdev->cmdqs->channel_stop); + } else { + set_bit(channel, &hwdev->cmdqs->channel_stop); + sphw_cmdq_flush_channel_sync_cmd(hwdev, channel); + } + + sdk_info(hwdev->dev_hdl, "%s cmdq channel 0x%x\n", + enable ? "Enable" : "Disable", channel); + + return 0; +} + +void sphw_cmdq_enable_channel_lock(struct sphw_hwdev *hwdev, bool enable) +{ + hwdev->cmdqs->lock_channel_en = enable; + + sdk_info(hwdev->dev_hdl, "%s cmdq channel lock\n", + enable ? 
"Enable" : "Disable"); +} + +int sphw_reinit_cmdq_ctxts(struct sphw_hwdev *hwdev) +{ + struct sphw_cmdqs *cmdqs = hwdev->cmdqs; + enum sphw_cmdq_type cmdq_type; + + cmdq_type = SPHW_CMDQ_SYNC; + for (; cmdq_type < SPHW_MAX_CMDQ_TYPES; cmdq_type++) { + sphw_cmdq_flush_cmd(hwdev, &cmdqs->cmdq[cmdq_type]); + cmdq_reset_all_cmd_buff(&cmdqs->cmdq[cmdq_type]); + cmdqs->cmdq[cmdq_type].wrapped = 1; + sphw_wq_reset(&cmdqs->cmdq[cmdq_type].wq); + } + + return sphw_set_cmdq_ctxts(hwdev); +} + +static int create_cmdq_wq(struct sphw_cmdqs *cmdqs) +{ + enum sphw_cmdq_type type, cmdq_type; + int err; + + cmdq_type = SPHW_CMDQ_SYNC; + for (; cmdq_type < SPHW_MAX_CMDQ_TYPES; cmdq_type++) { + err = sphw_wq_create(cmdqs->hwdev, &cmdqs->cmdq[cmdq_type].wq, SPHW_CMDQ_DEPTH, + CMDQ_WQEBB_SIZE); + if (err) { + sdk_err(cmdqs->hwdev->dev_hdl, "Failed to create cmdq wq\n"); + goto destroy_wq; + } + } + + /* 1-level CLA must put all cmdq's wq page addr in one wq block */ + if (!WQ_IS_0_LEVEL_CLA(&cmdqs->cmdq[SPHW_CMDQ_SYNC].wq)) { + /* cmdq wq's CLA table is up to 512B */ +#define CMDQ_WQ_CLA_SIZE 512 + if (cmdqs->cmdq[SPHW_CMDQ_SYNC].wq.num_wq_pages > + CMDQ_WQ_CLA_SIZE / sizeof(u64)) { + sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq wq page exceed limit: %lu\n", + CMDQ_WQ_CLA_SIZE / sizeof(u64)); + goto destroy_wq; + } + + cmdqs->wq_block_vaddr = + dma_alloc_coherent(cmdqs->hwdev->dev_hdl, PAGE_SIZE, + &cmdqs->wq_block_paddr, GFP_KERNEL); + if (!cmdqs->wq_block_vaddr) { + sdk_err(cmdqs->hwdev->dev_hdl, "Failed to alloc cmdq wq block\n"); + goto destroy_wq; + } + + type = SPHW_CMDQ_SYNC; + for (; type < SPHW_MAX_CMDQ_TYPES; type++) + memcpy((u8 *)cmdqs->wq_block_vaddr + + CMDQ_WQ_CLA_SIZE * type, + cmdqs->cmdq[type].wq.wq_block_vaddr, + cmdqs->cmdq[type].wq.num_wq_pages * sizeof(u64)); + } + + return 0; + +destroy_wq: + type = SPHW_CMDQ_SYNC; + for (; type < cmdq_type; type++) + sphw_wq_destroy(&cmdqs->cmdq[type].wq); + + return err; +} + +static void destroy_cmdq_wq(struct sphw_cmdqs *cmdqs) +{ + enum sphw_cmdq_type cmdq_type; + + if (cmdqs->wq_block_vaddr) + dma_free_coherent(cmdqs->hwdev->dev_hdl, PAGE_SIZE, + cmdqs->wq_block_vaddr, cmdqs->wq_block_paddr); + + cmdq_type = SPHW_CMDQ_SYNC; + for (; cmdq_type < SPHW_MAX_CMDQ_TYPES; cmdq_type++) + sphw_wq_destroy(&cmdqs->cmdq[cmdq_type].wq); +} + +int sphw_cmdqs_init(struct sphw_hwdev *hwdev) +{ + struct sphw_cmdqs *cmdqs = NULL; + enum sphw_cmdq_type type, cmdq_type; + int err; + + cmdqs = kzalloc(sizeof(*cmdqs), GFP_KERNEL); + if (!cmdqs) + return -ENOMEM; + + hwdev->cmdqs = cmdqs; + cmdqs->hwdev = hwdev; + + cmdqs->cmd_buf_pool = dma_pool_create("sphw_cmdq", hwdev->dev_hdl, + SPHW_CMDQ_BUF_SIZE, + SPHW_CMDQ_BUF_SIZE, 0ULL); + if (!cmdqs->cmd_buf_pool) { + sdk_err(hwdev->dev_hdl, "Failed to create cmdq buffer pool\n"); + err = -ENOMEM; + goto pool_create_err; + } + + err = create_cmdq_wq(cmdqs); + if (err) + goto create_wq_err; + + cmdq_type = SPHW_CMDQ_SYNC; + for (; cmdq_type < SPHW_MAX_CMDQ_TYPES; cmdq_type++) { + err = init_cmdq(&cmdqs->cmdq[cmdq_type], hwdev, cmdq_type); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to initialize cmdq type :%d\n", + cmdq_type); + goto init_cmdq_err; + } + + cmdq_init_queue_ctxt(cmdqs, &cmdqs->cmdq[cmdq_type], + &cmdqs->cmdq[cmdq_type].cmdq_ctxt); + } + + err = sphw_set_cmdq_ctxts(hwdev); + if (err) + goto init_cmdq_err; + + return 0; + +init_cmdq_err: + type = SPHW_CMDQ_SYNC; + for (; type < cmdq_type; type++) + free_cmdq(hwdev, &cmdqs->cmdq[type]); + + destroy_cmdq_wq(cmdqs); + +create_wq_err: + 
dma_pool_destroy(cmdqs->cmd_buf_pool); + +pool_create_err: + kfree(cmdqs); + + return err; +} + +void sphw_cmdqs_free(struct sphw_hwdev *hwdev) +{ + struct sphw_cmdqs *cmdqs = hwdev->cmdqs; + enum sphw_cmdq_type cmdq_type = SPHW_CMDQ_SYNC; + + cmdqs->status &= ~SPHW_CMDQ_ENABLE; + + for (; cmdq_type < SPHW_MAX_CMDQ_TYPES; cmdq_type++) { + sphw_cmdq_flush_cmd(hwdev, &cmdqs->cmdq[cmdq_type]); + cmdq_reset_all_cmd_buff(&cmdqs->cmdq[cmdq_type]); + free_cmdq(cmdqs->hwdev, &cmdqs->cmdq[cmdq_type]); + } + + destroy_cmdq_wq(cmdqs); + + dma_pool_destroy(cmdqs->cmd_buf_pool); + + kfree(cmdqs); +} + diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_cmdq.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_cmdq.h new file mode 100644 index 000000000000..e76d40d43cba --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_cmdq.h @@ -0,0 +1,196 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#ifndef SPHW_CMDQ_H +#define SPHW_CMDQ_H + +#include "sphw_comm_msg_intf.h" + +#define SPHW_SCMD_DATA_LEN 16 + +#define SPHW_CMDQ_DEPTH 4096 + +enum sphw_cmdq_type { + SPHW_CMDQ_SYNC, + SPHW_CMDQ_ASYNC, + SPHW_MAX_CMDQ_TYPES, +}; + +enum sphw_db_src_type { + SPHW_DB_SRC_CMDQ_TYPE, + SPHW_DB_SRC_L2NIC_SQ_TYPE, +}; + +enum sphw_cmdq_db_type { + SPHW_DB_SQ_RQ_TYPE, + SPHW_DB_CMDQ_TYPE, +}; + +/*hardware define: cmdq wqe*/ +struct sphw_cmdq_header { + u32 header_info; + u32 saved_data; +}; + +struct sphw_scmd_bufdesc { + u32 buf_len; + u32 rsvd; + u8 data[SPHW_SCMD_DATA_LEN]; +}; + +struct sphw_lcmd_bufdesc { + struct sphw_sge sge; + u32 rsvd1; + u64 saved_async_buf; + u64 rsvd3; +}; + +struct sphw_cmdq_db { + u32 db_head; + u32 db_info; +}; + +struct sphw_status { + u32 status_info; +}; + +struct sphw_ctrl { + u32 ctrl_info; +}; + +struct sphw_sge_resp { + struct sphw_sge sge; + u32 rsvd; +}; + +struct sphw_cmdq_completion { + union { + struct sphw_sge_resp sge_resp; + u64 direct_resp; + }; +}; + +struct sphw_cmdq_wqe_scmd { + struct sphw_cmdq_header header; + u64 rsvd; + struct sphw_status status; + struct sphw_ctrl ctrl; + struct sphw_cmdq_completion completion; + struct sphw_scmd_bufdesc buf_desc; +}; + +struct sphw_cmdq_wqe_lcmd { + struct sphw_cmdq_header header; + struct sphw_status status; + struct sphw_ctrl ctrl; + struct sphw_cmdq_completion completion; + struct sphw_lcmd_bufdesc buf_desc; +}; + +struct sphw_cmdq_inline_wqe { + struct sphw_cmdq_wqe_scmd wqe_scmd; +}; + +struct sphw_cmdq_wqe { + union { + struct sphw_cmdq_inline_wqe inline_wqe; + struct sphw_cmdq_wqe_lcmd wqe_lcmd; + }; +}; + +struct sphw_cmdq_arm_bit { + u32 q_type; + u32 q_id; +}; + +enum sphw_cmdq_status { + SPHW_CMDQ_ENABLE = BIT(0), +}; + +enum sphw_cmdq_cmd_type { + SPHW_CMD_TYPE_NONE, + SPHW_CMD_TYPE_SET_ARM, + SPHW_CMD_TYPE_DIRECT_RESP, + SPHW_CMD_TYPE_SGE_RESP, + SPHW_CMD_TYPE_ASYNC, + SPHW_CMD_TYPE_FAKE_TIMEOUT, + SPHW_CMD_TYPE_TIMEOUT, + SPHW_CMD_TYPE_FORCE_STOP, +}; + +struct sphw_cmdq_cmd_info { + enum sphw_cmdq_cmd_type cmd_type; + u16 channel; + + struct completion *done; + int *errcode; + int *cmpt_code; + u64 *direct_resp; + u64 cmdq_msg_id; + + struct sphw_cmd_buf *buf_in; + struct sphw_cmd_buf *buf_out; +}; + +struct sphw_cmdq { + struct sphw_wq wq; + + enum sphw_cmdq_type cmdq_type; + int wrapped; + + /* spinlock for send cmdq commands */ + spinlock_t cmdq_lock; + + /* doorbell area */ + u8 __iomem *db_base; + + struct cmdq_ctxt_info cmdq_ctxt; + + struct sphw_cmdq_cmd_info *cmd_infos; + + struct sphw_hwdev *hwdev; +}; + +struct sphw_cmdqs { + struct sphw_hwdev 
*hwdev; + + struct pci_pool *cmd_buf_pool; + + /* All cmdq's CLA of a VF occupy a PAGE when cmdq wq is 1-level CLA */ + dma_addr_t wq_block_paddr; + void *wq_block_vaddr; + struct sphw_cmdq cmdq[SPHW_MAX_CMDQ_TYPES]; + + u32 status; + u32 disable_flag; + + bool lock_channel_en; + unsigned long channel_stop; +}; + +enum sphw_set_arm_type { + SPHW_SET_ARM_CMDQ, + SPHW_SET_ARM_SQ, + SPHW_SET_ARM_TYPE_NUM, +}; + +int sphw_set_arm_bit(void *hwdev, enum sphw_set_arm_type q_type, u16 q_id); + +void sphw_cmdq_ceq_handler(void *hwdev, u32 ceqe_data); + +int sphw_reinit_cmdq_ctxts(struct sphw_hwdev *hwdev); + +bool sphw_cmdq_idle(struct sphw_cmdq *cmdq); + +int sphw_cmdqs_init(struct sphw_hwdev *hwdev); + +void sphw_cmdqs_free(struct sphw_hwdev *hwdev); + +void sphw_cmdq_flush_cmd(struct sphw_hwdev *hwdev, struct sphw_cmdq *cmdq); + +int sphw_cmdq_set_channel_status(struct sphw_hwdev *hwdev, u16 channel, bool enable); + +void sphw_cmdq_enable_channel_lock(struct sphw_hwdev *hwdev, bool enable); + +#endif + diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_comm_cmd.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_comm_cmd.h new file mode 100644 index 000000000000..d0e4c87942b5 --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_comm_cmd.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#ifndef SPHW_COMMON_CMD_H +#define SPHW_COMMON_CMD_H + +/* COMM Commands between Driver to MPU */ +enum sphw_mgmt_cmd { + COMM_MGMT_CMD_FUNC_RESET = 0, + COMM_MGMT_CMD_FEATURE_NEGO, + COMM_MGMT_CMD_FLUSH_DOORBELL, + COMM_MGMT_CMD_START_FLUSH, + COMM_MGMT_CMD_SET_FUNC_FLR, + COMM_MGMT_CMD_GET_GLOBAL_ATTR, + + COMM_MGMT_CMD_SET_CMDQ_CTXT = 20, + COMM_MGMT_CMD_SET_VAT, + COMM_MGMT_CMD_CFG_PAGESIZE, + COMM_MGMT_CMD_CFG_MSIX_CTRL_REG, + COMM_MGMT_CMD_SET_CEQ_CTRL_REG, + COMM_MGMT_CMD_SET_DMA_ATTR, + + COMM_MGMT_CMD_GET_MQM_FIX_INFO = 40, + COMM_MGMT_CMD_SET_MQM_CFG_INFO, + COMM_MGMT_CMD_SET_MQM_SRCH_GPA, + COMM_MGMT_CMD_SET_PPF_TMR, + COMM_MGMT_CMD_SET_PPF_HT_GPA, + COMM_MGMT_CMD_SET_FUNC_TMR_BITMAT, + + COMM_MGMT_CMD_GET_FW_VERSION = 60, + COMM_MGMT_CMD_GET_BOARD_INFO, + COMM_MGMT_CMD_SYNC_TIME, + COMM_MGMT_CMD_GET_HW_PF_INFOS, + COMM_MGMT_CMD_SEND_BDF_INFO, + COMM_MGMT_CMD_GET_VIRTIO_BDF_INFO, + + COMM_MGMT_CMD_UPDATE_FW = 80, + COMM_MGMT_CMD_ACTIVE_FW, + COMM_MGMT_CMD_HOT_ACTIVE_FW, + COMM_MGMT_CMD_HOT_ACTIVE_DONE_NOTICE, + COMM_MGMT_CMD_SWITCH_CFG, + COMM_MGMT_CMD_CHECK_FLASH, + COMM_MGMT_CMD_CHECK_FLASH_RW, + COMM_MGMT_CMD_RESOURCE_CFG, + + COMM_MGMT_CMD_FAULT_REPORT = 100, + COMM_MGMT_CMD_WATCHDOG_INFO, + COMM_MGMT_CMD_MGMT_RESET, + COMM_MGMT_CMD_FFM_SET, + + COMM_MGMT_CMD_GET_LOG = 120, + COMM_MGMT_CMD_TEMP_OP, + COMM_MGMT_CMD_EN_AUTO_RST_CHIP, + COMM_MGMT_CMD_CFG_REG, + COMM_MGMT_CMD_GET_CHIP_ID, + COMM_MGMT_CMD_SYSINFO_DFX, + COMM_MGMT_CMD_PCIE_DFX_NTC, +}; + +#endif /* SPHW_COMMON_CMD_H */ diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_comm_msg_intf.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_comm_msg_intf.h new file mode 100644 index 000000000000..a1abbd054d2a --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_comm_msg_intf.h @@ -0,0 +1,273 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#ifndef SPHW_COMM_MSG_INTF_H +#define SPHW_COMM_MSG_INTF_H + +#include "sphw_mgmt_msg_base.h" + +#define FUNC_RESET_FLAG_MAX_VALUE ((1U << (RES_TYPE_IPSEC + 1)) - 1) +struct comm_cmd_func_reset { + struct mgmt_msg_head head; + + u16 func_id; + u16 rsvd1[3]; + u64 
reset_flag; +}; + +enum { + COMM_F_API_CHAIN = 1U << 0, +}; + +#define COMM_MAX_FEATURE_QWORD 4 +struct comm_cmd_feature_nego { + struct mgmt_msg_head head; + + u16 func_id; + u8 opcode; /* 1: set, 0: get */ + u8 rsvd; + u64 s_feature[COMM_MAX_FEATURE_QWORD]; +}; + +struct comm_cmd_clear_doorbell { + struct mgmt_msg_head head; + + u16 func_id; + u16 rsvd1[3]; +}; + +struct comm_cmd_clear_resource { + struct mgmt_msg_head head; + + u16 func_id; + u16 rsvd1[3]; +}; + +struct comm_global_attr { + u8 max_host_num; + u8 max_pf_num; + u16 vf_id_start; + + u8 mgmt_host_node_id; /* for api cmd to mgmt cpu */ + u8 rsvd1[3]; + + u32 rsvd2[8]; +}; + +struct comm_cmd_get_glb_attr { + struct mgmt_msg_head head; + + struct comm_global_attr attr; +}; + +enum sphw_fw_ver_type { + SPHW_FW_VER_TYPE_BOOT, + SPHW_FW_VER_TYPE_MPU, + SPHW_FW_VER_TYPE_NPU, + SPHW_FW_VER_TYPE_SMU, + SPHW_FW_VER_TYPE_CFG, +}; + +#define SPHW_FW_VERSION_LEN 16 +#define SPHW_FW_COMPILE_TIME_LEN 20 +struct comm_cmd_get_fw_version { + struct mgmt_msg_head head; + + u16 fw_type; + u16 rsvd1; + u8 ver[SPHW_FW_VERSION_LEN]; + u8 time[SPHW_FW_COMPILE_TIME_LEN]; +}; + +/* hardware define: cmdq context */ +struct cmdq_ctxt_info { + u64 curr_wqe_page_pfn; + u64 wq_block_pfn; +}; + +struct comm_cmd_cmdq_ctxt { + struct mgmt_msg_head head; + + u16 func_id; + u8 cmdq_id; + u8 rsvd1[5]; + + struct cmdq_ctxt_info ctxt; +}; + +struct comm_cmd_root_ctxt { + struct mgmt_msg_head head; + + u16 func_id; + u8 set_cmdq_depth; + u8 cmdq_depth; + u16 rx_buf_sz; + u8 lro_en; + u8 rsvd1; + u16 sq_depth; + u16 rq_depth; + u64 rsvd2; +}; + +struct comm_cmd_wq_page_size { + struct mgmt_msg_head head; + + u16 func_id; + u8 opcode; + /* real_size=4KB*2^page_size, range(0~20) must be checked by driver */ + u8 page_size; + + u32 rsvd1; +}; + +struct comm_cmd_msix_config { + struct mgmt_msg_head head; + + u16 func_id; + u8 opcode; + u8 rsvd1; + u16 msix_index; + u8 pending_cnt; + u8 coalesce_timer_cnt; + u8 resend_timer_cnt; + u8 lli_timer_cnt; + u8 lli_credit_cnt; + u8 rsvd2[5]; +}; + +struct comm_cmd_dma_attr { + struct mgmt_msg_head head; + + u16 func_id; + u8 entry_idx; + u8 st; + u8 at; + u8 ph; + u8 no_snooping; + u8 tph_en; + u32 resv1; +}; + +struct comm_cmd_ceq_ctrl_reg { + struct mgmt_msg_head head; + + u16 func_id; + u16 q_id; + u32 ctrl0; + u32 ctrl1; + u32 rsvd1; +}; + +struct comm_cmd_func_tmr_bitmap_op { + struct mgmt_msg_head head; + + u16 func_id; + u8 opcode; /* 1: start, 0: stop */ + u8 rsvd1[5]; +}; + +struct comm_cmd_ppf_tmr_op { + struct mgmt_msg_head head; + + u8 ppf_id; + u8 opcode; /* 1: start, 0: stop */ + u8 rsvd1[6]; +}; + +struct comm_cmd_ht_gpa { + struct mgmt_msg_head head; + + u8 host_id; + u32 rsvd1[7]; + u64 page_pa0; + u64 page_pa1; +}; + +struct comm_cmd_get_eqm_num { + struct mgmt_msg_head head; + + u8 host_id; + u8 rsvd1[3]; + u32 chunk_num; + u32 search_gpa_num; +}; + +struct comm_cmd_eqm_cfg { + struct mgmt_msg_head head; + + u8 host_id; + u8 valid; + u16 rsvd1; + u32 page_size; + u32 rsvd2; +}; + +struct comm_cmd_eqm_search_gpa { + struct mgmt_msg_head head; + + u8 host_id; + u8 rsvd1[3]; + u32 start_idx; + u32 num; + u32 rsvd2; + u64 gpa_hi52[0]; +}; + +struct comm_cmd_ffm_info { + struct mgmt_msg_head head; + + u8 node_id; + /* error level of the interrupt source */ + u8 err_level; + /* Classification by interrupt source properties */ + u16 err_type; + u32 err_csr_addr; + u32 err_csr_value; + u32 rsvd1; +}; + +struct sphw_board_info { + u8 board_type; + u8 port_num; + u8 port_speed; + u8 pcie_width; + u8 host_num; + u8 
pf_num; + u16 vf_total_num; + u8 tile_num; + u8 qcm_num; + u8 core_num; + u8 work_mode; + u8 service_mode; + u8 pcie_mode; + u8 boot_sel; + u8 board_id; + u32 cfg_addr; +}; + +struct comm_cmd_board_info { + struct mgmt_msg_head head; + + struct sphw_board_info info; + u32 rsvd[25]; +}; + +struct comm_cmd_sync_time { + struct mgmt_msg_head head; + + u64 mstime; + u64 rsvd1; +}; + +struct comm_cmd_bdf_info { + struct mgmt_msg_head head; + + u16 function_idx; + u8 rsvd1[2]; + u8 bus; + u8 device; + u8 function; + u8 rsvd2[5]; +}; + +#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_common.c b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_common.c new file mode 100644 index 000000000000..aaba9e68ba31 --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_common.c @@ -0,0 +1,88 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#include <linux/kernel.h> +#include <linux/dma-mapping.h> +#include <linux/delay.h> + +#include "sphw_common.h" + +int sphw_dma_alloc_coherent_align(void *dev_hdl, u64 size, u64 align, unsigned int flag, + struct sphw_dma_addr_align *mem_align) +{ + void *vaddr = NULL, *align_vaddr = NULL; + dma_addr_t paddr, align_paddr; + u64 real_size = size; + + vaddr = dma_alloc_coherent(dev_hdl, real_size, &paddr, flag); + if (!vaddr) + return -ENOMEM; + + align_paddr = ALIGN(paddr, align); + /* align */ + if (align_paddr == paddr) { + align_vaddr = vaddr; + goto out; + } + + dma_free_coherent(dev_hdl, real_size, vaddr, paddr); + + /* realloc memory for align */ + real_size = size + align; + vaddr = dma_alloc_coherent(dev_hdl, real_size, &paddr, flag); + if (!vaddr) + return -ENOMEM; + + align_paddr = ALIGN(paddr, align); + align_vaddr = (void *)((u64)vaddr + (align_paddr - paddr)); + +out: + mem_align->real_size = (u32)real_size; + mem_align->ori_vaddr = vaddr; + mem_align->ori_paddr = paddr; + mem_align->align_vaddr = align_vaddr; + mem_align->align_paddr = align_paddr; + + return 0; +} + +void sphw_dma_free_coherent_align(void *dev_hdl, struct sphw_dma_addr_align *mem_align) +{ + dma_free_coherent(dev_hdl, mem_align->real_size, + mem_align->ori_vaddr, mem_align->ori_paddr); +} + +int sphw_wait_for_timeout(void *priv_data, wait_cpl_handler handler, + u32 wait_total_ms, u32 wait_once_us) +{ + enum sphw_wait_return ret; + unsigned long end; + /* Take 9/10 * wait_once_us as the minimum sleep time of usleep_range */ + u32 usleep_min = wait_once_us - wait_once_us / 10; + + if (!handler) + return -EINVAL; + + end = jiffies + msecs_to_jiffies(wait_total_ms); + do { + ret = handler(priv_data); + if (ret == WAIT_PROCESS_CPL) + return 0; + else if (ret == WAIT_PROCESS_ERR) + return -EIO; + + /* Sleep more than 20ms using msleep is accurate */ + if (wait_once_us >= 20 * USEC_PER_MSEC) + msleep(wait_once_us / USEC_PER_MSEC); + else + usleep_range(usleep_min, wait_once_us); + } while (time_before(jiffies, end)); + + ret = handler(priv_data); + if (ret == WAIT_PROCESS_CPL) + return 0; + else if (ret == WAIT_PROCESS_ERR) + return -EIO; + + return -ETIMEDOUT; +} diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_common.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_common.h new file mode 100644 index 000000000000..d6e2f183cc36 --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_common.h @@ -0,0 +1,118 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#ifndef SPHW_COMMON_H +#define SPHW_COMMON_H + +#include <linux/types.h> + +struct sphw_dma_addr_align { + u32 
real_size; + + void *ori_vaddr; + dma_addr_t ori_paddr; + + void *align_vaddr; + dma_addr_t align_paddr; +}; + +int sphw_dma_alloc_coherent_align(void *dev_hdl, u64 size, u64 align, unsigned int flag, + struct sphw_dma_addr_align *mem_align); + +void sphw_dma_free_coherent_align(void *dev_hdl, struct sphw_dma_addr_align *mem_align); + +enum sphw_wait_return { + WAIT_PROCESS_CPL = 0, + WAIT_PROCESS_WAITING = 1, + WAIT_PROCESS_ERR = 2, +}; + +typedef enum sphw_wait_return (*wait_cpl_handler)(void *priv_data); + +int sphw_wait_for_timeout(void *priv_data, wait_cpl_handler handler, + u32 wait_total_ms, u32 wait_once_us); + +/* * + * sphw_cpu_to_be32 - convert data to big endian 32 bit format + * @data: the data to convert + * @len: length of data to convert, must be Multiple of 4B + */ +static inline void sphw_cpu_to_be32(void *data, int len) +{ + int i, chunk_sz = sizeof(u32); + u32 *mem = data; + + if (!data) + return; + + len = len / chunk_sz; + + for (i = 0; i < len; i++) { + *mem = cpu_to_be32(*mem); + mem++; + } +} + +/* * + * sphw_cpu_to_be32 - convert data from big endian 32 bit format + * @data: the data to convert + * @len: length of data to convert + */ +static inline void sphw_be32_to_cpu(void *data, int len) +{ + int i, chunk_sz = sizeof(u32); + u32 *mem = data; + + if (!data) + return; + + len = len / chunk_sz; + + for (i = 0; i < len; i++) { + *mem = be32_to_cpu(*mem); + mem++; + } +} + +struct sphw_sge { + u32 hi_addr; + u32 lo_addr; + u32 len; +}; + +/* * + * sphw_set_sge - set dma area in scatter gather entry + * @sge: scatter gather entry + * @addr: dma address + * @len: length of relevant data in the dma address + */ +static inline void sphw_set_sge(struct sphw_sge *sge, dma_addr_t addr, int len) +{ + sge->hi_addr = upper_32_bits(addr); + sge->lo_addr = lower_32_bits(addr); + sge->len = len; +} + +#define sphw_hw_be32(val) (val) +#define sphw_hw_cpu32(val) (val) +#define sphw_hw_cpu16(val) (val) + +static inline void sphw_hw_be32_len(void *data, int len) +{ +} + +static inline void sphw_hw_cpu32_len(void *data, int len) +{ +} + +#define sdk_err(dev, format, ...) dev_err(dev, "[COMM]" format, ##__VA_ARGS__) +#define sdk_warn(dev, format, ...) dev_warn(dev, "[COMM]" format, ##__VA_ARGS__) +#define sdk_notice(dev, format, ...) dev_notice(dev, "[COMM]" format, ##__VA_ARGS__) +#define sdk_info(dev, format, ...) dev_info(dev, "[COMM]" format, ##__VA_ARGS__) + +#define nic_err(dev, format, ...) dev_err(dev, "[NIC]" format, ##__VA_ARGS__) +#define nic_warn(dev, format, ...) dev_warn(dev, "[NIC]" format, ##__VA_ARGS__) +#define nic_notice(dev, format, ...) dev_notice(dev, "[NIC]" format, ##__VA_ARGS__) +#define nic_info(dev, format, ...) 
dev_info(dev, "[NIC]" format, ##__VA_ARGS__) + +#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_crm.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_crm.h new file mode 100644 index 000000000000..bba3d2d501f2 --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_crm.h @@ -0,0 +1,984 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#ifndef SPHW_CRM_H +#define SPHW_CRM_H + +#define ARRAY_LEN(arr) ((int)((int)sizeof(arr) / (int)sizeof((arr)[0]))) + +#define SPHW_MGMT_VERSION_MAX_LEN 32 + +#define SPHW_FW_VERSION_NAME 16 +#define SPHW_FW_VERSION_SECTION_CNT 4 +#define SPHW_FW_VERSION_SECTION_BORDER 0xFF +struct sphw_fw_version { + u8 mgmt_ver[SPHW_FW_VERSION_NAME]; + u8 microcode_ver[SPHW_FW_VERSION_NAME]; + u8 boot_ver[SPHW_FW_VERSION_NAME]; +}; + +#define SPHW_MGMT_CMD_UNSUPPORTED 0xFF + +/* show each drivers only such as nic_service_cap, + * toe_service_cap structure, but not show service_cap + */ +enum sphw_service_type { + SERVICE_T_NIC = 0, + SERVICE_T_OVS, + SERVICE_T_ROCE, + SERVICE_T_TOE, + SERVICE_T_IOE, + SERVICE_T_FC, + SERVICE_T_VBS, + SERVICE_T_IPSEC, + SERVICE_T_VIRTIO, + SERVICE_T_MIGRATE, + SERVICE_T_MAX, + + /* Only used for interruption resource management, + * mark the request module + */ + SERVICE_T_INTF = (1 << 15), + SERVICE_T_CQM = (1 << 16), +}; + +struct nic_service_cap { + u16 max_sqs; + u16 max_rqs; +}; + +/* PF/VF ToE service resource structure */ +struct dev_toe_svc_cap { + /* PF resources */ + u32 max_pctxs; /* Parent Context: max specifications 1M */ + u32 max_cqs; + u16 max_srqs; + u32 srq_id_start; + u32 max_mpts; +}; + +/* ToE services */ +struct toe_service_cap { + struct dev_toe_svc_cap dev_toe_cap; + + bool alloc_flag; + u32 pctx_sz; /* 1KB */ + u32 scqc_sz; /* 64B */ +}; + +/* PF FC service resource structure defined */ +struct dev_fc_svc_cap { + /* PF Parent QPC */ + u32 max_parent_qpc_num; /* max number is 2048 */ + + /* PF Child QPC */ + u32 max_child_qpc_num; /* max number is 2048 */ + u32 child_qpc_id_start; + + /* PF SCQ */ + u32 scq_num; /* 16 */ + + /* PF supports SRQ */ + u32 srq_num; /* Number of SRQ is 2 */ + + u8 vp_id_start; + u8 vp_id_end; +}; + +/* FC services */ +struct fc_service_cap { + struct dev_fc_svc_cap dev_fc_cap; + + /* Parent QPC */ + u32 parent_qpc_size; /* 256B */ + + /* Child QPC */ + u32 child_qpc_size; /* 256B */ + + /* SQ */ + u32 sqe_size; /* 128B(in linked list mode) */ + + /* SCQ */ + u32 scqc_size; /* Size of the Context 32B */ + u32 scqe_size; /* 64B */ + + /* SRQ */ + u32 srqc_size; /* Size of SRQ Context (64B) */ + u32 srqe_size; /* 32B */ +}; + +struct dev_roce_svc_own_cap { + u32 max_qps; + u32 max_cqs; + u32 max_srqs; + u32 max_mpts; + u32 max_drc_qps; + + u32 cmtt_cl_start; + u32 cmtt_cl_end; + u32 cmtt_cl_sz; + + u32 dmtt_cl_start; + u32 dmtt_cl_end; + u32 dmtt_cl_sz; + + u32 wqe_cl_start; + u32 wqe_cl_end; + u32 wqe_cl_sz; + + u32 qpc_entry_sz; + u32 max_wqes; + u32 max_rq_sg; + u32 max_sq_inline_data_sz; + u32 max_rq_desc_sz; + + u32 rdmarc_entry_sz; + u32 max_qp_init_rdma; + u32 max_qp_dest_rdma; + + u32 max_srq_wqes; + u32 reserved_srqs; + u32 max_srq_sge; + u32 srqc_entry_sz; + + u32 max_msg_sz; /* Message size 2GB */ +}; + +/* RDMA service capability structure */ +struct dev_rdma_svc_cap { + /* ROCE service unique parameter structure */ + struct dev_roce_svc_own_cap roce_own_cap; +}; + +/* Defines the RDMA service capability flag */ +enum { + RDMA_BMME_FLAG_LOCAL_INV = (1 << 0), + RDMA_BMME_FLAG_REMOTE_INV = (1 << 1), + 
RDMA_BMME_FLAG_FAST_REG_WR = (1 << 2), + RDMA_BMME_FLAG_RESERVED_LKEY = (1 << 3), + RDMA_BMME_FLAG_TYPE_2_WIN = (1 << 4), + RDMA_BMME_FLAG_WIN_TYPE_2B = (1 << 5), + + RDMA_DEV_CAP_FLAG_XRC = (1 << 6), + RDMA_DEV_CAP_FLAG_MEM_WINDOW = (1 << 7), + RDMA_DEV_CAP_FLAG_ATOMIC = (1 << 8), + RDMA_DEV_CAP_FLAG_APM = (1 << 9), +}; + +/* RDMA services */ +struct rdma_service_cap { + struct dev_rdma_svc_cap dev_rdma_cap; + + u8 log_mtt; /* 1. the number of MTT PA must be integer power of 2 + * 2. represented by logarithm. Each MTT table can + * contain 1, 2, 4, 8, and 16 PA) + */ + /* todo: need to check whether related to max_mtt_seg */ + u32 num_mtts; /* Number of MTT table (4M), + * is actually MTT seg number + */ + u32 log_mtt_seg; + u32 mtt_entry_sz; /* MTT table size 8B, including 1 PA(64bits) */ + u32 mpt_entry_sz; /* MPT table size (64B) */ + + u32 dmtt_cl_start; + u32 dmtt_cl_end; + u32 dmtt_cl_sz; + + u8 log_rdmarc; /* 1. the number of RDMArc PA must be integer power of 2 + * 2. represented by logarithm. Each MTT table can + * contain 1, 2, 4, 8, and 16 PA) + */ + + u32 reserved_qps; /* Number of reserved QP */ + u32 max_sq_sg; /* Maximum SGE number of SQ (8) */ + u32 max_sq_desc_sz; /* WQE maximum size of SQ(1024B), inline maximum + * size if 960B(944B aligned to the 960B), + * 960B=>wqebb alignment=>1024B + */ + u32 wqebb_size; /* Currently, the supports 64B and 128B, + * defined as 64Bytes + */ + + u32 max_cqes; /* Size of the depth of the CQ (64K-1) */ + u32 reserved_cqs; /* Number of reserved CQ */ + u32 cqc_entry_sz; /* Size of the CQC (64B/128B) */ + u32 cqe_size; /* Size of CQE (32B) */ + + u32 reserved_mrws; /* Number of reserved MR/MR Window */ + + u32 max_fmr_maps; /* max MAP of FMR, + * (1 << (32-ilog2(num_mpt)))-1; + */ + + /* todo: max value needs to be confirmed */ + /* MTT table number of Each MTT seg(3) */ + + u32 log_rdmarc_seg; /* table number of each RDMArc seg(3) */ + + /* Timeout time. 
Formula:Tr=4.096us*2(local_ca_ack_delay), [Tr,4Tr] */ + u32 local_ca_ack_delay; + u32 num_ports; /* Physical port number */ + + u32 db_page_size; /* Size of the DB (4KB) */ + u32 direct_wqe_size; /* Size of the DWQE (256B) */ + + u32 num_pds; /* Maximum number of PD (128K) */ + u32 reserved_pds; /* Number of reserved PD */ + u32 max_xrcds; /* Maximum number of xrcd (64K) */ + u32 reserved_xrcds; /* Number of reserved xrcd */ + + u32 max_gid_per_port; /* gid number (16) of each port */ + u32 gid_entry_sz; /* RoCE v2 GID table is 32B, + * compatible RoCE v1 expansion + */ + + u32 reserved_lkey; /* local_dma_lkey */ + u32 num_comp_vectors; /* Number of complete vector (32) */ + u32 page_size_cap; /* Supports 4K,8K,64K,256K,1M and 4M page_size */ + + u32 flags; /* RDMA some identity */ + u32 max_frpl_len; /* Maximum number of pages frmr registration */ + u32 max_pkeys; /* Number of supported pkey group */ +}; + +/* PF OVS service resource structure defined */ +struct dev_ovs_svc_cap { + u32 max_pctxs; /* Parent Context: max specifications 1M */ + u8 dynamic_qp_en; + u8 fake_vf_num; + u16 fake_vf_start_id; +}; + +/* OVS services */ +struct ovs_service_cap { + struct dev_ovs_svc_cap dev_ovs_cap; + + u32 pctx_sz; /* 512B */ +}; + +/* PF IPsec service resource structure defined */ +struct dev_ipsec_svc_cap { + /* PF resources */ + u32 max_sa_ctxs; /* Parent Context: max specifications 8192 */ +}; + +/* IPsec services */ +struct ipsec_service_cap { + struct dev_ipsec_svc_cap dev_ipsec_cap; + u32 sactx_sz; /* 512B */ +}; + +/* Defines the IRQ information structure */ +struct irq_info { + u16 msix_entry_idx; /* IRQ corresponding index number */ + u32 irq_id; /* the IRQ number from OS */ +}; + +struct interrupt_info { + u32 lli_set; + u32 interrupt_coalesc_set; + u16 msix_index; + u8 lli_credit_limit; + u8 lli_timer_cfg; + u8 pending_limt; + u8 coalesc_timer_cfg; + u8 resend_timer_cfg; +}; + +enum sphw_msix_state { + SPHW_MSIX_ENABLE, + SPHW_MSIX_DISABLE, +}; + +enum sphw_msix_auto_mask { + SPHW_SET_MSIX_AUTO_MASK, + SPHW_CLR_MSIX_AUTO_MASK, +}; + +enum func_type { + TYPE_PF, + TYPE_VF, + TYPE_PPF, + TYPE_UNKNOWN, +}; + +struct sphw_init_para { + /* Record spnic_pcidev or NDIS_Adapter pointer address */ + void *adapter_hdl; + /* Record pcidev or Handler pointer address + * for example: ioremap interface input parameter + */ + void *pcidev_hdl; + /* Record pcidev->dev or Handler pointer address which used to + * dma address application or dev_err print the parameter + */ + void *dev_hdl; + + /* Configure virtual address, PF is bar1, VF is bar0/1 */ + void *cfg_reg_base; + /* interrupt configuration register address, PF is bar2, VF is bar2/3 + */ + void *intr_reg_base; + /* for PF bar3 virtual address, if function is VF should set to NULL */ + void *mgmt_reg_base; + + u64 db_dwqe_len; + u64 db_base_phy; + /* the doorbell address, bar4/5 higher 4M space */ + void *db_base; + /* direct wqe 4M, follow the doorbell address space */ + void *dwqe_mapping; + void **hwdev; + void *chip_node; + /* In bmgw x86 host, driver can't send message to mgmt cpu directly, + * need to trasmit message ppf mbox to bmgw arm host. 
+ */ + void *ppf_hwdev; + /* if use polling mode, set it true */ + bool poll; +}; + +/* B200 config BAR45 4MB, DB & DWQE both 2MB */ +#define SPHW_DB_DWQE_SIZE 0x00400000 + +/* db/dwqe page size: 4K */ +#define SPHW_DB_PAGE_SIZE 0x00001000ULL +#define SPHW_DWQE_OFFSET 0x00000800ULL + +#define SPHW_DB_MAX_AREAS (SPHW_DB_DWQE_SIZE / SPHW_DB_PAGE_SIZE) + +#ifndef IFNAMSIZ +#define IFNAMSIZ 16 +#endif +#define MAX_FUNCTION_NUM 4096 +#define SPHW_MAX_COS 8 + +struct card_node { + struct list_head node; + struct list_head func_list; + char chip_name[IFNAMSIZ]; + void *log_info; + void *dbgtool_info; + void *func_handle_array[MAX_FUNCTION_NUM]; + unsigned char bus_num; + u8 func_num; + bool up_bitmap_setted; + u8 valid_up_bitmap; +}; + +#define FAULT_SHOW_STR_LEN 16 + +enum sphw_fault_source_type { + /* same as FAULT_TYPE_CHIP */ + SPHW_FAULT_SRC_HW_MGMT_CHIP = 0, + /* same as FAULT_TYPE_UCODE */ + SPHW_FAULT_SRC_HW_MGMT_UCODE, + /* same as FAULT_TYPE_MEM_RD_TIMEOUT */ + SPHW_FAULT_SRC_HW_MGMT_MEM_RD_TIMEOUT, + /* same as FAULT_TYPE_MEM_WR_TIMEOUT */ + SPHW_FAULT_SRC_HW_MGMT_MEM_WR_TIMEOUT, + /* same as FAULT_TYPE_REG_RD_TIMEOUT */ + SPHW_FAULT_SRC_HW_MGMT_REG_RD_TIMEOUT, + /* same as FAULT_TYPE_REG_WR_TIMEOUT */ + SPHW_FAULT_SRC_HW_MGMT_REG_WR_TIMEOUT, + SPHW_FAULT_SRC_SW_MGMT_UCODE, + SPHW_FAULT_SRC_MGMT_WATCHDOG, + SPHW_FAULT_SRC_MGMT_RESET = 8, + SPHW_FAULT_SRC_HW_PHY_FAULT, + SPHW_FAULT_SRC_TX_PAUSE_EXCP, + SPHW_FAULT_SRC_PCIE_LINK_DOWN = 20, + SPHW_FAULT_SRC_HOST_HEARTBEAT_LOST = 21, + SPHW_FAULT_SRC_TX_TIMEOUT, + SPHW_FAULT_SRC_TYPE_MAX, +}; + +union sphw_fault_hw_mgmt { + u32 val[4]; + /* valid only type == FAULT_TYPE_CHIP */ + struct { + u8 node_id; + /* enum sphw_fault_err_level */ + u8 err_level; + u16 err_type; + u32 err_csr_addr; + u32 err_csr_value; + /* func_id valid only if err_level == FAULT_LEVEL_SERIOUS_FLR */ + u16 func_id; + u16 rsvd2; + } chip; + + /* valid only if type == FAULT_TYPE_UCODE */ + struct { + u8 cause_id; + u8 core_id; + u8 c_id; + u8 rsvd3; + u32 epc; + u32 rsvd4; + u32 rsvd5; + } ucode; + + /* valid only if type == FAULT_TYPE_MEM_RD_TIMEOUT || + * FAULT_TYPE_MEM_WR_TIMEOUT + */ + struct { + u32 err_csr_ctrl; + u32 err_csr_data; + u32 ctrl_tab; + u32 mem_index; + } mem_timeout; + + /* valid only if type == FAULT_TYPE_REG_RD_TIMEOUT || + * FAULT_TYPE_REG_WR_TIMEOUT + */ + struct { + u32 err_csr; + u32 rsvd6; + u32 rsvd7; + u32 rsvd8; + } reg_timeout; + + struct { + /* 0: read; 1: write */ + u8 op_type; + u8 port_id; + u8 dev_ad; + u8 rsvd9; + u32 csr_addr; + u32 op_data; + u32 rsvd10; + } phy_fault; +}; + +/* defined by chip */ +struct sphw_fault_event { + /* enum sphw_fault_type */ + u8 type; + u8 fault_level; /* sdk write fault level for uld event */ + u8 rsvd0[2]; + union sphw_fault_hw_mgmt event; +}; + +struct sphw_cmd_fault_event { + u8 status; + u8 version; + u8 rsvd0[6]; + struct sphw_fault_event event; +}; + +enum sphw_event_type { + SPHW_EVENT_LINK_DOWN = 0, + SPHW_EVENT_LINK_UP = 1, + SPHW_EVENT_FAULT = 3, + SPHW_EVENT_DCB_STATE_CHANGE = 5, + SPHW_EVENT_INIT_MIGRATE_PF, + SPHW_EVENT_SRIOV_STATE_CHANGE, + SPHW_EVENT_PORT_MODULE_EVENT, + SPHW_EVENT_PCIE_LINK_DOWN, + SPHW_EVENT_HEART_LOST, +}; + +struct sphw_event_link_info { + u8 valid; + u8 port_type; + u8 autoneg_cap; + u8 autoneg_state; + u8 duplex; + u8 speed; +}; + +struct sphw_dcb_info { + u8 dcb_on; + u8 default_cos; + u8 up_cos[SPHW_MAX_COS]; +}; + +struct sphw_sriov_state_info { + u8 enable; + u16 num_vfs; +}; + +enum link_err_type { + LINK_ERR_MODULE_UNRECOGENIZED, + LINK_ERR_NUM, +}; + +enum 
port_module_event_type { + SPHW_PORT_MODULE_CABLE_PLUGGED, + SPHW_PORT_MODULE_CABLE_UNPLUGGED, + SPHW_PORT_MODULE_LINK_ERR, + SPHW_PORT_MODULE_MAX_EVENT, +}; + +struct sphw_port_module_event { + enum port_module_event_type type; + enum link_err_type err_type; +}; + +struct sphw_event_info { + enum sphw_event_type type; + union { + struct sphw_event_link_info link_info; + struct sphw_fault_event info; + struct sphw_dcb_info dcb_state; + struct sphw_sriov_state_info sriov_state; + struct sphw_port_module_event module_event; + }; +}; + +typedef void (*sphw_event_handler)(void *handle, struct sphw_event_info *event); + +/* * + * @brief sphw_event_register - register hardware event + * @param dev: device pointer to hwdev + * @param pri_handle: private data will be used by the callback + * @param callback: callback function + */ +void sphw_event_register(void *dev, void *pri_handle, sphw_event_handler callback); + +/* * + * @brief sphw_event_unregister - unregister hardware event + * @param dev: device pointer to hwdev + */ +void sphw_event_unregister(void *dev); + +/* * + * @brief sphw_set_msix_auto_mask_state - set msix auto mask function + * @param hwdev: device pointer to hwdev + * @param msix_idx: msix id + * @param flag: msix auto_mask flag, 1-enable, 2-clear + */ +void sphw_set_msix_auto_mask_state(void *hwdev, u16 msix_idx, enum sphw_msix_auto_mask flag); + +/* * + * @brief sphw_set_msix_state - set msix state + * @param hwdev: device pointer to hwdev + * @param msix_idx: msix id + * @param flag: msix state flag, 0-enable, 1-disable + */ +void sphw_set_msix_state(void *hwdev, u16 msix_idx, enum sphw_msix_state flag); + +/* * + * @brief sphw_misx_intr_clear_resend_bit - clear msix resend bit + * @param hwdev: device pointer to hwdev + * @param msix_idx: msix id + * @param clear_resend_en: 1-clear + */ +void sphw_misx_intr_clear_resend_bit(void *hwdev, u16 msix_idx, u8 clear_resend_en); + +/* * + * @brief sphw_set_interrupt_cfg_direct - set interrupt cfg + * @param hwdev: device pointer to hwdev + * @param interrupt_para: interrupt info + * @param channel: channel id + * @retval zero: success + * @retval non-zero: failure + */ +int sphw_set_interrupt_cfg_direct(void *hwdev, struct interrupt_info *interrupt_para, u16 channel); + +int sphw_set_interrupt_cfg(void *hwdev, struct interrupt_info interrupt_info, u16 channel); + +/* * + * @brief sphw_get_interrupt_cfg - get interrupt cfg + * @param dev: device pointer to hwdev + * @param info: interrupt info + * @param channel: channel id + * @retval zero: success + * @retval non-zero: failure + */ +int sphw_get_interrupt_cfg(void *dev, struct interrupt_info *info, u16 channel); + +/* * + * @brief sphw_alloc_irqs - alloc irq + * @param hwdev: device pointer to hwdev + * @param type: service type + * @param num: alloc number + * @param irq_info_array: alloc irq info + * @param act_num: alloc actual number + * @retval zero: success + * @retval non-zero: failure + */ +int sphw_alloc_irqs(void *hwdev, enum sphw_service_type type, u16 num, + struct irq_info *irq_info_array, u16 *act_num); + +/* * + * @brief sphw_free_irq - free irq + * @param hwdev: device pointer to hwdev + * @param type: service type + * @param irq_id: irq id + */ +void sphw_free_irq(void *hwdev, enum sphw_service_type type, u32 irq_id); + +/* * + * @brief sphw_alloc_ceqs - alloc ceqs + * @param hwdev: device pointer to hwdev + * @param type: service type + * @param num: alloc ceq number + * @param ceq_id_array: alloc ceq_id_array + * @param act_num: alloc actual number + * @retval 
zero: success + * @retval non-zero: failure + */ +int sphw_alloc_ceqs(void *hwdev, enum sphw_service_type type, int num, int *ceq_id_array, + int *act_num); + +/* * + * @brief sphw_free_ceq - free ceq + * @param hwdev: device pointer to hwdev + * @param type: service type + * @param ceq_id: ceq id + */ +void sphw_free_ceq(void *hwdev, enum sphw_service_type type, int ceq_id); + +/* * + * @brief sphw_get_pcidev_hdl - get pcidev_hdl + * @param hwdev: device pointer to hwdev + * @retval non-null: success + * @retval null: failure + */ +void *sphw_get_pcidev_hdl(void *hwdev); + +/* * + * @brief sphw_ppf_idx - get ppf id + * @param hwdev: device pointer to hwdev + * @retval ppf id + */ +u8 sphw_ppf_idx(void *hwdev); + +/* * + * @brief sphw_get_chip_present_flag - get chip present flag + * @param hwdev: device pointer to hwdev + * @retval 1: chip is present + * @retval 0: chip is absent + */ +int sphw_get_chip_present_flag(const void *hwdev); + +/* * + * @brief sphw_support_nic - function support nic + * @param hwdev: device pointer to hwdev + * @param cap: nic service capability + * @retval true: function support nic + * @retval false: function not support nic + */ +bool sphw_support_nic(void *hwdev, struct nic_service_cap *cap); + +/* * + * @brief sphw_support_ipsec - function support ipsec + * @param hwdev: device pointer to hwdev + * @param cap: ipsec service capability + * @retval true: function support ipsec + * @retval false: function not support ipsec + */ +bool sphw_support_ipsec(void *hwdev, struct ipsec_service_cap *cap); + +/* * + * @brief sphw_support_roce - function support roce + * @param hwdev: device pointer to hwdev + * @param cap: roce service capability + * @retval true: function support roce + * @retval false: function not support roce + */ +bool sphw_support_roce(void *hwdev, struct rdma_service_cap *cap); + +/* * + * @brief sphw_support_fc - function support fc + * @param hwdev: device pointer to hwdev + * @param cap: fc service capability + * @retval true: function support fc + * @retval false: function not support fc + */ +bool sphw_support_fc(void *hwdev, struct fc_service_cap *cap); + +/* * + * @brief sphw_support_rdma - function support rdma + * @param hwdev: device pointer to hwdev + * @param cap: rdma service capability + * @retval true: function support rdma + * @retval false: function not support rdma + */ +bool sphw_support_rdma(void *hwdev, struct rdma_service_cap *cap); + +/* * + * @brief sphw_support_ovs - function support ovs + * @param hwdev: device pointer to hwdev + * @param cap: ovs service capability + * @retval true: function support ovs + * @retval false: function not support ovs + */ +bool sphw_support_ovs(void *hwdev, struct ovs_service_cap *cap); + +/* * + * @brief sphw_support_toe - function support toe + * @param hwdev: device pointer to hwdev + * @param cap: toe service capability + * @retval true: function support toe + * @retval false: function not support toe + */ +bool sphw_support_toe(void *hwdev, struct toe_service_cap *cap); + +/* * + * @brief sphw_sync_time - sync time to hardware + * @param hwdev: device pointer to hwdev + * @param time: time to sync + * @retval zero: success + * @retval non-zero: failure + */ +int sphw_sync_time(void *hwdev, u64 time); + +/* * + * @brief sphw_disable_mgmt_msg_report - disable mgmt report msg + * @param hwdev: device pointer to hwdev + */ +void sphw_disable_mgmt_msg_report(void *hwdev); + +/* * + * @brief sphw_func_for_mgmt - check whether the function is for mgmt + * @param hwdev: device pointer to hwdev + * @retval true: function for mgmt + * 
@retval false: function is not for mgmt + */ +bool sphw_func_for_mgmt(void *hwdev); + +/* * + * @brief sphw_set_pcie_order_cfg - set pcie order cfg + * @param handle: device pointer to hwdev + */ +void sphw_set_pcie_order_cfg(void *handle); + +/* * + * @brief sphw_init_hwdev - call to init hwdev + * @param para: device pointer to para + * @retval zero: success + * @retval non-zero: failure + */ +int sphw_init_hwdev(struct sphw_init_para *para); + +/* * + * @brief sphw_free_hwdev - free hwdev + * @param hwdev: device pointer to hwdev + */ +void sphw_free_hwdev(void *hwdev); + +/* * + * @brief sphw_detect_hw_present - detect hardware present + * @param hwdev: device pointer to hwdev + */ +void sphw_detect_hw_present(void *hwdev); + +/* * + * @brief sphw_record_pcie_error - record pcie error + * @param hwdev: device pointer to hwdev + */ +void sphw_record_pcie_error(void *hwdev); + +/* * + * @brief sphw_shutdown_hwdev - shutdown hwdev + * @param hwdev: device pointer to hwdev + */ +void sphw_shutdown_hwdev(void *hwdev); + +/* * + * @brief sphw_get_mgmt_version - get management cpu version + * @param hwdev: device pointer to hwdev + * @param mgmt_ver: output management version + * @param channel: channel id + * @retval zero: success + * @retval non-zero: failure + */ +int sphw_get_mgmt_version(void *hwdev, u8 *mgmt_ver, u8 version_size, u16 channel); + +/* * + * @brief sphw_get_fw_version - get firmware version + * @param hwdev: device pointer to hwdev + * @param fw_ver: firmware version + * @param channel: channel id + * @retval zero: success + * @retval non-zero: failure + */ +int sphw_get_fw_version(void *hwdev, struct sphw_fw_version *fw_ver, u16 channel); + +/* * + * @brief sphw_global_func_id - get global function id + * @param hwdev: device pointer to hwdev + * @retval global function id + */ +u16 sphw_global_func_id(void *hwdev); + +/* * + * @brief sphw_vector_to_eqn - vector to eq id + * @param hwdev: device pointer to hwdev + * @param type: service type + * @param vector: vector + * @retval eq id + */ +int sphw_vector_to_eqn(void *hwdev, enum sphw_service_type type, int vector); + +/* * + * @brief sphw_glb_pf_vf_offset - get vf offset id of pf + * @param hwdev: device pointer to hwdev + * @retval vf offset id + */ +u16 sphw_glb_pf_vf_offset(void *hwdev); + +/* * + * @brief sphw_pf_id_of_vf - get pf id of vf + * @param hwdev: device pointer to hwdev + * @retval pf id + */ +u8 sphw_pf_id_of_vf(void *hwdev); + +/* * + * @brief sphw_func_type - get function type + * @param hwdev: device pointer to hwdev + * @retval function type + */ +enum func_type sphw_func_type(void *hwdev); + +/* * + * @brief sphw_host_oq_id_mask - get oq id + * @param hwdev: device pointer to hwdev + * @retval oq id + */ +u8 sphw_host_oq_id_mask(void *hwdev); + +/* * + * @brief sphw_host_id - get host id + * @param hwdev: device pointer to hwdev + * @retval host id + */ +u8 sphw_host_id(void *hwdev); + +/* * + * @brief sphw_host_total_func - get host total function number + * @param hwdev: device pointer to hwdev + * @retval non-zero: host total function number + * @retval zero: failure + */ +u16 sphw_host_total_func(void *hwdev); + +/* * + * @brief sphw_func_max_nic_qnum - get max nic queue number + * @param hwdev: device pointer to hwdev + * @retval non-zero: max nic queue number + * @retval zero: failure + */ +u16 sphw_func_max_nic_qnum(void *hwdev); + +/* * + * @brief sphw_func_max_qnum - get max queue number + * @param hwdev: device pointer to hwdev + * @retval non-zero: max queue number + * @retval zero: failure + 
*/ +u16 sphw_func_max_qnum(void *hwdev); + +/* * + * @brief sphw_er_id - get ep id + * @param hwdev: device pointer to hwdev + * @retval ep id + */ +u8 sphw_ep_id(void *hwdev); /* Obtain service_cap.ep_id */ + +/* * + * @brief sphw_er_id - get er id + * @param hwdev: device pointer to hwdev + * @retval er id + */ +u8 sphw_er_id(void *hwdev); /* Obtain service_cap.er_id */ + +/* * + * @brief sphw_physical_port_id - get physical port id + * @param hwdev: device pointer to hwdev + * @retval physical port id + */ +u8 sphw_physical_port_id(void *hwdev); /* Obtain service_cap.port_id */ + +/* * + * @brief sphw_func_max_vf - get vf number + * @param hwdev: device pointer to hwdev + * @retval non-zero: vf number + * @retval zero: failure + */ +u16 sphw_func_max_vf(void *hwdev); /* Obtain service_cap.max_vf */ + +/* @brief sphw_max_pf_num - get global max pf number + */ +u8 sphw_max_pf_num(void *hwdev); + +/* * + * @brief sphw_host_pf_num - get current host pf number + * @param hwdev: device pointer to hwdev + * @retval non-zero: pf number + * @retval zero: failure + */ +u32 sphw_host_pf_num(void *hwdev); /* Obtain service_cap.pf_num */ + +/* * + * @brief sphw_pcie_itf_id - get pcie port id + * @param hwdev: device pointer to hwdev + * @retval pcie port id + */ +u8 sphw_pcie_itf_id(void *hwdev); + +/* * + * @brief sphw_vf_in_pf - get vf offset in pf + * @param hwdev: device pointer to hwdev + * @retval vf offset in pf + */ +u8 sphw_vf_in_pf(void *hwdev); + +/* * + * @brief sphw_cos_valid_bitmap - get cos valid bitmap + * @param hwdev: device pointer to hwdev + * @retval non-zero: valid cos bit map + * @retval zero: failure + */ +u8 sphw_cos_valid_bitmap(void *hwdev); + +/* * + * @brief sphw_get_card_present_state - get card present state + * @param hwdev: device pointer to hwdev + * @param card_present_state: return card present state + * @retval zero: success + * @retval non-zero: failure + */ +int sphw_get_card_present_state(void *hwdev, bool *card_present_state); + +/* * + * @brief sphw_func_rx_tx_flush - function flush + * @param hwdev: device pointer to hwdev + * @param channel: channel id + * @retval zero: success + * @retval non-zero: failure + */ +int sphw_func_rx_tx_flush(void *hwdev, u16 channel); + +/* * + * @brief sphw_flush_mgmt_workq - when remove function should flush work queue + * @param hwdev: device pointer to hwdev + */ +void sphw_flush_mgmt_workq(void *hwdev); + +/* @brief sphw_ceq_num get toe ceq num + */ +u8 sphw_ceq_num(void *hwdev); + +/* * + * @brief sphw_intr_num get intr num + */ +u16 sphw_intr_num(void *hwdev); + +/* @brief sphw_flexq_en get flexq en + */ +u8 sphw_flexq_en(void *hwdev); + +/** + * @brief sphw_fault_event_report - report fault event + * @param hwdev: device pointer to hwdev + * @param src: fault event source, reference to enum sphw_fault_source_type + * @param level: fault level, reference to enum sphw_fault_err_level + */ +void sphw_fault_event_report(void *hwdev, u16 src, u16 level); + +#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_csr.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_csr.h new file mode 100644 index 000000000000..3d84e1ef2fa3 --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_csr.h @@ -0,0 +1,171 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#ifndef SPHW_CSR_H +#define SPHW_CSR_H + +/* bit30/bit31 for bar index flag + * 00: bar0 + * 01: bar1 + * 10: bar2 + * 11: bar3 + */ +#define SPHW_CFG_REGS_FLAG 0x40000000 + +#define SPHW_MGMT_REGS_FLAG 0xC0000000 + 
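For reference, the bar-index encoding described above is easiest to see with a minimal sketch: bits 31:30 of every flagged CSR constant select the PCI BAR and bits 29:0 are the offset inside that BAR, which is why SPHW_CFG_REGS_FLAG (0x40000000) resolves to bar1 and SPHW_MGMT_REGS_FLAG (0xC0000000) to bar3. The helper below is hypothetical and not part of this series; it only illustrates the decoding implied by these defines.

/* Illustrative sketch only (not in this patch): decode a flagged CSR
 * address into its BAR index and in-BAR offset.
 */
static inline void sphw_example_decode_csr(u32 reg, u8 *bar_idx, u32 *offset)
{
	*bar_idx = (u8)(reg >> 30);	/* 0: bar0, 1: bar1, 2: bar2, 3: bar3 */
	*offset = reg & 0x3FFFFFFF;	/* same value as SPHW_REGS_FLAG_MAKS below */
}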
+#define SPHW_REGS_FLAG_MAKS 0x3FFFFFFF + +#define SPHW_VF_CFG_REG_OFFSET 0x2000 + +#define SPHW_HOST_CSR_BASE_ADDR (SPHW_MGMT_REGS_FLAG + 0x6000) +#define SPHW_CSR_GLOBAL_BASE_ADDR (SPHW_MGMT_REGS_FLAG + 0x6400) + +/* HW interface registers */ +#define SPHW_CSR_FUNC_ATTR0_ADDR (SPHW_CFG_REGS_FLAG + 0x0) +#define SPHW_CSR_FUNC_ATTR1_ADDR (SPHW_CFG_REGS_FLAG + 0x4) +#define SPHW_CSR_FUNC_ATTR2_ADDR (SPHW_CFG_REGS_FLAG + 0x8) +#define SPHW_CSR_FUNC_ATTR3_ADDR (SPHW_CFG_REGS_FLAG + 0xC) +#define SPHW_CSR_FUNC_ATTR4_ADDR (SPHW_CFG_REGS_FLAG + 0x10) +#define SPHW_CSR_FUNC_ATTR5_ADDR (SPHW_CFG_REGS_FLAG + 0x14) +#define SPHW_CSR_FUNC_ATTR6_ADDR (SPHW_CFG_REGS_FLAG + 0x18) + +#define SPHW_FUNC_CSR_MAILBOX_DATA_OFF 0x80 +#define SPHW_FUNC_CSR_MAILBOX_CONTROL_OFF \ + (SPHW_CFG_REGS_FLAG + 0x0100) +#define SPHW_FUNC_CSR_MAILBOX_INT_OFFSET_OFF \ + (SPHW_CFG_REGS_FLAG + 0x0104) +#define SPHW_FUNC_CSR_MAILBOX_RESULT_H_OFF \ + (SPHW_CFG_REGS_FLAG + 0x0108) +#define SPHW_FUNC_CSR_MAILBOX_RESULT_L_OFF \ + (SPHW_CFG_REGS_FLAG + 0x010C) +/*CLP registers*/ +#define SPHW_BAR3_CLP_BASE_ADDR (SPHW_MGMT_REGS_FLAG + 0x0000) + +#define SPHW_UCPU_CLP_SIZE_REG (SPHW_HOST_CSR_BASE_ADDR + 0x40) +#define SPHW_UCPU_CLP_REQBASE_REG (SPHW_HOST_CSR_BASE_ADDR + 0x44) +#define SPHW_UCPU_CLP_RSPBASE_REG (SPHW_HOST_CSR_BASE_ADDR + 0x48) +#define SPHW_UCPU_CLP_REQ_REG (SPHW_HOST_CSR_BASE_ADDR + 0x4c) +#define SPHW_UCPU_CLP_RSP_REG (SPHW_HOST_CSR_BASE_ADDR + 0x50) +#define SPHW_CLP_REG(member) (SPHW_UCPU_CLP_##member##_REG) + +#define SPHW_CLP_REQ_DATA SPHW_BAR3_CLP_BASE_ADDR +#define SPHW_CLP_RSP_DATA (SPHW_BAR3_CLP_BASE_ADDR + 0x1000) +#define SPHW_CLP_DATA(member) (SPHW_CLP_##member##_DATA) + +#define SPHW_PPF_ELECTION_OFFSET 0x0 +#define SPHW_MPF_ELECTION_OFFSET 0x20 + +#define SPHW_CSR_PPF_ELECTION_ADDR \ + (SPHW_HOST_CSR_BASE_ADDR + SPHW_PPF_ELECTION_OFFSET) + +#define SPHW_CSR_GLOBAL_MPF_ELECTION_ADDR \ + (SPHW_HOST_CSR_BASE_ADDR + SPHW_MPF_ELECTION_OFFSET) + +#define SPHW_CSR_FUNC_PPF_ELECT_BASE_ADDR (SPHW_CFG_REGS_FLAG + 0x60) +#define SPHW_CSR_FUNC_PPF_ELECT_PORT_STRIDE 0x4 + +#define SPHW_CSR_FUNC_PPF_ELECT(host_idx) \ + (SPHW_CSR_FUNC_PPF_ELECT_BASE_ADDR + \ + (host_idx) * SPHW_CSR_FUNC_PPF_ELECT_PORT_STRIDE) + +#define SPHW_CSR_DMA_ATTR_TBL_ADDR (SPHW_CFG_REGS_FLAG + 0x380) +#define SPHW_CSR_DMA_ATTR_INDIR_IDX_ADDR (SPHW_CFG_REGS_FLAG + 0x390) + +/* MSI-X registers */ +#define SPHW_CSR_MSIX_INDIR_IDX_ADDR (SPHW_CFG_REGS_FLAG + 0x310) +#define SPHW_CSR_MSIX_CTRL_ADDR (SPHW_CFG_REGS_FLAG + 0x300) +#define SPHW_CSR_MSIX_CNT_ADDR (SPHW_CFG_REGS_FLAG + 0x304) +#define SPHW_CSR_FUNC_MSI_CLR_WR_ADDR (SPHW_CFG_REGS_FLAG + 0x58) + +#define SPHW_MSI_CLR_INDIR_RESEND_TIMER_CLR_SHIFT 0 +#define SPHW_MSI_CLR_INDIR_INT_MSK_SET_SHIFT 1 +#define SPHW_MSI_CLR_INDIR_INT_MSK_CLR_SHIFT 2 +#define SPHW_MSI_CLR_INDIR_AUTO_MSK_SET_SHIFT 3 +#define SPHW_MSI_CLR_INDIR_AUTO_MSK_CLR_SHIFT 4 +#define SPHW_MSI_CLR_INDIR_SIMPLE_INDIR_IDX_SHIFT 22 + +#define SPHW_MSI_CLR_INDIR_RESEND_TIMER_CLR_MASK 0x1U +#define SPHW_MSI_CLR_INDIR_INT_MSK_SET_MASK 0x1U +#define SPHW_MSI_CLR_INDIR_INT_MSK_CLR_MASK 0x1U +#define SPHW_MSI_CLR_INDIR_AUTO_MSK_SET_MASK 0x1U +#define SPHW_MSI_CLR_INDIR_AUTO_MSK_CLR_MASK 0x1U +#define SPHW_MSI_CLR_INDIR_SIMPLE_INDIR_IDX_MASK 0x3FFU + +#define SPHW_MSI_CLR_INDIR_SET(val, member) \ + (((val) & SPHW_MSI_CLR_INDIR_##member##_MASK) << \ + SPHW_MSI_CLR_INDIR_##member##_SHIFT) + +/* EQ registers */ +#define SPHW_AEQ_INDIR_IDX_ADDR (SPHW_CFG_REGS_FLAG + 0x210) +#define SPHW_CEQ_INDIR_IDX_ADDR (SPHW_CFG_REGS_FLAG + 0x290) + 
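The SPHW_MSI_CLR_INDIR_SET macro above follows the driver's usual shift/mask pattern. As a minimal sketch built only from the defines in this header (the function name is hypothetical and not part of this series), the value that clears the resend timer for one MSI-X entry could be composed as follows; presumably such a value is then written to SPHW_CSR_FUNC_MSI_CLR_WR_ADDR.

/* Illustrative sketch only: build the clear-resend-timer value for one
 * MSI-X entry from the SPHW_MSI_CLR_INDIR_* defines above.
 */
static inline u32 sphw_example_msix_clr_resend_val(u16 msix_idx)
{
	return SPHW_MSI_CLR_INDIR_SET(msix_idx, SIMPLE_INDIR_IDX) |
	       SPHW_MSI_CLR_INDIR_SET(1, RESEND_TIMER_CLR);
}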
+#define SPHW_EQ_INDIR_IDX_ADDR(type) \ + ((type == SPHW_AEQ) ? SPHW_AEQ_INDIR_IDX_ADDR : SPHW_CEQ_INDIR_IDX_ADDR) + +#define SPHW_AEQ_MTT_OFF_BASE_ADDR (SPHW_CFG_REGS_FLAG + 0x240) +#define SPHW_CEQ_MTT_OFF_BASE_ADDR (SPHW_CFG_REGS_FLAG + 0x2C0) + +#define SPHW_CSR_EQ_PAGE_OFF_STRIDE 8 + +#define SPHW_AEQ_HI_PHYS_ADDR_REG(pg_num) \ + (SPHW_AEQ_MTT_OFF_BASE_ADDR + \ + (pg_num) * SPHW_CSR_EQ_PAGE_OFF_STRIDE) + +#define SPHW_AEQ_LO_PHYS_ADDR_REG(pg_num) \ + (SPHW_AEQ_MTT_OFF_BASE_ADDR + \ + (pg_num) * SPHW_CSR_EQ_PAGE_OFF_STRIDE + 4) + +#define SPHW_CEQ_HI_PHYS_ADDR_REG(pg_num) \ + (SPHW_CEQ_MTT_OFF_BASE_ADDR + \ + (pg_num) * SPHW_CSR_EQ_PAGE_OFF_STRIDE) + +#define SPHW_CEQ_LO_PHYS_ADDR_REG(pg_num) \ + (SPHW_CEQ_MTT_OFF_BASE_ADDR + \ + (pg_num) * SPHW_CSR_EQ_PAGE_OFF_STRIDE + 4) + +#define SPHW_CSR_AEQ_CTRL_0_ADDR (SPHW_CFG_REGS_FLAG + 0x200) +#define SPHW_CSR_AEQ_CTRL_1_ADDR (SPHW_CFG_REGS_FLAG + 0x204) +#define SPHW_CSR_AEQ_CONS_IDX_ADDR (SPHW_CFG_REGS_FLAG + 0x208) +#define SPHW_CSR_AEQ_PROD_IDX_ADDR (SPHW_CFG_REGS_FLAG + 0x20C) +#define SPHW_CSR_AEQ_CI_SIMPLE_INDIR_ADDR (SPHW_CFG_REGS_FLAG + 0x50) + +#define SPHW_CSR_CEQ_CTRL_0_ADDR (SPHW_CFG_REGS_FLAG + 0x280) +#define SPHW_CSR_CEQ_CTRL_1_ADDR (SPHW_CFG_REGS_FLAG + 0x284) +#define SPHW_CSR_CEQ_CONS_IDX_ADDR (SPHW_CFG_REGS_FLAG + 0x288) +#define SPHW_CSR_CEQ_PROD_IDX_ADDR (SPHW_CFG_REGS_FLAG + 0x28c) +#define SPHW_CSR_CEQ_CI_SIMPLE_INDIR_ADDR (SPHW_CFG_REGS_FLAG + 0x54) + +/* API CMD registers */ +#define SPHW_CSR_API_CMD_BASE (SPHW_MGMT_REGS_FLAG + 0x2000) + +#define SPHW_CSR_API_CMD_STRIDE 0x80 + +#define SPHW_CSR_API_CMD_CHAIN_HEAD_HI_ADDR(idx) \ + (SPHW_CSR_API_CMD_BASE + 0x0 + (idx) * SPHW_CSR_API_CMD_STRIDE) + +#define SPHW_CSR_API_CMD_CHAIN_HEAD_LO_ADDR(idx) \ + (SPHW_CSR_API_CMD_BASE + 0x4 + (idx) * SPHW_CSR_API_CMD_STRIDE) + +#define SPHW_CSR_API_CMD_STATUS_HI_ADDR(idx) \ + (SPHW_CSR_API_CMD_BASE + 0x8 + (idx) * SPHW_CSR_API_CMD_STRIDE) + +#define SPHW_CSR_API_CMD_STATUS_LO_ADDR(idx) \ + (SPHW_CSR_API_CMD_BASE + 0xC + (idx) * SPHW_CSR_API_CMD_STRIDE) + +#define SPHW_CSR_API_CMD_CHAIN_NUM_CELLS_ADDR(idx) \ + (SPHW_CSR_API_CMD_BASE + 0x10 + (idx) * SPHW_CSR_API_CMD_STRIDE) + +#define SPHW_CSR_API_CMD_CHAIN_CTRL_ADDR(idx) \ + (SPHW_CSR_API_CMD_BASE + 0x14 + (idx) * SPHW_CSR_API_CMD_STRIDE) + +#define SPHW_CSR_API_CMD_CHAIN_PI_ADDR(idx) \ + (SPHW_CSR_API_CMD_BASE + 0x1C + (idx) * SPHW_CSR_API_CMD_STRIDE) + +#define SPHW_CSR_API_CMD_CHAIN_REQ_ADDR(idx) \ + (SPHW_CSR_API_CMD_BASE + 0x20 + (idx) * SPHW_CSR_API_CMD_STRIDE) + +#define SPHW_CSR_API_CMD_STATUS_0_ADDR(idx) \ + (SPHW_CSR_API_CMD_BASE + 0x30 + (idx) * SPHW_CSR_API_CMD_STRIDE) + +#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_eqs.c b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_eqs.c new file mode 100644 index 000000000000..e0c84c5bcc58 --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_eqs.c @@ -0,0 +1,1374 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/workqueue.h> +#include <linux/pci.h> +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/module.h> +#include <linux/spinlock.h> +#include <linux/delay.h> + +#include "sphw_crm.h" +#include "sphw_hw.h" +#include "sphw_common.h" +#include "sphw_hwdev.h" +#include "sphw_hwif.h" +#include "sphw_hw.h" +#include "sphw_csr.h" +#include "sphw_hw_comm.h" 
+#include "sphw_prof_adap.h" +#include "sphw_eqs.h" + +#define SPHW_EQS_WQ_NAME "sphw_eqs" + +#define AEQ_CTRL_0_INTR_IDX_SHIFT 0 +#define AEQ_CTRL_0_DMA_ATTR_SHIFT 12 +#define AEQ_CTRL_0_PCI_INTF_IDX_SHIFT 20 +#define AEQ_CTRL_0_INTR_MODE_SHIFT 31 + +#define AEQ_CTRL_0_INTR_IDX_MASK 0x3FFU +#define AEQ_CTRL_0_DMA_ATTR_MASK 0x3FU +#define AEQ_CTRL_0_PCI_INTF_IDX_MASK 0x7U +#define AEQ_CTRL_0_INTR_MODE_MASK 0x1U + +#define AEQ_CTRL_0_SET(val, member) \ + (((val) & AEQ_CTRL_0_##member##_MASK) << \ + AEQ_CTRL_0_##member##_SHIFT) + +#define AEQ_CTRL_0_CLEAR(val, member) \ + ((val) & (~(AEQ_CTRL_0_##member##_MASK << \ + AEQ_CTRL_0_##member##_SHIFT))) + +#define AEQ_CTRL_1_LEN_SHIFT 0 +#define AEQ_CTRL_1_ELEM_SIZE_SHIFT 24 +#define AEQ_CTRL_1_PAGE_SIZE_SHIFT 28 + +#define AEQ_CTRL_1_LEN_MASK 0x1FFFFFU +#define AEQ_CTRL_1_ELEM_SIZE_MASK 0x3U +#define AEQ_CTRL_1_PAGE_SIZE_MASK 0xFU + +#define AEQ_CTRL_1_SET(val, member) \ + (((val) & AEQ_CTRL_1_##member##_MASK) << \ + AEQ_CTRL_1_##member##_SHIFT) + +#define AEQ_CTRL_1_CLEAR(val, member) \ + ((val) & (~(AEQ_CTRL_1_##member##_MASK << \ + AEQ_CTRL_1_##member##_SHIFT))) + +#define SPHW_EQ_PROD_IDX_MASK 0xFFFFF +#define SPHW_TASK_PROCESS_EQE_LIMIT 1024 +#define SPHW_EQ_UPDATE_CI_STEP 64 + +static uint g_aeq_len = SPHW_DEFAULT_AEQ_LEN; +module_param(g_aeq_len, uint, 0444); +MODULE_PARM_DESC(g_aeq_len, + "aeq depth, valid range is " __stringify(SPHW_MIN_AEQ_LEN) + " - " __stringify(SPHW_MAX_AEQ_LEN)); + +static uint g_ceq_len = SPHW_DEFAULT_CEQ_LEN; +module_param(g_ceq_len, uint, 0444); +MODULE_PARM_DESC(g_ceq_len, + "ceq depth, valid range is " __stringify(SPHW_MIN_CEQ_LEN) + " - " __stringify(SPHW_MAX_CEQ_LEN)); + +static uint g_num_ceqe_in_tasklet = SPHW_TASK_PROCESS_EQE_LIMIT; +module_param(g_num_ceqe_in_tasklet, uint, 0444); +MODULE_PARM_DESC(g_num_ceqe_in_tasklet, + "The max number of ceqe can be processed in tasklet, default = 1024"); + +#define CEQ_CTRL_0_INTR_IDX_SHIFT 0 +#define CEQ_CTRL_0_DMA_ATTR_SHIFT 12 +#define CEQ_CTRL_0_LIMIT_KICK_SHIFT 20 +#define CEQ_CTRL_0_PCI_INTF_IDX_SHIFT 24 +#define CEQ_CTRL_0_PAGE_SIZE_SHIFT 27 +#define CEQ_CTRL_0_INTR_MODE_SHIFT 31 + +#define CEQ_CTRL_0_INTR_IDX_MASK 0x3FFU +#define CEQ_CTRL_0_DMA_ATTR_MASK 0x3FU +#define CEQ_CTRL_0_LIMIT_KICK_MASK 0xFU +#define CEQ_CTRL_0_PCI_INTF_IDX_MASK 0x3U +#define CEQ_CTRL_0_PAGE_SIZE_MASK 0xF +#define CEQ_CTRL_0_INTR_MODE_MASK 0x1U + +#define CEQ_CTRL_0_SET(val, member) \ + (((val) & CEQ_CTRL_0_##member##_MASK) << \ + CEQ_CTRL_0_##member##_SHIFT) + +#define CEQ_CTRL_1_LEN_SHIFT 0 +#define CEQ_CTRL_1_GLB_FUNC_ID_SHIFT 20 + +#define CEQ_CTRL_1_LEN_MASK 0xFFFFFU +#define CEQ_CTRL_1_GLB_FUNC_ID_MASK 0xFFFU + +#define CEQ_CTRL_1_SET(val, member) \ + (((val) & CEQ_CTRL_1_##member##_MASK) << \ + CEQ_CTRL_1_##member##_SHIFT) + +#define EQ_ELEM_DESC_TYPE_SHIFT 0 +#define EQ_ELEM_DESC_SRC_SHIFT 7 +#define EQ_ELEM_DESC_SIZE_SHIFT 8 +#define EQ_ELEM_DESC_WRAPPED_SHIFT 31 + +#define EQ_ELEM_DESC_TYPE_MASK 0x7FU +#define EQ_ELEM_DESC_SRC_MASK 0x1U +#define EQ_ELEM_DESC_SIZE_MASK 0xFFU +#define EQ_ELEM_DESC_WRAPPED_MASK 0x1U + +#define EQ_ELEM_DESC_GET(val, member) \ + (((val) >> EQ_ELEM_DESC_##member##_SHIFT) & \ + EQ_ELEM_DESC_##member##_MASK) + +#define EQ_CONS_IDX_CONS_IDX_SHIFT 0 +#define EQ_CONS_IDX_INT_ARMED_SHIFT 31 + +#define EQ_CONS_IDX_CONS_IDX_MASK 0x1FFFFFU +#define EQ_CONS_IDX_INT_ARMED_MASK 0x1U + +#define EQ_CONS_IDX_SET(val, member) \ + (((val) & EQ_CONS_IDX_##member##_MASK) << \ + EQ_CONS_IDX_##member##_SHIFT) + +#define EQ_CONS_IDX_CLEAR(val, member) \ + ((val) & 
(~(EQ_CONS_IDX_##member##_MASK << \ + EQ_CONS_IDX_##member##_SHIFT))) + +#define EQ_CI_SIMPLE_INDIR_CI_SHIFT 0 +#define EQ_CI_SIMPLE_INDIR_ARMED_SHIFT 21 +#define EQ_CI_SIMPLE_INDIR_AEQ_IDX_SHIFT 30 +#define EQ_CI_SIMPLE_INDIR_CEQ_IDX_SHIFT 24 + +#define EQ_CI_SIMPLE_INDIR_CI_MASK 0x1FFFFFU +#define EQ_CI_SIMPLE_INDIR_ARMED_MASK 0x1U +#define EQ_CI_SIMPLE_INDIR_AEQ_IDX_MASK 0x3U +#define EQ_CI_SIMPLE_INDIR_CEQ_IDX_MASK 0xFFU + +#define EQ_CI_SIMPLE_INDIR_SET(val, member) \ + (((val) & EQ_CI_SIMPLE_INDIR_##member##_MASK) << \ + EQ_CI_SIMPLE_INDIR_##member##_SHIFT) + +#define EQ_CI_SIMPLE_INDIR_CLEAR(val, member) \ + ((val) & (~(EQ_CI_SIMPLE_INDIR_##member##_MASK << \ + EQ_CI_SIMPLE_INDIR_##member##_SHIFT))) + +#define EQ_WRAPPED(eq) ((u32)(eq)->wrapped << EQ_VALID_SHIFT) + +#define EQ_CONS_IDX(eq) ((eq)->cons_idx | \ + ((u32)(eq)->wrapped << EQ_WRAPPED_SHIFT)) + +#define EQ_CONS_IDX_REG_ADDR(eq) \ + (((eq)->type == SPHW_AEQ) ? \ + SPHW_CSR_AEQ_CONS_IDX_ADDR : \ + SPHW_CSR_CEQ_CONS_IDX_ADDR) +#define EQ_CI_SIMPLE_INDIR_REG_ADDR(eq) \ + (((eq)->type == SPHW_AEQ) ? \ + SPHW_CSR_AEQ_CI_SIMPLE_INDIR_ADDR : \ + SPHW_CSR_CEQ_CI_SIMPLE_INDIR_ADDR) + +#define EQ_PROD_IDX_REG_ADDR(eq) \ + (((eq)->type == SPHW_AEQ) ? \ + SPHW_CSR_AEQ_PROD_IDX_ADDR : \ + SPHW_CSR_CEQ_PROD_IDX_ADDR) + +#define SPHW_EQ_HI_PHYS_ADDR_REG(type, pg_num) \ + ((u32)((type == SPHW_AEQ) ? \ + SPHW_AEQ_HI_PHYS_ADDR_REG(pg_num) : \ + SPHW_CEQ_HI_PHYS_ADDR_REG(pg_num))) + +#define SPHW_EQ_LO_PHYS_ADDR_REG(type, pg_num) \ + ((u32)((type == SPHW_AEQ) ? \ + SPHW_AEQ_LO_PHYS_ADDR_REG(pg_num) : \ + SPHW_CEQ_LO_PHYS_ADDR_REG(pg_num))) + +#define GET_EQ_NUM_PAGES(eq, size) \ + ((u16)(ALIGN((u32)((eq)->eq_len * (eq)->elem_size), \ + (size)) / (size))) + +#define SPHW_EQ_MAX_PAGES(eq) \ + ((eq)->type == SPHW_AEQ ? 
SPHW_AEQ_MAX_PAGES : \ + SPHW_CEQ_MAX_PAGES) + +#define GET_EQ_NUM_ELEMS(eq, pg_size) ((pg_size) / (u32)(eq)->elem_size) + +#define GET_EQ_ELEMENT(eq, idx) \ + (((u8 *)(eq)->eq_pages[(idx) / (eq)->num_elem_in_pg].align_vaddr) + \ + (u32)(((idx) & ((eq)->num_elem_in_pg - 1)) * (eq)->elem_size)) + +#define GET_AEQ_ELEM(eq, idx) \ + ((struct sphw_aeq_elem *)GET_EQ_ELEMENT((eq), (idx))) + +#define GET_CEQ_ELEM(eq, idx) ((u32 *)GET_EQ_ELEMENT((eq), (idx))) + +#define GET_CURR_AEQ_ELEM(eq) GET_AEQ_ELEM((eq), (eq)->cons_idx) + +#define GET_CURR_CEQ_ELEM(eq) GET_CEQ_ELEM((eq), (eq)->cons_idx) + +#define PAGE_IN_4K(page_size) ((page_size) >> 12) +#define EQ_SET_HW_PAGE_SIZE_VAL(eq) \ + ((u32)ilog2(PAGE_IN_4K((eq)->page_size))) + +#define ELEMENT_SIZE_IN_32B(eq) (((eq)->elem_size) >> 5) +#define EQ_SET_HW_ELEM_SIZE_VAL(eq) ((u32)ilog2(ELEMENT_SIZE_IN_32B(eq))) + +#define AEQ_DMA_ATTR_DEFAULT 0 +#define CEQ_DMA_ATTR_DEFAULT 0 + +#define CEQ_LMT_KICK_DEFAULT 0 + +#define EQ_MSIX_RESEND_TIMER_CLEAR 1 + +#define EQ_WRAPPED_SHIFT 20 + +#define EQ_VALID_SHIFT 31 + +#define CEQE_TYPE_SHIFT 23 +#define CEQE_TYPE_MASK 0x7 + +#define CEQE_TYPE(type) (((type) >> CEQE_TYPE_SHIFT) & \ + CEQE_TYPE_MASK) + +#define CEQE_DATA_MASK 0x3FFFFFF +#define CEQE_DATA(data) ((data) & CEQE_DATA_MASK) + +#define aeq_to_aeqs(eq) \ + container_of((eq) - (eq)->q_id, struct sphw_aeqs, aeq[0]) + +#define ceq_to_ceqs(eq) \ + container_of((eq) - (eq)->q_id, struct sphw_ceqs, ceq[0]) + +static irqreturn_t ceq_interrupt(int irq, void *data); +static irqreturn_t aeq_interrupt(int irq, void *data); + +static void ceq_tasklet(ulong eq_tasklet); + +/** + * sphw_aeq_register_hw_cb - register aeq callback for specific event + * @hwdev: the pointer to hw device + * @event: event for the handler + * @hw_cb: callback function + **/ +int sphw_aeq_register_hw_cb(void *hwdev, enum sphw_aeq_type event, sphw_aeq_hwe_cb hwe_cb) +{ + struct sphw_aeqs *aeqs = NULL; + + if (!hwdev || !hwe_cb || event >= SPHW_MAX_AEQ_EVENTS) + return -EINVAL; + + aeqs = ((struct sphw_hwdev *)hwdev)->aeqs; + + aeqs->aeq_hwe_cb[event] = hwe_cb; + + set_bit(SPHW_AEQ_HW_CB_REG, &aeqs->aeq_hw_cb_state[event]); + + return 0; +} + +/** + * sphw_aeq_unregister_hw_cb - unregister the aeq callback for specific event + * @hwdev: the pointer to hw device + * @event: event for the handler + **/ +void sphw_aeq_unregister_hw_cb(void *hwdev, enum sphw_aeq_type event) +{ + struct sphw_aeqs *aeqs = NULL; + + if (!hwdev || event >= SPHW_MAX_AEQ_EVENTS) + return; + + aeqs = ((struct sphw_hwdev *)hwdev)->aeqs; + + clear_bit(SPHW_AEQ_HW_CB_REG, &aeqs->aeq_hw_cb_state[event]); + + while (test_bit(SPHW_AEQ_HW_CB_RUNNING, + &aeqs->aeq_hw_cb_state[event])) + usleep_range(EQ_USLEEP_LOW_BOUND, EQ_USLEEP_HIG_BOUND); + + aeqs->aeq_hwe_cb[event] = NULL; +} + +/** + * sphw_aeq_register_swe_cb - register aeq callback for sw event + * @hwdev: the pointer to hw device + * @event: soft event for the handler + * @sw_cb: callback function + **/ +int sphw_aeq_register_swe_cb(void *hwdev, enum sphw_aeq_sw_type event, sphw_aeq_swe_cb aeq_swe_cb) +{ + struct sphw_aeqs *aeqs = NULL; + + if (!hwdev || !aeq_swe_cb || event >= SPHW_MAX_AEQ_SW_EVENTS) + return -EINVAL; + + aeqs = ((struct sphw_hwdev *)hwdev)->aeqs; + + aeqs->aeq_swe_cb[event] = aeq_swe_cb; + + set_bit(SPHW_AEQ_SW_CB_REG, &aeqs->aeq_sw_cb_state[event]); + + return 0; +} + +/** + * sphw_aeq_unregister_swe_cb - unregister the aeq callback for sw event + * @hwdev: the pointer to hw device + * @event: soft event for the handler + **/ +void 
sphw_aeq_unregister_swe_cb(void *hwdev, enum sphw_aeq_sw_type event) +{ + struct sphw_aeqs *aeqs = NULL; + + if (!hwdev || event >= SPHW_MAX_AEQ_SW_EVENTS) + return; + + aeqs = ((struct sphw_hwdev *)hwdev)->aeqs; + + clear_bit(SPHW_AEQ_SW_CB_REG, &aeqs->aeq_sw_cb_state[event]); + + while (test_bit(SPHW_AEQ_SW_CB_RUNNING, + &aeqs->aeq_sw_cb_state[event])) + usleep_range(EQ_USLEEP_LOW_BOUND, EQ_USLEEP_HIG_BOUND); + + aeqs->aeq_swe_cb[event] = NULL; +} + +/** + * sphw_ceq_register_cb - register ceq callback for specific event + * @hwdev: the pointer to hw device + * @event: event for the handler + * @ceq_cb: callback function + **/ +int sphw_ceq_register_cb(void *hwdev, enum sphw_ceq_event event, sphw_ceq_event_cb callback) +{ + struct sphw_ceqs *ceqs = NULL; + + if (!hwdev || event >= SPHW_MAX_CEQ_EVENTS) + return -EINVAL; + + ceqs = ((struct sphw_hwdev *)hwdev)->ceqs; + + ceqs->ceq_cb[event] = callback; + + set_bit(SPHW_CEQ_CB_REG, &ceqs->ceq_cb_state[event]); + + return 0; +} + +/** + * sphw_ceq_unregister_cb - unregister ceq callback for specific event + * @hwdev: the pointer to hw device + * @event: event for the handler + **/ +void sphw_ceq_unregister_cb(void *hwdev, enum sphw_ceq_event event) +{ + struct sphw_ceqs *ceqs = NULL; + + if (!hwdev || event >= SPHW_MAX_CEQ_EVENTS) + return; + + ceqs = ((struct sphw_hwdev *)hwdev)->ceqs; + + clear_bit(SPHW_CEQ_CB_REG, &ceqs->ceq_cb_state[event]); + + while (test_bit(SPHW_CEQ_CB_RUNNING, &ceqs->ceq_cb_state[event])) + usleep_range(EQ_USLEEP_LOW_BOUND, EQ_USLEEP_HIG_BOUND); + + ceqs->ceq_cb[event] = NULL; +} + +/** + * set_eq_cons_idx - write the cons idx to the hw + * @eq: The event queue to update the cons idx for + * @cons idx: consumer index value + **/ +static void set_eq_cons_idx(struct sphw_eq *eq, u32 arm_state) +{ + u32 eq_wrap_ci, val; + u32 addr = EQ_CI_SIMPLE_INDIR_REG_ADDR(eq); + + eq_wrap_ci = EQ_CONS_IDX(eq); + + /* if use poll mode only eq0 use int_arm mode */ + if (eq->q_id != 0 && eq->hwdev->poll) + val = EQ_CI_SIMPLE_INDIR_SET(SPHW_EQ_NOT_ARMED, ARMED); + else + val = EQ_CI_SIMPLE_INDIR_SET(arm_state, ARMED); + if (eq->type == SPHW_AEQ) { + val = val | + EQ_CI_SIMPLE_INDIR_SET(eq_wrap_ci, CI) | + EQ_CI_SIMPLE_INDIR_SET(eq->q_id, AEQ_IDX); + } else { + val = val | + EQ_CI_SIMPLE_INDIR_SET(eq_wrap_ci, CI) | + EQ_CI_SIMPLE_INDIR_SET(eq->q_id, CEQ_IDX); + } + + sphw_hwif_write_reg(eq->hwdev->hwif, addr, val); +} + +/** + * ceq_event_handler - handle for the ceq events + * @ceqs: ceqs part of the chip + * @ceqe: ceq element of the event + **/ +static void ceq_event_handler(struct sphw_ceqs *ceqs, u32 ceqe) +{ + struct sphw_hwdev *hwdev = ceqs->hwdev; + enum sphw_ceq_event event = CEQE_TYPE(ceqe); + u32 ceqe_data = CEQE_DATA(ceqe); + + if (event >= SPHW_MAX_CEQ_EVENTS) { + sdk_err(hwdev->dev_hdl, "Ceq unknown event:%d, ceqe date: 0x%x\n", + event, ceqe_data); + return; + } + + set_bit(SPHW_CEQ_CB_RUNNING, &ceqs->ceq_cb_state[event]); + + if (ceqs->ceq_cb[event] && + test_bit(SPHW_CEQ_CB_REG, &ceqs->ceq_cb_state[event])) + ceqs->ceq_cb[event](hwdev, ceqe_data); + + clear_bit(SPHW_CEQ_CB_RUNNING, &ceqs->ceq_cb_state[event]); +} + +static void aeq_elem_handler(struct sphw_eq *eq, u32 aeqe_desc) +{ + struct sphw_aeqs *aeqs = aeq_to_aeqs(eq); + struct sphw_aeq_elem *aeqe_pos; + enum sphw_aeq_type event; + enum sphw_aeq_sw_type sw_type; + u32 sw_event; + u8 data[SPHW_AEQE_DATA_SIZE], size; + + aeqe_pos = GET_CURR_AEQ_ELEM(eq); + + event = EQ_ELEM_DESC_GET(aeqe_desc, TYPE); + if (EQ_ELEM_DESC_GET(aeqe_desc, SRC)) { + sw_event = event; + 
sw_type = sw_event >= SPHW_NIC_FATAL_ERROR_MAX ? + SPHW_STATEFULL_EVENT : + SPHW_STATELESS_EVENT; + /* SW event uses only the first 8B */ + memcpy(data, aeqe_pos->aeqe_data, SPHW_AEQE_DATA_SIZE); + sphw_be32_to_cpu(data, SPHW_AEQE_DATA_SIZE); + set_bit(SPHW_AEQ_SW_CB_RUNNING, + &aeqs->aeq_sw_cb_state[sw_type]); + if (aeqs->aeq_swe_cb[sw_type] && + test_bit(SPHW_AEQ_SW_CB_REG, + &aeqs->aeq_sw_cb_state[sw_type])) + aeqs->aeq_swe_cb[sw_type](aeqs->hwdev, event, data); + + clear_bit(SPHW_AEQ_SW_CB_RUNNING, + &aeqs->aeq_sw_cb_state[sw_type]); + return; + } + + if (event < SPHW_MAX_AEQ_EVENTS) { + memcpy(data, aeqe_pos->aeqe_data, SPHW_AEQE_DATA_SIZE); + sphw_be32_to_cpu(data, SPHW_AEQE_DATA_SIZE); + + size = EQ_ELEM_DESC_GET(aeqe_desc, SIZE); + set_bit(SPHW_AEQ_HW_CB_RUNNING, + &aeqs->aeq_hw_cb_state[event]); + if (aeqs->aeq_hwe_cb[event] && + test_bit(SPHW_AEQ_HW_CB_REG, + &aeqs->aeq_hw_cb_state[event])) + aeqs->aeq_hwe_cb[event](aeqs->hwdev, data, size); + clear_bit(SPHW_AEQ_HW_CB_RUNNING, + &aeqs->aeq_hw_cb_state[event]); + return; + } + sdk_warn(eq->hwdev->dev_hdl, "Unknown aeq hw event %d\n", event); +} + +/** + * aeq_irq_handler - handler for the aeq event + * @eq: the async event queue of the event + **/ +static bool aeq_irq_handler(struct sphw_eq *eq) +{ + struct sphw_aeq_elem *aeqe_pos = NULL; + u32 aeqe_desc; + u32 i, eqe_cnt = 0; + + for (i = 0; i < SPHW_TASK_PROCESS_EQE_LIMIT; i++) { + aeqe_pos = GET_CURR_AEQ_ELEM(eq); + + /* Data in HW is in Big endian Format */ + aeqe_desc = be32_to_cpu(aeqe_pos->desc); + + /* HW updates wrapped bit, when it adds eq element event */ + if (EQ_ELEM_DESC_GET(aeqe_desc, WRAPPED) == eq->wrapped) + return false; + + aeq_elem_handler(eq, aeqe_desc); + + eq->cons_idx++; + + if (eq->cons_idx == eq->eq_len) { + eq->cons_idx = 0; + eq->wrapped = !eq->wrapped; + } + + if (++eqe_cnt >= SPHW_EQ_UPDATE_CI_STEP) { + eqe_cnt = 0; + set_eq_cons_idx(eq, SPHW_EQ_NOT_ARMED); + } + } + + return true; +} + +/** + * ceq_irq_handler - handler for the ceq event + * @eq: the completion event queue of the event + **/ +static bool ceq_irq_handler(struct sphw_eq *eq) +{ + struct sphw_ceqs *ceqs = ceq_to_ceqs(eq); + u32 ceqe, eqe_cnt = 0; + u32 i; + + for (i = 0; i < g_num_ceqe_in_tasklet; i++) { + ceqe = *(GET_CURR_CEQ_ELEM(eq)); + ceqe = be32_to_cpu(ceqe); + + /* HW updates wrapped bit, when it adds eq element event */ + if (EQ_ELEM_DESC_GET(ceqe, WRAPPED) == eq->wrapped) + return false; + + ceq_event_handler(ceqs, ceqe); + + eq->cons_idx++; + + if (eq->cons_idx == eq->eq_len) { + eq->cons_idx = 0; + eq->wrapped = !eq->wrapped; + } + + if (++eqe_cnt >= SPHW_EQ_UPDATE_CI_STEP) { + eqe_cnt = 0; + set_eq_cons_idx(eq, SPHW_EQ_NOT_ARMED); + } + } + + return true; +} + +static void reschedule_eq_handler(struct sphw_eq *eq) +{ + if (eq->type == SPHW_AEQ) { + struct sphw_aeqs *aeqs = aeq_to_aeqs(eq); + struct workqueue_struct *workq = aeqs->workq; + struct sphw_eq_work *aeq_work = &eq->aeq_work; + + queue_work_on(sphw_get_work_cpu_affinity(eq->hwdev, WORK_TYPE_AEQ), + workq, &aeq_work->work); + } else { + tasklet_schedule(&eq->ceq_tasklet); + } +} + +/** + * eq_irq_handler - handler for the eq event + * @data: the event queue of the event + **/ +static bool eq_irq_handler(void *data) +{ + struct sphw_eq *eq = (struct sphw_eq *)data; + bool uncompleted = false; + + if (eq->type == SPHW_AEQ) + uncompleted = aeq_irq_handler(eq); + else + uncompleted = ceq_irq_handler(eq); + + set_eq_cons_idx(eq, uncompleted ? 
SPHW_EQ_NOT_ARMED : + SPHW_EQ_ARMED); + + return uncompleted; +} + +static struct sphw_eq *find_eq(struct sphw_hwdev *hwdev, int msix_entry_idx) +{ + struct sphw_aeqs *aeqs = hwdev->aeqs; + struct sphw_ceqs *ceqs = hwdev->ceqs; + int i; + + for (i = 0; i < aeqs->num_aeqs; i++) { + struct sphw_eq *eq = &aeqs->aeq[i]; + + if (eq->eq_irq.msix_entry_idx == msix_entry_idx) + return eq; + } + + for (i = 0; i < ceqs->num_ceqs; i++) { + struct sphw_eq *eq = &ceqs->ceq[i]; + + if (eq->eq_irq.msix_entry_idx == msix_entry_idx) + return eq; + } + + return NULL; +} + +/* for windows */ +bool sphw_eq_intr_handler(void *hwdev, int msix_entry_idx) +{ + struct sphw_eq *eq; + + eq = find_eq(hwdev, msix_entry_idx); + if (!eq) { + pr_err("Can't find eq in eq interrupt handler\n"); + return false; + } + + return eq_irq_handler(eq); +} + +/** + * eq_irq_work - eq work for the event + * @work: the work that is associated with the eq + **/ +static void eq_irq_work(struct work_struct *work) +{ + struct sphw_eq_work *aeq_work = + container_of(work, struct sphw_eq_work, work); + + if (eq_irq_handler(aeq_work->data)) + reschedule_eq_handler(aeq_work->data); +} + +/** + * aeq_interrupt - aeq interrupt handler + * @irq: irq number + * @data: the async event queue of the event + **/ +static irqreturn_t aeq_interrupt(int irq, void *data) +{ + struct sphw_eq *aeq = (struct sphw_eq *)data; + struct sphw_hwdev *hwdev = aeq->hwdev; + struct sphw_aeqs *aeqs = aeq_to_aeqs(aeq); + struct workqueue_struct *workq = aeqs->workq; + struct sphw_eq_work *aeq_work = NULL; + + /* clear resend timer cnt register */ + sphw_misx_intr_clear_resend_bit(hwdev, aeq->eq_irq.msix_entry_idx, + EQ_MSIX_RESEND_TIMER_CLEAR); + + aeq_work = &aeq->aeq_work; + aeq_work->data = aeq; + + queue_work_on(sphw_get_work_cpu_affinity(hwdev, WORK_TYPE_AEQ), + workq, &aeq_work->work); + + return IRQ_HANDLED; +} + +/** + * ceq_tasklet - ceq tasklet for the event + * @ceq_data: data that will be used by the tasklet(ceq) + **/ +static void ceq_tasklet(ulong ceq_data) +{ + struct sphw_ceq_tasklet_data *ceq_tasklet_data = + (struct sphw_ceq_tasklet_data *)ceq_data; + struct sphw_eq *eq = (struct sphw_eq *)ceq_tasklet_data->data; + + eq->soft_intr_jif = jiffies; + + if (eq_irq_handler(ceq_tasklet_data->data)) + reschedule_eq_handler(ceq_tasklet_data->data); +} + +/** + * ceq_interrupt - ceq interrupt handler + * @irq: irq number + * @data: the completion event queue of the event + **/ +static irqreturn_t ceq_interrupt(int irq, void *data) +{ + struct sphw_eq *ceq = (struct sphw_eq *)data; + struct sphw_ceq_tasklet_data *ceq_tasklet_data = NULL; + + ceq->hard_intr_jif = jiffies; + + /* clear resend timer counters */ + sphw_misx_intr_clear_resend_bit(ceq->hwdev, ceq->eq_irq.msix_entry_idx, + EQ_MSIX_RESEND_TIMER_CLEAR); + + ceq_tasklet_data = &ceq->ceq_tasklet_data; + ceq_tasklet_data->data = data; + tasklet_schedule(&ceq->ceq_tasklet); + + return IRQ_HANDLED; +} + +/** + * set_eq_ctrls - setting eq's ctrls registers + * @eq: the event queue for setting + **/ +static int set_eq_ctrls(struct sphw_eq *eq) +{ + enum sphw_eq_type type = eq->type; + struct sphw_hwif *hwif = eq->hwdev->hwif; + struct irq_info *eq_irq = &eq->eq_irq; + u32 addr, val, ctrl0, ctrl1, page_size_val, elem_size; + u32 pci_intf_idx = SPHW_PCI_INTF_IDX(hwif); + int err; + + if (type == SPHW_AEQ) { + /* set ctrl0 */ + addr = SPHW_CSR_AEQ_CTRL_0_ADDR; + + val = sphw_hwif_read_reg(hwif, addr); + + val = AEQ_CTRL_0_CLEAR(val, INTR_IDX) & + AEQ_CTRL_0_CLEAR(val, DMA_ATTR) & + AEQ_CTRL_0_CLEAR(val, 
PCI_INTF_IDX) & + AEQ_CTRL_0_CLEAR(val, INTR_MODE); + + ctrl0 = AEQ_CTRL_0_SET(eq_irq->msix_entry_idx, INTR_IDX) | + AEQ_CTRL_0_SET(AEQ_DMA_ATTR_DEFAULT, DMA_ATTR) | + AEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX) | + AEQ_CTRL_0_SET(SPHW_INTR_MODE_ARMED, INTR_MODE); + + val |= ctrl0; + + sphw_hwif_write_reg(hwif, addr, val); + + /* set ctrl1 */ + addr = SPHW_CSR_AEQ_CTRL_1_ADDR; + + page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq); + elem_size = EQ_SET_HW_ELEM_SIZE_VAL(eq); + + ctrl1 = AEQ_CTRL_1_SET(eq->eq_len, LEN) | + AEQ_CTRL_1_SET(elem_size, ELEM_SIZE) | + AEQ_CTRL_1_SET(page_size_val, PAGE_SIZE); + + sphw_hwif_write_reg(hwif, addr, ctrl1); + + } else { + page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq); + ctrl0 = CEQ_CTRL_0_SET(eq_irq->msix_entry_idx, INTR_IDX) | + CEQ_CTRL_0_SET(CEQ_DMA_ATTR_DEFAULT, DMA_ATTR) | + CEQ_CTRL_0_SET(CEQ_LMT_KICK_DEFAULT, LIMIT_KICK) | + CEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX) | + CEQ_CTRL_0_SET(page_size_val, PAGE_SIZE) | + CEQ_CTRL_0_SET(SPHW_INTR_MODE_ARMED, INTR_MODE); + + ctrl1 = CEQ_CTRL_1_SET(eq->eq_len, LEN); + + /* set ceq ctrl reg through mgmt cpu */ + err = sphw_set_ceq_ctrl_reg(eq->hwdev, eq->q_id, ctrl0, ctrl1); + if (err) + return err; + } + + return 0; +} + +/** + * ceq_elements_init - Initialize all the elements in the ceq + * @eq: the event queue + * @init_val: value to init with it the elements + **/ +static void ceq_elements_init(struct sphw_eq *eq, u32 init_val) +{ + u32 *ceqe = NULL; + u32 i; + + for (i = 0; i < eq->eq_len; i++) { + ceqe = GET_CEQ_ELEM(eq, i); + *(ceqe) = cpu_to_be32(init_val); + } + + wmb(); /* Write the init values */ +} + +/** + * aeq_elements_init - initialize all the elements in the aeq + * @eq: the event queue + * @init_val: value to init with it the elements + **/ +static void aeq_elements_init(struct sphw_eq *eq, u32 init_val) +{ + struct sphw_aeq_elem *aeqe = NULL; + u32 i; + + for (i = 0; i < eq->eq_len; i++) { + aeqe = GET_AEQ_ELEM(eq, i); + aeqe->desc = cpu_to_be32(init_val); + } + + wmb(); /* Write the init values */ +} + +static void eq_elements_init(struct sphw_eq *eq, u32 init_val) +{ + if (eq->type == SPHW_AEQ) + aeq_elements_init(eq, init_val); + else + ceq_elements_init(eq, init_val); +} + +/** + * alloc_eq_pages - allocate the pages for the queue + * @eq: the event queue + **/ +static int alloc_eq_pages(struct sphw_eq *eq) +{ + struct sphw_hwif *hwif = eq->hwdev->hwif; + struct sphw_dma_addr_align *eq_page = NULL; + u32 reg, init_val; + u16 pg_idx, i; + int err; + + eq->eq_pages = kcalloc(eq->num_pages, sizeof(*eq->eq_pages), + GFP_KERNEL); + if (!eq->eq_pages) { + sdk_err(eq->hwdev->dev_hdl, "Failed to alloc eq pages description\n"); + return -ENOMEM; + } + + for (pg_idx = 0; pg_idx < eq->num_pages; pg_idx++) { + eq_page = &eq->eq_pages[pg_idx]; + err = sphw_dma_alloc_coherent_align(eq->hwdev->dev_hdl, eq->page_size, + SPHW_MIN_EQ_PAGE_SIZE, GFP_KERNEL, eq_page); + if (err) { + sdk_err(eq->hwdev->dev_hdl, "Failed to alloc eq page, page index: %hu\n", + pg_idx); + goto dma_alloc_err; + } + + reg = SPHW_EQ_HI_PHYS_ADDR_REG(eq->type, pg_idx); + sphw_hwif_write_reg(hwif, reg, upper_32_bits(eq_page->align_paddr)); + + reg = SPHW_EQ_LO_PHYS_ADDR_REG(eq->type, pg_idx); + sphw_hwif_write_reg(hwif, reg, lower_32_bits(eq_page->align_paddr)); + } + + eq->num_elem_in_pg = GET_EQ_NUM_ELEMS(eq, eq->page_size); + if (eq->num_elem_in_pg & (eq->num_elem_in_pg - 1)) { + sdk_err(eq->hwdev->dev_hdl, "Number element in eq page != power of 2\n"); + err = -EINVAL; + goto dma_alloc_err; + } + init_val = EQ_WRAPPED(eq); + + 
eq_elements_init(eq, init_val); + + return 0; + +dma_alloc_err: + for (i = 0; i < pg_idx; i++) + sphw_dma_free_coherent_align(eq->hwdev->dev_hdl, &eq->eq_pages[i]); + + kfree(eq->eq_pages); + + return err; +} + +/** + * free_eq_pages - free the pages of the queue + * @eq: the event queue + **/ +static void free_eq_pages(struct sphw_eq *eq) +{ + u16 pg_idx; + + for (pg_idx = 0; pg_idx < eq->num_pages; pg_idx++) + sphw_dma_free_coherent_align(eq->hwdev->dev_hdl, &eq->eq_pages[pg_idx]); + + kfree(eq->eq_pages); +} + +static inline u32 get_page_size(struct sphw_eq *eq) +{ + u32 total_size; + u32 count; + + total_size = ALIGN((eq->eq_len * eq->elem_size), + SPHW_MIN_EQ_PAGE_SIZE); + + if (total_size <= (SPHW_EQ_MAX_PAGES(eq) * SPHW_MIN_EQ_PAGE_SIZE)) + return SPHW_MIN_EQ_PAGE_SIZE; + + count = (u32)(ALIGN((total_size / SPHW_EQ_MAX_PAGES(eq)), + SPHW_MIN_EQ_PAGE_SIZE) / SPHW_MIN_EQ_PAGE_SIZE); + + /* round up to nearest power of two */ + count = 1U << (u8)fls((int)(count - 1)); + + return ((u32)SPHW_MIN_EQ_PAGE_SIZE) * count; +} + +static int request_eq_irq(struct sphw_eq *eq, struct irq_info *entry) +{ + int err = 0; + + if (eq->type == SPHW_AEQ) { + struct sphw_eq_work *aeq_work = &eq->aeq_work; + + INIT_WORK(&aeq_work->work, eq_irq_work); + } else { + tasklet_init(&eq->ceq_tasklet, ceq_tasklet, + (ulong)(&eq->ceq_tasklet_data)); + } + + if (eq->type == SPHW_AEQ) { + snprintf(eq->irq_name, sizeof(eq->irq_name), + "sphw_aeq%u@pci:%s", eq->q_id, + pci_name(eq->hwdev->pcidev_hdl)); + + err = request_irq(entry->irq_id, aeq_interrupt, 0UL, + eq->irq_name, eq); + } else { + snprintf(eq->irq_name, sizeof(eq->irq_name), + "sphw_ceq%u@pci:%s", eq->q_id, + pci_name(eq->hwdev->pcidev_hdl)); + err = request_irq(entry->irq_id, ceq_interrupt, 0UL, + eq->irq_name, eq); + } + + return err; +} + +static void reset_eq(struct sphw_eq *eq) +{ + /* clear eq_len to force eqe drop in hardware */ + if (eq->type == SPHW_AEQ) + sphw_hwif_write_reg(eq->hwdev->hwif, SPHW_CSR_AEQ_CTRL_1_ADDR, 0); + else + sphw_set_ceq_ctrl_reg(eq->hwdev, eq->q_id, 0, 0); + + wmb(); /* clear eq_len before clear prod idx */ + + sphw_hwif_write_reg(eq->hwdev->hwif, EQ_PROD_IDX_REG_ADDR(eq), 0); +} + +/** + * init_eq - initialize eq + * @eq: the event queue + * @hwdev: the pointer to hw device + * @q_id: Queue id number + * @q_len: the number of EQ elements + * @type: the type of the event queue, ceq or aeq + * @entry: msix entry associated with the event queue + * Return: 0 - Success, Negative - failure + **/ +static int init_eq(struct sphw_eq *eq, struct sphw_hwdev *hwdev, u16 q_id, + u32 q_len, enum sphw_eq_type type, struct irq_info *entry) +{ + int err = 0; + + eq->hwdev = hwdev; + eq->q_id = q_id; + eq->type = type; + eq->eq_len = q_len; + + /* Indirect access should set q_id first */ + sphw_hwif_write_reg(hwdev->hwif, SPHW_EQ_INDIR_IDX_ADDR(eq->type), eq->q_id); + wmb(); /* write index before config */ + + reset_eq(eq); + + eq->cons_idx = 0; + eq->wrapped = 0; + + eq->elem_size = (type == SPHW_AEQ) ? 
SPHW_AEQE_SIZE : SPHW_CEQE_SIZE; + + eq->page_size = get_page_size(eq); + eq->orig_page_size = eq->page_size; + eq->num_pages = GET_EQ_NUM_PAGES(eq, eq->page_size); + if (eq->num_pages > SPHW_EQ_MAX_PAGES(eq)) { + sdk_err(hwdev->dev_hdl, "Number pages: %u too many pages for eq\n", + eq->num_pages); + return -EINVAL; + } + + err = alloc_eq_pages(eq); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to allocate pages for eq\n"); + return err; + } + + eq->eq_irq.msix_entry_idx = entry->msix_entry_idx; + eq->eq_irq.irq_id = entry->irq_id; + + err = set_eq_ctrls(eq); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to set ctrls for eq\n"); + goto init_eq_ctrls_err; + } + + set_eq_cons_idx(eq, SPHW_EQ_ARMED); + + err = request_eq_irq(eq, entry); + if (err) { + sdk_err(hwdev->dev_hdl, + "Failed to request irq for the eq, err: %d\n", err); + goto req_irq_err; + } + + sphw_set_msix_state(hwdev, entry->msix_entry_idx, SPHW_MSIX_DISABLE); + + return 0; + +init_eq_ctrls_err: +req_irq_err: + free_eq_pages(eq); + return err; +} + +/** + * remove_eq - remove eq + * @eq: the event queue + **/ +static void remove_eq(struct sphw_eq *eq) +{ + struct irq_info *entry = &eq->eq_irq; + + sphw_set_msix_state(eq->hwdev, entry->msix_entry_idx, SPHW_MSIX_DISABLE); + synchronize_irq(entry->irq_id); + + free_irq(entry->irq_id, eq); + + /* Indirect access should set q_id first */ + sphw_hwif_write_reg(eq->hwdev->hwif, SPHW_EQ_INDIR_IDX_ADDR(eq->type), eq->q_id); + + wmb(); /* write index before config */ + + if (eq->type == SPHW_AEQ) { + struct sphw_eq_work *aeq_work = &eq->aeq_work; + + cancel_work_sync(&aeq_work->work); + + /* clear eq_len to avoid hw access host memory */ + sphw_hwif_write_reg(eq->hwdev->hwif, SPHW_CSR_AEQ_CTRL_1_ADDR, 0); + } else { + tasklet_kill(&eq->ceq_tasklet); + + sphw_set_ceq_ctrl_reg(eq->hwdev, eq->q_id, 0, 0); + } + + /* update cons_idx to avoid invalid interrupt */ + eq->cons_idx = sphw_hwif_read_reg(eq->hwdev->hwif, EQ_PROD_IDX_REG_ADDR(eq)); + set_eq_cons_idx(eq, SPHW_EQ_NOT_ARMED); + + free_eq_pages(eq); +} + +/** + * sphw_aeqs_init - init all the aeqs + * @hwdev: the pointer to hw device + * @num_aeqs: number of AEQs + * @msix_entries: msix entries associated with the event queues + * Return: 0 - Success, Negative - failure + **/ +int sphw_aeqs_init(struct sphw_hwdev *hwdev, u16 num_aeqs, struct irq_info *msix_entries) +{ + struct sphw_aeqs *aeqs = NULL; + int err; + u16 i, q_id; + + if (!hwdev) + return -EINVAL; + + aeqs = kzalloc(sizeof(*aeqs), GFP_KERNEL); + if (!aeqs) + return -ENOMEM; + + hwdev->aeqs = aeqs; + aeqs->hwdev = hwdev; + aeqs->num_aeqs = num_aeqs; + aeqs->workq = alloc_workqueue(SPHW_EQS_WQ_NAME, WQ_MEM_RECLAIM, SPHW_MAX_AEQS); + if (!aeqs->workq) { + sdk_err(hwdev->dev_hdl, "Failed to initialize aeq workqueue\n"); + err = -ENOMEM; + goto create_work_err; + } + + if (g_aeq_len < SPHW_MIN_AEQ_LEN || g_aeq_len > SPHW_MAX_AEQ_LEN) { + sdk_warn(hwdev->dev_hdl, "Module Parameter g_aeq_len value %u out of range, resetting to %d\n", + g_aeq_len, SPHW_DEFAULT_AEQ_LEN); + g_aeq_len = SPHW_DEFAULT_AEQ_LEN; + } + + for (q_id = 0; q_id < num_aeqs; q_id++) { + err = init_eq(&aeqs->aeq[q_id], hwdev, q_id, g_aeq_len, + SPHW_AEQ, &msix_entries[q_id]); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init aeq %u\n", + q_id); + goto init_aeq_err; + } + } + for (q_id = 0; q_id < num_aeqs; q_id++) + sphw_set_msix_state(hwdev, msix_entries[q_id].msix_entry_idx, SPHW_MSIX_ENABLE); + + return 0; + +init_aeq_err: + for (i = 0; i < q_id; i++) + remove_eq(&aeqs->aeq[i]); + + 
destroy_workqueue(aeqs->workq); + +create_work_err: + kfree(aeqs); + + return err; +} + +/** + * sphw_aeqs_free - free all the aeqs + * @hwdev: the pointer to hw device + **/ +void sphw_aeqs_free(struct sphw_hwdev *hwdev) +{ + struct sphw_aeqs *aeqs = hwdev->aeqs; + enum sphw_aeq_type aeq_event = SPHW_HW_INTER_INT; + enum sphw_aeq_sw_type sw_aeq_event = SPHW_STATELESS_EVENT; + u16 q_id; + + for (q_id = 0; q_id < aeqs->num_aeqs; q_id++) + remove_eq(&aeqs->aeq[q_id]); + + for (; sw_aeq_event < SPHW_MAX_AEQ_SW_EVENTS; sw_aeq_event++) + sphw_aeq_unregister_swe_cb(hwdev, sw_aeq_event); + + for (; aeq_event < SPHW_MAX_AEQ_EVENTS; aeq_event++) + sphw_aeq_unregister_hw_cb(hwdev, aeq_event); + + destroy_workqueue(aeqs->workq); + + kfree(aeqs); +} + +/** + * sphw_ceqs_init - init all the ceqs + * @hwdev: the pointer to hw device + * @num_ceqs: number of CEQs + * @msix_entries: msix entries associated with the event queues + * Return: 0 - Success, Negative - failure + **/ +int sphw_ceqs_init(struct sphw_hwdev *hwdev, u16 num_ceqs, struct irq_info *msix_entries) +{ + struct sphw_ceqs *ceqs; + int err; + u16 i, q_id; + + ceqs = kzalloc(sizeof(*ceqs), GFP_KERNEL); + if (!ceqs) + return -ENOMEM; + + hwdev->ceqs = ceqs; + + ceqs->hwdev = hwdev; + ceqs->num_ceqs = num_ceqs; + + if (g_ceq_len < SPHW_MIN_CEQ_LEN || g_ceq_len > SPHW_MAX_CEQ_LEN) { + sdk_warn(hwdev->dev_hdl, "Module Parameter g_ceq_len value %u out of range, resetting to %d\n", + g_ceq_len, SPHW_DEFAULT_CEQ_LEN); + g_ceq_len = SPHW_DEFAULT_CEQ_LEN; + } + + if (!g_num_ceqe_in_tasklet) { + sdk_warn(hwdev->dev_hdl, "Module Parameter g_num_ceqe_in_tasklet can not be zero, resetting to %d\n", + SPHW_TASK_PROCESS_EQE_LIMIT); + g_num_ceqe_in_tasklet = SPHW_TASK_PROCESS_EQE_LIMIT; + } + for (q_id = 0; q_id < num_ceqs; q_id++) { + err = init_eq(&ceqs->ceq[q_id], hwdev, q_id, g_ceq_len, + SPHW_CEQ, &msix_entries[q_id]); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init ceq %u\n", + q_id); + goto init_ceq_err; + } + } + for (q_id = 0; q_id < num_ceqs; q_id++) + sphw_set_msix_state(hwdev, msix_entries[q_id].msix_entry_idx, SPHW_MSIX_ENABLE); + + return 0; + +init_ceq_err: + for (i = 0; i < q_id; i++) + remove_eq(&ceqs->ceq[i]); + + kfree(ceqs); + + return err; +} + +/** + * sphw_ceqs_free - free all the ceqs + * @hwdev: the pointer to hw device + **/ +void sphw_ceqs_free(struct sphw_hwdev *hwdev) +{ + struct sphw_ceqs *ceqs = hwdev->ceqs; + enum sphw_ceq_event ceq_event = SPHW_CMDQ; + u16 q_id; + + for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) + remove_eq(&ceqs->ceq[q_id]); + + for (; ceq_event < SPHW_MAX_CEQ_EVENTS; ceq_event++) + sphw_ceq_unregister_cb(hwdev, ceq_event); + + kfree(ceqs); +} + +void sphw_get_ceq_irqs(struct sphw_hwdev *hwdev, struct irq_info *irqs, u16 *num_irqs) +{ + struct sphw_ceqs *ceqs = hwdev->ceqs; + u16 q_id; + + for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) { + irqs[q_id].irq_id = ceqs->ceq[q_id].eq_irq.irq_id; + irqs[q_id].msix_entry_idx = + ceqs->ceq[q_id].eq_irq.msix_entry_idx; + } + + *num_irqs = ceqs->num_ceqs; +} + +void sphw_get_aeq_irqs(struct sphw_hwdev *hwdev, struct irq_info *irqs, u16 *num_irqs) +{ + struct sphw_aeqs *aeqs = hwdev->aeqs; + u16 q_id; + + for (q_id = 0; q_id < aeqs->num_aeqs; q_id++) { + irqs[q_id].irq_id = aeqs->aeq[q_id].eq_irq.irq_id; + irqs[q_id].msix_entry_idx = + aeqs->aeq[q_id].eq_irq.msix_entry_idx; + } + + *num_irqs = aeqs->num_aeqs; +} + +void sphw_dump_aeq_info(struct sphw_hwdev *hwdev) +{ + struct sphw_aeq_elem *aeqe_pos = NULL; + struct sphw_eq *eq = NULL; + u32 addr, ci, pi, ctrl0, 
idx; + int q_id; + + for (q_id = 0; q_id < hwdev->aeqs->num_aeqs; q_id++) { + eq = &hwdev->aeqs->aeq[q_id]; + /* Indirect access should set q_id first */ + sphw_hwif_write_reg(eq->hwdev->hwif, SPHW_EQ_INDIR_IDX_ADDR(eq->type), eq->q_id); + wmb(); /* write index before config */ + + addr = SPHW_CSR_AEQ_CTRL_0_ADDR; + + ctrl0 = sphw_hwif_read_reg(hwdev->hwif, addr); + + idx = sphw_hwif_read_reg(hwdev->hwif, SPHW_EQ_INDIR_IDX_ADDR(eq->type)); + + addr = EQ_CONS_IDX_REG_ADDR(eq); + ci = sphw_hwif_read_reg(hwdev->hwif, addr); + addr = EQ_PROD_IDX_REG_ADDR(eq); + pi = sphw_hwif_read_reg(hwdev->hwif, addr); + aeqe_pos = GET_CURR_AEQ_ELEM(eq); + sdk_err(hwdev->dev_hdl, "Aeq id: %d, idx: %u, ctrl0: 0x%08x, ci: 0x%08x, pi: 0x%x, work_state: 0x%x, wrap: %u, desc: 0x%x\n", + q_id, idx, ctrl0, ci, pi, work_busy(&eq->aeq_work.work), + eq->wrapped, be32_to_cpu(aeqe_pos->desc)); + } +} + +void sphw_dump_ceq_info(struct sphw_hwdev *hwdev) +{ + struct sphw_eq *eq = NULL; + u32 addr, ci, pi; + int q_id; + + for (q_id = 0; q_id < hwdev->ceqs->num_ceqs; q_id++) { + eq = &hwdev->ceqs->ceq[q_id]; + /* Indirect access should set q_id first */ + sphw_hwif_write_reg(eq->hwdev->hwif, SPHW_EQ_INDIR_IDX_ADDR(eq->type), eq->q_id); + wmb(); /* write index before config */ + + addr = EQ_CONS_IDX_REG_ADDR(eq); + ci = sphw_hwif_read_reg(hwdev->hwif, addr); + addr = EQ_PROD_IDX_REG_ADDR(eq); + pi = sphw_hwif_read_reg(hwdev->hwif, addr); + sdk_err(hwdev->dev_hdl, "Ceq id: %d, ci: 0x%08x, sw_ci: 0x%08x, pi: 0x%x, tasklet_state: 0x%lx, wrap: %u, ceqe: 0x%x\n", + q_id, ci, eq->cons_idx, pi, eq->ceq_tasklet.state, + eq->wrapped, be32_to_cpu(*(GET_CURR_CEQ_ELEM(eq)))); + + sdk_err(hwdev->dev_hdl, "Ceq last response hard interrupt time: %u\n", + jiffies_to_msecs(jiffies - eq->hard_intr_jif)); + sdk_err(hwdev->dev_hdl, "Ceq last response soft interrupt time: %u\n", + jiffies_to_msecs(jiffies - eq->soft_intr_jif)); + } +} + +int sphw_get_ceq_info(void *hwdev, u16 q_id, struct sphw_ceq_info *ceq_info) +{ + struct sphw_hwdev *dev = hwdev; + struct sphw_eq *eq = NULL; + + if (!hwdev || !ceq_info) + return -EINVAL; + + if (q_id >= dev->ceqs->num_ceqs) + return -EINVAL; + + eq = &dev->ceqs->ceq[q_id]; + ceq_info->q_len = eq->eq_len; + ceq_info->num_pages = eq->num_pages; + ceq_info->page_size = eq->page_size; + ceq_info->num_elem_in_pg = eq->num_elem_in_pg; + ceq_info->elem_size = eq->elem_size; + sdk_info(dev->dev_hdl, "get_ceq_info: qid=0x%x page_size=%ul\n", + q_id, eq->page_size); + + return 0; +} + +int sphw_get_ceq_page_phy_addr(void *hwdev, u16 q_id, u16 page_idx, u64 *page_phy_addr) +{ + struct sphw_hwdev *dev = hwdev; + struct sphw_eq *eq = NULL; + + if (!hwdev || !page_phy_addr) + return -EINVAL; + + if (q_id >= dev->ceqs->num_ceqs) + return -EINVAL; + + eq = &dev->ceqs->ceq[q_id]; + *page_phy_addr = eq->eq_pages[page_idx].align_paddr; + sdk_info(dev->dev_hdl, "ceq_page_phy_addr: 0x%llx page_idx=%u\n", + eq->eq_pages[page_idx].align_paddr, page_idx); + + return 0; +} + +int sphw_set_ceq_irq_disable(void *hwdev, u16 q_id) +{ + struct sphw_hwdev *dev = hwdev; + struct sphw_eq *ceq = NULL; + + if (!hwdev) + return -EINVAL; + + if (q_id >= dev->ceqs->num_ceqs) + return -EINVAL; + + ceq = &dev->ceqs->ceq[q_id]; + + sphw_set_msix_state(ceq->hwdev, ceq->eq_irq.msix_entry_idx, SPHW_MSIX_DISABLE); + + return 0; +} diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_eqs.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_eqs.h new file mode 100644 index 000000000000..df25b5a5fdcf --- /dev/null +++ 
b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_eqs.h @@ -0,0 +1,157 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#ifndef SPHW_EQS_H +#define SPHW_EQS_H + +#define SPHW_MAX_AEQS 4 +#define SPHW_MAX_CEQS 32 + +#define SPHW_AEQ_MAX_PAGES 4 +#define SPHW_CEQ_MAX_PAGES 8 + +#define SPHW_AEQE_SIZE 64 +#define SPHW_CEQE_SIZE 4 + +#define SPHW_AEQE_DESC_SIZE 4 +#define SPHW_AEQE_DATA_SIZE \ + (SPHW_AEQE_SIZE - SPHW_AEQE_DESC_SIZE) + +#define SPHW_DEFAULT_AEQ_LEN 4096 +#define SPHW_DEFAULT_CEQ_LEN 8192 + +#define SPHW_MIN_EQ_PAGE_SIZE 0x1000 /* min eq page size 4K Bytes */ +#define SPHW_MAX_EQ_PAGE_SIZE 0x400000 /* max eq page size 4M Bytes */ + +#define SPHW_MIN_AEQ_LEN 64 +#define SPHW_MAX_AEQ_LEN \ + ((SPHW_MAX_EQ_PAGE_SIZE / SPHW_AEQE_SIZE) * SPHW_AEQ_MAX_PAGES) + +#define SPHW_MIN_CEQ_LEN 64 +#define SPHW_MAX_CEQ_LEN \ + ((SPHW_MAX_EQ_PAGE_SIZE / SPHW_CEQE_SIZE) * SPHW_CEQ_MAX_PAGES) +#define SPHW_CEQ_ID_CMDQ 0 + +#define EQ_IRQ_NAME_LEN 64 + +#define EQ_USLEEP_LOW_BOUND 900 +#define EQ_USLEEP_HIG_BOUND 1000 + +enum sphw_eq_type { + SPHW_AEQ, + SPHW_CEQ +}; + +enum sphw_eq_intr_mode { + SPHW_INTR_MODE_ARMED, + SPHW_INTR_MODE_ALWAYS, +}; + +enum sphw_eq_ci_arm_state { + SPHW_EQ_NOT_ARMED, + SPHW_EQ_ARMED, +}; + +struct sphw_eq_work { + struct work_struct work; + void *data; +}; + +struct sphw_ceq_tasklet_data { + void *data; +}; + +struct sphw_eq { + struct sphw_hwdev *hwdev; + u16 q_id; + enum sphw_eq_type type; + u32 page_size; + u32 orig_page_size; + u32 eq_len; + + u32 cons_idx; + u16 wrapped; + + u16 elem_size; + u16 num_pages; + u32 num_elem_in_pg; + + struct irq_info eq_irq; + char irq_name[EQ_IRQ_NAME_LEN]; + + struct sphw_dma_addr_align *eq_pages; + + struct sphw_eq_work aeq_work; + struct tasklet_struct ceq_tasklet; + struct sphw_ceq_tasklet_data ceq_tasklet_data; + + u64 hard_intr_jif; + u64 soft_intr_jif; +}; + +struct sphw_aeq_elem { + u8 aeqe_data[SPHW_AEQE_DATA_SIZE]; + u32 desc; +}; + +enum sphw_aeq_cb_state { + SPHW_AEQ_HW_CB_REG = 0, + SPHW_AEQ_HW_CB_RUNNING, + SPHW_AEQ_SW_CB_REG, + SPHW_AEQ_SW_CB_RUNNING, +}; + +struct sphw_aeqs { + struct sphw_hwdev *hwdev; + + sphw_aeq_hwe_cb aeq_hwe_cb[SPHW_MAX_AEQ_EVENTS]; + sphw_aeq_swe_cb aeq_swe_cb[SPHW_MAX_AEQ_SW_EVENTS]; + unsigned long aeq_hw_cb_state[SPHW_MAX_AEQ_EVENTS]; + unsigned long aeq_sw_cb_state[SPHW_MAX_AEQ_SW_EVENTS]; + + struct sphw_eq aeq[SPHW_MAX_AEQS]; + u16 num_aeqs; + struct workqueue_struct *workq; +}; + +enum sphw_ceq_cb_state { + SPHW_CEQ_CB_REG = 0, + SPHW_CEQ_CB_RUNNING, +}; + +struct sphw_ceqs { + struct sphw_hwdev *hwdev; + + sphw_ceq_event_cb ceq_cb[SPHW_MAX_CEQ_EVENTS]; + void *ceq_data[SPHW_MAX_CEQ_EVENTS]; + unsigned long ceq_cb_state[SPHW_MAX_CEQ_EVENTS]; + + struct sphw_eq ceq[SPHW_MAX_CEQS]; + u16 num_ceqs; +}; + +struct sphw_ceq_info { + u32 q_len; + u32 page_size; + u16 elem_size; + u16 num_pages; + u32 num_elem_in_pg; +}; + +int sphw_aeqs_init(struct sphw_hwdev *hwdev, u16 num_aeqs, struct irq_info *msix_entries); + +void sphw_aeqs_free(struct sphw_hwdev *hwdev); + +int sphw_ceqs_init(struct sphw_hwdev *hwdev, u16 num_ceqs, struct irq_info *msix_entries); + +void sphw_ceqs_free(struct sphw_hwdev *hwdev); + +void sphw_get_ceq_irqs(struct sphw_hwdev *hwdev, struct irq_info *irqs, u16 *num_irqs); + +void sphw_get_aeq_irqs(struct sphw_hwdev *hwdev, struct irq_info *irqs, u16 *num_irqs); + +void sphw_dump_ceq_info(struct sphw_hwdev *hwdev); + +void sphw_dump_aeq_info(struct sphw_hwdev *hwdev); + +#endif diff --git 
a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw.h new file mode 100644 index 000000000000..2fc99c5f6096 --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw.h @@ -0,0 +1,649 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#ifndef SPHW_HW_H +#define SPHW_HW_H + +#include "sphw_comm_cmd.h" +#include "sphw_comm_msg_intf.h" +#include "sphw_crm.h" + +enum sphw_mod_type { + SPHW_MOD_COMM = 0, /* HW communication module */ + SPHW_MOD_L2NIC = 1, /* L2NIC module */ + SPHW_MOD_ROCE = 2, + SPHW_MOD_PLOG = 3, + SPHW_MOD_TOE = 4, + SPHW_MOD_FLR = 5, + SPHW_MOD_FC = 6, + SPHW_MOD_CFGM = 7, /* Configuration module */ + SPHW_MOD_CQM = 8, + SPHW_MOD_VSWITCH = 9, + COMM_MOD_FC = 10, + SPHW_MOD_OVS = 11, + SPHW_MOD_DSW = 12, + SPHW_MOD_MIGRATE = 13, + SPHW_MOD_HILINK = 14, + SPHW_MOD_CRYPT = 15, /* secure crypto module */ + SPHW_MOD_VIO = 16, + SPHW_MOD_DFT = 17, /* DFT */ + SPHW_MOD_HW_MAX = 18, /* hardware max module id */ + /* Software module id, for PF/VF and multi-host */ + SPHW_MOD_SW_FUNC = 19, + SPHW_MOD_IOE = 20, + SPHW_MOD_MAX, +}; + +/* to use 0-level CLA, page size must be: SQ 16B(wqe) * 64k(max_q_depth) */ +#define SPHW_DEFAULT_WQ_PAGE_SIZE 0x100000 +#define SPHW_HW_WQ_PAGE_SIZE 0x1000 +#define SPHW_MAX_WQ_PAGE_SIZE_ORDER 8 + +enum sphw_channel_id { + SPHW_CHANNEL_DEFAULT, + SPHW_CHANNEL_COMM, + SPHW_CHANNEL_NIC, + SPHW_CHANNEL_ROCE, + SPHW_CHANNEL_TOE, + SPHW_CHANNEL_FC, + SPHW_CHANNEL_OVS, + SPHW_CHANNEL_DSW, + SPHW_CHANNEL_MIG, + SPHW_CHANNEL_CRYPT, + + SPHW_CHANNEL_MAX = 32, +}; + +struct sphw_cmd_buf { + void *buf; + dma_addr_t dma_addr; + u16 size; + /* Usage count, USERS DO NOT USE */ + atomic_t ref_cnt; +}; + +enum sphw_aeq_type { + SPHW_HW_INTER_INT = 0, + SPHW_MBX_FROM_FUNC = 1, + SPHW_MSG_FROM_MGMT_CPU = 2, + SPHW_API_RSP = 3, + SPHW_API_CHAIN_STS = 4, + SPHW_MBX_SEND_RSLT = 5, + SPHW_MAX_AEQ_EVENTS +}; + +#define SPHW_NIC_FATAL_ERROR_MAX 0x8U + +enum sphw_aeq_sw_type { + SPHW_STATELESS_EVENT = 0, + SPHW_STATEFULL_EVENT = 1, + SPHW_MAX_AEQ_SW_EVENTS +}; + +typedef void (*sphw_aeq_hwe_cb)(void *handle, u8 *data, u8 size); +typedef u8 (*sphw_aeq_swe_cb)(void *handle, u8 event, u8 *data); + +/** + * @brief sphw_aeq_register_hw_cb - register aeq hardware callback + * @param hwdev: device pointer to hwdev + * @param event: event type + * @param hwe_cb: callback function + * @retval zero: success + * @retval non-zero: failure + **/ +int sphw_aeq_register_hw_cb(void *hwdev, enum sphw_aeq_type event, sphw_aeq_hwe_cb hwe_cb); + +/** + * @brief sphw_aeq_unregister_hw_cb - unregister aeq hardware callback + * @param hwdev: device pointer to hwdev + * @param event: event type + **/ +void sphw_aeq_unregister_hw_cb(void *hwdev, enum sphw_aeq_type event); + +/** + * @brief sphw_aeq_register_swe_cb - register aeq soft event callback + * @param hwdev: device pointer to hwdev + * @param event: event type + * @param aeq_swe_cb: callback function + * @retval zero: success + * @retval non-zero: failure + **/ +int sphw_aeq_register_swe_cb(void *hwdev, enum sphw_aeq_sw_type event, sphw_aeq_swe_cb aeq_swe_cb); + +/** + * @brief sphw_aeq_unregister_swe_cb - unregister aeq soft event callback + * @param hwdev: device pointer to hwdev + * @param event: event type + **/ +void sphw_aeq_unregister_swe_cb(void *hwdev, enum sphw_aeq_sw_type event); + +enum sphw_ceq_event { + SPHW_NON_L2NIC_SCQ, + SPHW_NON_L2NIC_ECQ, + SPHW_NON_L2NIC_NO_CQ_EQ, + SPHW_CMDQ, + SPHW_L2NIC_SQ, + SPHW_L2NIC_RQ, + 
SPHW_MAX_CEQ_EVENTS, +}; + +typedef void (*sphw_ceq_event_cb)(void *handle, u32 ceqe_data); + +/** + * @brief sphw_ceq_register_cb - register ceq callback + * @param hwdev: device pointer to hwdev + * @param event: event type + * @param callback: callback function + * @retval zero: success + * @retval non-zero: failure + **/ +int sphw_ceq_register_cb(void *hwdev, enum sphw_ceq_event event, sphw_ceq_event_cb callback); +/** + * @brief sphw_ceq_unregister_cb - unregister ceq callback + * @param hwdev: device pointer to hwdev + * @param event: event type + **/ +void sphw_ceq_unregister_cb(void *hwdev, enum sphw_ceq_event event); + +typedef int (*sphw_vf_mbox_cb)(void *handle, void *pri_handle, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size); + +typedef int (*sphw_pf_mbox_cb)(void *handle, void *pri_handle, u16 vf_id, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size); + +typedef int (*sphw_ppf_mbox_cb)(void *handle, void *pri_handle, + u16 pf_idx, u16 vf_id, u16 cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size); + +typedef int (*sphw_pf_recv_from_ppf_mbox_cb)(void *handle, void *pri_handle, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size); + +/** + * @brief sphw_register_ppf_mbox_cb - ppf register mbox msg callback + * @param hwdev: device pointer to hwdev + * @param mod: mod type + * @param pri_handle: private data will be used by the callback + * @param callback: callback function + * @retval zero: success + * @retval non-zero: failure + **/ +int sphw_register_ppf_mbox_cb(void *hwdev, u8 mod, void *pri_handle, sphw_ppf_mbox_cb callback); + +/** + * @brief sphw_register_pf_mbox_cb - pf register mbox msg callback + * @param hwdev: device pointer to hwdev + * @param mod: mod type + * @param pri_handle: private data will be used by the callback + * @param callback: callback function + * @retval zero: success + * @retval non-zero: failure + **/ +int sphw_register_pf_mbox_cb(void *hwdev, u8 mod, void *pri_handle, sphw_pf_mbox_cb callback); +/** + * @brief sphw_register_vf_mbox_cb - vf register mbox msg callback + * @param hwdev: device pointer to hwdev + * @param mod: mod type + * @param pri_handle: private data will be used by the callback + * @param callback: callback function + * @retval zero: success + * @retval non-zero: failure + **/ +int sphw_register_vf_mbox_cb(void *hwdev, u8 mod, void *pri_handle, sphw_vf_mbox_cb callback); +/** + * @brief sphw_register_ppf_to_pf_mbox_cb - register mbox msg callback + * @param hwdev: device pointer to hwdev + * @param mod: mod type + * @param pri_handle: private data will be used by the callback + * @param callback: callback function + * @retval zero: success + * @retval non-zero: failure + **/ +int sphw_register_ppf_to_pf_mbox_cb(void *hwdev, u8 mod, void *pri_handle, + sphw_pf_recv_from_ppf_mbox_cb callback); + +/** + * @brief sphw_unregister_ppf_mbox_cb - ppf unregister mbox msg callback + * @param hwdev: device pointer to hwdev + * @param mod: mod type + **/ +void sphw_unregister_ppf_mbox_cb(void *hwdev, u8 mod); + +/** + * @brief sphw_unregister_pf_mbox_cb - pf unregister mbox msg callback + * @param hwdev: device pointer to hwdev + * @param mod: mod type + **/ +void sphw_unregister_pf_mbox_cb(void *hwdev, u8 mod); + +/** + * @brief sphw_unregister_vf_mbox_cb - vf unregister mbox msg callback + * @param hwdev: device pointer to hwdev + * @param mod: mod type + **/ +void sphw_unregister_vf_mbox_cb(void *hwdev, u8 mod); + +/** + * @brief sphw_unregister_ppf_to_pf_mbox_cb - unregister
mbox msg callback + * @param hwdev: device pointer to hwdev + * @param mod: mod type + **/ +void sphw_unregister_ppf_to_pf_mbox_cb(void *hwdev, u8 mod); + +typedef void (*sphw_mgmt_msg_cb)(void *hwdev, void *pri_handle, u16 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); + +/** + * @brief sphw_register_mgmt_msg_cb - register mgmt msg callback + * @param hwdev: device pointer to hwdev + * @param mod: mod type + * @param pri_handle: private data will be used by the callback + * @param callback: callback function + * @retval zero: success + * @retval non-zero: failure + **/ +int sphw_register_mgmt_msg_cb(void *hwdev, u8 mod, void *pri_handle, sphw_mgmt_msg_cb callback); + +/** + * @brief sphw_unregister_mgmt_msg_cb - unregister mgmt msg callback + * @param hwdev: device pointer to hwdev + * @param mod: mod type + **/ +void sphw_unregister_mgmt_msg_cb(void *hwdev, u8 mod); + +/** + * @brief sphw_register_service_adapter - register service adapter + * @param hwdev: device pointer to hwdev + * @param service_adapter: service adapter + * @param type: service type + * @retval zero: success + * @retval non-zero: failure + **/ +int sphw_register_service_adapter(void *hwdev, void *service_adapter, + enum sphw_service_type type); + +/** + * @brief sphw_unregister_service_adapter - unregister service adapter + * @param hwdev: device pointer to hwdev + * @param type: service type + **/ +void sphw_unregister_service_adapter(void *hwdev, enum sphw_service_type type); + +/** + * @brief sphw_get_service_adapter - get service adapter + * @param hwdev: device pointer to hwdev + * @param type: service type + * @retval non-zero: success + * @retval null: failure + **/ +void *sphw_get_service_adapter(void *hwdev, enum sphw_service_type type); + +/** + * @brief sphw_alloc_db_phy_addr - alloc doorbell & direct wqe pyhsical addr + * @param hwdev: device pointer to hwdev + * @param db_base: pointer to alloc doorbell base address + * @param dwqe_base: pointer to alloc direct base address + * @retval zero: success + * @retval non-zero: failure + **/ +int sphw_alloc_db_phy_addr(void *hwdev, u64 *db_base, u64 *dwqe_base); + +/** + * @brief sphw_free_db_phy_addr - free doorbell & direct wqe physical address + * @param hwdev: device pointer to hwdev + * @param db_base: pointer to free doorbell base address + * @param dwqe_base: pointer to free direct base address + **/ +void sphw_free_db_phy_addr(void *hwdev, u64 db_base, u64 dwqe_base); + +/** + * @brief sphw_alloc_db_addr - alloc doorbell & direct wqe + * @param hwdev: device pointer to hwdev + * @param db_base: pointer to alloc doorbell base address + * @param dwqe_base: pointer to alloc direct base address + * @retval zero: success + * @retval non-zero: failure + **/ +int sphw_alloc_db_addr(void *hwdev, void __iomem **db_base, void __iomem **dwqe_base); + +/** + * @brief sphw_free_db_addr - free doorbell & direct wqe + * @param hwdev: device pointer to hwdev + * @param db_base: pointer to free doorbell base address + * @param dwqe_base: pointer to free direct base address + **/ +void sphw_free_db_addr(void *hwdev, const void __iomem *db_base, void __iomem *dwqe_base); + +/** + * @brief sphw_alloc_db_phy_addr - alloc physical doorbell & direct wqe + * @param hwdev: device pointer to hwdev + * @param db_base: pointer to alloc doorbell base address + * @param dwqe_base: pointer to alloc direct base address + * @retval zero: success + * @retval non-zero: failure + **/ +int sphw_alloc_db_phy_addr(void *hwdev, u64 *db_base, u64 *dwqe_base); + +/** + * 
@brief sphw_free_db_phy_addr - free physical doorbell & direct wqe + * @param hwdev: device pointer to hwdev + * @param db_base: free doorbell base address + * @param dwqe_base: free direct base address + **/ + +void sphw_free_db_phy_addr(void *hwdev, u64 db_base, u64 dwqe_base); + +/** + * @brief sphw_set_root_ctxt - set root context + * @param hwdev: device pointer to hwdev + * @param rq_depth: rq depth + * @param sq_depth: sq depth + * @param rx_buf_sz: rx buffer size + * @param channel: channel id + * @retval zero: success + * @retval non-zero: failure + **/ +int sphw_set_root_ctxt(void *hwdev, u32 rq_depth, u32 sq_depth, int rx_buf_sz, u16 channel); + +/** + * @brief sphw_clean_root_ctxt - clean root context + * @param hwdev: device pointer to hwdev + * @param channel: channel id + * @retval zero: success + * @retval non-zero: failure + **/ +int sphw_clean_root_ctxt(void *hwdev, u16 channel); + +/** + * @brief sphw_alloc_cmd_buf - alloc cmd buffer + * @param hwdev: device pointer to hwdev + * @retval non-zero: success + * @retval null: failure + **/ +struct sphw_cmd_buf *sphw_alloc_cmd_buf(void *hwdev); + +/** + * @brief sphw_free_cmd_buf - free cmd buffer + * @param hwdev: device pointer to hwdev + * @param cmd_buf: cmd buffer to free + **/ +void sphw_free_cmd_buf(void *hwdev, struct sphw_cmd_buf *cmd_buf); + +/** + * @brief sphw_dbg_get_hw_stats - get hardware stats + * @param hwdev: device pointer to hwdev + * @param hw_stats: pointer to caller-allocated memory + * @param out_size: out size + * @retval zero: success + * @retval non-zero: failure + */ +int sphw_dbg_get_hw_stats(const void *hwdev, u8 *hw_stats, u16 *out_size); + +/** + * @brief sphw_dbg_clear_hw_stats - clear hardware stats + * @param hwdev: device pointer to hwdev + * @retval clear hardware size + */ +u16 sphw_dbg_clear_hw_stats(void *hwdev); + +/** + * @brief sphw_get_chip_fault_stats - get chip fault stats + * @param hwdev: device pointer to hwdev + * @param chip_fault_stats: pointer to caller-allocated memory + * @param offset: offset + */ +void sphw_get_chip_fault_stats(const void *hwdev, u8 *chip_fault_stats, u32 offset); + +/** + * @brief sphw_msg_to_mgmt_sync - msg to management cpu + * @param hwdev: device pointer to hwdev + * @param mod: mod type + * @param cmd: cmd + * @param buf_in: message buffer in + * @param in_size: in buffer size + * @param buf_out: message buffer out + * @param out_size: out buffer size + * @param timeout: timeout + * @param channel: channel id + * @retval zero: success + * @retval non-zero: failure + */ +int sphw_msg_to_mgmt_sync(void *hwdev, u8 mod, u16 cmd, void *buf_in, u16 in_size, void *buf_out, + u16 *out_size, u32 timeout, u16 channel); + +/** + * @brief sphw_msg_to_mgmt_async - msg to management cpu async + * @param hwdev: device pointer to hwdev + * @param mod: mod type + * @param cmd: cmd + * @param buf_in: message buffer in + * @param in_size: in buffer size + * @param channel: channel id + * @retval zero: success + * @retval non-zero: failure + * + * The function does not sleep inside, allowing use in irq context + */ +int sphw_msg_to_mgmt_async(void *hwdev, u8 mod, u16 cmd, void *buf_in, u16 in_size, u16 channel); + +/** + * @brief sphw_msg_to_mgmt_no_ack - msg to management cpu, no ack needed + * @param hwdev: device pointer to hwdev + * @param mod: mod type + * @param cmd: cmd + * @param buf_in: message buffer in + * @param in_size: in buffer size + * @param channel: channel id + * @retval zero: success + * @retval non-zero: failure + * + * The function will sleep
inside, and it is not allowed to be used in + * interrupt context + */ +int sphw_msg_to_mgmt_no_ack(void *hwdev, u8 mod, u16 cmd, void *buf_in, u16 in_size, u16 channel); + +int sphw_msg_to_mgmt_api_chain_async(void *hwdev, u8 mod, u16 cmd, const void *buf_in, u16 in_size); + +int sphw_msg_to_mgmt_api_chain_sync(void *hwdev, u8 mod, u16 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size, u32 timeout); + +/** + * @brief sphw_mbox_to_pf - vf mbox message to pf + * @param hwdev: device pointer to hwdev + * @param mod: mod type + * @param cmd: cmd + * @param buf_in: message buffer in + * @param in_size: in buffer size + * @param buf_out: message buffer out + * @param out_size: out buffer size + * @param timeout: timeout + * @param channel: channel id + * @retval zero: success + * @retval non-zero: failure + */ +int sphw_mbox_to_pf(void *hwdev, u8 mod, u16 cmd, void *buf_in, u16 in_size, void *buf_out, + u16 *out_size, u32 timeout, u16 channel); + +/** + * @brief sphw_mbox_to_vf - mbox message to vf + * @param hwdev: device pointer to hwdev + * @param vf_id: vf index + * @param mod: mod type + * @param cmd: cmd + * @param buf_in: message buffer in + * @param in_size: in buffer size + * @param buf_out: message buffer out + * @param out_size: out buffer size + * @param timeout: timeout + * @param channel: channel id + * @retval zero: success + * @retval non-zero: failure + */ +int sphw_mbox_to_vf(void *hwdev, u16 vf_id, u8 mod, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, u32 timeout, u16 channel); + +int sphw_clp_to_mgmt(void *hwdev, u8 mod, u16 cmd, const void *buf_in, + u16 in_size, void *buf_out, u16 *out_size); +/** + * @brief sphw_cmdq_async - cmdq asynchronous message + * @param hwdev: device pointer to hwdev + * @param mod: mod type + * @param cmd: cmd + * @param buf_in: message buffer in + * @param channel: channel id + * @retval zero: success + * @retval non-zero: failure + */ +int sphw_cmdq_async(void *hwdev, u8 mod, u8 cmd, struct sphw_cmd_buf *buf_in, u16 channel); + +/** + * @brief sphw_cmdq_direct_resp - cmdq direct message response + * @param hwdev: device pointer to hwdev + * @param mod: mod type + * @param cmd: cmd + * @param buf_in: message buffer in + * @param out_param: message out + * @param timeout: timeout + * @param channel: channel id + * @retval zero: success + * @retval non-zero: failure + */ +int sphw_cmdq_direct_resp(void *hwdev, u8 mod, u8 cmd, struct sphw_cmd_buf *buf_in, + u64 *out_param, u32 timeout, u16 channel); + +/** + * @brief sphw_cmdq_detail_resp - cmdq detail message response + * @param hwdev: device pointer to hwdev + * @param mod: mod type + * @param cmd: cmd + * @param buf_in: message buffer in + * @param buf_out: message buffer out + * @param out_param: inline output data + * @param timeout: timeout + * @param channel: channel id + * @retval zero: success + * @retval non-zero: failure + */ +int sphw_cmdq_detail_resp(void *hwdev, u8 mod, u8 cmd, struct sphw_cmd_buf *buf_in, + struct sphw_cmd_buf *buf_out, u64 *out_param, u32 timeout, u16 channel); + +/** + * @brief sphw_cos_id_detail_resp - cmdq detail message response + * @param hwdev: device pointer to hwdev + * @param mod: mod type + * @param cmd: cmd + * @param cos_id: cos id + * @param buf_in: message buffer in + * @param buf_out: message buffer out + * @param out_param: inline output data + * @param timeout: timeout + * @param channel: channel id + * @retval zero: success + * @retval non-zero: failure + */ +int sphw_cos_id_detail_resp(void *hwdev, u8 mod, u8 
cmd, u8 cos_id, struct sphw_cmd_buf *buf_in, + struct sphw_cmd_buf *buf_out, u64 *out_param, u32 timeout, u16 channel); + +/** + * @brief sphw_ppf_tmr_start - start ppf timer + * @param hwdev: device pointer to hwdev + * @retval zero: success + * @retval non-zero: failure + */ +int sphw_ppf_tmr_start(void *hwdev); + +/** + * @brief sphw_ppf_tmr_stop - stop ppf timer + * @param hwdev: device pointer to hwdev + * @retval zero: success + * @retval non-zero: failure + */ +int sphw_ppf_tmr_stop(void *hwdev); + +/** + * @brief sphw_func_tmr_bitmap_set - set timer bitmap status + * @param hwdev: device pointer to hwdev + * @param enable: 0-disable, 1-enable + * @retval zero: success + * @retval non-zero: failure + */ +int sphw_func_tmr_bitmap_set(void *hwdev, bool enable); + +/** + * @brief sphw_get_board_info - get board info + * @param hwdev: device pointer to hwdev + * @param info: board info + * @param channel: channel id + * @retval zero: success + * @retval non-zero: failure + */ +int sphw_get_board_info(void *hwdev, struct sphw_board_info *info, u16 channel); + +/** + * @brief sphw_set_wq_page_size - set work queue page size + * @param hwdev: device pointer to hwdev + * @param func_idx: function id + * @param page_size: page size + * @param channel: channel id + * @retval zero: success + * @retval non-zero: failure + */ +int sphw_set_wq_page_size(void *hwdev, u16 func_idx, u32 page_size, u16 channel); + +/** + * @brief sphw_event_callback - evnet callback to notify service driver + * @param hwdev: device pointer to hwdev + * @param event: event info to service driver + */ +void sphw_event_callback(void *hwdev, struct sphw_event_info *event); + +/** + * @brief sphw_link_event_stats - link event stats + * @param hwdev: device pointer to hwdev + * @param link: link status + */ +void sphw_link_event_stats(void *dev, u8 link); + +enum func_reset_flag { + RES_TYPE_COMM = 0, + RES_TYPE_NIC = 1, + RES_TYPE_OVS = 2, + RES_TYPE_VBS = 3, + RES_TYPE_ROCE = 4, + RES_TYPE_FC = 5, + RES_TYPE_TOE = 6, + + RES_TYPE_FLUSH_BIT = 7, + RES_TYPE_MQM, + RES_TYPE_SMF, + RES_TYPE_CMDQ_ROOTCTX, + RES_TYPE_SQ_CI_TABLE, + RES_TYPE_PF_BW_CFG, + RES_TYPE_CEQ, + RES_TYPE_MBOX, + RES_TYPE_AEQ, + RES_TYPE_IPSEC, +}; + +#define SPHW_COMM_RES (BIT(RES_TYPE_COMM) | BIT(RES_TYPE_FLUSH_BIT) | BIT(RES_TYPE_MQM) | \ + BIT(RES_TYPE_SMF) | BIT(RES_TYPE_CMDQ_ROOTCTX)) + +#define SPHW_NIC_RES BIT(RES_TYPE_NIC) +#define SPHW_FC_RES BIT(RES_TYPE_FC) + +/** + * @brief sphw_func_reset - reset func + * @param hwdev: device pointer to hwdev + * @param func_id: global function index + * @param reset_flag: reset flag + * @param channel: channel id + */ +int sphw_func_reset(void *dev, u16 func_id, u64 reset_flag, u16 channel); + +int sphw_get_dev_cap(void *hwdev); + +int sphw_set_bdf_ctxt(void *hwdev, u8 bus, u8 device, u8 function); + +int sphw_init_func_mbox_msg_channel(void *hwdev, u16 num_func); + +#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw_cfg.c b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw_cfg.c new file mode 100644 index 000000000000..a50483c01239 --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw_cfg.c @@ -0,0 +1,1339 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/mutex.h> +#include <linux/device.h> +#include <linux/pci.h> +#include <linux/module.h> +#include <linux/semaphore.h> + +#include "sphw_common.h" +#include 
"sphw_crm.h" +#include "sphw_hw.h" +#include "sphw_hwdev.h" +#include "sphw_hwif.h" +#include "sphw_cfg_cmd.h" +#include "sphw_hw_cfg.h" + +uint g_rdma_mtts_num; + +uint intr_mode; +uint timer_enable = 1; +uint bloomfilter_enable; +uint g_test_mode; +uint g_test_qpc_num; +uint g_test_qpc_resvd_num; +uint g_test_pagesize_reorder; +uint g_test_xid_alloc_mode = 1; +uint g_test_gpa_check_enable = 1; +uint g_test_qpc_alloc_mode = 2; +uint g_test_scqc_alloc_mode = 2; +uint g_test_max_conn; +uint g_test_max_cache_conn; +uint g_test_scqc_num; +uint g_test_mpt_num; +uint g_test_mpt_resvd; +uint g_test_scq_resvd; +uint g_test_hash_num; +uint g_test_reorder_num; + +static void set_cfg_test_param(struct cfg_mgmt_info *cfg_mgmt) +{ + cfg_mgmt->svc_cap.test_mode = g_test_mode; + if (cfg_mgmt->svc_cap.test_mode == 0) + return; + + cfg_mgmt->svc_cap.timer_en = (u8)timer_enable; + cfg_mgmt->svc_cap.bloomfilter_en = (u8)bloomfilter_enable; + cfg_mgmt->svc_cap.test_qpc_num = g_test_qpc_num; + cfg_mgmt->svc_cap.test_qpc_resvd_num = g_test_qpc_resvd_num; + cfg_mgmt->svc_cap.test_page_size_reorder = g_test_pagesize_reorder; + cfg_mgmt->svc_cap.test_xid_alloc_mode = (bool)g_test_xid_alloc_mode; + cfg_mgmt->svc_cap.test_gpa_check_enable = (bool)g_test_gpa_check_enable; + cfg_mgmt->svc_cap.test_qpc_alloc_mode = (u8)g_test_qpc_alloc_mode; + cfg_mgmt->svc_cap.test_scqc_alloc_mode = (u8)g_test_scqc_alloc_mode; + cfg_mgmt->svc_cap.test_max_conn_num = g_test_max_conn; + cfg_mgmt->svc_cap.test_max_cache_conn_num = g_test_max_cache_conn; + cfg_mgmt->svc_cap.test_scqc_num = g_test_scqc_num; + cfg_mgmt->svc_cap.test_mpt_num = g_test_mpt_num; + cfg_mgmt->svc_cap.test_scq_resvd_num = g_test_scq_resvd; + cfg_mgmt->svc_cap.test_mpt_recvd_num = g_test_mpt_resvd; + cfg_mgmt->svc_cap.test_hash_num = g_test_hash_num; + cfg_mgmt->svc_cap.test_reorder_num = g_test_reorder_num; +} + +static void parse_pub_res_cap(struct sphw_hwdev *hwdev, + struct service_cap *cap, + struct cfg_cmd_dev_cap *dev_cap, + enum func_type type) +{ + struct dev_sf_svc_attr *attr = &cap->sf_svc_attr; + + cap->host_id = dev_cap->host_id; + cap->ep_id = dev_cap->ep_id; + cap->er_id = dev_cap->er_id; + cap->port_id = dev_cap->port_id; + + cap->svc_type = dev_cap->svc_cap_en; + cap->chip_svc_type = cap->svc_type; + + cap->cos_valid_bitmap = dev_cap->valid_cos_bitmap; + cap->flexq_en = dev_cap->flexq_en; + + cap->host_total_function = dev_cap->host_total_func; + + if (type != TYPE_VF) { + cap->max_vf = dev_cap->max_vf; + cap->pf_num = dev_cap->host_pf_num; + cap->pf_id_start = dev_cap->pf_id_start; + cap->vf_num = dev_cap->host_vf_num; + cap->vf_id_start = dev_cap->vf_id_start; + } else { + cap->max_vf = 0; + } + + if (dev_cap->sf_svc_attr & SF_SVC_FT_BIT) + attr->ft_en = true; + else + attr->ft_en = false; + + if (dev_cap->sf_svc_attr & SF_SVC_RDMA_BIT) + attr->rdma_en = true; + else + attr->rdma_en = false; + + /* PPF will overwrite it when parse dynamic resource */ + if (dev_cap->func_sf_en) + cap->sf_en = true; + else + cap->sf_en = false; + + cap->timer_en = (u8)timer_enable; /* timer enable */ + cap->host_oq_id_mask_val = dev_cap->host_oq_id_mask_val; + cap->max_connect_num = dev_cap->max_conn_num; + cap->max_stick2cache_num = dev_cap->max_stick2cache_num; + cap->bfilter_start_addr = dev_cap->max_bfilter_start_addr; + cap->bfilter_len = dev_cap->bfilter_len; + cap->hash_bucket_num = dev_cap->hash_bucket_num; + + sdk_info(hwdev->dev_hdl, "Get public resource capbility: svc_cap_en: 0x%x\n", + cap->svc_type); + sdk_info(hwdev->dev_hdl, "Host_id: 0x%x, ep_id: 
0x%x, er_id: 0x%x, port_id: 0x%x, cos_bitmap: 0x%x, flexq: 0x%x\n", + cap->host_id, cap->ep_id, cap->er_id, cap->port_id, + cap->cos_valid_bitmap, cap->flexq_en); + sdk_info(hwdev->dev_hdl, "Host_total_function: 0x%x, host_oq_id_mask_val: 0x%x, max_vf: 0x%x\n", + cap->host_total_function, cap->host_oq_id_mask_val, + cap->max_vf); + sdk_info(hwdev->dev_hdl, "Host_pf_num: 0x%x, pf_id_start: 0x%x, host_vf_num: 0x%x, vf_id_start: 0x%x\n", + cap->pf_num, cap->pf_id_start, cap->vf_num, cap->vf_id_start); +} + +static void parse_dynamic_share_res_cap(struct sphw_hwdev *hwdev, + struct service_cap *cap, + struct cfg_cmd_dev_cap *dev_cap, + enum func_type type) +{ + struct host_shared_resource_cap *shared_cap = &cap->shared_res_cap; + + if (dev_cap->host_sf_en) + cap->sf_en = true; + else + cap->sf_en = false; + + shared_cap->host_pctxs = dev_cap->host_pctx_num; + shared_cap->host_cctxs = dev_cap->host_ccxt_num; + shared_cap->host_scqs = dev_cap->host_scq_num; + shared_cap->host_srqs = dev_cap->host_srq_num; + shared_cap->host_mpts = dev_cap->host_mpt_num; + + sdk_info(hwdev->dev_hdl, "Dynamic share resource capbility:\n"); + sdk_info(hwdev->dev_hdl, "host_pctxs: 0x%x, host_cctxs: 0x%x, host_scqs: 0x%x, host_srqs: 0x%x, host_mpts: 0x%x\n", + shared_cap->host_pctxs, shared_cap->host_cctxs, + shared_cap->host_scqs, shared_cap->host_srqs, + shared_cap->host_mpts); +} + +static void parse_l2nic_res_cap(struct sphw_hwdev *hwdev, + struct service_cap *cap, + struct cfg_cmd_dev_cap *dev_cap, + enum func_type type) +{ + struct nic_service_cap *nic_cap = &cap->nic_cap; + + nic_cap->max_sqs = dev_cap->nic_max_sq_id + 1; + nic_cap->max_rqs = dev_cap->nic_max_rq_id + 1; + + sdk_info(hwdev->dev_hdl, "L2nic resource capbility, max_sqs: 0x%x, max_rqs: 0x%x\n", + nic_cap->max_sqs, nic_cap->max_rqs); + + /* Check parameters from firmware */ + if (nic_cap->max_sqs > SPHW_CFG_MAX_QP || + nic_cap->max_rqs > SPHW_CFG_MAX_QP) { + sdk_info(hwdev->dev_hdl, "Number of qp exceed limit[1-%d]: sq: %u, rq: %u\n", + SPHW_CFG_MAX_QP, nic_cap->max_sqs, nic_cap->max_rqs); + nic_cap->max_sqs = SPHW_CFG_MAX_QP; + nic_cap->max_rqs = SPHW_CFG_MAX_QP; + } +} + +static void parse_fc_res_cap(struct sphw_hwdev *hwdev, + struct service_cap *cap, + struct cfg_cmd_dev_cap *dev_cap, + enum func_type type) +{ + struct dev_fc_svc_cap *fc_cap = &cap->fc_cap.dev_fc_cap; + + fc_cap->max_parent_qpc_num = dev_cap->fc_max_pctx; + fc_cap->scq_num = dev_cap->fc_max_scq; + fc_cap->srq_num = dev_cap->fc_max_srq; + fc_cap->max_child_qpc_num = dev_cap->fc_max_cctx; + fc_cap->child_qpc_id_start = dev_cap->fc_cctx_id_start; + fc_cap->vp_id_start = dev_cap->fc_vp_id_start; + fc_cap->vp_id_end = dev_cap->fc_vp_id_end; + + sdk_info(hwdev->dev_hdl, "Get fc resource capbility\n"); + sdk_info(hwdev->dev_hdl, "Max_parent_qpc_num: 0x%x, scq_num: 0x%x, srq_num: 0x%x, max_child_qpc_num: 0x%x, child_qpc_id_start: 0x%x\n", + fc_cap->max_parent_qpc_num, fc_cap->scq_num, fc_cap->srq_num, + fc_cap->max_child_qpc_num, fc_cap->child_qpc_id_start); + sdk_info(hwdev->dev_hdl, "Vp_id_start: 0x%x, vp_id_end: 0x%x\n", + fc_cap->vp_id_start, fc_cap->vp_id_end); +} + +static void parse_roce_res_cap(struct sphw_hwdev *hwdev, + struct service_cap *cap, + struct cfg_cmd_dev_cap *dev_cap, + enum func_type type) +{ + struct dev_roce_svc_own_cap *roce_cap = + &cap->rdma_cap.dev_rdma_cap.roce_own_cap; + + roce_cap->max_qps = dev_cap->roce_max_qp; + roce_cap->max_cqs = dev_cap->roce_max_cq; + roce_cap->max_srqs = dev_cap->roce_max_srq; + roce_cap->max_mpts = dev_cap->roce_max_mpt; + 
roce_cap->max_drc_qps = dev_cap->roce_max_drc_qp; + + roce_cap->wqe_cl_start = dev_cap->roce_wqe_cl_start; + roce_cap->wqe_cl_end = dev_cap->roce_wqe_cl_end; + roce_cap->wqe_cl_sz = dev_cap->roce_wqe_cl_size; + + sdk_info(hwdev->dev_hdl, "Get roce resource capbility, type: 0x%x\n", + type); + sdk_info(hwdev->dev_hdl, "Max_qps: 0x%x, max_cqs: 0x%x, max_srqs: 0x%x, max_mpts: 0x%x, max_drcts: 0x%x\n", + roce_cap->max_qps, roce_cap->max_cqs, roce_cap->max_srqs, + roce_cap->max_mpts, roce_cap->max_drc_qps); + + sdk_info(hwdev->dev_hdl, "Wqe_start: 0x%x, wqe_end: 0x%x, wqe_sz: 0x%x\n", + roce_cap->wqe_cl_start, roce_cap->wqe_cl_end, + roce_cap->wqe_cl_sz); + + if (roce_cap->max_qps == 0) { + if (type == TYPE_PF || type == TYPE_PPF) { + roce_cap->max_qps = 1024; + roce_cap->max_cqs = 2048; + roce_cap->max_srqs = 1024; + roce_cap->max_mpts = 1024; + roce_cap->max_drc_qps = 64; + } else { + roce_cap->max_qps = 512; + roce_cap->max_cqs = 1024; + roce_cap->max_srqs = 512; + roce_cap->max_mpts = 512; + roce_cap->max_drc_qps = 64; + } + } +} + +static void parse_rdma_res_cap(struct sphw_hwdev *hwdev, + struct service_cap *cap, + struct cfg_cmd_dev_cap *dev_cap, + enum func_type type) +{ + struct dev_roce_svc_own_cap *roce_cap = + &cap->rdma_cap.dev_rdma_cap.roce_own_cap; + + roce_cap->cmtt_cl_start = dev_cap->roce_cmtt_cl_start; + roce_cap->cmtt_cl_end = dev_cap->roce_cmtt_cl_end; + roce_cap->cmtt_cl_sz = dev_cap->roce_cmtt_cl_size; + + roce_cap->dmtt_cl_start = dev_cap->roce_dmtt_cl_start; + roce_cap->dmtt_cl_end = dev_cap->roce_dmtt_cl_end; + roce_cap->dmtt_cl_sz = dev_cap->roce_dmtt_cl_size; + + sdk_info(hwdev->dev_hdl, "Get rdma resource capbility, Cmtt_start: 0x%x, cmtt_end: 0x%x, cmtt_sz: 0x%x\n", + roce_cap->cmtt_cl_start, roce_cap->cmtt_cl_end, + roce_cap->cmtt_cl_sz); + + sdk_info(hwdev->dev_hdl, "Dmtt_start: 0x%x, dmtt_end: 0x%x, dmtt_sz: 0x%x\n", + roce_cap->dmtt_cl_start, roce_cap->dmtt_cl_end, + roce_cap->dmtt_cl_sz); +} + +static void parse_ovs_res_cap(struct sphw_hwdev *hwdev, + struct service_cap *cap, + struct cfg_cmd_dev_cap *dev_cap, + enum func_type type) +{ + struct ovs_service_cap *ovs_cap = &cap->ovs_cap; + + ovs_cap->dev_ovs_cap.max_pctxs = dev_cap->ovs_max_qpc; + ovs_cap->dev_ovs_cap.fake_vf_start_id = dev_cap->fake_vf_start_id; + ovs_cap->dev_ovs_cap.fake_vf_num = dev_cap->fake_vf_num; + ovs_cap->dev_ovs_cap.dynamic_qp_en = dev_cap->flexq_en; + + sdk_info(hwdev->dev_hdl, "Get ovs resource capbility, max_qpc: 0x%x, fake_vf_start_id: 0x%x, fake_vf_num: 0x%x, dynamic_qp_en: 0x%x\n", + ovs_cap->dev_ovs_cap.max_pctxs, + ovs_cap->dev_ovs_cap.fake_vf_start_id, + ovs_cap->dev_ovs_cap.fake_vf_num, + ovs_cap->dev_ovs_cap.dynamic_qp_en); +} + +static void parse_toe_res_cap(struct sphw_hwdev *hwdev, + struct service_cap *cap, + struct cfg_cmd_dev_cap *dev_cap, + enum func_type type) +{ + struct dev_toe_svc_cap *toe_cap = &cap->toe_cap.dev_toe_cap; + + toe_cap->max_pctxs = dev_cap->toe_max_pctx; + toe_cap->max_cqs = dev_cap->toe_max_cq; + toe_cap->max_srqs = dev_cap->toe_max_srq; + toe_cap->srq_id_start = dev_cap->toe_srq_id_start; + toe_cap->max_mpts = dev_cap->toe_max_mpt; + + sdk_info(hwdev->dev_hdl, "Get toe resource capbility, max_pctxs: 0x%x, max_cqs: 0x%x, max_srqs: 0x%x, srq_id_start: 0x%x, max_mpts: 0x%x\n", + toe_cap->max_pctxs, toe_cap->max_cqs, toe_cap->max_srqs, + toe_cap->srq_id_start, toe_cap->max_mpts); +} + +static void parse_ipsec_res_cap(struct sphw_hwdev *hwdev, + struct service_cap *cap, + struct cfg_cmd_dev_cap *dev_cap, + enum func_type type) +{ + struct 
ipsec_service_cap *ipsec_cap = &cap->ipsec_cap; + + ipsec_cap->dev_ipsec_cap.max_sa_ctxs = dev_cap->ipsec_max_sactx; + + sdk_info(hwdev->dev_hdl, "Get IPsec resource capbility, max_sa_ctxs: 0x%x\n", + ipsec_cap->dev_ipsec_cap.max_sa_ctxs); +} + +static void parse_dev_cap(struct sphw_hwdev *dev, + struct cfg_cmd_dev_cap *dev_cap, enum func_type type) +{ + struct service_cap *cap = &dev->cfg_mgmt->svc_cap; + + /* Public resource */ + parse_pub_res_cap(dev, cap, dev_cap, type); + + /* PPF managed dynamic resource */ + if (type == TYPE_PPF) + parse_dynamic_share_res_cap(dev, cap, dev_cap, type); + + /* L2 NIC resource */ + if (IS_NIC_TYPE(dev)) + parse_l2nic_res_cap(dev, cap, dev_cap, type); + + /* FC without virtulization */ + if (type == TYPE_PF || type == TYPE_PPF) { + if (IS_FC_TYPE(dev)) + parse_fc_res_cap(dev, cap, dev_cap, type); + } + + /* toe resource */ + if (IS_TOE_TYPE(dev)) + parse_toe_res_cap(dev, cap, dev_cap, type); + + /* mtt cache line */ + if (IS_RDMA_ENABLE(dev)) + parse_rdma_res_cap(dev, cap, dev_cap, type); + + /* RoCE resource */ + if (IS_ROCE_TYPE(dev)) + parse_roce_res_cap(dev, cap, dev_cap, type); + + if (IS_OVS_TYPE(dev)) + parse_ovs_res_cap(dev, cap, dev_cap, type); + + if (IS_IPSEC_TYPE(dev)) + parse_ipsec_res_cap(dev, cap, dev_cap, type); +} + +static int get_cap_from_fw(struct sphw_hwdev *dev, enum func_type type) +{ + struct cfg_cmd_dev_cap dev_cap; + u16 out_len = sizeof(dev_cap); + int err; + + memset(&dev_cap, 0, sizeof(dev_cap)); + dev_cap.func_id = sphw_global_func_id(dev); + sdk_info(dev->dev_hdl, "Get cap from fw, func_idx: %u\n", + dev_cap.func_id); + + err = sphw_msg_to_mgmt_sync(dev, SPHW_MOD_CFGM, CFG_CMD_GET_DEV_CAP, + &dev_cap, sizeof(dev_cap), &dev_cap, &out_len, 0, + SPHW_CHANNEL_COMM); + if (err || dev_cap.head.status || !out_len) { + sdk_err(dev->dev_hdl, + "Failed to get capability from FW, err: %d, status: 0x%x, out size: 0x%x\n", + err, dev_cap.head.status, out_len); + return -EIO; + } + + parse_dev_cap(dev, &dev_cap, type); + + return 0; +} + +int sphw_get_dev_cap(void *dev) +{ + struct sphw_hwdev *hwdev = dev; + enum func_type type = SPHW_FUNC_TYPE(hwdev); + int err; + + switch (type) { + case TYPE_PF: + case TYPE_PPF: + case TYPE_VF: + err = get_cap_from_fw(hwdev, type); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to get PF/PPF capability\n"); + return err; + } + break; + default: + sdk_err(hwdev->dev_hdl, "Unsupported PCI Function type: %d\n", + type); + return -EINVAL; + } + + return 0; +} + +static void nic_param_fix(struct sphw_hwdev *dev) +{ +} + +static void rdma_mtt_fix(struct sphw_hwdev *dev) +{ + struct service_cap *cap = &dev->cfg_mgmt->svc_cap; + struct rdma_service_cap *rdma_cap = &cap->rdma_cap; + + rdma_cap->log_mtt = LOG_MTT_SEG; + rdma_cap->log_mtt_seg = LOG_MTT_SEG; + rdma_cap->mtt_entry_sz = MTT_ENTRY_SZ; + rdma_cap->mpt_entry_sz = RDMA_MPT_ENTRY_SZ; + rdma_cap->num_mtts = (g_rdma_mtts_num > 0 ? 
+ g_rdma_mtts_num : RDMA_NUM_MTTS); +} + +static void rdma_param_fix(struct sphw_hwdev *dev) +{ + struct service_cap *cap = &dev->cfg_mgmt->svc_cap; + struct rdma_service_cap *rdma_cap = &cap->rdma_cap; + struct dev_roce_svc_own_cap *roce_cap = + &rdma_cap->dev_rdma_cap.roce_own_cap; + + rdma_cap->log_mtt = LOG_MTT_SEG; + rdma_cap->log_rdmarc = LOG_RDMARC_SEG; + rdma_cap->reserved_qps = RDMA_RSVD_QPS; + rdma_cap->max_sq_sg = RDMA_MAX_SQ_SGE; + + /* RoCE */ + if (IS_ROCE_TYPE(dev)) { + roce_cap->qpc_entry_sz = ROCE_QPC_ENTRY_SZ; + roce_cap->max_wqes = ROCE_MAX_WQES; + roce_cap->max_rq_sg = ROCE_MAX_RQ_SGE; + roce_cap->max_sq_inline_data_sz = ROCE_MAX_SQ_INLINE_DATA_SZ; + roce_cap->max_rq_desc_sz = ROCE_MAX_RQ_DESC_SZ; + roce_cap->rdmarc_entry_sz = ROCE_RDMARC_ENTRY_SZ; + roce_cap->max_qp_init_rdma = ROCE_MAX_QP_INIT_RDMA; + roce_cap->max_qp_dest_rdma = ROCE_MAX_QP_DEST_RDMA; + roce_cap->max_srq_wqes = ROCE_MAX_SRQ_WQES; + roce_cap->reserved_srqs = ROCE_RSVD_SRQS; + roce_cap->max_srq_sge = ROCE_MAX_SRQ_SGE; + roce_cap->srqc_entry_sz = ROCE_SRQC_ENTERY_SZ; + roce_cap->max_msg_sz = ROCE_MAX_MSG_SZ; + } + + rdma_cap->max_sq_desc_sz = RDMA_MAX_SQ_DESC_SZ; + rdma_cap->wqebb_size = WQEBB_SZ; + rdma_cap->max_cqes = RDMA_MAX_CQES; + rdma_cap->reserved_cqs = RDMA_RSVD_CQS; + rdma_cap->cqc_entry_sz = RDMA_CQC_ENTRY_SZ; + rdma_cap->cqe_size = RDMA_CQE_SZ; + rdma_cap->reserved_mrws = RDMA_RSVD_MRWS; + rdma_cap->mpt_entry_sz = RDMA_MPT_ENTRY_SZ; + + /* 2^8 - 1 + * +------------------------+-----------+ + * | 4B | 1M(20b) | Key(8b) | + * +------------------------+-----------+ + * key = 8bit key + 24bit index, + * now Lkey of SGE uses 2bit(bit31 and bit30), so key only have 10bit, + * we use original 8bits directly for simpilification + */ + rdma_cap->max_fmr_maps = 255; + rdma_cap->num_mtts = (g_rdma_mtts_num > 0 ? 
+ g_rdma_mtts_num : RDMA_NUM_MTTS); + rdma_cap->log_mtt_seg = LOG_MTT_SEG; + rdma_cap->mtt_entry_sz = MTT_ENTRY_SZ; + rdma_cap->log_rdmarc_seg = LOG_RDMARC_SEG; + rdma_cap->local_ca_ack_delay = LOCAL_ACK_DELAY; + rdma_cap->num_ports = RDMA_NUM_PORTS; + rdma_cap->db_page_size = DB_PAGE_SZ; + rdma_cap->direct_wqe_size = DWQE_SZ; + rdma_cap->num_pds = NUM_PD; + rdma_cap->reserved_pds = RSVD_PD; + rdma_cap->max_xrcds = MAX_XRCDS; + rdma_cap->reserved_xrcds = RSVD_XRCDS; + rdma_cap->max_gid_per_port = MAX_GID_PER_PORT; + rdma_cap->gid_entry_sz = GID_ENTRY_SZ; + rdma_cap->reserved_lkey = RSVD_LKEY; + rdma_cap->num_comp_vectors = (u32)dev->cfg_mgmt->eq_info.num_ceq; + rdma_cap->page_size_cap = PAGE_SZ_CAP; + rdma_cap->flags = (RDMA_BMME_FLAG_LOCAL_INV | + RDMA_BMME_FLAG_REMOTE_INV | + RDMA_BMME_FLAG_FAST_REG_WR | + RDMA_DEV_CAP_FLAG_XRC | + RDMA_DEV_CAP_FLAG_MEM_WINDOW | + RDMA_BMME_FLAG_TYPE_2_WIN | + RDMA_BMME_FLAG_WIN_TYPE_2B | + RDMA_DEV_CAP_FLAG_ATOMIC); + rdma_cap->max_frpl_len = MAX_FRPL_LEN; + rdma_cap->max_pkeys = MAX_PKEYS; +} + +static void toe_param_fix(struct sphw_hwdev *dev) +{ + struct service_cap *cap = &dev->cfg_mgmt->svc_cap; + struct toe_service_cap *toe_cap = &cap->toe_cap; + + toe_cap->pctx_sz = TOE_PCTX_SZ; + toe_cap->scqc_sz = TOE_CQC_SZ; +} + +static void ovs_param_fix(struct sphw_hwdev *dev) +{ + struct service_cap *cap = &dev->cfg_mgmt->svc_cap; + struct ovs_service_cap *ovs_cap = &cap->ovs_cap; + + ovs_cap->pctx_sz = OVS_PCTX_SZ; +} + +static void fc_param_fix(struct sphw_hwdev *dev) +{ + struct service_cap *cap = &dev->cfg_mgmt->svc_cap; + struct fc_service_cap *fc_cap = &cap->fc_cap; + + fc_cap->parent_qpc_size = FC_PCTX_SZ; + fc_cap->child_qpc_size = FC_CCTX_SZ; + fc_cap->sqe_size = FC_SQE_SZ; + + fc_cap->scqc_size = FC_SCQC_SZ; + fc_cap->scqe_size = FC_SCQE_SZ; + + fc_cap->srqc_size = FC_SRQC_SZ; + fc_cap->srqe_size = FC_SRQE_SZ; +} + +static void ipsec_param_fix(struct sphw_hwdev *dev) +{ + struct service_cap *cap = &dev->cfg_mgmt->svc_cap; + struct ipsec_service_cap *ipsec_cap = &cap->ipsec_cap; + + ipsec_cap->sactx_sz = IPSEC_SACTX_SZ; +} + +static void init_service_param(struct sphw_hwdev *dev) +{ + if (IS_NIC_TYPE(dev)) + nic_param_fix(dev); + if (IS_RDMA_ENABLE(dev)) + rdma_mtt_fix(dev); + if (IS_ROCE_TYPE(dev)) + rdma_param_fix(dev); + if (IS_FC_TYPE(dev)) + fc_param_fix(dev); + if (IS_TOE_TYPE(dev)) + toe_param_fix(dev); + if (IS_OVS_TYPE(dev)) + ovs_param_fix(dev); + if (IS_IPSEC_TYPE(dev)) + ipsec_param_fix(dev); +} + +static void cfg_get_eq_num(struct sphw_hwdev *dev) +{ + struct cfg_eq_info *eq_info = &dev->cfg_mgmt->eq_info; + + eq_info->num_ceq = dev->hwif->attr.num_ceqs; + eq_info->num_ceq_remain = eq_info->num_ceq; +} + +static int cfg_init_eq(struct sphw_hwdev *dev) +{ + struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt; + struct cfg_eq *eq = NULL; + u8 num_ceq, i = 0; + + cfg_get_eq_num(dev); + num_ceq = cfg_mgmt->eq_info.num_ceq; + + sdk_info(dev->dev_hdl, "Cfg mgmt: ceqs=0x%x, remain=0x%x\n", + cfg_mgmt->eq_info.num_ceq, cfg_mgmt->eq_info.num_ceq_remain); + + if (!num_ceq) { + sdk_err(dev->dev_hdl, "Ceq num cfg in fw is zero\n"); + return -EFAULT; + } + + eq = kcalloc(num_ceq, sizeof(*eq), GFP_KERNEL); + if (!eq) + return -ENOMEM; + + for (i = 0; i < num_ceq; ++i) { + eq[i].eqn = i; + eq[i].free = CFG_FREE; + eq[i].type = SERVICE_T_MAX; + } + + cfg_mgmt->eq_info.eq = eq; + + mutex_init(&cfg_mgmt->eq_info.eq_mutex); + + return 0; +} + +int sphw_vector_to_eqn(void *hwdev, enum sphw_service_type type, int vector) +{ + struct sphw_hwdev *dev = hwdev; + 
struct cfg_mgmt_info *cfg_mgmt = NULL; + struct cfg_eq *eq = NULL; + int eqn = -EINVAL; + + if (!hwdev || vector < 0) + return -EINVAL; + + if (type != SERVICE_T_ROCE) { + sdk_err(dev->dev_hdl, + "Service type: %d, only RDMA service can get eqn by vector.\n", + type); + return -EINVAL; + } + + cfg_mgmt = dev->cfg_mgmt; + vector = (vector % cfg_mgmt->eq_info.num_ceq) + CFG_RDMA_CEQ_BASE; + + eq = cfg_mgmt->eq_info.eq; + if (eq[vector].type == SERVICE_T_ROCE && eq[vector].free == CFG_BUSY) + eqn = eq[vector].eqn; + + return eqn; +} + +static int cfg_init_interrupt(struct sphw_hwdev *dev) +{ + struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt; + struct cfg_irq_info *irq_info = &cfg_mgmt->irq_param_info; + u16 intr_num = dev->hwif->attr.num_irqs; + + if (!intr_num) { + sdk_err(dev->dev_hdl, "Irq num cfg in fw is zero\n"); + return -EFAULT; + } + irq_info->alloc_info = kcalloc(intr_num, sizeof(*irq_info->alloc_info), + GFP_KERNEL); + if (!irq_info->alloc_info) + return -ENOMEM; + + irq_info->num_irq_hw = intr_num; + + /* Production requires that VFs support only MSI-X */ + if (SPHW_FUNC_TYPE(dev) == TYPE_VF) + cfg_mgmt->svc_cap.interrupt_type = INTR_TYPE_MSIX; + else + cfg_mgmt->svc_cap.interrupt_type = intr_mode; + + mutex_init(&irq_info->irq_mutex); + return 0; +} + +static int cfg_enable_interrupt(struct sphw_hwdev *dev) +{ + struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt; + u16 nreq = cfg_mgmt->irq_param_info.num_irq_hw; + + void *pcidev = dev->pcidev_hdl; + struct irq_alloc_info_st *irq_info = NULL; + struct msix_entry *entry = NULL; + u16 i = 0; + int actual_irq; + + irq_info = cfg_mgmt->irq_param_info.alloc_info; + + sdk_info(dev->dev_hdl, "Interrupt type: %u, irq num: %u.\n", + cfg_mgmt->svc_cap.interrupt_type, nreq); + + switch (cfg_mgmt->svc_cap.interrupt_type) { + case INTR_TYPE_MSIX: + if (!nreq) { + sdk_err(dev->dev_hdl, "Interrupt number cannot be zero\n"); + return -EINVAL; + } + entry = kcalloc(nreq, sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + for (i = 0; i < nreq; i++) + entry[i].entry = i; + + actual_irq = pci_enable_msix_range(pcidev, entry, + VECTOR_THRESHOLD, nreq); + if (actual_irq < 0) { + sdk_err(dev->dev_hdl, "Alloc msix entries with threshold 2 failed.
actual_irq: %d\n", + actual_irq); + kfree(entry); + return -ENOMEM; + } + + nreq = (u16)actual_irq; + cfg_mgmt->irq_param_info.num_total = nreq; + cfg_mgmt->irq_param_info.num_irq_remain = nreq; + sdk_info(dev->dev_hdl, "Requested %u msix vectors successfully.\n", + nreq); + + for (i = 0; i < nreq; ++i) { + /* u16 driver uses to specify entry, OS writes */ + irq_info[i].info.msix_entry_idx = entry[i].entry; + /* u32 kernel uses to write allocated vector */ + irq_info[i].info.irq_id = entry[i].vector; + irq_info[i].type = SERVICE_T_MAX; + irq_info[i].free = CFG_FREE; + } + + kfree(entry); + + break; + + default: + sdk_err(dev->dev_hdl, "Unsupported interrupt type %d\n", + cfg_mgmt->svc_cap.interrupt_type); + break; + } + + return 0; +} + +int sphw_alloc_irqs(void *hwdev, enum sphw_service_type type, u16 num, + struct irq_info *irq_info_array, u16 *act_num) +{ + struct sphw_hwdev *dev = hwdev; + struct cfg_mgmt_info *cfg_mgmt = NULL; + struct cfg_irq_info *irq_info = NULL; + struct irq_alloc_info_st *alloc_info = NULL; + int max_num_irq; + u16 free_num_irq; + int i, j; + + if (!hwdev || !irq_info_array || !act_num) + return -EINVAL; + + cfg_mgmt = dev->cfg_mgmt; + irq_info = &cfg_mgmt->irq_param_info; + alloc_info = irq_info->alloc_info; + max_num_irq = irq_info->num_total; + free_num_irq = irq_info->num_irq_remain; + + mutex_lock(&irq_info->irq_mutex); + + if (num > free_num_irq) { + if (free_num_irq == 0) { + sdk_err(dev->dev_hdl, + "no free irq resource in cfg mgmt.\n"); + mutex_unlock(&irq_info->irq_mutex); + return -ENOMEM; + } + + sdk_warn(dev->dev_hdl, "only %u irq resource in cfg mgmt.\n", + free_num_irq); + num = free_num_irq; + } + + *act_num = 0; + + for (i = 0; i < num; i++) { + for (j = 0; j < max_num_irq; j++) { + if (alloc_info[j].free == CFG_FREE) { + if (irq_info->num_irq_remain == 0) { + sdk_err(dev->dev_hdl, "No free irq resource in cfg mgmt\n"); + mutex_unlock(&irq_info->irq_mutex); + return -EINVAL; + } + alloc_info[j].type = type; + alloc_info[j].free = CFG_BUSY; + + irq_info_array[i].msix_entry_idx = + alloc_info[j].info.msix_entry_idx; + irq_info_array[i].irq_id = + alloc_info[j].info.irq_id; + (*act_num)++; + irq_info->num_irq_remain--; + + break; + } + } + } + + mutex_unlock(&irq_info->irq_mutex); + return 0; +} + +void sphw_free_irq(void *hwdev, enum sphw_service_type type, u32 irq_id) +{ + struct sphw_hwdev *dev = hwdev; + struct cfg_mgmt_info *cfg_mgmt = NULL; + struct cfg_irq_info *irq_info = NULL; + struct irq_alloc_info_st *alloc_info = NULL; + int max_num_irq; + int i; + + if (!hwdev) + return; + + cfg_mgmt = dev->cfg_mgmt; + irq_info = &cfg_mgmt->irq_param_info; + alloc_info = irq_info->alloc_info; + max_num_irq = irq_info->num_total; + + mutex_lock(&irq_info->irq_mutex); + + for (i = 0; i < max_num_irq; i++) { + if (irq_id == alloc_info[i].info.irq_id && + type == alloc_info[i].type) { + if (alloc_info[i].free == CFG_BUSY) { + alloc_info[i].free = CFG_FREE; + irq_info->num_irq_remain++; + if (irq_info->num_irq_remain > max_num_irq) { + sdk_err(dev->dev_hdl, "Found target, but out of range\n"); + mutex_unlock(&irq_info->irq_mutex); + return; + } + break; + } + } + } + + if (i >= max_num_irq) + sdk_warn(dev->dev_hdl, "Irq %u does not need to be freed\n", irq_id); + + mutex_unlock(&irq_info->irq_mutex); +} + +int sphw_vector_to_irq(void *hwdev, enum sphw_service_type type, int vector) +{ + struct sphw_hwdev *dev = hwdev; + struct cfg_mgmt_info *cfg_mgmt = NULL; + struct irq_alloc_info_st *irq_info = NULL; + int irq = -EINVAL; + + if (!hwdev) + return -EINVAL; + + cfg_mgmt =
dev->cfg_mgmt; + if (type != SERVICE_T_ROCE) { + sdk_err(dev->dev_hdl, + "Service type: %u, only RDMA service could get eqn by vector\n", + type); + return -EINVAL; + } + + /* Current RDMA CEQ are 2 - 31, will change in the future */ + vector = ((vector % cfg_mgmt->eq_info.num_ceq) + CFG_RDMA_CEQ_BASE); + + irq_info = cfg_mgmt->irq_param_info.alloc_info; + if (irq_info[vector].type == SERVICE_T_ROCE) + if (irq_info[vector].free == CFG_BUSY) + irq = (int)irq_info[vector].info.irq_id; + + return irq; +} + +int sphw_alloc_ceqs(void *hwdev, enum sphw_service_type type, int num, int *ceq_id_array, + int *act_num) +{ + struct sphw_hwdev *dev = hwdev; + struct cfg_mgmt_info *cfg_mgmt = NULL; + struct cfg_eq_info *eq = NULL; + int free_ceq; + int i, j; + + if (!hwdev || !ceq_id_array || !act_num) + return -EINVAL; + + cfg_mgmt = dev->cfg_mgmt; + eq = &cfg_mgmt->eq_info; + free_ceq = eq->num_ceq_remain; + + mutex_lock(&eq->eq_mutex); + + if (num > free_ceq) { + if (free_ceq <= 0) { + sdk_err(dev->dev_hdl, "No free ceq resource in cfg mgmt\n"); + mutex_unlock(&eq->eq_mutex); + return -ENOMEM; + } + + sdk_warn(dev->dev_hdl, "Only %d ceq resource in cfg mgmt\n", + free_ceq); + } + + *act_num = 0; + + num = min(num, eq->num_ceq - CFG_RDMA_CEQ_BASE); + for (i = 0; i < num; i++) { + if (eq->num_ceq_remain == 0) { + sdk_warn(dev->dev_hdl, "Alloc %d ceqs, less than required %d ceqs\n", + *act_num, num); + mutex_unlock(&eq->eq_mutex); + return 0; + } + + for (j = CFG_RDMA_CEQ_BASE; j < eq->num_ceq; j++) { + if (eq->eq[j].free == CFG_FREE) { + eq->eq[j].type = type; + eq->eq[j].free = CFG_BUSY; + eq->num_ceq_remain--; + ceq_id_array[i] = eq->eq[j].eqn; + (*act_num)++; + break; + } + } + } + + mutex_unlock(&eq->eq_mutex); + return 0; +} + +void sphw_free_ceq(void *hwdev, enum sphw_service_type type, int ceq_id) +{ + struct sphw_hwdev *dev = hwdev; + struct cfg_mgmt_info *cfg_mgmt = NULL; + struct cfg_eq_info *eq = NULL; + u8 num_ceq; + u8 i = 0; + + if (!hwdev) + return; + + cfg_mgmt = dev->cfg_mgmt; + eq = &cfg_mgmt->eq_info; + num_ceq = eq->num_ceq; + + mutex_lock(&eq->eq_mutex); + + for (i = 0; i < num_ceq; i++) { + if (ceq_id == eq->eq[i].eqn && + type == cfg_mgmt->eq_info.eq[i].type) { + if (eq->eq[i].free == CFG_BUSY) { + eq->eq[i].free = CFG_FREE; + eq->num_ceq_remain++; + if (eq->num_ceq_remain > num_ceq) + eq->num_ceq_remain %= num_ceq; + + mutex_unlock(&eq->eq_mutex); + return; + } + } + } + + if (i >= num_ceq) + sdk_warn(dev->dev_hdl, "ceq %d don`t need to free.\n", ceq_id); + + mutex_unlock(&eq->eq_mutex); +} + +int init_cfg_mgmt(struct sphw_hwdev *dev) +{ + int err; + struct cfg_mgmt_info *cfg_mgmt; + + cfg_mgmt = kzalloc(sizeof(*cfg_mgmt), GFP_KERNEL); + if (!cfg_mgmt) + return -ENOMEM; + + dev->cfg_mgmt = cfg_mgmt; + cfg_mgmt->hwdev = dev; + + err = cfg_init_eq(dev); + if (err) { + sdk_err(dev->dev_hdl, "Failed to init cfg event queue, err: %d\n", + err); + goto free_mgmt_mem; + } + + err = cfg_init_interrupt(dev); + if (err) { + sdk_err(dev->dev_hdl, "Failed to init cfg interrupt, err: %d\n", + err); + goto free_eq_mem; + } + + err = cfg_enable_interrupt(dev); + if (err) { + sdk_err(dev->dev_hdl, "Failed to enable cfg interrupt, err: %d\n", + err); + goto free_interrupt_mem; + } + + return 0; + +free_interrupt_mem: + kfree(cfg_mgmt->irq_param_info.alloc_info); + cfg_mgmt->irq_param_info.alloc_info = NULL; + +free_eq_mem: + kfree(cfg_mgmt->eq_info.eq); + cfg_mgmt->eq_info.eq = NULL; + +free_mgmt_mem: + kfree(cfg_mgmt); + return err; +} + +void free_cfg_mgmt(struct sphw_hwdev *dev) +{ + struct 
cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt; + + /* if the allocated resource were recycled */ + if (cfg_mgmt->irq_param_info.num_irq_remain != + cfg_mgmt->irq_param_info.num_total || + cfg_mgmt->eq_info.num_ceq_remain != cfg_mgmt->eq_info.num_ceq) + sdk_err(dev->dev_hdl, "Can't reclaim all irq and event queue, please check\n"); + + switch (cfg_mgmt->svc_cap.interrupt_type) { + case INTR_TYPE_MSIX: + pci_disable_msix(dev->pcidev_hdl); + break; + + case INTR_TYPE_MSI: + pci_disable_msi(dev->pcidev_hdl); + break; + + case INTR_TYPE_INT: + default: + break; + } + + kfree(cfg_mgmt->irq_param_info.alloc_info); + cfg_mgmt->irq_param_info.alloc_info = NULL; + + kfree(cfg_mgmt->eq_info.eq); + cfg_mgmt->eq_info.eq = NULL; + + kfree(cfg_mgmt); +} + +int init_capability(struct sphw_hwdev *dev) +{ + int err; + struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt; + + set_cfg_test_param(cfg_mgmt); + + cfg_mgmt->svc_cap.sf_svc_attr.ft_pf_en = false; + cfg_mgmt->svc_cap.sf_svc_attr.rdma_pf_en = false; + + err = sphw_get_dev_cap(dev); + if (err) + return err; + + init_service_param(dev); + + sdk_info(dev->dev_hdl, "Init capability success\n"); + return 0; +} + +void free_capability(struct sphw_hwdev *dev) +{ + sdk_info(dev->dev_hdl, "Free capability success"); +} + +bool sphw_support_nic(void *hwdev, struct nic_service_cap *cap) +{ + struct sphw_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (!IS_NIC_TYPE(dev)) + return false; + + if (cap) + memcpy(cap, &dev->cfg_mgmt->svc_cap.nic_cap, sizeof(*cap)); + + return true; +} + +bool sphw_support_ipsec(void *hwdev, struct ipsec_service_cap *cap) +{ + struct sphw_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (!IS_IPSEC_TYPE(dev)) + return false; + + if (cap) + memcpy(cap, &dev->cfg_mgmt->svc_cap.ipsec_cap, sizeof(*cap)); + + return true; +} + +bool sphw_support_roce(void *hwdev, struct rdma_service_cap *cap) +{ + struct sphw_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (!IS_ROCE_TYPE(dev)) + return false; + + if (cap) + memcpy(cap, &dev->cfg_mgmt->svc_cap.rdma_cap, sizeof(*cap)); + + return true; +} + +bool sphw_support_fc(void *hwdev, struct fc_service_cap *cap) +{ + struct sphw_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (!IS_FC_TYPE(dev)) + return false; + + if (cap) + memcpy(cap, &dev->cfg_mgmt->svc_cap.fc_cap, sizeof(*cap)); + + return true; +} + +bool sphw_support_rdma(void *hwdev, struct rdma_service_cap *cap) +{ + struct sphw_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (!IS_RDMA_TYPE(dev)) + return false; + + if (cap) + memcpy(cap, &dev->cfg_mgmt->svc_cap.rdma_cap, sizeof(*cap)); + + return true; +} + +bool sphw_support_ovs(void *hwdev, struct ovs_service_cap *cap) +{ + struct sphw_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (!IS_OVS_TYPE(dev)) + return false; + + if (cap) + memcpy(cap, &dev->cfg_mgmt->svc_cap.ovs_cap, sizeof(*cap)); + + return true; +} + +/* Only PPF support it, PF is not */ +bool sphw_support_toe(void *hwdev, struct toe_service_cap *cap) +{ + struct sphw_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (!IS_TOE_TYPE(dev)) + return false; + + if (cap) + memcpy(cap, &dev->cfg_mgmt->svc_cap.toe_cap, sizeof(*cap)); + + return true; +} + +bool sphw_func_for_mgmt(void *hwdev) +{ + struct sphw_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (dev->cfg_mgmt->svc_cap.chip_svc_type >= CFG_SVC_NIC_BIT0) + return false; + else + return true; +} + +bool sphw_get_stateful_enable(void *hwdev) +{ + struct sphw_hwdev *dev = hwdev; + + if (!hwdev) + return 
false; + + return dev->cfg_mgmt->svc_cap.sf_en; +} + +u8 sphw_host_oq_id_mask(void *hwdev) +{ + struct sphw_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting host oq id mask\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.host_oq_id_mask_val; +} + +u8 sphw_host_id(void *hwdev) +{ + struct sphw_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting host id\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.host_id; +} + +u16 sphw_host_total_func(void *hwdev) +{ + struct sphw_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting host total function number\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.host_total_function; +} + +u16 sphw_func_max_qnum(void *hwdev) +{ + struct sphw_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting function max queue number\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.nic_cap.max_sqs; +} + +u16 sphw_func_max_nic_qnum(void *hwdev) +{ + struct sphw_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting function max queue number\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.nic_cap.max_sqs; +} + +u8 sphw_ep_id(void *hwdev) +{ + struct sphw_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting ep id\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.ep_id; +} + +u8 sphw_er_id(void *hwdev) +{ + struct sphw_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting er id\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.er_id; +} + +u8 sphw_physical_port_id(void *hwdev) +{ + struct sphw_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting physical port id\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.port_id; +} + +u16 sphw_func_max_vf(void *hwdev) +{ + struct sphw_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting max vf number\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.max_vf; +} + +u8 sphw_cos_valid_bitmap(void *hwdev) +{ + struct sphw_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting cos valid bitmap\n"); + return 0; + } + return (u8)(dev->cfg_mgmt->svc_cap.cos_valid_bitmap); +} + +void sphw_shutdown_hwdev(void *hwdev) +{ + /* to do : if IS_SLAVE_HOST*/ +} + +u32 sphw_host_pf_num(void *hwdev) +{ + struct sphw_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting pf number capability\n"); + return 0; + } + + return dev->cfg_mgmt->svc_cap.pf_num; +} + +u8 sphw_flexq_en(void *hwdev) +{ + struct sphw_hwdev *dev = hwdev; + + if (!hwdev) + return 0; + + return dev->cfg_mgmt->svc_cap.flexq_en; +} + diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw_cfg.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw_cfg.h new file mode 100644 index 000000000000..c4d47f0cae36 --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw_cfg.h @@ -0,0 +1,327 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#ifndef SPHW_HW_CFG_H +#define SPHW_HW_CFG_H + +#include <linux/types.h> + +#define CFG_MAX_CMD_TIMEOUT 30000 /* ms */ + +enum { + CFG_FREE = 0, + CFG_BUSY = 1 +}; + +/* start position for CEQs allocation, Max number of CEQs is 32 */ +enum { + CFG_RDMA_CEQ_BASE = 0 +}; + +enum { + SF_SVC_FT_BIT = (1 << 0), + SF_SVC_RDMA_BIT = (1 << 1), +}; + +/* RDMA resource */ +#define K_UNIT BIT(10) +#define M_UNIT BIT(20) +#define G_UNIT BIT(30) + +/* L2NIC */ +#define SPHW_CFG_MAX_QP 256 + +/* 
RDMA */ +#define RDMA_RSVD_QPS 2 +#define ROCE_MAX_WQES (32 * K_UNIT - 1) +#define IWARP_MAX_WQES (8 * K_UNIT) + +#define RDMA_MAX_SQ_SGE 32 + +#define ROCE_MAX_RQ_SGE 16 + +#define RDMA_MAX_SQ_DESC_SZ (1 * K_UNIT) + +/* (256B(cache_line_len) - 16B(ctrl_seg_len) - 64B(max_task_seg_len)) */ +#define ROCE_MAX_SQ_INLINE_DATA_SZ 912 + +#define ROCE_MAX_RQ_DESC_SZ 256 + +#define ROCE_QPC_ENTRY_SZ 512 + +#define WQEBB_SZ 64 + +#define ROCE_RDMARC_ENTRY_SZ 32 +#define ROCE_MAX_QP_INIT_RDMA 128 +#define ROCE_MAX_QP_DEST_RDMA 128 + +#define ROCE_MAX_SRQ_WQES (16 * K_UNIT - 1) +#define ROCE_RSVD_SRQS 0 +#define ROCE_MAX_SRQ_SGE 15 +#define ROCE_SRQC_ENTERY_SZ 64 + +#define RDMA_MAX_CQES (8 * M_UNIT - 1) +#define RDMA_RSVD_CQS 0 + +#define RDMA_CQC_ENTRY_SZ 128 + +#define RDMA_CQE_SZ 64 +#define RDMA_RSVD_MRWS 128 +#define RDMA_MPT_ENTRY_SZ 64 +#define RDMA_NUM_MTTS (1 * G_UNIT) +#define LOG_MTT_SEG 5 +#define MTT_ENTRY_SZ 8 +#define LOG_RDMARC_SEG 3 + +#define LOCAL_ACK_DELAY 15 +#define RDMA_NUM_PORTS 1 +#define ROCE_MAX_MSG_SZ (2 * G_UNIT) + +#define DB_PAGE_SZ (4 * K_UNIT) +#define DWQE_SZ 256 + +#define NUM_PD (128 * K_UNIT) +#define RSVD_PD 0 + +#define MAX_XRCDS (64 * K_UNIT) +#define RSVD_XRCDS 0 + +#define MAX_GID_PER_PORT 128 +#define GID_ENTRY_SZ 32 +#define RSVD_LKEY ((RDMA_RSVD_MRWS - 1) << 8) +#define NUM_COMP_VECTORS 32 +#define PAGE_SZ_CAP ((1UL << 12) | (1UL << 16) | (1UL << 21)) +#define ROCE_MODE 1 + +#define MAX_FRPL_LEN 511 +#define MAX_PKEYS 1 + +/* ToE */ +#define TOE_PCTX_SZ 1024 +#define TOE_CQC_SZ 64 + +/* IoE */ +#define IOE_PCTX_SZ 512 + +/* FC */ +#define FC_PCTX_SZ 256 +#define FC_CCTX_SZ 256 +#define FC_SQE_SZ 128 +#define FC_SCQC_SZ 64 +#define FC_SCQE_SZ 64 +#define FC_SRQC_SZ 64 +#define FC_SRQE_SZ 32 + +/* OVS */ +#define OVS_PCTX_SZ 512 + +/* IPsec */ +#define IPSEC_SACTX_SZ 512 + +struct dev_sf_svc_attr { + bool ft_en; /* business enable flag (not include RDMA) */ + bool ft_pf_en; /* In FPGA Test VF resource is in PF or not, + * 0 - VF, 1 - PF, VF doesn't need this bit. + */ + bool rdma_en; + bool rdma_pf_en;/* In FPGA Test VF RDMA resource is in PF or not, + * 0 - VF, 1 - PF, VF doesn't need this bit. 
+ */ +}; + +struct host_shared_resource_cap { + u32 host_pctxs; /* Parent Context max 1M, IOE and FCoE max 8K flows */ + u32 host_cctxs; /* Child Context: max 8K */ + u32 host_scqs; /* shared CQ, chip interface module uses 1 SCQ + * TOE/IOE/FCoE each uses 1 SCQ + * RoCE/IWARP uses multiple SCQs + * So 6 SCQ least + */ + u32 host_srqs; /* SRQ number: 256K */ + u32 host_mpts; /* MR number:1M */ +}; + +enum intr_type { + INTR_TYPE_MSIX, + INTR_TYPE_MSI, + INTR_TYPE_INT, + INTR_TYPE_NONE, + /* PXE,OVS need single thread processing, + * synchronization messages must use poll wait mechanism interface + */ +}; + +/* service type relates define */ +enum cfg_svc_type_en { + CFG_SVC_NIC_BIT0 = (1 << 0), + CFG_SVC_ROCE_BIT1 = (1 << 1), + CFG_SVC_VBS_BIT2 = (1 << 2), + CFG_SVC_TOE_BIT3 = (1 << 3), + CFG_SVC_IPSEC_BIT4 = (1 << 4), + CFG_SVC_FC_BIT5 = (1 << 5), + CFG_SVC_VIRTIO_BIT6 = (1 << 6), + CFG_SVC_OVS_BIT7 = (1 << 7), + CFG_SVC_RSV2_BIT8 = (1 << 8), + CFG_SVC_IOE_BIT9 = (1 << 9), + + CFG_SVC_FT_EN = (CFG_SVC_VBS_BIT2 | CFG_SVC_TOE_BIT3 | + CFG_SVC_IPSEC_BIT4 | CFG_SVC_FC_BIT5 | + CFG_SVC_VIRTIO_BIT6 | CFG_SVC_OVS_BIT7 | + CFG_SVC_IOE_BIT9), + CFG_SVC_RDMA_EN = CFG_SVC_ROCE_BIT1 +}; + +/* device capability */ +struct service_cap { + struct dev_sf_svc_attr sf_svc_attr; + enum cfg_svc_type_en svc_type; /* user input service type */ + enum cfg_svc_type_en chip_svc_type; /* HW supported service type */ + + u8 host_id; + u8 ep_id; + u8 er_id; /* PF/VF's ER */ + u8 port_id; /* PF/VF's physical port */ + + /* Host global resources */ + u16 host_total_function; + u8 pf_num; + u8 pf_id_start; + u16 vf_num; /* max numbers of vf in current host */ + u16 vf_id_start; + u8 host_oq_id_mask_val; + + u8 flexq_en; + u8 cos_valid_bitmap; + u16 max_vf; /* max VF number that PF supported */ + + /* DO NOT get interrupt_type from firmware */ + enum intr_type interrupt_type; + + bool sf_en; /* stateful business status */ + u8 timer_en; /* 0:disable, 1:enable */ + u8 bloomfilter_en; /* 0:disable, 1:enable*/ + + /* For test */ + u32 test_mode; + u32 test_qpc_num; + u32 test_qpc_resvd_num; + u32 test_page_size_reorder; + bool test_xid_alloc_mode; + bool test_gpa_check_enable; + u8 test_qpc_alloc_mode; + u8 test_scqc_alloc_mode; + + u32 test_max_conn_num; + u32 test_max_cache_conn_num; + u32 test_scqc_num; + u32 test_mpt_num; + u32 test_scq_resvd_num; + u32 test_mpt_recvd_num; + u32 test_hash_num; + u32 test_reorder_num; + + u32 max_connect_num; /* PF/VF maximum connection number(1M) */ + /* The maximum connections which can be stick to cache memory, max 1K */ + u16 max_stick2cache_num; + /* Starting address in cache memory for bloom filter, 64Bytes aligned */ + u16 bfilter_start_addr; + /* Length for bloom filter, aligned on 64Bytes. The size is length*64B. + * Bloom filter memory size + 1 must be power of 2. + * The maximum memory size of bloom filter is 4M + */ + u16 bfilter_len; + /* The size of hash bucket tables, align on 64 entries. + * Be used to AND (&) the hash value. Bucket Size +1 must be power of 2. 
+ * The maximum number of hash bucket is 4M + */ + u16 hash_bucket_num; + + struct host_shared_resource_cap shared_res_cap; /* shared capability */ + struct nic_service_cap nic_cap; /* NIC capability */ + struct rdma_service_cap rdma_cap; /* RDMA capability */ + struct fc_service_cap fc_cap; /* FC capability */ + struct toe_service_cap toe_cap; /* ToE capability */ + struct ovs_service_cap ovs_cap; /* OVS capability */ + struct ipsec_service_cap ipsec_cap; /* IPsec capability */ +}; + +struct cfg_eq { + enum sphw_service_type type; + int eqn; + int free; /* 1 - alocated, 0- freed */ +}; + +struct cfg_eq_info { + struct cfg_eq *eq; + + u8 num_ceq; + + u8 num_ceq_remain; + + /* mutex used for allocate EQs */ + struct mutex eq_mutex; +}; + +struct irq_alloc_info_st { + enum sphw_service_type type; + int free; /* 1 - alocated, 0- freed */ + struct irq_info info; +}; + +struct cfg_irq_info { + struct irq_alloc_info_st *alloc_info; + u16 num_total; + u16 num_irq_remain; + u16 num_irq_hw; /* device max irq number */ + + /* mutex used for allocate EQs */ + struct mutex irq_mutex; +}; + +#define VECTOR_THRESHOLD 2 + +struct cfg_mgmt_info { + struct sphw_hwdev *hwdev; + struct service_cap svc_cap; + struct cfg_eq_info eq_info; /* EQ */ + struct cfg_irq_info irq_param_info; /* IRQ */ + u32 func_seq_num; /* temporary */ +}; + +#define IS_NIC_TYPE(dev) \ + (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SVC_NIC_BIT0) +#define IS_ROCE_TYPE(dev) \ + (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SVC_ROCE_BIT1) +#define IS_FCOE_TYPE(dev) \ + (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SVC_FCOE_BIT2) +#define IS_TOE_TYPE(dev) \ + (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SVC_TOE_BIT3) +#define IS_IPSEC_TYPE(dev) \ + (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SVC_IPSEC_BIT4) +#define IS_FC_TYPE(dev) \ + (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SVC_FC_BIT5) +#define IS_FIC_TYPE(dev) \ + (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SVC_FIC_BIT6) +#define IS_OVS_TYPE(dev) \ + (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SVC_OVS_BIT7) +#define IS_ACL_TYPE(dev) \ + (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SVC_ACL_BIT8) +#define IS_IOE_TYPE(dev) \ + (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SVC_IOE_BIT9) +#define IS_FT_TYPE(dev) \ + (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SVC_FT_EN) +#define IS_RDMA_TYPE(dev) \ + (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SVC_RDMA_EN) +#define IS_RDMA_ENABLE(dev) \ + ((dev)->cfg_mgmt->svc_cap.sf_svc_attr.rdma_en) + +int init_cfg_mgmt(struct sphw_hwdev *dev); + +void free_cfg_mgmt(struct sphw_hwdev *dev); + +int init_capability(struct sphw_hwdev *dev); + +void free_capability(struct sphw_hwdev *dev); + +#endif + diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw_comm.c b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw_comm.c new file mode 100644 index 000000000000..1629d1e480e2 --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw_comm.c @@ -0,0 +1,1253 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/types.h> +#include <linux/delay.h> +#include <linux/module.h> +#include <linux/completion.h> +#include <linux/semaphore.h> +#include <linux/interrupt.h> + +#include "sphw_crm.h" +#include "sphw_hw.h" +#include "sphw_common.h" +#include "sphw_csr.h" +#include "sphw_hwdev.h" +#include "sphw_hwif.h" +#include "sphw_wq.h" +#include 
"sphw_cmdq.h" +#include "sphw_comm_msg_intf.h" +#include "sphw_hw_comm.h" + +#define SPHW_MSIX_CNT_LLI_TIMER_SHIFT 0 +#define SPHW_MSIX_CNT_LLI_CREDIT_SHIFT 8 +#define SPHW_MSIX_CNT_COALESC_TIMER_SHIFT 8 +#define SPHW_MSIX_CNT_PENDING_SHIFT 8 +#define SPHW_MSIX_CNT_RESEND_TIMER_SHIFT 29 + +#define SPHW_MSIX_CNT_LLI_TIMER_MASK 0xFFU +#define SPHW_MSIX_CNT_LLI_CREDIT_MASK 0xFFU +#define SPHW_MSIX_CNT_COALESC_TIMER_MASK 0xFFU +#define SPHW_MSIX_CNT_PENDING_MASK 0x1FU +#define SPHW_MSIX_CNT_RESEND_TIMER_MASK 0x7U + +#define SPHW_MSIX_CNT_SET(val, member) \ + (((val) & SPHW_MSIX_CNT_##member##_MASK) << \ + SPHW_MSIX_CNT_##member##_SHIFT) + +#define DEFAULT_RX_BUF_SIZE ((u16)0xB) + +enum sphw_rx_buf_size { + SPHW_RX_BUF_SIZE_32B = 0x20, + SPHW_RX_BUF_SIZE_64B = 0x40, + SPHW_RX_BUF_SIZE_96B = 0x60, + SPHW_RX_BUF_SIZE_128B = 0x80, + SPHW_RX_BUF_SIZE_192B = 0xC0, + SPHW_RX_BUF_SIZE_256B = 0x100, + SPHW_RX_BUF_SIZE_384B = 0x180, + SPHW_RX_BUF_SIZE_512B = 0x200, + SPHW_RX_BUF_SIZE_768B = 0x300, + SPHW_RX_BUF_SIZE_1K = 0x400, + SPHW_RX_BUF_SIZE_1_5K = 0x600, + SPHW_RX_BUF_SIZE_2K = 0x800, + SPHW_RX_BUF_SIZE_3K = 0xC00, + SPHW_RX_BUF_SIZE_4K = 0x1000, + SPHW_RX_BUF_SIZE_8K = 0x2000, + SPHW_RX_BUF_SIZE_16K = 0x4000, +}; + +const int sphw_hw_rx_buf_size[] = { + SPHW_RX_BUF_SIZE_32B, + SPHW_RX_BUF_SIZE_64B, + SPHW_RX_BUF_SIZE_96B, + SPHW_RX_BUF_SIZE_128B, + SPHW_RX_BUF_SIZE_192B, + SPHW_RX_BUF_SIZE_256B, + SPHW_RX_BUF_SIZE_384B, + SPHW_RX_BUF_SIZE_512B, + SPHW_RX_BUF_SIZE_768B, + SPHW_RX_BUF_SIZE_1K, + SPHW_RX_BUF_SIZE_1_5K, + SPHW_RX_BUF_SIZE_2K, + SPHW_RX_BUF_SIZE_3K, + SPHW_RX_BUF_SIZE_4K, + SPHW_RX_BUF_SIZE_8K, + SPHW_RX_BUF_SIZE_16K, +}; + +static inline int comm_msg_to_mgmt_sync(void *hwdev, u16 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + return sphw_msg_to_mgmt_sync(hwdev, SPHW_MOD_COMM, cmd, buf_in, in_size, buf_out, + out_size, 0, SPHW_CHANNEL_COMM); +} + +static inline int comm_msg_to_mgmt_sync_ch(void *hwdev, u16 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size, u16 channel) +{ + return sphw_msg_to_mgmt_sync(hwdev, SPHW_MOD_COMM, cmd, buf_in, + in_size, buf_out, out_size, 0, channel); +} + +int sphw_get_interrupt_cfg(void *dev, struct interrupt_info *info, u16 channel) +{ + struct sphw_hwdev *hwdev = dev; + struct comm_cmd_msix_config msix_cfg; + u16 out_size = sizeof(msix_cfg); + int err; + + if (!hwdev || !info) + return -EINVAL; + + memset(&msix_cfg, 0, sizeof(msix_cfg)); + msix_cfg.func_id = sphw_global_func_id(hwdev); + msix_cfg.msix_index = info->msix_index; + msix_cfg.opcode = MGMT_MSG_CMD_OP_GET; + + err = comm_msg_to_mgmt_sync_ch(hwdev, COMM_MGMT_CMD_CFG_MSIX_CTRL_REG, + &msix_cfg, sizeof(msix_cfg), &msix_cfg, + &out_size, channel); + if (err || !out_size || msix_cfg.head.status) { + sdk_err(hwdev->dev_hdl, "Failed to get interrupt config, err: %d, status: 0x%x, out size: 0x%x, channel: 0x%x\n", + err, msix_cfg.head.status, out_size, channel); + return -EINVAL; + } + + info->lli_credit_limit = msix_cfg.lli_credit_cnt; + info->lli_timer_cfg = msix_cfg.lli_timer_cnt; + info->pending_limt = msix_cfg.pending_cnt; + info->coalesc_timer_cfg = msix_cfg.coalesce_timer_cnt; + info->resend_timer_cfg = msix_cfg.resend_timer_cnt; + + return 0; +} + +int sphw_set_interrupt_cfg_direct(void *hwdev, struct interrupt_info *info, u16 channel) +{ + struct comm_cmd_msix_config msix_cfg; + u16 out_size = sizeof(msix_cfg); + int err; + + if (!hwdev) + return -EINVAL; + + memset(&msix_cfg, 0, sizeof(msix_cfg)); + msix_cfg.func_id = sphw_global_func_id(hwdev); + 
msix_cfg.msix_index = (u16)info->msix_index; + msix_cfg.opcode = MGMT_MSG_CMD_OP_SET; + + msix_cfg.lli_credit_cnt = info->lli_credit_limit; + msix_cfg.lli_timer_cnt = info->lli_timer_cfg; + msix_cfg.pending_cnt = info->pending_limt; + msix_cfg.coalesce_timer_cnt = info->coalesc_timer_cfg; + msix_cfg.resend_timer_cnt = info->resend_timer_cfg; + + err = comm_msg_to_mgmt_sync_ch(hwdev, COMM_MGMT_CMD_CFG_MSIX_CTRL_REG, + &msix_cfg, sizeof(msix_cfg), &msix_cfg, + &out_size, channel); + if (err || !out_size || msix_cfg.head.status) { + sdk_err(((struct sphw_hwdev *)hwdev)->dev_hdl, + "Failed to set interrupt config, err: %d, status: 0x%x, out size: 0x%x, channel: 0x%x\n", + err, msix_cfg.head.status, out_size, channel); + return -EINVAL; + } + + return 0; +} + +int sphw_set_interrupt_cfg(void *dev, struct interrupt_info info, u16 channel) +{ + struct interrupt_info temp_info; + struct sphw_hwdev *hwdev = dev; + int err; + + if (!hwdev) + return -EINVAL; + + temp_info.msix_index = info.msix_index; + + err = sphw_get_interrupt_cfg(hwdev, &temp_info, channel); + if (err) + return -EINVAL; + + if (!info.lli_set) { + info.lli_credit_limit = temp_info.lli_credit_limit; + info.lli_timer_cfg = temp_info.lli_timer_cfg; + } + + if (!info.interrupt_coalesc_set) { + info.pending_limt = temp_info.pending_limt; + info.coalesc_timer_cfg = temp_info.coalesc_timer_cfg; + info.resend_timer_cfg = temp_info.resend_timer_cfg; + } + + return sphw_set_interrupt_cfg_direct(hwdev, &info, channel); +} + +void sphw_misx_intr_clear_resend_bit(void *hwdev, u16 msix_idx, u8 clear_resend_en) +{ + struct sphw_hwif *hwif = NULL; + u32 msix_ctrl = 0, addr; + + if (!hwdev) + return; + + hwif = ((struct sphw_hwdev *)hwdev)->hwif; + + msix_ctrl = SPHW_MSI_CLR_INDIR_SET(msix_idx, SIMPLE_INDIR_IDX) | + SPHW_MSI_CLR_INDIR_SET(clear_resend_en, RESEND_TIMER_CLR); + + addr = SPHW_CSR_FUNC_MSI_CLR_WR_ADDR; + sphw_hwif_write_reg(hwif, addr, msix_ctrl); +} + +int sphw_set_wq_page_size(void *hwdev, u16 func_idx, u32 page_size, u16 channel) +{ + struct comm_cmd_wq_page_size page_size_info; + u16 out_size = sizeof(page_size_info); + int err; + + memset(&page_size_info, 0, sizeof(page_size_info)); + page_size_info.func_id = func_idx; + page_size_info.page_size = SPHW_PAGE_SIZE_HW(page_size); + page_size_info.opcode = MGMT_MSG_CMD_OP_SET; + + err = comm_msg_to_mgmt_sync_ch(hwdev, COMM_MGMT_CMD_CFG_PAGESIZE, + &page_size_info, sizeof(page_size_info), + &page_size_info, &out_size, channel); + if (err || !out_size || page_size_info.head.status) { + sdk_err(((struct sphw_hwdev *)hwdev)->dev_hdl, "Failed to set wq page size, err: %d, status: 0x%x, out_size: 0x%0x, channel: 0x%x\n", + err, page_size_info.head.status, out_size, channel); + return -EFAULT; + } + + return 0; +} + +int sphw_func_reset(void *dev, u16 func_id, u64 reset_flag, u16 channel) +{ + struct comm_cmd_func_reset func_reset; + struct sphw_hwdev *hwdev = dev; + u16 out_size = sizeof(func_reset); + int err = 0; + + if (!dev) { + pr_err("Invalid para: dev is null.\n"); + return -EINVAL; + } + + sdk_info(hwdev->dev_hdl, "Function is reset, flag: 0x%llx, channel:0x%x\n", + reset_flag, channel); + + memset(&func_reset, 0, sizeof(func_reset)); + func_reset.func_id = func_id; + func_reset.reset_flag = reset_flag; + err = comm_msg_to_mgmt_sync_ch(hwdev, COMM_MGMT_CMD_FUNC_RESET, + &func_reset, sizeof(func_reset), + &func_reset, &out_size, channel); + if (err || !out_size || func_reset.head.status) { + sdk_err(hwdev->dev_hdl, "Failed to reset func resources, reset_flag 0x%llx, err: %d, status: 
0x%x, out_size: 0x%x\n", + reset_flag, err, func_reset.head.status, out_size); + return -EIO; + } + + return 0; +} + +static u16 get_hw_rx_buf_size(int rx_buf_sz) +{ + u16 num_hw_types = + sizeof(sphw_hw_rx_buf_size) / + sizeof(sphw_hw_rx_buf_size[0]); + u16 i; + + for (i = 0; i < num_hw_types; i++) { + if (sphw_hw_rx_buf_size[i] == rx_buf_sz) + return i; + } + + pr_err("Chip can't support rx buf size of %d\n", rx_buf_sz); + + return DEFAULT_RX_BUF_SIZE; /* default 2K */ +} + +int sphw_set_root_ctxt(void *hwdev, u32 rq_depth, u32 sq_depth, int rx_buf_sz, u16 channel) +{ + struct comm_cmd_root_ctxt root_ctxt; + u16 out_size = sizeof(root_ctxt); + int err; + + if (!hwdev) + return -EINVAL; + + memset(&root_ctxt, 0, sizeof(root_ctxt)); + root_ctxt.func_id = sphw_global_func_id(hwdev); + + root_ctxt.set_cmdq_depth = 0; + root_ctxt.cmdq_depth = 0; + + root_ctxt.lro_en = 1; + + root_ctxt.rq_depth = (u16)ilog2(rq_depth); + root_ctxt.rx_buf_sz = get_hw_rx_buf_size(rx_buf_sz); + root_ctxt.sq_depth = (u16)ilog2(sq_depth); + + err = comm_msg_to_mgmt_sync_ch(hwdev, COMM_MGMT_CMD_SET_VAT, + &root_ctxt, sizeof(root_ctxt), + &root_ctxt, &out_size, channel); + if (err || !out_size || root_ctxt.head.status) { + sdk_err(((struct sphw_hwdev *)hwdev)->dev_hdl, + "Failed to set root context, err: %d, status: 0x%x, out_size: 0x%x, channel: 0x%x\n", + err, root_ctxt.head.status, out_size, channel); + return -EFAULT; + } + + return 0; +} + +int sphw_clean_root_ctxt(void *hwdev, u16 channel) +{ + struct comm_cmd_root_ctxt root_ctxt; + u16 out_size = sizeof(root_ctxt); + int err; + + if (!hwdev) + return -EINVAL; + + memset(&root_ctxt, 0, sizeof(root_ctxt)); + root_ctxt.func_id = sphw_global_func_id(hwdev); + + err = comm_msg_to_mgmt_sync_ch(hwdev, COMM_MGMT_CMD_SET_VAT, + &root_ctxt, sizeof(root_ctxt), + &root_ctxt, &out_size, channel); + if (err || !out_size || root_ctxt.head.status) { + sdk_err(((struct sphw_hwdev *)hwdev)->dev_hdl, + "Failed to set root context, err: %d, status: 0x%x, out_size: 0x%x, channel: 0x%x\n", + err, root_ctxt.head.status, out_size, channel); + return -EFAULT; + } + + return 0; +} + +int sphw_set_cmdq_depth(void *hwdev, u16 cmdq_depth) +{ + struct comm_cmd_root_ctxt root_ctxt; + u16 out_size = sizeof(root_ctxt); + int err; + + memset(&root_ctxt, 0, sizeof(root_ctxt)); + root_ctxt.func_id = sphw_global_func_id(hwdev); + + root_ctxt.set_cmdq_depth = 1; + root_ctxt.cmdq_depth = (u8)ilog2(cmdq_depth); + + err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SET_VAT, &root_ctxt, + sizeof(root_ctxt), &root_ctxt, &out_size); + if (err || !out_size || root_ctxt.head.status) { + sdk_err(((struct sphw_hwdev *)hwdev)->dev_hdl, + "Failed to set cmdq depth, err: %d, status: 0x%x, out_size: 0x%x\n", + err, root_ctxt.head.status, out_size); + return -EFAULT; + } + + return 0; +} + +int sphw_set_cmdq_ctxt(struct sphw_hwdev *hwdev, u8 cmdq_id, struct cmdq_ctxt_info *ctxt) +{ + struct comm_cmd_cmdq_ctxt cmdq_ctxt; + u16 out_size = sizeof(cmdq_ctxt); + int err; + + memset(&cmdq_ctxt, 0, sizeof(cmdq_ctxt)); + memcpy(&cmdq_ctxt.ctxt, ctxt, sizeof(*ctxt)); + cmdq_ctxt.func_id = sphw_global_func_id(hwdev); + cmdq_ctxt.cmdq_id = cmdq_id; + + err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SET_CMDQ_CTXT, + &cmdq_ctxt, sizeof(cmdq_ctxt), + &cmdq_ctxt, &out_size); + if (err || !out_size || cmdq_ctxt.head.status) { + sdk_err(hwdev->dev_hdl, "Failed to set cmdq ctxt, err: %d, status: 0x%x, out_size: 0x%x\n", + err, cmdq_ctxt.head.status, out_size); + return -EFAULT; + } + + return 0; +} + +int 
sphw_set_ceq_ctrl_reg(struct sphw_hwdev *hwdev, u16 q_id, u32 ctrl0, u32 ctrl1) +{ + struct comm_cmd_ceq_ctrl_reg ceq_ctrl; + u16 out_size = sizeof(ceq_ctrl); + int err; + + memset(&ceq_ctrl, 0, sizeof(ceq_ctrl)); + ceq_ctrl.func_id = sphw_global_func_id(hwdev); + ceq_ctrl.q_id = q_id; + ceq_ctrl.ctrl0 = ctrl0; + ceq_ctrl.ctrl1 = ctrl1; + + err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SET_CEQ_CTRL_REG, + &ceq_ctrl, sizeof(ceq_ctrl), + &ceq_ctrl, &out_size); + if (err || !out_size || ceq_ctrl.head.status) { + sdk_err(hwdev->dev_hdl, "Failed to set ceq %u ctrl reg, err: %d status: 0x%x, out_size: 0x%x\n", + q_id, err, ceq_ctrl.head.status, out_size); + return -EFAULT; + } + + return 0; +} + +int sphw_set_bdf_ctxt(void *hwdev, u8 bus, u8 device, u8 function) +{ + struct comm_cmd_bdf_info bdf_info; + u16 out_size = sizeof(bdf_info); + int err; + + if (!hwdev) + return -EINVAL; + + memset(&bdf_info, 0, sizeof(bdf_info)); + bdf_info.function_idx = sphw_global_func_id(hwdev); + bdf_info.bus = bus; + bdf_info.device = device; + bdf_info.function = function; + + err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SEND_BDF_INFO, + &bdf_info, sizeof(bdf_info), + &bdf_info, &out_size); + if (err || !out_size || bdf_info.head.status) { + sdk_err(((struct sphw_hwdev *)hwdev)->dev_hdl, + "Failed to set bdf info to MPU, err: %d, status: 0x%x, out_size: 0x%x\n", + err, bdf_info.head.status, out_size); + return -EIO; + } + + return 0; +} + +int sphw_sync_time(void *hwdev, u64 time) +{ + struct comm_cmd_sync_time time_info; + u16 out_size = sizeof(time_info); + int err; + + memset(&time_info, 0, sizeof(time_info)); + time_info.mstime = time; + err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SYNC_TIME, &time_info, + sizeof(time_info), &time_info, &out_size); + if (err || time_info.head.status || !out_size) { + sdk_err(((struct sphw_hwdev *)hwdev)->dev_hdl, + "Failed to sync time to mgmt, err: %d, status: 0x%x, out size: 0x%x\n", + err, time_info.head.status, out_size); + return -EIO; + } + + return 0; +} + +int sphw_get_fw_ver(void *hwdev, enum sphw_fw_ver_type type, u8 *mgmt_ver, + u8 version_size, u16 channel) +{ + struct comm_cmd_get_fw_version fw_ver; + struct sphw_hwdev *dev = hwdev; + u16 out_size = sizeof(fw_ver); + int err; + + if (!hwdev || !mgmt_ver) + return -EINVAL; + + memset(&fw_ver, 0, sizeof(fw_ver)); + fw_ver.fw_type = type; + err = comm_msg_to_mgmt_sync_ch(hwdev, COMM_MGMT_CMD_GET_FW_VERSION, + &fw_ver, sizeof(fw_ver), &fw_ver, + &out_size, channel); + if (err || !out_size || fw_ver.head.status) { + sdk_err(dev->dev_hdl, "Failed to get fw version, err: %d, status: 0x%x, out size: 0x%x, channel: 0x%x\n", + err, fw_ver.head.status, out_size, channel); + return -EIO; + } + + snprintf(mgmt_ver, version_size, "%s", fw_ver.ver); + + return 0; +} + +int sphw_get_mgmt_version(void *hwdev, u8 *mgmt_ver, u8 version_size, u16 channel) +{ + return sphw_get_fw_ver(hwdev, SPHW_FW_VER_TYPE_MPU, mgmt_ver, version_size, channel); +} + +int sphw_get_fw_version(void *hwdev, struct sphw_fw_version *fw_ver, u16 channel) +{ + int err; + + if (!hwdev || !fw_ver) + return -EINVAL; + + err = sphw_get_fw_ver(hwdev, SPHW_FW_VER_TYPE_MPU, fw_ver->mgmt_ver, + sizeof(fw_ver->mgmt_ver), channel); + if (err) + return err; + + err = sphw_get_fw_ver(hwdev, SPHW_FW_VER_TYPE_NPU, fw_ver->microcode_ver, + sizeof(fw_ver->microcode_ver), channel); + if (err) + return err; + + return sphw_get_fw_ver(hwdev, SPHW_FW_VER_TYPE_BOOT, fw_ver->boot_ver, + sizeof(fw_ver->boot_ver), channel); +} + +static int sphw_comm_features_nego(void 
*hwdev, u8 opcode, u64 *s_feature, u16 size) +{ + struct comm_cmd_feature_nego feature_nego; + u16 out_size = sizeof(feature_nego); + struct sphw_hwdev *dev = hwdev; + int err; + + if (!hwdev || !s_feature || size > COMM_MAX_FEATURE_QWORD) + return -EINVAL; + + memset(&feature_nego, 0, sizeof(feature_nego)); + feature_nego.func_id = sphw_global_func_id(hwdev); + feature_nego.opcode = opcode; + if (opcode == MGMT_MSG_CMD_OP_SET) + memcpy(feature_nego.s_feature, s_feature, (size * sizeof(u64))); + + err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_FEATURE_NEGO, + &feature_nego, sizeof(feature_nego), + &feature_nego, &out_size); + if (err || !out_size || feature_nego.head.status) { + sdk_err(dev->dev_hdl, "Failed to negotiate feature, err: %d, status: 0x%x, out size: 0x%x\n", + err, feature_nego.head.status, out_size); + return -EINVAL; + } + + if (opcode == MGMT_MSG_CMD_OP_GET) + memcpy(s_feature, feature_nego.s_feature, (size * sizeof(u64))); + + return 0; +} + +int sphw_get_comm_features(void *hwdev, u64 *s_feature, u16 size) +{ + return sphw_comm_features_nego(hwdev, MGMT_MSG_CMD_OP_GET, s_feature, size); +} + +int sphw_set_comm_features(void *hwdev, u64 *s_feature, u16 size) +{ + return sphw_comm_features_nego(hwdev, MGMT_MSG_CMD_OP_SET, s_feature, size); +} + +int sphw_func_tmr_bitmap_set(void *hwdev, bool en) +{ + struct comm_cmd_func_tmr_bitmap_op bitmap_op; + u16 out_size = sizeof(bitmap_op); + int err; + + if (!hwdev) + return -EINVAL; + + memset(&bitmap_op, 0, sizeof(bitmap_op)); + bitmap_op.func_id = sphw_global_func_id(hwdev); + bitmap_op.opcode = en ? FUNC_TMR_BITMAP_ENABLE : + FUNC_TMR_BITMAP_DISABLE; + + err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SET_FUNC_TMR_BITMAT, + &bitmap_op, sizeof(bitmap_op), + &bitmap_op, &out_size); + if (err || !out_size || bitmap_op.head.status) { + sdk_err(((struct sphw_hwdev *)hwdev)->dev_hdl, + "Failed to set timer bitmap, err: %d, status: 0x%x, out_size: 0x%x\n", + err, bitmap_op.head.status, out_size); + return -EFAULT; + } + + return 0; +} + +int ppf_ht_gpa_set(struct sphw_hwdev *hwdev, struct sphw_page_addr *pg0, struct sphw_page_addr *pg1) +{ + struct comm_cmd_ht_gpa ht_gpa_set; + u16 out_size = sizeof(ht_gpa_set); + int ret; + + memset(&ht_gpa_set, 0, sizeof(ht_gpa_set)); + pg0->virt_addr = dma_alloc_coherent(hwdev->dev_hdl, SPHW_HT_GPA_PAGE_SIZE, + &pg0->phys_addr, GFP_KERNEL); + if (!pg0->virt_addr) { + sdk_err(hwdev->dev_hdl, "Alloc pg0 page addr failed\n"); + return -EFAULT; + } + + pg1->virt_addr = dma_alloc_coherent(hwdev->dev_hdl, SPHW_HT_GPA_PAGE_SIZE, + &pg1->phys_addr, GFP_KERNEL); + if (!pg1->virt_addr) { + sdk_err(hwdev->dev_hdl, "Alloc pg1 page addr failed\n"); + return -EFAULT; + } + + ht_gpa_set.host_id = sphw_host_id(hwdev); + ht_gpa_set.page_pa0 = pg0->phys_addr; + ht_gpa_set.page_pa1 = pg1->phys_addr; + sdk_info(hwdev->dev_hdl, "PPF ht gpa set: page_addr0.pa=0x%llx, page_addr1.pa=0x%llx\n", + pg0->phys_addr, pg1->phys_addr); + ret = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SET_PPF_HT_GPA, + &ht_gpa_set, sizeof(ht_gpa_set), + &ht_gpa_set, &out_size); + if (ret || !out_size || ht_gpa_set.head.status) { + sdk_warn(hwdev->dev_hdl, "PPF ht gpa set failed, ret: %d, status: 0x%x, out_size: 0x%x\n", + ret, ht_gpa_set.head.status, out_size); + return -EFAULT; + } + + hwdev->page_pa0.phys_addr = pg0->phys_addr; + hwdev->page_pa0.virt_addr = pg0->virt_addr; + + hwdev->page_pa1.phys_addr = pg1->phys_addr; + hwdev->page_pa1.virt_addr = pg1->virt_addr; + + return 0; +} + +int sphw_ppf_ht_gpa_init(struct sphw_hwdev *hwdev) +{ + 
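+ /* Retry the HT GPA page setup a bounded number of times; pages left over from failed attempts are freed below */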
int ret; + int i; + int j; + int size; + + struct sphw_page_addr page_addr0[SPHW_PPF_HT_GPA_SET_RETRY_TIMES]; + struct sphw_page_addr page_addr1[SPHW_PPF_HT_GPA_SET_RETRY_TIMES]; + + size = SPHW_PPF_HT_GPA_SET_RETRY_TIMES * sizeof(page_addr0[0]); + memset(page_addr0, 0, size); + memset(page_addr1, 0, size); + + for (i = 0; i < SPHW_PPF_HT_GPA_SET_RETRY_TIMES; i++) { + ret = ppf_ht_gpa_set(hwdev, &page_addr0[i], &page_addr1[i]); + if (!ret) + break; + } + + for (j = 0; j < i; j++) { + if (page_addr0[j].virt_addr) { + dma_free_coherent(hwdev->dev_hdl, + SPHW_HT_GPA_PAGE_SIZE, + page_addr0[j].virt_addr, + page_addr0[j].phys_addr); + page_addr0[j].virt_addr = NULL; + } + if (page_addr1[j].virt_addr) { + dma_free_coherent(hwdev->dev_hdl, + SPHW_HT_GPA_PAGE_SIZE, + page_addr1[j].virt_addr, + page_addr1[j].phys_addr); + page_addr1[j].virt_addr = NULL; + } + } + + if (i >= SPHW_PPF_HT_GPA_SET_RETRY_TIMES) { + sdk_err(hwdev->dev_hdl, "PPF ht gpa init failed, retry times: %d\n", + i); + return -EFAULT; + } + + return 0; +} + +void sphw_ppf_ht_gpa_deinit(struct sphw_hwdev *hwdev) +{ + if (hwdev->page_pa0.virt_addr) { + dma_free_coherent(hwdev->dev_hdl, SPHW_HT_GPA_PAGE_SIZE, + hwdev->page_pa0.virt_addr, + hwdev->page_pa0.phys_addr); + hwdev->page_pa0.virt_addr = NULL; + } + + if (hwdev->page_pa1.virt_addr) { + dma_free_coherent(hwdev->dev_hdl, SPHW_HT_GPA_PAGE_SIZE, + hwdev->page_pa1.virt_addr, + hwdev->page_pa1.phys_addr); + hwdev->page_pa1.virt_addr = NULL; + } +} + +static int set_ppf_tmr_status(struct sphw_hwdev *hwdev, + enum ppf_tmr_status status) +{ + struct comm_cmd_ppf_tmr_op op; + u16 out_size = sizeof(op); + int err = 0; + + if (!hwdev) + return -EINVAL; + + memset(&op, 0, sizeof(op)); + + if (sphw_func_type(hwdev) != TYPE_PPF) + return -EFAULT; + + if (status == SPHW_PPF_TMR_FLAG_START) { + err = sphw_ppf_ht_gpa_init(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "PPF ht gpa init fail!\n"); + return -EFAULT; + } + } else { + sphw_ppf_ht_gpa_deinit(hwdev); + } + + op.opcode = status; + op.ppf_id = sphw_ppf_idx(hwdev); + + err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SET_PPF_TMR, &op, + sizeof(op), &op, &out_size); + if (err || !out_size || op.head.status) { + sdk_err(hwdev->dev_hdl, "Failed to set ppf timer, err: %d, status: 0x%x, out_size: 0x%x\n", + err, op.head.status, out_size); + return -EFAULT; + } + + return 0; +} + +int sphw_ppf_tmr_start(void *hwdev) +{ + if (!hwdev) { + pr_err("Hwdev pointer is NULL for starting ppf timer\n"); + return -EINVAL; + } + + return set_ppf_tmr_status(hwdev, SPHW_PPF_TMR_FLAG_START); +} + +int sphw_ppf_tmr_stop(void *hwdev) +{ + if (!hwdev) { + pr_err("Hwdev pointer is NULL for stop ppf timer\n"); + return -EINVAL; + } + + return set_ppf_tmr_status(hwdev, SPHW_PPF_TMR_FLAG_STOP); +} + +int mqm_eqm_try_alloc_mem(struct sphw_hwdev *hwdev, u32 page_size, + u32 page_num) +{ + struct sphw_page_addr *page_addr = hwdev->mqm_att.brm_srch_page_addr; + u32 valid_num = 0; + u32 flag = 1; + u32 i = 0; + + for (i = 0; i < page_num; i++) { + page_addr->virt_addr = + dma_alloc_coherent(hwdev->dev_hdl, page_size, + &page_addr->phys_addr, GFP_KERNEL); + if (!page_addr->virt_addr) { + flag = 0; + break; + } + valid_num++; + page_addr++; + } + + if (flag == 1) { + hwdev->mqm_att.page_size = page_size; + hwdev->mqm_att.page_num = page_num; + } else { + page_addr = hwdev->mqm_att.brm_srch_page_addr; + for (i = 0; i < valid_num; i++) { + dma_free_coherent(hwdev->dev_hdl, page_size, + page_addr->virt_addr, + page_addr->phys_addr); + page_addr++; + } + return -EFAULT; + } 
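+ /* all requested pages were allocated with this page size */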
+ + return 0; +} + +int mqm_eqm_alloc_page_mem(struct sphw_hwdev *hwdev) +{ + int ret = 0; + int page_num; + + /* apply for 2M page, page number is chunk_num/1024 */ + page_num = (hwdev->mqm_att.chunk_num + 1023) >> 10; + ret = mqm_eqm_try_alloc_mem(hwdev, 2 * 1024 * 1024, page_num); + if (!ret) { + sdk_info(hwdev->dev_hdl, "[mqm_eqm_init] Alloc page_size 2M OK\n"); + return 0; + } + + /* apply for 64KB page, page number is chunk_num/32 */ + page_num = (hwdev->mqm_att.chunk_num + 31) >> 5; + ret = mqm_eqm_try_alloc_mem(hwdev, 64 * 1024, page_num); + if (!ret) { + sdk_info(hwdev->dev_hdl, "[mqm_eqm_init] Alloc page_size 64K OK\n"); + return 0; + } + + /* apply for 4KB page, page number is chunk_num/2 */ + page_num = (hwdev->mqm_att.chunk_num + 1) >> 1; + ret = mqm_eqm_try_alloc_mem(hwdev, 4 * 1024, page_num); + if (!ret) { + sdk_info(hwdev->dev_hdl, "[mqm_eqm_init] Alloc page_size 4K OK\n"); + return 0; + } + + return ret; +} + +void mqm_eqm_free_page_mem(struct sphw_hwdev *hwdev) +{ + u32 i; + struct sphw_page_addr *page_addr; + u32 page_size; + + page_size = hwdev->mqm_att.page_size; + page_addr = hwdev->mqm_att.brm_srch_page_addr; + + for (i = 0; i < hwdev->mqm_att.page_num; i++) { + dma_free_coherent(hwdev->dev_hdl, page_size, + page_addr->virt_addr, page_addr->phys_addr); + page_addr++; + } +} + +int mqm_eqm_set_cfg_2_hw(struct sphw_hwdev *hwdev, u8 valid) +{ + struct comm_cmd_eqm_cfg info_eqm_cfg; + u16 out_size = sizeof(info_eqm_cfg); + int err; + + memset(&info_eqm_cfg, 0, sizeof(info_eqm_cfg)); + + info_eqm_cfg.host_id = sphw_host_id(hwdev); + info_eqm_cfg.page_size = hwdev->mqm_att.page_size; + info_eqm_cfg.valid = valid; + err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SET_MQM_CFG_INFO, + &info_eqm_cfg, sizeof(info_eqm_cfg), + &info_eqm_cfg, &out_size); + if (err || !out_size || info_eqm_cfg.head.status) { + sdk_err(hwdev->dev_hdl, "Failed to init func table, err: %d, status: 0x%x, out_size: 0x%x\n", + err, info_eqm_cfg.head.status, out_size); + return -EFAULT; + } + + return 0; +} + +#define EQM_DATA_BUF_SIZE 1024 +#define MQM_ATT_PAGE_NUM 128 + +int mqm_eqm_set_page_2_hw(struct sphw_hwdev *hwdev) +{ + struct comm_cmd_eqm_search_gpa *info = NULL; + struct sphw_page_addr *page_addr = NULL; + void *send_buf = NULL; + u16 send_buf_size; + u32 i; + u64 *gpa_hi52 = NULL; + u64 gpa; + u32 num; + u32 start_idx; + int err = 0; + u16 out_size; + u8 cmd; + + send_buf_size = sizeof(struct comm_cmd_eqm_search_gpa) + + EQM_DATA_BUF_SIZE; + send_buf = kzalloc(send_buf_size, GFP_KERNEL); + if (!send_buf) { + sdk_err(hwdev->dev_hdl, "Alloc virtual mem failed\r\n"); + return -EFAULT; + } + + page_addr = hwdev->mqm_att.brm_srch_page_addr; + info = (struct comm_cmd_eqm_search_gpa *)send_buf; + + gpa_hi52 = info->gpa_hi52; + num = 0; + start_idx = 0; + cmd = COMM_MGMT_CMD_SET_MQM_SRCH_GPA; + for (i = 0; i < hwdev->mqm_att.page_num; i++) { + /* gpa align to 4K, save gpa[31:12] */ + gpa = page_addr->phys_addr >> 12; + gpa_hi52[num] = gpa; + num++; + if (num == MQM_ATT_PAGE_NUM) { + info->num = num; + info->start_idx = start_idx; + info->host_id = sphw_host_id(hwdev); + out_size = send_buf_size; + err = comm_msg_to_mgmt_sync(hwdev, cmd, info, + (u16)send_buf_size, + info, &out_size); + if (MSG_TO_MGMT_SYNC_RETURN_ERR(err, out_size, + info->head.status)) { + sdk_err(hwdev->dev_hdl, "Set mqm srch gpa fail, err: %d, status: 0x%x, out_size: 0x%x\n", + err, info->head.status, out_size); + err = -EFAULT; + goto set_page_2_hw_end; + } + + gpa_hi52 = info->gpa_hi52; + num = 0; + start_idx = i + 1; + } + 
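+ /* advance to the next allocated search page */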
page_addr++; + } + + if (num != 0) { + info->num = num; + info->start_idx = start_idx; + info->host_id = sphw_host_id(hwdev); + out_size = send_buf_size; + err = comm_msg_to_mgmt_sync(hwdev, cmd, info, + (u16)send_buf_size, info, + &out_size); + if (MSG_TO_MGMT_SYNC_RETURN_ERR(err, out_size, + info->head.status)) { + sdk_err(hwdev->dev_hdl, "Set mqm srch gpa fail, err: %d, status: 0x%x, out_size: 0x%x\n", + err, info->head.status, out_size); + err = -EFAULT; + goto set_page_2_hw_end; + } + } + +set_page_2_hw_end: + kfree(send_buf); + return err; +} + +int mqm_eqm_init(struct sphw_hwdev *hwdev) +{ + struct comm_cmd_get_eqm_num info_eqm_fix; + u16 len = sizeof(info_eqm_fix); + int ret; + + if (hwdev->hwif->attr.func_type != TYPE_PPF) + return 0; + + memset(&info_eqm_fix, 0, sizeof(info_eqm_fix)); + + ret = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_GET_MQM_FIX_INFO, + &info_eqm_fix, sizeof(info_eqm_fix), + &info_eqm_fix, &len); + if (ret || !len || info_eqm_fix.head.status) { + sdk_err(hwdev->dev_hdl, "Failed to get mqm fix info, err: %d, status: 0x%x, out_size: 0x%x\n", + ret, info_eqm_fix.head.status, len); + return -EFAULT; + } + sdk_info(hwdev->dev_hdl, "Get chunk_num: 0x%x, search_gpa_num: 0x%08x\n", + info_eqm_fix.chunk_num, info_eqm_fix.search_gpa_num); + if (!(info_eqm_fix.chunk_num)) + return 0; + + hwdev->mqm_att.chunk_num = info_eqm_fix.chunk_num; + hwdev->mqm_att.search_gpa_num = info_eqm_fix.search_gpa_num; + hwdev->mqm_att.page_size = 0; + hwdev->mqm_att.page_num = 0; + + hwdev->mqm_att.brm_srch_page_addr = + kcalloc(hwdev->mqm_att.chunk_num, + sizeof(struct sphw_page_addr), GFP_KERNEL); + if (!(hwdev->mqm_att.brm_srch_page_addr)) { + sdk_err(hwdev->dev_hdl, "Alloc virtual mem failed\n"); + return -EFAULT; + } + + ret = mqm_eqm_alloc_page_mem(hwdev); + if (ret) { + sdk_err(hwdev->dev_hdl, "Alloc eqm page mem failed\n"); + goto err_page; + } + + ret = mqm_eqm_set_page_2_hw(hwdev); + if (ret) { + sdk_err(hwdev->dev_hdl, "Set page to hw failed\n"); + goto err_ecmd; + } + + ret = mqm_eqm_set_cfg_2_hw(hwdev, 1); + if (ret) { + sdk_err(hwdev->dev_hdl, "Set cfg to hw failed\n"); + goto err_ecmd; + } + + return 0; + +err_ecmd: + mqm_eqm_free_page_mem(hwdev); + +err_page: + kfree(hwdev->mqm_att.brm_srch_page_addr); + + return ret; +} + +void mqm_eqm_deinit(struct sphw_hwdev *hwdev) +{ + int ret; + + if (hwdev->hwif->attr.func_type != TYPE_PPF) + return; + + if (!(hwdev->mqm_att.chunk_num)) + return; + + mqm_eqm_free_page_mem(hwdev); + kfree(hwdev->mqm_att.brm_srch_page_addr); + + ret = mqm_eqm_set_cfg_2_hw(hwdev, 0); + if (ret) { + sdk_err(hwdev->dev_hdl, "Set mqm eqm cfg to chip fail! 
err: %d\n", + ret); + return; + } + + hwdev->mqm_att.chunk_num = 0; + hwdev->mqm_att.search_gpa_num = 0; + hwdev->mqm_att.page_num = 0; + hwdev->mqm_att.page_size = 0; +} + +int sphw_ppf_ext_db_init(void *dev) +{ + struct sphw_hwdev *hwdev = dev; + int ret; + + /* IS OVS MODE SURPORT EXT DB NEEDED */ + + ret = mqm_eqm_init(hwdev); + if (ret) { + sdk_err(hwdev->dev_hdl, "MQM eqm init fail!\n"); + return -EFAULT; + } + sdk_info(hwdev->dev_hdl, "ppf_ext_db_init ok\r\n"); + + return 0; +} + +int sphw_ppf_ext_db_deinit(void *dev) +{ + struct sphw_hwdev *hwdev = dev; + + if (!dev) + return -EINVAL; + + if (hwdev->hwif->attr.func_type != TYPE_PPF) + return 0; + + mqm_eqm_deinit(hwdev); + + return 0; +} + +#define SPHW_FLR_TIMEOUT 1000 + +static enum sphw_wait_return check_flr_finish_handler(void *priv_data) +{ + struct sphw_hwif *hwif = priv_data; + enum sphw_pf_status status; + + status = sphw_get_pf_status(hwif); + if (status == SPHW_PF_STATUS_FLR_FINISH_FLAG) { + sphw_set_pf_status(hwif, SPHW_PF_STATUS_ACTIVE_FLAG); + return WAIT_PROCESS_CPL; + } + + return WAIT_PROCESS_WAITING; +} + +static int wait_for_flr_finish(struct sphw_hwif *hwif) +{ + return sphw_wait_for_timeout(hwif, check_flr_finish_handler, + SPHW_FLR_TIMEOUT, 10 * USEC_PER_MSEC); +} + +#define SPHW_WAIT_CMDQ_IDLE_TIMEOUT 5000 + +static enum sphw_wait_return check_cmdq_stop_handler(void *priv_data) +{ + struct sphw_hwdev *hwdev = priv_data; + struct sphw_cmdqs *cmdqs = hwdev->cmdqs; + enum sphw_cmdq_type cmdq_type; + + /* Stop waiting when card unpresent */ + if (!hwdev->chip_present_flag) + return WAIT_PROCESS_CPL; + + cmdq_type = SPHW_CMDQ_SYNC; + for (; cmdq_type < SPHW_MAX_CMDQ_TYPES; cmdq_type++) { + if (!sphw_cmdq_idle(&cmdqs->cmdq[cmdq_type])) + return WAIT_PROCESS_WAITING; + } + + return WAIT_PROCESS_CPL; +} + +static int wait_cmdq_stop(struct sphw_hwdev *hwdev) +{ + enum sphw_cmdq_type cmdq_type; + struct sphw_cmdqs *cmdqs = hwdev->cmdqs; + int err; + + if (!(cmdqs->status & SPHW_CMDQ_ENABLE)) + return 0; + + cmdqs->status &= ~SPHW_CMDQ_ENABLE; + + err = sphw_wait_for_timeout(hwdev, check_cmdq_stop_handler, + SPHW_WAIT_CMDQ_IDLE_TIMEOUT, USEC_PER_MSEC); + if (!err) + return 0; + + cmdq_type = SPHW_CMDQ_SYNC; + for (; cmdq_type < SPHW_MAX_CMDQ_TYPES; cmdq_type++) { + if (!sphw_cmdq_idle(&cmdqs->cmdq[cmdq_type])) + sdk_err(hwdev->dev_hdl, "Cmdq %d is busy\n", cmdq_type); + } + + cmdqs->status |= SPHW_CMDQ_ENABLE; + + return err; +} + +static int sphw_pf_rx_tx_flush(struct sphw_hwdev *hwdev, u16 channel) +{ + struct sphw_hwif *hwif = hwdev->hwif; + struct comm_cmd_clear_doorbell clear_db; + struct comm_cmd_clear_resource clr_res; + u16 out_size; + int err; + int ret = 0; + + /*wait ucode stop I/O */ + msleep(100); + + err = wait_cmdq_stop(hwdev); + if (err) { + sdk_warn(hwdev->dev_hdl, "CMDQ is still working, please check CMDQ timeout value is reasonable\n"); + ret = err; + } + + sphw_disable_doorbell(hwif); + + out_size = sizeof(clear_db); + memset(&clear_db, 0, sizeof(clear_db)); + clear_db.func_id = SPHW_HWIF_GLOBAL_IDX(hwif); + + err = comm_msg_to_mgmt_sync_ch(hwdev, COMM_MGMT_CMD_FLUSH_DOORBELL, + &clear_db, sizeof(clear_db), + &clear_db, &out_size, channel); + if (err || !out_size || clear_db.head.status) { + sdk_warn(hwdev->dev_hdl, "Failed to flush doorbell, err: %d, status: 0x%x, out_size: 0x%x, channel: 0x%x\n", + err, clear_db.head.status, out_size, channel); + if (err) + ret = err; + else + ret = -EFAULT; + } + + sphw_set_pf_status(hwif, SPHW_PF_STATUS_FLR_START_FLAG); + + memset(&clr_res, 0, sizeof(clr_res)); + 
clr_res.func_id = SPHW_HWIF_GLOBAL_IDX(hwif); + + err = sphw_msg_to_mgmt_no_ack(hwdev, SPHW_MOD_COMM, COMM_MGMT_CMD_START_FLUSH, &clr_res, + sizeof(clr_res), channel); + if (err) { + sdk_warn(hwdev->dev_hdl, "Failed to notice flush message, err: %d, channel: 0x%x\n", + err, channel); + ret = err; + } + + err = wait_for_flr_finish(hwif); + if (err) { + sdk_warn(hwdev->dev_hdl, "Wait firmware FLR timeout\n"); + ret = err; + } + + sphw_enable_doorbell(hwif); + + err = sphw_reinit_cmdq_ctxts(hwdev); + if (err) { + sdk_warn(hwdev->dev_hdl, "Failed to reinit cmdq\n"); + ret = err; + } + + return ret; +} + +int sphw_func_rx_tx_flush(void *hwdev, u16 channel) +{ + struct sphw_hwdev *dev = hwdev; + + if (!hwdev) + return -EINVAL; + + if (!dev->chip_present_flag) + return 0; + + if (SPHW_FUNC_TYPE(dev) == TYPE_VF) + /* TO DO */ + return 0; + else + return sphw_pf_rx_tx_flush(dev, channel); +} + +int sphw_get_board_info(void *hwdev, struct sphw_board_info *info, u16 channel) +{ + struct comm_cmd_board_info board_info; + u16 out_size = sizeof(board_info); + int err; + + if (!hwdev || !info) + return -EINVAL; + + memset(&board_info, 0, sizeof(board_info)); + err = comm_msg_to_mgmt_sync_ch(hwdev, COMM_MGMT_CMD_GET_BOARD_INFO, + &board_info, sizeof(board_info), + &board_info, &out_size, channel); + if (err || board_info.head.status || !out_size) { + sdk_err(((struct sphw_hwdev *)hwdev)->dev_hdl, + "Failed to get board info, err: %d, status: 0x%x, out size: 0x%x, channel: 0x%x\n", + err, board_info.head.status, out_size, channel); + return -EIO; + } + + memcpy(info, &board_info.info, sizeof(*info)); + + return 0; +} + +int sphw_get_global_attr(void *hwdev, struct comm_global_attr *attr) +{ + struct comm_cmd_get_glb_attr get_attr; + u16 out_size = sizeof(get_attr); + int err = 0; + + err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_GET_GLOBAL_ATTR, + &get_attr, sizeof(get_attr), &get_attr, + &out_size); + if (err || !out_size || get_attr.head.status) { + sdk_err(((struct sphw_hwdev *)hwdev)->dev_hdl, + "Failed to get global attribute, err: %d, status: 0x%x, out size: 0x%x\n", + err, get_attr.head.status, out_size); + return -EIO; + } + + memcpy(attr, &get_attr.attr, sizeof(*attr)); + + return 0; +} diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw_comm.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw_comm.h new file mode 100644 index 000000000000..09fdf8764a3e --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw_comm.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#ifndef SPHW_COMM_H +#define SPHW_COMM_H + +#include "sphw_comm_msg_intf.h" + +#define MSG_TO_MGMT_SYNC_RETURN_ERR(err, out_size, status) \ + ((err) || (status) || !(out_size)) + +#define SPHW_PAGE_SIZE_HW(pg_size) ((u8)ilog2((u32)((pg_size) >> 12))) + +enum func_tmr_bitmap_status { + FUNC_TMR_BITMAP_DISABLE, + FUNC_TMR_BITMAP_ENABLE, +}; + +enum ppf_tmr_status { + SPHW_PPF_TMR_FLAG_STOP, + SPHW_PPF_TMR_FLAG_START, +}; + +#define SPHW_HT_GPA_PAGE_SIZE 4096UL +#define SPHW_PPF_HT_GPA_SET_RETRY_TIMES 10 + +int sphw_set_cmdq_depth(void *hwdev, u16 cmdq_depth); + +int sphw_set_cmdq_ctxt(struct sphw_hwdev *hwdev, u8 cmdq_id, struct cmdq_ctxt_info *ctxt); + +int sphw_ppf_ext_db_init(void *dev); + +int sphw_ppf_ext_db_deinit(void *dev); + +int sphw_set_ceq_ctrl_reg(struct sphw_hwdev *hwdev, u16 q_id, u32 ctrl0, u32 ctrl1); + +int sphw_get_comm_features(void *hwdev, u64 *s_feature, u16 size); +int sphw_set_comm_features(void *hwdev, u64 *s_feature, u16 size); + 
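+/* query chip-global attributes (COMM_MGMT_CMD_GET_GLOBAL_ATTR) from the management CPU */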
+int sphw_get_global_attr(void *hwdev, struct comm_global_attr *attr); + +#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hwdev.c b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hwdev.c new file mode 100644 index 000000000000..f58b0325bf04 --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hwdev.c @@ -0,0 +1,1402 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include <linux/time.h> +#include <linux/timex.h> +#include <linux/rtc.h> +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/types.h> +#include <linux/module.h> +#include <linux/completion.h> +#include <linux/semaphore.h> +#include <linux/interrupt.h> +#include <linux/vmalloc.h> + +#include "sphw_crm.h" +#include "sphw_hw.h" +#include "sphw_common.h" +#include "sphw_hwdev.h" +#include "sphw_csr.h" +#include "sphw_hwif.h" +#include "sphw_eqs.h" +#include "sphw_api_cmd.h" +#include "sphw_mgmt.h" +#include "sphw_mbox.h" +#include "sphw_wq.h" +#include "sphw_cmdq.h" +#include "sphw_hw_cfg.h" +#include "sphw_hw_comm.h" +#include "sphw_prof_adap.h" + +static bool disable_stateful_load; +module_param(disable_stateful_load, bool, 0444); +MODULE_PARM_DESC(disable_stateful_load, "Disable stateful load - default is false"); + +static bool disable_cfg_comm; +module_param(disable_cfg_comm, bool, 0444); +MODULE_PARM_DESC(disable_cfg_comm, "disable_cfg_comm or not - default is false"); + +static unsigned int wq_page_order = SPHW_MAX_WQ_PAGE_SIZE_ORDER; +module_param(wq_page_order, uint, 0444); +MODULE_PARM_DESC(wq_page_order, "Set wq page size order, wq page size is 4K * (2 ^ wq_page_order) - default is 8"); + +enum sphw_pcie_nosnoop { + SPHW_PCIE_SNOOP = 0, + SPHW_PCIE_NO_SNOOP = 1, +}; + +enum sphw_pcie_tph { + SPHW_PCIE_TPH_DISABLE = 0, + SPHW_PCIE_TPH_ENABLE = 1, +}; + +#define SPHW_DMA_ATTR_INDIR_IDX_SHIFT 0 + +#define SPHW_DMA_ATTR_INDIR_IDX_MASK 0x3FF + +#define SPHW_DMA_ATTR_INDIR_IDX_SET(val, member) \ + (((u32)(val) & SPHW_DMA_ATTR_INDIR_##member##_MASK) << \ + SPHW_DMA_ATTR_INDIR_##member##_SHIFT) + +#define SPHW_DMA_ATTR_INDIR_IDX_CLEAR(val, member) \ + ((val) & (~(SPHW_DMA_ATTR_INDIR_##member##_MASK \ + << SPHW_DMA_ATTR_INDIR_##member##_SHIFT))) + +#define SPHW_DMA_ATTR_ENTRY_ST_SHIFT 0 +#define SPHW_DMA_ATTR_ENTRY_AT_SHIFT 8 +#define SPHW_DMA_ATTR_ENTRY_PH_SHIFT 10 +#define SPHW_DMA_ATTR_ENTRY_NO_SNOOPING_SHIFT 12 +#define SPHW_DMA_ATTR_ENTRY_TPH_EN_SHIFT 13 + +#define SPHW_DMA_ATTR_ENTRY_ST_MASK 0xFF +#define SPHW_DMA_ATTR_ENTRY_AT_MASK 0x3 +#define SPHW_DMA_ATTR_ENTRY_PH_MASK 0x3 +#define SPHW_DMA_ATTR_ENTRY_NO_SNOOPING_MASK 0x1 +#define SPHW_DMA_ATTR_ENTRY_TPH_EN_MASK 0x1 + +#define SPHW_DMA_ATTR_ENTRY_SET(val, member) \ + (((u32)(val) & SPHW_DMA_ATTR_ENTRY_##member##_MASK) << \ + SPHW_DMA_ATTR_ENTRY_##member##_SHIFT) + +#define SPHW_DMA_ATTR_ENTRY_CLEAR(val, member) \ + ((val) & (~(SPHW_DMA_ATTR_ENTRY_##member##_MASK \ + << SPHW_DMA_ATTR_ENTRY_##member##_SHIFT))) + +#define SPHW_PCIE_ST_DISABLE 0 +#define SPHW_PCIE_AT_DISABLE 0 +#define SPHW_PCIE_PH_DISABLE 0 + +#define PCIE_MSIX_ATTR_ENTRY 0 + +#define SPHW_CHIP_PRESENT 1 +#define SPHW_CHIP_ABSENT 0 + +#define SPHW_DEAULT_EQ_MSIX_PENDING_LIMIT 0 +#define SPHW_DEAULT_EQ_MSIX_COALESC_TIMER_CFG 0xFF +#define SPHW_DEAULT_EQ_MSIX_RESEND_TIMER_CFG 7 + +#define SPHW_HWDEV_WQ_NAME "sphw_hardware" +#define SPHW_WQ_MAX_REQ 10 + +static void sphw_init_heartbeat_detect(struct sphw_hwdev *hwdev); +static void sphw_destroy_heartbeat_detect(struct 
sphw_hwdev *hwdev); + +typedef void (*mgmt_event_cb)(void *handle, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); + +struct mgmt_event_handle { + u16 cmd; + mgmt_event_cb proc; +}; + +int pf_handle_vf_comm_mbox(void *handle, void *pri_handle, + u16 vf_id, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) +{ + struct sphw_hwdev *hwdev = handle; + + if (!hwdev) + return -EINVAL; + + sdk_warn(hwdev->dev_hdl, "Unsupported vf mbox event %u to process\n", + cmd); + + return 0; +} + +int vf_handle_pf_comm_mbox(void *handle, void *pri_handle, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) +{ + struct sphw_hwdev *hwdev = handle; + + if (!hwdev) + return -EINVAL; + + sdk_warn(hwdev->dev_hdl, "Unsupported pf mbox event %u to process\n", + cmd); + return 0; +} + +static void chip_fault_show(struct sphw_hwdev *hwdev, struct sphw_fault_event *event) +{ + char fault_level[FAULT_LEVEL_MAX][FAULT_SHOW_STR_LEN + 1] = { + "fatal", "reset", "host", "flr", "general", "suggestion"}; + char level_str[FAULT_SHOW_STR_LEN + 1]; + u8 level; + + memset(level_str, 0, FAULT_SHOW_STR_LEN + 1); + level = event->event.chip.err_level; + if (level < FAULT_LEVEL_MAX) + strncpy(level_str, fault_level[level], + FAULT_SHOW_STR_LEN); + else + strncpy(level_str, "Unknown", FAULT_SHOW_STR_LEN); + + if (level == FAULT_LEVEL_SERIOUS_FLR) + dev_err(hwdev->dev_hdl, "err_level: %u [%s], flr func_id: %u\n", + level, level_str, event->event.chip.func_id); + + dev_err(hwdev->dev_hdl, "Module_id: 0x%x, err_type: 0x%x, err_level: %u[%s], err_csr_addr: 0x%08x, err_csr_value: 0x%08x\n", + event->event.chip.node_id, + event->event.chip.err_type, level, level_str, + event->event.chip.err_csr_addr, + event->event.chip.err_csr_value); +} + +static void fault_report_show(struct sphw_hwdev *hwdev, + struct sphw_fault_event *event) +{ + char fault_type[FAULT_TYPE_MAX][FAULT_SHOW_STR_LEN + 1] = { + "chip", "ucode", "mem rd timeout", "mem wr timeout", + "reg rd timeout", "reg wr timeout", "phy fault" + }; + char type_str[FAULT_SHOW_STR_LEN + 1]; + struct fault_event_stats *fault = NULL; + + sdk_err(hwdev->dev_hdl, "Fault event report received, func_id: %u\n", + sphw_global_func_id(hwdev)); + + memset(type_str, 0, FAULT_SHOW_STR_LEN + 1); + if (event->type < FAULT_TYPE_MAX) + strncpy(type_str, fault_type[event->type], + strlen(fault_type[event->type])); + else + strncpy(type_str, "Unknown", strlen("Unknown")); + + sdk_err(hwdev->dev_hdl, "Fault type: %u [%s]\n", event->type, type_str); + /* 0, 1, 2 and 3 word Represents array event->event.val index */ + sdk_err(hwdev->dev_hdl, "Fault val[0]: 0x%08x, val[1]: 0x%08x, val[2]: 0x%08x, val[3]: 0x%08x\n", + event->event.val[0], event->event.val[1], event->event.val[2], + event->event.val[3]); + + fault = &hwdev->hw_stats.fault_event_stats; + + switch (event->type) { + case FAULT_TYPE_CHIP: + chip_fault_show(hwdev, event); + break; + case FAULT_TYPE_UCODE: + atomic_inc(&fault->fault_type_stat[event->type]); + sdk_err(hwdev->dev_hdl, "Cause_id: %u, core_id: %u, c_id: %u, epc: 0x%08x\n", + event->event.ucode.cause_id, event->event.ucode.core_id, + event->event.ucode.c_id, event->event.ucode.epc); + break; + case FAULT_TYPE_MEM_RD_TIMEOUT: + case FAULT_TYPE_MEM_WR_TIMEOUT: + atomic_inc(&fault->fault_type_stat[event->type]); + sdk_err(hwdev->dev_hdl, "Err_csr_ctrl: 0x%08x, err_csr_data: 0x%08x, ctrl_tab: 0x%08x, mem_index: 0x%08x\n", + event->event.mem_timeout.err_csr_ctrl, + event->event.mem_timeout.err_csr_data, + event->event.mem_timeout.ctrl_tab, + 
event->event.mem_timeout.mem_index); + break; + case FAULT_TYPE_REG_RD_TIMEOUT: + case FAULT_TYPE_REG_WR_TIMEOUT: + atomic_inc(&fault->fault_type_stat[event->type]); + sdk_err(hwdev->dev_hdl, "Err_csr: 0x%08x\n", + event->event.reg_timeout.err_csr); + break; + case FAULT_TYPE_PHY_FAULT: + atomic_inc(&fault->fault_type_stat[event->type]); + sdk_err(hwdev->dev_hdl, "Op_type: %u, port_id: %u, dev_ad: %u, csr_addr: 0x%08x, op_data: 0x%08x\n", + event->event.phy_fault.op_type, + event->event.phy_fault.port_id, + event->event.phy_fault.dev_ad, + event->event.phy_fault.csr_addr, + event->event.phy_fault.op_data); + break; + default: + break; + } +} + +static void fault_event_handler(void *dev, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct sphw_cmd_fault_event *fault_event = NULL; + struct sphw_event_info event_info; + struct sphw_hwdev *hwdev = dev; + u8 fault_src = SPHW_FAULT_SRC_TYPE_MAX; + u8 fault_level; + + if (in_size != sizeof(*fault_event)) { + sdk_err(hwdev->dev_hdl, "Invalid fault event report, length: %u, should be %ld\n", + in_size, sizeof(*fault_event)); + return; + } + + fault_event = buf_in; + fault_report_show(hwdev, &fault_event->event); + + if (fault_event->event.type == FAULT_TYPE_CHIP) + fault_level = fault_event->event.event.chip.err_level; + else + fault_level = FAULT_LEVEL_FATAL; + + if (hwdev->event_callback) { + event_info.type = SPHW_EVENT_FAULT; + memcpy(&event_info.info, &fault_event->event, + sizeof(struct sphw_fault_event)); + event_info.info.fault_level = fault_level; + hwdev->event_callback(hwdev->event_pri_handle, &event_info); + } + + if (fault_event->event.type <= FAULT_TYPE_REG_WR_TIMEOUT) + fault_src = fault_event->event.type; + else if (fault_event->event.type == FAULT_TYPE_PHY_FAULT) + fault_src = SPHW_FAULT_SRC_HW_PHY_FAULT; + + sphw_fault_post_process(hwdev, fault_src, fault_level); +} + +static void ffm_event_msg_handler(void *hwdev, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct ffm_intr_info *intr = NULL; + struct sphw_hwdev *dev = hwdev; + + if (in_size != sizeof(*intr)) { + sdk_err(dev->dev_hdl, "Invalid fault event report, length: %u, should be %ld.\n", + in_size, sizeof(*intr)); + return; + } + + intr = buf_in; + + sdk_err(dev->dev_hdl, "node_id: 0x%x, err_type: 0x%x, err_level: %u, err_csr_addr: 0x%08x, err_csr_value: 0x%08x\n", + intr->node_id, intr->err_type, intr->err_level, + intr->err_csr_addr, intr->err_csr_value); +} + +const struct mgmt_event_handle mgmt_event_proc[] = { + { + .cmd = COMM_MGMT_CMD_FAULT_REPORT, + .proc = fault_event_handler, + }, + + { + .cmd = COMM_MGMT_CMD_FFM_SET, + .proc = ffm_event_msg_handler, + }, +}; + +void pf_handle_mgmt_comm_event(void *handle, void *pri_handle, u16 cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size) +{ + struct sphw_hwdev *hwdev = handle; + u32 i, event_num = ARRAY_LEN(mgmt_event_proc); + + if (!hwdev) + return; + + for (i = 0; i < event_num; i++) { + if (cmd == mgmt_event_proc[i].cmd) { + if (mgmt_event_proc[i].proc) + mgmt_event_proc[i].proc(handle, buf_in, in_size, + buf_out, out_size); + + return; + } + } + + sdk_warn(hwdev->dev_hdl, "Unsupported mgmt cpu event %u to process\n", + cmd); +} + +void sphw_set_chip_present(void *hwdev) +{ + ((struct sphw_hwdev *)hwdev)->chip_present_flag = SPHW_CHIP_PRESENT; +} + +void sphw_set_chip_absent(void *hwdev) +{ + struct sphw_hwdev *dev = hwdev; + + sdk_err(dev->dev_hdl, "Card not present\n"); + dev->chip_present_flag = SPHW_CHIP_ABSENT; +} + +int sphw_get_chip_present_flag(const void 
*hwdev) +{ + if (!hwdev) + return 0; + + return ((struct sphw_hwdev *)hwdev)->chip_present_flag; +} + +/* TODO */ +void sphw_force_complete_all(void *hwdev) +{ +} + +void sphw_detect_hw_present(void *hwdev) +{ + u32 addr, attr1; + + addr = SPHW_CSR_FUNC_ATTR1_ADDR; + attr1 = sphw_hwif_read_reg(((struct sphw_hwdev *)hwdev)->hwif, addr); + if (attr1 == SPHW_PCIE_LINK_DOWN) { + sphw_set_chip_absent(hwdev); + sphw_force_complete_all(hwdev); + } +} + +/** + * set_pf_dma_attr_entry - set the dma attributes for entry + * @hwif: the hardware interface of a pci function device + * @entry_idx: the entry index in the dma table + * @st: PCIE TLP steering tag + * @at: PCIE TLP AT field + * @ph: PCIE TLP Processing Hint field + * @no_snooping: PCIE TLP No snooping + * @tph_en: PCIE TLP Processing Hint Enable + **/ +static void set_pf_dma_attr_entry(struct sphw_hwdev *hwdev, u32 entry_idx, + u8 st, u8 at, u8 ph, + enum sphw_pcie_nosnoop no_snooping, + enum sphw_pcie_tph tph_en) +{ + u32 addr, val, dma_attr_entry; + + /* Use indirect access should set entry_idx first*/ + addr = SPHW_CSR_DMA_ATTR_INDIR_IDX_ADDR; + val = sphw_hwif_read_reg(hwdev->hwif, addr); + val = SPHW_DMA_ATTR_INDIR_IDX_CLEAR(val, IDX); + + entry_idx = SPHW_DMA_ATTR_INDIR_IDX_SET(entry_idx, IDX); + + val |= entry_idx; + + sphw_hwif_write_reg(hwdev->hwif, addr, val); + + wmb(); /* write index before config */ + + addr = SPHW_CSR_DMA_ATTR_TBL_ADDR; + + val = sphw_hwif_read_reg(hwdev->hwif, addr); + val = SPHW_DMA_ATTR_ENTRY_CLEAR(val, ST) & + SPHW_DMA_ATTR_ENTRY_CLEAR(val, AT) & + SPHW_DMA_ATTR_ENTRY_CLEAR(val, PH) & + SPHW_DMA_ATTR_ENTRY_CLEAR(val, NO_SNOOPING) & + SPHW_DMA_ATTR_ENTRY_CLEAR(val, TPH_EN); + + dma_attr_entry = SPHW_DMA_ATTR_ENTRY_SET(st, ST) | + SPHW_DMA_ATTR_ENTRY_SET(at, AT) | + SPHW_DMA_ATTR_ENTRY_SET(ph, PH) | + SPHW_DMA_ATTR_ENTRY_SET(no_snooping, NO_SNOOPING) | + SPHW_DMA_ATTR_ENTRY_SET(tph_en, TPH_EN); + + val |= dma_attr_entry; + sphw_hwif_write_reg(hwdev->hwif, addr, val); +} + +static int set_vf_dma_attr_entry(struct sphw_hwdev *hwdev, u8 entry_idx, + u8 st, u8 at, u8 ph, + enum sphw_pcie_nosnoop no_snooping, + enum sphw_pcie_tph tph_en) +{ + /* SPHW_MGMT_CMD_DMA_ATTR_SET */ + /*to do vf set dma attr by mpu*/ + return 0; +} + +/** + * dma_attr_table_init - initialize the default dma attributes + * @hwif: the hardware interface of a pci function device + **/ +static int dma_attr_table_init(struct sphw_hwdev *hwdev) +{ + int err = 0; + + /* TODO: check if set pf dma attr through uP, the same as vf */ + if (SPHW_IS_VF(hwdev)) + err = set_vf_dma_attr_entry(hwdev, PCIE_MSIX_ATTR_ENTRY, + SPHW_PCIE_ST_DISABLE, + SPHW_PCIE_AT_DISABLE, + SPHW_PCIE_PH_DISABLE, + SPHW_PCIE_SNOOP, + SPHW_PCIE_TPH_DISABLE); + else + set_pf_dma_attr_entry(hwdev, PCIE_MSIX_ATTR_ENTRY, + SPHW_PCIE_ST_DISABLE, + SPHW_PCIE_AT_DISABLE, + SPHW_PCIE_PH_DISABLE, + SPHW_PCIE_SNOOP, + SPHW_PCIE_TPH_DISABLE); + + return err; +} + +static int init_aeqs_msix_attr(struct sphw_hwdev *hwdev) +{ + struct sphw_aeqs *aeqs = hwdev->aeqs; + struct interrupt_info info = {0}; + struct sphw_eq *eq = NULL; + int q_id; + int err; + + info.lli_set = 0; + info.interrupt_coalesc_set = 1; + info.pending_limt = SPHW_DEAULT_EQ_MSIX_PENDING_LIMIT; + info.coalesc_timer_cfg = SPHW_DEAULT_EQ_MSIX_COALESC_TIMER_CFG; + info.resend_timer_cfg = SPHW_DEAULT_EQ_MSIX_RESEND_TIMER_CFG; + + for (q_id = aeqs->num_aeqs - 1; q_id >= 0; q_id--) { + eq = &aeqs->aeq[q_id]; + info.msix_index = eq->eq_irq.msix_entry_idx; + err = sphw_set_interrupt_cfg_direct(hwdev, &info, SPHW_CHANNEL_COMM); + 
if (err) { + sdk_err(hwdev->dev_hdl, "Set msix attr for aeq %d failed\n", + q_id); + return -EFAULT; + } + } + + return 0; +} + +static int init_ceqs_msix_attr(struct sphw_hwdev *hwdev) +{ + struct sphw_ceqs *ceqs = hwdev->ceqs; + struct interrupt_info info = {0}; + struct sphw_eq *eq = NULL; + u16 q_id; + int err; + + info.lli_set = 0; + info.interrupt_coalesc_set = 1; + info.pending_limt = SPHW_DEAULT_EQ_MSIX_PENDING_LIMIT; + info.coalesc_timer_cfg = SPHW_DEAULT_EQ_MSIX_COALESC_TIMER_CFG; + info.resend_timer_cfg = SPHW_DEAULT_EQ_MSIX_RESEND_TIMER_CFG; + + for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) { + eq = &ceqs->ceq[q_id]; + info.msix_index = eq->eq_irq.msix_entry_idx; + err = sphw_set_interrupt_cfg(hwdev, info, SPHW_CHANNEL_COMM); + if (err) { + sdk_err(hwdev->dev_hdl, "Set msix attr for ceq %u failed\n", + q_id); + return -EFAULT; + } + } + + return 0; +} + +static int sphw_comm_clp_to_mgmt_init(struct sphw_hwdev *hwdev) +{ + int err; + + if (sphw_func_type(hwdev) == TYPE_VF) + return 0; + + err = sphw_clp_pf_to_mgmt_init(hwdev); + if (err) + return err; + + return 0; +} + +static void sphw_comm_clp_to_mgmt_free(struct sphw_hwdev *hwdev) +{ + if (sphw_func_type(hwdev) == TYPE_VF) + return; + + sphw_clp_pf_to_mgmt_free(hwdev); +} + +static int sphw_comm_aeqs_init(struct sphw_hwdev *hwdev) +{ + struct irq_info aeq_irqs[SPHW_MAX_AEQS] = {{0} }; + u16 num_aeqs, resp_num_irq = 0, i; + int err; + + num_aeqs = SPHW_HWIF_NUM_AEQS(hwdev->hwif); + if (num_aeqs > SPHW_MAX_AEQS) { + sdk_warn(hwdev->dev_hdl, "Adjust aeq num to %d\n", + SPHW_MAX_AEQS); + num_aeqs = SPHW_MAX_AEQS; + } + err = sphw_alloc_irqs(hwdev, SERVICE_T_INTF, num_aeqs, aeq_irqs, &resp_num_irq); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to alloc aeq irqs, num_aeqs: %u\n", + num_aeqs); + return err; + } + + if (resp_num_irq < num_aeqs) { + sdk_warn(hwdev->dev_hdl, "Adjust aeq num to %u\n", + resp_num_irq); + num_aeqs = resp_num_irq; + } + + err = sphw_aeqs_init(hwdev, num_aeqs, aeq_irqs); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init aeqs\n"); + goto aeqs_init_err; + } + + return 0; + +aeqs_init_err: + for (i = 0; i < num_aeqs; i++) + sphw_free_irq(hwdev, SERVICE_T_INTF, aeq_irqs[i].irq_id); + + return err; +} + +static void sphw_comm_aeqs_free(struct sphw_hwdev *hwdev) +{ + struct irq_info aeq_irqs[SPHW_MAX_AEQS] = {{0} }; + u16 num_irqs, i; + + sphw_get_aeq_irqs(hwdev, aeq_irqs, &num_irqs); + + sphw_aeqs_free(hwdev); + + for (i = 0; i < num_irqs; i++) + sphw_free_irq(hwdev, SERVICE_T_INTF, aeq_irqs[i].irq_id); +} + +static int sphw_comm_ceqs_init(struct sphw_hwdev *hwdev) +{ + struct irq_info ceq_irqs[SPHW_MAX_CEQS] = {{0} }; + u16 num_ceqs, resp_num_irq = 0, i; + int err; + + num_ceqs = SPHW_HWIF_NUM_CEQS(hwdev->hwif); + if (num_ceqs > SPHW_MAX_CEQS) { + sdk_warn(hwdev->dev_hdl, "Adjust ceq num to %d\n", + SPHW_MAX_CEQS); + num_ceqs = SPHW_MAX_CEQS; + } + + err = sphw_alloc_irqs(hwdev, SERVICE_T_INTF, num_ceqs, ceq_irqs, &resp_num_irq); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to alloc ceq irqs, num_ceqs: %u\n", + num_ceqs); + return err; + } + + if (resp_num_irq < num_ceqs) { + sdk_warn(hwdev->dev_hdl, "Adjust ceq num to %u\n", + resp_num_irq); + num_ceqs = resp_num_irq; + } + + err = sphw_ceqs_init(hwdev, num_ceqs, ceq_irqs); + if (err) { + sdk_err(hwdev->dev_hdl, + "Failed to init ceqs, err:%d\n", err); + goto ceqs_init_err; + } + + return 0; + +ceqs_init_err: + for (i = 0; i < num_ceqs; i++) + sphw_free_irq(hwdev, SERVICE_T_INTF, ceq_irqs[i].irq_id); + + return err; +} + +static void 
sphw_comm_ceqs_free(struct sphw_hwdev *hwdev) +{ + struct irq_info ceq_irqs[SPHW_MAX_CEQS] = {{0} }; + u16 num_irqs; + int i; + + sphw_get_ceq_irqs(hwdev, ceq_irqs, &num_irqs); + + sphw_ceqs_free(hwdev); + + for (i = 0; i < num_irqs; i++) + sphw_free_irq(hwdev, SERVICE_T_INTF, ceq_irqs[i].irq_id); +} + +static int sphw_comm_func_to_func_init(struct sphw_hwdev *hwdev) +{ + int err; + + err = sphw_func_to_func_init(hwdev); + if (err) + return err; + + sphw_aeq_register_hw_cb(hwdev, SPHW_MBX_FROM_FUNC, sphw_mbox_func_aeqe_handler); + sphw_aeq_register_hw_cb(hwdev, SPHW_MSG_FROM_MGMT_CPU, sphw_mgmt_msg_aeqe_handler); + + if (!SPHW_IS_VF(hwdev)) + sphw_register_pf_mbox_cb(hwdev, SPHW_MOD_COMM, hwdev->func_to_func, + pf_handle_vf_comm_mbox); + else + sphw_register_vf_mbox_cb(hwdev, SPHW_MOD_COMM, hwdev->func_to_func, + vf_handle_pf_comm_mbox); + + return 0; +} + +static void sphw_comm_func_to_func_free(struct sphw_hwdev *hwdev) +{ + sphw_aeq_unregister_hw_cb(hwdev, SPHW_MBX_FROM_FUNC); + + if (!SPHW_IS_VF(hwdev)) { + sphw_unregister_pf_mbox_cb(hwdev, SPHW_MOD_COMM); + } else { + sphw_unregister_vf_mbox_cb(hwdev, SPHW_MOD_COMM); + + sphw_aeq_unregister_hw_cb(hwdev, SPHW_MSG_FROM_MGMT_CPU); + } + + sphw_func_to_func_free(hwdev); +} + +static int sphw_comm_pf_to_mgmt_init(struct sphw_hwdev *hwdev) +{ + int err; + + /* VF do not support api chain */ + if (sphw_func_type(hwdev) == TYPE_VF || + !COMM_SUPPORT_API_CHAIN(hwdev)) + return 0; + + err = sphw_pf_to_mgmt_init(hwdev); + if (err) + return err; + + sphw_register_mgmt_msg_cb(hwdev, SPHW_MOD_COMM, hwdev->pf_to_mgmt, + pf_handle_mgmt_comm_event); + + return 0; +} + +static void sphw_comm_pf_to_mgmt_free(struct sphw_hwdev *hwdev) +{ + /* VF do not support api chain */ + if (sphw_func_type(hwdev) == TYPE_VF || + !COMM_SUPPORT_API_CHAIN(hwdev)) + return; + + sphw_unregister_mgmt_msg_cb(hwdev, SPHW_MOD_COMM); + + sphw_aeq_unregister_hw_cb(hwdev, SPHW_MSG_FROM_MGMT_CPU); + + sphw_pf_to_mgmt_free(hwdev); +} + +static int sphw_comm_cmdqs_init(struct sphw_hwdev *hwdev) +{ + int err; + + err = sphw_cmdqs_init(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init cmd queues\n"); + return err; + } + + sphw_ceq_register_cb(hwdev, SPHW_CMDQ, sphw_cmdq_ceq_handler); + + err = sphw_set_cmdq_depth(hwdev, SPHW_CMDQ_DEPTH); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to set cmdq depth\n"); + goto set_cmdq_depth_err; + } + + return 0; + +set_cmdq_depth_err: + sphw_cmdqs_free(hwdev); + + return err; +} + +static void sphw_comm_cmdqs_free(struct sphw_hwdev *hwdev) +{ + sphw_ceq_unregister_cb(hwdev, SPHW_CMDQ); + sphw_cmdqs_free(hwdev); +} + +static void sphw_sync_mgmt_func_state(struct sphw_hwdev *hwdev) +{ + sphw_set_pf_status(hwdev->hwif, SPHW_PF_STATUS_ACTIVE_FLAG); +} + +static void sphw_unsync_mgmt_func_state(struct sphw_hwdev *hwdev) +{ + sphw_set_pf_status(hwdev->hwif, SPHW_PF_STATUS_INIT); +} + +static int init_basic_attributes(struct sphw_hwdev *hwdev) +{ + int err; + + err = sphw_get_board_info(hwdev, &hwdev->board_info, SPHW_CHANNEL_COMM); + if (err) + return err; + + err = sphw_get_comm_features(hwdev, hwdev->features, COMM_MAX_FEATURE_QWORD); + if (err) { + sdk_err(hwdev->dev_hdl, "Get comm features failed\n"); + return err; + } + + sdk_info(hwdev->dev_hdl, "Comm features: 0x%llx\n", hwdev->features[0]); + + err = sphw_get_global_attr(hwdev, &hwdev->glb_attr); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to get global attribute\n"); + return err; + } + + sdk_info(hwdev->dev_hdl, "global attribute: max_host: 0x%x, max_pf: 0x%x, vf_id_start: 
0x%x, mgmt cpu node id: 0x%x\n", + hwdev->glb_attr.max_host_num, hwdev->glb_attr.max_pf_num, + hwdev->glb_attr.vf_id_start, + hwdev->glb_attr.mgmt_host_node_id); + + sphw_init_profile_adapter(hwdev); + + return 0; +} + +static int init_basic_mgmt_channel(struct sphw_hwdev *hwdev) +{ + int err; + + err = sphw_comm_aeqs_init(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init async event queues\n"); + return err; + } + + err = sphw_comm_func_to_func_init(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init mailbox\n"); + goto func_to_func_init_err; + } + + err = init_aeqs_msix_attr(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init aeqs msix attr\n"); + goto aeqs_msix_attr_init_err; + } + + return 0; + +aeqs_msix_attr_init_err: + sphw_comm_func_to_func_free(hwdev); + +func_to_func_init_err: + sphw_comm_aeqs_free(hwdev); + + return err; +} + +static void free_base_mgmt_channel(struct sphw_hwdev *hwdev) +{ + sphw_comm_func_to_func_free(hwdev); + sphw_comm_aeqs_free(hwdev); +} + +static int init_pf_mgmt_channel(struct sphw_hwdev *hwdev) +{ + int err; + + err = sphw_comm_clp_to_mgmt_init(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init clp\n"); + return err; + } + + err = sphw_comm_pf_to_mgmt_init(hwdev); + if (err) { + sphw_comm_clp_to_mgmt_free(hwdev); + sdk_err(hwdev->dev_hdl, "Failed to init pf to mgmt\n"); + return err; + } + + return 0; +} + +static void free_pf_mgmt_channel(struct sphw_hwdev *hwdev) +{ + sphw_comm_clp_to_mgmt_free(hwdev); + sphw_comm_pf_to_mgmt_free(hwdev); +} + +static int init_mgmt_channel_post(struct sphw_hwdev *hwdev) +{ + int err; + + /* mbox host channel resources will be freed in + * sphw_func_to_func_free + */ + err = sphw_mbox_init_host_msg_channel(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init mbox host channel\n"); + return err; + } + + err = init_pf_mgmt_channel(hwdev); + if (err) + return err; + + return 0; +} + +static void free_mgmt_msg_channel_post(struct sphw_hwdev *hwdev) +{ + free_pf_mgmt_channel(hwdev); +} + +static int init_cmdqs_channel(struct sphw_hwdev *hwdev) +{ + int err; + + err = dma_attr_table_init(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init dma attr table\n"); + goto dma_attr_init_err; + } + + err = sphw_comm_ceqs_init(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init completion event queues\n"); + goto ceqs_init_err; + } + + err = init_ceqs_msix_attr(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init ceqs msix attr\n"); + goto init_ceq_msix_err; + } + + /* set default wq page_size */ + if (wq_page_order > SPHW_MAX_WQ_PAGE_SIZE_ORDER) { + sdk_info(hwdev->dev_hdl, "wq_page_order exceed limit[0, %d], reset to %d\n", + SPHW_MAX_WQ_PAGE_SIZE_ORDER, + SPHW_MAX_WQ_PAGE_SIZE_ORDER); + wq_page_order = SPHW_MAX_WQ_PAGE_SIZE_ORDER; + } + hwdev->wq_page_size = SPHW_HW_WQ_PAGE_SIZE * (1U << wq_page_order); + sdk_info(hwdev->dev_hdl, "WQ page size: 0x%x\n", hwdev->wq_page_size); + err = sphw_set_wq_page_size(hwdev, sphw_global_func_id(hwdev), hwdev->wq_page_size, + SPHW_CHANNEL_COMM); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to set wq page size\n"); + goto init_wq_pg_size_err; + } + + err = sphw_comm_cmdqs_init(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init cmd queues\n"); + goto cmdq_init_err; + } + + return 0; + +cmdq_init_err: + if (SPHW_FUNC_TYPE(hwdev) != TYPE_VF) + sphw_set_wq_page_size(hwdev, sphw_global_func_id(hwdev), SPHW_HW_WQ_PAGE_SIZE, + SPHW_CHANNEL_COMM); +init_wq_pg_size_err: +init_ceq_msix_err: + 
sphw_comm_ceqs_free(hwdev); + +ceqs_init_err: +dma_attr_init_err: + + return err; +} + +int sphw_init_comm_ch(struct sphw_hwdev *hwdev) +{ + int err; + + err = init_basic_mgmt_channel(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init mgmt channel\n"); + return err; + } + + err = sphw_func_reset(hwdev, sphw_global_func_id(hwdev), SPHW_COMM_RES, SPHW_CHANNEL_COMM); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to reset function\n"); + goto func_reset_err; + } + + err = init_basic_attributes(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init basic attributes\n"); + goto init_basic_attr_err; + } + + err = init_mgmt_channel_post(hwdev); + if (err) + goto init_mgmt_channel_post_err; + + err = init_cmdqs_channel(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init cmdq channel\n"); + goto init_cmdqs_channel_err; + } + + sphw_sync_mgmt_func_state(hwdev); + + if (SPHW_F_CHANNEL_LOCK_EN(hwdev)) { + sphw_mbox_enable_channel_lock(hwdev, true); + sphw_cmdq_enable_channel_lock(hwdev, true); + } + + return 0; + +init_cmdqs_channel_err: + free_mgmt_msg_channel_post(hwdev); +init_mgmt_channel_post_err: +init_basic_attr_err: +func_reset_err: + free_base_mgmt_channel(hwdev); + + return err; +} + +void sphw_uninit_comm_ch(struct sphw_hwdev *hwdev) +{ + sphw_unsync_mgmt_func_state(hwdev); + + sphw_comm_cmdqs_free(hwdev); + + if (SPHW_FUNC_TYPE(hwdev) != TYPE_VF) + sphw_set_wq_page_size(hwdev, sphw_global_func_id(hwdev), SPHW_HW_WQ_PAGE_SIZE, + SPHW_CHANNEL_COMM); + + sphw_comm_ceqs_free(hwdev); + + sphw_deinit_profile_adapter(hwdev); + + free_mgmt_msg_channel_post(hwdev); + + free_base_mgmt_channel(hwdev); +} + +int sphw_init_hwdev(struct sphw_init_para *para) +{ + struct sphw_hwdev *hwdev; + int err; + + hwdev = kzalloc(sizeof(*hwdev), GFP_KERNEL); + if (!hwdev) + return -ENOMEM; + + *para->hwdev = hwdev; + hwdev->adapter_hdl = para->adapter_hdl; + hwdev->pcidev_hdl = para->pcidev_hdl; + hwdev->dev_hdl = para->dev_hdl; + hwdev->chip_node = para->chip_node; + hwdev->poll = para->poll; + + hwdev->chip_fault_stats = vzalloc(SPHW_CHIP_FAULT_SIZE); + if (!hwdev->chip_fault_stats) + goto alloc_chip_fault_stats_err; + + err = sphw_init_hwif(hwdev, para->cfg_reg_base, para->intr_reg_base, + para->mgmt_reg_base, para->db_base_phy, + para->db_base, para->db_dwqe_len); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init hwif\n"); + goto init_hwif_err; + } + + sphw_set_chip_present(hwdev); + + if (disable_cfg_comm) + return 0; + + hwdev->workq = alloc_workqueue(SPHW_HWDEV_WQ_NAME, WQ_MEM_RECLAIM, + SPHW_WQ_MAX_REQ); + if (!hwdev->workq) { + sdk_err(hwdev->dev_hdl, "Failed to alloc hardware workq\n"); + goto alloc_workq_err; + } + + sphw_init_heartbeat_detect(hwdev); + + err = init_cfg_mgmt(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init config mgmt\n"); + goto init_cfg_mgmt_err; + } + + err = sphw_init_comm_ch(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init communication channel\n"); + goto init_comm_ch_err; + } + + err = init_capability(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init capability\n"); + goto init_cap_err; + } + + err = sphw_set_comm_features(hwdev, hwdev->features, COMM_MAX_FEATURE_QWORD); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to set comm features\n"); + goto set_feature_err; + } + + return 0; + +set_feature_err: + free_capability(hwdev); + +init_cap_err: + sphw_uninit_comm_ch(hwdev); + +init_comm_ch_err: + free_cfg_mgmt(hwdev); + +init_cfg_mgmt_err: + sphw_destroy_heartbeat_detect(hwdev); + 
destroy_workqueue(hwdev->workq); + +alloc_workq_err: + sphw_free_hwif(hwdev); + +init_hwif_err: + vfree(hwdev->chip_fault_stats); + +alloc_chip_fault_stats_err: + kfree(hwdev); + *para->hwdev = NULL; + + return -EFAULT; +} + +void sphw_free_hwdev(void *hwdev) +{ + struct sphw_hwdev *dev = hwdev; + + sphw_func_rx_tx_flush(hwdev, SPHW_CHANNEL_COMM); + + free_capability(dev); + + sphw_uninit_comm_ch(dev); + + free_cfg_mgmt(dev); + sphw_destroy_heartbeat_detect(hwdev); + destroy_workqueue(dev->workq); + sphw_free_hwif(dev); + vfree(dev->chip_fault_stats); + + kfree(dev); +} + +void *sphw_get_pcidev_hdl(void *hwdev) +{ + struct sphw_hwdev *dev = (struct sphw_hwdev *)hwdev; + + if (!hwdev) + return NULL; + + return dev->pcidev_hdl; +} + +int sphw_register_service_adapter(void *hwdev, void *service_adapter, enum sphw_service_type type) +{ + struct sphw_hwdev *dev = hwdev; + + if (!hwdev || !service_adapter || type >= SERVICE_T_MAX) + return -EINVAL; + + if (dev->service_adapter[type]) + return -EINVAL; + + dev->service_adapter[type] = service_adapter; + + return 0; +} + +void sphw_unregister_service_adapter(void *hwdev, enum sphw_service_type type) +{ + struct sphw_hwdev *dev = hwdev; + + if (!hwdev || type >= SERVICE_T_MAX) + return; + + dev->service_adapter[type] = NULL; +} + +void *sphw_get_service_adapter(void *hwdev, enum sphw_service_type type) +{ + struct sphw_hwdev *dev = hwdev; + + if (!hwdev || type >= SERVICE_T_MAX) + return NULL; + + return dev->service_adapter[type]; +} + +int sphw_dbg_get_hw_stats(const void *hwdev, u8 *hw_stats, u16 *out_size) +{ + if (*out_size != sizeof(struct sphw_hw_stats)) { + pr_err("Unexpect out buf size from user :%u, expect: %lu\n", + *out_size, sizeof(struct sphw_hw_stats)); + return -EFAULT; + } + + memcpy(hw_stats, &((struct sphw_hwdev *)hwdev)->hw_stats, + sizeof(struct sphw_hw_stats)); + return 0; +} + +u16 sphw_dbg_clear_hw_stats(void *hwdev) +{ + memset((void *)&((struct sphw_hwdev *)hwdev)->hw_stats, 0, + sizeof(struct sphw_hw_stats)); + memset((void *)((struct sphw_hwdev *)hwdev)->chip_fault_stats, 0, + SPHW_CHIP_FAULT_SIZE); + return sizeof(struct sphw_hw_stats); +} + +void sphw_get_chip_fault_stats(const void *hwdev, u8 *chip_fault_stats, u32 offset) +{ + u32 copy_len = offset + MAX_DRV_BUF_SIZE - SPHW_CHIP_FAULT_SIZE; + + if (offset + MAX_DRV_BUF_SIZE <= SPHW_CHIP_FAULT_SIZE) + memcpy(chip_fault_stats, + ((struct sphw_hwdev *)hwdev)->chip_fault_stats + + offset, MAX_DRV_BUF_SIZE); + else + memcpy(chip_fault_stats, + ((struct sphw_hwdev *)hwdev)->chip_fault_stats + + offset, copy_len); +} + +void sphw_event_register(void *dev, void *pri_handle, sphw_event_handler callback) +{ + struct sphw_hwdev *hwdev = dev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for register event\n"); + return; + } + + hwdev->event_callback = callback; + hwdev->event_pri_handle = pri_handle; +} + +void sphw_event_unregister(void *dev) +{ + struct sphw_hwdev *hwdev = dev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for register event\n"); + return; + } + + hwdev->event_callback = NULL; + hwdev->event_pri_handle = NULL; +} + +void sphw_event_callback(void *hwdev, struct sphw_event_info *event) +{ + struct sphw_hwdev *dev = hwdev; + + if (!hwdev) { + pr_err("Hwdev pointer is NULL for event callback\n"); + return; + } + + if (!dev->event_callback) { + sdk_info(dev->dev_hdl, "Event callback function not register\n"); + return; + } + + dev->event_callback(dev->event_pri_handle, event); +} + +void sphw_set_pcie_order_cfg(void *handle) +{ +} + +void 
sphw_disable_mgmt_msg_report(void *hwdev) +{ + struct sphw_hwdev *hw_dev = (struct sphw_hwdev *)hwdev; + + sphw_set_pf_status(hw_dev->hwif, SPHW_PF_STATUS_INIT); +} + +void sphw_record_pcie_error(void *hwdev) +{ + struct sphw_hwdev *dev = (struct sphw_hwdev *)hwdev; + + if (!hwdev) + return; + + atomic_inc(&dev->hw_stats.fault_event_stats.pcie_fault_stats); +} + +int sphw_get_card_present_state(void *hwdev, bool *card_present_state) +{ + struct sphw_hwdev *dev = hwdev; + u32 addr, attr1; + + if (!hwdev || !card_present_state) + return -EINVAL; + + addr = SPHW_CSR_FUNC_ATTR1_ADDR; + attr1 = sphw_hwif_read_reg(dev->hwif, addr); + if (attr1 == SPHW_PCIE_LINK_DOWN) { + sdk_warn(dev->dev_hdl, "Card is not present\n"); + *card_present_state = (bool)0; + } else { + *card_present_state = (bool)1; + } + + return 0; +} + +void sphw_link_event_stats(void *dev, u8 link) +{ + struct sphw_hwdev *hwdev = dev; + + if (link) + atomic_inc(&hwdev->hw_stats.link_event_stats.link_up_stats); + else + atomic_inc(&hwdev->hw_stats.link_event_stats.link_down_stats); +} + +u8 sphw_max_pf_num(void *hwdev) +{ + if (!hwdev) + return 0; + + return SPHW_MAX_PF_NUM((struct sphw_hwdev *)hwdev); +} + +void sphw_fault_event_report(void *hwdev, u16 src, u16 level) +{ + if (!hwdev) + return; + + sdk_info(((struct sphw_hwdev *)hwdev)->dev_hdl, "Fault event report, src: %u, level: %u\n", + src, level); + + sphw_fault_post_process(hwdev, src, level); +} + +void sphw_heartbeat_lost_handler(struct work_struct *work) +{ + struct sphw_event_info event_info = { 0 }; + struct sphw_hwdev *hwdev = container_of(work, struct sphw_hwdev, + heartbeat_lost_work); + u16 src, level; + + atomic_inc(&hwdev->hw_stats.heart_lost_stats); + + if (hwdev->event_callback) { + event_info.type = + hwdev->pcie_link_down ? 
SPHW_EVENT_PCIE_LINK_DOWN : + SPHW_EVENT_HEART_LOST; + hwdev->event_callback(hwdev->event_pri_handle, &event_info); + } + + if (hwdev->pcie_link_down) { + src = SPHW_FAULT_SRC_PCIE_LINK_DOWN; + level = FAULT_LEVEL_HOST; + sdk_err(hwdev->dev_hdl, "Detect pcie is link down\n"); + } else { + src = SPHW_FAULT_SRC_HOST_HEARTBEAT_LOST; + level = FAULT_LEVEL_FATAL; + sdk_err(hwdev->dev_hdl, "Heart lost report received, func_id: %d\n", + sphw_global_func_id(hwdev)); + } + + sphw_fault_post_process(hwdev, src, level); +} + +#define DETECT_PCIE_LINK_DOWN_RETRY 2 +#define SPHW_HEARTBEAT_START_EXPIRE 5000 +#define SPHW_HEARTBEAT_PERIOD 1000 + +static bool sphw_is_hw_abnormal(struct sphw_hwdev *hwdev) +{ + u32 status; + + if (!sphw_get_chip_present_flag(hwdev)) + return false; + + status = sphw_get_heartbeat_status(hwdev); + if (status == SPHW_PCIE_LINK_DOWN) { + sdk_warn(hwdev->dev_hdl, "Detect BAR register read failed\n"); + hwdev->rd_bar_err_cnt++; + if (hwdev->rd_bar_err_cnt >= DETECT_PCIE_LINK_DOWN_RETRY) { + sphw_set_chip_absent(hwdev); + sphw_force_complete_all(hwdev); + hwdev->pcie_link_down = true; + return true; + } + + return false; + } + + if (status) { + hwdev->heartbeat_lost = true; + return true; + } + + hwdev->rd_bar_err_cnt = 0; + + return false; +} + +static void sphw_heartbeat_timer_handler(struct timer_list *t) +{ + struct sphw_hwdev *hwdev = from_timer(hwdev, t, heartbeat_timer); + + if (sphw_is_hw_abnormal(hwdev)) + queue_work(hwdev->workq, &hwdev->heartbeat_lost_work); + else + mod_timer(&hwdev->heartbeat_timer, + jiffies + msecs_to_jiffies(SPHW_HEARTBEAT_PERIOD)); +} + +static void sphw_init_heartbeat_detect(struct sphw_hwdev *hwdev) +{ + timer_setup(&hwdev->heartbeat_timer, sphw_heartbeat_timer_handler, 0); + + hwdev->heartbeat_timer.expires = + jiffies + msecs_to_jiffies(SPHW_HEARTBEAT_START_EXPIRE); + + add_timer(&hwdev->heartbeat_timer); + + INIT_WORK(&hwdev->heartbeat_lost_work, sphw_heartbeat_lost_handler); +} + +static void sphw_destroy_heartbeat_detect(struct sphw_hwdev *hwdev) +{ + del_timer_sync(&hwdev->heartbeat_timer); +} diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hwdev.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hwdev.h new file mode 100644 index 000000000000..451df0306e6c --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hwdev.h @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#ifndef SPHW_HWDEV_H +#define SPHW_HWDEV_H + +#include "sphw_mt.h" +#include "sphw_crm.h" +#include "sphw_hw.h" + +struct cfg_mgmt_info; + +struct sphw_hwif; +struct sphw_aeqs; +struct sphw_ceqs; +struct sphw_mbox; +struct sphw_msg_pf_to_mgmt; + +struct sphw_page_addr { + void *virt_addr; + u64 phys_addr; +}; + +struct mqm_addr_trans_tbl_info { + u32 chunk_num; + u32 search_gpa_num; + u32 page_size; + u32 page_num; + struct sphw_page_addr *brm_srch_page_addr; +}; + +struct sphw_hwdev { + void *adapter_hdl; /* pointer to spnic_pcidev or NDIS_Adapter */ + void *pcidev_hdl; /* pointer to pcidev or Handler */ + void *dev_hdl; /* pointer to pcidev->dev or Handler, for + * sdk_err() or dma_alloc() + */ + + void *service_adapter[SERVICE_T_MAX]; + void *chip_node; + void *ppf_hwdev; + + u32 wq_page_size; + int chip_present_flag; + bool poll; /*use polling mode or int mode*/ + + struct sphw_hwif *hwif; /* include void __iomem *bar */ + struct comm_global_attr glb_attr; + u64 features[COMM_MAX_FEATURE_QWORD]; + + struct cfg_mgmt_info *cfg_mgmt; + + struct sphw_cmdqs *cmdqs; + struct sphw_aeqs *aeqs; + struct 
sphw_ceqs *ceqs; + struct sphw_mbox *func_to_func; + struct sphw_msg_pf_to_mgmt *pf_to_mgmt; + struct sphw_clp_pf_to_mgmt *clp_pf_to_mgmt; + + void *cqm_hdl; + struct mqm_addr_trans_tbl_info mqm_att; + struct sphw_page_addr page_pa0; + struct sphw_page_addr page_pa1; + u32 statufull_ref_cnt; + + struct sphw_hw_stats hw_stats; + u8 *chip_fault_stats; + + sphw_event_handler event_callback; + void *event_pri_handle; + + struct sphw_board_info board_info; + + int prof_adap_type; + struct sphw_prof_attr *prof_attr; + + struct workqueue_struct *workq; + + u32 rd_bar_err_cnt; + bool pcie_link_down; + bool heartbeat_lost; + struct timer_list heartbeat_timer; + struct work_struct heartbeat_lost_work; +}; + +#define SPHW_MAX_HOST_NUM(hwdev) ((hwdev)->glb_attr.max_host_num) +#define SPHW_MAX_PF_NUM(hwdev) ((hwdev)->glb_attr.max_pf_num) +#define SPHW_MGMT_CPU_NODE_ID(hwdev) ((hwdev)->glb_attr.mgmt_host_node_id) + +#define COMM_FEATURE_QW0(hwdev, feature) ((hwdev)->features[0] & COMM_F_##feature) +#define COMM_SUPPORT_API_CHAIN(hwdev) COMM_FEATURE_QW0(hwdev, API_CHAIN) + +#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hwif.c b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hwif.c new file mode 100644 index 000000000000..7230ce2f0778 --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hwif.c @@ -0,0 +1,911 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/module.h> + +#include "sphw_csr.h" +#include "sphw_crm.h" +#include "sphw_hw.h" +#include "sphw_common.h" +#include "sphw_hwdev.h" +#include "sphw_hwif.h" + +#define WAIT_HWIF_READY_TIMEOUT 10000 +#define SPHW_WAIT_DOORBELL_AND_OUTBOUND_TIMEOUT 60000 + +#define DB_IDX(db, db_base) \ + ((u32)(((ulong)(db) - (ulong)(db_base)) / \ + SPHW_DB_PAGE_SIZE)) + +#define SPHW_AF0_FUNC_GLOBAL_IDX_SHIFT 0 +#define SPHW_AF0_P2P_IDX_SHIFT 12 +#define SPHW_AF0_PCI_INTF_IDX_SHIFT 17 +#define SPHW_AF0_VF_IN_PF_SHIFT 20 +#define SPHW_AF0_FUNC_TYPE_SHIFT 28 + +#define SPHW_AF0_FUNC_GLOBAL_IDX_MASK 0xFFF +#define SPHW_AF0_P2P_IDX_MASK 0x1F +#define SPHW_AF0_PCI_INTF_IDX_MASK 0x7 +#define SPHW_AF0_VF_IN_PF_MASK 0xFF +#define SPHW_AF0_FUNC_TYPE_MASK 0x1 + +#define SPHW_AF0_GET(val, member) \ + (((val) >> SPHW_AF0_##member##_SHIFT) & SPHW_AF0_##member##_MASK) + +#define SPHW_AF1_PPF_IDX_SHIFT 0 +#define SPHW_AF1_AEQS_PER_FUNC_SHIFT 8 +#define SPHW_AF1_MGMT_INIT_STATUS_SHIFT 30 +#define SPHW_AF1_PF_INIT_STATUS_SHIFT 31 + +#define SPHW_AF1_PPF_IDX_MASK 0x3F +#define SPHW_AF1_AEQS_PER_FUNC_MASK 0x3 +#define SPHW_AF1_MGMT_INIT_STATUS_MASK 0x1 +#define SPHW_AF1_PF_INIT_STATUS_MASK 0x1 + +#define SPHW_AF1_GET(val, member) \ + (((val) >> SPHW_AF1_##member##_SHIFT) & SPHW_AF1_##member##_MASK) + +#define SPHW_AF2_CEQS_PER_FUNC_SHIFT 0 +#define SPHW_AF2_DMA_ATTR_PER_FUNC_SHIFT 9 +#define SPHW_AF2_IRQS_PER_FUNC_SHIFT 16 + +#define SPHW_AF2_CEQS_PER_FUNC_MASK 0x1FF +#define SPHW_AF2_DMA_ATTR_PER_FUNC_MASK 0x7 +#define SPHW_AF2_IRQS_PER_FUNC_MASK 0x7FF + +#define SPHW_AF2_GET(val, member) \ + (((val) >> SPHW_AF2_##member##_SHIFT) & SPHW_AF2_##member##_MASK) + +#define SPHW_AF3_GLOBAL_VF_ID_OF_NXT_PF_SHIFT 0 +#define SPHW_AF3_GLOBAL_VF_ID_OF_PF_SHIFT 16 + +#define SPHW_AF3_GLOBAL_VF_ID_OF_NXT_PF_MASK 0xFFF +#define SPHW_AF3_GLOBAL_VF_ID_OF_PF_MASK 0xFFF + +#define SPHW_AF3_GET(val, member) \ + (((val) >> SPHW_AF3_##member##_SHIFT) & SPHW_AF3_##member##_MASK) + +#define 
SPHW_AF4_DOORBELL_CTRL_SHIFT 0 +#define SPHW_AF4_DOORBELL_CTRL_MASK 0x1 + +#define SPHW_AF4_GET(val, member) \ + (((val) >> SPHW_AF4_##member##_SHIFT) & SPHW_AF4_##member##_MASK) + +#define SPHW_AF4_SET(val, member) \ + (((val) & SPHW_AF4_##member##_MASK) << SPHW_AF4_##member##_SHIFT) + +#define SPHW_AF4_CLEAR(val, member) \ + ((val) & (~(SPHW_AF4_##member##_MASK << SPHW_AF4_##member##_SHIFT))) + +#define SPHW_AF5_OUTBOUND_CTRL_SHIFT 0 +#define SPHW_AF5_OUTBOUND_CTRL_MASK 0x1 + +#define SPHW_AF5_GET(val, member) \ + (((val) >> SPHW_AF5_##member##_SHIFT) & SPHW_AF5_##member##_MASK) + +#define SPHW_AF5_SET(val, member) \ + (((val) & SPHW_AF5_##member##_MASK) << SPHW_AF5_##member##_SHIFT) + +#define SPHW_AF5_CLEAR(val, member) \ + ((val) & (~(SPHW_AF5_##member##_MASK << SPHW_AF5_##member##_SHIFT))) + +#define SPHW_AF6_PF_STATUS_SHIFT 0 +#define SPHW_AF6_PF_STATUS_MASK 0xFFFF + +#define SPHW_AF6_SET(val, member) \ + ((((u32)(val)) & SPHW_AF6_##member##_MASK) << \ + SPHW_AF6_##member##_SHIFT) + +#define SPHW_AF6_GET(val, member) \ + (((val) >> SPHW_AF6_##member##_SHIFT) & SPHW_AF6_##member##_MASK) + +#define SPHW_AF6_CLEAR(val, member) \ + ((val) & (~(SPHW_AF6_##member##_MASK << \ + SPHW_AF6_##member##_SHIFT))) + +#define sphw_PPF_ELECT_PORT_IDX_SHIFT 0 + +#define sphw_PPF_ELECT_PORT_IDX_MASK 0x3F + +#define sphw_PPF_ELECT_PORT_GET(val, member) \ + (((val) >> sphw_PPF_ELECT_PORT_##member##_SHIFT) & \ + sphw_PPF_ELECT_PORT_##member##_MASK) + +#define SPHW_PPF_ELECTION_IDX_SHIFT 0 + +#define SPHW_PPF_ELECTION_IDX_MASK 0x3F + +#define SPHW_PPF_ELECTION_SET(val, member) \ + (((val) & SPHW_PPF_ELECTION_##member##_MASK) << \ + SPHW_PPF_ELECTION_##member##_SHIFT) + +#define SPHW_PPF_ELECTION_GET(val, member) \ + (((val) >> SPHW_PPF_ELECTION_##member##_SHIFT) & \ + SPHW_PPF_ELECTION_##member##_MASK) + +#define SPHW_PPF_ELECTION_CLEAR(val, member) \ + ((val) & (~(SPHW_PPF_ELECTION_##member##_MASK << \ + SPHW_PPF_ELECTION_##member##_SHIFT))) + +#define SPHW_MPF_ELECTION_IDX_SHIFT 0 + +#define SPHW_MPF_ELECTION_IDX_MASK 0x1F + +#define SPHW_MPF_ELECTION_SET(val, member) \ + (((val) & SPHW_MPF_ELECTION_##member##_MASK) << \ + SPHW_MPF_ELECTION_##member##_SHIFT) + +#define SPHW_MPF_ELECTION_GET(val, member) \ + (((val) >> SPHW_MPF_ELECTION_##member##_SHIFT) & \ + SPHW_MPF_ELECTION_##member##_MASK) + +#define SPHW_MPF_ELECTION_CLEAR(val, member) \ + ((val) & (~(SPHW_MPF_ELECTION_##member##_MASK << \ + SPHW_MPF_ELECTION_##member##_SHIFT))) + +#define SPHW_GET_REG_FLAG(reg) ((reg) & (~(SPHW_REGS_FLAG_MAKS))) + +#define SPHW_GET_REG_ADDR(reg) ((reg) & (SPHW_REGS_FLAG_MAKS)) + +u32 sphw_hwif_read_reg(struct sphw_hwif *hwif, u32 reg) +{ + if (SPHW_GET_REG_FLAG(reg) == SPHW_MGMT_REGS_FLAG) + return be32_to_cpu(readl(hwif->mgmt_regs_base + + SPHW_GET_REG_ADDR(reg))); + else + return be32_to_cpu(readl(hwif->cfg_regs_base + + SPHW_GET_REG_ADDR(reg))); +} + +void sphw_hwif_write_reg(struct sphw_hwif *hwif, u32 reg, u32 val) +{ + if (SPHW_GET_REG_FLAG(reg) == SPHW_MGMT_REGS_FLAG) + writel(cpu_to_be32(val), + hwif->mgmt_regs_base + SPHW_GET_REG_ADDR(reg)); + else + writel(cpu_to_be32(val), + hwif->cfg_regs_base + SPHW_GET_REG_ADDR(reg)); +} + +/** + * sphw_get_heartbeat_status - get heart beat status + * @hwdev: the pointer to hw device + * Return: 0 - normal, 1 - heart lost, 0xFFFFFFFF - Pcie link down + **/ +u32 sphw_get_heartbeat_status(struct sphw_hwdev *hwdev) +{ + u32 attr1; + + attr1 = sphw_hwif_read_reg(hwdev->hwif, SPHW_CSR_FUNC_ATTR1_ADDR); + if (attr1 == SPHW_PCIE_LINK_DOWN) + return attr1; + + return 
!SPHW_AF1_GET(attr1, MGMT_INIT_STATUS); +} + +/** + * hwif_ready - test if the HW initialization passed + * @hwdev: the pointer to hw device + * Return: 0 - success, negative - failure + **/ +static int hwif_ready(struct sphw_hwdev *hwdev) +{ + if (sphw_get_heartbeat_status(hwdev)) + return -EBUSY; + + return 0; +} + +static enum sphw_wait_return check_hwif_ready_handler(void *priv_data) +{ + if (!hwif_ready(priv_data)) + return WAIT_PROCESS_CPL; + + return WAIT_PROCESS_WAITING; +} + +static int wait_hwif_ready(struct sphw_hwdev *hwdev) +{ + if (!sphw_wait_for_timeout(hwdev, check_hwif_ready_handler, + WAIT_HWIF_READY_TIMEOUT, USEC_PER_MSEC)) + return 0; + + sdk_err(hwdev->dev_hdl, "Wait for hwif timeout\n"); + return -EBUSY; +} + +/** + * set_hwif_attr - set the attributes as members in hwif + * @hwif: the hardware interface of a pci function device + * @attr0: the first attribute that was read from the hw + * @attr1: the second attribute that was read from the hw + * @attr2: the third attribute that was read from the hw + * @attr3: the fourth attribute that was read from the hw + **/ +static void set_hwif_attr(struct sphw_hwif *hwif, u32 attr0, u32 attr1, + u32 attr2, u32 attr3) +{ + hwif->attr.func_global_idx = SPHW_AF0_GET(attr0, FUNC_GLOBAL_IDX); + hwif->attr.port_to_port_idx = SPHW_AF0_GET(attr0, P2P_IDX); + hwif->attr.pci_intf_idx = SPHW_AF0_GET(attr0, PCI_INTF_IDX); + hwif->attr.vf_in_pf = SPHW_AF0_GET(attr0, VF_IN_PF); + hwif->attr.func_type = SPHW_AF0_GET(attr0, FUNC_TYPE); + + hwif->attr.ppf_idx = SPHW_AF1_GET(attr1, PPF_IDX); + hwif->attr.num_aeqs = BIT(SPHW_AF1_GET(attr1, AEQS_PER_FUNC)); + hwif->attr.num_ceqs = (u8)SPHW_AF2_GET(attr2, CEQS_PER_FUNC); + hwif->attr.num_irqs = SPHW_AF2_GET(attr2, IRQS_PER_FUNC); + hwif->attr.num_dma_attr = BIT(SPHW_AF2_GET(attr2, DMA_ATTR_PER_FUNC)); + + hwif->attr.global_vf_id_of_pf = SPHW_AF3_GET(attr3, GLOBAL_VF_ID_OF_PF); + + pr_info("func_global_idx: 0x%x, port_to_port_idx: 0x%x, pci_intf_idx: 0x%x, vf_in_pf: 0x%x, func_type: %d\n", + hwif->attr.func_global_idx, hwif->attr.port_to_port_idx, + hwif->attr.pci_intf_idx, hwif->attr.vf_in_pf, + hwif->attr.func_type); + + pr_info("ppf_idx: 0x%x, num_aeqs: 0x%x, num_ceqs: 0x%x, num_irqs: 0x%x, num_dma_attr: 0x%x, global_vf_id_of_pf: %u\n", + hwif->attr.ppf_idx, hwif->attr.num_aeqs, + hwif->attr.num_ceqs, hwif->attr.num_irqs, + hwif->attr.num_dma_attr, hwif->attr.global_vf_id_of_pf); +} + +/** + * get_hwif_attr - read and set the attributes as members in hwif + * @hwif: the hardware interface of a pci function device + **/ +static void get_hwif_attr(struct sphw_hwif *hwif) +{ + u32 addr, attr0, attr1, attr2, attr3; + + addr = SPHW_CSR_FUNC_ATTR0_ADDR; + attr0 = sphw_hwif_read_reg(hwif, addr); + + addr = SPHW_CSR_FUNC_ATTR1_ADDR; + attr1 = sphw_hwif_read_reg(hwif, addr); + + addr = SPHW_CSR_FUNC_ATTR2_ADDR; + attr2 = sphw_hwif_read_reg(hwif, addr); + + addr = SPHW_CSR_FUNC_ATTR3_ADDR; + attr3 = sphw_hwif_read_reg(hwif, addr); + + pr_info("attr0: 0x%08x, attr1: 0x%08x, attr2: 0x%08x, attr3: 0x%08x\n", + attr0, attr1, attr2, attr3); + set_hwif_attr(hwif, attr0, attr1, attr2, attr3); +} + +void sphw_set_pf_status(struct sphw_hwif *hwif, enum sphw_pf_status status) +{ + u32 attr6 = SPHW_AF6_SET(status, PF_STATUS); + u32 addr = SPHW_CSR_FUNC_ATTR6_ADDR; + + if (hwif->attr.func_type == TYPE_VF) + return; + + sphw_hwif_write_reg(hwif, addr, attr6); +} + +enum sphw_pf_status sphw_get_pf_status(struct sphw_hwif *hwif) +{ + u32 attr6 = sphw_hwif_read_reg(hwif, SPHW_CSR_FUNC_ATTR6_ADDR); + + return 
SPHW_AF6_GET(attr6, PF_STATUS); +} + +enum sphw_doorbell_ctrl sphw_get_doorbell_ctrl_status(struct sphw_hwif *hwif) +{ + u32 attr4 = sphw_hwif_read_reg(hwif, SPHW_CSR_FUNC_ATTR4_ADDR); + + return SPHW_AF4_GET(attr4, DOORBELL_CTRL); +} + +enum sphw_outbound_ctrl sphw_get_outbound_ctrl_status(struct sphw_hwif *hwif) +{ + u32 attr5 = sphw_hwif_read_reg(hwif, SPHW_CSR_FUNC_ATTR5_ADDR); + + return SPHW_AF5_GET(attr5, OUTBOUND_CTRL); +} + +void sphw_enable_doorbell(struct sphw_hwif *hwif) +{ + u32 addr, attr4; + + addr = SPHW_CSR_FUNC_ATTR4_ADDR; + attr4 = sphw_hwif_read_reg(hwif, addr); + + attr4 = SPHW_AF4_CLEAR(attr4, DOORBELL_CTRL); + attr4 |= SPHW_AF4_SET(ENABLE_DOORBELL, DOORBELL_CTRL); + + sphw_hwif_write_reg(hwif, addr, attr4); +} + +void sphw_disable_doorbell(struct sphw_hwif *hwif) +{ + u32 addr, attr4; + + addr = SPHW_CSR_FUNC_ATTR4_ADDR; + attr4 = sphw_hwif_read_reg(hwif, addr); + + attr4 = SPHW_AF4_CLEAR(attr4, DOORBELL_CTRL); + attr4 |= SPHW_AF4_SET(DISABLE_DOORBELL, DOORBELL_CTRL); + + sphw_hwif_write_reg(hwif, addr, attr4); +} + +void sphw_enable_outbound(struct sphw_hwif *hwif) +{ + u32 addr, attr5; + + addr = SPHW_CSR_FUNC_ATTR5_ADDR; + attr5 = sphw_hwif_read_reg(hwif, addr); + + attr5 = SPHW_AF5_CLEAR(attr5, OUTBOUND_CTRL); + attr5 |= SPHW_AF5_SET(ENABLE_OUTBOUND, OUTBOUND_CTRL); + + sphw_hwif_write_reg(hwif, addr, attr5); +} + +void sphw_disable_outbound(struct sphw_hwif *hwif) +{ + u32 addr, attr5; + + addr = SPHW_CSR_FUNC_ATTR5_ADDR; + attr5 = sphw_hwif_read_reg(hwif, addr); + + attr5 = SPHW_AF5_CLEAR(attr5, OUTBOUND_CTRL); + attr5 |= SPHW_AF5_SET(DISABLE_OUTBOUND, OUTBOUND_CTRL); + + sphw_hwif_write_reg(hwif, addr, attr5); +} + +/** + * set_ppf - try to set hwif as ppf and set the type of hwif in this case + * @hwif: the hardware interface of a pci function device + **/ +static void set_ppf(struct sphw_hwif *hwif) +{ + struct sphw_func_attr *attr = &hwif->attr; + u32 addr, val, ppf_election; + + /* Read Modify Write */ + addr = SPHW_CSR_PPF_ELECTION_ADDR; + + val = sphw_hwif_read_reg(hwif, addr); + val = SPHW_PPF_ELECTION_CLEAR(val, IDX); + + ppf_election = SPHW_PPF_ELECTION_SET(attr->func_global_idx, IDX); + val |= ppf_election; + + sphw_hwif_write_reg(hwif, addr, val); + + /* Check PPF */ + val = sphw_hwif_read_reg(hwif, addr); + + attr->ppf_idx = SPHW_PPF_ELECTION_GET(val, IDX); + if (attr->ppf_idx == attr->func_global_idx) + attr->func_type = TYPE_PPF; +} + +/** + * get_mpf - get the mpf index into the hwif + * @hwif: the hardware interface of a pci function device + **/ +static void get_mpf(struct sphw_hwif *hwif) +{ + struct sphw_func_attr *attr = &hwif->attr; + u32 mpf_election, addr; + + addr = SPHW_CSR_GLOBAL_MPF_ELECTION_ADDR; + + mpf_election = sphw_hwif_read_reg(hwif, addr); + attr->mpf_idx = SPHW_MPF_ELECTION_GET(mpf_election, IDX); +} + +/** + * set_mpf - try to set hwif as mpf and set the mpf idx in hwif + * @hwif: the hardware interface of a pci function device + **/ +static void set_mpf(struct sphw_hwif *hwif) +{ + struct sphw_func_attr *attr = &hwif->attr; + u32 addr, val, mpf_election; + + /* Read Modify Write */ + addr = SPHW_CSR_GLOBAL_MPF_ELECTION_ADDR; + + val = sphw_hwif_read_reg(hwif, addr); + + val = SPHW_MPF_ELECTION_CLEAR(val, IDX); + mpf_election = SPHW_MPF_ELECTION_SET(attr->func_global_idx, IDX); + + val |= mpf_election; + sphw_hwif_write_reg(hwif, addr, val); +} + +static int init_db_area_idx(struct sphw_free_db_area *free_db_area, u64 db_dwqe_len) +{ + u32 db_max_areas; + + db_max_areas = (db_dwqe_len > SPHW_DB_DWQE_SIZE) ? 
SPHW_DB_MAX_AREAS : + (u32)(db_dwqe_len / SPHW_DB_PAGE_SIZE); + free_db_area->db_bitmap_array = bitmap_zalloc(db_max_areas, GFP_KERNEL); + if (!free_db_area->db_bitmap_array) { + pr_err("Failed to allocate db area.\n"); + return -ENOMEM; + } + free_db_area->db_max_areas = db_max_areas; + spin_lock_init(&free_db_area->idx_lock); + return 0; +} + +static void free_db_area(struct sphw_free_db_area *free_db_area) +{ + kfree(free_db_area->db_bitmap_array); +} + +static int get_db_idx(struct sphw_hwif *hwif, u32 *idx) +{ + struct sphw_free_db_area *free_db_area = &hwif->free_db_area; + u32 pg_idx; + + spin_lock(&free_db_area->idx_lock); + pg_idx = (u32)find_first_zero_bit(free_db_area->db_bitmap_array, + free_db_area->db_max_areas); + if (pg_idx == free_db_area->db_max_areas) { + spin_unlock(&free_db_area->idx_lock); + return -ENOMEM; + } + set_bit(pg_idx, free_db_area->db_bitmap_array); + spin_unlock(&free_db_area->idx_lock); + + *idx = pg_idx; + + return 0; +} + +static void free_db_idx(struct sphw_hwif *hwif, u32 idx) +{ + struct sphw_free_db_area *free_db_area = &hwif->free_db_area; + + if (idx >= free_db_area->db_max_areas) + return; + + spin_lock(&free_db_area->idx_lock); + clear_bit((int)idx, free_db_area->db_bitmap_array); + + spin_unlock(&free_db_area->idx_lock); +} + +void sphw_free_db_addr(void *hwdev, const void __iomem *db_base, void __iomem *dwqe_base) +{ + struct sphw_hwif *hwif = NULL; + u32 idx; + + if (!hwdev || !db_base) + return; + + hwif = ((struct sphw_hwdev *)hwdev)->hwif; + idx = DB_IDX(db_base, hwif->db_base); + + free_db_idx(hwif, idx); +} + +int sphw_alloc_db_addr(void *hwdev, void __iomem **db_base, void __iomem **dwqe_base) +{ + struct sphw_hwif *hwif = NULL; + u32 idx = 0; + int err; + + if (!hwdev || !db_base) + return -EINVAL; + + hwif = ((struct sphw_hwdev *)hwdev)->hwif; + + err = get_db_idx(hwif, &idx); + if (err) + return -EFAULT; + + *db_base = hwif->db_base + idx * SPHW_DB_PAGE_SIZE; + + if (!dwqe_base) + return 0; + + *dwqe_base = (u8 *)*db_base + SPHW_DWQE_OFFSET; + + return 0; +} + +void sphw_free_db_phy_addr(void *hwdev, u64 db_base, u64 dwqe_base) +{ + struct sphw_hwif *hwif = NULL; + u32 idx; + + if (!hwdev) + return; + + hwif = ((struct sphw_hwdev *)hwdev)->hwif; + idx = DB_IDX(db_base, hwif->db_base_phy); + + free_db_idx(hwif, idx); +} + +int sphw_alloc_db_phy_addr(void *hwdev, u64 *db_base, u64 *dwqe_base) +{ + struct sphw_hwif *hwif = NULL; + u32 idx; + int err; + + if (!hwdev || !db_base || !dwqe_base) + return -EINVAL; + + hwif = ((struct sphw_hwdev *)hwdev)->hwif; + + err = get_db_idx(hwif, &idx); + if (err) + return -EFAULT; + + *db_base = hwif->db_base_phy + idx * SPHW_DB_PAGE_SIZE; + *dwqe_base = *db_base + SPHW_DWQE_OFFSET; + + return 0; +} + +void sphw_set_msix_auto_mask_state(void *hwdev, u16 msix_idx, enum sphw_msix_auto_mask flag) +{ + struct sphw_hwif *hwif = NULL; + u32 mask_bits; + u32 addr; + + if (!hwdev) + return; + + hwif = ((struct sphw_hwdev *)hwdev)->hwif; + + if (flag) + mask_bits = SPHW_MSI_CLR_INDIR_SET(1, AUTO_MSK_SET); + else + mask_bits = SPHW_MSI_CLR_INDIR_SET(1, AUTO_MSK_CLR); + + mask_bits = mask_bits | SPHW_MSI_CLR_INDIR_SET(msix_idx, SIMPLE_INDIR_IDX); + + addr = SPHW_CSR_FUNC_MSI_CLR_WR_ADDR; + sphw_hwif_write_reg(hwif, addr, mask_bits); +} + +void sphw_set_msix_state(void *hwdev, u16 msix_idx, enum sphw_msix_state flag) +{ + struct sphw_hwif *hwif = NULL; + u32 mask_bits; + u32 addr; + u8 int_msk = 1; + + if (!hwdev) + return; + + hwif = ((struct sphw_hwdev *)hwdev)->hwif; + + if (flag) + mask_bits = 
SPHW_MSI_CLR_INDIR_SET(int_msk, INT_MSK_SET); + else + mask_bits = SPHW_MSI_CLR_INDIR_SET(int_msk, INT_MSK_CLR); + mask_bits = mask_bits | SPHW_MSI_CLR_INDIR_SET(msix_idx, SIMPLE_INDIR_IDX); + + addr = SPHW_CSR_FUNC_MSI_CLR_WR_ADDR; + sphw_hwif_write_reg(hwif, addr, mask_bits); +} + +static void disable_all_msix(struct sphw_hwdev *hwdev) +{ + u16 num_irqs = hwdev->hwif->attr.num_irqs; + u16 i; + + for (i = 0; i < num_irqs; i++) + sphw_set_msix_state(hwdev, i, SPHW_MSIX_DISABLE); +} + +static enum sphw_wait_return check_db_flush_enable_handler(void *priv_data) +{ + struct sphw_hwif *hwif = priv_data; + enum sphw_doorbell_ctrl db_ctrl; + + db_ctrl = sphw_get_doorbell_ctrl_status(hwif); + if (db_ctrl == ENABLE_DOORBELL) + return WAIT_PROCESS_CPL; + + return WAIT_PROCESS_WAITING; +} + +static enum sphw_wait_return check_db_flush_disable_handler(void *priv_data) +{ + struct sphw_hwif *hwif = priv_data; + enum sphw_doorbell_ctrl db_ctrl; + + db_ctrl = sphw_get_doorbell_ctrl_status(hwif); + if (db_ctrl == DISABLE_DOORBELL) + return WAIT_PROCESS_CPL; + + return WAIT_PROCESS_WAITING; +} + +int wait_until_doorbell_flush_states(struct sphw_hwif *hwif, + enum sphw_doorbell_ctrl states) +{ + if (!hwif) + return -EFAULT; + + return sphw_wait_for_timeout(hwif, states == ENABLE_DOORBELL ? + check_db_flush_enable_handler : check_db_flush_disable_handler, + SPHW_WAIT_DOORBELL_AND_OUTBOUND_TIMEOUT, USEC_PER_MSEC); +} + +static enum sphw_wait_return check_db_outbound_enable_handler(void *priv_data) +{ + struct sphw_hwif *hwif = priv_data; + enum sphw_doorbell_ctrl db_ctrl; + enum sphw_outbound_ctrl outbound_ctrl; + + db_ctrl = sphw_get_doorbell_ctrl_status(hwif); + outbound_ctrl = sphw_get_outbound_ctrl_status(hwif); + + if (outbound_ctrl == ENABLE_OUTBOUND && db_ctrl == ENABLE_DOORBELL) + return WAIT_PROCESS_CPL; + + return WAIT_PROCESS_WAITING; +} + +static int wait_until_doorbell_and_outbound_enabled(struct sphw_hwif *hwif) +{ + return sphw_wait_for_timeout(hwif, check_db_outbound_enable_handler, + SPHW_WAIT_DOORBELL_AND_OUTBOUND_TIMEOUT, USEC_PER_MSEC); +} + +/** + * sphw_init_hwif - initialize the hw interface + * @hwif: the hardware interface of a pci function device + * @pdev: the pci device that will be part of the hwif struct + * Return: 0 - success, negative - failure + **/ +int sphw_init_hwif(struct sphw_hwdev *hwdev, void *cfg_reg_base, void *intr_reg_base, + void *mgmt_regs_base, u64 db_base_phy, void *db_base, u64 db_dwqe_len) +{ + struct sphw_hwif *hwif = NULL; + u32 attr4, attr5; + int err; + + hwif = kzalloc(sizeof(*hwif), GFP_KERNEL); + if (!hwif) + return -ENOMEM; + + hwdev->hwif = hwif; + hwif->pdev = hwdev->pcidev_hdl; + + /* if function is VF, mgmt_regs_base will be NULL */ + if (!mgmt_regs_base) + hwif->cfg_regs_base = (u8 *)cfg_reg_base + + SPHW_VF_CFG_REG_OFFSET; + else + hwif->cfg_regs_base = cfg_reg_base; + + hwif->intr_regs_base = intr_reg_base; + hwif->mgmt_regs_base = mgmt_regs_base; + sdk_info(hwdev->dev_hdl, "init intr_regs_base: %p, mgmt_regs_base: %p, db_base: %p, db_dwqe_len: 0x%llx\n", + hwif->intr_regs_base, hwif->mgmt_regs_base, + db_base, db_dwqe_len); + + hwif->db_base_phy = db_base_phy; + hwif->db_base = db_base; + hwif->db_dwqe_len = db_dwqe_len; + err = init_db_area_idx(&hwif->free_db_area, hwif->db_dwqe_len); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init db area.\n"); + goto init_db_area_err; + } + + err = wait_hwif_ready(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Chip status is not ready\n"); + goto hwif_ready_err; + } + + get_hwif_attr(hwif); + + err 
= wait_until_doorbell_and_outbound_enabled(hwif); + if (err) { + attr4 = sphw_hwif_read_reg(hwif, SPHW_CSR_FUNC_ATTR4_ADDR); + attr5 = sphw_hwif_read_reg(hwif, SPHW_CSR_FUNC_ATTR5_ADDR); + sdk_err(hwdev->dev_hdl, "Hw doorbell/outbound is disabled, attr4 0x%x attr5 0x%x\n", + attr4, attr5); + goto hwif_ready_err; + } + + if (!SPHW_IS_VF(hwdev)) { + set_ppf(hwif); + + if (SPHW_IS_PPF(hwdev)) + set_mpf(hwif); + + get_mpf(hwif); + } + + disable_all_msix(hwdev); + /* disable mgmt cpu report any event */ + sphw_set_pf_status(hwdev->hwif, SPHW_PF_STATUS_INIT); + + sdk_info(hwdev->dev_hdl, "global_func_idx: %u, func_type: %d, host_id: %u, ppf: %u, mpf: %u\n", + hwif->attr.func_global_idx, hwif->attr.func_type, + hwif->attr.pci_intf_idx, hwif->attr.ppf_idx, + hwif->attr.mpf_idx); + + return 0; + +hwif_ready_err: + free_db_area(&hwif->free_db_area); +init_db_area_err: + kfree(hwif); + + return err; +} + +/** + * sphw_free_hwif - free the hw interface + * @hwif: the hardware interface of a pci function device + * @pdev: the pci device that will be part of the hwif struct + **/ +void sphw_free_hwif(struct sphw_hwdev *hwdev) +{ + free_db_area(&hwdev->hwif->free_db_area); + kfree(hwdev->hwif); +} + +u16 sphw_global_func_id(void *hwdev) +{ + struct sphw_hwif *hwif = NULL; + + if (!hwdev) + return 0; + + hwif = ((struct sphw_hwdev *)hwdev)->hwif; + + return hwif->attr.func_global_idx; +} + +u16 sphw_intr_num(void *hwdev) +{ + struct sphw_hwif *hwif = NULL; + + if (!hwdev) + return 0; + + hwif = ((struct sphw_hwdev *)hwdev)->hwif; + + return hwif->attr.num_irqs; +} + +u8 sphw_pf_id_of_vf(void *hwdev) +{ + struct sphw_hwif *hwif = NULL; + + if (!hwdev) + return 0; + + hwif = ((struct sphw_hwdev *)hwdev)->hwif; + + return hwif->attr.port_to_port_idx; +} + +u8 sphw_pcie_itf_id(void *hwdev) +{ + struct sphw_hwif *hwif = NULL; + + if (!hwdev) + return 0; + + hwif = ((struct sphw_hwdev *)hwdev)->hwif; + + return hwif->attr.pci_intf_idx; +} + +u8 sphw_vf_in_pf(void *hwdev) +{ + struct sphw_hwif *hwif = NULL; + + if (!hwdev) + return 0; + + hwif = ((struct sphw_hwdev *)hwdev)->hwif; + + return hwif->attr.vf_in_pf; +} + +enum func_type sphw_func_type(void *hwdev) +{ + struct sphw_hwif *hwif = NULL; + + if (!hwdev) + return 0; + + hwif = ((struct sphw_hwdev *)hwdev)->hwif; + + return hwif->attr.func_type; +} + +u8 sphw_ceq_num(void *hwdev) +{ + struct sphw_hwif *hwif = NULL; + + if (!hwdev) + return 0; + + hwif = ((struct sphw_hwdev *)hwdev)->hwif; + + return hwif->attr.num_ceqs; +} + +u8 sphw_dma_attr_entry_num(void *hwdev) +{ + struct sphw_hwif *hwif = NULL; + + if (!hwdev) + return 0; + + hwif = ((struct sphw_hwdev *)hwdev)->hwif; + + return hwif->attr.num_dma_attr; +} + +u16 sphw_glb_pf_vf_offset(void *hwdev) +{ + struct sphw_hwif *hwif = NULL; + + if (!hwdev) + return 0; + + hwif = ((struct sphw_hwdev *)hwdev)->hwif; + + return hwif->attr.global_vf_id_of_pf; +} + +u8 sphw_mpf_idx(void *hwdev) +{ + struct sphw_hwif *hwif = NULL; + + if (!hwdev) + return 0; + + hwif = ((struct sphw_hwdev *)hwdev)->hwif; + + return hwif->attr.mpf_idx; +} + +u8 sphw_ppf_idx(void *hwdev) +{ + struct sphw_hwif *hwif = NULL; + + if (!hwdev) + return 0; + + hwif = ((struct sphw_hwdev *)hwdev)->hwif; + + return hwif->attr.ppf_idx; +} + +u8 sphw_host_ppf_idx(void *hwdev, u8 host_id) +{ + struct sphw_hwdev *dev = hwdev; + u32 ppf_elect_port_addr; + u32 val; + + if (!hwdev) + return 0; + + ppf_elect_port_addr = SPHW_CSR_FUNC_PPF_ELECT(host_id); + val = sphw_hwif_read_reg(dev->hwif, ppf_elect_port_addr); + + return 
sphw_PPF_ELECT_PORT_GET(val, IDX); +} diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hwif.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hwif.h new file mode 100644 index 000000000000..9035baf8a66e --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hwif.h @@ -0,0 +1,102 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#ifndef SPHW_HWIF_H +#define SPHW_HWIF_H + +#define SPHW_PCIE_LINK_DOWN 0xFFFFFFFF + +struct sphw_free_db_area { + unsigned long *db_bitmap_array; + u32 db_max_areas; + /* spinlock for allocating doorbell area */ + spinlock_t idx_lock; +}; + +struct sphw_func_attr { + u16 func_global_idx; + u8 port_to_port_idx; + u8 pci_intf_idx; + u8 vf_in_pf; + enum func_type func_type; + + u8 mpf_idx; + + u8 ppf_idx; + + u16 num_irqs; + u8 num_aeqs; + u8 num_ceqs; + + u8 num_dma_attr; + + u16 global_vf_id_of_pf; +}; + +struct sphw_hwif { + u8 __iomem *cfg_regs_base; + u8 __iomem *intr_regs_base; + u8 __iomem *mgmt_regs_base; + u64 db_base_phy; + u64 db_dwqe_len; + u8 __iomem *db_base; + + struct sphw_free_db_area free_db_area; + + struct sphw_func_attr attr; + + void *pdev; +}; + +enum sphw_outbound_ctrl { + ENABLE_OUTBOUND = 0x0, + DISABLE_OUTBOUND = 0x1, +}; + +enum sphw_doorbell_ctrl { + ENABLE_DOORBELL = 0x0, + DISABLE_DOORBELL = 0x1, +}; + +enum sphw_pf_status { + SPHW_PF_STATUS_INIT = 0X0, + SPHW_PF_STATUS_ACTIVE_FLAG = 0x11, + SPHW_PF_STATUS_FLR_START_FLAG = 0x12, + SPHW_PF_STATUS_FLR_FINISH_FLAG = 0x13, +}; + +#define SPHW_HWIF_NUM_AEQS(hwif) ((hwif)->attr.num_aeqs) +#define SPHW_HWIF_NUM_CEQS(hwif) ((hwif)->attr.num_ceqs) +#define SPHW_HWIF_NUM_IRQS(hwif) ((hwif)->attr.num_irqs) +#define SPHW_HWIF_GLOBAL_IDX(hwif) ((hwif)->attr.func_global_idx) +#define SPHW_HWIF_GLOBAL_VF_OFFSET(hwif) ((hwif)->attr.global_vf_id_of_pf) +#define SPHW_HWIF_PPF_IDX(hwif) ((hwif)->attr.ppf_idx) +#define SPHW_PCI_INTF_IDX(hwif) ((hwif)->attr.pci_intf_idx) + +#define SPHW_FUNC_TYPE(dev) ((dev)->hwif->attr.func_type) +#define SPHW_IS_PF(dev) (SPHW_FUNC_TYPE(dev) == TYPE_PF) +#define SPHW_IS_VF(dev) (SPHW_FUNC_TYPE(dev) == TYPE_VF) +#define SPHW_IS_PPF(dev) (SPHW_FUNC_TYPE(dev) == TYPE_PPF) + +u32 sphw_hwif_read_reg(struct sphw_hwif *hwif, u32 reg); + +void sphw_hwif_write_reg(struct sphw_hwif *hwif, u32 reg, u32 val); + +void sphw_set_pf_status(struct sphw_hwif *hwif, enum sphw_pf_status status); + +enum sphw_pf_status sphw_get_pf_status(struct sphw_hwif *hwif); + +void sphw_disable_doorbell(struct sphw_hwif *hwif); + +void sphw_enable_doorbell(struct sphw_hwif *hwif); + +int sphw_init_hwif(struct sphw_hwdev *hwdev, void *cfg_reg_base, void *intr_reg_base, + void *mgmt_regs_base, u64 db_base_phy, void *db_base, u64 db_dwqe_len); + +void sphw_free_hwif(struct sphw_hwdev *hwdev); + +u8 sphw_host_ppf_idx(void *hwdev, u8 host_id); + +u32 sphw_get_heartbeat_status(struct sphw_hwdev *hwdev); + +#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mbox.c b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mbox.c new file mode 100644 index 000000000000..edaef57a6b32 --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mbox.c @@ -0,0 +1,1808 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/types.h> +#include <linux/semaphore.h> +#include <linux/spinlock.h> +#include <linux/workqueue.h> + +#include "sphw_hw.h" +#include "sphw_hwdev.h" +#include "sphw_csr.h" 
+#include "sphw_hwif.h" +#include "sphw_eqs.h" +#include "sphw_prof_adap.h" +#include "sphw_mbox.h" +#include "sphw_common.h" + +#define SPHW_MBOX_INT_DST_AEQN_SHIFT 10 +#define SPHW_MBOX_INT_SRC_RESP_AEQN_SHIFT 12 +#define SPHW_MBOX_INT_STAT_DMA_SHIFT 14 +/* The size of data to be send (unit of 4 bytes) */ +#define SPHW_MBOX_INT_TX_SIZE_SHIFT 20 +/* SO_RO(strong order, relax order) */ +#define SPHW_MBOX_INT_STAT_DMA_SO_RO_SHIFT 25 +#define SPHW_MBOX_INT_WB_EN_SHIFT 28 + +#define SPHW_MBOX_INT_DST_AEQN_MASK 0x3 +#define SPHW_MBOX_INT_SRC_RESP_AEQN_MASK 0x3 +#define SPHW_MBOX_INT_STAT_DMA_MASK 0x3F +#define SPHW_MBOX_INT_TX_SIZE_MASK 0x1F +#define SPHW_MBOX_INT_STAT_DMA_SO_RO_MASK 0x3 +#define SPHW_MBOX_INT_WB_EN_MASK 0x1 + +#define SPHW_MBOX_INT_SET(val, field) \ + (((val) & SPHW_MBOX_INT_##field##_MASK) << \ + SPHW_MBOX_INT_##field##_SHIFT) + +enum sphw_mbox_tx_status { + TX_NOT_DONE = 1, +}; + +#define SPHW_MBOX_CTRL_TRIGGER_AEQE_SHIFT 0 +/* specifies the issue request for the message data. + * 0 - Tx request is done; + * 1 - Tx request is in process. + */ +#define SPHW_MBOX_CTRL_TX_STATUS_SHIFT 1 +#define SPHW_MBOX_CTRL_DST_FUNC_SHIFT 16 + +#define SPHW_MBOX_CTRL_TRIGGER_AEQE_MASK 0x1 +#define SPHW_MBOX_CTRL_TX_STATUS_MASK 0x1 +#define SPHW_MBOX_CTRL_DST_FUNC_MASK 0x1FFF + +#define SPHW_MBOX_CTRL_SET(val, field) \ + (((val) & SPHW_MBOX_CTRL_##field##_MASK) << \ + SPHW_MBOX_CTRL_##field##_SHIFT) + +#define MBOX_SEGLEN_MASK SPHW_MSG_HEADER_SET(SPHW_MSG_HEADER_SEG_LEN_MASK, SEG_LEN) + +#define MBOX_MSG_POLLING_TIMEOUT 300000 +#define SPHW_MBOX_COMP_TIME 300000U + +#define MBOX_MAX_BUF_SZ 2048U +#define MBOX_HEADER_SZ 8 +#define SPHW_MBOX_DATA_SIZE (MBOX_MAX_BUF_SZ - MBOX_HEADER_SZ) + +/* MBOX size is 64B, 8B for mbox_header, 8B reserved */ +#define MBOX_SEG_LEN 48 +#define MBOX_SEG_LEN_ALIGN 4 +#define MBOX_WB_STATUS_LEN 16UL + +#define SEQ_ID_START_VAL 0 +#define SEQ_ID_MAX_VAL 42 +#define MBOX_LAST_SEG_MAX_LEN (MBOX_MAX_BUF_SZ - \ + SEQ_ID_MAX_VAL * MBOX_SEG_LEN) + +/* mbox write back status is 16B, only first 4B is used */ +#define MBOX_WB_STATUS_ERRCODE_MASK 0xFFFF +#define MBOX_WB_STATUS_MASK 0xFF +#define MBOX_WB_ERROR_CODE_MASK 0xFF00 +#define MBOX_WB_STATUS_FINISHED_SUCCESS 0xFF +#define MBOX_WB_STATUS_FINISHED_WITH_ERR 0xFE +#define MBOX_WB_STATUS_NOT_FINISHED 0x00 + +#define MBOX_STATUS_FINISHED(wb) \ + (((wb) & MBOX_WB_STATUS_MASK) != MBOX_WB_STATUS_NOT_FINISHED) +#define MBOX_STATUS_SUCCESS(wb) \ + (((wb) & MBOX_WB_STATUS_MASK) == MBOX_WB_STATUS_FINISHED_SUCCESS) +#define MBOX_STATUS_ERRCODE(wb) \ + ((wb) & MBOX_WB_ERROR_CODE_MASK) + +#define DST_AEQ_IDX_DEFAULT_VAL 0 +#define SRC_AEQ_IDX_DEFAULT_VAL 0 +#define NO_DMA_ATTRIBUTE_VAL 0 + +#define MBOX_MSG_NO_DATA_LEN 1 + +#define MBOX_BODY_FROM_HDR(header) ((u8 *)(header) + MBOX_HEADER_SZ) +#define MBOX_AREA(hwif) \ + ((hwif)->cfg_regs_base + SPHW_FUNC_CSR_MAILBOX_DATA_OFF) + +#define MBOX_DMA_MSG_QUEUE_DEPTH 32 + +#define MBOX_MQ_CI_OFFSET (SPHW_CFG_REGS_FLAG + SPHW_FUNC_CSR_MAILBOX_DATA_OFF + \ + MBOX_HEADER_SZ + MBOX_SEG_LEN) + +#define MBOX_MQ_SYNC_CI_SHIFT 0 +#define MBOX_MQ_ASYNC_CI_SHIFT 8 + +#define MBOX_MQ_SYNC_CI_MASK 0xFF +#define MBOX_MQ_ASYNC_CI_MASK 0xFF + +#define MBOX_MQ_CI_SET(val, field) \ + (((val) & MBOX_MQ_##field##_CI_MASK) << MBOX_MQ_##field##_CI_SHIFT) +#define MBOX_MQ_CI_GET(val, field) \ + (((val) >> MBOX_MQ_##field##_CI_SHIFT) & MBOX_MQ_##field##_CI_MASK) +#define MBOX_MQ_CI_CLEAR(val, field) \ + ((val) & (~(MBOX_MQ_##field##_CI_MASK << MBOX_MQ_##field##_CI_SHIFT))) + +#define IS_PF_OR_PPF_SRC(hwdev, 
src_func_idx) \ + ((src_func_idx) < SPHW_MAX_PF_NUM(hwdev)) + +#define MBOX_RESPONSE_ERROR 0x1 +#define MBOX_MSG_ID_MASK 0xF +#define MBOX_MSG_ID(func_to_func) ((func_to_func)->send_msg_id) +#define MBOX_MSG_ID_INC(func_to_func) \ + (MBOX_MSG_ID(func_to_func) = \ + (MBOX_MSG_ID(func_to_func) + 1) & MBOX_MSG_ID_MASK) + +/* max message counter wait to process for one function */ +#define SPHW_MAX_MSG_CNT_TO_PROCESS 10 + +#define MBOX_MSG_CHANNEL_STOP(func_to_func) \ + ((((func_to_func)->lock_channel_en) && \ + test_bit((func_to_func)->cur_msg_channel, \ + &(func_to_func)->channel_stop)) ? true : false) + +enum mbox_ordering_type { + STRONG_ORDER, +}; + +enum mbox_write_back_type { + WRITE_BACK = 1, +}; + +enum mbox_aeq_trig_type { + NOT_TRIGGER, + TRIGGER, +}; + +static int send_mbox_msg(struct sphw_mbox *func_to_func, u8 mod, u16 cmd, + void *msg, u16 msg_len, u16 dst_func, + enum sphw_msg_direction_type direction, + enum sphw_msg_ack_type ack_type, + struct mbox_msg_info *msg_info); + +struct sphw_msg_desc *get_mbox_msg_desc(struct sphw_mbox *func_to_func, + u64 dir, u64 src_func_id); + +/** + * sphw_register_ppf_mbox_cb - register mbox callback for ppf + * @hwdev: the pointer to hw device + * @mod: specific mod that the callback will handle + * @pri_handle specific mod's private data that will be used in callback + * @callback: callback function + * Return: 0 - success, negative - failure + */ +int sphw_register_ppf_mbox_cb(void *hwdev, u8 mod, void *pri_handle, sphw_ppf_mbox_cb callback) +{ + struct sphw_mbox *func_to_func = NULL; + + if (mod >= SPHW_MOD_MAX || !hwdev) + return -EFAULT; + + func_to_func = ((struct sphw_hwdev *)hwdev)->func_to_func; + + func_to_func->ppf_mbox_cb[mod] = callback; + func_to_func->ppf_mbox_data[mod] = pri_handle; + + set_bit(SPHW_PPF_MBOX_CB_REG, &func_to_func->ppf_mbox_cb_state[mod]); + + return 0; +} + +/** + * sphw_register_pf_mbox_cb - register mbox callback for pf + * @hwdev: the pointer to hw device + * @mod: specific mod that the callback will handle + * @pri_handle specific mod's private data that will be used in callback + * @callback: callback function + * Return: 0 - success, negative - failure + */ +int sphw_register_pf_mbox_cb(void *hwdev, u8 mod, void *pri_handle, sphw_pf_mbox_cb callback) +{ + struct sphw_mbox *func_to_func = NULL; + + if (mod >= SPHW_MOD_MAX || !hwdev) + return -EFAULT; + + func_to_func = ((struct sphw_hwdev *)hwdev)->func_to_func; + + func_to_func->pf_mbox_cb[mod] = callback; + func_to_func->pf_mbox_data[mod] = pri_handle; + + set_bit(SPHW_PF_MBOX_CB_REG, &func_to_func->pf_mbox_cb_state[mod]); + + return 0; +} + +/** + * sphw_register_vf_mbox_cb - register mbox callback for vf + * @hwdev: the pointer to hw device + * @mod: specific mod that the callback will handle + * @pri_handle specific mod's private data that will be used in callback + * @callback: callback function + * Return: 0 - success, negative - failure + */ +int sphw_register_vf_mbox_cb(void *hwdev, u8 mod, void *pri_handle, sphw_vf_mbox_cb callback) +{ + struct sphw_mbox *func_to_func = NULL; + + if (mod >= SPHW_MOD_MAX || !hwdev) + return -EFAULT; + + func_to_func = ((struct sphw_hwdev *)hwdev)->func_to_func; + + func_to_func->vf_mbox_cb[mod] = callback; + func_to_func->vf_mbox_data[mod] = pri_handle; + + set_bit(SPHW_VF_MBOX_CB_REG, &func_to_func->vf_mbox_cb_state[mod]); + + return 0; +} + +/** + * sphw_register_ppf_to_pf_mbox_cb - register mbox callback for pf from ppf + * @hwdev: the pointer to hw device + * @mod: specific mod that the callback will handle + 
* @pri_handle specific mod's private data that will be used in callback + * @callback: callback function + * Return: 0 - success, negative - failure + */ +int sphw_register_ppf_to_pf_mbox_cb(void *hwdev, u8 mod, void *pri_handle, + sphw_pf_recv_from_ppf_mbox_cb callback) +{ + struct sphw_mbox *func_to_func = NULL; + + if (mod >= SPHW_MOD_MAX || !hwdev) + return -EFAULT; + + func_to_func = ((struct sphw_hwdev *)hwdev)->func_to_func; + + func_to_func->pf_recv_ppf_mbox_cb[mod] = callback; + func_to_func->pf_recv_ppf_mbox_data[mod] = pri_handle; + + set_bit(SPHW_PPF_TO_PF_MBOX_CB_REG, + &func_to_func->ppf_to_pf_mbox_cb_state[mod]); + + return 0; +} + +/** + * sphw_unregister_ppf_mbox_cb - unregister the mbox callback for ppf + * @hwdev: the pointer to hw device + * @mod: specific mod that the callback will handle + * Return: + */ +void sphw_unregister_ppf_mbox_cb(void *hwdev, u8 mod) +{ + struct sphw_mbox *func_to_func = NULL; + + if (mod >= SPHW_MOD_MAX || !hwdev) + return; + + func_to_func = ((struct sphw_hwdev *)hwdev)->func_to_func; + + clear_bit(SPHW_PPF_MBOX_CB_REG, + &func_to_func->ppf_mbox_cb_state[mod]); + + while (test_bit(SPHW_PPF_MBOX_CB_RUNNING, + &func_to_func->ppf_mbox_cb_state[mod])) + usleep_range(900, 1000); + + func_to_func->ppf_mbox_data[mod] = NULL; + func_to_func->ppf_mbox_cb[mod] = NULL; +} + +/** + * sphw_unregister_ppf_mbox_cb - unregister the mbox callback for pf + * @hwdev: the pointer to hw device + * @mod: specific mod that the callback will handle + * Return: + */ +void sphw_unregister_pf_mbox_cb(void *hwdev, u8 mod) +{ + struct sphw_mbox *func_to_func = NULL; + + if (mod >= SPHW_MOD_MAX || !hwdev) + return; + + func_to_func = ((struct sphw_hwdev *)hwdev)->func_to_func; + + clear_bit(SPHW_PF_MBOX_CB_REG, &func_to_func->pf_mbox_cb_state[mod]); + + while (test_bit(SPHW_PF_MBOX_CB_RUNNING, + &func_to_func->pf_mbox_cb_state[mod])) + usleep_range(900, 1000); + + func_to_func->pf_mbox_data[mod] = NULL; + func_to_func->pf_mbox_cb[mod] = NULL; +} + +/** + * sphw_unregister_vf_mbox_cb - unregister the mbox callback for vf + * @hwdev: the pointer to hw device + * @mod: specific mod that the callback will handle + * Return: + */ +void sphw_unregister_vf_mbox_cb(void *hwdev, u8 mod) +{ + struct sphw_mbox *func_to_func = NULL; + + if (mod >= SPHW_MOD_MAX || !hwdev) + return; + + func_to_func = ((struct sphw_hwdev *)hwdev)->func_to_func; + + clear_bit(SPHW_VF_MBOX_CB_REG, &func_to_func->vf_mbox_cb_state[mod]); + + while (test_bit(SPHW_VF_MBOX_CB_RUNNING, + &func_to_func->vf_mbox_cb_state[mod])) + usleep_range(900, 1000); + + func_to_func->vf_mbox_data[mod] = NULL; + func_to_func->vf_mbox_cb[mod] = NULL; +} + +/** + * sphw_unregister_ppf_mbox_cb - unregister the mbox callback for pf from ppf + * @hwdev: the pointer to hw device + * @mod: specific mod that the callback will handle + * Return: + */ +void sphw_unregister_ppf_to_pf_mbox_cb(void *hwdev, u8 mod) +{ + struct sphw_mbox *func_to_func = NULL; + + if (mod >= SPHW_MOD_MAX || !hwdev) + return; + + func_to_func = ((struct sphw_hwdev *)hwdev)->func_to_func; + + clear_bit(SPHW_PPF_TO_PF_MBOX_CB_REG, + &func_to_func->ppf_to_pf_mbox_cb_state[mod]); + + while (test_bit(SPHW_PPF_TO_PF_MBOX_CB_RUNNIG, + &func_to_func->ppf_to_pf_mbox_cb_state[mod])) + usleep_range(900, 1000); + + func_to_func->pf_recv_ppf_mbox_data[mod] = NULL; + func_to_func->pf_recv_ppf_mbox_cb[mod] = NULL; +} + +static int recv_vf_mbox_handler(struct sphw_mbox *func_to_func, + struct sphw_recv_mbox *recv_mbox, + void *buf_out, u16 *out_size) +{ + sphw_vf_mbox_cb 
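+	/* The callback is only invoked while its _REG bit is set, and the
+	 * matching _RUNNING bit is held across the call, so the unregister
+	 * path (which clears _REG and then waits for _RUNNING to drop) can
+	 * never return while a callback is still executing on its private
+	 * data.
+	 */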
cb; + int ret; + + if (recv_mbox->mod >= SPHW_MOD_MAX) { + sdk_warn(func_to_func->hwdev->dev_hdl, "Receive illegal mbox message, mod = %hhu\n", + recv_mbox->mod); + return -EINVAL; + } + + set_bit(SPHW_VF_MBOX_CB_RUNNING, &func_to_func->vf_mbox_cb_state[recv_mbox->mod]); + + cb = func_to_func->vf_mbox_cb[recv_mbox->mod]; + if (cb && test_bit(SPHW_VF_MBOX_CB_REG, + &func_to_func->vf_mbox_cb_state[recv_mbox->mod])) { + ret = cb(func_to_func->hwdev, + func_to_func->vf_mbox_data[recv_mbox->mod], + recv_mbox->cmd, recv_mbox->msg, + recv_mbox->msg_len, buf_out, out_size); + } else { + sdk_warn(func_to_func->hwdev->dev_hdl, "VF mbox cb is not registered\n"); + ret = -EINVAL; + } + + clear_bit(SPHW_VF_MBOX_CB_RUNNING, + &func_to_func->vf_mbox_cb_state[recv_mbox->mod]); + + return ret; +} + +static int recv_pf_from_ppf_handler(struct sphw_mbox *func_to_func, + struct sphw_recv_mbox *recv_mbox, + void *buf_out, u16 *out_size) +{ + sphw_pf_recv_from_ppf_mbox_cb cb; + enum sphw_mod_type mod = recv_mbox->mod; + int ret; + + if (mod >= SPHW_MOD_MAX) { + sdk_warn(func_to_func->hwdev->dev_hdl, "Receive illegal mbox message, mod = %d\n", + mod); + return -EINVAL; + } + + set_bit(SPHW_PPF_TO_PF_MBOX_CB_RUNNIG, + &func_to_func->ppf_to_pf_mbox_cb_state[mod]); + + cb = func_to_func->pf_recv_ppf_mbox_cb[mod]; + if (cb && test_bit(SPHW_PPF_TO_PF_MBOX_CB_REG, + &func_to_func->ppf_to_pf_mbox_cb_state[mod])) { + ret = cb(func_to_func->hwdev, + func_to_func->pf_recv_ppf_mbox_data[mod], + recv_mbox->cmd, recv_mbox->msg, recv_mbox->msg_len, + buf_out, out_size); + } else { + sdk_warn(func_to_func->hwdev->dev_hdl, "PF receive ppf mailbox callback is not registered\n"); + ret = -EINVAL; + } + + clear_bit(SPHW_PPF_TO_PF_MBOX_CB_RUNNIG, + &func_to_func->ppf_to_pf_mbox_cb_state[mod]); + + return ret; +} + +static int recv_ppf_mbox_handler(struct sphw_mbox *func_to_func, + struct sphw_recv_mbox *recv_mbox, + u8 pf_id, void *buf_out, u16 *out_size) +{ + sphw_ppf_mbox_cb cb; + u16 vf_id = 0; + int ret; + + if (recv_mbox->mod >= SPHW_MOD_MAX) { + sdk_warn(func_to_func->hwdev->dev_hdl, "Receive illegal mbox message, mod = %hhu\n", + recv_mbox->mod); + return -EINVAL; + } + + set_bit(SPHW_PPF_MBOX_CB_RUNNING, + &func_to_func->ppf_mbox_cb_state[recv_mbox->mod]); + + cb = func_to_func->ppf_mbox_cb[recv_mbox->mod]; + if (cb && test_bit(SPHW_PPF_MBOX_CB_REG, + &func_to_func->ppf_mbox_cb_state[recv_mbox->mod])) { + ret = cb(func_to_func->hwdev, + func_to_func->ppf_mbox_data[recv_mbox->mod], + pf_id, vf_id, recv_mbox->cmd, recv_mbox->msg, + recv_mbox->msg_len, buf_out, out_size); + } else { + sdk_warn(func_to_func->hwdev->dev_hdl, "PPF mbox cb is not registered, mod = %hhu\n", + recv_mbox->mod); + ret = -EINVAL; + } + + clear_bit(SPHW_PPF_MBOX_CB_RUNNING, + &func_to_func->ppf_mbox_cb_state[recv_mbox->mod]); + + return ret; +} + +static int recv_pf_from_vf_mbox_handler(struct sphw_mbox *func_to_func, + struct sphw_recv_mbox *recv_mbox, + u16 src_func_idx, void *buf_out, + u16 *out_size) +{ + sphw_pf_mbox_cb cb; + u16 vf_id = 0; + int ret; + + if (recv_mbox->mod >= SPHW_MOD_MAX) { + sdk_warn(func_to_func->hwdev->dev_hdl, "Receive illegal mbox message, mod = %hhu\n", + recv_mbox->mod); + return -EINVAL; + } + + set_bit(SPHW_PF_MBOX_CB_RUNNING, + &func_to_func->pf_mbox_cb_state[recv_mbox->mod]); + + cb = func_to_func->pf_mbox_cb[recv_mbox->mod]; + if (cb && test_bit(SPHW_PF_MBOX_CB_REG, + &func_to_func->pf_mbox_cb_state[recv_mbox->mod])) { + vf_id = src_func_idx - + sphw_glb_pf_vf_offset(func_to_func->hwdev); + ret = 
cb(func_to_func->hwdev, + func_to_func->pf_mbox_data[recv_mbox->mod], + vf_id, recv_mbox->cmd, recv_mbox->msg, + recv_mbox->msg_len, buf_out, out_size); + } else { + sdk_warn(func_to_func->hwdev->dev_hdl, "PF mbox mod(0x%x) cb is not registered\n", + recv_mbox->mod); + ret = -EINVAL; + } + + clear_bit(SPHW_PF_MBOX_CB_RUNNING, + &func_to_func->pf_mbox_cb_state[recv_mbox->mod]); + + return ret; +} + +static void response_for_recv_func_mbox(struct sphw_mbox *func_to_func, + struct sphw_recv_mbox *recv_mbox, + int err, u16 out_size, u16 src_func_idx) +{ + struct mbox_msg_info msg_info = {0}; + + msg_info.msg_id = recv_mbox->msg_id; + if (err) + msg_info.status = SPHW_MBOX_PF_SEND_ERR; + + /* if not data need to response, set out_size to 1 */ + if (!out_size || err) + out_size = MBOX_MSG_NO_DATA_LEN; + + send_mbox_msg(func_to_func, recv_mbox->mod, recv_mbox->cmd, + recv_mbox->resp_buff, out_size, src_func_idx, + SPHW_MSG_RESPONSE, SPHW_MSG_NO_ACK, &msg_info); +} + +static void recv_func_mbox_handler(struct sphw_mbox *func_to_func, + struct sphw_recv_mbox *recv_mbox) +{ + struct sphw_hwdev *dev = func_to_func->hwdev; + void *buf_out = recv_mbox->resp_buff; + u16 src_func_idx = recv_mbox->src_func_idx; + u16 out_size = MBOX_MAX_BUF_SZ; + int err = 0; + + if (SPHW_IS_VF(dev)) { + err = recv_vf_mbox_handler(func_to_func, recv_mbox, buf_out, + &out_size); + } else { /* pf/ppf process */ + if (IS_PF_OR_PPF_SRC(dev, src_func_idx)) { + if (SPHW_IS_PPF(dev)) { + err = recv_ppf_mbox_handler(func_to_func, + recv_mbox, + (u8)src_func_idx, + buf_out, &out_size); + if (err) + goto out; + } else { + err = recv_pf_from_ppf_handler(func_to_func, + recv_mbox, + buf_out, + &out_size); + if (err) + goto out; + } + /* The source is neither PF nor PPF, so it is from VF */ + } else { + err = recv_pf_from_vf_mbox_handler(func_to_func, + recv_mbox, + src_func_idx, + buf_out, &out_size); + } + } + +out: + if (recv_mbox->ack_type == SPHW_MSG_ACK) + response_for_recv_func_mbox(func_to_func, recv_mbox, err, + out_size, src_func_idx); +} + +static struct sphw_recv_mbox *alloc_recv_mbox(void) +{ + struct sphw_recv_mbox *recv_msg = NULL; + + recv_msg = kzalloc(sizeof(*recv_msg), GFP_KERNEL); + if (!recv_msg) + return NULL; + + recv_msg->msg = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL); + if (!recv_msg->msg) + goto alloc_msg_err; + + recv_msg->resp_buff = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL); + if (!recv_msg->resp_buff) + goto alloc_resp_bff_err; + + return recv_msg; + +alloc_resp_bff_err: + kfree(recv_msg->msg); + +alloc_msg_err: + kfree(recv_msg); + + return NULL; +} + +static void free_recv_mbox(struct sphw_recv_mbox *recv_msg) +{ + kfree(recv_msg->resp_buff); + kfree(recv_msg->msg); + kfree(recv_msg); +} + +static void recv_func_mbox_work_handler(struct work_struct *work) +{ + struct sphw_mbox_work *mbox_work = + container_of(work, struct sphw_mbox_work, work); + + recv_func_mbox_handler(mbox_work->func_to_func, mbox_work->recv_mbox); + + atomic_dec(&mbox_work->msg_ch->recv_msg_cnt); + + free_recv_mbox(mbox_work->recv_mbox); + kfree(mbox_work); +} + +static void resp_mbox_handler(struct sphw_mbox *func_to_func, + struct sphw_msg_desc *msg_desc) +{ + spin_lock(&func_to_func->mbox_lock); + if (msg_desc->msg_info.msg_id == func_to_func->send_msg_id && + func_to_func->event_flag == EVENT_START) + func_to_func->event_flag = EVENT_SUCCESS; + else + sdk_err(func_to_func->hwdev->dev_hdl, + "Mbox response timeout, current send msg id(0x%x), recv msg id(0x%x), status(0x%x)\n", + func_to_func->send_msg_id, msg_desc->msg_info.msg_id, + 
msg_desc->msg_info.status); + spin_unlock(&func_to_func->mbox_lock); +} + +static void recv_mbox_msg_handler(struct sphw_mbox *func_to_func, + struct sphw_msg_desc *msg_desc, + u64 mbox_header) +{ + struct sphw_hwdev *hwdev = func_to_func->hwdev; + struct sphw_recv_mbox *recv_msg = NULL; + struct sphw_mbox_work *mbox_work = NULL; + struct sphw_msg_channel *msg_ch = + container_of(msg_desc, struct sphw_msg_channel, recv_msg); + u16 src_func_idx = SPHW_MSG_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX); + + if (atomic_read(&msg_ch->recv_msg_cnt) > + SPHW_MAX_MSG_CNT_TO_PROCESS) { + sdk_warn(hwdev->dev_hdl, "This function(%u) have %d message wait to process, can't add to work queue\n", + src_func_idx, atomic_read(&msg_ch->recv_msg_cnt)); + return; + } + + recv_msg = alloc_recv_mbox(); + if (!recv_msg) { + sdk_err(hwdev->dev_hdl, "Failed to alloc receive mbox message buffer\n"); + return; + } + recv_msg->msg_len = msg_desc->msg_len; + memcpy(recv_msg->msg, msg_desc->msg, recv_msg->msg_len); + recv_msg->msg_id = msg_desc->msg_info.msg_id; + recv_msg->mod = SPHW_MSG_HEADER_GET(mbox_header, MODULE); + recv_msg->cmd = SPHW_MSG_HEADER_GET(mbox_header, CMD); + recv_msg->ack_type = SPHW_MSG_HEADER_GET(mbox_header, NO_ACK); + recv_msg->src_func_idx = src_func_idx; + + mbox_work = kzalloc(sizeof(*mbox_work), GFP_KERNEL); + if (!mbox_work) { + sdk_err(hwdev->dev_hdl, "Allocate mbox work memory failed.\n"); + free_recv_mbox(recv_msg); + return; + } + + atomic_inc(&msg_ch->recv_msg_cnt); + + mbox_work->func_to_func = func_to_func; + mbox_work->recv_mbox = recv_msg; + mbox_work->msg_ch = msg_ch; + + INIT_WORK(&mbox_work->work, recv_func_mbox_work_handler); + queue_work_on(sphw_get_work_cpu_affinity(hwdev, WORK_TYPE_MBOX), + func_to_func->workq, &mbox_work->work); +} + +static bool check_mbox_segment(struct sphw_mbox *func_to_func, + struct sphw_msg_desc *msg_desc, + u64 mbox_header) +{ + u8 seq_id, seg_len, msg_id, mod; + u16 src_func_idx, cmd; + + seq_id = SPHW_MSG_HEADER_GET(mbox_header, SEQID); + seg_len = SPHW_MSG_HEADER_GET(mbox_header, SEG_LEN); + msg_id = SPHW_MSG_HEADER_GET(mbox_header, MSG_ID); + mod = SPHW_MSG_HEADER_GET(mbox_header, MODULE); + cmd = SPHW_MSG_HEADER_GET(mbox_header, CMD); + src_func_idx = SPHW_MSG_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX); + + if (seq_id > SEQ_ID_MAX_VAL || seg_len > MBOX_SEG_LEN || + (seq_id == SEQ_ID_MAX_VAL && seg_len > MBOX_LAST_SEG_MAX_LEN)) + goto seg_err; + + if (seq_id == 0) { + msg_desc->seq_id = seq_id; + msg_desc->msg_info.msg_id = msg_id; + msg_desc->mod = mod; + msg_desc->cmd = cmd; + } else { + if (seq_id != msg_desc->seq_id + 1 || + msg_id != msg_desc->msg_info.msg_id || + mod != msg_desc->mod || cmd != msg_desc->cmd) + goto seg_err; + + msg_desc->seq_id = seq_id; + } + + return true; + +seg_err: + sdk_err(func_to_func->hwdev->dev_hdl, + "Mailbox segment check failed, src func id: 0x%x, front seg info: seq id: 0x%x, msg id: 0x%x, mod: 0x%x, cmd: 0x%x\n", + src_func_idx, msg_desc->seq_id, msg_desc->msg_info.msg_id, + msg_desc->mod, msg_desc->cmd); + sdk_err(func_to_func->hwdev->dev_hdl, + "Current seg info: seg len: 0x%x, seq id: 0x%x, msg id: 0x%x, mod: 0x%x, cmd: 0x%x\n", + seg_len, seq_id, msg_id, mod, cmd); + + return false; +} + +static void recv_mbox_handler(struct sphw_mbox *func_to_func, + void *header, struct sphw_msg_desc *msg_desc) +{ + u64 mbox_header = *((u64 *)header); + void *mbox_body = MBOX_BODY_FROM_HDR(header); + u8 seq_id, seg_len; + int pos; + + if (!check_mbox_segment(func_to_func, msg_desc, mbox_header)) { + msg_desc->seq_id = 
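+		/* Resetting seq_id to SEQ_ID_MAX_VAL discards the partially
+		 * assembled message: only a fresh first segment (seq_id 0)
+		 * will be accepted to restart reassembly.  Each segment
+		 * carries up to MBOX_SEG_LEN (48) bytes copied at offset
+		 * seq_id * 48, so seq_ids 0..41 fill 2016 bytes and the last
+		 * segment (seq_id 42) may add at most MBOX_LAST_SEG_MAX_LEN
+		 * (2048 - 42 * 48 = 32) bytes of the 2048-byte buffer.
+		 */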
SEQ_ID_MAX_VAL; + return; + } + + seq_id = SPHW_MSG_HEADER_GET(mbox_header, SEQID); + seg_len = SPHW_MSG_HEADER_GET(mbox_header, SEG_LEN); + + pos = seq_id * MBOX_SEG_LEN; + memcpy((u8 *)msg_desc->msg + pos, mbox_body, seg_len); + + if (!SPHW_MSG_HEADER_GET(mbox_header, LAST)) + return; + + msg_desc->msg_len = SPHW_MSG_HEADER_GET(mbox_header, MSG_LEN); + msg_desc->msg_info.status = SPHW_MSG_HEADER_GET(mbox_header, STATUS); + + if (SPHW_MSG_HEADER_GET(mbox_header, DIRECTION) == + SPHW_MSG_RESPONSE) { + resp_mbox_handler(func_to_func, msg_desc); + return; + } + + recv_mbox_msg_handler(func_to_func, msg_desc, mbox_header); +} + +void sphw_mbox_func_aeqe_handler(void *handle, u8 *header, u8 size) +{ + struct sphw_mbox *func_to_func = NULL; + struct sphw_msg_desc *msg_desc = NULL; + u64 mbox_header = *((u64 *)header); + u64 src, dir; + + func_to_func = ((struct sphw_hwdev *)handle)->func_to_func; + + dir = SPHW_MSG_HEADER_GET(mbox_header, DIRECTION); + src = SPHW_MSG_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX); + + msg_desc = get_mbox_msg_desc(func_to_func, dir, src); + if (!msg_desc) { + sdk_err(func_to_func->hwdev->dev_hdl, + "Mailbox source function id: %u is invalid for current function\n", + (u32)src); + return; + } + + recv_mbox_handler(func_to_func, (u64 *)header, msg_desc); +} + +static int init_mbox_dma_queue(struct sphw_hwdev *hwdev, struct mbox_dma_queue *mq) +{ + u32 size; + + mq->depth = MBOX_DMA_MSG_QUEUE_DEPTH; + mq->prod_idx = 0; + mq->cons_idx = 0; + + size = mq->depth * MBOX_MAX_BUF_SZ; + mq->dma_buff_vaddr = dma_alloc_coherent(hwdev->dev_hdl, size, &mq->dma_buff_paddr, + GFP_KERNEL); + if (!mq->dma_buff_vaddr) { + sdk_err(hwdev->dev_hdl, "Failed to alloc dma_buffer\n"); + return -ENOMEM; + } + + return 0; +} + +static void deinit_mbox_dma_queue(struct sphw_hwdev *hwdev, struct mbox_dma_queue *mq) +{ + dma_free_coherent(hwdev->dev_hdl, mq->depth * MBOX_MAX_BUF_SZ, + mq->dma_buff_vaddr, mq->dma_buff_paddr); +} + +static int sphw_init_mbox_dma_queue(struct sphw_mbox *func_to_func) +{ + u32 val; + int err; + + err = init_mbox_dma_queue(func_to_func->hwdev, &func_to_func->sync_msg_queue); + if (err) + return err; + + err = init_mbox_dma_queue(func_to_func->hwdev, &func_to_func->async_msg_queue); + if (err) { + deinit_mbox_dma_queue(func_to_func->hwdev, &func_to_func->sync_msg_queue); + return err; + } + + val = sphw_hwif_read_reg(func_to_func->hwdev->hwif, MBOX_MQ_CI_OFFSET); + val = MBOX_MQ_CI_CLEAR(val, SYNC); + val = MBOX_MQ_CI_CLEAR(val, ASYNC); + sphw_hwif_write_reg(func_to_func->hwdev->hwif, MBOX_MQ_CI_OFFSET, val); + + return 0; +} + +static void sphw_deinit_mbox_dma_queue(struct sphw_mbox *func_to_func) +{ + deinit_mbox_dma_queue(func_to_func->hwdev, &func_to_func->sync_msg_queue); + deinit_mbox_dma_queue(func_to_func->hwdev, &func_to_func->async_msg_queue); +} + +#define MBOX_DMA_MSG_INIT_XOR_VAL 0x5a5a5a5a +static u32 mbox_dma_msg_xor(u32 *data, u16 msg_len) +{ + u32 xor = MBOX_DMA_MSG_INIT_XOR_VAL; + u16 dw_len = msg_len / sizeof(u32); + u16 i; + + for (i = 0; i < dw_len; i++) + xor ^= data[i]; + + return xor; +} + +#define MQ_ID_MASK(mq, idx) ((idx) & ((mq)->depth - 1)) +#define IS_MSG_QUEUE_FULL(mq) (MQ_ID_MASK(mq, (mq)->prod_idx + 1) == \ + MQ_ID_MASK(mq, (mq)->cons_idx)) + +static int mbox_prepare_dma_entry(struct sphw_mbox *func_to_func, struct mbox_dma_queue *mq, + struct mbox_dma_msg *dma_msg, void *msg, u16 msg_len) +{ + u64 dma_addr, offset; + + if (IS_MSG_QUEUE_FULL(mq)) { + sdk_err(func_to_func->hwdev->dev_hdl, "Mbox sync message queue is busy, pi: %u, ci: 
%u\n", + mq->prod_idx, MQ_ID_MASK(mq, mq->cons_idx)); + return -EBUSY; + } + + /* copy data to DMA buffer */ + offset = mq->prod_idx * MBOX_MAX_BUF_SZ; + memcpy((u8 *)mq->dma_buff_vaddr + offset, msg, msg_len); + dma_addr = mq->dma_buff_paddr + offset; + dma_msg->dma_addr_high = upper_32_bits(dma_addr); + dma_msg->dma_addr_low = lower_32_bits(dma_addr); + dma_msg->msg_len = msg_len; + dma_msg->xor = mbox_dma_msg_xor(msg, msg_len); + + mq->prod_idx++; + mq->prod_idx = MQ_ID_MASK(mq, mq->prod_idx); + + return 0; +} + +static int mbox_prepare_dma_msg(struct sphw_mbox *func_to_func, enum sphw_msg_ack_type ack_type, + struct mbox_dma_msg *dma_msg, void *msg, u16 msg_len) +{ + struct mbox_dma_queue *mq = NULL; + u32 val; + + val = sphw_hwif_read_reg(func_to_func->hwdev->hwif, MBOX_MQ_CI_OFFSET); + if (ack_type == SPHW_MSG_ACK) { + mq = &func_to_func->sync_msg_queue; + mq->cons_idx = MBOX_MQ_CI_GET(val, SYNC); + } else { + mq = &func_to_func->async_msg_queue; + mq->cons_idx = MBOX_MQ_CI_GET(val, ASYNC); + } + + return mbox_prepare_dma_entry(func_to_func, mq, dma_msg, msg, msg_len); +} + +static void clear_mbox_status(struct sphw_send_mbox *mbox) +{ + *mbox->wb_status = 0; + + /* clear mailbox write back status */ + wmb(); +} + +static void mbox_copy_header(struct sphw_hwdev *hwdev, + struct sphw_send_mbox *mbox, u64 *header) +{ + u32 *data = (u32 *)header; + u32 i, idx_max = MBOX_HEADER_SZ / sizeof(u32); + + for (i = 0; i < idx_max; i++) + __raw_writel(cpu_to_be32(*(data + i)), mbox->data + i * sizeof(u32)); +} + +static void mbox_copy_send_data(struct sphw_hwdev *hwdev, struct sphw_send_mbox *mbox, void *seg, + u16 seg_len) +{ + u32 *data = seg; + u32 data_len, chk_sz = sizeof(u32); + u32 i, idx_max; + + data_len = seg_len; + idx_max = ALIGN(data_len, chk_sz) / chk_sz; + + for (i = 0; i < idx_max; i++) + __raw_writel(cpu_to_be32(*(data + i)), + mbox->data + MBOX_HEADER_SZ + i * sizeof(u32)); +} + +static void write_mbox_msg_attr(struct sphw_mbox *func_to_func, + u16 dst_func, u16 dst_aeqn, u16 seg_len) +{ + u32 mbox_int, mbox_ctrl; + + /* for VF to PF's message, dest func id will self-learning by HW */ + if (SPHW_IS_VF(func_to_func->hwdev) && + dst_func != SPHW_MGMT_SRC_ID) + dst_func = 0; /* the destination is the VF's PF */ + + mbox_int = SPHW_MBOX_INT_SET(dst_aeqn, DST_AEQN) | + SPHW_MBOX_INT_SET(0, SRC_RESP_AEQN) | + SPHW_MBOX_INT_SET(NO_DMA_ATTRIBUTE_VAL, STAT_DMA) | + SPHW_MBOX_INT_SET(ALIGN(seg_len + MBOX_HEADER_SZ, + MBOX_SEG_LEN_ALIGN) >> 2, TX_SIZE) | + SPHW_MBOX_INT_SET(STRONG_ORDER, STAT_DMA_SO_RO) | + SPHW_MBOX_INT_SET(WRITE_BACK, WB_EN); + + sphw_hwif_write_reg(func_to_func->hwdev->hwif, SPHW_FUNC_CSR_MAILBOX_INT_OFFSET_OFF, + mbox_int); + + wmb(); /* writing the mbox int attributes */ + mbox_ctrl = SPHW_MBOX_CTRL_SET(TX_NOT_DONE, TX_STATUS); + + mbox_ctrl |= SPHW_MBOX_CTRL_SET(NOT_TRIGGER, TRIGGER_AEQE); + + mbox_ctrl |= SPHW_MBOX_CTRL_SET(dst_func, DST_FUNC); + + sphw_hwif_write_reg(func_to_func->hwdev->hwif, + SPHW_FUNC_CSR_MAILBOX_CONTROL_OFF, mbox_ctrl); +} + +void dump_mbox_reg(struct sphw_hwdev *hwdev) +{ + u32 val; + + val = sphw_hwif_read_reg(hwdev->hwif, SPHW_FUNC_CSR_MAILBOX_CONTROL_OFF); + sdk_err(hwdev->dev_hdl, "Mailbox control reg: 0x%x\n", val); + val = sphw_hwif_read_reg(hwdev->hwif, SPHW_FUNC_CSR_MAILBOX_INT_OFFSET_OFF); + sdk_err(hwdev->dev_hdl, "Mailbox interrupt offset: 0x%x\n", val); +} + +static u16 get_mbox_status(struct sphw_send_mbox *mbox) +{ + /* write back is 16B, but only use first 4B */ + u64 wb_val = be64_to_cpu(*mbox->wb_status); + + rmb(); /* 
verify reading before check */ + + return (u16)(wb_val & MBOX_WB_STATUS_ERRCODE_MASK); +} + +enum sphw_wait_return check_mbox_wb_status(void *priv_data) +{ + struct sphw_mbox *func_to_func = priv_data; + u16 wb_status; + + if (MBOX_MSG_CHANNEL_STOP(func_to_func)) + return WAIT_PROCESS_ERR; + + wb_status = get_mbox_status(&func_to_func->send_mbox); + + return MBOX_STATUS_FINISHED(wb_status) ? + WAIT_PROCESS_CPL : WAIT_PROCESS_WAITING; +} + +static int send_mbox_seg(struct sphw_mbox *func_to_func, u64 header, + u16 dst_func, void *seg, u16 seg_len, void *msg_info) +{ + struct sphw_send_mbox *send_mbox = &func_to_func->send_mbox; + struct sphw_hwdev *hwdev = func_to_func->hwdev; + u8 num_aeqs = hwdev->hwif->attr.num_aeqs; + u16 dst_aeqn, wb_status = 0, errcode; + u16 seq_dir = SPHW_MSG_HEADER_GET(header, DIRECTION); + int err; + + /* mbox to mgmt cpu, hardware don't care dst aeq id*/ + if (num_aeqs > SPHW_MBOX_RSP_MSG_AEQ) + dst_aeqn = (seq_dir == SPHW_MSG_DIRECT_SEND) ? + SPHW_ASYNC_MSG_AEQ : SPHW_MBOX_RSP_MSG_AEQ; + else + dst_aeqn = 0; + + clear_mbox_status(send_mbox); + + mbox_copy_header(hwdev, send_mbox, &header); + + mbox_copy_send_data(hwdev, send_mbox, seg, seg_len); + + write_mbox_msg_attr(func_to_func, dst_func, dst_aeqn, seg_len); + + wmb(); /* writing the mbox msg attributes */ + + err = sphw_wait_for_timeout(func_to_func, check_mbox_wb_status, + MBOX_MSG_POLLING_TIMEOUT, USEC_PER_MSEC); + wb_status = get_mbox_status(send_mbox); + if (err) { + sdk_err(hwdev->dev_hdl, "Send mailbox segment timeout, wb status: 0x%x\n", + wb_status); + dump_mbox_reg(hwdev); + return -ETIMEDOUT; + } + + if (!MBOX_STATUS_SUCCESS(wb_status)) { + sdk_err(hwdev->dev_hdl, "Send mailbox segment to function %u error, wb status: 0x%x\n", + dst_func, wb_status); + errcode = MBOX_STATUS_ERRCODE(wb_status); + return errcode ? 
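+		/* The 16-bit write-back word encodes the completion status in
+		 * its low byte (0xFF success, 0xFE finished with error, 0x00
+		 * not finished) and a hardware error code in its high byte; a
+		 * zero error code is mapped to -EFAULT so the caller still
+		 * observes a failure.
+		 */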
errcode : -EFAULT; + } + + return 0; +} + +static int send_mbox_msg(struct sphw_mbox *func_to_func, u8 mod, u16 cmd, + void *msg, u16 msg_len, u16 dst_func, + enum sphw_msg_direction_type direction, + enum sphw_msg_ack_type ack_type, + struct mbox_msg_info *msg_info) +{ + struct sphw_hwdev *hwdev = func_to_func->hwdev; + struct mbox_dma_msg dma_msg = {0}; + enum sphw_data_type data_type = SPHW_DATA_INLINE; + int err = 0; + u32 seq_id = 0; + u16 seg_len = MBOX_SEG_LEN; + u16 rsp_aeq_id, left; + u8 *msg_seg = NULL; + u64 header = 0; + + if (hwdev->poll || hwdev->hwif->attr.num_aeqs >= 2) + rsp_aeq_id = SPHW_MBOX_RSP_MSG_AEQ; + else + rsp_aeq_id = 0; + + mutex_lock(&func_to_func->msg_send_lock); + + if (IS_DMA_MBX_MSG(dst_func)) { + err = mbox_prepare_dma_msg(func_to_func, ack_type, &dma_msg, msg, msg_len); + if (err) + goto send_err; + + msg = &dma_msg; + msg_len = sizeof(dma_msg); + data_type = SPHW_DATA_DMA; + } + + msg_seg = (u8 *)msg; + left = msg_len; + + header = SPHW_MSG_HEADER_SET(msg_len, MSG_LEN) | + SPHW_MSG_HEADER_SET(mod, MODULE) | + SPHW_MSG_HEADER_SET(seg_len, SEG_LEN) | + SPHW_MSG_HEADER_SET(ack_type, NO_ACK) | + SPHW_MSG_HEADER_SET(data_type, DATA_TYPE) | + SPHW_MSG_HEADER_SET(SEQ_ID_START_VAL, SEQID) | + SPHW_MSG_HEADER_SET(NOT_LAST_SEGMENT, LAST) | + SPHW_MSG_HEADER_SET(direction, DIRECTION) | + SPHW_MSG_HEADER_SET(cmd, CMD) | + /* The vf's offset to it's associated pf */ + SPHW_MSG_HEADER_SET(msg_info->msg_id, MSG_ID) | + SPHW_MSG_HEADER_SET(rsp_aeq_id, AEQ_ID) | + SPHW_MSG_HEADER_SET(SPHW_MSG_FROM_MBOX, SOURCE) | + SPHW_MSG_HEADER_SET(!!msg_info->status, STATUS) | + SPHW_MSG_HEADER_SET(sphw_global_func_id(hwdev), SRC_GLB_FUNC_IDX); + + while (!(SPHW_MSG_HEADER_GET(header, LAST))) { + if (left <= MBOX_SEG_LEN) { + header &= ~MBOX_SEGLEN_MASK; + header |= SPHW_MSG_HEADER_SET(left, SEG_LEN); + header |= SPHW_MSG_HEADER_SET(LAST_SEGMENT, LAST); + + seg_len = left; + } + + err = send_mbox_seg(func_to_func, header, dst_func, msg_seg, + seg_len, msg_info); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to send mbox seg, seq_id=0x%llx\n", + SPHW_MSG_HEADER_GET(header, SEQID)); + goto send_err; + } + + left -= MBOX_SEG_LEN; + msg_seg += MBOX_SEG_LEN; + + seq_id++; + header &= ~(SPHW_MSG_HEADER_SET(SPHW_MSG_HEADER_SEQID_MASK, SEQID)); + header |= SPHW_MSG_HEADER_SET(seq_id, SEQID); + } + +send_err: + mutex_unlock(&func_to_func->msg_send_lock); + + return err; +} + +static void set_mbox_to_func_event(struct sphw_mbox *func_to_func, + enum mbox_event_state event_flag) +{ + spin_lock(&func_to_func->mbox_lock); + func_to_func->event_flag = event_flag; + spin_unlock(&func_to_func->mbox_lock); +} + +static enum sphw_wait_return check_mbox_msg_finish(void *priv_data) +{ + struct sphw_mbox *func_to_func = priv_data; + + if (MBOX_MSG_CHANNEL_STOP(func_to_func)) + return WAIT_PROCESS_ERR; + + return (func_to_func->event_flag == EVENT_SUCCESS) ? + WAIT_PROCESS_CPL : WAIT_PROCESS_WAITING; +} + +static int wait_mbox_msg_completion(struct sphw_mbox *func_to_func, + u32 timeout) +{ + int err; + + timeout = timeout ? 
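+	/* A caller-supplied timeout of 0 selects the default
+	 * SPHW_MBOX_COMP_TIME.  Completion is signalled from AEQ context:
+	 * resp_mbox_handler() moves event_flag from EVENT_START to
+	 * EVENT_SUCCESS under mbox_lock once the response carrying the
+	 * matching msg_id arrives.
+	 */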
timeout : SPHW_MBOX_COMP_TIME; + err = sphw_wait_for_timeout(func_to_func, check_mbox_msg_finish, + timeout, USEC_PER_MSEC); + if (err) { + set_mbox_to_func_event(func_to_func, EVENT_TIMEOUT); + return -ETIMEDOUT; + } + + set_mbox_to_func_event(func_to_func, EVENT_END); + + return 0; +} + +#define TRY_MBOX_LOCK_SLEPP 1000 +static int send_mbox_msg_lock(struct sphw_mbox *func_to_func, u16 channel) +{ + if (!func_to_func->lock_channel_en) { + mutex_lock(&func_to_func->mbox_send_lock); + return 0; + } + + while (!test_bit(channel, &func_to_func->channel_stop)) { + if (mutex_trylock(&func_to_func->mbox_send_lock)) + return 0; + + usleep_range(TRY_MBOX_LOCK_SLEPP - 1, TRY_MBOX_LOCK_SLEPP); + } + + return -EAGAIN; +} + +static void send_mbox_msg_unlock(struct sphw_mbox *func_to_func) +{ + mutex_unlock(&func_to_func->mbox_send_lock); +} + +int sphw_mbox_to_func(struct sphw_mbox *func_to_func, u8 mod, u16 cmd, u16 dst_func, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, u32 timeout, u16 channel) +{ + /* use mbox_resp to hole data which responsed from other function */ + struct sphw_msg_desc *msg_desc = NULL; + struct mbox_msg_info msg_info = {0}; + int err; + + if (!func_to_func->hwdev->chip_present_flag) + return -EPERM; + + /* expect response message */ + msg_desc = get_mbox_msg_desc(func_to_func, SPHW_MSG_RESPONSE, + dst_func); + if (!msg_desc) + return -EFAULT; + + err = send_mbox_msg_lock(func_to_func, channel); + if (err) + return err; + + func_to_func->cur_msg_channel = channel; + msg_info.msg_id = MBOX_MSG_ID_INC(func_to_func); + + set_mbox_to_func_event(func_to_func, EVENT_START); + + err = send_mbox_msg(func_to_func, mod, cmd, buf_in, in_size, dst_func, + SPHW_MSG_DIRECT_SEND, SPHW_MSG_ACK, &msg_info); + if (err) { + sdk_err(func_to_func->hwdev->dev_hdl, "Send mailbox mod %u, cmd %u failed, msg_id: %u, err: %d\n", + mod, cmd, msg_info.msg_id, err); + set_mbox_to_func_event(func_to_func, EVENT_FAIL); + goto send_err; + } + + if (wait_mbox_msg_completion(func_to_func, timeout)) { + sdk_err(func_to_func->hwdev->dev_hdl, + "Send mbox msg timeout, msg_id: %u\n", msg_info.msg_id); + sphw_dump_aeq_info(func_to_func->hwdev); + err = -ETIMEDOUT; + goto send_err; + } + + if (mod != msg_desc->mod || cmd != msg_desc->cmd) { + sdk_err(func_to_func->hwdev->dev_hdl, + "Invalid response mbox message, mod: 0x%x, cmd: 0x%x, expect mod: 0x%x, cmd: 0x%x\n", + msg_desc->mod, msg_desc->cmd, mod, cmd); + err = -EFAULT; + goto send_err; + } + + if (msg_desc->msg_info.status) { + err = msg_desc->msg_info.status; + goto send_err; + } + + if (buf_out && out_size) { + if (*out_size < msg_desc->msg_len) { + sdk_err(func_to_func->hwdev->dev_hdl, + "Invalid response mbox message length: %u for mod %d cmd %u, should less than: %u\n", + msg_desc->msg_len, mod, cmd, *out_size); + err = -EFAULT; + goto send_err; + } + + if (msg_desc->msg_len) + memcpy(buf_out, msg_desc->msg, msg_desc->msg_len); + + *out_size = msg_desc->msg_len; + } + +send_err: + send_mbox_msg_unlock(func_to_func); + + return err; +} + +static int mbox_func_params_valid(struct sphw_mbox *func_to_func, + void *buf_in, u16 in_size, u16 channel) +{ + if (!buf_in || !in_size) + return -EINVAL; + + if (in_size > SPHW_MBOX_DATA_SIZE) { + sdk_err(func_to_func->hwdev->dev_hdl, + "Mbox msg len %u exceed limit: [1, %u]\n", + in_size, SPHW_MBOX_DATA_SIZE); + return -EINVAL; + } + + if (channel >= SPHW_CHANNEL_MAX) { + sdk_err(func_to_func->hwdev->dev_hdl, + "Invalid channel id: 0x%x\n", channel); + return -EINVAL; + } + + return 0; +} + +int 
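+/* Illustrative synchronous usage (a sketch only: "example_req", "mod_id",
+ * "cmd_id", "dst_func" and "chann" are placeholders, not symbols defined by
+ * this driver):
+ *
+ *	struct example_req req = {0};
+ *	u16 out_size = sizeof(req);
+ *	int err = sphw_mbox_to_func(hwdev->func_to_func, mod_id, cmd_id,
+ *				    dst_func, &req, sizeof(req), &req,
+ *				    &out_size, 0, chann);
+ *
+ * A timeout of 0 selects the default completion timeout; on success the
+ * response payload has been copied back into the output buffer and out_size
+ * is updated to its length.
+ */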
sphw_mbox_to_func_no_ack(struct sphw_hwdev *hwdev, u16 func_idx, u8 mod, u16 cmd, + void *buf_in, u16 in_size, u16 channel) +{ + struct mbox_msg_info msg_info = {0}; + int err = mbox_func_params_valid(hwdev->func_to_func, buf_in, in_size, + channel); + + if (err) + return err; + + err = send_mbox_msg_lock(hwdev->func_to_func, channel); + if (err) + return err; + + err = send_mbox_msg(hwdev->func_to_func, mod, cmd, buf_in, in_size, + func_idx, SPHW_MSG_DIRECT_SEND, + SPHW_MSG_NO_ACK, &msg_info); + if (err) + sdk_err(hwdev->dev_hdl, "Send mailbox no ack failed\n"); + + send_mbox_msg_unlock(hwdev->func_to_func); + + return err; +} + +int sphw_send_mbox_to_mgmt(struct sphw_hwdev *hwdev, u8 mod, u16 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size, u32 timeout, u16 channel) +{ + struct sphw_mbox *func_to_func = hwdev->func_to_func; + int err = mbox_func_params_valid(func_to_func, buf_in, in_size, channel); + + if (err) + return err; + + return sphw_mbox_to_func(func_to_func, mod, cmd, SPHW_MGMT_SRC_ID, + buf_in, in_size, buf_out, out_size, timeout, channel); +} + +void sphw_response_mbox_to_mgmt(struct sphw_hwdev *hwdev, u8 mod, u16 cmd, + void *buf_in, u16 in_size, u16 msg_id) +{ + struct mbox_msg_info msg_info; + + msg_info.msg_id = (u8)msg_id; + msg_info.status = 0; + + send_mbox_msg(hwdev->func_to_func, mod, cmd, buf_in, in_size, + SPHW_MGMT_SRC_ID, SPHW_MSG_RESPONSE, + SPHW_MSG_NO_ACK, &msg_info); +} + +int sphw_send_mbox_to_mgmt_no_ack(struct sphw_hwdev *hwdev, u8 mod, u16 cmd, + void *buf_in, u16 in_size, u16 channel) +{ + struct sphw_mbox *func_to_func = hwdev->func_to_func; + int err = mbox_func_params_valid(func_to_func, buf_in, in_size, + channel); + + if (err) + return err; + + return sphw_mbox_to_func_no_ack(hwdev, SPHW_MGMT_SRC_ID, mod, cmd, + buf_in, in_size, channel); +} + +int sphw_mbox_ppf_to_host(void *hwdev, u8 mod, u16 cmd, u8 host_id, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size, u32 timeout, u16 channel) +{ + struct sphw_hwdev *dev = hwdev; + u16 dst_ppf_func; + int err; + + if (!hwdev) + return -EINVAL; + + if (!(dev->chip_present_flag)) + return -EPERM; + + err = mbox_func_params_valid(dev->func_to_func, buf_in, in_size, + channel); + if (err) + return err; + + if (!SPHW_IS_PPF(dev)) { + sdk_err(dev->dev_hdl, "Params error, only ppf support send mbox to ppf. 
func_type: %d\n", + sphw_func_type(dev)); + return -EINVAL; + } + + if (host_id >= SPHW_MAX_HOST_NUM(dev) || + host_id == SPHW_PCI_INTF_IDX(dev->hwif)) { + sdk_err(dev->dev_hdl, "Params error, host id: %u\n", host_id); + return -EINVAL; + } + + dst_ppf_func = sphw_host_ppf_idx(dev, host_id); + if (dst_ppf_func >= SPHW_MAX_PF_NUM(dev)) { + sdk_err(dev->dev_hdl, "Dest host(%u) have not elect ppf(0x%x).\n", + host_id, dst_ppf_func); + return -EINVAL; + } + + return sphw_mbox_to_func(dev->func_to_func, mod, cmd, dst_ppf_func, buf_in, in_size, + buf_out, out_size, timeout, channel); +} + +int sphw_mbox_to_pf(void *hwdev, u8 mod, u16 cmd, void *buf_in, u16 in_size, void *buf_out, + u16 *out_size, u32 timeout, u16 channel) +{ + struct sphw_hwdev *dev = hwdev; + int err; + + if (!hwdev) + return -EINVAL; + + if (!(dev->chip_present_flag)) + return -EPERM; + + err = mbox_func_params_valid(dev->func_to_func, buf_in, in_size, + channel); + if (err) + return err; + + if (!SPHW_IS_VF(dev)) { + sdk_err(dev->dev_hdl, "Params error, func_type: %d\n", + sphw_func_type(dev)); + return -EINVAL; + } + + return sphw_mbox_to_func(dev->func_to_func, mod, cmd, sphw_pf_id_of_vf(dev), buf_in, + in_size, buf_out, out_size, timeout, channel); +} + +int sphw_mbox_to_vf(void *hwdev, u16 vf_id, u8 mod, u16 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size, u32 timeout, u16 channel) +{ + struct sphw_mbox *func_to_func = NULL; + int err = 0; + u16 dst_func_idx; + + if (!hwdev) + return -EINVAL; + + func_to_func = ((struct sphw_hwdev *)hwdev)->func_to_func; + err = mbox_func_params_valid(func_to_func, buf_in, in_size, channel); + if (err) + return err; + + if (SPHW_IS_VF((struct sphw_hwdev *)hwdev)) { + sdk_err(((struct sphw_hwdev *)hwdev)->dev_hdl, "Params error, func_type: %d\n", + sphw_func_type(hwdev)); + return -EINVAL; + } + + if (!vf_id) { + sdk_err(((struct sphw_hwdev *)hwdev)->dev_hdl, + "VF id(%u) error!\n", vf_id); + return -EINVAL; + } + + /* vf_offset_to_pf + vf_id is the vf's global function id of vf in + * this pf + */ + dst_func_idx = sphw_glb_pf_vf_offset(hwdev) + vf_id; + + return sphw_mbox_to_func(func_to_func, mod, cmd, dst_func_idx, buf_in, + in_size, buf_out, out_size, timeout, channel); +} + +int sphw_mbox_set_channel_status(struct sphw_hwdev *hwdev, u16 channel, bool enable) +{ + if (channel >= SPHW_CHANNEL_MAX) { + sdk_err(hwdev->dev_hdl, "Invalid channel id: 0x%x\n", channel); + return -EINVAL; + } + + if (enable) + clear_bit(channel, &hwdev->func_to_func->channel_stop); + else + set_bit(channel, &hwdev->func_to_func->channel_stop); + + sdk_info(hwdev->dev_hdl, "%s mbox channel 0x%x\n", + enable ? "Enable" : "Disable", channel); + + return 0; +} + +void sphw_mbox_enable_channel_lock(struct sphw_hwdev *hwdev, bool enable) +{ + hwdev->func_to_func->lock_channel_en = enable; + + sdk_info(hwdev->dev_hdl, "%s mbox channel lock\n", + enable ? 
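+	/* With the channel lock enabled, send_mbox_msg_lock() keeps retrying
+	 * the send mutex only while the target channel is not marked in
+	 * channel_stop, and gives up with -EAGAIN once it is;
+	 * sphw_mbox_set_channel_status() above toggles that per-channel stop
+	 * bit.
+	 */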
"Enable" : "Disable"); +} + +static int alloc_mbox_msg_channel(struct sphw_msg_channel *msg_ch) +{ + msg_ch->resp_msg.msg = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL); + if (!msg_ch->resp_msg.msg) + return -ENOMEM; + + msg_ch->recv_msg.msg = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL); + if (!msg_ch->recv_msg.msg) { + kfree(msg_ch->resp_msg.msg); + return -ENOMEM; + } + + msg_ch->resp_msg.seq_id = SEQ_ID_MAX_VAL; + msg_ch->recv_msg.seq_id = SEQ_ID_MAX_VAL; + atomic_set(&msg_ch->recv_msg_cnt, 0); + + return 0; +} + +static void free_mbox_msg_channel(struct sphw_msg_channel *msg_ch) +{ + kfree(msg_ch->recv_msg.msg); + kfree(msg_ch->resp_msg.msg); +} + +static int init_mgmt_msg_channel(struct sphw_mbox *func_to_func) +{ + int err; + + err = alloc_mbox_msg_channel(&func_to_func->mgmt_msg); + if (err) { + sdk_err(func_to_func->hwdev->dev_hdl, "Failed to alloc mgmt message channel\n"); + return err; + } + + err = sphw_init_mbox_dma_queue(func_to_func); + if (err) { + sdk_err(func_to_func->hwdev->dev_hdl, "Failed to init mbox dma queue\n"); + free_mbox_msg_channel(&func_to_func->mgmt_msg); + } + + return err; +} + +static void deinit_mgmt_msg_channel(struct sphw_mbox *func_to_func) +{ + sphw_deinit_mbox_dma_queue(func_to_func); + free_mbox_msg_channel(&func_to_func->mgmt_msg); +} + +int sphw_mbox_init_host_msg_channel(struct sphw_hwdev *hwdev) +{ + struct sphw_mbox *func_to_func = hwdev->func_to_func; + u8 host_num = SPHW_MAX_HOST_NUM(hwdev); + int i, host_id, err; + + if (host_num == 0) + return 0; + + func_to_func->host_msg = kcalloc(host_num, + sizeof(*func_to_func->host_msg), + GFP_KERNEL); + if (!func_to_func->host_msg) { + sdk_err(func_to_func->hwdev->dev_hdl, "Failed to alloc host message array\n"); + return -ENOMEM; + } + + for (host_id = 0; host_id < host_num; host_id++) { + err = alloc_mbox_msg_channel(&func_to_func->host_msg[host_id]); + if (err) { + sdk_err(func_to_func->hwdev->dev_hdl, + "Failed to alloc host %d message channel\n", + host_id); + goto alloc_msg_ch_err; + } + } + + func_to_func->support_h2h_msg = true; + + return 0; + +alloc_msg_ch_err: + for (i = 0; i < host_id; i++) + free_mbox_msg_channel(&func_to_func->host_msg[i]); + + kfree(func_to_func->host_msg); + func_to_func->host_msg = NULL; + + return -ENOMEM; +} + +static void deinit_host_msg_channel(struct sphw_mbox *func_to_func) +{ + int i; + + if (!func_to_func->host_msg) + return; + + for (i = 0; i < SPHW_MAX_HOST_NUM(func_to_func->hwdev); i++) + free_mbox_msg_channel(&func_to_func->host_msg[i]); + + kfree(func_to_func->host_msg); + func_to_func->host_msg = NULL; +} + +int sphw_init_func_mbox_msg_channel(void *hwdev, u16 num_func) +{ + struct sphw_hwdev *dev = hwdev; + struct sphw_mbox *func_to_func = NULL; + u16 func_id, i; + int err; + + if (!hwdev || !num_func || num_func > SPHW_MAX_FUNCTIONS) + return -EINVAL; + + func_to_func = dev->func_to_func; + if (func_to_func->func_msg) + return (func_to_func->num_func_msg == num_func) ? 
0 : -EFAULT; + + func_to_func->func_msg = + kcalloc(num_func, sizeof(*func_to_func->func_msg), GFP_KERNEL); + if (!func_to_func->func_msg) { + sdk_err(func_to_func->hwdev->dev_hdl, "Failed to alloc func message array\n"); + return -ENOMEM; + } + + for (func_id = 0; func_id < num_func; func_id++) { + err = alloc_mbox_msg_channel(&func_to_func->func_msg[func_id]); + if (err) { + sdk_err(func_to_func->hwdev->dev_hdl, + "Failed to alloc func %hu message channel\n", + func_id); + goto alloc_msg_ch_err; + } + } + + func_to_func->num_func_msg = num_func; + + return 0; + +alloc_msg_ch_err: + for (i = 0; i < func_id; i++) + free_mbox_msg_channel(&func_to_func->func_msg[i]); + + kfree(func_to_func->func_msg); + func_to_func->func_msg = NULL; + + return -ENOMEM; +} + +void sphw_deinit_func_mbox_msg_channel(struct sphw_hwdev *hwdev) +{ + struct sphw_mbox *func_to_func = hwdev->func_to_func; + u16 i; + + if (!func_to_func->func_msg) + return; + + for (i = 0; i < func_to_func->num_func_msg; i++) + free_mbox_msg_channel(&func_to_func->func_msg[i]); + + kfree(func_to_func->func_msg); + func_to_func->func_msg = NULL; +} + +struct sphw_msg_desc *get_mbox_msg_desc(struct sphw_mbox *func_to_func, u64 dir, u64 src_func_id) +{ + struct sphw_hwdev *hwdev = func_to_func->hwdev; + struct sphw_msg_channel *msg_ch = NULL; + u16 id; + + if (src_func_id == SPHW_MGMT_SRC_ID) { + msg_ch = &func_to_func->mgmt_msg; + } else if (SPHW_IS_VF(hwdev)) { + /* message from pf */ + msg_ch = func_to_func->func_msg; + if (src_func_id != sphw_pf_id_of_vf(hwdev) || !msg_ch) + return NULL; + } else if (src_func_id > sphw_glb_pf_vf_offset(hwdev)) { + /* message from vf */ + id = (u16)(src_func_id - 1U) - sphw_glb_pf_vf_offset(hwdev); + if (id >= func_to_func->num_func_msg) + return NULL; + + msg_ch = &func_to_func->func_msg[id]; + } else { + /* message from other host's ppf */ + if (!func_to_func->support_h2h_msg) + return NULL; + + for (id = 0; id < SPHW_MAX_HOST_NUM(hwdev); id++) { + if (src_func_id == sphw_host_ppf_idx(hwdev, (u8)id)) + break; + } + + if (id == SPHW_MAX_HOST_NUM(hwdev) || !func_to_func->host_msg) + return NULL; + + msg_ch = &func_to_func->host_msg[id]; + } + + return (dir == SPHW_MSG_DIRECT_SEND) ? 
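+	/* Direct-send messages are reassembled into the channel's recv_msg
+	 * descriptor and responses into resp_msg.  The channel itself was
+	 * selected above from the source function id: the management CPU,
+	 * this VF's parent PF, one of this PF's VFs, or another host's PPF.
+	 */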
+ &msg_ch->recv_msg : &msg_ch->resp_msg; +} + +static void prepare_send_mbox(struct sphw_mbox *func_to_func) +{ + struct sphw_send_mbox *send_mbox = &func_to_func->send_mbox; + + send_mbox->data = MBOX_AREA(func_to_func->hwdev->hwif); +} + +static int alloc_mbox_wb_status(struct sphw_mbox *func_to_func) +{ + struct sphw_send_mbox *send_mbox = &func_to_func->send_mbox; + struct sphw_hwdev *hwdev = func_to_func->hwdev; + u32 addr_h, addr_l; + + send_mbox->wb_vaddr = dma_alloc_coherent(hwdev->dev_hdl, MBOX_WB_STATUS_LEN, + &send_mbox->wb_paddr, GFP_KERNEL); + if (!send_mbox->wb_vaddr) + return -ENOMEM; + + send_mbox->wb_status = send_mbox->wb_vaddr; + + addr_h = upper_32_bits(send_mbox->wb_paddr); + addr_l = lower_32_bits(send_mbox->wb_paddr); + + sphw_hwif_write_reg(hwdev->hwif, SPHW_FUNC_CSR_MAILBOX_RESULT_H_OFF, addr_h); + sphw_hwif_write_reg(hwdev->hwif, SPHW_FUNC_CSR_MAILBOX_RESULT_L_OFF, addr_l); + + return 0; +} + +static void free_mbox_wb_status(struct sphw_mbox *func_to_func) +{ + struct sphw_send_mbox *send_mbox = &func_to_func->send_mbox; + struct sphw_hwdev *hwdev = func_to_func->hwdev; + + sphw_hwif_write_reg(hwdev->hwif, SPHW_FUNC_CSR_MAILBOX_RESULT_H_OFF, 0); + sphw_hwif_write_reg(hwdev->hwif, SPHW_FUNC_CSR_MAILBOX_RESULT_L_OFF, 0); + + dma_free_coherent(hwdev->dev_hdl, MBOX_WB_STATUS_LEN, + send_mbox->wb_vaddr, send_mbox->wb_paddr); +} + +int sphw_func_to_func_init(struct sphw_hwdev *hwdev) +{ + struct sphw_mbox *func_to_func; + int err; + + func_to_func = kzalloc(sizeof(*func_to_func), GFP_KERNEL); + if (!func_to_func) + return -ENOMEM; + + hwdev->func_to_func = func_to_func; + func_to_func->hwdev = hwdev; + mutex_init(&func_to_func->mbox_send_lock); + mutex_init(&func_to_func->msg_send_lock); + spin_lock_init(&func_to_func->mbox_lock); + func_to_func->workq = + create_singlethread_workqueue(SPHW_MBOX_WQ_NAME); + if (!func_to_func->workq) { + sdk_err(hwdev->dev_hdl, "Failed to initialize MBOX workqueue\n"); + err = -ENOMEM; + goto create_mbox_workq_err; + } + + err = init_mgmt_msg_channel(func_to_func); + if (err) + goto init_mgmt_msg_ch_err; + + if (SPHW_IS_VF(hwdev)) { + /* VF to PF mbox message channel */ + err = sphw_init_func_mbox_msg_channel(hwdev, 1); + if (err) + goto init_func_msg_ch_err; + } + + err = alloc_mbox_wb_status(func_to_func); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to alloc mbox write back status\n"); + goto alloc_wb_status_err; + } + + prepare_send_mbox(func_to_func); + + return 0; + +alloc_wb_status_err: + if (SPHW_IS_VF(hwdev)) + sphw_deinit_func_mbox_msg_channel(hwdev); + +init_func_msg_ch_err: + deinit_mgmt_msg_channel(func_to_func); + +init_mgmt_msg_ch_err: + destroy_workqueue(func_to_func->workq); + +create_mbox_workq_err: + kfree(func_to_func); + + return err; +} + +void sphw_func_to_func_free(struct sphw_hwdev *hwdev) +{ + struct sphw_mbox *func_to_func = hwdev->func_to_func; + + /* destroy workqueue before free related mbox resources in case of + * illegal resource access + */ + destroy_workqueue(func_to_func->workq); + + free_mbox_wb_status(func_to_func); + if (SPHW_IS_PPF(hwdev)) + deinit_host_msg_channel(func_to_func); + sphw_deinit_func_mbox_msg_channel(hwdev); + deinit_mgmt_msg_channel(func_to_func); + + kfree(func_to_func); +} diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mbox.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mbox.h new file mode 100644 index 000000000000..346df1427fa1 --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mbox.h @@ -0,0 +1,274 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* 
Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#ifndef SPHW_MBOX_H +#define SPHW_MBOX_H + +#include "sphw_crm.h" + +#define SPHW_MBOX_PF_SEND_ERR 0x1 + +#define SPHW_MGMT_SRC_ID 0x1FFF +#define SPHW_MAX_FUNCTIONS 4096 + +/* message header define */ +#define SPHW_MSG_HEADER_SRC_GLB_FUNC_IDX_SHIFT 0 +#define SPHW_MSG_HEADER_STATUS_SHIFT 13 +#define SPHW_MSG_HEADER_SOURCE_SHIFT 15 +#define SPHW_MSG_HEADER_AEQ_ID_SHIFT 16 +#define SPHW_MSG_HEADER_MSG_ID_SHIFT 18 +#define SPHW_MSG_HEADER_CMD_SHIFT 22 + +#define SPHW_MSG_HEADER_MSG_LEN_SHIFT 32 +#define SPHW_MSG_HEADER_MODULE_SHIFT 43 +#define SPHW_MSG_HEADER_SEG_LEN_SHIFT 48 +#define SPHW_MSG_HEADER_NO_ACK_SHIFT 54 +#define SPHW_MSG_HEADER_DATA_TYPE_SHIFT 55 +#define SPHW_MSG_HEADER_SEQID_SHIFT 56 +#define SPHW_MSG_HEADER_LAST_SHIFT 62 +#define SPHW_MSG_HEADER_DIRECTION_SHIFT 63 + +#define SPHW_MSG_HEADER_SRC_GLB_FUNC_IDX_MASK 0x1FFF +#define SPHW_MSG_HEADER_STATUS_MASK 0x1 +#define SPHW_MSG_HEADER_SOURCE_MASK 0x1 +#define SPHW_MSG_HEADER_AEQ_ID_MASK 0x3 +#define SPHW_MSG_HEADER_MSG_ID_MASK 0xF +#define SPHW_MSG_HEADER_CMD_MASK 0x3FF + +#define SPHW_MSG_HEADER_MSG_LEN_MASK 0x7FF +#define SPHW_MSG_HEADER_MODULE_MASK 0x1F +#define SPHW_MSG_HEADER_SEG_LEN_MASK 0x3F +#define SPHW_MSG_HEADER_NO_ACK_MASK 0x1 +#define SPHW_MSG_HEADER_DATA_TYPE_MASK 0x1 +#define SPHW_MSG_HEADER_SEQID_MASK 0x3F +#define SPHW_MSG_HEADER_LAST_MASK 0x1 +#define SPHW_MSG_HEADER_DIRECTION_MASK 0x1 + +#define SPHW_MSG_HEADER_GET(val, field) \ + (((val) >> SPHW_MSG_HEADER_##field##_SHIFT) & \ + SPHW_MSG_HEADER_##field##_MASK) +#define SPHW_MSG_HEADER_SET(val, field) \ + ((u64)(((u64)(val)) & SPHW_MSG_HEADER_##field##_MASK) << \ + SPHW_MSG_HEADER_##field##_SHIFT) + +#define IS_DMA_MBX_MSG(dst_func) ((dst_func) == SPHW_MGMT_SRC_ID) + +enum sphw_msg_direction_type { + SPHW_MSG_DIRECT_SEND = 0, + SPHW_MSG_RESPONSE = 1, +}; + +enum sphw_msg_segment_type { + NOT_LAST_SEGMENT = 0, + LAST_SEGMENT = 1, +}; + +enum sphw_msg_ack_type { + SPHW_MSG_ACK, + SPHW_MSG_NO_ACK, +}; + +enum sphw_data_type { + SPHW_DATA_INLINE = 0, + SPHW_DATA_DMA = 1, +}; + +enum sphw_msg_src_type { + SPHW_MSG_FROM_MGMT = 0, + SPHW_MSG_FROM_MBOX = 1, +}; + +enum sphw_msg_aeq_type { + SPHW_ASYNC_MSG_AEQ = 0, + /* indicate dest func or mgmt cpu which aeq to response mbox message */ + SPHW_MBOX_RSP_MSG_AEQ = 1, + /* indicate mgmt cpu which aeq to response api cmd message */ + SPHW_MGMT_RSP_MSG_AEQ = 2, +}; + +#define SPHW_MBOX_WQ_NAME "sphw_mbox" + +enum sphw_mbox_seg_errcode { + MBOX_ERRCODE_NO_ERRORS = 0, + /* VF send the mailbox data to the wrong destination functions */ + MBOX_ERRCODE_VF_TO_WRONG_FUNC = 0x100, + /* PPF send the mailbox data to the wrong destination functions */ + MBOX_ERRCODE_PPF_TO_WRONG_FUNC = 0x200, + /* PF send the mailbox data to the wrong destination functions */ + MBOX_ERRCODE_PF_TO_WRONG_FUNC = 0x300, + /* The mailbox data size is set to all zero */ + MBOX_ERRCODE_ZERO_DATA_SIZE = 0x400, + /* The sender function attribute has not been learned by hardware */ + MBOX_ERRCODE_UNKNOWN_SRC_FUNC = 0x500, + /* The receiver function attr has not been learned by hardware */ + MBOX_ERRCODE_UNKNOWN_DES_FUNC = 0x600, +}; + +struct mbox_msg_info { + u8 msg_id; + u8 status; /* can only use 1 bit */ +}; + +struct sphw_msg_desc { + void *msg; + u16 msg_len; + u8 seq_id; + u8 mod; + u16 cmd; + struct mbox_msg_info msg_info; +}; + +struct sphw_msg_channel { + struct sphw_msg_desc resp_msg; + struct sphw_msg_desc recv_msg; + + atomic_t recv_msg_cnt; +}; + +/* Receive other functions mbox message */ 
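+/* A message header is composed with the SET macros and decoded with the
+ * matching GET macros; for illustration only (the "len"/"mod"/"cmd" values
+ * below are placeholders):
+ *
+ *	u64 hdr = SPHW_MSG_HEADER_SET(len, MSG_LEN) |
+ *		  SPHW_MSG_HEADER_SET(mod, MODULE) |
+ *		  SPHW_MSG_HEADER_SET(cmd, CMD) |
+ *		  SPHW_MSG_HEADER_SET(SPHW_MSG_DIRECT_SEND, DIRECTION);
+ *	...
+ *	u8 module = SPHW_MSG_HEADER_GET(hdr, MODULE);
+ *
+ * Each field occupies the bits starting at its _SHIFT value, with a width
+ * given by its _MASK value.
+ */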
+struct sphw_recv_mbox { + void *msg; + u16 msg_len; + u8 msg_id; + u8 mod; + u16 cmd; + u16 src_func_idx; + enum sphw_msg_ack_type ack_type; + void *resp_buff; +}; + +struct sphw_send_mbox { + u8 *data; + + u64 *wb_status; /* write back status */ + void *wb_vaddr; + dma_addr_t wb_paddr; +}; + +enum mbox_event_state { + EVENT_START = 0, + EVENT_FAIL, + EVENT_SUCCESS, + EVENT_TIMEOUT, + EVENT_END, +}; + +enum sphw_mbox_cb_state { + SPHW_VF_MBOX_CB_REG = 0, + SPHW_VF_MBOX_CB_RUNNING, + SPHW_PF_MBOX_CB_REG, + SPHW_PF_MBOX_CB_RUNNING, + SPHW_PPF_MBOX_CB_REG, + SPHW_PPF_MBOX_CB_RUNNING, + SPHW_PPF_TO_PF_MBOX_CB_REG, + SPHW_PPF_TO_PF_MBOX_CB_RUNNIG, +}; + +struct mbox_dma_msg { + u32 xor; + u32 dma_addr_high; + u32 dma_addr_low; + u32 msg_len; + u64 rsvd; +}; + +struct mbox_dma_queue { + void *dma_buff_vaddr; + dma_addr_t dma_buff_paddr; + + u16 depth; + u16 prod_idx; + u16 cons_idx; +}; + +struct sphw_mbox { + struct sphw_hwdev *hwdev; + + bool lock_channel_en; + unsigned long channel_stop; + u16 cur_msg_channel; + + /* lock for send mbox message and ack message */ + struct mutex mbox_send_lock; + /* lock for send mbox message */ + struct mutex msg_send_lock; + struct sphw_send_mbox send_mbox; + + struct mbox_dma_queue sync_msg_queue; + struct mbox_dma_queue async_msg_queue; + + struct workqueue_struct *workq; + + struct sphw_msg_channel mgmt_msg; /* driver and MGMT CPU */ + struct sphw_msg_channel *host_msg; /* PPF message between hosts */ + struct sphw_msg_channel *func_msg; /* PF to VF or VF to PF */ + u16 num_func_msg; + bool support_h2h_msg; /* host to host */ + + /* vf receive pf/ppf callback */ + sphw_vf_mbox_cb vf_mbox_cb[SPHW_MOD_MAX]; + void *vf_mbox_data[SPHW_MOD_MAX]; + /* pf/ppf receive vf callback */ + sphw_pf_mbox_cb pf_mbox_cb[SPHW_MOD_MAX]; + void *pf_mbox_data[SPHW_MOD_MAX]; + /* ppf receive pf/ppf callback */ + sphw_ppf_mbox_cb ppf_mbox_cb[SPHW_MOD_MAX]; + void *ppf_mbox_data[SPHW_MOD_MAX]; + /* pf receive ppf callback */ + sphw_pf_recv_from_ppf_mbox_cb pf_recv_ppf_mbox_cb[SPHW_MOD_MAX]; + void *pf_recv_ppf_mbox_data[SPHW_MOD_MAX]; + unsigned long ppf_to_pf_mbox_cb_state[SPHW_MOD_MAX]; + unsigned long ppf_mbox_cb_state[SPHW_MOD_MAX]; + unsigned long pf_mbox_cb_state[SPHW_MOD_MAX]; + unsigned long vf_mbox_cb_state[SPHW_MOD_MAX]; + + u8 send_msg_id; + enum mbox_event_state event_flag; + /* lock for mbox event flag */ + spinlock_t mbox_lock; +}; + +struct sphw_mbox_work { + struct work_struct work; + struct sphw_mbox *func_to_func; + struct sphw_recv_mbox *recv_mbox; + struct sphw_msg_channel *msg_ch; +}; + +struct vf_cmd_check_handle { + u16 cmd; + bool (*check_cmd)(struct sphw_hwdev *hwdev, u16 src_func_idx, + void *buf_in, u16 in_size); +}; + +void sphw_mbox_func_aeqe_handler(void *handle, u8 *header, u8 size); + +int sphw_func_to_func_init(struct sphw_hwdev *hwdev); + +void sphw_func_to_func_free(struct sphw_hwdev *hwdev); + +int sphw_send_mbox_to_mgmt(struct sphw_hwdev *hwdev, u8 mod, u16 cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size, u32 timeout, u16 channel); + +void sphw_response_mbox_to_mgmt(struct sphw_hwdev *hwdev, u8 mod, u16 cmd, + void *buf_in, u16 in_size, u16 msg_id); + +int sphw_send_mbox_to_mgmt_no_ack(struct sphw_hwdev *hwdev, u8 mod, u16 cmd, + void *buf_in, u16 in_size, u16 channel); +int sphw_mbox_to_func(struct sphw_mbox *func_to_func, u8 mod, u16 cmd, + u16 dst_func, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size, u32 timeout, u16 channel); + +int sphw_mbox_ppf_to_host(void *hwdev, u8 mod, u16 cmd, u8 host_id, + void *buf_in, 
u16 in_size, void *buf_out, + u16 *out_size, u32 timeout, u16 channel); + +int sphw_mbox_init_host_msg_channel(struct sphw_hwdev *hwdev); + +int sphw_mbox_set_channel_status(struct sphw_hwdev *hwdev, u16 channel, bool enable); + +void sphw_mbox_enable_channel_lock(struct sphw_hwdev *hwdev, bool enable); + +#endif + diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mgmt.c b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mgmt.c new file mode 100644 index 000000000000..f885ca8ce761 --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mgmt.c @@ -0,0 +1,1382 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/pci.h> +#include <linux/device.h> +#include <linux/spinlock.h> +#include <linux/completion.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/semaphore.h> +#include <linux/delay.h> + +#include "sphw_crm.h" +#include "sphw_hw.h" +#include "sphw_common.h" +#include "sphw_comm_cmd.h" +#include "sphw_hwdev.h" +#include "sphw_eqs.h" +#include "sphw_mbox.h" +#include "sphw_api_cmd.h" +#include "sphw_prof_adap.h" +#include "sphw_mgmt.h" +#include "sphw_csr.h" + +#define SPHW_MSG_TO_MGMT_MAX_LEN 2016 + +#define SPHW_API_CHAIN_AEQ_ID 2 +#define MAX_PF_MGMT_BUF_SIZE 2048UL +#define SEGMENT_LEN 48 +#define ASYNC_MSG_FLAG 0x8 +#define MGMT_MSG_MAX_SEQ_ID (ALIGN(SPHW_MSG_TO_MGMT_MAX_LEN, \ + SEGMENT_LEN) / SEGMENT_LEN) + +#define BUF_OUT_DEFAULT_SIZE 1 + +#define MGMT_MSG_SIZE_MIN 20 +#define MGMT_MSG_SIZE_STEP 16 +#define MGMT_MSG_RSVD_FOR_DEV 8 + +#define SYNC_MSG_ID_MASK 0x7 +#define ASYNC_MSG_ID_MASK 0x7 + +#define SYNC_FLAG 0 +#define ASYNC_FLAG 1 + +#define MSG_NO_RESP 0xFFFF + +#define MGMT_MSG_TIMEOUT 300000 /* millisecond */ + +#define SYNC_MSG_ID(pf_to_mgmt) ((pf_to_mgmt)->sync_msg_id) + +#define SYNC_MSG_ID_INC(pf_to_mgmt) (SYNC_MSG_ID(pf_to_mgmt) = \ + (SYNC_MSG_ID(pf_to_mgmt) + 1) & SYNC_MSG_ID_MASK) +#define ASYNC_MSG_ID(pf_to_mgmt) ((pf_to_mgmt)->async_msg_id) + +#define ASYNC_MSG_ID_INC(pf_to_mgmt) (ASYNC_MSG_ID(pf_to_mgmt) = \ + ((ASYNC_MSG_ID(pf_to_mgmt) + 1) & ASYNC_MSG_ID_MASK) | ASYNC_MSG_FLAG) + +static void pf_to_mgmt_send_event_set(struct sphw_msg_pf_to_mgmt *pf_to_mgmt, + int event_flag) +{ + spin_lock(&pf_to_mgmt->sync_event_lock); + pf_to_mgmt->event_flag = event_flag; + spin_unlock(&pf_to_mgmt->sync_event_lock); +} + +/** + * sphw_register_mgmt_msg_cb - register sync msg handler for a module + * @hwdev: the pointer to hw device + * @mod: module in the chip that this handler will handle its sync messages + * @pri_handle: specific mod's private data that will be used in callback + * @callback: the handler for a sync message that will handle messages + **/ +int sphw_register_mgmt_msg_cb(void *hwdev, u8 mod, void *pri_handle, sphw_mgmt_msg_cb callback) +{ + struct sphw_msg_pf_to_mgmt *pf_to_mgmt = NULL; + + if (mod >= SPHW_MOD_HW_MAX || !hwdev) + return -EFAULT; + + pf_to_mgmt = ((struct sphw_hwdev *)hwdev)->pf_to_mgmt; + if (!pf_to_mgmt) + return -EINVAL; + + pf_to_mgmt->recv_mgmt_msg_cb[mod] = callback; + pf_to_mgmt->recv_mgmt_msg_data[mod] = pri_handle; + + set_bit(SPHW_MGMT_MSG_CB_REG, &pf_to_mgmt->mgmt_msg_cb_state[mod]); + + return 0; +} + +/** + * sphw_unregister_mgmt_msg_cb - unregister sync msg handler for a module + * @hwdev: the pointer to hw device + * @mod: module in the chip that this handler will handle its sync messages + **/ +void 
sphw_unregister_mgmt_msg_cb(void *hwdev, u8 mod) +{ + struct sphw_msg_pf_to_mgmt *pf_to_mgmt = NULL; + + if (!hwdev || mod >= SPHW_MOD_HW_MAX) + return; + + pf_to_mgmt = ((struct sphw_hwdev *)hwdev)->pf_to_mgmt; + if (!pf_to_mgmt) + return; + + clear_bit(SPHW_MGMT_MSG_CB_REG, &pf_to_mgmt->mgmt_msg_cb_state[mod]); + + while (test_bit(SPHW_MGMT_MSG_CB_RUNNING, &pf_to_mgmt->mgmt_msg_cb_state[mod])) + usleep_range(900, 1000); + + pf_to_mgmt->recv_mgmt_msg_cb[mod] = NULL; + pf_to_mgmt->recv_mgmt_msg_data[mod] = NULL; +} + +/** + * mgmt_msg_len - calculate the total message length + * @msg_data_len: the length of the message data + * Return: the total message length + **/ +static u16 mgmt_msg_len(u16 msg_data_len) +{ + /* u64 - the size of the header */ + u16 msg_size; + + msg_size = (u16)(MGMT_MSG_RSVD_FOR_DEV + sizeof(u64) + msg_data_len); + + if (msg_size > MGMT_MSG_SIZE_MIN) + msg_size = MGMT_MSG_SIZE_MIN + + ALIGN((msg_size - MGMT_MSG_SIZE_MIN), + MGMT_MSG_SIZE_STEP); + else + msg_size = MGMT_MSG_SIZE_MIN; + + return msg_size; +} + +/** + * prepare_header - prepare the header of the message + * @pf_to_mgmt: PF to MGMT channel + * @header: pointer of the header to prepare + * @msg_len: the length of the message + * @mod: module in the chip that will get the message + * @direction: the direction of the original message + * @msg_id: message id + **/ +static void prepare_header(struct sphw_msg_pf_to_mgmt *pf_to_mgmt, + u64 *header, u16 msg_len, u8 mod, + enum sphw_msg_ack_type ack_type, + enum sphw_msg_direction_type direction, + enum sphw_mgmt_cmd cmd, u32 msg_id) +{ + struct sphw_hwif *hwif = pf_to_mgmt->hwdev->hwif; + + *header = SPHW_MSG_HEADER_SET(msg_len, MSG_LEN) | + SPHW_MSG_HEADER_SET(mod, MODULE) | + SPHW_MSG_HEADER_SET(msg_len, SEG_LEN) | + SPHW_MSG_HEADER_SET(ack_type, NO_ACK) | + SPHW_MSG_HEADER_SET(SPHW_DATA_INLINE, DATA_TYPE) | + SPHW_MSG_HEADER_SET(0, SEQID) | + SPHW_MSG_HEADER_SET(SPHW_API_CHAIN_AEQ_ID, AEQ_ID) | + SPHW_MSG_HEADER_SET(LAST_SEGMENT, LAST) | + SPHW_MSG_HEADER_SET(direction, DIRECTION) | + SPHW_MSG_HEADER_SET(cmd, CMD) | + SPHW_MSG_HEADER_SET(SPHW_MSG_FROM_MGMT, SOURCE) | + SPHW_MSG_HEADER_SET(hwif->attr.func_global_idx, SRC_GLB_FUNC_IDX) | + SPHW_MSG_HEADER_SET(msg_id, MSG_ID); +} + +static void clp_prepare_header(struct sphw_hwdev *hwdev, u64 *header, + u16 msg_len, u8 mod, + enum sphw_msg_ack_type ack_type, + enum sphw_msg_direction_type direction, + enum sphw_mgmt_cmd cmd, u32 msg_id) +{ + struct sphw_hwif *hwif = hwdev->hwif; + + *header = SPHW_MSG_HEADER_SET(msg_len, MSG_LEN) | + SPHW_MSG_HEADER_SET(mod, MODULE) | + SPHW_MSG_HEADER_SET(msg_len, SEG_LEN) | + SPHW_MSG_HEADER_SET(ack_type, NO_ACK) | + SPHW_MSG_HEADER_SET(SPHW_DATA_INLINE, DATA_TYPE) | + SPHW_MSG_HEADER_SET(0, SEQID) | + SPHW_MSG_HEADER_SET(SPHW_API_CHAIN_AEQ_ID, AEQ_ID) | + SPHW_MSG_HEADER_SET(LAST_SEGMENT, LAST) | + SPHW_MSG_HEADER_SET(direction, DIRECTION) | + SPHW_MSG_HEADER_SET(cmd, CMD) | + SPHW_MSG_HEADER_SET(hwif->attr.func_global_idx, SRC_GLB_FUNC_IDX) | + SPHW_MSG_HEADER_SET(msg_id, MSG_ID); +} + +/** + * prepare_mgmt_cmd - prepare the mgmt command + * @mgmt_cmd: pointer to the command to prepare + * @header: pointer of the header to prepare + * @msg: the data of the message + * @msg_len: the length of the message + **/ +static void prepare_mgmt_cmd(u8 *mgmt_cmd, u64 *header, const void *msg, + int msg_len) +{ + memset(mgmt_cmd, 0, MGMT_MSG_RSVD_FOR_DEV); + + mgmt_cmd += MGMT_MSG_RSVD_FOR_DEV; + memcpy(mgmt_cmd, header, sizeof(*header)); + + mgmt_cmd += sizeof(*header); + 
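/* resulting buffer layout: MGMT_MSG_RSVD_FOR_DEV reserved bytes | 64-bit message header | message payload */ +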
memcpy(mgmt_cmd, msg, msg_len); +} + +/** + * send_msg_to_mgmt_sync - send sync message + * @pf_to_mgmt: PF to MGMT channel + * @mod: module in the chip that will get the message + * @cmd: command of the message + * @msg: the msg data + * @msg_len: the msg data length + * @ack_type: ack type of the message + * @direction: the direction of the original message + * @resp_msg_id: msg id to respond to + * Return: 0 - success, negative - failure + **/ +static int send_msg_to_mgmt_sync(struct sphw_msg_pf_to_mgmt *pf_to_mgmt, + u8 mod, u16 cmd, const void *msg, u16 msg_len, + enum sphw_msg_ack_type ack_type, + enum sphw_msg_direction_type direction, + u16 resp_msg_id) +{ + void *mgmt_cmd = pf_to_mgmt->sync_msg_buf; + struct sphw_api_cmd_chain *chain = NULL; + u8 node_id = SPHW_MGMT_CPU_NODE_ID(pf_to_mgmt->hwdev); + u64 header; + u16 cmd_size = mgmt_msg_len(msg_len); + + if (!sphw_get_chip_present_flag(pf_to_mgmt->hwdev)) + return -EFAULT; + + if (direction == SPHW_MSG_RESPONSE) + prepare_header(pf_to_mgmt, &header, msg_len, mod, ack_type, + direction, cmd, resp_msg_id); + else + prepare_header(pf_to_mgmt, &header, msg_len, mod, ack_type, + direction, cmd, SYNC_MSG_ID_INC(pf_to_mgmt)); + chain = pf_to_mgmt->cmd_chain[SPHW_API_CMD_WRITE_TO_MGMT_CPU]; + + if (ack_type == SPHW_MSG_ACK) + pf_to_mgmt_send_event_set(pf_to_mgmt, SEND_EVENT_START); + + prepare_mgmt_cmd((u8 *)mgmt_cmd, &header, msg, msg_len); + + return sphw_api_cmd_write(chain, node_id, mgmt_cmd, cmd_size); +} + +/** + * send_msg_to_mgmt_async - send async message + * @pf_to_mgmt: PF to MGMT channel + * @mod: module in the chip that will get the message + * @cmd: command of the message + * @msg: the data of the message + * @msg_len: the length of the message + * @direction: the direction of the original message + * Return: 0 - success, negative - failure + **/ +static int send_msg_to_mgmt_async(struct sphw_msg_pf_to_mgmt *pf_to_mgmt, + u8 mod, u16 cmd, const void *msg, u16 msg_len, + enum sphw_msg_direction_type direction) +{ + void *mgmt_cmd = pf_to_mgmt->async_msg_buf; + struct sphw_api_cmd_chain *chain = NULL; + u8 node_id = SPHW_MGMT_CPU_NODE_ID(pf_to_mgmt->hwdev); + u64 header; + u16 cmd_size = mgmt_msg_len(msg_len); + + if (!sphw_get_chip_present_flag(pf_to_mgmt->hwdev)) + return -EFAULT; + + prepare_header(pf_to_mgmt, &header, msg_len, mod, SPHW_MSG_NO_ACK, + direction, cmd, ASYNC_MSG_ID(pf_to_mgmt)); + + prepare_mgmt_cmd((u8 *)mgmt_cmd, &header, msg, msg_len); + + chain = pf_to_mgmt->cmd_chain[SPHW_API_CMD_WRITE_ASYNC_TO_MGMT_CPU]; + + return sphw_api_cmd_write(chain, node_id, mgmt_cmd, cmd_size); +} + +static inline void msg_to_mgmt_pre(u8 mod, void *buf_in) +{ + struct sphw_msg_head *msg_head = NULL; + + /* the number of AEQs is fixed to 3, so the response aeq id must be less than 3 */ + if (mod == SPHW_MOD_COMM || mod == SPHW_MOD_L2NIC) { + msg_head = buf_in; + + if (msg_head->resp_aeq_num >= SPHW_MAX_AEQS) + msg_head->resp_aeq_num = 0; + } +} + +int sphw_pf_to_mgmt_sync(void *hwdev, u8 mod, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, u32 timeout) +{ + struct sphw_msg_pf_to_mgmt *pf_to_mgmt = NULL; + void *dev = ((struct sphw_hwdev *)hwdev)->dev_hdl; + struct sphw_recv_msg *recv_msg = NULL; + struct completion *recv_done = NULL; + ulong timeo; + int err; + ulong ret; + + msg_to_mgmt_pre(mod, buf_in); + + pf_to_mgmt = ((struct sphw_hwdev *)hwdev)->pf_to_mgmt; + + /* Lock the sync_msg_buf */ + down(&pf_to_mgmt->sync_msg_lock); + recv_msg = &pf_to_mgmt->recv_resp_msg_from_mgmt; + recv_done = &recv_msg->recv_done; + + init_completion(recv_done); + + err =
send_msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size, + SPHW_MSG_ACK, SPHW_MSG_DIRECT_SEND, + MSG_NO_RESP); + if (err) { + sdk_err(dev, "Failed to send sync msg to mgmt, sync_msg_id: %u\n", + pf_to_mgmt->sync_msg_id); + pf_to_mgmt_send_event_set(pf_to_mgmt, SEND_EVENT_FAIL); + goto unlock_sync_msg; + } + + timeo = msecs_to_jiffies(timeout ? timeout : MGMT_MSG_TIMEOUT); + + ret = wait_for_completion_timeout(recv_done, timeo); + if (!ret) { + sdk_err(dev, "Mgmt response sync cmd timeout, sync_msg_id: %u\n", + pf_to_mgmt->sync_msg_id); + sphw_dump_aeq_info((struct sphw_hwdev *)hwdev); + err = -ETIMEDOUT; + pf_to_mgmt_send_event_set(pf_to_mgmt, SEND_EVENT_TIMEOUT); + goto unlock_sync_msg; + } + pf_to_mgmt_send_event_set(pf_to_mgmt, SEND_EVENT_END); + + if (!(((struct sphw_hwdev *)hwdev)->chip_present_flag)) { + up(&pf_to_mgmt->sync_msg_lock); + return -ETIMEDOUT; + } + + if (buf_out && out_size) { + if (*out_size < recv_msg->msg_len) { + sdk_err(dev, "Invalid response message length: %u for mod %d cmd %u from mgmt, should less than: %u\n", + recv_msg->msg_len, mod, cmd, *out_size); + err = -EFAULT; + goto unlock_sync_msg; + } + + if (recv_msg->msg_len) + memcpy(buf_out, recv_msg->msg, recv_msg->msg_len); + + *out_size = recv_msg->msg_len; + } + +unlock_sync_msg: + up(&pf_to_mgmt->sync_msg_lock); + + return err; +} + +int sphw_pf_to_mgmt_async(void *hwdev, u8 mod, u16 cmd, const void *buf_in, u16 in_size) +{ + struct sphw_msg_pf_to_mgmt *pf_to_mgmt; + void *dev = ((struct sphw_hwdev *)hwdev)->dev_hdl; + int err; + + pf_to_mgmt = ((struct sphw_hwdev *)hwdev)->pf_to_mgmt; + + /* Lock the async_msg_buf */ + spin_lock_bh(&pf_to_mgmt->async_msg_lock); + ASYNC_MSG_ID_INC(pf_to_mgmt); + + err = send_msg_to_mgmt_async(pf_to_mgmt, mod, cmd, buf_in, in_size, + SPHW_MSG_DIRECT_SEND); + spin_unlock_bh(&pf_to_mgmt->async_msg_lock); + + if (err) { + sdk_err(dev, "Failed to send async mgmt msg\n"); + return err; + } + + return 0; +} + +int sphw_pf_msg_to_mgmt_sync(void *hwdev, u8 mod, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, u32 timeout) +{ + if (!hwdev) + return -EINVAL; + + if (!sphw_get_chip_present_flag(hwdev)) + return -EPERM; + + if (in_size > SPHW_MSG_TO_MGMT_MAX_LEN) + return -EINVAL; + + return sphw_pf_to_mgmt_sync(hwdev, mod, cmd, buf_in, in_size, buf_out, out_size, timeout); +} + +int sphw_msg_to_mgmt_sync(void *hwdev, u8 mod, u16 cmd, void *buf_in, u16 in_size, void *buf_out, + u16 *out_size, u32 timeout, u16 channel) +{ + if (!hwdev) + return -EINVAL; + + if (!sphw_get_chip_present_flag(hwdev)) + return -EPERM; + + return sphw_send_mbox_to_mgmt(hwdev, mod, cmd, buf_in, in_size, + buf_out, out_size, timeout, channel); +} + +int sphw_msg_to_mgmt_no_ack(void *hwdev, u8 mod, u16 cmd, void *buf_in, u16 in_size, u16 channel) +{ + if (!hwdev) + return -EINVAL; + + if (!sphw_get_chip_present_flag(hwdev)) + return -EPERM; + + return sphw_send_mbox_to_mgmt_no_ack(hwdev, mod, cmd, buf_in, in_size, channel); +} + +int sphw_msg_to_mgmt_async(void *hwdev, u8 mod, u16 cmd, void *buf_in, u16 in_size, u16 channel) +{ + return sphw_msg_to_mgmt_api_chain_async(hwdev, mod, cmd, buf_in, in_size); +} + +int sphw_msg_to_mgmt_api_chain_sync(void *hwdev, u8 mod, u16 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size, u32 timeout) +{ + if (!hwdev) + return -EINVAL; + + if (!sphw_get_chip_present_flag(hwdev)) + return -EPERM; + + return sphw_pf_msg_to_mgmt_sync(hwdev, mod, cmd, buf_in, in_size, + buf_out, out_size, timeout); +} + +int sphw_msg_to_mgmt_api_chain_async(void 
*hwdev, u8 mod, u16 cmd, const void *buf_in, u16 in_size) +{ + int err; + + if (!hwdev) + return -EINVAL; + + if (sphw_func_type(hwdev) == TYPE_VF) { + err = -EFAULT; + sdk_err(((struct sphw_hwdev *)hwdev)->dev_hdl, + "VF doesn't support async cmd\n"); + } else { + err = sphw_pf_to_mgmt_async(hwdev, mod, cmd, buf_in, in_size); + } + + return err; +} + +static void send_mgmt_ack(struct sphw_msg_pf_to_mgmt *pf_to_mgmt, + u8 mod, u16 cmd, void *buf_in, u16 in_size, + u16 msg_id) +{ + u16 buf_size; + + if (!in_size) + buf_size = BUF_OUT_DEFAULT_SIZE; + else + buf_size = in_size; + + sphw_response_mbox_to_mgmt(pf_to_mgmt->hwdev, mod, cmd, buf_in, buf_size, msg_id); +} + +static void mgmt_recv_msg_handler(struct sphw_msg_pf_to_mgmt *pf_to_mgmt, + u8 mod, u16 cmd, void *buf_in, u16 in_size, + u16 msg_id, int need_resp) +{ + void *dev = pf_to_mgmt->hwdev->dev_hdl; + void *buf_out = pf_to_mgmt->mgmt_ack_buf; + enum sphw_mod_type tmp_mod = mod; + bool ack_first = false; + u16 out_size = 0; + + memset(buf_out, 0, MAX_PF_MGMT_BUF_SIZE); + + if (mod >= SPHW_MOD_HW_MAX) { + sdk_warn(dev, "Receive illegal message from mgmt cpu, mod = %d\n", + mod); + goto resp; + } + + set_bit(SPHW_MGMT_MSG_CB_RUNNING, &pf_to_mgmt->mgmt_msg_cb_state[tmp_mod]); + + if (!pf_to_mgmt->recv_mgmt_msg_cb[mod] || + !test_bit(SPHW_MGMT_MSG_CB_REG, + &pf_to_mgmt->mgmt_msg_cb_state[tmp_mod])) { + sdk_warn(dev, "Receive mgmt callback is null, mod = %d\n", + mod); + clear_bit(SPHW_MGMT_MSG_CB_RUNNING, &pf_to_mgmt->mgmt_msg_cb_state[tmp_mod]); + goto resp; + } + + /* TODO: some messages need to be acked first */ + + pf_to_mgmt->recv_mgmt_msg_cb[tmp_mod](pf_to_mgmt->hwdev, + pf_to_mgmt->recv_mgmt_msg_data[tmp_mod], + cmd, buf_in, in_size, buf_out, &out_size); + + clear_bit(SPHW_MGMT_MSG_CB_RUNNING, &pf_to_mgmt->mgmt_msg_cb_state[tmp_mod]); + +resp: + if (!ack_first && need_resp) + send_mgmt_ack(pf_to_mgmt, mod, cmd, buf_out, out_size, msg_id); +} + +/** + * mgmt_resp_msg_handler - handler for response message from mgmt cpu + * @pf_to_mgmt: PF to MGMT channel + * @recv_msg: received message details + **/ +static void mgmt_resp_msg_handler(struct sphw_msg_pf_to_mgmt *pf_to_mgmt, + struct sphw_recv_msg *recv_msg) +{ + void *dev = pf_to_mgmt->hwdev->dev_hdl; + + /* ignore responses to async messages */ + if (recv_msg->msg_id & ASYNC_MSG_FLAG) + return; + + spin_lock(&pf_to_mgmt->sync_event_lock); + if (recv_msg->msg_id == pf_to_mgmt->sync_msg_id && + pf_to_mgmt->event_flag == SEND_EVENT_START) { + pf_to_mgmt->event_flag = SEND_EVENT_SUCCESS; + complete(&recv_msg->recv_done); + } else if (recv_msg->msg_id != pf_to_mgmt->sync_msg_id) { + sdk_err(dev, "Send msg id(0x%x) recv msg id(0x%x) mismatch, event state=%d\n", + pf_to_mgmt->sync_msg_id, recv_msg->msg_id, + pf_to_mgmt->event_flag); + } else { + sdk_err(dev, "Wait timeout, send msg id(0x%x) recv msg id(0x%x), event state=%d!\n", + pf_to_mgmt->sync_msg_id, recv_msg->msg_id, + pf_to_mgmt->event_flag); + } + spin_unlock(&pf_to_mgmt->sync_event_lock); +} + +static void recv_mgmt_msg_work_handler(struct work_struct *work) +{ + struct sphw_mgmt_msg_handle_work *mgmt_work = + container_of(work, struct sphw_mgmt_msg_handle_work, work); + + mgmt_recv_msg_handler(mgmt_work->pf_to_mgmt, mgmt_work->mod, + mgmt_work->cmd, mgmt_work->msg, + mgmt_work->msg_len, mgmt_work->msg_id, + !mgmt_work->async_mgmt_to_pf); + + kfree(mgmt_work->msg); + kfree(mgmt_work); +} + +static bool check_mgmt_seq_id_and_seg_len(struct sphw_recv_msg *recv_msg, + u8 seq_id, u8 seg_len) +{ + if (seq_id > MGMT_MSG_MAX_SEQ_ID || seg_len > SEGMENT_LEN) +
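/* out-of-range sequence id or oversized segment: drop it */ +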
return false; + + if (seq_id == 0) { + recv_msg->seq_id = seq_id; + } else { + if (seq_id != recv_msg->seq_id + 1) + return false; + + recv_msg->seq_id = seq_id; + } + + return true; +} + +/** + * recv_mgmt_msg_handler - handle a message from mgmt cpu + * @pf_to_mgmt: PF to MGMT channel + * @header: the header of the message + * @recv_msg: received message details + **/ +static void recv_mgmt_msg_handler(struct sphw_msg_pf_to_mgmt *pf_to_mgmt, + u8 *header, struct sphw_recv_msg *recv_msg) +{ + struct sphw_hwdev *hwdev = pf_to_mgmt->hwdev; + struct sphw_mgmt_msg_handle_work *mgmt_work = NULL; + u64 mbox_header = *((u64 *)header); + void *msg_body = header + sizeof(mbox_header); + u8 seq_id, seq_len; + u32 offset; + u64 dir; + + /* Don't need to get anything from hw when cmd is async */ + dir = SPHW_MSG_HEADER_GET(mbox_header, DIRECTION); + if (dir == SPHW_MSG_RESPONSE && SPHW_MSG_HEADER_GET(mbox_header, MSG_ID) & ASYNC_MSG_FLAG) + return; + + seq_len = SPHW_MSG_HEADER_GET(mbox_header, SEG_LEN); + seq_id = SPHW_MSG_HEADER_GET(mbox_header, SEQID); + + if (!check_mgmt_seq_id_and_seg_len(recv_msg, seq_id, seq_len)) { + sdk_err(hwdev->dev_hdl, + "Mgmt msg sequence id and segment length check fail, front seq_id: 0x%x, current seq_id: 0x%x, seg len: 0x%x\n", + recv_msg->seq_id, seq_id, seq_len); + /* set seq_id to invalid seq_id */ + recv_msg->seq_id = MGMT_MSG_MAX_SEQ_ID; + return; + } + + offset = seq_id * SEGMENT_LEN; + memcpy((u8 *)recv_msg->msg + offset, msg_body, seq_len); + + if (!SPHW_MSG_HEADER_GET(mbox_header, LAST)) + return; + + recv_msg->cmd = SPHW_MSG_HEADER_GET(mbox_header, CMD); + recv_msg->mod = SPHW_MSG_HEADER_GET(mbox_header, MODULE); + recv_msg->async_mgmt_to_pf = SPHW_MSG_HEADER_GET(mbox_header, NO_ACK); + recv_msg->msg_len = SPHW_MSG_HEADER_GET(mbox_header, MSG_LEN); + recv_msg->msg_id = SPHW_MSG_HEADER_GET(mbox_header, MSG_ID); + recv_msg->seq_id = MGMT_MSG_MAX_SEQ_ID; + + if (SPHW_MSG_HEADER_GET(mbox_header, DIRECTION) == SPHW_MSG_RESPONSE) { + mgmt_resp_msg_handler(pf_to_mgmt, recv_msg); + return; + } + + mgmt_work = kzalloc(sizeof(*mgmt_work), GFP_KERNEL); + if (!mgmt_work) { + sdk_err(hwdev->dev_hdl, "Allocate mgmt work memory failed\n"); + return; + } + + if (recv_msg->msg_len) { + mgmt_work->msg = kzalloc(recv_msg->msg_len, GFP_KERNEL); + if (!mgmt_work->msg) { + sdk_err(hwdev->dev_hdl, "Allocate mgmt msg memory failed\n"); + kfree(mgmt_work); + return; + } + } + + mgmt_work->pf_to_mgmt = pf_to_mgmt; + mgmt_work->msg_len = recv_msg->msg_len; + memcpy(mgmt_work->msg, recv_msg->msg, recv_msg->msg_len); + mgmt_work->msg_id = recv_msg->msg_id; + mgmt_work->mod = recv_msg->mod; + mgmt_work->cmd = recv_msg->cmd; + mgmt_work->async_mgmt_to_pf = recv_msg->async_mgmt_to_pf; + + INIT_WORK(&mgmt_work->work, recv_mgmt_msg_work_handler); + queue_work_on(sphw_get_work_cpu_affinity(hwdev, WORK_TYPE_MGMT_MSG), + pf_to_mgmt->workq, &mgmt_work->work); +} + +/** + * sphw_mgmt_msg_aeqe_handler - handler for a mgmt message event + * @hwdev: the pointer to hw device + * @header: the header of the message + * @size: unused + **/ +void sphw_mgmt_msg_aeqe_handler(void *hwdev, u8 *header, u8 size) +{ + struct sphw_hwdev *dev = (struct sphw_hwdev *)hwdev; + struct sphw_msg_pf_to_mgmt *pf_to_mgmt = NULL; + struct sphw_recv_msg *recv_msg = NULL; + bool is_send_dir = false; + + if ((SPHW_MSG_HEADER_GET(*(u64 *)header, SOURCE) == + SPHW_MSG_FROM_MBOX)) { + sphw_mbox_func_aeqe_handler(hwdev, header, size); + return; + } + + pf_to_mgmt = dev->pf_to_mgmt; + if (!pf_to_mgmt) + return; + + is_send_dir =
(SPHW_MSG_HEADER_GET(*(u64 *)header, DIRECTION) == + SPHW_MSG_DIRECT_SEND) ? true : false; + + recv_msg = is_send_dir ? &pf_to_mgmt->recv_msg_from_mgmt : + &pf_to_mgmt->recv_resp_msg_from_mgmt; + + recv_mgmt_msg_handler(pf_to_mgmt, header, recv_msg); +} + +/** + * alloc_recv_msg - allocate received message memory + * @recv_msg: pointer that will hold the allocated data + * Return: 0 - success, negative - failure + **/ +static int alloc_recv_msg(struct sphw_recv_msg *recv_msg) +{ + recv_msg->seq_id = MGMT_MSG_MAX_SEQ_ID; + + recv_msg->msg = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL); + if (!recv_msg->msg) + return -ENOMEM; + + return 0; +} + +/** + * free_recv_msg - free received message memory + * @recv_msg: pointer that holds the allocated data + **/ +static void free_recv_msg(struct sphw_recv_msg *recv_msg) +{ + kfree(recv_msg->msg); +} + +/** + * alloc_msg_buf - allocate all the message buffers of PF to MGMT channel + * @pf_to_mgmt: PF to MGMT channel + * Return: 0 - success, negative - failure + **/ +static int alloc_msg_buf(struct sphw_msg_pf_to_mgmt *pf_to_mgmt) +{ + int err; + void *dev = pf_to_mgmt->hwdev->dev_hdl; + + err = alloc_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt); + if (err) { + sdk_err(dev, "Failed to allocate recv msg\n"); + return err; + } + + err = alloc_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt); + if (err) { + sdk_err(dev, "Failed to allocate resp recv msg\n"); + goto alloc_msg_for_resp_err; + } + + pf_to_mgmt->async_msg_buf = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL); + if (!pf_to_mgmt->async_msg_buf) { + err = -ENOMEM; + goto async_msg_buf_err; + } + + pf_to_mgmt->sync_msg_buf = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL); + if (!pf_to_mgmt->sync_msg_buf) { + err = -ENOMEM; + goto sync_msg_buf_err; + } + + pf_to_mgmt->mgmt_ack_buf = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL); + if (!pf_to_mgmt->mgmt_ack_buf) { + err = -ENOMEM; + goto ack_msg_buf_err; + } + + return 0; + +ack_msg_buf_err: + kfree(pf_to_mgmt->sync_msg_buf); + +sync_msg_buf_err: + kfree(pf_to_mgmt->async_msg_buf); + +async_msg_buf_err: + free_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt); + +alloc_msg_for_resp_err: + free_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt); + return err; +} + +/** + * free_msg_buf - free all the message buffers of PF to MGMT channel + * @pf_to_mgmt: PF to MGMT channel + * Return: 0 - success, negative - failure + **/ +static void free_msg_buf(struct sphw_msg_pf_to_mgmt *pf_to_mgmt) +{ + kfree(pf_to_mgmt->mgmt_ack_buf); + kfree(pf_to_mgmt->sync_msg_buf); + kfree(pf_to_mgmt->async_msg_buf); + + free_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt); + free_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt); +} + +/** + * sphw_pf_to_mgmt_init - initialize PF to MGMT channel + * @hwdev: the pointer to hw device + * Return: 0 - success, negative - failure + **/ +int sphw_pf_to_mgmt_init(struct sphw_hwdev *hwdev) +{ + struct sphw_msg_pf_to_mgmt *pf_to_mgmt; + void *dev = hwdev->dev_hdl; + int err; + + pf_to_mgmt = kzalloc(sizeof(*pf_to_mgmt), GFP_KERNEL); + if (!pf_to_mgmt) + return -ENOMEM; + + hwdev->pf_to_mgmt = pf_to_mgmt; + pf_to_mgmt->hwdev = hwdev; + spin_lock_init(&pf_to_mgmt->async_msg_lock); + spin_lock_init(&pf_to_mgmt->sync_event_lock); + sema_init(&pf_to_mgmt->sync_msg_lock, 1); + pf_to_mgmt->workq = create_singlethread_workqueue(SPHW_MGMT_WQ_NAME); + if (!pf_to_mgmt->workq) { + sdk_err(dev, "Failed to initialize MGMT workqueue\n"); + err = -ENOMEM; + goto create_mgmt_workq_err; + } + + err = alloc_msg_buf(pf_to_mgmt); + if (err) { + sdk_err(dev, "Failed to allocate msg 
buffers\n"); + goto alloc_msg_buf_err; + } + + err = sphw_api_cmd_init(hwdev, pf_to_mgmt->cmd_chain); + if (err) { + sdk_err(dev, "Failed to init the api cmd chains\n"); + goto api_cmd_init_err; + } + + return 0; + +api_cmd_init_err: + free_msg_buf(pf_to_mgmt); + +alloc_msg_buf_err: + destroy_workqueue(pf_to_mgmt->workq); + +create_mgmt_workq_err: + kfree(pf_to_mgmt); + + return err; +} + +/** + * sphw_pf_to_mgmt_free - free PF to MGMT channel + * @hwdev: the pointer to hw device + **/ +void sphw_pf_to_mgmt_free(struct sphw_hwdev *hwdev) +{ + struct sphw_msg_pf_to_mgmt *pf_to_mgmt = hwdev->pf_to_mgmt; + + /* destroy workqueue before free related pf_to_mgmt resources in case of + * illegal resource access + */ + destroy_workqueue(pf_to_mgmt->workq); + sphw_api_cmd_free(pf_to_mgmt->cmd_chain); + + free_msg_buf(pf_to_mgmt); + kfree(pf_to_mgmt); +} + +void sphw_flush_mgmt_workq(void *hwdev) +{ + struct sphw_hwdev *dev = (struct sphw_hwdev *)hwdev; + + flush_workqueue(dev->aeqs->workq); + + if (sphw_func_type(dev) != TYPE_VF) + flush_workqueue(dev->pf_to_mgmt->workq); +} + +int sphw_api_cmd_read_ack(void *hwdev, u8 dest, const void *cmd, u16 size, void *ack, u16 ack_size) +{ + struct sphw_msg_pf_to_mgmt *pf_to_mgmt = NULL; + struct sphw_api_cmd_chain *chain = NULL; + + if (!hwdev || !cmd || (ack_size && !ack)) + return -EINVAL; + + pf_to_mgmt = ((struct sphw_hwdev *)hwdev)->pf_to_mgmt; + chain = pf_to_mgmt->cmd_chain[SPHW_API_CMD_POLL_READ]; + + if (!(((struct sphw_hwdev *)hwdev)->chip_present_flag)) + return -EPERM; + + return sphw_api_cmd_read(chain, dest, cmd, size, ack, ack_size); +} + +/** + * api cmd write or read bypass default use poll, if want to use aeq interrupt, + * please set wb_trigger_aeqe to 1 + **/ +int sphw_api_cmd_write_nack(void *hwdev, u8 dest, const void *cmd, u16 size) +{ + struct sphw_msg_pf_to_mgmt *pf_to_mgmt = NULL; + struct sphw_api_cmd_chain *chain = NULL; + + if (!hwdev || !size || !cmd) + return -EINVAL; + + pf_to_mgmt = ((struct sphw_hwdev *)hwdev)->pf_to_mgmt; + chain = pf_to_mgmt->cmd_chain[SPHW_API_CMD_POLL_WRITE]; + + if (!(((struct sphw_hwdev *)hwdev)->chip_present_flag)) + return -EPERM; + + return sphw_api_cmd_write(chain, dest, cmd, size); +} + +static int get_clp_reg(void *hwdev, enum clp_data_type data_type, + enum clp_reg_type reg_type, u32 *reg_addr) +{ + struct sphw_hwdev *dev = hwdev; + u32 offset; + + offset = SPHW_CLP_REG_GAP * sphw_pcie_itf_id(dev); + + switch (reg_type) { + case SPHW_CLP_BA_HOST: + *reg_addr = (data_type == SPHW_CLP_REQ_HOST) ? + SPHW_CLP_REG(REQBASE) : + SPHW_CLP_REG(RSPBASE); + break; + + case SPHW_CLP_SIZE_HOST: + *reg_addr = SPHW_CLP_REG(SIZE); + break; + + case SPHW_CLP_LEN_HOST: + *reg_addr = (data_type == SPHW_CLP_REQ_HOST) ? 
+ SPHW_CLP_REG(REQ) : SPHW_CLP_REG(RSP); + break; + + case SPHW_CLP_START_REQ_HOST: + *reg_addr = SPHW_CLP_REG(REQ); + break; + + case SPHW_CLP_READY_RSP_HOST: + *reg_addr = SPHW_CLP_REG(RSP); + break; + + default: + *reg_addr = 0; + break; + } + if (*reg_addr == 0) + return -EINVAL; + + *reg_addr += offset; + + return 0; +} + +static inline int clp_param_valid(struct sphw_hwdev *hwdev, + enum clp_data_type data_type, + enum clp_reg_type reg_type) +{ + if (data_type == SPHW_CLP_REQ_HOST && + reg_type == SPHW_CLP_READY_RSP_HOST) + return -EINVAL; + + if (data_type == SPHW_CLP_RSP_HOST && + reg_type == SPHW_CLP_START_REQ_HOST) + return -EINVAL; + + return 0; +} + +static u32 get_clp_reg_value(struct sphw_hwdev *hwdev, + enum clp_data_type data_type, + enum clp_reg_type reg_type, u32 reg_addr) +{ + u32 value; + + value = sphw_hwif_read_reg(hwdev->hwif, reg_addr); + + switch (reg_type) { + case SPHW_CLP_BA_HOST: + value = ((value >> SPHW_CLP_OFFSET(BASE)) & + SPHW_CLP_MASK(BASE)); + break; + + case SPHW_CLP_SIZE_HOST: + if (data_type == SPHW_CLP_REQ_HOST) + value = ((value >> SPHW_CLP_OFFSET(REQ_SIZE)) & + SPHW_CLP_MASK(SIZE)); + else + value = ((value >> SPHW_CLP_OFFSET(RSP_SIZE)) & + SPHW_CLP_MASK(SIZE)); + break; + + case SPHW_CLP_LEN_HOST: + value = ((value >> SPHW_CLP_OFFSET(LEN)) & + SPHW_CLP_MASK(LEN)); + break; + + case SPHW_CLP_START_REQ_HOST: + value = ((value >> SPHW_CLP_OFFSET(START)) & + SPHW_CLP_MASK(START)); + break; + + case SPHW_CLP_READY_RSP_HOST: + value = ((value >> SPHW_CLP_OFFSET(READY)) & + SPHW_CLP_MASK(READY)); + break; + + default: + break; + } + + return value; +} + +static int sphw_read_clp_reg(struct sphw_hwdev *hwdev, enum clp_data_type data_type, + enum clp_reg_type reg_type, u32 *read_value) +{ + u32 reg_addr; + int err; + + err = clp_param_valid(hwdev, data_type, reg_type); + if (err) + return err; + + err = get_clp_reg(hwdev, data_type, reg_type, ®_addr); + if (err) + return err; + + *read_value = get_clp_reg_value(hwdev, data_type, reg_type, reg_addr); + + return 0; +} + +static int check_data_type(enum clp_data_type data_type, enum clp_reg_type reg_type) +{ + if (data_type == SPHW_CLP_REQ_HOST && + reg_type == SPHW_CLP_READY_RSP_HOST) + return -EINVAL; + if (data_type == SPHW_CLP_RSP_HOST && + reg_type == SPHW_CLP_START_REQ_HOST) + return -EINVAL; + + return 0; +} + +static int check_reg_value(enum clp_reg_type reg_type, u32 value) +{ + if (reg_type == SPHW_CLP_BA_HOST && + value > SPHW_CLP_SRAM_BASE_REG_MAX) + return -EINVAL; + + if (reg_type == SPHW_CLP_SIZE_HOST && + value > SPHW_CLP_SRAM_SIZE_REG_MAX) + return -EINVAL; + + if (reg_type == SPHW_CLP_LEN_HOST && + value > SPHW_CLP_LEN_REG_MAX) + return -EINVAL; + + if ((reg_type == SPHW_CLP_START_REQ_HOST || + reg_type == SPHW_CLP_READY_RSP_HOST) && + value > SPHW_CLP_START_OR_READY_REG_MAX) + return -EINVAL; + + return 0; +} + +static int sphw_check_clp_init_status(struct sphw_hwdev *hwdev) +{ + int err; + u32 reg_value = 0; + + err = sphw_read_clp_reg(hwdev, SPHW_CLP_REQ_HOST, SPHW_CLP_BA_HOST, ®_value); + if (err || !reg_value) { + sdk_err(hwdev->dev_hdl, "Wrong req ba value: 0x%x\n", + reg_value); + return -EINVAL; + } + + err = sphw_read_clp_reg(hwdev, SPHW_CLP_RSP_HOST, SPHW_CLP_BA_HOST, ®_value); + if (err || !reg_value) { + sdk_err(hwdev->dev_hdl, "Wrong rsp ba value: 0x%x\n", + reg_value); + return -EINVAL; + } + + err = sphw_read_clp_reg(hwdev, SPHW_CLP_REQ_HOST, SPHW_CLP_SIZE_HOST, ®_value); + if (err || !reg_value) { + sdk_err(hwdev->dev_hdl, "Wrong req size\n"); + return -EINVAL; + } + + err = 
sphw_read_clp_reg(hwdev, SPHW_CLP_RSP_HOST, SPHW_CLP_SIZE_HOST, ®_value); + if (err || !reg_value) { + sdk_err(hwdev->dev_hdl, "Wrong rsp size\n"); + return -EINVAL; + } + + return 0; +} + +static void sphw_write_clp_reg(struct sphw_hwdev *hwdev, enum clp_data_type data_type, + enum clp_reg_type reg_type, u32 value) +{ + u32 reg_addr, reg_value; + + if (check_data_type(data_type, reg_type)) + return; + + if (check_reg_value(reg_type, value)) + return; + + if (get_clp_reg(hwdev, data_type, reg_type, ®_addr)) + return; + + reg_value = sphw_hwif_read_reg(hwdev->hwif, reg_addr); + + switch (reg_type) { + case SPHW_CLP_LEN_HOST: + reg_value = reg_value & + (~(SPHW_CLP_MASK(LEN) << SPHW_CLP_OFFSET(LEN))); + reg_value = reg_value | (value << SPHW_CLP_OFFSET(LEN)); + break; + + case SPHW_CLP_START_REQ_HOST: + reg_value = reg_value & + (~(SPHW_CLP_MASK(START) << + SPHW_CLP_OFFSET(START))); + reg_value = reg_value | (value << SPHW_CLP_OFFSET(START)); + break; + + case SPHW_CLP_READY_RSP_HOST: + reg_value = reg_value & + (~(SPHW_CLP_MASK(READY) << + SPHW_CLP_OFFSET(READY))); + reg_value = reg_value | (value << SPHW_CLP_OFFSET(READY)); + break; + + default: + return; + } + + sphw_hwif_write_reg(hwdev->hwif, reg_addr, reg_value); +} + +static int sphw_read_clp_data(struct sphw_hwdev *hwdev, void *buf_out, u16 *out_size) +{ + int err; + u32 reg = SPHW_CLP_DATA(RSP); + u32 ready, delay_cnt; + u32 *ptr = (u32 *)buf_out; + u32 temp_out_size = 0; + + err = sphw_read_clp_reg(hwdev, SPHW_CLP_RSP_HOST, SPHW_CLP_READY_RSP_HOST, &ready); + if (err) + return err; + + delay_cnt = 0; + while (ready == 0) { + usleep_range(9000, 10000); + delay_cnt++; + err = sphw_read_clp_reg(hwdev, SPHW_CLP_RSP_HOST, SPHW_CLP_READY_RSP_HOST, &ready); + if (err || delay_cnt > SPHW_CLP_DELAY_CNT_MAX) { + sdk_err(hwdev->dev_hdl, "Timeout with delay_cnt: %u\n", + delay_cnt); + return -EINVAL; + } + } + + err = sphw_read_clp_reg(hwdev, SPHW_CLP_RSP_HOST, SPHW_CLP_LEN_HOST, &temp_out_size); + if (err) + return err; + + if (temp_out_size > SPHW_CLP_SRAM_SIZE_REG_MAX || !temp_out_size) { + sdk_err(hwdev->dev_hdl, "Invalid temp_out_size: %u\n", + temp_out_size); + return -EINVAL; + } + + *out_size = (u16)temp_out_size; + for (; temp_out_size > 0; temp_out_size--) { + *ptr = sphw_hwif_read_reg(hwdev->hwif, reg); + ptr++; + /* read 4 bytes every time */ + reg = reg + 4; + } + + sphw_write_clp_reg(hwdev, SPHW_CLP_RSP_HOST, SPHW_CLP_READY_RSP_HOST, (u32)0x0); + sphw_write_clp_reg(hwdev, SPHW_CLP_RSP_HOST, SPHW_CLP_LEN_HOST, (u32)0x0); + + return 0; +} + +static int sphw_write_clp_data(struct sphw_hwdev *hwdev, void *buf_in, u16 in_size) +{ + int err; + u32 reg = SPHW_CLP_DATA(REQ); + u32 start = 1; + u32 delay_cnt = 0; + u32 *ptr = (u32 *)buf_in; + + err = sphw_read_clp_reg(hwdev, SPHW_CLP_REQ_HOST, SPHW_CLP_START_REQ_HOST, &start); + if (err) + return err; + + while (start == 1) { + usleep_range(9000, 10000); + delay_cnt++; + err = sphw_read_clp_reg(hwdev, SPHW_CLP_REQ_HOST, SPHW_CLP_START_REQ_HOST, &start); + if (err || delay_cnt > SPHW_CLP_DELAY_CNT_MAX) + return -EINVAL; + } + + sphw_write_clp_reg(hwdev, SPHW_CLP_REQ_HOST, SPHW_CLP_LEN_HOST, in_size); + sphw_write_clp_reg(hwdev, SPHW_CLP_REQ_HOST, SPHW_CLP_START_REQ_HOST, (u32)0x1); + + for (; in_size > 0; in_size--) { + sphw_hwif_write_reg(hwdev->hwif, reg, *ptr); + ptr++; + reg = reg + 4; + } + + return 0; +} + +static void sphw_clear_clp_data(struct sphw_hwdev *hwdev, enum clp_data_type data_type) +{ + u32 reg = (data_type == SPHW_CLP_REQ_HOST) ? 
+ SPHW_CLP_DATA(REQ) : SPHW_CLP_DATA(RSP); + u32 count = SPHW_CLP_INPUT_BUF_LEN_HOST / SPHW_CLP_DATA_UNIT_HOST; + + for (; count > 0; count--) { + sphw_hwif_write_reg(hwdev->hwif, reg, 0x0); + reg = reg + 4; + } +} + +int sphw_pf_clp_to_mgmt(void *hwdev, u8 mod, u16 cmd, const void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) +{ + struct sphw_clp_pf_to_mgmt *clp_pf_to_mgmt; + struct sphw_hwdev *dev = hwdev; + u64 header; + u16 real_size; + u8 *clp_msg_buf; + int err; + + clp_pf_to_mgmt = ((struct sphw_hwdev *)hwdev)->clp_pf_to_mgmt; + clp_msg_buf = clp_pf_to_mgmt->clp_msg_buf; + + /* 4 bytes alignment */ + if (in_size % SPHW_CLP_DATA_UNIT_HOST) + real_size = (in_size + (u16)sizeof(header) + SPHW_CLP_DATA_UNIT_HOST); + else + real_size = in_size + (u16)sizeof(header); + real_size = real_size / SPHW_CLP_DATA_UNIT_HOST; + + if (real_size > (SPHW_CLP_INPUT_BUF_LEN_HOST / SPHW_CLP_DATA_UNIT_HOST)) { + sdk_err(dev->dev_hdl, "Invalid real_size: %u\n", real_size); + return -EINVAL; + } + down(&clp_pf_to_mgmt->clp_msg_lock); + + err = sphw_check_clp_init_status(dev); + if (err) { + sdk_err(dev->dev_hdl, "Check clp init status failed\n"); + up(&clp_pf_to_mgmt->clp_msg_lock); + return err; + } + + sphw_clear_clp_data(dev, SPHW_CLP_RSP_HOST); + sphw_write_clp_reg(dev, SPHW_CLP_RSP_HOST, SPHW_CLP_READY_RSP_HOST, 0x0); + + /* Send request */ + memset(clp_msg_buf, 0x0, SPHW_CLP_INPUT_BUF_LEN_HOST); + clp_prepare_header(dev, &header, in_size, mod, 0, 0, cmd, 0); + + memcpy(clp_msg_buf, &header, sizeof(header)); + clp_msg_buf += sizeof(header); + memcpy(clp_msg_buf, buf_in, in_size); + + clp_msg_buf = clp_pf_to_mgmt->clp_msg_buf; + + sphw_clear_clp_data(dev, SPHW_CLP_REQ_HOST); + err = sphw_write_clp_data(hwdev, clp_pf_to_mgmt->clp_msg_buf, real_size); + if (err) { + sdk_err(dev->dev_hdl, "Send clp request failed\n"); + up(&clp_pf_to_mgmt->clp_msg_lock); + return -EINVAL; + } + + /* Get response */ + clp_msg_buf = clp_pf_to_mgmt->clp_msg_buf; + memset(clp_msg_buf, 0x0, SPHW_CLP_INPUT_BUF_LEN_HOST); + err = sphw_read_clp_data(hwdev, clp_msg_buf, &real_size); + sphw_clear_clp_data(dev, SPHW_CLP_RSP_HOST); + if (err) { + sdk_err(dev->dev_hdl, "Read clp response failed\n"); + up(&clp_pf_to_mgmt->clp_msg_lock); + return -EINVAL; + } + + real_size = (u16)((real_size * SPHW_CLP_DATA_UNIT_HOST) & 0xffff); + if (real_size <= sizeof(header) || real_size > SPHW_CLP_INPUT_BUF_LEN_HOST) { + sdk_err(dev->dev_hdl, "Invalid response size: %u", real_size); + up(&clp_pf_to_mgmt->clp_msg_lock); + return -EINVAL; + } + real_size = real_size - sizeof(header); + if (real_size != *out_size) { + sdk_err(dev->dev_hdl, "Invalid real_size:%u, out_size: %u\n", + real_size, *out_size); + up(&clp_pf_to_mgmt->clp_msg_lock); + return -EINVAL; + } + + memcpy(buf_out, (clp_msg_buf + sizeof(header)), real_size); + up(&clp_pf_to_mgmt->clp_msg_lock); + + return 0; +} + +int sphw_clp_to_mgmt(void *hwdev, u8 mod, u16 cmd, const void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) + +{ + struct sphw_hwdev *dev = hwdev; + int err; + + if (!dev) + return -EINVAL; + + if (!dev->chip_present_flag) + return -EPERM; + + if (sphw_func_type(hwdev) == TYPE_VF) + return -EINVAL; + + err = sphw_pf_clp_to_mgmt(dev, mod, cmd, buf_in, in_size, buf_out, + out_size); + + return err; +} + +int sphw_clp_pf_to_mgmt_init(struct sphw_hwdev *hwdev) +{ + struct sphw_clp_pf_to_mgmt *clp_pf_to_mgmt; + + clp_pf_to_mgmt = kzalloc(sizeof(*clp_pf_to_mgmt), GFP_KERNEL); + if (!clp_pf_to_mgmt) + return -ENOMEM; + + clp_pf_to_mgmt->clp_msg_buf = 
kzalloc(SPHW_CLP_INPUT_BUF_LEN_HOST, + GFP_KERNEL); + if (!clp_pf_to_mgmt->clp_msg_buf) { + kfree(clp_pf_to_mgmt); + return -ENOMEM; + } + sema_init(&clp_pf_to_mgmt->clp_msg_lock, 1); + + hwdev->clp_pf_to_mgmt = clp_pf_to_mgmt; + + return 0; +} + +void sphw_clp_pf_to_mgmt_free(struct sphw_hwdev *hwdev) +{ + struct sphw_clp_pf_to_mgmt *clp_pf_to_mgmt = hwdev->clp_pf_to_mgmt; + + kfree(clp_pf_to_mgmt->clp_msg_buf); + kfree(clp_pf_to_mgmt); +} diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mgmt.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mgmt.h new file mode 100644 index 000000000000..c6f63f935ea5 --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mgmt.h @@ -0,0 +1,156 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#ifndef SPHW_MGMT_H +#define SPHW_MGMT_H + +#define SPHW_MGMT_WQ_NAME "sphw_mgmt" + +#define SPHW_CLP_REG_GAP 0x20 +#define SPHW_CLP_INPUT_BUF_LEN_HOST 4096UL +#define SPHW_CLP_DATA_UNIT_HOST 4UL + +enum clp_data_type { + SPHW_CLP_REQ_HOST = 0, + SPHW_CLP_RSP_HOST = 1 +}; + +enum clp_reg_type { + SPHW_CLP_BA_HOST = 0, + SPHW_CLP_SIZE_HOST = 1, + SPHW_CLP_LEN_HOST = 2, + SPHW_CLP_START_REQ_HOST = 3, + SPHW_CLP_READY_RSP_HOST = 4 +}; + +#define SPHW_CLP_REQ_SIZE_OFFSET 0 +#define SPHW_CLP_RSP_SIZE_OFFSET 16 +#define SPHW_CLP_BASE_OFFSET 0 +#define SPHW_CLP_LEN_OFFSET 0 +#define SPHW_CLP_START_OFFSET 31 +#define SPHW_CLP_READY_OFFSET 31 +#define SPHW_CLP_OFFSET(member) (SPHW_CLP_##member##_OFFSET) + +#define SPHW_CLP_SIZE_MASK 0x7ffUL +#define SPHW_CLP_BASE_MASK 0x7ffffffUL +#define SPHW_CLP_LEN_MASK 0x7ffUL +#define SPHW_CLP_START_MASK 0x1UL +#define SPHW_CLP_READY_MASK 0x1UL +#define SPHW_CLP_MASK(member) (SPHW_CLP_##member##_MASK) + +#define SPHW_CLP_DELAY_CNT_MAX 200UL +#define SPHW_CLP_SRAM_SIZE_REG_MAX 0x3ff +#define SPHW_CLP_SRAM_BASE_REG_MAX 0x7ffffff +#define SPHW_CLP_LEN_REG_MAX 0x3ff +#define SPHW_CLP_START_OR_READY_REG_MAX 0x1 + +struct sphw_recv_msg { + void *msg; + + u16 msg_len; + enum sphw_mod_type mod; + u16 cmd; + u8 seq_id; + u16 msg_id; + int async_mgmt_to_pf; + + struct completion recv_done; +}; + +struct sphw_msg_head { + u8 status; + u8 version; + u8 resp_aeq_num; + u8 rsvd0[5]; +}; + +enum comm_pf_to_mgmt_event_state { + SEND_EVENT_UNINIT = 0, + SEND_EVENT_START, + SEND_EVENT_SUCCESS, + SEND_EVENT_FAIL, + SEND_EVENT_TIMEOUT, + SEND_EVENT_END, +}; + +enum sphw_mgmt_msg_cb_state { + SPHW_MGMT_MSG_CB_REG = 0, + SPHW_MGMT_MSG_CB_RUNNING, +}; + +struct sphw_clp_pf_to_mgmt { + struct semaphore clp_msg_lock; + void *clp_msg_buf; +}; + +struct sphw_msg_pf_to_mgmt { + struct sphw_hwdev *hwdev; + + /* Async cmd can not be scheduling */ + spinlock_t async_msg_lock; + struct semaphore sync_msg_lock; + + struct workqueue_struct *workq; + + void *async_msg_buf; + void *sync_msg_buf; + void *mgmt_ack_buf; + + struct sphw_recv_msg recv_msg_from_mgmt; + struct sphw_recv_msg recv_resp_msg_from_mgmt; + + u16 async_msg_id; + u16 sync_msg_id; + struct sphw_api_cmd_chain *cmd_chain[SPHW_API_CMD_MAX]; + + sphw_mgmt_msg_cb recv_mgmt_msg_cb[SPHW_MOD_HW_MAX]; + void *recv_mgmt_msg_data[SPHW_MOD_HW_MAX]; + unsigned long mgmt_msg_cb_state[SPHW_MOD_HW_MAX]; + + void *async_msg_cb_data[SPHW_MOD_HW_MAX]; + + /* lock when sending msg */ + spinlock_t sync_event_lock; + enum comm_pf_to_mgmt_event_state event_flag; +}; + +struct sphw_mgmt_msg_handle_work { + struct work_struct work; + struct sphw_msg_pf_to_mgmt *pf_to_mgmt; + + void *msg; + u16 msg_len; + + enum sphw_mod_type mod; + u16 cmd; + u16 msg_id; + + int 
async_mgmt_to_pf; +}; + +void sphw_mgmt_msg_aeqe_handler(void *handle, u8 *header, u8 size); + +int sphw_pf_to_mgmt_init(struct sphw_hwdev *hwdev); + +void sphw_pf_to_mgmt_free(struct sphw_hwdev *hwdev); + +int sphw_pf_to_mgmt_sync(void *hwdev, u8 mod, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, u32 timeout); +int sphw_pf_to_mgmt_async(void *hwdev, u8 mod, u16 cmd, const void *buf_in, u16 in_size); + +int sphw_pf_msg_to_mgmt_sync(void *hwdev, u8 mod, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, u32 timeout); + +int sphw_api_cmd_read_ack(void *hwdev, u8 dest, const void *cmd, u16 size, + void *ack, u16 ack_size); + +int sphw_api_cmd_write_nack(void *hwdev, u8 dest, const void *cmd, u16 size); + +int sphw_pf_clp_to_mgmt(void *hwdev, u8 mod, u16 cmd, const void *buf_in, + u16 in_size, void *buf_out, u16 *out_size); + +int sphw_clp_pf_to_mgmt_init(struct sphw_hwdev *hwdev); + +void sphw_clp_pf_to_mgmt_free(struct sphw_hwdev *hwdev); + +#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mgmt_msg_base.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mgmt_msg_base.h new file mode 100644 index 000000000000..13f726895f58 --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mgmt_msg_base.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#ifndef SPHW_MGMT_MSG_BASE_H +#define SPHW_MGMT_MSG_BASE_H + +#define MGMT_MSG_CMD_OP_SET 1 +#define MGMT_MSG_CMD_OP_GET 0 + +#define MGMT_MSG_CMD_OP_START 1 +#define MGMT_MSG_CMD_OP_STOP 0 + +struct mgmt_msg_head { + u8 status; + u8 version; + u8 rsvd0[6]; +}; + +#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mt.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mt.h new file mode 100644 index 000000000000..1960614b384b --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mt.h @@ -0,0 +1,534 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#ifndef SPHW_MT_H +#define SPHW_MT_H + +#define NICTOOL_CMD_TYPE 0x18 + +struct api_cmd_rd { + u32 pf_id; + u8 dest; + u8 *cmd; + u16 size; + void *ack; + u16 ack_size; +}; + +struct api_cmd_wr { + u32 pf_id; + u8 dest; + u8 *cmd; + u16 size; +}; + +struct pf_dev_info { + u64 bar0_size; + u8 bus; + u8 slot; + u8 func; + u64 phy_addr; +}; + +/* Indicates the maximum number of interrupts that can be recorded. + * Subsequent interrupts are not recorded in FFM. + */ +#define FFM_RECORD_NUM_MAX 64 + +struct ffm_intr_info { + u8 node_id; + /* error level of the interrupt source */ + u8 err_level; + /* Classification by interrupt source properties */ + u16 err_type; + u32 err_csr_addr; + u32 err_csr_value; +}; + +struct ffm_intr_tm_info { + struct ffm_intr_info intr_info; + u8 times; + u8 sec; + u8 min; + u8 hour; + u8 mday; + u8 mon; + u16 year; +}; + +struct ffm_record_info { + u32 ffm_num; + u32 last_err_csr_addr; + u32 last_err_csr_value; + struct ffm_intr_tm_info ffm[FFM_RECORD_NUM_MAX]; +}; + +struct dbgtool_k_glb_info { + struct semaphore dbgtool_sem; + struct ffm_record_info *ffm; +}; + +struct msg_2_up { + u8 pf_id; + u8 mod; + u8 cmd; + void *buf_in; + u16 in_size; + void *buf_out; + u16 *out_size; +}; + +struct dbgtool_param { + union { + struct api_cmd_rd api_rd; + struct api_cmd_wr api_wr; + struct pf_dev_info *dev_info; + struct ffm_record_info *ffm_rd; + struct msg_2_up msg2up; + } param; + char chip_name[16]; +}; + +/* dbgtool command type */ +/* You can add commands as required. 
The dbgtool command can be + * used to invoke all interfaces of the kernel-mode x86 driver. + */ +enum dbgtool_cmd { + DBGTOOL_CMD_API_RD = 0, + DBGTOOL_CMD_API_WR, + DBGTOOL_CMD_FFM_RD, + DBGTOOL_CMD_FFM_CLR, + DBGTOOL_CMD_PF_DEV_INFO_GET, + DBGTOOL_CMD_MSG_2_UP, + DBGTOOL_CMD_FREE_MEM, + DBGTOOL_CMD_NUM +}; + +#define PF_MAX_SIZE 16 +#define BUSINFO_LEN 32 +#define SELF_TEST_BAR_ADDR_OFFSET 0x883c + +enum module_name { + SEND_TO_NPU = 1, + SEND_TO_MPU, + SEND_TO_SM, + + SEND_TO_HW_DRIVER, + SEND_TO_NIC_DRIVER, + SEND_TO_OVS_DRIVER, + SEND_TO_ROCE_DRIVER, + SEND_TO_TOE_DRIVER, + SEND_TO_IWAP_DRIVER, + SEND_TO_FC_DRIVER, + SEND_FCOE_DRIVER, +}; + +enum driver_cmd_type { + TX_INFO = 1, + Q_NUM, + TX_WQE_INFO, + TX_MAPPING, + RX_INFO, + RX_WQE_INFO, + RX_CQE_INFO, + UPRINT_FUNC_EN, + UPRINT_FUNC_RESET, + UPRINT_SET_PATH, + UPRINT_GET_STATISTICS, + FUNC_TYPE, + GET_FUNC_IDX, + GET_INTER_NUM, + CLOSE_TX_STREAM, + GET_DRV_VERSION, + CLEAR_FUNC_STASTIC, + GET_HW_STATS, + CLEAR_HW_STATS, + GET_SELF_TEST_RES, + GET_CHIP_FAULT_STATS, + NIC_RSVD1, + NIC_RSVD2, + NIC_RSVD3, + GET_CHIP_ID, + GET_SINGLE_CARD_INFO, + GET_FIRMWARE_ACTIVE_STATUS, + ROCE_DFX_FUNC, + GET_DEVICE_ID, + GET_PF_DEV_INFO, + CMD_FREE_MEM, + GET_LOOPBACK_MODE = 32, + SET_LOOPBACK_MODE, + SET_LINK_MODE, + SET_PF_BW_LIMIT, + GET_PF_BW_LIMIT, + ROCE_CMD, + GET_POLL_WEIGHT, + SET_POLL_WEIGHT, + GET_HOMOLOGUE, + SET_HOMOLOGUE, + GET_SSET_COUNT, + GET_SSET_ITEMS, + IS_DRV_IN_VM, + LRO_ADPT_MGMT, + SET_INTER_COAL_PARAM, + GET_INTER_COAL_PARAM, + GET_CHIP_INFO, + GET_NIC_STATS_LEN, + GET_NIC_STATS_STRING, + GET_NIC_STATS_INFO, + GET_PF_ID, + NIC_RSVD4, + NIC_RSVD5, + DCB_QOS_INFO, + DCB_PFC_STATE, + DCB_ETS_STATE, + DCB_STATE, + NIC_RSVD6, + NIC_RSVD7, + GET_ULD_DEV_NAME, + + RSS_CFG = 0x40, + RSS_INDIR, + PORT_ID, + + GET_FUNC_CAP = 0x50, + + GET_WIN_STAT = 0x60, + WIN_CSR_READ = 0x61, + WIN_CSR_WRITE = 0x62, + WIN_API_CMD_RD = 0x63, + + VM_COMPAT_TEST = 0xFF +}; + +enum api_chain_cmd_type { + API_CSR_READ, + API_CSR_WRITE +}; + +enum sm_cmd_type { + SM_CTR_RD32 = 1, + SM_CTR_RD64_PAIR, + SM_CTR_RD64, + SM_CTR_RD32_CLEAR, + SM_CTR_RD64_PAIR_CLEAR, + SM_CTR_RD64_CLEAR +}; + +struct cqm_stats { + atomic_t cqm_cmd_alloc_cnt; + atomic_t cqm_cmd_free_cnt; + atomic_t cqm_send_cmd_box_cnt; + atomic_t cqm_send_cmd_imm_cnt; + atomic_t cqm_db_addr_alloc_cnt; + atomic_t cqm_db_addr_free_cnt; + atomic_t cqm_fc_srq_create_cnt; + atomic_t cqm_srq_create_cnt; + atomic_t cqm_rq_create_cnt; + atomic_t cqm_qpc_mpt_create_cnt; + atomic_t cqm_nonrdma_queue_create_cnt; + atomic_t cqm_rdma_queue_create_cnt; + atomic_t cqm_rdma_table_create_cnt; + atomic_t cqm_qpc_mpt_delete_cnt; + atomic_t cqm_nonrdma_queue_delete_cnt; + atomic_t cqm_rdma_queue_delete_cnt; + atomic_t cqm_rdma_table_delete_cnt; + atomic_t cqm_func_timer_clear_cnt; + atomic_t cqm_func_hash_buf_clear_cnt; + atomic_t cqm_scq_callback_cnt; + atomic_t cqm_ecq_callback_cnt; + atomic_t cqm_nocq_callback_cnt; + atomic_t cqm_aeq_callback_cnt[112]; +}; + +enum sphw_fault_err_level { + FAULT_LEVEL_FATAL, + FAULT_LEVEL_SERIOUS_RESET, + FAULT_LEVEL_HOST, + FAULT_LEVEL_SERIOUS_FLR, + FAULT_LEVEL_GENERAL, + FAULT_LEVEL_SUGGESTION, + FAULT_LEVEL_MAX, +}; + +struct link_event_stats { + atomic_t link_down_stats; + atomic_t link_up_stats; +}; + +enum sphw_fault_type { + FAULT_TYPE_CHIP, + FAULT_TYPE_UCODE, + FAULT_TYPE_MEM_RD_TIMEOUT, + FAULT_TYPE_MEM_WR_TIMEOUT, + FAULT_TYPE_REG_RD_TIMEOUT, + FAULT_TYPE_REG_WR_TIMEOUT, + FAULT_TYPE_PHY_FAULT, + FAULT_TYPE_MAX, +}; + +struct fault_event_stats { + atomic_t 
chip_fault_stats[22][FAULT_LEVEL_MAX]; + atomic_t fault_type_stat[FAULT_TYPE_MAX]; + atomic_t pcie_fault_stats; +}; + +struct sphw_hw_stats { + atomic_t heart_lost_stats; + struct cqm_stats cqm_stats; + struct link_event_stats link_event_stats; + struct fault_event_stats fault_event_stats; +}; + +#ifndef IFNAMSIZ +#define IFNAMSIZ 16 +#endif + +struct pf_info { + char name[IFNAMSIZ]; + char bus_info[BUSINFO_LEN]; + u32 pf_type; +}; + +struct card_info { + struct pf_info pf[PF_MAX_SIZE]; + u32 pf_num; +}; + +struct spnic_nic_loop_mode { + u32 loop_mode; + u32 loop_ctrl; +}; + +enum spnic_show_set { + SHOW_SSET_IO_STATS = 1, +}; + +#define SPNIC_SHOW_ITEM_LEN 32 +struct spnic_show_item { + char name[SPNIC_SHOW_ITEM_LEN]; + u8 hexadecimal; /* 0: decimal , 1: Hexadecimal */ + u8 rsvd[7]; + u64 value; +}; + +#define SPHW_CHIP_FAULT_SIZE (110 * 1024) +#define MAX_DRV_BUF_SIZE 4096 + +struct nic_cmd_chip_fault_stats { + u32 offset; + u8 chip_fault_stats[MAX_DRV_BUF_SIZE]; +}; + +#define NIC_TOOL_MAGIC 'x' + +#define CARD_MAX_SIZE 16 +struct nic_card_id { + u32 id[CARD_MAX_SIZE]; + u32 num; +}; + +struct func_pdev_info { + u64 bar0_phy_addr; + u64 bar0_size; + u64 bar1_phy_addr; + u64 bar1_size; + u64 bar3_phy_addr; + u64 bar3_size; + u64 rsvd1[4]; +}; + +struct sphw_card_func_info { + u32 num_pf; + u32 rsvd0; + u64 usr_api_phy_addr; + struct func_pdev_info pdev_info[CARD_MAX_SIZE]; +}; + +struct wqe_info { + int q_id; + void *slq_handle; + unsigned int wqe_id; +}; + +#define MAX_VER_INFO_LEN 128 +struct drv_version_info { + char ver[MAX_VER_INFO_LEN]; +}; + +struct spnic_tx_hw_page { + u64 phy_addr; + u64 *map_addr; +}; + +struct nic_sq_info { + u16 q_id; + u16 pi; + u16 ci; /* sw_ci */ + u16 fi; /* hw_ci */ + u32 q_depth; + u16 pi_reverse; + u16 wqebb_size; + u8 priority; + u16 *ci_addr; + u64 cla_addr; + void *slq_handle; + struct spnic_tx_hw_page direct_wqe; + struct spnic_tx_hw_page doorbell; + u32 page_idx; + u32 glb_sq_id; +}; + +struct nic_rq_info { + u16 q_id; + u16 glb_rq_id; + u16 hw_pi; + u16 ci; /* sw_ci */ + u16 sw_pi; + u16 wqebb_size; + u16 q_depth; + u16 buf_len; + + void *slq_handle; + u64 ci_wqe_page_addr; + u64 ci_cla_tbl_addr; + + u8 coalesc_timer_cfg; + u8 pending_limt; + u16 msix_idx; + u32 msix_vector; +}; + +#define MT_EPERM 1 /* Operation not permitted */ +#define MT_EIO 2 /* I/O error */ +#define MT_EINVAL 3 /* Invalid argument */ +#define MT_EBUSY 4 /* Device or resource busy */ +#define MT_EOPNOTSUPP 0xFF /* Operation not supported */ + +struct mt_msg_head { + u8 status; + u8 rsvd1[3]; +}; + +#define MT_DCB_OPCODE_WR BIT(0) /* 1 - write, 0 - read */ +struct spnic_mt_qos_info { + struct mt_msg_head head; + + u16 op_code; + u8 valid_cos_bitmap; + u8 valid_up_bitmap; + u32 rsvd1; +}; + +struct spnic_mt_dcb_state { + struct mt_msg_head head; + + u16 op_code; + u8 state; + u8 rsvd; +}; + +#define MT_DCB_ETS_UP_TC BIT(1) +#define MT_DCB_ETS_UP_BW BIT(2) +#define MT_DCB_ETS_UP_PRIO BIT(3) +#define MT_DCB_ETS_TC_BW BIT(4) +#define MT_DCB_ETS_TC_PRIO BIT(5) + +#define DCB_UP_TC_NUM 0x8 +struct spnic_mt_ets_state { + struct mt_msg_head head; + + u16 op_code; + u8 up_tc[DCB_UP_TC_NUM]; + u8 up_bw[DCB_UP_TC_NUM]; + u8 tc_bw[DCB_UP_TC_NUM]; + u8 up_prio_bitmap; + u8 tc_prio_bitmap; + u32 rsvd; +}; + +#define MT_DCB_PFC_PFC_STATE BIT(1) +#define MT_DCB_PFC_PFC_PRI_EN BIT(2) +struct spnic_mt_pfc_state { + struct mt_msg_head head; + + u16 op_code; + u8 state; + u8 pfc_en_bitpamp; + u32 rsvd; +}; + +enum mt_api_type { + API_TYPE_MBOX = 1, + API_TYPE_API_CHAIN_BYPASS, + 
API_TYPE_API_CHAIN_TO_MPU, + API_TYPE_CLP, +}; + +struct npu_cmd_st { + u32 mod : 8; + u32 cmd : 8; + u32 ack_type : 3; + u32 direct_resp : 1; + u32 len : 12; +}; + +struct mpu_cmd_st { + u32 api_type : 8; + u32 mod : 8; + u32 cmd : 16; +}; + +struct msg_module { + char device_name[IFNAMSIZ]; + u32 module; + union { + u32 msg_formate; /* for driver */ + struct npu_cmd_st npu_cmd; + struct mpu_cmd_st mpu_cmd; + }; + u32 timeout; /* for mpu/npu cmd */ + u32 func_idx; + u32 buf_in_size; + u32 buf_out_size; + void *in_buf; + void *out_buf; + int bus_num; + u32 rsvd2[5]; +}; + +int alloc_buff_in(void *hwdev, struct msg_module *nt_msg, u32 in_size, void **buf_in); + +int alloc_buff_out(void *hwdev, struct msg_module *nt_msg, u32 out_size, void **buf_out); + +void free_buff_in(void *hwdev, struct msg_module *nt_msg, void *buf_in); + +void free_buff_out(void *hwdev, struct msg_module *nt_msg, void *buf_out); + +int copy_buf_out_to_user(struct msg_module *nt_msg, u32 out_size, void *buf_out); + +int get_func_type(void *hwdev, const void *buf_in, u32 in_size, void *buf_out, u32 *out_size); + +int get_func_id(void *hwdev, const void *buf_in, u32 in_size, void *buf_out, u32 *out_size); + +int get_drv_version(void *hwdev, const void *buf_in, u32 in_size, void *buf_out, u32 *out_size); + +int get_hw_driver_stats(const void *hwdev, const void *buf_in, u32 in_size, void *buf_out, + u32 *out_size); + +int clear_hw_driver_stats(void *hwdev, const void *buf_in, u32 in_size, void *buf_out, + u32 *out_size); + +int get_chip_faults_stats(const void *hwdev, const void *buf_in, u32 in_size, void *buf_out, + u32 *out_size); + +int get_chip_id_test(void *hwdev, const void *buf_in, u32 in_size, void *buf_out, u32 *out_size); + +int send_to_mpu(void *hwdev, struct msg_module *nt_msg, void *buf_in, u32 in_size, void *buf_out, + u32 *out_size); +int send_to_npu(void *hwdev, struct msg_module *nt_msg, void *buf_in, u32 in_size, void *buf_out, + u32 *out_size); + +int send_to_sm(void *hwdev, struct msg_module *nt_msg, void *buf_in, u32 in_size, void *buf_out, + u32 *out_size); + +#endif /* SPHW_MT_H_ */ diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_prof_adap.c b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_prof_adap.c new file mode 100644 index 000000000000..20ebda15cda2 --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_prof_adap.c @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include <linux/kernel.h> +#include <linux/semaphore.h> +#include <linux/workqueue.h> +#include <linux/dev_printk.h> + +#include "sphw_common.h" +#include "sphw_hwdev.h" +#include "sphw_profile.h" +#include "sphw_prof_adap.h" + +typedef bool (*sphw_is_match_prof)(struct sphw_hwdev *hwdev); + +static bool is_match_prof_default_adapter(struct sphw_hwdev *hwdev) +{ + /* always match default profile adapter in standard scene */ + return true; +} + +enum prof_adapter_type { + PROF_ADAP_TYPE_PANGEA = 1, + + /* Add prof adapter type before default */ + PROF_ADAP_TYPE_DEFAULT, +}; + +/** + * struct sphw_prof_adapter - custom scene's profile adapter + * @type: adapter type + * @match: Check whether the current function is used in the custom scene. + * Implemented in the current source file + * @init: When @match return true, the initialization function called in probe. + * Implemented in the source file of the custom scene + * @deinit: When @match return true, the deinitialization function called when + * remove. 
Implemented in the source file of the custom scene + */ +struct sphw_prof_adapter { + enum prof_adapter_type type; + sphw_is_match_prof match; + sphw_init_prof_attr init; + sphw_deinit_prof_attr deinit; +}; + +struct sphw_prof_adapter prof_adap_objs[] = { + /* Add prof adapter before default profile */ + { + .type = PROF_ADAP_TYPE_DEFAULT, + .match = is_match_prof_default_adapter, + .init = NULL, + .deinit = NULL, + }, +}; + +void sphw_init_profile_adapter(struct sphw_hwdev *hwdev) +{ + struct sphw_prof_adapter *prof_obj = NULL; + u16 num_adap = ARRAY_SIZE(prof_adap_objs); + u16 i; + + for (i = 0; i < num_adap; i++) { + prof_obj = &prof_adap_objs[i]; + if (!(prof_obj->match && prof_obj->match(hwdev))) + continue; + + hwdev->prof_adap_type = prof_obj->type; + hwdev->prof_attr = prof_obj->init ? + prof_obj->init(hwdev) : NULL; + sdk_info(hwdev->dev_hdl, "Find profile adapter, type: %d\n", + hwdev->prof_adap_type); + + break; + } +} + +void sphw_deinit_profile_adapter(struct sphw_hwdev *hwdev) +{ + struct sphw_prof_adapter *prof_obj = NULL; + u16 num_adap = ARRAY_SIZE(prof_adap_objs); + u16 i; + + for (i = 0; i < num_adap; i++) { + prof_obj = &prof_adap_objs[i]; + if (hwdev->prof_adap_type != prof_obj->type) + continue; + + if (prof_obj->deinit) + prof_obj->deinit(hwdev->prof_attr); + break; + } +} diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_prof_adap.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_prof_adap.h new file mode 100644 index 000000000000..f83d3a28c834 --- /dev/null +++ b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_prof_adap.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#ifndef SPHW_PROF_ADAP_H +#define SPHW_PROF_ADAP_H + +#include <linux/workqueue.h> + +#include "sphw_profile.h" + +#define GET_PROF_ATTR_OPS(hwdev) \ + ((hwdev)->prof_attr ? 
+		 (hwdev)->prof_attr->ops : NULL)
+
+static inline int sphw_get_work_cpu_affinity(struct sphw_hwdev *hwdev,
+					     enum cpu_affinity_work_type type)
+{
+	struct sphw_prof_ops *ops = GET_PROF_ATTR_OPS(hwdev);
+
+	if (ops && ops->get_work_cpu_affinity)
+		return ops->get_work_cpu_affinity(hwdev->prof_attr->priv_data, type);
+
+	return WORK_CPU_UNBOUND;
+}
+
+static inline void sphw_fault_post_process(struct sphw_hwdev *hwdev, u16 src, u16 level)
+{
+	struct sphw_prof_ops *ops = GET_PROF_ATTR_OPS(hwdev);
+
+	if (ops && ops->fault_recover)
+		ops->fault_recover(hwdev->prof_attr->priv_data, src, level);
+}
+
+static inline bool sphw_sw_feature_en(struct sphw_hwdev *hwdev, u64 feature_bit)
+{
+	if (!hwdev->prof_attr)
+		return false;
+
+	return (hwdev->prof_attr->sw_feature_cap & feature_bit) &&
+	       (hwdev->prof_attr->dft_sw_feature & feature_bit);
+}
+
+#define SW_FEATURE_EN(hwdev, f_bit) \
+	sphw_sw_feature_en(hwdev, SPHW_SW_F_##f_bit)
+#define SPHW_F_CHANNEL_LOCK_EN(hwdev) SW_FEATURE_EN(hwdev, CHANNEL_LOCK)
+
+void sphw_init_profile_adapter(struct sphw_hwdev *hwdev);
+void sphw_deinit_profile_adapter(struct sphw_hwdev *hwdev);
+
+#endif
diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_profile.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_profile.h
new file mode 100644
index 000000000000..0e1c6c91ba31
--- /dev/null
+++ b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_profile.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */
+
+#ifndef SPHW_PROFILE_H
+#define SPHW_PROFILE_H
+
+enum cpu_affinity_work_type {
+	WORK_TYPE_AEQ,
+	WORK_TYPE_MBOX,
+	WORK_TYPE_MGMT_MSG,
+	WORK_TYPE_COMM,
+};
+
+enum sphw_sw_features {
+	SPHW_SW_F_CHANNEL_LOCK = BIT(0),
+};
+
+struct sphw_prof_ops {
+	void (*fault_recover)(void *data, u16 src, u16 level);
+	int (*get_work_cpu_affinity)(void *data, u32 work_type);
+};
+
+struct sphw_prof_attr {
+	void *priv_data;
+	u64 hw_feature_cap;
+	u64 sw_feature_cap;
+	u64 dft_hw_feature;
+	u64 dft_sw_feature;
+
+	struct sphw_prof_ops *ops;
+};
+
+typedef struct sphw_prof_attr *(*sphw_init_prof_attr)(void *hwdev);
+typedef void (*sphw_deinit_prof_attr)(struct sphw_prof_attr *prof_attr);
+
+#endif
diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_wq.c b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_wq.c
new file mode 100644
index 000000000000..0ec202dfc4d7
--- /dev/null
+++ b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_wq.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "sphw_common.h"
+#include "sphw_wq.h"
+#include "sphw_hwdev.h"
+
+#define WQ_MIN_DEPTH		64
+#define WQ_MAX_DEPTH		65536
+#define WQ_MAX_NUM_PAGES	(PAGE_SIZE / sizeof(u64))
+
+static int wq_init_wq_block(struct sphw_wq *wq)
+{
+	int i;
+
+	if (WQ_IS_0_LEVEL_CLA(wq)) {
+		wq->wq_block_paddr = wq->wq_pages[0].align_paddr;
+		wq->wq_block_vaddr = wq->wq_pages[0].align_vaddr;
+
+		return 0;
+	}
+
+	if (wq->num_wq_pages > WQ_MAX_NUM_PAGES) {
+		sdk_err(wq->dev_hdl, "num_wq_pages exceeds limit: %lu\n",
+			WQ_MAX_NUM_PAGES);
+		return -EFAULT;
+	}
+
+	wq->wq_block_vaddr = dma_alloc_coherent(wq->dev_hdl, PAGE_SIZE, &wq->wq_block_paddr,
+						GFP_KERNEL);
+	if (!wq->wq_block_vaddr) {
+		sdk_err(wq->dev_hdl, "Failed to alloc wq block\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < wq->num_wq_pages; i++)
+		wq->wq_block_vaddr[i] =
+			cpu_to_be64(wq->wq_pages[i].align_paddr);
+
+	return 0;
+}
+
+static int wq_alloc_pages(struct sphw_wq *wq)
+{
+	int i, page_idx, err;
+
+	wq->wq_pages = kcalloc(wq->num_wq_pages, sizeof(*wq->wq_pages),
+			       GFP_KERNEL);
+	if (!wq->wq_pages) {
+		sdk_err(wq->dev_hdl, "Failed to alloc wq pages handle\n");
+		return -ENOMEM;
+	}
+
+	for (page_idx = 0; page_idx < wq->num_wq_pages; page_idx++) {
+		err = sphw_dma_alloc_coherent_align(wq->dev_hdl, wq->wq_page_size,
+						    wq->wq_page_size, GFP_KERNEL,
+						    &wq->wq_pages[page_idx]);
+		if (err) {
+			sdk_err(wq->dev_hdl, "Failed to alloc wq page\n");
+			goto free_wq_pages;
+		}
+	}
+
+	err = wq_init_wq_block(wq);
+	if (err)
+		goto free_wq_pages;
+
+	return 0;
+
+free_wq_pages:
+	for (i = 0; i < page_idx; i++)
+		sphw_dma_free_coherent_align(wq->dev_hdl, &wq->wq_pages[i]);
+
+	kfree(wq->wq_pages);
+	wq->wq_pages = NULL;
+
+	return -ENOMEM;
+}
+
+static void wq_free_pages(struct sphw_wq *wq)
+{
+	int i;
+
+	if (!WQ_IS_0_LEVEL_CLA(wq))
+		dma_free_coherent(wq->dev_hdl, PAGE_SIZE, wq->wq_block_vaddr,
+				  wq->wq_block_paddr);
+
+	for (i = 0; i < wq->num_wq_pages; i++)
+		sphw_dma_free_coherent_align(wq->dev_hdl, &wq->wq_pages[i]);
+
+	kfree(wq->wq_pages);
+	wq->wq_pages = NULL;
+}
+
+int sphw_wq_create(void *hwdev, struct sphw_wq *wq, u32 q_depth, u16 wqebb_size)
+{
+	struct sphw_hwdev *dev = hwdev;
+	u32 wq_page_size;
+
+	if (!wq || !dev) {
+		pr_err("Invalid wq or dev_hdl\n");
+		return -EINVAL;
+	}
+
+	if (q_depth < WQ_MIN_DEPTH || q_depth > WQ_MAX_DEPTH ||
+	    (q_depth & (q_depth - 1)) || !wqebb_size ||
+	    (wqebb_size & (wqebb_size - 1))) {
+		sdk_err(dev->dev_hdl, "Wq q_depth(%u) or wqebb_size(%u) is invalid\n",
+			q_depth, wqebb_size);
+		return -EINVAL;
+	}
+
+	wq_page_size = ALIGN(dev->wq_page_size, PAGE_SIZE);
+
+	memset(wq, 0, sizeof(*wq));
+	wq->dev_hdl = dev->dev_hdl;
+	wq->q_depth = q_depth;
+	wq->idx_mask = (u16)(q_depth - 1);
+	wq->wqebb_size = wqebb_size;
+	wq->wqebb_size_shift = (u16)ilog2(wq->wqebb_size);
+	wq->wq_page_size = wq_page_size;
+
+	wq->wqebbs_per_page = wq_page_size / wqebb_size;
+	/* In case wq_page_size is larger than q_depth * wqebb_size */
+	if (wq->wqebbs_per_page > q_depth)
+		wq->wqebbs_per_page = q_depth;
+	wq->wqebbs_per_page_shift = (u16)ilog2(wq->wqebbs_per_page);
+	wq->wqebbs_per_page_mask = (u16)(wq->wqebbs_per_page - 1);
+	wq->num_wq_pages = (u16)(ALIGN(((u32)q_depth * wqebb_size),
+				       wq_page_size) / wq_page_size);
+
+	return wq_alloc_pages(wq);
+}
+
+void sphw_wq_destroy(struct sphw_wq *wq)
+{
+	if (!wq)
+		return;
+
+	wq_free_pages(wq);
+}
diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_wq.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_wq.h
new file mode 100644
index 000000000000..01d564ca527a
--- /dev/null
+++ b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_wq.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */
+
+#ifndef SPHW_WQ_H
+#define SPHW_WQ_H
+
+struct sphw_wq {
+	u16 cons_idx;
+	u16 prod_idx;
+
+	u32 q_depth;
+	u16 idx_mask;
+	u16 wqebb_size_shift;
+	u16 num_wq_pages;
+	u32 wqebbs_per_page;
+	u16 wqebbs_per_page_shift;
+	u16 wqebbs_per_page_mask;
+
+	struct sphw_dma_addr_align *wq_pages;
+
+	dma_addr_t wq_block_paddr;
+	u64 *wq_block_vaddr;
+
+	void *dev_hdl;
+	u32 wq_page_size;
+	u16 wqebb_size;
+} ____cacheline_aligned;
+
+int sphw_wq_create(void *hwdev, struct sphw_wq *wq, u32 q_depth, u16 wqebb_size);
+void sphw_wq_destroy(struct sphw_wq *wq);
+
+#define WQ_MASK_IDX(wq, idx)		((idx) & (wq)->idx_mask)
+#define WQ_MASK_PAGE(wq, pg_idx)	((pg_idx) < (wq)->num_wq_pages ? (pg_idx) : 0)
+#define WQ_PAGE_IDX(wq, idx)		((idx) >> (wq)->wqebbs_per_page_shift)
+#define WQ_OFFSET_IN_PAGE(wq, idx)	((idx) & (wq)->wqebbs_per_page_mask)
+#define WQ_GET_WQEBB_ADDR(wq, pg_idx, idx_in_pg) \
+	((u8 *)(wq)->wq_pages[pg_idx].align_vaddr + ((idx_in_pg) << (wq)->wqebb_size_shift))
+#define WQ_IS_0_LEVEL_CLA(wq)		((wq)->num_wq_pages == 1)
+
+static inline u16 sphw_wq_free_wqebbs(struct sphw_wq *wq)
+{
+	return wq->q_depth - ((wq->q_depth + wq->prod_idx - wq->cons_idx) &
+			      wq->idx_mask) - 1;
+}
+
+static inline bool sphw_wq_is_empty(struct sphw_wq *wq)
+{
+	return WQ_MASK_IDX(wq, wq->prod_idx) == WQ_MASK_IDX(wq, wq->cons_idx);
+}
+
+static inline void *sphw_wq_get_one_wqebb(struct sphw_wq *wq, u16 *pi)
+{
+	*pi = WQ_MASK_IDX(wq, wq->prod_idx);
+	wq->prod_idx++;
+
+	return WQ_GET_WQEBB_ADDR(wq, WQ_PAGE_IDX(wq, *pi),
+				 WQ_OFFSET_IN_PAGE(wq, *pi));
+}
+
+static inline void *sphw_wq_get_multi_wqebbs(struct sphw_wq *wq, u16 num_wqebbs, u16 *prod_idx,
+					     void **second_part_wqebbs_addr,
+					     u16 *first_part_wqebbs_num)
+{
+	u32 pg_idx, off_in_page;
+
+	*prod_idx = WQ_MASK_IDX(wq, wq->prod_idx);
+	wq->prod_idx += num_wqebbs;
+
+	pg_idx = WQ_PAGE_IDX(wq, *prod_idx);
+	off_in_page = WQ_OFFSET_IN_PAGE(wq, *prod_idx);
+
+	if (off_in_page + num_wqebbs > wq->wqebbs_per_page) {
+		/* wqe across wq page boundary */
+		*second_part_wqebbs_addr =
+			WQ_GET_WQEBB_ADDR(wq, WQ_MASK_PAGE(wq, pg_idx + 1), 0);
+		*first_part_wqebbs_num = wq->wqebbs_per_page - off_in_page;
+	} else {
+		*second_part_wqebbs_addr = NULL;
+		*first_part_wqebbs_num = num_wqebbs;
+	}
+
+	return WQ_GET_WQEBB_ADDR(wq, pg_idx, off_in_page);
+}
+
+static inline void sphw_wq_put_wqebbs(struct sphw_wq *wq, u16 num_wqebbs)
+{
+	wq->cons_idx += num_wqebbs;
+}
+
+static inline void *sphw_wq_wqebb_addr(struct sphw_wq *wq, u16 idx)
+{
+	return WQ_GET_WQEBB_ADDR(wq, WQ_PAGE_IDX(wq, idx),
+				 WQ_OFFSET_IN_PAGE(wq, idx));
+}
+
+static inline void *sphw_wq_read_one_wqebb(struct sphw_wq *wq, u16 *cons_idx)
+{
+	*cons_idx = WQ_MASK_IDX(wq, wq->cons_idx);
+
+	return sphw_wq_wqebb_addr(wq, *cons_idx);
+}
+
+static inline u64 sphw_wq_get_first_wqe_page_addr(struct sphw_wq *wq)
+{
+	return wq->wq_pages[0].align_paddr;
+}
+
+static inline void sphw_wq_reset(struct sphw_wq *wq)
+{
+	u16 pg_idx;
+
+	wq->cons_idx = 0;
+	wq->prod_idx = 0;
+
+	for (pg_idx = 0; pg_idx < wq->num_wq_pages; pg_idx++)
+		memset(wq->wq_pages[pg_idx].align_vaddr, 0, wq->wq_page_size);
+}
+
+#endif
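
For reviewers, a minimal usage sketch of the work-queue helpers added in sphw_wq.c/sphw_wq.h. Only sphw_wq_create(), sphw_wq_free_wqebbs(), sphw_wq_get_one_wqebb(), sphw_wq_put_wqebbs() and sphw_wq_destroy() come from this patch; the demo_wqebb layout, the demo_post_one_wqe() wrapper, the depth of 256 and the doorbell step are hypothetical placeholders, not part of the driver.

/* Illustrative sketch only, not part of the submitted driver.
 * Assumes "sphw_wq.h" is included and hwdev is a valid handle from probe.
 */
struct demo_wqebb {			/* hypothetical 64-byte WQE building block */
	__le64 data[8];
};

static int demo_post_one_wqe(void *hwdev, struct sphw_wq *wq, u64 payload)
{
	struct demo_wqebb *wqebb;
	u16 pi;
	int err;

	/* q_depth and wqebb_size must both be powers of two */
	err = sphw_wq_create(hwdev, wq, 256, sizeof(struct demo_wqebb));
	if (err)
		return err;

	/* producer side: reserve one WQEBB and fill it */
	if (!sphw_wq_free_wqebbs(wq)) {
		sphw_wq_destroy(wq);
		return -EBUSY;
	}
	wqebb = sphw_wq_get_one_wqebb(wq, &pi);
	wqebb->data[0] = cpu_to_le64(payload);

	/* ... ring the hardware doorbell with pi and wait for completion ... */

	/* consumer side: return the WQEBB once the hardware has consumed it */
	sphw_wq_put_wqebbs(wq, 1);

	sphw_wq_destroy(wq);
	return 0;
}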