Signed-off-by: zhoujiadong <zhoujiadong5@huawei.com>
---
 .../huawei/hinic3/hw/hinic3_api_cmd.c         | 1211 ++++++++++
 .../huawei/hinic3/hw/hinic3_api_cmd.h         |  286 +++
 .../ethernet/huawei/hinic3/hw/hinic3_cmdq.c   | 1543 ++++++++++++
 .../ethernet/huawei/hinic3/hw/hinic3_cmdq.h   |  204 ++
 .../ethernet/huawei/hinic3/hw/hinic3_common.c |   93 +
 .../ethernet/huawei/hinic3/hw/hinic3_csr.h    |  187 ++
 .../huawei/hinic3/hw/hinic3_dev_mgmt.c        |  803 +++++++
 .../huawei/hinic3/hw/hinic3_dev_mgmt.h        |  105 +
 .../huawei/hinic3/hw/hinic3_devlink.c         |  431 ++++
 .../huawei/hinic3/hw/hinic3_devlink.h         |  149 ++
 .../ethernet/huawei/hinic3/hw/hinic3_eqs.c    | 1385 +++++++++++
 .../ethernet/huawei/hinic3/hw/hinic3_eqs.h    |  165 ++
 .../ethernet/huawei/hinic3/hw/hinic3_hw_api.c |  453 ++++
 .../ethernet/huawei/hinic3/hw/hinic3_hw_api.h |  141 ++
 .../ethernet/huawei/hinic3/hw/hinic3_hw_cfg.c | 1480 ++++++++++++
 .../ethernet/huawei/hinic3/hw/hinic3_hw_cfg.h |  332 +++
 .../huawei/hinic3/hw/hinic3_hw_comm.c         | 1540 ++++++++++++
 .../huawei/hinic3/hw/hinic3_hw_comm.h         |   51 +
 .../ethernet/huawei/hinic3/hw/hinic3_hw_mt.c  |  599 +++++
 .../ethernet/huawei/hinic3/hw/hinic3_hw_mt.h  |   49 +
 .../ethernet/huawei/hinic3/hw/hinic3_hwdev.c  | 2141 +++++++++++++++++
 .../ethernet/huawei/hinic3/hw/hinic3_hwdev.h  |  175 ++
 .../ethernet/huawei/hinic3/hw/hinic3_hwif.c   |  994 ++++++++
 .../ethernet/huawei/hinic3/hw/hinic3_hwif.h   |  113 +
 .../ethernet/huawei/hinic3/hw/hinic3_lld.c    | 1413 +++++++++++
 .../ethernet/huawei/hinic3/hw/hinic3_mbox.c   | 1842 ++++++++++++++
 .../ethernet/huawei/hinic3/hw/hinic3_mbox.h   |  267 ++
 .../ethernet/huawei/hinic3/hw/hinic3_mgmt.c   | 1515 ++++++++++++
 .../ethernet/huawei/hinic3/hw/hinic3_mgmt.h   |  179 ++
 .../huawei/hinic3/hw/hinic3_nictool.c         |  974 ++++++++
 .../huawei/hinic3/hw/hinic3_nictool.h         |   35 +
 .../huawei/hinic3/hw/hinic3_pci_id_tbl.h      |   15 +
 .../huawei/hinic3/hw/hinic3_prof_adap.c       |   44 +
 .../huawei/hinic3/hw/hinic3_prof_adap.h       |  111 +
 .../ethernet/huawei/hinic3/hw/hinic3_sm_lt.h  |  160 ++
 .../ethernet/huawei/hinic3/hw/hinic3_sml_lt.c |  160 ++
 .../ethernet/huawei/hinic3/hw/hinic3_sriov.c  |  267 ++
 .../ethernet/huawei/hinic3/hw/hinic3_sriov.h  |   35 +
 .../net/ethernet/huawei/hinic3/hw/hinic3_wq.c |  159 ++
 .../huawei/hinic3/hw/ossl_knl_linux.c         |  533 ++++
 40 files changed, 22339 insertions(+)
 create mode 100644 drivers/net/ethernet/huawei/hinic3/hw/hinic3_api_cmd.c
 create mode 100644 drivers/net/ethernet/huawei/hinic3/hw/hinic3_api_cmd.h
 create mode 100644 drivers/net/ethernet/huawei/hinic3/hw/hinic3_cmdq.c
 create mode 100644 drivers/net/ethernet/huawei/hinic3/hw/hinic3_cmdq.h
 create mode 100644 drivers/net/ethernet/huawei/hinic3/hw/hinic3_common.c
 create mode 100644 drivers/net/ethernet/huawei/hinic3/hw/hinic3_csr.h
 create mode 100644 drivers/net/ethernet/huawei/hinic3/hw/hinic3_dev_mgmt.c
 create mode 100644 drivers/net/ethernet/huawei/hinic3/hw/hinic3_dev_mgmt.h
 create mode 100644 drivers/net/ethernet/huawei/hinic3/hw/hinic3_devlink.c
 create mode 100644 drivers/net/ethernet/huawei/hinic3/hw/hinic3_devlink.h
 create mode 100644 drivers/net/ethernet/huawei/hinic3/hw/hinic3_eqs.c
 create mode 100644 drivers/net/ethernet/huawei/hinic3/hw/hinic3_eqs.h
 create mode 100644 drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_api.c
 create mode 100644 drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_api.h
 create mode 100644 drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_cfg.c
 create mode 100644 drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_cfg.h
 create mode 100644 drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_comm.c
 create mode 100644 drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_comm.h
 create mode 100644 drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_mt.c
 create mode 100644 drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_mt.h
 create mode 100644 drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwdev.c
 create mode 100644 drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwdev.h
 create mode 100644 drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwif.c
 create mode 100644 drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwif.h
 create mode 100644 drivers/net/ethernet/huawei/hinic3/hw/hinic3_lld.c
 create mode 100644 drivers/net/ethernet/huawei/hinic3/hw/hinic3_mbox.c
 create mode 100644 drivers/net/ethernet/huawei/hinic3/hw/hinic3_mbox.h
 create mode 100644 drivers/net/ethernet/huawei/hinic3/hw/hinic3_mgmt.c
 create mode 100644 drivers/net/ethernet/huawei/hinic3/hw/hinic3_mgmt.h
 create mode 100644 drivers/net/ethernet/huawei/hinic3/hw/hinic3_nictool.c
 create mode 100644 drivers/net/ethernet/huawei/hinic3/hw/hinic3_nictool.h
 create mode 100644 drivers/net/ethernet/huawei/hinic3/hw/hinic3_pci_id_tbl.h
 create mode 100644 drivers/net/ethernet/huawei/hinic3/hw/hinic3_prof_adap.c
 create mode 100644 drivers/net/ethernet/huawei/hinic3/hw/hinic3_prof_adap.h
 create mode 100644 drivers/net/ethernet/huawei/hinic3/hw/hinic3_sm_lt.h
 create mode 100644 drivers/net/ethernet/huawei/hinic3/hw/hinic3_sml_lt.c
 create mode 100644 drivers/net/ethernet/huawei/hinic3/hw/hinic3_sriov.c
 create mode 100644 drivers/net/ethernet/huawei/hinic3/hw/hinic3_sriov.h
 create mode 100644 drivers/net/ethernet/huawei/hinic3/hw/hinic3_wq.c
 create mode 100644 drivers/net/ethernet/huawei/hinic3/hw/ossl_knl_linux.c
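
For context, a minimal usage sketch of the API command chain interface added
below (this note sits above the first hunk and is not applied by git-am). It
is illustrative only: it assumes a fully initialized struct hinic3_hwdev
*hwdev and a device-specific node_id and command layout, both hypothetical
here; error handling is abbreviated.

	/* Illustrative caller, not part of the patch. */
	struct hinic3_api_cmd_chain *chains[HINIC3_API_CMD_MAX];
	u8 cmd[16] = {0};
	int err;

	err = hinic3_api_cmd_init(hwdev, chains);
	if (err)
		return err;

	/* Synchronous write to the management CPU. */
	err = hinic3_api_cmd_write(chains[HINIC3_API_CMD_WRITE_TO_MGMT_CPU],
				   node_id, cmd, sizeof(cmd));

	hinic3_api_cmd_free(hwdev, chains);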
diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_api_cmd.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_api_cmd.c new file mode 100644 index 000000000000..b742f8a8d9fe --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_api_cmd.c @@ -0,0 +1,1211 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/completion.h> +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/pci.h> +#include <linux/dma-mapping.h> +#include <linux/semaphore.h> +#include <linux/jiffies.h> +#include <linux/delay.h> + +#include "ossl_knl.h" +#include "hinic3_crm.h" +#include "hinic3_hw.h" +#include "hinic3_common.h" +#include "hinic3_hwdev.h" +#include "hinic3_csr.h" +#include "hinic3_hwif.h" +#include "hinic3_api_cmd.h" + +#define API_CMD_CHAIN_CELL_SIZE_SHIFT 6U + +#define API_CMD_CELL_DESC_SIZE 8 +#define API_CMD_CELL_DATA_ADDR_SIZE 8 + +#define API_CHAIN_NUM_CELLS 32 +#define API_CHAIN_CELL_SIZE 128 +#define API_CHAIN_RSP_DATA_SIZE 128 + +#define API_CMD_CELL_WB_ADDR_SIZE 8 + +#define API_CHAIN_CELL_ALIGNMENT 8 + +#define API_CMD_TIMEOUT 10000 +#define API_CMD_STATUS_TIMEOUT 10000 + +#define API_CMD_BUF_SIZE 2048ULL + +#define API_CMD_NODE_ALIGN_SIZE 512ULL +#define API_PAYLOAD_ALIGN_SIZE 64ULL + +#define API_CHAIN_RESP_ALIGNMENT 128ULL + +#define COMPLETION_TIMEOUT_DEFAULT 1000UL +#define POLLING_COMPLETION_TIMEOUT_DEFAULT 1000U + +#define API_CMD_RESPONSE_DATA_PADDR(val) be64_to_cpu(*((u64 *)(val))) + +#define READ_API_CMD_PRIV_DATA(id, token) ((((u32)(id)) << 16) + (token)) +#define WRITE_API_CMD_PRIV_DATA(id) (((u8)(id)) << 16) + +#define MASKED_IDX(chain, idx) ((idx) & ((chain)->num_cells - 1)) + +#define SIZE_4BYTES(size) (ALIGN((u32)(size), 4U) >> 2) +#define SIZE_8BYTES(size) (ALIGN((u32)(size), 8U) >> 3) + +enum api_cmd_data_format { + SGL_DATA = 1, +}; + +enum api_cmd_type { + API_CMD_WRITE_TYPE = 0, + API_CMD_READ_TYPE = 1, +}; + +enum api_cmd_bypass { + NOT_BYPASS = 0, + BYPASS = 1, +}; + +enum api_cmd_resp_aeq { + NOT_TRIGGER = 0, + TRIGGER = 1, +}; + +enum api_cmd_chn_code { + APICHN_0 = 0, +}; + +enum api_cmd_chn_rsvd { + APICHN_VALID = 0, + APICHN_INVALID = 1, +}; + +#define API_DESC_LEN (7) + +static u8 xor_chksum_set(void *data) +{ + int idx; + u8 checksum = 0; + u8 *val = data; + + for (idx = 0; idx < API_DESC_LEN; idx++) + checksum ^= val[idx]; + + return checksum; +} + +static void set_prod_idx(struct hinic3_api_cmd_chain *chain) +{ + enum hinic3_api_cmd_chain_type chain_type = chain->chain_type; + struct hinic3_hwif *hwif = chain->hwdev->hwif; + u32 hw_prod_idx_addr = HINIC3_CSR_API_CMD_CHAIN_PI_ADDR(chain_type); + u32 prod_idx = chain->prod_idx; + + hinic3_hwif_write_reg(hwif, hw_prod_idx_addr, prod_idx); +} + +static u32 get_hw_cons_idx(struct hinic3_api_cmd_chain *chain) +{ + u32 addr, val; + + addr = HINIC3_CSR_API_CMD_STATUS_0_ADDR(chain->chain_type); + val = hinic3_hwif_read_reg(chain->hwdev->hwif, addr); + + return HINIC3_API_CMD_STATUS_GET(val, CONS_IDX); +} + +static void dump_api_chain_reg(struct hinic3_api_cmd_chain *chain) +{ + void *dev = chain->hwdev->dev_hdl; + u32 addr, val; + u16 pci_cmd = 0; + + addr = HINIC3_CSR_API_CMD_STATUS_0_ADDR(chain->chain_type); + val = hinic3_hwif_read_reg(chain->hwdev->hwif, addr); + + sdk_err(dev, "Chain type: 0x%x, cpld error: 0x%x, check error: 0x%x, current fsm: 0x%x\n", + chain->chain_type, HINIC3_API_CMD_STATUS_GET(val, CPLD_ERR), + 
HINIC3_API_CMD_STATUS_GET(val, CHKSUM_ERR), + HINIC3_API_CMD_STATUS_GET(val, FSM)); + + sdk_err(dev, "Chain hw current ci: 0x%x\n", + HINIC3_API_CMD_STATUS_GET(val, CONS_IDX)); + + addr = HINIC3_CSR_API_CMD_CHAIN_PI_ADDR(chain->chain_type); + val = hinic3_hwif_read_reg(chain->hwdev->hwif, addr); + sdk_err(dev, "Chain hw current pi: 0x%x\n", val); + pci_read_config_word(chain->hwdev->pcidev_hdl, PCI_COMMAND, &pci_cmd); + sdk_err(dev, "PCI command reg: 0x%x\n", pci_cmd); +} + +/** + * chain_busy - check if the chain is still processing last requests + * @chain: chain to check + **/ +static int chain_busy(struct hinic3_api_cmd_chain *chain) +{ + void *dev = chain->hwdev->dev_hdl; + struct hinic3_api_cmd_cell_ctxt *ctxt; + u64 resp_header; + + ctxt = &chain->cell_ctxt[chain->prod_idx]; + + switch (chain->chain_type) { + case HINIC3_API_CMD_MULTI_READ: + case HINIC3_API_CMD_POLL_READ: + resp_header = be64_to_cpu(ctxt->resp->header); + if (ctxt->status && + !HINIC3_API_CMD_RESP_HEADER_VALID(resp_header)) { + sdk_err(dev, "Context(0x%x) busy!, pi: %u, resp_header: 0x%08x%08x\n", + ctxt->status, chain->prod_idx, + upper_32_bits(resp_header), + lower_32_bits(resp_header)); + dump_api_chain_reg(chain); + return -EBUSY; + } + break; + case HINIC3_API_CMD_POLL_WRITE: + case HINIC3_API_CMD_WRITE_TO_MGMT_CPU: + case HINIC3_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: + chain->cons_idx = get_hw_cons_idx(chain); + + if (chain->cons_idx == MASKED_IDX(chain, chain->prod_idx + 1)) { + sdk_err(dev, "API CMD chain %d is busy, cons_idx = %u, prod_idx = %u\n", + chain->chain_type, chain->cons_idx, + chain->prod_idx); + dump_api_chain_reg(chain); + return -EBUSY; + } + break; + default: + sdk_err(dev, "Unknown Chain type %d\n", chain->chain_type); + return -EINVAL; + } + + return 0; +} + +/** + * get_cell_data_size - get the data size of specific cell type + * @type: chain type + **/ +static u16 get_cell_data_size(enum hinic3_api_cmd_chain_type type) +{ + u16 cell_data_size = 0; + + switch (type) { + case HINIC3_API_CMD_POLL_READ: + cell_data_size = ALIGN(API_CMD_CELL_DESC_SIZE + + API_CMD_CELL_WB_ADDR_SIZE + + API_CMD_CELL_DATA_ADDR_SIZE, + API_CHAIN_CELL_ALIGNMENT); + break; + + case HINIC3_API_CMD_WRITE_TO_MGMT_CPU: + case HINIC3_API_CMD_POLL_WRITE: + case HINIC3_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: + cell_data_size = ALIGN(API_CMD_CELL_DESC_SIZE + + API_CMD_CELL_DATA_ADDR_SIZE, + API_CHAIN_CELL_ALIGNMENT); + break; + default: + break; + } + + return cell_data_size; +} + +/** + * prepare_cell_ctrl - prepare the ctrl of the cell for the command + * @cell_ctrl: the control of the cell to set the control into it + * @cell_len: the size of the cell + **/ +static void prepare_cell_ctrl(u64 *cell_ctrl, u16 cell_len) +{ + u64 ctrl; + u8 chksum; + + ctrl = HINIC3_API_CMD_CELL_CTRL_SET(SIZE_8BYTES(cell_len), CELL_LEN) | + HINIC3_API_CMD_CELL_CTRL_SET(0ULL, RD_DMA_ATTR_OFF) | + HINIC3_API_CMD_CELL_CTRL_SET(0ULL, WR_DMA_ATTR_OFF); + + chksum = xor_chksum_set(&ctrl); + + ctrl |= HINIC3_API_CMD_CELL_CTRL_SET(chksum, XOR_CHKSUM); + + /* The data in the HW should be in Big Endian Format */ + *cell_ctrl = cpu_to_be64(ctrl); +} + +/** + * prepare_api_cmd - prepare API CMD command + * @chain: chain for the command + * @cell: the cell of the command + * @node_id: destination node on the card that will receive the command + * @cmd: command data + * @cmd_size: the command size + **/ +static void prepare_api_cmd(struct hinic3_api_cmd_chain *chain, + struct hinic3_api_cmd_cell *cell, u8 node_id, + const void *cmd, u16 cmd_size) +{ + struct 
hinic3_api_cmd_cell_ctxt *cell_ctxt; + u32 priv; + + cell_ctxt = &chain->cell_ctxt[chain->prod_idx]; + + switch (chain->chain_type) { + case HINIC3_API_CMD_POLL_READ: + priv = READ_API_CMD_PRIV_DATA(chain->chain_type, + cell_ctxt->saved_prod_idx); + cell->desc = HINIC3_API_CMD_DESC_SET(SGL_DATA, API_TYPE) | + HINIC3_API_CMD_DESC_SET(API_CMD_READ_TYPE, RD_WR) | + HINIC3_API_CMD_DESC_SET(BYPASS, MGMT_BYPASS) | + HINIC3_API_CMD_DESC_SET(NOT_TRIGGER, + RESP_AEQE_EN) | + HINIC3_API_CMD_DESC_SET(priv, PRIV_DATA); + break; + case HINIC3_API_CMD_POLL_WRITE: + priv = WRITE_API_CMD_PRIV_DATA(chain->chain_type); + cell->desc = HINIC3_API_CMD_DESC_SET(SGL_DATA, API_TYPE) | + HINIC3_API_CMD_DESC_SET(API_CMD_WRITE_TYPE, + RD_WR) | + HINIC3_API_CMD_DESC_SET(BYPASS, MGMT_BYPASS) | + HINIC3_API_CMD_DESC_SET(NOT_TRIGGER, + RESP_AEQE_EN) | + HINIC3_API_CMD_DESC_SET(priv, PRIV_DATA); + break; + case HINIC3_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: + case HINIC3_API_CMD_WRITE_TO_MGMT_CPU: + priv = WRITE_API_CMD_PRIV_DATA(chain->chain_type); + cell->desc = HINIC3_API_CMD_DESC_SET(SGL_DATA, API_TYPE) | + HINIC3_API_CMD_DESC_SET(API_CMD_WRITE_TYPE, + RD_WR) | + HINIC3_API_CMD_DESC_SET(NOT_BYPASS, MGMT_BYPASS) | + HINIC3_API_CMD_DESC_SET(TRIGGER, RESP_AEQE_EN) | + HINIC3_API_CMD_DESC_SET(priv, PRIV_DATA); + break; + default: + sdk_err(chain->hwdev->dev_hdl, "Unknown Chain type: %d\n", + chain->chain_type); + return; + } + + cell->desc |= HINIC3_API_CMD_DESC_SET(APICHN_0, APICHN_CODE) | + HINIC3_API_CMD_DESC_SET(APICHN_VALID, APICHN_RSVD); + + cell->desc |= HINIC3_API_CMD_DESC_SET(node_id, DEST) | + HINIC3_API_CMD_DESC_SET(SIZE_4BYTES(cmd_size), SIZE); + + cell->desc |= HINIC3_API_CMD_DESC_SET(xor_chksum_set(&cell->desc), + XOR_CHKSUM); + + /* The data in the HW should be in Big Endian Format */ + cell->desc = cpu_to_be64(cell->desc); + + memcpy(cell_ctxt->api_cmd_vaddr, cmd, cmd_size); +} + +/** + * prepare_cell - prepare cell ctrl and cmd in the current producer cell + * @chain: chain for the command + * @node_id: destination node on the card that will receive the command + * @cmd: command data + * @cmd_size: the command size + * Return: 0 - success, negative - failure + **/ +static void prepare_cell(struct hinic3_api_cmd_chain *chain, u8 node_id, + const void *cmd, u16 cmd_size) +{ + struct hinic3_api_cmd_cell *curr_node; + u16 cell_size; + + curr_node = chain->curr_node; + + cell_size = get_cell_data_size(chain->chain_type); + + prepare_cell_ctrl(&curr_node->ctrl, cell_size); + prepare_api_cmd(chain, curr_node, node_id, cmd, cmd_size); +} + +static inline void cmd_chain_prod_idx_inc(struct hinic3_api_cmd_chain *chain) +{ + chain->prod_idx = MASKED_IDX(chain, chain->prod_idx + 1); +} + +static void issue_api_cmd(struct hinic3_api_cmd_chain *chain) +{ + set_prod_idx(chain); +} + +/** + * api_cmd_status_update - update the status of the chain + * @chain: chain to update + **/ +static void api_cmd_status_update(struct hinic3_api_cmd_chain *chain) +{ + struct hinic3_api_cmd_status *wb_status; + enum hinic3_api_cmd_chain_type chain_type; + u64 status_header; + u32 buf_desc; + + wb_status = chain->wb_status; + + buf_desc = be32_to_cpu(wb_status->buf_desc); + if (HINIC3_API_CMD_STATUS_GET(buf_desc, CHKSUM_ERR)) + return; + + status_header = be64_to_cpu(wb_status->header); + chain_type = HINIC3_API_CMD_STATUS_HEADER_GET(status_header, CHAIN_ID); + if (chain_type >= HINIC3_API_CMD_MAX) + return; + + if (chain_type != chain->chain_type) + return; + + chain->cons_idx = HINIC3_API_CMD_STATUS_GET(buf_desc, CONS_IDX); +} + +static 
enum hinic3_wait_return wait_for_status_poll_handler(void *priv_data) +{ + struct hinic3_api_cmd_chain *chain = priv_data; + + if (!chain->hwdev->chip_present_flag) + return WAIT_PROCESS_ERR; + + api_cmd_status_update(chain); + /* SYNC API CMD cmd should start after prev cmd finished */ + if (chain->cons_idx == chain->prod_idx) + return WAIT_PROCESS_CPL; + + return WAIT_PROCESS_WAITING; +} + +/** + * wait_for_status_poll - wait for write to mgmt command to complete + * @chain: the chain of the command + * Return: 0 - success, negative - failure + **/ +static int wait_for_status_poll(struct hinic3_api_cmd_chain *chain) +{ + return hinic3_wait_for_timeout(chain, + wait_for_status_poll_handler, + API_CMD_STATUS_TIMEOUT, 100); /* wait 100 us once */ +} + +static void copy_resp_data(struct hinic3_api_cmd_cell_ctxt *ctxt, void *ack, + u16 ack_size) +{ + struct hinic3_api_cmd_resp_fmt *resp = ctxt->resp; + + memcpy(ack, &resp->resp_data, ack_size); + ctxt->status = 0; +} + +static enum hinic3_wait_return check_cmd_resp_handler(void *priv_data) +{ + struct hinic3_api_cmd_cell_ctxt *ctxt = priv_data; + u64 resp_header; + u8 resp_status; + + if (!ctxt->hwdev->chip_present_flag) + return WAIT_PROCESS_ERR; + + resp_header = be64_to_cpu(ctxt->resp->header); + rmb(); /* read the latest header */ + + if (HINIC3_API_CMD_RESP_HEADER_VALID(resp_header)) { + resp_status = HINIC3_API_CMD_RESP_HEAD_GET(resp_header, STATUS); + if (resp_status) { + pr_err("Api chain response data err, status: %u\n", + resp_status); + return WAIT_PROCESS_ERR; + } + + return WAIT_PROCESS_CPL; + } + + return WAIT_PROCESS_WAITING; +} + +/** + * prepare_cell - polling for respense data of the read api-command + * @chain: pointer to api cmd chain + * + * Return: 0 - success, negative - failure + **/ +static int wait_for_resp_polling(struct hinic3_api_cmd_cell_ctxt *ctxt) +{ + return hinic3_wait_for_timeout(ctxt, check_cmd_resp_handler, + POLLING_COMPLETION_TIMEOUT_DEFAULT, + USEC_PER_MSEC); +} + +/** + * wait_for_api_cmd_completion - wait for command to complete + * @chain: chain for the command + * Return: 0 - success, negative - failure + **/ +static int wait_for_api_cmd_completion(struct hinic3_api_cmd_chain *chain, + struct hinic3_api_cmd_cell_ctxt *ctxt, + void *ack, u16 ack_size) +{ + void *dev = chain->hwdev->dev_hdl; + int err = 0; + + switch (chain->chain_type) { + case HINIC3_API_CMD_POLL_READ: + err = wait_for_resp_polling(ctxt); + if (err == 0) + copy_resp_data(ctxt, ack, ack_size); + else + sdk_err(dev, "API CMD poll response timeout\n"); + break; + case HINIC3_API_CMD_POLL_WRITE: + case HINIC3_API_CMD_WRITE_TO_MGMT_CPU: + err = wait_for_status_poll(chain); + if (err != 0) { + sdk_err(dev, "API CMD Poll status timeout, chain type: %d\n", + chain->chain_type); + break; + } + break; + case HINIC3_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: + /* No need to wait */ + break; + default: + sdk_err(dev, "Unknown API CMD Chain type: %d\n", + chain->chain_type); + err = -EINVAL; + break; + } + + if (err != 0) + dump_api_chain_reg(chain); + + return err; +} + +static inline void update_api_cmd_ctxt(struct hinic3_api_cmd_chain *chain, + struct hinic3_api_cmd_cell_ctxt *ctxt) +{ + ctxt->status = 1; + ctxt->saved_prod_idx = chain->prod_idx; + if (ctxt->resp) { + ctxt->resp->header = 0; + + /* make sure "header" was cleared */ + wmb(); + } +} + +/** + * api_cmd - API CMD command + * @chain: chain for the command + * @node_id: destination node on the card that will receive the command + * @cmd: command data + * @size: the command size + * Return: 0 
- success, negative - failure + **/ +static int api_cmd(struct hinic3_api_cmd_chain *chain, u8 node_id, + const void *cmd, u16 cmd_size, void *ack, u16 ack_size) +{ + struct hinic3_api_cmd_cell_ctxt *ctxt = NULL; + + if (chain->chain_type == HINIC3_API_CMD_WRITE_ASYNC_TO_MGMT_CPU) + spin_lock(&chain->async_lock); + else + down(&chain->sem); + ctxt = &chain->cell_ctxt[chain->prod_idx]; + if (chain_busy(chain)) { + if (chain->chain_type == HINIC3_API_CMD_WRITE_ASYNC_TO_MGMT_CPU) + spin_unlock(&chain->async_lock); + else + up(&chain->sem); + return -EBUSY; + } + update_api_cmd_ctxt(chain, ctxt); + + prepare_cell(chain, node_id, cmd, cmd_size); + + cmd_chain_prod_idx_inc(chain); + + wmb(); /* issue the command */ + + issue_api_cmd(chain); + + /* incremented prod idx, update ctxt */ + + chain->curr_node = chain->cell_ctxt[chain->prod_idx].cell_vaddr; + if (chain->chain_type == HINIC3_API_CMD_WRITE_ASYNC_TO_MGMT_CPU) + spin_unlock(&chain->async_lock); + else + up(&chain->sem); + + return wait_for_api_cmd_completion(chain, ctxt, ack, ack_size); +} + +/** + * hinic3_api_cmd_write - Write API CMD command + * @chain: chain for write command + * @node_id: destination node on the card that will receive the command + * @cmd: command data + * @size: the command size + * Return: 0 - success, negative - failure + **/ +int hinic3_api_cmd_write(struct hinic3_api_cmd_chain *chain, u8 node_id, + const void *cmd, u16 size) +{ + /* Verify the chain type */ + return api_cmd(chain, node_id, cmd, size, NULL, 0); +} + +/** + * hinic3_api_cmd_read - Read API CMD command + * @chain: chain for read command + * @node_id: destination node on the card that will receive the command + * @cmd: command data + * @size: the command size + * Return: 0 - success, negative - failure + **/ +int hinic3_api_cmd_read(struct hinic3_api_cmd_chain *chain, u8 node_id, + const void *cmd, u16 size, void *ack, u16 ack_size) +{ + return api_cmd(chain, node_id, cmd, size, ack, ack_size); +} + +static enum hinic3_wait_return check_chain_restart_handler(void *priv_data) +{ + struct hinic3_api_cmd_chain *cmd_chain = priv_data; + u32 reg_addr, val; + + if (!cmd_chain->hwdev->chip_present_flag) + return WAIT_PROCESS_ERR; + + reg_addr = HINIC3_CSR_API_CMD_CHAIN_REQ_ADDR(cmd_chain->chain_type); + val = hinic3_hwif_read_reg(cmd_chain->hwdev->hwif, reg_addr); + if (!HINIC3_API_CMD_CHAIN_REQ_GET(val, RESTART)) + return WAIT_PROCESS_CPL; + + return WAIT_PROCESS_WAITING; +} + +/** + * api_cmd_hw_restart - restart the chain in the HW + * @chain: the API CMD specific chain to restart + **/ +static int api_cmd_hw_restart(struct hinic3_api_cmd_chain *cmd_chain) +{ + struct hinic3_hwif *hwif = cmd_chain->hwdev->hwif; + u32 reg_addr, val; + + /* Read Modify Write */ + reg_addr = HINIC3_CSR_API_CMD_CHAIN_REQ_ADDR(cmd_chain->chain_type); + val = hinic3_hwif_read_reg(hwif, reg_addr); + + val = HINIC3_API_CMD_CHAIN_REQ_CLEAR(val, RESTART); + val |= HINIC3_API_CMD_CHAIN_REQ_SET(1, RESTART); + + hinic3_hwif_write_reg(hwif, reg_addr, val); + + return hinic3_wait_for_timeout(cmd_chain, check_chain_restart_handler, + API_CMD_TIMEOUT, USEC_PER_MSEC); +} + +/** + * api_cmd_ctrl_init - set the control register of a chain + * @chain: the API CMD specific chain to set control register for + **/ +static void api_cmd_ctrl_init(struct hinic3_api_cmd_chain *chain) +{ + struct hinic3_hwif *hwif = chain->hwdev->hwif; + u32 reg_addr, ctrl; + u32 size; + + /* Read Modify Write */ + reg_addr = HINIC3_CSR_API_CMD_CHAIN_CTRL_ADDR(chain->chain_type); + + size = 
(u32)ilog2(chain->cell_size >> API_CMD_CHAIN_CELL_SIZE_SHIFT); + + ctrl = hinic3_hwif_read_reg(hwif, reg_addr); + + ctrl = HINIC3_API_CMD_CHAIN_CTRL_CLEAR(ctrl, AEQE_EN) & + HINIC3_API_CMD_CHAIN_CTRL_CLEAR(ctrl, CELL_SIZE); + + ctrl |= HINIC3_API_CMD_CHAIN_CTRL_SET(0, AEQE_EN) | + HINIC3_API_CMD_CHAIN_CTRL_SET(size, CELL_SIZE); + + hinic3_hwif_write_reg(hwif, reg_addr, ctrl); +} + +/** + * api_cmd_set_status_addr - set the status address of a chain in the HW + * @chain: the API CMD specific chain to set status address for + **/ +static void api_cmd_set_status_addr(struct hinic3_api_cmd_chain *chain) +{ + struct hinic3_hwif *hwif = chain->hwdev->hwif; + u32 addr, val; + + addr = HINIC3_CSR_API_CMD_STATUS_HI_ADDR(chain->chain_type); + val = upper_32_bits(chain->wb_status_paddr); + hinic3_hwif_write_reg(hwif, addr, val); + + addr = HINIC3_CSR_API_CMD_STATUS_LO_ADDR(chain->chain_type); + val = lower_32_bits(chain->wb_status_paddr); + hinic3_hwif_write_reg(hwif, addr, val); +} + +/** + * api_cmd_set_num_cells - set the number cells of a chain in the HW + * @chain: the API CMD specific chain to set the number of cells for + **/ +static void api_cmd_set_num_cells(struct hinic3_api_cmd_chain *chain) +{ + struct hinic3_hwif *hwif = chain->hwdev->hwif; + u32 addr, val; + + addr = HINIC3_CSR_API_CMD_CHAIN_NUM_CELLS_ADDR(chain->chain_type); + val = chain->num_cells; + hinic3_hwif_write_reg(hwif, addr, val); +} + +/** + * api_cmd_head_init - set the head cell of a chain in the HW + * @chain: the API CMD specific chain to set the head for + **/ +static void api_cmd_head_init(struct hinic3_api_cmd_chain *chain) +{ + struct hinic3_hwif *hwif = chain->hwdev->hwif; + u32 addr, val; + + addr = HINIC3_CSR_API_CMD_CHAIN_HEAD_HI_ADDR(chain->chain_type); + val = upper_32_bits(chain->head_cell_paddr); + hinic3_hwif_write_reg(hwif, addr, val); + + addr = HINIC3_CSR_API_CMD_CHAIN_HEAD_LO_ADDR(chain->chain_type); + val = lower_32_bits(chain->head_cell_paddr); + hinic3_hwif_write_reg(hwif, addr, val); +} + +static enum hinic3_wait_return check_chain_ready_handler(void *priv_data) +{ + struct hinic3_api_cmd_chain *chain = priv_data; + u32 addr, val; + u32 hw_cons_idx; + + if (!chain->hwdev->chip_present_flag) + return WAIT_PROCESS_ERR; + + addr = HINIC3_CSR_API_CMD_STATUS_0_ADDR(chain->chain_type); + val = hinic3_hwif_read_reg(chain->hwdev->hwif, addr); + hw_cons_idx = HINIC3_API_CMD_STATUS_GET(val, CONS_IDX); + /* wait for HW cons idx to be updated */ + if (hw_cons_idx == chain->cons_idx) + return WAIT_PROCESS_CPL; + return WAIT_PROCESS_WAITING; +} + +/** + * wait_for_ready_chain - wait for the chain to be ready + * @chain: the API CMD specific chain to wait for + * Return: 0 - success, negative - failure + **/ +static int wait_for_ready_chain(struct hinic3_api_cmd_chain *chain) +{ + return hinic3_wait_for_timeout(chain, check_chain_ready_handler, + API_CMD_TIMEOUT, USEC_PER_MSEC); +} + +/** + * api_cmd_chain_hw_clean - clean the HW + * @chain: the API CMD specific chain + **/ +static void api_cmd_chain_hw_clean(struct hinic3_api_cmd_chain *chain) +{ + struct hinic3_hwif *hwif = chain->hwdev->hwif; + u32 addr, ctrl; + + addr = HINIC3_CSR_API_CMD_CHAIN_CTRL_ADDR(chain->chain_type); + + ctrl = hinic3_hwif_read_reg(hwif, addr); + ctrl = HINIC3_API_CMD_CHAIN_CTRL_CLEAR(ctrl, RESTART_EN) & + HINIC3_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_ERR) & + HINIC3_API_CMD_CHAIN_CTRL_CLEAR(ctrl, AEQE_EN) & + HINIC3_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_CHK_EN) & + HINIC3_API_CMD_CHAIN_CTRL_CLEAR(ctrl, CELL_SIZE); + + 
hinic3_hwif_write_reg(hwif, addr, ctrl); +} + +/** + * api_cmd_chain_hw_init - initialize the chain in the HW + * @chain: the API CMD specific chain to initialize in HW + * Return: 0 - success, negative - failure + **/ +static int api_cmd_chain_hw_init(struct hinic3_api_cmd_chain *chain) +{ + api_cmd_chain_hw_clean(chain); + + api_cmd_set_status_addr(chain); + + if (api_cmd_hw_restart(chain)) { + sdk_err(chain->hwdev->dev_hdl, "Failed to restart api_cmd_hw\n"); + return -EBUSY; + } + + api_cmd_ctrl_init(chain); + api_cmd_set_num_cells(chain); + api_cmd_head_init(chain); + + return wait_for_ready_chain(chain); +} + +/** + * alloc_cmd_buf - allocate a dma buffer for API CMD command + * @chain: the API CMD specific chain for the cmd + * @cell: the cell in the HW for the cmd + * @cell_idx: the index of the cell + * Return: 0 - success, negative - failure + **/ +static int alloc_cmd_buf(struct hinic3_api_cmd_chain *chain, + struct hinic3_api_cmd_cell *cell, u32 cell_idx) +{ + struct hinic3_api_cmd_cell_ctxt *cell_ctxt; + void *dev = chain->hwdev->dev_hdl; + void *buf_vaddr; + u64 buf_paddr; + int err = 0; + + buf_vaddr = (u8 *)((u64)chain->buf_vaddr_base + + chain->buf_size_align * cell_idx); + buf_paddr = chain->buf_paddr_base + + chain->buf_size_align * cell_idx; + + cell_ctxt = &chain->cell_ctxt[cell_idx]; + + cell_ctxt->api_cmd_vaddr = buf_vaddr; + + /* set the cmd DMA address in the cell */ + switch (chain->chain_type) { + case HINIC3_API_CMD_POLL_READ: + cell->read.hw_cmd_paddr = cpu_to_be64(buf_paddr); + break; + case HINIC3_API_CMD_WRITE_TO_MGMT_CPU: + case HINIC3_API_CMD_POLL_WRITE: + case HINIC3_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: + /* The data in the HW should be in Big Endian Format */ + cell->write.hw_cmd_paddr = cpu_to_be64(buf_paddr); + break; + default: + sdk_err(dev, "Unknown API CMD Chain type: %d\n", + chain->chain_type); + err = -EINVAL; + break; + } + + return err; +} + +/** + * alloc_cmd_buf - allocate a resp buffer for API CMD command + * @chain: the API CMD specific chain for the cmd + * @cell: the cell in the HW for the cmd + * @cell_idx: the index of the cell + **/ +static void alloc_resp_buf(struct hinic3_api_cmd_chain *chain, + struct hinic3_api_cmd_cell *cell, u32 cell_idx) +{ + struct hinic3_api_cmd_cell_ctxt *cell_ctxt; + void *resp_vaddr; + u64 resp_paddr; + + resp_vaddr = (u8 *)((u64)chain->rsp_vaddr_base + + chain->rsp_size_align * cell_idx); + resp_paddr = chain->rsp_paddr_base + + chain->rsp_size_align * cell_idx; + + cell_ctxt = &chain->cell_ctxt[cell_idx]; + + cell_ctxt->resp = resp_vaddr; + cell->read.hw_wb_resp_paddr = cpu_to_be64(resp_paddr); +} + +static int hinic3_alloc_api_cmd_cell_buf(struct hinic3_api_cmd_chain *chain, + u32 cell_idx, + struct hinic3_api_cmd_cell *node) +{ + void *dev = chain->hwdev->dev_hdl; + int err; + + /* For read chain, we should allocate buffer for the response data */ + if (chain->chain_type == HINIC3_API_CMD_MULTI_READ || + chain->chain_type == HINIC3_API_CMD_POLL_READ) + alloc_resp_buf(chain, node, cell_idx); + + switch (chain->chain_type) { + case HINIC3_API_CMD_WRITE_TO_MGMT_CPU: + case HINIC3_API_CMD_POLL_WRITE: + case HINIC3_API_CMD_POLL_READ: + case HINIC3_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: + err = alloc_cmd_buf(chain, node, cell_idx); + if (err) { + sdk_err(dev, "Failed to allocate cmd buffer\n"); + goto alloc_cmd_buf_err; + } + break; + /* For api command write and api command read, the data section + * is directly inserted in the cell, so no need to allocate. 
+ */ + case HINIC3_API_CMD_MULTI_READ: + chain->cell_ctxt[cell_idx].api_cmd_vaddr = + &node->read.hw_cmd_paddr; + break; + default: + sdk_err(dev, "Unsupported API CMD chain type\n"); + err = -EINVAL; + goto alloc_cmd_buf_err; + } + + return 0; + +alloc_cmd_buf_err: + + return err; +} + +/** + * api_cmd_create_cell - create API CMD cell of specific chain + * @chain: the API CMD specific chain to create its cell + * @cell_idx: the cell index to create + * @pre_node: previous cell + * @node_vaddr: the virt addr of the cell + * Return: 0 - success, negative - failure + **/ +static int api_cmd_create_cell(struct hinic3_api_cmd_chain *chain, u32 cell_idx, + struct hinic3_api_cmd_cell *pre_node, + struct hinic3_api_cmd_cell **node_vaddr) +{ + struct hinic3_api_cmd_cell_ctxt *cell_ctxt; + struct hinic3_api_cmd_cell *node; + void *cell_vaddr; + u64 cell_paddr; + int err; + + cell_vaddr = (void *)((u64)chain->cell_vaddr_base + + chain->cell_size_align * cell_idx); + cell_paddr = chain->cell_paddr_base + + chain->cell_size_align * cell_idx; + + cell_ctxt = &chain->cell_ctxt[cell_idx]; + cell_ctxt->cell_vaddr = cell_vaddr; + cell_ctxt->hwdev = chain->hwdev; + node = cell_ctxt->cell_vaddr; + + if (!pre_node) { + chain->head_node = cell_vaddr; + chain->head_cell_paddr = (dma_addr_t)cell_paddr; + } else { + /* The data in the HW should be in Big Endian Format */ + pre_node->next_cell_paddr = cpu_to_be64(cell_paddr); + } + + /* Driver software should make sure that there is an empty API + * command cell at the end the chain + */ + node->next_cell_paddr = 0; + + err = hinic3_alloc_api_cmd_cell_buf(chain, cell_idx, node); + if (err) + return err; + + *node_vaddr = node; + + return 0; +} + +/** + * api_cmd_create_cells - create API CMD cells for specific chain + * @chain: the API CMD specific chain + * Return: 0 - success, negative - failure + **/ +static int api_cmd_create_cells(struct hinic3_api_cmd_chain *chain) +{ + struct hinic3_api_cmd_cell *node = NULL, *pre_node = NULL; + void *dev = chain->hwdev->dev_hdl; + u32 cell_idx; + int err; + + for (cell_idx = 0; cell_idx < chain->num_cells; cell_idx++) { + err = api_cmd_create_cell(chain, cell_idx, pre_node, &node); + if (err) { + sdk_err(dev, "Failed to create API CMD cell\n"); + return err; + } + + pre_node = node; + } + + if (!node) + return -EFAULT; + + /* set the Final node to point on the start */ + node->next_cell_paddr = cpu_to_be64(chain->head_cell_paddr); + + /* set the current node to be the head */ + chain->curr_node = chain->head_node; + return 0; +} + +/** + * api_chain_init - initialize API CMD specific chain + * @chain: the API CMD specific chain to initialize + * @attr: attributes to set in the chain + * Return: 0 - success, negative - failure + **/ +static int api_chain_init(struct hinic3_api_cmd_chain *chain, + struct hinic3_api_cmd_chain_attr *attr) +{ + void *dev = chain->hwdev->dev_hdl; + size_t cell_ctxt_size; + size_t cells_buf_size; + int err; + + chain->chain_type = attr->chain_type; + chain->num_cells = attr->num_cells; + chain->cell_size = attr->cell_size; + chain->rsp_size = attr->rsp_size; + + chain->prod_idx = 0; + chain->cons_idx = 0; + + if (chain->chain_type == HINIC3_API_CMD_WRITE_ASYNC_TO_MGMT_CPU) + spin_lock_init(&chain->async_lock); + else + sema_init(&chain->sem, 1); + + cell_ctxt_size = chain->num_cells * sizeof(*chain->cell_ctxt); + if (!cell_ctxt_size) { + sdk_err(dev, "Api chain cell size cannot be zero\n"); + err = -EINVAL; + goto alloc_cell_ctxt_err; + } + + chain->cell_ctxt = kzalloc(cell_ctxt_size, 
GFP_KERNEL); + if (!chain->cell_ctxt) { + sdk_err(dev, "Failed to allocate cell contexts for a chain\n"); + err = -ENOMEM; + goto alloc_cell_ctxt_err; + } + + chain->wb_status = dma_zalloc_coherent(dev, + sizeof(*chain->wb_status), + &chain->wb_status_paddr, + GFP_KERNEL); + if (!chain->wb_status) { + sdk_err(dev, "Failed to allocate DMA wb status\n"); + err = -ENOMEM; + goto alloc_wb_status_err; + } + + chain->cell_size_align = ALIGN((u64)chain->cell_size, + API_CMD_NODE_ALIGN_SIZE); + chain->rsp_size_align = ALIGN((u64)chain->rsp_size, + API_CHAIN_RESP_ALIGNMENT); + chain->buf_size_align = ALIGN(API_CMD_BUF_SIZE, API_PAYLOAD_ALIGN_SIZE); + + cells_buf_size = (chain->cell_size_align + chain->rsp_size_align + + chain->buf_size_align) * chain->num_cells; + + err = hinic3_dma_zalloc_coherent_align(dev, cells_buf_size, + API_CMD_NODE_ALIGN_SIZE, + GFP_KERNEL, + &chain->cells_addr); + if (err) { + sdk_err(dev, "Failed to allocate API CMD cells buffer\n"); + goto alloc_cells_buf_err; + } + + chain->cell_vaddr_base = chain->cells_addr.align_vaddr; + chain->cell_paddr_base = chain->cells_addr.align_paddr; + + chain->rsp_vaddr_base = (u8 *)((u64)chain->cell_vaddr_base + + chain->cell_size_align * chain->num_cells); + chain->rsp_paddr_base = chain->cell_paddr_base + + chain->cell_size_align * chain->num_cells; + + chain->buf_vaddr_base = (u8 *)((u64)chain->rsp_vaddr_base + + chain->rsp_size_align * chain->num_cells); + chain->buf_paddr_base = chain->rsp_paddr_base + + chain->rsp_size_align * chain->num_cells; + + return 0; + +alloc_cells_buf_err: + dma_free_coherent(dev, sizeof(*chain->wb_status), + chain->wb_status, chain->wb_status_paddr); + +alloc_wb_status_err: + kfree(chain->cell_ctxt); + +/*lint -save -e548*/ +alloc_cell_ctxt_err: + if (chain->chain_type == HINIC3_API_CMD_WRITE_ASYNC_TO_MGMT_CPU) + spin_lock_deinit(&chain->async_lock); + else + sema_deinit(&chain->sem); +/*lint -restore*/ + return err; +} + +/** + * api_chain_free - free API CMD specific chain + * @chain: the API CMD specific chain to free + **/ +static void api_chain_free(struct hinic3_api_cmd_chain *chain) +{ + void *dev = chain->hwdev->dev_hdl; + + hinic3_dma_free_coherent_align(dev, &chain->cells_addr); + + dma_free_coherent(dev, sizeof(*chain->wb_status), + chain->wb_status, chain->wb_status_paddr); + kfree(chain->cell_ctxt); + + if (chain->chain_type == HINIC3_API_CMD_WRITE_ASYNC_TO_MGMT_CPU) + spin_lock_deinit(&chain->async_lock); + else + sema_deinit(&chain->sem); +} + +/** + * api_cmd_create_chain - create API CMD specific chain + * @chain: the API CMD specific chain to create + * @attr: attributes to set in the chain + * Return: 0 - success, negative - failure + **/ +static int api_cmd_create_chain(struct hinic3_api_cmd_chain **cmd_chain, + struct hinic3_api_cmd_chain_attr *attr) +{ + struct hinic3_hwdev *hwdev = attr->hwdev; + struct hinic3_api_cmd_chain *chain = NULL; + int err; + + if (attr->num_cells & (attr->num_cells - 1)) { + sdk_err(hwdev->dev_hdl, "Invalid number of cells, must be power of 2\n"); + return -EINVAL; + } + + chain = kzalloc(sizeof(*chain), GFP_KERNEL); + if (!chain) + return -ENOMEM; + + chain->hwdev = hwdev; + + err = api_chain_init(chain, attr); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to initialize chain\n"); + goto chain_init_err; + } + + err = api_cmd_create_cells(chain); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to create cells for API CMD chain\n"); + goto create_cells_err; + } + + err = api_cmd_chain_hw_init(chain); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to 
initialize chain HW\n"); + goto chain_hw_init_err; + } + + *cmd_chain = chain; + return 0; + +chain_hw_init_err: +create_cells_err: + api_chain_free(chain); + +chain_init_err: + kfree(chain); + return err; +} + +/** + * api_cmd_destroy_chain - destroy API CMD specific chain + * @chain: the API CMD specific chain to destroy + **/ +static void api_cmd_destroy_chain(struct hinic3_api_cmd_chain *chain) +{ + api_chain_free(chain); + kfree(chain); +} + +/** + * hinic3_api_cmd_init - Initialize all the API CMD chains + * @hwif: the hardware interface of a pci function device + * @chain: the API CMD chains that will be initialized + * Return: 0 - success, negative - failure + **/ +int hinic3_api_cmd_init(struct hinic3_hwdev *hwdev, + struct hinic3_api_cmd_chain **chain) +{ + void *dev = hwdev->dev_hdl; + struct hinic3_api_cmd_chain_attr attr; + u8 chain_type, i; + int err; + + if (COMM_SUPPORT_API_CHAIN(hwdev) == 0) + return 0; + + attr.hwdev = hwdev; + attr.num_cells = API_CHAIN_NUM_CELLS; + attr.cell_size = API_CHAIN_CELL_SIZE; + attr.rsp_size = API_CHAIN_RSP_DATA_SIZE; + + chain_type = HINIC3_API_CMD_WRITE_TO_MGMT_CPU; + for (; chain_type < HINIC3_API_CMD_MAX; chain_type++) { + attr.chain_type = chain_type; + + err = api_cmd_create_chain(&chain[chain_type], &attr); + if (err) { + sdk_err(dev, "Failed to create chain %d\n", chain_type); + goto create_chain_err; + } + } + + return 0; + +create_chain_err: + i = HINIC3_API_CMD_WRITE_TO_MGMT_CPU; + for (; i < chain_type; i++) + api_cmd_destroy_chain(chain[i]); + + return err; +} + +/** + * hinic3_api_cmd_free - free the API CMD chains + * @chain: the API CMD chains that will be freed + **/ +void hinic3_api_cmd_free(const struct hinic3_hwdev *hwdev, struct hinic3_api_cmd_chain **chain) +{ + u8 chain_type; + + if (COMM_SUPPORT_API_CHAIN(hwdev) == 0) + return; + + chain_type = HINIC3_API_CMD_WRITE_TO_MGMT_CPU; + + for (; chain_type < HINIC3_API_CMD_MAX; chain_type++) + api_cmd_destroy_chain(chain[chain_type]); +} + diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_api_cmd.h b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_api_cmd.h new file mode 100644 index 000000000000..727e668bf237 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_api_cmd.h @@ -0,0 +1,286 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC3_API_CMD_H +#define HINIC3_API_CMD_H + +#include <linux/semaphore.h> + +#include "hinic3_eqs.h" +#include "hinic3_hwif.h" + +/* api_cmd_cell.ctrl structure */ +#define HINIC3_API_CMD_CELL_CTRL_CELL_LEN_SHIFT 0 +#define HINIC3_API_CMD_CELL_CTRL_RD_DMA_ATTR_OFF_SHIFT 16 +#define HINIC3_API_CMD_CELL_CTRL_WR_DMA_ATTR_OFF_SHIFT 24 +#define HINIC3_API_CMD_CELL_CTRL_XOR_CHKSUM_SHIFT 56 + +#define HINIC3_API_CMD_CELL_CTRL_CELL_LEN_MASK 0x3FU +#define HINIC3_API_CMD_CELL_CTRL_RD_DMA_ATTR_OFF_MASK 0x3FU +#define HINIC3_API_CMD_CELL_CTRL_WR_DMA_ATTR_OFF_MASK 0x3FU +#define HINIC3_API_CMD_CELL_CTRL_XOR_CHKSUM_MASK 0xFFU + +#define HINIC3_API_CMD_CELL_CTRL_SET(val, member) \ + ((((u64)(val)) & HINIC3_API_CMD_CELL_CTRL_##member##_MASK) << \ + HINIC3_API_CMD_CELL_CTRL_##member##_SHIFT) + +/* api_cmd_cell.desc structure */ +#define HINIC3_API_CMD_DESC_API_TYPE_SHIFT 0 +#define HINIC3_API_CMD_DESC_RD_WR_SHIFT 1 +#define HINIC3_API_CMD_DESC_MGMT_BYPASS_SHIFT 2 +#define HINIC3_API_CMD_DESC_RESP_AEQE_EN_SHIFT 3 +#define HINIC3_API_CMD_DESC_APICHN_RSVD_SHIFT 4 +#define HINIC3_API_CMD_DESC_APICHN_CODE_SHIFT 6 +#define HINIC3_API_CMD_DESC_PRIV_DATA_SHIFT 8 +#define 
HINIC3_API_CMD_DESC_DEST_SHIFT 32 +#define HINIC3_API_CMD_DESC_SIZE_SHIFT 40 +#define HINIC3_API_CMD_DESC_XOR_CHKSUM_SHIFT 56 + +#define HINIC3_API_CMD_DESC_API_TYPE_MASK 0x1U +#define HINIC3_API_CMD_DESC_RD_WR_MASK 0x1U +#define HINIC3_API_CMD_DESC_MGMT_BYPASS_MASK 0x1U +#define HINIC3_API_CMD_DESC_RESP_AEQE_EN_MASK 0x1U +#define HINIC3_API_CMD_DESC_APICHN_RSVD_MASK 0x3U +#define HINIC3_API_CMD_DESC_APICHN_CODE_MASK 0x3U +#define HINIC3_API_CMD_DESC_PRIV_DATA_MASK 0xFFFFFFU +#define HINIC3_API_CMD_DESC_DEST_MASK 0x1FU +#define HINIC3_API_CMD_DESC_SIZE_MASK 0x7FFU +#define HINIC3_API_CMD_DESC_XOR_CHKSUM_MASK 0xFFU + +#define HINIC3_API_CMD_DESC_SET(val, member) \ + ((((u64)(val)) & HINIC3_API_CMD_DESC_##member##_MASK) << \ + HINIC3_API_CMD_DESC_##member##_SHIFT) + +/* api_cmd_status header */ +#define HINIC3_API_CMD_STATUS_HEADER_VALID_SHIFT 0 +#define HINIC3_API_CMD_STATUS_HEADER_CHAIN_ID_SHIFT 16 + +#define HINIC3_API_CMD_STATUS_HEADER_VALID_MASK 0xFFU +#define HINIC3_API_CMD_STATUS_HEADER_CHAIN_ID_MASK 0xFFU + +#define HINIC3_API_CMD_STATUS_HEADER_GET(val, member) \ + (((val) >> HINIC3_API_CMD_STATUS_HEADER_##member##_SHIFT) & \ + HINIC3_API_CMD_STATUS_HEADER_##member##_MASK) + +/* API_CHAIN_REQ CSR: 0x0020+api_idx*0x080 */ +#define HINIC3_API_CMD_CHAIN_REQ_RESTART_SHIFT 1 +#define HINIC3_API_CMD_CHAIN_REQ_WB_TRIGGER_SHIFT 2 + +#define HINIC3_API_CMD_CHAIN_REQ_RESTART_MASK 0x1U +#define HINIC3_API_CMD_CHAIN_REQ_WB_TRIGGER_MASK 0x1U + +#define HINIC3_API_CMD_CHAIN_REQ_SET(val, member) \ + (((val) & HINIC3_API_CMD_CHAIN_REQ_##member##_MASK) << \ + HINIC3_API_CMD_CHAIN_REQ_##member##_SHIFT) + +#define HINIC3_API_CMD_CHAIN_REQ_GET(val, member) \ + (((val) >> HINIC3_API_CMD_CHAIN_REQ_##member##_SHIFT) & \ + HINIC3_API_CMD_CHAIN_REQ_##member##_MASK) + +#define HINIC3_API_CMD_CHAIN_REQ_CLEAR(val, member) \ + ((val) & (~(HINIC3_API_CMD_CHAIN_REQ_##member##_MASK \ + << HINIC3_API_CMD_CHAIN_REQ_##member##_SHIFT))) + +/* API_CHAIN_CTL CSR: 0x0014+api_idx*0x080 */ +#define HINIC3_API_CMD_CHAIN_CTRL_RESTART_EN_SHIFT 1 +#define HINIC3_API_CMD_CHAIN_CTRL_XOR_ERR_SHIFT 2 +#define HINIC3_API_CMD_CHAIN_CTRL_AEQE_EN_SHIFT 4 +#define HINIC3_API_CMD_CHAIN_CTRL_AEQ_ID_SHIFT 8 +#define HINIC3_API_CMD_CHAIN_CTRL_XOR_CHK_EN_SHIFT 28 +#define HINIC3_API_CMD_CHAIN_CTRL_CELL_SIZE_SHIFT 30 + +#define HINIC3_API_CMD_CHAIN_CTRL_RESTART_EN_MASK 0x1U +#define HINIC3_API_CMD_CHAIN_CTRL_XOR_ERR_MASK 0x1U +#define HINIC3_API_CMD_CHAIN_CTRL_AEQE_EN_MASK 0x1U +#define HINIC3_API_CMD_CHAIN_CTRL_AEQ_ID_MASK 0x3U +#define HINIC3_API_CMD_CHAIN_CTRL_XOR_CHK_EN_MASK 0x3U +#define HINIC3_API_CMD_CHAIN_CTRL_CELL_SIZE_MASK 0x3U + +#define HINIC3_API_CMD_CHAIN_CTRL_SET(val, member) \ + (((val) & HINIC3_API_CMD_CHAIN_CTRL_##member##_MASK) << \ + HINIC3_API_CMD_CHAIN_CTRL_##member##_SHIFT) + +#define HINIC3_API_CMD_CHAIN_CTRL_CLEAR(val, member) \ + ((val) & (~(HINIC3_API_CMD_CHAIN_CTRL_##member##_MASK \ + << HINIC3_API_CMD_CHAIN_CTRL_##member##_SHIFT))) + +/* api_cmd rsp header */ +#define HINIC3_API_CMD_RESP_HEAD_VALID_SHIFT 0 +#define HINIC3_API_CMD_RESP_HEAD_STATUS_SHIFT 8 +#define HINIC3_API_CMD_RESP_HEAD_CHAIN_ID_SHIFT 16 +#define HINIC3_API_CMD_RESP_HEAD_RESP_LEN_SHIFT 24 +#define HINIC3_API_CMD_RESP_HEAD_DRIVER_PRIV_SHIFT 40 + +#define HINIC3_API_CMD_RESP_HEAD_VALID_MASK 0xFF +#define HINIC3_API_CMD_RESP_HEAD_STATUS_MASK 0xFFU +#define HINIC3_API_CMD_RESP_HEAD_CHAIN_ID_MASK 0xFFU +#define HINIC3_API_CMD_RESP_HEAD_RESP_LEN_MASK 0x1FFU +#define HINIC3_API_CMD_RESP_HEAD_DRIVER_PRIV_MASK 0xFFFFFFU + +#define 
HINIC3_API_CMD_RESP_HEAD_VALID_CODE 0xFF + +#define HINIC3_API_CMD_RESP_HEADER_VALID(val) \ + (((val) & HINIC3_API_CMD_RESP_HEAD_VALID_MASK) == \ + HINIC3_API_CMD_RESP_HEAD_VALID_CODE) + +#define HINIC3_API_CMD_RESP_HEAD_GET(val, member) \ + (((val) >> HINIC3_API_CMD_RESP_HEAD_##member##_SHIFT) & \ + HINIC3_API_CMD_RESP_HEAD_##member##_MASK) + +#define HINIC3_API_CMD_RESP_HEAD_CHAIN_ID(val) \ + (((val) >> HINIC3_API_CMD_RESP_HEAD_CHAIN_ID_SHIFT) & \ + HINIC3_API_CMD_RESP_HEAD_CHAIN_ID_MASK) + +#define HINIC3_API_CMD_RESP_HEAD_DRIVER_PRIV(val) \ + ((u16)(((val) >> HINIC3_API_CMD_RESP_HEAD_DRIVER_PRIV_SHIFT) & \ + HINIC3_API_CMD_RESP_HEAD_DRIVER_PRIV_MASK)) +/* API_STATUS_0 CSR: 0x0030+api_idx*0x080 */ +#define HINIC3_API_CMD_STATUS_CONS_IDX_MASK 0xFFFFFFU +#define HINIC3_API_CMD_STATUS_CONS_IDX_SHIFT 0 + +#define HINIC3_API_CMD_STATUS_FSM_MASK 0xFU +#define HINIC3_API_CMD_STATUS_FSM_SHIFT 24 + +#define HINIC3_API_CMD_STATUS_CHKSUM_ERR_MASK 0x3U +#define HINIC3_API_CMD_STATUS_CHKSUM_ERR_SHIFT 28 + +#define HINIC3_API_CMD_STATUS_CPLD_ERR_MASK 0x1U +#define HINIC3_API_CMD_STATUS_CPLD_ERR_SHIFT 30 + +#define HINIC3_API_CMD_STATUS_CONS_IDX(val) \ + ((val) & HINIC3_API_CMD_STATUS_CONS_IDX_MASK) + +#define HINIC3_API_CMD_STATUS_CHKSUM_ERR(val) \ + (((val) >> HINIC3_API_CMD_STATUS_CHKSUM_ERR_SHIFT) & \ + HINIC3_API_CMD_STATUS_CHKSUM_ERR_MASK) + +#define HINIC3_API_CMD_STATUS_GET(val, member) \ + (((val) >> HINIC3_API_CMD_STATUS_##member##_SHIFT) & \ + HINIC3_API_CMD_STATUS_##member##_MASK) + +enum hinic3_api_cmd_chain_type { + /* write to mgmt cpu command with completion */ + HINIC3_API_CMD_WRITE_TO_MGMT_CPU = 2, + /* multi read command with completion notification - not used */ + HINIC3_API_CMD_MULTI_READ = 3, + /* write command without completion notification */ + HINIC3_API_CMD_POLL_WRITE = 4, + /* read command without completion notification */ + HINIC3_API_CMD_POLL_READ = 5, + /* read from mgmt cpu command with completion */ + HINIC3_API_CMD_WRITE_ASYNC_TO_MGMT_CPU = 6, + HINIC3_API_CMD_MAX, +}; + +struct hinic3_api_cmd_status { + u64 header; + u32 buf_desc; + u32 cell_addr_hi; + u32 cell_addr_lo; + u32 rsvd0; + u64 rsvd1; +}; + +/* HW struct */ +struct hinic3_api_cmd_cell { + u64 ctrl; + + /* address is 64 bit in HW struct */ + u64 next_cell_paddr; + + u64 desc; + + /* HW struct */ + union { + struct { + u64 hw_cmd_paddr; + } write; + + struct { + u64 hw_wb_resp_paddr; + u64 hw_cmd_paddr; + } read; + }; +}; + +struct hinic3_api_cmd_resp_fmt { + u64 header; + u64 resp_data; +}; + +struct hinic3_api_cmd_cell_ctxt { + struct hinic3_api_cmd_cell *cell_vaddr; + + void *api_cmd_vaddr; + + struct hinic3_api_cmd_resp_fmt *resp; + + struct completion done; + int status; + + u32 saved_prod_idx; + struct hinic3_hwdev *hwdev; +}; + +struct hinic3_api_cmd_chain_attr { + struct hinic3_hwdev *hwdev; + enum hinic3_api_cmd_chain_type chain_type; + + u32 num_cells; + u16 rsp_size; + u16 cell_size; +}; + +struct hinic3_api_cmd_chain { + struct hinic3_hwdev *hwdev; + enum hinic3_api_cmd_chain_type chain_type; + + u32 num_cells; + u16 cell_size; + u16 rsp_size; + u32 rsvd1; + + /* HW members is 24 bit format */ + u32 prod_idx; + u32 cons_idx; + + struct semaphore sem; + /* Async cmd can not be scheduling */ + spinlock_t async_lock; + + dma_addr_t wb_status_paddr; + struct hinic3_api_cmd_status *wb_status; + + dma_addr_t head_cell_paddr; + struct hinic3_api_cmd_cell *head_node; + + struct hinic3_api_cmd_cell_ctxt *cell_ctxt; + struct hinic3_api_cmd_cell *curr_node; + + struct hinic3_dma_addr_align cells_addr; + + 
u8 *cell_vaddr_base; + u64 cell_paddr_base; + u8 *rsp_vaddr_base; + u64 rsp_paddr_base; + u8 *buf_vaddr_base; + u64 buf_paddr_base; + u64 cell_size_align; + u64 rsp_size_align; + u64 buf_size_align; + + u64 rsvd2; +}; + +int hinic3_api_cmd_write(struct hinic3_api_cmd_chain *chain, u8 node_id, + const void *cmd, u16 size); + +int hinic3_api_cmd_read(struct hinic3_api_cmd_chain *chain, u8 node_id, + const void *cmd, u16 size, void *ack, u16 ack_size); + +int hinic3_api_cmd_init(struct hinic3_hwdev *hwdev, + struct hinic3_api_cmd_chain **chain); + +void hinic3_api_cmd_free(const struct hinic3_hwdev *hwdev, struct hinic3_api_cmd_chain **chain); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_cmdq.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_cmdq.c new file mode 100644 index 000000000000..230859adf0b2 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_cmdq.c @@ -0,0 +1,1543 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/pci.h> +#include <linux/errno.h> +#include <linux/completion.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/spinlock.h> +#include <linux/slab.h> +#include <linux/module.h> + +#include "ossl_knl.h" +#include "hinic3_crm.h" +#include "hinic3_hw.h" +#include "hinic3_hwdev.h" +#include "hinic3_eqs.h" +#include "hinic3_common.h" +#include "hinic3_wq.h" +#include "hinic3_hw_comm.h" +#include "hinic3_hwif.h" +#include "hinic3_cmdq.h" + +#define HINIC3_CMDQ_BUF_SIZE 2048U + +#define CMDQ_CMD_TIMEOUT 5000 /* millisecond */ + +#define UPPER_8_BITS(data) (((data) >> 8) & 0xFF) +#define LOWER_8_BITS(data) ((data) & 0xFF) + +#define CMDQ_DB_INFO_HI_PROD_IDX_SHIFT 0 +#define CMDQ_DB_INFO_HI_PROD_IDX_MASK 0xFFU +#define CMDQ_DB_INFO_SET(val, member) \ + ((((u32)(val)) & CMDQ_DB_INFO_##member##_MASK) << \ + CMDQ_DB_INFO_##member##_SHIFT) + +#define CMDQ_DB_HEAD_QUEUE_TYPE_SHIFT 23 +#define CMDQ_DB_HEAD_CMDQ_TYPE_SHIFT 24 +#define CMDQ_DB_HEAD_SRC_TYPE_SHIFT 27 +#define CMDQ_DB_HEAD_QUEUE_TYPE_MASK 0x1U +#define CMDQ_DB_HEAD_CMDQ_TYPE_MASK 0x7U +#define CMDQ_DB_HEAD_SRC_TYPE_MASK 0x1FU +#define CMDQ_DB_HEAD_SET(val, member) \ + ((((u32)(val)) & CMDQ_DB_HEAD_##member##_MASK) << \ + CMDQ_DB_HEAD_##member##_SHIFT) + +#define CMDQ_CTRL_PI_SHIFT 0 +#define CMDQ_CTRL_CMD_SHIFT 16 +#define CMDQ_CTRL_MOD_SHIFT 24 +#define CMDQ_CTRL_ACK_TYPE_SHIFT 29 +#define CMDQ_CTRL_HW_BUSY_BIT_SHIFT 31 + +#define CMDQ_CTRL_PI_MASK 0xFFFFU +#define CMDQ_CTRL_CMD_MASK 0xFFU +#define CMDQ_CTRL_MOD_MASK 0x1FU +#define CMDQ_CTRL_ACK_TYPE_MASK 0x3U +#define CMDQ_CTRL_HW_BUSY_BIT_MASK 0x1U + +#define CMDQ_CTRL_SET(val, member) \ + ((((u32)(val)) & CMDQ_CTRL_##member##_MASK) << \ + CMDQ_CTRL_##member##_SHIFT) + +#define CMDQ_CTRL_GET(val, member) \ + (((val) >> CMDQ_CTRL_##member##_SHIFT) & \ + CMDQ_CTRL_##member##_MASK) + +#define CMDQ_WQE_HEADER_BUFDESC_LEN_SHIFT 0 +#define CMDQ_WQE_HEADER_COMPLETE_FMT_SHIFT 15 +#define CMDQ_WQE_HEADER_DATA_FMT_SHIFT 22 +#define CMDQ_WQE_HEADER_COMPLETE_REQ_SHIFT 23 +#define CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_SHIFT 27 +#define CMDQ_WQE_HEADER_CTRL_LEN_SHIFT 29 +#define CMDQ_WQE_HEADER_HW_BUSY_BIT_SHIFT 31 + +#define CMDQ_WQE_HEADER_BUFDESC_LEN_MASK 0xFFU +#define CMDQ_WQE_HEADER_COMPLETE_FMT_MASK 0x1U +#define CMDQ_WQE_HEADER_DATA_FMT_MASK 0x1U +#define CMDQ_WQE_HEADER_COMPLETE_REQ_MASK 0x1U +#define 
CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_MASK 0x3U +#define CMDQ_WQE_HEADER_CTRL_LEN_MASK 0x3U +#define CMDQ_WQE_HEADER_HW_BUSY_BIT_MASK 0x1U + +#define CMDQ_WQE_HEADER_SET(val, member) \ + ((((u32)(val)) & CMDQ_WQE_HEADER_##member##_MASK) << \ + CMDQ_WQE_HEADER_##member##_SHIFT) + +#define CMDQ_WQE_HEADER_GET(val, member) \ + (((val) >> CMDQ_WQE_HEADER_##member##_SHIFT) & \ + CMDQ_WQE_HEADER_##member##_MASK) + +#define CMDQ_CTXT_CURR_WQE_PAGE_PFN_SHIFT 0 +#define CMDQ_CTXT_EQ_ID_SHIFT 53 +#define CMDQ_CTXT_CEQ_ARM_SHIFT 61 +#define CMDQ_CTXT_CEQ_EN_SHIFT 62 +#define CMDQ_CTXT_HW_BUSY_BIT_SHIFT 63 + +#define CMDQ_CTXT_CURR_WQE_PAGE_PFN_MASK 0xFFFFFFFFFFFFF +#define CMDQ_CTXT_EQ_ID_MASK 0xFF +#define CMDQ_CTXT_CEQ_ARM_MASK 0x1 +#define CMDQ_CTXT_CEQ_EN_MASK 0x1 +#define CMDQ_CTXT_HW_BUSY_BIT_MASK 0x1 + +#define CMDQ_CTXT_PAGE_INFO_SET(val, member) \ + (((u64)(val) & CMDQ_CTXT_##member##_MASK) << \ + CMDQ_CTXT_##member##_SHIFT) + +#define CMDQ_CTXT_PAGE_INFO_GET(val, member) \ + (((u64)(val) >> CMDQ_CTXT_##member##_SHIFT) & \ + CMDQ_CTXT_##member##_MASK) + +#define CMDQ_CTXT_WQ_BLOCK_PFN_SHIFT 0 +#define CMDQ_CTXT_CI_SHIFT 52 + +#define CMDQ_CTXT_WQ_BLOCK_PFN_MASK 0xFFFFFFFFFFFFF +#define CMDQ_CTXT_CI_MASK 0xFFF + +#define CMDQ_CTXT_BLOCK_INFO_SET(val, member) \ + (((u64)(val) & CMDQ_CTXT_##member##_MASK) << \ + CMDQ_CTXT_##member##_SHIFT) + +#define CMDQ_CTXT_BLOCK_INFO_GET(val, member) \ + (((u64)(val) >> CMDQ_CTXT_##member##_SHIFT) & \ + CMDQ_CTXT_##member##_MASK) + +#define SAVED_DATA_ARM_SHIFT 31 + +#define SAVED_DATA_ARM_MASK 0x1U + +#define SAVED_DATA_SET(val, member) \ + (((val) & SAVED_DATA_##member##_MASK) << \ + SAVED_DATA_##member##_SHIFT) + +#define SAVED_DATA_CLEAR(val, member) \ + ((val) & (~(SAVED_DATA_##member##_MASK << \ + SAVED_DATA_##member##_SHIFT))) + +#define WQE_ERRCODE_VAL_SHIFT 0 + +#define WQE_ERRCODE_VAL_MASK 0x7FFFFFFF + +#define WQE_ERRCODE_GET(val, member) \ + (((val) >> WQE_ERRCODE_##member##_SHIFT) & \ + WQE_ERRCODE_##member##_MASK) + +#define CEQE_CMDQ_TYPE_SHIFT 0 + +#define CEQE_CMDQ_TYPE_MASK 0x7 + +#define CEQE_CMDQ_GET(val, member) \ + (((val) >> CEQE_CMDQ_##member##_SHIFT) & \ + CEQE_CMDQ_##member##_MASK) + +#define WQE_COMPLETED(ctrl_info) CMDQ_CTRL_GET(ctrl_info, HW_BUSY_BIT) + +#define WQE_HEADER(wqe) ((struct hinic3_cmdq_header *)(wqe)) + +#define CMDQ_DB_PI_OFF(pi) (((u16)LOWER_8_BITS(pi)) << 3) + +#define CMDQ_DB_ADDR(db_base, pi) \ + (((u8 *)(db_base)) + CMDQ_DB_PI_OFF(pi)) + +#define CMDQ_PFN_SHIFT 12 +#define CMDQ_PFN(addr) ((addr) >> CMDQ_PFN_SHIFT) + +#define FIRST_DATA_TO_WRITE_LAST sizeof(u64) + +#define WQE_LCMD_SIZE 64 +#define WQE_SCMD_SIZE 64 + +#define COMPLETE_LEN 3 + +#define CMDQ_WQEBB_SIZE 64 +#define CMDQ_WQE_SIZE 64 + +#define cmdq_to_cmdqs(cmdq) container_of((cmdq) - (cmdq)->cmdq_type, \ + struct hinic3_cmdqs, cmdq[0]) + +#define CMDQ_SEND_CMPT_CODE 10 +#define CMDQ_COMPLETE_CMPT_CODE 11 +#define CMDQ_FORCE_STOP_CMPT_CODE 12 + +enum cmdq_scmd_type { + CMDQ_SET_ARM_CMD = 2, +}; + +enum cmdq_wqe_type { + WQE_LCMD_TYPE, + WQE_SCMD_TYPE, +}; + +enum ctrl_sect_len { + CTRL_SECT_LEN = 1, + CTRL_DIRECT_SECT_LEN = 2, +}; + +enum bufdesc_len { + BUFDESC_LCMD_LEN = 2, + BUFDESC_SCMD_LEN = 3, +}; + +enum data_format { + DATA_SGE, + DATA_DIRECT, +}; + +enum completion_format { + COMPLETE_DIRECT, + COMPLETE_SGE, +}; + +enum completion_request { + CEQ_SET = 1, +}; + +enum cmdq_cmd_type { + SYNC_CMD_DIRECT_RESP, + SYNC_CMD_SGE_RESP, + ASYNC_CMD, +}; + +#define NUM_WQEBBS_FOR_CMDQ_WQE 1 + +bool hinic3_cmdq_idle(struct hinic3_cmdq *cmdq) +{ + return 
hinic3_wq_is_empty(&cmdq->wq); +} + +static void *cmdq_read_wqe(struct hinic3_wq *wq, u16 *ci) +{ + if (hinic3_wq_is_empty(wq)) + return NULL; + + return hinic3_wq_read_one_wqebb(wq, ci); +} + +static void *cmdq_get_wqe(struct hinic3_wq *wq, u16 *pi) +{ + if (!hinic3_wq_free_wqebbs(wq)) + return NULL; + + return hinic3_wq_get_one_wqebb(wq, pi); +} + +struct hinic3_cmd_buf *hinic3_alloc_cmd_buf(void *hwdev) +{ + struct hinic3_cmdqs *cmdqs = NULL; + struct hinic3_cmd_buf *cmd_buf = NULL; + void *dev = NULL; + + if (!hwdev) { + pr_err("Failed to alloc cmd buf, Invalid hwdev\n"); + return NULL; + } + + cmdqs = ((struct hinic3_hwdev *)hwdev)->cmdqs; + dev = ((struct hinic3_hwdev *)hwdev)->dev_hdl; + + cmd_buf = kzalloc(sizeof(*cmd_buf), GFP_ATOMIC); + if (!cmd_buf) { + sdk_err(dev, "Failed to allocate cmd buf\n"); + return NULL; + } + + cmd_buf->buf = pci_pool_alloc(cmdqs->cmd_buf_pool, GFP_ATOMIC, + &cmd_buf->dma_addr); + if (!cmd_buf->buf) { + sdk_err(dev, "Failed to allocate cmdq cmd buf from the pool\n"); + goto alloc_pci_buf_err; + } + + cmd_buf->size = HINIC3_CMDQ_BUF_SIZE; + atomic_set(&cmd_buf->ref_cnt, 1); + + return cmd_buf; + +alloc_pci_buf_err: + kfree(cmd_buf); + return NULL; +} +EXPORT_SYMBOL(hinic3_alloc_cmd_buf); + +void hinic3_free_cmd_buf(void *hwdev, struct hinic3_cmd_buf *cmd_buf) +{ + struct hinic3_cmdqs *cmdqs = NULL; + + if (!hwdev || !cmd_buf) { + pr_err("Failed to free cmd buf, hwdev or cmd_buf is NULL\n"); + return; + } + + if (!atomic_dec_and_test(&cmd_buf->ref_cnt)) + return; + + cmdqs = ((struct hinic3_hwdev *)hwdev)->cmdqs; + + pci_pool_free(cmdqs->cmd_buf_pool, cmd_buf->buf, cmd_buf->dma_addr); + kfree(cmd_buf); +} +EXPORT_SYMBOL(hinic3_free_cmd_buf); + +static void cmdq_set_completion(struct hinic3_cmdq_completion *complete, + struct hinic3_cmd_buf *buf_out) +{ + struct hinic3_sge_resp *sge_resp = &complete->sge_resp; + + hinic3_set_sge(&sge_resp->sge, buf_out->dma_addr, + HINIC3_CMDQ_BUF_SIZE); +} + +static void cmdq_set_lcmd_bufdesc(struct hinic3_cmdq_wqe_lcmd *wqe, + struct hinic3_cmd_buf *buf_in) +{ + hinic3_set_sge(&wqe->buf_desc.sge, buf_in->dma_addr, buf_in->size); +} + +static void cmdq_fill_db(struct hinic3_cmdq_db *db, + enum hinic3_cmdq_type cmdq_type, u16 prod_idx) +{ + db->db_info = CMDQ_DB_INFO_SET(UPPER_8_BITS(prod_idx), HI_PROD_IDX); + + db->db_head = CMDQ_DB_HEAD_SET(HINIC3_DB_CMDQ_TYPE, QUEUE_TYPE) | + CMDQ_DB_HEAD_SET(cmdq_type, CMDQ_TYPE) | + CMDQ_DB_HEAD_SET(HINIC3_DB_SRC_CMDQ_TYPE, SRC_TYPE); +} + +static void cmdq_set_db(struct hinic3_cmdq *cmdq, + enum hinic3_cmdq_type cmdq_type, u16 prod_idx) +{ + struct hinic3_cmdq_db db = {0}; + u8 *db_base = cmdq->hwdev->cmdqs->cmdqs_db_base; + + cmdq_fill_db(&db, cmdq_type, prod_idx); + + /* The data that is written to HW should be in Big Endian Format */ + db.db_info = hinic3_hw_be32(db.db_info); + db.db_head = hinic3_hw_be32(db.db_head); + + wmb(); /* write all before the doorbell */ + writeq(*((u64 *)&db), CMDQ_DB_ADDR(db_base, prod_idx)); +} + +static void cmdq_wqe_fill(void *dst, const void *src) +{ + memcpy((u8 *)dst + FIRST_DATA_TO_WRITE_LAST, + (u8 *)src + FIRST_DATA_TO_WRITE_LAST, + CMDQ_WQE_SIZE - FIRST_DATA_TO_WRITE_LAST); + + wmb(); /* The first 8 bytes should be written last */ + + *(u64 *)dst = *(u64 *)src; +} + +static void cmdq_prepare_wqe_ctrl(struct hinic3_cmdq_wqe *wqe, int wrapped, + u8 mod, u8 cmd, u16 prod_idx, + enum completion_format complete_format, + enum data_format data_format, + enum bufdesc_len buf_len) +{ + struct hinic3_ctrl *ctrl = NULL; + enum ctrl_sect_len ctrl_len; 
+ struct hinic3_cmdq_wqe_lcmd *wqe_lcmd = NULL; + struct hinic3_cmdq_wqe_scmd *wqe_scmd = NULL; + u32 saved_data = WQE_HEADER(wqe)->saved_data; + + if (data_format == DATA_SGE) { + wqe_lcmd = &wqe->wqe_lcmd; + + wqe_lcmd->status.status_info = 0; + ctrl = &wqe_lcmd->ctrl; + ctrl_len = CTRL_SECT_LEN; + } else { + wqe_scmd = &wqe->inline_wqe.wqe_scmd; + + wqe_scmd->status.status_info = 0; + ctrl = &wqe_scmd->ctrl; + ctrl_len = CTRL_DIRECT_SECT_LEN; + } + + ctrl->ctrl_info = CMDQ_CTRL_SET(prod_idx, PI) | + CMDQ_CTRL_SET(cmd, CMD) | + CMDQ_CTRL_SET(mod, MOD) | + CMDQ_CTRL_SET(HINIC3_ACK_TYPE_CMDQ, ACK_TYPE); + + WQE_HEADER(wqe)->header_info = + CMDQ_WQE_HEADER_SET(buf_len, BUFDESC_LEN) | + CMDQ_WQE_HEADER_SET(complete_format, COMPLETE_FMT) | + CMDQ_WQE_HEADER_SET(data_format, DATA_FMT) | + CMDQ_WQE_HEADER_SET(CEQ_SET, COMPLETE_REQ) | + CMDQ_WQE_HEADER_SET(COMPLETE_LEN, COMPLETE_SECT_LEN) | + CMDQ_WQE_HEADER_SET(ctrl_len, CTRL_LEN) | + CMDQ_WQE_HEADER_SET((u32)wrapped, HW_BUSY_BIT); + + if (cmd == CMDQ_SET_ARM_CMD && mod == HINIC3_MOD_COMM) { + saved_data &= SAVED_DATA_CLEAR(saved_data, ARM); + WQE_HEADER(wqe)->saved_data = saved_data | + SAVED_DATA_SET(1, ARM); + } else { + saved_data &= SAVED_DATA_CLEAR(saved_data, ARM); + WQE_HEADER(wqe)->saved_data = saved_data; + } +} + +static void cmdq_set_lcmd_wqe(struct hinic3_cmdq_wqe *wqe, + enum cmdq_cmd_type cmd_type, + struct hinic3_cmd_buf *buf_in, + struct hinic3_cmd_buf *buf_out, int wrapped, + u8 mod, u8 cmd, u16 prod_idx) +{ + struct hinic3_cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd; + enum completion_format complete_format = COMPLETE_DIRECT; + + switch (cmd_type) { + case SYNC_CMD_DIRECT_RESP: + wqe_lcmd->completion.direct_resp = 0; + break; + case SYNC_CMD_SGE_RESP: + if (buf_out) { + complete_format = COMPLETE_SGE; + cmdq_set_completion(&wqe_lcmd->completion, + buf_out); + } + break; + case ASYNC_CMD: + wqe_lcmd->completion.direct_resp = 0; + wqe_lcmd->buf_desc.saved_async_buf = (u64)(buf_in); + break; + } + + cmdq_prepare_wqe_ctrl(wqe, wrapped, mod, cmd, prod_idx, complete_format, + DATA_SGE, BUFDESC_LCMD_LEN); + + cmdq_set_lcmd_bufdesc(wqe_lcmd, buf_in); +} + +static void cmdq_update_cmd_status(struct hinic3_cmdq *cmdq, u16 prod_idx, + struct hinic3_cmdq_wqe *wqe) +{ + struct hinic3_cmdq_cmd_info *cmd_info; + struct hinic3_cmdq_wqe_lcmd *wqe_lcmd; + u32 status_info; + + wqe_lcmd = &wqe->wqe_lcmd; + cmd_info = &cmdq->cmd_infos[prod_idx]; + + if (cmd_info->errcode) { + status_info = hinic3_hw_cpu32(wqe_lcmd->status.status_info); + *cmd_info->errcode = WQE_ERRCODE_GET(status_info, VAL); + } + + if (cmd_info->direct_resp) + *cmd_info->direct_resp = + hinic3_hw_cpu32(wqe_lcmd->completion.direct_resp); +} + +static int hinic3_cmdq_sync_timeout_check(struct hinic3_cmdq *cmdq, + struct hinic3_cmdq_wqe *wqe, u16 pi) +{ + struct hinic3_cmdq_wqe_lcmd *wqe_lcmd; + struct hinic3_ctrl *ctrl; + u32 ctrl_info; + + wqe_lcmd = &wqe->wqe_lcmd; + ctrl = &wqe_lcmd->ctrl; + ctrl_info = hinic3_hw_cpu32((ctrl)->ctrl_info); + if (!WQE_COMPLETED(ctrl_info)) { + sdk_info(cmdq->hwdev->dev_hdl, "Cmdq sync command check: busy bit not set\n"); + return -EFAULT; + } + + cmdq_update_cmd_status(cmdq, pi, wqe); + + sdk_info(cmdq->hwdev->dev_hdl, "Cmdq sync command check succeeded\n"); + return 0; +} + +static void clear_cmd_info(struct hinic3_cmdq_cmd_info *cmd_info, + const struct hinic3_cmdq_cmd_info *saved_cmd_info) +{ + if (cmd_info->errcode == saved_cmd_info->errcode) + cmd_info->errcode = NULL; + + if (cmd_info->done == saved_cmd_info->done) + cmd_info->done = NULL; + +
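+ /* Clear only the pointers that still reference the submitter's + * on-stack copies, so that a late CEQ completion cannot write + * through stale pointers into an abandoned stack frame. + */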
if (cmd_info->direct_resp == saved_cmd_info->direct_resp) + cmd_info->direct_resp = NULL; +} + +static int cmdq_ceq_handler_status(struct hinic3_cmdq *cmdq, + struct hinic3_cmdq_cmd_info *cmd_info, + struct hinic3_cmdq_cmd_info *saved_cmd_info, + u64 curr_msg_id, u16 curr_prod_idx, + struct hinic3_cmdq_wqe *curr_wqe, + u32 timeout) +{ + ulong timeo; + int err; + ulong end = jiffies + msecs_to_jiffies(timeout); + + if (cmdq->hwdev->poll) { + while (time_before(jiffies, end)) { + hinic3_cmdq_ceq_handler(cmdq->hwdev, 0); + if (saved_cmd_info->done->done != 0) + return 0; + usleep_range(9, 10); /* sleep 9 us ~ 10 us */ + } + } else { + timeo = msecs_to_jiffies(timeout); + if (wait_for_completion_timeout(saved_cmd_info->done, timeo)) + return 0; + } + + spin_lock_bh(&cmdq->cmdq_lock); + + if (cmd_info->cmpt_code == saved_cmd_info->cmpt_code) + cmd_info->cmpt_code = NULL; + + if (*saved_cmd_info->cmpt_code == CMDQ_COMPLETE_CMPT_CODE) { + sdk_info(cmdq->hwdev->dev_hdl, "Cmdq direct sync command has been completed\n"); + spin_unlock_bh(&cmdq->cmdq_lock); + return 0; + } + + if (curr_msg_id == cmd_info->cmdq_msg_id) { + err = hinic3_cmdq_sync_timeout_check(cmdq, curr_wqe, + curr_prod_idx); + if (err) + cmd_info->cmd_type = HINIC3_CMD_TYPE_TIMEOUT; + else + cmd_info->cmd_type = HINIC3_CMD_TYPE_FAKE_TIMEOUT; + } else { + err = -ETIMEDOUT; + sdk_err(cmdq->hwdev->dev_hdl, "Cmdq sync command current msg id does not match cmd_info msg id\n"); + } + + clear_cmd_info(cmd_info, saved_cmd_info); + + spin_unlock_bh(&cmdq->cmdq_lock); + + if (err == 0) + return 0; + + hinic3_dump_ceq_info(cmdq->hwdev); + + return -ETIMEDOUT; +} + +static int wait_cmdq_sync_cmd_completion(struct hinic3_cmdq *cmdq, + struct hinic3_cmdq_cmd_info *cmd_info, + struct hinic3_cmdq_cmd_info *saved_cmd_info, + u64 curr_msg_id, u16 curr_prod_idx, + struct hinic3_cmdq_wqe *curr_wqe, u32 timeout) +{ + return cmdq_ceq_handler_status(cmdq, cmd_info, saved_cmd_info, + curr_msg_id, curr_prod_idx, + curr_wqe, timeout); +} + +static int cmdq_msg_lock(struct hinic3_cmdq *cmdq, u16 channel) +{ + struct hinic3_cmdqs *cmdqs = cmdq_to_cmdqs(cmdq); + + /* Keep wrapped and doorbell index correct.
bh - for tasklet(ceq) */ + spin_lock_bh(&cmdq->cmdq_lock); + + if (cmdqs->lock_channel_en && test_bit(channel, &cmdqs->channel_stop)) { + spin_unlock_bh(&cmdq->cmdq_lock); + return -EAGAIN; + } + + return 0; +} + +static void cmdq_msg_unlock(struct hinic3_cmdq *cmdq) +{ + spin_unlock_bh(&cmdq->cmdq_lock); +} + +static void cmdq_clear_cmd_buf(struct hinic3_cmdq_cmd_info *cmd_info, + struct hinic3_hwdev *hwdev) +{ + if (cmd_info->buf_in) + hinic3_free_cmd_buf(hwdev, cmd_info->buf_in); + + if (cmd_info->buf_out) + hinic3_free_cmd_buf(hwdev, cmd_info->buf_out); + + cmd_info->buf_in = NULL; + cmd_info->buf_out = NULL; +} + +static void cmdq_set_cmd_buf(struct hinic3_cmdq_cmd_info *cmd_info, + struct hinic3_hwdev *hwdev, + struct hinic3_cmd_buf *buf_in, + struct hinic3_cmd_buf *buf_out) +{ + cmd_info->buf_in = buf_in; + cmd_info->buf_out = buf_out; + + if (buf_in) + atomic_inc(&buf_in->ref_cnt); + + if (buf_out) + atomic_inc(&buf_out->ref_cnt); +} + +static int cmdq_sync_cmd_direct_resp(struct hinic3_cmdq *cmdq, u8 mod, + u8 cmd, struct hinic3_cmd_buf *buf_in, + u64 *out_param, u32 timeout, u16 channel) +{ + struct hinic3_wq *wq = &cmdq->wq; + struct hinic3_cmdq_wqe *curr_wqe = NULL, wqe; + struct hinic3_cmdq_cmd_info *cmd_info = NULL, saved_cmd_info; + struct completion done; + u16 curr_prod_idx, next_prod_idx; + int wrapped, errcode = 0, wqe_size = WQE_LCMD_SIZE; + int cmpt_code = CMDQ_SEND_CMPT_CODE; + u64 curr_msg_id; + int err; + u32 real_timeout; + + err = cmdq_msg_lock(cmdq, channel); + if (err) + return err; + + curr_wqe = cmdq_get_wqe(wq, &curr_prod_idx); + if (!curr_wqe) { + cmdq_msg_unlock(cmdq); + return -EBUSY; + } + + memset(&wqe, 0, sizeof(wqe)); + + wrapped = cmdq->wrapped; + + next_prod_idx = curr_prod_idx + NUM_WQEBBS_FOR_CMDQ_WQE; + if (next_prod_idx >= wq->q_depth) { + cmdq->wrapped = (cmdq->wrapped == 0) ? 1 : 0; + next_prod_idx -= (u16)wq->q_depth; + } + + cmd_info = &cmdq->cmd_infos[curr_prod_idx]; + + init_completion(&done); + + cmd_info->cmd_type = HINIC3_CMD_TYPE_DIRECT_RESP; + cmd_info->done = &done; + cmd_info->errcode = &errcode; + cmd_info->direct_resp = out_param; + cmd_info->cmpt_code = &cmpt_code; + cmd_info->channel = channel; + cmdq_set_cmd_buf(cmd_info, cmdq->hwdev, buf_in, NULL); + + memcpy(&saved_cmd_info, cmd_info, sizeof(*cmd_info)); + + cmdq_set_lcmd_wqe(&wqe, SYNC_CMD_DIRECT_RESP, buf_in, NULL, + wrapped, mod, cmd, curr_prod_idx); + + /* The data that is written to HW should be in Big Endian Format */ + hinic3_hw_be32_len(&wqe, wqe_size); + + /* CMDQ WQE is not shadow, therefore wqe will be written to wq */ + cmdq_wqe_fill(curr_wqe, &wqe); + + (cmd_info->cmdq_msg_id)++; + curr_msg_id = cmd_info->cmdq_msg_id; + + cmdq_set_db(cmdq, HINIC3_CMDQ_SYNC, next_prod_idx); + + cmdq_msg_unlock(cmdq); + + real_timeout = timeout ? timeout : CMDQ_CMD_TIMEOUT; + err = wait_cmdq_sync_cmd_completion(cmdq, cmd_info, &saved_cmd_info, + curr_msg_id, curr_prod_idx, + curr_wqe, real_timeout); + if (err) { + sdk_err(cmdq->hwdev->dev_hdl, "Cmdq sync command(mod: %u, cmd: %u) timeout, prod idx: 0x%x\n", + mod, cmd, curr_prod_idx); + err = -ETIMEDOUT; + } + + if (cmpt_code == CMDQ_FORCE_STOP_CMPT_CODE) { + sdk_info(cmdq->hwdev->dev_hdl, "Force stop cmdq cmd, mod: %u, cmd: %u\n", + mod, cmd); + err = -EAGAIN; + } + + destroy_completion(&done); + smp_rmb(); /* read error code after completion */ + + return (err != 0) ? 
err : errcode; +} + +static int cmdq_sync_cmd_detail_resp(struct hinic3_cmdq *cmdq, u8 mod, u8 cmd, + struct hinic3_cmd_buf *buf_in, + struct hinic3_cmd_buf *buf_out, + u64 *out_param, u32 timeout, u16 channel) +{ + struct hinic3_wq *wq = &cmdq->wq; + struct hinic3_cmdq_wqe *curr_wqe = NULL, wqe; + struct hinic3_cmdq_cmd_info *cmd_info = NULL, saved_cmd_info; + struct completion done; + u16 curr_prod_idx, next_prod_idx; + int wrapped, errcode = 0, wqe_size = WQE_LCMD_SIZE; + int cmpt_code = CMDQ_SEND_CMPT_CODE; + u64 curr_msg_id; + int err; + u32 real_timeout; + + err = cmdq_msg_lock(cmdq, channel); + if (err) + return err; + + curr_wqe = cmdq_get_wqe(wq, &curr_prod_idx); + if (!curr_wqe) { + cmdq_msg_unlock(cmdq); + return -EBUSY; + } + + memset(&wqe, 0, sizeof(wqe)); + + wrapped = cmdq->wrapped; + + next_prod_idx = curr_prod_idx + NUM_WQEBBS_FOR_CMDQ_WQE; + if (next_prod_idx >= wq->q_depth) { + cmdq->wrapped = (cmdq->wrapped == 0) ? 1 : 0; + next_prod_idx -= (u16)wq->q_depth; + } + + cmd_info = &cmdq->cmd_infos[curr_prod_idx]; + + init_completion(&done); + + cmd_info->cmd_type = HINIC3_CMD_TYPE_SGE_RESP; + cmd_info->done = &done; + cmd_info->errcode = &errcode; + cmd_info->direct_resp = out_param; + cmd_info->cmpt_code = &cmpt_code; + cmd_info->channel = channel; + cmdq_set_cmd_buf(cmd_info, cmdq->hwdev, buf_in, buf_out); + + memcpy(&saved_cmd_info, cmd_info, sizeof(*cmd_info)); + + cmdq_set_lcmd_wqe(&wqe, SYNC_CMD_SGE_RESP, buf_in, buf_out, + wrapped, mod, cmd, curr_prod_idx); + + hinic3_hw_be32_len(&wqe, wqe_size); + + cmdq_wqe_fill(curr_wqe, &wqe); + + (cmd_info->cmdq_msg_id)++; + curr_msg_id = cmd_info->cmdq_msg_id; + + cmdq_set_db(cmdq, cmdq->cmdq_type, next_prod_idx); + + cmdq_msg_unlock(cmdq); + + real_timeout = timeout ? timeout : CMDQ_CMD_TIMEOUT; + err = wait_cmdq_sync_cmd_completion(cmdq, cmd_info, &saved_cmd_info, + curr_msg_id, curr_prod_idx, + curr_wqe, real_timeout); + if (err) { + sdk_err(cmdq->hwdev->dev_hdl, "Cmdq sync command(mod: %u, cmd: %u) timeout, prod idx: 0x%x\n", + mod, cmd, curr_prod_idx); + err = -ETIMEDOUT; + } + + if (cmpt_code == CMDQ_FORCE_STOP_CMPT_CODE) { + sdk_info(cmdq->hwdev->dev_hdl, "Force stop cmdq cmd, mod: %u, cmd: %u\n", + mod, cmd); + err = -EAGAIN; + } + + destroy_completion(&done); + smp_rmb(); /* read error code after completion */ + + return (err != 0) ? err : errcode; +} + +static int cmdq_async_cmd(struct hinic3_cmdq *cmdq, u8 mod, u8 cmd, + struct hinic3_cmd_buf *buf_in, u16 channel) +{ + struct hinic3_cmdq_cmd_info *cmd_info = NULL; + struct hinic3_wq *wq = &cmdq->wq; + int wqe_size = WQE_LCMD_SIZE; + u16 curr_prod_idx, next_prod_idx; + struct hinic3_cmdq_wqe *curr_wqe = NULL, wqe; + int wrapped, err; + + err = cmdq_msg_lock(cmdq, channel); + if (err) + return err; + + curr_wqe = cmdq_get_wqe(wq, &curr_prod_idx); + if (!curr_wqe) { + cmdq_msg_unlock(cmdq); + return -EBUSY; + } + + memset(&wqe, 0, sizeof(wqe)); + + wrapped = cmdq->wrapped; + next_prod_idx = curr_prod_idx + NUM_WQEBBS_FOR_CMDQ_WQE; + if (next_prod_idx >= wq->q_depth) { + cmdq->wrapped = (cmdq->wrapped == 0) ? 
1 : 0; + next_prod_idx -= (u16)wq->q_depth; + } + + cmdq_set_lcmd_wqe(&wqe, ASYNC_CMD, buf_in, NULL, wrapped, + mod, cmd, curr_prod_idx); + + /* The data that is written to HW should be in Big Endian Format */ + hinic3_hw_be32_len(&wqe, wqe_size); + cmdq_wqe_fill(curr_wqe, &wqe); + + cmd_info = &cmdq->cmd_infos[curr_prod_idx]; + cmd_info->cmd_type = HINIC3_CMD_TYPE_ASYNC; + cmd_info->channel = channel; + /* The caller will not free the cmd_buf of the asynchronous command, + * so there is no need to increase the reference count here + */ + cmd_info->buf_in = buf_in; + + /* LB mode 1 compatible, cmdq 0 also for async, which is sync_no_wait */ + cmdq_set_db(cmdq, HINIC3_CMDQ_SYNC, next_prod_idx); + + cmdq_msg_unlock(cmdq); + + return 0; +} + +static int cmdq_params_valid(const void *hwdev, const struct hinic3_cmd_buf *buf_in) +{ + if (!buf_in || !hwdev) { + pr_err("Invalid CMDQ buffer addr or hwdev\n"); + return -EINVAL; + } + + if (!buf_in->size || buf_in->size > HINIC3_CMDQ_BUF_SIZE) { + pr_err("Invalid CMDQ buffer size: 0x%x\n", buf_in->size); + return -EINVAL; + } + + return 0; +} + +#define WAIT_CMDQ_ENABLE_TIMEOUT 300 +static int wait_cmdqs_enable(struct hinic3_cmdqs *cmdqs) +{ + unsigned long end; + + end = jiffies + msecs_to_jiffies(WAIT_CMDQ_ENABLE_TIMEOUT); + do { + if (cmdqs->status & HINIC3_CMDQ_ENABLE) + return 0; + } while (time_before(jiffies, end) && cmdqs->hwdev->chip_present_flag && + !cmdqs->disable_flag); + + cmdqs->disable_flag = 1; + + return -EBUSY; +} + +int hinic3_cmdq_direct_resp(void *hwdev, u8 mod, u8 cmd, + struct hinic3_cmd_buf *buf_in, + u64 *out_param, u32 timeout, u16 channel) +{ + struct hinic3_cmdqs *cmdqs = NULL; + int err; + + err = cmdq_params_valid(hwdev, buf_in); + if (err) { + pr_err("Invalid CMDQ parameters\n"); + return err; + } + + if (!get_card_present_state((struct hinic3_hwdev *)hwdev)) + return -EPERM; + + cmdqs = ((struct hinic3_hwdev *)hwdev)->cmdqs; + err = wait_cmdqs_enable(cmdqs); + if (err) { + sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq is disabled\n"); + return err; + } + + err = cmdq_sync_cmd_direct_resp(&cmdqs->cmdq[HINIC3_CMDQ_SYNC], + mod, cmd, buf_in, out_param, + timeout, channel); + + if (!(((struct hinic3_hwdev *)hwdev)->chip_present_flag)) + return -ETIMEDOUT; + else + return err; +} +EXPORT_SYMBOL(hinic3_cmdq_direct_resp); + +int hinic3_cmdq_detail_resp(void *hwdev, u8 mod, u8 cmd, + struct hinic3_cmd_buf *buf_in, + struct hinic3_cmd_buf *buf_out, + u64 *out_param, u32 timeout, u16 channel) +{ + struct hinic3_cmdqs *cmdqs = NULL; + int err; + + err = cmdq_params_valid(hwdev, buf_in); + if (err) + return err; + + cmdqs = ((struct hinic3_hwdev *)hwdev)->cmdqs; + + if (!get_card_present_state((struct hinic3_hwdev *)hwdev)) + return -EPERM; + + err = wait_cmdqs_enable(cmdqs); + if (err) { + sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq is disabled\n"); + return err; + } + + err = cmdq_sync_cmd_detail_resp(&cmdqs->cmdq[HINIC3_CMDQ_SYNC], + mod, cmd, buf_in, buf_out, out_param, + timeout, channel); + if (!(((struct hinic3_hwdev *)hwdev)->chip_present_flag)) + return -ETIMEDOUT; + else + return err; +} +EXPORT_SYMBOL(hinic3_cmdq_detail_resp); + +int hinic3_cos_id_detail_resp(void *hwdev, u8 mod, u8 cmd, u8 cos_id, + struct hinic3_cmd_buf *buf_in, + struct hinic3_cmd_buf *buf_out, u64 *out_param, + u32 timeout, u16 channel) +{ + struct hinic3_cmdqs *cmdqs = NULL; + int err; + + err = cmdq_params_valid(hwdev, buf_in); + if (err) + return err; + + cmdqs = ((struct hinic3_hwdev *)hwdev)->cmdqs; + + if (!get_card_present_state((struct hinic3_hwdev *)hwdev))
+ return -EPERM; + + err = wait_cmdqs_enable(cmdqs); + if (err) { + sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq is disabled\n"); + return err; + } + + if (cos_id >= cmdqs->cmdq_num) { + sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq id is invalid\n"); + return -EINVAL; + } + + err = cmdq_sync_cmd_detail_resp(&cmdqs->cmdq[cos_id], mod, cmd, + buf_in, buf_out, out_param, + timeout, channel); + if (!(((struct hinic3_hwdev *)hwdev)->chip_present_flag)) + return -ETIMEDOUT; + else + return err; +} +EXPORT_SYMBOL(hinic3_cos_id_detail_resp); + +int hinic3_cmdq_async(void *hwdev, u8 mod, u8 cmd, struct hinic3_cmd_buf *buf_in, u16 channel) +{ + struct hinic3_cmdqs *cmdqs = NULL; + int err; + + err = cmdq_params_valid(hwdev, buf_in); + if (err) + return err; + + cmdqs = ((struct hinic3_hwdev *)hwdev)->cmdqs; + + if (!get_card_present_state((struct hinic3_hwdev *)hwdev)) + return -EPERM; + + err = wait_cmdqs_enable(cmdqs); + if (err) { + sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq is disabled\n"); + return err; + } + /* LB mode 1 compatible, cmdq 0 also for async, which is sync_no_wait */ + return cmdq_async_cmd(&cmdqs->cmdq[HINIC3_CMDQ_SYNC], mod, + cmd, buf_in, channel); +} + +static void clear_wqe_complete_bit(struct hinic3_cmdq *cmdq, + struct hinic3_cmdq_wqe *wqe, u16 ci) +{ + struct hinic3_ctrl *ctrl = NULL; + u32 header_info = hinic3_hw_cpu32(WQE_HEADER(wqe)->header_info); + enum data_format df = CMDQ_WQE_HEADER_GET(header_info, DATA_FMT); + + if (df == DATA_SGE) + ctrl = &wqe->wqe_lcmd.ctrl; + else + ctrl = &wqe->inline_wqe.wqe_scmd.ctrl; + + /* clear HW busy bit */ + ctrl->ctrl_info = 0; + cmdq->cmd_infos[ci].cmd_type = HINIC3_CMD_TYPE_NONE; + + wmb(); /* make sure the wqe is cleared before releasing the wqebbs */ + + hinic3_wq_put_wqebbs(&cmdq->wq, NUM_WQEBBS_FOR_CMDQ_WQE); +} + +static void cmdq_sync_cmd_handler(struct hinic3_cmdq *cmdq, + struct hinic3_cmdq_wqe *wqe, u16 ci) +{ + spin_lock(&cmdq->cmdq_lock); + + cmdq_update_cmd_status(cmdq, ci, wqe); + + if (cmdq->cmd_infos[ci].cmpt_code) { + *cmdq->cmd_infos[ci].cmpt_code = CMDQ_COMPLETE_CMPT_CODE; + cmdq->cmd_infos[ci].cmpt_code = NULL; + } + + /* make sure cmpt_code operation before done operation */ + smp_rmb(); + + if (cmdq->cmd_infos[ci].done) { + complete(cmdq->cmd_infos[ci].done); + cmdq->cmd_infos[ci].done = NULL; + } + + spin_unlock(&cmdq->cmdq_lock); + + cmdq_clear_cmd_buf(&cmdq->cmd_infos[ci], cmdq->hwdev); + clear_wqe_complete_bit(cmdq, wqe, ci); +} + +static void cmdq_async_cmd_handler(struct hinic3_hwdev *hwdev, + struct hinic3_cmdq *cmdq, + struct hinic3_cmdq_wqe *wqe, u16 ci) +{ + cmdq_clear_cmd_buf(&cmdq->cmd_infos[ci], hwdev); + clear_wqe_complete_bit(cmdq, wqe, ci); +} + +static int cmdq_arm_ceq_handler(struct hinic3_cmdq *cmdq, + struct hinic3_cmdq_wqe *wqe, u16 ci) +{ + struct hinic3_ctrl *ctrl = &wqe->inline_wqe.wqe_scmd.ctrl; + u32 ctrl_info = hinic3_hw_cpu32((ctrl)->ctrl_info); + + if (!WQE_COMPLETED(ctrl_info)) + return -EBUSY; + + clear_wqe_complete_bit(cmdq, wqe, ci); + + return 0; +} + +#define HINIC3_CMDQ_WQE_HEAD_LEN 32 +static void hinic3_dump_cmdq_wqe_head(struct hinic3_hwdev *hwdev, + struct hinic3_cmdq_wqe *wqe) +{ + u32 i; + u32 *data = (u32 *)wqe; + + for (i = 0; i < (HINIC3_CMDQ_WQE_HEAD_LEN / sizeof(u32)); i += 0x4) { + sdk_info(hwdev->dev_hdl, "wqe data: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n", + *(data + i), *(data + i + 0x1), *(data + i + 0x2), + *(data + i + 0x3)); + } +} + +void hinic3_cmdq_ceq_handler(void *handle, u32 ceqe_data) +{ + struct hinic3_cmdqs *cmdqs = ((struct hinic3_hwdev *)handle)->cmdqs; + enum hinic3_cmdq_type cmdq_type = CEQE_CMDQ_GET(ceqe_data,
TYPE); + struct hinic3_cmdq *cmdq = &cmdqs->cmdq[cmdq_type]; + struct hinic3_hwdev *hwdev = cmdqs->hwdev; + struct hinic3_cmdq_wqe *wqe = NULL; + struct hinic3_cmdq_wqe_lcmd *wqe_lcmd = NULL; + struct hinic3_ctrl *ctrl = NULL; + struct hinic3_cmdq_cmd_info *cmd_info = NULL; + u16 ci; + + while ((wqe = cmdq_read_wqe(&cmdq->wq, &ci)) != NULL) { + cmd_info = &cmdq->cmd_infos[ci]; + + switch (cmd_info->cmd_type) { + case HINIC3_CMD_TYPE_NONE: + return; + case HINIC3_CMD_TYPE_TIMEOUT: + sdk_warn(hwdev->dev_hdl, "Cmdq timeout, q_id: %u, ci: %u\n", + cmdq_type, ci); + hinic3_dump_cmdq_wqe_head(hwdev, wqe); + /*lint -fallthrough */ + case HINIC3_CMD_TYPE_FAKE_TIMEOUT: + cmdq_clear_cmd_buf(cmd_info, hwdev); + clear_wqe_complete_bit(cmdq, wqe, ci); + break; + case HINIC3_CMD_TYPE_SET_ARM: + /* arm_bit was set until here */ + if (cmdq_arm_ceq_handler(cmdq, wqe, ci)) + return; + break; + default: + /* only arm bit is using scmd wqe, the wqe is lcmd */ + wqe_lcmd = &wqe->wqe_lcmd; + ctrl = &wqe_lcmd->ctrl; + if (!WQE_COMPLETED(hinic3_hw_cpu32((ctrl)->ctrl_info))) + return; + + dma_rmb(); + /* For FORCE_STOP cmd_type, we also need to wait for + * the firmware processing to complete to prevent the + * firmware from accessing the released cmd_buf + */ + if (cmd_info->cmd_type == HINIC3_CMD_TYPE_FORCE_STOP) { + cmdq_clear_cmd_buf(cmd_info, hwdev); + clear_wqe_complete_bit(cmdq, wqe, ci); + } else if (cmd_info->cmd_type == HINIC3_CMD_TYPE_ASYNC) { + cmdq_async_cmd_handler(hwdev, cmdq, wqe, ci); + } else { + cmdq_sync_cmd_handler(cmdq, wqe, ci); + } + + break; + } + } +} + +static void cmdq_init_queue_ctxt(struct hinic3_cmdqs *cmdqs, + struct hinic3_cmdq *cmdq, + struct cmdq_ctxt_info *ctxt_info) +{ + struct hinic3_wq *wq = &cmdq->wq; + u64 cmdq_first_block_paddr, pfn; + u16 start_ci = (u16)wq->cons_idx; + + pfn = CMDQ_PFN(hinic3_wq_get_first_wqe_page_addr(wq)); + + ctxt_info->curr_wqe_page_pfn = + CMDQ_CTXT_PAGE_INFO_SET(1, HW_BUSY_BIT) | + CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_EN) | + CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_ARM) | + CMDQ_CTXT_PAGE_INFO_SET(HINIC3_CEQ_ID_CMDQ, EQ_ID) | + CMDQ_CTXT_PAGE_INFO_SET(pfn, CURR_WQE_PAGE_PFN); + + if (!WQ_IS_0_LEVEL_CLA(wq)) { + cmdq_first_block_paddr = cmdqs->wq_block_paddr; + pfn = CMDQ_PFN(cmdq_first_block_paddr); + } + + ctxt_info->wq_block_pfn = CMDQ_CTXT_BLOCK_INFO_SET(start_ci, CI) | + CMDQ_CTXT_BLOCK_INFO_SET(pfn, WQ_BLOCK_PFN); +} + +static int init_cmdq(struct hinic3_cmdq *cmdq, struct hinic3_hwdev *hwdev, + enum hinic3_cmdq_type q_type) +{ + int err; + + cmdq->cmdq_type = q_type; + cmdq->wrapped = 1; + cmdq->hwdev = hwdev; + + spin_lock_init(&cmdq->cmdq_lock); + + cmdq->cmd_infos = kcalloc(cmdq->wq.q_depth, sizeof(*cmdq->cmd_infos), + GFP_KERNEL); + if (!cmdq->cmd_infos) { + sdk_err(hwdev->dev_hdl, "Failed to allocate cmdq infos\n"); + err = -ENOMEM; + goto cmd_infos_err; + } + + return 0; + +cmd_infos_err: + spin_lock_deinit(&cmdq->cmdq_lock); + + return err; +} + +static void free_cmdq(struct hinic3_cmdq *cmdq) +{ + kfree(cmdq->cmd_infos); + spin_lock_deinit(&cmdq->cmdq_lock); +} + +static int hinic3_set_cmdq_ctxts(struct hinic3_hwdev *hwdev) +{ + struct hinic3_cmdqs *cmdqs = hwdev->cmdqs; + u8 cmdq_type; + int err; + + cmdq_type = HINIC3_CMDQ_SYNC; + for (; cmdq_type < cmdqs->cmdq_num; cmdq_type++) { + err = hinic3_set_cmdq_ctxt(hwdev, cmdq_type, + &cmdqs->cmdq[cmdq_type].cmdq_ctxt); + if (err) + return err; + } + + cmdqs->status |= HINIC3_CMDQ_ENABLE; + cmdqs->disable_flag = 0; + + return 0; +} + +static void cmdq_flush_sync_cmd(struct hinic3_cmdq_cmd_info 
*cmd_info) +{ + if (cmd_info->cmd_type != HINIC3_CMD_TYPE_DIRECT_RESP && + cmd_info->cmd_type != HINIC3_CMD_TYPE_SGE_RESP) + return; + + cmd_info->cmd_type = HINIC3_CMD_TYPE_FORCE_STOP; + + if (cmd_info->cmpt_code && + *cmd_info->cmpt_code == CMDQ_SEND_CMPT_CODE) + *cmd_info->cmpt_code = CMDQ_FORCE_STOP_CMPT_CODE; + + if (cmd_info->done) { + complete(cmd_info->done); + cmd_info->done = NULL; + cmd_info->cmpt_code = NULL; + cmd_info->direct_resp = NULL; + cmd_info->errcode = NULL; + } +} + +void hinic3_cmdq_flush_cmd(struct hinic3_hwdev *hwdev, + struct hinic3_cmdq *cmdq) +{ + struct hinic3_cmdq_cmd_info *cmd_info = NULL; + u16 ci = 0; + + spin_lock_bh(&cmdq->cmdq_lock); + + while (cmdq_read_wqe(&cmdq->wq, &ci)) { + hinic3_wq_put_wqebbs(&cmdq->wq, NUM_WQEBBS_FOR_CMDQ_WQE); + cmd_info = &cmdq->cmd_infos[ci]; + + if (cmd_info->cmd_type == HINIC3_CMD_TYPE_DIRECT_RESP || + cmd_info->cmd_type == HINIC3_CMD_TYPE_SGE_RESP) + cmdq_flush_sync_cmd(cmd_info); + } + + spin_unlock_bh(&cmdq->cmdq_lock); +} + +static void hinic3_cmdq_flush_channel_sync_cmd(struct hinic3_hwdev *hwdev, u16 channel) +{ + struct hinic3_cmdq_cmd_info *cmd_info = NULL; + struct hinic3_cmdq *cmdq = NULL; + struct hinic3_wq *wq = NULL; + u16 wqe_cnt, ci, i; + + if (channel >= HINIC3_CHANNEL_MAX) + return; + + cmdq = &hwdev->cmdqs->cmdq[HINIC3_CMDQ_SYNC]; + + spin_lock_bh(&cmdq->cmdq_lock); + + wq = &cmdq->wq; + ci = wq->cons_idx; + wqe_cnt = (u16)WQ_MASK_IDX(wq, wq->prod_idx + + wq->q_depth - wq->cons_idx); + for (i = 0; i < wqe_cnt; i++) { + cmd_info = &cmdq->cmd_infos[WQ_MASK_IDX(wq, ci + i)]; + if (cmd_info->channel == channel) + cmdq_flush_sync_cmd(cmd_info); + } + + spin_unlock_bh(&cmdq->cmdq_lock); +} + +void hinic3_cmdq_flush_sync_cmd(struct hinic3_hwdev *hwdev) +{ + struct hinic3_cmdq_cmd_info *cmd_info = NULL; + struct hinic3_cmdq *cmdq = NULL; + struct hinic3_wq *wq = NULL; + u16 wqe_cnt, ci, i; + + cmdq = &hwdev->cmdqs->cmdq[HINIC3_CMDQ_SYNC]; + + spin_lock_bh(&cmdq->cmdq_lock); + + wq = &cmdq->wq; + ci = wq->cons_idx; + wqe_cnt = (u16)WQ_MASK_IDX(wq, wq->prod_idx + + wq->q_depth - wq->cons_idx); + for (i = 0; i < wqe_cnt; i++) { + cmd_info = &cmdq->cmd_infos[WQ_MASK_IDX(wq, ci + i)]; + cmdq_flush_sync_cmd(cmd_info); + } + + spin_unlock_bh(&cmdq->cmdq_lock); +} + +static void cmdq_reset_all_cmd_buff(struct hinic3_cmdq *cmdq) +{ + u16 i; + + for (i = 0; i < cmdq->wq.q_depth; i++) + cmdq_clear_cmd_buf(&cmdq->cmd_infos[i], cmdq->hwdev); +} + +int hinic3_cmdq_set_channel_status(struct hinic3_hwdev *hwdev, u16 channel, + bool enable) +{ + if (channel >= HINIC3_CHANNEL_MAX) + return -EINVAL; + + if (enable) { + clear_bit(channel, &hwdev->cmdqs->channel_stop); + } else { + set_bit(channel, &hwdev->cmdqs->channel_stop); + hinic3_cmdq_flush_channel_sync_cmd(hwdev, channel); + } + + sdk_info(hwdev->dev_hdl, "%s cmdq channel 0x%x\n", + enable ? "Enable" : "Disable", channel); + + return 0; +} + +void hinic3_cmdq_enable_channel_lock(struct hinic3_hwdev *hwdev, bool enable) +{ + hwdev->cmdqs->lock_channel_en = enable; + + sdk_info(hwdev->dev_hdl, "%s cmdq channel lock\n", + enable ? 
"Enable" : "Disable"); +} + +int hinic3_reinit_cmdq_ctxts(struct hinic3_hwdev *hwdev) +{ + struct hinic3_cmdqs *cmdqs = hwdev->cmdqs; + u8 cmdq_type; + + cmdq_type = HINIC3_CMDQ_SYNC; + for (; cmdq_type < cmdqs->cmdq_num; cmdq_type++) { + hinic3_cmdq_flush_cmd(hwdev, &cmdqs->cmdq[cmdq_type]); + cmdq_reset_all_cmd_buff(&cmdqs->cmdq[cmdq_type]); + cmdqs->cmdq[cmdq_type].wrapped = 1; + hinic3_wq_reset(&cmdqs->cmdq[cmdq_type].wq); + } + + return hinic3_set_cmdq_ctxts(hwdev); +} + +static int create_cmdq_wq(struct hinic3_cmdqs *cmdqs) +{ + u8 type, cmdq_type; + int err; + + cmdq_type = HINIC3_CMDQ_SYNC; + for (; cmdq_type < cmdqs->cmdq_num; cmdq_type++) { + err = hinic3_wq_create(cmdqs->hwdev, &cmdqs->cmdq[cmdq_type].wq, + HINIC3_CMDQ_DEPTH, CMDQ_WQEBB_SIZE); + if (err) { + sdk_err(cmdqs->hwdev->dev_hdl, "Failed to create cmdq wq\n"); + goto destroy_wq; + } + } + + /* 1-level CLA must put all cmdq's wq page addr in one wq block */ + if (!WQ_IS_0_LEVEL_CLA(&cmdqs->cmdq[HINIC3_CMDQ_SYNC].wq)) { + /* cmdq wq's CLA table is up to 512B */ +#define CMDQ_WQ_CLA_SIZE 512 + if (cmdqs->cmdq[HINIC3_CMDQ_SYNC].wq.num_wq_pages > + CMDQ_WQ_CLA_SIZE / sizeof(u64)) { + err = -EINVAL; + sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq wq page exceed limit: %lu\n", + CMDQ_WQ_CLA_SIZE / sizeof(u64)); + goto destroy_wq; + } + + cmdqs->wq_block_vaddr = + dma_zalloc_coherent(cmdqs->hwdev->dev_hdl, PAGE_SIZE, + &cmdqs->wq_block_paddr, GFP_KERNEL); + if (!cmdqs->wq_block_vaddr) { + err = -ENOMEM; + sdk_err(cmdqs->hwdev->dev_hdl, "Failed to alloc cmdq wq block\n"); + goto destroy_wq; + } + + type = HINIC3_CMDQ_SYNC; + for (; type < cmdqs->cmdq_num; type++) + memcpy((u8 *)cmdqs->wq_block_vaddr + + CMDQ_WQ_CLA_SIZE * type, + cmdqs->cmdq[type].wq.wq_block_vaddr, + cmdqs->cmdq[type].wq.num_wq_pages * sizeof(u64)); + } + + return 0; + +destroy_wq: + type = HINIC3_CMDQ_SYNC; + for (; type < cmdq_type; type++) + hinic3_wq_destroy(&cmdqs->cmdq[type].wq); + + return err; +} + +static void destroy_cmdq_wq(struct hinic3_cmdqs *cmdqs) +{ + u8 cmdq_type; + + if (cmdqs->wq_block_vaddr) + dma_free_coherent(cmdqs->hwdev->dev_hdl, PAGE_SIZE, + cmdqs->wq_block_vaddr, cmdqs->wq_block_paddr); + + cmdq_type = HINIC3_CMDQ_SYNC; + for (; cmdq_type < cmdqs->cmdq_num; cmdq_type++) + hinic3_wq_destroy(&cmdqs->cmdq[cmdq_type].wq); +} + +static int init_cmdqs(struct hinic3_hwdev *hwdev) +{ + struct hinic3_cmdqs *cmdqs = NULL; + u8 cmdq_num; + int err = -ENOMEM; + + if (COMM_SUPPORT_CMDQ_NUM(hwdev)) { + cmdq_num = hwdev->glb_attr.cmdq_num; + if (hwdev->glb_attr.cmdq_num > HINIC3_MAX_CMDQ_TYPES) { + sdk_warn(hwdev->dev_hdl, "Adjust cmdq num to %d\n", HINIC3_MAX_CMDQ_TYPES); + cmdq_num = HINIC3_MAX_CMDQ_TYPES; + } + } else { + cmdq_num = HINIC3_MAX_CMDQ_TYPES; + } + + cmdqs = kzalloc(sizeof(*cmdqs), GFP_KERNEL); + if (!cmdqs) + return err; + + hwdev->cmdqs = cmdqs; + cmdqs->hwdev = hwdev; + cmdqs->cmdq_num = cmdq_num; + + cmdqs->cmd_buf_pool = dma_pool_create("hinic3_cmdq", hwdev->dev_hdl, + HINIC3_CMDQ_BUF_SIZE, HINIC3_CMDQ_BUF_SIZE, 0ULL); + if (!cmdqs->cmd_buf_pool) { + sdk_err(hwdev->dev_hdl, "Failed to create cmdq buffer pool\n"); + goto pool_create_err; + } + + return 0; + +pool_create_err: + kfree(cmdqs); + + return err; +} + +int hinic3_cmdqs_init(struct hinic3_hwdev *hwdev) +{ + struct hinic3_cmdqs *cmdqs = NULL; + void __iomem *db_base = NULL; + u8 type, cmdq_type; + int err = -ENOMEM; + + err = init_cmdqs(hwdev); + if (err) + return err; + + cmdqs = hwdev->cmdqs; + + err = create_cmdq_wq(cmdqs); + if (err) + goto create_wq_err; + + err = 
hinic3_alloc_db_addr(hwdev, &db_base, NULL); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to allocate doorbell address\n"); + goto alloc_db_err; + } + + cmdqs->cmdqs_db_base = (u8 *)db_base; + for (cmdq_type = HINIC3_CMDQ_SYNC; cmdq_type < cmdqs->cmdq_num; cmdq_type++) { + err = init_cmdq(&cmdqs->cmdq[cmdq_type], hwdev, cmdq_type); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to initialize cmdq type :%d\n", cmdq_type); + goto init_cmdq_err; + } + + cmdq_init_queue_ctxt(cmdqs, &cmdqs->cmdq[cmdq_type], + &cmdqs->cmdq[cmdq_type].cmdq_ctxt); + } + + err = hinic3_set_cmdq_ctxts(hwdev); + if (err) + goto init_cmdq_err; + + return 0; + +init_cmdq_err: + for (type = HINIC3_CMDQ_SYNC; type < cmdq_type; type++) + free_cmdq(&cmdqs->cmdq[type]); + + hinic3_free_db_addr(hwdev, cmdqs->cmdqs_db_base, NULL); + +alloc_db_err: + destroy_cmdq_wq(cmdqs); + +create_wq_err: + dma_pool_destroy(cmdqs->cmd_buf_pool); + kfree(cmdqs); + + return err; +} + +void hinic3_cmdqs_free(struct hinic3_hwdev *hwdev) +{ + struct hinic3_cmdqs *cmdqs = hwdev->cmdqs; + u8 cmdq_type = HINIC3_CMDQ_SYNC; + + cmdqs->status &= ~HINIC3_CMDQ_ENABLE; + + for (; cmdq_type < cmdqs->cmdq_num; cmdq_type++) { + hinic3_cmdq_flush_cmd(hwdev, &cmdqs->cmdq[cmdq_type]); + cmdq_reset_all_cmd_buff(&cmdqs->cmdq[cmdq_type]); + free_cmdq(&cmdqs->cmdq[cmdq_type]); + } + + hinic3_free_db_addr(hwdev, cmdqs->cmdqs_db_base, NULL); + destroy_cmdq_wq(cmdqs); + + dma_pool_destroy(cmdqs->cmd_buf_pool); + + kfree(cmdqs); +} + diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_cmdq.h b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_cmdq.h new file mode 100644 index 000000000000..ab36dc9c2ba6 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_cmdq.h @@ -0,0 +1,204 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC3_CMDQ_H +#define HINIC3_CMDQ_H + +#include <linux/types.h> +#include <linux/completion.h> +#include <linux/spinlock.h> + +#include "comm_msg_intf.h" +#include "hinic3_hw.h" +#include "hinic3_wq.h" +#include "hinic3_common.h" +#include "hinic3_hwdev.h" + +#define HINIC3_SCMD_DATA_LEN 16 + +#define HINIC3_CMDQ_DEPTH 4096 + +enum hinic3_cmdq_type { + HINIC3_CMDQ_SYNC, + HINIC3_CMDQ_ASYNC, + HINIC3_MAX_CMDQ_TYPES = 4 +}; + +enum hinic3_db_src_type { + HINIC3_DB_SRC_CMDQ_TYPE, + HINIC3_DB_SRC_L2NIC_SQ_TYPE, +}; + +enum hinic3_cmdq_db_type { + HINIC3_DB_SQ_RQ_TYPE, + HINIC3_DB_CMDQ_TYPE, +}; + +/* hardware define: cmdq wqe */ +struct hinic3_cmdq_header { + u32 header_info; + u32 saved_data; +}; + +struct hinic3_scmd_bufdesc { + u32 buf_len; + u32 rsvd; + u8 data[HINIC3_SCMD_DATA_LEN]; +}; + +struct hinic3_lcmd_bufdesc { + struct hinic3_sge sge; + u32 rsvd1; + u64 saved_async_buf; + u64 rsvd3; +}; + +struct hinic3_cmdq_db { + u32 db_head; + u32 db_info; +}; + +struct hinic3_status { + u32 status_info; +}; + +struct hinic3_ctrl { + u32 ctrl_info; +}; + +struct hinic3_sge_resp { + struct hinic3_sge sge; + u32 rsvd; +}; + +struct hinic3_cmdq_completion { + union { + struct hinic3_sge_resp sge_resp; + u64 direct_resp; + }; +}; + +struct hinic3_cmdq_wqe_scmd { + struct hinic3_cmdq_header header; + u64 rsvd; + struct hinic3_status status; + struct hinic3_ctrl ctrl; + struct hinic3_cmdq_completion completion; + struct hinic3_scmd_bufdesc buf_desc; +}; + +struct hinic3_cmdq_wqe_lcmd { + struct hinic3_cmdq_header header; + struct hinic3_status status; + struct hinic3_ctrl ctrl; + struct hinic3_cmdq_completion completion; + struct hinic3_lcmd_bufdesc buf_desc; +}; + +struct 
hinic3_cmdq_inline_wqe { + struct hinic3_cmdq_wqe_scmd wqe_scmd; +}; + +struct hinic3_cmdq_wqe { + union { + struct hinic3_cmdq_inline_wqe inline_wqe; + struct hinic3_cmdq_wqe_lcmd wqe_lcmd; + }; +}; + +struct hinic3_cmdq_arm_bit { + u32 q_type; + u32 q_id; +}; + +enum hinic3_cmdq_status { + HINIC3_CMDQ_ENABLE = BIT(0), +}; + +enum hinic3_cmdq_cmd_type { + HINIC3_CMD_TYPE_NONE, + HINIC3_CMD_TYPE_SET_ARM, + HINIC3_CMD_TYPE_DIRECT_RESP, + HINIC3_CMD_TYPE_SGE_RESP, + HINIC3_CMD_TYPE_ASYNC, + HINIC3_CMD_TYPE_FAKE_TIMEOUT, + HINIC3_CMD_TYPE_TIMEOUT, + HINIC3_CMD_TYPE_FORCE_STOP, +}; + +struct hinic3_cmdq_cmd_info { + enum hinic3_cmdq_cmd_type cmd_type; + u16 channel; + u16 rsvd1; + + struct completion *done; + int *errcode; + int *cmpt_code; + u64 *direct_resp; + u64 cmdq_msg_id; + + struct hinic3_cmd_buf *buf_in; + struct hinic3_cmd_buf *buf_out; +}; + +struct hinic3_cmdq { + struct hinic3_wq wq; + + enum hinic3_cmdq_type cmdq_type; + int wrapped; + + /* spinlock for send cmdq commands */ + spinlock_t cmdq_lock; + + struct cmdq_ctxt_info cmdq_ctxt; + + struct hinic3_cmdq_cmd_info *cmd_infos; + + struct hinic3_hwdev *hwdev; + u64 rsvd1[2]; +}; + +struct hinic3_cmdqs { + struct hinic3_hwdev *hwdev; + + struct pci_pool *cmd_buf_pool; + /* doorbell area */ + u8 __iomem *cmdqs_db_base; + + /* All cmdq's CLA of a VF occupy a PAGE when cmdq wq is 1-level CLA */ + dma_addr_t wq_block_paddr; + void *wq_block_vaddr; + struct hinic3_cmdq cmdq[HINIC3_MAX_CMDQ_TYPES]; + + u32 status; + u32 disable_flag; + + bool lock_channel_en; + unsigned long channel_stop; + u8 cmdq_num; + u32 rsvd1; + u64 rsvd2; +}; + +void hinic3_cmdq_ceq_handler(void *handle, u32 ceqe_data); + +int hinic3_reinit_cmdq_ctxts(struct hinic3_hwdev *hwdev); + +bool hinic3_cmdq_idle(struct hinic3_cmdq *cmdq); + +int hinic3_cmdqs_init(struct hinic3_hwdev *hwdev); + +void hinic3_cmdqs_free(struct hinic3_hwdev *hwdev); + +void hinic3_cmdq_flush_cmd(struct hinic3_hwdev *hwdev, + struct hinic3_cmdq *cmdq); + +int hinic3_cmdq_set_channel_status(struct hinic3_hwdev *hwdev, u16 channel, + bool enable); + +void hinic3_cmdq_enable_channel_lock(struct hinic3_hwdev *hwdev, bool enable); + +void hinic3_cmdq_flush_sync_cmd(struct hinic3_hwdev *hwdev); + +#endif + diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_common.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_common.c new file mode 100644 index 000000000000..a942ef185e6f --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_common.c @@ -0,0 +1,93 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#include <linux/kernel.h> +#include <linux/io-mapping.h> +#include <linux/delay.h> + +#include "ossl_knl.h" +#include "hinic3_common.h" + +int hinic3_dma_zalloc_coherent_align(void *dev_hdl, u64 size, u64 align, + unsigned int flag, + struct hinic3_dma_addr_align *mem_align) +{ + void *vaddr = NULL, *align_vaddr = NULL; + dma_addr_t paddr, align_paddr; + u64 real_size = size; + + vaddr = dma_zalloc_coherent(dev_hdl, real_size, &paddr, flag); + if (!vaddr) + return -ENOMEM; + + align_paddr = ALIGN(paddr, align); + /* align */ + if (align_paddr == paddr) { + align_vaddr = vaddr; + goto out; + } + + dma_free_coherent(dev_hdl, real_size, vaddr, paddr); + + /* realloc memory for align */ + real_size = size + align; + vaddr = dma_zalloc_coherent(dev_hdl, real_size, &paddr, flag); + if (!vaddr) + return -ENOMEM; + + align_paddr = ALIGN(paddr, align); + align_vaddr = (void *)((u64)vaddr + (align_paddr - paddr)); + +out: + mem_align->real_size = 
(u32)real_size; + mem_align->ori_vaddr = vaddr; + mem_align->ori_paddr = paddr; + mem_align->align_vaddr = align_vaddr; + mem_align->align_paddr = align_paddr; + + return 0; +} +EXPORT_SYMBOL(hinic3_dma_zalloc_coherent_align); + +void hinic3_dma_free_coherent_align(void *dev_hdl, + struct hinic3_dma_addr_align *mem_align) +{ + dma_free_coherent(dev_hdl, mem_align->real_size, + mem_align->ori_vaddr, mem_align->ori_paddr); +} +EXPORT_SYMBOL(hinic3_dma_free_coherent_align); + +int hinic3_wait_for_timeout(void *priv_data, wait_cpl_handler handler, + u32 wait_total_ms, u32 wait_once_us) +{ + enum hinic3_wait_return ret; + unsigned long end; + /* Take 9/10 * wait_once_us as the minimum sleep time of usleep_range */ + u32 usleep_min = wait_once_us - wait_once_us / 10; + + if (!handler) + return -EINVAL; + + end = jiffies + msecs_to_jiffies(wait_total_ms); + do { + ret = handler(priv_data); + if (ret == WAIT_PROCESS_CPL) + return 0; + else if (ret == WAIT_PROCESS_ERR) + return -EIO; + + /* msleep is accurate enough for sleeps of 20 ms or more */ + if (wait_once_us >= 20 * USEC_PER_MSEC) + msleep(wait_once_us / USEC_PER_MSEC); + else + usleep_range(usleep_min, wait_once_us); + } while (time_before(jiffies, end)); + + ret = handler(priv_data); + if (ret == WAIT_PROCESS_CPL) + return 0; + else if (ret == WAIT_PROCESS_ERR) + return -EIO; + + return -ETIMEDOUT; +} diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_csr.h b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_csr.h new file mode 100644 index 000000000000..b5390c9ed488 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_csr.h @@ -0,0 +1,187 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC3_CSR_H +#define HINIC3_CSR_H + +/* bit30/bit31 for bar index flag + * 00: bar0 + * 01: bar1 + * 10: bar2 + * 11: bar3 + */ +#define HINIC3_CFG_REGS_FLAG 0x40000000 + +#define HINIC3_MGMT_REGS_FLAG 0xC0000000 + +#define HINIC3_REGS_FLAG_MAKS 0x3FFFFFFF + +#define HINIC3_VF_CFG_REG_OFFSET 0x2000 + +#define HINIC3_HOST_CSR_BASE_ADDR (HINIC3_MGMT_REGS_FLAG + 0x6000) +#define HINIC3_CSR_GLOBAL_BASE_ADDR (HINIC3_MGMT_REGS_FLAG + 0x6400) + +/* HW interface registers */ +#define HINIC3_CSR_FUNC_ATTR0_ADDR (HINIC3_CFG_REGS_FLAG + 0x0) +#define HINIC3_CSR_FUNC_ATTR1_ADDR (HINIC3_CFG_REGS_FLAG + 0x4) +#define HINIC3_CSR_FUNC_ATTR2_ADDR (HINIC3_CFG_REGS_FLAG + 0x8) +#define HINIC3_CSR_FUNC_ATTR3_ADDR (HINIC3_CFG_REGS_FLAG + 0xC) +#define HINIC3_CSR_FUNC_ATTR4_ADDR (HINIC3_CFG_REGS_FLAG + 0x10) +#define HINIC3_CSR_FUNC_ATTR5_ADDR (HINIC3_CFG_REGS_FLAG + 0x14) +#define HINIC3_CSR_FUNC_ATTR6_ADDR (HINIC3_CFG_REGS_FLAG + 0x18) + +#define HINIC3_FUNC_CSR_MAILBOX_DATA_OFF 0x80 +#define HINIC3_FUNC_CSR_MAILBOX_CONTROL_OFF \ + (HINIC3_CFG_REGS_FLAG + 0x0100) +#define HINIC3_FUNC_CSR_MAILBOX_INT_OFFSET_OFF \ + (HINIC3_CFG_REGS_FLAG + 0x0104) +#define HINIC3_FUNC_CSR_MAILBOX_RESULT_H_OFF \ + (HINIC3_CFG_REGS_FLAG + 0x0108) +#define HINIC3_FUNC_CSR_MAILBOX_RESULT_L_OFF \ + (HINIC3_CFG_REGS_FLAG + 0x010C) +/* CLP registers */ +#define HINIC3_BAR3_CLP_BASE_ADDR (HINIC3_MGMT_REGS_FLAG + 0x0000) + +#define HINIC3_UCPU_CLP_SIZE_REG (HINIC3_HOST_CSR_BASE_ADDR + 0x40) +#define HINIC3_UCPU_CLP_REQBASE_REG (HINIC3_HOST_CSR_BASE_ADDR + 0x44) +#define HINIC3_UCPU_CLP_RSPBASE_REG (HINIC3_HOST_CSR_BASE_ADDR + 0x48) +#define HINIC3_UCPU_CLP_REQ_REG (HINIC3_HOST_CSR_BASE_ADDR + 0x4c) +#define HINIC3_UCPU_CLP_RSP_REG (HINIC3_HOST_CSR_BASE_ADDR + 0x50) +#define HINIC3_CLP_REG(member) (HINIC3_UCPU_CLP_##member##_REG) +
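+ /* HINIC3_CLP_REG() above and HINIC3_CLP_DATA() below token-paste the + * register role into the full CSR name, so CLP code can pick a register + * by role. A minimal usage sketch (csr_read32() is illustrative only, + * standing in for an accessor that decodes the bit30/31 bar-index flag + * before the MMIO read): + * + * u32 req_ctrl = csr_read32(hwif, HINIC3_CLP_REG(REQ)); + * u32 rsp_ctrl = csr_read32(hwif, HINIC3_CLP_REG(RSP)); + */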
+#define HINIC3_CLP_REQ_DATA HINIC3_BAR3_CLP_BASE_ADDR +#define HINIC3_CLP_RSP_DATA (HINIC3_BAR3_CLP_BASE_ADDR + 0x1000) +#define HINIC3_CLP_DATA(member) (HINIC3_CLP_##member##_DATA) + +#define HINIC3_PPF_ELECTION_OFFSET 0x0 +#define HINIC3_MPF_ELECTION_OFFSET 0x20 + +#define HINIC3_CSR_PPF_ELECTION_ADDR \ + (HINIC3_HOST_CSR_BASE_ADDR + HINIC3_PPF_ELECTION_OFFSET) + +#define HINIC3_CSR_GLOBAL_MPF_ELECTION_ADDR \ + (HINIC3_HOST_CSR_BASE_ADDR + HINIC3_MPF_ELECTION_OFFSET) + +#define HINIC3_CSR_FUNC_PPF_ELECT_BASE_ADDR (HINIC3_CFG_REGS_FLAG + 0x60) +#define HINIC3_CSR_FUNC_PPF_ELECT_PORT_STRIDE 0x4 + +#define HINIC3_CSR_FUNC_PPF_ELECT(host_idx) \ + (HINIC3_CSR_FUNC_PPF_ELECT_BASE_ADDR + \ + (host_idx) * HINIC3_CSR_FUNC_PPF_ELECT_PORT_STRIDE) + +#define HINIC3_CSR_DMA_ATTR_TBL_ADDR (HINIC3_CFG_REGS_FLAG + 0x380) +#define HINIC3_CSR_DMA_ATTR_INDIR_IDX_ADDR (HINIC3_CFG_REGS_FLAG + 0x390) + +/* MSI-X registers */ +#define HINIC3_CSR_MSIX_INDIR_IDX_ADDR (HINIC3_CFG_REGS_FLAG + 0x310) +#define HINIC3_CSR_MSIX_CTRL_ADDR (HINIC3_CFG_REGS_FLAG + 0x300) +#define HINIC3_CSR_MSIX_CNT_ADDR (HINIC3_CFG_REGS_FLAG + 0x304) +#define HINIC3_CSR_FUNC_MSI_CLR_WR_ADDR (HINIC3_CFG_REGS_FLAG + 0x58) + +#define HINIC3_MSI_CLR_INDIR_RESEND_TIMER_CLR_SHIFT 0 +#define HINIC3_MSI_CLR_INDIR_INT_MSK_SET_SHIFT 1 +#define HINIC3_MSI_CLR_INDIR_INT_MSK_CLR_SHIFT 2 +#define HINIC3_MSI_CLR_INDIR_AUTO_MSK_SET_SHIFT 3 +#define HINIC3_MSI_CLR_INDIR_AUTO_MSK_CLR_SHIFT 4 +#define HINIC3_MSI_CLR_INDIR_SIMPLE_INDIR_IDX_SHIFT 22 + +#define HINIC3_MSI_CLR_INDIR_RESEND_TIMER_CLR_MASK 0x1U +#define HINIC3_MSI_CLR_INDIR_INT_MSK_SET_MASK 0x1U +#define HINIC3_MSI_CLR_INDIR_INT_MSK_CLR_MASK 0x1U +#define HINIC3_MSI_CLR_INDIR_AUTO_MSK_SET_MASK 0x1U +#define HINIC3_MSI_CLR_INDIR_AUTO_MSK_CLR_MASK 0x1U +#define HINIC3_MSI_CLR_INDIR_SIMPLE_INDIR_IDX_MASK 0x3FFU + +#define HINIC3_MSI_CLR_INDIR_SET(val, member) \ + (((val) & HINIC3_MSI_CLR_INDIR_##member##_MASK) << \ + HINIC3_MSI_CLR_INDIR_##member##_SHIFT) + +/* EQ registers */ +#define HINIC3_AEQ_INDIR_IDX_ADDR (HINIC3_CFG_REGS_FLAG + 0x210) +#define HINIC3_CEQ_INDIR_IDX_ADDR (HINIC3_CFG_REGS_FLAG + 0x290) + +#define HINIC3_EQ_INDIR_IDX_ADDR(type) \ + ((type == HINIC3_AEQ) ? 
\ + HINIC3_AEQ_INDIR_IDX_ADDR : HINIC3_CEQ_INDIR_IDX_ADDR) + +#define HINIC3_AEQ_MTT_OFF_BASE_ADDR (HINIC3_CFG_REGS_FLAG + 0x240) +#define HINIC3_CEQ_MTT_OFF_BASE_ADDR (HINIC3_CFG_REGS_FLAG + 0x2C0) + +#define HINIC3_CSR_EQ_PAGE_OFF_STRIDE 8 + +#define HINIC3_AEQ_HI_PHYS_ADDR_REG(pg_num) \ + (HINIC3_AEQ_MTT_OFF_BASE_ADDR + \ + (pg_num) * HINIC3_CSR_EQ_PAGE_OFF_STRIDE) + +#define HINIC3_AEQ_LO_PHYS_ADDR_REG(pg_num) \ + (HINIC3_AEQ_MTT_OFF_BASE_ADDR + \ + (pg_num) * HINIC3_CSR_EQ_PAGE_OFF_STRIDE + 4) + +#define HINIC3_CEQ_HI_PHYS_ADDR_REG(pg_num) \ + (HINIC3_CEQ_MTT_OFF_BASE_ADDR + \ + (pg_num) * HINIC3_CSR_EQ_PAGE_OFF_STRIDE) + +#define HINIC3_CEQ_LO_PHYS_ADDR_REG(pg_num) \ + (HINIC3_CEQ_MTT_OFF_BASE_ADDR + \ + (pg_num) * HINIC3_CSR_EQ_PAGE_OFF_STRIDE + 4) + +#define HINIC3_CSR_AEQ_CTRL_0_ADDR (HINIC3_CFG_REGS_FLAG + 0x200) +#define HINIC3_CSR_AEQ_CTRL_1_ADDR (HINIC3_CFG_REGS_FLAG + 0x204) +#define HINIC3_CSR_AEQ_CONS_IDX_ADDR (HINIC3_CFG_REGS_FLAG + 0x208) +#define HINIC3_CSR_AEQ_PROD_IDX_ADDR (HINIC3_CFG_REGS_FLAG + 0x20C) +#define HINIC3_CSR_AEQ_CI_SIMPLE_INDIR_ADDR (HINIC3_CFG_REGS_FLAG + 0x50) + +#define HINIC3_CSR_CEQ_CTRL_0_ADDR (HINIC3_CFG_REGS_FLAG + 0x280) +#define HINIC3_CSR_CEQ_CTRL_1_ADDR (HINIC3_CFG_REGS_FLAG + 0x284) +#define HINIC3_CSR_CEQ_CONS_IDX_ADDR (HINIC3_CFG_REGS_FLAG + 0x288) +#define HINIC3_CSR_CEQ_PROD_IDX_ADDR (HINIC3_CFG_REGS_FLAG + 0x28c) +#define HINIC3_CSR_CEQ_CI_SIMPLE_INDIR_ADDR (HINIC3_CFG_REGS_FLAG + 0x54) + +/* API CMD registers */ +#define HINIC3_CSR_API_CMD_BASE (HINIC3_MGMT_REGS_FLAG + 0x2000) + +#define HINIC3_CSR_API_CMD_STRIDE 0x80 + +#define HINIC3_CSR_API_CMD_CHAIN_HEAD_HI_ADDR(idx) \ + (HINIC3_CSR_API_CMD_BASE + 0x0 + (idx) * HINIC3_CSR_API_CMD_STRIDE) + +#define HINIC3_CSR_API_CMD_CHAIN_HEAD_LO_ADDR(idx) \ + (HINIC3_CSR_API_CMD_BASE + 0x4 + (idx) * HINIC3_CSR_API_CMD_STRIDE) + +#define HINIC3_CSR_API_CMD_STATUS_HI_ADDR(idx) \ + (HINIC3_CSR_API_CMD_BASE + 0x8 + (idx) * HINIC3_CSR_API_CMD_STRIDE) + +#define HINIC3_CSR_API_CMD_STATUS_LO_ADDR(idx) \ + (HINIC3_CSR_API_CMD_BASE + 0xC + (idx) * HINIC3_CSR_API_CMD_STRIDE) + +#define HINIC3_CSR_API_CMD_CHAIN_NUM_CELLS_ADDR(idx) \ + (HINIC3_CSR_API_CMD_BASE + 0x10 + (idx) * HINIC3_CSR_API_CMD_STRIDE) + +#define HINIC3_CSR_API_CMD_CHAIN_CTRL_ADDR(idx) \ + (HINIC3_CSR_API_CMD_BASE + 0x14 + (idx) * HINIC3_CSR_API_CMD_STRIDE) + +#define HINIC3_CSR_API_CMD_CHAIN_PI_ADDR(idx) \ + (HINIC3_CSR_API_CMD_BASE + 0x1C + (idx) * HINIC3_CSR_API_CMD_STRIDE) + +#define HINIC3_CSR_API_CMD_CHAIN_REQ_ADDR(idx) \ + (HINIC3_CSR_API_CMD_BASE + 0x20 + (idx) * HINIC3_CSR_API_CMD_STRIDE) + +#define HINIC3_CSR_API_CMD_STATUS_0_ADDR(idx) \ + (HINIC3_CSR_API_CMD_BASE + 0x30 + (idx) * HINIC3_CSR_API_CMD_STRIDE) + +/* self test register */ +#define HINIC3_MGMT_HEALTH_STATUS_ADDR (HINIC3_MGMT_REGS_FLAG + 0x983c) + +#define HINIC3_CHIP_BASE_INFO_ADDR (HINIC3_MGMT_REGS_FLAG + 0xB02C) + +#define HINIC3_CHIP_ERR_STATUS0_ADDR (HINIC3_MGMT_REGS_FLAG + 0xC0EC) +#define HINIC3_CHIP_ERR_STATUS1_ADDR (HINIC3_MGMT_REGS_FLAG + 0xC0F0) + +#define HINIC3_ERR_INFO0_ADDR (HINIC3_MGMT_REGS_FLAG + 0xC0F4) +#define HINIC3_ERR_INFO1_ADDR (HINIC3_MGMT_REGS_FLAG + 0xC0F8) +#define HINIC3_ERR_INFO2_ADDR (HINIC3_MGMT_REGS_FLAG + 0xC0FC) + +#define HINIC3_MULT_HOST_SLAVE_STATUS_ADDR (HINIC3_MGMT_REGS_FLAG + 0xDF30) +#define HINIC3_MULT_MIGRATE_HOST_STATUS_ADDR (HINIC3_MGMT_REGS_FLAG + 0xDF4C) + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_dev_mgmt.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_dev_mgmt.c new file mode 100644 index 
000000000000..4c13a2e8ffd6 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_dev_mgmt.c @@ -0,0 +1,803 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include <net/addrconf.h> +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/io-mapping.h> +#include <linux/interrupt.h> +#include <linux/time.h> +#include <linux/timex.h> +#include <linux/rtc.h> +#include <linux/debugfs.h> + +#include "ossl_knl.h" +#include "hinic3_mt.h" +#include "hinic3_crm.h" +#include "hinic3_lld.h" +#include "hinic3_sriov.h" +#include "hinic3_nictool.h" +#include "hinic3_pci_id_tbl.h" +#include "hinic3_dev_mgmt.h" + +#define HINIC3_WAIT_TOOL_CNT_TIMEOUT 10000 +#define HINIC3_WAIT_TOOL_MIN_USLEEP_TIME 9900 +#define HINIC3_WAIT_TOOL_MAX_USLEEP_TIME 10000 + +static unsigned long card_bit_map; + +LIST_HEAD(g_hinic3_chip_list); + +struct list_head *get_hinic3_chip_list(void) +{ + return &g_hinic3_chip_list; +} + +void uld_dev_hold(struct hinic3_lld_dev *lld_dev, enum hinic3_service_type type) +{ + struct hinic3_pcidev *pci_adapter = pci_get_drvdata(lld_dev->pdev); + + atomic_inc(&pci_adapter->uld_ref_cnt[type]); +} +EXPORT_SYMBOL(uld_dev_hold); + +void uld_dev_put(struct hinic3_lld_dev *lld_dev, enum hinic3_service_type type) +{ + struct hinic3_pcidev *pci_adapter = pci_get_drvdata(lld_dev->pdev); + + atomic_dec(&pci_adapter->uld_ref_cnt[type]); +} +EXPORT_SYMBOL(uld_dev_put); + +void lld_dev_cnt_init(struct hinic3_pcidev *pci_adapter) +{ + atomic_set(&pci_adapter->ref_cnt, 0); +} + +void lld_dev_hold(struct hinic3_lld_dev *dev) +{ + struct hinic3_pcidev *pci_adapter = pci_get_drvdata(dev->pdev); + + atomic_inc(&pci_adapter->ref_cnt); +} + +void lld_dev_put(struct hinic3_lld_dev *dev) +{ + struct hinic3_pcidev *pci_adapter = pci_get_drvdata(dev->pdev); + + atomic_dec(&pci_adapter->ref_cnt); +} + +void wait_lld_dev_unused(struct hinic3_pcidev *pci_adapter) +{ + unsigned long end; + + end = jiffies + msecs_to_jiffies(HINIC3_WAIT_TOOL_CNT_TIMEOUT); + do { + if (!atomic_read(&pci_adapter->ref_cnt)) + return; + + /* for a 10 ms sleep, use usleep_range for better precision */ + usleep_range(HINIC3_WAIT_TOOL_MIN_USLEEP_TIME, + HINIC3_WAIT_TOOL_MAX_USLEEP_TIME); + } while (time_before(jiffies, end)); +} + +enum hinic3_lld_status { + HINIC3_NODE_CHANGE = BIT(0), +}; + +struct hinic3_lld_lock { + /* lock for chip list */ + struct mutex lld_mutex; + unsigned long status; + atomic_t dev_ref_cnt; +}; + +struct hinic3_lld_lock g_lld_lock; + +#define WAIT_LLD_DEV_HOLD_TIMEOUT (10 * 60 * 1000) /* 10 minutes */ +#define WAIT_LLD_DEV_NODE_CHANGED (10 * 60 * 1000) /* 10 minutes */ +#define WAIT_LLD_DEV_REF_CNT_EMPTY (2 * 60 * 1000) /* 2 minutes */ +#define PRINT_TIMEOUT_INTERVAL 10000 +#define MS_PER_SEC 1000 +#define LLD_LOCK_MIN_USLEEP_TIME 900 +#define LLD_LOCK_MAX_USLEEP_TIME 1000 + +/* A node in chip_node is about to change; tools and drivers can't get + * a node while the change is in progress + */ +void lld_lock_chip_node(void) +{ + unsigned long end; + bool timeout = true; + u32 loop_cnt; + + mutex_lock(&g_lld_lock.lld_mutex); + + loop_cnt = 0; + end = jiffies + msecs_to_jiffies(WAIT_LLD_DEV_NODE_CHANGED); + do { + if (!test_and_set_bit(HINIC3_NODE_CHANGE, &g_lld_lock.status)) { + timeout = false; + break; + } + + loop_cnt++; + if (loop_cnt % PRINT_TIMEOUT_INTERVAL == 0) + pr_warn("Waiting %us for lld node change to complete\n", + loop_cnt / MS_PER_SEC); + + /* for a 1 ms sleep, use
usleep_range for better precision */ + usleep_range(LLD_LOCK_MIN_USLEEP_TIME, + LLD_LOCK_MAX_USLEEP_TIME); + } while (time_before(jiffies, end)); + + if (timeout && test_and_set_bit(HINIC3_NODE_CHANGE, &g_lld_lock.status)) + pr_warn("Timed out waiting for lld node change to complete when trying to get lld lock\n"); + + loop_cnt = 0; + timeout = true; + end = jiffies + msecs_to_jiffies(WAIT_LLD_DEV_NODE_CHANGED); + do { + if (!atomic_read(&g_lld_lock.dev_ref_cnt)) { + timeout = false; + break; + } + + loop_cnt++; + if (loop_cnt % PRINT_TIMEOUT_INTERVAL == 0) + pr_warn("Waiting %us for lld dev to become unused, reference count: %d\n", + loop_cnt / MS_PER_SEC, + atomic_read(&g_lld_lock.dev_ref_cnt)); + + /* for a 1 ms sleep, use usleep_range for better precision */ + usleep_range(LLD_LOCK_MIN_USLEEP_TIME, + LLD_LOCK_MAX_USLEEP_TIME); + } while (time_before(jiffies, end)); + + if (timeout && atomic_read(&g_lld_lock.dev_ref_cnt)) + pr_warn("Timed out waiting for lld dev to become unused\n"); + + mutex_unlock(&g_lld_lock.lld_mutex); +} + +void lld_unlock_chip_node(void) +{ + clear_bit(HINIC3_NODE_CHANGE, &g_lld_lock.status); +} + +/* When tools or other drivers want to get a node of chip_node, use this + * function to prevent the node from being freed + */ +void lld_hold(void) +{ + unsigned long end; + u32 loop_cnt = 0; + + /* ensure no chip node is being changed */ + mutex_lock(&g_lld_lock.lld_mutex); + + end = jiffies + msecs_to_jiffies(WAIT_LLD_DEV_HOLD_TIMEOUT); + do { + if (!test_bit(HINIC3_NODE_CHANGE, &g_lld_lock.status)) + break; + + loop_cnt++; + + if (loop_cnt % PRINT_TIMEOUT_INTERVAL == 0) + pr_warn("Waiting %us for lld node change to complete\n", + loop_cnt / MS_PER_SEC); + /* for a 1 ms sleep, use usleep_range for better precision */ + usleep_range(LLD_LOCK_MIN_USLEEP_TIME, + LLD_LOCK_MAX_USLEEP_TIME); + } while (time_before(jiffies, end)); + + if (test_bit(HINIC3_NODE_CHANGE, &g_lld_lock.status)) + pr_warn("Timed out waiting for lld node change to complete when trying to hold lld dev\n"); + + atomic_inc(&g_lld_lock.dev_ref_cnt); + mutex_unlock(&g_lld_lock.lld_mutex); +} + +void lld_put(void) +{ + atomic_dec(&g_lld_lock.dev_ref_cnt); +} + +void hinic3_lld_lock_init(void) +{ + mutex_init(&g_lld_lock.lld_mutex); + atomic_set(&g_lld_lock.dev_ref_cnt, 0); +} + +void hinic3_get_all_chip_id(void *id_info) +{ + struct nic_card_id *card_id = (struct nic_card_id *)id_info; + struct card_node *chip_node = NULL; + int i = 0; + int id, err; + + lld_hold(); + list_for_each_entry(chip_node, &g_hinic3_chip_list, node) { + err = sscanf(chip_node->chip_name, HINIC3_CHIP_NAME "%d", &id); + if (err < 0) { + pr_err("Failed to get hinic3 id\n"); + continue; + } + card_id->id[i] = (u32)id; + i++; + } + lld_put(); + card_id->num = (u32)i; +} + +void hinic3_get_card_func_info_by_card_name(const char *chip_name, + struct hinic3_card_func_info *card_func) +{ + struct card_node *chip_node = NULL; + struct hinic3_pcidev *dev = NULL; + struct func_pdev_info *pdev_info = NULL; + + card_func->num_pf = 0; + + lld_hold(); + + list_for_each_entry(chip_node, &g_hinic3_chip_list, node) { + if (strncmp(chip_node->chip_name, chip_name, IFNAMSIZ)) + continue; + + list_for_each_entry(dev, &chip_node->func_list, node) { + if (hinic3_func_type(dev->hwdev) == TYPE_VF) + continue; + + pdev_info = &card_func->pdev_info[card_func->num_pf]; + pdev_info->bar1_size = + pci_resource_len(dev->pcidev, + HINIC3_PF_PCI_CFG_REG_BAR); + pdev_info->bar1_phy_addr = + pci_resource_start(dev->pcidev, + HINIC3_PF_PCI_CFG_REG_BAR); + + pdev_info->bar3_size = + pci_resource_len(dev->pcidev, +
HINIC3_PCI_MGMT_REG_BAR); + pdev_info->bar3_phy_addr = + pci_resource_start(dev->pcidev, + HINIC3_PCI_MGMT_REG_BAR); + + card_func->num_pf++; + if (card_func->num_pf >= MAX_SIZE) { + lld_put(); + return; + } + } + } + + lld_put(); +} + +static bool is_pcidev_match_chip_name(const char *ifname, struct hinic3_pcidev *dev, + struct card_node *chip_node, enum func_type type) +{ + if (!strncmp(chip_node->chip_name, ifname, IFNAMSIZ)) { + if (hinic3_func_type(dev->hwdev) != type) + return false; + return true; + } + + return false; +} + +static struct hinic3_lld_dev *get_dst_type_lld_dev_by_chip_name(const char *ifname, + enum func_type type) +{ + struct card_node *chip_node = NULL; + struct hinic3_pcidev *dev = NULL; + + list_for_each_entry(chip_node, &g_hinic3_chip_list, node) { + list_for_each_entry(dev, &chip_node->func_list, node) { + if (is_pcidev_match_chip_name(ifname, dev, chip_node, type)) + return &dev->lld_dev; + } + } + + return NULL; +} + +struct hinic3_lld_dev *hinic3_get_lld_dev_by_chip_name(const char *chip_name) +{ + struct hinic3_lld_dev *dev = NULL; + + lld_hold(); + + dev = get_dst_type_lld_dev_by_chip_name(chip_name, TYPE_PPF); + if (dev) + goto out; + + dev = get_dst_type_lld_dev_by_chip_name(chip_name, TYPE_PF); + if (dev) + goto out; + + dev = get_dst_type_lld_dev_by_chip_name(chip_name, TYPE_VF); +out: + if (dev) + lld_dev_hold(dev); + lld_put(); + + return dev; +} + +static int get_dynamic_uld_dev_name(struct hinic3_pcidev *dev, enum hinic3_service_type type, + char *ifname) +{ + u32 out_size = IFNAMSIZ; + + if (!g_uld_info[type].ioctl) + return -EFAULT; + + return g_uld_info[type].ioctl(dev->uld_dev[type], GET_ULD_DEV_NAME, + NULL, 0, ifname, &out_size); +} + +static bool is_pcidev_match_dev_name(const char *dev_name, struct hinic3_pcidev *dev, + enum hinic3_service_type type) +{ + enum hinic3_service_type i; + char nic_uld_name[IFNAMSIZ] = {0}; + int err; + + if (type > SERVICE_T_MAX) + return false; + + if (type == SERVICE_T_MAX) { + for (i = SERVICE_T_OVS; i < SERVICE_T_MAX; i++) { + if (!strncmp(dev->uld_dev_name[i], dev_name, IFNAMSIZ)) + return true; + } + } else { + if (!strncmp(dev->uld_dev_name[type], dev_name, IFNAMSIZ)) + return true; + } + + err = get_dynamic_uld_dev_name(dev, SERVICE_T_NIC, (char *)nic_uld_name); + if (err == 0) { + if (!strncmp(nic_uld_name, dev_name, IFNAMSIZ)) + return true; + } + + return false; +} + +static struct hinic3_lld_dev *get_lld_dev_by_dev_name(const char *dev_name, + enum hinic3_service_type type, bool hold) +{ + struct card_node *chip_node = NULL; + struct hinic3_pcidev *dev = NULL; + + lld_hold(); + + list_for_each_entry(chip_node, &g_hinic3_chip_list, node) { + list_for_each_entry(dev, &chip_node->func_list, node) { + if (is_pcidev_match_dev_name(dev_name, dev, type)) { + if (hold) + lld_dev_hold(&dev->lld_dev); + lld_put(); + return &dev->lld_dev; + } + } + } + + lld_put(); + + return NULL; +} + +struct hinic3_lld_dev *hinic3_get_lld_dev_by_chip_and_port(const char *chip_name, u8 port_id) +{ + struct card_node *chip_node = NULL; + struct hinic3_pcidev *dev = NULL; + + lld_hold(); + list_for_each_entry(chip_node, &g_hinic3_chip_list, node) { + list_for_each_entry(dev, &chip_node->func_list, node) { + if (hinic3_func_type(dev->hwdev) == TYPE_VF) + continue; + + if (hinic3_physical_port_id(dev->hwdev) == port_id && + !strncmp(chip_node->chip_name, chip_name, IFNAMSIZ)) { + lld_dev_hold(&dev->lld_dev); + lld_put(); + + return &dev->lld_dev; + } + } + } + lld_put(); + + return NULL; +} + +struct hinic3_lld_dev 
*hinic3_get_lld_dev_by_dev_name(const char *dev_name, + enum hinic3_service_type type) +{ + return get_lld_dev_by_dev_name(dev_name, type, true); +} +EXPORT_SYMBOL(hinic3_get_lld_dev_by_dev_name); + +struct hinic3_lld_dev *hinic3_get_lld_dev_by_dev_name_unsafe(const char *dev_name, + enum hinic3_service_type type) +{ + return get_lld_dev_by_dev_name(dev_name, type, false); +} +EXPORT_SYMBOL(hinic3_get_lld_dev_by_dev_name_unsafe); + +static void *get_uld_by_lld_dev(struct hinic3_lld_dev *lld_dev, enum hinic3_service_type type, + bool hold) +{ + struct hinic3_pcidev *dev = NULL; + void *uld = NULL; + + if (!lld_dev) + return NULL; + + dev = pci_get_drvdata(lld_dev->pdev); + if (!dev) + return NULL; + + spin_lock_bh(&dev->uld_lock); + if (!dev->uld_dev[type] || !test_bit(type, &dev->uld_state)) { + spin_unlock_bh(&dev->uld_lock); + return NULL; + } + uld = dev->uld_dev[type]; + + if (hold) + atomic_inc(&dev->uld_ref_cnt[type]); + spin_unlock_bh(&dev->uld_lock); + + return uld; +} + +void *hinic3_get_uld_dev(struct hinic3_lld_dev *lld_dev, enum hinic3_service_type type) +{ + return get_uld_by_lld_dev(lld_dev, type, true); +} +EXPORT_SYMBOL(hinic3_get_uld_dev); + +void *hinic3_get_uld_dev_unsafe(struct hinic3_lld_dev *lld_dev, enum hinic3_service_type type) +{ + return get_uld_by_lld_dev(lld_dev, type, false); +} +EXPORT_SYMBOL(hinic3_get_uld_dev_unsafe); + +static struct hinic3_lld_dev *get_ppf_lld_dev(struct hinic3_lld_dev *lld_dev, bool hold) +{ + struct hinic3_pcidev *pci_adapter = NULL; + struct card_node *chip_node = NULL; + struct hinic3_pcidev *dev = NULL; + + if (!lld_dev) + return NULL; + + pci_adapter = pci_get_drvdata(lld_dev->pdev); + if (!pci_adapter) + return NULL; + + lld_hold(); + chip_node = pci_adapter->chip_node; + list_for_each_entry(dev, &chip_node->func_list, node) { + if (dev->hwdev && hinic3_func_type(dev->hwdev) == TYPE_PPF) { + if (hold) + lld_dev_hold(&dev->lld_dev); + lld_put(); + return &dev->lld_dev; + } + } + lld_put(); + + return NULL; +} + +struct hinic3_lld_dev *hinic3_get_ppf_lld_dev(struct hinic3_lld_dev *lld_dev) +{ + return get_ppf_lld_dev(lld_dev, true); +} +EXPORT_SYMBOL(hinic3_get_ppf_lld_dev); + +struct hinic3_lld_dev *hinic3_get_ppf_lld_dev_unsafe(struct hinic3_lld_dev *lld_dev) +{ + return get_ppf_lld_dev(lld_dev, false); +} +EXPORT_SYMBOL(hinic3_get_ppf_lld_dev_unsafe); + +int hinic3_get_chip_name(struct hinic3_lld_dev *lld_dev, char *chip_name, u16 max_len) +{ + struct hinic3_pcidev *pci_adapter = NULL; + + if (!lld_dev || !chip_name || !max_len) + return -EINVAL; + + pci_adapter = pci_get_drvdata(lld_dev->pdev); + if (!pci_adapter) + return -EFAULT; + + lld_hold(); + strncpy(chip_name, pci_adapter->chip_node->chip_name, max_len); + chip_name[max_len - 1] = '\0'; + + lld_put(); + + return 0; +} +EXPORT_SYMBOL(hinic3_get_chip_name); + +struct hinic3_hwdev *hinic3_get_sdk_hwdev_by_lld(struct hinic3_lld_dev *lld_dev) +{ + return lld_dev->hwdev; +} + +struct card_node *hinic3_get_chip_node_by_lld(struct hinic3_lld_dev *lld_dev) +{ + struct hinic3_pcidev *pci_adapter = pci_get_drvdata(lld_dev->pdev); + + return pci_adapter->chip_node; +} + +static struct card_node *hinic3_get_chip_node_by_hwdev(const void *hwdev) +{ + struct card_node *chip_node = NULL; + struct card_node *node_tmp = NULL; + struct hinic3_pcidev *dev = NULL; + + if (!hwdev) + return NULL; + + lld_hold(); + + list_for_each_entry(node_tmp, &g_hinic3_chip_list, node) { + if (!chip_node) { + list_for_each_entry(dev, &node_tmp->func_list, node) { + if (dev->hwdev == hwdev) { + chip_node = 
node_tmp; + break; + } + } + } + } + + lld_put(); + + return chip_node; +} + +static bool is_func_valid(struct hinic3_pcidev *dev) +{ + if (hinic3_func_type(dev->hwdev) == TYPE_VF) + return false; + + return true; +} + +void hinic3_get_card_info(const void *hwdev, void *bufin) +{ + struct card_node *chip_node = NULL; + struct card_info *info = (struct card_info *)bufin; + struct hinic3_pcidev *dev = NULL; + void *fun_hwdev = NULL; + u32 i = 0; + + info->pf_num = 0; + + chip_node = hinic3_get_chip_node_by_hwdev(hwdev); + if (!chip_node) + return; + + lld_hold(); + + list_for_each_entry(dev, &chip_node->func_list, node) { + if (!is_func_valid(dev)) + continue; + + fun_hwdev = dev->hwdev; + + if (hinic3_support_nic(fun_hwdev, NULL)) { + if (dev->uld_dev[SERVICE_T_NIC]) { + info->pf[i].pf_type |= (u32)BIT(SERVICE_T_NIC); + get_dynamic_uld_dev_name(dev, SERVICE_T_NIC, info->pf[i].name); + } + } + + if (hinic3_support_ppa(fun_hwdev, NULL)) { + if (dev->uld_dev[SERVICE_T_PPA]) { + info->pf[i].pf_type |= (u32)BIT(SERVICE_T_PPA); + get_dynamic_uld_dev_name(dev, SERVICE_T_PPA, info->pf[i].name); + } + } + + if (hinic3_func_for_mgmt(fun_hwdev)) + strlcpy(info->pf[i].name, "FOR_MGMT", IFNAMSIZ); + + strlcpy(info->pf[i].bus_info, pci_name(dev->pcidev), + sizeof(info->pf[i].bus_info)); + info->pf_num++; + i = info->pf_num; + } + + lld_put(); +} + +struct hinic3_sriov_info *hinic3_get_sriov_info_by_pcidev(struct pci_dev *pdev) +{ + struct hinic3_pcidev *pci_adapter = NULL; + + if (!pdev) + return NULL; + + pci_adapter = pci_get_drvdata(pdev); + if (!pci_adapter) + return NULL; + + return &pci_adapter->sriov_info; +} + +void *hinic3_get_hwdev_by_pcidev(struct pci_dev *pdev) +{ + struct hinic3_pcidev *pci_adapter = NULL; + + if (!pdev) + return NULL; + + pci_adapter = pci_get_drvdata(pdev); + if (!pci_adapter) + return NULL; + + return pci_adapter->hwdev; +} + +bool hinic3_is_in_host(void) +{ + struct card_node *chip_node = NULL; + struct hinic3_pcidev *dev = NULL; + + lld_hold(); + list_for_each_entry(chip_node, &g_hinic3_chip_list, node) { + list_for_each_entry(dev, &chip_node->func_list, node) { + if (hinic3_func_type(dev->hwdev) != TYPE_VF) { + lld_put(); + return true; + } + } + } + + lld_put(); + + return false; +} + + +static bool chip_node_is_exist(struct hinic3_pcidev *pci_adapter, + unsigned char *bus_number) +{ + struct card_node *chip_node = NULL; + struct pci_dev *pf_pdev = NULL; + + if (!pci_is_root_bus(pci_adapter->pcidev->bus)) + *bus_number = pci_adapter->pcidev->bus->number; + + if (*bus_number != 0) { + if (pci_adapter->pcidev->is_virtfn) { + pf_pdev = pci_adapter->pcidev->physfn; + *bus_number = pf_pdev->bus->number; + } + + list_for_each_entry(chip_node, &g_hinic3_chip_list, node) { + if (chip_node->bus_num == *bus_number) { + pci_adapter->chip_node = chip_node; + return true; + } + } + } else if (HINIC3_IS_VF_DEV(pci_adapter->pcidev) || + HINIC3_IS_SPU_DEV(pci_adapter->pcidev)) { + list_for_each_entry(chip_node, &g_hinic3_chip_list, node) { + if (chip_node) { + pci_adapter->chip_node = chip_node; + return true; + } + } + } + + return false; +} + +int alloc_chip_node(struct hinic3_pcidev *pci_adapter) +{ + struct card_node *chip_node = NULL; + unsigned char i; + unsigned char bus_number = 0; + + if (chip_node_is_exist(pci_adapter, &bus_number)) + return 0; + + for (i = 0; i < CARD_MAX_SIZE; i++) { + if (test_and_set_bit(i, &card_bit_map) == 0) + break; + } + + if (i == CARD_MAX_SIZE) { + sdk_err(&pci_adapter->pcidev->dev, "Failed to alloc card id\n"); + return -EFAULT; + } + + 
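The card-id scan above is a find-first-zero-bit allocator: the first clear bit in card_bit_map becomes the card index and the numeric suffix of the chip name. A standalone illustration of the same idea (plain C stand-in for the kernel's test_and_set_bit(); single-threaded, so an ordinary bitmask suffices):

	#include <stdio.h>

	#define CARD_MAX 64
	static unsigned long long card_bits;

	static int alloc_card_id(void)
	{
		for (int i = 0; i < CARD_MAX; i++) {
			if (!(card_bits & (1ULL << i))) {
				card_bits |= 1ULL << i;	/* claim id, like test_and_set_bit() */
				return i;		/* becomes the "hinic<i>" suffix */
			}
		}
		return -1;			/* all card ids in use */
	}

	int main(void)
	{
		printf("first id: %d\n", alloc_card_id());	/* 0 */
		printf("next id: %d\n", alloc_card_id());	/* 1 */
		return 0;
	}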
chip_node = kzalloc(sizeof(*chip_node), GFP_KERNEL); + if (!chip_node) { + clear_bit(i, &card_bit_map); + sdk_err(&pci_adapter->pcidev->dev, + "Failed to alloc chip node\n"); + return -ENOMEM; + } + + /* bus number */ + chip_node->bus_num = bus_number; + + if (snprintf(chip_node->chip_name, IFNAMSIZ, "%s%u", HINIC3_CHIP_NAME, i) < 0) { + clear_bit(i, &card_bit_map); + kfree(chip_node); + return -EINVAL; + } + + sdk_info(&pci_adapter->pcidev->dev, + "Add new chip %s to global list succeeded\n", + chip_node->chip_name); + + list_add_tail(&chip_node->node, &g_hinic3_chip_list); + + INIT_LIST_HEAD(&chip_node->func_list); + pci_adapter->chip_node = chip_node; + + return 0; +} + +void free_chip_node(struct hinic3_pcidev *pci_adapter) +{ + struct card_node *chip_node = pci_adapter->chip_node; + int id, err; + + if (list_empty(&chip_node->func_list)) { + list_del(&chip_node->node); + sdk_info(&pci_adapter->pcidev->dev, + "Delete chip %s from global list succeeded\n", + chip_node->chip_name); + err = sscanf(chip_node->chip_name, HINIC3_CHIP_NAME "%d", &id); + if (err != 1) + sdk_err(&pci_adapter->pcidev->dev, "Failed to get hinic3 id\n"); + + clear_bit(id, &card_bit_map); + + kfree(chip_node); + } +} + +int hinic3_get_pf_id(struct card_node *chip_node, u32 port_id, u32 *pf_id, u32 *isvalid) +{ + struct hinic3_pcidev *dev = NULL; + + lld_hold(); + list_for_each_entry(dev, &chip_node->func_list, node) { + if (hinic3_func_type(dev->hwdev) == TYPE_VF) + continue; + + if (hinic3_physical_port_id(dev->hwdev) == port_id) { + *pf_id = hinic3_global_func_id(dev->hwdev); + *isvalid = 1; + break; + } + } + lld_put(); + + return 0; +} diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_dev_mgmt.h b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_dev_mgmt.h new file mode 100644 index 000000000000..0b7bf8e18732 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_dev_mgmt.h @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC3_DEV_MGMT_H +#define HINIC3_DEV_MGMT_H +#include <linux/types.h> +#include <linux/bitops.h> + +#include "hinic3_sriov.h" +#include "hinic3_lld.h" + +#define HINIC3_VF_PCI_CFG_REG_BAR 0 +#define HINIC3_PF_PCI_CFG_REG_BAR 1 + +#define HINIC3_PCI_INTR_REG_BAR 2 +#define HINIC3_PCI_MGMT_REG_BAR 3 /* Only PFs have the mgmt BAR */ +#define HINIC3_PCI_DB_BAR 4 + +#define PRINT_ULD_DETACH_TIMEOUT_INTERVAL 1000 /* 1 second */ +#define ULD_LOCK_MIN_USLEEP_TIME 900 +#define ULD_LOCK_MAX_USLEEP_TIME 1000 + +#define HINIC3_IS_VF_DEV(pdev) ((pdev)->device == HINIC3_DEV_ID_VF) +#define HINIC3_IS_SPU_DEV(pdev) ((pdev)->device == HINIC3_DEV_ID_SPU) + +enum { + HINIC3_NOT_PROBE = 1, + HINIC3_PROBE_START = 2, + HINIC3_PROBE_OK = 3, + HINIC3_IN_REMOVE = 4, +}; + +/* Per-pcidev private structure */ +struct hinic3_pcidev { + struct pci_dev *pcidev; + void *hwdev; + struct card_node *chip_node; + struct hinic3_lld_dev lld_dev; + /* Record the service object addresses, + * such as hinic3_dev, toe_dev and fc_dev + */ + void *uld_dev[SERVICE_T_MAX]; + /* Record the service object name */ + char uld_dev_name[SERVICE_T_MAX][IFNAMSIZ]; + /* Node in the global list used by the driver to manage + * all function devices + */ + struct list_head node; + + bool disable_vf_load; + bool disable_srv_load[SERVICE_T_MAX]; + + void __iomem *cfg_reg_base; + void __iomem *intr_reg_base; + void __iomem *mgmt_reg_base; + u64 db_dwqe_len; + u64 db_base_phy; + void __iomem *db_base; + + /* lock for attach/detach uld */ + struct mutex pdev_mutex; + int
lld_state; + u32 rsvd1; + + struct hinic3_sriov_info sriov_info; + + /* set while the uld driver is processing an event */ + unsigned long state; + struct pci_device_id id; + + atomic_t ref_cnt; + + atomic_t uld_ref_cnt[SERVICE_T_MAX]; + unsigned long uld_state; + spinlock_t uld_lock; + + u16 probe_fault_level; + u16 rsvd2; + u64 rsvd4; +}; + +struct hinic_chip_info { + u8 chip_id; /* chip id within card */ + u8 card_type; /* hinic_multi_chip_card_type */ + u8 rsvd[10]; /* reserved 10 bytes */ +}; + +struct list_head *get_hinic3_chip_list(void); + +int alloc_chip_node(struct hinic3_pcidev *pci_adapter); + +void free_chip_node(struct hinic3_pcidev *pci_adapter); + +void lld_lock_chip_node(void); + +void lld_unlock_chip_node(void); + +void hinic3_lld_lock_init(void); + +void lld_dev_cnt_init(struct hinic3_pcidev *pci_adapter); +void wait_lld_dev_unused(struct hinic3_pcidev *pci_adapter); + +void *hinic3_get_hwdev_by_pcidev(struct pci_dev *pdev); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_devlink.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_devlink.c new file mode 100644 index 000000000000..1949ab879cbc --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_devlink.c @@ -0,0 +1,431 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include <linux/netlink.h> +#include <linux/pci.h> +#include <linux/firmware.h> + +#include "hinic3_devlink.h" +#ifdef HAVE_DEVLINK_FLASH_UPDATE_PARAMS +#include "hinic3_common.h" +#include "hinic3_api_cmd.h" +#include "hinic3_mgmt.h" +#include "hinic3_hw.h" + +static bool check_image_valid(struct hinic3_hwdev *hwdev, const u8 *buf, + u32 size, struct host_image *host_image) +{ + struct firmware_image *fw_image = NULL; + u32 len = 0; + u32 i; + + fw_image = (struct firmware_image *)buf; + if (fw_image->fw_magic != FW_MAGIC_NUM) { + sdk_err(hwdev->dev_hdl, "Wrong fw magic read from file, fw_magic: 0x%x\n", + fw_image->fw_magic); + return false; + } + + if (fw_image->fw_info.section_cnt > FW_TYPE_MAX_NUM) { + sdk_err(hwdev->dev_hdl, "Wrong fw type number read from file, fw_type_num: 0x%x\n", + fw_image->fw_info.section_cnt); + return false; + } + + for (i = 0; i < fw_image->fw_info.section_cnt; i++) { + len += fw_image->section_info[i].section_len; + memcpy(&host_image->section_info[i], &fw_image->section_info[i], + sizeof(struct firmware_section)); + } + + if (len != fw_image->fw_len || + (u32)(fw_image->fw_len + FW_IMAGE_HEAD_SIZE) != size) { + sdk_err(hwdev->dev_hdl, "Wrong data size read from file\n"); + return false; + } + + host_image->image_info.total_len = fw_image->fw_len; + host_image->image_info.fw_version = fw_image->fw_version; + host_image->type_num = fw_image->fw_info.section_cnt; + host_image->device_id = fw_image->device_id; + + return true; +} + +static bool check_image_integrity(struct hinic3_hwdev *hwdev, struct host_image *host_image) +{ + u64 collect_section_type = 0; + u32 type, i; + + for (i = 0; i < host_image->type_num; i++) { + type = host_image->section_info[i].section_type; + if (collect_section_type & (1ULL << type)) { + sdk_err(hwdev->dev_hdl, "Duplicate section type: %u\n", type); + return false; + } + collect_section_type |= (1ULL << type); + } + + if ((collect_section_type & IMAGE_COLD_SUB_MODULES_MUST_IN) == + IMAGE_COLD_SUB_MODULES_MUST_IN && + (collect_section_type & IMAGE_CFG_SUB_MODULES_MUST_IN) != 0) + return true; + + sdk_err(hwdev->dev_hdl, "Failed to check file integrity, valid: 
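The integrity check above folds every section type into one u64 bitmap, rejecting duplicates and then requiring the mandatory masks to be fully present. A standalone sketch of the same bookkeeping (the mask value here is illustrative; the real ones are the IMAGE_*_SUB_MODULES_MUST_IN definitions from hinic3_devlink.h):

	#include <stdint.h>
	#include <stdbool.h>

	#define MUST_IN_COLD 0x3ffULL	/* demo mask: section types 0..9 */

	static bool sections_complete(const uint32_t *types, int n)
	{
		uint64_t seen = 0;

		for (int i = 0; i < n; i++) {
			if (seen & (1ULL << types[i]))
				return false;		/* duplicate section type */
			seen |= 1ULL << types[i];
		}
		/* every mandatory sub-module must appear exactly once */
		return (seen & MUST_IN_COLD) == MUST_IN_COLD;
	}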
0x%llx\n", + (IMAGE_COLD_SUB_MODULES_MUST_IN | IMAGE_CFG_SUB_MODULES_MUST_IN), + collect_section_type); + + return false; +} + +static bool check_image_device_type(struct hinic3_hwdev *hwdev, u32 device_type) +{ + struct comm_cmd_board_info board_info; + + memset(&board_info, 0, sizeof(board_info)); + if (hinic3_get_board_info(hwdev, &board_info.info, HINIC3_CHANNEL_COMM)) { + sdk_err(hwdev->dev_hdl, "Failed to get board info\n"); + return false; + } + + if (device_type == board_info.info.board_type) + return true; + + sdk_err(hwdev->dev_hdl, "The image device type: 0x%x doesn't match the firmware device type: 0x%x\n", + device_type, board_info.info.board_type); + + return false; +} + +static void encapsulate_update_cmd(struct hinic3_cmd_update_firmware *msg, + struct firmware_section *section_info, + int *remain_len, u32 *send_len, u32 *send_pos) +{ + memset(msg->data, 0, sizeof(msg->data)); + msg->ctl_info.sf = (*remain_len == section_info->section_len) ? true : false; + msg->section_info.section_crc = section_info->section_crc; + msg->section_info.section_type = section_info->section_type; + msg->section_version = section_info->section_version; + msg->section_len = section_info->section_len; + msg->section_offset = *send_pos; + msg->ctl_info.bit_signed = section_info->section_flag & 0x1; + + if (*remain_len <= FW_FRAGMENT_MAX_LEN) { + msg->ctl_info.sl = true; + msg->ctl_info.fragment_len = (u32)(*remain_len); + *send_len += section_info->section_len; + } else { + msg->ctl_info.sl = false; + msg->ctl_info.fragment_len = FW_FRAGMENT_MAX_LEN; + *send_len += FW_FRAGMENT_MAX_LEN; + } +} + +static int hinic3_flash_firmware(struct hinic3_hwdev *hwdev, const u8 *data, + struct host_image *image) +{ + u32 send_pos, send_len, section_offset, i; + struct hinic3_cmd_update_firmware *update_msg = NULL; + u16 out_size = sizeof(*update_msg); + bool total_flag = false; + int remain_len, err; + + update_msg = kzalloc(sizeof(*update_msg), GFP_KERNEL); + if (!update_msg) { + sdk_err(hwdev->dev_hdl, "Failed to alloc update message\n"); + return -ENOMEM; + } + + for (i = 0; i < image->type_num; i++) { + section_offset = image->section_info[i].section_offset; + remain_len = (int)(image->section_info[i].section_len); + send_len = 0; + send_pos = 0; + + while (remain_len > 0) { + if (!total_flag) { + update_msg->total_len = image->image_info.total_len; + total_flag = true; + } else { + update_msg->total_len = 0; + } + + encapsulate_update_cmd(update_msg, &image->section_info[i], + &remain_len, &send_len, &send_pos); + + memcpy(update_msg->data, + ((data + FW_IMAGE_HEAD_SIZE) + section_offset) + send_pos, + update_msg->ctl_info.fragment_len); + + err = hinic3_pf_to_mgmt_sync(hwdev, HINIC3_MOD_COMM, + COMM_MGMT_CMD_UPDATE_FW, + update_msg, sizeof(*update_msg), + update_msg, &out_size, + FW_UPDATE_MGMT_TIMEOUT); + if (err || !out_size || update_msg->msg_head.status) { + sdk_err(hwdev->dev_hdl, "Failed to update firmware, err: %d, status: 0x%x, out size: 0x%x\n", + err, update_msg->msg_head.status, out_size); + err = update_msg->msg_head.status ? 
+ update_msg->msg_head.status : -EIO; + kfree(update_msg); + return err; + } + + send_pos = send_len; + remain_len = (int)(image->section_info[i].section_len - send_len); + } + } + + kfree(update_msg); + + return 0; +} + +static int hinic3_flash_update_notify(struct devlink *devlink, const struct firmware *fw, + struct host_image *image, struct netlink_ext_ack *extack) +{ + struct hinic3_devlink *devlink_dev = devlink_priv(devlink); + struct hinic3_hwdev *hwdev = devlink_dev->hwdev; + int err; + +#ifdef HAVE_DEVLINK_FW_FILE_NAME_MEMBER + devlink_flash_update_begin_notify(devlink); +#endif + devlink_flash_update_status_notify(devlink, "Flash firmware begin", NULL, 0, 0); + sdk_info(hwdev->dev_hdl, "Flash firmware begin\n"); + err = hinic3_flash_firmware(hwdev, fw->data, image); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to flash firmware, err: %d\n", err); + NL_SET_ERR_MSG_MOD(extack, "Flash firmware failed"); + devlink_flash_update_status_notify(devlink, "Flash firmware failed", NULL, 0, 0); + } else { + sdk_info(hwdev->dev_hdl, "Flash firmware end\n"); + devlink_flash_update_status_notify(devlink, "Flash firmware end", NULL, 0, 0); + } +#ifdef HAVE_DEVLINK_FW_FILE_NAME_MEMBER + devlink_flash_update_end_notify(devlink); +#endif + + return err; +} + +#ifdef HAVE_DEVLINK_FW_FILE_NAME_PARAM +static int hinic3_devlink_flash_update(struct devlink *devlink, const char *file_name, + const char *component, struct netlink_ext_ack *extack) +#else +static int hinic3_devlink_flash_update(struct devlink *devlink, + struct devlink_flash_update_params *params, + struct netlink_ext_ack *extack) +#endif +{ + struct hinic3_devlink *devlink_dev = devlink_priv(devlink); + struct hinic3_hwdev *hwdev = devlink_dev->hwdev; +#ifdef HAVE_DEVLINK_FW_FILE_NAME_MEMBER + const struct firmware *fw = NULL; +#else + const struct firmware *fw = params->fw; +#endif + struct host_image *image = NULL; + int err; + + image = kzalloc(sizeof(*image), GFP_KERNEL); + if (!image) { + sdk_err(hwdev->dev_hdl, "Failed to alloc host image\n"); + err = -ENOMEM; + goto devlink_param_reset; + } + +#ifdef HAVE_DEVLINK_FW_FILE_NAME_MEMBER +#ifdef HAVE_DEVLINK_FW_FILE_NAME_PARAM + err = request_firmware_direct(&fw, file_name, hwdev->dev_hdl); +#else + err = request_firmware_direct(&fw, params->file_name, hwdev->dev_hdl); +#endif + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to request firmware\n"); + goto devlink_request_fw_err; + } +#endif + + if (!check_image_valid(hwdev, fw->data, (u32)(fw->size), image) || + !check_image_integrity(hwdev, image) || + !check_image_device_type(hwdev, image->device_id)) { + sdk_err(hwdev->dev_hdl, "Failed to check image\n"); + NL_SET_ERR_MSG_MOD(extack, "Check image failed"); + err = -EINVAL; + goto devlink_update_out; + } + + err = hinic3_flash_update_notify(devlink, fw, image, extack); + +devlink_update_out: +#ifdef HAVE_DEVLINK_FW_FILE_NAME_MEMBER + release_firmware(fw); + +devlink_request_fw_err: +#endif + kfree(image); + +devlink_param_reset: + /* reset activate_fw and switch_cfg after flash update operation */ + devlink_dev->activate_fw = FW_CFG_DEFAULT_INDEX; + devlink_dev->switch_cfg = FW_CFG_DEFAULT_INDEX; + + return err; +} + +static const struct devlink_ops hinic3_devlink_ops = { + .flash_update = hinic3_devlink_flash_update, +}; + +static int hinic3_devlink_get_activate_firmware_config(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct hinic3_devlink *devlink_dev = devlink_priv(devlink); + + ctx->val.vu8 = devlink_dev->activate_fw; + + return 0; +} + +static 
int hinic3_devlink_set_activate_firmware_config(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct hinic3_devlink *devlink_dev = devlink_priv(devlink); + struct hinic3_hwdev *hwdev = devlink_dev->hwdev; + int err; + + devlink_dev->activate_fw = ctx->val.vu8; + sdk_info(hwdev->dev_hdl, "Activate firmware begin\n"); + + err = hinic3_activate_firmware(hwdev, devlink_dev->activate_fw); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to activate firmware, err: %d\n", err); + return err; + } + + sdk_info(hwdev->dev_hdl, "Activate firmware end\n"); + + return 0; +} + +static int hinic3_devlink_get_switch_config(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct hinic3_devlink *devlink_dev = devlink_priv(devlink); + + ctx->val.vu8 = devlink_dev->switch_cfg; + + return 0; +} + +static int hinic3_devlink_set_switch_config(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct hinic3_devlink *devlink_dev = devlink_priv(devlink); + struct hinic3_hwdev *hwdev = devlink_dev->hwdev; + int err; + + devlink_dev->switch_cfg = ctx->val.vu8; + sdk_info(hwdev->dev_hdl, "Switch cfg begin"); + + err = hinic3_switch_config(hwdev, devlink_dev->switch_cfg); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to switch cfg, err: %d\n", err); + return err; + } + + sdk_info(hwdev->dev_hdl, "Switch cfg end\n"); + + return 0; +} + +static int hinic3_devlink_firmware_config_validate(struct devlink *devlink, u32 id, + union devlink_param_value val, + struct netlink_ext_ack *extack) +{ + struct hinic3_devlink *devlink_dev = devlink_priv(devlink); + struct hinic3_hwdev *hwdev = devlink_dev->hwdev; + u8 cfg_index = val.vu8; + + if (cfg_index > FW_CFG_MAX_INDEX) { + sdk_err(hwdev->dev_hdl, "Firmware cfg index out of range [0,7]\n"); + NL_SET_ERR_MSG_MOD(extack, "Firmware cfg index out of range [0,7]"); + return -ERANGE; + } + + return 0; +} + +static const struct devlink_param hinic3_devlink_params[] = { + DEVLINK_PARAM_DRIVER(HINIC3_DEVLINK_PARAM_ID_ACTIVATE_FW, + "activate_fw", DEVLINK_PARAM_TYPE_U8, + BIT(DEVLINK_PARAM_CMODE_PERMANENT), + hinic3_devlink_get_activate_firmware_config, + hinic3_devlink_set_activate_firmware_config, + hinic3_devlink_firmware_config_validate), + DEVLINK_PARAM_DRIVER(HINIC3_DEVLINK_PARAM_ID_SWITCH_CFG, + "switch_cfg", DEVLINK_PARAM_TYPE_U8, + BIT(DEVLINK_PARAM_CMODE_PERMANENT), + hinic3_devlink_get_switch_config, + hinic3_devlink_set_switch_config, + hinic3_devlink_firmware_config_validate), +}; + +int hinic3_init_devlink(struct hinic3_hwdev *hwdev) +{ + struct devlink *devlink = NULL; + struct pci_dev *pdev = NULL; + int err; + + devlink = devlink_alloc(&hinic3_devlink_ops, sizeof(struct hinic3_devlink)); + if (!devlink) { + sdk_err(hwdev->dev_hdl, "Failed to alloc devlink\n"); + return -ENOMEM; + } + + hwdev->devlink_dev = devlink_priv(devlink); + hwdev->devlink_dev->hwdev = hwdev; + hwdev->devlink_dev->activate_fw = FW_CFG_DEFAULT_INDEX; + hwdev->devlink_dev->switch_cfg = FW_CFG_DEFAULT_INDEX; + + pdev = hwdev->hwif->pdev; + err = devlink_register(devlink, &pdev->dev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to register devlink\n"); + goto register_devlink_err; + } + + err = devlink_params_register(devlink, hinic3_devlink_params, + ARRAY_SIZE(hinic3_devlink_params)); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to register devlink params\n"); + goto register_devlink_params_err; + } + + devlink_params_publish(devlink); + + return 0; + +register_devlink_params_err: + devlink_unregister(devlink); + 
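For reference, the two permanent-cmode parameters registered below would be driven from userspace with the standard devlink tool along these lines (the PCI address is a placeholder, and values must be within the 0..7 range enforced by the validate callback):

	# select firmware configuration index 1 and activate it
	devlink dev param set pci/0000:03:00.0 name activate_fw value 1 cmode permanent
	# switch the running configuration to index 1
	devlink dev param set pci/0000:03:00.0 name switch_cfg value 1 cmode permanent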
+register_devlink_err: + devlink_free(devlink); + + return -EFAULT; +} + +void hinic3_uninit_devlink(struct hinic3_hwdev *hwdev) +{ + struct devlink *devlink = priv_to_devlink(hwdev->devlink_dev); + + devlink_params_unpublish(devlink); + devlink_params_unregister(devlink, hinic3_devlink_params, + ARRAY_SIZE(hinic3_devlink_params)); + devlink_unregister(devlink); + devlink_free(devlink); +} +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_devlink.h b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_devlink.h new file mode 100644 index 000000000000..0b5a086358b9 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_devlink.h @@ -0,0 +1,149 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC3_DEVLINK_H +#define HINIC3_DEVLINK_H + +#include "ossl_knl.h" +#include "hinic3_hwdev.h" + +#define FW_MAGIC_NUM 0x5a5a1100 +#define FW_IMAGE_HEAD_SIZE 4096 +#define FW_FRAGMENT_MAX_LEN 1536 +#define FW_CFG_DEFAULT_INDEX 0xFF +#define FW_TYPE_MAX_NUM 0x40 +#define FW_CFG_MAX_INDEX 7 + +#ifdef HAVE_DEVLINK_FLASH_UPDATE_PARAMS +enum hinic3_devlink_param_id { + HINIC3_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, + HINIC3_DEVLINK_PARAM_ID_ACTIVATE_FW, + HINIC3_DEVLINK_PARAM_ID_SWITCH_CFG, +}; +#endif + +enum hinic3_firmware_type { + UP_FW_UPDATE_MIN_TYPE1 = 0x0, + UP_FW_UPDATE_UP_TEXT = 0x0, + UP_FW_UPDATE_UP_DATA = 0x1, + UP_FW_UPDATE_UP_DICT = 0x2, + UP_FW_UPDATE_TILE_PCPTR = 0x3, + UP_FW_UPDATE_TILE_TEXT = 0x4, + UP_FW_UPDATE_TILE_DATA = 0x5, + UP_FW_UPDATE_TILE_DICT = 0x6, + UP_FW_UPDATE_PPE_STATE = 0x7, + UP_FW_UPDATE_PPE_BRANCH = 0x8, + UP_FW_UPDATE_PPE_EXTACT = 0x9, + UP_FW_UPDATE_MAX_TYPE1 = 0x9, + UP_FW_UPDATE_CFG0 = 0xa, + UP_FW_UPDATE_CFG1 = 0xb, + UP_FW_UPDATE_CFG2 = 0xc, + UP_FW_UPDATE_CFG3 = 0xd, + UP_FW_UPDATE_MAX_TYPE1_CFG = 0xd, + + UP_FW_UPDATE_MIN_TYPE2 = 0x14, + UP_FW_UPDATE_MAX_TYPE2 = 0x14, + + UP_FW_UPDATE_MIN_TYPE3 = 0x18, + UP_FW_UPDATE_PHY = 0x18, + UP_FW_UPDATE_BIOS = 0x19, + UP_FW_UPDATE_HLINK_ONE = 0x1a, + UP_FW_UPDATE_HLINK_TWO = 0x1b, + UP_FW_UPDATE_HLINK_THR = 0x1c, + UP_FW_UPDATE_MAX_TYPE3 = 0x1c, + + UP_FW_UPDATE_MIN_TYPE4 = 0x20, + UP_FW_UPDATE_L0FW = 0x20, + UP_FW_UPDATE_L1FW = 0x21, + UP_FW_UPDATE_BOOT = 0x22, + UP_FW_UPDATE_SEC_DICT = 0x23, + UP_FW_UPDATE_HOT_PATCH0 = 0x24, + UP_FW_UPDATE_HOT_PATCH1 = 0x25, + UP_FW_UPDATE_HOT_PATCH2 = 0x26, + UP_FW_UPDATE_HOT_PATCH3 = 0x27, + UP_FW_UPDATE_HOT_PATCH4 = 0x28, + UP_FW_UPDATE_HOT_PATCH5 = 0x29, + UP_FW_UPDATE_HOT_PATCH6 = 0x2a, + UP_FW_UPDATE_HOT_PATCH7 = 0x2b, + UP_FW_UPDATE_HOT_PATCH8 = 0x2c, + UP_FW_UPDATE_HOT_PATCH9 = 0x2d, + UP_FW_UPDATE_HOT_PATCH10 = 0x2e, + UP_FW_UPDATE_HOT_PATCH11 = 0x2f, + UP_FW_UPDATE_HOT_PATCH12 = 0x30, + UP_FW_UPDATE_HOT_PATCH13 = 0x31, + UP_FW_UPDATE_HOT_PATCH14 = 0x32, + UP_FW_UPDATE_HOT_PATCH15 = 0x33, + UP_FW_UPDATE_HOT_PATCH16 = 0x34, + UP_FW_UPDATE_HOT_PATCH17 = 0x35, + UP_FW_UPDATE_HOT_PATCH18 = 0x36, + UP_FW_UPDATE_HOT_PATCH19 = 0x37, + UP_FW_UPDATE_MAX_TYPE4 = 0x37, + + UP_FW_UPDATE_MIN_TYPE5 = 0x3a, + UP_FW_UPDATE_OPTION_ROM = 0x3a, + UP_FW_UPDATE_MAX_TYPE5 = 0x3a, + + UP_FW_UPDATE_MIN_TYPE6 = 0x3e, + UP_FW_UPDATE_MAX_TYPE6 = 0x3e, + + UP_FW_UPDATE_MIN_TYPE7 = 0x40, + UP_FW_UPDATE_MAX_TYPE7 = 0x40, +}; + +#define IMAGE_MPU_ALL_IN (BIT_ULL(UP_FW_UPDATE_UP_TEXT) | \ + BIT_ULL(UP_FW_UPDATE_UP_DATA) | \ + BIT_ULL(UP_FW_UPDATE_UP_DICT)) + +#define IMAGE_NPU_ALL_IN (BIT_ULL(UP_FW_UPDATE_TILE_PCPTR) | \ + BIT_ULL(UP_FW_UPDATE_TILE_TEXT) | \ + BIT_ULL(UP_FW_UPDATE_TILE_DATA) | \ + 
BIT_ULL(UP_FW_UPDATE_TILE_DICT) | \ + BIT_ULL(UP_FW_UPDATE_PPE_STATE) | \ + BIT_ULL(UP_FW_UPDATE_PPE_BRANCH) | \ + BIT_ULL(UP_FW_UPDATE_PPE_EXTACT)) + +#define IMAGE_COLD_SUB_MODULES_MUST_IN (IMAGE_MPU_ALL_IN | IMAGE_NPU_ALL_IN) + +#define IMAGE_CFG_SUB_MODULES_MUST_IN (BIT_ULL(UP_FW_UPDATE_CFG0) | \ + BIT_ULL(UP_FW_UPDATE_CFG1) | \ + BIT_ULL(UP_FW_UPDATE_CFG2) | \ + BIT_ULL(UP_FW_UPDATE_CFG3)) + +struct firmware_section { + u32 section_len; + u32 section_offset; + u32 section_version; + u32 section_type; + u32 section_crc; + u32 section_flag; +}; + +struct firmware_image { + u32 fw_version; + u32 fw_len; + u32 fw_magic; + struct { + u32 section_cnt : 16; + u32 rsvd : 16; + } fw_info; + struct firmware_section section_info[FW_TYPE_MAX_NUM]; + u32 device_id; /* cfg fw board_type value */ + u32 rsvd0[101]; /* device_id and rsvd0[101] is update_head_extend_info */ + u32 rsvd1[534]; /* big bin file total size 4096B */ + u32 bin_data; /* obtain the address for use */ +}; + +struct host_image { + struct firmware_section section_info[FW_TYPE_MAX_NUM]; + struct { + u32 total_len; + u32 fw_version; + } image_info; + u32 type_num; + u32 device_id; +}; + +int hinic3_init_devlink(struct hinic3_hwdev *hwdev); +void hinic3_uninit_devlink(struct hinic3_hwdev *hwdev); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_eqs.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_eqs.c new file mode 100644 index 000000000000..7231e84bc692 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_eqs.c @@ -0,0 +1,1385 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/workqueue.h> +#include <linux/pci.h> +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/module.h> +#include <linux/spinlock.h> + +#include "ossl_knl.h" +#include "hinic3_crm.h" +#include "hinic3_hw.h" +#include "hinic3_common.h" +#include "hinic3_hwdev.h" +#include "hinic3_hwif.h" +#include "hinic3_hw.h" +#include "hinic3_csr.h" +#include "hinic3_hw_comm.h" +#include "hinic3_prof_adap.h" +#include "hinic3_eqs.h" + +#define HINIC3_EQS_WQ_NAME "hinic3_eqs" + +#define AEQ_CTRL_0_INTR_IDX_SHIFT 0 +#define AEQ_CTRL_0_DMA_ATTR_SHIFT 12 +#define AEQ_CTRL_0_PCI_INTF_IDX_SHIFT 20 +#define AEQ_CTRL_0_INTR_MODE_SHIFT 31 + +#define AEQ_CTRL_0_INTR_IDX_MASK 0x3FFU +#define AEQ_CTRL_0_DMA_ATTR_MASK 0x3FU +#define AEQ_CTRL_0_PCI_INTF_IDX_MASK 0x7U +#define AEQ_CTRL_0_INTR_MODE_MASK 0x1U + +#define AEQ_CTRL_0_SET(val, member) \ + (((val) & AEQ_CTRL_0_##member##_MASK) << \ + AEQ_CTRL_0_##member##_SHIFT) + +#define AEQ_CTRL_0_CLEAR(val, member) \ + ((val) & (~(AEQ_CTRL_0_##member##_MASK << \ + AEQ_CTRL_0_##member##_SHIFT))) + +#define AEQ_CTRL_1_LEN_SHIFT 0 +#define AEQ_CTRL_1_ELEM_SIZE_SHIFT 24 +#define AEQ_CTRL_1_PAGE_SIZE_SHIFT 28 + +#define AEQ_CTRL_1_LEN_MASK 0x1FFFFFU +#define AEQ_CTRL_1_ELEM_SIZE_MASK 0x3U +#define AEQ_CTRL_1_PAGE_SIZE_MASK 0xFU + +#define AEQ_CTRL_1_SET(val, member) \ + (((val) & AEQ_CTRL_1_##member##_MASK) << \ + AEQ_CTRL_1_##member##_SHIFT) + +#define AEQ_CTRL_1_CLEAR(val, member) \ + ((val) & (~(AEQ_CTRL_1_##member##_MASK << \ + AEQ_CTRL_1_##member##_SHIFT))) + +#define HINIC3_EQ_PROD_IDX_MASK 0xFFFFF +#define HINIC3_TASK_PROCESS_EQE_LIMIT 1024 +#define HINIC3_EQ_UPDATE_CI_STEP 64 + +/*lint -e806*/ +static uint g_aeq_len = HINIC3_DEFAULT_AEQ_LEN; +module_param(g_aeq_len, 
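The *_SET/*_CLEAR token-pasting macros above implement the usual read-modify-write on packed register fields: clear the field with the inverted mask at its shift, then OR in the new value. A standalone illustration (field layout invented for the demo, mirroring AEQ_CTRL_0's INTR_IDX field):

	#include <stdio.h>
	#include <stdint.h>

	#define CTRL0_INTR_IDX_SHIFT 0
	#define CTRL0_INTR_IDX_MASK  0x3FFU
	#define CTRL0_SET(val, f)   (((val) & CTRL0_##f##_MASK) << CTRL0_##f##_SHIFT)
	#define CTRL0_CLEAR(val, f) ((val) & ~(CTRL0_##f##_MASK << CTRL0_##f##_SHIFT))

	int main(void)
	{
		uint32_t reg = 0xdeadbeef;	/* pretend this was read from hw */

		/* read-modify-write: clear the field, then or-in the new value */
		reg = CTRL0_CLEAR(reg, INTR_IDX) | CTRL0_SET(5, INTR_IDX);
		printf("reg=0x%08x intr_idx=%u\n", reg, reg & CTRL0_INTR_IDX_MASK);
		return 0;
	}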
uint, 0444); +MODULE_PARM_DESC(g_aeq_len, + "aeq depth, valid range is " __stringify(HINIC3_MIN_AEQ_LEN) + " - " __stringify(HINIC3_MAX_AEQ_LEN)); + +static uint g_ceq_len = HINIC3_DEFAULT_CEQ_LEN; +module_param(g_ceq_len, uint, 0444); +MODULE_PARM_DESC(g_ceq_len, + "ceq depth, valid range is " __stringify(HINIC3_MIN_CEQ_LEN) + " - " __stringify(HINIC3_MAX_CEQ_LEN)); + +static uint g_num_ceqe_in_tasklet = HINIC3_TASK_PROCESS_EQE_LIMIT; +module_param(g_num_ceqe_in_tasklet, uint, 0444); +MODULE_PARM_DESC(g_num_ceqe_in_tasklet, + "The max number of ceqe can be processed in tasklet, default = 1024"); +/*lint +e806*/ + +#define CEQ_CTRL_0_INTR_IDX_SHIFT 0 +#define CEQ_CTRL_0_DMA_ATTR_SHIFT 12 +#define CEQ_CTRL_0_LIMIT_KICK_SHIFT 20 +#define CEQ_CTRL_0_PCI_INTF_IDX_SHIFT 24 +#define CEQ_CTRL_0_PAGE_SIZE_SHIFT 27 +#define CEQ_CTRL_0_INTR_MODE_SHIFT 31 + +#define CEQ_CTRL_0_INTR_IDX_MASK 0x3FFU +#define CEQ_CTRL_0_DMA_ATTR_MASK 0x3FU +#define CEQ_CTRL_0_LIMIT_KICK_MASK 0xFU +#define CEQ_CTRL_0_PCI_INTF_IDX_MASK 0x3U +#define CEQ_CTRL_0_PAGE_SIZE_MASK 0xF +#define CEQ_CTRL_0_INTR_MODE_MASK 0x1U + +#define CEQ_CTRL_0_SET(val, member) \ + (((val) & CEQ_CTRL_0_##member##_MASK) << \ + CEQ_CTRL_0_##member##_SHIFT) + +#define CEQ_CTRL_1_LEN_SHIFT 0 +#define CEQ_CTRL_1_GLB_FUNC_ID_SHIFT 20 + +#define CEQ_CTRL_1_LEN_MASK 0xFFFFFU +#define CEQ_CTRL_1_GLB_FUNC_ID_MASK 0xFFFU + +#define CEQ_CTRL_1_SET(val, member) \ + (((val) & CEQ_CTRL_1_##member##_MASK) << \ + CEQ_CTRL_1_##member##_SHIFT) + +#define EQ_ELEM_DESC_TYPE_SHIFT 0 +#define EQ_ELEM_DESC_SRC_SHIFT 7 +#define EQ_ELEM_DESC_SIZE_SHIFT 8 +#define EQ_ELEM_DESC_WRAPPED_SHIFT 31 + +#define EQ_ELEM_DESC_TYPE_MASK 0x7FU +#define EQ_ELEM_DESC_SRC_MASK 0x1U +#define EQ_ELEM_DESC_SIZE_MASK 0xFFU +#define EQ_ELEM_DESC_WRAPPED_MASK 0x1U + +#define EQ_ELEM_DESC_GET(val, member) \ + (((val) >> EQ_ELEM_DESC_##member##_SHIFT) & \ + EQ_ELEM_DESC_##member##_MASK) + +#define EQ_CONS_IDX_CONS_IDX_SHIFT 0 +#define EQ_CONS_IDX_INT_ARMED_SHIFT 31 + +#define EQ_CONS_IDX_CONS_IDX_MASK 0x1FFFFFU +#define EQ_CONS_IDX_INT_ARMED_MASK 0x1U + +#define EQ_CONS_IDX_SET(val, member) \ + (((val) & EQ_CONS_IDX_##member##_MASK) << \ + EQ_CONS_IDX_##member##_SHIFT) + +#define EQ_CONS_IDX_CLEAR(val, member) \ + ((val) & (~(EQ_CONS_IDX_##member##_MASK << \ + EQ_CONS_IDX_##member##_SHIFT))) + +#define EQ_CI_SIMPLE_INDIR_CI_SHIFT 0 +#define EQ_CI_SIMPLE_INDIR_ARMED_SHIFT 21 +#define EQ_CI_SIMPLE_INDIR_AEQ_IDX_SHIFT 30 +#define EQ_CI_SIMPLE_INDIR_CEQ_IDX_SHIFT 24 + +#define EQ_CI_SIMPLE_INDIR_CI_MASK 0x1FFFFFU +#define EQ_CI_SIMPLE_INDIR_ARMED_MASK 0x1U +#define EQ_CI_SIMPLE_INDIR_AEQ_IDX_MASK 0x3U +#define EQ_CI_SIMPLE_INDIR_CEQ_IDX_MASK 0xFFU + +#define EQ_CI_SIMPLE_INDIR_SET(val, member) \ + (((val) & EQ_CI_SIMPLE_INDIR_##member##_MASK) << \ + EQ_CI_SIMPLE_INDIR_##member##_SHIFT) + +#define EQ_CI_SIMPLE_INDIR_CLEAR(val, member) \ + ((val) & (~(EQ_CI_SIMPLE_INDIR_##member##_MASK << \ + EQ_CI_SIMPLE_INDIR_##member##_SHIFT))) + +#define EQ_WRAPPED(eq) ((u32)(eq)->wrapped << EQ_VALID_SHIFT) + +#define EQ_CONS_IDX(eq) ((eq)->cons_idx | \ + ((u32)(eq)->wrapped << EQ_WRAPPED_SHIFT)) + +#define EQ_CONS_IDX_REG_ADDR(eq) \ + (((eq)->type == HINIC3_AEQ) ? \ + HINIC3_CSR_AEQ_CONS_IDX_ADDR : \ + HINIC3_CSR_CEQ_CONS_IDX_ADDR) +#define EQ_CI_SIMPLE_INDIR_REG_ADDR(eq) \ + (((eq)->type == HINIC3_AEQ) ? \ + HINIC3_CSR_AEQ_CI_SIMPLE_INDIR_ADDR : \ + HINIC3_CSR_CEQ_CI_SIMPLE_INDIR_ADDR) + +#define EQ_PROD_IDX_REG_ADDR(eq) \ + (((eq)->type == HINIC3_AEQ) ? 
\ + HINIC3_CSR_AEQ_PROD_IDX_ADDR : \ + HINIC3_CSR_CEQ_PROD_IDX_ADDR) + +#define HINIC3_EQ_HI_PHYS_ADDR_REG(type, pg_num) \ + ((u32)((type == HINIC3_AEQ) ? \ + HINIC3_AEQ_HI_PHYS_ADDR_REG(pg_num) : \ + HINIC3_CEQ_HI_PHYS_ADDR_REG(pg_num))) + +#define HINIC3_EQ_LO_PHYS_ADDR_REG(type, pg_num) \ + ((u32)((type == HINIC3_AEQ) ? \ + HINIC3_AEQ_LO_PHYS_ADDR_REG(pg_num) : \ + HINIC3_CEQ_LO_PHYS_ADDR_REG(pg_num))) + +#define GET_EQ_NUM_PAGES(eq, size) \ + ((u16)(ALIGN((u32)((eq)->eq_len * (eq)->elem_size), \ + (size)) / (size))) + +#define HINIC3_EQ_MAX_PAGES(eq) \ + ((eq)->type == HINIC3_AEQ ? HINIC3_AEQ_MAX_PAGES : \ + HINIC3_CEQ_MAX_PAGES) + +#define GET_EQ_NUM_ELEMS(eq, pg_size) ((pg_size) / (u32)(eq)->elem_size) + +#define GET_EQ_ELEMENT(eq, idx) \ + (((u8 *)(eq)->eq_pages[(idx) / (eq)->num_elem_in_pg].align_vaddr) + \ + (u32)(((idx) & ((eq)->num_elem_in_pg - 1)) * (eq)->elem_size)) + +#define GET_AEQ_ELEM(eq, idx) \ + ((struct hinic3_aeq_elem *)GET_EQ_ELEMENT((eq), (idx))) + +#define GET_CEQ_ELEM(eq, idx) ((u32 *)GET_EQ_ELEMENT((eq), (idx))) + +#define GET_CURR_AEQ_ELEM(eq) GET_AEQ_ELEM((eq), (eq)->cons_idx) + +#define GET_CURR_CEQ_ELEM(eq) GET_CEQ_ELEM((eq), (eq)->cons_idx) + +#define PAGE_IN_4K(page_size) ((page_size) >> 12) +#define EQ_SET_HW_PAGE_SIZE_VAL(eq) \ + ((u32)ilog2(PAGE_IN_4K((eq)->page_size))) + +#define ELEMENT_SIZE_IN_32B(eq) (((eq)->elem_size) >> 5) +#define EQ_SET_HW_ELEM_SIZE_VAL(eq) ((u32)ilog2(ELEMENT_SIZE_IN_32B(eq))) + +#define AEQ_DMA_ATTR_DEFAULT 0 +#define CEQ_DMA_ATTR_DEFAULT 0 + +#define CEQ_LMT_KICK_DEFAULT 0 + +#define EQ_MSIX_RESEND_TIMER_CLEAR 1 + +#define EQ_WRAPPED_SHIFT 20 + +#define EQ_VALID_SHIFT 31 + +#define CEQE_TYPE_SHIFT 23 +#define CEQE_TYPE_MASK 0x7 + +#define CEQE_TYPE(type) (((type) >> CEQE_TYPE_SHIFT) & \ + CEQE_TYPE_MASK) + +#define CEQE_DATA_MASK 0x3FFFFFF +#define CEQE_DATA(data) ((data) & CEQE_DATA_MASK) + +#define aeq_to_aeqs(eq) \ + container_of((eq) - (eq)->q_id, struct hinic3_aeqs, aeq[0]) + +#define ceq_to_ceqs(eq) \ + container_of((eq) - (eq)->q_id, struct hinic3_ceqs, ceq[0]) + +static irqreturn_t ceq_interrupt(int irq, void *data); +static irqreturn_t aeq_interrupt(int irq, void *data); + +static void ceq_tasklet(ulong ceq_data); + +/** + * hinic3_aeq_register_hw_cb - register aeq callback for specific event + * @hwdev: the pointer to hw device + * @pri_handle: the pointer to private invoker device + * @event: event for the handler + * @hw_cb: callback function + **/ +int hinic3_aeq_register_hw_cb(void *hwdev, void *pri_handle, enum hinic3_aeq_type event, + hinic3_aeq_hwe_cb hwe_cb) +{ + struct hinic3_aeqs *aeqs = NULL; + + if (!hwdev || !hwe_cb || event >= HINIC3_MAX_AEQ_EVENTS) + return -EINVAL; + + aeqs = ((struct hinic3_hwdev *)hwdev)->aeqs; + + aeqs->aeq_hwe_cb[event] = hwe_cb; + aeqs->aeq_hwe_cb_data[event] = pri_handle; + + set_bit(HINIC3_AEQ_HW_CB_REG, &aeqs->aeq_hw_cb_state[event]); + + return 0; +} +EXPORT_SYMBOL(hinic3_aeq_register_hw_cb); + +/** + * hinic3_aeq_unregister_hw_cb - unregister the aeq callback for specific event + * @hwdev: the pointer to hw device + * @event: event for the handler + **/ +void hinic3_aeq_unregister_hw_cb(void *hwdev, enum hinic3_aeq_type event) +{ + struct hinic3_aeqs *aeqs = NULL; + + if (!hwdev || event >= HINIC3_MAX_AEQ_EVENTS) + return; + + aeqs = ((struct hinic3_hwdev *)hwdev)->aeqs; + + clear_bit(HINIC3_AEQ_HW_CB_REG, &aeqs->aeq_hw_cb_state[event]); + + while (test_bit(HINIC3_AEQ_HW_CB_RUNNING, + &aeqs->aeq_hw_cb_state[event])) + usleep_range(EQ_USLEEP_LOW_BOUND, EQ_USLEEP_HIG_BOUND); + 
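The register/unregister paths above synchronize without a lock: a REG bit gates new invocations and a RUNNING bit lets the unregister side wait out an in-flight callback before clearing the pointer. A user-space rendition of the same handshake (C11 atomics standing in for set_bit/test_bit/clear_bit, and a busy-wait instead of usleep_range()):

	#include <stdatomic.h>

	enum { CB_REG, CB_RUNNING };
	static _Atomic unsigned long cb_state;
	static void (*cb)(void);

	static void cb_invoke(void)
	{
		atomic_fetch_or(&cb_state, 1UL << CB_RUNNING);
		if (cb && (atomic_load(&cb_state) & (1UL << CB_REG)))
			cb();				/* runs without holding a lock */
		atomic_fetch_and(&cb_state, ~(1UL << CB_RUNNING));
	}

	static void cb_unregister(void)
	{
		atomic_fetch_and(&cb_state, ~(1UL << CB_REG));
		while (atomic_load(&cb_state) & (1UL << CB_RUNNING))
			;				/* wait for in-flight callback */
		cb = (void (*)(void))0;
	}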
+ aeqs->aeq_hwe_cb[event] = NULL; +} +EXPORT_SYMBOL(hinic3_aeq_unregister_hw_cb); + +/** + * hinic3_aeq_register_swe_cb - register aeq callback for sw event + * @hwdev: the pointer to hw device + * @pri_handle: the pointer to private invoker device + * @event: soft event for the handler + * @sw_cb: callback function + **/ +int hinic3_aeq_register_swe_cb(void *hwdev, void *pri_handle, enum hinic3_aeq_sw_type event, + hinic3_aeq_swe_cb aeq_swe_cb) +{ + struct hinic3_aeqs *aeqs = NULL; + + if (!hwdev || !aeq_swe_cb || event >= HINIC3_MAX_AEQ_SW_EVENTS) + return -EINVAL; + + aeqs = ((struct hinic3_hwdev *)hwdev)->aeqs; + + aeqs->aeq_swe_cb[event] = aeq_swe_cb; + aeqs->aeq_swe_cb_data[event] = pri_handle; + + set_bit(HINIC3_AEQ_SW_CB_REG, &aeqs->aeq_sw_cb_state[event]); + + return 0; +} +EXPORT_SYMBOL(hinic3_aeq_register_swe_cb); + +/** + * hinic3_aeq_unregister_swe_cb - unregister the aeq callback for sw event + * @hwdev: the pointer to hw device + * @event: soft event for the handler + **/ +void hinic3_aeq_unregister_swe_cb(void *hwdev, enum hinic3_aeq_sw_type event) +{ + struct hinic3_aeqs *aeqs = NULL; + + if (!hwdev || event >= HINIC3_MAX_AEQ_SW_EVENTS) + return; + + aeqs = ((struct hinic3_hwdev *)hwdev)->aeqs; + + clear_bit(HINIC3_AEQ_SW_CB_REG, &aeqs->aeq_sw_cb_state[event]); + + while (test_bit(HINIC3_AEQ_SW_CB_RUNNING, + &aeqs->aeq_sw_cb_state[event])) + usleep_range(EQ_USLEEP_LOW_BOUND, EQ_USLEEP_HIG_BOUND); + + aeqs->aeq_swe_cb[event] = NULL; +} +EXPORT_SYMBOL(hinic3_aeq_unregister_swe_cb); + +/** + * hinic3_ceq_register_cb - register ceq callback for specific event + * @hwdev: the pointer to hw device + * @pri_handle: the pointer to private invoker device + * @event: event for the handler + * @ceq_cb: callback function + **/ +int hinic3_ceq_register_cb(void *hwdev, void *pri_handle, enum hinic3_ceq_event event, + hinic3_ceq_event_cb callback) +{ + struct hinic3_ceqs *ceqs = NULL; + + + if (!hwdev || event >= HINIC3_MAX_CEQ_EVENTS) + return -EINVAL; + + ceqs = ((struct hinic3_hwdev *)hwdev)->ceqs; + + ceqs->ceq_cb[event] = callback; + ceqs->ceq_cb_data[event] = pri_handle; + + set_bit(HINIC3_CEQ_CB_REG, &ceqs->ceq_cb_state[event]); + + return 0; +} +EXPORT_SYMBOL(hinic3_ceq_register_cb); + +/** + * hinic3_ceq_unregister_cb - unregister ceq callback for specific event + * @hwdev: the pointer to hw device + * @event: event for the handler + **/ +void hinic3_ceq_unregister_cb(void *hwdev, enum hinic3_ceq_event event) +{ + struct hinic3_ceqs *ceqs = NULL; + + + if (!hwdev || event >= HINIC3_MAX_CEQ_EVENTS) + return; + + ceqs = ((struct hinic3_hwdev *)hwdev)->ceqs; + + clear_bit(HINIC3_CEQ_CB_REG, &ceqs->ceq_cb_state[event]); + + while (test_bit(HINIC3_CEQ_CB_RUNNING, &ceqs->ceq_cb_state[event])) + usleep_range(EQ_USLEEP_LOW_BOUND, EQ_USLEEP_HIG_BOUND); + + ceqs->ceq_cb[event] = NULL; +} +EXPORT_SYMBOL(hinic3_ceq_unregister_cb); + +/** + * set_eq_cons_idx - write the cons idx to the hw + * @eq: The event queue to update the cons idx for + * @cons idx: consumer index value + **/ +static void set_eq_cons_idx(struct hinic3_eq *eq, u32 arm_state) +{ + u32 eq_wrap_ci, val; + u32 addr = EQ_CI_SIMPLE_INDIR_REG_ADDR(eq); + + eq_wrap_ci = EQ_CONS_IDX(eq); + + /* if use poll mode only eq0 use int_arm mode */ + if (eq->q_id != 0 && eq->hwdev->poll) + val = EQ_CI_SIMPLE_INDIR_SET(HINIC3_EQ_NOT_ARMED, ARMED); + else + val = EQ_CI_SIMPLE_INDIR_SET(arm_state, ARMED); + if (eq->type == HINIC3_AEQ) { + val = val | + EQ_CI_SIMPLE_INDIR_SET(eq_wrap_ci, CI) | + EQ_CI_SIMPLE_INDIR_SET(eq->q_id, AEQ_IDX); + } 
else { + val = val | + EQ_CI_SIMPLE_INDIR_SET(eq_wrap_ci, CI) | + EQ_CI_SIMPLE_INDIR_SET(eq->q_id, CEQ_IDX); + } + + hinic3_hwif_write_reg(eq->hwdev->hwif, addr, val); +} + +/** + * ceq_event_handler - handle for the ceq events + * @ceqs: ceqs part of the chip + * @ceqe: ceq element of the event + **/ +static void ceq_event_handler(struct hinic3_ceqs *ceqs, u32 ceqe) +{ + struct hinic3_hwdev *hwdev = ceqs->hwdev; + enum hinic3_ceq_event event = CEQE_TYPE(ceqe); + u32 ceqe_data = CEQE_DATA(ceqe); + + if (event >= HINIC3_MAX_CEQ_EVENTS) { + sdk_err(hwdev->dev_hdl, "Ceq unknown event:%d, ceqe date: 0x%x\n", + event, ceqe_data); + return; + } + + set_bit(HINIC3_CEQ_CB_RUNNING, &ceqs->ceq_cb_state[event]); + + if (ceqs->ceq_cb[event] && + test_bit(HINIC3_CEQ_CB_REG, &ceqs->ceq_cb_state[event])) + ceqs->ceq_cb[event](ceqs->ceq_cb_data[event], ceqe_data); + + clear_bit(HINIC3_CEQ_CB_RUNNING, &ceqs->ceq_cb_state[event]); +} + +static void aeq_elem_handler(struct hinic3_eq *eq, u32 aeqe_desc) +{ + struct hinic3_aeqs *aeqs = aeq_to_aeqs(eq); + struct hinic3_aeq_elem *aeqe_pos; + enum hinic3_aeq_type event; + enum hinic3_aeq_sw_type sw_type; + u32 sw_event; + u8 data[HINIC3_AEQE_DATA_SIZE], size; + + aeqe_pos = GET_CURR_AEQ_ELEM(eq); + + eq->hwdev->cur_recv_aeq_cnt++; + + event = EQ_ELEM_DESC_GET(aeqe_desc, TYPE); + if (EQ_ELEM_DESC_GET(aeqe_desc, SRC)) { + sw_event = event; + sw_type = sw_event >= HINIC3_NIC_FATAL_ERROR_MAX ? + HINIC3_STATEFUL_EVENT : HINIC3_STATELESS_EVENT; + /* SW event uses only the first 8B */ + memcpy(data, aeqe_pos->aeqe_data, HINIC3_AEQE_DATA_SIZE); + hinic3_be32_to_cpu(data, HINIC3_AEQE_DATA_SIZE); + set_bit(HINIC3_AEQ_SW_CB_RUNNING, + &aeqs->aeq_sw_cb_state[sw_type]); + if (aeqs->aeq_swe_cb[sw_type] && + test_bit(HINIC3_AEQ_SW_CB_REG, + &aeqs->aeq_sw_cb_state[sw_type])) + aeqs->aeq_swe_cb[sw_type](aeqs->aeq_swe_cb_data[sw_type], event, data); + + clear_bit(HINIC3_AEQ_SW_CB_RUNNING, + &aeqs->aeq_sw_cb_state[sw_type]); + return; + } + + if (event < HINIC3_MAX_AEQ_EVENTS) { + memcpy(data, aeqe_pos->aeqe_data, HINIC3_AEQE_DATA_SIZE); + hinic3_be32_to_cpu(data, HINIC3_AEQE_DATA_SIZE); + + size = EQ_ELEM_DESC_GET(aeqe_desc, SIZE); + set_bit(HINIC3_AEQ_HW_CB_RUNNING, + &aeqs->aeq_hw_cb_state[event]); + if (aeqs->aeq_hwe_cb[event] && + test_bit(HINIC3_AEQ_HW_CB_REG, + &aeqs->aeq_hw_cb_state[event])) + aeqs->aeq_hwe_cb[event](aeqs->aeq_hwe_cb_data[event], data, size); + clear_bit(HINIC3_AEQ_HW_CB_RUNNING, + &aeqs->aeq_hw_cb_state[event]); + return; + } + sdk_warn(eq->hwdev->dev_hdl, "Unknown aeq hw event %d\n", event); +} + +/** + * aeq_irq_handler - handler for the aeq event + * @eq: the async event queue of the event + **/ +static bool aeq_irq_handler(struct hinic3_eq *eq) +{ + struct hinic3_aeq_elem *aeqe_pos = NULL; + u32 aeqe_desc; + u32 i, eqe_cnt = 0; + + for (i = 0; i < HINIC3_TASK_PROCESS_EQE_LIMIT; i++) { + aeqe_pos = GET_CURR_AEQ_ELEM(eq); + + /* Data in HW is in Big endian Format */ + aeqe_desc = be32_to_cpu(aeqe_pos->desc); + + /* HW updates wrapped bit, when it adds eq element event */ + if (EQ_ELEM_DESC_GET(aeqe_desc, WRAPPED) == eq->wrapped) + return false; + + dma_rmb(); + + aeq_elem_handler(eq, aeqe_desc); + + eq->cons_idx++; + + if (eq->cons_idx == eq->eq_len) { + eq->cons_idx = 0; + eq->wrapped = !eq->wrapped; + } + + if (++eqe_cnt >= HINIC3_EQ_UPDATE_CI_STEP) { + eqe_cnt = 0; + set_eq_cons_idx(eq, HINIC3_EQ_NOT_ARMED); + } + } + + return true; +} + +/** + * ceq_irq_handler - handler for the ceq event + * @eq: the completion event queue of the event + **/ +static 
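aeq_irq_handler() above stops when an element's wrapped bit equals the driver's copy, because hardware flips the bit each time it laps the ring; the driver toggles its own copy when the consumer index wraps. A minimal standalone model of that convention, including the EQ_CONS_IDX() composition written back to hardware:

	#include <stdio.h>
	#include <stdbool.h>

	#define EQ_LEN 4	/* toy ring */

	int main(void)
	{
		unsigned int ci = 0;
		bool wrapped = false;	/* eq->wrapped, toggled per lap */

		for (int n = 0; n < 6; n++) {	/* consume six elements */
			ci++;
			if (ci == EQ_LEN) {	/* completed one lap */
				ci = 0;
				wrapped = !wrapped;
			}
			/* EQ_CONS_IDX(): ci | wrapped << 20 */
			printf("ci=%u wrapped=%d reg=0x%x\n",
			       ci, wrapped, ci | (unsigned int)wrapped << 20);
		}
		return 0;
	}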
bool ceq_irq_handler(struct hinic3_eq *eq) +{ + struct hinic3_ceqs *ceqs = ceq_to_ceqs(eq); + u32 ceqe, eqe_cnt = 0; + u32 i; + + for (i = 0; i < g_num_ceqe_in_tasklet; i++) { + ceqe = *(GET_CURR_CEQ_ELEM(eq)); + ceqe = be32_to_cpu(ceqe); + + /* HW updates wrapped bit, when it adds eq element event */ + if (EQ_ELEM_DESC_GET(ceqe, WRAPPED) == eq->wrapped) + return false; + + ceq_event_handler(ceqs, ceqe); + + eq->cons_idx++; + + if (eq->cons_idx == eq->eq_len) { + eq->cons_idx = 0; + eq->wrapped = !eq->wrapped; + } + + if (++eqe_cnt >= HINIC3_EQ_UPDATE_CI_STEP) { + eqe_cnt = 0; + set_eq_cons_idx(eq, HINIC3_EQ_NOT_ARMED); + } + } + + return true; +} + +static void reschedule_eq_handler(struct hinic3_eq *eq) +{ + if (eq->type == HINIC3_AEQ) { + struct hinic3_aeqs *aeqs = aeq_to_aeqs(eq); + + queue_work_on(hisdk3_get_work_cpu_affinity(eq->hwdev, WORK_TYPE_AEQ), + aeqs->workq, &eq->aeq_work); + } else { + tasklet_schedule(&eq->ceq_tasklet); + } +} + +/** + * eq_irq_handler - handler for the eq event + * @data: the event queue of the event + **/ +static bool eq_irq_handler(void *data) +{ + struct hinic3_eq *eq = (struct hinic3_eq *)data; + bool uncompleted = false; + + if (eq->type == HINIC3_AEQ) + uncompleted = aeq_irq_handler(eq); + else + uncompleted = ceq_irq_handler(eq); + + set_eq_cons_idx(eq, uncompleted ? HINIC3_EQ_NOT_ARMED : + HINIC3_EQ_ARMED); + + return uncompleted; +} + + +/** + * eq_irq_work - eq work for the event + * @work: the work that is associated with the eq + **/ +static void eq_irq_work(struct work_struct *work) +{ + struct hinic3_eq *eq = container_of(work, struct hinic3_eq, aeq_work); + + if (eq_irq_handler(eq)) + reschedule_eq_handler(eq); +} + +/** + * aeq_interrupt - aeq interrupt handler + * @irq: irq number + * @data: the async event queue of the event + **/ +static irqreturn_t aeq_interrupt(int irq, void *data) +{ + struct hinic3_eq *aeq = (struct hinic3_eq *)data; + struct hinic3_hwdev *hwdev = aeq->hwdev; + struct hinic3_aeqs *aeqs = aeq_to_aeqs(aeq); + struct workqueue_struct *workq = aeqs->workq; + + /* clear resend timer cnt register */ + hinic3_misx_intr_clear_resend_bit(hwdev, aeq->eq_irq.msix_entry_idx, + EQ_MSIX_RESEND_TIMER_CLEAR); + + queue_work_on(hisdk3_get_work_cpu_affinity(hwdev, WORK_TYPE_AEQ), + workq, &aeq->aeq_work); + return IRQ_HANDLED; +} + +/** + * ceq_tasklet - ceq tasklet for the event + * @ceq_data: data that will be used by the tasklet(ceq) + **/ +static void ceq_tasklet(ulong ceq_data) +{ + struct hinic3_eq *eq = (struct hinic3_eq *)ceq_data; + + eq->soft_intr_jif = jiffies; + + if (eq_irq_handler(eq)) + reschedule_eq_handler(eq); +} + +/** + * ceq_interrupt - ceq interrupt handler + * @irq: irq number + * @data: the completion event queue of the event + **/ +static irqreturn_t ceq_interrupt(int irq, void *data) +{ + struct hinic3_eq *ceq = (struct hinic3_eq *)data; + + ceq->hard_intr_jif = jiffies; + + /* clear resend timer counters */ + hinic3_misx_intr_clear_resend_bit(ceq->hwdev, + ceq->eq_irq.msix_entry_idx, + EQ_MSIX_RESEND_TIMER_CLEAR); + + tasklet_schedule(&ceq->ceq_tasklet); + + return IRQ_HANDLED; +} + +/** + * set_eq_ctrls - setting eq's ctrls registers + * @eq: the event queue for setting + **/ +static int set_eq_ctrls(struct hinic3_eq *eq) +{ + enum hinic3_eq_type type = eq->type; + struct hinic3_hwif *hwif = eq->hwdev->hwif; + struct irq_info *eq_irq = &eq->eq_irq; + u32 addr, val, ctrl0, ctrl1, page_size_val, elem_size; + u32 pci_intf_idx = HINIC3_PCI_INTF_IDX(hwif); + int err; + + if (type == HINIC3_AEQ) { + /* set ctrl0 
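Each CEQ element is a single big-endian u32: the top bits carry the wrapped flag and event type, the low 26 bits the payload (the CEQE_TYPE/CEQE_DATA macros earlier in this file). A standalone decode, with ntohl() playing the role of be32_to_cpu():

	#include <stdio.h>
	#include <stdint.h>
	#include <arpa/inet.h>	/* htonl/ntohl */

	#define CEQE_TYPE(v) (((v) >> 23) & 0x7)
	#define CEQE_DATA(v) ((v) & 0x3FFFFFF)

	int main(void)
	{
		uint32_t raw_be = htonl((2u << 23) | 0x1234);	/* fake hw element */
		uint32_t ceqe = ntohl(raw_be);			/* be32_to_cpu() */

		printf("event=%u data=0x%x\n", CEQE_TYPE(ceqe), CEQE_DATA(ceqe));
		return 0;
	}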
*/ + addr = HINIC3_CSR_AEQ_CTRL_0_ADDR; + + val = hinic3_hwif_read_reg(hwif, addr); + + val = AEQ_CTRL_0_CLEAR(val, INTR_IDX) & + AEQ_CTRL_0_CLEAR(val, DMA_ATTR) & + AEQ_CTRL_0_CLEAR(val, PCI_INTF_IDX) & + AEQ_CTRL_0_CLEAR(val, INTR_MODE); + + ctrl0 = AEQ_CTRL_0_SET(eq_irq->msix_entry_idx, INTR_IDX) | + AEQ_CTRL_0_SET(AEQ_DMA_ATTR_DEFAULT, DMA_ATTR) | + AEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX) | + AEQ_CTRL_0_SET(HINIC3_INTR_MODE_ARMED, INTR_MODE); + + val |= ctrl0; + + hinic3_hwif_write_reg(hwif, addr, val); + + /* set ctrl1 */ + addr = HINIC3_CSR_AEQ_CTRL_1_ADDR; + + page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq); + elem_size = EQ_SET_HW_ELEM_SIZE_VAL(eq); + + ctrl1 = AEQ_CTRL_1_SET(eq->eq_len, LEN) | + AEQ_CTRL_1_SET(elem_size, ELEM_SIZE) | + AEQ_CTRL_1_SET(page_size_val, PAGE_SIZE); + + hinic3_hwif_write_reg(hwif, addr, ctrl1); + } else { + page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq); + ctrl0 = CEQ_CTRL_0_SET(eq_irq->msix_entry_idx, INTR_IDX) | + CEQ_CTRL_0_SET(CEQ_DMA_ATTR_DEFAULT, DMA_ATTR) | + CEQ_CTRL_0_SET(CEQ_LMT_KICK_DEFAULT, LIMIT_KICK) | + CEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX) | + CEQ_CTRL_0_SET(page_size_val, PAGE_SIZE) | + CEQ_CTRL_0_SET(HINIC3_INTR_MODE_ARMED, INTR_MODE); + + ctrl1 = CEQ_CTRL_1_SET(eq->eq_len, LEN); + + /* set ceq ctrl reg through mgmt cpu */ + err = hinic3_set_ceq_ctrl_reg(eq->hwdev, eq->q_id, ctrl0, + ctrl1); + if (err) + return err; + } + + return 0; +} + +/** + * ceq_elements_init - Initialize all the elements in the ceq + * @eq: the event queue + * @init_val: value to init with it the elements + **/ +static void ceq_elements_init(struct hinic3_eq *eq, u32 init_val) +{ + u32 *ceqe = NULL; + u32 i; + + for (i = 0; i < eq->eq_len; i++) { + ceqe = GET_CEQ_ELEM(eq, i); + *(ceqe) = cpu_to_be32(init_val); + } + + wmb(); /* Write the init values */ +} + +/** + * aeq_elements_init - initialize all the elements in the aeq + * @eq: the event queue + * @init_val: value to init with it the elements + **/ +static void aeq_elements_init(struct hinic3_eq *eq, u32 init_val) +{ + struct hinic3_aeq_elem *aeqe = NULL; + u32 i; + + for (i = 0; i < eq->eq_len; i++) { + aeqe = GET_AEQ_ELEM(eq, i); + aeqe->desc = cpu_to_be32(init_val); + } + + wmb(); /* Write the init values */ +} + +static void eq_elements_init(struct hinic3_eq *eq, u32 init_val) +{ + if (eq->type == HINIC3_AEQ) + aeq_elements_init(eq, init_val); + else + ceq_elements_init(eq, init_val); +} + +/** + * alloc_eq_pages - allocate the pages for the queue + * @eq: the event queue + **/ +static int alloc_eq_pages(struct hinic3_eq *eq) +{ + struct hinic3_hwif *hwif = eq->hwdev->hwif; + struct hinic3_dma_addr_align *eq_page = NULL; + u32 reg, init_val; + u16 pg_idx, i; + int err; + + eq->eq_pages = kcalloc(eq->num_pages, sizeof(*eq->eq_pages), + GFP_KERNEL); + if (!eq->eq_pages) { + sdk_err(eq->hwdev->dev_hdl, "Failed to alloc eq pages description\n"); + return -ENOMEM; + } + + for (pg_idx = 0; pg_idx < eq->num_pages; pg_idx++) { + eq_page = &eq->eq_pages[pg_idx]; + err = hinic3_dma_zalloc_coherent_align(eq->hwdev->dev_hdl, + eq->page_size, + HINIC3_MIN_EQ_PAGE_SIZE, + GFP_KERNEL, eq_page); + if (err) { + sdk_err(eq->hwdev->dev_hdl, "Failed to alloc eq page, page index: %hu\n", + pg_idx); + goto dma_alloc_err; + } + + reg = HINIC3_EQ_HI_PHYS_ADDR_REG(eq->type, pg_idx); + hinic3_hwif_write_reg(hwif, reg, + upper_32_bits(eq_page->align_paddr)); + + reg = HINIC3_EQ_LO_PHYS_ADDR_REG(eq->type, pg_idx); + hinic3_hwif_write_reg(hwif, reg, + lower_32_bits(eq_page->align_paddr)); + } + + eq->num_elem_in_pg = 
GET_EQ_NUM_ELEMS(eq, eq->page_size); + if (eq->num_elem_in_pg & (eq->num_elem_in_pg - 1)) { + sdk_err(eq->hwdev->dev_hdl, "Number element in eq page != power of 2\n"); + err = -EINVAL; + goto dma_alloc_err; + } + init_val = EQ_WRAPPED(eq); + + eq_elements_init(eq, init_val); + + return 0; + +dma_alloc_err: + for (i = 0; i < pg_idx; i++) + hinic3_dma_free_coherent_align(eq->hwdev->dev_hdl, + &eq->eq_pages[i]); + + kfree(eq->eq_pages); + + return err; +} + +/** + * free_eq_pages - free the pages of the queue + * @eq: the event queue + **/ +static void free_eq_pages(struct hinic3_eq *eq) +{ + u16 pg_idx; + + for (pg_idx = 0; pg_idx < eq->num_pages; pg_idx++) + hinic3_dma_free_coherent_align(eq->hwdev->dev_hdl, + &eq->eq_pages[pg_idx]); + + kfree(eq->eq_pages); +} + +static inline u32 get_page_size(const struct hinic3_eq *eq) +{ + u32 total_size; + u32 count; + + total_size = ALIGN((eq->eq_len * eq->elem_size), + HINIC3_MIN_EQ_PAGE_SIZE); + if (total_size <= (HINIC3_EQ_MAX_PAGES(eq) * HINIC3_MIN_EQ_PAGE_SIZE)) + return HINIC3_MIN_EQ_PAGE_SIZE; + + count = (u32)(ALIGN((total_size / HINIC3_EQ_MAX_PAGES(eq)), + HINIC3_MIN_EQ_PAGE_SIZE) / HINIC3_MIN_EQ_PAGE_SIZE); + + /* round up to nearest power of two */ + count = 1U << (u8)fls((int)(count - 1)); + + return ((u32)HINIC3_MIN_EQ_PAGE_SIZE) * count; +} + + +static int request_eq_irq(struct hinic3_eq *eq, struct irq_info *entry) +{ + int err = 0; + + if (eq->type == HINIC3_AEQ) + INIT_WORK(&eq->aeq_work, eq_irq_work); + else + tasklet_init(&eq->ceq_tasklet, ceq_tasklet, (ulong)eq); + + if (eq->type == HINIC3_AEQ) { + snprintf(eq->irq_name, sizeof(eq->irq_name), + "hinic3_aeq%u@pci:%s", eq->q_id, + pci_name(eq->hwdev->pcidev_hdl)); + + err = request_irq(entry->irq_id, aeq_interrupt, 0UL, + eq->irq_name, eq); + } else { + snprintf(eq->irq_name, sizeof(eq->irq_name), + "hinic3_ceq%u@pci:%s", eq->q_id, + pci_name(eq->hwdev->pcidev_hdl)); + err = request_irq(entry->irq_id, ceq_interrupt, 0UL, + eq->irq_name, eq); + } + + return err; +} + +static void reset_eq(struct hinic3_eq *eq) +{ + /* clear eq_len to force eqe drop in hardware */ + if (eq->type == HINIC3_AEQ) + hinic3_hwif_write_reg(eq->hwdev->hwif, + HINIC3_CSR_AEQ_CTRL_1_ADDR, 0); + else + hinic3_set_ceq_ctrl_reg(eq->hwdev, eq->q_id, 0, 0); + + wmb(); /* clear eq_len before clear prod idx */ + + hinic3_hwif_write_reg(eq->hwdev->hwif, EQ_PROD_IDX_REG_ADDR(eq), 0); +} + +/** + * init_eq - initialize eq + * @eq: the event queue + * @hwdev: the pointer to hw device + * @q_id: Queue id number + * @q_len: the number of EQ elements + * @type: the type of the event queue, ceq or aeq + * @entry: msix entry associated with the event queue + * Return: 0 - Success, Negative - failure + **/ +static int init_eq(struct hinic3_eq *eq, struct hinic3_hwdev *hwdev, u16 q_id, + u32 q_len, enum hinic3_eq_type type, struct irq_info *entry) +{ + int err = 0; + + eq->hwdev = hwdev; + eq->q_id = q_id; + eq->type = type; + eq->eq_len = q_len; + + /* Indirect access should set q_id first */ + hinic3_hwif_write_reg(hwdev->hwif, HINIC3_EQ_INDIR_IDX_ADDR(eq->type), + eq->q_id); + wmb(); /* write index before config */ + + reset_eq(eq); + + eq->cons_idx = 0; + eq->wrapped = 0; + + eq->elem_size = (type == HINIC3_AEQ) ? 
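get_page_size() above picks the smallest power-of-two page size that keeps the whole queue within the per-type page budget. A standalone check of the arithmetic (AEQ limits hard-coded for the demo):

	#include <stdio.h>
	#include <stdint.h>

	#define MIN_PG  0x1000u	/* HINIC3_MIN_EQ_PAGE_SIZE */
	#define MAX_PGS 4u	/* HINIC3_AEQ_MAX_PAGES */

	static uint32_t eq_page_size(uint32_t eq_len, uint32_t elem_size)
	{
		/* total queue size, rounded up to a 4K multiple */
		uint32_t total = (eq_len * elem_size + MIN_PG - 1) & ~(MIN_PG - 1);

		if (total <= MAX_PGS * MIN_PG)
			return MIN_PG;		/* fits in MAX_PGS 4K pages */

		uint32_t count = (total / MAX_PGS + MIN_PG - 1) / MIN_PG;
		uint32_t pow2 = 1;

		while (pow2 < count)		/* round up to a power of two */
			pow2 <<= 1;
		return MIN_PG * pow2;
	}

	int main(void)
	{
		/* default aeq: 0x10000 entries * 64B = 4MB -> 1MB pages, 4 pages */
		printf("page size = 0x%x\n", eq_page_size(0x10000, 64));
		return 0;
	}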
HINIC3_AEQE_SIZE : HINIC3_CEQE_SIZE; + + eq->page_size = get_page_size(eq); + eq->orig_page_size = eq->page_size; + eq->num_pages = GET_EQ_NUM_PAGES(eq, eq->page_size); + + if (eq->num_pages > HINIC3_EQ_MAX_PAGES(eq)) { + sdk_err(hwdev->dev_hdl, "Number pages: %u too many pages for eq\n", + eq->num_pages); + return -EINVAL; + } + + err = alloc_eq_pages(eq); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to allocate pages for eq\n"); + return err; + } + + eq->eq_irq.msix_entry_idx = entry->msix_entry_idx; + eq->eq_irq.irq_id = entry->irq_id; + + err = set_eq_ctrls(eq); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to set ctrls for eq\n"); + goto init_eq_ctrls_err; + } + + set_eq_cons_idx(eq, HINIC3_EQ_ARMED); + + err = request_eq_irq(eq, entry); + if (err) { + sdk_err(hwdev->dev_hdl, + "Failed to request irq for the eq, err: %d\n", err); + goto req_irq_err; + } + + hinic3_set_msix_state(hwdev, entry->msix_entry_idx, + HINIC3_MSIX_DISABLE); + + return 0; + +init_eq_ctrls_err: +req_irq_err: + free_eq_pages(eq); + return err; +} + +/** + * remove_eq - remove eq + * @eq: the event queue + **/ +static void remove_eq(struct hinic3_eq *eq) +{ + struct irq_info *entry = &eq->eq_irq; + + hinic3_set_msix_state(eq->hwdev, entry->msix_entry_idx, + HINIC3_MSIX_DISABLE); + synchronize_irq(entry->irq_id); + + free_irq(entry->irq_id, eq); + + /* Indirect access should set q_id first */ + hinic3_hwif_write_reg(eq->hwdev->hwif, + HINIC3_EQ_INDIR_IDX_ADDR(eq->type), + eq->q_id); + + wmb(); /* write index before config */ + + if (eq->type == HINIC3_AEQ) { + cancel_work_sync(&eq->aeq_work); + + /* clear eq_len to avoid hw access host memory */ + hinic3_hwif_write_reg(eq->hwdev->hwif, + HINIC3_CSR_AEQ_CTRL_1_ADDR, 0); + } else { + tasklet_kill(&eq->ceq_tasklet); + + hinic3_set_ceq_ctrl_reg(eq->hwdev, eq->q_id, 0, 0); + } + + /* update cons_idx to avoid invalid interrupt */ + eq->cons_idx = hinic3_hwif_read_reg(eq->hwdev->hwif, + EQ_PROD_IDX_REG_ADDR(eq)); + set_eq_cons_idx(eq, HINIC3_EQ_NOT_ARMED); + + free_eq_pages(eq); +} + +/** + * hinic3_aeqs_init - init all the aeqs + * @hwdev: the pointer to hw device + * @num_aeqs: number of AEQs + * @msix_entries: msix entries associated with the event queues + * Return: 0 - Success, Negative - failure + **/ +int hinic3_aeqs_init(struct hinic3_hwdev *hwdev, u16 num_aeqs, + struct irq_info *msix_entries) +{ + struct hinic3_aeqs *aeqs = NULL; + int err; + u16 i, q_id; + + if (!hwdev) + return -EINVAL; + + aeqs = kzalloc(sizeof(*aeqs), GFP_KERNEL); + if (!aeqs) + return -ENOMEM; + + hwdev->aeqs = aeqs; + aeqs->hwdev = hwdev; + aeqs->num_aeqs = num_aeqs; + aeqs->workq = alloc_workqueue(HINIC3_EQS_WQ_NAME, WQ_MEM_RECLAIM, + HINIC3_MAX_AEQS); + if (!aeqs->workq) { + sdk_err(hwdev->dev_hdl, "Failed to initialize aeq workqueue\n"); + err = -ENOMEM; + goto create_work_err; + } + + if (g_aeq_len < HINIC3_MIN_AEQ_LEN || g_aeq_len > HINIC3_MAX_AEQ_LEN) { + sdk_warn(hwdev->dev_hdl, "Module Parameter g_aeq_len value %u out of range, resetting to %d\n", + g_aeq_len, HINIC3_DEFAULT_AEQ_LEN); + g_aeq_len = HINIC3_DEFAULT_AEQ_LEN; + } + + for (q_id = 0; q_id < num_aeqs; q_id++) { + err = init_eq(&aeqs->aeq[q_id], hwdev, q_id, g_aeq_len, + HINIC3_AEQ, &msix_entries[q_id]); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init aeq %u\n", + q_id); + goto init_aeq_err; + } + } + for (q_id = 0; q_id < num_aeqs; q_id++) + hinic3_set_msix_state(hwdev, msix_entries[q_id].msix_entry_idx, + HINIC3_MSIX_ENABLE); + + return 0; + +init_aeq_err: + for (i = 0; i < q_id; i++) + 
remove_eq(&aeqs->aeq[i]); + + destroy_workqueue(aeqs->workq); + +create_work_err: + kfree(aeqs); + + return err; +} + +/** + * hinic3_aeqs_free - free all the aeqs + * @hwdev: the pointer to hw device + **/ +void hinic3_aeqs_free(struct hinic3_hwdev *hwdev) +{ + struct hinic3_aeqs *aeqs = hwdev->aeqs; + enum hinic3_aeq_type aeq_event = HINIC3_HW_INTER_INT; + enum hinic3_aeq_sw_type sw_aeq_event = HINIC3_STATELESS_EVENT; + u16 q_id; + + for (q_id = 0; q_id < aeqs->num_aeqs; q_id++) + remove_eq(&aeqs->aeq[q_id]); + + for (; sw_aeq_event < HINIC3_MAX_AEQ_SW_EVENTS; sw_aeq_event++) + hinic3_aeq_unregister_swe_cb(hwdev, sw_aeq_event); + + for (; aeq_event < HINIC3_MAX_AEQ_EVENTS; aeq_event++) + hinic3_aeq_unregister_hw_cb(hwdev, aeq_event); + + destroy_workqueue(aeqs->workq); + + kfree(aeqs); +} + +/** + * hinic3_ceqs_init - init all the ceqs + * @hwdev: the pointer to hw device + * @num_ceqs: number of CEQs + * @msix_entries: msix entries associated with the event queues + * Return: 0 - Success, Negative - failure + **/ +int hinic3_ceqs_init(struct hinic3_hwdev *hwdev, u16 num_ceqs, + struct irq_info *msix_entries) +{ + struct hinic3_ceqs *ceqs; + int err; + u16 i, q_id; + + ceqs = kzalloc(sizeof(*ceqs), GFP_KERNEL); + if (!ceqs) + return -ENOMEM; + + hwdev->ceqs = ceqs; + + ceqs->hwdev = hwdev; + ceqs->num_ceqs = num_ceqs; + + if (g_ceq_len < HINIC3_MIN_CEQ_LEN || g_ceq_len > HINIC3_MAX_CEQ_LEN) { + sdk_warn(hwdev->dev_hdl, "Module Parameter g_ceq_len value %u out of range, resetting to %d\n", + g_ceq_len, HINIC3_DEFAULT_CEQ_LEN); + g_ceq_len = HINIC3_DEFAULT_CEQ_LEN; + } + + if (!g_num_ceqe_in_tasklet) { + sdk_warn(hwdev->dev_hdl, "Module Parameter g_num_ceqe_in_tasklet can not be zero, resetting to %d\n", + HINIC3_TASK_PROCESS_EQE_LIMIT); + g_num_ceqe_in_tasklet = HINIC3_TASK_PROCESS_EQE_LIMIT; + } + for (q_id = 0; q_id < num_ceqs; q_id++) { + err = init_eq(&ceqs->ceq[q_id], hwdev, q_id, g_ceq_len, + HINIC3_CEQ, &msix_entries[q_id]); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init ceq %u\n", + q_id); + goto init_ceq_err; + } + } + for (q_id = 0; q_id < num_ceqs; q_id++) + hinic3_set_msix_state(hwdev, msix_entries[q_id].msix_entry_idx, + HINIC3_MSIX_ENABLE); + + for (i = 0; i < HINIC3_MAX_CEQ_EVENTS; i++) + ceqs->ceq_cb_state[i] = 0; + + return 0; + +init_ceq_err: + for (i = 0; i < q_id; i++) + remove_eq(&ceqs->ceq[i]); + + kfree(ceqs); + + return err; +} + +/** + * hinic3_ceqs_free - free all the ceqs + * @hwdev: the pointer to hw device + **/ +void hinic3_ceqs_free(struct hinic3_hwdev *hwdev) +{ + struct hinic3_ceqs *ceqs = hwdev->ceqs; + enum hinic3_ceq_event ceq_event = HINIC3_CMDQ; + u16 q_id; + + for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) + remove_eq(&ceqs->ceq[q_id]); + + for (; ceq_event < HINIC3_MAX_CEQ_EVENTS; ceq_event++) + hinic3_ceq_unregister_cb(hwdev, ceq_event); + + kfree(ceqs); +} + +void hinic3_get_ceq_irqs(struct hinic3_hwdev *hwdev, struct irq_info *irqs, + u16 *num_irqs) +{ + struct hinic3_ceqs *ceqs = hwdev->ceqs; + u16 q_id; + + for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) { + irqs[q_id].irq_id = ceqs->ceq[q_id].eq_irq.irq_id; + irqs[q_id].msix_entry_idx = + ceqs->ceq[q_id].eq_irq.msix_entry_idx; + } + + *num_irqs = ceqs->num_ceqs; +} + +void hinic3_get_aeq_irqs(struct hinic3_hwdev *hwdev, struct irq_info *irqs, + u16 *num_irqs) +{ + struct hinic3_aeqs *aeqs = hwdev->aeqs; + u16 q_id; + + for (q_id = 0; q_id < aeqs->num_aeqs; q_id++) { + irqs[q_id].irq_id = aeqs->aeq[q_id].eq_irq.irq_id; + irqs[q_id].msix_entry_idx = + aeqs->aeq[q_id].eq_irq.msix_entry_idx; 
+ } + + *num_irqs = aeqs->num_aeqs; +} + +void hinic3_dump_aeq_info(struct hinic3_hwdev *hwdev) +{ + struct hinic3_aeq_elem *aeqe_pos = NULL; + struct hinic3_eq *eq = NULL; + u32 addr, ci, pi, ctrl0, idx; + int q_id; + + for (q_id = 0; q_id < hwdev->aeqs->num_aeqs; q_id++) { + eq = &hwdev->aeqs->aeq[q_id]; + /* Indirect access should set q_id first */ + hinic3_hwif_write_reg(eq->hwdev->hwif, HINIC3_EQ_INDIR_IDX_ADDR(eq->type), + eq->q_id); + wmb(); /* write index before config */ + + addr = HINIC3_CSR_AEQ_CTRL_0_ADDR; + + ctrl0 = hinic3_hwif_read_reg(hwdev->hwif, addr); + + idx = hinic3_hwif_read_reg(hwdev->hwif, HINIC3_EQ_INDIR_IDX_ADDR(eq->type)); + + addr = EQ_CONS_IDX_REG_ADDR(eq); + ci = hinic3_hwif_read_reg(hwdev->hwif, addr); + addr = EQ_PROD_IDX_REG_ADDR(eq); + pi = hinic3_hwif_read_reg(hwdev->hwif, addr); + aeqe_pos = GET_CURR_AEQ_ELEM(eq); + sdk_err(hwdev->dev_hdl, + "Aeq id: %d, idx: %u, ctrl0: 0x%08x, ci: 0x%08x, pi: 0x%x, work_state: 0x%x, wrap: %u, desc: 0x%x, swci: 0x%x\n", + q_id, idx, ctrl0, ci, pi, work_busy(&eq->aeq_work), + eq->wrapped, be32_to_cpu(aeqe_pos->desc), eq->cons_idx); + } + + hinic3_show_chip_err_info(hwdev); +} + +void hinic3_dump_ceq_info(struct hinic3_hwdev *hwdev) +{ + struct hinic3_eq *eq = NULL; + u32 addr, ci, pi; + int q_id; + + for (q_id = 0; q_id < hwdev->ceqs->num_ceqs; q_id++) { + eq = &hwdev->ceqs->ceq[q_id]; + /* Indirect access should set q_id first */ + hinic3_hwif_write_reg(eq->hwdev->hwif, + HINIC3_EQ_INDIR_IDX_ADDR(eq->type), + eq->q_id); + wmb(); /* write index before config */ + + addr = EQ_CONS_IDX_REG_ADDR(eq); + ci = hinic3_hwif_read_reg(hwdev->hwif, addr); + addr = EQ_PROD_IDX_REG_ADDR(eq); + pi = hinic3_hwif_read_reg(hwdev->hwif, addr); + sdk_err(hwdev->dev_hdl, + "Ceq id: %d, ci: 0x%08x, sw_ci: 0x%08x, pi: 0x%x, tasklet_state: 0x%lx, wrap: %u, ceqe: 0x%x\n", + q_id, ci, eq->cons_idx, pi, + tasklet_state(&eq->ceq_tasklet), + eq->wrapped, be32_to_cpu(*(GET_CURR_CEQ_ELEM(eq)))); + + sdk_err(hwdev->dev_hdl, "Ceq last response hard interrupt time: %u\n", + jiffies_to_msecs(jiffies - eq->hard_intr_jif)); + sdk_err(hwdev->dev_hdl, "Ceq last response soft interrupt time: %u\n", + jiffies_to_msecs(jiffies - eq->soft_intr_jif)); + } + + hinic3_show_chip_err_info(hwdev); +} + +int hinic3_get_ceq_info(void *hwdev, u16 q_id, struct hinic3_ceq_info *ceq_info) +{ + struct hinic3_hwdev *dev = hwdev; + struct hinic3_eq *eq = NULL; + + if (!hwdev || !ceq_info) + return -EINVAL; + + if (q_id >= dev->ceqs->num_ceqs) + return -EINVAL; + + eq = &dev->ceqs->ceq[q_id]; + ceq_info->q_len = eq->eq_len; + ceq_info->num_pages = eq->num_pages; + ceq_info->page_size = eq->page_size; + ceq_info->num_elem_in_pg = eq->num_elem_in_pg; + ceq_info->elem_size = eq->elem_size; + sdk_info(dev->dev_hdl, "get_ceq_info: qid=0x%x page_size=%u\n", + q_id, eq->page_size); + + return 0; +} +EXPORT_SYMBOL(hinic3_get_ceq_info); + +int hinic3_get_ceq_page_phy_addr(void *hwdev, u16 q_id, + u16 page_idx, u64 *page_phy_addr) +{ + struct hinic3_hwdev *dev = hwdev; + struct hinic3_eq *eq = NULL; + + if (!hwdev || !page_phy_addr) + return -EINVAL; + + if (q_id >= dev->ceqs->num_ceqs) + return -EINVAL; + + eq = &dev->ceqs->ceq[q_id]; + if (page_idx >= eq->num_pages) + return -EINVAL; + + *page_phy_addr = eq->eq_pages[page_idx].align_paddr; + sdk_info(dev->dev_hdl, "ceq_page_phy_addr: 0x%llx page_idx=%u\n", + eq->eq_pages[page_idx].align_paddr, page_idx); + + return 0; +} +EXPORT_SYMBOL(hinic3_get_ceq_page_phy_addr); + +int hinic3_set_ceq_irq_disable(void *hwdev, u16 q_id) +{ + struct 
hinic3_hwdev *dev = hwdev; + struct hinic3_eq *ceq = NULL; + + if (!hwdev) + return -EINVAL; + + if (q_id >= dev->ceqs->num_ceqs) + return -EINVAL; + + ceq = &dev->ceqs->ceq[q_id]; + + hinic3_set_msix_state(ceq->hwdev, ceq->eq_irq.msix_entry_idx, + HINIC3_MSIX_DISABLE); + + return 0; +} +EXPORT_SYMBOL(hinic3_set_ceq_irq_disable); diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_eqs.h b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_eqs.h new file mode 100644 index 000000000000..e562175af467 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_eqs.h @@ -0,0 +1,165 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC3_EQS_H +#define HINIC3_EQS_H + +#include <linux/types.h> +#include <linux/interrupt.h> +#include <linux/workqueue.h> + +#include "hinic3_common.h" +#include "hinic3_crm.h" +#include "hinic3_hw.h" +#include "hinic3_hwdev.h" + +#define HINIC3_MAX_AEQS 4 +#define HINIC3_MAX_CEQS 32 + +#define HINIC3_AEQ_MAX_PAGES 4 +#define HINIC3_CEQ_MAX_PAGES 8 + +#define HINIC3_AEQE_SIZE 64 +#define HINIC3_CEQE_SIZE 4 + +#define HINIC3_AEQE_DESC_SIZE 4 +#define HINIC3_AEQE_DATA_SIZE \ + (HINIC3_AEQE_SIZE - HINIC3_AEQE_DESC_SIZE) + +#define HINIC3_DEFAULT_AEQ_LEN 0x10000 +#define HINIC3_DEFAULT_CEQ_LEN 0x10000 + +#define HINIC3_MIN_EQ_PAGE_SIZE 0x1000 /* min eq page size 4K Bytes */ +#define HINIC3_MAX_EQ_PAGE_SIZE 0x400000 /* max eq page size 4M Bytes */ + +#define HINIC3_MIN_AEQ_LEN 64 +#define HINIC3_MAX_AEQ_LEN \ + ((HINIC3_MAX_EQ_PAGE_SIZE / HINIC3_AEQE_SIZE) * HINIC3_AEQ_MAX_PAGES) + +#define HINIC3_MIN_CEQ_LEN 64 +#define HINIC3_MAX_CEQ_LEN \ + ((HINIC3_MAX_EQ_PAGE_SIZE / HINIC3_CEQE_SIZE) * HINIC3_CEQ_MAX_PAGES) +#define HINIC3_CEQ_ID_CMDQ 0 + +#define EQ_IRQ_NAME_LEN 64 + +#define EQ_USLEEP_LOW_BOUND 900 +#define EQ_USLEEP_HIG_BOUND 1000 + +enum hinic3_eq_type { + HINIC3_AEQ, + HINIC3_CEQ +}; + +enum hinic3_eq_intr_mode { + HINIC3_INTR_MODE_ARMED, + HINIC3_INTR_MODE_ALWAYS, +}; + +enum hinic3_eq_ci_arm_state { + HINIC3_EQ_NOT_ARMED, + HINIC3_EQ_ARMED, +}; + +struct hinic3_eq { + struct hinic3_hwdev *hwdev; + u16 q_id; + u16 rsvd1; + enum hinic3_eq_type type; + u32 page_size; + u32 orig_page_size; + u32 eq_len; + + u32 cons_idx; + u16 wrapped; + u16 rsvd2; + + u16 elem_size; + u16 num_pages; + u32 num_elem_in_pg; + + struct irq_info eq_irq; + char irq_name[EQ_IRQ_NAME_LEN]; + + struct hinic3_dma_addr_align *eq_pages; + + struct work_struct aeq_work; + struct tasklet_struct ceq_tasklet; + + u64 hard_intr_jif; + u64 soft_intr_jif; + + u64 rsvd3; +}; + +struct hinic3_aeq_elem { + u8 aeqe_data[HINIC3_AEQE_DATA_SIZE]; + u32 desc; +}; + +enum hinic3_aeq_cb_state { + HINIC3_AEQ_HW_CB_REG = 0, + HINIC3_AEQ_HW_CB_RUNNING, + HINIC3_AEQ_SW_CB_REG, + HINIC3_AEQ_SW_CB_RUNNING, +}; + +struct hinic3_aeqs { + struct hinic3_hwdev *hwdev; + + hinic3_aeq_hwe_cb aeq_hwe_cb[HINIC3_MAX_AEQ_EVENTS]; + void *aeq_hwe_cb_data[HINIC3_MAX_AEQ_EVENTS]; + hinic3_aeq_swe_cb aeq_swe_cb[HINIC3_MAX_AEQ_SW_EVENTS]; + void *aeq_swe_cb_data[HINIC3_MAX_AEQ_SW_EVENTS]; + unsigned long aeq_hw_cb_state[HINIC3_MAX_AEQ_EVENTS]; + unsigned long aeq_sw_cb_state[HINIC3_MAX_AEQ_SW_EVENTS]; + + struct hinic3_eq aeq[HINIC3_MAX_AEQS]; + u16 num_aeqs; + u16 rsvd1; + u32 rsvd2; + + struct workqueue_struct *workq; +}; + +enum hinic3_ceq_cb_state { + HINIC3_CEQ_CB_REG = 0, + HINIC3_CEQ_CB_RUNNING, +}; + +struct hinic3_ceqs { + struct hinic3_hwdev *hwdev; + + hinic3_ceq_event_cb ceq_cb[HINIC3_MAX_CEQ_EVENTS]; + void *ceq_cb_data[HINIC3_MAX_CEQ_EVENTS]; + void 
*ceq_data[HINIC3_MAX_CEQ_EVENTS]; + unsigned long ceq_cb_state[HINIC3_MAX_CEQ_EVENTS]; + + struct hinic3_eq ceq[HINIC3_MAX_CEQS]; + u16 num_ceqs; + u16 rsvd1; + u32 rsvd2; +}; + +int hinic3_aeqs_init(struct hinic3_hwdev *hwdev, u16 num_aeqs, + struct irq_info *msix_entries); + +void hinic3_aeqs_free(struct hinic3_hwdev *hwdev); + +int hinic3_ceqs_init(struct hinic3_hwdev *hwdev, u16 num_ceqs, + struct irq_info *msix_entries); + +void hinic3_ceqs_free(struct hinic3_hwdev *hwdev); + +void hinic3_get_ceq_irqs(struct hinic3_hwdev *hwdev, struct irq_info *irqs, + u16 *num_irqs); + +void hinic3_get_aeq_irqs(struct hinic3_hwdev *hwdev, struct irq_info *irqs, + u16 *num_irqs); + +void hinic3_dump_ceq_info(struct hinic3_hwdev *hwdev); + +void hinic3_dump_aeq_info(struct hinic3_hwdev *hwdev); + + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_api.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_api.c new file mode 100644 index 000000000000..a4cbac8e4cc1 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_api.c @@ -0,0 +1,453 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#include "ossl_knl.h" +#include "hinic3_hw.h" +#include "hinic3_common.h" +#include "hinic3_hwdev.h" +#include "hinic3_api_cmd.h" +#include "hinic3_mgmt.h" +#include "hinic3_hw_api.h" + #ifndef HTONL +#define HTONL(x) \ + ((((x) & 0x000000ff) << 24) \ + | (((x) & 0x0000ff00) << 8) \ + | (((x) & 0x00ff0000) >> 8) \ + | (((x) & 0xff000000) >> 24)) +#endif + +static void hinic3_sml_ctr_read_build_req(struct chipif_sml_ctr_rd_req *msg, + u8 instance_id, u8 op_id, + u8 ack, u32 ctr_id, u32 init_val) +{ + msg->head.value = 0; + msg->head.bs.instance = instance_id; + msg->head.bs.op_id = op_id; + msg->head.bs.ack = ack; + msg->head.value = HTONL(msg->head.value); + msg->ctr_id = ctr_id; + msg->ctr_id = HTONL(msg->ctr_id); + msg->initial = init_val; +} + +static void sml_ctr_htonl_n(u32 *node, u32 len) +{ + u32 i; + u32 *node_new = node; + + for (i = 0; i < len; i++) { + *node_new = HTONL(*node_new); + node_new++; + } +} + +/** + * hinic3_sm_ctr_rd16 - small single 16 counter read + * @hwdev: the hardware device + * @node: the node id + * @ctr_id: counter id + * @value: read counter value ptr + * Return: 0 - success, negative - failure + **/ +int hinic3_sm_ctr_rd16(void *hwdev, u8 node, u8 instance, u32 ctr_id, + u16 *value) +{ + struct chipif_sml_ctr_rd_req req; + union ctr_rd_rsp rsp; + int ret; + + if (!hwdev || !value) + return -EFAULT; + + if (!COMM_SUPPORT_API_CHAIN((struct hinic3_hwdev *)hwdev)) + return -EPERM; + + memset(&req, 0, sizeof(req)); + + hinic3_sml_ctr_read_build_req(&req, instance, CHIPIF_SM_CTR_OP_READ, + CHIPIF_ACK, ctr_id, 0); + + ret = hinic3_api_cmd_read_ack(hwdev, node, (u8 *)&req, + (unsigned short)sizeof(req), + (void *)&rsp, + (unsigned short)sizeof(rsp)); + if (ret) { + sdk_err(((struct hinic3_hwdev *)hwdev)->dev_hdl, + "Sm 16bit counter read fail, err(%d)\n", ret); + return ret; + } + sml_ctr_htonl_n((u32 *)&rsp, sizeof(rsp) / sizeof(u32)); + *value = rsp.bs_ss16_rsp.value1; + + return 0; +} + +/** + * hinic3_sm_ctr_rd32 - small single 32 counter read + * @hwdev: the hardware device + * @node: the node id + * @ctr_id: counter id + * @value: read counter value ptr + * Return: 0 - success, negative - failure + **/ +int hinic3_sm_ctr_rd32(void *hwdev, u8 node, u8 instance, u32 ctr_id, + u32 *value) +{ + struct chipif_sml_ctr_rd_req req; + union ctr_rd_rsp rsp; + int ret; + + if (!hwdev || !value) + return -EFAULT; + + if 
(!COMM_SUPPORT_API_CHAIN((struct hinic3_hwdev *)hwdev)) + return -EPERM; + + memset(&req, 0, sizeof(req)); + + hinic3_sml_ctr_read_build_req(&req, instance, CHIPIF_SM_CTR_OP_READ, + CHIPIF_ACK, ctr_id, 0); + + ret = hinic3_api_cmd_read_ack(hwdev, node, (u8 *)&req, + (unsigned short)sizeof(req), + (void *)&rsp, + (unsigned short)sizeof(rsp)); + if (ret) { + sdk_err(((struct hinic3_hwdev *)hwdev)->dev_hdl, + "Sm 32bit counter read fail, err(%d)\n", ret); + return ret; + } + sml_ctr_htonl_n((u32 *)&rsp, sizeof(rsp) / sizeof(u32)); + *value = rsp.bs_ss32_rsp.value1; + + return 0; +} + +/** + * hinic3_sm_ctr_rd32_clear - small single 32 counter read and clear to zero + * @hwdev: the hardware device + * @node: the node id + * @ctr_id: counter id + * @value: read counter value ptr + * Return: 0 - success, negative - failure + * according to ACN error code (ERR_OK, ERR_PARAM, ERR_FAILED...etc) + **/ +int hinic3_sm_ctr_rd32_clear(void *hwdev, u8 node, u8 instance, + u32 ctr_id, u32 *value) +{ + struct chipif_sml_ctr_rd_req req; + union ctr_rd_rsp rsp; + int ret; + + if (!hwdev || !value) + return -EFAULT; + + if (!COMM_SUPPORT_API_CHAIN((struct hinic3_hwdev *)hwdev)) + return -EPERM; + + memset(&req, 0, sizeof(req)); + + hinic3_sml_ctr_read_build_req(&req, instance, + CHIPIF_SM_CTR_OP_READ_CLEAR, + CHIPIF_ACK, ctr_id, 0); + + ret = hinic3_api_cmd_read_ack(hwdev, node, (u8 *)&req, + (unsigned short)sizeof(req), + (void *)&rsp, + (unsigned short)sizeof(rsp)); + if (ret) { + sdk_err(((struct hinic3_hwdev *)hwdev)->dev_hdl, + "Sm 32bit counter clear fail, err(%d)\n", ret); + return ret; + } + sml_ctr_htonl_n((u32 *)&rsp, sizeof(rsp) / sizeof(u32)); + *value = rsp.bs_ss32_rsp.value1; + + return 0; +} + +/** + * hinic3_sm_ctr_rd64_pair - big pair 128 counter read + * @hwdev: the hardware device + * @node: the node id + * @ctr_id: counter id + * @value1: read counter value ptr + * @value2: read counter value ptr + * Return: 0 - success, negative - failure + **/ +int hinic3_sm_ctr_rd64_pair(void *hwdev, u8 node, u8 instance, + u32 ctr_id, u64 *value1, u64 *value2) +{ + struct chipif_sml_ctr_rd_req req; + union ctr_rd_rsp rsp; + int ret; + + if (!value1) { + pr_err("First value is NULL for read 64 bit pair\n"); + return -EFAULT; + } + + if (!value2) { + pr_err("Second value is NULL for read 64 bit pair\n"); + return -EFAULT; + } + + if (!hwdev || ((ctr_id & 0x1) != 0)) { + pr_err("Hwdev is NULL or ctr_id(%d) is odd number for read 64 bit pair\n", + ctr_id); + return -EFAULT; + } + + if (!COMM_SUPPORT_API_CHAIN((struct hinic3_hwdev *)hwdev)) + return -EPERM; + + memset(&req, 0, sizeof(req)); + + hinic3_sml_ctr_read_build_req(&req, instance, CHIPIF_SM_CTR_OP_READ, + CHIPIF_ACK, ctr_id, 0); + + ret = hinic3_api_cmd_read_ack(hwdev, node, (u8 *)&req, + (unsigned short)sizeof(req), (void *)&rsp, + (unsigned short)sizeof(rsp)); + if (ret) { + sdk_err(((struct hinic3_hwdev *)hwdev)->dev_hdl, + "Sm 64 bit rd pair ret(%d)\n", ret); + return ret; + } + sml_ctr_htonl_n((u32 *)&rsp, sizeof(rsp) / sizeof(u32)); + *value1 = ((u64)rsp.bs_bp64_rsp.val1_h << BIT_32) | rsp.bs_bp64_rsp.val1_l; + *value2 = ((u64)rsp.bs_bp64_rsp.val2_h << BIT_32) | rsp.bs_bp64_rsp.val2_l; + + return 0; +} + +/** + * hinic3_sm_ctr_rd64_pair_clear - big pair 128 counter read and clear to zero + * @hwdev: the hardware device + * @node: the node id + * @ctr_id: counter id + * @value1: read counter value ptr + * @value2: read counter value ptr + * Return: 0 - success, negative - failure + **/ +int hinic3_sm_ctr_rd64_pair_clear(void *hwdev, u8 node, 
u8 instance, u32 ctr_id, + u64 *value1, u64 *value2) +{ + struct chipif_sml_ctr_rd_req req = {0}; + union ctr_rd_rsp rsp; + int ret; + + if (!hwdev || !value1 || !value2 || ((ctr_id & 0x1) != 0)) { + pr_err("Hwdev or value1 or value2 is NULL or ctr_id(%u) is odd number\n", ctr_id); + return -EINVAL; + } + + if (!COMM_SUPPORT_API_CHAIN((struct hinic3_hwdev *)hwdev)) + return -EPERM; + + hinic3_sml_ctr_read_build_req(&req, instance, + CHIPIF_SM_CTR_OP_READ_CLEAR, + CHIPIF_ACK, ctr_id, 0); + + ret = hinic3_api_cmd_read_ack(hwdev, node, (u8 *)&req, + (unsigned short)sizeof(req), (void *)&rsp, + (unsigned short)sizeof(rsp)); + if (ret) { + sdk_err(((struct hinic3_hwdev *)hwdev)->dev_hdl, + "Sm 64 bit clear pair fail. ret(%d)\n", ret); + return ret; + } + sml_ctr_htonl_n((u32 *)&rsp, sizeof(rsp) / sizeof(u32)); + *value1 = ((u64)rsp.bs_bp64_rsp.val1_h << BIT_32) | rsp.bs_bp64_rsp.val1_l; + *value2 = ((u64)rsp.bs_bp64_rsp.val2_h << BIT_32) | rsp.bs_bp64_rsp.val2_l; + + return 0; +} + +/** + * hinic3_sm_ctr_rd64 - big counter 64 read + * @hwdev: the hardware device + * @node: the node id + * @ctr_id: counter id + * @value: read counter value ptr + * Return: 0 - success, negative - failure + **/ +int hinic3_sm_ctr_rd64(void *hwdev, u8 node, u8 instance, u32 ctr_id, + u64 *value) +{ + struct chipif_sml_ctr_rd_req req; + union ctr_rd_rsp rsp; + int ret; + + if (!hwdev || !value) + return -EFAULT; + + if (!COMM_SUPPORT_API_CHAIN((struct hinic3_hwdev *)hwdev)) + return -EPERM; + + memset(&req, 0, sizeof(req)); + + hinic3_sml_ctr_read_build_req(&req, instance, CHIPIF_SM_CTR_OP_READ, + CHIPIF_ACK, ctr_id, 0); + + ret = hinic3_api_cmd_read_ack(hwdev, node, (u8 *)&req, + (unsigned short)sizeof(req), (void *)&rsp, + (unsigned short)sizeof(rsp)); + if (ret) { + sdk_err(((struct hinic3_hwdev *)hwdev)->dev_hdl, + "Sm 64bit counter read fail err(%d)\n", ret); + return ret; + } + sml_ctr_htonl_n((u32 *)&rsp, sizeof(rsp) / sizeof(u32)); + *value = ((u64)rsp.bs_bs64_rsp.value1 << BIT_32) | rsp.bs_bs64_rsp.value2; + + return 0; +} +EXPORT_SYMBOL(hinic3_sm_ctr_rd64); + +/** + * hinic3_sm_ctr_rd64_clear - big counter 64 read and clear to zero + * @hwdev: the hardware device + * @node: the node id + * @ctr_id: counter id + * @value: read counter value ptr + * Return: 0 - success, negative - failure + **/ +int hinic3_sm_ctr_rd64_clear(void *hwdev, u8 node, u8 instance, u32 ctr_id, + u64 *value) +{ + struct chipif_sml_ctr_rd_req req = {0}; + union ctr_rd_rsp rsp; + int ret; + + if (!hwdev || !value) + return -EINVAL; + + if (!COMM_SUPPORT_API_CHAIN((struct hinic3_hwdev *)hwdev)) + return -EPERM; + + hinic3_sml_ctr_read_build_req(&req, instance, + CHIPIF_SM_CTR_OP_READ_CLEAR, + CHIPIF_ACK, ctr_id, 0); + + ret = hinic3_api_cmd_read_ack(hwdev, node, (u8 *)&req, + (unsigned short)sizeof(req), (void *)&rsp, + (unsigned short)sizeof(rsp)); + if (ret) { + sdk_err(((struct hinic3_hwdev *)hwdev)->dev_hdl, + "Sm 64bit counter clear fail err(%d)\n", ret); + return ret; + } + sml_ctr_htonl_n((u32 *)&rsp, sizeof(rsp) / sizeof(u32)); + *value = ((u64)rsp.bs_bs64_rsp.value1 << BIT_32) | rsp.bs_bs64_rsp.value2; + + return 0; +} + +int hinic3_api_csr_rd32(void *hwdev, u8 dest, u32 addr, u32 *val) +{ + struct hinic3_csr_request_api_data api_data = {0}; + u32 csr_val = 0; + u16 in_size = sizeof(api_data); + int ret; + + if (!hwdev || !val) + return -EFAULT; + + if (!COMM_SUPPORT_API_CHAIN((struct hinic3_hwdev *)hwdev)) + return -EPERM; + + memset(&api_data, 0, sizeof(struct hinic3_csr_request_api_data)); + api_data.dw0 = 0; + 
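The counter-read helpers above all follow the same pattern: build a big-endian request, issue it through the API chain, then byte-swap the response before extracting the value fields. A consumer sketch (illustrative only; the node, instance and counter ids are placeholder values, and example_read_ctr64 is not a real driver symbol):

static int example_read_ctr64(void *hwdev)
{
	u64 val = 0;
	int err;

	/* fails with -EPERM on functions without API chain support */
	err = hinic3_sm_ctr_rd64(hwdev, 0 /* node */, 0 /* instance */,
				 0 /* ctr_id */, &val);
	if (err)
		return err;

	pr_info("sm counter: %llu\n", val);
	return 0;
}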
api_data.dw1.bits.operation_id = HINIC3_CSR_OPERATION_READ_CSR; + api_data.dw1.bits.need_response = HINIC3_CSR_NEED_RESP_DATA; + api_data.dw1.bits.data_size = HINIC3_CSR_DATA_SZ_32; + api_data.dw1.val32 = cpu_to_be32(api_data.dw1.val32); + api_data.dw2.bits.csr_addr = addr; + api_data.dw2.val32 = cpu_to_be32(api_data.dw2.val32); + + ret = hinic3_api_cmd_read_ack(hwdev, dest, (u8 *)(&api_data), + in_size, &csr_val, 0x4); + if (ret) { + sdk_err(((struct hinic3_hwdev *)hwdev)->dev_hdl, + "Read 32 bit csr fail, dest %u addr 0x%x, ret: 0x%x\n", + dest, addr, ret); + return ret; + } + + *val = csr_val; + + return 0; +} + +int hinic3_api_csr_wr32(void *hwdev, u8 dest, u32 addr, u32 val) +{ + struct hinic3_csr_request_api_data api_data; + u16 in_size = sizeof(api_data); + int ret; + + if (!hwdev) + return -EFAULT; + + if (!COMM_SUPPORT_API_CHAIN((struct hinic3_hwdev *)hwdev)) + return -EPERM; + + memset(&api_data, 0, sizeof(struct hinic3_csr_request_api_data)); + api_data.dw1.bits.operation_id = HINIC3_CSR_OPERATION_WRITE_CSR; + api_data.dw1.bits.need_response = HINIC3_CSR_NO_RESP_DATA; + api_data.dw1.bits.data_size = HINIC3_CSR_DATA_SZ_32; + api_data.dw1.val32 = cpu_to_be32(api_data.dw1.val32); + api_data.dw2.bits.csr_addr = addr; + api_data.dw2.val32 = cpu_to_be32(api_data.dw2.val32); + api_data.csr_write_data_h = 0xffffffff; + api_data.csr_write_data_l = val; + + ret = hinic3_api_cmd_write_nack(hwdev, dest, (u8 *)(&api_data), + in_size); + if (ret) { + sdk_err(((struct hinic3_hwdev *)hwdev)->dev_hdl, + "Write 32 bit csr fail! dest %u addr 0x%x val 0x%x\n", + dest, addr, val); + return ret; + } + + return 0; +} + +int hinic3_api_csr_rd64(void *hwdev, u8 dest, u32 addr, u64 *val) +{ + struct hinic3_csr_request_api_data api_data = {0}; + u64 csr_val = 0; + u16 in_size = sizeof(api_data); + int ret; + + if (!hwdev || !val) + return -EFAULT; + + if (!COMM_SUPPORT_API_CHAIN((struct hinic3_hwdev *)hwdev)) + return -EPERM; + + memset(&api_data, 0, sizeof(struct hinic3_csr_request_api_data)); + api_data.dw0 = 0; + api_data.dw1.bits.operation_id = HINIC3_CSR_OPERATION_READ_CSR; + api_data.dw1.bits.need_response = HINIC3_CSR_NEED_RESP_DATA; + api_data.dw1.bits.data_size = HINIC3_CSR_DATA_SZ_64; + api_data.dw1.val32 = cpu_to_be32(api_data.dw1.val32); + api_data.dw2.bits.csr_addr = addr; + api_data.dw2.val32 = cpu_to_be32(api_data.dw2.val32); + + ret = hinic3_api_cmd_read_ack(hwdev, dest, (u8 *)(&api_data), + in_size, &csr_val, 0x8); + if (ret) { + sdk_err(((struct hinic3_hwdev *)hwdev)->dev_hdl, + "Read 64 bit csr fail, dest %u addr 0x%x\n", + dest, addr); + return ret; + } + + *val = csr_val; + + return 0; +} +EXPORT_SYMBOL(hinic3_api_csr_rd64); + diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_api.h b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_api.h new file mode 100644 index 000000000000..9ec812eac684 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_api.h @@ -0,0 +1,141 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC3_HW_API_H +#define HINIC3_HW_API_H + +#include <linux/types.h> + +#define CHIPIF_ACK 1 +#define CHIPIF_NOACK 0 + +#define CHIPIF_SM_CTR_OP_READ 0x2 +#define CHIPIF_SM_CTR_OP_READ_CLEAR 0x6 + +#define BIT_32 32 + +/* request head */ +union chipif_sml_ctr_req_head { + struct { + u32 pad:15; + u32 ack:1; + u32 op_id:5; + u32 instance:6; + u32 src:5; + } bs; + + u32 value; +}; + +/* counter read request struct */ +struct chipif_sml_ctr_rd_req { + u32 extra; + union chipif_sml_ctr_req_head 
head; + u32 ctr_id; + u32 initial; + u32 pad; +}; + +struct hinic3_csr_request_api_data { + u32 dw0; + + union { + struct { + u32 reserved1:13; + /* this field indicates the write/read data size: + * 2'b00: 32 bits + * 2'b01: 64 bits + * 2'b10~2'b11:reserved + */ + u32 data_size:2; + /* this field indicates whether the requestor expects to + * receive response data or not. + * 1'b0: expect not to receive a response data. + * 1'b1: expect to receive a response data. + */ + u32 need_response:1; + /* this field indicates the operation that the requestor + * expected. + * 5'b1_1110: write value to csr space. + * 5'b1_1111: read register from csr space. + */ + u32 operation_id:5; + u32 reserved2:6; + /* this field specifies the Src node ID for this API + * request message. + */ + u32 src_node_id:5; + } bits; + + u32 val32; + } dw1; + + union { + struct { + /* it specifies the CSR address. */ + u32 csr_addr:26; + u32 reserved3:6; + } bits; + + u32 val32; + } dw2; + + /* if data_size=2'b01, it is high 32 bits of write data. else, it is + * 32'hFFFF_FFFF. + */ + u32 csr_write_data_h; + /* the low 32 bits of write data. */ + u32 csr_write_data_l; +}; + +/* counter read response union */ +union ctr_rd_rsp { + struct { + u32 value1:16; + u32 pad0:16; + u32 pad1[3]; + } bs_ss16_rsp; + + struct { + u32 value1; + u32 pad[3]; + } bs_ss32_rsp; + + struct { + u32 value1:20; + u32 pad0:12; + u32 value2:12; + u32 pad1:20; + u32 pad2[2]; + } bs_sp_rsp; + + struct { + u32 value1; + u32 value2; + u32 pad[2]; + } bs_bs64_rsp; + + struct { + u32 val1_h; + u32 val1_l; + u32 val2_h; + u32 val2_l; + } bs_bp64_rsp; +}; + +enum HINIC3_CSR_API_DATA_OPERATION_ID { + HINIC3_CSR_OPERATION_WRITE_CSR = 0x1E, + HINIC3_CSR_OPERATION_READ_CSR = 0x1F +}; + +enum HINIC3_CSR_API_DATA_NEED_RESPONSE_DATA { + HINIC3_CSR_NO_RESP_DATA = 0, + HINIC3_CSR_NEED_RESP_DATA = 1 +}; + +enum HINIC3_CSR_API_DATA_DATA_SIZE { + HINIC3_CSR_DATA_SZ_32 = 0, + HINIC3_CSR_DATA_SZ_64 = 1 +}; + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_cfg.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_cfg.c new file mode 100644 index 000000000000..08a1b8f15cb7 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_cfg.c @@ -0,0 +1,1480 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/mutex.h> +#include <linux/device.h> +#include <linux/pci.h> +#include <linux/module.h> +#include <linux/semaphore.h> + +#include "ossl_knl.h" +#include "hinic3_crm.h" +#include "hinic3_hw.h" +#include "hinic3_hwdev.h" +#include "hinic3_hwif.h" +#include "cfg_mgt_comm_pub.h" +#include "hinic3_hw_cfg.h" + +static void parse_pub_res_cap_dfx(struct hinic3_hwdev *hwdev, + const struct service_cap *cap) +{ + sdk_info(hwdev->dev_hdl, "Get public resource capability: svc_cap_en: 0x%x\n", + cap->svc_type); + sdk_info(hwdev->dev_hdl, "Host_id: 0x%x, ep_id: 0x%x, er_id: 0x%x, port_id: 0x%x\n", + cap->host_id, cap->ep_id, cap->er_id, cap->port_id); + sdk_info(hwdev->dev_hdl, "cos_bitmap: 0x%x, flexq: 0x%x, virtio_vq_size: 0x%x\n", + cap->cos_valid_bitmap, cap->flexq_en, cap->virtio_vq_size); + sdk_info(hwdev->dev_hdl, "Host_total_function: 0x%x, host_oq_id_mask_val: 0x%x, max_vf: 0x%x\n", + cap->host_total_function, cap->host_oq_id_mask_val, + cap->max_vf); + sdk_info(hwdev->dev_hdl, "Host_pf_num: 0x%x, pf_id_start: 0x%x, host_vf_num: 0x%x, vf_id_start: 0x%x\n", + cap->pf_num, cap->pf_id_start, 
cap->vf_num, cap->vf_id_start); + sdk_info(hwdev->dev_hdl, "host_valid_bitmap: 0x%x, master_host_id: 0x%x, srv_multi_host_mode: 0x%x\n", + cap->host_valid_bitmap, cap->master_host_id, cap->srv_multi_host_mode); + sdk_info(hwdev->dev_hdl, + "fake_vf_start_id: 0x%x, fake_vf_num: 0x%x, fake_vf_max_pctx: 0x%x\n", + cap->fake_vf_start_id, cap->fake_vf_num, cap->fake_vf_max_pctx); + sdk_info(hwdev->dev_hdl, "fake_vf_bfilter_start_addr: 0x%x, fake_vf_bfilter_len: 0x%x\n", + cap->fake_vf_bfilter_start_addr, cap->fake_vf_bfilter_len); +} + +static void parse_cqm_res_cap(struct hinic3_hwdev *hwdev, struct service_cap *cap, + struct cfg_cmd_dev_cap *dev_cap) +{ + struct dev_sf_svc_attr *attr = &cap->sf_svc_attr; + + cap->fake_vf_start_id = dev_cap->fake_vf_start_id; + cap->fake_vf_num = dev_cap->fake_vf_num; + cap->fake_vf_max_pctx = dev_cap->fake_vf_max_pctx; + cap->fake_vf_num_cfg = dev_cap->fake_vf_num; + cap->fake_vf_bfilter_start_addr = dev_cap->fake_vf_bfilter_start_addr; + cap->fake_vf_bfilter_len = dev_cap->fake_vf_bfilter_len; + + if (COMM_SUPPORT_VIRTIO_VQ_SIZE(hwdev)) + cap->virtio_vq_size = (u16)(VIRTIO_BASE_VQ_SIZE << dev_cap->virtio_vq_size); + else + cap->virtio_vq_size = VIRTIO_DEFAULT_VQ_SIZE; + + if (dev_cap->sf_svc_attr & SF_SVC_FT_BIT) + attr->ft_en = true; + else + attr->ft_en = false; + + if (dev_cap->sf_svc_attr & SF_SVC_RDMA_BIT) + attr->rdma_en = true; + else + attr->rdma_en = false; + + /* PPF will overwrite it when parse dynamic resource */ + if (dev_cap->func_sf_en) + cap->sf_en = true; + else + cap->sf_en = false; + + cap->lb_mode = dev_cap->lb_mode; + cap->smf_pg = dev_cap->smf_pg; + + cap->timer_en = dev_cap->timer_en; + cap->host_oq_id_mask_val = dev_cap->host_oq_id_mask_val; + cap->max_connect_num = dev_cap->max_conn_num; + cap->max_stick2cache_num = dev_cap->max_stick2cache_num; + cap->bfilter_start_addr = dev_cap->max_bfilter_start_addr; + cap->bfilter_len = dev_cap->bfilter_len; + cap->hash_bucket_num = dev_cap->hash_bucket_num; +} + +static void parse_pub_res_cap(struct hinic3_hwdev *hwdev, + struct service_cap *cap, + struct cfg_cmd_dev_cap *dev_cap, + enum func_type type) +{ + cap->host_id = dev_cap->host_id; + cap->ep_id = dev_cap->ep_id; + cap->er_id = dev_cap->er_id; + cap->port_id = dev_cap->port_id; + + cap->svc_type = dev_cap->svc_cap_en; + cap->chip_svc_type = cap->svc_type; + + cap->cos_valid_bitmap = dev_cap->valid_cos_bitmap; + cap->port_cos_valid_bitmap = dev_cap->port_cos_valid_bitmap; + cap->flexq_en = dev_cap->flexq_en; + + cap->host_total_function = dev_cap->host_total_func; + cap->host_valid_bitmap = dev_cap->host_valid_bitmap; + cap->master_host_id = dev_cap->master_host_id; + cap->srv_multi_host_mode = dev_cap->srv_multi_host_mode; + + if (type != TYPE_VF) { + cap->max_vf = dev_cap->max_vf; + cap->pf_num = dev_cap->host_pf_num; + cap->pf_id_start = dev_cap->pf_id_start; + cap->vf_num = dev_cap->host_vf_num; + cap->vf_id_start = dev_cap->vf_id_start; + } else { + cap->max_vf = 0; + } + + parse_cqm_res_cap(hwdev, cap, dev_cap); + parse_pub_res_cap_dfx(hwdev, cap); +} + +static void parse_dynamic_share_res_cap(struct service_cap *cap, + const struct cfg_cmd_dev_cap *dev_cap) +{ + if (dev_cap->host_sf_en) + cap->sf_en = true; + else + cap->sf_en = false; +} + +static void parse_l2nic_res_cap(struct hinic3_hwdev *hwdev, + struct service_cap *cap, + struct cfg_cmd_dev_cap *dev_cap, + enum func_type type) +{ + struct nic_service_cap *nic_cap = &cap->nic_cap; + + nic_cap->max_sqs = dev_cap->nic_max_sq_id + 1; + nic_cap->max_rqs = dev_cap->nic_max_rq_id 
+ 1; + nic_cap->default_num_queues = dev_cap->nic_default_num_queues; + + sdk_info(hwdev->dev_hdl, "L2nic resource capability, max_sqs: 0x%x, max_rqs: 0x%x\n", + nic_cap->max_sqs, nic_cap->max_rqs); + + /* Check parameters from firmware */ + if (nic_cap->max_sqs > HINIC3_CFG_MAX_QP || + nic_cap->max_rqs > HINIC3_CFG_MAX_QP) { + sdk_info(hwdev->dev_hdl, "Number of qp exceeds limit[1-%d]: sq: %u, rq: %u\n", + HINIC3_CFG_MAX_QP, nic_cap->max_sqs, nic_cap->max_rqs); + nic_cap->max_sqs = HINIC3_CFG_MAX_QP; + nic_cap->max_rqs = HINIC3_CFG_MAX_QP; + } +} + +static void parse_fc_res_cap(struct hinic3_hwdev *hwdev, + struct service_cap *cap, + struct cfg_cmd_dev_cap *dev_cap, + enum func_type type) +{ + struct dev_fc_svc_cap *fc_cap = &cap->fc_cap.dev_fc_cap; + + fc_cap->max_parent_qpc_num = dev_cap->fc_max_pctx; + fc_cap->scq_num = dev_cap->fc_max_scq; + fc_cap->srq_num = dev_cap->fc_max_srq; + fc_cap->max_child_qpc_num = dev_cap->fc_max_cctx; + fc_cap->child_qpc_id_start = dev_cap->fc_cctx_id_start; + fc_cap->vp_id_start = dev_cap->fc_vp_id_start; + fc_cap->vp_id_end = dev_cap->fc_vp_id_end; + + sdk_info(hwdev->dev_hdl, "Get fc resource capability\n"); + sdk_info(hwdev->dev_hdl, + "Max_parent_qpc_num: 0x%x, scq_num: 0x%x, srq_num: 0x%x, max_child_qpc_num: 0x%x, child_qpc_id_start: 0x%x\n", + fc_cap->max_parent_qpc_num, fc_cap->scq_num, fc_cap->srq_num, + fc_cap->max_child_qpc_num, fc_cap->child_qpc_id_start); + sdk_info(hwdev->dev_hdl, "Vp_id_start: 0x%x, vp_id_end: 0x%x\n", + fc_cap->vp_id_start, fc_cap->vp_id_end); +} + +static void parse_roce_res_cap(struct hinic3_hwdev *hwdev, + struct service_cap *cap, + struct cfg_cmd_dev_cap *dev_cap, + enum func_type type) +{ + struct dev_roce_svc_own_cap *roce_cap = + &cap->rdma_cap.dev_rdma_cap.roce_own_cap; + + roce_cap->max_qps = dev_cap->roce_max_qp; + roce_cap->max_cqs = dev_cap->roce_max_cq; + roce_cap->max_srqs = dev_cap->roce_max_srq; + roce_cap->max_mpts = dev_cap->roce_max_mpt; + roce_cap->max_drc_qps = dev_cap->roce_max_drc_qp; + + roce_cap->wqe_cl_start = dev_cap->roce_wqe_cl_start; + roce_cap->wqe_cl_end = dev_cap->roce_wqe_cl_end; + roce_cap->wqe_cl_sz = dev_cap->roce_wqe_cl_size; + + sdk_info(hwdev->dev_hdl, "Get roce resource capability, type: 0x%x\n", + type); + sdk_info(hwdev->dev_hdl, "Max_qps: 0x%x, max_cqs: 0x%x, max_srqs: 0x%x, max_mpts: 0x%x, max_drcts: 0x%x\n", + roce_cap->max_qps, roce_cap->max_cqs, roce_cap->max_srqs, + roce_cap->max_mpts, roce_cap->max_drc_qps); + + sdk_info(hwdev->dev_hdl, "Wqe_start: 0x%x, wqe_end: 0x%x, wqe_sz: 0x%x\n", + roce_cap->wqe_cl_start, roce_cap->wqe_cl_end, + roce_cap->wqe_cl_sz); + + if (roce_cap->max_qps == 0) { + if (type == TYPE_PF || type == TYPE_PPF) { + roce_cap->max_qps = 0x400; + roce_cap->max_cqs = 0x800; + roce_cap->max_srqs = 0x400; + roce_cap->max_mpts = 0x400; + roce_cap->max_drc_qps = 0x40; + } else { + roce_cap->max_qps = 0x200; + roce_cap->max_cqs = 0x400; + roce_cap->max_srqs = 0x200; + roce_cap->max_mpts = 0x200; + roce_cap->max_drc_qps = 0x40; + } + } +} + +static void parse_rdma_res_cap(struct hinic3_hwdev *hwdev, + struct service_cap *cap, + struct cfg_cmd_dev_cap *dev_cap, + enum func_type type) +{ + struct dev_roce_svc_own_cap *roce_cap = + &cap->rdma_cap.dev_rdma_cap.roce_own_cap; + + roce_cap->cmtt_cl_start = dev_cap->roce_cmtt_cl_start; + roce_cap->cmtt_cl_end = dev_cap->roce_cmtt_cl_end; + roce_cap->cmtt_cl_sz = dev_cap->roce_cmtt_cl_size; + + roce_cap->dmtt_cl_start = dev_cap->roce_dmtt_cl_start; + roce_cap->dmtt_cl_end = dev_cap->roce_dmtt_cl_end; + roce_cap->dmtt_cl_sz = 
dev_cap->roce_dmtt_cl_size; + + sdk_info(hwdev->dev_hdl, "Get rdma resource capability, Cmtt_start: 0x%x, cmtt_end: 0x%x, cmtt_sz: 0x%x\n", + roce_cap->cmtt_cl_start, roce_cap->cmtt_cl_end, + roce_cap->cmtt_cl_sz); + + sdk_info(hwdev->dev_hdl, "Dmtt_start: 0x%x, dmtt_end: 0x%x, dmtt_sz: 0x%x\n", + roce_cap->dmtt_cl_start, roce_cap->dmtt_cl_end, + roce_cap->dmtt_cl_sz); +} + +static void parse_ovs_res_cap(struct hinic3_hwdev *hwdev, + struct service_cap *cap, + struct cfg_cmd_dev_cap *dev_cap, + enum func_type type) +{ + struct ovs_service_cap *ovs_cap = &cap->ovs_cap; + + ovs_cap->dev_ovs_cap.max_pctxs = dev_cap->ovs_max_qpc; + ovs_cap->dev_ovs_cap.fake_vf_max_pctx = dev_cap->fake_vf_max_pctx; + ovs_cap->dev_ovs_cap.fake_vf_start_id = dev_cap->fake_vf_start_id; + ovs_cap->dev_ovs_cap.fake_vf_num = dev_cap->fake_vf_num; + ovs_cap->dev_ovs_cap.dynamic_qp_en = dev_cap->flexq_en; + + sdk_info(hwdev->dev_hdl, + "Get ovs resource capability, max_qpc: 0x%x, fake_vf_start_id: 0x%x, fake_vf_num: 0x%x\n", + ovs_cap->dev_ovs_cap.max_pctxs, + ovs_cap->dev_ovs_cap.fake_vf_start_id, + ovs_cap->dev_ovs_cap.fake_vf_num); + sdk_info(hwdev->dev_hdl, + "fake_vf_max_qpc: 0x%x, dynamic_qp_en: 0x%x\n", + ovs_cap->dev_ovs_cap.fake_vf_max_pctx, + ovs_cap->dev_ovs_cap.dynamic_qp_en); +} + +static void parse_ppa_res_cap(struct hinic3_hwdev *hwdev, + struct service_cap *cap, + struct cfg_cmd_dev_cap *dev_cap, + enum func_type type) +{ + struct ppa_service_cap *dip_cap = &cap->ppa_cap; + + dip_cap->qpc_fake_vf_ctx_num = dev_cap->fake_vf_max_pctx; + dip_cap->qpc_fake_vf_start = dev_cap->fake_vf_start_id; + dip_cap->qpc_fake_vf_num = dev_cap->fake_vf_num; + dip_cap->bloomfilter_en = dev_cap->fake_vf_bfilter_len ? 1 : 0; + dip_cap->bloomfilter_length = dev_cap->fake_vf_bfilter_len; + sdk_info(hwdev->dev_hdl, + "Get ppa resource capability, fake_vf_start_id: 0x%x, fake_vf_num: 0x%x, fake_vf_max_qpc: 0x%x\n", + dip_cap->qpc_fake_vf_start, + dip_cap->qpc_fake_vf_num, + dip_cap->qpc_fake_vf_ctx_num); +} + +static void parse_toe_res_cap(struct hinic3_hwdev *hwdev, + struct service_cap *cap, + struct cfg_cmd_dev_cap *dev_cap, + enum func_type type) +{ + struct dev_toe_svc_cap *toe_cap = &cap->toe_cap.dev_toe_cap; + + toe_cap->max_pctxs = dev_cap->toe_max_pctx; + toe_cap->max_cqs = dev_cap->toe_max_cq; + toe_cap->max_srqs = dev_cap->toe_max_srq; + toe_cap->srq_id_start = dev_cap->toe_srq_id_start; + toe_cap->max_mpts = dev_cap->toe_max_mpt; + toe_cap->max_cctxt = dev_cap->toe_max_cctxt; + + sdk_info(hwdev->dev_hdl, + "Get toe resource capability, max_pctxs: 0x%x, max_cqs: 0x%x, max_srqs: 0x%x, srq_id_start: 0x%x, max_mpts: 0x%x\n", + toe_cap->max_pctxs, toe_cap->max_cqs, toe_cap->max_srqs, + toe_cap->srq_id_start, toe_cap->max_mpts); +} + +static void parse_ipsec_res_cap(struct hinic3_hwdev *hwdev, + struct service_cap *cap, + struct cfg_cmd_dev_cap *dev_cap, + enum func_type type) +{ + struct ipsec_service_cap *ipsec_cap = &cap->ipsec_cap; + + ipsec_cap->dev_ipsec_cap.max_sactxs = dev_cap->ipsec_max_sactx; + ipsec_cap->dev_ipsec_cap.max_cqs = dev_cap->ipsec_max_cq; + + sdk_info(hwdev->dev_hdl, "Get IPsec resource capability, max_sactxs: 0x%x, max_cqs: 0x%x\n", + dev_cap->ipsec_max_sactx, dev_cap->ipsec_max_cq); +} + +static void parse_vbs_res_cap(struct hinic3_hwdev *hwdev, + struct service_cap *cap, + struct cfg_cmd_dev_cap *dev_cap, + enum func_type type) +{ + struct vbs_service_cap *vbs_cap = &cap->vbs_cap; + + vbs_cap->vbs_max_volq = dev_cap->vbs_max_volq; + + sdk_info(hwdev->dev_hdl, "Get VBS resource capability, vbs_max_volq: 
0x%x\n", + dev_cap->vbs_max_volq); +} + +static void parse_dev_cap(struct hinic3_hwdev *dev, + struct cfg_cmd_dev_cap *dev_cap, enum func_type type) +{ + struct service_cap *cap = &dev->cfg_mgmt->svc_cap; + + /* Public resource */ + parse_pub_res_cap(dev, cap, dev_cap, type); + + /* PPF managed dynamic resource */ + if (type == TYPE_PPF) + parse_dynamic_share_res_cap(cap, dev_cap); + + /* L2 NIC resource */ + if (IS_NIC_TYPE(dev)) + parse_l2nic_res_cap(dev, cap, dev_cap, type); + + /* FC without virtualization */ + if (type == TYPE_PF || type == TYPE_PPF) { + if (IS_FC_TYPE(dev)) + parse_fc_res_cap(dev, cap, dev_cap, type); + } + + /* toe resource */ + if (IS_TOE_TYPE(dev)) + parse_toe_res_cap(dev, cap, dev_cap, type); + + /* mtt cache line */ + if (IS_RDMA_ENABLE(dev)) + parse_rdma_res_cap(dev, cap, dev_cap, type); + + /* RoCE resource */ + if (IS_ROCE_TYPE(dev)) + parse_roce_res_cap(dev, cap, dev_cap, type); + + if (IS_OVS_TYPE(dev)) + parse_ovs_res_cap(dev, cap, dev_cap, type); + + if (IS_IPSEC_TYPE(dev)) + parse_ipsec_res_cap(dev, cap, dev_cap, type); + + if (IS_PPA_TYPE(dev)) + parse_ppa_res_cap(dev, cap, dev_cap, type); + + if (IS_VBS_TYPE(dev)) + parse_vbs_res_cap(dev, cap, dev_cap, type); +} + +static int get_cap_from_fw(struct hinic3_hwdev *dev, enum func_type type) +{ + struct cfg_cmd_dev_cap dev_cap; + u16 out_len = sizeof(dev_cap); + int err; + + memset(&dev_cap, 0, sizeof(dev_cap)); + dev_cap.func_id = hinic3_global_func_id(dev); + sdk_info(dev->dev_hdl, "Get cap from fw, func_idx: %u\n", + dev_cap.func_id); + + err = hinic3_msg_to_mgmt_sync(dev, HINIC3_MOD_CFGM, CFG_CMD_GET_DEV_CAP, + &dev_cap, sizeof(dev_cap), + &dev_cap, &out_len, 0, + HINIC3_CHANNEL_COMM); + if (err || dev_cap.head.status || !out_len) { + sdk_err(dev->dev_hdl, + "Failed to get capability from FW, err: %d, status: 0x%x, out size: 0x%x\n", + err, dev_cap.head.status, out_len); + return -EIO; + } + + parse_dev_cap(dev, &dev_cap, type); + + return 0; +} + +static int hinic3_get_dev_cap(struct hinic3_hwdev *dev) +{ + enum func_type type = HINIC3_FUNC_TYPE(dev); + int err; + + switch (type) { + case TYPE_PF: + case TYPE_PPF: + case TYPE_VF: + err = get_cap_from_fw(dev, type); + if (err) { + sdk_err(dev->dev_hdl, "Failed to get function capability\n"); + return err; + } + break; + default: + sdk_err(dev->dev_hdl, "Unsupported PCI Function type: %d\n", + type); + return -EINVAL; + } + + return 0; +} + +int hinic3_get_ppf_timer_cfg(void *hwdev) +{ + struct hinic3_hwdev *dev = hwdev; + struct cfg_cmd_host_timer cfg_host_timer; + struct service_cap *cap = &dev->cfg_mgmt->svc_cap; + u16 out_len = sizeof(cfg_host_timer); + int err; + + memset(&cfg_host_timer, 0, sizeof(cfg_host_timer)); + cfg_host_timer.host_id = dev->cfg_mgmt->svc_cap.host_id; + + err = hinic3_msg_to_mgmt_sync(dev, HINIC3_MOD_CFGM, CFG_CMD_GET_HOST_TIMER, + &cfg_host_timer, sizeof(cfg_host_timer), + &cfg_host_timer, &out_len, 0, + HINIC3_CHANNEL_COMM); + if (err || cfg_host_timer.head.status || !out_len) { + sdk_err(dev->dev_hdl, + "Failed to get host timer cfg from FW, err: %d, status: 0x%x, out size: 0x%x\n", + err, cfg_host_timer.head.status, out_len); + return -EIO; + } + + cap->timer_pf_id_start = cfg_host_timer.timer_pf_id_start; + cap->timer_pf_num = cfg_host_timer.timer_pf_num; + cap->timer_vf_id_start = cfg_host_timer.timer_vf_id_start; + cap->timer_vf_num = cfg_host_timer.timer_vf_num; + + return 0; +} + +static void nic_param_fix(struct hinic3_hwdev *dev) +{ +} + +static void rdma_mtt_fix(struct hinic3_hwdev *dev) +{ + struct service_cap *cap 
= &dev->cfg_mgmt->svc_cap; + struct rdma_service_cap *rdma_cap = &cap->rdma_cap; + + rdma_cap->log_mtt = LOG_MTT_SEG; + rdma_cap->log_mtt_seg = LOG_MTT_SEG; + rdma_cap->mtt_entry_sz = MTT_ENTRY_SZ; + rdma_cap->mpt_entry_sz = RDMA_MPT_ENTRY_SZ; + rdma_cap->num_mtts = RDMA_NUM_MTTS; +} + +static void rdma_param_fix(struct hinic3_hwdev *dev) +{ + struct service_cap *cap = &dev->cfg_mgmt->svc_cap; + struct rdma_service_cap *rdma_cap = &cap->rdma_cap; + struct dev_roce_svc_own_cap *roce_cap = + &rdma_cap->dev_rdma_cap.roce_own_cap; + + rdma_cap->log_mtt = LOG_MTT_SEG; + rdma_cap->log_rdmarc = LOG_RDMARC_SEG; + rdma_cap->reserved_qps = RDMA_RSVD_QPS; + rdma_cap->max_sq_sg = RDMA_MAX_SQ_SGE; + + /* RoCE */ + if (IS_ROCE_TYPE(dev)) { + roce_cap->qpc_entry_sz = ROCE_QPC_ENTRY_SZ; + roce_cap->max_wqes = ROCE_MAX_WQES; + roce_cap->max_rq_sg = ROCE_MAX_RQ_SGE; + roce_cap->max_sq_inline_data_sz = ROCE_MAX_SQ_INLINE_DATA_SZ; + roce_cap->max_rq_desc_sz = ROCE_MAX_RQ_DESC_SZ; + roce_cap->rdmarc_entry_sz = ROCE_RDMARC_ENTRY_SZ; + roce_cap->max_qp_init_rdma = ROCE_MAX_QP_INIT_RDMA; + roce_cap->max_qp_dest_rdma = ROCE_MAX_QP_DEST_RDMA; + roce_cap->max_srq_wqes = ROCE_MAX_SRQ_WQES; + roce_cap->reserved_srqs = ROCE_RSVD_SRQS; + roce_cap->max_srq_sge = ROCE_MAX_SRQ_SGE; + roce_cap->srqc_entry_sz = ROCE_SRQC_ENTERY_SZ; + roce_cap->max_msg_sz = ROCE_MAX_MSG_SZ; + } + + rdma_cap->max_sq_desc_sz = RDMA_MAX_SQ_DESC_SZ; + rdma_cap->wqebb_size = WQEBB_SZ; + rdma_cap->max_cqes = RDMA_MAX_CQES; + rdma_cap->reserved_cqs = RDMA_RSVD_CQS; + rdma_cap->cqc_entry_sz = RDMA_CQC_ENTRY_SZ; + rdma_cap->cqe_size = RDMA_CQE_SZ; + rdma_cap->reserved_mrws = RDMA_RSVD_MRWS; + rdma_cap->mpt_entry_sz = RDMA_MPT_ENTRY_SZ; + + /* 2^8 - 1 + * +------------------------+-----------+ + * | 4B | 1M(20b) | Key(8b) | + * +------------------------+-----------+ + * key = 8bit key + 24bit index, + * now the Lkey of an SGE uses 2 bits (bit31 and bit30), so the key only has 10 bits, + * we use the original 8 bits directly for simplification + */ + rdma_cap->max_fmr_maps = 0xff; + rdma_cap->num_mtts = RDMA_NUM_MTTS; + rdma_cap->log_mtt_seg = LOG_MTT_SEG; + rdma_cap->mtt_entry_sz = MTT_ENTRY_SZ; + rdma_cap->log_rdmarc_seg = LOG_RDMARC_SEG; + rdma_cap->local_ca_ack_delay = LOCAL_ACK_DELAY; + rdma_cap->num_ports = RDMA_NUM_PORTS; + rdma_cap->db_page_size = DB_PAGE_SZ; + rdma_cap->direct_wqe_size = DWQE_SZ; + rdma_cap->num_pds = NUM_PD; + rdma_cap->reserved_pds = RSVD_PD; + rdma_cap->max_xrcds = MAX_XRCDS; + rdma_cap->reserved_xrcds = RSVD_XRCDS; + rdma_cap->max_gid_per_port = MAX_GID_PER_PORT; + rdma_cap->gid_entry_sz = GID_ENTRY_SZ; + rdma_cap->reserved_lkey = RSVD_LKEY; + rdma_cap->num_comp_vectors = (u32)dev->cfg_mgmt->eq_info.num_ceq; + rdma_cap->page_size_cap = PAGE_SZ_CAP; + rdma_cap->flags = (RDMA_BMME_FLAG_LOCAL_INV | + RDMA_BMME_FLAG_REMOTE_INV | + RDMA_BMME_FLAG_FAST_REG_WR | + RDMA_DEV_CAP_FLAG_XRC | + RDMA_DEV_CAP_FLAG_MEM_WINDOW | + RDMA_BMME_FLAG_TYPE_2_WIN | + RDMA_BMME_FLAG_WIN_TYPE_2B | + RDMA_DEV_CAP_FLAG_ATOMIC); + rdma_cap->max_frpl_len = MAX_FRPL_LEN; + rdma_cap->max_pkeys = MAX_PKEYS; +} + +static void toe_param_fix(struct hinic3_hwdev *dev) +{ + struct service_cap *cap = &dev->cfg_mgmt->svc_cap; + struct toe_service_cap *toe_cap = &cap->toe_cap; + + toe_cap->pctx_sz = TOE_PCTX_SZ; + toe_cap->scqc_sz = TOE_CQC_SZ; +} + +static void ovs_param_fix(struct hinic3_hwdev *dev) +{ + struct service_cap *cap = &dev->cfg_mgmt->svc_cap; + struct ovs_service_cap *ovs_cap = &cap->ovs_cap; + + ovs_cap->pctx_sz = OVS_PCTX_SZ; +} + +static void ppa_param_fix(struct 
hinic3_hwdev *dev) +{ + struct service_cap *cap = &dev->cfg_mgmt->svc_cap; + struct ppa_service_cap *ppa_cap = &cap->ppa_cap; + + ppa_cap->pctx_sz = PPA_PCTX_SZ; +} + +static void fc_param_fix(struct hinic3_hwdev *dev) +{ + struct service_cap *cap = &dev->cfg_mgmt->svc_cap; + struct fc_service_cap *fc_cap = &cap->fc_cap; + + fc_cap->parent_qpc_size = FC_PCTX_SZ; + fc_cap->child_qpc_size = FC_CCTX_SZ; + fc_cap->sqe_size = FC_SQE_SZ; + + fc_cap->scqc_size = FC_SCQC_SZ; + fc_cap->scqe_size = FC_SCQE_SZ; + + fc_cap->srqc_size = FC_SRQC_SZ; + fc_cap->srqe_size = FC_SRQE_SZ; +} + +static void ipsec_param_fix(struct hinic3_hwdev *dev) +{ + struct service_cap *cap = &dev->cfg_mgmt->svc_cap; + struct ipsec_service_cap *ipsec_cap = &cap->ipsec_cap; + + ipsec_cap->sactx_sz = IPSEC_SACTX_SZ; +} + +static void init_service_param(struct hinic3_hwdev *dev) +{ + if (IS_NIC_TYPE(dev)) + nic_param_fix(dev); + if (IS_RDMA_ENABLE(dev)) + rdma_mtt_fix(dev); + if (IS_ROCE_TYPE(dev)) + rdma_param_fix(dev); + if (IS_FC_TYPE(dev)) + fc_param_fix(dev); + if (IS_TOE_TYPE(dev)) + toe_param_fix(dev); + if (IS_OVS_TYPE(dev)) + ovs_param_fix(dev); + if (IS_IPSEC_TYPE(dev)) + ipsec_param_fix(dev); + if (IS_PPA_TYPE(dev)) + ppa_param_fix(dev); +} + +static void cfg_get_eq_num(struct hinic3_hwdev *dev) +{ + struct cfg_eq_info *eq_info = &dev->cfg_mgmt->eq_info; + + eq_info->num_ceq = dev->hwif->attr.num_ceqs; + eq_info->num_ceq_remain = eq_info->num_ceq; +} + +static int cfg_init_eq(struct hinic3_hwdev *dev) +{ + struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt; + struct cfg_eq *eq = NULL; + u8 num_ceq, i = 0; + + cfg_get_eq_num(dev); + num_ceq = cfg_mgmt->eq_info.num_ceq; + + sdk_info(dev->dev_hdl, "Cfg mgmt: ceqs=0x%x, remain=0x%x\n", + cfg_mgmt->eq_info.num_ceq, cfg_mgmt->eq_info.num_ceq_remain); + + if (!num_ceq) { + sdk_err(dev->dev_hdl, "Ceq num cfg in fw is zero\n"); + return -EFAULT; + } + + eq = kcalloc(num_ceq, sizeof(*eq), GFP_KERNEL); + if (!eq) + return -ENOMEM; + + for (i = 0; i < num_ceq; ++i) { + eq[i].eqn = i; + eq[i].free = CFG_FREE; + eq[i].type = SERVICE_T_MAX; + } + + cfg_mgmt->eq_info.eq = eq; + + mutex_init(&cfg_mgmt->eq_info.eq_mutex); + + return 0; +} + +int hinic3_vector_to_eqn(void *hwdev, enum hinic3_service_type type, int vector) +{ + struct hinic3_hwdev *dev = hwdev; + struct cfg_mgmt_info *cfg_mgmt = NULL; + struct cfg_eq *eq = NULL; + int eqn = -EINVAL; + int vector_num = vector; + + if (!hwdev || vector < 0) + return -EINVAL; + + if (type != SERVICE_T_ROCE) { + sdk_err(dev->dev_hdl, + "Service type :%d, only RDMA service could get eqn by vector.\n", + type); + return -EINVAL; + } + + cfg_mgmt = dev->cfg_mgmt; + vector_num = (vector_num % cfg_mgmt->eq_info.num_ceq) + CFG_RDMA_CEQ_BASE; + + eq = cfg_mgmt->eq_info.eq; + if (eq[vector_num].type == SERVICE_T_ROCE && eq[vector_num].free == CFG_BUSY) + eqn = eq[vector_num].eqn; + + return eqn; +} +EXPORT_SYMBOL(hinic3_vector_to_eqn); + +static int cfg_init_interrupt(struct hinic3_hwdev *dev) +{ + struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt; + struct cfg_irq_info *irq_info = &cfg_mgmt->irq_param_info; + u16 intr_num = dev->hwif->attr.num_irqs; + u16 intr_needed = dev->hwif->attr.msix_flex_en ? 
(dev->hwif->attr.num_aeqs + + dev->hwif->attr.num_ceqs + dev->hwif->attr.num_sq) : intr_num; + + if (!intr_num) { + sdk_err(dev->dev_hdl, "Irq num cfg in fw is zero, msix_flex_en %d\n", + dev->hwif->attr.msix_flex_en); + return -EFAULT; + } + + if (intr_needed > intr_num) { + sdk_warn(dev->dev_hdl, "Irq num cfg(%d) is less than the needed irq num(%d) msix_flex_en %d\n", + intr_num, intr_needed, dev->hwif->attr.msix_flex_en); + intr_needed = intr_num; + } + + irq_info->alloc_info = kcalloc(intr_num, sizeof(*irq_info->alloc_info), + GFP_KERNEL); + if (!irq_info->alloc_info) + return -ENOMEM; + + irq_info->num_irq_hw = intr_needed; + /* Production requires that VF supports only MSI-X */ + if (HINIC3_FUNC_TYPE(dev) == TYPE_VF) + cfg_mgmt->svc_cap.interrupt_type = INTR_TYPE_MSIX; + else + cfg_mgmt->svc_cap.interrupt_type = 0; + + mutex_init(&irq_info->irq_mutex); + return 0; +} + +static int cfg_enable_interrupt(struct hinic3_hwdev *dev) +{ + struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt; + u16 nreq = cfg_mgmt->irq_param_info.num_irq_hw; + + void *pcidev = dev->pcidev_hdl; + struct irq_alloc_info_st *irq_info = NULL; + struct msix_entry *entry = NULL; + u16 i = 0; + int actual_irq; + + irq_info = cfg_mgmt->irq_param_info.alloc_info; + + sdk_info(dev->dev_hdl, "Interrupt type: %u, irq num: %u.\n", + cfg_mgmt->svc_cap.interrupt_type, nreq); + + switch (cfg_mgmt->svc_cap.interrupt_type) { + case INTR_TYPE_MSIX: + if (!nreq) { + sdk_err(dev->dev_hdl, "Interrupt number cannot be zero\n"); + return -EINVAL; + } + entry = kcalloc(nreq, sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + for (i = 0; i < nreq; i++) + entry[i].entry = i; + + actual_irq = pci_enable_msix_range(pcidev, entry, + VECTOR_THRESHOLD, nreq); + if (actual_irq < 0) { + sdk_err(dev->dev_hdl, "Alloc msix entries with threshold 2 failed. 
actual_irq: %d\n", + actual_irq); + kfree(entry); + return -ENOMEM; + } + + nreq = (u16)actual_irq; + cfg_mgmt->irq_param_info.num_total = nreq; + cfg_mgmt->irq_param_info.num_irq_remain = nreq; + sdk_info(dev->dev_hdl, "Request %u msix vectors success.\n", + nreq); + + for (i = 0; i < nreq; ++i) { + /* u16 driver uses to specify entry, OS writes */ + irq_info[i].info.msix_entry_idx = entry[i].entry; + /* u32 kernel uses to write allocated vector */ + irq_info[i].info.irq_id = entry[i].vector; + irq_info[i].type = SERVICE_T_MAX; + irq_info[i].free = CFG_FREE; + } + + kfree(entry); + + break; + + default: + sdk_err(dev->dev_hdl, "Unsupported interrupt type %d\n", + cfg_mgmt->svc_cap.interrupt_type); + break; + } + + return 0; +} + +int hinic3_alloc_irqs(void *hwdev, enum hinic3_service_type type, u16 num, + struct irq_info *irq_info_array, u16 *act_num) +{ + struct hinic3_hwdev *dev = hwdev; + struct cfg_mgmt_info *cfg_mgmt = NULL; + struct cfg_irq_info *irq_info = NULL; + struct irq_alloc_info_st *alloc_info = NULL; + int max_num_irq; + u16 free_num_irq; + int i, j; + u16 num_new = num; + + if (!hwdev || !irq_info_array || !act_num) + return -EINVAL; + + cfg_mgmt = dev->cfg_mgmt; + irq_info = &cfg_mgmt->irq_param_info; + alloc_info = irq_info->alloc_info; + max_num_irq = irq_info->num_total; + free_num_irq = irq_info->num_irq_remain; + + mutex_lock(&irq_info->irq_mutex); + + if (num > free_num_irq) { + if (free_num_irq == 0) { + sdk_err(dev->dev_hdl, "No free irq resource in cfg mgmt\n"); + mutex_unlock(&irq_info->irq_mutex); + return -ENOMEM; + } + + sdk_warn(dev->dev_hdl, "Only %u irq resources in cfg mgmt\n", free_num_irq); + num_new = free_num_irq; + } + + *act_num = 0; + + for (i = 0; i < num_new; i++) { + for (j = 0; j < max_num_irq; j++) { + if (alloc_info[j].free == CFG_FREE) { + if (irq_info->num_irq_remain == 0) { + sdk_err(dev->dev_hdl, "No free irq resource in cfg mgmt\n"); + mutex_unlock(&irq_info->irq_mutex); + return -EINVAL; + } + alloc_info[j].type = type; + alloc_info[j].free = CFG_BUSY; + + irq_info_array[i].msix_entry_idx = + alloc_info[j].info.msix_entry_idx; + irq_info_array[i].irq_id = alloc_info[j].info.irq_id; + (*act_num)++; + irq_info->num_irq_remain--; + + break; + } + } + } + + mutex_unlock(&irq_info->irq_mutex); + return 0; +} +EXPORT_SYMBOL(hinic3_alloc_irqs); + +void hinic3_free_irq(void *hwdev, enum hinic3_service_type type, u32 irq_id) +{ + struct hinic3_hwdev *dev = hwdev; + struct cfg_mgmt_info *cfg_mgmt = NULL; + struct cfg_irq_info *irq_info = NULL; + struct irq_alloc_info_st *alloc_info = NULL; + int max_num_irq; + int i; + + if (!hwdev) + return; + + cfg_mgmt = dev->cfg_mgmt; + irq_info = &cfg_mgmt->irq_param_info; + alloc_info = irq_info->alloc_info; + max_num_irq = irq_info->num_total; + + mutex_lock(&irq_info->irq_mutex); + + for (i = 0; i < max_num_irq; i++) { + if (irq_id == alloc_info[i].info.irq_id && + type == alloc_info[i].type) { + if (alloc_info[i].free == CFG_BUSY) { + alloc_info[i].free = CFG_FREE; + irq_info->num_irq_remain++; + if (irq_info->num_irq_remain > max_num_irq) { + sdk_err(dev->dev_hdl, "Find target, but over range\n"); + mutex_unlock(&irq_info->irq_mutex); + return; + } + break; + } + } + } + + if (i >= max_num_irq) + sdk_warn(dev->dev_hdl, "Irq %u doesn't need to be freed\n", irq_id); + + mutex_unlock(&irq_info->irq_mutex); +} +EXPORT_SYMBOL(hinic3_free_irq); + +int hinic3_alloc_ceqs(void *hwdev, enum hinic3_service_type type, int num, + int *ceq_id_array, int *act_num) +{ + struct hinic3_hwdev *dev = hwdev; + struct cfg_mgmt_info 
*cfg_mgmt = NULL; + struct cfg_eq_info *eq = NULL; + int free_ceq; + int i, j; + int num_new = num; + + if (!hwdev || !ceq_id_array || !act_num) + return -EINVAL; + + cfg_mgmt = dev->cfg_mgmt; + eq = &cfg_mgmt->eq_info; + free_ceq = eq->num_ceq_remain; + + mutex_lock(&eq->eq_mutex); + + if (num > free_ceq) { + if (free_ceq <= 0) { + sdk_err(dev->dev_hdl, "No free ceq resource in cfg mgmt\n"); + mutex_unlock(&eq->eq_mutex); + return -ENOMEM; + } + + sdk_warn(dev->dev_hdl, "Only %d ceq resources in cfg mgmt\n", + free_ceq); + } + + *act_num = 0; + + num_new = min(num_new, eq->num_ceq - CFG_RDMA_CEQ_BASE); + for (i = 0; i < num_new; i++) { + if (eq->num_ceq_remain == 0) { + sdk_warn(dev->dev_hdl, "Alloc %d ceqs, less than required %d ceqs\n", + *act_num, num_new); + mutex_unlock(&eq->eq_mutex); + return 0; + } + + for (j = CFG_RDMA_CEQ_BASE; j < eq->num_ceq; j++) { + if (eq->eq[j].free == CFG_FREE) { + eq->eq[j].type = type; + eq->eq[j].free = CFG_BUSY; + eq->num_ceq_remain--; + ceq_id_array[i] = eq->eq[j].eqn; + (*act_num)++; + break; + } + } + } + + mutex_unlock(&eq->eq_mutex); + return 0; +} +EXPORT_SYMBOL(hinic3_alloc_ceqs); + +void hinic3_free_ceq(void *hwdev, enum hinic3_service_type type, int ceq_id) +{ + struct hinic3_hwdev *dev = hwdev; + struct cfg_mgmt_info *cfg_mgmt = NULL; + struct cfg_eq_info *eq = NULL; + u8 num_ceq; + u8 i = 0; + + if (!hwdev) + return; + + cfg_mgmt = dev->cfg_mgmt; + eq = &cfg_mgmt->eq_info; + num_ceq = eq->num_ceq; + + mutex_lock(&eq->eq_mutex); + + for (i = 0; i < num_ceq; i++) { + if (ceq_id == eq->eq[i].eqn && + type == cfg_mgmt->eq_info.eq[i].type) { + if (eq->eq[i].free == CFG_BUSY) { + eq->eq[i].free = CFG_FREE; + eq->num_ceq_remain++; + if (eq->num_ceq_remain > num_ceq) + eq->num_ceq_remain %= num_ceq; + + mutex_unlock(&eq->eq_mutex); + return; + } + } + } + + if (i >= num_ceq) + sdk_warn(dev->dev_hdl, "Ceq %d doesn't need to be freed\n", ceq_id); + + mutex_unlock(&eq->eq_mutex); +} +EXPORT_SYMBOL(hinic3_free_ceq); + +int init_cfg_mgmt(struct hinic3_hwdev *dev) +{ + int err; + struct cfg_mgmt_info *cfg_mgmt; + + cfg_mgmt = kzalloc(sizeof(*cfg_mgmt), GFP_KERNEL); + if (!cfg_mgmt) + return -ENOMEM; + + dev->cfg_mgmt = cfg_mgmt; + cfg_mgmt->hwdev = dev; + + err = cfg_init_eq(dev); + if (err) { + sdk_err(dev->dev_hdl, "Failed to init cfg event queue, err: %d\n", + err); + goto free_mgmt_mem; + } + + err = cfg_init_interrupt(dev); + if (err) { + sdk_err(dev->dev_hdl, "Failed to init cfg interrupt, err: %d\n", + err); + goto free_eq_mem; + } + + err = cfg_enable_interrupt(dev); + if (err) { + sdk_err(dev->dev_hdl, "Failed to enable cfg interrupt, err: %d\n", + err); + goto free_interrupt_mem; + } + + return 0; + +free_interrupt_mem: + kfree(cfg_mgmt->irq_param_info.alloc_info); + mutex_deinit(&((cfg_mgmt->irq_param_info).irq_mutex)); + cfg_mgmt->irq_param_info.alloc_info = NULL; + +free_eq_mem: + kfree(cfg_mgmt->eq_info.eq); + mutex_deinit(&cfg_mgmt->eq_info.eq_mutex); + cfg_mgmt->eq_info.eq = NULL; + +free_mgmt_mem: + kfree(cfg_mgmt); + return err; +} + +void free_cfg_mgmt(struct hinic3_hwdev *dev) +{ + struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt; + + /* check if all allocated resources were recycled */ + if (cfg_mgmt->irq_param_info.num_irq_remain != + cfg_mgmt->irq_param_info.num_total || + cfg_mgmt->eq_info.num_ceq_remain != cfg_mgmt->eq_info.num_ceq) + sdk_err(dev->dev_hdl, "Can't reclaim all irqs and event queues, please check\n"); + + switch (cfg_mgmt->svc_cap.interrupt_type) { + case INTR_TYPE_MSIX: + pci_disable_msix(dev->pcidev_hdl); + break; + + case 
INTR_TYPE_MSI: + pci_disable_msi(dev->pcidev_hdl); + break; + + case INTR_TYPE_INT: + default: + break; + } + + kfree(cfg_mgmt->irq_param_info.alloc_info); + cfg_mgmt->irq_param_info.alloc_info = NULL; + mutex_deinit(&((cfg_mgmt->irq_param_info).irq_mutex)); + + kfree(cfg_mgmt->eq_info.eq); + cfg_mgmt->eq_info.eq = NULL; + mutex_deinit(&cfg_mgmt->eq_info.eq_mutex); + + kfree(cfg_mgmt); +} + +int init_capability(struct hinic3_hwdev *dev) +{ + int err; + struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt; + + cfg_mgmt->svc_cap.sf_svc_attr.ft_pf_en = false; + cfg_mgmt->svc_cap.sf_svc_attr.rdma_pf_en = false; + + err = hinic3_get_dev_cap(dev); + if (err != 0) + return err; + + init_service_param(dev); + + sdk_info(dev->dev_hdl, "Init capability success\n"); + return 0; +} + +void free_capability(struct hinic3_hwdev *dev) +{ + sdk_info(dev->dev_hdl, "Free capability success"); +} + +bool hinic3_support_nic(void *hwdev, struct nic_service_cap *cap) +{ + struct hinic3_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (!IS_NIC_TYPE(dev)) + return false; + + if (cap) + memcpy(cap, &dev->cfg_mgmt->svc_cap.nic_cap, sizeof(*cap)); + + return true; +} +EXPORT_SYMBOL(hinic3_support_nic); + +bool hinic3_support_ppa(void *hwdev, struct ppa_service_cap *cap) +{ + struct hinic3_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (!IS_PPA_TYPE(dev)) + return false; + + if (cap) + memcpy(cap, &dev->cfg_mgmt->svc_cap.ppa_cap, sizeof(*cap)); + + return true; +} +EXPORT_SYMBOL(hinic3_support_ppa); + +bool hinic3_support_migr(void *hwdev, struct migr_service_cap *cap) +{ + struct hinic3_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (!IS_MIGR_TYPE(dev)) + return false; + + if (cap) + cap->master_host_id = dev->cfg_mgmt->svc_cap.master_host_id; + + return true; +} +EXPORT_SYMBOL(hinic3_support_migr); + +bool hinic3_support_ipsec(void *hwdev, struct ipsec_service_cap *cap) +{ + struct hinic3_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (!IS_IPSEC_TYPE(dev)) + return false; + + if (cap) + memcpy(cap, &dev->cfg_mgmt->svc_cap.ipsec_cap, sizeof(*cap)); + + return true; +} +EXPORT_SYMBOL(hinic3_support_ipsec); + +bool hinic3_support_roce(void *hwdev, struct rdma_service_cap *cap) +{ + struct hinic3_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (!IS_ROCE_TYPE(dev)) + return false; + + if (cap) + memcpy(cap, &dev->cfg_mgmt->svc_cap.rdma_cap, sizeof(*cap)); + + return true; +} +EXPORT_SYMBOL(hinic3_support_roce); + +bool hinic3_support_fc(void *hwdev, struct fc_service_cap *cap) +{ + struct hinic3_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (!IS_FC_TYPE(dev)) + return false; + + if (cap) + memcpy(cap, &dev->cfg_mgmt->svc_cap.fc_cap, sizeof(*cap)); + + return true; +} +EXPORT_SYMBOL(hinic3_support_fc); + +bool hinic3_support_rdma(void *hwdev, struct rdma_service_cap *cap) +{ + struct hinic3_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (!IS_RDMA_TYPE(dev)) + return false; + + if (cap) + memcpy(cap, &dev->cfg_mgmt->svc_cap.rdma_cap, sizeof(*cap)); + + return true; +} +EXPORT_SYMBOL(hinic3_support_rdma); + +bool hinic3_support_ovs(void *hwdev, struct ovs_service_cap *cap) +{ + struct hinic3_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (!IS_OVS_TYPE(dev)) + return false; + + if (cap) + memcpy(cap, &dev->cfg_mgmt->svc_cap.ovs_cap, sizeof(*cap)); + + return true; +} +EXPORT_SYMBOL(hinic3_support_ovs); + +bool hinic3_support_vbs(void *hwdev, struct vbs_service_cap *cap) +{ + struct hinic3_hwdev *dev = hwdev; + + if (!hwdev) + return 
false; + + if (!IS_VBS_TYPE(dev)) + return false; + + if (cap) + memcpy(cap, &dev->cfg_mgmt->svc_cap.vbs_cap, sizeof(*cap)); + + return true; +} +EXPORT_SYMBOL(hinic3_support_vbs); + +/* Only PPF support it, PF is not */ +bool hinic3_support_toe(void *hwdev, struct toe_service_cap *cap) +{ + struct hinic3_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (!IS_TOE_TYPE(dev)) + return false; + + if (cap) + memcpy(cap, &dev->cfg_mgmt->svc_cap.toe_cap, sizeof(*cap)); + + return true; +} +EXPORT_SYMBOL(hinic3_support_toe); + +bool hinic3_func_for_mgmt(void *hwdev) +{ + struct hinic3_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (dev->cfg_mgmt->svc_cap.chip_svc_type) + return false; + else + return true; +} + +bool hinic3_get_stateful_enable(void *hwdev) +{ + struct hinic3_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + return dev->cfg_mgmt->svc_cap.sf_en; +} +EXPORT_SYMBOL(hinic3_get_stateful_enable); + +u8 hinic3_host_oq_id_mask(void *hwdev) +{ + struct hinic3_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting host oq id mask\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.host_oq_id_mask_val; +} +EXPORT_SYMBOL(hinic3_host_oq_id_mask); + +u8 hinic3_host_id(void *hwdev) +{ + struct hinic3_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting host id\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.host_id; +} +EXPORT_SYMBOL(hinic3_host_id); + +u16 hinic3_host_total_func(void *hwdev) +{ + struct hinic3_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting host total function number\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.host_total_function; +} +EXPORT_SYMBOL(hinic3_host_total_func); + +u16 hinic3_func_max_qnum(void *hwdev) +{ + struct hinic3_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting function max queue number\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.nic_cap.max_sqs; +} +EXPORT_SYMBOL(hinic3_func_max_qnum); + +u16 hinic3_func_max_nic_qnum(void *hwdev) +{ + struct hinic3_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting function max queue number\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.nic_cap.max_sqs; +} +EXPORT_SYMBOL(hinic3_func_max_nic_qnum); + +u8 hinic3_ep_id(void *hwdev) +{ + struct hinic3_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting ep id\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.ep_id; +} +EXPORT_SYMBOL(hinic3_ep_id); + +u8 hinic3_er_id(void *hwdev) +{ + struct hinic3_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting er id\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.er_id; +} +EXPORT_SYMBOL(hinic3_er_id); + +u8 hinic3_physical_port_id(void *hwdev) +{ + struct hinic3_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting physical port id\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.port_id; +} +EXPORT_SYMBOL(hinic3_physical_port_id); + +u16 hinic3_func_max_vf(void *hwdev) +{ + struct hinic3_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting max vf number\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.max_vf; +} +EXPORT_SYMBOL(hinic3_func_max_vf); + +int hinic3_cos_valid_bitmap(void *hwdev, u8 *func_dft_cos, u8 *port_cos_bitmap) +{ + struct hinic3_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting cos valid bitmap\n"); + return 1; + } + *func_dft_cos = 
dev->cfg_mgmt->svc_cap.cos_valid_bitmap; + *port_cos_bitmap = dev->cfg_mgmt->svc_cap.port_cos_valid_bitmap; + + return 0; +} +EXPORT_SYMBOL(hinic3_cos_valid_bitmap); + +void hinic3_shutdown_hwdev(void *hwdev) +{ + struct hinic3_hwdev *dev = hwdev; + + if (!hwdev) + return; + + if (IS_SLAVE_HOST(dev)) + set_slave_host_enable(hwdev, hinic3_pcie_itf_id(hwdev), false); +} + +u32 hinic3_host_pf_num(void *hwdev) +{ + struct hinic3_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting pf number capability\n"); + return 0; + } + + return dev->cfg_mgmt->svc_cap.pf_num; +} +EXPORT_SYMBOL(hinic3_host_pf_num); + +u32 hinic3_host_pf_id_start(void *hwdev) +{ + struct hinic3_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting pf id start capability\n"); + return 0; + } + + return dev->cfg_mgmt->svc_cap.pf_id_start; +} +EXPORT_SYMBOL(hinic3_host_pf_id_start); + +u8 hinic3_flexq_en(void *hwdev) +{ + struct hinic3_hwdev *dev = hwdev; + + if (!hwdev) + return 0; + + return dev->cfg_mgmt->svc_cap.flexq_en; +} +EXPORT_SYMBOL(hinic3_flexq_en); + diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_cfg.h b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_cfg.h new file mode 100644 index 000000000000..0a27530ba522 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_cfg.h @@ -0,0 +1,332 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC3_HW_CFG_H +#define HINIC3_HW_CFG_H + +#include <linux/types.h> +#include "cfg_mgt_comm_pub.h" +#include "hinic3_hwdev.h" + +#define CFG_MAX_CMD_TIMEOUT 30000 /* ms */ + +enum { + CFG_FREE = 0, + CFG_BUSY = 1 +}; + +/* start position for CEQs allocation, Max number of CEQs is 32 */ +/*lint -save -e849*/ +enum { + CFG_RDMA_CEQ_BASE = 0 +}; + +/*lint -restore*/ + +/* RDMA resource */ +#define K_UNIT BIT(10) +#define M_UNIT BIT(20) +#define G_UNIT BIT(30) + +#define VIRTIO_BASE_VQ_SIZE 2048U +#define VIRTIO_DEFAULT_VQ_SIZE 8192U + +/* L2NIC */ +#define HINIC3_CFG_MAX_QP 256 + +/* RDMA */ +#define RDMA_RSVD_QPS 2 +#define ROCE_MAX_WQES (8 * K_UNIT - 1) +#define IWARP_MAX_WQES (8 * K_UNIT) + +#define RDMA_MAX_SQ_SGE 16 + +#define ROCE_MAX_RQ_SGE 16 + +/* value changed should change ROCE_MAX_WQE_BB_PER_WR synchronously */ +#define RDMA_MAX_SQ_DESC_SZ (256) + +/* (256B(cache_line_len) - 16B(ctrl_seg_len) - 48B(max_task_seg_len)) */ +#define ROCE_MAX_SQ_INLINE_DATA_SZ 192 + +#define ROCE_MAX_RQ_DESC_SZ 256 + +#define ROCE_QPC_ENTRY_SZ 512 + +#define WQEBB_SZ 64 + +#define ROCE_RDMARC_ENTRY_SZ 32 +#define ROCE_MAX_QP_INIT_RDMA 128 +#define ROCE_MAX_QP_DEST_RDMA 128 + +#define ROCE_MAX_SRQ_WQES (16 * K_UNIT - 1) +#define ROCE_RSVD_SRQS 0 +#define ROCE_MAX_SRQ_SGE 15 +#define ROCE_SRQC_ENTERY_SZ 64 + +#define RDMA_MAX_CQES (8 * M_UNIT - 1) +#define RDMA_RSVD_CQS 0 + +#define RDMA_CQC_ENTRY_SZ 128 + +#define RDMA_CQE_SZ 64 +#define RDMA_RSVD_MRWS 128 +#define RDMA_MPT_ENTRY_SZ 64 +#define RDMA_NUM_MTTS (1 * G_UNIT) +#define LOG_MTT_SEG 5 +#define MTT_ENTRY_SZ 8 +#define LOG_RDMARC_SEG 3 + +#define LOCAL_ACK_DELAY 15 +#define RDMA_NUM_PORTS 1 +#define ROCE_MAX_MSG_SZ (2 * G_UNIT) + +#define DB_PAGE_SZ (4 * K_UNIT) +#define DWQE_SZ 256 + +#define NUM_PD (128 * K_UNIT) +#define RSVD_PD 0 + +#define MAX_XRCDS (64 * K_UNIT) +#define RSVD_XRCDS 0 + +#define MAX_GID_PER_PORT 128 +#define GID_ENTRY_SZ 32 +#define RSVD_LKEY ((RDMA_RSVD_MRWS - 1) << 8) +#define NUM_COMP_VECTORS 32 +#define PAGE_SZ_CAP ((1UL << 12) | (1UL << 16) | (1UL << 21)) +#define ROCE_MODE 1 + 
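The constants above encode sizing rules that are easy to break silently when tuning: ring depths of the form 2^n - 1, and RSVD_LKEY derived from RDMA_RSVD_MRWS. A hedged sketch of compile-time checks one could add; hinic3_rdma_cap_asserts() is hypothetical and not part of this patch:

#include <linux/build_bug.h>

/* Illustrative compile-time guards for the rules the constants imply. */
static inline void hinic3_rdma_cap_asserts(void)
{
	/* ROCE_MAX_WQES is a 2^n - 1 style depth (8K - 1). */
	BUILD_BUG_ON((ROCE_MAX_WQES + 1) & ROCE_MAX_WQES);
	/* The reserved lkey is derived from the reserved MRW count. */
	BUILD_BUG_ON(RSVD_LKEY != ((RDMA_RSVD_MRWS - 1) << 8));
}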
+#define MAX_FRPL_LEN 511 +#define MAX_PKEYS 1 + +/* ToE */ +#define TOE_PCTX_SZ 1024 +#define TOE_CQC_SZ 64 + +/* IoE */ +#define IOE_PCTX_SZ 512 + +/* FC */ +#define FC_PCTX_SZ 256 +#define FC_CCTX_SZ 256 +#define FC_SQE_SZ 128 +#define FC_SCQC_SZ 64 +#define FC_SCQE_SZ 64 +#define FC_SRQC_SZ 64 +#define FC_SRQE_SZ 32 + +/* OVS */ +#define OVS_PCTX_SZ 512 + +/* PPA */ +#define PPA_PCTX_SZ 512 + +/* IPsec */ +#define IPSEC_SACTX_SZ 512 + +struct dev_sf_svc_attr { + bool ft_en; /* business enable flag (not include RDMA) */ + bool ft_pf_en; /* In FPGA Test VF resource is in PF or not, + * 0 - VF, 1 - PF, VF doesn't need this bit. + */ + bool rdma_en; + bool rdma_pf_en;/* In FPGA Test VF RDMA resource is in PF or not, + * 0 - VF, 1 - PF, VF doesn't need this bit. + */ +}; + +enum intr_type { + INTR_TYPE_MSIX, + INTR_TYPE_MSI, + INTR_TYPE_INT, + INTR_TYPE_NONE, + /* PXE,OVS need single thread processing, + * synchronization messages must use poll wait mechanism interface + */ +}; + +/* device capability */ +struct service_cap { + struct dev_sf_svc_attr sf_svc_attr; + u16 svc_type; /* user input service type */ + u16 chip_svc_type; /* HW supported service type, reference to servic_bit_define_e */ + + u8 host_id; + u8 ep_id; + u8 er_id; /* PF/VF's ER */ + u8 port_id; /* PF/VF's physical port */ + + /* Host global resources */ + u16 host_total_function; + u8 pf_num; + u8 pf_id_start; + u16 vf_num; /* max numbers of vf in current host */ + u16 vf_id_start; + u8 host_oq_id_mask_val; + u8 host_valid_bitmap; + u8 master_host_id; + u8 srv_multi_host_mode; + u16 virtio_vq_size; + + u8 timer_pf_num; + u8 timer_pf_id_start; + u16 timer_vf_num; + u16 timer_vf_id_start; + + u8 flexq_en; + u8 cos_valid_bitmap; + u8 port_cos_valid_bitmap; + u16 max_vf; /* max VF number that PF supported */ + + u16 fake_vf_start_id; + u16 fake_vf_num; + u32 fake_vf_max_pctx; + u16 fake_vf_bfilter_start_addr; + u16 fake_vf_bfilter_len; + + u16 fake_vf_num_cfg; + + /* DO NOT get interrupt_type from firmware */ + enum intr_type interrupt_type; + + bool sf_en; /* stateful business status */ + u8 timer_en; /* 0:disable, 1:enable */ + u8 bloomfilter_en; /* 0:disable, 1:enable */ + + u8 lb_mode; + u8 smf_pg; + + /* For test */ + u32 test_mode; + u32 test_qpc_num; + u32 test_qpc_resvd_num; + u32 test_page_size_reorder; + bool test_xid_alloc_mode; + bool test_gpa_check_enable; + u8 test_qpc_alloc_mode; + u8 test_scqc_alloc_mode; + + u32 test_max_conn_num; + u32 test_max_cache_conn_num; + u32 test_scqc_num; + u32 test_mpt_num; + u32 test_scq_resvd_num; + u32 test_mpt_recvd_num; + u32 test_hash_num; + u32 test_reorder_num; + + u32 max_connect_num; /* PF/VF maximum connection number(1M) */ + /* The maximum connections which can be stick to cache memory, max 1K */ + u16 max_stick2cache_num; + /* Starting address in cache memory for bloom filter, 64Bytes aligned */ + u16 bfilter_start_addr; + /* Length for bloom filter, aligned on 64Bytes. The size is length*64B. + * Bloom filter memory size + 1 must be power of 2. + * The maximum memory size of bloom filter is 4M + */ + u16 bfilter_len; + /* The size of hash bucket tables, align on 64 entries. + * Be used to AND (&) the hash value. Bucket Size +1 must be power of 2. 
+ * The maximum number of hash bucket is 4M + */ + u16 hash_bucket_num; + + struct nic_service_cap nic_cap; /* NIC capability */ + struct rdma_service_cap rdma_cap; /* RDMA capability */ + struct fc_service_cap fc_cap; /* FC capability */ + struct toe_service_cap toe_cap; /* ToE capability */ + struct ovs_service_cap ovs_cap; /* OVS capability */ + struct ipsec_service_cap ipsec_cap; /* IPsec capability */ + struct ppa_service_cap ppa_cap; /* PPA capability */ + struct vbs_service_cap vbs_cap; /* VBS capability */ +}; + +struct svc_cap_info { + u32 func_idx; + struct service_cap cap; +}; + +struct cfg_eq { + enum hinic3_service_type type; + int eqn; + int free; /* 1 - allocated, 0 - freed */ +}; + +struct cfg_eq_info { + struct cfg_eq *eq; + + u8 num_ceq; + + u8 num_ceq_remain; + + /* mutex used for allocating EQs */ + struct mutex eq_mutex; +}; + +struct irq_alloc_info_st { + enum hinic3_service_type type; + int free; /* 1 - allocated, 0 - freed */ + struct irq_info info; +}; + +struct cfg_irq_info { + struct irq_alloc_info_st *alloc_info; + u16 num_total; + u16 num_irq_remain; + u16 num_irq_hw; /* device max irq number */ + + /* mutex used for allocating IRQs */ + struct mutex irq_mutex; +}; + +#define VECTOR_THRESHOLD 2 + +struct cfg_mgmt_info { + struct hinic3_hwdev *hwdev; + struct service_cap svc_cap; + struct cfg_eq_info eq_info; /* EQ */ + struct cfg_irq_info irq_param_info; /* IRQ */ + u32 func_seq_num; /* temporary */ +}; + +#define CFG_SERVICE_FT_EN (CFG_SERVICE_MASK_VBS | CFG_SERVICE_MASK_TOE | \ + CFG_SERVICE_MASK_IPSEC | CFG_SERVICE_MASK_FC | \ + CFG_SERVICE_MASK_VIRTIO | CFG_SERVICE_MASK_OVS) +#define CFG_SERVICE_RDMA_EN CFG_SERVICE_MASK_ROCE + +#define IS_NIC_TYPE(dev) \ + (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SERVICE_MASK_NIC) +#define IS_ROCE_TYPE(dev) \ + (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SERVICE_MASK_ROCE) +#define IS_VBS_TYPE(dev) \ + (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SERVICE_MASK_VBS) +#define IS_TOE_TYPE(dev) \ + (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SERVICE_MASK_TOE) +#define IS_IPSEC_TYPE(dev) \ + (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SERVICE_MASK_IPSEC) +#define IS_FC_TYPE(dev) \ + (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SERVICE_MASK_FC) +#define IS_OVS_TYPE(dev) \ + (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SERVICE_MASK_OVS) +#define IS_FT_TYPE(dev) \ + (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SERVICE_FT_EN) +#define IS_RDMA_TYPE(dev) \ + (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SERVICE_RDMA_EN) +#define IS_RDMA_ENABLE(dev) \ + ((dev)->cfg_mgmt->svc_cap.sf_svc_attr.rdma_en) +#define IS_PPA_TYPE(dev) \ + (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SERVICE_MASK_PPA) +#define IS_MIGR_TYPE(dev) \ + (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SERVICE_MASK_MIGRATE) + +int init_cfg_mgmt(struct hinic3_hwdev *dev); + +void free_cfg_mgmt(struct hinic3_hwdev *dev); + +int init_capability(struct hinic3_hwdev *dev); + +void free_capability(struct hinic3_hwdev *dev); + +#endif + diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_comm.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_comm.c new file mode 100644 index 000000000000..f207408b19d6 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_comm.c @@ -0,0 +1,1540 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/msi.h> +#include <linux/types.h> 
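The IS_*_TYPE() macros in the header above all test bits of chip_svc_type, and the composite masks (CFG_SERVICE_FT_EN, CFG_SERVICE_RDMA_EN) make IS_FT_TYPE() and IS_RDMA_TYPE() OR-checks over several services. A small sketch of how they compose (svc_checks_example() is hypothetical):

/* Illustrative: composition of the service-bitmap checks. */
static void svc_checks_example(struct hinic3_hwdev *dev)
{
	if (IS_NIC_TYPE(dev) && IS_ROCE_TYPE(dev))
		pr_info("NIC + RoCE provisioned\n");

	/* IS_FT_TYPE() is an OR over the stateful service masks. */
	if (IS_FT_TYPE(dev) && !hinic3_get_stateful_enable(dev))
		pr_warn("stateful service present but not enabled\n");
}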
+#include <linux/delay.h> +#include <linux/module.h> +#include <linux/semaphore.h> +#include <linux/interrupt.h> + +#include "ossl_knl.h" +#include "hinic3_crm.h" +#include "hinic3_hw.h" +#include "hinic3_common.h" +#include "hinic3_csr.h" +#include "hinic3_hwdev.h" +#include "hinic3_hwif.h" +#include "hinic3_mgmt.h" +#include "hinic3_hw_cfg.h" +#include "hinic3_cmdq.h" +#include "comm_msg_intf.h" +#include "hinic3_hw_comm.h" + +#define HINIC3_MSIX_CNT_LLI_TIMER_SHIFT 0 +#define HINIC3_MSIX_CNT_LLI_CREDIT_SHIFT 8 +#define HINIC3_MSIX_CNT_COALESC_TIMER_SHIFT 8 +#define HINIC3_MSIX_CNT_PENDING_SHIFT 8 +#define HINIC3_MSIX_CNT_RESEND_TIMER_SHIFT 29 + +#define HINIC3_MSIX_CNT_LLI_TIMER_MASK 0xFFU +#define HINIC3_MSIX_CNT_LLI_CREDIT_MASK 0xFFU +#define HINIC3_MSIX_CNT_COALESC_TIMER_MASK 0xFFU +#define HINIC3_MSIX_CNT_PENDING_MASK 0x1FU +#define HINIC3_MSIX_CNT_RESEND_TIMER_MASK 0x7U + +#define HINIC3_MSIX_CNT_SET(val, member) \ + (((val) & HINIC3_MSIX_CNT_##member##_MASK) << \ + HINIC3_MSIX_CNT_##member##_SHIFT) + +#define DEFAULT_RX_BUF_SIZE ((u16)0xB) + +enum hinic3_rx_buf_size { + HINIC3_RX_BUF_SIZE_32B = 0x20, + HINIC3_RX_BUF_SIZE_64B = 0x40, + HINIC3_RX_BUF_SIZE_96B = 0x60, + HINIC3_RX_BUF_SIZE_128B = 0x80, + HINIC3_RX_BUF_SIZE_192B = 0xC0, + HINIC3_RX_BUF_SIZE_256B = 0x100, + HINIC3_RX_BUF_SIZE_384B = 0x180, + HINIC3_RX_BUF_SIZE_512B = 0x200, + HINIC3_RX_BUF_SIZE_768B = 0x300, + HINIC3_RX_BUF_SIZE_1K = 0x400, + HINIC3_RX_BUF_SIZE_1_5K = 0x600, + HINIC3_RX_BUF_SIZE_2K = 0x800, + HINIC3_RX_BUF_SIZE_3K = 0xC00, + HINIC3_RX_BUF_SIZE_4K = 0x1000, + HINIC3_RX_BUF_SIZE_8K = 0x2000, + HINIC3_RX_BUF_SIZE_16K = 0x4000, +}; + +const int hinic3_hw_rx_buf_size[] = { + HINIC3_RX_BUF_SIZE_32B, + HINIC3_RX_BUF_SIZE_64B, + HINIC3_RX_BUF_SIZE_96B, + HINIC3_RX_BUF_SIZE_128B, + HINIC3_RX_BUF_SIZE_192B, + HINIC3_RX_BUF_SIZE_256B, + HINIC3_RX_BUF_SIZE_384B, + HINIC3_RX_BUF_SIZE_512B, + HINIC3_RX_BUF_SIZE_768B, + HINIC3_RX_BUF_SIZE_1K, + HINIC3_RX_BUF_SIZE_1_5K, + HINIC3_RX_BUF_SIZE_2K, + HINIC3_RX_BUF_SIZE_3K, + HINIC3_RX_BUF_SIZE_4K, + HINIC3_RX_BUF_SIZE_8K, + HINIC3_RX_BUF_SIZE_16K, +}; + +static inline int comm_msg_to_mgmt_sync(struct hinic3_hwdev *hwdev, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) +{ + return hinic3_msg_to_mgmt_sync(hwdev, HINIC3_MOD_COMM, cmd, buf_in, + in_size, buf_out, out_size, 0, + HINIC3_CHANNEL_COMM); +} + +static inline int comm_msg_to_mgmt_sync_ch(struct hinic3_hwdev *hwdev, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, u16 channel) +{ + return hinic3_msg_to_mgmt_sync(hwdev, HINIC3_MOD_COMM, cmd, buf_in, + in_size, buf_out, out_size, 0, channel); +} + +int hinic3_get_interrupt_cfg(void *dev, struct interrupt_info *info, + u16 channel) +{ + struct hinic3_hwdev *hwdev = dev; + struct comm_cmd_msix_config msix_cfg; + u16 out_size = sizeof(msix_cfg); + int err; + + if (!hwdev || !info) + return -EINVAL; + + memset(&msix_cfg, 0, sizeof(msix_cfg)); + msix_cfg.func_id = hinic3_global_func_id(hwdev); + msix_cfg.msix_index = info->msix_index; + msix_cfg.opcode = MGMT_MSG_CMD_OP_GET; + + err = comm_msg_to_mgmt_sync_ch(hwdev, COMM_MGMT_CMD_CFG_MSIX_CTRL_REG, + &msix_cfg, sizeof(msix_cfg), &msix_cfg, + &out_size, channel); + if (err || !out_size || msix_cfg.head.status) { + sdk_err(hwdev->dev_hdl, "Failed to get interrupt config, err: %d, status: 0x%x, out size: 0x%x, channel: 0x%x\n", + err, msix_cfg.head.status, out_size, channel); + return -EINVAL; + } + + info->lli_credit_limit = msix_cfg.lli_credit_cnt; + info->lli_timer_cfg = 
msix_cfg.lli_timer_cnt; + info->pending_limt = msix_cfg.pending_cnt; + info->coalesc_timer_cfg = msix_cfg.coalesce_timer_cnt; + info->resend_timer_cfg = msix_cfg.resend_timer_cnt; + + return 0; +} + +int hinic3_set_interrupt_cfg_direct(void *hwdev, struct interrupt_info *info, + u16 channel) +{ + struct comm_cmd_msix_config msix_cfg; + u16 out_size = sizeof(msix_cfg); + int err; + + if (!hwdev) + return -EINVAL; + + memset(&msix_cfg, 0, sizeof(msix_cfg)); + msix_cfg.func_id = hinic3_global_func_id(hwdev); + msix_cfg.msix_index = (u16)info->msix_index; + msix_cfg.opcode = MGMT_MSG_CMD_OP_SET; + + msix_cfg.lli_credit_cnt = info->lli_credit_limit; + msix_cfg.lli_timer_cnt = info->lli_timer_cfg; + msix_cfg.pending_cnt = info->pending_limt; + msix_cfg.coalesce_timer_cnt = info->coalesc_timer_cfg; + msix_cfg.resend_timer_cnt = info->resend_timer_cfg; + + err = comm_msg_to_mgmt_sync_ch(hwdev, COMM_MGMT_CMD_CFG_MSIX_CTRL_REG, + &msix_cfg, sizeof(msix_cfg), &msix_cfg, + &out_size, channel); + if (err || !out_size || msix_cfg.head.status) { + sdk_err(((struct hinic3_hwdev *)hwdev)->dev_hdl, + "Failed to set interrupt config, err: %d, status: 0x%x, out size: 0x%x, channel: 0x%x\n", + err, msix_cfg.head.status, out_size, channel); + return -EINVAL; + } + + return 0; +} + +int hinic3_set_interrupt_cfg(void *dev, struct interrupt_info info, u16 channel) +{ + struct interrupt_info temp_info; + struct hinic3_hwdev *hwdev = dev; + int err; + + if (!hwdev) + return -EINVAL; + + temp_info.msix_index = info.msix_index; + + err = hinic3_get_interrupt_cfg(hwdev, &temp_info, channel); + if (err) + return -EINVAL; + + if (!info.lli_set) { + info.lli_credit_limit = temp_info.lli_credit_limit; + info.lli_timer_cfg = temp_info.lli_timer_cfg; + } + + if (!info.interrupt_coalesc_set) { + info.pending_limt = temp_info.pending_limt; + info.coalesc_timer_cfg = temp_info.coalesc_timer_cfg; + info.resend_timer_cfg = temp_info.resend_timer_cfg; + } + + return hinic3_set_interrupt_cfg_direct(hwdev, &info, channel); +} +EXPORT_SYMBOL(hinic3_set_interrupt_cfg); + +void hinic3_misx_intr_clear_resend_bit(void *hwdev, u16 msix_idx, + u8 clear_resend_en) +{ + struct hinic3_hwif *hwif = NULL; + u32 msix_ctrl = 0, addr; + + if (!hwdev) + return; + + hwif = ((struct hinic3_hwdev *)hwdev)->hwif; + + msix_ctrl = HINIC3_MSI_CLR_INDIR_SET(msix_idx, SIMPLE_INDIR_IDX) | + HINIC3_MSI_CLR_INDIR_SET(clear_resend_en, RESEND_TIMER_CLR); + + addr = HINIC3_CSR_FUNC_MSI_CLR_WR_ADDR; + hinic3_hwif_write_reg(hwif, addr, msix_ctrl); +} +EXPORT_SYMBOL(hinic3_misx_intr_clear_resend_bit); + +int hinic3_set_wq_page_size(void *hwdev, u16 func_idx, u32 page_size, + u16 channel) +{ + struct comm_cmd_wq_page_size page_size_info; + u16 out_size = sizeof(page_size_info); + int err; + + memset(&page_size_info, 0, sizeof(page_size_info)); + page_size_info.func_id = func_idx; + page_size_info.page_size = HINIC3_PAGE_SIZE_HW(page_size); + page_size_info.opcode = MGMT_MSG_CMD_OP_SET; + + err = comm_msg_to_mgmt_sync_ch(hwdev, COMM_MGMT_CMD_CFG_PAGESIZE, + &page_size_info, sizeof(page_size_info), + &page_size_info, &out_size, channel); + if (err || !out_size || page_size_info.head.status) { + sdk_err(((struct hinic3_hwdev *)hwdev)->dev_hdl, + "Failed to set wq page size, err: %d, status: 0x%x, out_size: 0x%0x, channel: 0x%x\n", + err, page_size_info.head.status, out_size, channel); + return -EFAULT; + } + + return 0; +} + +int hinic3_func_reset(void *dev, u16 func_id, u64 reset_flag, u16 channel) +{ + struct comm_cmd_func_reset func_reset; + struct hinic3_hwdev 
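hinic3_set_interrupt_cfg() above performs a read-modify-write: it first re-reads the MSI-X entry and preserves whichever field group the caller did not flag via lli_set / interrupt_coalesc_set, so partial updates are safe. A sketch of a caller updating only the coalescing group (the values are illustrative and set_coalesce_example() is hypothetical):

/* Illustrative: update only the coalescing fields of one MSI-X entry. */
static int set_coalesce_example(void *hwdev, u16 msix_index)
{
	struct interrupt_info info = { 0 };

	info.msix_index = msix_index;
	info.interrupt_coalesc_set = 1;	/* lli_set stays 0: LLI fields preserved */
	info.pending_limt = 2;
	info.coalesc_timer_cfg = 0x20;
	info.resend_timer_cfg = 0x7;

	return hinic3_set_interrupt_cfg(hwdev, info, HINIC3_CHANNEL_COMM);
}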
*hwdev = dev; + u16 out_size = sizeof(func_reset); + int err = 0; + + if (!dev) { + pr_err("Invalid para: dev is null.\n"); + return -EINVAL; + } + + sdk_info(hwdev->dev_hdl, "Function is reset, flag: 0x%llx, channel:0x%x\n", + reset_flag, channel); + + memset(&func_reset, 0, sizeof(func_reset)); + func_reset.func_id = func_id; + func_reset.reset_flag = reset_flag; + err = comm_msg_to_mgmt_sync_ch(hwdev, COMM_MGMT_CMD_FUNC_RESET, + &func_reset, sizeof(func_reset), + &func_reset, &out_size, channel); + if (err || !out_size || func_reset.head.status) { + sdk_err(hwdev->dev_hdl, "Failed to reset func resources, reset_flag 0x%llx, err: %d, status: 0x%x, out_size: 0x%x\n", + reset_flag, err, func_reset.head.status, out_size); + return -EIO; + } + + return 0; +} +EXPORT_SYMBOL(hinic3_func_reset); + +static u16 get_hw_rx_buf_size(int rx_buf_sz) +{ + u16 num_hw_types = + sizeof(hinic3_hw_rx_buf_size) / + sizeof(hinic3_hw_rx_buf_size[0]); + u16 i; + + for (i = 0; i < num_hw_types; i++) { + if (hinic3_hw_rx_buf_size[i] == rx_buf_sz) + return i; + } + + pr_err("Chip can't support rx buf size of %d\n", rx_buf_sz); + + return DEFAULT_RX_BUF_SIZE; /* default 2K */ +} + +int hinic3_set_root_ctxt(void *hwdev, u32 rq_depth, u32 sq_depth, int rx_buf_sz, + u16 channel) +{ + struct comm_cmd_root_ctxt root_ctxt; + u16 out_size = sizeof(root_ctxt); + int err; + + if (!hwdev) + return -EINVAL; + + memset(&root_ctxt, 0, sizeof(root_ctxt)); + root_ctxt.func_id = hinic3_global_func_id(hwdev); + + root_ctxt.set_cmdq_depth = 0; + root_ctxt.cmdq_depth = 0; + + root_ctxt.lro_en = 1; + + root_ctxt.rq_depth = (u16)ilog2(rq_depth); + root_ctxt.rx_buf_sz = get_hw_rx_buf_size(rx_buf_sz); + root_ctxt.sq_depth = (u16)ilog2(sq_depth); + + err = comm_msg_to_mgmt_sync_ch(hwdev, COMM_MGMT_CMD_SET_VAT, + &root_ctxt, sizeof(root_ctxt), + &root_ctxt, &out_size, channel); + if (err || !out_size || root_ctxt.head.status) { + sdk_err(((struct hinic3_hwdev *)hwdev)->dev_hdl, + "Failed to set root context, err: %d, status: 0x%x, out_size: 0x%x, channel: 0x%x\n", + err, root_ctxt.head.status, out_size, channel); + return -EFAULT; + } + + return 0; +} +EXPORT_SYMBOL(hinic3_set_root_ctxt); + +int hinic3_clean_root_ctxt(void *hwdev, u16 channel) +{ + struct comm_cmd_root_ctxt root_ctxt; + u16 out_size = sizeof(root_ctxt); + int err; + + if (!hwdev) + return -EINVAL; + + memset(&root_ctxt, 0, sizeof(root_ctxt)); + root_ctxt.func_id = hinic3_global_func_id(hwdev); + + err = comm_msg_to_mgmt_sync_ch(hwdev, COMM_MGMT_CMD_SET_VAT, + &root_ctxt, sizeof(root_ctxt), + &root_ctxt, &out_size, channel); + if (err || !out_size || root_ctxt.head.status) { + sdk_err(((struct hinic3_hwdev *)hwdev)->dev_hdl, + "Failed to set root context, err: %d, status: 0x%x, out_size: 0x%x, channel: 0x%x\n", + err, root_ctxt.head.status, out_size, channel); + return -EFAULT; + } + + return 0; +} +EXPORT_SYMBOL(hinic3_clean_root_ctxt); + +int hinic3_set_cmdq_depth(void *hwdev, u16 cmdq_depth) +{ + struct comm_cmd_root_ctxt root_ctxt; + u16 out_size = sizeof(root_ctxt); + int err; + + memset(&root_ctxt, 0, sizeof(root_ctxt)); + root_ctxt.func_id = hinic3_global_func_id(hwdev); + + root_ctxt.set_cmdq_depth = 1; + root_ctxt.cmdq_depth = (u8)ilog2(cmdq_depth); + + err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SET_VAT, &root_ctxt, + sizeof(root_ctxt), &root_ctxt, &out_size); + if (err || !out_size || root_ctxt.head.status) { + sdk_err(((struct hinic3_hwdev *)hwdev)->dev_hdl, + "Failed to set cmdq depth, err: %d, status: 0x%x, out_size: 0x%x\n", + err, root_ctxt.head.status, 
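Note that hinic3_set_root_ctxt() stores the depths as ilog2(depth), so a depth that is not a power of two is silently rounded down, and an rx_buf_sz not present in hinic3_hw_rx_buf_size[] falls back to the 2K index. A guarded caller might look like this (set_root_ctxt_checked() is hypothetical):

#include <linux/log2.h>

/* Illustrative guard: reject depths that ilog2() would truncate. */
static int set_root_ctxt_checked(void *hwdev, u32 rq_depth, u32 sq_depth)
{
	if (!is_power_of_2(rq_depth) || !is_power_of_2(sq_depth))
		return -EINVAL;

	/* 0x800 (2K) is an entry of hinic3_hw_rx_buf_size[]. */
	return hinic3_set_root_ctxt(hwdev, rq_depth, sq_depth, 0x800,
				    HINIC3_CHANNEL_COMM);
}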
out_size); + return -EFAULT; + } + + return 0; +} + +int hinic3_set_cmdq_ctxt(struct hinic3_hwdev *hwdev, u8 cmdq_id, + struct cmdq_ctxt_info *ctxt) +{ + struct comm_cmd_cmdq_ctxt cmdq_ctxt; + u16 out_size = sizeof(cmdq_ctxt); + int err; + + memset(&cmdq_ctxt, 0, sizeof(cmdq_ctxt)); + memcpy(&cmdq_ctxt.ctxt, ctxt, sizeof(*ctxt)); + cmdq_ctxt.func_id = hinic3_global_func_id(hwdev); + cmdq_ctxt.cmdq_id = cmdq_id; + + err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SET_CMDQ_CTXT, + &cmdq_ctxt, sizeof(cmdq_ctxt), + &cmdq_ctxt, &out_size); + if (err || !out_size || cmdq_ctxt.head.status) { + sdk_err(hwdev->dev_hdl, "Failed to set cmdq ctxt, err: %d, status: 0x%x, out_size: 0x%x\n", + err, cmdq_ctxt.head.status, out_size); + return -EFAULT; + } + + return 0; +} + +int hinic3_set_ceq_ctrl_reg(struct hinic3_hwdev *hwdev, u16 q_id, + u32 ctrl0, u32 ctrl1) +{ + struct comm_cmd_ceq_ctrl_reg ceq_ctrl; + u16 out_size = sizeof(ceq_ctrl); + int err; + + memset(&ceq_ctrl, 0, sizeof(ceq_ctrl)); + ceq_ctrl.func_id = hinic3_global_func_id(hwdev); + ceq_ctrl.q_id = q_id; + ceq_ctrl.ctrl0 = ctrl0; + ceq_ctrl.ctrl1 = ctrl1; + + err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SET_CEQ_CTRL_REG, + &ceq_ctrl, sizeof(ceq_ctrl), + &ceq_ctrl, &out_size); + if (err || !out_size || ceq_ctrl.head.status) { + sdk_err(hwdev->dev_hdl, "Failed to set ceq %u ctrl reg, err: %d status: 0x%x, out_size: 0x%x\n", + q_id, err, ceq_ctrl.head.status, out_size); + return -EFAULT; + } + + return 0; +} + +int hinic3_set_dma_attr_tbl(struct hinic3_hwdev *hwdev, u8 entry_idx, u8 st, u8 at, u8 ph, + u8 no_snooping, u8 tph_en) +{ + struct comm_cmd_dma_attr_config dma_attr; + u16 out_size = sizeof(dma_attr); + int err; + + memset(&dma_attr, 0, sizeof(dma_attr)); + dma_attr.func_id = hinic3_global_func_id(hwdev); + dma_attr.entry_idx = entry_idx; + dma_attr.st = st; + dma_attr.at = at; + dma_attr.ph = ph; + dma_attr.no_snooping = no_snooping; + dma_attr.tph_en = tph_en; + + err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SET_DMA_ATTR, &dma_attr, sizeof(dma_attr), + &dma_attr, &out_size); + if (err || !out_size || dma_attr.head.status) { + sdk_err(hwdev->dev_hdl, "Failed to set dma attr, err: %d, status: 0x%x, out_size: 0x%x\n", + err, dma_attr.head.status, out_size); + return -EIO; + } + + return 0; +} + +int hinic3_set_bdf_ctxt(void *hwdev, u8 bus, u8 device, u8 function) +{ + struct comm_cmd_bdf_info bdf_info; + u16 out_size = sizeof(bdf_info); + int err; + + if (!hwdev) + return -EINVAL; + + memset(&bdf_info, 0, sizeof(bdf_info)); + bdf_info.function_idx = hinic3_global_func_id(hwdev); + bdf_info.bus = bus; + bdf_info.device = device; + bdf_info.function = function; + + err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SEND_BDF_INFO, + &bdf_info, sizeof(bdf_info), + &bdf_info, &out_size); + if (err || !out_size || bdf_info.head.status) { + sdk_err(((struct hinic3_hwdev *)hwdev)->dev_hdl, + "Failed to set bdf info to MPU, err: %d, status: 0x%x, out_size: 0x%x\n", + err, bdf_info.head.status, out_size); + return -EIO; + } + + return 0; +} + +int hinic3_sync_time(void *hwdev, u64 time) +{ + struct comm_cmd_sync_time time_info; + u16 out_size = sizeof(time_info); + int err; + + memset(&time_info, 0, sizeof(time_info)); + time_info.mstime = time; + err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SYNC_TIME, &time_info, + sizeof(time_info), &time_info, &out_size); + if (err || time_info.head.status || !out_size) { + sdk_err(((struct hinic3_hwdev *)hwdev)->dev_hdl, + "Failed to sync time to mgmt, err: %d, status: 0x%x, out size: 0x%x\n", + err, 
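hinic3_sync_time() takes wall-clock milliseconds in mstime. A sketch of the caller-side computation using the standard ktime API (sync_time_example() is hypothetical):

#include <linux/ktime.h>
#include <linux/math64.h>

/* Illustrative: push wall-clock milliseconds to the management CPU. */
static int sync_time_example(void *hwdev)
{
	u64 mstime = div_u64(ktime_get_real_ns(), NSEC_PER_MSEC);

	return hinic3_sync_time(hwdev, mstime);
}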
time_info.head.status, out_size); + return -EIO; + } + + return 0; +} + +int hinic3_set_ppf_flr_type(void *hwdev, enum hinic3_ppf_flr_type flr_type) +{ + struct comm_cmd_ppf_flr_type_set flr_type_set; + u16 out_size = sizeof(struct comm_cmd_ppf_flr_type_set); + struct hinic3_hwdev *dev = hwdev; + int err; + + if (!hwdev) + return -EINVAL; + + memset(&flr_type_set, 0, sizeof(flr_type_set)); + flr_type_set.func_id = hinic3_global_func_id(hwdev); + flr_type_set.ppf_flr_type = flr_type; + + err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SET_PPF_FLR_TYPE, + &flr_type_set, sizeof(flr_type_set), + &flr_type_set, &out_size); + if (err || !out_size || flr_type_set.head.status) { + sdk_err(dev->dev_hdl, "Failed to set ppf flr type, err: %d, status: 0x%x, out size: 0x%x\n", + err, flr_type_set.head.status, out_size); + return -EIO; + } + + return 0; +} +EXPORT_SYMBOL(hinic3_set_ppf_flr_type); + +static int hinic3_get_fw_ver(struct hinic3_hwdev *hwdev, enum hinic3_fw_ver_type type, + u8 *mgmt_ver, u8 version_size, u16 channel) +{ + struct comm_cmd_get_fw_version fw_ver; + u16 out_size = sizeof(fw_ver); + int err; + + if (!hwdev || !mgmt_ver) + return -EINVAL; + + memset(&fw_ver, 0, sizeof(fw_ver)); + fw_ver.fw_type = type; + err = comm_msg_to_mgmt_sync_ch(hwdev, COMM_MGMT_CMD_GET_FW_VERSION, + &fw_ver, sizeof(fw_ver), &fw_ver, + &out_size, channel); + if (err || !out_size || fw_ver.head.status) { + sdk_err(hwdev->dev_hdl, + "Failed to get fw version, err: %d, status: 0x%x, out size: 0x%x, channel: 0x%x\n", + err, fw_ver.head.status, out_size, channel); + return -EIO; + } + + err = snprintf(mgmt_ver, version_size, "%s", fw_ver.ver); + if (err < 0) + return -EINVAL; + + return 0; +} + +int hinic3_get_mgmt_version(void *hwdev, u8 *mgmt_ver, u8 version_size, + u16 channel) +{ + return hinic3_get_fw_ver(hwdev, HINIC3_FW_VER_TYPE_MPU, mgmt_ver, + version_size, channel); +} +EXPORT_SYMBOL(hinic3_get_mgmt_version); + +int hinic3_get_fw_version(void *hwdev, struct hinic3_fw_version *fw_ver, + u16 channel) +{ + int err; + + if (!hwdev || !fw_ver) + return -EINVAL; + + err = hinic3_get_fw_ver(hwdev, HINIC3_FW_VER_TYPE_MPU, + fw_ver->mgmt_ver, sizeof(fw_ver->mgmt_ver), + channel); + if (err) + return err; + + err = hinic3_get_fw_ver(hwdev, HINIC3_FW_VER_TYPE_NPU, + fw_ver->microcode_ver, + sizeof(fw_ver->microcode_ver), channel); + if (err) + return err; + + return hinic3_get_fw_ver(hwdev, HINIC3_FW_VER_TYPE_BOOT, + fw_ver->boot_ver, sizeof(fw_ver->boot_ver), + channel); +} +EXPORT_SYMBOL(hinic3_get_fw_version); + +static int hinic3_comm_features_nego(void *hwdev, u8 opcode, u64 *s_feature, + u16 size) +{ + struct comm_cmd_feature_nego feature_nego; + u16 out_size = sizeof(feature_nego); + struct hinic3_hwdev *dev = hwdev; + int err; + + if (!hwdev || !s_feature || size > COMM_MAX_FEATURE_QWORD) + return -EINVAL; + + memset(&feature_nego, 0, sizeof(feature_nego)); + feature_nego.func_id = hinic3_global_func_id(hwdev); + feature_nego.opcode = opcode; + if (opcode == MGMT_MSG_CMD_OP_SET) + memcpy(feature_nego.s_feature, s_feature, (size * sizeof(u64))); + + err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_FEATURE_NEGO, + &feature_nego, sizeof(feature_nego), + &feature_nego, &out_size); + if (err || !out_size || feature_nego.head.status) { + sdk_err(dev->dev_hdl, "Failed to negotiate feature, err: %d, status: 0x%x, out size: 0x%x\n", + err, feature_nego.head.status, out_size); + return -EINVAL; + } + + if (opcode == MGMT_MSG_CMD_OP_GET) + memcpy(s_feature, feature_nego.s_feature, (size * sizeof(u64))); + + return 
0; +} + +int hinic3_get_comm_features(void *hwdev, u64 *s_feature, u16 size) +{ + return hinic3_comm_features_nego(hwdev, MGMT_MSG_CMD_OP_GET, s_feature, + size); +} + +int hinic3_set_comm_features(void *hwdev, u64 *s_feature, u16 size) +{ + return hinic3_comm_features_nego(hwdev, MGMT_MSG_CMD_OP_SET, s_feature, + size); +} + +int hinic3_comm_channel_detect(struct hinic3_hwdev *hwdev) +{ + struct comm_cmd_channel_detect channel_detect_info; + u16 out_size = sizeof(channel_detect_info); + int err; + + if (!hwdev) + return -EINVAL; + + memset(&channel_detect_info, 0, sizeof(channel_detect_info)); + channel_detect_info.func_id = hinic3_global_func_id(hwdev); + + err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_CHANNEL_DETECT, + &channel_detect_info, sizeof(channel_detect_info), + &channel_detect_info, &out_size); + if ((channel_detect_info.head.status != HINIC3_MGMT_CMD_UNSUPPORTED && + channel_detect_info.head.status) || err || !out_size) { + sdk_err(hwdev->dev_hdl, "Failed to send channel detect, err: %d, status: 0x%x, out size: 0x%x\n", + err, channel_detect_info.head.status, out_size); + return -EINVAL; + } + + return 0; +} + +int hinic3_func_tmr_bitmap_set(void *hwdev, u16 func_id, bool en) +{ + struct comm_cmd_func_tmr_bitmap_op bitmap_op; + u16 out_size = sizeof(bitmap_op); + int err; + + if (!hwdev) + return -EINVAL; + + memset(&bitmap_op, 0, sizeof(bitmap_op)); + bitmap_op.func_id = func_id; + bitmap_op.opcode = en ? FUNC_TMR_BITMAP_ENABLE : FUNC_TMR_BITMAP_DISABLE; + + err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SET_FUNC_TMR_BITMAT, + &bitmap_op, sizeof(bitmap_op), + &bitmap_op, &out_size); + if (err || !out_size || bitmap_op.head.status) { + sdk_err(((struct hinic3_hwdev *)hwdev)->dev_hdl, + "Failed to set timer bitmap, err: %d, status: 0x%x, out_size: 0x%x\n", + err, bitmap_op.head.status, out_size); + return -EFAULT; + } + + return 0; +} + +static int ppf_ht_gpa_set(struct hinic3_hwdev *hwdev, struct hinic3_page_addr *pg0, + struct hinic3_page_addr *pg1) +{ + struct comm_cmd_ht_gpa ht_gpa_set; + u16 out_size = sizeof(ht_gpa_set); + int ret; + + memset(&ht_gpa_set, 0, sizeof(ht_gpa_set)); + pg0->virt_addr = dma_zalloc_coherent(hwdev->dev_hdl, + HINIC3_HT_GPA_PAGE_SIZE, + &pg0->phys_addr, GFP_KERNEL); + if (!pg0->virt_addr) { + sdk_err(hwdev->dev_hdl, "Alloc pg0 page addr failed\n"); + return -EFAULT; + } + + pg1->virt_addr = dma_zalloc_coherent(hwdev->dev_hdl, + HINIC3_HT_GPA_PAGE_SIZE, + &pg1->phys_addr, GFP_KERNEL); + if (!pg1->virt_addr) { + sdk_err(hwdev->dev_hdl, "Alloc pg1 page addr failed\n"); + return -EFAULT; + } + + ht_gpa_set.host_id = hinic3_host_id(hwdev); + ht_gpa_set.page_pa0 = pg0->phys_addr; + ht_gpa_set.page_pa1 = pg1->phys_addr; + sdk_info(hwdev->dev_hdl, "PPF ht gpa set: page_addr0.pa=0x%llx, page_addr1.pa=0x%llx\n", + pg0->phys_addr, pg1->phys_addr); + ret = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SET_PPF_HT_GPA, + &ht_gpa_set, sizeof(ht_gpa_set), + &ht_gpa_set, &out_size); + if (ret || !out_size || ht_gpa_set.head.status) { + sdk_warn(hwdev->dev_hdl, "PPF ht gpa set failed, ret: %d, status: 0x%x, out_size: 0x%x\n", + ret, ht_gpa_set.head.status, out_size); + return -EFAULT; + } + + hwdev->page_pa0.phys_addr = pg0->phys_addr; + hwdev->page_pa0.virt_addr = pg0->virt_addr; + + hwdev->page_pa1.phys_addr = pg1->phys_addr; + hwdev->page_pa1.virt_addr = pg1->virt_addr; + + return 0; +} + +int hinic3_ppf_ht_gpa_init(void *dev) +{ + struct hinic3_page_addr page_addr0[HINIC3_PPF_HT_GPA_SET_RETRY_TIMES]; + struct hinic3_page_addr 
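The negotiation helper above is symmetric: MGMT_MSG_CMD_OP_GET copies the firmware's feature qwords out, MGMT_MSG_CMD_OP_SET pushes the driver's view back. The usual pattern is get, AND with the locally supported bits, then set; a sketch (DRV_FEATURES_EXAMPLE and negotiate_features_example() are hypothetical):

/* Illustrative negotiate-down pattern over the first feature qword. */
#define DRV_FEATURES_EXAMPLE 0x3FULL	/* hypothetical driver mask */

static int negotiate_features_example(void *hwdev)
{
	u64 features[COMM_MAX_FEATURE_QWORD] = { 0 };
	int err;

	err = hinic3_get_comm_features(hwdev, features,
				       COMM_MAX_FEATURE_QWORD);
	if (err)
		return err;

	features[0] &= DRV_FEATURES_EXAMPLE;

	return hinic3_set_comm_features(hwdev, features,
					COMM_MAX_FEATURE_QWORD);
}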
page_addr1[HINIC3_PPF_HT_GPA_SET_RETRY_TIMES]; + struct hinic3_hwdev *hwdev = dev; + int ret; + int i; + int j; + size_t size; + + if (!dev) { + pr_err("Invalid para: dev is null.\n"); + return -EINVAL; + } + + size = HINIC3_PPF_HT_GPA_SET_RETRY_TIMES * sizeof(page_addr0[0]); + memset(page_addr0, 0, size); + memset(page_addr1, 0, size); + + for (i = 0; i < HINIC3_PPF_HT_GPA_SET_RETRY_TIMES; i++) { + ret = ppf_ht_gpa_set(hwdev, &page_addr0[i], &page_addr1[i]); + if (ret == 0) + break; + } + + for (j = 0; j < i; j++) { + if (page_addr0[j].virt_addr) { + dma_free_coherent(hwdev->dev_hdl, + HINIC3_HT_GPA_PAGE_SIZE, + page_addr0[j].virt_addr, + (dma_addr_t)page_addr0[j].phys_addr); + page_addr0[j].virt_addr = NULL; + } + if (page_addr1[j].virt_addr) { + dma_free_coherent(hwdev->dev_hdl, + HINIC3_HT_GPA_PAGE_SIZE, + page_addr1[j].virt_addr, + (dma_addr_t)page_addr1[j].phys_addr); + page_addr1[j].virt_addr = NULL; + } + } + + if (i >= HINIC3_PPF_HT_GPA_SET_RETRY_TIMES) { + sdk_err(hwdev->dev_hdl, "PPF ht gpa init failed, retry times: %d\n", + i); + return -EFAULT; + } + + return 0; +} + +void hinic3_ppf_ht_gpa_deinit(void *dev) +{ + struct hinic3_hwdev *hwdev = dev; + + if (!dev) { + pr_err("Invalid para: dev is null.\n"); + return; + } + + if (hwdev->page_pa0.virt_addr) { + dma_free_coherent(hwdev->dev_hdl, HINIC3_HT_GPA_PAGE_SIZE, + hwdev->page_pa0.virt_addr, + (dma_addr_t)(hwdev->page_pa0.phys_addr)); + hwdev->page_pa0.virt_addr = NULL; + } + + if (hwdev->page_pa1.virt_addr) { + dma_free_coherent(hwdev->dev_hdl, HINIC3_HT_GPA_PAGE_SIZE, + hwdev->page_pa1.virt_addr, + (dma_addr_t)hwdev->page_pa1.phys_addr); + hwdev->page_pa1.virt_addr = NULL; + } +} + +static int set_ppf_tmr_status(struct hinic3_hwdev *hwdev, + enum ppf_tmr_status status) +{ + struct comm_cmd_ppf_tmr_op op; + u16 out_size = sizeof(op); + int err = 0; + + if (!hwdev) + return -EINVAL; + + memset(&op, 0, sizeof(op)); + + if (hinic3_func_type(hwdev) != TYPE_PPF) + return -EFAULT; + + op.opcode = status; + op.ppf_id = hinic3_ppf_idx(hwdev); + + err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SET_PPF_TMR, &op, + sizeof(op), &op, &out_size); + if (err || !out_size || op.head.status) { + sdk_err(hwdev->dev_hdl, "Failed to set ppf timer, err: %d, status: 0x%x, out_size: 0x%x\n", + err, op.head.status, out_size); + return -EFAULT; + } + + return 0; +} + +int hinic3_ppf_tmr_start(void *hwdev) +{ + if (!hwdev) { + pr_err("Hwdev pointer is NULL for starting ppf timer\n"); + return -EINVAL; + } + + return set_ppf_tmr_status(hwdev, HINIC_PPF_TMR_FLAG_START); +} +EXPORT_SYMBOL(hinic3_ppf_tmr_start); + +int hinic3_ppf_tmr_stop(void *hwdev) +{ + if (!hwdev) { + pr_err("Hwdev pointer is NULL for stop ppf timer\n"); + return -EINVAL; + } + + return set_ppf_tmr_status(hwdev, HINIC_PPF_TMR_FLAG_STOP); +} +EXPORT_SYMBOL(hinic3_ppf_tmr_stop); + +static int mqm_eqm_try_alloc_mem(struct hinic3_hwdev *hwdev, u32 page_size, + u32 page_num) +{ + struct hinic3_page_addr *page_addr = hwdev->mqm_att.brm_srch_page_addr; + u32 valid_num = 0; + u32 flag = 1; + u32 i = 0; + + for (i = 0; i < page_num; i++) { + page_addr->virt_addr = + dma_zalloc_coherent(hwdev->dev_hdl, page_size, + &page_addr->phys_addr, GFP_KERNEL); + if (!page_addr->virt_addr) { + flag = 0; + break; + } + valid_num++; + page_addr++; + } + + if (flag == 1) { + hwdev->mqm_att.page_size = page_size; + hwdev->mqm_att.page_num = page_num; + } else { + page_addr = hwdev->mqm_att.brm_srch_page_addr; + for (i = 0; i < valid_num; i++) { + dma_free_coherent(hwdev->dev_hdl, page_size, + 
page_addr->virt_addr, + (dma_addr_t)page_addr->phys_addr); + page_addr++; + } + return -EFAULT; + } + + return 0; +} + +static int mqm_eqm_alloc_page_mem(struct hinic3_hwdev *hwdev) +{ + int ret = 0; + u32 page_num; + + /* apply for 2M page, page number is chunk_num/1024 */ + page_num = (hwdev->mqm_att.chunk_num + 0x3ff) >> 0xa; + ret = mqm_eqm_try_alloc_mem(hwdev, 0x2 * 0x400 * 0x400, page_num); + if (ret == 0) { + sdk_info(hwdev->dev_hdl, "[mqm_eqm_init] Alloc page_size 2M OK\n"); + return 0; + } + + /* apply for 64KB page, page number is chunk_num/32 */ + page_num = (hwdev->mqm_att.chunk_num + 0x1f) >> 0x5; + ret = mqm_eqm_try_alloc_mem(hwdev, 0x40 * 0x400, page_num); + if (ret == 0) { + sdk_info(hwdev->dev_hdl, "[mqm_eqm_init] Alloc page_size 64K OK\n"); + return 0; + } + + /* apply for 4KB page, page number is chunk_num/2 */ + page_num = (hwdev->mqm_att.chunk_num + 1) >> 1; + ret = mqm_eqm_try_alloc_mem(hwdev, 0x4 * 0x400, page_num); + if (ret == 0) { + sdk_info(hwdev->dev_hdl, "[mqm_eqm_init] Alloc page_size 4K OK\n"); + return 0; + } + + return ret; +} + +static void mqm_eqm_free_page_mem(struct hinic3_hwdev *hwdev) +{ + u32 i; + struct hinic3_page_addr *page_addr; + u32 page_size; + + page_size = hwdev->mqm_att.page_size; + page_addr = hwdev->mqm_att.brm_srch_page_addr; + + for (i = 0; i < hwdev->mqm_att.page_num; i++) { + dma_free_coherent(hwdev->dev_hdl, page_size, + page_addr->virt_addr, (dma_addr_t)(page_addr->phys_addr)); + page_addr++; + } +} + +static int mqm_eqm_set_cfg_2_hw(struct hinic3_hwdev *hwdev, u8 valid) +{ + struct comm_cmd_eqm_cfg info_eqm_cfg; + u16 out_size = sizeof(info_eqm_cfg); + int err; + + memset(&info_eqm_cfg, 0, sizeof(info_eqm_cfg)); + + info_eqm_cfg.host_id = hinic3_host_id(hwdev); + info_eqm_cfg.page_size = hwdev->mqm_att.page_size; + info_eqm_cfg.valid = valid; + err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SET_MQM_CFG_INFO, + &info_eqm_cfg, sizeof(info_eqm_cfg), + &info_eqm_cfg, &out_size); + if (err || !out_size || info_eqm_cfg.head.status) { + sdk_err(hwdev->dev_hdl, "Failed to init func table, err: %d, status: 0x%x, out_size: 0x%x\n", + err, info_eqm_cfg.head.status, out_size); + return -EFAULT; + } + + return 0; +} + +#define EQM_DATA_BUF_SIZE 1024 +#define MQM_ATT_PAGE_NUM 128 + +static int mqm_eqm_set_page_2_hw(struct hinic3_hwdev *hwdev) +{ + struct comm_cmd_eqm_search_gpa *info = NULL; + struct hinic3_page_addr *page_addr = NULL; + void *send_buf = NULL; + u16 send_buf_size; + u32 i; + u64 *gpa_hi52 = NULL; + u64 gpa; + u32 num; + u32 start_idx; + int err = 0; + u16 out_size; + u8 cmd; + + send_buf_size = sizeof(struct comm_cmd_eqm_search_gpa) + + EQM_DATA_BUF_SIZE; + send_buf = kzalloc(send_buf_size, GFP_KERNEL); + if (!send_buf) { + sdk_err(hwdev->dev_hdl, "Alloc virtual mem failed\r\n"); + return -EFAULT; + } + + page_addr = hwdev->mqm_att.brm_srch_page_addr; + info = (struct comm_cmd_eqm_search_gpa *)send_buf; + + gpa_hi52 = info->gpa_hi52; + num = 0; + start_idx = 0; + cmd = COMM_MGMT_CMD_SET_MQM_SRCH_GPA; + for (i = 0; i < hwdev->mqm_att.page_num; i++) { + /* gpa align to 4K, save gpa[31:12] */ + gpa = page_addr->phys_addr >> 12; + gpa_hi52[num] = gpa; + num++; + if (num == MQM_ATT_PAGE_NUM) { + info->num = num; + info->start_idx = start_idx; + info->host_id = hinic3_host_id(hwdev); + out_size = send_buf_size; + err = comm_msg_to_mgmt_sync(hwdev, cmd, info, + (u16)send_buf_size, + info, &out_size); + if (MSG_TO_MGMT_SYNC_RETURN_ERR(err, out_size, + info->head.status)) { + sdk_err(hwdev->dev_hdl, "Set mqm srch gpa fail, err: %d, 
status: 0x%x, out_size: 0x%x\n", + err, info->head.status, out_size); + err = -EFAULT; + goto set_page_2_hw_end; + } + + gpa_hi52 = info->gpa_hi52; + num = 0; + start_idx = i + 1; + } + page_addr++; + } + + if (num != 0) { + info->num = num; + info->start_idx = start_idx; + info->host_id = hinic3_host_id(hwdev); + out_size = send_buf_size; + err = comm_msg_to_mgmt_sync(hwdev, cmd, info, + (u16)send_buf_size, info, + &out_size); + if (MSG_TO_MGMT_SYNC_RETURN_ERR(err, out_size, + info->head.status)) { + sdk_err(hwdev->dev_hdl, "Set mqm srch gpa fail, err: %d, status: 0x%x, out_size: 0x%x\n", + err, info->head.status, out_size); + err = -EFAULT; + goto set_page_2_hw_end; + } + } + +set_page_2_hw_end: + kfree(send_buf); + return err; +} + +static int get_eqm_num(struct hinic3_hwdev *hwdev, struct comm_cmd_get_eqm_num *info_eqm_fix) +{ + int ret; + u16 len = sizeof(*info_eqm_fix); + + memset(info_eqm_fix, 0, sizeof(*info_eqm_fix)); + + ret = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_GET_MQM_FIX_INFO, + info_eqm_fix, sizeof(*info_eqm_fix), info_eqm_fix, &len); + if (ret || !len || info_eqm_fix->head.status) { + sdk_err(hwdev->dev_hdl, "Get mqm fix info fail, err: %d, status: 0x%x, out_size: 0x%x\n", + ret, info_eqm_fix->head.status, len); + return -EFAULT; + } + + sdk_info(hwdev->dev_hdl, "get chunk_num: 0x%x, search_gpa_num: 0x%08x\n", + info_eqm_fix->chunk_num, info_eqm_fix->search_gpa_num); + + return 0; +} + +static int mqm_eqm_init(struct hinic3_hwdev *hwdev) +{ + struct comm_cmd_get_eqm_num info_eqm_fix; + int ret; + + if (hwdev->hwif->attr.func_type != TYPE_PPF) + return 0; + + ret = get_eqm_num(hwdev, &info_eqm_fix); + if (ret) + return ret; + + if (!(info_eqm_fix.chunk_num)) + return 0; + + hwdev->mqm_att.chunk_num = info_eqm_fix.chunk_num; + hwdev->mqm_att.search_gpa_num = info_eqm_fix.search_gpa_num; + hwdev->mqm_att.page_size = 0; + hwdev->mqm_att.page_num = 0; + + hwdev->mqm_att.brm_srch_page_addr = + kcalloc(hwdev->mqm_att.chunk_num, sizeof(struct hinic3_page_addr), GFP_KERNEL); + if (!(hwdev->mqm_att.brm_srch_page_addr)) { + sdk_err(hwdev->dev_hdl, "Alloc virtual mem failed\n"); + return -EFAULT; + } + + ret = mqm_eqm_alloc_page_mem(hwdev); + if (ret) { + sdk_err(hwdev->dev_hdl, "Alloc eqm page mem failed\n"); + goto err_page; + } + + ret = mqm_eqm_set_page_2_hw(hwdev); + if (ret) { + sdk_err(hwdev->dev_hdl, "Set page to hw failed\n"); + goto err_ecmd; + } + + ret = mqm_eqm_set_cfg_2_hw(hwdev, 1); + if (ret) { + sdk_err(hwdev->dev_hdl, "Set cfg to hw failed\n"); + goto err_ecmd; + } + + sdk_info(hwdev->dev_hdl, "ppf_ext_db_init ok\n"); + + return 0; + +err_ecmd: + mqm_eqm_free_page_mem(hwdev); + +err_page: + kfree(hwdev->mqm_att.brm_srch_page_addr); + + return ret; +} + +static void mqm_eqm_deinit(struct hinic3_hwdev *hwdev) +{ + int ret; + + if (hwdev->hwif->attr.func_type != TYPE_PPF) + return; + + if (!(hwdev->mqm_att.chunk_num)) + return; + + mqm_eqm_free_page_mem(hwdev); + kfree(hwdev->mqm_att.brm_srch_page_addr); + + ret = mqm_eqm_set_cfg_2_hw(hwdev, 0); + if (ret) { + sdk_err(hwdev->dev_hdl, "Set mqm eqm cfg to chip fail! 
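mqm_eqm_set_page_2_hw() above batches the page GPAs MQM_ATT_PAGE_NUM (128) per management message, with a tail send after the loop for the remainder, so the message count is a ceiling division. A small helper making that explicit (mqm_gpa_msg_count() is hypothetical):

#include <linux/kernel.h>

/* Illustrative: messages sent for page_num pages, 128 GPAs each. */
static u32 mqm_gpa_msg_count(u32 page_num)
{
	return DIV_ROUND_UP(page_num, MQM_ATT_PAGE_NUM);
}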
err: %d\n", + ret); + return; + } + + hwdev->mqm_att.chunk_num = 0; + hwdev->mqm_att.search_gpa_num = 0; + hwdev->mqm_att.page_num = 0; + hwdev->mqm_att.page_size = 0; +} + +int hinic3_ppf_ext_db_init(struct hinic3_hwdev *hwdev) +{ + int ret; + + ret = mqm_eqm_init(hwdev); + if (ret) { + sdk_err(hwdev->dev_hdl, "MQM eqm init fail!\n"); + return -EFAULT; + } + + return 0; +} + +int hinic3_ppf_ext_db_deinit(struct hinic3_hwdev *hwdev) +{ + if (!hwdev) + return -EINVAL; + + if (hwdev->hwif->attr.func_type != TYPE_PPF) + return 0; + + mqm_eqm_deinit(hwdev); + + return 0; +} + +#define HINIC3_FLR_TIMEOUT 1000 + +static enum hinic3_wait_return check_flr_finish_handler(void *priv_data) +{ + struct hinic3_hwif *hwif = priv_data; + enum hinic3_pf_status status; + + status = hinic3_get_pf_status(hwif); + if (status == HINIC3_PF_STATUS_FLR_FINISH_FLAG) { + hinic3_set_pf_status(hwif, HINIC3_PF_STATUS_ACTIVE_FLAG); + return WAIT_PROCESS_CPL; + } + + return WAIT_PROCESS_WAITING; +} + +static int wait_for_flr_finish(struct hinic3_hwif *hwif) +{ + return hinic3_wait_for_timeout(hwif, check_flr_finish_handler, + HINIC3_FLR_TIMEOUT, 0xa * USEC_PER_MSEC); +} + +#define HINIC3_WAIT_CMDQ_IDLE_TIMEOUT 5000 + +static enum hinic3_wait_return check_cmdq_stop_handler(void *priv_data) +{ + struct hinic3_hwdev *hwdev = priv_data; + struct hinic3_cmdqs *cmdqs = hwdev->cmdqs; + enum hinic3_cmdq_type cmdq_type; + + /* Stop waiting when card unpresent */ + if (!hwdev->chip_present_flag) + return WAIT_PROCESS_CPL; + + cmdq_type = HINIC3_CMDQ_SYNC; + for (; cmdq_type < cmdqs->cmdq_num; cmdq_type++) { + if (!hinic3_cmdq_idle(&cmdqs->cmdq[cmdq_type])) + return WAIT_PROCESS_WAITING; + } + + return WAIT_PROCESS_CPL; +} + +static int wait_cmdq_stop(struct hinic3_hwdev *hwdev) +{ + enum hinic3_cmdq_type cmdq_type; + struct hinic3_cmdqs *cmdqs = hwdev->cmdqs; + int err; + + if (!(cmdqs->status & HINIC3_CMDQ_ENABLE)) + return 0; + + cmdqs->status &= ~HINIC3_CMDQ_ENABLE; + + err = hinic3_wait_for_timeout(hwdev, check_cmdq_stop_handler, + HINIC3_WAIT_CMDQ_IDLE_TIMEOUT, + USEC_PER_MSEC); + if (err == 0) + return 0; + + cmdq_type = HINIC3_CMDQ_SYNC; + for (; cmdq_type < cmdqs->cmdq_num; cmdq_type++) { + if (!hinic3_cmdq_idle(&cmdqs->cmdq[cmdq_type])) + sdk_err(hwdev->dev_hdl, "Cmdq %d is busy\n", cmdq_type); + } + + cmdqs->status |= HINIC3_CMDQ_ENABLE; + + return err; +} + +static int hinic3_rx_tx_flush(struct hinic3_hwdev *hwdev, u16 channel) +{ + struct hinic3_hwif *hwif = hwdev->hwif; + struct comm_cmd_clear_doorbell clear_db; + struct comm_cmd_clear_resource clr_res; + u16 out_size; + int err; + int ret = 0; + + if (HINIC3_FUNC_TYPE(hwdev) != TYPE_VF) + msleep(100); /* wait ucode 100 ms stop I/O */ + + err = wait_cmdq_stop(hwdev); + if (err != 0) { + sdk_warn(hwdev->dev_hdl, "CMDQ is still working, please check CMDQ timeout value is reasonable\n"); + ret = err; + } + + hinic3_disable_doorbell(hwif); + + out_size = sizeof(clear_db); + memset(&clear_db, 0, sizeof(clear_db)); + clear_db.func_id = HINIC3_HWIF_GLOBAL_IDX(hwif); + + err = comm_msg_to_mgmt_sync_ch(hwdev, COMM_MGMT_CMD_FLUSH_DOORBELL, + &clear_db, sizeof(clear_db), + &clear_db, &out_size, channel); + if (err != 0 || !out_size || clear_db.head.status) { + sdk_warn(hwdev->dev_hdl, "Failed to flush doorbell, err: %d, status: 0x%x, out_size: 0x%x, channel: 0x%x\n", + err, clear_db.head.status, out_size, channel); + if (err != 0) + ret = err; + else + ret = -EFAULT; + } + + if (HINIC3_FUNC_TYPE(hwdev) != TYPE_VF) + hinic3_set_pf_status(hwif, HINIC3_PF_STATUS_FLR_START_FLAG); + 
else + msleep(100); /* wait ucode 100 ms stop I/O */ + + memset(&clr_res, 0, sizeof(clr_res)); + clr_res.func_id = HINIC3_HWIF_GLOBAL_IDX(hwif); + + err = hinic3_msg_to_mgmt_no_ack(hwdev, HINIC3_MOD_COMM, + COMM_MGMT_CMD_START_FLUSH, &clr_res, + sizeof(clr_res), channel); + if (err != 0) { + sdk_warn(hwdev->dev_hdl, "Failed to notice flush message, err: %d, channel: 0x%x\n", + err, channel); + ret = err; + } + + if (HINIC3_FUNC_TYPE(hwdev) != TYPE_VF) { + err = wait_for_flr_finish(hwif); + if (err != 0) { + sdk_warn(hwdev->dev_hdl, "Wait firmware FLR timeout\n"); + ret = err; + } + } + + hinic3_enable_doorbell(hwif); + + err = hinic3_reinit_cmdq_ctxts(hwdev); + if (err != 0) { + sdk_warn(hwdev->dev_hdl, "Failed to reinit cmdq\n"); + ret = err; + } + + return ret; +} + +int hinic3_func_rx_tx_flush(void *hwdev, u16 channel) +{ + struct hinic3_hwdev *dev = hwdev; + + if (!hwdev) + return -EINVAL; + + if (dev->chip_present_flag == 0) + return 0; + + return hinic3_rx_tx_flush(dev, channel); +} +EXPORT_SYMBOL(hinic3_func_rx_tx_flush); + +int hinic3_get_board_info(void *hwdev, struct hinic3_board_info *info, + u16 channel) +{ + struct comm_cmd_board_info board_info; + u16 out_size = sizeof(board_info); + int err; + + if (!hwdev || !info) + return -EINVAL; + + memset(&board_info, 0, sizeof(board_info)); + err = comm_msg_to_mgmt_sync_ch(hwdev, COMM_MGMT_CMD_GET_BOARD_INFO, + &board_info, sizeof(board_info), + &board_info, &out_size, channel); + if (err || board_info.head.status || !out_size) { + sdk_err(((struct hinic3_hwdev *)hwdev)->dev_hdl, + "Failed to get board info, err: %d, status: 0x%x, out size: 0x%x, channel: 0x%x\n", + err, board_info.head.status, out_size, channel); + return -EIO; + } + + memcpy(info, &board_info.info, sizeof(*info)); + + return 0; +} +EXPORT_SYMBOL(hinic3_get_board_info); + +int hinic3_get_hw_pf_infos(void *hwdev, struct hinic3_hw_pf_infos *infos, + u16 channel) +{ + struct comm_cmd_hw_pf_infos *pf_infos = NULL; + u16 out_size = sizeof(*pf_infos); + int err = 0; + + if (!hwdev || !infos) + return -EINVAL; + + pf_infos = kzalloc(sizeof(*pf_infos), GFP_KERNEL); + if (!pf_infos) + return -ENOMEM; + + err = comm_msg_to_mgmt_sync_ch(hwdev, COMM_MGMT_CMD_GET_HW_PF_INFOS, + pf_infos, sizeof(*pf_infos), + pf_infos, &out_size, channel); + if (pf_infos->head.status || err || !out_size) { + sdk_err(((struct hinic3_hwdev *)hwdev)->dev_hdl, + "Failed to get hw pf information, err: %d, status: 0x%x, out size: 0x%x, channel: 0x%x\n", + err, pf_infos->head.status, out_size, channel); + err = -EIO; + goto free_buf; + } + + memcpy(infos, &pf_infos->infos, sizeof(*infos)); + +free_buf: + kfree(pf_infos); + return err; +} +EXPORT_SYMBOL(hinic3_get_hw_pf_infos); + +int hinic3_get_global_attr(void *hwdev, struct comm_global_attr *attr) +{ + struct comm_cmd_get_glb_attr get_attr; + u16 out_size = sizeof(get_attr); + int err = 0; + + err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_GET_GLOBAL_ATTR, + &get_attr, sizeof(get_attr), &get_attr, + &out_size); + if (err || !out_size || get_attr.head.status) { + sdk_err(((struct hinic3_hwdev *)hwdev)->dev_hdl, + "Failed to get global attribute, err: %d, status: 0x%x, out size: 0x%x\n", + err, get_attr.head.status, out_size); + return -EIO; + } + + memcpy(attr, &get_attr.attr, sizeof(*attr)); + + return 0; +} + +int hinic3_set_func_svc_used_state(void *hwdev, u16 svc_type, u8 state, + u16 channel) +{ + struct comm_cmd_func_svc_used_state used_state; + u16 out_size = sizeof(used_state); + int err; + + if (!hwdev) + return -EINVAL; + + 
memset(&used_state, 0, sizeof(used_state)); + used_state.func_id = hinic3_global_func_id(hwdev); + used_state.svc_type = svc_type; + used_state.used_state = state; + + err = comm_msg_to_mgmt_sync_ch(hwdev, + COMM_MGMT_CMD_SET_FUNC_SVC_USED_STATE, + &used_state, sizeof(used_state), + &used_state, &out_size, channel); + if (err || !out_size || used_state.head.status) { + sdk_err(((struct hinic3_hwdev *)hwdev)->dev_hdl, + "Failed to set func service used state, err: %d, status: 0x%x, out size: 0x%x, channel: 0x%x\n\n", + err, used_state.head.status, out_size, channel); + return -EIO; + } + + return 0; +} +EXPORT_SYMBOL(hinic3_set_func_svc_used_state); + +int hinic3_get_sml_table_info(void *hwdev, u32 tbl_id, u8 *node_id, u8 *instance_id) +{ + struct sml_table_id_info sml_table[TABLE_INDEX_MAX]; + struct comm_cmd_get_sml_tbl_data sml_tbl; + u16 out_size = sizeof(sml_tbl); + int err; + + if (!hwdev) + return -EINVAL; + + if (tbl_id >= TABLE_INDEX_MAX) { + sdk_err(((struct hinic3_hwdev *)hwdev)->dev_hdl, "sml table index out of range [0, %u]", + TABLE_INDEX_MAX - 1); + return -EINVAL; + } + + err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_GET_SML_TABLE_INFO, + &sml_tbl, sizeof(sml_tbl), &sml_tbl, &out_size); + if (err || !out_size || sml_tbl.head.status) { + sdk_err(((struct hinic3_hwdev *)hwdev)->dev_hdl, + "Failed to get sml table information, err: %d, status: 0x%x, out size: 0x%x\n", + err, sml_tbl.head.status, out_size); + return -EIO; + } + + memcpy(sml_table, sml_tbl.tbl_data, sizeof(sml_table)); + + *node_id = sml_table[tbl_id].node_id; + *instance_id = sml_table[tbl_id].instance_id; + + return 0; +} + +int hinic3_activate_firmware(void *hwdev, u8 cfg_index) +{ + struct hinic3_cmd_activate_firmware activate_msg; + u16 out_size = sizeof(activate_msg); + int err; + + if (!hwdev) + return -EINVAL; + + if (hinic3_func_type(hwdev) != TYPE_PF) + return -EOPNOTSUPP; + + if (!COMM_SUPPORT_API_CHAIN((struct hinic3_hwdev *)hwdev)) + return -EPERM; + + memset(&activate_msg, 0, sizeof(activate_msg)); + activate_msg.index = cfg_index; + + err = hinic3_pf_to_mgmt_sync(hwdev, HINIC3_MOD_COMM, COMM_MGMT_CMD_ACTIVE_FW, + &activate_msg, sizeof(activate_msg), + &activate_msg, &out_size, FW_UPDATE_MGMT_TIMEOUT); + if (err || !out_size || activate_msg.msg_head.status) { + sdk_err(((struct hinic3_hwdev *)hwdev)->dev_hdl, + "Failed to activate firmware, err: %d, status: 0x%x, out size: 0x%x\n", + err, activate_msg.msg_head.status, out_size); + err = activate_msg.msg_head.status ? activate_msg.msg_head.status : -EIO; + return err; + } + + return 0; +} + +int hinic3_switch_config(void *hwdev, u8 cfg_index) +{ + struct hinic3_cmd_switch_config switch_cfg; + u16 out_size = sizeof(switch_cfg); + int err; + + if (!hwdev) + return -EINVAL; + + if (hinic3_func_type(hwdev) != TYPE_PF) + return -EOPNOTSUPP; + + if (!COMM_SUPPORT_API_CHAIN((struct hinic3_hwdev *)hwdev)) + return -EPERM; + + memset(&switch_cfg, 0, sizeof(switch_cfg)); + switch_cfg.index = cfg_index; + + err = hinic3_pf_to_mgmt_sync(hwdev, HINIC3_MOD_COMM, COMM_MGMT_CMD_SWITCH_CFG, + &switch_cfg, sizeof(switch_cfg), + &switch_cfg, &out_size, FW_UPDATE_MGMT_TIMEOUT); + if (err || !out_size || switch_cfg.msg_head.status) { + sdk_err(((struct hinic3_hwdev *)hwdev)->dev_hdl, + "Failed to switch cfg, err: %d, status: 0x%x, out size: 0x%x\n", + err, switch_cfg.msg_head.status, out_size); + err = switch_cfg.msg_head.status ? 
switch_cfg.msg_head.status : -EIO; + return err; + } + + return 0; +} diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_comm.h b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_comm.h new file mode 100644 index 000000000000..be9e4a6b24f9 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_comm.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC3_COMM_H +#define HINIC3_COMM_H + +#include <linux/types.h> + +#include "comm_msg_intf.h" +#include "hinic3_hwdev.h" + +#define MSG_TO_MGMT_SYNC_RETURN_ERR(err, out_size, status) \ + ((err) || (status) || !(out_size)) + +#define HINIC3_PAGE_SIZE_HW(pg_size) ((u8)ilog2((u32)((pg_size) >> 12))) + +enum func_tmr_bitmap_status { + FUNC_TMR_BITMAP_DISABLE, + FUNC_TMR_BITMAP_ENABLE, +}; + +enum ppf_tmr_status { + HINIC_PPF_TMR_FLAG_STOP, + HINIC_PPF_TMR_FLAG_START, +}; + +#define HINIC3_HT_GPA_PAGE_SIZE 4096UL +#define HINIC3_PPF_HT_GPA_SET_RETRY_TIMES 10 + +int hinic3_set_cmdq_depth(void *hwdev, u16 cmdq_depth); + +int hinic3_set_cmdq_ctxt(struct hinic3_hwdev *hwdev, u8 cmdq_id, + struct cmdq_ctxt_info *ctxt); + +int hinic3_ppf_ext_db_init(struct hinic3_hwdev *hwdev); + +int hinic3_ppf_ext_db_deinit(struct hinic3_hwdev *hwdev); + +int hinic3_set_ceq_ctrl_reg(struct hinic3_hwdev *hwdev, u16 q_id, + u32 ctrl0, u32 ctrl1); + +int hinic3_set_dma_attr_tbl(struct hinic3_hwdev *hwdev, u8 entry_idx, u8 st, u8 at, u8 ph, + u8 no_snooping, u8 tph_en); + +int hinic3_get_comm_features(void *hwdev, u64 *s_feature, u16 size); +int hinic3_set_comm_features(void *hwdev, u64 *s_feature, u16 size); + +int hinic3_comm_channel_detect(struct hinic3_hwdev *hwdev); + +int hinic3_get_global_attr(void *hwdev, struct comm_global_attr *attr); +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_mt.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_mt.c new file mode 100644 index 000000000000..79e4dacbd0c9 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_mt.c @@ -0,0 +1,599 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#include "ossl_knl.h" +#include "hinic3_mt.h" +#include "hinic3_crm.h" +#include "hinic3_hw.h" +#include "hinic3_comm_cmd.h" +#include "hinic3_hw_mt.h" + +#define HINIC3_CMDQ_BUF_MAX_SIZE 2048U +#define DW_WIDTH 4 + +#define MSG_MAX_IN_SIZE (2048 * 1024) +#define MSG_MAX_OUT_SIZE (2048 * 1024) + +/* completion timeout interval, unit is millisecond */ +#define MGMT_MSG_UPDATE_TIMEOUT 50000U + +void free_buff_in(void *hwdev, const struct msg_module *nt_msg, void *buf_in) +{ + if (!buf_in) + return; + + if (nt_msg->module == SEND_TO_NPU) + hinic3_free_cmd_buf(hwdev, buf_in); + else + kfree(buf_in); +} + +void free_buff_out(void *hwdev, struct msg_module *nt_msg, + void *buf_out) +{ + if (!buf_out) + return; + + if (nt_msg->module == SEND_TO_NPU && + !nt_msg->npu_cmd.direct_resp) + hinic3_free_cmd_buf(hwdev, buf_out); + else + kfree(buf_out); +} + +int alloc_buff_in(void *hwdev, struct msg_module *nt_msg, + u32 in_size, void **buf_in) +{ + void *msg_buf = NULL; + + if (!in_size) + return 0; + + if (nt_msg->module == SEND_TO_NPU) { + struct hinic3_cmd_buf *cmd_buf = NULL; + + if (in_size > HINIC3_CMDQ_BUF_MAX_SIZE) { + pr_err("Cmdq in size(%u) more than 2KB\n", in_size); + return -ENOMEM; + } + + cmd_buf = hinic3_alloc_cmd_buf(hwdev); + if (!cmd_buf) { + pr_err("Alloc cmdq cmd buffer failed in %s\n", + __func__); + return -ENOMEM; + } + msg_buf = cmd_buf->buf; + *buf_in = (void 
*)cmd_buf; + cmd_buf->size = (u16)in_size; + } else { + if (in_size > MSG_MAX_IN_SIZE) { + pr_err("In size(%u) more than 2M\n", in_size); + return -ENOMEM; + } + msg_buf = kzalloc(in_size, GFP_KERNEL); + *buf_in = msg_buf; + } + if (!(*buf_in)) { + pr_err("Alloc buffer in failed\n"); + return -ENOMEM; + } + + if (copy_from_user(msg_buf, nt_msg->in_buf, in_size)) { + pr_err("%s:%d: Copy from user failed\n", + __func__, __LINE__); + free_buff_in(hwdev, nt_msg, *buf_in); + return -EFAULT; + } + + return 0; +} + +int alloc_buff_out(void *hwdev, struct msg_module *nt_msg, + u32 out_size, void **buf_out) +{ + if (!out_size) + return 0; + + if (nt_msg->module == SEND_TO_NPU && + !nt_msg->npu_cmd.direct_resp) { + struct hinic3_cmd_buf *cmd_buf = NULL; + + if (out_size > HINIC3_CMDQ_BUF_MAX_SIZE) { + pr_err("Cmdq out size(%u) more than 2KB\n", out_size); + return -ENOMEM; + } + + cmd_buf = hinic3_alloc_cmd_buf(hwdev); + *buf_out = (void *)cmd_buf; + } else { + if (out_size > MSG_MAX_OUT_SIZE) { + pr_err("out size(%u) more than 2M\n", out_size); + return -ENOMEM; + } + *buf_out = kzalloc(out_size, GFP_KERNEL); + } + if (!(*buf_out)) { + pr_err("Alloc buffer out failed\n"); + return -ENOMEM; + } + + return 0; +} + +int copy_buf_out_to_user(struct msg_module *nt_msg, + u32 out_size, void *buf_out) +{ + int ret = 0; + void *msg_out = NULL; + + if (nt_msg->module == SEND_TO_NPU && + !nt_msg->npu_cmd.direct_resp) + msg_out = ((struct hinic3_cmd_buf *)buf_out)->buf; + else + msg_out = buf_out; + + if (copy_to_user(nt_msg->out_buf, msg_out, out_size)) + ret = -EFAULT; + + return ret; +} + +int get_func_type(struct hinic3_lld_dev *lld_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + u16 func_type; + + if (*out_size != sizeof(u16) || !buf_out) { + pr_err("Unexpect out buf size from user :%u, expect: %lu\n", + *out_size, sizeof(u16)); + return -EFAULT; + } + + func_type = hinic3_func_type(hinic3_get_sdk_hwdev_by_lld(lld_dev)); + + *(u16 *)buf_out = func_type; + return 0; +} + +int get_func_id(struct hinic3_lld_dev *lld_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + u16 func_id; + + if (*out_size != sizeof(u16) || !buf_out) { + pr_err("Unexpect out buf size from user :%u, expect: %lu\n", + *out_size, sizeof(u16)); + return -EFAULT; + } + + func_id = hinic3_global_func_id(hinic3_get_sdk_hwdev_by_lld(lld_dev)); + *(u16 *)buf_out = func_id; + + return 0; +} + +int get_hw_driver_stats(struct hinic3_lld_dev *lld_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + return hinic3_dbg_get_hw_stats(hinic3_get_sdk_hwdev_by_lld(lld_dev), + buf_out, (u16 *)out_size); +} + +int clear_hw_driver_stats(struct hinic3_lld_dev *lld_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + u16 size; + + size = hinic3_dbg_clear_hw_stats(hinic3_get_sdk_hwdev_by_lld(lld_dev)); + if (*out_size != size) { + pr_err("Unexpect out buf size from user :%u, expect: %u\n", + *out_size, size); + return -EFAULT; + } + + return 0; +} + +int get_self_test_result(struct hinic3_lld_dev *lld_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + u32 result; + + if (*out_size != sizeof(u32) || !buf_out) { + pr_err("Unexpect out buf size from user :%u, expect: %lu\n", + *out_size, sizeof(u32)); + return -EFAULT; + } + + result = hinic3_get_self_test_result(hinic3_get_sdk_hwdev_by_lld(lld_dev)); + *(u32 *)buf_out = result; + + return 0; +} + +int get_chip_faults_stats(struct hinic3_lld_dev *lld_dev, const void *buf_in, u32 in_size, + void 
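+ /* Round trip through the buffer helpers above (sketch; a hypothetical
+  * dispatcher, error unwinding trimmed):
+  *
+  *	void *in = NULL, *out = NULL;
+  *
+  *	ret = alloc_buff_in(hwdev, nt_msg, in_size, &in);	// copy_from_user inside
+  *	ret = alloc_buff_out(hwdev, nt_msg, out_size, &out);
+  *	// ... dispatch to send_to_mpu()/send_to_npu()/send_to_sm() ...
+  *	ret = copy_buf_out_to_user(nt_msg, out_size, out);
+  *	free_buff_out(hwdev, nt_msg, out);
+  *	free_buff_in(hwdev, nt_msg, in);
+  *
+  * SEND_TO_NPU requests ride in cmdq buffers capped at 2KB; everything
+  * else is a plain kzalloc() capped at 2MB.
+  */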
*buf_out, u32 *out_size) +{ + u32 offset = 0; + struct nic_cmd_chip_fault_stats *fault_info = NULL; + + if (!buf_in || !buf_out || *out_size != sizeof(*fault_info) || + in_size != sizeof(*fault_info)) { + pr_err("Unexpected out buf size from user: %u, expect: %lu\n", + *out_size, sizeof(*fault_info)); + return -EFAULT; + } + fault_info = (struct nic_cmd_chip_fault_stats *)buf_in; + offset = fault_info->offset; + + fault_info = (struct nic_cmd_chip_fault_stats *)buf_out; + hinic3_get_chip_fault_stats(hinic3_get_sdk_hwdev_by_lld(lld_dev), + fault_info->chip_fault_stats, offset); + + return 0; +} + +static u32 get_up_timeout_val(enum hinic3_mod_type mod, u16 cmd) +{ + if (mod == HINIC3_MOD_COMM && + (cmd == COMM_MGMT_CMD_UPDATE_FW || + cmd == COMM_MGMT_CMD_UPDATE_BIOS || + cmd == COMM_MGMT_CMD_ACTIVE_FW || + cmd == COMM_MGMT_CMD_SWITCH_CFG || + cmd == COMM_MGMT_CMD_HOT_ACTIVE_FW)) + return MGMT_MSG_UPDATE_TIMEOUT; + + return 0; /* use default mbox/apichain timeout time */ +} + +static int api_csr_read(void *hwdev, struct msg_module *nt_msg, + void *buf_in, u32 in_size, void *buf_out, u32 *out_size) +{ + struct up_log_msg_st *up_log_msg = (struct up_log_msg_st *)buf_in; + int ret = 0; + u32 rd_len; + u32 rd_addr; + u32 rd_cnt = 0; + u32 offset = 0; + u8 node_id; + u32 i; + + if (!buf_in || !buf_out || in_size != sizeof(*up_log_msg) || + *out_size != up_log_msg->rd_len || up_log_msg->rd_len % DW_WIDTH != 0) + return -EINVAL; + + rd_len = up_log_msg->rd_len; + rd_addr = up_log_msg->addr; + node_id = (u8)nt_msg->mpu_cmd.mod; + + rd_cnt = rd_len / DW_WIDTH; + + for (i = 0; i < rd_cnt; i++) { + ret = hinic3_api_csr_rd32(hwdev, node_id, + rd_addr + offset, + (u32 *)(((u8 *)buf_out) + offset)); + if (ret) { + pr_err("Csr rd fail, err: %d, node_id: %u, csr addr: 0x%08x\n", + ret, node_id, rd_addr + offset); + return ret; + } + offset += DW_WIDTH; + } + *out_size = rd_len; + + return ret; +} + +static int api_csr_write(void *hwdev, struct msg_module *nt_msg, + void *buf_in, u32 in_size, void *buf_out, + u32 *out_size) +{ + struct csr_write_st *csr_write_msg = (struct csr_write_st *)buf_in; + int ret = 0; + u32 rd_len; + u32 rd_addr; + u32 rd_cnt = 0; + u32 offset = 0; + u8 node_id; + u32 i; + u8 *data = NULL; + + if (!buf_in || in_size != sizeof(*csr_write_msg) || csr_write_msg->rd_len % DW_WIDTH != 0) + return -EINVAL; + + rd_len = csr_write_msg->rd_len; + rd_addr = csr_write_msg->addr; + node_id = (u8)nt_msg->mpu_cmd.mod; + + rd_cnt = rd_len / DW_WIDTH; + + data = kzalloc(rd_len, GFP_KERNEL); + if (!data) { + pr_err("No more memory\n"); + return -ENOMEM; + } + if (copy_from_user(data, (void *)csr_write_msg->data, rd_len)) { + pr_err("Copy information from user failed\n"); + kfree(data); + return -EFAULT; + } + + for (i = 0; i < rd_cnt; i++) { + ret = hinic3_api_csr_wr32(hwdev, node_id, + rd_addr + offset, + *((u32 *)(data + offset))); + if (ret) { + pr_err("Csr wr fail, ret: %d, node_id: %u, csr addr: 0x%08x\n", + ret, node_id, rd_addr + offset); + kfree(data); + return ret; + } + offset += DW_WIDTH; + } + + *out_size = 0; + kfree(data); + return ret; +} + +int send_to_mpu(void *hwdev, struct msg_module *nt_msg, + void *buf_in, u32 in_size, void *buf_out, u32 *out_size) +{ + enum hinic3_mod_type mod; + u32 timeout; + int ret = 0; + u16 cmd; + + mod = (enum hinic3_mod_type)nt_msg->mpu_cmd.mod; + cmd = nt_msg->mpu_cmd.cmd; + + if (nt_msg->mpu_cmd.api_type == API_TYPE_MBOX || nt_msg->mpu_cmd.api_type == API_TYPE_CLP) { + timeout = get_up_timeout_val(mod, cmd); + + if (nt_msg->mpu_cmd.api_type ==
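+ /* Worked example for the DW loops above (illustrative): rd_len = 16
+  * passes the (rd_len % DW_WIDTH == 0) check and gives rd_cnt = 16 / 4
+  * = 4, i.e. four 32-bit CSR accesses at addr, addr+4, addr+8, addr+12;
+  * rd_len = 10 is rejected with -EINVAL before any hardware access.
+  */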
API_TYPE_MBOX) + ret = hinic3_msg_to_mgmt_sync(hwdev, mod, cmd, buf_in, (u16)in_size, + buf_out, (u16 *)out_size, timeout, + HINIC3_CHANNEL_DEFAULT); + else + ret = hinic3_clp_to_mgmt(hwdev, mod, cmd, buf_in, (u16)in_size, + buf_out, (u16 *)out_size); + if (ret) { + pr_err("Message to mgmt cpu return fail, mod: %d, cmd: %u\n", mod, cmd); + return ret; + } + } else if (nt_msg->mpu_cmd.api_type == API_TYPE_API_CHAIN_BYPASS) { + if (nt_msg->mpu_cmd.cmd == API_CSR_WRITE) + return api_csr_write(hwdev, nt_msg, buf_in, in_size, buf_out, out_size); + + ret = api_csr_read(hwdev, nt_msg, buf_in, in_size, buf_out, out_size); + } else if (nt_msg->mpu_cmd.api_type == API_TYPE_API_CHAIN_TO_MPU) { + timeout = get_up_timeout_val(mod, cmd); + if (hinic3_pcie_itf_id(hwdev) != SPU_HOST_ID) + ret = hinic3_msg_to_mgmt_api_chain_sync(hwdev, mod, cmd, buf_in, + (u16)in_size, buf_out, + (u16 *)out_size, timeout); + else + ret = hinic3_msg_to_mgmt_sync(hwdev, mod, cmd, buf_in, (u16)in_size, + buf_out, (u16 *)out_size, timeout, + HINIC3_CHANNEL_DEFAULT); + if (ret) { + pr_err("Message to mgmt api chain cpu return fail, mod: %d, cmd: %u\n", + mod, cmd); + return ret; + } + } else { + pr_err("Unsupported api_type %d\n", nt_msg->mpu_cmd.api_type); + return -EINVAL; + } + + return ret; +} + +int send_to_npu(void *hwdev, struct msg_module *nt_msg, + void *buf_in, u32 in_size, void *buf_out, u32 *out_size) +{ + int ret = 0; + u8 cmd; + enum hinic3_mod_type mod; + + mod = (enum hinic3_mod_type)nt_msg->npu_cmd.mod; + cmd = nt_msg->npu_cmd.cmd; + + if (nt_msg->npu_cmd.direct_resp) { + ret = hinic3_cmdq_direct_resp(hwdev, mod, cmd, + buf_in, buf_out, 0, + HINIC3_CHANNEL_DEFAULT); + if (ret) + pr_err("Send direct cmdq failed, err: %d\n", ret); + } else { + ret = hinic3_cmdq_detail_resp(hwdev, mod, cmd, buf_in, buf_out, + NULL, 0, HINIC3_CHANNEL_DEFAULT); + if (ret) + pr_err("Send detail cmdq failed, err: %d\n", ret); + } + + return ret; +} + +static int sm_rd16(void *hwdev, u32 id, u8 instance, + u8 node, struct sm_out_st *buf_out) +{ + u16 val1; + int ret; + + ret = hinic3_sm_ctr_rd16(hwdev, node, instance, id, &val1); + if (ret != 0) { + pr_err("Get sm ctr information (16 bits)failed!\n"); + val1 = 0xffff; + } + + buf_out->val1 = val1; + + return ret; +} + +static int sm_rd32(void *hwdev, u32 id, u8 instance, + u8 node, struct sm_out_st *buf_out) +{ + u32 val1; + int ret; + + ret = hinic3_sm_ctr_rd32(hwdev, node, instance, id, &val1); + if (ret) { + pr_err("Get sm ctr information (32 bits)failed!\n"); + val1 = ~0; + } + + buf_out->val1 = val1; + + return ret; +} + +static int sm_rd32_clear(void *hwdev, u32 id, u8 instance, + u8 node, struct sm_out_st *buf_out) +{ + u32 val1; + int ret; + + ret = hinic3_sm_ctr_rd32_clear(hwdev, node, instance, id, &val1); + if (ret) { + pr_err("Get sm ctr clear information(32 bits) failed!\n"); + val1 = ~0; + } + + buf_out->val1 = val1; + + return ret; +} + +static int sm_rd64_pair(void *hwdev, u32 id, u8 instance, + u8 node, struct sm_out_st *buf_out) +{ + u64 val1 = 0, val2 = 0; + int ret; + + ret = hinic3_sm_ctr_rd64_pair(hwdev, node, instance, id, &val1, &val2); + if (ret) { + pr_err("Get sm ctr information (64 bits pair)failed!\n"); + val1 = ~0; + val2 = ~0; + } + + buf_out->val1 = val1; + buf_out->val2 = val2; + + return ret; +} + +static int sm_rd64_pair_clear(void *hwdev, u32 id, u8 instance, + u8 node, struct sm_out_st *buf_out) +{ + u64 val1 = 0; + u64 val2 = 0; + int ret; + + ret = hinic3_sm_ctr_rd64_pair_clear(hwdev, node, instance, id, &val1, + &val2); + if (ret) { + pr_err("Get 
sm ctr clear information(64 bits pair) failed!\n"); + val1 = ~0; + val2 = ~0; + } + + buf_out->val1 = val1; + buf_out->val2 = val2; + + return ret; +} + +static int sm_rd64(void *hwdev, u32 id, u8 instance, + u8 node, struct sm_out_st *buf_out) +{ + u64 val1; + int ret; + + ret = hinic3_sm_ctr_rd64(hwdev, node, instance, id, &val1); + if (ret) { + pr_err("Get sm ctr information (64 bits)failed!\n"); + val1 = ~0; + } + buf_out->val1 = val1; + + return ret; +} + +static int sm_rd64_clear(void *hwdev, u32 id, u8 instance, + u8 node, struct sm_out_st *buf_out) +{ + u64 val1; + int ret; + + ret = hinic3_sm_ctr_rd64_clear(hwdev, node, instance, id, &val1); + if (ret) { + pr_err("Get sm ctr clear information(64 bits) failed!\n"); + val1 = ~0; + } + buf_out->val1 = val1; + + return ret; +} + +typedef int (*sm_module)(void *hwdev, u32 id, u8 instance, + u8 node, struct sm_out_st *buf_out); + +struct sm_module_handle { + enum sm_cmd_type sm_cmd_name; + sm_module sm_func; +}; + +const struct sm_module_handle sm_module_cmd_handle[] = { + {SM_CTR_RD16, sm_rd16}, + {SM_CTR_RD32, sm_rd32}, + {SM_CTR_RD64_PAIR, sm_rd64_pair}, + {SM_CTR_RD64, sm_rd64}, + {SM_CTR_RD32_CLEAR, sm_rd32_clear}, + {SM_CTR_RD64_PAIR_CLEAR, sm_rd64_pair_clear}, + {SM_CTR_RD64_CLEAR, sm_rd64_clear} +}; + +int send_to_sm(void *hwdev, struct msg_module *nt_msg, + void *buf_in, u32 in_size, void *buf_out, u32 *out_size) +{ + struct sm_in_st *sm_in = buf_in; + struct sm_out_st *sm_out = buf_out; + u32 msg_formate = nt_msg->msg_formate; + int index, num_cmds = sizeof(sm_module_cmd_handle) / + sizeof(sm_module_cmd_handle[0]); + int ret = 0; + + if (!buf_in || !buf_out || in_size != sizeof(*sm_in) || *out_size != sizeof(*sm_out)) { + pr_err("Unexpect out buf size :%u, in buf size: %u\n", + *out_size, in_size); + return -EINVAL; + } + + for (index = 0; index < num_cmds; index++) { + if (msg_formate != sm_module_cmd_handle[index].sm_cmd_name) + continue; + + ret = sm_module_cmd_handle[index].sm_func(hwdev, (u32)sm_in->id, + (u8)sm_in->instance, + (u8)sm_in->node, sm_out); + break; + } + + if (index == num_cmds) { + pr_err("Can't find callback for %d\n", msg_formate); + return -EINVAL; + } + + if (ret != 0) + pr_err("Get sm information fail, id:%u, instance:%u, node:%u\n", + sm_in->id, sm_in->instance, sm_in->node); + + *out_size = sizeof(struct sm_out_st); + + return ret; +} + diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_mt.h b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_mt.h new file mode 100644 index 000000000000..9330200823b9 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_mt.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC3_HW_MT_H +#define HINIC3_HW_MT_H + +#include "hinic3_lld.h" + +struct sm_in_st { + int node; + int id; + int instance; +}; + +struct sm_out_st { + u64 val1; + u64 val2; +}; + +struct up_log_msg_st { + u32 rd_len; + u32 addr; +}; + +struct csr_write_st { + u32 rd_len; + u32 addr; + u8 *data; +}; + +int get_func_type(struct hinic3_lld_dev *lld_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size); + +int get_func_id(struct hinic3_lld_dev *lld_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size); + +int get_hw_driver_stats(struct hinic3_lld_dev *lld_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size); + +int clear_hw_driver_stats(struct hinic3_lld_dev *lld_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size); + +int 
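+ /* Driving send_to_sm() with the structures from this header (sketch;
+  * the node/id/instance values are made up):
+  *
+  *	struct sm_in_st in = { .node = 5, .id = 0x10, .instance = 1 };
+  *	struct sm_out_st out = { 0 };
+  *	u32 out_size = sizeof(out);
+  *
+  *	nt_msg->msg_formate = SM_CTR_RD64_PAIR;	// picks sm_rd64_pair()
+  *	ret = send_to_sm(hwdev, nt_msg, &in, sizeof(in), &out, &out_size);
+  *	// on success out.val1/out.val2 hold the 64-bit counter pair
+  */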
get_self_test_result(struct hinic3_lld_dev *lld_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size); + +int get_chip_faults_stats(struct hinic3_lld_dev *lld_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwdev.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwdev.c new file mode 100644 index 000000000000..2d29290f59e9 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwdev.c @@ -0,0 +1,2141 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include <linux/time.h> +#include <linux/timex.h> +#include <linux/rtc.h> +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/types.h> +#include <linux/module.h> +#include <linux/completion.h> +#include <linux/semaphore.h> +#include <linux/interrupt.h> +#include <linux/vmalloc.h> + +#include "ossl_knl.h" +#include "hinic3_mt.h" +#include "hinic3_crm.h" +#include "hinic3_hw.h" +#include "hinic3_common.h" +#include "hinic3_csr.h" +#include "hinic3_hwif.h" +#include "hinic3_eqs.h" +#include "hinic3_api_cmd.h" +#include "hinic3_mgmt.h" +#include "hinic3_mbox.h" +#include "hinic3_cmdq.h" +#include "hinic3_hw_cfg.h" +#include "hinic3_hw_comm.h" +#include "hinic3_prof_adap.h" +#include "hinic3_devlink.h" +#include "hinic3_hwdev.h" + +static unsigned int wq_page_order = HINIC3_MAX_WQ_PAGE_SIZE_ORDER; +module_param(wq_page_order, uint, 0444); +MODULE_PARM_DESC(wq_page_order, "Set wq page size order, wq page size is 4K * (2 ^ wq_page_order) - default is 8"); + +enum hinic3_pcie_nosnoop { + HINIC3_PCIE_SNOOP = 0, + HINIC3_PCIE_NO_SNOOP = 1, +}; + +enum hinic3_pcie_tph { + HINIC3_PCIE_TPH_DISABLE = 0, + HINIC3_PCIE_TPH_ENABLE = 1, +}; + +#define HINIC3_DMA_ATTR_INDIR_IDX_SHIFT 0 + +#define HINIC3_DMA_ATTR_INDIR_IDX_MASK 0x3FF + +#define HINIC3_DMA_ATTR_INDIR_IDX_SET(val, member) \ + (((u32)(val) & HINIC3_DMA_ATTR_INDIR_##member##_MASK) << \ + HINIC3_DMA_ATTR_INDIR_##member##_SHIFT) + +#define HINIC3_DMA_ATTR_INDIR_IDX_CLEAR(val, member) \ + ((val) & (~(HINIC3_DMA_ATTR_INDIR_##member##_MASK \ + << HINIC3_DMA_ATTR_INDIR_##member##_SHIFT))) + +#define HINIC3_DMA_ATTR_ENTRY_ST_SHIFT 0 +#define HINIC3_DMA_ATTR_ENTRY_AT_SHIFT 8 +#define HINIC3_DMA_ATTR_ENTRY_PH_SHIFT 10 +#define HINIC3_DMA_ATTR_ENTRY_NO_SNOOPING_SHIFT 12 +#define HINIC3_DMA_ATTR_ENTRY_TPH_EN_SHIFT 13 + +#define HINIC3_DMA_ATTR_ENTRY_ST_MASK 0xFF +#define HINIC3_DMA_ATTR_ENTRY_AT_MASK 0x3 +#define HINIC3_DMA_ATTR_ENTRY_PH_MASK 0x3 +#define HINIC3_DMA_ATTR_ENTRY_NO_SNOOPING_MASK 0x1 +#define HINIC3_DMA_ATTR_ENTRY_TPH_EN_MASK 0x1 + +#define HINIC3_DMA_ATTR_ENTRY_SET(val, member) \ + (((u32)(val) & HINIC3_DMA_ATTR_ENTRY_##member##_MASK) << \ + HINIC3_DMA_ATTR_ENTRY_##member##_SHIFT) + +#define HINIC3_DMA_ATTR_ENTRY_CLEAR(val, member) \ + ((val) & (~(HINIC3_DMA_ATTR_ENTRY_##member##_MASK \ + << HINIC3_DMA_ATTR_ENTRY_##member##_SHIFT))) + +#define HINIC3_PCIE_ST_DISABLE 0 +#define HINIC3_PCIE_AT_DISABLE 0 +#define HINIC3_PCIE_PH_DISABLE 0 + +#define PCIE_MSIX_ATTR_ENTRY 0 + +#define HINIC3_CHIP_PRESENT 1 +#define HINIC3_CHIP_ABSENT 0 + +#define HINIC3_DEAULT_EQ_MSIX_PENDING_LIMIT 0 +#define HINIC3_DEAULT_EQ_MSIX_COALESC_TIMER_CFG 0xFF +#define HINIC3_DEAULT_EQ_MSIX_RESEND_TIMER_CFG 7 + +#define HINIC3_HWDEV_WQ_NAME "hinic3_hardware" +#define HINIC3_WQ_MAX_REQ 10 + +#define SLAVE_HOST_STATUS_CLEAR(host_id, val) ((val) & (~(1U << (host_id)))) +#define 
SLAVE_HOST_STATUS_SET(host_id, enable) (((u8)(enable) & 1U) << (host_id)) +#define SLAVE_HOST_STATUS_GET(host_id, val) (!!((val) & (1U << (host_id)))) + +void set_slave_host_enable(void *hwdev, u8 host_id, bool enable) +{ + u32 reg_val; + struct hinic3_hwdev *dev = (struct hinic3_hwdev *)hwdev; + + if (HINIC3_FUNC_TYPE(dev) != TYPE_PPF) + return; + + reg_val = hinic3_hwif_read_reg(dev->hwif, HINIC3_MULT_HOST_SLAVE_STATUS_ADDR); + + reg_val = SLAVE_HOST_STATUS_CLEAR(host_id, reg_val); + reg_val |= SLAVE_HOST_STATUS_SET(host_id, enable); + hinic3_hwif_write_reg(dev->hwif, HINIC3_MULT_HOST_SLAVE_STATUS_ADDR, reg_val); + + sdk_info(dev->dev_hdl, "Set slave host %d status %d, reg value: 0x%x\n", + host_id, enable, reg_val); +} + +int hinic3_get_slave_host_enable(void *hwdev, u8 host_id, u8 *slave_en) +{ + struct hinic3_hwdev *dev = hwdev; + + u32 reg_val; + + if (HINIC3_FUNC_TYPE(dev) != TYPE_PPF) { + sdk_warn(dev->dev_hdl, "hwdev should be ppf\n"); + return -EINVAL; + } + + reg_val = hinic3_hwif_read_reg(dev->hwif, HINIC3_MULT_HOST_SLAVE_STATUS_ADDR); + *slave_en = SLAVE_HOST_STATUS_GET(host_id, reg_val); + + return 0; +} +EXPORT_SYMBOL(hinic3_get_slave_host_enable); + +int hinic3_get_slave_bitmap(void *hwdev, u8 *slave_host_bitmap) +{ + struct hinic3_hwdev *dev = hwdev; + struct service_cap *cap = &dev->cfg_mgmt->svc_cap; + + if (HINIC3_FUNC_TYPE(dev) != TYPE_PPF) { + sdk_warn(dev->dev_hdl, "hwdev should be ppf\n"); + return -EINVAL; + } + + *slave_host_bitmap = cap->host_valid_bitmap & (~(1U << cap->master_host_id)); + + return 0; +} +EXPORT_SYMBOL(hinic3_get_slave_bitmap); + +static void set_func_host_mode(struct hinic3_hwdev *hwdev, enum hinic3_func_mode mode) +{ + switch (mode) { + case FUNC_MOD_MULTI_BM_MASTER: + sdk_info(hwdev->dev_hdl, "Detect multi-host BM master host\n"); + hwdev->func_mode = FUNC_MOD_MULTI_BM_MASTER; + break; + case FUNC_MOD_MULTI_BM_SLAVE: + sdk_info(hwdev->dev_hdl, "Detect multi-host BM slave host\n"); + hwdev->func_mode = FUNC_MOD_MULTI_BM_SLAVE; + break; + case FUNC_MOD_MULTI_VM_MASTER: + sdk_info(hwdev->dev_hdl, "Detect multi-host VM master host\n"); + hwdev->func_mode = FUNC_MOD_MULTI_VM_MASTER; + break; + case FUNC_MOD_MULTI_VM_SLAVE: + sdk_info(hwdev->dev_hdl, "Detect multi-host VM slave host\n"); + hwdev->func_mode = FUNC_MOD_MULTI_VM_SLAVE; + break; + default: + hwdev->func_mode = FUNC_MOD_NORMAL_HOST; + break; + } +} + +static void hinic3_init_host_mode_pre(struct hinic3_hwdev *hwdev) +{ + struct service_cap *cap = &hwdev->cfg_mgmt->svc_cap; + u8 host_id = hwdev->hwif->attr.pci_intf_idx; + + if (HINIC3_FUNC_TYPE(hwdev) == TYPE_VF) { + set_func_host_mode(hwdev, FUNC_MOD_NORMAL_HOST); + return; + } + + switch (cap->srv_multi_host_mode) { + case HINIC3_SDI_MODE_BM: + if (host_id == cap->master_host_id) + set_func_host_mode(hwdev, FUNC_MOD_MULTI_BM_MASTER); + else + set_func_host_mode(hwdev, FUNC_MOD_MULTI_BM_SLAVE); + break; + case HINIC3_SDI_MODE_VM: + if (host_id == cap->master_host_id) + set_func_host_mode(hwdev, FUNC_MOD_MULTI_VM_MASTER); + else + set_func_host_mode(hwdev, FUNC_MOD_MULTI_VM_SLAVE); + break; + default: + set_func_host_mode(hwdev, FUNC_MOD_NORMAL_HOST); + break; + } +} + +static int hinic3_multi_host_init(struct hinic3_hwdev *hwdev) +{ + if (!IS_MULTI_HOST(hwdev) || !HINIC3_IS_PPF(hwdev)) + return 0; + + if (IS_SLAVE_HOST(hwdev)) + set_slave_host_enable(hwdev, hinic3_pcie_itf_id(hwdev), true); + + return 0; +} + +static int hinic3_multi_host_free(struct hinic3_hwdev *hwdev) +{ + if (!IS_MULTI_HOST(hwdev) || !HINIC3_IS_PPF(hwdev)) + return 
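+ /* Worked example for the SLAVE_HOST_STATUS_* macros above (values
+  * illustrative): with reg_val = 0b0101 and host_id = 2,
+  *
+  *	SLAVE_HOST_STATUS_GET(2, 0b0101)   -> !!(0b0101 & 0b0100) = 1
+  *	SLAVE_HOST_STATUS_CLEAR(2, 0b0101) -> 0b0101 & ~0b0100	  = 0b0001
+  *	SLAVE_HOST_STATUS_SET(2, true)	   -> (1 & 1) << 2	  = 0b0100
+  *
+  * so set_slave_host_enable() rewrites exactly one host's bit and leaves
+  * the rest of the register untouched.
+  */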
0; + + if (IS_SLAVE_HOST(hwdev)) + set_slave_host_enable(hwdev, hinic3_pcie_itf_id(hwdev), false); + + return 0; +} + +static u8 hinic3_nic_sw_aeqe_handler(void *hwdev, u8 event, u8 *data) +{ + struct hinic3_hwdev *dev = hwdev; + + if (!dev) + return 0; + + sdk_err(dev->dev_hdl, "Received nic ucode aeq event type: 0x%x, data: 0x%llx\n", + event, *((u64 *)data)); + + if (event < HINIC3_NIC_FATAL_ERROR_MAX) + atomic_inc(&dev->hw_stats.nic_ucode_event_stats[event]); + + return 0; +} + +static void hinic3_init_heartbeat_detect(struct hinic3_hwdev *hwdev); +static void hinic3_destroy_heartbeat_detect(struct hinic3_hwdev *hwdev); + +typedef void (*mgmt_event_cb)(void *handle, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); + +struct mgmt_event_handle { + u16 cmd; + mgmt_event_cb proc; +}; + +static int pf_handle_vf_comm_mbox(void *pri_handle, + u16 vf_id, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) +{ + struct hinic3_hwdev *hwdev = pri_handle; + + if (!hwdev) + return -EINVAL; + + sdk_warn(hwdev->dev_hdl, "Unsupported vf mbox event %u to process\n", + cmd); + + return 0; +} + +static int vf_handle_pf_comm_mbox(void *pri_handle, + u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) +{ + struct hinic3_hwdev *hwdev = pri_handle; + + if (!hwdev) + return -EINVAL; + + sdk_warn(hwdev->dev_hdl, "Unsupported pf mbox event %u to process\n", + cmd); + return 0; +} + +static void chip_fault_show(struct hinic3_hwdev *hwdev, + struct hinic3_fault_event *event) +{ + char fault_level[FAULT_LEVEL_MAX][FAULT_SHOW_STR_LEN + 1] = { + "fatal", "reset", "host", "flr", "general", "suggestion"}; + char level_str[FAULT_SHOW_STR_LEN + 1]; + u8 level; + + memset(level_str, 0, FAULT_SHOW_STR_LEN + 1); + level = event->event.chip.err_level; + if (level < FAULT_LEVEL_MAX) + strncpy(level_str, fault_level[level], + FAULT_SHOW_STR_LEN); + else + strncpy(level_str, "Unknown", FAULT_SHOW_STR_LEN); + + if (level == FAULT_LEVEL_SERIOUS_FLR) + dev_err(hwdev->dev_hdl, "err_level: %u [%s], flr func_id: %u\n", + level, level_str, event->event.chip.func_id); + + dev_err(hwdev->dev_hdl, + "Module_id: 0x%x, err_type: 0x%x, err_level: %u[%s], err_csr_addr: 0x%08x, err_csr_value: 0x%08x\n", + event->event.chip.node_id, + event->event.chip.err_type, level, level_str, + event->event.chip.err_csr_addr, + event->event.chip.err_csr_value); +} + +static void fault_report_show(struct hinic3_hwdev *hwdev, + struct hinic3_fault_event *event) +{ + char fault_type[FAULT_TYPE_MAX][FAULT_SHOW_STR_LEN + 1] = { + "chip", "ucode", "mem rd timeout", "mem wr timeout", + "reg rd timeout", "reg wr timeout", "phy fault", "tsensor fault"}; + char type_str[FAULT_SHOW_STR_LEN + 1] = {0}; + struct fault_event_stats *fault = NULL; + + sdk_err(hwdev->dev_hdl, "Fault event report received, func_id: %u\n", + hinic3_global_func_id(hwdev)); + + fault = &hwdev->hw_stats.fault_event_stats; + + if (event->type < FAULT_TYPE_MAX) { + strncpy(type_str, fault_type[event->type], sizeof(type_str)); + atomic_inc(&fault->fault_type_stat[event->type]); + } else { + strncpy(type_str, "Unknown", sizeof(type_str)); + } + + sdk_err(hwdev->dev_hdl, "Fault type: %u [%s]\n", event->type, type_str); + /* 0, 1, 2 and 3 word Represents array event->event.val index */ + sdk_err(hwdev->dev_hdl, "Fault val[0]: 0x%08x, val[1]: 0x%08x, val[2]: 0x%08x, val[3]: 0x%08x\n", + event->event.val[0x0], event->event.val[0x1], + event->event.val[0x2], event->event.val[0x3]); + + hinic3_show_chip_err_info(hwdev); + + switch (event->type) { + case 
FAULT_TYPE_CHIP: + chip_fault_show(hwdev, event); + break; + case FAULT_TYPE_UCODE: + sdk_err(hwdev->dev_hdl, "Cause_id: %u, core_id: %u, c_id: %u, epc: 0x%08x\n", + event->event.ucode.cause_id, event->event.ucode.core_id, + event->event.ucode.c_id, event->event.ucode.epc); + break; + case FAULT_TYPE_MEM_RD_TIMEOUT: + case FAULT_TYPE_MEM_WR_TIMEOUT: + sdk_err(hwdev->dev_hdl, "Err_csr_ctrl: 0x%08x, err_csr_data: 0x%08x, ctrl_tab: 0x%08x, mem_index: 0x%08x\n", + event->event.mem_timeout.err_csr_ctrl, + event->event.mem_timeout.err_csr_data, + event->event.mem_timeout.ctrl_tab, event->event.mem_timeout.mem_index); + break; + case FAULT_TYPE_REG_RD_TIMEOUT: + case FAULT_TYPE_REG_WR_TIMEOUT: + sdk_err(hwdev->dev_hdl, "Err_csr: 0x%08x\n", event->event.reg_timeout.err_csr); + break; + case FAULT_TYPE_PHY_FAULT: + sdk_err(hwdev->dev_hdl, "Op_type: %u, port_id: %u, dev_ad: %u, csr_addr: 0x%08x, op_data: 0x%08x\n", + event->event.phy_fault.op_type, event->event.phy_fault.port_id, + event->event.phy_fault.dev_ad, event->event.phy_fault.csr_addr, + event->event.phy_fault.op_data); + break; + default: + break; + } +} + +static void fault_event_handler(void *dev, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hinic3_cmd_fault_event *fault_event = NULL; + struct hinic3_fault_event *fault = NULL; + struct hinic3_event_info event_info; + struct hinic3_hwdev *hwdev = dev; + u8 fault_src = HINIC3_FAULT_SRC_TYPE_MAX; + u8 fault_level; + + if (in_size != sizeof(*fault_event)) { + sdk_err(hwdev->dev_hdl, "Invalid fault event report, length: %u, should be %ld\n", + in_size, sizeof(*fault_event)); + return; + } + + fault_event = buf_in; + fault_report_show(hwdev, &fault_event->event); + + if (fault_event->event.type == FAULT_TYPE_CHIP) + fault_level = fault_event->event.event.chip.err_level; + else + fault_level = FAULT_LEVEL_FATAL; + + if (hwdev->event_callback) { + event_info.service = EVENT_SRV_COMM; + event_info.type = EVENT_COMM_FAULT; + fault = (void *)event_info.event_data; + memcpy(fault, &fault_event->event, sizeof(struct hinic3_fault_event)); + fault->fault_level = fault_level; + hwdev->event_callback(hwdev->event_pri_handle, &event_info); + } + + if (fault_event->event.type <= FAULT_TYPE_REG_WR_TIMEOUT) + fault_src = fault_event->event.type; + else if (fault_event->event.type == FAULT_TYPE_PHY_FAULT) + fault_src = HINIC3_FAULT_SRC_HW_PHY_FAULT; + + hisdk3_fault_post_process(hwdev, fault_src, fault_level); +} + +static void ffm_event_record(struct hinic3_hwdev *dev, struct dbgtool_k_glb_info *dbgtool_info, + struct ffm_intr_info *intr) +{ + struct rtc_time rctm; + struct timeval txc; + u32 ffm_idx; + u32 last_err_csr_addr; + u32 last_err_csr_value; + + ffm_idx = dbgtool_info->ffm->ffm_num; + last_err_csr_addr = dbgtool_info->ffm->last_err_csr_addr; + last_err_csr_value = dbgtool_info->ffm->last_err_csr_value; + if (ffm_idx < FFM_RECORD_NUM_MAX) { + if (intr->err_csr_addr == last_err_csr_addr && + intr->err_csr_value == last_err_csr_value) { + dbgtool_info->ffm->ffm[ffm_idx - 1].times++; + sdk_err(dev->dev_hdl, "Receive intr same, ffm_idx: %u\n", ffm_idx - 1); + return; + } + sdk_err(dev->dev_hdl, "Receive intr, ffm_idx: %u\n", ffm_idx); + + dbgtool_info->ffm->ffm[ffm_idx].intr_info.node_id = intr->node_id; + dbgtool_info->ffm->ffm[ffm_idx].intr_info.err_level = intr->err_level; + dbgtool_info->ffm->ffm[ffm_idx].intr_info.err_type = intr->err_type; + dbgtool_info->ffm->ffm[ffm_idx].intr_info.err_csr_addr = intr->err_csr_addr; + 
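+ /* Dedup behaviour of ffm_event_record() (sketch, assuming an empty log
+  * and non-zero CSR values): only the most recent (addr, value) pair is
+  * compared, so
+  *
+  *	intr A (0x10, 0x1) -> ffm[0] created, times = 1, ffm_num = 1
+  *	intr A again	   -> ffm[0].times = 2, ffm_num still 1
+  *	intr B (0x20, 0x2) -> ffm[1] created, times = 1, ffm_num = 2
+  *
+  * and an A-B-A sequence occupies three slots; recording stops once
+  * FFM_RECORD_NUM_MAX entries are used.
+  */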
dbgtool_info->ffm->ffm[ffm_idx].intr_info.err_csr_value = intr->err_csr_value; + dbgtool_info->ffm->last_err_csr_addr = intr->err_csr_addr; + dbgtool_info->ffm->last_err_csr_value = intr->err_csr_value; + dbgtool_info->ffm->ffm[ffm_idx].times = 1; + + /* Obtain the current UTC time */ + do_gettimeofday(&txc); + + /* Calculate the time in date value to tm, i.e. GMT + 8, mutiplied by 60 * 60 */ + rtc_time_to_tm((unsigned long)txc.tv_sec + 60 * 60 * 8, &rctm); + + /* tm_year starts from 1900; 0->1900, 1->1901, and so on */ + dbgtool_info->ffm->ffm[ffm_idx].year = (u16)(rctm.tm_year + 1900); + /* tm_mon starts from 0, 0 indicates January, and so on */ + dbgtool_info->ffm->ffm[ffm_idx].mon = (u8)rctm.tm_mon + 1; + dbgtool_info->ffm->ffm[ffm_idx].mday = (u8)rctm.tm_mday; + dbgtool_info->ffm->ffm[ffm_idx].hour = (u8)rctm.tm_hour; + dbgtool_info->ffm->ffm[ffm_idx].min = (u8)rctm.tm_min; + dbgtool_info->ffm->ffm[ffm_idx].sec = (u8)rctm.tm_sec; + + dbgtool_info->ffm->ffm_num++; + } +} + +static void ffm_event_msg_handler(void *hwdev, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct dbgtool_k_glb_info *dbgtool_info = NULL; + struct hinic3_hwdev *dev = hwdev; + struct card_node *card_info = NULL; + struct ffm_intr_info *intr = NULL; + + if (in_size != sizeof(*intr)) { + sdk_err(dev->dev_hdl, "Invalid fault event report, length: %u, should be %ld.\n", + in_size, sizeof(*intr)); + return; + } + + intr = buf_in; + + sdk_err(dev->dev_hdl, "node_id: 0x%x, err_type: 0x%x, err_level: %u, err_csr_addr: 0x%08x, err_csr_value: 0x%08x\n", + intr->node_id, intr->err_type, intr->err_level, + intr->err_csr_addr, intr->err_csr_value); + + hinic3_show_chip_err_info(hwdev); + + card_info = dev->chip_node; + dbgtool_info = card_info->dbgtool_info; + + *out_size = sizeof(*intr); + + if (!dbgtool_info) + return; + + if (!dbgtool_info->ffm) + return; + + ffm_event_record(dev, dbgtool_info, intr); +} + +#define X_CSR_INDEX 30 + +static void sw_watchdog_timeout_info_show(struct hinic3_hwdev *hwdev, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct comm_info_sw_watchdog *watchdog_info = buf_in; + u32 stack_len, i, j, tmp; + u32 *dump_addr = NULL; + u64 *reg = NULL; + + if (in_size != sizeof(*watchdog_info)) { + sdk_err(hwdev->dev_hdl, "Invalid mgmt watchdog report, length: %d, should be %ld\n", + in_size, sizeof(*watchdog_info)); + return; + } + + sdk_err(hwdev->dev_hdl, "Mgmt deadloop time: 0x%x 0x%x, task id: 0x%x, sp: 0x%llx\n", + watchdog_info->curr_time_h, watchdog_info->curr_time_l, + watchdog_info->task_id, watchdog_info->sp); + sdk_err(hwdev->dev_hdl, + "Stack current used: 0x%x, peak used: 0x%x, overflow flag: 0x%x, top: 0x%llx, bottom: 0x%llx\n", + watchdog_info->curr_used, watchdog_info->peak_used, + watchdog_info->is_overflow, watchdog_info->stack_top, watchdog_info->stack_bottom); + + sdk_err(hwdev->dev_hdl, "Mgmt pc: 0x%llx, elr: 0x%llx, spsr: 0x%llx, far: 0x%llx, esr: 0x%llx, xzr: 0x%llx\n", + watchdog_info->pc, watchdog_info->elr, watchdog_info->spsr, watchdog_info->far, + watchdog_info->esr, watchdog_info->xzr); /*lint !e10 !e26 */ + + sdk_err(hwdev->dev_hdl, "Mgmt register info\n"); + reg = &watchdog_info->x30; + for (i = 0; i <= X_CSR_INDEX; i++) + sdk_err(hwdev->dev_hdl, "x%02u:0x%llx\n", + X_CSR_INDEX - i, reg[i]); /*lint !e661 !e662 */ + + if (watchdog_info->stack_actlen <= DATA_LEN_1K) { + stack_len = watchdog_info->stack_actlen; + } else { + sdk_err(hwdev->dev_hdl, "Oops stack length: 0x%x is wrong\n", + watchdog_info->stack_actlen); + stack_len = 
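+ /* Dump-size arithmetic used below (sketch, assuming DUMP_16B_PER_LINE
+  * is 16 and DUMP_4_VAR_PER_LINE is 4): for stack_actlen = 100,
+  *
+  *	full lines: 100 / 16 = 6	(96 bytes, four u32 words per line)
+  *	tail words: (100 % 16) / 4 = 1	(one extra u32)
+  *
+  * anything above DATA_LEN_1K has already been clamped to 1KB.
+  */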
DATA_LEN_1K; + } + + sdk_err(hwdev->dev_hdl, "Mgmt dump stack, 16 bytes per line(start from sp)\n"); + for (i = 0; i < (stack_len / DUMP_16B_PER_LINE); i++) { + dump_addr = (u32 *)(watchdog_info->stack_data + (u32)(i * DUMP_16B_PER_LINE)); + sdk_err(hwdev->dev_hdl, "0x%08x 0x%08x 0x%08x 0x%08x\n", + *dump_addr, *(dump_addr + 0x1), *(dump_addr + 0x2), *(dump_addr + 0x3)); + } + + tmp = (stack_len % DUMP_16B_PER_LINE) / DUMP_4_VAR_PER_LINE; + for (j = 0; j < tmp; j++) { + dump_addr = (u32 *)(watchdog_info->stack_data + + (u32)(i * DUMP_16B_PER_LINE + j * DUMP_4_VAR_PER_LINE)); + sdk_err(hwdev->dev_hdl, "0x%08x ", *dump_addr); + } + + *out_size = sizeof(*watchdog_info); + watchdog_info = buf_out; + watchdog_info->head.status = 0; +} + +static void mgmt_watchdog_timeout_event_handler(void *hwdev, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hinic3_event_info event_info = { 0 }; + struct hinic3_hwdev *dev = hwdev; + + sw_watchdog_timeout_info_show(dev, buf_in, in_size, buf_out, out_size); + + if (dev->event_callback) { + event_info.type = EVENT_COMM_MGMT_WATCHDOG; + dev->event_callback(dev->event_pri_handle, &event_info); + } +} + +static void show_exc_info(struct hinic3_hwdev *hwdev, EXC_INFO_S *exc_info) +{ + u32 i; + + /* key information */ + sdk_err(hwdev->dev_hdl, "==================== Exception Info Begin ====================\n"); + sdk_err(hwdev->dev_hdl, "Exception CpuTick : 0x%08x 0x%08x\n", + exc_info->cpu_tick.cnt_hi, exc_info->cpu_tick.cnt_lo); + sdk_err(hwdev->dev_hdl, "Exception Cause : %u\n", exc_info->exc_cause); + sdk_err(hwdev->dev_hdl, "Os Version : %s\n", exc_info->os_ver); + sdk_err(hwdev->dev_hdl, "App Version : %s\n", exc_info->app_ver); + sdk_err(hwdev->dev_hdl, "CPU Type : 0x%08x\n", exc_info->cpu_type); + sdk_err(hwdev->dev_hdl, "CPU ID : 0x%08x\n", exc_info->cpu_id); + sdk_err(hwdev->dev_hdl, "Thread Type : 0x%08x\n", exc_info->thread_type); + sdk_err(hwdev->dev_hdl, "Thread ID : 0x%08x\n", exc_info->thread_id); + sdk_err(hwdev->dev_hdl, "Byte Order : 0x%08x\n", exc_info->byte_order); + sdk_err(hwdev->dev_hdl, "Nest Count : 0x%08x\n", exc_info->nest_cnt); + sdk_err(hwdev->dev_hdl, "Fatal Error Num : 0x%08x\n", exc_info->fatal_errno); + sdk_err(hwdev->dev_hdl, "Current SP : 0x%016llx\n", exc_info->uw_sp); + sdk_err(hwdev->dev_hdl, "Stack Bottom : 0x%016llx\n", exc_info->stack_bottom); + + /* register field */ + sdk_err(hwdev->dev_hdl, "Register contents when exception occur.\n"); + sdk_err(hwdev->dev_hdl, "%-14s: 0x%016llx \t %-14s: 0x%016llx\n", "TTBR0", + exc_info->reg_info.ttbr0, "TTBR1", exc_info->reg_info.ttbr1); + sdk_err(hwdev->dev_hdl, "%-14s: 0x%016llx \t %-14s: 0x%016llx\n", "TCR", + exc_info->reg_info.tcr, "MAIR", exc_info->reg_info.mair); + sdk_err(hwdev->dev_hdl, "%-14s: 0x%016llx \t %-14s: 0x%016llx\n", "SCTLR", + exc_info->reg_info.sctlr, "VBAR", exc_info->reg_info.vbar); + sdk_err(hwdev->dev_hdl, "%-14s: 0x%016llx \t %-14s: 0x%016llx\n", "CURRENTE1", + exc_info->reg_info.current_el, "SP", exc_info->reg_info.sp); + sdk_err(hwdev->dev_hdl, "%-14s: 0x%016llx \t %-14s: 0x%016llx\n", "ELR", + exc_info->reg_info.elr, "SPSR", exc_info->reg_info.spsr); + sdk_err(hwdev->dev_hdl, "%-14s: 0x%016llx \t %-14s: 0x%016llx\n", "FAR", + exc_info->reg_info.far_r, "ESR", exc_info->reg_info.esr); + sdk_err(hwdev->dev_hdl, "%-14s: 0x%016llx\n", "XZR", exc_info->reg_info.xzr); + + for (i = 0; i < XREGS_NUM - 1; i += 0x2) + sdk_err(hwdev->dev_hdl, "XREGS[%02u]%-5s: 0x%016llx \t XREGS[%02u]%-5s: 0x%016llx", + i, " ", exc_info->reg_info.xregs[i], + 
(u32)(i + 0x1U), " ", exc_info->reg_info.xregs[(u32)(i + 0x1U)]); + + sdk_err(hwdev->dev_hdl, "XREGS[%02u]%-5s: 0x%016llx \t ", XREGS_NUM - 1, " ", + exc_info->reg_info.xregs[XREGS_NUM - 1]); +} + +#define FOUR_REG_LEN 16 + +static void mgmt_lastword_report_event_handler(void *hwdev, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + comm_info_up_lastword_s *lastword_info = buf_in; + EXC_INFO_S *exc_info = &lastword_info->stack_info; + u32 stack_len = lastword_info->stack_actlen; + struct hinic3_hwdev *dev = hwdev; + u32 *curr_reg = NULL; + u32 reg_i, cnt; + + if (in_size != sizeof(*lastword_info)) { + sdk_err(dev->dev_hdl, "Invalid mgmt lastword, length: %u, should be %ld\n", + in_size, sizeof(*lastword_info)); + return; + } + + show_exc_info(dev, exc_info); + + /* call stack dump */ + sdk_err(dev->dev_hdl, "Dump stack when exceptioin occurs, 16Bytes per line.\n"); + + cnt = stack_len / FOUR_REG_LEN; + for (reg_i = 0; reg_i < cnt; reg_i++) { + curr_reg = (u32 *)(lastword_info->stack_data + ((u64)(u32)(reg_i * FOUR_REG_LEN))); + sdk_err(dev->dev_hdl, "0x%08x 0x%08x 0x%08x 0x%08x\n", + *curr_reg, *(curr_reg + 0x1), *(curr_reg + 0x2), *(curr_reg + 0x3)); + } + + sdk_err(dev->dev_hdl, "==================== Exception Info End ====================\n"); +} + +const struct mgmt_event_handle mgmt_event_proc[] = { + { + .cmd = COMM_MGMT_CMD_FAULT_REPORT, + .proc = fault_event_handler, + }, + + { + .cmd = COMM_MGMT_CMD_FFM_SET, + .proc = ffm_event_msg_handler, + }, + + { + .cmd = COMM_MGMT_CMD_WATCHDOG_INFO, + .proc = mgmt_watchdog_timeout_event_handler, + }, + + { + .cmd = COMM_MGMT_CMD_LASTWORD_GET, + .proc = mgmt_lastword_report_event_handler, + }, +}; + +static void pf_handle_mgmt_comm_event(void *handle, u16 cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size) +{ + struct hinic3_hwdev *hwdev = handle; + u32 i, event_num = ARRAY_LEN(mgmt_event_proc); + + if (!hwdev) + return; + + for (i = 0; i < event_num; i++) { + if (cmd == mgmt_event_proc[i].cmd) { + if (mgmt_event_proc[i].proc) + mgmt_event_proc[i].proc(handle, buf_in, in_size, + buf_out, out_size); + + return; + } + } + + sdk_warn(hwdev->dev_hdl, "Unsupported mgmt cpu event %u to process\n", + cmd); + *out_size = sizeof(struct mgmt_msg_head); + ((struct mgmt_msg_head *)buf_out)->status = HINIC3_MGMT_CMD_UNSUPPORTED; +} + +static void hinic3_set_chip_present(struct hinic3_hwdev *hwdev) +{ + hwdev->chip_present_flag = HINIC3_CHIP_PRESENT; +} + +static void hinic3_set_chip_absent(struct hinic3_hwdev *hwdev) +{ + sdk_err(hwdev->dev_hdl, "Card not present\n"); + hwdev->chip_present_flag = HINIC3_CHIP_ABSENT; +} + +int hinic3_get_chip_present_flag(const void *hwdev) +{ + if (!hwdev) + return 0; + + return ((struct hinic3_hwdev *)hwdev)->chip_present_flag; +} +EXPORT_SYMBOL(hinic3_get_chip_present_flag); + +void hinic3_force_complete_all(void *dev) +{ + struct hinic3_recv_msg *recv_resp_msg = NULL; + struct hinic3_hwdev *hwdev = dev; + struct hinic3_mbox *func_to_func = NULL; + + spin_lock_bh(&hwdev->channel_lock); + if (hinic3_func_type(hwdev) != TYPE_VF && + test_bit(HINIC3_HWDEV_MGMT_INITED, &hwdev->func_state)) { + recv_resp_msg = &hwdev->pf_to_mgmt->recv_resp_msg_from_mgmt; + spin_lock_bh(&hwdev->pf_to_mgmt->sync_event_lock); + if (hwdev->pf_to_mgmt->event_flag == SEND_EVENT_START) { + complete(&recv_resp_msg->recv_done); + hwdev->pf_to_mgmt->event_flag = SEND_EVENT_TIMEOUT; + } + spin_unlock_bh(&hwdev->pf_to_mgmt->sync_event_lock); + } + + if (test_bit(HINIC3_HWDEV_MBOX_INITED, &hwdev->func_state)) { + func_to_func 
= hwdev->func_to_func; + spin_lock(&func_to_func->mbox_lock); + if (func_to_func->event_flag == EVENT_START) + func_to_func->event_flag = EVENT_TIMEOUT; + spin_unlock(&func_to_func->mbox_lock); + } + + if (test_bit(HINIC3_HWDEV_CMDQ_INITED, &hwdev->func_state)) + hinic3_cmdq_flush_sync_cmd(hwdev); + + spin_unlock_bh(&hwdev->channel_lock); +} +EXPORT_SYMBOL(hinic3_force_complete_all); + +void hinic3_detect_hw_present(void *hwdev) +{ + if (!get_card_present_state((struct hinic3_hwdev *)hwdev)) { + hinic3_set_chip_absent(hwdev); + hinic3_force_complete_all(hwdev); + } +} + +/** + * dma_attr_table_init - initialize the default dma attributes + * @hwdev: the pointer to hw device + **/ +static int dma_attr_table_init(struct hinic3_hwdev *hwdev) +{ + u32 addr, val, dst_attr; + + /* Use indirect access should set entry_idx first */ + addr = HINIC3_CSR_DMA_ATTR_INDIR_IDX_ADDR; + val = hinic3_hwif_read_reg(hwdev->hwif, addr); + val = HINIC3_DMA_ATTR_INDIR_IDX_CLEAR(val, IDX); + + val |= HINIC3_DMA_ATTR_INDIR_IDX_SET(PCIE_MSIX_ATTR_ENTRY, IDX); + + hinic3_hwif_write_reg(hwdev->hwif, addr, val); + + wmb(); /* write index before config */ + + addr = HINIC3_CSR_DMA_ATTR_TBL_ADDR; + val = hinic3_hwif_read_reg(hwdev->hwif, addr); + + dst_attr = HINIC3_DMA_ATTR_ENTRY_SET(HINIC3_PCIE_ST_DISABLE, ST) | + HINIC3_DMA_ATTR_ENTRY_SET(HINIC3_PCIE_AT_DISABLE, AT) | + HINIC3_DMA_ATTR_ENTRY_SET(HINIC3_PCIE_PH_DISABLE, PH) | + HINIC3_DMA_ATTR_ENTRY_SET(HINIC3_PCIE_SNOOP, NO_SNOOPING) | + HINIC3_DMA_ATTR_ENTRY_SET(HINIC3_PCIE_TPH_DISABLE, TPH_EN); + + if (val == dst_attr) + return 0; + + return hinic3_set_dma_attr_tbl(hwdev, PCIE_MSIX_ATTR_ENTRY, HINIC3_PCIE_ST_DISABLE, + HINIC3_PCIE_AT_DISABLE, HINIC3_PCIE_PH_DISABLE, + HINIC3_PCIE_SNOOP, HINIC3_PCIE_TPH_DISABLE); +} + +static int init_aeqs_msix_attr(struct hinic3_hwdev *hwdev) +{ + struct hinic3_aeqs *aeqs = hwdev->aeqs; + struct interrupt_info info = {0}; + struct hinic3_eq *eq = NULL; + int q_id; + int err; + + info.lli_set = 0; + info.interrupt_coalesc_set = 1; + info.pending_limt = HINIC3_DEAULT_EQ_MSIX_PENDING_LIMIT; + info.coalesc_timer_cfg = HINIC3_DEAULT_EQ_MSIX_COALESC_TIMER_CFG; + info.resend_timer_cfg = HINIC3_DEAULT_EQ_MSIX_RESEND_TIMER_CFG; + + for (q_id = aeqs->num_aeqs - 1; q_id >= 0; q_id--) { + eq = &aeqs->aeq[q_id]; + info.msix_index = eq->eq_irq.msix_entry_idx; + err = hinic3_set_interrupt_cfg_direct(hwdev, &info, + HINIC3_CHANNEL_COMM); + if (err) { + sdk_err(hwdev->dev_hdl, "Set msix attr for aeq %d failed\n", + q_id); + return -EFAULT; + } + } + + return 0; +} + +static int init_ceqs_msix_attr(struct hinic3_hwdev *hwdev) +{ + struct hinic3_ceqs *ceqs = hwdev->ceqs; + struct interrupt_info info = {0}; + struct hinic3_eq *eq = NULL; + u16 q_id; + int err; + + info.lli_set = 0; + info.interrupt_coalesc_set = 1; + info.pending_limt = HINIC3_DEAULT_EQ_MSIX_PENDING_LIMIT; + info.coalesc_timer_cfg = HINIC3_DEAULT_EQ_MSIX_COALESC_TIMER_CFG; + info.resend_timer_cfg = HINIC3_DEAULT_EQ_MSIX_RESEND_TIMER_CFG; + + for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) { + eq = &ceqs->ceq[q_id]; + info.msix_index = eq->eq_irq.msix_entry_idx; + err = hinic3_set_interrupt_cfg(hwdev, info, + HINIC3_CHANNEL_COMM); + if (err) { + sdk_err(hwdev->dev_hdl, "Set msix attr for ceq %u failed\n", + q_id); + return -EFAULT; + } + } + + return 0; +} + +static int hinic3_comm_clp_to_mgmt_init(struct hinic3_hwdev *hwdev) +{ + int err; + + if (hinic3_func_type(hwdev) == TYPE_VF || !COMM_SUPPORT_CLP(hwdev)) + return 0; + + err = hinic3_clp_pf_to_mgmt_init(hwdev); + if (err) + return 
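+ /* Indirect-access pattern used by dma_attr_table_init() above (sketch):
+  * the hardware exposes one data window for the whole table, so the
+  * entry index is programmed first and a write barrier orders the pair:
+  *
+  *	hinic3_hwif_write_reg(hwif, HINIC3_CSR_DMA_ATTR_INDIR_IDX_ADDR, idx);
+  *	wmb();	// index must land before the table access
+  *	val = hinic3_hwif_read_reg(hwif, HINIC3_CSR_DMA_ATTR_TBL_ADDR);
+  *
+  * The write side goes through the mgmt channel via
+  * hinic3_set_dma_attr_tbl() and is skipped when the entry already holds
+  * the desired attributes.
+  */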
err; + + return 0; +} + +static void hinic3_comm_clp_to_mgmt_free(struct hinic3_hwdev *hwdev) +{ + if (hinic3_func_type(hwdev) == TYPE_VF || !COMM_SUPPORT_CLP(hwdev)) + return; + + hinic3_clp_pf_to_mgmt_free(hwdev); +} + +static int hinic3_comm_aeqs_init(struct hinic3_hwdev *hwdev) +{ + struct irq_info aeq_irqs[HINIC3_MAX_AEQS] = {{0} }; + u16 num_aeqs, resp_num_irq = 0, i; + int err; + + num_aeqs = HINIC3_HWIF_NUM_AEQS(hwdev->hwif); + if (num_aeqs > HINIC3_MAX_AEQS) { + sdk_warn(hwdev->dev_hdl, "Adjust aeq num to %d\n", + HINIC3_MAX_AEQS); + num_aeqs = HINIC3_MAX_AEQS; + } + err = hinic3_alloc_irqs(hwdev, SERVICE_T_INTF, num_aeqs, aeq_irqs, + &resp_num_irq); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to alloc aeq irqs, num_aeqs: %u\n", + num_aeqs); + return err; + } + + if (resp_num_irq < num_aeqs) { + sdk_warn(hwdev->dev_hdl, "Adjust aeq num to %u\n", + resp_num_irq); + num_aeqs = resp_num_irq; + } + + err = hinic3_aeqs_init(hwdev, num_aeqs, aeq_irqs); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init aeqs\n"); + goto aeqs_init_err; + } + + return 0; + +aeqs_init_err: + for (i = 0; i < num_aeqs; i++) + hinic3_free_irq(hwdev, SERVICE_T_INTF, aeq_irqs[i].irq_id); + + return err; +} + +static void hinic3_comm_aeqs_free(struct hinic3_hwdev *hwdev) +{ + struct irq_info aeq_irqs[HINIC3_MAX_AEQS] = {{0} }; + u16 num_irqs, i; + + hinic3_get_aeq_irqs(hwdev, aeq_irqs, &num_irqs); + + hinic3_aeqs_free(hwdev); + + for (i = 0; i < num_irqs; i++) + hinic3_free_irq(hwdev, SERVICE_T_INTF, aeq_irqs[i].irq_id); +} + +static int hinic3_comm_ceqs_init(struct hinic3_hwdev *hwdev) +{ + struct irq_info ceq_irqs[HINIC3_MAX_CEQS] = {{0} }; + u16 num_ceqs, resp_num_irq = 0, i; + int err; + + num_ceqs = HINIC3_HWIF_NUM_CEQS(hwdev->hwif); + if (num_ceqs > HINIC3_MAX_CEQS) { + sdk_warn(hwdev->dev_hdl, "Adjust ceq num to %d\n", + HINIC3_MAX_CEQS); + num_ceqs = HINIC3_MAX_CEQS; + } + + err = hinic3_alloc_irqs(hwdev, SERVICE_T_INTF, num_ceqs, ceq_irqs, + &resp_num_irq); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to alloc ceq irqs, num_ceqs: %u\n", + num_ceqs); + return err; + } + + if (resp_num_irq < num_ceqs) { + sdk_warn(hwdev->dev_hdl, "Adjust ceq num to %u\n", + resp_num_irq); + num_ceqs = resp_num_irq; + } + + err = hinic3_ceqs_init(hwdev, num_ceqs, ceq_irqs); + if (err) { + sdk_err(hwdev->dev_hdl, + "Failed to init ceqs, err:%d\n", err); + goto ceqs_init_err; + } + + return 0; + +ceqs_init_err: + for (i = 0; i < num_ceqs; i++) + hinic3_free_irq(hwdev, SERVICE_T_INTF, ceq_irqs[i].irq_id); + + return err; +} + +static void hinic3_comm_ceqs_free(struct hinic3_hwdev *hwdev) +{ + struct irq_info ceq_irqs[HINIC3_MAX_CEQS] = {{0} }; + u16 num_irqs; + int i; + + hinic3_get_ceq_irqs(hwdev, ceq_irqs, &num_irqs); + + hinic3_ceqs_free(hwdev); + + for (i = 0; i < num_irqs; i++) + hinic3_free_irq(hwdev, SERVICE_T_INTF, ceq_irqs[i].irq_id); +} + +static int hinic3_comm_func_to_func_init(struct hinic3_hwdev *hwdev) +{ + int err; + + err = hinic3_func_to_func_init(hwdev); + if (err) + return err; + + hinic3_aeq_register_hw_cb(hwdev, hwdev, HINIC3_MBX_FROM_FUNC, + hinic3_mbox_func_aeqe_handler); + hinic3_aeq_register_hw_cb(hwdev, hwdev, HINIC3_MSG_FROM_MGMT_CPU, + hinic3_mgmt_msg_aeqe_handler); + + if (!HINIC3_IS_VF(hwdev)) + hinic3_register_pf_mbox_cb(hwdev, HINIC3_MOD_COMM, + hwdev, + pf_handle_vf_comm_mbox); + else + hinic3_register_vf_mbox_cb(hwdev, HINIC3_MOD_COMM, + hwdev, + vf_handle_pf_comm_mbox); + + set_bit(HINIC3_HWDEV_MBOX_INITED, &hwdev->func_state); + + return 0; +} + +static void 
hinic3_comm_func_to_func_free(struct hinic3_hwdev *hwdev) +{ + spin_lock_bh(&hwdev->channel_lock); + clear_bit(HINIC3_HWDEV_MBOX_INITED, &hwdev->func_state); + spin_unlock_bh(&hwdev->channel_lock); + + hinic3_aeq_unregister_hw_cb(hwdev, HINIC3_MBX_FROM_FUNC); + + if (!HINIC3_IS_VF(hwdev)) { + hinic3_unregister_pf_mbox_cb(hwdev, HINIC3_MOD_COMM); + } else { + hinic3_unregister_vf_mbox_cb(hwdev, HINIC3_MOD_COMM); + + hinic3_aeq_unregister_hw_cb(hwdev, HINIC3_MSG_FROM_MGMT_CPU); + } + + hinic3_func_to_func_free(hwdev); +} + +static int hinic3_comm_pf_to_mgmt_init(struct hinic3_hwdev *hwdev) +{ + int err; + + if (hinic3_func_type(hwdev) == TYPE_VF) + return 0; + + err = hinic3_pf_to_mgmt_init(hwdev); + if (err) + return err; + + hinic3_register_mgmt_msg_cb(hwdev, HINIC3_MOD_COMM, hwdev, + pf_handle_mgmt_comm_event); + + set_bit(HINIC3_HWDEV_MGMT_INITED, &hwdev->func_state); + + return 0; +} + +static void hinic3_comm_pf_to_mgmt_free(struct hinic3_hwdev *hwdev) +{ + if (hinic3_func_type(hwdev) == TYPE_VF) + return; + + spin_lock_bh(&hwdev->channel_lock); + clear_bit(HINIC3_HWDEV_MGMT_INITED, &hwdev->func_state); + spin_unlock_bh(&hwdev->channel_lock); + + hinic3_unregister_mgmt_msg_cb(hwdev, HINIC3_MOD_COMM); + + hinic3_aeq_unregister_hw_cb(hwdev, HINIC3_MSG_FROM_MGMT_CPU); + + hinic3_pf_to_mgmt_free(hwdev); +} + +static int hinic3_comm_cmdqs_init(struct hinic3_hwdev *hwdev) +{ + int err; + + err = hinic3_cmdqs_init(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init cmd queues\n"); + return err; + } + + hinic3_ceq_register_cb(hwdev, hwdev, HINIC3_CMDQ, hinic3_cmdq_ceq_handler); + + err = hinic3_set_cmdq_depth(hwdev, HINIC3_CMDQ_DEPTH); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to set cmdq depth\n"); + goto set_cmdq_depth_err; + } + + set_bit(HINIC3_HWDEV_CMDQ_INITED, &hwdev->func_state); + + return 0; + +set_cmdq_depth_err: + hinic3_cmdqs_free(hwdev); + + return err; +} + +static void hinic3_comm_cmdqs_free(struct hinic3_hwdev *hwdev) +{ + spin_lock_bh(&hwdev->channel_lock); + clear_bit(HINIC3_HWDEV_CMDQ_INITED, &hwdev->func_state); + spin_unlock_bh(&hwdev->channel_lock); + + hinic3_ceq_unregister_cb(hwdev, HINIC3_CMDQ); + hinic3_cmdqs_free(hwdev); +} + +static void hinic3_sync_mgmt_func_state(struct hinic3_hwdev *hwdev) +{ + hinic3_set_pf_status(hwdev->hwif, HINIC3_PF_STATUS_ACTIVE_FLAG); +} + +static void hinic3_unsync_mgmt_func_state(struct hinic3_hwdev *hwdev) +{ + hinic3_set_pf_status(hwdev->hwif, HINIC3_PF_STATUS_INIT); +} + +static int init_basic_attributes(struct hinic3_hwdev *hwdev) +{ + u64 drv_features[COMM_MAX_FEATURE_QWORD] = {HINIC3_DRV_FEATURE_QW0, 0, 0, 0}; + int err, i; + + if (hinic3_func_type(hwdev) == TYPE_PPF) + drv_features[0] |= COMM_F_CHANNEL_DETECT; + + err = hinic3_get_board_info(hwdev, &hwdev->board_info, + HINIC3_CHANNEL_COMM); + if (err) + return err; + + err = hinic3_get_comm_features(hwdev, hwdev->features, + COMM_MAX_FEATURE_QWORD); + if (err) { + sdk_err(hwdev->dev_hdl, "Get comm features failed\n"); + return err; + } + + sdk_info(hwdev->dev_hdl, "Comm hw features: 0x%llx, drv features: 0x%llx\n", + hwdev->features[0], drv_features[0]); + + for (i = 0; i < COMM_MAX_FEATURE_QWORD; i++) + hwdev->features[i] &= drv_features[i]; + + err = hinic3_get_global_attr(hwdev, &hwdev->glb_attr); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to get global attribute\n"); + return err; + } + + sdk_info(hwdev->dev_hdl, + "global attribute: max_host: 0x%x, max_pf: 0x%x, vf_id_start: 0x%x, mgmt node id: 0x%x, cmdq_num: 0x%x\n", + hwdev->glb_attr.max_host_num, 
hwdev->glb_attr.max_pf_num, + hwdev->glb_attr.vf_id_start, + hwdev->glb_attr.mgmt_host_node_id, + hwdev->glb_attr.cmdq_num); + + return 0; +} + +static int init_basic_mgmt_channel(struct hinic3_hwdev *hwdev) +{ + int err; + + err = hinic3_comm_aeqs_init(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init async event queues\n"); + return err; + } + + err = hinic3_comm_func_to_func_init(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init mailbox\n"); + goto func_to_func_init_err; + } + + err = init_aeqs_msix_attr(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init aeqs msix attr\n"); + goto aeqs_msix_attr_init_err; + } + + return 0; + +aeqs_msix_attr_init_err: + hinic3_comm_func_to_func_free(hwdev); + +func_to_func_init_err: + hinic3_comm_aeqs_free(hwdev); + + return err; +} + +static void free_base_mgmt_channel(struct hinic3_hwdev *hwdev) +{ + hinic3_comm_func_to_func_free(hwdev); + hinic3_comm_aeqs_free(hwdev); +} + +static int init_pf_mgmt_channel(struct hinic3_hwdev *hwdev) +{ + int err; + + err = hinic3_comm_clp_to_mgmt_init(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init clp\n"); + return err; + } + + err = hinic3_comm_pf_to_mgmt_init(hwdev); + if (err) { + hinic3_comm_clp_to_mgmt_free(hwdev); + sdk_err(hwdev->dev_hdl, "Failed to init pf to mgmt\n"); + return err; + } + + return 0; +} + +static void free_pf_mgmt_channel(struct hinic3_hwdev *hwdev) +{ + hinic3_comm_clp_to_mgmt_free(hwdev); + hinic3_comm_pf_to_mgmt_free(hwdev); +} + +static int init_mgmt_channel_post(struct hinic3_hwdev *hwdev) +{ + int err; + + /* mbox host channel resources will be freed in + * hinic3_func_to_func_free + */ + if (HINIC3_IS_PPF(hwdev)) { + err = hinic3_mbox_init_host_msg_channel(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init mbox host channel\n"); + return err; + } + } + + err = init_pf_mgmt_channel(hwdev); + if (err) + return err; + + return 0; +} + +static void free_mgmt_msg_channel_post(struct hinic3_hwdev *hwdev) +{ + free_pf_mgmt_channel(hwdev); +} + +static int init_cmdqs_channel(struct hinic3_hwdev *hwdev) +{ + int err; + + err = dma_attr_table_init(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init dma attr table\n"); + goto dma_attr_init_err; + } + + err = hinic3_comm_ceqs_init(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init completion event queues\n"); + goto ceqs_init_err; + } + + err = init_ceqs_msix_attr(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init ceqs msix attr\n"); + goto init_ceq_msix_err; + } + + /* set default wq page_size */ + if (wq_page_order > HINIC3_MAX_WQ_PAGE_SIZE_ORDER) { + sdk_info(hwdev->dev_hdl, "wq_page_order exceed limit[0, %d], reset to %d\n", + HINIC3_MAX_WQ_PAGE_SIZE_ORDER, + HINIC3_MAX_WQ_PAGE_SIZE_ORDER); + wq_page_order = HINIC3_MAX_WQ_PAGE_SIZE_ORDER; + } + hwdev->wq_page_size = HINIC3_HW_WQ_PAGE_SIZE * (1U << wq_page_order); + sdk_info(hwdev->dev_hdl, "WQ page size: 0x%x\n", hwdev->wq_page_size); + err = hinic3_set_wq_page_size(hwdev, hinic3_global_func_id(hwdev), + hwdev->wq_page_size, HINIC3_CHANNEL_COMM); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to set wq page size\n"); + goto init_wq_pg_size_err; + } + + err = hinic3_comm_cmdqs_init(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init cmd queues\n"); + goto cmdq_init_err; + } + + return 0; + +cmdq_init_err: + if (HINIC3_FUNC_TYPE(hwdev) != TYPE_VF) + hinic3_set_wq_page_size(hwdev, hinic3_global_func_id(hwdev), + HINIC3_HW_WQ_PAGE_SIZE, + HINIC3_CHANNEL_COMM); +init_wq_pg_size_err: 
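+ /* WQ page-size math used above (sketch; the module parameter text says
+  * the base page size is 4KB): with the default wq_page_order of 8,
+  *
+  *	wq_page_size = HINIC3_HW_WQ_PAGE_SIZE * (1U << 8) = 4096 * 256 = 1MB
+  *
+  * and any order above HINIC3_MAX_WQ_PAGE_SIZE_ORDER is clamped before
+  * hinic3_set_wq_page_size() programs the value.
+  */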
+init_ceq_msix_err: + hinic3_comm_ceqs_free(hwdev); + +ceqs_init_err: +dma_attr_init_err: + + return err; +} + +static void hinic3_free_cmdqs_channel(struct hinic3_hwdev *hwdev) +{ + hinic3_comm_cmdqs_free(hwdev); + + if (HINIC3_FUNC_TYPE(hwdev) != TYPE_VF) + hinic3_set_wq_page_size(hwdev, hinic3_global_func_id(hwdev), + HINIC3_HW_WQ_PAGE_SIZE, HINIC3_CHANNEL_COMM); + + hinic3_comm_ceqs_free(hwdev); +} + +static int hinic3_init_comm_ch(struct hinic3_hwdev *hwdev) +{ + int err; + + err = init_basic_mgmt_channel(hwdev); + if (err) + return err; + + err = hinic3_func_reset(hwdev, hinic3_global_func_id(hwdev), + HINIC3_COMM_RES, HINIC3_CHANNEL_COMM); + if (err) + goto func_reset_err; + + err = init_basic_attributes(hwdev); + if (err) + goto init_basic_attr_err; + + err = init_mgmt_channel_post(hwdev); + if (err) + goto init_mgmt_channel_post_err; + + err = hinic3_set_func_svc_used_state(hwdev, SVC_T_COMM, 1, HINIC3_CHANNEL_COMM); + if (err) + goto set_used_state_err; + + err = init_cmdqs_channel(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init cmdq channel\n"); + goto init_cmdqs_channel_err; + } + + hinic3_sync_mgmt_func_state(hwdev); + + if (HISDK3_F_CHANNEL_LOCK_EN(hwdev)) { + hinic3_mbox_enable_channel_lock(hwdev, true); + hinic3_cmdq_enable_channel_lock(hwdev, true); + } + + err = hinic3_aeq_register_swe_cb(hwdev, hwdev, HINIC3_STATELESS_EVENT, + hinic3_nic_sw_aeqe_handler); + if (err) { + sdk_err(hwdev->dev_hdl, + "Failed to register sw aeqe handler\n"); + goto register_ucode_aeqe_err; + } + + return 0; + +register_ucode_aeqe_err: + hinic3_unsync_mgmt_func_state(hwdev); + hinic3_free_cmdqs_channel(hwdev); +init_cmdqs_channel_err: + hinic3_set_func_svc_used_state(hwdev, SVC_T_COMM, 0, HINIC3_CHANNEL_COMM); +set_used_state_err: + free_mgmt_msg_channel_post(hwdev); +init_mgmt_channel_post_err: +init_basic_attr_err: +func_reset_err: + free_base_mgmt_channel(hwdev); + + return err; +} + +static void hinic3_uninit_comm_ch(struct hinic3_hwdev *hwdev) +{ + hinic3_aeq_unregister_swe_cb(hwdev, HINIC3_STATELESS_EVENT); + + hinic3_unsync_mgmt_func_state(hwdev); + + hinic3_free_cmdqs_channel(hwdev); + + hinic3_set_func_svc_used_state(hwdev, SVC_T_COMM, 0, HINIC3_CHANNEL_COMM); + + free_mgmt_msg_channel_post(hwdev); + + free_base_mgmt_channel(hwdev); +} + +static void hinic3_auto_sync_time_work(struct work_struct *work) +{ + struct delayed_work *delay = to_delayed_work(work); + struct hinic3_hwdev *hwdev = container_of(delay, struct hinic3_hwdev, sync_time_task); + int err; + + err = hinic3_sync_time(hwdev, ossl_get_real_time()); + if (err) + sdk_err(hwdev->dev_hdl, "Synchronize UTC time to firmware failed, errno:%d.\n", + err); + + queue_delayed_work(hwdev->workq, &hwdev->sync_time_task, + msecs_to_jiffies(HINIC3_SYNFW_TIME_PERIOD)); +} + +static void hinic3_auto_channel_detect_work(struct work_struct *work) +{ + struct delayed_work *delay = to_delayed_work(work); + struct hinic3_hwdev *hwdev = container_of(delay, struct hinic3_hwdev, channel_detect_task); + struct card_node *chip_node = NULL; + + hinic3_comm_channel_detect(hwdev); + + chip_node = hwdev->chip_node; + if (!atomic_read(&chip_node->channel_busy_cnt)) + queue_delayed_work(hwdev->workq, &hwdev->channel_detect_task, + msecs_to_jiffies(HINIC3_CHANNEL_DETECT_PERIOD)); +} + +static int hinic3_init_ppf_work(struct hinic3_hwdev *hwdev) +{ + + if (hinic3_func_type(hwdev) != TYPE_PPF) + return 0; + + INIT_DELAYED_WORK(&hwdev->sync_time_task, hinic3_auto_sync_time_work); + queue_delayed_work(hwdev->workq, &hwdev->sync_time_task, + 
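+ /* Self-rearming pattern shared by both PPF works here (sketch): the
+  * work function queues itself again, so it runs at a fixed period until
+  * hinic3_free_ppf_work() calls cancel_delayed_work_sync():
+  *
+  *	static void tick(struct work_struct *work)
+  *	{
+  *		struct delayed_work *dw = to_delayed_work(work);
+  *
+  *		// ... periodic job ...
+  *		queue_delayed_work(wq, dw, msecs_to_jiffies(period));
+  *	}
+  *
+  * The channel-detect variant additionally stops rearming while
+  * channel_busy_cnt is non-zero.
+  */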
msecs_to_jiffies(HINIC3_SYNFW_TIME_PERIOD)); + + if (COMM_SUPPORT_CHANNEL_DETECT(hwdev)) { + INIT_DELAYED_WORK(&hwdev->channel_detect_task, + hinic3_auto_channel_detect_work); + queue_delayed_work(hwdev->workq, &hwdev->channel_detect_task, + msecs_to_jiffies(HINIC3_CHANNEL_DETECT_PERIOD)); + } + + return 0; +} + +static void hinic3_free_ppf_work(struct hinic3_hwdev *hwdev) +{ + if (hinic3_func_type(hwdev) != TYPE_PPF) + return; + + if (COMM_SUPPORT_CHANNEL_DETECT(hwdev)) { + hwdev->features[0] &= ~(COMM_F_CHANNEL_DETECT); + cancel_delayed_work_sync(&hwdev->channel_detect_task); + } + + cancel_delayed_work_sync(&hwdev->sync_time_task); +} + +static int init_hwdev(struct hinic3_init_para *para) +{ + struct hinic3_hwdev *hwdev; + + hwdev = kzalloc(sizeof(*hwdev), GFP_KERNEL); + if (!hwdev) + return -ENOMEM; + + *para->hwdev = hwdev; + hwdev->adapter_hdl = para->adapter_hdl; + hwdev->pcidev_hdl = para->pcidev_hdl; + hwdev->dev_hdl = para->dev_hdl; + hwdev->chip_node = para->chip_node; + hwdev->poll = para->poll; + hwdev->probe_fault_level = para->probe_fault_level; + hwdev->func_state = 0; + + hwdev->chip_fault_stats = vzalloc(HINIC3_CHIP_FAULT_SIZE); + if (!hwdev->chip_fault_stats) + goto alloc_chip_fault_stats_err; + + hwdev->stateful_ref_cnt = 0; + memset(hwdev->features, 0, sizeof(hwdev->features)); + + spin_lock_init(&hwdev->channel_lock); + mutex_init(&hwdev->stateful_mutex); + + return 0; + +alloc_chip_fault_stats_err: + para->probe_fault_level = hwdev->probe_fault_level; + kfree(hwdev); + *para->hwdev = NULL; + return -EFAULT; +} + +int hinic3_init_hwdev(struct hinic3_init_para *para) +{ + struct hinic3_hwdev *hwdev; + int err; + + err = init_hwdev(para); + if (err) + return err; + + hwdev = *para->hwdev; + + err = hinic3_init_hwif(hwdev, para->cfg_reg_base, para->intr_reg_base, para->mgmt_reg_base, + para->db_base_phy, para->db_base, para->db_dwqe_len); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init hwif\n"); + goto init_hwif_err; + } + + hinic3_set_chip_present(hwdev); + + hisdk3_init_profile_adapter(hwdev); + + hwdev->workq = alloc_workqueue(HINIC3_HWDEV_WQ_NAME, WQ_MEM_RECLAIM, HINIC3_WQ_MAX_REQ); + if (!hwdev->workq) { + sdk_err(hwdev->dev_hdl, "Failed to alloc hardware workq\n"); + goto alloc_workq_err; + } + + hinic3_init_heartbeat_detect(hwdev); + + err = init_cfg_mgmt(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init config mgmt\n"); + goto init_cfg_mgmt_err; + } + + err = hinic3_init_comm_ch(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init communication channel\n"); + goto init_comm_ch_err; + } + +#ifdef HAVE_DEVLINK_FLASH_UPDATE_PARAMS + err = hinic3_init_devlink(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init devlink\n"); + goto init_devlink_err; + } +#endif + + err = init_capability(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init capability\n"); + goto init_cap_err; + } + + hinic3_init_host_mode_pre(hwdev); + + err = hinic3_multi_host_init(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init function mode\n"); + goto init_multi_host_fail; + } + + err = hinic3_init_ppf_work(hwdev); + if (err) + goto init_ppf_work_fail; + + err = hinic3_set_comm_features(hwdev, hwdev->features, COMM_MAX_FEATURE_QWORD); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to set comm features\n"); + goto set_feature_err; + } + + return 0; + +set_feature_err: + hinic3_free_ppf_work(hwdev); + +init_ppf_work_fail: + hinic3_multi_host_free(hwdev); + +init_multi_host_fail: + free_capability(hwdev); + +init_cap_err: +#ifdef
HAVE_DEVLINK_FLASH_UPDATE_PARAMS + hinic3_uninit_devlink(hwdev); + +init_devlink_err: +#endif + hinic3_uninit_comm_ch(hwdev); + +init_comm_ch_err: + free_cfg_mgmt(hwdev); + +init_cfg_mgmt_err: + hinic3_destroy_heartbeat_detect(hwdev); + destroy_workqueue(hwdev->workq); + +alloc_workq_err: + hisdk3_deinit_profile_adapter(hwdev); + + hinic3_free_hwif(hwdev); + +init_hwif_err: + spin_lock_deinit(&hwdev->channel_lock); + vfree(hwdev->chip_fault_stats); + para->probe_fault_level = hwdev->probe_fault_level; + kfree(hwdev); + *para->hwdev = NULL; + + return -EFAULT; +} + +void hinic3_free_hwdev(void *hwdev) +{ + struct hinic3_hwdev *dev = hwdev; + u64 drv_features[COMM_MAX_FEATURE_QWORD]; + + memset(drv_features, 0, sizeof(drv_features)); + hinic3_set_comm_features(hwdev, drv_features, COMM_MAX_FEATURE_QWORD); + + hinic3_free_ppf_work(dev); + + hinic3_multi_host_free(dev); + + hinic3_func_rx_tx_flush(hwdev, HINIC3_CHANNEL_COMM); + + free_capability(dev); + +#ifdef HAVE_DEVLINK_FLASH_UPDATE_PARAMS + hinic3_uninit_devlink(dev); +#endif + + hinic3_uninit_comm_ch(dev); + + free_cfg_mgmt(dev); + hinic3_destroy_heartbeat_detect(hwdev); + destroy_workqueue(dev->workq); + + hisdk3_deinit_profile_adapter(hwdev); + hinic3_free_hwif(dev); + + spin_lock_deinit(&dev->channel_lock); + vfree(dev->chip_fault_stats); + + kfree(dev); +} + +void *hinic3_get_pcidev_hdl(void *hwdev) +{ + struct hinic3_hwdev *dev = (struct hinic3_hwdev *)hwdev; + + if (!hwdev) + return NULL; + + return dev->pcidev_hdl; +} + +int hinic3_register_service_adapter(void *hwdev, void *service_adapter, + enum hinic3_service_type type) +{ + struct hinic3_hwdev *dev = hwdev; + + if (!hwdev || !service_adapter || type >= SERVICE_T_MAX) + return -EINVAL; + + if (dev->service_adapter[type]) + return -EINVAL; + + dev->service_adapter[type] = service_adapter; + + return 0; +} +EXPORT_SYMBOL(hinic3_register_service_adapter); + +void hinic3_unregister_service_adapter(void *hwdev, + enum hinic3_service_type type) +{ + struct hinic3_hwdev *dev = hwdev; + + if (!hwdev || type >= SERVICE_T_MAX) + return; + + dev->service_adapter[type] = NULL; +} +EXPORT_SYMBOL(hinic3_unregister_service_adapter); + +void *hinic3_get_service_adapter(void *hwdev, enum hinic3_service_type type) +{ + struct hinic3_hwdev *dev = hwdev; + + if (!hwdev || type >= SERVICE_T_MAX) + return NULL; + + return dev->service_adapter[type]; +} +EXPORT_SYMBOL(hinic3_get_service_adapter); + +int hinic3_dbg_get_hw_stats(const void *hwdev, u8 *hw_stats, const u16 *out_size) +{ + struct hinic3_hw_stats *tmp_hw_stats = (struct hinic3_hw_stats *)hw_stats; + struct card_node *chip_node = NULL; + + if (!hwdev || !hw_stats || !out_size) + return -EINVAL; + + if (*out_size != sizeof(struct hinic3_hw_stats)) { + pr_err("Unexpected out buf size from user: %u, expect: %lu\n", + *out_size, sizeof(struct hinic3_hw_stats)); + return -EFAULT; + } + + memcpy(hw_stats, &((struct hinic3_hwdev *)hwdev)->hw_stats, + sizeof(struct hinic3_hw_stats)); + + chip_node = ((struct hinic3_hwdev *)hwdev)->chip_node; + + atomic_set(&tmp_hw_stats->nic_ucode_event_stats[HINIC3_CHANNEL_BUSY], + atomic_read(&chip_node->channel_busy_cnt)); + + return 0; +} + +u16 hinic3_dbg_clear_hw_stats(void *hwdev) +{ + struct card_node *chip_node = NULL; + struct hinic3_hwdev *dev = hwdev; + + memset((void *)&dev->hw_stats, 0, sizeof(struct hinic3_hw_stats)); + memset((void *)dev->chip_fault_stats, 0, HINIC3_CHIP_FAULT_SIZE); + + chip_node = dev->chip_node; + if (COMM_SUPPORT_CHANNEL_DETECT(dev) && atomic_read(&chip_node->channel_busy_cnt)) {
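+ /* A busy report was latched: clear both busy counters and re-arm the periodic channel-detect work so detection restarts cleanly. */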
+ atomic_set(&chip_node->channel_busy_cnt, 0); + dev->aeq_busy_cnt = 0; + queue_delayed_work(dev->workq, &dev->channel_detect_task, + msecs_to_jiffies(HINIC3_CHANNEL_DETECT_PERIOD)); + } + + return sizeof(struct hinic3_hw_stats); +} + +void hinic3_get_chip_fault_stats(const void *hwdev, u8 *chip_fault_stats, + u32 offset) +{ + if (offset >= HINIC3_CHIP_FAULT_SIZE) { + pr_err("Invalid chip offset value: %u\n", offset); + return; + } + + if (offset + MAX_DRV_BUF_SIZE <= HINIC3_CHIP_FAULT_SIZE) + memcpy(chip_fault_stats, + ((struct hinic3_hwdev *)hwdev)->chip_fault_stats + + offset, MAX_DRV_BUF_SIZE); + else + memcpy(chip_fault_stats, + ((struct hinic3_hwdev *)hwdev)->chip_fault_stats + + offset, HINIC3_CHIP_FAULT_SIZE - offset); +} + +void hinic3_event_register(void *dev, void *pri_handle, + hinic3_event_handler callback) +{ + struct hinic3_hwdev *hwdev = dev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for register event\n"); + return; + } + + hwdev->event_callback = callback; + hwdev->event_pri_handle = pri_handle; +} + +void hinic3_event_unregister(void *dev) +{ + struct hinic3_hwdev *hwdev = dev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for unregister event\n"); + return; + } + + hwdev->event_callback = NULL; + hwdev->event_pri_handle = NULL; +} + +void hinic3_event_callback(void *hwdev, struct hinic3_event_info *event) +{ + struct hinic3_hwdev *dev = hwdev; + + if (!hwdev) { + pr_err("Hwdev pointer is NULL for event callback\n"); + return; + } + + if (!dev->event_callback) { + sdk_info(dev->dev_hdl, "Event callback function not registered\n"); + return; + } + + dev->event_callback(dev->event_pri_handle, event); +} +EXPORT_SYMBOL(hinic3_event_callback); + +void hinic3_set_pcie_order_cfg(void *handle) +{ +} + +void hinic3_disable_mgmt_msg_report(void *hwdev) +{ + struct hinic3_hwdev *hw_dev = (struct hinic3_hwdev *)hwdev; + + hinic3_set_pf_status(hw_dev->hwif, HINIC3_PF_STATUS_INIT); +} + +void hinic3_record_pcie_error(void *hwdev) +{ + struct hinic3_hwdev *dev = (struct hinic3_hwdev *)hwdev; + + if (!hwdev) + return; + + atomic_inc(&dev->hw_stats.fault_event_stats.pcie_fault_stats); +} + +bool hinic3_need_init_stateful_default(void *hwdev) +{ + struct hinic3_hwdev *dev = hwdev; + u16 chip_svc_type = dev->cfg_mgmt->svc_cap.svc_type; + + /* Currently virtio net has to init cqm in PPF. */ + if (hinic3_func_type(hwdev) == TYPE_PPF && (chip_svc_type & CFG_SERVICE_MASK_VIRTIO) != 0) + return true; + + /* Other service types init cqm when the uld calls. 
*/ + return false; +} + +static inline void stateful_uninit(struct hinic3_hwdev *hwdev) +{ + u32 stateful_en; + + + stateful_en = IS_FT_TYPE(hwdev) | IS_RDMA_TYPE(hwdev); + if (stateful_en) + hinic3_ppf_ext_db_deinit(hwdev); +} + +int hinic3_stateful_init(void *hwdev) +{ + struct hinic3_hwdev *dev = hwdev; + int stateful_en; + int err; + + if (!dev) + return -EINVAL; + + if (!hinic3_get_stateful_enable(dev)) + return 0; + + mutex_lock(&dev->stateful_mutex); + if (dev->stateful_ref_cnt++) { + mutex_unlock(&dev->stateful_mutex); + return 0; + } + + stateful_en = (int)(IS_FT_TYPE(dev) | IS_RDMA_TYPE(dev)); + if (stateful_en != 0 && HINIC3_IS_PPF(dev)) { + err = hinic3_ppf_ext_db_init(dev); + if (err) + goto out; + } + + + mutex_unlock(&dev->stateful_mutex); + sdk_info(dev->dev_hdl, "Initialize stateful resource success\n"); + + return 0; + + +out: + dev->stateful_ref_cnt--; + mutex_unlock(&dev->stateful_mutex); + + return err; +} +EXPORT_SYMBOL(hinic3_stateful_init); + +void hinic3_stateful_deinit(void *hwdev) +{ + struct hinic3_hwdev *dev = hwdev; + + if (!dev || !hinic3_get_stateful_enable(dev)) + return; + + mutex_lock(&dev->stateful_mutex); + if (!dev->stateful_ref_cnt || --dev->stateful_ref_cnt) { + mutex_unlock(&dev->stateful_mutex); + return; + } + + stateful_uninit(hwdev); + mutex_unlock(&dev->stateful_mutex); + + sdk_info(dev->dev_hdl, "Clear stateful resource success\n"); +} +EXPORT_SYMBOL(hinic3_stateful_deinit); + +void hinic3_free_stateful(void *hwdev) +{ + struct hinic3_hwdev *dev = hwdev; + + if (!dev || !hinic3_get_stateful_enable(dev) || !dev->stateful_ref_cnt) + return; + + if (!hinic3_need_init_stateful_default(hwdev) || dev->stateful_ref_cnt > 1) + sdk_info(dev->dev_hdl, "Current stateful resource ref is incorrect, ref_cnt:%u\n", + dev->stateful_ref_cnt); + + stateful_uninit(hwdev); + + sdk_info(dev->dev_hdl, "Clear stateful resource success\n"); +} + +int hinic3_get_card_present_state(void *hwdev, bool *card_present_state) +{ + struct hinic3_hwdev *dev = hwdev; + + if (!hwdev || !card_present_state) + return -EINVAL; + + *card_present_state = get_card_present_state(dev); + + return 0; +} +EXPORT_SYMBOL(hinic3_get_card_present_state); + +void hinic3_link_event_stats(void *dev, u8 link) +{ + struct hinic3_hwdev *hwdev = dev; + + if (link) + atomic_inc(&hwdev->hw_stats.link_event_stats.link_up_stats); + else + atomic_inc(&hwdev->hw_stats.link_event_stats.link_down_stats); +} +EXPORT_SYMBOL(hinic3_link_event_stats); + +u8 hinic3_max_pf_num(void *hwdev) +{ + if (!hwdev) + return 0; + + return HINIC3_MAX_PF_NUM((struct hinic3_hwdev *)hwdev); +} +EXPORT_SYMBOL(hinic3_max_pf_num); + +void hinic3_fault_event_report(void *hwdev, u16 src, u16 level) +{ + if (!hwdev) + return; + + sdk_info(((struct hinic3_hwdev *)hwdev)->dev_hdl, "Fault event report, src: %u, level: %u\n", + src, level); + + hisdk3_fault_post_process(hwdev, src, level); +} +EXPORT_SYMBOL(hinic3_fault_event_report); + +void hinic3_probe_success(void *hwdev) +{ + if (!hwdev) + return; + + hisdk3_probe_success(hwdev); +} + +#define HINIC3_CHANNEL_BUSY_TIMEOUT 25 + +static void hinic3_update_channel_status(struct hinic3_hwdev *hwdev) +{ + struct card_node *chip_node = hwdev->chip_node; + + if (!chip_node) + return; + + if (hinic3_func_type(hwdev) != TYPE_PPF || !COMM_SUPPORT_CHANNEL_DETECT(hwdev) || + atomic_read(&chip_node->channel_busy_cnt)) + return; + + if (test_bit(HINIC3_HWDEV_MBOX_INITED, &hwdev->func_state)) { + if (hwdev->last_recv_aeq_cnt != hwdev->cur_recv_aeq_cnt) { + hwdev->aeq_busy_cnt = 0; + 
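+ /* The AEQ consumed new events this period; record the count so the next pass can see further progress. */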
hwdev->last_recv_aeq_cnt = hwdev->cur_recv_aeq_cnt; + } else { + hwdev->aeq_busy_cnt++; + } + + if (hwdev->aeq_busy_cnt > HINIC3_CHANNEL_BUSY_TIMEOUT) { + atomic_inc(&chip_node->channel_busy_cnt); + sdk_err(hwdev->dev_hdl, "Detect channel busy\n"); + } + } +} + +static void hinic3_heartbeat_lost_handler(struct work_struct *work) +{ + struct hinic3_event_info event_info = { 0 }; + struct hinic3_hwdev *hwdev = container_of(work, struct hinic3_hwdev, + heartbeat_lost_work); + u16 src, level; + + atomic_inc(&hwdev->hw_stats.heart_lost_stats); + + if (hwdev->event_callback) { + event_info.service = EVENT_SRV_COMM; + event_info.type = + hwdev->pcie_link_down ? EVENT_COMM_PCIE_LINK_DOWN : + EVENT_COMM_HEART_LOST; + hwdev->event_callback(hwdev->event_pri_handle, &event_info); + } + + if (hwdev->pcie_link_down) { + src = HINIC3_FAULT_SRC_PCIE_LINK_DOWN; + level = FAULT_LEVEL_HOST; + sdk_err(hwdev->dev_hdl, "Detect pcie is link down\n"); + } else { + src = HINIC3_FAULT_SRC_HOST_HEARTBEAT_LOST; + level = FAULT_LEVEL_FATAL; + sdk_err(hwdev->dev_hdl, "Heart lost report received, func_id: %d\n", + hinic3_global_func_id(hwdev)); + } + + hinic3_show_chip_err_info(hwdev); + + hisdk3_fault_post_process(hwdev, src, level); +} + +#define DETECT_PCIE_LINK_DOWN_RETRY 2 +#define HINIC3_HEARTBEAT_START_EXPIRE 5000 +#define HINIC3_HEARTBEAT_PERIOD 1000 + +static bool hinic3_is_hw_abnormal(struct hinic3_hwdev *hwdev) +{ + u32 status; + + if (!hinic3_get_chip_present_flag(hwdev)) + return false; + + status = hinic3_get_heartbeat_status(hwdev); + if (status == HINIC3_PCIE_LINK_DOWN) { + sdk_warn(hwdev->dev_hdl, "Detect BAR register read failed\n"); + hwdev->rd_bar_err_cnt++; + if (hwdev->rd_bar_err_cnt >= DETECT_PCIE_LINK_DOWN_RETRY) { + hinic3_set_chip_absent(hwdev); + hinic3_force_complete_all(hwdev); + hwdev->pcie_link_down = true; + return true; + } + + return false; + } + + if (status) { + hwdev->heartbeat_lost = true; + return true; + } + + hwdev->rd_bar_err_cnt = 0; + + return false; +} + +#ifdef HAVE_TIMER_SETUP +static void hinic3_heartbeat_timer_handler(struct timer_list *t) +#else +static void hinic3_heartbeat_timer_handler(unsigned long data) +#endif +{ +#ifdef HAVE_TIMER_SETUP + struct hinic3_hwdev *hwdev = from_timer(hwdev, t, heartbeat_timer); +#else + struct hinic3_hwdev *hwdev = (struct hinic3_hwdev *)data; +#endif + + if (hinic3_is_hw_abnormal(hwdev)) { + stop_timer(&hwdev->heartbeat_timer); + queue_work(hwdev->workq, &hwdev->heartbeat_lost_work); + } else { + mod_timer(&hwdev->heartbeat_timer, + jiffies + msecs_to_jiffies(HINIC3_HEARTBEAT_PERIOD)); + } + + hinic3_update_channel_status(hwdev); +} + +static void hinic3_init_heartbeat_detect(struct hinic3_hwdev *hwdev) +{ +#ifdef HAVE_TIMER_SETUP + timer_setup(&hwdev->heartbeat_timer, hinic3_heartbeat_timer_handler, 0); +#else + initialize_timer(hwdev->adapter_hdl, &hwdev->heartbeat_timer); + hwdev->heartbeat_timer.data = (u64)hwdev; + hwdev->heartbeat_timer.function = hinic3_heartbeat_timer_handler; +#endif + + hwdev->heartbeat_timer.expires = + jiffies + msecs_to_jiffies(HINIC3_HEARTBEAT_START_EXPIRE); + + add_to_timer(&hwdev->heartbeat_timer, HINIC3_HEARTBEAT_PERIOD); + + INIT_WORK(&hwdev->heartbeat_lost_work, hinic3_heartbeat_lost_handler); +} + +static void hinic3_destroy_heartbeat_detect(struct hinic3_hwdev *hwdev) +{ + destroy_work(&hwdev->heartbeat_lost_work); + stop_timer(&hwdev->heartbeat_timer); + delete_timer(&hwdev->heartbeat_timer); +} + +void hinic3_set_api_stop(void *hwdev) +{ + struct hinic3_hwdev *dev = hwdev; + + if (!hwdev) + 
return; + + dev->chip_present_flag = HINIC3_CHIP_ABSENT; + sdk_info(dev->dev_hdl, "Set card absent\n"); + hinic3_force_complete_all(dev); + sdk_info(dev->dev_hdl, "All messages interacting with the chip will stop\n"); +} diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwdev.h b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwdev.h new file mode 100644 index 000000000000..9f7d8a4859ec --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwdev.h @@ -0,0 +1,175 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC3_HWDEV_H +#define HINIC3_HWDEV_H + +#include "hinic3_mt.h" +#include "hinic3_crm.h" +#include "hinic3_hw.h" +#include "hinic3_profile.h" + +struct cfg_mgmt_info; + +struct hinic3_hwif; +struct hinic3_aeqs; +struct hinic3_ceqs; +struct hinic3_mbox; +struct hinic3_msg_pf_to_mgmt; +struct hinic3_hwdev; + +#define HINIC3_CHANNEL_DETECT_PERIOD (5 * 1000) + +struct hinic3_page_addr { + void *virt_addr; + u64 phys_addr; +}; + +struct mqm_addr_trans_tbl_info { + u32 chunk_num; + u32 search_gpa_num; + u32 page_size; + u32 page_num; + struct hinic3_page_addr *brm_srch_page_addr; +}; + +struct hinic3_devlink { + struct hinic3_hwdev *hwdev; + u8 activate_fw; /* 0 ~ 7 */ + u8 switch_cfg; /* 0 ~ 7 */ +}; + +enum hinic3_func_mode { + /* single host */ + FUNC_MOD_NORMAL_HOST, + /* multi host, bare-metal, sdi side */ + FUNC_MOD_MULTI_BM_MASTER, + /* multi host, bare-metal, host side */ + FUNC_MOD_MULTI_BM_SLAVE, + /* multi host, vm mode, sdi side */ + FUNC_MOD_MULTI_VM_MASTER, + /* multi host, vm mode, host side */ + FUNC_MOD_MULTI_VM_SLAVE, +}; + +#define IS_BMGW_MASTER_HOST(hwdev) \ + ((hwdev)->func_mode == FUNC_MOD_MULTI_BM_MASTER) +#define IS_BMGW_SLAVE_HOST(hwdev) \ + ((hwdev)->func_mode == FUNC_MOD_MULTI_BM_SLAVE) +#define IS_VM_MASTER_HOST(hwdev) \ + ((hwdev)->func_mode == FUNC_MOD_MULTI_VM_MASTER) +#define IS_VM_SLAVE_HOST(hwdev) \ + ((hwdev)->func_mode == FUNC_MOD_MULTI_VM_SLAVE) + +#define IS_MASTER_HOST(hwdev) \ + (IS_BMGW_MASTER_HOST(hwdev) || IS_VM_MASTER_HOST(hwdev)) + +#define IS_SLAVE_HOST(hwdev) \ + (IS_BMGW_SLAVE_HOST(hwdev) || IS_VM_SLAVE_HOST(hwdev)) + +#define IS_MULTI_HOST(hwdev) \ + (IS_BMGW_MASTER_HOST(hwdev) || IS_BMGW_SLAVE_HOST(hwdev) || \ + IS_VM_MASTER_HOST(hwdev) || IS_VM_SLAVE_HOST(hwdev)) + +#define NEED_MBOX_FORWARD(hwdev) IS_BMGW_SLAVE_HOST(hwdev) + +enum hinic3_host_mode_e { + HINIC3_MODE_NORMAL = 0, + HINIC3_SDI_MODE_VM, + HINIC3_SDI_MODE_BM, + HINIC3_SDI_MODE_MAX, +}; + +struct hinic3_hwdev { + void *adapter_hdl; /* pointer to hinic3_pcidev or NDIS_Adapter */ + void *pcidev_hdl; /* pointer to pcidev or Handler */ + void *dev_hdl; /* pointer to pcidev->dev or Handler, for + * sdk_err() or dma_alloc() + */ + + void *service_adapter[SERVICE_T_MAX]; + void *chip_node; + void *ppf_hwdev; + + u32 wq_page_size; + int chip_present_flag; + bool poll; /* use polling mode or int mode */ + u32 rsvd1; + + struct hinic3_hwif *hwif; /* include void __iomem *bar */ + struct comm_global_attr glb_attr; + u64 features[COMM_MAX_FEATURE_QWORD]; + + struct cfg_mgmt_info *cfg_mgmt; + + struct hinic3_cmdqs *cmdqs; + struct hinic3_aeqs *aeqs; + struct hinic3_ceqs *ceqs; + struct hinic3_mbox *func_to_func; + struct hinic3_msg_pf_to_mgmt *pf_to_mgmt; + struct hinic3_clp_pf_to_mgmt *clp_pf_to_mgmt; + + void *cqm_hdl; + struct mqm_addr_trans_tbl_info mqm_att; + struct hinic3_page_addr page_pa0; + struct hinic3_page_addr page_pa1; + u32 stateful_ref_cnt; + u32 rsvd2; + + struct mutex stateful_mutex; /* protect 
cqm init and deinit */ + + struct hinic3_hw_stats hw_stats; + u8 *chip_fault_stats; + + hinic3_event_handler event_callback; + void *event_pri_handle; + + struct hinic3_board_info board_info; + + struct delayed_work sync_time_task; + struct delayed_work channel_detect_task; + struct hisdk3_prof_attr *prof_attr; + struct hinic3_prof_adapter *prof_adap; + + struct workqueue_struct *workq; + + u32 rd_bar_err_cnt; + bool pcie_link_down; + bool heartbeat_lost; + struct timer_list heartbeat_timer; + struct work_struct heartbeat_lost_work; + + ulong func_state; + spinlock_t channel_lock; /* protect channel init and deinit */ + + u16 probe_fault_level; + + struct hinic3_devlink *devlink_dev; + + enum hinic3_func_mode func_mode; + u32 rsvd3; + + u64 cur_recv_aeq_cnt; + u64 last_recv_aeq_cnt; + u16 aeq_busy_cnt; + u64 rsvd4[8]; +}; + +#define HINIC3_DRV_FEATURE_QW0 \ + (COMM_F_API_CHAIN | COMM_F_CLP | COMM_F_MBOX_SEGMENT | \ + COMM_F_CMDQ_NUM | COMM_F_VIRTIO_VQ_SIZE) + +#define HINIC3_MAX_HOST_NUM(hwdev) ((hwdev)->glb_attr.max_host_num) +#define HINIC3_MAX_PF_NUM(hwdev) ((hwdev)->glb_attr.max_pf_num) +#define HINIC3_MGMT_CPU_NODE_ID(hwdev) ((hwdev)->glb_attr.mgmt_host_node_id) + +#define COMM_FEATURE_QW0(hwdev, feature) \ + ((hwdev)->features[0] & COMM_F_##feature) +#define COMM_SUPPORT_API_CHAIN(hwdev) COMM_FEATURE_QW0(hwdev, API_CHAIN) +#define COMM_SUPPORT_CLP(hwdev) COMM_FEATURE_QW0(hwdev, CLP) +#define COMM_SUPPORT_CHANNEL_DETECT(hwdev) COMM_FEATURE_QW0(hwdev, CHANNEL_DETECT) +#define COMM_SUPPORT_MBOX_SEGMENT(hwdev) (hinic3_pcie_itf_id(hwdev) == SPU_HOST_ID) +#define COMM_SUPPORT_CMDQ_NUM(hwdev) COMM_FEATURE_QW0(hwdev, CMDQ_NUM) +#define COMM_SUPPORT_VIRTIO_VQ_SIZE(hwdev) COMM_FEATURE_QW0(hwdev, VIRTIO_VQ_SIZE) + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwif.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwif.c new file mode 100644 index 000000000000..9b749135dbed --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwif.c @@ -0,0 +1,994 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/module.h> + +#include "ossl_knl.h" +#include "hinic3_csr.h" +#include "hinic3_crm.h" +#include "hinic3_hw.h" +#include "hinic3_common.h" +#include "hinic3_hwdev.h" +#include "hinic3_hwif.h" + +#ifndef CONFIG_MODULE_PROF +#define WAIT_HWIF_READY_TIMEOUT 10000 +#else +#define WAIT_HWIF_READY_TIMEOUT 30000 +#endif + +#define HINIC3_WAIT_DOORBELL_AND_OUTBOUND_TIMEOUT 60000 + +#define MAX_MSIX_ENTRY 2048 + +#define DB_IDX(db, db_base) \ + ((u32)(((ulong)(db) - (ulong)(db_base)) / \ + HINIC3_DB_PAGE_SIZE)) + +#define HINIC3_AF0_FUNC_GLOBAL_IDX_SHIFT 0 +#define HINIC3_AF0_P2P_IDX_SHIFT 12 +#define HINIC3_AF0_PCI_INTF_IDX_SHIFT 17 +#define HINIC3_AF0_VF_IN_PF_SHIFT 20 +#define HINIC3_AF0_FUNC_TYPE_SHIFT 28 + +#define HINIC3_AF0_FUNC_GLOBAL_IDX_MASK 0xFFF +#define HINIC3_AF0_P2P_IDX_MASK 0x1F +#define HINIC3_AF0_PCI_INTF_IDX_MASK 0x7 +#define HINIC3_AF0_VF_IN_PF_MASK 0xFF +#define HINIC3_AF0_FUNC_TYPE_MASK 0x1 + +#define HINIC3_AF0_GET(val, member) \ + (((val) >> HINIC3_AF0_##member##_SHIFT) & HINIC3_AF0_##member##_MASK) + +#define HINIC3_AF1_PPF_IDX_SHIFT 0 +#define HINIC3_AF1_AEQS_PER_FUNC_SHIFT 8 +#define HINIC3_AF1_MGMT_INIT_STATUS_SHIFT 30 +#define HINIC3_AF1_PF_INIT_STATUS_SHIFT 31 + +#define HINIC3_AF1_PPF_IDX_MASK 0x3F +#define HINIC3_AF1_AEQS_PER_FUNC_MASK 0x3 +#define 
HINIC3_AF1_MGMT_INIT_STATUS_MASK 0x1 +#define HINIC3_AF1_PF_INIT_STATUS_MASK 0x1 + +#define HINIC3_AF1_GET(val, member) \ + (((val) >> HINIC3_AF1_##member##_SHIFT) & HINIC3_AF1_##member##_MASK) + +#define HINIC3_AF2_CEQS_PER_FUNC_SHIFT 0 +#define HINIC3_AF2_DMA_ATTR_PER_FUNC_SHIFT 9 +#define HINIC3_AF2_IRQS_PER_FUNC_SHIFT 16 + +#define HINIC3_AF2_CEQS_PER_FUNC_MASK 0x1FF +#define HINIC3_AF2_DMA_ATTR_PER_FUNC_MASK 0x7 +#define HINIC3_AF2_IRQS_PER_FUNC_MASK 0x7FF + +#define HINIC3_AF2_GET(val, member) \ + (((val) >> HINIC3_AF2_##member##_SHIFT) & HINIC3_AF2_##member##_MASK) + +#define HINIC3_AF3_GLOBAL_VF_ID_OF_NXT_PF_SHIFT 0 +#define HINIC3_AF3_GLOBAL_VF_ID_OF_PF_SHIFT 16 + +#define HINIC3_AF3_GLOBAL_VF_ID_OF_NXT_PF_MASK 0xFFF +#define HINIC3_AF3_GLOBAL_VF_ID_OF_PF_MASK 0xFFF + +#define HINIC3_AF3_GET(val, member) \ + (((val) >> HINIC3_AF3_##member##_SHIFT) & HINIC3_AF3_##member##_MASK) + +#define HINIC3_AF4_DOORBELL_CTRL_SHIFT 0 +#define HINIC3_AF4_DOORBELL_CTRL_MASK 0x1 + +#define HINIC3_AF4_GET(val, member) \ + (((val) >> HINIC3_AF4_##member##_SHIFT) & HINIC3_AF4_##member##_MASK) + +#define HINIC3_AF4_SET(val, member) \ + (((val) & HINIC3_AF4_##member##_MASK) << HINIC3_AF4_##member##_SHIFT) + +#define HINIC3_AF4_CLEAR(val, member) \ + ((val) & (~(HINIC3_AF4_##member##_MASK << HINIC3_AF4_##member##_SHIFT))) + +#define HINIC3_AF5_OUTBOUND_CTRL_SHIFT 0 +#define HINIC3_AF5_OUTBOUND_CTRL_MASK 0x1 + +#define HINIC3_AF5_GET(val, member) \ + (((val) >> HINIC3_AF5_##member##_SHIFT) & HINIC3_AF5_##member##_MASK) + +#define HINIC3_AF5_SET(val, member) \ + (((val) & HINIC3_AF5_##member##_MASK) << HINIC3_AF5_##member##_SHIFT) + +#define HINIC3_AF5_CLEAR(val, member) \ + ((val) & (~(HINIC3_AF5_##member##_MASK << HINIC3_AF5_##member##_SHIFT))) + +#define HINIC3_AF6_PF_STATUS_SHIFT 0 +#define HINIC3_AF6_PF_STATUS_MASK 0xFFFF + +#define HINIC3_AF6_FUNC_MAX_SQ_SHIFT 23 +#define HINIC3_AF6_FUNC_MAX_SQ_MASK 0x1FF + +#define HINIC3_AF6_MSIX_FLEX_EN_SHIFT 22 +#define HINIC3_AF6_MSIX_FLEX_EN_MASK 0x1 + +#define HINIC3_AF6_SET(val, member) \ + ((((u32)(val)) & HINIC3_AF6_##member##_MASK) << \ + HINIC3_AF6_##member##_SHIFT) + +#define HINIC3_AF6_GET(val, member) \ + (((u32)(val) >> HINIC3_AF6_##member##_SHIFT) & HINIC3_AF6_##member##_MASK) + +#define HINIC3_AF6_CLEAR(val, member) \ + ((u32)(val) & (~(HINIC3_AF6_##member##_MASK << \ + HINIC3_AF6_##member##_SHIFT))) + +#define HINIC3_PPF_ELECT_PORT_IDX_SHIFT 0 + +#define HINIC3_PPF_ELECT_PORT_IDX_MASK 0x3F + +#define HINIC3_PPF_ELECT_PORT_GET(val, member) \ + (((val) >> HINIC3_PPF_ELECT_PORT_##member##_SHIFT) & \ + HINIC3_PPF_ELECT_PORT_##member##_MASK) + +#define HINIC3_PPF_ELECTION_IDX_SHIFT 0 + +#define HINIC3_PPF_ELECTION_IDX_MASK 0x3F + +#define HINIC3_PPF_ELECTION_SET(val, member) \ + (((val) & HINIC3_PPF_ELECTION_##member##_MASK) << \ + HINIC3_PPF_ELECTION_##member##_SHIFT) + +#define HINIC3_PPF_ELECTION_GET(val, member) \ + (((val) >> HINIC3_PPF_ELECTION_##member##_SHIFT) & \ + HINIC3_PPF_ELECTION_##member##_MASK) + +#define HINIC3_PPF_ELECTION_CLEAR(val, member) \ + ((val) & (~(HINIC3_PPF_ELECTION_##member##_MASK << \ + HINIC3_PPF_ELECTION_##member##_SHIFT))) + +#define HINIC3_MPF_ELECTION_IDX_SHIFT 0 + +#define HINIC3_MPF_ELECTION_IDX_MASK 0x1F + +#define HINIC3_MPF_ELECTION_SET(val, member) \ + (((val) & HINIC3_MPF_ELECTION_##member##_MASK) << \ + HINIC3_MPF_ELECTION_##member##_SHIFT) + +#define HINIC3_MPF_ELECTION_GET(val, member) \ + (((val) >> HINIC3_MPF_ELECTION_##member##_SHIFT) & \ + HINIC3_MPF_ELECTION_##member##_MASK) + +#define 
HINIC3_MPF_ELECTION_CLEAR(val, member) \ + ((val) & (~(HINIC3_MPF_ELECTION_##member##_MASK << \ + HINIC3_MPF_ELECTION_##member##_SHIFT))) + +#define HINIC3_GET_REG_FLAG(reg) ((reg) & (~(HINIC3_REGS_FLAG_MAKS))) + +#define HINIC3_GET_REG_ADDR(reg) ((reg) & (HINIC3_REGS_FLAG_MAKS)) + +u32 hinic3_hwif_read_reg(struct hinic3_hwif *hwif, u32 reg) +{ + if (HINIC3_GET_REG_FLAG(reg) == HINIC3_MGMT_REGS_FLAG) + return be32_to_cpu(readl(hwif->mgmt_regs_base + + HINIC3_GET_REG_ADDR(reg))); + else + return be32_to_cpu(readl(hwif->cfg_regs_base + + HINIC3_GET_REG_ADDR(reg))); +} + +void hinic3_hwif_write_reg(struct hinic3_hwif *hwif, u32 reg, u32 val) +{ + if (HINIC3_GET_REG_FLAG(reg) == HINIC3_MGMT_REGS_FLAG) + writel(cpu_to_be32(val), + hwif->mgmt_regs_base + HINIC3_GET_REG_ADDR(reg)); + else + writel(cpu_to_be32(val), + hwif->cfg_regs_base + HINIC3_GET_REG_ADDR(reg)); +} + +bool get_card_present_state(struct hinic3_hwdev *hwdev) +{ + u32 attr1; + + attr1 = hinic3_hwif_read_reg(hwdev->hwif, HINIC3_CSR_FUNC_ATTR1_ADDR); + if (attr1 == HINIC3_PCIE_LINK_DOWN) { + sdk_warn(hwdev->dev_hdl, "Card is not present\n"); + return false; + } + + return true; +} + +/** + * hinic3_get_heartbeat_status - get heart beat status + * @hwdev: the pointer to hw device + * Return: 0 - normal, 1 - heart lost, 0xFFFFFFFF - Pcie link down + **/ +u32 hinic3_get_heartbeat_status(void *hwdev) +{ + u32 attr1; + + if (!hwdev) + return HINIC3_PCIE_LINK_DOWN; + + attr1 = hinic3_hwif_read_reg(((struct hinic3_hwdev *)hwdev)->hwif, + HINIC3_CSR_FUNC_ATTR1_ADDR); + if (attr1 == HINIC3_PCIE_LINK_DOWN) + return attr1; + + return !HINIC3_AF1_GET(attr1, MGMT_INIT_STATUS); +} +EXPORT_SYMBOL(hinic3_get_heartbeat_status); + +#define MIGRATE_HOST_STATUS_CLEAR(host_id, val) ((val) & (~(1U << (host_id)))) +#define MIGRATE_HOST_STATUS_SET(host_id, enable) (((u8)(enable) & 1U) << (host_id)) +#define MIGRATE_HOST_STATUS_GET(host_id, val) (!!((val) & (1U << (host_id)))) + +int hinic3_set_host_migrate_enable(void *hwdev, u8 host_id, bool enable) +{ + struct hinic3_hwdev *dev = hwdev; + + u32 reg_val; + + if (HINIC3_FUNC_TYPE(dev) != TYPE_PPF) { + sdk_warn(dev->dev_hdl, "hwdev should be ppf\n"); + return -EINVAL; + } + + reg_val = hinic3_hwif_read_reg(dev->hwif, HINIC3_MULT_MIGRATE_HOST_STATUS_ADDR); + reg_val = MIGRATE_HOST_STATUS_CLEAR(host_id, reg_val); + reg_val |= MIGRATE_HOST_STATUS_SET(host_id, enable); + + hinic3_hwif_write_reg(dev->hwif, HINIC3_MULT_MIGRATE_HOST_STATUS_ADDR, reg_val); + + sdk_info(dev->dev_hdl, "Set migrate host %d status %d, reg value: 0x%x\n", + host_id, enable, reg_val); + + return 0; +} +EXPORT_SYMBOL(hinic3_set_host_migrate_enable); + +int hinic3_get_host_migrate_enable(void *hwdev, u8 host_id, u8 *migrate_en) +{ + struct hinic3_hwdev *dev = hwdev; + + u32 reg_val; + + if (HINIC3_FUNC_TYPE(dev) != TYPE_PPF) { + sdk_warn(dev->dev_hdl, "hwdev should be ppf\n"); + return -EINVAL; + } + + reg_val = hinic3_hwif_read_reg(dev->hwif, HINIC3_MULT_MIGRATE_HOST_STATUS_ADDR); + *migrate_en = MIGRATE_HOST_STATUS_GET(host_id, reg_val); + + return 0; +} +EXPORT_SYMBOL(hinic3_get_host_migrate_enable); + +static enum hinic3_wait_return check_hwif_ready_handler(void *priv_data) +{ + u32 status; + + status = hinic3_get_heartbeat_status(priv_data); + if (status == HINIC3_PCIE_LINK_DOWN) + return WAIT_PROCESS_ERR; + else if (!status) + return WAIT_PROCESS_CPL; + + return WAIT_PROCESS_WAITING; +} + +static int wait_hwif_ready(struct hinic3_hwdev *hwdev) +{ + int ret; + + ret = hinic3_wait_for_timeout(hwdev, check_hwif_ready_handler, + 
WAIT_HWIF_READY_TIMEOUT, USEC_PER_MSEC); + if (ret == -ETIMEDOUT) { + hwdev->probe_fault_level = FAULT_LEVEL_FATAL; + sdk_err(hwdev->dev_hdl, "Wait for hwif timeout\n"); + } + + return ret; +} + +/** + * set_hwif_attr - set the attributes as members in hwif + * @hwif: the hardware interface of a pci function device + * @attr0: the first attribute that was read from the hw + * @attr1: the second attribute that was read from the hw + * @attr2: the third attribute that was read from the hw + * @attr3: the fourth attribute that was read from the hw + **/ +static void set_hwif_attr(struct hinic3_hwif *hwif, u32 attr0, u32 attr1, + u32 attr2, u32 attr3, u32 attr6) +{ + hwif->attr.func_global_idx = HINIC3_AF0_GET(attr0, FUNC_GLOBAL_IDX); + hwif->attr.port_to_port_idx = HINIC3_AF0_GET(attr0, P2P_IDX); + hwif->attr.pci_intf_idx = HINIC3_AF0_GET(attr0, PCI_INTF_IDX); + hwif->attr.vf_in_pf = HINIC3_AF0_GET(attr0, VF_IN_PF); + hwif->attr.func_type = HINIC3_AF0_GET(attr0, FUNC_TYPE); + + hwif->attr.ppf_idx = HINIC3_AF1_GET(attr1, PPF_IDX); + hwif->attr.num_aeqs = BIT(HINIC3_AF1_GET(attr1, AEQS_PER_FUNC)); + hwif->attr.num_ceqs = (u8)HINIC3_AF2_GET(attr2, CEQS_PER_FUNC); + hwif->attr.num_irqs = HINIC3_AF2_GET(attr2, IRQS_PER_FUNC); + if (hwif->attr.num_irqs > MAX_MSIX_ENTRY) + hwif->attr.num_irqs = MAX_MSIX_ENTRY; + + hwif->attr.num_dma_attr = BIT(HINIC3_AF2_GET(attr2, DMA_ATTR_PER_FUNC)); + + hwif->attr.global_vf_id_of_pf = HINIC3_AF3_GET(attr3, + GLOBAL_VF_ID_OF_PF); + + hwif->attr.num_sq = HINIC3_AF6_GET(attr6, FUNC_MAX_SQ); + hwif->attr.msix_flex_en = HINIC3_AF6_GET(attr6, MSIX_FLEX_EN); +} + +/** + * get_hwif_attr - read and set the attributes as members in hwif + * @hwif: the hardware interface of a pci function device + **/ +static int get_hwif_attr(struct hinic3_hwif *hwif) +{ + u32 addr, attr0, attr1, attr2, attr3, attr6; + + addr = HINIC3_CSR_FUNC_ATTR0_ADDR; + attr0 = hinic3_hwif_read_reg(hwif, addr); + if (attr0 == HINIC3_PCIE_LINK_DOWN) + return -EFAULT; + + addr = HINIC3_CSR_FUNC_ATTR1_ADDR; + attr1 = hinic3_hwif_read_reg(hwif, addr); + if (attr1 == HINIC3_PCIE_LINK_DOWN) + return -EFAULT; + + addr = HINIC3_CSR_FUNC_ATTR2_ADDR; + attr2 = hinic3_hwif_read_reg(hwif, addr); + if (attr2 == HINIC3_PCIE_LINK_DOWN) + return -EFAULT; + + addr = HINIC3_CSR_FUNC_ATTR3_ADDR; + attr3 = hinic3_hwif_read_reg(hwif, addr); + if (attr3 == HINIC3_PCIE_LINK_DOWN) + return -EFAULT; + + addr = HINIC3_CSR_FUNC_ATTR6_ADDR; + attr6 = hinic3_hwif_read_reg(hwif, addr); + if (attr6 == HINIC3_PCIE_LINK_DOWN) + return -EFAULT; + + set_hwif_attr(hwif, attr0, attr1, attr2, attr3, attr6); + + return 0; +} + +void hinic3_set_pf_status(struct hinic3_hwif *hwif, + enum hinic3_pf_status status) +{ + u32 attr6 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR6_ADDR); + + attr6 = HINIC3_AF6_CLEAR(attr6, PF_STATUS); + attr6 |= HINIC3_AF6_SET(status, PF_STATUS); + + if (hwif->attr.func_type == TYPE_VF) + return; + + hinic3_hwif_write_reg(hwif, HINIC3_CSR_FUNC_ATTR6_ADDR, attr6); +} + +enum hinic3_pf_status hinic3_get_pf_status(struct hinic3_hwif *hwif) +{ + u32 attr6 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR6_ADDR); + + return HINIC3_AF6_GET(attr6, PF_STATUS); +} + +static enum hinic3_doorbell_ctrl hinic3_get_doorbell_ctrl_status(struct hinic3_hwif *hwif) +{ + u32 attr4 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR4_ADDR); + + return HINIC3_AF4_GET(attr4, DOORBELL_CTRL); +} + +static enum hinic3_outbound_ctrl hinic3_get_outbound_ctrl_status(struct hinic3_hwif *hwif) +{ + u32 attr5 = hinic3_hwif_read_reg(hwif, 
HINIC3_CSR_FUNC_ATTR5_ADDR); + + return HINIC3_AF5_GET(attr5, OUTBOUND_CTRL); +} + +void hinic3_enable_doorbell(struct hinic3_hwif *hwif) +{ + u32 addr, attr4; + + addr = HINIC3_CSR_FUNC_ATTR4_ADDR; + attr4 = hinic3_hwif_read_reg(hwif, addr); + + attr4 = HINIC3_AF4_CLEAR(attr4, DOORBELL_CTRL); + attr4 |= HINIC3_AF4_SET(ENABLE_DOORBELL, DOORBELL_CTRL); + + hinic3_hwif_write_reg(hwif, addr, attr4); +} + +void hinic3_disable_doorbell(struct hinic3_hwif *hwif) +{ + u32 addr, attr4; + + addr = HINIC3_CSR_FUNC_ATTR4_ADDR; + attr4 = hinic3_hwif_read_reg(hwif, addr); + + attr4 = HINIC3_AF4_CLEAR(attr4, DOORBELL_CTRL); + attr4 |= HINIC3_AF4_SET(DISABLE_DOORBELL, DOORBELL_CTRL); + + hinic3_hwif_write_reg(hwif, addr, attr4); +} + +/** + * set_ppf - try to set hwif as ppf and set the type of hwif in this case + * @hwif: the hardware interface of a pci function device + **/ +static void set_ppf(struct hinic3_hwif *hwif) +{ + struct hinic3_func_attr *attr = &hwif->attr; + u32 addr, val, ppf_election; + + /* Read Modify Write */ + addr = HINIC3_CSR_PPF_ELECTION_ADDR; + + val = hinic3_hwif_read_reg(hwif, addr); + val = HINIC3_PPF_ELECTION_CLEAR(val, IDX); + + ppf_election = HINIC3_PPF_ELECTION_SET(attr->func_global_idx, IDX); + val |= ppf_election; + + hinic3_hwif_write_reg(hwif, addr, val); + + /* Check PPF */ + val = hinic3_hwif_read_reg(hwif, addr); + + attr->ppf_idx = HINIC3_PPF_ELECTION_GET(val, IDX); + if (attr->ppf_idx == attr->func_global_idx) + attr->func_type = TYPE_PPF; +} + +/** + * get_mpf - get the mpf index into the hwif + * @hwif: the hardware interface of a pci function device + **/ +static void get_mpf(struct hinic3_hwif *hwif) +{ + struct hinic3_func_attr *attr = &hwif->attr; + u32 mpf_election, addr; + + addr = HINIC3_CSR_GLOBAL_MPF_ELECTION_ADDR; + + mpf_election = hinic3_hwif_read_reg(hwif, addr); + attr->mpf_idx = HINIC3_MPF_ELECTION_GET(mpf_election, IDX); +} + +/** + * set_mpf - try to set hwif as mpf and set the mpf idx in hwif + * @hwif: the hardware interface of a pci function device + **/ +static void set_mpf(struct hinic3_hwif *hwif) +{ + struct hinic3_func_attr *attr = &hwif->attr; + u32 addr, val, mpf_election; + + /* Read Modify Write */ + addr = HINIC3_CSR_GLOBAL_MPF_ELECTION_ADDR; + + val = hinic3_hwif_read_reg(hwif, addr); + + val = HINIC3_MPF_ELECTION_CLEAR(val, IDX); + mpf_election = HINIC3_MPF_ELECTION_SET(attr->func_global_idx, IDX); + + val |= mpf_election; + hinic3_hwif_write_reg(hwif, addr, val); +} + +static int init_hwif(struct hinic3_hwdev *hwdev, void *cfg_reg_base, void *intr_reg_base, + void *mgmt_regs_base) +{ + struct hinic3_hwif *hwif = NULL; + + hwif = kzalloc(sizeof(*hwif), GFP_KERNEL); + if (!hwif) + return -ENOMEM; + + hwdev->hwif = hwif; + hwif->pdev = hwdev->pcidev_hdl; + + /* if function is VF, mgmt_regs_base will be NULL */ + hwif->cfg_regs_base = mgmt_regs_base ? cfg_reg_base : + (u8 *)cfg_reg_base + HINIC3_VF_CFG_REG_OFFSET; + + hwif->intr_regs_base = intr_reg_base; + hwif->mgmt_regs_base = mgmt_regs_base; + + return 0; +} + +static int init_db_area_idx(struct hinic3_hwif *hwif, u64 db_base_phy, u8 *db_base, + u64 db_dwqe_len) +{ + struct hinic3_free_db_area *free_db_area = &hwif->free_db_area; + u32 db_max_areas; + + hwif->db_base_phy = db_base_phy; + hwif->db_base = db_base; + hwif->db_dwqe_len = db_dwqe_len; + + db_max_areas = (db_dwqe_len > HINIC3_DB_DWQE_SIZE) ? 
+ HINIC3_DB_MAX_AREAS : + (u32)(db_dwqe_len / HINIC3_DB_PAGE_SIZE); + free_db_area->db_bitmap_array = bitmap_zalloc(db_max_areas, GFP_KERNEL); + if (!free_db_area->db_bitmap_array) { + pr_err("Failed to allocate db area.\n"); + return -ENOMEM; + } + free_db_area->db_max_areas = db_max_areas; + spin_lock_init(&free_db_area->idx_lock); + return 0; +} + +static void free_db_area(struct hinic3_free_db_area *free_db_area) +{ + spin_lock_deinit(&free_db_area->idx_lock); + kfree(free_db_area->db_bitmap_array); +} + +static int get_db_idx(struct hinic3_hwif *hwif, u32 *idx) +{ + struct hinic3_free_db_area *free_db_area = &hwif->free_db_area; + u32 pg_idx; + + spin_lock(&free_db_area->idx_lock); + pg_idx = (u32)find_first_zero_bit(free_db_area->db_bitmap_array, + free_db_area->db_max_areas); + if (pg_idx == free_db_area->db_max_areas) { + spin_unlock(&free_db_area->idx_lock); + return -ENOMEM; + } + set_bit(pg_idx, free_db_area->db_bitmap_array); + spin_unlock(&free_db_area->idx_lock); + + *idx = pg_idx; + + return 0; +} + +static void free_db_idx(struct hinic3_hwif *hwif, u32 idx) +{ + struct hinic3_free_db_area *free_db_area = &hwif->free_db_area; + + if (idx >= free_db_area->db_max_areas) + return; + + spin_lock(&free_db_area->idx_lock); + clear_bit((int)idx, free_db_area->db_bitmap_array); + + spin_unlock(&free_db_area->idx_lock); +} + +void hinic3_free_db_addr(void *hwdev, const void __iomem *db_base, + void __iomem *dwqe_base) +{ + struct hinic3_hwif *hwif = NULL; + u32 idx; + + if (!hwdev || !db_base) + return; + + hwif = ((struct hinic3_hwdev *)hwdev)->hwif; + idx = DB_IDX(db_base, hwif->db_base); + + free_db_idx(hwif, idx); +} +EXPORT_SYMBOL(hinic3_free_db_addr); + +int hinic3_alloc_db_addr(void *hwdev, void __iomem **db_base, + void __iomem **dwqe_base) +{ + struct hinic3_hwif *hwif = NULL; + u32 idx = 0; + int err; + + if (!hwdev || !db_base) + return -EINVAL; + + hwif = ((struct hinic3_hwdev *)hwdev)->hwif; + + err = get_db_idx(hwif, &idx); + if (err) + return -EFAULT; + + *db_base = hwif->db_base + idx * HINIC3_DB_PAGE_SIZE; + + if (!dwqe_base) + return 0; + + *dwqe_base = (u8 *)*db_base + HINIC3_DWQE_OFFSET; + + return 0; +} +EXPORT_SYMBOL(hinic3_alloc_db_addr); + +void hinic3_free_db_phy_addr(void *hwdev, u64 db_base, u64 dwqe_base) +{ + struct hinic3_hwif *hwif = NULL; + u32 idx; + + if (!hwdev) + return; + + hwif = ((struct hinic3_hwdev *)hwdev)->hwif; + idx = DB_IDX(db_base, hwif->db_base_phy); + + free_db_idx(hwif, idx); +} +EXPORT_SYMBOL(hinic3_free_db_phy_addr); + +int hinic3_alloc_db_phy_addr(void *hwdev, u64 *db_base, u64 *dwqe_base) +{ + struct hinic3_hwif *hwif = NULL; + u32 idx; + int err; + + if (!hwdev || !db_base || !dwqe_base) + return -EINVAL; + + hwif = ((struct hinic3_hwdev *)hwdev)->hwif; + + err = get_db_idx(hwif, &idx); + if (err) + return -EFAULT; + + *db_base = hwif->db_base_phy + idx * HINIC3_DB_PAGE_SIZE; + *dwqe_base = *db_base + HINIC3_DWQE_OFFSET; + + return 0; +} +EXPORT_SYMBOL(hinic3_alloc_db_phy_addr); + +void hinic3_set_msix_auto_mask_state(void *hwdev, u16 msix_idx, + enum hinic3_msix_auto_mask flag) +{ + struct hinic3_hwif *hwif = NULL; + u32 mask_bits; + u32 addr; + + if (!hwdev) + return; + + hwif = ((struct hinic3_hwdev *)hwdev)->hwif; + + if (flag) + mask_bits = HINIC3_MSI_CLR_INDIR_SET(1, AUTO_MSK_SET); + else + mask_bits = HINIC3_MSI_CLR_INDIR_SET(1, AUTO_MSK_CLR); + + mask_bits = mask_bits | + HINIC3_MSI_CLR_INDIR_SET(msix_idx, SIMPLE_INDIR_IDX); + + addr = HINIC3_CSR_FUNC_MSI_CLR_WR_ADDR; + hinic3_hwif_write_reg(hwif, addr, mask_bits); +} 
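+/*
+ * Usage sketch (illustrative only): a service driver that owns MSI-X entry
+ * msix_idx can let hardware auto-mask the vector while an event is pending.
+ * The enable constant below comes from enum hinic3_msix_auto_mask, which is
+ * defined elsewhere in the series, so its exact name here is an assumption:
+ *
+ *	hinic3_set_msix_auto_mask_state(hwdev, msix_idx, HINIC3_SET_MSIX_AUTO_MASK);
+ *
+ * Any nonzero flag writes AUTO_MSK_SET to the MSI clear-indirect register
+ * for msix_idx; zero writes AUTO_MSK_CLR.
+ */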
+EXPORT_SYMBOL(hinic3_set_msix_auto_mask_state); + +void hinic3_set_msix_state(void *hwdev, u16 msix_idx, + enum hinic3_msix_state flag) +{ + struct hinic3_hwif *hwif = NULL; + u32 mask_bits; + u32 addr; + u8 int_msk = 1; + + if (!hwdev) + return; + + hwif = ((struct hinic3_hwdev *)hwdev)->hwif; + + if (flag) + mask_bits = HINIC3_MSI_CLR_INDIR_SET(int_msk, INT_MSK_SET); + else + mask_bits = HINIC3_MSI_CLR_INDIR_SET(int_msk, INT_MSK_CLR); + mask_bits = mask_bits | + HINIC3_MSI_CLR_INDIR_SET(msix_idx, SIMPLE_INDIR_IDX); + + addr = HINIC3_CSR_FUNC_MSI_CLR_WR_ADDR; + hinic3_hwif_write_reg(hwif, addr, mask_bits); +} +EXPORT_SYMBOL(hinic3_set_msix_state); + +static void disable_all_msix(struct hinic3_hwdev *hwdev) +{ + u16 num_irqs = hwdev->hwif->attr.num_irqs; + u16 i; + + for (i = 0; i < num_irqs; i++) + hinic3_set_msix_state(hwdev, i, HINIC3_MSIX_DISABLE); +} + +static enum hinic3_wait_return check_db_outbound_enable_handler(void *priv_data) +{ + struct hinic3_hwif *hwif = priv_data; + enum hinic3_doorbell_ctrl db_ctrl; + enum hinic3_outbound_ctrl outbound_ctrl; + + db_ctrl = hinic3_get_doorbell_ctrl_status(hwif); + outbound_ctrl = hinic3_get_outbound_ctrl_status(hwif); + if (outbound_ctrl == ENABLE_OUTBOUND && db_ctrl == ENABLE_DOORBELL) + return WAIT_PROCESS_CPL; + + return WAIT_PROCESS_WAITING; +} + +static int wait_until_doorbell_and_outbound_enabled(struct hinic3_hwif *hwif) +{ + return hinic3_wait_for_timeout(hwif, check_db_outbound_enable_handler, + HINIC3_WAIT_DOORBELL_AND_OUTBOUND_TIMEOUT, USEC_PER_MSEC); +} + +static void select_ppf_mpf(struct hinic3_hwdev *hwdev) +{ + struct hinic3_hwif *hwif = hwdev->hwif; + + if (!HINIC3_IS_VF(hwdev)) { + set_ppf(hwif); + + if (HINIC3_IS_PPF(hwdev)) + set_mpf(hwif); + + get_mpf(hwif); + } +} + +/** + * hinic3_init_hwif - initialize the hw interface + * @hwif: the hardware interface of a pci function device + * @pdev: the pci device that will be part of the hwif struct + * Return: 0 - success, negative - failure + **/ +int hinic3_init_hwif(struct hinic3_hwdev *hwdev, void *cfg_reg_base, + void *intr_reg_base, void *mgmt_regs_base, u64 db_base_phy, + void *db_base, u64 db_dwqe_len) +{ + struct hinic3_hwif *hwif = NULL; + u32 attr1, attr4, attr5; + int err; + + err = init_hwif(hwdev, cfg_reg_base, intr_reg_base, mgmt_regs_base); + if (err) + return err; + + hwif = hwdev->hwif; + + err = init_db_area_idx(hwif, db_base_phy, db_base, db_dwqe_len); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init db area.\n"); + goto init_db_area_err; + } + + err = wait_hwif_ready(hwdev); + if (err) { + attr1 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR1_ADDR); + sdk_err(hwdev->dev_hdl, "Chip status is not ready, attr1:0x%x\n", attr1); + goto hwif_ready_err; + } + + err = get_hwif_attr(hwif); + if (err) { + sdk_err(hwdev->dev_hdl, "Get hwif attr failed\n"); + goto hwif_ready_err; + } + + err = wait_until_doorbell_and_outbound_enabled(hwif); + if (err) { + attr4 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR4_ADDR); + attr5 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR5_ADDR); + sdk_err(hwdev->dev_hdl, "Hw doorbell/outbound is disabled, attr4 0x%x attr5 0x%x\n", + attr4, attr5); + goto hwif_ready_err; + } + + select_ppf_mpf(hwdev); + + disable_all_msix(hwdev); + /* disable mgmt cpu report any event */ + hinic3_set_pf_status(hwdev->hwif, HINIC3_PF_STATUS_INIT); + + sdk_info(hwdev->dev_hdl, "global_func_idx: %u, func_type: %d, host_id: %u, ppf: %u, mpf: %u\n", + hwif->attr.func_global_idx, hwif->attr.func_type, hwif->attr.pci_intf_idx, + 
hwif->attr.ppf_idx, hwif->attr.mpf_idx); + + return 0; + +hwif_ready_err: + hinic3_show_chip_err_info(hwdev); + free_db_area(&hwif->free_db_area); +init_db_area_err: + kfree(hwif); + + return err; +} + +/** + * hinic3_free_hwif - free the hw interface + * @hwif: the hardware interface of a pci function device + * @pdev: the pci device that will be part of the hwif struct + **/ +void hinic3_free_hwif(struct hinic3_hwdev *hwdev) +{ + spin_lock_deinit(&hwdev->hwif->free_db_area.idx_lock); + free_db_area(&hwdev->hwif->free_db_area); + kfree(hwdev->hwif); +} + +u16 hinic3_global_func_id(void *hwdev) +{ + struct hinic3_hwif *hwif = NULL; + + if (!hwdev) + return 0; + + hwif = ((struct hinic3_hwdev *)hwdev)->hwif; + + return hwif->attr.func_global_idx; +} +EXPORT_SYMBOL(hinic3_global_func_id); + +u16 hinic3_intr_num(void *hwdev) +{ + struct hinic3_hwif *hwif = NULL; + + if (!hwdev) + return 0; + + hwif = ((struct hinic3_hwdev *)hwdev)->hwif; + + return hwif->attr.num_irqs; +} +EXPORT_SYMBOL(hinic3_intr_num); + +u8 hinic3_pf_id_of_vf(void *hwdev) +{ + struct hinic3_hwif *hwif = NULL; + + if (!hwdev) + return 0; + + hwif = ((struct hinic3_hwdev *)hwdev)->hwif; + + return hwif->attr.port_to_port_idx; +} +EXPORT_SYMBOL(hinic3_pf_id_of_vf); + +u8 hinic3_pcie_itf_id(void *hwdev) +{ + struct hinic3_hwif *hwif = NULL; + + if (!hwdev) + return 0; + + hwif = ((struct hinic3_hwdev *)hwdev)->hwif; + + return hwif->attr.pci_intf_idx; +} +EXPORT_SYMBOL(hinic3_pcie_itf_id); + +u8 hinic3_vf_in_pf(void *hwdev) +{ + struct hinic3_hwif *hwif = NULL; + + if (!hwdev) + return 0; + + hwif = ((struct hinic3_hwdev *)hwdev)->hwif; + + return hwif->attr.vf_in_pf; +} +EXPORT_SYMBOL(hinic3_vf_in_pf); + +enum func_type hinic3_func_type(void *hwdev) +{ + struct hinic3_hwif *hwif = NULL; + + if (!hwdev) + return 0; + + hwif = ((struct hinic3_hwdev *)hwdev)->hwif; + + return hwif->attr.func_type; +} +EXPORT_SYMBOL(hinic3_func_type); + +u8 hinic3_ceq_num(void *hwdev) +{ + struct hinic3_hwif *hwif = NULL; + + if (!hwdev) + return 0; + + hwif = ((struct hinic3_hwdev *)hwdev)->hwif; + + return hwif->attr.num_ceqs; +} +EXPORT_SYMBOL(hinic3_ceq_num); + +u16 hinic3_glb_pf_vf_offset(void *hwdev) +{ + struct hinic3_hwif *hwif = NULL; + + if (!hwdev) + return 0; + + hwif = ((struct hinic3_hwdev *)hwdev)->hwif; + + return hwif->attr.global_vf_id_of_pf; +} +EXPORT_SYMBOL(hinic3_glb_pf_vf_offset); + +u8 hinic3_ppf_idx(void *hwdev) +{ + struct hinic3_hwif *hwif = NULL; + + if (!hwdev) + return 0; + + hwif = ((struct hinic3_hwdev *)hwdev)->hwif; + + return hwif->attr.ppf_idx; +} +EXPORT_SYMBOL(hinic3_ppf_idx); + +u8 hinic3_host_ppf_idx(struct hinic3_hwdev *hwdev, u8 host_id) +{ + u32 ppf_elect_port_addr; + u32 val; + + if (!hwdev) + return 0; + + ppf_elect_port_addr = HINIC3_CSR_FUNC_PPF_ELECT(host_id); + val = hinic3_hwif_read_reg(hwdev->hwif, ppf_elect_port_addr); + + return HINIC3_PPF_ELECT_PORT_GET(val, IDX); +} + +u32 hinic3_get_self_test_result(void *hwdev) +{ + struct hinic3_hwif *hwif = ((struct hinic3_hwdev *)hwdev)->hwif; + + return hinic3_hwif_read_reg(hwif, HINIC3_MGMT_HEALTH_STATUS_ADDR); +} + +void hinic3_show_chip_err_info(struct hinic3_hwdev *hwdev) +{ + struct hinic3_hwif *hwif = hwdev->hwif; + u32 value; + + if (hinic3_func_type(hwdev) == TYPE_VF) + return; + + value = hinic3_hwif_read_reg(hwif, HINIC3_CHIP_BASE_INFO_ADDR); + sdk_warn(hwdev->dev_hdl, "Chip base info: 0x%08x\n", value); + + value = hinic3_hwif_read_reg(hwif, HINIC3_MGMT_HEALTH_STATUS_ADDR); + sdk_warn(hwdev->dev_hdl, "Mgmt CPU health status: 0x%08x\n", 
value); + + value = hinic3_hwif_read_reg(hwif, HINIC3_CHIP_ERR_STATUS0_ADDR); + sdk_warn(hwdev->dev_hdl, "Chip fatal error status0: 0x%08x\n", value); + value = hinic3_hwif_read_reg(hwif, HINIC3_CHIP_ERR_STATUS1_ADDR); + sdk_warn(hwdev->dev_hdl, "Chip fatal error status1: 0x%08x\n", value); + + value = hinic3_hwif_read_reg(hwif, HINIC3_ERR_INFO0_ADDR); + sdk_warn(hwdev->dev_hdl, "Chip exception info0: 0x%08x\n", value); + value = hinic3_hwif_read_reg(hwif, HINIC3_ERR_INFO1_ADDR); + sdk_warn(hwdev->dev_hdl, "Chip exception info1: 0x%08x\n", value); + value = hinic3_hwif_read_reg(hwif, HINIC3_ERR_INFO2_ADDR); + sdk_warn(hwdev->dev_hdl, "Chip exception info2: 0x%08x\n", value); +} + diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwif.h b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwif.h new file mode 100644 index 000000000000..b204b213c43f --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwif.h @@ -0,0 +1,113 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC3_HWIF_H +#define HINIC3_HWIF_H + +#include "hinic3_hwdev.h" + +#define HINIC3_PCIE_LINK_DOWN 0xFFFFFFFF + +struct hinic3_free_db_area { + unsigned long *db_bitmap_array; + u32 db_max_areas; + /* spinlock for allocating doorbell area */ + spinlock_t idx_lock; +}; + +struct hinic3_func_attr { + u16 func_global_idx; + u8 port_to_port_idx; + u8 pci_intf_idx; + u8 vf_in_pf; + u8 rsvd1; + u16 rsvd2; + enum func_type func_type; + + u8 mpf_idx; + + u8 ppf_idx; + + u16 num_irqs; /* max: 2 ^ 15 */ + u8 num_aeqs; /* max: 2 ^ 3 */ + u8 num_ceqs; /* max: 2 ^ 7 */ + + u16 num_sq; /* max: 2 ^ 8 */ + u8 num_dma_attr; /* max: 2 ^ 6 */ + u8 msix_flex_en; + + u16 global_vf_id_of_pf; +}; + +struct hinic3_hwif { + u8 __iomem *cfg_regs_base; + u8 __iomem *intr_regs_base; + u8 __iomem *mgmt_regs_base; + u64 db_base_phy; + u64 db_dwqe_len; + u8 __iomem *db_base; + + struct hinic3_free_db_area free_db_area; + + struct hinic3_func_attr attr; + + void *pdev; + u64 rsvd; +}; + +enum hinic3_outbound_ctrl { + ENABLE_OUTBOUND = 0x0, + DISABLE_OUTBOUND = 0x1, +}; + +enum hinic3_doorbell_ctrl { + ENABLE_DOORBELL = 0x0, + DISABLE_DOORBELL = 0x1, +}; + +enum hinic3_pf_status { + HINIC3_PF_STATUS_INIT = 0X0, + HINIC3_PF_STATUS_ACTIVE_FLAG = 0x11, + HINIC3_PF_STATUS_FLR_START_FLAG = 0x12, + HINIC3_PF_STATUS_FLR_FINISH_FLAG = 0x13, +}; + +#define HINIC3_HWIF_NUM_AEQS(hwif) ((hwif)->attr.num_aeqs) +#define HINIC3_HWIF_NUM_CEQS(hwif) ((hwif)->attr.num_ceqs) +#define HINIC3_HWIF_NUM_IRQS(hwif) ((hwif)->attr.num_irqs) +#define HINIC3_HWIF_GLOBAL_IDX(hwif) ((hwif)->attr.func_global_idx) +#define HINIC3_HWIF_GLOBAL_VF_OFFSET(hwif) ((hwif)->attr.global_vf_id_of_pf) +#define HINIC3_HWIF_PPF_IDX(hwif) ((hwif)->attr.ppf_idx) +#define HINIC3_PCI_INTF_IDX(hwif) ((hwif)->attr.pci_intf_idx) + +#define HINIC3_FUNC_TYPE(dev) ((dev)->hwif->attr.func_type) +#define HINIC3_IS_PF(dev) (HINIC3_FUNC_TYPE(dev) == TYPE_PF) +#define HINIC3_IS_VF(dev) (HINIC3_FUNC_TYPE(dev) == TYPE_VF) +#define HINIC3_IS_PPF(dev) (HINIC3_FUNC_TYPE(dev) == TYPE_PPF) + +u32 hinic3_hwif_read_reg(struct hinic3_hwif *hwif, u32 reg); + +void hinic3_hwif_write_reg(struct hinic3_hwif *hwif, u32 reg, u32 val); + +void hinic3_set_pf_status(struct hinic3_hwif *hwif, + enum hinic3_pf_status status); + +enum hinic3_pf_status hinic3_get_pf_status(struct hinic3_hwif *hwif); + +void hinic3_disable_doorbell(struct hinic3_hwif *hwif); + +void hinic3_enable_doorbell(struct hinic3_hwif *hwif); + +int hinic3_init_hwif(struct hinic3_hwdev *hwdev, 
void *cfg_reg_base, + void *intr_reg_base, void *mgmt_regs_base, u64 db_base_phy, + void *db_base, u64 db_dwqe_len); + +void hinic3_free_hwif(struct hinic3_hwdev *hwdev); + +void hinic3_show_chip_err_info(struct hinic3_hwdev *hwdev); + +u8 hinic3_host_ppf_idx(struct hinic3_hwdev *hwdev, u8 host_id); + +bool get_card_present_state(struct hinic3_hwdev *hwdev); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_lld.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_lld.c new file mode 100644 index 000000000000..ac4302dabfe3 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_lld.c @@ -0,0 +1,1413 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include <net/addrconf.h> +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/io-mapping.h> +#include <linux/interrupt.h> +#include <linux/inetdevice.h> +#include <linux/time.h> +#include <linux/timex.h> +#include <linux/rtc.h> +#include <linux/aer.h> +#include <linux/debugfs.h> + +#include "ossl_knl.h" +#include "hinic3_mt.h" +#include "hinic3_common.h" +#include "hinic3_crm.h" +#include "hinic3_pci_id_tbl.h" +#include "hinic3_sriov.h" +#include "hinic3_dev_mgmt.h" +#include "hinic3_nictool.h" +#include "hinic3_hw.h" +#include "hinic3_lld.h" + +#include "hinic3_profile.h" +#include "hinic3_hwdev.h" +#include "hinic3_prof_adap.h" +#include "comm_msg_intf.h" + + +static bool disable_vf_load; +module_param(disable_vf_load, bool, 0444); +MODULE_PARM_DESC(disable_vf_load, + "Disable virtual functions probe or not - default is false"); + +static bool disable_attach = false; +module_param(disable_attach, bool, 0444); +MODULE_PARM_DESC(disable_attach, "disable_attach or not - default is false"); + +#define HINIC3_WAIT_SRIOV_CFG_TIMEOUT 15000 + +MODULE_AUTHOR("Huawei Technologies CO., Ltd"); +MODULE_DESCRIPTION(HINIC3_DRV_DESC); +MODULE_VERSION(HINIC3_DRV_VERSION); +MODULE_LICENSE("GPL"); + +#if !(defined(HAVE_SRIOV_CONFIGURE) || defined(HAVE_RHEL6_SRIOV_CONFIGURE)) +static DEVICE_ATTR(sriov_numvfs, 0664, + hinic3_sriov_numvfs_show, hinic3_sriov_numvfs_store); +static DEVICE_ATTR(sriov_totalvfs, 0444, + hinic3_sriov_totalvfs_show, NULL); +#endif /* !(HAVE_SRIOV_CONFIGURE || HAVE_RHEL6_SRIOV_CONFIGURE) */ + +static struct attribute *hinic3_attributes[] = { +#if !(defined(HAVE_SRIOV_CONFIGURE) || defined(HAVE_RHEL6_SRIOV_CONFIGURE)) + &dev_attr_sriov_numvfs.attr, + &dev_attr_sriov_totalvfs.attr, +#endif /* !(HAVE_SRIOV_CONFIGURE || HAVE_RHEL6_SRIOV_CONFIGURE) */ + NULL +}; + +static const struct attribute_group hinic3_attr_group = { + .attrs = hinic3_attributes, +}; + +struct hinic3_uld_info g_uld_info[SERVICE_T_MAX] = { {0} }; + +#define HINIC3_EVENT_PROCESS_TIMEOUT 10000 +struct mutex g_uld_mutex; + +void hinic3_uld_lock_init(void) +{ + mutex_init(&g_uld_mutex); +} + +static const char *s_uld_name[SERVICE_T_MAX] = { + "nic", "ovs", "roce", "toe", "ioe", + "fc", "vbs", "ipsec", "virtio", "migrate", "ppa", "custom"}; + +const char **hinic3_get_uld_names(void) +{ + return s_uld_name; +} + +static int attach_uld(struct hinic3_pcidev *dev, enum hinic3_service_type type, + const struct hinic3_uld_info *uld_info) +{ + void *uld_dev = NULL; + int err; + + mutex_lock(&dev->pdev_mutex); + + if (dev->uld_dev[type]) { + sdk_err(&dev->pcidev->dev, + "%s driver has attached to pcie device\n", + s_uld_name[type]); + err = 0; + goto out_unlock; + } + + 
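+ /* First attach for this service type: start the ULD reference count at zero before calling the ULD probe below. */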
atomic_set(&dev->uld_ref_cnt[type], 0); + + err = uld_info->probe(&dev->lld_dev, &uld_dev, dev->uld_dev_name[type]); + if (err) { + sdk_err(&dev->pcidev->dev, + "Failed to add object for %s driver to pcie device\n", + s_uld_name[type]); + goto probe_failed; + } + + dev->uld_dev[type] = uld_dev; + set_bit(type, &dev->uld_state); + mutex_unlock(&dev->pdev_mutex); + + sdk_info(&dev->pcidev->dev, + "Attach %s driver to pcie device succeed\n", s_uld_name[type]); + return 0; + +probe_failed: +out_unlock: + mutex_unlock(&dev->pdev_mutex); + + return err; +} + +static void wait_uld_unused(struct hinic3_pcidev *dev, enum hinic3_service_type type) +{ + u32 loop_cnt = 0; + + while (atomic_read(&dev->uld_ref_cnt[type])) { + loop_cnt++; + if (loop_cnt % PRINT_ULD_DETACH_TIMEOUT_INTERVAL == 0) + sdk_err(&dev->pcidev->dev, "Wait for uld unused for %lds, reference count: %d\n", + loop_cnt / MSEC_PER_SEC, atomic_read(&dev->uld_ref_cnt[type])); + + usleep_range(ULD_LOCK_MIN_USLEEP_TIME, ULD_LOCK_MAX_USLEEP_TIME); + } +} + +static void detach_uld(struct hinic3_pcidev *dev, + enum hinic3_service_type type) +{ + struct hinic3_uld_info *uld_info = &g_uld_info[type]; + unsigned long end; + bool timeout = true; + + mutex_lock(&dev->pdev_mutex); + if (!dev->uld_dev[type]) { + mutex_unlock(&dev->pdev_mutex); + return; + } + + end = jiffies + msecs_to_jiffies(HINIC3_EVENT_PROCESS_TIMEOUT); + do { + if (!test_and_set_bit(type, &dev->state)) { + timeout = false; + break; + } + usleep_range(900, 1000); /* sleep 900 us ~ 1000 us */ + } while (time_before(jiffies, end)); + + if (timeout && !test_and_set_bit(type, &dev->state)) + timeout = false; + + spin_lock_bh(&dev->uld_lock); + clear_bit(type, &dev->uld_state); + spin_unlock_bh(&dev->uld_lock); + + wait_uld_unused(dev, type); + + uld_info->remove(&dev->lld_dev, dev->uld_dev[type]); + + dev->uld_dev[type] = NULL; + if (!timeout) + clear_bit(type, &dev->state); + + sdk_info(&dev->pcidev->dev, + "Detach %s driver from pcie device succeed\n", + s_uld_name[type]); + mutex_unlock(&dev->pdev_mutex); +} + +static void attach_ulds(struct hinic3_pcidev *dev) +{ + enum hinic3_service_type type; + struct pci_dev *pdev = dev->pcidev; + + lld_hold(); + mutex_lock(&g_uld_mutex); + + for (type = SERVICE_T_NIC; type < SERVICE_T_MAX; type++) { + if (g_uld_info[type].probe) { + if (pdev->is_virtfn && + (!hinic3_get_vf_service_load(pdev, (u16)type))) { + sdk_info(&pdev->dev, "VF device disable service_type = %d load in host\n", + type); + continue; + } + attach_uld(dev, type, &g_uld_info[type]); + } + } + mutex_unlock(&g_uld_mutex); + lld_put(); +} + +static void detach_ulds(struct hinic3_pcidev *dev) +{ + enum hinic3_service_type type; + + lld_hold(); + mutex_lock(&g_uld_mutex); + for (type = SERVICE_T_MAX - 1; type > SERVICE_T_NIC; type--) { + if (g_uld_info[type].probe) + detach_uld(dev, type); + } + + if (g_uld_info[SERVICE_T_NIC].probe) + detach_uld(dev, SERVICE_T_NIC); + mutex_unlock(&g_uld_mutex); + lld_put(); +} + +int hinic3_register_uld(enum hinic3_service_type type, + struct hinic3_uld_info *uld_info) +{ + struct card_node *chip_node = NULL; + struct hinic3_pcidev *dev = NULL; + struct list_head *chip_list = NULL; + + if (type >= SERVICE_T_MAX) { + pr_err("Unknown type %d of up layer driver to register\n", + type); + return -EINVAL; + } + + if (!uld_info || !uld_info->probe || !uld_info->remove) { + pr_err("Invalid information of %s driver to register\n", + s_uld_name[type]); + return -EINVAL; + } + + lld_hold(); + mutex_lock(&g_uld_mutex); + + if (g_uld_info[type].probe) { + 
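+ /* Only one ULD may be registered per service type; reject a second registration for the same type. */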
pr_err("%s driver has registered\n", s_uld_name[type]); + mutex_unlock(&g_uld_mutex); + lld_put(); + return -EINVAL; + } + + chip_list = get_hinic3_chip_list(); + memcpy(&g_uld_info[type], uld_info, sizeof(*uld_info)); + list_for_each_entry(chip_node, chip_list, node) { + list_for_each_entry(dev, &chip_node->func_list, node) { + if (attach_uld(dev, type, uld_info)) { + sdk_err(&dev->pcidev->dev, + "Attach %s driver to pcie device failed\n", + s_uld_name[type]); +#ifdef CONFIG_MODULE_PROF + hinic3_probe_fault_process(dev->pcidev, FAULT_LEVEL_HOST); + break; +#else + continue; +#endif + } + } + } + + mutex_unlock(&g_uld_mutex); + lld_put(); + + pr_info("Register %s driver succeed\n", s_uld_name[type]); + return 0; +} +EXPORT_SYMBOL(hinic3_register_uld); + +void hinic3_unregister_uld(enum hinic3_service_type type) +{ + struct card_node *chip_node = NULL; + struct hinic3_pcidev *dev = NULL; + struct hinic3_uld_info *uld_info = NULL; + struct list_head *chip_list = NULL; + + if (type >= SERVICE_T_MAX) { + pr_err("Unknown type %d of up layer driver to unregister\n", + type); + return; + } + + lld_hold(); + mutex_lock(&g_uld_mutex); + chip_list = get_hinic3_chip_list(); + list_for_each_entry(chip_node, chip_list, node) { + /* detach vf first */ + list_for_each_entry(dev, &chip_node->func_list, node) + if (hinic3_func_type(dev->hwdev) == TYPE_VF) + detach_uld(dev, type); + + list_for_each_entry(dev, &chip_node->func_list, node) + if (hinic3_func_type(dev->hwdev) == TYPE_PF) + detach_uld(dev, type); + + list_for_each_entry(dev, &chip_node->func_list, node) + if (hinic3_func_type(dev->hwdev) == TYPE_PPF) + detach_uld(dev, type); + } + + uld_info = &g_uld_info[type]; + memset(uld_info, 0, sizeof(*uld_info)); + mutex_unlock(&g_uld_mutex); + lld_put(); +} +EXPORT_SYMBOL(hinic3_unregister_uld); + +int hinic3_attach_nic(struct hinic3_lld_dev *lld_dev) +{ + struct hinic3_pcidev *dev = NULL; + + if (!lld_dev) + return -EINVAL; + + dev = container_of(lld_dev, struct hinic3_pcidev, lld_dev); + return attach_uld(dev, SERVICE_T_NIC, &g_uld_info[SERVICE_T_NIC]); +} +EXPORT_SYMBOL(hinic3_attach_nic); + +void hinic3_detach_nic(const struct hinic3_lld_dev *lld_dev) +{ + struct hinic3_pcidev *dev = NULL; + + if (!lld_dev) + return; + + dev = container_of(lld_dev, struct hinic3_pcidev, lld_dev); + detach_uld(dev, SERVICE_T_NIC); +} +EXPORT_SYMBOL(hinic3_detach_nic); + +int hinic3_attach_service(const struct hinic3_lld_dev *lld_dev, enum hinic3_service_type type) +{ + struct hinic3_pcidev *dev = NULL; + + if (!lld_dev || type >= SERVICE_T_MAX) + return -EINVAL; + + dev = container_of(lld_dev, struct hinic3_pcidev, lld_dev); + return attach_uld(dev, type, &g_uld_info[type]); +} +EXPORT_SYMBOL(hinic3_attach_service); + +void hinic3_detach_service(const struct hinic3_lld_dev *lld_dev, enum hinic3_service_type type) +{ + struct hinic3_pcidev *dev = NULL; + + if (!lld_dev || type >= SERVICE_T_MAX) + return; + + dev = container_of(lld_dev, struct hinic3_pcidev, lld_dev); + detach_uld(dev, type); +} +EXPORT_SYMBOL(hinic3_detach_service); + +static void hinic3_sync_time_to_fmw(struct hinic3_pcidev *pdev_pri) +{ + struct timeval tv = {0}; + struct rtc_time rt_time = {0}; + u64 tv_msec; + int err; + + do_gettimeofday(&tv); + + tv_msec = (u64)(tv.tv_sec * MSEC_PER_SEC + tv.tv_usec / USEC_PER_MSEC); + err = hinic3_sync_time(pdev_pri->hwdev, tv_msec); + if (err) { + sdk_err(&pdev_pri->pcidev->dev, "Synchronize UTC time to firmware failed, errno:%d.\n", + err); + } else { + rtc_time_to_tm((unsigned long)(tv.tv_sec), &rt_time); + 
+		sdk_info(&pdev_pri->pcidev->dev, "Synchronize UTC time to firmware succeeded. UTC time %d-%02d-%02d %02d:%02d:%02d.\n",
+			 rt_time.tm_year + HINIC3_SYNC_YEAR_OFFSET,
+			 rt_time.tm_mon + HINIC3_SYNC_MONTH_OFFSET,
+			 rt_time.tm_mday, rt_time.tm_hour,
+			 rt_time.tm_min, rt_time.tm_sec);
+	}
+}
+
+static void send_uld_dev_event(struct hinic3_pcidev *dev,
+			       struct hinic3_event_info *event)
+{
+	enum hinic3_service_type type;
+
+	for (type = SERVICE_T_NIC; type < SERVICE_T_MAX; type++) {
+		if (test_and_set_bit(type, &dev->state)) {
+			sdk_warn(&dev->pcidev->dev, "Svc: 0x%x, event: 0x%x cannot be handled, %s is in detach\n",
+				 event->service, event->type, s_uld_name[type]);
+			continue;
+		}
+
+		if (g_uld_info[type].event)
+			g_uld_info[type].event(&dev->lld_dev,
+					       dev->uld_dev[type], event);
+		clear_bit(type, &dev->state);
+	}
+}
+
+static void send_event_to_dst_pf(struct hinic3_pcidev *dev, u16 func_id,
+				 struct hinic3_event_info *event)
+{
+	struct hinic3_pcidev *des_dev = NULL;
+
+	lld_hold();
+	list_for_each_entry(des_dev, &dev->chip_node->func_list, node) {
+		if (des_dev->lld_state == HINIC3_IN_REMOVE)
+			continue;
+
+		if (hinic3_func_type(des_dev->hwdev) == TYPE_VF)
+			continue;
+
+		if (hinic3_global_func_id(des_dev->hwdev) == func_id) {
+			send_uld_dev_event(des_dev, event);
+			break;
+		}
+	}
+	lld_put();
+}
+
+static void send_event_to_all_pf(struct hinic3_pcidev *dev,
+				 struct hinic3_event_info *event)
+{
+	struct hinic3_pcidev *des_dev = NULL;
+
+	lld_hold();
+	list_for_each_entry(des_dev, &dev->chip_node->func_list, node) {
+		if (des_dev->lld_state == HINIC3_IN_REMOVE)
+			continue;
+
+		if (hinic3_func_type(des_dev->hwdev) == TYPE_VF)
+			continue;
+
+		send_uld_dev_event(des_dev, event);
+	}
+	lld_put();
+}
+
+static void hinic3_event_process(void *adapter, struct hinic3_event_info *event)
+{
+	struct hinic3_pcidev *dev = adapter;
+	struct hinic3_fault_event *fault = (void *)event->event_data;
+	u16 func_id;
+
+	if ((event->service == EVENT_SRV_COMM && event->type == EVENT_COMM_FAULT) &&
+	    fault->fault_level == FAULT_LEVEL_SERIOUS_FLR &&
+	    fault->event.chip.func_id < hinic3_max_pf_num(dev->hwdev)) {
+		func_id = fault->event.chip.func_id;
+		return send_event_to_dst_pf(adapter, func_id, event);
+	}
+
+	if (event->type == EVENT_COMM_MGMT_WATCHDOG)
+		send_event_to_all_pf(adapter, event);
+	else
+		send_uld_dev_event(adapter, event);
+}
+
+static void uld_def_init(struct hinic3_pcidev *pci_adapter)
+{
+	int type;
+
+	for (type = 0; type < SERVICE_T_MAX; type++) {
+		atomic_set(&pci_adapter->uld_ref_cnt[type], 0);
+		clear_bit(type, &pci_adapter->uld_state);
+	}
+
+	spin_lock_init(&pci_adapter->uld_lock);
+}
+
+static int mapping_bar(struct pci_dev *pdev,
+		       struct hinic3_pcidev *pci_adapter)
+{
+	int cfg_bar;
+
+	cfg_bar = HINIC3_IS_VF_DEV(pdev) ?
+		  HINIC3_VF_PCI_CFG_REG_BAR : HINIC3_PF_PCI_CFG_REG_BAR;
+
+	pci_adapter->cfg_reg_base = pci_ioremap_bar(pdev, cfg_bar);
+	if (!pci_adapter->cfg_reg_base) {
+		sdk_err(&pdev->dev,
+			"Failed to map configuration regs\n");
+		return -ENOMEM;
+	}
+
+	pci_adapter->intr_reg_base = pci_ioremap_bar(pdev,
+						     HINIC3_PCI_INTR_REG_BAR);
+	if (!pci_adapter->intr_reg_base) {
+		sdk_err(&pdev->dev,
+			"Failed to map interrupt regs\n");
+		goto map_intr_bar_err;
+	}
+
+	if (!HINIC3_IS_VF_DEV(pdev)) {
+		pci_adapter->mgmt_reg_base =
+			pci_ioremap_bar(pdev, HINIC3_PCI_MGMT_REG_BAR);
+		if (!pci_adapter->mgmt_reg_base) {
+			sdk_err(&pdev->dev,
+				"Failed to map mgmt regs\n");
+			goto map_mgmt_bar_err;
+		}
+	}
+
+	pci_adapter->db_base_phy = pci_resource_start(pdev, HINIC3_PCI_DB_BAR);
+	pci_adapter->db_dwqe_len = pci_resource_len(pdev, HINIC3_PCI_DB_BAR);
+	pci_adapter->db_base = pci_ioremap_bar(pdev, HINIC3_PCI_DB_BAR);
+	if (!pci_adapter->db_base) {
+		sdk_err(&pdev->dev,
+			"Failed to map doorbell regs\n");
+		goto map_db_err;
+	}
+
+	return 0;
+
+map_db_err:
+	if (!HINIC3_IS_VF_DEV(pdev))
+		iounmap(pci_adapter->mgmt_reg_base);
+
+map_mgmt_bar_err:
+	iounmap(pci_adapter->intr_reg_base);
+
+map_intr_bar_err:
+	iounmap(pci_adapter->cfg_reg_base);
+
+	return -ENOMEM;
+}
+
+static void unmapping_bar(struct hinic3_pcidev *pci_adapter)
+{
+	iounmap(pci_adapter->db_base);
+
+	if (!HINIC3_IS_VF_DEV(pci_adapter->pcidev))
+		iounmap(pci_adapter->mgmt_reg_base);
+
+	iounmap(pci_adapter->intr_reg_base);
+	iounmap(pci_adapter->cfg_reg_base);
+}
+
+static int hinic3_pci_init(struct pci_dev *pdev)
+{
+	struct hinic3_pcidev *pci_adapter = NULL;
+	int err;
+
+	pci_adapter = kzalloc(sizeof(*pci_adapter), GFP_KERNEL);
+	if (!pci_adapter) {
+		sdk_err(&pdev->dev,
+			"Failed to alloc pci device adapter\n");
+		return -ENOMEM;
+	}
+	pci_adapter->pcidev = pdev;
+	mutex_init(&pci_adapter->pdev_mutex);
+
+	pci_set_drvdata(pdev, pci_adapter);
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		sdk_err(&pdev->dev, "Failed to enable PCI device\n");
+		goto pci_enable_err;
+	}
+
+	err = pci_request_regions(pdev, HINIC3_DRV_NAME);
+	if (err) {
+		sdk_err(&pdev->dev, "Failed to request regions\n");
+		goto pci_regions_err;
+	}
+
+	pci_enable_pcie_error_reporting(pdev);
+
+	pci_set_master(pdev);
+
+	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); /* 64 bit DMA mask */
+	if (err) {
+		sdk_warn(&pdev->dev, "Couldn't set 64-bit DMA mask\n");
+		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); /* 32 bit DMA mask */
+		if (err) {
+			sdk_err(&pdev->dev, "Failed to set DMA mask\n");
+			goto dma_mask_err;
+		}
+	}
+
+	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); /* 64 bit DMA mask */
+	if (err) {
+		sdk_warn(&pdev->dev,
+			 "Couldn't set 64-bit coherent DMA mask\n");
+		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); /* 32 bit DMA mask */
+		if (err) {
+			sdk_err(&pdev->dev,
+				"Failed to set coherent DMA mask\n");
+			goto dma_consistent_mask_err;
+		}
+	}
+
+	return 0;
+
+dma_consistent_mask_err:
+dma_mask_err:
+	pci_clear_master(pdev);
+	pci_disable_pcie_error_reporting(pdev);
+	pci_release_regions(pdev);
+
+pci_regions_err:
+	pci_disable_device(pdev);
+
+pci_enable_err:
+	pci_set_drvdata(pdev, NULL);
+	kfree(pci_adapter);
+
+	return err;
+}
+
+static void hinic3_pci_deinit(struct pci_dev *pdev)
+{
+	struct hinic3_pcidev *pci_adapter = pci_get_drvdata(pdev);
+
+	pci_clear_master(pdev);
+	pci_release_regions(pdev);
+	pci_disable_pcie_error_reporting(pdev);
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+	kfree(pci_adapter);
+}
+
+#ifdef
CONFIG_X86 +/** + * cfg_order_reg - when cpu model is haswell or broadwell, should configure dma + * order register to zero + * @pci_adapter: pci_adapter + **/ +/*lint -save -e40 */ +static void cfg_order_reg(struct hinic3_pcidev *pci_adapter) +{ + u8 cpu_model[] = {0x3c, 0x3f, 0x45, 0x46, 0x3d, 0x47, 0x4f, 0x56}; + struct cpuinfo_x86 *cpuinfo = NULL; + u32 i; + + if (hinic3_func_type(pci_adapter->hwdev) == TYPE_VF) + return; + + cpuinfo = &cpu_data(0); + for (i = 0; i < sizeof(cpu_model); i++) { + if (cpu_model[i] == cpuinfo->x86_model) + hinic3_set_pcie_order_cfg(pci_adapter->hwdev); + } +} + +/*lint -restore*/ +#endif + +static int hinic3_func_init(struct pci_dev *pdev, struct hinic3_pcidev *pci_adapter) +{ + struct hinic3_init_para init_para = {0}; + bool cqm_init_en = false; + int err; + + init_para.adapter_hdl = pci_adapter; + init_para.pcidev_hdl = pdev; + init_para.dev_hdl = &pdev->dev; + init_para.cfg_reg_base = pci_adapter->cfg_reg_base; + init_para.intr_reg_base = pci_adapter->intr_reg_base; + init_para.mgmt_reg_base = pci_adapter->mgmt_reg_base; + init_para.db_base = pci_adapter->db_base; + init_para.db_base_phy = pci_adapter->db_base_phy; + init_para.db_dwqe_len = pci_adapter->db_dwqe_len; + init_para.hwdev = &pci_adapter->hwdev; + init_para.chip_node = pci_adapter->chip_node; + init_para.probe_fault_level = pci_adapter->probe_fault_level; + err = hinic3_init_hwdev(&init_para); + if (err) { + pci_adapter->hwdev = NULL; + pci_adapter->probe_fault_level = init_para.probe_fault_level; + sdk_err(&pdev->dev, "Failed to initialize hardware device\n"); + return -EFAULT; + } + + cqm_init_en = hinic3_need_init_stateful_default(pci_adapter->hwdev); + if (cqm_init_en) { + err = hinic3_stateful_init(pci_adapter->hwdev); + if (err) { + sdk_err(&pdev->dev, "Failed to init stateful\n"); + goto stateful_init_err; + } + } + + pci_adapter->lld_dev.pdev = pdev; + + pci_adapter->lld_dev.hwdev = pci_adapter->hwdev; + if (hinic3_func_type(pci_adapter->hwdev) != TYPE_VF) + set_bit(HINIC3_FUNC_PERSENT, &pci_adapter->sriov_info.state); + + hinic3_event_register(pci_adapter->hwdev, pci_adapter, + hinic3_event_process); + + if (hinic3_func_type(pci_adapter->hwdev) != TYPE_VF) + hinic3_sync_time_to_fmw(pci_adapter); + + /* dbgtool init */ + lld_lock_chip_node(); + err = nictool_k_init(pci_adapter->hwdev, pci_adapter->chip_node); + if (err) { + lld_unlock_chip_node(); + sdk_err(&pdev->dev, "Failed to initialize dbgtool\n"); + goto nictool_init_err; + } + list_add_tail(&pci_adapter->node, &pci_adapter->chip_node->func_list); + lld_unlock_chip_node(); + + if (!disable_attach) { + attach_ulds(pci_adapter); + + if (hinic3_func_type(pci_adapter->hwdev) != TYPE_VF) { + err = sysfs_create_group(&pdev->dev.kobj, + &hinic3_attr_group); + if (err) { + sdk_err(&pdev->dev, "Failed to create sysfs group\n"); + goto create_sysfs_err; + } + } + +#ifdef CONFIG_X86 + cfg_order_reg(pci_adapter); +#endif + } + + return 0; + +create_sysfs_err: + detach_ulds(pci_adapter); + + lld_lock_chip_node(); + list_del(&pci_adapter->node); + lld_unlock_chip_node(); + + wait_lld_dev_unused(pci_adapter); + + lld_lock_chip_node(); + nictool_k_uninit(pci_adapter->hwdev, pci_adapter->chip_node); + lld_unlock_chip_node(); + +nictool_init_err: + hinic3_event_unregister(pci_adapter->hwdev); + if (cqm_init_en) + hinic3_stateful_deinit(pci_adapter->hwdev); +stateful_init_err: + hinic3_free_hwdev(pci_adapter->hwdev); + + return err; +} + +static void hinic3_func_deinit(struct pci_dev *pdev) +{ + struct hinic3_pcidev *pci_adapter = 
pci_get_drvdata(pdev); + + /* When function deinit, disable mgmt initiative report events firstly, + * then flush mgmt work-queue. + */ + hinic3_disable_mgmt_msg_report(pci_adapter->hwdev); + + hinic3_flush_mgmt_workq(pci_adapter->hwdev); + + lld_lock_chip_node(); + list_del(&pci_adapter->node); + lld_unlock_chip_node(); + + detach_ulds(pci_adapter); + + wait_lld_dev_unused(pci_adapter); + + lld_lock_chip_node(); + nictool_k_uninit(pci_adapter->hwdev, pci_adapter->chip_node); + lld_unlock_chip_node(); + + hinic3_event_unregister(pci_adapter->hwdev); + + hinic3_free_stateful(pci_adapter->hwdev); + + hinic3_free_hwdev(pci_adapter->hwdev); +} + +static void wait_sriov_cfg_complete(struct hinic3_pcidev *pci_adapter) +{ + struct hinic3_sriov_info *sriov_info; + unsigned long end; + + sriov_info = &pci_adapter->sriov_info; + clear_bit(HINIC3_FUNC_PERSENT, &sriov_info->state); + usleep_range(9900, 10000); /* sleep 9900 us ~ 10000 us */ + + end = jiffies + msecs_to_jiffies(HINIC3_WAIT_SRIOV_CFG_TIMEOUT); + do { + if (!test_bit(HINIC3_SRIOV_ENABLE, &sriov_info->state) && + !test_bit(HINIC3_SRIOV_DISABLE, &sriov_info->state)) + return; + + usleep_range(9900, 10000); /* sleep 9900 us ~ 10000 us */ + } while (time_before(jiffies, end)); +} + +bool hinic3_get_vf_load_state(struct pci_dev *pdev) +{ + struct hinic3_pcidev *pci_adapter = NULL; + struct pci_dev *pf_pdev = NULL; + + if (!pdev) { + pr_err("pdev is null.\n"); + return false; + } + + /* vf used in vm */ + if (pci_is_root_bus(pdev->bus)) + return false; + + if (pdev->is_virtfn) + pf_pdev = pdev->physfn; + else + pf_pdev = pdev; + + pci_adapter = pci_get_drvdata(pf_pdev); + if (!pci_adapter) { + sdk_err(&pdev->dev, "pci_adapter is null.\n"); + return false; + } + + return !pci_adapter->disable_vf_load; +} + + +int hinic3_set_vf_load_state(struct pci_dev *pdev, bool vf_load_state) +{ + struct hinic3_pcidev *pci_adapter = NULL; + + if (!pdev) { + pr_err("pdev is null.\n"); + return -EINVAL; + } + + pci_adapter = pci_get_drvdata(pdev); + if (!pci_adapter) { + sdk_err(&pdev->dev, "pci_adapter is null.\n"); + return -EINVAL; + } + + if (hinic3_func_type(pci_adapter->hwdev) == TYPE_VF) + return 0; + + pci_adapter->disable_vf_load = !vf_load_state; + sdk_info(&pci_adapter->pcidev->dev, "Current function %s vf load in host\n", + vf_load_state ? 
"enable" : "disable"); + + return 0; +} +EXPORT_SYMBOL(hinic3_set_vf_load_state); + + +bool hinic3_get_vf_service_load(struct pci_dev *pdev, u16 service) +{ + struct hinic3_pcidev *pci_adapter = NULL; + struct pci_dev *pf_pdev = NULL; + + if (!pdev) { + pr_err("pdev is null.\n"); + return false; + } + + if (pdev->is_virtfn) + pf_pdev = pdev->physfn; + else + pf_pdev = pdev; + + pci_adapter = pci_get_drvdata(pf_pdev); + if (!pci_adapter) { + sdk_err(&pdev->dev, "pci_adapter is null.\n"); + return false; + } + + if (service >= SERVICE_T_MAX) { + sdk_err(&pdev->dev, "service_type = %u state is error\n", + service); + return false; + } + + return !pci_adapter->disable_srv_load[service]; +} + +int hinic3_set_vf_service_load(struct pci_dev *pdev, u16 service, + bool vf_srv_load) +{ + struct hinic3_pcidev *pci_adapter = NULL; + + if (!pdev) { + pr_err("pdev is null.\n"); + return -EINVAL; + } + + if (service >= SERVICE_T_MAX) { + sdk_err(&pdev->dev, "service_type = %u state is error\n", + service); + return -EFAULT; + } + + pci_adapter = pci_get_drvdata(pdev); + if (!pci_adapter) { + sdk_err(&pdev->dev, "pci_adapter is null.\n"); + return -EINVAL; + } + + if (hinic3_func_type(pci_adapter->hwdev) == TYPE_VF) + return 0; + + pci_adapter->disable_srv_load[service] = !vf_srv_load; + sdk_info(&pci_adapter->pcidev->dev, "Current function %s vf load in host\n", + vf_srv_load ? "enable" : "disable"); + + return 0; +} +EXPORT_SYMBOL(hinic3_set_vf_service_load); + +static int hinic3_remove_func(struct hinic3_pcidev *pci_adapter) +{ + struct pci_dev *pdev = pci_adapter->pcidev; + + mutex_lock(&pci_adapter->pdev_mutex); + if (pci_adapter->lld_state != HINIC3_PROBE_OK) { + sdk_warn(&pdev->dev, "Current function don not need remove\n"); + mutex_unlock(&pci_adapter->pdev_mutex); + return 0; + } + pci_adapter->lld_state = HINIC3_IN_REMOVE; + mutex_unlock(&pci_adapter->pdev_mutex); + + hinic3_detect_hw_present(pci_adapter->hwdev); + + hisdk3_remove_pre_process(pci_adapter->hwdev); + + if (hinic3_func_type(pci_adapter->hwdev) != TYPE_VF) { + sysfs_remove_group(&pdev->dev.kobj, &hinic3_attr_group); + wait_sriov_cfg_complete(pci_adapter); + hinic3_pci_sriov_disable(pdev); + } + + hinic3_func_deinit(pdev); + + lld_lock_chip_node(); + free_chip_node(pci_adapter); + lld_unlock_chip_node(); + + unmapping_bar(pci_adapter); + + mutex_lock(&pci_adapter->pdev_mutex); + pci_adapter->lld_state = HINIC3_NOT_PROBE; + mutex_unlock(&pci_adapter->pdev_mutex); + + sdk_info(&pdev->dev, "Pcie device removed function\n"); + + return 0; +} + +static void hinic3_remove(struct pci_dev *pdev) +{ + struct hinic3_pcidev *pci_adapter = pci_get_drvdata(pdev); + + if (!pci_adapter) + return; + + sdk_info(&pdev->dev, "Pcie device remove begin\n"); + + hinic3_remove_func(pci_adapter); + + hinic3_pci_deinit(pdev); + hinic3_probe_pre_unprocess(pdev); + + sdk_info(&pdev->dev, "Pcie device removed\n"); +} + +static int probe_func_param_init(struct hinic3_pcidev *pci_adapter) +{ + struct pci_dev *pdev = NULL; + + if (!pci_adapter) + return -EFAULT; + + pdev = pci_adapter->pcidev; + if (!pdev) + return -EFAULT; + + mutex_lock(&pci_adapter->pdev_mutex); + if (pci_adapter->lld_state >= HINIC3_PROBE_START) { + sdk_warn(&pdev->dev, "Don not probe repeat\n"); + mutex_unlock(&pci_adapter->pdev_mutex); + return 0; + } + pci_adapter->lld_state = HINIC3_PROBE_START; + mutex_unlock(&pci_adapter->pdev_mutex); + + return 0; +} + +static int hinic3_probe_func(struct hinic3_pcidev *pci_adapter) +{ + struct pci_dev *pdev = pci_adapter->pcidev; + int err; + + err = 
+	if (err)
+		return err;
+
+	err = mapping_bar(pdev, pci_adapter);
+	if (err) {
+		sdk_err(&pdev->dev, "Failed to map bar\n");
+		goto map_bar_failed;
+	}
+
+	uld_def_init(pci_adapter);
+
+	/* if chip information of pcie function exists, add the function into chip */
+	lld_lock_chip_node();
+	err = alloc_chip_node(pci_adapter);
+	if (err) {
+		lld_unlock_chip_node();
+		sdk_err(&pdev->dev, "Failed to add new chip node to global list\n");
+		goto alloc_chip_node_fail;
+	}
+	lld_unlock_chip_node();
+
+	err = hinic3_func_init(pdev, pci_adapter);
+	if (err)
+		goto func_init_err;
+
+	if (hinic3_func_type(pci_adapter->hwdev) != TYPE_VF) {
+		err = hinic3_set_bdf_ctxt(pci_adapter->hwdev, pdev->bus->number,
+					  PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
+		if (err) {
+			sdk_err(&pdev->dev, "Failed to set BDF info to MPU\n");
+			goto set_bdf_err;
+		}
+	}
+
+	hinic3_probe_success(pci_adapter->hwdev);
+
+	mutex_lock(&pci_adapter->pdev_mutex);
+	pci_adapter->lld_state = HINIC3_PROBE_OK;
+	mutex_unlock(&pci_adapter->pdev_mutex);
+
+	return 0;
+
+set_bdf_err:
+	hinic3_func_deinit(pdev);
+
+func_init_err:
+	lld_lock_chip_node();
+	free_chip_node(pci_adapter);
+	lld_unlock_chip_node();
+
+alloc_chip_node_fail:
+	unmapping_bar(pci_adapter);
+
+map_bar_failed:
+	sdk_err(&pdev->dev, "Pcie device probe function failed\n");
+	return err;
+}
+
+static int hinic3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	struct hinic3_pcidev *pci_adapter = NULL;
+	u16 probe_fault_level = FAULT_LEVEL_SERIOUS_FLR;
+	int err;
+
+	sdk_info(&pdev->dev, "Pcie device probe begin\n");
+
+	err = hinic3_probe_pre_process(pdev);
+	if (err != 0 && err != HINIC3_NOT_PROBE)
+		goto out;
+
+	if (err == HINIC3_NOT_PROBE)
+		return 0;
+
+	err = hinic3_pci_init(pdev);
+	if (err)
+		goto pci_init_err;
+
+	pci_adapter = pci_get_drvdata(pdev);
+	pci_adapter->disable_vf_load = disable_vf_load;
+	pci_adapter->id = *id;
+	pci_adapter->lld_state = HINIC3_NOT_PROBE;
+	pci_adapter->probe_fault_level = probe_fault_level;
+	lld_dev_cnt_init(pci_adapter);
+
+	if (pdev->is_virtfn && (!hinic3_get_vf_load_state(pdev))) {
+		sdk_info(&pdev->dev, "VF device load is disabled in host\n");
+		return 0;
+	}
+
+	err = hinic3_probe_func(pci_adapter);
+	if (err)
+		goto hinic3_probe_func_fail;
+
+	sdk_info(&pdev->dev, "Pcie device probed\n");
+	return 0;
+
+hinic3_probe_func_fail:
+	probe_fault_level = pci_adapter->probe_fault_level;
+	hinic3_pci_deinit(pdev);
+
+pci_init_err:
+	hinic3_probe_pre_unprocess(pdev);
+
+out:
+	hinic3_probe_fault_process(pdev, probe_fault_level);
+	sdk_err(&pdev->dev, "Pcie device probe failed\n");
+	return err;
+}
+
+static int hinic3_get_pf_info(struct pci_dev *pdev, u16 service,
+			      struct hinic3_hw_pf_infos **pf_infos)
+{
+	struct hinic3_pcidev *dev = pci_get_drvdata(pdev);
+	int err;
+
+	if (service >= SERVICE_T_MAX) {
+		sdk_err(&pdev->dev, "Setting service_type = %u state in host is not supported\n",
+			service);
+		return -EFAULT;
+	}
+
+	*pf_infos = kzalloc(sizeof(struct hinic3_hw_pf_infos), GFP_KERNEL);
+	if (!*pf_infos)
+		return -ENOMEM;
+
+	err = hinic3_get_hw_pf_infos(dev->hwdev, *pf_infos, HINIC3_CHANNEL_COMM);
+	if (err) {
+		kfree(*pf_infos);
+		sdk_err(&pdev->dev, "Get chip pf info failed, ret %d\n", err);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int hinic3_set_func_en(struct pci_dev *des_pdev, struct hinic3_pcidev *dst_dev,
+			      bool en, u16 vf_func_id)
+{
+	int err;
+
+	/* unload invalid vf func id */
+	if (!en && vf_func_id != hinic3_global_func_id(dst_dev->hwdev) &&
+	    !strcmp(des_pdev->driver->name,
+		    HINIC3_DRV_NAME)) {
+		pr_err("dst_dev func id:%u, vf_func_id:%u\n",
+		       hinic3_global_func_id(dst_dev->hwdev), vf_func_id);
+		mutex_unlock(&dst_dev->pdev_mutex);
+		return -EFAULT;
+	}
+
+	if (!en && dst_dev->lld_state == HINIC3_PROBE_OK) {
+		mutex_unlock(&dst_dev->pdev_mutex);
+		hinic3_remove_func(dst_dev);
+	} else if (en && dst_dev->lld_state == HINIC3_NOT_PROBE) {
+		mutex_unlock(&dst_dev->pdev_mutex);
+		err = hinic3_probe_func(dst_dev);
+		if (err)
+			return -EFAULT;
+	} else {
+		/* lld_state changed after the caller's check; just drop the lock */
+		mutex_unlock(&dst_dev->pdev_mutex);
+	}
+
+	return 0;
+}
+
+static int get_vf_service_state_param(struct pci_dev *pdev, struct hinic3_pcidev **dev_ptr,
+				      u16 service, struct hinic3_hw_pf_infos **pf_infos)
+{
+	int err;
+
+	if (!pdev)
+		return -EINVAL;
+
+	*dev_ptr = pci_get_drvdata(pdev);
+	if (!(*dev_ptr))
+		return -EINVAL;
+
+	err = hinic3_get_pf_info(pdev, service, pf_infos);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+#define BUS_MAX_DEV_NUM 256
+static int hinic3_dst_pdev_valid(struct hinic3_pcidev *dst_dev, struct pci_dev **des_pdev_ptr,
+				 u16 vf_devfn, bool en)
+{
+	u16 bus;
+
+	bus = dst_dev->pcidev->bus->number + vf_devfn / BUS_MAX_DEV_NUM;
+	*des_pdev_ptr = pci_get_domain_bus_and_slot(pci_domain_nr(dst_dev->pcidev->bus),
+						    bus, vf_devfn % BUS_MAX_DEV_NUM);
+	if (!(*des_pdev_ptr)) {
+		pr_err("des_pdev is NULL\n");
+		return -EFAULT;
+	}
+
+	if ((*des_pdev_ptr)->driver == NULL) {
+		pr_err("des_pdev_ptr->driver is NULL\n");
+		return -EFAULT;
+	}
+
+	/* OVS sriov hw scene, when vf bind to vf_io return error. */
+	if ((!en && strcmp((*des_pdev_ptr)->driver->name, HINIC3_DRV_NAME))) {
+		pr_err("vf bind driver:%s\n", (*des_pdev_ptr)->driver->name);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int parameter_is_unexpected(struct hinic3_pcidev *dst_dev, u16 *func_id, u16 *vf_start,
+				   u16 *vf_end, u16 vf_func_id)
+{
+	if (hinic3_func_type(dst_dev->hwdev) == TYPE_VF)
+		return -EPERM;
+
+	*func_id = hinic3_global_func_id(dst_dev->hwdev);
+	*vf_start = hinic3_glb_pf_vf_offset(dst_dev->hwdev) + 1;
+	*vf_end = *vf_start + hinic3_func_max_vf(dst_dev->hwdev);
+	if (vf_func_id < *vf_start || vf_func_id > *vf_end)
+		return -EPERM;
+
+	return 0;
+}
+
+int hinic3_set_vf_service_state(struct pci_dev *pdev, u16 vf_func_id, u16 service, bool en)
+{
+	struct hinic3_hw_pf_infos *pf_infos = NULL;
+	struct hinic3_pcidev *dev = NULL, *dst_dev = NULL;
+	struct pci_dev *des_pdev = NULL;
+	u16 vf_start, vf_end, vf_devfn, func_id;
+	int err;
+	bool find_dst_dev = false;
+
+	err = get_vf_service_state_param(pdev, &dev, service, &pf_infos);
+	if (err)
+		return err;
+
+	lld_hold();
+	list_for_each_entry(dst_dev, &dev->chip_node->func_list, node) {
+		if (parameter_is_unexpected(dst_dev, &func_id, &vf_start, &vf_end, vf_func_id))
+			continue;
+
+		vf_devfn = pf_infos->infos[func_id].vf_offset + (vf_func_id - vf_start) +
+			   (u16)dst_dev->pcidev->devfn;
+		err = hinic3_dst_pdev_valid(dst_dev, &des_pdev, vf_devfn, en);
+		if (err) {
+			sdk_err(&pdev->dev, "Can not get vf func_id %u from pf %u\n",
+				vf_func_id, func_id);
+			lld_put();
+			goto free_pf_info;
+		}
+
+		dst_dev = pci_get_drvdata(des_pdev);
+		/* When enable vf scene, if vf bind to vf-io, return ok */
+		if (strcmp(des_pdev->driver->name, HINIC3_DRV_NAME) ||
+		    !dst_dev || (!en && dst_dev->lld_state != HINIC3_PROBE_OK) ||
+		    (en && dst_dev->lld_state != HINIC3_NOT_PROBE)) {
+			lld_put();
+			goto free_pf_info;
+		}
+
+		if (en)
+			pci_dev_put(des_pdev);
+		mutex_lock(&dst_dev->pdev_mutex);
+		find_dst_dev = true;
+		break;
+	}
+	lld_put();
+
+	if (!find_dst_dev) {
+		err = -EFAULT;
+		sdk_err(&pdev->dev, "Invalid parameter vf_id %u\n", vf_func_id);
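+		/* No probed function on this chip matched vf_func_id. */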
+		goto free_pf_info;
+	}
+
+	err = hinic3_set_func_en(des_pdev, dst_dev, en, vf_func_id);
+
+free_pf_info:
+	kfree(pf_infos);
+	return err;
+}
+EXPORT_SYMBOL(hinic3_set_vf_service_state);
+
+/*lint -save -e133 -e10*/
+static const struct pci_device_id hinic3_pci_table[] = {
+	{PCI_VDEVICE(HUAWEI, HINIC3_DEV_ID_SPU), 0},
+	{PCI_VDEVICE(HUAWEI, HINIC3_DEV_ID_STANDARD), 0},
+	{PCI_VDEVICE(HUAWEI, HINIC3_DEV_ID_SDI_5_1_PF), 0},
+	{PCI_VDEVICE(HUAWEI, HINIC3_DEV_ID_SDI_5_0_PF), 0},
+	{PCI_VDEVICE(HUAWEI, HINIC3_DEV_ID_VF), 0},
+	{0, 0}
+};
+
+/*lint -restore*/
+
+MODULE_DEVICE_TABLE(pci, hinic3_pci_table);
+
+/**
+ * hinic3_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ *
+ * Since we only need error detection, not error handling, we
+ * always return PCI_ERS_RESULT_CAN_RECOVER to tell the AER
+ * driver that we don't need a reset (error handling).
+ */
+static pci_ers_result_t hinic3_io_error_detected(struct pci_dev *pdev,
+						 pci_channel_state_t state)
+{
+	struct hinic3_pcidev *pci_adapter = NULL;
+
+	sdk_err(&pdev->dev,
+		"Uncorrectable error detected, log and cleanup error status: 0x%08x\n",
+		state);
+
+	pci_cleanup_aer_uncorrect_error_status(pdev);
+	pci_adapter = pci_get_drvdata(pdev);
+	if (pci_adapter)
+		hinic3_record_pcie_error(pci_adapter->hwdev);
+
+	return PCI_ERS_RESULT_CAN_RECOVER;
+}
+
+static void hinic3_shutdown(struct pci_dev *pdev)
+{
+	struct hinic3_pcidev *pci_adapter = pci_get_drvdata(pdev);
+
+	sdk_info(&pdev->dev, "Shutdown device\n");
+
+	if (pci_adapter)
+		hinic3_shutdown_hwdev(pci_adapter->hwdev);
+
+	pci_disable_device(pdev);
+
+	if (pci_adapter)
+		hinic3_set_api_stop(pci_adapter->hwdev);
+}
+
+#ifdef HAVE_RHEL6_SRIOV_CONFIGURE
+static struct pci_driver_rh hinic3_driver_rh = {
+	.sriov_configure = hinic3_pci_sriov_configure,
+};
+#endif
+
+/* Because we only need error detection, not error handling, the
+ * error_detected callback is enough.
+ */
+static struct pci_error_handlers hinic3_err_handler = {
+	.error_detected = hinic3_io_error_detected,
+};
+
+static struct pci_driver hinic3_driver = {
+	.name = HINIC3_DRV_NAME,
+	.id_table = hinic3_pci_table,
+	.probe = hinic3_probe,
+	.remove = hinic3_remove,
+	.shutdown = hinic3_shutdown,
+#if defined(HAVE_SRIOV_CONFIGURE)
+	.sriov_configure = hinic3_pci_sriov_configure,
+#elif defined(HAVE_RHEL6_SRIOV_CONFIGURE)
+	.rh_reserved = &hinic3_driver_rh,
+#endif
+	.err_handler = &hinic3_err_handler
+};
+
+int hinic3_lld_init(void)
+{
+	int err;
+
+	pr_info("%s - version %s\n", HINIC3_DRV_DESC, HINIC3_DRV_VERSION);
+	memset(g_uld_info, 0, sizeof(g_uld_info));
+
+	hinic3_lld_lock_init();
+	hinic3_uld_lock_init();
+
+	err = hinic3_module_pre_init();
+	if (err) {
+		pr_err("Init custom failed\n");
+		return err;
+	}
+
+	err = pci_register_driver(&hinic3_driver);
+	if (err) {
+		hinic3_module_post_exit();
+		return err;
+	}
+
+	return 0;
+}
+
+void hinic3_lld_exit(void)
+{
+	pci_unregister_driver(&hinic3_driver);
+
+	hinic3_module_post_exit();
+}
+
diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_mbox.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_mbox.c
new file mode 100644
index 000000000000..9112f94acad3
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_mbox.c
@@ -0,0 +1,1842 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2021 Huawei Technologies Co., Ltd */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/semaphore.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#include "ossl_knl.h"
+#include "hinic3_hw.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_csr.h"
+#include "hinic3_hwif.h"
+#include "hinic3_eqs.h"
+#include "hinic3_prof_adap.h"
+#include "hinic3_common.h"
+#include "hinic3_mbox.h"
+
+#define HINIC3_MBOX_INT_DST_AEQN_SHIFT 10
+#define HINIC3_MBOX_INT_SRC_RESP_AEQN_SHIFT 12
+#define HINIC3_MBOX_INT_STAT_DMA_SHIFT 14
+/* The size of data to be sent (unit of 4 bytes) */
+#define HINIC3_MBOX_INT_TX_SIZE_SHIFT 20
+/* SO_RO (strong order, relaxed order) */
+#define HINIC3_MBOX_INT_STAT_DMA_SO_RO_SHIFT 25
+#define HINIC3_MBOX_INT_WB_EN_SHIFT 28
+
+#define HINIC3_MBOX_INT_DST_AEQN_MASK 0x3
+#define HINIC3_MBOX_INT_SRC_RESP_AEQN_MASK 0x3
+#define HINIC3_MBOX_INT_STAT_DMA_MASK 0x3F
+#define HINIC3_MBOX_INT_TX_SIZE_MASK 0x1F
+#define HINIC3_MBOX_INT_STAT_DMA_SO_RO_MASK 0x3
+#define HINIC3_MBOX_INT_WB_EN_MASK 0x1
+
+#define HINIC3_MBOX_INT_SET(val, field) \
+	(((val) & HINIC3_MBOX_INT_##field##_MASK) << \
+	 HINIC3_MBOX_INT_##field##_SHIFT)
+
+enum hinic3_mbox_tx_status {
+	TX_NOT_DONE = 1,
+};
+
+#define HINIC3_MBOX_CTRL_TRIGGER_AEQE_SHIFT 0
+/* specifies the issue request for the message data.
+ * 0 - Tx request is done;
+ * 1 - Tx request is in process.
+ */ +#define HINIC3_MBOX_CTRL_TX_STATUS_SHIFT 1 +#define HINIC3_MBOX_CTRL_DST_FUNC_SHIFT 16 + +#define HINIC3_MBOX_CTRL_TRIGGER_AEQE_MASK 0x1 +#define HINIC3_MBOX_CTRL_TX_STATUS_MASK 0x1 +#define HINIC3_MBOX_CTRL_DST_FUNC_MASK 0x1FFF + +#define HINIC3_MBOX_CTRL_SET(val, field) \ + (((val) & HINIC3_MBOX_CTRL_##field##_MASK) << \ + HINIC3_MBOX_CTRL_##field##_SHIFT) + +#define MBOX_SEGLEN_MASK \ + HINIC3_MSG_HEADER_SET(HINIC3_MSG_HEADER_SEG_LEN_MASK, SEG_LEN) + +#define MBOX_MSG_POLLING_TIMEOUT 8000 +#define HINIC3_MBOX_COMP_TIME 40000U + +#define MBOX_MAX_BUF_SZ 2048U +#define MBOX_HEADER_SZ 8 +#define HINIC3_MBOX_DATA_SIZE (MBOX_MAX_BUF_SZ - MBOX_HEADER_SZ) + +/* MBOX size is 64B, 8B for mbox_header, 8B reserved */ +#define MBOX_SEG_LEN 48 +#define MBOX_SEG_LEN_ALIGN 4 +#define MBOX_WB_STATUS_LEN 16UL + +#define SEQ_ID_START_VAL 0 +#define SEQ_ID_MAX_VAL 42 +#define MBOX_LAST_SEG_MAX_LEN (MBOX_MAX_BUF_SZ - \ + SEQ_ID_MAX_VAL * MBOX_SEG_LEN) + +/* mbox write back status is 16B, only first 4B is used */ +#define MBOX_WB_STATUS_ERRCODE_MASK 0xFFFF +#define MBOX_WB_STATUS_MASK 0xFF +#define MBOX_WB_ERROR_CODE_MASK 0xFF00 +#define MBOX_WB_STATUS_FINISHED_SUCCESS 0xFF +#define MBOX_WB_STATUS_FINISHED_WITH_ERR 0xFE +#define MBOX_WB_STATUS_NOT_FINISHED 0x00 + +#define MBOX_STATUS_FINISHED(wb) \ + (((wb) & MBOX_WB_STATUS_MASK) != MBOX_WB_STATUS_NOT_FINISHED) +#define MBOX_STATUS_SUCCESS(wb) \ + (((wb) & MBOX_WB_STATUS_MASK) == MBOX_WB_STATUS_FINISHED_SUCCESS) +#define MBOX_STATUS_ERRCODE(wb) \ + ((wb) & MBOX_WB_ERROR_CODE_MASK) + +#define DST_AEQ_IDX_DEFAULT_VAL 0 +#define SRC_AEQ_IDX_DEFAULT_VAL 0 +#define NO_DMA_ATTRIBUTE_VAL 0 + +#define MBOX_MSG_NO_DATA_LEN 1 + +#define MBOX_BODY_FROM_HDR(header) ((u8 *)(header) + MBOX_HEADER_SZ) +#define MBOX_AREA(hwif) \ + ((hwif)->cfg_regs_base + HINIC3_FUNC_CSR_MAILBOX_DATA_OFF) + +#define MBOX_DMA_MSG_QUEUE_DEPTH 32 + +#define MBOX_MQ_CI_OFFSET (HINIC3_CFG_REGS_FLAG + HINIC3_FUNC_CSR_MAILBOX_DATA_OFF + \ + MBOX_HEADER_SZ + MBOX_SEG_LEN) + +#define MBOX_MQ_SYNC_CI_SHIFT 0 +#define MBOX_MQ_ASYNC_CI_SHIFT 8 + +#define MBOX_MQ_SYNC_CI_MASK 0xFF +#define MBOX_MQ_ASYNC_CI_MASK 0xFF + +#define MBOX_MQ_CI_SET(val, field) \ + (((val) & MBOX_MQ_##field##_CI_MASK) << MBOX_MQ_##field##_CI_SHIFT) +#define MBOX_MQ_CI_GET(val, field) \ + (((val) >> MBOX_MQ_##field##_CI_SHIFT) & MBOX_MQ_##field##_CI_MASK) +#define MBOX_MQ_CI_CLEAR(val, field) \ + ((val) & (~(MBOX_MQ_##field##_CI_MASK << MBOX_MQ_##field##_CI_SHIFT))) + +#define IS_PF_OR_PPF_SRC(hwdev, src_func_idx) \ + ((src_func_idx) < HINIC3_MAX_PF_NUM(hwdev)) + +#define MBOX_RESPONSE_ERROR 0x1 +#define MBOX_MSG_ID_MASK 0xF +#define MBOX_MSG_ID(func_to_func) ((func_to_func)->send_msg_id) +#define MBOX_MSG_ID_INC(func_to_func) \ + (MBOX_MSG_ID(func_to_func) = \ + (MBOX_MSG_ID(func_to_func) + 1) & MBOX_MSG_ID_MASK) + +/* max message counter wait to process for one function */ +#define HINIC3_MAX_MSG_CNT_TO_PROCESS 10 + +#define MBOX_MSG_CHANNEL_STOP(func_to_func) \ + ((((func_to_func)->lock_channel_en) && \ + test_bit((func_to_func)->cur_msg_channel, \ + &(func_to_func)->channel_stop)) ? 
true : false) + +enum mbox_ordering_type { + STRONG_ORDER, +}; + +enum mbox_write_back_type { + WRITE_BACK = 1, +}; + +enum mbox_aeq_trig_type { + NOT_TRIGGER, + TRIGGER, +}; + +static int send_mbox_msg(struct hinic3_mbox *func_to_func, u8 mod, u16 cmd, + void *msg, u16 msg_len, u16 dst_func, + enum hinic3_msg_direction_type direction, + enum hinic3_msg_ack_type ack_type, + struct mbox_msg_info *msg_info); + +static struct hinic3_msg_desc *get_mbox_msg_desc(struct hinic3_mbox *func_to_func, + u64 dir, u64 src_func_id); + +/** + * hinic3_register_ppf_mbox_cb - register mbox callback for ppf + * @hwdev: the pointer to hw device + * @mod: specific mod that the callback will handle + * @pri_handle specific mod's private data that will be used in callback + * @callback: callback function + * Return: 0 - success, negative - failure + */ +int hinic3_register_ppf_mbox_cb(void *hwdev, u8 mod, void *pri_handle, + hinic3_ppf_mbox_cb callback) +{ + struct hinic3_mbox *func_to_func = NULL; + + if (mod >= HINIC3_MOD_MAX || !hwdev) + return -EFAULT; + + func_to_func = ((struct hinic3_hwdev *)hwdev)->func_to_func; + + func_to_func->ppf_mbox_cb[mod] = callback; + func_to_func->ppf_mbox_data[mod] = pri_handle; + + set_bit(HINIC3_PPF_MBOX_CB_REG, &func_to_func->ppf_mbox_cb_state[mod]); + + return 0; +} +EXPORT_SYMBOL(hinic3_register_ppf_mbox_cb); + +/** + * hinic3_register_pf_mbox_cb - register mbox callback for pf + * @hwdev: the pointer to hw device + * @mod: specific mod that the callback will handle + * @pri_handle specific mod's private data that will be used in callback + * @callback: callback function + * Return: 0 - success, negative - failure + */ +int hinic3_register_pf_mbox_cb(void *hwdev, u8 mod, void *pri_handle, + hinic3_pf_mbox_cb callback) +{ + struct hinic3_mbox *func_to_func = NULL; + + if (mod >= HINIC3_MOD_MAX || !hwdev) + return -EFAULT; + + func_to_func = ((struct hinic3_hwdev *)hwdev)->func_to_func; + + func_to_func->pf_mbox_cb[mod] = callback; + func_to_func->pf_mbox_data[mod] = pri_handle; + + set_bit(HINIC3_PF_MBOX_CB_REG, &func_to_func->pf_mbox_cb_state[mod]); + + return 0; +} +EXPORT_SYMBOL(hinic3_register_pf_mbox_cb); + +/** + * hinic3_register_vf_mbox_cb - register mbox callback for vf + * @hwdev: the pointer to hw device + * @mod: specific mod that the callback will handle + * @pri_handle specific mod's private data that will be used in callback + * @callback: callback function + * Return: 0 - success, negative - failure + */ +int hinic3_register_vf_mbox_cb(void *hwdev, u8 mod, void *pri_handle, + hinic3_vf_mbox_cb callback) +{ + struct hinic3_mbox *func_to_func = NULL; + + if (mod >= HINIC3_MOD_MAX || !hwdev) + return -EFAULT; + + func_to_func = ((struct hinic3_hwdev *)hwdev)->func_to_func; + + func_to_func->vf_mbox_cb[mod] = callback; + func_to_func->vf_mbox_data[mod] = pri_handle; + + set_bit(HINIC3_VF_MBOX_CB_REG, &func_to_func->vf_mbox_cb_state[mod]); + + return 0; +} +EXPORT_SYMBOL(hinic3_register_vf_mbox_cb); + +/** + * hinic3_unregister_ppf_mbox_cb - unregister the mbox callback for ppf + * @hwdev: the pointer to hw device + * @mod: specific mod that the callback will handle + */ +void hinic3_unregister_ppf_mbox_cb(void *hwdev, u8 mod) +{ + struct hinic3_mbox *func_to_func = NULL; + + if (mod >= HINIC3_MOD_MAX || !hwdev) + return; + + func_to_func = ((struct hinic3_hwdev *)hwdev)->func_to_func; + + clear_bit(HINIC3_PPF_MBOX_CB_REG, + &func_to_func->ppf_mbox_cb_state[mod]); + + while (test_bit(HINIC3_PPF_MBOX_CB_RUNNING, + &func_to_func->ppf_mbox_cb_state[mod])) + 
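+		/* A callback may still be running on another CPU; the REG bit
+		 * was cleared above, so no new invocation can begin while we
+		 * wait for RUNNING to drop.
+		 */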
+		usleep_range(900, 1000); /* sleep 900 us ~ 1000 us */
+
+	func_to_func->ppf_mbox_data[mod] = NULL;
+	func_to_func->ppf_mbox_cb[mod] = NULL;
+}
+EXPORT_SYMBOL(hinic3_unregister_ppf_mbox_cb);
+
+/**
+ * hinic3_unregister_pf_mbox_cb - unregister the mbox callback for pf
+ * @hwdev: the pointer to hw device
+ * @mod: specific mod that the callback will handle
+ */
+void hinic3_unregister_pf_mbox_cb(void *hwdev, u8 mod)
+{
+	struct hinic3_mbox *func_to_func = NULL;
+
+	if (mod >= HINIC3_MOD_MAX || !hwdev)
+		return;
+
+	func_to_func = ((struct hinic3_hwdev *)hwdev)->func_to_func;
+
+	clear_bit(HINIC3_PF_MBOX_CB_REG, &func_to_func->pf_mbox_cb_state[mod]);
+
+	while (test_bit(HINIC3_PF_MBOX_CB_RUNNING, &func_to_func->pf_mbox_cb_state[mod]) != 0)
+		usleep_range(900, 1000); /* sleep 900 us ~ 1000 us */
+
+	func_to_func->pf_mbox_data[mod] = NULL;
+	func_to_func->pf_mbox_cb[mod] = NULL;
+}
+EXPORT_SYMBOL(hinic3_unregister_pf_mbox_cb);
+
+/**
+ * hinic3_unregister_vf_mbox_cb - unregister the mbox callback for vf
+ * @hwdev: the pointer to hw device
+ * @mod: specific mod that the callback will handle
+ */
+void hinic3_unregister_vf_mbox_cb(void *hwdev, u8 mod)
+{
+	struct hinic3_mbox *func_to_func = NULL;
+
+	if (mod >= HINIC3_MOD_MAX || !hwdev)
+		return;
+
+	func_to_func = ((struct hinic3_hwdev *)hwdev)->func_to_func;
+
+	clear_bit(HINIC3_VF_MBOX_CB_REG, &func_to_func->vf_mbox_cb_state[mod]);
+
+	while (test_bit(HINIC3_VF_MBOX_CB_RUNNING, &func_to_func->vf_mbox_cb_state[mod]) != 0)
+		usleep_range(900, 1000); /* sleep 900 us ~ 1000 us */
+
+	func_to_func->vf_mbox_data[mod] = NULL;
+	func_to_func->vf_mbox_cb[mod] = NULL;
+}
+EXPORT_SYMBOL(hinic3_unregister_vf_mbox_cb);
+
+/**
+ * hinic3_unregister_ppf_to_pf_mbox_cb - unregister the mbox callback for pf
+ * to receive messages from ppf
+ * @hwdev: the pointer to hw device
+ * @mod: specific mod that the callback will handle
+ */
+void hinic3_unregister_ppf_to_pf_mbox_cb(void *hwdev, u8 mod)
+{
+	struct hinic3_mbox *func_to_func = NULL;
+
+	if (mod >= HINIC3_MOD_MAX || !hwdev)
+		return;
+
+	func_to_func = ((struct hinic3_hwdev *)hwdev)->func_to_func;
+
+	clear_bit(HINIC3_PPF_TO_PF_MBOX_CB_REG,
+		  &func_to_func->ppf_to_pf_mbox_cb_state[mod]);
+
+	while (test_bit(HINIC3_PPF_TO_PF_MBOX_CB_RUNNIG,
+			&func_to_func->ppf_to_pf_mbox_cb_state[mod]))
+		usleep_range(900, 1000); /* sleep 900 us ~ 1000 us */
+
+	func_to_func->pf_recv_ppf_mbox_data[mod] = NULL;
+	func_to_func->pf_recv_ppf_mbox_cb[mod] = NULL;
+}
+
+static int recv_vf_mbox_handler(struct hinic3_mbox *func_to_func,
+				struct hinic3_recv_mbox *recv_mbox,
+				void *buf_out, u16 *out_size)
+{
+	hinic3_vf_mbox_cb cb;
+	int ret;
+
+	if (recv_mbox->mod >= HINIC3_MOD_MAX) {
+		sdk_warn(func_to_func->hwdev->dev_hdl, "Receive illegal mbox message, mod = %hhu\n",
+			 recv_mbox->mod);
+		return -EINVAL;
+	}
+
+	set_bit(HINIC3_VF_MBOX_CB_RUNNING,
+		&func_to_func->vf_mbox_cb_state[recv_mbox->mod]);
+
+	cb = func_to_func->vf_mbox_cb[recv_mbox->mod];
+	if (cb && test_bit(HINIC3_VF_MBOX_CB_REG,
+			   &func_to_func->vf_mbox_cb_state[recv_mbox->mod])) {
+		ret = cb(func_to_func->vf_mbox_data[recv_mbox->mod],
+			 recv_mbox->cmd, recv_mbox->msg,
+			 recv_mbox->msg_len, buf_out, out_size);
+	} else {
+		sdk_warn(func_to_func->hwdev->dev_hdl, "VF mbox cb is not registered\n");
+		ret = -EINVAL;
+	}
+
+	clear_bit(HINIC3_VF_MBOX_CB_RUNNING,
+		  &func_to_func->vf_mbox_cb_state[recv_mbox->mod]);
+
+	return ret;
+}
+
+static int recv_pf_from_ppf_handler(struct hinic3_mbox *func_to_func,
+				    struct hinic3_recv_mbox *recv_mbox,
+				    void *buf_out, u16 *out_size)
+{
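+	/* The RUNNING bit taken below pairs with the drain loop in
+	 * hinic3_unregister_ppf_to_pf_mbox_cb(): unregistration spins until
+	 * RUNNING clears, so the callback and its private data stay valid
+	 * for the duration of this call.
+	 */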
hinic3_pf_recv_from_ppf_mbox_cb cb; + enum hinic3_mod_type mod = recv_mbox->mod; + int ret; + + if (mod >= HINIC3_MOD_MAX) { + sdk_warn(func_to_func->hwdev->dev_hdl, "Receive illegal mbox message, mod = %d\n", + mod); + return -EINVAL; + } + + set_bit(HINIC3_PPF_TO_PF_MBOX_CB_RUNNIG, + &func_to_func->ppf_to_pf_mbox_cb_state[mod]); + + cb = func_to_func->pf_recv_ppf_mbox_cb[mod]; + if (cb && test_bit(HINIC3_PPF_TO_PF_MBOX_CB_REG, + &func_to_func->ppf_to_pf_mbox_cb_state[mod]) != 0) { + ret = cb(func_to_func->pf_recv_ppf_mbox_data[mod], + recv_mbox->cmd, recv_mbox->msg, recv_mbox->msg_len, + buf_out, out_size); + } else { + sdk_warn(func_to_func->hwdev->dev_hdl, "PF receive ppf mailbox callback is not registered\n"); + ret = -EINVAL; + } + + clear_bit(HINIC3_PPF_TO_PF_MBOX_CB_RUNNIG, + &func_to_func->ppf_to_pf_mbox_cb_state[mod]); + + return ret; +} + +static int recv_ppf_mbox_handler(struct hinic3_mbox *func_to_func, + struct hinic3_recv_mbox *recv_mbox, + u8 pf_id, void *buf_out, u16 *out_size) +{ + hinic3_ppf_mbox_cb cb; + u16 vf_id = 0; + int ret; + + if (recv_mbox->mod >= HINIC3_MOD_MAX) { + sdk_warn(func_to_func->hwdev->dev_hdl, "Receive illegal mbox message, mod = %hhu\n", + recv_mbox->mod); + return -EINVAL; + } + + set_bit(HINIC3_PPF_MBOX_CB_RUNNING, + &func_to_func->ppf_mbox_cb_state[recv_mbox->mod]); + + cb = func_to_func->ppf_mbox_cb[recv_mbox->mod]; + if (cb && test_bit(HINIC3_PPF_MBOX_CB_REG, + &func_to_func->ppf_mbox_cb_state[recv_mbox->mod])) { + ret = cb(func_to_func->ppf_mbox_data[recv_mbox->mod], + pf_id, vf_id, recv_mbox->cmd, recv_mbox->msg, + recv_mbox->msg_len, buf_out, out_size); + } else { + sdk_warn(func_to_func->hwdev->dev_hdl, "PPF mbox cb is not registered, mod = %hhu\n", + recv_mbox->mod); + ret = -EINVAL; + } + + clear_bit(HINIC3_PPF_MBOX_CB_RUNNING, + &func_to_func->ppf_mbox_cb_state[recv_mbox->mod]); + + return ret; +} + +static int recv_pf_from_vf_mbox_handler(struct hinic3_mbox *func_to_func, + struct hinic3_recv_mbox *recv_mbox, + u16 src_func_idx, void *buf_out, + u16 *out_size) +{ + hinic3_pf_mbox_cb cb; + u16 vf_id = 0; + int ret; + + if (recv_mbox->mod >= HINIC3_MOD_MAX) { + sdk_warn(func_to_func->hwdev->dev_hdl, "Receive illegal mbox message, mod = %hhu\n", + recv_mbox->mod); + return -EINVAL; + } + + set_bit(HINIC3_PF_MBOX_CB_RUNNING, + &func_to_func->pf_mbox_cb_state[recv_mbox->mod]); + + cb = func_to_func->pf_mbox_cb[recv_mbox->mod]; + if (cb && test_bit(HINIC3_PF_MBOX_CB_REG, + &func_to_func->pf_mbox_cb_state[recv_mbox->mod]) != 0) { + vf_id = src_func_idx - + hinic3_glb_pf_vf_offset(func_to_func->hwdev); + ret = cb(func_to_func->pf_mbox_data[recv_mbox->mod], + vf_id, recv_mbox->cmd, recv_mbox->msg, + recv_mbox->msg_len, buf_out, out_size); + } else { + sdk_warn(func_to_func->hwdev->dev_hdl, "PF mbox mod(0x%x) cb is not registered\n", + recv_mbox->mod); + ret = -EINVAL; + } + + clear_bit(HINIC3_PF_MBOX_CB_RUNNING, + &func_to_func->pf_mbox_cb_state[recv_mbox->mod]); + + return ret; +} + +static void response_for_recv_func_mbox(struct hinic3_mbox *func_to_func, + struct hinic3_recv_mbox *recv_mbox, + int err, u16 out_size, u16 src_func_idx) +{ + struct mbox_msg_info msg_info = {0}; + u16 size = out_size; + + msg_info.msg_id = recv_mbox->msg_id; + if (err) + msg_info.status = HINIC3_MBOX_PF_SEND_ERR; + + /* if not data need to response, set out_size to 1 */ + if (!out_size || err) + size = MBOX_MSG_NO_DATA_LEN; + + if (size > HINIC3_MBOX_DATA_SIZE) { + sdk_err(func_to_func->hwdev->dev_hdl, "Response msg len(%d) exceed limit(%d)\n", + size, 
HINIC3_MBOX_DATA_SIZE); + size = HINIC3_MBOX_DATA_SIZE; + } + + send_mbox_msg(func_to_func, recv_mbox->mod, recv_mbox->cmd, + recv_mbox->resp_buff, size, src_func_idx, + HINIC3_MSG_RESPONSE, HINIC3_MSG_NO_ACK, &msg_info); +} + +static void recv_func_mbox_handler(struct hinic3_mbox *func_to_func, + struct hinic3_recv_mbox *recv_mbox) +{ + struct hinic3_hwdev *dev = func_to_func->hwdev; + void *buf_out = recv_mbox->resp_buff; + u16 src_func_idx = recv_mbox->src_func_idx; + u16 out_size = HINIC3_MBOX_DATA_SIZE; + int err = 0; + + if (HINIC3_IS_VF(dev)) { + err = recv_vf_mbox_handler(func_to_func, recv_mbox, buf_out, + &out_size); + } else { /* pf/ppf process */ + if (IS_PF_OR_PPF_SRC(dev, src_func_idx)) { + if (HINIC3_IS_PPF(dev)) { + err = recv_ppf_mbox_handler(func_to_func, + recv_mbox, + (u8)src_func_idx, + buf_out, &out_size); + if (err) + goto out; + } else { + err = recv_pf_from_ppf_handler(func_to_func, + recv_mbox, + buf_out, + &out_size); + if (err) + goto out; + } + /* The source is neither PF nor PPF, so it is from VF */ + } else { + err = recv_pf_from_vf_mbox_handler(func_to_func, + recv_mbox, + src_func_idx, + buf_out, &out_size); + } + } + +out: + if (recv_mbox->ack_type == HINIC3_MSG_ACK) + response_for_recv_func_mbox(func_to_func, recv_mbox, err, + out_size, src_func_idx); +} + +static struct hinic3_recv_mbox *alloc_recv_mbox(void) +{ + struct hinic3_recv_mbox *recv_msg = NULL; + + recv_msg = kzalloc(sizeof(*recv_msg), GFP_KERNEL); + if (!recv_msg) + return NULL; + + recv_msg->msg = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL); + if (!recv_msg->msg) + goto alloc_msg_err; + + recv_msg->resp_buff = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL); + if (!recv_msg->resp_buff) + goto alloc_resp_bff_err; + + return recv_msg; + +alloc_resp_bff_err: + kfree(recv_msg->msg); + +alloc_msg_err: + kfree(recv_msg); + + return NULL; +} + +static void free_recv_mbox(struct hinic3_recv_mbox *recv_msg) +{ + kfree(recv_msg->resp_buff); + kfree(recv_msg->msg); + kfree(recv_msg); +} + +static void recv_func_mbox_work_handler(struct work_struct *work) +{ + struct hinic3_mbox_work *mbox_work = + container_of(work, struct hinic3_mbox_work, work); + + recv_func_mbox_handler(mbox_work->func_to_func, mbox_work->recv_mbox); + + atomic_dec(&mbox_work->msg_ch->recv_msg_cnt); + + destroy_work(&mbox_work->work); + + free_recv_mbox(mbox_work->recv_mbox); + kfree(mbox_work); +} + +static void resp_mbox_handler(struct hinic3_mbox *func_to_func, + const struct hinic3_msg_desc *msg_desc) +{ + spin_lock(&func_to_func->mbox_lock); + if (msg_desc->msg_info.msg_id == func_to_func->send_msg_id && + func_to_func->event_flag == EVENT_START) + func_to_func->event_flag = EVENT_SUCCESS; + else + sdk_err(func_to_func->hwdev->dev_hdl, + "Mbox response timeout, current send msg id(0x%x), recv msg id(0x%x), status(0x%x)\n", + func_to_func->send_msg_id, msg_desc->msg_info.msg_id, + msg_desc->msg_info.status); + spin_unlock(&func_to_func->mbox_lock); +} + +static void recv_mbox_msg_handler(struct hinic3_mbox *func_to_func, + struct hinic3_msg_desc *msg_desc, + u64 mbox_header) +{ + struct hinic3_hwdev *hwdev = func_to_func->hwdev; + struct hinic3_recv_mbox *recv_msg = NULL; + struct hinic3_mbox_work *mbox_work = NULL; + struct hinic3_msg_channel *msg_ch = + container_of(msg_desc, struct hinic3_msg_channel, recv_msg); + u16 src_func_idx = HINIC3_MSG_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX); + + if (atomic_read(&msg_ch->recv_msg_cnt) > + HINIC3_MAX_MSG_CNT_TO_PROCESS) { + sdk_warn(hwdev->dev_hdl, "This function(%u) have %d message wait to process, 
can't add to work queue\n", + src_func_idx, atomic_read(&msg_ch->recv_msg_cnt)); + return; + } + + recv_msg = alloc_recv_mbox(); + if (!recv_msg) { + sdk_err(hwdev->dev_hdl, "Failed to alloc receive mbox message buffer\n"); + return; + } + recv_msg->msg_len = msg_desc->msg_len; + memcpy(recv_msg->msg, msg_desc->msg, recv_msg->msg_len); + recv_msg->msg_id = msg_desc->msg_info.msg_id; + recv_msg->mod = HINIC3_MSG_HEADER_GET(mbox_header, MODULE); + recv_msg->cmd = HINIC3_MSG_HEADER_GET(mbox_header, CMD); + recv_msg->ack_type = HINIC3_MSG_HEADER_GET(mbox_header, NO_ACK); + recv_msg->src_func_idx = src_func_idx; + + mbox_work = kzalloc(sizeof(*mbox_work), GFP_KERNEL); + if (!mbox_work) { + sdk_err(hwdev->dev_hdl, "Allocate mbox work memory failed.\n"); + free_recv_mbox(recv_msg); + return; + } + + atomic_inc(&msg_ch->recv_msg_cnt); + + mbox_work->func_to_func = func_to_func; + mbox_work->recv_mbox = recv_msg; + mbox_work->msg_ch = msg_ch; + + INIT_WORK(&mbox_work->work, recv_func_mbox_work_handler); + queue_work_on(hisdk3_get_work_cpu_affinity(hwdev, WORK_TYPE_MBOX), + func_to_func->workq, &mbox_work->work); +} + +static bool check_mbox_segment(struct hinic3_mbox *func_to_func, + struct hinic3_msg_desc *msg_desc, + u64 mbox_header, void *mbox_body) +{ + u8 seq_id, seg_len, msg_id, mod; + u16 src_func_idx, cmd; + + seq_id = HINIC3_MSG_HEADER_GET(mbox_header, SEQID); + seg_len = HINIC3_MSG_HEADER_GET(mbox_header, SEG_LEN); + msg_id = HINIC3_MSG_HEADER_GET(mbox_header, MSG_ID); + mod = HINIC3_MSG_HEADER_GET(mbox_header, MODULE); + cmd = HINIC3_MSG_HEADER_GET(mbox_header, CMD); + src_func_idx = HINIC3_MSG_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX); + + if (seq_id > SEQ_ID_MAX_VAL || seg_len > MBOX_SEG_LEN || + (seq_id == SEQ_ID_MAX_VAL && seg_len > MBOX_LAST_SEG_MAX_LEN)) + goto seg_err; + + if (seq_id == 0) { + msg_desc->seq_id = seq_id; + msg_desc->msg_info.msg_id = msg_id; + msg_desc->mod = mod; + msg_desc->cmd = cmd; + } else { + if (seq_id != msg_desc->seq_id + 1 || msg_id != msg_desc->msg_info.msg_id || + mod != msg_desc->mod || cmd != msg_desc->cmd) + goto seg_err; + + msg_desc->seq_id = seq_id; + } + + return true; + +seg_err: + sdk_err(func_to_func->hwdev->dev_hdl, + "Mailbox segment check failed, src func id: 0x%x, front seg info: seq id: 0x%x, msg id: 0x%x, mod: 0x%x, cmd: 0x%x\n", + src_func_idx, msg_desc->seq_id, msg_desc->msg_info.msg_id, + msg_desc->mod, msg_desc->cmd); + sdk_err(func_to_func->hwdev->dev_hdl, + "Current seg info: seg len: 0x%x, seq id: 0x%x, msg id: 0x%x, mod: 0x%x, cmd: 0x%x\n", + seg_len, seq_id, msg_id, mod, cmd); + + return false; +} + +static void recv_mbox_handler(struct hinic3_mbox *func_to_func, + u64 *header, struct hinic3_msg_desc *msg_desc) +{ + u64 mbox_header = *header; + void *mbox_body = MBOX_BODY_FROM_HDR(((void *)header)); + u8 seq_id, seg_len; + int pos; + + if (!check_mbox_segment(func_to_func, msg_desc, mbox_header, mbox_body)) { + msg_desc->seq_id = SEQ_ID_MAX_VAL; + return; + } + + seq_id = HINIC3_MSG_HEADER_GET(mbox_header, SEQID); + seg_len = HINIC3_MSG_HEADER_GET(mbox_header, SEG_LEN); + + pos = seq_id * MBOX_SEG_LEN; + memcpy((u8 *)msg_desc->msg + pos, mbox_body, seg_len); + + if (!HINIC3_MSG_HEADER_GET(mbox_header, LAST)) + return; + + msg_desc->msg_len = HINIC3_MSG_HEADER_GET(mbox_header, MSG_LEN); + msg_desc->msg_info.status = HINIC3_MSG_HEADER_GET(mbox_header, STATUS); + + if (HINIC3_MSG_HEADER_GET(mbox_header, DIRECTION) == + HINIC3_MSG_RESPONSE) { + resp_mbox_handler(func_to_func, msg_desc); + return; + } + + 
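+	/* Direct-send message fully reassembled: hand it to the per-function
+	 * work queue so the registered callback runs in process context.
+	 */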
recv_mbox_msg_handler(func_to_func, msg_desc, mbox_header); +} + +void hinic3_mbox_func_aeqe_handler(void *handle, u8 *header, u8 size) +{ + struct hinic3_mbox *func_to_func = NULL; + struct hinic3_msg_desc *msg_desc = NULL; + u64 mbox_header = *((u64 *)header); + u64 src, dir; + + func_to_func = ((struct hinic3_hwdev *)handle)->func_to_func; + + dir = HINIC3_MSG_HEADER_GET(mbox_header, DIRECTION); + src = HINIC3_MSG_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX); + + msg_desc = get_mbox_msg_desc(func_to_func, dir, src); + if (!msg_desc) { + sdk_err(func_to_func->hwdev->dev_hdl, + "Mailbox source function id: %u is invalid for current function\n", + (u32)src); + return; + } + + recv_mbox_handler(func_to_func, (u64 *)header, msg_desc); +} + +static int init_mbox_dma_queue(struct hinic3_hwdev *hwdev, struct mbox_dma_queue *mq) +{ + u32 size; + + mq->depth = MBOX_DMA_MSG_QUEUE_DEPTH; + mq->prod_idx = 0; + mq->cons_idx = 0; + + size = mq->depth * MBOX_MAX_BUF_SZ; + mq->dma_buff_vaddr = dma_zalloc_coherent(hwdev->dev_hdl, size, &mq->dma_buff_paddr, + GFP_KERNEL); + if (!mq->dma_buff_vaddr) { + sdk_err(hwdev->dev_hdl, "Failed to alloc dma_buffer\n"); + return -ENOMEM; + } + + return 0; +} + +static void deinit_mbox_dma_queue(struct hinic3_hwdev *hwdev, struct mbox_dma_queue *mq) +{ + dma_free_coherent(hwdev->dev_hdl, mq->depth * MBOX_MAX_BUF_SZ, + mq->dma_buff_vaddr, mq->dma_buff_paddr); +} + +static int hinic3_init_mbox_dma_queue(struct hinic3_mbox *func_to_func) +{ + u32 val; + int err; + + err = init_mbox_dma_queue(func_to_func->hwdev, &func_to_func->sync_msg_queue); + if (err) + return err; + + err = init_mbox_dma_queue(func_to_func->hwdev, &func_to_func->async_msg_queue); + if (err) { + deinit_mbox_dma_queue(func_to_func->hwdev, &func_to_func->sync_msg_queue); + return err; + } + + val = hinic3_hwif_read_reg(func_to_func->hwdev->hwif, MBOX_MQ_CI_OFFSET); + val = MBOX_MQ_CI_CLEAR(val, SYNC); + val = MBOX_MQ_CI_CLEAR(val, ASYNC); + hinic3_hwif_write_reg(func_to_func->hwdev->hwif, MBOX_MQ_CI_OFFSET, val); + + return 0; +} + +static void hinic3_deinit_mbox_dma_queue(struct hinic3_mbox *func_to_func) +{ + deinit_mbox_dma_queue(func_to_func->hwdev, &func_to_func->sync_msg_queue); + deinit_mbox_dma_queue(func_to_func->hwdev, &func_to_func->async_msg_queue); +} + +#define MBOX_DMA_MSG_INIT_XOR_VAL 0x5a5a5a5a +#define MBOX_XOR_DATA_ALIGN 4 +static u32 mbox_dma_msg_xor(u32 *data, u16 msg_len) +{ + u32 xor = MBOX_DMA_MSG_INIT_XOR_VAL; + u16 dw_len = msg_len / sizeof(u32); + u16 i; + + for (i = 0; i < dw_len; i++) + xor ^= data[i]; + + return xor; +} + +#define MQ_ID_MASK(mq, idx) ((idx) & ((mq)->depth - 1)) +#define IS_MSG_QUEUE_FULL(mq) (MQ_ID_MASK(mq, (mq)->prod_idx + 1) == \ + MQ_ID_MASK(mq, (mq)->cons_idx)) + +static int mbox_prepare_dma_entry(struct hinic3_mbox *func_to_func, struct mbox_dma_queue *mq, + struct mbox_dma_msg *dma_msg, void *msg, u16 msg_len) +{ + u64 dma_addr, offset; + void *dma_vaddr; + + if (IS_MSG_QUEUE_FULL(mq)) { + sdk_err(func_to_func->hwdev->dev_hdl, "Mbox sync message queue is busy, pi: %u, ci: %u\n", + mq->prod_idx, MQ_ID_MASK(mq, mq->cons_idx)); + return -EBUSY; + } + + /* copy data to DMA buffer */ + offset = mq->prod_idx * MBOX_MAX_BUF_SZ; + dma_vaddr = (u8 *)mq->dma_buff_vaddr + offset; + memcpy(dma_vaddr, msg, msg_len); + dma_addr = mq->dma_buff_paddr + offset; + dma_msg->dma_addr_high = upper_32_bits(dma_addr); + dma_msg->dma_addr_low = lower_32_bits(dma_addr); + dma_msg->msg_len = msg_len; + /* The firmware obtains message based on 4B alignment. 
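+	 * mbox_dma_msg_xor() walks whole dwords, so msg_len is rounded up to
+	 * MBOX_XOR_DATA_ALIGN (4) bytes first to cover the final partial dword.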
*/ + dma_msg->xor = mbox_dma_msg_xor(dma_vaddr, ALIGN(msg_len, MBOX_XOR_DATA_ALIGN)); + + mq->prod_idx++; + mq->prod_idx = MQ_ID_MASK(mq, mq->prod_idx); + + return 0; +} + +static int mbox_prepare_dma_msg(struct hinic3_mbox *func_to_func, enum hinic3_msg_ack_type ack_type, + struct mbox_dma_msg *dma_msg, void *msg, u16 msg_len) +{ + struct mbox_dma_queue *mq = NULL; + u32 val; + + val = hinic3_hwif_read_reg(func_to_func->hwdev->hwif, MBOX_MQ_CI_OFFSET); + if (ack_type == HINIC3_MSG_ACK) { + mq = &func_to_func->sync_msg_queue; + mq->cons_idx = MBOX_MQ_CI_GET(val, SYNC); + } else { + mq = &func_to_func->async_msg_queue; + mq->cons_idx = MBOX_MQ_CI_GET(val, ASYNC); + } + + return mbox_prepare_dma_entry(func_to_func, mq, dma_msg, msg, msg_len); +} + +static void clear_mbox_status(struct hinic3_send_mbox *mbox) +{ + *mbox->wb_status = 0; + + /* clear mailbox write back status */ + wmb(); +} + + +static void mbox_copy_header(struct hinic3_hwdev *hwdev, + struct hinic3_send_mbox *mbox, u64 *header) +{ + u32 *data = (u32 *)header; + u32 i, idx_max = MBOX_HEADER_SZ / sizeof(u32); + + for (i = 0; i < idx_max; i++) { + __raw_writel(cpu_to_be32(*(data + i)), + mbox->data + i * sizeof(u32)); + } +} + +static void mbox_copy_send_data(struct hinic3_hwdev *hwdev, + struct hinic3_send_mbox *mbox, void *seg, + u16 seg_len) +{ + u32 *data = seg; + u32 data_len, chk_sz = sizeof(u32); + u32 i, idx_max; + u8 mbox_max_buf[MBOX_SEG_LEN] = {0}; + + /* The mbox message should be aligned in 4 bytes. */ + if (seg_len % chk_sz) { + memcpy(mbox_max_buf, seg, seg_len); + data = (u32 *)mbox_max_buf; + } + + data_len = seg_len; + idx_max = ALIGN(data_len, chk_sz) / chk_sz; + + for (i = 0; i < idx_max; i++) { + __raw_writel(cpu_to_be32(*(data + i)), + mbox->data + MBOX_HEADER_SZ + i * sizeof(u32)); + } +} + +static void write_mbox_msg_attr(struct hinic3_mbox *func_to_func, + u16 dst_func, u16 dst_aeqn, u16 seg_len) +{ + u32 mbox_int, mbox_ctrl; + u16 func = dst_func; + + /* for VF to PF's message, dest func id will self-learning by HW */ + if (HINIC3_IS_VF(func_to_func->hwdev) && dst_func != HINIC3_MGMT_SRC_ID) + func = 0; /* the destination is the VF's PF */ + + mbox_int = HINIC3_MBOX_INT_SET(dst_aeqn, DST_AEQN) | + HINIC3_MBOX_INT_SET(0, SRC_RESP_AEQN) | + HINIC3_MBOX_INT_SET(NO_DMA_ATTRIBUTE_VAL, STAT_DMA) | + HINIC3_MBOX_INT_SET(ALIGN(seg_len + MBOX_HEADER_SZ, + MBOX_SEG_LEN_ALIGN) >> 2, + TX_SIZE) | + HINIC3_MBOX_INT_SET(STRONG_ORDER, STAT_DMA_SO_RO) | + HINIC3_MBOX_INT_SET(WRITE_BACK, WB_EN); + + hinic3_hwif_write_reg(func_to_func->hwdev->hwif, + HINIC3_FUNC_CSR_MAILBOX_INT_OFFSET_OFF, mbox_int); + + wmb(); /* writing the mbox int attributes */ + mbox_ctrl = HINIC3_MBOX_CTRL_SET(TX_NOT_DONE, TX_STATUS); + + mbox_ctrl |= HINIC3_MBOX_CTRL_SET(NOT_TRIGGER, TRIGGER_AEQE); + + mbox_ctrl |= HINIC3_MBOX_CTRL_SET(func, DST_FUNC); + + hinic3_hwif_write_reg(func_to_func->hwdev->hwif, + HINIC3_FUNC_CSR_MAILBOX_CONTROL_OFF, mbox_ctrl); +} + +static void dump_mbox_reg(struct hinic3_hwdev *hwdev) +{ + u32 val; + + val = hinic3_hwif_read_reg(hwdev->hwif, + HINIC3_FUNC_CSR_MAILBOX_CONTROL_OFF); + sdk_err(hwdev->dev_hdl, "Mailbox control reg: 0x%x\n", val); + val = hinic3_hwif_read_reg(hwdev->hwif, + HINIC3_FUNC_CSR_MAILBOX_INT_OFFSET_OFF); + sdk_err(hwdev->dev_hdl, "Mailbox interrupt offset: 0x%x\n", val); +} + +static u16 get_mbox_status(const struct hinic3_send_mbox *mbox) +{ + /* write back is 16B, but only use first 4B */ + u64 wb_val = be64_to_cpu(*mbox->wb_status); + + rmb(); /* verify reading before check */ + + return 
(u16)(wb_val & MBOX_WB_STATUS_ERRCODE_MASK); +} + +static enum hinic3_wait_return check_mbox_wb_status(void *priv_data) +{ + struct hinic3_mbox *func_to_func = priv_data; + u16 wb_status; + + if (MBOX_MSG_CHANNEL_STOP(func_to_func) || !func_to_func->hwdev->chip_present_flag) + return WAIT_PROCESS_ERR; + + wb_status = get_mbox_status(&func_to_func->send_mbox); + + return MBOX_STATUS_FINISHED(wb_status) ? + WAIT_PROCESS_CPL : WAIT_PROCESS_WAITING; +} + +static int send_mbox_seg(struct hinic3_mbox *func_to_func, u64 header, + u16 dst_func, void *seg, u16 seg_len, void *msg_info) +{ + struct hinic3_send_mbox *send_mbox = &func_to_func->send_mbox; + struct hinic3_hwdev *hwdev = func_to_func->hwdev; + u8 num_aeqs = hwdev->hwif->attr.num_aeqs; + u16 dst_aeqn, wb_status = 0, errcode; + u16 seq_dir = HINIC3_MSG_HEADER_GET(header, DIRECTION); + int err; + + /* mbox to mgmt cpu, hardware don't care dst aeq id */ + if (num_aeqs > HINIC3_MBOX_RSP_MSG_AEQ) + dst_aeqn = (seq_dir == HINIC3_MSG_DIRECT_SEND) ? + HINIC3_ASYNC_MSG_AEQ : HINIC3_MBOX_RSP_MSG_AEQ; + else + dst_aeqn = 0; + + clear_mbox_status(send_mbox); + + mbox_copy_header(hwdev, send_mbox, &header); + + mbox_copy_send_data(hwdev, send_mbox, seg, seg_len); + + write_mbox_msg_attr(func_to_func, dst_func, dst_aeqn, seg_len); + + wmb(); /* writing the mbox msg attributes */ + + err = hinic3_wait_for_timeout(func_to_func, check_mbox_wb_status, + MBOX_MSG_POLLING_TIMEOUT, USEC_PER_MSEC); + wb_status = get_mbox_status(send_mbox); + if (err) { + sdk_err(hwdev->dev_hdl, "Send mailbox segment timeout, wb status: 0x%x\n", + wb_status); + dump_mbox_reg(hwdev); + return -ETIMEDOUT; + } + + if (!MBOX_STATUS_SUCCESS(wb_status)) { + sdk_err(hwdev->dev_hdl, "Send mailbox segment to function %u error, wb status: 0x%x\n", + dst_func, wb_status); + errcode = MBOX_STATUS_ERRCODE(wb_status); + return errcode ? 
errcode : -EFAULT; + } + + return 0; +} + +static int send_mbox_msg(struct hinic3_mbox *func_to_func, u8 mod, u16 cmd, + void *msg, u16 msg_len, u16 dst_func, + enum hinic3_msg_direction_type direction, + enum hinic3_msg_ack_type ack_type, + struct mbox_msg_info *msg_info) +{ + struct hinic3_hwdev *hwdev = func_to_func->hwdev; + struct mbox_dma_msg dma_msg = {0}; + enum hinic3_data_type data_type = HINIC3_DATA_INLINE; + int err = 0; + u32 seq_id = 0; + u16 seg_len = MBOX_SEG_LEN; + u16 rsp_aeq_id, left; + u8 *msg_seg = NULL; + u64 header = 0; + + if (hwdev->poll || hwdev->hwif->attr.num_aeqs >= 0x2) + rsp_aeq_id = HINIC3_MBOX_RSP_MSG_AEQ; + else + rsp_aeq_id = 0; + + mutex_lock(&func_to_func->msg_send_lock); + + if (IS_DMA_MBX_MSG(dst_func) && !COMM_SUPPORT_MBOX_SEGMENT(hwdev)) { + err = mbox_prepare_dma_msg(func_to_func, ack_type, &dma_msg, msg, msg_len); + if (err != 0) + goto send_err; + + msg = &dma_msg; + msg_len = sizeof(dma_msg); + data_type = HINIC3_DATA_DMA; + } + + msg_seg = (u8 *)msg; + left = msg_len; + + header = HINIC3_MSG_HEADER_SET(msg_len, MSG_LEN) | + HINIC3_MSG_HEADER_SET(mod, MODULE) | + HINIC3_MSG_HEADER_SET(seg_len, SEG_LEN) | + HINIC3_MSG_HEADER_SET(ack_type, NO_ACK) | + HINIC3_MSG_HEADER_SET(data_type, DATA_TYPE) | + HINIC3_MSG_HEADER_SET(SEQ_ID_START_VAL, SEQID) | + HINIC3_MSG_HEADER_SET(NOT_LAST_SEGMENT, LAST) | + HINIC3_MSG_HEADER_SET(direction, DIRECTION) | + HINIC3_MSG_HEADER_SET(cmd, CMD) | + /* The vf's offset to its associated pf */ + HINIC3_MSG_HEADER_SET(msg_info->msg_id, MSG_ID) | + HINIC3_MSG_HEADER_SET(rsp_aeq_id, AEQ_ID) | + HINIC3_MSG_HEADER_SET(HINIC3_MSG_FROM_MBOX, SOURCE) | + HINIC3_MSG_HEADER_SET(!!msg_info->status, STATUS) | + HINIC3_MSG_HEADER_SET(hinic3_global_func_id(hwdev), + SRC_GLB_FUNC_IDX); + + /* send the message in MBOX_SEG_LEN segments, updating SEQID and LAST in the header per segment */ + while (!(HINIC3_MSG_HEADER_GET(header, LAST))) { + if (left <= MBOX_SEG_LEN) { + header &= ~MBOX_SEGLEN_MASK; + header |= HINIC3_MSG_HEADER_SET(left, SEG_LEN); + header |= HINIC3_MSG_HEADER_SET(LAST_SEGMENT, LAST); + + seg_len = left; + } + + err = send_mbox_seg(func_to_func, header, dst_func, msg_seg, + seg_len, msg_info); + if (err != 0) { + sdk_err(hwdev->dev_hdl, "Failed to send mbox seg, seq_id=0x%llx\n", + HINIC3_MSG_HEADER_GET(header, SEQID)); + goto send_err; + } + + left -= MBOX_SEG_LEN; + msg_seg += MBOX_SEG_LEN; /*lint !e662 */ + + seq_id++; + header &= ~(HINIC3_MSG_HEADER_SET(HINIC3_MSG_HEADER_SEQID_MASK, + SEQID)); + header |= HINIC3_MSG_HEADER_SET(seq_id, SEQID); + } + +send_err: + mutex_unlock(&func_to_func->msg_send_lock); + + return err; +} + +static void set_mbox_to_func_event(struct hinic3_mbox *func_to_func, + enum mbox_event_state event_flag) +{ + spin_lock(&func_to_func->mbox_lock); + func_to_func->event_flag = event_flag; + spin_unlock(&func_to_func->mbox_lock); +} + +static enum hinic3_wait_return check_mbox_msg_finish(void *priv_data) +{ + struct hinic3_mbox *func_to_func = priv_data; + + if (MBOX_MSG_CHANNEL_STOP(func_to_func) || func_to_func->hwdev->chip_present_flag == 0) + return WAIT_PROCESS_ERR; + + return (func_to_func->event_flag == EVENT_SUCCESS) ? + WAIT_PROCESS_CPL : WAIT_PROCESS_WAITING; +} + +static int wait_mbox_msg_completion(struct hinic3_mbox *func_to_func, + u32 timeout) +{ + u32 wait_time; + int err; + + wait_time = (timeout != 0) ?
timeout : HINIC3_MBOX_COMP_TIME; + err = hinic3_wait_for_timeout(func_to_func, check_mbox_msg_finish, + wait_time, USEC_PER_MSEC); + if (err) { + set_mbox_to_func_event(func_to_func, EVENT_TIMEOUT); + return -ETIMEDOUT; + } + + set_mbox_to_func_event(func_to_func, EVENT_END); + + return 0; +} + +#define TRY_MBOX_LOCK_SLEEP 1000 +static int send_mbox_msg_lock(struct hinic3_mbox *func_to_func, u16 channel) +{ + if (!func_to_func->lock_channel_en) { + mutex_lock(&func_to_func->mbox_send_lock); + return 0; + } + + while (test_bit(channel, &func_to_func->channel_stop) == 0) { + if (mutex_trylock(&func_to_func->mbox_send_lock) != 0) + return 0; + + usleep_range(TRY_MBOX_LOCK_SLEEP - 1, TRY_MBOX_LOCK_SLEEP); + } + + return -EAGAIN; +} + +static void send_mbox_msg_unlock(struct hinic3_mbox *func_to_func) +{ + mutex_unlock(&func_to_func->mbox_send_lock); +} + +int hinic3_mbox_to_func(struct hinic3_mbox *func_to_func, u8 mod, u16 cmd, + u16 dst_func, void *buf_in, u16 in_size, void *buf_out, + u16 *out_size, u32 timeout, u16 channel) +{ + /* use mbox_resp to hold the data responded by the other function */ + struct hinic3_msg_desc *msg_desc = NULL; + struct mbox_msg_info msg_info = {0}; + int err; + + if (func_to_func->hwdev->chip_present_flag == 0) + return -EPERM; + + /* expect response message */ + msg_desc = get_mbox_msg_desc(func_to_func, HINIC3_MSG_RESPONSE, + dst_func); + if (!msg_desc) + return -EFAULT; + + err = send_mbox_msg_lock(func_to_func, channel); + if (err) + return err; + + func_to_func->cur_msg_channel = channel; + msg_info.msg_id = MBOX_MSG_ID_INC(func_to_func); + + set_mbox_to_func_event(func_to_func, EVENT_START); + + err = send_mbox_msg(func_to_func, mod, cmd, buf_in, in_size, dst_func, + HINIC3_MSG_DIRECT_SEND, HINIC3_MSG_ACK, &msg_info); + if (err) { + sdk_err(func_to_func->hwdev->dev_hdl, "Send mailbox mod %u, cmd %u failed, msg_id: %u, err: %d\n", + mod, cmd, msg_info.msg_id, err); + set_mbox_to_func_event(func_to_func, EVENT_FAIL); + goto send_err; + } + + if (wait_mbox_msg_completion(func_to_func, timeout)) { + sdk_err(func_to_func->hwdev->dev_hdl, + "Send mbox msg timeout, msg_id: %u\n", msg_info.msg_id); + hinic3_dump_aeq_info(func_to_func->hwdev); + err = -ETIMEDOUT; + goto send_err; + } + + if (mod != msg_desc->mod || cmd != msg_desc->cmd) { + sdk_err(func_to_func->hwdev->dev_hdl, + "Invalid response mbox message, mod: 0x%x, cmd: 0x%x, expect mod: 0x%x, cmd: 0x%x\n", + msg_desc->mod, msg_desc->cmd, mod, cmd); + err = -EFAULT; + goto send_err; + } + + if (msg_desc->msg_info.status) { + err = msg_desc->msg_info.status; + goto send_err; + } + + if (buf_out && out_size) { + if (*out_size < msg_desc->msg_len) { + sdk_err(func_to_func->hwdev->dev_hdl, + "Invalid response mbox message length: %u for mod %d cmd %u, should be less than: %u\n", + msg_desc->msg_len, mod, cmd, *out_size); + err = -EFAULT; + goto send_err; + } + + if (msg_desc->msg_len) + memcpy(buf_out, msg_desc->msg, msg_desc->msg_len); + + *out_size = msg_desc->msg_len; + } + +send_err: + send_mbox_msg_unlock(func_to_func); + + return err; +} + +static int mbox_func_params_valid(struct hinic3_mbox *func_to_func, + void *buf_in, u16 in_size, u16 channel) +{ + if (!buf_in || !in_size) + return -EINVAL; + + if (in_size > HINIC3_MBOX_DATA_SIZE) { + sdk_err(func_to_func->hwdev->dev_hdl, + "Mbox msg len %u exceeds limit: [1, %u]\n", + in_size, HINIC3_MBOX_DATA_SIZE); + return -EINVAL; + } + + if (channel >= HINIC3_CHANNEL_MAX) { + sdk_err(func_to_func->hwdev->dev_hdl, + "Invalid channel id: 0x%x\n", channel); + return
-EINVAL; + } + + return 0; +} + +static int hinic3_mbox_to_func_no_ack(struct hinic3_hwdev *hwdev, u16 func_idx, + u8 mod, u16 cmd, void *buf_in, u16 in_size, + u16 channel) +{ + struct mbox_msg_info msg_info = {0}; + int err = mbox_func_params_valid(hwdev->func_to_func, buf_in, in_size, + channel); + if (err) + return err; + + err = send_mbox_msg_lock(hwdev->func_to_func, channel); + if (err) + return err; + + err = send_mbox_msg(hwdev->func_to_func, mod, cmd, buf_in, in_size, + func_idx, HINIC3_MSG_DIRECT_SEND, + HINIC3_MSG_NO_ACK, &msg_info); + if (err) + sdk_err(hwdev->dev_hdl, "Send mailbox no ack failed\n"); + + send_mbox_msg_unlock(hwdev->func_to_func); + + return err; +} + +int hinic3_send_mbox_to_mgmt(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size, u32 timeout, u16 channel) +{ + struct hinic3_mbox *func_to_func = hwdev->func_to_func; + int err = mbox_func_params_valid(func_to_func, buf_in, in_size, + channel); + if (err) + return err; + + /* TODO: MPU has not implemented this cmd yet */ + if (mod == HINIC3_MOD_COMM && cmd == COMM_MGMT_CMD_SEND_API_ACK_BY_UP) + return 0; + + return hinic3_mbox_to_func(func_to_func, mod, cmd, HINIC3_MGMT_SRC_ID, + buf_in, in_size, buf_out, out_size, timeout, + channel); +} + +void hinic3_response_mbox_to_mgmt(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd, + void *buf_in, u16 in_size, u16 msg_id) +{ + struct mbox_msg_info msg_info; + + msg_info.msg_id = (u8)msg_id; + msg_info.status = 0; + + send_mbox_msg(hwdev->func_to_func, mod, cmd, buf_in, in_size, + HINIC3_MGMT_SRC_ID, HINIC3_MSG_RESPONSE, + HINIC3_MSG_NO_ACK, &msg_info); +} + +int hinic3_send_mbox_to_mgmt_no_ack(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd, + void *buf_in, u16 in_size, u16 channel) +{ + struct hinic3_mbox *func_to_func = hwdev->func_to_func; + int err = mbox_func_params_valid(func_to_func, buf_in, in_size, + channel); + if (err) + return err; + + return hinic3_mbox_to_func_no_ack(hwdev, HINIC3_MGMT_SRC_ID, mod, cmd, + buf_in, in_size, channel); +} + +int hinic3_mbox_ppf_to_host(void *hwdev, u8 mod, u16 cmd, u8 host_id, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size, u32 timeout, u16 channel) +{ + struct hinic3_hwdev *dev = hwdev; + u16 dst_ppf_func; + int err; + + if (!hwdev) + return -EINVAL; + + if (!(dev->chip_present_flag)) + return -EPERM; + + err = mbox_func_params_valid(dev->func_to_func, buf_in, in_size, + channel); + if (err) + return err; + + if (!HINIC3_IS_PPF(dev)) { + sdk_err(dev->dev_hdl, "Params error, only the ppf supports sending mbox to another ppf. 
func_type: %d\n", + hinic3_func_type(dev)); + return -EINVAL; + } + + if (host_id >= HINIC3_MAX_HOST_NUM(dev) || + host_id == HINIC3_PCI_INTF_IDX(dev->hwif)) { + sdk_err(dev->dev_hdl, "Params error, host id: %u\n", host_id); + return -EINVAL; + } + + dst_ppf_func = hinic3_host_ppf_idx(dev, host_id); + if (dst_ppf_func >= HINIC3_MAX_PF_NUM(dev)) { + sdk_err(dev->dev_hdl, "Dest host(%u) have not elect ppf(0x%x).\n", + host_id, dst_ppf_func); + return -EINVAL; + } + + return hinic3_mbox_to_func(dev->func_to_func, mod, cmd, + dst_ppf_func, buf_in, in_size, + buf_out, out_size, timeout, channel); +} +EXPORT_SYMBOL(hinic3_mbox_ppf_to_host); + +int hinic3_mbox_to_pf(void *hwdev, u8 mod, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, + u32 timeout, u16 channel) +{ + struct hinic3_hwdev *dev = hwdev; + int err; + + if (!hwdev) + return -EINVAL; + + if (!(dev->chip_present_flag)) + return -EPERM; + + err = mbox_func_params_valid(dev->func_to_func, buf_in, in_size, + channel); + if (err) + return err; + + if (!HINIC3_IS_VF(dev)) { + sdk_err(dev->dev_hdl, "Params error, func_type: %d\n", + hinic3_func_type(dev)); + return -EINVAL; + } + + return hinic3_mbox_to_func(dev->func_to_func, mod, cmd, + hinic3_pf_id_of_vf(dev), buf_in, in_size, + buf_out, out_size, timeout, channel); +} +EXPORT_SYMBOL(hinic3_mbox_to_pf); + +int hinic3_mbox_to_vf(void *hwdev, u16 vf_id, u8 mod, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, u32 timeout, + u16 channel) +{ + struct hinic3_mbox *func_to_func = NULL; + int err = 0; + u16 dst_func_idx; + + if (!hwdev) + return -EINVAL; + + func_to_func = ((struct hinic3_hwdev *)hwdev)->func_to_func; + err = mbox_func_params_valid(func_to_func, buf_in, in_size, channel); + if (err != 0) + return err; + + if (HINIC3_IS_VF((struct hinic3_hwdev *)hwdev)) { + sdk_err(((struct hinic3_hwdev *)hwdev)->dev_hdl, "Params error, func_type: %d\n", + hinic3_func_type(hwdev)); + return -EINVAL; + } + + if (!vf_id) { + sdk_err(((struct hinic3_hwdev *)hwdev)->dev_hdl, + "VF id(%u) error!\n", vf_id); + return -EINVAL; + } + + /* vf_offset_to_pf + vf_id is the vf's global function id of vf in + * this pf + */ + dst_func_idx = hinic3_glb_pf_vf_offset(hwdev) + vf_id; + + return hinic3_mbox_to_func(func_to_func, mod, cmd, dst_func_idx, buf_in, + in_size, buf_out, out_size, timeout, + channel); +} +EXPORT_SYMBOL(hinic3_mbox_to_vf); + +int hinic3_mbox_set_channel_status(struct hinic3_hwdev *hwdev, u16 channel, + bool enable) +{ + if (channel >= HINIC3_CHANNEL_MAX) { + sdk_err(hwdev->dev_hdl, "Invalid channel id: 0x%x\n", channel); + return -EINVAL; + } + + if (enable) + clear_bit(channel, &hwdev->func_to_func->channel_stop); + else + set_bit(channel, &hwdev->func_to_func->channel_stop); + + sdk_info(hwdev->dev_hdl, "%s mbox channel 0x%x\n", + enable ? "Enable" : "Disable", channel); + + return 0; +} + +void hinic3_mbox_enable_channel_lock(struct hinic3_hwdev *hwdev, bool enable) +{ + hwdev->func_to_func->lock_channel_en = enable; + + sdk_info(hwdev->dev_hdl, "%s mbox channel lock\n", + enable ? 
"Enable" : "Disable"); +} + +static int alloc_mbox_msg_channel(struct hinic3_msg_channel *msg_ch) +{ + msg_ch->resp_msg.msg = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL); + if (!msg_ch->resp_msg.msg) + return -ENOMEM; + + msg_ch->recv_msg.msg = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL); + if (!msg_ch->recv_msg.msg) { + kfree(msg_ch->resp_msg.msg); + return -ENOMEM; + } + + msg_ch->resp_msg.seq_id = SEQ_ID_MAX_VAL; + msg_ch->recv_msg.seq_id = SEQ_ID_MAX_VAL; + atomic_set(&msg_ch->recv_msg_cnt, 0); + + return 0; +} + +static void free_mbox_msg_channel(struct hinic3_msg_channel *msg_ch) +{ + kfree(msg_ch->recv_msg.msg); + kfree(msg_ch->resp_msg.msg); +} + +static int init_mgmt_msg_channel(struct hinic3_mbox *func_to_func) +{ + int err; + + err = alloc_mbox_msg_channel(&func_to_func->mgmt_msg); + if (err != 0) { + sdk_err(func_to_func->hwdev->dev_hdl, "Failed to alloc mgmt message channel\n"); + return err; + } + + err = hinic3_init_mbox_dma_queue(func_to_func); + if (err != 0) { + sdk_err(func_to_func->hwdev->dev_hdl, "Failed to init mbox dma queue\n"); + free_mbox_msg_channel(&func_to_func->mgmt_msg); + } + + return err; +} + +static void deinit_mgmt_msg_channel(struct hinic3_mbox *func_to_func) +{ + hinic3_deinit_mbox_dma_queue(func_to_func); + free_mbox_msg_channel(&func_to_func->mgmt_msg); +} + +int hinic3_mbox_init_host_msg_channel(struct hinic3_hwdev *hwdev) +{ + struct hinic3_mbox *func_to_func = hwdev->func_to_func; + u8 host_num = HINIC3_MAX_HOST_NUM(hwdev); + int i, host_id, err; + + if (host_num == 0) + return 0; + + func_to_func->host_msg = kcalloc(host_num, + sizeof(*func_to_func->host_msg), + GFP_KERNEL); + if (!func_to_func->host_msg) { + sdk_err(func_to_func->hwdev->dev_hdl, "Failed to alloc host message array\n"); + return -ENOMEM; + } + + for (host_id = 0; host_id < host_num; host_id++) { + err = alloc_mbox_msg_channel(&func_to_func->host_msg[host_id]); + if (err) { + sdk_err(func_to_func->hwdev->dev_hdl, + "Failed to alloc host %d message channel\n", + host_id); + goto alloc_msg_ch_err; + } + } + + func_to_func->support_h2h_msg = true; + + return 0; + +alloc_msg_ch_err: + for (i = 0; i < host_id; i++) + free_mbox_msg_channel(&func_to_func->host_msg[i]); + + kfree(func_to_func->host_msg); + func_to_func->host_msg = NULL; + + return -ENOMEM; +} + +static void deinit_host_msg_channel(struct hinic3_mbox *func_to_func) +{ + int i; + + if (!func_to_func->host_msg) + return; + + for (i = 0; i < HINIC3_MAX_HOST_NUM(func_to_func->hwdev); i++) + free_mbox_msg_channel(&func_to_func->host_msg[i]); + + kfree(func_to_func->host_msg); + func_to_func->host_msg = NULL; +} + +int hinic3_init_func_mbox_msg_channel(void *hwdev, u16 num_func) +{ + struct hinic3_hwdev *dev = hwdev; + struct hinic3_mbox *func_to_func = NULL; + u16 func_id, i; + int err; + + if (!hwdev || !num_func || num_func > HINIC3_MAX_FUNCTIONS) + return -EINVAL; + + func_to_func = dev->func_to_func; + if (func_to_func->func_msg) + return (func_to_func->num_func_msg == num_func) ? 
0 : -EFAULT; + + func_to_func->func_msg = + kcalloc(num_func, sizeof(*func_to_func->func_msg), GFP_KERNEL); + if (!func_to_func->func_msg) { + sdk_err(func_to_func->hwdev->dev_hdl, "Failed to alloc func message array\n"); + return -ENOMEM; + } + + for (func_id = 0; func_id < num_func; func_id++) { + err = alloc_mbox_msg_channel(&func_to_func->func_msg[func_id]); + if (err != 0) { + sdk_err(func_to_func->hwdev->dev_hdl, + "Failed to alloc func %hu message channel\n", + func_id); + goto alloc_msg_ch_err; + } + } + + func_to_func->num_func_msg = num_func; + + return 0; + +alloc_msg_ch_err: + for (i = 0; i < func_id; i++) + free_mbox_msg_channel(&func_to_func->func_msg[i]); + + kfree(func_to_func->func_msg); + func_to_func->func_msg = NULL; + + return -ENOMEM; +} + +static void hinic3_deinit_func_mbox_msg_channel(struct hinic3_hwdev *hwdev) +{ + struct hinic3_mbox *func_to_func = hwdev->func_to_func; + u16 i; + + if (!func_to_func->func_msg) + return; + + for (i = 0; i < func_to_func->num_func_msg; i++) + free_mbox_msg_channel(&func_to_func->func_msg[i]); + + kfree(func_to_func->func_msg); + func_to_func->func_msg = NULL; +} + +static struct hinic3_msg_desc *get_mbox_msg_desc(struct hinic3_mbox *func_to_func, + u64 dir, u64 src_func_id) +{ + struct hinic3_hwdev *hwdev = func_to_func->hwdev; + struct hinic3_msg_channel *msg_ch = NULL; + u16 id; + + if (src_func_id == HINIC3_MGMT_SRC_ID) { + msg_ch = &func_to_func->mgmt_msg; + } else if (HINIC3_IS_VF(hwdev)) { + /* message from pf */ + msg_ch = func_to_func->func_msg; + if (src_func_id != hinic3_pf_id_of_vf(hwdev) || !msg_ch) + return NULL; + } else if (src_func_id > hinic3_glb_pf_vf_offset(hwdev)) { + /* message from vf */ + id = (u16)(src_func_id - 1U) - hinic3_glb_pf_vf_offset(hwdev); + if (id >= func_to_func->num_func_msg) + return NULL; + + msg_ch = &func_to_func->func_msg[id]; + } else { + /* message from other host's ppf */ + if (!func_to_func->support_h2h_msg) + return NULL; + + for (id = 0; id < HINIC3_MAX_HOST_NUM(hwdev); id++) { + if (src_func_id == hinic3_host_ppf_idx(hwdev, (u8)id)) + break; + } + + if (id == HINIC3_MAX_HOST_NUM(hwdev) || !func_to_func->host_msg) + return NULL; + + msg_ch = &func_to_func->host_msg[id]; + } + + return (dir == HINIC3_MSG_DIRECT_SEND) ? 
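+ /* direct sends are held in recv_msg, responses in resp_msg */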
+ &msg_ch->recv_msg : &msg_ch->resp_msg; +} + +static void prepare_send_mbox(struct hinic3_mbox *func_to_func) +{ + struct hinic3_send_mbox *send_mbox = &func_to_func->send_mbox; + + send_mbox->data = MBOX_AREA(func_to_func->hwdev->hwif); +} + +static int alloc_mbox_wb_status(struct hinic3_mbox *func_to_func) +{ + struct hinic3_send_mbox *send_mbox = &func_to_func->send_mbox; + struct hinic3_hwdev *hwdev = func_to_func->hwdev; + u32 addr_h, addr_l; + + send_mbox->wb_vaddr = dma_zalloc_coherent(hwdev->dev_hdl, + MBOX_WB_STATUS_LEN, + &send_mbox->wb_paddr, + GFP_KERNEL); + if (!send_mbox->wb_vaddr) + return -ENOMEM; + + send_mbox->wb_status = send_mbox->wb_vaddr; + + addr_h = upper_32_bits(send_mbox->wb_paddr); + addr_l = lower_32_bits(send_mbox->wb_paddr); + + hinic3_hwif_write_reg(hwdev->hwif, HINIC3_FUNC_CSR_MAILBOX_RESULT_H_OFF, + addr_h); + hinic3_hwif_write_reg(hwdev->hwif, HINIC3_FUNC_CSR_MAILBOX_RESULT_L_OFF, + addr_l); + + return 0; +} + +static void free_mbox_wb_status(struct hinic3_mbox *func_to_func) +{ + struct hinic3_send_mbox *send_mbox = &func_to_func->send_mbox; + struct hinic3_hwdev *hwdev = func_to_func->hwdev; + + hinic3_hwif_write_reg(hwdev->hwif, HINIC3_FUNC_CSR_MAILBOX_RESULT_H_OFF, + 0); + hinic3_hwif_write_reg(hwdev->hwif, HINIC3_FUNC_CSR_MAILBOX_RESULT_L_OFF, + 0); + + dma_free_coherent(hwdev->dev_hdl, MBOX_WB_STATUS_LEN, + send_mbox->wb_vaddr, send_mbox->wb_paddr); +} + +int hinic3_func_to_func_init(struct hinic3_hwdev *hwdev) +{ + struct hinic3_mbox *func_to_func; + int err = -ENOMEM; + + func_to_func = kzalloc(sizeof(*func_to_func), GFP_KERNEL); + if (!func_to_func) + return -ENOMEM; + + hwdev->func_to_func = func_to_func; + func_to_func->hwdev = hwdev; + mutex_init(&func_to_func->mbox_send_lock); + mutex_init(&func_to_func->msg_send_lock); + spin_lock_init(&func_to_func->mbox_lock); + func_to_func->workq = create_singlethread_workqueue(HINIC3_MBOX_WQ_NAME); + if (!func_to_func->workq) { + sdk_err(hwdev->dev_hdl, "Failed to initialize MBOX workqueue\n"); + goto create_mbox_workq_err; + } + + err = init_mgmt_msg_channel(func_to_func); + if (err) + goto init_mgmt_msg_ch_err; + + if (HINIC3_IS_VF(hwdev)) { + /* VF to PF mbox message channel */ + err = hinic3_init_func_mbox_msg_channel(hwdev, 1); + if (err) + goto init_func_msg_ch_err; + } + + err = alloc_mbox_wb_status(func_to_func); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to alloc mbox write back status\n"); + goto alloc_wb_status_err; + } + + prepare_send_mbox(func_to_func); + + return 0; + +alloc_wb_status_err: + if (HINIC3_IS_VF(hwdev)) + hinic3_deinit_func_mbox_msg_channel(hwdev); + +init_func_msg_ch_err: + deinit_mgmt_msg_channel(func_to_func); + +init_mgmt_msg_ch_err: + destroy_workqueue(func_to_func->workq); + +create_mbox_workq_err: + spin_lock_deinit(&func_to_func->mbox_lock); + mutex_deinit(&func_to_func->msg_send_lock); + mutex_deinit(&func_to_func->mbox_send_lock); + kfree(func_to_func); + + return err; +} + +void hinic3_func_to_func_free(struct hinic3_hwdev *hwdev) +{ + struct hinic3_mbox *func_to_func = hwdev->func_to_func; + + /* destroy workqueue before free related mbox resources in case of + * illegal resource access + */ + destroy_workqueue(func_to_func->workq); + + free_mbox_wb_status(func_to_func); + if (HINIC3_IS_PPF(hwdev)) + deinit_host_msg_channel(func_to_func); + hinic3_deinit_func_mbox_msg_channel(hwdev); + deinit_mgmt_msg_channel(func_to_func); + spin_lock_deinit(&func_to_func->mbox_lock); + mutex_deinit(&func_to_func->mbox_send_lock); + mutex_deinit(&func_to_func->msg_send_lock); 
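+ /* all mbox channels, dma queues and wb status were released above; free the context itself */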
+ + kfree(func_to_func); +} diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_mbox.h b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_mbox.h new file mode 100644 index 000000000000..bf723e8a68fb --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_mbox.h @@ -0,0 +1,267 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC3_MBOX_H +#define HINIC3_MBOX_H + +#include "hinic3_crm.h" +#include "hinic3_hwdev.h" + +#define HINIC3_MBOX_PF_SEND_ERR 0x1 + +#define HINIC3_MGMT_SRC_ID 0x1FFF +#define HINIC3_MAX_FUNCTIONS 4096 + +/* message header define */ +#define HINIC3_MSG_HEADER_SRC_GLB_FUNC_IDX_SHIFT 0 +#define HINIC3_MSG_HEADER_STATUS_SHIFT 13 +#define HINIC3_MSG_HEADER_SOURCE_SHIFT 15 +#define HINIC3_MSG_HEADER_AEQ_ID_SHIFT 16 +#define HINIC3_MSG_HEADER_MSG_ID_SHIFT 18 +#define HINIC3_MSG_HEADER_CMD_SHIFT 22 + +#define HINIC3_MSG_HEADER_MSG_LEN_SHIFT 32 +#define HINIC3_MSG_HEADER_MODULE_SHIFT 43 +#define HINIC3_MSG_HEADER_SEG_LEN_SHIFT 48 +#define HINIC3_MSG_HEADER_NO_ACK_SHIFT 54 +#define HINIC3_MSG_HEADER_DATA_TYPE_SHIFT 55 +#define HINIC3_MSG_HEADER_SEQID_SHIFT 56 +#define HINIC3_MSG_HEADER_LAST_SHIFT 62 +#define HINIC3_MSG_HEADER_DIRECTION_SHIFT 63 + +#define HINIC3_MSG_HEADER_SRC_GLB_FUNC_IDX_MASK 0x1FFF +#define HINIC3_MSG_HEADER_STATUS_MASK 0x1 +#define HINIC3_MSG_HEADER_SOURCE_MASK 0x1 +#define HINIC3_MSG_HEADER_AEQ_ID_MASK 0x3 +#define HINIC3_MSG_HEADER_MSG_ID_MASK 0xF +#define HINIC3_MSG_HEADER_CMD_MASK 0x3FF + +#define HINIC3_MSG_HEADER_MSG_LEN_MASK 0x7FF +#define HINIC3_MSG_HEADER_MODULE_MASK 0x1F +#define HINIC3_MSG_HEADER_SEG_LEN_MASK 0x3F +#define HINIC3_MSG_HEADER_NO_ACK_MASK 0x1 +#define HINIC3_MSG_HEADER_DATA_TYPE_MASK 0x1 +#define HINIC3_MSG_HEADER_SEQID_MASK 0x3F +#define HINIC3_MSG_HEADER_LAST_MASK 0x1 +#define HINIC3_MSG_HEADER_DIRECTION_MASK 0x1 + +#define HINIC3_MSG_HEADER_GET(val, field) \ + (((val) >> HINIC3_MSG_HEADER_##field##_SHIFT) & \ + HINIC3_MSG_HEADER_##field##_MASK) +#define HINIC3_MSG_HEADER_SET(val, field) \ + ((u64)(((u64)(val)) & HINIC3_MSG_HEADER_##field##_MASK) << \ + HINIC3_MSG_HEADER_##field##_SHIFT) + +#define IS_DMA_MBX_MSG(dst_func) ((dst_func) == HINIC3_MGMT_SRC_ID) + +enum hinic3_msg_direction_type { + HINIC3_MSG_DIRECT_SEND = 0, + HINIC3_MSG_RESPONSE = 1, +}; + +enum hinic3_msg_segment_type { + NOT_LAST_SEGMENT = 0, + LAST_SEGMENT = 1, +}; + +enum hinic3_msg_ack_type { + HINIC3_MSG_ACK, + HINIC3_MSG_NO_ACK, +}; + +enum hinic3_data_type { + HINIC3_DATA_INLINE = 0, + HINIC3_DATA_DMA = 1, +}; + +enum hinic3_msg_src_type { + HINIC3_MSG_FROM_MGMT = 0, + HINIC3_MSG_FROM_MBOX = 1, +}; + +enum hinic3_msg_aeq_type { + HINIC3_ASYNC_MSG_AEQ = 0, + /* indicate dest func or mgmt cpu which aeq to response mbox message */ + HINIC3_MBOX_RSP_MSG_AEQ = 1, + /* indicate mgmt cpu which aeq to response api cmd message */ + HINIC3_MGMT_RSP_MSG_AEQ = 2, +}; + +#define HINIC3_MBOX_WQ_NAME "hinic3_mbox" + +struct mbox_msg_info { + u8 msg_id; + u8 status; /* can only use 1 bit */ +}; + +struct hinic3_msg_desc { + void *msg; + u16 msg_len; + u8 seq_id; + u8 mod; + u16 cmd; + struct mbox_msg_info msg_info; +}; + +struct hinic3_msg_channel { + struct hinic3_msg_desc resp_msg; + struct hinic3_msg_desc recv_msg; + + atomic_t recv_msg_cnt; +}; + +/* Receive other functions mbox message */ +struct hinic3_recv_mbox { + void *msg; + u16 msg_len; + u8 msg_id; + u8 mod; + u16 cmd; + u16 src_func_idx; + + enum hinic3_msg_ack_type ack_type; + u32 rsvd1; + + void *resp_buff; +}; + +struct hinic3_send_mbox { + 
u8 *data; + + u64 *wb_status; /* write back status */ + void *wb_vaddr; + dma_addr_t wb_paddr; +}; + +enum mbox_event_state { + EVENT_START = 0, + EVENT_FAIL, + EVENT_SUCCESS, + EVENT_TIMEOUT, + EVENT_END, +}; + +enum hinic3_mbox_cb_state { + HINIC3_VF_MBOX_CB_REG = 0, + HINIC3_VF_MBOX_CB_RUNNING, + HINIC3_PF_MBOX_CB_REG, + HINIC3_PF_MBOX_CB_RUNNING, + HINIC3_PPF_MBOX_CB_REG, + HINIC3_PPF_MBOX_CB_RUNNING, + HINIC3_PPF_TO_PF_MBOX_CB_REG, + HINIC3_PPF_TO_PF_MBOX_CB_RUNNIG, +}; + +struct mbox_dma_msg { + u32 xor; + u32 dma_addr_high; + u32 dma_addr_low; + u32 msg_len; + u64 rsvd; +}; + +struct mbox_dma_queue { + void *dma_buff_vaddr; + dma_addr_t dma_buff_paddr; + + u16 depth; + u16 prod_idx; + u16 cons_idx; +}; + +struct hinic3_mbox { + struct hinic3_hwdev *hwdev; + + bool lock_channel_en; + unsigned long channel_stop; + u16 cur_msg_channel; + u32 rsvd1; + + /* lock for send mbox message and ack message */ + struct mutex mbox_send_lock; + /* lock for send mbox message */ + struct mutex msg_send_lock; + struct hinic3_send_mbox send_mbox; + + struct mbox_dma_queue sync_msg_queue; + struct mbox_dma_queue async_msg_queue; + + struct workqueue_struct *workq; + + struct hinic3_msg_channel mgmt_msg; /* driver and MGMT CPU */ + struct hinic3_msg_channel *host_msg; /* PPF message between hosts */ + struct hinic3_msg_channel *func_msg; /* PF to VF or VF to PF */ + u16 num_func_msg; + bool support_h2h_msg; /* host to host */ + + /* vf receive pf/ppf callback */ + hinic3_vf_mbox_cb vf_mbox_cb[HINIC3_MOD_MAX]; + void *vf_mbox_data[HINIC3_MOD_MAX]; + /* pf/ppf receive vf callback */ + hinic3_pf_mbox_cb pf_mbox_cb[HINIC3_MOD_MAX]; + void *pf_mbox_data[HINIC3_MOD_MAX]; + /* ppf receive pf/ppf callback */ + hinic3_ppf_mbox_cb ppf_mbox_cb[HINIC3_MOD_MAX]; + void *ppf_mbox_data[HINIC3_MOD_MAX]; + /* pf receive ppf callback */ + hinic3_pf_recv_from_ppf_mbox_cb pf_recv_ppf_mbox_cb[HINIC3_MOD_MAX]; + void *pf_recv_ppf_mbox_data[HINIC3_MOD_MAX]; + unsigned long ppf_to_pf_mbox_cb_state[HINIC3_MOD_MAX]; + unsigned long ppf_mbox_cb_state[HINIC3_MOD_MAX]; + unsigned long pf_mbox_cb_state[HINIC3_MOD_MAX]; + unsigned long vf_mbox_cb_state[HINIC3_MOD_MAX]; + + u8 send_msg_id; + u16 rsvd2; + enum mbox_event_state event_flag; + /* lock for mbox event flag */ + spinlock_t mbox_lock; + u64 rsvd3; +}; + +struct hinic3_mbox_work { + struct work_struct work; + struct hinic3_mbox *func_to_func; + struct hinic3_recv_mbox *recv_mbox; + struct hinic3_msg_channel *msg_ch; +}; + +struct vf_cmd_check_handle { + u16 cmd; + bool (*check_cmd)(struct hinic3_hwdev *hwdev, u16 src_func_idx, + void *buf_in, u16 in_size); +}; + +void hinic3_mbox_func_aeqe_handler(void *handle, u8 *header, u8 size); + +bool hinic3_mbox_check_cmd_valid(struct hinic3_hwdev *hwdev, + struct vf_cmd_check_handle *cmd_handle, + u16 vf_id, u16 cmd, void *buf_in, u16 in_size, + u8 size); + +int hinic3_func_to_func_init(struct hinic3_hwdev *hwdev); + +void hinic3_func_to_func_free(struct hinic3_hwdev *hwdev); + +int hinic3_send_mbox_to_mgmt(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size, u32 timeout, u16 channel); + +void hinic3_response_mbox_to_mgmt(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd, + void *buf_in, u16 in_size, u16 msg_id); + +int hinic3_send_mbox_to_mgmt_no_ack(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd, + void *buf_in, u16 in_size, u16 channel); +int hinic3_mbox_to_func(struct hinic3_mbox *func_to_func, u8 mod, u16 cmd, + u16 dst_func, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size, u32 
timeout, u16 channel); + +int hinic3_mbox_init_host_msg_channel(struct hinic3_hwdev *hwdev); + +int hinic3_mbox_set_channel_status(struct hinic3_hwdev *hwdev, u16 channel, + bool enable); + +void hinic3_mbox_enable_channel_lock(struct hinic3_hwdev *hwdev, bool enable); + +#endif + diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_mgmt.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_mgmt.c new file mode 100644 index 000000000000..f633262e8b71 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_mgmt.c @@ -0,0 +1,1515 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/pci.h> +#include <linux/device.h> +#include <linux/spinlock.h> +#include <linux/completion.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/semaphore.h> + +#include "ossl_knl.h" +#include "hinic3_crm.h" +#include "hinic3_hw.h" +#include "hinic3_common.h" +#include "hinic3_comm_cmd.h" +#include "hinic3_hwdev.h" +#include "hinic3_eqs.h" +#include "hinic3_mbox.h" +#include "hinic3_api_cmd.h" +#include "hinic3_prof_adap.h" +#include "hinic3_csr.h" +#include "hinic3_mgmt.h" + +#define HINIC3_MSG_TO_MGMT_MAX_LEN 2016 + +#define HINIC3_API_CHAIN_AEQ_ID 2 +#define MAX_PF_MGMT_BUF_SIZE 2048UL +#define SEGMENT_LEN 48 +#define ASYNC_MSG_FLAG 0x8 +#define MGMT_MSG_MAX_SEQ_ID (ALIGN(HINIC3_MSG_TO_MGMT_MAX_LEN, \ + SEGMENT_LEN) / SEGMENT_LEN) + +#define MGMT_MSG_LAST_SEG_MAX_LEN (MAX_PF_MGMT_BUF_SIZE - \ + SEGMENT_LEN * MGMT_MSG_MAX_SEQ_ID) + +#define BUF_OUT_DEFAULT_SIZE 1 + +#define MGMT_MSG_SIZE_MIN 20 +#define MGMT_MSG_SIZE_STEP 16 +#define MGMT_MSG_RSVD_FOR_DEV 8 + +#define SYNC_MSG_ID_MASK 0x7 +#define ASYNC_MSG_ID_MASK 0x7 + +#define SYNC_FLAG 0 +#define ASYNC_FLAG 1 + +#define MSG_NO_RESP 0xFFFF + +#define MGMT_MSG_TIMEOUT 20000 /* millisecond */ + +#define SYNC_MSG_ID(pf_to_mgmt) ((pf_to_mgmt)->sync_msg_id) + +#define SYNC_MSG_ID_INC(pf_to_mgmt) (SYNC_MSG_ID(pf_to_mgmt) = \ + (SYNC_MSG_ID(pf_to_mgmt) + 1) & SYNC_MSG_ID_MASK) +#define ASYNC_MSG_ID(pf_to_mgmt) ((pf_to_mgmt)->async_msg_id) + +#define ASYNC_MSG_ID_INC(pf_to_mgmt) (ASYNC_MSG_ID(pf_to_mgmt) = \ + ((ASYNC_MSG_ID(pf_to_mgmt) + 1) & ASYNC_MSG_ID_MASK) \ + | ASYNC_MSG_FLAG) + +static void pf_to_mgmt_send_event_set(struct hinic3_msg_pf_to_mgmt *pf_to_mgmt, + int event_flag) +{ + spin_lock(&pf_to_mgmt->sync_event_lock); + pf_to_mgmt->event_flag = event_flag; + spin_unlock(&pf_to_mgmt->sync_event_lock); +} + +/** + * hinic3_register_mgmt_msg_cb - register sync msg handler for a module + * @hwdev: the pointer to hw device + * @mod: module in the chip that this handler will handle its sync messages + * @pri_handle: specific mod's private data that will be used in callback + * @callback: the handler for a sync message that will handle messages + **/ +int hinic3_register_mgmt_msg_cb(void *hwdev, u8 mod, void *pri_handle, + hinic3_mgmt_msg_cb callback) +{ + struct hinic3_msg_pf_to_mgmt *pf_to_mgmt = NULL; + + if (mod >= HINIC3_MOD_HW_MAX || !hwdev) + return -EFAULT; + + pf_to_mgmt = ((struct hinic3_hwdev *)hwdev)->pf_to_mgmt; + if (!pf_to_mgmt) + return -EINVAL; + + pf_to_mgmt->recv_mgmt_msg_cb[mod] = callback; + pf_to_mgmt->recv_mgmt_msg_data[mod] = pri_handle; + + set_bit(HINIC3_MGMT_MSG_CB_REG, &pf_to_mgmt->mgmt_msg_cb_state[mod]); + + return 0; +} +EXPORT_SYMBOL(hinic3_register_mgmt_msg_cb); + +/** + * hinic3_unregister_mgmt_msg_cb - 
unregister sync msg handler for a module + * @hwdev: the pointer to hw device + * @mod: module in the chip that this handler will handle its sync messages + **/ +void hinic3_unregister_mgmt_msg_cb(void *hwdev, u8 mod) +{ + struct hinic3_msg_pf_to_mgmt *pf_to_mgmt = NULL; + + if (!hwdev || mod >= HINIC3_MOD_HW_MAX) + return; + + pf_to_mgmt = ((struct hinic3_hwdev *)hwdev)->pf_to_mgmt; + if (!pf_to_mgmt) + return; + + clear_bit(HINIC3_MGMT_MSG_CB_REG, &pf_to_mgmt->mgmt_msg_cb_state[mod]); + + while (test_bit(HINIC3_MGMT_MSG_CB_RUNNING, + &pf_to_mgmt->mgmt_msg_cb_state[mod])) + usleep_range(900, 1000); /* sleep 900 us ~ 1000 us */ + + pf_to_mgmt->recv_mgmt_msg_cb[mod] = NULL; + pf_to_mgmt->recv_mgmt_msg_data[mod] = NULL; +} +EXPORT_SYMBOL(hinic3_unregister_mgmt_msg_cb); + +/** + * mgmt_msg_len - calculate the total message length + * @msg_data_len: the length of the message data + * Return: the total message length + **/ +static u16 mgmt_msg_len(u16 msg_data_len) +{ + /* u64 - the size of the header */ + u16 msg_size; + + msg_size = (u16)(MGMT_MSG_RSVD_FOR_DEV + sizeof(u64) + msg_data_len); + + if (msg_size > MGMT_MSG_SIZE_MIN) + msg_size = MGMT_MSG_SIZE_MIN + + ALIGN((msg_size - MGMT_MSG_SIZE_MIN), + MGMT_MSG_SIZE_STEP); + else + msg_size = MGMT_MSG_SIZE_MIN; + + return msg_size; +} + +/** + * prepare_header - prepare the header of the message + * @pf_to_mgmt: PF to MGMT channel + * @header: pointer of the header to prepare + * @msg_len: the length of the message + * @mod: module in the chip that will get the message + * @direction: the direction of the original message + * @msg_id: message id + **/ +static void prepare_header(struct hinic3_msg_pf_to_mgmt *pf_to_mgmt, + u64 *header, u16 msg_len, u8 mod, + enum hinic3_msg_ack_type ack_type, + enum hinic3_msg_direction_type direction, + enum hinic3_mgmt_cmd cmd, u32 msg_id) +{ + struct hinic3_hwif *hwif = pf_to_mgmt->hwdev->hwif; + + *header = HINIC3_MSG_HEADER_SET(msg_len, MSG_LEN) | + HINIC3_MSG_HEADER_SET(mod, MODULE) | + HINIC3_MSG_HEADER_SET(msg_len, SEG_LEN) | + HINIC3_MSG_HEADER_SET(ack_type, NO_ACK) | + HINIC3_MSG_HEADER_SET(HINIC3_DATA_INLINE, DATA_TYPE) | + HINIC3_MSG_HEADER_SET(0, SEQID) | + HINIC3_MSG_HEADER_SET(HINIC3_API_CHAIN_AEQ_ID, AEQ_ID) | + HINIC3_MSG_HEADER_SET(LAST_SEGMENT, LAST) | + HINIC3_MSG_HEADER_SET(direction, DIRECTION) | + HINIC3_MSG_HEADER_SET(cmd, CMD) | + HINIC3_MSG_HEADER_SET(HINIC3_MSG_FROM_MGMT, SOURCE) | + HINIC3_MSG_HEADER_SET(hwif->attr.func_global_idx, + SRC_GLB_FUNC_IDX) | + HINIC3_MSG_HEADER_SET(msg_id, MSG_ID); +} + +static void clp_prepare_header(struct hinic3_hwdev *hwdev, u64 *header, + u16 msg_len, u8 mod, + enum hinic3_msg_ack_type ack_type, + enum hinic3_msg_direction_type direction, + enum hinic3_mgmt_cmd cmd, u32 msg_id) +{ + struct hinic3_hwif *hwif = hwdev->hwif; + + *header = HINIC3_MSG_HEADER_SET(msg_len, MSG_LEN) | + HINIC3_MSG_HEADER_SET(mod, MODULE) | + HINIC3_MSG_HEADER_SET(msg_len, SEG_LEN) | + HINIC3_MSG_HEADER_SET(ack_type, NO_ACK) | + HINIC3_MSG_HEADER_SET(HINIC3_DATA_INLINE, DATA_TYPE) | + HINIC3_MSG_HEADER_SET(0, SEQID) | + HINIC3_MSG_HEADER_SET(HINIC3_API_CHAIN_AEQ_ID, AEQ_ID) | + HINIC3_MSG_HEADER_SET(LAST_SEGMENT, LAST) | + HINIC3_MSG_HEADER_SET(direction, DIRECTION) | + HINIC3_MSG_HEADER_SET(cmd, CMD) | + HINIC3_MSG_HEADER_SET(hwif->attr.func_global_idx, + SRC_GLB_FUNC_IDX) | + HINIC3_MSG_HEADER_SET(msg_id, MSG_ID); +} + +/** + * prepare_mgmt_cmd - prepare the mgmt command + * @mgmt_cmd: pointer to the command to prepare + * @header: pointer of the header to prepare + * @msg: 
the data of the message + * @msg_len: the length of the message + **/ +static void prepare_mgmt_cmd(u8 *mgmt_cmd, u64 *header, const void *msg, + int msg_len) +{ + u8 *mgmt_cmd_new = mgmt_cmd; + + memset(mgmt_cmd_new, 0, MGMT_MSG_RSVD_FOR_DEV); + + mgmt_cmd_new += MGMT_MSG_RSVD_FOR_DEV; + memcpy(mgmt_cmd_new, header, sizeof(*header)); + + mgmt_cmd_new += sizeof(*header); + memcpy(mgmt_cmd_new, msg, (size_t)(u32)msg_len); +} + +/** + * send_msg_to_mgmt_sync - send sync message + * @pf_to_mgmt: PF to MGMT channel + * @mod: module in the chip that will get the message + * @cmd: command of the message + * @msg: the msg data + * @msg_len: the msg data length + * @ack_type: the ack type of the message + * @direction: the direction of the original message + * @resp_msg_id: msg id to respond to + * Return: 0 - success, negative - failure + **/ +static int send_msg_to_mgmt_sync(struct hinic3_msg_pf_to_mgmt *pf_to_mgmt, + u8 mod, u16 cmd, const void *msg, u16 msg_len, + enum hinic3_msg_ack_type ack_type, + enum hinic3_msg_direction_type direction, + u16 resp_msg_id) +{ + void *mgmt_cmd = pf_to_mgmt->sync_msg_buf; + struct hinic3_api_cmd_chain *chain = NULL; + u8 node_id = HINIC3_MGMT_CPU_NODE_ID(pf_to_mgmt->hwdev); + u64 header; + u16 cmd_size = mgmt_msg_len(msg_len); + + if (hinic3_get_chip_present_flag(pf_to_mgmt->hwdev) == 0) + return -EFAULT; + + if (cmd_size > HINIC3_MSG_TO_MGMT_MAX_LEN) + return -EFAULT; + + if (direction == HINIC3_MSG_RESPONSE) + prepare_header(pf_to_mgmt, &header, msg_len, mod, ack_type, + direction, cmd, resp_msg_id); + else + prepare_header(pf_to_mgmt, &header, msg_len, mod, ack_type, + direction, cmd, SYNC_MSG_ID_INC(pf_to_mgmt)); + chain = pf_to_mgmt->cmd_chain[HINIC3_API_CMD_WRITE_TO_MGMT_CPU]; + + if (ack_type == HINIC3_MSG_ACK) + pf_to_mgmt_send_event_set(pf_to_mgmt, SEND_EVENT_START); + + prepare_mgmt_cmd((u8 *)mgmt_cmd, &header, msg, msg_len); + + return hinic3_api_cmd_write(chain, node_id, mgmt_cmd, cmd_size); +} + +/** + * send_msg_to_mgmt_async - send async message + * @pf_to_mgmt: PF to MGMT channel + * @mod: module in the chip that will get the message + * @cmd: command of the message + * @msg: the data of the message + * @msg_len: the length of the message + * @direction: the direction of the original message + * Return: 0 - success, negative - failure + **/ +static int send_msg_to_mgmt_async(struct hinic3_msg_pf_to_mgmt *pf_to_mgmt, + u8 mod, u16 cmd, const void *msg, u16 msg_len, + enum hinic3_msg_direction_type direction) +{ + void *mgmt_cmd = pf_to_mgmt->async_msg_buf; + struct hinic3_api_cmd_chain *chain = NULL; + u8 node_id = HINIC3_MGMT_CPU_NODE_ID(pf_to_mgmt->hwdev); + u64 header; + u16 cmd_size = mgmt_msg_len(msg_len); + + if (hinic3_get_chip_present_flag(pf_to_mgmt->hwdev) == 0) + return -EFAULT; + + if (cmd_size > HINIC3_MSG_TO_MGMT_MAX_LEN) + return -EFAULT; + + prepare_header(pf_to_mgmt, &header, msg_len, mod, HINIC3_MSG_NO_ACK, + direction, cmd, ASYNC_MSG_ID(pf_to_mgmt)); + + prepare_mgmt_cmd((u8 *)mgmt_cmd, &header, msg, msg_len); + + chain = pf_to_mgmt->cmd_chain[HINIC3_API_CMD_WRITE_ASYNC_TO_MGMT_CPU]; + + return hinic3_api_cmd_write(chain, node_id, mgmt_cmd, cmd_size); +} + +static inline void msg_to_mgmt_pre(u8 mod, void *buf_in) +{ + struct hinic3_msg_head *msg_head = NULL; + + /* set aeq fix num to 3, need to ensure response aeq id < 3 */ + if (mod == HINIC3_MOD_COMM || mod == HINIC3_MOD_L2NIC) { + msg_head = buf_in; + + if (msg_head->resp_aeq_num >= HINIC3_MAX_AEQS) + msg_head->resp_aeq_num = 0; + } +} + +int hinic3_pf_to_mgmt_sync(void *hwdev, u8 mod, u16 cmd, void *buf_in, + 
u16 in_size, void *buf_out, u16 *out_size, + u32 timeout) +{ + struct hinic3_msg_pf_to_mgmt *pf_to_mgmt = NULL; + void *dev = ((struct hinic3_hwdev *)hwdev)->dev_hdl; + struct hinic3_recv_msg *recv_msg = NULL; + struct completion *recv_done = NULL; + ulong timeo; + int err; + ulong ret; + + if (!COMM_SUPPORT_API_CHAIN((struct hinic3_hwdev *)hwdev)) + return -EPERM; + + msg_to_mgmt_pre(mod, buf_in); + + pf_to_mgmt = ((struct hinic3_hwdev *)hwdev)->pf_to_mgmt; + + /* Lock the sync_msg_buf */ + down(&pf_to_mgmt->sync_msg_lock); + recv_msg = &pf_to_mgmt->recv_resp_msg_from_mgmt; + recv_done = &recv_msg->recv_done; + + init_completion(recv_done); + + err = send_msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size, + HINIC3_MSG_ACK, HINIC3_MSG_DIRECT_SEND, + MSG_NO_RESP); + if (err) { + sdk_err(dev, "Failed to send sync msg to mgmt, sync_msg_id: %u\n", + pf_to_mgmt->sync_msg_id); + pf_to_mgmt_send_event_set(pf_to_mgmt, SEND_EVENT_FAIL); + goto unlock_sync_msg; + } + + timeo = msecs_to_jiffies(timeout ? timeout : MGMT_MSG_TIMEOUT); + + ret = wait_for_completion_timeout(recv_done, timeo); + if (!ret) { + sdk_err(dev, "Mgmt response sync cmd timeout, sync_msg_id: %u\n", + pf_to_mgmt->sync_msg_id); + hinic3_dump_aeq_info((struct hinic3_hwdev *)hwdev); + err = -ETIMEDOUT; + pf_to_mgmt_send_event_set(pf_to_mgmt, SEND_EVENT_TIMEOUT); + goto unlock_sync_msg; + } + + spin_lock(&pf_to_mgmt->sync_event_lock); + if (pf_to_mgmt->event_flag == SEND_EVENT_TIMEOUT) { + spin_unlock(&pf_to_mgmt->sync_event_lock); + err = -ETIMEDOUT; + goto unlock_sync_msg; + } + spin_unlock(&pf_to_mgmt->sync_event_lock); + + pf_to_mgmt_send_event_set(pf_to_mgmt, SEND_EVENT_END); + + if (!(((struct hinic3_hwdev *)hwdev)->chip_present_flag)) { + destroy_completion(recv_done); + up(&pf_to_mgmt->sync_msg_lock); + return -ETIMEDOUT; + } + + if (buf_out && out_size) { + if (*out_size < recv_msg->msg_len) { + sdk_err(dev, "Invalid response message length: %u for mod %d cmd %u from mgmt, should be less than: %u\n", + recv_msg->msg_len, mod, cmd, *out_size); + err = -EFAULT; + goto unlock_sync_msg; + } + + if (recv_msg->msg_len) + memcpy(buf_out, recv_msg->msg, recv_msg->msg_len); + + *out_size = recv_msg->msg_len; + } + +unlock_sync_msg: + destroy_completion(recv_done); + up(&pf_to_mgmt->sync_msg_lock); + + return err; +} + +int hinic3_pf_to_mgmt_async(void *hwdev, u8 mod, u16 cmd, const void *buf_in, + u16 in_size) +{ + struct hinic3_msg_pf_to_mgmt *pf_to_mgmt; + void *dev = ((struct hinic3_hwdev *)hwdev)->dev_hdl; + int err; + + if (!COMM_SUPPORT_API_CHAIN((struct hinic3_hwdev *)hwdev)) + return -EPERM; + + pf_to_mgmt = ((struct hinic3_hwdev *)hwdev)->pf_to_mgmt; + + /* Lock the async_msg_buf */ + spin_lock_bh(&pf_to_mgmt->async_msg_lock); + ASYNC_MSG_ID_INC(pf_to_mgmt); + + err = send_msg_to_mgmt_async(pf_to_mgmt, mod, cmd, buf_in, in_size, + HINIC3_MSG_DIRECT_SEND); + spin_unlock_bh(&pf_to_mgmt->async_msg_lock); + + if (err) { + sdk_err(dev, "Failed to send async mgmt msg\n"); + return err; + } + + return 0; +} + +int hinic3_pf_msg_to_mgmt_sync(void *hwdev, u8 mod, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, + u32 timeout) +{ + if (!hwdev) + return -EINVAL; + + if (hinic3_get_chip_present_flag(hwdev) == 0) + return -EPERM; + + if (in_size > HINIC3_MSG_TO_MGMT_MAX_LEN) + return -EINVAL; + + if (!COMM_SUPPORT_API_CHAIN((struct hinic3_hwdev *)hwdev)) + return -EPERM; + + return hinic3_pf_to_mgmt_sync(hwdev, mod, cmd, buf_in, in_size, + buf_out, out_size, timeout); +} + +int hinic3_msg_to_mgmt_sync(void *hwdev, u8
mod, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, + u32 timeout, u16 channel) +{ + if (!hwdev) + return -EINVAL; + + if (hinic3_get_chip_present_flag(hwdev) == 0) + return -EPERM; + + return hinic3_send_mbox_to_mgmt(hwdev, mod, cmd, buf_in, in_size, + buf_out, out_size, timeout, channel); +} +EXPORT_SYMBOL(hinic3_msg_to_mgmt_sync); + +int hinic3_msg_to_mgmt_no_ack(void *hwdev, u8 mod, u16 cmd, void *buf_in, + u16 in_size, u16 channel) +{ + if (!hwdev) + return -EINVAL; + + if (hinic3_get_chip_present_flag(hwdev) == 0) + return -EPERM; + + return hinic3_send_mbox_to_mgmt_no_ack(hwdev, mod, cmd, buf_in, + in_size, channel); +} +EXPORT_SYMBOL(hinic3_msg_to_mgmt_no_ack); + +int hinic3_msg_to_mgmt_async(void *hwdev, u8 mod, u16 cmd, void *buf_in, + u16 in_size, u16 channel) +{ + return hinic3_msg_to_mgmt_api_chain_async(hwdev, mod, cmd, buf_in, + in_size); +} +EXPORT_SYMBOL(hinic3_msg_to_mgmt_async); + +int hinic3_msg_to_mgmt_api_chain_sync(void *hwdev, u8 mod, u16 cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size, u32 timeout) +{ + if (!hwdev) + return -EINVAL; + + if (hinic3_get_chip_present_flag(hwdev) == 0) + return -EPERM; + + if (!COMM_SUPPORT_API_CHAIN((struct hinic3_hwdev *)hwdev)) { + sdk_err(((struct hinic3_hwdev *)hwdev)->dev_hdl, + "PF doesn't support api chain\n"); + return -EPERM; + } + + return hinic3_pf_msg_to_mgmt_sync(hwdev, mod, cmd, buf_in, in_size, + buf_out, out_size, timeout); +} + +int hinic3_msg_to_mgmt_api_chain_async(void *hwdev, u8 mod, u16 cmd, + const void *buf_in, u16 in_size) +{ + int err; + + if (!hwdev) + return -EINVAL; + + if (hinic3_func_type(hwdev) == TYPE_VF) { + err = -EFAULT; + sdk_err(((struct hinic3_hwdev *)hwdev)->dev_hdl, + "VF doesn't support async cmd\n"); + } else if (!COMM_SUPPORT_API_CHAIN((struct hinic3_hwdev *)hwdev)) { + err = -EPERM; + sdk_err(((struct hinic3_hwdev *)hwdev)->dev_hdl, + "PF doesn't support api chain\n"); + } else { + err = hinic3_pf_to_mgmt_async(hwdev, mod, cmd, buf_in, in_size); + } + + return err; +} +EXPORT_SYMBOL(hinic3_msg_to_mgmt_api_chain_async); + +static void send_mgmt_ack(struct hinic3_msg_pf_to_mgmt *pf_to_mgmt, + u8 mod, u16 cmd, void *buf_in, u16 in_size, + u16 msg_id) +{ + u16 buf_size; + + if (!in_size) + buf_size = BUF_OUT_DEFAULT_SIZE; + else + buf_size = in_size; + + hinic3_response_mbox_to_mgmt(pf_to_mgmt->hwdev, mod, cmd, buf_in, + buf_size, msg_id); +} + +static void mgmt_recv_msg_handler(struct hinic3_msg_pf_to_mgmt *pf_to_mgmt, + u8 mod, u16 cmd, void *buf_in, u16 in_size, + u16 msg_id, int need_resp) +{ + void *dev = pf_to_mgmt->hwdev->dev_hdl; + void *buf_out = pf_to_mgmt->mgmt_ack_buf; + enum hinic3_mod_type tmp_mod = mod; + bool ack_first = false; + u16 out_size = 0; + + memset(buf_out, 0, MAX_PF_MGMT_BUF_SIZE); + + if (mod >= HINIC3_MOD_HW_MAX) { + sdk_warn(dev, "Receive illegal message from mgmt cpu, mod = %d\n", + mod); + goto unsupported; + } + + set_bit(HINIC3_MGMT_MSG_CB_RUNNING, + &pf_to_mgmt->mgmt_msg_cb_state[tmp_mod]); + + if (!pf_to_mgmt->recv_mgmt_msg_cb[mod] || + !test_bit(HINIC3_MGMT_MSG_CB_REG, + &pf_to_mgmt->mgmt_msg_cb_state[tmp_mod])) { + sdk_warn(dev, "Receive mgmt callback is null, mod = %u, cmd = %u\n", mod, cmd); + clear_bit(HINIC3_MGMT_MSG_CB_RUNNING, + &pf_to_mgmt->mgmt_msg_cb_state[tmp_mod]); + goto unsupported; + } + + pf_to_mgmt->recv_mgmt_msg_cb[tmp_mod](pf_to_mgmt->recv_mgmt_msg_data[tmp_mod], + cmd, buf_in, in_size, + buf_out, &out_size); + + clear_bit(HINIC3_MGMT_MSG_CB_RUNNING, + &pf_to_mgmt->mgmt_msg_cb_state[tmp_mod]); + + goto resp; + 
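+ /* no callback registered for this module: answer with CMD_UNSUPPORTED status */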
+unsupported: + out_size = sizeof(struct mgmt_msg_head); + ((struct mgmt_msg_head *)buf_out)->status = HINIC3_MGMT_CMD_UNSUPPORTED; + +resp: + if (!ack_first && need_resp) + send_mgmt_ack(pf_to_mgmt, mod, cmd, buf_out, out_size, msg_id); +} + +/** + * mgmt_resp_msg_handler - handler for response message from mgmt cpu + * @pf_to_mgmt: PF to MGMT channel + * @recv_msg: received message details + **/ +static void mgmt_resp_msg_handler(struct hinic3_msg_pf_to_mgmt *pf_to_mgmt, + struct hinic3_recv_msg *recv_msg) +{ + void *dev = pf_to_mgmt->hwdev->dev_hdl; + + /* responses to async messages are discarded */ + if (recv_msg->msg_id & ASYNC_MSG_FLAG) + return; + + spin_lock(&pf_to_mgmt->sync_event_lock); + if (recv_msg->msg_id == pf_to_mgmt->sync_msg_id && + pf_to_mgmt->event_flag == SEND_EVENT_START) { + pf_to_mgmt->event_flag = SEND_EVENT_SUCCESS; + complete(&recv_msg->recv_done); + } else if (recv_msg->msg_id != pf_to_mgmt->sync_msg_id) { + sdk_err(dev, "Send msg id(0x%x) recv msg id(0x%x) mismatch, event state=%d\n", + pf_to_mgmt->sync_msg_id, recv_msg->msg_id, + pf_to_mgmt->event_flag); + } else { + sdk_err(dev, "Wait timeout, send msg id(0x%x) recv msg id(0x%x), event state=%d!\n", + pf_to_mgmt->sync_msg_id, recv_msg->msg_id, + pf_to_mgmt->event_flag); + } + spin_unlock(&pf_to_mgmt->sync_event_lock); +} + +static void recv_mgmt_msg_work_handler(struct work_struct *work) +{ + struct hinic3_mgmt_msg_handle_work *mgmt_work = + container_of(work, struct hinic3_mgmt_msg_handle_work, work); + + mgmt_recv_msg_handler(mgmt_work->pf_to_mgmt, mgmt_work->mod, + mgmt_work->cmd, mgmt_work->msg, + mgmt_work->msg_len, mgmt_work->msg_id, + !mgmt_work->async_mgmt_to_pf); + + destroy_work(&mgmt_work->work); + + kfree(mgmt_work->msg); + kfree(mgmt_work); +} + +static bool check_mgmt_head_info(struct hinic3_recv_msg *recv_msg, + u8 seq_id, u8 seg_len, u16 msg_id) +{ + if (seq_id > MGMT_MSG_MAX_SEQ_ID || seg_len > SEGMENT_LEN || + (seq_id == MGMT_MSG_MAX_SEQ_ID && seg_len > MGMT_MSG_LAST_SEG_MAX_LEN)) + return false; + + if (seq_id == 0) { + recv_msg->seq_id = seq_id; + recv_msg->msg_id = msg_id; + } else { + if (seq_id != recv_msg->seq_id + 1 || msg_id != recv_msg->msg_id) + return false; + + recv_msg->seq_id = seq_id; + } + + return true; +} + +static void init_mgmt_msg_work(struct hinic3_msg_pf_to_mgmt *pf_to_mgmt, + struct hinic3_recv_msg *recv_msg) +{ + struct hinic3_mgmt_msg_handle_work *mgmt_work = NULL; + struct hinic3_hwdev *hwdev = pf_to_mgmt->hwdev; + + mgmt_work = kzalloc(sizeof(*mgmt_work), GFP_KERNEL); + if (!mgmt_work) { + sdk_err(hwdev->dev_hdl, "Allocate mgmt work memory failed\n"); + return; + } + + if (recv_msg->msg_len) { + mgmt_work->msg = kzalloc(recv_msg->msg_len, GFP_KERNEL); + if (!mgmt_work->msg) { + sdk_err(hwdev->dev_hdl, "Allocate mgmt msg memory failed\n"); + kfree(mgmt_work); + return; + } + } + + mgmt_work->pf_to_mgmt = pf_to_mgmt; + mgmt_work->msg_len = recv_msg->msg_len; + if (recv_msg->msg_len) + memcpy(mgmt_work->msg, recv_msg->msg, recv_msg->msg_len); + mgmt_work->msg_id = recv_msg->msg_id; + mgmt_work->mod = recv_msg->mod; + mgmt_work->cmd = recv_msg->cmd; + mgmt_work->async_mgmt_to_pf = recv_msg->async_mgmt_to_pf; + + INIT_WORK(&mgmt_work->work, recv_mgmt_msg_work_handler); + queue_work_on(hisdk3_get_work_cpu_affinity(hwdev, WORK_TYPE_MGMT_MSG), + pf_to_mgmt->workq, &mgmt_work->work); +} + +/** + * recv_mgmt_msg_handler - handle a message from the mgmt cpu + * @pf_to_mgmt: PF to MGMT channel + * @header: the header of the message + * @recv_msg: received message details + **/ +static void recv_mgmt_msg_handler(struct 
hinic3_msg_pf_to_mgmt *pf_to_mgmt, + u8 *header, struct hinic3_recv_msg *recv_msg) +{ + struct hinic3_hwdev *hwdev = pf_to_mgmt->hwdev; + u64 mbox_header = *((u64 *)header); + void *msg_body = header + sizeof(mbox_header); + u8 seq_id, seq_len; + u16 msg_id; + u32 offset; + u64 dir; + + /* Don't need to get anything from hw when cmd is async */ + dir = HINIC3_MSG_HEADER_GET(mbox_header, DIRECTION); + if (dir == HINIC3_MSG_RESPONSE && + (HINIC3_MSG_HEADER_GET(mbox_header, MSG_ID) & ASYNC_MSG_FLAG)) + return; + + seq_len = HINIC3_MSG_HEADER_GET(mbox_header, SEG_LEN); + seq_id = HINIC3_MSG_HEADER_GET(mbox_header, SEQID); + msg_id = HINIC3_MSG_HEADER_GET(mbox_header, MSG_ID); + if (!check_mgmt_head_info(recv_msg, seq_id, seq_len, msg_id)) { + sdk_err(hwdev->dev_hdl, "Mgmt msg sequence id and segment length check failed\n"); + sdk_err(hwdev->dev_hdl, + "Front seq_id: 0x%x, current seq_id: 0x%x, seg len: 0x%x, front msg_id: %d, cur: %d\n", + recv_msg->seq_id, seq_id, seq_len, recv_msg->msg_id, msg_id); + /* set seq_id to invalid seq_id */ + recv_msg->seq_id = MGMT_MSG_MAX_SEQ_ID; + return; + } + + offset = seq_id * SEGMENT_LEN; + memcpy((u8 *)recv_msg->msg + offset, msg_body, seq_len); + + if (!HINIC3_MSG_HEADER_GET(mbox_header, LAST)) + return; + + recv_msg->cmd = HINIC3_MSG_HEADER_GET(mbox_header, CMD); + recv_msg->mod = HINIC3_MSG_HEADER_GET(mbox_header, MODULE); + recv_msg->async_mgmt_to_pf = HINIC3_MSG_HEADER_GET(mbox_header, + NO_ACK); + recv_msg->msg_len = HINIC3_MSG_HEADER_GET(mbox_header, MSG_LEN); + recv_msg->msg_id = msg_id; + recv_msg->seq_id = MGMT_MSG_MAX_SEQ_ID; + + if (HINIC3_MSG_HEADER_GET(mbox_header, DIRECTION) == + HINIC3_MSG_RESPONSE) { + mgmt_resp_msg_handler(pf_to_mgmt, recv_msg); + return; + } + + init_mgmt_msg_work(pf_to_mgmt, recv_msg); +} + +/** + * hinic3_mgmt_msg_aeqe_handler - handler for a mgmt message event + * @hwdev: the pointer to hw device + * @header: the header of the message + * @size: unused + **/ +void hinic3_mgmt_msg_aeqe_handler(void *hwdev, u8 *header, u8 size) +{ + struct hinic3_hwdev *dev = (struct hinic3_hwdev *)hwdev; + struct hinic3_msg_pf_to_mgmt *pf_to_mgmt = NULL; + struct hinic3_recv_msg *recv_msg = NULL; + bool is_send_dir = false; + + if ((HINIC3_MSG_HEADER_GET(*(u64 *)header, SOURCE) == + HINIC3_MSG_FROM_MBOX)) { + hinic3_mbox_func_aeqe_handler(hwdev, header, size); + return; + } + + pf_to_mgmt = dev->pf_to_mgmt; + if (!pf_to_mgmt) + return; + + is_send_dir = (HINIC3_MSG_HEADER_GET(*(u64 *)header, DIRECTION) == + HINIC3_MSG_DIRECT_SEND) ? true : false; + + recv_msg = is_send_dir ? 
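+ /* direct sends use the recv buffer, responses use the resp buffer */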
&pf_to_mgmt->recv_msg_from_mgmt : + &pf_to_mgmt->recv_resp_msg_from_mgmt; + + recv_mgmt_msg_handler(pf_to_mgmt, header, recv_msg); +} + +/** + * alloc_recv_msg - allocate received message memory + * @recv_msg: pointer that will hold the allocated data + * Return: 0 - success, negative - failure + **/ +static int alloc_recv_msg(struct hinic3_recv_msg *recv_msg) +{ + recv_msg->seq_id = MGMT_MSG_MAX_SEQ_ID; + + recv_msg->msg = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL); + if (!recv_msg->msg) + return -ENOMEM; + + return 0; +} + +/** + * free_recv_msg - free received message memory + * @recv_msg: pointer that holds the allocated data + **/ +static void free_recv_msg(struct hinic3_recv_msg *recv_msg) +{ + kfree(recv_msg->msg); +} + +/** + * alloc_msg_buf - allocate all the message buffers of PF to MGMT channel + * @pf_to_mgmt: PF to MGMT channel + * Return: 0 - success, negative - failure + **/ +static int alloc_msg_buf(struct hinic3_msg_pf_to_mgmt *pf_to_mgmt) +{ + int err; + void *dev = pf_to_mgmt->hwdev->dev_hdl; + + err = alloc_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt); + if (err) { + sdk_err(dev, "Failed to allocate recv msg\n"); + return err; + } + + err = alloc_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt); + if (err) { + sdk_err(dev, "Failed to allocate resp recv msg\n"); + goto alloc_msg_for_resp_err; + } + + pf_to_mgmt->async_msg_buf = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL); + if (!pf_to_mgmt->async_msg_buf) { + err = -ENOMEM; + goto async_msg_buf_err; + } + + pf_to_mgmt->sync_msg_buf = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL); + if (!pf_to_mgmt->sync_msg_buf) { + err = -ENOMEM; + goto sync_msg_buf_err; + } + + pf_to_mgmt->mgmt_ack_buf = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL); + if (!pf_to_mgmt->mgmt_ack_buf) { + err = -ENOMEM; + goto ack_msg_buf_err; + } + + return 0; + +ack_msg_buf_err: + kfree(pf_to_mgmt->sync_msg_buf); + +sync_msg_buf_err: + kfree(pf_to_mgmt->async_msg_buf); + +async_msg_buf_err: + free_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt); + +alloc_msg_for_resp_err: + free_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt); + return err; +} + +/** + * free_msg_buf - free all the message buffers of PF to MGMT channel + * @pf_to_mgmt: PF to MGMT channel + **/ +static void free_msg_buf(struct hinic3_msg_pf_to_mgmt *pf_to_mgmt) +{ + kfree(pf_to_mgmt->mgmt_ack_buf); + kfree(pf_to_mgmt->sync_msg_buf); + kfree(pf_to_mgmt->async_msg_buf); + + free_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt); + free_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt); +} + +/** + * hinic3_pf_to_mgmt_init - initialize PF to MGMT channel + * @hwdev: the pointer to hw device + * Return: 0 - success, negative - failure + **/ +int hinic3_pf_to_mgmt_init(struct hinic3_hwdev *hwdev) +{ + struct hinic3_msg_pf_to_mgmt *pf_to_mgmt; + void *dev = hwdev->dev_hdl; + int err; + + pf_to_mgmt = kzalloc(sizeof(*pf_to_mgmt), GFP_KERNEL); + if (!pf_to_mgmt) + return -ENOMEM; + + hwdev->pf_to_mgmt = pf_to_mgmt; + pf_to_mgmt->hwdev = hwdev; + spin_lock_init(&pf_to_mgmt->async_msg_lock); + spin_lock_init(&pf_to_mgmt->sync_event_lock); + sema_init(&pf_to_mgmt->sync_msg_lock, 1); + pf_to_mgmt->workq = create_singlethread_workqueue(HINIC3_MGMT_WQ_NAME); + if (!pf_to_mgmt->workq) { + sdk_err(dev, "Failed to initialize MGMT workqueue\n"); + err = -ENOMEM; + goto create_mgmt_workq_err; + } + + err = alloc_msg_buf(pf_to_mgmt); + if (err) { + sdk_err(dev, "Failed to allocate msg buffers\n"); + goto alloc_msg_buf_err; + } + + err = hinic3_api_cmd_init(hwdev, pf_to_mgmt->cmd_chain); + if (err) 
{ + sdk_err(dev, "Failed to init the api cmd chains\n"); + goto api_cmd_init_err; + } + + return 0; + +api_cmd_init_err: + free_msg_buf(pf_to_mgmt); + +alloc_msg_buf_err: + destroy_workqueue(pf_to_mgmt->workq); + +create_mgmt_workq_err: + spin_lock_deinit(&pf_to_mgmt->sync_event_lock); + spin_lock_deinit(&pf_to_mgmt->async_msg_lock); + sema_deinit(&pf_to_mgmt->sync_msg_lock); + kfree(pf_to_mgmt); + + return err; +} + +/** + * hinic_pf_to_mgmt_free - free PF to MGMT channel + * @hwdev: the pointer to hw device + **/ +void hinic3_pf_to_mgmt_free(struct hinic3_hwdev *hwdev) +{ + struct hinic3_msg_pf_to_mgmt *pf_to_mgmt = hwdev->pf_to_mgmt; + + /* destroy workqueue before free related pf_to_mgmt resources in case of + * illegal resource access + */ + destroy_workqueue(pf_to_mgmt->workq); + hinic3_api_cmd_free(hwdev, pf_to_mgmt->cmd_chain); + + free_msg_buf(pf_to_mgmt); + spin_lock_deinit(&pf_to_mgmt->sync_event_lock); + spin_lock_deinit(&pf_to_mgmt->async_msg_lock); + sema_deinit(&pf_to_mgmt->sync_msg_lock); + kfree(pf_to_mgmt); +} + +void hinic3_flush_mgmt_workq(void *hwdev) +{ + struct hinic3_hwdev *dev = (struct hinic3_hwdev *)hwdev; + + flush_workqueue(dev->aeqs->workq); + + if (hinic3_func_type(dev) != TYPE_VF) + flush_workqueue(dev->pf_to_mgmt->workq); +} + +int hinic3_api_cmd_read_ack(void *hwdev, u8 dest, const void *cmd, + u16 size, void *ack, u16 ack_size) +{ + struct hinic3_msg_pf_to_mgmt *pf_to_mgmt = NULL; + struct hinic3_api_cmd_chain *chain = NULL; + + if (!hwdev || !cmd || (ack_size && !ack) || size > MAX_PF_MGMT_BUF_SIZE) + return -EINVAL; + + if (!COMM_SUPPORT_API_CHAIN((struct hinic3_hwdev *)hwdev)) + return -EPERM; + + pf_to_mgmt = ((struct hinic3_hwdev *)hwdev)->pf_to_mgmt; + chain = pf_to_mgmt->cmd_chain[HINIC3_API_CMD_POLL_READ]; + + if (!(((struct hinic3_hwdev *)hwdev)->chip_present_flag)) + return -EPERM; + + return hinic3_api_cmd_read(chain, dest, cmd, size, ack, ack_size); +} + +/** + * api cmd write or read bypass default use poll, if want to use aeq interrupt, + * please set wb_trigger_aeqe to 1 + **/ +int hinic3_api_cmd_write_nack(void *hwdev, u8 dest, const void *cmd, u16 size) +{ + struct hinic3_msg_pf_to_mgmt *pf_to_mgmt = NULL; + struct hinic3_api_cmd_chain *chain = NULL; + + if (!hwdev || !size || !cmd || size > MAX_PF_MGMT_BUF_SIZE) + return -EINVAL; + + if (!COMM_SUPPORT_API_CHAIN((struct hinic3_hwdev *)hwdev)) + return -EPERM; + + pf_to_mgmt = ((struct hinic3_hwdev *)hwdev)->pf_to_mgmt; + chain = pf_to_mgmt->cmd_chain[HINIC3_API_CMD_POLL_WRITE]; + + if (!(((struct hinic3_hwdev *)hwdev)->chip_present_flag)) + return -EPERM; + + return hinic3_api_cmd_write(chain, dest, cmd, size); +} + +static int get_clp_reg(void *hwdev, enum clp_data_type data_type, + enum clp_reg_type reg_type, u32 *reg_addr) +{ + switch (reg_type) { + case HINIC3_CLP_BA_HOST: + *reg_addr = (data_type == HINIC3_CLP_REQ_HOST) ? + HINIC3_CLP_REG(REQBASE) : + HINIC3_CLP_REG(RSPBASE); + break; + + case HINIC3_CLP_SIZE_HOST: + *reg_addr = HINIC3_CLP_REG(SIZE); + break; + + case HINIC3_CLP_LEN_HOST: + *reg_addr = (data_type == HINIC3_CLP_REQ_HOST) ? 
+static int get_clp_reg(void *hwdev, enum clp_data_type data_type, + enum clp_reg_type reg_type, u32 *reg_addr) +{ + switch (reg_type) { + case HINIC3_CLP_BA_HOST: + *reg_addr = (data_type == HINIC3_CLP_REQ_HOST) ? + HINIC3_CLP_REG(REQBASE) : + HINIC3_CLP_REG(RSPBASE); + break; + + case HINIC3_CLP_SIZE_HOST: + *reg_addr = HINIC3_CLP_REG(SIZE); + break; + + case HINIC3_CLP_LEN_HOST: + *reg_addr = (data_type == HINIC3_CLP_REQ_HOST) ? + HINIC3_CLP_REG(REQ) : HINIC3_CLP_REG(RSP); + break; + + case HINIC3_CLP_START_REQ_HOST: + *reg_addr = HINIC3_CLP_REG(REQ); + break; + + case HINIC3_CLP_READY_RSP_HOST: + *reg_addr = HINIC3_CLP_REG(RSP); + break; + + default: + *reg_addr = 0; + break; + } + if (*reg_addr == 0) + return -EINVAL; + + return 0; +} + +static inline int clp_param_valid(struct hinic3_hwdev *hwdev, + enum clp_data_type data_type, + enum clp_reg_type reg_type) +{ + if (data_type == HINIC3_CLP_REQ_HOST && + reg_type == HINIC3_CLP_READY_RSP_HOST) + return -EINVAL; + + if (data_type == HINIC3_CLP_RSP_HOST && + reg_type == HINIC3_CLP_START_REQ_HOST) + return -EINVAL; + + return 0; +} + +static u32 get_clp_reg_value(struct hinic3_hwdev *hwdev, + enum clp_data_type data_type, + enum clp_reg_type reg_type, u32 reg_addr) +{ + u32 value; + + value = hinic3_hwif_read_reg(hwdev->hwif, reg_addr); + + switch (reg_type) { + case HINIC3_CLP_BA_HOST: + value = ((value >> HINIC3_CLP_OFFSET(BASE)) & + HINIC3_CLP_MASK(BASE)); + break; + + case HINIC3_CLP_SIZE_HOST: + if (data_type == HINIC3_CLP_REQ_HOST) + value = ((value >> HINIC3_CLP_OFFSET(REQ_SIZE)) & + HINIC3_CLP_MASK(SIZE)); + else + value = ((value >> HINIC3_CLP_OFFSET(RSP_SIZE)) & + HINIC3_CLP_MASK(SIZE)); + break; + + case HINIC3_CLP_LEN_HOST: + value = ((value >> HINIC3_CLP_OFFSET(LEN)) & + HINIC3_CLP_MASK(LEN)); + break; + + case HINIC3_CLP_START_REQ_HOST: + value = ((value >> HINIC3_CLP_OFFSET(START)) & + HINIC3_CLP_MASK(START)); + break; + + case HINIC3_CLP_READY_RSP_HOST: + value = ((value >> HINIC3_CLP_OFFSET(READY)) & + HINIC3_CLP_MASK(READY)); + break; + + default: + break; + } + + return value; +} + +static int hinic3_read_clp_reg(struct hinic3_hwdev *hwdev, + enum clp_data_type data_type, + enum clp_reg_type reg_type, u32 *read_value) +{ + u32 reg_addr; + int err; + + err = clp_param_valid(hwdev, data_type, reg_type); + if (err) + return err; + + err = get_clp_reg(hwdev, data_type, reg_type, &reg_addr); + if (err) + return err; + + *read_value = get_clp_reg_value(hwdev, data_type, reg_type, reg_addr); + + return 0; +} + +static int check_data_type(enum clp_data_type data_type, + enum clp_reg_type reg_type) +{ + if (data_type == HINIC3_CLP_REQ_HOST && + reg_type == HINIC3_CLP_READY_RSP_HOST) + return -EINVAL; + if (data_type == HINIC3_CLP_RSP_HOST && + reg_type == HINIC3_CLP_START_REQ_HOST) + return -EINVAL; + + return 0; +} + +static int check_reg_value(enum clp_reg_type reg_type, u32 value) +{ + if (reg_type == HINIC3_CLP_BA_HOST && + value > HINIC3_CLP_SRAM_BASE_REG_MAX) + return -EINVAL; + + if (reg_type == HINIC3_CLP_SIZE_HOST && + value > HINIC3_CLP_SRAM_SIZE_REG_MAX) + return -EINVAL; + + if (reg_type == HINIC3_CLP_LEN_HOST && + value > HINIC3_CLP_LEN_REG_MAX) + return -EINVAL; + + if ((reg_type == HINIC3_CLP_START_REQ_HOST || + reg_type == HINIC3_CLP_READY_RSP_HOST) && + value > HINIC3_CLP_START_OR_READY_REG_MAX) + return -EINVAL; + + return 0; +} +
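All of these helpers are instances of the usual shift-and-mask field access on a 32-bit register; in isolation the pattern is just (generic names, not from this patch):

	/* extract a field: shift down, then mask */
	u32 field = (reg_value >> FIELD_OFFSET) & FIELD_MASK;

	/* update a field: clear its bits, then OR in the new value */
	reg_value &= ~(FIELD_MASK << FIELD_OFFSET);
	reg_value |= (new_val & FIELD_MASK) << FIELD_OFFSET;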
+static int hinic3_check_clp_init_status(struct hinic3_hwdev *hwdev) +{ + int err; + u32 reg_value = 0; + + err = hinic3_read_clp_reg(hwdev, HINIC3_CLP_REQ_HOST, + HINIC3_CLP_BA_HOST, &reg_value); + if (err || !reg_value) { + sdk_err(hwdev->dev_hdl, "Wrong req ba value: 0x%x\n", + reg_value); + return -EINVAL; + } + + err = hinic3_read_clp_reg(hwdev, HINIC3_CLP_RSP_HOST, + HINIC3_CLP_BA_HOST, &reg_value); + if (err || !reg_value) { + sdk_err(hwdev->dev_hdl, "Wrong rsp ba value: 0x%x\n", + reg_value); + return -EINVAL; + } + + err = hinic3_read_clp_reg(hwdev, HINIC3_CLP_REQ_HOST, + HINIC3_CLP_SIZE_HOST, &reg_value); + if (err || !reg_value) { + sdk_err(hwdev->dev_hdl, "Wrong req size\n"); + return -EINVAL; + } + + err = hinic3_read_clp_reg(hwdev, HINIC3_CLP_RSP_HOST, + HINIC3_CLP_SIZE_HOST, &reg_value); + if (err || !reg_value) { + sdk_err(hwdev->dev_hdl, "Wrong rsp size\n"); + return -EINVAL; + } + + return 0; +} + +static void hinic3_write_clp_reg(struct hinic3_hwdev *hwdev, + enum clp_data_type data_type, + enum clp_reg_type reg_type, u32 value) +{ + u32 reg_addr, reg_value; + + if (check_data_type(data_type, reg_type)) + return; + + if (check_reg_value(reg_type, value)) + return; + + if (get_clp_reg(hwdev, data_type, reg_type, &reg_addr)) + return; + + reg_value = hinic3_hwif_read_reg(hwdev->hwif, reg_addr); + + switch (reg_type) { + case HINIC3_CLP_LEN_HOST: + reg_value = reg_value & + (~(HINIC3_CLP_MASK(LEN) << HINIC3_CLP_OFFSET(LEN))); + reg_value = reg_value | (value << HINIC3_CLP_OFFSET(LEN)); + break; + + case HINIC3_CLP_START_REQ_HOST: + reg_value = reg_value & + (~(HINIC3_CLP_MASK(START) << + HINIC3_CLP_OFFSET(START))); + reg_value = reg_value | (value << HINIC3_CLP_OFFSET(START)); + break; + + case HINIC3_CLP_READY_RSP_HOST: + reg_value = reg_value & + (~(HINIC3_CLP_MASK(READY) << + HINIC3_CLP_OFFSET(READY))); + reg_value = reg_value | (value << HINIC3_CLP_OFFSET(READY)); + break; + + default: + return; + } + + hinic3_hwif_write_reg(hwdev->hwif, reg_addr, reg_value); +} + +static int hinic3_read_clp_data(struct hinic3_hwdev *hwdev, + void *buf_out, u16 *out_size) +{ + int err; + u32 reg = HINIC3_CLP_DATA(RSP); + u32 ready, delay_cnt; + u32 *ptr = (u32 *)buf_out; + u32 temp_out_size = 0; + + err = hinic3_read_clp_reg(hwdev, HINIC3_CLP_RSP_HOST, + HINIC3_CLP_READY_RSP_HOST, &ready); + if (err) + return err; + + delay_cnt = 0; + while (ready == 0) { + usleep_range(9000, 10000); /* sleep 9000 us ~ 10000 us */ + delay_cnt++; + err = hinic3_read_clp_reg(hwdev, HINIC3_CLP_RSP_HOST, + HINIC3_CLP_READY_RSP_HOST, &ready); + if (err || delay_cnt > HINIC3_CLP_DELAY_CNT_MAX) { + sdk_err(hwdev->dev_hdl, "Timeout with delay_cnt: %u\n", + delay_cnt); + return -EINVAL; + } + } + + err = hinic3_read_clp_reg(hwdev, HINIC3_CLP_RSP_HOST, + HINIC3_CLP_LEN_HOST, &temp_out_size); + if (err) + return err; + + if (temp_out_size > HINIC3_CLP_SRAM_SIZE_REG_MAX || !temp_out_size) { + sdk_err(hwdev->dev_hdl, "Invalid temp_out_size: %u\n", + temp_out_size); + return -EINVAL; + } + + *out_size = (u16)temp_out_size; + for (; temp_out_size > 0; temp_out_size--) { + *ptr = hinic3_hwif_read_reg(hwdev->hwif, reg); + ptr++; + /* read 4 bytes every time */ + reg = reg + 4; + } + + hinic3_write_clp_reg(hwdev, HINIC3_CLP_RSP_HOST, + HINIC3_CLP_READY_RSP_HOST, (u32)0x0); + hinic3_write_clp_reg(hwdev, HINIC3_CLP_RSP_HOST, HINIC3_CLP_LEN_HOST, + (u32)0x0); + + return 0; +} +
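Both the ready wait here and the start wait in the write path below use the same bounded-poll idiom; with HINIC3_CLP_DELAY_CNT_MAX at 200 and a ~10 ms sleep per iteration, the effective timeout is about two seconds. Stripped to its core (the predicate name is illustrative):

	u32 tries = 0;

	while (!condition_met()) {		/* hypothetical predicate */
		usleep_range(9000, 10000);	/* ~10 ms per poll */
		if (++tries > HINIC3_CLP_DELAY_CNT_MAX)
			return -EINVAL;		/* ~2 s total before giving up */
	}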
+static int hinic3_write_clp_data(struct hinic3_hwdev *hwdev, + void *buf_in, u16 in_size) +{ + int err; + u32 reg = HINIC3_CLP_DATA(REQ); + u32 start = 1; + u32 delay_cnt = 0; + u32 *ptr = (u32 *)buf_in; + u16 size_in = in_size; + + err = hinic3_read_clp_reg(hwdev, HINIC3_CLP_REQ_HOST, + HINIC3_CLP_START_REQ_HOST, &start); + if (err != 0) + return err; + + while (start == 1) { + usleep_range(9000, 10000); /* sleep 9000 us ~ 10000 us */ + delay_cnt++; + err = hinic3_read_clp_reg(hwdev, HINIC3_CLP_REQ_HOST, + HINIC3_CLP_START_REQ_HOST, &start); + if (err || delay_cnt > HINIC3_CLP_DELAY_CNT_MAX) + return -EINVAL; + } + + hinic3_write_clp_reg(hwdev, HINIC3_CLP_REQ_HOST, + HINIC3_CLP_LEN_HOST, size_in); + hinic3_write_clp_reg(hwdev, HINIC3_CLP_REQ_HOST, + HINIC3_CLP_START_REQ_HOST, (u32)0x1); + + for (; size_in > 0; size_in--) { + hinic3_hwif_write_reg(hwdev->hwif, reg, *ptr); + ptr++; + reg = reg + sizeof(u32); + } + + return 0; +} + +static void hinic3_clear_clp_data(struct hinic3_hwdev *hwdev, + enum clp_data_type data_type) +{ + u32 reg = (data_type == HINIC3_CLP_REQ_HOST) ? + HINIC3_CLP_DATA(REQ) : HINIC3_CLP_DATA(RSP); + u32 count = HINIC3_CLP_INPUT_BUF_LEN_HOST / HINIC3_CLP_DATA_UNIT_HOST; + + for (; count > 0; count--) { + hinic3_hwif_write_reg(hwdev->hwif, reg, 0x0); + reg = reg + sizeof(u32); + } +} + +int hinic3_pf_clp_to_mgmt(void *hwdev, u8 mod, u16 cmd, const void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) +{ + struct hinic3_clp_pf_to_mgmt *clp_pf_to_mgmt; + struct hinic3_hwdev *dev = hwdev; + u64 header; + u16 real_size; + u8 *clp_msg_buf; + int err; + + if (!COMM_SUPPORT_CLP(dev)) + return -EPERM; + + clp_pf_to_mgmt = ((struct hinic3_hwdev *)hwdev)->clp_pf_to_mgmt; + if (!clp_pf_to_mgmt) + return -EPERM; + + clp_msg_buf = clp_pf_to_mgmt->clp_msg_buf; + + /* pad the request to 4-byte alignment */ + if (in_size % HINIC3_CLP_DATA_UNIT_HOST) + real_size = (in_size + (u16)sizeof(header) + + HINIC3_CLP_DATA_UNIT_HOST); + else + real_size = in_size + (u16)sizeof(header); + real_size = real_size / HINIC3_CLP_DATA_UNIT_HOST; + + if (real_size > + (HINIC3_CLP_INPUT_BUF_LEN_HOST / HINIC3_CLP_DATA_UNIT_HOST)) { + sdk_err(dev->dev_hdl, "Invalid real_size: %u\n", real_size); + return -EINVAL; + } + down(&clp_pf_to_mgmt->clp_msg_lock); + + err = hinic3_check_clp_init_status(dev); + if (err) { + sdk_err(dev->dev_hdl, "Check clp init status failed\n"); + up(&clp_pf_to_mgmt->clp_msg_lock); + return err; + } + + hinic3_clear_clp_data(dev, HINIC3_CLP_RSP_HOST); + hinic3_write_clp_reg(dev, HINIC3_CLP_RSP_HOST, + HINIC3_CLP_READY_RSP_HOST, 0x0); + + /* Send request */ + memset(clp_msg_buf, 0x0, HINIC3_CLP_INPUT_BUF_LEN_HOST); + clp_prepare_header(dev, &header, in_size, mod, 0, 0, cmd, 0); + + memcpy(clp_msg_buf, &header, sizeof(header)); + clp_msg_buf += sizeof(header); + memcpy(clp_msg_buf, buf_in, in_size); + + clp_msg_buf = clp_pf_to_mgmt->clp_msg_buf; + + hinic3_clear_clp_data(dev, HINIC3_CLP_REQ_HOST); + err = hinic3_write_clp_data(hwdev, + clp_pf_to_mgmt->clp_msg_buf, real_size); + if (err) { + sdk_err(dev->dev_hdl, "Send clp request failed\n"); + up(&clp_pf_to_mgmt->clp_msg_lock); + return -EINVAL; + } + + /* Get response */ + clp_msg_buf = clp_pf_to_mgmt->clp_msg_buf; + memset(clp_msg_buf, 0x0, HINIC3_CLP_INPUT_BUF_LEN_HOST); + err = hinic3_read_clp_data(hwdev, clp_msg_buf, &real_size); + hinic3_clear_clp_data(dev, HINIC3_CLP_RSP_HOST); + if (err) { + sdk_err(dev->dev_hdl, "Read clp response failed\n"); + up(&clp_pf_to_mgmt->clp_msg_lock); + return -EINVAL; + } + + real_size = (u16)((real_size * HINIC3_CLP_DATA_UNIT_HOST) & 0xffff); + if (real_size <= sizeof(header) || real_size > HINIC3_CLP_INPUT_BUF_LEN_HOST) { + sdk_err(dev->dev_hdl, "Invalid response size: %u\n", real_size); + up(&clp_pf_to_mgmt->clp_msg_lock); + return -EINVAL; + } + real_size = real_size - sizeof(header); + if (real_size != *out_size) { + sdk_err(dev->dev_hdl, "Invalid real_size: %u, out_size: %u\n", + real_size, *out_size); + up(&clp_pf_to_mgmt->clp_msg_lock); + return -EINVAL; + } + + memcpy(buf_out, (clp_msg_buf + sizeof(header)), real_size); + up(&clp_pf_to_mgmt->clp_msg_lock); + + return 0; +}
+ +int hinic3_clp_to_mgmt(void *hwdev, u8 mod, u16 cmd, const void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) +{ + struct hinic3_hwdev *dev = hwdev; + int err; + + if (!dev) + return -EINVAL; + + if (!dev->chip_present_flag) + return -EPERM; + + if (hinic3_func_type(hwdev) == TYPE_VF) + return -EINVAL; + + if (!COMM_SUPPORT_CLP(dev)) + return -EPERM; + + err = hinic3_pf_clp_to_mgmt(dev, mod, cmd, buf_in, in_size, buf_out, + out_size); + + return err; +} + +int hinic3_clp_pf_to_mgmt_init(struct hinic3_hwdev *hwdev) +{ + struct hinic3_clp_pf_to_mgmt *clp_pf_to_mgmt; + + if (!COMM_SUPPORT_CLP(hwdev)) + return 0; + + clp_pf_to_mgmt = kzalloc(sizeof(*clp_pf_to_mgmt), GFP_KERNEL); + if (!clp_pf_to_mgmt) + return -ENOMEM; + + clp_pf_to_mgmt->clp_msg_buf = kzalloc(HINIC3_CLP_INPUT_BUF_LEN_HOST, + GFP_KERNEL); + if (!clp_pf_to_mgmt->clp_msg_buf) { + kfree(clp_pf_to_mgmt); + return -ENOMEM; + } + sema_init(&clp_pf_to_mgmt->clp_msg_lock, 1); + + hwdev->clp_pf_to_mgmt = clp_pf_to_mgmt; + + return 0; +} + +void hinic3_clp_pf_to_mgmt_free(struct hinic3_hwdev *hwdev) +{ + struct hinic3_clp_pf_to_mgmt *clp_pf_to_mgmt = hwdev->clp_pf_to_mgmt; + + if (!COMM_SUPPORT_CLP(hwdev)) + return; + + sema_deinit(&clp_pf_to_mgmt->clp_msg_lock); + kfree(clp_pf_to_mgmt->clp_msg_buf); + kfree(clp_pf_to_mgmt); +}
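Taken together, the CLP path is one synchronous request/response exchange serialized by clp_msg_lock, and the response length must match the caller's expectation exactly. A minimal caller sketch — the request/response layouts and the mod/cmd values are illustrative assumptions, not defined by this patch:

	/* hypothetical CLP query: out_size must equal the real payload size */
	struct my_clp_req req = { 0 };	/* illustrative request layout */
	struct my_clp_rsp rsp;
	u16 out_size = sizeof(rsp);
	int err;

	err = hinic3_clp_to_mgmt(hwdev, mod, cmd, &req, sizeof(req),
				 &rsp, &out_size);
	if (err)	/* -EPERM: no CLP/VF/chip gone; -EINVAL: size mismatch */
		return err;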
 diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_mgmt.h b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_mgmt.h new file mode 100644 index 000000000000..ad86a82e7040 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_mgmt.h @@ -0,0 +1,179 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC3_MGMT_H +#define HINIC3_MGMT_H + +#include <linux/types.h> +#include <linux/completion.h> +#include <linux/semaphore.h> +#include <linux/spinlock.h> +#include <linux/workqueue.h> + +#include "comm_defs.h" +#include "hinic3_hw.h" +#include "hinic3_api_cmd.h" +#include "hinic3_hwdev.h" + +#define HINIC3_MGMT_WQ_NAME "hinic3_mgmt" + +#define HINIC3_CLP_REG_GAP 0x20 +#define HINIC3_CLP_INPUT_BUF_LEN_HOST 4096UL +#define HINIC3_CLP_DATA_UNIT_HOST 4UL + +enum clp_data_type { + HINIC3_CLP_REQ_HOST = 0, + HINIC3_CLP_RSP_HOST = 1 }; + +enum clp_reg_type { + HINIC3_CLP_BA_HOST = 0, + HINIC3_CLP_SIZE_HOST = 1, + HINIC3_CLP_LEN_HOST = 2, + HINIC3_CLP_START_REQ_HOST = 3, + HINIC3_CLP_READY_RSP_HOST = 4 }; + +#define HINIC3_CLP_REQ_SIZE_OFFSET 0 +#define HINIC3_CLP_RSP_SIZE_OFFSET 16 +#define HINIC3_CLP_BASE_OFFSET 0 +#define HINIC3_CLP_LEN_OFFSET 0 +#define HINIC3_CLP_START_OFFSET 31 +#define HINIC3_CLP_READY_OFFSET 31 +#define HINIC3_CLP_OFFSET(member) (HINIC3_CLP_##member##_OFFSET) + +#define HINIC3_CLP_SIZE_MASK 0x7ffUL +#define HINIC3_CLP_BASE_MASK 0x7ffffffUL +#define HINIC3_CLP_LEN_MASK 0x7ffUL +#define HINIC3_CLP_START_MASK 0x1UL +#define HINIC3_CLP_READY_MASK 0x1UL +#define HINIC3_CLP_MASK(member) (HINIC3_CLP_##member##_MASK) + +#define HINIC3_CLP_DELAY_CNT_MAX 200UL +#define HINIC3_CLP_SRAM_SIZE_REG_MAX 0x3ff +#define HINIC3_CLP_SRAM_BASE_REG_MAX 0x7ffffff +#define HINIC3_CLP_LEN_REG_MAX 0x3ff +#define HINIC3_CLP_START_OR_READY_REG_MAX 0x1 + +struct hinic3_recv_msg { + void *msg; + + u16 msg_len; + u16 rsvd1; + enum hinic3_mod_type mod; + + u16 cmd; + u8 seq_id; + u8 rsvd2; + u16 msg_id; + u16 rsvd3; + + int async_mgmt_to_pf; + u32 rsvd4; + + struct completion recv_done; +}; + +struct hinic3_msg_head { + u8 status; + u8 version; + u8 resp_aeq_num; + u8 rsvd0[5]; +}; + +enum comm_pf_to_mgmt_event_state { + SEND_EVENT_UNINIT = 0, + SEND_EVENT_START, + SEND_EVENT_SUCCESS, + SEND_EVENT_FAIL, + SEND_EVENT_TIMEOUT, + SEND_EVENT_END, +}; + +enum hinic3_mgmt_msg_cb_state { + HINIC3_MGMT_MSG_CB_REG = 0, + HINIC3_MGMT_MSG_CB_RUNNING, +}; + +struct hinic3_clp_pf_to_mgmt { + struct semaphore clp_msg_lock; + void *clp_msg_buf; +}; + +struct hinic3_msg_pf_to_mgmt { + struct hinic3_hwdev *hwdev; + + /* async commands cannot be scheduled */ + spinlock_t async_msg_lock; + struct semaphore sync_msg_lock; + + struct workqueue_struct *workq; + + void *async_msg_buf; + void *sync_msg_buf; + void *mgmt_ack_buf; + + struct hinic3_recv_msg recv_msg_from_mgmt; + struct hinic3_recv_msg recv_resp_msg_from_mgmt; + + u16 async_msg_id; + u16 sync_msg_id; + u32 rsvd1; + struct hinic3_api_cmd_chain *cmd_chain[HINIC3_API_CMD_MAX]; + + hinic3_mgmt_msg_cb recv_mgmt_msg_cb[HINIC3_MOD_HW_MAX]; + void *recv_mgmt_msg_data[HINIC3_MOD_HW_MAX]; + unsigned long mgmt_msg_cb_state[HINIC3_MOD_HW_MAX]; + + void *async_msg_cb_data[HINIC3_MOD_HW_MAX]; + + /* lock when sending msg */ + spinlock_t sync_event_lock; + enum comm_pf_to_mgmt_event_state event_flag; + u64 rsvd2; +}; + +struct hinic3_mgmt_msg_handle_work { + struct work_struct work; + struct hinic3_msg_pf_to_mgmt *pf_to_mgmt; + + void *msg; + u16 msg_len; + u16 rsvd1; + + enum hinic3_mod_type mod; + u16 cmd; + u16 msg_id; + + int async_mgmt_to_pf; +}; + +void hinic3_mgmt_msg_aeqe_handler(void *hwdev, u8 *header, u8 size); + +int hinic3_pf_to_mgmt_init(struct hinic3_hwdev *hwdev); + +void hinic3_pf_to_mgmt_free(struct hinic3_hwdev *hwdev); + +int hinic3_pf_to_mgmt_sync(void *hwdev, u8 mod, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, + u32 timeout); +int hinic3_pf_to_mgmt_async(void *hwdev, u8 mod, u16 cmd, const void *buf_in, + u16 in_size); + +int hinic3_pf_msg_to_mgmt_sync(void *hwdev, u8 mod, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, + u32 timeout); + +int hinic3_api_cmd_read_ack(void *hwdev, u8 dest, const void *cmd, u16 size, + void *ack, u16 ack_size); + +int hinic3_api_cmd_write_nack(void *hwdev, u8 dest, const void *cmd, u16 size); + +int hinic3_pf_clp_to_mgmt(void *hwdev, u8 mod, u16 cmd, const void *buf_in, + u16 in_size, void *buf_out, u16 *out_size); + +int hinic3_clp_pf_to_mgmt_init(struct hinic3_hwdev *hwdev); + +void hinic3_clp_pf_to_mgmt_free(struct hinic3_hwdev *hwdev); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_nictool.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_nictool.c new file mode 100644 index 000000000000..c42a7dd08d60 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_nictool.c @@ -0,0 +1,974 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include <net/sock.h> +#include <linux/cdev.h> +#include <linux/device.h> +#include <linux/interrupt.h> +#include <linux/pci.h> + +#include "ossl_knl.h" +#include "hinic3_mt.h" +#include "hinic3_crm.h" +#include "hinic3_hw.h" +#include "hinic3_hw_cfg.h" +#include "hinic3_hwdev.h" +#include "hinic3_lld.h" +#include "hinic3_hw_mt.h" +#include "hinic3_nictool.h" + +static int g_nictool_ref_cnt; + +static dev_t g_dev_id = {0}; +/*lint -save -e104 -e808*/ +static struct class *g_nictool_class; +/*lint -restore*/ +static struct cdev g_nictool_cdev; + +#define HINIC3_MAX_BUF_SIZE (2048 * 1024) + +void *g_card_node_array[MAX_CARD_NUM] = {0}; +void *g_card_vir_addr[MAX_CARD_NUM] = {0}; +u64 g_card_phy_addr[MAX_CARD_NUM] = {0}; +int card_id; + +#define HIADM3_DEV_PATH "/dev/hinic3_nictool_dev" +#define HIADM3_DEV_CLASS "hinic3_nictool_class" +#define HIADM3_DEV_NAME "hinic3_nictool_dev" +
+typedef int (*hw_driv_module)(struct hinic3_lld_dev *lld_dev, const void *buf_in, + u32 in_size, void *buf_out, u32 *out_size); +struct hw_drv_module_handle { + enum driver_cmd_type driv_cmd_name; + hw_driv_module driv_func; +}; + +static int get_single_card_info(struct hinic3_lld_dev *lld_dev, const void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + if (!buf_out || *out_size != sizeof(struct card_info)) { + pr_err("buf_out is NULL, or out_size != %lu\n", sizeof(struct card_info)); + return -EINVAL; + } + + hinic3_get_card_info(hinic3_get_sdk_hwdev_by_lld(lld_dev), buf_out); + + return 0; +} + +static int is_driver_in_vm(struct hinic3_lld_dev *lld_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + bool in_host = false; + + if (!buf_out || (*out_size != sizeof(u8))) { + pr_err("buf_out is NULL, or out_size != %lu\n", sizeof(u8)); + return -EINVAL; + } + + in_host = hinic3_is_in_host(); + if (in_host) + *((u8 *)buf_out) = 0; + else + *((u8 *)buf_out) = 1; + + return 0; +} + +static int get_all_chip_id_cmd(struct hinic3_lld_dev *lld_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + if (*out_size != sizeof(struct nic_card_id) || !buf_out) { + pr_err("Invalid parameter: out_buf_size %u, expect %lu\n", + *out_size, sizeof(struct nic_card_id)); + return -EFAULT; + } + + hinic3_get_all_chip_id(buf_out); + + return 0; +} + +static int get_card_usr_api_chain_mem(int card_idx) +{ + unsigned char *tmp = NULL; + int i; + + card_id = card_idx; + if (!g_card_vir_addr[card_idx]) { + g_card_vir_addr[card_idx] = + (void *)__get_free_pages(GFP_KERNEL, + DBGTOOL_PAGE_ORDER); + if (!g_card_vir_addr[card_idx]) { + pr_err("Failed to allocate api chain memory for card %d!\n", card_idx); + return -EFAULT; + } + + memset(g_card_vir_addr[card_idx], 0, + PAGE_SIZE * (1 << DBGTOOL_PAGE_ORDER)); + + g_card_phy_addr[card_idx] = + virt_to_phys(g_card_vir_addr[card_idx]); + if (!g_card_phy_addr[card_idx]) { + pr_err("phy addr for card %d is 0\n", card_idx); + free_pages((unsigned long)g_card_vir_addr[card_idx], DBGTOOL_PAGE_ORDER); + g_card_vir_addr[card_idx] = NULL; + return -EFAULT; + } + + tmp = g_card_vir_addr[card_idx]; + for (i = 0; i < (1 << DBGTOOL_PAGE_ORDER); i++) { + SetPageReserved(virt_to_page(tmp)); + tmp += PAGE_SIZE; + } + } + + return 0; +} + +static void chipif_get_all_pf_dev_info(struct pf_dev_info *dev_info, int card_idx, + void **g_func_handle_array) +{ + u32 func_idx; + void *hwdev = NULL; + struct pci_dev *pdev = NULL; + + for (func_idx = 0; func_idx < PF_DEV_INFO_NUM; func_idx++) { + hwdev = (void *)g_func_handle_array[func_idx]; + + dev_info[func_idx].phy_addr = g_card_phy_addr[card_idx]; + + if (!hwdev) { + dev_info[func_idx].bar0_size = 0; + dev_info[func_idx].bus = 0; + dev_info[func_idx].slot = 0; + dev_info[func_idx].func = 0; + } else { + pdev = (struct pci_dev *)hinic3_get_pcidev_hdl(hwdev); + dev_info[func_idx].bar0_size = + pci_resource_len(pdev, 0); + dev_info[func_idx].bus = pdev->bus->number; + dev_info[func_idx].slot = PCI_SLOT(pdev->devfn); + dev_info[func_idx].func = PCI_FUNC(pdev->devfn); + } + } +} +
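The chip-name parse that follows ("hinic3<N>" plus a range check on N) recurs in get_pf_dev_info(), free_knl_mem(), card_info_param_valid() and the dbgtool init/free paths; factored out it would amount to the hypothetical helper below, which is not part of the patch — note that checking the sscanf return against 1 would be stricter than the err < 0 test the patch uses:

	/* hypothetical shared helper: parse "hinic3<N>" and range-check N */
	static int parse_card_id(const char *chip_name, int *id)
	{
		if (sscanf(chip_name, HINIC3_CHIP_NAME "%d", id) != 1)
			return -EINVAL;

		return (*id >= 0 && *id < MAX_CARD_NUM) ? 0 : -EINVAL;
	}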
+static int get_pf_dev_info(struct hinic3_lld_dev *lld_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + struct pf_dev_info *dev_info = buf_out; + struct card_node *card_info = hinic3_get_chip_node_by_lld(lld_dev); + int id, err; + + if (!buf_out || *out_size != sizeof(struct pf_dev_info) * PF_DEV_INFO_NUM) { + pr_err("Invalid parameter: out_buf_size %u, expect %lu\n", + *out_size, sizeof(*dev_info) * PF_DEV_INFO_NUM); + return -EFAULT; + } + + err = sscanf(card_info->chip_name, HINIC3_CHIP_NAME "%d", &id); + if (err < 0) { + pr_err("Failed to get card id\n"); + return err; + } + + if (id >= MAX_CARD_NUM || id < 0) { + pr_err("chip id %d exceed limit[0-%d]\n", id, MAX_CARD_NUM - 1); + return -EINVAL; + } + + chipif_get_all_pf_dev_info(dev_info, id, card_info->func_handle_array); + + err = get_card_usr_api_chain_mem(id); + if (err) { + pr_err("Failed to get api chain memory for userspace %s\n", + card_info->chip_name); + return -EFAULT; + } + + return 0; +} + +static long dbgtool_knl_free_mem(int id) +{ + unsigned char *tmp = NULL; + int i; + + if (!g_card_vir_addr[id]) + return 0; + + tmp = g_card_vir_addr[id]; + for (i = 0; i < (1 << DBGTOOL_PAGE_ORDER); i++) { + ClearPageReserved(virt_to_page(tmp)); + tmp += PAGE_SIZE; + } + + free_pages((unsigned long)g_card_vir_addr[id], DBGTOOL_PAGE_ORDER); + g_card_vir_addr[id] = NULL; + g_card_phy_addr[id] = 0; + + return 0; +} + +static int free_knl_mem(struct hinic3_lld_dev *lld_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + struct card_node *card_info = hinic3_get_chip_node_by_lld(lld_dev); + int id, err; + + err = sscanf(card_info->chip_name, HINIC3_CHIP_NAME "%d", &id); + if (err < 0) { + pr_err("Failed to get card id\n"); + return err; + } + + if (id >= MAX_CARD_NUM || id < 0) { + pr_err("chip id %d exceed limit[0-%d]\n", id, MAX_CARD_NUM - 1); + return -EINVAL; + } + + dbgtool_knl_free_mem(id); + + return 0; +} + +static int card_info_param_valid(char *dev_name, const void *buf_out, u32 buf_out_size, int *id) +{ + int err; + + if (!buf_out || buf_out_size != sizeof(struct hinic3_card_func_info)) { + pr_err("Invalid parameter: out_buf_size %u, expect %lu\n", + buf_out_size, sizeof(struct hinic3_card_func_info)); + return -EINVAL; + } + + err = memcmp(dev_name, HINIC3_CHIP_NAME, strlen(HINIC3_CHIP_NAME)); + if (err) { + pr_err("Invalid chip name %s\n", dev_name); + return err; + } + + err = sscanf(dev_name, HINIC3_CHIP_NAME "%d", id); + if (err < 0) { + pr_err("Failed to get card id\n"); + return err; + } + + if (*id >= MAX_CARD_NUM || *id < 0) { + pr_err("chip id %d exceed limit[0-%d]\n", + *id, MAX_CARD_NUM - 1); + return -EINVAL; + } + + return 0; +} + +static int get_card_func_info(struct hinic3_lld_dev *lld_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + struct hinic3_card_func_info *card_func_info = buf_out; + struct card_node *card_info = hinic3_get_chip_node_by_lld(lld_dev); + int err, id = 0; + + err = card_info_param_valid(card_info->chip_name, buf_out, *out_size, &id); + if (err) + return err; + + hinic3_get_card_func_info_by_card_name(card_info->chip_name, card_func_info); + + if (!card_func_info->num_pf) { + pr_err("No function found for %s\n", card_info->chip_name); + return -EFAULT; + } + + err = get_card_usr_api_chain_mem(id); + if (err) { + pr_err("Failed to get api chain memory for userspace %s\n", + card_info->chip_name); + return -EFAULT; + } + + card_func_info->usr_api_phy_addr = g_card_phy_addr[id]; + + return 0; +}
 + +static int get_pf_cap_info(struct hinic3_lld_dev *lld_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + struct service_cap *func_cap = NULL; + struct hinic3_hwdev *hwdev = NULL; + struct card_node *card_info = hinic3_get_chip_node_by_lld(lld_dev); + struct svc_cap_info *svc_cap_info_in = (struct svc_cap_info *)buf_in; + struct svc_cap_info *svc_cap_info_out = (struct svc_cap_info *)buf_out; + + if (*out_size != sizeof(struct svc_cap_info) || in_size != sizeof(struct svc_cap_info) || + !buf_in || !buf_out) { + pr_err("Invalid parameter: out_buf_size %u, in_size: %u, expect %lu\n", + *out_size, in_size, sizeof(struct svc_cap_info)); + return -EINVAL; + } + + if (svc_cap_info_in->func_idx >= MAX_FUNCTION_NUM) { + pr_err("func_idx is illegal. func_idx: %u, max_num: %u\n", + svc_cap_info_in->func_idx, MAX_FUNCTION_NUM); + return -EINVAL; + } + + lld_hold(); + hwdev = (struct hinic3_hwdev *)(card_info->func_handle_array)[svc_cap_info_in->func_idx]; + if (!hwdev) { + lld_put(); + return -EINVAL; + } + + func_cap = &hwdev->cfg_mgmt->svc_cap; + memcpy(&svc_cap_info_out->cap, func_cap, sizeof(struct service_cap)); + lld_put(); + + return 0; +} + +static int get_hw_drv_version(struct hinic3_lld_dev *lld_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + struct drv_version_info *ver_info = buf_out; + int err; + + if (!buf_out) { + pr_err("Buf_out is NULL.\n"); + return -EINVAL; + } + + if (*out_size != sizeof(*ver_info)) { + pr_err("Unexpected out buf size from user: %u, expect: %lu\n", + *out_size, sizeof(*ver_info)); + return -EINVAL; + } + + err = snprintf(ver_info->ver, sizeof(ver_info->ver), "%s %s", HINIC3_DRV_VERSION, + "2023-04-13_16:36:41"); + if (err < 0) + return -EINVAL; + + return 0; +} + +static int get_pf_id(struct hinic3_lld_dev *lld_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + struct hinic3_pf_info *pf_info = NULL; + struct card_node *chip_node = hinic3_get_chip_node_by_lld(lld_dev); + u32 port_id; + int err; + + if (!chip_node) + return -ENODEV; + + if (!buf_out || (*out_size != sizeof(*pf_info)) || !buf_in || in_size != sizeof(u32)) { + pr_err("Unexpected out buf size from user: %u, expect: %lu, in size: %u\n", + *out_size, sizeof(*pf_info), in_size); + return -EINVAL; + } + + port_id = *((u32 *)buf_in); + pf_info = (struct hinic3_pf_info *)buf_out; + err = hinic3_get_pf_id(chip_node, port_id, &pf_info->pf_id, &pf_info->isvalid); + if (err) + return err; + + *out_size = sizeof(*pf_info); + + return 0; +} + +struct hw_drv_module_handle hw_driv_module_cmd_handle[] = { + {FUNC_TYPE, get_func_type}, + {GET_FUNC_IDX, get_func_id}, + {GET_HW_STATS, (hw_driv_module)get_hw_driver_stats}, + {CLEAR_HW_STATS, clear_hw_driver_stats}, + {GET_SELF_TEST_RES, get_self_test_result}, + {GET_CHIP_FAULT_STATS, (hw_driv_module)get_chip_faults_stats}, + {GET_SINGLE_CARD_INFO, (hw_driv_module)get_single_card_info}, + {IS_DRV_IN_VM, is_driver_in_vm}, + {GET_CHIP_ID, get_all_chip_id_cmd}, + {GET_PF_DEV_INFO, get_pf_dev_info}, + {CMD_FREE_MEM, free_knl_mem}, + {GET_CHIP_INFO, get_card_func_info}, + {GET_FUNC_CAP, get_pf_cap_info}, + {GET_DRV_VERSION, get_hw_drv_version}, + {GET_PF_ID, get_pf_id}, +}; + +static int alloc_tmp_buf(void *hwdev, struct msg_module *nt_msg, u32 in_size, + void **buf_in, u32 out_size, void **buf_out) +{ + int ret; + + ret = alloc_buff_in(hwdev, nt_msg, in_size, buf_in); + if (ret) { + pr_err("Alloc tool cmd buff in failed\n"); + return ret; + } + + ret = alloc_buff_out(hwdev, nt_msg, out_size, buf_out); + if (ret) { + pr_err("Alloc tool cmd buff out failed\n"); + goto out_free_buf_in; + } + + return 0; + +out_free_buf_in: + free_buff_in(hwdev, nt_msg, *buf_in); + + return ret; +} + +static void free_tmp_buf(void *hwdev, struct msg_module *nt_msg, + void *buf_in, void *buf_out) +{ + free_buff_out(hwdev, nt_msg, buf_out); + free_buff_in(hwdev, nt_msg, buf_in); +}
 + +static int send_to_hw_driver(struct hinic3_lld_dev *lld_dev, struct msg_module *nt_msg, + const void *buf_in, u32 in_size, void *buf_out, u32 *out_size) +{ + int index, num_cmds = sizeof(hw_driv_module_cmd_handle) / + sizeof(hw_driv_module_cmd_handle[0]); + enum driver_cmd_type cmd_type = + (enum driver_cmd_type)(nt_msg->msg_formate); + int err = 0; + + for (index = 0; index < num_cmds; index++) { + if (cmd_type == + hw_driv_module_cmd_handle[index].driv_cmd_name) { + err = hw_driv_module_cmd_handle[index].driv_func + (lld_dev, buf_in, in_size, buf_out, out_size); + break; + } + } + + if (index == num_cmds) { + pr_err("Can't find callback for %d\n", cmd_type); + return -EINVAL; + } + + return err; +} + +static int send_to_service_driver(struct hinic3_lld_dev *lld_dev, struct msg_module *nt_msg, + const void *buf_in, u32 in_size, void *buf_out, u32 *out_size) +{ + const char **service_name = NULL; + enum hinic3_service_type type; + void *uld_dev = NULL; + int ret = -EINVAL; + + service_name = hinic3_get_uld_names(); + type = nt_msg->module - SEND_TO_SRV_DRV_BASE; + if (type >= SERVICE_T_MAX) { + pr_err("Ioctl input module id: %u is incorrect\n", nt_msg->module); + return -EINVAL; + } + + uld_dev = hinic3_get_uld_dev(lld_dev, type); + if (!uld_dev) { + if (nt_msg->msg_formate == GET_DRV_VERSION) + return 0; + + pr_err("Cannot get the uld dev: %s, %s driver may not be registered\n", + nt_msg->device_name, service_name[type]); + return -EINVAL; + } + + if (g_uld_info[type].ioctl) + ret = g_uld_info[type].ioctl(uld_dev, nt_msg->msg_formate, + buf_in, in_size, buf_out, out_size); + uld_dev_put(lld_dev, type); + + return ret; +} + +static int nictool_exec_cmd(struct hinic3_lld_dev *lld_dev, struct msg_module *nt_msg, + void *buf_in, u32 in_size, void *buf_out, u32 *out_size) +{ + int ret = 0; + + switch (nt_msg->module) { + case SEND_TO_HW_DRIVER: + ret = send_to_hw_driver(lld_dev, nt_msg, buf_in, in_size, buf_out, out_size); + break; + case SEND_TO_MPU: + ret = send_to_mpu(hinic3_get_sdk_hwdev_by_lld(lld_dev), + nt_msg, buf_in, in_size, buf_out, out_size); + break; + case SEND_TO_SM: + ret = send_to_sm(hinic3_get_sdk_hwdev_by_lld(lld_dev), + nt_msg, buf_in, in_size, buf_out, out_size); + break; + case SEND_TO_NPU: + ret = send_to_npu(hinic3_get_sdk_hwdev_by_lld(lld_dev), + nt_msg, buf_in, in_size, buf_out, out_size); + break; + default: + ret = send_to_service_driver(lld_dev, nt_msg, buf_in, in_size, buf_out, out_size); + break; + } + + return ret; +} + +static int cmd_parameter_valid(struct msg_module *nt_msg, unsigned long arg, + u32 *out_size_expect, u32 *in_size) +{ + if (copy_from_user(nt_msg, (void *)arg, sizeof(*nt_msg))) { + pr_err("Copy information from user failed\n"); + return -EFAULT; + } + + *out_size_expect = nt_msg->buf_out_size; + *in_size = nt_msg->buf_in_size; + if (*out_size_expect > HINIC3_MAX_BUF_SIZE || + *in_size > HINIC3_MAX_BUF_SIZE) { + pr_err("Invalid in size: %u or out size: %u\n", + *in_size, *out_size_expect); + return -EFAULT; + } + + nt_msg->device_name[IFNAMSIZ - 1] = '\0'; + + return 0; +}
 + +static struct hinic3_lld_dev *get_lld_dev_by_nt_msg(struct msg_module *nt_msg) +{ + struct hinic3_lld_dev *lld_dev = NULL; + + if (nt_msg->module >= SEND_TO_SRV_DRV_BASE && nt_msg->module < SEND_TO_DRIVER_MAX && + nt_msg->module != SEND_TO_HW_DRIVER && nt_msg->msg_formate != GET_DRV_VERSION) { + lld_dev = hinic3_get_lld_dev_by_dev_name(nt_msg->device_name, + nt_msg->module - SEND_TO_SRV_DRV_BASE); + } else { + lld_dev = hinic3_get_lld_dev_by_chip_name(nt_msg->device_name); + if (!lld_dev) + lld_dev = hinic3_get_lld_dev_by_dev_name(nt_msg->device_name, + SERVICE_T_MAX); + } + + if (nt_msg->module == SEND_TO_NIC_DRIVER && (nt_msg->msg_formate == GET_XSFP_INFO || + nt_msg->msg_formate == GET_XSFP_PRESENT)) + lld_dev = hinic3_get_lld_dev_by_chip_and_port(nt_msg->device_name, + nt_msg->port_id); + + if (nt_msg->module == SEND_TO_CUSTOM_DRIVER && + nt_msg->msg_formate == CMD_CUSTOM_BOND_GET_CHIP_NAME) + lld_dev = hinic3_get_lld_dev_by_dev_name(nt_msg->device_name, SERVICE_T_MAX); + + return lld_dev; +} + +static long hinicadm_k_unlocked_ioctl(struct file *pfile, unsigned long arg) +{ + struct hinic3_lld_dev *lld_dev = NULL; + struct msg_module nt_msg; + void *buf_out = NULL; + void *buf_in = NULL; + u32 out_size_expect = 0; + u32 out_size = 0; + u32 in_size = 0; + int ret = 0; + + memset(&nt_msg, 0, sizeof(nt_msg)); + if (cmd_parameter_valid(&nt_msg, arg, &out_size_expect, &in_size)) + return -EFAULT; + + lld_dev = get_lld_dev_by_nt_msg(&nt_msg); + if (!lld_dev) { + if (nt_msg.msg_formate != DEV_NAME_TEST) + pr_err("Can not find device %s for module %d\n", + nt_msg.device_name, nt_msg.module); + + return -ENODEV; + } + + if (nt_msg.msg_formate == DEV_NAME_TEST) + return 0; + + ret = alloc_tmp_buf(hinic3_get_sdk_hwdev_by_lld(lld_dev), &nt_msg, + in_size, &buf_in, out_size_expect, &buf_out); + if (ret) { + pr_err("Alloc tmp buff failed\n"); + goto out_free_lock; + } + + out_size = out_size_expect; + + ret = nictool_exec_cmd(lld_dev, &nt_msg, buf_in, in_size, buf_out, &out_size); + if (ret) { + pr_err("nictool_exec_cmd failed, module: %u, ret: %d.\n", nt_msg.module, ret); + goto out_free_buf; + } + + if (out_size > out_size_expect) { + ret = -EFAULT; + pr_err("Out size is greater than expected out size from user: %u, out size: %u\n", + out_size_expect, out_size); + goto out_free_buf; + } + + ret = copy_buf_out_to_user(&nt_msg, out_size, buf_out); + if (ret) + pr_err("Copy information to user failed\n"); + +out_free_buf: + free_tmp_buf(hinic3_get_sdk_hwdev_by_lld(lld_dev), &nt_msg, buf_in, buf_out); + +out_free_lock: + lld_dev_put(lld_dev); + return (long)ret; +}
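User space reaches hinicadm_k_unlocked_ioctl() through the character device registered in nictool_k_init() below. A hypothetical caller would look roughly like this — the ioctl request code and the full struct msg_module layout live in hinic3_mt.h and are abbreviated here:

	/* hypothetical user-space invocation; field names follow struct msg_module */
	int fd = open("/dev/hinic3_nictool_dev", O_RDWR);
	struct msg_module msg = { 0 };

	strncpy(msg.device_name, "hinic30", IFNAMSIZ - 1);	/* illustrative name */
	msg.module = SEND_TO_HW_DRIVER;
	msg.msg_formate = GET_FUNC_IDX;
	msg.buf_in_size = 0;
	msg.buf_out_size = sizeof(u16);	/* out-buffer fields omitted for brevity */

	ioctl(fd, NICTOOL_REQ, &msg);	/* NICTOOL_REQ: illustrative request code */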
 + +/** + * dbgtool_knl_ffm_info_rd - Read ffm information + * @para: the dbgtool parameter + * @dbgtool_info: the dbgtool info + **/ +static long dbgtool_knl_ffm_info_rd(struct dbgtool_param *para, + struct dbgtool_k_glb_info *dbgtool_info) +{ + /* Copy the ffm_info to user mode */ + if (copy_to_user(para->param.ffm_rd, dbgtool_info->ffm, + (unsigned int)sizeof(struct ffm_record_info))) { + pr_err("Copy ffm_info to user failed\n"); + return -EFAULT; + } + + return 0; +} + +static long dbgtool_k_unlocked_ioctl(struct file *pfile, + unsigned int real_cmd, + unsigned long arg) +{ + long ret; + struct dbgtool_param param; + struct dbgtool_k_glb_info *dbgtool_info = NULL; + struct card_node *card_info = NULL; + int i; + + (void)memset(&param, 0, sizeof(param)); + + if (copy_from_user(&param, (void *)arg, sizeof(param))) { + pr_err("Copy param from user failed\n"); + return -EFAULT; + } + + lld_hold(); + for (i = 0; i < MAX_CARD_NUM; i++) { + card_info = (struct card_node *)g_card_node_array[i]; + if (!card_info) + continue; + if (!strncmp(param.chip_name, card_info->chip_name, IFNAMSIZ)) + break; + } + + if (i == MAX_CARD_NUM || !card_info) { + lld_put(); + pr_err("Can't find this card %s\n", param.chip_name); + return -EFAULT; + } + + card_id = i; + dbgtool_info = (struct dbgtool_k_glb_info *)card_info->dbgtool_info; + + down(&dbgtool_info->dbgtool_sem); + + switch (real_cmd) { + case DBGTOOL_CMD_FFM_RD: + ret = dbgtool_knl_ffm_info_rd(&param, dbgtool_info); + break; + case DBGTOOL_CMD_MSG_2_UP: + pr_err("Not supposed to use this cmd(0x%x).\n", real_cmd); + ret = 0; + break; + + default: + pr_err("Dbgtool cmd(0x%x) not supported now\n", real_cmd); + ret = -EFAULT; + } + + up(&dbgtool_info->dbgtool_sem); + + lld_put(); + + return ret; +} + +static int nictool_k_release(struct inode *pnode, struct file *pfile) +{ + return 0; +} + +static int nictool_k_open(struct inode *pnode, struct file *pfile) +{ + return 0; +} + +static ssize_t nictool_k_read(struct file *pfile, char __user *ubuf, + size_t size, loff_t *ppos) +{ + return 0; +} + +static ssize_t nictool_k_write(struct file *pfile, const char __user *ubuf, + size_t size, loff_t *ppos) +{ + return 0; +} + +static long nictool_k_unlocked_ioctl(struct file *pfile, + unsigned int cmd, unsigned long arg) +{ + unsigned int real_cmd; + + real_cmd = _IOC_NR(cmd); + + return (real_cmd == NICTOOL_CMD_TYPE) ? + hinicadm_k_unlocked_ioctl(pfile, arg) : + dbgtool_k_unlocked_ioctl(pfile, real_cmd, arg); +} + +static int hinic3_mem_mmap(struct file *filp, struct vm_area_struct *vma) +{ + unsigned long vmsize = vma->vm_end - vma->vm_start; + phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT; + phys_addr_t phy_addr; + + if (vmsize > (PAGE_SIZE * (1 << DBGTOOL_PAGE_ORDER))) { + pr_err("Map size = %lu is bigger than alloc\n", vmsize); + return -EAGAIN; + } + + /* old version of tool set vma->vm_pgoff to 0 */ + phy_addr = offset ? offset : g_card_phy_addr[card_id]; + + if (!phy_addr) { + pr_err("Card_id = %d physical address is 0\n", card_id); + return -EAGAIN; + } + + /* Disable cache and write buffer in the mapping area */ + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + if (remap_pfn_range(vma, vma->vm_start, (phy_addr >> PAGE_SHIFT), + vmsize, vma->vm_page_prot)) { + pr_err("Remap pfn range failed.\n"); + return -EAGAIN; + } + + return 0; +} + +static const struct file_operations fifo_operations = { + .owner = THIS_MODULE, + .release = nictool_k_release, + .open = nictool_k_open, + .read = nictool_k_read, + .write = nictool_k_write, + .unlocked_ioctl = nictool_k_unlocked_ioctl, + .mmap = hinic3_mem_mmap, +};
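hinic3_mem_mmap() above lets the user-space tool map the reserved api-chain pages; because an old tool version passes vm_pgoff as 0, an offset of 0 falls back to the current card's g_card_phy_addr. A sketch of the user-space side (illustrative only, assuming fd is the open nictool device):

	/* hypothetical user-space mapping of the per-card dbgtool pages */
	size_t len = (size_t)getpagesize() << DBGTOOL_PAGE_ORDER; /* kernel order: 10 */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
		       fd, 0 /* 0: driver falls back to g_card_phy_addr[card_id] */);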
chain\n"); + goto dbgtool_info_ffm_fail; + } + + sema_init(&dbgtool_info->dbgtool_sem, 1); + + err = sscanf(chip_info->chip_name, HINIC3_CHIP_NAME "%d", &id); + if (err < 0) { + pr_err("Failed to get card id\n"); + goto sscanf_chdev_fail; + } + + g_card_node_array[id] = chip_info; + + return 0; + +sscanf_chdev_fail: + kfree(dbgtool_info->ffm); + +dbgtool_info_ffm_fail: + kfree(dbgtool_info); + chip_info->dbgtool_info = NULL; + +dbgtool_info_fail: + if (hinic3_func_type(hwdev) != TYPE_VF) + chip_info->func_handle_array[hinic3_global_func_id(hwdev)] = NULL; + chip_info->func_num--; + return -ENOMEM; +} + +/** + * nictool_k_init - initialize the hw interface + **/ +/* temp for dbgtool_info */ +/*lint -e438*/ +int nictool_k_init(void *hwdev, void *chip_node) +{ + struct card_node *chip_info = (struct card_node *)chip_node; + struct device *pdevice = NULL; + int err; + + err = alloc_dbgtool_info(hwdev, chip_info); + if (err) + return err; + + if (g_nictool_ref_cnt++) { + /* already initialized */ + return 0; + } + + err = alloc_chrdev_region(&g_dev_id, 0, 1, HIADM3_DEV_NAME); + if (err) { + pr_err("Register nictool_dev failed(0x%x)\n", err); + goto alloc_chdev_fail; + } + + /* Create equipment */ + /*lint -save -e160*/ + g_nictool_class = class_create(THIS_MODULE, HIADM3_DEV_CLASS); + /*lint -restore*/ + if (IS_ERR(g_nictool_class)) { + pr_err("Create nictool_class fail\n"); + err = -EFAULT; + goto class_create_err; + } + + /* Initializing the character device */ + cdev_init(&g_nictool_cdev, &fifo_operations); + + /* Add devices to the operating system */ + err = cdev_add(&g_nictool_cdev, g_dev_id, 1); + if (err < 0) { + pr_err("Add nictool_dev to operating system fail(0x%x)\n", err); + goto cdev_add_err; + } + + /* Export device information to user space + * (/sys/class/class name/device name) + */ + pdevice = device_create(g_nictool_class, NULL, + g_dev_id, NULL, HIADM3_DEV_NAME); + if (IS_ERR(pdevice)) { + pr_err("Export nictool device information to user space fail\n"); + err = -EFAULT; + goto device_create_err; + } + + pr_info("Register nictool_dev to system succeed\n"); + + return 0; + +device_create_err: + cdev_del(&g_nictool_cdev); + +cdev_add_err: + class_destroy(g_nictool_class); + +class_create_err: + g_nictool_class = NULL; + unregister_chrdev_region(g_dev_id, 1); + +alloc_chdev_fail: + g_nictool_ref_cnt--; + free_dbgtool_info(hwdev, chip_info); + + return err; +} /*lint +e438*/ + +void nictool_k_uninit(void *hwdev, void *chip_node) +{ + struct card_node *chip_info = (struct card_node *)chip_node; + + free_dbgtool_info(hwdev, chip_info); + + if (!g_nictool_ref_cnt) + return; + + if (--g_nictool_ref_cnt) + return; + + if (!g_nictool_class || IS_ERR(g_nictool_class)) { + pr_err("Nictool class is NULL.\n"); + return; + } + + device_destroy(g_nictool_class, g_dev_id); + cdev_del(&g_nictool_cdev); + class_destroy(g_nictool_class); + g_nictool_class = NULL; + + unregister_chrdev_region(g_dev_id, 1); + + pr_info("Unregister nictool_dev succeed\n"); +} + diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_nictool.h b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_nictool.h new file mode 100644 index 000000000000..f368133e341e --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_nictool.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC3_NICTOOL_H +#define HINIC3_NICTOOL_H + +#include "hinic3_mt.h" +#include "hinic3_crm.h" + +#ifndef MAX_SIZE +#define MAX_SIZE (16) +#endif + +#define 
+#define DBGTOOL_PAGE_ORDER (10) + +#define MAX_CARD_NUM (64) + +int nictool_k_init(void *hwdev, void *chip_node); +void nictool_k_uninit(void *hwdev, void *chip_node); + +void hinic3_get_all_chip_id(void *id_info); + +void hinic3_get_card_func_info_by_card_name + (const char *chip_name, struct hinic3_card_func_info *card_func); + +void hinic3_get_card_info(const void *hwdev, void *bufin); + +bool hinic3_is_in_host(void); + +int hinic3_get_pf_id(struct card_node *chip_node, u32 port_id, u32 *pf_id, u32 *isvalid); + +extern struct hinic3_uld_info g_uld_info[SERVICE_T_MAX]; + +#endif + diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_pci_id_tbl.h b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_pci_id_tbl.h new file mode 100644 index 000000000000..d028ca62fab3 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_pci_id_tbl.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC3_PCI_ID_TBL_H +#define HINIC3_PCI_ID_TBL_H + +#define PCI_VENDOR_ID_HUAWEI 0x19e5 +#define HINIC3_DEV_ID_STANDARD 0x0222 +#define HINIC3_DEV_ID_SDI_5_1_PF 0x0226 +#define HINIC3_DEV_ID_SDI_5_0_PF 0x0225 +#define HINIC3_DEV_ID_VF 0x375F +#define HINIC3_DEV_ID_VF_HV 0x379F +#define HINIC3_DEV_ID_SPU 0xAC00 +#endif + diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_prof_adap.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_prof_adap.c new file mode 100644 index 000000000000..fbb6198a30f6 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_prof_adap.c @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include <linux/kernel.h> +#include <linux/semaphore.h> +#include <linux/workqueue.h> + +#include "ossl_knl.h" +#include "hinic3_hwdev.h" +#include "hinic3_profile.h" +#include "hinic3_prof_adap.h" + +static bool is_match_prof_default_adapter(void *device) +{ + /* always match the default profile adapter in the standard scenario */ + return true; +} + +struct hinic3_prof_adapter prof_adap_objs[] = { + /* Add prof adapter before default profile */ + { + .type = PROF_ADAP_TYPE_DEFAULT, + .match = is_match_prof_default_adapter, + .init = NULL, + .deinit = NULL, + }, +}; + +void hisdk3_init_profile_adapter(struct hinic3_hwdev *hwdev) +{ + u16 num_adap = ARRAY_SIZE(prof_adap_objs); + + hwdev->prof_adap = hinic3_prof_init(hwdev, prof_adap_objs, num_adap, + (void *)&hwdev->prof_attr); + if (hwdev->prof_adap) + sdk_info(hwdev->dev_hdl, "Found profile adapter type: %d\n", hwdev->prof_adap->type); +} + +void hisdk3_deinit_profile_adapter(struct hinic3_hwdev *hwdev) +{ + hinic3_prof_deinit(hwdev->prof_adap, hwdev->prof_attr); +} diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_prof_adap.h b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_prof_adap.h new file mode 100644 index 000000000000..da632da2f898 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_prof_adap.h @@ -0,0 +1,111 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC3_PROF_ADAP_H +#define HINIC3_PROF_ADAP_H + +#include <linux/workqueue.h> + +#include "hinic3_profile.h" +#include "hinic3_hwdev.h" + +enum cpu_affinity_work_type { + WORK_TYPE_AEQ, + WORK_TYPE_MBOX, + WORK_TYPE_MGMT_MSG, + WORK_TYPE_COMM, +}; + +enum hisdk3_sw_features { + HISDK3_SW_F_CHANNEL_LOCK = BIT(0), +}; + +struct hisdk3_prof_ops { + void (*fault_recover)(void *data, u16 src, u16 level); + int
(*get_work_cpu_affinity)(void *data, u32 work_type); + void (*probe_success)(void *data); + void (*remove_pre_handle)(struct hinic3_hwdev *hwdev); +}; + +struct hisdk3_prof_attr { + void *priv_data; + u64 hw_feature_cap; + u64 sw_feature_cap; + u64 dft_hw_feature; + u64 dft_sw_feature; + + struct hisdk3_prof_ops *ops; +}; + +#define GET_PROF_ATTR_OPS(hwdev) \ + ((hwdev)->prof_attr ? (hwdev)->prof_attr->ops : NULL) + +#ifdef static +#undef static +#define LLT_STATIC_DEF_SAVED +#endif + +static inline int hisdk3_get_work_cpu_affinity(struct hinic3_hwdev *hwdev, + enum cpu_affinity_work_type type) +{ + struct hisdk3_prof_ops *ops = GET_PROF_ATTR_OPS(hwdev); + + if (ops && ops->get_work_cpu_affinity) + return ops->get_work_cpu_affinity(hwdev->prof_attr->priv_data, type); + + return WORK_CPU_UNBOUND; +} + +static inline void hisdk3_fault_post_process(struct hinic3_hwdev *hwdev, + u16 src, u16 level) +{ + struct hisdk3_prof_ops *ops = GET_PROF_ATTR_OPS(hwdev); + + if (ops && ops->fault_recover) + ops->fault_recover(hwdev->prof_attr->priv_data, src, level); +} + +static inline void hisdk3_probe_success(struct hinic3_hwdev *hwdev) +{ + struct hisdk3_prof_ops *ops = GET_PROF_ATTR_OPS(hwdev); + + if (ops && ops->probe_success) + ops->probe_success(hwdev->prof_attr->priv_data); +} + +static inline bool hisdk3_sw_feature_en(const struct hinic3_hwdev *hwdev, + u64 feature_bit) +{ + if (!hwdev->prof_attr) + return false; + + return (hwdev->prof_attr->sw_feature_cap & feature_bit) && + (hwdev->prof_attr->dft_sw_feature & feature_bit); +} + +#ifdef CONFIG_MODULE_PROF +static inline void hisdk3_remove_pre_process(struct hinic3_hwdev *hwdev) +{ + struct hisdk3_prof_ops *ops = NULL; + + if (!hwdev) + return; + + ops = GET_PROF_ATTR_OPS(hwdev); + + if (ops && ops->remove_pre_handle) + ops->remove_pre_handle(hwdev); +} +#else +static inline void hisdk3_remove_pre_process(struct hinic3_hwdev *hwdev) {}; +#endif + + +#define SW_FEATURE_EN(hwdev, f_bit) \ + hisdk3_sw_feature_en(hwdev, HISDK3_SW_F_##f_bit) +#define HISDK3_F_CHANNEL_LOCK_EN(hwdev) SW_FEATURE_EN(hwdev, CHANNEL_LOCK) + +void hisdk3_init_profile_adapter(struct hinic3_hwdev *hwdev); +void hisdk3_deinit_profile_adapter(struct hinic3_hwdev *hwdev); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_sm_lt.h b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_sm_lt.h new file mode 100644 index 000000000000..e204a9815ea8 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_sm_lt.h @@ -0,0 +1,160 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef CHIPIF_SM_LT_H +#define CHIPIF_SM_LT_H + +#include <linux/types.h> + +#define SM_LT_LOAD (0x12) +#define SM_LT_STORE (0x14) + +#define SM_LT_NUM_OFFSET 13 +#define SM_LT_ABUF_FLG_OFFSET 12 +#define SM_LT_BC_OFFSET 11 + +#define SM_LT_ENTRY_16B 16 +#define SM_LT_ENTRY_32B 32 +#define SM_LT_ENTRY_48B 48 +#define SM_LT_ENTRY_64B 64 + +#define TBL_LT_OFFSET_DEFAULT 0 + +#define SM_CACHE_LINE_SHFT 4 /* log2(16) */ +#define SM_CACHE_LINE_SIZE 16 /* the size of cache line */ + +#define MAX_SM_LT_READ_LINE_NUM 4 +#define MAX_SM_LT_WRITE_LINE_NUM 3 + +#define SM_LT_FULL_BYTEENB 0xFFFF + +#define TBL_GET_ENB3_MASK(bitmask) ((u16)(((bitmask) >> 32) & 0xFFFF)) +#define TBL_GET_ENB2_MASK(bitmask) ((u16)(((bitmask) >> 16) & 0xFFFF)) +#define TBL_GET_ENB1_MASK(bitmask) ((u16)((bitmask) & 0xFFFF)) + +enum { + SM_LT_NUM_0 = 0, /* lt num = 0, load/store 16B */ + SM_LT_NUM_1, /* lt num = 1, load/store 32B */ + SM_LT_NUM_2, /* lt num = 2, load/store 48B */ 
+ SM_LT_NUM_3 /* lt num = 3, load 64B */ +}; + +/* lt load request */ +union sml_lt_req_head { + struct { + u32 offset:8; + u32 pad:3; + u32 bc:1; + u32 abuf_flg:1; + u32 num:2; + u32 ack:1; + u32 op_id:5; + u32 instance:6; + u32 src:5; + } bs; + + u32 value; +}; + +struct sml_lt_load_req { + u32 extra; + union sml_lt_req_head head; + u32 index; + u32 pad0; + u32 pad1; +}; + +struct sml_lt_store_req { + u32 extra; + union sml_lt_req_head head; + u32 index; + u32 byte_enb[2]; + u8 write_data[48]; +}; + +enum { + SM_LT_OFFSET_1 = 1, + SM_LT_OFFSET_2, + SM_LT_OFFSET_3, + SM_LT_OFFSET_4, + SM_LT_OFFSET_5, + SM_LT_OFFSET_6, + SM_LT_OFFSET_7, + SM_LT_OFFSET_8, + SM_LT_OFFSET_9, + SM_LT_OFFSET_10, + SM_LT_OFFSET_11, + SM_LT_OFFSET_12, + SM_LT_OFFSET_13, + SM_LT_OFFSET_14, + SM_LT_OFFSET_15 +}; + +enum HINIC_CSR_API_DATA_OPERATION_ID { + HINIC_CSR_OPERATION_WRITE_CSR = 0x1E, + HINIC_CSR_OPERATION_READ_CSR = 0x1F +}; + +enum HINIC_CSR_API_DATA_NEED_RESPONSE_DATA { + HINIC_CSR_NO_RESP_DATA = 0, + HINIC_CSR_NEED_RESP_DATA = 1 +}; + +enum HINIC_CSR_API_DATA_DATA_SIZE { + HINIC_CSR_DATA_SZ_32 = 0, + HINIC_CSR_DATA_SZ_64 = 1 +}; + +struct hinic_csr_request_api_data { + u32 dw0; + + union { + struct { + u32 reserved1:13; + /* this field indicates the write/read data size: + * 2'b00: 32 bits + * 2'b01: 64 bits + * 2'b10~2'b11:reserved + */ + u32 data_size:2; + /* this field indicates that requestor expect receive a + * response data or not. + * 1'b0: expect not to receive a response data. + * 1'b1: expect to receive a response data. + */ + u32 need_response:1; + /* this field indicates the operation that the requestor + * expected. + * 5'b1_1110: write value to csr space. + * 5'b1_1111: read register from csr space. + */ + u32 operation_id:5; + u32 reserved2:6; + /* this field specifies the Src node ID for this API + * request message. + */ + u32 src_node_id:5; + } bits; + + u32 val32; + } dw1; + + union { + struct { + /* it specifies the CSR address. */ + u32 csr_addr:26; + u32 reserved3:6; + } bits; + + u32 val32; + } dw2; + + /* if data_size=2'b01, it is high 32 bits of write data. else, it is + * 32'hFFFF_FFFF. + */ + u32 csr_write_data_h; + /* the low 32 bits of write data. 
*/ + u32 csr_write_data_l; +}; +#endif + diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_sml_lt.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_sml_lt.c new file mode 100644 index 000000000000..b802104153c5 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_sml_lt.c @@ -0,0 +1,160 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/pci.h> +#include <linux/device.h> +#include <linux/spinlock.h> +#include <linux/slab.h> +#include <linux/module.h> + +#include "ossl_knl.h" +#include "hinic3_common.h" +#include "hinic3_sm_lt.h" +#include "hinic3_hw.h" +#include "hinic3_hwdev.h" +#include "hinic3_api_cmd.h" +#include "hinic3_mgmt.h" + +#define ACK 1 +#define NOACK 0 + +#define LT_LOAD16_API_SIZE (16 + 4) +#define LT_STORE16_API_SIZE (32 + 4) + +#ifndef HTONL +#define HTONL(x) \ + ((((x) & 0x000000ff) << 24) \ + | (((x) & 0x0000ff00) << 8) \ + | (((x) & 0x00ff0000) >> 8) \ + | (((x) & 0xff000000) >> 24)) +#endif + +static inline void sm_lt_build_head(union sml_lt_req_head *head, + u8 instance_id, + u8 op_id, u8 ack, + u8 offset, u8 num) +{ + head->value = 0; + head->bs.instance = instance_id; + head->bs.op_id = op_id; + head->bs.ack = ack; + head->bs.num = num; + head->bs.abuf_flg = 0; + head->bs.bc = 1; + head->bs.offset = offset; + head->value = HTONL((head->value)); +} + +static inline void sm_lt_load_build_req(struct sml_lt_load_req *req, + u8 instance_id, + u8 op_id, u8 ack, + u32 lt_index, + u8 offset, u8 num) +{ + sm_lt_build_head(&req->head, instance_id, op_id, ack, offset, num); + req->extra = 0; + req->index = lt_index; + req->index = HTONL(req->index); + req->pad0 = 0; + req->pad1 = 0; +} + +static void sml_lt_store_data(u32 *dst, const u32 *src, u8 num) +{ + switch (num) { + case SM_LT_NUM_2: + *(dst + SM_LT_OFFSET_11) = *(src + SM_LT_OFFSET_11); + *(dst + SM_LT_OFFSET_10) = *(src + SM_LT_OFFSET_10); + *(dst + SM_LT_OFFSET_9) = *(src + SM_LT_OFFSET_9); + *(dst + SM_LT_OFFSET_8) = *(src + SM_LT_OFFSET_8); + /*lint -fallthrough */ + case SM_LT_NUM_1: + *(dst + SM_LT_OFFSET_7) = *(src + SM_LT_OFFSET_7); + *(dst + SM_LT_OFFSET_6) = *(src + SM_LT_OFFSET_6); + *(dst + SM_LT_OFFSET_5) = *(src + SM_LT_OFFSET_5); + *(dst + SM_LT_OFFSET_4) = *(src + SM_LT_OFFSET_4); + /*lint -fallthrough */ + case SM_LT_NUM_0: + *(dst + SM_LT_OFFSET_3) = *(src + SM_LT_OFFSET_3); + *(dst + SM_LT_OFFSET_2) = *(src + SM_LT_OFFSET_2); + *(dst + SM_LT_OFFSET_1) = *(src + SM_LT_OFFSET_1); + *dst = *src; + break; + default: + break; + } +} + +static inline void sm_lt_store_build_req(struct sml_lt_store_req *req, + u8 instance_id, + u8 op_id, u8 ack, + u32 lt_index, + u8 offset, + u8 num, + u16 byte_enb3, + u16 byte_enb2, + u16 byte_enb1, + u8 *data) +{ + sm_lt_build_head(&req->head, instance_id, op_id, ack, offset, num); + req->index = lt_index; + req->index = HTONL(req->index); + req->extra = 0; + req->byte_enb[0] = (u32)(byte_enb3); + req->byte_enb[0] = HTONL(req->byte_enb[0]); + req->byte_enb[1] = HTONL((((u32)byte_enb2) << 16) | byte_enb1); + sml_lt_store_data((u32 *)req->write_data, (u32 *)(void *)data, num); +} + +int hinic3_dbg_lt_rd_16byte(void *hwdev, u8 dest, u8 instance, + u32 lt_index, u8 *data) +{ + struct sml_lt_load_req req; + int ret; + + if (!hwdev) + return -EFAULT; + + if (!COMM_SUPPORT_API_CHAIN((struct hinic3_hwdev *)hwdev)) + return -EPERM; + + sm_lt_load_build_req(&req, instance, SM_LT_LOAD, ACK, lt_index, 0, 0); + + ret = hinic3_api_cmd_read_ack(hwdev, dest, 
(u8 *)(&req), + LT_LOAD16_API_SIZE, (void *)data, 0x10); + if (ret) { + sdk_err(((struct hinic3_hwdev *)hwdev)->dev_hdl, + "Read linear table 16byte failed, err: %d\n", ret); + return -EFAULT; + } + + return 0; +} + +int hinic3_dbg_lt_wr_16byte_mask(void *hwdev, u8 dest, u8 instance, + u32 lt_index, u8 *data, u16 mask) +{ + struct sml_lt_store_req req; + int ret; + + if (!hwdev || !data) + return -EFAULT; + + if (!COMM_SUPPORT_API_CHAIN((struct hinic3_hwdev *)hwdev)) + return -EPERM; + + sm_lt_store_build_req(&req, instance, SM_LT_STORE, NOACK, lt_index, + 0, 0, 0, 0, mask, data); + + ret = hinic3_api_cmd_write_nack(hwdev, dest, &req, LT_STORE16_API_SIZE); + if (ret) { + sdk_err(((struct hinic3_hwdev *)hwdev)->dev_hdl, + "Write linear table 16byte failed, err: %d\n", ret); + return -EFAULT; + } + + return 0; +} + diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_sriov.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_sriov.c new file mode 100644 index 000000000000..b23b69f3dbe7 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_sriov.c @@ -0,0 +1,267 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include <linux/pci.h> +#include <linux/interrupt.h> + +#include "ossl_knl.h" +#include "hinic3_crm.h" +#include "hinic3_hw.h" +#include "hinic3_lld.h" +#include "hinic3_dev_mgmt.h" +#include "hinic3_sriov.h" + +static int hinic3_init_vf_hw(void *hwdev, u16 start_vf_id, u16 end_vf_id) +{ + u16 i, func_idx; + int err; + + /* mbox msg channel resources will be freed during remove process */ + err = hinic3_init_func_mbox_msg_channel(hwdev, + hinic3_func_max_vf(hwdev)); + if (err != 0) + return err; + + /* VFs use 256K as the default wq page size, and it can't be changed */ + for (i = start_vf_id; i <= end_vf_id; i++) { + func_idx = hinic3_glb_pf_vf_offset(hwdev) + i; + err = hinic3_set_wq_page_size(hwdev, func_idx, + HINIC3_DEFAULT_WQ_PAGE_SIZE, + HINIC3_CHANNEL_COMM); + if (err) + return err; + } + + return 0; +} + +static int hinic3_deinit_vf_hw(void *hwdev, u16 start_vf_id, u16 end_vf_id) +{ + u16 func_idx, idx; + + for (idx = start_vf_id; idx <= end_vf_id; idx++) { + func_idx = hinic3_glb_pf_vf_offset(hwdev) + idx; + hinic3_set_wq_page_size(hwdev, func_idx, + HINIC3_HW_WQ_PAGE_SIZE, + HINIC3_CHANNEL_COMM); + } + + return 0; +} + +#if !(defined(HAVE_SRIOV_CONFIGURE) || defined(HAVE_RHEL6_SRIOV_CONFIGURE)) +ssize_t hinic3_sriov_totalvfs_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct pci_dev *pdev = to_pci_dev(dev); + + return sprintf(buf, "%d\n", pci_sriov_get_totalvfs(pdev)); +} + +ssize_t hinic3_sriov_numvfs_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct pci_dev *pdev = to_pci_dev(dev); + + return sprintf(buf, "%d\n", pci_num_vf(pdev)); +} + +/*lint -save -e713*/ +ssize_t hinic3_sriov_numvfs_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct pci_dev *pdev = to_pci_dev(dev); + int ret; + u16 num_vfs; + int cur_vfs, total_vfs; + + ret = kstrtou16(buf, 0, &num_vfs); + if (ret < 0) + return ret; + + cur_vfs = pci_num_vf(pdev); + total_vfs = pci_sriov_get_totalvfs(pdev); + if (num_vfs > total_vfs) + return -ERANGE; + + if (num_vfs == cur_vfs) + return count; /* no change */ + + if (num_vfs == 0) { + /* disable VFs */ + ret = hinic3_pci_sriov_configure(pdev, 0); + if (ret < 0) + return ret; + return count; + } + + /* enable VFs */ + if (cur_vfs) { + nic_warn(&pdev->dev, "%d
VFs already enabled. Disable before enabling %d VFs\n", + cur_vfs, num_vfs); + return -EBUSY; + } + + ret = hinic3_pci_sriov_configure(pdev, num_vfs); + if (ret < 0) + return ret; + + if (ret != num_vfs) + nic_warn(&pdev->dev, "%d VFs requested; only %d enabled\n", + num_vfs, ret); + + return count; +} + +/*lint -restore*/ +#endif /* !(HAVE_SRIOV_CONFIGURE || HAVE_RHEL6_SRIOV_CONFIGURE) */ + +int hinic3_pci_sriov_disable(struct pci_dev *dev) +{ +#ifdef CONFIG_PCI_IOV + struct hinic3_sriov_info *sriov_info = NULL; + struct hinic3_event_info event = {0}; + void *hwdev = NULL; + u16 tmp_vfs; + + sriov_info = hinic3_get_sriov_info_by_pcidev(dev); + hwdev = hinic3_get_hwdev_by_pcidev(dev); + if (!hwdev) { + sdk_err(&dev->dev, "SR-IOV disable is not permitted, please wait...\n"); + return -EPERM; + } + + /* if SR-IOV is already disabled then there is nothing to do */ + if (!sriov_info->sriov_enabled) + return 0; + + if (test_and_set_bit(HINIC3_SRIOV_DISABLE, &sriov_info->state)) { + sdk_err(&dev->dev, "SR-IOV disable already in progress, please wait\n"); + return -EPERM; + } + + /* If our VFs are assigned we cannot shut down SR-IOV + * without causing issues, so just leave the hardware + * available but disabled + */ + if (pci_vfs_assigned(dev)) { + clear_bit(HINIC3_SRIOV_DISABLE, &sriov_info->state); + sdk_warn(&dev->dev, "Unloading driver while VFs are assigned - VFs will not be deallocated\n"); + return -EPERM; + } + + event.service = EVENT_SRV_COMM; + event.type = EVENT_COMM_SRIOV_STATE_CHANGE; + ((struct hinic3_sriov_state_info *)(void *)event.event_data)->enable = 0; + hinic3_event_callback(hwdev, &event); + + sriov_info->sriov_enabled = false; + + /* disable iov and allow time for transactions to clear */ + pci_disable_sriov(dev); + + tmp_vfs = (u16)sriov_info->num_vfs; + sriov_info->num_vfs = 0; + hinic3_deinit_vf_hw(hwdev, 1, tmp_vfs); + + clear_bit(HINIC3_SRIOV_DISABLE, &sriov_info->state); + +#endif + + return 0; +} + +int hinic3_pci_sriov_enable(struct pci_dev *dev, int num_vfs) +{ +#ifdef CONFIG_PCI_IOV + struct hinic3_sriov_info *sriov_info = NULL; + struct hinic3_event_info event = {0}; + void *hwdev = NULL; + int pre_existing_vfs = 0; + int err = 0; + + sriov_info = hinic3_get_sriov_info_by_pcidev(dev); + hwdev = hinic3_get_hwdev_by_pcidev(dev); + if (!hwdev) { + sdk_err(&dev->dev, "SR-IOV enable is not permitted, please wait...\n"); + return -EPERM; + } + + if (test_and_set_bit(HINIC3_SRIOV_ENABLE, &sriov_info->state)) { + sdk_err(&dev->dev, "SR-IOV enable already in progress, please wait, num_vfs %d\n", + num_vfs); + return -EPERM; + } + + pre_existing_vfs = pci_num_vf(dev); + + if (num_vfs > pci_sriov_get_totalvfs(dev)) { + clear_bit(HINIC3_SRIOV_ENABLE, &sriov_info->state); + return -ERANGE; + } + if (pre_existing_vfs && pre_existing_vfs != num_vfs) { + err = hinic3_pci_sriov_disable(dev); + if (err) { + clear_bit(HINIC3_SRIOV_ENABLE, &sriov_info->state); + return err; + } + } else if (pre_existing_vfs == num_vfs) { + clear_bit(HINIC3_SRIOV_ENABLE, &sriov_info->state); + return num_vfs; + } + + err = hinic3_init_vf_hw(hwdev, 1, (u16)num_vfs); + if (err) { + sdk_err(&dev->dev, "Failed to init VF in hardware before enabling SR-IOV, error %d\n", + err); + clear_bit(HINIC3_SRIOV_ENABLE, &sriov_info->state); + return err; + } + + err = pci_enable_sriov(dev, num_vfs); + if (err) { + sdk_err(&dev->dev, "Failed to enable SR-IOV, error %d\n", err); + clear_bit(HINIC3_SRIOV_ENABLE, &sriov_info->state); + return err; + } + + sriov_info->sriov_enabled = true; + sriov_info->num_vfs = num_vfs; + + event.service
= EVENT_SRV_COMM; + event.type = EVENT_COMM_SRIOV_STATE_CHANGE; + ((struct hinic3_sriov_state_info *)(void *)event.event_data)->enable = 1; + ((struct hinic3_sriov_state_info *)(void *)event.event_data)->num_vfs = (u16)num_vfs; + hinic3_event_callback(hwdev, &event); + + clear_bit(HINIC3_SRIOV_ENABLE, &sriov_info->state); + + return num_vfs; +#else + + return 0; +#endif +} + +int hinic3_pci_sriov_configure(struct pci_dev *dev, int num_vfs) +{ + struct hinic3_sriov_info *sriov_info = NULL; + + sriov_info = hinic3_get_sriov_info_by_pcidev(dev); + if (!sriov_info) + return -EFAULT; + + if (!test_bit(HINIC3_FUNC_PERSENT, &sriov_info->state)) + return -EFAULT; + + if (num_vfs == 0) + return hinic3_pci_sriov_disable(dev); + else + return hinic3_pci_sriov_enable(dev, num_vfs); +} + diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_sriov.h b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_sriov.h new file mode 100644 index 000000000000..4a640adf15b4 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_sriov.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC3_SRIOV_H +#define HINIC3_SRIOV_H +#include <linux/types.h> +#include <linux/pci.h> + +#if !(defined(HAVE_SRIOV_CONFIGURE) || defined(HAVE_RHEL6_SRIOV_CONFIGURE)) +ssize_t hinic3_sriov_totalvfs_show(struct device *dev, + struct device_attribute *attr, char *buf); +ssize_t hinic3_sriov_numvfs_show(struct device *dev, + struct device_attribute *attr, char *buf); +ssize_t hinic3_sriov_numvfs_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count); +#endif /* !(HAVE_SRIOV_CONFIGURE || HAVE_RHEL6_SRIOV_CONFIGURE) */ + +enum hinic3_sriov_state { + HINIC3_SRIOV_DISABLE, + HINIC3_SRIOV_ENABLE, + HINIC3_FUNC_PERSENT, +}; + +struct hinic3_sriov_info { + bool sriov_enabled; + unsigned int num_vfs; + unsigned long state; +}; + +struct hinic3_sriov_info *hinic3_get_sriov_info_by_pcidev(struct pci_dev *pdev); +int hinic3_pci_sriov_disable(struct pci_dev *dev); +int hinic3_pci_sriov_enable(struct pci_dev *dev, int num_vfs); +int hinic3_pci_sriov_configure(struct pci_dev *dev, int num_vfs); +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_wq.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_wq.c new file mode 100644 index 000000000000..2f5e0984e429 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_wq.c @@ -0,0 +1,159 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/dma-mapping.h> +#include <linux/device.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/slab.h> +#include <linux/spinlock.h> + +#include "ossl_knl.h" +#include "hinic3_common.h" +#include "hinic3_hwdev.h" +#include "hinic3_wq.h" + +#define WQ_MIN_DEPTH 64 +#define WQ_MAX_DEPTH 65536 +#define WQ_MAX_NUM_PAGES (PAGE_SIZE / sizeof(u64)) + +static int wq_init_wq_block(struct hinic3_wq *wq) +{ + int i; + + if (WQ_IS_0_LEVEL_CLA(wq)) { + wq->wq_block_paddr = wq->wq_pages[0].align_paddr; + wq->wq_block_vaddr = wq->wq_pages[0].align_vaddr; + + return 0; + } + + if (wq->num_wq_pages > WQ_MAX_NUM_PAGES) { + sdk_err(wq->dev_hdl, "num_wq_pages exceeds limit: %zu\n", + WQ_MAX_NUM_PAGES); + return -EFAULT; + } + + wq->wq_block_vaddr = dma_zalloc_coherent(wq->dev_hdl, PAGE_SIZE, + &wq->wq_block_paddr, + GFP_KERNEL); + if (!wq->wq_block_vaddr) {
sdk_err(wq->dev_hdl, "Failed to alloc wq block\n"); + return -ENOMEM; + } + + for (i = 0; i < wq->num_wq_pages; i++) + wq->wq_block_vaddr[i] = + cpu_to_be64(wq->wq_pages[i].align_paddr); + + return 0; +} + +static int wq_alloc_pages(struct hinic3_wq *wq) +{ + int i, page_idx, err; + + wq->wq_pages = kcalloc(wq->num_wq_pages, sizeof(*wq->wq_pages), + GFP_KERNEL); + if (!wq->wq_pages) { + sdk_err(wq->dev_hdl, "Failed to alloc wq pages handle\n"); + return -ENOMEM; + } + + for (page_idx = 0; page_idx < wq->num_wq_pages; page_idx++) { + err = hinic3_dma_zalloc_coherent_align(wq->dev_hdl, + wq->wq_page_size, + wq->wq_page_size, + GFP_KERNEL, + &wq->wq_pages[page_idx]); + if (err) { + sdk_err(wq->dev_hdl, "Failed to alloc wq page\n"); + goto free_wq_pages; + } + } + + err = wq_init_wq_block(wq); + if (err) + goto free_wq_pages; + + return 0; + +free_wq_pages: + for (i = 0; i < page_idx; i++) + hinic3_dma_free_coherent_align(wq->dev_hdl, &wq->wq_pages[i]); + + kfree(wq->wq_pages); + wq->wq_pages = NULL; + + return -ENOMEM; +} + +static void wq_free_pages(struct hinic3_wq *wq) +{ + int i; + + if (!WQ_IS_0_LEVEL_CLA(wq)) + dma_free_coherent(wq->dev_hdl, PAGE_SIZE, wq->wq_block_vaddr, + wq->wq_block_paddr); + + for (i = 0; i < wq->num_wq_pages; i++) + hinic3_dma_free_coherent_align(wq->dev_hdl, &wq->wq_pages[i]); + + kfree(wq->wq_pages); + wq->wq_pages = NULL; +} + +int hinic3_wq_create(void *hwdev, struct hinic3_wq *wq, u32 q_depth, + u16 wqebb_size) +{ + struct hinic3_hwdev *dev = hwdev; + u32 wq_page_size; + + if (!wq || !dev) { + pr_err("Invalid wq or dev_hdl\n"); + return -EINVAL; + } + + if (q_depth < WQ_MIN_DEPTH || q_depth > WQ_MAX_DEPTH || + (q_depth & (q_depth - 1)) || !wqebb_size || + (wqebb_size & (wqebb_size - 1))) { + sdk_err(dev->dev_hdl, "Wq q_depth(%u) or wqebb_size(%u) is invalid\n", + q_depth, wqebb_size); + return -EINVAL; + } + + wq_page_size = ALIGN(dev->wq_page_size, PAGE_SIZE); + + memset(wq, 0, sizeof(*wq)); + wq->dev_hdl = dev->dev_hdl; + wq->q_depth = q_depth; + wq->idx_mask = (u16)(q_depth - 1); + wq->wqebb_size = wqebb_size; + wq->wqebb_size_shift = (u16)ilog2(wq->wqebb_size); + wq->wq_page_size = wq_page_size; + + wq->wqebbs_per_page = wq_page_size / wqebb_size; + /* In case wq_page_size is larger than q_depth * wqebb_size */ + if (wq->wqebbs_per_page > q_depth) + wq->wqebbs_per_page = q_depth; + wq->wqebbs_per_page_shift = (u16)ilog2(wq->wqebbs_per_page); + wq->wqebbs_per_page_mask = (u16)(wq->wqebbs_per_page - 1); + wq->num_wq_pages = (u16)(ALIGN(((u32)q_depth * wqebb_size), + wq_page_size) / wq_page_size); + + return wq_alloc_pages(wq); +} +EXPORT_SYMBOL(hinic3_wq_create); + +void hinic3_wq_destroy(struct hinic3_wq *wq) +{ + if (!wq) + return; + + wq_free_pages(wq); +} +EXPORT_SYMBOL(hinic3_wq_destroy); diff --git a/drivers/net/ethernet/huawei/hinic3/hw/ossl_knl_linux.c b/drivers/net/ethernet/huawei/hinic3/hw/ossl_knl_linux.c new file mode 100644 index 000000000000..8b1cc08bb122 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hw/ossl_knl_linux.c @@ -0,0 +1,533 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#include <linux/vmalloc.h> +#include "ossl_knl_linux.h" + +#define OSSL_MINUTE_BASE (60) + +#if (KERNEL_VERSION(2, 6, 36) > LINUX_VERSION_CODE) +#ifdef __LINX_6_0_60__ +unsigned int _work_busy(struct work_struct *work) +{ + if (work_pending(work)) + return WORK_BUSY_PENDING; + else + return WORK_BUSY_RUNNING; +} +#endif /* work_busy */ +#endif + +#if (KERNEL_VERSION(3, 4, 0) > LINUX_VERSION_CODE) +void
_kc_skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, + int off, int size, unsigned int truesize) +{ + skb_fill_page_desc(skb, i, page, off, size); + skb->len += size; + skb->data_len += size; + skb->truesize += truesize; +} + +#endif /* < 3.4.0 */ +#if (KERNEL_VERSION(3, 8, 0) > LINUX_VERSION_CODE) +/* + * pci_sriov_get_totalvfs - get total VFs supported on this device + * @dev: the PCI PF device + * + * For a PCIe device with SRIOV support, return the PCIe + * SRIOV capability value of TotalVFs. Otherwise 0. + */ +int pci_sriov_get_totalvfs(struct pci_dev *dev) +{ + int sriov_cap_pos; + u16 total_vfs = 0; + + if (dev->is_virtfn) + return 0; + + sriov_cap_pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV); + if (!sriov_cap_pos) + return 0; + + pci_read_config_word(dev, sriov_cap_pos + PCI_SRIOV_TOTAL_VF, + &total_vfs); + + return total_vfs; +} + +#endif +#if (KERNEL_VERSION(3, 10, 0) > LINUX_VERSION_CODE) +/* + * pci_vfs_assigned - returns the number of VFs assigned to a guest + * @dev: the PCI device + * + * Returns the number of VFs belonging to this device that are assigned to a + * guest. If the device is not a physical function, returns 0. + */ +int pci_vfs_assigned(struct pci_dev *dev) +{ + unsigned int vfs_assigned = 0; +#ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED + struct pci_dev *vfdev; + unsigned short dev_id = 0; + int sriov_cap_pos; + + /* only search if we are a PF. */ + if (dev->is_virtfn) + return 0; + + /* determine the device ID for the VFs, the vendor ID will be the + * same as the PF so there is no need to check for that one. + */ + sriov_cap_pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV); + if (!sriov_cap_pos) + return 0; + + pci_read_config_word(dev, sriov_cap_pos + PCI_SRIOV_VF_DID, &dev_id); + + /* loop through all the VFs to see if we own any that are assigned. */ + vfdev = pci_get_device(dev->vendor, dev_id, NULL); + while (vfdev) { + /* It is considered assigned if it is a virtual function with + * our dev as the physical function and the assigned bit is set.
+ */ + if (vfdev->is_virtfn && vfdev->physfn == dev && + (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)) + vfs_assigned++; + + vfdev = pci_get_device(dev->vendor, dev_id, vfdev); + } +#endif /* HAVE_PCI_DEV_FLAGS_ASSIGNED */ + + return (int)vfs_assigned; +} + +#endif /* 3.10.0 */ +#if (KERNEL_VERSION(3, 13, 0) > LINUX_VERSION_CODE) +int kc_dma_set_mask_and_coherent(struct device *dev, u64 mask) +{ + int ret; + + ret = dma_set_mask(dev, mask); + if (!ret) + ret = dma_set_coherent_mask(dev, mask); + + return ret; +} + +void kc_netdev_rss_key_fill(void *buffer, size_t buf_len) +{ + /* Set of random keys generated using kernel random number generator */ + static const u8 seed[NETDEV_RSS_KEY_LEN] = {0xE6, 0xFA, 0x35, 0x62, 0x95, + 0x12, 0x3E, 0xA3, 0xFB, 0x46, 0xC1, 0x5F, 0xB1, + 0x43, 0x82, 0x5B, 0x6A, 0x49, 0x50, 0x95, 0xCD, + 0xAB, 0xD8, 0x11, 0x8F, 0xC5, 0xBD, 0xBC, 0x6A, + 0x4A, 0xB2, 0xD4, 0x1F, 0xFE, 0xBC, 0x41, 0xBF, + 0xAC, 0xB2, 0x9A, 0x8F, 0x70, 0xE9, 0x2A, 0xD7, + 0xB2, 0x80, 0xB6, 0x5B, 0xAA, 0x9D, 0x20}; + + WARN_ON(buf_len > NETDEV_RSS_KEY_LEN); /*lint !e506 !e522*/ + memcpy(buffer, seed, buf_len); +} + +#endif /* 3.13.0 */ +#if (KERNEL_VERSION(3, 14, 0) > LINUX_VERSION_CODE) +int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, + int min_vec, int max_vec) +{ + int nvec = max_vec; + int ret; + + if (max_vec < min_vec) + return -ERANGE; + + do { + ret = pci_enable_msix(dev, entries, nvec); + if (ret < 0) { + return ret; + } else if (ret > 0) { + if (ret < min_vec) + return -ENOSPC; + nvec = ret; + } + } while (ret != 0); + + return nvec; +} + +#endif +#if (KERNEL_VERSION(3, 16, 0) > LINUX_VERSION_CODE) +#ifdef HAVE_SET_RX_MODE +#ifdef NETDEV_HW_ADDR_T_UNICAST +int kc_hw_addr_sync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*sync)(struct net_device *, const unsigned char *), + int (*unsync)(struct net_device *, + const unsigned char *)) +{ + struct netdev_hw_addr *tmp; + struct netdev_hw_addr *ha; + int err; + + /* first go through and flush out any stale entries. */ + list_for_each_entry_safe(ha, tmp, &list->list, list) { +#if (KERNEL_VERSION(3, 10, 0) > LINUX_VERSION_CODE) + if (!ha->synced || ha->refcount != 1) +#else + if (!ha->sync_cnt || ha->refcount != 1) +#endif + continue; + + if (unsync && unsync(dev, ha->addr)) + continue; + + list_del_rcu(&ha->list); + kfree_rcu(ha, rcu_head); /*lint !e78 !e530*/ + list->count--; + } + + /* go through and sync new entries to the list. 
*/ + list_for_each_entry_safe(ha, tmp, &list->list, list) { +#if (KERNEL_VERSION(3, 10, 0) > LINUX_VERSION_CODE) + if (ha->synced != 0) +#else + if (ha->sync_cnt != 0) +#endif + continue; + + err = sync(dev, ha->addr); + if (err != 0) + return err; +#if (KERNEL_VERSION(3, 10, 0) > LINUX_VERSION_CODE) + ha->synced = true; +#else + ha->sync_cnt++; +#endif + ha->refcount++; + } + + return 0; +} + +void kc_hw_addr_unsync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*unsync)(struct net_device *, + const unsigned char *)) +{ + struct netdev_hw_addr *tmp; + struct netdev_hw_addr *ha; + + list_for_each_entry_safe(ha, tmp, &list->list, list) { +#if (KERNEL_VERSION(3, 10, 0) > LINUX_VERSION_CODE) + if (!ha->synced) +#else + if (!ha->sync_cnt) +#endif + continue; + + if (unsync && unsync(dev, ha->addr)) + continue; + +#if (KERNEL_VERSION(3, 10, 0) > LINUX_VERSION_CODE) + ha->synced = false; +#else + ha->sync_cnt--; +#endif + if (--ha->refcount != 0) + continue; + + list_del_rcu(&ha->list); + kfree_rcu(ha, rcu_head); /*lint !e78 !e530*/ + list->count--; + } +} + +#endif /* NETDEV_HW_ADDR_T_UNICAST */ +#ifndef NETDEV_HW_ADDR_T_MULTICAST +int kc_dev_addr_sync_dev(struct dev_addr_list **list, int *count, struct net_device *dev, + int (*sync)(struct net_device *, const unsigned char *), + int (*unsync)(struct net_device *, const unsigned char *)) +{ + struct dev_addr_list **next_addr = list; + struct dev_addr_list *dev_addr; + int ret; + + /* first go through and flush out any stale entries. */ + while ((dev_addr = *next_addr)) { + if (dev_addr->da_synced && dev_addr->da_users == 1) { + if (!unsync || !unsync(dev, dev_addr->da_addr)) { + *next_addr = dev_addr->next; + kfree(dev_addr); + (*count)--; + continue; + } + } + next_addr = &dev_addr->next; + } + + /* go through and sync new entries to the list.
*/ + for (dev_addr = *list; dev_addr; dev_addr = dev_addr->next) { + if (dev_addr->da_synced) + continue; + + ret = sync(dev, dev_addr->da_addr); + if (ret != 0) + return ret; + + dev_addr->da_synced++; + dev_addr->da_users++; + } + + return 0; +} + +void kc_dev_addr_unsync_dev(struct dev_addr_list **list, int *count, struct net_device *dev, + int (*unsync)(struct net_device *, const unsigned char *)) +{ + struct dev_addr_list *dev_addr; + + while ((dev_addr = *list) != NULL) { + if (!dev_addr->da_synced) { + list = &dev_addr->next; + continue; + } + + if (!unsync || !unsync(dev, dev_addr->da_addr)) { + dev_addr->da_synced--; + if (--dev_addr->da_users == 0) { + *list = dev_addr->next; + kfree(dev_addr); + (*count)--; + continue; + } + } + + list = &dev_addr->next; + } +} +#endif /* NETDEV_HW_ADDR_T_MULTICAST ops */ +#endif /* HAVE_SET_RX_MODE ops */ + +#endif /* 3.16.0 */ +#if (KERNEL_VERSION(3, 19, 0) > LINUX_VERSION_CODE) +#ifdef HAVE_NET_GET_RANDOM_ONCE +static u8 __kc_netdev_rss_key[NETDEV_RSS_KEY_LEN]; + +void kc_netdev_rss_key_fill(void *buffer, size_t buf_len) +{ + WARN_ON(buf_len > sizeof(__kc_netdev_rss_key)); + net_get_random_once(__kc_netdev_rss_key, sizeof(__kc_netdev_rss_key)); + memcpy(buffer, __kc_netdev_rss_key, buf_len); +} + +#endif +#endif + +#if (KERNEL_VERSION(3, 19, 0) > LINUX_VERSION_CODE) +#if !(RHEL_RELEASE_CODE && (RHEL_RELEASE_VERSION(7, 1) < RHEL_RELEASE_CODE || \ + RHEL_RELEASE_VERSION(6, 8) == RHEL_RELEASE_CODE || \ + RHEL_RELEASE_VERSION(6, 9) == RHEL_RELEASE_CODE)) +__be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type, int *next_depth) +{ + unsigned int depth = skb->mac_len; + + /* if type is 802.1Q/AD then the header should already be + * present at mac_len - VLAN_HLEN (if mac_len > 0), or at + * ETH_HLEN otherwise + */ + if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) { + if (depth) { + if (WARN_ON(depth < VLAN_HLEN)) + return 0; + depth -= VLAN_HLEN; + } else { + depth = ETH_HLEN; + } + do { + struct vlan_hdr *vh; + + if (unlikely(!pskb_may_pull(skb, + depth + VLAN_HLEN))) + return 0; + + vh = (struct vlan_hdr *)(skb->data + depth); + type = vh->h_vlan_encapsulated_proto; + depth += VLAN_HLEN; + } while (type == htons(ETH_P_8021Q) || + type == htons(ETH_P_8021AD)); + } + + if (next_depth) + *next_depth = (int)depth; + + return type; +} +#endif +#endif /* < 3.19.0 */ + +#if (KERNEL_VERSION(4, 1, 0) > LINUX_VERSION_CODE) +/*lint -e713 -e666 -e533 -e506*/ +unsigned int cpumask_local_spread(unsigned int i, int node) +{ + int cpu; + unsigned int num = i; + + /* Wrap: we always want a cpu. */ + num %= (unsigned int)num_online_cpus(); + + if (node == -1) { + for_each_cpu(cpu, cpu_online_mask) { + if (num-- == 0) + return (unsigned int)cpu; + } + } else { + /* NUMA first. */ + for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask) { + if (num-- == 0) + return (unsigned int)cpu; + } + + for_each_cpu(cpu, cpu_online_mask) { + /* Skip NUMA nodes, done above. 
*/ + if (cpumask_test_cpu(cpu, cpumask_of_node(node)) != 0) + continue; + + if (num-- == 0) + return (unsigned int)cpu; + } + } + + WARN_ON(num != 0); /*lint !e522*/ + return 0; +} /*lint +e713 +e666 +e533 +e506*/ + +#endif +struct file *file_creat(const char *file_name) +{ + return filp_open(file_name, O_CREAT | O_RDWR | O_APPEND, 0); +} + +struct file *file_open(const char *file_name) +{ + return filp_open(file_name, O_RDONLY, 0); +} + +void file_close(struct file *file_handle) +{ + (void)filp_close(file_handle, NULL); +} + +u32 get_file_size(struct file *file_handle) +{ + struct inode *file_inode; + +#if (KERNEL_VERSION(3, 19, 0) > LINUX_VERSION_CODE) + file_inode = file_handle->f_dentry->d_inode; +#else + file_inode = file_handle->f_inode; +#endif + + return (u32)(file_inode->i_size); +} + +void set_file_position(struct file *file_handle, u32 position) +{ + file_handle->f_pos = position; +} + +int file_read(struct file *file_handle, char *log_buffer, u32 rd_length, + u32 *file_pos) +{ +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + return (int)file_handle->f_op->read(file_handle, log_buffer, rd_length, + &file_handle->f_pos); +#elif (KERNEL_VERSION(4, 14, 0) > LINUX_VERSION_CODE) + return (int)vfs_read(file_handle, log_buffer, rd_length, + &file_handle->f_pos); +#else + return (int)kernel_read(file_handle, log_buffer, rd_length, + &file_handle->f_pos); + +#endif +} + +u32 file_write(struct file *file_handle, const char *log_buffer, u32 wr_length) +{ +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + return (u32)file_handle->f_op->write(file_handle, log_buffer, + wr_length, &file_handle->f_pos); +#elif (KERNEL_VERSION(4, 14, 0) > LINUX_VERSION_CODE) + return (u32)vfs_write(file_handle, + (__force const char __user *)log_buffer, + wr_length, &file_handle->f_pos); +#else + return (u32)kernel_write(file_handle, log_buffer, wr_length, + &file_handle->f_pos); + +#endif +} + +static int _linux_thread_func(void *thread) +{ + struct sdk_thread_info *info = (struct sdk_thread_info *)thread; + + while (!kthread_should_stop()) + info->thread_fn(info->data); + + return 0; +} + +int creat_thread(struct sdk_thread_info *thread_info) +{ + thread_info->thread_obj = kthread_run(_linux_thread_func, thread_info, + thread_info->name); + if (IS_ERR(thread_info->thread_obj)) { + /* kthread_run() returns an ERR_PTR on failure, never NULL */ + thread_info->thread_obj = NULL; + return -EFAULT; + } + + return 0; +} + +void stop_thread(struct sdk_thread_info *thread_info) +{ + if (thread_info->thread_obj) + (void)kthread_stop(thread_info->thread_obj); +} + +void utctime_to_localtime(u64 utctime, u64 *localtime) +{ + *localtime = utctime - sys_tz.tz_minuteswest * + OSSL_MINUTE_BASE; /*lint !e647*/ +} + +#ifndef HAVE_TIMER_SETUP +void initialize_timer(const void *adapter_hdl, struct timer_list *timer) +{ + if (!adapter_hdl || !timer) + return; + + init_timer(timer); +} +#endif + +void add_to_timer(struct timer_list *timer, long period) +{ + if (!timer) + return; + + add_timer(timer); +} + +void stop_timer(struct timer_list *timer) {} + +void delete_timer(struct timer_list *timer) +{ + if (!timer) + return; + + del_timer_sync(timer); +} + +u64 ossl_get_real_time(void) +{ + struct timeval tv = {0}; + u64 tv_msec; + + do_gettimeofday(&tv); + + tv_msec = (u64)tv.tv_sec * MSEC_PER_SEC + (u64)tv.tv_usec / USEC_PER_MSEC; + return tv_msec; +}
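
For reviewers tracing the SR-IOV control flow in hinic3_sriov.c: on kernels that provide the .sriov_configure hook in struct pci_driver (the HAVE_SRIOV_CONFIGURE case), hinic3_pci_sriov_configure() is the single entry point the PCI core invokes when userspace writes to the sriov_numvfs sysfs attribute, and the open-coded show/store attributes exist only for kernels without that hook. A minimal sketch of the wiring (the probe/remove callbacks and the id table are illustrative placeholders, not this driver's actual symbols, which live in hinic3_lld.c outside this excerpt):

static struct pci_driver hinic3_sketch_driver = {
	.name            = "hinic3",
	.id_table        = hinic3_sketch_id_table, /* placeholder */
	.probe           = hinic3_sketch_probe,    /* placeholder */
	.remove          = hinic3_sketch_remove,   /* placeholder */
	/* The PCI core calls this for "echo N > sriov_numvfs"; N > 0 lands
	 * in hinic3_pci_sriov_enable() and 0 in hinic3_pci_sriov_disable().
	 */
	.sriov_configure = hinic3_pci_sriov_configure,
};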
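
Similarly, the shift/mask fields that hinic3_wq_create() precomputes in hinic3_wq.c exist so that WQEBB lookups need no division: a wrapped index splits into a page number and an offset within that page. A sketch of that address math under the invariants established there (illustrative only; the driver's real accessor is in hinic3_wq.h, which is not part of this patch excerpt):

/* Illustrative helper, not the driver's actual accessor: resolve a WQEBB
 * index to its virtual address using the precomputed shift/mask fields.
 */
static void *wq_get_wqebb_sketch(struct hinic3_wq *wq, u16 prod_idx)
{
	u16 idx = prod_idx & wq->idx_mask;         /* wrap within q_depth */
	u16 pg = idx >> wq->wqebbs_per_page_shift; /* which DMA page */
	u16 off = idx & wq->wqebbs_per_page_mask;  /* WQEBB within that page */

	return (u8 *)wq->wq_pages[pg].align_vaddr +
	       ((u32)off << wq->wqebb_size_shift);
}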