From: Chenguangli <chenguangli2(a)huawei.com>
driver inclusion
category: feature
bugzilla: NA
-----------------------------------------------------------------------
This module includes the cfg, cqm, hwdev, hwif, mgmt and sml components,
which are mainly used to initialize the chip capabilities and to initialize
the resources for communication between the driver and the chip.
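
For reference, a minimal usage sketch of the API command chain helpers
declared in hifc_api_cmd.h (illustrative only, not part of this patch; it
assumes an already initialized struct hifc_hwdev *hwdev, and the destination
node id and command buffer below are placeholders):

    struct hifc_api_cmd_chain *chains[HIFC_API_CMD_MAX];
    u8 cmd_buf[64] = { 0 };     /* placeholder command payload */
    enum hifc_node_id dest = 0; /* placeholder destination node id */
    int err;

    err = hifc_api_cmd_init(hwdev, chains);
    if (err)
            return err;

    /* synchronous write to the management CPU */
    err = hifc_api_cmd_write(chains[HIFC_API_CMD_WRITE_TO_MGMT_CPU],
                             dest, cmd_buf, sizeof(cmd_buf));

    hifc_api_cmd_free(chains);
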
Signed-off-by: Chenguangli <chenguangli2(a)huawei.com>
Acked-by: Hanjun Guo <guohanjun(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/scsi/huawei/hifc/hifc_api_cmd.c | 1155 ++++++
drivers/scsi/huawei/hifc/hifc_api_cmd.h | 268 ++
drivers/scsi/huawei/hifc/hifc_cfg.c | 823 +++++
drivers/scsi/huawei/hifc/hifc_cfg.h | 171 +
drivers/scsi/huawei/hifc/hifc_cmdq.c | 1507 ++++++++
drivers/scsi/huawei/hifc/hifc_cmdq.h | 210 ++
drivers/scsi/huawei/hifc/hifc_cqm_main.c | 694 ++++
drivers/scsi/huawei/hifc/hifc_cqm_main.h | 366 ++
drivers/scsi/huawei/hifc/hifc_cqm_object.c | 3599 +++++++++++++++++++
drivers/scsi/huawei/hifc/hifc_cqm_object.h | 244 ++
drivers/scsi/huawei/hifc/hifc_eqs.c | 1347 +++++++
drivers/scsi/huawei/hifc/hifc_eqs.h | 233 ++
drivers/scsi/huawei/hifc/hifc_hw.h | 611 ++++
drivers/scsi/huawei/hifc/hifc_hwdev.c | 3675 ++++++++++++++++++++
drivers/scsi/huawei/hifc/hifc_hwdev.h | 456 +++
drivers/scsi/huawei/hifc/hifc_hwif.c | 630 ++++
drivers/scsi/huawei/hifc/hifc_hwif.h | 243 ++
drivers/scsi/huawei/hifc/hifc_mgmt.c | 1426 ++++++++
drivers/scsi/huawei/hifc/hifc_mgmt.h | 407 +++
drivers/scsi/huawei/hifc/hifc_sml.c | 361 ++
drivers/scsi/huawei/hifc/hifc_sml.h | 183 +
drivers/scsi/huawei/hifc/hifc_wq.c | 624 ++++
drivers/scsi/huawei/hifc/hifc_wq.h | 165 +
23 files changed, 19398 insertions(+)
create mode 100644 drivers/scsi/huawei/hifc/hifc_api_cmd.c
create mode 100644 drivers/scsi/huawei/hifc/hifc_api_cmd.h
create mode 100644 drivers/scsi/huawei/hifc/hifc_cfg.c
create mode 100644 drivers/scsi/huawei/hifc/hifc_cfg.h
create mode 100644 drivers/scsi/huawei/hifc/hifc_cmdq.c
create mode 100644 drivers/scsi/huawei/hifc/hifc_cmdq.h
create mode 100644 drivers/scsi/huawei/hifc/hifc_cqm_main.c
create mode 100644 drivers/scsi/huawei/hifc/hifc_cqm_main.h
create mode 100644 drivers/scsi/huawei/hifc/hifc_cqm_object.c
create mode 100644 drivers/scsi/huawei/hifc/hifc_cqm_object.h
create mode 100644 drivers/scsi/huawei/hifc/hifc_eqs.c
create mode 100644 drivers/scsi/huawei/hifc/hifc_eqs.h
create mode 100644 drivers/scsi/huawei/hifc/hifc_hw.h
create mode 100644 drivers/scsi/huawei/hifc/hifc_hwdev.c
create mode 100644 drivers/scsi/huawei/hifc/hifc_hwdev.h
create mode 100644 drivers/scsi/huawei/hifc/hifc_hwif.c
create mode 100644 drivers/scsi/huawei/hifc/hifc_hwif.h
create mode 100644 drivers/scsi/huawei/hifc/hifc_mgmt.c
create mode 100644 drivers/scsi/huawei/hifc/hifc_mgmt.h
create mode 100644 drivers/scsi/huawei/hifc/hifc_sml.c
create mode 100644 drivers/scsi/huawei/hifc/hifc_sml.h
create mode 100644 drivers/scsi/huawei/hifc/hifc_wq.c
create mode 100644 drivers/scsi/huawei/hifc/hifc_wq.h
diff --git a/drivers/scsi/huawei/hifc/hifc_api_cmd.c b/drivers/scsi/huawei/hifc/hifc_api_cmd.c
new file mode 100644
index 000000000000..22632f779582
--- /dev/null
+++ b/drivers/scsi/huawei/hifc/hifc_api_cmd.c
@@ -0,0 +1,1155 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei Hifc PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/completion.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/semaphore.h>
+#include <linux/jiffies.h>
+#include <linux/delay.h>
+
+#include "hifc_knl_adp.h"
+#include "hifc_hw.h"
+#include "hifc_hwdev.h"
+#include "hifc_hwif.h"
+#include "hifc_api_cmd.h"
+
+#define API_CMD_CHAIN_CELL_SIZE_SHIFT 6U
+
+#define API_CMD_CELL_DESC_SIZE 8
+#define API_CMD_CELL_DATA_ADDR_SIZE 8
+
+#define API_CHAIN_NUM_CELLS 32
+#define API_CHAIN_CELL_SIZE 128
+#define API_CHAIN_RSP_DATA_SIZE 128
+
+#define API_CMD_CELL_WB_ADDR_SIZE 8
+
+#define API_CHAIN_CELL_ALIGNMENT 8
+
+#define API_CMD_TIMEOUT 10000
+#define API_CMD_STATUS_TIMEOUT 100000
+
+#define API_CMD_BUF_SIZE 2048ULL
+
+#define API_CMD_NODE_ALIGN_SIZE 512ULL
+#define API_PAYLOAD_ALIGN_SIZE 64ULL
+
+#define API_CHAIN_RESP_ALIGNMENT 64ULL
+
+#define COMPLETION_TIMEOUT_DEFAULT 1000UL
+#define POLLING_COMPLETION_TIMEOUT_DEFAULT 1000U
+
+#define API_CMD_RESPONSE_DATA_PADDR(val) be64_to_cpu(*((u64 *)(val)))
+
+#define READ_API_CMD_PRIV_DATA(id, token) (((id) << 16) + (token))
+#define WRITE_API_CMD_PRIV_DATA(id) (((u8)(id)) << 16)
+
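+/* The chain is a ring: num_cells is a power of two, so producer/consumer
+ * indexes wrap around via masking.
+ */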
+#define MASKED_IDX(chain, idx) ((idx) & ((chain)->num_cells - 1))
+
+#define SIZE_4BYTES(size) (ALIGN((u32)(size), 4U) >> 2)
+#define SIZE_8BYTES(size) (ALIGN((u32)(size), 8U) >> 3)
+
+enum api_cmd_data_format {
+ SGL_DATA = 1,
+};
+
+enum api_cmd_type {
+ API_CMD_WRITE_TYPE = 0,
+ API_CMD_READ_TYPE = 1,
+};
+
+enum api_cmd_bypass {
+ NOT_BYPASS = 0,
+ BYPASS = 1,
+};
+
+enum api_cmd_resp_aeq {
+ NOT_TRIGGER = 0,
+ TRIGGER = 1,
+};
+
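+/* XOR of the low 7 bytes of a 64-bit HW word; callers store the result in
+ * the checksum field that occupies the top byte.
+ */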
+static u8 xor_chksum_set(void *data)
+{
+ int idx;
+ u8 checksum = 0;
+ u8 *val = data;
+
+ for (idx = 0; idx < 7; idx++)
+ checksum ^= val[idx];
+
+ return checksum;
+}
+
+static void set_prod_idx(struct hifc_api_cmd_chain *chain)
+{
+ enum hifc_api_cmd_chain_type chain_type = chain->chain_type;
+ struct hifc_hwif *hwif = chain->hwdev->hwif;
+ u32 hw_prod_idx_addr = HIFC_CSR_API_CMD_CHAIN_PI_ADDR(chain_type);
+ u32 prod_idx = chain->prod_idx;
+
+ hifc_hwif_write_reg(hwif, hw_prod_idx_addr, prod_idx);
+}
+
+static u32 get_hw_cons_idx(struct hifc_api_cmd_chain *chain)
+{
+ u32 addr, val;
+
+ addr = HIFC_CSR_API_CMD_STATUS_0_ADDR(chain->chain_type);
+ val = hifc_hwif_read_reg(chain->hwdev->hwif, addr);
+
+ return HIFC_API_CMD_STATUS_GET(val, CONS_IDX);
+}
+
+static void dump_api_chain_reg(struct hifc_api_cmd_chain *chain)
+{
+ void *dev = chain->hwdev->dev_hdl;
+ u32 addr, val;
+
+ addr = HIFC_CSR_API_CMD_STATUS_0_ADDR(chain->chain_type);
+ val = hifc_hwif_read_reg(chain->hwdev->hwif, addr);
+
+ sdk_err(dev, "Chain type: 0x%x, cpld error: 0x%x, check error: 0x%x, current fsm: 0x%x\n",
+ chain->chain_type, HIFC_API_CMD_STATUS_GET(val, CPLD_ERR),
+ HIFC_API_CMD_STATUS_GET(val, CHKSUM_ERR),
+ HIFC_API_CMD_STATUS_GET(val, FSM));
+
+ sdk_err(dev, "Chain hw current ci: 0x%x\n",
+ HIFC_API_CMD_STATUS_GET(val, CONS_IDX));
+
+ addr = HIFC_CSR_API_CMD_CHAIN_PI_ADDR(chain->chain_type);
+ val = hifc_hwif_read_reg(chain->hwdev->hwif, addr);
+ sdk_err(dev, "Chain hw current pi: 0x%x\n", val);
+}
+
+/**
+ * chain_busy - check if the chain is still processing last requests
+ * @chain: chain to check
+ * Return: 0 - success, negative - failure
+ **/
+static int chain_busy(struct hifc_api_cmd_chain *chain)
+{
+ void *dev = chain->hwdev->dev_hdl;
+ struct hifc_api_cmd_cell_ctxt *ctxt;
+ u64 resp_header;
+
+ ctxt = &chain->cell_ctxt[chain->prod_idx];
+
+ switch (chain->chain_type) {
+ case HIFC_API_CMD_MULTI_READ:
+ case HIFC_API_CMD_POLL_READ:
+ resp_header = be64_to_cpu(ctxt->resp->header);
+ if (ctxt->status &&
+ !HIFC_API_CMD_RESP_HEADER_VALID(resp_header)) {
+			sdk_err(dev, "Context(0x%x) busy, pi: %d, resp_header: 0x%08x%08x\n",
+ ctxt->status, chain->prod_idx,
+ upper_32_bits(resp_header),
+ lower_32_bits(resp_header));
+ dump_api_chain_reg(chain);
+ return -EBUSY;
+ }
+ break;
+ case HIFC_API_CMD_POLL_WRITE:
+ case HIFC_API_CMD_WRITE_TO_MGMT_CPU:
+ case HIFC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU:
+ chain->cons_idx = get_hw_cons_idx(chain);
+
+ if (chain->cons_idx == MASKED_IDX(chain, chain->prod_idx + 1)) {
+ sdk_err(dev, "API CMD chain %d is busy, cons_idx = %d, prod_idx = %d\n",
+ chain->chain_type, chain->cons_idx,
+ chain->prod_idx);
+ dump_api_chain_reg(chain);
+ return -EBUSY;
+ }
+ break;
+ default:
+ sdk_err(dev, "Unknown Chain type %d\n", chain->chain_type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * get_cell_data_size - get the data size of specific cell type
+ * @type: chain type
+ * @cmd_size: the command size
+ * Return: cell_data_size
+ **/
+static u16 get_cell_data_size(enum hifc_api_cmd_chain_type type, u16 cmd_size)
+{
+ u16 cell_data_size = 0;
+
+ switch (type) {
+ case HIFC_API_CMD_POLL_READ:
+ cell_data_size = ALIGN(API_CMD_CELL_DESC_SIZE +
+ API_CMD_CELL_WB_ADDR_SIZE +
+ API_CMD_CELL_DATA_ADDR_SIZE,
+ API_CHAIN_CELL_ALIGNMENT);
+ break;
+
+ case HIFC_API_CMD_WRITE_TO_MGMT_CPU:
+ case HIFC_API_CMD_POLL_WRITE:
+ case HIFC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU:
+ cell_data_size = ALIGN(API_CMD_CELL_DESC_SIZE +
+ API_CMD_CELL_DATA_ADDR_SIZE,
+ API_CHAIN_CELL_ALIGNMENT);
+ break;
+ default:
+ break;
+ }
+
+ return cell_data_size;
+}
+
+/**
+ * prepare_cell_ctrl - prepare the ctrl of the cell for the command
+ * @cell_ctrl: the control of the cell to set the control into it
+ * @cell_len: the size of the cell
+ **/
+static void prepare_cell_ctrl(u64 *cell_ctrl, u16 cell_len)
+{
+ u64 ctrl;
+ u8 chksum;
+
+ ctrl = HIFC_API_CMD_CELL_CTRL_SET(SIZE_8BYTES(cell_len), CELL_LEN) |
+ HIFC_API_CMD_CELL_CTRL_SET(0ULL, RD_DMA_ATTR_OFF) |
+ HIFC_API_CMD_CELL_CTRL_SET(0ULL, WR_DMA_ATTR_OFF);
+
+ chksum = xor_chksum_set(&ctrl);
+
+ ctrl |= HIFC_API_CMD_CELL_CTRL_SET(chksum, XOR_CHKSUM);
+
+ /* The data in the HW should be in Big Endian Format */
+ *cell_ctrl = cpu_to_be64(ctrl);
+}
+
+/**
+ * prepare_api_cmd - prepare API CMD command
+ * @chain: chain for the command
+ * @cell: the cell of the command
+ * @dest: destination node on the card that will receive the command
+ * @cmd: command data
+ * @cmd_size: the command size
+ **/
+static void prepare_api_cmd(struct hifc_api_cmd_chain *chain,
+ struct hifc_api_cmd_cell *cell,
+ enum hifc_node_id dest,
+ const void *cmd, u16 cmd_size)
+{
+ struct hifc_api_cmd_cell_ctxt *cell_ctxt;
+ u32 priv;
+
+ cell_ctxt = &chain->cell_ctxt[chain->prod_idx];
+
+ switch (chain->chain_type) {
+ case HIFC_API_CMD_POLL_READ:
+ priv = READ_API_CMD_PRIV_DATA(chain->chain_type,
+ cell_ctxt->saved_prod_idx);
+ cell->desc = HIFC_API_CMD_DESC_SET(SGL_DATA, API_TYPE) |
+ HIFC_API_CMD_DESC_SET(API_CMD_READ_TYPE, RD_WR) |
+ HIFC_API_CMD_DESC_SET(BYPASS, MGMT_BYPASS) |
+ HIFC_API_CMD_DESC_SET(NOT_TRIGGER, RESP_AEQE_EN) |
+ HIFC_API_CMD_DESC_SET(priv, PRIV_DATA);
+ break;
+ case HIFC_API_CMD_POLL_WRITE:
+ priv = WRITE_API_CMD_PRIV_DATA(chain->chain_type);
+ cell->desc = HIFC_API_CMD_DESC_SET(SGL_DATA, API_TYPE) |
+ HIFC_API_CMD_DESC_SET(API_CMD_WRITE_TYPE, RD_WR) |
+ HIFC_API_CMD_DESC_SET(BYPASS, MGMT_BYPASS) |
+ HIFC_API_CMD_DESC_SET(NOT_TRIGGER, RESP_AEQE_EN) |
+ HIFC_API_CMD_DESC_SET(priv, PRIV_DATA);
+ break;
+ case HIFC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU:
+ case HIFC_API_CMD_WRITE_TO_MGMT_CPU:
+ priv = WRITE_API_CMD_PRIV_DATA(chain->chain_type);
+ cell->desc = HIFC_API_CMD_DESC_SET(SGL_DATA, API_TYPE) |
+ HIFC_API_CMD_DESC_SET(API_CMD_WRITE_TYPE, RD_WR) |
+ HIFC_API_CMD_DESC_SET(NOT_BYPASS, MGMT_BYPASS) |
+ HIFC_API_CMD_DESC_SET(TRIGGER, RESP_AEQE_EN) |
+ HIFC_API_CMD_DESC_SET(priv, PRIV_DATA);
+ break;
+ default:
+ sdk_err(chain->hwdev->dev_hdl, "Unknown Chain type: %d\n",
+ chain->chain_type);
+ return;
+ }
+
+ cell->desc |= HIFC_API_CMD_DESC_SET(dest, DEST) |
+ HIFC_API_CMD_DESC_SET(SIZE_4BYTES(cmd_size), SIZE);
+
+ cell->desc |= HIFC_API_CMD_DESC_SET(xor_chksum_set(&cell->desc),
+ XOR_CHKSUM);
+
+ /* The data in the HW should be in Big Endian Format */
+ cell->desc = cpu_to_be64(cell->desc);
+
+ memcpy(cell_ctxt->api_cmd_vaddr, cmd, cmd_size);
+}
+
+/**
+ * prepare_cell - prepare cell ctrl and cmd in the current producer cell
+ * @chain: chain for the command
+ * @dest: destination node on the card that will receive the command
+ * @cmd: command data
+ * @cmd_size: the command size
+ **/
+static void prepare_cell(struct hifc_api_cmd_chain *chain,
+ enum hifc_node_id dest,
+ void *cmd, u16 cmd_size)
+{
+ struct hifc_api_cmd_cell *curr_node;
+ u16 cell_size;
+
+ curr_node = chain->curr_node;
+
+ cell_size = get_cell_data_size(chain->chain_type, cmd_size);
+
+ prepare_cell_ctrl(&curr_node->ctrl, cell_size);
+ prepare_api_cmd(chain, curr_node, dest, cmd, cmd_size);
+}
+
+static inline void cmd_chain_prod_idx_inc(struct hifc_api_cmd_chain *chain)
+{
+ chain->prod_idx = MASKED_IDX(chain, chain->prod_idx + 1);
+}
+
+static void issue_api_cmd(struct hifc_api_cmd_chain *chain)
+{
+ set_prod_idx(chain);
+}
+
+/**
+ * api_cmd_status_update - update the status of the chain
+ * @chain: chain to update
+ **/
+static void api_cmd_status_update(struct hifc_api_cmd_chain *chain)
+{
+ struct hifc_api_cmd_status *wb_status;
+ enum hifc_api_cmd_chain_type chain_type;
+ u64 status_header;
+ u32 buf_desc;
+
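+	/* wb_status is a DMA write-back area that the HW updates with the
+	 * chain state, including the current consumer index.
+	 */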
+ wb_status = chain->wb_status;
+
+ buf_desc = be32_to_cpu(wb_status->buf_desc);
+ if (HIFC_API_CMD_STATUS_GET(buf_desc, CHKSUM_ERR))
+ return;
+
+ status_header = be64_to_cpu(wb_status->header);
+ chain_type = HIFC_API_CMD_STATUS_HEADER_GET(status_header, CHAIN_ID);
+ if (chain_type >= HIFC_API_CMD_MAX)
+ return;
+
+ if (chain_type != chain->chain_type)
+ return;
+
+ chain->cons_idx = HIFC_API_CMD_STATUS_GET(buf_desc, CONS_IDX);
+}
+
+/**
+ * wait_for_status_poll - wait for write to mgmt command to complete
+ * @chain: the chain of the command
+ * Return: 0 - success, negative - failure
+ **/
+static int wait_for_status_poll(struct hifc_api_cmd_chain *chain)
+{
+ int err = -ETIMEDOUT;
+ u32 cnt = 0;
+
+ while (cnt < API_CMD_STATUS_TIMEOUT &&
+ chain->hwdev->chip_present_flag) {
+ api_cmd_status_update(chain);
+
+ /* SYNC API CMD cmd should start after prev cmd finished */
+ if (chain->cons_idx == chain->prod_idx) {
+ err = 0;
+ break;
+ }
+
+ usleep_range(50, 100);
+ cnt++;
+ }
+
+ return err;
+}
+
+static void copy_resp_data(struct hifc_api_cmd_cell_ctxt *ctxt, void *ack,
+ u16 ack_size)
+{
+ struct hifc_api_cmd_resp_fmt *resp = ctxt->resp;
+
+ memcpy(ack, &resp->resp_data, ack_size);
+ ctxt->status = 0;
+}
+
+/**
+ * wait_for_resp_polling - poll for the response data of a read API CMD command
+ * @ctxt: the cell context of the command
+ *
+ * Return: 0 - success, negative - failure
+ **/
+static int wait_for_resp_polling(struct hifc_api_cmd_cell_ctxt *ctxt)
+{
+ u64 resp_header;
+ int ret = -ETIMEDOUT;
+ u32 cnt = 0;
+
+ while (cnt < POLLING_COMPLETION_TIMEOUT_DEFAULT) {
+ resp_header = be64_to_cpu(ctxt->resp->header);
+
+ rmb(); /* read the latest header */
+
+ if (HIFC_API_CMD_RESP_HEADER_VALID(resp_header)) {
+ ret = 0;
+ break;
+ }
+ usleep_range(100, 1000);
+ cnt++;
+ }
+
+ if (ret)
+ pr_err("Wait for api chain response timeout\n");
+
+ return ret;
+}
+
+/**
+ * wait_for_api_cmd_completion - wait for command to complete
+ * @chain: chain for the command
+ * @ctxt: the cell context of the command
+ * @ack: buffer for the response data of a read command
+ * @ack_size: size of the response buffer
+ * Return: 0 - success, negative - failure
+ **/
+static int wait_for_api_cmd_completion(struct hifc_api_cmd_chain *chain,
+ struct hifc_api_cmd_cell_ctxt *ctxt,
+ void *ack, u16 ack_size)
+{
+ void *dev = chain->hwdev->dev_hdl;
+ int err = 0;
+
+ switch (chain->chain_type) {
+ case HIFC_API_CMD_POLL_READ:
+ err = wait_for_resp_polling(ctxt);
+ if (!err)
+ copy_resp_data(ctxt, ack, ack_size);
+ break;
+ case HIFC_API_CMD_POLL_WRITE:
+ case HIFC_API_CMD_WRITE_TO_MGMT_CPU:
+ err = wait_for_status_poll(chain);
+ if (err) {
+ sdk_err(dev, "API CMD Poll status timeout, chain type: %d\n",
+ chain->chain_type);
+ break;
+ }
+ break;
+ case HIFC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU:
+ /* No need to wait */
+ break;
+ default:
+ sdk_err(dev, "Unknown API CMD Chain type: %d\n",
+ chain->chain_type);
+ err = -EINVAL;
+ break;
+ }
+
+ if (err)
+ dump_api_chain_reg(chain);
+
+ return err;
+}
+
+static inline void update_api_cmd_ctxt(struct hifc_api_cmd_chain *chain,
+ struct hifc_api_cmd_cell_ctxt *ctxt)
+{
+ ctxt->status = 1;
+ ctxt->saved_prod_idx = chain->prod_idx;
+ if (ctxt->resp) {
+ ctxt->resp->header = 0;
+
+ /* make sure "header" was cleared */
+ wmb();
+ }
+}
+
+/**
+ * api_cmd - API CMD command
+ * @chain: chain for the command
+ * @dest: destination node on the card that will receive the command
+ * @cmd: command data
+ * @cmd_size: the command size
+ * @ack: buffer for the response data of a read command
+ * @ack_size: size of the response buffer
+ * Return: 0 - success, negative - failure
+ **/
+static int api_cmd(struct hifc_api_cmd_chain *chain,
+ enum hifc_node_id dest,
+ void *cmd, u16 cmd_size, void *ack, u16 ack_size)
+{
+ struct hifc_api_cmd_cell_ctxt *ctxt;
+
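+	/* The async chain may be used in atomic context, so it is protected
+	 * by a spinlock; the other chain types may sleep and use a semaphore.
+	 */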
+ if (chain->chain_type == HIFC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU)
+ spin_lock(&chain->async_lock);
+ else
+ down(&chain->sem);
+ ctxt = &chain->cell_ctxt[chain->prod_idx];
+ if (chain_busy(chain)) {
+ if (chain->chain_type == HIFC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU)
+ spin_unlock(&chain->async_lock);
+ else
+ up(&chain->sem);
+ return -EBUSY;
+ }
+ update_api_cmd_ctxt(chain, ctxt);
+
+ prepare_cell(chain, dest, cmd, cmd_size);
+
+ cmd_chain_prod_idx_inc(chain);
+
+ wmb(); /* issue the command */
+
+ issue_api_cmd(chain);
+
+	/* prod_idx was advanced; move curr_node to the new producer cell */
+
+ chain->curr_node = chain->cell_ctxt[chain->prod_idx].cell_vaddr;
+ if (chain->chain_type == HIFC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU)
+ spin_unlock(&chain->async_lock);
+ else
+ up(&chain->sem);
+
+ return wait_for_api_cmd_completion(chain, ctxt, ack, ack_size);
+}
+
+/**
+ * hifc_api_cmd_write - Write API CMD command
+ * @chain: chain for write command
+ * @dest: destination node on the card that will receive the command
+ * @cmd: command data
+ * @size: the command size
+ * Return: 0 - success, negative - failure
+ **/
+int hifc_api_cmd_write(struct hifc_api_cmd_chain *chain,
+ enum hifc_node_id dest, void *cmd, u16 size)
+{
+ /* Verify the chain type */
+ return api_cmd(chain, dest, cmd, size, NULL, 0);
+}
+
+int hifc_api_cmd_read(struct hifc_api_cmd_chain *chain,
+ enum hifc_node_id dest,
+ void *cmd, u16 size, void *ack, u16 ack_size)
+{
+ return api_cmd(chain, dest, cmd, size, ack, ack_size);
+}
+
+/**
+ * api_cmd_hw_restart - restart the chain in the HW
+ * @cmd_chain: the API CMD specific chain to restart
+ **/
+static int api_cmd_hw_restart(struct hifc_api_cmd_chain *cmd_chain)
+{
+ struct hifc_hwif *hwif = cmd_chain->hwdev->hwif;
+ u32 reg_addr, val;
+ int err;
+ u32 cnt = 0;
+
+ /* Read Modify Write */
+ reg_addr = HIFC_CSR_API_CMD_CHAIN_REQ_ADDR(cmd_chain->chain_type);
+ val = hifc_hwif_read_reg(hwif, reg_addr);
+
+ val = HIFC_API_CMD_CHAIN_REQ_CLEAR(val, RESTART);
+ val |= HIFC_API_CMD_CHAIN_REQ_SET(1, RESTART);
+
+ hifc_hwif_write_reg(hwif, reg_addr, val);
+
+ err = -ETIMEDOUT;
+ while (cnt < API_CMD_TIMEOUT) {
+ val = hifc_hwif_read_reg(hwif, reg_addr);
+
+ if (!HIFC_API_CMD_CHAIN_REQ_GET(val, RESTART)) {
+ err = 0;
+ break;
+ }
+
+ usleep_range(900, 1000);
+ cnt++;
+ }
+
+ return err;
+}
+
+/**
+ * api_cmd_ctrl_init - set the control register of a chain
+ * @chain: the API CMD specific chain to set control register for
+ **/
+static void api_cmd_ctrl_init(struct hifc_api_cmd_chain *chain)
+{
+ struct hifc_hwif *hwif = chain->hwdev->hwif;
+ u32 reg_addr, ctrl;
+ u32 size;
+
+ /* Read Modify Write */
+ reg_addr = HIFC_CSR_API_CMD_CHAIN_CTRL_ADDR(chain->chain_type);
+
+ size = (u32)ilog2(chain->cell_size >> API_CMD_CHAIN_CELL_SIZE_SHIFT);
+
+ ctrl = hifc_hwif_read_reg(hwif, reg_addr);
+
+ ctrl = HIFC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, AEQE_EN) &
+ HIFC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, CELL_SIZE);
+
+ ctrl |= HIFC_API_CMD_CHAIN_CTRL_SET(0, AEQE_EN) |
+ HIFC_API_CMD_CHAIN_CTRL_SET(size, CELL_SIZE);
+
+ hifc_hwif_write_reg(hwif, reg_addr, ctrl);
+}
+
+/**
+ * api_cmd_set_status_addr - set the status address of a chain in the HW
+ * @chain: the API CMD specific chain to set status address for
+ **/
+static void api_cmd_set_status_addr(struct hifc_api_cmd_chain *chain)
+{
+ struct hifc_hwif *hwif = chain->hwdev->hwif;
+ u32 addr, val;
+
+ addr = HIFC_CSR_API_CMD_STATUS_HI_ADDR(chain->chain_type);
+ val = upper_32_bits(chain->wb_status_paddr);
+ hifc_hwif_write_reg(hwif, addr, val);
+
+ addr = HIFC_CSR_API_CMD_STATUS_LO_ADDR(chain->chain_type);
+ val = lower_32_bits(chain->wb_status_paddr);
+ hifc_hwif_write_reg(hwif, addr, val);
+}
+
+/**
+ * api_cmd_set_num_cells - set the number of cells of a chain in the HW
+ * @chain: the API CMD specific chain to set the number of cells for
+ **/
+static void api_cmd_set_num_cells(struct hifc_api_cmd_chain *chain)
+{
+ struct hifc_hwif *hwif = chain->hwdev->hwif;
+ u32 addr, val;
+
+ addr = HIFC_CSR_API_CMD_CHAIN_NUM_CELLS_ADDR(chain->chain_type);
+ val = chain->num_cells;
+ hifc_hwif_write_reg(hwif, addr, val);
+}
+
+/**
+ * api_cmd_head_init - set the head cell of a chain in the HW
+ * @chain: the API CMD specific chain to set the head for
+ **/
+static void api_cmd_head_init(struct hifc_api_cmd_chain *chain)
+{
+ struct hifc_hwif *hwif = chain->hwdev->hwif;
+ u32 addr, val;
+
+ addr = HIFC_CSR_API_CMD_CHAIN_HEAD_HI_ADDR(chain->chain_type);
+ val = upper_32_bits(chain->head_cell_paddr);
+ hifc_hwif_write_reg(hwif, addr, val);
+
+ addr = HIFC_CSR_API_CMD_CHAIN_HEAD_LO_ADDR(chain->chain_type);
+ val = lower_32_bits(chain->head_cell_paddr);
+ hifc_hwif_write_reg(hwif, addr, val);
+}
+
+/**
+ * wait_for_ready_chain - wait for the chain to be ready
+ * @chain: the API CMD specific chain to wait for
+ * Return: 0 - success, negative - failure
+ **/
+static int wait_for_ready_chain(struct hifc_api_cmd_chain *chain)
+{
+ struct hifc_hwif *hwif = chain->hwdev->hwif;
+ u32 addr, val;
+ u32 hw_cons_idx;
+ u32 cnt = 0;
+ int err;
+
+ addr = HIFC_CSR_API_CMD_STATUS_0_ADDR(chain->chain_type);
+ err = -ETIMEDOUT;
+ while (cnt < API_CMD_TIMEOUT) {
+ val = hifc_hwif_read_reg(hwif, addr);
+ hw_cons_idx = HIFC_API_CMD_STATUS_GET(val, CONS_IDX);
+
+ /* wait for HW cons idx to be updated */
+ if (hw_cons_idx == chain->cons_idx) {
+ err = 0;
+ break;
+ }
+
+ usleep_range(900, 1000);
+ cnt++;
+ }
+
+ return err;
+}
+
+/**
+ * api_cmd_chain_hw_clean - clean the HW
+ * @chain: the API CMD specific chain
+ **/
+static void api_cmd_chain_hw_clean(struct hifc_api_cmd_chain *chain)
+{
+ struct hifc_hwif *hwif = chain->hwdev->hwif;
+ u32 addr, ctrl;
+
+ addr = HIFC_CSR_API_CMD_CHAIN_CTRL_ADDR(chain->chain_type);
+
+ ctrl = hifc_hwif_read_reg(hwif, addr);
+ ctrl = HIFC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, RESTART_EN) &
+ HIFC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_ERR) &
+ HIFC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, AEQE_EN) &
+ HIFC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_CHK_EN) &
+ HIFC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, CELL_SIZE);
+
+ hifc_hwif_write_reg(hwif, addr, ctrl);
+}
+
+/**
+ * api_cmd_chain_hw_init - initialize the chain in the HW
+ * @chain: the API CMD specific chain to initialize in HW
+ * Return: 0 - success, negative - failure
+ **/
+static int api_cmd_chain_hw_init(struct hifc_api_cmd_chain *chain)
+{
+ api_cmd_chain_hw_clean(chain);
+
+ api_cmd_set_status_addr(chain);
+
+ if (api_cmd_hw_restart(chain)) {
+ sdk_err(chain->hwdev->dev_hdl, "Failed to restart api_cmd_hw\n");
+ return -EBUSY;
+ }
+
+ api_cmd_ctrl_init(chain);
+ api_cmd_set_num_cells(chain);
+ api_cmd_head_init(chain);
+
+ return wait_for_ready_chain(chain);
+}
+
+/**
+ * alloc_cmd_buf - allocate a dma buffer for API CMD command
+ * @chain: the API CMD specific chain for the cmd
+ * @cell: the cell in the HW for the cmd
+ * @cell_idx: the index of the cell
+ * Return: 0 - success, negative - failure
+ **/
+static int alloc_cmd_buf(struct hifc_api_cmd_chain *chain,
+ struct hifc_api_cmd_cell *cell, u32 cell_idx)
+{
+ struct hifc_api_cmd_cell_ctxt *cell_ctxt;
+ void *dev = chain->hwdev->dev_hdl;
+ void *buf_vaddr;
+ u64 buf_paddr;
+ int err = 0;
+
+ buf_vaddr = (u8 *)((u64)chain->buf_vaddr_base +
+ chain->buf_size_align * cell_idx);
+ buf_paddr = chain->buf_paddr_base +
+ chain->buf_size_align * cell_idx;
+
+ cell_ctxt = &chain->cell_ctxt[cell_idx];
+
+ cell_ctxt->api_cmd_vaddr = buf_vaddr;
+
+ /* set the cmd DMA address in the cell */
+ switch (chain->chain_type) {
+ case HIFC_API_CMD_POLL_READ:
+ cell->read.hw_cmd_paddr = cpu_to_be64(buf_paddr);
+ break;
+ case HIFC_API_CMD_WRITE_TO_MGMT_CPU:
+ case HIFC_API_CMD_POLL_WRITE:
+ case HIFC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU:
+ /* The data in the HW should be in Big Endian Format */
+ cell->write.hw_cmd_paddr = cpu_to_be64(buf_paddr);
+ break;
+ default:
+ sdk_err(dev, "Unknown API CMD Chain type: %d\n",
+ chain->chain_type);
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+static void alloc_resp_buf(struct hifc_api_cmd_chain *chain,
+ struct hifc_api_cmd_cell *cell, u32 cell_idx)
+{
+ struct hifc_api_cmd_cell_ctxt *cell_ctxt;
+ void *resp_vaddr;
+ u64 resp_paddr;
+
+ resp_vaddr = (u8 *)((u64)chain->rsp_vaddr_base +
+ chain->rsp_size_align * cell_idx);
+ resp_paddr = chain->rsp_paddr_base +
+ chain->rsp_size_align * cell_idx;
+
+ cell_ctxt = &chain->cell_ctxt[cell_idx];
+
+ cell_ctxt->resp = resp_vaddr;
+ cell->read.hw_wb_resp_paddr = cpu_to_be64(resp_paddr);
+}
+
+static int hifc_alloc_api_cmd_cell_buf(struct hifc_api_cmd_chain *chain,
+ u32 cell_idx,
+ struct hifc_api_cmd_cell *node)
+{
+ void *dev = chain->hwdev->dev_hdl;
+ int err;
+
+ /* For read chain, we should allocate buffer for the response data */
+ if (chain->chain_type == HIFC_API_CMD_MULTI_READ ||
+ chain->chain_type == HIFC_API_CMD_POLL_READ)
+ alloc_resp_buf(chain, node, cell_idx);
+
+ switch (chain->chain_type) {
+ case HIFC_API_CMD_WRITE_TO_MGMT_CPU:
+ case HIFC_API_CMD_POLL_WRITE:
+ case HIFC_API_CMD_POLL_READ:
+ case HIFC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU:
+ err = alloc_cmd_buf(chain, node, cell_idx);
+ if (err) {
+ sdk_err(dev, "Failed to allocate cmd buffer\n");
+ goto alloc_cmd_buf_err;
+ }
+ break;
+ /* For api command write and api command read, the data section
+ * is directly inserted in the cell, so no need to allocate.
+ */
+ case HIFC_API_CMD_MULTI_READ:
+ chain->cell_ctxt[cell_idx].api_cmd_vaddr =
+ &node->read.hw_cmd_paddr;
+ break;
+ default:
+ sdk_err(dev, "Unsupported API CMD chain type\n");
+ err = -EINVAL;
+ goto alloc_cmd_buf_err;
+ }
+
+ return 0;
+
+alloc_cmd_buf_err:
+
+ return err;
+}
+
+/**
+ * api_cmd_create_cell - create API CMD cell of specific chain
+ * @chain: the API CMD specific chain to create its cell
+ * @cell_idx: the cell index to create
+ * @pre_node: previous cell
+ * @node_vaddr: the virt addr of the cell
+ * Return: 0 - success, negative - failure
+ **/
+static int api_cmd_create_cell(struct hifc_api_cmd_chain *chain, u32 cell_idx,
+ struct hifc_api_cmd_cell *pre_node,
+ struct hifc_api_cmd_cell **node_vaddr)
+{
+ struct hifc_api_cmd_cell_ctxt *cell_ctxt;
+ struct hifc_api_cmd_cell *node;
+ void *cell_vaddr;
+ u64 cell_paddr;
+ int err;
+
+ cell_vaddr = (void *)((u64)chain->cell_vaddr_base +
+ chain->cell_size_align * cell_idx);
+ cell_paddr = chain->cell_paddr_base +
+ chain->cell_size_align * cell_idx;
+
+ cell_ctxt = &chain->cell_ctxt[cell_idx];
+ cell_ctxt->cell_vaddr = cell_vaddr;
+ node = cell_ctxt->cell_vaddr;
+
+ if (!pre_node) {
+ chain->head_node = cell_vaddr;
+ chain->head_cell_paddr = cell_paddr;
+ } else {
+ /* The data in the HW should be in Big Endian Format */
+ pre_node->next_cell_paddr = cpu_to_be64(cell_paddr);
+ }
+
+ /* Driver software should make sure that there is an empty API
+	 * command cell at the end of the chain
+ */
+ node->next_cell_paddr = 0;
+
+ err = hifc_alloc_api_cmd_cell_buf(chain, cell_idx, node);
+ if (err)
+ return err;
+
+ *node_vaddr = node;
+
+ return 0;
+}
+
+/**
+ * api_cmd_create_cells - create API CMD cells for specific chain
+ * @chain: the API CMD specific chain
+ * Return: 0 - success, negative - failure
+ **/
+static int api_cmd_create_cells(struct hifc_api_cmd_chain *chain)
+{
+ struct hifc_api_cmd_cell *node = NULL, *pre_node = NULL;
+ void *dev = chain->hwdev->dev_hdl;
+ u32 cell_idx;
+ int err;
+
+ for (cell_idx = 0; cell_idx < chain->num_cells; cell_idx++) {
+ err = api_cmd_create_cell(chain, cell_idx, pre_node, &node);
+ if (err) {
+ sdk_err(dev, "Failed to create API CMD cell\n");
+ return err;
+ }
+
+ pre_node = node;
+ }
+
+ if (!node)
+ return -EFAULT;
+
+	/* make the final node point back to the start of the chain */
+ node->next_cell_paddr = cpu_to_be64(chain->head_cell_paddr);
+
+ /* set the current node to be the head */
+ chain->curr_node = chain->head_node;
+ return 0;
+}
+
+/**
+ * api_chain_init - initialize API CMD specific chain
+ * @chain: the API CMD specific chain to initialize
+ * @attr: attributes to set in the chain
+ * Return: 0 - success, negative - failure
+ **/
+static int api_chain_init(struct hifc_api_cmd_chain *chain,
+ struct hifc_api_cmd_chain_attr *attr)
+{
+ void *dev = chain->hwdev->dev_hdl;
+ size_t cell_ctxt_size;
+ size_t cells_buf_size;
+ int err;
+
+ chain->chain_type = attr->chain_type;
+ chain->num_cells = attr->num_cells;
+ chain->cell_size = attr->cell_size;
+ chain->rsp_size = attr->rsp_size;
+
+ chain->prod_idx = 0;
+ chain->cons_idx = 0;
+
+ if (chain->chain_type == HIFC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU)
+ spin_lock_init(&chain->async_lock);
+ else
+ sema_init(&chain->sem, 1);
+
+ cell_ctxt_size = chain->num_cells * sizeof(*chain->cell_ctxt);
+ if (!cell_ctxt_size) {
+ sdk_err(dev, "Api chain cell size cannot be zero\n");
+ err = -EINVAL;
+ goto alloc_cell_ctxt_err;
+ }
+
+ chain->cell_ctxt = kzalloc(cell_ctxt_size, GFP_KERNEL);
+ if (!chain->cell_ctxt) {
+ sdk_err(dev, "Failed to allocate cell contexts for a chain\n");
+ err = -ENOMEM;
+ goto alloc_cell_ctxt_err;
+ }
+
+ chain->wb_status = dma_zalloc_coherent(dev,
+ sizeof(*chain->wb_status),
+ &chain->wb_status_paddr,
+ GFP_KERNEL);
+ if (!chain->wb_status) {
+ sdk_err(dev, "Failed to allocate DMA wb status\n");
+ err = -ENOMEM;
+ goto alloc_wb_status_err;
+ }
+
+ chain->cell_size_align = ALIGN((u64)chain->cell_size,
+ API_CMD_NODE_ALIGN_SIZE);
+ chain->rsp_size_align = ALIGN((u64)chain->rsp_size,
+ API_CHAIN_RESP_ALIGNMENT);
+ chain->buf_size_align = ALIGN(API_CMD_BUF_SIZE, API_PAYLOAD_ALIGN_SIZE);
+
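+	/* A single coherent DMA allocation holds three back-to-back regions:
+	 * the cells, the response buffers and the command data buffers.
+	 */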
+ cells_buf_size = (chain->cell_size_align + chain->rsp_size_align +
+ chain->buf_size_align) * chain->num_cells;
+
+ err = hifc_dma_zalloc_coherent_align(dev, cells_buf_size,
+ API_CMD_NODE_ALIGN_SIZE,
+ GFP_KERNEL,
+ &chain->cells_addr);
+ if (err) {
+ sdk_err(dev, "Failed to allocate API CMD cells buffer\n");
+ goto alloc_cells_buf_err;
+ }
+
+ chain->cell_vaddr_base = chain->cells_addr.align_vaddr;
+ chain->cell_paddr_base = chain->cells_addr.align_paddr;
+
+ chain->rsp_vaddr_base = (u8 *)((u64)chain->cell_vaddr_base +
+ chain->cell_size_align * chain->num_cells);
+ chain->rsp_paddr_base = chain->cell_paddr_base +
+ chain->cell_size_align * chain->num_cells;
+
+ chain->buf_vaddr_base = (u8 *)((u64)chain->rsp_vaddr_base +
+ chain->rsp_size_align * chain->num_cells);
+ chain->buf_paddr_base = chain->rsp_paddr_base +
+ chain->rsp_size_align * chain->num_cells;
+
+ return 0;
+
+alloc_cells_buf_err:
+ dma_free_coherent(dev, sizeof(*chain->wb_status),
+ chain->wb_status, chain->wb_status_paddr);
+
+alloc_wb_status_err:
+ kfree(chain->cell_ctxt);
+
+alloc_cell_ctxt_err:
+ return err;
+}
+
+/**
+ * api_chain_free - free API CMD specific chain
+ * @chain: the API CMD specific chain to free
+ **/
+static void api_chain_free(struct hifc_api_cmd_chain *chain)
+{
+ void *dev = chain->hwdev->dev_hdl;
+
+ hifc_dma_free_coherent_align(dev, &chain->cells_addr);
+
+ dma_free_coherent(dev, sizeof(*chain->wb_status),
+ chain->wb_status, chain->wb_status_paddr);
+ kfree(chain->cell_ctxt);
+}
+
+/**
+ * api_cmd_create_chain - create API CMD specific chain
+ * @cmd_chain: pointer used to return the created API CMD chain
+ * @attr: attributes to set in the chain
+ * Return: 0 - success, negative - failure
+ **/
+static int api_cmd_create_chain(struct hifc_api_cmd_chain **cmd_chain,
+ struct hifc_api_cmd_chain_attr *attr)
+{
+ struct hifc_hwdev *hwdev = attr->hwdev;
+ struct hifc_api_cmd_chain *chain;
+ int err;
+
+ if (attr->num_cells & (attr->num_cells - 1)) {
+ sdk_err(hwdev->dev_hdl, "Invalid number of cells, must be power of 2\n");
+ return -EINVAL;
+ }
+
+ chain = kzalloc(sizeof(*chain), GFP_KERNEL);
+ if (!chain)
+ return -ENOMEM;
+
+ chain->hwdev = hwdev;
+
+ err = api_chain_init(chain, attr);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to initialize chain\n");
+ goto chain_init_err;
+ }
+
+ err = api_cmd_create_cells(chain);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to create cells for API CMD chain\n");
+ goto create_cells_err;
+ }
+
+ err = api_cmd_chain_hw_init(chain);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to initialize chain HW\n");
+ goto chain_hw_init_err;
+ }
+
+ *cmd_chain = chain;
+ return 0;
+
+chain_hw_init_err:
+create_cells_err:
+ api_chain_free(chain);
+
+chain_init_err:
+ kfree(chain);
+ return err;
+}
+
+/**
+ * api_cmd_destroy_chain - destroy API CMD specific chain
+ * @chain: the API CMD specific chain to destroy
+ **/
+static void api_cmd_destroy_chain(struct hifc_api_cmd_chain *chain)
+{
+ api_chain_free(chain);
+ kfree(chain);
+}
+
+/**
+ * hifc_api_cmd_init - Initialize all the API CMD chains
+ * @hwdev: the pointer to hw device
+ * @chain: the API CMD chains that will be initialized
+ * Return: 0 - success, negative - failure
+ **/
+int hifc_api_cmd_init(struct hifc_hwdev *hwdev,
+ struct hifc_api_cmd_chain **chain)
+{
+ void *dev = hwdev->dev_hdl;
+ struct hifc_api_cmd_chain_attr attr;
+ enum hifc_api_cmd_chain_type chain_type, i;
+ int err;
+
+ attr.hwdev = hwdev;
+ attr.num_cells = API_CHAIN_NUM_CELLS;
+ attr.cell_size = API_CHAIN_CELL_SIZE;
+ attr.rsp_size = API_CHAIN_RSP_DATA_SIZE;
+
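+	/* Only chain types from HIFC_API_CMD_WRITE_TO_MGMT_CPU onwards are
+	 * created; the plain WRITE and READ chains are not used by this
+	 * driver.
+	 */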
+ chain_type = HIFC_API_CMD_WRITE_TO_MGMT_CPU;
+ for (; chain_type < HIFC_API_CMD_MAX; chain_type++) {
+ attr.chain_type = chain_type;
+
+ err = api_cmd_create_chain(&chain[chain_type], &attr);
+ if (err) {
+ sdk_err(dev, "Failed to create chain %d\n", chain_type);
+ goto create_chain_err;
+ }
+ }
+
+ return 0;
+
+create_chain_err:
+ i = HIFC_API_CMD_WRITE_TO_MGMT_CPU;
+ for (; i < chain_type; i++)
+ api_cmd_destroy_chain(chain[i]);
+
+ return err;
+}
+
+/**
+ * hifc_api_cmd_free - free the API CMD chains
+ * @chain: the API CMD chains that will be freed
+ **/
+void hifc_api_cmd_free(struct hifc_api_cmd_chain **chain)
+{
+ enum hifc_api_cmd_chain_type chain_type;
+
+ chain_type = HIFC_API_CMD_WRITE_TO_MGMT_CPU;
+
+ for (; chain_type < HIFC_API_CMD_MAX; chain_type++)
+ api_cmd_destroy_chain(chain[chain_type]);
+}
+
diff --git a/drivers/scsi/huawei/hifc/hifc_api_cmd.h b/drivers/scsi/huawei/hifc/hifc_api_cmd.h
new file mode 100644
index 000000000000..bd14db34a119
--- /dev/null
+++ b/drivers/scsi/huawei/hifc/hifc_api_cmd.h
@@ -0,0 +1,268 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei Hifc PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ */
+
+#ifndef HIFC_API_CMD_H_
+#define HIFC_API_CMD_H_
+
+#define HIFC_API_CMD_CELL_CTRL_CELL_LEN_SHIFT 0
+#define HIFC_API_CMD_CELL_CTRL_RD_DMA_ATTR_OFF_SHIFT 16
+#define HIFC_API_CMD_CELL_CTRL_WR_DMA_ATTR_OFF_SHIFT 24
+#define HIFC_API_CMD_CELL_CTRL_XOR_CHKSUM_SHIFT 56
+
+#define HIFC_API_CMD_CELL_CTRL_CELL_LEN_MASK 0x3FU
+#define HIFC_API_CMD_CELL_CTRL_RD_DMA_ATTR_OFF_MASK 0x3FU
+#define HIFC_API_CMD_CELL_CTRL_WR_DMA_ATTR_OFF_MASK 0x3FU
+#define HIFC_API_CMD_CELL_CTRL_XOR_CHKSUM_MASK 0xFFU
+
+#define HIFC_API_CMD_CELL_CTRL_SET(val, member) \
+ ((((u64)val) & HIFC_API_CMD_CELL_CTRL_##member##_MASK) << \
+ HIFC_API_CMD_CELL_CTRL_##member##_SHIFT)
+
+#define HIFC_API_CMD_DESC_API_TYPE_SHIFT 0
+#define HIFC_API_CMD_DESC_RD_WR_SHIFT 1
+#define HIFC_API_CMD_DESC_MGMT_BYPASS_SHIFT 2
+#define HIFC_API_CMD_DESC_RESP_AEQE_EN_SHIFT 3
+#define HIFC_API_CMD_DESC_PRIV_DATA_SHIFT 8
+#define HIFC_API_CMD_DESC_DEST_SHIFT 32
+#define HIFC_API_CMD_DESC_SIZE_SHIFT 40
+#define HIFC_API_CMD_DESC_XOR_CHKSUM_SHIFT 56
+
+#define HIFC_API_CMD_DESC_API_TYPE_MASK 0x1U
+#define HIFC_API_CMD_DESC_RD_WR_MASK 0x1U
+#define HIFC_API_CMD_DESC_MGMT_BYPASS_MASK 0x1U
+#define HIFC_API_CMD_DESC_RESP_AEQE_EN_MASK 0x1U
+#define HIFC_API_CMD_DESC_DEST_MASK 0x1FU
+#define HIFC_API_CMD_DESC_SIZE_MASK 0x7FFU
+#define HIFC_API_CMD_DESC_XOR_CHKSUM_MASK 0xFFU
+#define HIFC_API_CMD_DESC_PRIV_DATA_MASK 0xFFFFFFU
+
+#define HIFC_API_CMD_DESC_SET(val, member) \
+ ((((u64)val) & HIFC_API_CMD_DESC_##member##_MASK) << \
+ HIFC_API_CMD_DESC_##member##_SHIFT)
+#define HIFC_API_CMD_STATUS_HEADER_VALID_SHIFT 0
+#define HIFC_API_CMD_STATUS_HEADER_CHAIN_ID_SHIFT 16
+
+#define HIFC_API_CMD_STATUS_HEADER_VALID_MASK 0xFFU
+#define HIFC_API_CMD_STATUS_HEADER_CHAIN_ID_MASK 0xFFU
+#define HIFC_API_CMD_STATUS_HEADER_GET(val, member) \
+ (((val) >> HIFC_API_CMD_STATUS_HEADER_##member##_SHIFT) & \
+ HIFC_API_CMD_STATUS_HEADER_##member##_MASK)
+#define HIFC_API_CMD_CHAIN_REQ_RESTART_SHIFT 1
+#define HIFC_API_CMD_CHAIN_REQ_RESTART_MASK 0x1U
+#define HIFC_API_CMD_CHAIN_REQ_WB_TRIGGER_MASK 0x1U
+#define HIFC_API_CMD_CHAIN_REQ_SET(val, member) \
+ (((val) & HIFC_API_CMD_CHAIN_REQ_##member##_MASK) << \
+ HIFC_API_CMD_CHAIN_REQ_##member##_SHIFT)
+
+#define HIFC_API_CMD_CHAIN_REQ_GET(val, member) \
+ (((val) >> HIFC_API_CMD_CHAIN_REQ_##member##_SHIFT) & \
+ HIFC_API_CMD_CHAIN_REQ_##member##_MASK)
+
+#define HIFC_API_CMD_CHAIN_REQ_CLEAR(val, member) \
+ ((val) & (~(HIFC_API_CMD_CHAIN_REQ_##member##_MASK \
+ << HIFC_API_CMD_CHAIN_REQ_##member##_SHIFT)))
+
+#define HIFC_API_CMD_CHAIN_CTRL_RESTART_EN_SHIFT 1
+#define HIFC_API_CMD_CHAIN_CTRL_XOR_ERR_SHIFT 2
+#define HIFC_API_CMD_CHAIN_CTRL_AEQE_EN_SHIFT 4
+#define HIFC_API_CMD_CHAIN_CTRL_AEQ_ID_SHIFT 8
+#define HIFC_API_CMD_CHAIN_CTRL_XOR_CHK_EN_SHIFT 28
+#define HIFC_API_CMD_CHAIN_CTRL_CELL_SIZE_SHIFT 30
+
+#define HIFC_API_CMD_CHAIN_CTRL_RESTART_EN_MASK 0x1U
+#define HIFC_API_CMD_CHAIN_CTRL_XOR_ERR_MASK 0x1U
+#define HIFC_API_CMD_CHAIN_CTRL_AEQE_EN_MASK 0x1U
+#define HIFC_API_CMD_CHAIN_CTRL_AEQ_ID_MASK 0x3U
+#define HIFC_API_CMD_CHAIN_CTRL_XOR_CHK_EN_MASK 0x3U
+#define HIFC_API_CMD_CHAIN_CTRL_CELL_SIZE_MASK 0x3U
+
+#define HIFC_API_CMD_CHAIN_CTRL_SET(val, member) \
+ (((val) & HIFC_API_CMD_CHAIN_CTRL_##member##_MASK) << \
+ HIFC_API_CMD_CHAIN_CTRL_##member##_SHIFT)
+
+#define HIFC_API_CMD_CHAIN_CTRL_CLEAR(val, member) \
+ ((val) & (~(HIFC_API_CMD_CHAIN_CTRL_##member##_MASK \
+ << HIFC_API_CMD_CHAIN_CTRL_##member##_SHIFT)))
+
+#define HIFC_API_CMD_RESP_HEAD_VALID_MASK 0xFF
+#define HIFC_API_CMD_RESP_HEAD_VALID_CODE 0xFF
+
+#define HIFC_API_CMD_RESP_HEADER_VALID(val) \
+ (((val) & HIFC_API_CMD_RESP_HEAD_VALID_MASK) == \
+ HIFC_API_CMD_RESP_HEAD_VALID_CODE)
+#define HIFC_API_CMD_STATUS_CONS_IDX_MASK 0xFFFFFFU
+#define HIFC_API_CMD_STATUS_CONS_IDX_SHIFT 0
+#define HIFC_API_CMD_STATUS_FSM_MASK 0xFU
+#define HIFC_API_CMD_STATUS_FSM_SHIFT 24
+#define HIFC_API_CMD_STATUS_CHKSUM_ERR_MASK 0x3U
+#define HIFC_API_CMD_STATUS_CHKSUM_ERR_SHIFT 28
+#define HIFC_API_CMD_STATUS_CPLD_ERR_MASK 0x1U
+#define HIFC_API_CMD_STATUS_CPLD_ERR_SHIFT 30
+
+#define HIFC_API_CMD_STATUS_GET(val, member) \
+ (((val) >> HIFC_API_CMD_STATUS_##member##_SHIFT) & \
+ HIFC_API_CMD_STATUS_##member##_MASK)
+
+/* API CMD registers */
+#define HIFC_CSR_API_CMD_BASE 0xF000
+
+#define HIFC_CSR_API_CMD_STRIDE 0x100
+
+#define HIFC_CSR_API_CMD_CHAIN_HEAD_HI_ADDR(idx) \
+ (HIFC_CSR_API_CMD_BASE + 0x0 + (idx) * HIFC_CSR_API_CMD_STRIDE)
+
+#define HIFC_CSR_API_CMD_CHAIN_HEAD_LO_ADDR(idx) \
+ (HIFC_CSR_API_CMD_BASE + 0x4 + (idx) * HIFC_CSR_API_CMD_STRIDE)
+
+#define HIFC_CSR_API_CMD_STATUS_HI_ADDR(idx) \
+ (HIFC_CSR_API_CMD_BASE + 0x8 + (idx) * HIFC_CSR_API_CMD_STRIDE)
+
+#define HIFC_CSR_API_CMD_STATUS_LO_ADDR(idx) \
+ (HIFC_CSR_API_CMD_BASE + 0xC + (idx) * HIFC_CSR_API_CMD_STRIDE)
+
+#define HIFC_CSR_API_CMD_CHAIN_NUM_CELLS_ADDR(idx) \
+ (HIFC_CSR_API_CMD_BASE + 0x10 + (idx) * HIFC_CSR_API_CMD_STRIDE)
+
+#define HIFC_CSR_API_CMD_CHAIN_CTRL_ADDR(idx) \
+ (HIFC_CSR_API_CMD_BASE + 0x14 + (idx) * HIFC_CSR_API_CMD_STRIDE)
+
+#define HIFC_CSR_API_CMD_CHAIN_PI_ADDR(idx) \
+ (HIFC_CSR_API_CMD_BASE + 0x1C + (idx) * HIFC_CSR_API_CMD_STRIDE)
+
+#define HIFC_CSR_API_CMD_CHAIN_REQ_ADDR(idx) \
+ (HIFC_CSR_API_CMD_BASE + 0x20 + (idx) * HIFC_CSR_API_CMD_STRIDE)
+
+#define HIFC_CSR_API_CMD_STATUS_0_ADDR(idx) \
+ (HIFC_CSR_API_CMD_BASE + 0x30 + (idx) * HIFC_CSR_API_CMD_STRIDE)
+
+enum hifc_api_cmd_chain_type {
+ /* write command with completion notification */
+ HIFC_API_CMD_WRITE = 0,
+ /* read command with completion notification */
+ HIFC_API_CMD_READ = 1,
+ /* write to mgmt cpu command with completion */
+ HIFC_API_CMD_WRITE_TO_MGMT_CPU = 2,
+ /* multi read command with completion notification - not used */
+ HIFC_API_CMD_MULTI_READ = 3,
+ /* write command without completion notification */
+ HIFC_API_CMD_POLL_WRITE = 4,
+ /* read command without completion notification */
+ HIFC_API_CMD_POLL_READ = 5,
+	/* asynchronous write to mgmt cpu command, no completion wait */
+ HIFC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU = 6,
+ HIFC_API_CMD_MAX,
+};
+
+struct hifc_api_cmd_status {
+ u64 header;
+ u32 buf_desc;
+ u32 cell_addr_hi;
+ u32 cell_addr_lo;
+ u32 rsvd0;
+ u64 rsvd1;
+};
+
+/* HW struct */
+struct hifc_api_cmd_cell {
+ u64 ctrl;
+
+ /* address is 64 bit in HW struct */
+ u64 next_cell_paddr;
+
+ u64 desc;
+
+ /* HW struct */
+ union {
+ struct {
+ u64 hw_cmd_paddr;
+ } write;
+
+ struct {
+ u64 hw_wb_resp_paddr;
+ u64 hw_cmd_paddr;
+ } read;
+ };
+};
+
+struct hifc_api_cmd_resp_fmt {
+ u64 header;
+ u64 rsvd[3];
+ u64 resp_data;
+};
+
+struct hifc_api_cmd_cell_ctxt {
+ struct hifc_api_cmd_cell *cell_vaddr;
+
+ void *api_cmd_vaddr;
+
+ struct hifc_api_cmd_resp_fmt *resp;
+
+ struct completion done;
+ int status;
+
+ u32 saved_prod_idx;
+};
+
+struct hifc_api_cmd_chain_attr {
+ struct hifc_hwdev *hwdev;
+ enum hifc_api_cmd_chain_type chain_type;
+
+ u32 num_cells;
+ u16 rsp_size;
+ u16 cell_size;
+};
+
+struct hifc_api_cmd_chain {
+ struct hifc_hwdev *hwdev;
+ enum hifc_api_cmd_chain_type chain_type;
+
+ u32 num_cells;
+ u16 cell_size;
+ u16 rsp_size;
+
+	/* HW members are in 24-bit format */
+ u32 prod_idx;
+ u32 cons_idx;
+
+ struct semaphore sem;
+	/* The async chain cannot sleep, so it is protected by a spinlock */
+ spinlock_t async_lock;
+
+ dma_addr_t wb_status_paddr;
+ struct hifc_api_cmd_status *wb_status;
+
+ dma_addr_t head_cell_paddr;
+ struct hifc_api_cmd_cell *head_node;
+
+ struct hifc_api_cmd_cell_ctxt *cell_ctxt;
+ struct hifc_api_cmd_cell *curr_node;
+
+ struct hifc_dma_addr_align cells_addr;
+
+ u8 *cell_vaddr_base;
+ u64 cell_paddr_base;
+ u8 *rsp_vaddr_base;
+ u64 rsp_paddr_base;
+ u8 *buf_vaddr_base;
+ u64 buf_paddr_base;
+ u64 cell_size_align;
+ u64 rsp_size_align;
+ u64 buf_size_align;
+};
+
+int hifc_api_cmd_write(struct hifc_api_cmd_chain *chain,
+ enum hifc_node_id dest, void *cmd, u16 size);
+
+int hifc_api_cmd_read(struct hifc_api_cmd_chain *chain,
+ enum hifc_node_id dest, void *cmd, u16 size,
+ void *ack, u16 ack_size);
+
+int hifc_api_cmd_init(struct hifc_hwdev *hwdev,
+ struct hifc_api_cmd_chain **chain);
+
+void hifc_api_cmd_free(struct hifc_api_cmd_chain **chain);
+
+#endif
diff --git a/drivers/scsi/huawei/hifc/hifc_cfg.c b/drivers/scsi/huawei/hifc/hifc_cfg.c
new file mode 100644
index 000000000000..5ebe5d754c41
--- /dev/null
+++ b/drivers/scsi/huawei/hifc/hifc_cfg.c
@@ -0,0 +1,823 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei Hifc PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/completion.h>
+#include <linux/semaphore.h>
+#include <linux/vmalloc.h>
+
+#include "hifc_knl_adp.h"
+#include "hifc_hw.h"
+#include "hifc_hwdev.h"
+#include "hifc_hwif.h"
+#include "hifc_cqm_main.h"
+#include "hifc_api_cmd.h"
+#include "hifc_hw.h"
+#include "hifc_mgmt.h"
+#include "hifc_cfg.h"
+
+uint intr_mode;
+
+int hifc_sync_time(void *hwdev, u64 time)
+{
+ struct hifc_sync_time_info time_info = {0};
+ u16 out_size = sizeof(time_info);
+ int err;
+
+ time_info.mstime = time;
+ err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM,
+ HIFC_MGMT_CMD_SYNC_TIME, &time_info,
+ sizeof(time_info), &time_info, &out_size,
+ 0);
+ if (err || time_info.status || !out_size) {
+ sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl,
+ "Failed to sync time to mgmt, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, time_info.status, out_size);
+ }
+
+ return err;
+}
+
+static void parse_pub_res_cap(struct service_cap *cap,
+ struct hifc_dev_cap *dev_cap,
+ enum func_type type)
+{
+ cap->port_id = dev_cap->port_id;
+ cap->force_up = dev_cap->force_up;
+
+	pr_info("Get public resource capability, force_up: 0x%x\n",
+ cap->force_up);
+	/* FC needs the max queue number, but that information is part of
+	 * the l2nic capability, so it is also placed in the public
+	 * capability to let FC read the correct max queue number.
+	 */
+ cap->max_sqs = dev_cap->nic_max_sq + 1;
+ cap->max_rqs = dev_cap->nic_max_rq + 1;
+
+ cap->host_total_function = dev_cap->host_total_func;
+ cap->host_oq_id_mask_val = dev_cap->host_oq_id_mask_val;
+ cap->max_connect_num = dev_cap->max_conn_num;
+ cap->max_stick2cache_num = dev_cap->max_stick2cache_num;
+
+	pr_info("Get public resource capability, svc_cap_en: 0x%x\n",
+ dev_cap->svc_cap_en);
+ pr_info("port_id=0x%x\n", cap->port_id);
+ pr_info("Host_total_function=0x%x, host_oq_id_mask_val=0x%x\n",
+ cap->host_total_function, cap->host_oq_id_mask_val);
+}
+
+static void parse_fc_res_cap(struct service_cap *cap,
+ struct hifc_dev_cap *dev_cap,
+ enum func_type type)
+{
+ struct dev_fc_svc_cap *fc_cap = &cap->fc_cap.dev_fc_cap;
+
+ fc_cap->max_parent_qpc_num = dev_cap->fc_max_pctx;
+ fc_cap->scq_num = dev_cap->fc_max_scq;
+ fc_cap->srq_num = dev_cap->fc_max_srq;
+ fc_cap->max_child_qpc_num = dev_cap->fc_max_cctx;
+ fc_cap->vp_id_start = dev_cap->fc_vp_id_start;
+ fc_cap->vp_id_end = dev_cap->fc_vp_id_end;
+
+	pr_info("Get fc resource capability\n");
+ pr_info("Max_parent_qpc_num=0x%x, scq_num=0x%x, srq_num=0x%x, max_child_qpc_num=0x%x\n",
+ fc_cap->max_parent_qpc_num, fc_cap->scq_num, fc_cap->srq_num,
+ fc_cap->max_child_qpc_num);
+ pr_info("Vp_id_start=0x%x, vp_id_end=0x%x\n",
+ fc_cap->vp_id_start, fc_cap->vp_id_end);
+}
+
+static void parse_dev_cap(struct hifc_hwdev *dev,
+ struct hifc_dev_cap *dev_cap, enum func_type type)
+{
+ struct service_cap *cap = &dev->cfg_mgmt->svc_cap;
+
+ /* Public resource */
+ parse_pub_res_cap(cap, dev_cap, type);
+
+ /* PPF managed dynamic resource */
+
+ parse_fc_res_cap(cap, dev_cap, type);
+}
+
+static int get_cap_from_fw(struct hifc_hwdev *dev, enum func_type type)
+{
+ struct hifc_dev_cap dev_cap = {0};
+ u16 out_len = sizeof(dev_cap);
+ int err;
+
+ dev_cap.version = HIFC_CMD_VER_FUNC_ID;
+ err = hifc_global_func_id_get(dev, &dev_cap.func_id);
+ if (err)
+ return err;
+
+ sdk_info(dev->dev_hdl, "Get cap from fw, func_idx: %d\n",
+ dev_cap.func_id);
+
+ err = hifc_msg_to_mgmt_sync(dev, HIFC_MOD_CFGM, HIFC_CFG_NIC_CAP,
+ &dev_cap, sizeof(dev_cap),
+ &dev_cap, &out_len, 0);
+ if (err || dev_cap.status || !out_len) {
+ sdk_err(dev->dev_hdl,
+ "Failed to get capability from FW, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, dev_cap.status, out_len);
+ return -EFAULT;
+ }
+
+ parse_dev_cap(dev, &dev_cap, type);
+ return 0;
+}
+
+static void fc_param_fix(struct hifc_hwdev *dev)
+{
+ struct service_cap *cap = &dev->cfg_mgmt->svc_cap;
+ struct fc_service_cap *fc_cap = &cap->fc_cap;
+
+ fc_cap->parent_qpc_size = FC_PCTX_SZ;
+ fc_cap->child_qpc_size = FC_CCTX_SZ;
+ fc_cap->sqe_size = FC_SQE_SZ;
+
+ fc_cap->scqc_size = FC_SCQC_SZ;
+ fc_cap->scqe_size = FC_SCQE_SZ;
+
+ fc_cap->srqc_size = FC_SRQC_SZ;
+ fc_cap->srqe_size = FC_SRQE_SZ;
+}
+
+static void cfg_get_eq_num(struct hifc_hwdev *dev)
+{
+ struct cfg_eq_info *eq_info = &dev->cfg_mgmt->eq_info;
+
+ eq_info->num_ceq = dev->hwif->attr.num_ceqs;
+ eq_info->num_ceq_remain = eq_info->num_ceq;
+}
+
+static int cfg_init_eq(struct hifc_hwdev *dev)
+{
+ struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt;
+ struct cfg_eq *eq;
+ u8 num_ceq, i = 0;
+
+ cfg_get_eq_num(dev);
+ num_ceq = cfg_mgmt->eq_info.num_ceq;
+
+ sdk_info(dev->dev_hdl, "Cfg mgmt: ceqs=0x%x, remain=0x%x\n",
+ cfg_mgmt->eq_info.num_ceq, cfg_mgmt->eq_info.num_ceq_remain);
+
+ if (!num_ceq) {
+ sdk_err(dev->dev_hdl, "Ceq num cfg in fw is zero\n");
+ return -EFAULT;
+ }
+ eq = kcalloc(num_ceq, sizeof(*eq), GFP_KERNEL);
+ if (!eq)
+ return -ENOMEM;
+
+ for (i = 0; i < num_ceq; ++i) {
+ eq[i].eqn = i;
+ eq[i].free = CFG_FREE;
+ eq[i].type = SERVICE_T_MAX;
+ }
+
+ cfg_mgmt->eq_info.eq = eq;
+
+ mutex_init(&cfg_mgmt->eq_info.eq_mutex);
+
+ return 0;
+}
+
+static int cfg_init_interrupt(struct hifc_hwdev *dev)
+{
+ struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt;
+ struct cfg_irq_info *irq_info = &cfg_mgmt->irq_param_info;
+ u16 intr_num = dev->hwif->attr.num_irqs;
+
+ if (!intr_num) {
+ sdk_err(dev->dev_hdl, "Irq num cfg in fw is zero\n");
+ return -EFAULT;
+ }
+ irq_info->alloc_info = kcalloc(intr_num, sizeof(*irq_info->alloc_info),
+ GFP_KERNEL);
+ if (!irq_info->alloc_info)
+ return -ENOMEM;
+
+ irq_info->num_irq_hw = intr_num;
+
+ cfg_mgmt->svc_cap.interrupt_type = intr_mode;
+
+ mutex_init(&irq_info->irq_mutex);
+
+ return 0;
+}
+
+static int cfg_enable_interrupt(struct hifc_hwdev *dev)
+{
+ struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt;
+ u16 nreq = cfg_mgmt->irq_param_info.num_irq_hw;
+
+ void *pcidev = dev->pcidev_hdl;
+ struct irq_alloc_info_st *irq_info;
+ struct msix_entry *entry;
+ u16 i = 0;
+ int actual_irq;
+
+ irq_info = cfg_mgmt->irq_param_info.alloc_info;
+
+ sdk_info(dev->dev_hdl, "Interrupt type: %d, irq num: %d.\n",
+ cfg_mgmt->svc_cap.interrupt_type, nreq);
+
+ switch (cfg_mgmt->svc_cap.interrupt_type) {
+ case INTR_TYPE_MSIX:
+
+ if (!nreq) {
+ sdk_err(dev->dev_hdl, "Interrupt number cannot be zero\n");
+ return -EINVAL;
+ }
+ entry = kcalloc(nreq, sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ for (i = 0; i < nreq; i++)
+ entry[i].entry = i;
+
+ actual_irq = pci_enable_msix_range(pcidev, entry,
+ VECTOR_THRESHOLD, nreq);
+ if (actual_irq < 0) {
+ sdk_err(dev->dev_hdl, "Alloc msix entries with threshold 2 failed.\n");
+ kfree(entry);
+ return -ENOMEM;
+ }
+
+ nreq = (u16)actual_irq;
+ cfg_mgmt->irq_param_info.num_total = nreq;
+ cfg_mgmt->irq_param_info.num_irq_remain = nreq;
+ sdk_info(dev->dev_hdl, "Request %d msix vector success.\n",
+ nreq);
+
+ for (i = 0; i < nreq; ++i) {
+			/* entry index (u16) that the driver requested */
+ irq_info[i].info.msix_entry_idx = entry[i].entry;
+			/* interrupt vector (u32) the kernel allocated for this entry */
+ irq_info[i].info.irq_id = entry[i].vector;
+ irq_info[i].type = SERVICE_T_MAX;
+ irq_info[i].free = CFG_FREE;
+ }
+
+ kfree(entry);
+
+ break;
+
+ default:
+		sdk_err(dev->dev_hdl, "Unsupported interrupt type %d\n",
+ cfg_mgmt->svc_cap.interrupt_type);
+ break;
+ }
+
+ return 0;
+}
+
+int hifc_alloc_irqs(void *hwdev, enum hifc_service_type type, u16 num,
+ struct irq_info *irq_info_array, u16 *act_num)
+{
+ struct hifc_hwdev *dev = hwdev;
+ struct cfg_mgmt_info *cfg_mgmt;
+ struct cfg_irq_info *irq_info;
+ struct irq_alloc_info_st *alloc_info;
+ int max_num_irq;
+ u16 free_num_irq;
+ int i, j;
+
+ if (!hwdev || !irq_info_array || !act_num)
+ return -EINVAL;
+
+ cfg_mgmt = dev->cfg_mgmt;
+ irq_info = &cfg_mgmt->irq_param_info;
+ alloc_info = irq_info->alloc_info;
+ max_num_irq = irq_info->num_total;
+ free_num_irq = irq_info->num_irq_remain;
+
+ mutex_lock(&irq_info->irq_mutex);
+
+ if (num > free_num_irq) {
+ if (free_num_irq == 0) {
+ sdk_err(dev->dev_hdl,
+ "no free irq resource in cfg mgmt.\n");
+ mutex_unlock(&irq_info->irq_mutex);
+ return -ENOMEM;
+ }
+
+ sdk_warn(dev->dev_hdl, "only %d irq resource in cfg mgmt.\n",
+ free_num_irq);
+ num = free_num_irq;
+ }
+
+ *act_num = 0;
+
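+	/* Walk the allocation table and hand out free entries, limited by
+	 * the number of IRQs still unassigned.
+	 */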
+ for (i = 0; i < num; i++) {
+ for (j = 0; j < max_num_irq; j++) {
+ if (alloc_info[j].free == CFG_FREE) {
+ if (irq_info->num_irq_remain == 0) {
+ sdk_err(dev->dev_hdl, "No free irq resource in cfg mgmt\n");
+ mutex_unlock(&irq_info->irq_mutex);
+ return -EINVAL;
+ }
+ alloc_info[j].type = type;
+ alloc_info[j].free = CFG_BUSY;
+
+ irq_info_array[i].msix_entry_idx =
+ alloc_info[j].info.msix_entry_idx;
+ irq_info_array[i].irq_id =
+ alloc_info[j].info.irq_id;
+ (*act_num)++;
+ irq_info->num_irq_remain--;
+
+ break;
+ }
+ }
+ }
+
+ mutex_unlock(&irq_info->irq_mutex);
+ return 0;
+}
+
+void hifc_free_irq(void *hwdev, enum hifc_service_type type, u32 irq_id)
+{
+ struct hifc_hwdev *dev = hwdev;
+ struct cfg_mgmt_info *cfg_mgmt;
+ struct cfg_irq_info *irq_info;
+ struct irq_alloc_info_st *alloc_info;
+ int max_num_irq;
+ int i;
+
+ if (!hwdev)
+ return;
+
+ cfg_mgmt = dev->cfg_mgmt;
+ irq_info = &cfg_mgmt->irq_param_info;
+ alloc_info = irq_info->alloc_info;
+ max_num_irq = irq_info->num_total;
+
+ mutex_lock(&irq_info->irq_mutex);
+
+ for (i = 0; i < max_num_irq; i++) {
+ if (irq_id == alloc_info[i].info.irq_id &&
+ type == alloc_info[i].type) {
+ if (alloc_info[i].free == CFG_BUSY) {
+ alloc_info[i].free = CFG_FREE;
+ irq_info->num_irq_remain++;
+ if (irq_info->num_irq_remain > max_num_irq) {
+					sdk_err(dev->dev_hdl, "Found target irq, but out of range\n");
+ mutex_unlock(&irq_info->irq_mutex);
+ return;
+ }
+ break;
+ }
+ }
+ }
+
+ if (i >= max_num_irq)
+		sdk_warn(dev->dev_hdl, "Irq %d does not need to be freed\n", irq_id);
+
+ mutex_unlock(&irq_info->irq_mutex);
+}
+
+int init_cfg_mgmt(struct hifc_hwdev *dev)
+{
+ int err;
+ struct cfg_mgmt_info *cfg_mgmt;
+
+ cfg_mgmt = kzalloc(sizeof(*cfg_mgmt), GFP_KERNEL);
+ if (!cfg_mgmt)
+ return -ENOMEM;
+
+ dev->cfg_mgmt = cfg_mgmt;
+ cfg_mgmt->hwdev = dev;
+
+ err = cfg_init_eq(dev);
+ if (err) {
+ sdk_err(dev->dev_hdl, "Failed to init cfg event queue, err: %d\n",
+ err);
+ goto free_mgmt_mem;
+ }
+
+ err = cfg_init_interrupt(dev);
+ if (err) {
+ sdk_err(dev->dev_hdl, "Failed to init cfg interrupt, err: %d\n",
+ err);
+ goto free_eq_mem;
+ }
+
+ err = cfg_enable_interrupt(dev);
+ if (err) {
+ sdk_err(dev->dev_hdl, "Failed to enable cfg interrupt, err: %d\n",
+ err);
+ goto free_interrupt_mem;
+ }
+
+ return 0;
+
+free_interrupt_mem:
+ kfree(cfg_mgmt->irq_param_info.alloc_info);
+
+ cfg_mgmt->irq_param_info.alloc_info = NULL;
+
+free_eq_mem:
+ kfree(cfg_mgmt->eq_info.eq);
+
+ cfg_mgmt->eq_info.eq = NULL;
+
+free_mgmt_mem:
+ kfree(cfg_mgmt);
+ return err;
+}
+
+void free_cfg_mgmt(struct hifc_hwdev *dev)
+{
+ struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt;
+
+	/* check that all allocated resources have been reclaimed */
+ if (cfg_mgmt->irq_param_info.num_irq_remain !=
+ cfg_mgmt->irq_param_info.num_total ||
+ cfg_mgmt->eq_info.num_ceq_remain != cfg_mgmt->eq_info.num_ceq)
+ sdk_err(dev->dev_hdl, "Can't reclaim all irq and event queue, please check\n");
+
+ switch (cfg_mgmt->svc_cap.interrupt_type) {
+ case INTR_TYPE_MSIX:
+ pci_disable_msix(dev->pcidev_hdl);
+ break;
+
+ case INTR_TYPE_MSI:
+ pci_disable_msi(dev->pcidev_hdl);
+ break;
+
+ case INTR_TYPE_INT:
+ default:
+ break;
+ }
+
+ kfree(cfg_mgmt->irq_param_info.alloc_info);
+ cfg_mgmt->irq_param_info.alloc_info = NULL;
+
+ kfree(cfg_mgmt->eq_info.eq);
+ cfg_mgmt->eq_info.eq = NULL;
+
+ kfree(cfg_mgmt);
+}
+
+int init_capability(struct hifc_hwdev *dev)
+{
+ int err;
+ enum func_type type = HIFC_FUNC_TYPE(dev);
+ struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt;
+
+ cfg_mgmt->svc_cap.timer_en = 1;
+ cfg_mgmt->svc_cap.test_xid_alloc_mode = 1;
+ cfg_mgmt->svc_cap.test_gpa_check_enable = 1;
+
+ err = get_cap_from_fw(dev, type);
+ if (err) {
+ sdk_err(dev->dev_hdl, "Failed to get PF/PPF capability\n");
+ return err;
+ }
+
+ fc_param_fix(dev);
+
+ if (dev->cfg_mgmt->svc_cap.force_up)
+ dev->feature_cap |= HIFC_FUNC_FORCE_LINK_UP;
+
+ sdk_info(dev->dev_hdl, "Init capability success\n");
+ return 0;
+}
+
+void free_capability(struct hifc_hwdev *dev)
+{
+	sdk_info(dev->dev_hdl, "Free capability success\n");
+}
+
+bool hifc_support_fc(void *hwdev, struct fc_service_cap *cap)
+{
+ struct hifc_hwdev *dev = hwdev;
+
+ if (!hwdev)
+ return false;
+
+ if (cap)
+ memcpy(cap, &dev->cfg_mgmt->svc_cap.fc_cap, sizeof(*cap));
+
+ return true;
+}
+
+u8 hifc_host_oq_id_mask(void *hwdev)
+{
+ struct hifc_hwdev *dev = hwdev;
+
+ if (!dev) {
+ pr_err("Hwdev pointer is NULL for getting host oq id mask\n");
+ return 0;
+ }
+ return dev->cfg_mgmt->svc_cap.host_oq_id_mask_val;
+}
+
+u16 hifc_func_max_qnum(void *hwdev)
+{
+ struct hifc_hwdev *dev = hwdev;
+
+ if (!dev) {
+ pr_err("Hwdev pointer is NULL for getting function max queue number\n");
+ return 0;
+ }
+ return dev->cfg_mgmt->svc_cap.max_sqs;
+}
+
+/* Caller should ensure atomicity when calling this function */
+int hifc_stateful_init(void *hwdev)
+{
+ struct hifc_hwdev *dev = hwdev;
+ int err;
+
+ if (!dev)
+ return -EINVAL;
+
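+	/* Reference counted: only the first caller initializes the CQM */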
+ if (dev->statufull_ref_cnt++)
+ return 0;
+
+ err = cqm_init(dev);
+ if (err) {
+ sdk_err(dev->dev_hdl, "Failed to init cqm, err: %d\n", err);
+ goto init_cqm_err;
+ }
+
+	sdk_info(dev->dev_hdl, "Initialize stateful resource success\n");
+
+ return 0;
+
+init_cqm_err:
+
+ dev->statufull_ref_cnt--;
+
+ return err;
+}
+
+/* Caller should ensure atomicity when calling this function */
+void hifc_stateful_deinit(void *hwdev)
+{
+ struct hifc_hwdev *dev = hwdev;
+
+ if (!dev || !dev->statufull_ref_cnt)
+ return;
+
+ if (--dev->statufull_ref_cnt)
+ return;
+
+ cqm_uninit(hwdev);
+
+	sdk_info(dev->dev_hdl, "Clear stateful resource success\n");
+}
+
+bool hifc_is_hwdev_mod_inited(void *hwdev, enum hifc_hwdev_init_state state)
+{
+ struct hifc_hwdev *dev = hwdev;
+
+ if (!hwdev || state >= HIFC_HWDEV_MAX_INVAL_INITED)
+ return false;
+
+ return !!test_bit(state, &dev->func_state);
+}
+
+static int hifc_os_dep_init(struct hifc_hwdev *hwdev)
+{
+ hwdev->workq = create_singlethread_workqueue(HIFC_HW_WQ_NAME);
+ if (!hwdev->workq) {
+ sdk_err(hwdev->dev_hdl, "Failed to initialize hardware workqueue\n");
+ return -EFAULT;
+ }
+
+ sema_init(&hwdev->fault_list_sem, 1);
+
+ return 0;
+}
+
+static void hifc_os_dep_deinit(struct hifc_hwdev *hwdev)
+{
+ destroy_workqueue(hwdev->workq);
+}
+
+static int __hilink_phy_init(struct hifc_hwdev *hwdev)
+{
+ int err;
+
+ err = hifc_phy_init_status_judge(hwdev);
+ if (err) {
+ sdk_info(hwdev->dev_hdl, "Phy init failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int init_hwdev_and_hwif(struct hifc_init_para *para)
+{
+ struct hifc_hwdev *hwdev;
+ int err;
+
+ if (!(*para->hwdev)) {
+ hwdev = kzalloc(sizeof(*hwdev), GFP_KERNEL);
+ if (!hwdev)
+ return -ENOMEM;
+
+ *para->hwdev = hwdev;
+ hwdev->adapter_hdl = para->adapter_hdl;
+ hwdev->pcidev_hdl = para->pcidev_hdl;
+ hwdev->dev_hdl = para->dev_hdl;
+ hwdev->chip_node = para->chip_node;
+
+ hwdev->chip_fault_stats = vzalloc(HIFC_CHIP_FAULT_SIZE);
+ if (!hwdev->chip_fault_stats)
+ goto alloc_chip_fault_stats_err;
+
+ err = hifc_init_hwif(hwdev, para->cfg_reg_base,
+ para->intr_reg_base,
+ para->db_base_phy, para->db_base,
+ para->dwqe_mapping);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to init hwif\n");
+ goto init_hwif_err;
+ }
+ }
+
+ return 0;
+
+init_hwif_err:
+ vfree(hwdev->chip_fault_stats);
+
+alloc_chip_fault_stats_err:
+ /* free the hwdev allocated above so the error path does not leak it */
+ kfree(hwdev);
+ *para->hwdev = NULL;
+
+ return -EFAULT;
+}
+
+static void deinit_hwdev_and_hwif(struct hifc_hwdev *hwdev)
+{
+ hifc_free_hwif(hwdev);
+
+ vfree(hwdev->chip_fault_stats);
+
+ kfree(hwdev);
+}
+
+static int init_hw_cfg(struct hifc_hwdev *hwdev)
+{
+ int err;
+
+ err = init_capability(hwdev);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to init capability\n");
+ return err;
+ }
+
+ err = __hilink_phy_init(hwdev);
+ if (err)
+ goto hilink_phy_init_err;
+
+ return 0;
+
+hilink_phy_init_err:
+ free_capability(hwdev);
+
+ return err;
+}
+
+/* Return:
+ * 0: all success
+ * >0: partial success
+ * <0: all failed
+ */
+int hifc_init_hwdev(struct hifc_init_para *para)
+{
+ struct hifc_hwdev *hwdev;
+ int err;
+
+ err = init_hwdev_and_hwif(para);
+ if (err)
+ return err;
+
+ hwdev = *para->hwdev;
+
+ /* detect slave host according to BAR reg */
+ hwdev->feature_cap = HIFC_FUNC_MGMT | HIFC_FUNC_PORT |
+ HIFC_FUNC_SUPP_RATE_LIMIT | HIFC_FUNC_SUPP_DFX_REG |
+ HIFC_FUNC_SUPP_RX_MODE | HIFC_FUNC_SUPP_SET_VF_MAC_VLAN |
+ HIFC_FUNC_SUPP_CHANGE_MAC;
+
+ err = hifc_os_dep_init(hwdev);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to init os dependent\n");
+ goto os_dep_init_err;
+ }
+
+ hifc_set_chip_present(hwdev);
+ hifc_init_heartbeat(hwdev);
+
+ err = init_cfg_mgmt(hwdev);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to init config mgmt\n");
+ goto init_cfg_mgmt_err;
+ }
+
+ err = hifc_init_comm_ch(hwdev);
+ if (err) {
+ if (!(hwdev->func_state & HIFC_HWDEV_INIT_MODES_MASK)) {
+ sdk_err(hwdev->dev_hdl, "Failed to init communication channel\n");
+ goto init_comm_ch_err;
+ } else {
+ sdk_err(hwdev->dev_hdl, "Init communication channel partially failed\n");
+ return hwdev->func_state & HIFC_HWDEV_INIT_MODES_MASK;
+ }
+ }
+
+ err = init_hw_cfg(hwdev);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to init hardware config\n");
+ goto init_hw_cfg_err;
+ }
+
+ set_bit(HIFC_HWDEV_ALL_INITED, &hwdev->func_state);
+
+ sdk_info(hwdev->dev_hdl, "Init hwdev success\n");
+
+ return 0;
+
+init_hw_cfg_err:
+ return (hwdev->func_state & HIFC_HWDEV_INIT_MODES_MASK);
+
+init_comm_ch_err:
+ free_cfg_mgmt(hwdev);
+
+init_cfg_mgmt_err:
+ hifc_destroy_heartbeat(hwdev);
+ hifc_os_dep_deinit(hwdev);
+
+os_dep_init_err:
+ deinit_hwdev_and_hwif(hwdev);
+ *para->hwdev = NULL;
+
+ return -EFAULT;
+}
+
+void hifc_free_hwdev(void *hwdev)
+{
+ struct hifc_hwdev *dev = hwdev;
+ enum hifc_hwdev_init_state state = HIFC_HWDEV_ALL_INITED;
+ int flag = 0;
+
+ if (!hwdev)
+ return;
+
+ if (test_bit(HIFC_HWDEV_ALL_INITED, &dev->func_state)) {
+ clear_bit(HIFC_HWDEV_ALL_INITED, &dev->func_state);
+
+ /* BM slave functions do not need to execute rx_tx_flush */
+
+ hifc_func_rx_tx_flush(hwdev);
+
+ free_capability(dev);
+ }
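+ /* walk the init-state bits from the highest state down; if any stage
+ * is still marked as initialized, tear down the communication channel
+ * and the management resources
+ */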
+ while (state > HIFC_HWDEV_NONE_INITED) {
+ if (test_bit(state, &dev->func_state)) {
+ flag = 1;
+ break;
+ }
+ state--;
+ }
+ if (flag) {
+ hifc_uninit_comm_ch(dev);
+ free_cfg_mgmt(dev);
+ hifc_destroy_heartbeat(dev);
+ hifc_os_dep_deinit(dev);
+ }
+ clear_bit(HIFC_HWDEV_NONE_INITED, &dev->func_state);
+
+ deinit_hwdev_and_hwif(dev);
+}
+
+u64 hifc_get_func_feature_cap(void *hwdev)
+{
+ struct hifc_hwdev *dev = hwdev;
+
+ if (!dev) {
+ pr_err("Hwdev pointer is NULL for getting function feature capability\n");
+ return 0;
+ }
+
+ return dev->feature_cap;
+}
+
diff --git a/drivers/scsi/huawei/hifc/hifc_cfg.h b/drivers/scsi/huawei/hifc/hifc_cfg.h
new file mode 100644
index 000000000000..b8a9dd35b1fd
--- /dev/null
+++ b/drivers/scsi/huawei/hifc/hifc_cfg.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei Hifc PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ */
+
+#ifndef __CFG_MGT_H__
+#define __CFG_MGT_H__
+
+enum {
+ CFG_FREE = 0,
+ CFG_BUSY = 1
+};
+
+/* FC */
+#define FC_PCTX_SZ 256
+#define FC_CCTX_SZ 256
+#define FC_SQE_SZ 128
+#define FC_SCQC_SZ 64
+#define FC_SCQE_SZ 64
+#define FC_SRQC_SZ 64
+#define FC_SRQE_SZ 32
+
+/* device capability */
+struct service_cap {
+ /* Host global resources */
+ u16 host_total_function;
+ u8 host_oq_id_mask_val;
+
+ /* DO NOT get interrupt_type from firmware */
+ enum intr_type interrupt_type;
+ u8 intr_chip_en;
+
+ u8 port_id; /* PF/VF's physical port */
+ u8 force_up;
+
+ u8 timer_en; /* 0:disable, 1:enable */
+
+ u16 max_sqs;
+ u16 max_rqs;
+
+ /* For test */
+ bool test_xid_alloc_mode;
+ bool test_gpa_check_enable;
+
+ u32 max_connect_num; /* PF/VF maximum connection number (1M) */
+ /* maximum number of connections that can be pinned in cache memory, up to 1K */
+ u16 max_stick2cache_num;
+
+ struct nic_service_cap nic_cap; /* NIC capability */
+ struct fc_service_cap fc_cap; /* FC capability */
+};
+
+struct hifc_sync_time_info {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u64 mstime;
+};
+
+struct cfg_eq {
+ enum hifc_service_type type;
+ int eqn;
+ int free; /* 1 - allocated, 0 - freed */
+};
+
+struct cfg_eq_info {
+ struct cfg_eq *eq;
+ u8 num_ceq;
+ u8 num_ceq_remain;
+ /* mutex used for allocating EQs */
+ struct mutex eq_mutex;
+};
+
+struct irq_alloc_info_st {
+ enum hifc_service_type type;
+ int free; /* 1 - allocated, 0 - freed */
+ struct irq_info info;
+};
+
+struct cfg_irq_info {
+ struct irq_alloc_info_st *alloc_info;
+ u16 num_total;
+ u16 num_irq_remain;
+ u16 num_irq_hw; /* device max irq number */
+
+ /* mutex used for allocating IRQs */
+ struct mutex irq_mutex;
+};
+
+#define VECTOR_THRESHOLD 2
+
+struct cfg_mgmt_info {
+ struct hifc_hwdev *hwdev;
+ struct service_cap svc_cap;
+ struct cfg_eq_info eq_info; /* EQ */
+ struct cfg_irq_info irq_param_info; /* IRQ */
+ u32 func_seq_num; /* temporary */
+};
+
+enum cfg_sub_cmd {
+ /* PPF(PF) <-> FW */
+ HIFC_CFG_NIC_CAP = 0,
+ CFG_FW_VERSION,
+ CFG_UCODE_VERSION,
+ HIFC_CFG_FUNC_CAP,
+ HIFC_CFG_MBOX_CAP = 6,
+};
+
+struct hifc_dev_cap {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ /* Public resource */
+ u8 sf_svc_attr;
+ u8 host_id;
+ u8 sf_en_pf;
+ u8 sf_en_vf;
+
+ u8 ep_id;
+ u8 intr_type;
+ u8 max_cos_id;
+ u8 er_id;
+ u8 port_id;
+ u8 max_vf;
+ u16 svc_cap_en;
+ u16 host_total_func;
+ u8 host_oq_id_mask_val;
+ u8 max_vf_cos_id;
+
+ u32 max_conn_num;
+ u16 max_stick2cache_num;
+ u16 max_bfilter_start_addr;
+ u16 bfilter_len;
+ u16 hash_bucket_num;
+ u8 cfg_file_ver;
+ u8 net_port_mode;
+ u8 valid_cos_bitmap; /* each bit indicates whether the cos is valid */
+ u8 force_up;
+ u32 pf_num;
+ u32 pf_id_start;
+ u32 vf_num;
+ u32 vf_id_start;
+
+ /* shared resource */
+ u32 host_pctx_num;
+ u8 host_sf_en;
+ u8 rsvd2[3];
+ u32 host_ccxt_num;
+ u32 host_scq_num;
+ u32 host_srq_num;
+ u32 host_mpt_num;
+ /* l2nic */
+ u16 nic_max_sq;
+ u16 nic_max_rq;
+ u32 rsvd[46];
+ /* FC */
+ u32 fc_max_pctx;
+ u32 fc_max_scq;
+ u32 fc_max_srq;
+
+ u32 fc_max_cctx;
+ u32 fc_cctx_id_start;
+
+ u8 fc_vp_id_start;
+ u8 fc_vp_id_end;
+ u16 func_id;
+};
+#endif
diff --git a/drivers/scsi/huawei/hifc/hifc_cmdq.c b/drivers/scsi/huawei/hifc/hifc_cmdq.c
new file mode 100644
index 000000000000..03531017c412
--- /dev/null
+++ b/drivers/scsi/huawei/hifc/hifc_cmdq.c
@@ -0,0 +1,1507 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei Hifc PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/errno.h>
+#include <linux/completion.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include "hifc_knl_adp.h"
+#include "hifc_hw.h"
+#include "hifc_hwdev.h"
+#include "hifc_hwif.h"
+#include "hifc_wq.h"
+#include "hifc_api_cmd.h"
+#include "hifc_mgmt.h"
+#include "hifc_eqs.h"
+#include "hifc_cmdq.h"
+
+#define CMDQ_CMD_TIMEOUT 1000 /* millisecond */
+#define UPPER_8_BITS(data) (((data) >> 8) & 0xFF)
+#define LOWER_8_BITS(data) ((data) & 0xFF)
+
+#define CMDQ_DB_INFO_HI_PROD_IDX_SHIFT 0
+#define CMDQ_DB_INFO_QUEUE_TYPE_SHIFT 23
+#define CMDQ_DB_INFO_CMDQ_TYPE_SHIFT 24
+#define CMDQ_DB_INFO_SRC_TYPE_SHIFT 27
+#define CMDQ_DB_INFO_HI_PROD_IDX_MASK 0xFFU
+#define CMDQ_DB_INFO_QUEUE_TYPE_MASK 0x1U
+#define CMDQ_DB_INFO_CMDQ_TYPE_MASK 0x7U
+#define CMDQ_DB_INFO_SRC_TYPE_MASK 0x1FU
+
+#define CMDQ_DB_INFO_SET(val, member) \
+ (((val) & CMDQ_DB_INFO_##member##_MASK) << \
+ CMDQ_DB_INFO_##member##_SHIFT)
+
+#define CMDQ_CTRL_PI_SHIFT 0
+#define CMDQ_CTRL_CMD_SHIFT 16
+#define CMDQ_CTRL_MOD_SHIFT 24
+#define CMDQ_CTRL_ACK_TYPE_SHIFT 29
+#define CMDQ_CTRL_HW_BUSY_BIT_SHIFT 31
+#define CMDQ_CTRL_PI_MASK 0xFFFFU
+#define CMDQ_CTRL_CMD_MASK 0xFFU
+#define CMDQ_CTRL_MOD_MASK 0x1FU
+#define CMDQ_CTRL_ACK_TYPE_MASK 0x3U
+#define CMDQ_CTRL_HW_BUSY_BIT_MASK 0x1U
+
+#define CMDQ_CTRL_SET(val, member) \
+ (((val) & CMDQ_CTRL_##member##_MASK) \
+ << CMDQ_CTRL_##member##_SHIFT)
+
+#define CMDQ_CTRL_GET(val, member) \
+ (((val) >> CMDQ_CTRL_##member##_SHIFT) \
+ & CMDQ_CTRL_##member##_MASK)
+
+#define CMDQ_WQE_HEADER_BUFDESC_LEN_SHIFT 0
+#define CMDQ_WQE_HEADER_COMPLETE_FMT_SHIFT 15
+#define CMDQ_WQE_HEADER_DATA_FMT_SHIFT 22
+#define CMDQ_WQE_HEADER_COMPLETE_REQ_SHIFT 23
+#define CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_SHIFT 27
+#define CMDQ_WQE_HEADER_CTRL_LEN_SHIFT 29
+#define CMDQ_WQE_HEADER_HW_BUSY_BIT_SHIFT 31
+
+#define CMDQ_WQE_HEADER_BUFDESC_LEN_MASK 0xFFU
+#define CMDQ_WQE_HEADER_COMPLETE_FMT_MASK 0x1U
+#define CMDQ_WQE_HEADER_DATA_FMT_MASK 0x1U
+#define CMDQ_WQE_HEADER_COMPLETE_REQ_MASK 0x1U
+#define CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_MASK 0x3U
+#define CMDQ_WQE_HEADER_CTRL_LEN_MASK 0x3U
+#define CMDQ_WQE_HEADER_HW_BUSY_BIT_MASK 0x1U
+
+#define CMDQ_WQE_HEADER_SET(val, member) \
+ (((val) & CMDQ_WQE_HEADER_##member##_MASK) \
+ << CMDQ_WQE_HEADER_##member##_SHIFT)
+
+#define CMDQ_WQE_HEADER_GET(val, member) \
+ (((val) >> CMDQ_WQE_HEADER_##member##_SHIFT) \
+ & CMDQ_WQE_HEADER_##member##_MASK)
+
+#define CMDQ_CTXT_CURR_WQE_PAGE_PFN_SHIFT 0
+#define CMDQ_CTXT_EQ_ID_SHIFT 56
+#define CMDQ_CTXT_CEQ_ARM_SHIFT 61
+#define CMDQ_CTXT_CEQ_EN_SHIFT 62
+#define CMDQ_CTXT_HW_BUSY_BIT_SHIFT 63
+#define CMDQ_CTXT_CURR_WQE_PAGE_PFN_MASK 0xFFFFFFFFFFFFF
+#define CMDQ_CTXT_EQ_ID_MASK 0x1F
+#define CMDQ_CTXT_CEQ_ARM_MASK 0x1
+#define CMDQ_CTXT_CEQ_EN_MASK 0x1
+#define CMDQ_CTXT_HW_BUSY_BIT_MASK 0x1
+
+#define CMDQ_CTXT_PAGE_INFO_SET(val, member) \
+ (((u64)(val) & CMDQ_CTXT_##member##_MASK) \
+ << CMDQ_CTXT_##member##_SHIFT)
+
+#define CMDQ_CTXT_PAGE_INFO_GET(val, member) \
+ (((u64)(val) >> CMDQ_CTXT_##member##_SHIFT) \
+ & CMDQ_CTXT_##member##_MASK)
+
+#define CMDQ_CTXT_WQ_BLOCK_PFN_SHIFT 0
+#define CMDQ_CTXT_CI_SHIFT 52
+#define CMDQ_CTXT_WQ_BLOCK_PFN_MASK 0xFFFFFFFFFFFFF
+#define CMDQ_CTXT_CI_MASK 0xFFF
+
+#define CMDQ_CTXT_BLOCK_INFO_SET(val, member) \
+ (((u64)(val) & CMDQ_CTXT_##member##_MASK) \
+ << CMDQ_CTXT_##member##_SHIFT)
+
+#define CMDQ_CTXT_BLOCK_INFO_GET(val, member) \
+ (((u64)(val) >> CMDQ_CTXT_##member##_SHIFT) \
+ & CMDQ_CTXT_##member##_MASK)
+
+#define SAVED_DATA_ARM_SHIFT 31
+#define SAVED_DATA_ARM_MASK 0x1U
+
+#define SAVED_DATA_SET(val, member) \
+ (((val) & SAVED_DATA_##member##_MASK) \
+ << SAVED_DATA_##member##_SHIFT)
+
+#define SAVED_DATA_CLEAR(val, member) \
+ ((val) & (~(SAVED_DATA_##member##_MASK \
+ << SAVED_DATA_##member##_SHIFT)))
+
+#define WQE_ERRCODE_VAL_SHIFT 20
+#define WQE_ERRCODE_VAL_MASK 0xF
+
+#define WQE_ERRCODE_GET(val, member) \
+ (((val) >> WQE_ERRCODE_##member##_SHIFT) & \
+ WQE_ERRCODE_##member##_MASK)
+
+#define CEQE_CMDQ_TYPE_SHIFT 0
+#define CEQE_CMDQ_TYPE_MASK 0x7
+
+#define CEQE_CMDQ_GET(val, member) \
+ (((val) >> CEQE_CMDQ_##member##_SHIFT) & CEQE_CMDQ_##member##_MASK)
+
+#define WQE_COMPLETED(ctrl_info) CMDQ_CTRL_GET(ctrl_info, HW_BUSY_BIT)
+
+#define WQE_HEADER(wqe) ((struct hifc_cmdq_header *)(wqe))
+
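+/* The doorbell address encodes the low 8 bits of the producer index,
+ * shifted left by 3, on top of the doorbell page offset.
+ */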
+#define CMDQ_DB_PI_OFF(pi) (((u16)LOWER_8_BITS(pi)) << 3)
+
+#define CMDQ_DB_ADDR(db_base, pi) \
+ (((u8 *)(db_base) + HIFC_DB_OFF) + CMDQ_DB_PI_OFF(pi))
+
+#define CMDQ_PFN_SHIFT 12
+#define CMDQ_PFN(addr) ((addr) >> CMDQ_PFN_SHIFT)
+
+#define FIRST_DATA_TO_WRITE_LAST sizeof(u64)
+#define WQE_LCMD_SIZE 64
+#define WQE_SCMD_SIZE 64
+#define COMPLETE_LEN 3
+#define CMDQ_WQEBB_SIZE 64
+#define CMDQ_WQE_SIZE 64
+#define CMDQ_WQ_PAGE_SIZE 4096
+
+#define WQE_NUM_WQEBBS(wqe_size, wq) \
+ ((u16)(ALIGN((u32)(wqe_size), (wq)->wqebb_size) / (wq)->wqebb_size))
+
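+/* recover the hifc_cmdqs container from one element of its cmdq[] array
+ * by stepping back to cmdq[0] and using container_of
+ */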
+#define cmdq_to_cmdqs(cmdq) container_of((cmdq) - (cmdq)->cmdq_type, \
+ struct hifc_cmdqs, cmdq[0])
+
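+/* completion codes used to tell a command that was only sent apart from one
+ * whose completion has already been observed when handling timeouts
+ */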
+#define CMDQ_SEND_CMPT_CODE 10
+#define CMDQ_COMPLETE_CMPT_CODE 11
+
+#define HIFC_GET_CMDQ_FREE_WQEBBS(cmdq_wq) \
+ atomic_read(&(cmdq_wq)->delta)
+
+enum cmdq_scmd_type {
+ CMDQ_SET_ARM_CMD = 2,
+};
+
+enum cmdq_wqe_type {
+ WQE_LCMD_TYPE,
+ WQE_SCMD_TYPE,
+};
+
+enum ctrl_sect_len {
+ CTRL_SECT_LEN = 1,
+ CTRL_DIRECT_SECT_LEN = 2,
+};
+
+enum bufdesc_len {
+ BUFDESC_LCMD_LEN = 2,
+ BUFDESC_SCMD_LEN = 3,
+};
+
+enum data_format {
+ DATA_SGE,
+ DATA_DIRECT,
+};
+
+enum completion_format {
+ COMPLETE_DIRECT,
+ COMPLETE_SGE,
+};
+
+enum completion_request {
+ CEQ_SET = 1,
+};
+
+enum cmdq_cmd_type {
+ SYNC_CMD_DIRECT_RESP,
+ SYNC_CMD_SGE_RESP,
+ ASYNC_CMD,
+};
+
+bool hifc_cmdq_idle(struct hifc_cmdq *cmdq)
+{
+ struct hifc_wq *wq = cmdq->wq;
+
+ return atomic_read(&wq->delta) == wq->q_depth;
+}
+
+struct hifc_cmd_buf *hifc_alloc_cmd_buf(void *hwdev)
+{
+ struct hifc_cmdqs *cmdqs;
+ struct hifc_cmd_buf *cmd_buf;
+ void *dev;
+
+ if (!hwdev) {
+ pr_err("Failed to alloc cmd buf, Invalid hwdev\n");
+ return NULL;
+ }
+
+ cmdqs = ((struct hifc_hwdev *)hwdev)->cmdqs;
+ dev = ((struct hifc_hwdev *)hwdev)->dev_hdl;
+
+ cmd_buf = kzalloc(sizeof(*cmd_buf), GFP_ATOMIC);
+ if (!cmd_buf)
+ return NULL;
+
+ cmd_buf->buf = pci_pool_alloc(cmdqs->cmd_buf_pool, GFP_ATOMIC,
+ &cmd_buf->dma_addr);
+ if (!cmd_buf->buf) {
+ sdk_err(dev, "Failed to allocate cmdq cmd buf from the pool\n");
+ goto alloc_pci_buf_err;
+ }
+
+ return cmd_buf;
+
+alloc_pci_buf_err:
+ kfree(cmd_buf);
+ return NULL;
+}
+
+void hifc_free_cmd_buf(void *hwdev, struct hifc_cmd_buf *cmd_buf)
+{
+ struct hifc_cmdqs *cmdqs;
+
+ if (!hwdev || !cmd_buf) {
+ pr_err("Failed to free cmd buf\n");
+ return;
+ }
+
+ cmdqs = ((struct hifc_hwdev *)hwdev)->cmdqs;
+
+ pci_pool_free(cmdqs->cmd_buf_pool, cmd_buf->buf, cmd_buf->dma_addr);
+ kfree(cmd_buf);
+}
+
+static int cmdq_wqe_size(enum cmdq_wqe_type wqe_type)
+{
+ int wqe_size = 0;
+
+ switch (wqe_type) {
+ case WQE_LCMD_TYPE:
+ wqe_size = WQE_LCMD_SIZE;
+ break;
+ case WQE_SCMD_TYPE:
+ wqe_size = WQE_SCMD_SIZE;
+ break;
+ }
+
+ return wqe_size;
+}
+
+static int cmdq_get_wqe_size(enum bufdesc_len len)
+{
+ int wqe_size = 0;
+
+ switch (len) {
+ case BUFDESC_LCMD_LEN:
+ wqe_size = WQE_LCMD_SIZE;
+ break;
+ case BUFDESC_SCMD_LEN:
+ wqe_size = WQE_SCMD_SIZE;
+ break;
+ }
+
+ return wqe_size;
+}
+
+static void cmdq_set_completion(struct hifc_cmdq_completion *complete,
+ struct hifc_cmd_buf *buf_out)
+{
+ struct hifc_sge_resp *sge_resp = &complete->sge_resp;
+
+ hifc_set_sge(&sge_resp->sge, buf_out->dma_addr,
+ HIFC_CMDQ_BUF_SIZE);
+}
+
+static void cmdq_set_lcmd_bufdesc(struct hifc_cmdq_wqe_lcmd *wqe,
+ struct hifc_cmd_buf *buf_in)
+{
+ hifc_set_sge(&wqe->buf_desc.sge, buf_in->dma_addr, buf_in->size);
+}
+
+static void cmdq_set_inline_wqe_data(struct hifc_cmdq_inline_wqe *wqe,
+ const void *buf_in, u32 in_size)
+{
+ struct hifc_cmdq_wqe_scmd *wqe_scmd = &wqe->wqe_scmd;
+
+ wqe_scmd->buf_desc.buf_len = in_size;
+ memcpy(wqe_scmd->buf_desc.data, buf_in, in_size);
+}
+
+static void cmdq_fill_db(struct hifc_cmdq_db *db,
+ enum hifc_cmdq_type cmdq_type, u16 prod_idx)
+{
+ db->db_info = CMDQ_DB_INFO_SET(UPPER_8_BITS(prod_idx), HI_PROD_IDX) |
+ CMDQ_DB_INFO_SET(HIFC_DB_CMDQ_TYPE, QUEUE_TYPE) |
+ CMDQ_DB_INFO_SET(cmdq_type, CMDQ_TYPE) |
+ CMDQ_DB_INFO_SET(HIFC_DB_SRC_CMDQ_TYPE, SRC_TYPE);
+}
+
+static void cmdq_set_db(struct hifc_cmdq *cmdq,
+ enum hifc_cmdq_type cmdq_type, u16 prod_idx)
+{
+ struct hifc_cmdq_db db;
+
+ cmdq_fill_db(&db, cmdq_type, prod_idx);
+
+ /* The data that is written to HW should be in Big Endian Format */
+ db.db_info = cpu_to_be32(db.db_info);
+
+ wmb(); /* write all before the doorbell */
+ writel(db.db_info, CMDQ_DB_ADDR(cmdq->db_base, prod_idx));
+}
+
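+/* Copy the WQE body first and the 8-byte header last: the header carries the
+ * HW busy bit, so writing it last keeps hardware from consuming a partially
+ * written WQE.
+ */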
+static void cmdq_wqe_fill(void *dst, const void *src)
+{
+ memcpy((u8 *)dst + FIRST_DATA_TO_WRITE_LAST,
+ (u8 *)src + FIRST_DATA_TO_WRITE_LAST,
+ CMDQ_WQE_SIZE - FIRST_DATA_TO_WRITE_LAST);
+
+ wmb(); /* The first 8 bytes should be written last */
+
+ *(u64 *)dst = *(u64 *)src;
+}
+
+static void cmdq_prepare_wqe_ctrl(struct hifc_cmdq_wqe *wqe, int wrapped,
+ enum hifc_ack_type ack_type,
+ enum hifc_mod_type mod, u8 cmd, u16 prod_idx,
+ enum completion_format complete_format,
+ enum data_format data_format,
+ enum bufdesc_len buf_len)
+{
+ struct hifc_ctrl *ctrl;
+ enum ctrl_sect_len ctrl_len;
+ struct hifc_cmdq_wqe_lcmd *wqe_lcmd;
+ struct hifc_cmdq_wqe_scmd *wqe_scmd;
+ u32 saved_data = WQE_HEADER(wqe)->saved_data;
+
+ if (data_format == DATA_SGE) {
+ wqe_lcmd = &wqe->wqe_lcmd;
+
+ wqe_lcmd->status.status_info = 0;
+ ctrl = &wqe_lcmd->ctrl;
+ ctrl_len = CTRL_SECT_LEN;
+ } else {
+ wqe_scmd = &wqe->inline_wqe.wqe_scmd;
+
+ wqe_scmd->status.status_info = 0;
+ ctrl = &wqe_scmd->ctrl;
+ ctrl_len = CTRL_DIRECT_SECT_LEN;
+ }
+
+ ctrl->ctrl_info = CMDQ_CTRL_SET(prod_idx, PI) |
+ CMDQ_CTRL_SET(cmd, CMD) |
+ CMDQ_CTRL_SET(mod, MOD) |
+ CMDQ_CTRL_SET(ack_type, ACK_TYPE);
+
+ WQE_HEADER(wqe)->header_info =
+ CMDQ_WQE_HEADER_SET(buf_len, BUFDESC_LEN) |
+ CMDQ_WQE_HEADER_SET(complete_format, COMPLETE_FMT) |
+ CMDQ_WQE_HEADER_SET(data_format, DATA_FMT) |
+ CMDQ_WQE_HEADER_SET(CEQ_SET, COMPLETE_REQ) |
+ CMDQ_WQE_HEADER_SET(COMPLETE_LEN, COMPLETE_SECT_LEN) |
+ CMDQ_WQE_HEADER_SET(ctrl_len, CTRL_LEN) |
+ CMDQ_WQE_HEADER_SET((u32)wrapped, HW_BUSY_BIT);
+
+ if (cmd == CMDQ_SET_ARM_CMD && mod == HIFC_MOD_COMM) {
+ saved_data &= SAVED_DATA_CLEAR(saved_data, ARM);
+ WQE_HEADER(wqe)->saved_data = saved_data |
+ SAVED_DATA_SET(1, ARM);
+ } else {
+ saved_data &= SAVED_DATA_CLEAR(saved_data, ARM);
+ WQE_HEADER(wqe)->saved_data = saved_data;
+ }
+}
+
+static void cmdq_set_lcmd_wqe(struct hifc_cmdq_wqe *wqe,
+ enum cmdq_cmd_type cmd_type,
+ struct hifc_cmd_buf *buf_in,
+ struct hifc_cmd_buf *buf_out, int wrapped,
+ enum hifc_ack_type ack_type,
+ enum hifc_mod_type mod, u8 cmd, u16 prod_idx)
+{
+ struct hifc_cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd;
+ enum completion_format complete_format = COMPLETE_DIRECT;
+
+ switch (cmd_type) {
+ case SYNC_CMD_SGE_RESP:
+ if (buf_out) {
+ complete_format = COMPLETE_SGE;
+ cmdq_set_completion(&wqe_lcmd->completion, buf_out);
+ }
+ break;
+ case SYNC_CMD_DIRECT_RESP:
+ complete_format = COMPLETE_DIRECT;
+ wqe_lcmd->completion.direct_resp = 0;
+ break;
+ case ASYNC_CMD:
+ complete_format = COMPLETE_DIRECT;
+ wqe_lcmd->completion.direct_resp = 0;
+
+ wqe_lcmd->buf_desc.saved_async_buf = (u64)(buf_in);
+ break;
+ }
+
+ cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd,
+ prod_idx, complete_format, DATA_SGE,
+ BUFDESC_LCMD_LEN);
+
+ cmdq_set_lcmd_bufdesc(wqe_lcmd, buf_in);
+}
+
+static void cmdq_set_inline_wqe(struct hifc_cmdq_wqe *wqe,
+ enum cmdq_cmd_type cmd_type,
+ void *buf_in, u16 in_size,
+ struct hifc_cmd_buf *buf_out, int wrapped,
+ enum hifc_ack_type ack_type,
+ enum hifc_mod_type mod, u8 cmd, u16 prod_idx)
+{
+ struct hifc_cmdq_wqe_scmd *wqe_scmd = &wqe->inline_wqe.wqe_scmd;
+ enum completion_format complete_format = COMPLETE_DIRECT;
+
+ switch (cmd_type) {
+ case SYNC_CMD_SGE_RESP:
+ complete_format = COMPLETE_SGE;
+ cmdq_set_completion(&wqe_scmd->completion, buf_out);
+ break;
+ case SYNC_CMD_DIRECT_RESP:
+ complete_format = COMPLETE_DIRECT;
+ wqe_scmd->completion.direct_resp = 0;
+ break;
+ default:
+ break;
+ }
+
+ cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd, prod_idx,
+ complete_format, DATA_DIRECT, BUFDESC_SCMD_LEN);
+
+ cmdq_set_inline_wqe_data(&wqe->inline_wqe, buf_in, in_size);
+}
+
+static void cmdq_update_cmd_status(struct hifc_cmdq *cmdq, u16 prod_idx,
+ struct hifc_cmdq_wqe *wqe)
+{
+ struct hifc_cmdq_cmd_info *cmd_info;
+ struct hifc_cmdq_wqe_lcmd *wqe_lcmd;
+ u32 status_info;
+
+ wqe_lcmd = &wqe->wqe_lcmd;
+ cmd_info = &cmdq->cmd_infos[prod_idx];
+
+ if (cmd_info->errcode) {
+ status_info = be32_to_cpu(wqe_lcmd->status.status_info);
+ *cmd_info->errcode = WQE_ERRCODE_GET(status_info, VAL);
+ }
+
+ if (cmd_info->direct_resp &&
+ cmd_info->cmd_type == HIFC_CMD_TYPE_DIRECT_RESP)
+ *cmd_info->direct_resp =
+ cpu_to_be64(wqe_lcmd->completion.direct_resp);
+}
+
+static int hifc_cmdq_sync_timeout_check(struct hifc_cmdq *cmdq,
+ struct hifc_cmdq_wqe *wqe, u16 pi,
+ enum hifc_mod_type mod, u8 cmd)
+{
+ struct hifc_cmdq_wqe_lcmd *wqe_lcmd;
+ struct hifc_ctrl *ctrl;
+ u32 ctrl_info;
+
+ wqe_lcmd = &wqe->wqe_lcmd;
+ ctrl = &wqe_lcmd->ctrl;
+ ctrl_info = be32_to_cpu((ctrl)->ctrl_info);
+ if (!WQE_COMPLETED(ctrl_info)) {
+ sdk_info(cmdq->hwdev->dev_hdl, "Cmdq sync command check: HW busy bit not set, mod: %u, cmd: 0x%x\n",
+ mod, cmd);
+ return -EFAULT;
+ }
+
+ cmdq_update_cmd_status(cmdq, pi, wqe);
+
+ sdk_info(cmdq->hwdev->dev_hdl, "Cmdq sync command check succeeded, mod: %u, cmd: 0x%x\n",
+ mod, cmd);
+ return 0;
+}
+
+static void __clear_cmd_info(struct hifc_cmdq_cmd_info *cmd_info,
+ struct hifc_cmdq_cmd_info *saved_cmd_info)
+{
+ if (cmd_info->errcode == saved_cmd_info->errcode)
+ cmd_info->errcode = NULL;
+
+ if (cmd_info->done == saved_cmd_info->done)
+ cmd_info->done = NULL;
+
+ if (cmd_info->direct_resp == saved_cmd_info->direct_resp)
+ cmd_info->direct_resp = NULL;
+}
+
+static int
+cmdq_sync_cmd_timeout_handler(struct hifc_cmdq *cmdq,
+ struct hifc_cmdq_cmd_info *cmd_info,
+ struct hifc_cmdq_cmd_info *saved_cmd_info,
+ struct hifc_cmdq_wqe *curr_wqe,
+ enum hifc_mod_type mod, u8 cmd,
+ u16 curr_prod_idx, u64 curr_msg_id)
+{
+ int err;
+
+ spin_lock_bh(&cmdq->cmdq_lock);
+
+ if (cmd_info->cmpt_code == saved_cmd_info->cmpt_code)
+ cmd_info->cmpt_code = NULL;
+
+ if (*saved_cmd_info->cmpt_code == CMDQ_COMPLETE_CMPT_CODE) {
+ sdk_info(cmdq->hwdev->dev_hdl, "Cmdq sync command (mod: %u, cmd: 0x%x) has been completed\n",
+ mod, cmd);
+ spin_unlock_bh(&cmdq->cmdq_lock);
+ return 0;
+ }
+
+ if (curr_msg_id == cmd_info->cmdq_msg_id) {
+ err = hifc_cmdq_sync_timeout_check(cmdq, curr_wqe,
+ curr_prod_idx,
+ mod, cmd);
+ if (err)
+ cmd_info->cmd_type = HIFC_CMD_TYPE_TIMEOUT;
+ else
+ cmd_info->cmd_type = HIFC_CMD_TYPE_FAKE_TIMEOUT;
+ } else {
+ err = -ETIMEDOUT;
+ sdk_err(cmdq->hwdev->dev_hdl,
+ "Cmdq sync command current msg id mismatches the cmd_info msg id, mod: %u, cmd: 0x%x\n",
+ mod, cmd);
+ }
+
+ __clear_cmd_info(cmd_info, saved_cmd_info);
+
+ spin_unlock_bh(&cmdq->cmdq_lock);
+
+ return err;
+}
+
+static int cmdq_sync_cmd_direct_resp(struct hifc_cmdq *cmdq,
+ enum hifc_ack_type ack_type,
+ enum hifc_mod_type mod, u8 cmd,
+ struct hifc_cmd_buf *buf_in,
+ u64 *out_param, u32 timeout)
+{
+ struct hifc_wq *wq = cmdq->wq;
+ struct hifc_cmdq_wqe *curr_wqe, wqe;
+ struct hifc_cmdq_cmd_info *cmd_info, saved_cmd_info;
+ struct completion done;
+ u16 curr_prod_idx, next_prod_idx, num_wqebbs;
+ int wrapped, errcode = 0, wqe_size = cmdq_wqe_size(WQE_LCMD_TYPE);
+ int cmpt_code = CMDQ_SEND_CMPT_CODE;
+ ulong timeo;
+ u64 curr_msg_id;
+ int err;
+
+ num_wqebbs = WQE_NUM_WQEBBS(wqe_size, wq);
+
+ /* Keep wrapped and doorbell index correct. bh - for tasklet(ceq) */
+ spin_lock_bh(&cmdq->cmdq_lock);
+
+ /* reserve one wqebb for setting the arm bit, in case frequent
+ * cmdq commands fill the queue completely
+ */
+ if (HIFC_GET_CMDQ_FREE_WQEBBS(wq) < num_wqebbs + 1) {
+ spin_unlock_bh(&cmdq->cmdq_lock);
+ return -EBUSY;
+ }
+
+ /* WQE_SIZE equals WQEBB_SIZE, so we get the real wq element, not a shadow copy */
+ curr_wqe = hifc_get_wqe(cmdq->wq, num_wqebbs, &curr_prod_idx);
+ if (!curr_wqe) {
+ spin_unlock_bh(&cmdq->cmdq_lock);
+ sdk_err(cmdq->hwdev->dev_hdl, "Cannot get an available wqebb, mod: %u, cmd: 0x%x\n",
+ mod, cmd);
+ return -EBUSY;
+ }
+
+ memset(&wqe, 0, sizeof(wqe));
+
+ wrapped = cmdq->wrapped;
+
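+ /* when the producer index passes the end of the queue, toggle the
+ * wrapped bit; it is written into the WQE header busy bit so hardware
+ * can tell fresh WQEs from stale ones
+ */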
+ next_prod_idx = curr_prod_idx + num_wqebbs;
+ if (next_prod_idx >= wq->q_depth) {
+ cmdq->wrapped = !cmdq->wrapped;
+ next_prod_idx -= wq->q_depth;
+ }
+
+ cmd_info = &cmdq->cmd_infos[curr_prod_idx];
+
+ init_completion(&done);
+
+ cmd_info->done = &done;
+ cmd_info->errcode = &errcode;
+ cmd_info->direct_resp = out_param;
+ cmd_info->cmpt_code = &cmpt_code;
+
+ memcpy(&saved_cmd_info, cmd_info, sizeof(*cmd_info));
+
+ cmdq_set_lcmd_wqe(&wqe, SYNC_CMD_DIRECT_RESP, buf_in, NULL,
+ wrapped, ack_type, mod, cmd, curr_prod_idx);
+
+ /* The data that is written to HW should be in Big Endian Format */
+ hifc_cpu_to_be32(&wqe, wqe_size);
+
+ /* CMDQ WQE is not shadow, therefore wqe will be written to wq */
+ cmdq_wqe_fill(curr_wqe, &wqe);
+
+ cmd_info->cmd_type = HIFC_CMD_TYPE_DIRECT_RESP;
+
+ (cmd_info->cmdq_msg_id)++;
+ curr_msg_id = cmd_info->cmdq_msg_id;
+
+ cmdq_set_db(cmdq, HIFC_CMDQ_SYNC, next_prod_idx);
+
+ spin_unlock_bh(&cmdq->cmdq_lock);
+
+ timeo = msecs_to_jiffies(timeout ? timeout : CMDQ_CMD_TIMEOUT);
+ if (!wait_for_completion_timeout(&done, timeo)) {
+ err = cmdq_sync_cmd_timeout_handler(cmdq, cmd_info,
+ &saved_cmd_info,
+ curr_wqe, mod, cmd,
+ curr_prod_idx, curr_msg_id);
+
+ if (!err)
+ goto timeout_check_ok;
+
+ sdk_err(cmdq->hwdev->dev_hdl, "Cmdq sync command timeout, prod idx: 0x%x\n",
+ curr_prod_idx);
+ return -ETIMEDOUT;
+ }
+
+timeout_check_ok:
+ smp_rmb(); /* read error code after completion */
+
+ if (errcode > 1)
+ return errcode;
+
+ return 0;
+}
+
+static int cmdq_sync_cmd_detail_resp(struct hifc_cmdq *cmdq,
+ enum hifc_ack_type ack_type,
+ enum hifc_mod_type mod, u8 cmd,
+ struct hifc_cmd_buf *buf_in,
+ struct hifc_cmd_buf *buf_out,
+ u32 timeout)
+{
+ struct hifc_wq *wq = cmdq->wq;
+ struct hifc_cmdq_wqe *curr_wqe, wqe;
+ struct hifc_cmdq_cmd_info *cmd_info, saved_cmd_info;
+ struct completion done;
+ u16 curr_prod_idx, next_prod_idx, num_wqebbs;
+ int wrapped, errcode = 0, wqe_size = cmdq_wqe_size(WQE_LCMD_TYPE);
+ int cmpt_code = CMDQ_SEND_CMPT_CODE;
+ ulong timeo;
+ u64 curr_msg_id;
+ int err;
+
+ num_wqebbs = WQE_NUM_WQEBBS(wqe_size, wq);
+
+ /* Keep wrapped and doorbell index correct. bh - for tasklet(ceq) */
+ spin_lock_bh(&cmdq->cmdq_lock);
+
+ /* reserve one wqebb for setting the arm bit, in case frequent
+ * cmdq commands fill the queue completely
+ */
+ if (HIFC_GET_CMDQ_FREE_WQEBBS(wq) < num_wqebbs + 1) {
+ spin_unlock_bh(&cmdq->cmdq_lock);
+ return -EBUSY;
+ }
+
+ /* WQE_SIZE equals WQEBB_SIZE, so we get the real wq element, not a shadow copy */
+ curr_wqe = hifc_get_wqe(cmdq->wq, num_wqebbs, &curr_prod_idx);
+ if (!curr_wqe) {
+ spin_unlock_bh(&cmdq->cmdq_lock);
+ sdk_err(cmdq->hwdev->dev_hdl, "Cannot get an available wqebb, mod: %u, cmd: 0x%x\n",
+ mod, cmd);
+ return -EBUSY;
+ }
+
+ memset(&wqe, 0, sizeof(wqe));
+
+ wrapped = cmdq->wrapped;
+
+ next_prod_idx = curr_prod_idx + num_wqebbs;
+ if (next_prod_idx >= wq->q_depth) {
+ cmdq->wrapped = !cmdq->wrapped;
+ next_prod_idx -= wq->q_depth;
+ }
+
+ cmd_info = &cmdq->cmd_infos[curr_prod_idx];
+
+ init_completion(&done);
+
+ cmd_info->done = &done;
+ cmd_info->errcode = &errcode;
+ cmd_info->cmpt_code = &cmpt_code;
+
+ memcpy(&saved_cmd_info, cmd_info, sizeof(*cmd_info));
+
+ cmdq_set_lcmd_wqe(&wqe, SYNC_CMD_SGE_RESP, buf_in, buf_out,
+ wrapped, ack_type, mod, cmd, curr_prod_idx);
+
+ hifc_cpu_to_be32(&wqe, wqe_size);
+
+ cmdq_wqe_fill(curr_wqe, &wqe);
+
+ cmd_info->cmd_type = HIFC_CMD_TYPE_SGE_RESP;
+
+ (cmd_info->cmdq_msg_id)++;
+ curr_msg_id = cmd_info->cmdq_msg_id;
+
+ cmdq_set_db(cmdq, HIFC_CMDQ_SYNC, next_prod_idx);
+
+ spin_unlock_bh(&cmdq->cmdq_lock);
+
+ timeo = msecs_to_jiffies(timeout ? timeout : CMDQ_CMD_TIMEOUT);
+ if (!wait_for_completion_timeout(&done, timeo)) {
+ err = cmdq_sync_cmd_timeout_handler(cmdq, cmd_info,
+ &saved_cmd_info,
+ curr_wqe, mod, cmd,
+ curr_prod_idx, curr_msg_id);
+ if (!err)
+ goto timeout_check_ok;
+
+ sdk_err(cmdq->hwdev->dev_hdl, "Cmdq sync command timeout, prod idx: 0x%x\n",
+ curr_prod_idx);
+ return -ETIMEDOUT;
+ }
+
+timeout_check_ok:
+
+ smp_rmb(); /* read error code after completion */
+
+ if (errcode > 1)
+ return errcode;
+
+ return 0;
+}
+
+static int cmdq_async_cmd(struct hifc_cmdq *cmdq, enum hifc_ack_type ack_type,
+ enum hifc_mod_type mod, u8 cmd,
+ struct hifc_cmd_buf *buf_in)
+{
+ struct hifc_wq *wq = cmdq->wq;
+ int wqe_size = cmdq_wqe_size(WQE_LCMD_TYPE);
+ u16 curr_prod_idx, next_prod_idx, num_wqebbs;
+ struct hifc_cmdq_wqe *curr_wqe, wqe;
+ int wrapped;
+
+ num_wqebbs = WQE_NUM_WQEBBS(wqe_size, wq);
+
+ spin_lock_bh(&cmdq->cmdq_lock);
+
+ /* WQE_SIZE equals WQEBB_SIZE, so we get the real wq element, not a shadow copy */
+ curr_wqe = hifc_get_wqe(cmdq->wq, num_wqebbs, &curr_prod_idx);
+ if (!curr_wqe) {
+ spin_unlock_bh(&cmdq->cmdq_lock);
+ return -EBUSY;
+ }
+
+ memset(&wqe, 0, sizeof(wqe));
+
+ wrapped = cmdq->wrapped;
+ next_prod_idx = curr_prod_idx + num_wqebbs;
+ if (next_prod_idx >= cmdq->wq->q_depth) {
+ cmdq->wrapped = !cmdq->wrapped;
+ next_prod_idx -= cmdq->wq->q_depth;
+ }
+
+ cmdq_set_lcmd_wqe(&wqe, ASYNC_CMD, buf_in, NULL, wrapped,
+ ack_type, mod, cmd, curr_prod_idx);
+
+ /* The data that is written to HW should be in Big Endian Format */
+ hifc_cpu_to_be32(&wqe, wqe_size);
+
+ cmdq_wqe_fill(curr_wqe, &wqe);
+
+ cmdq->cmd_infos[curr_prod_idx].cmd_type = HIFC_CMD_TYPE_ASYNC;
+
+ cmdq_set_db(cmdq, HIFC_CMDQ_ASYNC, next_prod_idx);
+
+ spin_unlock_bh(&cmdq->cmdq_lock);
+
+ return 0;
+}
+
+static int cmdq_set_arm_bit(struct hifc_cmdq *cmdq, void *buf_in, u16 in_size)
+{
+ struct hifc_wq *wq = cmdq->wq;
+ struct hifc_cmdq_wqe *curr_wqe, wqe;
+ u16 curr_prod_idx, next_prod_idx, num_wqebbs;
+ int wrapped, wqe_size = cmdq_wqe_size(WQE_SCMD_TYPE);
+
+ num_wqebbs = WQE_NUM_WQEBBS(wqe_size, wq);
+
+ /* Keep wrapped and doorbell index correct. bh - for tasklet(ceq) */
+ spin_lock_bh(&cmdq->cmdq_lock);
+
+ /* WQE_SIZE equals WQEBB_SIZE, so we get the real wq element, not a shadow copy */
+ curr_wqe = hifc_get_wqe(cmdq->wq, num_wqebbs, &curr_prod_idx);
+ if (!curr_wqe) {
+ spin_unlock_bh(&cmdq->cmdq_lock);
+ sdk_err(cmdq->hwdev->dev_hdl, "Cannot get an available wqebb for setting arm\n");
+ return -EBUSY;
+ }
+
+ memset(&wqe, 0, sizeof(wqe));
+
+ wrapped = cmdq->wrapped;
+
+ next_prod_idx = curr_prod_idx + num_wqebbs;
+ if (next_prod_idx >= wq->q_depth) {
+ cmdq->wrapped = !cmdq->wrapped;
+ next_prod_idx -= wq->q_depth;
+ }
+
+ cmdq_set_inline_wqe(&wqe, SYNC_CMD_DIRECT_RESP, buf_in, in_size, NULL,
+ wrapped, HIFC_ACK_TYPE_CMDQ, HIFC_MOD_COMM,
+ CMDQ_SET_ARM_CMD, curr_prod_idx);
+
+ /* The data that is written to HW should be in Big Endian Format */
+ hifc_cpu_to_be32(&wqe, wqe_size);
+
+ /* cmdq wqe is not shadow, therefore wqe will be written to wq */
+ cmdq_wqe_fill(curr_wqe, &wqe);
+
+ cmdq->cmd_infos[curr_prod_idx].cmd_type = HIFC_CMD_TYPE_SET_ARM;
+
+ cmdq_set_db(cmdq, cmdq->cmdq_type, next_prod_idx);
+
+ spin_unlock_bh(&cmdq->cmdq_lock);
+
+ return 0;
+}
+
+static int cmdq_params_valid(void *hwdev, struct hifc_cmd_buf *buf_in)
+{
+ if (!buf_in || !hwdev) {
+ pr_err("Invalid CMDQ buffer addr\n");
+ return -EINVAL;
+ }
+
+ if (!buf_in->size || buf_in->size > HIFC_CMDQ_MAX_DATA_SIZE) {
+ pr_err("Invalid CMDQ buffer size: 0x%x\n", buf_in->size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
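+/* time to wait for the cmdqs to be enabled, in milliseconds */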
+#define WAIT_CMDQ_ENABLE_TIMEOUT 300
+
+static int wait_cmdqs_enable(struct hifc_cmdqs *cmdqs)
+{
+ unsigned long end;
+
+ end = jiffies + msecs_to_jiffies(WAIT_CMDQ_ENABLE_TIMEOUT);
+ do {
+ if (cmdqs->status & HIFC_CMDQ_ENABLE)
+ return 0;
+ } while (time_before(jiffies, end) && cmdqs->hwdev->chip_present_flag &&
+ !cmdqs->disable_flag);
+
+ cmdqs->disable_flag = 1;
+
+ return -EBUSY;
+}
+
+int hifc_cmdq_direct_resp(void *hwdev, enum hifc_ack_type ack_type,
+ enum hifc_mod_type mod, u8 cmd,
+ struct hifc_cmd_buf *buf_in, u64 *out_param,
+ u32 timeout)
+{
+ struct hifc_cmdqs *cmdqs;
+ int err = cmdq_params_valid(hwdev, buf_in);
+
+ if (err) {
+ pr_err("Invalid CMDQ parameters\n");
+ return err;
+ }
+
+ cmdqs = ((struct hifc_hwdev *)hwdev)->cmdqs;
+
+ if (!(((struct hifc_hwdev *)hwdev)->chip_present_flag) ||
+ !hifc_is_hwdev_mod_inited(hwdev, HIFC_HWDEV_CMDQ_INITED))
+ return -EPERM;
+
+ err = wait_cmdqs_enable(cmdqs);
+ if (err) {
+ sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq is disabled\n");
+ return err;
+ }
+
+ err = cmdq_sync_cmd_direct_resp(&cmdqs->cmdq[HIFC_CMDQ_SYNC], ack_type,
+ mod, cmd, buf_in, out_param, timeout);
+ if (!(((struct hifc_hwdev *)hwdev)->chip_present_flag))
+ return -ETIMEDOUT;
+ else
+ return err;
+}
+
+int hifc_cmdq_detail_resp(void *hwdev,
+ enum hifc_ack_type ack_type,
+ enum hifc_mod_type mod, u8 cmd,
+ struct hifc_cmd_buf *buf_in,
+ struct hifc_cmd_buf *buf_out,
+ u32 timeout)
+{
+ struct hifc_cmdqs *cmdqs;
+ int err = cmdq_params_valid(hwdev, buf_in);
+
+ if (err)
+ return err;
+
+ cmdqs = ((struct hifc_hwdev *)hwdev)->cmdqs;
+
+ if (!(((struct hifc_hwdev *)hwdev)->chip_present_flag) ||
+ !hifc_is_hwdev_mod_inited(hwdev, HIFC_HWDEV_CMDQ_INITED))
+ return -EPERM;
+
+ err = wait_cmdqs_enable(cmdqs);
+ if (err) {
+ sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq is disabled\n");
+ return err;
+ }
+
+ err = cmdq_sync_cmd_detail_resp(&cmdqs->cmdq[HIFC_CMDQ_SYNC], ack_type,
+ mod, cmd, buf_in, buf_out, timeout);
+ if (!(((struct hifc_hwdev *)hwdev)->chip_present_flag))
+ return -ETIMEDOUT;
+ else
+ return err;
+}
+
+int hifc_cmdq_async(void *hwdev, enum hifc_ack_type ack_type,
+ enum hifc_mod_type mod, u8 cmd,
+ struct hifc_cmd_buf *buf_in)
+{
+ struct hifc_cmdqs *cmdqs;
+ int err = cmdq_params_valid(hwdev, buf_in);
+
+ if (err)
+ return err;
+
+ cmdqs = ((struct hifc_hwdev *)hwdev)->cmdqs;
+
+ if (!(((struct hifc_hwdev *)hwdev)->chip_present_flag) ||
+ !hifc_is_hwdev_mod_inited(hwdev, HIFC_HWDEV_CMDQ_INITED))
+ return -EPERM;
+
+ err = wait_cmdqs_enable(cmdqs);
+ if (err) {
+ sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq is disabled\n");
+ return err;
+ }
+
+ return cmdq_async_cmd(&cmdqs->cmdq[HIFC_CMDQ_ASYNC], ack_type, mod,
+ cmd, buf_in);
+}
+
+int hifc_set_arm_bit(void *hwdev, enum hifc_set_arm_type q_type, u16 q_id)
+{
+ struct hifc_cmdqs *cmdqs;
+ struct hifc_cmdq *cmdq;
+ struct hifc_cmdq_arm_bit arm_bit;
+ enum hifc_cmdq_type cmdq_type = HIFC_CMDQ_SYNC;
+ u16 in_size;
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ if (!(((struct hifc_hwdev *)hwdev)->chip_present_flag) ||
+ !hifc_is_hwdev_mod_inited(hwdev, HIFC_HWDEV_CMDQ_INITED))
+ return -EPERM;
+
+ cmdqs = ((struct hifc_hwdev *)hwdev)->cmdqs;
+
+ if (!(cmdqs->status & HIFC_CMDQ_ENABLE))
+ return -EBUSY;
+
+ if (q_type == HIFC_SET_ARM_CMDQ) {
+ if (q_id >= HIFC_MAX_CMDQ_TYPES)
+ return -EFAULT;
+
+ cmdq_type = q_id;
+ }
+ /* The sq uses interrupts now, so the arm bit only needs to be set
+ * for the cmdq; uncomment the lines below if the sq arm bit is
+ * ever needed:
+ * else
+ * cmdq_type = HIFC_CMDQ_SYNC;
+ */
+
+ cmdq = &cmdqs->cmdq[cmdq_type];
+
+ arm_bit.q_type = q_type;
+ arm_bit.q_id = q_id;
+ in_size = sizeof(arm_bit);
+
+ err = cmdq_set_arm_bit(cmdq, &arm_bit, in_size);
+ if (err) {
+ sdk_err(cmdqs->hwdev->dev_hdl,
+ "Failed to set arm for q_type: %d, qid %d\n",
+ q_type, q_id);
+ return err;
+ }
+
+ return 0;
+}
+
+static void clear_wqe_complete_bit(struct hifc_cmdq *cmdq,
+ struct hifc_cmdq_wqe *wqe, u16 ci)
+{
+ struct hifc_cmdq_wqe_lcmd *wqe_lcmd;
+ struct hifc_cmdq_inline_wqe *inline_wqe;
+ struct hifc_cmdq_wqe_scmd *wqe_scmd;
+ struct hifc_ctrl *ctrl;
+ u32 header_info = be32_to_cpu(WQE_HEADER(wqe)->header_info);
+ int buf_len = CMDQ_WQE_HEADER_GET(header_info, BUFDESC_LEN);
+ int wqe_size = cmdq_get_wqe_size(buf_len);
+ u16 num_wqebbs;
+
+ if (wqe_size == WQE_LCMD_SIZE) {
+ wqe_lcmd = &wqe->wqe_lcmd;
+ ctrl = &wqe_lcmd->ctrl;
+ } else {
+ inline_wqe = &wqe->inline_wqe;
+ wqe_scmd = &inline_wqe->wqe_scmd;
+ ctrl = &wqe_scmd->ctrl;
+ }
+
+ /* clear HW busy bit */
+ ctrl->ctrl_info = 0;
+ cmdq->cmd_infos[ci].cmd_type = HIFC_CMD_TYPE_NONE;
+
+ wmb(); /* make sure the wqe clear is visible before releasing the wqebbs */
+
+ num_wqebbs = WQE_NUM_WQEBBS(wqe_size, cmdq->wq);
+ hifc_put_wqe(cmdq->wq, num_wqebbs);
+}
+
+static void cmdq_sync_cmd_handler(struct hifc_cmdq *cmdq,
+ struct hifc_cmdq_wqe *wqe, u16 cons_idx)
+{
+ u16 prod_idx = cons_idx;
+
+ spin_lock(&cmdq->cmdq_lock);
+
+ cmdq_update_cmd_status(cmdq, prod_idx, wqe);
+
+ if (cmdq->cmd_infos[prod_idx].cmpt_code) {
+ *cmdq->cmd_infos[prod_idx].cmpt_code =
+ CMDQ_COMPLETE_CMPT_CODE;
+ cmdq->cmd_infos[prod_idx].cmpt_code = NULL;
+ }
+
+ /* make sure the cmpt_code update completes before the done operation */
+ smp_rmb();
+
+ if (cmdq->cmd_infos[prod_idx].done) {
+ complete(cmdq->cmd_infos[prod_idx].done);
+ cmdq->cmd_infos[prod_idx].done = NULL;
+ }
+
+ spin_unlock(&cmdq->cmdq_lock);
+
+ clear_wqe_complete_bit(cmdq, wqe, cons_idx);
+}
+
+static void cmdq_async_cmd_handler(struct hifc_hwdev *hwdev,
+ struct hifc_cmdq *cmdq,
+ struct hifc_cmdq_wqe *wqe, u16 ci)
+{
+ u64 buf = wqe->wqe_lcmd.buf_desc.saved_async_buf;
+ int addr_sz = sizeof(u64);
+
+ hifc_be32_to_cpu((void *)&buf, addr_sz);
+ if (buf)
+ hifc_free_cmd_buf(hwdev, (struct hifc_cmd_buf *)buf);
+
+ clear_wqe_complete_bit(cmdq, wqe, ci);
+}
+
+static int cmdq_arm_ceq_handler(struct hifc_cmdq *cmdq,
+ struct hifc_cmdq_wqe *wqe, u16 ci)
+{
+ struct hifc_cmdq_inline_wqe *inline_wqe = &wqe->inline_wqe;
+ struct hifc_cmdq_wqe_scmd *wqe_scmd = &inline_wqe->wqe_scmd;
+ struct hifc_ctrl *ctrl = &wqe_scmd->ctrl;
+ u32 ctrl_info = be32_to_cpu((ctrl)->ctrl_info);
+
+ if (!WQE_COMPLETED(ctrl_info))
+ return -EBUSY;
+
+ clear_wqe_complete_bit(cmdq, wqe, ci);
+
+ return 0;
+}
+
+#define HIFC_CMDQ_WQE_HEAD_LEN 32
+static void hifc_dump_cmdq_wqe_head(struct hifc_hwdev *hwdev,
+ struct hifc_cmdq_wqe *wqe)
+{
+ u32 i;
+ u32 *data = (u32 *)wqe;
+
+ for (i = 0; i < (HIFC_CMDQ_WQE_HEAD_LEN / sizeof(u32)); i += 4) {
+ sdk_info(hwdev->dev_hdl, "wqe data: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
+ data[i], data[i + 1], data[i + 2],
+ data[i + 3]);/*lint !e679*/
+ }
+}
+
+#define CMDQ_CMD_TYPE_TIMEOUT(cmd_type) \
+ ((cmd_type) == HIFC_CMD_TYPE_TIMEOUT || \
+ (cmd_type) == HIFC_CMD_TYPE_FAKE_TIMEOUT)
+
+static inline void cmdq_response_handle(struct hifc_hwdev *hwdev,
+ struct hifc_cmdq *cmdq,
+ struct hifc_cmdq_wqe *wqe,
+ enum hifc_cmdq_type cmdq_type, u16 ci)
+{
+ if (cmdq_type == HIFC_CMDQ_ASYNC)
+ cmdq_async_cmd_handler(hwdev, cmdq, wqe, ci);
+ else
+ cmdq_sync_cmd_handler(cmdq, wqe, ci);
+}
+
+static inline void set_arm_bit(struct hifc_hwdev *hwdev, int set_arm,
+ enum hifc_cmdq_type cmdq_type)
+{
+ if (set_arm)
+ hifc_set_arm_bit(hwdev, HIFC_SET_ARM_CMDQ, cmdq_type);
+}
+
+void hifc_cmdq_ceq_handler(void *handle, u32 ceqe_data)
+{
+ struct hifc_cmdqs *cmdqs = ((struct hifc_hwdev *)handle)->cmdqs;
+ enum hifc_cmdq_type cmdq_type = CEQE_CMDQ_GET(ceqe_data, TYPE);
+ struct hifc_cmdq *cmdq = &cmdqs->cmdq[cmdq_type];
+ struct hifc_hwdev *hwdev = cmdqs->hwdev;
+ struct hifc_cmdq_wqe *wqe;
+ struct hifc_cmdq_wqe_lcmd *wqe_lcmd;
+ struct hifc_ctrl *ctrl;
+ struct hifc_cmdq_cmd_info *cmd_info;
+ u32 ctrl_info;
+ u16 ci;
+ int set_arm = 1;
+
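+ /* walk the consumed WQEs: free buffers of async commands, wake up
+ * waiters of sync commands, and stop at the first WQE that hardware
+ * has not completed yet
+ */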
+ while ((wqe = hifc_read_wqe(cmdq->wq, 1, &ci)) != NULL) {
+ cmd_info = &cmdq->cmd_infos[ci];
+
+ if (cmd_info->cmd_type == HIFC_CMD_TYPE_NONE) {
+ set_arm = 1;
+ break;
+ } else if (CMDQ_CMD_TYPE_TIMEOUT(cmd_info->cmd_type)) {
+ if (cmd_info->cmd_type == HIFC_CMD_TYPE_TIMEOUT) {
+ sdk_info(hwdev->dev_hdl, "Cmdq timeout, q_id: %u, ci: %u\n",
+ cmdq_type, ci);
+ hifc_dump_cmdq_wqe_head(hwdev, wqe);
+ }
+
+ set_arm = 1;
+ clear_wqe_complete_bit(cmdq, wqe, ci);
+ } else if (cmd_info->cmd_type == HIFC_CMD_TYPE_SET_ARM) {
+ /* the arm bit has already been set by this command */
+ set_arm = 0;
+
+ if (cmdq_arm_ceq_handler(cmdq, wqe, ci))
+ break;
+ } else {
+ set_arm = 1;
+
+ /* only arm bit is using scmd wqe, the wqe is lcmd */
+ wqe_lcmd = &wqe->wqe_lcmd;
+ ctrl = &wqe_lcmd->ctrl;
+ ctrl_info = be32_to_cpu((ctrl)->ctrl_info);
+
+ if (!WQE_COMPLETED(ctrl_info))
+ break;
+
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the cmdq wqe until we have
+ * verified the command has been processed and
+ * written back.
+ */
+ dma_rmb();
+
+ cmdq_response_handle(hwdev, cmdq, wqe, cmdq_type, ci);
+ }
+ }
+
+ set_arm_bit(hwdev, set_arm, cmdq_type);
+}
+
+static void cmdq_init_queue_ctxt(struct hifc_cmdq *cmdq,
+ struct hifc_cmdq_pages *cmdq_pages,
+ struct hifc_cmdq_ctxt *cmdq_ctxt)
+{
+ struct hifc_cmdqs *cmdqs = cmdq_to_cmdqs(cmdq);
+ struct hifc_hwdev *hwdev = cmdqs->hwdev;
+ struct hifc_wq *wq = cmdq->wq;
+ struct hifc_cmdq_ctxt_info *ctxt_info = &cmdq_ctxt->ctxt_info;
+ u64 wq_first_page_paddr, cmdq_first_block_paddr, pfn;
+ u16 start_ci = (u16)wq->cons_idx;
+
+ /* The data in the HW is in Big Endian Format */
+ wq_first_page_paddr = be64_to_cpu(*wq->block_vaddr);
+
+ pfn = CMDQ_PFN(wq_first_page_paddr);
+
+ ctxt_info->curr_wqe_page_pfn =
+ CMDQ_CTXT_PAGE_INFO_SET(1, HW_BUSY_BIT) |
+ CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_EN) |
+ CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_ARM) |
+ CMDQ_CTXT_PAGE_INFO_SET(HIFC_CEQ_ID_CMDQ, EQ_ID) |
+ CMDQ_CTXT_PAGE_INFO_SET(pfn, CURR_WQE_PAGE_PFN);
+
+ /* If only one page is used, use 0-level CLA */
+ if (cmdq->wq->num_q_pages != 1) {
+ cmdq_first_block_paddr = cmdq_pages->cmdq_page_paddr;
+ pfn = CMDQ_PFN(cmdq_first_block_paddr);
+ }
+
+ ctxt_info->wq_block_pfn = CMDQ_CTXT_BLOCK_INFO_SET(start_ci, CI) |
+ CMDQ_CTXT_BLOCK_INFO_SET(pfn, WQ_BLOCK_PFN);
+
+ cmdq_ctxt->func_idx = hifc_global_func_id_hw(hwdev);
+ cmdq_ctxt->ppf_idx = HIFC_HWIF_PPF_IDX(hwdev->hwif);
+ cmdq_ctxt->cmdq_id = cmdq->cmdq_type;
+}
+
+static int init_cmdq(struct hifc_cmdq *cmdq, struct hifc_hwdev *hwdev,
+ struct hifc_wq *wq, enum hifc_cmdq_type q_type)
+{
+ void __iomem *db_base;
+ int err = 0;
+
+ cmdq->wq = wq;
+ cmdq->cmdq_type = q_type;
+ cmdq->wrapped = 1;
+ cmdq->hwdev = hwdev;
+
+ spin_lock_init(&cmdq->cmdq_lock);
+
+ cmdq->cmd_infos = kcalloc(wq->q_depth, sizeof(*cmdq->cmd_infos),
+ GFP_KERNEL);
+ if (!cmdq->cmd_infos) {
+ err = -ENOMEM;
+ goto cmd_infos_err;
+ }
+
+ err = hifc_alloc_db_addr(hwdev, &db_base, NULL);
+ if (err)
+ goto alloc_db_err;
+
+ cmdq->db_base = (u8 *)db_base;
+ return 0;
+
+alloc_db_err:
+ kfree(cmdq->cmd_infos);
+
+cmd_infos_err:
+
+ return err;
+}
+
+static void free_cmdq(struct hifc_hwdev *hwdev, struct hifc_cmdq *cmdq)
+{
+ hifc_free_db_addr(hwdev, cmdq->db_base, NULL);
+ kfree(cmdq->cmd_infos);
+}
+
+int hifc_set_cmdq_ctxts(struct hifc_hwdev *hwdev)
+{
+ struct hifc_cmdqs *cmdqs = hwdev->cmdqs;
+ struct hifc_cmdq_ctxt *cmdq_ctxt, cmdq_ctxt_out = {0};
+ enum hifc_cmdq_type cmdq_type;
+ u16 in_size;
+ u16 out_size = sizeof(*cmdq_ctxt);
+ int err;
+
+ cmdq_type = HIFC_CMDQ_SYNC;
+ for (; cmdq_type < HIFC_MAX_CMDQ_TYPES; cmdq_type++) {
+ cmdq_ctxt = &cmdqs->cmdq[cmdq_type].cmdq_ctxt;
+ cmdq_ctxt->func_idx = hifc_global_func_id_hw(hwdev);
+ in_size = sizeof(*cmdq_ctxt);
+ err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM,
+ HIFC_MGMT_CMD_CMDQ_CTXT_SET,
+ cmdq_ctxt, in_size,
+ &cmdq_ctxt_out, &out_size, 0);
+ if (err || !out_size || cmdq_ctxt_out.status) {
+ sdk_err(hwdev->dev_hdl, "Failed to set cmdq ctxt, err: %d, status: 0x%x, out_size: 0x%x\n",
+ err, cmdq_ctxt_out.status, out_size);
+ return -EFAULT;
+ }
+ }
+
+ cmdqs->status |= HIFC_CMDQ_ENABLE;
+ cmdqs->disable_flag = 0;
+
+ return 0;
+}
+
+void hifc_cmdq_flush_cmd(struct hifc_hwdev *hwdev,
+ struct hifc_cmdq *cmdq)
+{
+ struct hifc_cmdq_wqe *wqe;
+ struct hifc_cmdq_cmd_info *cmdq_info;
+ u16 ci, wqe_left, i;
+ u64 buf;
+
+ spin_lock_bh(&cmdq->cmdq_lock);
+ wqe_left = cmdq->wq->q_depth - (u16)atomic_read(&cmdq->wq->delta);
+ ci = MASKED_WQE_IDX(cmdq->wq, cmdq->wq->cons_idx);
+ for (i = 0; i < wqe_left; i++, ci++) {
+ ci = MASKED_WQE_IDX(cmdq->wq, ci);
+ cmdq_info = &cmdq->cmd_infos[ci];
+
+ if (cmdq_info->cmd_type == HIFC_CMD_TYPE_SET_ARM)
+ continue;
+
+ if (cmdq->cmdq_type == HIFC_CMDQ_ASYNC) {
+ wqe = hifc_get_wqebb_addr(cmdq->wq, ci);
+ buf = wqe->wqe_lcmd.buf_desc.saved_async_buf;
+ wqe->wqe_lcmd.buf_desc.saved_async_buf = 0;
+
+ hifc_be32_to_cpu((void *)&buf, sizeof(u64));
+ if (buf)
+ hifc_free_cmd_buf(hwdev,
+ (struct hifc_cmd_buf *)buf);
+ } else {
+ if (cmdq_info->done) {
+ complete(cmdq_info->done);
+ cmdq_info->done = NULL;
+ cmdq_info->cmpt_code = NULL;
+ cmdq_info->direct_resp = NULL;
+ cmdq_info->errcode = NULL;
+ }
+ }
+ }
+
+ spin_unlock_bh(&cmdq->cmdq_lock);
+}
+
+int hifc_reinit_cmdq_ctxts(struct hifc_hwdev *hwdev)
+{
+ struct hifc_cmdqs *cmdqs = hwdev->cmdqs;
+ enum hifc_cmdq_type cmdq_type;
+
+ cmdq_type = HIFC_CMDQ_SYNC;
+ for (; cmdq_type < HIFC_MAX_CMDQ_TYPES; cmdq_type++) {
+ hifc_cmdq_flush_cmd(hwdev, &cmdqs->cmdq[cmdq_type]);
+ cmdqs->cmdq[cmdq_type].wrapped = 1;
+ hifc_wq_wqe_pg_clear(cmdqs->cmdq[cmdq_type].wq);
+ }
+
+ return hifc_set_cmdq_ctxts(hwdev);
+}
+
+int hifc_cmdqs_init(struct hifc_hwdev *hwdev)
+{
+ struct hifc_cmdqs *cmdqs;
+ struct hifc_cmdq_ctxt *cmdq_ctxt;
+ enum hifc_cmdq_type type, cmdq_type;
+ size_t saved_wqs_size;
+ u32 max_wqe_size;
+ int err;
+
+ cmdqs = kzalloc(sizeof(*cmdqs), GFP_KERNEL);
+ if (!cmdqs)
+ return -ENOMEM;
+
+ hwdev->cmdqs = cmdqs;
+ cmdqs->hwdev = hwdev;
+
+ saved_wqs_size = HIFC_MAX_CMDQ_TYPES * sizeof(struct hifc_wq);
+ cmdqs->saved_wqs = kzalloc(saved_wqs_size, GFP_KERNEL);
+ if (!cmdqs->saved_wqs) {
+ sdk_err(hwdev->dev_hdl, "Failed to allocate saved wqs\n");
+ err = -ENOMEM;
+ goto alloc_wqs_err;
+ }
+
+ cmdqs->cmd_buf_pool = dma_pool_create("hifc_cmdq", hwdev->dev_hdl,
+ HIFC_CMDQ_BUF_SIZE,
+ HIFC_CMDQ_BUF_SIZE, 0ULL);
+ if (!cmdqs->cmd_buf_pool) {
+ sdk_err(hwdev->dev_hdl, "Failed to create cmdq buffer pool\n");
+ err = -ENOMEM;
+ goto pool_create_err;
+ }
+
+ max_wqe_size = (u32)cmdq_wqe_size(WQE_LCMD_TYPE);
+ err = hifc_cmdq_alloc(&cmdqs->cmdq_pages, cmdqs->saved_wqs,
+ hwdev->dev_hdl, HIFC_MAX_CMDQ_TYPES,
+ hwdev->wq_page_size, CMDQ_WQEBB_SIZE,
+ HIFC_CMDQ_DEPTH, max_wqe_size);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to allocate cmdq\n");
+ goto cmdq_alloc_err;
+ }
+
+ cmdq_type = HIFC_CMDQ_SYNC;
+ for (; cmdq_type < HIFC_MAX_CMDQ_TYPES; cmdq_type++) {
+ err = init_cmdq(&cmdqs->cmdq[cmdq_type], hwdev,
+ &cmdqs->saved_wqs[cmdq_type], cmdq_type);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to initialize cmdq type :%d\n",
+ cmdq_type);
+ goto init_cmdq_err;
+ }
+
+ cmdq_ctxt = &cmdqs->cmdq[cmdq_type].cmdq_ctxt;
+ cmdq_init_queue_ctxt(&cmdqs->cmdq[cmdq_type],
+ &cmdqs->cmdq_pages, cmdq_ctxt);
+ }
+
+ err = hifc_set_cmdq_ctxts(hwdev);
+ if (err)
+ goto init_cmdq_err;
+
+ return 0;
+
+init_cmdq_err:
+ type = HIFC_CMDQ_SYNC;
+ for (; type < cmdq_type; type++)
+ free_cmdq(hwdev, &cmdqs->cmdq[type]);
+
+ hifc_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs,
+ HIFC_MAX_CMDQ_TYPES);
+
+cmdq_alloc_err:
+ dma_pool_destroy(cmdqs->cmd_buf_pool);
+
+pool_create_err:
+ kfree(cmdqs->saved_wqs);
+
+alloc_wqs_err:
+ kfree(cmdqs);
+
+ return err;
+}
+
+void hifc_cmdqs_free(struct hifc_hwdev *hwdev)
+{
+ struct hifc_cmdqs *cmdqs = hwdev->cmdqs;
+ enum hifc_cmdq_type cmdq_type = HIFC_CMDQ_SYNC;
+
+ cmdqs->status &= ~HIFC_CMDQ_ENABLE;
+
+ for (; cmdq_type < HIFC_MAX_CMDQ_TYPES; cmdq_type++) {
+ hifc_cmdq_flush_cmd(hwdev, &cmdqs->cmdq[cmdq_type]);
+ free_cmdq(cmdqs->hwdev, &cmdqs->cmdq[cmdq_type]);
+ }
+
+ hifc_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs,
+ HIFC_MAX_CMDQ_TYPES);
+
+ dma_pool_destroy(cmdqs->cmd_buf_pool);
+
+ kfree(cmdqs->saved_wqs);
+
+ kfree(cmdqs);
+}
diff --git a/drivers/scsi/huawei/hifc/hifc_cmdq.h b/drivers/scsi/huawei/hifc/hifc_cmdq.h
new file mode 100644
index 000000000000..cb2ac81c5edc
--- /dev/null
+++ b/drivers/scsi/huawei/hifc/hifc_cmdq.h
@@ -0,0 +1,210 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei Hifc PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ */
+
+#ifndef HIFC_CMDQ_H_
+#define HIFC_CMDQ_H_
+
+#define HIFC_DB_OFF 0x00000800
+
+#define HIFC_SCMD_DATA_LEN 16
+
+#define HIFC_CMDQ_DEPTH 4096
+
+#define HIFC_CMDQ_BUF_SIZE 2048U
+#define HIFC_CMDQ_BUF_HW_RSVD 8
+#define HIFC_CMDQ_MAX_DATA_SIZE \
+ (HIFC_CMDQ_BUF_SIZE - HIFC_CMDQ_BUF_HW_RSVD)
+#define WQ_PAGE_PFN_SHIFT 12
+#define WQ_BLOCK_PFN_SHIFT 9
+
+#define WQ_PAGE_PFN(page_addr) ((page_addr) >> WQ_PAGE_PFN_SHIFT)
+#define WQ_BLOCK_PFN(page_addr) ((page_addr) >> WQ_BLOCK_PFN_SHIFT)
+
+enum hifc_cmdq_type {
+ HIFC_CMDQ_SYNC,
+ HIFC_CMDQ_ASYNC,
+ HIFC_MAX_CMDQ_TYPES,
+};
+
+enum hifc_db_src_type {
+ HIFC_DB_SRC_CMDQ_TYPE,
+ HIFC_DB_SRC_L2NIC_SQ_TYPE,
+};
+
+enum hifc_cmdq_db_type {
+ HIFC_DB_SQ_RQ_TYPE,
+ HIFC_DB_CMDQ_TYPE,
+};
+
+/* CMDQ WQE CTRLS */
+struct hifc_cmdq_header {
+ u32 header_info;
+ u32 saved_data;
+};
+
+struct hifc_scmd_bufdesc {
+ u32 buf_len;
+ u32 rsvd;
+ u8 data[HIFC_SCMD_DATA_LEN];
+};
+
+struct hifc_lcmd_bufdesc {
+ struct hifc_sge sge;
+ u32 rsvd1;
+ u64 saved_async_buf;
+ u64 rsvd3;
+};
+
+struct hifc_cmdq_db {
+ u32 db_info;
+ u32 rsvd;
+};
+
+struct hifc_status {
+ u32 status_info;
+};
+
+struct hifc_ctrl {
+ u32 ctrl_info;
+};
+
+struct hifc_sge_resp {
+ struct hifc_sge sge;
+ u32 rsvd;
+};
+
+struct hifc_cmdq_completion {
+ /* HW Format */
+ union {
+ struct hifc_sge_resp sge_resp;
+ u64 direct_resp;
+ };
+};
+
+struct hifc_cmdq_wqe_scmd {
+ struct hifc_cmdq_header header;
+ struct hifc_cmdq_db db;
+ struct hifc_status status;
+ struct hifc_ctrl ctrl;
+ struct hifc_cmdq_completion completion;
+ struct hifc_scmd_bufdesc buf_desc;
+};
+
+struct hifc_cmdq_wqe_lcmd {
+ struct hifc_cmdq_header header;
+ struct hifc_status status;
+ struct hifc_ctrl ctrl;
+ struct hifc_cmdq_completion completion;
+ struct hifc_lcmd_bufdesc buf_desc;
+};
+
+struct hifc_cmdq_inline_wqe {
+ struct hifc_cmdq_wqe_scmd wqe_scmd;
+};
+
+struct hifc_cmdq_wqe {
+ /* HW Format */
+ union {
+ struct hifc_cmdq_inline_wqe inline_wqe;
+ struct hifc_cmdq_wqe_lcmd wqe_lcmd;
+ };
+};
+
+struct hifc_cmdq_arm_bit {
+ u32 q_type;
+ u32 q_id;
+};
+
+struct hifc_cmdq_ctxt_info {
+ u64 curr_wqe_page_pfn;
+ u64 wq_block_pfn;
+};
+
+struct hifc_cmdq_ctxt {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_idx;
+ u8 cmdq_id;
+ u8 ppf_idx;
+
+ u8 rsvd1[4];
+
+ struct hifc_cmdq_ctxt_info ctxt_info;
+};
+
+enum hifc_cmdq_status {
+ HIFC_CMDQ_ENABLE = BIT(0),
+};
+
+enum hifc_cmdq_cmd_type {
+ HIFC_CMD_TYPE_NONE,
+ HIFC_CMD_TYPE_SET_ARM,
+ HIFC_CMD_TYPE_DIRECT_RESP,
+ HIFC_CMD_TYPE_SGE_RESP,
+ HIFC_CMD_TYPE_ASYNC,
+ HIFC_CMD_TYPE_TIMEOUT,
+ HIFC_CMD_TYPE_FAKE_TIMEOUT,
+};
+
+struct hifc_cmdq_cmd_info {
+ enum hifc_cmdq_cmd_type cmd_type;
+
+ struct completion *done;
+ int *errcode;
+ int *cmpt_code;
+ u64 *direct_resp;
+ u64 cmdq_msg_id;
+};
+
+struct hifc_cmdq {
+ struct hifc_wq *wq;
+
+ enum hifc_cmdq_type cmdq_type;
+ int wrapped;
+
+ /* spinlock for sending cmdq commands */
+ spinlock_t cmdq_lock;
+
+ /* doorbell area */
+ u8 __iomem *db_base;
+
+ struct hifc_cmdq_ctxt cmdq_ctxt;
+
+ struct hifc_cmdq_cmd_info *cmd_infos;
+
+ struct hifc_hwdev *hwdev;
+};
+
+struct hifc_cmdqs {
+ struct hifc_hwdev *hwdev;
+
+ struct pci_pool *cmd_buf_pool;
+
+ struct hifc_wq *saved_wqs;
+
+ struct hifc_cmdq_pages cmdq_pages;
+ struct hifc_cmdq cmdq[HIFC_MAX_CMDQ_TYPES];
+
+ u32 status;
+ u32 disable_flag;
+};
+
+void hifc_cmdq_ceq_handler(void *hwdev, u32 ceqe_data);
+
+int hifc_reinit_cmdq_ctxts(struct hifc_hwdev *hwdev);
+
+bool hifc_cmdq_idle(struct hifc_cmdq *cmdq);
+
+int hifc_cmdqs_init(struct hifc_hwdev *hwdev);
+
+void hifc_cmdqs_free(struct hifc_hwdev *hwdev);
+
+void hifc_cmdq_flush_cmd(struct hifc_hwdev *hwdev,
+ struct hifc_cmdq *cmdq);
+
+#endif
diff --git a/drivers/scsi/huawei/hifc/hifc_cqm_main.c b/drivers/scsi/huawei/hifc/hifc_cqm_main.c
new file mode 100644
index 000000000000..4cd048f1e662
--- /dev/null
+++ b/drivers/scsi/huawei/hifc/hifc_cqm_main.c
@@ -0,0 +1,694 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei Hifc PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ */
+
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/vmalloc.h>
+
+#include "hifc_knl_adp.h"
+#include "hifc_hw.h"
+#include "hifc_hwdev.h"
+#include "hifc_hwif.h"
+#include "hifc_api_cmd.h"
+#include "hifc_mgmt.h"
+#include "hifc_cfg.h"
+#include "hifc_cqm_object.h"
+#include "hifc_cqm_main.h"
+
+#define GET_MAX(a, b) (((a) > (b)) ? (a) : (b))
+#define GET_MIN(a, b) (((a) < (b)) ? (a) : (b))
+
+static void cqm_capability_init_check_ppf(void *ex_handle,
+ u32 *total_function_num)
+{
+ struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle;
+ struct service_cap *service_capability = &handle->cfg_mgmt->svc_cap;
+ struct cqm_handle_s *cqm_handle = (struct cqm_handle_s *)
+ (handle->cqm_hdl);
+
+ if (cqm_handle->func_attribute.func_type == CQM_PPF) {
+ *total_function_num = service_capability->host_total_function;
+ cqm_handle->func_capability.timer_enable =
+ service_capability->timer_en;
+
+ cqm_info(handle->dev_hdl, "Cap init: total function num 0x%x\n",
+ *total_function_num);
+ cqm_info(handle->dev_hdl, "Cap init: timer_enable %d (1: enable; 0: disable)\n",
+ cqm_handle->func_capability.timer_enable);
+ }
+}
+
+void cqm_test_mode_init(struct cqm_handle_s *cqm_handle,
+ struct service_cap *service_capability)
+{
+ cqm_handle->func_capability.xid_alloc_mode =
+ service_capability->test_xid_alloc_mode;
+ cqm_handle->func_capability.gpa_check_enable =
+ service_capability->test_gpa_check_enable;
+}
+
+static s32 cqm_service_capability_init_for_each(
+ struct cqm_handle_s *cqm_handle,
+ struct service_cap *service_capability)
+{
+ struct hifc_hwdev *handle = (struct hifc_hwdev *)cqm_handle->ex_handle;
+
+ cqm_info(handle->dev_hdl, "Cap init: fc is valid\n");
+ cqm_handle->func_capability.hash_number +=
+ service_capability->fc_cap.dev_fc_cap.max_parent_qpc_num;
+ cqm_handle->func_capability.hash_basic_size = CQM_HASH_BUCKET_SIZE_64;
+ cqm_handle->func_capability.qpc_number +=
+ service_capability->fc_cap.dev_fc_cap.max_parent_qpc_num;
+ cqm_handle->func_capability.qpc_basic_size =
+ GET_MAX(service_capability->fc_cap.parent_qpc_size,
+ cqm_handle->func_capability.qpc_basic_size);
+ cqm_handle->func_capability.qpc_alloc_static = true;
+ cqm_handle->func_capability.scqc_number +=
+ service_capability->fc_cap.dev_fc_cap.scq_num;
+ cqm_handle->func_capability.scqc_basic_size =
+ GET_MAX(service_capability->fc_cap.scqc_size,
+ cqm_handle->func_capability.scqc_basic_size);
+ cqm_handle->func_capability.srqc_number +=
+ service_capability->fc_cap.dev_fc_cap.srq_num;
+ cqm_handle->func_capability.srqc_basic_size =
+ GET_MAX(service_capability->fc_cap.srqc_size,
+ cqm_handle->func_capability.srqc_basic_size);
+ cqm_handle->func_capability.lun_number = CQM_LUN_FC_NUM;
+ cqm_handle->func_capability.lun_basic_size = CQM_LUN_SIZE_8;
+ cqm_handle->func_capability.taskmap_number = CQM_TASKMAP_FC_NUM;
+ cqm_handle->func_capability.taskmap_basic_size = PAGE_SIZE;
+ cqm_handle->func_capability.childc_number +=
+ service_capability->fc_cap.dev_fc_cap.max_child_qpc_num;
+ cqm_handle->func_capability.childc_basic_size =
+ GET_MAX(service_capability->fc_cap.child_qpc_size,
+ cqm_handle->func_capability.childc_basic_size);
+ cqm_handle->func_capability.pagesize_reorder = CQM_FC_PAGESIZE_ORDER;
+
+ return CQM_SUCCESS;
+}
+
+s32 cqm_service_capability_init(struct cqm_handle_s *cqm_handle,
+ struct service_cap *service_capability)
+{
+ cqm_handle->service.has_register = false;
+ cqm_handle->service.buf_order = 0;
+
+ if (cqm_service_capability_init_for_each(
+ cqm_handle,
+ service_capability) == CQM_FAIL)
+ return CQM_FAIL;
+
+ return CQM_SUCCESS;
+}
+
+/**
+ * cqm_capability_init - Initialize capability of cqm function and service,
+ * need to read information from the configuration management module
+ * @ex_handle: handle of hwdev
+ */
+s32 cqm_capability_init(void *ex_handle)
+{
+ struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle;
+ struct service_cap *service_capability = &handle->cfg_mgmt->svc_cap;
+ struct cqm_handle_s *cqm_handle = (struct cqm_handle_s *)
+ (handle->cqm_hdl);
+ u32 total_function_num = 0;
+ int err = 0;
+
+ cqm_capability_init_check_ppf(ex_handle, &total_function_num);
+
+ cqm_handle->func_capability.flow_table_based_conn_number =
+ service_capability->max_connect_num;
+ cqm_handle->func_capability.flow_table_based_conn_cache_number =
+ service_capability->max_stick2cache_num;
+ cqm_info(handle->dev_hdl, "Cap init: cfg max_conn_num 0x%x, max_cache_conn_num 0x%x\n",
+ cqm_handle->func_capability.flow_table_based_conn_number,
+ cqm_handle->func_capability.flow_table_based_conn_cache_number);
+
+ cqm_handle->func_capability.qpc_reserved = 0;
+ cqm_handle->func_capability.mpt_reserved = 0;
+ cqm_handle->func_capability.qpc_alloc_static = false;
+ cqm_handle->func_capability.scqc_alloc_static = false;
+
+ cqm_handle->func_capability.l3i_number = CQM_L3I_COMM_NUM;
+ cqm_handle->func_capability.l3i_basic_size = CQM_L3I_SIZE_8;
+
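+	/* Illustrative sizing only (not from the capability read): with 4 host
+	 * functions the timer area below would need CQM_TIMER_ALIGN_SCALE_NUM * 4
+	 * = 2048 * 8 * 4 = 65536 entries of CQM_TIMER_SIZE_32 bytes each.
+	 */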
+ cqm_handle->func_capability.timer_number = CQM_TIMER_ALIGN_SCALE_NUM *
+ total_function_num;
+ cqm_handle->func_capability.timer_basic_size = CQM_TIMER_SIZE_32;
+
+ if (cqm_service_capability_init(cqm_handle, service_capability) ==
+ CQM_FAIL) {
+ cqm_err(handle->dev_hdl,
+ CQM_FUNCTION_FAIL(cqm_service_capability_init));
+ err = CQM_FAIL;
+ goto out;
+ }
+
+ cqm_test_mode_init(cqm_handle, service_capability);
+
+ cqm_info(handle->dev_hdl, "Cap init: pagesize_reorder %d\n",
+ cqm_handle->func_capability.pagesize_reorder);
+ cqm_info(handle->dev_hdl, "Cap init: xid_alloc_mode %d, gpa_check_enable %d\n",
+ cqm_handle->func_capability.xid_alloc_mode,
+ cqm_handle->func_capability.gpa_check_enable);
+ cqm_info(handle->dev_hdl, "Cap init: qpc_alloc_mode %d, scqc_alloc_mode %d\n",
+ cqm_handle->func_capability.qpc_alloc_static,
+ cqm_handle->func_capability.scqc_alloc_static);
+ cqm_info(handle->dev_hdl, "Cap init: hash_number 0x%x\n",
+ cqm_handle->func_capability.hash_number);
+ cqm_info(handle->dev_hdl, "Cap init: qpc_number 0x%x, qpc_reserved 0x%x\n",
+ cqm_handle->func_capability.qpc_number,
+ cqm_handle->func_capability.qpc_reserved);
+ cqm_info(handle->dev_hdl, "Cap init: scqc_number 0x%x scqc_reserved 0x%x\n",
+ cqm_handle->func_capability.scqc_number,
+ cqm_handle->func_capability.scq_reserved);
+ cqm_info(handle->dev_hdl, "Cap init: srqc_number 0x%x\n",
+ cqm_handle->func_capability.srqc_number);
+ cqm_info(handle->dev_hdl, "Cap init: mpt_number 0x%x, mpt_reserved 0x%x\n",
+ cqm_handle->func_capability.mpt_number,
+ cqm_handle->func_capability.mpt_reserved);
+ cqm_info(handle->dev_hdl, "Cap init: gid_number 0x%x, lun_number 0x%x\n",
+ cqm_handle->func_capability.gid_number,
+ cqm_handle->func_capability.lun_number);
+ cqm_info(handle->dev_hdl, "Cap init: taskmap_number 0x%x, l3i_number 0x%x\n",
+ cqm_handle->func_capability.taskmap_number,
+ cqm_handle->func_capability.l3i_number);
+ cqm_info(handle->dev_hdl, "Cap init: timer_number 0x%x\n",
+ cqm_handle->func_capability.timer_number);
+ cqm_info(handle->dev_hdl, "Cap init: xid2cid_number 0x%x, reorder_number 0x%x\n",
+ cqm_handle->func_capability.xid2cid_number,
+ cqm_handle->func_capability.reorder_number);
+
+ return CQM_SUCCESS;
+
+out:
+ if (cqm_handle->func_attribute.func_type == CQM_PPF)
+ cqm_handle->func_capability.timer_enable = 0;
+
+ return err;
+}
+
+/**
+ * cqm_init - Initialize cqm
+ * @ex_handle: handle of hwdev
+ */
+s32 cqm_init(void *ex_handle)
+{
+ struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle;
+ struct cqm_handle_s *cqm_handle = NULL;
+ s32 ret = CQM_FAIL;
+
+ CQM_PTR_CHECK_RET(ex_handle, return CQM_FAIL, CQM_PTR_NULL(ex_handle));
+
+	cqm_handle = kzalloc(sizeof(struct cqm_handle_s), GFP_KERNEL);
+	CQM_PTR_CHECK_RET(cqm_handle, return CQM_FAIL,
+			  CQM_ALLOC_FAIL(cqm_handle));
+
+ cqm_handle->ex_handle = handle;
+ cqm_handle->dev = (struct pci_dev *)(handle->pcidev_hdl);
+
+ handle->cqm_hdl = (void *)cqm_handle;
+
+ /* Clear statistics */
+ memset(&handle->hw_stats.cqm_stats, 0, sizeof(struct hifc_cqm_stats));
+
+ /* Read information of vf or pf */
+ cqm_handle->func_attribute = handle->hwif->attr;
+ cqm_info(handle->dev_hdl, "Func init: function type %d\n",
+ cqm_handle->func_attribute.func_type);
+
+	/* Read capability from the configuration management module */
+ ret = cqm_capability_init(ex_handle);
+ if (ret == CQM_FAIL) {
+ cqm_err(handle->dev_hdl,
+ CQM_FUNCTION_FAIL(cqm_capability_init));
+ goto err1;
+ }
+
+ /* Initialize entries of memory table such as BAT/CLA/bitmap */
+ if (cqm_mem_init(ex_handle) != CQM_SUCCESS) {
+ cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_mem_init));
+ goto err1;
+ }
+
+ /* Initialize event callback */
+ if (cqm_event_init(ex_handle) != CQM_SUCCESS) {
+ cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_event_init));
+ goto err2;
+ }
+
+ /* Initialize doorbell */
+ if (cqm_db_init(ex_handle) != CQM_SUCCESS) {
+ cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_db_init));
+ goto err3;
+ }
+
+	/* The timer bitmap is set by CQM from the start; it is no longer
+	 * set/cleared on ifconfig up/down
+	 */
+ if (hifc_func_tmr_bitmap_set(ex_handle, 1) != CQM_SUCCESS) {
+ cqm_err(handle->dev_hdl, "Timer start: enable timer bitmap failed\n");
+ goto err5;
+ }
+
+ return CQM_SUCCESS;
+
+err5:
+ cqm_db_uninit(ex_handle);
+err3:
+ cqm_event_uninit(ex_handle);
+err2:
+ cqm_mem_uninit(ex_handle);
+err1:
+ handle->cqm_hdl = NULL;
+ kfree(cqm_handle);
+ return CQM_FAIL;
+}
+
+/**
+ * cqm_uninit - Deinitialize cqm; called when a function is removed
+ * @ex_handle: handle of hwdev
+ */
+void cqm_uninit(void *ex_handle)
+{
+ struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle;
+ struct cqm_handle_s *cqm_handle = NULL;
+ s32 ret = CQM_FAIL;
+
+ CQM_PTR_CHECK_NO_RET(ex_handle, CQM_PTR_NULL(ex_handle), return);
+
+ cqm_handle = (struct cqm_handle_s *)(handle->cqm_hdl);
+ CQM_PTR_CHECK_NO_RET(cqm_handle, CQM_PTR_NULL(cqm_handle), return);
+
+	/* The timer bitmap is set by CQM from the start; it is no longer
+	 * set/cleared on ifconfig up/down
+	 */
+ cqm_info(handle->dev_hdl, "Timer stop: disable timer\n");
+ if (hifc_func_tmr_bitmap_set(ex_handle, 0) != CQM_SUCCESS)
+ cqm_err(handle->dev_hdl, "Timer stop: disable timer bitmap failed\n");
+
+	/* Stop the timer and wait about one millisecond before
+	 * releasing the resources
+	 */
+ if ((cqm_handle->func_attribute.func_type == CQM_PPF) &&
+ (cqm_handle->func_capability.timer_enable == CQM_TIMER_ENABLE)) {
+ cqm_info(handle->dev_hdl, "Timer stop: hifc ppf timer stop\n");
+ ret = hifc_ppf_tmr_stop(handle);
+
+ if (ret != CQM_SUCCESS) {
+ cqm_info(handle->dev_hdl, "Timer stop: hifc ppf timer stop, ret=%d\n",
+ ret);
+			/* A timer stop failure does not affect
+			 * resource release
+			 */
+ }
+ usleep_range(900, 1000);
+ }
+
+ /* Release hardware doorbell */
+ cqm_db_uninit(ex_handle);
+
+ /* Cancel the callback of chipif */
+ cqm_event_uninit(ex_handle);
+
+ /* Release all table items
+ * and require the service to release all objects
+ */
+ cqm_mem_uninit(ex_handle);
+
+ /* Release cqm_handle */
+ handle->cqm_hdl = NULL;
+ kfree(cqm_handle);
+}
+
+/**
+ * cqm_mem_init - Initialize related memory of cqm,
+ * including all levels of entries
+ * @ex_handle: handle of hwdev
+ */
+s32 cqm_mem_init(void *ex_handle)
+{
+ struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle;
+ struct cqm_handle_s *cqm_handle = NULL;
+
+ cqm_handle = (struct cqm_handle_s *)(handle->cqm_hdl);
+
+ if (cqm_bat_init(cqm_handle) != CQM_SUCCESS) {
+ cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_bat_init));
+ return CQM_FAIL;
+ }
+
+ if (cqm_cla_init(cqm_handle) != CQM_SUCCESS) {
+ cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_cla_init));
+ goto err1;
+ }
+
+ if (cqm_bitmap_init(cqm_handle) != CQM_SUCCESS) {
+ cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_bitmap_init));
+ goto err2;
+ }
+
+ if (cqm_object_table_init(cqm_handle) != CQM_SUCCESS) {
+ cqm_err(handle->dev_hdl,
+ CQM_FUNCTION_FAIL(cqm_object_table_init));
+ goto err3;
+ }
+
+ return CQM_SUCCESS;
+
+err3:
+ cqm_bitmap_uninit(cqm_handle);
+err2:
+ cqm_cla_uninit(cqm_handle);
+err1:
+ cqm_bat_uninit(cqm_handle);
+ return CQM_FAIL;
+}
+
+/**
+ * cqm_mem_uninit - Deinitialize related memory of cqm,
+ * including all levels of entries
+ * @ex_handle: handle of hwdev
+ */
+void cqm_mem_uninit(void *ex_handle)
+{
+ struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle;
+ struct cqm_handle_s *cqm_handle = NULL;
+
+ cqm_handle = (struct cqm_handle_s *)(handle->cqm_hdl);
+
+ cqm_object_table_uninit(cqm_handle);
+ cqm_bitmap_uninit(cqm_handle);
+ cqm_cla_uninit(cqm_handle);
+ cqm_bat_uninit(cqm_handle);
+}
+
+/**
+ * cqm_event_init - Initialize the event callback of cqm
+ * @ex_handle: handle of hwdev
+ */
+s32 cqm_event_init(void *ex_handle)
+{
+ struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle;
+
+ /* Register ceq and aeq callbacks with chipif */
+ if (hifc_aeq_register_swe_cb(ex_handle,
+ HIFC_STATEFULL_EVENT,
+ cqm_aeq_callback) != CHIPIF_SUCCESS) {
+ cqm_err(handle->dev_hdl, "Event: fail to register aeq callback\n");
+ return CQM_FAIL;
+ }
+
+ return CQM_SUCCESS;
+}
+
+/**
+ * cqm_event_uninit - Deinitialize the event callback of cqm
+ * @ex_handle: handle of hwdev
+ */
+void cqm_event_uninit(void *ex_handle)
+{
+ (void)hifc_aeq_unregister_swe_cb(ex_handle, HIFC_STATEFULL_EVENT);
+}
+
+/**
+ * cqm_db_addr_alloc - Allocate one page of hardware doorbell and one page of
+ * dwqe that share the same index; the addresses obtained are physical
+ * addresses, and each function has up to 1K of them
+ * @ex_handle: handle of hwdev
+ * @db_addr: the address of doorbell
+ * @dwqe_addr: the address of dwqe
+ */
+s32 cqm_db_addr_alloc(void *ex_handle, void __iomem **db_addr,
+ void __iomem **dwqe_addr)
+{
+ struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle;
+
+ CQM_PTR_CHECK_RET(ex_handle, return CQM_FAIL, CQM_PTR_NULL(ex_handle));
+ CQM_PTR_CHECK_RET(db_addr, return CQM_FAIL, CQM_PTR_NULL(db_addr));
+ CQM_PTR_CHECK_RET(dwqe_addr, return CQM_FAIL, CQM_PTR_NULL(dwqe_addr));
+
+ atomic_inc(&handle->hw_stats.cqm_stats.cqm_db_addr_alloc_cnt);
+
+ return hifc_alloc_db_addr(ex_handle, db_addr, dwqe_addr);
+}
+
+/**
+ * cqm_db_init - Initialize doorbell of cqm
+ * @ex_handle: handle of hwdev
+ */
+s32 cqm_db_init(void *ex_handle)
+{
+ struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle;
+ struct cqm_handle_s *cqm_handle = NULL;
+ struct cqm_service_s *service = NULL;
+
+ cqm_handle = (struct cqm_handle_s *)(handle->cqm_hdl);
+
+ /* Assign hardware doorbell for service */
+ service = &cqm_handle->service;
+
+ if (cqm_db_addr_alloc(ex_handle,
+ &service->hardware_db_vaddr,
+ &service->dwqe_vaddr) != CQM_SUCCESS) {
+ cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_db_addr_alloc));
+ return CQM_FAIL;
+ }
+
+ return CQM_SUCCESS;
+}
+
+/**
+ * cqm_db_addr_free - Release a page of hardware doorbell and dwqe
+ * @ex_handle: handle of hwdev
+ * @db_addr: the address of doorbell
+ * @dwqe_addr: the address of dwqe
+ */
+void cqm_db_addr_free(void *ex_handle, void __iomem *db_addr,
+ void __iomem *dwqe_addr)
+{
+ struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle;
+
+ CQM_PTR_CHECK_NO_RET(ex_handle, CQM_PTR_NULL(ex_handle), return);
+
+ atomic_inc(&handle->hw_stats.cqm_stats.cqm_db_addr_free_cnt);
+
+ hifc_free_db_addr(ex_handle, db_addr, dwqe_addr);
+}
+
+/**
+ * cqm_db_uninit - Deinitialize doorbell of cqm
+ * @ex_handle: handle of hwdev
+ */
+void cqm_db_uninit(void *ex_handle)
+{
+ struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle;
+ struct cqm_handle_s *cqm_handle = NULL;
+ struct cqm_service_s *service = NULL;
+
+ cqm_handle = (struct cqm_handle_s *)(handle->cqm_hdl);
+
+ /* Release hardware doorbell */
+ service = &cqm_handle->service;
+
+ cqm_db_addr_free(ex_handle, service->hardware_db_vaddr,
+ service->dwqe_vaddr);
+}
+
+/**
+ * cqm_aeq_callback - cqm module callback processing of aeq
+ * @ex_handle: handle of hwdev
+ * @event: the input type of event
+ * @data: the input data
+ */
+u8 cqm_aeq_callback(void *ex_handle, u8 event, u64 data)
+{
+#define CQM_AEQ_BASE_T_FC 48
+#define CQM_AEQ_BASE_T_FCOE 56
+ struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle;
+ struct cqm_handle_s *cqm_handle = NULL;
+ struct cqm_service_s *service = NULL;
+ struct service_register_template_s *service_template = NULL;
+ u8 event_level = FAULT_LEVEL_MAX;
+
+ CQM_PTR_CHECK_RET(ex_handle, return event_level,
+ CQM_PTR_NULL(ex_handle));
+
+ atomic_inc(&handle->hw_stats.cqm_stats.cqm_aeq_callback_cnt[event]);
+
+ cqm_handle = (struct cqm_handle_s *)(handle->cqm_hdl);
+ CQM_PTR_CHECK_RET(cqm_handle, return event_level,
+ CQM_PTR_NULL(cqm_handle));
+
+ if (event >= (u8)CQM_AEQ_BASE_T_FC &&
+ (event < (u8)CQM_AEQ_BASE_T_FCOE)) {
+ service = &cqm_handle->service;
+ service_template = &service->service_template;
+
+ if (!service_template->aeq_callback) {
+ cqm_err(handle->dev_hdl, "Event: service aeq_callback unregistered\n");
+ } else {
+ service_template->aeq_callback(
+ service_template->service_handle, event, data);
+ }
+
+ return event_level;
+ }
+
+ cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(event));
+ return CQM_FAIL;
+}
+
+/**
+ * cqm_service_register - Service driver registers callback template with cqm
+ * @ex_handle: handle of hwdev
+ * @service_template: the template of service registration
+ */
+s32 cqm_service_register(void *ex_handle,
+ struct service_register_template_s *service_template)
+{
+ struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle;
+ struct cqm_handle_s *cqm_handle = NULL;
+ struct cqm_service_s *service = NULL;
+
+ CQM_PTR_CHECK_RET(ex_handle, return CQM_FAIL, CQM_PTR_NULL(ex_handle));
+
+ cqm_handle = (struct cqm_handle_s *)(handle->cqm_hdl);
+ CQM_PTR_CHECK_RET(cqm_handle, return CQM_FAIL,
+ CQM_PTR_NULL(cqm_handle));
+ CQM_PTR_CHECK_RET(service_template, return CQM_FAIL,
+ CQM_PTR_NULL(service_template));
+
+ service = &cqm_handle->service;
+
+ if (service->has_register == true) {
+ cqm_err(handle->dev_hdl, "Service register: service has registered\n");
+ return CQM_FAIL;
+ }
+
+ service->has_register = true;
+ (void)memcpy((void *)(&service->service_template),
+ (void *)service_template,
+ sizeof(struct service_register_template_s));
+
+ return CQM_SUCCESS;
+}
+
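+/*
+ * Usage sketch for the registration API above (illustrative only, not part of
+ * the driver): a service driver fills a template once per function and
+ * registers it; the template is copied, so it may live on the stack.
+ * "example_aeq_callback", "example_service" and "hwdev" below are
+ * hypothetical names, and the context sizes are placeholder values.
+ *
+ *	static void example_aeq_callback(void *service_handle, u8 event_type,
+ *					 u64 val)
+ *	{
+ *		// dispatch the asynchronous event to the service
+ *	}
+ *
+ *	struct service_register_template_s tmpl = {0};
+ *
+ *	tmpl.service_type = 0;
+ *	tmpl.srq_ctx_size = 64;
+ *	tmpl.scq_ctx_size = 64;
+ *	tmpl.service_handle = example_service;
+ *	tmpl.aeq_callback = example_aeq_callback;
+ *	if (cqm_service_register(hwdev, &tmpl) != CQM_SUCCESS)
+ *		// handle failure; call cqm_service_unregister(hwdev) on teardown
+ */
+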
+/**
+ * cqm_service_unregister - Unregister the service driver's callback template
+ * from cqm
+ * @ex_handle: handle of hwdev
+ */
+void cqm_service_unregister(void *ex_handle)
+{
+ struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle;
+ struct cqm_handle_s *cqm_handle = NULL;
+ struct cqm_service_s *service = NULL;
+
+ CQM_PTR_CHECK_NO_RET(ex_handle, CQM_PTR_NULL(ex_handle), return);
+
+ cqm_handle = (struct cqm_handle_s *)(handle->cqm_hdl);
+ CQM_PTR_CHECK_NO_RET(cqm_handle, CQM_PTR_NULL(cqm_handle), return);
+
+ service = &cqm_handle->service;
+
+ service->has_register = false;
+ memset(&service->service_template, 0,
+ sizeof(struct service_register_template_s));
+}
+
+/**
+ * cqm_cmd_alloc - Allocate a cmd buffer with a fixed size of 2K; the buffer
+ * content is not cleared, so the service must clear it itself
+ * @ex_handle: handle of hwdev
+ */
+struct cqm_cmd_buf_s *cqm_cmd_alloc(void *ex_handle)
+{
+ struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle;
+
+ CQM_PTR_CHECK_RET(ex_handle, return NULL, CQM_PTR_NULL(ex_handle));
+
+ atomic_inc(&handle->hw_stats.cqm_stats.cqm_cmd_alloc_cnt);
+
+ return (struct cqm_cmd_buf_s *)hifc_alloc_cmd_buf(ex_handle);
+}
+
+/**
+ * cqm_cmd_free - Free a cmd buffer
+ * @ex_handle: handle of hwdev
+ * @cmd_buf: the cmd buffer to free
+ */
+void cqm_cmd_free(void *ex_handle, struct cqm_cmd_buf_s *cmd_buf)
+{
+ struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle;
+
+ CQM_PTR_CHECK_NO_RET(ex_handle, CQM_PTR_NULL(ex_handle), return);
+ CQM_PTR_CHECK_NO_RET(cmd_buf, CQM_PTR_NULL(cmd_buf), return);
+ CQM_PTR_CHECK_NO_RET(cmd_buf->buf, CQM_PTR_NULL(buf), return);
+
+ atomic_inc(&handle->hw_stats.cqm_stats.cqm_cmd_free_cnt);
+
+ hifc_free_cmd_buf(ex_handle, (struct hifc_cmd_buf *)cmd_buf);
+}
+
+/**
+ * cqm_send_cmd_box - Send a cmd in box mode; the call waits on a completion
+ * and may sleep
+ * @ex_handle: handle of hwdev
+ * @ack_type: the type of ack
+ * @mod: the module the cmd is sent to
+ * @cmd: the input cmd
+ * @buf_in: the input buffer of cqm_cmd
+ * @buf_out: the output buffer of cqm_cmd
+ * @timeout: the maximum time to wait for the cmd to complete
+ */
+s32 cqm_send_cmd_box(void *ex_handle, u8 ack_type, u8 mod, u8 cmd,
+ struct cqm_cmd_buf_s *buf_in,
+ struct cqm_cmd_buf_s *buf_out, u32 timeout)
+{
+ struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle;
+
+ CQM_PTR_CHECK_RET(ex_handle, return CQM_FAIL, CQM_PTR_NULL(ex_handle));
+ CQM_PTR_CHECK_RET(buf_in, return CQM_FAIL, CQM_PTR_NULL(buf_in));
+ CQM_PTR_CHECK_RET(buf_in->buf, return CQM_FAIL, CQM_PTR_NULL(buf));
+
+ atomic_inc(&handle->hw_stats.cqm_stats.cqm_send_cmd_box_cnt);
+
+ return hifc_cmdq_detail_resp(ex_handle, ack_type, mod, cmd,
+ (struct hifc_cmd_buf *)buf_in,
+ (struct hifc_cmd_buf *)buf_out, timeout);
+}
+
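+/*
+ * Command-flow sketch for the box-mode API above (illustrative only, not part
+ * of the driver): the caller allocates a 2K cmd buffer, fills it, converts it
+ * to big endian and sends it through the cmdq; cqm_cla_cache_invalid() in
+ * hifc_cqm_object.c follows the same pattern. "struct example_cmd" and "cmd"
+ * are hypothetical placeholders for a service-defined command.
+ *
+ *	struct cqm_cmd_buf_s *buf_in = cqm_cmd_alloc(hwdev);
+ *
+ *	if (!buf_in)
+ *		return CQM_FAIL;
+ *	buf_in->size = sizeof(struct example_cmd);
+ *	// fill buf_in->buf, then convert it to big endian, e.g. with
+ *	// cqm_swab32((u8 *)buf_in->buf, buf_in->size >> 2);
+ *	ret = cqm_send_cmd_box(hwdev, CQM_CMD_ACK_TYPE_CMDQ, CQM_MOD_CQM,
+ *			       cmd, buf_in, NULL, CQM_CMD_TIMEOUT);
+ *	cqm_cmd_free(hwdev, buf_in);
+ */
+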
+/**
+ * cqm_ring_hardware_db - Ring the hardware doorbell
+ * @ex_handle: handle of hwdev
+ * @service_type: each service is allocated one page of hardware doorbell
+ * @db_count: the PI bits that do not fit into the 64-bit doorbell,
+ * carried in doorbell[7:0]
+ * @db: doorbell content assembled by the service; any endianness
+ * conversion must be completed by the service itself
+ */
+s32 cqm_ring_hardware_db(void *ex_handle, u32 service_type, u8 db_count, u64 db)
+{
+ struct hifc_hwdev *handle;
+ struct cqm_handle_s *cqm_handle;
+ struct cqm_service_s *service;
+
+ handle = (struct hifc_hwdev *)ex_handle;
+ cqm_handle = (struct cqm_handle_s *)(handle->cqm_hdl);
+ service = &cqm_handle->service;
+
+	/* Ensure all prior writes are visible before ringing the doorbell */
+ wmb();
+ *((u64 *)service->hardware_db_vaddr + db_count) = db;
+
+ return CQM_SUCCESS;
+}
diff --git a/drivers/scsi/huawei/hifc/hifc_cqm_main.h b/drivers/scsi/huawei/hifc/hifc_cqm_main.h
new file mode 100644
index 000000000000..70b0c9ae0609
--- /dev/null
+++ b/drivers/scsi/huawei/hifc/hifc_cqm_main.h
@@ -0,0 +1,366 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei Hifc PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ */
+#ifndef __CQM_MAIN_H__
+#define __CQM_MAIN_H__
+
+#define CHIPIF_SUCCESS 0
+#define CQM_TIMER_ENABLE 1
+
+enum cqm_object_type_e {
+ CQM_OBJECT_ROOT_CTX = 0,
+ CQM_OBJECT_SERVICE_CTX,
+ CQM_OBJECT_NONRDMA_EMBEDDED_RQ = 10,
+ CQM_OBJECT_NONRDMA_EMBEDDED_SQ,
+ CQM_OBJECT_NONRDMA_SRQ,
+ CQM_OBJECT_NONRDMA_EMBEDDED_CQ,
+ CQM_OBJECT_NONRDMA_SCQ,
+};
+
+struct service_register_template_s {
+ u32 service_type;
+ u32 srq_ctx_size; /* srq,scq context_size config */
+ u32 scq_ctx_size;
+	void *service_handle; /* handle passed to the ceq/aeq callbacks */
+
+ void (*aeq_callback)(void *service_handle, u8 event_type, u64 val);
+};
+
+struct cqm_service_s {
+ bool has_register;
+ void __iomem *hardware_db_vaddr;
+ void __iomem *dwqe_vaddr;
+ u32 buf_order; /* size of per buf 2^buf_order page */
+ struct service_register_template_s service_template;
+};
+
+struct cqm_func_capability_s {
+	bool qpc_alloc_static; /* Allocate qpc memory dynamically/statically */
+	bool scqc_alloc_static;
+	u8 timer_enable; /* whether the timer is enabled */
+
+ u32 flow_table_based_conn_number;
+ u32 flow_table_based_conn_cache_number; /* Maximum number in cache */
+ u32 bloomfilter_length; /* Bloomfilter table size, aligned by 64B */
+ /* The starting position of the bloomfilter table in the cache */
+ u32 bloomfilter_addr;
+ u32 qpc_reserved; /* Reserved bits in bitmap */
+ u32 mpt_reserved; /* There are also reserved bits in ROCE/IWARP mpt */
+ /* All basic_size must be 2^n aligned */
+ u32 hash_number;
+ /* Number of hash buckets, BAT table fill size is
+ * aligned with 64 buckets, at least 64
+ */
+ u32 hash_basic_size;
+ /* Hash bucket size is 64B, including 5 valid
+ * entries and 1 nxt_entry
+ */
+ u32 qpc_number;
+ u32 qpc_basic_size;
+
+	/* Note: for cqm special test */
+ u32 pagesize_reorder;
+ bool xid_alloc_mode;
+ bool gpa_check_enable;
+ u32 scq_reserved;
+
+ u32 mpt_number;
+ u32 mpt_basic_size;
+ u32 scqc_number;
+ u32 scqc_basic_size;
+ u32 srqc_number;
+ u32 srqc_basic_size;
+
+ u32 gid_number;
+ u32 gid_basic_size;
+ u32 lun_number;
+ u32 lun_basic_size;
+ u32 taskmap_number;
+ u32 taskmap_basic_size;
+ u32 l3i_number;
+ u32 l3i_basic_size;
+ u32 childc_number;
+ u32 childc_basic_size;
+ u32 child_qpc_id_start; /* Child ctx of FC is global addressing */
+ /* The maximum number of child ctx in
+ * chip is 8096
+ */
+ u32 childc_number_all_function;
+
+ u32 timer_number;
+ u32 timer_basic_size;
+ u32 xid2cid_number;
+ u32 xid2cid_basic_size;
+ u32 reorder_number;
+ u32 reorder_basic_size;
+};
+
+#define CQM_PF TYPE_PF
+#define CQM_PPF TYPE_PPF
+#define CQM_BAT_ENTRY_MAX (16)
+#define CQM_BAT_ENTRY_SIZE (16)
+
+struct cqm_buf_list_s {
+ void *va;
+ dma_addr_t pa;
+ u32 refcount;
+};
+
+struct cqm_buf_s {
+ struct cqm_buf_list_s *buf_list;
+ struct cqm_buf_list_s direct;
+	u32 page_number; /* page_number = 2^n * buf_number */
+	u32 buf_number; /* number of buf_list nodes */
+	u32 buf_size; /* buf_size = 2^n * PAGE_SIZE */
+};
+
+struct cqm_bitmap_s {
+ ulong *table;
+ u32 max_num;
+ u32 last;
+ /* The index that cannot be allocated is reserved in the front */
+ u32 reserved_top;
+ /* Lock for bitmap allocation */
+ spinlock_t lock;
+};
+
+struct completion;
+struct cqm_object_s {
+ u32 service_type;
+ u32 object_type; /* context,queue,mpt,mtt etc */
+ u32 object_size;
+	/* size in bytes, for queue, ctx, MPT */
+ atomic_t refcount;
+ struct completion free;
+ void *cqm_handle;
+};
+
+struct cqm_object_table_s {
+ struct cqm_object_s **table;
+ u32 max_num;
+ rwlock_t lock;
+};
+
+struct cqm_cla_table_s {
+ u32 type;
+ u32 max_buffer_size;
+ u32 obj_num;
+ bool alloc_static; /* Whether the buffer is statically allocated */
+ u32 cla_lvl;
+ /* The value of x calculated by the cacheline, used for chip */
+ u32 cacheline_x;
+ /* The value of y calculated by the cacheline, used for chip */
+ u32 cacheline_y;
+ /* The value of z calculated by the cacheline, used for chip */
+ u32 cacheline_z;
+ /* The value of x calculated by the obj_size, used for software */
+ u32 x;
+ /* The value of y calculated by the obj_size, used for software */
+ u32 y;
+ /* The value of z calculated by the obj_size, used for software */
+ u32 z;
+ struct cqm_buf_s cla_x_buf;
+ struct cqm_buf_s cla_y_buf;
+ struct cqm_buf_s cla_z_buf;
+ u32 trunk_order;/* A continuous physical page contains 2^order pages */
+ u32 obj_size;
+ /* Lock for cla buffer allocation and free */
+ struct mutex lock;
+ struct cqm_bitmap_s bitmap;
+ /* The association mapping table of index and object */
+ struct cqm_object_table_s obj_table;
+};
+
+typedef void (*init_handler)(void *cqm_handle,
+ struct cqm_cla_table_s *cla_table,
+ void *cap);
+
+struct cqm_cla_entry_init_s {
+ u32 type;
+ init_handler cqm_cla_init_handler;
+};
+
+struct cqm_bat_table_s {
+ u32 bat_entry_type[CQM_BAT_ENTRY_MAX];
+ u8 bat[CQM_BAT_ENTRY_MAX * CQM_BAT_ENTRY_SIZE];
+ struct cqm_cla_table_s entry[CQM_BAT_ENTRY_MAX];
+ u32 bat_size;
+};
+
+struct cqm_handle_s {
+ struct hifc_hwdev *ex_handle;
+ struct pci_dev *dev;
+ struct hifc_func_attr func_attribute; /* vf or pf */
+ struct cqm_func_capability_s func_capability;
+ struct cqm_service_s service;
+ struct cqm_bat_table_s bat_table;
+
+ struct list_head node;
+};
+
+struct cqm_cmd_buf_s {
+ void *buf;
+ dma_addr_t dma;
+ u16 size;
+};
+
+struct cqm_queue_header_s {
+ u64 doorbell_record;
+ u64 ci_record;
+	u64 rsv1; /* area shared between driver and ucode */
+	u64 rsv2; /* area shared between driver and ucode */
+};
+
+struct cqm_queue_s {
+ struct cqm_object_s object;
+	u32 index; /* embedded queue/QP has no index; SRQ and SCQ have one */
+ void *priv; /* service driver private info */
+ u32 current_q_doorbell;
+ u32 current_q_room;
+ /* nonrdma: only select q_room_buf_1 for q_room_buf */
+ struct cqm_buf_s q_room_buf_1;
+ struct cqm_buf_s q_room_buf_2;
+ struct cqm_queue_header_s *q_header_vaddr;
+ dma_addr_t q_header_paddr;
+ u8 *q_ctx_vaddr; /* SRQ and SCQ ctx space */
+ dma_addr_t q_ctx_paddr;
+ u32 valid_wqe_num;
+	/* added for srq */
+	u8 *tail_container;
+	u8 *head_container;
+	u8 queue_link_mode; /* link or ring */
+};
+
+struct cqm_nonrdma_qinfo_s {
+ struct cqm_queue_s common;
+ u32 wqe_size;
+ /* The number of wqe contained in each buf (excluding link wqe),
+ * For srq, it is the number of wqe contained in 1 container
+ */
+ u32 wqe_per_buf;
+ u32 q_ctx_size;
+ /* When different services use different sizes of ctx, a large ctx will
+ * occupy multiple consecutive indexes of the bitmap
+ */
+ u32 index_count;
+ u32 container_size;
+};
+
+/* service context, QPC, mpt */
+struct cqm_qpc_mpt_s {
+ struct cqm_object_s object;
+ u32 xid;
+ dma_addr_t paddr;
+ void *priv; /* service driver private info */
+ u8 *vaddr;
+};
+
+struct cqm_qpc_mpt_info_s {
+ struct cqm_qpc_mpt_s common;
+ /* When different services use different sizes of QPC, large QPC/mpt
+ * will occupy multiple consecutive indexes of the bitmap
+ */
+ u32 index_count;
+};
+
+#define CQM_ADDR_COMBINE(high_addr, low_addr) \
+ ((((dma_addr_t)(high_addr)) << 32) + ((dma_addr_t)(low_addr)))
+#define CQM_ADDR_HI(addr) ((u32)((u64)(addr) >> 32))
+#define CQM_ADDR_LW(addr) ((u32)((u64)(addr) & 0xffffffff))
+#define CQM_HASH_BUCKET_SIZE_64 (64)
+#define CQM_LUN_SIZE_8 (8)
+#define CQM_L3I_SIZE_8 (8)
+#define CQM_TIMER_SIZE_32 (32)
+#define CQM_LUN_FC_NUM (64)
+#define CQM_TASKMAP_FC_NUM (4)
+#define CQM_L3I_COMM_NUM (64)
+#define CQM_TIMER_SCALE_NUM (2*1024)
+#define CQM_TIMER_ALIGN_WHEEL_NUM (8)
+#define CQM_TIMER_ALIGN_SCALE_NUM \
+ (CQM_TIMER_SCALE_NUM*CQM_TIMER_ALIGN_WHEEL_NUM)
+#define CQM_FC_PAGESIZE_ORDER (0)
+#define CQM_QHEAD_ALIGN_ORDER (6)
+
+s32 cqm_mem_init(void *ex_handle);
+void cqm_mem_uninit(void *ex_handle);
+s32 cqm_event_init(void *ex_handle);
+void cqm_event_uninit(void *ex_handle);
+s32 cqm_db_init(void *ex_handle);
+void cqm_db_uninit(void *ex_handle);
+s32 cqm_init(void *ex_handle);
+void cqm_uninit(void *ex_handle);
+s32 cqm_service_register(void *ex_handle,
+ struct service_register_template_s *service_template);
+void cqm_service_unregister(void *ex_handle);
+s32 cqm_ring_hardware_db(void *ex_handle,
+ u32 service_type,
+ u8 db_count, u64 db);
+s32 cqm_send_cmd_box(void *ex_handle, u8 ack_type, u8 mod, u8 cmd,
+ struct cqm_cmd_buf_s *buf_in,
+ struct cqm_cmd_buf_s *buf_out,
+ u32 timeout);
+u8 cqm_aeq_callback(void *ex_handle, u8 event, u64 data);
+void cqm_object_delete(struct cqm_object_s *object);
+struct cqm_cmd_buf_s *cqm_cmd_alloc(void *ex_handle);
+void cqm_cmd_free(void *ex_handle, struct cqm_cmd_buf_s *cmd_buf);
+struct cqm_queue_s *cqm_object_fc_srq_create(
+ void *ex_handle,
+ enum cqm_object_type_e object_type,
+ u32 wqe_number,
+ u32 wqe_size,
+ void *object_priv);
+struct cqm_qpc_mpt_s *cqm_object_qpc_mpt_create(
+ void *ex_handle,
+ enum cqm_object_type_e object_type,
+ u32 object_size,
+ void *object_priv,
+ u32 index);
+struct cqm_queue_s *cqm_object_nonrdma_queue_create(
+ void *ex_handle,
+ enum cqm_object_type_e object_type,
+ u32 wqe_number,
+ u32 wqe_size,
+ void *object_priv);
+
+#define CQM_PTR_NULL(x) "%s: "#x" is null\n", __func__
+#define CQM_ALLOC_FAIL(x) "%s: "#x" alloc fail\n", __func__
+#define CQM_MAP_FAIL(x) "%s: "#x" map fail\n", __func__
+#define CQM_FUNCTION_FAIL(x) "%s: "#x" return failure\n", __func__
+#define CQM_WRONG_VALUE(x) "%s: "#x" %u is wrong\n", __func__, (u32)x
+
+#define cqm_err(dev, format, ...) \
+ dev_err(dev, "[CQM]"format, ##__VA_ARGS__)
+#define cqm_warn(dev, format, ...) \
+ dev_warn(dev, "[CQM]"format, ##__VA_ARGS__)
+#define cqm_notice(dev, format, ...) \
+ dev_notice(dev, "[CQM]"format, ##__VA_ARGS__)
+#define cqm_info(dev, format, ...) \
+ dev_info(dev, "[CQM]"format, ##__VA_ARGS__)
+#define cqm_dbg(format, ...)
+
+#define CQM_PTR_CHECK_RET(ptr, ret, desc) \
+	do {\
+		if (unlikely(!(ptr))) {\
+			pr_err("[CQM]"desc);\
+			ret; \
+		} \
+	} while (0)
+
+#define CQM_PTR_CHECK_NO_RET(ptr, desc, ret) \
+	do {\
+		if (unlikely(!(ptr))) {\
+			pr_err("[CQM]"desc);\
+			ret; \
+		} \
+	} while (0)
+#define CQM_CHECK_EQUAL_RET(dev_hdl, actual, expect, ret, desc) \
+ do {\
+ if (unlikely((expect) != (actual))) {\
+ cqm_err(dev_hdl, desc);\
+ ret; \
+ } \
+ } while (0)
+
+#endif /* __CQM_MAIN_H__ */
diff --git a/drivers/scsi/huawei/hifc/hifc_cqm_object.c b/drivers/scsi/huawei/hifc/hifc_cqm_object.c
new file mode 100644
index 000000000000..406b13f92e64
--- /dev/null
+++ b/drivers/scsi/huawei/hifc/hifc_cqm_object.c
@@ -0,0 +1,3599 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei Hifc PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/device.h>
+#include <linux/gfp.h>
+#include <linux/mm.h>
+
+#include "hifc_knl_adp.h"
+#include "hifc_hw.h"
+#include "hifc_hwdev.h"
+#include "hifc_hwif.h"
+#include "hifc_api_cmd.h"
+#include "hifc_mgmt.h"
+#include "hifc_cfg.h"
+#include "hifc_cqm_object.h"
+#include "hifc_cqm_main.h"
+#define common_section
+
+#define CQM_MOD_CQM 8
+#define CQM_HARDWARE_DOORBELL 1
+/**
+ * cqm_swab64 - Swap the byte order of a memory block in 8-byte units
+ * @addr: start address of the memory block
+ * @cnt: the number of 8-byte units in the memory block
+ */
+void cqm_swab64(u8 *addr, u32 cnt)
+{
+ u32 i = 0;
+ u64 *temp = (u64 *)addr;
+ u64 value = 0;
+
+ for (i = 0; i < cnt; i++) {
+ value = __swab64(*temp);
+ *temp = value;
+ temp++;
+ }
+}
+
+/**
+ * cqm_swab32 - Swap the byte order of a memory block in 4-byte units
+ * @addr: start address of the memory block
+ * @cnt: the number of 4-byte units in the memory block
+ */
+void cqm_swab32(u8 *addr, u32 cnt)
+{
+ u32 i = 0;
+ u32 *temp = (u32 *)addr;
+ u32 value = 0;
+
+ for (i = 0; i < cnt; i++) {
+ value = __swab32(*temp);
+ *temp = value;
+ temp++;
+ }
+}
+
+/**
+ * cqm_shift - Compute the floor of the base-2 logarithm of the input
+ * @data: the input data
+ */
+s32 cqm_shift(u32 data)
+{
+ s32 shift = -1;
+
+ do {
+ data >>= 1;
+ shift++;
+ } while (data);
+
+ return shift;
+}
+
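+/*
+ * Illustrative values for cqm_shift() above: cqm_shift(1) = 0,
+ * cqm_shift(2) = 1, cqm_shift(3) = 1 (rounded down) and cqm_shift(4096) = 12.
+ * It is used below to turn a trunk_size/obj_size ratio into an index-bit
+ * count.
+ */
+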
+/**
+ * cqm_check_align - Check whether the input is a power of two; 0 and 1 are
+ * treated as not aligned
+ * @data: the input data
+ */
+bool cqm_check_align(u32 data)
+{
+ if (data == 0)
+ return false;
+
+ do {
+		/* If data is divisible by 2, shift it right by one bit */
+ if ((data & 0x1) == 0) {
+ data >>= 1;
+ } else {
+			/* If data is not divisible by 2, it is not a
+			 * power of two, return false
+			 */
+ return false;
+ }
+ } while (data != 1);
+
+ return true;
+}
+
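+/*
+ * Illustrative values for cqm_check_align() above: 0, 1 and 96 return false,
+ * while 2, 64 and 4096 return true; obj_size values are validated with it in
+ * cqm_cla_xyz_check().
+ */
+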
+/**
+ * cqm_kmalloc_align - Allocate memory whose start address is aligned to
+ * 2^align_order bytes
+ * @size: the size of memory to allocate
+ * @flags: the gfp flags for the allocation
+ * @align_order: log2 of the required alignment
+ */
+static void *cqm_kmalloc_align(size_t size, gfp_t flags, u16 align_order)
+{
+ void *orig_addr = NULL;
+ void *align_addr = NULL;
+ void *index_addr = NULL;
+
+ orig_addr = kmalloc(size + ((u64)1 << align_order) + sizeof(void *),
+ flags);
+ if (!orig_addr)
+ return NULL;
+
+ index_addr = (void *)((char *)orig_addr + sizeof(void *));
+ align_addr = (void *)((((u64)index_addr +
+ ((u64)1 << align_order) - 1) >> align_order) << align_order);
+
+ /* Record the original memory address for memory release. */
+ index_addr = (void *)((char *)align_addr - sizeof(void *));
+ *(void **)index_addr = orig_addr;
+
+ cqm_dbg("allocate %lu bytes aligned address: %p, original address: %p\n",
+ size, align_addr, orig_addr);
+
+ return align_addr;
+}
+
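+/*
+ * Layout sketch for cqm_kmalloc_align()/cqm_kfree_align() above
+ * (illustrative): kmalloc returns orig_addr, the address handed to the caller
+ * is rounded up to a 2^align_order boundary, and the sizeof(void *) bytes
+ * just before it hold orig_addr so that the free path can recover it.
+ *
+ *	orig_addr .. [padding] [void *orig_addr] [aligned area of "size" bytes]
+ *	                                          ^-- align_addr returned
+ */
+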
+/**
+ * cqm_kfree_align - Free memory allocated by cqm_kmalloc_align
+ * @addr: the aligned address to free
+ */
+static void cqm_kfree_align(void *addr)
+{
+ void *index_addr = NULL;
+
+ /* Release original memory address */
+ index_addr = (void *)((char *)addr - sizeof(void *));
+
+ cqm_dbg("free aligned address: %p, original address: %p\n",
+ addr, *(void **)index_addr);
+
+ kfree(*(void **)index_addr);
+}
+
+/**
+ * cqm_buf_alloc_page - Allocate pages for every buffer in the buffer list
+ * @cqm_handle: handle of cqm
+ * @buf: the buffer to allocate pages for
+ */
+s32 cqm_buf_alloc_page(struct cqm_handle_s *cqm_handle, struct cqm_buf_s *buf)
+{
+ struct hifc_hwdev *handle = cqm_handle->ex_handle;
+ u32 order = 0;
+ void *va = NULL;
+ s32 i = 0;
+
+ order = get_order(buf->buf_size);
+
+	/* Allocate pages for every buffer (non-ovs case) */
+ for (i = 0; i < (s32)buf->buf_number; i++) {
+ va = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
+ if (!va) {
+ cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(buf_page));
+ break;
+ }
+		/* Pages must be zeroed after allocation, especially
+		 * those used for the hash table
+		 */
+ memset(va, 0, buf->buf_size);
+ buf->buf_list[i].va = va;
+ }
+
+ if (i != buf->buf_number) {
+ i--;
+ for (; i >= 0; i--) {
+ free_pages((ulong)(buf->buf_list[i].va), order);
+ buf->buf_list[i].va = NULL;
+ }
+ return CQM_FAIL;
+ }
+
+ return CQM_SUCCESS;
+}
+
+/**
+ * cqm_buf_alloc_map - Buffer pci mapping
+ * @cqm_handle: handle of cqm
+ * @buf: the buffer to map
+ */
+s32 cqm_buf_alloc_map(struct cqm_handle_s *cqm_handle, struct cqm_buf_s *buf)
+{
+ struct hifc_hwdev *handle = cqm_handle->ex_handle;
+ struct pci_dev *dev = cqm_handle->dev;
+ s32 i = 0;
+ void *va = NULL;
+
+ for (i = 0; i < (s32)buf->buf_number; i++) {
+ va = buf->buf_list[i].va;
+ buf->buf_list[i].pa =
+ pci_map_single(dev, va, buf->buf_size,
+ PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(dev, buf->buf_list[i].pa)) {
+ cqm_err(handle->dev_hdl, CQM_MAP_FAIL(buf_list));
+ break;
+ }
+ }
+
+ if (i != buf->buf_number) {
+ i--;
+ for (; i >= 0; i--) {
+ pci_unmap_single(dev, buf->buf_list[i].pa,
+ buf->buf_size, PCI_DMA_BIDIRECTIONAL);
+ }
+ return CQM_FAIL;
+ }
+
+ return CQM_SUCCESS;
+}
+
+/**
+ * cqm_buf_alloc_direct - Remap the buffer pages into one virtual range
+ * @cqm_handle: handle of cqm
+ * @buf: the buffer to remap
+ * @direct: whether the direct remapping is required
+ */
+s32 cqm_buf_alloc_direct(struct cqm_handle_s *cqm_handle,
+ struct cqm_buf_s *buf, bool direct)
+{
+ struct hifc_hwdev *handle = cqm_handle->ex_handle;
+ struct page **pages = NULL;
+ u32 order = 0;
+ u32 i = 0;
+ u32 j = 0;
+
+ order = get_order(buf->buf_size);
+
+ if (direct == true) {
+ pages = (struct page **)
+ vmalloc(sizeof(struct page *) * buf->page_number);
+ if (!pages) {
+ cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(pages));
+ return CQM_FAIL;
+ }
+
+ for (i = 0; i < buf->buf_number; i++) {
+ for (j = 0; j < ((u32)1 << order); j++) {
+ pages[(i << order) + j] = (struct page *)
+ (void *)virt_to_page(
+ (u8 *)(buf->buf_list[i].va) +
+ (PAGE_SIZE * j));
+ }
+ }
+
+ /*lint -save -e648
+ *Shield alarm for kernel functions' vmapping
+ */
+ buf->direct.va = vmap(pages, buf->page_number,
+ VM_MAP, PAGE_KERNEL);
+ /*lint -restore*/
+ vfree(pages);
+ if (!buf->direct.va) {
+ cqm_err(handle->dev_hdl, CQM_MAP_FAIL(buf->direct.va));
+ return CQM_FAIL;
+ }
+ } else {
+ buf->direct.va = NULL;
+ }
+
+ return CQM_SUCCESS;
+}
+
+/**
+ * cqm_buf_alloc - Allocate and DMA-map the pages of a struct cqm_buf_s
+ * @cqm_handle: handle of cqm
+ * @buf: the buffer to allocate and map
+ * @direct: whether to also create a direct virtual mapping of the pages
+ */
+s32 cqm_buf_alloc(struct cqm_handle_s *cqm_handle,
+ struct cqm_buf_s *buf, bool direct)
+{
+ struct hifc_hwdev *handle = cqm_handle->ex_handle;
+ struct pci_dev *dev = cqm_handle->dev;
+ u32 order = 0;
+ s32 i = 0;
+
+ order = get_order(buf->buf_size);
+
+ /* Allocate for the descriptor space of buffer lists */
+ buf->buf_list = (struct cqm_buf_list_s *)
+ vmalloc(buf->buf_number *
+ sizeof(struct cqm_buf_list_s));
+
+ CQM_PTR_CHECK_RET(buf->buf_list, return CQM_FAIL,
+ CQM_ALLOC_FAIL(buf_list));
+ memset(buf->buf_list, 0,
+ buf->buf_number * sizeof(struct cqm_buf_list_s));
+
+ /* Allocate for every buffer's page */
+ if (cqm_buf_alloc_page(cqm_handle, buf) == CQM_FAIL) {
+ cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_buf_alloc_page));
+ goto err1;
+ }
+
+	/* DMA-map every buffer page */
+ if (cqm_buf_alloc_map(cqm_handle, buf) == CQM_FAIL) {
+ cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_buf_alloc_map));
+ goto err2;
+ }
+
+	/* Optionally remap the pages into one contiguous virtual range */
+ if (cqm_buf_alloc_direct(cqm_handle, buf, direct) == CQM_FAIL) {
+ cqm_err(handle->dev_hdl,
+ CQM_FUNCTION_FAIL(cqm_buf_alloc_direct));
+ goto err3;
+ }
+
+ return CQM_SUCCESS;
+
+err3:
+ for (i = 0; i < (s32)buf->buf_number; i++) {
+ pci_unmap_single(dev, buf->buf_list[i].pa, buf->buf_size,
+ PCI_DMA_BIDIRECTIONAL);
+ }
+err2:
+ for (i = 0; i < (s32)buf->buf_number; i++) {
+ free_pages((ulong)(buf->buf_list[i].va), order);
+ buf->buf_list[i].va = NULL;
+ }
+err1:
+ vfree(buf->buf_list);
+ buf->buf_list = NULL;
+ return CQM_FAIL;
+}
+
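+/*
+ * Usage sketch for cqm_buf_alloc()/cqm_buf_free() above (illustrative only;
+ * the sizes are hypothetical): with buf_size = 4 * PAGE_SIZE and
+ * buf_number = 8, eight 4-page blocks are allocated and DMA-mapped, and all
+ * 32 pages are additionally vmapped into buf.direct.va when direct is true.
+ *
+ *	struct cqm_buf_s buf = {0};
+ *
+ *	buf.buf_size = 4 * PAGE_SIZE;
+ *	buf.buf_number = 8;
+ *	buf.page_number = buf.buf_number << get_order(buf.buf_size);
+ *	if (cqm_buf_alloc(cqm_handle, &buf, true) != CQM_SUCCESS)
+ *		return CQM_FAIL;
+ *	// ... access buf.direct.va or buf.buf_list[i].va / .pa ...
+ *	cqm_buf_free(&buf, cqm_handle->dev);
+ */
+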
+/**
+ * cqm_cla_cache_invalid - Set the chip logical address cache invalid
+ * @cqm_handle: handle of cqm
+ * @gpa: global physical address
+ * @cache_size: chip cache size
+ */
+s32 cqm_cla_cache_invalid(struct cqm_handle_s *cqm_handle, dma_addr_t gpa,
+ u32 cache_size)
+{
+ struct hifc_hwdev *handle = cqm_handle->ex_handle;
+ struct cqm_cmd_buf_s *buf_in = NULL;
+ struct cqm_cla_cache_invalid_cmd_s *cmd = NULL;
+ s32 ret = CQM_FAIL;
+
+ buf_in = cqm_cmd_alloc((void *)(cqm_handle->ex_handle));
+ CQM_PTR_CHECK_RET(buf_in, return CQM_FAIL,
+ CQM_ALLOC_FAIL(buf_in));
+ buf_in->size = sizeof(struct cqm_cla_cache_invalid_cmd_s);
+
+ /* Fill command format, and turn into big endian */
+ cmd = (struct cqm_cla_cache_invalid_cmd_s *)(buf_in->buf);
+ cmd->cache_size = cache_size;
+ cmd->gpa_h = CQM_ADDR_HI(gpa);
+ cmd->gpa_l = CQM_ADDR_LW(gpa);
+
+ cqm_swab32((u8 *)cmd,
+ (sizeof(struct cqm_cla_cache_invalid_cmd_s) >> 2));
+
+ /* cmdq send a cmd */
+ ret = cqm_send_cmd_box((void *)(cqm_handle->ex_handle),
+ CQM_CMD_ACK_TYPE_CMDQ,
+ CQM_MOD_CQM, CQM_CMD_T_CLA_CACHE_INVALID,
+ buf_in, NULL, CQM_CMD_TIMEOUT);
+ if (ret != CQM_SUCCESS) {
+ cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_send_cmd_box));
+ cqm_err(handle->dev_hdl, "Cla cache invalid: cqm_send_cmd_box_ret=%d\n",
+ ret);
+ cqm_err(handle->dev_hdl, "Cla cache invalid: cla_cache_invalid_cmd: 0x%x 0x%x 0x%x\n",
+ cmd->gpa_h, cmd->gpa_l, cmd->cache_size);
+ }
+
+ cqm_cmd_free((void *)(cqm_handle->ex_handle), buf_in);
+ return ret;
+}
+
+/**
+ * cqm_buf_free - Free buffer space and dma for the struct cqm_buf_s
+ * @buf: the buffer to free
+ * @dev: specific pci device
+ */
+void cqm_buf_free(struct cqm_buf_s *buf, struct pci_dev *dev)
+{
+ u32 order = 0;
+ s32 i = 0;
+
+ order = get_order(buf->buf_size);
+
+ if (buf->direct.va) {
+ vunmap(buf->direct.va);
+ buf->direct.va = NULL;
+ }
+
+ if (buf->buf_list) {
+ for (i = 0; i < (s32)(buf->buf_number); i++) {
+ if (buf->buf_list[i].va) {
+ pci_unmap_single(dev, buf->buf_list[i].pa,
+ buf->buf_size,
+ PCI_DMA_BIDIRECTIONAL);
+ free_pages((ulong)(buf->buf_list[i].va), order);
+ buf->buf_list[i].va = NULL;
+ }
+ }
+
+ vfree(buf->buf_list);
+ buf->buf_list = NULL;
+ }
+}
+
+/**
+ * __free_cache_inv - Invalidate the chip cache for one buffer and free it
+ * @cqm_handle: handle of cqm
+ * @buf: the buffer to free
+ * @inv_flag: the result of the cache invalidation
+ * @order: the page allocation order of each buffer
+ * @buf_idx: buffer index
+ */
+static void __free_cache_inv(struct cqm_handle_s *cqm_handle,
+ struct cqm_buf_s *buf, s32 *inv_flag,
+ u32 order, s32 buf_idx)
+{
+ struct hifc_hwdev *handle = cqm_handle->ex_handle;
+
+ if (handle->chip_present_flag) {
+ *inv_flag = cqm_cla_cache_invalid(cqm_handle,
+ buf->buf_list[buf_idx].pa,
+ PAGE_SIZE << order);
+ if (*inv_flag != CQM_SUCCESS) {
+ cqm_err(handle->dev_hdl, "Buffer free: fail to invalid buf_list pa cache, inv_flag=%d\n",
+ *inv_flag);
+ }
+ }
+
+ pci_unmap_single(cqm_handle->dev, buf->buf_list[buf_idx].pa,
+ buf->buf_size, PCI_DMA_BIDIRECTIONAL);
+
+ free_pages((unsigned long)(buf->buf_list[buf_idx].va), order);
+
+ buf->buf_list[buf_idx].va = NULL;
+}
+
+/**
+ * cqm_buf_free_cache_inv - Invalidate the chip cache for a buffer and free it
+ * @cqm_handle: handle of cqm
+ * @buf: the buffer to free
+ * @inv_flag: the result of the cache invalidation
+ */
+void cqm_buf_free_cache_inv(struct cqm_handle_s *cqm_handle,
+ struct cqm_buf_s *buf, s32 *inv_flag)
+{
+ u32 order = 0;
+ s32 i = 0;
+
+ order = get_order(buf->buf_size);
+
+ if (buf->direct.va) {
+ vunmap(buf->direct.va);
+ buf->direct.va = NULL;
+ }
+
+ if (buf->buf_list) {
+ for (i = 0; i < (s32)(buf->buf_number); i++) {
+ if (buf->buf_list[i].va) {
+ __free_cache_inv(cqm_handle, buf,
+ inv_flag, order, i);
+ }
+ }
+
+ vfree(buf->buf_list);
+ buf->buf_list = NULL;
+ }
+}
+
+#define bat_cla_section
+
+/**
+ * cqm_bat_update - Send cmds to the tile to update the BAT table through cmdq
+ * @cqm_handle: cqm handle
+ * Return: 0 - success, negative - failure
+ */
+s32 cqm_bat_update(struct cqm_handle_s *cqm_handle)
+{
+ struct hifc_hwdev *handle = cqm_handle->ex_handle;
+ struct cqm_cmd_buf_s *buf_in = NULL;
+ s32 ret = CQM_FAIL;
+ struct cqm_bat_update_cmd_s *bat_update_cmd = NULL;
+
+ /* Allocate a cmd and fill */
+ buf_in = cqm_cmd_alloc((void *)(cqm_handle->ex_handle));
+ CQM_PTR_CHECK_RET(buf_in, return CQM_FAIL, CQM_ALLOC_FAIL(buf_in));
+ buf_in->size = sizeof(struct cqm_bat_update_cmd_s);
+
+ bat_update_cmd = (struct cqm_bat_update_cmd_s *)(buf_in->buf);
+ bat_update_cmd->byte_len = cqm_handle->bat_table.bat_size;
+ bat_update_cmd->offset = 0;
+ memcpy(bat_update_cmd->data, cqm_handle->bat_table.bat,
+ bat_update_cmd->byte_len);
+
+ /*Big endian conversion*/
+ cqm_swab32((u8 *)bat_update_cmd,
+ sizeof(struct cqm_bat_update_cmd_s) >> 2);
+
+ /* send a cmd */
+ ret = cqm_send_cmd_box((void *)(cqm_handle->ex_handle),
+ CQM_CMD_ACK_TYPE_CMDQ, CQM_MOD_CQM,
+ CQM_CMD_T_BAT_UPDATE, buf_in,
+ NULL, CQM_CMD_TIMEOUT);
+ if (ret != CQM_SUCCESS) {
+ cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_send_cmd_box));
+ cqm_err(handle->dev_hdl, "Bat update: send_cmd_box ret=%d\n",
+ ret);
+ cqm_cmd_free((void *)(cqm_handle->ex_handle), buf_in);
+ return CQM_FAIL;
+ }
+
+ /* Free a cmd */
+ cqm_cmd_free((void *)(cqm_handle->ex_handle), buf_in);
+
+ return CQM_SUCCESS;
+}
+
+s32 cqm_bat_init_ft(struct cqm_handle_s *cqm_handle,
+ struct cqm_bat_table_s *bat_table,
+ enum func_type function_type)
+{
+ struct hifc_hwdev *handle = cqm_handle->ex_handle;
+
+ if (function_type == CQM_PF || function_type == CQM_PPF) {
+ bat_table->bat_entry_type[0] = CQM_BAT_ENTRY_T_CFG;
+ bat_table->bat_entry_type[1] = CQM_BAT_ENTRY_T_HASH;
+ bat_table->bat_entry_type[2] = CQM_BAT_ENTRY_T_QPC;
+ bat_table->bat_entry_type[3] = CQM_BAT_ENTRY_T_SCQC;
+ bat_table->bat_entry_type[4] = CQM_BAT_ENTRY_T_LUN;
+ bat_table->bat_entry_type[5] = CQM_BAT_ENTRY_T_TASKMAP;
+ bat_table->bat_entry_type[6] = CQM_BAT_ENTRY_T_L3I;
+ bat_table->bat_entry_type[7] = CQM_BAT_ENTRY_T_CHILDC;
+ bat_table->bat_entry_type[8] = CQM_BAT_ENTRY_T_TIMER;
+ bat_table->bat_entry_type[9] = CQM_BAT_ENTRY_T_XID2CID;
+ bat_table->bat_entry_type[10] = CQM_BAT_ENTRY_T_REORDER;
+ bat_table->bat_size = CQM_BAT_SIZE_FT_PF;
+ } else {
+ cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(function_type));
+ return CQM_FAIL;
+ }
+
+ return CQM_SUCCESS;
+}
+
+/**
+ * cqm_bat_init - Initialize the BAT table; this only selects the entries to
+ * be initialized and arranges their order, the entry contents are filled in
+ * after the CLA allocation
+ * @cqm_handle: cqm handle
+ * Return: 0 - success, negative - failure
+ */
+s32 cqm_bat_init(struct cqm_handle_s *cqm_handle)
+{
+ struct cqm_bat_table_s *bat_table = &cqm_handle->bat_table;
+ u32 i = 0;
+
+ memset(bat_table, 0, sizeof(struct cqm_bat_table_s));
+
+ /* Initialize the type of each bat entry */
+ for (i = 0; i < CQM_BAT_ENTRY_MAX; i++)
+ bat_table->bat_entry_type[i] = CQM_BAT_ENTRY_T_INVALID;
+
+ if (cqm_bat_init_ft(cqm_handle, bat_table,
+ cqm_handle->func_attribute.func_type) == CQM_FAIL) {
+ return CQM_FAIL;
+ }
+
+ return CQM_SUCCESS;
+}
+
+/**
+ * cqm_bat_uninit - Deinitialize BAT table
+ * @cqm_handle: cqm handle
+ */
+void cqm_bat_uninit(struct cqm_handle_s *cqm_handle)
+{
+ struct hifc_hwdev *handle = cqm_handle->ex_handle;
+ struct cqm_bat_table_s *bat_table = &cqm_handle->bat_table;
+ u32 i = 0;
+
+ for (i = 0; i < CQM_BAT_ENTRY_MAX; i++)
+ bat_table->bat_entry_type[i] = CQM_BAT_ENTRY_T_INVALID;
+
+ memset(bat_table->bat, 0, CQM_BAT_ENTRY_MAX * CQM_BAT_ENTRY_SIZE);
+
+ /* Notify the chip to refresh the BAT table */
+ if (cqm_bat_update(cqm_handle) != CQM_SUCCESS)
+ cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_bat_update));
+}
+
+static void cqm_bat_config_entry_size(
+ struct cqm_cla_table_s *cla_table,
+ struct cqm_bat_entry_standerd_s *bat_entry_standerd)
+{
+	/* Except for QPC entries of 256/512/1024 bytes, all entries use the
+	 * 256B cacheline size; the conversion is done inside the chip
+	 */
+ if (cla_table->obj_size > CQM_CHIP_CACHELINE) {
+ if (cla_table->obj_size == 512) {
+ bat_entry_standerd->entry_size = CQM_BAT_ENTRY_SIZE_512;
+ } else {
+ bat_entry_standerd->entry_size =
+ CQM_BAT_ENTRY_SIZE_1024;
+ }
+ bat_entry_standerd->max_number =
+ cla_table->max_buffer_size / cla_table->obj_size;
+ } else {
+ bat_entry_standerd->entry_size = CQM_BAT_ENTRY_SIZE_256;
+ bat_entry_standerd->max_number =
+ cla_table->max_buffer_size / CQM_CHIP_CACHELINE;
+ }
+}
+
+void cqm_bat_fill_cla_std_entry(struct cqm_handle_s *cqm_handle,
+ struct cqm_cla_table_s *cla_table,
+ u8 *entry_base_addr, u32 entry_type,
+ u8 gpa_check_enable)
+{
+ struct hifc_hwdev *handle = cqm_handle->ex_handle;
+ struct cqm_bat_entry_standerd_s *bat_entry_standerd = NULL;
+ dma_addr_t pa = 0;
+
+ if (cla_table->obj_num == 0) {
+ cqm_info(handle->dev_hdl, "Cla alloc: cla_type %u, obj_num=0, don't init bat entry\n",
+ cla_table->type);
+ return;
+ }
+
+ bat_entry_standerd = (struct cqm_bat_entry_standerd_s *)entry_base_addr;
+ cqm_bat_config_entry_size(cla_table, bat_entry_standerd);
+ bat_entry_standerd->max_number = bat_entry_standerd->max_number - 1;
+
+ bat_entry_standerd->bypass = CQM_BAT_NO_BYPASS_CACHE;
+ bat_entry_standerd->z = cla_table->cacheline_z;
+ bat_entry_standerd->y = cla_table->cacheline_y;
+ bat_entry_standerd->x = cla_table->cacheline_x;
+ bat_entry_standerd->cla_level = cla_table->cla_lvl;
+
+ if (cla_table->cla_lvl == CQM_CLA_LVL_0)
+ pa = cla_table->cla_z_buf.buf_list[0].pa;
+ else if (cla_table->cla_lvl == CQM_CLA_LVL_1)
+ pa = cla_table->cla_y_buf.buf_list[0].pa;
+ else
+ pa = cla_table->cla_x_buf.buf_list[0].pa;
+
+ bat_entry_standerd->cla_gpa_h = CQM_ADDR_HI(pa);
+ if (entry_type == CQM_BAT_ENTRY_T_REORDER) {
+ /* Reorder does not support GPA validity check */
+ bat_entry_standerd->cla_gpa_l = CQM_ADDR_LW(pa);
+ } else {
+ /* GPA is valid when gpa[0]=1 */
+ bat_entry_standerd->cla_gpa_l =
+ CQM_ADDR_LW(pa) | gpa_check_enable;
+ }
+}
+
+static void cqm_bat_fill_cla_cfg(struct cqm_handle_s *cqm_handle,
+ u8 *entry_base_addr)
+{
+ struct cqm_bat_entry_cfg_s *bat_entry_cfg =
+ (struct cqm_bat_entry_cfg_s *)entry_base_addr;
+
+ bat_entry_cfg->cur_conn_cache = 0;
+ bat_entry_cfg->max_conn_cache =
+ cqm_handle->func_capability.flow_table_based_conn_cache_number;
+ bat_entry_cfg->cur_conn_num_h_4 = 0;
+ bat_entry_cfg->cur_conn_num_l_16 = 0;
+ bat_entry_cfg->max_conn_num =
+ cqm_handle->func_capability.flow_table_based_conn_number;
+	/* Hash buckets are aligned to groups of 64, hence the shift by 6 bits */
+	if ((cqm_handle->func_capability.hash_number >> 6) != 0) {
+		/* After the right shift by 6 bits, subtract 1 to get
+		 * the bucket_num value
+		 */
+ bat_entry_cfg->bucket_num =
+ ((cqm_handle->func_capability.hash_number >> 6) - 1);
+ }
+ if (cqm_handle->func_capability.bloomfilter_length != 0) {
+ bat_entry_cfg->bloom_filter_len =
+ cqm_handle->func_capability.bloomfilter_length - 1;
+ bat_entry_cfg->bloom_filter_addr =
+ cqm_handle->func_capability.bloomfilter_addr;
+ }
+}
+
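+/*
+ * Worked example for the cfg entry filled above (hypothetical capability
+ * values): with hash_number = 8192 the 64-bucket grouping gives
+ * bucket_num = (8192 >> 6) - 1 = 127, and with bloomfilter_length = 256 the
+ * entry holds bloom_filter_len = 255.
+ */
+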
+static void cqm_bat_fill_cla_taskmap(struct cqm_handle_s *cqm_handle,
+ struct cqm_cla_table_s *cla_table,
+ u8 *entry_base_addr)
+{
+ struct hifc_hwdev *handle = cqm_handle->ex_handle;
+ struct cqm_bat_entry_taskmap_s *bat_entry_taskmap =
+		(struct cqm_bat_entry_taskmap_s *)entry_base_addr;
+
+	if (cqm_handle->func_capability.taskmap_number != 0) {
+ bat_entry_taskmap->gpa0_h =
+ (u32)(cla_table->cla_z_buf.buf_list[0].pa >> 32);
+ bat_entry_taskmap->gpa0_l =
+ (u32)(cla_table->cla_z_buf.buf_list[0].pa & 0xffffffff);
+
+ bat_entry_taskmap->gpa1_h =
+ (u32)(cla_table->cla_z_buf.buf_list[1].pa >> 32);
+ bat_entry_taskmap->gpa1_l =
+ (u32)(cla_table->cla_z_buf.buf_list[1].pa & 0xffffffff);
+
+ bat_entry_taskmap->gpa2_h =
+ (u32)(cla_table->cla_z_buf.buf_list[2].pa >> 32);
+ bat_entry_taskmap->gpa2_l =
+ (u32)(cla_table->cla_z_buf.buf_list[2].pa & 0xffffffff);
+
+ bat_entry_taskmap->gpa3_h =
+ (u32)(cla_table->cla_z_buf.buf_list[3].pa >> 32);
+ bat_entry_taskmap->gpa3_l =
+ (u32)(cla_table->cla_z_buf.buf_list[3].pa & 0xffffffff);
+
+ cqm_info(handle->dev_hdl, "Cla alloc: taskmap bat entry: 0x%x 0x%x, 0x%x 0x%x, 0x%x 0x%x, 0x%x 0x%x\n",
+ bat_entry_taskmap->gpa0_h, bat_entry_taskmap->gpa0_l,
+ bat_entry_taskmap->gpa1_h, bat_entry_taskmap->gpa1_l,
+ bat_entry_taskmap->gpa2_h, bat_entry_taskmap->gpa2_l,
+ bat_entry_taskmap->gpa3_h, bat_entry_taskmap->gpa3_l);
+ }
+}
+
+/**
+ * cqm_bat_fill_cla - Fill the base address of the cla table into the bat table
+ * @cqm_handle: cqm handle
+ */
+void cqm_bat_fill_cla(struct cqm_handle_s *cqm_handle)
+{
+ struct cqm_bat_table_s *bat_table = &cqm_handle->bat_table;
+ struct cqm_cla_table_s *cla_table = NULL;
+ u32 entry_type = CQM_BAT_ENTRY_T_INVALID;
+ u8 *entry_base_addr = NULL;
+ u32 i = 0;
+ u8 gpa_check_enable = cqm_handle->func_capability.gpa_check_enable;
+
+ /* Fill each item according to the arranged BAT table */
+ entry_base_addr = bat_table->bat;
+ for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) {
+ entry_type = bat_table->bat_entry_type[i];
+ if (entry_type == CQM_BAT_ENTRY_T_CFG) {
+ cqm_bat_fill_cla_cfg(cqm_handle, entry_base_addr);
+ entry_base_addr += sizeof(struct cqm_bat_entry_cfg_s);
+ } else if (entry_type == CQM_BAT_ENTRY_T_TASKMAP) {
+ cqm_bat_fill_cla_taskmap(cqm_handle,
+ &bat_table->entry[i],
+ entry_base_addr);
+ entry_base_addr +=
+ sizeof(struct cqm_bat_entry_taskmap_s);
+ } else if ((entry_type == CQM_BAT_ENTRY_T_INVALID) ||
+ ((entry_type == CQM_BAT_ENTRY_T_TIMER) &&
+ (cqm_handle->func_attribute.func_type != CQM_PPF))) {
+			/* Skip when entry_type is invalid, or when it is a
+			 * timer entry on a non-PPF function: no memory or
+			 * BAT filling is needed
+			 */
+ entry_base_addr += CQM_BAT_ENTRY_SIZE;
+ } else {
+ cla_table = &bat_table->entry[i];
+ cqm_bat_fill_cla_std_entry(cqm_handle, cla_table,
+ entry_base_addr, entry_type,
+ gpa_check_enable);
+ entry_base_addr +=
+ sizeof(struct cqm_bat_entry_standerd_s);
+ }
+ /* Checks if entry_base_addr is out of bounds */
+ if (entry_base_addr >=
+ (bat_table->bat + CQM_BAT_ENTRY_MAX * CQM_BAT_ENTRY_SIZE))
+ break;
+ }
+}
+
+static void cqm_cla_xyz_cacheline_lvl1(struct cqm_cla_table_s *cla_table,
+ u32 trunk_size)
+{
+ s32 shift = 0;
+
+ if (cla_table->obj_size >= CQM_CHIP_CACHELINE) {
+ cla_table->cacheline_z = cla_table->z;
+ cla_table->cacheline_y = cla_table->y;
+ cla_table->cacheline_x = cla_table->x;
+ } else {
+ shift = cqm_shift(trunk_size / CQM_CHIP_CACHELINE);
+ cla_table->cacheline_z = shift ? (shift - 1) : (shift);
+ cla_table->cacheline_y = CQM_MAX_INDEX_BIT;
+ cla_table->cacheline_x = 0;
+ }
+}
+
+s32 cqm_cla_xyz_lvl1(struct cqm_handle_s *cqm_handle,
+ struct cqm_cla_table_s *cla_table,
+ u32 trunk_size)
+{
+ struct hifc_hwdev *handle = cqm_handle->ex_handle;
+ struct cqm_buf_s *cla_y_buf = NULL;
+ struct cqm_buf_s *cla_z_buf = NULL;
+ dma_addr_t *base = NULL;
+ s32 shift = 0;
+ u32 i = 0;
+ s32 ret = CQM_FAIL;
+ u8 gpa_check_enable = cqm_handle->func_capability.gpa_check_enable;
+
+ if (cla_table->type == CQM_BAT_ENTRY_T_REORDER)
+ gpa_check_enable = 0;
+
+ cla_table->cla_lvl = CQM_CLA_LVL_1;
+
+ shift = cqm_shift(trunk_size / cla_table->obj_size);
+ cla_table->z = shift ? (shift - 1) : (shift);
+ cla_table->y = CQM_MAX_INDEX_BIT;
+ cla_table->x = 0;
+ cqm_cla_xyz_cacheline_lvl1(cla_table, trunk_size);
+
+ /* Allocate y buf space */
+ cla_y_buf = &cla_table->cla_y_buf;
+ cla_y_buf->buf_size = trunk_size;
+ cla_y_buf->buf_number = 1;
+ cla_y_buf->page_number = cla_y_buf->buf_number <<
+ cla_table->trunk_order;
+ ret = cqm_buf_alloc(cqm_handle, cla_y_buf, false);
+
+ CQM_CHECK_EQUAL_RET(handle->dev_hdl, ret, CQM_SUCCESS, return CQM_FAIL,
+ CQM_ALLOC_FAIL(lvl_1_y_buf));
+
+ /* Allocate z buf space */
+ cla_z_buf = &cla_table->cla_z_buf;
+ cla_z_buf->buf_size = trunk_size;
+ cla_z_buf->buf_number = ALIGN(cla_table->max_buffer_size, trunk_size) /
+ trunk_size;
+ cla_z_buf->page_number = cla_z_buf->buf_number <<
+ cla_table->trunk_order;
+ /* Requires static allocation of all buffer space */
+ if (cla_table->alloc_static == true) {
+ if (cqm_buf_alloc(cqm_handle, cla_z_buf, false) == CQM_FAIL) {
+ cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(lvl_1_z_buf));
+ cqm_buf_free(cla_y_buf, cqm_handle->dev);
+ return CQM_FAIL;
+ }
+
+ /* Fill gpa of z buf list into y buf */
+ base = (dma_addr_t *)(cla_y_buf->buf_list->va);
+ for (i = 0; i < cla_z_buf->buf_number; i++) {
+ /*gpa[0]=1 means this GPA is valid*/
+ *base = (cla_z_buf->buf_list[i].pa | gpa_check_enable);
+ base++;
+ }
+
+ /* big-endian conversion */
+ cqm_swab64((u8 *)(cla_y_buf->buf_list->va),
+ cla_z_buf->buf_number);
+ } else {
+ /* Only initialize and allocate buf list space, buffer spaces
+ * are dynamically allocated in service
+ */
+ cla_z_buf->buf_list = (struct cqm_buf_list_s *)
+ vmalloc(cla_z_buf->buf_number *
+ sizeof(struct cqm_buf_list_s));
+
+ if (!cla_z_buf->buf_list) {
+ cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(lvl_1_z_buf));
+ cqm_buf_free(cla_y_buf, cqm_handle->dev);
+ return CQM_FAIL;
+ }
+ memset(cla_z_buf->buf_list, 0,
+ cla_z_buf->buf_number * sizeof(struct cqm_buf_list_s));
+ }
+
+ return CQM_SUCCESS;
+}
+
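+/*
+ * Worked example for the level-1 index split above (illustrative, assuming
+ * trunk_order = 0 with PAGE_SIZE = 4096, obj_size = 64 and a 256B
+ * CQM_CHIP_CACHELINE): cqm_shift(4096 / 64) = 6, so z = 5 and
+ * y = CQM_MAX_INDEX_BIT, suggesting index bits [5:0] address an object inside
+ * a trunk while the higher bits select the z-level trunk; the number of
+ * z-level trunks is cla_z_buf.buf_number = ALIGN(max_buffer_size, 4096) / 4096.
+ */
+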
+static s32 cqm_cla_yz_lvl2_static(struct cqm_handle_s *cqm_handle,
+ struct cqm_buf_s *cla_y_buf,
+ struct cqm_buf_s *cla_z_buf,
+ u8 gpa_check_enable)
+{
+ struct hifc_hwdev *handle = cqm_handle->ex_handle;
+ dma_addr_t *base = NULL;
+ u32 i = 0;
+
+ if (cqm_buf_alloc(cqm_handle, cla_z_buf, false) == CQM_FAIL) {
+ cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(lvl_2_z_buf));
+ return CQM_FAIL;
+ }
+
+ /* The virtual address of y buf is remapped for software access */
+ if (cqm_buf_alloc(cqm_handle, cla_y_buf, true) == CQM_FAIL) {
+ cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(lvl_2_y_buf));
+ cqm_buf_free(cla_z_buf, cqm_handle->dev);
+ return CQM_FAIL;
+ }
+
+ /* Fill gpa of z buf list into y buf */
+ base = (dma_addr_t *)(cla_y_buf->direct.va);
+ for (i = 0; i < cla_z_buf->buf_number; i++) {
+ /*gpa[0]=1 means this GPA is valid*/
+ *base = (cla_z_buf->buf_list[i].pa | gpa_check_enable);
+ base++;
+ }
+
+ /* big-endian conversion */
+ cqm_swab64((u8 *)(cla_y_buf->direct.va), cla_z_buf->buf_number);
+
+ return CQM_SUCCESS;
+}
+
+static void cqm_cla_yz_lvl2_init_cacheline(struct cqm_cla_table_s *cla_table,
+ u32 trunk_size)
+{
+ s32 shift = 0;
+
+ if (cla_table->obj_size >= CQM_CHIP_CACHELINE) {
+ cla_table->cacheline_z = cla_table->z;
+ cla_table->cacheline_y = cla_table->y;
+ cla_table->cacheline_x = cla_table->x;
+ } else {
+ shift = cqm_shift(trunk_size / CQM_CHIP_CACHELINE);
+ cla_table->cacheline_z = shift ? (shift - 1) : (shift);
+ shift = cqm_shift(trunk_size / sizeof(dma_addr_t));
+ cla_table->cacheline_y = cla_table->cacheline_z + shift;
+ cla_table->cacheline_x = CQM_MAX_INDEX_BIT;
+ }
+}
+
+s32 cqm_cla_xyz_lvl2(struct cqm_handle_s *cqm_handle,
+ struct cqm_cla_table_s *cla_table,
+ u32 trunk_size)
+{
+ struct hifc_hwdev *handle = cqm_handle->ex_handle;
+ struct cqm_buf_s *cla_x_buf = NULL;
+ struct cqm_buf_s *cla_y_buf = NULL;
+ struct cqm_buf_s *cla_z_buf = NULL;
+ dma_addr_t *base = NULL;
+ s32 shift = 0;
+ u32 i = 0;
+ s32 ret = CQM_FAIL;
+ u8 gpa_check_enable = cqm_handle->func_capability.gpa_check_enable;
+
+ if (cla_table->type == CQM_BAT_ENTRY_T_REORDER)
+ gpa_check_enable = 0;
+
+ cla_table->cla_lvl = CQM_CLA_LVL_2;
+
+ shift = cqm_shift(trunk_size / cla_table->obj_size);
+ cla_table->z = shift ? (shift - 1) : (shift);
+ shift = cqm_shift(trunk_size / sizeof(dma_addr_t));
+ cla_table->y = cla_table->z + shift;
+ cla_table->x = CQM_MAX_INDEX_BIT;
+
+ cqm_cla_yz_lvl2_init_cacheline(cla_table, trunk_size);
+
+ /* Allocate x buf space */
+ cla_x_buf = &cla_table->cla_x_buf;
+ cla_x_buf->buf_size = trunk_size;
+ cla_x_buf->buf_number = 1;
+ cla_x_buf->page_number = cla_x_buf->buf_number <<
+ cla_table->trunk_order;
+
+ ret = cqm_buf_alloc(cqm_handle, cla_x_buf, false);
+ CQM_CHECK_EQUAL_RET(handle->dev_hdl, ret, CQM_SUCCESS, return CQM_FAIL,
+ CQM_ALLOC_FAIL(lvl_2_x_buf));
+
+ /* Allocate y buf and z buf space */
+ cla_z_buf = &cla_table->cla_z_buf;
+ cla_z_buf->buf_size = trunk_size;
+ cla_z_buf->buf_number = ALIGN(cla_table->max_buffer_size, trunk_size) /
+ trunk_size;
+ cla_z_buf->page_number = cla_z_buf->buf_number <<
+ cla_table->trunk_order;
+
+ cla_y_buf = &cla_table->cla_y_buf;
+ cla_y_buf->buf_size = trunk_size;
+ cla_y_buf->buf_number =
+ (ALIGN(cla_z_buf->buf_number * sizeof(dma_addr_t),
+ trunk_size)) / trunk_size;
+
+ cla_y_buf->page_number = cla_y_buf->buf_number <<
+ cla_table->trunk_order;
+
+ /* Requires static allocation of all buffer space */
+ if (cla_table->alloc_static == true) {
+ if (cqm_cla_yz_lvl2_static(cqm_handle,
+ cla_y_buf,
+ cla_z_buf,
+ gpa_check_enable) == CQM_FAIL) {
+ cqm_buf_free(cla_x_buf, cqm_handle->dev);
+ return CQM_FAIL;
+ }
+ /* Fill gpa of y buf list into x buf */
+ base = (dma_addr_t *)(cla_x_buf->buf_list->va);
+ for (i = 0; i < cla_y_buf->buf_number; i++) {
+ /* gpa[0]=1 means this GPA is valid */
+ *base = (cla_y_buf->buf_list[i].pa | gpa_check_enable);
+ base++;
+ }
+
+ /* big-endian conversion */
+ cqm_swab64((u8 *)(cla_x_buf->buf_list->va),
+ cla_y_buf->buf_number);
+ } else {
+ /* Only initialize and allocate the buf list; the buffers
+ * themselves are allocated dynamically by the service
+ */
+ cla_z_buf->buf_list = (struct cqm_buf_list_s *)
+ vmalloc(cla_z_buf->buf_number *
+ sizeof(struct cqm_buf_list_s));
+ if (!cla_z_buf->buf_list) {
+ cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(lvl_2_z_buf));
+ cqm_buf_free(cla_x_buf, cqm_handle->dev);
+ return CQM_FAIL;
+ }
+ memset(cla_z_buf->buf_list, 0,
+ cla_z_buf->buf_number * sizeof(struct cqm_buf_list_s));
+
+ cla_y_buf->buf_list = (struct cqm_buf_list_s *)
+ vmalloc(cla_y_buf->buf_number *
+ sizeof(struct cqm_buf_list_s));
+
+ if (!cla_y_buf->buf_list) {
+ cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(lvl_2_y_buf));
+ cqm_buf_free(cla_z_buf, cqm_handle->dev);
+ cqm_buf_free(cla_x_buf, cqm_handle->dev);
+ return CQM_FAIL;
+ }
+ memset(cla_y_buf->buf_list, 0,
+ cla_y_buf->buf_number * sizeof(struct cqm_buf_list_s));
+ }
+
+ return CQM_SUCCESS;
+}
+
+static s32 cqm_cla_xyz_check(struct cqm_handle_s *cqm_handle,
+ struct cqm_cla_table_s *cla_table)
+{
+ struct hifc_hwdev *handle = cqm_handle->ex_handle;
+
+ if (cla_table->obj_num == 0) {
+ /* If the capability is set to 0, the CLA does not need to be
+ * initialized, so exit directly
+ */
+ cqm_info(handle->dev_hdl, "Cla alloc: cla_type %u, obj_num=0, don't alloc buffer\n",
+ cla_table->type);
+ return CQM_SUCCESS;
+ }
+
+ /* Check whether obj_size is 2^n aligned; 0 and 1 are treated as
+ * errors
+ */
+ if (cqm_check_align(cla_table->obj_size) == false) {
+ cqm_err(handle->dev_hdl, "Cla alloc: cla_type %u, obj_size 0x%x is not align on 2^n\n",
+ cla_table->type, cla_table->obj_size);
+ return CQM_FAIL;
+ }
+
+ return CQM_SUCCESS;
+}
+
+/**
+ * cqm_cla_xyz - Calculate how many levels of cla tables are needed and
+ * allocate space for each level of cla table
+ * @cqm_handle: cqm handle
+ * @cla_table: cla table
+ * Return: 0 - success, negative - failure
+ */
+s32 cqm_cla_xyz(struct cqm_handle_s *cqm_handle,
+ struct cqm_cla_table_s *cla_table)
+{
+ struct hifc_hwdev *handle = cqm_handle->ex_handle;
+ struct cqm_buf_s *cla_z_buf = NULL;
+ u32 trunk_size = 0;
+ s32 ret = CQM_FAIL;
+
+ if (cqm_cla_xyz_check(cqm_handle, cla_table) == CQM_FAIL)
+ return CQM_FAIL;
+
+ trunk_size = PAGE_SIZE << cla_table->trunk_order;
+
+ if (trunk_size < cla_table->obj_size) {
+ cqm_err(handle->dev_hdl, "Cla alloc: cla type %u, obj_size 0x%x is out of trunk size\n",
+ cla_table->type, cla_table->obj_size);
+ return CQM_FAIL;
+ }
+
+ /* Level 0 CLA: the buffer is small and can be allocated for
+ * cla_z_buf during initialization
+ */
+ if (cla_table->max_buffer_size <= trunk_size) {
+ cla_table->cla_lvl = CQM_CLA_LVL_0;
+
+ cla_table->z = CQM_MAX_INDEX_BIT;
+ cla_table->y = 0;
+ cla_table->x = 0;
+
+ cla_table->cacheline_z = cla_table->z;
+ cla_table->cacheline_y = cla_table->y;
+ cla_table->cacheline_x = cla_table->x;
+
+ /* Allocate z buf space */
+ cla_z_buf = &cla_table->cla_z_buf;
+ cla_z_buf->buf_size = trunk_size;
+ cla_z_buf->buf_number = 1;
+ cla_z_buf->page_number =
+ cla_z_buf->buf_number << cla_table->trunk_order;
+ ret = cqm_buf_alloc(cqm_handle, cla_z_buf, false);
+ CQM_CHECK_EQUAL_RET(
+ handle->dev_hdl, ret, CQM_SUCCESS,
+ return CQM_FAIL, CQM_ALLOC_FAIL(lvl_0_z_buf));
+
+ } else if (cla_table->max_buffer_size <=
+ (trunk_size * (trunk_size / sizeof(dma_addr_t)))) {
+ /* Level 1 CLA: Cla_y_buf is allocated during initialization,
+ * and cla_z_buf can be allocated dynamically
+ */
+ if (cqm_cla_xyz_lvl1(cqm_handle,
+ cla_table, trunk_size) == CQM_FAIL) {
+ cqm_err(handle->dev_hdl,
+ CQM_FUNCTION_FAIL(cqm_cla_xyz_lvl1));
+ return CQM_FAIL;
+ }
+ } else if (cla_table->max_buffer_size <=
+ (trunk_size * (trunk_size / sizeof(dma_addr_t)) *
+ (trunk_size / sizeof(dma_addr_t)))) {
+ /* Level 2 CLA: Cla_x_buf is allocated during initialization,
+ * and cla_y_buf and cla_z_buf can be dynamically allocated
+ */
+ if (cqm_cla_xyz_lvl2(cqm_handle, cla_table, trunk_size) ==
+ CQM_FAIL) {
+ cqm_err(handle->dev_hdl,
+ CQM_FUNCTION_FAIL(cqm_cla_xyz_lvl2));
+ return CQM_FAIL;
+ }
+ } else {
+ cqm_err(handle->dev_hdl, "Cla alloc: cla max_buffer_size 0x%x exceeds support range\n",
+ cla_table->max_buffer_size);
+ return CQM_FAIL;
+ }
+
+ return CQM_SUCCESS;
+}
+
+static void cqm_bat_entry_hash_init(void *cqm_handle,
+ struct cqm_cla_table_s *cla_table,
+ void *cap)
+{
+ struct cqm_func_capability_s *capability =
+ (struct cqm_func_capability_s *)cap;
+
+ cla_table->trunk_order = capability->pagesize_reorder;
+ cla_table->max_buffer_size = capability->hash_number *
+ capability->hash_basic_size;
+ cla_table->obj_size = capability->hash_basic_size;
+ cla_table->obj_num = capability->hash_number;
+ cla_table->alloc_static = true;
+}
+
+static void cqm_bat_entry_qpc_init(void *cqm_handle,
+ struct cqm_cla_table_s *cla_table,
+ void *cap)
+{
+ struct cqm_handle_s *handle = (struct cqm_handle_s *)cqm_handle;
+ struct hifc_hwdev *hwdev_handle = handle->ex_handle;
+ struct cqm_func_capability_s *capability =
+ (struct cqm_func_capability_s *)cap;
+
+ cla_table->trunk_order = capability->pagesize_reorder;
+ cla_table->max_buffer_size = capability->qpc_number *
+ capability->qpc_basic_size;
+ cla_table->obj_size = capability->qpc_basic_size;
+ cla_table->obj_num = capability->qpc_number;
+ cla_table->alloc_static = capability->qpc_alloc_static;
+ cqm_info(hwdev_handle->dev_hdl, "Cla alloc: qpc alloc_static=%d\n",
+ cla_table->alloc_static);
+}
+
+static void cqm_bat_entry_mpt_init(void *cqm_handle,
+ struct cqm_cla_table_s *cla_table,
+ void *cap)
+{
+ struct cqm_func_capability_s *capability =
+ (struct cqm_func_capability_s *)cap;
+
+ cla_table->trunk_order = capability->pagesize_reorder;
+ cla_table->max_buffer_size = capability->mpt_number *
+ capability->mpt_basic_size;
+ cla_table->obj_size = capability->mpt_basic_size;
+ cla_table->obj_num = capability->mpt_number;
+ cla_table->alloc_static = true;
+}
+
+static void cqm_bat_entry_scqc_init(void *cqm_handle,
+ struct cqm_cla_table_s *cla_table,
+ void *cap)
+{
+ struct cqm_handle_s *handle = (struct cqm_handle_s *)cqm_handle;
+ struct hifc_hwdev *hwdev_handle = handle->ex_handle;
+ struct cqm_func_capability_s *capability =
+ (struct cqm_func_capability_s *)cap;
+
+ cla_table->trunk_order = capability->pagesize_reorder;
+ cla_table->max_buffer_size = capability->scqc_number *
+ capability->scqc_basic_size;
+ cla_table->obj_size = capability->scqc_basic_size;
+ cla_table->obj_num = capability->scqc_number;
+ cla_table->alloc_static = capability->scqc_alloc_static;
+ cqm_info(hwdev_handle->dev_hdl, "Cla alloc: scqc alloc_static=%d\n",
+ cla_table->alloc_static);
+}
+
+static void cqm_bat_entry_srqc_init(void *cqm_handle,
+ struct cqm_cla_table_s *cla_table,
+ void *cap)
+{
+ struct cqm_func_capability_s *capability =
+ (struct cqm_func_capability_s *)cap;
+
+ cla_table->trunk_order = capability->pagesize_reorder;
+ cla_table->max_buffer_size = capability->srqc_number *
+ capability->srqc_basic_size;
+ cla_table->obj_size = capability->srqc_basic_size;
+ cla_table->obj_num = capability->srqc_number;
+ cla_table->alloc_static = false;
+}
+
+static void cqm_bat_entry_gid_init(void *cqm_handle,
+ struct cqm_cla_table_s *cla_table,
+ void *cap)
+{
+ struct cqm_func_capability_s *capability =
+ (struct cqm_func_capability_s *)cap;
+
+ cla_table->max_buffer_size = capability->gid_number *
+ capability->gid_basic_size;
+ cla_table->trunk_order = (u32)cqm_shift(
+ ALIGN(
+ cla_table->max_buffer_size,
+ PAGE_SIZE) / PAGE_SIZE);
+ cla_table->obj_size = capability->gid_basic_size;
+ cla_table->obj_num = capability->gid_number;
+ cla_table->alloc_static = true;
+}
+
+static void cqm_bat_entry_lun_init(void *cqm_handle,
+ struct cqm_cla_table_s *cla_table,
+ void *cap)
+{
+ struct cqm_func_capability_s *capability =
+ (struct cqm_func_capability_s *)cap;
+
+ cla_table->trunk_order = CLA_TABLE_PAGE_ORDER;
+ cla_table->max_buffer_size = capability->lun_number *
+ capability->lun_basic_size;
+ cla_table->obj_size = capability->lun_basic_size;
+ cla_table->obj_num = capability->lun_number;
+ cla_table->alloc_static = true;
+}
+
+static void cqm_bat_entry_taskmap_init(void *cqm_handle,
+ struct cqm_cla_table_s *cla_table,
+ void *cap)
+{
+ struct cqm_func_capability_s *capability =
+ (struct cqm_func_capability_s *)cap;
+
+ cla_table->trunk_order = CQM_4K_PAGE_ORDER;
+ cla_table->max_buffer_size = capability->taskmap_number *
+ capability->taskmap_basic_size;
+ cla_table->obj_size = capability->taskmap_basic_size;
+ cla_table->obj_num = capability->taskmap_number;
+ cla_table->alloc_static = true;
+}
+
+static void cqm_bat_entry_l3i_init(void *cqm_handle,
+ struct cqm_cla_table_s *cla_table,
+ void *cap)
+{
+ struct cqm_func_capability_s *capability =
+ (struct cqm_func_capability_s *)cap;
+
+ cla_table->trunk_order = CLA_TABLE_PAGE_ORDER;
+ cla_table->max_buffer_size = capability->l3i_number *
+ capability->l3i_basic_size;
+ cla_table->obj_size = capability->l3i_basic_size;
+ cla_table->obj_num = capability->l3i_number;
+ cla_table->alloc_static = true;
+}
+
+static void cqm_bat_entry_childc_init(void *cqm_handle,
+ struct cqm_cla_table_s *cla_table,
+ void *cap)
+{
+ struct cqm_func_capability_s *capability =
+ (struct cqm_func_capability_s *)cap;
+
+ cla_table->trunk_order = capability->pagesize_reorder;
+ cla_table->max_buffer_size = capability->childc_number *
+ capability->childc_basic_size;
+ cla_table->obj_size = capability->childc_basic_size;
+ cla_table->obj_num = capability->childc_number;
+ cla_table->alloc_static = true;
+}
+
+static void cqm_bat_entry_timer_init(void *cqm_handle,
+ struct cqm_cla_table_s *cla_table,
+ void *cap)
+{
+ struct cqm_func_capability_s *capability =
+ (struct cqm_func_capability_s *)cap;
+
+ cla_table->trunk_order = CQM_4K_PAGE_ORDER;
+ cla_table->max_buffer_size = capability->timer_number *
+ capability->timer_basic_size;
+ cla_table->obj_size = capability->timer_basic_size;
+ cla_table->obj_num = capability->timer_number;
+ cla_table->alloc_static = true;
+}
+
+static void cqm_bat_entry_xid2cid_init(void *cqm_handle,
+ struct cqm_cla_table_s *cla_table,
+ void *cap)
+{
+ struct cqm_func_capability_s *capability =
+ (struct cqm_func_capability_s *)cap;
+
+ cla_table->trunk_order = capability->pagesize_reorder;
+ cla_table->max_buffer_size = capability->xid2cid_number *
+ capability->xid2cid_basic_size;
+ cla_table->obj_size = capability->xid2cid_basic_size;
+ cla_table->obj_num = capability->xid2cid_number;
+ cla_table->alloc_static = true;
+}
+
+static void cqm_bat_entry_reorder_init(void *cqm_handle,
+ struct cqm_cla_table_s *cla_table,
+ void *cap)
+{
+ struct cqm_func_capability_s *capability =
+ (struct cqm_func_capability_s *)cap;
+
+ cla_table->trunk_order = capability->pagesize_reorder;
+ cla_table->max_buffer_size = capability->reorder_number *
+ capability->reorder_basic_size;
+ cla_table->obj_size = capability->reorder_basic_size;
+ cla_table->obj_num = capability->reorder_number;
+ cla_table->alloc_static = true;
+}
+
+struct cqm_cla_entry_init_s cqm_cla_entry_init_tbl[] = {
+ {CQM_BAT_ENTRY_T_HASH, cqm_bat_entry_hash_init},
+ {CQM_BAT_ENTRY_T_QPC, cqm_bat_entry_qpc_init},
+ {CQM_BAT_ENTRY_T_MPT, cqm_bat_entry_mpt_init},
+ {CQM_BAT_ENTRY_T_SCQC, cqm_bat_entry_scqc_init},
+ {CQM_BAT_ENTRY_T_SRQC, cqm_bat_entry_srqc_init},
+ {CQM_BAT_ENTRY_T_GID, cqm_bat_entry_gid_init},
+ {CQM_BAT_ENTRY_T_LUN, cqm_bat_entry_lun_init},
+ {CQM_BAT_ENTRY_T_TASKMAP, cqm_bat_entry_taskmap_init},
+ {CQM_BAT_ENTRY_T_L3I, cqm_bat_entry_l3i_init},
+ {CQM_BAT_ENTRY_T_CHILDC, cqm_bat_entry_childc_init},
+ {CQM_BAT_ENTRY_T_TIMER, cqm_bat_entry_timer_init},
+ {CQM_BAT_ENTRY_T_XID2CID, cqm_bat_entry_xid2cid_init},
+ {CQM_BAT_ENTRY_T_REORDER, cqm_bat_entry_reorder_init},
+};
+
+static struct cqm_cla_entry_init_s *cqm_get_cla_init_entry(
+ struct cqm_handle_s *cqm_handle,
+ u32 type)
+{
+ int i;
+ struct cqm_cla_entry_init_s *entry = NULL;
+
+ for (i = 0;
+ i < (sizeof(cqm_cla_entry_init_tbl) /
+ sizeof(struct cqm_cla_entry_init_s)); i++) {
+ entry = &cqm_cla_entry_init_tbl[i];
+ if (entry->type == type)
+ return entry;
+ }
+
+ return NULL;
+}
+
+void cqm_cla_init_entry(struct cqm_handle_s *cqm_handle,
+ struct cqm_cla_table_s *cla_table,
+ struct cqm_func_capability_s *capability)
+{
+ struct cqm_cla_entry_init_s *entry;
+
+ entry = cqm_get_cla_init_entry(cqm_handle, cla_table->type);
+ if (entry && entry->cqm_cla_init_handler)
+ entry->cqm_cla_init_handler(cqm_handle, cla_table, capability);
+}
+
+static s32 cqm_cla_fill_entry(struct cqm_handle_s *cqm_handle)
+{
+ struct hifc_hwdev *handle = cqm_handle->ex_handle;
+ s32 ret = CQM_FAIL;
+
+ /* After the allocation of CLA entry, fill in the BAT table */
+ cqm_bat_fill_cla(cqm_handle);
+
+ /* Notify the chip to refresh the BAT table */
+ ret = cqm_bat_update(cqm_handle);
+ if (ret != CQM_SUCCESS) {
+ cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_bat_update));
+ return CQM_FAIL;
+ }
+
+ cqm_info(handle->dev_hdl, "Timer start: func_type=%d, timer_enable=%u\n",
+ cqm_handle->func_attribute.func_type,
+ cqm_handle->func_capability.timer_enable);
+
+ if ((cqm_handle->func_attribute.func_type == CQM_PPF) &&
+ (cqm_handle->func_capability.timer_enable == CQM_TIMER_ENABLE)) {
+ /* After the timer resource is allocated,
+ * the timer needs to be enabled
+ */
+ cqm_info(handle->dev_hdl, "Timer start: hifc ppf timer start\n");
+ ret = hifc_ppf_tmr_start((void *)(cqm_handle->ex_handle));
+ if (ret != CQM_SUCCESS) {
+ cqm_err(handle->dev_hdl, "Timer start: hifc ppf timer start, ret=%d\n",
+ ret);
+ return CQM_FAIL;
+ }
+ }
+ return CQM_SUCCESS;
+}
+
+s32 cqm_cla_init(struct cqm_handle_s *cqm_handle)
+{
+ struct cqm_func_capability_s *capability = &cqm_handle->func_capability;
+ struct cqm_bat_table_s *bat_table = &cqm_handle->bat_table;
+ struct cqm_cla_table_s *cla_table = NULL;
+ s32 inv_flag = 0;
+ u32 i = 0;
+ u32 j = 0;
+
+ for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) {
+ cla_table = &bat_table->entry[i];
+ cla_table->type = bat_table->bat_entry_type[i];
+
+ cqm_cla_init_entry(cqm_handle, cla_table, capability);
+
+ /* Allocate CLA entry space of all levels */
+ if ((cla_table->type >= CQM_BAT_ENTRY_T_HASH) &&
+ (cla_table->type <= CQM_BAT_ENTRY_T_REORDER)) {
+ /* Timer resources (8 wheels * 2k scales * 32B *
+ * func_num) only need to be allocated for the PPF.
+ * A PF neither allocates timer resources nor fills
+ * in the timer entry of the BAT table.
+ */
+ if (!((cla_table->type == CQM_BAT_ENTRY_T_TIMER) &&
+ (cqm_handle->func_attribute.func_type
+ != CQM_PPF))) {
+ if (cqm_cla_xyz(cqm_handle, cla_table) ==
+ CQM_FAIL)
+ goto err;
+ }
+ }
+ mutex_init(&cla_table->lock);
+ }
+ if (cqm_cla_fill_entry(cqm_handle) == CQM_FAIL)
+ goto err;
+
+ return CQM_SUCCESS;
+
+err:
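+ /* Roll back: free the CLA buffers of the entries initialized so far */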
+ for (j = 0; j < i; j++) {
+ cla_table = &bat_table->entry[j];
+ if (cla_table->type != CQM_BAT_ENTRY_T_INVALID) {
+ cqm_buf_free_cache_inv(cqm_handle,
+ &cla_table->cla_x_buf,
+ &inv_flag);
+ cqm_buf_free_cache_inv(cqm_handle,
+ &cla_table->cla_y_buf,
+ &inv_flag);
+ cqm_buf_free_cache_inv(cqm_handle,
+ &cla_table->cla_z_buf,
+ &inv_flag);
+ }
+ }
+
+ return CQM_FAIL;
+}
+
+void cqm_cla_uninit(struct cqm_handle_s *cqm_handle)
+{
+ struct cqm_bat_table_s *bat_table = &cqm_handle->bat_table;
+ struct cqm_cla_table_s *cla_table = NULL;
+ s32 inv_flag = 0;
+ u32 i = 0;
+
+ for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) {
+ cla_table = &bat_table->entry[i];
+ if (cla_table->type != CQM_BAT_ENTRY_T_INVALID) {
+ cqm_buf_free_cache_inv(cqm_handle,
+ &cla_table->cla_x_buf,
+ &inv_flag);
+ cqm_buf_free_cache_inv(cqm_handle,
+ &cla_table->cla_y_buf,
+ &inv_flag);
+ cqm_buf_free_cache_inv(cqm_handle,
+ &cla_table->cla_z_buf,
+ &inv_flag);
+ }
+ }
+}
+
+s32 cqm_cla_update(struct cqm_handle_s *cqm_handle,
+ struct cqm_buf_list_s *buf_node_parent,
+ struct cqm_buf_list_s *buf_node_child,
+ u32 child_index, u8 cla_update_mode)
+{
+ struct hifc_hwdev *handle = cqm_handle->ex_handle;
+ struct cqm_cmd_buf_s *buf_in = NULL;
+ struct cqm_cla_update_cmd_s *cmd = NULL;
+ dma_addr_t pa = 0;
+ s32 ret = CQM_FAIL;
+ u8 gpa_check_enable = cqm_handle->func_capability.gpa_check_enable;
+
+ buf_in = cqm_cmd_alloc((void *)(cqm_handle->ex_handle));
+ CQM_PTR_CHECK_RET(buf_in, return CQM_FAIL, CQM_ALLOC_FAIL(buf_in));
+ buf_in->size = sizeof(struct cqm_cla_update_cmd_s);
+
+ /* Fill the command format and convert to big endian */
+ cmd = (struct cqm_cla_update_cmd_s *)(buf_in->buf);
+
+ pa = buf_node_parent->pa + (child_index * sizeof(dma_addr_t));
+ cmd->gpa_h = CQM_ADDR_HI(pa);
+ cmd->gpa_l = CQM_ADDR_LW(pa);
+
+ pa = buf_node_child->pa;
+ cmd->value_h = CQM_ADDR_HI(pa);
+ cmd->value_l = CQM_ADDR_LW(pa);
+
+ cqm_dbg("Cla alloc: cqm_cla_update, gpa=0x%x 0x%x, value=0x%x 0x%x, cla_update_mode=0x%x\n",
+ cmd->gpa_h, cmd->gpa_l, cmd->value_h, cmd->value_l,
+ cla_update_mode);
+
+ /* CLA GPA check */
+ if (gpa_check_enable) {
+ switch (cla_update_mode) {
+ /* gpa[0]=1 means this GPA is valid */
+ case CQM_CLA_RECORD_NEW_GPA:
+ cmd->value_l |= 1;
+ break;
+ /* gpa[0]=0 means this GPA is invalid */
+ case CQM_CLA_DEL_GPA_WITHOUT_CACHE_INVALID:
+ case CQM_CLA_DEL_GPA_WITH_CACHE_INVALID:
+ cmd->value_l &= (~1);
+ break;
+ default:
+ cqm_err(handle->dev_hdl,
+ "Cla alloc: cqm_cla_update, cqm_cla_update, wrong cla_update_mode=%u\n",
+ cla_update_mode);
+ break;
+ }
+ }
+
+ cqm_swab32((u8 *)cmd, (sizeof(struct cqm_cla_update_cmd_s) >> 2));
+
+ ret = cqm_send_cmd_box((void *)(cqm_handle->ex_handle),
+ CQM_CMD_ACK_TYPE_CMDQ,
+ CQM_MOD_CQM, CQM_CMD_T_CLA_UPDATE,
+ buf_in, NULL, CQM_CMD_TIMEOUT);
+ if (ret != CQM_SUCCESS) {
+ cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_send_cmd_box));
+ cqm_err(handle->dev_hdl,
+ "Cla alloc: cqm_cla_update, cqm_send_cmd_box_ret=%d\n",
+ ret);
+ cqm_err(handle->dev_hdl, "Cla alloc: cqm_cla_update, cla_update_cmd: 0x%x 0x%x 0x%x 0x%x\n",
+ cmd->gpa_h, cmd->gpa_l, cmd->value_h, cmd->value_l);
+ cqm_cmd_free((void *)(cqm_handle->ex_handle), buf_in);
+ return CQM_FAIL;
+ }
+
+ cqm_cmd_free((void *)(cqm_handle->ex_handle), buf_in);
+ return CQM_SUCCESS;
+}
+
+/**
+ * cqm_cla_alloc - Allocate a CLA trunk page
+ * @cqm_handle: cqm handle
+ * @cla_table: cla handle
+ * @buf_node_parent: the parent node whose content is to be updated
+ * @buf_node_child: the child node whose content is to be allocated
+ * @child_index: child index
+ * Return: 0 - success, negative - failure
+ */
+s32 cqm_cla_alloc(struct cqm_handle_s *cqm_handle,
+ struct cqm_cla_table_s *cla_table,
+ struct cqm_buf_list_s *buf_node_parent,
+ struct cqm_buf_list_s *buf_node_child, u32 child_index)
+{
+ struct hifc_hwdev *handle = cqm_handle->ex_handle;
+ s32 ret = CQM_FAIL;
+
+ /* Allocate trunk page */
+ buf_node_child->va = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ cla_table->trunk_order);
+ CQM_PTR_CHECK_RET(buf_node_child->va, return CQM_FAIL,
+ CQM_ALLOC_FAIL(va));
+
+ /* pci mapping */
+ buf_node_child->pa =
+ pci_map_single(cqm_handle->dev, buf_node_child->va,
+ PAGE_SIZE << cla_table->trunk_order,
+ PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(cqm_handle->dev, buf_node_child->pa)) {
+ cqm_err(handle->dev_hdl, CQM_MAP_FAIL(buf_node_child->pa));
+ goto err1;
+ }
+
+ /* Notify the chip of trunk_pa and
+ * let it fill in the cla table entry
+ */
+ ret = cqm_cla_update(cqm_handle, buf_node_parent,
+ buf_node_child, child_index,
+ CQM_CLA_RECORD_NEW_GPA);
+ if (ret != CQM_SUCCESS) {
+ cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_cla_update));
+ goto err2;
+ }
+
+ return CQM_SUCCESS;
+
+err2:
+ pci_unmap_single(cqm_handle->dev, buf_node_child->pa,
+ PAGE_SIZE << cla_table->trunk_order,
+ PCI_DMA_BIDIRECTIONAL);
+err1:
+ free_pages((ulong)(buf_node_child->va), cla_table->trunk_order);
+ buf_node_child->va = NULL;
+ return CQM_FAIL;
+}
+
+void cqm_cla_free(struct cqm_handle_s *cqm_handle,
+ struct cqm_cla_table_s *cla_table,
+ struct cqm_buf_list_s *buf_node_parent,
+ struct cqm_buf_list_s *buf_node_child,
+ u32 child_index, u8 cla_update_mode)
+{
+ struct hifc_hwdev *handle = cqm_handle->ex_handle;
+
+ cqm_dbg("Cla free: cla_update_mode=%u\n", cla_update_mode);
+
+ if (cqm_cla_update(cqm_handle, buf_node_parent,
+ buf_node_child, child_index,
+ cla_update_mode) != CQM_SUCCESS) {
+ cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_cla_update));
+ return;
+ }
+
+ if (cla_update_mode == CQM_CLA_DEL_GPA_WITH_CACHE_INVALID) {
+ if (cqm_cla_cache_invalid(
+ cqm_handle, buf_node_child->pa,
+ PAGE_SIZE << cla_table->trunk_order) != CQM_SUCCESS) {
+ cqm_err(handle->dev_hdl,
+ CQM_FUNCTION_FAIL(cqm_cla_cache_invalid));
+ return;
+ }
+ }
+
+ /* Unmap the PCI mapping of the trunk page */
+ pci_unmap_single(cqm_handle->dev, buf_node_child->pa,
+ PAGE_SIZE << cla_table->trunk_order,
+ PCI_DMA_BIDIRECTIONAL);
+
+ /* Free trunk page */
+ free_pages((ulong)(buf_node_child->va), cla_table->trunk_order);
+ buf_node_child->va = NULL;
+}
+
+/**
+ * cqm_static_qpc_cla_get - For statically allocated QPC, get the count of
+ * buffers from the index position in the cla table without taking the lock
+ * @cqm_handle: cqm handle
+ * @cla_table: cla table
+ * @index: the index of table
+ * @count: the count of buffer
+ * @pa: the physical address
+ * Return: the virtual address
+ */
+u8 *cqm_static_qpc_cla_get(struct cqm_handle_s *cqm_handle,
+ struct cqm_cla_table_s *cla_table,
+ u32 index, u32 count, dma_addr_t *pa)
+{
+ struct hifc_hwdev *handle = cqm_handle->ex_handle;
+ struct cqm_buf_s *cla_y_buf = &cla_table->cla_y_buf;
+ struct cqm_buf_s *cla_z_buf = &cla_table->cla_z_buf;
+ struct cqm_buf_list_s *buf_node_z = NULL;
+ u32 x_index = 0;
+ u32 y_index = 0;
+ u32 z_index = 0;
+ u32 trunk_size = PAGE_SIZE << cla_table->trunk_order;
+ u8 *ret_addr = NULL;
+ u32 offset = 0;
+
+ if (cla_table->cla_lvl == CQM_CLA_LVL_0) {
+ offset = index * cla_table->obj_size;
+ ret_addr = (u8 *)(cla_z_buf->buf_list->va) + offset;
+ *pa = cla_z_buf->buf_list->pa + offset;
+ } else if (cla_table->cla_lvl == CQM_CLA_LVL_1) {
+ z_index = index & ((1 << (cla_table->z + 1)) - 1);
+ y_index = index >> (cla_table->z + 1);
+
+ if (y_index >= cla_z_buf->buf_number) {
+ cqm_err(handle->dev_hdl,
+ "Static qpc cla get: index exceeds buf_number, y_index %u, z_buf_number %u\n",
+ y_index, cla_z_buf->buf_number);
+ return NULL;
+ }
+ buf_node_z = &cla_z_buf->buf_list[y_index];
+ if (!buf_node_z->va) {
+ cqm_err(handle->dev_hdl, "Cla get: static qpc cla_z_buf[%u].va=NULL\n",
+ y_index);
+ return NULL;
+ }
+ buf_node_z->refcount += count;
+ offset = z_index * cla_table->obj_size;
+ ret_addr = (u8 *)(buf_node_z->va) + offset;
+ *pa = buf_node_z->pa + offset;
+ } else {
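+ /* Level 2: split the index into x/y/z components */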
+ z_index = index & ((1 << (cla_table->z + 1)) - 1);
+ y_index = (index >> (cla_table->z + 1)) &
+ ((1 << (cla_table->y - cla_table->z)) - 1);
+ x_index = index >> (cla_table->y + 1);
+
+ if ((x_index >= cla_y_buf->buf_number) ||
+ ((x_index * (trunk_size / sizeof(dma_addr_t)) + y_index) >=
+ cla_z_buf->buf_number)) {
+ cqm_err(handle->dev_hdl,
+ "Static qpc cla get: index exceeds buf_number,x_index %u, y_index %u, y_buf_number %u, z_buf_number %u\n ",
+ x_index, y_index, cla_y_buf->buf_number,
+ cla_z_buf->buf_number);
+ return NULL;
+ }
+
+ buf_node_z = &(cla_z_buf->buf_list[x_index *
+ (trunk_size / sizeof(dma_addr_t)) + y_index]);
+ if (!buf_node_z->va) {
+ cqm_err(handle->dev_hdl, "Cla get: static qpc cla_z_buf.va=NULL\n");
+ return NULL;
+ }
+
+ buf_node_z->refcount += count;
+ offset = z_index * cla_table->obj_size;
+ ret_addr = (u8 *)(buf_node_z->va) + offset;
+ *pa = buf_node_z->pa + offset;
+ }
+
+ return ret_addr;
+}
+
+static s32 cqm_cla_get_level_two(struct cqm_handle_s *cqm_handle,
+ struct cqm_cla_table_s *cla_table,
+ u32 index, u32 count,
+ dma_addr_t *pa, u8 **ret_addr)
+{
+ struct hifc_hwdev *handle = cqm_handle->ex_handle;
+ struct cqm_buf_s *cla_x_buf = &cla_table->cla_x_buf;
+ struct cqm_buf_s *cla_y_buf = &cla_table->cla_y_buf;
+ struct cqm_buf_s *cla_z_buf = &cla_table->cla_z_buf;
+ struct cqm_buf_list_s *buf_node_x = NULL;
+ struct cqm_buf_list_s *buf_node_y = NULL;
+ struct cqm_buf_list_s *buf_node_z = NULL;
+ u32 x_index = 0;
+ u32 y_index = 0;
+ u32 z_index = 0;
+ u32 trunk_size = PAGE_SIZE << cla_table->trunk_order;
+ u32 offset = 0;
+
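+ /* Split the index into its x/y/z components: the low (z + 1) bits
+ * locate the object in a z-level trunk, the next (y - z) bits locate
+ * the z-level trunk, and the high bits locate the y-level trunk
+ */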
+ z_index = index & ((1 << (cla_table->z + 1)) - 1);
+ y_index = (index >> (cla_table->z + 1)) &
+ ((1 << (cla_table->y - cla_table->z)) - 1);
+ x_index = index >> (cla_table->y + 1);
+
+ if ((x_index >= cla_y_buf->buf_number) ||
+ ((x_index * (trunk_size / sizeof(dma_addr_t)) + y_index) >=
+ cla_z_buf->buf_number)) {
+ cqm_err(handle->dev_hdl,
+ "Cla get: index exceeds buf_number, x_index %u, y_index %u, y_buf_number %u, z_buf_number %u\n",
+ x_index, y_index, cla_y_buf->buf_number,
+ cla_z_buf->buf_number);
+ return CQM_FAIL;
+ }
+
+ buf_node_x = cla_x_buf->buf_list;
+ buf_node_y = &cla_y_buf->buf_list[x_index];
+ buf_node_z = &(cla_z_buf->buf_list[x_index *
+ (trunk_size / sizeof(dma_addr_t)) + y_index]);
+
+ /* Y buf node does not exist, allocate the y node page */
+ if (!buf_node_y->va) {
+ if (cqm_cla_alloc(
+ cqm_handle, cla_table,
+ buf_node_x, buf_node_y, x_index) == CQM_FAIL) {
+ cqm_err(handle->dev_hdl,
+ CQM_FUNCTION_FAIL(cqm_cla_alloc));
+ return CQM_FAIL;
+ }
+ }
+
+ /* Z buf node does not exist, allocate the z node page */
+ if (!buf_node_z->va) {
+ if (cqm_cla_alloc(cqm_handle,
+ cla_table,
+ buf_node_y,
+ buf_node_z,
+ y_index) == CQM_FAIL) {
+ cqm_err(handle->dev_hdl,
+ CQM_FUNCTION_FAIL(cqm_cla_alloc));
+ if (buf_node_y->refcount == 0) {
+ /* Free y node, needs cache_invalid */
+ cqm_cla_free(
+ cqm_handle, cla_table,
+ buf_node_x, buf_node_y, x_index,
+ CQM_CLA_DEL_GPA_WITH_CACHE_INVALID);
+ }
+ return CQM_FAIL;
+ }
+
+ cqm_dbg("Cla get: 2L: y_refcount=0x%x\n", buf_node_y->refcount);
+ /* Y buf node's reference count should be +1 */
+ buf_node_y->refcount++;
+ }
+
+ cqm_dbg("Cla get: 2L: z_refcount=0x%x, count=0x%x\n",
+ buf_node_z->refcount, count);
+ buf_node_z->refcount += count;
+ offset = z_index * cla_table->obj_size;
+ *ret_addr = (u8 *)(buf_node_z->va) + offset;
+ *pa = buf_node_z->pa + offset;
+
+ return CQM_SUCCESS;
+}
+
+static s32 cqm_cla_get_level_one(struct cqm_handle_s *cqm_handle,
+ struct cqm_cla_table_s *cla_table,
+ u32 index, u32 count, dma_addr_t *pa,
+ u8 **ret_addr)
+{
+ struct hifc_hwdev *handle = cqm_handle->ex_handle;
+ struct cqm_buf_s *cla_y_buf = &cla_table->cla_y_buf;
+ struct cqm_buf_s *cla_z_buf = &cla_table->cla_z_buf;
+ struct cqm_buf_list_s *buf_node_y = NULL;
+ struct cqm_buf_list_s *buf_node_z = NULL;
+ u32 y_index = 0;
+ u32 z_index = 0;
+ u32 offset = 0;
+
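+ /* The low (z + 1) bits locate the object within a z-level trunk,
+ * the remaining high bits locate the z-level trunk
+ */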
+ z_index = index & ((1 << (cla_table->z + 1)) - 1);
+ y_index = index >> (cla_table->z + 1);
+
+ if (y_index >= cla_z_buf->buf_number) {
+ cqm_err(handle->dev_hdl,
+ "Cla get: index exceeds buf_number, y_index %u, z_buf_number %u\n",
+ y_index, cla_z_buf->buf_number);
+ return CQM_FAIL;
+ }
+ buf_node_z = &cla_z_buf->buf_list[y_index];
+ buf_node_y = cla_y_buf->buf_list;
+
+ /* Z buf node does not exist, first allocate the page */
+ if (!buf_node_z->va) {
+ if (cqm_cla_alloc(cqm_handle,
+ cla_table,
+ buf_node_y,
+ buf_node_z,
+ y_index) == CQM_FAIL) {
+ cqm_err(handle->dev_hdl,
+ CQM_FUNCTION_FAIL(cqm_cla_alloc));
+ cqm_err(handle->dev_hdl,
+ "Cla get: cla_table->type=%u\n",
+ cla_table->type);
+ return CQM_FAIL;
+ }
+ }
+
+ cqm_dbg("Cla get: 1L: z_refcount=0x%x, count=0x%x\n",
+ buf_node_z->refcount, count);
+ buf_node_z->refcount += count;
+ offset = z_index * cla_table->obj_size;
+ *ret_addr = (u8 *)(buf_node_z->va) + offset;
+ *pa = buf_node_z->pa + offset;
+
+ return CQM_SUCCESS;
+}
+
+/**
+ * cqm_cla_get - Allocate the count of buffer from the index position in the
+ * cla table
+ * @cqm_handle: cqm handle
+ * @cla_table: cla table
+ * @index: the index of table
+ * @count: the count of buffer
+ * @pa: the physical address
+ * Return: the virtual address
+ */
+u8 *cqm_cla_get(struct cqm_handle_s *cqm_handle,
+ struct cqm_cla_table_s *cla_table, u32 index,
+ u32 count, dma_addr_t *pa)
+{
+ struct cqm_buf_s *cla_z_buf = &cla_table->cla_z_buf;
+ u8 *ret_addr = NULL;
+ u32 offset = 0;
+
+ mutex_lock(&cla_table->lock);
+ if (cla_table->cla_lvl == CQM_CLA_LVL_0) {
+ /* Level 0 CLA pages are statically allocated */
+ offset = index * cla_table->obj_size;
+ ret_addr = (u8 *)(cla_z_buf->buf_list->va) + offset;
+ *pa = cla_z_buf->buf_list->pa + offset;
+ } else if (cla_table->cla_lvl == CQM_CLA_LVL_1) {
+ if (cqm_cla_get_level_one(cqm_handle, cla_table,
+ index, count,
+ pa, &ret_addr) == CQM_FAIL) {
+ mutex_unlock(&cla_table->lock);
+ return NULL;
+ }
+ } else {
+ if (cqm_cla_get_level_two(cqm_handle,
+ cla_table,
+ index,
+ count,
+ pa,
+ &ret_addr) == CQM_FAIL) {
+ mutex_unlock(&cla_table->lock);
+ return NULL;
+ }
+ }
+
+ mutex_unlock(&cla_table->lock);
+ return ret_addr;
+}
+
+/**
+ * cqm_cla_put - Decrease the reference count of the trunk page; if it drops
+ * to 0, release the trunk page
+ * @cqm_handle: cqm handle
+ * @cla_table: cla table
+ * @index: the index of table
+ * @count: the count of buffer
+ */
+void cqm_cla_put(struct cqm_handle_s *cqm_handle,
+ struct cqm_cla_table_s *cla_table,
+ u32 index, u32 count)
+{
+ struct hifc_hwdev *handle = cqm_handle->ex_handle;
+ struct cqm_buf_s *cla_x_buf = &cla_table->cla_x_buf;
+ struct cqm_buf_s *cla_y_buf = &cla_table->cla_y_buf;
+ struct cqm_buf_s *cla_z_buf = &cla_table->cla_z_buf;
+ struct cqm_buf_list_s *buf_node_x = NULL;
+ struct cqm_buf_list_s *buf_node_y = NULL;
+ struct cqm_buf_list_s *buf_node_z = NULL;
+ u32 x_index = 0;
+ u32 y_index = 0;
+ u32 trunk_size = PAGE_SIZE << cla_table->trunk_order;
+
+ /* Buffer is statically allocated,
+ * no need to control the reference count
+ */
+ if (cla_table->alloc_static == true)
+ return;
+
+ mutex_lock(&cla_table->lock);
+
+ if (cla_table->cla_lvl == CQM_CLA_LVL_1) {
+ y_index = index >> (cla_table->z + 1);
+
+ if (y_index >= cla_z_buf->buf_number) {
+ cqm_err(handle->dev_hdl,
+ "Cla put: index exceeds buf_number, y_index %u, z_buf_number %u\n",
+ y_index, cla_z_buf->buf_number);
+ cqm_err(handle->dev_hdl,
+ "Cla put: cla_table->type=%u\n",
+ cla_table->type);
+ mutex_unlock(&cla_table->lock);
+ return;
+ }
+
+ buf_node_z = &cla_z_buf->buf_list[y_index];
+ buf_node_y = cla_y_buf->buf_list;
+
+ /* When the z node page reference count is 0,
+ * release the z node page
+ */
+ cqm_dbg("Cla put: 1L: z_refcount=0x%x, count=0x%x\n",
+ buf_node_z->refcount, count);
+ buf_node_z->refcount -= count;
+ if (buf_node_z->refcount == 0) {
+ /* Z node does not need cache invalid */
+ cqm_cla_free(cqm_handle, cla_table, buf_node_y,
+ buf_node_z, y_index,
+ CQM_CLA_DEL_GPA_WITHOUT_CACHE_INVALID);
+ }
+ } else if (cla_table->cla_lvl == CQM_CLA_LVL_2) {
+ y_index = (index >> (cla_table->z + 1)) &
+ ((1 << (cla_table->y - cla_table->z)) - 1);
+ x_index = index >> (cla_table->y + 1);
+
+ if ((x_index >= cla_y_buf->buf_number) ||
+ ((x_index * (trunk_size / sizeof(dma_addr_t)) + y_index) >=
+ cla_z_buf->buf_number)) {
+ cqm_err(handle->dev_hdl,
+ "Cla put: index exceeds buf_number, x_index %u, y_index %u, y_buf_number %u, z_buf_number %u\n",
+ x_index, y_index, cla_y_buf->buf_number,
+ cla_z_buf->buf_number);
+ mutex_unlock(&cla_table->lock);
+ return;
+ }
+
+ buf_node_x = cla_x_buf->buf_list;
+ buf_node_y = &cla_y_buf->buf_list[x_index];
+ buf_node_z = &(cla_z_buf->buf_list[x_index *
+ (trunk_size / sizeof(dma_addr_t)) + y_index]);
+ cqm_dbg("Cla put: 2L: z_refcount=0x%x, count=0x%x\n",
+ buf_node_z->refcount, count);
+
+ /* When the z node page reference count is 0,
+ * release the z node page
+ */
+ buf_node_z->refcount -= count;
+ if (buf_node_z->refcount == 0) {
+ cqm_cla_free(cqm_handle, cla_table, buf_node_y,
+ buf_node_z, y_index,
+ CQM_CLA_DEL_GPA_WITHOUT_CACHE_INVALID);
+
+ /* When the y node page reference count is 0,
+ * release the y node page
+ */
+ cqm_dbg("Cla put: 2L: y_refcount=0x%x\n",
+ buf_node_y->refcount);
+ buf_node_y->refcount--;
+ if (buf_node_y->refcount == 0) {
+ /* Y node needs cache invalid */
+ cqm_cla_free(
+ cqm_handle, cla_table, buf_node_x,
+ buf_node_y, x_index,
+ CQM_CLA_DEL_GPA_WITH_CACHE_INVALID);
+ }
+ }
+ }
+
+ mutex_unlock(&cla_table->lock);
+}
+
+/**
+ * cqm_cla_table_get - Find the CLA table structure corresponding to a BAT entry
+ * @bat_table: bat table
+ * @entry_type: entry type
+ * Return: the CLA table
+ */
+struct cqm_cla_table_s *cqm_cla_table_get(struct cqm_bat_table_s *bat_table,
+ u32 entry_type)
+{
+ struct cqm_cla_table_s *cla_table = NULL;
+ u32 i = 0;
+
+ for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) {
+ cla_table = &bat_table->entry[i];
+ if (entry_type == cla_table->type)
+ return cla_table;
+ }
+
+ return NULL;
+}
+
+#define bitmap_section
+
+/**
+ * __cqm_bitmap_init - Initialize a bitmap
+ * @bitmap: cqm bitmap table
+ * Return: 0 - success, negative - failure
+ */
+s32 __cqm_bitmap_init(struct cqm_bitmap_s *bitmap)
+{
+ spin_lock_init(&bitmap->lock);
+
+ /* The max_num of the bitmap is aligned up to 8 and then shifted right
+ * by 3 bits to get the number of bytes needed
+ */
+ bitmap->table =
+ (ulong *)vmalloc((ALIGN(bitmap->max_num, 8) >> 3));
+ CQM_PTR_CHECK_RET(bitmap->table, return CQM_FAIL,
+ CQM_ALLOC_FAIL(bitmap->table));
+ memset(bitmap->table, 0, (ALIGN(bitmap->max_num, 8) >> 3));
+
+ return CQM_SUCCESS;
+}
+
+static s32 cqm_bitmap_init_by_type(struct cqm_handle_s *cqm_handle,
+ struct cqm_cla_table_s *cla_table,
+ struct cqm_bitmap_s *bitmap)
+{
+ struct hifc_hwdev *handle = cqm_handle->ex_handle;
+ struct cqm_func_capability_s *capability = &cqm_handle->func_capability;
+ s32 ret = CQM_SUCCESS;
+
+ switch (cla_table->type) {
+ case CQM_BAT_ENTRY_T_QPC:
+ bitmap->max_num = capability->qpc_number;
+ bitmap->reserved_top = capability->qpc_reserved;
+ bitmap->last = capability->qpc_reserved;
+ cqm_info(handle->dev_hdl, "Bitmap init: cla_table_type=%u, max_num=0x%x\n",
+ cla_table->type, bitmap->max_num);
+ ret = __cqm_bitmap_init(bitmap);
+ break;
+ case CQM_BAT_ENTRY_T_MPT:
+ bitmap->max_num = capability->mpt_number;
+ bitmap->reserved_top = capability->mpt_reserved;
+ bitmap->last = capability->mpt_reserved;
+ cqm_info(handle->dev_hdl, "Bitmap init: cla_table_type=%u, max_num=0x%x\n",
+ cla_table->type, bitmap->max_num);
+ ret = __cqm_bitmap_init(bitmap);
+ break;
+ case CQM_BAT_ENTRY_T_SCQC:
+ bitmap->max_num = capability->scqc_number;
+ bitmap->reserved_top = capability->scq_reserved;
+ bitmap->last = capability->scq_reserved;
+ cqm_info(handle->dev_hdl, "Bitmap init: cla_table_type=%u, max_num=0x%x\n",
+ cla_table->type, bitmap->max_num);
+ ret = __cqm_bitmap_init(bitmap);
+ break;
+ case CQM_BAT_ENTRY_T_SRQC:
+ bitmap->max_num = capability->srqc_number;
+ bitmap->reserved_top = 0;
+ bitmap->last = 0;
+ cqm_info(handle->dev_hdl, "Bitmap init: cla_table_type=%u, max_num=0x%x\n",
+ cla_table->type, bitmap->max_num);
+ ret = __cqm_bitmap_init(bitmap);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * cqm_bitmap_init - Initialize a bitmap
+ * @cqm_handle: cqm handle
+ * Return: 0 - success, negative - failure
+ */
+s32 cqm_bitmap_init(struct cqm_handle_s *cqm_handle)
+{
+ struct hifc_hwdev *handle = cqm_handle->ex_handle;
+ struct cqm_bat_table_s *bat_table = &cqm_handle->bat_table;
+ struct cqm_cla_table_s *cla_table = NULL;
+ struct cqm_bitmap_s *bitmap = NULL;
+ u32 i = 0;
+ s32 ret = CQM_SUCCESS;
+
+ for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) {
+ cla_table = &bat_table->entry[i];
+ if (cla_table->obj_num == 0) {
+ cqm_info(handle->dev_hdl, "Cla alloc: cla_type %u, obj_num=0, don't init bitmap\n",
+ cla_table->type);
+ continue;
+ }
+
+ bitmap = &cla_table->bitmap;
+ ret = cqm_bitmap_init_by_type(cqm_handle, cla_table, bitmap);
+ if (ret != CQM_SUCCESS) {
+ cqm_err(handle->dev_hdl, "Bitmap init: failed to init cla_table_type=%u, obj_num=0x%x\n",
+ cla_table->type, cla_table->obj_num);
+ goto err;
+ }
+ }
+
+ return CQM_SUCCESS;
+
+err:
+ cqm_bitmap_uninit(cqm_handle);
+ return CQM_FAIL;
+}
+
+/**
+ * cqm_bitmap_uninit - Uninitialize a bitmap
+ * @cqm_handle: cqm handle
+ */
+void cqm_bitmap_uninit(struct cqm_handle_s *cqm_handle)
+{
+ struct cqm_bat_table_s *bat_table = &cqm_handle->bat_table;
+ struct cqm_cla_table_s *cla_table = NULL;
+ struct cqm_bitmap_s *bitmap = NULL;
+ u32 i = 0;
+
+ for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) {
+ cla_table = &bat_table->entry[i];
+ bitmap = &cla_table->bitmap;
+ if (cla_table->type != CQM_BAT_ENTRY_T_INVALID) {
+ if (bitmap->table) {
+ vfree(bitmap->table);
+ bitmap->table = NULL;
+ }
+ }
+ }
+}
+
+/**
+ * cqm_bitmap_check_range - Starting from begin, check whether count bits are
+ * free in the table; all of the bits must be 0 and the range must not cross
+ * a step boundary
+ * @table: bitmap table
+ * @step: step size; the checked range must not cross a step boundary
+ * @max_num: max num
+ * @begin: begin position
+ * @count: the count of bits to check
+ * Return: the begin position if the check passes, otherwise the next
+ * position to search from (max_num if the range exceeds the table)
+ */
+u32 cqm_bitmap_check_range(const ulong *table, u32 step,
+ u32 max_num, u32 begin, u32 count)
+{
+ u32 i = 0;
+ u32 end = (begin + (count - 1));
+
+ /* Single bit is not checked */
+ if (count == 1)
+ return begin;
+
+ /* End is out of bounds */
+ if (end >= max_num)
+ return max_num;
+
+ /* Bit check; if any bit in the range is set, return the position after it */
+ for (i = (begin + 1); i <= end; i++) {
+ if (test_bit((s32)i, table))
+ return i + 1;
+ }
+
+ /* Check if it is in a different step */
+ if ((begin & (~(step - 1))) != (end & (~(step - 1))))
+ return (end & (~(step - 1)));
+
+ /* If check pass, return begin position */
+ return begin;
+}
+
+static void cqm_bitmap_set_bit(struct cqm_bitmap_s *bitmap, u32 index,
+ u32 max_num, u32 count, bool update_last,
+ ulong *table)
+{
+ u32 i;
+
+ /* Set 1 to the found bit and reset last */
+ if (index < max_num) {
+ for (i = index; i < (index + count); i++)
+ set_bit(i, table);
+
+ if (update_last) {
+ bitmap->last = (index + count);
+ if (bitmap->last >= bitmap->max_num)
+ bitmap->last = bitmap->reserved_top;
+ }
+ }
+}
+
+/**
+ * cqm_bitmap_alloc - Allocate a bitmap index; indexes 0 and 1 should not be
+ * used. Scanning starts from the last allocated position, a series of
+ * consecutive indexes can be allocated, and the range must not cross a trunk
+ * @bitmap: cqm bitmap table
+ * @step: step size; the allocated range must not cross a step boundary
+ * @count: the count of bits to allocate
+ * @update_last: whether to update the last allocated position
+ * Return: Success - return the index, failure - return the max
+ */
+u32 cqm_bitmap_alloc(struct cqm_bitmap_s *bitmap, u32 step, u32 count,
+ bool update_last)
+{
+ u32 index = 0;
+ u32 max_num = bitmap->max_num;
+ u32 last = bitmap->last;
+ ulong *table = bitmap->table;
+
+ spin_lock(&bitmap->lock);
+
+ /* Search for a free bit from the last position */
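+ /* cqm_bitmap_check_range() returns the start index only when all
+ * 'count' bits are free and stay within one step; any other return
+ * value is used as the next search position
+ */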
+ do {
+ index = find_next_zero_bit(table, max_num, last);
+ if (index < max_num) {
+ last = cqm_bitmap_check_range(table, step,
+ max_num, index, count);
+ } else {
+ break;
+ }
+ } while (last != index);
+
+ /* The above search failed, search for a free bit from the beginning */
+ if (index >= max_num) {
+ last = bitmap->reserved_top;
+ do {
+ index = find_next_zero_bit(table, max_num, last);
+ if (index < max_num) {
+ last = cqm_bitmap_check_range(table, step,
+ max_num,
+ index, count);
+ } else {
+ break;
+ }
+ } while (last != index);
+ }
+ cqm_bitmap_set_bit(bitmap, index, max_num, count, update_last, table);
+ spin_unlock(&bitmap->lock);
+ return index;
+}
+
+/**
+ * cqm_bitmap_alloc_reserved - Allocate the reserved bit at the given index
+ * @bitmap: bitmap table
+ * @count: count, must be 1
+ * @index: the index of bitmap
+ * Return: the index on success, CQM_INDEX_INVALID on failure
+ */
+u32 cqm_bitmap_alloc_reserved(struct cqm_bitmap_s *bitmap, u32 count, u32 index)
+{
+ ulong *table = bitmap->table;
+ u32 ret_index = CQM_INDEX_INVALID;
+
+ if ((index >= bitmap->reserved_top) || (index >= bitmap->max_num) ||
+ (count != 1)) {
+ return CQM_INDEX_INVALID;
+ }
+
+ spin_lock(&bitmap->lock);
+
+ if (test_bit(index, table)) {
+ ret_index = CQM_INDEX_INVALID;
+ } else {
+ set_bit(index, table);
+ ret_index = index;
+ }
+
+ spin_unlock(&bitmap->lock);
+ return ret_index;
+}
+
+/**
+ * cqm_bitmap_free - Release a bitmap index
+ * @bitmap: bitmap table
+ * @index: the index of bitmap
+ * @count: count
+ */
+void cqm_bitmap_free(struct cqm_bitmap_s *bitmap, u32 index, u32 count)
+{
+ ulong i = 0;
+
+ spin_lock(&bitmap->lock);
+
+ for (i = index; i < (index + count); i++)
+ clear_bit((s32)i, bitmap->table);
+
+ spin_unlock(&bitmap->lock);
+}
+
+#define obj_table_section
+
+/**
+ * _cqm_object_table_init - Initialize a table of object and index
+ * @cqm_handle: cqm handle
+ * Return: 0 - success, negative - failure
+ */
+s32 __cqm_object_table_init(struct cqm_object_table_s *obj_table)
+{
+ rwlock_init(&obj_table->lock);
+
+ obj_table->table = (struct cqm_object_s **)vmalloc(obj_table->max_num *
+ sizeof(void *));
+ CQM_PTR_CHECK_RET(obj_table->table, return CQM_FAIL,
+ CQM_ALLOC_FAIL(table));
+ memset(obj_table->table, 0, obj_table->max_num * sizeof(void *));
+ return CQM_SUCCESS;
+}
+
+/**
+ * cqm_object_table_init - Initialize the association table of object and index
+ * @cqm_handle: cqm handle
+ * Return: 0 - success, negative - failure
+ */
+s32 cqm_object_table_init(struct cqm_handle_s *cqm_handle)
+{
+ struct hifc_hwdev *handle = cqm_handle->ex_handle;
+ struct cqm_func_capability_s *capability = &cqm_handle->func_capability;
+ struct cqm_bat_table_s *bat_table = &cqm_handle->bat_table;
+ struct cqm_cla_table_s *cla_table = NULL;
+ struct cqm_object_table_s *obj_table = NULL;
+ s32 ret = CQM_SUCCESS;
+ u32 i = 0;
+
+ for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) {
+ cla_table = &bat_table->entry[i];
+ if (cla_table->obj_num == 0) {
+ cqm_info(handle->dev_hdl,
+ "Obj table init: cla_table_type %u, obj_num=0, don't init obj table\n",
+ cla_table->type);
+ continue;
+ }
+
+ obj_table = &cla_table->obj_table;
+
+ switch (cla_table->type) {
+ case CQM_BAT_ENTRY_T_QPC:
+ obj_table->max_num = capability->qpc_number;
+ ret = __cqm_object_table_init(obj_table);
+ break;
+ case CQM_BAT_ENTRY_T_MPT:
+ obj_table->max_num = capability->mpt_number;
+ ret = __cqm_object_table_init(obj_table);
+ break;
+ case CQM_BAT_ENTRY_T_SCQC:
+ obj_table->max_num = capability->scqc_number;
+ ret = __cqm_object_table_init(obj_table);
+ break;
+ case CQM_BAT_ENTRY_T_SRQC:
+ obj_table->max_num = capability->srqc_number;
+ ret = __cqm_object_table_init(obj_table);
+ break;
+ default:
+ break;
+ }
+
+ if (ret != CQM_SUCCESS) {
+ cqm_err(handle->dev_hdl,
+ "Obj table init: failed to init cla_table_type=%u, obj_num=0x%x\n",
+ cla_table->type, cla_table->obj_num);
+ goto err;
+ }
+ }
+
+ return CQM_SUCCESS;
+
+err:
+ cqm_object_table_uninit(cqm_handle);
+ return CQM_FAIL;
+}
+
+/**
+ * cqm_object_table_uninit - Deinitialize the association table of object and
+ * index
+ * @cqm_handle: cqm handle
+ */
+void cqm_object_table_uninit(struct cqm_handle_s *cqm_handle)
+{
+ struct cqm_bat_table_s *bat_table = &cqm_handle->bat_table;
+ struct cqm_cla_table_s *cla_table = NULL;
+ struct cqm_object_table_s *obj_table = NULL;
+ u32 i = 0;
+
+ for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) {
+ cla_table = &bat_table->entry[i];
+ obj_table = &cla_table->obj_table;
+ if (cla_table->type != CQM_BAT_ENTRY_T_INVALID) {
+ if (obj_table->table) {
+ vfree(obj_table->table);
+ obj_table->table = NULL;
+ }
+ }
+ }
+}
+
+/**
+ * cqm_object_table_insert - Insert an object, turn off the soft interrupt
+ * @cqm_handle: cqm handle
+ * @object_table: object table
+ * @index: the index of table
+ * @obj: the object to insert
+ * Return: 0 - success, negative - failure
+ */
+s32 cqm_object_table_insert(struct cqm_handle_s *cqm_handle,
+ struct cqm_object_table_s *object_table, u32 index,
+ struct cqm_object_s *obj)
+{
+ struct hifc_hwdev *handle = cqm_handle->ex_handle;
+
+ if (index >= object_table->max_num) {
+ cqm_err(handle->dev_hdl, "Obj table insert: index 0x%x exceeds max_num 0x%x\n",
+ index, object_table->max_num);
+ return CQM_FAIL;
+ }
+
+ write_lock(&object_table->lock);
+
+ if (!object_table->table[index]) {
+ object_table->table[index] = obj;
+ write_unlock(&object_table->lock);
+ return CQM_SUCCESS;
+ }
+ write_unlock(&object_table->lock);
+ cqm_err(handle->dev_hdl, "Obj table insert: object_table->table[0x%x] has been inserted\n",
+ index);
+ return CQM_FAIL;
+}
+
+/**
+ * cqm_object_table_remove - remove an object
+ * @cqm_handle: cqm handle
+ * @object_table: object table
+ * @index: the index of table
+ * @obj: the object to remove
+ */
+void cqm_object_table_remove(struct cqm_handle_s *cqm_handle,
+ struct cqm_object_table_s *object_table,
+ u32 index, const struct cqm_object_s *obj)
+{
+ struct hifc_hwdev *handle = cqm_handle->ex_handle;
+
+ if (index >= object_table->max_num) {
+ cqm_err(handle->dev_hdl, "Obj table remove: index 0x%x exceeds max_num 0x%x\n",
+ index, object_table->max_num);
+ return;
+ }
+
+ write_lock(&object_table->lock);
+
+ if ((object_table->table[index]) &&
+ (object_table->table[index] == obj)) {
+ object_table->table[index] = NULL;
+ } else {
+ cqm_err(handle->dev_hdl, "Obj table remove: object_table->table[0x%x] has been removed\n",
+ index);
+ }
+
+ write_unlock(&object_table->lock);
+}
+
+/**
+ * cqm_srq_used_rq_delete - Delete rq in TOE SRQ mode
+ * @object: cqm object
+ */
+void cqm_srq_used_rq_delete(struct cqm_object_s *object)
+{
+ struct cqm_queue_s *common = container_of(object, struct cqm_queue_s,
+ object);
+ struct cqm_nonrdma_qinfo_s *qinfo = container_of(
+ common,
+ struct cqm_nonrdma_qinfo_s,
+ common);
+ u32 link_wqe_offset = qinfo->wqe_per_buf * qinfo->wqe_size;
+ struct cqm_srq_linkwqe_s *srq_link_wqe = NULL;
+ dma_addr_t addr;
+ struct cqm_handle_s *cqm_handle = (struct cqm_handle_s *)
+ (common->object.cqm_handle);
+ struct hifc_hwdev *handle = cqm_handle->ex_handle;
+
+ /* The current SRQ solution does not support RQ initialization
+ * without a container, which may cause errors when RQ resources are
+ * released. So the RQ is initialized with only one container, and
+ * only one container is released when the resources are freed.
+ */
+ CQM_PTR_CHECK_NO_RET(
+ common->head_container, "Rq del: rq has no container to release\n",
+ return);
+
+ /* Get current container pa from link wqe, and unmap it */
+ srq_link_wqe = (struct cqm_srq_linkwqe_s *)(common->head_container +
+ link_wqe_offset);
+ /* Only the link wqe part is converted from big endian */
+ cqm_swab32((u8 *)(srq_link_wqe), sizeof(struct cqm_linkwqe_s) >> 2);
+
+ addr = CQM_ADDR_COMBINE(srq_link_wqe->current_buffer_gpa_h,
+ srq_link_wqe->current_buffer_gpa_l);
+ if (addr == 0) {
+ cqm_err(handle->dev_hdl, "Rq del: buffer physical addr is null\n");
+ return;
+ }
+ pci_unmap_single(cqm_handle->dev, addr, qinfo->container_size,
+ PCI_DMA_BIDIRECTIONAL);
+
+ /* Get current container va from link wqe, and free it */
+ addr = CQM_ADDR_COMBINE(srq_link_wqe->current_buffer_addr_h,
+ srq_link_wqe->current_buffer_addr_l);
+ if (addr == 0) {
+ cqm_err(handle->dev_hdl, "Rq del: buffer virtual addr is null\n");
+ return;
+ }
+ kfree((void *)addr);
+}
+
+#define obj_intern_if_section
+
+/**
+ * cqm_qpc_mpt_bitmap_alloc - Allocate an index from the bitmap when creating
+ * qpc or mpt
+ * @object: cqm object
+ * @cla_table: cla table
+ * Return: 0 - success, negative - failure
+ */
+s32 cqm_qpc_mpt_bitmap_alloc(struct cqm_object_s *object,
+ struct cqm_cla_table_s *cla_table)
+{
+ struct cqm_handle_s *cqm_handle = (struct cqm_handle_s *)
+ object->cqm_handle;
+ struct hifc_hwdev *handle = cqm_handle->ex_handle;
+ struct cqm_qpc_mpt_s *common = container_of(object,
+ struct cqm_qpc_mpt_s,
+ object);
+ struct cqm_qpc_mpt_info_s *qpc_mpt_info =
+ container_of(
+ common,
+ struct cqm_qpc_mpt_info_s,
+ common);
+ struct cqm_bitmap_s *bitmap = &cla_table->bitmap;
+ u32 index = 0;
+ u32 count = 0;
+
+ count = (ALIGN(object->object_size, cla_table->obj_size)) /
+ cla_table->obj_size;
+ qpc_mpt_info->index_count = count;
+
+ if (qpc_mpt_info->common.xid == CQM_INDEX_INVALID) {
+ /* Allocate index normally */
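+ /* The step (1 << (z + 1)) keeps the allocated index range
+ * within one trunk
+ */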
+ index = cqm_bitmap_alloc(
+ bitmap,
+ 1 << (cla_table->z + 1),
+ count,
+ cqm_handle->func_capability.xid_alloc_mode);
+ if (index < bitmap->max_num) {
+ qpc_mpt_info->common.xid = index;
+ } else {
+ cqm_err(handle->dev_hdl,
+ CQM_FUNCTION_FAIL(cqm_bitmap_alloc));
+ return CQM_FAIL;
+ }
+ } else {
+ /* Allocate reserved index */
+ index = cqm_bitmap_alloc_reserved(
+ bitmap, count,
+ qpc_mpt_info->common.xid);
+ if (index != qpc_mpt_info->common.xid) {
+ cqm_err(handle->dev_hdl,
+ CQM_FUNCTION_FAIL(cqm_bitmap_alloc_reserved));
+ return CQM_FAIL;
+ }
+ }
+
+ return CQM_SUCCESS;
+}
+
+static struct cqm_cla_table_s *cqm_qpc_mpt_prepare_cla_table(
+ struct cqm_object_s *object)
+{
+ struct cqm_handle_s *cqm_handle = (struct cqm_handle_s *)
+ object->cqm_handle;
+ struct hifc_hwdev *handle = cqm_handle->ex_handle;
+ struct cqm_bat_table_s *bat_table = &cqm_handle->bat_table;
+
+ struct cqm_cla_table_s *cla_table = NULL;
+
+ /* Get the corresponding cla table */
+ if (object->object_type == CQM_OBJECT_SERVICE_CTX) {
+ cla_table = cqm_cla_table_get(bat_table, CQM_BAT_ENTRY_T_QPC);
+ } else {
+ cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(object->object_type));
+ return NULL;
+ }
+
+ CQM_PTR_CHECK_RET(cla_table, return NULL,
+ CQM_FUNCTION_FAIL(cqm_cla_table_get));
+
+ /* Allocate index for bitmap */
+ if (cqm_qpc_mpt_bitmap_alloc(object, cla_table) == CQM_FAIL) {
+ cqm_err(handle->dev_hdl,
+ CQM_FUNCTION_FAIL(cqm_qpc_mpt_bitmap_alloc));
+ return NULL;
+ }
+
+ return cla_table;
+}
+
+/**
+ * cqm_qpc_mpt_create - Create qpc or mpt
+ * @object: cqm object
+ * Return: 0 - success, negative - failure
+ */
+s32 cqm_qpc_mpt_create(struct cqm_object_s *object)
+{
+ struct cqm_handle_s *cqm_handle = (struct cqm_handle_s *)
+ object->cqm_handle;
+ struct hifc_hwdev *handle = cqm_handle->ex_handle;
+ struct cqm_qpc_mpt_s *common =
+ container_of(object, struct cqm_qpc_mpt_s, object);
+ struct cqm_qpc_mpt_info_s *qpc_mpt_info =
+ container_of(common, struct cqm_qpc_mpt_info_s, common);
+ struct cqm_cla_table_s *cla_table = NULL;
+ struct cqm_bitmap_s *bitmap = NULL;
+ struct cqm_object_table_s *object_table = NULL;
+ u32 index = 0;
+ u32 count = 0;
+
+ cla_table = cqm_qpc_mpt_prepare_cla_table(object);
+ CQM_PTR_CHECK_RET(cla_table, return CQM_FAIL,
+ CQM_FUNCTION_FAIL(cqm_qpc_mpt_prepare_cla_table));
+
+ bitmap = &cla_table->bitmap;
+ index = qpc_mpt_info->common.xid;
+ count = qpc_mpt_info->index_count;
+
+ /* Find the trunk page from the BAT/CLA and allocate the buffer; the
+ * service needs to ensure that a released buffer has been cleared
+ */
+ if (cla_table->alloc_static == true) {
+ qpc_mpt_info->common.vaddr =
+ cqm_static_qpc_cla_get(cqm_handle, cla_table,
+ index, count, &common->paddr);
+ } else {
+ qpc_mpt_info->common.vaddr =
+ cqm_cla_get(cqm_handle, cla_table,
+ index, count, &common->paddr);
+ }
+ if (!qpc_mpt_info->common.vaddr) {
+ cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_cla_get));
+ cqm_err(handle->dev_hdl,
+ "Qpc mpt init: qpc mpt vaddr is null, cla_table->alloc_static=%d\n",
+ cla_table->alloc_static);
+ goto err1;
+ }
+
+ /* Associate index with object, FC executes in interrupt context */
+ object_table = &cla_table->obj_table;
+
+ if (cqm_object_table_insert(cqm_handle, object_table, index, object) !=
+ CQM_SUCCESS) {
+ cqm_err(handle->dev_hdl,
+ CQM_FUNCTION_FAIL(cqm_object_table_insert));
+ goto err2;
+ }
+
+ return CQM_SUCCESS;
+
+err2:
+ cqm_cla_put(cqm_handle, cla_table, index, count);
+err1:
+ cqm_bitmap_free(bitmap, index, count);
+ return CQM_FAIL;
+}
+
+/**
+ * cqm_qpc_mpt_delete - Delete qpc or mpt
+ * @object: cqm object
+ */
+void cqm_qpc_mpt_delete(struct cqm_object_s *object)
+{
+ struct cqm_handle_s *cqm_handle = (struct cqm_handle_s *)
+ object->cqm_handle;
+ struct hifc_hwdev *handle = cqm_handle->ex_handle;
+ struct cqm_qpc_mpt_s *common = container_of(object,
+ struct cqm_qpc_mpt_s,
+ object);
+ struct cqm_qpc_mpt_info_s *qpc_mpt_info = container_of(
+ common,
+ struct cqm_qpc_mpt_info_s,
+ common);
+ struct cqm_bat_table_s *bat_table = &cqm_handle->bat_table;
+ struct cqm_cla_table_s *cla_table = NULL;
+ struct cqm_bitmap_s *bitmap = NULL;
+ struct cqm_object_table_s *object_table = NULL;
+ u32 index = qpc_mpt_info->common.xid;
+ u32 count = qpc_mpt_info->index_count;
+
+ /* Find the corresponding cla table */
+ atomic_inc(&cqm_handle->ex_handle->hw_stats.cqm_stats.cqm_qpc_mpt_delete_cnt);
+
+ if (object->object_type == CQM_OBJECT_SERVICE_CTX) {
+ cla_table = cqm_cla_table_get(bat_table, CQM_BAT_ENTRY_T_QPC);
+ } else {
+ cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(object->object_type));
+ return;
+ }
+
+ CQM_PTR_CHECK_NO_RET(
+ cla_table, CQM_FUNCTION_FAIL(cqm_cla_table_get), return);
+
+ /* Disassociate index with object */
+ object_table = &cla_table->obj_table;
+
+ cqm_object_table_remove(cqm_handle, object_table, index, object);
+
+ /* Wait for the completion and ensure that all references to the QPC
+ * are completed
+ */
+ if (atomic_dec_and_test(&object->refcount)) {
+ complete(&object->free);
+ } else {
+ cqm_err(handle->dev_hdl,
+ "Qpc mpt del: object is referred by others, has to wait for completion\n");
+ }
+
+ /* Static QPC allocation must not block; the service guarantees that
+ * all references to the QPC are completed before it is deleted
+ */
+ if (cla_table->alloc_static == false)
+ wait_for_completion(&object->free);
+ /* Free qpc buffer */
+ cqm_cla_put(cqm_handle, cla_table, index, count);
+
+ /* Free index into bitmap */
+ bitmap = &cla_table->bitmap;
+ cqm_bitmap_free(bitmap, index, count);
+}
+
+/**
+ * cqm_linkwqe_fill - Fill link wqe for non RDMA queue buffer
+ * @buf: cqm buffer
+ * @wqe_per_buf: not include link wqe
+ * @wqe_size: wqe size
+ * @wqe_number: not include link wqe
+ * @tail: true - the link wqe must be at the tail; false - the link wqe may
+ * not be at the tail
+ * @link_mode: link wqe mode
+ */
+void cqm_linkwqe_fill(struct cqm_buf_s *buf,
+ u32 wqe_per_buf,
+ u32 wqe_size,
+ u32 wqe_number,
+ bool tail,
+ u8 link_mode)
+{
+ struct cqm_linkwqe_s *wqe = NULL;
+ struct cqm_linkwqe_128b_s *linkwqe = NULL;
+ u8 *va = NULL;
+ u32 i = 0;
+ dma_addr_t addr;
+
+ /* Except for the last buffer, the linkwqe of other buffers is directly
+ * filled to the tail
+ */
+ for (i = 0; i < buf->buf_number; i++) {
+ va = (u8 *)(buf->buf_list[i].va);
+
+ if (i != (buf->buf_number - 1)) {
+ wqe = (struct cqm_linkwqe_s *)(va + (u32)(wqe_size *
+ wqe_per_buf));
+ wqe->wf = CQM_WQE_WF_LINK;
+ wqe->ctrlsl = CQM_LINK_WQE_CTRLSL_VALUE;
+ wqe->lp = CQM_LINK_WQE_LP_INVALID;
+ /* The o-bit of a valid link wqe needs to be set to 1, and
+ * each service needs to confirm that o-bit=1 means
+ * valid and o-bit=0 means invalid
+ */
+ wqe->o = CQM_LINK_WQE_OWNER_VALID;
+ addr = buf->buf_list[(u32)(i + 1)].pa;
+ wqe->next_page_gpa_h = CQM_ADDR_HI(addr);
+ wqe->next_page_gpa_l = CQM_ADDR_LW(addr);
+ } else {
+ /* The link wqe of the last buffer needs special handling */
+ if (tail == true) {
+ /* Must be filled at the end of the page */
+ wqe = (struct cqm_linkwqe_s *)(va +
+ (u32)(wqe_size * wqe_per_buf));
+ } else {
+ /* The last linkwqe is filled immediately after
+ * the last wqe
+ */
+ wqe = (struct cqm_linkwqe_s *)
+ (va + (u32)(wqe_size *
+ (wqe_number - wqe_per_buf *
+ (buf->buf_number - 1))));
+ }
+ wqe->wf = CQM_WQE_WF_LINK;
+ wqe->ctrlsl = CQM_LINK_WQE_CTRLSL_VALUE;
+
+ /* In link mode, the last link wqe is invalid; in ring
+ * mode, the last link wqe is valid, points back to the
+ * home page, and has lp set
+ */
+ if (link_mode == CQM_QUEUE_LINK_MODE) {
+ wqe->o = CQM_LINK_WQE_OWNER_INVALID;
+ } else {
+ /* The lp field of the last link_wqe is filled
+ * with 1, indicating that the o-bit is flipped
+ * from here
+ */
+ wqe->lp = CQM_LINK_WQE_LP_VALID;
+ wqe->o = CQM_LINK_WQE_OWNER_VALID;
+ addr = buf->buf_list[0].pa;
+ wqe->next_page_gpa_h = CQM_ADDR_HI(addr);
+ wqe->next_page_gpa_l = CQM_ADDR_LW(addr);
+ }
+ }
+
+ if (wqe_size == 128) {
+			/* For a 128B link wqe, the o-bit in both 64B halves
+			 * must be set: for IFOE it is the 63rd bit from the
+			 * end of the last 64B, for TOE the 157th bit from the
+			 * end
+			 */
+ linkwqe = (struct cqm_linkwqe_128b_s *)wqe;
+ linkwqe->second_64b.third_16B.bs.toe_o =
+ CQM_LINK_WQE_OWNER_VALID;
+ linkwqe->second_64b.forth_16B.bs.ifoe_o =
+ CQM_LINK_WQE_OWNER_VALID;
+
+ /* big endian conversion */
+ cqm_swab32((u8 *)wqe,
+ sizeof(struct cqm_linkwqe_128b_s) >> 2);
+ } else {
+ /* big endian conversion */
+ cqm_swab32((u8 *)wqe,
+ sizeof(struct cqm_linkwqe_s) >> 2);
+ }
+ }
+}
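+
+/* Illustrative layout (not part of the driver): with a 4K buffer and 64B
+ * wqes, wqe_per_buf is 4096 / 64 - 1 = 63, so slots 0..62 hold normal wqes
+ * and the link wqe occupies slot 63 at the page tail (tail == true). In ring
+ * mode the last link wqe points back to buf_list[0] with lp set; in link
+ * mode it is left invalid.
+ */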
+
+static s32 cqm_nonrdma_queue_ctx_create_srq(struct cqm_object_s *object)
+{
+ struct cqm_handle_s *cqm_handle = (struct cqm_handle_s *)
+ object->cqm_handle;
+ struct hifc_hwdev *handle = cqm_handle->ex_handle;
+ struct cqm_queue_s *common = container_of(object,
+ struct cqm_queue_s, object);
+ struct cqm_nonrdma_qinfo_s *qinfo = container_of(
+ common,
+ struct cqm_nonrdma_qinfo_s,
+ common);
+ s32 shift = 0;
+
+ shift = cqm_shift(qinfo->q_ctx_size);
+ common->q_ctx_vaddr = (u8 *)cqm_kmalloc_align(
+ qinfo->q_ctx_size,
+ GFP_KERNEL | __GFP_ZERO,
+ (u16)shift);
+ if (!common->q_ctx_vaddr) {
+ cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(q_ctx_vaddr));
+ return CQM_FAIL;
+ }
+
+ common->q_ctx_paddr =
+ pci_map_single(cqm_handle->dev, common->q_ctx_vaddr,
+ qinfo->q_ctx_size, PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(cqm_handle->dev, common->q_ctx_paddr)) {
+ cqm_err(handle->dev_hdl, CQM_MAP_FAIL(q_ctx_vaddr));
+ cqm_kfree_align(common->q_ctx_vaddr);
+ common->q_ctx_vaddr = NULL;
+ return CQM_FAIL;
+ }
+
+ return CQM_SUCCESS;
+}
+
+static s32 cqm_nonrdma_queue_ctx_create_scq(struct cqm_object_s *object)
+{
+ struct cqm_handle_s *cqm_handle = (struct cqm_handle_s *)
+ object->cqm_handle;
+ struct hifc_hwdev *handle = cqm_handle->ex_handle;
+ struct cqm_queue_s *common = container_of(object,
+ struct cqm_queue_s,
+ object);
+ struct cqm_nonrdma_qinfo_s *qinfo = container_of(
+ common,
+ struct cqm_nonrdma_qinfo_s,
+ common);
+ struct cqm_bat_table_s *bat_table = &cqm_handle->bat_table;
+ struct cqm_cla_table_s *cla_table = NULL;
+ struct cqm_bitmap_s *bitmap = NULL;
+ struct cqm_object_table_s *object_table = NULL;
+
+ /* Find the corresponding cla table */
+ cla_table = cqm_cla_table_get(bat_table, CQM_BAT_ENTRY_T_SCQC);
+ if (!cla_table) {
+ cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_cla_table_get));
+ return CQM_FAIL;
+ }
+
+ /* Allocate index for bitmap */
+ bitmap = &cla_table->bitmap;
+ qinfo->index_count = (ALIGN(qinfo->q_ctx_size, cla_table->obj_size)) /
+ cla_table->obj_size;
+ qinfo->common.index = cqm_bitmap_alloc(bitmap, 1 << (cla_table->z + 1),
+ qinfo->index_count, cqm_handle->func_capability.xid_alloc_mode);
+ if (qinfo->common.index >= bitmap->max_num) {
+ cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_bitmap_alloc));
+ return CQM_FAIL;
+ }
+
+ /* Find the trunk page from BAT/CLA and allocate buffer */
+ common->q_ctx_vaddr = cqm_cla_get(cqm_handle, cla_table,
+ qinfo->common.index,
+ qinfo->index_count,
+ &common->q_ctx_paddr);
+ if (!common->q_ctx_vaddr) {
+ cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_cla_get));
+ cqm_bitmap_free(bitmap, qinfo->common.index,
+ qinfo->index_count);
+ return CQM_FAIL;
+ }
+
+ /* Associate index with object */
+ object_table = &cla_table->obj_table;
+
+ if (cqm_object_table_insert(
+ cqm_handle, object_table,
+ qinfo->common.index, object) != CQM_SUCCESS) {
+ cqm_err(handle->dev_hdl,
+ CQM_FUNCTION_FAIL(cqm_object_table_insert));
+ cqm_cla_put(cqm_handle, cla_table, qinfo->common.index,
+ qinfo->index_count);
+ cqm_bitmap_free(bitmap, qinfo->common.index,
+ qinfo->index_count);
+ return CQM_FAIL;
+ }
+
+ return CQM_SUCCESS;
+}
+
+s32 cqm_nonrdma_queue_ctx_create(struct cqm_object_s *object)
+{
+ if (object->object_type == CQM_OBJECT_NONRDMA_SRQ)
+ return cqm_nonrdma_queue_ctx_create_srq(object);
+ else if (object->object_type == CQM_OBJECT_NONRDMA_SCQ)
+ return cqm_nonrdma_queue_ctx_create_scq(object);
+
+ return CQM_SUCCESS;
+}
+
+#define CQM_NONRDMA_CHECK_WQE_NUMBER(number) \
+	(((number) < CQM_CQ_DEPTH_MIN) || ((number) > CQM_CQ_DEPTH_MAX))
+
+/**
+ * cqm_nonrdma_queue_create - Create a queue for a non-RDMA service
+ * @object: cqm object
+ * Return: 0 - success, negative - failure
+ */
+s32 cqm_nonrdma_queue_create(struct cqm_object_s *object)
+{
+ struct cqm_handle_s *cqm_handle = (struct cqm_handle_s *)
+ object->cqm_handle;
+ struct hifc_hwdev *handle = cqm_handle->ex_handle;
+ struct cqm_service_s *service = &cqm_handle->service;
+ struct cqm_queue_s *common = container_of(object,
+ struct cqm_queue_s,
+ object);
+ struct cqm_nonrdma_qinfo_s *qinfo = container_of(
+ common,
+ struct cqm_nonrdma_qinfo_s,
+ common);
+ struct cqm_buf_s *q_room_buf = &common->q_room_buf_1;
+ u32 wqe_number = qinfo->common.object.object_size;
+ u32 wqe_size = qinfo->wqe_size;
+ u32 order = service->buf_order;
+ u32 buf_number = 0;
+ u32 buf_size = 0;
+ bool tail = false; /* Whether linkwqe is at the end of the page */
+
+	/* When a CQ/SCQ queue is created, the page size is 4K and the link
+	 * wqe must be at the end of the page
+	 */
+ if (object->object_type == CQM_OBJECT_NONRDMA_SCQ) {
+		/* The depth must be a power of two, in the range 256~32K */
+		if (CQM_NONRDMA_CHECK_WQE_NUMBER(wqe_number)) {
+			cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(wqe_number));
+			return CQM_FAIL;
+		}
+		if (!cqm_check_align(wqe_number)) {
+			cqm_err(handle->dev_hdl, "Nonrdma queue alloc: wqe_number is not a power of 2\n");
+			return CQM_FAIL;
+		}
+
+ order = CQM_4K_PAGE_ORDER; /* wqe page is 4k */
+ tail = true; /* linkwqe must be at the end of the page */
+ buf_size = CQM_4K_PAGE_SIZE;
+ } else {
+ buf_size = PAGE_SIZE << order;
+ }
+
+	/* Calculate how many buffers are required; the -1 deducts the link
+	 * wqe in each buffer
+	 */
+ qinfo->wqe_per_buf = (buf_size / wqe_size) - 1;
+ /* The depth from service includes the number of linkwqe */
+ buf_number = ALIGN((wqe_size * wqe_number), buf_size) / buf_size;
+ /* Allocate cqm buffer */
+ q_room_buf->buf_number = buf_number;
+ q_room_buf->buf_size = buf_size;
+ q_room_buf->page_number = (buf_number << order);
+ if (cqm_buf_alloc(cqm_handle, q_room_buf, false) == CQM_FAIL) {
+ cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_buf_alloc));
+ return CQM_FAIL;
+ }
+	/* Fill the link wqes; (wqe_number - buf_number) is the number of
+	 * wqes excluding the link wqes
+	 */
+ cqm_linkwqe_fill(q_room_buf, qinfo->wqe_per_buf, wqe_size,
+ wqe_number - buf_number, tail,
+ common->queue_link_mode);
+
+ /* Create queue header */
+ qinfo->common.q_header_vaddr =
+ (struct cqm_queue_header_s *)cqm_kmalloc_align(
+ sizeof(struct cqm_queue_header_s),
+ GFP_KERNEL | __GFP_ZERO, CQM_QHEAD_ALIGN_ORDER);
+ if (!qinfo->common.q_header_vaddr) {
+ cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(q_header_vaddr));
+ goto err1;
+ }
+
+ common->q_header_paddr =
+ pci_map_single(cqm_handle->dev,
+ qinfo->common.q_header_vaddr,
+ sizeof(struct cqm_queue_header_s),
+ PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(cqm_handle->dev, common->q_header_paddr)) {
+ cqm_err(handle->dev_hdl, CQM_MAP_FAIL(q_header_vaddr));
+ goto err2;
+ }
+
+ /* Create queue ctx */
+ if (cqm_nonrdma_queue_ctx_create(object) == CQM_FAIL) {
+ cqm_err(handle->dev_hdl,
+ CQM_FUNCTION_FAIL(cqm_nonrdma_queue_ctx_create));
+ goto err3;
+ }
+
+ return CQM_SUCCESS;
+
+err3:
+ pci_unmap_single(cqm_handle->dev, common->q_header_paddr,
+ sizeof(struct cqm_queue_header_s),
+ PCI_DMA_BIDIRECTIONAL);
+err2:
+ cqm_kfree_align(qinfo->common.q_header_vaddr);
+ qinfo->common.q_header_vaddr = NULL;
+err1:
+ cqm_buf_free(q_room_buf, cqm_handle->dev);
+ return CQM_FAIL;
+}
+
+static void cqm_nonrdma_queue_free_scq_srq(struct cqm_object_s *object,
+ struct cqm_cla_table_s *cla_table)
+{
+ struct cqm_handle_s *cqm_handle = (struct cqm_handle_s *)
+ object->cqm_handle;
+ struct cqm_queue_s *common = container_of(object,
+ struct cqm_queue_s,
+ object);
+ struct cqm_nonrdma_qinfo_s *qinfo =
+ container_of(common, struct cqm_nonrdma_qinfo_s, common);
+ struct cqm_buf_s *q_room_buf = &common->q_room_buf_1;
+ u32 index = qinfo->common.index;
+ u32 count = qinfo->index_count;
+ struct cqm_bitmap_s *bitmap = NULL;
+
+ /* If it is in TOE SRQ mode, delete the RQ */
+ if (common->queue_link_mode == CQM_QUEUE_TOE_SRQ_LINK_MODE) {
+ cqm_dbg("Nonrdma queue del: delete srq used rq\n");
+ cqm_srq_used_rq_delete(&common->object);
+ } else {
+		/* Free the q room buffer if it exists */
+ cqm_buf_free(q_room_buf, cqm_handle->dev);
+ }
+ /* Free SRQ or SCQ ctx */
+ if (object->object_type == CQM_OBJECT_NONRDMA_SRQ) {
+		/* The SRQ ctx for non-RDMA is allocated independently */
+ if (common->q_ctx_vaddr) {
+ pci_unmap_single(cqm_handle->dev, common->q_ctx_paddr,
+ qinfo->q_ctx_size,
+ PCI_DMA_BIDIRECTIONAL);
+
+ cqm_kfree_align(common->q_ctx_vaddr);
+ common->q_ctx_vaddr = NULL;
+ }
+ } else if (object->object_type == CQM_OBJECT_NONRDMA_SCQ) {
+		/* The SCQ ctx for non-RDMA is managed by BAT/CLA */
+ cqm_cla_put(cqm_handle, cla_table, index, count);
+
+ /* Release index into bitmap */
+ bitmap = &cla_table->bitmap;
+ cqm_bitmap_free(bitmap, index, count);
+ }
+}
+
+/**
+ * cqm_nonrdma_queue_delete - Free a queue for a non-RDMA service
+ * @object: cqm object
+ */
+void cqm_nonrdma_queue_delete(struct cqm_object_s *object)
+{
+ struct cqm_handle_s *cqm_handle = (struct cqm_handle_s *)
+ object->cqm_handle;
+ struct hifc_hwdev *handle = cqm_handle->ex_handle;
+ struct cqm_queue_s *common = container_of(object,
+ struct cqm_queue_s, object);
+ struct cqm_nonrdma_qinfo_s *qinfo = container_of(
+ common,
+ struct cqm_nonrdma_qinfo_s,
+ common);
+ struct cqm_bat_table_s *bat_table = &cqm_handle->bat_table;
+ struct cqm_cla_table_s *cla_table = NULL;
+ struct cqm_object_table_s *object_table = NULL;
+ u32 index = qinfo->common.index;
+
+ atomic_inc(&cqm_handle->ex_handle->hw_stats.cqm_stats.cqm_nonrdma_queue_delete_cnt);
+
+ /* SCQ has independent SCQN association */
+ if (object->object_type == CQM_OBJECT_NONRDMA_SCQ) {
+ cla_table = cqm_cla_table_get(bat_table, CQM_BAT_ENTRY_T_SCQC);
+ CQM_PTR_CHECK_NO_RET(
+ cla_table,
+ CQM_FUNCTION_FAIL(cqm_cla_table_get),
+ return);
+
+		/* Disassociate the index from the object */
+ object_table = &cla_table->obj_table;
+
+ cqm_object_table_remove(cqm_handle, object_table,
+ index, object);
+ }
+
+	/* Drop the local reference and wait for the completion so that all
+	 * references to the queue are released
+	 */
+ if (atomic_dec_and_test(&object->refcount))
+ complete(&object->free);
+ else
+ cqm_err(handle->dev_hdl, "Nonrdma queue del: object is referred by others, has to wait for completion\n");
+ wait_for_completion(&object->free);
+
+	/* Free the queue header if it exists */
+ if (qinfo->common.q_header_vaddr) {
+ pci_unmap_single(cqm_handle->dev, common->q_header_paddr,
+ sizeof(struct cqm_queue_header_s),
+ PCI_DMA_BIDIRECTIONAL);
+
+ cqm_kfree_align(qinfo->common.q_header_vaddr);
+ qinfo->common.q_header_vaddr = NULL;
+ }
+ cqm_nonrdma_queue_free_scq_srq(object, cla_table);
+}
+
+#define obj_extern_if_section
+
+/**
+ * cqm_object_qpc_mpt_create - Create a QPC or MPT
+ * @ex_handle: hw dev handle
+ * @object_type: must be CQM_OBJECT_SERVICE_CTX
+ * @object_size: object size in bytes
+ * @object_priv: the private structure for the service, can be NULL
+ * @index: the reserved qpn to use; pass CQM_INDEX_INVALID to allocate one
+ *         automatically
+ * Return: service ctx
+ */
+struct cqm_qpc_mpt_s *cqm_object_qpc_mpt_create(
+ void *ex_handle,
+ enum cqm_object_type_e object_type,
+ u32 object_size, void *object_priv,
+ u32 index)
+{
+ struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle;
+ struct cqm_handle_s *cqm_handle = NULL;
+ struct cqm_qpc_mpt_info_s *qpc_mpt_info = NULL;
+ s32 ret = CQM_FAIL;
+
+ CQM_PTR_CHECK_RET(ex_handle, return NULL, CQM_PTR_NULL(ex_handle));
+
+ atomic_inc(&handle->hw_stats.cqm_stats.cqm_qpc_mpt_create_cnt);
+
+ cqm_handle = (struct cqm_handle_s *)(handle->cqm_hdl);
+ CQM_PTR_CHECK_RET(cqm_handle, return NULL, CQM_PTR_NULL(cqm_handle));
+
+	/* If the service has not registered, return NULL */
+	if (!cqm_handle->service.has_register) {
+		cqm_err(handle->dev_hdl, "service is not registered\n");
+ return NULL;
+ }
+
+ if (object_type != CQM_OBJECT_SERVICE_CTX) {
+ cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(object_type));
+ return NULL;
+ }
+
+ qpc_mpt_info = (struct cqm_qpc_mpt_info_s *)
+ kmalloc(sizeof(struct cqm_qpc_mpt_info_s),
+ GFP_ATOMIC | __GFP_ZERO);
+ CQM_PTR_CHECK_RET(qpc_mpt_info, return NULL,
+ CQM_ALLOC_FAIL(qpc_mpt_info));
+
+ qpc_mpt_info->common.object.object_type = object_type;
+ qpc_mpt_info->common.object.object_size = object_size;
+ atomic_set(&qpc_mpt_info->common.object.refcount, 1);
+ init_completion(&qpc_mpt_info->common.object.free);
+ qpc_mpt_info->common.object.cqm_handle = cqm_handle;
+ qpc_mpt_info->common.xid = index;
+ qpc_mpt_info->common.priv = object_priv;
+
+ ret = cqm_qpc_mpt_create(&qpc_mpt_info->common.object);
+ if (ret == CQM_SUCCESS)
+ return &qpc_mpt_info->common;
+
+ cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_qpc_mpt_create));
+ kfree(qpc_mpt_info);
+ return NULL;
+}
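+
+/* Usage sketch (illustrative only, not part of the driver): a registered
+ * service might create its context with an automatically allocated xid;
+ * hwdev, ctx_size and priv are placeholders for the caller's handle, the
+ * context size in bytes and its private data:
+ *
+ *	struct cqm_qpc_mpt_s *ctx;
+ *
+ *	ctx = cqm_object_qpc_mpt_create(hwdev, CQM_OBJECT_SERVICE_CTX,
+ *					ctx_size, priv, CQM_INDEX_INVALID);
+ *	if (!ctx)
+ *		return -ENOMEM;
+ */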
+
+/**
+ * cqm_object_fc_srq_create - Create an RQ for FC. The number of valid wqes in
+ * the queue must meet the requested wqe number; because a link wqe can only
+ * be filled at the end of a page, the actual number of valid wqes may exceed
+ * the request, and the service is told the real number on creation.
+ * @ex_handle: hw dev handle
+ * @object_type: must be CQM_OBJECT_NONRDMA_SRQ
+ * @wqe_number: valid wqe number
+ * @wqe_size: wqe size
+ * @object_priv: the private structure for the service
+ * Return: srq structure
+ */
+struct cqm_queue_s *cqm_object_fc_srq_create(
+ void *ex_handle,
+ enum cqm_object_type_e object_type,
+ u32 wqe_number, u32 wqe_size,
+ void *object_priv)
+{
+ struct cqm_nonrdma_qinfo_s *nonrdma_qinfo = NULL;
+ struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle;
+ struct cqm_handle_s *cqm_handle = NULL;
+ struct cqm_service_s *service = NULL;
+ u32 valid_wqe_per_buffer = 0;
+ u32 wqe_sum = 0; /* includes linkwqe, normal wqe */
+ u32 buf_size = 0;
+ u32 buf_num = 0;
+ s32 ret = CQM_FAIL;
+
+ CQM_PTR_CHECK_RET(ex_handle, return NULL, CQM_PTR_NULL(ex_handle));
+
+ atomic_inc(&handle->hw_stats.cqm_stats.cqm_fc_srq_create_cnt);
+
+ cqm_handle = (struct cqm_handle_s *)(handle->cqm_hdl);
+ CQM_PTR_CHECK_RET(cqm_handle, return NULL, CQM_PTR_NULL(cqm_handle));
+
+	/* The service type must be FC and must have registered */
+	if (!cqm_handle->service.has_register) {
+		cqm_err(handle->dev_hdl, "service is not registered\n");
+		return NULL;
+	}
+
+	/* wqe_size must not exceed PAGE_SIZE, must not be 0, and must be a
+	 * power of two
+	 */
+	if ((wqe_size >= PAGE_SIZE) || !cqm_check_align(wqe_size)) {
+ cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(wqe_size));
+ return NULL;
+ }
+
+	/* FC's RQ is an SRQ. Unlike TOE's SRQ, for FC all packets received on
+	 * a stream are placed on the same RQ, whereas TOE's SRQ behaves like
+	 * a resource pool of RQs
+	 */
+ if (object_type != CQM_OBJECT_NONRDMA_SRQ) {
+ cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(object_type));
+ return NULL;
+ }
+
+ service = &cqm_handle->service;
+ buf_size = PAGE_SIZE << (service->buf_order);
+ valid_wqe_per_buffer = buf_size / wqe_size - 1; /* Minus 1 link wqe */
+ buf_num = wqe_number / valid_wqe_per_buffer;
+ if (wqe_number % valid_wqe_per_buffer != 0)
+ buf_num++;
+
+ /* Calculate the total number of all wqe */
+ wqe_sum = buf_num * (valid_wqe_per_buffer + 1);
+ nonrdma_qinfo = (struct cqm_nonrdma_qinfo_s *)
+ kmalloc(sizeof(struct cqm_nonrdma_qinfo_s),
+ GFP_KERNEL | __GFP_ZERO);
+
+ CQM_PTR_CHECK_RET(nonrdma_qinfo, return NULL,
+ CQM_ALLOC_FAIL(nonrdma_qinfo));
+
+ /* Initialize object members */
+ nonrdma_qinfo->common.object.object_type = object_type;
+ /* The total number of all wqe */
+ nonrdma_qinfo->common.object.object_size = wqe_sum;
+ atomic_set(&nonrdma_qinfo->common.object.refcount, 1);
+ init_completion(&nonrdma_qinfo->common.object.free);
+ nonrdma_qinfo->common.object.cqm_handle = cqm_handle;
+
+ /* Initialize the doorbell used by the current queue, default is the
+ * hardware doorbell
+ */
+ nonrdma_qinfo->common.current_q_doorbell = CQM_HARDWARE_DOORBELL;
+ nonrdma_qinfo->common.queue_link_mode = CQM_QUEUE_RING_MODE;
+
+ /* Initialize external public members */
+ nonrdma_qinfo->common.priv = object_priv;
+ nonrdma_qinfo->common.valid_wqe_num = wqe_sum - buf_num;
+
+ /* Initialize internal private members */
+ nonrdma_qinfo->wqe_size = wqe_size;
+ /* The SRQ for FC, which needs to create ctx */
+ nonrdma_qinfo->q_ctx_size = service->service_template.srq_ctx_size;
+
+ ret = cqm_nonrdma_queue_create(&nonrdma_qinfo->common.object);
+ if (ret == CQM_SUCCESS)
+ return &nonrdma_qinfo->common;
+ cqm_err(handle->dev_hdl,
+ CQM_FUNCTION_FAIL(cqm_nonrdma_queue_create));
+ kfree(nonrdma_qinfo);
+ return NULL;
+}
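+
+/* Worked example (illustrative only): with buf_order = 0 on a 4K-page system
+ * and wqe_size = 64, valid_wqe_per_buffer is 4096 / 64 - 1 = 63. Requesting
+ * wqe_number = 100 gives buf_num = 2 and wqe_sum = 2 * 64 = 128, of which
+ * valid_wqe_num = 128 - 2 = 126 are usable by the service. A caller (hwdev
+ * and priv are placeholders) might look like:
+ *
+ *	struct cqm_queue_s *srq;
+ *
+ *	srq = cqm_object_fc_srq_create(hwdev, CQM_OBJECT_NONRDMA_SRQ,
+ *				       100, 64, priv);
+ *	if (!srq)
+ *		return -ENOMEM;
+ */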
+
+static int cqm_object_nonrdma_queue_create_check(
+ void *ex_handle,
+ enum cqm_object_type_e object_type,
+ u32 wqe_size)
+{
+ struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle;
+ struct cqm_handle_s *cqm_handle = NULL;
+
+ CQM_PTR_CHECK_RET(ex_handle, return CQM_FAIL, CQM_PTR_NULL(ex_handle));
+
+ atomic_inc(&handle->hw_stats.cqm_stats.cqm_nonrdma_queue_create_cnt);
+
+ cqm_handle = (struct cqm_handle_s *)(handle->cqm_hdl);
+ CQM_PTR_CHECK_RET(cqm_handle, return CQM_FAIL,
+ CQM_PTR_NULL(cqm_handle));
+
+	/* If the service has not registered, return failure */
+	if (!cqm_handle->service.has_register) {
+		cqm_err(handle->dev_hdl, "service is not registered\n");
+ return CQM_FAIL;
+ }
+	/* wqe_size must not exceed PAGE_SIZE, must not be 0, and must be a
+	 * power of two; cqm_check_align() rejects 0, 1 and values that are
+	 * not a power of two
+	 */
+	if ((wqe_size >= PAGE_SIZE) || !cqm_check_align(wqe_size)) {
+ cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(wqe_size));
+ return CQM_FAIL;
+ }
+
+ /* Supported Nonrdma queue: RQ, SQ, SRQ, CQ, SCQ */
+ if ((object_type < CQM_OBJECT_NONRDMA_EMBEDDED_RQ) ||
+ (object_type > CQM_OBJECT_NONRDMA_SCQ)) {
+ cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(object_type));
+ return CQM_FAIL;
+ }
+
+ return CQM_SUCCESS;
+}
+
+/**
+ * cqm_object_nonrdma_queue_create - Create a queue for a non-RDMA service
+ * @ex_handle: hw dev handle
+ * @object_type: embedded RQ/SQ/CQ or SRQ/SCQ
+ * @wqe_number: wqe number, including the link wqe
+ * @wqe_size: wqe size, must be a power of two
+ * @object_priv: the private structure for the service, can be NULL
+ * Return: queue structure
+ */
+struct cqm_queue_s *cqm_object_nonrdma_queue_create(
+ void *ex_handle,
+ enum cqm_object_type_e object_type,
+ u32 wqe_number, u32 wqe_size,
+ void *object_priv)
+{
+ struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle;
+ struct cqm_handle_s *cqm_handle = NULL;
+ struct cqm_nonrdma_qinfo_s *nonrdma_qinfo = NULL;
+ struct cqm_service_s *service = NULL;
+ s32 ret = CQM_FAIL;
+
+	if (cqm_object_nonrdma_queue_create_check(ex_handle, object_type,
+						  wqe_size) == CQM_FAIL)
+		return NULL;
+
+	/* ex_handle was validated by the check above */
+	cqm_handle = (struct cqm_handle_s *)(handle->cqm_hdl);
+
+ nonrdma_qinfo = (struct cqm_nonrdma_qinfo_s *)
+ kmalloc(sizeof(struct cqm_nonrdma_qinfo_s),
+ GFP_KERNEL | __GFP_ZERO);
+ CQM_PTR_CHECK_RET(nonrdma_qinfo, return NULL,
+ CQM_ALLOC_FAIL(nonrdma_qinfo));
+
+ /* Initialize object members */
+ nonrdma_qinfo->common.object.object_type = object_type;
+ nonrdma_qinfo->common.object.object_size = wqe_number;
+ atomic_set(&nonrdma_qinfo->common.object.refcount, 1);
+ init_completion(&nonrdma_qinfo->common.object.free);
+ nonrdma_qinfo->common.object.cqm_handle = cqm_handle;
+
+ /* Initialize the doorbell used by the current queue, default is the
+ * hardware doorbell
+ */
+ nonrdma_qinfo->common.current_q_doorbell = CQM_HARDWARE_DOORBELL;
+ nonrdma_qinfo->common.queue_link_mode = CQM_QUEUE_RING_MODE;
+
+ /* Initialize external public members */
+ nonrdma_qinfo->common.priv = object_priv;
+
+ /* Initialize internal private members */
+ nonrdma_qinfo->wqe_size = wqe_size;
+ service = &cqm_handle->service;
+ switch (object_type) {
+ case CQM_OBJECT_NONRDMA_SCQ:
+ nonrdma_qinfo->q_ctx_size =
+ service->service_template.scq_ctx_size;
+ break;
+ case CQM_OBJECT_NONRDMA_SRQ:
+ /* The creation for SRQ uses a dedicated interface */
+ nonrdma_qinfo->q_ctx_size =
+ service->service_template.srq_ctx_size;
+ break;
+ default:
+ break;
+ }
+
+ ret = cqm_nonrdma_queue_create(&nonrdma_qinfo->common.object);
+ if (ret == CQM_SUCCESS)
+ return &nonrdma_qinfo->common;
+
+ cqm_err(handle->dev_hdl,
+ CQM_FUNCTION_FAIL(cqm_nonrdma_queue_create));
+ kfree(nonrdma_qinfo);
+ return NULL;
+}
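+
+/* Usage sketch (illustrative only): creating an SCQ with 4096 entries of
+ * 64 bytes each; wqe_number includes the link wqes and wqe_size must be a
+ * power of two. hwdev is a placeholder for the caller's handle:
+ *
+ *	struct cqm_queue_s *scq;
+ *
+ *	scq = cqm_object_nonrdma_queue_create(hwdev, CQM_OBJECT_NONRDMA_SCQ,
+ *					      4096, 64, NULL);
+ *	if (!scq)
+ *		return -ENOMEM;
+ */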
+
+s32 cqm_qpc_mpt_delete_ret(struct cqm_object_s *object)
+{
+ u32 object_type = 0;
+
+ object_type = object->object_type;
+ switch (object_type) {
+ case CQM_OBJECT_SERVICE_CTX:
+ cqm_qpc_mpt_delete(object);
+ return CQM_SUCCESS;
+ default:
+ return CQM_FAIL;
+ }
+}
+
+s32 cqm_nonrdma_queue_delete_ret(struct cqm_object_s *object)
+{
+ u32 object_type = 0;
+
+ object_type = object->object_type;
+ switch (object_type) {
+ case CQM_OBJECT_NONRDMA_SCQ:
+ case CQM_OBJECT_NONRDMA_SRQ:
+ cqm_nonrdma_queue_delete(object);
+ return CQM_SUCCESS;
+ default:
+ return CQM_FAIL;
+ }
+}
+
+/**
+ * cqm_object_delete - Delete a created object; the function will sleep and
+ * wait for all operations on the object to complete before returning
+ * @object: cqm object
+ */
+void cqm_object_delete(struct cqm_object_s *object)
+{
+ struct cqm_handle_s *cqm_handle = NULL;
+ struct hifc_hwdev *handle = NULL;
+
+ CQM_PTR_CHECK_NO_RET(object, CQM_PTR_NULL(object), return);
+ if (!object->cqm_handle) {
+ pr_err("[CQM]Obj del: cqm_handle is null, refcount %d\n",
+ (int)object->refcount.counter);
+ kfree(object);
+ return;
+ }
+ cqm_handle = (struct cqm_handle_s *)object->cqm_handle;
+
+ if (!cqm_handle->ex_handle) {
+ pr_err("[CQM]Obj del: ex_handle is null, refcount %d\n",
+ (int)object->refcount.counter);
+ kfree(object);
+ return;
+ }
+ handle = cqm_handle->ex_handle;
+
+ if (cqm_qpc_mpt_delete_ret(object) == CQM_SUCCESS) {
+ kfree(object);
+ return;
+ }
+
+ if (cqm_nonrdma_queue_delete_ret(object) == CQM_SUCCESS) {
+ kfree(object);
+ return;
+ }
+
+ cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(object->object_type));
+ kfree(object);
+}
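+
+/* Usage sketch (illustrative only): objects created by the helpers above are
+ * released with cqm_object_delete(), which blocks until every reference to
+ * the object has been dropped:
+ *
+ *	cqm_object_delete(&scq->object);
+ *
+ * where scq is the cqm_queue_s (or cqm_qpc_mpt_s) returned by a create call.
+ */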
diff --git a/drivers/scsi/huawei/hifc/hifc_cqm_object.h b/drivers/scsi/huawei/hifc/hifc_cqm_object.h
new file mode 100644
index 000000000000..308166ddd534
--- /dev/null
+++ b/drivers/scsi/huawei/hifc/hifc_cqm_object.h
@@ -0,0 +1,244 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei Hifc PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ */
+
+#ifndef __CQM_OBJECT_H__
+#define __CQM_OBJECT_H__
+
+#define CLA_TABLE_PAGE_ORDER (0)
+#define CQM_4K_PAGE_ORDER (0)
+
+#define CQM_CQ_DEPTH_MAX (32768)
+#define CQM_CQ_DEPTH_MIN (256)
+#define CQM_BAT_SIZE_FT_PF (192)
+
+#define CQM_WQE_WF_LINK 1
+#define CQM_WQE_WF_NORMAL 0
+#define CQM_QUEUE_LINK_MODE 0
+#define CQM_QUEUE_RING_MODE 1
+#define CQM_4K_PAGE_SIZE 4096
+
+#define CQM_SUCCESS 0
+#define CQM_FAIL -1
+#define CQM_QUEUE_TOE_SRQ_LINK_MODE 2
+#define CQM_CMD_TIMEOUT 10000 /*ms*/
+
+#define CQM_INDEX_INVALID ~(0U)
+#define CQM_INDEX_RESERVED (0xfffff) /* reserved by cqm alloc */
+
+enum cqm_bat_entry_type_e {
+ CQM_BAT_ENTRY_T_CFG = 0,
+ CQM_BAT_ENTRY_T_HASH,
+ CQM_BAT_ENTRY_T_QPC,
+ CQM_BAT_ENTRY_T_SCQC,
+ CQM_BAT_ENTRY_T_SRQC,
+ CQM_BAT_ENTRY_T_MPT,
+ CQM_BAT_ENTRY_T_GID,
+ CQM_BAT_ENTRY_T_LUN,
+ CQM_BAT_ENTRY_T_TASKMAP,
+ CQM_BAT_ENTRY_T_L3I,
+ CQM_BAT_ENTRY_T_CHILDC,
+ CQM_BAT_ENTRY_T_TIMER,
+ CQM_BAT_ENTRY_T_XID2CID,
+ CQM_BAT_ENTRY_T_REORDER,
+
+ CQM_BAT_ENTRY_T_INVALID = 0xff,
+};
+
+enum cqm_cmd_type_e {
+ CQM_CMD_T_INVALID = 0,
+ CQM_CMD_T_BAT_UPDATE,
+ CQM_CMD_T_CLA_UPDATE,
+ CQM_CMD_T_BLOOMFILTER_SET,
+ CQM_CMD_T_BLOOMFILTER_CLEAR,
+ CQM_CMD_T_COMPACT_SRQ_UPDATE,
+ CQM_CMD_T_CLA_CACHE_INVALID,
+ CQM_CMD_T_BLOOMFILTER_INIT,
+ QM_CMD_T_MAX
+};
+
+/*linkwqe*/
+#define CQM_LINK_WQE_CTRLSL_VALUE 2
+#define CQM_LINK_WQE_LP_VALID 1
+#define CQM_LINK_WQE_LP_INVALID 0
+#define CQM_LINK_WQE_OWNER_VALID 1
+#define CQM_LINK_WQE_OWNER_INVALID 0
+
+/*CLA update mode*/
+#define CQM_CLA_RECORD_NEW_GPA 0
+#define CQM_CLA_DEL_GPA_WITHOUT_CACHE_INVALID 1
+#define CQM_CLA_DEL_GPA_WITH_CACHE_INVALID 2
+
+#define CQM_CLA_LVL_0 0
+#define CQM_CLA_LVL_1 1
+#define CQM_CLA_LVL_2 2
+
+#define CQM_MAX_INDEX_BIT 19
+#define CQM_CHIP_CACHELINE 256
+enum cqm_cmd_ack_type_e {
+ CQM_CMD_ACK_TYPE_CMDQ = 0, /* ack: write back to cmdq */
+	CQM_CMD_ACK_TYPE_SHARE_CQN = 1, /* ack: report scq by root ctx */
+ CQM_CMD_ACK_TYPE_APP_CQN = 2 /* ack report scq by parent ctx */
+};
+
+struct cqm_bat_entry_cfg_s {
+ u32 cur_conn_num_h_4 :4;
+ u32 rsv1 :4;
+ u32 max_conn_num :20;
+ u32 rsv2 :4;
+
+ u32 max_conn_cache :10;
+ u32 rsv3 :6;
+ u32 cur_conn_num_l_16 :16;
+
+ u32 bloom_filter_addr :16;
+ u32 cur_conn_cache :10;
+ u32 rsv4 :6;
+
+ u32 bucket_num :16;
+ u32 bloom_filter_len :16;
+};
+
+#define CQM_BAT_NO_BYPASS_CACHE 0
+#define CQM_BAT_ENTRY_SIZE_256 0
+#define CQM_BAT_ENTRY_SIZE_512 1
+#define CQM_BAT_ENTRY_SIZE_1024 2
+
+struct cqm_bat_entry_standerd_s {
+ u32 entry_size :2;
+ u32 rsv1 :6;
+ u32 max_number :20;
+ u32 rsv2 :4;
+
+ u32 cla_gpa_h :32;
+
+ u32 cla_gpa_l :32;
+
+ u32 rsv3 :8;
+ u32 z :5;
+ u32 y :5;
+ u32 x :5;
+ u32 rsv24 :1;
+ u32 bypass :1;
+ u32 cla_level :2;
+ u32 rsv5 :5;
+};
+
+struct cqm_bat_entry_taskmap_s {
+ u32 gpa0_h;
+ u32 gpa0_l;
+
+ u32 gpa1_h;
+ u32 gpa1_l;
+
+ u32 gpa2_h;
+ u32 gpa2_l;
+
+ u32 gpa3_h;
+ u32 gpa3_l;
+};
+
+struct cqm_cla_cache_invalid_cmd_s {
+ u32 gpa_h;
+ u32 gpa_l;
+ u32 cache_size;/* CLA cache size=4096B */
+};
+
+struct cqm_cla_update_cmd_s {
+ /* need to update gpa addr */
+ u32 gpa_h;
+ u32 gpa_l;
+
+ /* update value */
+ u32 value_h;
+ u32 value_l;
+};
+
+struct cqm_bat_update_cmd_s {
+#define CQM_BAT_MAX_SIZE 256
+	u32 offset; /* byte offset, 16-byte aligned */
+	u32 byte_len; /* max size: 256 bytes */
+ u8 data[CQM_BAT_MAX_SIZE];
+};
+
+struct cqm_handle_s;
+
+struct cqm_linkwqe_s {
+ u32 rsv1 :14;
+ u32 wf :1;
+ u32 rsv2 :14;
+ u32 ctrlsl :2;
+ u32 o :1;
+
+ u32 rsv3 :31;
+ u32 lp :1;
+
+ u32 next_page_gpa_h;
+ u32 next_page_gpa_l;
+
+ u32 next_buffer_addr_h;
+ u32 next_buffer_addr_l;
+};
+
+struct cqm_srq_linkwqe_s {
+ struct cqm_linkwqe_s linkwqe;
+	/* additional fields for srq */
+ u32 current_buffer_gpa_h;
+ u32 current_buffer_gpa_l;
+ u32 current_buffer_addr_h;
+ u32 current_buffer_addr_l;
+
+ u32 fast_link_page_addr_h;
+ u32 fast_link_page_addr_l;
+
+ u32 fixed_next_buffer_addr_h;
+ u32 fixed_next_buffer_addr_l;
+};
+
+union cqm_linkwqe_first_64b_s {
+ struct cqm_linkwqe_s basic_linkwqe;
+ u32 value[16];
+};
+
+struct cqm_linkwqe_second_64b_s {
+ u32 rsvd0[4];
+ u32 rsvd1[4];
+ union {
+ struct {
+ u32 rsvd0[3];
+ u32 rsvd1 :29;
+ u32 toe_o :1;
+ u32 resvd2 :2;
+ } bs;
+ u32 value[4];
+ } third_16B;
+
+ union {
+ struct {
+ u32 rsvd0[2];
+ u32 rsvd1 :31;
+ u32 ifoe_o :1;
+ u32 rsvd2;
+ } bs;
+ u32 value[4];
+ } forth_16B;
+};
+
+struct cqm_linkwqe_128b_s {
+ union cqm_linkwqe_first_64b_s first_64b;
+ struct cqm_linkwqe_second_64b_s second_64b;
+};
+
+s32 cqm_bat_init(struct cqm_handle_s *cqm_handle);
+void cqm_bat_uninit(struct cqm_handle_s *cqm_handle);
+s32 cqm_cla_init(struct cqm_handle_s *cqm_handle);
+void cqm_cla_uninit(struct cqm_handle_s *cqm_handle);
+s32 cqm_bitmap_init(struct cqm_handle_s *cqm_handle);
+void cqm_bitmap_uninit(struct cqm_handle_s *cqm_handle);
+s32 cqm_object_table_init(struct cqm_handle_s *cqm_handle);
+void cqm_object_table_uninit(struct cqm_handle_s *cqm_handle);
+
+#endif /* __CQM_OBJECT_H__ */
diff --git a/drivers/scsi/huawei/hifc/hifc_eqs.c b/drivers/scsi/huawei/hifc/hifc_eqs.c
new file mode 100644
index 000000000000..803866e1fbf9
--- /dev/null
+++ b/drivers/scsi/huawei/hifc/hifc_eqs.c
@@ -0,0 +1,1347 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei Hifc PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+
+#include "hifc_knl_adp.h"
+#include "hifc_hw.h"
+#include "hifc_hwif.h"
+#include "hifc_api_cmd.h"
+#include "hifc_mgmt.h"
+#include "hifc_hwdev.h"
+#include "hifc_eqs.h"
+
+#define HIFC_EQS_WQ_NAME "hifc_eqs"
+
+#define AEQ_CTRL_0_INTR_IDX_SHIFT 0
+#define AEQ_CTRL_0_FUNC_BUSY_SHIFT 10
+#define AEQ_CTRL_0_DMA_ATTR_SHIFT 12
+#define AEQ_CTRL_0_PCI_INTF_IDX_SHIFT 20
+#define AEQ_CTRL_0_QPS_NUM_SHIFT 22
+#define AEQ_CTRL_0_INTR_MODE_SHIFT 31
+
+#define AEQ_CTRL_0_INTR_IDX_MASK 0x3FFU
+#define AEQ_CTRL_0_FUNC_BUSY_MASK 0x1U
+#define AEQ_CTRL_0_DMA_ATTR_MASK 0x3FU
+#define AEQ_CTRL_0_PCI_INTF_IDX_MASK 0x3U
+#define AEQ_CTRL_0_QPS_NUM_MASK 0xFFU
+#define AEQ_CTRL_0_INTR_MODE_MASK 0x1U
+
+#define AEQ_CTRL_0_GET(val, member) \
+ (((val) >> AEQ_CTRL_0_##member##_SHIFT) & \
+ AEQ_CTRL_0_##member##_MASK)
+
+#define AEQ_CTRL_0_SET(val, member) \
+ (((val) & AEQ_CTRL_0_##member##_MASK) << \
+ AEQ_CTRL_0_##member##_SHIFT)
+
+#define AEQ_CTRL_0_CLEAR(val, member) \
+ ((val) & (~(AEQ_CTRL_0_##member##_MASK \
+ << AEQ_CTRL_0_##member##_SHIFT)))
+
+#define AEQ_CTRL_1_LEN_SHIFT 0
+#define AEQ_CTRL_1_FUNC_OWN_SHIFT 21
+#define AEQ_CTRL_1_ELEM_SIZE_SHIFT 24
+#define AEQ_CTRL_1_PAGE_SIZE_SHIFT 28
+
+#define AEQ_CTRL_1_LEN_MASK 0x1FFFFFU
+#define AEQ_CTRL_1_FUNC_OWN_MASK 0x1U
+#define AEQ_CTRL_1_ELEM_SIZE_MASK 0x3U
+#define AEQ_CTRL_1_PAGE_SIZE_MASK 0xFU
+
+#define AEQ_CTRL_1_GET(val, member) \
+ (((val) >> AEQ_CTRL_1_##member##_SHIFT) & \
+ AEQ_CTRL_1_##member##_MASK)
+
+#define AEQ_CTRL_1_SET(val, member) \
+ (((val) & AEQ_CTRL_1_##member##_MASK) << \
+ AEQ_CTRL_1_##member##_SHIFT)
+
+#define AEQ_CTRL_1_CLEAR(val, member) \
+ ((val) & (~(AEQ_CTRL_1_##member##_MASK \
+ << AEQ_CTRL_1_##member##_SHIFT)))
+
+#define HIFC_EQ_PROD_IDX_MASK 0xFFFFF
+#define HIFC_TASK_PROCESS_EQE_LIMIT 1024
+#define HIFC_EQ_UPDATE_CI_STEP 64
+
+static uint g_aeq_len = HIFC_DEFAULT_AEQ_LEN;
+module_param(g_aeq_len, uint, 0444);
+MODULE_PARM_DESC(g_aeq_len,
+ "aeq depth, valid range is " __stringify(HIFC_MIN_AEQ_LEN)
+ " - " __stringify(HIFC_MAX_AEQ_LEN));
+
+static uint g_ceq_len = HIFC_DEFAULT_CEQ_LEN;
+module_param(g_ceq_len, uint, 0444);
+MODULE_PARM_DESC(g_ceq_len,
+ "ceq depth, valid range is " __stringify(HIFC_MIN_CEQ_LEN)
+ " - " __stringify(HIFC_MAX_CEQ_LEN));
+
+static uint g_num_ceqe_in_tasklet = HIFC_TASK_PROCESS_EQE_LIMIT;
+module_param(g_num_ceqe_in_tasklet, uint, 0444);
+MODULE_PARM_DESC(g_num_ceqe_in_tasklet,
+		 "The maximum number of ceqes that can be processed in one tasklet, default = 1024");
+
+#define CEQ_CTRL_0_INTR_IDX_SHIFT 0
+#define CEQ_CTRL_0_DMA_ATTR_SHIFT 12
+#define CEQ_CTRL_0_LIMIT_KICK_SHIFT 20
+#define CEQ_CTRL_0_PCI_INTF_IDX_SHIFT 24
+#define CEQ_CTRL_0_INTR_MODE_SHIFT 31
+
+#define CEQ_CTRL_0_INTR_IDX_MASK 0x3FFU
+#define CEQ_CTRL_0_DMA_ATTR_MASK 0x3FU
+#define CEQ_CTRL_0_LIMIT_KICK_MASK 0xFU
+#define CEQ_CTRL_0_PCI_INTF_IDX_MASK 0x3U
+#define CEQ_CTRL_0_INTR_MODE_MASK 0x1U
+
+#define CEQ_CTRL_0_SET(val, member) \
+ (((val) & CEQ_CTRL_0_##member##_MASK) << \
+ CEQ_CTRL_0_##member##_SHIFT)
+
+#define CEQ_CTRL_1_LEN_SHIFT 0
+#define CEQ_CTRL_1_PAGE_SIZE_SHIFT 28
+#define CEQ_CTRL_1_LEN_MASK 0x1FFFFFU
+#define CEQ_CTRL_1_PAGE_SIZE_MASK 0xFU
+
+#define CEQ_CTRL_1_SET(val, member) \
+ (((val) & CEQ_CTRL_1_##member##_MASK) << \
+ CEQ_CTRL_1_##member##_SHIFT)
+
+#define EQ_ELEM_DESC_TYPE_SHIFT 0
+#define EQ_ELEM_DESC_SRC_SHIFT 7
+#define EQ_ELEM_DESC_SIZE_SHIFT 8
+#define EQ_ELEM_DESC_WRAPPED_SHIFT 31
+#define EQ_ELEM_DESC_TYPE_MASK 0x7FU
+#define EQ_ELEM_DESC_SRC_MASK 0x1U
+#define EQ_ELEM_DESC_SIZE_MASK 0xFFU
+#define EQ_ELEM_DESC_WRAPPED_MASK 0x1U
+
+#define EQ_ELEM_DESC_GET(val, member) \
+ (((val) >> EQ_ELEM_DESC_##member##_SHIFT) & \
+ EQ_ELEM_DESC_##member##_MASK)
+
+#define EQ_CONS_IDX_CONS_IDX_SHIFT 0
+#define EQ_CONS_IDX_XOR_CHKSUM_SHIFT 24
+#define EQ_CONS_IDX_INT_ARMED_SHIFT 31
+#define EQ_CONS_IDX_CONS_IDX_MASK 0x1FFFFFU
+#define EQ_CONS_IDX_XOR_CHKSUM_MASK 0xFU
+#define EQ_CONS_IDX_INT_ARMED_MASK 0x1U
+
+#define EQ_CONS_IDX_SET(val, member) \
+ (((val) & EQ_CONS_IDX_##member##_MASK) << \
+ EQ_CONS_IDX_##member##_SHIFT)
+
+#define EQ_CONS_IDX_CLEAR(val, member) \
+ ((val) & (~(EQ_CONS_IDX_##member##_MASK \
+ << EQ_CONS_IDX_##member##_SHIFT)))
+
+#define EQ_WRAPPED(eq) ((u32)(eq)->wrapped << EQ_VALID_SHIFT)
+
+#define EQ_CONS_IDX(eq) ((eq)->cons_idx | \
+ ((u32)(eq)->wrapped << EQ_WRAPPED_SHIFT))
+
+#define EQ_CONS_IDX_REG_ADDR(eq) (((eq)->type == HIFC_AEQ) ? \
+ HIFC_CSR_AEQ_CONS_IDX_ADDR((eq)->q_id) : \
+ HIFC_CSR_CEQ_CONS_IDX_ADDR((eq)->q_id))
+
+#define EQ_PROD_IDX_REG_ADDR(eq) (((eq)->type == HIFC_AEQ) ? \
+ HIFC_CSR_AEQ_PROD_IDX_ADDR((eq)->q_id) : \
+ HIFC_CSR_CEQ_PROD_IDX_ADDR((eq)->q_id))
+
+#define GET_EQ_NUM_PAGES(eq, size) \
+ ((u16)(ALIGN((u32)((eq)->eq_len * (eq)->elem_size), \
+ (size)) / (size)))
+
+#define GET_EQ_NUM_ELEMS(eq, pg_size) ((pg_size) / (u32)(eq)->elem_size)
+
+#define GET_EQ_ELEMENT(eq, idx) \
+ (((u8 *)(eq)->virt_addr[(idx) / (eq)->num_elem_in_pg]) + \
+ (u32)(((idx) & ((eq)->num_elem_in_pg - 1)) * (eq)->elem_size))
+
+#define GET_AEQ_ELEM(eq, idx) ((struct hifc_aeq_elem *)\
+ GET_EQ_ELEMENT((eq), (idx)))
+
+#define GET_CEQ_ELEM(eq, idx) ((u32 *)GET_EQ_ELEMENT((eq), (idx)))
+
+#define GET_CURR_AEQ_ELEM(eq) GET_AEQ_ELEM((eq), (eq)->cons_idx)
+
+#define GET_CURR_CEQ_ELEM(eq) GET_CEQ_ELEM((eq), (eq)->cons_idx)
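+
+/* Worked example (illustrative figures, not part of the driver): for an eq
+ * with eq_len = 4096, elem_size = 64 and page_size = 4096, num_elem_in_pg is
+ * 4096 / 64 = 64 and GET_EQ_NUM_PAGES yields 64 pages. Element index 130
+ * then resolves to virt_addr[130 / 64] = page 2, at byte offset
+ * (130 & 63) * 64 = 128 within that page.
+ */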
+
+#define PAGE_IN_4K(page_size) ((page_size) >> 12)
+#define EQ_SET_HW_PAGE_SIZE_VAL(eq) \
+ ((u32)ilog2(PAGE_IN_4K((eq)->page_size)))
+
+#define ELEMENT_SIZE_IN_32B(eq) (((eq)->elem_size) >> 5)
+#define EQ_SET_HW_ELEM_SIZE_VAL(eq) ((u32)ilog2(ELEMENT_SIZE_IN_32B(eq)))
+
+#define AEQ_DMA_ATTR_DEFAULT 0
+#define CEQ_DMA_ATTR_DEFAULT 0
+#define CEQ_LMT_KICK_DEFAULT 0
+#define EQ_MSIX_RESEND_TIMER_CLEAR 1
+#define EQ_WRAPPED_SHIFT 20
+#define EQ_VALID_SHIFT 31
+#define CEQE_TYPE_SHIFT 23
+#define CEQE_TYPE_MASK 0x7
+
+#define CEQE_TYPE(type) (((type) >> CEQE_TYPE_SHIFT) & \
+ CEQE_TYPE_MASK)
+#define CEQE_DATA_MASK 0x3FFFFFF
+#define CEQE_DATA(data) ((data) & CEQE_DATA_MASK)
+#define EQ_MIN_PAGE_SIZE 0x1000U
+#define aeq_to_aeqs(eq) \
+ container_of((eq) - (eq)->q_id, struct hifc_aeqs, aeq[0])
+
+#define ceq_to_ceqs(eq) \
+ container_of((eq) - (eq)->q_id, struct hifc_ceqs, ceq[0])
+
+/**
+ * aeq_interrupt - aeq interrupt handler
+ * @irq: irq number
+ * @data: the async event queue of the event
+ **/
+static irqreturn_t aeq_interrupt(int irq, void *data)
+{
+ struct hifc_eq *aeq = (struct hifc_eq *)data;
+ struct hifc_hwdev *hwdev = aeq->hwdev;
+
+ struct hifc_aeqs *aeqs = aeq_to_aeqs(aeq);
+ struct workqueue_struct *workq = aeqs->workq;
+ struct hifc_eq_work *aeq_work;
+
+ /* clear resend timer cnt register */
+ hifc_misx_intr_clear_resend_bit(hwdev, aeq->eq_irq.msix_entry_idx,
+ EQ_MSIX_RESEND_TIMER_CLEAR);
+
+ aeq_work = &aeq->aeq_work;
+ aeq_work->data = aeq;
+
+ queue_work(workq, &aeq_work->work);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ceq_interrupt - ceq interrupt handler
+ * @irq: irq number
+ * @data: the completion event queue of the event
+ **/
+static irqreturn_t ceq_interrupt(int irq, void *data)
+{
+ struct hifc_eq *ceq = (struct hifc_eq *)data;
+ struct hifc_ceq_tasklet_data *ceq_tasklet_data;
+
+ ceq->hard_intr_jif = jiffies;
+
+ /* clear resend timer counters */
+ hifc_misx_intr_clear_resend_bit(ceq->hwdev, ceq->eq_irq.msix_entry_idx,
+ EQ_MSIX_RESEND_TIMER_CLEAR);
+
+ ceq_tasklet_data = &ceq->ceq_tasklet_data;
+ ceq_tasklet_data->data = data;
+ tasklet_schedule(&ceq->ceq_tasklet);
+
+ return IRQ_HANDLED;
+}
+
+static u8 eq_cons_idx_checksum_set(u32 val)
+{
+ u8 checksum = 0;
+ u8 idx;
+
+ for (idx = 0; idx < 32; idx += 4)
+ checksum ^= ((val >> idx) & 0xF);
+
+ return checksum & 0xF;
+}
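+
+/* Worked example (illustrative only): for val = 0x00123456 the 4-bit groups
+ * are 6, 5, 4, 3, 2, 1, 0, 0 and their XOR is 0x7, which set_eq_cons_idx()
+ * folds into the XOR_CHKSUM field of the value written to the consumer index
+ * register.
+ */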
+
+/**
+ * hifc_aeq_register_hw_cb - register aeq callback for specific event
+ * @hwdev: pointer to hw device
+ * @event: event for the handler
+ * @hwe_cb: callback function
+ * Return: 0 - success, negative - failure
+ **/
+int hifc_aeq_register_hw_cb(void *hwdev, enum hifc_aeq_type event,
+ hifc_aeq_hwe_cb hwe_cb)
+{
+ struct hifc_aeqs *aeqs;
+
+ if (!hwdev || !hwe_cb || event >= HIFC_MAX_AEQ_EVENTS)
+ return -EINVAL;
+
+ aeqs = ((struct hifc_hwdev *)hwdev)->aeqs;
+
+ aeqs->aeq_hwe_cb[event] = hwe_cb;
+
+ set_bit(HIFC_AEQ_HW_CB_REG, &aeqs->aeq_hw_cb_state[event]);
+
+ return 0;
+}
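+
+/* Usage sketch (illustrative only): registration is paired with the matching
+ * unregister call; my_aeq_handler is a placeholder matching the
+ * hifc_aeq_hwe_cb prototype:
+ *
+ *	err = hifc_aeq_register_hw_cb(hwdev, HIFC_HW_INTER_INT,
+ *				      my_aeq_handler);
+ *	...
+ *	hifc_aeq_unregister_hw_cb(hwdev, HIFC_HW_INTER_INT);
+ */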
+
+/**
+ * hifc_aeq_unregister_hw_cb - unregister the aeq callback for specific event
+ * @hwdev: pointer to hw device
+ * @event: event for the handler
+ **/
+void hifc_aeq_unregister_hw_cb(void *hwdev, enum hifc_aeq_type event)
+{
+ struct hifc_aeqs *aeqs;
+
+ if (!hwdev || event >= HIFC_MAX_AEQ_EVENTS)
+ return;
+
+ aeqs = ((struct hifc_hwdev *)hwdev)->aeqs;
+
+ clear_bit(HIFC_AEQ_HW_CB_REG, &aeqs->aeq_hw_cb_state[event]);
+
+ while (test_bit(HIFC_AEQ_HW_CB_RUNNING, &aeqs->aeq_hw_cb_state[event]))
+ usleep_range(900, 1000);
+
+ aeqs->aeq_hwe_cb[event] = NULL;
+}
+
+/**
+ * hifc_aeq_register_swe_cb - register aeq callback for sw event
+ * @hwdev: pointer to hw device
+ * @event: soft event for the handler
+ * @aeq_swe_cb: callback function
+ * Return: 0 - success, negative - failure
+ **/
+int hifc_aeq_register_swe_cb(void *hwdev, enum hifc_aeq_sw_type event,
+ hifc_aeq_swe_cb aeq_swe_cb)
+{
+ struct hifc_aeqs *aeqs;
+
+ if (!hwdev || !aeq_swe_cb || event >= HIFC_MAX_AEQ_SW_EVENTS)
+ return -EINVAL;
+
+ aeqs = ((struct hifc_hwdev *)hwdev)->aeqs;
+
+ aeqs->aeq_swe_cb[event] = aeq_swe_cb;
+
+ set_bit(HIFC_AEQ_SW_CB_REG, &aeqs->aeq_sw_cb_state[event]);
+
+ return 0;
+}
+
+/**
+ * hifc_aeq_unregister_swe_cb - unregister the aeq callback for sw event
+ * @hwdev: pointer to hw device
+ * @event: soft event for the handler
+ **/
+void hifc_aeq_unregister_swe_cb(void *hwdev, enum hifc_aeq_sw_type event)
+{
+ struct hifc_aeqs *aeqs;
+
+ if (!hwdev || event >= HIFC_MAX_AEQ_SW_EVENTS)
+ return;
+
+ aeqs = ((struct hifc_hwdev *)hwdev)->aeqs;
+
+ clear_bit(HIFC_AEQ_SW_CB_REG, &aeqs->aeq_sw_cb_state[event]);
+
+ while (test_bit(HIFC_AEQ_SW_CB_RUNNING, &aeqs->aeq_sw_cb_state[event]))
+ usleep_range(900, 1000);
+
+ aeqs->aeq_swe_cb[event] = NULL;
+}
+
+/**
+ * hifc_ceq_register_cb - register ceq callback for specific event
+ * @hwdev: pointer to hw device
+ * @event: event for the handler
+ * @callback: callback function
+ * Return: 0 - success, negative - failure
+ **/
+int hifc_ceq_register_cb(void *hwdev, enum hifc_ceq_event event,
+ hifc_ceq_event_cb callback)
+{
+ struct hifc_ceqs *ceqs;
+
+ if (!hwdev || event >= HIFC_MAX_CEQ_EVENTS)
+ return -EINVAL;
+
+ ceqs = ((struct hifc_hwdev *)hwdev)->ceqs;
+
+ ceqs->ceq_cb[event] = callback;
+
+ set_bit(HIFC_CEQ_CB_REG, &ceqs->ceq_cb_state[event]);
+
+ return 0;
+}
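+
+/* Usage sketch (illustrative only): my_ceq_handler is a placeholder matching
+ * the hifc_ceq_event_cb prototype and event is a value of enum
+ * hifc_ceq_event:
+ *
+ *	err = hifc_ceq_register_cb(hwdev, event, my_ceq_handler);
+ *	...
+ *	hifc_ceq_unregister_cb(hwdev, event);
+ */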
+
+/**
+ * hifc_ceq_unregister_cb - unregister ceq callback for specific event
+ * @hwdev: pointer to hw device
+ * @event: event for the handler
+ **/
+void hifc_ceq_unregister_cb(void *hwdev, enum hifc_ceq_event event)
+{
+ struct hifc_ceqs *ceqs;
+
+ if (!hwdev || event >= HIFC_MAX_CEQ_EVENTS)
+ return;
+
+ ceqs = ((struct hifc_hwdev *)hwdev)->ceqs;
+
+ clear_bit(HIFC_CEQ_CB_REG, &ceqs->ceq_cb_state[event]);
+
+ while (test_bit(HIFC_CEQ_CB_RUNNING, &ceqs->ceq_cb_state[event]))
+ usleep_range(900, 1000);
+
+ ceqs->ceq_cb[event] = NULL;
+}
+
+/**
+ * set_eq_cons_idx - write the cons idx to the hw
+ * @eq: The event queue to update the cons idx for
+ * @arm_state: arm state value
+ **/
+static void set_eq_cons_idx(struct hifc_eq *eq, u32 arm_state)
+{
+ u32 eq_wrap_ci, val;
+ u32 addr = EQ_CONS_IDX_REG_ADDR(eq);
+
+ eq_wrap_ci = EQ_CONS_IDX(eq);
+
+	/* other fields are reserved, set to 0 */
+ val = EQ_CONS_IDX_SET(eq_wrap_ci, CONS_IDX) |
+ EQ_CONS_IDX_SET(arm_state, INT_ARMED);
+
+ val |= EQ_CONS_IDX_SET(eq_cons_idx_checksum_set(val), XOR_CHKSUM);
+
+ hifc_hwif_write_reg(eq->hwdev->hwif, addr, val);
+}
+
+/**
+ * ceq_event_handler - handle for the ceq events
+ * @eqs: eqs part of the chip
+ * @ceqe: ceq element of the event
+ **/
+static void ceq_event_handler(struct hifc_ceqs *ceqs, u32 ceqe)
+{
+ struct hifc_hwdev *hwdev = ceqs->hwdev;
+ enum hifc_ceq_event event = CEQE_TYPE(ceqe);
+ u32 ceqe_data = CEQE_DATA(ceqe);
+
+ if (event >= HIFC_MAX_CEQ_EVENTS) {
+		sdk_err(hwdev->dev_hdl, "Ceq unknown event: %d, ceqe data: 0x%x\n",
+			event, ceqe_data);
+ return;
+ }
+
+ set_bit(HIFC_CEQ_CB_RUNNING, &ceqs->ceq_cb_state[event]);
+
+ if (ceqs->ceq_cb[event] &&
+ test_bit(HIFC_CEQ_CB_REG, &ceqs->ceq_cb_state[event]))
+ ceqs->ceq_cb[event](hwdev, ceqe_data);
+
+ clear_bit(HIFC_CEQ_CB_RUNNING, &ceqs->ceq_cb_state[event]);
+}
+
+static void aeq_swe_handler(struct hifc_aeqs *aeqs,
+ struct hifc_aeq_elem *aeqe_pos,
+ enum hifc_aeq_type event)
+{
+ enum hifc_ucode_event_type ucode_event;
+ enum hifc_aeq_sw_type sw_event;
+ u64 aeqe_data;
+ u8 lev;
+
+ ucode_event = event;
+ /* SW event uses only the first 8B */
+ sw_event = ucode_event >= HIFC_NIC_FATAL_ERROR_MAX ?
+ HIFC_STATEFULL_EVENT :
+ HIFC_STATELESS_EVENT;
+ aeqe_data = be64_to_cpu((*(u64 *)aeqe_pos->aeqe_data));
+ set_bit(HIFC_AEQ_SW_CB_RUNNING,
+ &aeqs->aeq_sw_cb_state[sw_event]);
+ if (aeqs->aeq_swe_cb[sw_event] &&
+ test_bit(HIFC_AEQ_SW_CB_REG,
+ &aeqs->aeq_sw_cb_state[sw_event])) {
+ lev = aeqs->aeq_swe_cb[sw_event](aeqs->hwdev,
+ ucode_event,
+ aeqe_data);
+ hifc_swe_fault_handler(aeqs->hwdev, lev,
+ ucode_event, aeqe_data);
+ }
+ clear_bit(HIFC_AEQ_SW_CB_RUNNING,
+ &aeqs->aeq_sw_cb_state[sw_event]);
+}
+
+static void aeq_hwe_handler(struct hifc_aeqs *aeqs,
+ struct hifc_aeq_elem *aeqe_pos,
+ enum hifc_aeq_type event, u32 aeqe_desc)
+{
+ u8 size;
+
+ if (event < HIFC_MAX_AEQ_EVENTS) {
+ size = EQ_ELEM_DESC_GET(aeqe_desc, SIZE);
+ set_bit(HIFC_AEQ_HW_CB_RUNNING,
+ &aeqs->aeq_hw_cb_state[event]);
+ if (aeqs->aeq_hwe_cb[event] &&
+ test_bit(HIFC_AEQ_HW_CB_REG,
+ &aeqs->aeq_hw_cb_state[event]))
+ aeqs->aeq_hwe_cb[event](aeqs->hwdev,
+ aeqe_pos->aeqe_data, size);
+ clear_bit(HIFC_AEQ_HW_CB_RUNNING,
+ &aeqs->aeq_hw_cb_state[event]);
+
+ return;
+ }
+
+ sdk_warn(aeqs->hwdev->dev_hdl, "Unknown aeq hw event %d\n", event);
+}
+
+/**
+ * aeq_irq_handler - handler for the aeq event
+ * @eq: the async event queue of the event
+ * Return: true - there may be more events to process,
+ *         false - all pending events have been handled
+ **/
+static bool aeq_irq_handler(struct hifc_eq *eq)
+{
+ struct hifc_aeqs *aeqs = aeq_to_aeqs(eq);
+ struct hifc_aeq_elem *aeqe_pos;
+ enum hifc_aeq_type event;
+ u32 aeqe_desc;
+ u32 i, eqe_cnt = 0;
+
+ for (i = 0; i < HIFC_TASK_PROCESS_EQE_LIMIT; i++) {
+ aeqe_pos = GET_CURR_AEQ_ELEM(eq);
+
+ /* Data in HW is in Big endian Format */
+ aeqe_desc = be32_to_cpu(aeqe_pos->desc);
+
+		/* The HW toggles the wrapped bit when it adds an eq element */
+ if (EQ_ELEM_DESC_GET(aeqe_desc, WRAPPED) == eq->wrapped)
+ return false;
+
+		/* This memory barrier is needed to keep us from reading
+		 * any other fields of the aeq element until we have
+		 * verified that the element has been fully written back
+		 */
+ dma_rmb();
+
+ event = EQ_ELEM_DESC_GET(aeqe_desc, TYPE);
+ if (EQ_ELEM_DESC_GET(aeqe_desc, SRC))
+ aeq_swe_handler(aeqs, aeqe_pos, event);
+ else
+ aeq_hwe_handler(aeqs, aeqe_pos, event, aeqe_desc);
+
+ eq->cons_idx++;
+
+ if (eq->cons_idx == eq->eq_len) {
+ eq->cons_idx = 0;
+ eq->wrapped = !eq->wrapped;
+ }
+
+ if (++eqe_cnt >= HIFC_EQ_UPDATE_CI_STEP) {
+ eqe_cnt = 0;
+ set_eq_cons_idx(eq, HIFC_EQ_NOT_ARMED);
+ }
+ }
+
+ return true;
+}
+
+/**
+ * ceq_irq_handler - handler for the ceq event
+ * @eq: the completion event queue of the event
+ * Return: true - there may be more events to process,
+ *         false - all pending events have been handled
+ **/
+static bool ceq_irq_handler(struct hifc_eq *eq)
+{
+ struct hifc_ceqs *ceqs = ceq_to_ceqs(eq);
+ u32 ceqe, eqe_cnt = 0;
+ u32 i;
+
+ for (i = 0; i < g_num_ceqe_in_tasklet; i++) {
+ ceqe = *(GET_CURR_CEQ_ELEM(eq));
+ ceqe = be32_to_cpu(ceqe);
+
+		/* The HW toggles the wrapped bit when it adds an eq element */
+ if (EQ_ELEM_DESC_GET(ceqe, WRAPPED) == eq->wrapped)
+ return false;
+
+ ceq_event_handler(ceqs, ceqe);
+
+ eq->cons_idx++;
+
+ if (eq->cons_idx == eq->eq_len) {
+ eq->cons_idx = 0;
+ eq->wrapped = !eq->wrapped;
+ }
+
+ if (++eqe_cnt >= HIFC_EQ_UPDATE_CI_STEP) {
+ eqe_cnt = 0;
+ set_eq_cons_idx(eq, HIFC_EQ_NOT_ARMED);
+ }
+ }
+
+ return true;
+}
+
+/**
+ * eq_irq_handler - handler for the eq event
+ * @data: the event queue of the event
+ * Return: true - there may be more events to process,
+ *         false - all pending events have been handled
+ **/
+static bool eq_irq_handler(void *data)
+{
+ struct hifc_eq *eq = (struct hifc_eq *)data;
+ bool uncompleted;
+
+ if (eq->type == HIFC_AEQ)
+ uncompleted = aeq_irq_handler(eq);
+ else
+ uncompleted = ceq_irq_handler(eq);
+
+ set_eq_cons_idx(eq, uncompleted ? HIFC_EQ_NOT_ARMED : HIFC_EQ_ARMED);
+
+ return uncompleted;
+}
+
+static void reschedule_eq_handler(struct hifc_eq *eq)
+{
+ if (eq->type == HIFC_AEQ) {
+ struct hifc_aeqs *aeqs = aeq_to_aeqs(eq);
+ struct workqueue_struct *workq = aeqs->workq;
+ struct hifc_eq_work *aeq_work = &eq->aeq_work;
+
+ queue_work(workq, &aeq_work->work);
+ } else {
+ tasklet_schedule(&eq->ceq_tasklet);
+ }
+}
+
+/**
+ * ceq_tasklet - ceq tasklet for the event
+ * @ceq_data: data that will be used by the tasklet (ceq)
+ **/
+static void ceq_tasklet(ulong ceq_data)
+{
+ struct hifc_ceq_tasklet_data *ceq_tasklet_data =
+ (struct hifc_ceq_tasklet_data *)ceq_data;
+ struct hifc_eq *eq = (struct hifc_eq *)ceq_tasklet_data->data;
+
+ eq->soft_intr_jif = jiffies;
+
+ if (eq_irq_handler(ceq_tasklet_data->data))
+ reschedule_eq_handler(ceq_tasklet_data->data);
+}
+
+/**
+ * eq_irq_work - eq work for the event
+ * @work: the work that is associated with the eq
+ **/
+static void eq_irq_work(struct work_struct *work)
+{
+ struct hifc_eq_work *aeq_work =
+ container_of(work, struct hifc_eq_work, work);
+
+ if (eq_irq_handler(aeq_work->data))
+ reschedule_eq_handler(aeq_work->data);
+}
+
+struct hifc_ceq_ctrl_reg {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 q_id;
+ u32 ctrl0;
+ u32 ctrl1;
+};
+
+static int set_ceq_ctrl_reg(struct hifc_hwdev *hwdev, u16 q_id,
+ u32 ctrl0, u32 ctrl1)
+{
+ struct hifc_ceq_ctrl_reg ceq_ctrl = {0};
+ u16 in_size = sizeof(ceq_ctrl);
+ u16 out_size = sizeof(ceq_ctrl);
+ int err;
+
+ err = hifc_global_func_id_get(hwdev, &ceq_ctrl.func_id);
+ if (err)
+ return err;
+
+ ceq_ctrl.q_id = q_id;
+ ceq_ctrl.ctrl0 = ctrl0;
+ ceq_ctrl.ctrl1 = ctrl1;
+
+ err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM,
+ HIFC_MGMT_CMD_CEQ_CTRL_REG_WR_BY_UP,
+ &ceq_ctrl, in_size,
+ &ceq_ctrl, &out_size, 0);
+ if (err || !out_size || ceq_ctrl.status) {
+ sdk_err(hwdev->dev_hdl, "Failed to set ceq %d ctrl reg, err: %d status: 0x%x, out_size: 0x%x\n",
+ q_id, err, ceq_ctrl.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * set_eq_ctrls - setting eq's ctrls registers
+ * @eq: the event queue for setting
+ * Return: 0 - success, negative - failure
+ **/
+static int set_eq_ctrls(struct hifc_eq *eq)
+{
+ enum hifc_eq_type type = eq->type;
+ struct hifc_hwif *hwif = eq->hwdev->hwif;
+ struct irq_info *eq_irq = &eq->eq_irq;
+ u32 addr, val, ctrl0, ctrl1, page_size_val, elem_size;
+ u32 pci_intf_idx = HIFC_PCI_INTF_IDX(hwif);
+ int err;
+
+ if (type == HIFC_AEQ) {
+ /* set ctrl0 */
+ addr = HIFC_CSR_AEQ_CTRL_0_ADDR(eq->q_id);
+
+ val = hifc_hwif_read_reg(hwif, addr);
+
+ val = AEQ_CTRL_0_CLEAR(val, INTR_IDX) &
+ AEQ_CTRL_0_CLEAR(val, DMA_ATTR) &
+ AEQ_CTRL_0_CLEAR(val, PCI_INTF_IDX) &
+ AEQ_CTRL_0_CLEAR(val, INTR_MODE);
+
+ ctrl0 = AEQ_CTRL_0_SET(eq_irq->msix_entry_idx, INTR_IDX) |
+ AEQ_CTRL_0_SET(AEQ_DMA_ATTR_DEFAULT, DMA_ATTR) |
+ AEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX) |
+
+ AEQ_CTRL_0_SET(HIFC_INTR_MODE_ARMED, INTR_MODE);
+
+ val |= ctrl0;
+
+ hifc_hwif_write_reg(hwif, addr, val);
+
+ /* set ctrl1 */
+ addr = HIFC_CSR_AEQ_CTRL_1_ADDR(eq->q_id);
+
+ page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq);
+ elem_size = EQ_SET_HW_ELEM_SIZE_VAL(eq);
+
+ ctrl1 = AEQ_CTRL_1_SET(eq->eq_len, LEN) |
+ AEQ_CTRL_1_SET(elem_size, ELEM_SIZE) |
+ AEQ_CTRL_1_SET(page_size_val, PAGE_SIZE);
+
+ hifc_hwif_write_reg(hwif, addr, ctrl1);
+
+ } else {
+ ctrl0 = CEQ_CTRL_0_SET(eq_irq->msix_entry_idx, INTR_IDX) |
+ CEQ_CTRL_0_SET(CEQ_DMA_ATTR_DEFAULT, DMA_ATTR) |
+ CEQ_CTRL_0_SET(CEQ_LMT_KICK_DEFAULT, LIMIT_KICK) |
+ CEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX) |
+ CEQ_CTRL_0_SET(HIFC_INTR_MODE_ARMED, INTR_MODE);
+
+ page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq);
+
+ ctrl1 = CEQ_CTRL_1_SET(eq->eq_len, LEN) |
+ CEQ_CTRL_1_SET(page_size_val, PAGE_SIZE);
+
+ /* set ceq ctrl reg through mgmt cpu */
+ err = set_ceq_ctrl_reg(eq->hwdev, eq->q_id, ctrl0, ctrl1);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ceq_elements_init - Initialize all the elements in the ceq
+ * @eq: the event queue
+ * @init_val: value to initialize the elements with
+ **/
+static void ceq_elements_init(struct hifc_eq *eq, u32 init_val)
+{
+ u32 i;
+ u32 *ceqe;
+
+ for (i = 0; i < eq->eq_len; i++) {
+ ceqe = GET_CEQ_ELEM(eq, i);
+ *(ceqe) = cpu_to_be32(init_val);
+ }
+
+ wmb(); /* Write the init values */
+}
+
+/**
+ * aeq_elements_init - initialize all the elements in the aeq
+ * @eq: the event queue
+ * @init_val: value to initialize the elements with
+ **/
+static void aeq_elements_init(struct hifc_eq *eq, u32 init_val)
+{
+ struct hifc_aeq_elem *aeqe;
+ u32 i;
+
+ for (i = 0; i < eq->eq_len; i++) {
+ aeqe = GET_AEQ_ELEM(eq, i);
+ aeqe->desc = cpu_to_be32(init_val);
+ }
+
+ wmb(); /* Write the init values */
+}
+
+static void free_eq_pages_desc(struct hifc_eq *eq)
+{
+ kfree(eq->virt_addr_for_free);
+ kfree(eq->dma_addr_for_free);
+ kfree(eq->virt_addr);
+ kfree(eq->dma_addr);
+}
+
+static int alloc_eq_pages_desc(struct hifc_eq *eq)
+{
+ u64 dma_addr_size, virt_addr_size;
+ int err;
+
+ dma_addr_size = eq->num_pages * sizeof(*eq->dma_addr);
+ virt_addr_size = eq->num_pages * sizeof(*eq->virt_addr);
+
+ eq->dma_addr = kzalloc(dma_addr_size, GFP_KERNEL);
+ if (!eq->dma_addr)
+ return -ENOMEM;
+
+ eq->virt_addr = kzalloc(virt_addr_size, GFP_KERNEL);
+ if (!eq->virt_addr) {
+ err = -ENOMEM;
+ goto virt_addr_alloc_err;
+ }
+
+ eq->dma_addr_for_free = kzalloc(dma_addr_size, GFP_KERNEL);
+ if (!eq->dma_addr_for_free) {
+ err = -ENOMEM;
+ goto dma_addr_free_alloc_err;
+ }
+
+ eq->virt_addr_for_free = kzalloc(virt_addr_size, GFP_KERNEL);
+ if (!eq->virt_addr_for_free) {
+ err = -ENOMEM;
+ goto virt_addr_free_alloc_err;
+ }
+
+ return 0;
+
+virt_addr_free_alloc_err:
+ kfree(eq->dma_addr_for_free);
+dma_addr_free_alloc_err:
+ kfree(eq->virt_addr);
+virt_addr_alloc_err:
+ kfree(eq->dma_addr);
+ return err;
+}
+
+#define IS_ALIGN(x, a) (((x) & ((a) - 1)) == 0)
+
+static int init_eq_elements(struct hifc_eq *eq)
+{
+ u32 init_val;
+
+ eq->num_elem_in_pg = GET_EQ_NUM_ELEMS(eq, eq->page_size);
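+	/* IS_ALIGN(x, x) holds only when x is zero or a power of two, so the
+	 * check below rejects element counts that are not a power of two.
+	 */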
+ if (!IS_ALIGN(eq->num_elem_in_pg, eq->num_elem_in_pg)) {
+ sdk_err(eq->hwdev->dev_hdl, "Number element in eq page != power of 2\n");
+ return -EINVAL;
+ }
+
+ init_val = EQ_WRAPPED(eq);
+
+ if (eq->type == HIFC_AEQ)
+ aeq_elements_init(eq, init_val);
+ else
+ ceq_elements_init(eq, init_val);
+
+ return 0;
+}
+
+/**
+ * alloc_eq_pages - allocate the pages for the queue
+ * @eq: the event queue
+ * Return: 0 - success, negative - failure
+ **/
+static int alloc_eq_pages(struct hifc_eq *eq)
+{
+ struct hifc_hwif *hwif = eq->hwdev->hwif;
+ u16 pg_num, i;
+ u32 reg;
+ int err;
+ u8 flag = 0;
+
+ err = alloc_eq_pages_desc(eq);
+ if (err) {
+ sdk_err(eq->hwdev->dev_hdl, "Failed to alloc eq pages description\n");
+ return err;
+ }
+
+ for (pg_num = 0; pg_num < eq->num_pages; pg_num++) {
+ eq->virt_addr_for_free[pg_num] = dma_zalloc_coherent
+ (eq->hwdev->dev_hdl, eq->page_size,
+ &eq->dma_addr_for_free[pg_num], GFP_KERNEL);
+ if (!eq->virt_addr_for_free[pg_num]) {
+ err = -ENOMEM;
+ goto dma_alloc_err;
+ }
+
+ eq->dma_addr[pg_num] = eq->dma_addr_for_free[pg_num];
+ eq->virt_addr[pg_num] = eq->virt_addr_for_free[pg_num];
+ if (!IS_ALIGN(eq->dma_addr_for_free[pg_num],
+ eq->page_size)) {
+			sdk_info(eq->hwdev->dev_hdl,
+				 "Address is not aligned to %u bytes as the hardware requires\n",
+				 eq->page_size);
+			sdk_info(eq->hwdev->dev_hdl, "Changing eq's page size to %u\n",
+				 ((eq->page_size) >> 1));
+ eq->dma_addr[pg_num] = ALIGN
+ (eq->dma_addr_for_free[pg_num],
+ (u64)((eq->page_size) >> 1));
+ eq->virt_addr[pg_num] = eq->virt_addr_for_free[pg_num] +
+ ((u64)eq->dma_addr[pg_num]
+ - (u64)eq->dma_addr_for_free[pg_num]);
+ flag = 1;
+ }
+ reg = HIFC_EQ_HI_PHYS_ADDR_REG(eq->type, eq->q_id, pg_num);
+ hifc_hwif_write_reg(hwif, reg,
+ upper_32_bits(eq->dma_addr[pg_num]));
+
+ reg = HIFC_EQ_LO_PHYS_ADDR_REG(eq->type, eq->q_id, pg_num);
+ hifc_hwif_write_reg(hwif, reg,
+ lower_32_bits(eq->dma_addr[pg_num]));
+ }
+
+ if (flag) {
+ eq->page_size = eq->page_size >> 1;
+ eq->eq_len = eq->eq_len >> 1;
+ }
+
+ err = init_eq_elements(eq);
+ if (err) {
+ sdk_err(eq->hwdev->dev_hdl, "Failed to init eq elements\n");
+ goto dma_alloc_err;
+ }
+
+ return 0;
+
+dma_alloc_err:
+ for (i = 0; i < pg_num; i++)
+ dma_free_coherent(eq->hwdev->dev_hdl, eq->page_size,
+ eq->virt_addr_for_free[i],
+ eq->dma_addr_for_free[i]);
+ free_eq_pages_desc(eq);
+ return err;
+}
+
+/**
+ * free_eq_pages - free the pages of the queue
+ * @eq: the event queue
+ **/
+static void free_eq_pages(struct hifc_eq *eq)
+{
+ struct hifc_hwdev *hwdev = eq->hwdev;
+ u16 pg_num;
+
+ for (pg_num = 0; pg_num < eq->num_pages; pg_num++)
+ dma_free_coherent(hwdev->dev_hdl, eq->orig_page_size,
+ eq->virt_addr_for_free[pg_num],
+ eq->dma_addr_for_free[pg_num]);
+
+ free_eq_pages_desc(eq);
+}
+
+static inline u32 get_page_size(struct hifc_eq *eq)
+{
+ u32 total_size;
+ u16 count, n = 0;
+
+ total_size = ALIGN((eq->eq_len * eq->elem_size), EQ_MIN_PAGE_SIZE);
+
+ if (total_size <= (HIFC_EQ_MAX_PAGES * EQ_MIN_PAGE_SIZE))
+ return EQ_MIN_PAGE_SIZE;
+
+ count = (u16)(ALIGN((total_size / HIFC_EQ_MAX_PAGES),
+ EQ_MIN_PAGE_SIZE) / EQ_MIN_PAGE_SIZE);
+
+ if (!(count & (count - 1)))
+ return EQ_MIN_PAGE_SIZE * count;
+
+ while (count) {
+ count >>= 1;
+ n++;
+ }
+
+ return EQ_MIN_PAGE_SIZE << n;
+}
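+
+/* Worked example (illustrative only, assuming HIFC_EQ_MAX_PAGES is 64): for
+ * eq_len = 8192 and elem_size = 64, total_size is 512K, which no longer fits
+ * in 64 minimum-size (4K) pages. count becomes ALIGN(512K / 64, 4K) / 4K = 2,
+ * a power of two, so the chosen page size is 2 * 4K = 8K (64 pages in total).
+ */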
+
+static int request_eq_irq(struct hifc_eq *eq, enum hifc_eq_type type,
+ struct irq_info *entry)
+{
+ int err = 0;
+
+ if (type == HIFC_AEQ) {
+ struct hifc_eq_work *aeq_work = &eq->aeq_work;
+
+ INIT_WORK(&aeq_work->work, eq_irq_work);
+ } else {
+ tasklet_init(&eq->ceq_tasklet, ceq_tasklet,
+ (ulong)(&eq->ceq_tasklet_data));
+ }
+
+ if (type == HIFC_AEQ) {
+ snprintf(eq->irq_name, sizeof(eq->irq_name),
+ "hifc_aeq%d@pci:%s", eq->q_id,
+ pci_name(eq->hwdev->pcidev_hdl));
+
+ err = request_irq(entry->irq_id, aeq_interrupt, 0UL,
+ eq->irq_name, eq);
+ } else {
+ snprintf(eq->irq_name, sizeof(eq->irq_name),
+ "hifc_ceq%d@pci:%s", eq->q_id,
+ pci_name(eq->hwdev->pcidev_hdl));
+
+ err = request_irq(entry->irq_id, ceq_interrupt, 0UL,
+ eq->irq_name, eq);
+ }
+
+ return err;
+}
+
+/**
+ * init_eq - initialize eq
+ * @eq: the event queue
+ * @hwdev: the pointer to hw device
+ * @q_id: Queue id number
+ * @q_len: the number of EQ elements
+ * @type: the type of the event queue, ceq or aeq
+ * @entry: msix entry associated with the event queue
+ * Return: 0 - Success, Negative - failure
+ **/
+static int init_eq(struct hifc_eq *eq, struct hifc_hwdev *hwdev, u16 q_id,
+ u32 q_len, enum hifc_eq_type type, struct irq_info *entry)
+{
+ int err = 0;
+
+ eq->hwdev = hwdev;
+ eq->q_id = q_id;
+ eq->type = type;
+ eq->eq_len = q_len;
+
+ /* clear eq_len to force eqe drop in hardware */
+ if (eq->type == HIFC_AEQ)
+ hifc_hwif_write_reg(eq->hwdev->hwif,
+ HIFC_CSR_AEQ_CTRL_1_ADDR(eq->q_id), 0);
+ else
+ set_ceq_ctrl_reg(eq->hwdev, eq->q_id, 0, 0);
+
+ eq->cons_idx = 0;
+ eq->wrapped = 0;
+
+ eq->elem_size = (type == HIFC_AEQ) ?
+ HIFC_AEQE_SIZE : HIFC_CEQE_SIZE;
+
+ eq->page_size = get_page_size(eq);
+ eq->orig_page_size = eq->page_size;
+ eq->num_pages = GET_EQ_NUM_PAGES(eq, eq->page_size);
+ if (eq->num_pages > HIFC_EQ_MAX_PAGES) {
+		sdk_err(hwdev->dev_hdl, "Number of pages: %d is too many for eq\n",
+			eq->num_pages);
+ return -EINVAL;
+ }
+
+ err = alloc_eq_pages(eq);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to allocate pages for eq\n");
+ return err;
+ }
+
+ eq->eq_irq.msix_entry_idx = entry->msix_entry_idx;
+ eq->eq_irq.irq_id = entry->irq_id;
+
+ err = set_eq_ctrls(eq);
+ if (err) {
+		sdk_err(hwdev->dev_hdl, "Failed to set eq ctrls, err: %d\n", err);
+ goto init_eq_ctrls_err;
+ }
+
+ hifc_hwif_write_reg(eq->hwdev->hwif, EQ_PROD_IDX_REG_ADDR(eq), 0);
+ set_eq_cons_idx(eq, HIFC_EQ_ARMED);
+
+ err = request_eq_irq(eq, type, entry);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to request irq for the eq, err: %d\n",
+ err);
+ goto req_irq_err;
+ }
+
+ hifc_set_msix_state(hwdev, entry->msix_entry_idx, HIFC_MSIX_ENABLE);
+
+ return 0;
+
+init_eq_ctrls_err:
+req_irq_err:
+ free_eq_pages(eq);
+ return err;
+}
+
+/**
+ * remove_eq - remove eq
+ * @eq: the event queue
+ **/
+static void remove_eq(struct hifc_eq *eq)
+{
+ struct irq_info *entry = &eq->eq_irq;
+
+ hifc_set_msix_state(eq->hwdev, entry->msix_entry_idx,
+ HIFC_MSIX_DISABLE);
+ synchronize_irq(entry->irq_id);
+
+ free_irq(entry->irq_id, eq);
+
+ if (eq->type == HIFC_AEQ) {
+ struct hifc_eq_work *aeq_work = &eq->aeq_work;
+
+ cancel_work_sync(&aeq_work->work);
+
+ /* clear eq_len to avoid hw access host memory */
+ hifc_hwif_write_reg(eq->hwdev->hwif,
+ HIFC_CSR_AEQ_CTRL_1_ADDR(eq->q_id), 0);
+ } else {
+ tasklet_kill(&eq->ceq_tasklet);
+
+ set_ceq_ctrl_reg(eq->hwdev, eq->q_id, 0, 0);
+ }
+
+ /* update cons_idx to avoid invalid interrupt */
+ eq->cons_idx = hifc_hwif_read_reg(eq->hwdev->hwif,
+ EQ_PROD_IDX_REG_ADDR(eq));
+ set_eq_cons_idx(eq, HIFC_EQ_NOT_ARMED);
+
+ free_eq_pages(eq);
+}
+
+/**
+ * hifc_aeqs_init - init all the aeqs
+ * @hwdev: the pointer to hw device
+ * @num_aeqs: number of AEQs
+ * @msix_entries: msix entries associated with the event queues
+ * Return: 0 - Success, Negative - failure
+ **/
+int hifc_aeqs_init(struct hifc_hwdev *hwdev, u16 num_aeqs,
+ struct irq_info *msix_entries)
+{
+ struct hifc_aeqs *aeqs;
+ int err;
+ u16 i, q_id;
+
+ aeqs = kzalloc(sizeof(*aeqs), GFP_KERNEL);
+ if (!aeqs)
+ return -ENOMEM;
+
+ hwdev->aeqs = aeqs;
+ aeqs->hwdev = hwdev;
+ aeqs->num_aeqs = num_aeqs;
+
+ aeqs->workq = create_singlethread_workqueue(HIFC_EQS_WQ_NAME);
+ if (!aeqs->workq) {
+ sdk_err(hwdev->dev_hdl, "Failed to initialize aeq workqueue\n");
+ err = -ENOMEM;
+ goto create_work_err;
+ }
+
+ if (g_aeq_len < HIFC_MIN_AEQ_LEN || g_aeq_len > HIFC_MAX_AEQ_LEN) {
+ sdk_warn(hwdev->dev_hdl, "Module Parameter g_aeq_len value %d out of range, resetting to %d\n",
+ g_aeq_len, HIFC_DEFAULT_AEQ_LEN);
+ g_aeq_len = HIFC_DEFAULT_AEQ_LEN;
+ }
+
+ for (q_id = 0; q_id < num_aeqs; q_id++) {
+ err = init_eq(&aeqs->aeq[q_id], hwdev, q_id, g_aeq_len,
+ HIFC_AEQ, &msix_entries[q_id]);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to init aeq %d\n",
+ q_id);
+ goto init_aeq_err;
+ }
+ }
+
+ return 0;
+
+init_aeq_err:
+ for (i = 0; i < q_id; i++)
+ remove_eq(&aeqs->aeq[i]);
+
+ destroy_workqueue(aeqs->workq);
+
+create_work_err:
+ kfree(aeqs);
+
+ return err;
+}
+
+/**
+ * hifc_aeqs_free - free all the aeqs
+ * @hwdev: the pointer to hw device
+ **/
+void hifc_aeqs_free(struct hifc_hwdev *hwdev)
+{
+ struct hifc_aeqs *aeqs = hwdev->aeqs;
+ enum hifc_aeq_type aeq_event = HIFC_HW_INTER_INT;
+ enum hifc_aeq_sw_type sw_aeq_event = HIFC_STATELESS_EVENT;
+ u16 q_id;
+
+ for (q_id = 0; q_id < aeqs->num_aeqs; q_id++)
+ remove_eq(&aeqs->aeq[q_id]);
+
+ for (; sw_aeq_event < HIFC_MAX_AEQ_SW_EVENTS; sw_aeq_event++)
+ hifc_aeq_unregister_swe_cb(hwdev, sw_aeq_event);
+
+ for (; aeq_event < HIFC_MAX_AEQ_EVENTS; aeq_event++)
+ hifc_aeq_unregister_hw_cb(hwdev, aeq_event);
+
+ destroy_workqueue(aeqs->workq);
+
+ kfree(aeqs);
+}
+
+/**
+ * hifc_ceqs_init - init all the ceqs
+ * @hwdev: the pointer to hw device
+ * @num_ceqs: number of CEQs
+ * @msix_entries: msix entries associated with the event queues
+ * Return: 0 - Success, Negative - failure
+ **/
+int hifc_ceqs_init(struct hifc_hwdev *hwdev, u16 num_ceqs,
+ struct irq_info *msix_entries)
+{
+ struct hifc_ceqs *ceqs;
+ int err;
+ u16 i, q_id;
+
+ ceqs = kzalloc(sizeof(*ceqs), GFP_KERNEL);
+ if (!ceqs)
+ return -ENOMEM;
+
+ hwdev->ceqs = ceqs;
+
+ ceqs->hwdev = hwdev;
+ ceqs->num_ceqs = num_ceqs;
+
+ if (g_ceq_len < HIFC_MIN_CEQ_LEN || g_ceq_len > HIFC_MAX_CEQ_LEN) {
+ sdk_warn(hwdev->dev_hdl, "Module Parameter g_ceq_len value %d out of range, resetting to %d\n",
+ g_ceq_len, HIFC_DEFAULT_CEQ_LEN);
+ g_ceq_len = HIFC_DEFAULT_CEQ_LEN;
+ }
+
+ if (!g_num_ceqe_in_tasklet) {
+		sdk_warn(hwdev->dev_hdl, "Module Parameter g_num_ceqe_in_tasklet cannot be zero, resetting to %d\n",
+ HIFC_TASK_PROCESS_EQE_LIMIT);
+ g_num_ceqe_in_tasklet = HIFC_TASK_PROCESS_EQE_LIMIT;
+ }
+
+ for (q_id = 0; q_id < num_ceqs; q_id++) {
+ err = init_eq(&ceqs->ceq[q_id], hwdev, q_id, g_ceq_len,
+ HIFC_CEQ, &msix_entries[q_id]);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to init ceq %d\n",
+ q_id);
+ goto init_ceq_err;
+ }
+ }
+
+ return 0;
+
+init_ceq_err:
+ for (i = 0; i < q_id; i++)
+ remove_eq(&ceqs->ceq[i]);
+
+ kfree(ceqs);
+
+ return err;
+}
+
+/**
+ * hifc_ceqs_free - free all the ceqs
+ * @hwdev: the pointer to hw device
+ **/
+void hifc_ceqs_free(struct hifc_hwdev *hwdev)
+{
+ struct hifc_ceqs *ceqs = hwdev->ceqs;
+ enum hifc_ceq_event ceq_event = HIFC_CMDQ;
+ u16 q_id;
+
+ for (q_id = 0; q_id < ceqs->num_ceqs; q_id++)
+ remove_eq(&ceqs->ceq[q_id]);
+
+ for (; ceq_event < HIFC_MAX_CEQ_EVENTS; ceq_event++)
+ hifc_ceq_unregister_cb(hwdev, ceq_event);
+
+ kfree(ceqs);
+}
+
+void hifc_get_ceq_irqs(struct hifc_hwdev *hwdev, struct irq_info *irqs,
+ u16 *num_irqs)
+{
+ struct hifc_ceqs *ceqs = hwdev->ceqs;
+ u16 q_id;
+
+ for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) {
+ irqs[q_id].irq_id = ceqs->ceq[q_id].eq_irq.irq_id;
+ irqs[q_id].msix_entry_idx =
+ ceqs->ceq[q_id].eq_irq.msix_entry_idx;
+ }
+
+ *num_irqs = ceqs->num_ceqs;
+}
+
+void hifc_get_aeq_irqs(struct hifc_hwdev *hwdev, struct irq_info *irqs,
+ u16 *num_irqs)
+{
+ struct hifc_aeqs *aeqs = hwdev->aeqs;
+ u16 q_id;
+
+ for (q_id = 0; q_id < aeqs->num_aeqs; q_id++) {
+ irqs[q_id].irq_id = aeqs->aeq[q_id].eq_irq.irq_id;
+ irqs[q_id].msix_entry_idx =
+ aeqs->aeq[q_id].eq_irq.msix_entry_idx;
+ }
+
+ *num_irqs = aeqs->num_aeqs;
+}
+
+void hifc_dump_aeq_info(struct hifc_hwdev *hwdev)
+{
+ struct hifc_aeq_elem *aeqe_pos;
+ struct hifc_eq *eq;
+ u32 addr, ci, pi;
+ int q_id;
+
+ for (q_id = 0; q_id < hwdev->aeqs->num_aeqs; q_id++) {
+ eq = &hwdev->aeqs->aeq[q_id];
+ addr = EQ_CONS_IDX_REG_ADDR(eq);
+ ci = hifc_hwif_read_reg(hwdev->hwif, addr);
+ addr = EQ_PROD_IDX_REG_ADDR(eq);
+ pi = hifc_hwif_read_reg(hwdev->hwif, addr);
+ aeqe_pos = GET_CURR_AEQ_ELEM(eq);
+ sdk_err(hwdev->dev_hdl, "Aeq id: %d, ci: 0x%08x, pi: 0x%x, work_state: 0x%x, wrap: %d, desc: 0x%x\n",
+ q_id, ci, pi, work_busy(&eq->aeq_work.work),
+ eq->wrapped, be32_to_cpu(aeqe_pos->desc));
+ }
+}
+
diff --git a/drivers/scsi/huawei/hifc/hifc_eqs.h b/drivers/scsi/huawei/hifc/hifc_eqs.h
new file mode 100644
index 000000000000..2dcfc432c8f2
--- /dev/null
+++ b/drivers/scsi/huawei/hifc/hifc_eqs.h
@@ -0,0 +1,233 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei Hifc PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef HIFC_EQS_H
+#define HIFC_EQS_H
+
+#define HIFC_MAX_AEQS 3
+#define HIFC_MAX_CEQS 32
+
+#define HIFC_EQ_MAX_PAGES 8
+
+#define HIFC_AEQE_SIZE 64
+#define HIFC_CEQE_SIZE 4
+
+#define HIFC_AEQE_DESC_SIZE 4
+#define HIFC_AEQE_DATA_SIZE \
+ (HIFC_AEQE_SIZE - HIFC_AEQE_DESC_SIZE)
+
+#define HIFC_DEFAULT_AEQ_LEN 4096
+#define HIFC_DEFAULT_CEQ_LEN 8192
+
+#define HIFC_MIN_AEQ_LEN 64
+#define HIFC_MAX_AEQ_LEN (512 * 1024)
+#define HIFC_MIN_CEQ_LEN 64
+#define HIFC_MAX_CEQ_LEN (1024 * 1024)
+
+#define HIFC_CEQ_ID_CMDQ 0
+#define EQ_IRQ_NAME_LEN 64
+
+/* EQ registers */
+#define HIFC_AEQ_MTT_OFF_BASE_ADDR 0x200
+#define HIFC_CEQ_MTT_OFF_BASE_ADDR 0x400
+
+#define HIFC_EQ_MTT_OFF_STRIDE 0x40
+
+#define HIFC_CSR_AEQ_MTT_OFF(id) \
+ (HIFC_AEQ_MTT_OFF_BASE_ADDR + (id) * HIFC_EQ_MTT_OFF_STRIDE)
+
+#define HIFC_CSR_CEQ_MTT_OFF(id) \
+ (HIFC_CEQ_MTT_OFF_BASE_ADDR + (id) * HIFC_EQ_MTT_OFF_STRIDE)
+
+#define HIFC_CSR_EQ_PAGE_OFF_STRIDE 8
+
+#define HIFC_AEQ_HI_PHYS_ADDR_REG(q_id, pg_num) \
+ (HIFC_CSR_AEQ_MTT_OFF(q_id) + \
+ (pg_num) * HIFC_CSR_EQ_PAGE_OFF_STRIDE)
+
+#define HIFC_AEQ_LO_PHYS_ADDR_REG(q_id, pg_num) \
+ (HIFC_CSR_AEQ_MTT_OFF(q_id) + \
+ (pg_num) * HIFC_CSR_EQ_PAGE_OFF_STRIDE + 4)
+
+#define HIFC_CEQ_HI_PHYS_ADDR_REG(q_id, pg_num) \
+ (HIFC_CSR_CEQ_MTT_OFF(q_id) + \
+ (pg_num) * HIFC_CSR_EQ_PAGE_OFF_STRIDE)
+
+#define HIFC_CEQ_LO_PHYS_ADDR_REG(q_id, pg_num) \
+ (HIFC_CSR_CEQ_MTT_OFF(q_id) + \
+ (pg_num) * HIFC_CSR_EQ_PAGE_OFF_STRIDE + 4)
+
+#define HIFC_EQ_HI_PHYS_ADDR_REG(type, q_id, pg_num) \
+ ((u32)((type == HIFC_AEQ) ? \
+ HIFC_AEQ_HI_PHYS_ADDR_REG(q_id, pg_num) : \
+ HIFC_CEQ_HI_PHYS_ADDR_REG(q_id, pg_num)))
+
+#define HIFC_EQ_LO_PHYS_ADDR_REG(type, q_id, pg_num) \
+ ((u32)((type == HIFC_AEQ) ? \
+ HIFC_AEQ_LO_PHYS_ADDR_REG(q_id, pg_num) : \
+ HIFC_CEQ_LO_PHYS_ADDR_REG(q_id, pg_num)))
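+
+/*
+ * Example: the per-page MTT registers are laid out per queue with a 0x40
+ * stride and 8 bytes per page, so for AEQ 1, page 2:
+ * HIFC_AEQ_HI_PHYS_ADDR_REG(1, 2) = 0x200 + 1 * 0x40 + 2 * 8 = 0x250,
+ * and the matching LO register sits 4 bytes higher at 0x254.
+ */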
+
+#define HIFC_AEQ_CTRL_0_ADDR_BASE 0xE00
+#define HIFC_AEQ_CTRL_1_ADDR_BASE 0xE04
+#define HIFC_AEQ_CONS_IDX_0_ADDR_BASE 0xE08
+#define HIFC_AEQ_CONS_IDX_1_ADDR_BASE 0xE0C
+
+#define HIFC_EQ_OFF_STRIDE 0x80
+
+#define HIFC_CSR_AEQ_CTRL_0_ADDR(idx) \
+ (HIFC_AEQ_CTRL_0_ADDR_BASE + (idx) * HIFC_EQ_OFF_STRIDE)
+
+#define HIFC_CSR_AEQ_CTRL_1_ADDR(idx) \
+ (HIFC_AEQ_CTRL_1_ADDR_BASE + (idx) * HIFC_EQ_OFF_STRIDE)
+
+#define HIFC_CSR_AEQ_CONS_IDX_ADDR(idx) \
+ (HIFC_AEQ_CONS_IDX_0_ADDR_BASE + (idx) * HIFC_EQ_OFF_STRIDE)
+
+#define HIFC_CSR_AEQ_PROD_IDX_ADDR(idx) \
+ (HIFC_AEQ_CONS_IDX_1_ADDR_BASE + (idx) * HIFC_EQ_OFF_STRIDE)
+
+#define HIFC_CEQ_CTRL_0_ADDR_BASE 0x1000
+#define HIFC_CEQ_CTRL_1_ADDR_BASE 0x1004
+#define HIFC_CEQ_CONS_IDX_0_ADDR_BASE 0x1008
+#define HIFC_CEQ_CONS_IDX_1_ADDR_BASE 0x100C
+
+#define HIFC_CSR_CEQ_CTRL_0_ADDR(idx) \
+ (HIFC_CEQ_CTRL_0_ADDR_BASE + (idx) * HIFC_EQ_OFF_STRIDE)
+
+#define HIFC_CSR_CEQ_CTRL_1_ADDR(idx) \
+ (HIFC_CEQ_CTRL_1_ADDR_BASE + (idx) * HIFC_EQ_OFF_STRIDE)
+
+#define HIFC_CSR_CEQ_CONS_IDX_ADDR(idx) \
+ (HIFC_CEQ_CONS_IDX_0_ADDR_BASE + (idx) * HIFC_EQ_OFF_STRIDE)
+
+#define HIFC_CSR_CEQ_PROD_IDX_ADDR(idx) \
+ (HIFC_CEQ_CONS_IDX_1_ADDR_BASE + (idx) * HIFC_EQ_OFF_STRIDE)
+
+enum hifc_eq_type {
+ HIFC_AEQ,
+ HIFC_CEQ
+};
+
+enum hifc_eq_intr_mode {
+ HIFC_INTR_MODE_ARMED,
+ HIFC_INTR_MODE_ALWAYS,
+};
+
+enum hifc_eq_ci_arm_state {
+ HIFC_EQ_NOT_ARMED,
+ HIFC_EQ_ARMED,
+};
+
+struct hifc_eq_work {
+ struct work_struct work;
+ void *data;
+};
+
+struct hifc_ceq_tasklet_data {
+ void *data;
+};
+
+struct hifc_eq {
+ struct hifc_hwdev *hwdev;
+ u16 q_id;
+ enum hifc_eq_type type;
+ u32 page_size;
+ u32 orig_page_size;
+ u32 eq_len;
+
+ u32 cons_idx;
+ u16 wrapped;
+
+ u16 elem_size;
+ u16 num_pages;
+ u32 num_elem_in_pg;
+
+ struct irq_info eq_irq;
+ char irq_name[EQ_IRQ_NAME_LEN];
+
+ dma_addr_t *dma_addr;
+ u8 **virt_addr;
+ dma_addr_t *dma_addr_for_free;
+ u8 **virt_addr_for_free;
+
+ struct hifc_eq_work aeq_work;
+ struct tasklet_struct ceq_tasklet;
+ struct hifc_ceq_tasklet_data ceq_tasklet_data;
+
+ u64 hard_intr_jif;
+ u64 soft_intr_jif;
+};
+
+struct hifc_aeq_elem {
+ u8 aeqe_data[HIFC_AEQE_DATA_SIZE];
+ u32 desc;
+};
+
+enum hifc_aeq_cb_state {
+ HIFC_AEQ_HW_CB_REG = 0,
+ HIFC_AEQ_HW_CB_RUNNING,
+ HIFC_AEQ_SW_CB_REG,
+ HIFC_AEQ_SW_CB_RUNNING,
+};
+
+struct hifc_aeqs {
+ struct hifc_hwdev *hwdev;
+
+ hifc_aeq_hwe_cb aeq_hwe_cb[HIFC_MAX_AEQ_EVENTS];
+ hifc_aeq_swe_cb aeq_swe_cb[HIFC_MAX_AEQ_SW_EVENTS];
+ unsigned long aeq_hw_cb_state[HIFC_MAX_AEQ_EVENTS];
+ unsigned long aeq_sw_cb_state[HIFC_MAX_AEQ_SW_EVENTS];
+
+ struct hifc_eq aeq[HIFC_MAX_AEQS];
+ u16 num_aeqs;
+
+ struct workqueue_struct *workq;
+};
+
+enum hifc_ceq_cb_state {
+ HIFC_CEQ_CB_REG = 0,
+ HIFC_CEQ_CB_RUNNING,
+};
+
+struct hifc_ceqs {
+ struct hifc_hwdev *hwdev;
+
+ hifc_ceq_event_cb ceq_cb[HIFC_MAX_CEQ_EVENTS];
+ void *ceq_data[HIFC_MAX_CEQ_EVENTS];
+ unsigned long ceq_cb_state[HIFC_MAX_CEQ_EVENTS];
+
+ struct hifc_eq ceq[HIFC_MAX_CEQS];
+ u16 num_ceqs;
+};
+
+int hifc_aeqs_init(struct hifc_hwdev *hwdev, u16 num_aeqs,
+ struct irq_info *msix_entries);
+
+void hifc_aeqs_free(struct hifc_hwdev *hwdev);
+
+int hifc_ceqs_init(struct hifc_hwdev *hwdev, u16 num_ceqs,
+ struct irq_info *msix_entries);
+
+void hifc_ceqs_free(struct hifc_hwdev *hwdev);
+
+void hifc_get_ceq_irqs(struct hifc_hwdev *hwdev, struct irq_info *irqs,
+ u16 *num_irqs);
+
+void hifc_get_aeq_irqs(struct hifc_hwdev *hwdev, struct irq_info *irqs,
+ u16 *num_irqs);
+
+void hifc_dump_aeq_info(struct hifc_hwdev *hwdev);
+
+#endif
diff --git a/drivers/scsi/huawei/hifc/hifc_hw.h b/drivers/scsi/huawei/hifc/hifc_hw.h
new file mode 100644
index 000000000000..49b2edd2bac6
--- /dev/null
+++ b/drivers/scsi/huawei/hifc/hifc_hw.h
@@ -0,0 +1,611 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei Hifc PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ */
+
+#ifndef HIFC_HW_H_
+#define HIFC_HW_H_
+
+#ifndef __BIG_ENDIAN__
+#define __BIG_ENDIAN__ 0x4321
+#endif
+
+#ifndef __LITTLE_ENDIAN__
+#define __LITTLE_ENDIAN__ 0x1234
+#endif
+
+enum hifc_mod_type {
+ HIFC_MOD_COMM = 0, /* HW communication module */
+	HIFC_MOD_L2NIC = 1, /* L2NIC module */
+ HIFC_MOD_FCOE = 6,
+ HIFC_MOD_CFGM = 7, /* Configuration module */
+ HIFC_MOD_FC = 10,
+ HIFC_MOD_HILINK = 14,
+ HIFC_MOD_HW_MAX = 16, /* hardware max module id */
+
+ /* Software module id, for PF/VF and multi-host */
+ HIFC_MOD_MAX,
+};
+
+struct hifc_cmd_buf {
+ void *buf;
+ dma_addr_t dma_addr;
+ u16 size;
+};
+
+enum hifc_ack_type {
+ HIFC_ACK_TYPE_CMDQ,
+ HIFC_ACK_TYPE_SHARE_CQN,
+ HIFC_ACK_TYPE_APP_CQN,
+ HIFC_MOD_ACK_MAX = 15,
+};
+
+int hifc_msg_to_mgmt_sync(void *hwdev, enum hifc_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size, u32 timeout);
+
+/* PF/VF send msg to uP by api cmd, and return immediately */
+int hifc_msg_to_mgmt_async(void *hwdev, enum hifc_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size);
+
+int hifc_api_cmd_write_nack(void *hwdev, u8 dest,
+ void *cmd, u16 size);
+
+int hifc_api_cmd_read_ack(void *hwdev, u8 dest,
+ void *cmd, u16 size, void *ack, u16 ack_size);
+/* PF/VF send cmd to ucode by cmdq, and return if success.
+ * timeout=0, use default timeout.
+ */
+int hifc_cmdq_direct_resp(void *hwdev, enum hifc_ack_type ack_type,
+ enum hifc_mod_type mod, u8 cmd,
+ struct hifc_cmd_buf *buf_in,
+ u64 *out_param, u32 timeout);
+/* Notes:
+ * 1. It is still open whether the timeout parameter is needed.
+ * 2. out_param indicates the status of the microcode processing of the
+ *    command.
+ */
+
+/* PF/VF send cmd to ucode by cmdq, and return detailed result.
+ * timeout=0, use default timeout.
+ */
+int hifc_cmdq_detail_resp(void *hwdev, enum hifc_ack_type ack_type,
+ enum hifc_mod_type mod, u8 cmd,
+ struct hifc_cmd_buf *buf_in,
+ struct hifc_cmd_buf *buf_out, u32 timeout);
+
+/* PF/VF send cmd to ucode by cmdq, and return immediately
+ */
+int hifc_cmdq_async(void *hwdev, enum hifc_ack_type ack_type,
+ enum hifc_mod_type mod, u8 cmd,
+ struct hifc_cmd_buf *buf_in);
+
+int hifc_ppf_tmr_start(void *hwdev);
+int hifc_ppf_tmr_stop(void *hwdev);
+
+enum hifc_ceq_event {
+ HIFC_CMDQ = 3,
+ HIFC_MAX_CEQ_EVENTS = 6,
+};
+
+typedef void (*hifc_ceq_event_cb)(void *handle, u32 ceqe_data);
+int hifc_ceq_register_cb(void *hwdev, enum hifc_ceq_event event,
+ hifc_ceq_event_cb callback);
+void hifc_ceq_unregister_cb(void *hwdev, enum hifc_ceq_event event);
+
+enum hifc_aeq_type {
+ HIFC_HW_INTER_INT = 0,
+ HIFC_MBX_FROM_FUNC = 1,
+ HIFC_MSG_FROM_MGMT_CPU = 2,
+ HIFC_API_RSP = 3,
+ HIFC_API_CHAIN_STS = 4,
+ HIFC_MBX_SEND_RSLT = 5,
+ HIFC_MAX_AEQ_EVENTS
+};
+
+enum hifc_aeq_sw_type {
+ HIFC_STATELESS_EVENT = 0,
+ HIFC_STATEFULL_EVENT = 1,
+ HIFC_MAX_AEQ_SW_EVENTS
+};
+
+typedef void (*hifc_aeq_hwe_cb)(void *handle, u8 *data, u8 size);
+int hifc_aeq_register_hw_cb(void *hwdev, enum hifc_aeq_type event,
+ hifc_aeq_hwe_cb hwe_cb);
+void hifc_aeq_unregister_hw_cb(void *hwdev, enum hifc_aeq_type event);
+
+typedef u8 (*hifc_aeq_swe_cb)(void *handle, u8 event, u64 data);
+int hifc_aeq_register_swe_cb(void *hwdev, enum hifc_aeq_sw_type event,
+ hifc_aeq_swe_cb aeq_swe_cb);
+void hifc_aeq_unregister_swe_cb(void *hwdev, enum hifc_aeq_sw_type event);
+
+typedef void (*hifc_mgmt_msg_cb)(void *hwdev, void *pri_handle,
+ u8 cmd, void *buf_in, u16 in_size, void *buf_out, u16 *out_size);
+
+int hifc_register_mgmt_msg_cb(void *hwdev,
+ enum hifc_mod_type mod, void *pri_handle,
+ hifc_mgmt_msg_cb callback);
+void hifc_unregister_mgmt_msg_cb(void *hwdev, enum hifc_mod_type mod);
+
+struct hifc_cmd_buf *hifc_alloc_cmd_buf(void *hwdev);
+void hifc_free_cmd_buf(void *hwdev, struct hifc_cmd_buf *buf);
+
+int hifc_alloc_db_addr(void *hwdev, void __iomem **db_base,
+ void __iomem **dwqe_base);
+void hifc_free_db_addr(void *hwdev, void __iomem *db_base,
+ void __iomem *dwqe_base);
+
+struct nic_interrupt_info {
+ u32 lli_set;
+ u32 interrupt_coalesc_set;
+ u16 msix_index;
+ u8 lli_credit_limit;
+ u8 lli_timer_cfg;
+ u8 pending_limt;
+ u8 coalesc_timer_cfg;
+ u8 resend_timer_cfg;
+};
+
+int hifc_get_interrupt_cfg(void *hwdev,
+ struct nic_interrupt_info *interrupt_info);
+
+int hifc_set_interrupt_cfg(void *hwdev,
+ struct nic_interrupt_info interrupt_info);
+
+/* The driver code implementation interface */
+void hifc_misx_intr_clear_resend_bit(void *hwdev,
+ u16 msix_idx, u8 clear_resend_en);
+
+struct hifc_sq_attr {
+ u8 dma_attr_off;
+ u8 pending_limit;
+ u8 coalescing_time;
+ u8 intr_en;
+ u16 intr_idx;
+ u32 l2nic_sqn;
+ u64 ci_dma_base;
+};
+
+int hifc_set_ci_table(void *hwdev, u16 q_id, struct hifc_sq_attr *attr);
+
+int hifc_set_root_ctxt(void *hwdev, u16 rq_depth, u16 sq_depth, int rx_buf_sz);
+int hifc_clean_root_ctxt(void *hwdev);
+void hifc_record_pcie_error(void *hwdev);
+
+int hifc_func_rx_tx_flush(void *hwdev);
+
+int hifc_func_tmr_bitmap_set(void *hwdev, bool enable);
+
+struct hifc_init_para {
+	/* Record hifc_pcidev or NDIS_Adapter pointer address */
+ void *adapter_hdl;
+ /* Record pcidev or Handler pointer address
+ * for example: ioremap interface input parameter
+ */
+ void *pcidev_hdl;
+	/* Record pcidev->dev or handler pointer address, which is used for
+	 * DMA address allocation and as the dev_err print parameter
+	 */
+ void *dev_hdl;
+
+	void *cfg_reg_base; /* Configuration register virtual address, bar0/1 */
+ /* interrupt configuration register address, bar2/3 */
+ void *intr_reg_base;
+ u64 db_base_phy;
+	void *db_base; /* the doorbell address, bar4/5 higher 4M space */
+	void *dwqe_mapping;/* direct wqe 4M, follows the doorbell address space */
+ void **hwdev;
+ void *chip_node;
+	/* In the bmgw x86 host, the driver can't send messages to the mgmt
+	 * cpu directly; it needs to transmit them through the ppf mailbox to
+	 * the bmgw arm host.
+	 */
+ void *ppf_hwdev;
+};
+
+#ifndef IFNAMSIZ
+#define IFNAMSIZ 16
+#endif
+#define MAX_FUNCTION_NUM 512
+#define HIFC_MAX_PF_NUM 16
+#define HIFC_MAX_COS 8
+#define INIT_FAILED 0
+#define INIT_SUCCESS 1
+#define MAX_DRV_BUF_SIZE 4096
+
+struct hifc_cmd_get_light_module_abs {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 port_id;
+ u8 abs_status; /* 0:present, 1:absent */
+ u8 rsv[2];
+};
+
+#define SFP_INFO_MAX_SIZE 512
+struct hifc_cmd_get_sfp_qsfp_info {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 port_id;
+ u8 wire_type;
+ u16 out_len;
+ u8 sfp_qsfp_info[SFP_INFO_MAX_SIZE];
+};
+
+#define HIFC_MAX_PORT_ID 4
+
+struct hifc_port_routine_cmd {
+ bool up_send_sfp_info;
+ bool up_send_sfp_abs;
+
+ struct hifc_cmd_get_sfp_qsfp_info sfp_info;
+ struct hifc_cmd_get_light_module_abs abs;
+};
+
+struct card_node {
+ struct list_head node;
+ struct list_head func_list;
+ char chip_name[IFNAMSIZ];
+ void *log_info;
+ void *dbgtool_info;
+ void *func_handle_array[MAX_FUNCTION_NUM];
+ unsigned char dp_bus_num;
+ u8 func_num;
+ struct attribute dbgtool_attr_file;
+
+ bool cos_up_setted;
+ u8 cos_up[HIFC_MAX_COS];
+ bool ppf_state;
+ u8 pf_bus_num[HIFC_MAX_PF_NUM];
+
+ struct hifc_port_routine_cmd rt_cmd[HIFC_MAX_PORT_ID];
+
+ /* mutex used for copy sfp info */
+ struct mutex sfp_mutex;
+};
+
+enum hifc_hwdev_init_state {
+ HIFC_HWDEV_NONE_INITED = 0,
+ HIFC_HWDEV_CLP_INITED,
+ HIFC_HWDEV_AEQ_INITED,
+ HIFC_HWDEV_MGMT_INITED,
+ HIFC_HWDEV_MBOX_INITED,
+ HIFC_HWDEV_CMDQ_INITED,
+ HIFC_HWDEV_COMM_CH_INITED,
+ HIFC_HWDEV_ALL_INITED,
+ HIFC_HWDEV_MAX_INVAL_INITED
+};
+
+enum hifc_func_cap {
+ /* send message to mgmt cpu directly */
+ HIFC_FUNC_MGMT = 1 << 0,
+ /* setting port attribute, pause/speed etc. */
+ HIFC_FUNC_PORT = 1 << 1,
+	/* Enable SR-IOV by default */
+ HIFC_FUNC_SRIOV_EN_DFLT = 1 << 2,
+ /* Can't change VF num */
+ HIFC_FUNC_SRIOV_NUM_FIX = 1 << 3,
+	/* Force pf/vf link up */
+ HIFC_FUNC_FORCE_LINK_UP = 1 << 4,
+ /* Support rate limit */
+ HIFC_FUNC_SUPP_RATE_LIMIT = 1 << 5,
+ HIFC_FUNC_SUPP_DFX_REG = 1 << 6,
+ /* Support promisc/multicast/all-multi */
+ HIFC_FUNC_SUPP_RX_MODE = 1 << 7,
+ /* Set vf mac and vlan by ip link */
+ HIFC_FUNC_SUPP_SET_VF_MAC_VLAN = 1 << 8,
+ /* Support set mac by ifconfig */
+ HIFC_FUNC_SUPP_CHANGE_MAC = 1 << 9,
+	/* OVS doesn't support SCTP_CRC/HW_VLAN/LRO */
+ HIFC_FUNC_OFFLOAD_OVS_UNSUPP = 1 << 10,
+};
+
+#define FUNC_SUPPORT_MGMT(hwdev) \
+ (!!(hifc_get_func_feature_cap(hwdev) & HIFC_FUNC_MGMT))
+#define FUNC_SUPPORT_PORT_SETTING(hwdev) \
+ (!!(hifc_get_func_feature_cap(hwdev) & HIFC_FUNC_PORT))
+#define FUNC_SUPPORT_DCB(hwdev) \
+ (FUNC_SUPPORT_PORT_SETTING(hwdev))
+#define FUNC_ENABLE_SRIOV_IN_DEFAULT(hwdev) \
+ (!!(hifc_get_func_feature_cap(hwdev) & \
+ HIFC_FUNC_SRIOV_EN_DFLT))
+#define FUNC_SRIOV_FIX_NUM_VF(hwdev) \
+ (!!(hifc_get_func_feature_cap(hwdev) & \
+ HIFC_FUNC_SRIOV_NUM_FIX))
+#define FUNC_SUPPORT_RX_MODE(hwdev) \
+ (!!(hifc_get_func_feature_cap(hwdev) & \
+ HIFC_FUNC_SUPP_RX_MODE))
+#define FUNC_SUPPORT_RATE_LIMIT(hwdev) \
+ (!!(hifc_get_func_feature_cap(hwdev) & \
+ HIFC_FUNC_SUPP_RATE_LIMIT))
+#define FUNC_SUPPORT_SET_VF_MAC_VLAN(hwdev) \
+ (!!(hifc_get_func_feature_cap(hwdev) & \
+ HIFC_FUNC_SUPP_SET_VF_MAC_VLAN))
+#define FUNC_SUPPORT_CHANGE_MAC(hwdev) \
+ (!!(hifc_get_func_feature_cap(hwdev) & \
+ HIFC_FUNC_SUPP_CHANGE_MAC))
+#define FUNC_FORCE_LINK_UP(hwdev) \
+ (!!(hifc_get_func_feature_cap(hwdev) & \
+ HIFC_FUNC_FORCE_LINK_UP))
+#define FUNC_SUPPORT_SCTP_CRC(hwdev) \
+ (!(hifc_get_func_feature_cap(hwdev) & \
+ HIFC_FUNC_OFFLOAD_OVS_UNSUPP))
+#define FUNC_SUPPORT_HW_VLAN(hwdev) \
+ (!(hifc_get_func_feature_cap(hwdev) & \
+ HIFC_FUNC_OFFLOAD_OVS_UNSUPP))
+#define FUNC_SUPPORT_LRO(hwdev) \
+ (!(hifc_get_func_feature_cap(hwdev) & \
+ HIFC_FUNC_OFFLOAD_OVS_UNSUPP))
+
+int hifc_init_hwdev(struct hifc_init_para *para);
+void hifc_free_hwdev(void *hwdev);
+int hifc_stateful_init(void *hwdev);
+void hifc_stateful_deinit(void *hwdev);
+bool hifc_is_hwdev_mod_inited(void *hwdev, enum hifc_hwdev_init_state state);
+u64 hifc_get_func_feature_cap(void *hwdev);
+int hifc_slq_init(void *dev, int num_wqs);
+void hifc_slq_uninit(void *dev);
+int hifc_slq_alloc(void *dev, u16 wqebb_size, u16 q_depth,
+ u16 page_size, u64 *cla_addr, void **handle);
+void hifc_slq_free(void *dev, void *handle);
+u64 hifc_slq_get_addr(void *handle, u16 index);
+u64 hifc_slq_get_first_pageaddr(void *handle);
+
+typedef void (*comm_up_self_msg_proc)(void *handle, void *buf_in,
+ u16 in_size, void *buf_out,
+ u16 *out_size);
+void hifc_comm_recv_mgmt_self_cmd_reg(void *hwdev, u8 cmd,
+ comm_up_self_msg_proc proc);
+void hifc_comm_recv_up_self_cmd_unreg(void *hwdev, u8 cmd);
+
+/* defined by chip */
+enum hifc_fault_type {
+ FAULT_TYPE_CHIP,
+ FAULT_TYPE_UCODE,
+ FAULT_TYPE_MEM_RD_TIMEOUT,
+ FAULT_TYPE_MEM_WR_TIMEOUT,
+ FAULT_TYPE_REG_RD_TIMEOUT,
+ FAULT_TYPE_REG_WR_TIMEOUT,
+ FAULT_TYPE_PHY_FAULT,
+ FAULT_TYPE_MAX,
+};
+
+/* defined by chip */
+enum hifc_fault_err_level {
+ /* default err_level=FAULT_LEVEL_FATAL if
+ * type==FAULT_TYPE_MEM_RD_TIMEOUT || FAULT_TYPE_MEM_WR_TIMEOUT ||
+ * FAULT_TYPE_REG_RD_TIMEOUT || FAULT_TYPE_REG_WR_TIMEOUT ||
+ * FAULT_TYPE_UCODE
+ * other: err_level in event.chip.err_level if type==FAULT_TYPE_CHIP
+ */
+ FAULT_LEVEL_FATAL,
+ FAULT_LEVEL_SERIOUS_RESET,
+ FAULT_LEVEL_SERIOUS_FLR,
+ FAULT_LEVEL_GENERAL,
+ FAULT_LEVEL_SUGGESTION,
+ FAULT_LEVEL_MAX
+};
+
+enum hifc_fault_source_type {
+ /* same as FAULT_TYPE_CHIP */
+ HIFC_FAULT_SRC_HW_MGMT_CHIP = 0,
+ /* same as FAULT_TYPE_UCODE */
+ HIFC_FAULT_SRC_HW_MGMT_UCODE,
+ /* same as FAULT_TYPE_MEM_RD_TIMEOUT */
+ HIFC_FAULT_SRC_HW_MGMT_MEM_RD_TIMEOUT,
+ /* same as FAULT_TYPE_MEM_WR_TIMEOUT */
+ HIFC_FAULT_SRC_HW_MGMT_MEM_WR_TIMEOUT,
+ /* same as FAULT_TYPE_REG_RD_TIMEOUT */
+ HIFC_FAULT_SRC_HW_MGMT_REG_RD_TIMEOUT,
+ /* same as FAULT_TYPE_REG_WR_TIMEOUT */
+ HIFC_FAULT_SRC_HW_MGMT_REG_WR_TIMEOUT,
+ HIFC_FAULT_SRC_SW_MGMT_UCODE,
+ HIFC_FAULT_SRC_MGMT_WATCHDOG,
+ HIFC_FAULT_SRC_MGMT_RESET = 8,
+ HIFC_FAULT_SRC_HW_PHY_FAULT,
+ HIFC_FAULT_SRC_HOST_HEARTBEAT_LOST = 20,
+ HIFC_FAULT_SRC_TYPE_MAX,
+};
+
+struct hifc_fault_sw_mgmt {
+ u8 event_id;
+ u64 event_data;
+};
+
+union hifc_fault_hw_mgmt {
+ u32 val[4];
+ /* valid only type==FAULT_TYPE_CHIP */
+ struct {
+ u8 node_id;
+ /* enum hifc_fault_err_level */
+ u8 err_level;
+ u16 err_type;
+ u32 err_csr_addr;
+ u32 err_csr_value;
+ /* func_id valid only err_level==FAULT_LEVEL_SERIOUS_FLR
+ */
+ u16 func_id;
+ u16 rsvd2;
+ } chip;
+
+ /* valid only type==FAULT_TYPE_UCODE */
+ struct {
+ u8 cause_id;
+ u8 core_id;
+ u8 c_id;
+ u8 rsvd3;
+ u32 epc;
+ u32 rsvd4;
+ u32 rsvd5;
+ } ucode;
+
+ /* valid only type==FAULT_TYPE_MEM_RD_TIMEOUT ||
+ * FAULT_TYPE_MEM_WR_TIMEOUT
+ */
+ struct {
+ u32 err_csr_ctrl;
+ u32 err_csr_data;
+ u32 ctrl_tab;
+ u32 mem_index;
+ } mem_timeout;
+
+ /* valid only type==FAULT_TYPE_REG_RD_TIMEOUT ||
+ * FAULT_TYPE_REG_WR_TIMEOUT
+ */
+ struct {
+ u32 err_csr;
+ u32 rsvd6;
+ u32 rsvd7;
+ u32 rsvd8;
+ } reg_timeout;
+
+ struct {
+ /* 0: read; 1: write */
+ u8 op_type;
+ u8 port_id;
+ u8 dev_ad;
+ u8 rsvd9;
+ u32 csr_addr;
+ u32 op_data;
+ u32 rsvd10;
+ } phy_fault;
+};
+
+/* defined by chip */
+struct hifc_fault_event {
+ /* enum hifc_fault_type */
+ u8 type;
+ u8 rsvd0[3];
+ union hifc_fault_hw_mgmt event;
+};
+
+struct hifc_fault_recover_info {
+ u8 fault_src; /* enum hifc_fault_source_type */
+ u8 fault_lev; /* enum hifc_fault_err_level */
+ u8 rsvd0[2];
+ union {
+ union hifc_fault_hw_mgmt hw_mgmt;
+ struct hifc_fault_sw_mgmt sw_mgmt;
+ u32 mgmt_rsvd[4];
+ u32 host_rsvd[4];
+ } fault_data;
+};
+
+struct hifc_dcb_state {
+ u8 dcb_on;
+ u8 default_cos;
+ u8 up_cos[8];
+};
+
+enum link_err_type {
+ LINK_ERR_MODULE_UNRECOGENIZED,
+ LINK_ERR_NUM,
+};
+
+enum port_module_event_type {
+ HIFC_PORT_MODULE_CABLE_PLUGGED,
+ HIFC_PORT_MODULE_CABLE_UNPLUGGED,
+ HIFC_PORT_MODULE_LINK_ERR,
+ HIFC_PORT_MODULE_MAX_EVENT,
+};
+
+struct hifc_port_module_event {
+ enum port_module_event_type type;
+ enum link_err_type err_type;
+};
+
+struct hifc_event_link_info {
+ u8 valid;
+ u8 port_type;
+ u8 autoneg_cap;
+ u8 autoneg_state;
+ u8 duplex;
+ u8 speed;
+};
+
+struct hifc_mctp_host_info {
+ u8 major_cmd;
+ u8 sub_cmd;
+ u8 rsvd[2];
+
+ u32 data_len;
+ void *data;
+};
+
+enum hifc_event_type {
+ HIFC_EVENT_LINK_DOWN = 0,
+ HIFC_EVENT_LINK_UP = 1,
+ HIFC_EVENT_HEART_LOST = 2,
+ HIFC_EVENT_FAULT = 3,
+ HIFC_EVENT_NOTIFY_VF_DCB_STATE = 4,
+ HIFC_EVENT_DCB_STATE_CHANGE = 5,
+ HIFC_EVENT_FMW_ACT_NTC = 6,
+ HIFC_EVENT_PORT_MODULE_EVENT = 7,
+ HIFC_EVENT_MCTP_GET_HOST_INFO,
+ HIFC_EVENT_MULTI_HOST_MGMT,
+ HIFC_EVENT_INIT_MIGRATE_PF,
+};
+
+struct hifc_event_info {
+ enum hifc_event_type type;
+ union {
+ struct hifc_event_link_info link_info;
+ struct hifc_fault_event info;
+ struct hifc_dcb_state dcb_state;
+ struct hifc_port_module_event module_event;
+ u8 vf_default_cos;
+ struct hifc_mctp_host_info mctp_info;
+ };
+};
+
+enum hifc_ucode_event_type {
+ HIFC_INTERNAL_TSO_FATAL_ERROR = 0x0,
+ HIFC_INTERNAL_LRO_FATAL_ERROR = 0x1,
+ HIFC_INTERNAL_TX_FATAL_ERROR = 0x2,
+ HIFC_INTERNAL_RX_FATAL_ERROR = 0x3,
+ HIFC_INTERNAL_OTHER_FATAL_ERROR = 0x4,
+ HIFC_NIC_FATAL_ERROR_MAX = 0x8,
+};
+
+typedef void (*hifc_event_handler)(void *handle,
+ struct hifc_event_info *event);
+/* only register once */
+void hifc_event_register(void *dev, void *pri_handle,
+ hifc_event_handler callback);
+void hifc_event_unregister(void *dev);
+
+void hifc_detect_hw_present(void *hwdev);
+
+void hifc_set_chip_absent(void *hwdev);
+
+int hifc_get_chip_present_flag(void *hwdev);
+
+void hifc_set_pcie_order_cfg(void *handle);
+
+int hifc_get_mgmt_channel_status(void *handle);
+
+struct hifc_board_info {
+ u32 board_type;
+ u32 port_num;
+ u32 port_speed;
+ u32 pcie_width;
+ u32 host_num;
+ u32 pf_num;
+ u32 vf_total_num;
+ u32 tile_num;
+ u32 qcm_num;
+ u32 core_num;
+ u32 work_mode;
+ u32 service_mode;
+ u32 pcie_mode;
+ u32 cfg_addr;
+ u32 boot_sel;
+ u32 board_id;
+};
+
+int hifc_get_board_info(void *hwdev, struct hifc_board_info *info);
+
+int hifc_get_card_present_state(void *hwdev, bool *card_present_state);
+
+#endif
diff --git a/drivers/scsi/huawei/hifc/hifc_hwdev.c b/drivers/scsi/huawei/hifc/hifc_hwdev.c
new file mode 100644
index 000000000000..760e02394b05
--- /dev/null
+++ b/drivers/scsi/huawei/hifc/hifc_hwdev.c
@@ -0,0 +1,3675 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei Hifc PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/pci_regs.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/completion.h>
+#include <linux/semaphore.h>
+#include <linux/interrupt.h>
+
+#include "hifc_knl_adp.h"
+#include "hifc_hw.h"
+#include "hifc_hwdev.h"
+#include "hifc_hwif.h"
+#include "hifc_api_cmd.h"
+#include "hifc_mgmt.h"
+#include "hifc_eqs.h"
+#include "hifc_wq.h"
+#include "hifc_cmdq.h"
+#include "hifc_hwif.h"
+
+#define HIFC_DEAULT_EQ_MSIX_PENDING_LIMIT 0
+#define HIFC_DEAULT_EQ_MSIX_COALESC_TIMER_CFG 0xFF
+#define HIFC_DEAULT_EQ_MSIX_RESEND_TIMER_CFG 7
+#define HIFC_FLR_TIMEOUT 1000
+#define HIFC_HT_GPA_PAGE_SIZE 4096UL
+#define HIFC_PPF_HT_GPA_SET_RETRY_TIMES 10
+#define HIFC_GET_SFP_INFO_REAL_TIME 0x1
+#define HIFC_GLB_SO_RO_CFG_SHIFT 0x0
+#define HIFC_GLB_SO_RO_CFG_MASK 0x1
+#define HIFC_DISABLE_ORDER 0
+#define HIFC_GLB_DMA_SO_RO_GET(val, member) \
+ (((val) >> HIFC_GLB_##member##_SHIFT) & HIFC_GLB_##member##_MASK)
+
+#define HIFC_GLB_DMA_SO_R0_CLEAR(val, member) \
+ ((val) & (~(HIFC_GLB_##member##_MASK << HIFC_GLB_##member##_SHIFT)))
+
+#define HIFC_GLB_DMA_SO_R0_SET(val, member) \
+ (((val) & HIFC_GLB_##member##_MASK) << HIFC_GLB_##member##_SHIFT)
+
+#define HIFC_MGMT_CHANNEL_STATUS_SHIFT 0x0
+#define HIFC_MGMT_CHANNEL_STATUS_MASK 0x1
+#define HIFC_ACTIVE_STATUS_MASK 0x80000000
+#define HIFC_ACTIVE_STATUS_CLEAR 0x7FFFFFFF
+
+#define HIFC_GET_MGMT_CHANNEL_STATUS(val, member) \
+ (((val) >> HIFC_##member##_SHIFT) & HIFC_##member##_MASK)
+
+#define HIFC_CLEAR_MGMT_CHANNEL_STATUS(val, member) \
+ ((val) & (~(HIFC_##member##_MASK << HIFC_##member##_SHIFT)))
+
+#define HIFC_SET_MGMT_CHANNEL_STATUS(val, member) \
+ (((val) & HIFC_##member##_MASK) << HIFC_##member##_SHIFT)
+
+#define HIFC_BOARD_IS_PHY(hwdev) \
+ ((hwdev)->board_info.board_type == 4 && \
+ (hwdev)->board_info.board_id == 24)
+
+struct comm_info_ht_gpa_set {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+ u32 rsvd1;
+ u32 rsvd2;
+ u64 page_pa0;
+ u64 page_pa1;
+};
+
+struct hifc_cons_idx_attr {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_idx;
+ u8 dma_attr_off;
+ u8 pending_limit;
+ u8 coalescing_time;
+ u8 intr_en;
+ u16 intr_idx;
+ u32 l2nic_sqn;
+ u32 sq_id;
+ u64 ci_addr;
+};
+
+struct hifc_clear_doorbell {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_idx;
+ u8 ppf_idx;
+ u8 rsvd1;
+};
+
+struct hifc_clear_resource {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_idx;
+ u8 ppf_idx;
+ u8 rsvd1;
+};
+
+struct hifc_msix_config {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 msix_index;
+ u8 pending_cnt;
+ u8 coalesct_timer_cnt;
+ u8 lli_tmier_cnt;
+ u8 lli_credit_cnt;
+ u8 resend_timer_cnt;
+ u8 rsvd1[3];
+};
+
+enum func_tmr_bitmap_status {
+ FUNC_TMR_BITMAP_DISABLE,
+ FUNC_TMR_BITMAP_ENABLE,
+};
+
+struct hifc_func_tmr_bitmap_op {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_idx;
+ u8 op_id; /* 0:start; 1:stop */
+ u8 ppf_idx;
+ u32 rsvd1;
+};
+
+struct hifc_ppf_tmr_op {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 ppf_idx;
+ u8 op_id; /* 0: stop timer; 1:start timer */
+ u8 rsvd1[2];
+ u32 rsvd2;
+};
+
+struct hifc_cmd_set_res_state {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_idx;
+ u8 state;
+ u8 rsvd1;
+ u32 rsvd2;
+};
+
+int hifc_hw_rx_buf_size[] = {
+ HIFC_RX_BUF_SIZE_32B,
+ HIFC_RX_BUF_SIZE_64B,
+ HIFC_RX_BUF_SIZE_96B,
+ HIFC_RX_BUF_SIZE_128B,
+ HIFC_RX_BUF_SIZE_192B,
+ HIFC_RX_BUF_SIZE_256B,
+ HIFC_RX_BUF_SIZE_384B,
+ HIFC_RX_BUF_SIZE_512B,
+ HIFC_RX_BUF_SIZE_768B,
+ HIFC_RX_BUF_SIZE_1K,
+ HIFC_RX_BUF_SIZE_1_5K,
+ HIFC_RX_BUF_SIZE_2K,
+ HIFC_RX_BUF_SIZE_3K,
+ HIFC_RX_BUF_SIZE_4K,
+ HIFC_RX_BUF_SIZE_8K,
+ HIFC_RX_BUF_SIZE_16K,
+};
+
+struct hifc_comm_board_info {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ struct hifc_board_info info;
+
+ u32 rsvd1[4];
+};
+
+#define PHY_DOING_INIT_TIMEOUT (15 * 1000)
+
+struct hifc_phy_init_status {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 init_status;
+ u8 rsvd1[3];
+};
+
+enum phy_init_status_type {
+ PHY_INIT_DOING = 0,
+ PHY_INIT_SUCCESS = 1,
+ PHY_INIT_FAIL = 2,
+ PHY_NONSUPPORT = 3,
+};
+
+struct hifc_update_active {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u32 update_flag;
+ u32 update_status;
+};
+
+struct hifc_mgmt_watchdog_info {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u32 curr_time_h;
+ u32 curr_time_l;
+ u32 task_id;
+ u32 rsv;
+
+ u32 reg[13];
+ u32 pc;
+ u32 lr;
+ u32 cpsr;
+
+ u32 stack_top;
+ u32 stack_bottom;
+ u32 sp;
+ u32 curr_used;
+ u32 peak_used;
+ u32 is_overflow;
+
+ u32 stack_actlen;
+ u8 data[1024];
+};
+
+struct hifc_fmw_act_ntc {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u32 rsvd1[5];
+};
+
+#define HIFC_PAGE_SIZE_HW(pg_size) ((u8)ilog2((u32)((pg_size) >> 12)))
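+
+/*
+ * Example: as the page_size field comment below notes, the real page size is
+ * 4 KB * 2^page_size, so HIFC_PAGE_SIZE_HW(0x1000) = ilog2(1) = 0 (4 KB pages)
+ * and HIFC_PAGE_SIZE_HW(0x10000) = ilog2(16) = 4 (64 KB pages).
+ */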
+
+struct hifc_wq_page_size {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_idx;
+ u8 ppf_idx;
+ /* real_size=4KB*2^page_size, range(0~20) must be checked by driver */
+ u8 page_size;
+
+ u32 rsvd1;
+};
+
+#define MAX_PCIE_DFX_BUF_SIZE (1024)
+
+struct hifc_pcie_dfx_ntc {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ int len;
+ u32 rsvd;
+};
+
+struct hifc_pcie_dfx_info {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 host_id;
+ u8 last;
+ u8 rsvd[2];
+ u32 offset;
+
+ u8 data[MAX_PCIE_DFX_BUF_SIZE];
+};
+
+struct hifc_reg_info {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u32 reg_addr;
+ u32 val_length;
+
+ u32 data[2];
+};
+
+#define HIFC_DMA_ATTR_ENTRY_ST_SHIFT 0
+#define HIFC_DMA_ATTR_ENTRY_AT_SHIFT 8
+#define HIFC_DMA_ATTR_ENTRY_PH_SHIFT 10
+#define HIFC_DMA_ATTR_ENTRY_NO_SNOOPING_SHIFT 12
+#define HIFC_DMA_ATTR_ENTRY_TPH_EN_SHIFT 13
+
+#define HIFC_DMA_ATTR_ENTRY_ST_MASK 0xFF
+#define HIFC_DMA_ATTR_ENTRY_AT_MASK 0x3
+#define HIFC_DMA_ATTR_ENTRY_PH_MASK 0x3
+#define HIFC_DMA_ATTR_ENTRY_NO_SNOOPING_MASK 0x1
+#define HIFC_DMA_ATTR_ENTRY_TPH_EN_MASK 0x1
+
+#define HIFC_DMA_ATTR_ENTRY_SET(val, member) \
+ (((u32)(val) & HIFC_DMA_ATTR_ENTRY_##member##_MASK) << \
+ HIFC_DMA_ATTR_ENTRY_##member##_SHIFT)
+
+#define HIFC_DMA_ATTR_ENTRY_CLEAR(val, member) \
+ ((val) & (~(HIFC_DMA_ATTR_ENTRY_##member##_MASK \
+ << HIFC_DMA_ATTR_ENTRY_##member##_SHIFT)))
+
+#define HIFC_PCIE_ST_DISABLE 0
+#define HIFC_PCIE_AT_DISABLE 0
+#define HIFC_PCIE_PH_DISABLE 0
+
+#define PCIE_MSIX_ATTR_ENTRY 0
+
+#define HIFC_CHIP_PRESENT 1
+#define HIFC_CHIP_ABSENT 0
+
+struct hifc_cmd_fault_event {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ struct hifc_fault_event event;
+};
+
+#define HEARTBEAT_DRV_MAGIC_ACK 0x5A5A5A5A
+
+struct hifc_heartbeat_event {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 mgmt_init_state;
+ u8 rsvd1[3];
+ u32 heart; /* increased every event */
+ u32 drv_heart;
+};
+
+static void hifc_set_mgmt_channel_status(void *handle, bool state)
+{
+ struct hifc_hwdev *hwdev = handle;
+ u32 val;
+
+ if (!hwdev || hifc_func_type(hwdev) == TYPE_VF ||
+ !(hwdev->feature_cap & HIFC_FUNC_SUPP_DFX_REG))
+ return;
+
+ val = hifc_hwif_read_reg(hwdev->hwif, HIFC_ICPL_RESERVD_ADDR);
+ val = HIFC_CLEAR_MGMT_CHANNEL_STATUS(val, MGMT_CHANNEL_STATUS);
+ val |= HIFC_SET_MGMT_CHANNEL_STATUS((u32)state, MGMT_CHANNEL_STATUS);
+
+ hifc_hwif_write_reg(hwdev->hwif, HIFC_ICPL_RESERVD_ADDR, val);
+}
+
+static void hifc_enable_mgmt_channel(void *hwdev, void *buf_out)
+{
+ struct hifc_hwdev *dev = hwdev;
+ struct hifc_update_active *active_info = buf_out;
+
+ if (!active_info || hifc_func_type(hwdev) == TYPE_VF ||
+ !(dev->feature_cap & HIFC_FUNC_SUPP_DFX_REG))
+ return;
+
+ if ((!active_info->status) &&
+ (active_info->update_status & HIFC_ACTIVE_STATUS_MASK)) {
+ active_info->update_status &= HIFC_ACTIVE_STATUS_CLEAR;
+ return;
+ }
+
+ hifc_set_mgmt_channel_status(hwdev, false);
+}
+
+int hifc_set_wq_page_size(struct hifc_hwdev *hwdev, u16 func_idx,
+ u32 page_size);
+
+#define HIFC_QUEUE_MIN_DEPTH 6
+#define HIFC_QUEUE_MAX_DEPTH 12
+#define HIFC_MAX_RX_BUFFER_SIZE 15
+
+#define ROOT_CTX_QPS_VALID(root_ctxt) \
+ ((root_ctxt)->rq_depth >= HIFC_QUEUE_MIN_DEPTH && \
+ (root_ctxt)->rq_depth <= HIFC_QUEUE_MAX_DEPTH && \
+ (root_ctxt)->sq_depth >= HIFC_QUEUE_MIN_DEPTH && \
+ (root_ctxt)->sq_depth <= HIFC_QUEUE_MAX_DEPTH && \
+ (root_ctxt)->rx_buf_sz <= HIFC_MAX_RX_BUFFER_SIZE)
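+
+/*
+ * Note: the rq_depth/sq_depth fields checked above hold log2-encoded values
+ * (hifc_set_root_ctxt() below stores ilog2() of the real depth), so the
+ * 6..12 bounds correspond to real queue depths of 64..4096 entries, and
+ * rx_buf_sz is the hardware buffer size index (0..15), not a byte count.
+ */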
+
+struct hifc_mgmt_status_log {
+ u8 status;
+ const char *log;
+};
+
+struct hifc_mgmt_status_log mgmt_status_log[] = {
+ {HIFC_MGMT_STATUS_ERR_PARAM, "Invalid parameter"},
+ {HIFC_MGMT_STATUS_ERR_FAILED, "Operation failed"},
+ {HIFC_MGMT_STATUS_ERR_PORT, "Invalid port"},
+ {HIFC_MGMT_STATUS_ERR_TIMEOUT, "Operation time out"},
+	{HIFC_MGMT_STATUS_ERR_NOMATCH, "Version mismatch"},
+ {HIFC_MGMT_STATUS_ERR_EXIST, "Entry exists"},
+ {HIFC_MGMT_STATUS_ERR_NOMEM, "Out of memory"},
+ {HIFC_MGMT_STATUS_ERR_INIT, "Feature not initialized"},
+ {HIFC_MGMT_STATUS_ERR_FAULT, "Invalid address"},
+ {HIFC_MGMT_STATUS_ERR_PERM, "Operation not permitted"},
+ {HIFC_MGMT_STATUS_ERR_EMPTY, "Table empty"},
+ {HIFC_MGMT_STATUS_ERR_FULL, "Table full"},
+ {HIFC_MGMT_STATUS_ERR_NOT_FOUND, "Not found"},
+	{HIFC_MGMT_STATUS_ERR_BUSY, "Device or resource busy"},
+	{HIFC_MGMT_STATUS_ERR_RESOURCE, "No resources for operation"},
+ {HIFC_MGMT_STATUS_ERR_CONFIG, "Invalid configuration"},
+ {HIFC_MGMT_STATUS_ERR_UNAVAIL, "Feature unavailable"},
+ {HIFC_MGMT_STATUS_ERR_CRC, "CRC check failed"},
+ {HIFC_MGMT_STATUS_ERR_NXIO, "No such device or address"},
+	{HIFC_MGMT_STATUS_ERR_ROLLBACK, "Chip rollback failed"},
+ {HIFC_MGMT_STATUS_ERR_LEN, "Length too short or too long"},
+ {HIFC_MGMT_STATUS_ERR_UNSUPPORT, "Feature not supported"},
+};
+
+static void __print_status_info(struct hifc_hwdev *dev,
+ enum hifc_mod_type mod, u8 cmd, int index)
+{
+ if (mod == HIFC_MOD_COMM) {
+ sdk_err(dev->dev_hdl, "Mgmt process mod(0x%x) cmd(0x%x) fail: %s",
+ mod, cmd, mgmt_status_log[index].log);
+ } else if (mod == HIFC_MOD_L2NIC ||
+ mod == HIFC_MOD_HILINK) {
+ sdk_err(dev->dev_hdl, "Mgmt process mod(0x%x) cmd(0x%x) fail: %s",
+ mod, cmd, mgmt_status_log[index].log);
+ }
+}
+
+static bool hifc_status_need_special_handle(enum hifc_mod_type mod,
+ u8 cmd, u8 status)
+{
+ if (mod == HIFC_MOD_L2NIC) {
+ /* optical module isn't plugged in */
+ if (((cmd == HIFC_PORT_CMD_GET_STD_SFP_INFO) ||
+ (cmd == HIFC_PORT_CMD_GET_SFP_INFO)) &&
+ (status == HIFC_MGMT_STATUS_ERR_NXIO))
+ return true;
+
+ if ((cmd == HIFC_PORT_CMD_SET_MAC ||
+ cmd == HIFC_PORT_CMD_UPDATE_MAC) &&
+ status == HIFC_MGMT_STATUS_ERR_EXIST)
+ return true;
+ }
+
+ return false;
+}
+
+static bool print_status_info_valid(enum hifc_mod_type mod,
+ const void *buf_out)
+{
+ if (!buf_out)
+ return false;
+
+ if (mod != HIFC_MOD_COMM && mod != HIFC_MOD_L2NIC &&
+ mod != HIFC_MOD_HILINK)
+ return false;
+
+ return true;
+}
+
+static void hifc_print_status_info(void *hwdev, enum hifc_mod_type mod,
+ u8 cmd, const void *buf_out)
+{
+ struct hifc_hwdev *dev = hwdev;
+ int i, size;
+ u8 status;
+
+ if (!print_status_info_valid(mod, buf_out))
+ return;
+
+ status = *(u8 *)buf_out;
+
+ if (!status)
+ return;
+
+ if (hifc_status_need_special_handle(mod, cmd, status))
+ return;
+
+ size = sizeof(mgmt_status_log) / sizeof(mgmt_status_log[0]);
+ for (i = 0; i < size; i++) {
+ if (status == mgmt_status_log[i].status) {
+ __print_status_info(dev, mod, cmd, i);
+ return;
+ }
+ }
+
+ if (mod == HIFC_MOD_COMM) {
+ sdk_err(dev->dev_hdl, "Mgmt process mod(0x%x) cmd(0x%x) return driver unknown status(0x%x)\n",
+ mod, cmd, status);
+ } else if (mod == HIFC_MOD_L2NIC || mod == HIFC_MOD_HILINK) {
+ sdk_err(dev->dev_hdl, "Mgmt process mod(0x%x) cmd(0x%x) return driver unknown status(0x%x)\n",
+ mod, cmd, status);
+ }
+}
+
+void hifc_set_chip_present(void *hwdev)
+{
+ ((struct hifc_hwdev *)hwdev)->chip_present_flag = HIFC_CHIP_PRESENT;
+}
+
+void hifc_set_chip_absent(void *hwdev)
+{
+ struct hifc_hwdev *dev = hwdev;
+
+ sdk_err(dev->dev_hdl, "Card not present\n");
+ dev->chip_present_flag = HIFC_CHIP_ABSENT;
+}
+
+int hifc_get_chip_present_flag(void *hwdev)
+{
+ int flag;
+
+ if (!hwdev)
+ return -EINVAL;
+ flag = ((struct hifc_hwdev *)hwdev)->chip_present_flag;
+ return flag;
+}
+
+void hifc_force_complete_all(void *hwdev)
+{
+ struct hifc_hwdev *dev = (struct hifc_hwdev *)hwdev;
+ struct hifc_recv_msg *recv_resp_msg;
+
+ set_bit(HIFC_HWDEV_STATE_BUSY, &dev->func_state);
+
+ if (hifc_func_type(dev) != TYPE_VF &&
+ hifc_is_hwdev_mod_inited(dev, HIFC_HWDEV_MGMT_INITED)) {
+ recv_resp_msg = &dev->pf_to_mgmt->recv_resp_msg_from_mgmt;
+ if (dev->pf_to_mgmt->event_flag == SEND_EVENT_START) {
+ complete(&recv_resp_msg->recv_done);
+ dev->pf_to_mgmt->event_flag = SEND_EVENT_TIMEOUT;
+ }
+ }
+
+ /* only flush sync cmdq to avoid blocking remove */
+ if (hifc_is_hwdev_mod_inited(dev, HIFC_HWDEV_CMDQ_INITED))
+ hifc_cmdq_flush_cmd(hwdev,
+ &dev->cmdqs->cmdq[HIFC_CMDQ_SYNC]);
+
+ clear_bit(HIFC_HWDEV_STATE_BUSY, &dev->func_state);
+}
+
+void hifc_detect_hw_present(void *hwdev)
+{
+ u32 addr, attr1;
+
+ addr = HIFC_CSR_FUNC_ATTR1_ADDR;
+ attr1 = hifc_hwif_read_reg(((struct hifc_hwdev *)hwdev)->hwif, addr);
+ if (attr1 == HIFC_PCIE_LINK_DOWN) {
+ hifc_set_chip_absent(hwdev);
+ hifc_force_complete_all(hwdev);
+ }
+}
+
+void hifc_record_pcie_error(void *hwdev)
+{
+ struct hifc_hwdev *dev = (struct hifc_hwdev *)hwdev;
+
+ if (!hwdev)
+ return;
+
+ atomic_inc(&dev->hw_stats.fault_event_stats.pcie_fault_stats);
+}
+
+static inline void __set_heartbeat_ehd_detect_delay(struct hifc_hwdev *hwdev,
+ u32 delay_ms)
+{
+ hwdev->heartbeat_ehd.start_detect_jiffies =
+ jiffies + msecs_to_jiffies(delay_ms);
+}
+
+static int __pf_to_mgmt_pre_handle(struct hifc_hwdev *hwdev,
+ enum hifc_mod_type mod, u8 cmd)
+{
+ if (hifc_get_mgmt_channel_status(hwdev)) {
+ if (mod == HIFC_MOD_COMM || mod == HIFC_MOD_L2NIC)
+ return HIFC_DEV_BUSY_ACTIVE_FW;
+ else
+ return -EBUSY;
+ }
+
+	/* Set channel invalid, sending other cmds is not allowed */
+ if (mod == HIFC_MOD_COMM && cmd == HIFC_MGMT_CMD_ACTIVATE_FW) {
+ hifc_set_mgmt_channel_status(hwdev, true);
+		/* stop heartbeat enhanced detection temporarily; it will
+		 * restart on the firmware active event when mgmt is reset
+		 */
+ __set_heartbeat_ehd_detect_delay(hwdev,
+ HIFC_DEV_ACTIVE_FW_TIMEOUT);
+ }
+
+ return 0;
+}
+
+static void __pf_to_mgmt_after_handle(struct hifc_hwdev *hwdev,
+ enum hifc_mod_type mod, u8 cmd,
+ int sw_status, void *mgmt_status)
+{
+	/* if activating fw failed, set channel valid */
+ if (mod == HIFC_MOD_COMM &&
+ cmd == HIFC_MGMT_CMD_ACTIVATE_FW) {
+ if (sw_status)
+ hifc_set_mgmt_channel_status(hwdev, false);
+ else
+ hifc_enable_mgmt_channel(hwdev, mgmt_status);
+ }
+}
+
+int hifc_pf_msg_to_mgmt_sync(void *hwdev, enum hifc_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size, u32 timeout)
+{
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ if (!((struct hifc_hwdev *)hwdev)->chip_present_flag)
+ return -EPERM;
+
+ if (!hifc_is_hwdev_mod_inited(hwdev, HIFC_HWDEV_MGMT_INITED))
+ return -EPERM;
+
+ if (in_size > HIFC_MSG_TO_MGMT_MAX_LEN)
+ return -EINVAL;
+
+ err = __pf_to_mgmt_pre_handle(hwdev, mod, cmd);
+ if (err)
+ return err;
+
+ err = hifc_pf_to_mgmt_sync(hwdev, mod, cmd, buf_in, in_size,
+ buf_out, out_size, timeout);
+ __pf_to_mgmt_after_handle(hwdev, mod, cmd, err, buf_out);
+
+ return err;
+}
+
+static bool is_sfp_info_cmd_cached(struct hifc_hwdev *hwdev,
+ enum hifc_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hifc_cmd_get_sfp_qsfp_info *sfp_info;
+ struct hifc_port_routine_cmd *rt_cmd;
+ struct card_node *chip_node = hwdev->chip_node;
+
+ sfp_info = buf_in;
+ if (!chip_node->rt_cmd || sfp_info->port_id >= HIFC_MAX_PORT_ID ||
+ *out_size < sizeof(*sfp_info))
+ return false;
+
+ if (sfp_info->version == HIFC_GET_SFP_INFO_REAL_TIME)
+ return false;
+
+ rt_cmd = &chip_node->rt_cmd[sfp_info->port_id];
+ mutex_lock(&chip_node->sfp_mutex);
+ memcpy(buf_out, &rt_cmd->sfp_info, sizeof(*sfp_info));
+ mutex_unlock(&chip_node->sfp_mutex);
+
+ return true;
+}
+
+static bool is_sfp_abs_cmd_cached(struct hifc_hwdev *hwdev,
+ enum hifc_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hifc_cmd_get_light_module_abs *abs;
+ struct hifc_port_routine_cmd *rt_cmd;
+ struct card_node *chip_node = hwdev->chip_node;
+
+ abs = buf_in;
+ if (!chip_node->rt_cmd || abs->port_id >= HIFC_MAX_PORT_ID ||
+ *out_size < sizeof(*abs))
+ return false;
+
+ if (abs->version == HIFC_GET_SFP_INFO_REAL_TIME)
+ return false;
+
+ rt_cmd = &chip_node->rt_cmd[abs->port_id];
+ mutex_lock(&chip_node->sfp_mutex);
+ memcpy(buf_out, &rt_cmd->abs, sizeof(*abs));
+ mutex_unlock(&chip_node->sfp_mutex);
+
+ return true;
+}
+
+static bool driver_processed_cmd(struct hifc_hwdev *hwdev,
+ enum hifc_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct card_node *chip_node = hwdev->chip_node;
+
+ if (mod == HIFC_MOD_L2NIC) {
+ if (cmd == HIFC_PORT_CMD_GET_SFP_INFO &&
+ chip_node->rt_cmd->up_send_sfp_info) {
+ return is_sfp_info_cmd_cached(hwdev, mod, cmd, buf_in,
+ in_size, buf_out,
+ out_size);
+ } else if (cmd == HIFC_PORT_CMD_GET_SFP_ABS &&
+ chip_node->rt_cmd->up_send_sfp_abs) {
+ return is_sfp_abs_cmd_cached(hwdev, mod, cmd, buf_in,
+ in_size, buf_out,
+ out_size);
+ }
+ }
+
+ return false;
+}
+
+static int send_sync_mgmt_msg(void *hwdev, enum hifc_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size, u32 timeout)
+{
+ unsigned long end;
+
+ end = jiffies + msecs_to_jiffies(HIFC_DEV_ACTIVE_FW_TIMEOUT);
+ do {
+ if (!hifc_get_mgmt_channel_status(hwdev) ||
+ !hifc_get_chip_present_flag(hwdev))
+ break;
+
+ msleep(1000);
+ } while (time_before(jiffies, end));
+
+ if (driver_processed_cmd(hwdev, mod, cmd, buf_in, in_size, buf_out,
+ out_size))
+ return 0;
+
+ return hifc_pf_msg_to_mgmt_sync(hwdev, mod, cmd, buf_in, in_size,
+ buf_out, out_size, timeout);
+}
+
+int hifc_msg_to_mgmt_sync(void *hwdev, enum hifc_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size, u32 timeout)
+{
+ struct hifc_hwdev *dev = hwdev;
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ if (!(dev->chip_present_flag))
+ return -EPERM;
+
+ err = send_sync_mgmt_msg(hwdev, mod, cmd, buf_in, in_size,
+ buf_out, out_size, timeout);
+
+ hifc_print_status_info(hwdev, mod, cmd, buf_out);
+
+ return err;
+}
+
+/* PF/VF send msg to uP by api cmd, and return immediately */
+int hifc_msg_to_mgmt_async(void *hwdev, enum hifc_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size)
+{
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ if (!(((struct hifc_hwdev *)hwdev)->chip_present_flag) ||
+ !hifc_is_hwdev_mod_inited(hwdev, HIFC_HWDEV_MGMT_INITED) ||
+ hifc_get_mgmt_channel_status(hwdev))
+ return -EPERM;
+
+ if (hifc_func_type(hwdev) == TYPE_VF) {
+ err = -EFAULT;
+ sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl,
+			"Mailbox doesn't support async cmd\n");
+ } else {
+ err = hifc_pf_to_mgmt_async(hwdev, mod, cmd, buf_in, in_size);
+ }
+
+ return err;
+}
+
+int hifc_msg_to_mgmt_no_ack(void *hwdev, enum hifc_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size)
+{
+ struct hifc_hwdev *dev = hwdev;
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ if (!(dev->chip_present_flag))
+ return -EPERM;
+
+ err = hifc_pf_to_mgmt_no_ack(hwdev, mod, cmd, buf_in, in_size);
+
+ return err;
+}
+
+/**
+ * hifc_cpu_to_be32 - convert data to big endian 32 bit format
+ * @data: the data to convert
+ * @len: length of data to convert, must be a multiple of 4 bytes
+ **/
+void hifc_cpu_to_be32(void *data, int len)
+{
+ int i, chunk_sz = sizeof(u32);
+ u32 *mem = data;
+
+ if (!data)
+ return;
+
+ len = len / chunk_sz;
+
+ for (i = 0; i < len; i++) {
+ *mem = cpu_to_be32(*mem);
+ mem++;
+ }
+}
+
+/**
+ * hifc_be32_to_cpu - convert data from big endian 32 bit format
+ * @data: the data to convert
+ * @len: length of data to convert, must be a multiple of 4 bytes
+ **/
+void hifc_be32_to_cpu(void *data, int len)
+{
+ int i, chunk_sz = sizeof(u32);
+ u32 *mem = data;
+
+ if (!data)
+ return;
+
+ len = len / chunk_sz;
+
+ for (i = 0; i < len; i++) {
+ *mem = be32_to_cpu(*mem);
+ mem++;
+ }
+}
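+
+/*
+ * Usage note: both converters walk the buffer in 4-byte chunks, so callers
+ * pass a length that is a multiple of 4; any trailing 1-3 bytes are left
+ * untouched because of the integer division by sizeof(u32).
+ */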
+
+/**
+ * hifc_set_sge - set dma area in scatter gather entry
+ * @sge: scatter gather entry
+ * @addr: dma address
+ * @len: length of relevant data in the dma address
+ **/
+void hifc_set_sge(struct hifc_sge *sge, dma_addr_t addr, u32 len)
+{
+ sge->hi_addr = upper_32_bits(addr);
+ sge->lo_addr = lower_32_bits(addr);
+ sge->len = len;
+}
+
+int hifc_set_ci_table(void *hwdev, u16 q_id, struct hifc_sq_attr *attr)
+{
+ struct hifc_cons_idx_attr cons_idx_attr = {0};
+ u16 out_size = sizeof(cons_idx_attr);
+ int err;
+
+ if (!hwdev || !attr)
+ return -EINVAL;
+
+ err = hifc_global_func_id_get(hwdev, &cons_idx_attr.func_idx);
+ if (err)
+ return err;
+
+ cons_idx_attr.dma_attr_off = attr->dma_attr_off;
+ cons_idx_attr.pending_limit = attr->pending_limit;
+ cons_idx_attr.coalescing_time = attr->coalescing_time;
+
+ if (attr->intr_en) {
+ cons_idx_attr.intr_en = attr->intr_en;
+ cons_idx_attr.intr_idx = attr->intr_idx;
+ }
+
+ cons_idx_attr.l2nic_sqn = attr->l2nic_sqn;
+ cons_idx_attr.sq_id = q_id;
+
+ cons_idx_attr.ci_addr = attr->ci_dma_base;
+
+ err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM,
+ HIFC_MGMT_CMD_L2NIC_SQ_CI_ATTR_SET,
+ &cons_idx_attr, sizeof(cons_idx_attr),
+ &cons_idx_attr, &out_size, 0);
+ if (err || !out_size || cons_idx_attr.status) {
+ sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl,
+ "Failed to set ci attribute table, err: %d, status: 0x%x, out_size: 0x%x\n",
+ err, cons_idx_attr.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int hifc_set_cmdq_depth(struct hifc_hwdev *hwdev, u16 cmdq_depth)
+{
+ struct hifc_root_ctxt root_ctxt = {0};
+ u16 out_size = sizeof(root_ctxt);
+ int err;
+
+ err = hifc_global_func_id_get(hwdev, &root_ctxt.func_idx);
+ if (err)
+ return err;
+
+ root_ctxt.ppf_idx = hifc_ppf_idx(hwdev);
+
+ root_ctxt.set_cmdq_depth = 1;
+ root_ctxt.cmdq_depth = (u8)ilog2(cmdq_depth);
+
+ err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM,
+ HIFC_MGMT_CMD_VAT_SET,
+ &root_ctxt, sizeof(root_ctxt),
+ &root_ctxt, &out_size, 0);
+ if (err || !out_size || root_ctxt.status) {
+ sdk_err(hwdev->dev_hdl, "Failed to set cmdq depth, err: %d, status: 0x%x, out_size: 0x%x\n",
+ err, root_ctxt.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
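+
+/*
+ * Example: as with the root context queue depths below, the cmdq depth is
+ * log2-encoded before being sent to the management CPU, so a requested depth
+ * of 4096 entries is transmitted as cmdq_depth = ilog2(4096) = 12.
+ */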
+
+static u16 get_hw_rx_buf_size(int rx_buf_sz)
+{
+#define DEFAULT_RX_BUF_SIZE ((u16)0xB)
+ u16 num_hw_types =
+ sizeof(hifc_hw_rx_buf_size) /
+ sizeof(hifc_hw_rx_buf_size[0]);
+ u16 i;
+
+ for (i = 0; i < num_hw_types; i++) {
+ if (hifc_hw_rx_buf_size[i] == rx_buf_sz)
+ return i;
+ }
+
+ pr_err("Chip can't support rx buf size of %d\n", rx_buf_sz);
+
+ return DEFAULT_RX_BUF_SIZE;
+}
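+
+/*
+ * Example: the chip only supports the discrete buffer sizes listed in
+ * hifc_hw_rx_buf_size[], identified by index. Assuming HIFC_RX_BUF_SIZE_2K
+ * is 2048 (the define is not visible here), a 2 KB receive buffer maps to
+ * index 11 (0xB), which is also the DEFAULT_RX_BUF_SIZE fallback used for
+ * unsupported sizes.
+ */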
+
+int hifc_set_root_ctxt(void *hwdev, u16 rq_depth, u16 sq_depth, int rx_buf_sz)
+{
+ struct hifc_root_ctxt root_ctxt = {0};
+ u16 out_size = sizeof(root_ctxt);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ err = hifc_global_func_id_get(hwdev, &root_ctxt.func_idx);
+ if (err)
+ return err;
+
+ root_ctxt.ppf_idx = hifc_ppf_idx(hwdev);
+
+ root_ctxt.set_cmdq_depth = 0;
+ root_ctxt.cmdq_depth = 0;
+
+ root_ctxt.lro_en = 1;
+
+ root_ctxt.rq_depth = (u16)ilog2(rq_depth);
+ root_ctxt.rx_buf_sz = get_hw_rx_buf_size(rx_buf_sz);
+ root_ctxt.sq_depth = (u16)ilog2(sq_depth);
+
+ err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM,
+ HIFC_MGMT_CMD_VAT_SET,
+ &root_ctxt, sizeof(root_ctxt),
+ &root_ctxt, &out_size, 0);
+ if (err || !out_size || root_ctxt.status) {
+ sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl,
+ "Failed to set root context, err: %d, status: 0x%x, out_size: 0x%x\n",
+ err, root_ctxt.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int hifc_clean_root_ctxt(void *hwdev)
+{
+ struct hifc_root_ctxt root_ctxt = {0};
+ u16 out_size = sizeof(root_ctxt);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ err = hifc_global_func_id_get(hwdev, &root_ctxt.func_idx);
+ if (err)
+ return err;
+
+ root_ctxt.ppf_idx = hifc_ppf_idx(hwdev);
+
+ err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM,
+ HIFC_MGMT_CMD_VAT_SET,
+ &root_ctxt, sizeof(root_ctxt),
+ &root_ctxt, &out_size, 0);
+ if (err || !out_size || root_ctxt.status) {
+ sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl,
+			"Failed to clean root context, err: %d, status: 0x%x, out_size: 0x%x\n",
+ err, root_ctxt.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int wait_for_flr_finish(struct hifc_hwif *hwif)
+{
+ u32 cnt = 0;
+ enum hifc_pf_status status;
+
+ while (cnt < HIFC_FLR_TIMEOUT) {
+ status = hifc_get_pf_status(hwif);
+ if (status == HIFC_PF_STATUS_FLR_FINISH_FLAG) {
+ hifc_set_pf_status(hwif, HIFC_PF_STATUS_ACTIVE_FLAG);
+ return 0;
+ }
+
+ usleep_range(9900, 10000);
+ cnt++;
+ }
+
+ return -EFAULT;
+}
+
+#define HIFC_WAIT_CMDQ_IDLE_TIMEOUT 5000
+
+static int wait_cmdq_stop(struct hifc_hwdev *hwdev)
+{
+ enum hifc_cmdq_type cmdq_type;
+ struct hifc_cmdqs *cmdqs = hwdev->cmdqs;
+ u32 cnt = 0;
+ int err = 0;
+
+ if (!(cmdqs->status & HIFC_CMDQ_ENABLE))
+ return 0;
+
+ cmdqs->status &= ~HIFC_CMDQ_ENABLE;
+
+ while (cnt < HIFC_WAIT_CMDQ_IDLE_TIMEOUT && hwdev->chip_present_flag) {
+ err = 0;
+ cmdq_type = HIFC_CMDQ_SYNC;
+ for (; cmdq_type < HIFC_MAX_CMDQ_TYPES; cmdq_type++) {
+ if (!hifc_cmdq_idle(&cmdqs->cmdq[cmdq_type])) {
+ err = -EBUSY;
+ break;
+ }
+ }
+
+ if (!err)
+ return 0;
+
+ usleep_range(500, 1000);
+ cnt++;
+ }
+
+ cmdq_type = HIFC_CMDQ_SYNC;
+ for (; cmdq_type < HIFC_MAX_CMDQ_TYPES; cmdq_type++) {
+ if (!hifc_cmdq_idle(&cmdqs->cmdq[cmdq_type]))
+ sdk_err(hwdev->dev_hdl, "Cmdq %d busy\n", cmdq_type);
+ }
+
+ cmdqs->status |= HIFC_CMDQ_ENABLE;
+
+ return err;
+}
+
+static int hifc_pf_rx_tx_flush(struct hifc_hwdev *hwdev)
+{
+ struct hifc_hwif *hwif = hwdev->hwif;
+ struct hifc_clear_doorbell clear_db = {0};
+ struct hifc_clear_resource clr_res = {0};
+ u16 out_size, func_id;
+ int err;
+ int ret = 0;
+
+	/* wait for ucode to stop I/O */
+ msleep(100);
+
+ err = wait_cmdq_stop(hwdev);
+ if (err) {
+		sdk_warn(hwdev->dev_hdl, "CMDQ is still working, please check whether the CMDQ timeout value is reasonable\n");
+ ret = err;
+ }
+
+ hifc_disable_doorbell(hwif);
+
+ out_size = sizeof(clear_db);
+ func_id = hifc_global_func_id_hw(hwdev);
+ clear_db.func_idx = func_id;
+ clear_db.ppf_idx = HIFC_HWIF_PPF_IDX(hwif);
+
+ err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM,
+ HIFC_MGMT_CMD_FLUSH_DOORBELL, &clear_db,
+ sizeof(clear_db), &clear_db, &out_size, 0);
+ if (err || !out_size || clear_db.status) {
+ sdk_warn(hwdev->dev_hdl, "Failed to flush doorbell, err: %d, status: 0x%x, out_size: 0x%x\n",
+ err, clear_db.status, out_size);
+ if (err)
+ ret = err;
+ else
+ ret = -EFAULT;
+ }
+
+ hifc_set_pf_status(hwif, HIFC_PF_STATUS_FLR_START_FLAG);
+
+ clr_res.func_idx = func_id;
+ clr_res.ppf_idx = HIFC_HWIF_PPF_IDX(hwif);
+
+ err = hifc_msg_to_mgmt_no_ack(hwdev, HIFC_MOD_COMM,
+ HIFC_MGMT_CMD_START_FLR, &clr_res,
+ sizeof(clr_res));
+ if (err) {
+		sdk_warn(hwdev->dev_hdl, "Failed to notify the flush message\n");
+ ret = err;
+ }
+
+ err = wait_for_flr_finish(hwif);
+ if (err) {
+ sdk_warn(hwdev->dev_hdl, "Wait firmware FLR timeout\n");
+ ret = err;
+ }
+
+ hifc_enable_doorbell(hwif);
+
+ err = hifc_reinit_cmdq_ctxts(hwdev);
+ if (err) {
+ sdk_warn(hwdev->dev_hdl, "Failed to reinit cmdq\n");
+ ret = err;
+ }
+
+ return ret;
+}
+
+int hifc_func_rx_tx_flush(void *hwdev)
+{
+ struct hifc_hwdev *dev = hwdev;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ if (!dev->chip_present_flag)
+ return 0;
+
+ return hifc_pf_rx_tx_flush(dev);
+}
+
+int hifc_get_interrupt_cfg(void *hwdev,
+ struct nic_interrupt_info *interrupt_info)
+{
+ struct hifc_hwdev *nic_hwdev = hwdev;
+ struct hifc_msix_config msix_cfg = {0};
+ u16 out_size = sizeof(msix_cfg);
+ int err;
+
+ if (!hwdev || !interrupt_info)
+ return -EINVAL;
+
+ err = hifc_global_func_id_get(hwdev, &msix_cfg.func_id);
+ if (err)
+ return err;
+
+ msix_cfg.msix_index = interrupt_info->msix_index;
+
+ err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM,
+ HIFC_MGMT_CMD_MSI_CTRL_REG_RD_BY_UP,
+ &msix_cfg, sizeof(msix_cfg),
+ &msix_cfg, &out_size, 0);
+ if (err || !out_size || msix_cfg.status) {
+ sdk_err(nic_hwdev->dev_hdl, "Failed to get interrupt config, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, msix_cfg.status, out_size);
+ return -EINVAL;
+ }
+
+ interrupt_info->lli_credit_limit = msix_cfg.lli_credit_cnt;
+ interrupt_info->lli_timer_cfg = msix_cfg.lli_tmier_cnt;
+ interrupt_info->pending_limt = msix_cfg.pending_cnt;
+ interrupt_info->coalesc_timer_cfg = msix_cfg.coalesct_timer_cnt;
+ interrupt_info->resend_timer_cfg = msix_cfg.resend_timer_cnt;
+
+ return 0;
+}
+
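+/**
+ * hifc_set_interrupt_cfg - update the msix interrupt configuration
+ * @hwdev: the pointer to hw device
+ * @interrupt_info: msix index and the lli/coalescing fields to change
+ *
+ * Reads the current configuration first, so only the fields selected by
+ * lli_set/interrupt_coalesc_set are overwritten.
+ * Return: 0 - success, negative - failure
+ **/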
+int hifc_set_interrupt_cfg(void *hwdev,
+ struct nic_interrupt_info interrupt_info)
+{
+ struct hifc_hwdev *nic_hwdev = hwdev;
+ struct hifc_msix_config msix_cfg = {0};
+ struct nic_interrupt_info temp_info;
+ u16 out_size = sizeof(msix_cfg);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ temp_info.msix_index = interrupt_info.msix_index;
+
+ err = hifc_get_interrupt_cfg(hwdev, &temp_info);
+ if (err)
+ return -EINVAL;
+
+ err = hifc_global_func_id_get(hwdev, &msix_cfg.func_id);
+ if (err)
+ return err;
+
+ msix_cfg.msix_index = (u16)interrupt_info.msix_index;
+ msix_cfg.lli_credit_cnt = temp_info.lli_credit_limit;
+ msix_cfg.lli_tmier_cnt = temp_info.lli_timer_cfg;
+ msix_cfg.pending_cnt = temp_info.pending_limt;
+ msix_cfg.coalesct_timer_cnt = temp_info.coalesc_timer_cfg;
+ msix_cfg.resend_timer_cnt = temp_info.resend_timer_cfg;
+
+ if (interrupt_info.lli_set) {
+ msix_cfg.lli_credit_cnt = interrupt_info.lli_credit_limit;
+ msix_cfg.lli_tmier_cnt = interrupt_info.lli_timer_cfg;
+ }
+
+ if (interrupt_info.interrupt_coalesc_set) {
+ msix_cfg.pending_cnt = interrupt_info.pending_limt;
+ msix_cfg.coalesct_timer_cnt = interrupt_info.coalesc_timer_cfg;
+ msix_cfg.resend_timer_cnt = interrupt_info.resend_timer_cfg;
+ }
+
+ err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM,
+ HIFC_MGMT_CMD_MSI_CTRL_REG_WR_BY_UP,
+ &msix_cfg, sizeof(msix_cfg),
+ &msix_cfg, &out_size, 0);
+ if (err || !out_size || msix_cfg.status) {
+ sdk_err(nic_hwdev->dev_hdl, "Failed to set interrupt config, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, msix_cfg.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#define HIFC_MSIX_CNT_RESEND_TIMER_SHIFT 29
+#define HIFC_MSIX_CNT_RESEND_TIMER_MASK 0x7U
+
+#define HIFC_MSIX_CNT_SET(val, member) \
+ (((val) & HIFC_MSIX_CNT_##member##_MASK) << \
+ HIFC_MSIX_CNT_##member##_SHIFT)
+
+void hifc_misx_intr_clear_resend_bit(void *hwdev, u16 msix_idx,
+ u8 clear_resend_en)
+{
+ struct hifc_hwif *hwif;
+ u32 msix_ctrl = 0, addr;
+
+ if (!hwdev)
+ return;
+
+ hwif = ((struct hifc_hwdev *)hwdev)->hwif;
+
+ msix_ctrl = HIFC_MSIX_CNT_SET(clear_resend_en, RESEND_TIMER);
+
+ addr = HIFC_CSR_MSIX_CNT_ADDR(msix_idx);
+
+ hifc_hwif_write_reg(hwif, addr, msix_ctrl);
+}
+
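+/**
+ * init_aeqs_msix_attr - set the default msix coalescing for all aeqs
+ * @hwdev: the pointer to hw device
+ * Return: 0 - success, negative - failure
+ **/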
+static int init_aeqs_msix_attr(struct hifc_hwdev *hwdev)
+{
+ struct hifc_aeqs *aeqs = hwdev->aeqs;
+ struct nic_interrupt_info info = {0};
+ struct hifc_eq *eq;
+ u16 q_id;
+ int err;
+
+ info.lli_set = 0;
+ info.interrupt_coalesc_set = 1;
+ info.pending_limt = HIFC_DEAULT_EQ_MSIX_PENDING_LIMIT;
+ info.coalesc_timer_cfg = HIFC_DEAULT_EQ_MSIX_COALESC_TIMER_CFG;
+ info.resend_timer_cfg = HIFC_DEAULT_EQ_MSIX_RESEND_TIMER_CFG;
+
+ for (q_id = 0; q_id < aeqs->num_aeqs; q_id++) {
+ eq = &aeqs->aeq[q_id];
+ info.msix_index = eq->eq_irq.msix_entry_idx;
+ err = hifc_set_interrupt_cfg(hwdev, info);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Set msix attr for aeq %d failed\n",
+ q_id);
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
+
+static int init_ceqs_msix_attr(struct hifc_hwdev *hwdev)
+{
+ struct hifc_ceqs *ceqs = hwdev->ceqs;
+ struct nic_interrupt_info info = {0};
+ struct hifc_eq *eq;
+ u16 q_id;
+ int err;
+
+ info.lli_set = 0;
+ info.interrupt_coalesc_set = 1;
+ info.pending_limt = HIFC_DEAULT_EQ_MSIX_PENDING_LIMIT;
+ info.coalesc_timer_cfg = HIFC_DEAULT_EQ_MSIX_COALESC_TIMER_CFG;
+ info.resend_timer_cfg = HIFC_DEAULT_EQ_MSIX_RESEND_TIMER_CFG;
+
+ for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) {
+ eq = &ceqs->ceq[q_id];
+ info.msix_index = eq->eq_irq.msix_entry_idx;
+ err = hifc_set_interrupt_cfg(hwdev, info);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Set msix attr for ceq %d failed\n",
+ q_id);
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * set_pf_dma_attr_entry - set the dma attributes for entry
+ * @hwdev: the pointer to hw device
+ * @entry_idx: the entry index in the dma table
+ * @st: PCIE TLP steering tag
+ * @at: PCIE TLP AT field
+ * @ph: PCIE TLP Processing Hint field
+ * @no_snooping: PCIE TLP No snooping
+ * @tph_en: PCIE TLP Processing Hint Enable
+ **/
+static void set_pf_dma_attr_entry(struct hifc_hwdev *hwdev, u32 entry_idx,
+ u8 st, u8 at, u8 ph,
+ enum hifc_pcie_nosnoop no_snooping,
+ enum hifc_pcie_tph tph_en)
+{
+ u32 addr, val, dma_attr_entry;
+
+ /* Read Modify Write */
+ addr = HIFC_CSR_DMA_ATTR_TBL_ADDR(entry_idx);
+
+ val = hifc_hwif_read_reg(hwdev->hwif, addr);
+ val = HIFC_DMA_ATTR_ENTRY_CLEAR(val, ST) &
+ HIFC_DMA_ATTR_ENTRY_CLEAR(val, AT) &
+ HIFC_DMA_ATTR_ENTRY_CLEAR(val, PH) &
+ HIFC_DMA_ATTR_ENTRY_CLEAR(val, NO_SNOOPING) &
+ HIFC_DMA_ATTR_ENTRY_CLEAR(val, TPH_EN);
+
+ dma_attr_entry = HIFC_DMA_ATTR_ENTRY_SET(st, ST) |
+ HIFC_DMA_ATTR_ENTRY_SET(at, AT) |
+ HIFC_DMA_ATTR_ENTRY_SET(ph, PH) |
+ HIFC_DMA_ATTR_ENTRY_SET(no_snooping, NO_SNOOPING) |
+ HIFC_DMA_ATTR_ENTRY_SET(tph_en, TPH_EN);
+
+ val |= dma_attr_entry;
+ hifc_hwif_write_reg(hwdev->hwif, addr, val);
+}
+
+/**
+ * dma_attr_table_init - initialize the default dma attributes
+ * @hwdev: the pointer to hw device
+ * Return: 0 - success, negative - failure
+ **/
+static int dma_attr_table_init(struct hifc_hwdev *hwdev)
+{
+ int err = 0;
+
+ set_pf_dma_attr_entry(hwdev, PCIE_MSIX_ATTR_ENTRY,
+ HIFC_PCIE_ST_DISABLE,
+ HIFC_PCIE_AT_DISABLE,
+ HIFC_PCIE_PH_DISABLE,
+ HIFC_PCIE_SNOOP,
+ HIFC_PCIE_TPH_DISABLE);
+
+ return err;
+}
+
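+/**
+ * resources_state_set - report the function resources state to mgmt
+ * @hwdev: the pointer to hw device
+ * @state: the resources state to set (active/clean)
+ * Return: 0 - success, negative - failure
+ **/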
+static int resources_state_set(struct hifc_hwdev *hwdev,
+ enum hifc_res_state state)
+{
+ struct hifc_cmd_set_res_state res_state = {0};
+ u16 out_size = sizeof(res_state);
+ int err;
+
+ err = hifc_global_func_id_get(hwdev, &res_state.func_idx);
+ if (err)
+ return err;
+
+ res_state.state = state;
+
+ err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM,
+ HIFC_MGMT_CMD_RES_STATE_SET,
+ &res_state, sizeof(res_state),
+ &res_state, &out_size, 0);
+ if (err || !out_size || res_state.status) {
+ sdk_err(hwdev->dev_hdl, "Failed to set resources state, err: %d, status: 0x%x, out_size: 0x%x\n",
+ err, res_state.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
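+/**
+ * comm_mgmt_msg_handler - dispatch a COMM message received from mgmt
+ * @hwdev: the pointer to hw device
+ * @pri_handle: the pf_to_mgmt context registered with this callback
+ * @cmd: the received command
+ * @buf_in: the command data
+ * @in_size: the size of the command data
+ * @buf_out: the response buffer
+ * @out_size: the size of the response
+ *
+ * Looks the command up in the registered self-command table and calls its
+ * handler; unknown commands are dumped to the log.
+ **/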
+static void comm_mgmt_msg_handler(void *hwdev, void *pri_handle, u8 cmd,
+ void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size)
+{
+ struct hifc_msg_pf_to_mgmt *pf_to_mgmt = pri_handle;
+ u8 cmd_idx;
+ u32 *mem;
+ u16 i;
+
+ for (cmd_idx = 0; cmd_idx < pf_to_mgmt->proc.cmd_num; cmd_idx++) {
+ if (cmd == pf_to_mgmt->proc.info[cmd_idx].cmd) {
+ if (!pf_to_mgmt->proc.info[cmd_idx].proc) {
+ sdk_warn(pf_to_mgmt->hwdev->dev_hdl,
+ "PF recv up comm msg handle null, cmd(0x%x)\n",
+ cmd);
+ } else {
+ pf_to_mgmt->proc.info[cmd_idx].proc(hwdev,
+ buf_in, in_size, buf_out, out_size);
+ }
+
+ return;
+ }
+ }
+
+ sdk_warn(pf_to_mgmt->hwdev->dev_hdl, "Received mgmt cpu event: 0x%x\n",
+ cmd);
+
+ mem = buf_in;
+ for (i = 0; i < (in_size / sizeof(u32)); i++) {
+ pr_info("0x%x\n", *mem);
+ mem++;
+ }
+
+ *out_size = 0;
+}
+
+static int hifc_comm_aeqs_init(struct hifc_hwdev *hwdev)
+{
+ struct irq_info aeq_irqs[HIFC_MAX_AEQS] = {{0} };
+ u16 num_aeqs, resp_num_irq = 0, i;
+ int err;
+
+ num_aeqs = HIFC_HWIF_NUM_AEQS(hwdev->hwif);
+ if (num_aeqs > HIFC_MAX_AEQS) {
+ sdk_warn(hwdev->dev_hdl, "Adjust aeq num to %d\n",
+ HIFC_MAX_AEQS);
+ num_aeqs = HIFC_MAX_AEQS;
+ }
+ err = hifc_alloc_irqs(hwdev, SERVICE_T_INTF, num_aeqs, aeq_irqs,
+ &resp_num_irq);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to alloc aeq irqs, num_aeqs: %d\n",
+ num_aeqs);
+ return err;
+ }
+
+ if (resp_num_irq < num_aeqs) {
+ sdk_warn(hwdev->dev_hdl, "Adjust aeq num to %d\n",
+ resp_num_irq);
+ num_aeqs = resp_num_irq;
+ }
+
+ err = hifc_aeqs_init(hwdev, num_aeqs, aeq_irqs);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to init aeqs\n");
+ goto aeqs_init_err;
+ }
+
+ set_bit(HIFC_HWDEV_AEQ_INITED, &hwdev->func_state);
+
+ return 0;
+
+aeqs_init_err:
+ for (i = 0; i < num_aeqs; i++)
+ hifc_free_irq(hwdev, SERVICE_T_INTF, aeq_irqs[i].irq_id);
+
+ return err;
+}
+
+static void hifc_comm_aeqs_free(struct hifc_hwdev *hwdev)
+{
+ struct irq_info aeq_irqs[HIFC_MAX_AEQS] = {{0} };
+ u16 num_irqs, i;
+
+ clear_bit(HIFC_HWDEV_AEQ_INITED, &hwdev->func_state);
+
+ hifc_get_aeq_irqs(hwdev, aeq_irqs, &num_irqs);
+ hifc_aeqs_free(hwdev);
+ for (i = 0; i < num_irqs; i++)
+ hifc_free_irq(hwdev, SERVICE_T_INTF, aeq_irqs[i].irq_id);
+}
+
+static int hifc_comm_ceqs_init(struct hifc_hwdev *hwdev)
+{
+ struct irq_info ceq_irqs[HIFC_MAX_CEQS] = {{0} };
+ u16 num_ceqs, resp_num_irq = 0, i;
+ int err;
+
+ num_ceqs = HIFC_HWIF_NUM_CEQS(hwdev->hwif);
+ if (num_ceqs > HIFC_MAX_CEQS) {
+ sdk_warn(hwdev->dev_hdl, "Adjust ceq num to %d\n",
+ HIFC_MAX_CEQS);
+ num_ceqs = HIFC_MAX_CEQS;
+ }
+
+ err = hifc_alloc_irqs(hwdev, SERVICE_T_INTF, num_ceqs, ceq_irqs,
+ &resp_num_irq);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to alloc ceq irqs, num_ceqs: %d\n",
+ num_ceqs);
+ return err;
+ }
+
+ if (resp_num_irq < num_ceqs) {
+ sdk_warn(hwdev->dev_hdl, "Adjust ceq num to %d\n",
+ resp_num_irq);
+ num_ceqs = resp_num_irq;
+ }
+
+ err = hifc_ceqs_init(hwdev, num_ceqs, ceq_irqs);
+ if (err) {
+ sdk_err(hwdev->dev_hdl,
+ "Failed to init ceqs, err:%d\n", err);
+ goto ceqs_init_err;
+ }
+
+ return 0;
+
+ceqs_init_err:
+ for (i = 0; i < num_ceqs; i++)
+ hifc_free_irq(hwdev, SERVICE_T_INTF, ceq_irqs[i].irq_id);
+
+ return err;
+}
+
+static void hifc_comm_ceqs_free(struct hifc_hwdev *hwdev)
+{
+ struct irq_info ceq_irqs[HIFC_MAX_CEQS] = {{0} };
+ u16 num_irqs;
+ int i;
+
+ hifc_get_ceq_irqs(hwdev, ceq_irqs, &num_irqs);
+ hifc_ceqs_free(hwdev);
+ for (i = 0; i < num_irqs; i++)
+ hifc_free_irq(hwdev, SERVICE_T_INTF, ceq_irqs[i].irq_id);
+}
+
+static int hifc_comm_pf_to_mgmt_init(struct hifc_hwdev *hwdev)
+{
+ int err;
+
+ if (hifc_func_type(hwdev) == TYPE_VF ||
+ !FUNC_SUPPORT_MGMT(hwdev))
+		return 0; /* VFs do not support sending messages to mgmt directly */
+
+ err = hifc_pf_to_mgmt_init(hwdev);
+ if (err)
+ return err;
+
+ hifc_aeq_register_hw_cb(hwdev, HIFC_MSG_FROM_MGMT_CPU,
+ hifc_mgmt_msg_aeqe_handler);
+
+ hifc_register_mgmt_msg_cb(hwdev, HIFC_MOD_COMM,
+ hwdev->pf_to_mgmt, comm_mgmt_msg_handler);
+
+ set_bit(HIFC_HWDEV_MGMT_INITED, &hwdev->func_state);
+
+ return 0;
+}
+
+static void hifc_comm_pf_to_mgmt_free(struct hifc_hwdev *hwdev)
+{
+ if (hifc_func_type(hwdev) == TYPE_VF ||
+ !FUNC_SUPPORT_MGMT(hwdev))
+		return; /* VFs do not support sending messages to mgmt directly */
+
+ hifc_unregister_mgmt_msg_cb(hwdev, HIFC_MOD_COMM);
+
+ hifc_aeq_unregister_hw_cb(hwdev, HIFC_MSG_FROM_MGMT_CPU);
+
+ hifc_pf_to_mgmt_free(hwdev);
+}
+
+static int hifc_comm_clp_to_mgmt_init(struct hifc_hwdev *hwdev)
+{
+ int err;
+
+ if (hifc_func_type(hwdev) == TYPE_VF ||
+ !FUNC_SUPPORT_MGMT(hwdev))
+ return 0;
+
+ err = hifc_clp_pf_to_mgmt_init(hwdev);
+ if (err)
+ return err;
+
+ set_bit(HIFC_HWDEV_CLP_INITED, &hwdev->func_state);
+
+ return 0;
+}
+
+static void hifc_comm_clp_to_mgmt_free(struct hifc_hwdev *hwdev)
+{
+ if (hifc_func_type(hwdev) == TYPE_VF ||
+ !FUNC_SUPPORT_MGMT(hwdev))
+ return;
+
+ clear_bit(HIFC_HWDEV_CLP_INITED, &hwdev->func_state);
+ hifc_clp_pf_to_mgmt_free(hwdev);
+}
+
+static int hifc_comm_cmdqs_init(struct hifc_hwdev *hwdev)
+{
+ int err;
+
+ err = hifc_cmdqs_init(hwdev);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to init cmd queues\n");
+ return err;
+ }
+
+ hifc_ceq_register_cb(hwdev, HIFC_CMDQ, hifc_cmdq_ceq_handler);
+
+ err = hifc_set_cmdq_depth(hwdev, HIFC_CMDQ_DEPTH);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to set cmdq depth\n");
+ goto set_cmdq_depth_err;
+ }
+
+ return 0;
+
+set_cmdq_depth_err:
+ hifc_cmdqs_free(hwdev);
+
+ return err;
+}
+
+static void hifc_comm_cmdqs_free(struct hifc_hwdev *hwdev)
+{
+ hifc_ceq_unregister_cb(hwdev, HIFC_CMDQ);
+ hifc_cmdqs_free(hwdev);
+}
+
+static int hifc_sync_mgmt_func_state(struct hifc_hwdev *hwdev)
+{
+ int err;
+
+ hifc_set_pf_status(hwdev->hwif, HIFC_PF_STATUS_ACTIVE_FLAG);
+
+ err = resources_state_set(hwdev, HIFC_RES_ACTIVE);
+ if (err) {
+ sdk_err(hwdev->dev_hdl,
+ "Failed to set function resources state\n");
+ goto resources_state_set_err;
+ }
+
+ hwdev->heartbeat_ehd.en = false;
+ if (HIFC_FUNC_TYPE(hwdev) == TYPE_PPF) {
+		/* heartbeat synchronization must happen after setting the PF active status */
+ hifc_comm_recv_mgmt_self_cmd_reg(
+ hwdev, HIFC_MGMT_CMD_HEARTBEAT_EVENT,
+ mgmt_heartbeat_event_handler);
+ }
+
+ return 0;
+
+resources_state_set_err:
+ hifc_set_pf_status(hwdev->hwif, HIFC_PF_STATUS_INIT);
+
+ return err;
+}
+
+static void hifc_unsync_mgmt_func_state(struct hifc_hwdev *hwdev)
+{
+ hifc_set_pf_status(hwdev->hwif, HIFC_PF_STATUS_INIT);
+
+ hwdev->heartbeat_ehd.en = false;
+ if (HIFC_FUNC_TYPE(hwdev) == TYPE_PPF) {
+ hifc_comm_recv_up_self_cmd_unreg(
+ hwdev, HIFC_MGMT_CMD_HEARTBEAT_EVENT);
+ }
+
+ resources_state_set(hwdev, HIFC_RES_CLEAN);
+}
+
+int hifc_set_vport_enable(void *hwdev, bool enable)
+{
+ struct hifc_hwdev *nic_hwdev = (struct hifc_hwdev *)hwdev;
+ struct hifc_vport_state en_state = {0};
+ u16 out_size = sizeof(en_state);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ err = hifc_global_func_id_get(hwdev, &en_state.func_id);
+ if (err)
+ return err;
+
+ en_state.state = enable ? 1 : 0;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HIFC_PORT_CMD_SET_VPORT_ENABLE,
+ &en_state, sizeof(en_state),
+ &en_state, &out_size);
+ if (err || !out_size || en_state.status) {
+ sdk_err(nic_hwdev->dev_hdl, "Failed to set vport state, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, en_state.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hifc_l2nic_reset_base(struct hifc_hwdev *hwdev, u16 reset_flag)
+{
+ struct hifc_l2nic_reset l2nic_reset = {0};
+ u16 out_size = sizeof(l2nic_reset);
+ int err = 0;
+
+ err = hifc_set_vport_enable(hwdev, false);
+ if (err)
+ return err;
+
+ msleep(100);
+
+ sdk_info(hwdev->dev_hdl, "L2nic reset flag 0x%x\n", reset_flag);
+
+ err = hifc_global_func_id_get(hwdev, &l2nic_reset.func_id);
+ if (err)
+ return err;
+
+ l2nic_reset.reset_flag = reset_flag;
+ err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM,
+ HIFC_MGMT_CMD_L2NIC_RESET, &l2nic_reset,
+ sizeof(l2nic_reset), &l2nic_reset,
+ &out_size, 0);
+ if (err || !out_size || l2nic_reset.status) {
+ sdk_err(hwdev->dev_hdl, "Failed to reset L2NIC resources, err: %d, status: 0x%x, out_size: 0x%x\n",
+ err, l2nic_reset.status, out_size);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int hifc_l2nic_reset(struct hifc_hwdev *hwdev)
+{
+ return hifc_l2nic_reset_base(hwdev, 0);
+}
+
+static int __get_func_misc_info(struct hifc_hwdev *hwdev)
+{
+ int err;
+
+ err = hifc_get_board_info(hwdev, &hwdev->board_info);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Get board info failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int init_func_mode(struct hifc_hwdev *hwdev)
+{
+ int err;
+
+ err = __get_func_misc_info(hwdev);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to get function msic information\n");
+ return err;
+ }
+
+ err = hifc_l2nic_reset(hwdev);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int __init_eqs_msix_attr(struct hifc_hwdev *hwdev)
+{
+ int err;
+
+ err = init_aeqs_msix_attr(hwdev);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to init aeqs msix attr\n");
+ return err;
+ }
+
+ err = init_ceqs_msix_attr(hwdev);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to init ceqs msix attr\n");
+ return err;
+ }
+
+ return 0;
+}
+
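+/**
+ * init_cmdqs_channel - bring up the cmdq channel
+ * @hwdev: the pointer to hw device
+ *
+ * Initializes the dma attribute table and the ceqs, sets the eq msix
+ * attributes and the default wq page size, then creates the cmdqs.
+ * Return: 0 - success, negative - failure
+ **/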
+static int init_cmdqs_channel(struct hifc_hwdev *hwdev)
+{
+ u16 func_id;
+ int err;
+
+ dma_attr_table_init(hwdev);
+
+ err = hifc_comm_ceqs_init(hwdev);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to init completion event queues\n");
+ return err;
+ }
+
+ err = __init_eqs_msix_attr(hwdev);
+ if (err)
+ goto init_eqs_msix_err;
+
+ /* set default wq page_size */
+ hwdev->wq_page_size = HIFC_DEFAULT_WQ_PAGE_SIZE;
+
+ err = hifc_global_func_id_get(hwdev, &func_id);
+ if (err)
+ goto get_func_id_err;
+
+ err = hifc_set_wq_page_size(hwdev, func_id, hwdev->wq_page_size);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to set wq page size\n");
+ goto init_wq_pg_size_err;
+ }
+
+ err = hifc_comm_cmdqs_init(hwdev);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to init cmd queues\n");
+ goto cmdq_init_err;
+ }
+
+ set_bit(HIFC_HWDEV_CMDQ_INITED, &hwdev->func_state);
+
+ return 0;
+
+cmdq_init_err:
+ if (HIFC_FUNC_TYPE(hwdev) != TYPE_VF)
+ hifc_set_wq_page_size(hwdev, func_id, HIFC_HW_WQ_PAGE_SIZE);
+init_wq_pg_size_err:
+get_func_id_err:
+init_eqs_msix_err:
+ hifc_comm_ceqs_free(hwdev);
+
+ return err;
+}
+
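+/**
+ * init_mgmt_channel - bring up the management channel
+ * @hwdev: the pointer to hw device
+ *
+ * Initializes the clp channel, the aeqs and the pf-to-mgmt messaging in
+ * that order; on failure the parts already initialized are freed.
+ * Return: 0 - success, negative - failure
+ **/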
+static int init_mgmt_channel(struct hifc_hwdev *hwdev)
+{
+ int err;
+
+ err = hifc_comm_clp_to_mgmt_init(hwdev);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to init clp\n");
+ return err;
+ }
+
+ err = hifc_comm_aeqs_init(hwdev);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to init async event queues\n");
+ goto aeqs_init_err;
+ }
+
+ err = hifc_comm_pf_to_mgmt_init(hwdev);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to init msg\n");
+ goto msg_init_err;
+ }
+
+ return err;
+
+msg_init_err:
+ hifc_comm_aeqs_free(hwdev);
+
+aeqs_init_err:
+ hifc_comm_clp_to_mgmt_free(hwdev);
+
+ return err;
+}
+
+/* initialize communication channel */
+int hifc_init_comm_ch(struct hifc_hwdev *hwdev)
+{
+ int err;
+
+ err = init_mgmt_channel(hwdev);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to init mgmt channel\n");
+ return err;
+ }
+
+ err = init_func_mode(hwdev);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to init function mode\n");
+ goto func_mode_err;
+ }
+
+ err = init_cmdqs_channel(hwdev);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to init cmdq channel\n");
+ goto init_cmdqs_channel_err;
+ }
+
+ err = hifc_sync_mgmt_func_state(hwdev);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to synchronize mgmt function state\n");
+ goto sync_mgmt_func_err;
+ }
+
+ err = hifc_aeq_register_swe_cb(hwdev, HIFC_STATELESS_EVENT,
+ hifc_nic_sw_aeqe_handler);
+ if (err) {
+ sdk_err(hwdev->dev_hdl,
+ "Failed to register ucode aeqe handler\n");
+ goto register_ucode_aeqe_err;
+ }
+
+ set_bit(HIFC_HWDEV_COMM_CH_INITED, &hwdev->func_state);
+
+ return 0;
+
+register_ucode_aeqe_err:
+ hifc_unsync_mgmt_func_state(hwdev);
+sync_mgmt_func_err:
+ return err;
+
+init_cmdqs_channel_err:
+
+func_mode_err:
+ return err;
+}
+
+static void __uninit_comm_module(struct hifc_hwdev *hwdev,
+ enum hifc_hwdev_init_state init_state)
+{
+ u16 func_id;
+
+ switch (init_state) {
+ case HIFC_HWDEV_COMM_CH_INITED:
+ hifc_aeq_unregister_swe_cb(hwdev,
+ HIFC_STATELESS_EVENT);
+ hifc_unsync_mgmt_func_state(hwdev);
+ break;
+ case HIFC_HWDEV_CMDQ_INITED:
+ hifc_comm_cmdqs_free(hwdev);
+		/* A VF can only set a wq page size of 256K; any other value
+		 * is rejected by the PF. The PF sets every VF's page size
+		 * back to 4K when SR-IOV is disabled.
+		 */
+ if (HIFC_FUNC_TYPE(hwdev) != TYPE_VF) {
+ func_id = hifc_global_func_id_hw(hwdev);
+ hifc_set_wq_page_size(hwdev, func_id,
+ HIFC_HW_WQ_PAGE_SIZE);
+ }
+
+ hifc_comm_ceqs_free(hwdev);
+
+ break;
+ case HIFC_HWDEV_MBOX_INITED:
+ break;
+ case HIFC_HWDEV_MGMT_INITED:
+ hifc_comm_pf_to_mgmt_free(hwdev);
+ break;
+ case HIFC_HWDEV_AEQ_INITED:
+ hifc_comm_aeqs_free(hwdev);
+ break;
+ case HIFC_HWDEV_CLP_INITED:
+ hifc_comm_clp_to_mgmt_free(hwdev);
+ break;
+ default:
+ break;
+ }
+}
+
+#define HIFC_FUNC_STATE_BUSY_TIMEOUT 300
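+/**
+ * hifc_uninit_comm_ch - tear down the communication channel
+ * @hwdev: the pointer to hw device
+ *
+ * Walks the init states from the highest down to none; for each state that
+ * is set, waits for the busy flag to clear and frees the matching module.
+ **/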
+void hifc_uninit_comm_ch(struct hifc_hwdev *hwdev)
+{
+ enum hifc_hwdev_init_state init_state = HIFC_HWDEV_COMM_CH_INITED;
+ int cnt;
+
+ while (init_state > HIFC_HWDEV_NONE_INITED) {
+ if (!test_bit(init_state, &hwdev->func_state)) {
+ init_state--;
+ continue;
+ }
+ clear_bit(init_state, &hwdev->func_state);
+
+ cnt = 0;
+ while (test_bit(HIFC_HWDEV_STATE_BUSY, &hwdev->func_state) &&
+ cnt++ <= HIFC_FUNC_STATE_BUSY_TIMEOUT)
+ usleep_range(900, 1000);
+
+ __uninit_comm_module(hwdev, init_state);
+
+ init_state--;
+ }
+}
+
+int hifc_slq_init(void *dev, int num_wqs)
+{
+ struct hifc_hwdev *hwdev = dev;
+ int err;
+
+ if (!dev)
+ return -EINVAL;
+
+ hwdev->wqs = kzalloc(sizeof(*hwdev->wqs), GFP_KERNEL);
+ if (!hwdev->wqs)
+ return -ENOMEM;
+
+ err = hifc_wqs_alloc(hwdev->wqs, num_wqs, hwdev->dev_hdl);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to alloc wqs\n");
+ kfree(hwdev->wqs);
+ hwdev->wqs = NULL;
+ }
+
+ return err;
+}
+
+void hifc_slq_uninit(void *dev)
+{
+ struct hifc_hwdev *hwdev = dev;
+
+ if (!hwdev)
+ return;
+
+ hifc_wqs_free(hwdev->wqs);
+
+ kfree(hwdev->wqs);
+}
+
+int hifc_slq_alloc(void *dev, u16 wqebb_size, u16 q_depth, u16 page_size,
+ u64 *cla_addr, void **handle)
+{
+ struct hifc_hwdev *hwdev = dev;
+ struct hifc_wq *wq;
+ int err;
+
+ if (!dev || !cla_addr || !handle)
+ return -EINVAL;
+
+ wq = kzalloc(sizeof(*wq), GFP_KERNEL);
+ if (!wq)
+ return -ENOMEM;
+
+ err = hifc_wq_allocate(hwdev->wqs, wq, wqebb_size, hwdev->wq_page_size,
+ q_depth, 0);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to alloc wq\n");
+ kfree(wq);
+ return -EFAULT;
+ }
+
+ *cla_addr = wq->block_paddr;
+ *handle = wq;
+
+ return 0;
+}
+
+void hifc_slq_free(void *dev, void *handle)
+{
+ struct hifc_hwdev *hwdev = dev;
+
+ if (!hwdev || !handle)
+ return;
+
+ hifc_wq_free(hwdev->wqs, handle);
+ kfree(handle);
+}
+
+u64 hifc_slq_get_addr(void *handle, u16 index)
+{
+ if (!handle)
+		return 0; /* NULL wqe address */
+
+ return (u64)hifc_get_wqebb_addr(handle, index);
+}
+
+u64 hifc_slq_get_first_pageaddr(void *handle)
+{
+ struct hifc_wq *wq = handle;
+
+ if (!handle)
+		return 0; /* NULL wqe address */
+
+ return hifc_get_first_wqe_page_addr(wq);
+}
+
+int hifc_func_tmr_bitmap_set(void *hwdev, bool en)
+{
+ struct hifc_func_tmr_bitmap_op bitmap_op = {0};
+ u16 out_size = sizeof(bitmap_op);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ err = hifc_global_func_id_get(hwdev, &bitmap_op.func_idx);
+ if (err)
+ return err;
+
+ bitmap_op.ppf_idx = hifc_ppf_idx(hwdev);
+ if (en)
+ bitmap_op.op_id = FUNC_TMR_BITMAP_ENABLE;
+ else
+ bitmap_op.op_id = FUNC_TMR_BITMAP_DISABLE;
+
+ err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM,
+ HIFC_MGMT_CMD_FUNC_TMR_BITMAT_SET,
+ &bitmap_op, sizeof(bitmap_op),
+ &bitmap_op, &out_size, 0);
+ if (err || !out_size || bitmap_op.status) {
+ sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl,
+ "Failed to set timer bitmap, err: %d, status: 0x%x, out_size: 0x%x\n",
+ err, bitmap_op.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
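+/**
+ * ppf_ht_gpa_set - allocate two ht gpa pages and report them to mgmt
+ * @hwdev: the pointer to hw device
+ * @pg0: the first page to allocate and set
+ * @pg1: the second page to allocate and set
+ * Return: 0 - success, negative - failure
+ **/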
+int ppf_ht_gpa_set(struct hifc_hwdev *hwdev, struct hifc_page_addr *pg0,
+ struct hifc_page_addr *pg1)
+{
+ struct comm_info_ht_gpa_set ht_gpa_set = {0};
+ u16 out_size = sizeof(ht_gpa_set);
+ int ret;
+
+ pg0->virt_addr = dma_zalloc_coherent(hwdev->dev_hdl,
+ HIFC_HT_GPA_PAGE_SIZE,
+ &pg0->phys_addr, GFP_KERNEL);
+ if (!pg0->virt_addr) {
+ sdk_err(hwdev->dev_hdl, "Alloc pg0 page addr failed\n");
+ return -EFAULT;
+ }
+
+ pg1->virt_addr = dma_zalloc_coherent(hwdev->dev_hdl,
+ HIFC_HT_GPA_PAGE_SIZE,
+ &pg1->phys_addr, GFP_KERNEL);
+ if (!pg1->virt_addr) {
+ sdk_err(hwdev->dev_hdl, "Alloc pg1 page addr failed\n");
+ return -EFAULT;
+ }
+
+ ht_gpa_set.page_pa0 = pg0->phys_addr;
+ ht_gpa_set.page_pa1 = pg1->phys_addr;
+ sdk_info(hwdev->dev_hdl, "PPF ht gpa set: page_addr0.pa=0x%llx, page_addr1.pa=0x%llx\n",
+ pg0->phys_addr, pg1->phys_addr);
+ ret = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM,
+ HIFC_MGMT_CMD_PPF_HT_GPA_SET,
+ &ht_gpa_set, sizeof(ht_gpa_set),
+ &ht_gpa_set, &out_size, 0);
+ if (ret || !out_size || ht_gpa_set.status) {
+ sdk_warn(hwdev->dev_hdl, "PPF ht gpa set failed, ret: %d, status: 0x%x, out_size: 0x%x\n",
+ ret, ht_gpa_set.status, out_size);
+ return -EFAULT;
+ }
+
+ hwdev->page_pa0.phys_addr = pg0->phys_addr;
+ hwdev->page_pa0.virt_addr = pg0->virt_addr;
+
+ hwdev->page_pa1.phys_addr = pg1->phys_addr;
+ hwdev->page_pa1.virt_addr = pg1->virt_addr;
+
+ return 0;
+}
+
+int hifc_ppf_ht_gpa_init(struct hifc_hwdev *hwdev)
+{
+ int ret;
+ int i;
+ int j;
+ int size;
+
+ struct hifc_page_addr page_addr0[HIFC_PPF_HT_GPA_SET_RETRY_TIMES];
+ struct hifc_page_addr page_addr1[HIFC_PPF_HT_GPA_SET_RETRY_TIMES];
+
+ size = HIFC_PPF_HT_GPA_SET_RETRY_TIMES * sizeof(page_addr0[0]);
+ memset(page_addr0, 0, size);
+ memset(page_addr1, 0, size);
+
+ for (i = 0; i < HIFC_PPF_HT_GPA_SET_RETRY_TIMES; i++) {
+ ret = ppf_ht_gpa_set(hwdev, &page_addr0[i], &page_addr1[i]);
+ if (!ret)
+ break;
+ }
+
+ for (j = 0; j < i; j++) {
+ if (page_addr0[j].virt_addr) {
+ dma_free_coherent(hwdev->dev_hdl,
+ HIFC_HT_GPA_PAGE_SIZE,
+ page_addr0[j].virt_addr,
+ page_addr0[j].phys_addr);
+ page_addr0[j].virt_addr = NULL;
+ }
+ if (page_addr1[j].virt_addr) {
+ dma_free_coherent(hwdev->dev_hdl,
+ HIFC_HT_GPA_PAGE_SIZE,
+ page_addr1[j].virt_addr,
+ page_addr1[j].phys_addr);
+ page_addr1[j].virt_addr = NULL;
+ }
+ }
+
+ if (i >= HIFC_PPF_HT_GPA_SET_RETRY_TIMES) {
+ sdk_err(hwdev->dev_hdl, "PPF ht gpa init failed, retry times: %d\n",
+ i);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+void hifc_ppf_ht_gpa_deinit(struct hifc_hwdev *hwdev)
+{
+ if (hwdev->page_pa0.virt_addr) {
+ dma_free_coherent(hwdev->dev_hdl, HIFC_HT_GPA_PAGE_SIZE,
+ hwdev->page_pa0.virt_addr,
+ hwdev->page_pa0.phys_addr);
+ hwdev->page_pa0.virt_addr = NULL;
+ }
+
+ if (hwdev->page_pa1.virt_addr) {
+ dma_free_coherent(hwdev->dev_hdl, HIFC_HT_GPA_PAGE_SIZE,
+ hwdev->page_pa1.virt_addr,
+ hwdev->page_pa1.phys_addr);
+ hwdev->page_pa1.virt_addr = NULL;
+ }
+}
+
+static int set_ppf_tmr_status(struct hifc_hwdev *hwdev,
+ enum ppf_tmr_status status)
+{
+ struct hifc_ppf_tmr_op op = {0};
+ u16 out_size = sizeof(op);
+ int err = 0;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ if (hifc_func_type(hwdev) != TYPE_PPF)
+ return -EFAULT;
+
+ if (status == HIFC_PPF_TMR_FLAG_START) {
+ err = hifc_ppf_ht_gpa_init(hwdev);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "PPF ht gpa init fail!\n");
+ return -EFAULT;
+ }
+ } else {
+ hifc_ppf_ht_gpa_deinit(hwdev);
+ }
+
+ op.op_id = status;
+ op.ppf_idx = hifc_ppf_idx(hwdev);
+
+ err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM,
+ HIFC_MGMT_CMD_PPF_TMR_SET, &op,
+ sizeof(op), &op, &out_size, 0);
+ if (err || !out_size || op.status) {
+ sdk_err(hwdev->dev_hdl, "Failed to set ppf timer, err: %d, status: 0x%x, out_size: 0x%x\n",
+ err, op.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int hifc_ppf_tmr_start(void *hwdev)
+{
+ if (!hwdev) {
+ pr_err("Hwdev pointer is NULL for starting ppf timer\n");
+ return -EINVAL;
+ }
+
+ return set_ppf_tmr_status(hwdev, HIFC_PPF_TMR_FLAG_START);
+}
+
+int hifc_ppf_tmr_stop(void *hwdev)
+{
+ if (!hwdev) {
+ pr_err("Hwdev pointer is NULL for stop ppf timer\n");
+ return -EINVAL;
+ }
+
+ return set_ppf_tmr_status(hwdev, HIFC_PPF_TMR_FLAG_STOP);
+}
+
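+/**
+ * hifc_set_wq_page_size - set the wq page size of a function in mgmt
+ * @hwdev: the pointer to hw device
+ * @func_idx: the global function index
+ * @page_size: the wq page size in bytes
+ * Return: 0 - success, negative - failure
+ **/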
+int hifc_set_wq_page_size(struct hifc_hwdev *hwdev, u16 func_idx,
+ u32 page_size)
+{
+ struct hifc_wq_page_size page_size_info = {0};
+ u16 out_size = sizeof(page_size_info);
+ int err;
+
+ page_size_info.func_idx = func_idx;
+ page_size_info.ppf_idx = hifc_ppf_idx(hwdev);
+ page_size_info.page_size = HIFC_PAGE_SIZE_HW(page_size);
+
+ err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM,
+ HIFC_MGMT_CMD_PAGESIZE_SET,
+ &page_size_info, sizeof(page_size_info),
+ &page_size_info, &out_size, 0);
+ if (err || !out_size || page_size_info.status) {
+ sdk_err(hwdev->dev_hdl, "Failed to set wq page size, err: %d, status: 0x%x, out_size: 0x%0x\n",
+ err, page_size_info.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
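+/**
+ * hifc_mgmt_event_ack_first - check whether a mgmt event must be acked
+ * before it is handled
+ * @mod: the module of the event
+ * @cmd: the command of the event
+ * Return: true - ack first, false - handle first
+ **/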
+bool hifc_mgmt_event_ack_first(u8 mod, u8 cmd)
+{
+ if ((mod == HIFC_MOD_COMM && cmd == HIFC_MGMT_CMD_GET_HOST_INFO) ||
+ (mod == HIFC_MOD_COMM && cmd == HIFC_MGMT_CMD_HEARTBEAT_EVENT))
+ return false;
+
+ if (mod == HIFC_MOD_COMM || mod == HIFC_MOD_L2NIC ||
+ mod == HIFC_MOD_HILINK)
+ return true;
+
+ return false;
+}
+
+#define FAULT_SHOW_STR_LEN 16
+
+static void chip_fault_show(struct hifc_hwdev *hwdev,
+ struct hifc_fault_event *event)
+{
+ char fault_level[FAULT_LEVEL_MAX][FAULT_SHOW_STR_LEN + 1] = {
+ "fatal", "reset", "flr", "general", "suggestion"};
+ char level_str[FAULT_SHOW_STR_LEN + 1];
+ struct hifc_fault_event_stats *fault;
+ u8 node_id, level;
+ u32 pos, base;
+
+ fault = &hwdev->hw_stats.fault_event_stats;
+
+ memset(level_str, 0, FAULT_SHOW_STR_LEN + 1);
+ level = event->event.chip.err_level;
+ if (level < FAULT_LEVEL_MAX)
+ strncpy(level_str, fault_level[level],
+ FAULT_SHOW_STR_LEN);
+ else
+ strncpy(level_str, "Unknown", FAULT_SHOW_STR_LEN);
+
+ if (level == FAULT_LEVEL_SERIOUS_FLR) {
+ sdk_err(hwdev->dev_hdl, "err_level: %d [%s], flr func_id: %d\n",
+ level, level_str, event->event.chip.func_id);
+ atomic_inc(&fault->fault_type_stat[event->type]);
+ }
+ sdk_err(hwdev->dev_hdl, "module_id: 0x%x, err_type: 0x%x, err_level: %d[%s], err_csr_addr: 0x%08x, err_csr_value: 0x%08x\n",
+ event->event.chip.node_id,
+ event->event.chip.err_type, level, level_str,
+ event->event.chip.err_csr_addr,
+ event->event.chip.err_csr_value);
+
+ node_id = event->event.chip.node_id;
+ atomic_inc(&fault->chip_fault_stats[node_id][level]);
+
+ base = event->event.chip.node_id * FAULT_LEVEL_MAX *
+ HIFC_CHIP_ERROR_TYPE_MAX;
+ pos = base + HIFC_CHIP_ERROR_TYPE_MAX * level +
+ event->event.chip.err_type;
+ if (pos < HIFC_CHIP_FAULT_SIZE)
+ hwdev->chip_fault_stats[pos]++;
+}
+
+static void fault_report_show(struct hifc_hwdev *hwdev,
+ struct hifc_fault_event *event)
+{
+ char fault_type[FAULT_TYPE_MAX][FAULT_SHOW_STR_LEN + 1] = {
+ "chip", "ucode", "mem rd timeout", "mem wr timeout",
+ "reg rd timeout", "reg wr timeout", "phy fault"};
+ char type_str[FAULT_SHOW_STR_LEN + 1];
+ struct hifc_fault_event_stats *fault;
+
+ sdk_err(hwdev->dev_hdl, "Fault event report received, func_id: %d.\n",
+ hifc_global_func_id(hwdev));
+
+ memset(type_str, 0, FAULT_SHOW_STR_LEN + 1);
+ if (event->type < FAULT_TYPE_MAX)
+ strncpy(type_str, fault_type[event->type], FAULT_SHOW_STR_LEN);
+ else
+ strncpy(type_str, "Unknown", FAULT_SHOW_STR_LEN);
+
+ sdk_err(hwdev->dev_hdl, "Fault type: %d [%s]\n", event->type, type_str);
+ sdk_err(hwdev->dev_hdl, "Fault val[0]: 0x%08x, val[1]: 0x%08x, val[2]: 0x%08x, val[3]: 0x%08x\n",
+ event->event.val[0], event->event.val[1], event->event.val[2],
+ event->event.val[3]);
+
+ fault = &hwdev->hw_stats.fault_event_stats;
+
+ switch (event->type) {
+ case FAULT_TYPE_CHIP:
+ chip_fault_show(hwdev, event);
+ break;
+ case FAULT_TYPE_UCODE:
+ atomic_inc(&fault->fault_type_stat[event->type]);
+
+ sdk_err(hwdev->dev_hdl, "cause_id: %d, core_id: %d, c_id: %d, epc: 0x%08x\n",
+ event->event.ucode.cause_id, event->event.ucode.core_id,
+ event->event.ucode.c_id, event->event.ucode.epc);
+ break;
+ case FAULT_TYPE_MEM_RD_TIMEOUT:
+ case FAULT_TYPE_MEM_WR_TIMEOUT:
+ atomic_inc(&fault->fault_type_stat[event->type]);
+
+ sdk_err(hwdev->dev_hdl, "err_csr_ctrl: 0x%08x, err_csr_data: 0x%08x, ctrl_tab: 0x%08x, mem_index: 0x%08x\n",
+ event->event.mem_timeout.err_csr_ctrl,
+ event->event.mem_timeout.err_csr_data,
+ event->event.mem_timeout.ctrl_tab,
+ event->event.mem_timeout.mem_index);
+ break;
+ case FAULT_TYPE_REG_RD_TIMEOUT:
+ case FAULT_TYPE_REG_WR_TIMEOUT:
+ atomic_inc(&fault->fault_type_stat[event->type]);
+ sdk_err(hwdev->dev_hdl, "err_csr: 0x%08x\n",
+ event->event.reg_timeout.err_csr);
+ break;
+ case FAULT_TYPE_PHY_FAULT:
+ atomic_inc(&fault->fault_type_stat[event->type]);
+ sdk_err(hwdev->dev_hdl, "op_type: %u, port_id: %u, dev_ad: %u, csr_addr: 0x%08x, op_data: 0x%08x\n",
+ event->event.phy_fault.op_type,
+ event->event.phy_fault.port_id,
+ event->event.phy_fault.dev_ad,
+ event->event.phy_fault.csr_addr,
+ event->event.phy_fault.op_data);
+ break;
+ default:
+ break;
+ }
+}
+
+static void hifc_refresh_history_fault(struct hifc_hwdev *hwdev,
+ struct hifc_fault_recover_info *info)
+{
+ if (!hwdev->history_fault_flag) {
+ hwdev->history_fault_flag = true;
+ memcpy(&hwdev->history_fault, info,
+ sizeof(struct hifc_fault_recover_info));
+ } else {
+ if (hwdev->history_fault.fault_lev >= info->fault_lev)
+ memcpy(&hwdev->history_fault, info,
+ sizeof(struct hifc_fault_recover_info));
+ }
+}
+
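+/**
+ * fault_event_handler - handle a fault event reported by mgmt
+ * @hwdev: the pointer to hw device
+ * @buf_in: the fault event data
+ * @in_size: the size of the fault event data
+ * @buf_out: the response buffer
+ * @out_size: the size of the response
+ *
+ * Prints the fault details, forwards the event to the registered event
+ * callback and refreshes the history fault record.
+ **/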
+static void fault_event_handler(struct hifc_hwdev *hwdev, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size)
+{
+ struct hifc_cmd_fault_event *fault_event;
+ struct hifc_event_info event_info;
+ struct hifc_fault_info_node *fault_node;
+
+ if (in_size != sizeof(*fault_event)) {
+ sdk_err(hwdev->dev_hdl, "Invalid fault event report, length: %d, should be %ld.\n",
+ in_size, sizeof(*fault_event));
+ return;
+ }
+
+ fault_event = buf_in;
+ fault_report_show(hwdev, &fault_event->event);
+
+ if (hwdev->event_callback) {
+ event_info.type = HIFC_EVENT_FAULT;
+ memcpy(&event_info.info, &fault_event->event,
+ sizeof(event_info.info));
+
+ hwdev->event_callback(hwdev->event_pri_handle, &event_info);
+ }
+
+ /* refresh history fault info */
+ fault_node = kzalloc(sizeof(*fault_node), GFP_KERNEL);
+ if (!fault_node) {
+ sdk_err(hwdev->dev_hdl, "Malloc fault node memory failed\n");
+ return;
+ }
+
+ if (fault_event->event.type <= FAULT_TYPE_REG_WR_TIMEOUT)
+ fault_node->info.fault_src = fault_event->event.type;
+ else if (fault_event->event.type == FAULT_TYPE_PHY_FAULT)
+ fault_node->info.fault_src = HIFC_FAULT_SRC_HW_PHY_FAULT;
+
+ if (fault_node->info.fault_src == HIFC_FAULT_SRC_HW_MGMT_CHIP)
+ fault_node->info.fault_lev =
+ fault_event->event.event.chip.err_level;
+ else
+ fault_node->info.fault_lev = FAULT_LEVEL_FATAL;
+
+ memcpy(&fault_node->info.fault_data.hw_mgmt, &fault_event->event.event,
+ sizeof(union hifc_fault_hw_mgmt));
+ hifc_refresh_history_fault(hwdev, &fault_node->info);
+
+ down(&hwdev->fault_list_sem);
+ kfree(fault_node);
+ up(&hwdev->fault_list_sem);
+}
+
+static void heartbeat_lost_event_handler(struct hifc_hwdev *hwdev,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hifc_fault_info_node *fault_node;
+ struct hifc_event_info event_info = {0};
+
+ atomic_inc(&hwdev->hw_stats.heart_lost_stats);
+ sdk_err(hwdev->dev_hdl, "Heart lost report received, func_id: %d\n",
+ hifc_global_func_id(hwdev));
+
+ if (hwdev->event_callback) {
+ event_info.type = HIFC_EVENT_HEART_LOST;
+ hwdev->event_callback(hwdev->event_pri_handle, &event_info);
+ }
+
+ /* refresh history fault info */
+ fault_node = kzalloc(sizeof(*fault_node), GFP_KERNEL);
+ if (!fault_node) {
+ sdk_err(hwdev->dev_hdl, "Malloc fault node memory failed\n");
+ return;
+ }
+
+ fault_node->info.fault_src = HIFC_FAULT_SRC_HOST_HEARTBEAT_LOST;
+ fault_node->info.fault_lev = FAULT_LEVEL_FATAL;
+ hifc_refresh_history_fault(hwdev, &fault_node->info);
+
+ down(&hwdev->fault_list_sem);
+ kfree(fault_node);
+ up(&hwdev->fault_list_sem);
+}
+
+static void sw_watchdog_timeout_info_show(struct hifc_hwdev *hwdev,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hifc_mgmt_watchdog_info *watchdog_info;
+ u32 *dump_addr, *reg, stack_len, i, j;
+
+ if (in_size != sizeof(*watchdog_info)) {
+ sdk_err(hwdev->dev_hdl, "Invalid mgmt watchdog report, length: %d, should be %ld.\n",
+ in_size, sizeof(*watchdog_info));
+ return;
+ }
+
+ watchdog_info = buf_in;
+
+ sdk_err(hwdev->dev_hdl, "Mgmt deadloop time: 0x%x 0x%x, task id: 0x%x, sp: 0x%x\n",
+ watchdog_info->curr_time_h, watchdog_info->curr_time_l,
+ watchdog_info->task_id, watchdog_info->sp);
+ sdk_err(hwdev->dev_hdl, "Stack current used: 0x%x, peak used: 0x%x, overflow flag: 0x%x, top: 0x%x, bottom: 0x%x\n",
+ watchdog_info->curr_used, watchdog_info->peak_used,
+ watchdog_info->is_overflow, watchdog_info->stack_top,
+ watchdog_info->stack_bottom);
+
+ sdk_err(hwdev->dev_hdl, "Mgmt pc: 0x%08x, lr: 0x%08x, cpsr:0x%08x\n",
+ watchdog_info->pc, watchdog_info->lr, watchdog_info->cpsr);
+
+ sdk_err(hwdev->dev_hdl, "Mgmt register info\n");
+
+ for (i = 0; i < 3; i++) {
+ reg = watchdog_info->reg + (u64)(u32)(4 * i);
+ sdk_err(hwdev->dev_hdl, "0x%08x 0x%08x 0x%08x 0x%08x\n",
+ *(reg), *(reg + 1), *(reg + 2), *(reg + 3));
+ }
+
+ sdk_err(hwdev->dev_hdl, "0x%08x\n", watchdog_info->reg[12]);
+
+ if (watchdog_info->stack_actlen <= 1024) {
+ stack_len = watchdog_info->stack_actlen;
+ } else {
+ sdk_err(hwdev->dev_hdl, "Oops stack length: 0x%x is wrong\n",
+ watchdog_info->stack_actlen);
+ stack_len = 1024;
+ }
+
+ sdk_err(hwdev->dev_hdl, "Mgmt dump stack, 16Bytes per line(start from sp)\n");
+ for (i = 0; i < (stack_len / 16); i++) {
+ dump_addr = (u32 *)(watchdog_info->data + ((u64)(u32)(i * 16)));
+ sdk_err(hwdev->dev_hdl, "0x%08x 0x%08x 0x%08x 0x%08x\n",
+ *dump_addr, *(dump_addr + 1), *(dump_addr + 2),
+ *(dump_addr + 3));
+ }
+
+ for (j = 0; j < ((stack_len % 16) / 4); j++) {
+ dump_addr = (u32 *)(watchdog_info->data +
+ ((u64)(u32)(i * 16 + j * 4)));
+ sdk_err(hwdev->dev_hdl, "0x%08x ", *dump_addr);
+ }
+
+ *out_size = sizeof(*watchdog_info);
+ watchdog_info = buf_out;
+ watchdog_info->status = 0;
+}
+
+static void mgmt_watchdog_timeout_event_handler(struct hifc_hwdev *hwdev,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hifc_fault_info_node *fault_node;
+
+ sw_watchdog_timeout_info_show(hwdev, buf_in, in_size,
+ buf_out, out_size);
+
+ /* refresh history fault info */
+ fault_node = kzalloc(sizeof(*fault_node), GFP_KERNEL);
+ if (!fault_node) {
+ sdk_err(hwdev->dev_hdl, "Malloc fault node memory failed\n");
+ return;
+ }
+
+ fault_node->info.fault_src = HIFC_FAULT_SRC_MGMT_WATCHDOG;
+ fault_node->info.fault_lev = FAULT_LEVEL_FATAL;
+ hifc_refresh_history_fault(hwdev, &fault_node->info);
+
+ down(&hwdev->fault_list_sem);
+ kfree(fault_node);
+ up(&hwdev->fault_list_sem);
+}
+
+static void mgmt_reset_event_handler(struct hifc_hwdev *hwdev,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ sdk_info(hwdev->dev_hdl, "Mgmt is reset\n");
+
+	/* A mgmt reset only occurs on a hot update or a mgmt deadloop.
+	 * On a deadloop, mgmt reports an event with mod=0, cmd=0x56 and a
+	 * fault is reported to the OS, so the mgmt reset event itself does
+	 * not need to report a fault.
+	 */
+}
+
+static void hifc_fmw_act_ntc_handler(struct hifc_hwdev *hwdev,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hifc_event_info event_info = {0};
+ struct hifc_fmw_act_ntc *notice_info;
+
+ if (in_size != sizeof(*notice_info)) {
+ sdk_err(hwdev->dev_hdl, "Invalid mgmt firmware active notice, length: %d, should be %ld.\n",
+ in_size, sizeof(*notice_info));
+ return;
+ }
+
+ /* mgmt is active now, restart heartbeat enhanced detection */
+ __set_heartbeat_ehd_detect_delay(hwdev, 0);
+
+ if (!hwdev->event_callback)
+ return;
+
+ event_info.type = HIFC_EVENT_FMW_ACT_NTC;
+ hwdev->event_callback(hwdev->event_pri_handle, &event_info);
+
+ *out_size = sizeof(*notice_info);
+ notice_info = buf_out;
+ notice_info->status = 0;
+}
+
+static void hifc_pcie_dfx_event_handler(struct hifc_hwdev *hwdev,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hifc_pcie_dfx_ntc *notice_info = buf_in;
+ struct hifc_pcie_dfx_info *dfx_info;
+ u16 size = 0;
+ u16 cnt = 0;
+ u32 num = 0;
+ u32 i, j;
+ int err;
+ u32 *reg;
+
+ if (in_size != sizeof(*notice_info)) {
+ sdk_err(hwdev->dev_hdl, "Invalid mgmt firmware active notice, length: %d, should be %ld.\n",
+ in_size, sizeof(*notice_info));
+ return;
+ }
+
+ dfx_info = kzalloc(sizeof(*dfx_info), GFP_KERNEL);
+ if (!dfx_info) {
+ sdk_err(hwdev->dev_hdl, "Malloc dfx_info memory failed\n");
+ return;
+ }
+
+ ((struct hifc_pcie_dfx_ntc *)buf_out)->status = 0;
+ *out_size = sizeof(*notice_info);
+ num = (u32)(notice_info->len / 1024);
+ sdk_info(hwdev->dev_hdl, "INFO LEN: %d\n", notice_info->len);
+ sdk_info(hwdev->dev_hdl, "PCIE DFX:\n");
+ dfx_info->host_id = 0;
+ for (i = 0; i < num; i++) {
+ dfx_info->offset = i * MAX_PCIE_DFX_BUF_SIZE;
+ if (i == (num - 1))
+ dfx_info->last = 1;
+ size = sizeof(*dfx_info);
+ err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM,
+ HIFC_MGMT_CMD_PCIE_DFX_GET,
+ dfx_info, sizeof(*dfx_info),
+ dfx_info, &size, 0);
+ if (err || dfx_info->status || !size) {
+ sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl,
+ "Failed to get pcie dfx info, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, dfx_info->status, size);
+ kfree(dfx_info);
+ return;
+ }
+
+ reg = (u32 *)dfx_info->data;
+ for (j = 0; j < 256; j = j + 8) {
+ /*lint -save -e661 -e662*/
+ sdk_info(hwdev->dev_hdl, "0x%04x: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ cnt, reg[j], reg[(u32)(j + 1)],
+ reg[(u32)(j + 2)], reg[(u32)(j + 3)],
+ reg[(u32)(j + 4)], reg[(u32)(j + 5)],
+ reg[(u32)(j + 6)], reg[(u32)(j + 7)]);
+ /*lint -restore*/
+ cnt = cnt + 32;
+ }
+ memset(dfx_info->data, 0, MAX_PCIE_DFX_BUF_SIZE);
+ }
+ kfree(dfx_info);
+}
+
+struct hifc_mctp_get_host_info {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 huawei_cmd;
+ u8 sub_cmd;
+ u8 rsvd[2];
+
+ u32 actual_len;
+
+ u8 data[1024];
+};
+
+static void hifc_mctp_get_host_info_event_handler(struct hifc_hwdev *hwdev,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hifc_event_info event_info = {0};
+ struct hifc_mctp_get_host_info *mctp_out, *mctp_in;
+ struct hifc_mctp_host_info *host_info;
+
+ if (in_size != sizeof(*mctp_in)) {
+ sdk_err(hwdev->dev_hdl, "Invalid mgmt mctp info, length: %d, should be %ld\n",
+ in_size, sizeof(*mctp_in));
+ return;
+ }
+
+ *out_size = sizeof(*mctp_out);
+ mctp_out = buf_out;
+ mctp_out->status = 0;
+
+ if (!hwdev->event_callback) {
+ mctp_out->status = HIFC_MGMT_STATUS_ERR_INIT;
+ return;
+ }
+
+ mctp_in = buf_in;
+ host_info = &event_info.mctp_info;
+ host_info->major_cmd = mctp_in->huawei_cmd;
+ host_info->sub_cmd = mctp_in->sub_cmd;
+ host_info->data = mctp_out->data;
+
+ event_info.type = HIFC_EVENT_MCTP_GET_HOST_INFO;
+ hwdev->event_callback(hwdev->event_pri_handle, &event_info);
+
+ mctp_out->actual_len = host_info->data_len;
+}
+
+char *__hw_to_char_fec[HILINK_FEC_MAX_TYPE] = {"RS-FEC", "BASE-FEC", "NO-FEC"};
+
+char *__hw_to_char_port_type[LINK_PORT_MAX_TYPE] = {
+ "Unknown", "Fibre", "Electric", "Direct Attach Copper", "AOC",
+ "Back plane", "BaseT"
+};
+
+static void __get_port_type(struct hifc_hwdev *hwdev,
+ struct hifc_link_info *info, char **port_type)
+{
+ if (info->cable_absent) {
+ sdk_info(hwdev->dev_hdl, "Cable unpresent\n");
+ return;
+ }
+
+ if (info->port_type < LINK_PORT_MAX_TYPE)
+ *port_type = __hw_to_char_port_type[info->port_type];
+ else
+ sdk_info(hwdev->dev_hdl, "Unknown port type: %u\n",
+ info->port_type);
+ if (info->port_type == LINK_PORT_FIBRE) {
+ if (info->port_sub_type == FIBRE_SUBTYPE_SR)
+ *port_type = "Fibre-SR";
+ else if (info->port_sub_type == FIBRE_SUBTYPE_LR)
+ *port_type = "Fibre-LR";
+ }
+}
+
+static void __print_cable_info(struct hifc_hwdev *hwdev,
+ struct hifc_link_info *info)
+{
+ char tmp_str[512] = {0};
+ char tmp_vendor[17] = {0};
+ char *port_type = "Unknown port type";
+ int i;
+
+ __get_port_type(hwdev, info, &port_type);
+
+ for (i = sizeof(info->vendor_name) - 1; i >= 0; i--) {
+ if (info->vendor_name[i] == ' ')
+ info->vendor_name[i] = '\0';
+ else
+ break;
+ }
+
+ memcpy(tmp_vendor, info->vendor_name,
+ sizeof(info->vendor_name));
+ snprintf(tmp_str, sizeof(tmp_str) - 1,
+ "Vendor: %s, %s, length: %um, max_speed: %uGbps",
+ tmp_vendor, port_type, info->cable_length,
+ info->cable_max_speed);
+ if (info->port_type == LINK_PORT_FIBRE ||
+ info->port_type == LINK_PORT_AOC) {
+ snprintf(tmp_str, sizeof(tmp_str) - 1,
+ "%s, %s, Temperature: %u", tmp_str,
+ info->sfp_type ? "SFP" : "QSFP", info->cable_temp);
+ if (info->sfp_type) {
+ snprintf(tmp_str, sizeof(tmp_str) - 1,
+ "%s, rx power: %uuW, tx power: %uuW",
+ tmp_str, info->power[0], info->power[1]);
+ } else {
+ snprintf(tmp_str, sizeof(tmp_str) - 1,
+ "%s, rx power: %uuw %uuW %uuW %uuW",
+ tmp_str, info->power[0], info->power[1],
+ info->power[2], info->power[3]);
+ }
+ }
+
+ sdk_info(hwdev->dev_hdl, "Cable information: %s\n",
+ tmp_str);
+}
+
+static void __hi30_lane_info(struct hifc_hwdev *hwdev,
+ struct hilink_lane *lane)
+{
+ struct hi30_ffe_data *ffe_data;
+ struct hi30_ctle_data *ctle_data;
+
+ ffe_data = (struct hi30_ffe_data *)lane->hi30_ffe;
+ ctle_data = (struct hi30_ctle_data *)lane->hi30_ctle;
+
+ sdk_info(hwdev->dev_hdl, "TX_FFE: PRE1=%s%d; PRE2=%s%d; MAIN=%d; POST1=%s%d; POST1X=%s%d\n",
+ (ffe_data->PRE1 & 0x10) ? "-" : "",
+ (int)(ffe_data->PRE1 & 0xf),
+ (ffe_data->PRE2 & 0x10) ? "-" : "",
+ (int)(ffe_data->PRE2 & 0xf),
+ (int)ffe_data->MAIN,
+ (ffe_data->POST1 & 0x10) ? "-" : "",
+ (int)(ffe_data->POST1 & 0xf),
+ (ffe_data->POST2 & 0x10) ? "-" : "",
+ (int)(ffe_data->POST2 & 0xf));
+ sdk_info(hwdev->dev_hdl, "RX_CTLE: Gain1~3=%u %u %u; Boost1~3=%u %u %u; Zero1~3=%u %u %u; Squelch1~3=%u %u %u\n",
+ ctle_data->ctlebst[0], ctle_data->ctlebst[1],
+ ctle_data->ctlebst[2], ctle_data->ctlecmband[0],
+ ctle_data->ctlecmband[1], ctle_data->ctlecmband[2],
+ ctle_data->ctlermband[0], ctle_data->ctlermband[1],
+ ctle_data->ctlermband[2], ctle_data->ctleza[0],
+ ctle_data->ctleza[1], ctle_data->ctleza[2]);
+}
+
+static void __print_hi30_status(struct hifc_hwdev *hwdev,
+ struct hifc_link_info *info)
+{
+ struct hilink_lane *lane;
+ int lane_used_num = 0, i;
+
+ for (i = 0; i < HILINK_MAX_LANE; i++) {
+ lane = (struct hilink_lane *)(info->lane2 + i * sizeof(*lane));
+ if (!lane->lane_used)
+ continue;
+
+ __hi30_lane_info(hwdev, lane);
+ lane_used_num++;
+ }
+
+	/* in new firmware, all lane info is set in lane2 */
+ if (lane_used_num)
+ return;
+
+	/* compatible with old firmware */
+ __hi30_lane_info(hwdev, (struct hilink_lane *)info->lane1);
+}
+
+static void __print_link_info(struct hifc_hwdev *hwdev,
+ struct hifc_link_info *info,
+ enum hilink_info_print_event type)
+{
+ char *fec = "None";
+
+ if (info->fec < HILINK_FEC_MAX_TYPE)
+ fec = __hw_to_char_fec[info->fec];
+ else
+ sdk_info(hwdev->dev_hdl, "Unknown fec type: %u\n",
+ info->fec);
+
+ if (type == HILINK_EVENT_LINK_UP || !info->an_state) {
+ sdk_info(hwdev->dev_hdl, "Link information: speed %dGbps, %s, autoneg %s\n",
+ info->speed, fec, info->an_state ? "on" : "off");
+ } else {
+ sdk_info(hwdev->dev_hdl, "Link information: antoneg: %s\n",
+ info->an_state ? "on" : "off");
+ }
+}
+
+static char *hilink_info_report_type[HILINK_EVENT_MAX_TYPE] = {
+ "", "link up", "link down", "cable plugged"
+};
+
+void print_hilink_info(struct hifc_hwdev *hwdev,
+ enum hilink_info_print_event type,
+ struct hifc_link_info *info)
+{
+ __print_cable_info(hwdev, info);
+
+ __print_link_info(hwdev, info, type);
+
+ __print_hi30_status(hwdev, info);
+
+ if (type == HILINK_EVENT_LINK_UP)
+ return;
+
+ if (type == HILINK_EVENT_CABLE_PLUGGED) {
+ sdk_info(hwdev->dev_hdl, "alos: %u, rx_los: %u\n",
+ info->alos, info->rx_los);
+ return;
+ }
+
+ sdk_info(hwdev->dev_hdl, "PMA ctrl: %s, MAC tx %s, MAC rx %s, PMA debug info reg: 0x%x, PMA signal ok reg: 0x%x, RF/LF status reg: 0x%x\n",
+ info->pma_status == 1 ? "off" : "on",
+ info->mac_tx_en ? "enable" : "disable",
+ info->mac_rx_en ? "enable" : "disable", info->pma_dbg_info_reg,
+ info->pma_signal_ok_reg, info->rf_lf_status_reg);
+ sdk_info(hwdev->dev_hdl, "alos: %u, rx_los: %u, PCS block counter reg: 0x%x, PCS link: 0x%x, MAC link: 0x%x PCS_err_cnt: 0x%x\n",
+ info->alos, info->rx_los, info->pcs_err_blk_cnt_reg,
+ info->pcs_link_reg, info->mac_link_reg, info->pcs_err_cnt);
+}
+
+static void hifc_print_hilink_info(struct hifc_hwdev *hwdev, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size)
+{
+ struct hifc_hilink_link_info *hilink_info = buf_in;
+ struct hifc_link_info *info;
+ enum hilink_info_print_event type;
+
+ if (in_size != sizeof(*hilink_info)) {
+ sdk_err(hwdev->dev_hdl, "Invalid hilink info message size %d, should be %ld\n",
+ in_size, sizeof(*hilink_info));
+ return;
+ }
+
+ ((struct hifc_hilink_link_info *)buf_out)->status = 0;
+ *out_size = sizeof(*hilink_info);
+
+ info = &hilink_info->info;
+ type = hilink_info->info_type;
+
+ if (type < HILINK_EVENT_LINK_UP || type >= HILINK_EVENT_MAX_TYPE) {
+ sdk_info(hwdev->dev_hdl, "Invalid hilink info report, type: %d\n",
+ type);
+ return;
+ }
+
+ sdk_info(hwdev->dev_hdl, "Hilink info report after %s\n",
+ hilink_info_report_type[type]);
+
+ print_hilink_info(hwdev, type, info);
+}
+
+static void __port_sfp_info_event(struct hifc_hwdev *hwdev,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hifc_cmd_get_sfp_qsfp_info *sfp_info = buf_in;
+ struct hifc_port_routine_cmd *rt_cmd;
+ struct card_node *chip_node = hwdev->chip_node;
+
+ if (in_size != sizeof(*sfp_info)) {
+ sdk_err(hwdev->dev_hdl, "Invalid sfp info cmd, length: %d, should be %ld\n",
+ in_size, sizeof(*sfp_info));
+ return;
+ }
+
+ if (sfp_info->port_id >= HIFC_MAX_PORT_ID) {
+ sdk_err(hwdev->dev_hdl, "Invalid sfp port id: %d, max port is %d\n",
+ sfp_info->port_id, HIFC_MAX_PORT_ID - 1);
+ return;
+ }
+
+ if (!chip_node->rt_cmd)
+ return;
+
+ rt_cmd = &chip_node->rt_cmd[sfp_info->port_id];
+ mutex_lock(&chip_node->sfp_mutex);
+ memcpy(&rt_cmd->sfp_info, sfp_info, sizeof(rt_cmd->sfp_info));
+ rt_cmd->up_send_sfp_info = true;
+ mutex_unlock(&chip_node->sfp_mutex);
+}
+
+static void __port_sfp_abs_event(struct hifc_hwdev *hwdev,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hifc_cmd_get_light_module_abs *sfp_abs = buf_in;
+ struct hifc_port_routine_cmd *rt_cmd;
+ struct card_node *chip_node = hwdev->chip_node;
+
+ if (in_size != sizeof(*sfp_abs)) {
+ sdk_err(hwdev->dev_hdl, "Invalid sfp absent cmd, length: %d, should be %ld\n",
+ in_size, sizeof(*sfp_abs));
+ return;
+ }
+
+ if (sfp_abs->port_id >= HIFC_MAX_PORT_ID) {
+ sdk_err(hwdev->dev_hdl, "Invalid sfp port id: %d, max port is %d\n",
+ sfp_abs->port_id, HIFC_MAX_PORT_ID - 1);
+ return;
+ }
+
+ if (!chip_node->rt_cmd)
+ return;
+
+ rt_cmd = &chip_node->rt_cmd[sfp_abs->port_id];
+ mutex_lock(&chip_node->sfp_mutex);
+ memcpy(&rt_cmd->abs, sfp_abs, sizeof(rt_cmd->abs));
+ rt_cmd->up_send_sfp_abs = true;
+ mutex_unlock(&chip_node->sfp_mutex);
+}
+
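+/**
+ * mgmt_heartbeat_enhanced_event - handle an enhanced heartbeat event
+ * @hwdev: the pointer to hw device
+ * @buf_in: the heartbeat event data
+ * @in_size: the size of the heartbeat event data
+ * @buf_out: the response buffer
+ * @out_size: the size of the response
+ *
+ * Records the jiffies of the last heartbeat change and answers with the
+ * driver magic so mgmt knows the host is alive.
+ **/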
+static void mgmt_heartbeat_enhanced_event(struct hifc_hwdev *hwdev,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hifc_heartbeat_event *hb_event = buf_in;
+ struct hifc_heartbeat_event *hb_event_out = buf_out;
+ struct hifc_hwdev *dev = hwdev;
+
+ if (in_size != sizeof(*hb_event)) {
+ sdk_err(dev->dev_hdl, "Invalid data size from mgmt for heartbeat event: %d\n",
+ in_size);
+ return;
+ }
+
+ if (dev->heartbeat_ehd.last_heartbeat != hb_event->heart) {
+ dev->heartbeat_ehd.last_update_jiffies = jiffies;
+ dev->heartbeat_ehd.last_heartbeat = hb_event->heart;
+ }
+
+ hb_event_out->drv_heart = HEARTBEAT_DRV_MAGIC_ACK;
+
+ hb_event_out->status = 0;
+ *out_size = sizeof(*hb_event_out);
+}
+
+struct dev_event_handler {
+ u8 mod;
+ u8 cmd;
+ void (*handler)(struct hifc_hwdev *hwdev, void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size);
+};
+
+struct dev_event_handler dev_cmd_handler[] = {
+ {
+ .mod = HIFC_MOD_L2NIC,
+ .cmd = HIFC_PORT_CMD_GET_SFP_INFO,
+ .handler = __port_sfp_info_event,
+ },
+
+ {
+ .mod = HIFC_MOD_L2NIC,
+ .cmd = HIFC_PORT_CMD_GET_SFP_ABS,
+ .handler = __port_sfp_abs_event,
+ },
+
+ {
+ .mod = HIFC_MOD_HILINK,
+ .cmd = HIFC_HILINK_CMD_GET_LINK_INFO,
+ .handler = hifc_print_hilink_info,
+ },
+
+ {
+ .mod = HIFC_MOD_COMM,
+ .cmd = HIFC_MGMT_CMD_FAULT_REPORT,
+ .handler = fault_event_handler,
+ },
+
+ {
+ .mod = HIFC_MOD_L2NIC,
+ .cmd = HIFC_MGMT_CMD_HEART_LOST_REPORT,
+ .handler = heartbeat_lost_event_handler,
+ },
+
+ {
+ .mod = HIFC_MOD_COMM,
+ .cmd = HIFC_MGMT_CMD_WATCHDOG_INFO,
+ .handler = mgmt_watchdog_timeout_event_handler,
+ },
+
+ {
+ .mod = HIFC_MOD_L2NIC,
+ .cmd = HIFC_PORT_CMD_MGMT_RESET,
+ .handler = mgmt_reset_event_handler,
+ },
+
+ {
+ .mod = HIFC_MOD_COMM,
+ .cmd = HIFC_MGMT_CMD_FMW_ACT_NTC,
+ .handler = hifc_fmw_act_ntc_handler,
+ },
+
+ {
+ .mod = HIFC_MOD_COMM,
+ .cmd = HIFC_MGMT_CMD_PCIE_DFX_NTC,
+ .handler = hifc_pcie_dfx_event_handler,
+ },
+
+ {
+ .mod = HIFC_MOD_COMM,
+ .cmd = HIFC_MGMT_CMD_GET_HOST_INFO,
+ .handler = hifc_mctp_get_host_info_event_handler,
+ },
+
+ {
+ .mod = HIFC_MOD_COMM,
+ .cmd = HIFC_MGMT_CMD_HEARTBEAT_EVENT,
+ .handler = mgmt_heartbeat_enhanced_event,
+ },
+};
+
+/* common processing for these events:
+ * pf link change event
+ * pf heart lost event, TBD
+ * pf fault report event
+ * vf link change event
+ * vf heart lost event, TBD
+ * vf fault report event, TBD
+ */
+static void _event_handler(struct hifc_hwdev *hwdev, enum hifc_mod_type mod,
+ u8 cmd, void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ u32 i, size = sizeof(dev_cmd_handler) / sizeof(dev_cmd_handler[0]);
+
+ if (!hwdev)
+ return;
+
+ *out_size = 0;
+
+ for (i = 0; i < size; i++) {
+ if (cmd == dev_cmd_handler[i].cmd &&
+ mod == dev_cmd_handler[i].mod) {
+ dev_cmd_handler[i].handler(hwdev, buf_in, in_size,
+ buf_out, out_size);
+ break;
+ }
+ }
+
+ /* can't find this event cmd */
+ if (i == size)
+ sdk_warn(hwdev->dev_hdl, "Unsupported mod(%d) event cmd(%d) to process\n",
+ mod, cmd);
+}
+
+/* pf link change event */
+static void pf_nic_event_handler(void *hwdev, void *pri_handle, u8 cmd,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ _event_handler(hwdev, HIFC_MOD_L2NIC, cmd, buf_in, in_size,
+ buf_out, out_size);
+}
+
+static void pf_hilink_event_handler(void *hwdev, void *pri_handle, u8 cmd,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ _event_handler(hwdev, HIFC_MOD_HILINK, cmd, buf_in, in_size,
+ buf_out, out_size);
+}
+
+/* pf fault report event */
+void pf_fault_event_handler(void *hwdev, void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ _event_handler(hwdev, HIFC_MOD_COMM, HIFC_MGMT_CMD_FAULT_REPORT,
+ buf_in, in_size, buf_out, out_size);
+}
+
+void mgmt_watchdog_event_handler(void *hwdev, void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ _event_handler(hwdev, HIFC_MOD_COMM, HIFC_MGMT_CMD_WATCHDOG_INFO,
+ buf_in, in_size, buf_out, out_size);
+}
+
+void mgmt_fmw_act_event_handler(void *hwdev, void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ _event_handler(hwdev, HIFC_MOD_COMM, HIFC_MGMT_CMD_FMW_ACT_NTC,
+ buf_in, in_size, buf_out, out_size);
+}
+
+void mgmt_pcie_dfx_event_handler(void *hwdev, void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ _event_handler(hwdev, HIFC_MOD_COMM, HIFC_MGMT_CMD_PCIE_DFX_NTC,
+ buf_in, in_size, buf_out, out_size);
+}
+
+void mgmt_get_mctp_event_handler(void *hwdev, void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ _event_handler(hwdev, HIFC_MOD_COMM, HIFC_MGMT_CMD_GET_HOST_INFO,
+ buf_in, in_size, buf_out, out_size);
+}
+
+void mgmt_heartbeat_event_handler(void *hwdev, void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ _event_handler(hwdev, HIFC_MOD_COMM, HIFC_MGMT_CMD_HEARTBEAT_EVENT,
+ buf_in, in_size, buf_out, out_size);
+}
+
+static void pf_event_register(struct hifc_hwdev *hwdev)
+{
+ if (hifc_is_hwdev_mod_inited(hwdev, HIFC_HWDEV_MGMT_INITED)) {
+ hifc_register_mgmt_msg_cb(hwdev, HIFC_MOD_L2NIC,
+ hwdev, pf_nic_event_handler);
+ hifc_register_mgmt_msg_cb(hwdev, HIFC_MOD_HILINK,
+ hwdev,
+ pf_hilink_event_handler);
+ hifc_comm_recv_mgmt_self_cmd_reg(hwdev,
+ HIFC_MGMT_CMD_FAULT_REPORT,
+ pf_fault_event_handler);
+
+ hifc_comm_recv_mgmt_self_cmd_reg(hwdev,
+ HIFC_MGMT_CMD_WATCHDOG_INFO,
+ mgmt_watchdog_event_handler);
+
+ hifc_comm_recv_mgmt_self_cmd_reg(hwdev,
+ HIFC_MGMT_CMD_FMW_ACT_NTC,
+ mgmt_fmw_act_event_handler);
+ hifc_comm_recv_mgmt_self_cmd_reg(hwdev,
+ HIFC_MGMT_CMD_PCIE_DFX_NTC,
+ mgmt_pcie_dfx_event_handler);
+ hifc_comm_recv_mgmt_self_cmd_reg(hwdev,
+ HIFC_MGMT_CMD_GET_HOST_INFO,
+ mgmt_get_mctp_event_handler);
+ }
+}
+
+void hifc_event_register(void *dev, void *pri_handle,
+ hifc_event_handler callback)
+{
+ struct hifc_hwdev *hwdev = dev;
+
+ if (!dev) {
+ pr_err("Hwdev pointer is NULL for register event\n");
+ return;
+ }
+
+ hwdev->event_callback = callback;
+ hwdev->event_pri_handle = pri_handle;
+
+ pf_event_register(hwdev);
+}
+
+void hifc_event_unregister(void *dev)
+{
+ struct hifc_hwdev *hwdev = dev;
+
+ hwdev->event_callback = NULL;
+ hwdev->event_pri_handle = NULL;
+
+ hifc_unregister_mgmt_msg_cb(hwdev, HIFC_MOD_L2NIC);
+ hifc_unregister_mgmt_msg_cb(hwdev, HIFC_MOD_HILINK);
+ hifc_comm_recv_up_self_cmd_unreg(hwdev,
+ HIFC_MGMT_CMD_FAULT_REPORT);
+ hifc_comm_recv_up_self_cmd_unreg(hwdev,
+ HIFC_MGMT_CMD_WATCHDOG_INFO);
+ hifc_comm_recv_up_self_cmd_unreg(hwdev,
+ HIFC_MGMT_CMD_FMW_ACT_NTC);
+ hifc_comm_recv_up_self_cmd_unreg(hwdev,
+ HIFC_MGMT_CMD_PCIE_DFX_NTC);
+ hifc_comm_recv_up_self_cmd_unreg(hwdev,
+ HIFC_MGMT_CMD_GET_HOST_INFO);
+}
+
+/* 0 - heartbeat lost, 1 - normal */
+static u8 hifc_get_heartbeat_status(struct hifc_hwdev *hwdev)
+{
+ struct hifc_hwif *hwif = hwdev->hwif;
+ u32 attr1;
+
+	/* surprise remove should be set to 1 */
+ if (!hifc_get_chip_present_flag(hwdev))
+ return 1;
+
+ attr1 = hifc_hwif_read_reg(hwif, HIFC_CSR_FUNC_ATTR1_ADDR);
+ if (attr1 == HIFC_PCIE_LINK_DOWN) {
+ sdk_err(hwdev->dev_hdl, "Detect pcie is link down\n");
+ hifc_set_chip_absent(hwdev);
+ hifc_force_complete_all(hwdev);
+		/* should notify the chip error to pangea
+		 * when a pcie link down is detected
+		 */
+ return 1;
+ }
+
+ return HIFC_AF1_GET(attr1, MGMT_INIT_STATUS);
+}
+
+static void hifc_heartbeat_event_handler(struct work_struct *work)
+{
+ struct hifc_hwdev *hwdev =
+ container_of(work, struct hifc_hwdev, timer_work);
+ u16 out = 0;
+
+ _event_handler(hwdev, HIFC_MOD_L2NIC, HIFC_MGMT_CMD_HEART_LOST_REPORT,
+ NULL, 0, &out, &out);
+}
+
+static bool __detect_heartbeat_ehd_lost(struct hifc_hwdev *hwdev)
+{
+ struct hifc_heartbeat_enhanced *hb_ehd = &hwdev->heartbeat_ehd;
+ u64 update_time;
+ bool hb_ehd_lost = false;
+
+ if (!hb_ehd->en)
+ return false;
+
+ if (time_after(jiffies, hb_ehd->start_detect_jiffies)) {
+ update_time = jiffies_to_msecs(jiffies -
+ hb_ehd->last_update_jiffies);
+ if (update_time > HIFC_HEARBEAT_ENHANCED_LOST) {
+ sdk_warn(hwdev->dev_hdl, "Enhanced heartbeat lost for %u milliseconds\n",
+ (u32)update_time);
+ hb_ehd_lost = true;
+ }
+ } else {
+ /* mgmt may not have reported the enhanced heartbeat event yet
+ * and thus won't have updated last_update_jiffies
+ */
+ hb_ehd->last_update_jiffies = jiffies;
+ }
+
+ return hb_ehd_lost;
+}
+
+static void hifc_heartbeat_timer_handler(struct timer_list *t)
+{
+ struct hifc_hwdev *hwdev = from_timer(hwdev, t, heartbeat_timer);
+
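+ /* on heartbeat loss, defer reporting to the workqueue; otherwise re-arm the timer for the next period */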
+ if (__detect_heartbeat_ehd_lost(hwdev) ||
+ !hifc_get_heartbeat_status(hwdev)) {
+ hwdev->heartbeat_lost = 1;
+ queue_work(hwdev->workq, &hwdev->timer_work);
+ } else {
+ mod_timer(&hwdev->heartbeat_timer,
+ jiffies + msecs_to_jiffies(HIFC_HEARTBEAT_PERIOD));
+ }
+}
+
+void add_to_timer(struct timer_list *timer, long period)
+{
+ if (!timer)
+ return;
+
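+ /* the caller presets timer->expires; the period argument is currently unused */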
+ add_timer(timer);
+}
+
+void delete_timer(struct timer_list *timer)
+{
+ if (!timer)
+ return;
+
+ del_timer_sync(timer);
+}
+
+void hifc_init_heartbeat(struct hifc_hwdev *hwdev)
+{
+ timer_setup(&hwdev->heartbeat_timer, hifc_heartbeat_timer_handler, 0);
+ hwdev->heartbeat_timer.expires =
+ jiffies + msecs_to_jiffies(HIFC_HEARTBEAT_START_EXPIRE);
+
+ add_to_timer(&hwdev->heartbeat_timer, HIFC_HEARTBEAT_PERIOD);
+
+ INIT_WORK(&hwdev->timer_work, hifc_heartbeat_event_handler);
+}
+
+void hifc_destroy_heartbeat(struct hifc_hwdev *hwdev)
+{
+ delete_timer(&hwdev->heartbeat_timer);
+}
+
+u8 hifc_nic_sw_aeqe_handler(void *handle, u8 event, u64 data)
+{
+ struct hifc_hwdev *hwdev = (struct hifc_hwdev *)handle;
+ u8 event_level = FAULT_LEVEL_MAX;
+
+ switch (event) {
+ case HIFC_INTERNAL_TSO_FATAL_ERROR:
+ case HIFC_INTERNAL_LRO_FATAL_ERROR:
+ case HIFC_INTERNAL_TX_FATAL_ERROR:
+ case HIFC_INTERNAL_RX_FATAL_ERROR:
+ case HIFC_INTERNAL_OTHER_FATAL_ERROR:
+ atomic_inc(&hwdev->hw_stats.nic_ucode_event_stats[event]);
+ sdk_err(hwdev->dev_hdl, "SW aeqe event type: 0x%x, data: 0x%llx\n",
+ event, data);
+ event_level = FAULT_LEVEL_FATAL;
+ break;
+ default:
+ sdk_err(hwdev->dev_hdl, "Unsupported sw event %d to process.\n",
+ event);
+ }
+
+ return event_level;
+}
+
+void hifc_set_pcie_order_cfg(void *handle)
+{
+ struct hifc_hwdev *hwdev = handle;
+ u32 val;
+
+ if (!hwdev)
+ return;
+
+ val = hifc_hwif_read_reg(hwdev->hwif,
+ HIFC_GLB_DMA_SO_RO_REPLACE_ADDR);
+
+ if (HIFC_GLB_DMA_SO_RO_GET(val, SO_RO_CFG)) {
+ val = HIFC_GLB_DMA_SO_R0_CLEAR(val, SO_RO_CFG);
+ val |= HIFC_GLB_DMA_SO_R0_SET(HIFC_DISABLE_ORDER, SO_RO_CFG);
+ hifc_hwif_write_reg(hwdev->hwif,
+ HIFC_GLB_DMA_SO_RO_REPLACE_ADDR, val);
+ }
+}
+
+int hifc_get_board_info(void *hwdev, struct hifc_board_info *info)
+{
+ struct hifc_comm_board_info board_info = {0};
+ u16 out_size = sizeof(board_info);
+ int err;
+
+ if (!hwdev || !info)
+ return -EINVAL;
+
+ err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM,
+ HIFC_MGMT_CMD_GET_BOARD_INFO,
+ &board_info, sizeof(board_info),
+ &board_info, &out_size, 0);
+ if (err || board_info.status || !out_size) {
+ sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl,
+ "Failed to get board info, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, board_info.status, out_size);
+ return -EFAULT;
+ }
+
+ memcpy(info, &board_info.info, sizeof(*info));
+
+ return 0;
+}
+
+int hifc_get_phy_init_status(void *hwdev,
+ enum phy_init_status_type *init_status)
+{
+ struct hifc_phy_init_status phy_info = {0};
+ u16 out_size = sizeof(phy_info);
+ int err;
+
+ if (!hwdev || !init_status)
+ return -EINVAL;
+
+ err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM,
+ HIFC_MGMT_CMD_GET_PHY_INIT_STATUS,
+ &phy_info, sizeof(phy_info),
+ &phy_info, &out_size, 0);
+ if ((phy_info.status != HIFC_MGMT_CMD_UNSUPPORTED &&
+ phy_info.status) || err || !out_size) {
+ sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl,
+ "Failed to get phy info, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, phy_info.status, out_size);
+ return -EFAULT;
+ }
+
+ *init_status = phy_info.init_status;
+
+ return phy_info.status;
+}
+
+int hifc_phy_init_status_judge(void *hwdev)
+{
+ enum phy_init_status_type init_status;
+ int ret;
+ unsigned long end;
+
+ /* It's not a phy, so don't judge phy status */
+ if (!HIFC_BOARD_IS_PHY((struct hifc_hwdev *)hwdev))
+ return 0;
+
+ end = jiffies + msecs_to_jiffies(PHY_DOING_INIT_TIMEOUT);
+ do {
+ ret = hifc_get_phy_init_status(hwdev, &init_status);
+ if (ret == HIFC_MGMT_CMD_UNSUPPORTED)
+ return 0;
+ else if (ret)
+ return -EFAULT;
+
+ switch (init_status) {
+ case PHY_INIT_SUCCESS:
+ sdk_info(((struct hifc_hwdev *)hwdev)->dev_hdl,
+ "Phy init succeeded\n");
+ return 0;
+ case PHY_NONSUPPORT:
+ sdk_info(((struct hifc_hwdev *)hwdev)->dev_hdl,
+ "Phy init is not supported\n");
+ return 0;
+ case PHY_INIT_FAIL:
+ sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl,
+ "Phy init failed\n");
+ return -EIO;
+ case PHY_INIT_DOING:
+ msleep(250);
+ break;
+ default:
+ sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl,
+ "Invalid phy init status: %d\n",
+ init_status);
+ return -EINVAL;
+ }
+ } while (time_before(jiffies, end));
+
+ sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl,
+ "Phy init timed out\n");
+
+ return -ETIMEDOUT;
+}
+
+int hifc_get_mgmt_channel_status(void *handle)
+{
+ struct hifc_hwdev *hwdev = handle;
+ u32 val;
+
+ if (!hwdev)
+ return true;
+
+ if (hifc_func_type(hwdev) == TYPE_VF ||
+ !(hwdev->feature_cap & HIFC_FUNC_SUPP_DFX_REG))
+ return false;
+
+ val = hifc_hwif_read_reg(hwdev->hwif, HIFC_ICPL_RESERVD_ADDR);
+
+ return HIFC_GET_MGMT_CHANNEL_STATUS(val, MGMT_CHANNEL_STATUS);
+}
+
+#define HIFC_RED_REG_TIME_OUT 3000
+
+int hifc_read_reg(void *hwdev, u32 reg_addr, u32 *val)
+{
+ struct hifc_reg_info reg_info = {0};
+ u16 out_size = sizeof(reg_info);
+ int err;
+
+ if (!hwdev || !val)
+ return -EINVAL;
+
+ reg_info.reg_addr = reg_addr;
+ reg_info.val_length = sizeof(u32);
+
+ err = hifc_pf_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM,
+ HIFC_MGMT_CMD_REG_READ,
+ &reg_info, sizeof(reg_info),
+ &reg_info, &out_size,
+ HIFC_RED_REG_TIME_OUT);
+ if (reg_info.status || err || !out_size) {
+ sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl,
+ "Failed to read reg, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, reg_info.status, out_size);
+ return -EFAULT;
+ }
+
+ *val = reg_info.data[0];
+
+ return 0;
+}
+
+void hifc_swe_fault_handler(struct hifc_hwdev *hwdev, u8 level,
+ u8 event, u64 val)
+{
+ struct hifc_fault_info_node *fault_node;
+
+ if (level < FAULT_LEVEL_MAX) {
+ fault_node = kzalloc(sizeof(*fault_node), GFP_KERNEL);
+ if (!fault_node) {
+ sdk_err(hwdev->dev_hdl, "Malloc fault node memory failed\n");
+ return;
+ }
+
+ fault_node->info.fault_src = HIFC_FAULT_SRC_SW_MGMT_UCODE;
+ fault_node->info.fault_lev = level;
+ fault_node->info.fault_data.sw_mgmt.event_id = event;
+ fault_node->info.fault_data.sw_mgmt.event_data = val;
+ hifc_refresh_history_fault(hwdev, &fault_node->info);
+
+ down(&hwdev->fault_list_sem);
+ kfree(fault_node);
+ up(&hwdev->fault_list_sem);
+ }
+}
+
+void hifc_set_func_deinit_flag(void *hwdev)
+{
+ struct hifc_hwdev *dev = hwdev;
+
+ set_bit(HIFC_HWDEV_FUNC_DEINIT, &dev->func_state);
+}
+
+int hifc_get_card_present_state(void *hwdev, bool *card_present_state)
+{
+ u32 addr, attr1;
+
+ if (!hwdev || !card_present_state)
+ return -EINVAL;
+
+ addr = HIFC_CSR_FUNC_ATTR1_ADDR;
+ attr1 = hifc_hwif_read_reg(((struct hifc_hwdev *)hwdev)->hwif, addr);
+ if (attr1 == HIFC_PCIE_LINK_DOWN) {
+ sdk_warn(((struct hifc_hwdev *)hwdev)->dev_hdl, "Card is not present\n");
+ *card_present_state = false;
+ } else {
+ *card_present_state = true;
+ }
+
+ return 0;
+}
+
+void hifc_disable_mgmt_msg_report(void *hwdev)
+{
+ struct hifc_hwdev *hw_dev = (struct hifc_hwdev *)hwdev;
+
+ hifc_set_pf_status(hw_dev->hwif, HIFC_PF_STATUS_INIT);
+}
+
diff --git a/drivers/scsi/huawei/hifc/hifc_hwdev.h b/drivers/scsi/huawei/hifc/hifc_hwdev.h
new file mode 100644
index 000000000000..6ebf59b31fb8
--- /dev/null
+++ b/drivers/scsi/huawei/hifc/hifc_hwdev.h
@@ -0,0 +1,456 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei Hifc PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ */
+
+#ifndef HIFC_HWDEV_H_
+#define HIFC_HWDEV_H_
+
+/* to use 0-level CLA, page size must be: 64B(wqebb) * 4096(max_q_depth) */
+#define HIFC_DEFAULT_WQ_PAGE_SIZE 0x40000
+#define HIFC_HW_WQ_PAGE_SIZE 0x1000
+
+#define HIFC_MSG_TO_MGMT_MAX_LEN 2016
+
+#define HIFC_MGMT_STATUS_ERR_OK 0 /* Ok */
+#define HIFC_MGMT_STATUS_ERR_PARAM 1 /* Invalid parameter */
+#define HIFC_MGMT_STATUS_ERR_FAILED 2 /* Operation failed */
+#define HIFC_MGMT_STATUS_ERR_PORT 3 /* Invalid port */
+#define HIFC_MGMT_STATUS_ERR_TIMEOUT 4 /* Operation time out */
+#define HIFC_MGMT_STATUS_ERR_NOMATCH 5 /* Version not match */
+#define HIFC_MGMT_STATUS_ERR_EXIST 6 /* Entry exists */
+#define HIFC_MGMT_STATUS_ERR_NOMEM 7 /* Out of memory */
+#define HIFC_MGMT_STATUS_ERR_INIT 8 /* Feature not initialized */
+#define HIFC_MGMT_STATUS_ERR_FAULT 9 /* Invalid address */
+#define HIFC_MGMT_STATUS_ERR_PERM 10 /* Operation not permitted */
+#define HIFC_MGMT_STATUS_ERR_EMPTY 11 /* Table empty */
+#define HIFC_MGMT_STATUS_ERR_FULL 12 /* Table full */
+#define HIFC_MGMT_STATUS_ERR_NOT_FOUND 13 /* Not found */
+#define HIFC_MGMT_STATUS_ERR_BUSY 14 /* Device or resource busy */
+#define HIFC_MGMT_STATUS_ERR_RESOURCE 15 /* No resources for operation */
+#define HIFC_MGMT_STATUS_ERR_CONFIG 16 /* Invalid configuration */
+#define HIFC_MGMT_STATUS_ERR_UNAVAIL 17 /* Feature unavailable */
+#define HIFC_MGMT_STATUS_ERR_CRC 18 /* CRC check failed */
+#define HIFC_MGMT_STATUS_ERR_NXIO 19 /* No such device or address */
+#define HIFC_MGMT_STATUS_ERR_ROLLBACK 20 /* Chip rollback fail */
+#define HIFC_MGMT_STATUS_ERR_LEN 32 /* Length too short or too long */
+#define HIFC_MGMT_STATUS_ERR_UNSUPPORT 0xFF /* Feature not supported */
+/* QE buffer related defines */
+
+enum hifc_rx_buf_size {
+ HIFC_RX_BUF_SIZE_32B = 0x20,
+ HIFC_RX_BUF_SIZE_64B = 0x40,
+ HIFC_RX_BUF_SIZE_96B = 0x60,
+ HIFC_RX_BUF_SIZE_128B = 0x80,
+ HIFC_RX_BUF_SIZE_192B = 0xC0,
+ HIFC_RX_BUF_SIZE_256B = 0x100,
+ HIFC_RX_BUF_SIZE_384B = 0x180,
+ HIFC_RX_BUF_SIZE_512B = 0x200,
+ HIFC_RX_BUF_SIZE_768B = 0x300,
+ HIFC_RX_BUF_SIZE_1K = 0x400,
+ HIFC_RX_BUF_SIZE_1_5K = 0x600,
+ HIFC_RX_BUF_SIZE_2K = 0x800,
+ HIFC_RX_BUF_SIZE_3K = 0xC00,
+ HIFC_RX_BUF_SIZE_4K = 0x1000,
+ HIFC_RX_BUF_SIZE_8K = 0x2000,
+ HIFC_RX_BUF_SIZE_16K = 0x4000,
+};
+
+enum hifc_res_state {
+ HIFC_RES_CLEAN = 0,
+ HIFC_RES_ACTIVE = 1,
+};
+
+enum ppf_tmr_status {
+ HIFC_PPF_TMR_FLAG_STOP,
+ HIFC_PPF_TMR_FLAG_START,
+};
+
+struct cfg_mgmt_info;
+struct hifc_hwif;
+struct hifc_wqs;
+struct hifc_aeqs;
+struct hifc_ceqs;
+struct hifc_msg_pf_to_mgmt;
+struct hifc_cmdqs;
+
+struct hifc_root_ctxt {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_idx;
+ u16 rsvd1;
+ u8 set_cmdq_depth;
+ u8 cmdq_depth;
+ u8 lro_en;
+ u8 rsvd2;
+ u8 ppf_idx;
+ u8 rsvd3;
+ u16 rq_depth;
+ u16 rx_buf_sz;
+ u16 sq_depth;
+};
+
+struct hifc_page_addr {
+ void *virt_addr;
+ u64 phys_addr;
+};
+
+#define HIFC_PCIE_LINK_DOWN 0xFFFFFFFF
+
+#define HIFC_DEV_ACTIVE_FW_TIMEOUT (35 * 1000)
+#define HIFC_DEV_BUSY_ACTIVE_FW 0xFE
+
+#define HIFC_HW_WQ_NAME "hifc_hardware"
+#define HIFC_HEARTBEAT_PERIOD 1000
+#define HIFC_HEARTBEAT_START_EXPIRE 5000
+
+#define HIFC_CHIP_ERROR_TYPE_MAX 1024
+#define HIFC_CHIP_FAULT_SIZE \
+ (HIFC_NODE_ID_MAX * FAULT_LEVEL_MAX * HIFC_CHIP_ERROR_TYPE_MAX)
+
+#define HIFC_CSR_DMA_ATTR_TBL_BASE 0xC80
+#define HIFC_CSR_DMA_ATTR_TBL_STRIDE 0x4
+#define HIFC_CSR_DMA_ATTR_TBL_ADDR(idx) \
+ (HIFC_CSR_DMA_ATTR_TBL_BASE \
+ + (idx) * HIFC_CSR_DMA_ATTR_TBL_STRIDE)
+
+/* MSI-X registers */
+#define HIFC_CSR_MSIX_CNT_BASE 0x2004
+#define HIFC_CSR_MSIX_STRIDE 0x8
+
+#define HIFC_CSR_MSIX_CNT_ADDR(idx) \
+ (HIFC_CSR_MSIX_CNT_BASE + (idx) * HIFC_CSR_MSIX_STRIDE)
+
+enum hifc_node_id {
+ HIFC_NODE_ID_IPSU = 4,
+ HIFC_NODE_ID_MGMT_HOST = 21, /* Host CPU sends API commands to the uP */
+ HIFC_NODE_ID_MAX = 22
+};
+
+#define HIFC_HWDEV_INIT_MODES_MASK ((1UL << HIFC_HWDEV_ALL_INITED) - 1)
+
+enum hifc_hwdev_func_state {
+ HIFC_HWDEV_FUNC_INITED = HIFC_HWDEV_ALL_INITED,
+ HIFC_HWDEV_FUNC_DEINIT,
+ HIFC_HWDEV_STATE_BUSY = 31,
+};
+
+struct hifc_cqm_stats {
+ atomic_t cqm_cmd_alloc_cnt;
+ atomic_t cqm_cmd_free_cnt;
+ atomic_t cqm_send_cmd_box_cnt;
+ atomic_t cqm_db_addr_alloc_cnt;
+ atomic_t cqm_db_addr_free_cnt;
+ atomic_t cqm_fc_srq_create_cnt;
+ atomic_t cqm_qpc_mpt_create_cnt;
+ atomic_t cqm_nonrdma_queue_create_cnt;
+ atomic_t cqm_qpc_mpt_delete_cnt;
+ atomic_t cqm_nonrdma_queue_delete_cnt;
+ atomic_t cqm_aeq_callback_cnt[112];
+};
+
+struct hifc_link_event_stats {
+ atomic_t link_down_stats;
+ atomic_t link_up_stats;
+};
+
+struct hifc_fault_event_stats {
+ atomic_t chip_fault_stats[HIFC_NODE_ID_MAX][FAULT_LEVEL_MAX];
+ atomic_t fault_type_stat[FAULT_TYPE_MAX];
+ atomic_t pcie_fault_stats;
+};
+
+struct hifc_hw_stats {
+ atomic_t heart_lost_stats;
+ atomic_t nic_ucode_event_stats[HIFC_NIC_FATAL_ERROR_MAX];
+ struct hifc_cqm_stats cqm_stats;
+ struct hifc_link_event_stats link_event_stats;
+ struct hifc_fault_event_stats fault_event_stats;
+};
+
+struct hifc_fault_info_node {
+ struct list_head list;
+ struct hifc_hwdev *hwdev;
+ struct hifc_fault_recover_info info;
+};
+
+enum heartbeat_support_state {
+ HEARTBEAT_NOT_SUPPORT = 0,
+ HEARTBEAT_SUPPORT,
+};
+
+/* 25s allows for at most 5 lost heartbeat events */
+#define HIFC_HEARBEAT_ENHANCED_LOST 25000
+struct hifc_heartbeat_enhanced {
+ bool en; /* enable enhanced heartbeat or not */
+
+ unsigned long last_update_jiffies;
+ u32 last_heartbeat;
+
+ unsigned long start_detect_jiffies;
+};
+
+#define HIFC_CMD_VER_FUNC_ID 2
+#define HIFC_GLB_DMA_SO_RO_REPLACE_ADDR 0x488C
+#define HIFC_ICPL_RESERVD_ADDR 0x9204
+
+#define l2nic_msg_to_mgmt_sync(hwdev, cmd, buf_in, in_size, buf_out, out_size)\
+ hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_L2NIC, cmd, \
+ buf_in, in_size, \
+ buf_out, out_size, 0)
+
+struct hifc_hwdev {
+ void *adapter_hdl; /* pointer to hifc_pcidev or NDIS_Adapter */
+ void *pcidev_hdl; /* pointer to pcidev or Handler */
+ void *dev_hdl; /* pointer to pcidev->dev or Handler, for
+ * sdk_err() or dma_alloc()
+ */
+ u32 wq_page_size;
+
+ void *cqm_hdl;
+ void *chip_node;
+
+ struct hifc_hwif *hwif; /* include void __iomem *bar */
+ struct cfg_mgmt_info *cfg_mgmt;
+ struct hifc_wqs *wqs; /* for FC slq */
+
+ struct hifc_aeqs *aeqs;
+ struct hifc_ceqs *ceqs;
+
+ struct hifc_msg_pf_to_mgmt *pf_to_mgmt;
+ struct hifc_clp_pf_to_mgmt *clp_pf_to_mgmt;
+
+ struct hifc_cmdqs *cmdqs;
+
+ struct hifc_page_addr page_pa0;
+ struct hifc_page_addr page_pa1;
+
+ hifc_event_handler event_callback;
+ void *event_pri_handle;
+ bool history_fault_flag;
+ struct hifc_fault_recover_info history_fault;
+ struct semaphore fault_list_sem;
+
+ struct work_struct timer_work;
+ struct workqueue_struct *workq;
+ struct timer_list heartbeat_timer;
+ /* true means heartbeat lost, false means heartbeat restored */
+ u32 heartbeat_lost;
+ int chip_present_flag;
+ struct hifc_heartbeat_enhanced heartbeat_ehd;
+ struct hifc_hw_stats hw_stats;
+ u8 *chip_fault_stats;
+
+ u32 statufull_ref_cnt;
+ ulong func_state;
+
+ u64 feature_cap; /* enum hifc_func_cap */
+
+ /* In bmgw x86 host, the driver can't send messages to the mgmt cpu
+ * directly; they have to be transmitted via the ppf mailbox to the
+ * bmgw arm host.
+ */
+
+ struct hifc_board_info board_info;
+};
+
+int hifc_init_comm_ch(struct hifc_hwdev *hwdev);
+void hifc_uninit_comm_ch(struct hifc_hwdev *hwdev);
+
+enum hifc_set_arm_type {
+ HIFC_SET_ARM_CMDQ,
+ HIFC_SET_ARM_SQ,
+ HIFC_SET_ARM_TYPE_NUM,
+};
+
+/* uP to driver event */
+#define HIFC_PORT_CMD_MGMT_RESET 0x0
+struct hifc_vport_state {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u8 state;
+ u8 rsvd2[3];
+};
+
+struct hifc_l2nic_reset {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 reset_flag;
+};
+
+/* HILINK module interface */
+
+/* cmd of mgmt CPU message for HILINK module */
+enum hifc_hilink_cmd {
+ HIFC_HILINK_CMD_GET_LINK_INFO = 0x3,
+ HIFC_HILINK_CMD_SET_LINK_SETTINGS = 0x8,
+};
+
+enum hilink_info_print_event {
+ HILINK_EVENT_LINK_UP = 1,
+ HILINK_EVENT_LINK_DOWN,
+ HILINK_EVENT_CABLE_PLUGGED,
+ HILINK_EVENT_MAX_TYPE,
+};
+
+enum hifc_link_port_type {
+ LINK_PORT_FIBRE = 1,
+ LINK_PORT_ELECTRIC,
+ LINK_PORT_COPPER,
+ LINK_PORT_AOC,
+ LINK_PORT_BACKPLANE,
+ LINK_PORT_BASET,
+ LINK_PORT_MAX_TYPE,
+};
+
+enum hilink_fibre_subtype {
+ FIBRE_SUBTYPE_SR = 1,
+ FIBRE_SUBTYPE_LR,
+ FIBRE_SUBTYPE_MAX,
+};
+
+enum hilink_fec_type {
+ HILINK_FEC_RSFEC,
+ HILINK_FEC_BASEFEC,
+ HILINK_FEC_NOFEC,
+ HILINK_FEC_MAX_TYPE,
+};
+
+/* cmd of mgmt CPU message */
+enum hifc_port_cmd {
+ HIFC_PORT_CMD_SET_MAC = 0x9,
+ HIFC_PORT_CMD_GET_AUTONEG_CAP = 0xf,
+ HIFC_PORT_CMD_SET_VPORT_ENABLE = 0x5d,
+ HIFC_PORT_CMD_UPDATE_MAC = 0xa4,
+ HIFC_PORT_CMD_GET_SFP_INFO = 0xad,
+ HIFC_PORT_CMD_GET_STD_SFP_INFO = 0xF0,
+ HIFC_PORT_CMD_GET_SFP_ABS = 0xFB,
+};
+
+struct hi30_ffe_data {
+ u8 PRE2;
+ u8 PRE1;
+ u8 POST1;
+ u8 POST2;
+ u8 MAIN;
+};
+
+struct hi30_ctle_data {
+ u8 ctlebst[3];
+ u8 ctlecmband[3];
+ u8 ctlermband[3];
+ u8 ctleza[3];
+ u8 ctlesqh[3];
+ u8 ctleactgn[3];
+ u8 ctlepassgn;
+};
+
+#define HILINK_MAX_LANE 4
+
+struct hilink_lane {
+ u8 lane_used;
+ u8 hi30_ffe[5];
+ u8 hi30_ctle[19];
+ u8 hi30_dfe[14];
+ u8 rsvd4;
+};
+
+struct hifc_link_info {
+ u8 vendor_name[16];
+ /* port type:
+ * 1 - fiber; 2 - electric; 3 - copper; 4 - AOC; 5 - backplane;
+ * 6 - baseT; 0xffff - unknown
+ *
+ * port subtype:
+ * Only when port_type is fiber:
+ * 1 - SR; 2 - LR
+ */
+ u32 port_type;
+ u32 port_sub_type;
+ u32 cable_length;
+ u8 cable_temp;
+ u8 cable_max_speed; /* 1(G)/10(G)/25(G)... */
+ u8 sfp_type; /* 0 - qsfp; 1 - sfp */
+ u8 rsvd0;
+ u32 power[4]; /* uW; if is sfp, only power[2] is valid */
+
+ u8 an_state; /* 0 - off; 1 - on */
+ u8 fec; /* 0 - RSFEC; 1 - BASEFEC; 2 - NOFEC */
+ u16 speed; /* 1(G)/10(G)/25(G)... */
+
+ u8 cable_absent; /* 0 - cable present; 1 - cable absent */
+ u8 alos; /* 0 - yes; 1 - no */
+ u8 rx_los; /* 0 - yes; 1 - no */
+ u8 pma_status;
+ u32 pma_dbg_info_reg; /* pma debug info: */
+ u32 pma_signal_ok_reg; /* signal ok: */
+
+ u32 pcs_err_blk_cnt_reg; /* error block counter: */
+ u32 rf_lf_status_reg; /* RF/LF status: */
+ u8 pcs_link_reg; /* pcs link: */
+ u8 mac_link_reg; /* mac link: */
+ u8 mac_tx_en;
+ u8 mac_rx_en;
+ u32 pcs_err_cnt;
+
+ /* struct hilink_lane: 40 bytes */
+ u8 lane1[40]; /* 25GE lane in old firmware */
+
+ u8 rsvd1[266]; /* hilink machine state */
+
+ u8 lane2[HILINK_MAX_LANE * 40]; /* max 4 lane for 40GE/100GE */
+
+ u8 rsvd2[2];
+};
+
+struct hifc_hilink_link_info {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 port_id;
+ u8 info_type; /* 1: link up 2: link down 3 cable plugged */
+ u8 rsvd1;
+
+ struct hifc_link_info info;
+
+ u8 rsvd2[352];
+};
+
+int hifc_set_arm_bit(void *hwdev, enum hifc_set_arm_type q_type, u16 q_id);
+void hifc_set_chip_present(void *hwdev);
+void hifc_force_complete_all(void *hwdev);
+void hifc_init_heartbeat(struct hifc_hwdev *hwdev);
+void hifc_destroy_heartbeat(struct hifc_hwdev *hwdev);
+u8 hifc_nic_sw_aeqe_handler(void *handle, u8 event, u64 data);
+int hifc_l2nic_reset_base(struct hifc_hwdev *hwdev, u16 reset_flag);
+int hifc_pf_msg_to_mgmt_sync(void *hwdev, enum hifc_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size, u32 timeout);
+void hifc_swe_fault_handler(struct hifc_hwdev *hwdev, u8 level,
+ u8 event, u64 val);
+bool hifc_mgmt_event_ack_first(u8 mod, u8 cmd);
+int hifc_phy_init_status_judge(void *hwdev);
+int hifc_api_csr_rd32(void *hwdev, u8 dest, u32 addr, u32 *val);
+int hifc_api_csr_wr32(void *hwdev, u8 dest, u32 addr, u32 val);
+void mgmt_heartbeat_event_handler(void *hwdev, void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size);
+struct hifc_sge {
+ u32 hi_addr;
+ u32 lo_addr;
+ u32 len;
+};
+
+void hifc_cpu_to_be32(void *data, int len);
+void hifc_be32_to_cpu(void *data, int len);
+void hifc_set_sge(struct hifc_sge *sge, dma_addr_t addr, u32 len);
+#endif
diff --git a/drivers/scsi/huawei/hifc/hifc_hwif.c b/drivers/scsi/huawei/hifc/hifc_hwif.c
new file mode 100644
index 000000000000..ec84c9bc2f2f
--- /dev/null
+++ b/drivers/scsi/huawei/hifc/hifc_hwif.c
@@ -0,0 +1,630 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei Hifc PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/io-mapping.h>
+
+#include "hifc_knl_adp.h"
+#include "hifc_hw.h"
+#include "hifc_hwdev.h"
+#include "hifc_hwif.h"
+#include "hifc_api_cmd.h"
+#include "hifc_mgmt.h"
+#include "hifc_eqs.h"
+
+#define WAIT_HWIF_READY_TIMEOUT 10000
+#define HIFC_SELFTEST_RESULT 0x883C
+
+u32 hifc_hwif_read_reg(struct hifc_hwif *hwif, u32 reg)
+{
+ return be32_to_cpu(readl(hwif->cfg_regs_base + reg));
+}
+
+void hifc_hwif_write_reg(struct hifc_hwif *hwif, u32 reg, u32 val)
+{
+ writel(cpu_to_be32(val), hwif->cfg_regs_base + reg);
+}
+
+/**
+ * hwif_ready - test if the HW initialization passed
+ * @hwdev: the pointer to hw device
+ * Return: 0 - success, negative - failure
+ **/
+static int hwif_ready(struct hifc_hwdev *hwdev)
+{
+ u32 addr, attr1;
+
+ addr = HIFC_CSR_FUNC_ATTR1_ADDR;
+ attr1 = hifc_hwif_read_reg(hwdev->hwif, addr);
+
+ if (attr1 == HIFC_PCIE_LINK_DOWN)
+ return -EBUSY;
+
+ if (!HIFC_AF1_GET(attr1, MGMT_INIT_STATUS))
+ return -EBUSY;
+
+ return 0;
+}
+
+static int wait_hwif_ready(struct hifc_hwdev *hwdev)
+{
+ ulong timeout = 0;
+
+ do {
+ if (!hwif_ready(hwdev))
+ return 0;
+
+ usleep_range(999, 1000);
+ timeout++;
+ } while (timeout <= WAIT_HWIF_READY_TIMEOUT);
+
+ sdk_err(hwdev->dev_hdl, "Wait for hwif ready timed out\n");
+ return -EBUSY;
+}
+
+/**
+ * set_hwif_attr - set the attributes as members in hwif
+ * @hwif: the hardware interface of a pci function device
+ * @attr0: the first attribute that was read from the hw
+ * @attr1: the second attribute that was read from the hw
+ * @attr2: the third attribute that was read from the hw
+ **/
+static void set_hwif_attr(struct hifc_hwif *hwif, u32 attr0, u32 attr1,
+ u32 attr2)
+{
+ hwif->attr.func_global_idx = HIFC_AF0_GET(attr0, FUNC_GLOBAL_IDX);
+ hwif->attr.port_to_port_idx = HIFC_AF0_GET(attr0, P2P_IDX);
+ hwif->attr.pci_intf_idx = HIFC_AF0_GET(attr0, PCI_INTF_IDX);
+ hwif->attr.vf_in_pf = HIFC_AF0_GET(attr0, VF_IN_PF);
+ hwif->attr.func_type = HIFC_AF0_GET(attr0, FUNC_TYPE);
+
+ hwif->attr.ppf_idx = HIFC_AF1_GET(attr1, PPF_IDX);
+
+ hwif->attr.num_aeqs = BIT(HIFC_AF1_GET(attr1, AEQS_PER_FUNC));
+ hwif->attr.num_ceqs = BIT(HIFC_AF1_GET(attr1, CEQS_PER_FUNC));
+ hwif->attr.num_irqs = BIT(HIFC_AF1_GET(attr1, IRQS_PER_FUNC));
+ hwif->attr.num_dma_attr = BIT(HIFC_AF1_GET(attr1, DMA_ATTR_PER_FUNC));
+}
+
+/**
+ * get_hwif_attr - read and set the attributes as members in hwif
+ * @hwif: the hardware interface of a pci function device
+ **/
+static void get_hwif_attr(struct hifc_hwif *hwif)
+{
+ u32 addr, attr0, attr1, attr2;
+
+ addr = HIFC_CSR_FUNC_ATTR0_ADDR;
+ attr0 = hifc_hwif_read_reg(hwif, addr);
+
+ addr = HIFC_CSR_FUNC_ATTR1_ADDR;
+ attr1 = hifc_hwif_read_reg(hwif, addr);
+
+ addr = HIFC_CSR_FUNC_ATTR2_ADDR;
+ attr2 = hifc_hwif_read_reg(hwif, addr);
+
+ set_hwif_attr(hwif, attr0, attr1, attr2);
+}
+
+void hifc_set_pf_status(struct hifc_hwif *hwif, enum hifc_pf_status status)
+{
+ u32 attr5 = HIFC_AF5_SET(status, PF_STATUS);
+ u32 addr = HIFC_CSR_FUNC_ATTR5_ADDR;
+
+ hifc_hwif_write_reg(hwif, addr, attr5);
+}
+
+enum hifc_pf_status hifc_get_pf_status(struct hifc_hwif *hwif)
+{
+ u32 attr5 = hifc_hwif_read_reg(hwif, HIFC_CSR_FUNC_ATTR5_ADDR);
+
+ return HIFC_AF5_GET(attr5, PF_STATUS);
+}
+
+enum hifc_doorbell_ctrl hifc_get_doorbell_ctrl_status(struct hifc_hwif *hwif)
+{
+ u32 attr4 = hifc_hwif_read_reg(hwif, HIFC_CSR_FUNC_ATTR4_ADDR);
+
+ return HIFC_AF4_GET(attr4, DOORBELL_CTRL);
+}
+
+enum hifc_outbound_ctrl hifc_get_outbound_ctrl_status(struct hifc_hwif *hwif)
+{
+ u32 attr4 = hifc_hwif_read_reg(hwif, HIFC_CSR_FUNC_ATTR4_ADDR);
+
+ return HIFC_AF4_GET(attr4, OUTBOUND_CTRL);
+}
+
+void hifc_enable_doorbell(struct hifc_hwif *hwif)
+{
+ u32 addr, attr4;
+
+ addr = HIFC_CSR_FUNC_ATTR4_ADDR;
+ attr4 = hifc_hwif_read_reg(hwif, addr);
+
+ attr4 = HIFC_AF4_CLEAR(attr4, DOORBELL_CTRL);
+ attr4 |= HIFC_AF4_SET(ENABLE_DOORBELL, DOORBELL_CTRL);
+
+ hifc_hwif_write_reg(hwif, addr, attr4);
+}
+
+void hifc_disable_doorbell(struct hifc_hwif *hwif)
+{
+ u32 addr, attr4;
+
+ addr = HIFC_CSR_FUNC_ATTR4_ADDR;
+ attr4 = hifc_hwif_read_reg(hwif, addr);
+
+ attr4 = HIFC_AF4_CLEAR(attr4, DOORBELL_CTRL);
+ attr4 |= HIFC_AF4_SET(DISABLE_DOORBELL, DOORBELL_CTRL);
+
+ hifc_hwif_write_reg(hwif, addr, attr4);
+}
+
+/**
+ * set_ppf - try to set hwif as ppf and set the type of hwif in this case
+ * @hwif: the hardware interface of a pci function device
+ **/
+static void set_ppf(struct hifc_hwif *hwif)
+{
+ struct hifc_func_attr *attr = &hwif->attr;
+ u32 addr, val, ppf_election;
+
+ /* Read Modify Write */
+ addr = HIFC_CSR_PPF_ELECTION_ADDR;
+
+ val = hifc_hwif_read_reg(hwif, addr);
+ val = HIFC_PPF_ELECTION_CLEAR(val, IDX);
+
+ ppf_election = HIFC_PPF_ELECTION_SET(attr->func_global_idx, IDX);
+ val |= ppf_election;
+
+ hifc_hwif_write_reg(hwif, addr, val);
+
+ /* Check PPF */
+ val = hifc_hwif_read_reg(hwif, addr);
+
+ attr->ppf_idx = HIFC_PPF_ELECTION_GET(val, IDX);
+ if (attr->ppf_idx == attr->func_global_idx)
+ attr->func_type = TYPE_PPF;
+}
+
+/**
+ * get_mpf - get the mpf index into the hwif
+ * @hwif: the hardware interface of a pci function device
+ **/
+static void get_mpf(struct hifc_hwif *hwif)
+{
+ struct hifc_func_attr *attr = &hwif->attr;
+ u32 mpf_election, addr;
+
+ addr = HIFC_CSR_GLOBAL_MPF_ELECTION_ADDR;
+
+ mpf_election = hifc_hwif_read_reg(hwif, addr);
+ attr->mpf_idx = HIFC_MPF_ELECTION_GET(mpf_election, IDX);
+}
+
+/**
+ * set_mpf - try to set hwif as mpf and set the mpf idx in hwif
+ * @hwif: the hardware interface of a pci function device
+ **/
+static void set_mpf(struct hifc_hwif *hwif)
+{
+ struct hifc_func_attr *attr = &hwif->attr;
+ u32 addr, val, mpf_election;
+
+ /* Read Modify Write */
+ addr = HIFC_CSR_GLOBAL_MPF_ELECTION_ADDR;
+
+ val = hifc_hwif_read_reg(hwif, addr);
+
+ val = HIFC_MPF_ELECTION_CLEAR(val, IDX);
+ mpf_election = HIFC_MPF_ELECTION_SET(attr->func_global_idx, IDX);
+
+ val |= mpf_election;
+ hifc_hwif_write_reg(hwif, addr, val);
+}
+
+static void init_db_area_idx(struct hifc_free_db_area *free_db_area)
+{
+ u32 i;
+
+ for (i = 0; i < HIFC_DB_MAX_AREAS; i++)
+ free_db_area->db_idx[i] = i;
+
+ free_db_area->num_free = HIFC_DB_MAX_AREAS;
+
+ spin_lock_init(&free_db_area->idx_lock);
+}
+
+static int get_db_idx(struct hifc_hwif *hwif, u32 *idx)
+{
+ struct hifc_free_db_area *free_db_area = &hwif->free_db_area;
+ u32 pos;
+ u32 pg_idx;
+
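+ /* free doorbell page indexes are kept in a ring; a consumed slot is poisoned with 0xFFFFFFFF and skipped on retry */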
+ spin_lock(&free_db_area->idx_lock);
+
+retry:
+ if (free_db_area->num_free == 0) {
+ spin_unlock(&free_db_area->idx_lock);
+ return -ENOMEM;
+ }
+
+ free_db_area->num_free--;
+
+ pos = free_db_area->alloc_pos++;
+ pos &= HIFC_DB_MAX_AREAS - 1;
+
+ pg_idx = free_db_area->db_idx[pos];
+
+ free_db_area->db_idx[pos] = 0xFFFFFFFF;
+
+ /* pg_idx out of range */
+ if (pg_idx >= HIFC_DB_MAX_AREAS)
+ goto retry;
+
+ spin_unlock(&free_db_area->idx_lock);
+
+ *idx = pg_idx;
+
+ return 0;
+}
+
+static void free_db_idx(struct hifc_hwif *hwif, u32 idx)
+{
+ struct hifc_free_db_area *free_db_area = &hwif->free_db_area;
+ u32 pos;
+
+ if (idx >= HIFC_DB_MAX_AREAS)
+ return;
+
+ spin_lock(&free_db_area->idx_lock);
+
+ pos = free_db_area->return_pos++;
+ pos &= HIFC_DB_MAX_AREAS - 1;
+
+ free_db_area->db_idx[pos] = idx;
+
+ free_db_area->num_free++;
+
+ spin_unlock(&free_db_area->idx_lock);
+}
+
+void hifc_free_db_addr(void *hwdev, void __iomem *db_base,
+ void __iomem *dwqe_base)
+{
+ struct hifc_hwif *hwif;
+ u32 idx;
+
+ if (!hwdev || !db_base)
+ return;
+
+ hwif = ((struct hifc_hwdev *)hwdev)->hwif;
+ idx = DB_IDX(db_base, hwif->db_base);
+
+#if defined(__aarch64__)
+ /* No need to unmap */
+#else
+ if (dwqe_base)
+ io_mapping_unmap(dwqe_base);
+#endif
+
+ free_db_idx(hwif, idx);
+}
+
+int hifc_alloc_db_addr(void *hwdev, void __iomem **db_base,
+ void __iomem **dwqe_base)
+{
+ struct hifc_hwif *hwif;
+ u64 offset;
+ u32 idx;
+ int err;
+
+ if (!hwdev || !db_base)
+ return -EINVAL;
+
+ hwif = ((struct hifc_hwdev *)hwdev)->hwif;
+
+ err = get_db_idx(hwif, &idx);
+ if (err)
+ return -EFAULT;
+
+ *db_base = hwif->db_base + idx * HIFC_DB_PAGE_SIZE;
+
+ if (!dwqe_base)
+ return 0;
+
+ offset = ((u64)idx) << PAGE_SHIFT;
+
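+ /* on non-arm64 the direct wqe page is mapped write-combining on demand; on arm64 the static mapping is reused */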
+#if defined(__aarch64__)
+ *dwqe_base = hwif->dwqe_mapping + offset;
+#else
+ *dwqe_base = io_mapping_map_wc(hwif->dwqe_mapping, offset,
+ HIFC_DB_PAGE_SIZE);
+#endif
+
+ if (!(*dwqe_base)) {
+ hifc_free_db_addr(hwdev, *db_base, NULL);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+void hifc_set_msix_state(void *hwdev, u16 msix_idx, enum hifc_msix_state flag)
+{
+ struct hifc_hwif *hwif;
+ u32 offset = msix_idx * HIFC_PCI_MSIX_ENTRY_SIZE +
+ HIFC_PCI_MSIX_ENTRY_VECTOR_CTRL;
+ u32 mask_bits;
+
+ if (!hwdev)
+ return;
+
+ hwif = ((struct hifc_hwdev *)hwdev)->hwif;
+
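+ /* a non-zero flag sets the per-entry mask bit (vector disabled); zero leaves it cleared (enabled) */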
+ mask_bits = readl(hwif->intr_regs_base + offset);
+ mask_bits &= ~HIFC_PCI_MSIX_ENTRY_CTRL_MASKBIT;
+ if (flag)
+ mask_bits |= HIFC_PCI_MSIX_ENTRY_CTRL_MASKBIT;
+
+ writel(mask_bits, hwif->intr_regs_base + offset);
+}
+
+static void disable_all_msix(struct hifc_hwdev *hwdev)
+{
+ u16 num_irqs = hwdev->hwif->attr.num_irqs;
+ u16 i;
+
+ for (i = 0; i < num_irqs; i++)
+ hifc_set_msix_state(hwdev, i, HIFC_MSIX_DISABLE);
+}
+
+static int wait_until_doorbell_and_outbound_enabled(struct hifc_hwif *hwif)
+{
+ enum hifc_doorbell_ctrl db_ctrl;
+ enum hifc_outbound_ctrl outbound_ctrl;
+ u32 cnt = 0;
+
+ while (cnt < HIFC_WAIT_DOORBELL_AND_OUTBOUND_TIMEOUT) {
+ db_ctrl = hifc_get_doorbell_ctrl_status(hwif);
+ outbound_ctrl = hifc_get_outbound_ctrl_status(hwif);
+
+ if (outbound_ctrl == ENABLE_OUTBOUND &&
+ db_ctrl == ENABLE_DOORBELL)
+ return 0;
+
+ usleep_range(900, 1000);
+ cnt++;
+ }
+
+ return -EFAULT;
+}
+
+static void __print_selftest_reg(struct hifc_hwdev *hwdev)
+{
+ u32 addr, attr0, attr1;
+
+ addr = HIFC_CSR_FUNC_ATTR1_ADDR;
+ attr1 = hifc_hwif_read_reg(hwdev->hwif, addr);
+
+ if (attr1 == HIFC_PCIE_LINK_DOWN) {
+ sdk_err(hwdev->dev_hdl, "PCIE is link down\n");
+ return;
+ }
+
+ addr = HIFC_CSR_FUNC_ATTR0_ADDR;
+ attr0 = hifc_hwif_read_reg(hwdev->hwif, addr);
+ if (HIFC_AF0_GET(attr0, FUNC_TYPE) != TYPE_VF &&
+ !HIFC_AF0_GET(attr0, PCI_INTF_IDX))
+ sdk_err(hwdev->dev_hdl, "Selftest reg: 0x%08x\n",
+ hifc_hwif_read_reg(hwdev->hwif,
+ HIFC_SELFTEST_RESULT));
+}
+
+/**
+ * hifc_init_hwif - initialize the hw interface
+ * @hwdev: the pointer to hw device
+ * @cfg_reg_base: configuration register base address
+ * @intr_reg_base: interrupt register base address
+ * @db_base_phy: doorbell base physical address
+ * @db_base: doorbell base virtual address
+ * @dwqe_mapping: direct wqe mapping
+ * Return: 0 - success, negative - failure
+ **/
+int hifc_init_hwif(struct hifc_hwdev *hwdev, void *cfg_reg_base,
+ void *intr_reg_base, u64 db_base_phy,
+ void *db_base, void *dwqe_mapping)
+{
+ struct hifc_hwif *hwif;
+ int err;
+
+ hwif = kzalloc(sizeof(*hwif), GFP_KERNEL);
+ if (!hwif)
+ return -ENOMEM;
+
+ hwdev->hwif = hwif;
+ hwif->pdev = hwdev->pcidev_hdl;
+
+ hwif->cfg_regs_base = cfg_reg_base;
+ hwif->intr_regs_base = intr_reg_base;
+
+ hwif->db_base_phy = db_base_phy;
+ hwif->db_base = db_base;
+ hwif->dwqe_mapping = dwqe_mapping;
+ init_db_area_idx(&hwif->free_db_area);
+
+ err = wait_hwif_ready(hwdev);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Chip status is not ready\n");
+ __print_selftest_reg(hwdev);
+ goto hwif_ready_err;
+ }
+
+ get_hwif_attr(hwif);
+
+ err = wait_until_doorbell_and_outbound_enabled(hwif);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Hw doorbell/outbound is disabled\n");
+ goto hwif_ready_err;
+ }
+
+ set_ppf(hwif);
+
+ if (HIFC_IS_PPF(hwdev))
+ set_mpf(hwif);
+
+ get_mpf(hwif);
+
+ disable_all_msix(hwdev);
+ /* prevent the mgmt cpu from reporting any events */
+ hifc_set_pf_status(hwdev->hwif, HIFC_PF_STATUS_INIT);
+
+ pr_info("global_func_idx: %d, func_type: %d, host_id: %d, ppf: %d, mpf: %d\n",
+ hwif->attr.func_global_idx, hwif->attr.func_type,
+ hwif->attr.pci_intf_idx, hwif->attr.ppf_idx,
+ hwif->attr.mpf_idx);
+
+ return 0;
+
+hwif_ready_err:
+ kfree(hwif);
+
+ return err;
+}
+
+/**
+ * hifc_free_hwif - free the hw interface
+ * @hwdev: the pointer to hw device
+ **/
+void hifc_free_hwif(struct hifc_hwdev *hwdev)
+{
+ kfree(hwdev->hwif);
+}
+
+int hifc_dma_zalloc_coherent_align(void *dev_hdl, u64 size, u64 align,
+ unsigned flag,
+ struct hifc_dma_addr_align *mem_align)
+{
+ void *vaddr, *align_vaddr;
+ dma_addr_t paddr, align_paddr;
+ u64 real_size = size;
+
+ vaddr = dma_zalloc_coherent(dev_hdl, real_size, &paddr, flag);
+ if (!vaddr)
+ return -ENOMEM;
+
+ align_paddr = ALIGN(paddr, align);
+ /* align */
+ if (align_paddr == paddr) {
+ align_vaddr = vaddr;
+ goto out;
+ }
+
+ dma_free_coherent(dev_hdl, real_size, vaddr, paddr);
+
+ /* realloc memory for align */
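+ /* over-allocate by 'align' bytes so an aligned address is guaranteed to fall inside the buffer */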
+ real_size = size + align;
+ vaddr = dma_zalloc_coherent(dev_hdl, real_size, &paddr, flag);
+ if (!vaddr)
+ return -ENOMEM;
+
+ align_paddr = ALIGN(paddr, align);
+ align_vaddr = (void *)((u64)vaddr + (align_paddr - paddr));
+
+out:
+ mem_align->real_size = (u32)real_size;
+ mem_align->ori_vaddr = vaddr;
+ mem_align->ori_paddr = paddr;
+ mem_align->align_vaddr = align_vaddr;
+ mem_align->align_paddr = align_paddr;
+
+ return 0;
+}
+
+void hifc_dma_free_coherent_align(void *dev_hdl,
+ struct hifc_dma_addr_align *mem_align)
+{
+ dma_free_coherent(dev_hdl, mem_align->real_size,
+ mem_align->ori_vaddr, mem_align->ori_paddr);
+}
+
+u16 hifc_global_func_id(void *hwdev)
+{
+ struct hifc_hwif *hwif;
+
+ if (!hwdev)
+ return 0;
+
+ hwif = ((struct hifc_hwdev *)hwdev)->hwif;
+
+ return hwif->attr.func_global_idx;
+}
+
+/**
+ * hifc_global_func_id_hw - get function id from register, used by the
+ * SR-IOV hot migration process
+ * @hwdev: the pointer to hw device
+ **/
+u16 hifc_global_func_id_hw(void *hwdev)
+{
+ u32 addr, attr0;
+ struct hifc_hwdev *dev;
+
+ dev = (struct hifc_hwdev *)hwdev;
+ addr = HIFC_CSR_FUNC_ATTR0_ADDR;
+ attr0 = hifc_hwif_read_reg(dev->hwif, addr);
+
+ return HIFC_AF0_GET(attr0, FUNC_GLOBAL_IDX);
+}
+
+/**
+ * hifc_global_func_id_get - get function id, used by the SR-IOV hot
+ * migration process
+ * @hwdev: the pointer to hw device
+ * @func_id: function id
+ **/
+int hifc_global_func_id_get(void *hwdev, u16 *func_id)
+{
+ *func_id = hifc_global_func_id(hwdev);
+ return 0;
+}
+
+u8 hifc_pcie_itf_id(void *hwdev)
+{
+ struct hifc_hwif *hwif;
+
+ if (!hwdev)
+ return 0;
+
+ hwif = ((struct hifc_hwdev *)hwdev)->hwif;
+
+ return hwif->attr.pci_intf_idx;
+}
+EXPORT_SYMBOL(hifc_pcie_itf_id);
+
+enum func_type hifc_func_type(void *hwdev)
+{
+ struct hifc_hwif *hwif;
+
+ if (!hwdev)
+ return 0;
+
+ hwif = ((struct hifc_hwdev *)hwdev)->hwif;
+
+ return hwif->attr.func_type;
+}
+
+u8 hifc_ppf_idx(void *hwdev)
+{
+ struct hifc_hwif *hwif;
+
+ if (!hwdev)
+ return 0;
+
+ hwif = ((struct hifc_hwdev *)hwdev)->hwif;
+
+ return hwif->attr.ppf_idx;
+}
diff --git a/drivers/scsi/huawei/hifc/hifc_hwif.h b/drivers/scsi/huawei/hifc/hifc_hwif.h
new file mode 100644
index 000000000000..da72253dcf5f
--- /dev/null
+++ b/drivers/scsi/huawei/hifc/hifc_hwif.h
@@ -0,0 +1,243 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei Hifc PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ */
+
+#ifndef HIFC_HWIF_H
+#define HIFC_HWIF_H
+
+#include "hifc_hwdev.h"
+
+#define HIFC_WAIT_DOORBELL_AND_OUTBOUND_TIMEOUT 60000
+#define HIFC_CSR_GLOBAL_BASE_ADDR 0x4000
+/* HW interface registers */
+#define HIFC_CSR_FUNC_ATTR0_ADDR 0x0
+#define HIFC_CSR_FUNC_ATTR1_ADDR 0x4
+#define HIFC_CSR_FUNC_ATTR2_ADDR 0x8
+#define HIFC_CSR_FUNC_ATTR4_ADDR 0x10
+
+#define HIFC_CSR_FUNC_ATTR5_ADDR 0x14
+#define HIFC_PCI_MSIX_ENTRY_SIZE 16
+#define HIFC_PCI_MSIX_ENTRY_VECTOR_CTRL 12
+#define HIFC_PCI_MSIX_ENTRY_CTRL_MASKBIT 1
+
+/* total doorbell or direct wqe size is 512kB, db num: 128, dwqe: 128 */
+#define HIFC_DB_DWQE_SIZE 0x00080000
+/* db/dwqe page size: 4K */
+#define HIFC_DB_PAGE_SIZE 0x00001000ULL
+#define HIFC_DB_MAX_AREAS (HIFC_DB_DWQE_SIZE / HIFC_DB_PAGE_SIZE)
+
+#define HIFC_ELECTION_BASE 0x200
+#define HIFC_PPF_ELECTION_STRIDE 0x4
+#define HIFC_CSR_MAX_PORTS 4
+#define HIFC_CSR_PPF_ELECTION_ADDR \
+ (HIFC_CSR_GLOBAL_BASE_ADDR + HIFC_ELECTION_BASE)
+
+#define HIFC_CSR_GLOBAL_MPF_ELECTION_ADDR \
+ (HIFC_CSR_GLOBAL_BASE_ADDR + HIFC_ELECTION_BASE + \
+ HIFC_CSR_MAX_PORTS * HIFC_PPF_ELECTION_STRIDE)
+#define DB_IDX(db, db_base) \
+ ((u32)(((ulong)(db) - (ulong)(db_base)) / \
+ HIFC_DB_PAGE_SIZE))
+
+#define HIFC_AF0_FUNC_GLOBAL_IDX_SHIFT 0
+#define HIFC_AF0_P2P_IDX_SHIFT 10
+#define HIFC_AF0_PCI_INTF_IDX_SHIFT 14
+#define HIFC_AF0_VF_IN_PF_SHIFT 16
+#define HIFC_AF0_FUNC_TYPE_SHIFT 24
+#define HIFC_AF0_FUNC_GLOBAL_IDX_MASK 0x3FF
+#define HIFC_AF0_P2P_IDX_MASK 0xF
+#define HIFC_AF0_PCI_INTF_IDX_MASK 0x3
+#define HIFC_AF0_VF_IN_PF_MASK 0xFF
+#define HIFC_AF0_FUNC_TYPE_MASK 0x1
+
+#define HIFC_AF0_GET(val, member) \
+ (((val) >> HIFC_AF0_##member##_SHIFT) & HIFC_AF0_##member##_MASK)
+
+#define HIFC_AF1_PPF_IDX_SHIFT 0
+#define HIFC_AF1_AEQS_PER_FUNC_SHIFT 8
+#define HIFC_AF1_CEQS_PER_FUNC_SHIFT 12
+#define HIFC_AF1_IRQS_PER_FUNC_SHIFT 20
+#define HIFC_AF1_DMA_ATTR_PER_FUNC_SHIFT 24
+#define HIFC_AF1_MGMT_INIT_STATUS_SHIFT 30
+#define HIFC_AF1_PF_INIT_STATUS_SHIFT 31
+
+#define HIFC_AF1_PPF_IDX_MASK 0x1F
+#define HIFC_AF1_AEQS_PER_FUNC_MASK 0x3
+#define HIFC_AF1_CEQS_PER_FUNC_MASK 0x7
+#define HIFC_AF1_IRQS_PER_FUNC_MASK 0xF
+#define HIFC_AF1_DMA_ATTR_PER_FUNC_MASK 0x7
+#define HIFC_AF1_MGMT_INIT_STATUS_MASK 0x1
+#define HIFC_AF1_PF_INIT_STATUS_MASK 0x1
+
+#define HIFC_AF1_GET(val, member) \
+ (((val) >> HIFC_AF1_##member##_SHIFT) & HIFC_AF1_##member##_MASK)
+
+#define HIFC_AF4_OUTBOUND_CTRL_SHIFT 0
+#define HIFC_AF4_DOORBELL_CTRL_SHIFT 1
+#define HIFC_AF4_OUTBOUND_CTRL_MASK 0x1
+#define HIFC_AF4_DOORBELL_CTRL_MASK 0x1
+
+#define HIFC_AF4_GET(val, member) \
+ (((val) >> HIFC_AF4_##member##_SHIFT) & HIFC_AF4_##member##_MASK)
+
+#define HIFC_AF4_SET(val, member) \
+ (((val) & HIFC_AF4_##member##_MASK) << HIFC_AF4_##member##_SHIFT)
+
+#define HIFC_AF4_CLEAR(val, member) \
+ ((val) & (~(HIFC_AF4_##member##_MASK << \
+ HIFC_AF4_##member##_SHIFT)))
+
+#define HIFC_AF5_PF_STATUS_SHIFT 0
+#define HIFC_AF5_PF_STATUS_MASK 0xFFFF
+
+#define HIFC_AF5_SET(val, member) \
+ (((val) & HIFC_AF5_##member##_MASK) << HIFC_AF5_##member##_SHIFT)
+
+#define HIFC_AF5_GET(val, member) \
+ (((val) >> HIFC_AF5_##member##_SHIFT) & HIFC_AF5_##member##_MASK)
+
+#define HIFC_PPF_ELECTION_IDX_SHIFT 0
+#define HIFC_PPF_ELECTION_IDX_MASK 0x1F
+
+#define HIFC_PPF_ELECTION_SET(val, member) \
+ (((val) & HIFC_PPF_ELECTION_##member##_MASK) << \
+ HIFC_PPF_ELECTION_##member##_SHIFT)
+
+#define HIFC_PPF_ELECTION_GET(val, member) \
+ (((val) >> HIFC_PPF_ELECTION_##member##_SHIFT) & \
+ HIFC_PPF_ELECTION_##member##_MASK)
+
+#define HIFC_PPF_ELECTION_CLEAR(val, member) \
+ ((val) & (~(HIFC_PPF_ELECTION_##member##_MASK \
+ << HIFC_PPF_ELECTION_##member##_SHIFT)))
+
+#define HIFC_MPF_ELECTION_IDX_SHIFT 0
+#define HIFC_MPF_ELECTION_IDX_MASK 0x1F
+
+#define HIFC_MPF_ELECTION_SET(val, member) \
+ (((val) & HIFC_MPF_ELECTION_##member##_MASK) << \
+ HIFC_MPF_ELECTION_##member##_SHIFT)
+
+#define HIFC_MPF_ELECTION_GET(val, member) \
+ (((val) >> HIFC_MPF_ELECTION_##member##_SHIFT) & \
+ HIFC_MPF_ELECTION_##member##_MASK)
+
+#define HIFC_MPF_ELECTION_CLEAR(val, member) \
+ ((val) & (~(HIFC_MPF_ELECTION_##member##_MASK \
+ << HIFC_MPF_ELECTION_##member##_SHIFT)))
+
+#define HIFC_HWIF_NUM_AEQS(hwif) ((hwif)->attr.num_aeqs)
+#define HIFC_HWIF_NUM_CEQS(hwif) ((hwif)->attr.num_ceqs)
+#define HIFC_HWIF_PPF_IDX(hwif) ((hwif)->attr.ppf_idx)
+#define HIFC_PCI_INTF_IDX(hwif) ((hwif)->attr.pci_intf_idx)
+
+#define HIFC_FUNC_TYPE(dev) ((dev)->hwif->attr.func_type)
+#define HIFC_IS_PPF(dev) (HIFC_FUNC_TYPE(dev) == TYPE_PPF)
+
+enum hifc_pcie_nosnoop {
+ HIFC_PCIE_SNOOP = 0,
+ HIFC_PCIE_NO_SNOOP = 1,
+};
+
+enum hifc_pcie_tph {
+ HIFC_PCIE_TPH_DISABLE = 0,
+ HIFC_PCIE_TPH_ENABLE = 1,
+};
+
+enum hifc_pf_status {
+ HIFC_PF_STATUS_INIT = 0X0,
+ HIFC_PF_STATUS_ACTIVE_FLAG = 0x11,
+ HIFC_PF_STATUS_FLR_START_FLAG = 0x12,
+ HIFC_PF_STATUS_FLR_FINISH_FLAG = 0x13,
+};
+
+enum hifc_outbound_ctrl {
+ ENABLE_OUTBOUND = 0x0,
+ DISABLE_OUTBOUND = 0x1,
+};
+
+enum hifc_doorbell_ctrl {
+ ENABLE_DOORBELL = 0x0,
+ DISABLE_DOORBELL = 0x1,
+};
+
+struct hifc_free_db_area {
+ u32 db_idx[HIFC_DB_MAX_AREAS];
+ u32 num_free;
+ u32 alloc_pos;
+ u32 return_pos;
+ /* spinlock for allocating doorbell area */
+ spinlock_t idx_lock;
+};
+
+enum func_type {
+ TYPE_PF,
+ TYPE_VF,
+ TYPE_PPF,
+ TYPE_UNKNOWN,
+};
+
+struct hifc_func_attr {
+ u16 func_global_idx;
+ u8 port_to_port_idx;
+ u8 pci_intf_idx;
+ u8 vf_in_pf;
+ enum func_type func_type;
+
+ u8 mpf_idx;
+
+ u8 ppf_idx;
+
+ u16 num_irqs; /* max: 2 ^ 15 */
+ u8 num_aeqs; /* max: 2 ^ 3 */
+ u8 num_ceqs; /* max: 2 ^ 7 */
+
+ u8 num_dma_attr; /* max: 2 ^ 6 */
+};
+
+struct hifc_hwif {
+ u8 __iomem *cfg_regs_base;
+ u8 __iomem *intr_regs_base;
+ u64 db_base_phy;
+ u8 __iomem *db_base;
+
+#if defined(__aarch64__)
+ void __iomem *dwqe_mapping;
+#else
+ struct io_mapping *dwqe_mapping;
+#endif
+ struct hifc_free_db_area free_db_area;
+ struct hifc_func_attr attr;
+ void *pdev;
+};
+
+struct hifc_dma_addr_align {
+ u32 real_size;
+ void *ori_vaddr;
+ dma_addr_t ori_paddr;
+ void *align_vaddr;
+ dma_addr_t align_paddr;
+};
+
+u32 hifc_hwif_read_reg(struct hifc_hwif *hwif, u32 reg);
+void hifc_hwif_write_reg(struct hifc_hwif *hwif, u32 reg, u32 val);
+void hifc_set_pf_status(struct hifc_hwif *hwif, enum hifc_pf_status status);
+enum hifc_pf_status hifc_get_pf_status(struct hifc_hwif *hwif);
+enum hifc_doorbell_ctrl
+ hifc_get_doorbell_ctrl_status(struct hifc_hwif *hwif);
+enum hifc_outbound_ctrl
+ hifc_get_outbound_ctrl_status(struct hifc_hwif *hwif);
+void hifc_enable_doorbell(struct hifc_hwif *hwif);
+void hifc_disable_doorbell(struct hifc_hwif *hwif);
+int hifc_init_hwif(struct hifc_hwdev *hwdev, void *cfg_reg_base,
+ void *intr_reg_base, u64 db_base_phy,
+ void *db_base, void *dwqe_mapping);
+void hifc_free_hwif(struct hifc_hwdev *hwdev);
+int hifc_dma_zalloc_coherent_align(void *dev_hdl, u64 size, u64 align,
+ unsigned flag,
+ struct hifc_dma_addr_align *mem_align);
+void hifc_dma_free_coherent_align(void *dev_hdl,
+ struct hifc_dma_addr_align *mem_align);
+#endif
diff --git a/drivers/scsi/huawei/hifc/hifc_mgmt.c b/drivers/scsi/huawei/hifc/hifc_mgmt.c
new file mode 100644
index 000000000000..3f4818898e8d
--- /dev/null
+++ b/drivers/scsi/huawei/hifc/hifc_mgmt.c
@@ -0,0 +1,1426 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei Hifc PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/completion.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/semaphore.h>
+
+#include "hifc_knl_adp.h"
+#include "hifc_hw.h"
+#include "hifc_hwdev.h"
+#include "hifc_hwif.h"
+#include "hifc_api_cmd.h"
+#include "hifc_mgmt.h"
+#include "hifc_eqs.h"
+
+#define BUF_OUT_DEFAULT_SIZE 1
+#define SEGMENT_LEN 48
+#define MGMT_MSG_MAX_SEQ_ID (ALIGN(HIFC_MSG_TO_MGMT_MAX_LEN, \
+ SEGMENT_LEN) / SEGMENT_LEN)
+
+#define MAX_PF_MGMT_BUF_SIZE 2048UL
+#define MGMT_MSG_SIZE_MIN 20
+#define MGMT_MSG_SIZE_STEP 16
+#define MGMT_MSG_RSVD_FOR_DEV 8
+#define MGMT_MSG_TIMEOUT 5000 /* millisecond */
+#define SYNC_MSG_ID_MASK 0x1FF
+#define ASYNC_MSG_ID_MASK 0x1FF
+#define ASYNC_MSG_FLAG 0x200
+#define MSG_NO_RESP 0xFFFF
+#define MAX_MSG_SZ 2016
+
+#define MSG_SZ_IS_VALID(in_size) ((in_size) <= MAX_MSG_SZ)
+
+#define SYNC_MSG_ID(pf_to_mgmt) ((pf_to_mgmt)->sync_msg_id)
+
+#define SYNC_MSG_ID_INC(pf_to_mgmt) (SYNC_MSG_ID(pf_to_mgmt) = \
+ (SYNC_MSG_ID(pf_to_mgmt) + 1) & SYNC_MSG_ID_MASK)
+
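+/* async message ids carry ASYNC_MSG_FLAG (bit 9), so they never collide with sync ids */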
+#define ASYNC_MSG_ID(pf_to_mgmt) ((pf_to_mgmt)->async_msg_id)
+
+#define ASYNC_MSG_ID_INC(pf_to_mgmt) (ASYNC_MSG_ID(pf_to_mgmt) = \
+ ((ASYNC_MSG_ID(pf_to_mgmt) + 1) & ASYNC_MSG_ID_MASK) \
+ | ASYNC_MSG_FLAG)
+
+static void pf_to_mgmt_send_event_set(struct hifc_msg_pf_to_mgmt *pf_to_mgmt,
+ int event_flag)
+{
+ spin_lock(&pf_to_mgmt->sync_event_lock);
+ pf_to_mgmt->event_flag = event_flag;
+ spin_unlock(&pf_to_mgmt->sync_event_lock);
+}
+
+/**
+ * hifc_register_mgmt_msg_cb - register sync msg handler for a module
+ * @hwdev: the pointer to hw device
+ * @mod: module in the chip whose sync messages this handler will handle
+ * @pri_handle: private handle stored for this module's callback
+ * @callback: the handler that will process sync messages from this module
+ * Return: 0 - success, negative - failure
+ **/
+int hifc_register_mgmt_msg_cb(void *hwdev, enum hifc_mod_type mod,
+ void *pri_handle, hifc_mgmt_msg_cb callback)
+{
+ struct hifc_msg_pf_to_mgmt *pf_to_mgmt;
+
+ if (mod >= HIFC_MOD_HW_MAX || !hwdev)
+ return -EFAULT;
+
+ pf_to_mgmt = ((struct hifc_hwdev *)hwdev)->pf_to_mgmt;
+ if (!pf_to_mgmt)
+ return -EINVAL;
+
+ pf_to_mgmt->recv_mgmt_msg_cb[mod] = callback;
+ pf_to_mgmt->recv_mgmt_msg_data[mod] = pri_handle;
+
+ set_bit(HIFC_MGMT_MSG_CB_REG, &pf_to_mgmt->mgmt_msg_cb_state[mod]);
+
+ return 0;
+}
+
+/**
+ * hifc_unregister_mgmt_msg_cb - unregister sync msg handler for a module
+ * @hwdev: the pointer to hw device
+ * @mod: module in the chip whose sync messages this handler handles
+ **/
+void hifc_unregister_mgmt_msg_cb(void *hwdev, enum hifc_mod_type mod)
+{
+ struct hifc_msg_pf_to_mgmt *pf_to_mgmt;
+
+ if (!hwdev || mod >= HIFC_MOD_HW_MAX)
+ return;
+
+ pf_to_mgmt = ((struct hifc_hwdev *)hwdev)->pf_to_mgmt;
+ if (!pf_to_mgmt)
+ return;
+
+ clear_bit(HIFC_MGMT_MSG_CB_REG, &pf_to_mgmt->mgmt_msg_cb_state[mod]);
+
+ while (test_bit(HIFC_MGMT_MSG_CB_RUNNING,
+ &pf_to_mgmt->mgmt_msg_cb_state[mod]))
+ usleep_range(900, 1000);
+
+ pf_to_mgmt->recv_mgmt_msg_cb[mod] = NULL;
+ pf_to_mgmt->recv_mgmt_msg_data[mod] = NULL;
+}
+
+void hifc_comm_recv_mgmt_self_cmd_reg(void *hwdev, u8 cmd,
+ comm_up_self_msg_proc proc)
+{
+ struct hifc_msg_pf_to_mgmt *pf_to_mgmt;
+ u8 cmd_idx;
+
+ if (!hwdev || !proc)
+ return;
+
+ pf_to_mgmt = ((struct hifc_hwdev *)hwdev)->pf_to_mgmt;
+ if (!pf_to_mgmt)
+ return;
+
+ cmd_idx = pf_to_mgmt->proc.cmd_num;
+ if (cmd_idx >= HIFC_COMM_SELF_CMD_MAX) {
+ sdk_err(pf_to_mgmt->hwdev->dev_hdl,
+ "Register recv up process failed(cmd=0x%x)\r\n", cmd);
+ return;
+ }
+
+ pf_to_mgmt->proc.info[cmd_idx].cmd = cmd;
+ pf_to_mgmt->proc.info[cmd_idx].proc = proc;
+
+ pf_to_mgmt->proc.cmd_num++;
+}
+
+void hifc_comm_recv_up_self_cmd_unreg(void *hwdev, u8 cmd)
+{
+ struct hifc_msg_pf_to_mgmt *pf_to_mgmt;
+ u8 cmd_idx;
+
+ if (!hwdev)
+ return;
+
+ pf_to_mgmt = ((struct hifc_hwdev *)hwdev)->pf_to_mgmt;
+ if (!pf_to_mgmt)
+ return;
+
+ cmd_idx = pf_to_mgmt->proc.cmd_num;
+ if (cmd_idx >= HIFC_COMM_SELF_CMD_MAX) {
+ sdk_err(pf_to_mgmt->hwdev->dev_hdl,
+ "Unregister recv up process failed(cmd=0x%x)\r\n", cmd);
+ return;
+ }
+
+ for (cmd_idx = 0; cmd_idx < HIFC_COMM_SELF_CMD_MAX; cmd_idx++) {
+ if (cmd == pf_to_mgmt->proc.info[cmd_idx].cmd) {
+ pf_to_mgmt->proc.info[cmd_idx].cmd = 0;
+ pf_to_mgmt->proc.info[cmd_idx].proc = NULL;
+ pf_to_mgmt->proc.cmd_num--;
+ }
+ }
+}
+
+/**
+ * mgmt_msg_len - calculate the total message length
+ * @msg_data_len: the length of the message data
+ * Return: the total message length
+ **/
+static u16 mgmt_msg_len(u16 msg_data_len)
+{
+ /* u64 - the size of the header */
+ u16 msg_size;
+
+ msg_size = (u16)(MGMT_MSG_RSVD_FOR_DEV + sizeof(u64) + msg_data_len);
+
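+ /* round the total up to MGMT_MSG_SIZE_MIN, then in MGMT_MSG_SIZE_STEP increments */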
+ if (msg_size > MGMT_MSG_SIZE_MIN)
+ msg_size = MGMT_MSG_SIZE_MIN +
+ ALIGN((msg_size - MGMT_MSG_SIZE_MIN),
+ MGMT_MSG_SIZE_STEP);
+ else
+ msg_size = MGMT_MSG_SIZE_MIN;
+
+ return msg_size;
+}
+
+/**
+ * prepare_header - prepare the header of the message
+ * @pf_to_mgmt: PF to MGMT channel
+ * @header: pointer of the header to prepare
+ * @msg_len: the length of the message
+ * @mod: module in the chip that will get the message
+ * @ack_type: message ack type
+ * @direction: the direction of the original message
+ * @cmd: command type
+ * @msg_id: message id
+ **/
+static void prepare_header(struct hifc_msg_pf_to_mgmt *pf_to_mgmt,
+ u64 *header, u16 msg_len, enum hifc_mod_type mod,
+ enum hifc_msg_ack_type ack_type,
+ enum hifc_msg_direction_type direction,
+ enum hifc_mgmt_cmd cmd, u32 msg_id)
+{
+ struct hifc_hwif *hwif = pf_to_mgmt->hwdev->hwif;
+
+ *header = HIFC_MSG_HEADER_SET(msg_len, MSG_LEN) |
+ HIFC_MSG_HEADER_SET(mod, MODULE) |
+ HIFC_MSG_HEADER_SET(msg_len, SEG_LEN) |
+ HIFC_MSG_HEADER_SET(ack_type, NO_ACK) |
+ HIFC_MSG_HEADER_SET(0, ASYNC_MGMT_TO_PF) |
+ HIFC_MSG_HEADER_SET(0, SEQID) |
+ HIFC_MSG_HEADER_SET(LAST_SEGMENT, LAST) |
+ HIFC_MSG_HEADER_SET(direction, DIRECTION) |
+ HIFC_MSG_HEADER_SET(cmd, CMD) |
+ HIFC_MSG_HEADER_SET(HIFC_PCI_INTF_IDX(hwif), PCI_INTF_IDX) |
+ HIFC_MSG_HEADER_SET(hwif->attr.port_to_port_idx, P2P_IDX) |
+ HIFC_MSG_HEADER_SET(msg_id, MSG_ID);
+}
+
+static void clp_prepare_header(struct hifc_hwdev *hwdev,
+ u64 *header, u16 msg_len, enum hifc_mod_type mod,
+ enum hifc_msg_ack_type ack_type,
+ enum hifc_msg_direction_type direction,
+ enum hifc_mgmt_cmd cmd, u32 msg_id)
+{
+ struct hifc_hwif *hwif = hwdev->hwif;
+
+ *header = HIFC_MSG_HEADER_SET(msg_len, MSG_LEN) |
+ HIFC_MSG_HEADER_SET(mod, MODULE) |
+ HIFC_MSG_HEADER_SET(msg_len, SEG_LEN) |
+ HIFC_MSG_HEADER_SET(ack_type, NO_ACK) |
+ HIFC_MSG_HEADER_SET(0, ASYNC_MGMT_TO_PF) |
+ HIFC_MSG_HEADER_SET(0, SEQID) |
+ HIFC_MSG_HEADER_SET(LAST_SEGMENT, LAST) |
+ HIFC_MSG_HEADER_SET(direction, DIRECTION) |
+ HIFC_MSG_HEADER_SET(cmd, CMD) |
+ HIFC_MSG_HEADER_SET(HIFC_PCI_INTF_IDX(hwif), PCI_INTF_IDX) |
+ HIFC_MSG_HEADER_SET(hwif->attr.port_to_port_idx, P2P_IDX) |
+ HIFC_MSG_HEADER_SET(msg_id, MSG_ID);
+}
+
+/**
+ * prepare_mgmt_cmd - prepare the mgmt command
+ * @mgmt_cmd: pointer to the command to prepare
+ * @header: pointer of the header to prepare
+ * @msg: the data of the message
+ * @msg_len: the length of the message
+ **/
+static void prepare_mgmt_cmd(u8 *mgmt_cmd, u64 *header, const void *msg,
+ int msg_len)
+{
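+ /* buffer layout: MGMT_MSG_RSVD_FOR_DEV reserved bytes, 64-bit header, then the payload */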
+ memset(mgmt_cmd, 0, MGMT_MSG_RSVD_FOR_DEV);
+
+ mgmt_cmd += MGMT_MSG_RSVD_FOR_DEV;
+ memcpy(mgmt_cmd, header, sizeof(*header));
+
+ mgmt_cmd += sizeof(*header);
+ memcpy(mgmt_cmd, msg, msg_len);
+}
+
+/**
+ * send_msg_to_mgmt_async - send async message
+ * @pf_to_mgmt: PF to MGMT channel
+ * @mod: module in the chip that will get the message
+ * @cmd: command of the message
+ * @msg: the data of the message
+ * @msg_len: the length of the message
+ * @direction: the direction of the original message
+ * @resp_msg_id: msg id to respond to
+ * Return: 0 - success, negative - failure
+ **/
+static int send_msg_to_mgmt_async(struct hifc_msg_pf_to_mgmt *pf_to_mgmt,
+ enum hifc_mod_type mod, u8 cmd,
+ void *msg, u16 msg_len,
+ enum hifc_msg_direction_type direction,
+ u16 resp_msg_id)
+{
+ void *mgmt_cmd = pf_to_mgmt->async_msg_buf;
+ struct hifc_api_cmd_chain *chain;
+ u64 header;
+ u16 cmd_size = mgmt_msg_len(msg_len);
+
+ if (!hifc_get_chip_present_flag(pf_to_mgmt->hwdev))
+ return -EFAULT;
+
+ if (direction == HIFC_MSG_RESPONSE)
+ prepare_header(pf_to_mgmt, &header, msg_len, mod, HIFC_MSG_ACK,
+ direction, cmd, resp_msg_id);
+ else
+ prepare_header(pf_to_mgmt, &header, msg_len, mod, HIFC_MSG_ACK,
+ direction, cmd, ASYNC_MSG_ID(pf_to_mgmt));
+
+ prepare_mgmt_cmd((u8 *)mgmt_cmd, &header, msg, msg_len);
+
+ chain = pf_to_mgmt->cmd_chain[HIFC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU];
+
+ return hifc_api_cmd_write(chain, HIFC_NODE_ID_MGMT_HOST, mgmt_cmd,
+ cmd_size);
+}
+
+int hifc_pf_to_mgmt_async(void *hwdev, enum hifc_mod_type mod,
+ u8 cmd, void *buf_in, u16 in_size)
+{
+ struct hifc_msg_pf_to_mgmt *pf_to_mgmt;
+ void *dev = ((struct hifc_hwdev *)hwdev)->dev_hdl;
+ int err;
+
+ pf_to_mgmt = ((struct hifc_hwdev *)hwdev)->pf_to_mgmt;
+
+ /* Lock the async_msg_buf */
+ spin_lock_bh(&pf_to_mgmt->async_msg_lock);
+ ASYNC_MSG_ID_INC(pf_to_mgmt);
+
+ err = send_msg_to_mgmt_async(pf_to_mgmt, mod, cmd, buf_in, in_size,
+ HIFC_MSG_DIRECT_SEND, MSG_NO_RESP);
+ spin_unlock_bh(&pf_to_mgmt->async_msg_lock);
+
+ if (err) {
+ sdk_err(dev, "Failed to send async mgmt msg\n");
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * send_msg_to_mgmt_sync - send sync message
+ * @pf_to_mgmt: PF to MGMT channel
+ * @mod: module in the chip that will get the message
+ * @cmd: command of the message
+ * @msg: the msg data
+ * @msg_len: the msg data length
+ * @ack_type: message ack type
+ * @direction: the direction of the original message
+ * @resp_msg_id: msg id to respond to
+ * Return: 0 - success, negative - failure
+ **/
+static int send_msg_to_mgmt_sync(struct hifc_msg_pf_to_mgmt *pf_to_mgmt,
+ enum hifc_mod_type mod, u8 cmd,
+ void *msg, u16 msg_len,
+ enum hifc_msg_ack_type ack_type,
+ enum hifc_msg_direction_type direction,
+ u16 resp_msg_id)
+{
+ void *mgmt_cmd = pf_to_mgmt->sync_msg_buf;
+ struct hifc_api_cmd_chain *chain;
+ u64 header;
+ u16 cmd_size = mgmt_msg_len(msg_len);
+
+ if (!hifc_get_chip_present_flag(pf_to_mgmt->hwdev))
+ return -EFAULT;
+
+ if (direction == HIFC_MSG_RESPONSE)
+ prepare_header(pf_to_mgmt, &header, msg_len, mod, ack_type,
+ direction, cmd, resp_msg_id);
+ else
+ prepare_header(pf_to_mgmt, &header, msg_len, mod, ack_type,
+ direction, cmd, SYNC_MSG_ID_INC(pf_to_mgmt));
+
+ if (ack_type == HIFC_MSG_ACK)
+ pf_to_mgmt_send_event_set(pf_to_mgmt, SEND_EVENT_START);
+
+ prepare_mgmt_cmd((u8 *)mgmt_cmd, &header, msg, msg_len);
+
+ chain = pf_to_mgmt->cmd_chain[HIFC_API_CMD_WRITE_TO_MGMT_CPU];
+
+ return hifc_api_cmd_write(chain, HIFC_NODE_ID_MGMT_HOST, mgmt_cmd,
+ cmd_size);
+}
+
+static inline void msg_to_mgmt_pre(enum hifc_mod_type mod, void *buf_in)
+{
+ struct hifc_msg_head *msg_head;
+
+ /* the aeq num is fixed at 3, so the response aeq id must be < 3 */
+ if (mod == HIFC_MOD_COMM || mod == HIFC_MOD_L2NIC) {
+ msg_head = buf_in;
+
+ if (msg_head->resp_aeq_num >= HIFC_MAX_AEQS)
+ msg_head->resp_aeq_num = 0;
+ }
+}
+
+int hifc_pf_to_mgmt_sync(void *hwdev, enum hifc_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size, u32 timeout)
+{
+ struct hifc_msg_pf_to_mgmt *pf_to_mgmt;
+ void *dev = ((struct hifc_hwdev *)hwdev)->dev_hdl;
+ struct hifc_recv_msg *recv_msg;
+ struct completion *recv_done;
+ ulong timeo;
+ int err;
+ ulong ret;
+
+ msg_to_mgmt_pre(mod, buf_in);
+
+ pf_to_mgmt = ((struct hifc_hwdev *)hwdev)->pf_to_mgmt;
+
+ /* Lock the sync_msg_buf */
+ down(&pf_to_mgmt->sync_msg_lock);
+ recv_msg = &pf_to_mgmt->recv_resp_msg_from_mgmt;
+ recv_done = &recv_msg->recv_done;
+
+ init_completion(recv_done);
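+ /* recv_done is completed when the response for this message is received */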
+
+ err = send_msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size,
+ HIFC_MSG_ACK, HIFC_MSG_DIRECT_SEND,
+ MSG_NO_RESP);
+ if (err) {
+ sdk_err(dev, "Failed to send sync msg to mgmt, sync_msg_id: %d\n",
+ pf_to_mgmt->sync_msg_id);
+ pf_to_mgmt_send_event_set(pf_to_mgmt, SEND_EVENT_FAIL);
+ goto unlock_sync_msg;
+ }
+
+ timeo = msecs_to_jiffies(timeout ? timeout : MGMT_MSG_TIMEOUT);
+
+ ret = wait_for_completion_timeout(recv_done, timeo);
+ if (!ret) {
+ sdk_err(dev, "Mgmt response sync cmd timeout, sync_msg_id: %d\n",
+ pf_to_mgmt->sync_msg_id);
+ hifc_dump_aeq_info((struct hifc_hwdev *)hwdev);
+ err = -ETIMEDOUT;
+ pf_to_mgmt_send_event_set(pf_to_mgmt, SEND_EVENT_TIMEOUT);
+ goto unlock_sync_msg;
+ }
+ pf_to_mgmt_send_event_set(pf_to_mgmt, SEND_EVENT_END);
+
+ if (!(((struct hifc_hwdev *)hwdev)->chip_present_flag)) {
+ up(&pf_to_mgmt->sync_msg_lock);
+ return -ETIMEDOUT;
+ }
+
+ if (buf_out && out_size) {
+ if (*out_size < recv_msg->msg_len) {
+ sdk_err(dev, "Invalid response message length: %d for mod %d cmd %d from mgmt, should be less than: %d\n",
+ recv_msg->msg_len, mod, cmd, *out_size);
+ err = -EFAULT;
+ goto unlock_sync_msg;
+ }
+
+ if (recv_msg->msg_len)
+ memcpy(buf_out, recv_msg->msg, recv_msg->msg_len);
+
+ *out_size = recv_msg->msg_len;
+ }
+
+unlock_sync_msg:
+ up(&pf_to_mgmt->sync_msg_lock);
+
+ return err;
+}
+
+static int __get_clp_reg(void *hwdev, enum clp_data_type data_type,
+ enum clp_reg_type reg_type, u32 *reg_addr)
+{
+ struct hifc_hwdev *dev = hwdev;
+ u32 offset;
+
+ offset = HIFC_CLP_REG_GAP * hifc_pcie_itf_id(dev);
+
+ switch (reg_type) {
+ case HIFC_CLP_BA_HOST:
+ *reg_addr = (data_type == HIFC_CLP_REQ_HOST) ?
+ HIFC_CLP_REG(REQ_SRAM_BA) :
+ HIFC_CLP_REG(RSP_SRAM_BA);
+ break;
+
+ case HIFC_CLP_SIZE_HOST:
+ *reg_addr = HIFC_CLP_REG(SRAM_SIZE);
+ break;
+
+ case HIFC_CLP_LEN_HOST:
+ *reg_addr = (data_type == HIFC_CLP_REQ_HOST) ?
+ HIFC_CLP_REG(REQ) : HIFC_CLP_REG(RSP);
+ break;
+
+ case HIFC_CLP_START_REQ_HOST:
+ *reg_addr = HIFC_CLP_REG(REQ);
+ break;
+
+ case HIFC_CLP_READY_RSP_HOST:
+ *reg_addr = HIFC_CLP_REG(RSP);
+ break;
+
+ default:
+ *reg_addr = 0;
+ break;
+ }
+ if (*reg_addr == 0)
+ return -EINVAL;
+
+ *reg_addr += offset;
+
+ return 0;
+}
+
+static inline int clp_param_valid(struct hifc_hwdev *hwdev,
+ enum clp_data_type data_type,
+ enum clp_reg_type reg_type)
+{
+ if (data_type == HIFC_CLP_REQ_HOST &&
+ reg_type == HIFC_CLP_READY_RSP_HOST)
+ return -EINVAL;
+
+ if (data_type == HIFC_CLP_RSP_HOST &&
+ reg_type == HIFC_CLP_START_REQ_HOST)
+ return -EINVAL;
+
+ return 0;
+}
+
+static u32 get_clp_reg_value(struct hifc_hwdev *hwdev,
+ enum clp_reg_type reg_type, u32 reg_addr)
+{
+ u32 reg_value;
+
+ reg_value = hifc_hwif_read_reg(hwdev->hwif, reg_addr);
+
+ switch (reg_type) {
+ case HIFC_CLP_BA_HOST:
+ reg_value = ((reg_value >>
+ HIFC_CLP_OFFSET(SRAM_BASE)) &
+ HIFC_CLP_MASK(SRAM_BASE));
+ break;
+
+ case HIFC_CLP_SIZE_HOST:
+ reg_value = ((reg_value >>
+ HIFC_CLP_OFFSET(SRAM_SIZE)) &
+ HIFC_CLP_MASK(SRAM_SIZE));
+ break;
+
+ case HIFC_CLP_LEN_HOST:
+ reg_value = ((reg_value >> HIFC_CLP_OFFSET(LEN)) &
+ HIFC_CLP_MASK(LEN));
+ break;
+
+ case HIFC_CLP_START_REQ_HOST:
+ reg_value = ((reg_value >> HIFC_CLP_OFFSET(START)) &
+ HIFC_CLP_MASK(START));
+ break;
+
+ case HIFC_CLP_READY_RSP_HOST:
+ reg_value = ((reg_value >> HIFC_CLP_OFFSET(READY)) &
+ HIFC_CLP_MASK(READY));
+ break;
+
+ default:
+ break;
+ }
+
+ return reg_value;
+}
+
+static int hifc_read_clp_reg(struct hifc_hwdev *hwdev,
+ enum clp_data_type data_type,
+ enum clp_reg_type reg_type, u32 *read_value)
+{
+ u32 reg_addr;
+ int err;
+
+ err = clp_param_valid(hwdev, data_type, reg_type);
+ if (err)
+ return err;
+
+ err = __get_clp_reg(hwdev, data_type, reg_type, ®_addr);
+ if (err)
+ return err;
+
+ *read_value = get_clp_reg_value(hwdev, reg_type, reg_addr);
+
+ return 0;
+}
+
+static int __check_data_type(enum clp_data_type data_type,
+ enum clp_reg_type reg_type)
+{
+ if (data_type == HIFC_CLP_REQ_HOST &&
+ reg_type == HIFC_CLP_READY_RSP_HOST)
+ return -EINVAL;
+ if (data_type == HIFC_CLP_RSP_HOST &&
+ reg_type == HIFC_CLP_START_REQ_HOST)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int __check_reg_value(enum clp_reg_type reg_type, u32 value)
+{
+ if (reg_type == HIFC_CLP_BA_HOST &&
+ value > HIFC_CLP_SRAM_BASE_REG_MAX)
+ return -EINVAL;
+
+ if (reg_type == HIFC_CLP_SIZE_HOST &&
+ value > HIFC_CLP_SRAM_SIZE_REG_MAX)
+ return -EINVAL;
+
+ if (reg_type == HIFC_CLP_LEN_HOST &&
+ value > HIFC_CLP_LEN_REG_MAX)
+ return -EINVAL;
+
+ if ((reg_type == HIFC_CLP_START_REQ_HOST ||
+ reg_type == HIFC_CLP_READY_RSP_HOST) &&
+ value > HIFC_CLP_START_OR_READY_REG_MAX)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void hifc_write_clp_reg(struct hifc_hwdev *hwdev,
+ enum clp_data_type data_type,
+ enum clp_reg_type reg_type, u32 value)
+{
+ u32 reg_addr, reg_value;
+
+ if (__check_data_type(data_type, reg_type))
+ return;
+
+ if (__check_reg_value(reg_type, value))
+ return;
+
+ if (__get_clp_reg(hwdev, data_type, reg_type, ®_addr))
+ return;
+
+ reg_value = hifc_hwif_read_reg(hwdev->hwif, reg_addr);
+
+ switch (reg_type) {
+ case HIFC_CLP_LEN_HOST:
+ reg_value = reg_value &
+ (~(HIFC_CLP_MASK(LEN) << HIFC_CLP_OFFSET(LEN)));
+ reg_value = reg_value | (value << HIFC_CLP_OFFSET(LEN));
+ break;
+
+ case HIFC_CLP_START_REQ_HOST:
+ reg_value = reg_value &
+ (~(HIFC_CLP_MASK(START) <<
+ HIFC_CLP_OFFSET(START)));
+ reg_value = reg_value | (value << HIFC_CLP_OFFSET(START));
+ break;
+
+ case HIFC_CLP_READY_RSP_HOST:
+ reg_value = reg_value &
+ (~(HIFC_CLP_MASK(READY) <<
+ HIFC_CLP_OFFSET(READY)));
+ reg_value = reg_value | (value << HIFC_CLP_OFFSET(READY));
+ break;
+
+ default:
+ return;
+ }
+
+ hifc_hwif_write_reg(hwdev->hwif, reg_addr, reg_value);
+}
+
+static int hifc_read_clp_data(struct hifc_hwdev *hwdev,
+ void *buf_out, u16 *out_size)
+{
+ int err;
+ u32 reg = HIFC_CLP_DATA(RSP);
+ u32 ready, delay_cnt;
+ u32 *ptr = (u32 *)buf_out;
+ u32 temp_out_size = 0;
+
+ err = hifc_read_clp_reg(hwdev, HIFC_CLP_RSP_HOST,
+ HIFC_CLP_READY_RSP_HOST, &ready);
+ if (err)
+ return err;
+
+ delay_cnt = 0;
+ while (ready == 0) {
+ usleep_range(9000, 10000);
+ delay_cnt++;
+ err = hifc_read_clp_reg(hwdev, HIFC_CLP_RSP_HOST,
+ HIFC_CLP_READY_RSP_HOST, &ready);
+ if (err || delay_cnt > HIFC_CLP_DELAY_CNT_MAX) {
+ sdk_err(hwdev->dev_hdl, "timeout with delay_cnt:%d\n",
+ delay_cnt);
+ return -EINVAL;
+ }
+ }
+
+ err = hifc_read_clp_reg(hwdev, HIFC_CLP_RSP_HOST,
+ HIFC_CLP_LEN_HOST, &temp_out_size);
+ if (err)
+ return err;
+
+ if (temp_out_size > HIFC_CLP_SRAM_SIZE_REG_MAX || !temp_out_size) {
+ sdk_err(hwdev->dev_hdl, "invalid temp_out_size:%d\n",
+ temp_out_size);
+ return -EINVAL;
+ }
+
+ *out_size = (u16)(temp_out_size & 0xffff);
+ for (; temp_out_size > 0; temp_out_size--) {
+ *ptr = hifc_hwif_read_reg(hwdev->hwif, reg);
+ ptr++;
+ reg = reg + 4;
+ }
+
+ hifc_write_clp_reg(hwdev, HIFC_CLP_RSP_HOST,
+ HIFC_CLP_READY_RSP_HOST, (u32)0x0);
+ hifc_write_clp_reg(hwdev, HIFC_CLP_RSP_HOST,
+ HIFC_CLP_LEN_HOST, (u32)0x0);
+
+ return 0;
+}
+
+static int hifc_write_clp_data(struct hifc_hwdev *hwdev,
+ void *buf_in, u16 in_size)
+{
+ int err;
+ u32 reg = HIFC_CLP_DATA(REQ);
+ u32 start = 1;
+ u32 delay_cnt = 0;
+ u32 *ptr = (u32 *)buf_in;
+
+ err = hifc_read_clp_reg(hwdev, HIFC_CLP_REQ_HOST,
+ HIFC_CLP_START_REQ_HOST, &start);
+ if (err)
+ return err;
+
+ while (start == 1) {
+ usleep_range(9000, 10000);
+ delay_cnt++;
+ err = hifc_read_clp_reg(hwdev, HIFC_CLP_REQ_HOST,
+ HIFC_CLP_START_REQ_HOST, &start);
+ if (err || delay_cnt > HIFC_CLP_DELAY_CNT_MAX)
+ return -EINVAL;
+ }
+
+ hifc_write_clp_reg(hwdev, HIFC_CLP_REQ_HOST,
+ HIFC_CLP_LEN_HOST, in_size);
+ hifc_write_clp_reg(hwdev, HIFC_CLP_REQ_HOST,
+ HIFC_CLP_START_REQ_HOST, (u32)0x1);
+
+ for (; in_size > 0; in_size--) {
+ hifc_hwif_write_reg(hwdev->hwif, reg, *ptr);
+ ptr++;
+ reg = reg + 4;
+ }
+
+ return 0;
+}
+
+static int hifc_check_clp_init_status(struct hifc_hwdev *hwdev)
+{
+ int err;
+ u32 reg_value = 0;
+
+ err = hifc_read_clp_reg(hwdev, HIFC_CLP_REQ_HOST,
+ HIFC_CLP_BA_HOST, ®_value);
+ if (err || !reg_value) {
+ sdk_err(hwdev->dev_hdl, "Wrong req ba value:0x%x\n", reg_value);
+ return -EINVAL;
+ }
+
+ err = hifc_read_clp_reg(hwdev, HIFC_CLP_RSP_HOST,
+ HIFC_CLP_BA_HOST, ®_value);
+ if (err || !reg_value) {
+ sdk_err(hwdev->dev_hdl, "Wrong rsp ba value:0x%x\n", reg_value);
+ return -EINVAL;
+ }
+
+ err = hifc_read_clp_reg(hwdev, HIFC_CLP_REQ_HOST,
+ HIFC_CLP_SIZE_HOST, ®_value);
+ if (err || !reg_value) {
+ sdk_err(hwdev->dev_hdl, "Wrong req size\n");
+ return -EINVAL;
+ }
+
+ err = hifc_read_clp_reg(hwdev, HIFC_CLP_RSP_HOST,
+ HIFC_CLP_SIZE_HOST, ®_value);
+ if (err || !reg_value) {
+ sdk_err(hwdev->dev_hdl, "Wrong rsp size\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void hifc_clear_clp_data(struct hifc_hwdev *hwdev,
+ enum clp_data_type data_type)
+{
+ u32 reg = (data_type == HIFC_CLP_REQ_HOST) ?
+ HIFC_CLP_DATA(REQ) : HIFC_CLP_DATA(RSP);
+ u32 count = HIFC_CLP_INPUT_BUFFER_LEN_HOST / HIFC_CLP_DATA_UNIT_HOST;
+
+ for (; count > 0; count--) {
+ hifc_hwif_write_reg(hwdev->hwif, reg, 0x0);
+ reg = reg + 4;
+ }
+}
+
+int hifc_pf_clp_to_mgmt(void *hwdev, enum hifc_mod_type mod, u8 cmd,
+ const void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hifc_clp_pf_to_mgmt *clp_pf_to_mgmt;
+ struct hifc_hwdev *dev = hwdev;
+ u64 header;
+ u16 real_size;
+ u8 *clp_msg_buf;
+ int err;
+
+ clp_pf_to_mgmt = ((struct hifc_hwdev *)hwdev)->clp_pf_to_mgmt;
+ clp_msg_buf = clp_pf_to_mgmt->clp_msg_buf;
+
+	/* 4-byte alignment */
+ if (in_size % HIFC_CLP_DATA_UNIT_HOST)
+ real_size = (in_size + (u16)sizeof(header)
+ + HIFC_CLP_DATA_UNIT_HOST);
+ else
+ real_size = in_size + (u16)sizeof(header);
+ real_size = real_size / HIFC_CLP_DATA_UNIT_HOST;
+
+ if (real_size >
+ (HIFC_CLP_INPUT_BUFFER_LEN_HOST / HIFC_CLP_DATA_UNIT_HOST)) {
+ sdk_err(dev->dev_hdl, "Invalid real_size:%d\n", real_size);
+ return -EINVAL;
+ }
+ down(&clp_pf_to_mgmt->clp_msg_lock);
+
+ err = hifc_check_clp_init_status(dev);
+ if (err) {
+ sdk_err(dev->dev_hdl, "Check clp init status failed\n");
+ up(&clp_pf_to_mgmt->clp_msg_lock);
+ return err;
+ }
+
+ hifc_clear_clp_data(dev, HIFC_CLP_RSP_HOST);
+ hifc_write_clp_reg(dev, HIFC_CLP_RSP_HOST,
+ HIFC_CLP_READY_RSP_HOST, 0x0);
+
+	/* Send request */
+ memset(clp_msg_buf, 0x0, HIFC_CLP_INPUT_BUFFER_LEN_HOST);
+ clp_prepare_header(dev, &header, in_size, mod, 0, 0, cmd, 0);
+
+ memcpy(clp_msg_buf, &header, sizeof(header));
+ clp_msg_buf += sizeof(header);
+ memcpy(clp_msg_buf, buf_in, in_size);
+
+ clp_msg_buf = clp_pf_to_mgmt->clp_msg_buf;
+
+ hifc_clear_clp_data(dev, HIFC_CLP_REQ_HOST);
+ err = hifc_write_clp_data(hwdev,
+ clp_pf_to_mgmt->clp_msg_buf, real_size);
+ if (err) {
+ sdk_err(dev->dev_hdl, "Send clp request failed\n");
+ up(&clp_pf_to_mgmt->clp_msg_lock);
+ return -EINVAL;
+ }
+
+	/* Get response */
+ clp_msg_buf = clp_pf_to_mgmt->clp_msg_buf;
+ memset(clp_msg_buf, 0x0, HIFC_CLP_INPUT_BUFFER_LEN_HOST);
+ err = hifc_read_clp_data(hwdev, clp_msg_buf, &real_size);
+ hifc_clear_clp_data(dev, HIFC_CLP_RSP_HOST);
+ if (err) {
+ sdk_err(dev->dev_hdl, "Read clp response failed\n");
+ up(&clp_pf_to_mgmt->clp_msg_lock);
+ return -EINVAL;
+ }
+
+ real_size = (u16)((real_size * HIFC_CLP_DATA_UNIT_HOST) & 0xffff);
+ if ((real_size <= sizeof(header)) ||
+ (real_size > HIFC_CLP_INPUT_BUFFER_LEN_HOST)) {
+		sdk_err(dev->dev_hdl, "Invalid response size: %d\n", real_size);
+ up(&clp_pf_to_mgmt->clp_msg_lock);
+ return -EINVAL;
+ }
+ real_size = real_size - sizeof(header);
+ if (real_size != *out_size) {
+ sdk_err(dev->dev_hdl, "Invalid real_size:%d, out_size:%d\n",
+ real_size, *out_size);
+ up(&clp_pf_to_mgmt->clp_msg_lock);
+ return -EINVAL;
+ }
+
+ memcpy(buf_out, (clp_msg_buf + sizeof(header)), real_size);
+ up(&clp_pf_to_mgmt->clp_msg_lock);
+
+ return 0;
+}
+
+/* This function is only used by txrx flush */
+int hifc_pf_to_mgmt_no_ack(void *hwdev, enum hifc_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size)
+{
+ struct hifc_msg_pf_to_mgmt *pf_to_mgmt;
+ void *dev = ((struct hifc_hwdev *)hwdev)->dev_hdl;
+ int err = -EINVAL;
+
+ if (!hifc_is_hwdev_mod_inited(hwdev, HIFC_HWDEV_MGMT_INITED)) {
+ sdk_err(dev, "Mgmt module not initialized\n");
+ return -EINVAL;
+ }
+
+ pf_to_mgmt = ((struct hifc_hwdev *)hwdev)->pf_to_mgmt;
+
+ if (!MSG_SZ_IS_VALID(in_size)) {
+ sdk_err(dev, "Mgmt msg buffer size: %d is not valid\n",
+ in_size);
+ return -EINVAL;
+ }
+
+ if (!(((struct hifc_hwdev *)hwdev)->chip_present_flag))
+ return -EPERM;
+
+ /* Lock the sync_msg_buf */
+ down(&pf_to_mgmt->sync_msg_lock);
+
+ err = send_msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size,
+ HIFC_MSG_NO_ACK, HIFC_MSG_DIRECT_SEND,
+ MSG_NO_RESP);
+
+ up(&pf_to_mgmt->sync_msg_lock);
+
+ return err;
+}
+
+/**
+ * API cmd write and read bypass use polling by default; to use the AEQ
+ * interrupt instead, set wb_trigger_aeqe to 1.
+ **/
+int hifc_api_cmd_write_nack(void *hwdev, u8 dest, void *cmd, u16 size)
+{
+ struct hifc_msg_pf_to_mgmt *pf_to_mgmt;
+ struct hifc_api_cmd_chain *chain;
+
+ if (!hwdev || !size || !cmd)
+ return -EINVAL;
+
+ if (!hifc_is_hwdev_mod_inited(hwdev, HIFC_HWDEV_MGMT_INITED) ||
+ hifc_get_mgmt_channel_status(hwdev))
+ return -EPERM;
+
+ pf_to_mgmt = ((struct hifc_hwdev *)hwdev)->pf_to_mgmt;
+ chain = pf_to_mgmt->cmd_chain[HIFC_API_CMD_POLL_WRITE];
+
+ if (!(((struct hifc_hwdev *)hwdev)->chip_present_flag))
+ return -EPERM;
+
+ return hifc_api_cmd_write(chain, dest, cmd, size);
+}
+
+int hifc_api_cmd_read_ack(void *hwdev, u8 dest, void *cmd, u16 size, void *ack,
+ u16 ack_size)
+{
+ struct hifc_msg_pf_to_mgmt *pf_to_mgmt;
+ struct hifc_api_cmd_chain *chain;
+
+ if (!hwdev || !cmd || (ack_size && !ack))
+ return -EINVAL;
+
+ if (!hifc_is_hwdev_mod_inited(hwdev, HIFC_HWDEV_MGMT_INITED) ||
+ hifc_get_mgmt_channel_status(hwdev))
+ return -EPERM;
+
+ pf_to_mgmt = ((struct hifc_hwdev *)hwdev)->pf_to_mgmt;
+ chain = pf_to_mgmt->cmd_chain[HIFC_API_CMD_POLL_READ];
+
+ if (!(((struct hifc_hwdev *)hwdev)->chip_present_flag))
+ return -EPERM;
+
+ return hifc_api_cmd_read(chain, dest, cmd, size, ack, ack_size);
+}
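+
+/* Illustrative usage of the polling chains above (a sketch only; see the
+ * counter and CSR helpers in hifc_sml.c for the real callers):
+ *
+ *	err = hifc_api_cmd_read_ack(hwdev, node, (u8 *)&req, sizeof(req),
+ *				    &rsp, sizeof(rsp));
+ *
+ * hifc_api_cmd_write_nack() is used the same way for requests that do not
+ * expect any acknowledge data, e.g. 32-bit CSR writes.
+ */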
+
+static void __send_mgmt_ack(struct hifc_msg_pf_to_mgmt *pf_to_mgmt,
+ enum hifc_mod_type mod, u8 cmd, void *buf_in,
+ u16 in_size, u16 msg_id)
+{
+ u16 buf_size;
+
+ if (!in_size)
+ buf_size = BUF_OUT_DEFAULT_SIZE;
+ else
+ buf_size = in_size;
+
+ spin_lock_bh(&pf_to_mgmt->async_msg_lock);
+ /* MGMT sent sync msg, send the response */
+ send_msg_to_mgmt_async(pf_to_mgmt, mod, cmd,
+ buf_in, buf_size, HIFC_MSG_RESPONSE,
+ msg_id);
+ spin_unlock_bh(&pf_to_mgmt->async_msg_lock);
+}
+
+/**
+ * mgmt_recv_msg_handler - handler for a message from mgmt cpu
+ * @pf_to_mgmt: PF to MGMT channel
+ * @mod: module type of the received message
+ * @cmd: command of the received message
+ * @buf_in: received message data
+ * @in_size: length of the received message
+ * @msg_id: id of the received message
+ * @need_resp: whether a response must be sent back to mgmt
+ **/
+static void mgmt_recv_msg_handler(struct hifc_msg_pf_to_mgmt *pf_to_mgmt,
+ enum hifc_mod_type mod, u8 cmd, void *buf_in,
+ u16 in_size, u16 msg_id, int need_resp)
+{
+ void *dev = pf_to_mgmt->hwdev->dev_hdl;
+ void *buf_out = pf_to_mgmt->mgmt_ack_buf;
+ enum hifc_mod_type tmp_mod = mod;
+ bool ack_first = false;
+ u16 out_size = 0;
+
+ memset(buf_out, 0, MAX_PF_MGMT_BUF_SIZE);
+
+ if (mod >= HIFC_MOD_HW_MAX) {
+ sdk_warn(dev, "Receive illegal message from mgmt cpu, mod = %d\n",
+ mod);
+ goto resp;
+ }
+
+ set_bit(HIFC_MGMT_MSG_CB_RUNNING,
+ &pf_to_mgmt->mgmt_msg_cb_state[tmp_mod]);
+
+ if (!pf_to_mgmt->recv_mgmt_msg_cb[mod] ||
+ !test_bit(HIFC_MGMT_MSG_CB_REG,
+ &pf_to_mgmt->mgmt_msg_cb_state[tmp_mod])) {
+ sdk_warn(dev, "Receive mgmt callback is null, mod = %d\n",
+ mod);
+ clear_bit(HIFC_MGMT_MSG_CB_RUNNING,
+ &pf_to_mgmt->mgmt_msg_cb_state[tmp_mod]);
+ goto resp;
+ }
+
+ ack_first = hifc_mgmt_event_ack_first(mod, cmd);
+ if (ack_first && need_resp) {
+		/* send the ack to mgmt first to avoid a command timeout in
+		 * mgmt (100ms in mgmt);
+		 * mgmt-to-host commands don't need any response data from the
+		 * host, only the ack
+		 */
+ __send_mgmt_ack(pf_to_mgmt, mod, cmd, buf_out, in_size, msg_id);
+ }
+
+ pf_to_mgmt->recv_mgmt_msg_cb[tmp_mod](pf_to_mgmt->hwdev,
+ pf_to_mgmt->recv_mgmt_msg_data[tmp_mod],
+ cmd, buf_in, in_size,
+ buf_out, &out_size);
+
+ clear_bit(HIFC_MGMT_MSG_CB_RUNNING,
+ &pf_to_mgmt->mgmt_msg_cb_state[tmp_mod]);
+
+resp:
+ if (!ack_first && need_resp)
+ __send_mgmt_ack(pf_to_mgmt, mod, cmd, buf_out, out_size,
+ msg_id);
+}
+
+/**
+ * mgmt_resp_msg_handler - handler for response message from mgmt cpu
+ * @pf_to_mgmt: PF to MGMT channel
+ * @recv_msg: received message details
+ **/
+static void mgmt_resp_msg_handler(struct hifc_msg_pf_to_mgmt *pf_to_mgmt,
+ struct hifc_recv_msg *recv_msg)
+{
+ void *dev = pf_to_mgmt->hwdev->dev_hdl;
+
+	/* responses to async messages are not waited on, just drop them */
+ if (recv_msg->msg_id & ASYNC_MSG_FLAG)
+ return;
+
+ spin_lock(&pf_to_mgmt->sync_event_lock);
+ if (recv_msg->msg_id == pf_to_mgmt->sync_msg_id &&
+ pf_to_mgmt->event_flag == SEND_EVENT_START) {
+ complete(&recv_msg->recv_done);
+ } else if (recv_msg->msg_id != pf_to_mgmt->sync_msg_id) {
+		sdk_err(dev, "Send msg id(0x%x) recv msg id(0x%x) mismatch, event state=%d\n",
+ pf_to_mgmt->sync_msg_id, recv_msg->msg_id,
+ pf_to_mgmt->event_flag);
+ } else {
+ sdk_err(dev, "Wait timeout, send msg id(0x%x) recv msg id(0x%x), event state=%d!\n",
+ pf_to_mgmt->sync_msg_id, recv_msg->msg_id,
+ pf_to_mgmt->event_flag);
+ }
+ spin_unlock(&pf_to_mgmt->sync_event_lock);
+}
+
+static void recv_mgmt_msg_work_handler(struct work_struct *work)
+{
+ struct hifc_mgmt_msg_handle_work *mgmt_work =
+ container_of(work, struct hifc_mgmt_msg_handle_work, work);
+
+ mgmt_recv_msg_handler(mgmt_work->pf_to_mgmt, mgmt_work->mod,
+ mgmt_work->cmd, mgmt_work->msg,
+ mgmt_work->msg_len, mgmt_work->msg_id,
+ !mgmt_work->async_mgmt_to_pf);
+
+ kfree(mgmt_work->msg);
+ kfree(mgmt_work);
+}
+
+static bool check_mgmt_seq_id_and_seg_len(struct hifc_recv_msg *recv_msg,
+ u8 seq_id, u8 seg_len)
+{
+ if (seq_id > MGMT_MSG_MAX_SEQ_ID || seg_len > SEGMENT_LEN)
+ return false;
+
+ if (seq_id == 0) {
+ recv_msg->seq_id = seq_id;
+ } else {
+ if (seq_id != recv_msg->seq_id + 1)
+ return false;
+ recv_msg->seq_id = seq_id;
+ }
+
+ return true;
+}
+
+/**
+ * recv_mgmt_msg_handler - handle a message from mgmt cpu
+ * @pf_to_mgmt: PF to MGMT channel
+ * @header: the header of the message
+ * @recv_msg: received message details
+ **/
+static void recv_mgmt_msg_handler(struct hifc_msg_pf_to_mgmt *pf_to_mgmt,
+ u8 *header, struct hifc_recv_msg *recv_msg)
+{
+ struct hifc_mgmt_msg_handle_work *mgmt_work;
+ u64 mbox_header = *((u64 *)header);
+ void *msg_body = header + sizeof(mbox_header);
+ u8 seq_id, seq_len;
+ u32 offset;
+ u64 dir;
+
+ /* Don't need to get anything from hw when cmd is async */
+ dir = HIFC_MSG_HEADER_GET(mbox_header, DIRECTION);
+ if (dir == HIFC_MSG_RESPONSE &&
+ HIFC_MSG_HEADER_GET(mbox_header, MSG_ID) & ASYNC_MSG_FLAG)
+ return;
+
+ seq_len = HIFC_MSG_HEADER_GET(mbox_header, SEG_LEN);
+ seq_id = HIFC_MSG_HEADER_GET(mbox_header, SEQID);
+
+ if (!check_mgmt_seq_id_and_seg_len(recv_msg, seq_id, seq_len)) {
+ sdk_err(pf_to_mgmt->hwdev->dev_hdl,
+ "Mgmt msg sequence id and segment length check fail, front seq_id: 0x%x, current seq_id: 0x%x, seg len: 0x%x\n",
+ recv_msg->seq_id, seq_id, seq_len);
+ /* set seq_id to invalid seq_id */
+ recv_msg->seq_id = MGMT_MSG_MAX_SEQ_ID;
+ return;
+ }
+
+ offset = seq_id * SEGMENT_LEN;
+ memcpy((u8 *)recv_msg->msg + offset, msg_body, seq_len);
+
+ if (!HIFC_MSG_HEADER_GET(mbox_header, LAST))
+ return;
+
+ recv_msg->cmd = HIFC_MSG_HEADER_GET(mbox_header, CMD);
+ recv_msg->mod = HIFC_MSG_HEADER_GET(mbox_header, MODULE);
+ recv_msg->async_mgmt_to_pf = HIFC_MSG_HEADER_GET(mbox_header,
+ ASYNC_MGMT_TO_PF);
+ recv_msg->msg_len = HIFC_MSG_HEADER_GET(mbox_header, MSG_LEN);
+ recv_msg->msg_id = HIFC_MSG_HEADER_GET(mbox_header, MSG_ID);
+ recv_msg->seq_id = MGMT_MSG_MAX_SEQ_ID;
+
+ if (HIFC_MSG_HEADER_GET(mbox_header, DIRECTION) ==
+ HIFC_MSG_RESPONSE) {
+ mgmt_resp_msg_handler(pf_to_mgmt, recv_msg);
+ return;
+ }
+
+ mgmt_work = kzalloc(sizeof(*mgmt_work), GFP_KERNEL);
+ if (!mgmt_work) {
+ sdk_err(pf_to_mgmt->hwdev->dev_hdl,
+ "Allocate mgmt work memory failed\n");
+ return;
+ }
+
+ if (recv_msg->msg_len) {
+ mgmt_work->msg = kzalloc(recv_msg->msg_len, GFP_KERNEL);
+ if (!mgmt_work->msg) {
+ sdk_err(pf_to_mgmt->hwdev->dev_hdl, "Allocate mgmt msg memory failed\n");
+ kfree(mgmt_work);
+ return;
+ }
+ }
+
+ mgmt_work->pf_to_mgmt = pf_to_mgmt;
+ mgmt_work->msg_len = recv_msg->msg_len;
+ memcpy(mgmt_work->msg, recv_msg->msg, recv_msg->msg_len);
+ mgmt_work->msg_id = recv_msg->msg_id;
+ mgmt_work->mod = recv_msg->mod;
+ mgmt_work->cmd = recv_msg->cmd;
+ mgmt_work->async_mgmt_to_pf = recv_msg->async_mgmt_to_pf;
+
+ INIT_WORK(&mgmt_work->work, recv_mgmt_msg_work_handler);
+ queue_work(pf_to_mgmt->workq, &mgmt_work->work);
+}
+
+/**
+ * hifc_mgmt_msg_aeqe_handler - handler for a mgmt message event
+ * @hwdev: the pointer to hw device
+ * @header: the header of the message
+ * @size: unused
+ **/
+void hifc_mgmt_msg_aeqe_handler(void *hwdev, u8 *header, u8 size)
+{
+ struct hifc_hwdev *dev = (struct hifc_hwdev *)hwdev;
+ struct hifc_msg_pf_to_mgmt *pf_to_mgmt;
+ struct hifc_recv_msg *recv_msg;
+ bool is_send_dir = false;
+
+ pf_to_mgmt = dev->pf_to_mgmt;
+
+ is_send_dir = (HIFC_MSG_HEADER_GET(*(u64 *)header, DIRECTION) ==
+ HIFC_MSG_DIRECT_SEND) ? true : false;
+
+ recv_msg = is_send_dir ? &pf_to_mgmt->recv_msg_from_mgmt :
+ &pf_to_mgmt->recv_resp_msg_from_mgmt;
+
+ recv_mgmt_msg_handler(pf_to_mgmt, header, recv_msg);
+}
+
+/**
+ * alloc_recv_msg - allocate received message memory
+ * @recv_msg: pointer that will hold the allocated data
+ * Return: 0 - success, negative - failure
+ **/
+static int alloc_recv_msg(struct hifc_recv_msg *recv_msg)
+{
+ recv_msg->seq_id = MGMT_MSG_MAX_SEQ_ID;
+
+ recv_msg->msg = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL);
+ if (!recv_msg->msg)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * free_recv_msg - free received message memory
+ * @recv_msg: pointer that holds the allocated data
+ **/
+static void free_recv_msg(struct hifc_recv_msg *recv_msg)
+{
+ kfree(recv_msg->msg);
+}
+
+/**
+ * alloc_msg_buf - allocate all the message buffers of PF to MGMT channel
+ * @pf_to_mgmt: PF to MGMT channel
+ * Return: 0 - success, negative - failure
+ **/
+static int alloc_msg_buf(struct hifc_msg_pf_to_mgmt *pf_to_mgmt)
+{
+ int err;
+ void *dev = pf_to_mgmt->hwdev->dev_hdl;
+
+ err = alloc_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt);
+ if (err) {
+ sdk_err(dev, "Failed to allocate recv msg\n");
+ return err;
+ }
+
+ err = alloc_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt);
+ if (err) {
+ sdk_err(dev, "Failed to allocate resp recv msg\n");
+ goto alloc_msg_for_resp_err;
+ }
+
+ pf_to_mgmt->async_msg_buf = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL);
+ if (!pf_to_mgmt->async_msg_buf) {
+ err = -ENOMEM;
+ goto async_msg_buf_err;
+ }
+
+ pf_to_mgmt->sync_msg_buf = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL);
+ if (!pf_to_mgmt->sync_msg_buf) {
+ err = -ENOMEM;
+ goto sync_msg_buf_err;
+ }
+
+ pf_to_mgmt->mgmt_ack_buf = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL);
+ if (!pf_to_mgmt->mgmt_ack_buf) {
+ err = -ENOMEM;
+ goto ack_msg_buf_err;
+ }
+
+ return 0;
+
+ack_msg_buf_err:
+ kfree(pf_to_mgmt->sync_msg_buf);
+
+sync_msg_buf_err:
+ kfree(pf_to_mgmt->async_msg_buf);
+
+async_msg_buf_err:
+ free_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt);
+
+alloc_msg_for_resp_err:
+ free_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt);
+ return err;
+}
+
+/**
+ * free_msg_buf - free all the message buffers of PF to MGMT channel
+ * @pf_to_mgmt: PF to MGMT channel
+ **/
+static void free_msg_buf(struct hifc_msg_pf_to_mgmt *pf_to_mgmt)
+{
+ kfree(pf_to_mgmt->mgmt_ack_buf);
+ kfree(pf_to_mgmt->sync_msg_buf);
+ kfree(pf_to_mgmt->async_msg_buf);
+
+ free_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt);
+ free_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt);
+}
+
+/**
+ * hifc_pf_to_mgmt_init - initialize PF to MGMT channel
+ * @hwdev: the pointer to hw device
+ * Return: 0 - success, negative - failure
+ **/
+int hifc_pf_to_mgmt_init(struct hifc_hwdev *hwdev)
+{
+ struct hifc_msg_pf_to_mgmt *pf_to_mgmt;
+ void *dev = hwdev->dev_hdl;
+ int err;
+
+ pf_to_mgmt = kzalloc(sizeof(*pf_to_mgmt), GFP_KERNEL);
+ if (!pf_to_mgmt)
+ return -ENOMEM;
+
+ hwdev->pf_to_mgmt = pf_to_mgmt;
+ pf_to_mgmt->hwdev = hwdev;
+ spin_lock_init(&pf_to_mgmt->async_msg_lock);
+ spin_lock_init(&pf_to_mgmt->sync_event_lock);
+ sema_init(&pf_to_mgmt->sync_msg_lock, 1);
+ pf_to_mgmt->workq = create_singlethread_workqueue(HIFC_MGMT_WQ_NAME);
+ if (!pf_to_mgmt->workq) {
+ sdk_err(dev, "Failed to initialize MGMT workqueue\n");
+ err = -ENOMEM;
+ goto create_mgmt_workq_err;
+ }
+
+ err = alloc_msg_buf(pf_to_mgmt);
+ if (err) {
+ sdk_err(dev, "Failed to allocate msg buffers\n");
+ goto alloc_msg_buf_err;
+ }
+
+ err = hifc_api_cmd_init(hwdev, pf_to_mgmt->cmd_chain);
+ if (err) {
+ sdk_err(dev, "Failed to init the api cmd chains\n");
+ goto api_cmd_init_err;
+ }
+
+ return 0;
+
+api_cmd_init_err:
+ free_msg_buf(pf_to_mgmt);
+
+alloc_msg_buf_err:
+ destroy_workqueue(pf_to_mgmt->workq);
+
+create_mgmt_workq_err:
+ kfree(pf_to_mgmt);
+
+ return err;
+}
+
+/**
+ * hifc_pf_to_mgmt_free - free PF to MGMT channel
+ * @hwdev: the pointer to hw device
+ **/
+void hifc_pf_to_mgmt_free(struct hifc_hwdev *hwdev)
+{
+ struct hifc_msg_pf_to_mgmt *pf_to_mgmt = hwdev->pf_to_mgmt;
+
+	/* destroy the workqueue before freeing the related pf_to_mgmt
+	 * resources to avoid illegal resource access
+	 */
+ destroy_workqueue(pf_to_mgmt->workq);
+ hifc_api_cmd_free(pf_to_mgmt->cmd_chain);
+ free_msg_buf(pf_to_mgmt);
+ kfree(pf_to_mgmt);
+}
+
+void hifc_flush_mgmt_workq(void *hwdev)
+{
+ struct hifc_hwdev *dev = (struct hifc_hwdev *)hwdev;
+
+ flush_workqueue(dev->aeqs->workq);
+
+ if (hifc_func_type(dev) != TYPE_VF &&
+ hifc_is_hwdev_mod_inited(hwdev, HIFC_HWDEV_MGMT_INITED))
+ flush_workqueue(dev->pf_to_mgmt->workq);
+}
+
+int hifc_clp_pf_to_mgmt_init(struct hifc_hwdev *hwdev)
+{
+ struct hifc_clp_pf_to_mgmt *clp_pf_to_mgmt;
+
+ clp_pf_to_mgmt = kzalloc(sizeof(*clp_pf_to_mgmt), GFP_KERNEL);
+ if (!clp_pf_to_mgmt)
+ return -ENOMEM;
+
+ clp_pf_to_mgmt->clp_msg_buf = kzalloc(HIFC_CLP_INPUT_BUFFER_LEN_HOST,
+ GFP_KERNEL);
+ if (!clp_pf_to_mgmt->clp_msg_buf) {
+ kfree(clp_pf_to_mgmt);
+ return -ENOMEM;
+ }
+ sema_init(&clp_pf_to_mgmt->clp_msg_lock, 1);
+
+ hwdev->clp_pf_to_mgmt = clp_pf_to_mgmt;
+
+ return 0;
+}
+
+void hifc_clp_pf_to_mgmt_free(struct hifc_hwdev *hwdev)
+{
+ struct hifc_clp_pf_to_mgmt *clp_pf_to_mgmt = hwdev->clp_pf_to_mgmt;
+
+ kfree(clp_pf_to_mgmt->clp_msg_buf);
+ kfree(clp_pf_to_mgmt);
+}
+
diff --git a/drivers/scsi/huawei/hifc/hifc_mgmt.h b/drivers/scsi/huawei/hifc/hifc_mgmt.h
new file mode 100644
index 000000000000..2adcfe2968c1
--- /dev/null
+++ b/drivers/scsi/huawei/hifc/hifc_mgmt.h
@@ -0,0 +1,407 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei Hifc PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ */
+
+#ifndef HIFC_MGMT_H_
+#define HIFC_MGMT_H_
+
+#define HIFC_MSG_HEADER_MSG_LEN_SHIFT 0
+#define HIFC_MSG_HEADER_MODULE_SHIFT 11
+			sdk_err(dev, "Invalid response message length: %d for mod %d cmd %d from mgmt, should be less than: %d\n",
+#define HIFC_MSG_HEADER_NO_ACK_SHIFT 22
+#define HIFC_MSG_HEADER_ASYNC_MGMT_TO_PF_SHIFT 23
+#define HIFC_MSG_HEADER_SEQID_SHIFT 24
+#define HIFC_MSG_HEADER_LAST_SHIFT 30
+#define HIFC_MSG_HEADER_DIRECTION_SHIFT 31
+#define HIFC_MSG_HEADER_CMD_SHIFT 32
+#define HIFC_MSG_HEADER_PCI_INTF_IDX_SHIFT 48
+#define HIFC_MSG_HEADER_P2P_IDX_SHIFT 50
+#define HIFC_MSG_HEADER_MSG_ID_SHIFT 54
+
+#define HIFC_MSG_HEADER_MSG_LEN_MASK 0x7FF
+#define HIFC_MSG_HEADER_MODULE_MASK 0x1F
+#define HIFC_MSG_HEADER_SEG_LEN_MASK 0x3F
+#define HIFC_MSG_HEADER_NO_ACK_MASK 0x1
+#define HIFC_MSG_HEADER_ASYNC_MGMT_TO_PF_MASK 0x1
+#define HIFC_MSG_HEADER_SEQID_MASK 0x3F
+#define HIFC_MSG_HEADER_LAST_MASK 0x1
+#define HIFC_MSG_HEADER_DIRECTION_MASK 0x1
+#define HIFC_MSG_HEADER_CMD_MASK 0xFF
+#define HIFC_MSG_HEADER_PCI_INTF_IDX_MASK 0x3
+#define HIFC_MSG_HEADER_P2P_IDX_MASK 0xF
+#define HIFC_MSG_HEADER_MSG_ID_MASK 0x3FF
+
+#define HIFC_MSG_HEADER_GET(val, member) \
+ (((val) >> HIFC_MSG_HEADER_##member##_SHIFT) & \
+ HIFC_MSG_HEADER_##member##_MASK)
+
+#define HIFC_MSG_HEADER_SET(val, member) \
+ ((u64)((val) & HIFC_MSG_HEADER_##member##_MASK) << \
+ HIFC_MSG_HEADER_##member##_SHIFT)
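+
+/* Example (illustrative): a 64-bit message header is built and parsed with
+ * the helpers above, e.g.
+ *
+ *	u64 header = HIFC_MSG_HEADER_SET(msg_len, MSG_LEN) |
+ *		     HIFC_MSG_HEADER_SET(mod, MODULE) |
+ *		     HIFC_MSG_HEADER_SET(cmd, CMD);
+ *
+ *	mod = HIFC_MSG_HEADER_GET(header, MODULE);
+ */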
+
+#define HIFC_MGMT_WQ_NAME "hifc_mgmt"
+
+/* CLP */
+enum clp_data_type {
+ HIFC_CLP_REQ_HOST = 0,
+ HIFC_CLP_RSP_HOST = 1
+};
+
+enum clp_reg_type {
+ HIFC_CLP_BA_HOST = 0,
+ HIFC_CLP_SIZE_HOST = 1,
+ HIFC_CLP_LEN_HOST = 2,
+ HIFC_CLP_START_REQ_HOST = 3,
+ HIFC_CLP_READY_RSP_HOST = 4
+};
+
+/* cmd of mgmt CPU message for HW module */
+enum hifc_mgmt_cmd {
+ HIFC_MGMT_CMD_RESET_MGMT = 0x0,
+ HIFC_MGMT_CMD_START_FLR = 0x1,
+ HIFC_MGMT_CMD_FLUSH_DOORBELL = 0x2,
+ HIFC_MGMT_CMD_CMDQ_CTXT_SET = 0x10,
+ HIFC_MGMT_CMD_VAT_SET = 0x12,
+ HIFC_MGMT_CMD_L2NIC_SQ_CI_ATTR_SET = 0x14,
+ HIFC_MGMT_CMD_PPF_TMR_SET = 0x22,
+ HIFC_MGMT_CMD_PPF_HT_GPA_SET = 0x23,
+ HIFC_MGMT_CMD_RES_STATE_SET = 0x24,
+ HIFC_MGMT_CMD_FUNC_TMR_BITMAT_SET = 0x32,
+ HIFC_MGMT_CMD_CEQ_CTRL_REG_WR_BY_UP = 0x33,
+ HIFC_MGMT_CMD_MSI_CTRL_REG_WR_BY_UP,
+ HIFC_MGMT_CMD_MSI_CTRL_REG_RD_BY_UP,
+ HIFC_MGMT_CMD_FAULT_REPORT = 0x37,
+ HIFC_MGMT_CMD_HEART_LOST_REPORT = 0x38,
+ HIFC_MGMT_CMD_SYNC_TIME = 0x46,
+ HIFC_MGMT_CMD_REG_READ = 0x48,
+ HIFC_MGMT_CMD_L2NIC_RESET = 0x4b,
+ HIFC_MGMT_CMD_ACTIVATE_FW = 0x4F,
+ HIFC_MGMT_CMD_PAGESIZE_SET = 0x50,
+ HIFC_MGMT_CMD_GET_BOARD_INFO = 0x52,
+ HIFC_MGMT_CMD_WATCHDOG_INFO = 0x56,
+ HIFC_MGMT_CMD_FMW_ACT_NTC = 0x57,
+ HIFC_MGMT_CMD_PCIE_DFX_NTC = 0x65,
+ HIFC_MGMT_CMD_PCIE_DFX_GET = 0x66,
+ HIFC_MGMT_CMD_GET_HOST_INFO = 0x67,
+ HIFC_MGMT_CMD_GET_PHY_INIT_STATUS = 0x6A,
+ HIFC_MGMT_CMD_HEARTBEAT_EVENT = 0x6C,
+};
+
+#define HIFC_CLP_REG_GAP 0x20
+#define HIFC_CLP_INPUT_BUFFER_LEN_HOST 2048UL
+#define HIFC_CLP_OUTPUT_BUFFER_LEN_HOST 2048UL
+#define HIFC_CLP_DATA_UNIT_HOST 4UL
+#define HIFC_BAR01_GLOABAL_CTL_OFFSET 0x4000
+#define HIFC_BAR01_CLP_OFFSET 0x5000
+
+#define HIFC_CLP_SRAM_SIZE_REG (HIFC_BAR01_GLOABAL_CTL_OFFSET + 0x220)
+#define HIFC_CLP_REQ_SRAM_BA_REG (HIFC_BAR01_GLOABAL_CTL_OFFSET + 0x224)
+#define HIFC_CLP_RSP_SRAM_BA_REG (HIFC_BAR01_GLOABAL_CTL_OFFSET + 0x228)
+#define HIFC_CLP_REQ_REG (HIFC_BAR01_GLOABAL_CTL_OFFSET + 0x22c)
+#define HIFC_CLP_RSP_REG (HIFC_BAR01_GLOABAL_CTL_OFFSET + 0x230)
+#define HIFC_CLP_REG(member) (HIFC_CLP_##member##_REG)
+
+#define HIFC_CLP_REQ_DATA (HIFC_BAR01_CLP_OFFSET)
+#define HIFC_CLP_RSP_DATA (HIFC_BAR01_CLP_OFFSET + 0x1000)
+#define HIFC_CLP_DATA(member) (HIFC_CLP_##member##_DATA)
+
+#define HIFC_CLP_SRAM_SIZE_OFFSET 16
+#define HIFC_CLP_SRAM_BASE_OFFSET 0
+#define HIFC_CLP_LEN_OFFSET 0
+#define HIFC_CLP_START_OFFSET 31
+#define HIFC_CLP_READY_OFFSET 31
+#define HIFC_CLP_OFFSET(member) (HIFC_CLP_##member##_OFFSET)
+
+#define HIFC_CLP_SRAM_SIZE_BIT_LEN 0x7ffUL
+#define HIFC_CLP_SRAM_BASE_BIT_LEN 0x7ffffffUL
+#define HIFC_CLP_LEN_BIT_LEN 0x7ffUL
+#define HIFC_CLP_START_BIT_LEN 0x1UL
+#define HIFC_CLP_READY_BIT_LEN 0x1UL
+#define HIFC_CLP_MASK(member) (HIFC_CLP_##member##_BIT_LEN)
+
+#define HIFC_CLP_DELAY_CNT_MAX 200UL
+#define HIFC_CLP_SRAM_SIZE_REG_MAX 0x3ff
+#define HIFC_CLP_SRAM_BASE_REG_MAX 0x7ffffff
+#define HIFC_CLP_LEN_REG_MAX 0x3ff
+#define HIFC_CLP_START_OR_READY_REG_MAX 0x1
+#define HIFC_MGMT_CMD_UNSUPPORTED 0xFF
+
+enum hifc_msg_direction_type {
+ HIFC_MSG_DIRECT_SEND = 0,
+ HIFC_MSG_RESPONSE = 1
+};
+
+enum hifc_msg_segment_type {
+ NOT_LAST_SEGMENT = 0,
+ LAST_SEGMENT = 1,
+};
+
+enum hifc_mgmt_msg_type {
+ ASYNC_MGMT_MSG = 0,
+ SYNC_MGMT_MSG = 1,
+};
+
+enum hifc_msg_ack_type {
+ HIFC_MSG_ACK = 0,
+ HIFC_MSG_NO_ACK = 1,
+};
+
+struct hifc_recv_msg {
+ void *msg;
+
+ struct completion recv_done;
+
+ u16 msg_len;
+ enum hifc_mod_type mod;
+ u8 cmd;
+ u8 seq_id;
+ u16 msg_id;
+ int async_mgmt_to_pf;
+};
+
+struct hifc_msg_head {
+ u8 status;
+ u8 version;
+ u8 resp_aeq_num;
+ u8 rsvd0[5];
+};
+
+#define HIFC_COMM_SELF_CMD_MAX 8
+
+struct comm_up_self_msg_sub_info {
+ u8 cmd;
+ comm_up_self_msg_proc proc;
+};
+
+struct comm_up_self_msg_info {
+ u8 cmd_num;
+ struct comm_up_self_msg_sub_info info[HIFC_COMM_SELF_CMD_MAX];
+};
+
+enum comm_pf_to_mgmt_event_state {
+ SEND_EVENT_UNINIT = 0,
+ SEND_EVENT_START,
+ SEND_EVENT_FAIL,
+ SEND_EVENT_TIMEOUT,
+ SEND_EVENT_END,
+};
+
+enum hifc_mgmt_msg_cb_state {
+ HIFC_MGMT_MSG_CB_REG = 0,
+ HIFC_MGMT_MSG_CB_RUNNING,
+};
+
+struct hifc_clp_pf_to_mgmt {
+ struct semaphore clp_msg_lock;
+ void *clp_msg_buf;
+};
+
+struct hifc_msg_pf_to_mgmt {
+ struct hifc_hwdev *hwdev;
+
+	/* Async cmds can not be scheduled */
+ spinlock_t async_msg_lock;
+ struct semaphore sync_msg_lock;
+
+ struct workqueue_struct *workq;
+
+ void *async_msg_buf;
+ void *sync_msg_buf;
+ void *mgmt_ack_buf;
+
+ struct hifc_recv_msg recv_msg_from_mgmt;
+ struct hifc_recv_msg recv_resp_msg_from_mgmt;
+
+ u16 async_msg_id;
+ u16 sync_msg_id;
+
+ struct hifc_api_cmd_chain *cmd_chain[HIFC_API_CMD_MAX];
+
+ hifc_mgmt_msg_cb recv_mgmt_msg_cb[HIFC_MOD_HW_MAX];
+ void *recv_mgmt_msg_data[HIFC_MOD_HW_MAX];
+ unsigned long mgmt_msg_cb_state[HIFC_MOD_HW_MAX];
+
+ struct comm_up_self_msg_info proc;
+
+ /* lock when sending msg */
+ spinlock_t sync_event_lock;
+ enum comm_pf_to_mgmt_event_state event_flag;
+};
+
+struct hifc_mgmt_msg_handle_work {
+ struct work_struct work;
+ struct hifc_msg_pf_to_mgmt *pf_to_mgmt;
+ void *msg;
+ u16 msg_len;
+ enum hifc_mod_type mod;
+ u8 cmd;
+ u16 msg_id;
+ int async_mgmt_to_pf;
+};
+
+/* Each driver is shown only its own capability structure, such as
+ * nic_service_cap or toe_service_cap; the full service_cap is not exposed.
+ */
+enum hifc_service_type {
+ SERVICE_T_NIC = 0,
+
+ SERVICE_T_FC = 5,
+
+ SERVICE_T_MAX,
+
+	/* Only used for interrupt resource management,
+	 * to mark the requesting module
+	 */
+ SERVICE_T_INTF = (1 << 15),
+ SERVICE_T_CQM = (1 << 16),
+};
+
+/* NIC service capability
+ * 1. The chip supports up to 1K NIC RQs
+ * 2. PF/VF RQ specifications:
+ *    RSS disabled:
+ *        VMDq disabled: each PF/VF has at most 8 RQs
+ *        VMDq enabled:  each PF/VF has at most 1K RQs
+ *    RSS enabled:
+ *        VMDq disabled: each PF has at most 64 RQs, each VF at most 32 RQs
+ *        VMDq enabled:  each PF/VF has at most 1K RQs
+ *
+ * 3. The chip supports up to 1K NIC SQs
+ * 4. PF/VF SQ specifications:
+ *    RSS disabled:
+ *        VMDq disabled: each PF/VF has at most 8 SQs
+ *        VMDq enabled:  each PF/VF has at most 1K SQs
+ *    RSS enabled:
+ *        VMDq disabled: each PF has at most 64 SQs, each VF at most 32 SQs
+ *        VMDq enabled:  each PF/VF has at most 1K SQs
+ */
+struct nic_service_cap {
+	/* PF resources */
+ u16 max_sqs;
+ u16 max_rqs;
+
+	/* VF resources, obtained by the VF from its PF through the
+	 * mailbox mechanism
+	 */
+ u16 vf_max_sqs;
+ u16 vf_max_rqs;
+	bool lro_en; /* LRO feature enable bit */
+ u8 lro_sz; /* LRO context space: n*16B */
+ u8 tso_sz; /* TSO context space: n*16B */
+
+ u16 max_queue_allowed;
+};
+
+/* PF FC service resource structure definition */
+struct dev_fc_svc_cap {
+ /* PF Parent QPC */
+	u32 max_parent_qpc_num; /* max number is 2048 */
+
+ /* PF Child QPC */
+	u32 max_child_qpc_num; /* max number is 2048 */
+
+ /* PF SCQ */
+ u32 scq_num; /* 16 */
+
+	/* PF supports SRQ */
+	u32 srq_num; /* number of SRQs is 2 */
+
+ u8 vp_id_start;
+ u8 vp_id_end;
+};
+
+/* FC services */
+struct fc_service_cap {
+ struct dev_fc_svc_cap dev_fc_cap;
+
+ /* Parent QPC */
+ u32 parent_qpc_size; /* 256B */
+
+ /* Child QPC */
+ u32 child_qpc_size; /* 256B */
+
+	/* SQ */
+	u32 sqe_size; /* 128B (in linked-list mode) */
+
+	/* SCQ */
+	u32 scqc_size; /* size of the SCQ context, 32B */
+	u32 scqe_size; /* 64B */
+
+	/* SRQ */
+	u32 srqc_size; /* size of the SRQ context, 64B */
+	u32 srqe_size; /* 32B */
+};
+
+bool hifc_support_fc(void *hwdev, struct fc_service_cap *cap);
+
+/* Service interface for obtaining service_cap public fields */
+/* Obtain service_cap.host_oq_id_mask_val */
+u8 hifc_host_oq_id_mask(void *hwdev);
+
+/* Obtain service_cap.dev_cap.max_sqs */
+u16 hifc_func_max_qnum(void *hwdev);
+
+/* The following information is obtained from the BAR space and is
+ * recorded by the SDK layer.
+ * Parameter query interfaces are provided here for the service drivers.
+ */
+/* func_attr.glb_func_idx, global function index */
+u16 hifc_global_func_id(void *hwdev);
+/* func_attr.intr_num, number of MSI-X table entries in the function */
+enum intr_type {
+ INTR_TYPE_MSIX,
+ INTR_TYPE_MSI,
+ INTR_TYPE_INT,
+ INTR_TYPE_NONE,
+};
+
+u8 hifc_pcie_itf_id(void *hwdev); /* func_attr.itf_idx, pcie interface index */
+
+/* func_attr.func_type, 0-PF 1-VF 2-PPF */
+enum func_type hifc_func_type(void *hwdev);
+
+u8 hifc_ppf_idx(void *hwdev);
+
+enum hifc_msix_state {
+ HIFC_MSIX_ENABLE,
+ HIFC_MSIX_DISABLE,
+};
+
+void hifc_set_msix_state(void *hwdev, u16 msix_idx,
+ enum hifc_msix_state flag);
+
+/* Defines the IRQ information structure */
+struct irq_info {
+ u16 msix_entry_idx; /* IRQ corresponding index number */
+ u32 irq_id; /* the IRQ number from OS */
+};
+
+int hifc_alloc_irqs(void *hwdev, enum hifc_service_type type, u16 req_num,
+ struct irq_info *irq_info_array, u16 *resp_num);
+void hifc_free_irq(void *hwdev, enum hifc_service_type type, u32 irq_id);
+
+int hifc_sync_time(void *hwdev, u64 time);
+void hifc_disable_mgmt_msg_report(void *hwdev);
+void hifc_set_func_deinit_flag(void *hwdev);
+void hifc_flush_mgmt_workq(void *hwdev);
+int hifc_global_func_id_get(void *hwdev, u16 *func_id);
+u16 hifc_global_func_id_hw(void *hwdev);
+int hifc_pf_to_mgmt_no_ack(void *hwdev, enum hifc_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size);
+void hifc_mgmt_msg_aeqe_handler(void *hwdev, u8 *header, u8 size);
+int hifc_pf_to_mgmt_init(struct hifc_hwdev *hwdev);
+void hifc_pf_to_mgmt_free(struct hifc_hwdev *hwdev);
+int hifc_pf_to_mgmt_sync(void *hwdev, enum hifc_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size, u32 timeout);
+int hifc_pf_to_mgmt_async(void *hwdev, enum hifc_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size);
+int hifc_pf_clp_to_mgmt(void *hwdev, enum hifc_mod_type mod, u8 cmd,
+ const void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size);
+int hifc_clp_pf_to_mgmt_init(struct hifc_hwdev *hwdev);
+void hifc_clp_pf_to_mgmt_free(struct hifc_hwdev *hwdev);
+
+#endif
diff --git a/drivers/scsi/huawei/hifc/hifc_sml.c b/drivers/scsi/huawei/hifc/hifc_sml.c
new file mode 100644
index 000000000000..2d04ff6ed5ff
--- /dev/null
+++ b/drivers/scsi/huawei/hifc/hifc_sml.c
@@ -0,0 +1,361 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei Hifc PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
+
+#include <linux/types.h>
+#include "hifc_knl_adp.h"
+#include "hifc_hw.h"
+#include "hifc_hwdev.h"
+#include "hifc_sml.h"
+
+#ifndef HTONL
+#define HTONL(x) \
+ ((((x) & 0x000000ff) << 24) \
+ | (((x) & 0x0000ff00) << 8) \
+ | (((x) & 0x00ff0000) >> 8) \
+ | (((x) & 0xff000000) >> 24))
+#endif
+
+static void sml_ctr_htonl_n(u32 *node, u32 len)
+{
+ u32 i;
+
+ for (i = 0; i < len; i++) {
+ *node = HTONL(*node);
+ node++;
+ }
+}
+
+static void hifc_sml_ctr_read_build_req(struct chipif_sml_ctr_rd_req_s *msg,
+ u8 instance_id, u8 op_id,
+ u8 ack, u32 ctr_id, u32 init_val)
+{
+ msg->head.value = 0;
+ msg->head.bs.instance = instance_id;
+ msg->head.bs.op_id = op_id;
+ msg->head.bs.ack = ack;
+ msg->head.value = HTONL(msg->head.value);
+
+ msg->ctr_id = ctr_id;
+ msg->ctr_id = HTONL(msg->ctr_id);
+
+ msg->initial = init_val;
+}
+
+static void hifc_sml_ctr_write_build_req(struct chipif_sml_ctr_wr_req_s *msg,
+ u8 instance_id, u8 op_id,
+ u8 ack, u32 ctr_id,
+ u64 val1, u64 val2)
+{
+ msg->head.value = 0;
+ msg->head.bs.instance = instance_id;
+ msg->head.bs.op_id = op_id;
+ msg->head.bs.ack = ack;
+ msg->head.value = HTONL(msg->head.value);
+
+ msg->ctr_id = ctr_id;
+ msg->ctr_id = HTONL(msg->ctr_id);
+
+ msg->value1_h = val1 >> 32;
+ msg->value1_l = val1 & 0xFFFFFFFF;
+
+ msg->value2_h = val2 >> 32;
+ msg->value2_l = val2 & 0xFFFFFFFF;
+}
+
+/**
+ * hifc_sm_ctr_rd32 - small single 32 counter read
+ * @hwdev: the pointer to hw device
+ * @node: the node id
+ * @instance: instance value
+ * @ctr_id: counter id
+ * @value: read counter value ptr
+ * Return: 0 - success, negative - failure
+ */
+int hifc_sm_ctr_rd32(void *hwdev, u8 node, u8 instance, u32 ctr_id, u32 *value)
+{
+ struct chipif_sml_ctr_rd_req_s req;
+ union ctr_rd_rsp_u rsp;
+ int ret;
+
+ if (!hwdev || !value)
+ return -EFAULT;
+
+ hifc_sml_ctr_read_build_req(&req, instance, CHIPIF_SM_CTR_OP_READ,
+ CHIPIF_ACK, ctr_id, 0);
+
+ ret = hifc_api_cmd_read_ack(hwdev, node, (u8 *)&req,
+ (unsigned short)sizeof(req),
+ (void *)&rsp, (unsigned short)sizeof(rsp));
+ if (ret) {
+ sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl,
+ "Sm 32bit counter read fail, err(%d)\n", ret);
+ return ret;
+ }
+ sml_ctr_htonl_n((u32 *)&rsp, 4);
+ *value = rsp.bs_ss32_rsp.value1;
+
+ return 0;
+}
+
+/**
+ * hifc_sm_ctr_rd32_clear - small single 32 counter read and clear to zero
+ * @hwdev: the pointer to hw device
+ * @node: the node id
+ * @instance: instance value
+ * @ctr_id: counter id
+ * @value: read counter value ptr
+ * Return: 0 - success, negative - failure
+ *         (ACN error codes: ERR_OK, ERR_PARAM, ERR_FAILED, etc.)
+ */
+int hifc_sm_ctr_rd32_clear(void *hwdev, u8 node, u8 instance,
+ u32 ctr_id, u32 *value)
+{
+ struct chipif_sml_ctr_rd_req_s req;
+ union ctr_rd_rsp_u rsp;
+ int ret;
+
+ if (!hwdev || !value)
+ return -EFAULT;
+
+ hifc_sml_ctr_read_build_req(&req, instance,
+ CHIPIF_SM_CTR_OP_READ_CLEAR,
+ CHIPIF_ACK, ctr_id, 0);
+
+ ret = hifc_api_cmd_read_ack(hwdev, node, (u8 *)&req,
+ (unsigned short)sizeof(req),
+ (void *)&rsp, (unsigned short)sizeof(rsp));
+
+ if (ret) {
+ sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl,
+ "Sm 32bit counter clear fail, err(%d)\n", ret);
+ return ret;
+ }
+ sml_ctr_htonl_n((u32 *)&rsp, 4);
+ *value = rsp.bs_ss32_rsp.value1;
+
+ return 0;
+}
+
+/**
+ * hifc_sm_ctr_wr32 - small single 32 counter write
+ * @hwdev: the pointer to hw device
+ * @node: the node id
+ * @instance: instance value
+ * @ctr_id: counter id
+ * @value: write counter value
+ * Return: 0 - success, negative - failure
+ */
+int hifc_sm_ctr_wr32(void *hwdev, u8 node, u8 instance, u32 ctr_id, u32 value)
+{
+ struct chipif_sml_ctr_wr_req_s req;
+ struct chipif_sml_ctr_wr_rsp_s rsp;
+
+ if (!hwdev)
+ return -EFAULT;
+
+ hifc_sml_ctr_write_build_req(&req, instance, CHIPIF_SM_CTR_OP_WRITE,
+ CHIPIF_NOACK, ctr_id, (u64)value, 0ULL);
+
+ return hifc_api_cmd_read_ack(hwdev, node, (u8 *)&req,
+ (unsigned short)sizeof(req), (void *)&rsp,
+ (unsigned short)sizeof(rsp));
+}
+
+/**
+ * hifc_sm_ctr_rd64 - big single 64 counter read
+ * @hwdev: the pointer to hw device
+ * @node: the node id
+ * @instance: instance value
+ * @ctr_id: counter id
+ * @value: read counter value ptr
+ * Return: 0 - success, negative - failure
+ */
+int hifc_sm_ctr_rd64(void *hwdev, u8 node, u8 instance, u32 ctr_id, u64 *value)
+{
+ struct chipif_sml_ctr_rd_req_s req;
+ union ctr_rd_rsp_u rsp;
+ int ret;
+
+ if (!hwdev || !value)
+ return -EFAULT;
+
+ hifc_sml_ctr_read_build_req(&req, instance, CHIPIF_SM_CTR_OP_READ,
+ CHIPIF_ACK, ctr_id, 0);
+
+ ret = hifc_api_cmd_read_ack(hwdev, node, (u8 *)&req,
+ (unsigned short)sizeof(req), (void *)&rsp,
+ (unsigned short)sizeof(rsp));
+ if (ret) {
+ sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl,
+ "Sm 64bit counter read fail err(%d)\n", ret);
+ return ret;
+ }
+ sml_ctr_htonl_n((u32 *)&rsp, 4);
+ *value = ((u64)rsp.bs_bs64_rsp.value1 << 32) | rsp.bs_bs64_rsp.value2;
+
+ return 0;
+}
+
+/**
+ * hifc_sm_ctr_wr64 - big single 64 counter write
+ * @hwdev: the pointer to hw device
+ * @node: the node id
+ * @instance: instance value
+ * @ctr_id: counter id
+ * @value: write counter value
+ * Return: 0 - success, negative - failure
+ */
+int hifc_sm_ctr_wr64(void *hwdev, u8 node, u8 instance, u32 ctr_id, u64 value)
+{
+ struct chipif_sml_ctr_wr_req_s req;
+ struct chipif_sml_ctr_wr_rsp_s rsp;
+
+ if (!hwdev)
+ return -EFAULT;
+
+ hifc_sml_ctr_write_build_req(&req, instance, CHIPIF_SM_CTR_OP_WRITE,
+ CHIPIF_NOACK, ctr_id, value, 0ULL);
+
+ return hifc_api_cmd_read_ack(hwdev, node, (u8 *)&req,
+ (unsigned short)sizeof(req), (void *)&rsp,
+ (unsigned short)sizeof(rsp));
+}
+
+/**
+ * hifc_sm_ctr_rd64_pair - big pair 128 counter read
+ * @hwdev: the pointer to hw device
+ * @node: the node id
+ * @instance: instance value
+ * @ctr_id: counter id
+ * @value1: read counter value ptr
+ * @value2: read counter value ptr
+ * Return: 0 - success, negative - failure
+ */
+int hifc_sm_ctr_rd64_pair(void *hwdev, u8 node, u8 instance,
+ u32 ctr_id, u64 *value1, u64 *value2)
+{
+ struct chipif_sml_ctr_rd_req_s req;
+ union ctr_rd_rsp_u rsp;
+ int ret;
+
+ if (!hwdev || (0 != (ctr_id & 0x1)) || !value1 || !value2) {
+ pr_err("Hwdev(0x%p) or value1(0x%p) or value2(0x%p) is NULL or ctr_id(%d) is odd number\n",
+ hwdev, value1, value2, ctr_id);
+ return -EFAULT;
+ }
+
+ hifc_sml_ctr_read_build_req(&req, instance, CHIPIF_SM_CTR_OP_READ,
+ CHIPIF_ACK, ctr_id, 0);
+
+ ret = hifc_api_cmd_read_ack(hwdev, node, (u8 *)&req,
+ (unsigned short)sizeof(req), (void *)&rsp,
+ (unsigned short)sizeof(rsp));
+ if (ret) {
+ sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl,
+ "Sm 64 bit rd pair ret(%d)\n", ret);
+ return ret;
+ }
+ sml_ctr_htonl_n((u32 *)&rsp, 4);
+ *value1 = ((u64)rsp.bs_bp64_rsp.val1_h << 32) | rsp.bs_bp64_rsp.val1_l;
+ *value2 = ((u64)rsp.bs_bp64_rsp.val2_h << 32) | rsp.bs_bp64_rsp.val2_l;
+
+ return 0;
+}
+
+/**
+ * hifc_sm_ctr_wr64_pair - big pair 128 counter write
+ * @hwdev: the pointer to hw device
+ * @node: the node id
+ * @ctr_id: counter id
+ * @instance: instance value
+ * @value1: write counter value
+ * @value2: write counter value
+ * Return: 0 - success, negative - failure
+ */
+int hifc_sm_ctr_wr64_pair(void *hwdev, u8 node, u8 instance,
+ u32 ctr_id, u64 value1, u64 value2)
+{
+ struct chipif_sml_ctr_wr_req_s req;
+ struct chipif_sml_ctr_wr_rsp_s rsp;
+
+	/* in pair mode, ctr_id must be an even number */
+ if (!hwdev || (0 != (ctr_id & 0x1))) {
+ pr_err("Handle is NULL or ctr_id(%d) is odd number for write 64 bit pair\n",
+ ctr_id);
+ return -EFAULT;
+ }
+
+ hifc_sml_ctr_write_build_req(&req, instance, CHIPIF_SM_CTR_OP_WRITE,
+ CHIPIF_NOACK, ctr_id, value1, value2);
+ return hifc_api_cmd_read_ack(hwdev, node, (u8 *)&req,
+ (unsigned short)sizeof(req), (void *)&rsp,
+ (unsigned short)sizeof(rsp));
+}
+
+int hifc_api_csr_rd32(void *hwdev, u8 dest, u32 addr, u32 *val)
+{
+ struct hifc_csr_request_api_data api_data = {0};
+ u32 csr_val = 0;
+ u16 in_size = sizeof(api_data);
+ int ret;
+
+ if (!hwdev || !val)
+ return -EFAULT;
+
+ memset(&api_data, 0, sizeof(struct hifc_csr_request_api_data));
+ api_data.dw0 = 0;
+ api_data.dw1.bits.operation_id = HIFC_CSR_OPERATION_READ_CSR;
+ api_data.dw1.bits.need_response = HIFC_CSR_NEED_RESP_DATA;
+ api_data.dw1.bits.data_size = HIFC_CSR_DATA_SZ_32;
+ api_data.dw1.val32 = cpu_to_be32(api_data.dw1.val32);
+ api_data.dw2.bits.csr_addr = addr;
+ api_data.dw2.val32 = cpu_to_be32(api_data.dw2.val32);
+
+ ret = hifc_api_cmd_read_ack(hwdev, dest, (u8 *)(&api_data),
+ in_size, &csr_val, 4);
+ if (ret) {
+ sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl,
+ "Read 32 bit csr fail, dest %d addr 0x%x, ret: 0x%x\n",
+ dest, addr, ret);
+ return ret;
+ }
+
+ *val = csr_val;
+
+ return 0;
+}
+
+int hifc_api_csr_wr32(void *hwdev, u8 dest, u32 addr, u32 val)
+{
+ struct hifc_csr_request_api_data api_data;
+ u16 in_size = sizeof(api_data);
+ int ret;
+
+ if (!hwdev)
+ return -EFAULT;
+
+ memset(&api_data, 0, sizeof(struct hifc_csr_request_api_data));
+ api_data.dw1.bits.operation_id = HIFC_CSR_OPERATION_WRITE_CSR;
+ api_data.dw1.bits.need_response = HIFC_CSR_NO_RESP_DATA;
+ api_data.dw1.bits.data_size = HIFC_CSR_DATA_SZ_32;
+ api_data.dw1.val32 = cpu_to_be32(api_data.dw1.val32);
+ api_data.dw2.bits.csr_addr = addr;
+ api_data.dw2.val32 = cpu_to_be32(api_data.dw2.val32);
+ api_data.csr_write_data_h = 0xffffffff;
+ api_data.csr_write_data_l = val;
+
+ ret = hifc_api_cmd_write_nack(hwdev, dest, (u8 *)(&api_data), in_size);
+ if (ret) {
+ sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl,
+ "Write 32 bit csr fail! dest %d addr 0x%x val 0x%x\n",
+ dest, addr, val);
+ return ret;
+ }
+
+ return 0;
+}
+
diff --git a/drivers/scsi/huawei/hifc/hifc_sml.h b/drivers/scsi/huawei/hifc/hifc_sml.h
new file mode 100644
index 000000000000..9fe2088f48a1
--- /dev/null
+++ b/drivers/scsi/huawei/hifc/hifc_sml.h
@@ -0,0 +1,183 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei Hifc PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ */
+
+#ifndef __CHIPIF_SML_COUNTER_H__
+#define __CHIPIF_SML_COUNTER_H__
+
+#define CHIPIF_FUNC_PF 0
+#define CHIPIF_FUNC_VF 1
+#define CHIPIF_FUNC_PPF 2
+
+#define CHIPIF_ACK 1
+#define CHIPIF_NOACK 0
+
+#define CHIPIF_SM_CTR_OP_READ 0x2
+#define CHIPIF_SM_CTR_OP_READ_CLEAR 0x6
+#define CHIPIF_SM_CTR_OP_WRITE 0x3
+
+#define SMALL_CNT_READ_RSP_SIZE 16
+
+/* request head */
+union chipif_sml_ctr_req_head_u {
+ struct {
+ u32 pad:15;
+ u32 ack:1;
+ u32 op_id:5;
+ u32 instance:6;
+ u32 src:5;
+ } bs;
+
+ u32 value;
+};
+
+/* counter read request struct */
+struct chipif_sml_ctr_rd_req_s {
+ u32 extra;
+ union chipif_sml_ctr_req_head_u head;
+ u32 ctr_id;
+ u32 initial;
+ u32 pad;
+};
+
+/* counter read response union */
+union ctr_rd_rsp_u {
+ struct {
+ u32 value1:16;
+ u32 pad0:16;
+ u32 pad1[3];
+ } bs_ss16_rsp;
+
+ struct {
+ u32 value1;
+ u32 pad[3];
+ } bs_ss32_rsp;
+
+ struct {
+ u32 value1:20;
+ u32 pad0:12;
+ u32 value2:12;
+ u32 pad1:20;
+ u32 pad2[2];
+ } bs_sp_rsp;
+
+ struct {
+ u32 value1;
+ u32 value2;
+ u32 pad[2];
+ } bs_bs64_rsp;
+
+ struct {
+ u32 val1_h;
+ u32 val1_l;
+ u32 val2_h;
+ u32 val2_l;
+ } bs_bp64_rsp;
+
+};
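+
+/* Which response layout applies depends on the counter being read:
+ * bs_ss32_rsp is used by hifc_sm_ctr_rd32()/hifc_sm_ctr_rd32_clear(),
+ * bs_bs64_rsp by hifc_sm_ctr_rd64() and bs_bp64_rsp by
+ * hifc_sm_ctr_rd64_pair(). The response words are byte-swapped with
+ * sml_ctr_htonl_n() before the fields are read.
+ */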
+
+/* response head */
+union sml_ctr_rsp_head_u {
+ struct {
+		u32 pad:30;  /* reserved */
+ u32 code:2; /* error code */
+ } bs;
+
+ u32 value;
+};
+
+/* counter write request struct */
+struct chipif_sml_ctr_wr_req_s {
+ u32 extra;
+ union chipif_sml_ctr_req_head_u head;
+ u32 ctr_id;
+ u32 rsv1;
+ u32 rsv2;
+ u32 value1_h;
+ u32 value1_l;
+ u32 value2_h;
+ u32 value2_l;
+};
+
+/* counter write response struct */
+struct chipif_sml_ctr_wr_rsp_s {
+ union sml_ctr_rsp_head_u head;
+ u32 pad[3];
+};
+
+enum HIFC_CSR_API_DATA_OPERATION_ID {
+ HIFC_CSR_OPERATION_WRITE_CSR = 0x1E,
+ HIFC_CSR_OPERATION_READ_CSR = 0x1F
+};
+
+enum HIFC_CSR_API_DATA_NEED_RESPONSE_DATA {
+ HIFC_CSR_NO_RESP_DATA = 0,
+ HIFC_CSR_NEED_RESP_DATA = 1
+};
+
+enum HIFC_CSR_API_DATA_DATA_SIZE {
+ HIFC_CSR_DATA_SZ_32 = 0,
+ HIFC_CSR_DATA_SZ_64 = 1
+};
+
+struct hifc_csr_request_api_data {
+ u32 dw0;
+
+ union {
+ struct {
+ u32 reserved1:13;
+ /* this field indicates the write/read data size:
+ * 2'b00: 32 bits
+ * 2'b01: 64 bits
+ * 2'b10~2'b11:reserved
+ */
+ u32 data_size:2;
+			/* this field indicates whether the requestor expects
+			 * to receive response data.
+			 * 1'b0: no response data is expected.
+			 * 1'b1: response data is expected.
+			 */
+ u32 need_response:1;
+			/* this field indicates the operation that the
+			 * requestor expects.
+ * 5'b1_1110: write value to csr space.
+ * 5'b1_1111: read register from csr space.
+ */
+ u32 operation_id:5;
+ u32 reserved2:6;
+ /* this field specifies the Src node ID for this API
+ * request message.
+ */
+ u32 src_node_id:5;
+ } bits;
+
+ u32 val32;
+ } dw1;
+
+ union {
+ struct {
+ /* it specifies the CSR address. */
+ u32 csr_addr:26;
+ u32 reserved3:6;
+ } bits;
+
+ u32 val32;
+ } dw2;
+
+	/* if data_size = 2'b01, this is the high 32 bits of the write data;
+	 * otherwise it is 32'hFFFF_FFFF.
+	 */
+ u32 csr_write_data_h;
+ /* the low 32 bits of write data. */
+ u32 csr_write_data_l;
+};
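+
+/* As done in hifc_api_csr_rd32()/hifc_api_csr_wr32(), dw1.val32 and
+ * dw2.val32 are converted to big endian with cpu_to_be32() after the bit
+ * fields are filled in and before the request is handed to the API cmd
+ * chain.
+ */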
+
+int hifc_sm_ctr_rd32(void *hwdev, u8 node, u8 instance, u32 ctr_id, u32 *value);
+int hifc_sm_ctr_rd64(void *hwdev, u8 node, u8 instance, u32 ctr_id, u64 *value);
+int hifc_sm_ctr_rd64_pair(void *hwdev, u8 node, u8 instance,
+ u32 ctr_id, u64 *value1, u64 *value2);
+
+#endif
+
diff --git a/drivers/scsi/huawei/hifc/hifc_wq.c b/drivers/scsi/huawei/hifc/hifc_wq.c
new file mode 100644
index 000000000000..4e926d140b2c
--- /dev/null
+++ b/drivers/scsi/huawei/hifc/hifc_wq.c
@@ -0,0 +1,624 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei Hifc PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/device.h>
+#include <linux/vmalloc.h>
+#include <linux/types.h>
+#include <linux/atomic.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "hifc_knl_adp.h"
+#include "hifc_hw.h"
+#include "hifc_hwif.h"
+#include "hifc_wq.h"
+
+#define WQS_MAX_NUM_BLOCKS 128
+#define WQS_FREE_BLOCKS_SIZE(wqs) (WQS_MAX_NUM_BLOCKS * \
+ sizeof((wqs)->free_blocks[0]))
+
+static void wqs_return_block(struct hifc_wqs *wqs, u32 page_idx, u32 block_idx)
+{
+ u32 pos;
+
+ spin_lock(&wqs->alloc_blocks_lock);
+
+ wqs->num_free_blks++;
+
+ pos = wqs->return_blk_pos++;
+ pos &= WQS_MAX_NUM_BLOCKS - 1;
+
+ wqs->free_blocks[pos].page_idx = page_idx;
+ wqs->free_blocks[pos].block_idx = block_idx;
+
+ spin_unlock(&wqs->alloc_blocks_lock);
+}
+
+static int wqs_next_block(struct hifc_wqs *wqs, u32 *page_idx,
+ u32 *block_idx)
+{
+ u32 pos;
+
+ spin_lock(&wqs->alloc_blocks_lock);
+
+ if (wqs->num_free_blks <= 0) {
+ spin_unlock(&wqs->alloc_blocks_lock);
+ return -ENOMEM;
+ }
+ wqs->num_free_blks--;
+
+ pos = wqs->alloc_blk_pos++;
+ pos &= WQS_MAX_NUM_BLOCKS - 1;
+
+ *page_idx = wqs->free_blocks[pos].page_idx;
+ *block_idx = wqs->free_blocks[pos].block_idx;
+
+ wqs->free_blocks[pos].page_idx = 0xFFFFFFFF;
+ wqs->free_blocks[pos].block_idx = 0xFFFFFFFF;
+
+ spin_unlock(&wqs->alloc_blocks_lock);
+
+ return 0;
+}
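+
+/* free_blocks[] is used as a circular FIFO of WQS_MAX_NUM_BLOCKS entries:
+ * wqs_next_block() pops an entry at alloc_blk_pos and wqs_return_block()
+ * pushes one back at return_blk_pos, both positions masked by
+ * WQS_MAX_NUM_BLOCKS - 1, while num_free_blks tracks how many blocks are
+ * available.
+ */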
+
+static int queue_alloc_page(void *handle, u64 **vaddr, u64 *paddr,
+ u64 **shadow_vaddr, u64 page_sz)
+{
+ dma_addr_t dma_addr = 0;
+
+ *vaddr = dma_zalloc_coherent(handle, page_sz, &dma_addr,
+ GFP_KERNEL);
+ if (!*vaddr) {
+ sdk_err(handle, "Failed to allocate dma to wqs page\n");
+ return -ENOMEM;
+ }
+
+ if (!ADDR_4K_ALIGNED(dma_addr)) {
+ sdk_err(handle, "Cla is not 4k aligned!\n");
+ goto shadow_vaddr_err;
+ }
+
+ *paddr = (u64)dma_addr;
+
+	/* use vzalloc for large memory; shadow_vaddr is only used during initialization */
+ *shadow_vaddr = vzalloc(page_sz);
+ if (!*shadow_vaddr) {
+ sdk_err(handle, "Failed to allocate shadow page vaddr\n");
+ goto shadow_vaddr_err;
+ }
+
+ return 0;
+
+shadow_vaddr_err:
+ dma_free_coherent(handle, page_sz, *vaddr, dma_addr);
+ return -ENOMEM;
+}
+
+static int wqs_allocate_page(struct hifc_wqs *wqs, u32 page_idx)
+{
+ return queue_alloc_page(wqs->dev_hdl, &wqs->page_vaddr[page_idx],
+ &wqs->page_paddr[page_idx],
+ &wqs->shadow_page_vaddr[page_idx],
+ WQS_PAGE_SIZE);
+}
+
+static void wqs_free_page(struct hifc_wqs *wqs, u32 page_idx)
+{
+ dma_free_coherent(wqs->dev_hdl, WQS_PAGE_SIZE,
+ wqs->page_vaddr[page_idx],
+ (dma_addr_t)wqs->page_paddr[page_idx]);
+ vfree(wqs->shadow_page_vaddr[page_idx]);
+}
+
+static int cmdq_allocate_page(struct hifc_cmdq_pages *cmdq_pages)
+{
+ return queue_alloc_page(cmdq_pages->dev_hdl,
+ &cmdq_pages->cmdq_page_vaddr,
+ &cmdq_pages->cmdq_page_paddr,
+ &cmdq_pages->cmdq_shadow_page_vaddr,
+ CMDQ_PAGE_SIZE);
+}
+
+static void cmdq_free_page(struct hifc_cmdq_pages *cmdq_pages)
+{
+ dma_free_coherent(cmdq_pages->dev_hdl, CMDQ_PAGE_SIZE,
+ cmdq_pages->cmdq_page_vaddr,
+ (dma_addr_t)cmdq_pages->cmdq_page_paddr);
+ vfree(cmdq_pages->cmdq_shadow_page_vaddr);
+}
+
+static int alloc_wqes_shadow(struct hifc_wq *wq)
+{
+ u64 size;
+
+	/* if max_wqe_size is not larger than wqebb_size, no shadow wqe is needed */
+ if (wq->max_wqe_size <= wq->wqebb_size)
+ return 0;
+
+ size = (u64)wq->num_q_pages * wq->max_wqe_size;
+ wq->shadow_wqe = kzalloc(size, GFP_KERNEL);
+ if (!wq->shadow_wqe) {
+ pr_err("Failed to allocate shadow wqe\n");
+ return -ENOMEM;
+ }
+
+ size = wq->num_q_pages * sizeof(wq->prod_idx);
+ wq->shadow_idx = kzalloc(size, GFP_KERNEL);
+ if (!wq->shadow_idx) {
+ pr_err("Failed to allocate shadow index\n");
+ goto shadow_idx_err;
+ }
+
+ return 0;
+
+shadow_idx_err:
+ kfree(wq->shadow_wqe);
+ return -ENOMEM;
+}
+
+static void free_wqes_shadow(struct hifc_wq *wq)
+{
+ if (wq->max_wqe_size <= wq->wqebb_size)
+ return;
+
+ kfree(wq->shadow_idx);
+ kfree(wq->shadow_wqe);
+}
+
+static void free_wq_pages(void *handle, struct hifc_wq *wq,
+ u32 num_q_pages)
+{
+ u32 i;
+
+ for (i = 0; i < num_q_pages; i++)
+ hifc_dma_free_coherent_align(handle, &wq->mem_align[i]);
+
+ free_wqes_shadow(wq);
+
+ wq->block_vaddr = NULL;
+ wq->shadow_block_vaddr = NULL;
+
+ kfree(wq->mem_align);
+}
+
+static int alloc_wq_pages(void *dev_hdl, struct hifc_wq *wq)
+{
+ struct hifc_dma_addr_align *mem_align;
+ u64 *vaddr, *paddr;
+ u32 i, num_q_pages;
+ int err;
+
+ vaddr = wq->shadow_block_vaddr;
+ paddr = wq->block_vaddr;
+
+ num_q_pages = ALIGN(WQ_SIZE(wq), wq->wq_page_size) / wq->wq_page_size;
+ if (num_q_pages > WQ_MAX_PAGES) {
+ sdk_err(dev_hdl, "Number(%d) wq pages exceeds the limit\n",
+ num_q_pages);
+ return -EINVAL;
+ }
+
+ if (num_q_pages & (num_q_pages - 1)) {
+ sdk_err(dev_hdl, "Wq num(%d) q pages must be power of 2\n",
+ num_q_pages);
+ return -EINVAL;
+ }
+
+ wq->num_q_pages = num_q_pages;
+
+ err = alloc_wqes_shadow(wq);
+ if (err) {
+ sdk_err(dev_hdl, "Failed to allocate wqe shadow\n");
+ return err;
+ }
+
+ wq->mem_align = kcalloc(wq->num_q_pages, sizeof(*wq->mem_align),
+ GFP_KERNEL);
+ if (!wq->mem_align) {
+ sdk_err(dev_hdl, "Failed to allocate mem_align\n");
+ free_wqes_shadow(wq);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < num_q_pages; i++) {
+ mem_align = &wq->mem_align[i];
+ err = hifc_dma_zalloc_coherent_align(dev_hdl, wq->wq_page_size,
+ wq->wq_page_size,
+ GFP_KERNEL, mem_align);
+ if (err) {
+ sdk_err(dev_hdl, "Failed to allocate wq page\n");
+ goto alloc_wq_pages_err;
+ }
+
+ *paddr = cpu_to_be64(mem_align->align_paddr);
+ *vaddr = (u64)mem_align->align_vaddr;
+
+ paddr++;
+ vaddr++;
+ }
+
+ return 0;
+
+alloc_wq_pages_err:
+ free_wq_pages(dev_hdl, wq, i);
+
+ return -ENOMEM;
+}
+
+int hifc_wq_allocate(struct hifc_wqs *wqs, struct hifc_wq *wq,
+ u32 wqebb_size, u32 wq_page_size, u16 q_depth,
+ u32 max_wqe_size)
+{
+ u32 num_wqebbs_per_page;
+ int err;
+
+ if (wqebb_size == 0) {
+ sdk_err(wqs->dev_hdl, "Wqebb_size must be >0\n");
+ return -EINVAL;
+ }
+
+ if (q_depth & (q_depth - 1)) {
+ sdk_err(wqs->dev_hdl, "Wq q_depth(%d) isn't power of 2\n",
+ q_depth);
+ return -EINVAL;
+ }
+
+ if (wq_page_size & (wq_page_size - 1)) {
+ sdk_err(wqs->dev_hdl, "Wq page_size(%d) isn't power of 2\n",
+ wq_page_size);
+ return -EINVAL;
+ }
+
+ num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size) / wqebb_size;
+
+ if (num_wqebbs_per_page & (num_wqebbs_per_page - 1)) {
+ sdk_err(wqs->dev_hdl, "Num(%d) wqebbs per page isn't power of 2\n",
+ num_wqebbs_per_page);
+ return -EINVAL;
+ }
+
+ err = wqs_next_block(wqs, &wq->page_idx, &wq->block_idx);
+ if (err) {
+ sdk_err(wqs->dev_hdl, "Failed to get free wqs next block\n");
+ return err;
+ }
+
+ wq->wqebb_size = wqebb_size;
+ wq->wq_page_size = wq_page_size;
+ wq->q_depth = q_depth;
+ wq->max_wqe_size = max_wqe_size;
+ wq->num_wqebbs_per_page = num_wqebbs_per_page;
+
+ wq->wqebbs_per_page_shift = (u32)ilog2(num_wqebbs_per_page);
+
+ wq->block_vaddr = WQ_BASE_VADDR(wqs, wq);
+ wq->shadow_block_vaddr = WQ_BASE_ADDR(wqs, wq);
+ wq->block_paddr = WQ_BASE_PADDR(wqs, wq);
+
+ err = alloc_wq_pages(wqs->dev_hdl, wq);
+ if (err) {
+ sdk_err(wqs->dev_hdl, "Failed to allocate wq pages\n");
+ goto alloc_wq_pages_err;
+ }
+
+ atomic_set(&wq->delta, q_depth);
+ wq->cons_idx = 0;
+ wq->prod_idx = 0;
+ wq->mask = q_depth - 1;
+
+ return 0;
+
+alloc_wq_pages_err:
+ wqs_return_block(wqs, wq->page_idx, wq->block_idx);
+ return err;
+}
+
+void hifc_wq_free(struct hifc_wqs *wqs, struct hifc_wq *wq)
+{
+ free_wq_pages(wqs->dev_hdl, wq, wq->num_q_pages);
+
+ wqs_return_block(wqs, wq->page_idx, wq->block_idx);
+}
+
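+/* Fill the free-block array with every (page, block) pair and reset the
+ * cursors used when wq blocks are handed out and returned.
+ */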
+static void init_wqs_blocks_arr(struct hifc_wqs *wqs)
+{
+ u32 page_idx, blk_idx, pos = 0;
+
+ for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) {
+ for (blk_idx = 0; blk_idx < WQS_BLOCKS_PER_PAGE; blk_idx++) {
+ wqs->free_blocks[pos].page_idx = page_idx;
+ wqs->free_blocks[pos].block_idx = blk_idx;
+ pos++;
+ }
+ }
+
+ wqs->alloc_blk_pos = 0;
+ wqs->return_blk_pos = 0;
+ wqs->num_free_blks = WQS_MAX_NUM_BLOCKS;
+ spin_lock_init(&wqs->alloc_blocks_lock);
+}
+
+void hifc_wq_wqe_pg_clear(struct hifc_wq *wq)
+{
+ u64 *block_vaddr;
+ u32 pg_idx;
+
+ block_vaddr = wq->shadow_block_vaddr;
+
+ atomic_set(&wq->delta, wq->q_depth);
+ wq->cons_idx = 0;
+ wq->prod_idx = 0;
+
+ for (pg_idx = 0; pg_idx < wq->num_q_pages; pg_idx++)
+ memset((void *)(*(block_vaddr + pg_idx)), 0, wq->wq_page_size);
+}
+
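+/**
+ * hifc_cmdq_alloc - allocate the work queues used by the command queues
+ * @cmdq_pages: cmdq page that provides one block per command queue
+ * @wq: array of @cmdq_blocks work queues to initialize
+ * @dev_hdl: device handle used for DMA allocations
+ * @cmdq_blocks: number of command queues
+ * @wq_page_size: queue page size
+ * @wqebb_size: size of a work queue element basic block
+ * @q_depth: queue depth in wqebbs, must be a power of 2
+ * @max_wqe_size: largest wqe posted on a command queue
+ *
+ * Return: 0 on success, negative error code on failure.
+ */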
+int hifc_cmdq_alloc(struct hifc_cmdq_pages *cmdq_pages,
+ struct hifc_wq *wq, void *dev_hdl,
+ int cmdq_blocks, u32 wq_page_size, u32 wqebb_size,
+ u16 q_depth, u32 max_wqe_size)
+{
+ int i, j, err = -ENOMEM;
+
+ if (q_depth & (q_depth - 1)) {
+ sdk_err(dev_hdl, "Cmdq q_depth(%d) isn't power of 2\n",
+ q_depth);
+ return -EINVAL;
+ }
+
+ cmdq_pages->dev_hdl = dev_hdl;
+
+ err = cmdq_allocate_page(cmdq_pages);
+ if (err) {
+ sdk_err(dev_hdl, "Failed to allocate CMDQ page\n");
+ return err;
+ }
+
+ for (i = 0; i < cmdq_blocks; i++) {
+ wq[i].page_idx = 0;
+ wq[i].block_idx = (u32)i;
+ wq[i].wqebb_size = wqebb_size;
+ wq[i].wq_page_size = wq_page_size;
+ wq[i].q_depth = q_depth;
+ wq[i].max_wqe_size = max_wqe_size;
+ wq[i].num_wqebbs_per_page =
+ ALIGN(wq_page_size, wqebb_size) / wqebb_size;
+
+ wq[i].wqebbs_per_page_shift =
+ (u32)ilog2(wq[i].num_wqebbs_per_page);
+
+ wq[i].block_vaddr = CMDQ_BASE_VADDR(cmdq_pages, &wq[i]);
+ wq[i].shadow_block_vaddr = CMDQ_BASE_ADDR(cmdq_pages, &wq[i]);
+ wq[i].block_paddr = CMDQ_BASE_PADDR(cmdq_pages, &wq[i]);
+
+ err = alloc_wq_pages(cmdq_pages->dev_hdl, &wq[i]);
+ if (err) {
+ sdk_err(dev_hdl, "Failed to alloc CMDQ blocks\n");
+ goto cmdq_block_err;
+ }
+
+ atomic_set(&wq[i].delta, q_depth);
+ wq[i].cons_idx = 0;
+ wq[i].prod_idx = 0;
+ wq[i].mask = q_depth - 1;
+ }
+
+ return 0;
+
+cmdq_block_err:
+ for (j = 0; j < i; j++)
+ free_wq_pages(cmdq_pages->dev_hdl, &wq[j], wq[j].num_q_pages);
+
+ cmdq_free_page(cmdq_pages);
+ return err;
+}
+
+void hifc_cmdq_free(struct hifc_cmdq_pages *cmdq_pages,
+ struct hifc_wq *wq, int cmdq_blocks)
+{
+ int i;
+
+ for (i = 0; i < cmdq_blocks; i++)
+ free_wq_pages(cmdq_pages->dev_hdl, &wq[i], wq[i].num_q_pages);
+
+ cmdq_free_page(cmdq_pages);
+}
+
+static int alloc_page_addr(struct hifc_wqs *wqs)
+{
+ u64 size = wqs->num_pages * sizeof(*wqs->page_paddr);
+
+ wqs->page_paddr = kzalloc(size, GFP_KERNEL);
+ if (!wqs->page_paddr)
+ return -ENOMEM;
+
+ size = wqs->num_pages * sizeof(*wqs->page_vaddr);
+ wqs->page_vaddr = kzalloc(size, GFP_KERNEL);
+ if (!wqs->page_vaddr)
+ goto page_vaddr_err;
+
+ size = wqs->num_pages * sizeof(*wqs->shadow_page_vaddr);
+ wqs->shadow_page_vaddr = kzalloc(size, GFP_KERNEL);
+ if (!wqs->shadow_page_vaddr)
+ goto page_shadow_vaddr_err;
+
+ return 0;
+
+page_shadow_vaddr_err:
+ kfree(wqs->page_vaddr);
+
+page_vaddr_err:
+ kfree(wqs->page_paddr);
+ return -ENOMEM;
+}
+
+static void free_page_addr(struct hifc_wqs *wqs)
+{
+ kfree(wqs->shadow_page_vaddr);
+ kfree(wqs->page_vaddr);
+ kfree(wqs->page_paddr);
+}
+
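+/**
+ * hifc_wqs_alloc - allocate a set of work queue pages and the free-block table
+ * @wqs: wq set to initialize
+ * @num_wqs: number of work queues the set must be able to hold
+ * @dev_hdl: device handle used for DMA allocations
+ *
+ * Return: 0 on success, negative error code on failure.
+ */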
+int hifc_wqs_alloc(struct hifc_wqs *wqs, int num_wqs, void *dev_hdl)
+{
+ u32 i, page_idx;
+ int err;
+
+ wqs->dev_hdl = dev_hdl;
+ wqs->num_pages = WQ_NUM_PAGES(num_wqs);
+
+ if (alloc_page_addr(wqs)) {
+ sdk_err(dev_hdl, "Failed to allocate mem for page addresses\n");
+ return -ENOMEM;
+ }
+
+ for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) {
+ err = wqs_allocate_page(wqs, page_idx);
+ if (err) {
+ sdk_err(dev_hdl, "Failed wq page allocation\n");
+ goto wq_allocate_page_err;
+ }
+ }
+
+ wqs->free_blocks = kzalloc(WQS_FREE_BLOCKS_SIZE(wqs), GFP_KERNEL);
+ if (!wqs->free_blocks) {
+ err = -ENOMEM;
+ goto alloc_blocks_err;
+ }
+
+ init_wqs_blocks_arr(wqs);
+ return 0;
+
+alloc_blocks_err:
+wq_allocate_page_err:
+ for (i = 0; i < page_idx; i++)
+ wqs_free_page(wqs, i);
+
+ free_page_addr(wqs);
+ return err;
+}
+
+void hifc_wqs_free(struct hifc_wqs *wqs)
+{
+ u32 page_idx;
+
+ for (page_idx = 0; page_idx < wqs->num_pages; page_idx++)
+ wqs_free_page(wqs, page_idx);
+
+ free_page_addr(wqs);
+ kfree(wqs->free_blocks);
+}
+
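+/* Gather the wqebbs of one wqe, which may be spread over two queue pages,
+ * into the contiguous shadow buffer so the caller can read it in one piece.
+ */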
+static void copy_wqe_to_shadow(struct hifc_wq *wq, void *shadow_addr,
+ int num_wqebbs, u16 prod_idx)
+{
+ u8 *shadow_wqebb_addr, *wqe_page_addr, *wqebb_addr;
+ u32 i, offset;
+ u16 idx;
+
+ for (i = 0; i < (u32)num_wqebbs; i++) {
+ offset = i * wq->wqebb_size;
+ shadow_wqebb_addr = (u8 *)shadow_addr + offset;
+
+ idx = MASKED_WQE_IDX(wq, prod_idx + i);
+ wqe_page_addr = WQ_PAGE_ADDR(wq, idx);
+ wqebb_addr = wqe_page_addr +
+ WQE_PAGE_OFF(wq, MASKED_WQE_IDX(wq, idx));
+
+ memcpy(shadow_wqebb_addr, wqebb_addr, wq->wqebb_size);
+ }
+}
+
+void *hifc_get_wqebb_addr(struct hifc_wq *wq, u16 index)
+{
+ return WQ_PAGE_ADDR(wq, index) + WQE_PAGE_OFF(wq, index);
+}
+
+u64 hifc_get_first_wqe_page_addr(struct hifc_wq *wq)
+{
+ return be64_to_cpu(*wq->block_vaddr);
+}
+
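+/**
+ * hifc_get_wqe - reserve wqebbs for a new wqe and return its address
+ * @wq: work queue to reserve from
+ * @num_wqebbs: number of wqebbs the wqe occupies
+ * @prod_idx: returned masked producer index of the first wqebb
+ *
+ * Return: pointer to the wqe, a pointer into the shadow buffer when the wqe
+ * wraps over a page boundary, or NULL if the queue is full.
+ */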
+void *hifc_get_wqe(struct hifc_wq *wq, int num_wqebbs, u16 *prod_idx)
+{
+ u32 curr_pg, end_pg;
+ u16 curr_prod_idx, end_prod_idx;
+
+ if (atomic_sub_return(num_wqebbs, &wq->delta) < 0) {
+ atomic_add(num_wqebbs, &wq->delta);
+ return NULL;
+ }
+
+ /* use the original (unmasked) cur_pi and end_pi; no queue-depth mask is
+ * needed here because WQE_PAGE_NUM already masks with the number of
+ * queue pages
+ */
+ curr_prod_idx = (u16)wq->prod_idx;
+ wq->prod_idx += num_wqebbs;
+
+ /* the end prod index must point to the last wqebb of the wqe,
+ * therefore minus 1
+ */
+ end_prod_idx = (u16)wq->prod_idx - 1;
+
+ curr_pg = WQE_PAGE_NUM(wq, curr_prod_idx);
+ end_pg = WQE_PAGE_NUM(wq, end_prod_idx);
+
+ *prod_idx = MASKED_WQE_IDX(wq, curr_prod_idx);
+
+ /* even if the queue has only one page, the shadow wqe is still needed
+ * when the wqe wraps around the end of a page
+ */
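+ /* worked example (illustrative numbers only): with 64 wqebbs per page
+ * and curr_prod_idx = 63, a 2-wqebb wqe ends at index 64, i.e. on the
+ * next page (or back at index 0 on a single-page queue), so the caller
+ * gets the contiguous shadow buffer instead of a pointer into the
+ * queue pages
+ */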
+ if (curr_pg != end_pg || MASKED_WQE_IDX(wq, end_prod_idx) < *prod_idx) {
+ u32 offset = curr_pg * wq->max_wqe_size;
+ u8 *shadow_addr = wq->shadow_wqe + offset;
+
+ wq->shadow_idx[curr_pg] = *prod_idx;
+ return shadow_addr;
+ }
+
+ return WQ_PAGE_ADDR(wq, *prod_idx) + WQE_PAGE_OFF(wq, *prod_idx);
+}
+
+void hifc_put_wqe(struct hifc_wq *wq, int num_wqebbs)
+{
+ atomic_add(num_wqebbs, &wq->delta);
+ wq->cons_idx += num_wqebbs;
+}
+
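+/**
+ * hifc_read_wqe - return the wqe at the current consumer index
+ * @wq: work queue to read from
+ * @num_wqebbs: number of wqebbs the wqe occupies
+ * @cons_idx: returned masked consumer index of the first wqebb
+ *
+ * Return: pointer to the wqe, copied into the shadow buffer when it spans
+ * two pages, or NULL if fewer than @num_wqebbs wqebbs are outstanding.
+ */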
+void *hifc_read_wqe(struct hifc_wq *wq, int num_wqebbs, u16 *cons_idx)
+{
+ u32 curr_pg, end_pg;
+ u16 curr_cons_idx, end_cons_idx;
+
+ if ((atomic_read(&wq->delta) + num_wqebbs) > wq->q_depth)
+ return NULL;
+
+ curr_cons_idx = (u16)wq->cons_idx;
+
+ curr_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx);
+ end_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx + num_wqebbs - 1);
+
+ curr_pg = WQE_PAGE_NUM(wq, curr_cons_idx);
+ end_pg = WQE_PAGE_NUM(wq, end_cons_idx);
+
+ *cons_idx = curr_cons_idx;
+
+ if (curr_pg != end_pg) {
+ u32 offset = curr_pg * wq->max_wqe_size;
+ u8 *shadow_addr = wq->shadow_wqe + offset;
+
+ copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *cons_idx);
+
+ return shadow_addr;
+ }
+
+ return WQ_PAGE_ADDR(wq, *cons_idx) + WQE_PAGE_OFF(wq, *cons_idx);
+}
diff --git a/drivers/scsi/huawei/hifc/hifc_wq.h b/drivers/scsi/huawei/hifc/hifc_wq.h
new file mode 100644
index 000000000000..207d54191afa
--- /dev/null
+++ b/drivers/scsi/huawei/hifc/hifc_wq.h
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei Hifc PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ */
+
+#ifndef HIFC_WQ_H
+#define HIFC_WQ_H
+
+#define WQS_BLOCKS_PER_PAGE 4
+#define WQ_SIZE(wq) ((u32)((u64)(wq)->q_depth * (wq)->wqebb_size))
+
+#define WQE_PAGE_NUM(wq, idx) (((idx) >> ((wq)->wqebbs_per_page_shift)) & \
+ ((wq)->num_q_pages - 1))
+
+#define WQE_PAGE_OFF(wq, idx) ((u64)((wq)->wqebb_size) * \
+ ((idx) & ((wq)->num_wqebbs_per_page - 1)))
+
+#define WQ_PAGE_ADDR_SIZE sizeof(u64)
+#define WQ_PAGE_ADDR_SIZE_SHIFT 3
+#define WQ_PAGE_ADDR(wq, idx) \
+ (u8 *)(*(u64 *)((u64)((wq)->shadow_block_vaddr) + \
+ (WQE_PAGE_NUM(wq, idx) << WQ_PAGE_ADDR_SIZE_SHIFT)))
+
+#define WQ_BLOCK_SIZE 4096UL
+#define WQS_PAGE_SIZE (WQS_BLOCKS_PER_PAGE * WQ_BLOCK_SIZE)
+#define WQ_MAX_PAGES (WQ_BLOCK_SIZE >> WQ_PAGE_ADDR_SIZE_SHIFT)
+
+#define CMDQ_BLOCKS_PER_PAGE 8
+#define CMDQ_BLOCK_SIZE 512UL
+#define CMDQ_PAGE_SIZE ALIGN((CMDQ_BLOCKS_PER_PAGE * \
+ CMDQ_BLOCK_SIZE), PAGE_SIZE)
+
+#define ADDR_4K_ALIGNED(addr) (((addr) & 0xfff) == 0)
+
+#define WQ_BASE_VADDR(wqs, wq) \
+ (u64 *)(((u64)((wqs)->page_vaddr[(wq)->page_idx])) \
+ + (wq)->block_idx * WQ_BLOCK_SIZE)
+
+#define WQ_BASE_PADDR(wqs, wq) (((wqs)->page_paddr[(wq)->page_idx]) \
+ + (u64)(wq)->block_idx * WQ_BLOCK_SIZE)
+
+#define WQ_BASE_ADDR(wqs, wq) \
+ (u64 *)(((u64)((wqs)->shadow_page_vaddr[(wq)->page_idx])) \
+ + (wq)->block_idx * WQ_BLOCK_SIZE)
+
+#define CMDQ_BASE_VADDR(cmdq_pages, wq) \
+ (u64 *)(((u64)((cmdq_pages)->cmdq_page_vaddr)) \
+ + (wq)->block_idx * CMDQ_BLOCK_SIZE)
+
+#define CMDQ_BASE_PADDR(cmdq_pages, wq) \
+ (((u64)((cmdq_pages)->cmdq_page_paddr)) \
+ + (u64)(wq)->block_idx * CMDQ_BLOCK_SIZE)
+
+#define CMDQ_BASE_ADDR(cmdq_pages, wq) \
+ (u64 *)(((u64)((cmdq_pages)->cmdq_shadow_page_vaddr)) \
+ + (wq)->block_idx * CMDQ_BLOCK_SIZE)
+
+#define MASKED_WQE_IDX(wq, idx) ((idx) & (wq)->mask)
+
+#define WQ_NUM_PAGES(num_wqs) \
+ (ALIGN((u32)(num_wqs), WQS_BLOCKS_PER_PAGE) / WQS_BLOCKS_PER_PAGE)
+
+#define MAX_WQE_SIZE(max_sge, wqebb_size) \
+ (((max_sge) <= 2) ? (wqebb_size) : \
+ ((ALIGN(((max_sge) - 2), 4) / 4 + 1) * (wqebb_size)))
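+/* e.g. (assuming an illustrative 64-byte wqebb): for max_sge = 6,
+ * ALIGN(6 - 2, 4) / 4 + 1 = 2, so MAX_WQE_SIZE is two wqebbs, 128 bytes
+ */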
+
+struct hifc_free_block {
+ u32 page_idx;
+ u32 block_idx;
+};
+
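+/* Per-queue state: block_vaddr points at the hardware-visible table of
+ * big-endian page physical addresses inside the wq block, while
+ * shadow_block_vaddr holds the matching kernel virtual addresses for the
+ * driver's own use.
+ */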
+struct hifc_wq {
+ /* The addresses are 64 bit in the HW */
+ u64 block_paddr;
+ u64 *shadow_block_vaddr;
+ u64 *block_vaddr;
+
+ u32 wqebb_size;
+ u32 wq_page_size;
+ u16 q_depth;
+ u32 max_wqe_size;
+ u32 num_wqebbs_per_page;
+
+ /* performance: replace multiply/divide with shifts;
+ * num_wqebbs_per_page must be a power of 2
+ */
+ u32 wqebbs_per_page_shift;
+ u32 page_idx;
+ u32 block_idx;
+
+ u32 num_q_pages;
+
+ struct hifc_dma_addr_align *mem_align;
+
+ int cons_idx;
+ int prod_idx;
+
+ atomic_t delta;
+ u16 mask;
+
+ u8 *shadow_wqe;
+ u16 *shadow_idx;
+};
+
+struct hifc_cmdq_pages {
+ /* The addresses are 64 bit in the HW */
+ u64 cmdq_page_paddr;
+ u64 *cmdq_page_vaddr;
+ u64 *cmdq_shadow_page_vaddr;
+
+ void *dev_hdl;
+};
+
+struct hifc_wqs {
+ /* The addresses are 64 bit in the HW */
+ u64 *page_paddr;
+ u64 **page_vaddr;
+ u64 **shadow_page_vaddr;
+
+ struct hifc_free_block *free_blocks;
+ u32 alloc_blk_pos;
+ u32 return_blk_pos;
+ int num_free_blks;
+
+ /* for allocate blocks */
+ spinlock_t alloc_blocks_lock;
+
+ u32 num_pages;
+
+ void *dev_hdl;
+};
+
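+/* Typical call order (a sketch only; dev_hdl and the sizes used below are
+ * illustrative, not values required by this header):
+ *
+ *	hifc_wqs_alloc(&wqs, 1, dev_hdl);
+ *	hifc_wq_allocate(&wqs, &wq, 64, PAGE_SIZE, 256, 128);
+ *	wqe = hifc_get_wqe(&wq, 2, &prod_idx);	(reserve two wqebbs)
+ *	...
+ *	hifc_put_wqe(&wq, 2);			(release them on completion)
+ *	hifc_wq_free(&wqs, &wq);
+ *	hifc_wqs_free(&wqs);
+ */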
+void hifc_wq_wqe_pg_clear(struct hifc_wq *wq);
+
+int hifc_cmdq_alloc(struct hifc_cmdq_pages *cmdq_pages,
+ struct hifc_wq *wq, void *dev_hdl,
+ int cmdq_blocks, u32 wq_page_size, u32 wqebb_size,
+ u16 q_depth, u32 max_wqe_size);
+
+void hifc_cmdq_free(struct hifc_cmdq_pages *cmdq_pages,
+ struct hifc_wq *wq, int cmdq_blocks);
+
+int hifc_wqs_alloc(struct hifc_wqs *wqs, int num_wqs, void *dev_hdl);
+
+void hifc_wqs_free(struct hifc_wqs *wqs);
+
+int hifc_wq_allocate(struct hifc_wqs *wqs, struct hifc_wq *wq,
+ u32 wqebb_size, u32 wq_page_size, u16 q_depth,
+ u32 max_wqe_size);
+
+void hifc_wq_free(struct hifc_wqs *wqs, struct hifc_wq *wq);
+
+void *hifc_get_wqebb_addr(struct hifc_wq *wq, u16 index);
+
+u64 hifc_get_first_wqe_page_addr(struct hifc_wq *wq);
+
+void *hifc_get_wqe(struct hifc_wq *wq, int num_wqebbs, u16 *prod_idx);
+
+void hifc_put_wqe(struct hifc_wq *wq, int num_wqebbs);
+
+void *hifc_read_wqe(struct hifc_wq *wq, int num_wqebbs, u16 *cons_idx);
+
+#endif
+
--
2.25.1